OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #include "src/base/platform/platform.h" | 7 #include "src/base/platform/platform.h" |
8 #include "src/full-codegen.h" | 8 #include "src/full-codegen.h" |
9 #include "src/heap/mark-compact.h" | 9 #include "src/heap/mark-compact.h" |
10 #include "src/macro-assembler.h" | 10 #include "src/macro-assembler.h" |
(...skipping 29 matching lines...) |
40 HeapObjectCallback size_func) { | 40 HeapObjectCallback size_func) { |
41 Space* owner = page->owner(); | 41 Space* owner = page->owner(); |
42 DCHECK(owner == page->heap()->old_pointer_space() || | 42 DCHECK(owner == page->heap()->old_pointer_space() || |
43 owner == page->heap()->old_data_space() || | 43 owner == page->heap()->old_data_space() || |
44 owner == page->heap()->map_space() || | 44 owner == page->heap()->map_space() || |
45 owner == page->heap()->cell_space() || | 45 owner == page->heap()->cell_space() || |
46 owner == page->heap()->property_cell_space() || | 46 owner == page->heap()->property_cell_space() || |
47 owner == page->heap()->code_space()); | 47 owner == page->heap()->code_space()); |
48 Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(), | 48 Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(), |
49 page->area_end(), kOnePageOnly, size_func); | 49 page->area_end(), kOnePageOnly, size_func); |
50 DCHECK(page->WasSweptPrecisely() || | 50 DCHECK(page->WasSwept() || page->SweepingCompleted()); |
51 (static_cast<PagedSpace*>(owner)->swept_precisely() && | |
52 page->SweepingCompleted())); | |
53 } | 51 } |
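The single-page constructor above pins the iterator to one page (kOnePageOnly) and, after this change, only asserts that the page was swept or that its concurrent sweeping has completed. As a rough usage sketch, assuming the same HeapObjectIterator interface that PagedSpace::FindObject uses further down in this file (a NULL size callback and Next() returning NULL at the end of the page):

// Illustrative only: walk every object on a single page, mirroring the
// iteration pattern PagedSpace::FindObject uses later in this file.
void VisitPageObjects(Page* page) {
  HeapObjectIterator it(page, NULL);  // kOnePageOnly: never leaves this page.
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    obj->ShortPrint();  // Any per-object processing goes here.
    PrintF("\n");
  }
}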
54 | 52 |
55 | 53 |
56 void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end, | 54 void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end, |
57 HeapObjectIterator::PageMode mode, | 55 HeapObjectIterator::PageMode mode, |
58 HeapObjectCallback size_f) { | 56 HeapObjectCallback size_f) { |
59 // Check that we actually can iterate this space. | |
60 DCHECK(space->swept_precisely()); | |
61 | |
62 space_ = space; | 57 space_ = space; |
63 cur_addr_ = cur; | 58 cur_addr_ = cur; |
64 cur_end_ = end; | 59 cur_end_ = end; |
65 page_mode_ = mode; | 60 page_mode_ = mode; |
66 size_func_ = size_f; | 61 size_func_ = size_f; |
67 } | 62 } |
68 | 63 |
69 | 64 |
70 // We have hit the end of the page and should advance to the next block of | 65 // We have hit the end of the page and should advance to the next block of |
71 // objects. This happens at the end of the page. | 66 // objects. This happens at the end of the page. |
72 bool HeapObjectIterator::AdvanceToNextPage() { | 67 bool HeapObjectIterator::AdvanceToNextPage() { |
73 DCHECK(cur_addr_ == cur_end_); | 68 DCHECK(cur_addr_ == cur_end_); |
74 if (page_mode_ == kOnePageOnly) return false; | 69 if (page_mode_ == kOnePageOnly) return false; |
75 Page* cur_page; | 70 Page* cur_page; |
76 if (cur_addr_ == NULL) { | 71 if (cur_addr_ == NULL) { |
77 cur_page = space_->anchor(); | 72 cur_page = space_->anchor(); |
78 } else { | 73 } else { |
79 cur_page = Page::FromAddress(cur_addr_ - 1); | 74 cur_page = Page::FromAddress(cur_addr_ - 1); |
80 DCHECK(cur_addr_ == cur_page->area_end()); | 75 DCHECK(cur_addr_ == cur_page->area_end()); |
81 } | 76 } |
82 cur_page = cur_page->next_page(); | 77 cur_page = cur_page->next_page(); |
83 if (cur_page == space_->anchor()) return false; | 78 if (cur_page == space_->anchor()) return false; |
84 cur_addr_ = cur_page->area_start(); | 79 cur_addr_ = cur_page->area_start(); |
85 cur_end_ = cur_page->area_end(); | 80 cur_end_ = cur_page->area_end(); |
86 DCHECK(cur_page->WasSweptPrecisely() || | 81 DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted()); |
87 (static_cast<PagedSpace*>(cur_page->owner())->swept_precisely() && | |
88 cur_page->SweepingCompleted())); | |
89 return true; | 82 return true; |
90 } | 83 } |
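AdvanceToNextPage treats the space's page list as a circular list with the anchor page acting as a sentinel: starting from the page that ends at cur_addr_ (or from the anchor when iteration has not yet begun), it steps to next_page() and stops once it reaches the anchor again, meaning every real page has been visited. A standalone sketch of that sentinel pattern, using simplified stand-in types rather than V8's Page/Space classes:

#include <cstdio>

// Minimal stand-in for a page: payload plus a pointer to the next node in a
// circular list whose head is a payload-free sentinel ("anchor").
struct Node {
  int area_size;
  Node* next;
};

// Visits every real node exactly once; reaching the sentinel again means the
// whole list has been traversed, just like AdvanceToNextPage above.
int TotalArea(Node* anchor) {
  int total = 0;
  for (Node* n = anchor->next; n != anchor; n = n->next) {
    total += n->area_size;
  }
  return total;
}

int main() {
  Node anchor{0, nullptr}, a{10, nullptr}, b{20, nullptr};
  anchor.next = &a;
  a.next = &b;
  b.next = &anchor;  // Close the circle through the sentinel.
  std::printf("total area = %d\n", TotalArea(&anchor));  // Prints 30.
  return 0;
}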
91 | 84 |
92 | 85 |
93 // ----------------------------------------------------------------------------- | 86 // ----------------------------------------------------------------------------- |
94 // CodeRange | 87 // CodeRange |
95 | 88 |
96 | 89 |
97 CodeRange::CodeRange(Isolate* isolate) | 90 CodeRange::CodeRange(Isolate* isolate) |
98 : isolate_(isolate), | 91 : isolate_(isolate), |
(...skipping 353 matching lines...) |
452 chunk->high_water_mark_ = static_cast<int>(area_start - base); | 445 chunk->high_water_mark_ = static_cast<int>(area_start - base); |
453 chunk->set_parallel_sweeping(SWEEPING_DONE); | 446 chunk->set_parallel_sweeping(SWEEPING_DONE); |
454 chunk->available_in_small_free_list_ = 0; | 447 chunk->available_in_small_free_list_ = 0; |
455 chunk->available_in_medium_free_list_ = 0; | 448 chunk->available_in_medium_free_list_ = 0; |
456 chunk->available_in_large_free_list_ = 0; | 449 chunk->available_in_large_free_list_ = 0; |
457 chunk->available_in_huge_free_list_ = 0; | 450 chunk->available_in_huge_free_list_ = 0; |
458 chunk->non_available_small_blocks_ = 0; | 451 chunk->non_available_small_blocks_ = 0; |
459 chunk->ResetLiveBytes(); | 452 chunk->ResetLiveBytes(); |
460 Bitmap::Clear(chunk); | 453 Bitmap::Clear(chunk); |
461 chunk->initialize_scan_on_scavenge(false); | 454 chunk->initialize_scan_on_scavenge(false); |
462 chunk->SetFlag(WAS_SWEPT_PRECISELY); | 455 chunk->SetFlag(WAS_SWEPT); |
463 | 456 |
464 DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); | 457 DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); |
465 DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); | 458 DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); |
466 | 459 |
467 if (executable == EXECUTABLE) { | 460 if (executable == EXECUTABLE) { |
468 chunk->SetFlag(IS_EXECUTABLE); | 461 chunk->SetFlag(IS_EXECUTABLE); |
469 } | 462 } |
470 | 463 |
471 if (owner == heap->old_data_space()) { | 464 if (owner == heap->old_data_space()) { |
472 chunk->SetFlag(CONTAINS_ONLY_DATA); | 465 chunk->SetFlag(CONTAINS_ONLY_DATA); |
(...skipping 406 matching lines...) |
879 } | 872 } |
880 | 873 |
881 | 874 |
882 // ----------------------------------------------------------------------------- | 875 // ----------------------------------------------------------------------------- |
883 // PagedSpace implementation | 876 // PagedSpace implementation |
884 | 877 |
885 PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id, | 878 PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id, |
886 Executability executable) | 879 Executability executable) |
887 : Space(heap, id, executable), | 880 : Space(heap, id, executable), |
888 free_list_(this), | 881 free_list_(this), |
889 swept_precisely_(true), | |
890 unswept_free_bytes_(0), | 882 unswept_free_bytes_(0), |
891 end_of_unswept_pages_(NULL), | 883 end_of_unswept_pages_(NULL), |
892 emergency_memory_(NULL) { | 884 emergency_memory_(NULL) { |
893 if (id == CODE_SPACE) { | 885 if (id == CODE_SPACE) { |
894 area_size_ = heap->isolate()->memory_allocator()->CodePageAreaSize(); | 886 area_size_ = heap->isolate()->memory_allocator()->CodePageAreaSize(); |
895 } else { | 887 } else { |
896 area_size_ = Page::kPageSize - Page::kObjectStartOffset; | 888 area_size_ = Page::kPageSize - Page::kObjectStartOffset; |
897 } | 889 } |
898 max_capacity_ = | 890 max_capacity_ = |
899 (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) * AreaSize(); | 891 (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) * AreaSize(); |
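The assignment above first rounds the requested max_capacity down to a whole number of pages and then re-expresses that page count in usable object-area bytes, since AreaSize() excludes the per-page header. A worked example with made-up constants (the real values of Page::kPageSize and the page header size are defined in V8's headers, not shown here):

#include <cstdio>
#include <cstdint>

int main() {
  // Hypothetical sizes, purely for illustration.
  const intptr_t kPageSize = 1024 * 1024;  // assume 1 MB pages
  const intptr_t kHeaderSize = 4 * 1024;   // assume a 4 KB page header
  const intptr_t kAreaSize = kPageSize - kHeaderSize;

  intptr_t max_capacity = 10 * kPageSize + 12345;  // requested capacity
  // Same arithmetic as the constructor: whole pages times usable area.
  intptr_t pages = max_capacity / kPageSize;  // RoundDown(...) / kPageSize
  intptr_t capacity_in_area_bytes = pages * kAreaSize;

  std::printf("%ld pages -> %ld usable bytes\n",
              static_cast<long>(pages),
              static_cast<long>(capacity_in_area_bytes));  // 10 pages
  return 0;
}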
(...skipping 29 matching lines...) |
929 size_t size = 0; | 921 size_t size = 0; |
930 PageIterator it(this); | 922 PageIterator it(this); |
931 while (it.has_next()) { | 923 while (it.has_next()) { |
932 size += it.next()->CommittedPhysicalMemory(); | 924 size += it.next()->CommittedPhysicalMemory(); |
933 } | 925 } |
934 return size; | 926 return size; |
935 } | 927 } |
936 | 928 |
937 | 929 |
938 Object* PagedSpace::FindObject(Address addr) { | 930 Object* PagedSpace::FindObject(Address addr) { |
939 // Note: this function can only be called on precisely swept spaces. | 931 // Note: this function can only be called on iterable spaces. |
940 DCHECK(!heap()->mark_compact_collector()->in_use()); | 932 DCHECK(!heap()->mark_compact_collector()->in_use()); |
941 | 933 |
942 if (!Contains(addr)) return Smi::FromInt(0); // Signaling not found. | 934 if (!Contains(addr)) return Smi::FromInt(0); // Signaling not found. |
943 | 935 |
944 Page* p = Page::FromAddress(addr); | 936 Page* p = Page::FromAddress(addr); |
945 HeapObjectIterator it(p, NULL); | 937 HeapObjectIterator it(p, NULL); |
946 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | 938 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
947 Address cur = obj->address(); | 939 Address cur = obj->address(); |
948 Address next = cur + obj->Size(); | 940 Address next = cur + obj->Size(); |
949 if ((cur <= addr) && (addr < next)) return obj; | 941 if ((cur <= addr) && (addr < next)) return obj; |
(...skipping 172 matching lines...) |
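The loop above locates the object covering addr with a half-open interval test (cur <= addr < cur + obj->Size()), after first returning Smi::FromInt(0) as a "not found" signal when addr is not in this space at all. A standalone sketch of the same containment test over plain arrays rather than heap objects:

#include <cstddef>
#include <cstdio>
#include <cstdint>

// Returns the index of the object whose [start, start + size) range contains
// addr, or -1 when nothing covers it (the same half-open test used above).
int FindCovering(const uintptr_t* starts, const size_t* sizes, int count,
                 uintptr_t addr) {
  for (int i = 0; i < count; i++) {
    uintptr_t cur = starts[i];
    uintptr_t next = cur + sizes[i];
    if (cur <= addr && addr < next) return i;
  }
  return -1;
}

int main() {
  const uintptr_t starts[] = {0x1000, 0x1040, 0x10a0};
  const size_t sizes[] = {0x40, 0x60, 0x20};
  std::printf("%d\n", FindCovering(starts, sizes, 3, 0x1050));  // prints 1
  return 0;
}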
1122 emergency_memory_ = NULL; | 1114 emergency_memory_ = NULL; |
1123 } | 1115 } |
1124 | 1116 |
1125 | 1117 |
1126 #ifdef DEBUG | 1118 #ifdef DEBUG |
1127 void PagedSpace::Print() {} | 1119 void PagedSpace::Print() {} |
1128 #endif | 1120 #endif |
1129 | 1121 |
1130 #ifdef VERIFY_HEAP | 1122 #ifdef VERIFY_HEAP |
1131 void PagedSpace::Verify(ObjectVisitor* visitor) { | 1123 void PagedSpace::Verify(ObjectVisitor* visitor) { |
1132 // We can only iterate over the pages if they were swept precisely. | |
1133 if (!swept_precisely_) return; | |
1134 | |
1135 bool allocation_pointer_found_in_space = | 1124 bool allocation_pointer_found_in_space = |
1136 (allocation_info_.top() == allocation_info_.limit()); | 1125 (allocation_info_.top() == allocation_info_.limit()); |
1137 PageIterator page_iterator(this); | 1126 PageIterator page_iterator(this); |
1138 while (page_iterator.has_next()) { | 1127 while (page_iterator.has_next()) { |
1139 Page* page = page_iterator.next(); | 1128 Page* page = page_iterator.next(); |
1140 CHECK(page->owner() == this); | 1129 CHECK(page->owner() == this); |
1141 if (page == Page::FromAllocationTop(allocation_info_.top())) { | 1130 if (page == Page::FromAllocationTop(allocation_info_.top())) { |
1142 allocation_pointer_found_in_space = true; | 1131 allocation_pointer_found_in_space = true; |
1143 } | 1132 } |
1144 CHECK(page->WasSweptPrecisely()); | 1133 CHECK(page->WasSwept()); |
1145 HeapObjectIterator it(page, NULL); | 1134 HeapObjectIterator it(page, NULL); |
1146 Address end_of_previous_object = page->area_start(); | 1135 Address end_of_previous_object = page->area_start(); |
1147 Address top = page->area_end(); | 1136 Address top = page->area_end(); |
1148 int black_size = 0; | 1137 int black_size = 0; |
1149 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { | 1138 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { |
1150 CHECK(end_of_previous_object <= object->address()); | 1139 CHECK(end_of_previous_object <= object->address()); |
1151 | 1140 |
1152 // The first word should be a map, and we expect all map pointers to | 1141 // The first word should be a map, and we expect all map pointers to |
1153 // be in map space. | 1142 // be in map space. |
1154 Map* map = object->map(); | 1143 Map* map = object->map(); |
(...skipping 1575 matching lines...) |
2730 | 2719 |
2731 void PagedSpace::ReportStatistics() { | 2720 void PagedSpace::ReportStatistics() { |
2732 int pct = static_cast<int>(Available() * 100 / Capacity()); | 2721 int pct = static_cast<int>(Available() * 100 / Capacity()); |
2733 PrintF(" capacity: %" V8_PTR_PREFIX | 2722 PrintF(" capacity: %" V8_PTR_PREFIX |
2734 "d" | 2723 "d" |
2735 ", waste: %" V8_PTR_PREFIX | 2724 ", waste: %" V8_PTR_PREFIX |
2736 "d" | 2725 "d" |
2737 ", available: %" V8_PTR_PREFIX "d, %%%d\n", | 2726 ", available: %" V8_PTR_PREFIX "d, %%%d\n", |
2738 Capacity(), Waste(), Available(), pct); | 2727 Capacity(), Waste(), Available(), pct); |
2739 | 2728 |
2740 if (!swept_precisely_) return; | |
2741 if (heap()->mark_compact_collector()->sweeping_in_progress()) { | 2729 if (heap()->mark_compact_collector()->sweeping_in_progress()) { |
2742 heap()->mark_compact_collector()->EnsureSweepingCompleted(); | 2730 heap()->mark_compact_collector()->EnsureSweepingCompleted(); |
2743 } | 2731 } |
2744 ClearHistograms(heap()->isolate()); | 2732 ClearHistograms(heap()->isolate()); |
2745 HeapObjectIterator obj_it(this); | 2733 HeapObjectIterator obj_it(this); |
2746 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) | 2734 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) |
2747 CollectHistogramInfo(obj); | 2735 CollectHistogramInfo(obj); |
2748 ReportHistogram(heap()->isolate(), true); | 2736 ReportHistogram(heap()->isolate(), true); |
2749 } | 2737 } |
2750 #endif | 2738 #endif |
(...skipping 356 matching lines...) |
3107 object->ShortPrint(); | 3095 object->ShortPrint(); |
3108 PrintF("\n"); | 3096 PrintF("\n"); |
3109 } | 3097 } |
3110 printf(" --------------------------------------\n"); | 3098 printf(" --------------------------------------\n"); |
3111 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3099 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
3112 } | 3100 } |
3113 | 3101 |
3114 #endif // DEBUG | 3102 #endif // DEBUG |
3115 } | 3103 } |
3116 } // namespace v8::internal | 3104 } // namespace v8::internal |