OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/spaces.h" | 5 #include "src/heap/spaces.h" |
6 | 6 |
7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
8 #include "src/base/platform/platform.h" | 8 #include "src/base/platform/platform.h" |
9 #include "src/full-codegen/full-codegen.h" | 9 #include "src/full-codegen/full-codegen.h" |
10 #include "src/heap/slots-buffer.h" | 10 #include "src/heap/slots-buffer.h" |
(...skipping 62 matching lines...)
73 | 73 |
74 // ----------------------------------------------------------------------------- | 74 // ----------------------------------------------------------------------------- |
75 // CodeRange | 75 // CodeRange |
76 | 76 |
77 | 77 |
78 CodeRange::CodeRange(Isolate* isolate) | 78 CodeRange::CodeRange(Isolate* isolate) |
79 : isolate_(isolate), | 79 : isolate_(isolate), |
80 code_range_(NULL), | 80 code_range_(NULL), |
81 free_list_(0), | 81 free_list_(0), |
82 allocation_list_(0), | 82 allocation_list_(0), |
83 current_allocation_block_index_(0), | 83 current_allocation_block_index_(0) {} |
84 emergency_block_() {} | |
85 | 84 |
86 | 85 |
87 bool CodeRange::SetUp(size_t requested) { | 86 bool CodeRange::SetUp(size_t requested) { |
88 DCHECK(code_range_ == NULL); | 87 DCHECK(code_range_ == NULL); |
89 | 88 |
90 if (requested == 0) { | 89 if (requested == 0) { |
91 // When a target requires the code range feature, we put all code objects | 90 // When a target requires the code range feature, we put all code objects |
92 // in a kMaximalCodeRangeSize range of virtual address space, so that | 91 // in a kMaximalCodeRangeSize range of virtual address space, so that |
93 // they can call each other with near calls. | 92 // they can call each other with near calls. |
94 if (kRequiresCodeRange) { | 93 if (kRequiresCodeRange) { |
(...skipping 38 matching lines...)
133 } | 132 } |
134 base += kReservedCodeRangePages * base::OS::CommitPageSize(); | 133 base += kReservedCodeRangePages * base::OS::CommitPageSize(); |
135 } | 134 } |
136 Address aligned_base = RoundUp(base, MemoryChunk::kAlignment); | 135 Address aligned_base = RoundUp(base, MemoryChunk::kAlignment); |
137 size_t size = code_range_->size() - (aligned_base - base) - | 136 size_t size = code_range_->size() - (aligned_base - base) - |
138 kReservedCodeRangePages * base::OS::CommitPageSize(); | 137 kReservedCodeRangePages * base::OS::CommitPageSize(); |
139 allocation_list_.Add(FreeBlock(aligned_base, size)); | 138 allocation_list_.Add(FreeBlock(aligned_base, size)); |
140 current_allocation_block_index_ = 0; | 139 current_allocation_block_index_ = 0; |
141 | 140 |
142 LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested)); | 141 LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested)); |
143 ReserveEmergencyBlock(); | |
144 return true; | 142 return true; |
145 } | 143 } |
146 | 144 |
147 | 145 |
148 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left, | 146 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left, |
149 const FreeBlock* right) { | 147 const FreeBlock* right) { |
150 // The entire point of CodeRange is that the difference between two | 148 // The entire point of CodeRange is that the difference between two |
151 // addresses in the range can be represented as a signed 32-bit int, | 149 // addresses in the range can be represented as a signed 32-bit int, |
152 // so the cast is semantically correct. | 150 // so the cast is semantically correct. |
153 return static_cast<int>(left->start - right->start); | 151 return static_cast<int>(left->start - right->start); |
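Note on the cast above: it is safe because both addresses come from a single reservation of at most kMaximalCodeRangeSize, well under 2 GB, so their difference always fits in a signed 32-bit int. A minimal standalone sketch of the same invariant (stand-in function and names, not V8 code):

    #include <cassert>
    #include <cstdint>

    // Both addresses come from one sub-2GB reservation, so their
    // difference fits in int regardless of the platform's pointer width.
    int CompareInRange(uintptr_t left, uintptr_t right) {
      intptr_t diff =
          static_cast<intptr_t>(left) - static_cast<intptr_t>(right);
      assert(diff >= INT32_MIN && diff <= INT32_MAX);
      return static_cast<int>(diff);
    }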
(...skipping 115 matching lines...)
269 return true; | 267 return true; |
270 } | 268 } |
271 | 269 |
272 | 270 |
273 void CodeRange::ReleaseBlock(const FreeBlock* block) { | 271 void CodeRange::ReleaseBlock(const FreeBlock* block) { |
274 base::LockGuard<base::Mutex> guard(&code_range_mutex_); | 272 base::LockGuard<base::Mutex> guard(&code_range_mutex_); |
275 free_list_.Add(*block); | 273 free_list_.Add(*block); |
276 } | 274 } |
277 | 275 |
278 | 276 |
279 void CodeRange::ReserveEmergencyBlock() { | |
280 const size_t requested_size = MemoryAllocator::CodePageAreaSize(); | |
281 if (emergency_block_.size == 0) { | |
282 ReserveBlock(requested_size, &emergency_block_); | |
283 } else { | |
284 DCHECK(emergency_block_.size >= requested_size); | |
285 } | |
286 } | |
287 | |
288 | |
289 void CodeRange::ReleaseEmergencyBlock() { | |
290 if (emergency_block_.size != 0) { | |
291 ReleaseBlock(&emergency_block_); | |
292 emergency_block_.size = 0; | |
293 } | |
294 } | |
295 | |
296 | |
297 // ----------------------------------------------------------------------------- | 277 // ----------------------------------------------------------------------------- |
298 // MemoryAllocator | 278 // MemoryAllocator |
299 // | 279 // |
300 | 280 |
301 MemoryAllocator::MemoryAllocator(Isolate* isolate) | 281 MemoryAllocator::MemoryAllocator(Isolate* isolate) |
302 : isolate_(isolate), | 282 : isolate_(isolate), |
303 capacity_(0), | 283 capacity_(0), |
304 capacity_executable_(0), | 284 capacity_executable_(0), |
305 size_(0), | 285 size_(0), |
306 size_executable_(0), | 286 size_executable_(0), |
(...skipping 178 matching lines...)
485 chunk->area_end_ = area_end; | 465 chunk->area_end_ = area_end; |
486 chunk->flags_ = 0; | 466 chunk->flags_ = 0; |
487 chunk->set_owner(owner); | 467 chunk->set_owner(owner); |
488 chunk->InitializeReservedMemory(); | 468 chunk->InitializeReservedMemory(); |
489 chunk->slots_buffer_ = NULL; | 469 chunk->slots_buffer_ = NULL; |
490 chunk->skip_list_ = NULL; | 470 chunk->skip_list_ = NULL; |
491 chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity; | 471 chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity; |
492 chunk->progress_bar_ = 0; | 472 chunk->progress_bar_ = 0; |
493 chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base)); | 473 chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base)); |
494 chunk->set_parallel_sweeping(SWEEPING_DONE); | 474 chunk->set_parallel_sweeping(SWEEPING_DONE); |
| 475 chunk->parallel_compaction_state().SetValue(kCompactingDone); |
495 chunk->mutex_ = NULL; | 476 chunk->mutex_ = NULL; |
496 chunk->available_in_small_free_list_ = 0; | 477 chunk->available_in_small_free_list_ = 0; |
497 chunk->available_in_medium_free_list_ = 0; | 478 chunk->available_in_medium_free_list_ = 0; |
498 chunk->available_in_large_free_list_ = 0; | 479 chunk->available_in_large_free_list_ = 0; |
499 chunk->available_in_huge_free_list_ = 0; | 480 chunk->available_in_huge_free_list_ = 0; |
500 chunk->non_available_small_blocks_ = 0; | 481 chunk->non_available_small_blocks_ = 0; |
501 chunk->ResetLiveBytes(); | 482 chunk->ResetLiveBytes(); |
502 Bitmap::Clear(chunk); | 483 Bitmap::Clear(chunk); |
503 chunk->initialize_scan_on_scavenge(false); | 484 chunk->initialize_scan_on_scavenge(false); |
504 chunk->SetFlag(WAS_SWEPT); | 485 chunk->SetFlag(WAS_SWEPT); |
(...skipping 462 matching lines...)
967 ObjectSpace::kObjectSpaceCodeSpace); | 948 ObjectSpace::kObjectSpaceCodeSpace); |
968 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) == | 949 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) == |
969 ObjectSpace::kObjectSpaceMapSpace); | 950 ObjectSpace::kObjectSpaceMapSpace); |
970 | 951 |
971 | 952 |
972 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space, | 953 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space, |
973 Executability executable) | 954 Executability executable) |
974 : Space(heap, space, executable), | 955 : Space(heap, space, executable), |
975 free_list_(this), | 956 free_list_(this), |
976 unswept_free_bytes_(0), | 957 unswept_free_bytes_(0), |
977 end_of_unswept_pages_(NULL), | 958 end_of_unswept_pages_(NULL) { |
978 emergency_memory_(NULL) { | |
979 area_size_ = MemoryAllocator::PageAreaSize(space); | 959 area_size_ = MemoryAllocator::PageAreaSize(space); |
980 accounting_stats_.Clear(); | 960 accounting_stats_.Clear(); |
981 | 961 |
982 allocation_info_.set_top(NULL); | 962 allocation_info_.set_top(NULL); |
983 allocation_info_.set_limit(NULL); | 963 allocation_info_.set_limit(NULL); |
984 | 964 |
985 anchor_.InitializeAsAnchor(this); | 965 anchor_.InitializeAsAnchor(this); |
986 } | 966 } |
987 | 967 |
988 | 968 |
989 bool PagedSpace::SetUp() { return true; } | 969 bool PagedSpace::SetUp() { return true; } |
990 | 970 |
991 | 971 |
992 bool PagedSpace::HasBeenSetUp() { return true; } | 972 bool PagedSpace::HasBeenSetUp() { return true; } |
993 | 973 |
994 | 974 |
995 void PagedSpace::TearDown() { | 975 void PagedSpace::TearDown() { |
996 PageIterator iterator(this); | 976 PageIterator iterator(this); |
997 while (iterator.has_next()) { | 977 while (iterator.has_next()) { |
998 heap()->isolate()->memory_allocator()->Free(iterator.next()); | 978 heap()->isolate()->memory_allocator()->Free(iterator.next()); |
999 } | 979 } |
1000 anchor_.set_next_page(&anchor_); | 980 anchor_.set_next_page(&anchor_); |
1001 anchor_.set_prev_page(&anchor_); | 981 anchor_.set_prev_page(&anchor_); |
1002 accounting_stats_.Clear(); | 982 accounting_stats_.Clear(); |
1003 } | 983 } |
1004 | 984 |
1005 | 985 |
| 986 void PagedSpace::MoveOverFreeMemory(PagedSpace* other) { |
| 987 DCHECK(identity() == other->identity()); |
| 988 // Destroy the linear allocation space of {other}. This is needed to |
| 989 // (a) not waste the memory and |
| 990 // (b) keep the rest of the chunk in an iterable state (filler is needed). |
| 991 other->EmptyAllocationInfo(); |
| 992 |
| 993 // Move over the free list. Concatenate makes sure that the source free list |
| 994 // gets properly reset after moving over all nodes. |
| 995 intptr_t freed_bytes = free_list_.Concatenate(other->free_list()); |
| 996 other->accounting_stats_.AllocateBytes(freed_bytes); |
| 997 // We do not adjust accounting_stats_ for {this} as we treat the received |
| 998 // memory as borrowed, i.e., the originating space keeps track of its |
| 999 // capacity. Other stats, e.g. accounting_stats_.{size_,waste_} are properly |
| 1000 // maintained by allocating and freeing blocks. |
| 1001 } |
| 1002 |
| 1003 |
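The free-list hand-off in MoveOverFreeMemory above treats the received memory as borrowed: the nodes move to the receiving list, the donor's accounting is credited, and capacity tracking stays with the originating space. A simplified model of the Concatenate contract (hypothetical FreeList stand-in, not V8's class):

    #include <cstddef>
    #include <vector>

    struct FreeList {
      std::vector<size_t> nodes;  // byte sizes of free blocks

      // Moves every node from `other` into this list, resets `other`,
      // and returns the total number of bytes handed over.
      size_t Concatenate(FreeList* other) {
        size_t moved = 0;
        for (size_t n : other->nodes) moved += n;
        nodes.insert(nodes.end(), other->nodes.begin(), other->nodes.end());
        other->nodes.clear();  // source list ends up properly reset
        return moved;
      }
    };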
1006 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) { | 1004 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) { |
1007 // Unmerged fields: | 1005 // Unmerged fields: |
1008 // area_size_ | 1006 // area_size_ |
1009 // allocation_info_ | 1007 // allocation_info_ |
1010 // emergency_memory_ | |
1011 // end_of_unswept_pages_ | 1008 // end_of_unswept_pages_ |
1012 // unswept_free_bytes_ | 1009 // unswept_free_bytes_ |
1013 // anchor_ | 1010 // anchor_ |
1014 | 1011 |
1015 // It only makes sense to merge compatible spaces. | 1012 MoveOverFreeMemory(other); |
1016 DCHECK(identity() == other->identity()); | |
1017 | |
1018 // Destroy the linear allocation space of {other}. This is needed to (a) not | |
1019 // waste the memory and (b) keep the rest of the chunk in an iterable state | |
1020 // (filler is needed). | |
1021 int linear_size = static_cast<int>(other->limit() - other->top()); | |
1022 other->Free(other->top(), linear_size); | |
1023 | |
1024 // Move over the free list. | |
1025 free_list_.Concatenate(other->free_list()); | |
1026 | 1013 |
1027 // Update and clear accounting statistics. | 1014 // Update and clear accounting statistics. |
1028 accounting_stats_.Merge(other->accounting_stats_); | 1015 accounting_stats_.Merge(other->accounting_stats_); |
1029 other->accounting_stats_.Clear(); | 1016 other->accounting_stats_.Reset(); |
1030 | 1017 |
1031 // Move over pages. | 1018 // Move over pages. |
1032 PageIterator it(other); | 1019 PageIterator it(other); |
1033 Page* p = nullptr; | 1020 Page* p = nullptr; |
1034 while (it.has_next()) { | 1021 while (it.has_next()) { |
1035 p = it.next(); | 1022 p = it.next(); |
1036 p->Unlink(); | 1023 p->Unlink(); |
1037 p->set_owner(this); | 1024 p->set_owner(this); |
1038 p->InsertAfter(anchor_.prev_page()); | 1025 p->InsertAfter(anchor_.prev_page()); |
1039 } | 1026 } |
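The page-move loop above relies on pages forming a circular doubly-linked list around the anchor_ sentinel, which makes Unlink and InsertAfter constant-time splices. A minimal sketch of that pattern (stand-in Page type, not V8's):

    struct Page {
      Page* prev = this;  // self-looped when not in any list
      Page* next = this;

      void Unlink() {  // detach from the current list
        prev->next = next;
        next->prev = prev;
        prev = next = this;
      }

      void InsertAfter(Page* p) {  // splice in right after p
        prev = p;
        next = p->next;
        p->next->prev = this;
        p->next = this;
      }
    };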
(...skipping 63 matching lines...)
1103 if (!CanExpand(size)) return false; | 1090 if (!CanExpand(size)) return false; |
1104 | 1091 |
1105 Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this, | 1092 Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this, |
1106 executable()); | 1093 executable()); |
1107 if (p == NULL) return false; | 1094 if (p == NULL) return false; |
1108 | 1095 |
1109 // Pages created during bootstrapping may contain immortal immovable objects. | 1096 // Pages created during bootstrapping may contain immortal immovable objects. |
1110 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate(); | 1097 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate(); |
1111 | 1098 |
1112 DCHECK(Capacity() <= heap()->MaxOldGenerationSize()); | 1099 DCHECK(Capacity() <= heap()->MaxOldGenerationSize()); |
1113 DCHECK(heap()->CommittedOldGenerationMemory() <= | |
1114 heap()->MaxOldGenerationSize() + | |
1115 PagedSpace::MaxEmergencyMemoryAllocated()); | |
1116 | 1100 |
1117 p->InsertAfter(anchor_.prev_page()); | 1101 p->InsertAfter(anchor_.prev_page()); |
1118 | 1102 |
1119 return true; | 1103 return true; |
1120 } | 1104 } |
1121 | 1105 |
1122 | 1106 |
1123 int PagedSpace::CountTotalPages() { | 1107 int PagedSpace::CountTotalPages() { |
1124 PageIterator it(this); | 1108 PageIterator it(this); |
1125 int count = 0; | 1109 int count = 0; |
(...skipping 49 matching lines...)
1175 page->Unlink(); | 1159 page->Unlink(); |
1176 } | 1160 } |
1177 | 1161 |
1178 heap()->QueueMemoryChunkForFree(page); | 1162 heap()->QueueMemoryChunkForFree(page); |
1179 | 1163 |
1180 DCHECK(Capacity() > 0); | 1164 DCHECK(Capacity() > 0); |
1181 accounting_stats_.ShrinkSpace(AreaSize()); | 1165 accounting_stats_.ShrinkSpace(AreaSize()); |
1182 } | 1166 } |
1183 | 1167 |
1184 | 1168 |
1185 intptr_t PagedSpace::MaxEmergencyMemoryAllocated() { | |
1186 // New space and large object space. | |
1187 static const int spaces_without_emergency_memory = 2; | |
1188 static const int spaces_with_emergency_memory = | |
1189 LAST_SPACE - FIRST_SPACE + 1 - spaces_without_emergency_memory; | |
1190 return Page::kPageSize * spaces_with_emergency_memory; | |
1191 } | |
1192 | |
1193 | |
1194 void PagedSpace::CreateEmergencyMemory() { | |
1195 if (identity() == CODE_SPACE) { | |
1196 // Make the emergency block available to the allocator. | |
1197 CodeRange* code_range = heap()->isolate()->code_range(); | |
1198 if (code_range != NULL && code_range->valid()) { | |
1199 code_range->ReleaseEmergencyBlock(); | |
1200 } | |
1201 DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize()); | |
1202 } | |
1203 emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk( | |
1204 AreaSize(), AreaSize(), executable(), this); | |
1205 } | |
1206 | |
1207 | |
1208 void PagedSpace::FreeEmergencyMemory() { | |
1209 Page* page = static_cast<Page*>(emergency_memory_); | |
1210 DCHECK(page->LiveBytes() == 0); | |
1211 DCHECK(AreaSize() == page->area_size()); | |
1212 DCHECK(!free_list_.ContainsPageFreeListItems(page)); | |
1213 heap()->isolate()->memory_allocator()->Free(page); | |
1214 emergency_memory_ = NULL; | |
1215 } | |
1216 | |
1217 | |
1218 void PagedSpace::UseEmergencyMemory() { | |
1219 // Page::Initialize makes the chunk into a real page and adds it to the | |
1220 // accounting for this space. Unlike PagedSpace::Expand, we don't check | |
1221 // CanExpand first, so we can go over the limits a little here. That's OK, | |
1222 // because we are in the process of compacting which will free up at least as | |
1223 // much memory as it allocates. | |
1224 Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this); | |
1225 page->InsertAfter(anchor_.prev_page()); | |
1226 emergency_memory_ = NULL; | |
1227 } | |
1228 | |
1229 | |
1230 #ifdef DEBUG | 1169 #ifdef DEBUG |
1231 void PagedSpace::Print() {} | 1170 void PagedSpace::Print() {} |
1232 #endif | 1171 #endif |
1233 | 1172 |
1234 #ifdef VERIFY_HEAP | 1173 #ifdef VERIFY_HEAP |
1235 void PagedSpace::Verify(ObjectVisitor* visitor) { | 1174 void PagedSpace::Verify(ObjectVisitor* visitor) { |
1236 bool allocation_pointer_found_in_space = | 1175 bool allocation_pointer_found_in_space = |
1237 (allocation_info_.top() == allocation_info_.limit()); | 1176 (allocation_info_.top() == allocation_info_.limit()); |
1238 PageIterator page_iterator(this); | 1177 PageIterator page_iterator(this); |
1239 while (page_iterator.has_next()) { | 1178 while (page_iterator.has_next()) { |
(...skipping 1944 matching lines...)
3184 object->ShortPrint(); | 3123 object->ShortPrint(); |
3185 PrintF("\n"); | 3124 PrintF("\n"); |
3186 } | 3125 } |
3187 printf(" --------------------------------------\n"); | 3126 printf(" --------------------------------------\n"); |
3188 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3127 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
3189 } | 3128 } |
3190 | 3129 |
3191 #endif // DEBUG | 3130 #endif // DEBUG |
3192 } // namespace internal | 3131 } // namespace internal |
3193 } // namespace v8 | 3132 } // namespace v8 |