OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/spaces.h" | 5 #include "src/heap/spaces.h" |
6 | 6 |
7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
8 #include "src/base/platform/platform.h" | 8 #include "src/base/platform/platform.h" |
9 #include "src/base/platform/semaphore.h" | 9 #include "src/base/platform/semaphore.h" |
10 #include "src/full-codegen/full-codegen.h" | 10 #include "src/full-codegen/full-codegen.h" |
(...skipping 1211 matching lines...)
1222 if (!heap()->CanExpandOldGeneration(size)) return false; | 1222 if (!heap()->CanExpandOldGeneration(size)) return false; |
1223 | 1223 |
1224 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable()); | 1224 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable()); |
1225 if (p == nullptr) return false; | 1225 if (p == nullptr) return false; |
1226 | 1226 |
1227 AccountCommitted(static_cast<intptr_t>(p->size())); | 1227 AccountCommitted(static_cast<intptr_t>(p->size())); |
1228 | 1228 |
1229 // Pages created during bootstrapping may contain immortal immovable objects. | 1229 // Pages created during bootstrapping may contain immortal immovable objects. |
1230 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate(); | 1230 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate(); |
1231 | 1231 |
1232 // When incremental marking was activated, old space pages are allocated | |
1233 // black. | |
1234 if (heap()->incremental_marking()->black_allocation() && | |
1235 identity() == OLD_SPACE) { | |
1236 p->markbits()->SetAllBits(); | |
1237 p->SetFlag(Page::BLACK_PAGE); | |
1238 if (FLAG_trace_incremental_marking) { | |
1239 PrintIsolate(heap()->isolate(), "Added black page %p\n", | |
1240 static_cast<void*>(p)); | |
1241 } | |
1242 } | |
1243 | |
1244 DCHECK(Capacity() <= heap()->MaxOldGenerationSize()); | 1232 DCHECK(Capacity() <= heap()->MaxOldGenerationSize()); |
1245 | 1233 |
1246 p->InsertAfter(anchor_.prev_page()); | 1234 p->InsertAfter(anchor_.prev_page()); |
1247 | 1235 |
1248 return true; | 1236 return true; |
1249 } | 1237 } |
1250 | 1238 |
1251 | 1239 |
1252 int PagedSpace::CountTotalPages() { | 1240 int PagedSpace::CountTotalPages() { |
1253 int count = 0; | 1241 int count = 0; |
1254 for (Page* page : *this) { | 1242 for (Page* page : *this) { |
1255 count++; | 1243 count++; |
1256 USE(page); | 1244 USE(page); |
1257 } | 1245 } |
1258 return count; | 1246 return count; |
1259 } | 1247 } |
1260 | 1248 |
1261 | 1249 |
1262 void PagedSpace::ResetFreeListStatistics() { | 1250 void PagedSpace::ResetFreeListStatistics() { |
1263 for (Page* page : *this) { | 1251 for (Page* page : *this) { |
1264 page->ResetFreeListStatistics(); | 1252 page->ResetFreeListStatistics(); |
1265 } | 1253 } |
1266 } | 1254 } |
1267 | 1255 |
| 1256 void PagedSpace::SetAllocationInfo(Address top, Address limit) { |
| 1257 SetTopAndLimit(top, limit); |
| 1258 if (top != nullptr && top != limit && |
| 1259 heap()->incremental_marking()->black_allocation()) { |
| 1260 Page* page = Page::FromAddress(top); |
| 1261 page->markbits()->SetRange(page->AddressToMarkbitIndex(top), |
| 1262 page->AddressToMarkbitIndex(limit)); |
| 1263 page->IncrementLiveBytes(static_cast<int>(limit - top)); |
| 1264 } |
| 1265 } |
| 1266 |
| 1267 void PagedSpace::MarkAllocationInfoBlack() { |
| 1268 DCHECK(heap()->incremental_marking()->black_allocation()); |
| 1269 Address current_top = top(); |
| 1270 Address current_limit = limit(); |
| 1271 if (current_top != nullptr && current_top != current_limit) { |
| 1272 Page* page = Page::FromAddress(current_top); |
| 1273 page->markbits()->SetRange(page->AddressToMarkbitIndex(current_top), |
| 1274 page->AddressToMarkbitIndex(current_limit)); |
| 1275 page->IncrementLiveBytes(static_cast<int>(current_limit - current_top)); |
| 1276 } |
| 1277 } |
| 1278 |
| 1279 // Empty space allocation info, returning unused area to free list. |
| 1280 void PagedSpace::EmptyAllocationInfo() { |
| 1281 // Mark the old linear allocation area with a free space map so it can be |
| 1282 // skipped when scanning the heap. |
| 1283 Address current_top = top(); |
| 1284 Address current_limit = limit(); |
| 1285 if (current_top == nullptr) { |
| 1286 DCHECK(current_limit == nullptr); |
| 1287 return; |
| 1288 } |
| 1289 int old_linear_size = static_cast<int>(current_limit - current_top); |
| 1290 SetTopAndLimit(NULL, NULL); |
| 1291 if (current_top != current_limit && |
| 1292 heap()->incremental_marking()->black_allocation()) { |
| 1293 Page* page = Page::FromAddress(current_top); |
| 1294 page->markbits()->ClearRange(page->AddressToMarkbitIndex(current_top), |
| 1295 page->AddressToMarkbitIndex(current_limit)); |
| 1296 page->IncrementLiveBytes(-static_cast<int>(current_limit - current_top)); |
| 1297 } |
| 1298 Free(current_top, old_linear_size); |
| 1299 } |
1268 | 1300 |
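[Editor's note] The three helpers added above (SetAllocationInfo, MarkAllocationInfoBlack, EmptyAllocationInfo) all follow the same pattern: while black allocation is active, paint the mark bits covering the current linear allocation area [top, limit) and bump the page's live-byte count, and clear both again if the unused tail is handed back to the free list. The standalone sketch below illustrates that pattern only; it does not use V8's real Bitmap/Page API, and every name in it (PageMarkBits, PaintLinearAreaBlack, ClearLinearArea) is hypothetical.

// A minimal, self-contained sketch of the mark-bit-range pattern used by the
// new PagedSpace helpers. One mark bit per pointer-sized word of a page.
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

namespace sketch {

constexpr size_t kPointerSize = sizeof(void*);

class PageMarkBits {
 public:
  PageMarkBits(uintptr_t page_start, size_t page_size)
      : page_start_(page_start), bits_(page_size / kPointerSize, false) {}

  size_t AddressToMarkbitIndex(uintptr_t addr) const {
    return (addr - page_start_) / kPointerSize;
  }

  // Equivalent in spirit to the SetRange/ClearRange calls in the patch.
  void SetRange(size_t begin, size_t end) { Fill(begin, end, true); }
  void ClearRange(size_t begin, size_t end) { Fill(begin, end, false); }

  void IncrementLiveBytes(int delta) { live_bytes_ += delta; }
  int live_bytes() const { return live_bytes_; }

 private:
  void Fill(size_t begin, size_t end, bool value) {
    assert(begin <= end && end <= bits_.size());
    for (size_t i = begin; i < end; i++) bits_[i] = value;
  }

  uintptr_t page_start_;
  std::vector<bool> bits_;
  int live_bytes_ = 0;
};

// Mirrors PagedSpace::SetAllocationInfo: paint the linear area black.
void PaintLinearAreaBlack(PageMarkBits* page, uintptr_t top, uintptr_t limit) {
  if (top == 0 || top == limit) return;
  page->SetRange(page->AddressToMarkbitIndex(top),
                 page->AddressToMarkbitIndex(limit));
  page->IncrementLiveBytes(static_cast<int>(limit - top));
}

// Mirrors the black-allocation branch of PagedSpace::EmptyAllocationInfo:
// unpaint the unused tail before handing it back to the free list.
void ClearLinearArea(PageMarkBits* page, uintptr_t top, uintptr_t limit) {
  if (top == 0 || top == limit) return;
  page->ClearRange(page->AddressToMarkbitIndex(top),
                   page->AddressToMarkbitIndex(limit));
  page->IncrementLiveBytes(-static_cast<int>(limit - top));
}

}  // namespace sketch

int main() {
  const uintptr_t page_start = 0x10000;
  sketch::PageMarkBits page(page_start, 4096);
  // Hand out a 256-byte linear allocation area under black allocation...
  sketch::PaintLinearAreaBlack(&page, page_start + 512, page_start + 768);
  std::printf("live bytes after paint: %d\n", page.live_bytes());  // 256
  // ...and return it unused, e.g. when inline allocation is disabled.
  sketch::ClearLinearArea(&page, page_start + 512, page_start + 768);
  std::printf("live bytes after clear: %d\n", page.live_bytes());  // 0
  return 0;
}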
1269 void PagedSpace::IncreaseCapacity(int size) { | 1301 void PagedSpace::IncreaseCapacity(int size) { |
1270 accounting_stats_.ExpandSpace(size); | 1302 accounting_stats_.ExpandSpace(size); |
1271 } | 1303 } |
1272 | 1304 |
1273 void PagedSpace::ReleasePage(Page* page) { | 1305 void PagedSpace::ReleasePage(Page* page) { |
1274 DCHECK_EQ(page->LiveBytes(), 0); | 1306 DCHECK_EQ(page->LiveBytes(), 0); |
1275 DCHECK_EQ(AreaSize(), page->area_size()); | 1307 DCHECK_EQ(AreaSize(), page->area_size()); |
1276 DCHECK_EQ(page->owner(), this); | 1308 DCHECK_EQ(page->owner(), this); |
1277 | 1309 |
(...skipping 46 matching lines...)
1324 | 1356 |
1325 // Perform space-specific object verification. | 1357 // Perform space-specific object verification. |
1326 VerifyObject(object); | 1358 VerifyObject(object); |
1327 | 1359 |
1328 // The object itself should look OK. | 1360 // The object itself should look OK. |
1329 object->ObjectVerify(); | 1361 object->ObjectVerify(); |
1330 | 1362 |
1331 // All the interior pointers should be contained in the heap. | 1363 // All the interior pointers should be contained in the heap. |
1332 int size = object->Size(); | 1364 int size = object->Size(); |
1333 object->IterateBody(map->instance_type(), size, visitor); | 1365 object->IterateBody(map->instance_type(), size, visitor); |
1334 if (!page->IsFlagSet(Page::BLACK_PAGE) && | 1366 if (Marking::IsBlack(ObjectMarking::MarkBitFrom(object))) { |
1335 Marking::IsBlack(ObjectMarking::MarkBitFrom(object))) { | |
1336 black_size += size; | 1367 black_size += size; |
1337 } | 1368 } |
1338 | 1369 |
1339 CHECK(object->address() + size <= top); | 1370 CHECK(object->address() + size <= top); |
1340 end_of_previous_object = object->address() + size; | 1371 end_of_previous_object = object->address() + size; |
1341 } | 1372 } |
1342 CHECK_LE(black_size, page->LiveBytes()); | 1373 CHECK_LE(black_size, page->LiveBytes()); |
1343 } | 1374 } |
1344 CHECK(allocation_pointer_found_in_space); | 1375 CHECK(allocation_pointer_found_in_space); |
1345 } | 1376 } |
(...skipping 1076 matching lines...)
2422 DCHECK(0 < size_in_bytes); | 2453 DCHECK(0 < size_in_bytes); |
2423 DCHECK(size_in_bytes <= kMaxBlockSize); | 2454 DCHECK(size_in_bytes <= kMaxBlockSize); |
2424 DCHECK(IsAligned(size_in_bytes, kPointerSize)); | 2455 DCHECK(IsAligned(size_in_bytes, kPointerSize)); |
2425 // Don't free list allocate if there is linear space available. | 2456 // Don't free list allocate if there is linear space available. |
2426 DCHECK(owner_->limit() - owner_->top() < size_in_bytes); | 2457 DCHECK(owner_->limit() - owner_->top() < size_in_bytes); |
2427 | 2458 |
2428 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); | 2459 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); |
2429 // Mark the old linear allocation area with a free space map so it can be | 2460 // Mark the old linear allocation area with a free space map so it can be |
2430 // skipped when scanning the heap. This also puts it back in the free list | 2461 // skipped when scanning the heap. This also puts it back in the free list |
2431 // if it is big enough. | 2462 // if it is big enough. |
2432 owner_->Free(owner_->top(), old_linear_size); | 2463 owner_->EmptyAllocationInfo(); |
2433 owner_->SetTopAndLimit(nullptr, nullptr); | |
2434 | 2464 |
2435 owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes - | 2465 owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes - |
2436 old_linear_size); | 2466 old_linear_size); |
2437 | 2467 |
2438 int new_node_size = 0; | 2468 int new_node_size = 0; |
2439 FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size); | 2469 FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size); |
2440 if (new_node == nullptr) return nullptr; | 2470 if (new_node == nullptr) return nullptr; |
2441 | 2471 |
2442 int bytes_left = new_node_size - size_in_bytes; | 2472 int bytes_left = new_node_size - size_in_bytes; |
2443 DCHECK(bytes_left >= 0); | 2473 DCHECK(bytes_left >= 0); |
(...skipping 13 matching lines...)
2457 const int kThreshold = IncrementalMarking::kAllocatedThreshold; | 2487 const int kThreshold = IncrementalMarking::kAllocatedThreshold; |
2458 | 2488 |
2459 // Memory in the linear allocation area is counted as allocated. We may free | 2489 // Memory in the linear allocation area is counted as allocated. We may free |
2460 // a little of this again immediately - see below. | 2490 // a little of this again immediately - see below. |
2461 owner_->Allocate(new_node_size); | 2491 owner_->Allocate(new_node_size); |
2462 | 2492 |
2463 if (owner_->heap()->inline_allocation_disabled()) { | 2493 if (owner_->heap()->inline_allocation_disabled()) { |
2464 // Keep the linear allocation area empty if requested to do so, just | 2494 // Keep the linear allocation area empty if requested to do so, just |
2465 // return area back to the free list instead. | 2495 // return area back to the free list instead. |
2466 owner_->Free(new_node->address() + size_in_bytes, bytes_left); | 2496 owner_->Free(new_node->address() + size_in_bytes, bytes_left); |
2467 owner_->SetTopAndLimit(new_node->address() + size_in_bytes, | 2497 owner_->SetAllocationInfo(new_node->address() + size_in_bytes, |
2468 new_node->address() + size_in_bytes); | 2498 new_node->address() + size_in_bytes); |
2469 } else if (bytes_left > kThreshold && | 2499 } else if (bytes_left > kThreshold && |
2470 owner_->heap()->incremental_marking()->IsMarkingIncomplete() && | 2500 owner_->heap()->incremental_marking()->IsMarkingIncomplete() && |
2471 FLAG_incremental_marking) { | 2501 FLAG_incremental_marking) { |
2472 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); | 2502 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); |
2473 // We don't want to give too large linear areas to the allocator while | 2503 // We don't want to give too large linear areas to the allocator while |
2474 // incremental marking is going on, because we won't check again whether | 2504 // incremental marking is going on, because we won't check again whether |
2475 // we want to do another increment until the linear area is used up. | 2505 // we want to do another increment until the linear area is used up. |
2476 owner_->Free(new_node->address() + size_in_bytes + linear_size, | 2506 owner_->Free(new_node->address() + size_in_bytes + linear_size, |
2477 new_node_size - size_in_bytes - linear_size); | 2507 new_node_size - size_in_bytes - linear_size); |
2478 owner_->SetTopAndLimit(new_node->address() + size_in_bytes, | 2508 owner_->SetAllocationInfo( |
2479 new_node->address() + size_in_bytes + linear_size); | 2509 new_node->address() + size_in_bytes, |
| 2510 new_node->address() + size_in_bytes + linear_size); |
2480 } else { | 2511 } else { |
2481 DCHECK(bytes_left >= 0); | 2512 DCHECK(bytes_left >= 0); |
2482 // Normally we give the rest of the node to the allocator as its new | 2513 // Normally we give the rest of the node to the allocator as its new |
2483 // linear allocation area. | 2514 // linear allocation area. |
2484 owner_->SetTopAndLimit(new_node->address() + size_in_bytes, | 2515 owner_->SetAllocationInfo(new_node->address() + size_in_bytes, |
2485 new_node->address() + new_node_size); | 2516 new_node->address() + new_node_size); |
2486 } | 2517 } |
2487 | 2518 |
2488 owner_->AllocationStep(new_node->address(), size_in_bytes); | 2519 owner_->AllocationStep(new_node->address(), size_in_bytes); |
2489 | 2520 |
2490 return new_node; | 2521 return new_node; |
2491 } | 2522 } |
2492 | 2523 |
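[Editor's note] In the FreeList::Allocate hunk above, the node found on the free list is split three ways: size_in_bytes goes to the caller, part of the remainder may become the new linear allocation area, and the rest is freed back immediately; the cap applied while incremental marking is incomplete is what the kThreshold comment refers to. The sketch below works through that arithmetic under stated assumptions; SplitNode and its parameters are hypothetical names, not V8's FreeList API.

// Minimal sketch of how the remainder of a free-list node is divided.
#include <cstdio>

struct Split {
  int object_bytes;       // returned to the caller
  int linear_area_bytes;  // new linear allocation area (limit - top)
  int freed_bytes;        // returned to the free list immediately
};

// kThreshold stands in for IncrementalMarking::kAllocatedThreshold.
Split SplitNode(int new_node_size, int size_in_bytes,
                bool inline_alloc_disabled, bool marking_incomplete,
                int kThreshold) {
  const int bytes_left = new_node_size - size_in_bytes;
  Split s{size_in_bytes, 0, 0};
  if (inline_alloc_disabled) {
    // Keep the linear area empty; free everything that is left over.
    s.freed_bytes = bytes_left;
  } else if (bytes_left > kThreshold && marking_incomplete) {
    // Cap the linear area so marking gets a chance to step again soon.
    // (V8 additionally rounds this down to object alignment.)
    s.linear_area_bytes = kThreshold;
    s.freed_bytes = bytes_left - s.linear_area_bytes;
  } else {
    // Normal case: the whole remainder becomes the linear allocation area.
    s.linear_area_bytes = bytes_left;
  }
  return s;
}

int main() {
  Split s = SplitNode(/*new_node_size=*/4096, /*size_in_bytes=*/512,
                      /*inline_alloc_disabled=*/false,
                      /*marking_incomplete=*/true, /*kThreshold=*/1024);
  // Prints: object=512 linear=1024 freed=2560
  std::printf("object=%d linear=%d freed=%d\n", s.object_bytes,
              s.linear_area_bytes, s.freed_bytes);
  return 0;
}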
2493 intptr_t FreeList::EvictFreeListItems(Page* page) { | 2524 intptr_t FreeList::EvictFreeListItems(Page* page) { |
2494 intptr_t sum = 0; | 2525 intptr_t sum = 0; |
2495 page->ForAllFreeListCategories( | 2526 page->ForAllFreeListCategories( |
(...skipping 369 matching lines...)
2865 if (Heap::ShouldZapGarbage()) { | 2896 if (Heap::ShouldZapGarbage()) { |
2866 // Make the object consistent so the heap can be verified in OldSpaceStep. | 2897 // Make the object consistent so the heap can be verified in OldSpaceStep. |
2867 // We only need to do this in debug builds or if verify_heap is on. | 2898 // We only need to do this in debug builds or if verify_heap is on. |
2868 reinterpret_cast<Object**>(object->address())[0] = | 2899 reinterpret_cast<Object**>(object->address())[0] = |
2869 heap()->fixed_array_map(); | 2900 heap()->fixed_array_map(); |
2870 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0); | 2901 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0); |
2871 } | 2902 } |
2872 | 2903 |
2873 heap()->incremental_marking()->OldSpaceStep(object_size); | 2904 heap()->incremental_marking()->OldSpaceStep(object_size); |
2874 AllocationStep(object->address(), object_size); | 2905 AllocationStep(object->address(), object_size); |
| 2906 |
| 2907 if (heap()->incremental_marking()->black_allocation()) { |
| 2908 Marking::MarkBlack(ObjectMarking::MarkBitFrom(object)); |
| 2909 MemoryChunk::IncrementLiveBytesFromGC(object, object_size); |
| 2910 } |
2875 return object; | 2911 return object; |
2876 } | 2912 } |
2877 | 2913 |
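[Editor's note] Unlike paged spaces, the large object space change above does not paint a bit range: each large object sits alone on its chunk, so under black allocation it is marked black individually and its full size is added to the chunk's live bytes. A trivial sketch of that behavior follows, using hypothetical types (LargeChunk, OnLargeObjectAllocated), not V8's MemoryChunk API.

// Minimal sketch of marking a freshly allocated large object black.
#include <cstdio>

struct LargeChunk {
  bool object_is_black = false;  // one object per large-object chunk
  int live_bytes = 0;
};

void OnLargeObjectAllocated(LargeChunk* chunk, int object_size,
                            bool black_allocation) {
  if (!black_allocation) return;
  chunk->object_is_black = true;     // ~ Marking::MarkBlack(mark_bit)
  chunk->live_bytes += object_size;  // ~ MemoryChunk::IncrementLiveBytesFromGC
}

int main() {
  LargeChunk chunk;
  OnLargeObjectAllocated(&chunk, 1 << 20, /*black_allocation=*/true);
  std::printf("black=%d live=%d\n", chunk.object_is_black, chunk.live_bytes);
  return 0;
}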
2878 | 2914 |
2879 size_t LargeObjectSpace::CommittedPhysicalMemory() { | 2915 size_t LargeObjectSpace::CommittedPhysicalMemory() { |
2880 // On a platform that provides lazy committing of memory, we over-account | 2916 // On a platform that provides lazy committing of memory, we over-account |
2881 // the actually committed memory. There is no easy way right now to support | 2917 // the actually committed memory. There is no easy way right now to support |
2882 // precise accounting of committed memory in large object space. | 2918 // precise accounting of committed memory in large object space. |
2883 return CommittedMemory(); | 2919 return CommittedMemory(); |
2884 } | 2920 } |
(...skipping 213 matching lines...)
3098 object->ShortPrint(); | 3134 object->ShortPrint(); |
3099 PrintF("\n"); | 3135 PrintF("\n"); |
3100 } | 3136 } |
3101 printf(" --------------------------------------\n"); | 3137 printf(" --------------------------------------\n"); |
3102 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3138 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
3103 } | 3139 } |
3104 | 3140 |
3105 #endif // DEBUG | 3141 #endif // DEBUG |
3106 } // namespace internal | 3142 } // namespace internal |
3107 } // namespace v8 | 3143 } // namespace v8 |