OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/spaces.h" | 5 #include "src/heap/spaces.h" |
6 | 6 |
7 #include <utility> | 7 #include <utility> |
8 | 8 |
9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
10 #include "src/base/platform/platform.h" | 10 #include "src/base/platform/platform.h" |
(...skipping 1326 matching lines...)
1337 } | 1337 } |
1338 | 1338 |
1339 bool PagedSpace::ContainsSlow(Address addr) { | 1339 bool PagedSpace::ContainsSlow(Address addr) { |
1340 Page* p = Page::FromAddress(addr); | 1340 Page* p = Page::FromAddress(addr); |
1341 for (Page* page : *this) { | 1341 for (Page* page : *this) { |
1342 if (page == p) return true; | 1342 if (page == p) return true; |
1343 } | 1343 } |
1344 return false; | 1344 return false; |
1345 } | 1345 } |
1346 | 1346 |
| 1347 Page* PagedSpace::RemovePageSafe(int size_in_bytes) { |
| 1348 base::LockGuard<base::Mutex> guard(mutex()); |
| 1349 |
| 1350 // Check for pages that still contain free list entries. Bail out for smaller |
| 1351 // categories. |
| 1352 const int minimum_category = |
| 1353 static_cast<int>(FreeList::SelectFreeListCategoryType(size_in_bytes)); |
| 1354 Page* page = free_list()->GetPageForCategoryType(kHuge); |
| 1355 if (!page && static_cast<int>(kLarge) >= minimum_category) |
| 1356 page = free_list()->GetPageForCategoryType(kLarge); |
| 1357 if (!page && static_cast<int>(kMedium) >= minimum_category) |
| 1358 page = free_list()->GetPageForCategoryType(kMedium); |
| 1359 if (!page && static_cast<int>(kSmall) >= minimum_category) |
| 1360 page = free_list()->GetPageForCategoryType(kSmall); |
| 1361 if (!page) return nullptr; |
| 1362 |
| 1363 AccountUncommitted(page->size()); |
| 1364 accounting_stats_.DeallocateBytes(page->LiveBytesFromFreeList()); |
| 1365 accounting_stats_.DecreaseCapacity(page->area_size()); |
| 1366 page->Unlink(); |
| 1367 UnlinkFreeListCategories(page); |
| 1368 return page; |
| 1369 } |
| 1370 |
| 1371 void PagedSpace::AddPage(Page* page) { |
| 1372 AccountCommitted(page->size()); |
| 1373 accounting_stats_.IncreaseCapacity(page->area_size()); |
| 1374 accounting_stats_.AllocateBytes(page->LiveBytesFromFreeList()); |
| 1375 page->set_owner(this); |
| 1376 RelinkFreeListCategories(page); |
| 1377 page->InsertAfter(anchor()->prev_page()); |
| 1378 } |
| 1379 |
1347 void PagedSpace::ShrinkImmortalImmovablePages() { | 1380 void PagedSpace::ShrinkImmortalImmovablePages() { |
1348 DCHECK(!heap()->deserialization_complete()); | 1381 DCHECK(!heap()->deserialization_complete()); |
1349 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); | 1382 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
1350 EmptyAllocationInfo(); | 1383 EmptyAllocationInfo(); |
1351 ResetFreeList(); | 1384 ResetFreeList(); |
1352 | 1385 |
1353 for (Page* page : *this) { | 1386 for (Page* page : *this) { |
1354 DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE)); | 1387 DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE)); |
1355 size_t unused = page->ShrinkToHighWaterMark(); | 1388 size_t unused = page->ShrinkToHighWaterMark(); |
1356 accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused)); | 1389 accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused)); |
1357 AccountUncommitted(unused); | 1390 // Do not account for the unused space as uncommitted because the counter |
 | 1391 // is kept in sync with the page size, which is also not adjusted for |
 | 1392 // those chunks. |
1358 } | 1393 } |
1359 } | 1394 } |
1360 | 1395 |
1361 bool PagedSpace::Expand() { | 1396 bool PagedSpace::Expand() { |
| 1397 // Always lock against the main space as we can only adjust capacity and |
| 1398 // pages concurrently for the main paged space. |
| 1399 base::LockGuard<base::Mutex> guard(heap()->paged_space(identity())->mutex()); |
| 1400 |
1362 const int size = AreaSize(); | 1401 const int size = AreaSize(); |
1363 | 1402 |
1364 if (!heap()->CanExpandOldGeneration(size)) return false; | 1403 if (!heap()->CanExpandOldGeneration(size)) return false; |
1365 | 1404 |
1366 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable()); | 1405 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable()); |
1367 if (p == nullptr) return false; | 1406 if (p == nullptr) return false; |
1368 | 1407 |
1369 AccountCommitted(p->size()); | 1408 AccountCommitted(p->size()); |
1370 | 1409 |
1371 // Pages created during bootstrapping may contain immortal immovable objects. | 1410 // Pages created during bootstrapping may contain immortal immovable objects. |
(...skipping 1527 matching lines...)
2899 if (object != NULL) return object; | 2938 if (object != NULL) return object; |
2900 | 2939 |
2901 // If sweeping is still in progress try to sweep pages on the main thread. | 2940 // If sweeping is still in progress try to sweep pages on the main thread. |
2902 int max_freed = collector->sweeper().ParallelSweepSpace( | 2941 int max_freed = collector->sweeper().ParallelSweepSpace( |
2903 identity(), size_in_bytes, kMaxPagesToSweep); | 2942 identity(), size_in_bytes, kMaxPagesToSweep); |
2904 RefillFreeList(); | 2943 RefillFreeList(); |
2905 if (max_freed >= size_in_bytes) { | 2944 if (max_freed >= size_in_bytes) { |
2906 object = free_list_.Allocate(static_cast<size_t>(size_in_bytes)); | 2945 object = free_list_.Allocate(static_cast<size_t>(size_in_bytes)); |
2907 if (object != nullptr) return object; | 2946 if (object != nullptr) return object; |
2908 } | 2947 } |
| 2948 } else if (is_local()) { |
| 2949 // Sweeping not in progress and we are on a {CompactionSpace}. This can |
| 2950 // only happen when we are evacuating for the young generation. |
| 2951 PagedSpace* main_space = heap()->paged_space(identity()); |
| 2952 Page* page = main_space->RemovePageSafe(size_in_bytes); |
| 2953 if (page != nullptr) { |
| 2954 AddPage(page); |
| 2955 HeapObject* object = |
| 2956 free_list_.Allocate(static_cast<size_t>(size_in_bytes)); |
| 2957 if (object != nullptr) return object; |
| 2958 } |
2909 } | 2959 } |
2910 | 2960 |
2911 if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) { | 2961 if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) { |
2912 DCHECK((CountTotalPages() > 1) || | 2962 DCHECK((CountTotalPages() > 1) || |
2913 (static_cast<size_t>(size_in_bytes) <= free_list_.Available())); | 2963 (static_cast<size_t>(size_in_bytes) <= free_list_.Available())); |
2914 return free_list_.Allocate(static_cast<size_t>(size_in_bytes)); | 2964 return free_list_.Allocate(static_cast<size_t>(size_in_bytes)); |
2915 } | 2965 } |
2916 | 2966 |
2917 // If sweeper threads are active, wait for them at that point and steal | 2967 // If sweeper threads are active, wait for them at that point and steal |
2918 // elements from their free-lists. Allocation may still fail here which | 2968 // elements from their free-lists. Allocation may still fail here which |
(...skipping 383 matching lines...)
3302 PrintF("\n"); | 3352 PrintF("\n"); |
3303 } | 3353 } |
3304 printf(" --------------------------------------\n"); | 3354 printf(" --------------------------------------\n"); |
3305 printf(" Marked: %x, LiveCount: %" V8PRIdPTR "\n", mark_size, | 3355 printf(" Marked: %x, LiveCount: %" V8PRIdPTR "\n", mark_size, |
3306 MarkingState::Internal(this).live_bytes()); | 3356 MarkingState::Internal(this).live_bytes()); |
3307 } | 3357 } |
3308 | 3358 |
3309 #endif // DEBUG | 3359 #endif // DEBUG |
3310 } // namespace internal | 3360 } // namespace internal |
3311 } // namespace v8 | 3361 } // namespace v8 |