OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/spaces.h" | 5 #include "src/heap/spaces.h" |
6 | 6 |
7 #include <utility> | 7 #include <utility> |
8 | 8 |
9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
10 #include "src/base/platform/platform.h" | 10 #include "src/base/platform/platform.h" |
(...skipping 1337 matching lines...)
1348 } | 1348 } |
1349 | 1349 |
1350 bool PagedSpace::ContainsSlow(Address addr) { | 1350 bool PagedSpace::ContainsSlow(Address addr) { |
1351 Page* p = Page::FromAddress(addr); | 1351 Page* p = Page::FromAddress(addr); |
1352 for (Page* page : *this) { | 1352 for (Page* page : *this) { |
1353 if (page == p) return true; | 1353 if (page == p) return true; |
1354 } | 1354 } |
1355 return false; | 1355 return false; |
1356 } | 1356 } |
1357 | 1357 |
| 1358 Page* PagedSpace::RemovePageSafe(int size_in_bytes) { |
| 1359 base::LockGuard<base::Mutex> guard(mutex()); |
| 1360 |
| 1361 // Check for pages that still contain free list entries. Bail out for smaller |
| 1362 // categories. |
| 1363 const int minimum_category = |
| 1364 static_cast<int>(FreeList::SelectFreeListCategoryType(size_in_bytes)); |
| 1365 Page* page = free_list()->GetPageForCategoryType(kHuge); |
| 1366 if (!page && static_cast<int>(kLarge) >= minimum_category) |
| 1367 page = free_list()->GetPageForCategoryType(kLarge); |
| 1368 if (!page && static_cast<int>(kMedium) >= minimum_category) |
| 1369 page = free_list()->GetPageForCategoryType(kMedium); |
| 1370 if (!page && static_cast<int>(kSmall) >= minimum_category) |
| 1371 page = free_list()->GetPageForCategoryType(kSmall); |
| 1372 if (!page) return nullptr; |
| 1373 |
| 1374 AccountUncommitted(page->size()); |
| 1375 accounting_stats_.DeallocateBytes(page->LiveBytesFromFreeList()); |
| 1376 accounting_stats_.DecreaseCapacity(page->area_size()); |
| 1377 page->Unlink(); |
| 1378 UnlinkFreeListCategories(page); |
| 1379 return page; |
| 1380 } |
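The unrolled if-chain in RemovePageSafe() walks the free-list categories from the largest (kHuge) down to the smallest category that can still satisfy the request. A minimal standalone sketch of that walk, using a hypothetical category enum and stand-in declarations rather than the real definitions in src/heap/spaces.h:

    #include <cstddef>

    // Hypothetical stand-ins; the real enum, Page, and FreeList live in
    // src/heap/spaces.h and have more members and categories.
    enum FreeListCategoryType { kSmall = 0, kMedium, kLarge, kHuge };
    struct Page;
    struct FreeList {
      Page* GetPageForCategoryType(FreeListCategoryType type);
      static FreeListCategoryType SelectFreeListCategoryType(
          size_t size_in_bytes);
    };

    // Prefer larger categories, since any entry in them can satisfy the
    // request; bail out below the minimum category for size_in_bytes.
    Page* FindPageForSize(FreeList* free_list, size_t size_in_bytes) {
      const int minimum_category = static_cast<int>(
          FreeList::SelectFreeListCategoryType(size_in_bytes));
      for (int type = kHuge; type >= minimum_category; --type) {
        Page* page = free_list->GetPageForCategoryType(
            static_cast<FreeListCategoryType>(type));
        if (page != nullptr) return page;
      }
      return nullptr;
    }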
| 1381 |
| 1382 void PagedSpace::AddPage(Page* page) { |
| 1383 AccountCommitted(page->size()); |
| 1384 accounting_stats_.IncreaseCapacity(page->area_size()); |
| 1385 accounting_stats_.AllocateBytes(page->LiveBytesFromFreeList()); |
| 1386 page->set_owner(this); |
| 1387 RelinkFreeListCategories(page); |
| 1388 page->InsertAfter(anchor()->prev_page()); |
| 1389 } |
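RemovePageSafe() and AddPage() are the two halves of moving a page between spaces: the remover detaches the page under the main space's mutex and subtracts its committed size, capacity, and live bytes, and the adder re-links the page and its free-list categories and adds the same accounting back. A hedged usage sketch with a hypothetical helper name (the real caller is the allocation slow path further down in this file):

    #include "src/heap/spaces.h"

    namespace v8 {
    namespace internal {

    // Hypothetical helper: steal a page from the main space so a local
    // CompactionSpace can allocate from it, mirroring the slow path below.
    Page* StealPageForCompaction(PagedSpace* main_space,
                                 PagedSpace* compaction_space,
                                 int size_in_bytes) {
      Page* page = main_space->RemovePageSafe(size_in_bytes);
      if (page == nullptr) return nullptr;  // nothing suitable; caller falls back
      compaction_space->AddPage(page);      // re-link page + free-list categories
      return page;
    }

    }  // namespace internal
    }  // namespace v8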
| 1390 |
1358 void PagedSpace::ShrinkImmortalImmovablePages() { | 1391 void PagedSpace::ShrinkImmortalImmovablePages() { |
1359 DCHECK(!heap()->deserialization_complete()); | 1392 DCHECK(!heap()->deserialization_complete()); |
1360 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); | 1393 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
1361 EmptyAllocationInfo(); | 1394 EmptyAllocationInfo(); |
1362 ResetFreeList(); | 1395 ResetFreeList(); |
1363 | 1396 |
1364 for (Page* page : *this) { | 1397 for (Page* page : *this) { |
1365 DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE)); | 1398 DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE)); |
1366 size_t unused = page->ShrinkToHighWaterMark(); | 1399 size_t unused = page->ShrinkToHighWaterMark(); |
1367 accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused)); | 1400 accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused)); |
1368 AccountUncommitted(unused); | 1401 // Do not account for the unused space as uncommitted because the counter |
| 1402 // is kept in sync with page size which is also not adjusted for those |
| 1403 // chunks. |
1369 } | 1404 } |
1370 } | 1405 } |
1371 | 1406 |
1372 bool PagedSpace::Expand() { | 1407 bool PagedSpace::Expand() { |
| 1408 // Always lock against the main space as we can only adjust capacity and |
| 1409 // pages concurrently for the main paged space. |
| 1410 base::LockGuard<base::Mutex> guard(heap()->paged_space(identity())->mutex()); |
| 1411 |
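The comment above encodes a locking convention: mutations of a space's page set always take the mutex of the canonical main space for that identity, so a local CompactionSpace and the main space serialize on one lock. That is also why RemovePageSafe() above can lock plain mutex(): it only runs on the main space. A minimal sketch of the convention, assuming the V8-internal headers:

    #include <functional>

    #include "src/heap/spaces.h"

    namespace v8 {
    namespace internal {

    // Hypothetical helper: run fn while holding the page-set lock, which is
    // always the main space's mutex, never a CompactionSpace's own mutex.
    void WithPageSetLock(PagedSpace* space, const std::function<void()>& fn) {
      base::Mutex* mutex =
          space->heap()->paged_space(space->identity())->mutex();
      base::LockGuard<base::Mutex> guard(mutex);
      fn();
    }

    }  // namespace internal
    }  // namespace v8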
1373 const int size = AreaSize(); | 1412 const int size = AreaSize(); |
1374 | 1413 |
1375 if (!heap()->CanExpandOldGeneration(size)) return false; | 1414 if (!heap()->CanExpandOldGeneration(size)) return false; |
1376 | 1415 |
1377 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable()); | 1416 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable()); |
1378 if (p == nullptr) return false; | 1417 if (p == nullptr) return false; |
1379 | 1418 |
1380 AccountCommitted(p->size()); | 1419 AccountCommitted(p->size()); |
1381 | 1420 |
1382 // Pages created during bootstrapping may contain immortal immovable objects. | 1421 // Pages created during bootstrapping may contain immortal immovable objects. |
(...skipping 1536 matching lines...)
2919 if (object != NULL) return object; | 2958 if (object != NULL) return object; |
2920 | 2959 |
2921 // If sweeping is still in progress try to sweep pages on the main thread. | 2960 // If sweeping is still in progress try to sweep pages on the main thread. |
2922 int max_freed = collector->sweeper().ParallelSweepSpace( | 2961 int max_freed = collector->sweeper().ParallelSweepSpace( |
2923 identity(), size_in_bytes, kMaxPagesToSweep); | 2962 identity(), size_in_bytes, kMaxPagesToSweep); |
2924 RefillFreeList(); | 2963 RefillFreeList(); |
2925 if (max_freed >= size_in_bytes) { | 2964 if (max_freed >= size_in_bytes) { |
2926 object = free_list_.Allocate(static_cast<size_t>(size_in_bytes)); | 2965 object = free_list_.Allocate(static_cast<size_t>(size_in_bytes)); |
2927 if (object != nullptr) return object; | 2966 if (object != nullptr) return object; |
2928 } | 2967 } |
| 2968 } else if (is_local()) { |
| 2969 // Sweeping not in progress and we are on a {CompactionSpace}. This can |
| 2970 // only happen when we are evacuating for the young generation. |
| 2971 PagedSpace* main_space = heap()->paged_space(identity()); |
| 2972 Page* page = main_space->RemovePageSafe(size_in_bytes); |
| 2973 if (page != nullptr) { |
| 2974 AddPage(page); |
| 2975 HeapObject* object = |
| 2976 free_list_.Allocate(static_cast<size_t>(size_in_bytes)); |
| 2977 if (object != nullptr) return object; |
| 2978 } |
2929 } | 2979 } |
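The branch above completes the slow-path fallback chain. Restated as plain control flow (an illustrative sketch with stand-in state, not a real V8 API): refill from sweeping first, then page stealing for local compaction spaces, then expansion, and only then waiting on the sweepers.

    // Stand-in context; the real flags live on the heap and the collector.
    struct SlowPathContext {
      bool sweeping_in_progress;
      bool is_local_compaction_space;
    };

    // Which fallback the slow path tries first; each case corresponds to a
    // branch in the surrounding function.
    int FirstFallback(const SlowPathContext& ctx) {
      if (ctx.sweeping_in_progress) return 1;       // refill + help sweep
      if (ctx.is_local_compaction_space) return 2;  // steal page from main space
      return 3;  // expand; as a last resort, wait for the sweepers
    }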
2930 | 2980 |
2931 if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) { | 2981 if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) { |
2932 DCHECK((CountTotalPages() > 1) || | 2982 DCHECK((CountTotalPages() > 1) || |
2933 (static_cast<size_t>(size_in_bytes) <= free_list_.Available())); | 2983 (static_cast<size_t>(size_in_bytes) <= free_list_.Available())); |
2934 return free_list_.Allocate(static_cast<size_t>(size_in_bytes)); | 2984 return free_list_.Allocate(static_cast<size_t>(size_in_bytes)); |
2935 } | 2985 } |
2936 | 2986 |
2937 // If sweeper threads are active, wait for them at that point and steal | 2987 // If sweeper threads are active, wait for them at that point and steal |
2938 // elements from their free-lists. Allocation may still fail there, which | 2988 // elements from their free-lists. Allocation may still fail there, which |
(...skipping 383 matching lines...)
3322 PrintF("\n"); | 3372 PrintF("\n"); |
3323 } | 3373 } |
3324 printf(" --------------------------------------\n"); | 3374 printf(" --------------------------------------\n"); |
3325 printf(" Marked: %x, LiveCount: %" V8PRIdPTR "\n", mark_size, | 3375 printf(" Marked: %x, LiveCount: %" V8PRIdPTR "\n", mark_size, |
3326 MarkingState::Internal(this).live_bytes()); | 3376 MarkingState::Internal(this).live_bytes()); |
3327 } | 3377 } |
3328 | 3378 |
3329 #endif // DEBUG | 3379 #endif // DEBUG |
3330 } // namespace internal | 3380 } // namespace internal |
3331 } // namespace v8 | 3381 } // namespace v8 |