| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/spaces.h" | 5 #include "src/heap/spaces.h" |
| 6 | 6 |
| 7 #include <utility> | 7 #include <utility> |
| 8 | 8 |
| 9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
| 10 #include "src/base/platform/platform.h" | 10 #include "src/base/platform/platform.h" |
| (...skipping 1277 matching lines...) |
| 1288 while ((p = collector->sweeper().GetSweptPageSafe(this)) != nullptr) { | 1288 while ((p = collector->sweeper().GetSweptPageSafe(this)) != nullptr) { |
| 1289 // Only during compaction can pages actually change ownership. This | 1289 // Only during compaction can pages actually change ownership. This |
| 1290 // is safe because there is no other competing action on the page | 1290 // is safe because there is no other competing action on the page |
| 1291 // links during compaction. | 1291 // links during compaction. |
| 1292 if (is_local() && (p->owner() != this)) { | 1292 if (is_local() && (p->owner() != this)) { |
| 1293 base::LockGuard<base::Mutex> guard( | 1293 base::LockGuard<base::Mutex> guard( |
| 1294 reinterpret_cast<PagedSpace*>(p->owner())->mutex()); | 1294 reinterpret_cast<PagedSpace*>(p->owner())->mutex()); |
| 1295 p->Unlink(); | 1295 p->Unlink(); |
| 1296 p->set_owner(this); | 1296 p->set_owner(this); |
| 1297 p->InsertAfter(anchor_.prev_page()); | 1297 p->InsertAfter(anchor_.prev_page()); |
| 1298 } else { |
| 1299 CHECK_EQ(this, p->owner()); |
| 1300 // Regular refill on the main thread. Pages are already linked into the
| 1301 // space but might require relinking. |
| 1302 if (p->available_in_free_list() < kPageReuseThreshold) { |
| 1304 // Relink pages with little free memory left to just before the anchor.
| 1304 p->Unlink(); |
| 1305 p->InsertAfter(anchor()->prev_page()); |
| 1306 } |
| 1298 } | 1307 } |
| 1299 added += RelinkFreeListCategories(p); | 1308 added += RelinkFreeListCategories(p); |
| 1300 added += p->wasted_memory(); | 1309 added += p->wasted_memory(); |
| 1301 if (is_local() && (added > kCompactionMemoryWanted)) break; | 1310 if (is_local() && (added > kCompactionMemoryWanted)) break; |
| 1302 } | 1311 } |
| 1303 } | 1312 } |
| 1304 accounting_stats_.IncreaseCapacity(added); | 1313 accounting_stats_.IncreaseCapacity(added); |
| 1305 } | 1314 } |
| 1306 | 1315 |
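The new else branch above orders pages by how useful they still are for allocation: a page whose free list holds less than kPageReuseThreshold bytes is unlinked and re-inserted just before the anchor, so a scan starting at anchor->next_page() sees pages with more reusable memory first. A minimal standalone sketch of that ordering policy, assuming a circular doubly-linked page list with a sentinel anchor (the Page type and the threshold value are simplified stand-ins, not the real V8 classes):

```cpp
// Sketch: relink nearly-full pages to the back of a circular page list.
#include <cstddef>
#include <cstdio>

struct Page {
  size_t available_in_free_list = 0;
  Page* prev = nullptr;
  Page* next = nullptr;

  void Unlink() {
    prev->next = next;
    next->prev = prev;
  }
  void InsertAfter(Page* other) {
    prev = other;
    next = other->next;
    other->next->prev = this;
    other->next = this;
  }
};

constexpr size_t kPageReuseThreshold = 4 * 1024;  // hypothetical value

int main() {
  Page anchor;  // sentinel: an empty list points at itself
  anchor.prev = anchor.next = &anchor;

  Page pages[3];
  pages[0].available_in_free_list = 16 * 1024;  // plenty of room
  pages[1].available_in_free_list = 1 * 1024;   // nearly full
  pages[2].available_in_free_list = 8 * 1024;
  for (Page& p : pages) p.InsertAfter(anchor.prev);  // append in order

  // The policy from the hunk: move nearly-full pages just before the
  // anchor so that allocation scans more promising pages first.
  for (Page& p : pages) {
    if (p.available_in_free_list < kPageReuseThreshold) {
      p.Unlink();
      p.InsertAfter(anchor.prev);
    }
  }

  for (Page* p = anchor.next; p != &anchor; p = p->next)
    printf("page with %zu bytes free\n", p->available_in_free_list);
  // Prints 16384, 8192, 1024: the nearly-full page ends up last.
}
```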
| 1307 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) { | 1316 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) { |
| (...skipping 16 matching lines...) |
| 1324 | 1333 |
| 1325 // Move over pages. | 1334 // Move over pages. |
| 1326 for (auto it = other->begin(); it != other->end();) { | 1335 for (auto it = other->begin(); it != other->end();) { |
| 1327 Page* p = *(it++); | 1336 Page* p = *(it++); |
| 1328 | 1337 |
| 1329 // Relinking requires the category to be unlinked. | 1338 // Relinking requires the category to be unlinked. |
| 1330 other->UnlinkFreeListCategories(p); | 1339 other->UnlinkFreeListCategories(p); |
| 1331 | 1340 |
| 1332 p->Unlink(); | 1341 p->Unlink(); |
| 1333 p->set_owner(this); | 1342 p->set_owner(this); |
| 1334 p->InsertAfter(anchor_.prev_page()); | 1343 if (p->available_in_free_list() < kPageReuseThreshold) { |
| 1344 // Relink pages with little free memory left to just before the anchor.
| 1345 p->InsertAfter(anchor()->prev_page()); |
| 1346 } else { |
| 1347 p->InsertAfter(anchor()); |
| 1348 } |
| 1335 RelinkFreeListCategories(p); | 1349 RelinkFreeListCategories(p); |
| 1336 DCHECK_EQ(p->AvailableInFreeList(), p->available_in_free_list()); | 1350 DCHECK_EQ(p->AvailableInFreeList(), p->available_in_free_list()); |
| 1337 } | 1351 } |
| 1338 } | 1352 } |
| 1339 | 1353 |
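MergeCompactionSpace applies the same heuristic with two destinations: nearly-full pages are parked just before the anchor (the back of the list), while pages with room to spare are inserted directly after the anchor, ahead of everything already in the space. A short sketch of that placement decision, using the same kind of simplified circular-list model (sizes and threshold are hypothetical):

```cpp
// Sketch: front-or-back placement of pages merged from a compaction space.
#include <cstddef>
#include <cstdio>

struct Page {
  size_t available_in_free_list = 0;
  Page* prev = nullptr;
  Page* next = nullptr;
  void InsertAfter(Page* other) {
    prev = other;
    next = other->next;
    other->next->prev = this;
    other->next = this;
  }
};

constexpr size_t kPageReuseThreshold = 4 * 1024;  // hypothetical value

int main() {
  Page anchor;
  anchor.prev = anchor.next = &anchor;

  Page merged[2];
  merged[0].available_in_free_list = 512;        // nearly full: park at back
  merged[1].available_in_free_list = 32 * 1024;  // still useful: go to front
  for (Page& p : merged) {
    if (p.available_in_free_list < kPageReuseThreshold) {
      p.InsertAfter(anchor.prev);  // just before the anchor (back)
    } else {
      p.InsertAfter(&anchor);      // right after the anchor (front)
    }
  }

  for (Page* p = anchor.next; p != &anchor; p = p->next)
    printf("%zu bytes free\n", p->available_in_free_list);
  // Prints 32768 then 512: pages worth reusing are scanned first.
}
```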
| 1340 | 1354 |
| 1341 size_t PagedSpace::CommittedPhysicalMemory() { | 1355 size_t PagedSpace::CommittedPhysicalMemory() { |
| 1342 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory(); | 1356 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory(); |
| 1343 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); | 1357 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
| 1344 size_t size = 0; | 1358 size_t size = 0; |
| 1345 for (Page* page : *this) { | 1359 for (Page* page : *this) { |
| 1346 size += page->CommittedPhysicalMemory(); | 1360 size += page->CommittedPhysicalMemory(); |
| 1347 } | 1361 } |
| 1348 return size; | 1362 return size; |
| 1349 } | 1363 } |
| 1350 | 1364 |
| 1351 bool PagedSpace::ContainsSlow(Address addr) { | 1365 bool PagedSpace::ContainsSlow(Address addr) { |
| 1352 Page* p = Page::FromAddress(addr); | 1366 Page* p = Page::FromAddress(addr); |
| 1353 for (Page* page : *this) { | 1367 for (Page* page : *this) { |
| 1354 if (page == p) return true; | 1368 if (page == p) return true; |
| 1355 } | 1369 } |
| 1356 return false; | 1370 return false; |
| 1357 } | 1371 } |
| 1358 | 1372 |
| 1373 Page* PagedSpace::RemovePageSafe() { |
| 1374 base::LockGuard<base::Mutex> guard(mutex()); |
| 1375 Page* page = anchor()->next_page(); |
| 1376 |
| 1377 while (!page->CanUseForAllocation()) page = page->next_page(); |
| 1378 if (page == anchor() || page->available_in_free_list() < kPageReuseThreshold) |
| 1379 return nullptr; |
| 1380 |
| 1381 AccountUncommitted(page->size()); |
| 1382 accounting_stats_.DeallocateBytes(page->LiveBytesFromFreeList()); |
| 1383 accounting_stats_.DecreaseCapacity(page->area_size()); |
| 1384 page->Unlink(); |
| 1385 UnlinkFreeListCategories(page); |
| 1386 return page; |
| 1387 } |
| 1388 |
| 1389 void PagedSpace::AddPage(Page* page) { |
| 1390 AccountCommitted(page->size()); |
| 1391 accounting_stats_.IncreaseCapacity(page->area_size()); |
| 1392 accounting_stats_.AllocateBytes(page->LiveBytesFromFreeList()); |
| 1393 page->set_owner(this); |
| 1394 RelinkFreeListCategories(page); |
| 1395 page->InsertAfter(anchor()->prev_page()); |
| 1396 } |
| 1397 |
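RemovePageSafe and AddPage form a hand-off pair: under the main space's mutex, a page with enough reusable free memory is unlinked and its accounting reversed, and AddPage replays that accounting on the receiving compaction space. A minimal sketch of the ownership transfer, where Space and Page are simplified stand-ins and the single capacity counter stands in for the real accounting_stats_ and free-list category bookkeeping:

```cpp
// Sketch: move a page (and its accounting) from the shared main space
// to a per-thread compaction space.
#include <cstddef>
#include <cstdio>
#include <mutex>
#include <vector>

constexpr size_t kPageReuseThreshold = 4 * 1024;  // hypothetical value

struct Page {
  size_t area_size;
  size_t available_in_free_list;
};

struct Space {
  std::mutex mutex;  // guards the page list of the shared space
  std::vector<Page*> pages;
  size_t capacity = 0;

  // Called on the main space from an evacuation thread: detach a page
  // that still has enough reusable free memory, or return nullptr.
  Page* RemovePageSafe() {
    std::lock_guard<std::mutex> guard(mutex);
    for (auto it = pages.begin(); it != pages.end(); ++it) {
      Page* p = *it;
      if (p->available_in_free_list >= kPageReuseThreshold) {
        pages.erase(it);
        capacity -= p->area_size;  // accounting leaves with the page
        return p;
      }
    }
    return nullptr;
  }

  // Called on the receiving space, which is used single-threaded.
  void AddPage(Page* p) {
    capacity += p->area_size;
    pages.push_back(p);
  }
};

int main() {
  Space main_space, compaction_space;
  Page page{512 * 1024, 64 * 1024};
  main_space.AddPage(&page);

  if (Page* stolen = main_space.RemovePageSafe())
    compaction_space.AddPage(stolen);
  printf("main: %zu bytes, compaction: %zu bytes\n", main_space.capacity,
         compaction_space.capacity);  // main: 0, compaction: 524288
}
```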
| 1359 void PagedSpace::ShrinkImmortalImmovablePages() { | 1398 void PagedSpace::ShrinkImmortalImmovablePages() { |
| 1360 DCHECK(!heap()->deserialization_complete()); | 1399 DCHECK(!heap()->deserialization_complete()); |
| 1361 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); | 1400 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
| 1362 EmptyAllocationInfo(); | 1401 EmptyAllocationInfo(); |
| 1363 ResetFreeList(); | 1402 ResetFreeList(); |
| 1364 | 1403 |
| 1365 for (Page* page : *this) { | 1404 for (Page* page : *this) { |
| 1366 DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE)); | 1405 DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE)); |
| 1367 size_t unused = page->ShrinkToHighWaterMark(); | 1406 size_t unused = page->ShrinkToHighWaterMark(); |
| 1368 accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused)); | 1407 accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused)); |
| 1369 AccountUncommitted(unused); | 1408 AccountUncommitted(unused); |
| 1370 } | 1409 } |
| 1371 } | 1410 } |
| 1372 | 1411 |
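ShrinkToHighWaterMark, called per page above, releases the committed tail of a page beyond the highest address ever allocated on it; the loop then subtracts exactly that many bytes from capacity and committed memory. A rough model of the size computation, assuming a hypothetical fixed commit granularity (the real constant and page layout differ):

```cpp
// Rough model: bytes past the high water mark, rounded up to the commit
// granularity, can be uncommitted. Constants are hypothetical.
#include <cstddef>
#include <cstdio>

constexpr size_t kCommitGranularity = 4 * 1024;  // e.g. an OS page

size_t RoundUp(size_t x, size_t multiple) {
  return (x + multiple - 1) / multiple * multiple;
}

// Returns how many bytes at the end of the usable area become unused.
size_t ShrinkToHighWaterMark(size_t area_size, size_t high_water_mark) {
  size_t committed_tail = RoundUp(high_water_mark, kCommitGranularity);
  return committed_tail < area_size ? area_size - committed_tail : 0;
}

int main() {
  // A 512 KiB area of which only 10 KiB were ever allocated:
  printf("unused: %zu bytes\n", ShrinkToHighWaterMark(512 * 1024, 10 * 1024));
  // 12 KiB stay committed (10 KiB rounded up); 512000 bytes are released.
}
```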
| 1373 bool PagedSpace::Expand() { | 1412 bool PagedSpace::Expand() { |
| 1413 base::LockGuard<base::Mutex> guard(mutex()); |
| 1414 |
| 1374 const int size = AreaSize(); | 1415 const int size = AreaSize(); |
| 1375 | 1416 |
| 1376 if (!heap()->CanExpandOldGeneration(size)) return false; | 1417 if (!heap()->CanExpandOldGeneration(size)) return false; |
| 1377 | 1418 |
| 1378 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable()); | 1419 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable()); |
| 1379 if (p == nullptr) return false; | 1420 if (p == nullptr) return false; |
| 1380 | 1421 |
| 1381 AccountCommitted(p->size()); | 1422 AccountCommitted(p->size()); |
| 1382 | 1423 |
| 1383 // Pages created during bootstrapping may contain immortal immovable objects. | 1424 // Pages created during bootstrapping may contain immortal immovable objects. |
| (...skipping 1527 matching lines...) |
| 2911 if (object != NULL) return object; | 2952 if (object != NULL) return object; |
| 2912 | 2953 |
| 2913 // If sweeping is still in progress try to sweep pages on the main thread. | 2954 // If sweeping is still in progress try to sweep pages on the main thread. |
| 2914 int max_freed = collector->sweeper().ParallelSweepSpace( | 2955 int max_freed = collector->sweeper().ParallelSweepSpace( |
| 2915 identity(), size_in_bytes, kMaxPagesToSweep); | 2956 identity(), size_in_bytes, kMaxPagesToSweep); |
| 2916 RefillFreeList(); | 2957 RefillFreeList(); |
| 2917 if (max_freed >= size_in_bytes) { | 2958 if (max_freed >= size_in_bytes) { |
| 2918 object = free_list_.Allocate(static_cast<size_t>(size_in_bytes)); | 2959 object = free_list_.Allocate(static_cast<size_t>(size_in_bytes)); |
| 2919 if (object != nullptr) return object; | 2960 if (object != nullptr) return object; |
| 2920 } | 2961 } |
| 2962 } else if (is_local()) { |
| 2963 // Sweeping is not in progress and we are on a {CompactionSpace}. This
| 2964 // can only happen when we are evacuating for the young generation.
| 2965 PagedSpace* main_space = heap()->paged_space(identity()); |
| 2966 Page* page = main_space->RemovePageSafe(); |
| 2967 if (page != nullptr) { |
| 2968 AddPage(page); |
| 2969 HeapObject* object = |
| 2970 free_list_.Allocate(static_cast<size_t>(size_in_bytes)); |
| 2971 if (object != nullptr) return object; |
| 2972 } |
| 2921 } | 2973 } |
| 2922 | 2974 |
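The new else-if branch above adds a third rung to the slow-allocation ladder: when sweeping has finished and the allocation runs on a compaction space (young-generation evacuation), the space first tries to take a mostly-free page from the corresponding main space via RemovePageSafe/AddPage before anyone considers growing the heap. A compact sketch of the resulting fallback order, where every flag is a hypothetical stand-in for the real checks:

```cpp
// Sketch: fallback order of the slow allocation path after this change.
#include <cstdio>

enum class Step { kFreeList, kSweep, kStealPage, kExpand, kFail };

// Each stage either satisfies the allocation or defers to the next one.
Step SlowAllocate(bool sweeping_in_progress, bool is_local,
                  bool free_list_hit, bool sweep_freed_enough,
                  bool page_stolen, bool can_expand) {
  if (free_list_hit) return Step::kFreeList;
  if (sweeping_in_progress) {
    // Sweep some pages on this thread, refill, and retry the free list.
    if (sweep_freed_enough) return Step::kSweep;
  } else if (is_local) {
    // New: a compaction space may take a mostly-free page from the
    // main space instead of growing the heap.
    if (page_stolen) return Step::kStealPage;
  }
  if (can_expand) return Step::kExpand;
  return Step::kFail;  // last resort: the caller triggers a GC
}

int main() {
  // A young-generation evacuation thread, sweeping already finished:
  Step s = SlowAllocate(/*sweeping_in_progress=*/false, /*is_local=*/true,
                        /*free_list_hit=*/false, /*sweep_freed_enough=*/false,
                        /*page_stolen=*/true, /*can_expand=*/true);
  printf("satisfied at step %d\n", static_cast<int>(s));  // kStealPage
}
```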
| 2923 if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) { | 2975 if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) { |
| 2924 DCHECK((CountTotalPages() > 1) || | 2976 DCHECK((CountTotalPages() > 1) || |
| 2925 (static_cast<size_t>(size_in_bytes) <= free_list_.Available())); | 2977 (static_cast<size_t>(size_in_bytes) <= free_list_.Available())); |
| 2926 return free_list_.Allocate(static_cast<size_t>(size_in_bytes)); | 2978 return free_list_.Allocate(static_cast<size_t>(size_in_bytes)); |
| 2927 } | 2979 } |
| 2928 | 2980 |
| 2929 // If sweeper threads are active, wait for them at that point and steal | 2981 // If sweeper threads are active, wait for them at that point and steal |
| 2930 // elements from their free-lists. Allocation may still fail here, which | 2982 // elements from their free-lists. Allocation may still fail here, which |
| (...skipping 383 matching lines...) |
| 3314 PrintF("\n"); | 3366 PrintF("\n"); |
| 3315 } | 3367 } |
| 3316 printf(" --------------------------------------\n"); | 3368 printf(" --------------------------------------\n"); |
| 3317 printf(" Marked: %x, LiveCount: %" V8PRIdPTR "\n", mark_size, | 3369 printf(" Marked: %x, LiveCount: %" V8PRIdPTR "\n", mark_size, |
| 3318 MarkingState::Internal(this).live_bytes()); | 3370 MarkingState::Internal(this).live_bytes()); |
| 3319 } | 3371 } |
| 3320 | 3372 |
| 3321 #endif // DEBUG | 3373 #endif // DEBUG |
| 3322 } // namespace internal | 3374 } // namespace internal |
| 3323 } // namespace v8 | 3375 } // namespace v8 |