OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 663 matching lines...)
674 ASSERT(!m_firstPage); | 674 ASSERT(!m_firstPage); |
675 ASSERT(!m_firstLargeHeapObject); | 675 ASSERT(!m_firstLargeHeapObject); |
676 } | 676 } |
677 | 677 |
678 template<typename Header> | 678 template<typename Header> |
679 void ThreadHeap<Header>::cleanupPages() | 679 void ThreadHeap<Header>::cleanupPages() |
680 { | 680 { |
681 clearFreeLists(); | 681 clearFreeLists(); |
682 | 682 |
683 // Add the ThreadHeap's pages to the orphanedPagePool. | 683 // Add the ThreadHeap's pages to the orphanedPagePool. |
684 for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next) | 684 for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next) { |
| 685 Heap::decreaseAllocatedSpace(blinkPageSize); |
685 Heap::orphanedPagePool()->addOrphanedPage(m_index, page); | 686 Heap::orphanedPagePool()->addOrphanedPage(m_index, page); |
| 687 } |
686 m_firstPage = 0; | 688 m_firstPage = 0; |
687 | 689 |
688     for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next) | 690     for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next) { |
| 691 Heap::decreaseAllocatedSpace(largeObject->size()); |
689 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject); | 692 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject); |
| 693 } |
690 m_firstLargeHeapObject = 0; | 694 m_firstLargeHeapObject = 0; |
691 } | 695 } |
692 | 696 |
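Note: the hunks above are part of a refactoring that moves space accounting off the per-thread HeapStats object (stats()) and onto static counters owned by Heap. A minimal sketch of what such shared counters could look like, assuming std::atomic storage (the actual patch may use WTF atomic primitives instead; names mirror the diff, the implementation is an assumption):

    #include <atomic>
    #include <cstddef>

    // Hypothetical stand-in for the Heap::increase/decreaseAllocatedSpace
    // calls introduced by this patch.
    class HeapCounters {
    public:
        static void increaseAllocatedSpace(size_t delta)
        {
            s_allocatedSpace.fetch_add(delta, std::memory_order_relaxed);
        }
        static void decreaseAllocatedSpace(size_t delta)
        {
            s_allocatedSpace.fetch_sub(delta, std::memory_order_relaxed);
        }
        static size_t allocatedSpace()
        {
            return s_allocatedSpace.load(std::memory_order_relaxed);
        }

    private:
        static std::atomic<size_t> s_allocatedSpace;
    };

    std::atomic<size_t> HeapCounters::s_allocatedSpace{0};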
693 template<typename Header> | 697 template<typename Header> |
694 void ThreadHeap<Header>::updateRemainingAllocationSize() | 698 void ThreadHeap<Header>::updateRemainingAllocationSize() |
695 { | 699 { |
696 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { | 700 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { |
697         stats().increaseObjectSpace(m_lastRemainingAllocationSize - remainingAllocationSize()); | 701         Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize()); |
698 m_lastRemainingAllocationSize = remainingAllocationSize(); | 702 m_lastRemainingAllocationSize = remainingAllocationSize(); |
699 } | 703 } |
700 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); | 704 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); |
701 } | 705 } |
702 | 706 |
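For context, updateRemainingAllocationSize accounts bump-pointer allocations lazily: rather than bumping a counter on every allocation, it adds the delta between the last recorded remaining size and the current one. A worked example of the arithmetic (all numbers illustrative):

    // The current allocation region started with 4096 bytes remaining;
    // since the last update, objects of 100, 200 and 50 bytes were
    // bump-allocated from it.
    size_t lastRemaining = 4096;       // m_lastRemainingAllocationSize
    size_t nowRemaining = 4096 - 350;  // remainingAllocationSize()
    size_t delta = lastRemaining - nowRemaining; // 350 bytes
    // Heap::increaseAllocatedObjectSize(delta) is called once for the
    // whole batch, keeping the hot allocation path counter-free.
    lastRemaining = nowRemaining;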
703 template<typename Header> | 707 template<typename Header> |
704 Address ThreadHeap<Header>::outOfLineAllocate(size_t payloadSize, size_t allocationSize, const GCInfo* gcInfo) | 708 Address ThreadHeap<Header>::outOfLineAllocate(size_t payloadSize, size_t allocationSize, const GCInfo* gcInfo) |
705 { | 709 { |
706 ASSERT(allocationSize > remainingAllocationSize()); | 710 ASSERT(allocationSize > remainingAllocationSize()); |
707 if (allocationSize > blinkPageSize / 2) | 711 if (allocationSize > blinkPageSize / 2) |
(...skipping 247 matching lines...)
955 page->clearObjectStartBitMap(); | 959 page->clearObjectStartBitMap(); |
956 page->resetPromptlyFreedSize(); | 960 page->resetPromptlyFreedSize(); |
957 size_t freedCount = 0; | 961 size_t freedCount = 0; |
958 Address startOfGap = page->payload(); | 962 Address startOfGap = page->payload(); |
959     for (Address headerAddress = startOfGap; headerAddress < page->end(); ) { | 963     for (Address headerAddress = startOfGap; headerAddress < page->end(); ) { |
960         BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress); | 964         BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress); |
961 ASSERT(basicHeader->size() > 0); | 965 ASSERT(basicHeader->size() > 0); |
962 ASSERT(basicHeader->size() < blinkPagePayloadSize()); | 966 ASSERT(basicHeader->size() < blinkPagePayloadSize()); |
963 | 967 |
964 if (basicHeader->isPromptlyFreed()) { | 968 if (basicHeader->isPromptlyFreed()) { |
965             stats().decreaseObjectSpace(reinterpret_cast<Header*>(basicHeader)->size()); | 969             Heap::decreaseAllocatedObjectSize(reinterpret_cast<Header*>(basicHeader)->size()); |
966 size_t size = basicHeader->size(); | 970 size_t size = basicHeader->size(); |
967 ASSERT(size >= sizeof(Header)); | 971 ASSERT(size >= sizeof(Header)); |
968 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) | 972 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) |
969 memset(headerAddress, 0, sizeof(Header)); | 973 memset(headerAddress, 0, sizeof(Header)); |
970 #endif | 974 #endif |
971 ++freedCount; | 975 ++freedCount; |
972 headerAddress += size; | 976 headerAddress += size; |
973 continue; | 977 continue; |
974 } | 978 } |
975 | 979 |
(...skipping 63 matching lines...)
1039 memset(headerAddress, 0, size); | 1043 memset(headerAddress, 0, size); |
1040 Header* header = new (NotNull, headerAddress) Header(size, gcInfo); | 1044 Header* header = new (NotNull, headerAddress) Header(size, gcInfo); |
1041 Address result = headerAddress + sizeof(*header); | 1045 Address result = headerAddress + sizeof(*header); |
1042 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 1046 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
1043     LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState()); | 1047     LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState()); |
1044 | 1048 |
1045 // Poison the object header and allocationGranularity bytes after the object | 1049 // Poison the object header and allocationGranularity bytes after the object |
1046 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); | 1050 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); |
1047     ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); | 1051     ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); |
1048 largeObject->link(&m_firstLargeHeapObject); | 1052 largeObject->link(&m_firstLargeHeapObject); |
1049 stats().increaseAllocatedSpace(largeObject->size()); | 1053 Heap::increaseAllocatedSpace(largeObject->size()); |
1050 stats().increaseObjectSpace(largeObject->size()); | 1054 Heap::increaseAllocatedObjectSize(largeObject->size()); |
1051 return result; | 1055 return result; |
1052 } | 1056 } |
1053 | 1057 |
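The allocation and free paths for large objects must stay symmetric: allocateLargeObject above now increases both counters by largeObject->size(), and freeLargeObject below decreases allocated space by the same amount. A sketch of the balance property a unit test could check, reusing the HeapCounters stand-in from the note above (helper name hypothetical):

    #include <cassert>
    #include <cstddef>

    // Simulate one large-object lifetime and check the space counter
    // returns to its starting value.
    void checkLargeObjectBalance(size_t objectSize)
    {
        size_t before = HeapCounters::allocatedSpace();
        HeapCounters::increaseAllocatedSpace(objectSize); // allocate path
        assert(HeapCounters::allocatedSpace() == before + objectSize);
        HeapCounters::decreaseAllocatedSpace(objectSize); // free path
        assert(HeapCounters::allocatedSpace() == before);
    }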
1054 template<typename Header> | 1058 template<typename Header> |
1055 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext) | 1059 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext) |
1056 { | 1060 { |
1057 object->unlink(previousNext); | 1061 object->unlink(previousNext); |
1058 object->finalize(); | 1062 object->finalize(); |
| 1063 Heap::decreaseAllocatedSpace(object->size()); |
1059 | 1064 |
1060 // Unpoison the object header and allocationGranularity bytes after the | 1065 // Unpoison the object header and allocationGranularity bytes after the |
1061 // object before freeing. | 1066 // object before freeing. |
1062 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header)); | 1067 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header)); |
1063     ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); | 1068     ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); |
1064 | 1069 |
1065 if (object->terminating()) { | 1070 if (object->terminating()) { |
1066 ASSERT(ThreadState::current()->isTerminating()); | 1071 ASSERT(ThreadState::current()->isTerminating()); |
1067 // The thread is shutting down so this object is being removed as part | 1072 // The thread is shutting down so this object is being removed as part |
1068 // of a thread local GC. In that case the object could be traced in the | 1073 // of a thread local GC. In that case the object could be traced in the |
(...skipping 201 matching lines...)
1270 { | 1275 { |
1271     // When adding a page to the ThreadHeap using HeapObjectHeaders store the GCInfo on the heap | 1276     // When adding a page to the ThreadHeap using HeapObjectHeaders store the GCInfo on the heap |
1272 // since it is the same for all objects | 1277 // since it is the same for all objects |
1273 ASSERT(gcInfo); | 1278 ASSERT(gcInfo); |
1274 allocatePage(gcInfo); | 1279 allocatePage(gcInfo); |
1275 } | 1280 } |
1276 | 1281 |
1277 template <typename Header> | 1282 template <typename Header> |
1278 void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page) | 1283 void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page) |
1279 { | 1284 { |
| 1285 Heap::decreaseAllocatedSpace(blinkPageSize); |
| 1286 |
1280 MutexLocker locker(m_threadState->sweepMutex()); | 1287 MutexLocker locker(m_threadState->sweepMutex()); |
1281 if (page->terminating()) { | 1288 if (page->terminating()) { |
1282 // The thread is shutting down so this page is being removed as part | 1289 // The thread is shutting down so this page is being removed as part |
1283 // of a thread local GC. In that case the page could be accessed in the | 1290 // of a thread local GC. In that case the page could be accessed in the |
1284 // next global GC either due to a dead object being traced via a | 1291 // next global GC either due to a dead object being traced via a |
1285 // conservative pointer or due to a programming error where an object | 1292 // conservative pointer or due to a programming error where an object |
1286 // in another thread heap keeps a dangling pointer to this object. | 1293 // in another thread heap keeps a dangling pointer to this object. |
1287 // To guard against this we put the page in the orphanedPagePool to | 1294 // To guard against this we put the page in the orphanedPagePool to |
1288 // ensure it is still reachable. After the next global GC it can be | 1295 // ensure it is still reachable. After the next global GC it can be |
1289 // decommitted and moved to the page pool assuming no rogue/dangling | 1296 // decommitted and moved to the page pool assuming no rogue/dangling |
(...skipping 33 matching lines...)
1323 pageMemory = memory; | 1330 pageMemory = memory; |
1324 else | 1331 else |
1325 delete memory; | 1332 delete memory; |
1326 } else { | 1333 } else { |
1327 Heap::freePagePool()->addFreePage(m_index, memory); | 1334 Heap::freePagePool()->addFreePage(m_index, memory); |
1328 } | 1335 } |
1329 offset += blinkPageSize; | 1336 offset += blinkPageSize; |
1330 } | 1337 } |
1331 } | 1338 } |
1332     HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo); | 1339     HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo); |
| 1340 Heap::increaseAllocatedSpace(blinkPageSize); |
1333 // Use a separate list for pages allocated during sweeping to make | 1341 // Use a separate list for pages allocated during sweeping to make |
1334 // sure that we do not accidentally sweep objects that have been | 1342 // sure that we do not accidentally sweep objects that have been |
1335 // allocated during sweeping. | 1343 // allocated during sweeping. |
1336 if (m_threadState->isSweepInProgress()) { | 1344 if (m_threadState->isSweepInProgress()) { |
1337 if (!m_lastPageAllocatedDuringSweeping) | 1345 if (!m_lastPageAllocatedDuringSweeping) |
1338 m_lastPageAllocatedDuringSweeping = page; | 1346 m_lastPageAllocatedDuringSweeping = page; |
1339 page->link(&m_firstPageAllocatedDuringSweeping); | 1347 page->link(&m_firstPageAllocatedDuringSweeping); |
1340 } else { | 1348 } else { |
1341 page->link(&m_firstPage); | 1349 page->link(&m_firstPage); |
1342 } | 1350 } |
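Note that the Heap::increaseAllocatedSpace(blinkPageSize) call was moved here from the HeapPage constructor (removed at old line 1545 below), so each page is accounted exactly once when it enters the heap and un-accounted once in removePageFromHeap or cleanupPages. A sketch of that pairing as an RAII helper, reusing the HeapCounters stand-in from the first note (the blinkPageSize value is an assumption):

    #include <cstddef>

    const size_t kBlinkPageSize = 1 << 17; // assumed; defined in Heap.h

    // Ties the accounting to the page's lifetime so the increase and the
    // matching decrease cannot get out of balance.
    struct AccountedPage {
        AccountedPage()  { HeapCounters::increaseAllocatedSpace(kBlinkPageSize); }
        ~AccountedPage() { HeapCounters::decreaseAllocatedSpace(kBlinkPageSize); }
    };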
(...skipping 17 matching lines...)
1360 { | 1368 { |
1361     for (HeapPage<Header>* page = m_firstPageAllocatedDuringSweeping; page; page = page->next()) { | 1369     for (HeapPage<Header>* page = m_firstPageAllocatedDuringSweeping; page; page = page->next()) { |
1362 if (page->contains(address)) | 1370 if (page->contains(address)) |
1363 return true; | 1371 return true; |
1364 } | 1372 } |
1365 return false; | 1373 return false; |
1366 } | 1374 } |
1367 #endif | 1375 #endif |
1368 | 1376 |
1369 template<typename Header> | 1377 template<typename Header> |
1370 void ThreadHeap<Header>::getStatsForTesting(HeapStats& stats) | 1378 size_t ThreadHeap<Header>::objectPayloadSizeForTesting() |
1371 { | 1379 { |
1372 ASSERT(!m_firstPageAllocatedDuringSweeping); | 1380 ASSERT(!m_firstPageAllocatedDuringSweeping); |
| 1381 size_t objectPayloadSize = 0; |
1373 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) | 1382 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) |
1374 page->getStatsForTesting(stats); | 1383 objectPayloadSize += page->objectPayloadSizeForTesting(); |
1375     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) | 1384     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) |
1376 current->getStatsForTesting(stats); | 1385 objectPayloadSize += current->objectPayloadSizeForTesting(); |
| 1386 return objectPayloadSize; |
1377 } | 1387 } |
1378 | 1388 |
1379 template<typename Header> | 1389 template<typename Header> |
1380 void ThreadHeap<Header>::sweepNormalPages(HeapStats* stats) | 1390 void ThreadHeap<Header>::sweepNormalPages() |
1381 { | 1391 { |
1382 TRACE_EVENT0("blink_gc", "ThreadHeap::sweepNormalPages"); | 1392 TRACE_EVENT0("blink_gc", "ThreadHeap::sweepNormalPages"); |
1383 HeapPage<Header>* page = m_firstPage; | 1393 HeapPage<Header>* page = m_firstPage; |
1384 HeapPage<Header>** previousNext = &m_firstPage; | 1394 HeapPage<Header>** previousNext = &m_firstPage; |
1385 HeapPage<Header>* previous = 0; | 1395 HeapPage<Header>* previous = 0; |
1386 while (page) { | 1396 while (page) { |
1387 page->resetPromptlyFreedSize(); | 1397 page->resetPromptlyFreedSize(); |
1388 if (page->isEmpty()) { | 1398 if (page->isEmpty()) { |
1389 HeapPage<Header>* unused = page; | 1399 HeapPage<Header>* unused = page; |
1390 if (unused == m_mergePoint) | 1400 if (unused == m_mergePoint) |
1391 m_mergePoint = previous; | 1401 m_mergePoint = previous; |
1392 page = page->next(); | 1402 page = page->next(); |
1393 HeapPage<Header>::unlink(this, unused, previousNext); | 1403 HeapPage<Header>::unlink(this, unused, previousNext); |
1394 --m_numberOfNormalPages; | 1404 --m_numberOfNormalPages; |
1395 } else { | 1405 } else { |
1396 page->sweep(stats, this); | 1406 page->sweep(this); |
1397 previousNext = &page->m_next; | 1407 previousNext = &page->m_next; |
1398 previous = page; | 1408 previous = page; |
1399 page = page->next(); | 1409 page = page->next(); |
1400 } | 1410 } |
1401 } | 1411 } |
1402 } | 1412 } |
1403 | 1413 |
1404 template<typename Header> | 1414 template<typename Header> |
1405 void ThreadHeap<Header>::sweepLargePages(HeapStats* stats) | 1415 void ThreadHeap<Header>::sweepLargePages() |
1406 { | 1416 { |
1407 TRACE_EVENT0("blink_gc", "ThreadHeap::sweepLargePages"); | 1417 TRACE_EVENT0("blink_gc", "ThreadHeap::sweepLargePages"); |
1408 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject; | 1418 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject; |
1409 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { | 1419 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { |
1410 if (current->isMarked()) { | 1420 if (current->isMarked()) { |
1411 stats->increaseAllocatedSpace(current->size()); | 1421 Heap::increaseMarkedObjectSize(current->size()); |
1412 stats->increaseObjectSpace(current->size()); | |
1413 current->unmark(); | 1422 current->unmark(); |
1414 previousNext = ¤t->m_next; | 1423 previousNext = ¤t->m_next; |
1415 current = current->next(); | 1424 current = current->next(); |
1416 } else { | 1425 } else { |
1417 LargeHeapObject<Header>* next = current->next(); | 1426 LargeHeapObject<Header>* next = current->next(); |
1418 freeLargeObject(current, previousNext); | 1427 freeLargeObject(current, previousNext); |
1419 current = next; | 1428 current = next; |
1420 } | 1429 } |
1421 } | 1430 } |
1422 } | 1431 } |
1423 | 1432 |
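During sweeping, surviving (marked) objects now feed a separate markedObjectSize counter instead of being re-added to the allocated-object statistics. A compact model of that split, mirroring the sweepLargePages loop above (a sketch with plain size_t counters, not the real data structures):

    #include <cstddef>
    #include <vector>

    struct Object { size_t size; bool marked; };

    // One sweep pass: unmarked objects are freed and leave allocatedSpace;
    // marked objects are unmarked for the next cycle and counted as live.
    size_t sweep(std::vector<Object>& objects,
                 size_t& allocatedSpace, size_t& markedObjectSize)
    {
        size_t freed = 0;
        for (size_t i = 0; i < objects.size();) {
            if (objects[i].marked) {
                markedObjectSize += objects[i].size;
                objects[i].marked = false;
                ++i;
            } else {
                allocatedSpace -= objects[i].size;
                freed += objects[i].size;
                objects.erase(objects.begin() + i);
            }
        }
        return freed;
    }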
1424 | 1433 |
1425 // STRICT_ASAN_FINALIZATION_CHECKING turns on poisoning of all objects during | 1434 // STRICT_ASAN_FINALIZATION_CHECKING turns on poisoning of all objects during |
1426 // sweeping to catch cases where dead objects touch each other. This is not | 1435 // sweeping to catch cases where dead objects touch each other. This is not |
1427 // turned on by default because it also triggers for cases that are safe. | 1436 // turned on by default because it also triggers for cases that are safe. |
1428 // Examples of such safe cases are context life cycle observers and timers | 1437 // Examples of such safe cases are context life cycle observers and timers |
1429 // embedded in garbage collected objects. | 1438 // embedded in garbage collected objects. |
1430 #define STRICT_ASAN_FINALIZATION_CHECKING 0 | 1439 #define STRICT_ASAN_FINALIZATION_CHECKING 0 |
1431 | 1440 |
1432 template<typename Header> | 1441 template<typename Header> |
1433 void ThreadHeap<Header>::sweep(HeapStats* stats) | 1442 void ThreadHeap<Header>::sweep() |
1434 { | 1443 { |
1435 ASSERT(isConsistentForSweeping()); | 1444 ASSERT(isConsistentForSweeping()); |
1436 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING | 1445 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING |
1437 // When using ASan do a pre-sweep where all unmarked objects are | 1446 // When using ASan do a pre-sweep where all unmarked objects are |
1438 // poisoned before calling their finalizer methods. This can catch | 1447 // poisoned before calling their finalizer methods. This can catch |
1439 // the case where the finalizer of an object tries to modify | 1448 // the case where the finalizer of an object tries to modify |
1440 // another object as part of finalization. | 1449 // another object as part of finalization. |
1441 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) | 1450 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) |
1442 page->poisonUnmarkedObjects(); | 1451 page->poisonUnmarkedObjects(); |
1443 #endif | 1452 #endif |
1444 sweepNormalPages(stats); | 1453 sweepNormalPages(); |
1445 sweepLargePages(stats); | 1454 sweepLargePages(); |
1446 } | 1455 } |
1447 | 1456 |
1448 template<typename Header> | 1457 template<typename Header> |
1449 void ThreadHeap<Header>::postSweepProcessing() | 1458 void ThreadHeap<Header>::postSweepProcessing() |
1450 { | 1459 { |
1451 // If pages have been allocated during sweeping, link them into | 1460 // If pages have been allocated during sweeping, link them into |
1452 // the list of pages. | 1461 // the list of pages. |
1453 if (m_firstPageAllocatedDuringSweeping) { | 1462 if (m_firstPageAllocatedDuringSweeping) { |
1454 m_lastPageAllocatedDuringSweeping->m_next = m_firstPage; | 1463 m_lastPageAllocatedDuringSweeping->m_next = m_firstPage; |
1455 m_firstPage = m_firstPageAllocatedDuringSweeping; | 1464 m_firstPage = m_firstPageAllocatedDuringSweeping; |
(...skipping 79 matching lines...)
1535 } | 1544 } |
1536 | 1545 |
1537 template<typename Header> | 1546 template<typename Header> |
1538 HeapPage<Header>::HeapPage(PageMemory* storage, ThreadHeap<Header>* heap, const GCInfo* gcInfo) | 1547 HeapPage<Header>::HeapPage(PageMemory* storage, ThreadHeap<Header>* heap, const GCInfo* gcInfo) |
1539 : BaseHeapPage(storage, gcInfo, heap->threadState()) | 1548 : BaseHeapPage(storage, gcInfo, heap->threadState()) |
1540 , m_next(0) | 1549 , m_next(0) |
1541 { | 1550 { |
1542     COMPILE_ASSERT(!(sizeof(HeapPage<Header>) & allocationMask), page_header_incorrectly_aligned); | 1551     COMPILE_ASSERT(!(sizeof(HeapPage<Header>) & allocationMask), page_header_incorrectly_aligned); |
1543 m_objectStartBitMapComputed = false; | 1552 m_objectStartBitMapComputed = false; |
1544 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); | 1553 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); |
1545 heap->stats().increaseAllocatedSpace(blinkPageSize); | |
1546 } | 1554 } |
1547 | 1555 |
1548 template<typename Header> | 1556 template<typename Header> |
1549 void HeapPage<Header>::link(HeapPage** prevNext) | 1557 void HeapPage<Header>::link(HeapPage** prevNext) |
1550 { | 1558 { |
1551 m_next = *prevNext; | 1559 m_next = *prevNext; |
1552 *prevNext = this; | 1560 *prevNext = this; |
1553 } | 1561 } |
1554 | 1562 |
1555 template<typename Header> | 1563 template<typename Header> |
1556 void HeapPage<Header>::unlink(ThreadHeap<Header>* heap, HeapPage* unused, HeapPage** prevNext) | 1564 void HeapPage<Header>::unlink(ThreadHeap<Header>* heap, HeapPage* unused, HeapPage** prevNext) |
1557 { | 1565 { |
1558 *prevNext = unused->m_next; | 1566 *prevNext = unused->m_next; |
1559 heap->removePageFromHeap(unused); | 1567 heap->removePageFromHeap(unused); |
1560 } | 1568 } |
1561 | 1569 |
1562 template<typename Header> | 1570 template<typename Header> |
1563 void HeapPage<Header>::getStatsForTesting(HeapStats& stats) | 1571 size_t HeapPage<Header>::objectPayloadSizeForTesting() |
1564 { | 1572 { |
1565 stats.increaseAllocatedSpace(blinkPageSize); | 1573 size_t objectPayloadSize = 0; |
1566 Address headerAddress = payload(); | 1574 Address headerAddress = payload(); |
1567 ASSERT(headerAddress != end()); | 1575 ASSERT(headerAddress != end()); |
1568 do { | 1576 do { |
1569 Header* header = reinterpret_cast<Header*>(headerAddress); | 1577 Header* header = reinterpret_cast<Header*>(headerAddress); |
1570 if (!header->isFree()) { | 1578 if (!header->isFree()) { |
1571 stats.increaseObjectSpace(header->payloadSize()); | 1579 objectPayloadSize += header->payloadSize(); |
1572 } | 1580 } |
1573 ASSERT(header->size() < blinkPagePayloadSize()); | 1581 ASSERT(header->size() < blinkPagePayloadSize()); |
1574 headerAddress += header->size(); | 1582 headerAddress += header->size(); |
1575 ASSERT(headerAddress <= end()); | 1583 ASSERT(headerAddress <= end()); |
1576 } while (headerAddress < end()); | 1584 } while (headerAddress < end()); |
| 1585 return objectPayloadSize; |
1577 } | 1586 } |
1578 | 1587 |
1579 template<typename Header> | 1588 template<typename Header> |
1580 bool HeapPage<Header>::isEmpty() | 1589 bool HeapPage<Header>::isEmpty() |
1581 { | 1590 { |
1582 BasicObjectHeader* header = reinterpret_cast<BasicObjectHeader*>(payload()); | 1591 BasicObjectHeader* header = reinterpret_cast<BasicObjectHeader*>(payload()); |
1583 return header->isFree() && (header->size() == payloadSize()); | 1592 return header->isFree() && (header->size() == payloadSize()); |
1584 } | 1593 } |
1585 | 1594 |
1586 template<typename Header> | 1595 template<typename Header> |
1587 void HeapPage<Header>::sweep(HeapStats* stats, ThreadHeap<Header>* heap) | 1596 void HeapPage<Header>::sweep(ThreadHeap<Header>* heap) |
1588 { | 1597 { |
1589 clearObjectStartBitMap(); | 1598 clearObjectStartBitMap(); |
1590 stats->increaseAllocatedSpace(blinkPageSize); | |
1591 Address startOfGap = payload(); | 1599 Address startOfGap = payload(); |
1592 for (Address headerAddress = startOfGap; headerAddress < end(); ) { | 1600 for (Address headerAddress = startOfGap; headerAddress < end(); ) { |
1593         BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress); | 1601         BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress); |
1594 ASSERT(basicHeader->size() > 0); | 1602 ASSERT(basicHeader->size() > 0); |
1595 ASSERT(basicHeader->size() < blinkPagePayloadSize()); | 1603 ASSERT(basicHeader->size() < blinkPagePayloadSize()); |
1596 | 1604 |
1597 if (basicHeader->isFree()) { | 1605 if (basicHeader->isFree()) { |
1598 size_t size = basicHeader->size(); | 1606 size_t size = basicHeader->size(); |
1599 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) | 1607 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) |
1600 // Zero the memory in the free list header to maintain the | 1608 // Zero the memory in the free list header to maintain the |
(...skipping 25 matching lines...)
1626 #endif | 1634 #endif |
1627 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); | 1635 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); |
1628 headerAddress += size; | 1636 headerAddress += size; |
1629 continue; | 1637 continue; |
1630 } | 1638 } |
1631 | 1639 |
1632 if (startOfGap != headerAddress) | 1640 if (startOfGap != headerAddress) |
1633 heap->addToFreeList(startOfGap, headerAddress - startOfGap); | 1641 heap->addToFreeList(startOfGap, headerAddress - startOfGap); |
1634 header->unmark(); | 1642 header->unmark(); |
1635 headerAddress += header->size(); | 1643 headerAddress += header->size(); |
1636 stats->increaseObjectSpace(header->size()); | 1644 Heap::increaseMarkedObjectSize(header->size()); |
1637 startOfGap = headerAddress; | 1645 startOfGap = headerAddress; |
1638 } | 1646 } |
1639 if (startOfGap != end()) | 1647 if (startOfGap != end()) |
1640 heap->addToFreeList(startOfGap, end() - startOfGap); | 1648 heap->addToFreeList(startOfGap, end() - startOfGap); |
1641 } | 1649 } |
1642 | 1650 |
1643 template<typename Header> | 1651 template<typename Header> |
1644 void HeapPage<Header>::clearLiveAndMarkDead() | 1652 void HeapPage<Header>::clearLiveAndMarkDead() |
1645 { | 1653 { |
1646 for (Address headerAddress = payload(); headerAddress < end();) { | 1654 for (Address headerAddress = payload(); headerAddress < end();) { |
(...skipping 205 matching lines...)
1852 return gcInfo()->hasVTable(); | 1860 return gcInfo()->hasVTable(); |
1853 } | 1861 } |
1854 | 1862 |
1855 template<> | 1863 template<> |
1856 inline bool HeapPage<FinalizedHeapObjectHeader>::hasVTable(FinalizedHeapObjectHeader* header) | 1864 inline bool HeapPage<FinalizedHeapObjectHeader>::hasVTable(FinalizedHeapObjectHeader* header) |
1857 { | 1865 { |
1858 return header->hasVTable(); | 1866 return header->hasVTable(); |
1859 } | 1867 } |
1860 | 1868 |
1861 template<typename Header> | 1869 template<typename Header> |
1862 void LargeHeapObject<Header>::getStatsForTesting(HeapStats& stats) | 1870 size_t LargeHeapObject<Header>::objectPayloadSizeForTesting() |
1863 { | 1871 { |
1864 stats.increaseAllocatedSpace(size()); | 1872 return payloadSize(); |
1865 stats.increaseObjectSpace(payloadSize()); | |
1866 } | 1873 } |
1867 | 1874 |
1868 #if ENABLE(GC_PROFILE_HEAP) | 1875 #if ENABLE(GC_PROFILE_HEAP) |
1869 template<typename Header> | 1876 template<typename Header> |
1870 void LargeHeapObject<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) | 1877 void LargeHeapObject<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) |
1871 { | 1878 { |
1872 Header* header = heapObjectHeader(); | 1879 Header* header = heapObjectHeader(); |
1873 size_t tag = info->getClassTag(header->gcInfo()); | 1880 size_t tag = info->getClassTag(header->gcInfo()); |
1874 size_t age = header->age(); | 1881 size_t age = header->age(); |
1875 if (isMarked()) { | 1882 if (isMarked()) { |
(...skipping 319 matching lines...)
2195 ThreadState::init(); | 2202 ThreadState::init(); |
2196 s_markingStack = new CallbackStack(); | 2203 s_markingStack = new CallbackStack(); |
2197 s_postMarkingCallbackStack = new CallbackStack(); | 2204 s_postMarkingCallbackStack = new CallbackStack(); |
2198 s_weakCallbackStack = new CallbackStack(); | 2205 s_weakCallbackStack = new CallbackStack(); |
2199 s_ephemeronStack = new CallbackStack(); | 2206 s_ephemeronStack = new CallbackStack(); |
2200 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); | 2207 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); |
2201 s_markingVisitor = new MarkingVisitor(s_markingStack); | 2208 s_markingVisitor = new MarkingVisitor(s_markingStack); |
2202 s_freePagePool = new FreePagePool(); | 2209 s_freePagePool = new FreePagePool(); |
2203 s_orphanedPagePool = new OrphanedPagePool(); | 2210 s_orphanedPagePool = new OrphanedPagePool(); |
2204 s_markingThreads = new Vector<OwnPtr<WebThread>>(); | 2211 s_markingThreads = new Vector<OwnPtr<WebThread>>(); |
| 2212 s_allocatedObjectSize = 0; |
| 2213 s_allocatedSpace = 0; |
| 2214 s_markedObjectSize = 0; |
2205 if (Platform::current()) { | 2215 if (Platform::current()) { |
2206 int processors = Platform::current()->numberOfProcessors(); | 2216 int processors = Platform::current()->numberOfProcessors(); |
2207         int numberOfMarkingThreads = std::min(processors, maxNumberOfMarkingThreads); | 2217         int numberOfMarkingThreads = std::min(processors, maxNumberOfMarkingThreads); |
2208 for (int i = 0; i < numberOfMarkingThreads; i++) | 2218 for (int i = 0; i < numberOfMarkingThreads; i++) |
2209             s_markingThreads->append(adoptPtr(Platform::current()->createThread("Blink GC Marking Thread"))); | 2219             s_markingThreads->append(adoptPtr(Platform::current()->createThread("Blink GC Marking Thread"))); |
2210 } | 2220 } |
2211 } | 2221 } |
2212 | 2222 |
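The counters zeroed here are global to the process: allocatedObjectSize is reset again at the start of each collection (new lines 2511-2512 below), so it effectively measures bytes allocated since the last GC, while markedObjectSize is rebuilt by each sweep. A small sketch of that per-cycle reset discipline (helper names taken from the diff, bodies assumed):

    #include <cstddef>

    // Globals mirroring s_allocatedObjectSize / s_markedObjectSize.
    static size_t s_allocatedObjectSize = 0;
    static size_t s_markedObjectSize = 0;

    // Assumed shape of the resets invoked at the start of collectGarbage();
    // after this, the counters describe only the upcoming cycle.
    static void resetAllocatedObjectSize() { s_allocatedObjectSize = 0; }
    static void resetMarkedObjectSize() { s_markedObjectSize = 0; }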
2213 void Heap::shutdown() | 2223 void Heap::shutdown() |
2214 { | 2224 { |
(...skipping 23 matching lines...)
2238 s_weakCallbackStack = 0; | 2248 s_weakCallbackStack = 0; |
2239 delete s_postMarkingCallbackStack; | 2249 delete s_postMarkingCallbackStack; |
2240 s_postMarkingCallbackStack = 0; | 2250 s_postMarkingCallbackStack = 0; |
2241 delete s_markingStack; | 2251 delete s_markingStack; |
2242 s_markingStack = 0; | 2252 s_markingStack = 0; |
2243 delete s_ephemeronStack; | 2253 delete s_ephemeronStack; |
2244 s_ephemeronStack = 0; | 2254 s_ephemeronStack = 0; |
2245 delete s_regionTree; | 2255 delete s_regionTree; |
2246 s_regionTree = 0; | 2256 s_regionTree = 0; |
2247 ThreadState::shutdown(); | 2257 ThreadState::shutdown(); |
| 2258 ASSERT(Heap::allocatedSpace() == 0); |
2248 } | 2259 } |
2249 | 2260 |
2250 BaseHeapPage* Heap::contains(Address address) | 2261 BaseHeapPage* Heap::contains(Address address) |
2251 { | 2262 { |
2252 ASSERT(ThreadState::isAnyThreadInGC()); | 2263 ASSERT(ThreadState::isAnyThreadInGC()); |
2253     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 2264     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
2254     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 2265     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
2255 BaseHeapPage* page = (*it)->contains(address); | 2266 BaseHeapPage* page = (*it)->contains(address); |
2256 if (page) | 2267 if (page) |
2257 return page; | 2268 return page; |
(...skipping 232 matching lines...)
2490 if (!gcScope.allThreadsParked()) { | 2501 if (!gcScope.allThreadsParked()) { |
2491 ThreadState::current()->setGCRequested(); | 2502 ThreadState::current()->setGCRequested(); |
2492 return; | 2503 return; |
2493 } | 2504 } |
2494 | 2505 |
2495 if (state->isMainThread()) | 2506 if (state->isMainThread()) |
2496 ScriptForbiddenScope::enter(); | 2507 ScriptForbiddenScope::enter(); |
2497 | 2508 |
2498 s_lastGCWasConservative = false; | 2509 s_lastGCWasConservative = false; |
2499 | 2510 |
| 2511 Heap::resetMarkedObjectSize(); |
| 2512 Heap::resetAllocatedObjectSize(); |
| 2513 |
2500 TRACE_EVENT2("blink_gc", "Heap::collectGarbage", | 2514 TRACE_EVENT2("blink_gc", "Heap::collectGarbage", |
2501 "precise", stackState == ThreadState::NoHeapPointersOnStack, | 2515 "precise", stackState == ThreadState::NoHeapPointersOnStack, |
2502 "forced", cause == ThreadState::ForcedGC); | 2516 "forced", cause == ThreadState::ForcedGC); |
2503 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC"); | 2517 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC"); |
2504 double timeStamp = WTF::currentTimeMS(); | 2518 double timeStamp = WTF::currentTimeMS(); |
2505 #if ENABLE(GC_PROFILE_MARKING) | 2519 #if ENABLE(GC_PROFILE_MARKING) |
2506 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear(); | 2520 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear(); |
2507 #endif | 2521 #endif |
2508 | 2522 |
2509 // Disallow allocation during garbage collection (but not | 2523 // Disallow allocation during garbage collection (but not |
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2548 // marking we check that any object marked as dead is not traced. E.g. via a | 2562 // marking we check that any object marked as dead is not traced. E.g. via a |
2549 // conservatively found pointer or a programming error with an object containing | 2563 // conservatively found pointer or a programming error with an object containing |
2550 // a dangling pointer. | 2564 // a dangling pointer. |
2551 orphanedPagePool()->decommitOrphanedPages(); | 2565 orphanedPagePool()->decommitOrphanedPages(); |
2552 | 2566 |
2553 #if ENABLE(GC_PROFILE_MARKING) | 2567 #if ENABLE(GC_PROFILE_MARKING) |
2554 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats(); | 2568 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats(); |
2555 #endif | 2569 #endif |
2556 | 2570 |
2557 if (Platform::current()) { | 2571 if (Platform::current()) { |
2558 uint64_t objectSpaceSize; | |
2559 uint64_t allocatedSpaceSize; | |
2560 getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize); | |
2561         Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); | 2572         Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); |
2562         Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50); | 2573         Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", Heap::allocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50); |
2563         Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50); | 2574         Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", Heap::allocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50); |
2564 } | 2575 } |
2565 | 2576 |
2566 if (state->isMainThread()) | 2577 if (state->isMainThread()) |
2567 ScriptForbiddenScope::exit(); | 2578 ScriptForbiddenScope::exit(); |
2568 } | 2579 } |
2569 | 2580 |
2570 void Heap::collectGarbageForTerminatingThread(ThreadState* state) | 2581 void Heap::collectGarbageForTerminatingThread(ThreadState* state) |
2571 { | 2582 { |
2572 // We explicitly do not enter a safepoint while doing thread specific | 2583 // We explicitly do not enter a safepoint while doing thread specific |
2573 // garbage collection since we don't want to allow a global GC at the | 2584 // garbage collection since we don't want to allow a global GC at the |
(...skipping 218 matching lines...)
2792 } else if (splitOff->m_freeList.m_freeLists[i]) { | 2803 } else if (splitOff->m_freeList.m_freeLists[i]) { |
2793                 m_freeList.m_lastFreeListEntries[i]->append(splitOff->m_freeList.m_freeLists[i]); | 2804                 m_freeList.m_lastFreeListEntries[i]->append(splitOff->m_freeList.m_freeLists[i]); |
2794                 m_freeList.m_lastFreeListEntries[i] = splitOff->m_freeList.m_lastFreeListEntries[i]; | 2805                 m_freeList.m_lastFreeListEntries[i] = splitOff->m_freeList.m_lastFreeListEntries[i]; |
2795 } | 2806 } |
2796 } | 2807 } |
2797         if (m_freeList.m_biggestFreeListIndex < splitOff->m_freeList.m_biggestFreeListIndex) | 2808         if (m_freeList.m_biggestFreeListIndex < splitOff->m_freeList.m_biggestFreeListIndex) |
2798             m_freeList.m_biggestFreeListIndex = splitOff->m_freeList.m_biggestFreeListIndex; | 2809             m_freeList.m_biggestFreeListIndex = splitOff->m_freeList.m_biggestFreeListIndex; |
2799 } | 2810 } |
2800 } | 2811 } |
2801 | 2812 |
2802 void Heap::getHeapSpaceSize(uint64_t* objectSpaceSize, uint64_t* allocatedSpaceSize) | 2813 size_t Heap::objectPayloadSizeForTesting() |
2803 { | 2814 { |
2804 *objectSpaceSize = 0; | 2815 size_t objectPayloadSize = 0; |
2805 *allocatedSpaceSize = 0; | |
2806 ASSERT(ThreadState::isAnyThreadInGC()); | |
2807     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | |
2808 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator; | |
2809     for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) { | |
2810 *objectSpaceSize += (*it)->stats().totalObjectSpace(); | |
2811 *allocatedSpaceSize += (*it)->stats().totalAllocatedSpace(); | |
2812 } | |
2813 } | |
2814 | |
2815 void Heap::getStatsForTesting(HeapStats* stats) | |
2816 { | |
2817 stats->clear(); | |
2818 ASSERT(ThreadState::isAnyThreadInGC()); | 2816 ASSERT(ThreadState::isAnyThreadInGC()); |
2819 makeConsistentForSweeping(); | 2817 makeConsistentForSweeping(); |
2820     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 2818     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
2821 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator; | 2819 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator; |
2822     for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 2820     for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
2823 HeapStats temp; | 2821 objectPayloadSize += (*it)->objectPayloadSizeForTesting(); |
2824 (*it)->getStatsForTesting(temp); | |
2825 stats->add(&temp); | |
2826 } | 2822 } |
| 2823 return objectPayloadSize; |
2827 } | 2824 } |
2828 | 2825 |
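The testing hook now returns one aggregated value instead of filling a HeapStats out-parameter, which simplifies call sites. A plausible gtest-style caller of the new API (the test name and the collectAllGarbage helper are assumptions, not part of this patch):

    #include <gtest/gtest.h>

    // Hypothetical test: after a full precise GC with no live objects,
    // the aggregated payload size should be zero.
    TEST(HeapTest, ObjectPayloadSizeAfterGC)
    {
        Heap::collectAllGarbage(); // assumed helper running a precise GC
        EXPECT_EQ(0u, Heap::objectPayloadSizeForTesting());
    }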
2829 #if ENABLE(ASSERT) | 2826 #if ENABLE(ASSERT) |
2830 bool Heap::isConsistentForSweeping() | 2827 bool Heap::isConsistentForSweeping() |
2831 { | 2828 { |
2832 ASSERT(ThreadState::isAnyThreadInGC()); | 2829 ASSERT(ThreadState::isAnyThreadInGC()); |
2833     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 2830     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
2834     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 2831     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
2835 if (!(*it)->isConsistentForSweeping()) | 2832 if (!(*it)->isConsistentForSweeping()) |
2836 return false; | 2833 return false; |
(...skipping 165 matching lines...)
3002 CallbackStack* Heap::s_markingStack; | 2999 CallbackStack* Heap::s_markingStack; |
3003 CallbackStack* Heap::s_postMarkingCallbackStack; | 3000 CallbackStack* Heap::s_postMarkingCallbackStack; |
3004 CallbackStack* Heap::s_weakCallbackStack; | 3001 CallbackStack* Heap::s_weakCallbackStack; |
3005 CallbackStack* Heap::s_ephemeronStack; | 3002 CallbackStack* Heap::s_ephemeronStack; |
3006 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; | 3003 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; |
3007 bool Heap::s_shutdownCalled = false; | 3004 bool Heap::s_shutdownCalled = false; |
3008 bool Heap::s_lastGCWasConservative = false; | 3005 bool Heap::s_lastGCWasConservative = false; |
3009 FreePagePool* Heap::s_freePagePool; | 3006 FreePagePool* Heap::s_freePagePool; |
3010 OrphanedPagePool* Heap::s_orphanedPagePool; | 3007 OrphanedPagePool* Heap::s_orphanedPagePool; |
3011 Heap::RegionTree* Heap::s_regionTree = 0; | 3008 Heap::RegionTree* Heap::s_regionTree = 0; |
| 3009 size_t Heap::s_allocatedObjectSize = 0; |
| 3010 size_t Heap::s_allocatedSpace = 0; |
| 3011 size_t Heap::s_markedObjectSize = 0; |
3012 | 3012 |
3013 } // namespace blink | 3013 } // namespace blink |