Chromium Code Reviews

Unified Diff: Source/platform/heap/Heap.cpp

Issue 738773003: Revert of Oilpan: Refactor the way we calculate heap statistics (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 6 years, 1 month ago
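
Note: this patch set reverts the statistics refactoring. The removed ("-") lines account heap sizes in process-global counters on Heap (Heap::increaseAllocatedSpace(), Heap::increaseAllocatedObjectSize(), Heap::increaseMarkedObjectSize()); the restored ("+") lines go back to per-thread HeapStats accounting (stats().increaseAllocatedSpace(), stats().increaseObjectSpace()) that is summed across threads on demand (see Heap::getHeapSpaceSize below). A minimal standalone sketch of the two bookkeeping styles, with simplified stand-in types — GlobalCounters and HeapStats here are illustrative, not Blink's actual classes, and the atomic updates are an assumption about the global style:

#include <atomic>
#include <cstddef>

// Style being reverted away from: one set of process-global counters,
// assumed to be updated atomically from any thread.
struct GlobalCounters {
    static std::atomic<size_t> allocatedSpace;
    static void increaseAllocatedSpace(size_t delta) { allocatedSpace.fetch_add(delta, std::memory_order_relaxed); }
    static void decreaseAllocatedSpace(size_t delta) { allocatedSpace.fetch_sub(delta, std::memory_order_relaxed); }
};
std::atomic<size_t> GlobalCounters::allocatedSpace{0};

// Style being reverted back to: each thread owns a HeapStats value and
// updates it without synchronization; totals are computed by summing the
// per-thread values while the threads are stopped at a safepoint.
struct HeapStats {
    size_t objectSpace = 0;
    size_t allocatedSpace = 0;
    void increaseObjectSpace(size_t delta) { objectSpace += delta; }
    void decreaseObjectSpace(size_t delta) { objectSpace -= delta; }
    void increaseAllocatedSpace(size_t delta) { allocatedSpace += delta; }
    void clear() { *this = HeapStats(); }
    void add(const HeapStats* other)
    {
        objectSpace += other->objectSpace;
        allocatedSpace += other->allocatedSpace;
    }
};

The per-thread style keeps synchronization off the allocation fast path at the cost of needing a safepoint to read a consistent total.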
 /*
  * Copyright (C) 2013 Google Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
  * met:
  *
  *     * Redistributions of source code must retain the above copyright
  * notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above
(...skipping 663 matching lines...)
     ASSERT(!m_firstPage);
     ASSERT(!m_firstLargeHeapObject);
 }
 
 template<typename Header>
 void ThreadHeap<Header>::cleanupPages()
 {
     clearFreeLists();
 
     // Add the ThreadHeap's pages to the orphanedPagePool.
-    for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next) {
-        Heap::decreaseAllocatedSpace(blinkPageSize);
+    for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next)
         Heap::orphanedPagePool()->addOrphanedPage(m_index, page);
-    }
     m_firstPage = 0;
 
-    for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next) {
-        Heap::decreaseAllocatedSpace(largeObject->size());
+    for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next)
         Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject);
-    }
     m_firstLargeHeapObject = 0;
 }
 
 template<typename Header>
 void ThreadHeap<Header>::updateRemainingAllocationSize()
 {
     if (m_lastRemainingAllocationSize > remainingAllocationSize()) {
-        Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize());
+        stats().increaseObjectSpace(m_lastRemainingAllocationSize - remainingAllocationSize());
         m_lastRemainingAllocationSize = remainingAllocationSize();
     }
     ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize());
 }
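
Note: the updateRemainingAllocationSize() hunk above restores delta accounting for bump allocation: rather than charging every individual allocation, the heap records how much of the current bump buffer has been consumed since the last update. A hedged sketch of that idea with simplified stand-in types (BumpRegion is illustrative, not Blink's ThreadHeap):

#include <cassert>
#include <cstddef>

// Illustrative bump-pointer region; names mirror the diff, types simplified.
struct BumpRegion {
    char* cursor = nullptr;       // next free byte in the current buffer
    size_t remaining = 0;         // bytes left in the current buffer
    size_t lastRemaining = 0;     // value of `remaining` at the last update
    size_t objectSpace = 0;       // accounted object bytes (a per-thread stat)

    // Fast path: no statistics work at all, just consume the buffer.
    void* allocate(size_t n)
    {
        assert(n <= remaining);
        void* result = cursor;
        cursor += n;
        remaining -= n;
        return result;
    }

    // Called at coarse-grained points (e.g. before a GC or when the buffer
    // is replaced): charge everything consumed since the last update at once.
    void updateRemainingAllocationSize()
    {
        if (lastRemaining > remaining) {
            objectSpace += lastRemaining - remaining;
            lastRemaining = remaining;
        }
        assert(lastRemaining == remaining);
    }
};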
 
 template<typename Header>
 Address ThreadHeap<Header>::outOfLineAllocate(size_t payloadSize, size_t allocationSize, const GCInfo* gcInfo)
 {
     ASSERT(allocationSize > remainingAllocationSize());
     if (allocationSize > blinkPageSize / 2)
(...skipping 247 matching lines...)
         page->clearObjectStartBitMap();
         page->resetPromptlyFreedSize();
         size_t freedCount = 0;
         Address startOfGap = page->payload();
         for (Address headerAddress = startOfGap; headerAddress < page->end(); ) {
             BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
             ASSERT(basicHeader->size() > 0);
             ASSERT(basicHeader->size() < blinkPagePayloadSize());
 
             if (basicHeader->isPromptlyFreed()) {
-                Heap::decreaseAllocatedObjectSize(reinterpret_cast<Header*>(basicHeader)->size());
+                stats().decreaseObjectSpace(reinterpret_cast<Header*>(basicHeader)->size());
                 size_t size = basicHeader->size();
                 ASSERT(size >= sizeof(Header));
 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
                 memset(headerAddress, 0, sizeof(Header));
 #endif
                 ++freedCount;
                 headerAddress += size;
                 continue;
             }
 
(...skipping 63 matching lines...)
     memset(headerAddress, 0, size);
     Header* header = new (NotNull, headerAddress) Header(size, gcInfo);
     Address result = headerAddress + sizeof(*header);
     ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
     LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState());
 
     // Poison the object header and allocationGranularity bytes after the object
     ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
     ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
     largeObject->link(&m_firstLargeHeapObject);
-    Heap::increaseAllocatedSpace(largeObject->size());
-    Heap::increaseAllocatedObjectSize(largeObject->size());
+    stats().increaseAllocatedSpace(largeObject->size());
+    stats().increaseObjectSpace(largeObject->size());
     return result;
 }
 
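Note: both versions poison the large object's header and a trailing allocationGranularity-sized guard with ASan's manual poisoning interface, so stray reads or writes past the payload are reported. A standalone sketch of that pattern (allocateWithGuard/freeWithGuard are hypothetical helpers and the layout is simplified; only the ASAN_POISON_MEMORY_REGION/ASAN_UNPOISON_MEMORY_REGION macros are the real sanitizer API):

#include <cstdlib>

#if defined(__has_feature)
#if __has_feature(address_sanitizer)
#include <sanitizer/asan_interface.h>
#define POISON(addr, size) ASAN_POISON_MEMORY_REGION(addr, size)
#define UNPOISON(addr, size) ASAN_UNPOISON_MEMORY_REGION(addr, size)
#endif
#endif
#ifndef POISON
#define POISON(addr, size) ((void)(addr), (void)(size))   // no-op without ASan
#define UNPOISON(addr, size) ((void)(addr), (void)(size))
#endif

// Allocate payload plus a trailing guard region and poison the guard, so
// that any access past the payload is reported by ASan.
void* allocateWithGuard(size_t payloadSize, size_t guardSize)
{
    char* block = static_cast<char*>(std::malloc(payloadSize + guardSize));
    POISON(block + payloadSize, guardSize);
    return block;
}

// Memory must be unpoisoned before it is handed back to the allocator.
void freeWithGuard(void* block, size_t payloadSize, size_t guardSize)
{
    UNPOISON(static_cast<char*>(block) + payloadSize, guardSize);
    std::free(block);
}
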
 template<typename Header>
 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext)
 {
     object->unlink(previousNext);
     object->finalize();
-    Heap::decreaseAllocatedSpace(object->size());
 
     // Unpoison the object header and allocationGranularity bytes after the
     // object before freeing.
     ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header));
     ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity);
 
     if (object->terminating()) {
         ASSERT(ThreadState::current()->isTerminating());
         // The thread is shutting down so this object is being removed as part
         // of a thread local GC. In that case the object could be traced in the
(...skipping 201 matching lines...)
 {
     // When adding a page to the ThreadHeap using HeapObjectHeaders store the GCInfo on the heap
     // since it is the same for all objects
     ASSERT(gcInfo);
     allocatePage(gcInfo);
 }
 
 template <typename Header>
 void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page)
 {
-    Heap::decreaseAllocatedSpace(blinkPageSize);
-
     MutexLocker locker(m_threadState->sweepMutex());
     if (page->terminating()) {
         // The thread is shutting down so this page is being removed as part
         // of a thread local GC. In that case the page could be accessed in the
         // next global GC either due to a dead object being traced via a
         // conservative pointer or due to a programming error where an object
         // in another thread heap keeps a dangling pointer to this object.
         // To guard against this we put the page in the orphanedPagePool to
         // ensure it is still reachable. After the next global GC it can be
         // decommitted and moved to the page pool assuming no rogue/dangling
(...skipping 33 matching lines...)
                     pageMemory = memory;
                 else
                     delete memory;
             } else {
                 Heap::freePagePool()->addFreePage(m_index, memory);
             }
             offset += blinkPageSize;
         }
     }
     HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo);
-    Heap::increaseAllocatedSpace(blinkPageSize);
     // Use a separate list for pages allocated during sweeping to make
     // sure that we do not accidentally sweep objects that have been
     // allocated during sweeping.
     if (m_threadState->isSweepInProgress()) {
         if (!m_lastPageAllocatedDuringSweeping)
             m_lastPageAllocatedDuringSweeping = page;
         page->link(&m_firstPageAllocatedDuringSweeping);
     } else {
         page->link(&m_firstPage);
     }
(...skipping 17 matching lines...)
 {
     for (HeapPage<Header>* page = m_firstPageAllocatedDuringSweeping; page; page = page->next()) {
         if (page->contains(address))
             return true;
     }
     return false;
 }
 #endif
 
 template<typename Header>
-size_t ThreadHeap<Header>::objectPayloadSizeForTesting()
+void ThreadHeap<Header>::getStatsForTesting(HeapStats& stats)
 {
     ASSERT(!m_firstPageAllocatedDuringSweeping);
-    size_t objectPayloadSize = 0;
     for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
-        objectPayloadSize += page->objectPayloadSizeForTesting();
+        page->getStatsForTesting(stats);
     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
-        objectPayloadSize += current->objectPayloadSizeForTesting();
-    return objectPayloadSize;
+        current->getStatsForTesting(stats);
 }
 
 template<typename Header>
-void ThreadHeap<Header>::sweepNormalPages()
+void ThreadHeap<Header>::sweepNormalPages(HeapStats* stats)
 {
     TRACE_EVENT0("blink_gc", "ThreadHeap::sweepNormalPages");
     HeapPage<Header>* page = m_firstPage;
     HeapPage<Header>** previousNext = &m_firstPage;
     HeapPage<Header>* previous = 0;
     while (page) {
         page->resetPromptlyFreedSize();
         if (page->isEmpty()) {
             HeapPage<Header>* unused = page;
             if (unused == m_mergePoint)
                 m_mergePoint = previous;
             page = page->next();
             HeapPage<Header>::unlink(this, unused, previousNext);
             --m_numberOfNormalPages;
         } else {
-            page->sweep(this);
+            page->sweep(stats, this);
             previousNext = &page->m_next;
             previous = page;
             page = page->next();
         }
     }
 }
 
 template<typename Header>
-void ThreadHeap<Header>::sweepLargePages()
+void ThreadHeap<Header>::sweepLargePages(HeapStats* stats)
 {
     TRACE_EVENT0("blink_gc", "ThreadHeap::sweepLargePages");
     LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject;
     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
         if (current->isMarked()) {
-            Heap::increaseMarkedObjectSize(current->size());
+            stats->increaseAllocatedSpace(current->size());
+            stats->increaseObjectSpace(current->size());
             current->unmark();
             previousNext = &current->m_next;
             current = current->next();
         } else {
             LargeHeapObject<Header>* next = current->next();
             freeLargeObject(current, previousNext);
             current = next;
         }
     }
 }
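
Note: both sweep loops unlink dead entries from a singly linked list during traversal via a pointer-to-pointer (previousNext), which removes the need for a head-node special case. A minimal self-contained sketch of the idiom with a simplified Node type:

struct Node {
    bool marked = false;
    Node* next = nullptr;
};

// Sweep a singly linked list: keep marked nodes (clearing the mark), unlink
// and reclaim unmarked ones. `previousNext` always points at the link that
// refers to `current`, so removal is a single pointer store.
void sweepList(Node** head)
{
    Node** previousNext = head;
    for (Node* current = *head; current;) {
        if (current->marked) {
            current->marked = false;     // unmark survivors for the next cycle
            previousNext = &current->next;
            current = current->next;
        } else {
            Node* next = current->next;
            *previousNext = next;        // unlink without tracking a "previous" node
            delete current;
            current = next;
        }
    }
}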
 
 
 // STRICT_ASAN_FINALIZATION_CHECKING turns on poisoning of all objects during
 // sweeping to catch cases where dead objects touch each other. This is not
 // turned on by default because it also triggers for cases that are safe.
 // Examples of such safe cases are context life cycle observers and timers
 // embedded in garbage collected objects.
 #define STRICT_ASAN_FINALIZATION_CHECKING 0
 
 template<typename Header>
-void ThreadHeap<Header>::sweep()
+void ThreadHeap<Header>::sweep(HeapStats* stats)
 {
     ASSERT(isConsistentForSweeping());
 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING
     // When using ASan do a pre-sweep where all unmarked objects are
     // poisoned before calling their finalizer methods. This can catch
     // the case where the finalizer of an object tries to modify
     // another object as part of finalization.
     for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
         page->poisonUnmarkedObjects();
 #endif
-    sweepNormalPages();
-    sweepLargePages();
+    sweepNormalPages(stats);
+    sweepLargePages(stats);
 }
 
 template<typename Header>
 void ThreadHeap<Header>::postSweepProcessing()
 {
     // If pages have been allocated during sweeping, link them into
     // the list of pages.
     if (m_firstPageAllocatedDuringSweeping) {
         m_lastPageAllocatedDuringSweeping->m_next = m_firstPage;
         m_firstPage = m_firstPageAllocatedDuringSweeping;
(...skipping 79 matching lines...)
 }
 
 template<typename Header>
 HeapPage<Header>::HeapPage(PageMemory* storage, ThreadHeap<Header>* heap, const GCInfo* gcInfo)
     : BaseHeapPage(storage, gcInfo, heap->threadState())
     , m_next(0)
 {
     COMPILE_ASSERT(!(sizeof(HeapPage<Header>) & allocationMask), page_header_incorrectly_aligned);
     m_objectStartBitMapComputed = false;
     ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
+    heap->stats().increaseAllocatedSpace(blinkPageSize);
 }
 
 template<typename Header>
 void HeapPage<Header>::link(HeapPage** prevNext)
 {
     m_next = *prevNext;
     *prevNext = this;
 }
 
 template<typename Header>
 void HeapPage<Header>::unlink(ThreadHeap<Header>* heap, HeapPage* unused, HeapPage** prevNext)
 {
     *prevNext = unused->m_next;
     heap->removePageFromHeap(unused);
 }
 
 template<typename Header>
-size_t HeapPage<Header>::objectPayloadSizeForTesting()
+void HeapPage<Header>::getStatsForTesting(HeapStats& stats)
 {
-    size_t objectPayloadSize = 0;
+    stats.increaseAllocatedSpace(blinkPageSize);
     Address headerAddress = payload();
     ASSERT(headerAddress != end());
     do {
         Header* header = reinterpret_cast<Header*>(headerAddress);
         if (!header->isFree()) {
-            objectPayloadSize += header->payloadSize();
+            stats.increaseObjectSpace(header->payloadSize());
         }
         ASSERT(header->size() < blinkPagePayloadSize());
         headerAddress += header->size();
         ASSERT(headerAddress <= end());
     } while (headerAddress < end());
-    return objectPayloadSize;
 }
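
Note: the page walk above works because every slot in the payload, live or free, begins with a header whose size field chains to the next header. A simplified, self-contained sketch of such a walk (ObjectHeader here is an illustrative plain struct, not Blink's packed header):

#include <cassert>
#include <cstddef>

// Simplified header: total slot size in bytes (including the header itself)
// plus a free bit. Real headers pack this into bitfields.
struct ObjectHeader {
    size_t size;
    bool free;
    size_t payloadSize() const { return size - sizeof(ObjectHeader); }
};

// Walk a page from payload start to end, summing live payload bytes.
// Free ranges are also described by headers, so the walk covers the page exactly.
size_t livePayloadBytes(char* payload, char* end)
{
    size_t total = 0;
    for (char* addr = payload; addr < end;) {
        ObjectHeader* header = reinterpret_cast<ObjectHeader*>(addr);
        assert(header->size > 0);
        if (!header->free)
            total += header->payloadSize();
        addr += header->size;
        assert(addr <= end);
    }
    return total;
}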
 
 template<typename Header>
 bool HeapPage<Header>::isEmpty()
 {
     BasicObjectHeader* header = reinterpret_cast<BasicObjectHeader*>(payload());
     return header->isFree() && (header->size() == payloadSize());
 }
 
 template<typename Header>
-void HeapPage<Header>::sweep(ThreadHeap<Header>* heap)
+void HeapPage<Header>::sweep(HeapStats* stats, ThreadHeap<Header>* heap)
 {
     clearObjectStartBitMap();
+    stats->increaseAllocatedSpace(blinkPageSize);
     Address startOfGap = payload();
     for (Address headerAddress = startOfGap; headerAddress < end(); ) {
         BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
         ASSERT(basicHeader->size() > 0);
         ASSERT(basicHeader->size() < blinkPagePayloadSize());
 
         if (basicHeader->isFree()) {
             size_t size = basicHeader->size();
 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
             // Zero the memory in the free list header to maintain the
(...skipping 25 matching lines...)
 #endif
             ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
             headerAddress += size;
             continue;
         }
 
         if (startOfGap != headerAddress)
             heap->addToFreeList(startOfGap, headerAddress - startOfGap);
         header->unmark();
         headerAddress += header->size();
-        Heap::increaseMarkedObjectSize(header->size());
+        stats->increaseObjectSpace(header->size());
         startOfGap = headerAddress;
     }
     if (startOfGap != end())
         heap->addToFreeList(startOfGap, end() - startOfGap);
 }
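
Note: the sweep above coalesces free space for free: consecutive dead or already-free slots extend the current gap (startOfGap), and a gap is pushed onto the free list only when a live object or the end of the page terminates it. A standalone sketch of the pattern (Slot and the (offset, length) free list are illustrative simplifications):

#include <cstddef>
#include <utility>
#include <vector>

struct Slot { size_t size; bool live; };  // illustrative stand-in for headers

// Coalescing sweep over a page laid out as consecutive slots: dead slots
// extend the current gap; a gap is emitted only when a live slot (or the
// end of the page) terminates it, so adjacent free slots merge automatically.
std::vector<std::pair<size_t, size_t>> sweepGaps(const std::vector<Slot>& slots)
{
    std::vector<std::pair<size_t, size_t>> freeList;  // (offset, length) entries
    size_t offset = 0;
    size_t startOfGap = 0;
    for (const Slot& slot : slots) {
        if (slot.live) {
            if (startOfGap != offset)
                freeList.emplace_back(startOfGap, offset - startOfGap);
            startOfGap = offset + slot.size;
        }
        offset += slot.size;
    }
    if (startOfGap != offset)
        freeList.emplace_back(startOfGap, offset - startOfGap);
    return freeList;
}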
 
 template<typename Header>
 void HeapPage<Header>::clearLiveAndMarkDead()
 {
     for (Address headerAddress = payload(); headerAddress < end();) {
(...skipping 205 matching lines...)
     return gcInfo()->hasVTable();
 }
 
 template<>
 inline bool HeapPage<FinalizedHeapObjectHeader>::hasVTable(FinalizedHeapObjectHeader* header)
 {
     return header->hasVTable();
 }
 
 template<typename Header>
-size_t LargeHeapObject<Header>::objectPayloadSizeForTesting()
+void LargeHeapObject<Header>::getStatsForTesting(HeapStats& stats)
 {
-    return payloadSize();
+    stats.increaseAllocatedSpace(size());
+    stats.increaseObjectSpace(payloadSize());
 }
 
 #if ENABLE(GC_PROFILE_HEAP)
 template<typename Header>
 void LargeHeapObject<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info)
 {
     Header* header = heapObjectHeader();
     size_t tag = info->getClassTag(header->gcInfo());
     size_t age = header->age();
     if (isMarked()) {
(...skipping 319 matching lines...)
     ThreadState::init();
     s_markingStack = new CallbackStack();
     s_postMarkingCallbackStack = new CallbackStack();
     s_weakCallbackStack = new CallbackStack();
     s_ephemeronStack = new CallbackStack();
     s_heapDoesNotContainCache = new HeapDoesNotContainCache();
     s_markingVisitor = new MarkingVisitor(s_markingStack);
     s_freePagePool = new FreePagePool();
     s_orphanedPagePool = new OrphanedPagePool();
     s_markingThreads = new Vector<OwnPtr<WebThread>>();
-    s_allocatedObjectSize = 0;
-    s_allocatedSpace = 0;
-    s_markedObjectSize = 0;
     if (Platform::current()) {
         int processors = Platform::current()->numberOfProcessors();
         int numberOfMarkingThreads = std::min(processors, maxNumberOfMarkingThreads);
         for (int i = 0; i < numberOfMarkingThreads; i++)
             s_markingThreads->append(adoptPtr(Platform::current()->createThread("Blink GC Marking Thread")));
     }
 }
 
 void Heap::shutdown()
 {
(...skipping 23 matching lines...)
     s_weakCallbackStack = 0;
     delete s_postMarkingCallbackStack;
     s_postMarkingCallbackStack = 0;
     delete s_markingStack;
     s_markingStack = 0;
     delete s_ephemeronStack;
     s_ephemeronStack = 0;
     delete s_regionTree;
     s_regionTree = 0;
     ThreadState::shutdown();
-    ASSERT(Heap::allocatedSpace() == 0);
 }
 
 BaseHeapPage* Heap::contains(Address address)
 {
     ASSERT(ThreadState::isAnyThreadInGC());
     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
         BaseHeapPage* page = (*it)->contains(address);
         if (page)
             return page;
(...skipping 232 matching lines...)
     if (!gcScope.allThreadsParked()) {
         ThreadState::current()->setGCRequested();
         return;
     }
 
     if (state->isMainThread())
         ScriptForbiddenScope::enter();
 
     s_lastGCWasConservative = false;
 
-    Heap::resetMarkedObjectSize();
-    Heap::resetAllocatedObjectSize();
-
     TRACE_EVENT2("blink_gc", "Heap::collectGarbage",
         "precise", stackState == ThreadState::NoHeapPointersOnStack,
         "forced", cause == ThreadState::ForcedGC);
     TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC");
     double timeStamp = WTF::currentTimeMS();
 #if ENABLE(GC_PROFILE_MARKING)
     static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear();
 #endif
 
     // Disallow allocation during garbage collection (but not
(...skipping 38 matching lines...)
     // marking we check that any object marked as dead is not traced. E.g. via a
     // conservatively found pointer or a programming error with an object containing
     // a dangling pointer.
     orphanedPagePool()->decommitOrphanedPages();
 
 #if ENABLE(GC_PROFILE_MARKING)
     static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats();
 #endif
 
     if (Platform::current()) {
+        uint64_t objectSpaceSize;
+        uint64_t allocatedSpaceSize;
+        getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize);
         Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50);
-        Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", Heap::allocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50);
-        Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", Heap::allocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50);
+        Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
+        Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
     }
 
     if (state->isMainThread())
         ScriptForbiddenScope::exit();
 }
 
 void Heap::collectGarbageForTerminatingThread(ThreadState* state)
 {
     // We explicitly do not enter a safepoint while doing thread specific
     // garbage collection since we don't want to allow a global GC at the
(...skipping 218 matching lines...)
         } else if (splitOff->m_freeList.m_freeLists[i]) {
             m_freeList.m_lastFreeListEntries[i]->append(splitOff->m_freeList.m_freeLists[i]);
             m_freeList.m_lastFreeListEntries[i] = splitOff->m_freeList.m_lastFreeListEntries[i];
         }
     }
     if (m_freeList.m_biggestFreeListIndex < splitOff->m_freeList.m_biggestFreeListIndex)
         m_freeList.m_biggestFreeListIndex = splitOff->m_freeList.m_biggestFreeListIndex;
     }
 }
 
-size_t Heap::objectPayloadSizeForTesting()
+void Heap::getHeapSpaceSize(uint64_t* objectSpaceSize, uint64_t* allocatedSpaceSize)
 {
-    size_t objectPayloadSize = 0;
+    *objectSpaceSize = 0;
+    *allocatedSpaceSize = 0;
+    ASSERT(ThreadState::isAnyThreadInGC());
+    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
+    typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
+    for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
+        *objectSpaceSize += (*it)->stats().totalObjectSpace();
+        *allocatedSpaceSize += (*it)->stats().totalAllocatedSpace();
+    }
+}
+
+void Heap::getStatsForTesting(HeapStats* stats)
+{
+    stats->clear();
     ASSERT(ThreadState::isAnyThreadInGC());
     makeConsistentForSweeping();
     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
     typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
     for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
-        objectPayloadSize += (*it)->objectPayloadSizeForTesting();
+        HeapStats temp;
+        (*it)->getStatsForTesting(temp);
+        stats->add(&temp);
     }
-    return objectPayloadSize;
 }
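
Note: getHeapSpaceSize() reintroduces on-demand aggregation: each thread mutates only its own HeapStats, and a total is computed by summing over the attached threads while they are stopped. A simplified sketch (ThreadState and HeapStats here are stand-ins for Blink's classes):

#include <cstdint>
#include <vector>

struct HeapStats {
    uint64_t objectSpace = 0;
    uint64_t allocatedSpace = 0;
};

// Each thread owns its statistics; illustrative stand-in for Blink's ThreadState.
struct ThreadState {
    HeapStats stats;
};

// Sum per-thread statistics. Safe only while all threads are stopped at a
// safepoint (the real code asserts ThreadState::isAnyThreadInGC()).
void getHeapSpaceSize(const std::vector<ThreadState*>& threads,
                      uint64_t* objectSpaceSize, uint64_t* allocatedSpaceSize)
{
    *objectSpaceSize = 0;
    *allocatedSpaceSize = 0;
    for (const ThreadState* state : threads) {
        *objectSpaceSize += state->stats.objectSpace;
        *allocatedSpaceSize += state->stats.allocatedSpace;
    }
}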
 
 #if ENABLE(ASSERT)
 bool Heap::isConsistentForSweeping()
 {
     ASSERT(ThreadState::isAnyThreadInGC());
     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
         if (!(*it)->isConsistentForSweeping())
             return false;
(...skipping 165 matching lines...)
 CallbackStack* Heap::s_markingStack;
 CallbackStack* Heap::s_postMarkingCallbackStack;
 CallbackStack* Heap::s_weakCallbackStack;
 CallbackStack* Heap::s_ephemeronStack;
 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
 bool Heap::s_shutdownCalled = false;
 bool Heap::s_lastGCWasConservative = false;
 FreePagePool* Heap::s_freePagePool;
 OrphanedPagePool* Heap::s_orphanedPagePool;
 Heap::RegionTree* Heap::s_regionTree = 0;
-size_t Heap::s_allocatedObjectSize = 0;
-size_t Heap::s_allocatedSpace = 0;
-size_t Heap::s_markedObjectSize = 0;
 
 } // namespace blink