Chromium Code Reviews

Side by Side Diff: Source/platform/heap/Heap.cpp

Issue 723513002: Oilpan: Refactor the way we calculate heap statistics (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 6 years, 1 month ago
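At a glance, this patch replaces the per-thread HeapStats aggregation with global counters kept on Heap. Below is a condensed sketch of the interface change visible in this diff; the Heap.h side is not part of the diff, so treat the exact signatures as assumptions:

    // Before: statistics pulled from every attached thread on demand.
    void Heap::getStats(HeapStats* stats);                // removed below
    void Heap::getHeapSpaceSize(uint64_t*, uint64_t*);    // removed below

    // After: incrementally maintained globals, plus an on-demand page walk
    // that is only used by tests.
    size_t Heap::allocatedSpace();              // committed page memory
    size_t Heap::allocatedObjectSize();         // bytes allocated since last GC
    size_t Heap::objectPayloadSizeForTesting(); // walks pages while in GC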
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 663 matching lines...)
674 ASSERT(!m_firstPage); 674 ASSERT(!m_firstPage);
675 ASSERT(!m_firstLargeHeapObject); 675 ASSERT(!m_firstLargeHeapObject);
676 } 676 }
677 677
678 template<typename Header> 678 template<typename Header>
679 void ThreadHeap<Header>::cleanupPages() 679 void ThreadHeap<Header>::cleanupPages()
680 { 680 {
681 clearFreeLists(); 681 clearFreeLists();
682 682
683 // Add the ThreadHeap's pages to the orphanedPagePool. 683 // Add the ThreadHeap's pages to the orphanedPagePool.
684 for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next) 684 for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next) {
685 Heap::decreaseAllocatedSpace(blinkPageSize);
685 Heap::orphanedPagePool()->addOrphanedPage(m_index, page); 686 Heap::orphanedPagePool()->addOrphanedPage(m_index, page);
687 }
686 m_firstPage = 0; 688 m_firstPage = 0;
687 689
688 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next) 690 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next) {
691 Heap::decreaseAllocatedSpace(largeObject->size());
689 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject); 692 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject);
693 }
690 m_firstLargeHeapObject = 0; 694 m_firstLargeHeapObject = 0;
691 } 695 }
692 696
693 template<typename Header> 697 template<typename Header>
694 void ThreadHeap<Header>::updateRemainingAllocationSize() 698 void ThreadHeap<Header>::updateRemainingAllocationSize()
695 { 699 {
696 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { 700 if (m_lastRemainingAllocationSize > remainingAllocationSize()) {
697 stats().increaseObjectSpace(m_lastRemainingAllocationSize - remainingAllocationSize()); 701 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize());
698 m_lastRemainingAllocationSize = remainingAllocationSize(); 702 m_lastRemainingAllocationSize = remainingAllocationSize();
699 } 703 }
700 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); 704 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize());
701 } 705 }
702 706
703 template<typename Header> 707 template<typename Header>
704 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo) 708 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo)
705 { 709 {
706 size_t allocationSize = allocationSizeFromSize(size); 710 size_t allocationSize = allocationSizeFromSize(size);
707 ASSERT(allocationSize > remainingAllocationSize()); 711 ASSERT(allocationSize > remainingAllocationSize());
(...skipping 225 matching lines...)
933 page->clearObjectStartBitMap(); 937 page->clearObjectStartBitMap();
934 page->resetPromptlyFreedSize(); 938 page->resetPromptlyFreedSize();
935 size_t freedCount = 0; 939 size_t freedCount = 0;
936 Address startOfGap = page->payload(); 940 Address startOfGap = page->payload();
937 for (Address headerAddress = startOfGap; headerAddress < page->end(); ) { 941 for (Address headerAddress = startOfGap; headerAddress < page->end(); ) {
938 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress); 942 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
939 ASSERT(basicHeader->size() > 0); 943 ASSERT(basicHeader->size() > 0);
940 ASSERT(basicHeader->size() < blinkPagePayloadSize()); 944 ASSERT(basicHeader->size() < blinkPagePayloadSize());
941 945
942 if (basicHeader->isPromptlyFreed()) { 946 if (basicHeader->isPromptlyFreed()) {
943 stats().decreaseObjectSpace(reinterpret_cast<Header*>(basicHeader)->size()); 947 Heap::decreaseAllocatedObjectSize(reinterpret_cast<Header*>(basicHeader)->size());
944 size_t size = basicHeader->size(); 948 size_t size = basicHeader->size();
945 ASSERT(size >= sizeof(Header)); 949 ASSERT(size >= sizeof(Header));
946 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) 950 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
947 memset(headerAddress, 0, sizeof(Header)); 951 memset(headerAddress, 0, sizeof(Header));
948 #endif 952 #endif
949 ++freedCount; 953 ++freedCount;
950 headerAddress += size; 954 headerAddress += size;
951 continue; 955 continue;
952 } 956 }
953 957
(...skipping 63 matching lines...)
1017 memset(headerAddress, 0, size); 1021 memset(headerAddress, 0, size);
1018 Header* header = new (NotNull, headerAddress) Header(size, gcInfo); 1022 Header* header = new (NotNull, headerAddress) Header(size, gcInfo);
1019 Address result = headerAddress + sizeof(*header); 1023 Address result = headerAddress + sizeof(*header);
1020 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); 1024 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
1021 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState()); 1025 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState());
1022 1026
1023 // Poison the object header and allocationGranularity bytes after the object 1027 // Poison the object header and allocationGranularity bytes after the object
1024 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); 1028 ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
1025 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); 1029 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
1026 largeObject->link(&m_firstLargeHeapObject); 1030 largeObject->link(&m_firstLargeHeapObject);
1027 stats().increaseAllocatedSpace(largeObject->size()); 1031 Heap::increaseAllocatedSpace(largeObject->size());
1028 stats().increaseObjectSpace(largeObject->size()); 1032 Heap::increaseAllocatedObjectSize(largeObject->size());
1029 return result; 1033 return result;
1030 } 1034 }
1031 1035
1032 template<typename Header> 1036 template<typename Header>
1033 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext) 1037 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext)
1034 { 1038 {
1035 object->unlink(previousNext); 1039 object->unlink(previousNext);
1036 object->finalize(); 1040 object->finalize();
1041 Heap::decreaseAllocatedSpace(object->size());
1037 1042
1038 // Unpoison the object header and allocationGranularity bytes after the 1043 // Unpoison the object header and allocationGranularity bytes after the
1039 // object before freeing. 1044 // object before freeing.
1040 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header)); 1045 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header));
1041 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); 1046 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity);
1042 1047
1043 if (object->terminating()) { 1048 if (object->terminating()) {
1044 ASSERT(ThreadState::current()->isTerminating()); 1049 ASSERT(ThreadState::current()->isTerminating());
1045 // The thread is shutting down so this object is being removed as part 1050 // The thread is shutting down so this object is being removed as part
1046 // of a thread local GC. In that case the object could be traced in the 1051 // of a thread local GC. In that case the object could be traced in the
(...skipping 201 matching lines...)
1248 { 1253 {
1249 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GCInfo on the heap 1254 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GCInfo on the heap
1250 // since it is the same for all objects 1255 // since it is the same for all objects
1251 ASSERT(gcInfo); 1256 ASSERT(gcInfo);
1252 allocatePage(gcInfo); 1257 allocatePage(gcInfo);
1253 } 1258 }
1254 1259
1255 template <typename Header> 1260 template <typename Header>
1256 void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page) 1261 void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page)
1257 { 1262 {
1263 Heap::decreaseAllocatedSpace(blinkPageSize);
1264
1258 MutexLocker locker(m_threadState->sweepMutex()); 1265 MutexLocker locker(m_threadState->sweepMutex());
1259 if (page->terminating()) { 1266 if (page->terminating()) {
1260 // The thread is shutting down so this page is being removed as part 1267 // The thread is shutting down so this page is being removed as part
1261 // of a thread local GC. In that case the page could be accessed in the 1268 // of a thread local GC. In that case the page could be accessed in the
1262 // next global GC either due to a dead object being traced via a 1269 // next global GC either due to a dead object being traced via a
1263 // conservative pointer or due to a programming error where an object 1270 // conservative pointer or due to a programming error where an object
1264 // in another thread heap keeps a dangling pointer to this object. 1271 // in another thread heap keeps a dangling pointer to this object.
1265 // To guard against this we put the page in the orphanedPagePool to 1272 // To guard against this we put the page in the orphanedPagePool to
1266 // ensure it is still reachable. After the next global GC it can be 1273 // ensure it is still reachable. After the next global GC it can be
1267 // decommitted and moved to the page pool assuming no rogue/dangling 1274 // decommitted and moved to the page pool assuming no rogue/dangling
(...skipping 33 matching lines...)
1301 pageMemory = memory; 1308 pageMemory = memory;
1302 else 1309 else
1303 delete memory; 1310 delete memory;
1304 } else { 1311 } else {
1305 Heap::freePagePool()->addFreePage(m_index, memory); 1312 Heap::freePagePool()->addFreePage(m_index, memory);
1306 } 1313 }
1307 offset += blinkPageSize; 1314 offset += blinkPageSize;
1308 } 1315 }
1309 } 1316 }
1310 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo); 1317 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo);
1318 Heap::increaseAllocatedSpace(blinkPageSize);
1311 // Use a separate list for pages allocated during sweeping to make 1319 // Use a separate list for pages allocated during sweeping to make
1312 // sure that we do not accidentally sweep objects that have been 1320 // sure that we do not accidentally sweep objects that have been
1313 // allocated during sweeping. 1321 // allocated during sweeping.
1314 if (m_threadState->isSweepInProgress()) { 1322 if (m_threadState->isSweepInProgress()) {
1315 if (!m_lastPageAllocatedDuringSweeping) 1323 if (!m_lastPageAllocatedDuringSweeping)
1316 m_lastPageAllocatedDuringSweeping = page; 1324 m_lastPageAllocatedDuringSweeping = page;
1317 page->link(&m_firstPageAllocatedDuringSweeping); 1325 page->link(&m_firstPageAllocatedDuringSweeping);
1318 } else { 1326 } else {
1319 page->link(&m_firstPage); 1327 page->link(&m_firstPage);
1320 } 1328 }
(...skipping 17 matching lines...)
1338 { 1346 {
1339 for (HeapPage<Header>* page = m_firstPageAllocatedDuringSweeping; page; page = page->next()) { 1347 for (HeapPage<Header>* page = m_firstPageAllocatedDuringSweeping; page; page = page->next()) {
1340 if (page->contains(address)) 1348 if (page->contains(address))
1341 return true; 1349 return true;
1342 } 1350 }
1343 return false; 1351 return false;
1344 } 1352 }
1345 #endif 1353 #endif
1346 1354
1347 template<typename Header> 1355 template<typename Header>
1348 void ThreadHeap<Header>::getStatsForTesting(HeapStats& stats) 1356 size_t ThreadHeap<Header>::objectPayloadSizeForTesting()
1349 { 1357 {
1350 ASSERT(!m_firstPageAllocatedDuringSweeping); 1358 ASSERT(!m_firstPageAllocatedDuringSweeping);
1359 size_t objectPayloadSize = 0;
1351 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) 1360 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
1352 page->getStatsForTesting(stats); 1361 objectPayloadSize += page->objectPayloadSizeForTesting();
1353 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) 1362 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
1354 current->getStatsForTesting(stats); 1363 objectPayloadSize += current->objectPayloadSizeForTesting();
1364 return objectPayloadSize;
1355 } 1365 }
1356 1366
1357 template<typename Header> 1367 template<typename Header>
1358 void ThreadHeap<Header>::sweepNormalPages(HeapStats* stats) 1368 void ThreadHeap<Header>::sweepNormalPages()
1359 { 1369 {
1360 TRACE_EVENT0("blink_gc", "ThreadHeap::sweepNormalPages"); 1370 TRACE_EVENT0("blink_gc", "ThreadHeap::sweepNormalPages");
1361 HeapPage<Header>* page = m_firstPage; 1371 HeapPage<Header>* page = m_firstPage;
1362 HeapPage<Header>** previousNext = &m_firstPage; 1372 HeapPage<Header>** previousNext = &m_firstPage;
1363 HeapPage<Header>* previous = 0; 1373 HeapPage<Header>* previous = 0;
1364 while (page) { 1374 while (page) {
1365 page->resetPromptlyFreedSize(); 1375 page->resetPromptlyFreedSize();
1366 if (page->isEmpty()) { 1376 if (page->isEmpty()) {
1367 HeapPage<Header>* unused = page; 1377 HeapPage<Header>* unused = page;
1368 if (unused == m_mergePoint) 1378 if (unused == m_mergePoint)
1369 m_mergePoint = previous; 1379 m_mergePoint = previous;
1370 page = page->next(); 1380 page = page->next();
1371 HeapPage<Header>::unlink(this, unused, previousNext); 1381 HeapPage<Header>::unlink(this, unused, previousNext);
1372 --m_numberOfNormalPages; 1382 --m_numberOfNormalPages;
1373 } else { 1383 } else {
1374 page->sweep(stats, this); 1384 page->sweep(this);
1375 previousNext = &page->m_next; 1385 previousNext = &page->m_next;
1376 previous = page; 1386 previous = page;
1377 page = page->next(); 1387 page = page->next();
1378 } 1388 }
1379 } 1389 }
1380 } 1390 }
1381 1391
1382 template<typename Header> 1392 template<typename Header>
1383 void ThreadHeap<Header>::sweepLargePages(HeapStats* stats) 1393 void ThreadHeap<Header>::sweepLargePages()
1384 { 1394 {
1385 TRACE_EVENT0("blink_gc", "ThreadHeap::sweepLargePages"); 1395 TRACE_EVENT0("blink_gc", "ThreadHeap::sweepLargePages");
1386 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject; 1396 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject;
1387 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { 1397 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
1388 if (current->isMarked()) { 1398 if (current->isMarked()) {
1389 stats->increaseAllocatedSpace(current->size()); 1399 Heap::increaseLiveObjectSize(current->size());
sof 2014/11/14 12:44:20 Could you explain why we don't need to increase the allocated space here?
haraken 2014/11/14 15:15:00 After this CL, s_allocatedSpace is never reset or recalculated during a GC; it is only increased and decreased as pages are allocated and freed, so the sweep doesn't need to re-add it for surviving objects.
sof 2014/11/14 16:37:21 Thanks for clarifying.
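To make the thread above concrete, here is an informal summary (mine, not part of the patch) of where each counter moves after this CL; the names are taken from this diff:

    // s_allocatedSpace: committed page memory. Moves only when pages come and
    // go, never during marking or sweeping:
    //   +blinkPageSize        in allocatePage()
    //   +largeObject->size()  in allocateLargeObject()
    //   -blinkPageSize        in removePageFromHeap() and cleanupPages()
    //   -object->size()       in freeLargeObject()
    //
    // s_allocatedObjectSize: reset at the start of collectGarbage(), then
    // increased as objects are handed out (see updateRemainingAllocationSize())
    // and decreased on the promptly-freed path.
    //
    // s_liveObjectSize: reset at the start of collectGarbage() and rebuilt by
    // sweep() from marked objects only, which is why the marked branch above
    // calls increaseLiveObjectSize() and nothing else.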
1390 stats->increaseObjectSpace(current->size());
1391 current->unmark(); 1400 current->unmark();
1392 previousNext = &current->m_next; 1401 previousNext = &current->m_next;
1393 current = current->next(); 1402 current = current->next();
1394 } else { 1403 } else {
1395 LargeHeapObject<Header>* next = current->next(); 1404 LargeHeapObject<Header>* next = current->next();
1396 freeLargeObject(current, previousNext); 1405 freeLargeObject(current, previousNext);
1397 current = next; 1406 current = next;
1398 } 1407 }
1399 } 1408 }
1400 } 1409 }
1401 1410
1402 1411
1403 // STRICT_ASAN_FINALIZATION_CHECKING turns on poisoning of all objects during 1412 // STRICT_ASAN_FINALIZATION_CHECKING turns on poisoning of all objects during
1404 // sweeping to catch cases where dead objects touch each other. This is not 1413 // sweeping to catch cases where dead objects touch each other. This is not
1405 // turned on by default because it also triggers for cases that are safe. 1414 // turned on by default because it also triggers for cases that are safe.
1406 // Examples of such safe cases are context life cycle observers and timers 1415 // Examples of such safe cases are context life cycle observers and timers
1407 // embedded in garbage collected objects. 1416 // embedded in garbage collected objects.
1408 #define STRICT_ASAN_FINALIZATION_CHECKING 0 1417 #define STRICT_ASAN_FINALIZATION_CHECKING 0
1409 1418
1410 template<typename Header> 1419 template<typename Header>
1411 void ThreadHeap<Header>::sweep(HeapStats* stats) 1420 void ThreadHeap<Header>::sweep()
1412 { 1421 {
1413 ASSERT(isConsistentForSweeping()); 1422 ASSERT(isConsistentForSweeping());
1414 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING 1423 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING
1415 // When using ASan do a pre-sweep where all unmarked objects are 1424 // When using ASan do a pre-sweep where all unmarked objects are
1416 // poisoned before calling their finalizer methods. This can catch 1425 // poisoned before calling their finalizer methods. This can catch
1417 // the case where the finalizer of an object tries to modify 1426 // the case where the finalizer of an object tries to modify
1418 // another object as part of finalization. 1427 // another object as part of finalization.
1419 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) 1428 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
1420 page->poisonUnmarkedObjects(); 1429 page->poisonUnmarkedObjects();
1421 #endif 1430 #endif
1422 sweepNormalPages(stats); 1431 sweepNormalPages();
1423 sweepLargePages(stats); 1432 sweepLargePages();
1424 } 1433 }
1425 1434
1426 template<typename Header> 1435 template<typename Header>
1427 void ThreadHeap<Header>::postSweepProcessing() 1436 void ThreadHeap<Header>::postSweepProcessing()
1428 { 1437 {
1429 // If pages have been allocated during sweeping, link them into 1438 // If pages have been allocated during sweeping, link them into
1430 // the list of pages. 1439 // the list of pages.
1431 if (m_firstPageAllocatedDuringSweeping) { 1440 if (m_firstPageAllocatedDuringSweeping) {
1432 m_lastPageAllocatedDuringSweeping->m_next = m_firstPage; 1441 m_lastPageAllocatedDuringSweeping->m_next = m_firstPage;
1433 m_firstPage = m_firstPageAllocatedDuringSweeping; 1442 m_firstPage = m_firstPageAllocatedDuringSweeping;
(...skipping 79 matching lines...)
1513 } 1522 }
1514 1523
1515 template<typename Header> 1524 template<typename Header>
1516 HeapPage<Header>::HeapPage(PageMemory* storage, ThreadHeap<Header>* heap, const GCInfo* gcInfo) 1525 HeapPage<Header>::HeapPage(PageMemory* storage, ThreadHeap<Header>* heap, const GCInfo* gcInfo)
1517 : BaseHeapPage(storage, gcInfo, heap->threadState()) 1526 : BaseHeapPage(storage, gcInfo, heap->threadState())
1518 , m_next(0) 1527 , m_next(0)
1519 { 1528 {
1520 COMPILE_ASSERT(!(sizeof(HeapPage<Header>) & allocationMask), page_header_incorrectly_aligned); 1529 COMPILE_ASSERT(!(sizeof(HeapPage<Header>) & allocationMask), page_header_incorrectly_aligned);
1521 m_objectStartBitMapComputed = false; 1530 m_objectStartBitMapComputed = false;
1522 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); 1531 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
1523 heap->stats().increaseAllocatedSpace(blinkPageSize);
1524 } 1532 }
1525 1533
1526 template<typename Header> 1534 template<typename Header>
1527 void HeapPage<Header>::link(HeapPage** prevNext) 1535 void HeapPage<Header>::link(HeapPage** prevNext)
1528 { 1536 {
1529 m_next = *prevNext; 1537 m_next = *prevNext;
1530 *prevNext = this; 1538 *prevNext = this;
1531 } 1539 }
1532 1540
1533 template<typename Header> 1541 template<typename Header>
1534 void HeapPage<Header>::unlink(ThreadHeap<Header>* heap, HeapPage* unused, HeapPage** prevNext) 1542 void HeapPage<Header>::unlink(ThreadHeap<Header>* heap, HeapPage* unused, HeapPage** prevNext)
1535 { 1543 {
1536 *prevNext = unused->m_next; 1544 *prevNext = unused->m_next;
1537 heap->removePageFromHeap(unused); 1545 heap->removePageFromHeap(unused);
1538 } 1546 }
1539 1547
1540 template<typename Header> 1548 template<typename Header>
1541 void HeapPage<Header>::getStatsForTesting(HeapStats& stats) 1549 size_t HeapPage<Header>::objectPayloadSizeForTesting()
1542 { 1550 {
1543 stats.increaseAllocatedSpace(blinkPageSize); 1551 size_t objectPayloadSize = 0;
1544 Address headerAddress = payload(); 1552 Address headerAddress = payload();
1545 ASSERT(headerAddress != end()); 1553 ASSERT(headerAddress != end());
1546 do { 1554 do {
1547 Header* header = reinterpret_cast<Header*>(headerAddress); 1555 Header* header = reinterpret_cast<Header*>(headerAddress);
1548 if (!header->isFree()) { 1556 if (!header->isFree()) {
1549 stats.increaseObjectSpace(header->payloadSize()); 1557 objectPayloadSize += header->payloadSize();
1550 } 1558 }
1551 ASSERT(header->size() < blinkPagePayloadSize()); 1559 ASSERT(header->size() < blinkPagePayloadSize());
1552 headerAddress += header->size(); 1560 headerAddress += header->size();
1553 ASSERT(headerAddress <= end()); 1561 ASSERT(headerAddress <= end());
1554 } while (headerAddress < end()); 1562 } while (headerAddress < end());
1563 return objectPayloadSize;
1555 } 1564 }
1556 1565
1557 template<typename Header> 1566 template<typename Header>
1558 bool HeapPage<Header>::isEmpty() 1567 bool HeapPage<Header>::isEmpty()
1559 { 1568 {
1560 BasicObjectHeader* header = reinterpret_cast<BasicObjectHeader*>(payload()); 1569 BasicObjectHeader* header = reinterpret_cast<BasicObjectHeader*>(payload());
1561 return header->isFree() && (header->size() == payloadSize()); 1570 return header->isFree() && (header->size() == payloadSize());
1562 } 1571 }
1563 1572
1564 template<typename Header> 1573 template<typename Header>
1565 void HeapPage<Header>::sweep(HeapStats* stats, ThreadHeap<Header>* heap) 1574 void HeapPage<Header>::sweep(ThreadHeap<Header>* heap)
1566 { 1575 {
1567 clearObjectStartBitMap(); 1576 clearObjectStartBitMap();
1568 stats->increaseAllocatedSpace(blinkPageSize);
1569 Address startOfGap = payload(); 1577 Address startOfGap = payload();
1570 for (Address headerAddress = startOfGap; headerAddress < end(); ) { 1578 for (Address headerAddress = startOfGap; headerAddress < end(); ) {
1571 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress); 1579 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
1572 ASSERT(basicHeader->size() > 0); 1580 ASSERT(basicHeader->size() > 0);
1573 ASSERT(basicHeader->size() < blinkPagePayloadSize()); 1581 ASSERT(basicHeader->size() < blinkPagePayloadSize());
1574 1582
1575 if (basicHeader->isFree()) { 1583 if (basicHeader->isFree()) {
1576 size_t size = basicHeader->size(); 1584 size_t size = basicHeader->size();
1577 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) 1585 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
1578 // Zero the memory in the free list header to maintain the 1586 // Zero the memory in the free list header to maintain the
(...skipping 25 matching lines...)
1604 #endif 1612 #endif
1605 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); 1613 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
1606 headerAddress += size; 1614 headerAddress += size;
1607 continue; 1615 continue;
1608 } 1616 }
1609 1617
1610 if (startOfGap != headerAddress) 1618 if (startOfGap != headerAddress)
1611 heap->addToFreeList(startOfGap, headerAddress - startOfGap); 1619 heap->addToFreeList(startOfGap, headerAddress - startOfGap);
1612 header->unmark(); 1620 header->unmark();
1613 headerAddress += header->size(); 1621 headerAddress += header->size();
1614 stats->increaseObjectSpace(header->size()); 1622 Heap::increaseLiveObjectSize(header->size());
1615 startOfGap = headerAddress; 1623 startOfGap = headerAddress;
1616 } 1624 }
1617 if (startOfGap != end()) 1625 if (startOfGap != end())
1618 heap->addToFreeList(startOfGap, end() - startOfGap); 1626 heap->addToFreeList(startOfGap, end() - startOfGap);
1619 } 1627 }
1620 1628
1621 template<typename Header> 1629 template<typename Header>
1622 void HeapPage<Header>::clearLiveAndMarkDead() 1630 void HeapPage<Header>::clearLiveAndMarkDead()
1623 { 1631 {
1624 for (Address headerAddress = payload(); headerAddress < end();) { 1632 for (Address headerAddress = payload(); headerAddress < end();) {
(...skipping 205 matching lines...)
1830 return gcInfo()->hasVTable(); 1838 return gcInfo()->hasVTable();
1831 } 1839 }
1832 1840
1833 template<> 1841 template<>
1834 inline bool HeapPage<FinalizedHeapObjectHeader>::hasVTable(FinalizedHeapObjectHeader* header) 1842 inline bool HeapPage<FinalizedHeapObjectHeader>::hasVTable(FinalizedHeapObjectHeader* header)
1835 { 1843 {
1836 return header->hasVTable(); 1844 return header->hasVTable();
1837 } 1845 }
1838 1846
1839 template<typename Header> 1847 template<typename Header>
1840 void LargeHeapObject<Header>::getStatsForTesting(HeapStats& stats) 1848 size_t LargeHeapObject<Header>::objectPayloadSizeForTesting()
1841 { 1849 {
1842 stats.increaseAllocatedSpace(size()); 1850 return payloadSize();
1843 stats.increaseObjectSpace(payloadSize());
1844 } 1851 }
1845 1852
1846 #if ENABLE(GC_PROFILE_HEAP) 1853 #if ENABLE(GC_PROFILE_HEAP)
1847 template<typename Header> 1854 template<typename Header>
1848 void LargeHeapObject<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) 1855 void LargeHeapObject<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info)
1849 { 1856 {
1850 Header* header = heapObjectHeader(); 1857 Header* header = heapObjectHeader();
1851 size_t tag = info->getClassTag(header->gcInfo()); 1858 size_t tag = info->getClassTag(header->gcInfo());
1852 size_t age = header->age(); 1859 size_t age = header->age();
1853 if (isMarked()) { 1860 if (isMarked()) {
(...skipping 319 matching lines...)
2173 ThreadState::init(); 2180 ThreadState::init();
2174 s_markingStack = new CallbackStack(); 2181 s_markingStack = new CallbackStack();
2175 s_postMarkingCallbackStack = new CallbackStack(); 2182 s_postMarkingCallbackStack = new CallbackStack();
2176 s_weakCallbackStack = new CallbackStack(); 2183 s_weakCallbackStack = new CallbackStack();
2177 s_ephemeronStack = new CallbackStack(); 2184 s_ephemeronStack = new CallbackStack();
2178 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); 2185 s_heapDoesNotContainCache = new HeapDoesNotContainCache();
2179 s_markingVisitor = new MarkingVisitor(s_markingStack); 2186 s_markingVisitor = new MarkingVisitor(s_markingStack);
2180 s_freePagePool = new FreePagePool(); 2187 s_freePagePool = new FreePagePool();
2181 s_orphanedPagePool = new OrphanedPagePool(); 2188 s_orphanedPagePool = new OrphanedPagePool();
2182 s_markingThreads = new Vector<OwnPtr<WebThread>>(); 2189 s_markingThreads = new Vector<OwnPtr<WebThread>>();
2190 s_allocatedObjectSize = 0;
sof 2014/11/14 12:44:20 Use resetAllocatedObjectSize() ?
haraken 2014/11/14 15:15:00 Because I added ASSERT(ThreadState::isAnyThreadInGC()) to the reset methods, they can't be called from Heap::init(), where no GC is in progress.
2191 s_allocatedSpace = 0;
2192 s_liveObjectSize = 0;
sof 2014/11/14 12:44:20 Use resetLiveObjectSize() ?
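For context on haraken's reply above: the reset helpers live in Heap.h, which is outside this diff, so the following is only an assumed sketch of their shape based on this thread:

    // Assumed shape of the reset helpers discussed above (not shown in this
    // patch); the ASSERT is what makes them unusable from Heap::init(), where
    // no GC is in progress.
    static void resetAllocatedObjectSize()
    {
        ASSERT(ThreadState::isAnyThreadInGC());
        s_allocatedObjectSize = 0;
    }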
2183 if (Platform::current()) { 2193 if (Platform::current()) {
2184 int processors = Platform::current()->numberOfProcessors(); 2194 int processors = Platform::current()->numberOfProcessors();
2185 int numberOfMarkingThreads = std::min(processors, maxNumberOfMarkingThreads); 2195 int numberOfMarkingThreads = std::min(processors, maxNumberOfMarkingThreads);
2186 for (int i = 0; i < numberOfMarkingThreads; i++) 2196 for (int i = 0; i < numberOfMarkingThreads; i++)
2187 s_markingThreads->append(adoptPtr(Platform::current()->createThread("Blink GC Marking Thread"))); 2197 s_markingThreads->append(adoptPtr(Platform::current()->createThread("Blink GC Marking Thread")));
2188 } 2198 }
2189 } 2199 }
2190 2200
2191 void Heap::shutdown() 2201 void Heap::shutdown()
2192 { 2202 {
(...skipping 23 matching lines...)
2216 s_weakCallbackStack = 0; 2226 s_weakCallbackStack = 0;
2217 delete s_postMarkingCallbackStack; 2227 delete s_postMarkingCallbackStack;
2218 s_postMarkingCallbackStack = 0; 2228 s_postMarkingCallbackStack = 0;
2219 delete s_markingStack; 2229 delete s_markingStack;
2220 s_markingStack = 0; 2230 s_markingStack = 0;
2221 delete s_ephemeronStack; 2231 delete s_ephemeronStack;
2222 s_ephemeronStack = 0; 2232 s_ephemeronStack = 0;
2223 delete s_regionTree; 2233 delete s_regionTree;
2224 s_regionTree = 0; 2234 s_regionTree = 0;
2225 ThreadState::shutdown(); 2235 ThreadState::shutdown();
2236 ASSERT(Heap::allocatedSpace() == 0);
2226 } 2237 }
2227 2238
2228 BaseHeapPage* Heap::contains(Address address) 2239 BaseHeapPage* Heap::contains(Address address)
2229 { 2240 {
2230 ASSERT(ThreadState::isAnyThreadInGC()); 2241 ASSERT(ThreadState::isAnyThreadInGC());
2231 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); 2242 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2232 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { 2243 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2233 BaseHeapPage* page = (*it)->contains(address); 2244 BaseHeapPage* page = (*it)->contains(address);
2234 if (page) 2245 if (page)
2235 return page; 2246 return page;
(...skipping 232 matching lines...)
2468 if (!gcScope.allThreadsParked()) { 2479 if (!gcScope.allThreadsParked()) {
2469 ThreadState::current()->setGCRequested(); 2480 ThreadState::current()->setGCRequested();
2470 return; 2481 return;
2471 } 2482 }
2472 2483
2473 if (state->isMainThread()) 2484 if (state->isMainThread())
2474 ScriptForbiddenScope::enter(); 2485 ScriptForbiddenScope::enter();
2475 2486
2476 s_lastGCWasConservative = false; 2487 s_lastGCWasConservative = false;
2477 2488
2489 Heap::resetLiveObjectSize();
2490 Heap::resetAllocatedObjectSize();
2491
2478 TRACE_EVENT2("blink_gc", "Heap::collectGarbage", 2492 TRACE_EVENT2("blink_gc", "Heap::collectGarbage",
2479 "precise", stackState == ThreadState::NoHeapPointersOnStack, 2493 "precise", stackState == ThreadState::NoHeapPointersOnStack,
2480 "forced", cause == ThreadState::ForcedGC); 2494 "forced", cause == ThreadState::ForcedGC);
2481 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC"); 2495 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC");
2482 double timeStamp = WTF::currentTimeMS(); 2496 double timeStamp = WTF::currentTimeMS();
2483 #if ENABLE(GC_PROFILE_MARKING) 2497 #if ENABLE(GC_PROFILE_MARKING)
2484 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear(); 2498 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear();
2485 #endif 2499 #endif
2486 2500
2487 // Disallow allocation during garbage collection (but not 2501 // Disallow allocation during garbage collection (but not
(...skipping 38 matching lines...)
2526 // marking we check that any object marked as dead is not traced. E.g. via a 2540 // marking we check that any object marked as dead is not traced. E.g. via a
2527 // conservatively found pointer or a programming error with an object containing 2541 // conservatively found pointer or a programming error with an object containing
2528 // a dangling pointer. 2542 // a dangling pointer.
2529 orphanedPagePool()->decommitOrphanedPages(); 2543 orphanedPagePool()->decommitOrphanedPages();
2530 2544
2531 #if ENABLE(GC_PROFILE_MARKING) 2545 #if ENABLE(GC_PROFILE_MARKING)
2532 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats(); 2546 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats();
2533 #endif 2547 #endif
2534 2548
2535 if (Platform::current()) { 2549 if (Platform::current()) {
2536 uint64_t objectSpaceSize;
2537 uint64_t allocatedSpaceSize;
2538 getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize);
2539 Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); 2550 Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50);
2540 Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50); 2551 Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", Heap::allocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50);
2541 Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50); 2552 Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", Heap::allocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50);
2542 } 2553 }
2543 2554
2544 if (state->isMainThread()) 2555 if (state->isMainThread())
2545 ScriptForbiddenScope::exit(); 2556 ScriptForbiddenScope::exit();
2546 } 2557 }
2547 2558
2548 void Heap::collectGarbageForTerminatingThread(ThreadState* state) 2559 void Heap::collectGarbageForTerminatingThread(ThreadState* state)
2549 { 2560 {
2550 // We explicitly do not enter a safepoint while doing thread specific 2561 // We explicitly do not enter a safepoint while doing thread specific
2551 // garbage collection since we don't want to allow a global GC at the 2562 // garbage collection since we don't want to allow a global GC at the
(...skipping 218 matching lines...)
2770 } else if (splitOff->m_freeList.m_freeLists[i]) { 2781 } else if (splitOff->m_freeList.m_freeLists[i]) {
2771 m_freeList.m_lastFreeListEntries[i]->append(splitOff->m_freeList.m_freeLists[i]); 2782 m_freeList.m_lastFreeListEntries[i]->append(splitOff->m_freeList.m_freeLists[i]);
2772 m_freeList.m_lastFreeListEntries[i] = splitOff->m_freeList.m_lastFreeListEntries[i]; 2783 m_freeList.m_lastFreeListEntries[i] = splitOff->m_freeList.m_lastFreeListEntries[i];
2773 } 2784 }
2774 } 2785 }
2775 if (m_freeList.m_biggestFreeListIndex < splitOff->m_freeList.m_biggestFreeListIndex) 2786 if (m_freeList.m_biggestFreeListIndex < splitOff->m_freeList.m_biggestFreeListIndex)
2776 m_freeList.m_biggestFreeListIndex = splitOff->m_freeList.m_biggestFreeListIndex; 2787 m_freeList.m_biggestFreeListIndex = splitOff->m_freeList.m_biggestFreeListIndex;
2777 } 2788 }
2778 } 2789 }
2779 2790
2780 void Heap::getHeapSpaceSize(uint64_t* objectSpaceSize, uint64_t* allocatedSpaceSize) 2791 size_t Heap::objectPayloadSizeForTesting()
2781 { 2792 {
2782 *objectSpaceSize = 0; 2793 size_t objectPayloadSize = 0;
2783 *allocatedSpaceSize = 0;
2784 ASSERT(ThreadState::isAnyThreadInGC());
2785 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2786 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
2787 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2788 *objectSpaceSize += (*it)->stats().totalObjectSpace();
2789 *allocatedSpaceSize += (*it)->stats().totalAllocatedSpace();
2790 }
2791 }
2792
2793 void Heap::getStats(HeapStats* stats)
2794 {
2795 stats->clear();
2796 ASSERT(ThreadState::isAnyThreadInGC());
2797 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2798 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
2799 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2800 HeapStats temp;
2801 (*it)->getStats(temp);
2802 stats->add(&temp);
2803 }
2804 }
2805
2806 void Heap::getStatsForTesting(HeapStats* stats)
2807 {
2808 stats->clear();
2809 ASSERT(ThreadState::isAnyThreadInGC()); 2794 ASSERT(ThreadState::isAnyThreadInGC());
2810 makeConsistentForSweeping(); 2795 makeConsistentForSweeping();
2811 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); 2796 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2812 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator; 2797 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
2813 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) { 2798 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2814 HeapStats temp; 2799 objectPayloadSize += (*it)->objectPayloadSizeForTesting();
2815 (*it)->getStatsForTesting(temp);
2816 stats->add(&temp);
2817 } 2800 }
2801 return objectPayloadSize;
2818 } 2802 }
2819 2803
2820 #if ENABLE(ASSERT) 2804 #if ENABLE(ASSERT)
2821 bool Heap::isConsistentForSweeping() 2805 bool Heap::isConsistentForSweeping()
2822 { 2806 {
2823 ASSERT(ThreadState::isAnyThreadInGC()); 2807 ASSERT(ThreadState::isAnyThreadInGC());
2824 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); 2808 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2825 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { 2809 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2826 if (!(*it)->isConsistentForSweeping()) 2810 if (!(*it)->isConsistentForSweeping())
2827 return false; 2811 return false;
(...skipping 138 matching lines...)
2966 CallbackStack* Heap::s_markingStack; 2950 CallbackStack* Heap::s_markingStack;
2967 CallbackStack* Heap::s_postMarkingCallbackStack; 2951 CallbackStack* Heap::s_postMarkingCallbackStack;
2968 CallbackStack* Heap::s_weakCallbackStack; 2952 CallbackStack* Heap::s_weakCallbackStack;
2969 CallbackStack* Heap::s_ephemeronStack; 2953 CallbackStack* Heap::s_ephemeronStack;
2970 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; 2954 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
2971 bool Heap::s_shutdownCalled = false; 2955 bool Heap::s_shutdownCalled = false;
2972 bool Heap::s_lastGCWasConservative = false; 2956 bool Heap::s_lastGCWasConservative = false;
2973 FreePagePool* Heap::s_freePagePool; 2957 FreePagePool* Heap::s_freePagePool;
2974 OrphanedPagePool* Heap::s_orphanedPagePool; 2958 OrphanedPagePool* Heap::s_orphanedPagePool;
2975 Heap::RegionTree* Heap::s_regionTree = 0; 2959 Heap::RegionTree* Heap::s_regionTree = 0;
2960 size_t Heap::s_allocatedObjectSize = 0;
2961 size_t Heap::s_allocatedSpace = 0;
2962 size_t Heap::s_liveObjectSize = 0;
2976 2963
2977 } // namespace blink 2964 } // namespace blink