Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(115)

Side by Side Diff: Source/platform/heap/Heap.cpp

Issue 634243004: Oilpan: Simplify Heap::allocate (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 6 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « Source/platform/heap/Heap.h ('k') | Source/platform/heap/HeapTest.cpp » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 624 matching lines...) Expand 10 before | Expand all | Expand 10 after
635 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload)); 635 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
636 FinalizedHeapObjectHeader* header = 636 FinalizedHeapObjectHeader* header =
637 reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize); 637 reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize);
638 return header; 638 return header;
639 } 639 }
640 640
641 template<typename Header> 641 template<typename Header>
642 ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index) 642 ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index)
643 : m_currentAllocationPoint(0) 643 : m_currentAllocationPoint(0)
644 , m_remainingAllocationSize(0) 644 , m_remainingAllocationSize(0)
645 , m_lastRemainingAllocationSize(0)
645 , m_firstPage(0) 646 , m_firstPage(0)
646 , m_firstLargeHeapObject(0) 647 , m_firstLargeHeapObject(0)
647 , m_firstPageAllocatedDuringSweeping(0) 648 , m_firstPageAllocatedDuringSweeping(0)
648 , m_lastPageAllocatedDuringSweeping(0) 649 , m_lastPageAllocatedDuringSweeping(0)
649 , m_mergePoint(0) 650 , m_mergePoint(0)
650 , m_biggestFreeListIndex(0) 651 , m_biggestFreeListIndex(0)
651 , m_threadState(state) 652 , m_threadState(state)
652 , m_index(index) 653 , m_index(index)
653 , m_numberOfNormalPages(0) 654 , m_numberOfNormalPages(0)
654 , m_promptlyFreedCount(0) 655 , m_promptlyFreedCount(0)
(...skipping 17 matching lines...) Expand all
672 for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next) 673 for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next)
673 Heap::orphanedPagePool()->addOrphanedPage(m_index, page); 674 Heap::orphanedPagePool()->addOrphanedPage(m_index, page);
674 m_firstPage = 0; 675 m_firstPage = 0;
675 676
676 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next) 677 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next)
677 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject); 678 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject);
678 m_firstLargeHeapObject = 0; 679 m_firstLargeHeapObject = 0;
679 } 680 }
680 681
681 template<typename Header> 682 template<typename Header>
683 void ThreadHeap<Header>::updateRemainingAllocationSize()
684 {
685 if (m_lastRemainingAllocationSize > remainingAllocationSize()) {
686 stats().increaseObjectSpace(m_lastRemainingAllocationSize - remainingAllocationSize());
687 m_lastRemainingAllocationSize = remainingAllocationSize();
688 }
689 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize());
690 }
691
692 template<typename Header>
682 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo) 693 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo)
683 { 694 {
684 size_t allocationSize = allocationSizeFromSize(size); 695 size_t allocationSize = allocationSizeFromSize(size);
696 ASSERT(allocationSize > remainingAllocationSize());
697 if (allocationSize > blinkPageSize / 2)
698 return allocateLargeObject(allocationSize, gcInfo);
699
700 updateRemainingAllocationSize();
685 if (threadState()->shouldGC()) { 701 if (threadState()->shouldGC()) {
686 if (threadState()->shouldForceConservativeGC()) 702 if (threadState()->shouldForceConservativeGC())
687 Heap::collectGarbage(ThreadState::HeapPointersOnStack); 703 Heap::collectGarbage(ThreadState::HeapPointersOnStack);
688 else 704 else
689 threadState()->setGCRequested(); 705 threadState()->setGCRequested();
690 } 706 }
707 if (remainingAllocationSize() > 0) {
708 addToFreeList(currentAllocationPoint(), remainingAllocationSize());
709 setAllocationPoint(0, 0);
710 }
691 ensureCurrentAllocation(allocationSize, gcInfo); 711 ensureCurrentAllocation(allocationSize, gcInfo);
692 return allocate(size, gcInfo); 712 return allocate(size, gcInfo);
693 } 713 }
694 714
695 template<typename Header> 715 template<typename Header>
696 bool ThreadHeap<Header>::allocateFromFreeList(size_t minSize) 716 bool ThreadHeap<Header>::allocateFromFreeList(size_t minSize)
697 { 717 {
698 size_t bucketSize = 1 << m_biggestFreeListIndex; 718 size_t bucketSize = 1 << m_biggestFreeListIndex;
699 int i = m_biggestFreeListIndex; 719 int i = m_biggestFreeListIndex;
700 for (; i > 0; i--, bucketSize >>= 1) { 720 for (; i > 0; i--, bucketSize >>= 1) {
701 if (bucketSize < minSize) 721 if (bucketSize < minSize)
702 break; 722 break;
703 FreeListEntry* entry = m_freeLists[i]; 723 FreeListEntry* entry = m_freeLists[i];
704 if (entry) { 724 if (entry) {
705 m_biggestFreeListIndex = i; 725 m_biggestFreeListIndex = i;
706 entry->unlink(&m_freeLists[i]); 726 entry->unlink(&m_freeLists[i]);
707 setAllocationPoint(entry->address(), entry->size()); 727 setAllocationPoint(entry->address(), entry->size());
708 ASSERT(currentAllocationPoint() && remainingAllocationSize() >= minSize); 728 ASSERT(currentAllocationPoint() && remainingAllocationSize() >= minSize);
709 return true; 729 return true;
710 } 730 }
711 } 731 }
712 m_biggestFreeListIndex = i; 732 m_biggestFreeListIndex = i;
713 return false; 733 return false;
714 } 734 }
715 735
716 template<typename Header> 736 template<typename Header>
717 void ThreadHeap<Header>::ensureCurrentAllocation(size_t minSize, const GCInfo* gcInfo) 737 void ThreadHeap<Header>::ensureCurrentAllocation(size_t minSize, const GCInfo* gcInfo)
718 { 738 {
719 ASSERT(minSize >= allocationGranularity); 739 ASSERT(minSize >= allocationGranularity);
720 if (remainingAllocationSize() >= minSize)
721 return;
722
723 if (remainingAllocationSize() > 0) {
724 addToFreeList(currentAllocationPoint(), remainingAllocationSize());
725 setAllocationPoint(0, 0);
726 }
727 if (allocateFromFreeList(minSize)) 740 if (allocateFromFreeList(minSize))
728 return; 741 return;
729 if (coalesce(minSize) && allocateFromFreeList(minSize)) 742 if (coalesce(minSize) && allocateFromFreeList(minSize))
730 return; 743 return;
731 addPageToHeap(gcInfo); 744 addPageToHeap(gcInfo);
732 bool success = allocateFromFreeList(minSize); 745 bool success = allocateFromFreeList(minSize);
733 RELEASE_ASSERT(success); 746 RELEASE_ASSERT(success);
734 } 747 }
735 748
736 template<typename Header> 749 template<typename Header>
(...skipping 169 matching lines...) Expand 10 before | Expand all | Expand 10 after
906 page->clearObjectStartBitMap(); 919 page->clearObjectStartBitMap();
907 page->resetPromptlyFreedSize(); 920 page->resetPromptlyFreedSize();
908 size_t freedCount = 0; 921 size_t freedCount = 0;
909 Address startOfGap = page->payload(); 922 Address startOfGap = page->payload();
910 for (Address headerAddress = startOfGap; headerAddress < page->end(); ) { 923 for (Address headerAddress = startOfGap; headerAddress < page->end(); ) {
911 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress); 924 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
912 ASSERT(basicHeader->size() > 0); 925 ASSERT(basicHeader->size() > 0);
913 ASSERT(basicHeader->size() < blinkPagePayloadSize()); 926 ASSERT(basicHeader->size() < blinkPagePayloadSize());
914 927
915 if (basicHeader->isPromptlyFreed()) { 928 if (basicHeader->isPromptlyFreed()) {
916 stats().decreaseObjectSpace(reinterpret_cast<Header*>(basicHeader)->payloadSize()); 929 stats().decreaseObjectSpace(reinterpret_cast<Header*>(basicHeader)->size());
917 size_t size = basicHeader->size(); 930 size_t size = basicHeader->size();
918 ASSERT(size >= sizeof(Header)); 931 ASSERT(size >= sizeof(Header));
919 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) 932 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
920 memset(headerAddress, 0, sizeof(Header)); 933 memset(headerAddress, 0, sizeof(Header));
921 #endif 934 #endif
922 ++freedCount; 935 ++freedCount;
923 headerAddress += size; 936 headerAddress += size;
924 continue; 937 continue;
925 } 938 }
926 939
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after
971 // Ensure that there is enough space for alignment. If the header 984 // Ensure that there is enough space for alignment. If the header
972 // is not a multiple of 8 bytes we will allocate an extra 985 // is not a multiple of 8 bytes we will allocate an extra
973 // headerPadding<Header> bytes to ensure it 8 byte aligned. 986 // headerPadding<Header> bytes to ensure it 8 byte aligned.
974 allocationSize += headerPadding<Header>(); 987 allocationSize += headerPadding<Header>();
975 988
976 // If ASan is supported we add allocationGranularity bytes to the allocated space and 989 // If ASan is supported we add allocationGranularity bytes to the allocated space and
977 // poison that to detect overflows 990 // poison that to detect overflows
978 #if defined(ADDRESS_SANITIZER) 991 #if defined(ADDRESS_SANITIZER)
979 allocationSize += allocationGranularity; 992 allocationSize += allocationGranularity;
980 #endif 993 #endif
994
995 updateRemainingAllocationSize();
981 if (m_threadState->shouldGC()) 996 if (m_threadState->shouldGC())
982 m_threadState->setGCRequested(); 997 m_threadState->setGCRequested();
983 m_threadState->shouldFlushHeapDoesNotContainCache(); 998 m_threadState->shouldFlushHeapDoesNotContainCache();
984 PageMemory* pageMemory = PageMemory::allocate(allocationSize); 999 PageMemory* pageMemory = PageMemory::allocate(allocationSize);
985 m_threadState->allocatedRegionsSinceLastGC().append(pageMemory->region()); 1000 m_threadState->allocatedRegionsSinceLastGC().append(pageMemory->region());
986 Address largeObjectAddress = pageMemory->writableStart(); 1001 Address largeObjectAddress = pageMemory->writableStart();
987 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); 1002 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
988 memset(headerAddress, 0, size); 1003 memset(headerAddress, 0, size);
989 Header* header = new (NotNull, headerAddress) Header(size, gcInfo); 1004 Header* header = new (NotNull, headerAddress) Header(size, gcInfo);
990 Address result = headerAddress + sizeof(*header); 1005 Address result = headerAddress + sizeof(*header);
991 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); 1006 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
992 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState()); 1007 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState());
993 1008
994 // Poison the object header and allocationGranularity bytes after the object 1009 // Poison the object header and allocationGranularity bytes after the object
995 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); 1010 ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
996 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); 1011 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
997 largeObject->link(&m_firstLargeHeapObject); 1012 largeObject->link(&m_firstLargeHeapObject);
998 stats().increaseAllocatedSpace(largeObject->size()); 1013 stats().increaseAllocatedSpace(largeObject->size());
999 stats().increaseObjectSpace(largeObject->payloadSize()); 1014 stats().increaseObjectSpace(largeObject->size());
1000 return result; 1015 return result;
1001 } 1016 }
1002 1017
1003 template<typename Header> 1018 template<typename Header>
1004 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext) 1019 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext)
1005 { 1020 {
1006 object->unlink(previousNext); 1021 object->unlink(previousNext);
1007 object->finalize(); 1022 object->finalize();
1008 1023
1009 // Unpoison the object header and allocationGranularity bytes after the 1024 // Unpoison the object header and allocationGranularity bytes after the
(...skipping 296 matching lines...) Expand 10 before | Expand all | Expand 10 after
1306 1321
1307 template<typename Header> 1322 template<typename Header>
1308 bool ThreadHeap<Header>::pagesAllocatedDuringSweepingContains(Address address) 1323 bool ThreadHeap<Header>::pagesAllocatedDuringSweepingContains(Address address)
1309 { 1324 {
1310 for (HeapPage<Header>* page = m_firstPageAllocatedDuringSweeping; page; page = page->next()) { 1325 for (HeapPage<Header>* page = m_firstPageAllocatedDuringSweeping; page; page = page->next()) {
1311 if (page->contains(address)) 1326 if (page->contains(address))
1312 return true; 1327 return true;
1313 } 1328 }
1314 return false; 1329 return false;
1315 } 1330 }
1316
1317 template<typename Header>
1318 void ThreadHeap<Header>::getScannedStats(HeapStats& scannedStats)
1319 {
1320 ASSERT(!m_firstPageAllocatedDuringSweeping);
1321 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
1322 page->getStats(scannedStats);
1323 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
1324 current->getStats(scannedStats);
1325 }
1326 #endif 1331 #endif
1327 1332
1328 template<typename Header> 1333 template<typename Header>
1334 void ThreadHeap<Header>::getStatsForTesting(HeapStats& stats)
1335 {
1336 ASSERT(!m_firstPageAllocatedDuringSweeping);
1337 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
1338 page->getStatsForTesting(stats);
1339 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
1340 current->getStatsForTesting(stats);
1341 }
1342
1343 template<typename Header>
1329 void ThreadHeap<Header>::sweepNormalPages(HeapStats* stats) 1344 void ThreadHeap<Header>::sweepNormalPages(HeapStats* stats)
1330 { 1345 {
1331 TRACE_EVENT0("blink_gc", "ThreadHeap::sweepNormalPages"); 1346 TRACE_EVENT0("blink_gc", "ThreadHeap::sweepNormalPages");
1332 HeapPage<Header>* page = m_firstPage; 1347 HeapPage<Header>* page = m_firstPage;
1333 HeapPage<Header>** previousNext = &m_firstPage; 1348 HeapPage<Header>** previousNext = &m_firstPage;
1334 HeapPage<Header>* previous = 0; 1349 HeapPage<Header>* previous = 0;
1335 while (page) { 1350 while (page) {
1336 page->resetPromptlyFreedSize(); 1351 page->resetPromptlyFreedSize();
1337 if (page->isEmpty()) { 1352 if (page->isEmpty()) {
1338 HeapPage<Header>* unused = page; 1353 HeapPage<Header>* unused = page;
(...skipping 12 matching lines...) Expand all
1351 } 1366 }
1352 1367
1353 template<typename Header> 1368 template<typename Header>
1354 void ThreadHeap<Header>::sweepLargePages(HeapStats* stats) 1369 void ThreadHeap<Header>::sweepLargePages(HeapStats* stats)
1355 { 1370 {
1356 TRACE_EVENT0("blink_gc", "ThreadHeap::sweepLargePages"); 1371 TRACE_EVENT0("blink_gc", "ThreadHeap::sweepLargePages");
1357 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject; 1372 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject;
1358 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { 1373 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
1359 if (current->isMarked()) { 1374 if (current->isMarked()) {
1360 stats->increaseAllocatedSpace(current->size()); 1375 stats->increaseAllocatedSpace(current->size());
1361 stats->increaseObjectSpace(current->payloadSize()); 1376 stats->increaseObjectSpace(current->size());
1362 current->unmark(); 1377 current->unmark();
1363 previousNext = &current->m_next; 1378 previousNext = &current->m_next;
1364 current = current->next(); 1379 current = current->next();
1365 } else { 1380 } else {
1366 LargeHeapObject<Header>* next = current->next(); 1381 LargeHeapObject<Header>* next = current->next();
1367 freeLargeObject(current, previousNext); 1382 freeLargeObject(current, previousNext);
1368 current = next; 1383 current = next;
1369 } 1384 }
1370 } 1385 }
1371 } 1386 }
(...skipping 122 matching lines...) Expand 10 before | Expand all | Expand 10 after
1494 } 1509 }
1495 1510
1496 template<typename Header> 1511 template<typename Header>
1497 void HeapPage<Header>::unlink(ThreadHeap<Header>* heap, HeapPage* unused, HeapPa ge** prevNext) 1512 void HeapPage<Header>::unlink(ThreadHeap<Header>* heap, HeapPage* unused, HeapPa ge** prevNext)
1498 { 1513 {
1499 *prevNext = unused->m_next; 1514 *prevNext = unused->m_next;
1500 heap->removePageFromHeap(unused); 1515 heap->removePageFromHeap(unused);
1501 } 1516 }
1502 1517
1503 template<typename Header> 1518 template<typename Header>
1504 void HeapPage<Header>::getStats(HeapStats& stats) 1519 void HeapPage<Header>::getStatsForTesting(HeapStats& stats)
1505 { 1520 {
1506 stats.increaseAllocatedSpace(blinkPageSize); 1521 stats.increaseAllocatedSpace(blinkPageSize);
1507 Address headerAddress = payload(); 1522 Address headerAddress = payload();
1508 ASSERT(headerAddress != end()); 1523 ASSERT(headerAddress != end());
1509 do { 1524 do {
1510 Header* header = reinterpret_cast<Header*>(headerAddress); 1525 Header* header = reinterpret_cast<Header*>(headerAddress);
1511 if (!header->isFree()) 1526 if (!header->isFree()) {
1512 stats.increaseObjectSpace(header->payloadSize()); 1527 stats.increaseObjectSpace(header->payloadSize());
1528 }
1513 ASSERT(header->size() < blinkPagePayloadSize()); 1529 ASSERT(header->size() < blinkPagePayloadSize());
1514 headerAddress += header->size(); 1530 headerAddress += header->size();
1515 ASSERT(headerAddress <= end()); 1531 ASSERT(headerAddress <= end());
1516 } while (headerAddress < end()); 1532 } while (headerAddress < end());
1517 } 1533 }
1518 1534
1519 template<typename Header> 1535 template<typename Header>
1520 bool HeapPage<Header>::isEmpty() 1536 bool HeapPage<Header>::isEmpty()
1521 { 1537 {
1522 BasicObjectHeader* header = reinterpret_cast<BasicObjectHeader*>(payload()); 1538 BasicObjectHeader* header = reinterpret_cast<BasicObjectHeader*>(payload());
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after
1566 #endif 1582 #endif
1567 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); 1583 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
1568 headerAddress += size; 1584 headerAddress += size;
1569 continue; 1585 continue;
1570 } 1586 }
1571 1587
1572 if (startOfGap != headerAddress) 1588 if (startOfGap != headerAddress)
1573 heap->addToFreeList(startOfGap, headerAddress - startOfGap); 1589 heap->addToFreeList(startOfGap, headerAddress - startOfGap);
1574 header->unmark(); 1590 header->unmark();
1575 headerAddress += header->size(); 1591 headerAddress += header->size();
1576 stats->increaseObjectSpace(header->payloadSize()); 1592 stats->increaseObjectSpace(header->size());
1577 startOfGap = headerAddress; 1593 startOfGap = headerAddress;
1578 } 1594 }
1579 if (startOfGap != end()) 1595 if (startOfGap != end())
1580 heap->addToFreeList(startOfGap, end() - startOfGap); 1596 heap->addToFreeList(startOfGap, end() - startOfGap);
1581 } 1597 }
1582 1598
1583 template<typename Header> 1599 template<typename Header>
1584 void HeapPage<Header>::clearLiveAndMarkDead() 1600 void HeapPage<Header>::clearLiveAndMarkDead()
1585 { 1601 {
1586 for (Address headerAddress = payload(); headerAddress < end();) { 1602 for (Address headerAddress = payload(); headerAddress < end();) {
(...skipping 205 matching lines...) Expand 10 before | Expand all | Expand 10 after
1792 return gcInfo()->hasVTable(); 1808 return gcInfo()->hasVTable();
1793 } 1809 }
1794 1810
1795 template<> 1811 template<>
1796 inline bool HeapPage<FinalizedHeapObjectHeader>::hasVTable(FinalizedHeapObjectHeader* header) 1812 inline bool HeapPage<FinalizedHeapObjectHeader>::hasVTable(FinalizedHeapObjectHeader* header)
1797 { 1813 {
1798 return header->hasVTable(); 1814 return header->hasVTable();
1799 } 1815 }
1800 1816
1801 template<typename Header> 1817 template<typename Header>
1802 void LargeHeapObject<Header>::getStats(HeapStats& stats) 1818 void LargeHeapObject<Header>::getStatsForTesting(HeapStats& stats)
1803 { 1819 {
1804 stats.increaseAllocatedSpace(size()); 1820 stats.increaseAllocatedSpace(size());
1805 stats.increaseObjectSpace(payloadSize()); 1821 stats.increaseObjectSpace(payloadSize());
1806 } 1822 }
1807 1823
1808 #if ENABLE(GC_PROFILE_HEAP) 1824 #if ENABLE(GC_PROFILE_HEAP)
1809 template<typename Header> 1825 template<typename Header>
1810 void LargeHeapObject<Header>::snapshot(TracedValue* json, ThreadState::SnapshotI nfo* info) 1826 void LargeHeapObject<Header>::snapshot(TracedValue* json, ThreadState::SnapshotI nfo* info)
1811 { 1827 {
1812 Header* header = heapObjectHeader(); 1828 Header* header = heapObjectHeader();
(...skipping 944 matching lines...) Expand 10 before | Expand all | Expand 10 after
2757 ASSERT(ThreadState::isAnyThreadInGC()); 2773 ASSERT(ThreadState::isAnyThreadInGC());
2758 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); 2774 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2759 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator; 2775 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
2760 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) { 2776 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2761 HeapStats temp; 2777 HeapStats temp;
2762 (*it)->getStats(temp); 2778 (*it)->getStats(temp);
2763 stats->add(&temp); 2779 stats->add(&temp);
2764 } 2780 }
2765 } 2781 }
2766 2782
2783 void Heap::getStatsForTesting(HeapStats* stats)
2784 {
2785 stats->clear();
2786 ASSERT(ThreadState::isAnyThreadInGC());
2787 makeConsistentForSweeping();
2788 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2789 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
2790 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2791 HeapStats temp;
2792 (*it)->getStatsForTesting(temp);
2793 stats->add(&temp);
2794 }
2795 }
2796
2767 #if ENABLE(ASSERT) 2797 #if ENABLE(ASSERT)
2768 bool Heap::isConsistentForSweeping() 2798 bool Heap::isConsistentForSweeping()
2769 { 2799 {
2770 ASSERT(ThreadState::isAnyThreadInGC()); 2800 ASSERT(ThreadState::isAnyThreadInGC());
2771 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); 2801 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2772 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { 2802 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2773 if (!(*it)->isConsistentForSweeping()) 2803 if (!(*it)->isConsistentForSweeping())
2774 return false; 2804 return false;
2775 } 2805 }
2776 return true; 2806 return true;
(...skipping 138 matching lines...) Expand 10 before | Expand all | Expand 10 after
2915 CallbackStack* Heap::s_weakCallbackStack; 2945 CallbackStack* Heap::s_weakCallbackStack;
2916 CallbackStack* Heap::s_ephemeronStack; 2946 CallbackStack* Heap::s_ephemeronStack;
2917 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; 2947 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
2918 bool Heap::s_shutdownCalled = false; 2948 bool Heap::s_shutdownCalled = false;
2919 bool Heap::s_lastGCWasConservative = false; 2949 bool Heap::s_lastGCWasConservative = false;
2920 FreePagePool* Heap::s_freePagePool; 2950 FreePagePool* Heap::s_freePagePool;
2921 OrphanedPagePool* Heap::s_orphanedPagePool; 2951 OrphanedPagePool* Heap::s_orphanedPagePool;
2922 Heap::RegionTree* Heap::s_regionTree = 0; 2952 Heap::RegionTree* Heap::s_regionTree = 0;
2923 2953
2924 } 2954 }
OLDNEW
« no previous file with comments | « Source/platform/heap/Heap.h ('k') | Source/platform/heap/HeapTest.cpp » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698