Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(120)

Side by Side Diff: Source/platform/heap/Heap.cpp

Issue 638223003: [oilpan]: Attempt to make allocation faster by only updating the GC stats when needed, rather than … Base URL: https://chromium.googlesource.com/chromium/blink.git@master
Patch Set: Created 6 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « Source/platform/heap/Heap.h ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 566 matching lines...) Expand 10 before | Expand all | Expand 10 after
577 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload)); 577 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
578 FinalizedHeapObjectHeader* header = 578 FinalizedHeapObjectHeader* header =
579 reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize); 579 reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize);
580 return header; 580 return header;
581 } 581 }
582 582
583 template<typename Header> 583 template<typename Header>
584 ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index) 584 ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index)
585 : m_currentAllocationPoint(0) 585 : m_currentAllocationPoint(0)
586 , m_remainingAllocationSize(0) 586 , m_remainingAllocationSize(0)
587 , m_lastRemainingAllocationSize(0)
587 , m_firstPage(0) 588 , m_firstPage(0)
588 , m_firstLargeHeapObject(0) 589 , m_firstLargeHeapObject(0)
589 , m_firstPageAllocatedDuringSweeping(0) 590 , m_firstPageAllocatedDuringSweeping(0)
590 , m_lastPageAllocatedDuringSweeping(0) 591 , m_lastPageAllocatedDuringSweeping(0)
591 , m_mergePoint(0) 592 , m_mergePoint(0)
592 , m_biggestFreeListIndex(0) 593 , m_biggestFreeListIndex(0)
593 , m_threadState(state) 594 , m_threadState(state)
594 , m_index(index) 595 , m_index(index)
595 , m_numberOfNormalPages(0) 596 , m_numberOfNormalPages(0)
596 , m_promptlyFreedCount(0) 597 , m_promptlyFreedCount(0)
(...skipping 21 matching lines...) Expand all
618 619
619 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next) 620 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next)
620 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject); 621 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject);
621 m_firstLargeHeapObject = 0; 622 m_firstLargeHeapObject = 0;
622 } 623 }
623 624
624 template<typename Header> 625 template<typename Header>
625 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo) 626 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo)
626 { 627 {
627 size_t allocationSize = allocationSizeFromSize(size); 628 size_t allocationSize = allocationSizeFromSize(size);
629 ASSERT(allocationSize > remainingAllocationSize());
630 if (allocationSize > HeapPage<Header>::payloadSize() / 2)
631 return allocateLargeObject(allocationSize, gcInfo);
632
633 if (remainingAllocationSize() > 0)
634 addToFreeList(currentAllocationPoint(), remainingAllocationSize());
635 // This also updates the stats.
636 setAllocationPoint(0, 0);
628 if (threadState()->shouldGC()) { 637 if (threadState()->shouldGC()) {
629 if (threadState()->shouldForceConservativeGC()) 638 if (threadState()->shouldForceConservativeGC())
630 Heap::collectGarbage(ThreadState::HeapPointersOnStack); 639 Heap::collectGarbage(ThreadState::HeapPointersOnStack);
631 else 640 else
632 threadState()->setGCRequested(); 641 threadState()->setGCRequested();
633 } 642 }
634 ensureCurrentAllocation(allocationSize, gcInfo); 643 ensureCurrentAllocation(allocationSize, gcInfo);
635 return allocate(size, gcInfo); 644 return allocate(size, gcInfo);
636 } 645 }
637 646
(...skipping 15 matching lines...) Expand all
653 } 662 }
654 } 663 }
655 m_biggestFreeListIndex = i; 664 m_biggestFreeListIndex = i;
656 return false; 665 return false;
657 } 666 }
658 667
659 template<typename Header> 668 template<typename Header>
660 void ThreadHeap<Header>::ensureCurrentAllocation(size_t minSize, const GCInfo* gcInfo) 669 void ThreadHeap<Header>::ensureCurrentAllocation(size_t minSize, const GCInfo* gcInfo)
661 { 670 {
662 ASSERT(minSize >= allocationGranularity); 671 ASSERT(minSize >= allocationGranularity);
663 if (remainingAllocationSize() >= minSize)
664 return;
665
666 if (remainingAllocationSize() > 0) {
667 addToFreeList(currentAllocationPoint(), remainingAllocationSize());
668 setAllocationPoint(0, 0);
669 }
670 if (allocateFromFreeList(minSize)) 672 if (allocateFromFreeList(minSize))
671 return; 673 return;
672 if (coalesce(minSize) && allocateFromFreeList(minSize)) 674 if (coalesce(minSize) && allocateFromFreeList(minSize))
673 return; 675 return;
674 addPageToHeap(gcInfo); 676 addPageToHeap(gcInfo);
675 bool success = allocateFromFreeList(minSize); 677 bool success = allocateFromFreeList(minSize);
676 RELEASE_ASSERT(success); 678 RELEASE_ASSERT(success);
677 } 679 }
678 680
679 template<typename Header> 681 template<typename Header>
(...skipping 169 matching lines...) Expand 10 before | Expand all | Expand 10 after
849 page->clearObjectStartBitMap(); 851 page->clearObjectStartBitMap();
850 page->resetPromptlyFreedSize(); 852 page->resetPromptlyFreedSize();
851 size_t freedCount = 0; 853 size_t freedCount = 0;
852 Address startOfGap = page->payload(); 854 Address startOfGap = page->payload();
853 for (Address headerAddress = startOfGap; headerAddress < page->end(); ) { 855 for (Address headerAddress = startOfGap; headerAddress < page->end(); ) {
854 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress); 856 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
855 ASSERT(basicHeader->size() > 0); 857 ASSERT(basicHeader->size() > 0);
856 ASSERT(basicHeader->size() < blinkPagePayloadSize()); 858 ASSERT(basicHeader->size() < blinkPagePayloadSize());
857 859
858 if (basicHeader->isPromptlyFreed()) { 860 if (basicHeader->isPromptlyFreed()) {
859 stats().decreaseObjectSpace(reinterpret_cast<Header*>(basicHeader)->payloadSize()); 861 stats().decreaseObjectSpace(reinterpret_cast<Header*>(basicHeader)->size());
860 size_t size = basicHeader->size(); 862 size_t size = basicHeader->size();
861 ASSERT(size >= sizeof(Header)); 863 ASSERT(size >= sizeof(Header));
862 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) 864 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
863 memset(headerAddress, 0, sizeof(Header)); 865 memset(headerAddress, 0, sizeof(Header));
864 #endif 866 #endif
865 ++freedCount; 867 ++freedCount;
866 headerAddress += size; 868 headerAddress += size;
867 continue; 869 continue;
868 } 870 }
869 871
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after
914 // Ensure that there is enough space for alignment. If the header 916 // Ensure that there is enough space for alignment. If the header
915 // is not a multiple of 8 bytes we will allocate an extra 917 // is not a multiple of 8 bytes we will allocate an extra
916 // headerPadding<Header> bytes to ensure it 8 byte aligned. 918 // headerPadding<Header> bytes to ensure it 8 byte aligned.
917 allocationSize += headerPadding<Header>(); 919 allocationSize += headerPadding<Header>();
918 920
919 // If ASan is supported we add allocationGranularity bytes to the allocated space and 921 // If ASan is supported we add allocationGranularity bytes to the allocated space and
920 // poison that to detect overflows 922 // poison that to detect overflows
921 #if defined(ADDRESS_SANITIZER) 923 #if defined(ADDRESS_SANITIZER)
922 allocationSize += allocationGranularity; 924 allocationSize += allocationGranularity;
923 #endif 925 #endif
926
927 // Update stats before checking if we should GC.
928 if (m_lastRemainingAllocationSize != m_remainingAllocationSize) {
929 stats().increaseObjectSpace(m_lastRemainingAllocationSize-m_remainingAllocationSize);
930 m_lastRemainingAllocationSize = m_remainingAllocationSize;
931 }
924 if (threadState()->shouldGC()) 932 if (threadState()->shouldGC())
925 threadState()->setGCRequested(); 933 threadState()->setGCRequested();
926 Heap::flushHeapDoesNotContainCache(); 934 Heap::flushHeapDoesNotContainCache();
927 PageMemory* pageMemory = PageMemory::allocate(allocationSize); 935 PageMemory* pageMemory = PageMemory::allocate(allocationSize);
928 Address largeObjectAddress = pageMemory->writableStart(); 936 Address largeObjectAddress = pageMemory->writableStart();
929 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); 937 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
930 memset(headerAddress, 0, size); 938 memset(headerAddress, 0, size);
931 Header* header = new (NotNull, headerAddress) Header(size, gcInfo); 939 Header* header = new (NotNull, headerAddress) Header(size, gcInfo);
932 Address result = headerAddress + sizeof(*header); 940 Address result = headerAddress + sizeof(*header);
933 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); 941 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
934 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState()); 942 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState());
935 943
936 // Poison the object header and allocationGranularity bytes after the object 944 // Poison the object header and allocationGranularity bytes after the object
937 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); 945 ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
938 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); 946 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
939 largeObject->link(&m_firstLargeHeapObject); 947 largeObject->link(&m_firstLargeHeapObject);
940 stats().increaseAllocatedSpace(largeObject->size()); 948 stats().increaseAllocatedSpace(largeObject->size());
941 stats().increaseObjectSpace(largeObject->payloadSize()); 949 stats().increaseObjectSpace(largeObject->size());
942 return result; 950 return result;
943 } 951 }
944 952
945 template<typename Header> 953 template<typename Header>
946 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeH eapObject<Header>** previousNext) 954 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeH eapObject<Header>** previousNext)
947 { 955 {
948 flushHeapContainsCache(); 956 flushHeapContainsCache();
949 object->unlink(previousNext); 957 object->unlink(previousNext);
950 object->finalize(); 958 object->finalize();
951 959
(...skipping 313 matching lines...) Expand 10 before | Expand all | Expand 10 after
1265 } 1273 }
1266 1274
1267 template<typename Header> 1275 template<typename Header>
1268 void ThreadHeap<Header>::sweepLargePages(HeapStats* stats) 1276 void ThreadHeap<Header>::sweepLargePages(HeapStats* stats)
1269 { 1277 {
1270 TRACE_EVENT0("blink_gc", "ThreadHeap::sweepLargePages"); 1278 TRACE_EVENT0("blink_gc", "ThreadHeap::sweepLargePages");
1271 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject; 1279 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject;
1272 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { 1280 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
1273 if (current->isMarked()) { 1281 if (current->isMarked()) {
1274 stats->increaseAllocatedSpace(current->size()); 1282 stats->increaseAllocatedSpace(current->size());
1275 stats->increaseObjectSpace(current->payloadSize()); 1283 stats->increaseObjectSpace(current->size());
1276 current->unmark(); 1284 current->unmark();
1277 previousNext = &current->m_next; 1285 previousNext = &current->m_next;
1278 current = current->next(); 1286 current = current->next();
1279 } else { 1287 } else {
1280 LargeHeapObject<Header>* next = current->next(); 1288 LargeHeapObject<Header>* next = current->next();
1281 freeLargeObject(current, previousNext); 1289 freeLargeObject(current, previousNext);
1282 current = next; 1290 current = next;
1283 } 1291 }
1284 } 1292 }
1285 } 1293 }
(...skipping 130 matching lines...) Expand 10 before | Expand all | Expand 10 after
1416 1424
1417 template<typename Header> 1425 template<typename Header>
1418 void HeapPage<Header>::getStats(HeapStats& stats) 1426 void HeapPage<Header>::getStats(HeapStats& stats)
1419 { 1427 {
1420 stats.increaseAllocatedSpace(blinkPageSize); 1428 stats.increaseAllocatedSpace(blinkPageSize);
1421 Address headerAddress = payload(); 1429 Address headerAddress = payload();
1422 ASSERT(headerAddress != end()); 1430 ASSERT(headerAddress != end());
1423 do { 1431 do {
1424 Header* header = reinterpret_cast<Header*>(headerAddress); 1432 Header* header = reinterpret_cast<Header*>(headerAddress);
1425 if (!header->isFree()) 1433 if (!header->isFree())
1426 stats.increaseObjectSpace(header->payloadSize()); 1434 stats.increaseObjectSpace(header->size());
1427 ASSERT(header->size() < blinkPagePayloadSize()); 1435 ASSERT(header->size() < blinkPagePayloadSize());
1428 headerAddress += header->size(); 1436 headerAddress += header->size();
1429 ASSERT(headerAddress <= end()); 1437 ASSERT(headerAddress <= end());
1430 } while (headerAddress < end()); 1438 } while (headerAddress < end());
1431 } 1439 }
1432 1440
1433 template<typename Header> 1441 template<typename Header>
1434 bool HeapPage<Header>::isEmpty() 1442 bool HeapPage<Header>::isEmpty()
1435 { 1443 {
1436 BasicObjectHeader* header = reinterpret_cast<BasicObjectHeader*>(payload()); 1444 BasicObjectHeader* header = reinterpret_cast<BasicObjectHeader*>(payload());
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after
1480 #endif 1488 #endif
1481 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); 1489 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
1482 headerAddress += size; 1490 headerAddress += size;
1483 continue; 1491 continue;
1484 } 1492 }
1485 1493
1486 if (startOfGap != headerAddress) 1494 if (startOfGap != headerAddress)
1487 heap->addToFreeList(startOfGap, headerAddress - startOfGap); 1495 heap->addToFreeList(startOfGap, headerAddress - startOfGap);
1488 header->unmark(); 1496 header->unmark();
1489 headerAddress += header->size(); 1497 headerAddress += header->size();
1490 stats->increaseObjectSpace(header->payloadSize()); 1498 stats->increaseObjectSpace(header->size());
1491 startOfGap = headerAddress; 1499 startOfGap = headerAddress;
1492 } 1500 }
1493 if (startOfGap != end()) 1501 if (startOfGap != end())
1494 heap->addToFreeList(startOfGap, end() - startOfGap); 1502 heap->addToFreeList(startOfGap, end() - startOfGap);
1495 } 1503 }
1496 1504
1497 template<typename Header> 1505 template<typename Header>
1498 void HeapPage<Header>::clearLiveAndMarkDead() 1506 void HeapPage<Header>::clearLiveAndMarkDead()
1499 { 1507 {
1500 for (Address headerAddress = payload(); headerAddress < end();) { 1508 for (Address headerAddress = payload(); headerAddress < end();) {
(...skipping 208 matching lines...) Expand 10 before | Expand all | Expand 10 after
1709 template<> 1717 template<>
1710 inline bool HeapPage<FinalizedHeapObjectHeader>::hasVTable(FinalizedHeapObjectHeader* header) 1718 inline bool HeapPage<FinalizedHeapObjectHeader>::hasVTable(FinalizedHeapObjectHeader* header)
1711 { 1719 {
1712 return header->hasVTable(); 1720 return header->hasVTable();
1713 } 1721 }
1714 1722
1715 template<typename Header> 1723 template<typename Header>
1716 void LargeHeapObject<Header>::getStats(HeapStats& stats) 1724 void LargeHeapObject<Header>::getStats(HeapStats& stats)
1717 { 1725 {
1718 stats.increaseAllocatedSpace(size()); 1726 stats.increaseAllocatedSpace(size());
1719 stats.increaseObjectSpace(payloadSize()); 1727 stats.increaseObjectSpace(size());
1720 } 1728 }
1721 1729
1722 #if ENABLE(GC_PROFILE_HEAP) 1730 #if ENABLE(GC_PROFILE_HEAP)
1723 template<typename Header> 1731 template<typename Header>
1724 void LargeHeapObject<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) 1732 void LargeHeapObject<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info)
1725 { 1733 {
1726 Header* header = heapObjectHeader(); 1734 Header* header = heapObjectHeader();
1727 size_t tag = info->getClassTag(header->gcInfo()); 1735 size_t tag = info->getClassTag(header->gcInfo());
1728 size_t age = header->age(); 1736 size_t age = header->age();
1729 if (isMarked()) { 1737 if (isMarked()) {
(...skipping 991 matching lines...) Expand 10 before | Expand all | Expand 10 after
2721 CallbackStack* Heap::s_markingStack; 2729 CallbackStack* Heap::s_markingStack;
2722 CallbackStack* Heap::s_postMarkingCallbackStack; 2730 CallbackStack* Heap::s_postMarkingCallbackStack;
2723 CallbackStack* Heap::s_weakCallbackStack; 2731 CallbackStack* Heap::s_weakCallbackStack;
2724 CallbackStack* Heap::s_ephemeronStack; 2732 CallbackStack* Heap::s_ephemeronStack;
2725 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; 2733 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
2726 bool Heap::s_shutdownCalled = false; 2734 bool Heap::s_shutdownCalled = false;
2727 bool Heap::s_lastGCWasConservative = false; 2735 bool Heap::s_lastGCWasConservative = false;
2728 FreePagePool* Heap::s_freePagePool; 2736 FreePagePool* Heap::s_freePagePool;
2729 OrphanedPagePool* Heap::s_orphanedPagePool; 2737 OrphanedPagePool* Heap::s_orphanedPagePool;
2730 } 2738 }
OLDNEW
« no previous file with comments | « Source/platform/heap/Heap.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698