Chromium Code Reviews

Unified Diff: third_party/WebKit/Source/platform/heap/HeapPage.cpp

Issue 2786843002: Make HeapObjectHeader::checkHeader private. (Closed)
Patch Set: Created 3 years, 8 months ago
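
This patch removes the external header->checkHeader() call sites shown in the diff below. As a minimal sketch of the general idea, assuming the validity check moves behind HeapObjectHeader's own accessors once checkHeader() is private (the class shape, member names, and size encoding here are illustrative placeholders, not taken from this patch):

#include <assert.h>
#include <stdint.h>

class HeapObjectHeader {
 public:
  explicit HeapObjectHeader(uint32_t encodedSize) : m_encoded(encodedSize) {}

  uint32_t size() const {
    checkHeader();     // the header verifies itself on each access (assumed placement)
    return m_encoded;  // illustrative; the real header packs more state than a raw size
  }

 private:
  // Private: only HeapObjectHeader's own member functions can run the check,
  // so external call sites like the ones removed in this diff go away.
  void checkHeader() const { assert(m_encoded != 0); }

  uint32_t m_encoded;
};

int main() {
  HeapObjectHeader header(64);
  return header.size() == 64 ? 0 : 1;
}

Under that assumption, callers such as promptlyFreeObject() get the header checked implicitly whenever they use an accessor, which is why the explicit calls below become redundant.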
 /*
  * Copyright (C) 2013 Google Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
  * met:
  *
  *     * Redistributions of source code must retain the above copyright
  * notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above
(...skipping 688 matching lines...)
         // invariant that memory on the free list is zero filled.
         // The rest of the memory is already on the free list and is
         // therefore already zero filled.
         SET_MEMORY_INACCESSIBLE(headerAddress, size < sizeof(FreeListEntry)
                                                    ? size
                                                    : sizeof(FreeListEntry));
         CHECK_MEMORY_INACCESSIBLE(headerAddress, size);
         headerAddress += size;
         continue;
       }
-      header->checkHeader();
       if (startOfGap != headerAddress)
         addToFreeList(startOfGap, headerAddress - startOfGap);
 
       headerAddress += size;
       startOfGap = headerAddress;
     }
 
     if (startOfGap != page->payloadEnd())
       addToFreeList(startOfGap, page->payloadEnd() - startOfGap);
   }
   getThreadState()->decreaseAllocatedObjectSize(freedSize);
   ASSERT(m_promptlyFreedSize == freedSize);
   m_promptlyFreedSize = 0;
   return true;
 }
 
 void NormalPageArena::promptlyFreeObject(HeapObjectHeader* header) {
   ASSERT(!getThreadState()->sweepForbidden());
-  header->checkHeader();
   Address address = reinterpret_cast<Address>(header);
   Address payload = header->payload();
   size_t size = header->size();
   size_t payloadSize = header->payloadSize();
   ASSERT(size > 0);
   ASSERT(pageFromObject(address) == findPageFromAddress(address));
 
   {
     ThreadState::SweepForbiddenScope forbiddenScope(getThreadState());
     header->finalize(payload, payloadSize);
     if (address + size == m_currentAllocationPoint) {
       m_currentAllocationPoint = address;
       setRemainingAllocationSize(m_remainingAllocationSize + size);
       SET_MEMORY_INACCESSIBLE(address, size);
       return;
     }
     SET_MEMORY_INACCESSIBLE(payload, payloadSize);
     header->markPromptlyFreed();
   }
 
   m_promptlyFreedSize += size;
 }
 
 bool NormalPageArena::expandObject(HeapObjectHeader* header, size_t newSize) {
   // It's possible that Vector requests a smaller expanded size because
   // Vector::shrinkCapacity can set a capacity smaller than the actual payload
   // size.
-  header->checkHeader();
   if (header->payloadSize() >= newSize)
     return true;
   size_t allocationSize = ThreadHeap::allocationSizeFromSize(newSize);
   ASSERT(allocationSize > header->size());
   size_t expandSize = allocationSize - header->size();
   if (isObjectAllocatedAtAllocationPoint(header) &&
       expandSize <= m_remainingAllocationSize) {
     m_currentAllocationPoint += expandSize;
     ASSERT(m_remainingAllocationSize >= expandSize);
     setRemainingAllocationSize(m_remainingAllocationSize - expandSize);
     // Unpoison the memory used for the object (payload).
     SET_MEMORY_ACCESSIBLE(header->payloadEnd(), expandSize);
     header->setSize(allocationSize);
     ASSERT(findPageFromAddress(header->payloadEnd() - 1));
     return true;
   }
   return false;
 }
 
 bool NormalPageArena::shrinkObject(HeapObjectHeader* header, size_t newSize) {
-  header->checkHeader();
   ASSERT(header->payloadSize() > newSize);
   size_t allocationSize = ThreadHeap::allocationSizeFromSize(newSize);
   ASSERT(header->size() > allocationSize);
   size_t shrinkSize = header->size() - allocationSize;
   if (isObjectAllocatedAtAllocationPoint(header)) {
     m_currentAllocationPoint -= shrinkSize;
     setRemainingAllocationSize(m_remainingAllocationSize + shrinkSize);
     SET_MEMORY_INACCESSIBLE(m_currentAllocationPoint, shrinkSize);
     header->setSize(allocationSize);
     return true;
(...skipping 208 matching lines...)
   for (size_t i = 0; i < largeObjectSize; ++i)
     ASSERT(!largeObjectAddress[i]);
 #endif
   ASSERT(gcInfoIndex > 0);
   HeapObjectHeader* header = new (NotNull, headerAddress)
       HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex);
   Address result = headerAddress + sizeof(*header);
   ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
   LargeObjectPage* largeObject = new (largeObjectAddress)
       LargeObjectPage(pageMemory, this, allocationSize);
-  header->checkHeader();
 
   // Poison the object header and allocationGranularity bytes after the object
   ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
   ASAN_POISON_MEMORY_REGION(largeObject->getAddress() + largeObject->size(),
                             allocationGranularity);
 
   largeObject->link(&m_firstPage);
 
   getThreadState()->heap().heapStats().increaseAllocatedSpace(
       largeObject->size());
(...skipping 237 matching lines...)
 
 size_t NormalPage::objectPayloadSizeForTesting() {
   size_t objectPayloadSize = 0;
   Address headerAddress = payload();
   markAsSwept();
   ASSERT(headerAddress != payloadEnd());
   do {
     HeapObjectHeader* header =
         reinterpret_cast<HeapObjectHeader*>(headerAddress);
     if (!header->isFree()) {
-      header->checkHeader();
       objectPayloadSize += header->payloadSize();
     }
     ASSERT(header->size() < blinkPagePayloadSize());
     headerAddress += header->size();
     ASSERT(headerAddress <= payloadEnd());
   } while (headerAddress < payloadEnd());
   return objectPayloadSize;
 }
 
 bool NormalPage::isEmpty() {
(...skipping 295 matching lines...)
     ASSERT(mapIndex > 0);
     byte = m_objectStartBitMap[--mapIndex];
   }
   int leadingZeroes = numberOfLeadingZeroes(byte);
   objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes;
   objectOffset = objectStartNumber * allocationGranularity;
   Address objectAddress = objectOffset + payload();
   HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(objectAddress);
   if (header->isFree())
     return nullptr;
-  header->checkHeader();
   return header;
 }
 
 #if DCHECK_IS_ON()
 static bool isUninitializedMemory(void* objectPointer, size_t objectSize) {
   // Scan through the object's fields and check that they are all zero.
   Address* objectFields = reinterpret_cast<Address*>(objectPointer);
   for (size_t i = 0; i < objectSize / sizeof(Address); ++i) {
     if (objectFields[i] != 0)
       return false;
   }
   return true;
 }
 #endif
 
 static void markPointer(Visitor* visitor, HeapObjectHeader* header) {
-  header->checkHeader();
   const GCInfo* gcInfo = ThreadHeap::gcInfo(header->gcInfoIndex());
   if (gcInfo->hasVTable() && !vTableInitialized(header->payload())) {
     // We hit this branch when a GC strikes before GarbageCollected<>'s
     // constructor runs.
     //
     // class A : public GarbageCollected<A> { virtual void f() = 0; };
     // class B : public A {
     //   B() : A(foo()) { };
     // };
     //
(...skipping 218 matching lines...)
 
   m_hasEntries = true;
   size_t index = hash(address);
   ASSERT(!(index & 1));
   Address cachePage = roundToBlinkPageStart(address);
   m_entries[index + 1] = m_entries[index];
   m_entries[index] = cachePage;
 }
 
 }  // namespace blink