Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(237)

Side by Side Diff: Source/platform/heap/Heap.cpp

Issue 1211243006: Oilpan: Replace checkHeader() with ASSERT(checkHeader()) (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 5 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « Source/platform/heap/Heap.h ('k') | Source/platform/heap/HeapAllocator.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 183 matching lines...) Expand 10 before | Expand all | Expand 10 after
194 SafePointScope m_safePointScope; 194 SafePointScope m_safePointScope;
195 ThreadState::GCType m_gcType; 195 ThreadState::GCType m_gcType;
196 OwnPtr<Visitor> m_visitor; 196 OwnPtr<Visitor> m_visitor;
197 bool m_parkedAllThreads; // False if we fail to park all threads 197 bool m_parkedAllThreads; // False if we fail to park all threads
198 }; 198 };
199 199
200 #if ENABLE(ASSERT) 200 #if ENABLE(ASSERT)
201 NO_SANITIZE_ADDRESS 201 NO_SANITIZE_ADDRESS
202 void HeapObjectHeader::zapMagic() 202 void HeapObjectHeader::zapMagic()
203 { 203 {
204 checkHeader(); 204 ASSERT(checkHeader());
205 m_magic = zappedMagic; 205 m_magic = zappedMagic;
206 } 206 }
207 #endif 207 #endif
208 208
209 void HeapObjectHeader::finalize(Address object, size_t objectSize) 209 void HeapObjectHeader::finalize(Address object, size_t objectSize)
210 { 210 {
211 const GCInfo* gcInfo = Heap::gcInfo(gcInfoIndex()); 211 const GCInfo* gcInfo = Heap::gcInfo(gcInfoIndex());
212 if (gcInfo->hasFinalizer()) { 212 if (gcInfo->hasFinalizer()) {
213 gcInfo->m_finalize(object); 213 gcInfo->m_finalize(object);
214 } 214 }
(...skipping 494 matching lines...) Expand 10 before | Expand all | Expand 10 after
709 } 709 }
710 if (header->isFree()) { 710 if (header->isFree()) {
711 // Zero the memory in the free list header to maintain the 711 // Zero the memory in the free list header to maintain the
712 // invariant that memory on the free list is zero filled. 712 // invariant that memory on the free list is zero filled.
713 // The rest of the memory is already on the free list and is 713 // The rest of the memory is already on the free list and is
714 // therefore already zero filled. 714 // therefore already zero filled.
715             FILL_ZERO_IF_PRODUCTION(headerAddress, size < sizeof(FreeListEntry) ? size : sizeof(FreeListEntry)); 715             FILL_ZERO_IF_PRODUCTION(headerAddress, size < sizeof(FreeListEntry) ? size : sizeof(FreeListEntry));
716 headerAddress += size; 716 headerAddress += size;
717 continue; 717 continue;
718 } 718 }
719 header->checkHeader(); 719 ASSERT(header->checkHeader());
720 if (startOfGap != headerAddress) 720 if (startOfGap != headerAddress)
721 addToFreeList(startOfGap, headerAddress - startOfGap); 721 addToFreeList(startOfGap, headerAddress - startOfGap);
722 722
723 headerAddress += size; 723 headerAddress += size;
724 startOfGap = headerAddress; 724 startOfGap = headerAddress;
725 } 725 }
726 726
727 if (startOfGap != page->payloadEnd()) 727 if (startOfGap != page->payloadEnd())
728 addToFreeList(startOfGap, page->payloadEnd() - startOfGap); 728 addToFreeList(startOfGap, page->payloadEnd() - startOfGap);
729 } 729 }
730 Heap::decreaseAllocatedObjectSize(freedSize); 730 Heap::decreaseAllocatedObjectSize(freedSize);
731 ASSERT(m_promptlyFreedSize == freedSize); 731 ASSERT(m_promptlyFreedSize == freedSize);
732 m_promptlyFreedSize = 0; 732 m_promptlyFreedSize = 0;
733 return true; 733 return true;
734 } 734 }
735 735
736 void NormalPageHeap::promptlyFreeObject(HeapObjectHeader* header) 736 void NormalPageHeap::promptlyFreeObject(HeapObjectHeader* header)
737 { 737 {
738 ASSERT(!threadState()->sweepForbidden()); 738 ASSERT(!threadState()->sweepForbidden());
739 header->checkHeader(); 739 ASSERT(header->checkHeader());
740 Address address = reinterpret_cast<Address>(header); 740 Address address = reinterpret_cast<Address>(header);
741 Address payload = header->payload(); 741 Address payload = header->payload();
742 size_t size = header->size(); 742 size_t size = header->size();
743 size_t payloadSize = header->payloadSize(); 743 size_t payloadSize = header->payloadSize();
744 ASSERT(size > 0); 744 ASSERT(size > 0);
745 ASSERT(pageFromObject(address) == findPageFromAddress(address)); 745 ASSERT(pageFromObject(address) == findPageFromAddress(address));
746 746
747 { 747 {
748 ThreadState::SweepForbiddenScope forbiddenScope(threadState()); 748 ThreadState::SweepForbiddenScope forbiddenScope(threadState());
749 header->finalize(payload, payloadSize); 749 header->finalize(payload, payloadSize);
(...skipping 13 matching lines...) Expand all
763 } 763 }
764 764
765 m_promptlyFreedSize += size; 765 m_promptlyFreedSize += size;
766 } 766 }
767 767
768 bool NormalPageHeap::expandObject(HeapObjectHeader* header, size_t newSize) 768 bool NormalPageHeap::expandObject(HeapObjectHeader* header, size_t newSize)
769 { 769 {
770 // It's possible that Vector requests a smaller expanded size because 770 // It's possible that Vector requests a smaller expanded size because
771 // Vector::shrinkCapacity can set a capacity smaller than the actual payload 771 // Vector::shrinkCapacity can set a capacity smaller than the actual payload
772 // size. 772 // size.
773 header->checkHeader(); 773 ASSERT(header->checkHeader());
774 if (header->payloadSize() >= newSize) 774 if (header->payloadSize() >= newSize)
775 return true; 775 return true;
776 size_t allocationSize = Heap::allocationSizeFromSize(newSize); 776 size_t allocationSize = Heap::allocationSizeFromSize(newSize);
777 ASSERT(allocationSize > header->size()); 777 ASSERT(allocationSize > header->size());
778 size_t expandSize = allocationSize - header->size(); 778 size_t expandSize = allocationSize - header->size();
779     if (header->payloadEnd() == m_currentAllocationPoint && expandSize <= m_remainingAllocationSize) { 779     if (header->payloadEnd() == m_currentAllocationPoint && expandSize <= m_remainingAllocationSize) {
780 m_currentAllocationPoint += expandSize; 780 m_currentAllocationPoint += expandSize;
781 m_remainingAllocationSize -= expandSize; 781 m_remainingAllocationSize -= expandSize;
782 782
783 // Unpoison the memory used for the object (payload). 783 // Unpoison the memory used for the object (payload).
784 ASAN_UNPOISON_MEMORY_REGION(header->payloadEnd(), expandSize); 784 ASAN_UNPOISON_MEMORY_REGION(header->payloadEnd(), expandSize);
785 FILL_ZERO_IF_NOT_PRODUCTION(header->payloadEnd(), expandSize); 785 FILL_ZERO_IF_NOT_PRODUCTION(header->payloadEnd(), expandSize);
786 header->setSize(allocationSize); 786 header->setSize(allocationSize);
787 ASSERT(findPageFromAddress(header->payloadEnd() - 1)); 787 ASSERT(findPageFromAddress(header->payloadEnd() - 1));
788 return true; 788 return true;
789 } 789 }
790 return false; 790 return false;
791 } 791 }
792 792
793 bool NormalPageHeap::shrinkObject(HeapObjectHeader* header, size_t newSize) 793 bool NormalPageHeap::shrinkObject(HeapObjectHeader* header, size_t newSize)
794 { 794 {
795 header->checkHeader(); 795 ASSERT(header->checkHeader());
796 ASSERT(header->payloadSize() > newSize); 796 ASSERT(header->payloadSize() > newSize);
797 size_t allocationSize = Heap::allocationSizeFromSize(newSize); 797 size_t allocationSize = Heap::allocationSizeFromSize(newSize);
798 ASSERT(header->size() > allocationSize); 798 ASSERT(header->size() > allocationSize);
799 size_t shrinkSize = header->size() - allocationSize; 799 size_t shrinkSize = header->size() - allocationSize;
800 if (header->payloadEnd() == m_currentAllocationPoint) { 800 if (header->payloadEnd() == m_currentAllocationPoint) {
801 m_currentAllocationPoint -= shrinkSize; 801 m_currentAllocationPoint -= shrinkSize;
802 m_remainingAllocationSize += shrinkSize; 802 m_remainingAllocationSize += shrinkSize;
803 FILL_ZERO_IF_PRODUCTION(m_currentAllocationPoint, shrinkSize); 803 FILL_ZERO_IF_PRODUCTION(m_currentAllocationPoint, shrinkSize);
804 ASAN_POISON_MEMORY_REGION(m_currentAllocationPoint, shrinkSize); 804 ASAN_POISON_MEMORY_REGION(m_currentAllocationPoint, shrinkSize);
805 header->setSize(allocationSize); 805 header->setSize(allocationSize);
(...skipping 195 matching lines...) Expand 10 before | Expand all | Expand 10 after
1001 #if ENABLE(ASSERT) 1001 #if ENABLE(ASSERT)
1002 // Verify that the allocated PageMemory is expectedly zeroed. 1002 // Verify that the allocated PageMemory is expectedly zeroed.
1003 for (size_t i = 0; i < largeObjectSize; ++i) 1003 for (size_t i = 0; i < largeObjectSize; ++i)
1004 ASSERT(!largeObjectAddress[i]); 1004 ASSERT(!largeObjectAddress[i]);
1005 #endif 1005 #endif
1006 ASSERT(gcInfoIndex > 0); 1006 ASSERT(gcInfoIndex > 0);
1007     HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex); 1007     HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex);
1008 Address result = headerAddress + sizeof(*header); 1008 Address result = headerAddress + sizeof(*header);
1009 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); 1009 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
1010     LargeObjectPage* largeObject = new (largeObjectAddress) LargeObjectPage(pageMemory, this, allocationSize); 1010     LargeObjectPage* largeObject = new (largeObjectAddress) LargeObjectPage(pageMemory, this, allocationSize);
1011 header->checkHeader(); 1011 ASSERT(header->checkHeader());
1012 1012
1013 // Poison the object header and allocationGranularity bytes after the object 1013 // Poison the object header and allocationGranularity bytes after the object
1014 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); 1014 ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
1015     ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); 1015     ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
1016 1016
1017 largeObject->link(&m_firstPage); 1017 largeObject->link(&m_firstPage);
1018 1018
1019 Heap::increaseAllocatedSpace(largeObject->size()); 1019 Heap::increaseAllocatedSpace(largeObject->size());
1020 Heap::increaseAllocatedObjectSize(largeObject->size()); 1020 Heap::increaseAllocatedObjectSize(largeObject->size());
1021 return result; 1021 return result;
(...skipping 179 matching lines...) Expand 10 before | Expand all | Expand 10 after
1201 1201
1202 size_t NormalPage::objectPayloadSizeForTesting() 1202 size_t NormalPage::objectPayloadSizeForTesting()
1203 { 1203 {
1204 size_t objectPayloadSize = 0; 1204 size_t objectPayloadSize = 0;
1205 Address headerAddress = payload(); 1205 Address headerAddress = payload();
1206 markAsSwept(); 1206 markAsSwept();
1207 ASSERT(headerAddress != payloadEnd()); 1207 ASSERT(headerAddress != payloadEnd());
1208 do { 1208 do {
1209         HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); 1209         HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress);
1210 if (!header->isFree()) { 1210 if (!header->isFree()) {
1211 header->checkHeader(); 1211 ASSERT(header->checkHeader());
1212 objectPayloadSize += header->payloadSize(); 1212 objectPayloadSize += header->payloadSize();
1213 } 1213 }
1214 ASSERT(header->size() < blinkPagePayloadSize()); 1214 ASSERT(header->size() < blinkPagePayloadSize());
1215 headerAddress += header->size(); 1215 headerAddress += header->size();
1216 ASSERT(headerAddress <= payloadEnd()); 1216 ASSERT(headerAddress <= payloadEnd());
1217 } while (headerAddress < payloadEnd()); 1217 } while (headerAddress < payloadEnd());
1218 return objectPayloadSize; 1218 return objectPayloadSize;
1219 } 1219 }
1220 1220
1221 bool NormalPage::isEmpty() 1221 bool NormalPage::isEmpty()
(...skipping 23 matching lines...) Expand all
1245 if (header->isFree()) { 1245 if (header->isFree()) {
1246 size_t size = header->size(); 1246 size_t size = header->size();
1247 // Zero the memory in the free list header to maintain the 1247 // Zero the memory in the free list header to maintain the
1248 // invariant that memory on the free list is zero filled. 1248 // invariant that memory on the free list is zero filled.
1249 // The rest of the memory is already on the free list and is 1249 // The rest of the memory is already on the free list and is
1250 // therefore already zero filled. 1250 // therefore already zero filled.
1251 FILL_ZERO_IF_PRODUCTION(headerAddress, size < sizeof(FreeListEntry) ? size : sizeof(FreeListEntry)); 1251 FILL_ZERO_IF_PRODUCTION(headerAddress, size < sizeof(FreeListEntry) ? size : sizeof(FreeListEntry));
1252 headerAddress += size; 1252 headerAddress += size;
1253 continue; 1253 continue;
1254 } 1254 }
1255 header->checkHeader(); 1255 ASSERT(header->checkHeader());
1256 1256
1257 if (!header->isMarked()) { 1257 if (!header->isMarked()) {
1258 size_t size = header->size(); 1258 size_t size = header->size();
1259 // This is a fast version of header->payloadSize(). 1259 // This is a fast version of header->payloadSize().
1260 size_t payloadSize = size - sizeof(HeapObjectHeader); 1260 size_t payloadSize = size - sizeof(HeapObjectHeader);
1261 Address payload = header->payload(); 1261 Address payload = header->payload();
1262 // For ASan we unpoison the specific object when calling the 1262 // For ASan we unpoison the specific object when calling the
1263 // finalizer and poison it again when done to allow the object's own 1263 // finalizer and poison it again when done to allow the object's own
1264 // finalizer to operate on the object. Given all other unmarked 1264 // finalizer to operate on the object. Given all other unmarked
1265 // objects are poisoned, ASan will detect an error if the finalizer 1265 // objects are poisoned, ASan will detect an error if the finalizer
(...skipping 26 matching lines...) Expand all
1292 size_t markedObjectSize = 0; 1292 size_t markedObjectSize = 0;
1293 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { 1293 for (Address headerAddress = payload(); headerAddress < payloadEnd();) {
1294         HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); 1294         HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress);
1295 ASSERT(header->size() < blinkPagePayloadSize()); 1295 ASSERT(header->size() < blinkPagePayloadSize());
1296 // Check if a free list entry first since we cannot call 1296 // Check if a free list entry first since we cannot call
1297 // isMarked on a free list entry. 1297 // isMarked on a free list entry.
1298 if (header->isFree()) { 1298 if (header->isFree()) {
1299 headerAddress += header->size(); 1299 headerAddress += header->size();
1300 continue; 1300 continue;
1301 } 1301 }
1302 header->checkHeader(); 1302 ASSERT(header->checkHeader());
1303 if (header->isMarked()) { 1303 if (header->isMarked()) {
1304 header->unmark(); 1304 header->unmark();
1305 markedObjectSize += header->size(); 1305 markedObjectSize += header->size();
1306 } else { 1306 } else {
1307 header->markDead(); 1307 header->markDead();
1308 } 1308 }
1309 headerAddress += header->size(); 1309 headerAddress += header->size();
1310 } 1310 }
1311 if (markedObjectSize) 1311 if (markedObjectSize)
1312 Heap::increaseMarkedObjectSize(markedObjectSize); 1312 Heap::increaseMarkedObjectSize(markedObjectSize);
1313 } 1313 }
1314 1314
1315 void NormalPage::makeConsistentForMutator() 1315 void NormalPage::makeConsistentForMutator()
1316 { 1316 {
1317 Address startOfGap = payload(); 1317 Address startOfGap = payload();
1318 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { 1318 for (Address headerAddress = payload(); headerAddress < payloadEnd();) {
1319         HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); 1319         HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress);
1320 ASSERT(header->size() < blinkPagePayloadSize()); 1320 ASSERT(header->size() < blinkPagePayloadSize());
1321 if (header->isPromptlyFreed()) 1321 if (header->isPromptlyFreed())
1322 heapForNormalPage()->decreasePromptlyFreedSize(header->size()); 1322 heapForNormalPage()->decreasePromptlyFreedSize(header->size());
1323 if (header->isFree()) { 1323 if (header->isFree()) {
1324 headerAddress += header->size(); 1324 headerAddress += header->size();
1325 continue; 1325 continue;
1326 } 1326 }
1327 header->checkHeader(); 1327 ASSERT(header->checkHeader());
1328 1328
1329 if (startOfGap != headerAddress) 1329 if (startOfGap != headerAddress)
1330             heapForNormalPage()->addToFreeList(startOfGap, headerAddress - startOfGap); 1330             heapForNormalPage()->addToFreeList(startOfGap, headerAddress - startOfGap);
1331 if (header->isMarked()) 1331 if (header->isMarked())
1332 header->unmark(); 1332 header->unmark();
1333 headerAddress += header->size(); 1333 headerAddress += header->size();
1334 startOfGap = headerAddress; 1334 startOfGap = headerAddress;
1335 } 1335 }
1336 if (startOfGap != payloadEnd()) 1336 if (startOfGap != payloadEnd())
1337         heapForNormalPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap); 1337         heapForNormalPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap);
1338 } 1338 }
1339 1339
1340 #if defined(ADDRESS_SANITIZER) 1340 #if defined(ADDRESS_SANITIZER)
1341 void NormalPage::poisonObjects(ObjectsToPoison objectsToPoison, Poisoning poisoning) 1341 void NormalPage::poisonObjects(ObjectsToPoison objectsToPoison, Poisoning poisoning)
1342 { 1342 {
1343 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { 1343 for (Address headerAddress = payload(); headerAddress < payloadEnd();) {
1344         HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); 1344         HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress);
1345 ASSERT(header->size() < blinkPagePayloadSize()); 1345 ASSERT(header->size() < blinkPagePayloadSize());
1346 // Check if a free list entry first since we cannot call 1346 // Check if a free list entry first since we cannot call
1347 // isMarked on a free list entry. 1347 // isMarked on a free list entry.
1348 if (header->isFree()) { 1348 if (header->isFree()) {
1349 headerAddress += header->size(); 1349 headerAddress += header->size();
1350 continue; 1350 continue;
1351 } 1351 }
1352 header->checkHeader(); 1352 ASSERT(header->checkHeader());
1353 if (objectsToPoison == MarkedAndUnmarked || !header->isMarked()) { 1353 if (objectsToPoison == MarkedAndUnmarked || !header->isMarked()) {
1354 if (poisoning == SetPoison) 1354 if (poisoning == SetPoison)
1355                 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); 1355                 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
1356 else 1356 else
1357                 ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize()); 1357                 ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize());
1358 } 1358 }
1359 headerAddress += header->size(); 1359 headerAddress += header->size();
1360 } 1360 }
1361 } 1361 }
1362 #endif 1362 #endif
(...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after
1418 ASSERT(mapIndex > 0); 1418 ASSERT(mapIndex > 0);
1419 byte = m_objectStartBitMap[--mapIndex]; 1419 byte = m_objectStartBitMap[--mapIndex];
1420 } 1420 }
1421 int leadingZeroes = numberOfLeadingZeroes(byte); 1421 int leadingZeroes = numberOfLeadingZeroes(byte);
1422 objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes; 1422 objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes;
1423 objectOffset = objectStartNumber * allocationGranularity; 1423 objectOffset = objectStartNumber * allocationGranularity;
1424 Address objectAddress = objectOffset + payload(); 1424 Address objectAddress = objectOffset + payload();
1425     HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(objectAddress); 1425     HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(objectAddress);
1426 if (header->isFree()) 1426 if (header->isFree())
1427 return nullptr; 1427 return nullptr;
1428 header->checkHeader(); 1428 ASSERT(header->checkHeader());
1429 return header; 1429 return header;
1430 } 1430 }
1431 1431
1432 #if ENABLE(ASSERT) 1432 #if ENABLE(ASSERT)
1433 static bool isUninitializedMemory(void* objectPointer, size_t objectSize) 1433 static bool isUninitializedMemory(void* objectPointer, size_t objectSize)
1434 { 1434 {
1435 // Scan through the object's fields and check that they are all zero. 1435 // Scan through the object's fields and check that they are all zero.
1436 Address* objectFields = reinterpret_cast<Address*>(objectPointer); 1436 Address* objectFields = reinterpret_cast<Address*>(objectPointer);
1437 for (size_t i = 0; i < objectSize / sizeof(Address); ++i) { 1437 for (size_t i = 0; i < objectSize / sizeof(Address); ++i) {
1438 if (objectFields[i] != 0) 1438 if (objectFields[i] != 0)
1439 return false; 1439 return false;
1440 } 1440 }
1441 return true; 1441 return true;
1442 } 1442 }
1443 #endif 1443 #endif
1444 1444
1445 static void markPointer(Visitor* visitor, HeapObjectHeader* header) 1445 static void markPointer(Visitor* visitor, HeapObjectHeader* header)
1446 { 1446 {
1447 header->checkHeader(); 1447 ASSERT(header->checkHeader());
1448 const GCInfo* gcInfo = Heap::gcInfo(header->gcInfoIndex()); 1448 const GCInfo* gcInfo = Heap::gcInfo(header->gcInfoIndex());
1449 if (gcInfo->hasVTable() && !vTableInitialized(header->payload())) { 1449 if (gcInfo->hasVTable() && !vTableInitialized(header->payload())) {
1450 // We hit this branch when a GC strikes before GarbageCollected<>'s 1450 // We hit this branch when a GC strikes before GarbageCollected<>'s
1451 // constructor runs. 1451 // constructor runs.
1452 // 1452 //
1453 // class A : public GarbageCollected<A> { virtual void f() = 0; }; 1453 // class A : public GarbageCollected<A> { virtual void f() = 0; };
1454 // class B : public A { 1454 // class B : public A {
1455 // B() : A(foo()) { }; 1455 // B() : A(foo()) { };
1456 // }; 1456 // };
1457 // 1457 //
(...skipping 82 matching lines...) Expand 10 before | Expand all | Expand 10 after
1540 { 1540 {
1541 HeapObjectHeader* header = nullptr; 1541 HeapObjectHeader* header = nullptr;
1542 for (Address addr = payload(); addr < payloadEnd(); addr += header->size()) { 1542 for (Address addr = payload(); addr < payloadEnd(); addr += header->size()) {
1543 header = reinterpret_cast<HeapObjectHeader*>(addr); 1543 header = reinterpret_cast<HeapObjectHeader*>(addr);
1544 if (json) 1544 if (json)
1545 json->pushInteger(header->encodedSize()); 1545 json->pushInteger(header->encodedSize());
1546 if (header->isFree()) { 1546 if (header->isFree()) {
1547 info->freeSize += header->size(); 1547 info->freeSize += header->size();
1548 continue; 1548 continue;
1549 } 1549 }
1550 header->checkHeader(); 1550 ASSERT(header->checkHeader());
1551 1551
1552 size_t tag = info->getClassTag(Heap::gcInfo(header->gcInfoIndex())); 1552 size_t tag = info->getClassTag(Heap::gcInfo(header->gcInfoIndex()));
1553 size_t age = header->age(); 1553 size_t age = header->age();
1554 if (json) 1554 if (json)
1555 json->pushInteger(tag); 1555 json->pushInteger(tag);
1556 if (header->isMarked()) { 1556 if (header->isMarked()) {
1557 info->liveCount[tag] += 1; 1557 info->liveCount[tag] += 1;
1558 info->liveSize[tag] += header->size(); 1558 info->liveSize[tag] += header->size();
1559 // Count objects that are live when promoted to the final generation . 1559 // Count objects that are live when promoted to the final generation .
1560 if (age == maxHeapObjectAge - 1) 1560 if (age == maxHeapObjectAge - 1)
(...skipping 882 matching lines...) Expand 10 before | Expand all | Expand 10 after
2443 size_t Heap::s_allocatedObjectSize = 0; 2443 size_t Heap::s_allocatedObjectSize = 0;
2444 size_t Heap::s_allocatedSpace = 0; 2444 size_t Heap::s_allocatedSpace = 0;
2445 size_t Heap::s_markedObjectSize = 0; 2445 size_t Heap::s_markedObjectSize = 0;
2446 // We don't want to use 0 KB for the initial value because it may end up 2446 // We don't want to use 0 KB for the initial value because it may end up
2447 // triggering the first GC of some thread too prematurely. 2447 // triggering the first GC of some thread too prematurely.
2448 size_t Heap::s_estimatedLiveObjectSize = 512 * 1024; 2448 size_t Heap::s_estimatedLiveObjectSize = 512 * 1024;
2449 size_t Heap::s_externalObjectSizeAtLastGC = 0; 2449 size_t Heap::s_externalObjectSizeAtLastGC = 0;
2450 double Heap::s_estimatedMarkingTimePerByte = 0.0; 2450 double Heap::s_estimatedMarkingTimePerByte = 0.0;
2451 2451
2452 } // namespace blink 2452 } // namespace blink
OLDNEW
« no previous file with comments | « Source/platform/heap/Heap.h ('k') | Source/platform/heap/HeapAllocator.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698