Chromium Code Reviews

Unified Diff: Source/platform/heap/Heap.cpp

Issue 1157933002: Oilpan: introduce eager finalization. (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: round of improvements (created 5 years, 6 months ago)
 /*
  * Copyright (C) 2013 Google Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
  * met:
  *
  *     * Redistributions of source code must retain the above copyright
  * notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above
(...skipping 328 matching lines...)
     // Move all pages to a list of unswept pages.
     m_firstUnsweptPage = m_firstPage;
     m_firstPage = nullptr;
 }
 
 #if defined(ADDRESS_SANITIZER)
 void BaseHeap::poisonUnmarkedObjects()
 {
     // This method is called just before starting sweeping.
     // Thus all dead objects are in the list of m_firstUnsweptPage.
-    for (BasePage* page = m_firstUnsweptPage; page; page = page->next()) {
-        page->poisonUnmarkedObjects();
+    for (BasePage* page = m_firstUnsweptPage; page; page = page->next())
+        page->poisonObjects(UnmarkedOnly, SetPoison);
+}
+
+void BaseHeap::poisonHeap(Poisoning poisoning)
haraken 2015/06/01 08:42:52 I'd call this function poisonUnmarkedAndMarkedObjects.
sof 2015/06/01 14:51:36 Addressed (see below.)
+{
+    // TODO(sof): support poisoning of all heaps.
+    ASSERT(heapIndex() == EagerSweepHeapIndex);
+    // This method is called with SetPoison just before starting sweeping
+    // of (eager) heaps. Hence, all objects will be in m_firstUnsweptPage
+    // before start.
+    if (poisoning == SetPoison) {
+        ASSERT(!m_firstPage);
+        for (BasePage* page = m_firstUnsweptPage; page; page = page->next()) {
+            page->poisonObjects(UnmarkedOrMarked, SetPoison);
+        }
+        return;
+    }
+    ASSERT(!m_firstUnsweptPage);
+    for (BasePage* page = m_firstPage; page; page = page->next()) {
+        page->poisonObjects(UnmarkedOnly, ClearPoison);
haraken 2015/06/01 08:42:52 Nit: If we call the function poisonUnmarkedAndMarkedObjects, …
sof 2015/06/01 14:51:36 There wouldn't, but that observation made me realise …
     }
 }
 #endif
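
[Note: a minimal sketch of how the SetPoison/ClearPoison pair is meant to bracket sweeping of the eagerly finalized heap. The driver function below is hypothetical; only BaseHeap::poisonHeap and ThreadState::heap(int) appear in this patch.]

#if defined(ADDRESS_SANITIZER)
// Hypothetical driver, not part of this patch.
static void sweepEagerHeap(ThreadState* state)
{
    BaseHeap* eagerHeap = state->heap(EagerSweepHeapIndex);
    // Poison marked and unmarked objects alike before sweeping starts, so
    // that a finalizer touching a not-yet-swept eager object trips ASan.
    eagerHeap->poisonHeap(SetPoison);
    eagerHeap->completeSweep();
    // Once swept, clear the poisoning again so ordinary access works.
    eagerHeap->poisonHeap(ClearPoison);
}
#endif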
 
 Address BaseHeap::lazySweep(size_t allocationSize, size_t gcInfoIndex)
 {
     // If there are no pages to be swept, return immediately.
     if (!m_firstUnsweptPage)
         return nullptr;
 
(...skipping 414 matching lines...)
 {
     ASSERT(allocationSize > remainingAllocationSize());
     ASSERT(allocationSize >= allocationGranularity);
 
 #if ENABLE(GC_PROFILING)
     threadState()->snapshotFreeListIfNecessary();
 #endif
 
     // 1. If this allocation is big enough, allocate a large object.
     if (allocationSize >= largeObjectSizeThreshold) {
+        // TODO(sof): support eagerly finalized large objects, if ever needed.
+        ASSERT(heapIndex() != EagerSweepHeapIndex);
haraken 2015/06/01 08:42:52 I'd use RELEASE_ASSERT, since if we hit this, we'll …
sof 2015/06/01 14:51:36 switched; to the extent there's overhead, this isn't …
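
[Note: in Blink, ASSERT is compiled out of release builds while RELEASE_ASSERT stays active, which is the substance of the exchange above. A simplified sketch of the distinction; the actual macros in wtf/Assertions.h differ in detail.]

// Simplified sketch only; see wtf/Assertions.h for the real definitions.
#if ENABLE(ASSERT)
#define ASSERT(assertion) \
    ((assertion) ? (void)0 : IMMEDIATE_CRASH())  // active in debug builds
#else
#define ASSERT(assertion) ((void)0)              // no-op in release builds
#endif

// RELEASE_ASSERT also crashes in release builds, so an eagerly finalized
// large object could never silently reach the lazily swept large-object heap.
#define RELEASE_ASSERT(assertion) \
    ((assertion) ? (void)0 : IMMEDIATE_CRASH())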
         LargeObjectHeap* largeObjectHeap = static_cast<LargeObjectHeap*>(threadState()->heap(LargeObjectHeapIndex));
         Address largeObject = largeObjectHeap->allocateLargeObjectPage(allocationSize, gcInfoIndex);
         ASAN_MARK_LARGE_VECTOR_CONTAINER(this, largeObject);
         return largeObject;
     }
 
     // 2. Check if we should trigger a GC.
     updateRemainingAllocationSize();
     threadState()->scheduleGCIfNeeded();
 
(...skipping 350 matching lines...)
             // touches any other on-heap object that dies at the same GC cycle.
             ASAN_UNPOISON_MEMORY_REGION(payload, payloadSize);
             header->finalize(payload, payloadSize);
             // This memory will be added to the freelist. Maintain the invariant
             // that memory on the freelist is zero filled.
             FILL_ZERO_IF_PRODUCTION(headerAddress, size);
             ASAN_POISON_MEMORY_REGION(payload, payloadSize);
             headerAddress += size;
             continue;
         }
-
         if (startOfGap != headerAddress)
             heapForNormalPage()->addToFreeList(startOfGap, headerAddress - startOfGap);
         header->unmark();
         headerAddress += header->size();
         markedObjectSize += header->size();
         startOfGap = headerAddress;
     }
     if (startOfGap != payloadEnd())
         heapForNormalPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap);
 
(...skipping 20 matching lines...)
         } else {
             header->markDead();
         }
         headerAddress += header->size();
     }
     if (markedObjectSize)
         Heap::increaseMarkedObjectSize(markedObjectSize);
 }
 
 #if defined(ADDRESS_SANITIZER)
-void NormalPage::poisonUnmarkedObjects()
+void NormalPage::poisonObjects(ObjectsToPoison objectsToPoison, Poisoning poisoning)
 {
     for (Address headerAddress = payload(); headerAddress < payloadEnd();) {
         HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress);
         ASSERT(header->size() < blinkPagePayloadSize());
         // Check if this is a free list entry first, since we cannot call
         // isMarked on a free list entry.
         if (header->isFree()) {
             headerAddress += header->size();
             continue;
         }
         header->checkHeader();
-        if (!header->isMarked()) {
-            ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
+        if (objectsToPoison == UnmarkedOrMarked || !header->isMarked()) {
+            if (poisoning == SetPoison)
+                ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
+            else
+                ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize());
         }
         headerAddress += header->size();
     }
 }
 #endif
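
[Note: the ObjectsToPoison and Poisoning arguments are small enums; their assumed shape, inferred from the call sites in this patch (the actual declarations live in the heap header).]

// Assumed declarations, inferred from the call sites above.
enum ObjectsToPoison {
    UnmarkedOnly,     // touch only objects the marker did not reach
    UnmarkedOrMarked, // touch every object, live or dead
};

enum Poisoning {
    SetPoison,   // ASAN_POISON_MEMORY_REGION over each payload
    ClearPoison, // ASAN_UNPOISON_MEMORY_REGION over each payload
};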
 
 void NormalPage::populateObjectStartBitMap()
 {
     memset(&m_objectStartBitMap, 0, objectStartBitMapSize);
     Address start = payload();
(...skipping 255 matching lines...)
     HeapObjectHeader* header = heapObjectHeader();
     if (header->isMarked()) {
         header->unmark();
         Heap::increaseMarkedObjectSize(size());
     } else {
         header->markDead();
     }
 }
 
 #if defined(ADDRESS_SANITIZER)
-void LargeObjectPage::poisonUnmarkedObjects()
+void LargeObjectPage::poisonObjects(ObjectsToPoison objectsToPoison, Poisoning poisoning)
 {
     HeapObjectHeader* header = heapObjectHeader();
-    if (!header->isMarked())
-        ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
+    if (objectsToPoison == UnmarkedOrMarked || !header->isMarked()) {
+        if (poisoning == SetPoison)
+            ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
+        else
+            ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize());
+    }
 }
 #endif
 
 void LargeObjectPage::checkAndMarkPointer(Visitor* visitor, Address address)
 {
     ASSERT(contains(address));
     if (!containedInObjectPayload(address) || heapObjectHeader()->isDead())
         return;
 #if ENABLE(GC_PROFILING)
     visitor->setHostInfo(&address, "stack");
(...skipping 746 matching lines...)
 size_t Heap::s_allocatedObjectSize = 0;
 size_t Heap::s_allocatedSpace = 0;
 size_t Heap::s_markedObjectSize = 0;
 // We don't want to use 0 KB for the initial value because it may end up
 // triggering the first GC of some thread prematurely.
 size_t Heap::s_estimatedLiveObjectSize = 512 * 1024;
 size_t Heap::s_externalObjectSizeAtLastGC = 0;
 double Heap::s_estimatedMarkingTimePerByte = 0.0;
 
 } // namespace blink