Chromium Code Reviews

Side by Side Diff: Source/platform/heap/Heap.cpp

Issue 1157933002: Oilpan: introduce eager finalization. (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Parameterize Heap::poisonHeap() over ObjectsToPoison Created 5 years, 6 months ago
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 338 matching lines...)
349 { 349 {
350 ASSERT(!threadState()->isInGC()); 350 ASSERT(!threadState()->isInGC());
351 ASSERT(!m_firstUnsweptPage); 351 ASSERT(!m_firstUnsweptPage);
352 352
353 // Move all pages to a list of unswept pages. 353 // Move all pages to a list of unswept pages.
354 m_firstUnsweptPage = m_firstPage; 354 m_firstUnsweptPage = m_firstPage;
355 m_firstPage = nullptr; 355 m_firstPage = nullptr;
356 } 356 }
357 357
358 #if defined(ADDRESS_SANITIZER) 358 #if defined(ADDRESS_SANITIZER)
359 void BaseHeap::poisonUnmarkedObjects() 359 void BaseHeap::poisonHeap(ObjectsToPoison objectsToPoison, Poisoning poisoning)
360 { 360 {
361 // This method is called just before starting sweeping. 361 // TODO(sof): support complete poisoning of all heaps.
362 // Thus all dead objects are in the list of m_firstUnsweptPage. 362 ASSERT(objectsToPoison != MarkedAndUnmarked || heapIndex() == EagerSweepHeapIndex);
363 for (BasePage* page = m_firstUnsweptPage; page; page = page->next()) { 363
364 page->poisonUnmarkedObjects(); 364 // This method may either be called to poison (SetPoison) heap
365 // object payloads prior to sweeping, or it may be called at
366 // the completion of a sweep to unpoison (ClearPoison) the
367 // objects remaining in the heap. Those will all be live and unmarked.
368 //
369 // Poisoning may be limited to unmarked objects only, or apply to all.
370 if (poisoning == SetPoison) {
371 ASSERT(!m_firstPage);
372 for (BasePage* page = m_firstUnsweptPage; page; page = page->next())
373 page->poisonObjects(objectsToPoison, poisoning);
374 return;
365 } 375 }
376 // Support clearing of poisoning after sweeping has completed,
377 // in which case the pages of the live objects are reachable
378 // via m_firstPage.
379 ASSERT(!m_firstUnsweptPage);
380 for (BasePage* page = m_firstPage; page; page = page->next())
381 page->poisonObjects(objectsToPoison, poisoning);
366 } 382 }
367 #endif 383 #endif
368 384
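A minimal sketch of how the two new parameters could be declared and combined (the real declarations are in Heap.h, not part of this file); MarkedAndUnmarked, SetPoison and ClearPoison appear in this patch, while the UnmarkedOnly enumerator name is an assumption:

    // Sketch only; not the actual Heap.h declarations.
    enum ObjectsToPoison { UnmarkedOnly, MarkedAndUnmarked }; // "UnmarkedOnly" is an assumed name
    enum Poisoning { SetPoison, ClearPoison };

    // Which page list BaseHeap::poisonHeap() walks follows from the direction:
    //   SetPoison   -- called before sweeping starts, all pages sit on m_firstUnsweptPage
    //   ClearPoison -- called after sweeping completes, survivors are back on m_firstPage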
369 Address BaseHeap::lazySweep(size_t allocationSize, size_t gcInfoIndex) 385 Address BaseHeap::lazySweep(size_t allocationSize, size_t gcInfoIndex)
370 { 386 {
371 // If there are no pages to be swept, return immediately. 387 // If there are no pages to be swept, return immediately.
372 if (!m_firstUnsweptPage) 388 if (!m_firstUnsweptPage)
373 return nullptr; 389 return nullptr;
374 390
375 RELEASE_ASSERT(threadState()->isSweepingInProgress()); 391 RELEASE_ASSERT(threadState()->isSweepingInProgress());
(...skipping 413 matching lines...)
789 { 805 {
790 ASSERT(allocationSize > remainingAllocationSize()); 806 ASSERT(allocationSize > remainingAllocationSize());
791 ASSERT(allocationSize >= allocationGranularity); 807 ASSERT(allocationSize >= allocationGranularity);
792 808
793 #if ENABLE(GC_PROFILING) 809 #if ENABLE(GC_PROFILING)
794 threadState()->snapshotFreeListIfNecessary(); 810 threadState()->snapshotFreeListIfNecessary();
795 #endif 811 #endif
796 812
797 // 1. If this allocation is big enough, allocate a large object. 813 // 1. If this allocation is big enough, allocate a large object.
798 if (allocationSize >= largeObjectSizeThreshold) { 814 if (allocationSize >= largeObjectSizeThreshold) {
815 // TODO(sof): support eagerly finalized large objects, if ever needed.
816 RELEASE_ASSERT(heapIndex() != EagerSweepHeapIndex);
799 LargeObjectHeap* largeObjectHeap = static_cast<LargeObjectHeap*>(threadState()->heap(LargeObjectHeapIndex)); 817 LargeObjectHeap* largeObjectHeap = static_cast<LargeObjectHeap*>(threadState()->heap(LargeObjectHeapIndex));
800 Address largeObject = largeObjectHeap->allocateLargeObjectPage(allocationSize, gcInfoIndex); 818 Address largeObject = largeObjectHeap->allocateLargeObjectPage(allocationSize, gcInfoIndex);
801 ASAN_MARK_LARGE_VECTOR_CONTAINER(this, largeObject); 819 ASAN_MARK_LARGE_VECTOR_CONTAINER(this, largeObject);
802 return largeObject; 820 return largeObject;
803 } 821 }
804 822
805 // 2. Check if we should trigger a GC. 823 // 2. Check if we should trigger a GC.
806 updateRemainingAllocationSize(); 824 updateRemainingAllocationSize();
807 threadState()->scheduleGCIfNeeded(); 825 threadState()->scheduleGCIfNeeded();
808 826
(...skipping 350 matching lines...)
1159 // touches any other on-heap object that die at the same GC cycle. 1177 // touches any other on-heap object that die at the same GC cycle.
1160 ASAN_UNPOISON_MEMORY_REGION(payload, payloadSize); 1178 ASAN_UNPOISON_MEMORY_REGION(payload, payloadSize);
1161 header->finalize(payload, payloadSize); 1179 header->finalize(payload, payloadSize);
1162 // This memory will be added to the freelist. Maintain the invariant 1180 // This memory will be added to the freelist. Maintain the invariant
1163 // that memory on the freelist is zero filled. 1181 // that memory on the freelist is zero filled.
1164 FILL_ZERO_IF_PRODUCTION(headerAddress, size); 1182 FILL_ZERO_IF_PRODUCTION(headerAddress, size);
1165 ASAN_POISON_MEMORY_REGION(payload, payloadSize); 1183 ASAN_POISON_MEMORY_REGION(payload, payloadSize);
1166 headerAddress += size; 1184 headerAddress += size;
1167 continue; 1185 continue;
1168 } 1186 }
1169
1170 if (startOfGap != headerAddress) 1187 if (startOfGap != headerAddress)
1171 heapForNormalPage()->addToFreeList(startOfGap, headerAddress - startOfGap); 1188 heapForNormalPage()->addToFreeList(startOfGap, headerAddress - startOfGap);
1172 header->unmark(); 1189 header->unmark();
1173 headerAddress += header->size(); 1190 headerAddress += header->size();
1174 markedObjectSize += header->size(); 1191 markedObjectSize += header->size();
1175 startOfGap = headerAddress; 1192 startOfGap = headerAddress;
1176 } 1193 }
1177 if (startOfGap != payloadEnd()) 1194 if (startOfGap != payloadEnd())
1178 heapForNormalPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap); 1195 heapForNormalPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap);
1179 1196
(...skipping 20 matching lines...)
1200 } else { 1217 } else {
1201 header->markDead(); 1218 header->markDead();
1202 } 1219 }
1203 headerAddress += header->size(); 1220 headerAddress += header->size();
1204 } 1221 }
1205 if (markedObjectSize) 1222 if (markedObjectSize)
1206 Heap::increaseMarkedObjectSize(markedObjectSize); 1223 Heap::increaseMarkedObjectSize(markedObjectSize);
1207 } 1224 }
1208 1225
1209 #if defined(ADDRESS_SANITIZER) 1226 #if defined(ADDRESS_SANITIZER)
1210 void NormalPage::poisonUnmarkedObjects() 1227 void NormalPage::poisonObjects(ObjectsToPoison objectsToPoison, Poisoning poisoning)
1211 { 1228 {
1212 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { 1229 for (Address headerAddress = payload(); headerAddress < payloadEnd();) {
1213 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); 1230 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress);
1214 ASSERT(header->size() < blinkPagePayloadSize()); 1231 ASSERT(header->size() < blinkPagePayloadSize());
1215 // Check if a free list entry first since we cannot call 1232 // Check if a free list entry first since we cannot call
1216 // isMarked on a free list entry. 1233 // isMarked on a free list entry.
1217 if (header->isFree()) { 1234 if (header->isFree()) {
1218 headerAddress += header->size(); 1235 headerAddress += header->size();
1219 continue; 1236 continue;
1220 } 1237 }
1221 header->checkHeader(); 1238 header->checkHeader();
1222 if (!header->isMarked()) { 1239 if (objectsToPoison == MarkedAndUnmarked || !header->isMarked()) {
1223 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); 1240 if (poisoning == SetPoison)
1241 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
1242 else
1243 ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize());
1224 } 1244 }
1225 headerAddress += header->size(); 1245 headerAddress += header->size();
1226 } 1246 }
1227 } 1247 }
1228 #endif 1248 #endif
1229 1249
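The ASAN_POISON_MEMORY_REGION / ASAN_UNPOISON_MEMORY_REGION macros used above wrap the public AddressSanitizer interface. A self-contained sketch of that primitive outside Blink, assuming a clang build with -fsanitize=address:

    // asan_poison_demo.cpp -- illustrative only, not Blink code.
    // Build with: clang++ -fsanitize=address asan_poison_demo.cpp
    #include <sanitizer/asan_interface.h>
    #include <cassert>
    #include <cstddef>
    #include <cstdlib>

    int main()
    {
        const std::size_t payloadSize = 64;
        char* payload = static_cast<char*>(std::malloc(payloadSize));

        // Poison the payload: any subsequent read or write is reported by
        // ASan, which is how touching a poisoned heap object is caught.
        __asan_poison_memory_region(payload, payloadSize);
        assert(__asan_address_is_poisoned(payload));

        // Unpoison before legitimate use again -- the ClearPoison direction.
        __asan_unpoison_memory_region(payload, payloadSize);
        assert(!__asan_address_is_poisoned(payload));

        std::free(payload);
        return 0;
    }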
1230 void NormalPage::populateObjectStartBitMap() 1250 void NormalPage::populateObjectStartBitMap()
1231 { 1251 {
1232 memset(&m_objectStartBitMap, 0, objectStartBitMapSize); 1252 memset(&m_objectStartBitMap, 0, objectStartBitMapSize);
1233 Address start = payload(); 1253 Address start = payload();
(...skipping 255 matching lines...)
1489 HeapObjectHeader* header = heapObjectHeader(); 1509 HeapObjectHeader* header = heapObjectHeader();
1490 if (header->isMarked()) { 1510 if (header->isMarked()) {
1491 header->unmark(); 1511 header->unmark();
1492 Heap::increaseMarkedObjectSize(size()); 1512 Heap::increaseMarkedObjectSize(size());
1493 } else { 1513 } else {
1494 header->markDead(); 1514 header->markDead();
1495 } 1515 }
1496 } 1516 }
1497 1517
1498 #if defined(ADDRESS_SANITIZER) 1518 #if defined(ADDRESS_SANITIZER)
1499 void LargeObjectPage::poisonUnmarkedObjects() 1519 void LargeObjectPage::poisonObjects(ObjectsToPoison objectsToPoison, Poisoning poisoning)
1500 { 1520 {
1501 HeapObjectHeader* header = heapObjectHeader(); 1521 HeapObjectHeader* header = heapObjectHeader();
1502 if (!header->isMarked()) 1522 if (objectsToPoison == MarkedAndUnmarked || !header->isMarked()) {
1503 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); 1523 if (poisoning == SetPoison)
1524 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
1525 else
1526 ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize());
1527 }
1504 } 1528 }
1505 #endif 1529 #endif
1506 1530
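Taken together with the page-level poisonObjects() implementations above, a hedged sketch of how a caller might drive the new parameterization around an eager sweep; the wrapper function and the exact argument choices are assumptions drawn from the comments in this patch, not the actual ThreadState call sites:

    // Sketch only; the real call sites live in ThreadState and are not shown here.
    void poisonAroundEagerSweep(BaseHeap* eagerHeap)
    {
        // Before sweeping: poison the eagerly-swept heap so destructors of
        // other dying objects that touch an eagerly-finalized object are reported.
        eagerHeap->poisonHeap(MarkedAndUnmarked, SetPoison);

        // ... the eager sweep runs here, finalizing the dead objects ...

        // After the sweep completes: the remaining objects are live (and
        // unmarked), so clear the poisoning again.
        eagerHeap->poisonHeap(MarkedAndUnmarked, ClearPoison);
    }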
1507 void LargeObjectPage::checkAndMarkPointer(Visitor* visitor, Address address) 1531 void LargeObjectPage::checkAndMarkPointer(Visitor* visitor, Address address)
1508 { 1532 {
1509 ASSERT(contains(address)); 1533 ASSERT(contains(address));
1510 if (!containedInObjectPayload(address) || heapObjectHeader()->isDead()) 1534 if (!containedInObjectPayload(address) || heapObjectHeader()->isDead())
1511 return; 1535 return;
1512 #if ENABLE(GC_PROFILING) 1536 #if ENABLE(GC_PROFILING)
1513 visitor->setHostInfo(&address, "stack"); 1537 visitor->setHostInfo(&address, "stack");
(...skipping 745 matching lines...)
2259 size_t Heap::s_allocatedObjectSize = 0; 2283 size_t Heap::s_allocatedObjectSize = 0;
2260 size_t Heap::s_allocatedSpace = 0; 2284 size_t Heap::s_allocatedSpace = 0;
2261 size_t Heap::s_markedObjectSize = 0; 2285 size_t Heap::s_markedObjectSize = 0;
2262 // We don't want to use 0 KB for the initial value because it may end up 2286 // We don't want to use 0 KB for the initial value because it may end up
2263 // triggering the first GC of some thread too prematurely. 2287 // triggering the first GC of some thread too prematurely.
2264 size_t Heap::s_estimatedLiveObjectSize = 512 * 1024; 2288 size_t Heap::s_estimatedLiveObjectSize = 512 * 1024;
2265 size_t Heap::s_externalObjectSizeAtLastGC = 0; 2289 size_t Heap::s_externalObjectSizeAtLastGC = 0;
2266 double Heap::s_estimatedMarkingTimePerByte = 0.0; 2290 double Heap::s_estimatedMarkingTimePerByte = 0.0;
2267 2291
2268 } // namespace blink 2292 } // namespace blink