| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 17 matching lines...) | |
| 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 29 */ | 29 */ |
| 30 | 30 |
| 31 #include "platform/heap/HeapPage.h" | 31 #include "platform/heap/HeapPage.h" |
| 32 | 32 |
| 33 #include "base/trace_event/process_memory_dump.h" | 33 #include "base/trace_event/process_memory_dump.h" |
| 34 #include "platform/MemoryCoordinator.h" | 34 #include "platform/MemoryCoordinator.h" |
| 35 #include "platform/ScriptForbiddenScope.h" | 35 #include "platform/ScriptForbiddenScope.h" |
| 36 #include "platform/heap/BlinkGCMemoryDumpProvider.h" | 36 #include "platform/heap/BlinkGCMemoryDumpProvider.h" |
| 37 #include "platform/heap/CallbackStack.h" | 37 #include "platform/heap/CallbackStack.h" |
| 38 #include "platform/heap/HeapCompact.h" | |
| 38 #include "platform/heap/MarkingVisitor.h" | 39 #include "platform/heap/MarkingVisitor.h" |
| 39 #include "platform/heap/PageMemory.h" | 40 #include "platform/heap/PageMemory.h" |
| 40 #include "platform/heap/PagePool.h" | 41 #include "platform/heap/PagePool.h" |
| 41 #include "platform/heap/SafePoint.h" | 42 #include "platform/heap/SafePoint.h" |
| 42 #include "platform/heap/ThreadState.h" | 43 #include "platform/heap/ThreadState.h" |
| 43 #include "platform/tracing/TraceEvent.h" | 44 #include "platform/tracing/TraceEvent.h" |
| 44 #include "platform/tracing/web_memory_allocator_dump.h" | 45 #include "platform/tracing/web_memory_allocator_dump.h" |
| 45 #include "platform/tracing/web_process_memory_dump.h" | 46 #include "platform/tracing/web_process_memory_dump.h" |
| 46 #include "public/platform/Platform.h" | 47 #include "public/platform/Platform.h" |
| 47 #include "wtf/Assertions.h" | 48 #include "wtf/Assertions.h" |
| (...skipping 146 matching lines...) | |
| 194 ASSERT(!page->hasBeenSwept()); | 195 ASSERT(!page->hasBeenSwept()); |
| 195 page->invalidateObjectStartBitmap(); | 196 page->invalidateObjectStartBitmap(); |
| 196 } | 197 } |
| 197 if (previousPage) { | 198 if (previousPage) { |
| 198 ASSERT(m_firstUnsweptPage); | 199 ASSERT(m_firstUnsweptPage); |
| 199 previousPage->m_next = m_firstPage; | 200 previousPage->m_next = m_firstPage; |
| 200 m_firstPage = m_firstUnsweptPage; | 201 m_firstPage = m_firstUnsweptPage; |
| 201 m_firstUnsweptPage = nullptr; | 202 m_firstUnsweptPage = nullptr; |
| 202 } | 203 } |
| 203 ASSERT(!m_firstUnsweptPage); | 204 ASSERT(!m_firstUnsweptPage); |
| 205 | |
| 206 HeapCompact* heapCompactor = getThreadState()->heap().compaction(); | |
| 207 if (!heapCompactor->isCompactingArena(arenaIndex())) | |
| 208 return; | |
| 209 | |
| 210 BasePage* nextPage = m_firstPage; | |
| 211 while (nextPage) { | |
| 212 if (!nextPage->isLargeObjectPage()) | |
| 213 heapCompactor->addCompactablePage(nextPage); | |
| 214 nextPage = nextPage->next(); | |
| 215 } | |
| 204 } | 216 } |
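The two HeapCompact calls above are declared in HeapCompact.h, which this diff does not show. As a rough mental model (a sketch under assumptions; member names and bodies below are illustrative, not Blink's actual implementation), the compactor can be pictured as a bitmask of arenas selected for compaction plus a set of candidate pages:

```cpp
#include <cstdint>
#include <set>

class BasePage;  // declared in HeapPage.h

// Illustrative stand-in for blink::HeapCompact, modeling only the two
// calls used in the registration loop above.
class HeapCompactModel {
 public:
  // True when the arena at |arenaIndex| was selected for compaction
  // during this GC (assumed bitmask representation).
  bool isCompactingArena(int arenaIndex) const {
    return m_compactableArenas & (1u << arenaIndex);
  }
  // Records a swept normal page as a candidate for compaction.
  void addCompactablePage(BasePage* page) { m_candidatePages.insert(page); }

 private:
  uint32_t m_compactableArenas = 0;  // one bit per arena index
  std::set<BasePage*> m_candidatePages;
};
```

Large object pages are skipped in the loop above because each holds a single object on its own dedicated page, so moving one cannot reduce fragmentation within normal pages.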
| 205 | 217 |
| 206 void BaseArena::makeConsistentForMutator() { | 218 void BaseArena::makeConsistentForMutator() { |
| 207 clearFreeLists(); | 219 clearFreeLists(); |
| 208 ASSERT(isConsistentForGC()); | 220 ASSERT(isConsistentForGC()); |
| 209 ASSERT(!m_firstPage); | 221 ASSERT(!m_firstPage); |
| 210 | 222 |
| 211 // Drop marks from marked objects and rebuild free lists in preparation for | 223 // Drop marks from marked objects and rebuild free lists in preparation for |
| 212 // resuming the execution of mutators. | 224 // resuming the execution of mutators. |
| 213 BasePage* previousPage = nullptr; | 225 BasePage* previousPage = nullptr; |
| (...skipping 219 matching lines...) | |
| 433 m_promptlyFreedSize(0), | 445 m_promptlyFreedSize(0), |
| 434 m_isLazySweeping(false) { | 446 m_isLazySweeping(false) { |
| 435 clearFreeLists(); | 447 clearFreeLists(); |
| 436 } | 448 } |
| 437 | 449 |
| 438 void NormalPageArena::clearFreeLists() { | 450 void NormalPageArena::clearFreeLists() { |
| 439 setAllocationPoint(nullptr, 0); | 451 setAllocationPoint(nullptr, 0); |
| 440 m_freeList.clear(); | 452 m_freeList.clear(); |
| 441 } | 453 } |
| 442 | 454 |
| 455 size_t NormalPageArena::arenaSize() { | |
| 456 size_t size = 0; | |
| 457 BasePage* p = m_firstPage; | |
> haraken 2016/12/05 11:27:47: page
> sof 2016/12/05 19:30:06: Done.
| 458 while (p) { | |
| 459 size += p->size(); | |
| 460 p = p->next(); | |
| 461 } | |
| 462 LOG_HEAP_FREELIST_VERBOSE("Heap size: %zu (%d)\n", size, arenaIndex()); | |
| 463 return size; | |
| 464 } | |
| 465 | |
| 466 size_t NormalPageArena::freeListSize() { | |
| 467 size_t freeSize = m_freeList.freeListSize(); | |
| 468 LOG_HEAP_FREELIST_VERBOSE("Free size: %zu (%d)\n", freeSize, arenaIndex()); | |
| 469 return freeSize; | |
| 470 } | |
| 471 | |
| 472 void NormalPageArena::sweepAndCompact() { | |
| 473 ThreadHeap& heap = getThreadState()->heap(); | |
| 474 if (!heap.compaction()->isCompactingArena(arenaIndex())) | |
| 475 return; | |
| 476 | |
> haraken 2016/12/05 11:27:47: Add DCHECK(!hasCurrentAllocationArea()).
> sof 2016/12/05 19:30:07: Done.
| 477 NormalPage* nextPage = nullptr; | |
| 478 size_t allocationPoint = 0; | |
| 479 | |
| 480 while (m_firstUnsweptPage) { | |
| 481 BasePage* page = m_firstUnsweptPage; | |
| 482 if (page->isEmpty()) { | |
| 483 page->unlink(&m_firstUnsweptPage); | |
| 484 page->removeFromHeap(); | |
| 485 continue; | |
| 486 } | |
| 487 if (page->isLargeObjectPage()) { | |
| 488 page->sweep(); | |
| 489 page->markAsSwept(); | |
| 490 continue; | |
> haraken 2016/12/05 11:27:47: Don't we need to call: page->unlink(&m_firstUnsweptPage)?
> sof 2016/12/05 19:30:07: Good catch; the large object case is dead code, ho…
| 491 } | |
| 492 NormalPage* normalPage = static_cast<NormalPage*>(page); | |
| 493 normalPage->unlink(&m_firstUnsweptPage); | |
| 494 normalPage->markAsSwept(); | |
| 495 if (!nextPage) { | |
| 496 nextPage = normalPage; | |
| 497 } else { | |
| 498 // Add |normalPage| to the |nextPage| chain, but after |nextPage|, as | |
| 499 // that is the page currently being allocated from. | |
| 500 BasePage* nextP; | |
| 501 nextPage->unlink(&nextP); | |
| 502 normalPage->link(&nextP); | |
| 503 nextPage->link(&nextP); | |
| 504 } | |
| 505 allocationPoint = | |
| 506 normalPage->sweepAndCompact(nextPage, &m_firstPage, allocationPoint); | |
> haraken 2016/12/05 11:27:47: Honestly speaking, it's very hard to understand wh…
> sof 2016/12/05 19:30:07: We do want to perform in-place compaction of these…
> sof 2016/12/06 10:55:59: Done; refreshed the code + added comments.
| 507 } | |
| 508 // Add the unused tail to the free list. | |
| 509 BasePage* nextP = nullptr; | |
| 510 if (nextPage) { | |
| 511 // If the 'next page' is used, add it to the heap's list of swept pages. | |
| 512 // Otherwise we hand it back to the OS below. | |
| 513 if (allocationPoint) { | |
| 514 nextPage->unlink(&nextP); | |
| 515 nextPage->link(&m_firstPage); | |
| 516 } else { | |
| 517 nextP = nextPage; | |
| 518 nextPage = nullptr; | |
| 519 } | |
| 520 } | |
| 521 size_t freedSize = 0; | |
| 522 if (nextPage && allocationPoint != nextPage->payloadSize()) { | |
| 523 freedSize = nextPage->payloadSize() - allocationPoint; | |
| 524 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \ | |
| 525 defined(MEMORY_SANITIZER) | |
| 526 FreeList::zapFreedMemory(nextPage->payload() + allocationPoint, freedSize); | |
| 527 #endif | |
| 528 nextPage->arenaForNormalPage()->addToFreeList( | |
| 529 nextPage->payload() + allocationPoint, freedSize); | |
| 530 } | |
| 531 nextPage = static_cast<NormalPage*>(nextP); | |
| 532 size_t freedPages = 0; | |
| 533 while (nextPage) { | |
| 534 #if DEBUG_HEAP_COMPACTION | |
| 535 if (!freedPages) | |
| 536 LOG_HEAP_COMPACTION("Releasing:"); | |
| 537 LOG_HEAP_COMPACTION(" [%p, %p]", nextPage, nextPage + nextPage->size()); | |
| 538 #endif | |
| 539 freedSize += nextPage->size(); | |
| 540 freedPages++; | |
| 541 nextPage->unlink(&nextP); | |
| 542 nextPage->removeFromHeap(); | |
| 543 nextPage = static_cast<NormalPage*>(nextP); | |
| 544 } | |
| 545 if (nextP) | |
| 546 LOG_HEAP_COMPACTION("\n"); | |
| 547 heap.compaction()->finishedArenaCompaction(this, freedPages, freedSize); | |
| 548 } | |
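The page-chain surgery in sweepAndCompact() is terse, and the review thread above notes it is hard to follow. Below is a minimal sketch of the insertion step, assuming BasePage::link pushes a page into a list slot and BasePage::unlink detaches the page's tail into a slot (the apparent semantics of HeapPage.h's intrusive singly linked list):

```cpp
// Simplified stand-in for BasePage's intrusive list (assumed semantics).
struct PageModel {
  PageModel* m_next = nullptr;
  void link(PageModel** slot) {  // push |this| in front of *slot
    m_next = *slot;
    *slot = this;
  }
  void unlink(PageModel** slot) {  // detach |this|'s tail into *slot
    *slot = m_next;
    m_next = nullptr;
  }
};

// The "add |normalPage| after the current allocation page" step:
//   before: nextPage -> rest...   after: nextPage -> normalPage -> rest...
void insertBehindAllocationPage(PageModel* nextPage, PageModel* normalPage) {
  PageModel* tail;           // plays the role of |nextP| above
  nextPage->unlink(&tail);   // tail = old rest; nextPage now stands alone
  normalPage->link(&tail);   // normalPage heads the old rest
  nextPage->link(&tail);     // nextPage back in front; tail == nextPage
}
```

Keeping |nextPage| at the head preserves the invariant that live objects are always compacted into the page at the front of the chain.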
| 549 | |
| 443 #if ENABLE(ASSERT) | 550 #if ENABLE(ASSERT) |
| 444 bool NormalPageArena::isConsistentForGC() { | 551 bool NormalPageArena::isConsistentForGC() { |
| 445 // A thread heap is consistent for sweeping if none of the pages to be swept | 552 // A thread heap is consistent for sweeping if none of the pages to be swept |
| 446 // contain a freelist block or the current allocation point. | 553 // contain a freelist block or the current allocation point. |
| 447 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { | 554 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { |
| 448 for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; | 555 for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; |
| 449 freeListEntry; freeListEntry = freeListEntry->next()) { | 556 freeListEntry; freeListEntry = freeListEntry->next()) { |
| 450 if (pagesToBeSweptContains(freeListEntry->getAddress())) | 557 if (pagesToBeSweptContains(freeListEntry->getAddress())) |
| 451 return false; | 558 return false; |
| 452 } | 559 } |
| (...skipping 21 matching lines...) | |
| 474 ->createMemoryAllocatorDumpForCurrentGC(dumpName + "/buckets"); | 581 ->createMemoryAllocatorDumpForCurrentGC(dumpName + "/buckets"); |
| 475 base::trace_event::MemoryAllocatorDump* pagesDump = | 582 base::trace_event::MemoryAllocatorDump* pagesDump = |
| 476 BlinkGCMemoryDumpProvider::instance() | 583 BlinkGCMemoryDumpProvider::instance() |
| 477 ->createMemoryAllocatorDumpForCurrentGC(dumpName + "/pages"); | 584 ->createMemoryAllocatorDumpForCurrentGC(dumpName + "/pages"); |
| 478 BlinkGCMemoryDumpProvider::instance() | 585 BlinkGCMemoryDumpProvider::instance() |
| 479 ->currentProcessMemoryDump() | 586 ->currentProcessMemoryDump() |
| 480 ->AddOwnershipEdge(pagesDump->guid(), bucketsDump->guid()); | 587 ->AddOwnershipEdge(pagesDump->guid(), bucketsDump->guid()); |
| 481 } | 588 } |
| 482 } | 589 } |
| 483 | 590 |
| 484 void NormalPageArena::allocatePage() { | 591 NormalPage* NormalPageArena::allocatePage() { |
| 485 getThreadState()->shouldFlushHeapDoesNotContainCache(); | 592 getThreadState()->shouldFlushHeapDoesNotContainCache(); |
| 486 PageMemory* pageMemory = | 593 PageMemory* pageMemory = |
| 487 getThreadState()->heap().getFreePagePool()->takeFreePage(arenaIndex()); | 594 getThreadState()->heap().getFreePagePool()->takeFreePage(arenaIndex()); |
| 488 | 595 |
| 489 if (!pageMemory) { | 596 if (!pageMemory) { |
| 490 // Allocate a memory region for blinkPagesPerRegion pages that | 597 // Allocate a memory region for blinkPagesPerRegion pages that |
| 491 // will each have the following layout. | 598 // will each have the following layout. |
| 492 // | 599 // |
| 493 // [ guard os page | ... payload ... | guard os page ] | 600 // [ guard os page | ... payload ... | guard os page ] |
| 494 // ^---{ aligned to blink page size } | 601 // ^---{ aligned to blink page size } |
| (...skipping 15 matching lines...) | |
| 510 pageMemory = memory; | 617 pageMemory = memory; |
| 511 } else { | 618 } else { |
| 512 getThreadState()->heap().getFreePagePool()->addFreePage(arenaIndex(), | 619 getThreadState()->heap().getFreePagePool()->addFreePage(arenaIndex(), |
| 513 memory); | 620 memory); |
| 514 } | 621 } |
| 515 } | 622 } |
| 516 } | 623 } |
| 517 | 624 |
| 518 NormalPage* page = | 625 NormalPage* page = |
| 519 new (pageMemory->writableStart()) NormalPage(pageMemory, this); | 626 new (pageMemory->writableStart()) NormalPage(pageMemory, this); |
| 627 return page; | |
> haraken 2016/12/05 11:27:47: return new...
> sof 2016/12/05 19:30:06: Done.
| 628 } | |
| 629 | |
| 630 void NormalPageArena::allocateAndAddPage() { | |
| 631 NormalPage* page = allocatePage(); | |
| 520 page->link(&m_firstPage); | 632 page->link(&m_firstPage); |
| 521 | 633 |
| 522 getThreadState()->heap().heapStats().increaseAllocatedSpace(page->size()); | 634 getThreadState()->heap().heapStats().increaseAllocatedSpace(page->size()); |
| 523 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 635 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
| 524 // Allow the following addToFreeList() to add the newly allocated memory | 636 // Allow the following addToFreeList() to add the newly allocated memory |
| 525 // to the free list. | 637 // to the free list. |
| 526 ASAN_UNPOISON_MEMORY_REGION(page->payload(), page->payloadSize()); | 638 ASAN_UNPOISON_MEMORY_REGION(page->payload(), page->payloadSize()); |
| 527 Address address = page->payload(); | 639 Address address = page->payload(); |
| 528 for (size_t i = 0; i < page->payloadSize(); i++) | 640 for (size_t i = 0; i < page->payloadSize(); i++) |
| 529 address[i] = reuseAllowedZapValue; | 641 address[i] = reuseAllowedZapValue; |
| (...skipping 276 matching lines...) | |
| 806 return result; | 918 return result; |
| 807 } | 919 } |
| 808 | 920 |
| 809 // 6. Complete sweeping. | 921 // 6. Complete sweeping. |
| 810 getThreadState()->completeSweep(); | 922 getThreadState()->completeSweep(); |
| 811 | 923 |
| 812 // 7. Check if we should trigger a GC. | 924 // 7. Check if we should trigger a GC. |
| 813 getThreadState()->scheduleGCIfNeeded(); | 925 getThreadState()->scheduleGCIfNeeded(); |
| 814 | 926 |
| 815 // 8. Add a new page to this heap. | 927 // 8. Add a new page to this heap. |
| 816 allocatePage(); | 928 allocateAndAddPage(); |
| 817 | 929 |
| 818 // 9. Try to allocate from a free list. This allocation must succeed. | 930 // 9. Try to allocate from a free list. This allocation must succeed. |
| 819 result = allocateFromFreeList(allocationSize, gcInfoIndex); | 931 result = allocateFromFreeList(allocationSize, gcInfoIndex); |
| 820 RELEASE_ASSERT(result); | 932 RELEASE_ASSERT(result); |
| 821 return result; | 933 return result; |
| 822 } | 934 } |
| 823 | 935 |
| 824 Address NormalPageArena::allocateFromFreeList(size_t allocationSize, | 936 Address NormalPageArena::allocateFromFreeList(size_t allocationSize, |
| 825 size_t gcInfoIndex) { | 937 size_t gcInfoIndex) { |
| 826 // Try reusing a block from the largest bin. The underlying reasoning | 938 // Try reusing a block from the largest bin. The underlying reasoning |
| (...skipping 243 matching lines...) | |
| 1070 | 1182 |
| 1071 void NEVER_INLINE FreeList::checkFreedMemoryIsZapped(Address address, | 1183 void NEVER_INLINE FreeList::checkFreedMemoryIsZapped(Address address, |
| 1072 size_t size) { | 1184 size_t size) { |
| 1073 for (size_t i = 0; i < size; i++) { | 1185 for (size_t i = 0; i < size; i++) { |
| 1074 ASSERT(address[i] == reuseAllowedZapValue || | 1186 ASSERT(address[i] == reuseAllowedZapValue || |
| 1075 address[i] == reuseForbiddenZapValue); | 1187 address[i] == reuseForbiddenZapValue); |
| 1076 } | 1188 } |
| 1077 } | 1189 } |
| 1078 #endif | 1190 #endif |
| 1079 | 1191 |
| 1192 size_t FreeList::freeListSize() const { | |
| 1193 size_t freeSize = 0; | |
| 1194 for (unsigned i = 0; i < blinkPageSizeLog2; ++i) { | |
| 1195 FreeListEntry* entry = m_freeLists[i]; | |
| 1196 while (entry) { | |
| 1197 freeSize += entry->size(); | |
| 1198 entry = entry->next(); | |
| 1199 } | |
| 1200 } | |
| 1201 #if DEBUG_HEAP_FREELIST | |
| 1202 if (freeSize) { | |
| 1203 LOG_HEAP_FREELIST_VERBOSE("FreeList(%p): %zu\n", this, freeSize); | |
| 1204 for (unsigned i = 0; i < blinkPageSizeLog2; ++i) { | |
| 1205 FreeListEntry* entry = m_freeLists[i]; | |
| 1206 size_t bucket = 0; | |
| 1207 size_t count = 0; | |
| 1208 while (entry) { | |
| 1209 bucket += entry->size(); | |
| 1210 count++; | |
| 1211 entry = entry->next(); | |
| 1212 } | |
| 1213 if (bucket) { | |
| 1214 LOG_HEAP_FREELIST_VERBOSE("[%d, %d]: %zu (%zu)\n", 0x1 << i, | |
| 1215 0x1 << (i + 1), bucket, count); | |
| 1216 } | |
| 1217 } | |
| 1218 } | |
| 1219 #endif | |
| 1220 return freeSize; | |
| 1221 } | |
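The [2^i, 2^(i+1)) ranges in the verbose log above come from the free list's power-of-two bucketing, computed by bucketIndexForSize() further down. Its loop body is elided in this diff view, so the following is a sketch under the assumption that the index is floor(log2(size)):

```cpp
#include <cstddef>

// Assumed completion of FreeList::bucketIndexForSize(), for illustration.
int bucketIndexForSizeModel(size_t size) {
  int index = -1;
  while (size) {
    size >>= 1;
    ++index;
  }
  // Example: 48 == 0b110000 shifts right six times before reaching zero,
  // so index == 5 and a 48-byte block is filed under bucket [32, 64).
  return index;
}
```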
| 1222 | |
| 1080 void FreeList::clear() { | 1223 void FreeList::clear() { |
| 1081 m_biggestFreeListIndex = 0; | 1224 m_biggestFreeListIndex = 0; |
| 1082 for (size_t i = 0; i < blinkPageSizeLog2; ++i) | 1225 for (size_t i = 0; i < blinkPageSizeLog2; ++i) |
| 1083 m_freeLists[i] = nullptr; | 1226 m_freeLists[i] = nullptr; |
| 1084 } | 1227 } |
| 1085 | 1228 |
| 1086 int FreeList::bucketIndexForSize(size_t size) { | 1229 int FreeList::bucketIndexForSize(size_t size) { |
| 1087 ASSERT(size > 0); | 1230 ASSERT(size > 0); |
| 1088 int index = -1; | 1231 int index = -1; |
| 1089 while (size) { | 1232 while (size) { |
| (...skipping 149 matching lines...) | |
| 1239 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) | 1382 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) |
| 1240 if (MemoryCoordinator::isLowEndDevice()) | 1383 if (MemoryCoordinator::isLowEndDevice()) |
| 1241 discardPages(startOfGap + sizeof(FreeListEntry), payloadEnd()); | 1384 discardPages(startOfGap + sizeof(FreeListEntry), payloadEnd()); |
| 1242 #endif | 1385 #endif |
| 1243 } | 1386 } |
| 1244 | 1387 |
| 1245 if (markedObjectSize) | 1388 if (markedObjectSize) |
| 1246 pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize); | 1389 pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize); |
| 1247 } | 1390 } |
| 1248 | 1391 |
| 1392 size_t NormalPage::sweepAndCompact(NormalPage*& arena, | |
| 1393 BasePage** firstPage, | |
| 1394 size_t allocationPoint) { | |
| 1395 size_t markedObjectSize = 0; | |
| 1396 NormalPageArena* pageArena = arenaForNormalPage(); | |
| 1397 HeapCompact* compact = pageArena->getThreadState()->heap().compaction(); | |
| 1398 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { | |
| 1399 HeapObjectHeader* header = | |
| 1400 reinterpret_cast<HeapObjectHeader*>(headerAddress); | |
| 1401 size_t size = header->size(); | |
| 1402 DCHECK(size > 0 && size < blinkPagePayloadSize()); | |
| 1403 | |
| 1404 if (header->isPromptlyFreed()) | |
| 1405 pageArena->decreasePromptlyFreedSize(size); | |
| 1406 if (header->isFree()) { | |
| 1407 // Unpoison the freelist entry so that we | |
| 1408 // can compact into it as needed. | |
| 1409 ASAN_UNPOISON_MEMORY_REGION(headerAddress, size); | |
| 1410 headerAddress += size; | |
| 1411 continue; | |
| 1412 } | |
| 1413 #if ENABLE(ASSERT) | |
| 1414 DCHECK(header->checkHeader()); | |
| 1415 #endif | |
| 1416 | |
| 1417 if (!header->isMarked()) { | |
| 1418 // This is a fast version of header->payloadSize(). | |
| 1419 size_t payloadSize = size - sizeof(HeapObjectHeader); | |
| 1420 Address payload = header->payload(); | |
| 1421 // For ASan, unpoison the object before calling the finalizer. The | |
| 1422 // finalized object will be zero-filled and poisoned afterwards. | |
| 1423 // Given all other unmarked objects are poisoned, ASan will detect | |
| 1424 // an error if the finalizer touches any other on-heap object that | |
| 1425 // dies in the same GC cycle. | |
| 1426 ASAN_UNPOISON_MEMORY_REGION(headerAddress, size); | |
| 1427 header->finalize(payload, payloadSize); | |
| 1428 | |
| 1429 // As compaction is under way, leave the freed memory accessible | |
| 1430 // while compacting the rest of the page. We just zap the payload | |
| 1431 // to catch out other finalizers trying to access it. | |
| 1432 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \ | |
| 1433 defined(MEMORY_SANITIZER) | |
| 1434 FreeList::zapFreedMemory(payload, payloadSize); | |
| 1435 #endif | |
| 1436 headerAddress += size; | |
| 1437 continue; | |
| 1438 } | |
| 1439 DCHECK(header->isMarked()); | |
| 1440 header->unmark(); | |
| 1441 markedObjectSize += size; | |
| 1442 // Allocate and copy over the live object. | |
| 1443 if (arena->payload() + allocationPoint + size > arena->payloadEnd()) { | |
| 1444 // Can't fit on current allocation page. | |
| 1445 // TODO(sof): be more clever & compact later objects into |arena|'s unused | |
| 1446 // slop. | |
| 1447 BasePage* nextP; | |
| 1448 arena->unlink(&nextP); | |
| 1449 arena->link(firstPage); | |
| 1450 size_t freeSize = arena->payloadSize() - allocationPoint; | |
| 1451 if (freeSize) { | |
| 1452 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \ | |
| 1453 defined(MEMORY_SANITIZER) | |
| 1454 SET_MEMORY_INACCESSIBLE(arena->payload() + allocationPoint, freeSize); | |
| 1455 #endif | |
| 1456 arena->arenaForNormalPage()->addToFreeList( | |
| 1457 arena->payload() + allocationPoint, freeSize); | |
| 1458 } | |
| 1459 arena = static_cast<NormalPage*>(nextP); | |
| 1460 allocationPoint = 0; | |
| 1461 } | |
| 1462 Address movedObject = arena->payload() + allocationPoint; | |
| 1463 if (LIKELY(movedObject != headerAddress)) { | |
| 1464 #if defined(ADDRESS_SANITIZER) | |
| 1465 // Unpoison the header and, if it is a vector backing | |
| 1466 // store object, let go of the container annotations | |
| 1467 // by unpoisoning the payload entirely. | |
| 1468 ASAN_UNPOISON_MEMORY_REGION(header, sizeof(HeapObjectHeader)); | |
| 1469 if (ThreadState::isVectorArenaIndex( | |
| 1470 arena->arenaForNormalPage()->arenaIndex())) { | |
| 1471 ASAN_UNPOISON_MEMORY_REGION(header->payload(), | |
| 1472 size - sizeof(HeapObjectHeader)); | |
| 1473 } | |
| 1474 #endif | |
| 1475 // Use a non-overlapping copy, if possible. | |
| 1476 if (arena == this) | |
| 1477 memmove(movedObject, headerAddress, size); | |
| 1478 else | |
| 1479 memcpy(movedObject, headerAddress, size); | |
| 1480 compact->movedObject(header->payload(), | |
> haraken 2016/12/05 11:27:47: movedObject => relocate?
> sof 2016/12/05 19:30:07: Alright.
| 1481 movedObject + sizeof(HeapObjectHeader)); | |
| 1482 } | |
| 1483 headerAddress += size; | |
| 1484 allocationPoint += size; | |
| 1485 DCHECK(allocationPoint <= arena->payloadSize()); | |
| 1486 } | |
| 1487 if (markedObjectSize) | |
| 1488 pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize); | |
| 1489 | |
| 1490 // Clear the page; it'll either be used for compacted objects or freed. | |
| 1491 if (arena != this) | |
| 1492 memset(payload(), 0, payloadSize()); | |
| 1493 else | |
| 1494 memset(payload() + allocationPoint, 0, payloadSize() - allocationPoint); | |
| 1495 return allocationPoint; | |
| 1496 } | |
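The compact->movedObject() call (agreed in the review exchange above to be renamed relocate) is the hand-off to the compactor's fix-up machinery. A loose sketch, assuming the compactor records an old-payload to new-payload mapping that a later pass consults when rewriting registered slots; this is illustrative, not Blink's actual HeapCompact:

```cpp
#include <unordered_map>

// Hypothetical relocation bookkeeping; the method name echoes the call
// site above, the body is an assumption.
class RelocationModel {
 public:
  // Invoked once per live object copied during sweepAndCompact().
  void movedObject(void* from, void* to) { m_relocated[from] = to; }

  // After all pages are compacted, rewrite one registered pointer slot.
  void fixupSlot(void** slot) const {
    auto it = m_relocated.find(*slot);
    if (it != m_relocated.end())
      *slot = it->second;
  }

 private:
  std::unordered_map<void*, void*> m_relocated;
};
```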
| 1497 | |
| 1249 void NormalPage::makeConsistentForGC() { | 1498 void NormalPage::makeConsistentForGC() { |
| 1250 size_t markedObjectSize = 0; | 1499 size_t markedObjectSize = 0; |
| 1251 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { | 1500 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { |
| 1252 HeapObjectHeader* header = | 1501 HeapObjectHeader* header = |
| 1253 reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1502 reinterpret_cast<HeapObjectHeader*>(headerAddress); |
| 1254 ASSERT(header->size() < blinkPagePayloadSize()); | 1503 ASSERT(header->size() < blinkPagePayloadSize()); |
| 1255 // Check if a free list entry first since we cannot call | 1504 // Check if a free list entry first since we cannot call |
| 1256 // isMarked on a free list entry. | 1505 // isMarked on a free list entry. |
| 1257 if (header->isFree()) { | 1506 if (header->isFree()) { |
| 1258 headerAddress += header->size(); | 1507 headerAddress += header->size(); |
| (...skipping 372 matching lines...) | |
| 1631 | 1880 |
| 1632 m_hasEntries = true; | 1881 m_hasEntries = true; |
| 1633 size_t index = hash(address); | 1882 size_t index = hash(address); |
| 1634 ASSERT(!(index & 1)); | 1883 ASSERT(!(index & 1)); |
| 1635 Address cachePage = roundToBlinkPageStart(address); | 1884 Address cachePage = roundToBlinkPageStart(address); |
| 1636 m_entries[index + 1] = m_entries[index]; | 1885 m_entries[index + 1] = m_entries[index]; |
| 1637 m_entries[index] = cachePage; | 1886 m_entries[index] = cachePage; |
| 1638 } | 1887 } |
| 1639 | 1888 |
| 1640 } // namespace blink | 1889 } // namespace blink |