Chromium Code Reviews
| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 17 matching lines...) | |
| 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 29 */ | 29 */ |
| 30 | 30 |
| 31 #include "platform/heap/HeapPage.h" | 31 #include "platform/heap/HeapPage.h" |
| 32 | 32 |
| 33 #include "base/trace_event/process_memory_dump.h" | 33 #include "base/trace_event/process_memory_dump.h" |
| 34 #include "platform/MemoryCoordinator.h" | 34 #include "platform/MemoryCoordinator.h" |
| 35 #include "platform/ScriptForbiddenScope.h" | 35 #include "platform/ScriptForbiddenScope.h" |
| 36 #include "platform/heap/BlinkGCMemoryDumpProvider.h" | 36 #include "platform/heap/BlinkGCMemoryDumpProvider.h" |
| 37 #include "platform/heap/CallbackStack.h" | 37 #include "platform/heap/CallbackStack.h" |
| 38 #include "platform/heap/HeapCompact.h" | |
| 38 #include "platform/heap/MarkingVisitor.h" | 39 #include "platform/heap/MarkingVisitor.h" |
| 39 #include "platform/heap/PageMemory.h" | 40 #include "platform/heap/PageMemory.h" |
| 40 #include "platform/heap/PagePool.h" | 41 #include "platform/heap/PagePool.h" |
| 41 #include "platform/heap/SafePoint.h" | 42 #include "platform/heap/SafePoint.h" |
| 42 #include "platform/heap/ThreadState.h" | 43 #include "platform/heap/ThreadState.h" |
| 43 #include "platform/tracing/TraceEvent.h" | 44 #include "platform/tracing/TraceEvent.h" |
| 44 #include "platform/tracing/web_memory_allocator_dump.h" | 45 #include "platform/tracing/web_memory_allocator_dump.h" |
| 45 #include "platform/tracing/web_process_memory_dump.h" | 46 #include "platform/tracing/web_process_memory_dump.h" |
| 46 #include "public/platform/Platform.h" | 47 #include "public/platform/Platform.h" |
| 47 #include "wtf/Assertions.h" | 48 #include "wtf/Assertions.h" |
| (...skipping 146 matching lines...) | |
| 194 ASSERT(!page->hasBeenSwept()); | 195 ASSERT(!page->hasBeenSwept()); |
| 195 page->invalidateObjectStartBitmap(); | 196 page->invalidateObjectStartBitmap(); |
| 196 } | 197 } |
| 197 if (previousPage) { | 198 if (previousPage) { |
| 198 ASSERT(m_firstUnsweptPage); | 199 ASSERT(m_firstUnsweptPage); |
| 199 previousPage->m_next = m_firstPage; | 200 previousPage->m_next = m_firstPage; |
| 200 m_firstPage = m_firstUnsweptPage; | 201 m_firstPage = m_firstUnsweptPage; |
| 201 m_firstUnsweptPage = nullptr; | 202 m_firstUnsweptPage = nullptr; |
| 202 } | 203 } |
| 203 ASSERT(!m_firstUnsweptPage); | 204 ASSERT(!m_firstUnsweptPage); |
| 205 | |
| 206 HeapCompact* heapCompactor = getThreadState()->heap().compaction(); | |
| 207 if (!heapCompactor->isCompactingArena(arenaIndex())) | |
| 208 return; | |
| 209 | |
| 210 BasePage* nextPage = m_firstPage; | |
| 211 while (nextPage) { | |
| 212 if (!nextPage->isLargeObjectPage()) | |
| 213 heapCompactor->addCompactablePage(nextPage); | |
| 214 nextPage = nextPage->next(); | |
| 215 } | |
| 204 } | 216 } |
| 205 | 217 |
| 206 void BaseArena::makeConsistentForMutator() { | 218 void BaseArena::makeConsistentForMutator() { |
| 207 clearFreeLists(); | 219 clearFreeLists(); |
| 208 ASSERT(isConsistentForGC()); | 220 ASSERT(isConsistentForGC()); |
| 209 ASSERT(!m_firstPage); | 221 ASSERT(!m_firstPage); |
| 210 | 222 |
| 211 // Drop marks from marked objects and rebuild free lists in preparation for | 223 // Drop marks from marked objects and rebuild free lists in preparation for |
| 212 // resuming the executions of mutators. | 224 // resuming the executions of mutators. |
| 213 BasePage* previousPage = nullptr; | 225 BasePage* previousPage = nullptr; |
| (...skipping 219 matching lines...) | |
| 433 m_promptlyFreedSize(0), | 445 m_promptlyFreedSize(0), |
| 434 m_isLazySweeping(false) { | 446 m_isLazySweeping(false) { |
| 435 clearFreeLists(); | 447 clearFreeLists(); |
| 436 } | 448 } |
| 437 | 449 |
| 438 void NormalPageArena::clearFreeLists() { | 450 void NormalPageArena::clearFreeLists() { |
| 439 setAllocationPoint(nullptr, 0); | 451 setAllocationPoint(nullptr, 0); |
| 440 m_freeList.clear(); | 452 m_freeList.clear(); |
| 441 } | 453 } |
| 442 | 454 |
| 455 size_t NormalPageArena::arenaSize() { | |
| 456 size_t size = 0; | |
| 457 BasePage* page = m_firstPage; | |
| 458 while (page) { | |
| 459 size += page->size(); | |
| 460 page = page->next(); | |
| 461 } | |
| 462 LOG_HEAP_FREELIST_VERBOSE("Heap size: %zu (%d)\n", size, arenaIndex()); | |
| 463 return size; | |
| 464 } | |
| 465 | |
| 466 size_t NormalPageArena::freeListSize() { | |
| 467 size_t freeSize = m_freeList.freeListSize(); | |
| 468 LOG_HEAP_FREELIST_VERBOSE("Free size: %zu (%d)\n", freeSize, arenaIndex()); | |
| 469 return freeSize; | |
| 470 } | |
| 471 | |
| 472 void NormalPageArena::sweepAndCompact() { | |
| 473 ThreadHeap& heap = getThreadState()->heap(); | |
| 474 if (!heap.compaction()->isCompactingArena(arenaIndex())) | |
| 475 return; | |
| 476 | |
| 477 DCHECK(!hasCurrentAllocationArea()); | |
| 478 | |
| 479 // Compaction is performed in-place, sliding objects down over unused | |
| 480 // holes for a smaller heap page footprint and improved locality. | |
| 481 // A "compaction pointer" is consequently kept, pointing to the next | |
| 482 // available address to move objects down to. It will belong to one | |
| 483 // of the already sweep-compacted pages for this arena, but as compaction | |
| 484 // proceeds, it will not belong to the same page as the one being | |
| 485 // currently compacted. | |
| 486 // | |
| 487 // The compaction pointer is represented by the | |
| 488 // |(availablePages, allocationPoint)| pair, with |allocationPoint| | |
| 489 // being the offset into |*availablePages|, making up the next | |
| 490 // available location. As the compaction of an arena page may cause the | |
| 491 // compaction pointer to exhaust the current page it is compacting into, | |
| 492 // page compaction can update both the current page of the compaction | |
| 493 // pointer, as well as the allocation point. | |
| 494 // | |
| 495 // By construction, the page compaction can be performed without having | |
| 496 // to allocate any new pages. So to arrange for the page compaction's | |
| 497 // supply of freed, available pages, we chain them together after each | |
| 498 // has been "compacted from". The page compaction will then reuse those | |
| 499 // as needed, and once finished, the chained pages after |*availablePages|, | |
| 500 // can be released back to the OS. | |
haraken 2016/12/06 13:30:39: This comment looks awesome!
| 501 NormalPage* availablePages = nullptr; | |
| 502 size_t allocationPoint = 0; | |
| 503 | |
| 504 while (m_firstUnsweptPage) { | |
| 505 BasePage* page = m_firstUnsweptPage; | |
| 506 if (page->isEmpty()) { | |
| 507 page->unlink(&m_firstUnsweptPage); | |
| 508 page->removeFromHeap(); | |
| 509 continue; | |
| 510 } | |
| 511 // Large objects do not belong to this arena. | |
| 512 DCHECK(!page->isLargeObjectPage()); | |
| 513 NormalPage* normalPage = static_cast<NormalPage*>(page); | |
| 514 normalPage->unlink(&m_firstUnsweptPage); | |
| 515 normalPage->markAsSwept(); | |
| 516 if (!availablePages) { | |
| 517 availablePages = normalPage; | |
| 518 } else { | |
| 519 // Add |normalPage| onto the available pages chain, after the | |
| 520 // current head as it is the one currently being compacted | |
| 521 // into. | |
haraken 2016/12/06 13:30:39: Hmm. What's an issue of just doing normalPage->lin
sof 2016/12/06 21:39:35: Thanks for your input, but it seems very complex t
haraken 2016/12/07 08:55:11: Okay. But even if we want to use the availablePage
sof 2016/12/07 10:45:08: As the comment above explains, we're allocating fr
haraken 2016/12/07 12:44:33: Ah, ok, now I understand the logic. In addition t
sof 2016/12/07 13:01:59: Stating the obvious perhaps, but notice that the h
haraken 2016/12/07 15:45:53: You're right, but if we keep |availablePages| and
sof 2016/12/07 22:37:25: given the state threading that needs to happen, it
| 522 BasePage* secondPage; | |
| 523 availablePages->unlink(&secondPage); | |
| 524 normalPage->link(&secondPage); | |
| 525 // Note: |secondPage| is now aliased to |normalPage|, but we | |
| 526 // have to preserve |normalPage| for the call below. | |
| 527 availablePages->link(&secondPage); | |
| 528 } | |
| 529 allocationPoint = normalPage->sweepAndCompact( | |
| 530 availablePages, allocationPoint, &m_firstPage); | |
| 531 } | |
| 532 | |
| 533 // Release unused tail of |availablePages| back to the OS. | |
| 534 | |
| 535 BasePage* freePages = nullptr; | |
| 536 if (availablePages) { | |
| 537 // If the first available page has been allocated into, add it to the | |
| 538 // heap's list of swept pages. Otherwise we hand it back to the OS below. | |
| 539 if (allocationPoint) { | |
| 540 availablePages->unlink(&freePages); | |
| 541 availablePages->link(&m_firstPage); | |
| 542 } else { | |
| 543 freePages = availablePages; | |
| 544 availablePages = nullptr; | |
| 545 } | |
| 546 } | |
| 547 | |
| 548 size_t freedSize = 0; | |
| 549 size_t freedPageCount = 0; | |
| 550 if (availablePages && allocationPoint != availablePages->payloadSize()) { | |
haraken 2016/12/06 13:30:39: If we rewrite this branch as follows: if (allocat
sof 2016/12/06 21:39:35: Thanks for the suggestion, but what's here already
haraken 2016/12/07 08:55:11: It took me a while to understand why line 533 - 54
| 551 // Put the remainder of the page onto the free list. | |
| 552 freedSize = availablePages->payloadSize() - allocationPoint; | |
| 553 Address payload = availablePages->payload(); | |
| 554 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \ | |
| 555 defined(MEMORY_SANITIZER) | |
haraken 2016/12/06 13:30:39: Should we use SET_MEMORY_INACCESSIBLE?
sof 2016/12/06 21:39:35: Done.
| 556 FreeList::zapFreedMemory(payload + allocationPoint, freedSize); | |
| 557 #endif | |
| 558 availablePages->arenaForNormalPage()->addToFreeList( | |
| 559 payload + allocationPoint, freedSize); | |
| 560 } | |
| 561 availablePages = static_cast<NormalPage*>(freePages); | |
| 562 while (availablePages) { | |
| 563 size_t pageSize = availablePages->size(); | |
| 564 #if DEBUG_HEAP_COMPACTION | |
| 565 if (!freedPageCount) | |
| 566 LOG_HEAP_COMPACTION("Releasing:"); | |
| 567 LOG_HEAP_COMPACTION(" [%p, %p]", availablePages, availablePages + pageSize); | |
| 568 #endif | |
| 569 freedSize += pageSize; | |
| 570 freedPageCount++; | |
| 571 BasePage* nextPage; | |
| 572 availablePages->unlink(&nextPage); | |
| 573 availablePages->removeFromHeap(); | |
| 574 availablePages = static_cast<NormalPage*>(nextPage); | |
| 575 } | |
| 576 if (freePages) | |
| 577 LOG_HEAP_COMPACTION("\n"); | |
| 578 heap.compaction()->finishedArenaCompaction(this, freedPageCount, freedSize); | |
| 579 } | |
| 580 | |
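For readers less familiar with sliding compaction, the following is a minimal, self-contained model of the |(availablePages, allocationPoint)| compaction pointer described in the comment inside sweepAndCompact() above. All names here (ToyObject, ToyPage, compact) are illustrative only and are not Blink types; this is a sketch of the idea, not the actual implementation.

#include <cstddef>
#include <cstring>
#include <vector>

// One heap object: a header carrying its total size and its mark bit.
struct ToyObject {
  size_t size;  // total size in bytes, header included; 0 means "end of page"
  bool live;    // survived marking
};

// One fixed-size page of raw payload bytes.
struct ToyPage {
  std::vector<char> payload;
  explicit ToyPage(size_t bytes) : payload(bytes, 0) {}
};

// Slide live objects down over dead ones. (targetPage, allocationPoint) is
// the compaction pointer: it always trails (or equals) the page currently
// being swept, so no new pages are ever needed. Returns how many pages
// remain in use; the tail of the page list could then be released back to
// the OS, just as sweepAndCompact() does.
size_t compact(std::vector<ToyPage>& pages) {
  if (pages.empty())
    return 0;
  size_t targetPage = 0;
  size_t allocationPoint = 0;
  for (ToyPage& source : pages) {
    for (size_t offset = 0; offset < source.payload.size();) {
      ToyObject header;
      std::memcpy(&header, &source.payload[offset], sizeof header);
      if (!header.size)
        break;  // remainder of this page is unused
      if (header.live) {
        if (allocationPoint + header.size > pages[targetPage].payload.size()) {
          // The current target page is exhausted; compact into the next
          // already-swept page, exactly as the real code switches pages.
          ++targetPage;
          allocationPoint = 0;
        }
        // Source and destination may overlap when compacting within the
        // same page, so use memmove (the real code picks memmove/memcpy).
        std::memmove(&pages[targetPage].payload[allocationPoint],
                     &source.payload[offset], header.size);
        allocationPoint += header.size;
      }
      offset += header.size;
    }
  }
  return targetPage + 1;
}

The invariant that makes this safe is the one the diff relies on: live bytes copied so far never exceed bytes already swept, so the compaction destination never overtakes the object currently being scanned.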
| 443 #if ENABLE(ASSERT) | 581 #if ENABLE(ASSERT) |
| 444 bool NormalPageArena::isConsistentForGC() { | 582 bool NormalPageArena::isConsistentForGC() { |
| 445 // A thread heap is consistent for sweeping if none of the pages to be swept | 583 // A thread heap is consistent for sweeping if none of the pages to be swept |
| 446 // contain a freelist block or the current allocation point. | 584 // contain a freelist block or the current allocation point. |
| 447 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { | 585 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { |
| 448 for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; | 586 for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; |
| 449 freeListEntry; freeListEntry = freeListEntry->next()) { | 587 freeListEntry; freeListEntry = freeListEntry->next()) { |
| 450 if (pagesToBeSweptContains(freeListEntry->getAddress())) | 588 if (pagesToBeSweptContains(freeListEntry->getAddress())) |
| 451 return false; | 589 return false; |
| 452 } | 590 } |
| (...skipping 21 matching lines...) | |
| 474 ->createMemoryAllocatorDumpForCurrentGC(dumpName + "/buckets"); | 612 ->createMemoryAllocatorDumpForCurrentGC(dumpName + "/buckets"); |
| 475 base::trace_event::MemoryAllocatorDump* pagesDump = | 613 base::trace_event::MemoryAllocatorDump* pagesDump = |
| 476 BlinkGCMemoryDumpProvider::instance() | 614 BlinkGCMemoryDumpProvider::instance() |
| 477 ->createMemoryAllocatorDumpForCurrentGC(dumpName + "/pages"); | 615 ->createMemoryAllocatorDumpForCurrentGC(dumpName + "/pages"); |
| 478 BlinkGCMemoryDumpProvider::instance() | 616 BlinkGCMemoryDumpProvider::instance() |
| 479 ->currentProcessMemoryDump() | 617 ->currentProcessMemoryDump() |
| 480 ->AddOwnershipEdge(pagesDump->guid(), bucketsDump->guid()); | 618 ->AddOwnershipEdge(pagesDump->guid(), bucketsDump->guid()); |
| 481 } | 619 } |
| 482 } | 620 } |
| 483 | 621 |
| 484 void NormalPageArena::allocatePage() { | 622 NormalPage* NormalPageArena::allocatePage() { |
| 485 getThreadState()->shouldFlushHeapDoesNotContainCache(); | 623 getThreadState()->shouldFlushHeapDoesNotContainCache(); |
| 486 PageMemory* pageMemory = | 624 PageMemory* pageMemory = |
| 487 getThreadState()->heap().getFreePagePool()->takeFreePage(arenaIndex()); | 625 getThreadState()->heap().getFreePagePool()->takeFreePage(arenaIndex()); |
| 488 | 626 |
| 489 if (!pageMemory) { | 627 if (!pageMemory) { |
| 490 // Allocate a memory region for blinkPagesPerRegion pages that | 628 // Allocate a memory region for blinkPagesPerRegion pages that |
| 491 // will each have the following layout. | 629 // will each have the following layout. |
| 492 // | 630 // |
| 493 // [ guard os page | ... payload ... | guard os page ] | 631 // [ guard os page | ... payload ... | guard os page ] |
| 494 // ^---{ aligned to blink page size } | 632 // ^---{ aligned to blink page size } |
| (...skipping 12 matching lines...) | |
| 507 // the limit of the number of mmapped regions OS can support | 645 // the limit of the number of mmapped regions OS can support |
| 508 // (e.g., /proc/sys/vm/max_map_count in Linux). | 646 // (e.g., /proc/sys/vm/max_map_count in Linux). |
| 509 RELEASE_ASSERT(result); | 647 RELEASE_ASSERT(result); |
| 510 pageMemory = memory; | 648 pageMemory = memory; |
| 511 } else { | 649 } else { |
| 512 getThreadState()->heap().getFreePagePool()->addFreePage(arenaIndex(), | 650 getThreadState()->heap().getFreePagePool()->addFreePage(arenaIndex(), |
| 513 memory); | 651 memory); |
| 514 } | 652 } |
| 515 } | 653 } |
| 516 } | 654 } |
| 655 return new (pageMemory->writableStart()) NormalPage(pageMemory, this); | |
| 656 } | |
| 517 | 657 |
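The layout comment above ("[ guard os page | ... payload ... | guard os page ]") describes bracketing each Blink page with inaccessible OS pages so that stray writes past the payload fault immediately. A minimal POSIX sketch of that idea follows; it is illustrative only (Blink's PageMemory/region allocation code is not shown in this diff), and assumes payloadSize is a multiple of the OS page size.

#include <sys/mman.h>
#include <cassert>
#include <cstddef>

// Reserve |payloadSize| bytes of usable memory with one inaccessible guard
// page on each side. Returns the start of the usable payload.
void* allocateWithGuardPages(size_t payloadSize, size_t osPageSize) {
  size_t total = payloadSize + 2 * osPageSize;
  // Reserve the whole range inaccessible first...
  void* base = mmap(nullptr, total, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(base != MAP_FAILED);
  // ...then make only the middle (the payload) readable and writable,
  // leaving the first and last OS page as guards.
  char* payload = static_cast<char*>(base) + osPageSize;
  int rc = mprotect(payload, payloadSize, PROT_READ | PROT_WRITE);
  assert(rc == 0);
  return payload;
}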
| 518 NormalPage* page = | 658 void NormalPageArena::allocateAndAddPage() { |
| 519 new (pageMemory->writableStart()) NormalPage(pageMemory, this); | 659 NormalPage* page = allocatePage(); |
| 520 page->link(&m_firstPage); | 660 page->link(&m_firstPage); |
| 521 | 661 |
| 522 getThreadState()->heap().heapStats().increaseAllocatedSpace(page->size()); | 662 getThreadState()->heap().heapStats().increaseAllocatedSpace(page->size()); |
| 523 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 663 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
| 524 // Allow the following addToFreeList() to add the newly allocated memory | 664 // Allow the following addToFreeList() to add the newly allocated memory |
| 525 // to the free list. | 665 // to the free list. |
| 526 ASAN_UNPOISON_MEMORY_REGION(page->payload(), page->payloadSize()); | 666 ASAN_UNPOISON_MEMORY_REGION(page->payload(), page->payloadSize()); |
| 527 Address address = page->payload(); | 667 Address address = page->payload(); |
| 528 for (size_t i = 0; i < page->payloadSize(); i++) | 668 for (size_t i = 0; i < page->payloadSize(); i++) |
| 529 address[i] = reuseAllowedZapValue; | 669 address[i] = reuseAllowedZapValue; |
| (...skipping 276 matching lines...) | |
| 806 return result; | 946 return result; |
| 807 } | 947 } |
| 808 | 948 |
| 809 // 6. Complete sweeping. | 949 // 6. Complete sweeping. |
| 810 getThreadState()->completeSweep(); | 950 getThreadState()->completeSweep(); |
| 811 | 951 |
| 812 // 7. Check if we should trigger a GC. | 952 // 7. Check if we should trigger a GC. |
| 813 getThreadState()->scheduleGCIfNeeded(); | 953 getThreadState()->scheduleGCIfNeeded(); |
| 814 | 954 |
| 815 // 8. Add a new page to this heap. | 955 // 8. Add a new page to this heap. |
| 816 allocatePage(); | 956 allocateAndAddPage(); |
| 817 | 957 |
| 818 // 9. Try to allocate from a free list. This allocation must succeed. | 958 // 9. Try to allocate from a free list. This allocation must succeed. |
| 819 result = allocateFromFreeList(allocationSize, gcInfoIndex); | 959 result = allocateFromFreeList(allocationSize, gcInfoIndex); |
| 820 RELEASE_ASSERT(result); | 960 RELEASE_ASSERT(result); |
| 821 return result; | 961 return result; |
| 822 } | 962 } |
| 823 | 963 |
| 824 Address NormalPageArena::allocateFromFreeList(size_t allocationSize, | 964 Address NormalPageArena::allocateFromFreeList(size_t allocationSize, |
| 825 size_t gcInfoIndex) { | 965 size_t gcInfoIndex) { |
| 826 // Try reusing a block from the largest bin. The underlying reasoning | 966 // Try reusing a block from the largest bin. The underlying reasoning |
| (...skipping 243 matching lines...) | |
| 1070 | 1210 |
| 1071 void NEVER_INLINE FreeList::checkFreedMemoryIsZapped(Address address, | 1211 void NEVER_INLINE FreeList::checkFreedMemoryIsZapped(Address address, |
| 1072 size_t size) { | 1212 size_t size) { |
| 1073 for (size_t i = 0; i < size; i++) { | 1213 for (size_t i = 0; i < size; i++) { |
| 1074 ASSERT(address[i] == reuseAllowedZapValue || | 1214 ASSERT(address[i] == reuseAllowedZapValue || |
| 1075 address[i] == reuseForbiddenZapValue); | 1215 address[i] == reuseForbiddenZapValue); |
| 1076 } | 1216 } |
| 1077 } | 1217 } |
| 1078 #endif | 1218 #endif |
| 1079 | 1219 |
| 1220 size_t FreeList::freeListSize() const { | |
| 1221 size_t freeSize = 0; | |
| 1222 for (unsigned i = 0; i < blinkPageSizeLog2; ++i) { | |
| 1223 FreeListEntry* entry = m_freeLists[i]; | |
| 1224 while (entry) { | |
| 1225 freeSize += entry->size(); | |
| 1226 entry = entry->next(); | |
| 1227 } | |
| 1228 } | |
| 1229 #if DEBUG_HEAP_FREELIST | |
| 1230 if (freeSize) { | |
| 1231 LOG_HEAP_FREELIST_VERBOSE("FreeList(%p): %zu\n", this, freeSize); | |
| 1232 for (unsigned i = 0; i < blinkPageSizeLog2; ++i) { | |
| 1233 FreeListEntry* entry = m_freeLists[i]; | |
| 1234 size_t bucket = 0; | |
| 1235 size_t count = 0; | |
| 1236 while (entry) { | |
| 1237 bucket += entry->size(); | |
| 1238 count++; | |
| 1239 entry = entry->next(); | |
| 1240 } | |
| 1241 if (bucket) { | |
| 1242 LOG_HEAP_FREELIST_VERBOSE("[%d, %d]: %zu (%zu)\n", 0x1 << i, | |
| 1243 0x1 << (i + 1), bucket, count); | |
| 1244 } | |
| 1245 } | |
| 1246 } | |
| 1247 #endif | |
| 1248 return freeSize; | |
| 1249 } | |
| 1250 | |
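The bucket ranges logged above are powers of two: a free block of |size| bytes lives in bucket floor(log2(size)), so bucket i covers sizes in [2^i, 2^(i+1)). A standalone sketch of that index computation, mirroring the loop that begins in FreeList::bucketIndexForSize() below (its body is elided in this diff):

#include <cassert>
#include <cstddef>
#include <cstdio>

// Highest set bit of |size| picks the bucket.
int bucketIndexForSize(size_t size) {
  assert(size > 0);
  int index = -1;
  while (size) {
    size >>= 1;
    ++index;
  }
  return index;
}

int main() {
  // Sizes 32..63 all land in bucket 5, matching the "[32, 64]" range
  // printed by the LOG_HEAP_FREELIST_VERBOSE output above.
  std::printf("%d %d %d\n", bucketIndexForSize(32), bucketIndexForSize(63),
              bucketIndexForSize(64));  // prints "5 5 6"
  return 0;
}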
| 1080 void FreeList::clear() { | 1251 void FreeList::clear() { |
| 1081 m_biggestFreeListIndex = 0; | 1252 m_biggestFreeListIndex = 0; |
| 1082 for (size_t i = 0; i < blinkPageSizeLog2; ++i) | 1253 for (size_t i = 0; i < blinkPageSizeLog2; ++i) |
| 1083 m_freeLists[i] = nullptr; | 1254 m_freeLists[i] = nullptr; |
| 1084 } | 1255 } |
| 1085 | 1256 |
| 1086 int FreeList::bucketIndexForSize(size_t size) { | 1257 int FreeList::bucketIndexForSize(size_t size) { |
| 1087 ASSERT(size > 0); | 1258 ASSERT(size > 0); |
| 1088 int index = -1; | 1259 int index = -1; |
| 1089 while (size) { | 1260 while (size) { |
| (...skipping 149 matching lines...) | |
| 1239 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) | 1410 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) |
| 1240 if (MemoryCoordinator::isLowEndDevice()) | 1411 if (MemoryCoordinator::isLowEndDevice()) |
| 1241 discardPages(startOfGap + sizeof(FreeListEntry), payloadEnd()); | 1412 discardPages(startOfGap + sizeof(FreeListEntry), payloadEnd()); |
| 1242 #endif | 1413 #endif |
| 1243 } | 1414 } |
| 1244 | 1415 |
| 1245 if (markedObjectSize) | 1416 if (markedObjectSize) |
| 1246 pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize); | 1417 pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize); |
| 1247 } | 1418 } |
| 1248 | 1419 |
| 1420 size_t NormalPage::sweepAndCompact(NormalPage*& arena, | |
haraken 2016/12/06 13:30:39: arena => availablePages
sof 2016/12/06 21:39:35: Renamed.
| 1421 size_t allocationPoint, | |
| 1422 BasePage** firstPage) { | |
| 1423 size_t markedObjectSize = 0; | |
| 1424 NormalPageArena* pageArena = arenaForNormalPage(); | |
| 1425 HeapCompact* compact = pageArena->getThreadState()->heap().compaction(); | |
| 1426 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { | |
| 1427 HeapObjectHeader* header = | |
| 1428 reinterpret_cast<HeapObjectHeader*>(headerAddress); | |
| 1429 size_t size = header->size(); | |
| 1430 DCHECK(size > 0 && size < blinkPagePayloadSize()); | |
| 1431 | |
| 1432 if (header->isPromptlyFreed()) | |
| 1433 pageArena->decreasePromptlyFreedSize(size); | |
| 1434 if (header->isFree()) { | |
| 1435 // Unpoison the freelist entry so that we | |
| 1436 // can compact into it as wanted. | |
| 1437 ASAN_UNPOISON_MEMORY_REGION(headerAddress, size); | |
haraken 2016/12/06 13:30:39: SET_MEMORY_INACCESSIBLE + CHECK_MEMORY_INACCESSIBL
sof 2016/12/06 21:39:35: No, please see the comment above why that isn't ap
| 1438 headerAddress += size; | |
| 1439 continue; | |
| 1440 } | |
| 1441 #if ENABLE(ASSERT) | |
haraken 2016/12/06 13:30:39: Remove? In any case, mimic ToT.
sof 2016/12/06 21:39:34: It currently is (but the other uses tend to still
| 1442 DCHECK(header->checkHeader()); | |
| 1443 #endif | |
| 1444 | |
| 1445 // This is a fast version of header->payloadSize(). | |
| 1446 size_t payloadSize = size - sizeof(HeapObjectHeader); | |
| 1447 Address payload = header->payload(); | |
| 1448 if (!header->isMarked()) { | |
| 1449 // For ASan, unpoison the object before calling the finalizer. The | |
| 1450 // finalized object will be zero-filled and poison'ed afterwards. | |
| 1451 // Given all other unmarked objects are poisoned, ASan will detect | |
| 1452 // an error if the finalizer touches any other on-heap object that | |
| 1453 // dies at the same GC cycle. | |
| 1454 ASAN_UNPOISON_MEMORY_REGION(headerAddress, size); | |
| 1455 header->finalize(payload, payloadSize); | |
| 1456 | |
| 1457 // As compaction is under way, leave the freed memory accessible | |
| 1458 // while compacting the rest of the page. We just zap the payload | |
| 1459 // to catch out other finalizers trying to access it. | |
| 1460 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \ | |
haraken 2016/12/06 13:30:39: SET_MEMORY_INACCESSIBLE
sof 2016/12/06 21:39:35: Not appropriate at this stage, see comment above (
| 1461 defined(MEMORY_SANITIZER) | |
| 1462 FreeList::zapFreedMemory(payload, payloadSize); | |
| 1463 #endif | |
| 1464 headerAddress += size; | |
| 1465 continue; | |
| 1466 } | |
| 1467 DCHECK(header->isMarked()); | |
haraken 2016/12/06 13:30:39: Remove. This is checked in unmark().
sof 2016/12/06 21:39:35: Done.
| 1468 header->unmark(); | |
| 1469 markedObjectSize += size; | |
haraken 2016/12/06 13:30:39: I'd move this to line 1509.
sof 2016/12/06 21:39:35: Moved.
| 1470 // Allocate and copy over the live object. | |
| 1471 if (arena->payload() + allocationPoint + size > arena->payloadEnd()) { | |
| 1472 // Can't fit on current allocation page. | |
| 1473 // TODO(sof): be more clever & compact later objects into |arena|'s unused | |
| 1474 // slop. | |
| 1475 BasePage* nextP; | |
haraken 2016/12/06 13:30:39: nextP => nextAvailablePage
sof 2016/12/06 21:39:35: alright..
| 1476 arena->unlink(&nextP); | |
| 1477 arena->link(firstPage); | |
| 1478 size_t freeSize = arena->payloadSize() - allocationPoint; | |
| 1479 if (freeSize) { | |
| 1480 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \ | |
| 1481 defined(MEMORY_SANITIZER) | |
haraken 2016/12/06 13:30:39: This #if wouldn't be needed -- it's covered by SET
sof 2016/12/06 21:39:34: Done.
| 1482 SET_MEMORY_INACCESSIBLE(arena->payload() + allocationPoint, freeSize); | |
| 1483 #endif | |
| 1484 arena->arenaForNormalPage()->addToFreeList( | |
haraken 2016/12/06 13:30:39: 'arena->arenaForNormalPage()->' would not be neede
sof 2016/12/06 21:39:35: It's preferable not to subtly build in the assumpt
| 1485 arena->payload() + allocationPoint, freeSize); | |
| 1486 } | |
| 1487 arena = static_cast<NormalPage*>(nextP); | |
| 1488 allocationPoint = 0; | |
| 1489 } | |
| 1490 Address movedObject = arena->payload() + allocationPoint; | |
| 1491 if (LIKELY(movedObject != headerAddress)) { | |
haraken 2016/12/06 13:30:39: Just to confirm: movedObject == headerAddress can
sof 2016/12/06 21:39:35: Yes, pretty much -- it could conceivably also happ
| 1492 #if defined(ADDRESS_SANITIZER) | |
| 1493 // Unpoison the header + if it is a vector backing | |
| 1494 // store object, let go of the container annotations. | |
| 1495 // Do that by unpoisoning the payload entirely. | |
haraken 2016/12/06 13:30:39: Would you help me understand why you need the unpo
sof 2016/12/06 21:39:35: The above comment explains this already.
| 1496 ASAN_UNPOISON_MEMORY_REGION(header, sizeof(HeapObjectHeader)); | |
| 1497 if (ThreadState::isVectorArenaIndex( | |
| 1498 arena->arenaForNormalPage()->arenaIndex())) { | |
haraken 2016/12/06 13:30:39: At least, you can move the arenaIndex check outsid
sof 2016/12/06 21:39:35: Done; a bug to consult |arena| here, btw.
| 1499 ASAN_UNPOISON_MEMORY_REGION(payload, payloadSize); | |
| 1500 } | |
| 1501 #endif | |
| 1502 // Use a non-overlapping copy, if possible. | |
| 1503 if (arena == this) | |
| 1504 memmove(movedObject, headerAddress, size); | |
| 1505 else | |
| 1506 memcpy(movedObject, headerAddress, size); | |
| 1507 compact->relocate(payload, movedObject + sizeof(HeapObjectHeader)); | |
| 1508 } | |
| 1509 headerAddress += size; | |
| 1510 allocationPoint += size; | |
| 1511 DCHECK(allocationPoint <= arena->payloadSize()); | |
| 1512 } | |
| 1513 if (markedObjectSize) | |
| 1514 pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize); | |
| 1515 | |
| 1516 // Clear the page; it'll either be used for compacted objects or freed. | |
| 1517 if (arena != this) | |
haraken 2016/12/06 13:30:39: Shouldn't this be equal to 'if (allocationPoint ==
sof 2016/12/06 21:39:35: If the compacted page overlaps with where the comp
| 1518 memset(payload(), 0, payloadSize()); | |
| 1519 else | |
| 1520 memset(payload() + allocationPoint, 0, payloadSize() - allocationPoint); | |
haraken 2016/12/06 13:30:39: Just in case, I'd prefer zapping the memory. And c
sof 2016/12/06 21:39:35: Now zapped/memset(), as appropriate.
| 1521 return allocationPoint; | |
| 1522 } | |
| 1523 | |
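NormalPage::sweepAndCompact() reports every move through compact->relocate(payload, movedObject + sizeof(HeapObjectHeader)) so that references into moved objects can be fixed up after compaction. The actual HeapCompact machinery is not part of this diff; the following is a toy model of that bookkeeping with purely hypothetical names, meant only to illustrate what "relocate" has to record.

#include <unordered_map>

// Hypothetical stand-in for the compactor's relocation bookkeeping.
class ToyRelocationMap {
 public:
  // Called once per moved object: the payload at |from| now lives at |to|.
  void relocate(void* from, void* to) { m_moved[from] = to; }

  // Called for each recorded slot after compaction: if the slot still
  // points at an old payload address, rewrite it to the new location.
  void fixup(void** slot) const {
    auto it = m_moved.find(*slot);
    if (it != m_moved.end())
      *slot = it->second;
  }

 private:
  std::unordered_map<void*, void*> m_moved;
};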
| 1249 void NormalPage::makeConsistentForGC() { | 1524 void NormalPage::makeConsistentForGC() { |
| 1250 size_t markedObjectSize = 0; | 1525 size_t markedObjectSize = 0; |
| 1251 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { | 1526 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { |
| 1252 HeapObjectHeader* header = | 1527 HeapObjectHeader* header = |
| 1253 reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1528 reinterpret_cast<HeapObjectHeader*>(headerAddress); |
| 1254 ASSERT(header->size() < blinkPagePayloadSize()); | 1529 ASSERT(header->size() < blinkPagePayloadSize()); |
| 1255 // Check if a free list entry first since we cannot call | 1530 // Check if a free list entry first since we cannot call |
| 1256 // isMarked on a free list entry. | 1531 // isMarked on a free list entry. |
| 1257 if (header->isFree()) { | 1532 if (header->isFree()) { |
| 1258 headerAddress += header->size(); | 1533 headerAddress += header->size(); |
| (...skipping 372 matching lines...) | |
| 1631 | 1906 |
| 1632 m_hasEntries = true; | 1907 m_hasEntries = true; |
| 1633 size_t index = hash(address); | 1908 size_t index = hash(address); |
| 1634 ASSERT(!(index & 1)); | 1909 ASSERT(!(index & 1)); |
| 1635 Address cachePage = roundToBlinkPageStart(address); | 1910 Address cachePage = roundToBlinkPageStart(address); |
| 1636 m_entries[index + 1] = m_entries[index]; | 1911 m_entries[index + 1] = m_entries[index]; |
| 1637 m_entries[index] = cachePage; | 1912 m_entries[index] = cachePage; |
| 1638 } | 1913 } |
| 1639 | 1914 |
| 1640 } // namespace blink | 1915 } // namespace blink |