Chromium Code Reviews

Diff: third_party/WebKit/Source/platform/heap/HeapPage.cpp

Issue 2531973002: Simple BlinkGC heap compaction. (Closed)
Patch Set: add pointer alignment handling to SparseHeapBitmap (created 4 years ago)
1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
6 * met:
7 *
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
(...skipping 17 matching lines...)
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include "platform/heap/HeapPage.h"
32
33 #include "base/trace_event/process_memory_dump.h"
34 #include "platform/MemoryCoordinator.h"
35 #include "platform/ScriptForbiddenScope.h"
36 #include "platform/heap/BlinkGCMemoryDumpProvider.h"
37 #include "platform/heap/CallbackStack.h"
38 #include "platform/heap/HeapCompact.h"
39 #include "platform/heap/MarkingVisitor.h"
40 #include "platform/heap/PageMemory.h"
41 #include "platform/heap/PagePool.h"
42 #include "platform/heap/SafePoint.h"
43 #include "platform/heap/ThreadState.h"
44 #include "platform/tracing/TraceEvent.h"
45 #include "platform/tracing/web_memory_allocator_dump.h"
46 #include "platform/tracing/web_process_memory_dump.h"
47 #include "public/platform/Platform.h"
48 #include "wtf/Assertions.h"
(...skipping 146 matching lines...)
195 ASSERT(!page->hasBeenSwept());
196 page->invalidateObjectStartBitmap();
197 }
198 if (previousPage) {
199 ASSERT(m_firstUnsweptPage);
200 previousPage->m_next = m_firstPage;
201 m_firstPage = m_firstUnsweptPage;
202 m_firstUnsweptPage = nullptr;
203 }
204 ASSERT(!m_firstUnsweptPage);
205
206 HeapCompact* heapCompactor = getThreadState()->heap().compaction();
207 if (!heapCompactor->isCompactingArena(arenaIndex()))
208 return;
209
210 BasePage* nextPage = m_firstPage;
211 while (nextPage) {
212 if (!nextPage->isLargeObjectPage())
213 heapCompactor->addCompactablePage(nextPage);
214 nextPage = nextPage->next();
215 }
216 }
217
218 void BaseArena::makeConsistentForMutator() {
219 clearFreeLists();
220 ASSERT(isConsistentForGC());
221 ASSERT(!m_firstPage);
222
223 // Drop marks from marked objects and rebuild free lists in preparation for
224 // resuming the executions of mutators.
225 BasePage* previousPage = nullptr;
(...skipping 219 matching lines...)
445 m_promptlyFreedSize(0),
446 m_isLazySweeping(false) {
447 clearFreeLists();
448 }
449
450 void NormalPageArena::clearFreeLists() {
451 setAllocationPoint(nullptr, 0);
452 m_freeList.clear();
453 }
454
455 size_t NormalPageArena::arenaSize() {
456 size_t size = 0;
457 BasePage* page = m_firstPage;
458 while (page) {
459 size += page->size();
460 page = page->next();
461 }
462 LOG_HEAP_FREELIST_VERBOSE("Heap size: %zu (%d)\n", size, arenaIndex());
463 return size;
464 }
465
466 size_t NormalPageArena::freeListSize() {
467 size_t freeSize = m_freeList.freeListSize();
468 LOG_HEAP_FREELIST_VERBOSE("Free size: %zu (%d)\n", freeSize, arenaIndex());
469 return freeSize;
470 }
471
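arenaSize() and freeListSize() report the numbers a compaction heuristic needs in order to judge whether an arena is fragmented enough to be worth compacting. The check below is only an illustrative sketch of that idea; the name shouldCompactArenaSketch and the 0.5 threshold are invented here, and the real decision logic lives in HeapCompact, which this diff does not show.

// Illustrative sketch only: the kind of free-ratio test these accessors
// enable. Name and threshold are hypothetical, not part of this patch.
static bool shouldCompactArenaSketch(NormalPageArena* arena) {
  size_t arenaSize = arena->arenaSize();
  if (!arenaSize)
    return false;
  double freeRatio = static_cast<double>(arena->freeListSize()) / arenaSize;
  return freeRatio >= 0.5;  // assumed threshold, for illustration
}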
472 void NormalPageArena::sweepAndCompact() {
473 ThreadHeap& heap = getThreadState()->heap();
474 if (!heap.compaction()->isCompactingArena(arenaIndex()))
475 return;
476
477 if (!m_firstUnsweptPage) {
478 heap.compaction()->finishedArenaCompaction(this, 0, 0);
haraken 2016/12/09 07:25:55 This looks unnecessary. We can just immediately return.
sof 2016/12/09 21:44:04 Keeping it, no need to cut corners for this case &
479 return;
480 }
481
482 // Compaction is performed in-place, sliding objects down over unused
483 // holes for a smaller heap page footprint and improved locality.
484 // A "compaction pointer" is consequently kept, pointing to the next
485 // available address to move objects down to. It will belong to one
486 // of the already sweep-compacted pages for this arena, but as compaction
487 // proceeds, it will not belong to the same page as the one being
488 // currently compacted.
489 //
490 // The compaction pointer is represented by the
491 // |(currentPage, allocationPoint)| pair, with |allocationPoint|
492 // being the offset into |currentPage|, making up the next
493 // available location. When the compaction of an arena page causes the
494 // compaction pointer to exhaust the current page it is compacting into,
495 // page compaction will advance the current page of the compaction
496 // pointer, as well as the allocation point.
497 //
498 // By construction, the page compaction can be performed without having
499 // to allocate any new pages. So to arrange for the page compaction's
500 // supply of freed, available pages, we chain them together after each
501 // has been "compacted from". The page compaction will then reuse those
502 // as needed, and once finished, the chained, available pages can be
503 // released back to the OS.
504 //
505 // To ease the passing of the compaction state when iterating over an
506 // arena's pages, package it up into a |CompactionContext|.
507 NormalPage::CompactionContext context;
508 context.m_compactedPages = &m_firstPage;
509
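Based on how it is used in this function and in NormalPage::sweepAndCompact() further down, the context presumably bundles the fields sketched below. This is an inference from the usage in this file; the authoritative declaration is the nested NormalPage::CompactionContext in HeapPage.h, which the diff does not show.

// Sketch of the compaction state, inferred from its uses in this file.
struct CompactionContext {
  // Head of the arena's page list; pages that have been compacted into are
  // linked back onto it.
  BasePage** m_compactedPages = nullptr;
  // Chain of emptied, "compacted from" pages available for reuse and, once
  // compaction finishes, for release.
  BasePage* m_availablePages = nullptr;
  // The page currently being compacted into ...
  NormalPage* m_currentPage = nullptr;
  // ... and the offset of the next free byte in its payload.
  size_t m_allocationPoint = 0;
};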
510 while (m_firstUnsweptPage) {
511 BasePage* page = m_firstUnsweptPage;
512 if (page->isEmpty()) {
513 page->unlink(&m_firstUnsweptPage);
514 page->removeFromHeap();
515 continue;
516 }
517 // Large objects do not belong to this arena.
518 DCHECK(!page->isLargeObjectPage());
519 NormalPage* normalPage = static_cast<NormalPage*>(page);
520 normalPage->unlink(&m_firstUnsweptPage);
521 normalPage->markAsSwept();
522 // If not the first page, add |normalPage| onto the available pages chain.
523 if (!context.m_currentPage)
524 context.m_currentPage = normalPage;
525 else
526 normalPage->link(&context.m_availablePages);
527 normalPage->sweepAndCompact(context);
528 }
529
530 size_t freedSize = 0;
531 size_t freedPageCount = 0;
532
533 DCHECK(context.m_currentPage);
534 // If the current page hasn't been allocated into (empty heap?), add
haraken 2016/12/09 07:25:56 It should be empty heap, right?
sof 2016/12/09 21:44:04 Done.
535 // it to the available list, for subsequent release back to the OS below.
536 size_t allocationPoint = context.m_allocationPoint;
537 if (!allocationPoint) {
538 context.m_currentPage->link(&context.m_availablePages);
haraken 2016/12/09 07:25:55 Add DCHECK(context.m_currentPage->isEmpty()). Or
539 } else {
540 NormalPage* currentPage = context.m_currentPage;
541 currentPage->link(&m_firstPage);
542 if (allocationPoint != currentPage->payloadSize()) {
543 // Put the remainder of the page onto the free list.
544 freedSize = currentPage->payloadSize() - allocationPoint;
545 Address payload = currentPage->payload();
546 SET_MEMORY_INACCESSIBLE(payload + allocationPoint, freedSize);
547 currentPage->arenaForNormalPage()->addToFreeList(
548 payload + allocationPoint, freedSize);
549 }
550 }
551
552 // Release available page back to the OS.
haraken 2016/12/09 07:25:55 pages
sof 2016/12/09 21:44:04 Done, made clear that they're handed back to the free page pool.
553 BasePage* availablePages = context.m_availablePages;
554 while (availablePages) {
555 size_t pageSize = availablePages->size();
haraken 2016/12/09 07:25:56 Can we add DCHECK(availablePages->isEmpty())? Maybe…
sof 2016/12/09 21:44:04 An "available page" won't be marked as freed here
556 #if DEBUG_HEAP_COMPACTION
557 if (!freedPageCount)
558 LOG_HEAP_COMPACTION("Releasing:");
559 LOG_HEAP_COMPACTION(" [%p, %p]", availablePages, availablePages + pageSize);
560 #endif
561 freedSize += pageSize;
562 freedPageCount++;
563 BasePage* nextPage;
564 availablePages->unlink(&nextPage);
565 availablePages->removeFromHeap();
566 availablePages = static_cast<NormalPage*>(nextPage);
567 }
568 if (freedPageCount)
569 LOG_HEAP_COMPACTION("\n");
570 heap.compaction()->finishedArenaCompaction(this, freedPageCount, freedSize);
haraken 2016/12/09 07:25:56 freedPageCount looks redundant. It can be calculated from freedSize.
sof 2016/12/09 21:44:04 Yes, assuming equal sized pages and you round down.
571 }
572
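To make the freedPageCount exchange above concrete: dividing freedSize by the page size only reproduces the count if every released page is the same size and the free-list tail added on the last compacted page is ignored. A toy calculation, with a 128 KiB page size assumed for illustration:

// Hypothetical numbers, only to show why the count is tracked separately.
const size_t kPageSize = 128 * 1024;           // assumed Blink page size
size_t freedSize = 2 * kPageSize + 24 * 1024;  // two released pages + 24 KiB tail
size_t derivedCount = freedSize / kPageSize;   // == 2, but only because the
                                               // tail happens to round away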
573 #if ENABLE(ASSERT)
574 bool NormalPageArena::isConsistentForGC() {
575 // A thread heap is consistent for sweeping if none of the pages to be swept
576 // contain a freelist block or the current allocation point.
577 for (size_t i = 0; i < blinkPageSizeLog2; ++i) {
578 for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i];
579 freeListEntry; freeListEntry = freeListEntry->next()) {
580 if (pagesToBeSweptContains(freeListEntry->getAddress()))
581 return false;
582 }
(...skipping 21 matching lines...)
604 ->createMemoryAllocatorDumpForCurrentGC(dumpName + "/buckets");
605 base::trace_event::MemoryAllocatorDump* pagesDump =
606 BlinkGCMemoryDumpProvider::instance()
607 ->createMemoryAllocatorDumpForCurrentGC(dumpName + "/pages");
608 BlinkGCMemoryDumpProvider::instance()
609 ->currentProcessMemoryDump()
610 ->AddOwnershipEdge(pagesDump->guid(), bucketsDump->guid());
611 }
612 }
613
614 NormalPage* NormalPageArena::allocatePage() {  (old 484: void NormalPageArena::allocatePage() {)
615 getThreadState()->shouldFlushHeapDoesNotContainCache();
616 PageMemory* pageMemory =
617 getThreadState()->heap().getFreePagePool()->takeFreePage(arenaIndex());
618
619 if (!pageMemory) {
620 // Allocate a memory region for blinkPagesPerRegion pages that
621 // will each have the following layout.
622 //
623 // [ guard os page | ... payload ... | guard os page ]
624 // ^---{ aligned to blink page size }
(...skipping 12 matching lines...)
637 // the limit of the number of mmapped regions OS can support
638 // (e.g., /proc/sys/vm/max_map_count in Linux).
639 RELEASE_ASSERT(result);
640 pageMemory = memory;
641 } else {
642 getThreadState()->heap().getFreePagePool()->addFreePage(arenaIndex(),
643 memory);
644 }
645 }
646 }
647 return new (pageMemory->writableStart()) NormalPage(pageMemory, this);
648 }
649
650 void NormalPageArena::allocateAndAddPage() {  (old 518: NormalPage* page =)
651 NormalPage* page = allocatePage();  (old 519: new (pageMemory->writableStart()) NormalPage(pageMemory, this);)
652 page->link(&m_firstPage);
653
654 getThreadState()->heap().heapStats().increaseAllocatedSpace(page->size());
655 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
656 // Allow the following addToFreeList() to add the newly allocated memory
657 // to the free list.
658 ASAN_UNPOISON_MEMORY_REGION(page->payload(), page->payloadSize());
659 Address address = page->payload();
660 for (size_t i = 0; i < page->payloadSize(); i++)
661 address[i] = reuseAllowedZapValue;
(...skipping 276 matching lines...)
938 return result;
939 }
940
941 // 6. Complete sweeping.
942 getThreadState()->completeSweep();
943
944 // 7. Check if we should trigger a GC.
945 getThreadState()->scheduleGCIfNeeded();
946
947 // 8. Add a new page to this heap.
948 allocateAndAddPage();  (old 816: allocatePage();)
949
950 // 9. Try to allocate from a free list. This allocation must succeed.
951 result = allocateFromFreeList(allocationSize, gcInfoIndex);
952 RELEASE_ASSERT(result);
953 return result;
954 }
955
956 Address NormalPageArena::allocateFromFreeList(size_t allocationSize,
957 size_t gcInfoIndex) {
958 // Try reusing a block from the largest bin. The underlying reasoning
(...skipping 243 matching lines...)
1202
1203 void NEVER_INLINE FreeList::checkFreedMemoryIsZapped(Address address,
1204 size_t size) {
1205 for (size_t i = 0; i < size; i++) {
1206 ASSERT(address[i] == reuseAllowedZapValue ||
1207 address[i] == reuseForbiddenZapValue);
1208 }
1209 }
1210 #endif
1211
1212 size_t FreeList::freeListSize() const {
1213 size_t freeSize = 0;
1214 for (unsigned i = 0; i < blinkPageSizeLog2; ++i) {
1215 FreeListEntry* entry = m_freeLists[i];
1216 while (entry) {
1217 freeSize += entry->size();
1218 entry = entry->next();
1219 }
1220 }
1221 #if DEBUG_HEAP_FREELIST
1222 if (freeSize) {
1223 LOG_HEAP_FREELIST_VERBOSE("FreeList(%p): %zu\n", this, freeSize);
1224 for (unsigned i = 0; i < blinkPageSizeLog2; ++i) {
1225 FreeListEntry* entry = m_freeLists[i];
1226 size_t bucket = 0;
1227 size_t count = 0;
1228 while (entry) {
1229 bucket += entry->size();
1230 count++;
1231 entry = entry->next();
1232 }
1233 if (bucket) {
1234 LOG_HEAP_FREELIST_VERBOSE("[%d, %d]: %zu (%zu)\n", 0x1 << i,
1235 0x1 << (i + 1), bucket, count);
1236 }
1237 }
1238 }
1239 #endif
1240 return freeSize;
1241 }
1242
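The verbose logging above prints each bucket as a [2^i, 2^(i+1)) size range, i.e. the free list bins entries by the floor of log2 of their size; FreeList::bucketIndexForSize(), shown only partially further down, computes that index. A minimal standalone sketch of the mapping, not the literal Blink implementation:

// Bucket i holds free entries whose size falls in [2^i, 2^(i+1)).
static int bucketIndexForSizeSketch(size_t size) {
  // size must be > 0
  int index = -1;
  while (size) {
    ++index;
    size >>= 1;
  }
  return index;  // e.g. a 48-byte entry lands in bucket 5, covering [32, 64)
}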
1243 void FreeList::clear() {
1244 m_biggestFreeListIndex = 0;
1245 for (size_t i = 0; i < blinkPageSizeLog2; ++i)
1246 m_freeLists[i] = nullptr;
1247 }
1248
1249 int FreeList::bucketIndexForSize(size_t size) {
1250 ASSERT(size > 0);
1251 int index = -1;
1252 while (size) {
(...skipping 149 matching lines...)
1402 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
1403 if (MemoryCoordinator::isLowEndDevice())
1404 discardPages(startOfGap + sizeof(FreeListEntry), payloadEnd());
1405 #endif
1406 }
1407
1408 if (markedObjectSize)
1409 pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize);
1410 }
1411
1412 void NormalPage::sweepAndCompact(CompactionContext& context) {
1413 NormalPage*& currentPage = context.m_currentPage;
1414 size_t& allocationPoint = context.m_allocationPoint;
1415
1416 size_t markedObjectSize = 0;
1417 NormalPageArena* pageArena = arenaForNormalPage();
1418 #if defined(ADDRESS_SANITIZER)
1419 bool isVectorArena = ThreadState::isVectorArenaIndex(pageArena->arenaIndex());
1420 #endif
1421 HeapCompact* compact = pageArena->getThreadState()->heap().compaction();
1422 for (Address headerAddress = payload(); headerAddress < payloadEnd();) {
1423 HeapObjectHeader* header =
1424 reinterpret_cast<HeapObjectHeader*>(headerAddress);
1425 size_t size = header->size();
1426 DCHECK(size > 0 && size < blinkPagePayloadSize());
1427
1428 if (header->isPromptlyFreed())
1429 pageArena->decreasePromptlyFreedSize(size);
1430 if (header->isFree()) {
1431 // Unpoison the freelist entry so that we
1432 // can compact into it as wanted.
1433 ASAN_UNPOISON_MEMORY_REGION(headerAddress, size);
1434 headerAddress += size;
1435 continue;
1436 }
1437 #if ENABLE(ASSERT)
1438 DCHECK(header->checkHeader());
haraken 2016/12/09 07:25:56 This is checked in the below isMarked().
sof 2016/12/09 21:44:04 Gone
1439 #endif
1440
1441 // This is a fast version of header->payloadSize().
1442 size_t payloadSize = size - sizeof(HeapObjectHeader);
1443 Address payload = header->payload();
1444 if (!header->isMarked()) {
1445 // For ASan, unpoison the object before calling the finalizer. The
1446 // finalized object will be zero-filled and poison'ed afterwards.
1447 // Given all other unmarked objects are poisoned, ASan will detect
1448 // an error if the finalizer touches any other on-heap object that
1449 // die at the same GC cycle.
1450 ASAN_UNPOISON_MEMORY_REGION(headerAddress, size);
haraken 2016/12/09 07:25:56 This could be ASAN_UNPOISON_MEMORY_REGION(payload, payloadSize).
sof 2016/12/09 21:44:04 No, we need to unpoison the whole allocation as it
1451 header->finalize(payload, payloadSize);
1452
1453 // As compaction is under way, leave the freed memory accessible
1454 // while compacting the rest of the page. We just zap the payload
1455 // to catch out other finalizers trying to access it.
1456 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
1457 defined(MEMORY_SANITIZER)
1458 FreeList::zapFreedMemory(payload, payloadSize);
1459 #endif
1460 headerAddress += size;
1461 continue;
1462 }
1463 header->unmark();
1464 // Allocate and copy over the live object.
1465 Address compactFrontier = currentPage->payload() + allocationPoint;
1466 if (compactFrontier + size > currentPage->payloadEnd()) {
1467 // Can't fit on current allocation page; add remaining onto the
1468 // freelist and advance to next available page.
1469 //
1470 // TODO(sof): be more clever & compact later objects into
1471 // |currentPage|'s unused slop.
1472 currentPage->link(context.m_compactedPages);
1473 size_t freeSize = currentPage->payloadSize() - allocationPoint;
1474 if (freeSize) {
1475 SET_MEMORY_INACCESSIBLE(compactFrontier, freeSize);
1476 currentPage->arenaForNormalPage()->addToFreeList(compactFrontier,
1477 freeSize);
1478 }
1479
1480 BasePage* nextAvailablePage;
1481 context.m_availablePages->unlink(&nextAvailablePage);
1482 currentPage = reinterpret_cast<NormalPage*>(context.m_availablePages);
1483 context.m_availablePages = nextAvailablePage;
1484 allocationPoint = 0;
1485 compactFrontier = currentPage->payload();
1486 }
1487 if (LIKELY(compactFrontier != headerAddress)) {
1488 #if defined(ADDRESS_SANITIZER)
1489 // Unpoison the header + if it is a vector backing
1490 // store object, let go of the container annotations.
1491 // Do that by unpoisoning the payload entirely.
1492 ASAN_UNPOISON_MEMORY_REGION(header, sizeof(HeapObjectHeader));
1493 if (isVectorArena)
1494 ASAN_UNPOISON_MEMORY_REGION(payload, payloadSize);
1495 #endif
1496 // Use a non-overlapping copy, if possible.
1497 if (currentPage == this)
1498 memmove(compactFrontier, headerAddress, size);
1499 else
1500 memcpy(compactFrontier, headerAddress, size);
1501 compact->relocate(payload, compactFrontier + sizeof(HeapObjectHeader));
1502 }
1503 headerAddress += size;
1504 markedObjectSize += size;
1505 allocationPoint += size;
1506 DCHECK(allocationPoint <= currentPage->payloadSize());
1507 }
1508 if (markedObjectSize)
1509 pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize);
1510
1511 // Clear the page; it'll either be used for compacted objects or freed.
1512 Address unusedStart;
1513 size_t unusedSize;
1514 if (currentPage != this) {
1515 unusedStart = payload();
1516 unusedSize = payloadSize();
1517 } else {
1518 unusedStart = payload() + allocationPoint;
1519 unusedSize = payloadSize() - allocationPoint;
1520 }
1521 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
1522 defined(MEMORY_SANITIZER)
1523 FreeList::zapFreedMemory(unusedStart, unusedSize);
1524 #else
1525 memset(unusedStart, 0, unusedSize);
haraken 2016/12/09 07:25:55 This would not be needed (though I don't know if t
sof 2016/12/09 21:44:04 I'm just being very careful while it is "semi free
1526 #endif
1527 }
1528
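One detail of the copy at the heart of the loop above is worth spelling out: when an object slides down within the page currently being compacted (currentPage == this), the source and destination ranges may overlap, so memmove is required; across distinct pages the ranges are disjoint and memcpy suffices. A standalone illustration of that distinction (helper name is ours, not part of the patch):

// Overlap-safe vs. plain copy, mirroring the choice made above.
static void copyCompactedObject(char* dst, const char* src, size_t size,
                                bool withinSamePage) {
  if (withinSamePage)
    memmove(dst, src, size);  // destination may overlap the source
  else
    memcpy(dst, src, size);   // disjoint pages: the cheaper copy is safe
}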
1529 void NormalPage::makeConsistentForGC() {
1530 size_t markedObjectSize = 0;
1531 for (Address headerAddress = payload(); headerAddress < payloadEnd();) {
1532 HeapObjectHeader* header =
1533 reinterpret_cast<HeapObjectHeader*>(headerAddress);
1534 ASSERT(header->size() < blinkPagePayloadSize());
1535 // Check if a free list entry first since we cannot call
1536 // isMarked on a free list entry.
1537 if (header->isFree()) {
1538 headerAddress += header->size();
(...skipping 372 matching lines...)
1911
1912 m_hasEntries = true;
1913 size_t index = hash(address);
1914 ASSERT(!(index & 1));
1915 Address cachePage = roundToBlinkPageStart(address);
1916 m_entries[index + 1] = m_entries[index];
1917 m_entries[index] = cachePage;
1918 }
1919
1920 } // namespace blink