OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 17 matching lines...)
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
29 */ | 29 */ |
30 | 30 |
31 #include "platform/heap/HeapPage.h" | 31 #include "platform/heap/HeapPage.h" |
32 | 32 |
33 #include "base/trace_event/process_memory_dump.h" | 33 #include "base/trace_event/process_memory_dump.h" |
34 #include "platform/MemoryCoordinator.h" | 34 #include "platform/MemoryCoordinator.h" |
35 #include "platform/ScriptForbiddenScope.h" | 35 #include "platform/ScriptForbiddenScope.h" |
36 #include "platform/heap/BlinkGCMemoryDumpProvider.h" | 36 #include "platform/heap/BlinkGCMemoryDumpProvider.h" |
37 #include "platform/heap/CallbackStack.h" | 37 #include "platform/heap/CallbackStack.h" |
| 38 #include "platform/heap/HeapCompact.h" |
38 #include "platform/heap/MarkingVisitor.h" | 39 #include "platform/heap/MarkingVisitor.h" |
39 #include "platform/heap/PageMemory.h" | 40 #include "platform/heap/PageMemory.h" |
40 #include "platform/heap/PagePool.h" | 41 #include "platform/heap/PagePool.h" |
41 #include "platform/heap/SafePoint.h" | 42 #include "platform/heap/SafePoint.h" |
42 #include "platform/heap/ThreadState.h" | 43 #include "platform/heap/ThreadState.h" |
43 #include "platform/tracing/TraceEvent.h" | 44 #include "platform/tracing/TraceEvent.h" |
44 #include "platform/tracing/web_memory_allocator_dump.h" | 45 #include "platform/tracing/web_memory_allocator_dump.h" |
45 #include "platform/tracing/web_process_memory_dump.h" | 46 #include "platform/tracing/web_process_memory_dump.h" |
46 #include "public/platform/Platform.h" | 47 #include "public/platform/Platform.h" |
47 #include "wtf/Assertions.h" | 48 #include "wtf/Assertions.h" |
(...skipping 146 matching lines...)
194 ASSERT(!page->hasBeenSwept()); | 195 ASSERT(!page->hasBeenSwept()); |
195 page->invalidateObjectStartBitmap(); | 196 page->invalidateObjectStartBitmap(); |
196 } | 197 } |
197 if (previousPage) { | 198 if (previousPage) { |
198 ASSERT(m_firstUnsweptPage); | 199 ASSERT(m_firstUnsweptPage); |
199 previousPage->m_next = m_firstPage; | 200 previousPage->m_next = m_firstPage; |
200 m_firstPage = m_firstUnsweptPage; | 201 m_firstPage = m_firstUnsweptPage; |
201 m_firstUnsweptPage = nullptr; | 202 m_firstUnsweptPage = nullptr; |
202 } | 203 } |
203 ASSERT(!m_firstUnsweptPage); | 204 ASSERT(!m_firstUnsweptPage); |
| 205 |
| 206 HeapCompact* heapCompactor = getThreadState()->heap().compaction(); |
| 207 if (!heapCompactor->isCompactingArena(arenaIndex())) |
| 208 return; |
| 209 |
| 210 BasePage* nextPage = m_firstPage; |
| 211 while (nextPage) { |
| 212 if (!nextPage->isLargeObjectPage()) |
| 213 heapCompactor->addCompactingPage(nextPage); |
| 214 nextPage = nextPage->next(); |
| 215 } |
204 } | 216 } |
205 | 217 |
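The isCompactingArena() check introduced above asks the compactor whether this arena was selected for compaction. A minimal sketch of one way to represent that selection, assuming a bitmask over arena indices (the class, member, and setter names here are illustrative, not taken from HeapCompact.h):

// Sketch: one bit per arena index marks it as selected for compaction.
class HeapCompactSketch {
 public:
  bool isCompactingArena(int arenaIndex) const {
    return m_compactableArenas & (0x1u << arenaIndex);
  }
  void selectArenaForCompaction(int arenaIndex) {
    m_compactableArenas |= 0x1u << arenaIndex;
  }

 private:
  unsigned m_compactableArenas = 0;
};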
206 void BaseArena::makeConsistentForMutator() { | 218 void BaseArena::makeConsistentForMutator() { |
207 clearFreeLists(); | 219 clearFreeLists(); |
208 ASSERT(isConsistentForGC()); | 220 ASSERT(isConsistentForGC()); |
209 ASSERT(!m_firstPage); | 221 ASSERT(!m_firstPage); |
210 | 222 |
211 // Drop marks from marked objects and rebuild free lists in preparation for | 223 // Drop marks from marked objects and rebuild free lists in preparation for |
212 // resuming the execution of mutators. | 224 // resuming the execution of mutators. |
213 BasePage* previousPage = nullptr; | 225 BasePage* previousPage = nullptr; |
(...skipping 219 matching lines...)
433 m_promptlyFreedSize(0), | 445 m_promptlyFreedSize(0), |
434 m_isLazySweeping(false) { | 446 m_isLazySweeping(false) { |
435 clearFreeLists(); | 447 clearFreeLists(); |
436 } | 448 } |
437 | 449 |
438 void NormalPageArena::clearFreeLists() { | 450 void NormalPageArena::clearFreeLists() { |
439 setAllocationPoint(nullptr, 0); | 451 setAllocationPoint(nullptr, 0); |
440 m_freeList.clear(); | 452 m_freeList.clear(); |
441 } | 453 } |
442 | 454 |
| 455 size_t NormalPageArena::arenaSize() { |
| 456 size_t size = 0; |
| 457 BasePage* page = m_firstPage; |
| 458 while (page) { |
| 459 size += page->size(); |
| 460 page = page->next(); |
| 461 } |
| 462 LOG_HEAP_FREELIST_VERBOSE("Heap size: %zu (%d)\n", size, arenaIndex()); |
| 463 return size; |
| 464 } |
| 465 |
| 466 size_t NormalPageArena::freeListSize() { |
| 467 size_t freeSize = m_freeList.freeListSize(); |
| 468 LOG_HEAP_FREELIST_VERBOSE("Free size: %zu (%d)\n", freeSize, arenaIndex()); |
| 469 return freeSize; |
| 470 } |
| 471 |
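Together, arenaSize() and freeListSize() quantify an arena's fragmentation, which is the natural input for deciding whether compaction is worth the copying cost. A hedged sketch of such a policy, with an assumed size floor and ratio threshold (the actual HeapCompact heuristic is not part of this file):

// Sketch: compact an arena only when a sizable share of it sits on free lists.
bool shouldCompactArena(NormalPageArena* arena) {
  const size_t kMinArenaSize = 512 * 1024;  // Assumed floor; skip tiny arenas.
  const double kFreeRatio = 0.25;           // Assumed fragmentation threshold.
  size_t size = arena->arenaSize();
  if (size < kMinArenaSize)
    return false;
  return arena->freeListSize() >= size * kFreeRatio;
}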
| 472 void NormalPageArena::sweepAndCompact() { |
| 473 ThreadHeap& heap = getThreadState()->heap(); |
| 474 if (!heap.compaction()->isCompactingArena(arenaIndex())) |
| 475 return; |
| 476 |
| 477 if (!m_firstUnsweptPage) { |
| 478 heap.compaction()->finishedArenaCompaction(this, 0, 0); |
| 479 return; |
| 480 } |
| 481 |
| 482 // Compaction is performed in-place, sliding objects down over unused |
| 483 // holes for a smaller heap page footprint and improved locality. |
| 484 // A "compaction pointer" is consequently kept, pointing to the next |
| 485 // available address to move objects down to. It will belong to one |
| 486 // of the already sweep-compacted pages for this arena, but as compaction |
| 487 // proceeds, it will not belong to the same page as the one |
| 488 // currently being compacted. |
| 489 // |
| 490 // The compaction pointer is represented by the |
| 491 // |(currentPage, allocationPoint)| pair, with |allocationPoint| |
| 492 // being the offset into |currentPage|, making up the next |
| 493 // available location. When the compaction of an arena page causes the |
| 494 // compaction pointer to exhaust the current page it is compacting into, |
| 495 // page compaction will advance the current page of the compaction |
| 496 // pointer, as well as the allocation point. |
| 497 // |
| 498 // By construction, the page compaction can be performed without having |
| 499 // to allocate any new pages. So to arrange for the page compaction's |
| 500 // supply of freed, available pages, we chain them together after each |
| 501 // has been "compacted from". The page compaction will then reuse those |
| 502 // as needed, and once finished, the chained, available pages can be |
| 503 // released back to the OS. |
| 504 // |
| 505 // To ease the passing of the compaction state when iterating over an |
| 506 // arena's pages, package it up into a |CompactionContext|. |
| 507 NormalPage::CompactionContext context; |
| 508 context.m_compactedPages = &m_firstPage; |
| 509 |
| 510 while (m_firstUnsweptPage) { |
| 511 BasePage* page = m_firstUnsweptPage; |
| 512 if (page->isEmpty()) { |
| 513 page->unlink(&m_firstUnsweptPage); |
| 514 page->removeFromHeap(); |
| 515 continue; |
| 516 } |
| 517 // Large objects do not belong to this arena. |
| 518 DCHECK(!page->isLargeObjectPage()); |
| 519 NormalPage* normalPage = static_cast<NormalPage*>(page); |
| 520 normalPage->unlink(&m_firstUnsweptPage); |
| 521 normalPage->markAsSwept(); |
| 522 // Compact into the first page; add later pages to the available chain. |
| 523 if (!context.m_currentPage) |
| 524 context.m_currentPage = normalPage; |
| 525 else |
| 526 normalPage->link(&context.m_availablePages); |
| 527 normalPage->sweepAndCompact(context); |
| 528 } |
| 529 |
| 530 size_t freedSize = 0; |
| 531 size_t freedPageCount = 0; |
| 532 |
| 533 DCHECK(context.m_currentPage); |
| 534 // If the current page hasn't been allocated into, add it to the available |
| 535 // list, for subsequent release below. |
| 536 size_t allocationPoint = context.m_allocationPoint; |
| 537 if (!allocationPoint) { |
| 538 context.m_currentPage->link(&context.m_availablePages); |
| 539 } else { |
| 540 NormalPage* currentPage = context.m_currentPage; |
| 541 currentPage->link(&m_firstPage); |
| 542 if (allocationPoint != currentPage->payloadSize()) { |
| 543 // Put the remainder of the page onto the free list. |
| 544 freedSize = currentPage->payloadSize() - allocationPoint; |
| 545 Address payload = currentPage->payload(); |
| 546 SET_MEMORY_INACCESSIBLE(payload + allocationPoint, freedSize); |
| 547 currentPage->arenaForNormalPage()->addToFreeList( |
| 548 payload + allocationPoint, freedSize); |
| 549 } |
| 550 } |
| 551 |
| 552 // Return available pages to the free page pool, decommitting them from |
| 553 // the pagefile. |
| 554 BasePage* availablePages = context.m_availablePages; |
| 555 while (availablePages) { |
| 556 size_t pageSize = availablePages->size(); |
| 557 #if DEBUG_HEAP_COMPACTION |
| 558 if (!freedPageCount) |
| 559 LOG_HEAP_COMPACTION("Releasing:"); |
| 560 LOG_HEAP_COMPACTION(" [%p, %p]", availablePages, availablePages + pageSize); |
| 561 #endif |
| 562 freedSize += pageSize; |
| 563 freedPageCount++; |
| 564 BasePage* nextPage; |
| 565 availablePages->unlink(&nextPage); |
| 566 availablePages->removeFromHeap(); |
| 567 availablePages = static_cast<NormalPage*>(nextPage); |
| 568 } |
| 569 if (freedPageCount) |
| 570 LOG_HEAP_COMPACTION("\n"); |
| 571 heap.compaction()->finishedArenaCompaction(this, freedPageCount, freedSize); |
| 572 } |
| 573 |
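The CompactionContext threaded through this function and NormalPage::sweepAndCompact() is declared in HeapPage.h; its shape can be inferred from the uses in this patch. A sketch with member types matching those uses (default initializers assumed):

// Sketch: compaction state carried across the pages of one arena.
struct CompactionContext {
  // Head of the chain of pages already compacted into; wired to
  // |m_firstPage| above so finished pages rejoin the arena.
  BasePage** m_compactedPages = nullptr;
  // The page currently compacted into and the payload offset of the
  // next free byte: together, the "compaction pointer".
  NormalPage* m_currentPage = nullptr;
  size_t m_allocationPoint = 0;
  // Chain of swept, now-available pages to compact into next.
  BasePage* m_availablePages = nullptr;
};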
443 #if ENABLE(ASSERT) | 574 #if ENABLE(ASSERT) |
444 bool NormalPageArena::isConsistentForGC() { | 575 bool NormalPageArena::isConsistentForGC() { |
445 // A thread heap is consistent for sweeping if none of the pages to be swept | 576 // A thread heap is consistent for sweeping if none of the pages to be swept |
446 // contain a freelist block or the current allocation point. | 577 // contain a freelist block or the current allocation point. |
447 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { | 578 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { |
448 for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; | 579 for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; |
449 freeListEntry; freeListEntry = freeListEntry->next()) { | 580 freeListEntry; freeListEntry = freeListEntry->next()) { |
450 if (pagesToBeSweptContains(freeListEntry->getAddress())) | 581 if (pagesToBeSweptContains(freeListEntry->getAddress())) |
451 return false; | 582 return false; |
452 } | 583 } |
(...skipping 54 matching lines...)
507 // the limit of the number of mmapped regions the OS can support | 638 // the limit of the number of mmapped regions the OS can support |
508 // (e.g., /proc/sys/vm/max_map_count on Linux). | 639 // (e.g., /proc/sys/vm/max_map_count on Linux). |
509 RELEASE_ASSERT(result); | 640 RELEASE_ASSERT(result); |
510 pageMemory = memory; | 641 pageMemory = memory; |
511 } else { | 642 } else { |
512 getThreadState()->heap().getFreePagePool()->addFreePage(arenaIndex(), | 643 getThreadState()->heap().getFreePagePool()->addFreePage(arenaIndex(), |
513 memory); | 644 memory); |
514 } | 645 } |
515 } | 646 } |
516 } | 647 } |
517 | |
518 NormalPage* page = | 648 NormalPage* page = |
519 new (pageMemory->writableStart()) NormalPage(pageMemory, this); | 649 new (pageMemory->writableStart()) NormalPage(pageMemory, this); |
520 page->link(&m_firstPage); | 650 page->link(&m_firstPage); |
521 | 651 |
522 getThreadState()->heap().heapStats().increaseAllocatedSpace(page->size()); | 652 getThreadState()->heap().heapStats().increaseAllocatedSpace(page->size()); |
523 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 653 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
524 // Allow the following addToFreeList() to add the newly allocated memory | 654 // Allow the following addToFreeList() to add the newly allocated memory |
525 // to the free list. | 655 // to the free list. |
526 ASAN_UNPOISON_MEMORY_REGION(page->payload(), page->payloadSize()); | 656 ASAN_UNPOISON_MEMORY_REGION(page->payload(), page->payloadSize()); |
527 Address address = page->payload(); | 657 Address address = page->payload(); |
(...skipping 542 matching lines...)
1070 | 1200 |
1071 void NEVER_INLINE FreeList::checkFreedMemoryIsZapped(Address address, | 1201 void NEVER_INLINE FreeList::checkFreedMemoryIsZapped(Address address, |
1072 size_t size) { | 1202 size_t size) { |
1073 for (size_t i = 0; i < size; i++) { | 1203 for (size_t i = 0; i < size; i++) { |
1074 ASSERT(address[i] == reuseAllowedZapValue || | 1204 ASSERT(address[i] == reuseAllowedZapValue || |
1075 address[i] == reuseForbiddenZapValue); | 1205 address[i] == reuseForbiddenZapValue); |
1076 } | 1206 } |
1077 } | 1207 } |
1078 #endif | 1208 #endif |
1079 | 1209 |
| 1210 size_t FreeList::freeListSize() const { |
| 1211 size_t freeSize = 0; |
| 1212 for (unsigned i = 0; i < blinkPageSizeLog2; ++i) { |
| 1213 FreeListEntry* entry = m_freeLists[i]; |
| 1214 while (entry) { |
| 1215 freeSize += entry->size(); |
| 1216 entry = entry->next(); |
| 1217 } |
| 1218 } |
| 1219 #if DEBUG_HEAP_FREELIST |
| 1220 if (freeSize) { |
| 1221 LOG_HEAP_FREELIST_VERBOSE("FreeList(%p): %zu\n", this, freeSize); |
| 1222 for (unsigned i = 0; i < blinkPageSizeLog2; ++i) { |
| 1223 FreeListEntry* entry = m_freeLists[i]; |
| 1224 size_t bucket = 0; |
| 1225 size_t count = 0; |
| 1226 while (entry) { |
| 1227 bucket += entry->size(); |
| 1228 count++; |
| 1229 entry = entry->next(); |
| 1230 } |
| 1231 if (bucket) { |
| 1232 LOG_HEAP_FREELIST_VERBOSE("[%d, %d]: %zu (%zu)\n", 0x1 << i, |
| 1233 0x1 << (i + 1), bucket, count); |
| 1234 } |
| 1235 } |
| 1236 } |
| 1237 #endif |
| 1238 return freeSize; |
| 1239 } |
| 1240 |
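The per-bucket breakdown logged above uses the free list's power-of-two size classes: bucket i holds entries with sizes in [2^i, 2^(i+1)), matching bucketIndexForSize() below, which in effect computes floor(log2(size)). A worked example:

// Example: 48 shifts right six times before reaching zero
// (48 -> 24 -> 12 -> 6 -> 3 -> 1 -> 0), so bucketIndexForSize(48)
// returns 5 and a 48-byte entry is tallied under the [32, 64) bucket.
static_assert(48 >= (1u << 5) && 48 < (1u << 6),
              "a 48-byte free list entry belongs in bucket 5");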
1080 void FreeList::clear() { | 1241 void FreeList::clear() { |
1081 m_biggestFreeListIndex = 0; | 1242 m_biggestFreeListIndex = 0; |
1082 for (size_t i = 0; i < blinkPageSizeLog2; ++i) | 1243 for (size_t i = 0; i < blinkPageSizeLog2; ++i) |
1083 m_freeLists[i] = nullptr; | 1244 m_freeLists[i] = nullptr; |
1084 } | 1245 } |
1085 | 1246 |
1086 int FreeList::bucketIndexForSize(size_t size) { | 1247 int FreeList::bucketIndexForSize(size_t size) { |
1087 ASSERT(size > 0); | 1248 ASSERT(size > 0); |
1088 int index = -1; | 1249 int index = -1; |
1089 while (size) { | 1250 while (size) { |
(...skipping 149 matching lines...)
1239 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) | 1400 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) |
1240 if (MemoryCoordinator::isLowEndDevice()) | 1401 if (MemoryCoordinator::isLowEndDevice()) |
1241 discardPages(startOfGap + sizeof(FreeListEntry), payloadEnd()); | 1402 discardPages(startOfGap + sizeof(FreeListEntry), payloadEnd()); |
1242 #endif | 1403 #endif |
1243 } | 1404 } |
1244 | 1405 |
1245 if (markedObjectSize) | 1406 if (markedObjectSize) |
1246 pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize); | 1407 pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize); |
1247 } | 1408 } |
1248 | 1409 |
| 1410 void NormalPage::sweepAndCompact(CompactionContext& context) { |
| 1411 NormalPage*& currentPage = context.m_currentPage; |
| 1412 size_t& allocationPoint = context.m_allocationPoint; |
| 1413 |
| 1414 size_t markedObjectSize = 0; |
| 1415 NormalPageArena* pageArena = arenaForNormalPage(); |
| 1416 #if defined(ADDRESS_SANITIZER) |
| 1417 bool isVectorArena = ThreadState::isVectorArenaIndex(pageArena->arenaIndex()); |
| 1418 #endif |
| 1419 HeapCompact* compact = pageArena->getThreadState()->heap().compaction(); |
| 1420 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { |
| 1421 HeapObjectHeader* header = |
| 1422 reinterpret_cast<HeapObjectHeader*>(headerAddress); |
| 1423 size_t size = header->size(); |
| 1424 DCHECK(size > 0 && size < blinkPagePayloadSize()); |
| 1425 |
| 1426 if (header->isPromptlyFreed()) |
| 1427 pageArena->decreasePromptlyFreedSize(size); |
| 1428 if (header->isFree()) { |
| 1429 // Unpoison the freelist entry so that we |
| 1430 // can compact into it as needed. |
| 1431 ASAN_UNPOISON_MEMORY_REGION(headerAddress, size); |
| 1432 headerAddress += size; |
| 1433 continue; |
| 1434 } |
| 1435 // This is a fast version of header->payloadSize(). |
| 1436 size_t payloadSize = size - sizeof(HeapObjectHeader); |
| 1437 Address payload = header->payload(); |
| 1438 if (!header->isMarked()) { |
| 1439 // For ASan, unpoison the object before calling the finalizer. The |
| 1440 // finalized object will be zero-filled and poisoned afterwards. |
| 1441 // Given all other unmarked objects are poisoned, ASan will detect |
| 1442 // an error if the finalizer touches any other on-heap objects that |
| 1443 // die in the same GC cycle. |
| 1444 ASAN_UNPOISON_MEMORY_REGION(headerAddress, size); |
| 1445 header->finalize(payload, payloadSize); |
| 1446 |
| 1447 // As compaction is under way, leave the freed memory accessible |
| 1448 // while compacting the rest of the page. We just zap the payload |
| 1449 // to catch out other finalizers trying to access it. |
| 1450 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \ |
| 1451 defined(MEMORY_SANITIZER) |
| 1452 FreeList::zapFreedMemory(payload, payloadSize); |
| 1453 #endif |
| 1454 headerAddress += size; |
| 1455 continue; |
| 1456 } |
| 1457 header->unmark(); |
| 1458 // Allocate and copy over the live object. |
| 1459 Address compactFrontier = currentPage->payload() + allocationPoint; |
| 1460 if (compactFrontier + size > currentPage->payloadEnd()) { |
| 1461 // Can't fit on the current allocation page; add the remainder to |
| 1462 // the freelist and advance to the next available page. |
| 1463 // |
| 1464 // TODO(sof): be more clever & compact later objects into |
| 1465 // |currentPage|'s unused slop. |
| 1466 currentPage->link(context.m_compactedPages); |
| 1467 size_t freeSize = currentPage->payloadSize() - allocationPoint; |
| 1468 if (freeSize) { |
| 1469 SET_MEMORY_INACCESSIBLE(compactFrontier, freeSize); |
| 1470 currentPage->arenaForNormalPage()->addToFreeList(compactFrontier, |
| 1471 freeSize); |
| 1472 } |
| 1473 |
| 1474 BasePage* nextAvailablePage; |
| 1475 context.m_availablePages->unlink(&nextAvailablePage); |
| 1476 currentPage = reinterpret_cast<NormalPage*>(context.m_availablePages); |
| 1477 context.m_availablePages = nextAvailablePage; |
| 1478 allocationPoint = 0; |
| 1479 compactFrontier = currentPage->payload(); |
| 1480 } |
| 1481 if (LIKELY(compactFrontier != headerAddress)) { |
| 1482 #if defined(ADDRESS_SANITIZER) |
| 1483 // Unpoison the header and, if this is a vector backing |
| 1484 // store object, drop its container annotations by |
| 1485 // unpoisoning the payload entirely. |
| 1486 ASAN_UNPOISON_MEMORY_REGION(header, sizeof(HeapObjectHeader)); |
| 1487 if (isVectorArena) |
| 1488 ASAN_UNPOISON_MEMORY_REGION(payload, payloadSize); |
| 1489 #endif |
| 1490 // Use a non-overlapping copy, if possible. |
| 1491 if (currentPage == this) |
| 1492 memmove(compactFrontier, headerAddress, size); |
| 1493 else |
| 1494 memcpy(compactFrontier, headerAddress, size); |
| 1495 compact->relocate(payload, compactFrontier + sizeof(HeapObjectHeader)); |
| 1496 } |
| 1497 headerAddress += size; |
| 1498 markedObjectSize += size; |
| 1499 allocationPoint += size; |
| 1500 DCHECK(allocationPoint <= currentPage->payloadSize()); |
| 1501 } |
| 1502 if (markedObjectSize) |
| 1503 pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize); |
| 1504 |
| 1505 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \ |
| 1506 defined(MEMORY_SANITIZER) |
| 1507 // Zap the page, which is now available and will either be compacted into |
| 1508 // or freed. |
| 1509 if (currentPage != this) { |
| 1510 FreeList::zapFreedMemory(payload(), payloadSize()); |
| 1511 } else { |
| 1512 FreeList::zapFreedMemory(payload() + allocationPoint, |
| 1513 payloadSize() - allocationPoint); |
| 1514 } |
| 1515 #endif |
| 1516 } |
| 1517 |
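Each live object that moves is reported through compact->relocate(payload, newPayload); fixing up references to moved objects is HeapCompact's side of the contract. A hedged sketch of that mechanism, assuming interesting slots were registered during marking (everything here other than relocate() itself is illustrative, and the real bookkeeping also copes with interior pointers):

#include <cstdint>
#include <unordered_map>

using Address = uint8_t*;  // Blink's Address is a byte pointer.

class MovedObjectFixups {
 public:
  // Recorded during marking: |slot| holds the reference to |object|.
  void registerMovingObjectReference(Address object, Address* slot) {
    m_fixups[object] = slot;
  }
  // Called for each relocated object: retarget the registered slot at
  // the object's new payload address.
  void relocate(Address from, Address to) {
    auto it = m_fixups.find(from);
    if (it != m_fixups.end())
      *it->second = to;
  }

 private:
  std::unordered_map<Address, Address*> m_fixups;
};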
1249 void NormalPage::makeConsistentForGC() { | 1518 void NormalPage::makeConsistentForGC() { |
1250 size_t markedObjectSize = 0; | 1519 size_t markedObjectSize = 0; |
1251 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { | 1520 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { |
1252 HeapObjectHeader* header = | 1521 HeapObjectHeader* header = |
1253 reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1522 reinterpret_cast<HeapObjectHeader*>(headerAddress); |
1254 ASSERT(header->size() < blinkPagePayloadSize()); | 1523 ASSERT(header->size() < blinkPagePayloadSize()); |
1255 // Check if it is a free list entry first, since we cannot call | 1524 // Check if it is a free list entry first, since we cannot call |
1256 // isMarked on a free list entry. | 1525 // isMarked on a free list entry. |
1257 if (header->isFree()) { | 1526 if (header->isFree()) { |
1258 headerAddress += header->size(); | 1527 headerAddress += header->size(); |
(...skipping 372 matching lines...)
1631 | 1900 |
1632 m_hasEntries = true; | 1901 m_hasEntries = true; |
1633 size_t index = hash(address); | 1902 size_t index = hash(address); |
1634 ASSERT(!(index & 1)); | 1903 ASSERT(!(index & 1)); |
1635 Address cachePage = roundToBlinkPageStart(address); | 1904 Address cachePage = roundToBlinkPageStart(address); |
1636 m_entries[index + 1] = m_entries[index]; | 1905 m_entries[index + 1] = m_entries[index]; |
1637 m_entries[index] = cachePage; | 1906 m_entries[index] = cachePage; |
1638 } | 1907 } |
1639 | 1908 |
1640 } // namespace blink | 1909 } // namespace blink |