OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 17 matching lines...) |
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
29 */ | 29 */ |
30 | 30 |
31 #include "platform/heap/HeapPage.h" | 31 #include "platform/heap/HeapPage.h" |
32 | 32 |
33 #include "base/trace_event/process_memory_dump.h" | 33 #include "base/trace_event/process_memory_dump.h" |
34 #include "platform/MemoryCoordinator.h" | 34 #include "platform/MemoryCoordinator.h" |
35 #include "platform/ScriptForbiddenScope.h" | 35 #include "platform/ScriptForbiddenScope.h" |
36 #include "platform/heap/BlinkGCMemoryDumpProvider.h" | 36 #include "platform/heap/BlinkGCMemoryDumpProvider.h" |
37 #include "platform/heap/CallbackStack.h" | 37 #include "platform/heap/CallbackStack.h" |
38 #include "platform/heap/HeapCompact.h" | |
39 #include "platform/heap/MarkingVisitor.h" | 38 #include "platform/heap/MarkingVisitor.h" |
40 #include "platform/heap/PageMemory.h" | 39 #include "platform/heap/PageMemory.h" |
41 #include "platform/heap/PagePool.h" | 40 #include "platform/heap/PagePool.h" |
42 #include "platform/heap/SafePoint.h" | 41 #include "platform/heap/SafePoint.h" |
43 #include "platform/heap/ThreadState.h" | 42 #include "platform/heap/ThreadState.h" |
44 #include "platform/tracing/TraceEvent.h" | 43 #include "platform/tracing/TraceEvent.h" |
45 #include "platform/tracing/web_memory_allocator_dump.h" | 44 #include "platform/tracing/web_memory_allocator_dump.h" |
46 #include "platform/tracing/web_process_memory_dump.h" | 45 #include "platform/tracing/web_process_memory_dump.h" |
47 #include "public/platform/Platform.h" | 46 #include "public/platform/Platform.h" |
48 #include "wtf/Assertions.h" | 47 #include "wtf/Assertions.h" |
(...skipping 146 matching lines...) |
195 ASSERT(!page->hasBeenSwept()); | 194 ASSERT(!page->hasBeenSwept()); |
196 page->invalidateObjectStartBitmap(); | 195 page->invalidateObjectStartBitmap(); |
197 } | 196 } |
198 if (previousPage) { | 197 if (previousPage) { |
199 ASSERT(m_firstUnsweptPage); | 198 ASSERT(m_firstUnsweptPage); |
200 previousPage->m_next = m_firstPage; | 199 previousPage->m_next = m_firstPage; |
201 m_firstPage = m_firstUnsweptPage; | 200 m_firstPage = m_firstUnsweptPage; |
202 m_firstUnsweptPage = nullptr; | 201 m_firstUnsweptPage = nullptr; |
203 } | 202 } |
204 ASSERT(!m_firstUnsweptPage); | 203 ASSERT(!m_firstUnsweptPage); |
205 | |
206 HeapCompact* heapCompactor = getThreadState()->heap().compaction(); | |
207 if (!heapCompactor->isCompactingArena(arenaIndex())) | |
208 return; | |
209 | |
210 BasePage* nextPage = m_firstPage; | |
211 while (nextPage) { | |
212 if (!nextPage->isLargeObjectPage()) | |
213 heapCompactor->addCompactingPage(nextPage); | |
214 nextPage = nextPage->next(); | |
215 } | |
216 } | 204 } |
217 | 205 |
218 void BaseArena::makeConsistentForMutator() { | 206 void BaseArena::makeConsistentForMutator() { |
219 clearFreeLists(); | 207 clearFreeLists(); |
220 ASSERT(isConsistentForGC()); | 208 ASSERT(isConsistentForGC()); |
221 ASSERT(!m_firstPage); | 209 ASSERT(!m_firstPage); |
222 | 210 |
223 // Drop marks from marked objects and rebuild free lists in preparation for | 211 // Drop marks from marked objects and rebuild free lists in preparation for |
224 // resuming the execution of mutators. | 212 // resuming the execution of mutators. |
225 BasePage* previousPage = nullptr; | 213 BasePage* previousPage = nullptr; |
(...skipping 219 matching lines...) |
445 m_promptlyFreedSize(0), | 433 m_promptlyFreedSize(0), |
446 m_isLazySweeping(false) { | 434 m_isLazySweeping(false) { |
447 clearFreeLists(); | 435 clearFreeLists(); |
448 } | 436 } |
449 | 437 |
450 void NormalPageArena::clearFreeLists() { | 438 void NormalPageArena::clearFreeLists() { |
451 setAllocationPoint(nullptr, 0); | 439 setAllocationPoint(nullptr, 0); |
452 m_freeList.clear(); | 440 m_freeList.clear(); |
453 } | 441 } |
454 | 442 |
455 size_t NormalPageArena::arenaSize() { | |
456 size_t size = 0; | |
457 BasePage* page = m_firstPage; | |
458 while (page) { | |
459 size += page->size(); | |
460 page = page->next(); | |
461 } | |
462 LOG_HEAP_FREELIST_VERBOSE("Heap size: %zu (%d)\n", size, arenaIndex()); | |
463 return size; | |
464 } | |
465 | |
466 size_t NormalPageArena::freeListSize() { | |
467 size_t freeSize = m_freeList.freeListSize(); | |
468 LOG_HEAP_FREELIST_VERBOSE("Free size: %zu (%d)\n", freeSize, arenaIndex()); | |
469 return freeSize; | |
470 } | |
471 | |
472 void NormalPageArena::sweepAndCompact() { | |
473 ThreadHeap& heap = getThreadState()->heap(); | |
474 if (!heap.compaction()->isCompactingArena(arenaIndex())) | |
475 return; | |
476 | |
477 if (!m_firstUnsweptPage) { | |
478 heap.compaction()->finishedArenaCompaction(this, 0, 0); | |
479 return; | |
480 } | |
481 | |
482 // Compaction is performed in-place, sliding objects down over unused | |
483 // holes for a smaller heap page footprint and improved locality. | |
484 // A "compaction pointer" is consequently kept, pointing to the next | |
485 // available address to move objects down to. It will belong to one | |
486 // of the already sweep-compacted pages for this arena, but as compaction | |
487 // proceeds, it will not belong to the same page as the one |
488 // currently being compacted. |
489 // | |
490 // The compaction pointer is represented by the | |
491 // |(currentPage, allocationPoint)| pair, with |allocationPoint| | |
492 // being the offset into |currentPage|, making up the next | |
493 // available location. When compacting an arena page exhausts the |
494 // page currently being compacted into, the compaction pointer |
495 // advances to the next available page and resets the allocation |
496 // point. |
497 // | |
498 // By construction, the page compaction can be performed without having | |
499 // to allocate any new pages. So to arrange for the page compaction's | |
500 // supply of freed, available pages, we chain them together after each | |
501 // has been "compacted from". The page compaction will then reuse those | |
502 // as needed, and once finished, the chained, available pages can be | |
503 // released back to the OS. | |
504 // | |
505 // To ease the passing of the compaction state when iterating over an | |
506 // arena's pages, package it up into a |CompactionContext|. | |
507 NormalPage::CompactionContext context; | |
508 context.m_compactedPages = &m_firstPage; | |
509 | |
510 while (m_firstUnsweptPage) { | |
511 BasePage* page = m_firstUnsweptPage; | |
512 if (page->isEmpty()) { | |
513 page->unlink(&m_firstUnsweptPage); | |
514 page->removeFromHeap(); | |
515 continue; | |
516 } | |
517 // Large objects do not belong to this arena. | |
518 DCHECK(!page->isLargeObjectPage()); | |
519 NormalPage* normalPage = static_cast<NormalPage*>(page); | |
520 normalPage->unlink(&m_firstUnsweptPage); | |
521 normalPage->markAsSwept(); | |
522 // If not the first page, add |normalPage| onto the available pages chain. | |
523 if (!context.m_currentPage) | |
524 context.m_currentPage = normalPage; | |
525 else | |
526 normalPage->link(&context.m_availablePages); | |
527 normalPage->sweepAndCompact(context); | |
528 } | |
529 | |
530 size_t freedSize = 0; | |
531 size_t freedPageCount = 0; | |
532 | |
533 DCHECK(context.m_currentPage); | |
534 // If the current page hasn't been allocated into, add it to the available | |
535 // list, for subsequent release below. | |
536 size_t allocationPoint = context.m_allocationPoint; | |
537 if (!allocationPoint) { | |
538 context.m_currentPage->link(&context.m_availablePages); | |
539 } else { | |
540 NormalPage* currentPage = context.m_currentPage; | |
541 currentPage->link(&m_firstPage); | |
542 if (allocationPoint != currentPage->payloadSize()) { | |
543 // Put the remainder of the page onto the free list. | |
544 freedSize = currentPage->payloadSize() - allocationPoint; | |
545 Address payload = currentPage->payload(); | |
546 SET_MEMORY_INACCESSIBLE(payload + allocationPoint, freedSize); | |
547 currentPage->arenaForNormalPage()->addToFreeList( | |
548 payload + allocationPoint, freedSize); | |
549 } | |
550 } | |
551 | |
552 // Return available pages to the free page pool, decommitting them from | |
553 // the pagefile. | |
554 BasePage* availablePages = context.m_availablePages; | |
555 while (availablePages) { | |
556 size_t pageSize = availablePages->size(); | |
557 #if DEBUG_HEAP_COMPACTION | |
558 if (!freedPageCount) | |
559 LOG_HEAP_COMPACTION("Releasing:"); | |
560 LOG_HEAP_COMPACTION(" [%p, %p]", availablePages, availablePages + pageSize); | |
561 #endif | |
562 freedSize += pageSize; | |
563 freedPageCount++; | |
564 BasePage* nextPage; | |
565 availablePages->unlink(&nextPage); | |
566 availablePages->removeFromHeap(); | |
567 availablePages = static_cast<NormalPage*>(nextPage); | |
568 } | |
569 if (freedPageCount) | |
570 LOG_HEAP_COMPACTION("\n"); | |
571 heap.compaction()->finishedArenaCompaction(this, freedPageCount, freedSize); | |
572 } | |
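
The block comment at the head of the deleted NormalPageArena::sweepAndCompact() describes the |(currentPage, allocationPoint)| compaction pointer. A minimal standalone sketch of that bookkeeping follows; Page, Context, and relocateObject are simplified, hypothetical stand-ins for Blink's NormalPage, NormalPage::CompactionContext, and the copy step in NormalPage::sweepAndCompact(), not the actual API.

    // Sketch of the compaction-pointer bookkeeping, assuming fixed-size
    // pages and ignoring object headers, poisoning, and fixup recording.
    #include <cstddef>
    #include <cstring>
    #include <vector>

    struct Page {
      static const size_t kPayloadSize = 4096;
      unsigned char payload[kPayloadSize];
    };

    struct Context {
      std::vector<Page*> availablePages;  // pages already compacted from
      Page* currentPage = nullptr;        // page being compacted into
      size_t allocationPoint = 0;         // next free offset in currentPage
    };

    // Copies one live object of |size| bytes to the compaction frontier,
    // advancing to the next available page when the current one is full,
    // mirroring the page-advance step in NormalPage::sweepAndCompact().
    unsigned char* relocateObject(Context& context,
                                  const unsigned char* from,
                                  size_t size) {
      if (context.allocationPoint + size > Page::kPayloadSize) {
        context.currentPage = context.availablePages.back();
        context.availablePages.pop_back();
        context.allocationPoint = 0;
      }
      unsigned char* to =
          context.currentPage->payload + context.allocationPoint;
      std::memmove(to, from, size);  // ranges may overlap when in-place
      context.allocationPoint += size;
      return to;
    }

Because every page is fully compacted from before the frontier can reach it, no fresh pages are ever needed; the leftover chain of available pages can simply be released, as the deleted code above does.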
573 | |
574 #if ENABLE(ASSERT) | 443 #if ENABLE(ASSERT) |
575 bool NormalPageArena::isConsistentForGC() { | 444 bool NormalPageArena::isConsistentForGC() { |
576 // A thread heap is consistent for sweeping if none of the pages to be swept | 445 // A thread heap is consistent for sweeping if none of the pages to be swept |
577 // contain a freelist block or the current allocation point. | 446 // contain a freelist block or the current allocation point. |
578 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { | 447 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { |
579 for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; | 448 for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; |
580 freeListEntry; freeListEntry = freeListEntry->next()) { | 449 freeListEntry; freeListEntry = freeListEntry->next()) { |
581 if (pagesToBeSweptContains(freeListEntry->getAddress())) | 450 if (pagesToBeSweptContains(freeListEntry->getAddress())) |
582 return false; | 451 return false; |
583 } | 452 } |
(...skipping 54 matching lines...) |
638 // the limit of the number of mmapped regions the OS can support | 507 // the limit of the number of mmapped regions the OS can support |
639 // (e.g., /proc/sys/vm/max_map_count in Linux). | 508 // (e.g., /proc/sys/vm/max_map_count in Linux). |
640 RELEASE_ASSERT(result); | 509 RELEASE_ASSERT(result); |
641 pageMemory = memory; | 510 pageMemory = memory; |
642 } else { | 511 } else { |
643 getThreadState()->heap().getFreePagePool()->addFreePage(arenaIndex(), | 512 getThreadState()->heap().getFreePagePool()->addFreePage(arenaIndex(), |
644 memory); | 513 memory); |
645 } | 514 } |
646 } | 515 } |
647 } | 516 } |
| 517 |
648 NormalPage* page = | 518 NormalPage* page = |
649 new (pageMemory->writableStart()) NormalPage(pageMemory, this); | 519 new (pageMemory->writableStart()) NormalPage(pageMemory, this); |
650 page->link(&m_firstPage); | 520 page->link(&m_firstPage); |
651 | 521 |
652 getThreadState()->heap().heapStats().increaseAllocatedSpace(page->size()); | 522 getThreadState()->heap().heapStats().increaseAllocatedSpace(page->size()); |
653 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 523 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
654 // Allow the following addToFreeList() to add the newly allocated memory | 524 // Allow the following addToFreeList() to add the newly allocated memory |
655 // to the free list. | 525 // to the free list. |
656 ASAN_UNPOISON_MEMORY_REGION(page->payload(), page->payloadSize()); | 526 ASAN_UNPOISON_MEMORY_REGION(page->payload(), page->payloadSize()); |
657 Address address = page->payload(); | 527 Address address = page->payload(); |
(...skipping 542 matching lines...) |
1200 | 1070 |
1201 void NEVER_INLINE FreeList::checkFreedMemoryIsZapped(Address address, | 1071 void NEVER_INLINE FreeList::checkFreedMemoryIsZapped(Address address, |
1202 size_t size) { | 1072 size_t size) { |
1203 for (size_t i = 0; i < size; i++) { | 1073 for (size_t i = 0; i < size; i++) { |
1204 ASSERT(address[i] == reuseAllowedZapValue || | 1074 ASSERT(address[i] == reuseAllowedZapValue || |
1205 address[i] == reuseForbiddenZapValue); | 1075 address[i] == reuseForbiddenZapValue); |
1206 } | 1076 } |
1207 } | 1077 } |
1208 #endif | 1078 #endif |
1209 | 1079 |
1210 size_t FreeList::freeListSize() const { | |
1211 size_t freeSize = 0; | |
1212 for (unsigned i = 0; i < blinkPageSizeLog2; ++i) { | |
1213 FreeListEntry* entry = m_freeLists[i]; | |
1214 while (entry) { | |
1215 freeSize += entry->size(); | |
1216 entry = entry->next(); | |
1217 } | |
1218 } | |
1219 #if DEBUG_HEAP_FREELIST | |
1220 if (freeSize) { | |
1221 LOG_HEAP_FREELIST_VERBOSE("FreeList(%p): %zu\n", this, freeSize); | |
1222 for (unsigned i = 0; i < blinkPageSizeLog2; ++i) { | |
1223 FreeListEntry* entry = m_freeLists[i]; | |
1224 size_t bucket = 0; | |
1225 size_t count = 0; | |
1226 while (entry) { | |
1227 bucket += entry->size(); | |
1228 count++; | |
1229 entry = entry->next(); | |
1230 } | |
1231 if (bucket) { | |
1232 LOG_HEAP_FREELIST_VERBOSE("[%d, %d]: %zu (%zu)\n", 0x1 << i, | |
1233 0x1 << (i + 1), bucket, count); | |
1234 } | |
1235 } | |
1236 } | |
1237 #endif | |
1238 return freeSize; | |
1239 } | |
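
The verbose logging in the deleted FreeList::freeListSize() above prints per-bucket ranges of [0x1 << i, 0x1 << (i + 1)), i.e. bucket i holds entries whose size has floor(log2(size)) == i. The body of bucketIndexForSize() below is elided in this diff, but a sketch consistent with that bucketing (an assumption, not the verbatim implementation) is:

    // Hedged sketch: returns floor(log2(size)), so bucket i spans
    // sizes [1 << i, 1 << (i + 1)), matching freeListSize()'s logging.
    #include <cassert>
    #include <cstddef>

    int bucketIndexForSize(size_t size) {
      assert(size > 0);
      int index = -1;
      while (size) {
        size >>= 1;
        ++index;
      }
      return index;
    }

    // Examples: bucketIndexForSize(1) == 0, bucketIndexForSize(7) == 2,
    // bucketIndexForSize(8) == 3.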
1240 | |
1241 void FreeList::clear() { | 1080 void FreeList::clear() { |
1242 m_biggestFreeListIndex = 0; | 1081 m_biggestFreeListIndex = 0; |
1243 for (size_t i = 0; i < blinkPageSizeLog2; ++i) | 1082 for (size_t i = 0; i < blinkPageSizeLog2; ++i) |
1244 m_freeLists[i] = nullptr; | 1083 m_freeLists[i] = nullptr; |
1245 } | 1084 } |
1246 | 1085 |
1247 int FreeList::bucketIndexForSize(size_t size) { | 1086 int FreeList::bucketIndexForSize(size_t size) { |
1248 ASSERT(size > 0); | 1087 ASSERT(size > 0); |
1249 int index = -1; | 1088 int index = -1; |
1250 while (size) { | 1089 while (size) { |
(...skipping 149 matching lines...) |
1400 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) | 1239 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) |
1401 if (MemoryCoordinator::isLowEndDevice()) | 1240 if (MemoryCoordinator::isLowEndDevice()) |
1402 discardPages(startOfGap + sizeof(FreeListEntry), payloadEnd()); | 1241 discardPages(startOfGap + sizeof(FreeListEntry), payloadEnd()); |
1403 #endif | 1242 #endif |
1404 } | 1243 } |
1405 | 1244 |
1406 if (markedObjectSize) | 1245 if (markedObjectSize) |
1407 pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize); | 1246 pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize); |
1408 } | 1247 } |
1409 | 1248 |
1410 void NormalPage::sweepAndCompact(CompactionContext& context) { | |
1411 NormalPage*& currentPage = context.m_currentPage; | |
1412 size_t& allocationPoint = context.m_allocationPoint; | |
1413 | |
1414 size_t markedObjectSize = 0; | |
1415 NormalPageArena* pageArena = arenaForNormalPage(); | |
1416 #if defined(ADDRESS_SANITIZER) | |
1417 bool isVectorArena = ThreadState::isVectorArenaIndex(pageArena->arenaIndex()); | |
1418 #endif | |
1419 HeapCompact* compact = pageArena->getThreadState()->heap().compaction(); | |
1420 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { | |
1421 HeapObjectHeader* header = | |
1422 reinterpret_cast<HeapObjectHeader*>(headerAddress); | |
1423 size_t size = header->size(); | |
1424 DCHECK(size > 0 && size < blinkPagePayloadSize()); | |
1425 | |
1426 if (header->isPromptlyFreed()) | |
1427 pageArena->decreasePromptlyFreedSize(size); | |
1428 if (header->isFree()) { | |
1429 // Unpoison the freelist entry so that we | |
1430 // can compact into it as needed. |
1431 ASAN_UNPOISON_MEMORY_REGION(headerAddress, size); | |
1432 headerAddress += size; | |
1433 continue; | |
1434 } | |
1435 // This is a fast version of header->payloadSize(). | |
1436 size_t payloadSize = size - sizeof(HeapObjectHeader); | |
1437 Address payload = header->payload(); | |
1438 if (!header->isMarked()) { | |
1439 // For ASan, unpoison the object before calling the finalizer. The | |
1440 // finalized object will be zero-filled and poisoned afterwards. |
1441 // Given all other unmarked objects are poisoned, ASan will detect |
1442 // an error if the finalizer touches any other on-heap objects that |
1443 // die in the same GC cycle. |
1444 ASAN_UNPOISON_MEMORY_REGION(headerAddress, size); | |
1445 header->finalize(payload, payloadSize); | |
1446 | |
1447 // As compaction is under way, leave the freed memory accessible | |
1448 // while compacting the rest of the page. We just zap the payload | |
1449 // to catch out other finalizers trying to access it. | |
1450 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \ | |
1451 defined(MEMORY_SANITIZER) | |
1452 FreeList::zapFreedMemory(payload, payloadSize); | |
1453 #endif | |
1454 headerAddress += size; | |
1455 continue; | |
1456 } | |
1457 header->unmark(); | |
1458 // Allocate and copy over the live object. | |
1459 Address compactFrontier = currentPage->payload() + allocationPoint; | |
1460 if (compactFrontier + size > currentPage->payloadEnd()) { | |
1461 // Can't fit on the current allocation page; add the remainder to |
1462 // the freelist and advance to the next available page. |
1463 // | |
1464 // TODO(sof): be more clever & compact later objects into | |
1465 // |currentPage|'s unused slop. | |
1466 currentPage->link(context.m_compactedPages); | |
1467 size_t freeSize = currentPage->payloadSize() - allocationPoint; | |
1468 if (freeSize) { | |
1469 SET_MEMORY_INACCESSIBLE(compactFrontier, freeSize); | |
1470 currentPage->arenaForNormalPage()->addToFreeList(compactFrontier, | |
1471 freeSize); | |
1472 } | |
1473 | |
1474 BasePage* nextAvailablePage; | |
1475 context.m_availablePages->unlink(&nextAvailablePage); | |
1476 currentPage = reinterpret_cast<NormalPage*>(context.m_availablePages); | |
1477 context.m_availablePages = nextAvailablePage; | |
1478 allocationPoint = 0; | |
1479 compactFrontier = currentPage->payload(); | |
1480 } | |
1481 if (LIKELY(compactFrontier != headerAddress)) { | |
1482 #if defined(ADDRESS_SANITIZER) | |
1483 // Unpoison the header and, if this is a vector backing |
1484 // store object, release the container annotations by |
1485 // unpoisoning the payload entirely. |
1486 ASAN_UNPOISON_MEMORY_REGION(header, sizeof(HeapObjectHeader)); | |
1487 if (isVectorArena) | |
1488 ASAN_UNPOISON_MEMORY_REGION(payload, payloadSize); | |
1489 #endif | |
1490 // Use a non-overlapping copy, if possible. | |
1491 if (currentPage == this) | |
1492 memmove(compactFrontier, headerAddress, size); | |
1493 else | |
1494 memcpy(compactFrontier, headerAddress, size); | |
1495 compact->relocate(payload, compactFrontier + sizeof(HeapObjectHeader)); | |
1496 } | |
1497 headerAddress += size; | |
1498 markedObjectSize += size; | |
1499 allocationPoint += size; | |
1500 DCHECK(allocationPoint <= currentPage->payloadSize()); | |
1501 } | |
1502 if (markedObjectSize) | |
1503 pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize); | |
1504 | |
1505 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \ | |
1506 defined(MEMORY_SANITIZER) | |
1507 // Zap the page, which is now available and will either be compacted into | |
1508 // or freed. | |
1509 if (currentPage != this) { | |
1510 FreeList::zapFreedMemory(payload(), payloadSize()); | |
1511 } else { | |
1512 FreeList::zapFreedMemory(payload() + allocationPoint, | |
1513 payloadSize() - allocationPoint); | |
1514 } | |
1515 #endif | |
1516 } | |
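
A detail worth calling out in the deleted NormalPage::sweepAndCompact() above: when the destination page is the page currently being swept (currentPage == this), the source and destination byte ranges can overlap, so the code selects memmove; memcpy is only safe when copying into a different page. A hypothetical standalone illustration of the overlapping case:

    // Sliding a 64-byte object down by 16 bytes within one page payload:
    // the ranges [0, 64) and [16, 80) overlap, so memcpy would be
    // undefined behavior here, while memmove handles it correctly.
    #include <cstring>

    void slideDownInPlace(unsigned char* pagePayload) {
      const unsigned char* from = pagePayload + 16;  // old object location
      unsigned char* to = pagePayload;               // compaction frontier
      std::memmove(to, from, 64);
    }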
1517 | |
1518 void NormalPage::makeConsistentForGC() { | 1249 void NormalPage::makeConsistentForGC() { |
1519 size_t markedObjectSize = 0; | 1250 size_t markedObjectSize = 0; |
1520 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { | 1251 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { |
1521 HeapObjectHeader* header = | 1252 HeapObjectHeader* header = |
1522 reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1253 reinterpret_cast<HeapObjectHeader*>(headerAddress); |
1523 ASSERT(header->size() < blinkPagePayloadSize()); | 1254 ASSERT(header->size() < blinkPagePayloadSize()); |
1524 // Check if this is a free list entry first, since we cannot call | 1255 // Check if this is a free list entry first, since we cannot call |
1525 // isMarked on a free list entry. | 1256 // isMarked on a free list entry. |
1526 if (header->isFree()) { | 1257 if (header->isFree()) { |
1527 headerAddress += header->size(); | 1258 headerAddress += header->size(); |
(...skipping 372 matching lines...) |
1900 | 1631 |
1901 m_hasEntries = true; | 1632 m_hasEntries = true; |
1902 size_t index = hash(address); | 1633 size_t index = hash(address); |
1903 ASSERT(!(index & 1)); | 1634 ASSERT(!(index & 1)); |
1904 Address cachePage = roundToBlinkPageStart(address); | 1635 Address cachePage = roundToBlinkPageStart(address); |
1905 m_entries[index + 1] = m_entries[index]; | 1636 m_entries[index + 1] = m_entries[index]; |
1906 m_entries[index] = cachePage; | 1637 m_entries[index] = cachePage; |
1907 } | 1638 } |
1908 | 1639 |
1909 } // namespace blink | 1640 } // namespace blink |