| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 428 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 439 LOG_HEAP_FREELIST_VERBOSE("Heap size: %zu (%d)\n", size, ArenaIndex()); | 439 LOG_HEAP_FREELIST_VERBOSE("Heap size: %zu (%d)\n", size, ArenaIndex()); |
| 440 return size; | 440 return size; |
| 441 } | 441 } |
| 442 | 442 |
| 443 size_t NormalPageArena::FreeListSize() { | 443 size_t NormalPageArena::FreeListSize() { |
| 444 size_t free_size = free_list_.FreeListSize(); | 444 size_t free_size = free_list_.FreeListSize(); |
| 445 LOG_HEAP_FREELIST_VERBOSE("Free size: %zu (%d)\n", free_size, ArenaIndex()); | 445 LOG_HEAP_FREELIST_VERBOSE("Free size: %zu (%d)\n", free_size, ArenaIndex()); |
| 446 return free_size; | 446 return free_size; |
| 447 } | 447 } |
| 448 | 448 |
| 449 void NormalPage::CompactionContext::AddAvailable(BasePage* page) { |
| 450 if (!available_pages_) { |
| 451 DCHECK(!last_available_); |
| 452 available_pages_ = last_available_ = page; |
| 453 return; |
| 454 } |
| 455 DCHECK(last_available_); |
| 456 BasePage* next_available_page = page; |
| 457 last_available_->Link(&next_available_page); |
| 458 last_available_ = page; |
| 459 } |
| 460 |
| 461 BasePage* NormalPage::CompactionContext::TakeAvailable() { |
| 462 DCHECK(available_pages_); |
| 463 BasePage* page = available_pages_; |
| 464 page->Unlink(&available_pages_); |
| 465 if (page == last_available_) |
| 466 last_available_ = nullptr; |
| 467 return page; |
| 468 } |
| 469 |
| 449 void NormalPageArena::SweepAndCompact() { | 470 void NormalPageArena::SweepAndCompact() { |
| 450 ThreadHeap& heap = GetThreadState()->Heap(); | 471 ThreadHeap& heap = GetThreadState()->Heap(); |
| 451 if (!heap.Compaction()->IsCompactingArena(ArenaIndex())) | 472 if (!heap.Compaction()->IsCompactingArena(ArenaIndex())) |
| 452 return; | 473 return; |
| 453 | 474 |
| 454 if (!first_unswept_page_) { | 475 if (!first_unswept_page_) { |
| 455 heap.Compaction()->FinishedArenaCompaction(this, 0, 0); | 476 heap.Compaction()->FinishedArenaCompaction(this, 0, 0); |
| 456 return; | 477 return; |
| 457 } | 478 } |
| 458 | 479 |
| (...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 493 } | 514 } |
| 494 // Large objects do not belong to this arena. | 515 // Large objects do not belong to this arena. |
| 495 DCHECK(!page->IsLargeObjectPage()); | 516 DCHECK(!page->IsLargeObjectPage()); |
| 496 NormalPage* normal_page = static_cast<NormalPage*>(page); | 517 NormalPage* normal_page = static_cast<NormalPage*>(page); |
| 497 normal_page->Unlink(&first_unswept_page_); | 518 normal_page->Unlink(&first_unswept_page_); |
| 498 normal_page->MarkAsSwept(); | 519 normal_page->MarkAsSwept(); |
| 499 // If not the first page, add |normal_page| onto the available pages chain. | 520 // If not the first page, add |normal_page| onto the available pages chain. |
| 500 if (!context.current_page_) | 521 if (!context.current_page_) |
| 501 context.current_page_ = normal_page; | 522 context.current_page_ = normal_page; |
| 502 else | 523 else |
| 503 normal_page->Link(&context.available_pages_); | 524 context.AddAvailable(normal_page); |
| 504 normal_page->SweepAndCompact(context); | 525 normal_page->SweepAndCompact(context); |
| 505 } | 526 } |
| 506 | 527 |
| 507 // All pages were empty; nothing to compact. | 528 // All pages were empty; nothing to compact. |
| 508 if (!context.current_page_) { | 529 if (!context.current_page_) { |
| 509 heap.Compaction()->FinishedArenaCompaction(this, 0, 0); | 530 heap.Compaction()->FinishedArenaCompaction(this, 0, 0); |
| 510 return; | 531 return; |
| 511 } | 532 } |
| 512 | 533 |
| 513 size_t freed_size = 0; | 534 size_t freed_size = 0; |
| (...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 552 // decommits it. Recommitting the page must find a zeroed page later. | 573 // decommits it. Recommitting the page must find a zeroed page later. |
| 553 // We cannot assume that the OS will hand back a zeroed page across | 574 // We cannot assume that the OS will hand back a zeroed page across |
| 554 // its "decommit" operation. | 575 // its "decommit" operation. |
| 555 // | 576 // |
| 556 // If in a debug setting, the unused page contents will have been | 577 // If in a debug setting, the unused page contents will have been |
| 557 // zapped already; leave it in that state. | 578 // zapped already; leave it in that state. |
| 558 DCHECK(!available_pages->IsLargeObjectPage()); | 579 DCHECK(!available_pages->IsLargeObjectPage()); |
| 559 NormalPage* unused_page = reinterpret_cast<NormalPage*>(available_pages); | 580 NormalPage* unused_page = reinterpret_cast<NormalPage*>(available_pages); |
| 560 memset(unused_page->Payload(), 0, unused_page->PayloadSize()); | 581 memset(unused_page->Payload(), 0, unused_page->PayloadSize()); |
| 561 #endif | 582 #endif |
| 562 available_pages->RemoveFromHeap(); | 583 // Try to cycle out the pages right away; done to try to get |
| 584 // immediately observable memory reduction across platforms. |
| 585 available_pages->RemoveFromHeap(DecommitMemoryTiming::DecommitPromptly); |
| 563 available_pages = static_cast<NormalPage*>(next_page); | 586 available_pages = static_cast<NormalPage*>(next_page); |
| 564 } | 587 } |
| 565 if (freed_page_count) | 588 if (freed_page_count) |
| 566 LOG_HEAP_COMPACTION("\n"); | 589 LOG_HEAP_COMPACTION("\n"); |
| 567 heap.Compaction()->FinishedArenaCompaction(this, freed_page_count, | 590 heap.Compaction()->FinishedArenaCompaction(this, freed_page_count, |
| 568 freed_size); | 591 freed_size); |
| 569 } | 592 } |
| 570 | 593 |
| 571 #if DCHECK_IS_ON() | 594 #if DCHECK_IS_ON() |
| 572 bool NormalPageArena::IsConsistentForGC() { | 595 bool NormalPageArena::IsConsistentForGC() { |
| (...skipping 78 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 651 // to the free list. | 674 // to the free list. |
| 652 ASAN_UNPOISON_MEMORY_REGION(page->Payload(), page->PayloadSize()); | 675 ASAN_UNPOISON_MEMORY_REGION(page->Payload(), page->PayloadSize()); |
| 653 Address address = page->Payload(); | 676 Address address = page->Payload(); |
| 654 for (size_t i = 0; i < page->PayloadSize(); i++) | 677 for (size_t i = 0; i < page->PayloadSize(); i++) |
| 655 address[i] = kReuseAllowedZapValue; | 678 address[i] = kReuseAllowedZapValue; |
| 656 ASAN_POISON_MEMORY_REGION(page->Payload(), page->PayloadSize()); | 679 ASAN_POISON_MEMORY_REGION(page->Payload(), page->PayloadSize()); |
| 657 #endif | 680 #endif |
| 658 AddToFreeList(page->Payload(), page->PayloadSize()); | 681 AddToFreeList(page->Payload(), page->PayloadSize()); |
| 659 } | 682 } |
| 660 | 683 |
| 661 void NormalPageArena::FreePage(NormalPage* page) { | 684 void NormalPageArena::FreePage(NormalPage* page, |
| 685 DecommitMemoryTiming decommit_hint) { |
| 662 GetThreadState()->Heap().HeapStats().DecreaseAllocatedSpace(page->size()); | 686 GetThreadState()->Heap().HeapStats().DecreaseAllocatedSpace(page->size()); |
| 663 | 687 |
| 664 PageMemory* memory = page->Storage(); | 688 PageMemory* memory = page->Storage(); |
| 665 page->~NormalPage(); | 689 page->~NormalPage(); |
| 666 GetThreadState()->Heap().GetFreePagePool()->Add(ArenaIndex(), memory); | 690 GetThreadState()->Heap().GetFreePagePool()->Add(ArenaIndex(), memory, |
| 691 decommit_hint); |
| 667 } | 692 } |
| 668 | 693 |
| 669 bool NormalPageArena::Coalesce() { | 694 bool NormalPageArena::Coalesce() { |
| 670 // Don't coalesce arenas if there are not enough promptly freed entries | 695 // Don't coalesce arenas if there are not enough promptly freed entries |
| 671 // to be coalesced. | 696 // to be coalesced. |
| 672 // | 697 // |
| 673 // FIXME: This threshold is determined just to optimize blink_perf | 698 // FIXME: This threshold is determined just to optimize blink_perf |
| 674 // benchmarks. Coalescing is very sensitive to the threshold and | 699 // benchmarks. Coalescing is very sensitive to the threshold and |
| 675 // we need further investigations on the coalescing scheme. | 700 // we need further investigations on the coalescing scheme. |
| 676 if (promptly_freed_size_ < 1024 * 1024) | 701 if (promptly_freed_size_ < 1024 * 1024) |
| (...skipping 607 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1284 DCHECK_LE(header_address, PayloadEnd()); | 1309 DCHECK_LE(header_address, PayloadEnd()); |
| 1285 } while (header_address < PayloadEnd()); | 1310 } while (header_address < PayloadEnd()); |
| 1286 return object_payload_size; | 1311 return object_payload_size; |
| 1287 } | 1312 } |
| 1288 | 1313 |
| 1289 bool NormalPage::IsEmpty() { | 1314 bool NormalPage::IsEmpty() { |
| 1290 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(Payload()); | 1315 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(Payload()); |
| 1291 return header->IsFree() && header->size() == PayloadSize(); | 1316 return header->IsFree() && header->size() == PayloadSize(); |
| 1292 } | 1317 } |
| 1293 | 1318 |
| 1294 void NormalPage::RemoveFromHeap() { | 1319 void NormalPage::RemoveFromHeap(DecommitMemoryTiming decommit_hint) { |
| 1295 ArenaForNormalPage()->FreePage(this); | 1320 ArenaForNormalPage()->FreePage(this, decommit_hint); |
| 1296 } | 1321 } |
| 1297 | 1322 |
| 1298 #if !DCHECK_IS_ON() && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) | 1323 #if !DCHECK_IS_ON() && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) |
| 1299 static void DiscardPages(Address begin, Address end) { | 1324 static void DiscardPages(Address begin, Address end) { |
| 1300 uintptr_t begin_address = | 1325 uintptr_t begin_address = |
| 1301 WTF::RoundUpToSystemPage(reinterpret_cast<uintptr_t>(begin)); | 1326 WTF::RoundUpToSystemPage(reinterpret_cast<uintptr_t>(begin)); |
| 1302 uintptr_t end_address = | 1327 uintptr_t end_address = |
| 1303 WTF::RoundDownToSystemPage(reinterpret_cast<uintptr_t>(end)); | 1328 WTF::RoundDownToSystemPage(reinterpret_cast<uintptr_t>(end)); |
| 1304 if (begin_address < end_address) | 1329 if (begin_address < end_address) |
| 1305 WTF::DiscardSystemPages(reinterpret_cast<void*>(begin_address), | 1330 WTF::DiscardSystemPages(reinterpret_cast<void*>(begin_address), |
| (...skipping 128 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1434 // TODO(sof): be more clever & compact later objects into | 1459 // TODO(sof): be more clever & compact later objects into |
| 1435 // |currentPage|'s unused slop. | 1460 // |currentPage|'s unused slop. |
| 1436 current_page->Link(context.compacted_pages_); | 1461 current_page->Link(context.compacted_pages_); |
| 1437 size_t free_size = current_page->PayloadSize() - allocation_point; | 1462 size_t free_size = current_page->PayloadSize() - allocation_point; |
| 1438 if (free_size) { | 1463 if (free_size) { |
| 1439 SET_MEMORY_INACCESSIBLE(compact_frontier, free_size); | 1464 SET_MEMORY_INACCESSIBLE(compact_frontier, free_size); |
| 1440 current_page->ArenaForNormalPage()->AddToFreeList(compact_frontier, | 1465 current_page->ArenaForNormalPage()->AddToFreeList(compact_frontier, |
| 1441 free_size); | 1466 free_size); |
| 1442 } | 1467 } |
| 1443 | 1468 |
| 1444 BasePage* next_available_page; | 1469 current_page = static_cast<NormalPage*>(context.TakeAvailable()); |
| 1445 context.available_pages_->Unlink(&next_available_page); | |
| 1446 current_page = reinterpret_cast<NormalPage*>(context.available_pages_); | |
| 1447 context.available_pages_ = next_available_page; | |
| 1448 allocation_point = 0; | 1470 allocation_point = 0; |
| 1449 compact_frontier = current_page->Payload(); | 1471 compact_frontier = current_page->Payload(); |
| 1450 } | 1472 } |
| 1451 if (LIKELY(compact_frontier != header_address)) { | 1473 if (LIKELY(compact_frontier != header_address)) { |
| 1452 #if defined(ADDRESS_SANITIZER) | 1474 #if defined(ADDRESS_SANITIZER) |
| 1453 // Unpoison the header + if it is a vector backing | 1475 // Unpoison the header + if it is a vector backing |
| 1454 // store object, let go of the container annotations. | 1476 // store object, let go of the container annotations. |
| 1455 // Do that by unpoisoning the payload entirely. | 1477 // Do that by unpoisoning the payload entirely. |
| 1456 ASAN_UNPOISON_MEMORY_REGION(header, sizeof(HeapObjectHeader)); | 1478 ASAN_UNPOISON_MEMORY_REGION(header, sizeof(HeapObjectHeader)); |
| 1457 if (is_vector_arena) | 1479 if (is_vector_arena) |
| (...skipping 264 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1722 | 1744 |
| 1723 size_t LargeObjectPage::ObjectPayloadSizeForTesting() { | 1745 size_t LargeObjectPage::ObjectPayloadSizeForTesting() { |
| 1724 MarkAsSwept(); | 1746 MarkAsSwept(); |
| 1725 return PayloadSize(); | 1747 return PayloadSize(); |
| 1726 } | 1748 } |
| 1727 | 1749 |
| 1728 bool LargeObjectPage::IsEmpty() { | 1750 bool LargeObjectPage::IsEmpty() { |
| 1729 return !GetHeapObjectHeader()->IsMarked(); | 1751 return !GetHeapObjectHeader()->IsMarked(); |
| 1730 } | 1752 } |
| 1731 | 1753 |
| 1732 void LargeObjectPage::RemoveFromHeap() { | 1754 void LargeObjectPage::RemoveFromHeap(DecommitMemoryTiming) { |
| 1733 static_cast<LargeObjectArena*>(Arena())->FreeLargeObjectPage(this); | 1755 static_cast<LargeObjectArena*>(Arena())->FreeLargeObjectPage(this); |
| 1734 } | 1756 } |
| 1735 | 1757 |
| 1736 void LargeObjectPage::Sweep() { | 1758 void LargeObjectPage::Sweep() { |
| 1737 GetHeapObjectHeader()->Unmark(); | 1759 GetHeapObjectHeader()->Unmark(); |
| 1738 Arena()->GetThreadState()->IncreaseMarkedObjectSize(size()); | 1760 Arena()->GetThreadState()->IncreaseMarkedObjectSize(size()); |
| 1739 } | 1761 } |
| 1740 | 1762 |
| 1741 void LargeObjectPage::MakeConsistentForMutator() { | 1763 void LargeObjectPage::MakeConsistentForMutator() { |
| 1742 HeapObjectHeader* header = GetHeapObjectHeader(); | 1764 HeapObjectHeader* header = GetHeapObjectHeader(); |
| (...skipping 101 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1844 | 1866 |
| 1845 has_entries_ = true; | 1867 has_entries_ = true; |
| 1846 size_t index = GetHash(address); | 1868 size_t index = GetHash(address); |
| 1847 DCHECK(!(index & 1)); | 1869 DCHECK(!(index & 1)); |
| 1848 Address cache_page = RoundToBlinkPageStart(address); | 1870 Address cache_page = RoundToBlinkPageStart(address); |
| 1849 entries_[index + 1] = entries_[index]; | 1871 entries_[index + 1] = entries_[index]; |
| 1850 entries_[index] = cache_page; | 1872 entries_[index] = cache_page; |
| 1851 } | 1873 } |
| 1852 | 1874 |
| 1853 } // namespace blink | 1875 } // namespace blink |
| OLD | NEW |