| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 171 matching lines...) |
| 182 static PageMemoryRegion* allocateLargePage(size_t size) | 182 static PageMemoryRegion* allocateLargePage(size_t size) |
| 183 { | 183 { |
| 184 return allocate(size, 1); | 184 return allocate(size, 1); |
| 185 } | 185 } |
| 186 | 186 |
| 187 static PageMemoryRegion* allocateNormalPages() | 187 static PageMemoryRegion* allocateNormalPages() |
| 188 { | 188 { |
| 189 return allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion); | 189 return allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion); |
| 190 } | 190 } |
| 191 | 191 |
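For scale: assuming the usual constants of this revision (blinkPageSize = 1 << 17, i.e. 128 KB, and blinkPagesPerRegion = 10, both assumptions here), allocateNormalPages() reserves one region of 10 * 128 KB = 1.25 MB whose pages are handed out individually, while allocateLargePage() reserves a dedicated region sized to the single large allocation.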
| 192 BasePage* pageFromAddress(Address address) | 192 BaseHeapPage* pageFromAddress(Address address) |
| 193 { | 193 { |
| 194 ASSERT(contains(address)); | 194 ASSERT(contains(address)); |
| 195 if (!m_inUse[index(address)]) | 195 if (!m_inUse[index(address)]) |
| 196 return nullptr; | 196 return nullptr; |
| 197 if (m_isLargePage) | 197 if (m_isLargePage) |
| 198 return pageFromObject(base()); | 198 return pageFromObject(base()); |
| 199 return pageFromObject(address); | 199 return pageFromObject(address); |
| 200 } | 200 } |
| 201 | 201 |
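pageFromObject(), used just above, maps any interior address to its page header by alignment arithmetic. A minimal sketch, assuming blink pages are blinkPageSize-aligned with the header placed just past the OS guard page (the names blinkPageBaseMask and blinkGuardPageSize are assumptions for illustration; the page type follows the right-hand column):

    static BaseHeapPage* pageFromObject(Address address)
    {
        // Mask off the offset within the blink page, then skip the guard
        // page to land on the in-page header object.
        uintptr_t pageBase = reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask;
        return reinterpret_cast<BaseHeapPage*>(pageBase + blinkGuardPageSize);
    }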
| 202 private: | 202 private: |
| (...skipping 267 matching lines...) |
| 470 // Zap the primary vTable entry (secondary vTable entries are not zapped). | 470 // Zap the primary vTable entry (secondary vTable entries are not zapped). |
| 471 if (gcInfo->hasVTable()) { | 471 if (gcInfo->hasVTable()) { |
| 472 *(reinterpret_cast<uintptr_t*>(object)) = zappedVTable; | 472 *(reinterpret_cast<uintptr_t*>(object)) = zappedVTable; |
| 473 } | 473 } |
| 474 #endif | 474 #endif |
| 475 // In Release builds, the entire object is zeroed out when it is added to | 475 // In Release builds, the entire object is zeroed out when it is added to |
| 476 // the free list. This happens right after sweeping the page and before the | 476 // the free list. This happens right after sweeping the page and before the |
| 477 // thread commences execution. | 477 // thread commences execution. |
| 478 } | 478 } |
| 479 | 479 |
| 480 void LargeObjectPage::sweep() | 480 void LargeObject::sweep() |
| 481 { | 481 { |
| 482 Heap::increaseMarkedObjectSize(size()); | 482 Heap::increaseMarkedObjectSize(size()); |
| 483 heapObjectHeader()->unmark(); | 483 heapObjectHeader()->unmark(); |
| 484 } | 484 } |
| 485 | 485 |
| 486 bool LargeObjectPage::isEmpty() | 486 bool LargeObject::isEmpty() |
| 487 { | 487 { |
| 488 return !heapObjectHeader()->isMarked(); | 488 return !heapObjectHeader()->isMarked(); |
| 489 } | 489 } |
| 490 | 490 |
| 491 #if ENABLE(ASSERT) | 491 #if ENABLE(ASSERT) |
| 492 static bool isUninitializedMemory(void* objectPointer, size_t objectSize) | 492 static bool isUninitializedMemory(void* objectPointer, size_t objectSize) |
| 493 { | 493 { |
| 494 // Scan through the object's fields and check that they are all zero. | 494 // Scan through the object's fields and check that they are all zero. |
| 495 Address* objectFields = reinterpret_cast<Address*>(objectPointer); | 495 Address* objectFields = reinterpret_cast<Address*>(objectPointer); |
| 496 for (size_t i = 0; i < objectSize / sizeof(Address); ++i) { | 496 for (size_t i = 0; i < objectSize / sizeof(Address); ++i) { |
| 497 if (objectFields[i] != 0) | 497 if (objectFields[i] != 0) |
| 498 return false; | 498 return false; |
| 499 } | 499 } |
| 500 return true; | 500 return true; |
| 501 } | 501 } |
| 502 #endif | 502 #endif |
| 503 | 503 |
| 504 static void markPointer(Visitor* visitor, HeapObjectHeader* header) | 504 static void markPointer(Visitor* visitor, HeapObjectHeader* header) |
| 505 { | 505 { |
| 506 const GCInfo* gcInfo = Heap::gcInfo(header->gcInfoIndex()); | 506 const GCInfo* gcInfo = Heap::gcInfo(header->gcInfoIndex()); |
| 507 if (gcInfo->hasVTable() && !vTableInitialized(header->payload())) { | 507 if (gcInfo->hasVTable() && !vTableInitialized(header->payload())) { |
| 508 visitor->markHeaderNoTracing(header); | 508 visitor->markHeaderNoTracing(header); |
| 509 ASSERT(isUninitializedMemory(header->payload(), header->payloadSize())); | 509 ASSERT(isUninitializedMemory(header->payload(), header->payloadSize())); |
| 510 } else { | 510 } else { |
| 511 visitor->markHeader(header, gcInfo->m_trace); | 511 visitor->markHeader(header, gcInfo->m_trace); |
| 512 } | 512 } |
| 513 } | 513 } |
| 514 | 514 |
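The vTableInitialized() guard above can be read as a one-word probe: page memory starts out zeroed, so until a constructor installs the vtable pointer the first word of the payload is null. A sketch under that assumption:

    static bool vTableInitialized(void* objectPayload)
    {
        // A non-null first word means the compiler has written the vtable
        // pointer, so the object is safe to trace virtually.
        return !!(*reinterpret_cast<Address*>(objectPayload));
    }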
| 515 void LargeObjectPage::checkAndMarkPointer(Visitor* visitor, Address address) | 515 void LargeObject::checkAndMarkPointer(Visitor* visitor, Address address) |
| 516 { | 516 { |
| 517 ASSERT(contains(address)); | 517 ASSERT(contains(address)); |
| 518 if (!containedInObjectPayload(address) || heapObjectHeader()->isDead()) | 518 if (!containedInObjectPayload(address) || heapObjectHeader()->isDead()) |
| 519 return; | 519 return; |
| 520 #if ENABLE(GC_PROFILING) | 520 #if ENABLE(GC_PROFILING) |
| 521 visitor->setHostInfo(&address, "stack"); | 521 visitor->setHostInfo(&address, "stack"); |
| 522 #endif | 522 #endif |
| 523 markPointer(visitor, heapObjectHeader()); | 523 markPointer(visitor, heapObjectHeader()); |
| 524 } | 524 } |
| 525 | 525 |
| 526 void LargeObjectPage::markUnmarkedObjectsDead() | 526 void LargeObject::markUnmarkedObjectsDead() |
| 527 { | 527 { |
| 528 HeapObjectHeader* header = heapObjectHeader(); | 528 HeapObjectHeader* header = heapObjectHeader(); |
| 529 if (header->isMarked()) | 529 if (header->isMarked()) |
| 530 header->unmark(); | 530 header->unmark(); |
| 531 else | 531 else |
| 532 header->markDead(); | 532 header->markDead(); |
| 533 } | 533 } |
| 534 | 534 |
| 535 void LargeObjectPage::removeFromHeap() | 535 void LargeObject::removeFromHeap() |
| 536 { | 536 { |
| 537 static_cast<LargeObjectHeap*>(heap())->freeLargeObjectPage(this); | 537 static_cast<ThreadHeapForLargeObject*>(heap())->freeLargeObject(this); |
| 538 } | 538 } |
| 539 | 539 |
| 540 FreeList::FreeList() | 540 FreeList::FreeList() |
| 541 : m_biggestFreeListIndex(0) | 541 : m_biggestFreeListIndex(0) |
| 542 { | 542 { |
| 543 } | 543 } |
| 544 | 544 |
| 545 BaseHeap::BaseHeap(ThreadState* state, int index) | 545 ThreadHeap::ThreadHeap(ThreadState* state, int index) |
| 546 : m_firstPage(nullptr) | 546 : m_firstPage(nullptr) |
| 547 , m_firstUnsweptPage(nullptr) | 547 , m_firstUnsweptPage(nullptr) |
| 548 , m_threadState(state) | 548 , m_threadState(state) |
| 549 , m_index(index) | 549 , m_index(index) |
| 550 #if ENABLE(GC_PROFILING) | 550 #if ENABLE(GC_PROFILING) |
| 551 , m_cumulativeAllocationSize(0) | 551 , m_cumulativeAllocationSize(0) |
| 552 , m_allocationCount(0) | 552 , m_allocationCount(0) |
| 553 , m_inlineAllocationCount(0) | 553 , m_inlineAllocationCount(0) |
| 554 #endif | 554 #endif |
| 555 { | 555 { |
| 556 } | 556 } |
| 557 | 557 |
| 558 NormalPageHeap::NormalPageHeap(ThreadState* state, int index) | 558 ThreadHeapForHeapPage::ThreadHeapForHeapPage(ThreadState* state, int index) |
| 559 : BaseHeap(state, index) | 559 : ThreadHeap(state, index) |
| 560 , m_currentAllocationPoint(nullptr) | 560 , m_currentAllocationPoint(nullptr) |
| 561 , m_remainingAllocationSize(0) | 561 , m_remainingAllocationSize(0) |
| 562 , m_lastRemainingAllocationSize(0) | 562 , m_lastRemainingAllocationSize(0) |
| 563 , m_promptlyFreedSize(0) | 563 , m_promptlyFreedSize(0) |
| 564 { | 564 { |
| 565 clearFreeLists(); | 565 clearFreeLists(); |
| 566 } | 566 } |
| 567 | 567 |
| 568 LargeObjectHeap::LargeObjectHeap(ThreadState* state, int index) | 568 ThreadHeapForLargeObject::ThreadHeapForLargeObject(ThreadState* state, int index) |
| 569 : BaseHeap(state, index) | 569 : ThreadHeap(state, index) |
| 570 { | 570 { |
| 571 } | 571 } |
| 572 | 572 |
| 573 BaseHeap::~BaseHeap() | 573 ThreadHeap::~ThreadHeap() |
| 574 { | 574 { |
| 575 ASSERT(!m_firstPage); | 575 ASSERT(!m_firstPage); |
| 576 ASSERT(!m_firstUnsweptPage); | 576 ASSERT(!m_firstUnsweptPage); |
| 577 } | 577 } |
| 578 | 578 |
| 579 void BaseHeap::cleanupPages() | 579 void ThreadHeap::cleanupPages() |
| 580 { | 580 { |
| 581 clearFreeLists(); | 581 clearFreeLists(); |
| 582 | 582 |
| 583 ASSERT(!m_firstUnsweptPage); | 583 ASSERT(!m_firstUnsweptPage); |
| 584 // Add the BaseHeap's pages to the orphanedPagePool. | 584 // Add the ThreadHeap's pages to the orphanedPagePool. |
| 585 for (BasePage* page = m_firstPage; page; page = page->next()) { | 585 for (BaseHeapPage* page = m_firstPage; page; page = page->next()) { |
| 586 Heap::decreaseAllocatedSpace(page->size()); | 586 Heap::decreaseAllocatedSpace(page->size()); |
| 587 Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), page); | 587 Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), page); |
| 588 } | 588 } |
| 589 m_firstPage = nullptr; | 589 m_firstPage = nullptr; |
| 590 } | 590 } |
| 591 | 591 |
| 592 void NormalPageHeap::updateRemainingAllocationSize() | 592 void ThreadHeapForHeapPage::updateRemainingAllocationSize() |
| 593 { | 593 { |
| 594 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { | 594 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { |
| 595 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize()); | 595 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize()); |
| 596 m_lastRemainingAllocationSize = remainingAllocationSize(); | 596 m_lastRemainingAllocationSize = remainingAllocationSize(); |
| 597 } | 597 } |
| 598 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); | 598 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); |
| 599 } | 599 } |
| 600 | 600 |
| 601 void NormalPageHeap::setAllocationPoint(Address point, size_t size) | 601 void ThreadHeapForHeapPage::setAllocationPoint(Address point, size_t size) |
| 602 { | 602 { |
| 603 #if ENABLE(ASSERT) | 603 #if ENABLE(ASSERT) |
| 604 if (point) { | 604 if (point) { |
| 605 ASSERT(size); | 605 ASSERT(size); |
| 606 BasePage* page = pageFromObject(point); | 606 BaseHeapPage* page = pageFromObject(point); |
| 607 ASSERT(!page->isLargeObjectPage()); | 607 ASSERT(!page->isLargeObject()); |
| 608 ASSERT(size <= static_cast<NormalPage*>(page)->payloadSize()); | 608 ASSERT(size <= static_cast<HeapPage*>(page)->payloadSize()); |
| 609 } | 609 } |
| 610 #endif | 610 #endif |
| 611 if (hasCurrentAllocationArea()) { | 611 if (hasCurrentAllocationArea()) { |
| 612 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); | 612 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); |
| 613 } | 613 } |
| 614 updateRemainingAllocationSize(); | 614 updateRemainingAllocationSize(); |
| 615 m_currentAllocationPoint = point; | 615 m_currentAllocationPoint = point; |
| 616 m_lastRemainingAllocationSize = m_remainingAllocationSize = size; | 616 m_lastRemainingAllocationSize = m_remainingAllocationSize = size; |
| 617 } | 617 } |
| 618 | 618 |
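setAllocationPoint() retires any open bump-allocation area into the free list before installing the new one. hasCurrentAllocationArea() is plausibly just the conjunction of the two members assigned at the end of the function (a sketch):

    bool hasCurrentAllocationArea() const
    {
        // The area is open only while both the bump pointer and its
        // remaining budget are non-zero.
        return currentAllocationPoint() && remainingAllocationSize();
    }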
| 619 Address NormalPageHeap::outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex) | 619 Address ThreadHeapForHeapPage::outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex) |
| 620 { | 620 { |
| 621 ASSERT(allocationSize > remainingAllocationSize()); | 621 ASSERT(allocationSize > remainingAllocationSize()); |
| 622 ASSERT(allocationSize >= allocationGranularity); | 622 ASSERT(allocationSize >= allocationGranularity); |
| 623 | 623 |
| 624 #if ENABLE(GC_PROFILING) | 624 #if ENABLE(GC_PROFILING) |
| 625 m_threadState->snapshotFreeListIfNecessary(); | 625 m_threadState->snapshotFreeListIfNecessary(); |
| 626 #endif | 626 #endif |
| 627 | 627 |
| 628 // 1. If this allocation is big enough, allocate a large object. | 628 // 1. If this allocation is big enough, allocate a large object. |
| 629 if (allocationSize >= largeObjectSizeThreshold) | 629 if (allocationSize >= largeObjectSizeThreshold) |
| 630 return static_cast<LargeObjectHeap*>(threadState()->heap(LargeObjectHeapIndex))->allocateLargeObjectPage(allocationSize, gcInfoIndex); | 630 return static_cast<ThreadHeapForLargeObject*>(threadState()->heap(LargeObjectHeap))->allocateLargeObject(allocationSize, gcInfoIndex); |
| 631 | 631 |
| 632 // 2. Check if we should trigger a GC. | 632 // 2. Check if we should trigger a GC. |
| 633 updateRemainingAllocationSize(); | 633 updateRemainingAllocationSize(); |
| 634 threadState()->scheduleGCIfNeeded(); | 634 threadState()->scheduleGCIfNeeded(); |
| 635 | 635 |
| 636 // 3. Try to allocate from a free list. | 636 // 3. Try to allocate from a free list. |
| 637 Address result = allocateFromFreeList(allocationSize, gcInfoIndex); | 637 Address result = allocateFromFreeList(allocationSize, gcInfoIndex); |
| 638 if (result) | 638 if (result) |
| 639 return result; | 639 return result; |
| 640 | 640 |
| (...skipping 19 matching lines...) |
| 660 | 660 |
| 661 // 8. Add a new page to this heap. | 661 // 8. Add a new page to this heap. |
| 662 allocatePage(); | 662 allocatePage(); |
| 663 | 663 |
| 664 // 9. Try to allocate from a free list. This allocation must succeed. | 664 // 9. Try to allocate from a free list. This allocation must succeed. |
| 665 result = allocateFromFreeList(allocationSize, gcInfoIndex); | 665 result = allocateFromFreeList(allocationSize, gcInfoIndex); |
| 666 RELEASE_ASSERT(result); | 666 RELEASE_ASSERT(result); |
| 667 return result; | 667 return result; |
| 668 } | 668 } |
| 669 | 669 |
| 670 Address NormalPageHeap::allocateFromFreeList(size_t allocationSize, size_t gcInfoIndex) | 670 Address ThreadHeapForHeapPage::allocateFromFreeList(size_t allocationSize, size_t gcInfoIndex) |
| 671 { | 671 { |
| 672 // Try reusing a block from the largest bin. The underlying reasoning | 672 // Try reusing a block from the largest bin. The underlying reasoning |
| 673 // being that we want to amortize this slow allocation call by carving | 673 // being that we want to amortize this slow allocation call by carving |
| 674 // off as a large a free block as possible in one go; a block that will | 674 // off as a large a free block as possible in one go; a block that will |
| 675 // service this block and let following allocations be serviced quickly | 675 // service this block and let following allocations be serviced quickly |
| 676 // by bump allocation. | 676 // by bump allocation. |
| 677 size_t bucketSize = 1 << m_freeList.m_biggestFreeListIndex; | 677 size_t bucketSize = 1 << m_freeList.m_biggestFreeListIndex; |
| 678 int index = m_freeList.m_biggestFreeListIndex; | 678 int index = m_freeList.m_biggestFreeListIndex; |
| 679 for (; index > 0; --index, bucketSize >>= 1) { | 679 for (; index > 0; --index, bucketSize >>= 1) { |
| 680 FreeListEntry* entry = m_freeList.m_freeLists[index]; | 680 FreeListEntry* entry = m_freeList.m_freeLists[index]; |
| (...skipping 10 matching lines...) |
| 691 ASSERT(hasCurrentAllocationArea()); | 691 ASSERT(hasCurrentAllocationArea()); |
| 692 ASSERT(remainingAllocationSize() >= allocationSize); | 692 ASSERT(remainingAllocationSize() >= allocationSize); |
| 693 m_freeList.m_biggestFreeListIndex = index; | 693 m_freeList.m_biggestFreeListIndex = index; |
| 694 return allocateObject(allocationSize, gcInfoIndex); | 694 return allocateObject(allocationSize, gcInfoIndex); |
| 695 } | 695 } |
| 696 } | 696 } |
| 697 m_freeList.m_biggestFreeListIndex = index; | 697 m_freeList.m_biggestFreeListIndex = index; |
| 698 return nullptr; | 698 return nullptr; |
| 699 } | 699 } |
| 700 | 700 |
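For context on the bump allocation mentioned in the comment above: once allocateFromFreeList() re-seeds the allocation point, subsequent allocations are a pointer increment. A simplified sketch of the inline fast path (the real allocateObject() also placement-news a HeapObjectHeader carrying gcInfoIndex):

    Address allocateInline(size_t allocationSize)
    {
        Address headerAddress = m_currentAllocationPoint;
        m_currentAllocationPoint += allocationSize;
        m_remainingAllocationSize -= allocationSize;
        // A HeapObjectHeader is constructed at headerAddress; the object
        // payload starts at headerAddress + sizeof(HeapObjectHeader).
        return headerAddress;
    }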
| 701 void BaseHeap::prepareForSweep() | 701 void ThreadHeap::prepareForSweep() |
| 702 { | 702 { |
| 703 ASSERT(!threadState()->isInGC()); | 703 ASSERT(!threadState()->isInGC()); |
| 704 ASSERT(!m_firstUnsweptPage); | 704 ASSERT(!m_firstUnsweptPage); |
| 705 | 705 |
| 706 // Move all pages to a list of unswept pages. | 706 // Move all pages to a list of unswept pages. |
| 707 m_firstUnsweptPage = m_firstPage; | 707 m_firstUnsweptPage = m_firstPage; |
| 708 m_firstPage = nullptr; | 708 m_firstPage = nullptr; |
| 709 } | 709 } |
| 710 | 710 |
| 711 Address BaseHeap::lazySweep(size_t allocationSize, size_t gcInfoIndex) | 711 Address ThreadHeap::lazySweep(size_t allocationSize, size_t gcInfoIndex) |
| 712 { | 712 { |
| 713 // If there are no pages to be swept, return immediately. | 713 // If there are no pages to be swept, return immediately. |
| 714 if (!m_firstUnsweptPage) | 714 if (!m_firstUnsweptPage) |
| 715 return nullptr; | 715 return nullptr; |
| 716 | 716 |
| 717 RELEASE_ASSERT(threadState()->isSweepingInProgress()); | 717 RELEASE_ASSERT(threadState()->isSweepingInProgress()); |
| 718 | 718 |
| 719 // lazySweepPages() can be called recursively if finalizers invoked in | 719 // lazySweepPages() can be called recursively if finalizers invoked in |
| 720 // page->sweep() allocate memory and the allocation triggers | 720 // page->sweep() allocate memory and the allocation triggers |
| 721 // lazySweepPages(). This check prevents the sweeping from being executed | 721 // lazySweepPages(). This check prevents the sweeping from being executed |
| 722 // recursively. | 722 // recursively. |
| 723 if (threadState()->sweepForbidden()) | 723 if (threadState()->sweepForbidden()) |
| 724 return nullptr; | 724 return nullptr; |
| 725 | 725 |
| 726 TRACE_EVENT0("blink_gc", "BaseHeap::lazySweepPages"); | 726 TRACE_EVENT0("blink_gc", "ThreadHeap::lazySweepPages"); |
| 727 ThreadState::SweepForbiddenScope scope(threadState()); | 727 ThreadState::SweepForbiddenScope scope(threadState()); |
| 728 | 728 |
| 729 if (threadState()->isMainThread()) | 729 if (threadState()->isMainThread()) |
| 730 ScriptForbiddenScope::enter(); | 730 ScriptForbiddenScope::enter(); |
| 731 | 731 |
| 732 Address result = lazySweepPages(allocationSize, gcInfoIndex); | 732 Address result = lazySweepPages(allocationSize, gcInfoIndex); |
| 733 | 733 |
| 734 if (threadState()->isMainThread()) | 734 if (threadState()->isMainThread()) |
| 735 ScriptForbiddenScope::exit(); | 735 ScriptForbiddenScope::exit(); |
| 736 return result; | 736 return result; |
| 737 } | 737 } |
| 738 | 738 |
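ThreadState::SweepForbiddenScope above is an RAII re-entrancy guard around the lazy sweep. A sketch, assuming ThreadState keeps a plain boolean behind sweepForbidden():

    class SweepForbiddenScope {
    public:
        explicit SweepForbiddenScope(ThreadState* state) : m_state(state)
        {
            ASSERT(!m_state->sweepForbidden());
            m_state->setSweepForbidden(true);
        }
        ~SweepForbiddenScope() { m_state->setSweepForbidden(false); }
    private:
        ThreadState* m_state;
    };

With the flag set, an allocation performed by a finalizer re-enters lazySweep(), hits the sweepForbidden() check above, and returns nullptr instead of recursing.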
| 739 Address NormalPageHeap::lazySweepPages(size_t allocationSize, size_t gcInfoIndex) | 739 Address ThreadHeapForHeapPage::lazySweepPages(size_t allocationSize, size_t gcInfoIndex) |
| 740 { | 740 { |
| 741 ASSERT(!hasCurrentAllocationArea()); | 741 ASSERT(!hasCurrentAllocationArea()); |
| 742 Address result = nullptr; | 742 Address result = nullptr; |
| 743 while (m_firstUnsweptPage) { | 743 while (m_firstUnsweptPage) { |
| 744 BasePage* page = m_firstUnsweptPage; | 744 BaseHeapPage* page = m_firstUnsweptPage; |
| 745 if (page->isEmpty()) { | 745 if (page->isEmpty()) { |
| 746 page->unlink(&m_firstUnsweptPage); | 746 page->unlink(&m_firstUnsweptPage); |
| 747 page->removeFromHeap(); | 747 page->removeFromHeap(); |
| 748 } else { | 748 } else { |
| 749 // Sweep a page and move the page from m_firstUnsweptPage to | 749 // Sweep a page and move the page from m_firstUnsweptPage to |
| 750 // m_firstPage. | 750 // m_firstPage. |
| 751 page->sweep(); | 751 page->sweep(); |
| 752 page->unlink(&m_firstUnsweptPage); | 752 page->unlink(&m_firstUnsweptPage); |
| 753 page->link(&m_firstPage); | 753 page->link(&m_firstPage); |
| 754 page->markAsSwept(); | 754 page->markAsSwept(); |
| 755 | 755 |
| 756 // For NormalPage, stop lazy sweeping once we find a slot to | 756 // For HeapPage, stop lazy sweeping once we find a slot to |
| 757 // allocate a new object. | 757 // allocate a new object. |
| 758 result = allocateFromFreeList(allocationSize, gcInfoIndex); | 758 result = allocateFromFreeList(allocationSize, gcInfoIndex); |
| 759 if (result) | 759 if (result) |
| 760 break; | 760 break; |
| 761 } | 761 } |
| 762 } | 762 } |
| 763 return result; | 763 return result; |
| 764 } | 764 } |
| 765 | 765 |
| 766 Address LargeObjectHeap::lazySweepPages(size_t allocationSize, size_t gcInfoIndex) | 766 Address ThreadHeapForLargeObject::lazySweepPages(size_t allocationSize, size_t gcInfoIndex) |
| 767 { | 767 { |
| 768 Address result = nullptr; | 768 Address result = nullptr; |
| 769 size_t sweptSize = 0; | 769 size_t sweptSize = 0; |
| 770 while (m_firstUnsweptPage) { | 770 while (m_firstUnsweptPage) { |
| 771 BasePage* page = m_firstUnsweptPage; | 771 BaseHeapPage* page = m_firstUnsweptPage; |
| 772 if (page->isEmpty()) { | 772 if (page->isEmpty()) { |
| 773 sweptSize += static_cast<LargeObjectPage*>(page)->payloadSize() + sizeof(HeapObjectHeader); | 773 sweptSize += static_cast<LargeObject*>(page)->payloadSize() + sizeof(HeapObjectHeader); |
| 774 page->unlink(&m_firstUnsweptPage); | 774 page->unlink(&m_firstUnsweptPage); |
| 775 page->removeFromHeap(); | 775 page->removeFromHeap(); |
| 776 // For LargeObjectPage, stop lazy sweeping once we have swept | 776 // For LargeObject, stop lazy sweeping once we have swept |
| 777 // more than allocationSize bytes. | 777 // more than allocationSize bytes. |
| 778 if (sweptSize >= allocationSize) { | 778 if (sweptSize >= allocationSize) { |
| 779 result = doAllocateLargeObjectPage(allocationSize, gcInfoIndex); | 779 result = doAllocateLargeObject(allocationSize, gcInfoIndex); |
| 780 ASSERT(result); | 780 ASSERT(result); |
| 781 break; | 781 break; |
| 782 } | 782 } |
| 783 } else { | 783 } else { |
| 784 // Sweep a page and move the page from m_firstUnsweptPage to | 784 // Sweep a page and move the page from m_firstUnsweptPage to |
| 785 // m_firstPage. | 785 // m_firstPage. |
| 786 page->sweep(); | 786 page->sweep(); |
| 787 page->unlink(&m_firstUnsweptPage); | 787 page->unlink(&m_firstUnsweptPage); |
| 788 page->link(&m_firstPage); | 788 page->link(&m_firstPage); |
| 789 page->markAsSwept(); | 789 page->markAsSwept(); |
| 790 } | 790 } |
| 791 } | 791 } |
| 792 return result; | 792 return result; |
| 793 } | 793 } |
| 794 | 794 |
| 795 void BaseHeap::completeSweep() | 795 void ThreadHeap::completeSweep() |
| 796 { | 796 { |
| 797 RELEASE_ASSERT(threadState()->isSweepingInProgress()); | 797 RELEASE_ASSERT(threadState()->isSweepingInProgress()); |
| 798 ASSERT(threadState()->sweepForbidden()); | 798 ASSERT(threadState()->sweepForbidden()); |
| 799 | 799 |
| 800 if (threadState()->isMainThread()) | 800 if (threadState()->isMainThread()) |
| 801 ScriptForbiddenScope::enter(); | 801 ScriptForbiddenScope::enter(); |
| 802 | 802 |
| 803 while (m_firstUnsweptPage) { | 803 while (m_firstUnsweptPage) { |
| 804 BasePage* page = m_firstUnsweptPage; | 804 BaseHeapPage* page = m_firstUnsweptPage; |
| 805 if (page->isEmpty()) { | 805 if (page->isEmpty()) { |
| 806 page->unlink(&m_firstUnsweptPage); | 806 page->unlink(&m_firstUnsweptPage); |
| 807 page->removeFromHeap(); | 807 page->removeFromHeap(); |
| 808 } else { | 808 } else { |
| 809 // Sweep a page and move the page from m_firstUnsweptPage to | 809 // Sweep a page and move the page from m_firstUnsweptPage to |
| 810 // m_firstPage. | 810 // m_firstPage. |
| 811 page->sweep(); | 811 page->sweep(); |
| 812 page->unlink(&m_firstUnsweptPage); | 812 page->unlink(&m_firstUnsweptPage); |
| 813 page->link(&m_firstPage); | 813 page->link(&m_firstPage); |
| 814 page->markAsSwept(); | 814 page->markAsSwept(); |
| 815 } | 815 } |
| 816 } | 816 } |
| 817 | 817 |
| 818 if (threadState()->isMainThread()) | 818 if (threadState()->isMainThread()) |
| 819 ScriptForbiddenScope::exit(); | 819 ScriptForbiddenScope::exit(); |
| 820 } | 820 } |
| 821 | 821 |
| 822 #if ENABLE(ASSERT) | 822 #if ENABLE(ASSERT) |
| 823 BasePage* BaseHeap::findPageFromAddress(Address address) | 823 BaseHeapPage* ThreadHeap::findPageFromAddress(Address address) |
| 824 { | 824 { |
| 825 for (BasePage* page = m_firstPage; page; page = page->next()) { | 825 for (BaseHeapPage* page = m_firstPage; page; page = page->next()) { |
| 826 if (page->contains(address)) | 826 if (page->contains(address)) |
| 827 return page; | 827 return page; |
| 828 } | 828 } |
| 829 for (BasePage* page = m_firstUnsweptPage; page; page = page->next()) { | 829 for (BaseHeapPage* page = m_firstUnsweptPage; page; page = page->next()) { |
| 830 if (page->contains(address)) | 830 if (page->contains(address)) |
| 831 return page; | 831 return page; |
| 832 } | 832 } |
| 833 return nullptr; | 833 return nullptr; |
| 834 } | 834 } |
| 835 #endif | 835 #endif |
| 836 | 836 |
| 837 #if ENABLE(GC_PROFILING) | 837 #if ENABLE(GC_PROFILING) |
| 838 #define GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD 0 | 838 #define GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD 0 |
| 839 void BaseHeap::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) | 839 void ThreadHeap::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) |
| 840 { | 840 { |
| 841 ASSERT(isConsistentForSweeping()); | 841 ASSERT(isConsistentForSweeping()); |
| 842 size_t previousPageCount = info->pageCount; | 842 size_t previousPageCount = info->pageCount; |
| 843 | 843 |
| 844 json->beginArray("pages"); | 844 json->beginArray("pages"); |
| 845 for (BasePage* page = m_firstPage; page; page = page->next(), ++info->pageCount) { | 845 for (BaseHeapPage* page = m_firstPage; page; page = page->next(), ++info->pageCount) { |
| 846 // FIXME: To limit the size of the snapshot we only output "threshold" many page snapshots. | 846 // FIXME: To limit the size of the snapshot we only output "threshold" many page snapshots. |
| 847 if (info->pageCount < GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD) { | 847 if (info->pageCount < GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD) { |
| 848 json->beginArray(); | 848 json->beginArray(); |
| 849 json->pushInteger(reinterpret_cast<intptr_t>(page)); | 849 json->pushInteger(reinterpret_cast<intptr_t>(page)); |
| 850 page->snapshot(json, info); | 850 page->snapshot(json, info); |
| 851 json->endArray(); | 851 json->endArray(); |
| 852 } else { | 852 } else { |
| 853 page->snapshot(0, info); | 853 page->snapshot(0, info); |
| 854 } | 854 } |
| 855 } | 855 } |
| 856 json->endArray(); | 856 json->endArray(); |
| 857 | 857 |
| 858 json->setInteger("pageCount", info->pageCount - previousPageCount); | 858 json->setInteger("pageCount", info->pageCount - previousPageCount); |
| 859 } | 859 } |
| 860 | 860 |
| 861 void BaseHeap::incrementMarkedObjectsAge() | 861 void ThreadHeap::incrementMarkedObjectsAge() |
| 862 { | 862 { |
| 863 for (NormalPage* page = m_firstPage; page; page = page->next()) | 863 for (HeapPage* page = m_firstPage; page; page = page->next()) |
| 864 page->incrementMarkedObjectsAge(); | 864 page->incrementMarkedObjectsAge(); |
| 865 for (LargeObjectPage* largeObject = m_firstLargeObjectPage; largeObject; largeObject = largeObject->next()) | 865 for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) |
| 866 largeObject->incrementMarkedObjectsAge(); | 866 largeObject->incrementMarkedObjectsAge(); |
| 867 } | 867 } |
| 868 #endif | 868 #endif |
| 869 | 869 |
| 870 void FreeList::addToFreeList(Address address, size_t size) | 870 void FreeList::addToFreeList(Address address, size_t size) |
| 871 { | 871 { |
| 872 ASSERT(size < blinkPagePayloadSize()); | 872 ASSERT(size < blinkPagePayloadSize()); |
| 873 // The free list entries are only pointer aligned (but when we allocate | 873 // The free list entries are only pointer aligned (but when we allocate |
| 874 // from them we are 8 byte aligned due to the header size). | 874 // from them we are 8 byte aligned due to the header size). |
| 875 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(HeapObjectHeader)) & allocationMask)); | 875 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(HeapObjectHeader)) & allocationMask)); |
| 876 ASSERT(!(size & allocationMask)); | 876 ASSERT(!(size & allocationMask)); |
| 877 ASAN_POISON_MEMORY_REGION(address, size); | 877 ASAN_POISON_MEMORY_REGION(address, size); |
| 878 FreeListEntry* entry; | 878 FreeListEntry* entry; |
| 879 if (size < sizeof(*entry)) { | 879 if (size < sizeof(*entry)) { |
| 880 // Create a dummy header with only a size and freelist bit set. | 880 // Create a dummy header with only a size and freelist bit set. |
| 881 ASSERT(size >= sizeof(HeapObjectHeader)); | 881 ASSERT(size >= sizeof(HeapObjectHeader)); |
| 882 // Free list encode the size to mark the lost memory as freelist memory. | 882 // Free list encode the size to mark the lost memory as freelist memory. |
| 883 new (NotNull, address) HeapObjectHeader(size, gcInfoIndexForFreeListHeader); | 883 new (NotNull, address) HeapObjectHeader(size, gcInfoIndexForFreeListHeader); |
| 884 // This memory gets lost. Sweeping can reclaim it. | 884 // This memory gets lost. Sweeping can reclaim it. |
| 885 return; | 885 return; |
| 886 } | 886 } |
| 887 entry = new (NotNull, address) FreeListEntry(size); | 887 entry = new (NotNull, address) FreeListEntry(size); |
| 888 #if defined(ADDRESS_SANITIZER) | 888 #if defined(ADDRESS_SANITIZER) |
| 889 BasePage* page = pageFromObject(address); | 889 BaseHeapPage* page = pageFromObject(address); |
| 890 ASSERT(!page->isLargeObjectPage()); | 890 ASSERT(!page->isLargeObject()); |
| 891 // For ASan we don't add the entry to the free lists until the | 891 // For ASan we don't add the entry to the free lists until the |
| 892 // asanDeferMemoryReuseCount reaches zero. However we always add entire | 892 // asanDeferMemoryReuseCount reaches zero. However we always add entire |
| 893 // pages to ensure that adding a new page will increase the allocation | 893 // pages to ensure that adding a new page will increase the allocation |
| 894 // space. | 894 // space. |
| 895 if (static_cast<NormalPage*>(page)->payloadSize() != size && !entry->shouldAddToFreeList()) | 895 if (static_cast<HeapPage*>(page)->payloadSize() != size && !entry->shouldAddToFreeList()) |
| 896 return; | 896 return; |
| 897 #endif | 897 #endif |
| 898 int index = bucketIndexForSize(size); | 898 int index = bucketIndexForSize(size); |
| 899 entry->link(&m_freeLists[index]); | 899 entry->link(&m_freeLists[index]); |
| 900 if (index > m_biggestFreeListIndex) | 900 if (index > m_biggestFreeListIndex) |
| 901 m_biggestFreeListIndex = index; | 901 m_biggestFreeListIndex = index; |
| 902 } | 902 } |
| 903 | 903 |
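The bucket index used by addToFreeList() above and allocateFromFreeList() is the position of the highest set bit, so bucket i holds blocks of size [2^i, 2^(i+1)) and bucketSize = 1 << index is a lower bound for every entry in that bucket. A sketch:

    static int bucketIndexForSize(size_t size)
    {
        ASSERT(size > 0);
        int index = -1;
        while (size) {
            size >>= 1;
            ++index;
        }
        return index;
    }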
| 904 bool NormalPageHeap::expandObject(HeapObjectHeader* header, size_t newSize) | 904 bool ThreadHeapForHeapPage::expandObject(HeapObjectHeader* header, size_t newSize) |
| 905 { | 905 { |
| 906 // It's possible that Vector requests a smaller expanded size because | 906 // It's possible that Vector requests a smaller expanded size because |
| 907 // Vector::shrinkCapacity can set a capacity smaller than the actual payload | 907 // Vector::shrinkCapacity can set a capacity smaller than the actual payload |
| 908 // size. | 908 // size. |
| 909 if (header->payloadSize() >= newSize) | 909 if (header->payloadSize() >= newSize) |
| 910 return true; | 910 return true; |
| 911 size_t allocationSize = allocationSizeFromSize(newSize); | 911 size_t allocationSize = allocationSizeFromSize(newSize); |
| 912 ASSERT(allocationSize > header->size()); | 912 ASSERT(allocationSize > header->size()); |
| 913 size_t expandSize = allocationSize - header->size(); | 913 size_t expandSize = allocationSize - header->size(); |
| 914 if (header->payloadEnd() == m_currentAllocationPoint && expandSize <= m_remainingAllocationSize) { | 914 if (header->payloadEnd() == m_currentAllocationPoint && expandSize <= m_remainingAllocationSize) { |
| 915 m_currentAllocationPoint += expandSize; | 915 m_currentAllocationPoint += expandSize; |
| 916 m_remainingAllocationSize -= expandSize; | 916 m_remainingAllocationSize -= expandSize; |
| 917 | 917 |
| 918 // Unpoison the memory used for the object (payload). | 918 // Unpoison the memory used for the object (payload). |
| 919 ASAN_UNPOISON_MEMORY_REGION(header->payloadEnd(), expandSize); | 919 ASAN_UNPOISON_MEMORY_REGION(header->payloadEnd(), expandSize); |
| 920 FILL_ZERO_IF_NOT_PRODUCTION(header->payloadEnd(), expandSize); | 920 FILL_ZERO_IF_NOT_PRODUCTION(header->payloadEnd(), expandSize); |
| 921 header->setSize(allocationSize); | 921 header->setSize(allocationSize); |
| 922 ASSERT(findPageFromAddress(header->payloadEnd() - 1)); | 922 ASSERT(findPageFromAddress(header->payloadEnd() - 1)); |
| 923 return true; | 923 return true; |
| 924 } | 924 } |
| 925 return false; | 925 return false; |
| 926 } | 926 } |
| 927 | 927 |
| 928 void NormalPageHeap::shrinkObject(HeapObjectHeader* header, size_t newSize) | 928 void ThreadHeapForHeapPage::shrinkObject(HeapObjectHeader* header, size_t newSize) |
| 929 { | 929 { |
| 930 ASSERT(header->payloadSize() > newSize); | 930 ASSERT(header->payloadSize() > newSize); |
| 931 size_t allocationSize = allocationSizeFromSize(newSize); | 931 size_t allocationSize = allocationSizeFromSize(newSize); |
| 932 ASSERT(header->size() > allocationSize); | 932 ASSERT(header->size() > allocationSize); |
| 933 size_t shrinkSize = header->size() - allocationSize; | 933 size_t shrinkSize = header->size() - allocationSize; |
| 934 if (header->payloadEnd() == m_currentAllocationPoint) { | 934 if (header->payloadEnd() == m_currentAllocationPoint) { |
| 935 m_currentAllocationPoint -= shrinkSize; | 935 m_currentAllocationPoint -= shrinkSize; |
| 936 m_remainingAllocationSize += shrinkSize; | 936 m_remainingAllocationSize += shrinkSize; |
| 937 FILL_ZERO_IF_PRODUCTION(m_currentAllocationPoint, shrinkSize); | 937 FILL_ZERO_IF_PRODUCTION(m_currentAllocationPoint, shrinkSize); |
| 938 ASAN_POISON_MEMORY_REGION(m_currentAllocationPoint, shrinkSize); | 938 ASAN_POISON_MEMORY_REGION(m_currentAllocationPoint, shrinkSize); |
| 939 header->setSize(allocationSize); | 939 header->setSize(allocationSize); |
| 940 } else { | 940 } else { |
| 941 ASSERT(shrinkSize >= sizeof(HeapObjectHeader)); | 941 ASSERT(shrinkSize >= sizeof(HeapObjectHeader)); |
| 942 ASSERT(header->gcInfoIndex() > 0); | 942 ASSERT(header->gcInfoIndex() > 0); |
| 943 HeapObjectHeader* freedHeader = new (NotNull, header->payloadEnd() - shrinkSize) HeapObjectHeader(shrinkSize, header->gcInfoIndex()); | 943 HeapObjectHeader* freedHeader = new (NotNull, header->payloadEnd() - shrinkSize) HeapObjectHeader(shrinkSize, header->gcInfoIndex()); |
| 944 freedHeader->markPromptlyFreed(); | 944 freedHeader->markPromptlyFreed(); |
| 945 ASSERT(pageFromObject(reinterpret_cast<Address>(header)) == findPageFromAddress(reinterpret_cast<Address>(header))); | 945 ASSERT(pageFromObject(reinterpret_cast<Address>(header)) == findPageFromAddress(reinterpret_cast<Address>(header))); |
| 946 m_promptlyFreedSize += shrinkSize; | 946 m_promptlyFreedSize += shrinkSize; |
| 947 header->setSize(allocationSize); | 947 header->setSize(allocationSize); |
| 948 } | 948 } |
| 949 } | 949 } |
| 950 | 950 |
| 951 void NormalPageHeap::promptlyFreeObject(HeapObjectHeader* header) | 951 void ThreadHeapForHeapPage::promptlyFreeObject(HeapObjectHeader* header) |
| 952 { | 952 { |
| 953 ASSERT(!threadState()->sweepForbidden()); | 953 ASSERT(!threadState()->sweepForbidden()); |
| 954 header->checkHeader(); | 954 header->checkHeader(); |
| 955 Address address = reinterpret_cast<Address>(header); | 955 Address address = reinterpret_cast<Address>(header); |
| 956 Address payload = header->payload(); | 956 Address payload = header->payload(); |
| 957 size_t size = header->size(); | 957 size_t size = header->size(); |
| 958 size_t payloadSize = header->payloadSize(); | 958 size_t payloadSize = header->payloadSize(); |
| 959 ASSERT(size > 0); | 959 ASSERT(size > 0); |
| 960 ASSERT(pageFromObject(address) == findPageFromAddress(address)); | 960 ASSERT(pageFromObject(address) == findPageFromAddress(address)); |
| 961 | 961 |
| (...skipping 11 matching lines...) |
| 973 ASAN_POISON_MEMORY_REGION(address, size); | 973 ASAN_POISON_MEMORY_REGION(address, size); |
| 974 return; | 974 return; |
| 975 } | 975 } |
| 976 FILL_ZERO_IF_PRODUCTION(payload, payloadSize); | 976 FILL_ZERO_IF_PRODUCTION(payload, payloadSize); |
| 977 header->markPromptlyFreed(); | 977 header->markPromptlyFreed(); |
| 978 } | 978 } |
| 979 | 979 |
| 980 m_promptlyFreedSize += size; | 980 m_promptlyFreedSize += size; |
| 981 } | 981 } |
| 982 | 982 |
| 983 bool NormalPageHeap::coalesce() | 983 bool ThreadHeapForHeapPage::coalesce() |
| 984 { | 984 { |
| 985 // Don't coalesce heaps if there are not enough promptly freed entries | 985 // Don't coalesce heaps if there are not enough promptly freed entries |
| 986 // to be coalesced. | 986 // to be coalesced. |
| 987 // | 987 // |
| 988 // FIXME: This threshold is determined just to optimize blink_perf | 988 // FIXME: This threshold is determined just to optimize blink_perf |
| 989 // benchmarks. Coalescing is very sensitive to the threashold and | 989 // benchmarks. Coalescing is very sensitive to the threashold and |
| 990 // we need further investigations on the coalescing scheme. | 990 // we need further investigations on the coalescing scheme. |
| 991 if (m_promptlyFreedSize < 1024 * 1024) | 991 if (m_promptlyFreedSize < 1024 * 1024) |
| 992 return false; | 992 return false; |
| 993 | 993 |
| 994 if (threadState()->sweepForbidden()) | 994 if (threadState()->sweepForbidden()) |
| 995 return false; | 995 return false; |
| 996 | 996 |
| 997 ASSERT(!hasCurrentAllocationArea()); | 997 ASSERT(!hasCurrentAllocationArea()); |
| 998 TRACE_EVENT0("blink_gc", "BaseHeap::coalesce"); | 998 TRACE_EVENT0("blink_gc", "ThreadHeap::coalesce"); |
| 999 | 999 |
| 1000 // Rebuild free lists. | 1000 // Rebuild free lists. |
| 1001 m_freeList.clear(); | 1001 m_freeList.clear(); |
| 1002 size_t freedSize = 0; | 1002 size_t freedSize = 0; |
| 1003 for (NormalPage* page = static_cast<NormalPage*>(m_firstPage); page; page = static_cast<NormalPage*>(page->next())) { | 1003 for (HeapPage* page = static_cast<HeapPage*>(m_firstPage); page; page = static_cast<HeapPage*>(page->next())) { |
| 1004 page->clearObjectStartBitMap(); | 1004 page->clearObjectStartBitMap(); |
| 1005 Address startOfGap = page->payload(); | 1005 Address startOfGap = page->payload(); |
| 1006 for (Address headerAddress = startOfGap; headerAddress < page->payloadEnd(); ) { | 1006 for (Address headerAddress = startOfGap; headerAddress < page->payloadEnd(); ) { |
| 1007 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1007 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); |
| 1008 size_t size = header->size(); | 1008 size_t size = header->size(); |
| 1009 ASSERT(size > 0); | 1009 ASSERT(size > 0); |
| 1010 ASSERT(size < blinkPagePayloadSize()); | 1010 ASSERT(size < blinkPagePayloadSize()); |
| 1011 | 1011 |
| 1012 if (header->isPromptlyFreed()) { | 1012 if (header->isPromptlyFreed()) { |
| 1013 ASSERT(size >= sizeof(HeapObjectHeader)); | 1013 ASSERT(size >= sizeof(HeapObjectHeader)); |
| (...skipping 20 matching lines...) |
| 1034 | 1034 |
| 1035 if (startOfGap != page->payloadEnd()) | 1035 if (startOfGap != page->payloadEnd()) |
| 1036 addToFreeList(startOfGap, page->payloadEnd() - startOfGap); | 1036 addToFreeList(startOfGap, page->payloadEnd() - startOfGap); |
| 1037 } | 1037 } |
| 1038 Heap::decreaseAllocatedObjectSize(freedSize); | 1038 Heap::decreaseAllocatedObjectSize(freedSize); |
| 1039 ASSERT(m_promptlyFreedSize == freedSize); | 1039 ASSERT(m_promptlyFreedSize == freedSize); |
| 1040 m_promptlyFreedSize = 0; | 1040 m_promptlyFreedSize = 0; |
| 1041 return true; | 1041 return true; |
| 1042 } | 1042 } |
| 1043 | 1043 |
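The guard above means coalescing runs only once at least 1024 * 1024 bytes (1 MB) have been promptly freed since the last rebuild. In the elided middle of the loop, the startOfGap invariant is closed when a live object is reached; roughly (a sketch, not the patch's code):

    // Merge the accumulated run of promptly-freed/free memory
    // [startOfGap, headerAddress) into one free-list entry, then restart
    // the gap after the live object.
    if (startOfGap != headerAddress)
        addToFreeList(startOfGap, headerAddress - startOfGap);
    startOfGap = headerAddress + size;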
| 1044 Address LargeObjectHeap::allocateLargeObjectPage(size_t allocationSize, size_t gcInfoIndex) | 1044 Address ThreadHeapForLargeObject::allocateLargeObject(size_t allocationSize, size_t gcInfoIndex) |
| 1045 { | 1045 { |
| 1046 // Caller already added space for object header and rounded up to allocation | 1046 // Caller already added space for object header and rounded up to allocation |
| 1047 // alignment | 1047 // alignment |
| 1048 ASSERT(!(allocationSize & allocationMask)); | 1048 ASSERT(!(allocationSize & allocationMask)); |
| 1049 | 1049 |
| 1050 // 1. Check if we should trigger a GC. | 1050 // 1. Check if we should trigger a GC. |
| 1051 threadState()->scheduleGCIfNeeded(); | 1051 threadState()->scheduleGCIfNeeded(); |
| 1052 | 1052 |
| 1053 // 2. Try to sweep large objects more than allocationSize bytes | 1053 // 2. Try to sweep large objects more than allocationSize bytes |
| 1054 // before allocating a new large object. | 1054 // before allocating a new large object. |
| 1055 Address result = lazySweep(allocationSize, gcInfoIndex); | 1055 Address result = lazySweep(allocationSize, gcInfoIndex); |
| 1056 if (result) | 1056 if (result) |
| 1057 return result; | 1057 return result; |
| 1058 | 1058 |
| 1059 // 3. If we have failed in sweeping allocationSize bytes, | 1059 // 3. If we have failed in sweeping allocationSize bytes, |
| 1060 // we complete sweeping before allocating this large object. | 1060 // we complete sweeping before allocating this large object. |
| 1061 threadState()->completeSweep(); | 1061 threadState()->completeSweep(); |
| 1062 return doAllocateLargeObjectPage(allocationSize, gcInfoIndex); | 1062 return doAllocateLargeObject(allocationSize, gcInfoIndex); |
| 1063 } | 1063 } |
| 1064 | 1064 |
| 1065 Address LargeObjectHeap::doAllocateLargeObjectPage(size_t allocationSize, size_t gcInfoIndex) | 1065 Address ThreadHeapForLargeObject::doAllocateLargeObject(size_t allocationSize, size_t gcInfoIndex) |
| 1066 { | 1066 { |
| 1067 size_t largeObjectSize = sizeof(LargeObjectPage) + headerPadding() + allocationSize; | 1067 size_t largeObjectSize = sizeof(LargeObject) + headerPadding() + allocationSize; |
| 1068 // If ASan is supported we add allocationGranularity bytes to the allocated | 1068 // If ASan is supported we add allocationGranularity bytes to the allocated |
| 1069 // space and poison that to detect overflows | 1069 // space and poison that to detect overflows |
| 1070 #if defined(ADDRESS_SANITIZER) | 1070 #if defined(ADDRESS_SANITIZER) |
| 1071 largeObjectSize += allocationGranularity; | 1071 largeObjectSize += allocationGranularity; |
| 1072 #endif | 1072 #endif |
| 1073 | 1073 |
| 1074 threadState()->shouldFlushHeapDoesNotContainCache(); | 1074 threadState()->shouldFlushHeapDoesNotContainCache(); |
| 1075 PageMemory* pageMemory = PageMemory::allocate(largeObjectSize); | 1075 PageMemory* pageMemory = PageMemory::allocate(largeObjectSize); |
| 1076 threadState()->allocatedRegionsSinceLastGC().append(pageMemory->region()); | 1076 threadState()->allocatedRegionsSinceLastGC().append(pageMemory->region()); |
| 1077 Address largeObjectAddress = pageMemory->writableStart(); | 1077 Address largeObjectAddress = pageMemory->writableStart(); |
| 1078 Address headerAddress = largeObjectAddress + sizeof(LargeObjectPage) + headerPadding(); | 1078 Address headerAddress = largeObjectAddress + sizeof(LargeObject) + headerPadding(); |
| 1079 #if ENABLE(ASSERT) | 1079 #if ENABLE(ASSERT) |
| 1080 // Verify that the allocated PageMemory is zeroed as expected. | 1080 // Verify that the allocated PageMemory is zeroed as expected. |
| 1081 for (size_t i = 0; i < largeObjectSize; ++i) | 1081 for (size_t i = 0; i < largeObjectSize; ++i) |
| 1082 ASSERT(!headerAddress[i]); | 1082 ASSERT(!headerAddress[i]); |
| 1083 #endif | 1083 #endif |
| 1084 ASSERT(gcInfoIndex > 0); | 1084 ASSERT(gcInfoIndex > 0); |
| 1085 HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex); | 1085 HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex); |
| 1086 Address result = headerAddress + sizeof(*header); | 1086 Address result = headerAddress + sizeof(*header); |
| 1087 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 1087 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
| 1088 LargeObjectPage* largeObject = new (largeObjectAddress) LargeObjectPage(pageMemory, this, allocationSize); | 1088 LargeObject* largeObject = new (largeObjectAddress) LargeObject(pageMemory, this, allocationSize); |
| 1089 header->checkHeader(); | 1089 header->checkHeader(); |
| 1090 | 1090 |
| 1091 // Poison the object header and allocationGranularity bytes after the object | 1091 // Poison the object header and allocationGranularity bytes after the object |
| 1092 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); | 1092 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); |
| 1093 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); | 1093 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); |
| 1094 | 1094 |
| 1095 largeObject->link(&m_firstPage); | 1095 largeObject->link(&m_firstPage); |
| 1096 | 1096 |
| 1097 Heap::increaseAllocatedSpace(largeObject->size()); | 1097 Heap::increaseAllocatedSpace(largeObject->size()); |
| 1098 Heap::increaseAllocatedObjectSize(largeObject->size()); | 1098 Heap::increaseAllocatedObjectSize(largeObject->size()); |
| 1099 return result; | 1099 return result; |
| 1100 } | 1100 } |
| 1101 | 1101 |
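For reference, the layout doAllocateLargeObject() builds inside a single PageMemory reservation, matching the pointer arithmetic above (headerPadding() may be zero; the trailing poisoned band exists only under ASan):

    writableStart()
    | LargeObject(Page) header | headerPadding() | HeapObjectHeader | payload ... | ASan guard |
                                                  ^ headerAddress    ^ result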
| 1102 void LargeObjectHeap::freeLargeObjectPage(LargeObjectPage* object) | 1102 void ThreadHeapForLargeObject::freeLargeObject(LargeObject* object) |
| 1103 { | 1103 { |
| 1104 object->heapObjectHeader()->finalize(object->payload(), object->payloadSize()); | 1104 object->heapObjectHeader()->finalize(object->payload(), object->payloadSize()); |
| 1105 Heap::decreaseAllocatedSpace(object->size()); | 1105 Heap::decreaseAllocatedSpace(object->size()); |
| 1106 | 1106 |
| 1107 // Unpoison the object header and allocationGranularity bytes after the | 1107 // Unpoison the object header and allocationGranularity bytes after the |
| 1108 // object before freeing. | 1108 // object before freeing. |
| 1109 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(HeapObjectHeader)); | 1109 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(HeapObjectHeader)); |
| 1110 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); | 1110 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); |
| 1111 | 1111 |
| 1112 if (object->terminating()) { | 1112 if (object->terminating()) { |
| 1113 ASSERT(ThreadState::current()->isTerminating()); | 1113 ASSERT(ThreadState::current()->isTerminating()); |
| 1114 // The thread is shutting down and this page is being removed as a part | 1114 // The thread is shutting down and this page is being removed as a part |
| 1115 // of the thread local GC. In that case the object could be traced in | 1115 // of the thread local GC. In that case the object could be traced in |
| 1116 // the next global GC if there is a dangling pointer from a live thread | 1116 // the next global GC if there is a dangling pointer from a live thread |
| 1117 // heap to this dead thread heap. To guard against this, we put the | 1117 // heap to this dead thread heap. To guard against this, we put the |
| 1118 // page into the orphaned page pool and zap the page memory. This | 1118 // page into the orphaned page pool and zap the page memory. This |
| 1119 // ensures that tracing the dangling pointer in the next global GC just | 1119 // ensures that tracing the dangling pointer in the next global GC just |
| 1120 // crashes instead of causing use-after-frees. After the next global | 1120 // crashes instead of causing use-after-frees. After the next global |
| 1121 // GC, the orphaned pages are removed. | 1121 // GC, the orphaned pages are removed. |
| 1122 Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), object); | 1122 Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), object); |
| 1123 } else { | 1123 } else { |
| 1124 ASSERT(!ThreadState::current()->isTerminating()); | 1124 ASSERT(!ThreadState::current()->isTerminating()); |
| 1125 PageMemory* memory = object->storage(); | 1125 PageMemory* memory = object->storage(); |
| 1126 object->~LargeObjectPage(); | 1126 object->~LargeObject(); |
| 1127 delete memory; | 1127 delete memory; |
| 1128 } | 1128 } |
| 1129 } | 1129 } |
| 1130 | 1130 |
| 1131 template<typename DataType> | 1131 template<typename DataType> |
| 1132 PagePool<DataType>::PagePool() | 1132 PagePool<DataType>::PagePool() |
| 1133 { | 1133 { |
| 1134 for (int i = 0; i < NumberOfHeaps; ++i) { | 1134 for (int i = 0; i < NumberOfHeaps; ++i) { |
| 1135 m_pool[i] = nullptr; | 1135 m_pool[i] = nullptr; |
| 1136 } | 1136 } |
| (...skipping 33 matching lines...) |
| 1170 delete entry; | 1170 delete entry; |
| 1171 if (memory->commit()) | 1171 if (memory->commit()) |
| 1172 return memory; | 1172 return memory; |
| 1173 | 1173 |
| 1174 // We got some memory, but failed to commit it, try again. | 1174 // We got some memory, but failed to commit it, try again. |
| 1175 delete memory; | 1175 delete memory; |
| 1176 } | 1176 } |
| 1177 return nullptr; | 1177 return nullptr; |
| 1178 } | 1178 } |
| 1179 | 1179 |
| 1180 BasePage::BasePage(PageMemory* storage, BaseHeap* heap) | 1180 BaseHeapPage::BaseHeapPage(PageMemory* storage, ThreadHeap* heap) |
| 1181 : m_storage(storage) | 1181 : m_storage(storage) |
| 1182 , m_heap(heap) | 1182 , m_heap(heap) |
| 1183 , m_next(nullptr) | 1183 , m_next(nullptr) |
| 1184 , m_terminating(false) | 1184 , m_terminating(false) |
| 1185 , m_swept(true) | 1185 , m_swept(true) |
| 1186 { | 1186 { |
| 1187 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); | 1187 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); |
| 1188 } | 1188 } |
| 1189 | 1189 |
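The isPageHeaderAddress() assertion checks that the header object being constructed sits exactly one guard page into a blink-page-aligned reservation. A sketch with assumed mask names (blinkPageOffsetMask, blinkGuardPageSize):

    static bool isPageHeaderAddress(Address address)
    {
        // The offset within the blink page must equal the guard page size.
        return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - blinkGuardPageSize);
    }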
| 1190 void BasePage::markOrphaned() | 1190 void BaseHeapPage::markOrphaned() |
| 1191 { | 1191 { |
| 1192 m_heap = nullptr; | 1192 m_heap = nullptr; |
| 1193 m_terminating = false; | 1193 m_terminating = false; |
| 1194 // Since we zap the page payload for orphaned pages we need to mark it as | 1194 // Since we zap the page payload for orphaned pages we need to mark it as |
| 1195 // unused so a conservative pointer won't interpret the object headers. | 1195 // unused so a conservative pointer won't interpret the object headers. |
| 1196 storage()->markUnused(); | 1196 storage()->markUnused(); |
| 1197 } | 1197 } |
| 1198 | 1198 |
| 1199 OrphanedPagePool::~OrphanedPagePool() | 1199 OrphanedPagePool::~OrphanedPagePool() |
| 1200 { | 1200 { |
| 1201 for (int index = 0; index < NumberOfHeaps; ++index) { | 1201 for (int index = 0; index < NumberOfHeaps; ++index) { |
| 1202 while (PoolEntry* entry = m_pool[index]) { | 1202 while (PoolEntry* entry = m_pool[index]) { |
| 1203 m_pool[index] = entry->next; | 1203 m_pool[index] = entry->next; |
| 1204 BasePage* page = entry->data; | 1204 BaseHeapPage* page = entry->data; |
| 1205 delete entry; | 1205 delete entry; |
| 1206 PageMemory* memory = page->storage(); | 1206 PageMemory* memory = page->storage(); |
| 1207 ASSERT(memory); | 1207 ASSERT(memory); |
| 1208 page->~BasePage(); | 1208 page->~BaseHeapPage(); |
| 1209 delete memory; | 1209 delete memory; |
| 1210 } | 1210 } |
| 1211 } | 1211 } |
| 1212 } | 1212 } |
| 1213 | 1213 |
| 1214 void OrphanedPagePool::addOrphanedPage(int index, BasePage* page) | 1214 void OrphanedPagePool::addOrphanedPage(int index, BaseHeapPage* page) |
| 1215 { | 1215 { |
| 1216 page->markOrphaned(); | 1216 page->markOrphaned(); |
| 1217 PoolEntry* entry = new PoolEntry(page, m_pool[index]); | 1217 PoolEntry* entry = new PoolEntry(page, m_pool[index]); |
| 1218 m_pool[index] = entry; | 1218 m_pool[index] = entry; |
| 1219 } | 1219 } |
| 1220 | 1220 |
| 1221 NO_SANITIZE_ADDRESS | 1221 NO_SANITIZE_ADDRESS |
| 1222 void OrphanedPagePool::decommitOrphanedPages() | 1222 void OrphanedPagePool::decommitOrphanedPages() |
| 1223 { | 1223 { |
| 1224 ASSERT(ThreadState::current()->isInGC()); | 1224 ASSERT(ThreadState::current()->isInGC()); |
| 1225 | 1225 |
| 1226 #if ENABLE(ASSERT) | 1226 #if ENABLE(ASSERT) |
| 1227 // No locking needed as all threads are at safepoints at this point in time. | 1227 // No locking needed as all threads are at safepoints at this point in time. |
| 1228 for (ThreadState* state : ThreadState::attachedThreads()) | 1228 for (ThreadState* state : ThreadState::attachedThreads()) |
| 1229 ASSERT(state->isAtSafePoint()); | 1229 ASSERT(state->isAtSafePoint()); |
| 1230 #endif | 1230 #endif |
| 1231 | 1231 |
| 1232 for (int index = 0; index < NumberOfHeaps; ++index) { | 1232 for (int index = 0; index < NumberOfHeaps; ++index) { |
| 1233 PoolEntry* entry = m_pool[index]; | 1233 PoolEntry* entry = m_pool[index]; |
| 1234 PoolEntry** prevNext = &m_pool[index]; | 1234 PoolEntry** prevNext = &m_pool[index]; |
| 1235 while (entry) { | 1235 while (entry) { |
| 1236 BasePage* page = entry->data; | 1236 BaseHeapPage* page = entry->data; |
| 1237 // Check if we should reuse the memory or just free it. | 1237 // Check if we should reuse the memory or just free it. |
| 1238 // Large object memory is not reused but freed, normal blink heap | 1238 // Large object memory is not reused but freed, normal blink heap |
| 1239 // pages are reused. | 1239 // pages are reused. |
| 1240 // NOTE: We call the destructor before freeing or adding to the | 1240 // NOTE: We call the destructor before freeing or adding to the |
| 1241 // free page pool. | 1241 // free page pool. |
| 1242 PageMemory* memory = page->storage(); | 1242 PageMemory* memory = page->storage(); |
| 1243 if (page->isLargeObjectPage()) { | 1243 if (page->isLargeObject()) { |
| 1244 page->~BasePage(); | 1244 page->~BaseHeapPage(); |
| 1245 delete memory; | 1245 delete memory; |
| 1246 } else { | 1246 } else { |
| 1247 page->~BasePage(); | 1247 page->~BaseHeapPage(); |
| 1248 clearMemory(memory); | 1248 clearMemory(memory); |
| 1249 Heap::freePagePool()->addFreePage(index, memory); | 1249 Heap::freePagePool()->addFreePage(index, memory); |
| 1250 } | 1250 } |
| 1251 | 1251 |
| 1252 PoolEntry* deadEntry = entry; | 1252 PoolEntry* deadEntry = entry; |
| 1253 entry = entry->next; | 1253 entry = entry->next; |
| 1254 *prevNext = entry; | 1254 *prevNext = entry; |
| 1255 delete deadEntry; | 1255 delete deadEntry; |
| 1256 } | 1256 } |
| 1257 } | 1257 } |
| (...skipping 12 matching lines...) |
| 1270 #else | 1270 #else |
| 1271 memset(memory->writableStart(), 0, blinkPagePayloadSize()); | 1271 memset(memory->writableStart(), 0, blinkPagePayloadSize()); |
| 1272 #endif | 1272 #endif |
| 1273 } | 1273 } |
| 1274 | 1274 |
| 1275 #if ENABLE(ASSERT) | 1275 #if ENABLE(ASSERT) |
| 1276 bool OrphanedPagePool::contains(void* object) | 1276 bool OrphanedPagePool::contains(void* object) |
| 1277 { | 1277 { |
| 1278 for (int index = 0; index < NumberOfHeaps; ++index) { | 1278 for (int index = 0; index < NumberOfHeaps; ++index) { |
| 1279 for (PoolEntry* entry = m_pool[index]; entry; entry = entry->next) { | 1279 for (PoolEntry* entry = m_pool[index]; entry; entry = entry->next) { |
| 1280 BasePage* page = entry->data; | 1280 BaseHeapPage* page = entry->data; |
| 1281 if (page->contains(reinterpret_cast<Address>(object))) | 1281 if (page->contains(reinterpret_cast<Address>(object))) |
| 1282 return true; | 1282 return true; |
| 1283 } | 1283 } |
| 1284 } | 1284 } |
| 1285 return false; | 1285 return false; |
| 1286 } | 1286 } |
| 1287 #endif | 1287 #endif |
| 1288 | 1288 |
| 1289 void NormalPageHeap::freePage(NormalPage* page) | 1289 void ThreadHeapForHeapPage::freePage(HeapPage* page) |
| 1290 { | 1290 { |
| 1291 Heap::decreaseAllocatedSpace(page->size()); | 1291 Heap::decreaseAllocatedSpace(page->size()); |
| 1292 | 1292 |
| 1293 if (page->terminating()) { | 1293 if (page->terminating()) { |
| 1294 // The thread is shutting down and this page is being removed as a part | 1294 // The thread is shutting down and this page is being removed as a part |
| 1295 // of the thread local GC. In that case the object could be traced in | 1295 // of the thread local GC. In that case the object could be traced in |
| 1296 // the next global GC if there is a dangling pointer from a live thread | 1296 // the next global GC if there is a dangling pointer from a live thread |
| 1297 // heap to this dead thread heap. To guard against this, we put the | 1297 // heap to this dead thread heap. To guard against this, we put the |
| 1298 // page into the orphaned page pool and zap the page memory. This | 1298 // page into the orphaned page pool and zap the page memory. This |
| 1299 // ensures that tracing the dangling pointer in the next global GC just | 1299 // ensures that tracing the dangling pointer in the next global GC just |
| 1300 // crashes instead of causing use-after-frees. After the next global | 1300 // crashes instead of causing use-after-frees. After the next global |
| 1301 // GC, the orphaned pages are removed. | 1301 // GC, the orphaned pages are removed. |
| 1302 Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), page); | 1302 Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), page); |
| 1303 } else { | 1303 } else { |
| 1304 PageMemory* memory = page->storage(); | 1304 PageMemory* memory = page->storage(); |
| 1305 page->~NormalPage(); | 1305 page->~HeapPage(); |
| 1306 Heap::freePagePool()->addFreePage(heapIndex(), memory); | 1306 Heap::freePagePool()->addFreePage(heapIndex(), memory); |
| 1307 } | 1307 } |
| 1308 } | 1308 } |
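Editor's note: the orphaned-page strategy above deliberately trades a potential use-after-free for a deterministic crash. A minimal sketch of the zapping idea; the poison byte 0xdc is an assumption for illustration, not Blink's actual value:

```cpp
#include <cstdint>
#include <cstring>

const uint8_t orphanedZapValue = 0xdc; // assumed poison pattern

// Overwrite an orphaned payload so that any later dereference through a
// dangling pointer reads an obviously bogus pattern and faults, instead of
// silently reinterpreting a dead object.
static void zapOrphanedPayload(void* payload, size_t size)
{
    memset(payload, orphanedZapValue, size);
}

int main()
{
    char buffer[64];
    zapOrphanedPayload(buffer, sizeof buffer);
}
```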
| 1309 | 1309 |
| 1310 void NormalPageHeap::allocatePage() | 1310 void ThreadHeapForHeapPage::allocatePage() |
| 1311 { | 1311 { |
| 1312 threadState()->shouldFlushHeapDoesNotContainCache(); | 1312 threadState()->shouldFlushHeapDoesNotContainCache(); |
| 1313 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(heapIndex()); | 1313 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(heapIndex()); |
| 1314 // We continue allocating page memory until we succeed in committing one. | 1314 // We continue allocating page memory until we succeed in committing one. |
| 1315 while (!pageMemory) { | 1315 while (!pageMemory) { |
| 1316 // Allocate a memory region for blinkPagesPerRegion pages that | 1316 // Allocate a memory region for blinkPagesPerRegion pages that |
| 1317 // will each have the following layout. | 1317 // will each have the following layout. |
| 1318 // | 1318 // |
| 1319 // [ guard os page | ... payload ... | guard os page ] | 1319 // [ guard os page | ... payload ... | guard os page ] |
| 1320 // ^---{ aligned to blink page size } | 1320 // ^---{ aligned to blink page size } |
| (...skipping 10 matching lines...) |
| 1331 if (memory->commit()) | 1331 if (memory->commit()) |
| 1332 pageMemory = memory; | 1332 pageMemory = memory; |
| 1333 else | 1333 else |
| 1334 delete memory; | 1334 delete memory; |
| 1335 } else { | 1335 } else { |
| 1336 Heap::freePagePool()->addFreePage(heapIndex(), memory); | 1336 Heap::freePagePool()->addFreePage(heapIndex(), memory); |
| 1337 } | 1337 } |
| 1338 offset += blinkPageSize; | 1338 offset += blinkPageSize; |
| 1339 } | 1339 } |
| 1340 } | 1340 } |
| 1341 NormalPage* page = new (pageMemory->writableStart()) NormalPage(pageMemory, this); | 1341 HeapPage* page = new (pageMemory->writableStart()) HeapPage(pageMemory, this); |
| 1342 page->link(&m_firstPage); | 1342 page->link(&m_firstPage); |
| 1343 | 1343 |
| 1344 Heap::increaseAllocatedSpace(page->size()); | 1344 Heap::increaseAllocatedSpace(page->size()); |
| 1345 addToFreeList(page->payload(), page->payloadSize()); | 1345 addToFreeList(page->payload(), page->payloadSize()); |
| 1346 } | 1346 } |
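Editor's note: illustrative arithmetic only for the region layout in the comment above. The constants mirror plausible Oilpan values (128 KB blink pages, 10 pages per region, 4 KB OS guard pages) but should be read as assumptions:

```cpp
#include <cstddef>
#include <cstdio>

const size_t osPageSize = 4096;        // assumed guard page size
const size_t blinkPageSize = 1 << 17;  // assumed: 128 KB
const size_t blinkPagesPerRegion = 10; // assumed region population

int main()
{
    // Each blink page gives up one OS page at each end to inaccessible
    // guard pages, so the committable payload is what remains in between.
    size_t payloadPerPage = blinkPageSize - 2 * osPageSize;
    printf("region size: %zu bytes\n", blinkPageSize * blinkPagesPerRegion);
    printf("payload per page: %zu bytes\n", payloadPerPage);
}
```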
| 1347 | 1347 |
| 1348 #if ENABLE(ASSERT) | 1348 #if ENABLE(ASSERT) |
| 1349 bool NormalPageHeap::pagesToBeSweptContains(Address address) | 1349 bool ThreadHeapForHeapPage::pagesToBeSweptContains(Address address) |
| 1350 { | 1350 { |
| 1351 for (BasePage* page = m_firstUnsweptPage; page; page = page->next()) { | 1351 for (BaseHeapPage* page = m_firstUnsweptPage; page; page = page->next()) { |
| 1352 if (page->contains(address)) | 1352 if (page->contains(address)) |
| 1353 return true; | 1353 return true; |
| 1354 } | 1354 } |
| 1355 return false; | 1355 return false; |
| 1356 } | 1356 } |
| 1357 #endif | 1357 #endif |
| 1358 | 1358 |
| 1359 size_t BaseHeap::objectPayloadSizeForTesting() | 1359 size_t ThreadHeap::objectPayloadSizeForTesting() |
| 1360 { | 1360 { |
| 1361 ASSERT(isConsistentForSweeping()); | 1361 ASSERT(isConsistentForSweeping()); |
| 1362 ASSERT(!m_firstUnsweptPage); | 1362 ASSERT(!m_firstUnsweptPage); |
| 1363 | 1363 |
| 1364 size_t objectPayloadSize = 0; | 1364 size_t objectPayloadSize = 0; |
| 1365 for (BasePage* page = m_firstPage; page; page = page->next()) | 1365 for (BaseHeapPage* page = m_firstPage; page; page = page->next()) |
| 1366 objectPayloadSize += page->objectPayloadSizeForTesting(); | 1366 objectPayloadSize += page->objectPayloadSizeForTesting(); |
| 1367 return objectPayloadSize; | 1367 return objectPayloadSize; |
| 1368 } | 1368 } |
| 1369 | 1369 |
| 1370 #if ENABLE(ASSERT) | 1370 #if ENABLE(ASSERT) |
| 1371 bool NormalPageHeap::isConsistentForSweeping() | 1371 bool ThreadHeapForHeapPage::isConsistentForSweeping() |
| 1372 { | 1372 { |
| 1373 // A thread heap is consistent for sweeping if none of the pages to be swept | 1373 // A thread heap is consistent for sweeping if none of the pages to be swept |
| 1374 // contain a freelist block or the current allocation point. | 1374 // contain a freelist block or the current allocation point. |
| 1375 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { | 1375 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { |
| 1376 for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; freeListEntry; freeListEntry = freeListEntry->next()) { | 1376 for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; freeListEntry; freeListEntry = freeListEntry->next()) { |
| 1377 if (pagesToBeSweptContains(freeListEntry->address())) | 1377 if (pagesToBeSweptContains(freeListEntry->address())) |
| 1378 return false; | 1378 return false; |
| 1379 } | 1379 } |
| 1380 } | 1380 } |
| 1381 if (hasCurrentAllocationArea()) { | 1381 if (hasCurrentAllocationArea()) { |
| 1382 if (pagesToBeSweptContains(currentAllocationPoint())) | 1382 if (pagesToBeSweptContains(currentAllocationPoint())) |
| 1383 return false; | 1383 return false; |
| 1384 } | 1384 } |
| 1385 return true; | 1385 return true; |
| 1386 } | 1386 } |
| 1387 #endif | 1387 #endif |
| 1388 | 1388 |
| 1389 void BaseHeap::makeConsistentForSweeping() | 1389 void ThreadHeap::makeConsistentForSweeping() |
| 1390 { | 1390 { |
| 1391 clearFreeLists(); | 1391 clearFreeLists(); |
| 1392 ASSERT(isConsistentForSweeping()); | 1392 ASSERT(isConsistentForSweeping()); |
| 1393 for (BasePage* page = m_firstPage; page; page = page->next()) | 1393 for (BaseHeapPage* page = m_firstPage; page; page = page->next()) |
| 1394 page->markAsUnswept(); | 1394 page->markAsUnswept(); |
| 1395 | 1395 |
| 1396 // If a new GC is requested before this thread got around to sweeping, | 1396 // If a new GC is requested before this thread got around to sweeping, |
| 1397 // i.e., due to the thread doing a long-running operation, we clear | 1397 // i.e., due to the thread doing a long-running operation, we clear |
| 1398 // the mark bits and mark any unmarked (dead) objects as dead. The | 1398 // the mark bits and mark any unmarked (dead) objects as dead. The |
| 1399 // latter ensures that the next GC marking does not trace already dead | 1399 // latter ensures that the next GC marking does not trace already dead |
| 1400 // objects. If we traced a dead object we could end up tracing into | 1400 // objects. If we traced a dead object we could end up tracing into |
| 1401 // garbage or the middle of another object via the newly conservatively | 1401 // garbage or the middle of another object via the newly conservatively |
| 1402 // found object. | 1402 // found object. |
| 1403 BasePage* previousPage = nullptr; | 1403 BaseHeapPage* previousPage = nullptr; |
| 1404 for (BasePage* page = m_firstUnsweptPage; page; previousPage = page, page = page->next()) { | 1404 for (BaseHeapPage* page = m_firstUnsweptPage; page; previousPage = page, page = page->next()) { |
| 1405 page->markUnmarkedObjectsDead(); | 1405 page->markUnmarkedObjectsDead(); |
| 1406 ASSERT(!page->hasBeenSwept()); | 1406 ASSERT(!page->hasBeenSwept()); |
| 1407 } | 1407 } |
| 1408 if (previousPage) { | 1408 if (previousPage) { |
| 1409 ASSERT(m_firstUnsweptPage); | 1409 ASSERT(m_firstUnsweptPage); |
| 1410 previousPage->m_next = m_firstPage; | 1410 previousPage->m_next = m_firstPage; |
| 1411 m_firstPage = m_firstUnsweptPage; | 1411 m_firstPage = m_firstUnsweptPage; |
| 1412 m_firstUnsweptPage = nullptr; | 1412 m_firstUnsweptPage = nullptr; |
| 1413 } | 1413 } |
| 1414 ASSERT(!m_firstUnsweptPage); | 1414 ASSERT(!m_firstUnsweptPage); |
| 1415 } | 1415 } |
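Editor's note: the tail of makeConsistentForSweeping splices the whole unswept list onto the front of the swept list with one pointer assignment per list and no allocation. A minimal model of that splice:

```cpp
#include <cassert>

struct Page {
    Page* m_next;
};

static void spliceUnsweptOntoFront(Page*& firstPage, Page*& firstUnsweptPage)
{
    Page* previousPage = nullptr;
    for (Page* page = firstUnsweptPage; page; previousPage = page, page = page->m_next) {
        // Per-page work (markUnmarkedObjectsDead in the real code) goes here.
    }
    if (previousPage) {
        previousPage->m_next = firstPage; // last unswept page -> old head
        firstPage = firstUnsweptPage;
        firstUnsweptPage = nullptr;
    }
}

int main()
{
    Page a = {nullptr};
    Page b = {nullptr};
    Page* first = &a;
    Page* unswept = &b;
    spliceUnsweptOntoFront(first, unswept);
    assert(first == &b && b.m_next == &a && !unswept);
}
```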
| 1416 | 1416 |
| 1417 void NormalPageHeap::clearFreeLists() | 1417 void ThreadHeapForHeapPage::clearFreeLists() |
| 1418 { | 1418 { |
| 1419 setAllocationPoint(nullptr, 0); | 1419 setAllocationPoint(nullptr, 0); |
| 1420 m_freeList.clear(); | 1420 m_freeList.clear(); |
| 1421 } | 1421 } |
| 1422 | 1422 |
| 1423 #if ENABLE(GC_PROFILING) | 1423 #if ENABLE(GC_PROFILING) |
| 1424 void BaseHeap::snapshotFreeList(TracedValue& json) | 1424 void ThreadHeap::snapshotFreeList(TracedValue& json) |
| 1425 { | 1425 { |
| 1426 json.setInteger("cumulativeAllocationSize", m_cumulativeAllocationSize); | 1426 json.setInteger("cumulativeAllocationSize", m_cumulativeAllocationSize); |
| 1427 json.setDouble("inlineAllocationRate", static_cast<double>(m_inlineAllocationCount) / m_allocationCount); | 1427 json.setDouble("inlineAllocationRate", static_cast<double>(m_inlineAllocationCount) / m_allocationCount); |
| 1428 json.setInteger("inlineAllocationCount", m_inlineAllocationCount); | 1428 json.setInteger("inlineAllocationCount", m_inlineAllocationCount); |
| 1429 json.setInteger("allocationCount", m_allocationCount); | 1429 json.setInteger("allocationCount", m_allocationCount); |
| 1430 size_t pageCount = 0; | 1430 size_t pageCount = 0; |
| 1431 size_t totalPageSize = 0; | 1431 size_t totalPageSize = 0; |
| 1432 for (NormalPage* page = m_firstPage; page; page = page->next()) { | 1432 for (HeapPage* page = m_firstPage; page; page = page->next()) { |
| 1433 ++pageCount; | 1433 ++pageCount; |
| 1434 totalPageSize += page->payloadSize(); | 1434 totalPageSize += page->payloadSize(); |
| 1435 } | 1435 } |
| 1436 json.setInteger("pageCount", pageCount); | 1436 json.setInteger("pageCount", pageCount); |
| 1437 json.setInteger("totalPageSize", totalPageSize); | 1437 json.setInteger("totalPageSize", totalPageSize); |
| 1438 | 1438 |
| 1439 FreeList::PerBucketFreeListStats bucketStats[blinkPageSizeLog2]; | 1439 FreeList::PerBucketFreeListStats bucketStats[blinkPageSizeLog2]; |
| 1440 size_t totalFreeSize; | 1440 size_t totalFreeSize; |
| 1441 m_freeList.getFreeSizeStats(bucketStats, totalFreeSize); | 1441 m_freeList.getFreeSizeStats(bucketStats, totalFreeSize); |
| 1442 json.setInteger("totalFreeSize", totalFreeSize); | 1442 json.setInteger("totalFreeSize", totalFreeSize); |
| 1443 | 1443 |
| 1444 json.beginArray("perBucketEntryCount"); | 1444 json.beginArray("perBucketEntryCount"); |
| 1445 for (size_t i = 0; i < blinkPageSizeLog2; ++i) | 1445 for (size_t i = 0; i < blinkPageSizeLog2; ++i) |
| 1446 json.pushInteger(bucketStats[i].entryCount); | 1446 json.pushInteger(bucketStats[i].entryCount); |
| 1447 json.endArray(); | 1447 json.endArray(); |
| 1448 | 1448 |
| 1449 json.beginArray("perBucketFreeSize"); | 1449 json.beginArray("perBucketFreeSize"); |
| 1450 for (size_t i = 0; i < blinkPageSizeLog2; ++i) | 1450 for (size_t i = 0; i < blinkPageSizeLog2; ++i) |
| 1451 json.pushInteger(bucketStats[i].freeSize); | 1451 json.pushInteger(bucketStats[i].freeSize); |
| 1452 json.endArray(); | 1452 json.endArray(); |
| 1453 } | 1453 } |
| 1454 | 1454 |
| 1455 void BaseHeap::countMarkedObjects(ClassAgeCountsMap& classAgeCounts) const | 1455 void ThreadHeap::countMarkedObjects(ClassAgeCountsMap& classAgeCounts) const |
| 1456 { | 1456 { |
| 1457 for (NormalPage* page = m_firstPage; page; page = page->next()) | 1457 for (HeapPage* page = m_firstPage; page; page = page->next()) |
| 1458 page->countMarkedObjects(classAgeCounts); | 1458 page->countMarkedObjects(classAgeCounts); |
| 1459 for (LargeObjectPage* largeObject = m_firstLargeObjectPage; largeObject; largeObject = largeObject->next()) | 1459 for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) |
| 1460 largeObject->countMarkedObjects(classAgeCounts); | 1460 largeObject->countMarkedObjects(classAgeCounts); |
| 1461 } | 1461 } |
| 1462 | 1462 |
| 1463 void BaseHeap::countObjectsToSweep(ClassAgeCountsMap& classAgeCounts) const | 1463 void ThreadHeap::countObjectsToSweep(ClassAgeCountsMap& classAgeCounts) const |
| 1464 { | 1464 { |
| 1465 for (NormalPage* page = m_firstPage; page; page = page->next()) | 1465 for (HeapPage* page = m_firstPage; page; page = page->next()) |
| 1466 page->countObjectsToSweep(classAgeCounts); | 1466 page->countObjectsToSweep(classAgeCounts); |
| 1467 for (LargeObjectPage* largeObject = m_firstLargeObjectPage; largeObject; largeObject = largeObject->next()) | 1467 for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) |
| 1468 largeObject->countObjectsToSweep(classAgeCounts); | 1468 largeObject->countObjectsToSweep(classAgeCounts); |
| 1469 } | 1469 } |
| 1470 #endif | 1470 #endif |
| 1471 | 1471 |
| 1472 void FreeList::clear() | 1472 void FreeList::clear() |
| 1473 { | 1473 { |
| 1474 m_biggestFreeListIndex = 0; | 1474 m_biggestFreeListIndex = 0; |
| 1475 for (size_t i = 0; i < blinkPageSizeLog2; ++i) | 1475 for (size_t i = 0; i < blinkPageSizeLog2; ++i) |
| 1476 m_freeLists[i] = nullptr; | 1476 m_freeLists[i] = nullptr; |
| 1477 } | 1477 } |
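Editor's note: clear() resets one free list per power-of-two size class (blinkPageSizeLog2 buckets). The bucket index for a block is floor(log2(size)); this helper is a plausible stand-in written from first principles, not copied from Blink:

```cpp
#include <cassert>
#include <cstddef>

static int bucketIndexForSize(size_t size)
{
    assert(size > 0);
    int index = -1;
    while (size) {
        size >>= 1;
        ++index; // one step per bit: ends at floor(log2(size))
    }
    return index;
}

int main()
{
    assert(bucketIndexForSize(1) == 0);
    assert(bucketIndexForSize(96) == 6);    // 64 <= 96 < 128
    assert(bucketIndexForSize(4096) == 12); // exactly 2^12
}
```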
| (...skipping 18 matching lines...) |
| 1496 size_t& freeSize = bucketStats[i].freeSize; | 1496 size_t& freeSize = bucketStats[i].freeSize; |
| 1497 for (FreeListEntry* entry = m_freeLists[i]; entry; entry = entry->next()) { | 1497 for (FreeListEntry* entry = m_freeLists[i]; entry; entry = entry->next()) { |
| 1498 ++entryCount; | 1498 ++entryCount; |
| 1499 freeSize += entry->size(); | 1499 freeSize += entry->size(); |
| 1500 } | 1500 } |
| 1501 totalFreeSize += freeSize; | 1501 totalFreeSize += freeSize; |
| 1502 } | 1502 } |
| 1503 } | 1503 } |
| 1504 #endif | 1504 #endif |
| 1505 | 1505 |
| 1506 NormalPage::NormalPage(PageMemory* storage, BaseHeap* heap) | 1506 HeapPage::HeapPage(PageMemory* storage, ThreadHeap* heap) |
| 1507 : BasePage(storage, heap) | 1507 : BaseHeapPage(storage, heap) |
| 1508 { | 1508 { |
| 1509 m_objectStartBitMapComputed = false; | 1509 m_objectStartBitMapComputed = false; |
| 1510 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); | 1510 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); |
| 1511 } | 1511 } |
| 1512 | 1512 |
| 1513 size_t NormalPage::objectPayloadSizeForTesting() | 1513 size_t HeapPage::objectPayloadSizeForTesting() |
| 1514 { | 1514 { |
| 1515 size_t objectPayloadSize = 0; | 1515 size_t objectPayloadSize = 0; |
| 1516 Address headerAddress = payload(); | 1516 Address headerAddress = payload(); |
| 1517 markAsSwept(); | 1517 markAsSwept(); |
| 1518 ASSERT(headerAddress != payloadEnd()); | 1518 ASSERT(headerAddress != payloadEnd()); |
| 1519 do { | 1519 do { |
| 1520 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1520 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); |
| 1521 if (!header->isFree()) { | 1521 if (!header->isFree()) { |
| 1522 header->checkHeader(); | 1522 header->checkHeader(); |
| 1523 objectPayloadSize += header->payloadSize(); | 1523 objectPayloadSize += header->payloadSize(); |
| 1524 } | 1524 } |
| 1525 ASSERT(header->size() < blinkPagePayloadSize()); | 1525 ASSERT(header->size() < blinkPagePayloadSize()); |
| 1526 headerAddress += header->size(); | 1526 headerAddress += header->size(); |
| 1527 ASSERT(headerAddress <= payloadEnd()); | 1527 ASSERT(headerAddress <= payloadEnd()); |
| 1528 } while (headerAddress < payloadEnd()); | 1528 } while (headerAddress < payloadEnd()); |
| 1529 return objectPayloadSize; | 1529 return objectPayloadSize; |
| 1530 } | 1530 } |
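Editor's note: the do/while above is the standard page walk in this file: every header records the full size of its block, so stepping by header->size() visits each block exactly once until payloadEnd(). A toy model on a plain byte buffer; ToyHeader is a stand-in, not Blink's HeapObjectHeader:

```cpp
#include <cassert>
#include <cstddef>
#include <cstring>

struct ToyHeader {
    size_t size; // full block size, header included
    bool isFree;
};

int main()
{
    unsigned char page[64];
    // Lay out three blocks: 24 bytes live, 16 free, 24 live (sums to 64).
    const ToyHeader blocks[] = { {24, false}, {16, true}, {24, false} };
    size_t offset = 0;
    for (const ToyHeader& block : blocks) {
        memcpy(page + offset, &block, sizeof block);
        offset += block.size;
    }
    // The walk: read a header, skip free blocks, step by the recorded size.
    size_t payloadSize = 0;
    offset = 0;
    do {
        ToyHeader header;
        memcpy(&header, page + offset, sizeof header);
        if (!header.isFree)
            payloadSize += header.size - sizeof(ToyHeader);
        offset += header.size;
    } while (offset < sizeof page);
    assert(payloadSize == 48 - 2 * sizeof(ToyHeader));
}
```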
| 1531 | 1531 |
| 1532 bool NormalPage::isEmpty() | 1532 bool HeapPage::isEmpty() |
| 1533 { | 1533 { |
| 1534 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(payload()); | 1534 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(payload()); |
| 1535 return header->isFree() && header->size() == payloadSize(); | 1535 return header->isFree() && header->size() == payloadSize(); |
| 1536 } | 1536 } |
| 1537 | 1537 |
| 1538 void NormalPage::sweep() | 1538 void HeapPage::sweep() |
| 1539 { | 1539 { |
| 1540 clearObjectStartBitMap(); | 1540 clearObjectStartBitMap(); |
| 1541 | 1541 |
| 1542 size_t markedObjectSize = 0; | 1542 size_t markedObjectSize = 0; |
| 1543 Address startOfGap = payload(); | 1543 Address startOfGap = payload(); |
| 1544 for (Address headerAddress = startOfGap; headerAddress < payloadEnd(); ) { | 1544 for (Address headerAddress = startOfGap; headerAddress < payloadEnd(); ) { |
| 1545 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1545 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); |
| 1546 ASSERT(header->size() > 0); | 1546 ASSERT(header->size() > 0); |
| 1547 ASSERT(header->size() < blinkPagePayloadSize()); | 1547 ASSERT(header->size() < blinkPagePayloadSize()); |
| 1548 | 1548 |
| 1549 if (header->isPromptlyFreed()) | 1549 if (header->isPromptlyFreed()) |
| 1550 heapForNormalPage()->decreasePromptlyFreedSize(header->size()); | 1550 heapForHeapPage()->decreasePromptlyFreedSize(header->size()); |
| 1551 if (header->isFree()) { | 1551 if (header->isFree()) { |
| 1552 size_t size = header->size(); | 1552 size_t size = header->size(); |
| 1553 // Zero the memory in the free list header to maintain the | 1553 // Zero the memory in the free list header to maintain the |
| 1554 // invariant that memory on the free list is zero filled. | 1554 // invariant that memory on the free list is zero filled. |
| 1555 // The rest of the memory is already on the free list and is | 1555 // The rest of the memory is already on the free list and is |
| 1556 // therefore already zero filled. | 1556 // therefore already zero filled. |
| 1557 FILL_ZERO_IF_PRODUCTION(headerAddress, size < sizeof(FreeListEntry) ? size : sizeof(FreeListEntry)); | 1557 FILL_ZERO_IF_PRODUCTION(headerAddress, size < sizeof(FreeListEntry) ? size : sizeof(FreeListEntry)); |
| 1558 headerAddress += size; | 1558 headerAddress += size; |
| 1559 continue; | 1559 continue; |
| 1560 } | 1560 } |
| (...skipping 12 matching lines...) |
| 1573 header->finalize(payload, payloadSize); | 1573 header->finalize(payload, payloadSize); |
| 1574 // This memory will be added to the freelist. Maintain the invariant | 1574 // This memory will be added to the freelist. Maintain the invariant |
| 1575 // that memory on the freelist is zero filled. | 1575 // that memory on the freelist is zero filled. |
| 1576 FILL_ZERO_IF_PRODUCTION(headerAddress, size); | 1576 FILL_ZERO_IF_PRODUCTION(headerAddress, size); |
| 1577 ASAN_POISON_MEMORY_REGION(payload, payloadSize); | 1577 ASAN_POISON_MEMORY_REGION(payload, payloadSize); |
| 1578 headerAddress += size; | 1578 headerAddress += size; |
| 1579 continue; | 1579 continue; |
| 1580 } | 1580 } |
| 1581 | 1581 |
| 1582 if (startOfGap != headerAddress) | 1582 if (startOfGap != headerAddress) |
| 1583 heapForNormalPage()->addToFreeList(startOfGap, headerAddress - startOfGap); | 1583 heapForHeapPage()->addToFreeList(startOfGap, headerAddress - startOfGap); |
| 1584 header->unmark(); | 1584 header->unmark(); |
| 1585 headerAddress += header->size(); | 1585 headerAddress += header->size(); |
| 1586 markedObjectSize += header->size(); | 1586 markedObjectSize += header->size(); |
| 1587 startOfGap = headerAddress; | 1587 startOfGap = headerAddress; |
| 1588 } | 1588 } |
| 1589 if (startOfGap != payloadEnd()) | 1589 if (startOfGap != payloadEnd()) |
| 1590 heapForNormalPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap); | 1590 heapForHeapPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap); |
| 1591 | 1591 |
| 1592 if (markedObjectSize) | 1592 if (markedObjectSize) |
| 1593 Heap::increaseMarkedObjectSize(markedObjectSize); | 1593 Heap::increaseMarkedObjectSize(markedObjectSize); |
| 1594 } | 1594 } |
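Editor's note: the startOfGap bookkeeping in sweep() coalesces each maximal run of dead or already-free memory between surviving objects into a single free-list entry. A schematic of just that coalescing, on a toy array where true/false marks live/dead blocks of unit size:

```cpp
#include <cstdio>
#include <vector>

int main()
{
    std::vector<bool> live = { true, false, false, true, false, true };
    size_t startOfGap = 0;
    for (size_t i = 0; i < live.size(); ++i) {
        if (!live[i])
            continue; // dead: the current gap keeps growing
        if (startOfGap != i)
            printf("free span [%zu, %zu)\n", startOfGap, i);
        startOfGap = i + 1; // the gap restarts after a surviving block
    }
    if (startOfGap != live.size())
        printf("free span [%zu, %zu)\n", startOfGap, live.size());
    // Prints: free span [1, 3) and free span [4, 5).
}
```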
| 1595 | 1595 |
| 1596 void NormalPage::markUnmarkedObjectsDead() | 1596 void HeapPage::markUnmarkedObjectsDead() |
| 1597 { | 1597 { |
| 1598 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { | 1598 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { |
| 1599 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1599 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); |
| 1600 ASSERT(header->size() < blinkPagePayloadSize()); | 1600 ASSERT(header->size() < blinkPagePayloadSize()); |
| 1601 // Check for a free list entry first since we cannot call | 1601 // Check for a free list entry first since we cannot call |
| 1602 // isMarked on a free list entry. | 1602 // isMarked on a free list entry. |
| 1603 if (header->isFree()) { | 1603 if (header->isFree()) { |
| 1604 headerAddress += header->size(); | 1604 headerAddress += header->size(); |
| 1605 continue; | 1605 continue; |
| 1606 } | 1606 } |
| 1607 header->checkHeader(); | 1607 header->checkHeader(); |
| 1608 if (header->isMarked()) | 1608 if (header->isMarked()) |
| 1609 header->unmark(); | 1609 header->unmark(); |
| 1610 else | 1610 else |
| 1611 header->markDead(); | 1611 header->markDead(); |
| 1612 headerAddress += header->size(); | 1612 headerAddress += header->size(); |
| 1613 } | 1613 } |
| 1614 } | 1614 } |
| 1615 | 1615 |
| 1616 void NormalPage::removeFromHeap() | 1616 void HeapPage::removeFromHeap() |
| 1617 { | 1617 { |
| 1618 heapForNormalPage()->freePage(this); | 1618 heapForHeapPage()->freePage(this); |
| 1619 } | 1619 } |
| 1620 | 1620 |
| 1621 NormalPageHeap* NormalPage::heapForNormalPage() | 1621 ThreadHeapForHeapPage* HeapPage::heapForHeapPage() |
| 1622 { | 1622 { |
| 1623 return static_cast<NormalPageHeap*>(heap()); | 1623 return static_cast<ThreadHeapForHeapPage*>(heap()); |
| 1624 } | 1624 } |
| 1625 | 1625 |
| 1626 void NormalPage::populateObjectStartBitMap() | 1626 void HeapPage::populateObjectStartBitMap() |
| 1627 { | 1627 { |
| 1628 memset(&m_objectStartBitMap, 0, objectStartBitMapSize); | 1628 memset(&m_objectStartBitMap, 0, objectStartBitMapSize); |
| 1629 Address start = payload(); | 1629 Address start = payload(); |
| 1630 for (Address headerAddress = start; headerAddress < payloadEnd();) { | 1630 for (Address headerAddress = start; headerAddress < payloadEnd();) { |
| 1631 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1631 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); |
| 1632 size_t objectOffset = headerAddress - start; | 1632 size_t objectOffset = headerAddress - start; |
| 1633 ASSERT(!(objectOffset & allocationMask)); | 1633 ASSERT(!(objectOffset & allocationMask)); |
| 1634 size_t objectStartNumber = objectOffset / allocationGranularity; | 1634 size_t objectStartNumber = objectOffset / allocationGranularity; |
| 1635 size_t mapIndex = objectStartNumber / 8; | 1635 size_t mapIndex = objectStartNumber / 8; |
| 1636 ASSERT(mapIndex < objectStartBitMapSize); | 1636 ASSERT(mapIndex < objectStartBitMapSize); |
| 1637 m_objectStartBitMap[mapIndex] |= (1 << (objectStartNumber & 7)); | 1637 m_objectStartBitMap[mapIndex] |= (1 << (objectStartNumber & 7)); |
| 1638 headerAddress += header->size(); | 1638 headerAddress += header->size(); |
| 1639 ASSERT(headerAddress <= payloadEnd()); | 1639 ASSERT(headerAddress <= payloadEnd()); |
| 1640 } | 1640 } |
| 1641 m_objectStartBitMapComputed = true; | 1641 m_objectStartBitMapComputed = true; |
| 1642 } | 1642 } |
| 1643 | 1643 |
| 1644 void NormalPage::clearObjectStartBitMap() | 1644 void HeapPage::clearObjectStartBitMap() |
| 1645 { | 1645 { |
| 1646 m_objectStartBitMapComputed = false; | 1646 m_objectStartBitMapComputed = false; |
| 1647 } | 1647 } |
| 1648 | 1648 |
| 1649 static int numberOfLeadingZeroes(uint8_t byte) | 1649 static int numberOfLeadingZeroes(uint8_t byte) |
| 1650 { | 1650 { |
| 1651 if (!byte) | 1651 if (!byte) |
| 1652 return 8; | 1652 return 8; |
| 1653 int result = 0; | 1653 int result = 0; |
| 1654 if (byte <= 0x0F) { | 1654 if (byte <= 0x0F) { |
| 1655 result += 4; | 1655 result += 4; |
| 1656 byte = byte << 4; | 1656 byte = byte << 4; |
| 1657 } | 1657 } |
| 1658 if (byte <= 0x3F) { | 1658 if (byte <= 0x3F) { |
| 1659 result += 2; | 1659 result += 2; |
| 1660 byte = byte << 2; | 1660 byte = byte << 2; |
| 1661 } | 1661 } |
| 1662 if (byte <= 0x7F) | 1662 if (byte <= 0x7F) |
| 1663 result++; | 1663 result++; |
| 1664 return result; | 1664 return result; |
| 1665 } | 1665 } |
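Editor's note: quick standalone check of the binary-search bit count above. The function returns the number of leading zero bits in a byte (8 for zero); it is restated here so the test compiles on its own:

```cpp
#include <cassert>
#include <cstdint>

static int numberOfLeadingZeroes(uint8_t byte)
{
    if (!byte)
        return 8;
    int result = 0;
    if (byte <= 0x0F) { result += 4; byte <<= 4; }
    if (byte <= 0x3F) { result += 2; byte <<= 2; }
    if (byte <= 0x7F) ++result;
    return result;
}

int main()
{
    assert(numberOfLeadingZeroes(0x00) == 8);
    assert(numberOfLeadingZeroes(0x01) == 7); // only bit 0 set
    assert(numberOfLeadingZeroes(0x0F) == 4); // high nibble clear
    assert(numberOfLeadingZeroes(0x80) == 0); // top bit set
}
```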
| 1666 | 1666 |
| 1667 HeapObjectHeader* NormalPage::findHeaderFromAddress(Address address) | 1667 HeapObjectHeader* HeapPage::findHeaderFromAddress(Address address) |
| 1668 { | 1668 { |
| 1669 if (address < payload()) | 1669 if (address < payload()) |
| 1670 return nullptr; | 1670 return nullptr; |
| 1671 if (!isObjectStartBitMapComputed()) | 1671 if (!isObjectStartBitMapComputed()) |
| 1672 populateObjectStartBitMap(); | 1672 populateObjectStartBitMap(); |
| 1673 size_t objectOffset = address - payload(); | 1673 size_t objectOffset = address - payload(); |
| 1674 size_t objectStartNumber = objectOffset / allocationGranularity; | 1674 size_t objectStartNumber = objectOffset / allocationGranularity; |
| 1675 size_t mapIndex = objectStartNumber / 8; | 1675 size_t mapIndex = objectStartNumber / 8; |
| 1676 ASSERT(mapIndex < objectStartBitMapSize); | 1676 ASSERT(mapIndex < objectStartBitMapSize); |
| 1677 size_t bit = objectStartNumber & 7; | 1677 size_t bit = objectStartNumber & 7; |
| 1678 uint8_t byte = m_objectStartBitMap[mapIndex] & ((1 << (bit + 1)) - 1); | 1678 uint8_t byte = m_objectStartBitMap[mapIndex] & ((1 << (bit + 1)) - 1); |
| 1679 while (!byte) { | 1679 while (!byte) { |
| 1680 ASSERT(mapIndex > 0); | 1680 ASSERT(mapIndex > 0); |
| 1681 byte = m_objectStartBitMap[--mapIndex]; | 1681 byte = m_objectStartBitMap[--mapIndex]; |
| 1682 } | 1682 } |
| 1683 int leadingZeroes = numberOfLeadingZeroes(byte); | 1683 int leadingZeroes = numberOfLeadingZeroes(byte); |
| 1684 objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes; | 1684 objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes; |
| 1685 objectOffset = objectStartNumber * allocationGranularity; | 1685 objectOffset = objectStartNumber * allocationGranularity; |
| 1686 Address objectAddress = objectOffset + payload(); | 1686 Address objectAddress = objectOffset + payload(); |
| 1687 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(objectAddress); | 1687 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(objectAddress); |
| 1688 if (header->isFree()) | 1688 if (header->isFree()) |
| 1689 return nullptr; | 1689 return nullptr; |
| 1690 header->checkHeader(); | 1690 header->checkHeader(); |
| 1691 return header; | 1691 return header; |
| 1692 } | 1692 } |
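Editor's note: a toy walk-through of the backward scan in findHeaderFromAddress: an interior pointer is converted to its slot number, and the bitmap is scanned backwards for the nearest object start at or before it. Self-contained, with a simple leading-zero helper:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

static int leadingZeroes(uint8_t byte)
{
    int result = 0;
    while (result < 8 && !(byte & 0x80)) {
        byte <<= 1;
        ++result;
    }
    return result;
}

int main()
{
    const size_t allocationGranularity = 8;
    uint8_t bitmap[16] = {};
    bitmap[10 / 8] |= 1 << (10 & 7); // object header at slot 10 (offset 80)

    // An interior pointer at payload offset 100 lands in slot 12.
    size_t objectStartNumber = 100 / allocationGranularity;
    size_t mapIndex = objectStartNumber / 8;
    size_t bit = objectStartNumber & 7;
    // Mask away bits above the slot, then scan backwards for a set bit.
    uint8_t byte = bitmap[mapIndex] & ((1 << (bit + 1)) - 1);
    while (!byte)
        byte = bitmap[--mapIndex];
    objectStartNumber = mapIndex * 8 + 7 - leadingZeroes(byte);
    assert(objectStartNumber * allocationGranularity == 80);
}
```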
| 1693 | 1693 |
| 1694 void NormalPage::checkAndMarkPointer(Visitor* visitor, Address address) | 1694 void HeapPage::checkAndMarkPointer(Visitor* visitor, Address address) |
| 1695 { | 1695 { |
| 1696 ASSERT(contains(address)); | 1696 ASSERT(contains(address)); |
| 1697 HeapObjectHeader* header = findHeaderFromAddress(address); | 1697 HeapObjectHeader* header = findHeaderFromAddress(address); |
| 1698 if (!header || header->isDead()) | 1698 if (!header || header->isDead()) |
| 1699 return; | 1699 return; |
| 1700 #if ENABLE(GC_PROFILING) | 1700 #if ENABLE(GC_PROFILING) |
| 1701 visitor->setHostInfo(&address, "stack"); | 1701 visitor->setHostInfo(&address, "stack"); |
| 1702 #endif | 1702 #endif |
| 1703 markPointer(visitor, header); | 1703 markPointer(visitor, header); |
| 1704 } | 1704 } |
| 1705 | 1705 |
| 1706 #if ENABLE(GC_PROFILING) | 1706 #if ENABLE(GC_PROFILING) |
| 1707 const GCInfo* NormalPage::findGCInfo(Address address) | 1707 const GCInfo* HeapPage::findGCInfo(Address address) |
| 1708 { | 1708 { |
| 1709 if (address < payload()) | 1709 if (address < payload()) |
| 1710 return nullptr; | 1710 return nullptr; |
| 1711 | 1711 |
| 1712 HeapObjectHeader* header = findHeaderFromAddress(address); | 1712 HeapObjectHeader* header = findHeaderFromAddress(address); |
| 1713 if (!header) | 1713 if (!header) |
| 1714 return nullptr; | 1714 return nullptr; |
| 1715 | 1715 |
| 1716 return Heap::gcInfo(header->gcInfoIndex()); | 1716 return Heap::gcInfo(header->gcInfoIndex()); |
| 1717 } | 1717 } |
| 1718 #endif | 1718 #endif |
| 1719 | 1719 |
| 1720 #if ENABLE(GC_PROFILING) | 1720 #if ENABLE(GC_PROFILING) |
| 1721 void NormalPage::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) | 1721 void HeapPage::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) |
| 1722 { | 1722 { |
| 1723 HeapObjectHeader* header = nullptr; | 1723 HeapObjectHeader* header = nullptr; |
| 1724 for (Address addr = payload(); addr < payloadEnd(); addr += header->size()) { | 1724 for (Address addr = payload(); addr < payloadEnd(); addr += header->size()) { |
| 1725 header = reinterpret_cast<HeapObjectHeader*>(addr); | 1725 header = reinterpret_cast<HeapObjectHeader*>(addr); |
| 1726 if (json) | 1726 if (json) |
| 1727 json->pushInteger(header->encodedSize()); | 1727 json->pushInteger(header->encodedSize()); |
| 1728 if (header->isFree()) { | 1728 if (header->isFree()) { |
| 1729 info->freeSize += header->size(); | 1729 info->freeSize += header->size(); |
| 1730 continue; | 1730 continue; |
| 1731 } | 1731 } |
| (...skipping 11 matching lines...) |
| 1743 } else { | 1743 } else { |
| 1744 info->deadCount[tag] += 1; | 1744 info->deadCount[tag] += 1; |
| 1745 info->deadSize[tag] += header->size(); | 1745 info->deadSize[tag] += header->size(); |
| 1746 // Count objects that are dead before the final generation. | 1746 // Count objects that are dead before the final generation. |
| 1747 if (age < maxHeapObjectAge) | 1747 if (age < maxHeapObjectAge) |
| 1748 info->generations[tag][age] += 1; | 1748 info->generations[tag][age] += 1; |
| 1749 } | 1749 } |
| 1750 } | 1750 } |
| 1751 } | 1751 } |
| 1752 | 1752 |
| 1753 void NormalPage::incrementMarkedObjectsAge() | 1753 void HeapPage::incrementMarkedObjectsAge() |
| 1754 { | 1754 { |
| 1755 HeapObjectHeader* header = nullptr; | 1755 HeapObjectHeader* header = nullptr; |
| 1756 for (Address address = payload(); address < payloadEnd(); address += header->size()) { | 1756 for (Address address = payload(); address < payloadEnd(); address += header->size()) { |
| 1757 header = reinterpret_cast<HeapObjectHeader*>(address); | 1757 header = reinterpret_cast<HeapObjectHeader*>(address); |
| 1758 if (header->isMarked()) | 1758 if (header->isMarked()) |
| 1759 header->incrementAge(); | 1759 header->incrementAge(); |
| 1760 } | 1760 } |
| 1761 } | 1761 } |
| 1762 | 1762 |
| 1763 void NormalPage::countMarkedObjects(ClassAgeCountsMap& classAgeCounts) | 1763 void HeapPage::countMarkedObjects(ClassAgeCountsMap& classAgeCounts) |
| 1764 { | 1764 { |
| 1765 HeapObjectHeader* header = nullptr; | 1765 HeapObjectHeader* header = nullptr; |
| 1766 for (Address address = payload(); address < payloadEnd(); address += header->size()) { | 1766 for (Address address = payload(); address < payloadEnd(); address += header->size()) { |
| 1767 header = reinterpret_cast<HeapObjectHeader*>(address); | 1767 header = reinterpret_cast<HeapObjectHeader*>(address); |
| 1768 if (header->isMarked()) { | 1768 if (header->isMarked()) { |
| 1769 String className(classOf(header->payload())); | 1769 String className(classOf(header->payload())); |
| 1770 ++(classAgeCounts.add(className, AgeCounts()).storedValue->value.ages[header->age()]); | 1770 ++(classAgeCounts.add(className, AgeCounts()).storedValue->value.ages[header->age()]); |
| 1771 } | 1771 } |
| 1772 } | 1772 } |
| 1773 } | 1773 } |
| 1774 | 1774 |
| 1775 void NormalPage::countObjectsToSweep(ClassAgeCountsMap& classAgeCounts) | 1775 void HeapPage::countObjectsToSweep(ClassAgeCountsMap& classAgeCounts) |
| 1776 { | 1776 { |
| 1777 HeapObjectHeader* header = nullptr; | 1777 HeapObjectHeader* header = nullptr; |
| 1778 for (Address address = payload(); address < payloadEnd(); address += header->size()) { | 1778 for (Address address = payload(); address < payloadEnd(); address += header->size()) { |
| 1779 header = reinterpret_cast<HeapObjectHeader*>(address); | 1779 header = reinterpret_cast<HeapObjectHeader*>(address); |
| 1780 if (!header->isFree() && !header->isMarked()) { | 1780 if (!header->isFree() && !header->isMarked()) { |
| 1781 String className(classOf(header->payload())); | 1781 String className(classOf(header->payload())); |
| 1782 ++(classAgeCounts.add(className, AgeCounts()).storedValue->value.ages[header->age()]); | 1782 ++(classAgeCounts.add(className, AgeCounts()).storedValue->value.ages[header->age()]); |
| 1783 } | 1783 } |
| 1784 } | 1784 } |
| 1785 } | 1785 } |
| 1786 #endif | 1786 #endif |
| 1787 | 1787 |
| 1788 size_t LargeObjectPage::objectPayloadSizeForTesting() | 1788 size_t LargeObject::objectPayloadSizeForTesting() |
| 1789 { | 1789 { |
| 1790 markAsSwept(); | 1790 markAsSwept(); |
| 1791 return payloadSize(); | 1791 return payloadSize(); |
| 1792 } | 1792 } |
| 1793 | 1793 |
| 1794 #if ENABLE(GC_PROFILING) | 1794 #if ENABLE(GC_PROFILING) |
| 1795 const GCInfo* LargeObjectPage::findGCInfo(Address address) | 1795 const GCInfo* LargeObject::findGCInfo(Address address) |
| 1796 { | 1796 { |
| 1797 if (!containedInObjectPayload(address)) | 1797 if (!containedInObjectPayload(address)) |
| 1798 return nullptr; | 1798 return nullptr; |
| 1799 HeapObjectHeader* header = heapObjectHeader(); | 1799 HeapObjectHeader* header = heapObjectHeader(); |
| 1800 return Heap::gcInfo(header->gcInfoIndex()); | 1800 return Heap::gcInfo(header->gcInfoIndex()); |
| 1801 } | 1801 } |
| 1802 | 1802 |
| 1803 void LargeObjectPage::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) | 1803 void LargeObject::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) |
| 1804 { | 1804 { |
| 1805 HeapObjectHeader* header = heapObjectHeader(); | 1805 HeapObjectHeader* header = heapObjectHeader(); |
| 1806 size_t tag = info->getClassTag(Heap::gcInfo(header->gcInfoIndex())); | 1806 size_t tag = info->getClassTag(Heap::gcInfo(header->gcInfoIndex())); |
| 1807 size_t age = header->age(); | 1807 size_t age = header->age(); |
| 1808 if (header->isMarked()) { | 1808 if (header->isMarked()) { |
| 1809 info->liveCount[tag] += 1; | 1809 info->liveCount[tag] += 1; |
| 1810 info->liveSize[tag] += header->size(); | 1810 info->liveSize[tag] += header->size(); |
| 1811 // Count objects that are live when promoted to the final generation. | 1811 // Count objects that are live when promoted to the final generation. |
| 1812 if (age == maxHeapObjectAge - 1) | 1812 if (age == maxHeapObjectAge - 1) |
| 1813 info->generations[tag][maxHeapObjectAge] += 1; | 1813 info->generations[tag][maxHeapObjectAge] += 1; |
| 1814 } else { | 1814 } else { |
| 1815 info->deadCount[tag] += 1; | 1815 info->deadCount[tag] += 1; |
| 1816 info->deadSize[tag] += header->size(); | 1816 info->deadSize[tag] += header->size(); |
| 1817 // Count objects that are dead before the final generation. | 1817 // Count objects that are dead before the final generation. |
| 1818 if (age < maxHeapObjectAge) | 1818 if (age < maxHeapObjectAge) |
| 1819 info->generations[tag][age] += 1; | 1819 info->generations[tag][age] += 1; |
| 1820 } | 1820 } |
| 1821 | 1821 |
| 1822 if (json) { | 1822 if (json) { |
| 1823 json->setInteger("class", tag); | 1823 json->setInteger("class", tag); |
| 1824 json->setInteger("size", header->size()); | 1824 json->setInteger("size", header->size()); |
| 1825 json->setInteger("isMarked", header->isMarked()); | 1825 json->setInteger("isMarked", header->isMarked()); |
| 1826 } | 1826 } |
| 1827 } | 1827 } |
| 1828 | 1828 |
| 1829 void LargeObjectPage::incrementMarkedObjectsAge() | 1829 void LargeObject::incrementMarkedObjectsAge() |
| 1830 { | 1830 { |
| 1831 HeapObjectHeader* header = heapObjectHeader(); | 1831 HeapObjectHeader* header = heapObjectHeader(); |
| 1832 if (header->isMarked()) | 1832 if (header->isMarked()) |
| 1833 header->incrementAge(); | 1833 header->incrementAge(); |
| 1834 } | 1834 } |
| 1835 | 1835 |
| 1836 void LargeObjectPage::countMarkedObjects(ClassAgeCountsMap& classAgeCounts) | 1836 void LargeObject::countMarkedObjects(ClassAgeCountsMap& classAgeCounts) |
| 1837 { | 1837 { |
| 1838 HeapObjectHeader* header = heapObjectHeader(); | 1838 HeapObjectHeader* header = heapObjectHeader(); |
| 1839 if (header->isMarked()) { | 1839 if (header->isMarked()) { |
| 1840 String className(classOf(header->payload())); | 1840 String className(classOf(header->payload())); |
| 1841 ++(classAgeCounts.add(className, AgeCounts()).storedValue->value.ages[header->age()]); | 1841 ++(classAgeCounts.add(className, AgeCounts()).storedValue->value.ages[header->age()]); |
| 1842 } | 1842 } |
| 1843 } | 1843 } |
| 1844 | 1844 |
| 1845 void LargeObjectPage::countObjectsToSweep(ClassAgeCountsMap& classAgeCounts) | 1845 void LargeObject::countObjectsToSweep(ClassAgeCountsMap& classAgeCounts) |
| 1846 { | 1846 { |
| 1847 HeapObjectHeader* header = heapObjectHeader(); | 1847 HeapObjectHeader* header = heapObjectHeader(); |
| 1848 if (!header->isFree() && !header->isMarked()) { | 1848 if (!header->isFree() && !header->isMarked()) { |
| 1849 String className(classOf(header->payload())); | 1849 String className(classOf(header->payload())); |
| 1850 ++(classAgeCounts.add(className, AgeCounts()).storedValue->value.ages[header->age()]); | 1850 ++(classAgeCounts.add(className, AgeCounts()).storedValue->value.ages[header->age()]); |
| 1851 } | 1851 } |
| 1852 } | 1852 } |
| 1853 #endif | 1853 #endif |
| 1854 | 1854 |
| 1855 void HeapDoesNotContainCache::flush() | 1855 void HeapDoesNotContainCache::flush() |
| (...skipping 222 matching lines...) |
| 2078 virtual void registerWeakCellWithCallback(void** cell, WeakPointerCallback callback) override | 2078 virtual void registerWeakCellWithCallback(void** cell, WeakPointerCallback callback) override |
| 2079 { | 2079 { |
| 2080 Impl::registerWeakCellWithCallback(cell, callback); | 2080 Impl::registerWeakCellWithCallback(cell, callback); |
| 2081 } | 2081 } |
| 2082 | 2082 |
| 2083 inline bool shouldMarkObject(const void* objectPointer) | 2083 inline bool shouldMarkObject(const void* objectPointer) |
| 2084 { | 2084 { |
| 2085 if (Mode != ThreadLocalMarking) | 2085 if (Mode != ThreadLocalMarking) |
| 2086 return true; | 2086 return true; |
| 2087 | 2087 |
| 2088 BasePage* page = pageFromObject(objectPointer); | 2088 BaseHeapPage* page = pageFromObject(objectPointer); |
| 2089 ASSERT(!page->orphaned()); | 2089 ASSERT(!page->orphaned()); |
| 2090 // When doing a thread local GC, the marker checks if | 2090 // When doing a thread local GC, the marker checks if |
| 2091 // the object resides in another thread's heap. If it | 2091 // the object resides in another thread's heap. If it |
| 2092 // does, the object should not be marked & traced. | 2092 // does, the object should not be marked & traced. |
| 2093 return page->terminating(); | 2093 return page->terminating(); |
| 2094 } | 2094 } |
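Editor's note: shouldMarkObject leans on pageFromObject, which recovers the page header from an arbitrary object pointer by rounding the address down to the blink page boundary (pages are allocated aligned). A sketch of that masking; the page size is an assumption, and the real pageFromObject additionally skips a leading guard page:

```cpp
#include <cassert>
#include <cstdint>

const uintptr_t blinkPageSize = uintptr_t(1) << 17; // assumed: 128 KB, aligned
const uintptr_t blinkPageBaseMask = ~(blinkPageSize - 1);

// Rounds any address within a page down to the page's base address.
static uintptr_t blinkPageBase(uintptr_t address)
{
    return address & blinkPageBaseMask;
}

int main()
{
    uintptr_t base = 5 * blinkPageSize; // any page-aligned address
    assert(blinkPageBase(base + 0x1234) == base);
    assert(blinkPageBase(base) == base);
}
```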
| 2095 | 2095 |
| 2096 #if ENABLE(ASSERT) | 2096 #if ENABLE(ASSERT) |
| 2097 virtual void checkMarkingAllowed() override | 2097 virtual void checkMarkingAllowed() override |
| 2098 { | 2098 { |
| (...skipping 50 matching lines...) |
| 2149 delete s_ephemeronStack; | 2149 delete s_ephemeronStack; |
| 2150 s_ephemeronStack = nullptr; | 2150 s_ephemeronStack = nullptr; |
| 2151 delete s_regionTree; | 2151 delete s_regionTree; |
| 2152 s_regionTree = nullptr; | 2152 s_regionTree = nullptr; |
| 2153 GCInfoTable::shutdown(); | 2153 GCInfoTable::shutdown(); |
| 2154 ThreadState::shutdown(); | 2154 ThreadState::shutdown(); |
| 2155 ASSERT(Heap::allocatedSpace() == 0); | 2155 ASSERT(Heap::allocatedSpace() == 0); |
| 2156 } | 2156 } |
| 2157 | 2157 |
| 2158 #if ENABLE(ASSERT) | 2158 #if ENABLE(ASSERT) |
| 2159 BasePage* Heap::findPageFromAddress(Address address) | 2159 BaseHeapPage* Heap::findPageFromAddress(Address address) |
| 2160 { | 2160 { |
| 2161 ASSERT(ThreadState::current()->isInGC()); | 2161 ASSERT(ThreadState::current()->isInGC()); |
| 2162 for (ThreadState* state : ThreadState::attachedThreads()) { | 2162 for (ThreadState* state : ThreadState::attachedThreads()) { |
| 2163 if (BasePage* page = state->findPageFromAddress(address)) | 2163 if (BaseHeapPage* page = state->findPageFromAddress(address)) |
| 2164 return page; | 2164 return page; |
| 2165 } | 2165 } |
| 2166 return nullptr; | 2166 return nullptr; |
| 2167 } | 2167 } |
| 2168 | 2168 |
| 2169 bool Heap::containedInHeapOrOrphanedPage(void* object) | 2169 bool Heap::containedInHeapOrOrphanedPage(void* object) |
| 2170 { | 2170 { |
| 2171 return findPageFromAddress(object) || orphanedPagePool()->contains(object); | 2171 return findPageFromAddress(object) || orphanedPagePool()->contains(object); |
| 2172 } | 2172 } |
| 2173 #endif | 2173 #endif |
| 2174 | 2174 |
| 2175 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) | 2175 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) |
| 2176 { | 2176 { |
| 2177 ASSERT(ThreadState::current()->isInGC()); | 2177 ASSERT(ThreadState::current()->isInGC()); |
| 2178 | 2178 |
| 2179 #if !ENABLE(ASSERT) | 2179 #if !ENABLE(ASSERT) |
| 2180 if (s_heapDoesNotContainCache->lookup(address)) | 2180 if (s_heapDoesNotContainCache->lookup(address)) |
| 2181 return nullptr; | 2181 return nullptr; |
| 2182 #endif | 2182 #endif |
| 2183 | 2183 |
| 2184 if (BasePage* page = lookup(address)) { | 2184 if (BaseHeapPage* page = lookup(address)) { |
| 2185 ASSERT(page->contains(address)); | 2185 ASSERT(page->contains(address)); |
| 2186 ASSERT(!page->orphaned()); | 2186 ASSERT(!page->orphaned()); |
| 2187 ASSERT(!s_heapDoesNotContainCache->lookup(address)); | 2187 ASSERT(!s_heapDoesNotContainCache->lookup(address)); |
| 2188 page->checkAndMarkPointer(visitor, address); | 2188 page->checkAndMarkPointer(visitor, address); |
| 2189 // FIXME: We only need to set the conservative flag if | 2189 // FIXME: We only need to set the conservative flag if |
| 2190 // checkAndMarkPointer actually marked the pointer. | 2190 // checkAndMarkPointer actually marked the pointer. |
| 2191 s_lastGCWasConservative = true; | 2191 s_lastGCWasConservative = true; |
| 2192 return address; | 2192 return address; |
| 2193 } | 2193 } |
| 2194 | 2194 |
| (...skipping 93 matching lines...) |
| 2288 | 2288 |
| 2289 void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback) | 2289 void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback) |
| 2290 { | 2290 { |
| 2291 ASSERT(!Heap::orphanedPagePool()->contains(cell)); | 2291 ASSERT(!Heap::orphanedPagePool()->contains(cell)); |
| 2292 CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry(); | 2292 CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry(); |
| 2293 *slot = CallbackStack::Item(cell, callback); | 2293 *slot = CallbackStack::Item(cell, callback); |
| 2294 } | 2294 } |
| 2295 | 2295 |
| 2296 void Heap::pushWeakPointerCallback(void* closure, void* object, WeakPointerCallback callback) | 2296 void Heap::pushWeakPointerCallback(void* closure, void* object, WeakPointerCallback callback) |
| 2297 { | 2297 { |
| 2298 BasePage* page = pageFromObject(object); | 2298 BaseHeapPage* page = pageFromObject(object); |
| 2299 ASSERT(!page->orphaned()); | 2299 ASSERT(!page->orphaned()); |
| 2300 ThreadState* state = page->heap()->threadState(); | 2300 ThreadState* state = page->heap()->threadState(); |
| 2301 state->pushWeakPointerCallback(closure, callback); | 2301 state->pushWeakPointerCallback(closure, callback); |
| 2302 } | 2302 } |
| 2303 | 2303 |
| 2304 bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor) | 2304 bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor) |
| 2305 { | 2305 { |
| 2306 // For weak processing we should never reach orphaned pages since orphaned | 2306 // For weak processing we should never reach orphaned pages since orphaned |
| 2307 // pages are not traced and thus objects on those pages are never | 2307 // pages are not traced and thus objects on those pages are never |
| 2308 // registered as objects on orphaned pages. We cannot assert this here | 2308 // registered as objects on orphaned pages. We cannot assert this here |
| (...skipping 220 matching lines...) |
| 2529 for (int i = 0; i < 5; ++i) | 2529 for (int i = 0; i < 5; ++i) |
| 2530 collectGarbage(ThreadState::NoHeapPointersOnStack); | 2530 collectGarbage(ThreadState::NoHeapPointersOnStack); |
| 2531 } | 2531 } |
| 2532 | 2532 |
| 2533 double Heap::estimatedMarkingTime() | 2533 double Heap::estimatedMarkingTime() |
| 2534 { | 2534 { |
| 2535 // FIXME: Implement heuristics | 2535 // FIXME: Implement heuristics |
| 2536 return 0.0; | 2536 return 0.0; |
| 2537 } | 2537 } |
| 2538 | 2538 |
| 2539 void BaseHeap::prepareHeapForTermination() | 2539 void ThreadHeap::prepareHeapForTermination() |
| 2540 { | 2540 { |
| 2541 ASSERT(!m_firstUnsweptPage); | 2541 ASSERT(!m_firstUnsweptPage); |
| 2542 for (BasePage* page = m_firstPage; page; page = page->next()) { | 2542 for (BaseHeapPage* page = m_firstPage; page; page = page->next()) { |
| 2543 page->setTerminating(); | 2543 page->setTerminating(); |
| 2544 } | 2544 } |
| 2545 } | 2545 } |
| 2546 | 2546 |
| 2547 size_t Heap::objectPayloadSizeForTesting() | 2547 size_t Heap::objectPayloadSizeForTesting() |
| 2548 { | 2548 { |
| 2549 size_t objectPayloadSize = 0; | 2549 size_t objectPayloadSize = 0; |
| 2550 for (ThreadState* state : ThreadState::attachedThreads()) { | 2550 for (ThreadState* state : ThreadState::attachedThreads()) { |
| 2551 state->setGCState(ThreadState::GCRunning); | 2551 state->setGCState(ThreadState::GCRunning); |
| 2552 state->makeConsistentForSweeping(); | 2552 state->makeConsistentForSweeping(); |
| (...skipping 10 matching lines...) |
| 2563 if (!address) | 2563 if (!address) |
| 2564 return; | 2564 return; |
| 2565 | 2565 |
| 2566 ThreadState* state = ThreadState::current(); | 2566 ThreadState* state = ThreadState::current(); |
| 2567 if (state->sweepForbidden()) | 2567 if (state->sweepForbidden()) |
| 2568 return; | 2568 return; |
| 2569 ASSERT(!state->isInGC()); | 2569 ASSERT(!state->isInGC()); |
| 2570 | 2570 |
| 2571 // Don't promptly free large objects because their page is never reused. | 2571 // Don't promptly free large objects because their page is never reused. |
| 2572 // Don't free backings allocated on other threads. | 2572 // Don't free backings allocated on other threads. |
| 2573 BasePage* page = pageFromObject(address); | 2573 BaseHeapPage* page = pageFromObject(address); |
| 2574 if (page->isLargeObjectPage() || page->heap()->threadState() != state) | 2574 if (page->isLargeObject() || page->heap()->threadState() != state) |
| 2575 return; | 2575 return; |
| 2576 | 2576 |
| 2577 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); | 2577 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); |
| 2578 header->checkHeader(); | 2578 header->checkHeader(); |
| 2579 static_cast<NormalPage*>(page)->heapForNormalPage()->promptlyFreeObject(header); | 2579 static_cast<HeapPage*>(page)->heapForHeapPage()->promptlyFreeObject(header); |
| 2580 } | 2580 } |
| 2581 | 2581 |
| 2582 void HeapAllocator::freeVectorBacking(void* address) | 2582 void HeapAllocator::freeVectorBacking(void* address) |
| 2583 { | 2583 { |
| 2584 backingFree(address); | 2584 backingFree(address); |
| 2585 } | 2585 } |
| 2586 | 2586 |
| 2587 void HeapAllocator::freeInlineVectorBacking(void* address) | 2587 void HeapAllocator::freeInlineVectorBacking(void* address) |
| 2588 { | 2588 { |
| 2589 backingFree(address); | 2589 backingFree(address); |
| (...skipping 10 matching lines...) |
| 2600 return false; | 2600 return false; |
| 2601 | 2601 |
| 2602 ThreadState* state = ThreadState::current(); | 2602 ThreadState* state = ThreadState::current(); |
| 2603 if (state->sweepForbidden()) | 2603 if (state->sweepForbidden()) |
| 2604 return false; | 2604 return false; |
| 2605 ASSERT(!state->isInGC()); | 2605 ASSERT(!state->isInGC()); |
| 2606 ASSERT(state->isAllocationAllowed()); | 2606 ASSERT(state->isAllocationAllowed()); |
| 2607 | 2607 |
| 2608 // FIXME: Support expand for large objects. | 2608 // FIXME: Support expand for large objects. |
| 2609 // Don't expand backings allocated on other threads. | 2609 // Don't expand backings allocated on other threads. |
| 2610 BasePage* page = pageFromObject(address); | 2610 BaseHeapPage* page = pageFromObject(address); |
| 2611 if (page->isLargeObjectPage() || page->heap()->threadState() != state) | 2611 if (page->isLargeObject() || page->heap()->threadState() != state) |
| 2612 return false; | 2612 return false; |
| 2613 | 2613 |
| 2614 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); | 2614 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); |
| 2615 header->checkHeader(); | 2615 header->checkHeader(); |
| 2616 return static_cast<NormalPage*>(page)->heapForNormalPage()->expandObject(header, newSize); | 2616 return static_cast<HeapPage*>(page)->heapForHeapPage()->expandObject(header, newSize); |
| 2617 } | 2617 } |
| 2618 | 2618 |
| 2619 bool HeapAllocator::expandVectorBacking(void* address, size_t newSize) | 2619 bool HeapAllocator::expandVectorBacking(void* address, size_t newSize) |
| 2620 { | 2620 { |
| 2621 return backingExpand(address, newSize); | 2621 return backingExpand(address, newSize); |
| 2622 } | 2622 } |
| 2623 | 2623 |
| 2624 bool HeapAllocator::expandInlineVectorBacking(void* address, size_t newSize) | 2624 bool HeapAllocator::expandInlineVectorBacking(void* address, size_t newSize) |
| 2625 { | 2625 { |
| 2626 return backingExpand(address, newSize); | 2626 return backingExpand(address, newSize); |
| (...skipping 16 matching lines...) |
| 2643 return; | 2643 return; |
| 2644 | 2644 |
| 2645 ThreadState* state = ThreadState::current(); | 2645 ThreadState* state = ThreadState::current(); |
| 2646 if (state->sweepForbidden()) | 2646 if (state->sweepForbidden()) |
| 2647 return; | 2647 return; |
| 2648 ASSERT(!state->isInGC()); | 2648 ASSERT(!state->isInGC()); |
| 2649 ASSERT(state->isAllocationAllowed()); | 2649 ASSERT(state->isAllocationAllowed()); |
| 2650 | 2650 |
| 2651 // FIXME: Support shrink for large objects. | 2651 // FIXME: Support shrink for large objects. |
| 2652 // Don't shrink backings allocated on other threads. | 2652 // Don't shrink backings allocated on other threads. |
| 2653 BasePage* page = pageFromObject(address); | 2653 BaseHeapPage* page = pageFromObject(address); |
| 2654 if (page->isLargeObjectPage() || page->heap()->threadState() != state) | 2654 if (page->isLargeObject() || page->heap()->threadState() != state) |
| 2655 return; | 2655 return; |
| 2656 | 2656 |
| 2657 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); | 2657 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); |
| 2658 header->checkHeader(); | 2658 header->checkHeader(); |
| 2659 static_cast<NormalPage*>(page)->heapForNormalPage()->shrinkObject(header, quantizedShrunkSize); | 2659 static_cast<HeapPage*>(page)->heapForHeapPage()->shrinkObject(header, quantizedShrunkSize); |
| 2660 } | 2660 } |
| 2661 | 2661 |
| 2662 void HeapAllocator::shrinkVectorBackingInternal(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize) | 2662 void HeapAllocator::shrinkVectorBackingInternal(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize) |
| 2663 { | 2663 { |
| 2664 backingShrink(address, quantizedCurrentSize, quantizedShrunkSize); | 2664 backingShrink(address, quantizedCurrentSize, quantizedShrunkSize); |
| 2665 } | 2665 } |
| 2666 | 2666 |
| 2667 void HeapAllocator::shrinkInlineVectorBackingInternal(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize) | 2667 void HeapAllocator::shrinkInlineVectorBackingInternal(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize) |
| 2668 { | 2668 { |
| 2669 backingShrink(address, quantizedCurrentSize, quantizedShrunkSize); | 2669 backingShrink(address, quantizedCurrentSize, quantizedShrunkSize); |
| 2670 } | 2670 } |
| 2671 | 2671 |
| 2672 BasePage* Heap::lookup(Address address) | 2672 BaseHeapPage* Heap::lookup(Address address) |
| 2673 { | 2673 { |
| 2674 ASSERT(ThreadState::current()->isInGC()); | 2674 ASSERT(ThreadState::current()->isInGC()); |
| 2675 if (!s_regionTree) | 2675 if (!s_regionTree) |
| 2676 return nullptr; | 2676 return nullptr; |
| 2677 if (PageMemoryRegion* region = s_regionTree->lookup(address)) { | 2677 if (PageMemoryRegion* region = s_regionTree->lookup(address)) { |
| 2678 BasePage* page = region->pageFromAddress(address); | 2678 BaseHeapPage* page = region->pageFromAddress(address); |
| 2679 return page && !page->orphaned() ? page : nullptr; | 2679 return page && !page->orphaned() ? page : nullptr; |
| 2680 } | 2680 } |
| 2681 return nullptr; | 2681 return nullptr; |
| 2682 } | 2682 } |
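Editor's note: Heap::lookup delegates to s_regionTree, a search tree keyed by region address. Equivalent lookup behavior sketched with std::map; the real RegionTree is a hand-rolled binary tree over PageMemoryRegions:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <map>

struct Region {
    uintptr_t base;
    size_t size;
};

// Returns the region containing address, or nullptr. upper_bound finds the
// first region starting beyond the address; the candidate is its predecessor.
static const Region* lookupRegion(const std::map<uintptr_t, Region>& regions, uintptr_t address)
{
    std::map<uintptr_t, Region>::const_iterator it = regions.upper_bound(address);
    if (it == regions.begin())
        return nullptr;
    --it;
    const Region& region = it->second;
    return address < region.base + region.size ? &region : nullptr;
}

int main()
{
    std::map<uintptr_t, Region> regions;
    Region region = { 0x10000, 0x20000 };
    regions[region.base] = region;
    assert(lookupRegion(regions, 0x15000));
    assert(!lookupRegion(regions, 0x40000));
    assert(!lookupRegion(regions, 0x1000));
}
```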
| 2683 | 2683 |
| 2684 static Mutex& regionTreeMutex() | 2684 static Mutex& regionTreeMutex() |
| 2685 { | 2685 { |
| 2686 AtomicallyInitializedStaticReference(Mutex, mutex, new Mutex); | 2686 AtomicallyInitializedStaticReference(Mutex, mutex, new Mutex); |
| 2687 return mutex; | 2687 return mutex; |
| 2688 } | 2688 } |
| (...skipping 80 matching lines...) |
| 2769 bool Heap::s_shutdownCalled = false; | 2769 bool Heap::s_shutdownCalled = false; |
| 2770 bool Heap::s_lastGCWasConservative = false; | 2770 bool Heap::s_lastGCWasConservative = false; |
| 2771 FreePagePool* Heap::s_freePagePool; | 2771 FreePagePool* Heap::s_freePagePool; |
| 2772 OrphanedPagePool* Heap::s_orphanedPagePool; | 2772 OrphanedPagePool* Heap::s_orphanedPagePool; |
| 2773 Heap::RegionTree* Heap::s_regionTree = nullptr; | 2773 Heap::RegionTree* Heap::s_regionTree = nullptr; |
| 2774 size_t Heap::s_allocatedObjectSize = 0; | 2774 size_t Heap::s_allocatedObjectSize = 0; |
| 2775 size_t Heap::s_allocatedSpace = 0; | 2775 size_t Heap::s_allocatedSpace = 0; |
| 2776 size_t Heap::s_markedObjectSize = 0; | 2776 size_t Heap::s_markedObjectSize = 0; |
| 2777 | 2777 |
| 2778 } // namespace blink | 2778 } // namespace blink |