Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 113 matching lines...) | |
| 124 int err = munmap(m_base, m_size); | 124 int err = munmap(m_base, m_size); |
| 125 RELEASE_ASSERT(!err); | 125 RELEASE_ASSERT(!err); |
| 126 #else | 126 #else |
| 127 bool success = VirtualFree(m_base, 0, MEM_RELEASE); | 127 bool success = VirtualFree(m_base, 0, MEM_RELEASE); |
| 128 RELEASE_ASSERT(success); | 128 RELEASE_ASSERT(success); |
| 129 #endif | 129 #endif |
| 130 } | 130 } |
| 131 | 131 |
| 132 WARN_UNUSED_RETURN bool commit() | 132 WARN_UNUSED_RETURN bool commit() |
| 133 { | 133 { |
| 134 ASSERT(Heap::heapDoesNotContainCacheIsEmpty()); | |
| 134 #if OS(POSIX) | 135 #if OS(POSIX) |
| 135 int err = mprotect(m_base, m_size, PROT_READ | PROT_WRITE); | 136 int err = mprotect(m_base, m_size, PROT_READ | PROT_WRITE); |
| 136 if (!err) { | 137 if (!err) { |
| 137 madvise(m_base, m_size, MADV_NORMAL); | 138 madvise(m_base, m_size, MADV_NORMAL); |
| 138 return true; | 139 return true; |
| 139 } | 140 } |
| 140 return false; | 141 return false; |
| 141 #else | 142 #else |
| 142 void* result = VirtualAlloc(m_base, m_size, MEM_COMMIT, PAGE_READWRITE); | 143 void* result = VirtualAlloc(m_base, m_size, MEM_COMMIT, PAGE_READWRITE); |
| 143 return !!result; | 144 return !!result; |
| (...skipping 53 matching lines...) | |
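The commit()/decommit() pair above is the classic reserve-then-commit pattern for address space. Below is a minimal standalone sketch of that pattern, assuming POSIX only; the struct and its names are illustrative, not the patch's (the real class also has a Windows branch, and its decommit() is in the elided lines).

```cpp
// Sketch: reserve address space up front, then toggle pages between
// committed (usable) and decommitted (returned to the OS) states.
#include <sys/mman.h>
#include <cassert>
#include <cstddef>

struct ReservedRegion {
    char* m_base;
    size_t m_size;

    static ReservedRegion reserve(size_t size)
    {
        // PROT_NONE: the range is reserved but any touch faults until commit().
        void* base = mmap(0, size, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
        assert(base != MAP_FAILED);
        return ReservedRegion{ static_cast<char*>(base), size };
    }

    bool commit()
    {
        // Mirrors the POSIX branch above: grant access, reset paging advice.
        if (mprotect(m_base, m_size, PROT_READ | PROT_WRITE))
            return false;
        madvise(m_base, m_size, MADV_NORMAL);
        return true;
    }

    void decommit()
    {
        // The usual counterpart: tell the kernel the contents are disposable,
        // then revoke access so stale pointers fault instead of reading junk.
        madvise(m_base, m_size, MADV_DONTNEED);
        mprotect(m_base, m_size, PROT_NONE);
    }
};
```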
| 197 // Virtual memory allocation routines operate in OS page sizes. | 198 // Virtual memory allocation routines operate in OS page sizes. |
| 198 // Round up the requested size to the nearest OS page size. | 199 // Round up the requested size to the nearest OS page size. |
| 199 payloadSize = roundToOsPageSize(payloadSize); | 200 payloadSize = roundToOsPageSize(payloadSize); |
| 200 | 201 |
| 201 // Overallocate by blinkPageSize and 2 times OS page size to | 202 // Overallocate by blinkPageSize and 2 times OS page size to |
| 202 // ensure a chunk of memory which is blinkPageSize aligned and | 203 // ensure a chunk of memory which is blinkPageSize aligned and |
| 203 // has a system page before and after to use for guarding. We | 204 // has a system page before and after to use for guarding. We |
| 204 // unmap the excess memory before returning. | 205 // unmap the excess memory before returning. |
| 205 size_t allocationSize = payloadSize + 2 * osPageSize() + blinkPageSize; | 206 size_t allocationSize = payloadSize + 2 * osPageSize() + blinkPageSize; |
| 206 | 207 |
| 208 ASSERT(Heap::heapDoesNotContainCacheIsEmpty()); | |
| 207 #if OS(POSIX) | 209 #if OS(POSIX) |
| 208 Address base = static_cast<Address>(mmap(0, allocationSize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0)); | 210 Address base = static_cast<Address>(mmap(0, allocationSize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0)); |
| 209 RELEASE_ASSERT(base != MAP_FAILED); | 211 RELEASE_ASSERT(base != MAP_FAILED); |
| 210 | 212 |
| 211 Address end = base + allocationSize; | 213 Address end = base + allocationSize; |
| 212 Address alignedBase = roundToBlinkPageBoundary(base); | 214 Address alignedBase = roundToBlinkPageBoundary(base); |
| 213 Address payloadBase = alignedBase + osPageSize(); | 215 Address payloadBase = alignedBase + osPageSize(); |
| 214 Address payloadEnd = payloadBase + payloadSize; | 216 Address payloadEnd = payloadBase + payloadSize; |
| 215 Address blinkPageEnd = payloadEnd + osPageSize(); | 217 Address blinkPageEnd = payloadEnd + osPageSize(); |
| 216 | 218 |
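The over-allocation arithmetic above is easy to get wrong, so here is a hedged restatement of the layout it produces. roundUp() is a stand-in for the real roundToBlinkPageBoundary()/roundToOsPageSize() helpers, and it assumes power-of-two alignments:

```cpp
#include <cstdint>
#include <cstddef>

// Round value up to a power-of-two alignment.
static uintptr_t roundUp(uintptr_t value, size_t alignment)
{
    return (value + alignment - 1) & ~(uintptr_t)(alignment - 1);
}

struct Layout {
    uintptr_t base;        // start of the raw mmap'ed block
    uintptr_t alignedBase; // first blinkPageSize-aligned address in it
    uintptr_t payloadBase; // alignedBase + one leading guard page
    uintptr_t payloadEnd;
    uintptr_t end;         // base + allocationSize
};

Layout computeLayout(uintptr_t base, size_t payloadSize, size_t osPage, size_t blinkPage)
{
    // Matches "payloadSize + 2 * osPageSize() + blinkPageSize" above.
    size_t allocationSize = payloadSize + 2 * osPage + blinkPage;
    Layout l;
    l.base = base;
    l.alignedBase = roundUp(base, blinkPage); // at most blinkPage past base
    l.payloadBase = l.alignedBase + osPage;   // guard page before the payload
    l.payloadEnd = l.payloadBase + payloadSize;
    l.end = base + allocationSize;
    // A second guard page fits at payloadEnd; the slack [base, alignedBase)
    // and [payloadEnd + osPage, end) is what the code unmaps before returning.
    return l;
}
```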
| (...skipping 185 matching lines...) | |
| 402 return heapObjectHeader()->unmark(); | 404 return heapObjectHeader()->unmark(); |
| 403 } | 405 } |
| 404 | 406 |
| 405 template<typename Header> | 407 template<typename Header> |
| 406 bool LargeHeapObject<Header>::isMarked() | 408 bool LargeHeapObject<Header>::isMarked() |
| 407 { | 409 { |
| 408 return heapObjectHeader()->isMarked(); | 410 return heapObjectHeader()->isMarked(); |
| 409 } | 411 } |
| 410 | 412 |
| 411 template<typename Header> | 413 template<typename Header> |
| 412 bool LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address) | 414 void LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address) |
| 413 { | 415 { |
| 414 if (contains(address)) { | 416 ASSERT(contains(address)); |
| 417 if (!objectContains(address)) | |
| 418 return; | |
| 415 #if ENABLE(GC_TRACING) | 419 #if ENABLE(GC_TRACING) |
| 416 visitor->setHostInfo(&address, "stack"); | 420 visitor->setHostInfo(&address, "stack"); |
| 417 #endif | 421 #endif |
| 418 mark(visitor); | 422 mark(visitor); |
| 419 return true; | |
| 420 } | |
| 421 return false; | |
| 422 } | 423 } |
| 423 | 424 |
| 424 template<> | 425 template<> |
| 425 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor) | 426 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor) |
| 426 { | 427 { |
| 427 if (heapObjectHeader()->hasVTable() && !vTableInitialized(payload())) | 428 if (heapObjectHeader()->hasVTable() && !vTableInitialized(payload())) |
| 428 visitor->markConservatively(heapObjectHeader()); | 429 visitor->markConservatively(heapObjectHeader()); |
| 429 else | 430 else |
| 430 visitor->mark(heapObjectHeader(), heapObjectHeader()->traceCallback()); | 431 visitor->mark(heapObjectHeader(), heapObjectHeader()->traceCallback()); |
| 431 } | 432 } |
| (...skipping 102 matching lines...) | |
| 534 RELEASE_ASSERT(success); | 535 RELEASE_ASSERT(success); |
| 535 } | 536 } |
| 536 | 537 |
| 537 template<typename Header> | 538 template<typename Header> |
| 538 BaseHeapPage* ThreadHeap<Header>::heapPageFromAddress(Address address) | 539 BaseHeapPage* ThreadHeap<Header>::heapPageFromAddress(Address address) |
| 539 { | 540 { |
| 540 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) { | 541 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) { |
| 541 if (page->contains(address)) | 542 if (page->contains(address)) |
| 542 return page; | 543 return page; |
| 543 } | 544 } |
| 544 return 0; | |
| 545 } | |
| 546 | |
| 547 template<typename Header> | |
| 548 BaseHeapPage* ThreadHeap<Header>::largeHeapObjectFromAddress(Address address) | |
| 549 { | |
| 550 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) { | 545 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) { |
| 546 // Check that large pages are blinkPageSize aligned (modulo the | |
| 547 // osPageSize for the guard page). | |
| 548 ASSERT(reinterpret_cast<Address>(current) - osPageSize() == roundToBlinkPageStart(reinterpret_cast<Address>(current))); | |
| 551 if (current->contains(address)) | 549 if (current->contains(address)) |
| 552 return current; | 550 return current; |
| 553 } | 551 } |
| 554 return 0; | 552 return 0; |
| 555 } | 553 } |
| 556 | 554 |
| 557 #if ENABLE(GC_TRACING) | 555 #if ENABLE(GC_TRACING) |
| 558 template<typename Header> | 556 template<typename Header> |
| 559 const GCInfo* ThreadHeap<Header>::findGCInfoOfLargeHeapObject(Address address) | 557 const GCInfo* ThreadHeap<Header>::findGCInfoOfLargeHeapObject(Address address) |
| 560 { | 558 { |
| 561 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) { | 559 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) { |
| 562 if (current->contains(address)) | 560 if (current->contains(address)) |
| 563 return current->gcInfo(); | 561 return current->gcInfo(); |
| 564 } | 562 } |
| 565 return 0; | 563 return 0; |
| 566 } | 564 } |
| 567 #endif | 565 #endif |
| 568 | 566 |
| 569 template<typename Header> | 567 template<typename Header> |
| 570 bool ThreadHeap<Header>::checkAndMarkLargeHeapObject(Visitor* visitor, Address address) | |
| 571 { | |
| 572 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) { | |
| 573 if (current->checkAndMarkPointer(visitor, address)) | |
| 574 return true; | |
| 575 } | |
| 576 return false; | |
| 577 } | |
| 578 | |
| 579 template<typename Header> | |
| 580 void ThreadHeap<Header>::addToFreeList(Address address, size_t size) | 568 void ThreadHeap<Header>::addToFreeList(Address address, size_t size) |
| 581 { | 569 { |
| 582 ASSERT(heapPageFromAddress(address)); | 570 ASSERT(heapPageFromAddress(address)); |
| 583 ASSERT(heapPageFromAddress(address + size - 1)); | 571 ASSERT(heapPageFromAddress(address + size - 1)); |
| 584 ASSERT(size < blinkPagePayloadSize()); | 572 ASSERT(size < blinkPagePayloadSize()); |
| 585 // The free list entries are only pointer aligned (but when we allocate | 573 // The free list entries are only pointer aligned (but when we allocate |
| 586 // from them we are 8 byte aligned due to the header size). | 574 // from them we are 8 byte aligned due to the header size). |
| 587 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(Header)) & allocationMask)); | 575 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(Header)) & allocationMask)); |
| 588 ASSERT(!(size & allocationMask)); | 576 ASSERT(!(size & allocationMask)); |
| 589 ASAN_POISON_MEMORY_REGION(address, size); | 577 ASAN_POISON_MEMORY_REGION(address, size); |
| (...skipping 33 matching lines...) | |
| 623 // headerPadding<Header> bytes to ensure it is 8-byte aligned. | 611 // headerPadding<Header> bytes to ensure it is 8-byte aligned. |
| 624 allocationSize += headerPadding<Header>(); | 612 allocationSize += headerPadding<Header>(); |
| 625 | 613 |
| 626 // If ASAN is supported we add allocationGranularity bytes to the allocated space and | 614 // If ASAN is supported we add allocationGranularity bytes to the allocated space and |
| 627 // poison that to detect overflows | 615 // poison that to detect overflows |
| 628 #if defined(ADDRESS_SANITIZER) | 616 #if defined(ADDRESS_SANITIZER) |
| 629 allocationSize += allocationGranularity; | 617 allocationSize += allocationGranularity; |
| 630 #endif | 618 #endif |
| 631 if (threadState()->shouldGC()) | 619 if (threadState()->shouldGC()) |
| 632 threadState()->setGCRequested(); | 620 threadState()->setGCRequested(); |
| 621 Heap::flushHeapDoesNotContainCache(); | |
| 633 PageMemory* pageMemory = PageMemory::allocate(allocationSize); | 622 PageMemory* pageMemory = PageMemory::allocate(allocationSize); |
| 634 Address largeObjectAddress = pageMemory->writableStart(); | 623 Address largeObjectAddress = pageMemory->writableStart(); |
| 635 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); | 624 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); |
| 636 memset(headerAddress, 0, size); | 625 memset(headerAddress, 0, size); |
| 637 Header* header = new (NotNull, headerAddress) Header(size, gcInfo); | 626 Header* header = new (NotNull, headerAddress) Header(size, gcInfo); |
| 638 Address result = headerAddress + sizeof(*header); | 627 Address result = headerAddress + sizeof(*header); |
| 639 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 628 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
| 640 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState()); | 629 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState()); |
| 641 | 630 |
| 642 // Poison the object header and allocationGranularity bytes after the object | 631 // Poison the object header and allocationGranularity bytes after the object |
| 643 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); | 632 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); |
| 644 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); | 633 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); |
| 645 largeObject->link(&m_firstLargeHeapObject); | 634 largeObject->link(&m_firstLargeHeapObject); |
| 646 stats().increaseAllocatedSpace(largeObject->size()); | 635 stats().increaseAllocatedSpace(largeObject->size()); |
| 647 stats().increaseObjectSpace(largeObject->payloadSize()); | 636 stats().increaseObjectSpace(largeObject->payloadSize()); |
| 648 return result; | 637 return result; |
| 649 } | 638 } |
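The poisoning around the large object above is the ASAN red-zone technique. A reduced sketch of the idea, assuming <sanitizer/asan_interface.h> and a build with -fsanitize=address (the function name and parameters are illustrative, not from the patch):

```cpp
#include <sanitizer/asan_interface.h>
#include <cstddef>

// block must be headerSize + payloadSize + granule bytes long. Poisoning the
// header catches underflows through the returned payload pointer; poisoning a
// trailing granule catches overflows past the payload.
void* protectWithRedZones(char* block, size_t headerSize, size_t payloadSize, size_t granule)
{
    char* payload = block + headerSize;
    ASAN_POISON_MEMORY_REGION(block, headerSize);
    ASAN_POISON_MEMORY_REGION(payload + payloadSize, granule);
    return payload;
}
```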
| 650 | 639 |
| 651 template<typename Header> | 640 template<typename Header> |
| 652 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext) | 641 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext) |
| 653 { | 642 { |
| 643 flushHeapContainsCache(); | |
| 654 object->unlink(previousNext); | 644 object->unlink(previousNext); |
| 655 object->finalize(); | 645 object->finalize(); |
| 656 | 646 |
| 657 // Unpoison the object header and allocationGranularity bytes after the | 647 // Unpoison the object header and allocationGranularity bytes after the |
| 658 // object before freeing. | 648 // object before freeing. |
| 659 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header)); | 649 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header)); |
| 660 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); | 650 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); |
| 661 delete object->storage(); | 651 delete object->storage(); |
| 662 } | 652 } |
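The link()/unlink() calls in the two functions above follow the intrusive singly-linked-list idiom: callers pass the address of the previous node's next pointer ("previousNext"), so removal needs no back-pointers or list traversal. A generic sketch under that assumption (CRTP: T must derive from the node type):

```cpp
// Minimal intrusive list node; not the patch's class, just the pattern.
template<typename T>
struct IntrusiveListNode {
    T* m_next = nullptr;

    // Insert this node where *prevNext currently points (e.g. the list head).
    void link(T** prevNext)
    {
        m_next = *prevNext;
        *prevNext = static_cast<T*>(this);
    }

    // Remove this node, given the address of the pointer that points at it.
    void unlink(T** prevNext)
    {
        *prevNext = m_next;
        m_next = nullptr;
    }
};
```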
| 663 | 653 |
| (...skipping 16 matching lines...) | |
| 680 | 670 |
| 681 template<typename Header> | 671 template<typename Header> |
| 682 void ThreadHeap<Header>::clearPagePool() | 672 void ThreadHeap<Header>::clearPagePool() |
| 683 { | 673 { |
| 684 while (takePageFromPool()) { } | 674 while (takePageFromPool()) { } |
| 685 } | 675 } |
| 686 | 676 |
| 687 template<typename Header> | 677 template<typename Header> |
| 688 PageMemory* ThreadHeap<Header>::takePageFromPool() | 678 PageMemory* ThreadHeap<Header>::takePageFromPool() |
| 689 { | 679 { |
| 680 Heap::flushHeapDoesNotContainCache(); | |
| 690 while (PagePoolEntry* entry = m_pagePool) { | 681 while (PagePoolEntry* entry = m_pagePool) { |
| 691 m_pagePool = entry->next(); | 682 m_pagePool = entry->next(); |
| 692 PageMemory* storage = entry->storage(); | 683 PageMemory* storage = entry->storage(); |
| 693 delete entry; | 684 delete entry; |
| 694 | 685 |
| 695 if (storage->commit()) | 686 if (storage->commit()) |
| 696 return storage; | 687 return storage; |
| 697 | 688 |
| 698 // Failed to commit pooled storage. Release it. | 689 // Failed to commit pooled storage. Release it. |
| 699 delete storage; | 690 delete storage; |
| 700 } | 691 } |
| 701 | 692 |
| 702 return 0; | 693 return 0; |
| 703 } | 694 } |
| 704 | 695 |
| 705 template<typename Header> | 696 template<typename Header> |
| 706 void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* unused) | 697 void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* unused) |
| 707 { | 698 { |
| 699 flushHeapContainsCache(); | |
| 708 PageMemory* storage = unused->storage(); | 700 PageMemory* storage = unused->storage(); |
| 709 PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool); | 701 PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool); |
| 710 m_pagePool = entry; | 702 m_pagePool = entry; |
| 711 storage->decommit(); | 703 storage->decommit(); |
| 712 } | 704 } |
| 713 | 705 |
| 714 template<typename Header> | 706 template<typename Header> |
| 715 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo) | 707 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo) |
| 716 { | 708 { |
| 717 heapContainsCache()->flush(); | 709 Heap::flushHeapDoesNotContainCache(); |
| 718 PageMemory* pageMemory = takePageFromPool(); | 710 PageMemory* pageMemory = takePageFromPool(); |
| 719 if (!pageMemory) { | 711 if (!pageMemory) { |
| 720 pageMemory = PageMemory::allocate(blinkPagePayloadSize()); | 712 pageMemory = PageMemory::allocate(blinkPagePayloadSize()); |
| 721 RELEASE_ASSERT(pageMemory); | 713 RELEASE_ASSERT(pageMemory); |
| 722 } | 714 } |
| 723 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo); | 715 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo); |
| 724 // FIXME: Oilpan: Linking new pages into the front of the list is | 716 // FIXME: Oilpan: Linking new pages into the front of the list is |
| 725 // crucial when performing allocations during finalization because | 717 // crucial when performing allocations during finalization because |
| 726 // it ensures that those pages are not swept in the current GC | 718 // it ensures that those pages are not swept in the current GC |
| 727 // round. We should create a separate page list for that to | 719 // round. We should create a separate page list for that to |
| (...skipping 30 matching lines...) | |
| 758 // calling their finalizer methods. This can catch the cases where one object's | 750 // calling their finalizer methods. This can catch the cases where one object's |
| 759 // finalizer tries to modify another object as part of finalization. | 751 // finalizer tries to modify another object as part of finalization. |
| 760 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) | 752 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) |
| 761 page->poisonUnmarkedObjects(); | 753 page->poisonUnmarkedObjects(); |
| 762 #endif | 754 #endif |
| 763 HeapPage<Header>* page = m_firstPage; | 755 HeapPage<Header>* page = m_firstPage; |
| 764 HeapPage<Header>** previous = &m_firstPage; | 756 HeapPage<Header>** previous = &m_firstPage; |
| 765 bool pagesRemoved = false; | 757 bool pagesRemoved = false; |
| 766 while (page) { | 758 while (page) { |
| 767 if (page->isEmpty()) { | 759 if (page->isEmpty()) { |
| 760 flushHeapContainsCache(); | |
| 768 HeapPage<Header>* unused = page; | 761 HeapPage<Header>* unused = page; |
| 769 page = page->next(); | 762 page = page->next(); |
| 770 HeapPage<Header>::unlink(unused, previous); | 763 HeapPage<Header>::unlink(unused, previous); |
| 771 pagesRemoved = true; | 764 pagesRemoved = true; |
| 772 } else { | 765 } else { |
| 773 page->sweep(); | 766 page->sweep(); |
| 774 previous = &page->m_next; | 767 previous = &page->m_next; |
| 775 page = page->next(); | 768 page = page->next(); |
| 776 } | 769 } |
| 777 } | 770 } |
| 778 if (pagesRemoved) | 771 if (pagesRemoved) |
| 779 heapContainsCache()->flush(); | 772 flushHeapContainsCache(); |
| 780 | 773 |
| 781 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject; | 774 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject; |
| 782 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { | 775 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { |
| 783 if (current->isMarked()) { | 776 if (current->isMarked()) { |
| 784 stats().increaseAllocatedSpace(current->size()); | 777 stats().increaseAllocatedSpace(current->size()); |
| 785 stats().increaseObjectSpace(current->payloadSize()); | 778 stats().increaseObjectSpace(current->payloadSize()); |
| 786 current->unmark(); | 779 current->unmark(); |
| 787 previousNext = ¤t->m_next; | 780 previousNext = ¤t->m_next; |
| 788 current = current->next(); | 781 current = current->next(); |
| 789 } else { | 782 } else { |
| (...skipping 53 matching lines...) | |
| 843 ASSERT(isConsistentForGC()); | 836 ASSERT(isConsistentForGC()); |
| 844 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) | 837 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) |
| 845 page->clearMarks(); | 838 page->clearMarks(); |
| 846 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) | 839 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) |
| 847 current->unmark(); | 840 current->unmark(); |
| 848 } | 841 } |
| 849 | 842 |
| 850 template<typename Header> | 843 template<typename Header> |
| 851 void ThreadHeap<Header>::deletePages() | 844 void ThreadHeap<Header>::deletePages() |
| 852 { | 845 { |
| 853 heapContainsCache()->flush(); | 846 flushHeapContainsCache(); |
| 854 // Add all pages in the pool to the heap's list of pages before deleting | 847 // Add all pages in the pool to the heap's list of pages before deleting |
| 855 clearPagePool(); | 848 clearPagePool(); |
| 856 | 849 |
| 857 for (HeapPage<Header>* page = m_firstPage; page; ) { | 850 for (HeapPage<Header>* page = m_firstPage; page; ) { |
| 858 HeapPage<Header>* dead = page; | 851 HeapPage<Header>* dead = page; |
| 859 page = page->next(); | 852 page = page->next(); |
| 860 PageMemory* storage = dead->storage(); | 853 PageMemory* storage = dead->storage(); |
| 861 dead->~HeapPage(); | 854 dead->~HeapPage(); |
| 862 delete storage; | 855 delete storage; |
| 863 } | 856 } |
| (...skipping 191 matching lines...) | |
| 1055 objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes; | 1048 objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes; |
| 1056 objectOffset = objectStartNumber * allocationGranularity; | 1049 objectOffset = objectStartNumber * allocationGranularity; |
| 1057 Address objectAddress = objectOffset + payload(); | 1050 Address objectAddress = objectOffset + payload(); |
| 1058 Header* header = reinterpret_cast<Header*>(objectAddress); | 1051 Header* header = reinterpret_cast<Header*>(objectAddress); |
| 1059 if (header->isFree()) | 1052 if (header->isFree()) |
| 1060 return 0; | 1053 return 0; |
| 1061 return header; | 1054 return header; |
| 1062 } | 1055 } |
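The leading-zero arithmetic visible above is the tail end of an object-start bitmap lookup: bit i of the map marks an object header at payload + i * allocationGranularity, and an interior pointer is resolved by scanning backwards for the nearest set bit. The earlier part of the function is in the elided lines, so the following is a hedged reconstruction of the whole technique, not the patch's code (__builtin_clz is a GCC/Clang builtin; LSB-first bit order is assumed):

```cpp
#include <cstdint>
#include <cstddef>

const size_t allocationGranularity = 8; // assumption for the sketch

// Returns the granule index of the object containing `offset` into the
// payload, or -1 if no object start precedes it.
ptrdiff_t findObjectStart(const uint8_t* objectStartBitmap, size_t offset)
{
    size_t objectStartNumber = offset / allocationGranularity;
    size_t mapIndex = objectStartNumber / 8;
    // Mask away bits for granules above the queried address in this byte.
    uint8_t byte = objectStartBitmap[mapIndex] & ((1 << (objectStartNumber % 8 + 1)) - 1);
    while (!byte) {
        if (!mapIndex)
            return -1;
        byte = objectStartBitmap[--mapIndex];
    }
    // The highest set bit is the closest preceding object start, which is
    // what "(mapIndex * 8) + 7 - leadingZeroes" above computes.
    int leadingZeroes = __builtin_clz(byte) - 24; // byte is promoted to int
    return (ptrdiff_t)(mapIndex * 8) + 7 - leadingZeroes;
}
```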
| 1063 | 1056 |
| 1064 template<typename Header> | 1057 template<typename Header> |
| 1065 bool HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address) | 1058 void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address) |
| 1066 { | 1059 { |
| 1060 ASSERT(contains(address)); | |
| 1067 Header* header = findHeaderFromAddress(address); | 1061 Header* header = findHeaderFromAddress(address); |
| 1068 if (!header) | 1062 if (!header) |
| 1069 return false; | 1063 return; |
| 1070 | 1064 |
| 1071 #if ENABLE(GC_TRACING) | 1065 #if ENABLE(GC_TRACING) |
| 1072 visitor->setHostInfo(&address, "stack"); | 1066 visitor->setHostInfo(&address, "stack"); |
| 1073 #endif | 1067 #endif |
| 1074 if (hasVTable(header) && !vTableInitialized(header->payload())) | 1068 if (hasVTable(header) && !vTableInitialized(header->payload())) |
| 1075 visitor->markConservatively(header); | 1069 visitor->markConservatively(header); |
| 1076 else | 1070 else |
| 1077 visitor->mark(header, traceCallback(header)); | 1071 visitor->mark(header, traceCallback(header)); |
| 1078 return true; | |
| 1079 } | 1072 } |
| 1080 | 1073 |
| 1081 #if ENABLE(GC_TRACING) | 1074 #if ENABLE(GC_TRACING) |
| 1082 template<typename Header> | 1075 template<typename Header> |
| 1083 const GCInfo* HeapPage<Header>::findGCInfo(Address address) | 1076 const GCInfo* HeapPage<Header>::findGCInfo(Address address) |
| 1084 { | 1077 { |
| 1085 if (address < payload()) | 1078 if (address < payload()) |
| 1086 return 0; | 1079 return 0; |
| 1087 | 1080 |
| 1088 if (gcInfo()) // for non FinalizedObjectHeader | 1081 if (gcInfo()) // for non FinalizedObjectHeader |
| (...skipping 61 matching lines...) | |
| 1150 return header->hasVTable(); | 1143 return header->hasVTable(); |
| 1151 } | 1144 } |
| 1152 | 1145 |
| 1153 template<typename Header> | 1146 template<typename Header> |
| 1154 void LargeHeapObject<Header>::getStats(HeapStats& stats) | 1147 void LargeHeapObject<Header>::getStats(HeapStats& stats) |
| 1155 { | 1148 { |
| 1156 stats.increaseAllocatedSpace(size()); | 1149 stats.increaseAllocatedSpace(size()); |
| 1157 stats.increaseObjectSpace(payloadSize()); | 1150 stats.increaseObjectSpace(payloadSize()); |
| 1158 } | 1151 } |
| 1159 | 1152 |
| 1160 HeapContainsCache::HeapContainsCache() | 1153 template<typename Entry> |
| 1161 : m_entries(adoptArrayPtr(new Entry[HeapContainsCache::numberOfEntries])) | 1154 void HeapExtentCache<Entry>::flush() |
| 1162 { | 1155 { |
| 1156 if (m_hasEntries) { | |
| 1157 for (int i = 0; i < numberOfEntries; i++) | |
| 1158 m_entries[i] = Entry(); | |
| 1159 m_hasEntries = false; | |
| 1160 } | |
| 1163 } | 1161 } |
| 1164 | 1162 |
| 1165 void HeapContainsCache::flush() | 1163 template<typename Entry> |
| 1166 { | 1164 size_t HeapExtentCache<Entry>::hash(Address address) |
| 1167 for (int i = 0; i < numberOfEntries; i++) | |
| 1168 m_entries[i] = Entry(); | |
| 1169 } | |
| 1170 | |
| 1171 size_t HeapContainsCache::hash(Address address) | |
| 1172 { | 1165 { |
| 1173 size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2); | 1166 size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2); |
| 1174 value ^= value >> numberOfEntriesLog2; | 1167 value ^= value >> numberOfEntriesLog2; |
| 1175 value ^= value >> (numberOfEntriesLog2 * 2); | 1168 value ^= value >> (numberOfEntriesLog2 * 2); |
| 1176 value &= numberOfEntries - 1; | 1169 value &= numberOfEntries - 1; |
| 1177 return value & ~1; // Returns only even number. | 1170 return value & ~1; // Returns only even number. |
| 1178 } | 1171 } |
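The hash above folds the page-aligned address down with xor-shifts, masks to the table size, and clears bit 0 so every lookup probes an even/odd pair of slots (a two-entry bucket). A quick standalone check of that behavior; the constants are assumptions for the sketch, since the real values live in the header:

```cpp
#include <cstdint>
#include <cstdio>

const size_t blinkPageSizeLog2 = 17;   // assumed: 128 KiB blink pages
const size_t numberOfEntriesLog2 = 12; // assumed cache size: 4096 entries
const size_t numberOfEntries = 1 << numberOfEntriesLog2;

size_t hash(uintptr_t address)
{
    size_t value = address >> blinkPageSizeLog2;
    value ^= value >> numberOfEntriesLog2;
    value ^= value >> (numberOfEntriesLog2 * 2);
    value &= numberOfEntries - 1;
    return value & ~(size_t)1; // always even: first slot of the bucket
}

int main()
{
    // Consecutive pages map to bucket slots 0, 0, 2, 2, 4, 4, ...
    for (uintptr_t p = 0; p < 8; ++p)
        printf("%zu\n", hash(p << blinkPageSizeLog2));
    return 0;
}
```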
| 1179 | 1172 |
| 1180 bool HeapContainsCache::lookup(Address address, BaseHeapPage** page) | 1173 template<typename Entry> |
| 1181 { | 1174 typename Entry::LookupResult HeapExtentCache<Entry>::lookup(Address address) |
| 1182 ASSERT(page); | |
| 1183 size_t index = hash(address); | |
| 1184 ASSERT(!(index & 1)); | |
| 1185 Address cachePage = roundToBlinkPageStart(address); | |
| 1186 if (m_entries[index].address() == cachePage) { | |
| 1187 *page = m_entries[index].containingPage(); | |
| 1188 return true; | |
| 1189 } | |
| 1190 if (m_entries[index + 1].address() == cachePage) { | |
| 1191 *page = m_entries[index + 1].containingPage(); | |
| 1192 return true; | |
| 1193 } | |
| 1194 *page = 0; | |
| 1195 return false; | |
| 1196 } | |
| 1197 | |
| 1198 void HeapContainsCache::addEntry(Address address, BaseHeapPage* page) | |
| 1199 { | 1175 { |
| 1200 size_t index = hash(address); | 1176 size_t index = hash(address); |
| 1201 ASSERT(!(index & 1)); | 1177 ASSERT(!(index & 1)); |
| 1202 Address cachePage = roundToBlinkPageStart(address); | 1178 Address cachePage = roundToBlinkPageStart(address); |
| 1179 if (m_entries[index].address() == cachePage) | |
| 1180 return m_entries[index].result(); | |
| 1181 if (m_entries[index + 1].address() == cachePage) | |
| 1182 return m_entries[index + 1].result(); | |
| 1183 return 0; | |
| 1184 } | |
| 1185 | |
| 1186 template<typename Entry> | |
| 1187 void HeapExtentCache<Entry>::addEntry(Address address, typename Entry::LookupResult entry) | |
| 1188 { | |
| 1189 m_hasEntries = true; | |
| 1190 size_t index = hash(address); | |
| 1191 ASSERT(!(index & 1)); | |
| 1192 Address cachePage = roundToBlinkPageStart(address); | |
| 1203 m_entries[index + 1] = m_entries[index]; | 1193 m_entries[index + 1] = m_entries[index]; |
| 1204 m_entries[index] = Entry(cachePage, page); | 1194 m_entries[index] = Entry(cachePage, entry); |
| 1195 } | |
| 1196 | |
| 1197 // These should not be needed, but it seems impossible to persuade clang to | |
| 1198 // instantiate the template functions and export them from a shared library, so | |
| 1199 // we add these in the non-templated subclass, which does not have that issue. | |
| 1200 void HeapContainsCache::addEntry(Address address, BaseHeapPage* page) | |
| 1201 { | |
| 1202 HeapExtentCache<PositiveEntry>::addEntry(address, page); | |
| 1203 } | |
| 1204 | |
| 1205 BaseHeapPage* HeapContainsCache::lookup(Address address) | |
| 1206 { | |
| 1207 return HeapExtentCache<PositiveEntry>::lookup(address); | |
| 1208 } | |
| 1209 | |
| 1210 void Heap::flushHeapDoesNotContainCache() | |
| 1211 { | |
| 1212 s_heapDoesNotContainCache->flush(); | |
| 1205 } | 1213 } |
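The split into a templated HeapExtentCache with thin non-templated wrappers implies two Entry flavours: a positive one caching "this page contains the address" and a negative one caching "no heap page contains the address". Their definitions are outside this diff, so the shapes below are guesses that merely satisfy the interface used above (a default constructor, a two-argument constructor, address(), and result()):

```cpp
typedef char* Address;
class BaseHeapPage;
template<typename Entry> class HeapExtentCache; // as added by this patch

// Hypothetical positive entry: maps a blink page address to its page.
class PositiveEntry {
public:
    typedef BaseHeapPage* LookupResult;
    PositiveEntry() : m_address(0), m_page(0) { }
    PositiveEntry(Address address, BaseHeapPage* page) : m_address(address), m_page(page) { }
    Address address() const { return m_address; }
    LookupResult result() const { return m_page; }
private:
    Address m_address;
    BaseHeapPage* m_page;
};

// Hypothetical negative entry: lookup()'s miss value of 0 conveniently
// reads as "don't know" when LookupResult is bool.
class NegativeEntry {
public:
    typedef bool LookupResult;
    NegativeEntry() : m_address(0), m_notInHeap(false) { }
    NegativeEntry(Address address, bool notInHeap) : m_address(address), m_notInHeap(notInHeap) { }
    Address address() const { return m_address; }
    LookupResult result() const { return m_notInHeap; }
private:
    Address m_address;
    bool m_notInHeap;
};

typedef HeapExtentCache<NegativeEntry> HeapDoesNotContainCache;
```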
| 1206 | 1214 |
| 1207 void CallbackStack::init(CallbackStack** first) | 1215 void CallbackStack::init(CallbackStack** first) |
| 1208 { | 1216 { |
| 1209 // The stacks are chained, so we start by setting this to null as terminator. | 1217 // The stacks are chained, so we start by setting this to null as terminator. |
| 1210 *first = 0; | 1218 *first = 0; |
| 1211 *first = new CallbackStack(first); | 1219 *first = new CallbackStack(first); |
| 1212 } | 1220 } |
| 1213 | 1221 |
| 1214 void CallbackStack::shutdown(CallbackStack** first) | 1222 void CallbackStack::shutdown(CallbackStack** first) |
| (...skipping 253 matching lines...) | |
| 1468 { | 1476 { |
| 1469 Heap::pushWeakCellPointerCallback(cell, callback); | 1477 Heap::pushWeakCellPointerCallback(cell, callback); |
| 1470 } | 1478 } |
| 1471 }; | 1479 }; |
| 1472 | 1480 |
| 1473 void Heap::init() | 1481 void Heap::init() |
| 1474 { | 1482 { |
| 1475 ThreadState::init(); | 1483 ThreadState::init(); |
| 1476 CallbackStack::init(&s_markingStack); | 1484 CallbackStack::init(&s_markingStack); |
| 1477 CallbackStack::init(&s_weakCallbackStack); | 1485 CallbackStack::init(&s_weakCallbackStack); |
| 1486 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); | |
| 1478 s_markingVisitor = new MarkingVisitor(); | 1487 s_markingVisitor = new MarkingVisitor(); |
| 1479 } | 1488 } |
| 1480 | 1489 |
| 1481 void Heap::shutdown() | 1490 void Heap::shutdown() |
| 1482 { | 1491 { |
| 1483 s_shutdownCalled = true; | 1492 s_shutdownCalled = true; |
| 1484 ThreadState::shutdownHeapIfNecessary(); | 1493 ThreadState::shutdownHeapIfNecessary(); |
| 1485 } | 1494 } |
| 1486 | 1495 |
| 1487 void Heap::doShutdown() | 1496 void Heap::doShutdown() |
| 1488 { | 1497 { |
| 1489 // We don't want to call doShutdown() twice. | 1498 // We don't want to call doShutdown() twice. |
| 1490 if (!s_markingVisitor) | 1499 if (!s_markingVisitor) |
| 1491 return; | 1500 return; |
| 1492 | 1501 |
| 1493 ASSERT(!ThreadState::isAnyThreadInGC()); | 1502 ASSERT(!ThreadState::isAnyThreadInGC()); |
| 1494 ASSERT(!ThreadState::attachedThreads().size()); | 1503 ASSERT(!ThreadState::attachedThreads().size()); |
| 1495 delete s_markingVisitor; | 1504 delete s_markingVisitor; |
| 1496 s_markingVisitor = 0; | 1505 s_markingVisitor = 0; |
| 1506 delete s_heapDoesNotContainCache; | |
| 1507 s_heapDoesNotContainCache = 0; | |
| 1497 CallbackStack::shutdown(&s_weakCallbackStack); | 1508 CallbackStack::shutdown(&s_weakCallbackStack); |
| 1498 CallbackStack::shutdown(&s_markingStack); | 1509 CallbackStack::shutdown(&s_markingStack); |
| 1499 ThreadState::shutdown(); | 1510 ThreadState::shutdown(); |
| 1500 } | 1511 } |
| 1501 | 1512 |
| 1502 BaseHeapPage* Heap::contains(Address address) | 1513 BaseHeapPage* Heap::contains(Address address) |
| 1503 { | 1514 { |
| 1504 ASSERT(ThreadState::isAnyThreadInGC()); | 1515 ASSERT(ThreadState::isAnyThreadInGC()); |
| 1505 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 1516 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
| 1506 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 1517 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
| 1507 BaseHeapPage* page = (*it)->contains(address); | 1518 BaseHeapPage* page = (*it)->contains(address); |
| 1508 if (page) | 1519 if (page) |
| 1509 return page; | 1520 return page; |
| 1510 } | 1521 } |
| 1511 return 0; | 1522 return 0; |
| 1512 } | 1523 } |
| 1513 | 1524 |
| 1514 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) | 1525 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) |
| 1515 { | 1526 { |
| 1516 ASSERT(ThreadState::isAnyThreadInGC()); | 1527 ASSERT(ThreadState::isAnyThreadInGC()); |
| 1517 if (!address) | 1528 |
| 1529 #ifdef NDEBUG | |

wibling-chromium 2014/05/09 10:25:20: Why not do this optimization in release builds? Is…
Erik Corry 2014/05/09 10:31:54: It's confusing because the macro has negative sens…
wibling-chromium 2014/05/09 10:40:21: Doh, okay then :)
| 1530 if (s_heapDoesNotContainCache->lookup(address)) | |
| 1518 return 0; | 1531 return 0; |
| 1532 #endif | |
| 1519 | 1533 |
| 1520 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 1534 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
| 1521 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 1535 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
| 1522 if ((*it)->checkAndMarkPointer(visitor, address)) { | 1536 if ((*it)->checkAndMarkPointer(visitor, address)) { |
| 1523 // Pointer found and marked. | 1537 // Pointer was in a page of that thread. If it actually pointed |
| 1538 // into an object then that object was found and marked. | |
| 1539 ASSERT(!s_heapDoesNotContainCache->lookup(address)); | |
| 1524 return address; | 1540 return address; |
| 1525 } | 1541 } |
| 1526 } | 1542 } |
| 1543 | |
| 1544 #ifdef NDEBUG | |
| 1545 s_heapDoesNotContainCache->addEntry(address, true); | |
| 1546 #else | |
wibling-chromium 2014/05/09 10:25:20: Ditto?
Erik Corry 2014/05/09 10:31:54: Ditto
| 1547 if (!s_heapDoesNotContainCache->lookup(address)) | |
| 1548 s_heapDoesNotContainCache->addEntry(address, true); | |
| 1549 #endif | |
| 1527 return 0; | 1550 return 0; |
| 1528 } | 1551 } |
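The review thread above turns on NDEBUG's negative sense: defined means a release build (asserts disabled), so the cache is trusted outright in release, while debug builds always take the slow path and assert that it agrees with the cache. A hedged restatement of that structure, with hypothetical helpers standing in for s_heapDoesNotContainCache and the per-thread page walk:

```cpp
#include <cassert>
typedef char* Address;

// None of these names are in the patch; they stand in for the cache lookup,
// the cache insertion, and the walk over all attached threads' pages.
bool cacheSaysNotInHeap(Address address);
void rememberNotInHeap(Address address);
bool walkAllThreadPages(Address address); // marks the object on a hit

Address checkAndMarkPointerSketch(Address address)
{
#ifdef NDEBUG
    // Release: a cached negative answer skips the page walk entirely.
    if (cacheSaysNotInHeap(address))
        return 0;
#endif
    if (walkAllThreadPages(address)) {
        // The cache must never contradict a hit (verified in debug builds,
        // where the early-out above is compiled away).
        assert(!cacheSaysNotInHeap(address));
        return address;
    }
#ifdef NDEBUG
    rememberNotInHeap(address);
#else
    // Debug skipped the early-out, so the entry may already be present.
    if (!cacheSaysNotInHeap(address))
        rememberNotInHeap(address);
#endif
    return 0;
}
```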
| 1529 | 1552 |
| 1530 #if ENABLE(GC_TRACING) | 1553 #if ENABLE(GC_TRACING) |
| 1531 const GCInfo* Heap::findGCInfo(Address address) | 1554 const GCInfo* Heap::findGCInfo(Address address) |
| 1532 { | 1555 { |
| 1533 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 1556 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
| 1534 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 1557 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
| 1535 if (const GCInfo* gcInfo = (*it)->findGCInfo(address)) { | 1558 if (const GCInfo* gcInfo = (*it)->findGCInfo(address)) { |
| 1536 return gcInfo; | 1559 return gcInfo; |
| (...skipping 137 matching lines...) | |
| 1674 | 1697 |
| 1675 // Force template instantiations for the types that we need. | 1698 // Force template instantiations for the types that we need. |
| 1676 template class HeapPage<FinalizedHeapObjectHeader>; | 1699 template class HeapPage<FinalizedHeapObjectHeader>; |
| 1677 template class HeapPage<HeapObjectHeader>; | 1700 template class HeapPage<HeapObjectHeader>; |
| 1678 template class ThreadHeap<FinalizedHeapObjectHeader>; | 1701 template class ThreadHeap<FinalizedHeapObjectHeader>; |
| 1679 template class ThreadHeap<HeapObjectHeader>; | 1702 template class ThreadHeap<HeapObjectHeader>; |
| 1680 | 1703 |
| 1681 Visitor* Heap::s_markingVisitor; | 1704 Visitor* Heap::s_markingVisitor; |
| 1682 CallbackStack* Heap::s_markingStack; | 1705 CallbackStack* Heap::s_markingStack; |
| 1683 CallbackStack* Heap::s_weakCallbackStack; | 1706 CallbackStack* Heap::s_weakCallbackStack; |
| 1707 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; | |
| 1684 bool Heap::s_shutdownCalled = false; | 1708 bool Heap::s_shutdownCalled = false; |
| 1685 } | 1709 } |