OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 113 matching lines...)
124 int err = munmap(m_base, m_size); | 124 int err = munmap(m_base, m_size); |
125 RELEASE_ASSERT(!err); | 125 RELEASE_ASSERT(!err); |
126 #else | 126 #else |
127 bool success = VirtualFree(m_base, 0, MEM_RELEASE); | 127 bool success = VirtualFree(m_base, 0, MEM_RELEASE); |
128 RELEASE_ASSERT(success); | 128 RELEASE_ASSERT(success); |
129 #endif | 129 #endif |
130 } | 130 } |
131 | 131 |
132 WARN_UNUSED_RETURN bool commit() | 132 WARN_UNUSED_RETURN bool commit() |
133 { | 133 { |
| 134 ASSERT(Heap::heapDoesNotContainCacheIsEmpty()); |
134 #if OS(POSIX) | 135 #if OS(POSIX) |
135 int err = mprotect(m_base, m_size, PROT_READ | PROT_WRITE); | 136 int err = mprotect(m_base, m_size, PROT_READ | PROT_WRITE); |
136 if (!err) { | 137 if (!err) { |
137 madvise(m_base, m_size, MADV_NORMAL); | 138 madvise(m_base, m_size, MADV_NORMAL); |
138 return true; | 139 return true; |
139 } | 140 } |
140 return false; | 141 return false; |
141 #else | 142 #else |
142 void* result = VirtualAlloc(m_base, m_size, MEM_COMMIT, PAGE_READWRITE); | 143 void* result = VirtualAlloc(m_base, m_size, MEM_COMMIT, PAGE_READWRITE); |
143 return !!result; | 144 return !!result; |
(...skipping 53 matching lines...)
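
commit() above remaps the reserved region read/write and marks it back in normal use; the matching decommit() falls outside this hunk. A minimal sketch of the counterpart, assuming the usual mprotect/madvise and VirtualFree(MEM_DECOMMIT) pairing (an assumption, not code from this patch):

    bool decommit()
    {
    #if OS(POSIX)
        // Let the kernel reclaim the backing pages, then make the range
        // inaccessible so stray accesses fault instead of silently reading.
        madvise(m_base, m_size, MADV_DONTNEED);
        return !mprotect(m_base, m_size, PROT_NONE);
    #else
        // MEM_DECOMMIT releases physical storage but keeps the address
        // reservation, so a later MEM_COMMIT can reuse the same range.
        return !!VirtualFree(m_base, m_size, MEM_DECOMMIT);
    #endif
    }
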
197 // Virtual memory allocation routines operate in OS page sizes. | 198 // Virtual memory allocation routines operate in OS page sizes. |
198 // Round up the requested size to the nearest OS page size. | 199 // Round up the requested size to the nearest OS page size. |
199 payloadSize = roundToOsPageSize(payloadSize); | 200 payloadSize = roundToOsPageSize(payloadSize); |
200 | 201 |
201 // Overallocate by blinkPageSize and 2 times OS page size to | 202 // Overallocate by blinkPageSize and 2 times OS page size to |
202 // ensure a chunk of memory which is blinkPageSize aligned and | 203 // ensure a chunk of memory which is blinkPageSize aligned and |
203 // has a system page before and after to use for guarding. We | 204 // has a system page before and after to use for guarding. We |
204 // unmap the excess memory before returning. | 205 // unmap the excess memory before returning. |
205 size_t allocationSize = payloadSize + 2 * osPageSize() + blinkPageSize; | 206 size_t allocationSize = payloadSize + 2 * osPageSize() + blinkPageSize; |
206 | 207 |
| 208 ASSERT(Heap::heapDoesNotContainCacheIsEmpty()); |
207 #if OS(POSIX) | 209 #if OS(POSIX) |
208 Address base = static_cast<Address>(mmap(0, allocationSize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0)); | 210 Address base = static_cast<Address>(mmap(0, allocationSize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0)); |
209 RELEASE_ASSERT(base != MAP_FAILED); | 211 RELEASE_ASSERT(base != MAP_FAILED); |
210 | 212 |
211 Address end = base + allocationSize; | 213 Address end = base + allocationSize; |
212 Address alignedBase = roundToBlinkPageBoundary(base); | 214 Address alignedBase = roundToBlinkPageBoundary(base); |
213 Address payloadBase = alignedBase + osPageSize(); | 215 Address payloadBase = alignedBase + osPageSize(); |
214 Address payloadEnd = payloadBase + payloadSize; | 216 Address payloadEnd = payloadBase + payloadSize; |
215 Address blinkPageEnd = payloadEnd + osPageSize(); | 217 Address blinkPageEnd = payloadEnd + osPageSize(); |
216 | 218 |
(...skipping 202 matching lines...)
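
For concreteness, the overallocation arithmetic above with illustrative sizes (a 4 KB OS page and a 128 KB Blink page are assumed values for this example):

    // payloadSize (already OS-page rounded) = 64 KB
    // allocationSize = 64 KB + 2 * 4 KB + 128 KB = 200 KB
    //
    // base .......... alignedBase     first 128 KB-aligned address; prefix is unmapped
    // alignedBase ... payloadBase     4 KB guard page
    // payloadBase ... payloadEnd      64 KB payload
    // payloadEnd .... blinkPageEnd    4 KB guard page
    // blinkPageEnd .. end             unmapped excess
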
419 return heapObjectHeader()->unmark(); | 421 return heapObjectHeader()->unmark(); |
420 } | 422 } |
421 | 423 |
422 template<typename Header> | 424 template<typename Header> |
423 bool LargeHeapObject<Header>::isMarked() | 425 bool LargeHeapObject<Header>::isMarked() |
424 { | 426 { |
425 return heapObjectHeader()->isMarked(); | 427 return heapObjectHeader()->isMarked(); |
426 } | 428 } |
427 | 429 |
428 template<typename Header> | 430 template<typename Header> |
429 bool LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address) | 431 void LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address) |
430 { | 432 { |
431 if (contains(address)) { | 433 ASSERT(contains(address)); |
| 434 if (!objectContains(address)) |
| 435 return; |
432 #if ENABLE(GC_TRACING) | 436 #if ENABLE(GC_TRACING) |
433 visitor->setHostInfo(&address, "stack"); | 437 visitor->setHostInfo(&address, "stack"); |
434 #endif | 438 #endif |
435 mark(visitor); | 439 mark(visitor); |
436 return true; | |
437 } | |
438 return false; | |
439 } | 440 } |
440 | 441 |
441 template<> | 442 template<> |
442 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor) | 443 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor) |
443 { | 444 { |
444 if (heapObjectHeader()->hasVTable() && !vTableInitialized(payload())) | 445 if (heapObjectHeader()->hasVTable() && !vTableInitialized(payload())) |
445 visitor->markConservatively(heapObjectHeader()); | 446 visitor->markConservatively(heapObjectHeader()); |
446 else | 447 else |
447 visitor->mark(heapObjectHeader(), heapObjectHeader()->traceCallback()); | 448 visitor->mark(heapObjectHeader(), heapObjectHeader()->traceCallback()); |
448 } | 449 } |
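
mark() falls back to conservative marking when the vtable pointer has not been written yet, which happens if a GC strikes while an object is still being constructed. A sketch of the helper this relies on (its definition is an assumption here; it is not shown in this file):

    inline bool vTableInitialized(void* objectPointer)
    {
        // A polymorphic object's first word is its vtable pointer; storage
        // was zeroed at allocation, so a null first word means the
        // constructor has not run far enough to trace through the vtable.
        return !!(*reinterpret_cast<Address*>(objectPointer));
    }
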
(...skipping 102 matching lines...)
551 RELEASE_ASSERT(success); | 552 RELEASE_ASSERT(success); |
552 } | 553 } |
553 | 554 |
554 template<typename Header> | 555 template<typename Header> |
555 BaseHeapPage* ThreadHeap<Header>::heapPageFromAddress(Address address) | 556 BaseHeapPage* ThreadHeap<Header>::heapPageFromAddress(Address address) |
556 { | 557 { |
557 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) { | 558 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) { |
558 if (page->contains(address)) | 559 if (page->contains(address)) |
559 return page; | 560 return page; |
560 } | 561 } |
561 return 0; | |
562 } | |
563 | |
564 template<typename Header> | |
565 BaseHeapPage* ThreadHeap<Header>::largeHeapObjectFromAddress(Address address) | |
566 { | |
567 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) { | 562 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) { |
| 563 // Check that large pages are blinkPageSize aligned (modulo the |
| 564 // osPageSize for the guard page). |
| 565 ASSERT(reinterpret_cast<Address>(current) - osPageSize() == roundToBlinkPageStart(reinterpret_cast<Address>(current))); |
568 if (current->contains(address)) | 566 if (current->contains(address)) |
569 return current; | 567 return current; |
570 } | 568 } |
571 return 0; | 569 return 0; |
572 } | 570 } |
573 | 571 |
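
The new ASSERT encodes the invariant that a LargeHeapObject starts exactly one guard page past a Blink page boundary. A sketch of the rounding helper it uses, assuming blinkPageSize is a power of two (the definition is assumed from the companion header):

    inline Address roundToBlinkPageStart(Address address)
    {
        // Clear the low-order bits to land on the enclosing Blink page.
        return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & ~(blinkPageSize - 1));
    }
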
574 #if ENABLE(GC_TRACING) | 572 #if ENABLE(GC_TRACING) |
575 template<typename Header> | 573 template<typename Header> |
576 const GCInfo* ThreadHeap<Header>::findGCInfoOfLargeHeapObject(Address address) | 574 const GCInfo* ThreadHeap<Header>::findGCInfoOfLargeHeapObject(Address address) |
577 { | 575 { |
578 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) { | 576 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) { |
579 if (current->contains(address)) | 577 if (current->contains(address)) |
580 return current->gcInfo(); | 578 return current->gcInfo(); |
581 } | 579 } |
582 return 0; | 580 return 0; |
583 } | 581 } |
584 #endif | 582 #endif |
585 | 583 |
586 template<typename Header> | 584 template<typename Header> |
587 bool ThreadHeap<Header>::checkAndMarkLargeHeapObject(Visitor* visitor, Address address) | |
588 { | |
589 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) { | |
590 if (current->checkAndMarkPointer(visitor, address)) | |
591 return true; | |
592 } | |
593 return false; | |
594 } | |
595 | |
596 template<typename Header> | |
597 void ThreadHeap<Header>::addToFreeList(Address address, size_t size) | 585 void ThreadHeap<Header>::addToFreeList(Address address, size_t size) |
598 { | 586 { |
599 ASSERT(heapPageFromAddress(address)); | 587 ASSERT(heapPageFromAddress(address)); |
600 ASSERT(heapPageFromAddress(address + size - 1)); | 588 ASSERT(heapPageFromAddress(address + size - 1)); |
601 ASSERT(size < blinkPagePayloadSize()); | 589 ASSERT(size < blinkPagePayloadSize()); |
602 // The free list entries are only pointer aligned (but when we allocate | 590 // The free list entries are only pointer aligned (but when we allocate |
603 // from them we are 8 byte aligned due to the header size). | 591 // from them we are 8 byte aligned due to the header size). |
604 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(Header)) & allocationMask)); | 592 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(Header)) & allocationMask)); |
605 ASSERT(!(size & allocationMask)); | 593 ASSERT(!(size & allocationMask)); |
606 ASAN_POISON_MEMORY_REGION(address, size); | 594 ASAN_POISON_MEMORY_REGION(address, size); |
(...skipping 33 matching lines...)
640 // headerPadding<Header> bytes to ensure it is 8 byte aligned. | 628 // headerPadding<Header> bytes to ensure it is 8 byte aligned. |
641 allocationSize += headerPadding<Header>(); | 629 allocationSize += headerPadding<Header>(); |
642 | 630 |
643 // If ASAN is supported we add allocationGranularity bytes to the allocated space and | 631 // If ASAN is supported we add allocationGranularity bytes to the allocated space and |
644 // poison that to detect overflows | 632 // poison that to detect overflows |
645 #if defined(ADDRESS_SANITIZER) | 633 #if defined(ADDRESS_SANITIZER) |
646 allocationSize += allocationGranularity; | 634 allocationSize += allocationGranularity; |
647 #endif | 635 #endif |
648 if (threadState()->shouldGC()) | 636 if (threadState()->shouldGC()) |
649 threadState()->setGCRequested(); | 637 threadState()->setGCRequested(); |
| 638 Heap::flushHeapDoesNotContainCache(); |
650 PageMemory* pageMemory = PageMemory::allocate(allocationSize); | 639 PageMemory* pageMemory = PageMemory::allocate(allocationSize); |
651 Address largeObjectAddress = pageMemory->writableStart(); | 640 Address largeObjectAddress = pageMemory->writableStart(); |
652 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); | 641 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); |
653 memset(headerAddress, 0, size); | 642 memset(headerAddress, 0, size); |
654 Header* header = new (NotNull, headerAddress) Header(size, gcInfo); | 643 Header* header = new (NotNull, headerAddress) Header(size, gcInfo); |
655 Address result = headerAddress + sizeof(*header); | 644 Address result = headerAddress + sizeof(*header); |
656 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 645 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
657 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState()); | 646 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState()); |
658 | 647 |
659 // Poison the object header and allocationGranularity bytes after the object | 648 // Poison the object header and allocationGranularity bytes after the object |
660 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); | 649 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); |
661 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); | 650 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); |
662 largeObject->link(&m_firstLargeHeapObject); | 651 largeObject->link(&m_firstLargeHeapObject); |
663 stats().increaseAllocatedSpace(largeObject->size()); | 652 stats().increaseAllocatedSpace(largeObject->size()); |
664 stats().increaseObjectSpace(largeObject->payloadSize()); | 653 stats().increaseObjectSpace(largeObject->payloadSize()); |
665 return result; | 654 return result; |
666 } | 655 } |
667 | 656 |
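
The pointer arithmetic in allocateLargeObject() lays the writable region out front to back; a recap of the offsets it computes:

    // largeObjectAddress = pageMemory->writableStart()
    //   + sizeof(LargeHeapObject<Header>)   bookkeeping for the large object
    //   + headerPadding<Header>()           pads the header to 8-byte alignment
    //   = headerAddress                     Header constructed here
    //   + sizeof(Header)
    //   = result                            payload handed back to the caller
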
668 template<typename Header> | 657 template<typename Header> |
669 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext) | 658 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext) |
670 { | 659 { |
| 660 flushHeapContainsCache(); |
671 object->unlink(previousNext); | 661 object->unlink(previousNext); |
672 object->finalize(); | 662 object->finalize(); |
673 | 663 |
674 // Unpoison the object header and allocationGranularity bytes after the | 664 // Unpoison the object header and allocationGranularity bytes after the |
675 // object before freeing. | 665 // object before freeing. |
676 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header)); | 666 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header)); |
677 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); | 667 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); |
678 delete object->storage(); | 668 delete object->storage(); |
679 } | 669 } |
680 | 670 |
(...skipping 16 matching lines...)
697 | 687 |
698 template<typename Header> | 688 template<typename Header> |
699 void ThreadHeap<Header>::clearPagePool() | 689 void ThreadHeap<Header>::clearPagePool() |
700 { | 690 { |
701 while (takePageFromPool()) { } | 691 while (takePageFromPool()) { } |
702 } | 692 } |
703 | 693 |
704 template<typename Header> | 694 template<typename Header> |
705 PageMemory* ThreadHeap<Header>::takePageFromPool() | 695 PageMemory* ThreadHeap<Header>::takePageFromPool() |
706 { | 696 { |
| 697 Heap::flushHeapDoesNotContainCache(); |
707 while (PagePoolEntry* entry = m_pagePool) { | 698 while (PagePoolEntry* entry = m_pagePool) { |
708 m_pagePool = entry->next(); | 699 m_pagePool = entry->next(); |
709 PageMemory* storage = entry->storage(); | 700 PageMemory* storage = entry->storage(); |
710 delete entry; | 701 delete entry; |
711 | 702 |
712 if (storage->commit()) | 703 if (storage->commit()) |
713 return storage; | 704 return storage; |
714 | 705 |
715 // Failed to commit pooled storage. Release it. | 706 // Failed to commit pooled storage. Release it. |
716 delete storage; | 707 delete storage; |
717 } | 708 } |
718 | 709 |
719 return 0; | 710 return 0; |
720 } | 711 } |
721 | 712 |
722 template<typename Header> | 713 template<typename Header> |
723 void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* unused) | 714 void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* unused) |
724 { | 715 { |
| 716 flushHeapContainsCache(); |
725 PageMemory* storage = unused->storage(); | 717 PageMemory* storage = unused->storage(); |
726 PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool); | 718 PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool); |
727 m_pagePool = entry; | 719 m_pagePool = entry; |
728 storage->decommit(); | 720 storage->decommit(); |
729 } | 721 } |
730 | 722 |
731 template<typename Header> | 723 template<typename Header> |
732 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo) | 724 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo) |
733 { | 725 { |
734 heapContainsCache()->flush(); | 726 Heap::flushHeapDoesNotContainCache(); |
735 PageMemory* pageMemory = takePageFromPool(); | 727 PageMemory* pageMemory = takePageFromPool(); |
736 if (!pageMemory) { | 728 if (!pageMemory) { |
737 pageMemory = PageMemory::allocate(blinkPagePayloadSize()); | 729 pageMemory = PageMemory::allocate(blinkPagePayloadSize()); |
738 RELEASE_ASSERT(pageMemory); | 730 RELEASE_ASSERT(pageMemory); |
739 } | 731 } |
740 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo); | 732 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo); |
741 // FIXME: Oilpan: Linking new pages into the front of the list is | 733 // FIXME: Oilpan: Linking new pages into the front of the list is |
742 // crucial when performing allocations during finalization because | 734 // crucial when performing allocations during finalization because |
743 // it ensures that those pages are not swept in the current GC | 735 // it ensures that those pages are not swept in the current GC |
744 // round. We should create a separate page list for that to | 736 // round. We should create a separate page list for that to |
(...skipping 30 matching lines...)
775 // calling their finalizer methods. This can catch the cases where one object's | 767 // calling their finalizer methods. This can catch the cases where one object's |
776 // finalizer tries to modify another object as part of finalization. | 768 // finalizer tries to modify another object as part of finalization. |
777 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) | 769 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) |
778 page->poisonUnmarkedObjects(); | 770 page->poisonUnmarkedObjects(); |
779 #endif | 771 #endif |
780 HeapPage<Header>* page = m_firstPage; | 772 HeapPage<Header>* page = m_firstPage; |
781 HeapPage<Header>** previous = &m_firstPage; | 773 HeapPage<Header>** previous = &m_firstPage; |
782 bool pagesRemoved = false; | 774 bool pagesRemoved = false; |
783 while (page) { | 775 while (page) { |
784 if (page->isEmpty()) { | 776 if (page->isEmpty()) { |
| 777 flushHeapContainsCache(); |
785 HeapPage<Header>* unused = page; | 778 HeapPage<Header>* unused = page; |
786 page = page->next(); | 779 page = page->next(); |
787 HeapPage<Header>::unlink(unused, previous); | 780 HeapPage<Header>::unlink(unused, previous); |
788 pagesRemoved = true; | 781 pagesRemoved = true; |
789 } else { | 782 } else { |
790 page->sweep(); | 783 page->sweep(); |
791 previous = &page->m_next; | 784 previous = &page->m_next; |
792 page = page->next(); | 785 page = page->next(); |
793 } | 786 } |
794 } | 787 } |
795 if (pagesRemoved) | 788 if (pagesRemoved) |
796 heapContainsCache()->flush(); | 789 flushHeapContainsCache(); |
797 | 790 |
798 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject; | 791 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject; |
799 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { | 792 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { |
800 if (current->isMarked()) { | 793 if (current->isMarked()) { |
801 stats().increaseAllocatedSpace(current->size()); | 794 stats().increaseAllocatedSpace(current->size()); |
802 stats().increaseObjectSpace(current->payloadSize()); | 795 stats().increaseObjectSpace(current->payloadSize()); |
803 current->unmark(); | 796 current->unmark(); |
804 previousNext = ¤t->m_next; | 797 previousNext = ¤t->m_next; |
805 current = current->next(); | 798 current = current->next(); |
806 } else { | 799 } else { |
(...skipping 53 matching lines...)
860 ASSERT(isConsistentForGC()); | 853 ASSERT(isConsistentForGC()); |
861 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) | 854 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) |
862 page->clearMarks(); | 855 page->clearMarks(); |
863 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) | 856 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) |
864 current->unmark(); | 857 current->unmark(); |
865 } | 858 } |
866 | 859 |
867 template<typename Header> | 860 template<typename Header> |
868 void ThreadHeap<Header>::deletePages() | 861 void ThreadHeap<Header>::deletePages() |
869 { | 862 { |
870 heapContainsCache()->flush(); | 863 flushHeapContainsCache(); |
871 // Add all pages in the pool to the heap's list of pages before deleting them. | 864 // Add all pages in the pool to the heap's list of pages before deleting them. |
872 clearPagePool(); | 865 clearPagePool(); |
873 | 866 |
874 for (HeapPage<Header>* page = m_firstPage; page; ) { | 867 for (HeapPage<Header>* page = m_firstPage; page; ) { |
875 HeapPage<Header>* dead = page; | 868 HeapPage<Header>* dead = page; |
876 page = page->next(); | 869 page = page->next(); |
877 PageMemory* storage = dead->storage(); | 870 PageMemory* storage = dead->storage(); |
878 dead->~HeapPage(); | 871 dead->~HeapPage(); |
879 delete storage; | 872 delete storage; |
880 } | 873 } |
(...skipping 191 matching lines...)
1072 objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes; | 1065 objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes; |
1073 objectOffset = objectStartNumber * allocationGranularity; | 1066 objectOffset = objectStartNumber * allocationGranularity; |
1074 Address objectAddress = objectOffset + payload(); | 1067 Address objectAddress = objectOffset + payload(); |
1075 Header* header = reinterpret_cast<Header*>(objectAddress); | 1068 Header* header = reinterpret_cast<Header*>(objectAddress); |
1076 if (header->isFree()) | 1069 if (header->isFree()) |
1077 return 0; | 1070 return 0; |
1078 return header; | 1071 return header; |
1079 } | 1072 } |
1080 | 1073 |
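
The bit arithmetic above reads an object-start bitmap: each set bit marks an allocationGranularity-sized cell that begins an object, and the leading-zero count selects the highest object start at or below the queried address. A worked byte with illustrative numbers:

    // mapByte = 0b00100100, so leadingZeroes = 2
    // highest set bit position = 7 - 2 = 5
    // objectStartNumber = mapIndex * 8 + 5
    // objectOffset = objectStartNumber * allocationGranularity
    // header = payload() + objectOffset
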
1081 template<typename Header> | 1074 template<typename Header> |
1082 bool HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address) | 1075 void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address) |
1083 { | 1076 { |
| 1077 ASSERT(contains(address)); |
1084 Header* header = findHeaderFromAddress(address); | 1078 Header* header = findHeaderFromAddress(address); |
1085 if (!header) | 1079 if (!header) |
1086 return false; | 1080 return; |
1087 | 1081 |
1088 #if ENABLE(GC_TRACING) | 1082 #if ENABLE(GC_TRACING) |
1089 visitor->setHostInfo(&address, "stack"); | 1083 visitor->setHostInfo(&address, "stack"); |
1090 #endif | 1084 #endif |
1091 if (hasVTable(header) && !vTableInitialized(header->payload())) | 1085 if (hasVTable(header) && !vTableInitialized(header->payload())) |
1092 visitor->markConservatively(header); | 1086 visitor->markConservatively(header); |
1093 else | 1087 else |
1094 visitor->mark(header, traceCallback(header)); | 1088 visitor->mark(header, traceCallback(header)); |
1095 return true; | |
1096 } | 1089 } |
1097 | 1090 |
1098 #if ENABLE(GC_TRACING) | 1091 #if ENABLE(GC_TRACING) |
1099 template<typename Header> | 1092 template<typename Header> |
1100 const GCInfo* HeapPage<Header>::findGCInfo(Address address) | 1093 const GCInfo* HeapPage<Header>::findGCInfo(Address address) |
1101 { | 1094 { |
1102 if (address < payload()) | 1095 if (address < payload()) |
1103 return 0; | 1096 return 0; |
1104 | 1097 |
1105 if (gcInfo()) // for non FinalizedObjectHeader | 1098 if (gcInfo()) // for non FinalizedObjectHeader |
(...skipping 61 matching lines...)
1167 return header->hasVTable(); | 1160 return header->hasVTable(); |
1168 } | 1161 } |
1169 | 1162 |
1170 template<typename Header> | 1163 template<typename Header> |
1171 void LargeHeapObject<Header>::getStats(HeapStats& stats) | 1164 void LargeHeapObject<Header>::getStats(HeapStats& stats) |
1172 { | 1165 { |
1173 stats.increaseAllocatedSpace(size()); | 1166 stats.increaseAllocatedSpace(size()); |
1174 stats.increaseObjectSpace(payloadSize()); | 1167 stats.increaseObjectSpace(payloadSize()); |
1175 } | 1168 } |
1176 | 1169 |
1177 HeapContainsCache::HeapContainsCache() | 1170 template<typename Entry> |
1178 : m_entries(adoptArrayPtr(new Entry[HeapContainsCache::numberOfEntries])) | 1171 void HeapExtentCache<Entry>::flush() |
1179 { | 1172 { |
| 1173 if (m_hasEntries) { |
| 1174 for (int i = 0; i < numberOfEntries; i++) |
| 1175 m_entries[i] = Entry(); |
| 1176 m_hasEntries = false; |
| 1177 } |
1180 } | 1178 } |
1181 | 1179 |
1182 void HeapContainsCache::flush() | 1180 template<typename Entry> |
1183 { | 1181 size_t HeapExtentCache<Entry>::hash(Address address) |
1184 for (int i = 0; i < numberOfEntries; i++) | |
1185 m_entries[i] = Entry(); | |
1186 } | |
1187 | |
1188 size_t HeapContainsCache::hash(Address address) | |
1189 { | 1182 { |
1190 size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2); | 1183 size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2); |
1191 value ^= value >> numberOfEntriesLog2; | 1184 value ^= value >> numberOfEntriesLog2; |
1192 value ^= value >> (numberOfEntriesLog2 * 2); | 1185 value ^= value >> (numberOfEntriesLog2 * 2); |
1193 value &= numberOfEntries - 1; | 1186 value &= numberOfEntries - 1; |
1194 return value & ~1; // Returns only even number. | 1187 return value & ~1; // Returns only even number. |
1195 } | 1188 } |
1196 | 1189 |
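
A property of hash() worth noting: the low blinkPageSizeLog2 bits are discarded first, so every interior pointer into one Blink page maps to the same even slot, and one cached entry answers for the whole page. An illustrative check (pageStart is a hypothetical Blink-page-aligned address, evaluated in the cache's scope):

    Address interior1 = pageStart + 8;
    Address interior2 = pageStart + blinkPageSize - 8;
    ASSERT(hash(interior1) == hash(interior2)); // same page -> same slot pair
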
1197 bool HeapContainsCache::lookup(Address address, BaseHeapPage** page) | 1190 template<typename Entry> |
1198 { | 1191 typename Entry::LookupResult HeapExtentCache<Entry>::lookup(Address address) |
1199 ASSERT(page); | |
1200 size_t index = hash(address); | |
1201 ASSERT(!(index & 1)); | |
1202 Address cachePage = roundToBlinkPageStart(address); | |
1203 if (m_entries[index].address() == cachePage) { | |
1204 *page = m_entries[index].containingPage(); | |
1205 return true; | |
1206 } | |
1207 if (m_entries[index + 1].address() == cachePage) { | |
1208 *page = m_entries[index + 1].containingPage(); | |
1209 return true; | |
1210 } | |
1211 *page = 0; | |
1212 return false; | |
1213 } | |
1214 | |
1215 void HeapContainsCache::addEntry(Address address, BaseHeapPage* page) | |
1216 { | 1192 { |
1217 size_t index = hash(address); | 1193 size_t index = hash(address); |
1218 ASSERT(!(index & 1)); | 1194 ASSERT(!(index & 1)); |
1219 Address cachePage = roundToBlinkPageStart(address); | 1195 Address cachePage = roundToBlinkPageStart(address); |
| 1196 if (m_entries[index].address() == cachePage) |
| 1197 return m_entries[index].result(); |
| 1198 if (m_entries[index + 1].address() == cachePage) |
| 1199 return m_entries[index + 1].result(); |
| 1200 return 0; |
| 1201 } |
| 1202 |
| 1203 template<typename Entry> |
| 1204 void HeapExtentCache<Entry>::addEntry(Address address, typename Entry::LookupResult entry) |
| 1205 { |
| 1206 m_hasEntries = true; |
| 1207 size_t index = hash(address); |
| 1208 ASSERT(!(index & 1)); |
| 1209 Address cachePage = roundToBlinkPageStart(address); |
1220 m_entries[index + 1] = m_entries[index]; | 1210 m_entries[index + 1] = m_entries[index]; |
1221 m_entries[index] = Entry(cachePage, page); | 1211 m_entries[index] = Entry(cachePage, entry); |
| 1212 } |
| 1213 |
| 1214 // These should not be needed, but it seems impossible to persuade clang to |
| 1215 // instantiate the template functions and export them from a shared library, so |
| 1216 // we add these in the non-templated subclass, which does not have that issue. |
| 1217 void HeapContainsCache::addEntry(Address address, BaseHeapPage* page) |
| 1218 { |
| 1219 HeapExtentCache<PositiveEntry>::addEntry(address, page); |
| 1220 } |
| 1221 |
| 1222 BaseHeapPage* HeapContainsCache::lookup(Address address) |
| 1223 { |
| 1224 return HeapExtentCache<PositiveEntry>::lookup(address); |
| 1225 } |
| 1226 |
| 1227 void Heap::flushHeapDoesNotContainCache() |
| 1228 { |
| 1229 s_heapDoesNotContainCache->flush(); |
1222 } | 1230 } |
1223 | 1231 |
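
A typical caller of the positive cache, sketched hypothetically after the way ThreadState consults it (this function is not part of the patch): probe the cache first, fall back to walking the heaps, and memoize any hit:

    BaseHeapPage* ThreadState::heapPageFromAddress(Address address)
    {
        if (BaseHeapPage* page = heapContainsCache()->lookup(address))
            return page; // Cache hit: no heap walk needed.
        for (int i = 0; i < NumberOfHeaps; i++) {
            if (BaseHeapPage* page = m_heaps[i]->heapPageFromAddress(address)) {
                heapContainsCache()->addEntry(address, page);
                return page;
            }
        }
        return 0; // Not in this thread's heaps.
    }
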
1224 void CallbackStack::init(CallbackStack** first) | 1232 void CallbackStack::init(CallbackStack** first) |
1225 { | 1233 { |
1226 // The stacks are chained, so we start by setting this to null as terminator. | 1234 // The stacks are chained, so we start by setting this to null as terminator. |
1227 *first = 0; | 1235 *first = 0; |
1228 *first = new CallbackStack(first); | 1236 *first = new CallbackStack(first); |
1229 } | 1237 } |
1230 | 1238 |
1231 void CallbackStack::shutdown(CallbackStack** first) | 1239 void CallbackStack::shutdown(CallbackStack** first) |
(...skipping 253 matching lines...)
1485 { | 1493 { |
1486 Heap::pushWeakCellPointerCallback(cell, callback); | 1494 Heap::pushWeakCellPointerCallback(cell, callback); |
1487 } | 1495 } |
1488 }; | 1496 }; |
1489 | 1497 |
1490 void Heap::init() | 1498 void Heap::init() |
1491 { | 1499 { |
1492 ThreadState::init(); | 1500 ThreadState::init(); |
1493 CallbackStack::init(&s_markingStack); | 1501 CallbackStack::init(&s_markingStack); |
1494 CallbackStack::init(&s_weakCallbackStack); | 1502 CallbackStack::init(&s_weakCallbackStack); |
| 1503 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); |
1495 s_markingVisitor = new MarkingVisitor(); | 1504 s_markingVisitor = new MarkingVisitor(); |
1496 } | 1505 } |
1497 | 1506 |
1498 void Heap::shutdown() | 1507 void Heap::shutdown() |
1499 { | 1508 { |
1500 s_shutdownCalled = true; | 1509 s_shutdownCalled = true; |
1501 ThreadState::shutdownHeapIfNecessary(); | 1510 ThreadState::shutdownHeapIfNecessary(); |
1502 } | 1511 } |
1503 | 1512 |
1504 void Heap::doShutdown() | 1513 void Heap::doShutdown() |
1505 { | 1514 { |
1506 // We don't want to call doShutdown() twice. | 1515 // We don't want to call doShutdown() twice. |
1507 if (!s_markingVisitor) | 1516 if (!s_markingVisitor) |
1508 return; | 1517 return; |
1509 | 1518 |
1510 ASSERT(!ThreadState::isAnyThreadInGC()); | 1519 ASSERT(!ThreadState::isAnyThreadInGC()); |
1511 ASSERT(!ThreadState::attachedThreads().size()); | 1520 ASSERT(!ThreadState::attachedThreads().size()); |
1512 delete s_markingVisitor; | 1521 delete s_markingVisitor; |
1513 s_markingVisitor = 0; | 1522 s_markingVisitor = 0; |
| 1523 delete s_heapDoesNotContainCache; |
| 1524 s_heapDoesNotContainCache = 0; |
1514 CallbackStack::shutdown(&s_weakCallbackStack); | 1525 CallbackStack::shutdown(&s_weakCallbackStack); |
1515 CallbackStack::shutdown(&s_markingStack); | 1526 CallbackStack::shutdown(&s_markingStack); |
1516 ThreadState::shutdown(); | 1527 ThreadState::shutdown(); |
1517 } | 1528 } |
1518 | 1529 |
1519 BaseHeapPage* Heap::contains(Address address) | 1530 BaseHeapPage* Heap::contains(Address address) |
1520 { | 1531 { |
1521 ASSERT(ThreadState::isAnyThreadInGC()); | 1532 ASSERT(ThreadState::isAnyThreadInGC()); |
1522 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 1533 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
1523 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 1534 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
1524 BaseHeapPage* page = (*it)->contains(address); | 1535 BaseHeapPage* page = (*it)->contains(address); |
1525 if (page) | 1536 if (page) |
1526 return page; | 1537 return page; |
1527 } | 1538 } |
1528 return 0; | 1539 return 0; |
1529 } | 1540 } |
1530 | 1541 |
1531 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) | 1542 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) |
1532 { | 1543 { |
1533 ASSERT(ThreadState::isAnyThreadInGC()); | 1544 ASSERT(ThreadState::isAnyThreadInGC()); |
1534 if (!address) | 1545 |
| 1546 #ifdef NDEBUG |
| 1547 if (s_heapDoesNotContainCache->lookup(address)) |
1535 return 0; | 1548 return 0; |
| 1549 #endif |
1536 | 1550 |
1537 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 1551 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
1538 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 1552 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
1539 if ((*it)->checkAndMarkPointer(visitor, address)) { | 1553 if ((*it)->checkAndMarkPointer(visitor, address)) { |
1540 // Pointer found and marked. | 1554 // Pointer was in a page of that thread. If it actually pointed |
| 1555 // into an object then that object was found and marked. |
| 1556 ASSERT(!s_heapDoesNotContainCache->lookup(address)); |
1541 return address; | 1557 return address; |
1542 } | 1558 } |
1543 } | 1559 } |
| 1560 |
| 1561 #ifdef NDEBUG |
| 1562 s_heapDoesNotContainCache->addEntry(address, true); |
| 1563 #else |
| 1564 if (!s_heapDoesNotContainCache->lookup(address)) |
| 1565 s_heapDoesNotContainCache->addEntry(address, true); |
| 1566 #endif |
1544 return 0; | 1567 return 0; |
1545 } | 1568 } |
1546 | 1569 |
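
The declarations behind the two caches live in the companion header; a minimal sketch of the assumed relationship (the PositiveEntry and NegativeEntry definitions are assumptions, not shown in this file):

    // Positive cache: Blink page -> owning BaseHeapPage*.
    class HeapContainsCache : public HeapExtentCache<PositiveEntry> {
    public:
        BaseHeapPage* lookup(Address);
        void addEntry(Address, BaseHeapPage*);
    };

    // Negative cache: remembers pages known not to be heap pages; lookup()
    // returning true means the address can be rejected without a heap walk.
    class HeapDoesNotContainCache : public HeapExtentCache<NegativeEntry> { };
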
1547 #if ENABLE(GC_TRACING) | 1570 #if ENABLE(GC_TRACING) |
1548 const GCInfo* Heap::findGCInfo(Address address) | 1571 const GCInfo* Heap::findGCInfo(Address address) |
1549 { | 1572 { |
1550 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 1573 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
1551 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 1574 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
1552 if (const GCInfo* gcInfo = (*it)->findGCInfo(address)) { | 1575 if (const GCInfo* gcInfo = (*it)->findGCInfo(address)) { |
1553 return gcInfo; | 1576 return gcInfo; |
(...skipping 140 matching lines...)
1694 | 1717 |
1695 // Force template instantiations for the types that we need. | 1718 // Force template instantiations for the types that we need. |
1696 template class HeapPage<FinalizedHeapObjectHeader>; | 1719 template class HeapPage<FinalizedHeapObjectHeader>; |
1697 template class HeapPage<HeapObjectHeader>; | 1720 template class HeapPage<HeapObjectHeader>; |
1698 template class ThreadHeap<FinalizedHeapObjectHeader>; | 1721 template class ThreadHeap<FinalizedHeapObjectHeader>; |
1699 template class ThreadHeap<HeapObjectHeader>; | 1722 template class ThreadHeap<HeapObjectHeader>; |
1700 | 1723 |
1701 Visitor* Heap::s_markingVisitor; | 1724 Visitor* Heap::s_markingVisitor; |
1702 CallbackStack* Heap::s_markingStack; | 1725 CallbackStack* Heap::s_markingStack; |
1703 CallbackStack* Heap::s_weakCallbackStack; | 1726 CallbackStack* Heap::s_weakCallbackStack; |
| 1727 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; |
1704 bool Heap::s_shutdownCalled = false; | 1728 bool Heap::s_shutdownCalled = false; |
1705 } | 1729 } |