Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 391 matching lines...) | |
| 402 return heapObjectHeader()->unmark(); | 402 return heapObjectHeader()->unmark(); |
| 403 } | 403 } |
| 404 | 404 |
| 405 template<typename Header> | 405 template<typename Header> |
| 406 bool LargeHeapObject<Header>::isMarked() | 406 bool LargeHeapObject<Header>::isMarked() |
| 407 { | 407 { |
| 408 return heapObjectHeader()->isMarked(); | 408 return heapObjectHeader()->isMarked(); |
| 409 } | 409 } |
| 410 | 410 |
| 411 template<typename Header> | 411 template<typename Header> |
| 412 bool LargeHeapObject&lt;Header&gt;::checkAndMarkPointer(Visitor* visitor, Address address) | 412 void LargeHeapObject&lt;Header&gt;::checkAndMarkPointer(Visitor* visitor, Address address) |
| 413 { | 413 { |
| 414 if (contains(address)) { | 414 ASSERT(contains(address)); |
| 415 if (objectContains(address)) { | |
| 415 #if ENABLE(GC_TRACING) | 416 #if ENABLE(GC_TRACING) |
| 416 visitor->setHostInfo(&address, "stack"); | 417 visitor->setHostInfo(&address, "stack"); |
| 417 #endif | 418 #endif |
| 418 mark(visitor); | 419 mark(visitor); |
| 419 return true; | |
| 420 } | 420 } |
| 421 return false; | |
| 422 } | 421 } |
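The change above turns checkAndMarkPointer from a bool-returning probe into a void function whose caller must already have established containment, hence the ASSERT replacing the contains() test. A minimal standalone sketch of that contract change, using placeholder types rather than Blink's:

```cpp
// Sketch only: placeholder Page type, not Blink's LargeHeapObject.
#include <cassert>
#include <cstdint>

using Address = uint8_t*;

struct Page {
    Address begin;
    Address end;
    bool marked = false;

    bool contains(Address a) const { return begin <= a && a < end; }

    // New-style contract: containment is a precondition, not a result.
    void checkAndMarkPointer(Address a)
    {
        assert(contains(a));
        marked = true; // stand-in for visitor->mark(...)
    }
};

// Caller-side pattern: resolve the containing page first, then mark.
inline bool tryMark(Page* page, Address a)
{
    if (!page || !page->contains(a))
        return false;
    page->checkAndMarkPointer(a);
    return true;
}
```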
| 423 | 422 |
| 424 template<> | 423 template<> |
| 425 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor) | 424 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor) |
| 426 { | 425 { |
| 427 if (heapObjectHeader()->hasVTable() && !vTableInitialized(payload())) | 426 if (heapObjectHeader()->hasVTable() && !vTableInitialized(payload())) |
| 428 visitor->markConservatively(heapObjectHeader()); | 427 visitor->markConservatively(heapObjectHeader()); |
| 429 else | 428 else |
| 430 visitor->mark(heapObjectHeader(), heapObjectHeader()->traceCallback()); | 429 visitor->mark(heapObjectHeader(), heapObjectHeader()->traceCallback()); |
| 431 } | 430 } |
| (...skipping 102 matching lines...) | |
| 534 RELEASE_ASSERT(success); | 533 RELEASE_ASSERT(success); |
| 535 } | 534 } |
| 536 | 535 |
| 537 template<typename Header> | 536 template<typename Header> |
| 538 BaseHeapPage* ThreadHeap<Header>::heapPageFromAddress(Address address) | 537 BaseHeapPage* ThreadHeap<Header>::heapPageFromAddress(Address address) |
| 539 { | 538 { |
| 540 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) { | 539 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) { |
| 541 if (page->contains(address)) | 540 if (page->contains(address)) |
| 542 return page; | 541 return page; |
| 543 } | 542 } |
| 544 return 0; | |
| 545 } | |
| 546 | |
| 547 template<typename Header> | |
| 548 BaseHeapPage* ThreadHeap<Header>::largeHeapObjectFromAddress(Address address) | |
| 549 { | |
| 550 for (LargeHeapObject&lt;Header&gt;* current = m_firstLargeHeapObject; current; current = current->next()) { | 543 for (LargeHeapObject&lt;Header&gt;* current = m_firstLargeHeapObject; current; current = current->next()) { |
| 544 ASSERT(reinterpret_cast&lt;Address&gt;(current) == roundToBlinkPageStart(reinterpret_cast&lt;Address&gt;(current))); | |
| 551 if (current->contains(address)) | 545 if (current->contains(address)) |
| 552 return current; | 546 return current; |
| 553 } | 547 } |
| 554 return 0; | 548 return 0; |
| 555 } | 549 } |
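The new ASSERT in the merged lookup depends on large objects starting at blink-page boundaries, so rounding their address to a page start is the identity. A sketch of that power-of-two rounding, with an illustrative page size (Blink's blinkPageSize is a power of two; 2^17 here is an assumption):

```cpp
#include <cstdint>

constexpr uintptr_t kBlinkPageSize = uintptr_t(1) << 17; // assumed size

// Round an address down to the start of its page; for a correctly placed
// large object this returns the address unchanged.
inline uint8_t* roundToPageStart(uint8_t* address)
{
    return reinterpret_cast<uint8_t*>(
        reinterpret_cast<uintptr_t>(address) & ~(kBlinkPageSize - 1));
}
```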
| 556 | 550 |
| 557 #if ENABLE(GC_TRACING) | 551 #if ENABLE(GC_TRACING) |
| 558 template<typename Header> | 552 template<typename Header> |
| 559 const GCInfo* ThreadHeap<Header>::findGCInfoOfLargeHeapObject(Address address) | 553 const GCInfo* ThreadHeap<Header>::findGCInfoOfLargeHeapObject(Address address) |
| 560 { | 554 { |
| 561 for (LargeHeapObject&lt;Header&gt;* current = m_firstLargeHeapObject; current; current = current->next()) { | 555 for (LargeHeapObject&lt;Header&gt;* current = m_firstLargeHeapObject; current; current = current->next()) { |
| 562 if (current->contains(address)) | 556 if (current->contains(address)) |
| 563 return current->gcInfo(); | 557 return current->gcInfo(); |
| 564 } | 558 } |
| 565 return 0; | 559 return 0; |
| 566 } | 560 } |
| 567 #endif | 561 #endif |
| 568 | 562 |
| 569 template<typename Header> | 563 template<typename Header> |
| 570 bool ThreadHeap&lt;Header&gt;::checkAndMarkLargeHeapObject(Visitor* visitor, Address address) | |
| 571 { | |
| 572 for (LargeHeapObject&lt;Header&gt;* current = m_firstLargeHeapObject; current; current = current->next()) { | |
| 573 if (current->checkAndMarkPointer(visitor, address)) | |
| 574 return true; | |
| 575 } | |
| 576 return false; | |
| 577 } | |
| 578 | |
| 579 template<typename Header> | |
| 580 void ThreadHeap<Header>::addToFreeList(Address address, size_t size) | 564 void ThreadHeap<Header>::addToFreeList(Address address, size_t size) |
| 581 { | 565 { |
| 582 ASSERT(heapPageFromAddress(address)); | 566 ASSERT(heapPageFromAddress(address)); |
| 583 ASSERT(heapPageFromAddress(address + size - 1)); | 567 ASSERT(heapPageFromAddress(address + size - 1)); |
| 584 ASSERT(size < blinkPagePayloadSize()); | 568 ASSERT(size < blinkPagePayloadSize()); |
| 585 // The free list entries are only pointer aligned (but when we allocate | 569 // The free list entries are only pointer aligned (but when we allocate |
| 586 // from them we are 8 byte aligned due to the header size). | 570 // from them we are 8 byte aligned due to the header size). |
| 587 ASSERT(!((reinterpret_cast&lt;uintptr_t&gt;(address) + sizeof(Header)) & allocationMask)); | 571 ASSERT(!((reinterpret_cast&lt;uintptr_t&gt;(address) + sizeof(Header)) & allocationMask)); |
| 588 ASSERT(!(size & allocationMask)); | 572 ASSERT(!(size & allocationMask)); |
| 589 ASAN_POISON_MEMORY_REGION(address, size); | 573 ASAN_POISON_MEMORY_REGION(address, size); |
| (...skipping 117 matching lines...) | |
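The asserts in addToFreeList above encode the allocator's alignment invariant: free-list cells are only pointer-aligned, but the payload that follows the header is 8-byte aligned. A small illustration, assuming Blink's allocationGranularity of 8:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr uintptr_t kAllocationGranularity = 8; // assumption, as in Blink
constexpr uintptr_t kAllocationMask = kAllocationGranularity - 1;

template<typename Header>
void assertFreeListAlignment(uintptr_t address, std::size_t size)
{
    // The payload (cell address plus header) must be 8-byte aligned...
    assert(((address + sizeof(Header)) & kAllocationMask) == 0);
    // ...and cell sizes must be whole multiples of the granularity.
    assert((size & kAllocationMask) == 0);
}
```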
| 707 { | 691 { |
| 708 PageMemory* storage = unused->storage(); | 692 PageMemory* storage = unused->storage(); |
| 709 PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool); | 693 PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool); |
| 710 m_pagePool = entry; | 694 m_pagePool = entry; |
| 711 storage->decommit(); | 695 storage->decommit(); |
| 712 } | 696 } |
| 713 | 697 |
| 714 template<typename Header> | 698 template<typename Header> |
| 715 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo) | 699 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo) |
| 716 { | 700 { |
| 717 heapContainsCache()->flush(); | 701 Heap::flushNotInHeapCache(); |
| 718 PageMemory* pageMemory = takePageFromPool(); | 702 PageMemory* pageMemory = takePageFromPool(); |
| 719 if (!pageMemory) { | 703 if (!pageMemory) { |
| 720 pageMemory = PageMemory::allocate(blinkPagePayloadSize()); | 704 pageMemory = PageMemory::allocate(blinkPagePayloadSize()); |
| 721 RELEASE_ASSERT(pageMemory); | 705 RELEASE_ASSERT(pageMemory); |
| 722 } | 706 } |
| 723 HeapPage&lt;Header&gt;* page = new (pageMemory->writableStart()) HeapPage&lt;Header&gt;(pageMemory, this, gcInfo); | 707 HeapPage&lt;Header&gt;* page = new (pageMemory->writableStart()) HeapPage&lt;Header&gt;(pageMemory, this, gcInfo); |
| 724 // FIXME: Oilpan: Linking new pages into the front of the list is | 708 // FIXME: Oilpan: Linking new pages into the front of the list is |
| 725 // crucial when performing allocations during finalization because | 709 // crucial when performing allocations during finalization because |
| 726 // it ensures that those pages are not swept in the current GC | 710 // it ensures that those pages are not swept in the current GC |
| 727 // round. We should create a separate page list for that to | 711 // round. We should create a separate page list for that to |
| (...skipping 41 matching lines...) | |
| 769 page = page->next(); | 753 page = page->next(); |
| 770 HeapPage<Header>::unlink(unused, previous); | 754 HeapPage<Header>::unlink(unused, previous); |
| 771 pagesRemoved = true; | 755 pagesRemoved = true; |
| 772 } else { | 756 } else { |
| 773 page->sweep(); | 757 page->sweep(); |
| 774 previous = &page->m_next; | 758 previous = &page->m_next; |
| 775 page = page->next(); | 759 page = page->next(); |
| 776 } | 760 } |
| 777 } | 761 } |
| 778 if (pagesRemoved) | 762 if (pagesRemoved) |
| 779 heapContainsCache()->flush(); | 763 heapContainsCache()->flush(); |
|
haraken 2014/05/08 05:44:58
For consistency, let's define flushHeapContainsCache().
Erik Corry 2014/05/08 09:26:08
Done, but it doesn't make it very consistent with
| |
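The wrapper haraken asks for would presumably look like the following; the name comes from the comment and the body is a guess, since the CL as shown only changes the call sites:

```cpp
// Hypothetical helper per the review comment; not part of the CL as shown.
template<typename Header>
void ThreadHeap<Header>::flushHeapContainsCache()
{
    heapContainsCache()->flush();
}
```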
| 780 | 764 |
| 781 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject; | 765 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject; |
| 782 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { | 766 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { |
| 783 if (current->isMarked()) { | 767 if (current->isMarked()) { |
| 784 stats().increaseAllocatedSpace(current->size()); | 768 stats().increaseAllocatedSpace(current->size()); |
| 785 stats().increaseObjectSpace(current->payloadSize()); | 769 stats().increaseObjectSpace(current->payloadSize()); |
| 786 current->unmark(); | 770 current->unmark(); |
| 787 previousNext = ¤t->m_next; | 771 previousNext = ¤t->m_next; |
| 788 current = current->next(); | 772 current = current->next(); |
| 789 } else { | 773 } else { |
| (...skipping 53 matching lines...) | |
| 843 ASSERT(isConsistentForGC()); | 827 ASSERT(isConsistentForGC()); |
| 844 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) | 828 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) |
| 845 page->clearMarks(); | 829 page->clearMarks(); |
| 846 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; cur rent = current->next()) | 830 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; cur rent = current->next()) |
| 847 current->unmark(); | 831 current->unmark(); |
| 848 } | 832 } |
| 849 | 833 |
| 850 template<typename Header> | 834 template<typename Header> |
| 851 void ThreadHeap<Header>::deletePages() | 835 void ThreadHeap<Header>::deletePages() |
| 852 { | 836 { |
| 853 heapContainsCache()->flush(); | 837 heapContainsCache()->flush(); |
|
haraken 2014/05/08 05:44:58
flushHeapContainsCache()
Erik Corry 2014/05/08 09:26:08
Done.
| |
| 854 // Add all pages in the pool to the heap's list of pages before deleting | 838 // Add all pages in the pool to the heap's list of pages before deleting |
| 855 clearPagePool(); | 839 clearPagePool(); |
| 856 | 840 |
| 857 for (HeapPage<Header>* page = m_firstPage; page; ) { | 841 for (HeapPage<Header>* page = m_firstPage; page; ) { |
| 858 HeapPage<Header>* dead = page; | 842 HeapPage<Header>* dead = page; |
| 859 page = page->next(); | 843 page = page->next(); |
| 860 PageMemory* storage = dead->storage(); | 844 PageMemory* storage = dead->storage(); |
| 861 dead->~HeapPage(); | 845 dead->~HeapPage(); |
| 862 delete storage; | 846 delete storage; |
| 863 } | 847 } |
| (...skipping 191 matching lines...) | |
| 1055 objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes; | 1039 objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes; |
| 1056 objectOffset = objectStartNumber * allocationGranularity; | 1040 objectOffset = objectStartNumber * allocationGranularity; |
| 1057 Address objectAddress = objectOffset + payload(); | 1041 Address objectAddress = objectOffset + payload(); |
| 1058 Header* header = reinterpret_cast<Header*>(objectAddress); | 1042 Header* header = reinterpret_cast<Header*>(objectAddress); |
| 1059 if (header->isFree()) | 1043 if (header->isFree()) |
| 1060 return 0; | 1044 return 0; |
| 1061 return header; | 1045 return header; |
| 1062 } | 1046 } |
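The arithmetic above reads HeapPage's object-start bitmap: one bit per allocation granule, set where an object begins, and the highest set bit in the map byte locates the start of the object covering the address. A sketch of that conversion (the granularity of 8 and the GCC/Clang __builtin_clz are assumptions):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr std::size_t kAllocationGranularity = 8; // assumed

// Convert the highest set bit of a non-zero bitmap byte into an object
// start number, matching "(mapIndex * 8) + 7 - leadingZeroes" above.
inline std::size_t objectStartNumber(std::size_t mapIndex, uint8_t mapByte)
{
    assert(mapByte); // callers only get here once a set bit was found
    int leadingZeroes = __builtin_clz(mapByte) - 24; // byte sits in an int
    return mapIndex * 8 + 7 - static_cast<std::size_t>(leadingZeroes);
}

inline std::size_t objectOffset(std::size_t startNumber)
{
    return startNumber * kAllocationGranularity;
}
```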
| 1063 | 1047 |
| 1064 template<typename Header> | 1048 template<typename Header> |
| 1065 bool HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address) | 1049 void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address) |
| 1066 { | 1050 { |
| 1051 ASSERT(contains(address)); | |
| 1067 Header* header = findHeaderFromAddress(address); | 1052 Header* header = findHeaderFromAddress(address); |
| 1068 if (!header) | 1053 if (!header) |
| 1069 return false; | 1054 return; |
| 1070 | 1055 |
| 1071 #if ENABLE(GC_TRACING) | 1056 #if ENABLE(GC_TRACING) |
| 1072 visitor->setHostInfo(&address, "stack"); | 1057 visitor->setHostInfo(&address, "stack"); |
| 1073 #endif | 1058 #endif |
| 1074 if (hasVTable(header) && !vTableInitialized(header->payload())) | 1059 if (hasVTable(header) && !vTableInitialized(header->payload())) |
| 1075 visitor->markConservatively(header); | 1060 visitor->markConservatively(header); |
| 1076 else | 1061 else |
| 1077 visitor->mark(header, traceCallback(header)); | 1062 visitor->mark(header, traceCallback(header)); |
| 1078 return true; | |
| 1079 } | 1063 } |
| 1080 | 1064 |
| 1081 #if ENABLE(GC_TRACING) | 1065 #if ENABLE(GC_TRACING) |
| 1082 template<typename Header> | 1066 template<typename Header> |
| 1083 const GCInfo* HeapPage<Header>::findGCInfo(Address address) | 1067 const GCInfo* HeapPage<Header>::findGCInfo(Address address) |
| 1084 { | 1068 { |
| 1085 if (address < payload()) | 1069 if (address < payload()) |
| 1086 return 0; | 1070 return 0; |
| 1087 | 1071 |
| 1088 if (gcInfo()) // for non FinalizedObjectHeader | 1072 if (gcInfo()) // for non FinalizedObjectHeader |
| (...skipping 61 matching lines...) | |
| 1150 return header->hasVTable(); | 1134 return header->hasVTable(); |
| 1151 } | 1135 } |
| 1152 | 1136 |
| 1153 template<typename Header> | 1137 template<typename Header> |
| 1154 void LargeHeapObject<Header>::getStats(HeapStats& stats) | 1138 void LargeHeapObject<Header>::getStats(HeapStats& stats) |
| 1155 { | 1139 { |
| 1156 stats.increaseAllocatedSpace(size()); | 1140 stats.increaseAllocatedSpace(size()); |
| 1157 stats.increaseObjectSpace(payloadSize()); | 1141 stats.increaseObjectSpace(payloadSize()); |
| 1158 } | 1142 } |
| 1159 | 1143 |
| 1160 HeapContainsCache::HeapContainsCache() | 1144 template<typename Entry> |
| 1161 : m_entries(adoptArrayPtr(new Entry[HeapContainsCache::numberOfEntries])) | 1145 void HeapExtentCache<Entry>::flush() |
| 1162 { | 1146 { |
| 1147 if (m_hasEntries) { | |
| 1148 for (int i = 0; i < numberOfEntries; i++) | |
| 1149 m_entries[i] = Entry(); | |
| 1150 m_hasEntries = false; | |
| 1151 } | |
| 1163 } | 1152 } |
| 1164 | 1153 |
| 1165 void HeapContainsCache::flush() | 1154 template<typename Entry> |
| 1166 { | 1155 size_t HeapExtentCache<Entry>::hash(Address address) |
| 1167 for (int i = 0; i < numberOfEntries; i++) | |
| 1168 m_entries[i] = Entry(); | |
| 1169 } | |
| 1170 | |
| 1171 size_t HeapContainsCache::hash(Address address) | |
| 1172 { | 1156 { |
| 1173 size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2); | 1157 size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2); |
| 1174 value ^= value >> numberOfEntriesLog2; | 1158 value ^= value >> numberOfEntriesLog2; |
| 1175 value ^= value >> (numberOfEntriesLog2 * 2); | 1159 value ^= value >> (numberOfEntriesLog2 * 2); |
| 1176 value &= numberOfEntries - 1; | 1160 value &= numberOfEntries - 1; |
| 1177 return value & ~1; // Returns only even number. | 1161 return value & ~1; // Returns only even number. |
| 1178 } | 1162 } |
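hash() clears the low bit, so slots i and i+1 form one bucket: the extent cache is two-way set-associative, and addEntry (below) demotes the previous front entry into the second slot. A self-contained sketch of the scheme with illustrative constants, not Blink's:

```cpp
#include <cstddef>
#include <cstdint>

constexpr std::size_t kEntriesLog2 = 12;  // illustrative
constexpr std::size_t kEntries = std::size_t(1) << kEntriesLog2;
constexpr std::size_t kPageShift = 17;    // assumed page-size log2

struct Entry {
    uintptr_t page;
    void* result;
};

struct TwoWayCache {
    Entry entries[kEntries] = {};

    static std::size_t hash(uintptr_t address)
    {
        std::size_t v = address >> kPageShift;
        v ^= v >> kEntriesLog2;
        v ^= v >> (kEntriesLog2 * 2);
        v &= kEntries - 1;
        return v & ~std::size_t(1); // even index: first slot of a bucket
    }

    static uintptr_t pageOf(uintptr_t address)
    {
        return address & ~((uintptr_t(1) << kPageShift) - 1);
    }

    void* lookup(uintptr_t address) const
    {
        std::size_t i = hash(address);
        uintptr_t page = pageOf(address);
        if (entries[i].page == page)
            return entries[i].result;
        if (entries[i + 1].page == page)
            return entries[i + 1].result;
        return nullptr;
    }

    void add(uintptr_t address, void* result)
    {
        std::size_t i = hash(address);
        entries[i + 1] = entries[i]; // demote the previous front entry
        entries[i] = Entry{pageOf(address), result};
    }
};
```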
| 1179 | 1163 |
| 1180 bool HeapContainsCache::lookup(Address address, BaseHeapPage** page) | 1164 template<typename Entry> |
| 1181 { | 1165 typename Entry::LookupResult HeapExtentCache<Entry>::lookup(Address address) |
| 1182 ASSERT(page); | |
| 1183 size_t index = hash(address); | |
| 1184 ASSERT(!(index & 1)); | |
| 1185 Address cachePage = roundToBlinkPageStart(address); | |
| 1186 if (m_entries[index].address() == cachePage) { | |
| 1187 *page = m_entries[index].containingPage(); | |
| 1188 return true; | |
| 1189 } | |
| 1190 if (m_entries[index + 1].address() == cachePage) { | |
| 1191 *page = m_entries[index + 1].containingPage(); | |
| 1192 return true; | |
| 1193 } | |
| 1194 *page = 0; | |
| 1195 return false; | |
| 1196 } | |
| 1197 | |
| 1198 void HeapContainsCache::addEntry(Address address, BaseHeapPage* page) | |
| 1199 { | 1166 { |
| 1200 size_t index = hash(address); | 1167 size_t index = hash(address); |
| 1201 ASSERT(!(index & 1)); | 1168 ASSERT(!(index & 1)); |
| 1202 Address cachePage = roundToBlinkPageStart(address); | 1169 Address cachePage = roundToBlinkPageStart(address); |
| 1170 if (m_entries[index].address() == cachePage) | |
| 1171 return m_entries[index].result(); | |
| 1172 if (m_entries[index + 1].address() == cachePage) | |
| 1173 return m_entries[index + 1].result(); | |
| 1174 return 0; | |
| 1175 } | |
| 1176 | |
| 1177 template<typename Entry> | |
| 1178 void HeapExtentCache&lt;Entry&gt;::addEntry(Address address, typename Entry::LookupResult entry) | |
| 1179 { | |
| 1180 m_hasEntries = true; | |
| 1181 size_t index = hash(address); | |
| 1182 ASSERT(!(index & 1)); | |
| 1183 Address cachePage = roundToBlinkPageStart(address); | |
| 1203 m_entries[index + 1] = m_entries[index]; | 1184 m_entries[index + 1] = m_entries[index]; |
| 1204 m_entries[index] = Entry(cachePage, page); | 1185 m_entries[index] = Entry(cachePage, entry); |
| 1186 } | |
| 1187 | |
| 1188 // These should not be needed, but it seems impossible to persuade clang to | |
| 1189 // instantiate the template functions and export them from a shared library, so | |
| 1190 // we add these in the non-templated subclass, which does not have that issue. | |
| 1191 void HeapContainsCache::addEntry(Address address, BaseHeapPage* page) | |
| 1192 { | |
| 1193 HeapExtentCache<PositiveEntry>::addEntry(address, page); | |
| 1194 } | |
| 1195 | |
| 1196 BaseHeapPage* HeapContainsCache::lookup(Address address) | |
| 1197 { | |
| 1198 return HeapExtentCache<PositiveEntry>::lookup(address); | |
| 1199 } | |
| 1200 | |
| 1201 bool Heap::notInHeap(Address address) | |
| 1202 { | |
| 1203 return s_notInHeapCache->lookup(address); | |
| 1204 } | |
| 1205 | |
| 1206 void Heap::addressIsNotInHeap(Address address) | |
| 1207 { | |
| 1208 s_notInHeapCache->addEntry(address, true); | |
| 1209 } | |
| 1210 | |
| 1211 void Heap::flushNotInHeapCache() | |
| 1212 { | |
| 1213 s_notInHeapCache->flush(); | |
| 1205 } | 1214 } |
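Together, notInHeap and addressIsNotInHeap give conservative stack scanning a memoized negative answer: a stack word whose page already failed a heap lookup is rejected without walking any page lists. A sketch of the intended call pattern, with the wiring assumed (the real call sites are in Heap::checkAndMarkPointer and the per-thread lookup, only partly visible in this CL):

```cpp
// Sketch only: assumes BaseHeapPage exposes checkAndMarkPointer and that
// recording a miss makes the next probe of the same page O(1).
Address checkPointerSketch(Visitor* visitor, Address address)
{
    if (Heap::notInHeap(address))      // cached negative answer
        return 0;
    if (BaseHeapPage* page = Heap::contains(address)) {
        page->checkAndMarkPointer(visitor, address);
        return address;
    }
    Heap::addressIsNotInHeap(address); // remember the miss for next time
    return 0;
}
```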
| 1206 | 1215 |
| 1207 void CallbackStack::init(CallbackStack** first) | 1216 void CallbackStack::init(CallbackStack** first) |
| 1208 { | 1217 { |
| 1209 // The stacks are chained, so we start by setting this to null as terminator. | 1218 // The stacks are chained, so we start by setting this to null as terminator. |
| 1210 *first = 0; | 1219 *first = 0; |
| 1211 *first = new CallbackStack(first); | 1220 *first = new CallbackStack(first); |
| 1212 } | 1221 } |
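init() first null-terminates the chain and then allocates the head block; the double assignment matters because the constructor reads *first as its next pointer. A tiny sketch of the chaining (field and constructor shapes are assumptions):

```cpp
// Field names are assumptions; only the chaining pattern is from the CL.
struct CallbackStackSketch {
    explicit CallbackStackSketch(CallbackStackSketch** first)
        : m_next(*first) {} // link to the previous head (null at init)
    CallbackStackSketch* m_next;
};

void initSketch(CallbackStackSketch** first)
{
    *first = nullptr;                        // terminator for the chain
    *first = new CallbackStackSketch(first); // head block links to null
}
```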
| 1213 | 1222 |
| 1214 void CallbackStack::shutdown(CallbackStack** first) | 1223 void CallbackStack::shutdown(CallbackStack** first) |
| (...skipping 253 matching lines...) | |
| 1468 { | 1477 { |
| 1469 Heap::pushWeakCellPointerCallback(cell, callback); | 1478 Heap::pushWeakCellPointerCallback(cell, callback); |
| 1470 } | 1479 } |
| 1471 }; | 1480 }; |
| 1472 | 1481 |
| 1473 void Heap::init() | 1482 void Heap::init() |
| 1474 { | 1483 { |
| 1475 ThreadState::init(); | 1484 ThreadState::init(); |
| 1476 CallbackStack::init(&s_markingStack); | 1485 CallbackStack::init(&s_markingStack); |
| 1477 CallbackStack::init(&s_weakCallbackStack); | 1486 CallbackStack::init(&s_weakCallbackStack); |
| 1487 s_notInHeapCache = new HeapDoesNotContainCache(); | |
| 1478 s_markingVisitor = new MarkingVisitor(); | 1488 s_markingVisitor = new MarkingVisitor(); |
| 1479 } | 1489 } |
| 1480 | 1490 |
| 1481 void Heap::shutdown() | 1491 void Heap::shutdown() |
| 1482 { | 1492 { |
| 1483 s_shutdownCalled = true; | 1493 s_shutdownCalled = true; |
| 1484 ThreadState::shutdownHeapIfNecessary(); | 1494 ThreadState::shutdownHeapIfNecessary(); |
| 1485 } | 1495 } |
| 1486 | 1496 |
| 1487 void Heap::doShutdown() | 1497 void Heap::doShutdown() |
| 1488 { | 1498 { |
| 1489 // We don't want to call doShutdown() twice. | 1499 // We don't want to call doShutdown() twice. |
| 1490 if (!s_markingVisitor) | 1500 if (!s_markingVisitor) |
| 1491 return; | 1501 return; |
| 1492 | 1502 |
| 1493 ASSERT(!ThreadState::isAnyThreadInGC()); | 1503 ASSERT(!ThreadState::isAnyThreadInGC()); |
| 1494 ASSERT(!ThreadState::attachedThreads().size()); | 1504 ASSERT(!ThreadState::attachedThreads().size()); |
| 1495 delete s_markingVisitor; | 1505 delete s_markingVisitor; |
| 1496 s_markingVisitor = 0; | 1506 s_markingVisitor = 0; |
| 1507 delete s_notInHeapCache; | |
| 1508 s_notInHeapCache = 0; | |
| 1497 CallbackStack::shutdown(&s_weakCallbackStack); | 1509 CallbackStack::shutdown(&s_weakCallbackStack); |
| 1498 CallbackStack::shutdown(&s_markingStack); | 1510 CallbackStack::shutdown(&s_markingStack); |
| 1499 ThreadState::shutdown(); | 1511 ThreadState::shutdown(); |
| 1500 } | 1512 } |
| 1501 | 1513 |
| 1502 BaseHeapPage* Heap::contains(Address address) | 1514 BaseHeapPage* Heap::contains(Address address) |
| 1503 { | 1515 { |
| 1504 ASSERT(ThreadState::isAnyThreadInGC()); | 1516 ASSERT(ThreadState::isAnyThreadInGC()); |
| 1505 ThreadState::AttachedThreadStateSet&amp; threads = ThreadState::attachedThreads(); | 1517 ThreadState::AttachedThreadStateSet&amp; threads = ThreadState::attachedThreads(); |
| 1506 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 1518 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
| 1507 BaseHeapPage* page = (*it)->contains(address); | 1519 BaseHeapPage* page = (*it)->contains(address); |
| 1508 if (page) | 1520 if (page) |
| 1509 return page; | 1521 return page; |
| 1510 } | 1522 } |
| 1511 return 0; | 1523 return 0; |
| 1512 } | 1524 } |
| 1513 | 1525 |
| 1514 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) | 1526 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) |
| 1515 { | 1527 { |
| 1516 ASSERT(ThreadState::isAnyThreadInGC()); | 1528 ASSERT(ThreadState::isAnyThreadInGC()); |
| 1517 if (!address) | 1529 if (reinterpret_cast<uintptr_t>(address) < blinkPageSize) |
|
haraken 2014/05/08 05:44:58
What is this change for?
Mads Ager (chromium) 2014/05/08 06:52:37
This is an optimization to quickly filter out small
Erik Corry 2014/05/08 09:26:08
You can't allocate at address 0, so no allocation
Erik Corry 2014/05/08 09:26:08
Removed instead.
| |
| 1518 return 0; | 1530 return 0; |
| 1519 | 1531 |
| 1520 ThreadState::AttachedThreadStateSet&amp; threads = ThreadState::attachedThreads(); | 1532 ThreadState::AttachedThreadStateSet&amp; threads = ThreadState::attachedThreads(); |
| 1521 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 1533 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
| 1522 if ((*it)->checkAndMarkPointer(visitor, address)) { | 1534 if ((*it)->checkAndMarkPointer(visitor, address)) { |
| 1523 // Pointer found and marked. | 1535 // Pointer found and marked. |
| 1524 return address; | 1536 return address; |
| 1525 } | 1537 } |
| 1526 } | 1538 } |
| 1527 return 0; | 1539 return 0; |
| (...skipping 146 matching lines...) | |
| 1674 | 1686 |
| 1675 // Force template instantiations for the types that we need. | 1687 // Force template instantiations for the types that we need. |
| 1676 template class HeapPage<FinalizedHeapObjectHeader>; | 1688 template class HeapPage<FinalizedHeapObjectHeader>; |
| 1677 template class HeapPage<HeapObjectHeader>; | 1689 template class HeapPage<HeapObjectHeader>; |
| 1678 template class ThreadHeap<FinalizedHeapObjectHeader>; | 1690 template class ThreadHeap<FinalizedHeapObjectHeader>; |
| 1679 template class ThreadHeap<HeapObjectHeader>; | 1691 template class ThreadHeap<HeapObjectHeader>; |
| 1680 | 1692 |
| 1681 Visitor* Heap::s_markingVisitor; | 1693 Visitor* Heap::s_markingVisitor; |
| 1682 CallbackStack* Heap::s_markingStack; | 1694 CallbackStack* Heap::s_markingStack; |
| 1683 CallbackStack* Heap::s_weakCallbackStack; | 1695 CallbackStack* Heap::s_weakCallbackStack; |
| 1696 HeapDoesNotContainCache* Heap::s_notInHeapCache; | |
| 1684 bool Heap::s_shutdownCalled = false; | 1697 bool Heap::s_shutdownCalled = false; |
| 1685 } | 1698 } |