OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 426 matching lines...)
437 } | 437 } |
438 | 438 |
439 NO_SANITIZE_ADDRESS | 439 NO_SANITIZE_ADDRESS |
440 void HeapObjectHeader::setDeadMark() | 440 void HeapObjectHeader::setDeadMark() |
441 { | 441 { |
442 ASSERT(!isMarked()); | 442 ASSERT(!isMarked()); |
443 checkHeader(); | 443 checkHeader(); |
444 m_size |= deadBitMask; | 444 m_size |= deadBitMask; |
445 } | 445 } |
446 | 446 |
447 #ifndef NDEBUG | 447 #if ENABLE(ASSERT) |
448 NO_SANITIZE_ADDRESS | 448 NO_SANITIZE_ADDRESS |
449 void HeapObjectHeader::zapMagic() | 449 void HeapObjectHeader::zapMagic() |
450 { | 450 { |
451 m_magic = zappedMagic; | 451 m_magic = zappedMagic; |
452 } | 452 } |
453 #endif | 453 #endif |
454 | 454 |
455 HeapObjectHeader* HeapObjectHeader::fromPayload(const void* payload) | 455 HeapObjectHeader* HeapObjectHeader::fromPayload(const void* payload) |
456 { | 456 { |
457 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload)); | 457 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload)); |
458 HeapObjectHeader* header = | 458 HeapObjectHeader* header = |
459 reinterpret_cast<HeapObjectHeader*>(addr - objectHeaderSize); | 459 reinterpret_cast<HeapObjectHeader*>(addr - objectHeaderSize); |
460 return header; | 460 return header; |
461 } | 461 } |
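Aside: fromPayload works because the object header is allocated immediately in front of the payload, so plain pointer arithmetic recovers it. A minimal sketch of that layout; SketchHeader and kHeaderSize are hypothetical stand-ins for the real HeapObjectHeader and objectHeaderSize:

    #include <cstddef>
    #include <cstdint>

    struct SketchHeader { uint32_t m_size; uint32_t m_magic; };
    const size_t kHeaderSize = sizeof(SketchHeader); // stand-in for objectHeaderSize

    SketchHeader* headerFromPayload(void* payload)
    {
        // The header sits directly before the payload, so stepping back by
        // the header size lands on the header.
        char* addr = static_cast<char*>(payload);
        return reinterpret_cast<SketchHeader*>(addr - kHeaderSize);
    }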
462 | 462 |
463 void HeapObjectHeader::finalize(const GCInfo* gcInfo, Address object, size_t objectSize) | 463 void HeapObjectHeader::finalize(const GCInfo* gcInfo, Address object, size_t objectSize) |
464 { | 464 { |
465 ASSERT(gcInfo); | 465 ASSERT(gcInfo); |
466 if (gcInfo->hasFinalizer()) { | 466 if (gcInfo->hasFinalizer()) { |
467 gcInfo->m_finalize(object); | 467 gcInfo->m_finalize(object); |
468 } | 468 } |
469 | 469 |
470 #if !defined(NDEBUG) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 470 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
471 // In Debug builds, memory is zapped when it's freed, and the zapped memory is | 471 // In Debug builds, memory is zapped when it's freed, and the zapped memory is |
472 // zeroed out when the memory is reused. Memory is also zapped when using Leak | 472 // zeroed out when the memory is reused. Memory is also zapped when using Leak |
473 // Sanitizer because the heap is used as a root region for LSan and therefore | 473 // Sanitizer because the heap is used as a root region for LSan and therefore |
474 // pointers in unreachable memory could hide leaks. | 474 // pointers in unreachable memory could hide leaks. |
475 for (size_t i = 0; i < objectSize; i++) | 475 for (size_t i = 0; i < objectSize; i++) |
476 object[i] = finalizedZapValue; | 476 object[i] = finalizedZapValue; |
477 | 477 |
478 // Zap the primary vTable entry (secondary vTable entries are not zapped). | 478 // Zap the primary vTable entry (secondary vTable entries are not zapped). |
479 *(reinterpret_cast<uintptr_t*>(object)) = zappedVTable; | 479 *(reinterpret_cast<uintptr_t*>(object)) = zappedVTable; |
480 #endif | 480 #endif |
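Aside: this zapping is what turns use-after-free into a deterministic failure: stale reads see an obviously bogus fill byte, and a virtual call through a dangling pointer dereferences a poison vtable address and faults at the call site. A hedged sketch of the pattern; both constants below are made up (the real finalizedZapValue and zappedVTable are defined elsewhere in this file):

    #include <cstdint>
    #include <cstring>

    const uint8_t kZapByte = 0x24;              // hypothetical fill value
    const uintptr_t kZappedVTable = 0xd0d0d0d0; // hypothetical poison address

    void zapFinalizedObject(uint8_t* object, size_t size)
    {
        // Fill the whole object so stale reads return a recognizable pattern.
        memset(object, kZapByte, size);
        // Clobber the primary vtable slot; a virtual call through a dangling
        // pointer then crashes immediately instead of corrupting state.
        *reinterpret_cast<uintptr_t*>(object) = kZappedVTable;
    }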
(...skipping 367 matching lines...)
848 void OrphanedPagePool::addOrphanedPage(int index, BaseHeapPage* page) | 848 void OrphanedPagePool::addOrphanedPage(int index, BaseHeapPage* page) |
849 { | 849 { |
850 page->markOrphaned(); | 850 page->markOrphaned(); |
851 PoolEntry* entry = new PoolEntry(page, m_pool[index]); | 851 PoolEntry* entry = new PoolEntry(page, m_pool[index]); |
852 m_pool[index] = entry; | 852 m_pool[index] = entry; |
853 } | 853 } |
854 | 854 |
855 NO_SANITIZE_ADDRESS | 855 NO_SANITIZE_ADDRESS |
856 void OrphanedPagePool::decommitOrphanedPages() | 856 void OrphanedPagePool::decommitOrphanedPages() |
857 { | 857 { |
858 #ifndef NDEBUG | 858 #if ENABLE(ASSERT) |
859 // No locking needed as all threads are at safepoints at this point in time. | 859 // No locking needed as all threads are at safepoints at this point in time. |
860 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 860 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
861 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) | 861 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) |
862 ASSERT((*it)->isAtSafePoint()); | 862 ASSERT((*it)->isAtSafePoint()); |
863 #endif | 863 #endif |
864 | 864 |
865 for (int index = 0; index < NumberOfHeaps; ++index) { | 865 for (int index = 0; index < NumberOfHeaps; ++index) { |
866 PoolEntry* entry = m_pool[index]; | 866 PoolEntry* entry = m_pool[index]; |
867 PoolEntry** prevNext = &m_pool[index]; | 867 PoolEntry** prevNext = &m_pool[index]; |
868 while (entry) { | 868 while (entry) { |
(...skipping 43 matching lines...)
912 // poisoned memory as well and the NO_SANITIZE_ADDRESS annotation | 912 // poisoned memory as well and the NO_SANITIZE_ADDRESS annotation |
913 // only works for code in this method and not for calls to memset. | 913 // only works for code in this method and not for calls to memset. |
914 Address base = memory->writableStart(); | 914 Address base = memory->writableStart(); |
915 for (Address current = base; current < base + blinkPagePayloadSize(); ++current) | 915 for (Address current = base; current < base + blinkPagePayloadSize(); ++current) |
916 *current = 0; | 916 *current = 0; |
917 #else | 917 #else |
918 memset(memory->writableStart(), 0, blinkPagePayloadSize()); | 918 memset(memory->writableStart(), 0, blinkPagePayloadSize()); |
919 #endif | 919 #endif |
920 } | 920 } |
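Aside: the byte-by-byte loop above is needed because NO_SANITIZE_ADDRESS suppresses instrumentation only for loads and stores compiled into the annotated function itself; a memset call would go through ASan's interposed, instrumented version and report the writes into poisoned memory. An illustrative sketch using the underlying compiler attribute (Blink spells this via the NO_SANITIZE_ADDRESS macro):

    #include <cstddef>

    #if defined(__clang__) || defined(__GNUC__)
    __attribute__((no_sanitize_address))
    #endif
    void clearPoisonedRegion(char* start, size_t size)
    {
        for (size_t i = 0; i < size; ++i)
            start[i] = 0; // direct stores: not instrumented in this function
        // memset(start, 0, size); // would run ASan's instrumented memset
    }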
921 | 921 |
922 #ifndef NDEBUG | 922 #if ENABLE(ASSERT) |
923 bool OrphanedPagePool::contains(void* object) | 923 bool OrphanedPagePool::contains(void* object) |
924 { | 924 { |
925 for (int index = 0; index < NumberOfHeaps; ++index) { | 925 for (int index = 0; index < NumberOfHeaps; ++index) { |
926 for (PoolEntry* entry = m_pool[index]; entry; entry = entry->next) { | 926 for (PoolEntry* entry = m_pool[index]; entry; entry = entry->next) { |
927 BaseHeapPage* page = entry->data; | 927 BaseHeapPage* page = entry->data; |
928 if (page->contains(reinterpret_cast<Address>(object))) | 928 if (page->contains(reinterpret_cast<Address>(object))) |
929 return true; | 929 return true; |
930 } | 930 } |
931 } | 931 } |
932 return false; | 932 return false; |
(...skipping 69 matching lines...)
1002 // FIXME: Oilpan: Linking new pages into the front of the list is | 1002 // FIXME: Oilpan: Linking new pages into the front of the list is |
1003 // crucial when performing allocations during finalization because | 1003 // crucial when performing allocations during finalization because |
1004 // it ensures that those pages are not swept in the current GC | 1004 // it ensures that those pages are not swept in the current GC |
1005 // round. We should create a separate page list for that to | 1005 // round. We should create a separate page list for that to |
1006 // separate out the pages allocated during finalization clearly | 1006 // separate out the pages allocated during finalization clearly |
1007 // from the pages currently being swept. | 1007 // from the pages currently being swept. |
1008 page->link(&m_firstPage); | 1008 page->link(&m_firstPage); |
1009 addToFreeList(page->payload(), HeapPage<Header>::payloadSize()); | 1009 addToFreeList(page->payload(), HeapPage<Header>::payloadSize()); |
1010 } | 1010 } |
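Aside: page->link(&m_firstPage) is plain front-of-list insertion on an intrusive singly linked list, which is what keeps pages allocated during finalization ahead of the sweep, per the FIXME above. A sketch with hypothetical names:

    struct Page {
        Page* m_next;
        void link(Page** first)
        {
            m_next = *first; // new page points at the old head
            *first = this;   // the list head now points at the new page
        }
    };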
1011 | 1011 |
1012 #ifndef NDEBUG | 1012 #if ENABLE(ASSERT) |
1013 template<typename Header> | 1013 template<typename Header> |
1014 void ThreadHeap<Header>::getScannedStats(HeapStats& scannedStats) | 1014 void ThreadHeap<Header>::getScannedStats(HeapStats& scannedStats) |
1015 { | 1015 { |
1016 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) | 1016 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) |
1017 page->getStats(scannedStats); | 1017 page->getStats(scannedStats); |
1018 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) | 1018 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) |
1019 current->getStats(scannedStats); | 1019 current->getStats(scannedStats); |
1020 } | 1020 } |
1021 #endif | 1021 #endif |
1022 | 1022 |
(...skipping 150 matching lines...)
1173 { | 1173 { |
1174 clearObjectStartBitMap(); | 1174 clearObjectStartBitMap(); |
1175 heap()->stats().increaseAllocatedSpace(blinkPageSize); | 1175 heap()->stats().increaseAllocatedSpace(blinkPageSize); |
1176 Address startOfGap = payload(); | 1176 Address startOfGap = payload(); |
1177 for (Address headerAddress = startOfGap; headerAddress < end(); ) { | 1177 for (Address headerAddress = startOfGap; headerAddress < end(); ) { |
1178 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress); | 1178 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress); |
1179 ASSERT(basicHeader->size() < blinkPagePayloadSize()); | 1179 ASSERT(basicHeader->size() < blinkPagePayloadSize()); |
1180 | 1180 |
1181 if (basicHeader->isFree()) { | 1181 if (basicHeader->isFree()) { |
1182 size_t size = basicHeader->size(); | 1182 size_t size = basicHeader->size(); |
1183 #if defined(NDEBUG) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) | 1183 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) |
1184 // Zero the memory in the free list header to maintain the | 1184 // Zero the memory in the free list header to maintain the |
1185 // invariant that memory on the free list is zero filled. | 1185 // invariant that memory on the free list is zero filled. |
1186 // The rest of the memory is already on the free list and is | 1186 // The rest of the memory is already on the free list and is |
1187 // therefore already zero filled. | 1187 // therefore already zero filled. |
1188 if (size < sizeof(FreeListEntry)) | 1188 if (size < sizeof(FreeListEntry)) |
1189 memset(headerAddress, 0, size); | 1189 memset(headerAddress, 0, size); |
1190 else | 1190 else |
1191 memset(headerAddress, 0, sizeof(FreeListEntry)); | 1191 memset(headerAddress, 0, sizeof(FreeListEntry)); |
1192 #endif | 1192 #endif |
1193 headerAddress += size; | 1193 headerAddress += size; |
1194 continue; | 1194 continue; |
1195 } | 1195 } |
1196 // At this point we know this is a valid object of type Header | 1196 // At this point we know this is a valid object of type Header |
1197 Header* header = static_cast<Header*>(basicHeader); | 1197 Header* header = static_cast<Header*>(basicHeader); |
1198 | 1198 |
1199 if (!header->isMarked()) { | 1199 if (!header->isMarked()) { |
1200 // For ASan we unpoison the specific object when calling the finalizer and | 1200 // For ASan we unpoison the specific object when calling the finalizer and |
1201 // poison it again when done to allow the object's own finalizer to operate | 1201 // poison it again when done to allow the object's own finalizer to operate |
1202 // on the object, but not have other finalizers be allowed to access it. | 1202 // on the object, but not have other finalizers be allowed to access it. |
1203 ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize()); | 1203 ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize()); |
1204 finalize(header); | 1204 finalize(header); |
1205 size_t size = header->size(); | 1205 size_t size = header->size(); |
1206 #if defined(NDEBUG) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) | 1206 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) |
1207 // This memory will be added to the freelist. Maintain the invariant | 1207 // This memory will be added to the freelist. Maintain the invariant |
1208 // that memory on the freelist is zero filled. | 1208 // that memory on the freelist is zero filled. |
1209 memset(headerAddress, 0, size); | 1209 memset(headerAddress, 0, size); |
1210 #endif | 1210 #endif |
1211 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); | 1211 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); |
1212 headerAddress += size; | 1212 headerAddress += size; |
1213 continue; | 1213 continue; |
1214 } | 1214 } |
1215 | 1215 |
1216 if (startOfGap != headerAddress) | 1216 if (startOfGap != headerAddress) |
(...skipping 267 matching lines...)
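Aside on the sweep loop above: the invariant is that memory on the free list is zero-filled except for the FreeListEntry bookkeeping at the head of each entry, which lets the allocator hand out free-list memory without a full memset. A minimal sketch of the sweep-side zeroing under that assumption:

    #include <cstddef>
    #include <cstring>

    struct FreeListEntry { size_t m_size; FreeListEntry* m_next; };

    void zeroFreeListHeader(char* address, size_t size)
    {
        // Everything past the entry header is already on the free list and
        // therefore already zero; only the header bytes need clearing.
        size_t n = size < sizeof(FreeListEntry) ? size : sizeof(FreeListEntry);
        memset(address, 0, n);
    }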
1484 CallbackStack* next; | 1484 CallbackStack* next; |
1485 for (CallbackStack* current = *first; current; current = next) { | 1485 for (CallbackStack* current = *first; current; current = next) { |
1486 next = current->m_next; | 1486 next = current->m_next; |
1487 delete current; | 1487 delete current; |
1488 } | 1488 } |
1489 *first = 0; | 1489 *first = 0; |
1490 } | 1490 } |
1491 | 1491 |
1492 CallbackStack::~CallbackStack() | 1492 CallbackStack::~CallbackStack() |
1493 { | 1493 { |
1494 #ifndef NDEBUG | 1494 #if ENABLE(ASSERT) |
1495 clearUnused(); | 1495 clearUnused(); |
1496 #endif | 1496 #endif |
1497 } | 1497 } |
1498 | 1498 |
1499 void CallbackStack::clearUnused() | 1499 void CallbackStack::clearUnused() |
1500 { | 1500 { |
1501 for (size_t i = 0; i < bufferSize; i++) | 1501 for (size_t i = 0; i < bufferSize; i++) |
1502 m_buffer[i] = Item(0, 0); | 1502 m_buffer[i] = Item(0, 0); |
1503 } | 1503 } |
1504 | 1504 |
1505 bool CallbackStack::isEmpty() | 1505 bool CallbackStack::isEmpty() |
1506 { | 1506 { |
1507 return m_current == &(m_buffer[0]) && !m_next; | 1507 return m_current == &(m_buffer[0]) && !m_next; |
1508 } | 1508 } |
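Aside: isEmpty reflects the segmented layout of CallbackStack: each instance owns a fixed buffer, m_current points one past the top item, and overflow chains blocks through m_next, which the pop path below walks and retires. A simplified, self-contained sketch of that pop logic, with int items standing in for Item:

    struct Block {
        static const int kCapacity = 8;
        int m_buffer[kCapacity];
        int* m_current;
        Block* m_next;
        Block() : m_current(m_buffer), m_next(0) { }
    };

    bool pop(Block** first, int* out)
    {
        Block* block = *first;
        if (block->m_current == block->m_buffer) {
            if (!block->m_next)
                return false;       // every block is drained
            *first = block->m_next; // retire the empty head block
            delete block;
            return pop(first, out);
        }
        *out = *--block->m_current;
        return true;
    }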
1509 | 1509 |
1510 template<CallbackInvocationMode Mode> | 1510 template<CallbackInvocationMode Mode> |
1511 bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor) | 1511 bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor) |
1512 { | 1512 { |
1513 if (m_current == &(m_buffer[0])) { | 1513 if (m_current == &(m_buffer[0])) { |
1514 if (!m_next) { | 1514 if (!m_next) { |
1515 #ifndef NDEBUG | 1515 #if ENABLE(ASSERT) |
1516 clearUnused(); | 1516 clearUnused(); |
1517 #endif | 1517 #endif |
1518 return false; | 1518 return false; |
1519 } | 1519 } |
1520 CallbackStack* nextStack = m_next; | 1520 CallbackStack* nextStack = m_next; |
1521 *first = nextStack; | 1521 *first = nextStack; |
1522 delete this; | 1522 delete this; |
1523 return nextStack->popAndInvokeCallback<Mode>(first, visitor); | 1523 return nextStack->popAndInvokeCallback<Mode>(first, visitor); |
1524 } | 1524 } |
1525 Item* item = --m_current; | 1525 Item* item = --m_current; |
(...skipping 72 matching lines...)
1598 // Ad. 2. The containing object cannot be on an orphaned page since | 1598 // Ad. 2. The containing object cannot be on an orphaned page since |
1599 // in that case we wouldn't have traced its parts. This also means | 1599 // in that case we wouldn't have traced its parts. This also means |
1600 // the ephemeron collection is not on the orphaned page. | 1600 // the ephemeron collection is not on the orphaned page. |
1601 // Ad. 3. Is the same as 2. The collection containing the ephemeron | 1601 // Ad. 3. Is the same as 2. The collection containing the ephemeron |
1602 // collection as a value object cannot be on an orphaned page since | 1602 // collection as a value object cannot be on an orphaned page since |
1603 // it would not have traced its values in that case. | 1603 // it would not have traced its values in that case. |
1604 item.callback()(visitor, item.object()); | 1604 item.callback()(visitor, item.object()); |
1605 } | 1605 } |
1606 } | 1606 } |
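Aside: the reason iteration callbacks are kept and re-run is that an ephemeron's value may only be traced once its key is marked, so marking must be iterated to a fixed point. An illustrative, non-Blink sketch of that fixed-point loop:

    #include <cstddef>
    #include <set>
    #include <vector>

    struct Ephemeron { void* key; void* value; };

    void iterateEphemerons(const std::vector<Ephemeron>& table, std::set<void*>& marked)
    {
        bool changed = true;
        while (changed) { // repeat until no additional object gets marked
            changed = false;
            for (size_t i = 0; i < table.size(); ++i) {
                // Trace the value only when its key is already reachable.
                if (marked.count(table[i].key) && !marked.count(table[i].value)) {
                    marked.insert(table[i].value);
                    changed = true;
                }
            }
        }
    }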
1607 | 1607 |
1608 #ifndef NDEBUG | 1608 #if ENABLE(ASSERT) |
1609 bool CallbackStack::hasCallbackForObject(const void* object) | 1609 bool CallbackStack::hasCallbackForObject(const void* object) |
1610 { | 1610 { |
1611 for (unsigned i = 0; m_buffer + i < m_current; i++) { | 1611 for (unsigned i = 0; m_buffer + i < m_current; i++) { |
1612 Item* item = &m_buffer[i]; | 1612 Item* item = &m_buffer[i]; |
1613 if (item->object() == object) { | 1613 if (item->object() == object) { |
1614 return true; | 1614 return true; |
1615 } | 1615 } |
1616 } | 1616 } |
1617 if (m_next) | 1617 if (m_next) |
1618 return m_next->hasCallbackForObject(object); | 1618 return m_next->hasCallbackForObject(object); |
(...skipping 90 matching lines...)
1709 virtual void registerWeakMembers(const void* closure, const void* containingObject, WeakPointerCallback callback) OVERRIDE | 1709 virtual void registerWeakMembers(const void* closure, const void* containingObject, WeakPointerCallback callback) OVERRIDE |
1710 { | 1710 { |
1711 Heap::pushWeakObjectPointerCallback(const_cast<void*>(closure), const_cast<void*>(containingObject), callback); | 1711 Heap::pushWeakObjectPointerCallback(const_cast<void*>(closure), const_cast<void*>(containingObject), callback); |
1712 } | 1712 } |
1713 | 1713 |
1714 virtual void registerWeakTable(const void* closure, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback) | 1714 virtual void registerWeakTable(const void* closure, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback) |
1715 { | 1715 { |
1716 Heap::registerWeakTable(const_cast<void*>(closure), iterationCallback, iterationDoneCallback); | 1716 Heap::registerWeakTable(const_cast<void*>(closure), iterationCallback, iterationDoneCallback); |
1717 } | 1717 } |
1718 | 1718 |
1719 #ifndef NDEBUG | 1719 #if ENABLE(ASSERT) |
1720 virtual bool weakTableRegistered(const void* closure) | 1720 virtual bool weakTableRegistered(const void* closure) |
1721 { | 1721 { |
1722 return Heap::weakTableRegistered(closure); | 1722 return Heap::weakTableRegistered(closure); |
1723 } | 1723 } |
1724 #endif | 1724 #endif |
1725 | 1725 |
1726 virtual bool isMarked(const void* objectPointer) OVERRIDE | 1726 virtual bool isMarked(const void* objectPointer) OVERRIDE |
1727 { | 1727 { |
1728 return FinalizedHeapObjectHeader::fromPayload(objectPointer)->isMarked(); | 1728 return FinalizedHeapObjectHeader::fromPayload(objectPointer)->isMarked(); |
1729 } | 1729 } |
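Aside: a hypothetical caller's view of registerWeakMembers, to show the intended flow: a traced object hands the visitor a callback that runs after marking and clears any weak references to unmarked objects. Sketch only; the WeakPointerCallback signature and the isAlive helper are assumptions here:

    class Node;

    class Observer {
    public:
        void trace(Visitor* visitor)
        {
            // 'this' serves as both the closure and the containing object.
            visitor->registerWeakMembers(this, this, clearWeakMembers);
        }

        static void clearWeakMembers(Visitor* visitor, void* closure)
        {
            Observer* self = static_cast<Observer*>(closure);
            // Assumed helper: drop the reference if m_node was not marked.
            if (self->m_node && !visitor->isAlive(self->m_node))
                self->m_node = 0;
        }

    private:
        Node* m_node;
    };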
(...skipping 164 matching lines...)
1894 ASSERT(ThreadState::isAnyThreadInGC()); | 1894 ASSERT(ThreadState::isAnyThreadInGC()); |
1895 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 1895 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
1896 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 1896 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
1897 BaseHeapPage* page = (*it)->contains(address); | 1897 BaseHeapPage* page = (*it)->contains(address); |
1898 if (page) | 1898 if (page) |
1899 return page; | 1899 return page; |
1900 } | 1900 } |
1901 return 0; | 1901 return 0; |
1902 } | 1902 } |
1903 | 1903 |
1904 #ifndef NDEBUG | 1904 #if ENABLE(ASSERT) |
1905 bool Heap::containedInHeapOrOrphanedPage(void* object) | 1905 bool Heap::containedInHeapOrOrphanedPage(void* object) |
1906 { | 1906 { |
1907 return contains(object) || orphanedPagePool()->contains(object); | 1907 return contains(object) || orphanedPagePool()->contains(object); |
1908 } | 1908 } |
1909 #endif | 1909 #endif |
1910 | 1910 |
1911 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) | 1911 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) |
1912 { | 1912 { |
1913 ASSERT(ThreadState::isAnyThreadInGC()); | 1913 ASSERT(ThreadState::isAnyThreadInGC()); |
1914 | 1914 |
1915 #ifdef NDEBUG | 1915 #if !ENABLE(ASSERT) |
1916 if (s_heapDoesNotContainCache->lookup(address)) | 1916 if (s_heapDoesNotContainCache->lookup(address)) |
1917 return 0; | 1917 return 0; |
1918 #endif | 1918 #endif |
1919 | 1919 |
1920 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 1920 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
1921 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 1921 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
1922 if ((*it)->checkAndMarkPointer(visitor, address)) { | 1922 if ((*it)->checkAndMarkPointer(visitor, address)) { |
1923 // Pointer was in a page of that thread. If it actually pointed | 1923 // Pointer was in a page of that thread. If it actually pointed |
1924 // into an object then that object was found and marked. | 1924 // into an object then that object was found and marked. |
1925 ASSERT(!s_heapDoesNotContainCache->lookup(address)); | 1925 ASSERT(!s_heapDoesNotContainCache->lookup(address)); |
1926 s_lastGCWasConservative = true; | 1926 s_lastGCWasConservative = true; |
1927 return address; | 1927 return address; |
1928 } | 1928 } |
1929 } | 1929 } |
1930 | 1930 |
1931 #ifdef NDEBUG | 1931 #if !ENABLE(ASSERT) |
1932 s_heapDoesNotContainCache->addEntry(address, true); | 1932 s_heapDoesNotContainCache->addEntry(address, true); |
1933 #else | 1933 #else |
1934 if (!s_heapDoesNotContainCache->lookup(address)) | 1934 if (!s_heapDoesNotContainCache->lookup(address)) |
1935 s_heapDoesNotContainCache->addEntry(address, true); | 1935 s_heapDoesNotContainCache->addEntry(address, true); |
1936 #endif | 1936 #endif |
1937 return 0; | 1937 return 0; |
1938 } | 1938 } |
1939 | 1939 |
1940 #if ENABLE(GC_TRACING) | 1940 #if ENABLE(GC_TRACING) |
1941 const GCInfo* Heap::findGCInfo(Address address) | 1941 const GCInfo* Heap::findGCInfo(Address address) |
(...skipping 85 matching lines...)
2027 | 2027 |
2028 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(&s_ephemeronStack); | 2028 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(&s_ephemeronStack); |
2029 *slot = CallbackStack::Item(table, iterationCallback); | 2029 *slot = CallbackStack::Item(table, iterationCallback); |
2030 | 2030 |
2031 // We use the callback stack of weak cell pointers for the ephemeronIterationDone callbacks. | 2031 // We use the callback stack of weak cell pointers for the ephemeronIterationDone callbacks. |
2032 // These callbacks are called right after marking and before any thread commences execution | 2032 // These callbacks are called right after marking and before any thread commences execution |
2033 // so it suits our needs for telling the ephemerons that the iteration is done. | 2033 // so it suits our needs for telling the ephemerons that the iteration is done. |
2034 pushWeakCellPointerCallback(static_cast<void**>(table), iterationDoneCallback); | 2034 pushWeakCellPointerCallback(static_cast<void**>(table), iterationDoneCallback); |
2035 } | 2035 } |
2036 | 2036 |
2037 #ifndef NDEBUG | 2037 #if ENABLE(ASSERT) |
2038 bool Heap::weakTableRegistered(const void* table) | 2038 bool Heap::weakTableRegistered(const void* table) |
2039 { | 2039 { |
2040 ASSERT(s_ephemeronStack); | 2040 ASSERT(s_ephemeronStack); |
2041 return s_ephemeronStack->hasCallbackForObject(table); | 2041 return s_ephemeronStack->hasCallbackForObject(table); |
2042 } | 2042 } |
2043 #endif | 2043 #endif |
2044 | 2044 |
2045 void Heap::prepareForGC() | 2045 void Heap::prepareForGC() |
2046 { | 2046 { |
2047 ASSERT(ThreadState::isAnyThreadInGC()); | 2047 ASSERT(ThreadState::isAnyThreadInGC()); |
(...skipping 194 matching lines...)
2242 Visitor* Heap::s_markingVisitor; | 2242 Visitor* Heap::s_markingVisitor; |
2243 CallbackStack* Heap::s_markingStack; | 2243 CallbackStack* Heap::s_markingStack; |
2244 CallbackStack* Heap::s_weakCallbackStack; | 2244 CallbackStack* Heap::s_weakCallbackStack; |
2245 CallbackStack* Heap::s_ephemeronStack; | 2245 CallbackStack* Heap::s_ephemeronStack; |
2246 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; | 2246 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; |
2247 bool Heap::s_shutdownCalled = false; | 2247 bool Heap::s_shutdownCalled = false; |
2248 bool Heap::s_lastGCWasConservative = false; | 2248 bool Heap::s_lastGCWasConservative = false; |
2249 FreePagePool* Heap::s_freePagePool; | 2249 FreePagePool* Heap::s_freePagePool; |
2250 OrphanedPagePool* Heap::s_orphanedPagePool; | 2250 OrphanedPagePool* Heap::s_orphanedPagePool; |
2251 } | 2251 } |