| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 425 matching lines...) |
| 436 m_size &= ~debugBitMask; | 436 m_size &= ~debugBitMask; |
| 437 } | 437 } |
| 438 | 438 |
| 439 NO_SANITIZE_ADDRESS | 439 NO_SANITIZE_ADDRESS |
| 440 void HeapObjectHeader::setDebugMark() | 440 void HeapObjectHeader::setDebugMark() |
| 441 { | 441 { |
| 442 checkHeader(); | 442 checkHeader(); |
| 443 m_size |= debugBitMask; | 443 m_size |= debugBitMask; |
| 444 } | 444 } |
| 445 | 445 |
| 446 #ifndef NDEBUG | 446 #if ENABLE(ASSERT) |
| 447 NO_SANITIZE_ADDRESS | 447 NO_SANITIZE_ADDRESS |
| 448 void HeapObjectHeader::zapMagic() | 448 void HeapObjectHeader::zapMagic() |
| 449 { | 449 { |
| 450 m_magic = zappedMagic; | 450 m_magic = zappedMagic; |
| 451 } | 451 } |
| 452 #endif | 452 #endif |
| 453 | 453 |
| 454 HeapObjectHeader* HeapObjectHeader::fromPayload(const void* payload) | 454 HeapObjectHeader* HeapObjectHeader::fromPayload(const void* payload) |
| 455 { | 455 { |
| 456 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload)); | 456 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload)); |
| 457 HeapObjectHeader* header = | 457 HeapObjectHeader* header = |
| 458 reinterpret_cast<HeapObjectHeader*>(addr - objectHeaderSize); | 458 reinterpret_cast<HeapObjectHeader*>(addr - objectHeaderSize); |
| 459 return header; | 459 return header; |
| 460 } | 460 } |
| 461 | 461 |
| 462 void HeapObjectHeader::finalize(const GCInfo* gcInfo, Address object, size_t objectSize) | 462 void HeapObjectHeader::finalize(const GCInfo* gcInfo, Address object, size_t objectSize) |
| 463 { | 463 { |
| 464 ASSERT(gcInfo); | 464 ASSERT(gcInfo); |
| 465 if (gcInfo->hasFinalizer()) { | 465 if (gcInfo->hasFinalizer()) { |
| 466 gcInfo->m_finalize(object); | 466 gcInfo->m_finalize(object); |
| 467 } | 467 } |
| 468 | 468 |
| 469 #if !defined(NDEBUG) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 469 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
| 470 // In Debug builds, memory is zapped when it's freed, and the zapped memory is | 470 // In Debug builds, memory is zapped when it's freed, and the zapped memory is |
| 471 // zeroed out when the memory is reused. Memory is also zapped when using Leak | 471 // zeroed out when the memory is reused. Memory is also zapped when using Leak |
| 472 // Sanitizer because the heap is used as a root region for LSan and therefore | 472 // Sanitizer because the heap is used as a root region for LSan and therefore |
| 473 // pointers in unreachable memory could hide leaks. | 473 // pointers in unreachable memory could hide leaks. |
| 474 for (size_t i = 0; i < objectSize; i++) | 474 for (size_t i = 0; i < objectSize; i++) |
| 475 object[i] = finalizedZapValue; | 475 object[i] = finalizedZapValue; |
| 476 | 476 |
| 477 // Zap the primary vTable entry (secondary vTable entries are not zapped). | 477 // Zap the primary vTable entry (secondary vTable entries are not zapped). |
| 478 *(reinterpret_cast<uintptr_t*>(object)) = zappedVTable; | 478 *(reinterpret_cast<uintptr_t*>(object)) = zappedVTable; |
| 479 #endif | 479 #endif |
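The block above is the zap-on-free pattern: freed payload bytes are overwritten with a filler value so stale pointers cannot keep dead objects looking reachable (which matters when LSan treats the heap as a root region), and the first word is stamped so a zapped vtable is obvious in a crash dump. A minimal standalone sketch of that idea follows; the constants and the zapObject name are illustrative assumptions, not the values used in this patch.

#include <cstddef>
#include <cstdint>
#include <cstring>

// Illustrative constants; the real zap values in Heap.cpp differ.
static const uint8_t kZapValue = 0x24;
static const uintptr_t kZappedVTable = 0xd0d0d0d0;

// Overwrite a freed object's payload so dangling pointers stand out and
// conservative tools do not treat the dead memory as holding live roots.
static void zapObject(void* object, size_t objectSize)
{
    std::memset(object, kZapValue, objectSize);
    if (objectSize >= sizeof(uintptr_t)) {
        // Stamp the first word, where a vtable pointer would normally live.
        *reinterpret_cast<uintptr_t*>(object) = kZappedVTable;
    }
}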
| (...skipping 344 matching lines...) |
| 824 // FIXME: Oilpan: Linking new pages into the front of the list is | 824 // FIXME: Oilpan: Linking new pages into the front of the list is |
| 825 // crucial when performing allocations during finalization because | 825 // crucial when performing allocations during finalization because |
| 826 // it ensures that those pages are not swept in the current GC | 826 // it ensures that those pages are not swept in the current GC |
| 827 // round. We should create a separate page list for that to | 827 // round. We should create a separate page list for that to |
| 828 // separate out the pages allocated during finalization clearly | 828 // separate out the pages allocated during finalization clearly |
| 829 // from the pages currently being swept. | 829 // from the pages currently being swept. |
| 830 page->link(&m_firstPage); | 830 page->link(&m_firstPage); |
| 831 addToFreeList(page->payload(), HeapPage<Header>::payloadSize()); | 831 addToFreeList(page->payload(), HeapPage<Header>::payloadSize()); |
| 832 } | 832 } |
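The FIXME above is about ordering: pages allocated while finalizers run are pushed at the head of the page list, so a sweep that started from the previous head never reaches them in the same GC round. A generic sketch of that push-front pattern, with PageNode standing in for HeapPage<Header> (names here are assumptions for illustration):

// Generic singly linked page list with push-front semantics.
struct PageNode {
    PageNode* m_next = nullptr;

    void link(PageNode** head)
    {
        // New pages go in front of *head, so a sweep already walking from
        // the old head never visits them during the current cycle.
        m_next = *head;
        *head = this;
    }
};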
| 833 | 833 |
| 834 #ifndef NDEBUG | 834 #if ENABLE(ASSERT) |
| 835 template<typename Header> | 835 template<typename Header> |
| 836 void ThreadHeap<Header>::getScannedStats(HeapStats& scannedStats) | 836 void ThreadHeap<Header>::getScannedStats(HeapStats& scannedStats) |
| 837 { | 837 { |
| 838 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) | 838 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) |
| 839 page->getStats(scannedStats); | 839 page->getStats(scannedStats); |
| 840 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) | 840 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) |
| 841 current->getStats(scannedStats); | 841 current->getStats(scannedStats); |
| 842 } | 842 } |
| 843 #endif | 843 #endif |
| 844 | 844 |
| (...skipping 215 matching lines...) |
| 1060 { | 1060 { |
| 1061 clearObjectStartBitMap(); | 1061 clearObjectStartBitMap(); |
| 1062 heap()->stats().increaseAllocatedSpace(blinkPageSize); | 1062 heap()->stats().increaseAllocatedSpace(blinkPageSize); |
| 1063 Address startOfGap = payload(); | 1063 Address startOfGap = payload(); |
| 1064 for (Address headerAddress = startOfGap; headerAddress < end(); ) { | 1064 for (Address headerAddress = startOfGap; headerAddress < end(); ) { |
| 1065 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress); | 1065 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress); |
| 1066 ASSERT(basicHeader->size() < blinkPagePayloadSize()); | 1066 ASSERT(basicHeader->size() < blinkPagePayloadSize()); |
| 1067 | 1067 |
| 1068 if (basicHeader->isFree()) { | 1068 if (basicHeader->isFree()) { |
| 1069 size_t size = basicHeader->size(); | 1069 size_t size = basicHeader->size(); |
| 1070 #if defined(NDEBUG) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) | 1070 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) |
| 1071 // Zero the memory in the free list header to maintain the | 1071 // Zero the memory in the free list header to maintain the |
| 1072 // invariant that memory on the free list is zero filled. | 1072 // invariant that memory on the free list is zero filled. |
| 1073 // The rest of the memory is already on the free list and is | 1073 // The rest of the memory is already on the free list and is |
| 1074 // therefore already zero filled. | 1074 // therefore already zero filled. |
| 1075 if (size < sizeof(FreeListEntry)) | 1075 if (size < sizeof(FreeListEntry)) |
| 1076 memset(headerAddress, 0, size); | 1076 memset(headerAddress, 0, size); |
| 1077 else | 1077 else |
| 1078 memset(headerAddress, 0, sizeof(FreeListEntry)); | 1078 memset(headerAddress, 0, sizeof(FreeListEntry)); |
| 1079 #endif | 1079 #endif |
| 1080 headerAddress += size; | 1080 headerAddress += size; |
| 1081 continue; | 1081 continue; |
| 1082 } | 1082 } |
| 1083 // At this point we know this is a valid object of type Header | 1083 // At this point we know this is a valid object of type Header |
| 1084 Header* header = static_cast<Header*>(basicHeader); | 1084 Header* header = static_cast<Header*>(basicHeader); |
| 1085 | 1085 |
| 1086 if (!header->isMarked()) { | 1086 if (!header->isMarked()) { |
| 1087 // For ASan we unpoison the specific object when calling the finalizer and | 1087 // For ASan we unpoison the specific object when calling the finalizer and |
| 1088 // poison it again when done to allow the object's own finalizer to operate | 1088 // poison it again when done to allow the object's own finalizer to operate |
| 1089 // on the object, but not have other finalizers be allowed to access it. | 1089 // on the object, but not have other finalizers be allowed to access it. |
| 1090 ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize()); | 1090 ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize()); |
| 1091 finalize(header); | 1091 finalize(header); |
| 1092 size_t size = header->size(); | 1092 size_t size = header->size(); |
| 1093 #if defined(NDEBUG) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) | 1093 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) |
| 1094 // This memory will be added to the freelist. Maintain the invariant | 1094 // This memory will be added to the freelist. Maintain the invariant |
| 1095 // that memory on the freelist is zero filled. | 1095 // that memory on the freelist is zero filled. |
| 1096 memset(headerAddress, 0, size); | 1096 memset(headerAddress, 0, size); |
| 1097 #endif | 1097 #endif |
| 1098 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); | 1098 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); |
| 1099 headerAddress += size; | 1099 headerAddress += size; |
| 1100 continue; | 1100 continue; |
| 1101 } | 1101 } |
| 1102 | 1102 |
| 1103 if (startOfGap != headerAddress) | 1103 if (startOfGap != headerAddress) |
| (...skipping 259 matching lines...) |
| 1363 CallbackStack* next; | 1363 CallbackStack* next; |
| 1364 for (CallbackStack* current = *first; current; current = next) { | 1364 for (CallbackStack* current = *first; current; current = next) { |
| 1365 next = current->m_next; | 1365 next = current->m_next; |
| 1366 delete current; | 1366 delete current; |
| 1367 } | 1367 } |
| 1368 *first = 0; | 1368 *first = 0; |
| 1369 } | 1369 } |
| 1370 | 1370 |
| 1371 CallbackStack::~CallbackStack() | 1371 CallbackStack::~CallbackStack() |
| 1372 { | 1372 { |
| 1373 #ifndef NDEBUG | 1373 #if ENABLE(ASSERT) |
| 1374 clearUnused(); | 1374 clearUnused(); |
| 1375 #endif | 1375 #endif |
| 1376 } | 1376 } |
| 1377 | 1377 |
| 1378 void CallbackStack::clearUnused() | 1378 void CallbackStack::clearUnused() |
| 1379 { | 1379 { |
| 1380 for (size_t i = 0; i < bufferSize; i++) | 1380 for (size_t i = 0; i < bufferSize; i++) |
| 1381 m_buffer[i] = Item(0, 0); | 1381 m_buffer[i] = Item(0, 0); |
| 1382 } | 1382 } |
| 1383 | 1383 |
| 1384 bool CallbackStack::isEmpty() | 1384 bool CallbackStack::isEmpty() |
| 1385 { | 1385 { |
| 1386 return m_current == &(m_buffer[0]) && !m_next; | 1386 return m_current == &(m_buffer[0]) && !m_next; |
| 1387 } | 1387 } |
| 1388 | 1388 |
| 1389 bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor) | 1389 bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor) |
| 1390 { | 1390 { |
| 1391 if (m_current == &(m_buffer[0])) { | 1391 if (m_current == &(m_buffer[0])) { |
| 1392 if (!m_next) { | 1392 if (!m_next) { |
| 1393 #ifndef NDEBUG | 1393 #if ENABLE(ASSERT) |
| 1394 clearUnused(); | 1394 clearUnused(); |
| 1395 #endif | 1395 #endif |
| 1396 return false; | 1396 return false; |
| 1397 } | 1397 } |
| 1398 CallbackStack* nextStack = m_next; | 1398 CallbackStack* nextStack = m_next; |
| 1399 *first = nextStack; | 1399 *first = nextStack; |
| 1400 delete this; | 1400 delete this; |
| 1401 return nextStack->popAndInvokeCallback(first, visitor); | 1401 return nextStack->popAndInvokeCallback(first, visitor); |
| 1402 } | 1402 } |
| 1403 Item* item = --m_current; | 1403 Item* item = --m_current; |
| (...skipping 32 matching lines...) |
| 1436 m_next->invokeOldestCallbacks(visitor); | 1436 m_next->invokeOldestCallbacks(visitor); |
| 1437 | 1437 |
| 1438 // This loop can tolerate entries being added by the callbacks after | 1438 // This loop can tolerate entries being added by the callbacks after |
| 1439 // iteration starts. | 1439 // iteration starts. |
| 1440 for (unsigned i = 0; m_buffer + i < m_current; i++) { | 1440 for (unsigned i = 0; m_buffer + i < m_current; i++) { |
| 1441 Item& item = m_buffer[i]; | 1441 Item& item = m_buffer[i]; |
| 1442 item.callback()(visitor, item.object()); | 1442 item.callback()(visitor, item.object()); |
| 1443 } | 1443 } |
| 1444 } | 1444 } |
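invokeOldestCallbacks recurses into the older chained block first, then walks its own buffer by index against a moving m_current, which is why the comment notes the loop tolerates entries appended while it runs. A self-contained sketch of that index-against-moving-end pattern, using a std::vector instead of the chained fixed-size blocks in this file (the type and names are assumptions):

#include <cstddef>
#include <functional>
#include <utility>
#include <vector>

// Simplified stand-in for CallbackStack: iterating by index tolerates
// callbacks that push more entries while the loop is still running.
struct SimpleCallbackStack {
    std::vector<std::function<void()> > m_items;

    void push(std::function<void()> callback) { m_items.push_back(std::move(callback)); }

    void invokeAll()
    {
        // m_items.size() is re-read every iteration, so entries added by a
        // callback after the loop starts are still invoked. The callback is
        // copied out first because a push inside it may reallocate the
        // vector and invalidate references into its storage.
        for (size_t i = 0; i < m_items.size(); ++i) {
            std::function<void()> callback = m_items[i];
            callback();
        }
    }
};

The real CallbackStack avoids the reallocation issue entirely by chaining fixed-size buffers instead of growing one in place.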
| 1445 | 1445 |
| 1446 #ifndef NDEBUG | 1446 #if ENABLE(ASSERT) |
| 1447 bool CallbackStack::hasCallbackForObject(const void* object) | 1447 bool CallbackStack::hasCallbackForObject(const void* object) |
| 1448 { | 1448 { |
| 1449 for (unsigned i = 0; m_buffer + i < m_current; i++) { | 1449 for (unsigned i = 0; m_buffer + i < m_current; i++) { |
| 1450 Item* item = &m_buffer[i]; | 1450 Item* item = &m_buffer[i]; |
| 1451 if (item->object() == object) { | 1451 if (item->object() == object) { |
| 1452 return true; | 1452 return true; |
| 1453 } | 1453 } |
| 1454 } | 1454 } |
| 1455 if (m_next) | 1455 if (m_next) |
| 1456 return m_next->hasCallbackForObject(object); | 1456 return m_next->hasCallbackForObject(object); |
| (...skipping 86 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1543 virtual void registerWeakMembers(const void* closure, const void* containingObject, WeakPointerCallback callback) OVERRIDE | 1543 virtual void registerWeakMembers(const void* closure, const void* containingObject, WeakPointerCallback callback) OVERRIDE |
| 1544 { | 1544 { |
| 1545 Heap::pushWeakObjectPointerCallback(const_cast<void*>(closure), const_cast<void*>(containingObject), callback); | 1545 Heap::pushWeakObjectPointerCallback(const_cast<void*>(closure), const_cast<void*>(containingObject), callback); |
| 1546 } | 1546 } |
| 1547 | 1547 |
| 1548 virtual void registerWeakTable(const void* closure, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback) | 1548 virtual void registerWeakTable(const void* closure, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback) |
| 1549 { | 1549 { |
| 1550 Heap::registerWeakTable(const_cast<void*>(closure), iterationCallback, iterationDoneCallback); | 1550 Heap::registerWeakTable(const_cast<void*>(closure), iterationCallback, iterationDoneCallback); |
| 1551 } | 1551 } |
| 1552 | 1552 |
| 1553 #ifndef NDEBUG | 1553 #if ENABLE(ASSERT) |
| 1554 virtual bool weakTableRegistered(const void* closure) | 1554 virtual bool weakTableRegistered(const void* closure) |
| 1555 { | 1555 { |
| 1556 return Heap::weakTableRegistered(closure); | 1556 return Heap::weakTableRegistered(closure); |
| 1557 } | 1557 } |
| 1558 #endif | 1558 #endif |
| 1559 | 1559 |
| 1560 virtual bool isMarked(const void* objectPointer) OVERRIDE | 1560 virtual bool isMarked(const void* objectPointer) OVERRIDE |
| 1561 { | 1561 { |
| 1562 return FinalizedHeapObjectHeader::fromPayload(objectPointer)->isMarked(); | 1562 return FinalizedHeapObjectHeader::fromPayload(objectPointer)->isMarked(); |
| 1563 } | 1563 } |
| (...skipping 162 matching lines...) |
| 1726 if (page) | 1726 if (page) |
| 1727 return page; | 1727 return page; |
| 1728 } | 1728 } |
| 1729 return 0; | 1729 return 0; |
| 1730 } | 1730 } |
| 1731 | 1731 |
| 1732 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) | 1732 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) |
| 1733 { | 1733 { |
| 1734 ASSERT(ThreadState::isAnyThreadInGC()); | 1734 ASSERT(ThreadState::isAnyThreadInGC()); |
| 1735 | 1735 |
| 1736 #ifdef NDEBUG | 1736 #if !ENABLE(ASSERT) |
| 1737 if (s_heapDoesNotContainCache->lookup(address)) | 1737 if (s_heapDoesNotContainCache->lookup(address)) |
| 1738 return 0; | 1738 return 0; |
| 1739 #endif | 1739 #endif |
| 1740 | 1740 |
| 1741 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 1741 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
| 1742 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 1742 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
| 1743 if ((*it)->checkAndMarkPointer(visitor, address)) { | 1743 if ((*it)->checkAndMarkPointer(visitor, address)) { |
| 1744 // Pointer was in a page of that thread. If it actually pointed | 1744 // Pointer was in a page of that thread. If it actually pointed |
| 1745 // into an object then that object was found and marked. | 1745 // into an object then that object was found and marked. |
| 1746 ASSERT(!s_heapDoesNotContainCache->lookup(address)); | 1746 ASSERT(!s_heapDoesNotContainCache->lookup(address)); |
| 1747 s_lastGCWasConservative = true; | 1747 s_lastGCWasConservative = true; |
| 1748 return address; | 1748 return address; |
| 1749 } | 1749 } |
| 1750 } | 1750 } |
| 1751 | 1751 |
| 1752 #ifdef NDEBUG | 1752 #if !ENABLE(ASSERT) |
| 1753 s_heapDoesNotContainCache->addEntry(address, true); | 1753 s_heapDoesNotContainCache->addEntry(address, true); |
| 1754 #else | 1754 #else |
| 1755 if (!s_heapDoesNotContainCache->lookup(address)) | 1755 if (!s_heapDoesNotContainCache->lookup(address)) |
| 1756 s_heapDoesNotContainCache->addEntry(address, true); | 1756 s_heapDoesNotContainCache->addEntry(address, true); |
| 1757 #endif | 1757 #endif |
| 1758 return 0; | 1758 return 0; |
| 1759 } | 1759 } |
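checkAndMarkPointer uses s_heapDoesNotContainCache as a negative cache: addresses proven not to point into the managed heap are remembered so later conservative scans skip the per-thread page walk, and ASSERT-enabled builds deliberately skip the early return so the ASSERT can verify the cache never holds an address that is actually in the heap. A rough sketch of that lookup pattern, with an unordered_set and placeholder functions standing in for the real cache and containment test:

#include <cstdint>
#include <unordered_set>

using Address = uint8_t*;

// Placeholders for the real per-heap containment test and marking step.
bool heapContains(Address address);
void markObjectAt(Address address);

// Addresses already proven to lie outside the managed heap.
static std::unordered_set<Address> notInHeapCache;

// Returns the address if it was found and marked, null otherwise.
Address checkAndMarkConservatively(Address address)
{
    // Fast path: a cache hit means this address cannot be a heap pointer.
    if (notInHeapCache.count(address))
        return nullptr;

    if (heapContains(address)) {
        markObjectAt(address);
        return address;
    }

    // Remember the miss so the next conservative scan skips the slow path.
    notInHeapCache.insert(address);
    return nullptr;
}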
| 1760 | 1760 |
| 1761 #if ENABLE(GC_TRACING) | 1761 #if ENABLE(GC_TRACING) |
| 1762 const GCInfo* Heap::findGCInfo(Address address) | 1762 const GCInfo* Heap::findGCInfo(Address address) |
| (...skipping 79 matching lines...) |
| 1842 { | 1842 { |
| 1843 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(&s_ephemeronStack); | 1843 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(&s_ephemeronStack); |
| 1844 *slot = CallbackStack::Item(table, iterationCallback); | 1844 *slot = CallbackStack::Item(table, iterationCallback); |
| 1845 | 1845 |
| 1846 // We use the callback stack of weak cell pointers for the ephemeronIterationDone callbacks. | 1846 // We use the callback stack of weak cell pointers for the ephemeronIterationDone callbacks. |
| 1847 // These callbacks are called right after marking and before any thread commences execution | 1847 // These callbacks are called right after marking and before any thread commences execution |
| 1848 // so it suits our needs for telling the ephemerons that the iteration is done. | 1848 // so it suits our needs for telling the ephemerons that the iteration is done. |
| 1849 pushWeakCellPointerCallback(static_cast<void**>(table), iterationDoneCallback); | 1849 pushWeakCellPointerCallback(static_cast<void**>(table), iterationDoneCallback); |
| 1850 } | 1850 } |
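As the comment above explains, registerWeakTable queues two callbacks per ephemeron table: the iteration callback goes on the dedicated ephemeron stack, which may be drained repeatedly while marking makes progress, while the iteration-done callback rides on the weak-cell stack because that stack drains exactly once, after marking and before mutator threads resume. A hedged sketch of that two-queue registration; the queue types, the simplified callback signature, and the function name are assumptions for illustration, not the patch's API:

#include <utility>
#include <vector>

// Simplified callback signature standing in for EphemeronCallback.
typedef void (*TableCallback)(void* table);

// Two illustrative queues: one drained repeatedly during marking,
// one drained exactly once after marking completes.
static std::vector<std::pair<void*, TableCallback> > ephemeronQueue;
static std::vector<std::pair<void*, TableCallback> > postMarkingQueue;

void registerWeakTableSketch(void* table, TableCallback iteration, TableCallback iterationDone)
{
    // Re-run while marking can still discover newly reachable keys.
    ephemeronQueue.push_back(std::make_pair(table, iteration));
    // Run once, after marking, so the table can prune its dead entries.
    postMarkingQueue.push_back(std::make_pair(table, iterationDone));
}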
| 1851 | 1851 |
| 1852 #ifndef NDEBUG | 1852 #if ENABLE(ASSERT) |
| 1853 bool Heap::weakTableRegistered(const void* table) | 1853 bool Heap::weakTableRegistered(const void* table) |
| 1854 { | 1854 { |
| 1855 ASSERT(s_ephemeronStack); | 1855 ASSERT(s_ephemeronStack); |
| 1856 return s_ephemeronStack->hasCallbackForObject(table); | 1856 return s_ephemeronStack->hasCallbackForObject(table); |
| 1857 } | 1857 } |
| 1858 #endif | 1858 #endif |
| 1859 | 1859 |
| 1860 void Heap::prepareForGC() | 1860 void Heap::prepareForGC() |
| 1861 { | 1861 { |
| 1862 ASSERT(ThreadState::isAnyThreadInGC()); | 1862 ASSERT(ThreadState::isAnyThreadInGC()); |
| (...skipping 139 matching lines...) |
| 2002 template class ThreadHeap<HeapObjectHeader>; | 2002 template class ThreadHeap<HeapObjectHeader>; |
| 2003 | 2003 |
| 2004 Visitor* Heap::s_markingVisitor; | 2004 Visitor* Heap::s_markingVisitor; |
| 2005 CallbackStack* Heap::s_markingStack; | 2005 CallbackStack* Heap::s_markingStack; |
| 2006 CallbackStack* Heap::s_weakCallbackStack; | 2006 CallbackStack* Heap::s_weakCallbackStack; |
| 2007 CallbackStack* Heap::s_ephemeronStack; | 2007 CallbackStack* Heap::s_ephemeronStack; |
| 2008 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; | 2008 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; |
| 2009 bool Heap::s_shutdownCalled = false; | 2009 bool Heap::s_shutdownCalled = false; |
| 2010 bool Heap::s_lastGCWasConservative = false; | 2010 bool Heap::s_lastGCWasConservative = false; |
| 2011 } | 2011 } |