Chromium Code Reviews
| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 16 matching lines...) | |
| 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 29 */ | 29 */ |
| 30 | 30 |
| 31 #include "config.h" | 31 #include "config.h" |
| 32 #include "platform/heap/Heap.h" | 32 #include "platform/heap/Heap.h" |
| 33 | 33 |
| 34 #include "platform/TraceEvent.h" | 34 #include "platform/TraceEvent.h" |
| 35 #include "platform/heap/ThreadState.h" | 35 #include "platform/heap/ThreadState.h" |
| 36 #include "public/platform/Platform.h" | 36 #include "public/platform/Platform.h" |
| 37 #include "wtf/AddressSpaceRandomization.h" | |
| 37 #include "wtf/Assertions.h" | 38 #include "wtf/Assertions.h" |
| 38 #include "wtf/LeakAnnotations.h" | 39 #include "wtf/LeakAnnotations.h" |
| 39 #include "wtf/PassOwnPtr.h" | 40 #include "wtf/PassOwnPtr.h" |
| 40 #if ENABLE(GC_TRACING) | 41 #if ENABLE(GC_TRACING) |
| 41 #include "wtf/HashMap.h" | 42 #include "wtf/HashMap.h" |
| 42 #include "wtf/HashSet.h" | 43 #include "wtf/HashSet.h" |
| 43 #include "wtf/text/StringBuilder.h" | 44 #include "wtf/text/StringBuilder.h" |
| 44 #include "wtf/text/StringHash.h" | 45 #include "wtf/text/StringHash.h" |
| 45 #include <stdio.h> | 46 #include <stdio.h> |
| 46 #include <utility> | 47 #include <utility> |
| (...skipping 118 matching lines...) | |
| 165 } | 166 } |
| 166 | 167 |
| 167 Address base() const { return m_base; } | 168 Address base() const { return m_base; } |
| 168 size_t size() const { return m_size; } | 169 size_t size() const { return m_size; } |
| 169 | 170 |
| 170 private: | 171 private: |
| 171 Address m_base; | 172 Address m_base; |
| 172 size_t m_size; | 173 size_t m_size; |
| 173 }; | 174 }; |
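The bodies of MemoryRegion's commit(), decommit(), and release() fall inside the skipped hunk above. For readers following the review, here is a minimal POSIX-only sketch of what such a region typically does; the names and exact calls are illustrative assumptions, not Blink's actual implementation:

```cpp
#include <sys/mman.h>
#include <cassert>
#include <cstddef>

// Sketch of a reserve/commit/decommit region. PROT_NONE reserves
// address space without committing memory; commit enables access;
// decommit drops access and lets the kernel discard the pages.
struct RegionSketch {
    char* base;
    size_t size;

    static RegionSketch reserve(size_t size)
    {
        void* p = mmap(nullptr, size, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
        assert(p != MAP_FAILED);
        return RegionSketch{static_cast<char*>(p), size};
    }

    bool commit() { return !mprotect(base, size, PROT_READ | PROT_WRITE); }

    void decommit()
    {
        // Keep the address range reserved but release the backing pages.
        madvise(base, size, MADV_DONTNEED);
        mprotect(base, size, PROT_NONE);
    }

    void release() { munmap(base, size); }
};
```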
| 174 | 175 |
| 176 // A PageMemoryRegion represents a chunk of reserved virtual address | |
| 177 // space containing a number of blink heap pages. On Windows, reserved | |
| 178 // virtual address space can only be given back to the system as a | |
| 179 // whole. The PageMemoryRegion allows us to do that by keeping track | |
| 180 // of the number of pages using it in order to be able to release all | |
| 181 // of the virtual address space when there are no more pages using it. | |
| 182 class PageMemoryRegion : public MemoryRegion { | |
| 183 public: | |
| 184 ~PageMemoryRegion() | |
| 185 { | |
| 186 release(); | |
| 187 } | |
| 188 | |
| 189 void pageRemoved() | |
| 190 { | |
| 191 if (!--m_numPages) | |
| 192 delete this; | |
| 193 } | |
| 194 | |
| 195 static PageMemoryRegion* allocate(size_t size, unsigned numPages) | |
| 196 { | |
| 197 ASSERT(Heap::heapDoesNotContainCacheIsEmpty()); | |
| 198 | |
| 199 // Compute a random blink page aligned address for the page memory | |
| 200 // region and attempt to get the memory there. | |
| 201 Address randomAddress = reinterpret_cast<Address>(WTF::getRandomPageBase()); | |
| 202 Address alignedRandomAddress = roundToBlinkPageBoundary(randomAddress); | |
| 203 | |
| 204 #if OS(POSIX) | |
| 205 Address base = static_cast<Address>(mmap(alignedRandomAddress, size, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0)); | |
| 206 RELEASE_ASSERT(base != MAP_FAILED); | |
| 207 if (base == roundToBlinkPageBoundary(base)) | |
| 208 return new PageMemoryRegion(base, size, numPages); | |
| 209 | |
| 210 // We failed to get a blink page aligned chunk of | |
| 211 // memory. Unmap the chunk that we got and fall back to | |
| 212 // overallocating and selecting an aligned sub part of what | |
| 213 // we allocate. | |
| 214 int error = munmap(base, size); | |
| 215 RELEASE_ASSERT(!error); | |
| 216 size_t allocationSize = size + blinkPageSize; | |
| 217 base = static_cast<Address>(mmap(alignedRandomAddress, allocationSize, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0)); | |
| 218 RELEASE_ASSERT(base != MAP_FAILED); | |
| 219 | |
| 220 Address end = base + allocationSize; | |
| 221 Address alignedBase = roundToBlinkPageBoundary(base); | |
| 222 Address regionEnd = alignedBase + size; | |
| 223 | |
| 224 // If the allocated memory was not blink page aligned release | |
| 225 // the memory before the aligned address. | |
| 226 if (alignedBase != base) | |
| 227 MemoryRegion(base, alignedBase - base).release(); | |
| 228 | |
| 229 // Free the additional memory at the end of the page if any. | |
| 230 if (regionEnd < end) | |
| 231 MemoryRegion(regionEnd, end - regionEnd).release(); | |
| 232 | |
| 233 return new PageMemoryRegion(alignedBase, size, numPages); | |
| 234 #else | |
| 235 Address base = static_cast<Address>(VirtualAlloc(alignedRandomAddress, size, MEM_RESERVE, PAGE_NOACCESS)); | |
| 236 if (base) { | |
| 237 ASSERT(base == alignedRandomAddress); | |
| 238 return new PageMemoryRegion(base, size, numPages); | |
| 239 } | |
| 240 | |
| 241 // We failed to get the random aligned address that we asked | |
| 242 // for. Fall back to overallocating. On Windows it is | |
| 243 // impossible to partially release a region of memory | |
| 244 // allocated by VirtualAlloc. To avoid wasting virtual address | |
| 245 // space we attempt to release a large region of memory | |
| 246 // returned as a whole and then allocate an aligned region | |
| 247 // inside this larger region. | |
| 248 size_t allocationSize = size + blinkPageSize; | |
| 249 for (int attempt = 0; attempt < 3; attempt++) { | |
| 250 base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS)); | |
| 251 RELEASE_ASSERT(base); | |
| 252 VirtualFree(base, 0, MEM_RELEASE); | |
| 253 | |
| 254 Address alignedBase = roundToBlinkPageBoundary(base); | |
| 255 base = static_cast<Address>(VirtualAlloc(alignedBase, size, MEM_RESERVE, PAGE_NOACCESS)); | |
| 256 if (base) { | |
| 257 ASSERT(base == alignedBase); | |
| 258 return new PageMemoryRegion(alignedBase, size, numPages); | |
| 259 } | |
| 260 } | |
| 261 | |
| 262 // We failed to avoid wasting virtual address space after | |
| 263 // several attempts. | |
| 264 base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS)); | |
| 265 RELEASE_ASSERT(base); | |
| 266 | |
| 267 // FIXME: If base is by accident blink page size aligned | |
| 268 // here then we can create two pages out of reserved | |
| 269 // space. Do this. | |
| 270 Address alignedBase = roundToBlinkPageBoundary(base); | |
| 271 | |
| 272 return new PageMemoryRegion(alignedBase, size, numPages); | |
| 273 #endif | |
| 274 } | |
| 275 | |
| 276 private: | |
| 277 PageMemoryRegion(Address base, size_t size, unsigned numPages) | |
| 278 : MemoryRegion(base, size) | |
| 279 , m_numPages(numPages) | |
| 280 { | |
| 281 } | |
| 282 | |
| 283 unsigned m_numPages; | |
| 284 }; | |
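For reference, the POSIX fallback path in PageMemoryRegion::allocate above boils down to a standard over-allocate-and-trim alignment trick. A standalone sketch under that reading (illustrative names; assumes alignment is a power of two):

```cpp
#include <sys/mman.h>
#include <cassert>
#include <cstdint>
#include <cstddef>

// Reserve `size` bytes aligned to `alignment` by reserving
// size + alignment, then unmapping the unaligned head and unused tail.
char* reserveAligned(size_t size, size_t alignment)
{
    size_t allocationSize = size + alignment;
    void* raw = mmap(nullptr, allocationSize, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
    assert(raw != MAP_FAILED);

    uintptr_t base = reinterpret_cast<uintptr_t>(raw);
    uintptr_t aligned = (base + alignment - 1) & ~(alignment - 1);

    // Unmap the unaligned prefix, if any.
    if (aligned != base)
        munmap(raw, aligned - base);

    // Unmap whatever remains past the end of the aligned region.
    uintptr_t end = base + allocationSize;
    if (aligned + size < end)
        munmap(reinterpret_cast<void*>(aligned + size), end - (aligned + size));

    return reinterpret_cast<char*>(aligned);
}
```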
| 285 | |
| 175 // Representation of the memory used for a Blink heap page. | 286 // Representation of the memory used for a Blink heap page. |
| 176 // | 287 // |
| 177 // The representation keeps track of two memory regions: | 288 // The representation keeps track of two memory regions: |
| 178 // | 289 // |
| 179 // 1. The virtual memory reserved from the sytem in order to be able | 290 // 1. The virtual memory reserved from the system in order to be able |
| 180 // to free all the virtual memory reserved on destruction. | 291 // to free all the virtual memory reserved. Multiple PageMemory |
| 292 // instances can share the same reserved memory region and | |
| 293 // therefore notify the reserved memory region on destruction so | |
| 294 // that the system memory can be given back when all PageMemory | |
| 295 // instances for that memory are gone. | |
| 181 // | 296 // |
| 182 // 2. The writable memory (a sub-region of the reserved virtual | 297 // 2. The writable memory (a sub-region of the reserved virtual |
| 183 // memory region) that is used for the actual heap page payload. | 298 // memory region) that is used for the actual heap page payload. |
| 184 // | 299 // |
| 185 // Guard pages are created before and after the writable memory. | 300 // Guard pages are created before and after the writable memory. |
| 186 class PageMemory { | 301 class PageMemory { |
| 187 public: | 302 public: |
| 188 ~PageMemory() | 303 ~PageMemory() |
| 189 { | 304 { |
| 190 __lsan_unregister_root_region(m_writable.base(), m_writable.size()); | 305 __lsan_unregister_root_region(m_writable.base(), m_writable.size()); |
| 191 m_reserved.release(); | 306 m_reserved->pageRemoved(); |
| 192 } | 307 } |
| 193 | 308 |
| 194 bool commit() WARN_UNUSED_RETURN { return m_writable.commit(); } | 309 bool commit() WARN_UNUSED_RETURN { return m_writable.commit(); } |
| 195 void decommit() { m_writable.decommit(); } | 310 void decommit() { m_writable.decommit(); } |
| 196 | 311 |
| 197 Address writableStart() { return m_writable.base(); } | 312 Address writableStart() { return m_writable.base(); } |
| 198 | 313 |
| 199 // Allocate a virtual address space for the blink page with the | 314 static PageMemory* setupPageMemoryInRegion(PageMemoryRegion* region, size_t pageOffset, size_t payloadSize) |
| 315 { | |
| 316 // Setup the payload one OS page into the page memory. The | |
| 317 // first os page is the guard page. | |
| 318 Address payloadAddress = region->base() + pageOffset + osPageSize(); | |
| 319 return new PageMemory(region, MemoryRegion(payloadAddress, payloadSize)); |
| 320 } | |
| 321 | |
| 322 // Allocate a virtual address space for one blink page with the | |
| 200 // following layout: | 323 // following layout: |
| 201 // | 324 // |
| 202 // [ guard os page | ... payload ... | guard os page ] | 325 // [ guard os page | ... payload ... | guard os page ] |
| 203 // ^---{ aligned to blink page size } | 326 // ^---{ aligned to blink page size } |
| 204 // | 327 // |
| 205 static PageMemory* allocate(size_t payloadSize) | 328 static PageMemory* allocate(size_t payloadSize) |
| 206 { | 329 { |
| 207 ASSERT(payloadSize > 0); | 330 ASSERT(payloadSize > 0); |
| 208 | 331 |
| 209 // Virtual memory allocation routines operate in OS page sizes. | 332 // Virtual memory allocation routines operate in OS page sizes. |
| 210 // Round up the requested size to nearest os page size. | 333 // Round up the requested size to nearest os page size. |
| 211 payloadSize = roundToOsPageSize(payloadSize); | 334 payloadSize = roundToOsPageSize(payloadSize); |
| 212 | 335 |
| 213 // Overallocate by blinkPageSize and 2 times OS page size to | 336 // Overallocate by 2 times OS page size to have space for a |
| 214 // ensure a chunk of memory which is blinkPageSize aligned and | 337 // guard page at the beginning and end of blink heap page. |
| 215 // has a system page before and after to use for guarding. We | 338 size_t allocationSize = payloadSize + 2 * osPageSize(); |
| 216 // unmap the excess memory before returning. | 339 PageMemoryRegion* pageMemoryRegion = PageMemoryRegion::allocate(allocationSize, 1); |
| 217 size_t allocationSize = payloadSize + 2 * osPageSize() + blinkPageSize; | 340 PageMemory* storage = setupPageMemoryInRegion(pageMemoryRegion, 0, payloadSize); |
| 218 | 341 RELEASE_ASSERT(storage->commit()); |
| 219 ASSERT(Heap::heapDoesNotContainCacheIsEmpty()); | |
| 220 #if OS(POSIX) | |
| 221 Address base = static_cast<Address>(mmap(0, allocationSize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0)); | |
| 222 RELEASE_ASSERT(base != MAP_FAILED); | |
| 223 | |
| 224 Address end = base + allocationSize; | |
| 225 Address alignedBase = roundToBlinkPageBoundary(base); | |
| 226 Address payloadBase = alignedBase + osPageSize(); | |
| 227 Address payloadEnd = payloadBase + payloadSize; | |
| 228 Address blinkPageEnd = payloadEnd + osPageSize(); | |
| 229 | |
| 230 // If the allocate memory was not blink page aligned release | |
| 231 // the memory before the aligned address. | |
| 232 if (alignedBase != base) | |
| 233 MemoryRegion(base, alignedBase - base).release(); | |
| 234 | |
| 235 // Create guard pages by decommiting an OS page before and | |
| 236 // after the payload. | |
| 237 MemoryRegion(alignedBase, osPageSize()).decommit(); | |
| 238 MemoryRegion(payloadEnd, osPageSize()).decommit(); | |
| 239 | |
| 240 // Free the additional memory at the end of the page if any. | |
| 241 if (blinkPageEnd < end) | |
| 242 MemoryRegion(blinkPageEnd, end - blinkPageEnd).release(); | |
| 243 | |
| 244 return new PageMemory(MemoryRegion(alignedBase, blinkPageEnd - alignedBase), MemoryRegion(payloadBase, payloadSize)); | |
| 245 #else | |
| 246 Address base = 0; | |
| 247 Address alignedBase = 0; | |
| 248 | |
| 249 // On Windows it is impossible to partially release a region | |
| 250 // of memory allocated by VirtualAlloc. To avoid wasting | |
| 251 // virtual address space we attempt to release a large region | |
| 252 // of memory returned as a whole and then allocate an aligned | |
| 253 // region inside this larger region. | |
| 254 for (int attempt = 0; attempt < 3; attempt++) { | |
| 255 base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS)); | |
| 256 RELEASE_ASSERT(base); | |
| 257 VirtualFree(base, 0, MEM_RELEASE); | |
| 258 | |
| 259 alignedBase = roundToBlinkPageBoundary(base); | |
| 260 base = static_cast<Address>(VirtualAlloc(alignedBase, payloadSize + 2 * osPageSize(), MEM_RESERVE, PAGE_NOACCESS)); | |
| 261 if (base) { | |
| 262 RELEASE_ASSERT(base == alignedBase); | |
| 263 allocationSize = payloadSize + 2 * osPageSize(); | |
| 264 break; | |
| 265 } | |
| 266 } | |
| 267 | |
| 268 if (!base) { | |
| 269 // We failed to avoid wasting virtual address space after | |
| 270 // several attempts. | |
| 271 base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS)); | |
| 272 RELEASE_ASSERT(base); | |
| 273 | |
| 274 // FIXME: If base is by accident blink page size aligned | |
| 275 // here then we can create two pages out of reserved | |
| 276 // space. Do this. | |
| 277 alignedBase = roundToBlinkPageBoundary(base); | |
| 278 } | |
| 279 | |
| 280 Address payloadBase = alignedBase + osPageSize(); | |
| 281 PageMemory* storage = new PageMemory(MemoryRegion(base, allocationSize), MemoryRegion(payloadBase, payloadSize)); | |
| 282 bool res = storage->commit(); | |
| 283 RELEASE_ASSERT(res); | |
| 284 return storage; | 342 return storage; |
| 285 #endif | |
| 286 } | 343 } |
| 287 | 344 |
| 288 private: | 345 private: |
| 289 PageMemory(const MemoryRegion& reserved, const MemoryRegion& writable) | 346 PageMemory(PageMemoryRegion* reserved, const MemoryRegion& writable) |
| 290 : m_reserved(reserved) | 347 : m_reserved(reserved) |
| 291 , m_writable(writable) | 348 , m_writable(writable) |
| 292 { | 349 { |
| 293 ASSERT(reserved.contains(writable)); | 350 ASSERT(reserved->contains(writable)); |
| 294 | 351 |
| 295 // Register the writable area of the memory as part of the LSan root set. | 352 // Register the writable area of the memory as part of the LSan root set. |
| 296 // Only the writable area is mapped and can contain C++ objects. Those | 353 // Only the writable area is mapped and can contain C++ objects. Those |
| 297 // C++ objects can contain pointers to objects outside of the heap and | 354 // C++ objects can contain pointers to objects outside of the heap and |
| 298 // should therefore be part of the LSan root set. | 355 // should therefore be part of the LSan root set. |
| 299 __lsan_register_root_region(m_writable.base(), m_writable.size()); | 356 __lsan_register_root_region(m_writable.base(), m_writable.size()); |
| 300 } | 357 } |
| 301 | 358 |
| 302 MemoryRegion m_reserved; | 359 |
| 360 PageMemoryRegion* m_reserved; | |
| 303 MemoryRegion m_writable; | 361 MemoryRegion m_writable; |
| 304 }; | 362 }; |
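The address arithmetic in setupPageMemoryInRegion is worth spelling out: each blink page slot in a region is laid out as [guard | payload | guard]. A small sketch of that arithmetic under assumed constants (Blink's real values are defined elsewhere in the heap headers and may differ):

```cpp
#include <cstddef>

const size_t kOsPageSize = 4096;       // assumed
const size_t kBlinkPageSize = 1 << 17; // assumed: 128 KiB

// Payload of a blink page slot starting at `pageOffset` within a
// region: skip the slot's leading guard page; the trailing guard page
// sits immediately after the payload.
char* payloadStart(char* regionBase, size_t pageOffset)
{
    return regionBase + pageOffset + kOsPageSize;
}

// The payload is the slot minus one guard page at each end.
size_t payloadSize()
{
    return kBlinkPageSize - 2 * kOsPageSize;
}
```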
| 305 | 363 |
| 306 class GCScope { | 364 class GCScope { |
| 307 public: | 365 public: |
| 308 explicit GCScope(ThreadState::StackState stackState) | 366 explicit GCScope(ThreadState::StackState stackState) |
| 309 : m_state(ThreadState::current()) | 367 : m_state(ThreadState::current()) |
| 310 , m_safePointScope(stackState) | 368 , m_safePointScope(stackState) |
| 311 , m_parkedAllThreads(false) | 369 , m_parkedAllThreads(false) |
| 312 { | 370 { |
| (...skipping 400 matching lines...) | |
| 713 return storage; | 771 return storage; |
| 714 | 772 |
| 715 // Failed to commit pooled storage. Release it. | 773 // Failed to commit pooled storage. Release it. |
| 716 delete storage; | 774 delete storage; |
| 717 } | 775 } |
| 718 | 776 |
| 719 return 0; | 777 return 0; |
| 720 } | 778 } |
| 721 | 779 |
| 722 template<typename Header> | 780 template<typename Header> |
| 723 void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* unused) | 781 void ThreadHeap<Header>::addPageMemoryToPool(PageMemory* storage) |
| 724 { | 782 { |
| 725 flushHeapContainsCache(); | 783 flushHeapContainsCache(); |
| 726 PageMemory* storage = unused->storage(); | |
| 727 PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool); | 784 PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool); |
| 728 m_pagePool = entry; | 785 m_pagePool = entry; |
| 786 } | |
| 787 | |
| 788 template <typename Header> | |
| 789 void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* page) | |
| 790 { | |
| 791 PageMemory* storage = page->storage(); | |
| 729 storage->decommit(); | 792 storage->decommit(); |
| 793 addPageMemoryToPool(storage); | |
| 730 } | 794 } |
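The PagePoolEntry list that addPageMemoryToPool pushes onto (and that takePageFromPool, in the hunk above, pops from) is a plain LIFO free list of decommitted page storage. A simplified sketch with stand-in types:

```cpp
// Stand-in for PageMemory; only its identity matters to the pool.
struct PageMemorySketch;

struct PoolEntry {
    PageMemorySketch* storage;
    PoolEntry* next;
};

struct PagePool {
    PoolEntry* head = nullptr;

    // Mirrors addPageMemoryToPool: push decommitted storage.
    void push(PageMemorySketch* storage) { head = new PoolEntry{storage, head}; }

    // Mirrors the pop half of takePageFromPool (the real version also
    // tries to commit the storage and deletes it if commit fails).
    PageMemorySketch* pop()
    {
        if (!head)
            return nullptr;
        PoolEntry* entry = head;
        head = entry->next;
        PageMemorySketch* storage = entry->storage;
        delete entry;
        return storage;
    }
};
```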
| 731 | 795 |
| 732 template<typename Header> | 796 template<typename Header> |
| 733 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo) | 797 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo) |
| 734 { | 798 { |
| 735 Heap::flushHeapDoesNotContainCache(); | 799 Heap::flushHeapDoesNotContainCache(); |
| 736 PageMemory* pageMemory = takePageFromPool(); | 800 PageMemory* pageMemory = takePageFromPool(); |
| 737 if (!pageMemory) { | 801 if (!pageMemory) { |
| 738 pageMemory = PageMemory::allocate(blinkPagePayloadSize()); | 802 // Allocate a memory region for blinkPagesPerRegion pages that |
| 803 // will each have the following layout. | |
|
haraken 2014/06/27 06:50:17
I guess "each" is confusing. Also "... payload ...
Mads Ager (chromium) 2014/06/27 07:01:04
The layout that we get in a region is:
[guard |
haraken 2014/06/27 07:16:43
Now I understood!
| |
| 804 // | |
| 805 // [ guard os page | ... payload ... | guard os page ] | |
| 806 // ^---{ aligned to blink page size } | |
| 807 PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion); |
| 808 // Setup the PageMemory object for each of the pages in the | |
| 809 // region. | |
| 810 size_t offset = 0; | |
| 811 for (size_t i = 0; i < blinkPagesPerRegion; i++) { | |
| 812 addPageMemoryToPool(PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize())); |
| 813 offset += blinkPageSize; | |
| 814 } | |
| 815 pageMemory = takePageFromPool(); | |
| 739 RELEASE_ASSERT(pageMemory); | 816 RELEASE_ASSERT(pageMemory); |
| 740 } | 817 } |
| 741 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo); | 818 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo); |
| 742 // FIXME: Oilpan: Linking new pages into the front of the list is | 819 // FIXME: Oilpan: Linking new pages into the front of the list is |
| 743 // crucial when performing allocations during finalization because | 820 // crucial when performing allocations during finalization because |
| 744 // it ensures that those pages are not swept in the current GC | 821 // it ensures that those pages are not swept in the current GC |
| 745 // round. We should create a separate page list for that to | 822 // round. We should create a separate page list for that to |
| 746 // separate out the pages allocated during finalization clearly | 823 // separate out the pages allocated during finalization clearly |
| 747 // from the pages currently being swept. | 824 // from the pages currently being swept. |
| 748 page->link(&m_firstPage); | 825 page->link(&m_firstPage); |
| (...skipping 1152 matching lines...) | |
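As a sanity check on the numbers in the region-carving loop of allocatePage, under assumed constants (4 KiB OS pages, 128 KiB blink pages, 10 pages per region; the actual constants live in the heap headers and may differ):

```cpp
#include <cassert>
#include <cstddef>

int main()
{
    const size_t osPageSize = 4096;        // assumed
    const size_t blinkPageSize = 1 << 17;  // assumed: 128 KiB
    const size_t blinkPagesPerRegion = 10; // assumed

    // One reservation now covers a whole region of pages...
    size_t regionSize = blinkPageSize * blinkPagesPerRegion;
    assert(regionSize == 10 * 131072); // 1,310,720 bytes reserved at once

    // ...and each page's payload loses one guard page at each end.
    size_t payloadSize = blinkPageSize - 2 * osPageSize;
    assert(payloadSize == 131072 - 8192); // 122,880 usable bytes per page
    return 0;
}
```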
| 1901 template class ThreadHeap<HeapObjectHeader>; | 1978 template class ThreadHeap<HeapObjectHeader>; |
| 1902 | 1979 |
| 1903 Visitor* Heap::s_markingVisitor; | 1980 Visitor* Heap::s_markingVisitor; |
| 1904 CallbackStack* Heap::s_markingStack; | 1981 CallbackStack* Heap::s_markingStack; |
| 1905 CallbackStack* Heap::s_weakCallbackStack; | 1982 CallbackStack* Heap::s_weakCallbackStack; |
| 1906 CallbackStack* Heap::s_ephemeronStack; | 1983 CallbackStack* Heap::s_ephemeronStack; |
| 1907 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; | 1984 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; |
| 1908 bool Heap::s_shutdownCalled = false; | 1985 bool Heap::s_shutdownCalled = false; |
| 1909 bool Heap::s_lastGCWasConservative = false; | 1986 bool Heap::s_lastGCWasConservative = false; |
| 1910 } | 1987 } |
| OLD | NEW |