Chromium Code Reviews
| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 51 matching lines...) | |
| 62 // Double precision floats are more efficient when 8 byte aligned, so we 8 byte | 62 // Double precision floats are more efficient when 8 byte aligned, so we 8 byte |
| 63 // align all allocations even on 32 bit. | 63 // align all allocations even on 32 bit. |
| 64 const size_t allocationGranularity = 8; | 64 const size_t allocationGranularity = 8; |
| 65 const size_t allocationMask = allocationGranularity - 1; | 65 const size_t allocationMask = allocationGranularity - 1; |
| 66 const size_t objectStartBitMapSize = (blinkPageSize + ((8 * allocationGranularity) - 1)) / (8 * allocationGranularity); | 66 const size_t objectStartBitMapSize = (blinkPageSize + ((8 * allocationGranularity) - 1)) / (8 * allocationGranularity); |
| 67 const size_t reservedForObjectBitMap = ((objectStartBitMapSize + allocationMask) & ~allocationMask); | 67 const size_t reservedForObjectBitMap = ((objectStartBitMapSize + allocationMask) & ~allocationMask); |
| 68 const size_t maxHeapObjectSize = 1 << 27; | 68 const size_t maxHeapObjectSize = 1 << 27; |
| 69 | 69 |
| 70 const size_t markBitMask = 1; | 70 const size_t markBitMask = 1; |
| 71 const size_t freeListMask = 2; | 71 const size_t freeListMask = 2; |
| 72 const size_t debugBitMask = 4; | 72 // The dead bit is used for objects that have gone through a GC marking, but did |
| 73 // not get swept before a new GC started. In that case we set the dead bit on | |
| 74 // objects that were not marked in the previous GC to ensure we are not tracing | |
| 75 // them via a conservatively found pointer. Tracing dead objects could lead to | |
| 76 // tracing of already finalized objects in another thread's heap which is a | |
| 77 // use-after-free situation. | |
| 78 const size_t deadBitMask = 4; | |
| 73 const size_t sizeMask = ~7; | 79 const size_t sizeMask = ~7; |
| 74 const uint8_t freelistZapValue = 42; | 80 const uint8_t freelistZapValue = 42; |
| 75 const uint8_t finalizedZapValue = 24; | 81 const uint8_t finalizedZapValue = 24; |
| 82 const uint8_t orphanedZapValue = 15; | |
| 83 | |
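Since allocations are 8-byte aligned, the low three bits of a header's size word are free to carry the mark, free-list, and dead flags, and sizeMask strips them back off. A minimal sketch of that bit arithmetic using the masks above; the `m_size` field name is an assumption for illustration:

```cpp
// Sketch only: a header's size word doubling as a flag field.
// The masks are the constants defined above; m_size is an assumed name.
struct HeaderWordSketch {
    size_t m_size; // allocation size (multiple of 8) with flags in bits 0-2

    size_t size() const { return m_size & sizeMask; }        // strip flag bits
    bool isMarked() const { return m_size & markBitMask; }
    bool isFree() const { return m_size & freeListMask; }
    void setDeadMark() { m_size |= deadBitMask; }            // skip when tracing
    void clearDeadMark() { m_size &= ~deadBitMask; }
    bool hasDeadMark() const { return m_size & deadBitMask; }
};
```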
| 84 enum GCMode { | |
| 85 GlobalGC, | |
| 86 ThreadLocalGC, | |
| 87 }; | |
| 76 | 88 |
| 77 class HeapStats; | 89 class HeapStats; |
| 78 class PageMemory; | 90 class PageMemory; |
| 79 template<ThreadAffinity affinity> class ThreadLocalPersistents; | 91 template<ThreadAffinity affinity> class ThreadLocalPersistents; |
| 80 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity > > class Persistent; | 92 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity > > class Persistent; |
| 81 template<typename T> class CrossThreadPersistent; | 93 template<typename T> class CrossThreadPersistent; |
| 82 | 94 |
| 83 PLATFORM_EXPORT size_t osPageSize(); | 95 PLATFORM_EXPORT size_t osPageSize(); |
| 84 | 96 |
| 85 // Blink heap pages are set up with a guard page before and after the | 97 // Blink heap pages are set up with a guard page before and after the |
| (...skipping 34 matching lines...) | |
| 120 | 132 |
| 121 // Sanity check for a page header address: the address of the page | 133 // Sanity check for a page header address: the address of the page |
| 122 // header should be OS page size away from being Blink page size | 134 // header should be OS page size away from being Blink page size |
| 123 // aligned. | 135 // aligned. |
| 124 inline bool isPageHeaderAddress(Address address) | 136 inline bool isPageHeaderAddress(Address address) |
| 125 { | 137 { |
| 126 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - osPageSize()); | 138 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - osPageSize()); |
| 127 } | 139 } |
| 128 #endif | 140 #endif |
| 129 | 141 |
| 130 // Mask an address down to the enclosing oilpan heap page base address. | 142 // Mask an address down to the enclosing oilpan heap base page. |
| 131 // All oilpan heap pages are aligned at blinkPageBase plus an OS page size. | 143 // All oilpan heap pages are aligned at blinkPageBase plus an OS page size. |
| 132 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our typed heaps. | 144 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our typed heaps. |
| 133 // This is only exported to enable tests in HeapTest.cpp. | 145 // This is only exported to enable tests in HeapTest.cpp. |
| 134 PLATFORM_EXPORT inline Address pageHeaderAddress(Address address) | 146 PLATFORM_EXPORT inline BaseHeapPage* pageHeaderFromObject(const void* object) |
| 135 { | 147 { |
| 136 return blinkPageAddress(address) + osPageSize(); | 148 Address address = reinterpret_cast<Address>(const_cast<void*>(object)); |
| 149 return reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + osPageSize()); | |
| 137 } | 150 } |
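pageHeaderFromObject relies on blinkPageAddress, defined elsewhere in this header. A sketch of what that masking looks like, assuming blinkPageBaseMask is the complement of blinkPageOffsetMask:

```cpp
// Sketch (assumed definition): round an address down to the enclosing
// Blink page base; the page header then sits one OS page past it,
// behind the guard page, which is what isPageHeaderAddress checks.
inline Address blinkPageAddress(Address address)
{
    return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask);
}
```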
| 138 | 151 |
| 139 // Common header for heap pages. | |
| 140 class BaseHeapPage { | |
| 141 public: | |
| 142 BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) | |
| 143 : m_storage(storage) | |
| 144 , m_gcInfo(gcInfo) | |
| 145 , m_threadState(state) | |
| 146 , m_padding(0) | |
| 147 { | |
| 148 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); | |
| 149 } | |
| 150 | |
| 151 // Check if the given address points to an object in this | |
| 152 // heap page. If so, find the start of that object and mark it | |
| 153 // using the given Visitor. Otherwise do nothing. The pointer must | |
| 154 // be within the same aligned blinkPageSize as the this-pointer. | |
| 155 // | |
| 156 // This is used during conservative stack scanning to | |
| 157 // conservatively mark all objects that could be referenced from | |
| 158 // the stack. | |
| 159 virtual void checkAndMarkPointer(Visitor*, Address) = 0; | |
| 160 | |
| 161 #if ENABLE(GC_TRACING) | |
| 162 virtual const GCInfo* findGCInfo(Address) = 0; | |
| 163 #endif | |
| 164 | |
| 165 Address address() { return reinterpret_cast<Address>(this); } | |
| 166 PageMemory* storage() const { return m_storage; } | |
| 167 ThreadState* threadState() const { return m_threadState; } | |
| 168 const GCInfo* gcInfo() { return m_gcInfo; } | |
| 169 virtual bool isLargeObject() { return false; } | |
| 170 | |
| 171 private: | |
| 172 // Accessor to silence unused warnings for the m_padding field. | |
| 173 intptr_t padding() const { return m_padding; } | |
| 174 | |
| 175 PageMemory* m_storage; | |
| 176 const GCInfo* m_gcInfo; | |
| 177 ThreadState* m_threadState; | |
| 178 // Pointer sized integer to ensure proper alignment of the | |
| 179 // HeapPage header. This can be used as a bit field if we need | |
| 180 // to associate more information with pages. | |
| 181 intptr_t m_padding; | |
| 182 }; | |
| 183 | |
| 184 // Large allocations are allocated as separate objects and linked in a | 152 // Large allocations are allocated as separate objects and linked in a |
| 185 // list. | 153 // list. |
| 186 // | 154 // |
| 187 // In order to use the same memory allocation routines for everything | 155 // In order to use the same memory allocation routines for everything |
| 188 // allocated in the heap, large objects are considered heap pages | 156 // allocated in the heap, large objects are considered heap pages |
| 189 // containing only one object. | 157 // containing only one object. |
| 190 // | 158 // |
| 191 // The layout of a large heap object is as follows: | 159 // The layout of a large heap object is as follows: |
| 192 // | 160 // |
| 193 // | BaseHeapPage | next pointer | FinalizedHeapObjectHeader or HeapObjectHeader | payload | | 161 // | BaseHeapPage | next pointer | FinalizedHeapObjectHeader or HeapObjectHeader | payload | |
| (...skipping 31 matching lines...) | |
| 225 // The LargeHeapObject pseudo-page contains one actual object. Determine | 193 // The LargeHeapObject pseudo-page contains one actual object. Determine |
| 226 // whether the pointer is within that object. | 194 // whether the pointer is within that object. |
| 227 bool objectContains(Address object) | 195 bool objectContains(Address object) |
| 228 { | 196 { |
| 229 return (payload() <= object) && (object < address() + size()); | 197 return (payload() <= object) && (object < address() + size()); |
| 230 } | 198 } |
| 231 | 199 |
| 232 // Returns true for any address that is on one of the pages that this | 200 // Returns true for any address that is on one of the pages that this |
| 233 // large object uses. That ensures that we can use a negative result to | 201 // large object uses. That ensures that we can use a negative result to |
| 234 // populate the negative page cache. | 202 // populate the negative page cache. |
| 235 bool contains(Address object) | 203 virtual bool contains(Address object) OVERRIDE |
| 236 { | 204 { |
| 237 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size()); | 205 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size()); |
| 238 } | 206 } |
| 239 | 207 |
| 240 LargeHeapObject<Header>* next() | 208 LargeHeapObject<Header>* next() |
| 241 { | 209 { |
| 242 return m_next; | 210 return m_next; |
| 243 } | 211 } |
| 244 | 212 |
| 245 size_t size() | 213 size_t size() |
| 246 { | 214 { |
| 247 return heapObjectHeader()->size() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); | 215 return heapObjectHeader()->size() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); |
| 248 } | 216 } |
| 249 | 217 |
| 250 Address payload() { return heapObjectHeader()->payload(); } | 218 Address payload() { return heapObjectHeader()->payload(); } |
| 251 size_t payloadSize() { return heapObjectHeader()->payloadSize(); } | 219 size_t payloadSize() { return heapObjectHeader()->payloadSize(); } |
| 252 | 220 |
| 253 Header* heapObjectHeader() | 221 Header* heapObjectHeader() |
| 254 { | 222 { |
| 255 Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); | 223 Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); |
| 256 return reinterpret_cast<Header*>(headerAddress); | 224 return reinterpret_cast<Header*>(headerAddress); |
| 257 } | 225 } |
| 258 | 226 |
| 259 bool isMarked(); | 227 bool isMarked(); |
| 260 void unmark(); | 228 void unmark(); |
| 261 void getStats(HeapStats&); | 229 void getStats(HeapStats&); |
| 262 void mark(Visitor*); | 230 void mark(Visitor*); |
| 263 void finalize(); | 231 void finalize(); |
| 232 void setDeadMark(); | |
| 233 virtual void markOrphaned() | |
| 234 { | |
| 235 // Zap the payload with a recognizable value to detect any incorrect | |
| 236 // cross thread pointer usage. | |
| 237 memset(payload(), orphanedZapValue, payloadSize()); | |
| 238 BaseHeapPage::markOrphaned(); | |
| 239 } | |
| 264 | 240 |
| 265 private: | 241 private: |
| 266 friend class ThreadHeap<Header>; | 242 friend class ThreadHeap<Header>; |
| 267 | 243 |
| 268 LargeHeapObject<Header>* m_next; | 244 LargeHeapObject<Header>* m_next; |
| 269 }; | 245 }; |
| 270 | 246 |
| 271 // The BasicObjectHeader is the minimal object header. It is used when | 247 // The BasicObjectHeader is the minimal object header. It is used when |
| 272 // encountering heap space of size allocationGranularity to mark it as | 248 // encountering heap space of size allocationGranularity to mark it as |
| 273 // a freelist entry. | 249 // a freelist entry. |
| (...skipping 48 matching lines...) | |
| 322 | 298 |
| 323 inline void mark(); | 299 inline void mark(); |
| 324 inline void unmark(); | 300 inline void unmark(); |
| 325 | 301 |
| 326 inline const GCInfo* gcInfo() { return 0; } | 302 inline const GCInfo* gcInfo() { return 0; } |
| 327 | 303 |
| 328 inline Address payload(); | 304 inline Address payload(); |
| 329 inline size_t payloadSize(); | 305 inline size_t payloadSize(); |
| 330 inline Address payloadEnd(); | 306 inline Address payloadEnd(); |
| 331 | 307 |
| 332 inline void setDebugMark(); | 308 inline void setDeadMark(); |
| 333 inline void clearDebugMark(); | 309 inline void clearDeadMark(); |
| 334 inline bool hasDebugMark() const; | 310 inline bool hasDeadMark() const; |
| 335 | 311 |
| 336 // Zap magic number with a new magic number that means there was once an | 312 // Zap magic number with a new magic number that means there was once an |
| 337 // object allocated here, but it was freed because nobody marked it during | 313 // object allocated here, but it was freed because nobody marked it during |
| 338 // GC. | 314 // GC. |
| 339 void zapMagic(); | 315 void zapMagic(); |
| 340 | 316 |
| 341 static void finalize(const GCInfo*, Address, size_t); | 317 static void finalize(const GCInfo*, Address, size_t); |
| 342 static HeapObjectHeader* fromPayload(const void*); | 318 static HeapObjectHeader* fromPayload(const void*); |
| 343 | 319 |
| 344 static const intptr_t magic = 0xc0de247; | 320 static const intptr_t magic = 0xc0de247; |
| (...skipping 116 matching lines...) | |
| 461 HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*); | 437 HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*); |
| 462 | 438 |
| 463 void link(HeapPage**); | 439 void link(HeapPage**); |
| 464 static void unlink(HeapPage*, HeapPage**); | 440 static void unlink(HeapPage*, HeapPage**); |
| 465 | 441 |
| 466 bool isEmpty(); | 442 bool isEmpty(); |
| 467 | 443 |
| 468 // Returns true for the whole blinkPageSize page that the page is on, even | 444 // Returns true for the whole blinkPageSize page that the page is on, even |
| 469 // for the header, and the unmapped guard page at the start. That ensures | 445 // for the header, and the unmapped guard page at the start. That ensures |
| 470 // the result can be used to populate the negative page cache. | 446 // the result can be used to populate the negative page cache. |
| 471 bool contains(Address addr) | 447 virtual bool contains(Address addr) OVERRIDE |
| 472 { | 448 { |
| 473 Address blinkPageStart = roundToBlinkPageStart(address()); | 449 Address blinkPageStart = roundToBlinkPageStart(address()); |
| 474 ASSERT(blinkPageStart == address() - osPageSize()); // Page is at aligned address plus guard page size. | 450 ASSERT(blinkPageStart == address() - osPageSize()); // Page is at aligned address plus guard page size. |
| 475 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; | 451 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; |
| 476 } | 452 } |
| 477 | 453 |
| 478 HeapPage* next() { return m_next; } | 454 HeapPage* next() { return m_next; } |
| 479 | 455 |
| 480 Address payload() | 456 Address payload() |
| 481 { | 457 { |
| 482 return address() + sizeof(*this) + headerPadding<Header>(); | 458 return address() + sizeof(*this) + headerPadding<Header>(); |
| 483 } | 459 } |
| 484 | 460 |
| 485 static size_t payloadSize() | 461 static size_t payloadSize() |
| 486 { | 462 { |
| 487 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask; | 463 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask; |
| 488 } | 464 } |
| 489 | 465 |
| 490 Address end() { return payload() + payloadSize(); } | 466 Address end() { return payload() + payloadSize(); } |
| 491 | 467 |
| 492 void getStats(HeapStats&); | 468 void getStats(HeapStats&); |
| 493 void clearMarks(); | 469 void clearLiveAndMarkDead(); |
| 494 void sweep(); | 470 void sweep(); |
| 495 void clearObjectStartBitMap(); | 471 void clearObjectStartBitMap(); |
| 496 void finalize(Header*); | 472 void finalize(Header*); |
| 497 virtual void checkAndMarkPointer(Visitor*, Address) OVERRIDE; | 473 virtual void checkAndMarkPointer(Visitor*, Address) OVERRIDE; |
| 498 #if ENABLE(GC_TRACING) | 474 #if ENABLE(GC_TRACING) |
| 499 const GCInfo* findGCInfo(Address) OVERRIDE; | 475 const GCInfo* findGCInfo(Address) OVERRIDE; |
| 500 #endif | 476 #endif |
| 501 ThreadHeap<Header>* heap() { return m_heap; } | 477 ThreadHeap<Header>* heap() { return m_heap; } |
| 502 #if defined(ADDRESS_SANITIZER) | 478 #if defined(ADDRESS_SANITIZER) |
| 503 void poisonUnmarkedObjects(); | 479 void poisonUnmarkedObjects(); |
| 504 #endif | 480 #endif |
| 481 virtual void markOrphaned() | |
| 482 { | |
| 483 // Zap the payload with a recognizable value to detect any incorrect | |
| 484 // cross thread pointer usage. | |
| 485 memset(payload(), orphanedZapValue, payloadSize()); | |
| 486 BaseHeapPage::markOrphaned(); | |
| 487 } | |
| 505 | 488 |
| 506 protected: | 489 protected: |
| 507 Header* findHeaderFromAddress(Address); | 490 Header* findHeaderFromAddress(Address); |
| 508 void populateObjectStartBitMap(); | 491 void populateObjectStartBitMap(); |
| 509 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } | 492 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } |
| 510 TraceCallback traceCallback(Header*); | 493 TraceCallback traceCallback(Header*); |
| 511 bool hasVTable(Header*); | 494 bool hasVTable(Header*); |
| 512 | 495 |
| 513 HeapPage<Header>* m_next; | 496 HeapPage<Header>* m_next; |
| 514 ThreadHeap<Header>* m_heap; | 497 ThreadHeap<Header>* m_heap; |
| (...skipping 155 matching lines...) | |
| 670 using GarbageCollectedFinalized<T>::operator delete; | 653 using GarbageCollectedFinalized<T>::operator delete; |
| 671 | 654 |
| 672 protected: | 655 protected: |
| 673 ~ThreadSafeRefCountedGarbageCollected() { } | 656 ~ThreadSafeRefCountedGarbageCollected() { } |
| 674 | 657 |
| 675 private: | 658 private: |
| 676 OwnPtr<CrossThreadPersistent<T> > m_keepAlive; | 659 OwnPtr<CrossThreadPersistent<T> > m_keepAlive; |
| 677 mutable Mutex m_mutex; | 660 mutable Mutex m_mutex; |
| 678 }; | 661 }; |
| 679 | 662 |
| 663 template<typename DataType> | |
| 664 class PagePool { | |
| 665 protected: | |
| 666 PagePool(); | |
| 667 | |
| 668 class PoolEntry { | |
| 669 public: | |
| 670 PoolEntry(DataType* data, PoolEntry* next) | |
| 671 : data(data) | |
| 672 , next(next) | |
| 673 { } | |
| 674 | |
| 675 DataType* data; | |
| 676 PoolEntry* next; | |
| 677 }; | |
| 678 | |
| 679 PoolEntry* m_pool[NumberOfHeaps]; | |
| 680 }; | |
| 681 | |
| 682 // Once pages have been used for one type of thread heap they will never be | |
| 683 // reused for another type of thread heap. Instead of unmapping, we add the | |
| 684 // pages to a pool of pages to be reused later by a thread heap of the same | |
| 685 // type. This is done as a security feature to avoid type confusion. The | |
| 686 // heaps are type segregated by having separate thread heaps for different | |
| 687 // types of objects. Holding on to pages ensures that the same virtual address | |
| 688 // space cannot be used for objects of another type than the type contained | |
| 689 // in this page to begin with. | |
| 690 class FreePagePool : public PagePool<PageMemory> { | |
| 691 public: | |
| 692 ~FreePagePool(); | |
| 693 void addFreePage(int index, PageMemory*); | |
| 694 PageMemory* takeFreePage(int index); | |
| 695 | |
| 696 private: | |
| 697 Mutex m_mutex[NumberOfHeaps]; | |
| 698 }; | |
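The diff only declares the pool operations; a hedged sketch of how addFreePage and takeFreePage could work over the inherited PoolEntry chain, one mutex per heap index (the use of WTF's MutexLocker is an assumption, not taken from this patch):

```cpp
// Sketch only: push/pop on the per-index entry chain under m_mutex[index].
PageMemory* FreePagePool::takeFreePage(int index)
{
    MutexLocker locker(m_mutex[index]);
    PoolEntry* entry = m_pool[index];
    if (!entry)
        return 0;                       // nothing pooled for this heap type
    m_pool[index] = entry->next;        // unlink the head entry
    PageMemory* memory = entry->data;
    delete entry;
    return memory;
}

void FreePagePool::addFreePage(int index, PageMemory* memory)
{
    MutexLocker locker(m_mutex[index]);
    m_pool[index] = new PoolEntry(memory, m_pool[index]); // push on the head
}
```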
| 699 | |
| 700 class OrphanedPagePool : public PagePool<BaseHeapPage> { | |
| 701 public: | |
| 702 ~OrphanedPagePool(); | |
| 703 void addOrphanedPage(int, BaseHeapPage*); | |
| 704 void addOrphanedPages(int, Vector<BaseHeapPage*>&); | |
| 705 void decommitOrphanedPages(); | |
| 706 bool contains(void*); | |
| 707 }; | |
| 708 | |
| 680 // The CallbackStack contains all the visitor callbacks used to trace and mark | 709 // The CallbackStack contains all the visitor callbacks used to trace and mark |
| 681 // objects. A specific CallbackStack instance contains at most bufferSize elements. | 710 // objects. A specific CallbackStack instance contains at most bufferSize elements. |
| 682 // If more space is needed a new CallbackStack instance is created and chained | 711 // If more space is needed a new CallbackStack instance is created and chained |
| 683 // together with the former instance. I.e. a logical CallbackStack can be made of | 712 // together with the former instance. I.e. a logical CallbackStack can be made of |
| 684 // multiple chained CallbackStack object instances. | 713 // multiple chained CallbackStack object instances. |
| 685 // There are two logical callback stacks. One containing all the marking callbacks and | 714 // There are two logical callback stacks. One containing all the marking callbacks and |
| 686 // one containing the weak pointer callbacks. | 715 // one containing the weak pointer callbacks. |
| 687 class CallbackStack { | 716 class CallbackStack { |
| 688 public: | 717 public: |
| 689 CallbackStack(CallbackStack** first) | 718 CallbackStack(CallbackStack** first) |
| (...skipping 30 matching lines...) | |
| 720 | 749 |
| 721 static void init(CallbackStack** first); | 750 static void init(CallbackStack** first); |
| 722 static void shutdown(CallbackStack** first); | 751 static void shutdown(CallbackStack** first); |
| 723 static void clear(CallbackStack** first) | 752 static void clear(CallbackStack** first) |
| 724 { | 753 { |
| 725 if (!(*first)->isEmpty()) { | 754 if (!(*first)->isEmpty()) { |
| 726 shutdown(first); | 755 shutdown(first); |
| 727 init(first); | 756 init(first); |
| 728 } | 757 } |
| 729 } | 758 } |
| 730 bool popAndInvokeCallback(CallbackStack** first, Visitor*); | 759 template<GCMode Mode> bool popAndInvokeCallback(CallbackStack** first, Visitor*); |
| 731 static void invokeCallbacks(CallbackStack** first, Visitor*); | 760 template<GCMode Mode> static void invokeCallbacks(CallbackStack** first, Visitor*); |
| 732 | 761 |
| 733 Item* allocateEntry(CallbackStack** first) | 762 Item* allocateEntry(CallbackStack** first) |
| 734 { | 763 { |
| 735 if (m_current < m_limit) | 764 if (m_current < m_limit) |
| 736 return m_current++; | 765 return m_current++; |
| 737 return (new CallbackStack(first))->allocateEntry(first); | 766 return (new CallbackStack(first))->allocateEntry(first); |
| 738 } | 767 } |
| 739 | 768 |
| 740 #ifndef NDEBUG | 769 #ifndef NDEBUG |
| 741 bool hasCallbackForObject(const void*); | 770 bool hasCallbackForObject(const void*); |
| 742 #endif | 771 #endif |
| 743 | 772 |
| 744 private: | 773 private: |
| 745 void invokeOldestCallbacks(Visitor*); | 774 template<GCMode Mode> void invokeOldestCallbacks(Visitor*); |
| 746 | 775 |
| 747 static const size_t bufferSize = 8000; | 776 static const size_t bufferSize = 8000; |
| 748 Item m_buffer[bufferSize]; | 777 Item m_buffer[bufferSize]; |
| 749 Item* m_limit; | 778 Item* m_limit; |
| 750 Item* m_current; | 779 Item* m_current; |
| 751 CallbackStack* m_next; | 780 CallbackStack* m_next; |
| 752 }; | 781 }; |
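For reference, a sketch of what the GCMode-templated pop could look like: drain the head segment, then fall back to the next chained segment. Item's call() accessor and the exact segment unlinking are assumptions, not taken from this patch:

```cpp
// Sketch only; in the real patch, Mode (GlobalGC vs. ThreadLocalGC)
// would additionally gate which callbacks may run.
template<GCMode Mode>
bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor)
{
    if (m_current == m_buffer) {        // this segment is drained
        if (!m_next)
            return false;               // the whole logical stack is empty
        CallbackStack* next = m_next;
        *first = next;                  // unlink the drained head segment
        delete this;
        return next->popAndInvokeCallback<Mode>(first, visitor);
    }
    Item* item = --m_current;
    item->call(visitor);                // run the stored trace callback
    return true;
}
```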
| 753 | 782 |
| 754 // Non-template super class used to pass a heap around to other classes. | 783 // Non-template super class used to pass a heap around to other classes. |
| 755 class BaseHeap { | 784 class BaseHeap { |
| 756 public: | 785 public: |
| 757 virtual ~BaseHeap() { } | 786 virtual ~BaseHeap() { } |
| 758 | 787 |
| 759 // Find the page in this thread heap containing the given | 788 // Find the page in this thread heap containing the given |
| 760 // address. Returns 0 if the address is not contained in any | 789 // address. Returns 0 if the address is not contained in any |
| 761 // page in this thread heap. | 790 // page in this thread heap. |
| 762 virtual BaseHeapPage* heapPageFromAddress(Address) = 0; | 791 virtual BaseHeapPage* heapPageFromAddress(Address) = 0; |
| 763 | 792 |
| 764 #if ENABLE(GC_TRACING) | 793 #if ENABLE(GC_TRACING) |
| 765 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address) = 0; | 794 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address) = 0; |
| 766 #endif | 795 #endif |
| 767 | 796 |
| 768 // Sweep this part of the Blink heap. This finalizes dead objects | 797 // Sweep this part of the Blink heap. This finalizes dead objects |
| 769 // and builds freelists for all the unused memory. | 798 // and builds freelists for all the unused memory. |
| 770 virtual void sweep() = 0; | 799 virtual void sweep() = 0; |
| 771 | 800 |
| 772 // Forcefully finalize all objects in this part of the Blink heap | |
| 773 // (potentially with the exception of one object). This is used | |
| 774 // during thread termination to make sure that all objects for the | |
| 775 // dying thread are finalized. | |
| 776 virtual void assertEmpty() = 0; | |
| 777 | |
| 778 virtual void clearFreeLists() = 0; | 801 virtual void clearFreeLists() = 0; |
| 779 virtual void clearMarks() = 0; | 802 virtual void clearLiveAndMarkDead() = 0; |
| 780 #ifndef NDEBUG | 803 #ifndef NDEBUG |
| 781 virtual void getScannedStats(HeapStats&) = 0; | 804 virtual void getScannedStats(HeapStats&) = 0; |
| 782 #endif | 805 #endif |
| 783 | 806 |
| 784 virtual void makeConsistentForGC() = 0; | 807 virtual void makeConsistentForGC() = 0; |
| 785 virtual bool isConsistentForGC() = 0; | 808 virtual bool isConsistentForGC() = 0; |
| 786 | 809 |
| 810 virtual void prepareHeapForShutdown() = 0; | |

haraken 2014/07/09 08:01:59: prepareForHeapForTermination? We want to avoid mi…

wibling-chromium 2014/07/09 10:32:31: Done.

| 811 | |
| 787 // Returns a bucket number for inserting a FreeListEntry of a | 812 // Returns a bucket number for inserting a FreeListEntry of a |
| 788 // given size. All FreeListEntries in the given bucket, n, have | 813 // given size. All FreeListEntries in the given bucket, n, have |
| 789 // size >= 2^n. | 814 // size >= 2^n. |
| 790 static int bucketIndexForSize(size_t); | 815 static int bucketIndexForSize(size_t); |
| 791 }; | 816 }; |
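bucketIndexForSize is effectively a floor of log2; a sketch consistent with the invariant stated above (every entry in bucket n has size >= 2^n):

```cpp
// Sketch: the index of the highest set bit, so an entry of size s
// lands in bucket floor(log2(s)) and bucket n only holds sizes >= 2^n.
int BaseHeap::bucketIndexForSize(size_t size)
{
    ASSERT(size > 0);
    int index = -1;
    while (size) {
        size >>= 1;
        ++index;
    }
    return index;
}
```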
| 792 | 817 |
| 793 // Thread heaps represent a part of the per-thread Blink heap. | 818 // Thread heaps represent a part of the per-thread Blink heap. |
| 794 // | 819 // |
| 795 // Each Blink thread has a number of thread heaps: one general heap | 820 // Each Blink thread has a number of thread heaps: one general heap |
| 796 // that contains any type of object and a number of heaps specialized | 821 // that contains any type of object and a number of heaps specialized |
| 797 // for specific object types (such as Node). | 822 // for specific object types (such as Node). |
| 798 // | 823 // |
| 799 // Each thread heap contains the functionality to allocate new objects | 824 // Each thread heap contains the functionality to allocate new objects |
| 800 // (potentially adding new pages to the heap), to find and mark | 825 // (potentially adding new pages to the heap), to find and mark |
| 801 // objects during conservative stack scanning and to sweep the set of | 826 // objects during conservative stack scanning and to sweep the set of |
| 802 // pages after a GC. | 827 // pages after a GC. |
| 803 template<typename Header> | 828 template<typename Header> |
| 804 class ThreadHeap : public BaseHeap { | 829 class ThreadHeap : public BaseHeap { |
| 805 public: | 830 public: |
| 806 ThreadHeap(ThreadState*); | 831 ThreadHeap(ThreadState*, int); |
| 807 virtual ~ThreadHeap(); | 832 virtual ~ThreadHeap(); |
| 808 | 833 |
| 809 virtual BaseHeapPage* heapPageFromAddress(Address); | 834 virtual BaseHeapPage* heapPageFromAddress(Address); |
| 810 #if ENABLE(GC_TRACING) | 835 #if ENABLE(GC_TRACING) |
| 811 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address); | 836 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address); |
| 812 #endif | 837 #endif |
| 813 virtual void sweep(); | 838 virtual void sweep(); |
| 814 virtual void assertEmpty(); | |
| 815 virtual void clearFreeLists(); | 839 virtual void clearFreeLists(); |
| 816 virtual void clearMarks(); | 840 virtual void clearLiveAndMarkDead(); |
| 817 #ifndef NDEBUG | 841 #ifndef NDEBUG |
| 818 virtual void getScannedStats(HeapStats&); | 842 virtual void getScannedStats(HeapStats&); |
| 819 #endif | 843 #endif |
| 820 | 844 |
| 821 virtual void makeConsistentForGC(); | 845 virtual void makeConsistentForGC(); |
| 822 virtual bool isConsistentForGC(); | 846 virtual bool isConsistentForGC(); |
| 823 | 847 |
| 824 ThreadState* threadState() { return m_threadState; } | 848 ThreadState* threadState() { return m_threadState; } |
| 825 HeapStats& stats() { return m_threadState->stats(); } | 849 HeapStats& stats() { return m_threadState->stats(); } |
| 826 void flushHeapContainsCache() | 850 void flushHeapContainsCache() |
| 827 { | 851 { |
| 828 m_threadState->heapContainsCache()->flush(); | 852 m_threadState->heapContainsCache()->flush(); |
| 829 } | 853 } |
| 830 | 854 |
| 831 inline Address allocate(size_t, const GCInfo*); | 855 inline Address allocate(size_t, const GCInfo*); |
| 832 void addToFreeList(Address, size_t); | 856 void addToFreeList(Address, size_t); |
| 833 void addPageMemoryToPool(PageMemory*); | |
| 834 void addPageToPool(HeapPage<Header>*); | |
| 835 inline static size_t roundedAllocationSize(size_t size) | 857 inline static size_t roundedAllocationSize(size_t size) |
| 836 { | 858 { |
| 837 return allocationSizeFromSize(size) - sizeof(Header); | 859 return allocationSizeFromSize(size) - sizeof(Header); |
| 838 } | 860 } |
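allocationSizeFromSize presumably adds the header and rounds up to the 8-byte granularity, and allocate() then bumps the current allocation point; a sketch of both under those assumptions (the NotNull placement-new tag is assumed from WTF):

```cpp
// Sketch only: header accounting plus the bump-pointer fast path.
template<typename Header>
size_t ThreadHeap<Header>::allocationSizeFromSize(size_t size)
{
    // Make room for the object header, then round up to the granularity.
    return (size + sizeof(Header) + allocationMask) & ~allocationMask;
}

template<typename Header>
Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo)
{
    size_t allocationSize = allocationSizeFromSize(size);
    if (m_remainingAllocationSize < allocationSize)
        return outOfLineAllocate(size, gcInfo);  // refill pages / large object
    Address headerAddress = m_currentAllocationPoint;
    m_currentAllocationPoint += allocationSize;  // bump the allocation point
    m_remainingAllocationSize -= allocationSize;
    new (NotNull, headerAddress) Header(allocationSize, gcInfo);
    return headerAddress + sizeof(Header);       // payload follows the header
}
```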
| 839 | 861 |
| 862 void prepareHeapForShutdown(); | |
| 863 void removePageFromHeap(HeapPage<Header>*); | |
| 864 | |
| 840 private: | 865 private: |
| 841 // Once pages have been used for one thread heap they will never | 866 void addPageToHeap(const GCInfo*); |
| 842 // be reused for another thread heap. Instead of unmapping, we add | |
| 843 // the pages to a pool of pages to be reused later by this thread | |
| 844 // heap. This is done as a security feature to avoid type | |
| 845 // confusion. The heap is type segregated by having separate | |
| 846 // thread heaps for various types of objects. Holding on to pages | |
| 847 // ensures that the same virtual address space cannot be used for | |
| 848 // objects of another type than the type contained in this thread | |
| 849 // heap. | |
| 850 class PagePoolEntry { | |
| 851 public: | |
| 852 PagePoolEntry(PageMemory* storage, PagePoolEntry* next) | |
| 853 : m_storage(storage) | |
| 854 , m_next(next) | |
| 855 { } | |
| 856 | |
| 857 PageMemory* storage() { return m_storage; } | |
| 858 PagePoolEntry* next() { return m_next; } | |
| 859 | |
| 860 private: | |
| 861 PageMemory* m_storage; | |
| 862 PagePoolEntry* m_next; | |
| 863 }; | |
| 864 | |
| 865 PLATFORM_EXPORT Address outOfLineAllocate(size_t, const GCInfo*); | 867 PLATFORM_EXPORT Address outOfLineAllocate(size_t, const GCInfo*); |
| 866 static size_t allocationSizeFromSize(size_t); | 868 static size_t allocationSizeFromSize(size_t); |
| 867 void addPageToHeap(const GCInfo*); | |
| 868 PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*); | 869 PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*); |
| 869 Address currentAllocationPoint() const { return m_currentAllocationPoint; } | 870 Address currentAllocationPoint() const { return m_currentAllocationPoint; } |
| 870 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } | 871 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } |
| 871 bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } | 872 bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } |
| 872 void setAllocationPoint(Address point, size_t size) | 873 void setAllocationPoint(Address point, size_t size) |
| 873 { | 874 { |
| 874 ASSERT(!point || heapPageFromAddress(point)); | 875 ASSERT(!point || heapPageFromAddress(point)); |
| 875 ASSERT(size <= HeapPage<Header>::payloadSize()); | 876 ASSERT(size <= HeapPage<Header>::payloadSize()); |
| 876 m_currentAllocationPoint = point; | 877 m_currentAllocationPoint = point; |
| 877 m_remainingAllocationSize = size; | 878 m_remainingAllocationSize = size; |
| 878 } | 879 } |
| 879 void ensureCurrentAllocation(size_t, const GCInfo*); | 880 void ensureCurrentAllocation(size_t, const GCInfo*); |
| 880 bool allocateFromFreeList(size_t); | 881 bool allocateFromFreeList(size_t); |
| 881 | 882 |
| 882 void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**); | 883 void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**); |
| 883 | |
| 884 void allocatePage(const GCInfo*); | 884 void allocatePage(const GCInfo*); |
| 885 PageMemory* takePageFromPool(); | |
| 886 void clearPagePool(); | |
| 887 void deletePages(); | |
| 888 | 885 |
| 889 Address m_currentAllocationPoint; | 886 Address m_currentAllocationPoint; |
| 890 size_t m_remainingAllocationSize; | 887 size_t m_remainingAllocationSize; |
| 891 | 888 |
| 892 HeapPage<Header>* m_firstPage; | 889 HeapPage<Header>* m_firstPage; |
| 893 LargeHeapObject<Header>* m_firstLargeHeapObject; | 890 LargeHeapObject<Header>* m_firstLargeHeapObject; |
| 894 | 891 |
| 895 int m_biggestFreeListIndex; | 892 int m_biggestFreeListIndex; |
| 896 ThreadState* m_threadState; | 893 ThreadState* m_threadState; |
| 897 | 894 |
| 898 // All FreeListEntries in the nth list have size >= 2^n. | 895 // All FreeListEntries in the nth list have size >= 2^n. |
| 899 FreeListEntry* m_freeLists[blinkPageSizeLog2]; | 896 FreeListEntry* m_freeLists[blinkPageSizeLog2]; |
| 900 | 897 |
| 901 // List of pages that have been previously allocated, but are now | 898 // Index into the page pools. This is used to ensure that the pages of the |
| 902 // unused. | 899 // same type go into the correct page pool and thus avoid type confusion. |
| 903 PagePoolEntry* m_pagePool; | 900 int m_index; |
| 904 }; | 901 }; |
| 905 | 902 |
| 906 class PLATFORM_EXPORT Heap { | 903 class PLATFORM_EXPORT Heap { |
| 907 public: | 904 public: |
| 908 static void init(); | 905 static void init(); |
| 909 static void shutdown(); | 906 static void shutdown(); |
| 910 static void doShutdown(); | 907 static void doShutdown(); |
| 911 | 908 |
| 912 static BaseHeapPage* contains(Address); | 909 static BaseHeapPage* contains(Address); |
| 913 static BaseHeapPage* contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); } | 910 static BaseHeapPage* contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); } |
| 914 static BaseHeapPage* contains(const void* pointer) { return contains(const_cast<void*>(pointer)); } | 911 static BaseHeapPage* contains(const void* pointer) { return contains(const_cast<void*>(pointer)); } |
| 912 #ifndef NDEBUG | |
| 913 static bool containedInHeapOrOrphanedPage(void*); | |
| 914 #endif | |
| 915 | 915 |
| 916 // Push a trace callback on the marking stack. | 916 // Push a trace callback on the marking stack. |
| 917 static void pushTraceCallback(void* containerObject, TraceCallback); | 917 static void pushTraceCallback(void* containerObject, TraceCallback); |
| 918 | 918 |
| 919 // Add a weak pointer callback to the weak callback work list. General | 919 // Add a weak pointer callback to the weak callback work list. General |
| 920 // object pointer callbacks are added to a thread local weak callback work | 920 // object pointer callbacks are added to a thread local weak callback work |
| 921 // list and the callback is called on the thread that owns the object, with | 921 // list and the callback is called on the thread that owns the object, with |
| 922 // the closure pointer as an argument. Most of the time, the closure and | 922 // the closure pointer as an argument. Most of the time, the closure and |
| 923 // the containerObject can be the same thing, but the containerObject is | 923 // the containerObject can be the same thing, but the containerObject is |
| 924 // constrained to be on the heap, since the heap is used to identify the | 924 // constrained to be on the heap, since the heap is used to identify the |
| 925 // correct thread. | 925 // correct thread. |
| 926 static void pushWeakObjectPointerCallback(void* closure, void* containerObject, WeakPointerCallback); | 926 static void pushWeakObjectPointerCallback(void* closure, void* containerObject, WeakPointerCallback); |
| 927 | 927 |
| 928 // Similar to the more general pushWeakObjectPointerCallback, but cell | 928 // Similar to the more general pushWeakObjectPointerCallback, but cell |
| 929 // pointer callbacks are added to a static callback work list and the weak | 929 // pointer callbacks are added to a static callback work list and the weak |
| 930 // callback is performed on the thread performing garbage collection. This | 930 // callback is performed on the thread performing garbage collection. This |
| 931 // is OK because cells are just cleared and no deallocation can happen. | 931 // is OK because cells are just cleared and no deallocation can happen. |
| 932 static void pushWeakCellPointerCallback(void** cell, WeakPointerCallback); | 932 static void pushWeakCellPointerCallback(void** cell, WeakPointerCallback); |
| 933 | 933 |
| 934 // Pop the top of the marking stack and call the callback with the visitor | 934 // Pop the top of the marking stack and call the callback with the visitor |
| 935 // and the object. Returns false when there is nothing more to do. | 935 // and the object. Returns false when there is nothing more to do. |
| 936 static bool popAndInvokeTraceCallback(Visitor*); | 936 template<GCMode Mode> static bool popAndInvokeTraceCallback(Visitor*); |
| 937 | 937 |
| 938 // Remove an item from the weak callback work list and call the callback | 938 // Remove an item from the weak callback work list and call the callback |
| 939 // with the visitor and the closure pointer. Returns false when there is | 939 // with the visitor and the closure pointer. Returns false when there is |
| 940 // nothing more to do. | 940 // nothing more to do. |
| 941 static bool popAndInvokeWeakPointerCallback(Visitor*); | 941 static bool popAndInvokeWeakPointerCallback(Visitor*); |
| 942 | 942 |
| 943 // Register an ephemeron table for fixed-point iteration. | 943 // Register an ephemeron table for fixed-point iteration. |
| 944 static void registerWeakTable(void* containerObject, EphemeronCallback, EphemeronCallback); | 944 static void registerWeakTable(void* containerObject, EphemeronCallback, EphemeronCallback); |
| 945 #ifndef NDEBUG | 945 #ifndef NDEBUG |
| 946 static bool weakTableRegistered(const void*); | 946 static bool weakTableRegistered(const void*); |
| 947 #endif | 947 #endif |
| 948 | 948 |
| 949 template<typename T> static Address allocate(size_t); | 949 template<typename T> static Address allocate(size_t); |
| 950 template<typename T> static Address reallocate(void* previous, size_t); | 950 template<typename T> static Address reallocate(void* previous, size_t); |
| 951 | 951 |
| 952 static void collectGarbage(ThreadState::StackState); | 952 static void collectGarbage(ThreadState::StackState); |
| 953 static void collectGarbageForTerminatingThread(ThreadState*); | |
| 953 static void collectAllGarbage(); | 954 static void collectAllGarbage(); |
| 955 template<GCMode Mode> static void traceRootsAndPerformGlobalWeakProcessing(); | |
| 954 static void setForcePreciseGCForTesting(); | 956 static void setForcePreciseGCForTesting(); |
| 955 | 957 |
| 956 static void prepareForGC(); | 958 static void prepareForGC(); |
| 957 | 959 |
| 958 // Conservatively checks whether an address is a pointer in any of the thread | 960 // Conservatively checks whether an address is a pointer in any of the thread |
| 959 // heaps. If so marks the object pointed to as live. | 961 // heaps. If so marks the object pointed to as live. |
| 960 static Address checkAndMarkPointer(Visitor*, Address); | 962 static Address checkAndMarkPointer(Visitor*, Address); |
| 961 | 963 |
| 962 #if ENABLE(GC_TRACING) | 964 #if ENABLE(GC_TRACING) |
| 963 // Dump the path to specified object on the next GC. This method is to be invoked from GDB. | 965 // Dump the path to specified object on the next GC. This method is to be invoked from GDB. |
| (...skipping 17 matching lines...) | |
| 981 static bool isConsistentForGC(); | 983 static bool isConsistentForGC(); |
| 982 static void makeConsistentForGC(); | 984 static void makeConsistentForGC(); |
| 983 | 985 |
| 984 static void flushHeapDoesNotContainCache(); | 986 static void flushHeapDoesNotContainCache(); |
| 985 static bool heapDoesNotContainCacheIsEmpty() { return s_heapDoesNotContainCache->isEmpty(); } | 987 static bool heapDoesNotContainCacheIsEmpty() { return s_heapDoesNotContainCache->isEmpty(); } |
| 986 | 988 |
| 987 // Return true if the last GC found a pointer into a heap page | 989 // Return true if the last GC found a pointer into a heap page |
| 988 // during conservative scanning. | 990 // during conservative scanning. |
| 989 static bool lastGCWasConservative() { return s_lastGCWasConservative; } | 991 static bool lastGCWasConservative() { return s_lastGCWasConservative; } |
| 990 | 992 |
| 993 static FreePagePool* freePagePool() { return s_freePagePool; } | |
| 994 static OrphanedPagePool* orphanedPagePool() { return s_orphanedPagePool; } | |
| 995 | |
| 991 private: | 996 private: |
| 992 static Visitor* s_markingVisitor; | 997 static Visitor* s_markingVisitor; |
| 993 | 998 |
| 994 static CallbackStack* s_markingStack; | 999 static CallbackStack* s_markingStack; |
| 995 static CallbackStack* s_weakCallbackStack; | 1000 static CallbackStack* s_weakCallbackStack; |
| 996 static CallbackStack* s_ephemeronStack; | 1001 static CallbackStack* s_ephemeronStack; |
| 997 static HeapDoesNotContainCache* s_heapDoesNotContainCache; | 1002 static HeapDoesNotContainCache* s_heapDoesNotContainCache; |
| 998 static bool s_shutdownCalled; | 1003 static bool s_shutdownCalled; |
| 999 static bool s_lastGCWasConservative; | 1004 static bool s_lastGCWasConservative; |
| 1005 static FreePagePool* s_freePagePool; | |
| 1006 static OrphanedPagePool* s_orphanedPagePool; | |
| 1000 friend class ThreadState; | 1007 friend class ThreadState; |
| 1001 }; | 1008 }; |
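A hypothetical usage sketch of the static interface above; the driver function name is illustrative, not from this patch:

```cpp
// Hypothetical driver loop: drain trace callbacks for a global GC,
// then process the weak-pointer callback work list.
static void driveMarking(Visitor* visitor)
{
    while (Heap::popAndInvokeTraceCallback<GlobalGC>(visitor)) { }
    while (Heap::popAndInvokeWeakPointerCallback(visitor)) { }
}
```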
| 1002 | 1009 |
| 1003 // The NoAllocationScope class is used in debug mode to catch unwanted | 1010 // The NoAllocationScope class is used in debug mode to catch unwanted |
| 1004 // allocations. E.g. allocations during GC. | 1011 // allocations. E.g. allocations during GC. |
| 1005 template<ThreadAffinity Affinity> | 1012 template<ThreadAffinity Affinity> |
| 1006 class NoAllocationScope { | 1013 class NoAllocationScope { |
| 1007 public: | 1014 public: |
| 1008 NoAllocationScope() : m_active(true) { enter(); } | 1015 NoAllocationScope() : m_active(true) { enter(); } |
| 1009 | 1016 |
| (...skipping 286 matching lines...) | |
| 1296 #define GC_PLUGIN_IGNORE(bug) \ | 1303 #define GC_PLUGIN_IGNORE(bug) \ |
| 1297 __attribute__((annotate("blink_gc_plugin_ignore"))) | 1304 __attribute__((annotate("blink_gc_plugin_ignore"))) |
| 1298 #else | 1305 #else |
| 1299 #define STACK_ALLOCATED() DISALLOW_ALLOCATION() | 1306 #define STACK_ALLOCATED() DISALLOW_ALLOCATION() |
| 1300 #define GC_PLUGIN_IGNORE(bug) | 1307 #define GC_PLUGIN_IGNORE(bug) |
| 1301 #endif | 1308 #endif |
| 1302 | 1309 |
| 1303 NO_SANITIZE_ADDRESS | 1310 NO_SANITIZE_ADDRESS |
| 1304 void HeapObjectHeader::checkHeader() const | 1311 void HeapObjectHeader::checkHeader() const |
| 1305 { | 1312 { |
| 1306 ASSERT(m_magic == magic); | 1313 #ifndef NDEBUG |
| 1314 BaseHeapPage* page = pageHeaderFromObject(this); | |
| 1315 ASSERT(page->orphaned() || m_magic == magic); | |
| 1316 #endif | |
| 1307 } | 1317 } |
| 1308 | 1318 |
| 1309 Address HeapObjectHeader::payload() | 1319 Address HeapObjectHeader::payload() |
| 1310 { | 1320 { |
| 1311 return reinterpret_cast<Address>(this) + objectHeaderSize; | 1321 return reinterpret_cast<Address>(this) + objectHeaderSize; |
| 1312 } | 1322 } |
| 1313 | 1323 |
| 1314 size_t HeapObjectHeader::payloadSize() | 1324 size_t HeapObjectHeader::payloadSize() |
| 1315 { | 1325 { |
| 1316 return size() - objectHeaderSize; | 1326 return size() - objectHeaderSize; |
| (...skipping 1030 matching lines...) | |
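fromPayload, declared earlier, is the inverse of payload(); a sketch:

```cpp
// Sketch: step back objectHeaderSize bytes from a payload pointer to
// recover the HeapObjectHeader, mirroring payload() above.
HeapObjectHeader* HeapObjectHeader::fromPayload(const void* payload)
{
    Address address = reinterpret_cast<Address>(const_cast<void*>(payload));
    return reinterpret_cast<HeapObjectHeader*>(address - objectHeaderSize);
}
```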
| 2347 }; | 2357 }; |
| 2348 | 2358 |
| 2349 template<typename T> | 2359 template<typename T> |
| 2350 struct IfWeakMember<WeakMember<T> > { | 2360 struct IfWeakMember<WeakMember<T> > { |
| 2351 static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.get()); } | 2361 static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.get()); } |
| 2352 }; | 2362 }; |
| 2353 | 2363 |
| 2354 } | 2364 } |
| 2355 | 2365 |
| 2356 #endif // Heap_h | 2366 #endif // Heap_h |