Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 51 matching lines...) | |
| 62 // Double precision floats are more efficient when 8 byte aligned, so we 8 byte | 62 // Double precision floats are more efficient when 8 byte aligned, so we 8 byte |
| 63 // align all allocations even on 32 bit. | 63 // align all allocations even on 32 bit. |
| 64 const size_t allocationGranularity = 8; | 64 const size_t allocationGranularity = 8; |
| 65 const size_t allocationMask = allocationGranularity - 1; | 65 const size_t allocationMask = allocationGranularity - 1; |
| 66 const size_t objectStartBitMapSize = (blinkPageSize + ((8 * allocationGranularity) - 1)) / (8 * allocationGranularity); | 66 const size_t objectStartBitMapSize = (blinkPageSize + ((8 * allocationGranularity) - 1)) / (8 * allocationGranularity); |
| 67 const size_t reservedForObjectBitMap = ((objectStartBitMapSize + allocationMask) & ~allocationMask); | 67 const size_t reservedForObjectBitMap = ((objectStartBitMapSize + allocationMask) & ~allocationMask); |
| 68 const size_t maxHeapObjectSize = 1 << 27; | 68 const size_t maxHeapObjectSize = 1 << 27; |
| 69 | 69 |
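As a sanity check on the constants above, here is the bitmap arithmetic worked through in a standalone sketch. The `1 << 17` value for `blinkPageSize` comes from its definition elsewhere in this header; treat it as an assumption of this example.

```cpp
#include <cassert>
#include <cstddef>

int main()
{
    // Assumed value: blinkPageSize is defined as 1 << 17 elsewhere in
    // this header; the remaining constants mirror the ones above.
    const size_t blinkPageSize = 1 << 17;
    const size_t allocationGranularity = 8;
    const size_t allocationMask = allocationGranularity - 1;

    // One bit per 8-byte granule, rounded up to whole bytes.
    const size_t objectStartBitMapSize = (blinkPageSize + ((8 * allocationGranularity) - 1)) / (8 * allocationGranularity);
    // Rounded up to the allocation granularity so the payload stays aligned.
    const size_t reservedForObjectBitMap = (objectStartBitMapSize + allocationMask) & ~allocationMask;

    assert(objectStartBitMapSize == 2048);
    assert(reservedForObjectBitMap == 2048); // already 8-byte aligned
    return 0;
}
```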
| 70 const size_t markBitMask = 1; | 70 const size_t markBitMask = 1; |
| 71 const size_t freeListMask = 2; | 71 const size_t freeListMask = 2; |
| 72 const size_t debugBitMask = 4; | 72 // The dead bit is used for objects that have gone through a GC marking, but did |
| 73 // not get swept before a new GC started. In that case we set the dead bit on | |
| 74 // objects that were not marked in the previous GC to ensure we are not tracing | |
| 75 // them via a conservatively found pointer. Tracing dead objects could lead to | |
| 76 // tracing of already finalized objects in another thread's heap which is a | |
| 77 // use-after-free situation. | |
| 78 const size_t deadBitMask = 4; | |
| 73 const size_t sizeMask = ~7; | 79 const size_t sizeMask = ~7; |
| 74 const uint8_t freelistZapValue = 42; | 80 const uint8_t freelistZapValue = 42; |
| 75 const uint8_t finalizedZapValue = 24; | 81 const uint8_t finalizedZapValue = 24; |
| 82 // The orphaned zap value must be zero in the lowest bits to allow for using | |
| 83 // the mark bit when tracing. | |
| 84 const uint8_t orphanedZapValue = 240; | |
| 85 | |
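Because every allocation is a multiple of the 8-byte granularity, the low three bits of an object header's size word are free to hold the mark, freelist, and dead flags defined above. A minimal standalone sketch of that packing; `DemoHeader` is hypothetical, not Blink's actual header class.

```cpp
#include <cassert>
#include <cstddef>

// The same flag layout as the constants above: with 8-byte granularity,
// object sizes are multiples of 8, so the low three bits are spare.
const size_t markBitMask = 1;
const size_t freeListMask = 2;
const size_t deadBitMask = 4;
const size_t sizeMask = ~static_cast<size_t>(7);

struct DemoHeader {
    size_t m_size; // size in the high bits, flags in the low three bits

    size_t size() const { return m_size & sizeMask; }
    bool isMarked() const { return m_size & markBitMask; }
    void mark() { m_size |= markBitMask; }
    // The dead bit is only set on objects left unmarked by the previous GC.
    void setDeadMark() { assert(!isMarked()); m_size |= deadBitMask; }
    bool hasDeadMark() const { return m_size & deadBitMask; }
};

int main()
{
    DemoHeader h = { 64 }; // a 64-byte allocation, all flags clear
    h.setDeadMark();
    assert(h.size() == 64 && h.hasDeadMark() && !h.isMarked());
    return 0;
}
```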
| 86 enum CallbackInvocationMode { | |
| 87 GlobalMarking, | |
| 88 ThreadLocalMarking, | |
| 89 WeaknessProcessing, | |
| 90 }; | |
| 76 | 91 |
| 77 class HeapStats; | 92 class HeapStats; |
| 78 class PageMemory; | 93 class PageMemory; |
| 79 template<ThreadAffinity affinity> class ThreadLocalPersistents; | 94 template<ThreadAffinity affinity> class ThreadLocalPersistents; |
| 80 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity > > class Persistent; | 95 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity > > class Persistent; |
| 81 template<typename T> class CrossThreadPersistent; | 96 template<typename T> class CrossThreadPersistent; |
| 82 | 97 |
| 83 PLATFORM_EXPORT size_t osPageSize(); | 98 PLATFORM_EXPORT size_t osPageSize(); |
| 84 | 99 |
| 85 // Blink heap pages are set up with a guard page before and after the | 100 // Blink heap pages are set up with a guard page before and after the |
| (...skipping 34 matching lines...) | |
| 120 | 135 |
| 121 // Sanity check for a page header address: the address of the page | 136 // Sanity check for a page header address: the address of the page |
| 122 // header should be OS page size away from being Blink page size | 137 // header should be OS page size away from being Blink page size |
| 123 // aligned. | 138 // aligned. |
| 124 inline bool isPageHeaderAddress(Address address) | 139 inline bool isPageHeaderAddress(Address address) |
| 125 { | 140 { |
| 126 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - osPageSize()); | 141 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - osPageSize()); |
| 127 } | 142 } |
| 128 #endif | 143 #endif |
| 129 | 144 |
| 130 // Mask an address down to the enclosing oilpan heap page base address. | 145 // Mask an address down to the enclosing oilpan heap base page. |
| 131 // All oilpan heap pages are aligned at blinkPageBase plus an OS page size. | 146 // All oilpan heap pages are aligned at blinkPageBase plus an OS page size. |
| 132 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our typed heaps. | 147 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our typed heaps. |
| 133 // This is only exported to enable tests in HeapTest.cpp. | 148 // This is only exported to enable tests in HeapTest.cpp. |
| 134 PLATFORM_EXPORT inline Address pageHeaderAddress(Address address) | 149 PLATFORM_EXPORT inline BaseHeapPage* pageHeaderFromObject(const void* object) |
| 135 { | 150 { |
| 136 return blinkPageAddress(address) + osPageSize(); | 151 Address address = reinterpret_cast<Address>(const_cast<void*>(object)); |
| 152 return reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + osPageSize()); | |
| 137 } | 153 } |
| 138 | 154 |
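The header lookup above is pure address arithmetic: mask the pointer down to the enclosing Blink page, then step over the guard page. A simplified model of that arithmetic and of `isPageHeaderAddress`; the 4 KiB OS page size is an assumption here, since the real code asks the OS via `osPageSize()`.

```cpp
#include <cassert>
#include <cstdint>

// The 128 KiB Blink page size matches its definition earlier in this
// header; the OS page size is assumed to be 4 KiB for this sketch.
const uintptr_t kBlinkPageSize = uintptr_t(1) << 17;
const uintptr_t kBlinkPageOffsetMask = kBlinkPageSize - 1;
const uintptr_t kOsPageSize = 4096;

uintptr_t blinkPageAddress(uintptr_t address)
{
    return address & ~kBlinkPageOffsetMask; // mask down to the page base
}

uintptr_t pageHeaderAddress(uintptr_t object)
{
    // The header sits one guard page past the aligned page base.
    return blinkPageAddress(object) + kOsPageSize;
}

bool isPageHeaderAddress(uintptr_t address)
{
    return !((address & kBlinkPageOffsetMask) - kOsPageSize);
}

int main()
{
    uintptr_t object = 5 * kBlinkPageSize + 0x2340; // inside some page
    uintptr_t header = pageHeaderAddress(object);
    assert(header == 5 * kBlinkPageSize + kOsPageSize);
    assert(isPageHeaderAddress(header));
    return 0;
}
```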
| 139 // Common header for heap pages. | |
| 140 class BaseHeapPage { | |
| 141 public: | |
| 142 BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) | |
| 143 : m_storage(storage) | |
| 144 , m_gcInfo(gcInfo) | |
| 145 , m_threadState(state) | |
| 146 , m_padding(0) | |
| 147 { | |
| 148 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); | |
| 149 } | |
| 150 | |
| 151 // Check if the given address points to an object in this | |
| 152 // heap page. If so, find the start of that object and mark it | |
| 153 // using the given Visitor. Otherwise do nothing. The pointer must | |
| 154 // be within the same aligned blinkPageSize as the this-pointer. | |
| 155 // | |
| 156 // This is used during conservative stack scanning to | |
| 157 // conservatively mark all objects that could be referenced from | |
| 158 // the stack. | |
| 159 virtual void checkAndMarkPointer(Visitor*, Address) = 0; | |
| 160 | |
| 161 #if ENABLE(GC_TRACING) | |
| 162 virtual const GCInfo* findGCInfo(Address) = 0; | |
| 163 #endif | |
| 164 | |
| 165 Address address() { return reinterpret_cast<Address>(this); } | |
| 166 PageMemory* storage() const { return m_storage; } | |
| 167 ThreadState* threadState() const { return m_threadState; } | |
| 168 const GCInfo* gcInfo() { return m_gcInfo; } | |
| 169 virtual bool isLargeObject() { return false; } | |
| 170 | |
| 171 private: | |
| 172 // Accessor to silence unused warnings for the m_padding field. | |
| 173 intptr_t padding() const { return m_padding; } | |
| 174 | |
| 175 PageMemory* m_storage; | |
| 176 const GCInfo* m_gcInfo; | |
| 177 ThreadState* m_threadState; | |
| 178 // Pointer sized integer to ensure proper alignment of the | |
| 179 // HeapPage header. This can be used as a bit field if we need | |
| 180 // to associate more information with pages. | |
| 181 intptr_t m_padding; | |
| 182 }; | |
| 183 | |
| 184 // Large allocations are allocated as separate objects and linked in a | 155 // Large allocations are allocated as separate objects and linked in a |
| 185 // list. | 156 // list. |
| 186 // | 157 // |
| 187 // In order to use the same memory allocation routines for everything | 158 // In order to use the same memory allocation routines for everything |
| 188 // allocated in the heap, large objects are considered heap pages | 159 // allocated in the heap, large objects are considered heap pages |
| 189 // containing only one object. | 160 // containing only one object. |
| 190 // | 161 // |
| 191 // The layout of a large heap object is as follows: | 162 // The layout of a large heap object is as follows: |
| 192 // | 163 // |
| 193 // | BaseHeapPage | next pointer | FinalizedHeapObjectHeader or HeapObjectHeader | payload | | 164 // | BaseHeapPage | next pointer | FinalizedHeapObjectHeader or HeapObjectHeader | payload | |
| (...skipping 31 matching lines...) | |
| 225 // The LargeHeapObject pseudo-page contains one actual object. Determine | 196 // The LargeHeapObject pseudo-page contains one actual object. Determine |
| 226 // whether the pointer is within that object. | 197 // whether the pointer is within that object. |
| 227 bool objectContains(Address object) | 198 bool objectContains(Address object) |
| 228 { | 199 { |
| 229 return (payload() <= object) && (object < address() + size()); | 200 return (payload() <= object) && (object < address() + size()); |
| 230 } | 201 } |
| 231 | 202 |
| 232 // Returns true for any address that is on one of the pages that this | 203 // Returns true for any address that is on one of the pages that this |
| 233 // large object uses. That ensures that we can use a negative result to | 204 // large object uses. That ensures that we can use a negative result to |
| 234 // populate the negative page cache. | 205 // populate the negative page cache. |
| 235 bool contains(Address object) | 206 virtual bool contains(Address object) OVERRIDE |
| 236 { | 207 { |
| 237 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size()); | 208 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size()); |
| 238 } | 209 } |
| 239 | 210 |
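The `contains()` check above relies on rounding helpers that snap an address outward to Blink page boundaries, since a large object may span several pages. A sketch of that rounding; the helper bodies here are assumptions matching their names, with the usual power-of-two page size.

```cpp
#include <cassert>
#include <cstdint>

// kBlinkPageSize mirrors the 1 << 17 definition earlier in this header.
const uintptr_t kBlinkPageSize = uintptr_t(1) << 17;

uintptr_t roundToBlinkPageStart(uintptr_t address)
{
    return address & ~(kBlinkPageSize - 1); // round down
}

uintptr_t roundToBlinkPageEnd(uintptr_t address)
{
    return (address + kBlinkPageSize - 1) & ~(kBlinkPageSize - 1); // round up
}

int main()
{
    const uintptr_t base = 4 * kBlinkPageSize;     // page-aligned large object
    const uintptr_t size = 3 * kBlinkPageSize + 5; // spans four pages
    assert(roundToBlinkPageStart(base + 7) == base);
    assert(roundToBlinkPageEnd(base + size) == base + 4 * kBlinkPageSize);
    return 0;
}
```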
| 240 LargeHeapObject<Header>* next() | 211 LargeHeapObject<Header>* next() |
| 241 { | 212 { |
| 242 return m_next; | 213 return m_next; |
| 243 } | 214 } |
| 244 | 215 |
| 245 size_t size() | 216 size_t size() |
| 246 { | 217 { |
| 247 return heapObjectHeader()->size() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); | 218 return heapObjectHeader()->size() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); |
| 248 } | 219 } |
| 249 | 220 |
| 250 Address payload() { return heapObjectHeader()->payload(); } | 221 Address payload() { return heapObjectHeader()->payload(); } |
| 251 size_t payloadSize() { return heapObjectHeader()->payloadSize(); } | 222 size_t payloadSize() { return heapObjectHeader()->payloadSize(); } |
| 252 | 223 |
| 253 Header* heapObjectHeader() | 224 Header* heapObjectHeader() |
| 254 { | 225 { |
| 255 Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); | 226 Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); |
| 256 return reinterpret_cast<Header*>(headerAddress); | 227 return reinterpret_cast<Header*>(headerAddress); |
| 257 } | 228 } |
| 258 | 229 |
| 259 bool isMarked(); | 230 bool isMarked(); |
| 260 void unmark(); | 231 void unmark(); |
| 261 void getStats(HeapStats&); | 232 void getStats(HeapStats&); |
| 262 void mark(Visitor*); | 233 void mark(Visitor*); |
| 263 void finalize(); | 234 void finalize(); |
| 235 void setDeadMark(); | |
| 236 virtual void markOrphaned() | |
| 237 { | |
| 238 // Zap the payload with a recognizable value to detect any incorrect | |
| 239 // cross thread pointer usage. | |
| 240 memset(payload(), orphanedZapValue, payloadSize()); | |
| 241 BaseHeapPage::markOrphaned(); | |
| 242 } | |
| 264 | 243 |
| 265 private: | 244 private: |
| 266 friend class ThreadHeap<Header>; | 245 friend class ThreadHeap<Header>; |
| 267 | 246 |
| 268 LargeHeapObject<Header>* m_next; | 247 LargeHeapObject<Header>* m_next; |
| 269 }; | 248 }; |
| 270 | 249 |
| 271 // The BasicObjectHeader is the minimal object header. It is used when | 250 // The BasicObjectHeader is the minimal object header. It is used when |
| 272 // encountering heap space of size allocationGranularity to mark it as | 251 // encountering heap space of size allocationGranularity to mark it as |
| 273 // a freelist entry. | 252 // a freelist entry. |
| (...skipping 48 matching lines...) | |
| 322 | 301 |
| 323 inline void mark(); | 302 inline void mark(); |
| 324 inline void unmark(); | 303 inline void unmark(); |
| 325 | 304 |
| 326 inline const GCInfo* gcInfo() { return 0; } | 305 inline const GCInfo* gcInfo() { return 0; } |
| 327 | 306 |
| 328 inline Address payload(); | 307 inline Address payload(); |
| 329 inline size_t payloadSize(); | 308 inline size_t payloadSize(); |
| 330 inline Address payloadEnd(); | 309 inline Address payloadEnd(); |
| 331 | 310 |
| 332 inline void setDebugMark(); | 311 inline void setDeadMark(); |
| 333 inline void clearDebugMark(); | 312 inline void clearDeadMark(); |
| 334 inline bool hasDebugMark() const; | 313 inline bool hasDeadMark() const; |
| 335 | 314 |
| 336 // Zap magic number with a new magic number that means there was once an | 315 // Zap magic number with a new magic number that means there was once an |
| 337 // object allocated here, but it was freed because nobody marked it during | 316 // object allocated here, but it was freed because nobody marked it during |
| 338 // GC. | 317 // GC. |
| 339 void zapMagic(); | 318 void zapMagic(); |
| 340 | 319 |
| 341 static void finalize(const GCInfo*, Address, size_t); | 320 static void finalize(const GCInfo*, Address, size_t); |
| 342 static HeapObjectHeader* fromPayload(const void*); | 321 static HeapObjectHeader* fromPayload(const void*); |
| 343 | 322 |
| 344 static const intptr_t magic = 0xc0de247; | 323 static const intptr_t magic = 0xc0de247; |
| (...skipping 116 matching lines...) | |
| 461 HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*); | 440 HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*); |
| 462 | 441 |
| 463 void link(HeapPage**); | 442 void link(HeapPage**); |
| 464 static void unlink(HeapPage*, HeapPage**); | 443 static void unlink(HeapPage*, HeapPage**); |
| 465 | 444 |
| 466 bool isEmpty(); | 445 bool isEmpty(); |
| 467 | 446 |
| 468 // Returns true for the whole blinkPageSize page that the page is on, even | 447 // Returns true for the whole blinkPageSize page that the page is on, even |
| 469 // for the header, and the unmapped guard page at the start. That ensures | 448 // for the header, and the unmapped guard page at the start. That ensures |
| 470 // the result can be used to populate the negative page cache. | 449 // the result can be used to populate the negative page cache. |
| 471 bool contains(Address addr) | 450 virtual bool contains(Address addr) OVERRIDE |
| 472 { | 451 { |
| 473 Address blinkPageStart = roundToBlinkPageStart(address()); | 452 Address blinkPageStart = roundToBlinkPageStart(address()); |
| 474 ASSERT(blinkPageStart == address() - osPageSize()); // Page is at aligned address plus guard page size. | 453 ASSERT(blinkPageStart == address() - osPageSize()); // Page is at aligned address plus guard page size. |
| 475 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; | 454 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; |
| 476 } | 455 } |
| 477 | 456 |
| 478 HeapPage* next() { return m_next; } | 457 HeapPage* next() { return m_next; } |
| 479 | 458 |
| 480 Address payload() | 459 Address payload() |
| 481 { | 460 { |
| 482 return address() + sizeof(*this) + headerPadding<Header>(); | 461 return address() + sizeof(*this) + headerPadding<Header>(); |
| 483 } | 462 } |
| 484 | 463 |
| 485 static size_t payloadSize() | 464 static size_t payloadSize() |
| 486 { | 465 { |
| 487 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask; | 466 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask; |
| 488 } | 467 } |
| 489 | 468 |
| 490 Address end() { return payload() + payloadSize(); } | 469 Address end() { return payload() + payloadSize(); } |
| 491 | 470 |
| 492 void getStats(HeapStats&); | 471 void getStats(HeapStats&); |
| 493 void clearMarks(); | 472 void clearLiveAndMarkDead(); |
| 494 void sweep(); | 473 void sweep(); |
| 495 void clearObjectStartBitMap(); | 474 void clearObjectStartBitMap(); |
| 496 void finalize(Header*); | 475 void finalize(Header*); |
| 497 virtual void checkAndMarkPointer(Visitor*, Address) OVERRIDE; | 476 virtual void checkAndMarkPointer(Visitor*, Address) OVERRIDE; |
| 498 #if ENABLE(GC_TRACING) | 477 #if ENABLE(GC_TRACING) |
| 499 const GCInfo* findGCInfo(Address) OVERRIDE; | 478 const GCInfo* findGCInfo(Address) OVERRIDE; |
| 500 #endif | 479 #endif |
| 501 ThreadHeap<Header>* heap() { return m_heap; } | 480 ThreadHeap<Header>* heap() { return m_heap; } |
| 502 #if defined(ADDRESS_SANITIZER) | 481 #if defined(ADDRESS_SANITIZER) |
| 503 void poisonUnmarkedObjects(); | 482 void poisonUnmarkedObjects(); |
| 504 #endif | 483 #endif |
| 484 virtual void markOrphaned() | |
| 485 { | |
| 486 // Zap the payload with a recognizable value to detect any incorrect | |
| 487 // cross thread pointer usage. | |
| 488 memset(payload(), orphanedZapValue, payloadSize()); | |
| 489 BaseHeapPage::markOrphaned(); | |
| 490 } | |
| 505 | 491 |
| 506 protected: | 492 protected: |
| 507 Header* findHeaderFromAddress(Address); | 493 Header* findHeaderFromAddress(Address); |
| 508 void populateObjectStartBitMap(); | 494 void populateObjectStartBitMap(); |
| 509 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } | 495 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } |
| 510 TraceCallback traceCallback(Header*); | 496 TraceCallback traceCallback(Header*); |
| 511 bool hasVTable(Header*); | 497 bool hasVTable(Header*); |
| 512 | 498 |
| 513 HeapPage<Header>* m_next; | 499 HeapPage<Header>* m_next; |
| 514 ThreadHeap<Header>* m_heap; | 500 ThreadHeap<Header>* m_heap; |
| (...skipping 155 matching lines...) | |
| 670 using GarbageCollectedFinalized<T>::operator delete; | 656 using GarbageCollectedFinalized<T>::operator delete; |
| 671 | 657 |
| 672 protected: | 658 protected: |
| 673 ~ThreadSafeRefCountedGarbageCollected() { } | 659 ~ThreadSafeRefCountedGarbageCollected() { } |
| 674 | 660 |
| 675 private: | 661 private: |
| 676 OwnPtr<CrossThreadPersistent<T> > m_keepAlive; | 662 OwnPtr<CrossThreadPersistent<T> > m_keepAlive; |
| 677 mutable Mutex m_mutex; | 663 mutable Mutex m_mutex; |
| 678 }; | 664 }; |
| 679 | 665 |
| 666 template<typename DataType> | |
| 667 class PagePool { | |
| 668 protected: | |
| 669 PagePool(); | |
| 670 | |
| 671 class PoolEntry { | |
| 672 public: | |
| 673 PoolEntry(DataType* data, PoolEntry* next) | |
| 674 : data(data) | |
| 675 , next(next) | |
| 676 { } | |
| 677 | |
| 678 DataType* data; | |
| 679 PoolEntry* next; | |
| 680 }; | |
| 681 | |
| 682 PoolEntry* m_pool[NumberOfHeaps]; | |
| 683 }; | |
| 684 | |
| 685 // Once pages have been used for one type of thread heap they will never be | |
| 686 // reused for another type of thread heap. Instead of unmapping, we add the | |
| 687 // pages to a pool of pages to be reused later by a thread heap of the same | |
| 688 // type. This is done as a security feature to avoid type confusion. The | |
| 689 // heaps are type segregated by having separate thread heaps for different | |
| 690 // types of objects. Holding on to pages ensures that the same virtual address | |
| 691 // space cannot be used for objects of another type than the type contained | |
| 692 // in this page to begin with. | |
| 693 class FreePagePool : public PagePool<PageMemory> { | |
| 694 public: | |
| 695 ~FreePagePool(); | |
| 696 void addFreePage(int index, PageMemory*); | |

haraken (2014/07/13 17:30:39): Nit: Remove |index|.
wibling-chromium (2014/07/14 08:06:11): Done.
| 697 PageMemory* takeFreePage(int index); | |

haraken (2014/07/13 17:30:39): Ditto.
wibling-chromium (2014/07/14 08:06:11): Done.
| 698 | |
| 699 private: | |
| 700 Mutex m_mutex[NumberOfHeaps]; | |
| 701 }; | |
| 702 | |
| 703 class OrphanedPagePool : public PagePool<BaseHeapPage> { | |
| 704 public: | |
| 705 ~OrphanedPagePool(); | |
| 706 void addOrphanedPage(int, BaseHeapPage*); | |
| 707 void decommitOrphanedPages(); | |
| 708 #ifndef NDEBUG | |
| 709 bool contains(void*); | |
| 710 #endif | |
| 711 }; | |
| 712 | |
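Both pools share the `PoolEntry` linked-list shape. A single-index sketch of the push/pop pattern that `addFreePage` and `takeFreePage` presumably implement in Heap.cpp; the real code also takes the per-heap mutex, which is omitted here.

```cpp
#include <cstddef>

struct PageMemoryStub { }; // stands in for PageMemory

// Same shape as PagePool::PoolEntry above.
struct PoolEntry {
    PoolEntry(PageMemoryStub* data, PoolEntry* next) : data(data), next(next) { }
    PageMemoryStub* data;
    PoolEntry* next;
};

static PoolEntry* s_pool = 0;

void addFreePage(PageMemoryStub* memory)
{
    s_pool = new PoolEntry(memory, s_pool); // push onto the singly linked list
}

PageMemoryStub* takeFreePage()
{
    if (!s_pool)
        return 0; // caller falls back to allocating fresh page memory
    PoolEntry* entry = s_pool;
    s_pool = entry->next;
    PageMemoryStub* memory = entry->data;
    delete entry;
    return memory;
}

int main()
{
    PageMemoryStub page;
    addFreePage(&page);
    PageMemoryStub* taken = takeFreePage();
    return taken == &page ? 0 : 1;
}
```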
| 680 // The CallbackStack contains all the visitor callbacks used to trace and mark | 713 // The CallbackStack contains all the visitor callbacks used to trace and mark |
| 681 // objects. A specific CallbackStack instance contains at most bufferSize elements. | 714 // objects. A specific CallbackStack instance contains at most bufferSize elements. |
| 682 // If more space is needed a new CallbackStack instance is created and chained | 715 // If more space is needed a new CallbackStack instance is created and chained |
| 683 // together with the former instance. I.e. a logical CallbackStack can be made of | 716 // together with the former instance. I.e. a logical CallbackStack can be made of |
| 684 // multiple chained CallbackStack object instances. | 717 // multiple chained CallbackStack object instances. |
| 685 // There are two logical callback stacks. One containing all the marking callbacks and | 718 // There are two logical callback stacks. One containing all the marking callbacks and |
| 686 // one containing the weak pointer callbacks. | 719 // one containing the weak pointer callbacks. |
| 687 class CallbackStack { | 720 class CallbackStack { |
| 688 public: | 721 public: |
| 689 CallbackStack(CallbackStack** first) | 722 CallbackStack(CallbackStack** first) |
| (...skipping 30 matching lines...) | |
| 720 | 753 |
| 721 static void init(CallbackStack** first); | 754 static void init(CallbackStack** first); |
| 722 static void shutdown(CallbackStack** first); | 755 static void shutdown(CallbackStack** first); |
| 723 static void clear(CallbackStack** first) | 756 static void clear(CallbackStack** first) |
| 724 { | 757 { |
| 725 if (!(*first)->isEmpty()) { | 758 if (!(*first)->isEmpty()) { |
| 726 shutdown(first); | 759 shutdown(first); |
| 727 init(first); | 760 init(first); |
| 728 } | 761 } |
| 729 } | 762 } |
| 730 bool popAndInvokeCallback(CallbackStack** first, Visitor*); | 763 template<CallbackInvocationMode Mode> bool popAndInvokeCallback(CallbackStack** first, Visitor*); |
| 731 static void invokeCallbacks(CallbackStack** first, Visitor*); | 764 static void invokeCallbacks(CallbackStack** first, Visitor*); |
| 732 | 765 |
| 733 Item* allocateEntry(CallbackStack** first) | 766 Item* allocateEntry(CallbackStack** first) |
| 734 { | 767 { |
| 735 if (m_current < m_limit) | 768 if (m_current < m_limit) |
| 736 return m_current++; | 769 return m_current++; |
| 737 return (new CallbackStack(first))->allocateEntry(first); | 770 return (new CallbackStack(first))->allocateEntry(first); |
| 738 } | 771 } |
| 739 | 772 |
| 740 #ifndef NDEBUG | 773 #ifndef NDEBUG |
| 741 bool hasCallbackForObject(const void*); | 774 bool hasCallbackForObject(const void*); |
| 742 #endif | 775 #endif |
| 743 | 776 |
| 744 private: | 777 private: |
| 745 void invokeOldestCallbacks(Visitor*); | 778 void invokeOldestCallbacks(Visitor*); |
| 746 | 779 |
| 747 static const size_t bufferSize = 8000; | 780 static const size_t bufferSize = 8000; |
| 748 Item m_buffer[bufferSize]; | 781 Item m_buffer[bufferSize]; |
| 749 Item* m_limit; | 782 Item* m_limit; |
| 750 Item* m_current; | 783 Item* m_current; |
| 751 CallbackStack* m_next; | 784 CallbackStack* m_next; |
| 752 }; | 785 }; |
| 753 | 786 |
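The chaining behaviour is easiest to see in isolation: when a buffer fills, `allocateEntry` constructs a fresh instance that prepends itself to `*first` and satisfies the allocation from the new chunk. A cut-down sketch with `bufferSize` shrunk to 2 to force chaining (Blink uses 8000); `DemoStack` is illustrative, and the demo leaks its chunks.

```cpp
#include <cassert>
#include <cstddef>

class DemoStack {
public:
    explicit DemoStack(DemoStack** first)
        : m_limit(m_buffer + bufferSize)
        , m_current(m_buffer)
        , m_next(*first)
    {
        *first = this; // the new chunk becomes the head of the chain
    }

    int* allocateEntry(DemoStack** first)
    {
        if (m_current < m_limit)
            return m_current++;
        // Buffer full: chain a new instance and allocate from it.
        return (new DemoStack(first))->allocateEntry(first);
    }

private:
    static const size_t bufferSize = 2; // tiny, to exercise chaining
    int m_buffer[bufferSize];
    int* m_limit;
    int* m_current;
    DemoStack* m_next;
};

int main()
{
    DemoStack* first = 0;
    new DemoStack(&first);
    DemoStack* head = first;
    for (int i = 0; i < 3; ++i)
        *first->allocateEntry(&first) = i; // third entry forces a new chunk
    assert(first != head);
    return 0;
}
```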
| 754 // Non-template super class used to pass a heap around to other classes. | 787 // Non-template super class used to pass a heap around to other classes. |
| 755 class BaseHeap { | 788 class BaseHeap { |
| 756 public: | 789 public: |
| 757 virtual ~BaseHeap() { } | 790 virtual ~BaseHeap() { } |
| 791 virtual void cleanupPages() = 0; | |
| 758 | 792 |
| 759 // Find the page in this thread heap containing the given | 793 // Find the page in this thread heap containing the given |
| 760 // address. Returns 0 if the address is not contained in any | 794 // address. Returns 0 if the address is not contained in any |
| 761 // page in this thread heap. | 795 // page in this thread heap. |
| 762 virtual BaseHeapPage* heapPageFromAddress(Address) = 0; | 796 virtual BaseHeapPage* heapPageFromAddress(Address) = 0; |
| 763 | 797 |
| 764 #if ENABLE(GC_TRACING) | 798 #if ENABLE(GC_TRACING) |
| 765 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address) = 0; | 799 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address) = 0; |
| 766 #endif | 800 #endif |
| 767 | 801 |
| 768 // Sweep this part of the Blink heap. This finalizes dead objects | 802 // Sweep this part of the Blink heap. This finalizes dead objects |
| 769 // and builds freelists for all the unused memory. | 803 // and builds freelists for all the unused memory. |
| 770 virtual void sweep() = 0; | 804 virtual void sweep() = 0; |
| 771 | 805 |
| 772 // Forcefully finalize all objects in this part of the Blink heap | |
| 773 // (potentially with the exception of one object). This is used | |
| 774 // during thread termination to make sure that all objects for the | |
| 775 // dying thread are finalized. | |
| 776 virtual void assertEmpty() = 0; | |
| 777 | |
| 778 virtual void clearFreeLists() = 0; | 806 virtual void clearFreeLists() = 0; |
| 779 virtual void clearMarks() = 0; | 807 virtual void clearLiveAndMarkDead() = 0; |
| 780 #ifndef NDEBUG | 808 #ifndef NDEBUG |
| 781 virtual void getScannedStats(HeapStats&) = 0; | 809 virtual void getScannedStats(HeapStats&) = 0; |
| 782 #endif | 810 #endif |
| 783 | 811 |
| 784 virtual void makeConsistentForGC() = 0; | 812 virtual void makeConsistentForGC() = 0; |
| 785 virtual bool isConsistentForGC() = 0; | 813 virtual bool isConsistentForGC() = 0; |
| 786 | 814 |
| 815 virtual void prepareHeapForTermination() = 0; | |
| 816 | |
| 787 // Returns a bucket number for inserting a FreeListEntry of a | 817 // Returns a bucket number for inserting a FreeListEntry of a |
| 788 // given size. All FreeListEntries in the given bucket, n, have | 818 // given size. All FreeListEntries in the given bucket, n, have |
| 789 // size >= 2^n. | 819 // size >= 2^n. |
| 790 static int bucketIndexForSize(size_t); | 820 static int bucketIndexForSize(size_t); |
| 791 }; | 821 }; |
| 792 | 822 |
| 793 // Thread heaps represent a part of the per-thread Blink heap. | 823 // Thread heaps represent a part of the per-thread Blink heap. |
| 794 // | 824 // |
| 795 // Each Blink thread has a number of thread heaps: one general heap | 825 // Each Blink thread has a number of thread heaps: one general heap |
| 796 // that contains any type of object and a number of heaps specialized | 826 // that contains any type of object and a number of heaps specialized |
| 797 // for specific object types (such as Node). | 827 // for specific object types (such as Node). |
| 798 // | 828 // |
| 799 // Each thread heap contains the functionality to allocate new objects | 829 // Each thread heap contains the functionality to allocate new objects |
| 800 // (potentially adding new pages to the heap), to find and mark | 830 // (potentially adding new pages to the heap), to find and mark |
| 801 // objects during conservative stack scanning and to sweep the set of | 831 // objects during conservative stack scanning and to sweep the set of |
| 802 // pages after a GC. | 832 // pages after a GC. |
| 803 template<typename Header> | 833 template<typename Header> |
| 804 class ThreadHeap : public BaseHeap { | 834 class ThreadHeap : public BaseHeap { |
| 805 public: | 835 public: |
| 806 ThreadHeap(ThreadState*); | 836 ThreadHeap(ThreadState*, int); |
| 807 virtual ~ThreadHeap(); | 837 virtual ~ThreadHeap(); |
| 838 virtual void cleanupPages(); | |
| 808 | 839 |
| 809 virtual BaseHeapPage* heapPageFromAddress(Address); | 840 virtual BaseHeapPage* heapPageFromAddress(Address); |
| 810 #if ENABLE(GC_TRACING) | 841 #if ENABLE(GC_TRACING) |
| 811 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address); | 842 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address); |
| 812 #endif | 843 #endif |
| 813 virtual void sweep(); | 844 virtual void sweep(); |
| 814 virtual void assertEmpty(); | |
| 815 virtual void clearFreeLists(); | 845 virtual void clearFreeLists(); |
| 816 virtual void clearMarks(); | 846 virtual void clearLiveAndMarkDead(); |
| 817 #ifndef NDEBUG | 847 #ifndef NDEBUG |
| 818 virtual void getScannedStats(HeapStats&); | 848 virtual void getScannedStats(HeapStats&); |
| 819 #endif | 849 #endif |
| 820 | 850 |
| 821 virtual void makeConsistentForGC(); | 851 virtual void makeConsistentForGC(); |
| 822 virtual bool isConsistentForGC(); | 852 virtual bool isConsistentForGC(); |
| 823 | 853 |
| 824 ThreadState* threadState() { return m_threadState; } | 854 ThreadState* threadState() { return m_threadState; } |
| 825 HeapStats& stats() { return m_threadState->stats(); } | 855 HeapStats& stats() { return m_threadState->stats(); } |
| 826 void flushHeapContainsCache() | 856 void flushHeapContainsCache() |
| 827 { | 857 { |
| 828 m_threadState->heapContainsCache()->flush(); | 858 m_threadState->heapContainsCache()->flush(); |
| 829 } | 859 } |
| 830 | 860 |
| 831 inline Address allocate(size_t, const GCInfo*); | 861 inline Address allocate(size_t, const GCInfo*); |
| 832 void addToFreeList(Address, size_t); | 862 void addToFreeList(Address, size_t); |
| 833 void addPageMemoryToPool(PageMemory*); | |
| 834 void addPageToPool(HeapPage<Header>*); | |
| 835 inline static size_t roundedAllocationSize(size_t size) | 863 inline static size_t roundedAllocationSize(size_t size) |
| 836 { | 864 { |
| 837 return allocationSizeFromSize(size) - sizeof(Header); | 865 return allocationSizeFromSize(size) - sizeof(Header); |
| 838 } | 866 } |
| 839 | 867 |
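`roundedAllocationSize` shows that `allocationSizeFromSize` accounts for the header and then rounds to the allocation granularity. A guess at its shape, consistent with the 8-byte granularity; the actual body is elsewhere in this header and may differ, and `headerSize` stands in for `sizeof(Header)`.

```cpp
#include <cassert>
#include <cstddef>

const size_t allocationGranularity = 8;
const size_t allocationMask = allocationGranularity - 1;
const size_t headerSize = 8; // stand-in for sizeof(Header)

size_t allocationSizeFromSize(size_t size)
{
    // Add header space, then round up to the allocation granularity.
    return (size + headerSize + allocationMask) & ~allocationMask;
}

size_t roundedAllocationSize(size_t size)
{
    return allocationSizeFromSize(size) - headerSize;
}

int main()
{
    assert(allocationSizeFromSize(1) == 16); // 1 + 8, rounded up to 16
    assert(roundedAllocationSize(1) == 8);   // usable payload after rounding
    assert(allocationSizeFromSize(8) == 16);
    return 0;
}
```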
| 868 void prepareHeapForTermination(); | |
| 869 void removePageFromHeap(HeapPage<Header>*); | |
| 870 | |
| 840 private: | 871 private: |
| 841 // Once pages have been used for one thread heap they will never | 872 void addPageToHeap(const GCInfo*); |
| 842 // be reused for another thread heap. Instead of unmapping, we add | |
| 843 // the pages to a pool of pages to be reused later by this thread | |
| 844 // heap. This is done as a security feature to avoid type | |
| 845 // confusion. The heap is type segregated by having separate | |
| 846 // thread heaps for various types of objects. Holding on to pages | |
| 847 // ensures that the same virtual address space cannot be used for | |
| 848 // objects of another type than the type contained in this thread | |
| 849 // heap. | |
| 850 class PagePoolEntry { | |
| 851 public: | |
| 852 PagePoolEntry(PageMemory* storage, PagePoolEntry* next) | |
| 853 : m_storage(storage) | |
| 854 , m_next(next) | |
| 855 { } | |
| 856 | |
| 857 PageMemory* storage() { return m_storage; } | |
| 858 PagePoolEntry* next() { return m_next; } | |
| 859 | |
| 860 private: | |
| 861 PageMemory* m_storage; | |
| 862 PagePoolEntry* m_next; | |
| 863 }; | |
| 864 | |
| 865 PLATFORM_EXPORT Address outOfLineAllocate(size_t, const GCInfo*); | 873 PLATFORM_EXPORT Address outOfLineAllocate(size_t, const GCInfo*); |
| 866 static size_t allocationSizeFromSize(size_t); | 874 static size_t allocationSizeFromSize(size_t); |
| 867 void addPageToHeap(const GCInfo*); | |
| 868 PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*); | 875 PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*); |
| 869 Address currentAllocationPoint() const { return m_currentAllocationPoint; } | 876 Address currentAllocationPoint() const { return m_currentAllocationPoint; } |
| 870 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } | 877 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } |
| 871 bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } | 878 bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } |
| 872 void setAllocationPoint(Address point, size_t size) | 879 void setAllocationPoint(Address point, size_t size) |
| 873 { | 880 { |
| 874 ASSERT(!point || heapPageFromAddress(point)); | 881 ASSERT(!point || heapPageFromAddress(point)); |
| 875 ASSERT(size <= HeapPage<Header>::payloadSize()); | 882 ASSERT(size <= HeapPage<Header>::payloadSize()); |
| 876 m_currentAllocationPoint = point; | 883 m_currentAllocationPoint = point; |
| 877 m_remainingAllocationSize = size; | 884 m_remainingAllocationSize = size; |
| 878 } | 885 } |
| 879 void ensureCurrentAllocation(size_t, const GCInfo*); | 886 void ensureCurrentAllocation(size_t, const GCInfo*); |
| 880 bool allocateFromFreeList(size_t); | 887 bool allocateFromFreeList(size_t); |
| 881 | 888 |
| 882 void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**); | 889 void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**); |
| 883 | |
| 884 void allocatePage(const GCInfo*); | 890 void allocatePage(const GCInfo*); |
| 885 PageMemory* takePageFromPool(); | |
| 886 void clearPagePool(); | |
| 887 void deletePages(); | |
| 888 | 891 |
| 889 Address m_currentAllocationPoint; | 892 Address m_currentAllocationPoint; |
| 890 size_t m_remainingAllocationSize; | 893 size_t m_remainingAllocationSize; |
| 891 | 894 |
| 892 HeapPage<Header>* m_firstPage; | 895 HeapPage<Header>* m_firstPage; |
| 893 LargeHeapObject<Header>* m_firstLargeHeapObject; | 896 LargeHeapObject<Header>* m_firstLargeHeapObject; |
| 894 | 897 |
| 895 int m_biggestFreeListIndex; | 898 int m_biggestFreeListIndex; |
| 896 ThreadState* m_threadState; | 899 ThreadState* m_threadState; |
| 897 | 900 |
| 898 // All FreeListEntries in the nth list have size >= 2^n. | 901 // All FreeListEntries in the nth list have size >= 2^n. |
| 899 FreeListEntry* m_freeLists[blinkPageSizeLog2]; | 902 FreeListEntry* m_freeLists[blinkPageSizeLog2]; |
| 900 | 903 |
| 901 // List of pages that have been previously allocated, but are now | 904 // Index into the page pools. This is used to ensure that the pages of the |
| 902 // unused. | 905 // same type go into the correct page pool and thus avoid type confusion. |
| 903 PagePoolEntry* m_pagePool; | 906 int m_index; |
| 904 }; | 907 }; |
| 905 | 908 |
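The `m_currentAllocationPoint` / `m_remainingAllocationSize` pair implements a classic bump-pointer fast path. A minimal sketch of just that path; the real `allocate()` also writes an object header and falls back to `outOfLineAllocate`, both omitted here.

```cpp
#include <cassert>
#include <cstddef>

class BumpAllocator {
public:
    BumpAllocator(char* point, size_t size)
        : m_currentAllocationPoint(point)
        , m_remainingAllocationSize(size)
    { }

    char* allocate(size_t size)
    {
        if (size > m_remainingAllocationSize)
            return 0; // would be the out-of-line slow path
        char* result = m_currentAllocationPoint;
        m_currentAllocationPoint += size;   // bump the pointer
        m_remainingAllocationSize -= size;
        return result;
    }

private:
    char* m_currentAllocationPoint;
    size_t m_remainingAllocationSize;
};

int main()
{
    char buffer[64];
    BumpAllocator heap(buffer, sizeof(buffer));
    char* a = heap.allocate(8);
    char* b = heap.allocate(8);
    assert(b == a + 8); // consecutive allocations are adjacent
    return 0;
}
```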
| 906 class PLATFORM_EXPORT Heap { | 909 class PLATFORM_EXPORT Heap { |
| 907 public: | 910 public: |
| 908 static void init(); | 911 static void init(); |
| 909 static void shutdown(); | 912 static void shutdown(); |
| 910 static void doShutdown(); | 913 static void doShutdown(); |
| 911 | 914 |
| 912 static BaseHeapPage* contains(Address); | 915 static BaseHeapPage* contains(Address); |
| 913 static BaseHeapPage* contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); } | 916 static BaseHeapPage* contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); } |
| 914 static BaseHeapPage* contains(const void* pointer) { return contains(const_cast<void*>(pointer)); } | 917 static BaseHeapPage* contains(const void* pointer) { return contains(const_cast<void*>(pointer)); } |
| 918 #ifndef NDEBUG | |
| 919 static bool containedInHeapOrOrphanedPage(void*); | |
| 920 #endif | |
| 915 | 921 |
| 916 // Push a trace callback on the marking stack. | 922 // Push a trace callback on the marking stack. |
| 917 static void pushTraceCallback(void* containerObject, TraceCallback); | 923 static void pushTraceCallback(void* containerObject, TraceCallback); |
| 918 | 924 |
| 919 // Add a weak pointer callback to the weak callback work list. General | 925 // Add a weak pointer callback to the weak callback work list. General |
| 920 // object pointer callbacks are added to a thread local weak callback work | 926 // object pointer callbacks are added to a thread local weak callback work |
| 921 // list and the callback is called on the thread that owns the object, with | 927 // list and the callback is called on the thread that owns the object, with |
| 922 // the closure pointer as an argument. Most of the time, the closure and | 928 // the closure pointer as an argument. Most of the time, the closure and |
| 923 // the containerObject can be the same thing, but the containerObject is | 929 // the containerObject can be the same thing, but the containerObject is |
| 924 // constrained to be on the heap, since the heap is used to identify the | 930 // constrained to be on the heap, since the heap is used to identify the |
| 925 // correct thread. | 931 // correct thread. |
| 926 static void pushWeakObjectPointerCallback(void* closure, void* containerObject, WeakPointerCallback); | 932 static void pushWeakObjectPointerCallback(void* closure, void* containerObject, WeakPointerCallback); |
| 927 | 933 |
| 928 // Similar to the more general pushWeakObjectPointerCallback, but cell | 934 // Similar to the more general pushWeakObjectPointerCallback, but cell |
| 929 // pointer callbacks are added to a static callback work list and the weak | 935 // pointer callbacks are added to a static callback work list and the weak |
| 930 // callback is performed on the thread performing garbage collection. This | 936 // callback is performed on the thread performing garbage collection. This |
| 931 // is OK because cells are just cleared and no deallocation can happen. | 937 // is OK because cells are just cleared and no deallocation can happen. |
| 932 static void pushWeakCellPointerCallback(void** cell, WeakPointerCallback); | 938 static void pushWeakCellPointerCallback(void** cell, WeakPointerCallback); |
| 933 | 939 |
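A toy model of the weak-cell contract described above: callbacks registered during marking run afterwards and clear any cell whose referent was not marked, and nothing is deallocated. All names here are illustrative, not Blink's; the real callbacks run with a `Visitor`.

```cpp
#include <cassert>
#include <vector>

struct Object { bool marked; }; // stand-in for a garbage collected object

static std::vector<void**> s_weakCells;

void pushWeakCellPointer(void** cell)
{
    s_weakCells.push_back(cell); // registered while marking
}

void processWeakCells()
{
    for (size_t i = 0; i < s_weakCells.size(); ++i) {
        void** cell = s_weakCells[i];
        Object* object = static_cast<Object*>(*cell);
        if (object && !object->marked)
            *cell = 0; // cells are just cleared; no deallocation happens
    }
    s_weakCells.clear();
}

int main()
{
    Object live = { true };
    Object dead = { false };
    void* liveCell = &live;
    void* deadCell = &dead;
    pushWeakCellPointer(&liveCell);
    pushWeakCellPointer(&deadCell);
    processWeakCells();
    assert(liveCell == &live);
    assert(deadCell == 0);
    return 0;
}
```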
| 934 // Pop the top of the marking stack and call the callback with the visitor | 940 // Pop the top of the marking stack and call the callback with the visitor |
| 935 // and the object. Returns false when there is nothing more to do. | 941 // and the object. Returns false when there is nothing more to do. |
| 936 static bool popAndInvokeTraceCallback(Visitor*); | 942 template<CallbackInvocationMode Mode> static bool popAndInvokeTraceCallback(Visitor*); |
| 937 | 943 |
| 938 // Remove an item from the weak callback work list and call the callback | 944 // Remove an item from the weak callback work list and call the callback |
| 939 // with the visitor and the closure pointer. Returns false when there is | 945 // with the visitor and the closure pointer. Returns false when there is |
| 940 // nothing more to do. | 946 // nothing more to do. |
| 941 static bool popAndInvokeWeakPointerCallback(Visitor*); | 947 static bool popAndInvokeWeakPointerCallback(Visitor*); |
| 942 | 948 |
| 943 // Register an ephemeron table for fixed-point iteration. | 949 // Register an ephemeron table for fixed-point iteration. |
| 944 static void registerWeakTable(void* containerObject, EphemeronCallback, EphemeronCallback); | 950 static void registerWeakTable(void* containerObject, EphemeronCallback, EphemeronCallback); |
| 945 #ifndef NDEBUG | 951 #ifndef NDEBUG |
| 946 static bool weakTableRegistered(const void*); | 952 static bool weakTableRegistered(const void*); |
| 947 #endif | 953 #endif |
| 948 | 954 |
| 949 template<typename T> static Address allocate(size_t); | 955 template<typename T> static Address allocate(size_t); |
| 950 template<typename T> static Address reallocate(void* previous, size_t); | 956 template<typename T> static Address reallocate(void* previous, size_t); |
| 951 | 957 |
| 952 static void collectGarbage(ThreadState::StackState); | 958 static void collectGarbage(ThreadState::StackState); |
| 959 static void collectGarbageForTerminatingThread(ThreadState*); | |
| 953 static void collectAllGarbage(); | 960 static void collectAllGarbage(); |
| 961 template<CallbackInvocationMode Mode> static void traceRootsAndPerformGlobalWeakProcessing(); | |
| 954 static void setForcePreciseGCForTesting(); | 962 static void setForcePreciseGCForTesting(); |
| 955 | 963 |
| 956 static void prepareForGC(); | 964 static void prepareForGC(); |
| 957 | 965 |
| 958 // Conservatively checks whether an address is a pointer in any of the thread | 966 // Conservatively checks whether an address is a pointer in any of the thread |
| 959 // heaps. If so marks the object pointed to as live. | 967 // heaps. If so marks the object pointed to as live. |
| 960 static Address checkAndMarkPointer(Visitor*, Address); | 968 static Address checkAndMarkPointer(Visitor*, Address); |
| 961 | 969 |
| 962 #if ENABLE(GC_TRACING) | 970 #if ENABLE(GC_TRACING) |
| 963 // Dump the path to specified object on the next GC. This method is to be invoked from GDB. | 971 // Dump the path to specified object on the next GC. This method is to be invoked from GDB. |
| (...skipping 17 matching lines...) | |
| 981 static bool isConsistentForGC(); | 989 static bool isConsistentForGC(); |
| 982 static void makeConsistentForGC(); | 990 static void makeConsistentForGC(); |
| 983 | 991 |
| 984 static void flushHeapDoesNotContainCache(); | 992 static void flushHeapDoesNotContainCache(); |
| 985 static bool heapDoesNotContainCacheIsEmpty() { return s_heapDoesNotContainCache->isEmpty(); } | 993 static bool heapDoesNotContainCacheIsEmpty() { return s_heapDoesNotContainCache->isEmpty(); } |
| 986 | 994 |
| 987 // Return true if the last GC found a pointer into a heap page | 995 // Return true if the last GC found a pointer into a heap page |
| 988 // during conservative scanning. | 996 // during conservative scanning. |
| 989 static bool lastGCWasConservative() { return s_lastGCWasConservative; } | 997 static bool lastGCWasConservative() { return s_lastGCWasConservative; } |
| 990 | 998 |
| 999 static FreePagePool* freePagePool() { return s_freePagePool; } | |
| 1000 static OrphanedPagePool* orphanedPagePool() { return s_orphanedPagePool; } | |
| 1001 | |
| 991 private: | 1002 private: |
| 992 static Visitor* s_markingVisitor; | 1003 static Visitor* s_markingVisitor; |
| 993 | 1004 |
| 994 static CallbackStack* s_markingStack; | 1005 static CallbackStack* s_markingStack; |
| 995 static CallbackStack* s_weakCallbackStack; | 1006 static CallbackStack* s_weakCallbackStack; |
| 996 static CallbackStack* s_ephemeronStack; | 1007 static CallbackStack* s_ephemeronStack; |
| 997 static HeapDoesNotContainCache* s_heapDoesNotContainCache; | 1008 static HeapDoesNotContainCache* s_heapDoesNotContainCache; |
| 998 static bool s_shutdownCalled; | 1009 static bool s_shutdownCalled; |
| 999 static bool s_lastGCWasConservative; | 1010 static bool s_lastGCWasConservative; |
| 1011 static FreePagePool* s_freePagePool; | |
| 1012 static OrphanedPagePool* s_orphanedPagePool; | |
| 1000 friend class ThreadState; | 1013 friend class ThreadState; |
| 1001 }; | 1014 }; |
| 1002 | 1015 |
| 1003 // The NoAllocationScope class is used in debug mode to catch unwanted | 1016 // The NoAllocationScope class is used in debug mode to catch unwanted |
| 1004 // allocations. E.g. allocations during GC. | 1017 // allocations. E.g. allocations during GC. |
| 1005 template<ThreadAffinity Affinity> | 1018 template<ThreadAffinity Affinity> |
| 1006 class NoAllocationScope { | 1019 class NoAllocationScope { |
| 1007 public: | 1020 public: |
| 1008 NoAllocationScope() : m_active(true) { enter(); } | 1021 NoAllocationScope() : m_active(true) { enter(); } |
| 1009 | 1022 |
| (...skipping 286 matching lines...) | |
| 1296 #define GC_PLUGIN_IGNORE(bug) \ | 1309 #define GC_PLUGIN_IGNORE(bug) \ |
| 1297 __attribute__((annotate("blink_gc_plugin_ignore"))) | 1310 __attribute__((annotate("blink_gc_plugin_ignore"))) |
| 1298 #else | 1311 #else |
| 1299 #define STACK_ALLOCATED() DISALLOW_ALLOCATION() | 1312 #define STACK_ALLOCATED() DISALLOW_ALLOCATION() |
| 1300 #define GC_PLUGIN_IGNORE(bug) | 1313 #define GC_PLUGIN_IGNORE(bug) |
| 1301 #endif | 1314 #endif |
| 1302 | 1315 |
| 1303 NO_SANITIZE_ADDRESS | 1316 NO_SANITIZE_ADDRESS |
| 1304 void HeapObjectHeader::checkHeader() const | 1317 void HeapObjectHeader::checkHeader() const |
| 1305 { | 1318 { |
| 1306 ASSERT(m_magic == magic); | 1319 #ifndef NDEBUG |
| 1320 BaseHeapPage* page = pageHeaderFromObject(this); | |
| 1321 ASSERT(page->orphaned() || m_magic == magic); | |
| 1322 #endif | |
| 1307 } | 1323 } |
| 1308 | 1324 |
| 1309 Address HeapObjectHeader::payload() | 1325 Address HeapObjectHeader::payload() |
| 1310 { | 1326 { |
| 1311 return reinterpret_cast<Address>(this) + objectHeaderSize; | 1327 return reinterpret_cast<Address>(this) + objectHeaderSize; |
| 1312 } | 1328 } |
| 1313 | 1329 |
| 1314 size_t HeapObjectHeader::payloadSize() | 1330 size_t HeapObjectHeader::payloadSize() |
| 1315 { | 1331 { |
| 1316 return size() - objectHeaderSize; | 1332 return size() - objectHeaderSize; |
| (...skipping 1030 matching lines...) | |
| 2347 }; | 2363 }; |
| 2348 | 2364 |
| 2349 template<typename T> | 2365 template<typename T> |
| 2350 struct IfWeakMember<WeakMember<T> > { | 2366 struct IfWeakMember<WeakMember<T> > { |
| 2351 static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.get()); } | 2367 static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.get()); } |
| 2352 }; | 2368 }; |
| 2353 | 2369 |
| 2354 } | 2370 } |
| 2355 | 2371 |
| 2356 #endif // Heap_h | 2372 #endif // Heap_h |