Chromium Code Reviews

Side by Side Diff: Source/platform/heap/Heap.h

Issue 393823003: Revert "Revert "[oilpan]: Make thread shutdown more robust."" (Closed) Base URL: https://chromium.googlesource.com/chromium/blink.git@master
Patch Set: Created 6 years, 5 months ago
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 51 matching lines...)
62 // Double precision floats are more efficient when 8 byte aligned, so we 8 byte 62 // Double precision floats are more efficient when 8 byte aligned, so we 8 byte
63 // align all allocations even on 32 bit. 63 // align all allocations even on 32 bit.
64 const size_t allocationGranularity = 8; 64 const size_t allocationGranularity = 8;
65 const size_t allocationMask = allocationGranularity - 1; 65 const size_t allocationMask = allocationGranularity - 1;
66 const size_t objectStartBitMapSize = (blinkPageSize + ((8 * allocationGranularity) - 1)) / (8 * allocationGranularity); 66 const size_t objectStartBitMapSize = (blinkPageSize + ((8 * allocationGranularity) - 1)) / (8 * allocationGranularity);
67 const size_t reservedForObjectBitMap = ((objectStartBitMapSize + allocationMask) & ~allocationMask); 67 const size_t reservedForObjectBitMap = ((objectStartBitMapSize + allocationMask) & ~allocationMask);
68 const size_t maxHeapObjectSize = 1 << 27; 68 const size_t maxHeapObjectSize = 1 << 27;
69 69
70 const size_t markBitMask = 1; 70 const size_t markBitMask = 1;
71 const size_t freeListMask = 2; 71 const size_t freeListMask = 2;
72 const size_t debugBitMask = 4; 72 // The dead bit is used for objects that have gone through a GC marking, but did
73 // not get swept before a new GC started. In that case we set the dead bit on
74 // objects that were not marked in the previous GC to ensure we are not tracing
75 // them via a conservatively found pointer. Tracing dead objects could lead to
76 // tracing of already finalized objects in another thread's heap which is a
77 // use-after-free situation.
78 const size_t deadBitMask = 4;
73 const size_t sizeMask = ~7; 79 const size_t sizeMask = ~7;
74 const uint8_t freelistZapValue = 42; 80 const uint8_t freelistZapValue = 42;
75 const uint8_t finalizedZapValue = 24; 81 const uint8_t finalizedZapValue = 24;
82 // The orphaned zap value must be zero in the lowest bits to allow for using
83 // the mark bit when tracing.
84 const uint8_t orphanedZapValue = 240;
85
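Note for readers of this diff: the low three bits of an object header's size word serve as flags (markBitMask = 1, freeListMask = 2, and now deadBitMask = 4 in place of the old debugBitMask), which is why sizeMask is ~7 and why orphanedZapValue is 240 (0xF0) with its low bits clear, so a zapped word never reads as marked during tracing. A minimal sketch of the packing these masks imply (hypothetical helpers, not part of this patch):

    // Hypothetical helpers illustrating how size and flag bits share one word.
    inline size_t objectSize(size_t headerWord) { return headerWord & sizeMask; }
    inline bool isMarked(size_t headerWord)     { return headerWord & markBitMask; }
    inline bool isDead(size_t headerWord)       { return headerWord & deadBitMask; }
    inline size_t setDeadBit(size_t headerWord) { return headerWord | deadBitMask; }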
86 enum CallbackInvocationMode {
87 GlobalMarking,
88 ThreadLocalMarking,
89 WeaknessProcessing,
90 };
76 91
77 class HeapStats; 92 class HeapStats;
78 class PageMemory; 93 class PageMemory;
79 template<ThreadAffinity affinity> class ThreadLocalPersistents; 94 template<ThreadAffinity affinity> class ThreadLocalPersistents;
80 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity > > class Persistent; 95 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity > > class Persistent;
81 template<typename T> class CrossThreadPersistent; 96 template<typename T> class CrossThreadPersistent;
82 97
83 PLATFORM_EXPORT size_t osPageSize(); 98 PLATFORM_EXPORT size_t osPageSize();
84 99
85 // Blink heap pages are set up with a guard page before and after the 100 // Blink heap pages are set up with a guard page before and after the
(...skipping 34 matching lines...)
120 135
121 // Sanity check for a page header address: the address of the page 136 // Sanity check for a page header address: the address of the page
122 // header should be OS page size away from being Blink page size 137 // header should be OS page size away from being Blink page size
123 // aligned. 138 // aligned.
124 inline bool isPageHeaderAddress(Address address) 139 inline bool isPageHeaderAddress(Address address)
125 { 140 {
126 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - osPageSize()); 141 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - osPageSize());
127 } 142 }
128 #endif 143 #endif
129 144
130 // Mask an address down to the enclosing oilpan heap page base address. 145 // Mask an address down to the enclosing oilpan heap base page.
131 // All oilpan heap pages are aligned at blinkPageBase plus an OS page size. 146 // All oilpan heap pages are aligned at blinkPageBase plus an OS page size.
132 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our typed heaps. 147 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our typed heaps.
133 // This is only exported to enable tests in HeapTest.cpp. 148 // This is only exported to enable tests in HeapTest.cpp.
134 PLATFORM_EXPORT inline Address pageHeaderAddress(Address address) 149 PLATFORM_EXPORT inline BaseHeapPage* pageHeaderFromObject(const void* object)
135 { 150 {
136 return blinkPageAddress(address) + osPageSize(); 151 Address address = reinterpret_cast<Address>(const_cast<void*>(object));
152 return reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + osPageSize());
137 } 153 }
138 154
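The new pageHeaderFromObject relies on every heap page (including large-object pseudo-pages) placing its BaseHeapPage header exactly one OS page past the Blink-page-aligned base, just after the guard page. A hedged usage sketch (the helper below is hypothetical and shown only to illustrate the mapping):

    // Hypothetical helper: map any on-heap object to the ThreadState that owns
    // the page it lives on.
    ThreadState* owningThreadState(const void* object)
    {
        return pageHeaderFromObject(object)->threadState();
    }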
139 // Common header for heap pages.
140 class BaseHeapPage {
141 public:
142 BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state)
143 : m_storage(storage)
144 , m_gcInfo(gcInfo)
145 , m_threadState(state)
146 , m_padding(0)
147 {
148 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
149 }
150
151 // Check if the given address points to an object in this
152 // heap page. If so, find the start of that object and mark it
153 // using the given Visitor. Otherwise do nothing. The pointer must
154 // be within the same aligned blinkPageSize as the this-pointer.
155 //
156 // This is used during conservative stack scanning to
157 // conservatively mark all objects that could be referenced from
158 // the stack.
159 virtual void checkAndMarkPointer(Visitor*, Address) = 0;
160
161 #if ENABLE(GC_TRACING)
162 virtual const GCInfo* findGCInfo(Address) = 0;
163 #endif
164
165 Address address() { return reinterpret_cast<Address>(this); }
166 PageMemory* storage() const { return m_storage; }
167 ThreadState* threadState() const { return m_threadState; }
168 const GCInfo* gcInfo() { return m_gcInfo; }
169 virtual bool isLargeObject() { return false; }
170
171 private:
172 // Accessor to silence unused warnings for the m_padding field.
173 intptr_t padding() const { return m_padding; }
174
175 PageMemory* m_storage;
176 const GCInfo* m_gcInfo;
177 ThreadState* m_threadState;
178 // Pointer sized integer to ensure proper alignment of the
179 // HeapPage header. This can be used as a bit field if we need
180 // to associate more information with pages.
181 intptr_t m_padding;
182 };
183
184 // Large allocations are allocated as separate objects and linked in a 155 // Large allocations are allocated as separate objects and linked in a
185 // list. 156 // list.
186 // 157 //
187 // In order to use the same memory allocation routines for everything 158 // In order to use the same memory allocation routines for everything
188 // allocated in the heap, large objects are considered heap pages 159 // allocated in the heap, large objects are considered heap pages
189 // containing only one object. 160 // containing only one object.
190 // 161 //
191 // The layout of a large heap object is as follows: 162 // The layout of a large heap object is as follows:
192 // 163 //
193 // | BaseHeapPage | next pointer | FinalizedHeapObjectHeader or HeapObjectHeader | payload | 164 // | BaseHeapPage | next pointer | FinalizedHeapObjectHeader or HeapObjectHeader | payload |
(...skipping 31 matching lines...)
225 // The LargeHeapObject pseudo-page contains one actual object. Determine 196 // The LargeHeapObject pseudo-page contains one actual object. Determine
226 // whether the pointer is within that object. 197 // whether the pointer is within that object.
227 bool objectContains(Address object) 198 bool objectContains(Address object)
228 { 199 {
229 return (payload() <= object) && (object < address() + size()); 200 return (payload() <= object) && (object < address() + size());
230 } 201 }
231 202
232 // Returns true for any address that is on one of the pages that this 203 // Returns true for any address that is on one of the pages that this
233 // large object uses. That ensures that we can use a negative result to 204 // large object uses. That ensures that we can use a negative result to
234 // populate the negative page cache. 205 // populate the negative page cache.
235 bool contains(Address object) 206 virtual bool contains(Address object) OVERRIDE
236 { 207 {
237 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size()); 208 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size());
238 } 209 }
239 210
240 LargeHeapObject<Header>* next() 211 LargeHeapObject<Header>* next()
241 { 212 {
242 return m_next; 213 return m_next;
243 } 214 }
244 215
245 size_t size() 216 size_t size()
246 { 217 {
247 return heapObjectHeader()->size() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); 218 return heapObjectHeader()->size() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
248 } 219 }
249 220
250 Address payload() { return heapObjectHeader()->payload(); } 221 Address payload() { return heapObjectHeader()->payload(); }
251 size_t payloadSize() { return heapObjectHeader()->payloadSize(); } 222 size_t payloadSize() { return heapObjectHeader()->payloadSize(); }
252 223
253 Header* heapObjectHeader() 224 Header* heapObjectHeader()
254 { 225 {
255 Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); 226 Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
256 return reinterpret_cast<Header*>(headerAddress); 227 return reinterpret_cast<Header*>(headerAddress);
257 } 228 }
258 229
259 bool isMarked(); 230 bool isMarked();
260 void unmark(); 231 void unmark();
261 void getStats(HeapStats&); 232 void getStats(HeapStats&);
262 void mark(Visitor*); 233 void mark(Visitor*);
263 void finalize(); 234 void finalize();
235 void setDeadMark();
236 virtual void markOrphaned()
237 {
238 // Zap the payload with a recognizable value to detect any incorrect
239 // cross thread pointer usage.
240 memset(payload(), orphanedZapValue, payloadSize());
241 BaseHeapPage::markOrphaned();
242 }
264 243
265 private: 244 private:
266 friend class ThreadHeap<Header>; 245 friend class ThreadHeap<Header>;
267 246
268 LargeHeapObject<Header>* m_next; 247 LargeHeapObject<Header>* m_next;
269 }; 248 };
270 249
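Given the layout comment above, the offsets used by heapObjectHeader(), payload() and size() follow directly from the structure of a large object; roughly (a sketch of the arithmetic, not code from this patch):

    // For a LargeHeapObject<Header> starting at address A:
    //   header  = A + sizeof(LargeHeapObject<Header>) + headerPadding<Header>()
    //   payload = header + the header's own size (see HeapObjectHeader::payload() below)
    //   size()  = header->size() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>()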
271 // The BasicObjectHeader is the minimal object header. It is used when 250 // The BasicObjectHeader is the minimal object header. It is used when
272 // encountering heap space of size allocationGranularity to mark it as 251 // encountering heap space of size allocationGranularity to mark it as
273 // as freelist entry. 252 // as freelist entry.
(...skipping 48 matching lines...)
322 301
323 inline void mark(); 302 inline void mark();
324 inline void unmark(); 303 inline void unmark();
325 304
326 inline const GCInfo* gcInfo() { return 0; } 305 inline const GCInfo* gcInfo() { return 0; }
327 306
328 inline Address payload(); 307 inline Address payload();
329 inline size_t payloadSize(); 308 inline size_t payloadSize();
330 inline Address payloadEnd(); 309 inline Address payloadEnd();
331 310
332 inline void setDebugMark(); 311 inline void setDeadMark();
333 inline void clearDebugMark(); 312 inline void clearDeadMark();
334 inline bool hasDebugMark() const; 313 inline bool hasDeadMark() const;
335 314
336 // Zap magic number with a new magic number that means there was once an 315 // Zap magic number with a new magic number that means there was once an
337 // object allocated here, but it was freed because nobody marked it during 316 // object allocated here, but it was freed because nobody marked it during
338 // GC. 317 // GC.
339 void zapMagic(); 318 void zapMagic();
340 319
341 static void finalize(const GCInfo*, Address, size_t); 320 static void finalize(const GCInfo*, Address, size_t);
342 static HeapObjectHeader* fromPayload(const void*); 321 static HeapObjectHeader* fromPayload(const void*);
343 322
344 static const intptr_t magic = 0xc0de247; 323 static const intptr_t magic = 0xc0de247;
(...skipping 116 matching lines...)
461 HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*); 440 HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*);
462 441
463 void link(HeapPage**); 442 void link(HeapPage**);
464 static void unlink(HeapPage*, HeapPage**); 443 static void unlink(HeapPage*, HeapPage**);
465 444
466 bool isEmpty(); 445 bool isEmpty();
467 446
468 // Returns true for the whole blinkPageSize page that the page is on, even 447 // Returns true for the whole blinkPageSize page that the page is on, even
469 // for the header, and the unmapped guard page at the start. That ensures 448 // for the header, and the unmapped guard page at the start. That ensures
470 // the result can be used to populate the negative page cache. 449 // the result can be used to populate the negative page cache.
471 bool contains(Address addr) 450 virtual bool contains(Address addr) OVERRIDE
472 { 451 {
473 Address blinkPageStart = roundToBlinkPageStart(address()); 452 Address blinkPageStart = roundToBlinkPageStart(address());
474 ASSERT(blinkPageStart == address() - osPageSize()); // Page is at aligned address plus guard page size. 453 ASSERT(blinkPageStart == address() - osPageSize()); // Page is at aligned address plus guard page size.
475 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; 454 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize;
476 } 455 }
477 456
478 HeapPage* next() { return m_next; } 457 HeapPage* next() { return m_next; }
479 458
480 Address payload() 459 Address payload()
481 { 460 {
482 return address() + sizeof(*this) + headerPadding<Header>(); 461 return address() + sizeof(*this) + headerPadding<Header>();
483 } 462 }
484 463
485 static size_t payloadSize() 464 static size_t payloadSize()
486 { 465 {
487 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask; 466 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask;
488 } 467 }
489 468
490 Address end() { return payload() + payloadSize(); } 469 Address end() { return payload() + payloadSize(); }
491 470
492 void getStats(HeapStats&); 471 void getStats(HeapStats&);
493 void clearMarks(); 472 void clearLiveAndMarkDead();
494 void sweep(); 473 void sweep();
495 void clearObjectStartBitMap(); 474 void clearObjectStartBitMap();
496 void finalize(Header*); 475 void finalize(Header*);
497 virtual void checkAndMarkPointer(Visitor*, Address) OVERRIDE; 476 virtual void checkAndMarkPointer(Visitor*, Address) OVERRIDE;
498 #if ENABLE(GC_TRACING) 477 #if ENABLE(GC_TRACING)
499 const GCInfo* findGCInfo(Address) OVERRIDE; 478 const GCInfo* findGCInfo(Address) OVERRIDE;
500 #endif 479 #endif
501 ThreadHeap<Header>* heap() { return m_heap; } 480 ThreadHeap<Header>* heap() { return m_heap; }
502 #if defined(ADDRESS_SANITIZER) 481 #if defined(ADDRESS_SANITIZER)
503 void poisonUnmarkedObjects(); 482 void poisonUnmarkedObjects();
504 #endif 483 #endif
484 NO_SANITIZE_ADDRESS
485 virtual void markOrphaned()
486 {
487 // Zap the payload with a recognizable value to detect any incorrect
488 // cross thread pointer usage.
489 #if defined(ADDRESS_SANITIZER)
490 // Don't use memset when running with ASan since this needs to zap
491 // poisoned memory as well and the NO_SANITIZE_ADDRESS annotation
492 // only works for code in this method and not for calls to memset.
493 for (Address current = payload(); current < payload() + payloadSize(); ++current)
494 *current = orphanedZapValue;
495 #else
496 memset(payload(), orphanedZapValue, payloadSize());
497 #endif
498 BaseHeapPage::markOrphaned();
499 }
505 500
506 protected: 501 protected:
507 Header* findHeaderFromAddress(Address); 502 Header* findHeaderFromAddress(Address);
508 void populateObjectStartBitMap(); 503 void populateObjectStartBitMap();
509 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } 504 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; }
510 TraceCallback traceCallback(Header*); 505 TraceCallback traceCallback(Header*);
511 bool hasVTable(Header*); 506 bool hasVTable(Header*);
512 507
513 HeapPage<Header>* m_next; 508 HeapPage<Header>* m_next;
514 ThreadHeap<Header>* m_heap; 509 ThreadHeap<Header>* m_heap;
(...skipping 155 matching lines...)
670 using GarbageCollectedFinalized<T>::operator delete; 665 using GarbageCollectedFinalized<T>::operator delete;
671 666
672 protected: 667 protected:
673 ~ThreadSafeRefCountedGarbageCollected() { } 668 ~ThreadSafeRefCountedGarbageCollected() { }
674 669
675 private: 670 private:
676 OwnPtr<CrossThreadPersistent<T> > m_keepAlive; 671 OwnPtr<CrossThreadPersistent<T> > m_keepAlive;
677 mutable Mutex m_mutex; 672 mutable Mutex m_mutex;
678 }; 673 };
679 674
675 template<typename DataType>
676 class PagePool {
677 protected:
678 PagePool();
679
680 class PoolEntry {
681 public:
682 PoolEntry(DataType* data, PoolEntry* next)
683 : data(data)
684 , next(next)
685 { }
686
687 DataType* data;
688 PoolEntry* next;
689 };
690
691 PoolEntry* m_pool[NumberOfHeaps];
692 };
693
694 // Once pages have been used for one type of thread heap they will never be
695 // reused for another type of thread heap. Instead of unmapping, we add the
696 // pages to a pool of pages to be reused later by a thread heap of the same
697 // type. This is done as a security feature to avoid type confusion. The
698 // heaps are type segregated by having separate thread heaps for different
699 // types of objects. Holding on to pages ensures that the same virtual address
700 // space cannot be used for objects of another type than the type contained
701 // in this page to begin with.
702 class FreePagePool : public PagePool<PageMemory> {
703 public:
704 ~FreePagePool();
705 void addFreePage(int, PageMemory*);
706 PageMemory* takeFreePage(int);
707
708 private:
709 Mutex m_mutex[NumberOfHeaps];
710 };
711
712 class OrphanedPagePool : public PagePool<BaseHeapPage> {
713 public:
714 ~OrphanedPagePool();
715 void addOrphanedPage(int, BaseHeapPage*);
716 void decommitOrphanedPages();
717 #ifndef NDEBUG
718 bool contains(void*);
719 #endif
720 private:
721 void clearMemory(PageMemory*);
722 };
723
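Both pools are indexed by heap number so that memory only ever moves between heaps of the same type. A hedged sketch of the intended flow when a thread heap gives up a page ('heapIndex' and 'memory' are placeholder names; the real call sites are in Heap.cpp):

    // Return a page to the shared free pool; only a heap with the same index
    // (and therefore the same object type) may take it back later.
    Heap::freePagePool()->addFreePage(heapIndex, memory);
    PageMemory* reused = Heap::freePagePool()->takeFreePage(heapIndex);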
680 // The CallbackStack contains all the visitor callbacks used to trace and mark 724 // The CallbackStack contains all the visitor callbacks used to trace and mark
681 // objects. A specific CallbackStack instance contains at most bufferSize elements. 725 // objects. A specific CallbackStack instance contains at most bufferSize elements.
682 // If more space is needed a new CallbackStack instance is created and chained 726 // If more space is needed a new CallbackStack instance is created and chained
683 // together with the former instance. I.e. a logical CallbackStack can be made of 727 // together with the former instance. I.e. a logical CallbackStack can be made of
684 // multiple chained CallbackStack object instances. 728 // multiple chained CallbackStack object instances.
685 // There are two logical callback stacks. One containing all the marking callbacks and 729 // There are two logical callback stacks. One containing all the marking callbacks and
686 // one containing the weak pointer callbacks. 730 // one containing the weak pointer callbacks.
687 class CallbackStack { 731 class CallbackStack {
688 public: 732 public:
689 CallbackStack(CallbackStack** first) 733 CallbackStack(CallbackStack** first)
(...skipping 30 matching lines...)
720 764
721 static void init(CallbackStack** first); 765 static void init(CallbackStack** first);
722 static void shutdown(CallbackStack** first); 766 static void shutdown(CallbackStack** first);
723 static void clear(CallbackStack** first) 767 static void clear(CallbackStack** first)
724 { 768 {
725 if (!(*first)->isEmpty()) { 769 if (!(*first)->isEmpty()) {
726 shutdown(first); 770 shutdown(first);
727 init(first); 771 init(first);
728 } 772 }
729 } 773 }
730 bool popAndInvokeCallback(CallbackStack** first, Visitor*); 774 template<CallbackInvocationMode Mode> bool popAndInvokeCallback(CallbackStack** first, Visitor*);
731 static void invokeCallbacks(CallbackStack** first, Visitor*); 775 static void invokeCallbacks(CallbackStack** first, Visitor*);
732 776
733 Item* allocateEntry(CallbackStack** first) 777 Item* allocateEntry(CallbackStack** first)
734 { 778 {
735 if (m_current < m_limit) 779 if (m_current < m_limit)
736 return m_current++; 780 return m_current++;
737 return (new CallbackStack(first))->allocateEntry(first); 781 return (new CallbackStack(first))->allocateEntry(first);
738 } 782 }
739 783
740 #ifndef NDEBUG 784 #ifndef NDEBUG
741 bool hasCallbackForObject(const void*); 785 bool hasCallbackForObject(const void*);
742 #endif 786 #endif
743 787
744 private: 788 private:
745 void invokeOldestCallbacks(Visitor*); 789 void invokeOldestCallbacks(Visitor*);
746 790
747 static const size_t bufferSize = 8000; 791 static const size_t bufferSize = 8000;
748 Item m_buffer[bufferSize]; 792 Item m_buffer[bufferSize];
749 Item* m_limit; 793 Item* m_limit;
750 Item* m_current; 794 Item* m_current;
751 CallbackStack* m_next; 795 CallbackStack* m_next;
752 }; 796 };
753 797
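With popAndInvokeCallback now templated on CallbackInvocationMode, the marking phase becomes a mode-parameterized drain loop; a minimal sketch of what a caller looks like (the real loop lives in Heap.cpp):

    // Drain all pending trace callbacks during a global (all-thread) GC;
    // 'visitor' is the marking Visitor.
    while (Heap::popAndInvokeTraceCallback<GlobalMarking>(visitor)) { }
    // A terminating thread would presumably drain with ThreadLocalMarking
    // instead, so that only its own objects are traced.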
754 // Non-template super class used to pass a heap around to other classes. 798 // Non-template super class used to pass a heap around to other classes.
755 class BaseHeap { 799 class BaseHeap {
756 public: 800 public:
757 virtual ~BaseHeap() { } 801 virtual ~BaseHeap() { }
802 virtual void cleanupPages() = 0;
758 803
759 // Find the page in this thread heap containing the given 804 // Find the page in this thread heap containing the given
760 // address. Returns 0 if the address is not contained in any 805 // address. Returns 0 if the address is not contained in any
761 // page in this thread heap. 806 // page in this thread heap.
762 virtual BaseHeapPage* heapPageFromAddress(Address) = 0; 807 virtual BaseHeapPage* heapPageFromAddress(Address) = 0;
763 808
764 #if ENABLE(GC_TRACING) 809 #if ENABLE(GC_TRACING)
765 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address) = 0; 810 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address) = 0;
766 #endif 811 #endif
767 812
768 // Sweep this part of the Blink heap. This finalizes dead objects 813 // Sweep this part of the Blink heap. This finalizes dead objects
769 // and builds freelists for all the unused memory. 814 // and builds freelists for all the unused memory.
770 virtual void sweep() = 0; 815 virtual void sweep() = 0;
771 816
772 // Forcefully finalize all objects in this part of the Blink heap
773 // (potentially with the exception of one object). This is used
774 // during thread termination to make sure that all objects for the
775 // dying thread are finalized.
776 virtual void assertEmpty() = 0;
777
778 virtual void clearFreeLists() = 0; 817 virtual void clearFreeLists() = 0;
779 virtual void clearMarks() = 0; 818 virtual void clearLiveAndMarkDead() = 0;
780 #ifndef NDEBUG 819 #ifndef NDEBUG
781 virtual void getScannedStats(HeapStats&) = 0; 820 virtual void getScannedStats(HeapStats&) = 0;
782 #endif 821 #endif
783 822
784 virtual void makeConsistentForGC() = 0; 823 virtual void makeConsistentForGC() = 0;
785 virtual bool isConsistentForGC() = 0; 824 virtual bool isConsistentForGC() = 0;
786 825
826 virtual void prepareHeapForTermination() = 0;
827
787 // Returns a bucket number for inserting a FreeListEntry of a 828 // Returns a bucket number for inserting a FreeListEntry of a
788 // given size. All FreeListEntries in the given bucket, n, have 829 // given size. All FreeListEntries in the given bucket, n, have
789 // size >= 2^n. 830 // size >= 2^n.
790 static int bucketIndexForSize(size_t); 831 static int bucketIndexForSize(size_t);
791 }; 832 };
792 833
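As the comment on bucketIndexForSize says, every FreeListEntry in bucket n has size >= 2^n, so the bucket is simply the index of the highest set bit of the size. A sketch of a conforming implementation (the real one is in Heap.cpp and may differ):

    int BaseHeap::bucketIndexForSize(size_t size)
    {
        ASSERT(size > 0);
        int index = -1;
        while (size) {
            size >>= 1;
            ++index;
        }
        return index; // e.g. size 48 -> bucket 5, since 2^5 = 32 <= 48
    }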
793 // Thread heaps represent a part of the per-thread Blink heap. 834 // Thread heaps represent a part of the per-thread Blink heap.
794 // 835 //
795 // Each Blink thread has a number of thread heaps: one general heap 836 // Each Blink thread has a number of thread heaps: one general heap
796 // that contains any type of object and a number of heaps specialized 837 // that contains any type of object and a number of heaps specialized
797 // for specific object types (such as Node). 838 // for specific object types (such as Node).
798 // 839 //
799 // Each thread heap contains the functionality to allocate new objects 840 // Each thread heap contains the functionality to allocate new objects
800 // (potentially adding new pages to the heap), to find and mark 841 // (potentially adding new pages to the heap), to find and mark
801 // objects during conservative stack scanning and to sweep the set of 842 // objects during conservative stack scanning and to sweep the set of
802 // pages after a GC. 843 // pages after a GC.
803 template<typename Header> 844 template<typename Header>
804 class ThreadHeap : public BaseHeap { 845 class ThreadHeap : public BaseHeap {
805 public: 846 public:
806 ThreadHeap(ThreadState*); 847 ThreadHeap(ThreadState*, int);
807 virtual ~ThreadHeap(); 848 virtual ~ThreadHeap();
849 virtual void cleanupPages();
808 850
809 virtual BaseHeapPage* heapPageFromAddress(Address); 851 virtual BaseHeapPage* heapPageFromAddress(Address);
810 #if ENABLE(GC_TRACING) 852 #if ENABLE(GC_TRACING)
811 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address); 853 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address);
812 #endif 854 #endif
813 virtual void sweep(); 855 virtual void sweep();
814 virtual void assertEmpty();
815 virtual void clearFreeLists(); 856 virtual void clearFreeLists();
816 virtual void clearMarks(); 857 virtual void clearLiveAndMarkDead();
817 #ifndef NDEBUG 858 #ifndef NDEBUG
818 virtual void getScannedStats(HeapStats&); 859 virtual void getScannedStats(HeapStats&);
819 #endif 860 #endif
820 861
821 virtual void makeConsistentForGC(); 862 virtual void makeConsistentForGC();
822 virtual bool isConsistentForGC(); 863 virtual bool isConsistentForGC();
823 864
824 ThreadState* threadState() { return m_threadState; } 865 ThreadState* threadState() { return m_threadState; }
825 HeapStats& stats() { return m_threadState->stats(); } 866 HeapStats& stats() { return m_threadState->stats(); }
826 void flushHeapContainsCache() 867 void flushHeapContainsCache()
827 { 868 {
828 m_threadState->heapContainsCache()->flush(); 869 m_threadState->heapContainsCache()->flush();
829 } 870 }
830 871
831 inline Address allocate(size_t, const GCInfo*); 872 inline Address allocate(size_t, const GCInfo*);
832 void addToFreeList(Address, size_t); 873 void addToFreeList(Address, size_t);
833 void addPageMemoryToPool(PageMemory*);
834 void addPageToPool(HeapPage<Header>*);
835 inline static size_t roundedAllocationSize(size_t size) 874 inline static size_t roundedAllocationSize(size_t size)
836 { 875 {
837 return allocationSizeFromSize(size) - sizeof(Header); 876 return allocationSizeFromSize(size) - sizeof(Header);
838 } 877 }
839 878
879 void prepareHeapForTermination();
880 void removePageFromHeap(HeapPage<Header>*);
881
840 private: 882 private:
841 // Once pages have been used for one thread heap they will never 883 void addPageToHeap(const GCInfo*);
842 // be reused for another thread heap. Instead of unmapping, we add
843 // the pages to a pool of pages to be reused later by this thread
844 // heap. This is done as a security feature to avoid type
845 // confusion. The heap is type segregated by having separate
846 // thread heaps for various types of objects. Holding on to pages
847 // ensures that the same virtual address space cannot be used for
848 // objects of another type than the type contained in this thread
849 // heap.
850 class PagePoolEntry {
851 public:
852 PagePoolEntry(PageMemory* storage, PagePoolEntry* next)
853 : m_storage(storage)
854 , m_next(next)
855 { }
856
857 PageMemory* storage() { return m_storage; }
858 PagePoolEntry* next() { return m_next; }
859
860 private:
861 PageMemory* m_storage;
862 PagePoolEntry* m_next;
863 };
864
865 PLATFORM_EXPORT Address outOfLineAllocate(size_t, const GCInfo*); 884 PLATFORM_EXPORT Address outOfLineAllocate(size_t, const GCInfo*);
866 static size_t allocationSizeFromSize(size_t); 885 static size_t allocationSizeFromSize(size_t);
867 void addPageToHeap(const GCInfo*);
868 PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*); 886 PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*);
869 Address currentAllocationPoint() const { return m_currentAllocationPoint; } 887 Address currentAllocationPoint() const { return m_currentAllocationPoint; }
870 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } 888 size_t remainingAllocationSize() const { return m_remainingAllocationSize; }
871 bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } 889 bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); }
872 void setAllocationPoint(Address point, size_t size) 890 void setAllocationPoint(Address point, size_t size)
873 { 891 {
874 ASSERT(!point || heapPageFromAddress(point)); 892 ASSERT(!point || heapPageFromAddress(point));
875 ASSERT(size <= HeapPage<Header>::payloadSize()); 893 ASSERT(size <= HeapPage<Header>::payloadSize());
876 m_currentAllocationPoint = point; 894 m_currentAllocationPoint = point;
877 m_remainingAllocationSize = size; 895 m_remainingAllocationSize = size;
878 } 896 }
879 void ensureCurrentAllocation(size_t, const GCInfo*); 897 void ensureCurrentAllocation(size_t, const GCInfo*);
880 bool allocateFromFreeList(size_t); 898 bool allocateFromFreeList(size_t);
881 899
882 void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**); 900 void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**);
883
884 void allocatePage(const GCInfo*); 901 void allocatePage(const GCInfo*);
885 PageMemory* takePageFromPool();
886 void clearPagePool();
887 void deletePages();
888 902
889 Address m_currentAllocationPoint; 903 Address m_currentAllocationPoint;
890 size_t m_remainingAllocationSize; 904 size_t m_remainingAllocationSize;
891 905
892 HeapPage<Header>* m_firstPage; 906 HeapPage<Header>* m_firstPage;
893 LargeHeapObject<Header>* m_firstLargeHeapObject; 907 LargeHeapObject<Header>* m_firstLargeHeapObject;
894 908
895 int m_biggestFreeListIndex; 909 int m_biggestFreeListIndex;
896 ThreadState* m_threadState; 910 ThreadState* m_threadState;
897 911
898 // All FreeListEntries in the nth list have size >= 2^n. 912 // All FreeListEntries in the nth list have size >= 2^n.
899 FreeListEntry* m_freeLists[blinkPageSizeLog2]; 913 FreeListEntry* m_freeLists[blinkPageSizeLog2];
900 914
901 // List of pages that have been previously allocated, but are now 915 // Index into the page pools. This is used to ensure that the pages of the
902 // unused. 916 // same type go into the correct page pool and thus avoid type confusion.
903 PagePoolEntry* m_pagePool; 917 int m_index;
904 }; 918 };
905 919
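For context on the allocation members above: allocate() bump-allocates from the current allocation area (m_currentAllocationPoint / m_remainingAllocationSize) and only calls outOfLineAllocate when the area is too small. A hedged sketch of that fast path, with header construction omitted; the actual inline definition is not shown in this excerpt and may differ:

    template<typename Header>
    Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo)
    {
        size_t allocationSize = allocationSizeFromSize(size);
        if (allocationSize <= m_remainingAllocationSize) {
            Address headerAddress = m_currentAllocationPoint;
            m_currentAllocationPoint += allocationSize;
            m_remainingAllocationSize -= allocationSize;
            // Construct the Header at headerAddress (omitted here), then hand
            // out the payload that follows it.
            return headerAddress + sizeof(Header);
        }
        return outOfLineAllocate(size, gcInfo);
    }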
906 class PLATFORM_EXPORT Heap { 920 class PLATFORM_EXPORT Heap {
907 public: 921 public:
908 static void init(); 922 static void init();
909 static void shutdown(); 923 static void shutdown();
910 static void doShutdown(); 924 static void doShutdown();
911 925
912 static BaseHeapPage* contains(Address); 926 static BaseHeapPage* contains(Address);
913 static BaseHeapPage* contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); } 927 static BaseHeapPage* contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); }
914 static BaseHeapPage* contains(const void* pointer) { return contains(const_cast<void*>(pointer)); } 928 static BaseHeapPage* contains(const void* pointer) { return contains(const_cast<void*>(pointer)); }
929 #ifndef NDEBUG
930 static bool containedInHeapOrOrphanedPage(void*);
931 #endif
915 932
916 // Push a trace callback on the marking stack. 933 // Push a trace callback on the marking stack.
917 static void pushTraceCallback(void* containerObject, TraceCallback); 934 static void pushTraceCallback(void* containerObject, TraceCallback);
918 935
919 // Add a weak pointer callback to the weak callback work list. General 936 // Add a weak pointer callback to the weak callback work list. General
920 // object pointer callbacks are added to a thread local weak callback work 937 // object pointer callbacks are added to a thread local weak callback work
921 // list and the callback is called on the thread that owns the object, with 938 // list and the callback is called on the thread that owns the object, with
922 // the closure pointer as an argument. Most of the time, the closure and 939 // the closure pointer as an argument. Most of the time, the closure and
923 // the containerObject can be the same thing, but the containerObject is 940 // the containerObject can be the same thing, but the containerObject is
924 // constrained to be on the heap, since the heap is used to identify the 941 // constrained to be on the heap, since the heap is used to identify the
925 // correct thread. 942 // correct thread.
926 static void pushWeakObjectPointerCallback(void* closure, void* containerObject, WeakPointerCallback); 943 static void pushWeakObjectPointerCallback(void* closure, void* containerObject, WeakPointerCallback);
927 944
928 // Similar to the more general pushWeakObjectPointerCallback, but cell 945 // Similar to the more general pushWeakObjectPointerCallback, but cell
929 // pointer callbacks are added to a static callback work list and the weak 946 // pointer callbacks are added to a static callback work list and the weak
930 // callback is performed on the thread performing garbage collection. This 947 // callback is performed on the thread performing garbage collection. This
931 // is OK because cells are just cleared and no deallocation can happen. 948 // is OK because cells are just cleared and no deallocation can happen.
932 static void pushWeakCellPointerCallback(void** cell, WeakPointerCallback); 949 static void pushWeakCellPointerCallback(void** cell, WeakPointerCallback);
933 950
934 // Pop the top of the marking stack and call the callback with the visitor 951 // Pop the top of the marking stack and call the callback with the visitor
935 // and the object. Returns false when there is nothing more to do. 952 // and the object. Returns false when there is nothing more to do.
936 static bool popAndInvokeTraceCallback(Visitor*); 953 template<CallbackInvocationMode Mode> static bool popAndInvokeTraceCallback(Visitor*);
937 954
938 // Remove an item from the weak callback work list and call the callback 955 // Remove an item from the weak callback work list and call the callback
939 // with the visitor and the closure pointer. Returns false when there is 956 // with the visitor and the closure pointer. Returns false when there is
940 // nothing more to do. 957 // nothing more to do.
941 static bool popAndInvokeWeakPointerCallback(Visitor*); 958 static bool popAndInvokeWeakPointerCallback(Visitor*);
942 959
943 // Register an ephemeron table for fixed-point iteration. 960 // Register an ephemeron table for fixed-point iteration.
944 static void registerWeakTable(void* containerObject, EphemeronCallback, EphemeronCallback); 961 static void registerWeakTable(void* containerObject, EphemeronCallback, EphemeronCallback);
945 #ifndef NDEBUG 962 #ifndef NDEBUG
946 static bool weakTableRegistered(const void*); 963 static bool weakTableRegistered(const void*);
947 #endif 964 #endif
948 965
949 template<typename T> static Address allocate(size_t); 966 template<typename T> static Address allocate(size_t);
950 template<typename T> static Address reallocate(void* previous, size_t); 967 template<typename T> static Address reallocate(void* previous, size_t);
951 968
952 static void collectGarbage(ThreadState::StackState); 969 static void collectGarbage(ThreadState::StackState);
970 static void collectGarbageForTerminatingThread(ThreadState*);
953 static void collectAllGarbage(); 971 static void collectAllGarbage();
972 template<CallbackInvocationMode Mode> static void traceRootsAndPerformGlobalWeakProcessing();
954 static void setForcePreciseGCForTesting(); 973 static void setForcePreciseGCForTesting();
955 974
956 static void prepareForGC(); 975 static void prepareForGC();
957 976
958 // Conservatively checks whether an address is a pointer in any of the thread 977 // Conservatively checks whether an address is a pointer in any of the thread
959 // heaps. If so marks the object pointed to as live. 978 // heaps. If so marks the object pointed to as live.
960 static Address checkAndMarkPointer(Visitor*, Address); 979 static Address checkAndMarkPointer(Visitor*, Address);
961 980
962 #if ENABLE(GC_TRACING) 981 #if ENABLE(GC_TRACING)
963 // Dump the path to specified object on the next GC. This method is to be invoked from GDB. 982 // Dump the path to specified object on the next GC. This method is to be invoked from GDB.
(...skipping 17 matching lines...)
981 static bool isConsistentForGC(); 1000 static bool isConsistentForGC();
982 static void makeConsistentForGC(); 1001 static void makeConsistentForGC();
983 1002
984 static void flushHeapDoesNotContainCache(); 1003 static void flushHeapDoesNotContainCache();
985 static bool heapDoesNotContainCacheIsEmpty() { return s_heapDoesNotContainCache->isEmpty(); } 1004 static bool heapDoesNotContainCacheIsEmpty() { return s_heapDoesNotContainCache->isEmpty(); }
986 1005
987 // Return true if the last GC found a pointer into a heap page 1006 // Return true if the last GC found a pointer into a heap page
988 // during conservative scanning. 1007 // during conservative scanning.
989 static bool lastGCWasConservative() { return s_lastGCWasConservative; } 1008 static bool lastGCWasConservative() { return s_lastGCWasConservative; }
990 1009
1010 static FreePagePool* freePagePool() { return s_freePagePool; }
1011 static OrphanedPagePool* orphanedPagePool() { return s_orphanedPagePool; }
1012
991 private: 1013 private:
992 static Visitor* s_markingVisitor; 1014 static Visitor* s_markingVisitor;
993 1015
994 static CallbackStack* s_markingStack; 1016 static CallbackStack* s_markingStack;
995 static CallbackStack* s_weakCallbackStack; 1017 static CallbackStack* s_weakCallbackStack;
996 static CallbackStack* s_ephemeronStack; 1018 static CallbackStack* s_ephemeronStack;
997 static HeapDoesNotContainCache* s_heapDoesNotContainCache; 1019 static HeapDoesNotContainCache* s_heapDoesNotContainCache;
998 static bool s_shutdownCalled; 1020 static bool s_shutdownCalled;
999 static bool s_lastGCWasConservative; 1021 static bool s_lastGCWasConservative;
1022 static FreePagePool* s_freePagePool;
1023 static OrphanedPagePool* s_orphanedPagePool;
1000 friend class ThreadState; 1024 friend class ThreadState;
1001 }; 1025 };
1002 1026
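The new collectGarbageForTerminatingThread entry point is the core of making thread shutdown robust: a dying thread collects and finalizes its own objects, and pages that cannot be released safely appear to be handed to the orphaned page pool (zapped via markOrphaned) rather than unmapped. A hedged sketch of the shutdown-time sequence; the actual ordering lives in ThreadState/Heap.cpp:

    // 'state' is the ThreadState of the thread that is shutting down.
    Heap::collectGarbageForTerminatingThread(state);
    // Orphaned pages can be decommitted later, e.g. during a subsequent global GC.
    Heap::orphanedPagePool()->decommitOrphanedPages();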
1003 // The NoAllocationScope class is used in debug mode to catch unwanted 1027 // The NoAllocationScope class is used in debug mode to catch unwanted
1004 // allocations. E.g. allocations during GC. 1028 // allocations. E.g. allocations during GC.
1005 template<ThreadAffinity Affinity> 1029 template<ThreadAffinity Affinity>
1006 class NoAllocationScope { 1030 class NoAllocationScope {
1007 public: 1031 public:
1008 NoAllocationScope() : m_active(true) { enter(); } 1032 NoAllocationScope() : m_active(true) { enter(); }
1009 1033
(...skipping 286 matching lines...)
1296 #define GC_PLUGIN_IGNORE(bug) \ 1320 #define GC_PLUGIN_IGNORE(bug) \
1297 __attribute__((annotate("blink_gc_plugin_ignore"))) 1321 __attribute__((annotate("blink_gc_plugin_ignore")))
1298 #else 1322 #else
1299 #define STACK_ALLOCATED() DISALLOW_ALLOCATION() 1323 #define STACK_ALLOCATED() DISALLOW_ALLOCATION()
1300 #define GC_PLUGIN_IGNORE(bug) 1324 #define GC_PLUGIN_IGNORE(bug)
1301 #endif 1325 #endif
1302 1326
1303 NO_SANITIZE_ADDRESS 1327 NO_SANITIZE_ADDRESS
1304 void HeapObjectHeader::checkHeader() const 1328 void HeapObjectHeader::checkHeader() const
1305 { 1329 {
1306 ASSERT(m_magic == magic); 1330 #ifndef NDEBUG
1331 BaseHeapPage* page = pageHeaderFromObject(this);
1332 ASSERT(page->orphaned() || m_magic == magic);
1333 #endif
1307 } 1334 }
1308 1335
1309 Address HeapObjectHeader::payload() 1336 Address HeapObjectHeader::payload()
1310 { 1337 {
1311 return reinterpret_cast<Address>(this) + objectHeaderSize; 1338 return reinterpret_cast<Address>(this) + objectHeaderSize;
1312 } 1339 }
1313 1340
1314 size_t HeapObjectHeader::payloadSize() 1341 size_t HeapObjectHeader::payloadSize()
1315 { 1342 {
1316 return size() - objectHeaderSize; 1343 return size() - objectHeaderSize;
(...skipping 1031 matching lines...)
2348 }; 2375 };
2349 2376
2350 template<typename T> 2377 template<typename T>
2351 struct IfWeakMember<WeakMember<T> > { 2378 struct IfWeakMember<WeakMember<T> > {
2352 static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visit or->isAlive(t.get()); } 2379 static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visit or->isAlive(t.get()); }
2353 }; 2380 };
2354 2381
2355 } 2382 }
2356 2383
2357 #endif // Heap_h 2384 #endif // Heap_h
