Chromium Code Reviews

Side by Side Diff: Source/platform/heap/Heap.h

Issue 393823003: Revert "Revert "[oilpan]: Make thread shutdown more robust."" (Closed) Base URL: https://chromium.googlesource.com/chromium/blink.git@master
Patch Set: remove redundant NO_SANITIZE_ADDRESS Created 6 years, 5 months ago
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 51 matching lines...)
62 // Double precision floats are more efficient when 8 byte aligned, so we 8 byte 62 // Double precision floats are more efficient when 8 byte aligned, so we 8 byte
63 // align all allocations even on 32 bit. 63 // align all allocations even on 32 bit.
64 const size_t allocationGranularity = 8; 64 const size_t allocationGranularity = 8;
65 const size_t allocationMask = allocationGranularity - 1; 65 const size_t allocationMask = allocationGranularity - 1;
66 const size_t objectStartBitMapSize = (blinkPageSize + ((8 * allocationGranularity) - 1)) / (8 * allocationGranularity); 66 const size_t objectStartBitMapSize = (blinkPageSize + ((8 * allocationGranularity) - 1)) / (8 * allocationGranularity);
67 const size_t reservedForObjectBitMap = ((objectStartBitMapSize + allocationMask) & ~allocationMask); 67 const size_t reservedForObjectBitMap = ((objectStartBitMapSize + allocationMask) & ~allocationMask);
68 const size_t maxHeapObjectSize = 1 << 27; 68 const size_t maxHeapObjectSize = 1 << 27;
69 69
70 const size_t markBitMask = 1; 70 const size_t markBitMask = 1;
71 const size_t freeListMask = 2; 71 const size_t freeListMask = 2;
72 const size_t debugBitMask = 4; 72 // The dead bit is used for objects that have gone through a GC marking, but did
73 // not get swept before a new GC started. In that case we set the dead bit on
74 // objects that were not marked in the previous GC to ensure we are not tracing
75 // them via a conservatively found pointer. Tracing dead objects could lead to
76 // tracing of already finalized objects in another thread's heap which is a
77 // use-after-free situation.
78 const size_t deadBitMask = 4;
73 const size_t sizeMask = ~7; 79 const size_t sizeMask = ~7;
74 const uint8_t freelistZapValue = 42; 80 const uint8_t freelistZapValue = 42;
75 const uint8_t finalizedZapValue = 24; 81 const uint8_t finalizedZapValue = 24;
82 // The orphaned zap value must be zero in the lowest bits to allow for using
83 // the mark bit when tracing.
84 const uint8_t orphanedZapValue = 240;
85
86 enum CallbackInvocationMode {
87 GlobalMarking,
88 ThreadLocalMarking,
89 WeaknessProcessing,
90 };
76 91
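The mark, free-list, and dead bits above all fit in the low three bits of the object size word because allocationGranularity keeps every object size 8-byte aligned, leaving sizeMask for the size itself. A minimal standalone sketch of that packing (ToyHeaderWord is an invented name, not the actual HeapObjectHeader layout):

    #include <cassert>
    #include <cstddef>

    // Toy header word: the size is always a multiple of 8, so the three low
    // bits can carry the mark/freelist/dead flags without disturbing the size.
    class ToyHeaderWord {
    public:
        explicit ToyHeaderWord(size_t size) : m_bits(size) { assert(!(size & 7)); }
        size_t size() const { return m_bits & ~static_cast<size_t>(7); } // sizeMask
        void mark() { m_bits |= 1; }                                     // markBitMask
        bool isMarked() const { return m_bits & 1; }
        void setDead() { m_bits |= 4; }                                  // deadBitMask
        bool isDead() const { return m_bits & 4; }
    private:
        size_t m_bits;
    };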
77 class HeapStats; 92 class HeapStats;
78 class PageMemory; 93 class PageMemory;
79 template<ThreadAffinity affinity> class ThreadLocalPersistents; 94 template<ThreadAffinity affinity> class ThreadLocalPersistents;
80 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity > > class Persistent; 95 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity > > class Persistent;
81 template<typename T> class CrossThreadPersistent; 96 template<typename T> class CrossThreadPersistent;
82 97
83 PLATFORM_EXPORT size_t osPageSize(); 98 PLATFORM_EXPORT size_t osPageSize();
84 99
85 // Blink heap pages are set up with a guard page before and after the 100 // Blink heap pages are set up with a guard page before and after the
(...skipping 34 matching lines...)
120 135
121 // Sanity check for a page header address: the address of the page 136 // Sanity check for a page header address: the address of the page
122 // header should be OS page size away from being Blink page size 137 // header should be OS page size away from being Blink page size
123 // aligned. 138 // aligned.
124 inline bool isPageHeaderAddress(Address address) 139 inline bool isPageHeaderAddress(Address address)
125 { 140 {
126 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - osPageSize()); 141 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - osPageSize());
127 } 142 }
128 #endif 143 #endif
129 144
130 // Mask an address down to the enclosing oilpan heap page base address. 145 // Mask an address down to the enclosing oilpan heap base page.
131 // All oilpan heap pages are aligned at blinkPageBase plus an OS page size. 146 // All oilpan heap pages are aligned at blinkPageBase plus an OS page size.
132 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our typed heaps. 147 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our typed heaps.
133 // This is only exported to enable tests in HeapTest.cpp. 148 // This is only exported to enable tests in HeapTest.cpp.
134 PLATFORM_EXPORT inline Address pageHeaderAddress(Address address) 149 PLATFORM_EXPORT inline BaseHeapPage* pageHeaderFromObject(const void* object)
135 { 150 {
136 return blinkPageAddress(address) + osPageSize(); 151 Address address = reinterpret_cast<Address>(const_cast<void*>(object));
152 return reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + osPageSize());
137 } 153 }
138 154
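pageHeaderFromObject relies on the page layout described earlier in this file: a Blink page is a power-of-two aligned region whose first OS page is an unmapped guard page, so the page header always sits exactly osPageSize() past the aligned base. A self-contained sketch of that arithmetic (the concrete sizes and names are assumptions for illustration only; the real values come from blinkPageSize and osPageSize()):

    #include <cstdint>

    // Hypothetical sizes, only for the example; Blink computes these itself.
    const uintptr_t kToyBlinkPageSize = 1 << 17; // power of two
    const uintptr_t kToyOsPageSize = 4096;       // guard page size

    // Mask any interior address down to the enclosing page base, then step
    // over the guard page to reach the header.
    inline uintptr_t toyBlinkPageBase(uintptr_t address)
    {
        return address & ~(kToyBlinkPageSize - 1);
    }

    inline uintptr_t toyPageHeaderAddress(uintptr_t address)
    {
        return toyBlinkPageBase(address) + kToyOsPageSize;
    }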
139 // Common header for heap pages. 155 NO_SANITIZE_ADDRESS
140 class BaseHeapPage { 156 inline void asanMemset(Address base, uint8_t zapValue, size_t size)
141 public: 157 {
142 BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) 158 #if defined(ADDRESS_SANITIZER)
143 : m_storage(storage) 159 // Don't use memset when running with ASan since this needs to zap
Mads Ager (chromium) 2014/07/16 05:19:29 I think we should move the comment to before the m
wibling-chromium 2014/07/16 08:45:33 Done.
144 , m_gcInfo(gcInfo) 160 // poisoned memory as well and the NO_SANITIZE_ADDRESS annotation
145 , m_threadState(state) 161 // only works for code in this method and not for calls to memset.
146 , m_padding(0) 162 for (Address current = base; current < base + size; ++current) {
147 { 163 *current = zapValue;
148 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); 164 }
149 } 165 #else
150 166 memset(base, zapValue, size);
151 // Check if the given address points to an object in this
152 // heap page. If so, find the start of that object and mark it
153 // using the given Visitor. Otherwise do nothing. The pointer must
154 // be within the same aligned blinkPageSize as the this-pointer.
155 //
156 // This is used during conservative stack scanning to
157 // conservatively mark all objects that could be referenced from
158 // the stack.
159 virtual void checkAndMarkPointer(Visitor*, Address) = 0;
160
161 #if ENABLE(GC_TRACING)
162 virtual const GCInfo* findGCInfo(Address) = 0;
163 #endif 167 #endif
164 168 }
165 Address address() { return reinterpret_cast<Address>(this); }
166 PageMemory* storage() const { return m_storage; }
167 ThreadState* threadState() const { return m_threadState; }
168 const GCInfo* gcInfo() { return m_gcInfo; }
169 virtual bool isLargeObject() { return false; }
170
171 private:
172 // Accessor to silence unused warnings for the m_padding field.
173 intptr_t padding() const { return m_padding; }
174
175 PageMemory* m_storage;
176 const GCInfo* m_gcInfo;
177 ThreadState* m_threadState;
178 // Pointer sized integer to ensure proper alignment of the
179 // HeapPage header. This can be used as a bit field if we need
180 // to associate more information with pages.
181 intptr_t m_padding;
182 };
183 169
184 // Large allocations are allocated as separate objects and linked in a 170 // Large allocations are allocated as separate objects and linked in a
185 // list. 171 // list.
186 // 172 //
187 // In order to use the same memory allocation routines for everything 173 // In order to use the same memory allocation routines for everything
188 // allocated in the heap, large objects are considered heap pages 174 // allocated in the heap, large objects are considered heap pages
189 // containing only one object. 175 // containing only one object.
190 // 176 //
191 // The layout of a large heap object is as follows: 177 // The layout of a large heap object is as follows:
192 // 178 //
(...skipping 32 matching lines...)
225 // The LargeHeapObject pseudo-page contains one actual object. Determine 211 // The LargeHeapObject pseudo-page contains one actual object. Determine
226 // whether the pointer is within that object. 212 // whether the pointer is within that object.
227 bool objectContains(Address object) 213 bool objectContains(Address object)
228 { 214 {
229 return (payload() <= object) && (object < address() + size()); 215 return (payload() <= object) && (object < address() + size());
230 } 216 }
231 217
232 // Returns true for any address that is on one of the pages that this 218 // Returns true for any address that is on one of the pages that this
233 // large object uses. That ensures that we can use a negative result to 219 // large object uses. That ensures that we can use a negative result to
234 // populate the negative page cache. 220 // populate the negative page cache.
235 bool contains(Address object) 221 virtual bool contains(Address object) OVERRIDE
236 { 222 {
237 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size()); 223 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size());
238 } 224 }
239 225
240 LargeHeapObject<Header>* next() 226 LargeHeapObject<Header>* next()
241 { 227 {
242 return m_next; 228 return m_next;
243 } 229 }
244 230
245 size_t size() 231 size_t size()
246 { 232 {
247 return heapObjectHeader()->size() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); 233 return heapObjectHeader()->size() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
248 } 234 }
249 235
250 Address payload() { return heapObjectHeader()->payload(); } 236 Address payload() { return heapObjectHeader()->payload(); }
251 size_t payloadSize() { return heapObjectHeader()->payloadSize(); } 237 size_t payloadSize() { return heapObjectHeader()->payloadSize(); }
252 238
253 Header* heapObjectHeader() 239 Header* heapObjectHeader()
254 { 240 {
255 Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); 241 Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
256 return reinterpret_cast<Header*>(headerAddress); 242 return reinterpret_cast<Header*>(headerAddress);
257 } 243 }
258 244
259 bool isMarked(); 245 bool isMarked();
260 void unmark(); 246 void unmark();
261 void getStats(HeapStats&); 247 void getStats(HeapStats&);
262 void mark(Visitor*); 248 void mark(Visitor*);
263 void finalize(); 249 void finalize();
250 void setDeadMark();
251 virtual void markOrphaned()
252 {
253 // Zap the payload with a recognizable value to detect any incorrect
254 // cross thread pointer usage.
255 memset(payload(), orphanedZapValue, payloadSize());
256 BaseHeapPage::markOrphaned();
257 }
264 258
265 private: 259 private:
266 friend class ThreadHeap<Header>; 260 friend class ThreadHeap<Header>;
267 261
268 LargeHeapObject<Header>* m_next; 262 LargeHeapObject<Header>* m_next;
269 }; 263 };
270 264
271 // The BasicObjectHeader is the minimal object header. It is used when 265 // The BasicObjectHeader is the minimal object header. It is used when
272 // encountering heap space of size allocationGranularity to mark it as 266 // encountering heap space of size allocationGranularity to mark it as
273 // as freelist entry. 267 // as freelist entry.
(...skipping 48 matching lines...)
322 316
323 inline void mark(); 317 inline void mark();
324 inline void unmark(); 318 inline void unmark();
325 319
326 inline const GCInfo* gcInfo() { return 0; } 320 inline const GCInfo* gcInfo() { return 0; }
327 321
328 inline Address payload(); 322 inline Address payload();
329 inline size_t payloadSize(); 323 inline size_t payloadSize();
330 inline Address payloadEnd(); 324 inline Address payloadEnd();
331 325
332 inline void setDebugMark(); 326 inline void setDeadMark();
333 inline void clearDebugMark(); 327 inline void clearDeadMark();
334 inline bool hasDebugMark() const; 328 inline bool hasDeadMark() const;
335 329
336 // Zap magic number with a new magic number that means there was once an 330 // Zap magic number with a new magic number that means there was once an
337 // object allocated here, but it was freed because nobody marked it during 331 // object allocated here, but it was freed because nobody marked it during
338 // GC. 332 // GC.
339 void zapMagic(); 333 void zapMagic();
340 334
341 static void finalize(const GCInfo*, Address, size_t); 335 static void finalize(const GCInfo*, Address, size_t);
342 static HeapObjectHeader* fromPayload(const void*); 336 static HeapObjectHeader* fromPayload(const void*);
343 337
344 static const intptr_t magic = 0xc0de247; 338 static const intptr_t magic = 0xc0de247;
(...skipping 116 matching lines...)
461 HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*); 455 HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*);
462 456
463 void link(HeapPage**); 457 void link(HeapPage**);
464 static void unlink(HeapPage*, HeapPage**); 458 static void unlink(HeapPage*, HeapPage**);
465 459
466 bool isEmpty(); 460 bool isEmpty();
467 461
468 // Returns true for the whole blinkPageSize page that the page is on, even 462 // Returns true for the whole blinkPageSize page that the page is on, even
469 // for the header, and the unmapped guard page at the start. That ensures 463 // for the header, and the unmapped guard page at the start. That ensures
470 // the result can be used to populate the negative page cache. 464 // the result can be used to populate the negative page cache.
471 bool contains(Address addr) 465 virtual bool contains(Address addr) OVERRIDE
472 { 466 {
473 Address blinkPageStart = roundToBlinkPageStart(address()); 467 Address blinkPageStart = roundToBlinkPageStart(address());
474 ASSERT(blinkPageStart == address() - osPageSize()); // Page is at aligned address plus guard page size. 468 ASSERT(blinkPageStart == address() - osPageSize()); // Page is at aligned address plus guard page size.
475 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; 469 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize;
476 } 470 }
477 471
478 HeapPage* next() { return m_next; } 472 HeapPage* next() { return m_next; }
479 473
480 Address payload() 474 Address payload()
481 { 475 {
482 return address() + sizeof(*this) + headerPadding<Header>(); 476 return address() + sizeof(*this) + headerPadding<Header>();
483 } 477 }
484 478
485 static size_t payloadSize() 479 static size_t payloadSize()
486 { 480 {
487 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask; 481 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask;
488 } 482 }
489 483
490 Address end() { return payload() + payloadSize(); } 484 Address end() { return payload() + payloadSize(); }
491 485
492 void getStats(HeapStats&); 486 void getStats(HeapStats&);
493 void clearMarks(); 487 void clearLiveAndMarkDead();
494 void sweep(); 488 void sweep();
495 void clearObjectStartBitMap(); 489 void clearObjectStartBitMap();
496 void finalize(Header*); 490 void finalize(Header*);
497 virtual void checkAndMarkPointer(Visitor*, Address) OVERRIDE; 491 virtual void checkAndMarkPointer(Visitor*, Address) OVERRIDE;
498 #if ENABLE(GC_TRACING) 492 #if ENABLE(GC_TRACING)
499 const GCInfo* findGCInfo(Address) OVERRIDE; 493 const GCInfo* findGCInfo(Address) OVERRIDE;
500 #endif 494 #endif
501 ThreadHeap<Header>* heap() { return m_heap; } 495 ThreadHeap<Header>* heap() { return m_heap; }
502 #if defined(ADDRESS_SANITIZER) 496 #if defined(ADDRESS_SANITIZER)
503 void poisonUnmarkedObjects(); 497 void poisonUnmarkedObjects();
504 #endif 498 #endif
499 virtual void markOrphaned()
500 {
501 // Zap the payload with a recognizable value to detect any incorrect
502 // cross thread pointer usage.
503 asanMemset(payload(), orphanedZapValue, payloadSize());
504 BaseHeapPage::markOrphaned();
505 }
505 506
506 protected: 507 protected:
507 Header* findHeaderFromAddress(Address); 508 Header* findHeaderFromAddress(Address);
508 void populateObjectStartBitMap(); 509 void populateObjectStartBitMap();
509 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } 510 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; }
510 TraceCallback traceCallback(Header*); 511 TraceCallback traceCallback(Header*);
511 bool hasVTable(Header*); 512 bool hasVTable(Header*);
512 513
513 HeapPage<Header>* m_next; 514 HeapPage<Header>* m_next;
514 ThreadHeap<Header>* m_heap; 515 ThreadHeap<Header>* m_heap;
(...skipping 155 matching lines...)
670 using GarbageCollectedFinalized<T>::operator delete; 671 using GarbageCollectedFinalized<T>::operator delete;
671 672
672 protected: 673 protected:
673 ~ThreadSafeRefCountedGarbageCollected() { } 674 ~ThreadSafeRefCountedGarbageCollected() { }
674 675
675 private: 676 private:
676 OwnPtr<CrossThreadPersistent<T> > m_keepAlive; 677 OwnPtr<CrossThreadPersistent<T> > m_keepAlive;
677 mutable Mutex m_mutex; 678 mutable Mutex m_mutex;
678 }; 679 };
679 680
681 template<typename DataType>
682 class PagePool {
683 protected:
684 PagePool();
685
686 class PoolEntry {
687 public:
688 PoolEntry(DataType* data, PoolEntry* next)
689 : data(data)
690 , next(next)
691 { }
692
693 DataType* data;
694 PoolEntry* next;
695 };
696
697 PoolEntry* m_pool[NumberOfHeaps];
698 };
699
700 // Once pages have been used for one type of thread heap they will never be
701 // reused for another type of thread heap. Instead of unmapping, we add the
702 // pages to a pool of pages to be reused later by a thread heap of the same
703 // type. This is done as a security feature to avoid type confusion. The
704 // heaps are type segregated by having separate thread heaps for different
705 // types of objects. Holding on to pages ensures that the same virtual address
706 // space cannot be used for objects of another type than the type contained
707 // in this page to begin with.
708 class FreePagePool : public PagePool<PageMemory> {
709 public:
710 ~FreePagePool();
711 void addFreePage(int, PageMemory*);
712 PageMemory* takeFreePage(int);
713
714 private:
715 Mutex m_mutex[NumberOfHeaps];
716 };
717
718 class OrphanedPagePool : public PagePool<BaseHeapPage> {
719 public:
720 ~OrphanedPagePool();
721 void addOrphanedPage(int, BaseHeapPage*);
722 void decommitOrphanedPages();
723 #ifndef NDEBUG
724 bool contains(void*);
725 #endif
726 };
727
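The pooling described in the comment above is type segregated by heap index: a page that once backed objects of heap index i is only ever reused, or parked as an orphan, for index i. A simplified sketch of that per-index pooling, with invented names and without the per-index locking that FreePagePool needs:

    #include <cstddef>

    struct ToyPage { ToyPage* next; };

    // One singly linked free list per heap index; pages never migrate
    // between indices, which is what prevents type confusion on reuse.
    template<int NumberOfPools>
    class ToyPagePool {
    public:
        ToyPagePool() : m_pools() { }

        void add(int index, ToyPage* page)
        {
            page->next = m_pools[index];
            m_pools[index] = page;
        }

        ToyPage* take(int index)
        {
            ToyPage* page = m_pools[index];
            if (page)
                m_pools[index] = page->next;
            return page;
        }

    private:
        ToyPage* m_pools[NumberOfPools];
    };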
680 // The CallbackStack contains all the visitor callbacks used to trace and mark 728 // The CallbackStack contains all the visitor callbacks used to trace and mark
681 // objects. A specific CallbackStack instance contains at most bufferSize elements. 729 // objects. A specific CallbackStack instance contains at most bufferSize elements.
682 // If more space is needed a new CallbackStack instance is created and chained 730 // If more space is needed a new CallbackStack instance is created and chained
683 // together with the former instance. I.e. a logical CallbackStack can be made of 731 // together with the former instance. I.e. a logical CallbackStack can be made of
684 // multiple chained CallbackStack object instances. 732 // multiple chained CallbackStack object instances.
685 // There are two logical callback stacks. One containing all the marking callbacks and 733 // There are two logical callback stacks. One containing all the marking callbacks and
686 // one containing the weak pointer callbacks. 734 // one containing the weak pointer callbacks.
687 class CallbackStack { 735 class CallbackStack {
688 public: 736 public:
689 CallbackStack(CallbackStack** first) 737 CallbackStack(CallbackStack** first)
(...skipping 30 matching lines...)
720 768
721 static void init(CallbackStack** first); 769 static void init(CallbackStack** first);
722 static void shutdown(CallbackStack** first); 770 static void shutdown(CallbackStack** first);
723 static void clear(CallbackStack** first) 771 static void clear(CallbackStack** first)
724 { 772 {
725 if (!(*first)->isEmpty()) { 773 if (!(*first)->isEmpty()) {
726 shutdown(first); 774 shutdown(first);
727 init(first); 775 init(first);
728 } 776 }
729 } 777 }
730 bool popAndInvokeCallback(CallbackStack** first, Visitor*); 778 template<CallbackInvocationMode Mode> bool popAndInvokeCallback(CallbackStack** first, Visitor*);
731 static void invokeCallbacks(CallbackStack** first, Visitor*); 779 static void invokeCallbacks(CallbackStack** first, Visitor*);
732 780
733 Item* allocateEntry(CallbackStack** first) 781 Item* allocateEntry(CallbackStack** first)
734 { 782 {
735 if (m_current < m_limit) 783 if (m_current < m_limit)
736 return m_current++; 784 return m_current++;
737 return (new CallbackStack(first))->allocateEntry(first); 785 return (new CallbackStack(first))->allocateEntry(first);
738 } 786 }
739 787
740 #ifndef NDEBUG 788 #ifndef NDEBUG
741 bool hasCallbackForObject(const void*); 789 bool hasCallbackForObject(const void*);
742 #endif 790 #endif
743 791
744 private: 792 private:
745 void invokeOldestCallbacks(Visitor*); 793 void invokeOldestCallbacks(Visitor*);
746 794
747 static const size_t bufferSize = 8000; 795 static const size_t bufferSize = 8000;
748 Item m_buffer[bufferSize]; 796 Item m_buffer[bufferSize];
749 Item* m_limit; 797 Item* m_limit;
750 Item* m_current; 798 Item* m_current;
751 CallbackStack* m_next; 799 CallbackStack* m_next;
752 }; 800 };
753 801
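The chaining that the class above describes shows up in allocateEntry: when the fixed-size buffer is exhausted, a new CallbackStack is created and becomes the new head of the chain. A stripped-down sketch of just that growth pattern (illustrative names, no pop/invoke logic), assuming the same head-pointer convention:

    #include <cstddef>

    template<typename Item, size_t SegmentSize>
    class ToySegmentedStack {
    public:
        explicit ToySegmentedStack(ToySegmentedStack** first)
            : m_current(m_buffer)
            , m_limit(m_buffer + SegmentSize)
            , m_next(*first)
        {
            *first = this; // the new segment becomes the head of the chain
        }

        Item* allocateEntry(ToySegmentedStack** first)
        {
            if (m_current < m_limit)
                return m_current++;
            // This segment is full; chain a fresh one and allocate there.
            return (new ToySegmentedStack(first))->allocateEntry(first);
        }

    private:
        Item m_buffer[SegmentSize];
        Item* m_current;
        Item* m_limit;
        ToySegmentedStack* m_next;
    };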
754 // Non-template super class used to pass a heap around to other classes. 802 // Non-template super class used to pass a heap around to other classes.
755 class BaseHeap { 803 class BaseHeap {
756 public: 804 public:
757 virtual ~BaseHeap() { } 805 virtual ~BaseHeap() { }
806 virtual void cleanupPages() = 0;
758 807
759 // Find the page in this thread heap containing the given 808 // Find the page in this thread heap containing the given
760 // address. Returns 0 if the address is not contained in any 809 // address. Returns 0 if the address is not contained in any
761 // page in this thread heap. 810 // page in this thread heap.
762 virtual BaseHeapPage* heapPageFromAddress(Address) = 0; 811 virtual BaseHeapPage* heapPageFromAddress(Address) = 0;
763 812
764 #if ENABLE(GC_TRACING) 813 #if ENABLE(GC_TRACING)
765 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address) = 0; 814 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address) = 0;
766 #endif 815 #endif
767 816
768 // Sweep this part of the Blink heap. This finalizes dead objects 817 // Sweep this part of the Blink heap. This finalizes dead objects
769 // and builds freelists for all the unused memory. 818 // and builds freelists for all the unused memory.
770 virtual void sweep() = 0; 819 virtual void sweep() = 0;
771 820
772 // Forcefully finalize all objects in this part of the Blink heap
773 // (potentially with the exception of one object). This is used
774 // during thread termination to make sure that all objects for the
775 // dying thread are finalized.
776 virtual void assertEmpty() = 0;
777
778 virtual void clearFreeLists() = 0; 821 virtual void clearFreeLists() = 0;
779 virtual void clearMarks() = 0; 822 virtual void clearLiveAndMarkDead() = 0;
780 #ifndef NDEBUG 823 #ifndef NDEBUG
781 virtual void getScannedStats(HeapStats&) = 0; 824 virtual void getScannedStats(HeapStats&) = 0;
782 #endif 825 #endif
783 826
784 virtual void makeConsistentForGC() = 0; 827 virtual void makeConsistentForGC() = 0;
785 virtual bool isConsistentForGC() = 0; 828 virtual bool isConsistentForGC() = 0;
786 829
830 virtual void prepareHeapForTermination() = 0;
831
787 // Returns a bucket number for inserting a FreeListEntry of a 832 // Returns a bucket number for inserting a FreeListEntry of a
788 // given size. All FreeListEntries in the given bucket, n, have 833 // given size. All FreeListEntries in the given bucket, n, have
789 // size >= 2^n. 834 // size >= 2^n.
790 static int bucketIndexForSize(size_t); 835 static int bucketIndexForSize(size_t);
791 }; 836 };
792 837
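bucketIndexForSize above groups free-list entries by power of two: every entry in bucket n has size >= 2^n, so the bucket index is simply the position of the highest set bit of the size. A plausible standalone version (a sketch under that description, not copied from Heap.cpp):

    #include <cstddef>

    static int toyBucketIndexForSize(size_t size)
    {
        // Position of the highest set bit; e.g. 96 -> bucket 6 (64 <= 96 < 128).
        int index = -1;
        while (size) {
            size >>= 1;
            ++index;
        }
        return index;
    }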
793 // Thread heaps represent a part of the per-thread Blink heap. 838 // Thread heaps represent a part of the per-thread Blink heap.
794 // 839 //
795 // Each Blink thread has a number of thread heaps: one general heap 840 // Each Blink thread has a number of thread heaps: one general heap
796 // that contains any type of object and a number of heaps specialized 841 // that contains any type of object and a number of heaps specialized
797 // for specific object types (such as Node). 842 // for specific object types (such as Node).
798 // 843 //
799 // Each thread heap contains the functionality to allocate new objects 844 // Each thread heap contains the functionality to allocate new objects
800 // (potentially adding new pages to the heap), to find and mark 845 // (potentially adding new pages to the heap), to find and mark
801 // objects during conservative stack scanning and to sweep the set of 846 // objects during conservative stack scanning and to sweep the set of
802 // pages after a GC. 847 // pages after a GC.
803 template<typename Header> 848 template<typename Header>
804 class ThreadHeap : public BaseHeap { 849 class ThreadHeap : public BaseHeap {
805 public: 850 public:
806 ThreadHeap(ThreadState*); 851 ThreadHeap(ThreadState*, int);
807 virtual ~ThreadHeap(); 852 virtual ~ThreadHeap();
853 virtual void cleanupPages();
808 854
809 virtual BaseHeapPage* heapPageFromAddress(Address); 855 virtual BaseHeapPage* heapPageFromAddress(Address);
810 #if ENABLE(GC_TRACING) 856 #if ENABLE(GC_TRACING)
811 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address); 857 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address);
812 #endif 858 #endif
813 virtual void sweep(); 859 virtual void sweep();
814 virtual void assertEmpty();
815 virtual void clearFreeLists(); 860 virtual void clearFreeLists();
816 virtual void clearMarks(); 861 virtual void clearLiveAndMarkDead();
817 #ifndef NDEBUG 862 #ifndef NDEBUG
818 virtual void getScannedStats(HeapStats&); 863 virtual void getScannedStats(HeapStats&);
819 #endif 864 #endif
820 865
821 virtual void makeConsistentForGC(); 866 virtual void makeConsistentForGC();
822 virtual bool isConsistentForGC(); 867 virtual bool isConsistentForGC();
823 868
824 ThreadState* threadState() { return m_threadState; } 869 ThreadState* threadState() { return m_threadState; }
825 HeapStats& stats() { return m_threadState->stats(); } 870 HeapStats& stats() { return m_threadState->stats(); }
826 void flushHeapContainsCache() 871 void flushHeapContainsCache()
827 { 872 {
828 m_threadState->heapContainsCache()->flush(); 873 m_threadState->heapContainsCache()->flush();
829 } 874 }
830 875
831 inline Address allocate(size_t, const GCInfo*); 876 inline Address allocate(size_t, const GCInfo*);
832 void addToFreeList(Address, size_t); 877 void addToFreeList(Address, size_t);
833 void addPageMemoryToPool(PageMemory*);
834 void addPageToPool(HeapPage<Header>*);
835 inline static size_t roundedAllocationSize(size_t size) 878 inline static size_t roundedAllocationSize(size_t size)
836 { 879 {
837 return allocationSizeFromSize(size) - sizeof(Header); 880 return allocationSizeFromSize(size) - sizeof(Header);
838 } 881 }
839 882
883 void prepareHeapForTermination();
884 void removePageFromHeap(HeapPage<Header>*);
885
840 private: 886 private:
841 // Once pages have been used for one thread heap they will never 887 void addPageToHeap(const GCInfo*);
842 // be reused for another thread heap. Instead of unmapping, we add
843 // the pages to a pool of pages to be reused later by this thread
844 // heap. This is done as a security feature to avoid type
845 // confusion. The heap is type segregated by having separate
846 // thread heaps for various types of objects. Holding on to pages
847 // ensures that the same virtual address space cannot be used for
848 // objects of another type than the type contained in this thread
849 // heap.
850 class PagePoolEntry {
851 public:
852 PagePoolEntry(PageMemory* storage, PagePoolEntry* next)
853 : m_storage(storage)
854 , m_next(next)
855 { }
856
857 PageMemory* storage() { return m_storage; }
858 PagePoolEntry* next() { return m_next; }
859
860 private:
861 PageMemory* m_storage;
862 PagePoolEntry* m_next;
863 };
864
865 PLATFORM_EXPORT Address outOfLineAllocate(size_t, const GCInfo*); 888 PLATFORM_EXPORT Address outOfLineAllocate(size_t, const GCInfo*);
866 static size_t allocationSizeFromSize(size_t); 889 static size_t allocationSizeFromSize(size_t);
867 void addPageToHeap(const GCInfo*);
868 PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*); 890 PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*);
869 Address currentAllocationPoint() const { return m_currentAllocationPoint; } 891 Address currentAllocationPoint() const { return m_currentAllocationPoint; }
870 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } 892 size_t remainingAllocationSize() const { return m_remainingAllocationSize; }
871 bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } 893 bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); }
872 void setAllocationPoint(Address point, size_t size) 894 void setAllocationPoint(Address point, size_t size)
873 { 895 {
874 ASSERT(!point || heapPageFromAddress(point)); 896 ASSERT(!point || heapPageFromAddress(point));
875 ASSERT(size <= HeapPage<Header>::payloadSize()); 897 ASSERT(size <= HeapPage<Header>::payloadSize());
876 m_currentAllocationPoint = point; 898 m_currentAllocationPoint = point;
877 m_remainingAllocationSize = size; 899 m_remainingAllocationSize = size;
878 } 900 }
879 void ensureCurrentAllocation(size_t, const GCInfo*); 901 void ensureCurrentAllocation(size_t, const GCInfo*);
880 bool allocateFromFreeList(size_t); 902 bool allocateFromFreeList(size_t);
881 903
882 void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**); 904 void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**);
883
884 void allocatePage(const GCInfo*); 905 void allocatePage(const GCInfo*);
885 PageMemory* takePageFromPool();
886 void clearPagePool();
887 void deletePages();
888 906
889 Address m_currentAllocationPoint; 907 Address m_currentAllocationPoint;
890 size_t m_remainingAllocationSize; 908 size_t m_remainingAllocationSize;
891 909
892 HeapPage<Header>* m_firstPage; 910 HeapPage<Header>* m_firstPage;
893 LargeHeapObject<Header>* m_firstLargeHeapObject; 911 LargeHeapObject<Header>* m_firstLargeHeapObject;
894 912
895 int m_biggestFreeListIndex; 913 int m_biggestFreeListIndex;
896 ThreadState* m_threadState; 914 ThreadState* m_threadState;
897 915
898 // All FreeListEntries in the nth list have size >= 2^n. 916 // All FreeListEntries in the nth list have size >= 2^n.
899 FreeListEntry* m_freeLists[blinkPageSizeLog2]; 917 FreeListEntry* m_freeLists[blinkPageSizeLog2];
900 918
901 // List of pages that have been previously allocated, but are now 919 // Index into the page pools. This is used to ensure that the pages of the
902 // unused. 920 // same type go into the correct page pool and thus avoid type confusion.
903 PagePoolEntry* m_pagePool; 921 int m_index;
904 }; 922 };
905 923
906 class PLATFORM_EXPORT Heap { 924 class PLATFORM_EXPORT Heap {
907 public: 925 public:
908 static void init(); 926 static void init();
909 static void shutdown(); 927 static void shutdown();
910 static void doShutdown(); 928 static void doShutdown();
911 929
912 static BaseHeapPage* contains(Address); 930 static BaseHeapPage* contains(Address);
913 static BaseHeapPage* contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); } 931 static BaseHeapPage* contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); }
914 static BaseHeapPage* contains(const void* pointer) { return contains(const_cast<void*>(pointer)); } 932 static BaseHeapPage* contains(const void* pointer) { return contains(const_cast<void*>(pointer)); }
933 #ifndef NDEBUG
934 static bool containedInHeapOrOrphanedPage(void*);
935 #endif
915 936
916 // Push a trace callback on the marking stack. 937 // Push a trace callback on the marking stack.
917 static void pushTraceCallback(void* containerObject, TraceCallback); 938 static void pushTraceCallback(void* containerObject, TraceCallback);
918 939
919 // Add a weak pointer callback to the weak callback work list. General 940 // Add a weak pointer callback to the weak callback work list. General
920 // object pointer callbacks are added to a thread local weak callback work 941 // object pointer callbacks are added to a thread local weak callback work
921 // list and the callback is called on the thread that owns the object, with 942 // list and the callback is called on the thread that owns the object, with
922 // the closure pointer as an argument. Most of the time, the closure and 943 // the closure pointer as an argument. Most of the time, the closure and
923 // the containerObject can be the same thing, but the containerObject is 944 // the containerObject can be the same thing, but the containerObject is
924 // constrained to be on the heap, since the heap is used to identify the 945 // constrained to be on the heap, since the heap is used to identify the
925 // correct thread. 946 // correct thread.
926 static void pushWeakObjectPointerCallback(void* closure, void* containerObject, WeakPointerCallback); 947 static void pushWeakObjectPointerCallback(void* closure, void* containerObject, WeakPointerCallback);
927 948
928 // Similar to the more general pushWeakObjectPointerCallback, but cell 949 // Similar to the more general pushWeakObjectPointerCallback, but cell
929 // pointer callbacks are added to a static callback work list and the weak 950 // pointer callbacks are added to a static callback work list and the weak
930 // callback is performed on the thread performing garbage collection. This 951 // callback is performed on the thread performing garbage collection. This
931 // is OK because cells are just cleared and no deallocation can happen. 952 // is OK because cells are just cleared and no deallocation can happen.
932 static void pushWeakCellPointerCallback(void** cell, WeakPointerCallback); 953 static void pushWeakCellPointerCallback(void** cell, WeakPointerCallback);
933 954
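The two push methods above differ mainly in which thread runs the callback and what it may do; in both cases a weak callback conceptually checks whether its target survived marking and clears the reference if it did not. A hedged sketch of that contract using invented stand-in types (the real callbacks take a Visitor* and a closure pointer):

    // Invented stand-ins for the example only.
    struct ToyVisitor {
        bool isAlive(const void* object) const { return false; /* placeholder for a mark-bit check */ }
    };

    struct ToyWeakSlot {
        void* target;
    };

    // A weak callback clears the slot when its target was not marked; for the
    // cell variant nothing is finalized here, the pointer is simply nulled.
    static void toyWeakCallback(ToyVisitor* visitor, void* closure)
    {
        ToyWeakSlot* slot = static_cast<ToyWeakSlot*>(closure);
        if (slot->target && !visitor->isAlive(slot->target))
            slot->target = 0;
    }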
934 // Pop the top of the marking stack and call the callback with the visitor 955 // Pop the top of the marking stack and call the callback with the visitor
935 // and the object. Returns false when there is nothing more to do. 956 // and the object. Returns false when there is nothing more to do.
936 static bool popAndInvokeTraceCallback(Visitor*); 957 template<CallbackInvocationMode Mode> static bool popAndInvokeTraceCallback(Visitor*);
937 958
938 // Remove an item from the weak callback work list and call the callback 959 // Remove an item from the weak callback work list and call the callback
939 // with the visitor and the closure pointer. Returns false when there is 960 // with the visitor and the closure pointer. Returns false when there is
940 // nothing more to do. 961 // nothing more to do.
941 static bool popAndInvokeWeakPointerCallback(Visitor*); 962 static bool popAndInvokeWeakPointerCallback(Visitor*);
942 963
943 // Register an ephemeron table for fixed-point iteration. 964 // Register an ephemeron table for fixed-point iteration.
944 static void registerWeakTable(void* containerObject, EphemeronCallback, EphemeronCallback); 965 static void registerWeakTable(void* containerObject, EphemeronCallback, EphemeronCallback);
945 #ifndef NDEBUG 966 #ifndef NDEBUG
946 static bool weakTableRegistered(const void*); 967 static bool weakTableRegistered(const void*);
947 #endif 968 #endif
948 969
949 template<typename T> static Address allocate(size_t); 970 template<typename T> static Address allocate(size_t);
950 template<typename T> static Address reallocate(void* previous, size_t); 971 template<typename T> static Address reallocate(void* previous, size_t);
951 972
952 static void collectGarbage(ThreadState::StackState); 973 static void collectGarbage(ThreadState::StackState);
974 static void collectGarbageForTerminatingThread(ThreadState*);
953 static void collectAllGarbage(); 975 static void collectAllGarbage();
976 template<CallbackInvocationMode Mode> static void traceRootsAndPerformGlobalWeakProcessing();
954 static void setForcePreciseGCForTesting(); 977 static void setForcePreciseGCForTesting();
955 978
956 static void prepareForGC(); 979 static void prepareForGC();
957 980
958 // Conservatively checks whether an address is a pointer in any of the thread 981 // Conservatively checks whether an address is a pointer in any of the thread
959 // heaps. If so marks the object pointed to as live. 982 // heaps. If so marks the object pointed to as live.
960 static Address checkAndMarkPointer(Visitor*, Address); 983 static Address checkAndMarkPointer(Visitor*, Address);
961 984
962 #if ENABLE(GC_TRACING) 985 #if ENABLE(GC_TRACING)
963 // Dump the path to specified object on the next GC. This method is to be invoked from GDB. 986 // Dump the path to specified object on the next GC. This method is to be invoked from GDB.
(...skipping 17 matching lines...)
981 static bool isConsistentForGC(); 1004 static bool isConsistentForGC();
982 static void makeConsistentForGC(); 1005 static void makeConsistentForGC();
983 1006
984 static void flushHeapDoesNotContainCache(); 1007 static void flushHeapDoesNotContainCache();
985 static bool heapDoesNotContainCacheIsEmpty() { return s_heapDoesNotContainCache->isEmpty(); } 1008 static bool heapDoesNotContainCacheIsEmpty() { return s_heapDoesNotContainCache->isEmpty(); }
986 1009
987 // Return true if the last GC found a pointer into a heap page 1010 // Return true if the last GC found a pointer into a heap page
988 // during conservative scanning. 1011 // during conservative scanning.
989 static bool lastGCWasConservative() { return s_lastGCWasConservative; } 1012 static bool lastGCWasConservative() { return s_lastGCWasConservative; }
990 1013
1014 static FreePagePool* freePagePool() { return s_freePagePool; }
1015 static OrphanedPagePool* orphanedPagePool() { return s_orphanedPagePool; }
1016
991 private: 1017 private:
992 static Visitor* s_markingVisitor; 1018 static Visitor* s_markingVisitor;
993 1019
994 static CallbackStack* s_markingStack; 1020 static CallbackStack* s_markingStack;
995 static CallbackStack* s_weakCallbackStack; 1021 static CallbackStack* s_weakCallbackStack;
996 static CallbackStack* s_ephemeronStack; 1022 static CallbackStack* s_ephemeronStack;
997 static HeapDoesNotContainCache* s_heapDoesNotContainCache; 1023 static HeapDoesNotContainCache* s_heapDoesNotContainCache;
998 static bool s_shutdownCalled; 1024 static bool s_shutdownCalled;
999 static bool s_lastGCWasConservative; 1025 static bool s_lastGCWasConservative;
1026 static FreePagePool* s_freePagePool;
1027 static OrphanedPagePool* s_orphanedPagePool;
1000 friend class ThreadState; 1028 friend class ThreadState;
1001 }; 1029 };
1002 1030
1003 // The NoAllocationScope class is used in debug mode to catch unwanted 1031 // The NoAllocationScope class is used in debug mode to catch unwanted
1004 // allocations. E.g. allocations during GC. 1032 // allocations. E.g. allocations during GC.
1005 template<ThreadAffinity Affinity> 1033 template<ThreadAffinity Affinity>
1006 class NoAllocationScope { 1034 class NoAllocationScope {
1007 public: 1035 public:
1008 NoAllocationScope() : m_active(true) { enter(); } 1036 NoAllocationScope() : m_active(true) { enter(); }
1009 1037
(...skipping 286 matching lines...)
1296 #define GC_PLUGIN_IGNORE(bug) \ 1324 #define GC_PLUGIN_IGNORE(bug) \
1297 __attribute__((annotate("blink_gc_plugin_ignore"))) 1325 __attribute__((annotate("blink_gc_plugin_ignore")))
1298 #else 1326 #else
1299 #define STACK_ALLOCATED() DISALLOW_ALLOCATION() 1327 #define STACK_ALLOCATED() DISALLOW_ALLOCATION()
1300 #define GC_PLUGIN_IGNORE(bug) 1328 #define GC_PLUGIN_IGNORE(bug)
1301 #endif 1329 #endif
1302 1330
1303 NO_SANITIZE_ADDRESS 1331 NO_SANITIZE_ADDRESS
1304 void HeapObjectHeader::checkHeader() const 1332 void HeapObjectHeader::checkHeader() const
1305 { 1333 {
1306 ASSERT(m_magic == magic); 1334 #ifndef NDEBUG
1335 BaseHeapPage* page = pageHeaderFromObject(this);
1336 ASSERT(page->orphaned() || m_magic == magic);
1337 #endif
1307 } 1338 }
1308 1339
1309 Address HeapObjectHeader::payload() 1340 Address HeapObjectHeader::payload()
1310 { 1341 {
1311 return reinterpret_cast<Address>(this) + objectHeaderSize; 1342 return reinterpret_cast<Address>(this) + objectHeaderSize;
1312 } 1343 }
1313 1344
1314 size_t HeapObjectHeader::payloadSize() 1345 size_t HeapObjectHeader::payloadSize()
1315 { 1346 {
1316 return size() - objectHeaderSize; 1347 return size() - objectHeaderSize;
(...skipping 1030 matching lines...)
2347 }; 2378 };
2348 2379
2349 template<typename T> 2380 template<typename T>
2350 struct IfWeakMember<WeakMember<T> > { 2381 struct IfWeakMember<WeakMember<T> > {
2351 static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.get()); } 2382 static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.get()); }
2352 }; 2383 };
2353 2384
2354 } 2385 }
2355 2386
2356 #endif // Heap_h 2387 #endif // Heap_h