Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(270)

Side by Side Diff: Source/platform/heap/Heap.h

Issue 371623002: [oilpan]: Make thread shutdown more robust. (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 6 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after
62 // Double precision floats are more efficient when 8 byte aligned, so we 8 byte 62 // Double precision floats are more efficient when 8 byte aligned, so we 8 byte
63 // align all allocations even on 32 bit. 63 // align all allocations even on 32 bit.
64 const size_t allocationGranularity = 8; 64 const size_t allocationGranularity = 8;
65 const size_t allocationMask = allocationGranularity - 1; 65 const size_t allocationMask = allocationGranularity - 1;
66 const size_t objectStartBitMapSize = (blinkPageSize + ((8 * allocationGranularity) - 1)) / (8 * allocationGranularity); 66 const size_t objectStartBitMapSize = (blinkPageSize + ((8 * allocationGranularity) - 1)) / (8 * allocationGranularity);
67 const size_t reservedForObjectBitMap = ((objectStartBitMapSize + allocationMask) & ~allocationMask); 67 const size_t reservedForObjectBitMap = ((objectStartBitMapSize + allocationMask) & ~allocationMask);
68 const size_t maxHeapObjectSize = 1 << 27; 68 const size_t maxHeapObjectSize = 1 << 27;
69 69
70 const size_t markBitMask = 1; 70 const size_t markBitMask = 1;
71 const size_t freeListMask = 2; 71 const size_t freeListMask = 2;
72 const size_t debugBitMask = 4; 72 // The dead bit is used for objects that have gone through a GC marking, but did
zerny-chromium 2014/07/07 12:11:56 Simpler: The dead bit is set for objects that are
73 // not get swept before a new GC started. In that case we set the dead bit on
74 // objects that were not marked in the previous GC to ensure we are not reviving
75 // them via a conservatively found pointer. This guarantees that we don't end up
76 // tracing a revived object pointing into another thread heap's object where the
77 // other thread did a sweep and cleared out the object.
78 const size_t deadBitMask = 4;
73 const size_t sizeMask = ~7; 79 const size_t sizeMask = ~7;
74 const uint8_t freelistZapValue = 42; 80 const uint8_t freelistZapValue = 42;
75 const uint8_t finalizedZapValue = 24; 81 const uint8_t finalizedZapValue = 24;
76 82
77 class HeapStats; 83 class HeapStats;
78 class PageMemory; 84 class PageMemory;
79 template<ThreadAffinity affinity> class ThreadLocalPersistents; 85 template<ThreadAffinity affinity> class ThreadLocalPersistents;
80 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTr ait<T>::Affinity > > class Persistent; 86 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTr ait<T>::Affinity > > class Persistent;
81 template<typename T> class CrossThreadPersistent; 87 template<typename T> class CrossThreadPersistent;
82 88
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after
120 126
121 // Sanity check for a page header address: the address of the page 127 // Sanity check for a page header address: the address of the page
122 // header should be OS page size away from being Blink page size 128 // header should be OS page size away from being Blink page size
123 // aligned. 129 // aligned.
124 inline bool isPageHeaderAddress(Address address) 130 inline bool isPageHeaderAddress(Address address)
125 { 131 {
126 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - osPageSize()); 132 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - osPageSize());
127 } 133 }
128 #endif 134 #endif
129 135
130 // Mask an address down to the enclosing oilpan heap page base address. 136 // Mask an address down to the enclosing oilpan heap base page.
131 // All oilpan heap pages are aligned at blinkPageBase plus an OS page size. 137 // All oilpan heap pages are aligned at blinkPageBase plus an OS page size.
132 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our typed heaps. 138 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our typed heaps.
133 // This is only exported to enable tests in HeapTest.cpp. 139 // This is only exported to enable tests in HeapTest.cpp.
134 PLATFORM_EXPORT inline Address pageHeaderAddress(Address address) 140 PLATFORM_EXPORT inline BaseHeapPage* pageHeaderFromObject(const void* object)
135 { 141 {
136 return blinkPageAddress(address) + osPageSize(); 142 Address address = reinterpret_cast<Address>(const_cast<void*>(object));
143 return reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + osPageSize());
137 } 144 }
138 145
139 // Common header for heap pages.
140 class BaseHeapPage {
141 public:
142 BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state)
143 : m_storage(storage)
144 , m_gcInfo(gcInfo)
145 , m_threadState(state)
146 , m_padding(0)
147 {
148 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
149 }
150
151 // Check if the given address points to an object in this
152 // heap page. If so, find the start of that object and mark it
153 // using the given Visitor. Otherwise do nothing. The pointer must
154 // be within the same aligned blinkPageSize as the this-pointer.
155 //
156 // This is used during conservative stack scanning to
157 // conservatively mark all objects that could be referenced from
158 // the stack.
159 virtual void checkAndMarkPointer(Visitor*, Address) = 0;
160
161 #if ENABLE(GC_TRACING)
162 virtual const GCInfo* findGCInfo(Address) = 0;
163 #endif
164
165 Address address() { return reinterpret_cast<Address>(this); }
166 PageMemory* storage() const { return m_storage; }
167 ThreadState* threadState() const { return m_threadState; }
168 const GCInfo* gcInfo() { return m_gcInfo; }
169 virtual bool isLargeObject() { return false; }
170
171 private:
172 // Accessor to silence unused warnings for the m_padding field.
173 intptr_t padding() const { return m_padding; }
174
175 PageMemory* m_storage;
176 const GCInfo* m_gcInfo;
177 ThreadState* m_threadState;
178 // Pointer sized integer to ensure proper alignment of the
179 // HeapPage header. This can be used as a bit field if we need
180 // to associate more information with pages.
181 intptr_t m_padding;
182 };
183
184 // Large allocations are allocated as separate objects and linked in a 146 // Large allocations are allocated as separate objects and linked in a
185 // list. 147 // list.
186 // 148 //
187 // In order to use the same memory allocation routines for everything 149 // In order to use the same memory allocation routines for everything
188 // allocated in the heap, large objects are considered heap pages 150 // allocated in the heap, large objects are considered heap pages
189 // containing only one object. 151 // containing only one object.
190 // 152 //
191 // The layout of a large heap object is as follows: 153 // The layout of a large heap object is as follows:
192 // 154 //
193 // | BaseHeapPage | next pointer | FinalizedHeapObjectHeader or HeapObjectHeader | payload | 155 // | BaseHeapPage | next pointer | FinalizedHeapObjectHeader or HeapObjectHeader | payload |
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after
225 // The LargeHeapObject pseudo-page contains one actual object. Determine 187 // The LargeHeapObject pseudo-page contains one actual object. Determine
226 // whether the pointer is within that object. 188 // whether the pointer is within that object.
227 bool objectContains(Address object) 189 bool objectContains(Address object)
228 { 190 {
229 return (payload() <= object) && (object < address() + size()); 191 return (payload() <= object) && (object < address() + size());
230 } 192 }
231 193
232 // Returns true for any address that is on one of the pages that this 194 // Returns true for any address that is on one of the pages that this
233 // large object uses. That ensures that we can use a negative result to 195 // large object uses. That ensures that we can use a negative result to
234 // populate the negative page cache. 196 // populate the negative page cache.
235 bool contains(Address object) 197 virtual bool contains(Address object) OVERRIDE
236 { 198 {
237 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size()); 199 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size());
238 } 200 }
239 201
240 LargeHeapObject<Header>* next() 202 LargeHeapObject<Header>* next()
241 { 203 {
242 return m_next; 204 return m_next;
243 } 205 }
244 206
245 size_t size() 207 size_t size()
246 { 208 {
247 return heapObjectHeader()->size() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); 209 return heapObjectHeader()->size() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
248 } 210 }
249 211
250 Address payload() { return heapObjectHeader()->payload(); } 212 Address payload() { return heapObjectHeader()->payload(); }
251 size_t payloadSize() { return heapObjectHeader()->payloadSize(); } 213 size_t payloadSize() { return heapObjectHeader()->payloadSize(); }
252 214
253 Header* heapObjectHeader() 215 Header* heapObjectHeader()
254 { 216 {
255 Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); 217 Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
256 return reinterpret_cast<Header*>(headerAddress); 218 return reinterpret_cast<Header*>(headerAddress);
257 } 219 }
258 220
259 bool isMarked(); 221 bool isMarked();
260 void unmark(); 222 void unmark();
261 void getStats(HeapStats&); 223 void getStats(HeapStats&);
262 void mark(Visitor*); 224 void mark(Visitor*);
263 void finalize(); 225 void finalize();
226 void setDeadMark();
zerny-chromium 2014/07/07 12:11:56 Nit: markDead/unmarkDead for consistency with the
227 virtual void markOrphaned()
228 {
229 // We clear out the payload to detect incorrect usage.
230 memset(payload(), 0, payloadSize());
231 clearReuseMemory(); // Don't reuse memory for large objects, ie. don't move to the memory pool.
232 BaseHeapPage::markOrphaned();
233 }
264 234
265 private: 235 private:
266 friend class ThreadHeap<Header>; 236 friend class ThreadHeap<Header>;
267 237
268 LargeHeapObject<Header>* m_next; 238 LargeHeapObject<Header>* m_next;
269 }; 239 };
270 240
271 // The BasicObjectHeader is the minimal object header. It is used when 241 // The BasicObjectHeader is the minimal object header. It is used when
272 // encountering heap space of size allocationGranularity to mark it as 242 // encountering heap space of size allocationGranularity to mark it as
273 // as freelist entry. 243 // as freelist entry.
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after
322 292
323 inline void mark(); 293 inline void mark();
324 inline void unmark(); 294 inline void unmark();
325 295
326 inline const GCInfo* gcInfo() { return 0; } 296 inline const GCInfo* gcInfo() { return 0; }
327 297
328 inline Address payload(); 298 inline Address payload();
329 inline size_t payloadSize(); 299 inline size_t payloadSize();
330 inline Address payloadEnd(); 300 inline Address payloadEnd();
331 301
332 inline void setDebugMark(); 302 inline void setDeadMark();
333 inline void clearDebugMark(); 303 inline void clearDeadMark();
334 inline bool hasDebugMark() const; 304 inline bool hasDeadMark() const;
zerny-chromium 2014/07/07 12:11:56 markDead/unmarkDead/isMarkedDead
335 305
336 // Zap magic number with a new magic number that means there was once an 306 // Zap magic number with a new magic number that means there was once an
337 // object allocated here, but it was freed because nobody marked it during 307 // object allocated here, but it was freed because nobody marked it during
338 // GC. 308 // GC.
339 void zapMagic(); 309 void zapMagic();
340 310
341 static void finalize(const GCInfo*, Address, size_t); 311 static void finalize(const GCInfo*, Address, size_t);
342 static HeapObjectHeader* fromPayload(const void*); 312 static HeapObjectHeader* fromPayload(const void*);
343 313
344 static const intptr_t magic = 0xc0de247; 314 static const intptr_t magic = 0xc0de247;
(...skipping 116 matching lines...) Expand 10 before | Expand all | Expand 10 after
461 HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*); 431 HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*);
462 432
463 void link(HeapPage**); 433 void link(HeapPage**);
464 static void unlink(HeapPage*, HeapPage**); 434 static void unlink(HeapPage*, HeapPage**);
465 435
466 bool isEmpty(); 436 bool isEmpty();
467 437
468 // Returns true for the whole blinkPageSize page that the page is on, even 438 // Returns true for the whole blinkPageSize page that the page is on, even
469 // for the header, and the unmapped guard page at the start. That ensures 439 // for the header, and the unmapped guard page at the start. That ensures
470 // the result can be used to populate the negative page cache. 440 // the result can be used to populate the negative page cache.
471 bool contains(Address addr) 441 virtual bool contains(Address addr) OVERRIDE
472 { 442 {
473 Address blinkPageStart = roundToBlinkPageStart(address()); 443 Address blinkPageStart = roundToBlinkPageStart(address());
474 ASSERT(blinkPageStart == address() - osPageSize()); // Page is at aligned address plus guard page size. 444 ASSERT(blinkPageStart == address() - osPageSize()); // Page is at aligned address plus guard page size.
475 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; 445 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize;
476 } 446 }
477 447
478 HeapPage* next() { return m_next; } 448 HeapPage* next() { return m_next; }
479 449
480 Address payload() 450 Address payload()
481 { 451 {
482 return address() + sizeof(*this) + headerPadding<Header>(); 452 return address() + sizeof(*this) + headerPadding<Header>();
483 } 453 }
484 454
485 static size_t payloadSize() 455 static size_t payloadSize()
486 { 456 {
487 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask; 457 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask;
488 } 458 }
489 459
490 Address end() { return payload() + payloadSize(); } 460 Address end() { return payload() + payloadSize(); }
491 461
492 void getStats(HeapStats&); 462 void getStats(HeapStats&);
493 void clearMarks(); 463 void clearLiveAndMarkDead();
zerny-chromium 2014/07/07 12:11:56 Nit: maybe unmarkLiveAndMarkDead would read more c
494 void sweep(); 464 void sweep();
495 void clearObjectStartBitMap(); 465 void clearObjectStartBitMap();
496 void finalize(Header*); 466 void finalize(Header*);
497 virtual void checkAndMarkPointer(Visitor*, Address) OVERRIDE; 467 virtual void checkAndMarkPointer(Visitor*, Address) OVERRIDE;
498 #if ENABLE(GC_TRACING) 468 #if ENABLE(GC_TRACING)
499 const GCInfo* findGCInfo(Address) OVERRIDE; 469 const GCInfo* findGCInfo(Address) OVERRIDE;
500 #endif 470 #endif
501 ThreadHeap<Header>* heap() { return m_heap; } 471 ThreadHeap<Header>* heap() { return m_heap; }
502 #if defined(ADDRESS_SANITIZER) 472 #if defined(ADDRESS_SANITIZER)
503 void poisonUnmarkedObjects(); 473 void poisonUnmarkedObjects();
504 #endif 474 #endif
475 virtual void markOrphaned()
476 {
477 // We clear out the payload to detect incorrect usage.
478 memset(payload(), 0, payloadSize());
479 setReuseMemory(); // Reuse memory for normal blink pages.
480 BaseHeapPage::markOrphaned();
481 }
505 482
506 protected: 483 protected:
507 Header* findHeaderFromAddress(Address); 484 Header* findHeaderFromAddress(Address);
508 void populateObjectStartBitMap(); 485 void populateObjectStartBitMap();
509 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } 486 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; }
510 TraceCallback traceCallback(Header*); 487 TraceCallback traceCallback(Header*);
511 bool hasVTable(Header*); 488 bool hasVTable(Header*);
512 489
513 HeapPage<Header>* m_next; 490 HeapPage<Header>* m_next;
514 ThreadHeap<Header>* m_heap; 491 ThreadHeap<Header>* m_heap;
(...skipping 155 matching lines...) Expand 10 before | Expand all | Expand 10 after
670 using GarbageCollectedFinalized<T>::operator delete; 647 using GarbageCollectedFinalized<T>::operator delete;
671 648
672 protected: 649 protected:
673 ~ThreadSafeRefCountedGarbageCollected() { } 650 ~ThreadSafeRefCountedGarbageCollected() { }
674 651
675 private: 652 private:
676 OwnPtr<CrossThreadPersistent<T> > m_keepAlive; 653 OwnPtr<CrossThreadPersistent<T> > m_keepAlive;
677 mutable Mutex m_mutex; 654 mutable Mutex m_mutex;
678 }; 655 };
679 656
657 template<typename DataType>
658 class HeapPool {
659 protected:
660 HeapPool();
661
662 class PoolEntry {
663 public:
664 PoolEntry(DataType* data, PoolEntry* next)
665 : data(data)
666 , next(next)
667 { }
668
669 DataType* data;
670 PoolEntry* next;
671 };
672
673 PoolEntry* m_pool[NumberOfHeaps];
674 };
675
676 // Once pages have been used for one type of thread heap they will never be
677 // reused for another type of thread heap. Instead of unmapping, we add the
678 // pages to a pool of pages to be reused later by a thread heap of the same
679 // type. This is done as a security feature to avoid type confusion. The
680 // heaps are type segregated by having separate thread heaps for different
681 // types of objects. Holding on to pages ensures that the same virtual address
682 // space cannot be used for objects of another type than the type contained
683 // in this page to begin with.
684 class HeapMemoryPool : public HeapPool<PageMemory> {
685 public:
686 ~HeapMemoryPool();
687 void addMemory(int index, PageMemory*);
688 PageMemory* takeMemory(int index);
689
690 private:
691 Mutex m_mutex[NumberOfHeaps];
692 };
693
694 class HeapOrphanedPagePool : public HeapPool<BaseHeapPage> {
695 public:
696 ~HeapOrphanedPagePool();
697 void addOrphanedPage(int, BaseHeapPage*);
698 void addOrphanedPages(int, Vector<BaseHeapPage*>&);
699 void decommitOrphanedPages();
700 bool contains(void*);
701 };
702
680 // The CallbackStack contains all the visitor callbacks used to trace and mark 703 // The CallbackStack contains all the visitor callbacks used to trace and mark
681 // objects. A specific CallbackStack instance contains at most bufferSize elements. 704 // objects. A specific CallbackStack instance contains at most bufferSize elements.
682 // If more space is needed a new CallbackStack instance is created and chained 705 // If more space is needed a new CallbackStack instance is created and chained
683 // together with the former instance. I.e. a logical CallbackStack can be made of 706 // together with the former instance. I.e. a logical CallbackStack can be made of
684 // multiple chained CallbackStack object instances. 707 // multiple chained CallbackStack object instances.
685 // There are two logical callback stacks. One containing all the marking callbacks and 708 // There are two logical callback stacks. One containing all the marking callbacks and
686 // one containing the weak pointer callbacks. 709 // one containing the weak pointer callbacks.
687 class CallbackStack { 710 class CallbackStack {
688 public: 711 public:
689 CallbackStack(CallbackStack** first) 712 CallbackStack(CallbackStack** first)
(...skipping 30 matching lines...) Expand all
720 743
721 static void init(CallbackStack** first); 744 static void init(CallbackStack** first);
722 static void shutdown(CallbackStack** first); 745 static void shutdown(CallbackStack** first);
723 static void clear(CallbackStack** first) 746 static void clear(CallbackStack** first)
724 { 747 {
725 if (!(*first)->isEmpty()) { 748 if (!(*first)->isEmpty()) {
726 shutdown(first); 749 shutdown(first);
727 init(first); 750 init(first);
728 } 751 }
729 } 752 }
730 bool popAndInvokeCallback(CallbackStack** first, Visitor*); 753 template<bool ThreadLocal> bool popAndInvokeCallback(CallbackStack** first, Visitor*);
731 static void invokeCallbacks(CallbackStack** first, Visitor*); 754 template<bool ThreadLocal> static void invokeCallbacks(CallbackStack** first, Visitor*);
732 755
733 Item* allocateEntry(CallbackStack** first) 756 Item* allocateEntry(CallbackStack** first)
734 { 757 {
735 if (m_current < m_limit) 758 if (m_current < m_limit)
736 return m_current++; 759 return m_current++;
737 return (new CallbackStack(first))->allocateEntry(first); 760 return (new CallbackStack(first))->allocateEntry(first);
738 } 761 }
739 762
740 #ifndef NDEBUG 763 #ifndef NDEBUG
741 bool hasCallbackForObject(const void*); 764 bool hasCallbackForObject(const void*);
742 #endif 765 #endif
743 766
744 private: 767 private:
745 void invokeOldestCallbacks(Visitor*); 768 template<bool ThreadLocal> void invokeOldestCallbacks(Visitor*);
746 769
747 static const size_t bufferSize = 8000; 770 static const size_t bufferSize = 8000;
748 Item m_buffer[bufferSize]; 771 Item m_buffer[bufferSize];
749 Item* m_limit; 772 Item* m_limit;
750 Item* m_current; 773 Item* m_current;
751 CallbackStack* m_next; 774 CallbackStack* m_next;
752 }; 775 };
753 776
754 // Non-template super class used to pass a heap around to other classes. 777 // Non-template super class used to pass a heap around to other classes.
755 class BaseHeap { 778 class BaseHeap {
(...skipping 13 matching lines...) Expand all
769 // and builds freelists for all the unused memory. 792 // and builds freelists for all the unused memory.
770 virtual void sweep() = 0; 793 virtual void sweep() = 0;
771 794
772 // Forcefully finalize all objects in this part of the Blink heap 795 // Forcefully finalize all objects in this part of the Blink heap
773 // (potentially with the exception of one object). This is used 796 // (potentially with the exception of one object). This is used
774 // during thread termination to make sure that all objects for the 797 // during thread termination to make sure that all objects for the
775 // dying thread are finalized. 798 // dying thread are finalized.
776 virtual void assertEmpty() = 0; 799 virtual void assertEmpty() = 0;
777 800
778 virtual void clearFreeLists() = 0; 801 virtual void clearFreeLists() = 0;
779 virtual void clearMarks() = 0; 802 virtual void clearLiveAndMarkDead() = 0;
780 #ifndef NDEBUG 803 #ifndef NDEBUG
781 virtual void getScannedStats(HeapStats&) = 0; 804 virtual void getScannedStats(HeapStats&) = 0;
782 #endif 805 #endif
783 806
784 virtual void makeConsistentForGC() = 0; 807 virtual void makeConsistentForGC() = 0;
785 virtual bool isConsistentForGC() = 0; 808 virtual bool isConsistentForGC() = 0;
786 809
810 virtual void setShutdown() = 0;
811
787 // Returns a bucket number for inserting a FreeListEntry of a 812 // Returns a bucket number for inserting a FreeListEntry of a
788 // given size. All FreeListEntries in the given bucket, n, have 813 // given size. All FreeListEntries in the given bucket, n, have
789 // size >= 2^n. 814 // size >= 2^n.
790 static int bucketIndexForSize(size_t); 815 static int bucketIndexForSize(size_t);
791 }; 816 };
792 817
793 // Thread heaps represent a part of the per-thread Blink heap. 818 // Thread heaps represent a part of the per-thread Blink heap.
794 // 819 //
795 // Each Blink thread has a number of thread heaps: one general heap 820 // Each Blink thread has a number of thread heaps: one general heap
796 // that contains any type of object and a number of heaps specialized 821 // that contains any type of object and a number of heaps specialized
797 // for specific object types (such as Node). 822 // for specific object types (such as Node).
798 // 823 //
799 // Each thread heap contains the functionality to allocate new objects 824 // Each thread heap contains the functionality to allocate new objects
800 // (potentially adding new pages to the heap), to find and mark 825 // (potentially adding new pages to the heap), to find and mark
801 // objects during conservative stack scanning and to sweep the set of 826 // objects during conservative stack scanning and to sweep the set of
802 // pages after a GC. 827 // pages after a GC.
803 template<typename Header> 828 template<typename Header>
804 class ThreadHeap : public BaseHeap { 829 class ThreadHeap : public BaseHeap {
805 public: 830 public:
806 ThreadHeap(ThreadState*); 831 ThreadHeap(ThreadState*, int);
807 virtual ~ThreadHeap(); 832 virtual ~ThreadHeap();
808 833
809 virtual BaseHeapPage* heapPageFromAddress(Address); 834 virtual BaseHeapPage* heapPageFromAddress(Address);
810 #if ENABLE(GC_TRACING) 835 #if ENABLE(GC_TRACING)
811 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address); 836 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address);
812 #endif 837 #endif
813 virtual void sweep(); 838 virtual void sweep();
814 virtual void assertEmpty(); 839 virtual void assertEmpty();
815 virtual void clearFreeLists(); 840 virtual void clearFreeLists();
816 virtual void clearMarks(); 841 virtual void clearLiveAndMarkDead();
817 #ifndef NDEBUG 842 #ifndef NDEBUG
818 virtual void getScannedStats(HeapStats&); 843 virtual void getScannedStats(HeapStats&);
819 #endif 844 #endif
820 845
821 virtual void makeConsistentForGC(); 846 virtual void makeConsistentForGC();
822 virtual bool isConsistentForGC(); 847 virtual bool isConsistentForGC();
823 848
824 ThreadState* threadState() { return m_threadState; } 849 ThreadState* threadState() { return m_threadState; }
825 HeapStats& stats() { return m_threadState->stats(); } 850 HeapStats& stats() { return m_threadState->stats(); }
826 void flushHeapContainsCache() 851 void flushHeapContainsCache()
827 { 852 {
828 m_threadState->heapContainsCache()->flush(); 853 m_threadState->heapContainsCache()->flush();
829 } 854 }
830 855
831 inline Address allocate(size_t, const GCInfo*); 856 inline Address allocate(size_t, const GCInfo*);
832 void addToFreeList(Address, size_t); 857 void addToFreeList(Address, size_t);
833 void addPageMemoryToPool(PageMemory*);
834 void addPageToPool(HeapPage<Header>*);
835 inline static size_t roundedAllocationSize(size_t size) 858 inline static size_t roundedAllocationSize(size_t size)
836 { 859 {
837 return allocationSizeFromSize(size) - sizeof(Header); 860 return allocationSizeFromSize(size) - sizeof(Header);
838 } 861 }
839 862
863 void setShutdown();
864 void removePageFromHeap(HeapPage<Header>*);
865
840 private: 866 private:
841 // Once pages have been used for one thread heap they will never 867 void addPageToHeap(const GCInfo*);
842 // be reused for another thread heap. Instead of unmapping, we add
843 // the pages to a pool of pages to be reused later by this thread
844 // heap. This is done as a security feature to avoid type
845 // confusion. The heap is type segregated by having separate
846 // thread heaps for various types of objects. Holding on to pages
847 // ensures that the same virtual address space cannot be used for
848 // objects of another type than the type contained in this thread
849 // heap.
850 class PagePoolEntry {
851 public:
852 PagePoolEntry(PageMemory* storage, PagePoolEntry* next)
853 : m_storage(storage)
854 , m_next(next)
855 { }
856
857 PageMemory* storage() { return m_storage; }
858 PagePoolEntry* next() { return m_next; }
859
860 private:
861 PageMemory* m_storage;
862 PagePoolEntry* m_next;
863 };
864
865 PLATFORM_EXPORT Address outOfLineAllocate(size_t, const GCInfo*); 868 PLATFORM_EXPORT Address outOfLineAllocate(size_t, const GCInfo*);
866 static size_t allocationSizeFromSize(size_t); 869 static size_t allocationSizeFromSize(size_t);
867 void addPageToHeap(const GCInfo*);
868 PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*); 870 PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*);
869 Address currentAllocationPoint() const { return m_currentAllocationPoint; } 871 Address currentAllocationPoint() const { return m_currentAllocationPoint; }
870 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } 872 size_t remainingAllocationSize() const { return m_remainingAllocationSize; }
871 bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } 873 bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); }
872 void setAllocationPoint(Address point, size_t size) 874 void setAllocationPoint(Address point, size_t size)
873 { 875 {
874 ASSERT(!point || heapPageFromAddress(point)); 876 ASSERT(!point || heapPageFromAddress(point));
875 ASSERT(size <= HeapPage<Header>::payloadSize()); 877 ASSERT(size <= HeapPage<Header>::payloadSize());
876 m_currentAllocationPoint = point; 878 m_currentAllocationPoint = point;
877 m_remainingAllocationSize = size; 879 m_remainingAllocationSize = size;
878 } 880 }
879 void ensureCurrentAllocation(size_t, const GCInfo*); 881 void ensureCurrentAllocation(size_t, const GCInfo*);
880 bool allocateFromFreeList(size_t); 882 bool allocateFromFreeList(size_t);
881 883
882 void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**); 884 void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**);
883
884 void allocatePage(const GCInfo*); 885 void allocatePage(const GCInfo*);
885 PageMemory* takePageFromPool();
886 void clearPagePool();
887 void deletePages();
888 886
889 Address m_currentAllocationPoint; 887 Address m_currentAllocationPoint;
890 size_t m_remainingAllocationSize; 888 size_t m_remainingAllocationSize;
891 889
892 HeapPage<Header>* m_firstPage; 890 HeapPage<Header>* m_firstPage;
893 LargeHeapObject<Header>* m_firstLargeHeapObject; 891 LargeHeapObject<Header>* m_firstLargeHeapObject;
894 892
895 int m_biggestFreeListIndex; 893 int m_biggestFreeListIndex;
896 ThreadState* m_threadState; 894 ThreadState* m_threadState;
897 895
898 // All FreeListEntries in the nth list have size >= 2^n. 896 // All FreeListEntries in the nth list have size >= 2^n.
899 FreeListEntry* m_freeLists[blinkPageSizeLog2]; 897 FreeListEntry* m_freeLists[blinkPageSizeLog2];
900 898
901 // List of pages that have been previously allocated, but are now 899 // Index into the memory pools. This is used so heaps of the same type (ie. index)
902 // unused. 900 // put their pages into the correct pool for avoiding type confusion.
903 PagePoolEntry* m_pagePool; 901 int m_index;
904 }; 902 };
905 903
906 class PLATFORM_EXPORT Heap { 904 class PLATFORM_EXPORT Heap {
907 public: 905 public:
908 static void init(); 906 static void init();
909 static void shutdown(); 907 static void shutdown();
910 static void doShutdown(); 908 static void doShutdown();
911 909
912 static BaseHeapPage* contains(Address); 910 static BaseHeapPage* contains(Address);
913 static BaseHeapPage* contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); } 911 static BaseHeapPage* contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); }
914 static BaseHeapPage* contains(const void* pointer) { return contains(const_c ast<void*>(pointer)); } 912 static BaseHeapPage* contains(const void* pointer) { return contains(const_c ast<void*>(pointer)); }
913 #ifndef NDEBUG
914 static bool containedInHeapOrOrphanedPage(void*);
915 #endif
915 916
916 // Push a trace callback on the marking stack. 917 // Push a trace callback on the marking stack.
917 static void pushTraceCallback(void* containerObject, TraceCallback); 918 static void pushTraceCallback(void* containerObject, TraceCallback);
918 919
919 // Add a weak pointer callback to the weak callback work list. General 920 // Add a weak pointer callback to the weak callback work list. General
920 // object pointer callbacks are added to a thread local weak callback work 921 // object pointer callbacks are added to a thread local weak callback work
921 // list and the callback is called on the thread that owns the object, with 922 // list and the callback is called on the thread that owns the object, with
922 // the closure pointer as an argument. Most of the time, the closure and 923 // the closure pointer as an argument. Most of the time, the closure and
923 // the containerObject can be the same thing, but the containerObject is 924 // the containerObject can be the same thing, but the containerObject is
924 // constrained to be on the heap, since the heap is used to identify the 925 // constrained to be on the heap, since the heap is used to identify the
925 // correct thread. 926 // correct thread.
926 static void pushWeakObjectPointerCallback(void* closure, void* containerObje ct, WeakPointerCallback); 927 static void pushWeakObjectPointerCallback(void* closure, void* containerObje ct, WeakPointerCallback);
927 928
928 // Similar to the more general pushWeakObjectPointerCallback, but cell 929 // Similar to the more general pushWeakObjectPointerCallback, but cell
929 // pointer callbacks are added to a static callback work list and the weak 930 // pointer callbacks are added to a static callback work list and the weak
930 // callback is performed on the thread performing garbage collection. This 931 // callback is performed on the thread performing garbage collection. This
931 // is OK because cells are just cleared and no deallocation can happen. 932 // is OK because cells are just cleared and no deallocation can happen.
932 static void pushWeakCellPointerCallback(void** cell, WeakPointerCallback); 933 static void pushWeakCellPointerCallback(void** cell, WeakPointerCallback);
933 934
934 // Pop the top of the marking stack and call the callback with the visitor 935 // Pop the top of the marking stack and call the callback with the visitor
935 // and the object. Returns false when there is nothing more to do. 936 // and the object. Returns false when there is nothing more to do.
936 static bool popAndInvokeTraceCallback(Visitor*); 937 template<bool ThreadLocal> static bool popAndInvokeTraceCallback(Visitor*);
937 938
938 // Remove an item from the weak callback work list and call the callback 939 // Remove an item from the weak callback work list and call the callback
939 // with the visitor and the closure pointer. Returns false when there is 940 // with the visitor and the closure pointer. Returns false when there is
940 // nothing more to do. 941 // nothing more to do.
941 static bool popAndInvokeWeakPointerCallback(Visitor*); 942 static bool popAndInvokeWeakPointerCallback(Visitor*);
942 943
943 // Register an ephemeron table for fixed-point iteration. 944 // Register an ephemeron table for fixed-point iteration.
944 static void registerWeakTable(void* containerObject, EphemeronCallback, Ephe meronCallback); 945 static void registerWeakTable(void* containerObject, EphemeronCallback, Ephe meronCallback);
945 #ifndef NDEBUG 946 #ifndef NDEBUG
946 static bool weakTableRegistered(const void*); 947 static bool weakTableRegistered(const void*);
947 #endif 948 #endif
948 949
949 template<typename T> static Address allocate(size_t); 950 template<typename T> static Address allocate(size_t);
950 template<typename T> static Address reallocate(void* previous, size_t); 951 template<typename T> static Address reallocate(void* previous, size_t);
951 952
952 static void collectGarbage(ThreadState::StackState); 953 static void collectGarbage(ThreadState::StackState);
954 static void collectGarbageForThread(ThreadState*, bool);
953 static void collectAllGarbage(); 955 static void collectAllGarbage();
956 template<bool ThreadLocal> static void tracingAndGlobalWeakProcessing();
954 static void setForcePreciseGCForTesting(); 957 static void setForcePreciseGCForTesting();
955 958
956 static void prepareForGC(); 959 static void prepareForGC();
957 960
958 // Conservatively checks whether an address is a pointer in any of the threa d 961 // Conservatively checks whether an address is a pointer in any of the threa d
959 // heaps. If so marks the object pointed to as live. 962 // heaps. If so marks the object pointed to as live.
960 static Address checkAndMarkPointer(Visitor*, Address); 963 static Address checkAndMarkPointer(Visitor*, Address);
961 964
962 #if ENABLE(GC_TRACING) 965 #if ENABLE(GC_TRACING)
963 // Dump the path to specified object on the next GC. This method is to be in voked from GDB. 966 // Dump the path to specified object on the next GC. This method is to be in voked from GDB.
(...skipping 17 matching lines...) Expand all
981 static bool isConsistentForGC(); 984 static bool isConsistentForGC();
982 static void makeConsistentForGC(); 985 static void makeConsistentForGC();
983 986
984 static void flushHeapDoesNotContainCache(); 987 static void flushHeapDoesNotContainCache();
985 static bool heapDoesNotContainCacheIsEmpty() { return s_heapDoesNotContainCa che->isEmpty(); } 988 static bool heapDoesNotContainCacheIsEmpty() { return s_heapDoesNotContainCa che->isEmpty(); }
986 989
987 // Return true if the last GC found a pointer into a heap page 990 // Return true if the last GC found a pointer into a heap page
988 // during conservative scanning. 991 // during conservative scanning.
989 static bool lastGCWasConservative() { return s_lastGCWasConservative; } 992 static bool lastGCWasConservative() { return s_lastGCWasConservative; }
990 993
994 static HeapMemoryPool* memoryPool() { return s_memoryPool; }
995 static HeapOrphanedPagePool* orphanedPagePool() { return s_orphanedPagePool; }
996
991 private: 997 private:
992 static Visitor* s_markingVisitor; 998 static Visitor* s_markingVisitor;
993 999
994 static CallbackStack* s_markingStack; 1000 static CallbackStack* s_markingStack;
995 static CallbackStack* s_weakCallbackStack; 1001 static CallbackStack* s_weakCallbackStack;
996 static CallbackStack* s_ephemeronStack; 1002 static CallbackStack* s_ephemeronStack;
997 static HeapDoesNotContainCache* s_heapDoesNotContainCache; 1003 static HeapDoesNotContainCache* s_heapDoesNotContainCache;
998 static bool s_shutdownCalled; 1004 static bool s_shutdownCalled;
999 static bool s_lastGCWasConservative; 1005 static bool s_lastGCWasConservative;
1006 static HeapMemoryPool* s_memoryPool;
1007 static HeapOrphanedPagePool* s_orphanedPagePool;
1000 friend class ThreadState; 1008 friend class ThreadState;
1001 }; 1009 };
1002 1010
1003 // The NoAllocationScope class is used in debug mode to catch unwanted 1011 // The NoAllocationScope class is used in debug mode to catch unwanted
1004 // allocations. E.g. allocations during GC. 1012 // allocations. E.g. allocations during GC.
1005 template<ThreadAffinity Affinity> 1013 template<ThreadAffinity Affinity>
1006 class NoAllocationScope { 1014 class NoAllocationScope {
1007 public: 1015 public:
1008 NoAllocationScope() : m_active(true) { enter(); } 1016 NoAllocationScope() : m_active(true) { enter(); }
1009 1017
(...skipping 286 matching lines...) Expand 10 before | Expand all | Expand 10 after
1296 #define GC_PLUGIN_IGNORE(bug) \ 1304 #define GC_PLUGIN_IGNORE(bug) \
1297 __attribute__((annotate("blink_gc_plugin_ignore"))) 1305 __attribute__((annotate("blink_gc_plugin_ignore")))
1298 #else 1306 #else
1299 #define STACK_ALLOCATED() DISALLOW_ALLOCATION() 1307 #define STACK_ALLOCATED() DISALLOW_ALLOCATION()
1300 #define GC_PLUGIN_IGNORE(bug) 1308 #define GC_PLUGIN_IGNORE(bug)
1301 #endif 1309 #endif
1302 1310
1303 NO_SANITIZE_ADDRESS 1311 NO_SANITIZE_ADDRESS
1304 void HeapObjectHeader::checkHeader() const 1312 void HeapObjectHeader::checkHeader() const
1305 { 1313 {
1306 ASSERT(m_magic == magic); 1314 #ifndef NDEBUG
1315 BaseHeapPage* page = pageHeaderFromObject(this);
1316 ASSERT(page->orphaned() || m_magic == magic);
1317 #endif
1307 } 1318 }
1308 1319
1309 Address HeapObjectHeader::payload() 1320 Address HeapObjectHeader::payload()
1310 { 1321 {
1311 return reinterpret_cast<Address>(this) + objectHeaderSize; 1322 return reinterpret_cast<Address>(this) + objectHeaderSize;
1312 } 1323 }
1313 1324
1314 size_t HeapObjectHeader::payloadSize() 1325 size_t HeapObjectHeader::payloadSize()
1315 { 1326 {
1316 return size() - objectHeaderSize; 1327 return size() - objectHeaderSize;
(...skipping 998 matching lines...) Expand 10 before | Expand all | Expand 10 after
2315 // Use the payload size as recorded by the heap to determine how many 2326 // Use the payload size as recorded by the heap to determine how many
2316 // elements to finalize. 2327 // elements to finalize.
2317 size_t length = header->payloadSize() / sizeof(Value); 2328 size_t length = header->payloadSize() / sizeof(Value);
2318 Value* table = reinterpret_cast<Value*>(pointer); 2329 Value* table = reinterpret_cast<Value*>(pointer);
2319 for (unsigned i = 0; i < length; i++) { 2330 for (unsigned i = 0; i < length; i++) {
2320 if (!Table::isEmptyOrDeletedBucket(table[i])) 2331 if (!Table::isEmptyOrDeletedBucket(table[i]))
2321 table[i].~Value(); 2332 table[i].~Value();
2322 } 2333 }
2323 } 2334 }
2324 2335
2336 template<bool ThreadLocal>
2337 bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor )
2338 {
2339 if (m_current == &(m_buffer[0])) {
2340 if (!m_next) {
2341 #ifndef NDEBUG
2342 clearUnused();
2343 #endif
2344 return false;
2345 }
2346 CallbackStack* nextStack = m_next;
2347 *first = nextStack;
2348 delete this;
2349 return nextStack->popAndInvokeCallback<ThreadLocal>(first, visitor);
2350 }
2351 Item* item = --m_current;
2352
2353 // If the object being traced is located on a page which is dead don't
2354 // trace it. This can happen when a conservative GC kept a dead object
2355 // alive which pointed to a (now gone) object on the cleaned up page.
2356 // Also if doing a thread local GC don't trace objects that are located
2357 // on other thread's heaps, ie. pages where the shuttingDown flag is not
2358 // set.
2359 BaseHeapPage* heapPage = pageHeaderFromObject(item->object());
2360 if (ThreadLocal ? (heapPage->orphaned() || !heapPage->shuttingDown()) : heap Page->orphaned()) {
zerny-chromium 2014/07/07 12:11:56 if (heapPage->orphaned() || !(ThreadLocal && heapP
2361 // If tracing this from a global GC set the traced bit.
2362 if (!ThreadLocal)
2363 heapPage->setTraced();
2364 return true;
2365 }
2366
2367 VisitorCallback callback = item->callback();
2368 #if ENABLE(GC_TRACING)
2369 if (ThreadState::isAnyThreadInGC()) // weak-processing will also use popAndI nvokeCallback
2370 visitor->setHostInfo(item->object(), classOf(item->object()));
2371 #endif
2372 callback(visitor, item->object());
2373
2374 return true;
2375 }
2376
2325 template<typename T, typename U, typename V, typename W, typename X> 2377 template<typename T, typename U, typename V, typename W, typename X>
2326 struct GCInfoTrait<HeapHashMap<T, U, V, W, X> > : public GCInfoTrait<HashMap<T, U, V, W, X, HeapAllocator> > { }; 2378 struct GCInfoTrait<HeapHashMap<T, U, V, W, X> > : public GCInfoTrait<HashMap<T, U, V, W, X, HeapAllocator> > { };
2327 template<typename T, typename U, typename V> 2379 template<typename T, typename U, typename V>
2328 struct GCInfoTrait<HeapHashSet<T, U, V> > : public GCInfoTrait<HashSet<T, U, V, HeapAllocator> > { }; 2380 struct GCInfoTrait<HeapHashSet<T, U, V> > : public GCInfoTrait<HashSet<T, U, V, HeapAllocator> > { };
2329 template<typename T, typename U, typename V> 2381 template<typename T, typename U, typename V>
2330 struct GCInfoTrait<HeapLinkedHashSet<T, U, V> > : public GCInfoTrait<LinkedHashS et<T, U, V, HeapAllocator> > { }; 2382 struct GCInfoTrait<HeapLinkedHashSet<T, U, V> > : public GCInfoTrait<LinkedHashS et<T, U, V, HeapAllocator> > { };
2331 template<typename T, size_t inlineCapacity, typename U> 2383 template<typename T, size_t inlineCapacity, typename U>
2332 struct GCInfoTrait<HeapListHashSet<T, inlineCapacity, U> > : public GCInfoTrait< ListHashSet<T, inlineCapacity, U, HeapListHashSetAllocator<T, inlineCapacity> > > { }; 2384 struct GCInfoTrait<HeapListHashSet<T, inlineCapacity, U> > : public GCInfoTrait< ListHashSet<T, inlineCapacity, U, HeapListHashSetAllocator<T, inlineCapacity> > > { };
2333 template<typename T, size_t inlineCapacity> 2385 template<typename T, size_t inlineCapacity>
2334 struct GCInfoTrait<HeapVector<T, inlineCapacity> > : public GCInfoTrait<Vector<T , inlineCapacity, HeapAllocator> > { }; 2386 struct GCInfoTrait<HeapVector<T, inlineCapacity> > : public GCInfoTrait<Vector<T , inlineCapacity, HeapAllocator> > { };
(...skipping 12 matching lines...) Expand all
2347 }; 2399 };
2348 2400
2349 template<typename T> 2401 template<typename T>
2350 struct IfWeakMember<WeakMember<T> > { 2402 struct IfWeakMember<WeakMember<T> > {
2351 static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visit or->isAlive(t.get()); } 2403 static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visit or->isAlive(t.get()); }
2352 }; 2404 };
2353 2405
2354 } 2406 }
2355 2407
2356 #endif // Heap_h 2408 #endif // Heap_h
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698