Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(478)

Side by Side Diff: Source/platform/heap/Heap.h

Issue 271703002: Simplify and speed up address-to-page cache for conservative stack scanning. (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Merge up Created 6 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « no previous file | Source/platform/heap/Heap.cpp » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 72 matching lines...) Expand 10 before | Expand all | Expand 10 after
83 } 83 }
84 84
85 // Blink heap pages are aligned to the Blink heap page size. 85 // Blink heap pages are aligned to the Blink heap page size.
86 // Therefore, the start of a Blink page can be obtained by 86 // Therefore, the start of a Blink page can be obtained by
87 // rounding down to the Blink page size. 87 // rounding down to the Blink page size.
88 inline Address roundToBlinkPageStart(Address address) 88 inline Address roundToBlinkPageStart(Address address)
89 { 89 {
90 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask); 90 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask);
91 } 91 }
92 92
93 inline Address roundToBlinkPageEnd(Address address)
94 {
95 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address - 1) & blinkPageBaseMask) + blinkPageSize;
96 }
97
93 // Compute the amount of padding we have to add to a header to make 98 // Compute the amount of padding we have to add to a header to make
94 // the size of the header plus the padding a multiple of 8 bytes. 99 // the size of the header plus the padding a multiple of 8 bytes.
95 template<typename Header> 100 template<typename Header>
96 inline size_t headerPadding() 101 inline size_t headerPadding()
97 { 102 {
98 return (allocationGranularity - (sizeof(Header) % allocationGranularity)) % allocationGranularity; 103 return (allocationGranularity - (sizeof(Header) % allocationGranularity)) % allocationGranularity;
99 } 104 }
100 105
101 // Masks an address down to the enclosing blink page base address. 106 // Masks an address down to the enclosing blink page base address.
102 inline Address blinkPageAddress(Address address) 107 inline Address blinkPageAddress(Address address)
(...skipping 26 matching lines...) Expand all
129 public: 134 public:
130 BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) 135 BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state)
131 : m_storage(storage) 136 : m_storage(storage)
132 , m_gcInfo(gcInfo) 137 , m_gcInfo(gcInfo)
133 , m_threadState(state) 138 , m_threadState(state)
134 , m_padding(0) 139 , m_padding(0)
135 { 140 {
136 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); 141 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
137 } 142 }
138 143
139 // Check if the given address could point to an object in this 144 // Check if the given address points to an object in this
140 // heap page. If so, find the start of that object and mark it 145 // heap page. If so, find the start of that object and mark it
141 // using the given Visitor. 146 // using the given Visitor. Otherwise do nothing. The pointer must
142 // 147 // be within the same aligned blinkPageSize as the this-pointer.
143 // Returns true if the object was found and marked, returns false
144 // otherwise.
145 // 148 //
146 // This is used during conservative stack scanning to 149 // This is used during conservative stack scanning to
147 // conservatively mark all objects that could be referenced from 150 // conservatively mark all objects that could be referenced from
148 // the stack. 151 // the stack.
149 virtual bool checkAndMarkPointer(Visitor*, Address) = 0; 152 virtual void checkAndMarkPointer(Visitor*, Address) = 0;
150 153
151 #if ENABLE(GC_TRACING) 154 #if ENABLE(GC_TRACING)
152 virtual const GCInfo* findGCInfo(Address) = 0; 155 virtual const GCInfo* findGCInfo(Address) = 0;
153 #endif 156 #endif
154 157
155 Address address() { return reinterpret_cast<Address>(this); } 158 Address address() { return reinterpret_cast<Address>(this); }
156 PageMemory* storage() const { return m_storage; } 159 PageMemory* storage() const { return m_storage; }
157 ThreadState* threadState() const { return m_threadState; } 160 ThreadState* threadState() const { return m_threadState; }
158 const GCInfo* gcInfo() { return m_gcInfo; } 161 const GCInfo* gcInfo() { return m_gcInfo; }
162 virtual bool isLargeObject() { return false; }
159 163
160 private: 164 private:
161 // Accessor to silence unused warnings. 165 // Accessor to silence unused warnings.
162 void* padding() const { return m_padding; } 166 void* padding() const { return m_padding; }
163 167
164 PageMemory* m_storage; 168 PageMemory* m_storage;
165 const GCInfo* m_gcInfo; 169 const GCInfo* m_gcInfo;
166 ThreadState* m_threadState; 170 ThreadState* m_threadState;
167 // Free word only needed to ensure proper alignment of the 171 // Free word only needed to ensure proper alignment of the
168 // HeapPage header. 172 // HeapPage header.
(...skipping 11 matching lines...) Expand all
180 // 184 //
181 // | BaseHeapPage | next pointer | FinalizedHeapObjectHeader or HeapObjectHeader | payload | 185 // | BaseHeapPage | next pointer | FinalizedHeapObjectHeader or HeapObjectHeader | payload |
182 template<typename Header> 186 template<typename Header>
183 class LargeHeapObject : public BaseHeapPage { 187 class LargeHeapObject : public BaseHeapPage {
184 public: 188 public:
185 LargeHeapObject(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) : BaseHeapPage(storage, gcInfo, state) 189 LargeHeapObject(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) : BaseHeapPage(storage, gcInfo, state)
186 { 190 {
187 COMPILE_ASSERT(!(sizeof(LargeHeapObject<Header>) & allocationMask), large_heap_object_header_misaligned); 191 COMPILE_ASSERT(!(sizeof(LargeHeapObject<Header>) & allocationMask), large_heap_object_header_misaligned);
188 } 192 }
189 193
190 virtual bool checkAndMarkPointer(Visitor*, Address); 194 virtual void checkAndMarkPointer(Visitor*, Address) OVERRIDE;
195 virtual bool isLargeObject() OVERRIDE { return true; }
191 196
192 #if ENABLE(GC_TRACING) 197 #if ENABLE(GC_TRACING)
193 virtual const GCInfo* findGCInfo(Address) 198 virtual const GCInfo* findGCInfo(Address address)
194 { 199 {
200 if (!objectContains(address))
201 return 0;
195 return gcInfo(); 202 return gcInfo();
196 } 203 }
197 #endif 204 #endif
198 205
199 void link(LargeHeapObject<Header>** previousNext) 206 void link(LargeHeapObject<Header>** previousNext)
200 { 207 {
201 m_next = *previousNext; 208 m_next = *previousNext;
202 *previousNext = this; 209 *previousNext = this;
203 } 210 }
204 211
205 void unlink(LargeHeapObject<Header>** previousNext) 212 void unlink(LargeHeapObject<Header>** previousNext)
206 { 213 {
207 *previousNext = m_next; 214 *previousNext = m_next;
208 } 215 }
209 216
217 // The LargeHeapObject pseudo-page contains one actual object. Determine
218 // whether the pointer is within that object.
219 bool objectContains(Address object)
220 {
221 return (payload() <= object) && (object < address() + size());
222 }
223
224 // Returns true for any address that is on one of the pages that this
225 // large object uses. That ensures that we can use a negative result to
226 // populate the negative page cache.
210 bool contains(Address object) 227 bool contains(Address object)
211 { 228 {
212 return (address() <= object) && (object <= (address() + size())); 229 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size());
213 } 230 }
214 231
215 LargeHeapObject<Header>* next() 232 LargeHeapObject<Header>* next()
216 { 233 {
217 return m_next; 234 return m_next;
218 } 235 }
219 236
220 size_t size() 237 size_t size()
221 { 238 {
222 return heapObjectHeader()->size() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); 239 return heapObjectHeader()->size() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
223 } 240 }
224 241
225 Address payload() { return heapObjectHeader()->payload(); } 242 Address payload() { return heapObjectHeader()->payload(); }
226 size_t payloadSize() { return heapObjectHeader()->payloadSize(); } 243 size_t payloadSize() { return heapObjectHeader()->payloadSize(); }
227 244
228 Header* heapObjectHeader() 245 Header* heapObjectHeader()
229 { 246 {
230 Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); 247 Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
231 return reinterpret_cast<Header*>(headerAddress); 248 return reinterpret_cast<Header*>(headerAddress);
232 } 249 }
233 250
234 bool isMarked(); 251 bool isMarked();
235 void unmark(); 252 void unmark();
236 void getStats(HeapStats&); 253 void getStats(HeapStats&);
237 void mark(Visitor*); 254 void mark(Visitor*);
238 void finalize(); 255 void finalize();
239 256
240 private: 257 private:
241 friend class Heap;
242 friend class ThreadHeap<Header>; 258 friend class ThreadHeap<Header>;
243 259
244 LargeHeapObject<Header>* m_next; 260 LargeHeapObject<Header>* m_next;
245 }; 261 };
246 262
247 // The BasicObjectHeader is the minimal object header. It is used when 263 // The BasicObjectHeader is the minimal object header. It is used when
248 // encountering heap space of size allocationGranularity to mark it as 264 // encountering heap space of size allocationGranularity to mark it as
249 // as freelist entry. 265 // as freelist entry.
250 class PLATFORM_EXPORT BasicObjectHeader { 266 class PLATFORM_EXPORT BasicObjectHeader {
251 public: 267 public:
(...skipping 182 matching lines...) Expand 10 before | Expand all | Expand 10 after
434 template<typename Header> 450 template<typename Header>
435 class HeapPage : public BaseHeapPage { 451 class HeapPage : public BaseHeapPage {
436 public: 452 public:
437 HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*); 453 HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*);
438 454
439 void link(HeapPage**); 455 void link(HeapPage**);
440 static void unlink(HeapPage*, HeapPage**); 456 static void unlink(HeapPage*, HeapPage**);
441 457
442 bool isEmpty(); 458 bool isEmpty();
443 459
460 // Returns true for the whole blinkPageSize page that the page is on, even
461 // for the header, and the unmapped guard page at the start. That ensures
462 // the result can be used to populate the negative page cache.
444 bool contains(Address addr) 463 bool contains(Address addr)
445 { 464 {
446 Address blinkPageStart = roundToBlinkPageStart(address()); 465 Address blinkPageStart = roundToBlinkPageStart(address());
447 return blinkPageStart <= addr && (blinkPageStart + blinkPageSize) > addr; 466 ASSERT(blinkPageStart == address() - osPageSize()); // Page is at aligned address plus guard page size.
467 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize;
448 } 468 }
449 469
450 HeapPage* next() { return m_next; } 470 HeapPage* next() { return m_next; }
451 471
452 Address payload() 472 Address payload()
453 { 473 {
454 return address() + sizeof(*this) + headerPadding<Header>(); 474 return address() + sizeof(*this) + headerPadding<Header>();
455 } 475 }
456 476
457 static size_t payloadSize() 477 static size_t payloadSize()
458 { 478 {
459 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask; 479 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask;
460 } 480 }
461 481
462 Address end() { return payload() + payloadSize(); } 482 Address end() { return payload() + payloadSize(); }
463 483
464 void getStats(HeapStats&); 484 void getStats(HeapStats&);
465 void clearMarks(); 485 void clearMarks();
466 void sweep(); 486 void sweep();
467 void clearObjectStartBitMap(); 487 void clearObjectStartBitMap();
468 void finalize(Header*); 488 void finalize(Header*);
469 virtual bool checkAndMarkPointer(Visitor*, Address); 489 virtual void checkAndMarkPointer(Visitor*, Address) OVERRIDE;
470 #if ENABLE(GC_TRACING) 490 #if ENABLE(GC_TRACING)
471 const GCInfo* findGCInfo(Address) OVERRIDE; 491 const GCInfo* findGCInfo(Address) OVERRIDE;
472 #endif 492 #endif
473 ThreadHeap<Header>* heap() { return m_heap; } 493 ThreadHeap<Header>* heap() { return m_heap; }
474 #if defined(ADDRESS_SANITIZER) 494 #if defined(ADDRESS_SANITIZER)
475 void poisonUnmarkedObjects(); 495 void poisonUnmarkedObjects();
476 #endif 496 #endif
477 497
478 protected: 498 protected:
479 Header* findHeaderFromAddress(Address); 499 Header* findHeaderFromAddress(Address);
480 void populateObjectStartBitMap(); 500 void populateObjectStartBitMap();
481 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } 501 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; }
482 TraceCallback traceCallback(Header*); 502 TraceCallback traceCallback(Header*);
483 bool hasVTable(Header*); 503 bool hasVTable(Header*);
484 504
485 HeapPage<Header>* m_next; 505 HeapPage<Header>* m_next;
486 ThreadHeap<Header>* m_heap; 506 ThreadHeap<Header>* m_heap;
487 bool m_objectStartBitMapComputed; 507 bool m_objectStartBitMapComputed;
488 uint8_t m_objectStartBitMap[reservedForObjectBitMap]; 508 uint8_t m_objectStartBitMap[reservedForObjectBitMap];
489 509
490 friend class ThreadHeap<Header>; 510 friend class ThreadHeap<Header>;
491 }; 511 };
492 512
493 // A HeapContainsCache provides a fast way of taking an arbitrary 513 class AddressEntry {
514 public:
515 AddressEntry() : m_address(0) { }
516
517 explicit AddressEntry(Address address) : m_address(address) { }
518
519 Address address() const { return m_address; }
520
521 private:
522 Address m_address;
523 };
524
525 class PositiveEntry : public AddressEntry {
526 public:
527 PositiveEntry()
528 : AddressEntry()
529 , m_containingPage(0)
530 {
531 }
532
533 PositiveEntry(Address address, BaseHeapPage* containingPage)
534 : AddressEntry(address)
535 , m_containingPage(containingPage)
536 {
537 }
538
539 BaseHeapPage* result() const { return m_containingPage; }
540
541 typedef BaseHeapPage* LookupResult;
542
543 private:
544 BaseHeapPage* m_containingPage;
545 };
546
547 class NegativeEntry : public AddressEntry {
548 public:
549 NegativeEntry() : AddressEntry() { }
550
551 NegativeEntry(Address address, bool) : AddressEntry(address) { }
552
553 bool result() const { return true; }
554
555 typedef bool LookupResult;
556 };
557
558 // A HeapExtentCache provides a fast way of taking an arbitrary
494 // pointer-sized word, and determining whether it can be interpreted 559 // pointer-sized word, and determining whether it can be interpreted
495 // as a pointer to an area that is managed by the garbage collected 560 // as a pointer to an area that is managed by the garbage collected
496 // Blink heap. There is a cache of 'pages' that have previously been 561 // Blink heap. There is a cache of 'pages' that have previously been
497 // determined to be either wholly inside or wholly outside the 562 // determined to be wholly inside the heap. The size of these pages must be
498 // heap. The size of these pages must be smaller than the allocation 563 // smaller than the allocation alignment of the heap pages. We determine
499 // alignment of the heap pages. We determine on-heap-ness by rounding 564 // on-heap-ness by rounding down the pointer to the nearest page and looking up
500 // down the pointer to the nearest page and looking up the page in the 565 // the page in the cache. If there is a miss in the cache we can ask the heap
501 // cache. If there is a miss in the cache we ask the heap to determine 566 // to determine the status of the pointer by iterating over all of the heap.
502 // the status of the pointer by iterating over all of the heap. The 567 // The result is then cached in the two-way associative page cache.
503 // result is then cached in the two-way associative page cache.
504 // 568 //
505 // A HeapContainsCache is both a positive and negative 569 // A HeapContainsCache is a positive cache. Therefore, it must be flushed when
506 // cache. Therefore, it must be flushed both when new memory is added 570 // memory is removed from the Blink heap. The HeapDoesNotContainCache is a
507 // and when memory is removed from the Blink heap. 571 // negative cache, so it must be flushed when memory is added to the heap.
508 class HeapContainsCache { 572 template<typename Entry>
573 class HeapExtentCache {
509 public: 574 public:
510 HeapContainsCache(); 575 HeapExtentCache()
576 : m_entries(adoptArrayPtr(new Entry[HeapExtentCache::numberOfEntries]))
577 , m_hasEntries(false)
578 {
579 }
511 580
512 void flush(); 581 void flush();
513 bool contains(Address); 582 bool contains(Address);
583 bool isEmpty() { return !m_hasEntries; }
514 584
515 // Perform a lookup in the cache. 585 // Perform a lookup in the cache.
516 // 586 //
517 // If lookup returns false the argument address was not found in 587 // If lookup returns null/false the argument address was not found in
518 // the cache and it is unknown if the address is in the Blink 588 // the cache and it is unknown if the address is in the Blink
519 // heap. 589 // heap.
520 // 590 //
521 // If lookup returns true the argument address was found in the 591 // If lookup returns true/a page, the argument address was found in the
522 // cache. In that case, the address is in the heap if the base 592 // cache. For the HeapContainsCache this means the address is in the heap.
523 // heap page out parameter is different from 0 and is not in the 593 // For the HeapDoesNotContainCache this means the address is not in the
524 // heap if the base heap page out parameter is 0. 594 // heap.
525 bool lookup(Address, BaseHeapPage**); 595 PLATFORM_EXPORT typename Entry::LookupResult lookup(Address);
526 596
527 // Add an entry to the cache. Use a 0 base heap page pointer to 597 // Add an entry to the cache.
528 // add a negative entry. 598 PLATFORM_EXPORT void addEntry(Address, typename Entry::LookupResult);
529 void addEntry(Address, BaseHeapPage*);
530 599
531 private: 600 private:
532 class Entry {
533 public:
534 Entry()
535 : m_address(0)
536 , m_containingPage(0)
537 {
538 }
539
540 Entry(Address address, BaseHeapPage* containingPage)
541 : m_address(address)
542 , m_containingPage(containingPage)
543 {
544 }
545
546 BaseHeapPage* containingPage() { return m_containingPage; }
547 Address address() { return m_address; }
548
549 private:
550 Address m_address;
551 BaseHeapPage* m_containingPage;
552 };
553
554 static const int numberOfEntriesLog2 = 12; 601 static const int numberOfEntriesLog2 = 12;
555 static const int numberOfEntries = 1 << numberOfEntriesLog2; 602 static const int numberOfEntries = 1 << numberOfEntriesLog2;
556 603
557 static size_t hash(Address); 604 static size_t hash(Address);
558 605
559 WTF::OwnPtr<HeapContainsCache::Entry[]> m_entries; 606 WTF::OwnPtr<Entry[]> m_entries;
607 bool m_hasEntries;
560 608
561 friend class ThreadState; 609 friend class ThreadState;
562 }; 610 };
563 611
612 // Normally these would be typedefs instead of subclasses, but that makes them
613 // very hard to forward declare.
614 class HeapContainsCache : public HeapExtentCache<PositiveEntry> {
615 public:
616 BaseHeapPage* lookup(Address);
617 void addEntry(Address, BaseHeapPage*);
618 };
619
620 class HeapDoesNotContainCache : public HeapExtentCache<NegativeEntry> { };
621
564 // FIXME: This is currently used by the WebAudio code. 622 // FIXME: This is currently used by the WebAudio code.
565 // We should attempt to restructure the WebAudio code so that the main thread 623 // We should attempt to restructure the WebAudio code so that the main thread
566 // alone determines life-time and receives messages about life-time from the 624 // alone determines life-time and receives messages about life-time from the
567 // audio thread. 625 // audio thread.
568 template<typename T> 626 template<typename T>
569 class ThreadSafeRefCountedGarbageCollected : public GarbageCollectedFinalized<T> , public WTF::ThreadSafeRefCountedBase { 627 class ThreadSafeRefCountedGarbageCollected : public GarbageCollectedFinalized<T> , public WTF::ThreadSafeRefCountedBase {
570 WTF_MAKE_NONCOPYABLE(ThreadSafeRefCountedGarbageCollected); 628 WTF_MAKE_NONCOPYABLE(ThreadSafeRefCountedGarbageCollected);
571 629
572 public: 630 public:
573 ThreadSafeRefCountedGarbageCollected() 631 ThreadSafeRefCountedGarbageCollected()
(...skipping 100 matching lines...) Expand 10 before | Expand all | Expand 10 after
674 // Non-template super class used to pass a heap around to other classes. 732 // Non-template super class used to pass a heap around to other classes.
675 class BaseHeap { 733 class BaseHeap {
676 public: 734 public:
677 virtual ~BaseHeap() { } 735 virtual ~BaseHeap() { }
678 736
679 // Find the page in this thread heap containing the given 737 // Find the page in this thread heap containing the given
680 // address. Returns 0 if the address is not contained in any 738 // address. Returns 0 if the address is not contained in any
681 // page in this thread heap. 739 // page in this thread heap.
682 virtual BaseHeapPage* heapPageFromAddress(Address) = 0; 740 virtual BaseHeapPage* heapPageFromAddress(Address) = 0;
683 741
684 // Find the large object in this thread heap containing the given
685 // address. Returns 0 if the address is not contained in any
686 // page in this thread heap.
687 virtual BaseHeapPage* largeHeapObjectFromAddress(Address) = 0;
688
689 #if ENABLE(GC_TRACING) 742 #if ENABLE(GC_TRACING)
690 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address) = 0; 743 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address) = 0;
691 #endif 744 #endif
692 745
693 // Check if the given address could point to an object in this
694 // heap. If so, find the start of that object and mark it using
695 // the given Visitor.
696 //
697 // Returns true if the object was found and marked, returns false
698 // otherwise.
699 //
700 // This is used during conservative stack scanning to
701 // conservatively mark all objects that could be referenced from
702 // the stack.
703 virtual bool checkAndMarkLargeHeapObject(Visitor*, Address) = 0;
704
705 // Sweep this part of the Blink heap. This finalizes dead objects 746 // Sweep this part of the Blink heap. This finalizes dead objects
706 // and builds freelists for all the unused memory. 747 // and builds freelists for all the unused memory.
707 virtual void sweep() = 0; 748 virtual void sweep() = 0;
708 749
709 // Forcefully finalize all objects in this part of the Blink heap 750 // Forcefully finalize all objects in this part of the Blink heap
710 // (potentially with the exception of one object). This is used 751 // (potentially with the exception of one object). This is used
711 // during thread termination to make sure that all objects for the 752 // during thread termination to make sure that all objects for the
712 // dying thread are finalized. 753 // dying thread are finalized.
713 virtual void assertEmpty() = 0; 754 virtual void assertEmpty() = 0;
714 755
(...skipping 22 matching lines...) Expand all
737 // (potentially adding new pages to the heap), to find and mark 778 // (potentially adding new pages to the heap), to find and mark
738 // objects during conservative stack scanning and to sweep the set of 779 // objects during conservative stack scanning and to sweep the set of
739 // pages after a GC. 780 // pages after a GC.
740 template<typename Header> 781 template<typename Header>
741 class ThreadHeap : public BaseHeap { 782 class ThreadHeap : public BaseHeap {
742 public: 783 public:
743 ThreadHeap(ThreadState*); 784 ThreadHeap(ThreadState*);
744 virtual ~ThreadHeap(); 785 virtual ~ThreadHeap();
745 786
746 virtual BaseHeapPage* heapPageFromAddress(Address); 787 virtual BaseHeapPage* heapPageFromAddress(Address);
747 virtual BaseHeapPage* largeHeapObjectFromAddress(Address);
748 #if ENABLE(GC_TRACING) 788 #if ENABLE(GC_TRACING)
749 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address); 789 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address);
750 #endif 790 #endif
751 virtual bool checkAndMarkLargeHeapObject(Visitor*, Address);
752 virtual void sweep(); 791 virtual void sweep();
753 virtual void assertEmpty(); 792 virtual void assertEmpty();
754 virtual void clearFreeLists(); 793 virtual void clearFreeLists();
755 virtual void clearMarks(); 794 virtual void clearMarks();
756 #ifndef NDEBUG 795 #ifndef NDEBUG
757 virtual void getScannedStats(HeapStats&); 796 virtual void getScannedStats(HeapStats&);
758 #endif 797 #endif
759 798
760 virtual void makeConsistentForGC(); 799 virtual void makeConsistentForGC();
761 virtual bool isConsistentForGC(); 800 virtual bool isConsistentForGC();
762 801
763 ThreadState* threadState() { return m_threadState; } 802 ThreadState* threadState() { return m_threadState; }
764 HeapStats& stats() { return m_threadState->stats(); } 803 HeapStats& stats() { return m_threadState->stats(); }
765 HeapContainsCache* heapContainsCache() { return m_threadState->heapContainsC ache(); } 804 void flushHeapContainsCache()
805 {
806 m_threadState->heapContainsCache()->flush();
807 }
766 808
767 inline Address allocate(size_t, const GCInfo*); 809 inline Address allocate(size_t, const GCInfo*);
768 void addToFreeList(Address, size_t); 810 void addToFreeList(Address, size_t);
769 void addPageToPool(HeapPage<Header>*); 811 void addPageToPool(HeapPage<Header>*);
770 inline static size_t roundedAllocationSize(size_t size) 812 inline static size_t roundedAllocationSize(size_t size)
771 { 813 {
772 return allocationSizeFromSize(size) - sizeof(Header); 814 return allocationSizeFromSize(size) - sizeof(Header);
773 } 815 }
774 816
775 private: 817 private:
(...skipping 123 matching lines...) Expand 10 before | Expand all | Expand 10 after
899 #endif 941 #endif
900 942
901 // Collect heap stats for all threads attached to the Blink 943 // Collect heap stats for all threads attached to the Blink
902 // garbage collector. Should only be called during garbage 944 // garbage collector. Should only be called during garbage
903 // collection where threads are known to be at safe points. 945 // collection where threads are known to be at safe points.
904 static void getStats(HeapStats*); 946 static void getStats(HeapStats*);
905 947
906 static bool isConsistentForGC(); 948 static bool isConsistentForGC();
907 static void makeConsistentForGC(); 949 static void makeConsistentForGC();
908 950
951 static void flushHeapDoesNotContainCache();
952 static bool heapDoesNotContainCacheIsEmpty() { return s_heapDoesNotContainCache->isEmpty(); }
953
954 private:
909 static Visitor* s_markingVisitor; 955 static Visitor* s_markingVisitor;
910 956
911 static CallbackStack* s_markingStack; 957 static CallbackStack* s_markingStack;
912 static CallbackStack* s_weakCallbackStack; 958 static CallbackStack* s_weakCallbackStack;
959 static HeapDoesNotContainCache* s_heapDoesNotContainCache;
913 static bool s_shutdownCalled; 960 static bool s_shutdownCalled;
961 friend class ThreadState;
914 }; 962 };
915 963
916 // The NoAllocationScope class is used in debug mode to catch unwanted 964 // The NoAllocationScope class is used in debug mode to catch unwanted
917 // allocations. E.g. allocations during GC. 965 // allocations. E.g. allocations during GC.
918 template<ThreadAffinity Affinity> 966 template<ThreadAffinity Affinity>
919 class NoAllocationScope { 967 class NoAllocationScope {
920 public: 968 public:
921 NoAllocationScope() : m_active(true) { enter(); } 969 NoAllocationScope() : m_active(true) { enter(); }
922 970
923 explicit NoAllocationScope(bool active) : m_active(active) { enter(); } 971 explicit NoAllocationScope(bool active) : m_active(active) { enter(); }
(...skipping 1331 matching lines...) Expand 10 before | Expand all | Expand 10 after
2255 // to export. This forces it to export all the methods from ThreadHeap. 2303 // to export. This forces it to export all the methods from ThreadHeap.
2256 template<> void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo*); 2304 template<> void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo*);
2257 template<> void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo*); 2305 template<> void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo*);
2258 extern template class PLATFORM_EXPORT ThreadHeap<FinalizedHeapObjectHeader>; 2306 extern template class PLATFORM_EXPORT ThreadHeap<FinalizedHeapObjectHeader>;
2259 extern template class PLATFORM_EXPORT ThreadHeap<HeapObjectHeader>; 2307 extern template class PLATFORM_EXPORT ThreadHeap<HeapObjectHeader>;
2260 #endif 2308 #endif
2261 2309
2262 } 2310 }
2263 2311
2264 #endif // Heap_h 2312 #endif // Heap_h
OLDNEW
« no previous file with comments | « no previous file | Source/platform/heap/Heap.cpp » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698