OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
81 } | 81 } |
82 | 82 |
83 // Blink heap pages are aligned to the Blink heap page size. | 83 // Blink heap pages are aligned to the Blink heap page size. |
84 // Therefore, the start of a Blink page can be obtained by | 84 // Therefore, the start of a Blink page can be obtained by |
85 // rounding down to the Blink page size. | 85 // rounding down to the Blink page size. |
86 inline Address roundToBlinkPageStart(Address address) | 86 inline Address roundToBlinkPageStart(Address address) |
87 { | 87 { |
88 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask); | 88 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask); |
89 } | 89 } |
90 | 90 |
91 inline Address roundToBlinkPageEnd(Address address) | |
92 { | |
93 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address - 1) & blinkPageBaseMask) + blinkPageSize; | |
94 } | |
95 | |
91 // Compute the amount of padding we have to add to a header to make | 96 // Compute the amount of padding we have to add to a header to make |
92 // the size of the header plus the padding a multiple of 8 bytes. | 97 // the size of the header plus the padding a multiple of 8 bytes. |
93 template<typename Header> | 98 template<typename Header> |
94 inline size_t headerPadding() | 99 inline size_t headerPadding() |
95 { | 100 { |
96 return (allocationGranularity - (sizeof(Header) % allocationGranularity)) % allocationGranularity; | 101 return (allocationGranularity - (sizeof(Header) % allocationGranularity)) % allocationGranularity; |
97 } | 102 } |
98 | 103 |
99 // Masks an address down to the enclosing blink page base address. | 104 // Masks an address down to the enclosing blink page base address. |
100 inline Address blinkPageAddress(Address address) | 105 inline Address blinkPageAddress(Address address) |
(...skipping 28 matching lines...) Expand all Loading... | |
129 : m_storage(storage) | 134 : m_storage(storage) |
130 , m_gcInfo(gcInfo) | 135 , m_gcInfo(gcInfo) |
131 , m_threadState(state) | 136 , m_threadState(state) |
132 , m_padding(0) | 137 , m_padding(0) |
133 { | 138 { |
134 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); | 139 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); |
135 } | 140 } |
136 | 141 |
137 // Check if the given address could point to an object in this | 142 // Check if the given address could point to an object in this |
138 // heap page. If so, find the start of that object and mark it | 143 // heap page. If so, find the start of that object and mark it |
139 // using the given Visitor. | 144 // using the given Visitor. Otherwise does nothing. |
140 // | |
141 // Returns true if the object was found and marked, returns false | |
142 // otherwise. | |
143 // | 145 // |
144 // This is used during conservative stack scanning to | 146 // This is used during conservative stack scanning to |
145 // conservatively mark all objects that could be referenced from | 147 // conservatively mark all objects that could be referenced from |
146 // the stack. | 148 // the stack. |
147 virtual bool checkAndMarkPointer(Visitor*, Address) = 0; | 149 virtual void checkAndMarkPointer(Visitor*, Address) = 0; |
148 | 150 |
149 #if ENABLE(GC_TRACING) | 151 #if ENABLE(GC_TRACING) |
150 virtual const GCInfo* findGCInfo(Address) = 0; | 152 virtual const GCInfo* findGCInfo(Address) = 0; |
151 #endif | 153 #endif |
152 | 154 |
153 Address address() { return reinterpret_cast<Address>(this); } | 155 Address address() { return reinterpret_cast<Address>(this); } |
154 PageMemory* storage() const { return m_storage; } | 156 PageMemory* storage() const { return m_storage; } |
155 ThreadState* threadState() const { return m_threadState; } | 157 ThreadState* threadState() const { return m_threadState; } |
156 const GCInfo* gcInfo() { return m_gcInfo; } | 158 const GCInfo* gcInfo() { return m_gcInfo; } |
159 virtual bool isLargeObject() { return false; } | |
157 | 160 |
158 private: | 161 private: |
159 // Accessor to silence unused warnings. | 162 // Accessor to silence unused warnings. |
160 void* padding() const { return m_padding; } | 163 void* padding() const { return m_padding; } |
161 | 164 |
162 PageMemory* m_storage; | 165 PageMemory* m_storage; |
163 const GCInfo* m_gcInfo; | 166 const GCInfo* m_gcInfo; |
164 ThreadState* m_threadState; | 167 ThreadState* m_threadState; |
165 // Free word only needed to ensure proper alignment of the | 168 // Free word only needed to ensure proper alignment of the |
166 // HeapPage header. | 169 // HeapPage header. |
(...skipping 11 matching lines...) Expand all Loading... | |
178 // | 181 // |
179 // | BaseHeapPage | next pointer | FinalizedHeapObjectHeader or HeapObjectHeader | payload | | 182 // | BaseHeapPage | next pointer | FinalizedHeapObjectHeader or HeapObjectHeader | payload | |
180 template<typename Header> | 183 template<typename Header> |
181 class LargeHeapObject : public BaseHeapPage { | 184 class LargeHeapObject : public BaseHeapPage { |
182 public: | 185 public: |
183 LargeHeapObject(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) : BaseHeapPage(storage, gcInfo, state) | 186 LargeHeapObject(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) : BaseHeapPage(storage, gcInfo, state) |
184 { | 187 { |
185 COMPILE_ASSERT(!(sizeof(LargeHeapObject<Header>) & allocationMask), large_heap_object_header_misaligned); | 188 COMPILE_ASSERT(!(sizeof(LargeHeapObject<Header>) & allocationMask), large_heap_object_header_misaligned); |
186 } | 189 } |
187 | 190 |
188 virtual bool checkAndMarkPointer(Visitor*, Address); | 191 virtual void checkAndMarkPointer(Visitor*, Address); |
192 virtual bool isLargeObject() { return true; } | |
haraken
2014/05/08 05:44:58
Add OVERRIDE.
Erik Corry
2014/05/08 09:26:08
Done.
| |
189 | 193 |
190 #if ENABLE(GC_TRACING) | 194 #if ENABLE(GC_TRACING) |
191 virtual const GCInfo* findGCInfo(Address) | 195 virtual const GCInfo* findGCInfo(Address address) |
192 { | 196 { |
197 if (!objectContains(address)) | |
198 return 0; | |
193 return gcInfo(); | 199 return gcInfo(); |
194 } | 200 } |
195 #endif | 201 #endif |
196 | 202 |
197 void link(LargeHeapObject<Header>** previousNext) | 203 void link(LargeHeapObject<Header>** previousNext) |
198 { | 204 { |
199 m_next = *previousNext; | 205 m_next = *previousNext; |
200 *previousNext = this; | 206 *previousNext = this; |
201 } | 207 } |
202 | 208 |
203 void unlink(LargeHeapObject<Header>** previousNext) | 209 void unlink(LargeHeapObject<Header>** previousNext) |
204 { | 210 { |
205 *previousNext = m_next; | 211 *previousNext = m_next; |
206 } | 212 } |
207 | 213 |
214 // The LargeHeapObject pseudo-page contains one actual object. Determine | |
215 // whether the pointer is within that object. | |
216 bool objectContains(Address object) | |
217 { | |
218 return (payload() <= object) && (object < address() + size()); | |
219 } | |
220 | |
221 // Returns true for any address that is on one of the pages that this | |
222 // large object uses. That ensures that we can use a negative result to | |
223 // populate the negative page cache. | |
208 bool contains(Address object) | 224 bool contains(Address object) |
209 { | 225 { |
210 return (address() <= object) && (object <= (address() + size())); | 226 return address() <= object && object < roundToBlinkPageEnd(address() + size()); |
211 } | 227 } |
212 | 228 |
213 LargeHeapObject<Header>* next() | 229 LargeHeapObject<Header>* next() |
214 { | 230 { |
215 return m_next; | 231 return m_next; |
216 } | 232 } |
217 | 233 |
218 size_t size() | 234 size_t size() |
219 { | 235 { |
220 return heapObjectHeader()->size() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); | 236 return heapObjectHeader()->size() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); |
221 } | 237 } |
222 | 238 |
239 size_t sizeRoundedUpToPage() | |
240 { | |
241 return ((size() - 1) & blinkPageBaseMask) + blinkPageSize; | |
haraken
2014/05/08 05:44:58
Can we use roundToBlinkPageEnd? If you use roundTo
Erik Corry
2014/05/08 09:26:08
Removed
| |
242 } | |
243 | |
223 Address payload() { return heapObjectHeader()->payload(); } | 244 Address payload() { return heapObjectHeader()->payload(); } |
224 size_t payloadSize() { return heapObjectHeader()->payloadSize(); } | 245 size_t payloadSize() { return heapObjectHeader()->payloadSize(); } |
225 | 246 |
226 Header* heapObjectHeader() | 247 Header* heapObjectHeader() |
227 { | 248 { |
228 Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); | 249 Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); |
229 return reinterpret_cast<Header*>(headerAddress); | 250 return reinterpret_cast<Header*>(headerAddress); |
230 } | 251 } |
231 | 252 |
232 bool isMarked(); | 253 bool isMarked(); |
233 void unmark(); | 254 void unmark(); |
234 void getStats(HeapStats&); | 255 void getStats(HeapStats&); |
235 void mark(Visitor*); | 256 void mark(Visitor*); |
236 void finalize(); | 257 void finalize(); |
237 | 258 |
238 private: | 259 private: |
239 friend class Heap; | |
240 friend class ThreadHeap<Header>; | 260 friend class ThreadHeap<Header>; |
241 | 261 |
242 LargeHeapObject<Header>* m_next; | 262 LargeHeapObject<Header>* m_next; |
243 }; | 263 }; |
244 | 264 |
245 // The BasicObjectHeader is the minimal object header. It is used when | 265 // The BasicObjectHeader is the minimal object header. It is used when |
246 // encountering heap space of size allocationGranularity to mark it as | 266 // encountering heap space of size allocationGranularity to mark it as |
247 // as freelist entry. | 267 // as freelist entry. |
248 class PLATFORM_EXPORT BasicObjectHeader { | 268 class PLATFORM_EXPORT BasicObjectHeader { |
249 public: | 269 public: |
(...skipping 182 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
432 template<typename Header> | 452 template<typename Header> |
433 class HeapPage : public BaseHeapPage { | 453 class HeapPage : public BaseHeapPage { |
434 public: | 454 public: |
435 HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*); | 455 HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*); |
436 | 456 |
437 void link(HeapPage**); | 457 void link(HeapPage**); |
438 static void unlink(HeapPage*, HeapPage**); | 458 static void unlink(HeapPage*, HeapPage**); |
439 | 459 |
440 bool isEmpty(); | 460 bool isEmpty(); |
441 | 461 |
462 // Returns true for the whole blinkPageSize page that the page is on, even | |
463 // for the header. That ensures the result can be used to populate the negative | |
464 // page cache. | |
442 bool contains(Address addr) | 465 bool contains(Address addr) |
443 { | 466 { |
444 Address blinkPageStart = roundToBlinkPageStart(address()); | 467 ASSERT(address() == roundToBlinkPageStart(address())); |
468 Address blinkPageStart = address(); | |
445 return blinkPageStart <= addr && (blinkPageStart + blinkPageSize) > addr; | 469 return blinkPageStart <= addr && (blinkPageStart + blinkPageSize) > addr; |
446 } | 470 } |
447 | 471 |
448 HeapPage* next() { return m_next; } | 472 HeapPage* next() { return m_next; } |
449 | 473 |
450 Address payload() | 474 Address payload() |
451 { | 475 { |
452 return address() + sizeof(*this) + headerPadding<Header>(); | 476 return address() + sizeof(*this) + headerPadding<Header>(); |
453 } | 477 } |
454 | 478 |
455 static size_t payloadSize() | 479 static size_t payloadSize() |
456 { | 480 { |
457 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask; | 481 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask; |
458 } | 482 } |
459 | 483 |
460 Address end() { return payload() + payloadSize(); } | 484 Address end() { return payload() + payloadSize(); } |
461 | 485 |
462 void getStats(HeapStats&); | 486 void getStats(HeapStats&); |
463 void clearMarks(); | 487 void clearMarks(); |
464 void sweep(); | 488 void sweep(); |
465 void clearObjectStartBitMap(); | 489 void clearObjectStartBitMap(); |
466 void finalize(Header*); | 490 void finalize(Header*); |
467 virtual bool checkAndMarkPointer(Visitor*, Address); | 491 virtual void checkAndMarkPointer(Visitor*, Address); |
468 #if ENABLE(GC_TRACING) | 492 #if ENABLE(GC_TRACING) |
469 const GCInfo* findGCInfo(Address) OVERRIDE; | 493 const GCInfo* findGCInfo(Address) OVERRIDE; |
470 #endif | 494 #endif |
471 ThreadHeap<Header>* heap() { return m_heap; } | 495 ThreadHeap<Header>* heap() { return m_heap; } |
472 #if defined(ADDRESS_SANITIZER) | 496 #if defined(ADDRESS_SANITIZER) |
473 void poisonUnmarkedObjects(); | 497 void poisonUnmarkedObjects(); |
474 #endif | 498 #endif |
475 | 499 |
476 protected: | 500 protected: |
477 Header* findHeaderFromAddress(Address); | 501 Header* findHeaderFromAddress(Address); |
478 void populateObjectStartBitMap(); | 502 void populateObjectStartBitMap(); |
479 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } | 503 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } |
480 TraceCallback traceCallback(Header*); | 504 TraceCallback traceCallback(Header*); |
481 bool hasVTable(Header*); | 505 bool hasVTable(Header*); |
482 | 506 |
483 HeapPage<Header>* m_next; | 507 HeapPage<Header>* m_next; |
484 ThreadHeap<Header>* m_heap; | 508 ThreadHeap<Header>* m_heap; |
485 bool m_objectStartBitMapComputed; | 509 bool m_objectStartBitMapComputed; |
486 uint8_t m_objectStartBitMap[reservedForObjectBitMap]; | 510 uint8_t m_objectStartBitMap[reservedForObjectBitMap]; |
487 | 511 |
488 friend class ThreadHeap<Header>; | 512 friend class ThreadHeap<Header>; |
489 }; | 513 }; |
490 | 514 |
491 // A HeapContainsCache provides a fast way of taking an arbitrary | 515 class AddressEntry { |
516 public: | |
517 AddressEntry() : m_address(0) { } | |
518 | |
519 AddressEntry(Address address) : m_address(address) { } | |
haraken
2014/05/08 05:44:58
Add explicit.
Erik Corry
2014/05/08 09:26:08
Done.
| |
520 | |
521 Address address() const { return m_address; } | |
522 | |
523 private: | |
524 Address m_address; | |
525 }; | |
526 | |
527 class PositiveEntry : public AddressEntry { | |
528 public: | |
529 PositiveEntry() | |
530 : AddressEntry() | |
531 , m_containingPage(0) | |
532 { | |
533 } | |
534 | |
535 PositiveEntry(Address address, BaseHeapPage* containingPage) | |
536 : AddressEntry(address) | |
537 , m_containingPage(containingPage) | |
538 { | |
539 } | |
540 | |
541 BaseHeapPage* result() const { return m_containingPage; } | |
542 | |
543 typedef BaseHeapPage* LookupResult; | |
544 | |
545 private: | |
546 BaseHeapPage* m_containingPage; | |
547 }; | |
548 | |
549 class NegativeEntry : public AddressEntry { | |
550 public: | |
551 NegativeEntry() : AddressEntry() { } | |
552 | |
553 NegativeEntry(Address address, bool) : AddressEntry(address) { } | |
haraken
2014/05/08 05:44:58
Do we need the bool parameter? NegativeEntry shoul
Erik Corry
2014/05/08 09:26:08
The addEntry method of HeapExtentCache expects to
| |
554 | |
555 bool result() const { return true; } | |
556 | |
557 typedef bool LookupResult; | |
558 }; | |
559 | |
560 // A HeapExtentCache provides a fast way of taking an arbitrary | |
492 // pointer-sized word, and determining whether it can be interpreted | 561 // pointer-sized word, and determining whether it can be interpreted |
493 // as a pointer to an area that is managed by the garbage collected | 562 // as a pointer to an area that is managed by the garbage collected |
494 // Blink heap. There is a cache of 'pages' that have previously been | 563 // Blink heap. There is a cache of 'pages' that have previously been |
495 // determined to be either wholly inside or wholly outside the | 564 // determined to be wholly inside the heap. The size of these pages must be |
496 // heap. The size of these pages must be smaller than the allocation | 565 // smaller than the allocation alignment of the heap pages. We determine |
497 // alignment of the heap pages. We determine on-heap-ness by rounding | 566 // on-heap-ness by rounding down the pointer to the nearest page and looking up |
498 // down the pointer to the nearest page and looking up the page in the | 567 // the page in the cache. If there is a miss in the cache we can ask the heap |
499 // cache. If there is a miss in the cache we ask the heap to determine | 568 // to determine the status of the pointer by iterating over all of the heap. |
500 // the status of the pointer by iterating over all of the heap. The | 569 // The result is then cached in the two-way associative page cache. |
501 // result is then cached in the two-way associative page cache. | |
502 // | 570 // |
503 // A HeapContainsCache is both a positive and negative | 571 // A HeapContainsCache is a positive cache. Therefore, it must be flushed when |
504 // cache. Therefore, it must be flushed both when new memory is added | 572 // memory is removed from the Blink heap. The HeapDoesNotContainCache is a |
505 // and when memory is removed from the Blink heap. | 573 // negative cache, so it must be flushed when memory is added to the heap. |
506 class HeapContainsCache { | 574 template<typename Entry> |
575 class HeapExtentCache { | |
507 public: | 576 public: |
508 HeapContainsCache(); | 577 HeapExtentCache() |
578 : m_entries(adoptArrayPtr(new Entry[HeapExtentCache::numberOfEntries])) | |
579 , m_hasEntries(false) | |
580 { | |
581 } | |
509 | 582 |
510 void flush(); | 583 void flush(); |
511 bool contains(Address); | 584 bool contains(Address); |
512 | 585 |
513 // Perform a lookup in the cache. | 586 // Perform a lookup in the cache. |
514 // | 587 // |
515 // If lookup returns false the argument address was not found in | 588 // If lookup returns null/false the argument address was not found in |
516 // the cache and it is unknown if the address is in the Blink | 589 // the cache and it is unknown if the address is in the Blink |
517 // heap. | 590 // heap. |
518 // | 591 // |
519 // If lookup returns true the argument address was found in the | 592 // If lookup returns true/a page, the argument address was found in the |
520 // cache. In that case, the address is in the heap if the base | 593 // cache. For the HeapContainsCache this means the address is in the heap. |
521 // heap page out parameter is different from 0 and is not in the | 594 // For the HeapDoesNotContainCache this means the address is not in the |
522 // heap if the base heap page out parameter is 0. | 595 // heap. |
523 bool lookup(Address, BaseHeapPage**); | 596 PLATFORM_EXPORT typename Entry::LookupResult lookup(Address); |
524 | 597 |
525 // Add an entry to the cache. Use a 0 base heap page pointer to | 598 // Add an entry to the cache. |
526 // add a negative entry. | 599 PLATFORM_EXPORT void addEntry(Address, typename Entry::LookupResult); |
527 void addEntry(Address, BaseHeapPage*); | |
528 | 600 |
529 private: | 601 private: |
530 class Entry { | |
531 public: | |
532 Entry() | |
533 : m_address(0) | |
534 , m_containingPage(0) | |
535 { | |
536 } | |
537 | |
538 Entry(Address address, BaseHeapPage* containingPage) | |
539 : m_address(address) | |
540 , m_containingPage(containingPage) | |
541 { | |
542 } | |
543 | |
544 BaseHeapPage* containingPage() { return m_containingPage; } | |
545 Address address() { return m_address; } | |
546 | |
547 private: | |
548 Address m_address; | |
549 BaseHeapPage* m_containingPage; | |
550 }; | |
551 | |
552 static const int numberOfEntriesLog2 = 12; | 602 static const int numberOfEntriesLog2 = 12; |
553 static const int numberOfEntries = 1 << numberOfEntriesLog2; | 603 static const int numberOfEntries = 1 << numberOfEntriesLog2; |
554 | 604 |
555 static size_t hash(Address); | 605 static size_t hash(Address); |
556 | 606 |
557 WTF::OwnPtr<HeapContainsCache::Entry[]> m_entries; | 607 WTF::OwnPtr<Entry[]> m_entries; |
608 bool m_hasEntries; | |
558 | 609 |
559 friend class ThreadState; | 610 friend class ThreadState; |
560 }; | 611 }; |
561 | 612 |
613 // Normally these would be typedefs instead of subclasses, but that makes them | |
614 // very hard to forward declare. | |
615 class HeapContainsCache : public HeapExtentCache<PositiveEntry> { | |
616 public: | |
617 BaseHeapPage* lookup(Address); | |
618 void addEntry(Address, BaseHeapPage*); | |
619 }; | |
620 | |
621 class HeapDoesNotContainCache : public HeapExtentCache<NegativeEntry> { }; | |
622 | |
562 // The CallbackStack contains all the visitor callbacks used to trace and mark | 623 // The CallbackStack contains all the visitor callbacks used to trace and mark |
563 // objects. A specific CallbackStack instance contains at most bufferSize elements. | 624 // objects. A specific CallbackStack instance contains at most bufferSize elements. |
564 // If more space is needed a new CallbackStack instance is created and chained | 625 // If more space is needed a new CallbackStack instance is created and chained |
565 // together with the former instance. I.e. a logical CallbackStack can be made of | 626 // together with the former instance. I.e. a logical CallbackStack can be made of |
566 // multiple chained CallbackStack object instances. | 627 // multiple chained CallbackStack object instances. |
567 // There are two logical callback stacks. One containing all the marking callbacks and | 628 // There are two logical callback stacks. One containing all the marking callbacks and |
568 // one containing the weak pointer callbacks. | 629 // one containing the weak pointer callbacks. |
569 class CallbackStack { | 630 class CallbackStack { |
570 public: | 631 public: |
571 CallbackStack(CallbackStack** first) | 632 CallbackStack(CallbackStack** first) |
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
622 // Non-template super class used to pass a heap around to other classes. | 683 // Non-template super class used to pass a heap around to other classes. |
623 class BaseHeap { | 684 class BaseHeap { |
624 public: | 685 public: |
625 virtual ~BaseHeap() { } | 686 virtual ~BaseHeap() { } |
626 | 687 |
627 // Find the page in this thread heap containing the given | 688 // Find the page in this thread heap containing the given |
628 // address. Returns 0 if the address is not contained in any | 689 // address. Returns 0 if the address is not contained in any |
629 // page in this thread heap. | 690 // page in this thread heap. |
630 virtual BaseHeapPage* heapPageFromAddress(Address) = 0; | 691 virtual BaseHeapPage* heapPageFromAddress(Address) = 0; |
631 | 692 |
632 // Find the large object in this thread heap containing the given | |
633 // address. Returns 0 if the address is not contained in any | |
634 // page in this thread heap. | |
635 virtual BaseHeapPage* largeHeapObjectFromAddress(Address) = 0; | |
636 | |
637 #if ENABLE(GC_TRACING) | 693 #if ENABLE(GC_TRACING) |
638 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address) = 0; | 694 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address) = 0; |
639 #endif | 695 #endif |
640 | 696 |
641 // Check if the given address could point to an object in this | |
642 // heap. If so, find the start of that object and mark it using | |
643 // the given Visitor. | |
644 // | |
645 // Returns true if the object was found and marked, returns false | |
646 // otherwise. | |
647 // | |
648 // This is used during conservative stack scanning to | |
649 // conservatively mark all objects that could be referenced from | |
650 // the stack. | |
651 virtual bool checkAndMarkLargeHeapObject(Visitor*, Address) = 0; | |
652 | |
653 // Sweep this part of the Blink heap. This finalizes dead objects | 697 // Sweep this part of the Blink heap. This finalizes dead objects |
654 // and builds freelists for all the unused memory. | 698 // and builds freelists for all the unused memory. |
655 virtual void sweep() = 0; | 699 virtual void sweep() = 0; |
656 | 700 |
657 // Forcefully finalize all objects in this part of the Blink heap | 701 // Forcefully finalize all objects in this part of the Blink heap |
658 // (potentially with the exception of one object). This is used | 702 // (potentially with the exception of one object). This is used |
659 // during thread termination to make sure that all objects for the | 703 // during thread termination to make sure that all objects for the |
660 // dying thread are finalized. | 704 // dying thread are finalized. |
661 virtual void assertEmpty() = 0; | 705 virtual void assertEmpty() = 0; |
662 | 706 |
(...skipping 22 matching lines...) Expand all Loading... | |
685 // (potentially adding new pages to the heap), to find and mark | 729 // (potentially adding new pages to the heap), to find and mark |
686 // objects during conservative stack scanning and to sweep the set of | 730 // objects during conservative stack scanning and to sweep the set of |
687 // pages after a GC. | 731 // pages after a GC. |
688 template<typename Header> | 732 template<typename Header> |
689 class ThreadHeap : public BaseHeap { | 733 class ThreadHeap : public BaseHeap { |
690 public: | 734 public: |
691 ThreadHeap(ThreadState*); | 735 ThreadHeap(ThreadState*); |
692 virtual ~ThreadHeap(); | 736 virtual ~ThreadHeap(); |
693 | 737 |
694 virtual BaseHeapPage* heapPageFromAddress(Address); | 738 virtual BaseHeapPage* heapPageFromAddress(Address); |
695 virtual BaseHeapPage* largeHeapObjectFromAddress(Address); | |
696 #if ENABLE(GC_TRACING) | 739 #if ENABLE(GC_TRACING) |
697 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address); | 740 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address); |
698 #endif | 741 #endif |
699 virtual bool checkAndMarkLargeHeapObject(Visitor*, Address); | |
700 virtual void sweep(); | 742 virtual void sweep(); |
701 virtual void assertEmpty(); | 743 virtual void assertEmpty(); |
702 virtual void clearFreeLists(); | 744 virtual void clearFreeLists(); |
703 virtual void clearMarks(); | 745 virtual void clearMarks(); |
704 #ifndef NDEBUG | 746 #ifndef NDEBUG |
705 virtual void getScannedStats(HeapStats&); | 747 virtual void getScannedStats(HeapStats&); |
706 #endif | 748 #endif |
707 | 749 |
708 virtual void makeConsistentForGC(); | 750 virtual void makeConsistentForGC(); |
709 virtual bool isConsistentForGC(); | 751 virtual bool isConsistentForGC(); |
(...skipping 137 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
847 #endif | 889 #endif |
848 | 890 |
849 // Collect heap stats for all threads attached to the Blink | 891 // Collect heap stats for all threads attached to the Blink |
850 // garbage collector. Should only be called during garbage | 892 // garbage collector. Should only be called during garbage |
851 // collection where threads are known to be at safe points. | 893 // collection where threads are known to be at safe points. |
852 static void getStats(HeapStats*); | 894 static void getStats(HeapStats*); |
853 | 895 |
854 static bool isConsistentForGC(); | 896 static bool isConsistentForGC(); |
855 static void makeConsistentForGC(); | 897 static void makeConsistentForGC(); |
856 | 898 |
899 static bool notInHeap(Address); | |
900 static void addressIsNotInHeap(Address); | |
Mads Ager (chromium)
2014/05/08 06:52:37
I would remove these methods and just inline the o
Erik Corry
2014/05/08 09:26:08
Done.
| |
901 static void flushNotInHeapCache(); | |
haraken
2014/05/08 05:44:58
flushHeapDoesNotContainCache
It's confusing to ha
Erik Corry
2014/05/08 09:26:08
Done.
| |
902 | |
903 private: | |
857 static Visitor* s_markingVisitor; | 904 static Visitor* s_markingVisitor; |
858 | 905 |
859 static CallbackStack* s_markingStack; | 906 static CallbackStack* s_markingStack; |
860 static CallbackStack* s_weakCallbackStack; | 907 static CallbackStack* s_weakCallbackStack; |
908 static HeapDoesNotContainCache* s_notInHeapCache; | |
haraken
2014/05/08 05:44:58
s_notInHeapCache => s_heapDoesNotContainCache
Erik Corry
2014/05/08 09:26:08
Done.
| |
861 static bool s_shutdownCalled; | 909 static bool s_shutdownCalled; |
910 friend class ThreadState; | |
862 }; | 911 }; |
863 | 912 |
864 // The NoAllocationScope class is used in debug mode to catch unwanted | 913 // The NoAllocationScope class is used in debug mode to catch unwanted |
865 // allocations. E.g. allocations during GC. | 914 // allocations. E.g. allocations during GC. |
866 template<ThreadAffinity Affinity> | 915 template<ThreadAffinity Affinity> |
867 class NoAllocationScope { | 916 class NoAllocationScope { |
868 public: | 917 public: |
869 NoAllocationScope() : m_active(true) { enter(); } | 918 NoAllocationScope() : m_active(true) { enter(); } |
870 | 919 |
871 explicit NoAllocationScope(bool active) : m_active(active) { enter(); } | 920 explicit NoAllocationScope(bool active) : m_active(active) { enter(); } |
(...skipping 1331 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
2203 // to export. This forces it to export all the methods from ThreadHeap. | 2252 // to export. This forces it to export all the methods from ThreadHeap. |
2204 template<> void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo*); | 2253 template<> void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo*); |
2205 template<> void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo*); | 2254 template<> void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo*); |
2206 extern template class PLATFORM_EXPORT ThreadHeap<FinalizedHeapObjectHeader>; | 2255 extern template class PLATFORM_EXPORT ThreadHeap<FinalizedHeapObjectHeader>; |
2207 extern template class PLATFORM_EXPORT ThreadHeap<HeapObjectHeader>; | 2256 extern template class PLATFORM_EXPORT ThreadHeap<HeapObjectHeader>; |
2208 #endif | 2257 #endif |
2209 | 2258 |
2210 } | 2259 } |
2211 | 2260 |
2212 #endif // Heap_h | 2261 #endif // Heap_h |
OLD | NEW |