OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 70 matching lines...)
81 } | 81 } |
82 | 82 |
83 // Blink heap pages are aligned to the Blink heap page size. | 83 // Blink heap pages are aligned to the Blink heap page size. |
84 // Therefore, the start of a Blink page can be obtained by | 84 // Therefore, the start of a Blink page can be obtained by |
85 // rounding down to the Blink page size. | 85 // rounding down to the Blink page size. |
86 inline Address roundToBlinkPageStart(Address address) | 86 inline Address roundToBlinkPageStart(Address address) |
87 { | 87 { |
88     return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask); | 88     return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask); |
89 } | 89 } |
90 | 90 |
| 91 inline Address roundToBlinkPageEnd(Address address) |
| 92 { |
| 93     return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address - 1) & blinkPageBaseMask) + blinkPageSize; |
| 94 } |
| 95 |
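A quick sanity check of the two rounding helpers above, as a self-contained sketch. The concrete page size is an assumption for illustration only; the real blinkPageSize and blinkPageBaseMask constants are defined earlier in this header.

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const uintptr_t pageSize = 0x20000;         // hypothetical 128 KB blinkPageSize
        const uintptr_t baseMask = ~(pageSize - 1); // the matching blinkPageBaseMask

        // Rounding down to the page start masks off the low bits.
        assert((0x40021234 & baseMask) == 0x40020000);

        // Rounding up steps back one byte first, so an already-aligned
        // address maps to itself instead of to the next page boundary.
        assert((((0x40021234 - 1) & baseMask) + pageSize) == 0x40040000);
        assert((((0x40020000 - 1) & baseMask) + pageSize) == 0x40020000);
        return 0;
    }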
91 // Compute the amount of padding we have to add to a header to make | 96 // Compute the amount of padding we have to add to a header to make |
92 // the size of the header plus the padding a multiple of 8 bytes. | 97 // the size of the header plus the padding a multiple of 8 bytes. |
93 template<typename Header> | 98 template<typename Header> |
94 inline size_t headerPadding() | 99 inline size_t headerPadding() |
95 { | 100 { |
96     return (allocationGranularity - (sizeof(Header) % allocationGranularity)) % allocationGranularity; | 101     return (allocationGranularity - (sizeof(Header) % allocationGranularity)) % allocationGranularity; |
97 } | 102 } |
98 | 103 |
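A worked instance of the padding formula, assuming an 8-byte allocationGranularity: for sizeof(Header) == 12 the padding is (8 - 12 % 8) % 8 == 4, so header plus padding occupies 16 bytes; for sizeof(Header) == 8 it is (8 - 8 % 8) % 8 == 0, which is what the outer "% allocationGranularity" guarantees for already-aligned headers.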
99 // Masks an address down to the enclosing blink page base address. | 104 // Masks an address down to the enclosing blink page base address. |
100 inline Address blinkPageAddress(Address address) | 105 inline Address blinkPageAddress(Address address) |
(...skipping 26 matching lines...)
127 public: | 132 public: |
128 BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) | 133 BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) |
129 : m_storage(storage) | 134 : m_storage(storage) |
130 , m_gcInfo(gcInfo) | 135 , m_gcInfo(gcInfo) |
131 , m_threadState(state) | 136 , m_threadState(state) |
132 , m_padding(0) | 137 , m_padding(0) |
133 { | 138 { |
134 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); | 139 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); |
135 } | 140 } |
136 | 141 |
137 // Check if the given address could point to an object in this | 142 // Check if the given address points to an object in this |
138 // heap page. If so, find the start of that object and mark it | 143 // heap page. If so, find the start of that object and mark it |
139     // using the given Visitor. | 144     // using the given Visitor. Otherwise do nothing. The pointer must |
140     // | 145     // lie in the same blinkPageSize-aligned region as the this-pointer. |
141 // Returns true if the object was found and marked, returns false | |
142 // otherwise. | |
143 // | 146 // |
144 // This is used during conservative stack scanning to | 147 // This is used during conservative stack scanning to |
145 // conservatively mark all objects that could be referenced from | 148 // conservatively mark all objects that could be referenced from |
146 // the stack. | 149 // the stack. |
147 virtual bool checkAndMarkPointer(Visitor*, Address) = 0; | 150 virtual void checkAndMarkPointer(Visitor*, Address) = 0; |
148 | 151 |
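To make the contract concrete, a minimal sketch of how a conservative stack scan might drive this hook. The page-lookup helper is hypothetical; only checkAndMarkPointer comes from this header.

    // Treat every word found on the stack as a potential heap pointer.
    void scanStackWord(Visitor* visitor, Address candidate)
    {
        // Hypothetical helper that maps an address to its heap page, if any.
        if (BaseHeapPage* page = lookupPageContaining(candidate))
            page->checkAndMarkPointer(visitor, candidate); // does nothing on a miss within the page
    }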
149 #if ENABLE(GC_TRACING) | 152 #if ENABLE(GC_TRACING) |
150 virtual const GCInfo* findGCInfo(Address) = 0; | 153 virtual const GCInfo* findGCInfo(Address) = 0; |
151 #endif | 154 #endif |
152 | 155 |
153 Address address() { return reinterpret_cast<Address>(this); } | 156 Address address() { return reinterpret_cast<Address>(this); } |
154 PageMemory* storage() const { return m_storage; } | 157 PageMemory* storage() const { return m_storage; } |
155 ThreadState* threadState() const { return m_threadState; } | 158 ThreadState* threadState() const { return m_threadState; } |
156 const GCInfo* gcInfo() { return m_gcInfo; } | 159 const GCInfo* gcInfo() { return m_gcInfo; } |
| 160 virtual bool isLargeObject() { return false; } |
157 | 161 |
158 private: | 162 private: |
159 // Accessor to silence unused warnings. | 163 // Accessor to silence unused warnings. |
160 void* padding() const { return m_padding; } | 164 void* padding() const { return m_padding; } |
161 | 165 |
162 PageMemory* m_storage; | 166 PageMemory* m_storage; |
163 const GCInfo* m_gcInfo; | 167 const GCInfo* m_gcInfo; |
164 ThreadState* m_threadState; | 168 ThreadState* m_threadState; |
165 // Free word only needed to ensure proper alignment of the | 169 // Free word only needed to ensure proper alignment of the |
166 // HeapPage header. | 170 // HeapPage header. |
(...skipping 11 matching lines...)
178 // | 182 // |
179 // | BaseHeapPage | next pointer | FinalizedHeapObjectHeader or HeapObjectHeader | payload | | 183 // | BaseHeapPage | next pointer | FinalizedHeapObjectHeader or HeapObjectHeader | payload | |
180 template<typename Header> | 184 template<typename Header> |
181 class LargeHeapObject : public BaseHeapPage { | 185 class LargeHeapObject : public BaseHeapPage { |
182 public: | 186 public: |
183     LargeHeapObject(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) : BaseHeapPage(storage, gcInfo, state) | 187     LargeHeapObject(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) : BaseHeapPage(storage, gcInfo, state) |
184     { | 188     { |
185         COMPILE_ASSERT(!(sizeof(LargeHeapObject<Header>) & allocationMask), large_heap_object_header_misaligned); | 189         COMPILE_ASSERT(!(sizeof(LargeHeapObject<Header>) & allocationMask), large_heap_object_header_misaligned); |
186 } | 190 } |
187 | 191 |
188 virtual bool checkAndMarkPointer(Visitor*, Address); | 192 virtual void checkAndMarkPointer(Visitor*, Address) OVERRIDE; |
| 193 virtual bool isLargeObject() OVERRIDE { return true; } |
189 | 194 |
190 #if ENABLE(GC_TRACING) | 195 #if ENABLE(GC_TRACING) |
191 virtual const GCInfo* findGCInfo(Address) | 196 virtual const GCInfo* findGCInfo(Address address) |
192 { | 197 { |
| 198 if (!objectContains(address)) |
| 199 return 0; |
193 return gcInfo(); | 200 return gcInfo(); |
194 } | 201 } |
195 #endif | 202 #endif |
196 | 203 |
197 void link(LargeHeapObject<Header>** previousNext) | 204 void link(LargeHeapObject<Header>** previousNext) |
198 { | 205 { |
199 m_next = *previousNext; | 206 m_next = *previousNext; |
200 *previousNext = this; | 207 *previousNext = this; |
201 } | 208 } |
202 | 209 |
203 void unlink(LargeHeapObject<Header>** previousNext) | 210 void unlink(LargeHeapObject<Header>** previousNext) |
204 { | 211 { |
205 *previousNext = m_next; | 212 *previousNext = m_next; |
206 } | 213 } |
207 | 214 |
| 215 // The LargeHeapObject pseudo-page contains one actual object. Determine |
| 216 // whether the pointer is within that object. |
| 217 bool objectContains(Address object) |
| 218 { |
| 219 return (payload() <= object) && (object < address() + size()); |
| 220 } |
| 221 |
| 222 // Returns true for any address that is on one of the pages that this |
| 223 // large object uses. That ensures that we can use a negative result to |
| 224 // populate the negative page cache. |
208 bool contains(Address object) | 225 bool contains(Address object) |
209 { | 226 { |
210         return (address() <= object) && (object <= (address() + size())); | 227         return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size()); |
211 } | 228 } |
212 | 229 |
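A sketch of why contains() deliberately spans whole aligned pages: once every page and large object has answered false, the candidate's entire aligned page can be entered into the negative cache. The call-site shape is illustrative; addEntry(Address, bool) is the NegativeEntry form of the cache declared later in this header.

    void recordMiss(HeapDoesNotContainCache* cache, Address candidate)
    {
        // Safe precisely because no object overlaps any part of this aligned page.
        cache->addEntry(roundToBlinkPageStart(candidate), true);
    }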
213 LargeHeapObject<Header>* next() | 230 LargeHeapObject<Header>* next() |
214 { | 231 { |
215 return m_next; | 232 return m_next; |
216 } | 233 } |
217 | 234 |
218 size_t size() | 235 size_t size() |
219 { | 236 { |
220         return heapObjectHeader()->size() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); | 237         return heapObjectHeader()->size() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); |
221 } | 238 } |
222 | 239 |
223 Address payload() { return heapObjectHeader()->payload(); } | 240 Address payload() { return heapObjectHeader()->payload(); } |
224 size_t payloadSize() { return heapObjectHeader()->payloadSize(); } | 241 size_t payloadSize() { return heapObjectHeader()->payloadSize(); } |
225 | 242 |
226 Header* heapObjectHeader() | 243 Header* heapObjectHeader() |
227 { | 244 { |
228         Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); | 245         Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); |
229 return reinterpret_cast<Header*>(headerAddress); | 246 return reinterpret_cast<Header*>(headerAddress); |
230 } | 247 } |
231 | 248 |
232 bool isMarked(); | 249 bool isMarked(); |
233 void unmark(); | 250 void unmark(); |
234 void getStats(HeapStats&); | 251 void getStats(HeapStats&); |
235 void mark(Visitor*); | 252 void mark(Visitor*); |
236 void finalize(); | 253 void finalize(); |
237 | 254 |
238 private: | 255 private: |
239 friend class Heap; | |
240 friend class ThreadHeap<Header>; | 256 friend class ThreadHeap<Header>; |
241 | 257 |
242 LargeHeapObject<Header>* m_next; | 258 LargeHeapObject<Header>* m_next; |
243 }; | 259 }; |
244 | 260 |
245 // The BasicObjectHeader is the minimal object header. It is used when | 261 // The BasicObjectHeader is the minimal object header. It is used when |
246 // encountering heap space of size allocationGranularity to mark it as | 262 // encountering heap space of size allocationGranularity to mark it as |
247 // a freelist entry. | 263 // a freelist entry. |
248 class PLATFORM_EXPORT BasicObjectHeader { | 264 class PLATFORM_EXPORT BasicObjectHeader { |
249 public: | 265 public: |
(...skipping 182 matching lines...)
432 template<typename Header> | 448 template<typename Header> |
433 class HeapPage : public BaseHeapPage { | 449 class HeapPage : public BaseHeapPage { |
434 public: | 450 public: |
435 HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*); | 451 HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*); |
436 | 452 |
437 void link(HeapPage**); | 453 void link(HeapPage**); |
438 static void unlink(HeapPage*, HeapPage**); | 454 static void unlink(HeapPage*, HeapPage**); |
439 | 455 |
440 bool isEmpty(); | 456 bool isEmpty(); |
441 | 457 |
| 458     // Returns true for the whole blinkPageSize page that this page occupies, |
| 459     // including the header and the unmapped guard page at the start. That |
| 460     // ensures a negative result can be used to populate the negative page cache. |
442 bool contains(Address addr) | 461 bool contains(Address addr) |
443 { | 462 { |
444 Address blinkPageStart = roundToBlinkPageStart(address()); | 463 Address blinkPageStart = roundToBlinkPageStart(address()); |
445         return blinkPageStart <= addr && (blinkPageStart + blinkPageSize) > addr; | 464         ASSERT(blinkPageStart == address() - osPageSize()); // Page is at aligned address plus guard page size. |
| 465         return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; |
446 } | 466 } |
447 | 467 |
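The layout the ASSERT encodes, sketched for one aligned Blink page (the guard page size is osPageSize, as implied by the assertion):

    blinkPageStart                                      blinkPageStart + blinkPageSize
    |<- guard page (unmapped) ->|<- HeapPage header ->|<-------- payload -------->|
                                ^ address()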
448 HeapPage* next() { return m_next; } | 468 HeapPage* next() { return m_next; } |
449 | 469 |
450 Address payload() | 470 Address payload() |
451 { | 471 { |
452 return address() + sizeof(*this) + headerPadding<Header>(); | 472 return address() + sizeof(*this) + headerPadding<Header>(); |
453 } | 473 } |
454 | 474 |
455 static size_t payloadSize() | 475 static size_t payloadSize() |
456 { | 476 { |
457         return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask; | 477         return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask; |
458 } | 478 } |
459 | 479 |
460 Address end() { return payload() + payloadSize(); } | 480 Address end() { return payload() + payloadSize(); } |
461 | 481 |
462 void getStats(HeapStats&); | 482 void getStats(HeapStats&); |
463 void clearMarks(); | 483 void clearMarks(); |
464 void sweep(); | 484 void sweep(); |
465 void clearObjectStartBitMap(); | 485 void clearObjectStartBitMap(); |
466 void finalize(Header*); | 486 void finalize(Header*); |
467 virtual bool checkAndMarkPointer(Visitor*, Address); | 487 virtual void checkAndMarkPointer(Visitor*, Address) OVERRIDE; |
468 #if ENABLE(GC_TRACING) | 488 #if ENABLE(GC_TRACING) |
469 const GCInfo* findGCInfo(Address) OVERRIDE; | 489 const GCInfo* findGCInfo(Address) OVERRIDE; |
470 #endif | 490 #endif |
471 ThreadHeap<Header>* heap() { return m_heap; } | 491 ThreadHeap<Header>* heap() { return m_heap; } |
472 #if defined(ADDRESS_SANITIZER) | 492 #if defined(ADDRESS_SANITIZER) |
473 void poisonUnmarkedObjects(); | 493 void poisonUnmarkedObjects(); |
474 #endif | 494 #endif |
475 | 495 |
476 protected: | 496 protected: |
477 Header* findHeaderFromAddress(Address); | 497 Header* findHeaderFromAddress(Address); |
478 void populateObjectStartBitMap(); | 498 void populateObjectStartBitMap(); |
479 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } | 499 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } |
480 TraceCallback traceCallback(Header*); | 500 TraceCallback traceCallback(Header*); |
481 bool hasVTable(Header*); | 501 bool hasVTable(Header*); |
482 | 502 |
483 HeapPage<Header>* m_next; | 503 HeapPage<Header>* m_next; |
484 ThreadHeap<Header>* m_heap; | 504 ThreadHeap<Header>* m_heap; |
485 bool m_objectStartBitMapComputed; | 505 bool m_objectStartBitMapComputed; |
486 uint8_t m_objectStartBitMap[reservedForObjectBitMap]; | 506 uint8_t m_objectStartBitMap[reservedForObjectBitMap]; |
487 | 507 |
488 friend class ThreadHeap<Header>; | 508 friend class ThreadHeap<Header>; |
489 }; | 509 }; |
490 | 510 |
491 // A HeapContainsCache provides a fast way of taking an arbitrary | 511 class AddressEntry { |
| 512 public: |
| 513 AddressEntry() : m_address(0) { } |
| 514 |
| 515 explicit AddressEntry(Address address) : m_address(address) { } |
| 516 |
| 517 Address address() const { return m_address; } |
| 518 |
| 519 private: |
| 520 Address m_address; |
| 521 }; |
| 522 |
| 523 class PositiveEntry : public AddressEntry { |
| 524 public: |
| 525 PositiveEntry() |
| 526 : AddressEntry() |
| 527 , m_containingPage(0) |
| 528 { |
| 529 } |
| 530 |
| 531 PositiveEntry(Address address, BaseHeapPage* containingPage) |
| 532 : AddressEntry(address) |
| 533 , m_containingPage(containingPage) |
| 534 { |
| 535 } |
| 536 |
| 537 BaseHeapPage* result() const { return m_containingPage; } |
| 538 |
| 539 typedef BaseHeapPage* LookupResult; |
| 540 |
| 541 private: |
| 542 BaseHeapPage* m_containingPage; |
| 543 }; |
| 544 |
| 545 class NegativeEntry : public AddressEntry { |
| 546 public: |
| 547 NegativeEntry() : AddressEntry() { } |
| 548 |
| 549 NegativeEntry(Address address, bool) : AddressEntry(address) { } |
| 550 |
| 551 bool result() const { return true; } |
| 552 |
| 553 typedef bool LookupResult; |
| 554 }; |
| 555 |
| 556 // A HeapExtentCache provides a fast way of taking an arbitrary |
492 // pointer-sized word, and determining whether it can be interpreted | 557 // pointer-sized word, and determining whether it can be interpreted |
493 // as a pointer to an area that is managed by the garbage collected | 558 // as a pointer to an area that is managed by the garbage collected |
494 // Blink heap. There is a cache of 'pages' that have previously been | 559 // Blink heap. There is a cache of 'pages' that have previously been |
495 // determined to be either wholly inside or wholly outside the | 560 // determined to be wholly inside the heap. The size of these pages must be |
496 // heap. The size of these pages must be smaller than the allocation | 561 // smaller than the allocation alignment of the heap pages. We determine |
497 // alignment of the heap pages. We determine on-heap-ness by rounding | 562 // on-heap-ness by rounding down the pointer to the nearest page and looking up |
498 // down the pointer to the nearest page and looking up the page in the | 563 // the page in the cache. If there is a miss in the cache we can ask the heap |
499 // cache. If there is a miss in the cache we ask the heap to determine | 564 // to determine the status of the pointer by iterating over all of the heap. |
500 // the status of the pointer by iterating over all of the heap. The | 565 // The result is then cached in the two-way associative page cache. |
501 // result is then cached in the two-way associative page cache. | |
502 // | 566 // |
503 // A HeapContainsCache is both a positive and negative | 567 // A HeapContainsCache is a positive cache. Therefore, it must be flushed when |
504 // cache. Therefore, it must be flushed both when new memory is added | 568 // memory is removed from the Blink heap. The HeapDoesNotContainCache is a |
505 // and when memory is removed from the Blink heap. | 569 // negative cache, so it must be flushed when memory is added to the heap. |
506 class HeapContainsCache { | 570 template<typename Entry> |
| 571 class HeapExtentCache { |
507 public: | 572 public: |
508 HeapContainsCache(); | 573 HeapExtentCache() |
| 574 : m_entries(adoptArrayPtr(new Entry[HeapExtentCache::numberOfEntries])) |
| 575 , m_hasEntries(false) |
| 576 { |
| 577 } |
509 | 578 |
510 void flush(); | 579 void flush(); |
511 bool contains(Address); | 580 bool contains(Address); |
| 581 bool isEmpty() { return !m_hasEntries; } |
512 | 582 |
513 // Perform a lookup in the cache. | 583 // Perform a lookup in the cache. |
514 // | 584 // |
515 // If lookup returns false the argument address was not found in | 585 // If lookup returns null/false the argument address was not found in |
516 // the cache and it is unknown if the address is in the Blink | 586 // the cache and it is unknown if the address is in the Blink |
517 // heap. | 587 // heap. |
518 // | 588 // |
519 // If lookup returns true the argument address was found in the | 589 // If lookup returns true/a page, the argument address was found in the |
520 // cache. In that case, the address is in the heap if the base | 590 // cache. For the HeapContainsCache this means the address is in the heap. |
521 // heap page out parameter is different from 0 and is not in the | 591 // For the HeapDoesNotContainCache this means the address is not in the |
522 // heap if the base heap page out parameter is 0. | 592 // heap. |
523 bool lookup(Address, BaseHeapPage**); | 593 PLATFORM_EXPORT typename Entry::LookupResult lookup(Address); |
524 | 594 |
525 // Add an entry to the cache. Use a 0 base heap page pointer to | 595 // Add an entry to the cache. |
526 // add a negative entry. | 596 PLATFORM_EXPORT void addEntry(Address, typename Entry::LookupResult); |
527 void addEntry(Address, BaseHeapPage*); | |
528 | 597 |
529 private: | 598 private: |
530 class Entry { | |
531 public: | |
532 Entry() | |
533 : m_address(0) | |
534 , m_containingPage(0) | |
535 { | |
536 } | |
537 | |
538 Entry(Address address, BaseHeapPage* containingPage) | |
539 : m_address(address) | |
540 , m_containingPage(containingPage) | |
541 { | |
542 } | |
543 | |
544 BaseHeapPage* containingPage() { return m_containingPage; } | |
545 Address address() { return m_address; } | |
546 | |
547 private: | |
548 Address m_address; | |
549 BaseHeapPage* m_containingPage; | |
550 }; | |
551 | |
552 static const int numberOfEntriesLog2 = 12; | 599 static const int numberOfEntriesLog2 = 12; |
553 static const int numberOfEntries = 1 << numberOfEntriesLog2; | 600 static const int numberOfEntries = 1 << numberOfEntriesLog2; |
554 | 601 |
555 static size_t hash(Address); | 602 static size_t hash(Address); |
556 | 603 |
557 WTF::OwnPtr<HeapContainsCache::Entry[]> m_entries; | 604 WTF::OwnPtr<Entry[]> m_entries; |
| 605 bool m_hasEntries; |
558 | 606 |
559 friend class ThreadState; | 607 friend class ThreadState; |
560 }; | 608 }; |
561 | 609 |
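The flush discipline described in the comment above, restated as the two call sites one would expect. The placement is illustrative, not taken from this CL; flushHeapDoesNotContainCache is declared on Heap further down, and ThreadState owns the positive cache.

    // Adding memory to the heap can only invalidate "not in heap" answers.
    void onHeapPageAdded() { Heap::flushHeapDoesNotContainCache(); }

    // Removing memory from the heap can only invalidate "in heap" answers.
    void onHeapPageRemoved(ThreadState* state) { state->heapContainsCache()->flush(); }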
| 610 // Normally these would be typedefs instead of subclasses, but that makes them |
| 611 // very hard to forward declare. |
| 612 class HeapContainsCache : public HeapExtentCache<PositiveEntry> { |
| 613 public: |
| 614 BaseHeapPage* lookup(Address); |
| 615 void addEntry(Address, BaseHeapPage*); |
| 616 }; |
| 617 |
| 618 class HeapDoesNotContainCache : public HeapExtentCache<NegativeEntry> { }; |
| 619 |
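A sketch of the intended fast path: consult the cache, fall back to walking the heaps on a miss, then cache the verdict. The slow-path helper is hypothetical; lookup and addEntry are the HeapContainsCache members declared just above.

    BaseHeapPage* pageForAddress(HeapContainsCache* cache, Address address)
    {
        if (BaseHeapPage* page = cache->lookup(address))
            return page; // hit: address is known to be on a heap page

        // Hypothetical slow path that iterates over all thread heaps.
        BaseHeapPage* page = findPageByWalkingAllHeaps(address);
        if (page)
            cache->addEntry(address, page);
        return page;
    }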
562 // The CallbackStack contains all the visitor callbacks used to trace and mark | 620 // The CallbackStack contains all the visitor callbacks used to trace and mark |
563 // objects. A specific CallbackStack instance contains at most bufferSize elements. | 621 // objects. A specific CallbackStack instance contains at most bufferSize elements. |
564 // If more space is needed a new CallbackStack instance is created and chained | 622 // If more space is needed a new CallbackStack instance is created and chained |
565 // together with the former instance. I.e. a logical CallbackStack can be made of | 623 // together with the former instance. I.e. a logical CallbackStack can be made of |
566 // multiple chained CallbackStack object instances. | 624 // multiple chained CallbackStack object instances. |
567 // There are two logical callback stacks. One containing all the marking callbacks and | 625 // There are two logical callback stacks. One containing all the marking callbacks and |
568 // one containing the weak pointer callbacks. | 626 // one containing the weak pointer callbacks. |
569 class CallbackStack { | 627 class CallbackStack { |
570 public: | 628 public: |
571 CallbackStack(CallbackStack** first) | 629 CallbackStack(CallbackStack** first) |
(...skipping 50 matching lines...)
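A minimal sketch, under assumed names, of the chaining scheme the comment describes (the real class body is elided above; the block capacity and member layout here are illustrative):

    #include <cstddef>

    struct Block {
        static const size_t bufferSize = 4; // tiny capacity, for illustration only
        void* m_buffer[bufferSize];
        size_t m_used;
        Block* m_next; // the previously filled block, or 0

        explicit Block(Block** first) : m_used(0), m_next(*first) { *first = this; }
    };

    void push(Block** first, void* item)
    {
        Block* head = *first;
        if (!head || head->m_used == Block::bufferSize)
            head = new Block(first); // chain a fresh block in front of the full one
        head->m_buffer[head->m_used++] = item;
    }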
622 // Non-template super class used to pass a heap around to other classes. | 680 // Non-template super class used to pass a heap around to other classes. |
623 class BaseHeap { | 681 class BaseHeap { |
624 public: | 682 public: |
625 virtual ~BaseHeap() { } | 683 virtual ~BaseHeap() { } |
626 | 684 |
627 // Find the page in this thread heap containing the given | 685 // Find the page in this thread heap containing the given |
628 // address. Returns 0 if the address is not contained in any | 686 // address. Returns 0 if the address is not contained in any |
629 // page in this thread heap. | 687 // page in this thread heap. |
630 virtual BaseHeapPage* heapPageFromAddress(Address) = 0; | 688 virtual BaseHeapPage* heapPageFromAddress(Address) = 0; |
631 | 689 |
632 // Find the large object in this thread heap containing the given | |
633 // address. Returns 0 if the address is not contained in any | |
634 // page in this thread heap. | |
635 virtual BaseHeapPage* largeHeapObjectFromAddress(Address) = 0; | |
636 | |
637 #if ENABLE(GC_TRACING) | 690 #if ENABLE(GC_TRACING) |
638 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address) = 0; | 691 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address) = 0; |
639 #endif | 692 #endif |
640 | 693 |
641 // Check if the given address could point to an object in this | |
642 // heap. If so, find the start of that object and mark it using | |
643 // the given Visitor. | |
644 // | |
645 // Returns true if the object was found and marked, returns false | |
646 // otherwise. | |
647 // | |
648 // This is used during conservative stack scanning to | |
649 // conservatively mark all objects that could be referenced from | |
650 // the stack. | |
651 virtual bool checkAndMarkLargeHeapObject(Visitor*, Address) = 0; | |
652 | |
653 // Sweep this part of the Blink heap. This finalizes dead objects | 694 // Sweep this part of the Blink heap. This finalizes dead objects |
654 // and builds freelists for all the unused memory. | 695 // and builds freelists for all the unused memory. |
655 virtual void sweep() = 0; | 696 virtual void sweep() = 0; |
656 | 697 |
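What a sweep pass does per page, sketched with hypothetical traversal helpers: finalize and addToFreeList are declared in this header, while firstHeader, nextHeader, and the mark accessors on Header are assumptions for illustration.

    template<typename Header>
    void sweepPage(HeapPage<Header>* page, ThreadHeap<Header>* heap)
    {
        for (Header* header = firstHeader(page); header; header = nextHeader(header)) {
            if (header->isMarked()) {
                header->unmark(); // survivor: clear the mark for the next cycle
            } else {
                page->finalize(header); // run the finalizer for the dead object
                heap->addToFreeList(reinterpret_cast<Address>(header), header->size());
            }
        }
    }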
657 // Forcefully finalize all objects in this part of the Blink heap | 698 // Forcefully finalize all objects in this part of the Blink heap |
658 // (potentially with the exception of one object). This is used | 699 // (potentially with the exception of one object). This is used |
659 // during thread termination to make sure that all objects for the | 700 // during thread termination to make sure that all objects for the |
660 // dying thread are finalized. | 701 // dying thread are finalized. |
661 virtual void assertEmpty() = 0; | 702 virtual void assertEmpty() = 0; |
662 | 703 |
(...skipping 22 matching lines...)
685 // (potentially adding new pages to the heap), to find and mark | 726 // (potentially adding new pages to the heap), to find and mark |
686 // objects during conservative stack scanning and to sweep the set of | 727 // objects during conservative stack scanning and to sweep the set of |
687 // pages after a GC. | 728 // pages after a GC. |
688 template<typename Header> | 729 template<typename Header> |
689 class ThreadHeap : public BaseHeap { | 730 class ThreadHeap : public BaseHeap { |
690 public: | 731 public: |
691 ThreadHeap(ThreadState*); | 732 ThreadHeap(ThreadState*); |
692 virtual ~ThreadHeap(); | 733 virtual ~ThreadHeap(); |
693 | 734 |
694 virtual BaseHeapPage* heapPageFromAddress(Address); | 735 virtual BaseHeapPage* heapPageFromAddress(Address); |
695 virtual BaseHeapPage* largeHeapObjectFromAddress(Address); | |
696 #if ENABLE(GC_TRACING) | 736 #if ENABLE(GC_TRACING) |
697 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address); | 737 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address); |
698 #endif | 738 #endif |
699 virtual bool checkAndMarkLargeHeapObject(Visitor*, Address); | |
700 virtual void sweep(); | 739 virtual void sweep(); |
701 virtual void assertEmpty(); | 740 virtual void assertEmpty(); |
702 virtual void clearFreeLists(); | 741 virtual void clearFreeLists(); |
703 virtual void clearMarks(); | 742 virtual void clearMarks(); |
704 #ifndef NDEBUG | 743 #ifndef NDEBUG |
705 virtual void getScannedStats(HeapStats&); | 744 virtual void getScannedStats(HeapStats&); |
706 #endif | 745 #endif |
707 | 746 |
708 virtual void makeConsistentForGC(); | 747 virtual void makeConsistentForGC(); |
709 virtual bool isConsistentForGC(); | 748 virtual bool isConsistentForGC(); |
710 | 749 |
711 ThreadState* threadState() { return m_threadState; } | 750 ThreadState* threadState() { return m_threadState; } |
712 HeapStats& stats() { return m_threadState->stats(); } | 751 HeapStats& stats() { return m_threadState->stats(); } |
713     HeapContainsCache* heapContainsCache() { return m_threadState->heapContainsCache(); } | 752     void flushHeapContainsCache() |
| 753     { |
| 754         m_threadState->heapContainsCache()->flush(); |
| 755     } |
714 | 756 |
715 inline Address allocate(size_t, const GCInfo*); | 757 inline Address allocate(size_t, const GCInfo*); |
716 void addToFreeList(Address, size_t); | 758 void addToFreeList(Address, size_t); |
717 void addPageToPool(HeapPage<Header>*); | 759 void addPageToPool(HeapPage<Header>*); |
718 inline static size_t roundedAllocationSize(size_t size) | 760 inline static size_t roundedAllocationSize(size_t size) |
719 { | 761 { |
720 return allocationSizeFromSize(size) - sizeof(Header); | 762 return allocationSizeFromSize(size) - sizeof(Header); |
721 } | 763 } |
722 | 764 |
723 private: | 765 private: |
(...skipping 123 matching lines...)
847 #endif | 889 #endif |
848 | 890 |
849 // Collect heap stats for all threads attached to the Blink | 891 // Collect heap stats for all threads attached to the Blink |
850 // garbage collector. Should only be called during garbage | 892 // garbage collector. Should only be called during garbage |
851 // collection where threads are known to be at safe points. | 893 // collection where threads are known to be at safe points. |
852 static void getStats(HeapStats*); | 894 static void getStats(HeapStats*); |
853 | 895 |
854 static bool isConsistentForGC(); | 896 static bool isConsistentForGC(); |
855 static void makeConsistentForGC(); | 897 static void makeConsistentForGC(); |
856 | 898 |
| 899 static void flushHeapDoesNotContainCache(); |
| 900     static bool heapDoesNotContainCacheIsEmpty() { return s_heapDoesNotContainCache->isEmpty(); } |
| 901 |
| 902 private: |
857 static Visitor* s_markingVisitor; | 903 static Visitor* s_markingVisitor; |
858 | 904 |
859 static CallbackStack* s_markingStack; | 905 static CallbackStack* s_markingStack; |
860 static CallbackStack* s_weakCallbackStack; | 906 static CallbackStack* s_weakCallbackStack; |
| 907 static HeapDoesNotContainCache* s_heapDoesNotContainCache; |
861 static bool s_shutdownCalled; | 908 static bool s_shutdownCalled; |
| 909 friend class ThreadState; |
862 }; | 910 }; |
863 | 911 |
864 // The NoAllocationScope class is used in debug mode to catch unwanted | 912 // The NoAllocationScope class is used in debug mode to catch unwanted |
865 // allocations. E.g. allocations during GC. | 913 // allocations. E.g. allocations during GC. |
866 template<ThreadAffinity Affinity> | 914 template<ThreadAffinity Affinity> |
867 class NoAllocationScope { | 915 class NoAllocationScope { |
868 public: | 916 public: |
869 NoAllocationScope() : m_active(true) { enter(); } | 917 NoAllocationScope() : m_active(true) { enter(); } |
870 | 918 |
871 explicit NoAllocationScope(bool active) : m_active(active) { enter(); } | 919 explicit NoAllocationScope(bool active) : m_active(active) { enter(); } |
(...skipping 1331 matching lines...)
2203 // to export. This forces it to export all the methods from ThreadHeap. | 2251 // to export. This forces it to export all the methods from ThreadHeap. |
2204 template<> void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo*); | 2252 template<> void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo*); |
2205 template<> void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo*); | 2253 template<> void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo*); |
2206 extern template class PLATFORM_EXPORT ThreadHeap<FinalizedHeapObjectHeader>; | 2254 extern template class PLATFORM_EXPORT ThreadHeap<FinalizedHeapObjectHeader>; |
2207 extern template class PLATFORM_EXPORT ThreadHeap<HeapObjectHeader>; | 2255 extern template class PLATFORM_EXPORT ThreadHeap<HeapObjectHeader>; |
2208 #endif | 2256 #endif |
2209 | 2257 |
2210 } | 2258 } |
2211 | 2259 |
2212 #endif // Heap_h | 2260 #endif // Heap_h |