OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 15 matching lines...) |
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
29 */ | 29 */ |
30 | 30 |
31 #ifndef Heap_h | 31 #ifndef Heap_h |
32 #define Heap_h | 32 #define Heap_h |
33 | 33 |
34 #include "platform/PlatformExport.h" | 34 #include "platform/PlatformExport.h" |
35 #include "platform/heap/GCInfo.h" | 35 #include "platform/heap/GCInfo.h" |
| 36 #include "platform/heap/HeapPage.h" |
36 #include "platform/heap/ThreadState.h" | 37 #include "platform/heap/ThreadState.h" |
37 #include "platform/heap/Visitor.h" | 38 #include "platform/heap/Visitor.h" |
38 #include "public/platform/WebThread.h" | |
39 #include "wtf/AddressSanitizer.h" | 39 #include "wtf/AddressSanitizer.h" |
40 #include "wtf/Assertions.h" | 40 #include "wtf/Assertions.h" |
41 #include "wtf/Atomics.h" | 41 #include "wtf/Atomics.h" |
42 #include "wtf/ContainerAnnotations.h" | |
43 #include "wtf/Forward.h" | 42 #include "wtf/Forward.h" |
44 #include "wtf/PageAllocator.h" | |
45 #include <stdint.h> | |
46 | 43 |
47 namespace blink { | 44 namespace blink { |
48 | 45 |
49 const size_t blinkPageSizeLog2 = 17; | |
50 const size_t blinkPageSize = 1 << blinkPageSizeLog2; | |
51 const size_t blinkPageOffsetMask = blinkPageSize - 1; | |
52 const size_t blinkPageBaseMask = ~blinkPageOffsetMask; | |
53 | |
54 // We allocate pages at random addresses but in groups of | |
55 // blinkPagesPerRegion at a given random address. We group pages so that | |
56 // we do not spread out too much over the address space, which would blow | |
57 // up the page tables and lead to bad performance. | |
58 const size_t blinkPagesPerRegion = 10; | |
59 | |
60 // TODO(nya): Replace this with something like #if ENABLE_NACL. | |
61 #if 0 | |
62 // NaCl's system page size is 64 KB. This causes a problem in Oilpan's heap | |
63 // layout because Oilpan allocates two guard pages for each blink page | |
64 // (whose size is 128 KB). So we don't use guard pages in NaCl. | |
65 const size_t blinkGuardPageSize = 0; | |
66 #else | |
67 const size_t blinkGuardPageSize = WTF::kSystemPageSize; | |
68 #endif | |
69 | |
70 // Double precision floats are more efficient when 8 byte aligned, so we 8 byte | |
71 // align all allocations even on 32 bit. | |
72 const size_t allocationGranularity = 8; | |
73 const size_t allocationMask = allocationGranularity - 1; | |
74 const size_t objectStartBitMapSize = (blinkPageSize + ((8 * allocationGranularity) - 1)) / (8 * allocationGranularity); | |
75 const size_t reservedForObjectBitMap = ((objectStartBitMapSize + allocationMask) & ~allocationMask); | |
76 const size_t maxHeapObjectSizeLog2 = 27; | |
77 const size_t maxHeapObjectSize = 1 << maxHeapObjectSizeLog2; | |
78 const size_t largeObjectSizeThreshold = blinkPageSize / 2; | |
79 | |
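The bitmap and threshold constants above follow directly from blinkPageSize = 2^17 and the 8-byte allocation granularity. The following standalone sketch re-derives the values with local names; it is illustrative only, and the authoritative constants are the ones above.

    #include <cstddef>

    // Local re-derivation of the constants above, for illustration only.
    const size_t kBlinkPageSize = 1 << 17;       // 128 KB
    const size_t kAllocationGranularity = 8;     // all allocations are 8-byte aligned
    const size_t kAllocationMask = kAllocationGranularity - 1;

    // One bitmap bit per possible object start, i.e. per 8-byte slot:
    // (131072 + 63) / 64 = 2048 bytes of bitmap per page.
    const size_t kObjectStartBitMapSize =
        (kBlinkPageSize + ((8 * kAllocationGranularity) - 1)) / (8 * kAllocationGranularity);
    static_assert(kObjectStartBitMapSize == 2048, "one bit per 8-byte slot in a 128 KB page");

    // The reserved area is the bitmap size rounded up to the allocation granularity.
    const size_t kReservedForObjectBitMap =
        (kObjectStartBitMapSize + kAllocationMask) & ~kAllocationMask;
    static_assert(kReservedForObjectBitMap == 2048, "2048 is already 8-byte aligned");

    // Objects above half a blink page (64 KB) are candidates for a LargeObjectPage.
    static_assert(kBlinkPageSize / 2 == 64 * 1024, "largeObjectSizeThreshold is 64 KB");

    int main() { return 0; }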
80 // A zap value used for freed memory that is allowed to be added to the free | |
81 // list in the next addToFreeList(). | |
82 const uint8_t reuseAllowedZapValue = 0x2a; | |
83 // A zap value used for freed memory that is forbidden to be added to the free | |
84 // list in the next addToFreeList(). | |
85 const uint8_t reuseForbiddenZapValue = 0x2c; | |
86 | |
87 // In non-production builds, memory is zapped when it's freed. The zapped | |
88 // memory is zeroed out when the memory is reused in Heap::allocateObject(). | |
89 // In production builds, memory is not zapped (for performance). The memory | |
90 // is just zeroed out when it is added to the free list. | |
91 #if defined(MEMORY_SANITIZER) | |
92 // TODO(kojii): We actually need __msan_poison/unpoison here, but it'll be | |
93 // added later. | |
94 #define SET_MEMORY_INACCESSIBLE(address, size) \ | |
95 FreeList::zapFreedMemory(address, size); | |
96 #define SET_MEMORY_ACCESSIBLE(address, size) \ | |
97 memset((address), 0, (size)) | |
98 #elif ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | |
99 #define SET_MEMORY_INACCESSIBLE(address, size) \ | |
100 FreeList::zapFreedMemory(address, size); \ | |
101 ASAN_POISON_MEMORY_REGION(address, size) | |
102 #define SET_MEMORY_ACCESSIBLE(address, size) \ | |
103 ASAN_UNPOISON_MEMORY_REGION(address, size); \ | |
104 memset((address), 0, (size)) | |
105 #else | |
106 #define SET_MEMORY_INACCESSIBLE(address, size) memset((address), 0, (size)) | |
107 #define SET_MEMORY_ACCESSIBLE(address, size) do { } while (false) | |
108 #endif | |
109 | |
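As the comment above notes, the release configuration skips zapping and simply zeroes freed memory, which makes SET_MEMORY_ACCESSIBLE a no-op on reuse. A minimal standalone sketch of that release-build discipline, with local helper functions standing in for the macros:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Release-style behavior: freed memory is zeroed up front...
    static void setMemoryInaccessible(void* address, size_t size)
    {
        memset(address, 0, size);
    }

    // ...so nothing extra has to happen when the memory is handed out again.
    static void setMemoryAccessible(void*, size_t) { }

    int main()
    {
        uint8_t slot[64];
        memset(slot, 0xab, sizeof(slot));            // pretend a live object occupied this slot
        setMemoryInaccessible(slot, sizeof(slot));   // "free" it
        assert(slot[0] == 0 && slot[63] == 0);       // payload was zeroed at free time
        setMemoryAccessible(slot, sizeof(slot));     // debug/ASan builds would unpoison and zero here
        return 0;
    }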
110 class CallbackStack; | |
111 class FreePagePool; | |
112 class NormalPageHeap; | |
113 class OrphanedPagePool; | |
114 class PageMemory; | |
115 class PageMemoryRegion; | |
116 class WebProcessMemoryDump; | |
117 | |
118 #if ENABLE(GC_PROFILING) | |
119 class TracedValue; | |
120 #endif | |
121 | |
122 // HeapObjectHeader is 4 byte (32 bit) that has the following layout: | |
123 // | |
124 // | gcInfoIndex (14 bit) | DOM mark bit (1 bit) | size (14 bit) | dead bit (1 bit) | freed bit (1 bit) | mark bit (1 bit) | | |
125 // | |
126 // - For non-large objects, 14 bits are enough for |size| because the blink | |
127 // page size is 2^17 bytes and each object is guaranteed to be aligned on | |
128 // a 2^3 byte boundary. | |
129 // - For large objects, |size| is 0. The actual size of a large object is | |
130 // stored in LargeObjectPage::m_payloadSize. | |
131 // - 1 bit used to mark DOM trees for V8. | |
132 // - 14 bits are enough for gcInfoIndex because there are fewer than 2^14 types | |
133 // in Blink. | |
134 const size_t headerDOMMarkBitMask = 1u << 17; | |
135 const size_t headerGCInfoIndexShift = 18; | |
136 const size_t headerGCInfoIndexMask = (static_cast<size_t>((1 << 14) - 1)) << headerGCInfoIndexShift; | |
137 const size_t headerSizeMask = (static_cast<size_t>((1 << 14) - 1)) << 3; | |
138 const size_t headerMarkBitMask = 1; | |
139 const size_t headerFreedBitMask = 2; | |
140 // The dead bit is used for objects that have gone through a GC marking, but did | |
141 // not get swept before a new GC started. In that case we set the dead bit on | |
142 // objects that were not marked in the previous GC to ensure we are not tracing | |
143 // them via a conservatively found pointer. Tracing dead objects could lead to | |
144 // tracing of already finalized objects in another thread's heap which is a | |
145 // use-after-free situation. | |
146 const size_t headerDeadBitMask = 4; | |
147 // On free-list entries we reuse the dead bit to distinguish a normal free-list | |
148 // entry from one that has been promptly freed. | |
149 const size_t headerPromptlyFreedBitMask = headerFreedBitMask | headerDeadBitMask; | |
150 const size_t largeObjectSizeInHeader = 0; | |
151 const size_t gcInfoIndexForFreeListHeader = 0; | |
152 const size_t nonLargeObjectPageSizeMax = 1 << 17; | |
153 | |
154 static_assert(nonLargeObjectPageSizeMax >= blinkPageSize, "max size supported by HeapObjectHeader must at least be blinkPageSize"); | |
155 | |
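The encoding described above can be sanity-checked with a small roundtrip: pack a gcInfoIndex and a size into a 32-bit word with the same shifts and masks, then decode them back. This is a standalone sketch that mirrors the constants above; the real logic lives in HeapObjectHeader below.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Same bit layout as HeapObjectHeader::m_encoded (see constants above).
    const size_t kGCInfoIndexShift = 18;
    const size_t kGCInfoIndexMask = (static_cast<size_t>((1 << 14) - 1)) << kGCInfoIndexShift;
    const size_t kSizeMask = (static_cast<size_t>((1 << 14) - 1)) << 3;
    const size_t kMarkBitMask = 1;

    int main()
    {
        size_t gcInfoIndex = 42;
        size_t size = 64;  // 8-byte aligned and < 2^17, so it fits in the size field
        uint32_t encoded = static_cast<uint32_t>((gcInfoIndex << kGCInfoIndexShift) | size);

        assert(((encoded & kGCInfoIndexMask) >> kGCInfoIndexShift) == gcInfoIndex);
        assert((encoded & kSizeMask) == size);
        assert(!(encoded & kMarkBitMask));  // a freshly encoded header is unmarked

        encoded |= kMarkBitMask;            // what mark() does
        assert(encoded & kMarkBitMask);
        return 0;
    }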
156 class PLATFORM_EXPORT HeapObjectHeader { | |
157 public: | |
158 // If gcInfoIndex is 0, this header is interpreted as a free list header. | |
159 NO_SANITIZE_ADDRESS | |
160 HeapObjectHeader(size_t size, size_t gcInfoIndex) | |
161 { | |
162 #if ENABLE(ASSERT) | |
163 m_magic = magic; | |
164 #endif | |
165 #if ENABLE(GC_PROFILING) | |
166 m_age = 0; | |
167 #endif | |
168 // sizeof(HeapObjectHeader) must be equal to or smaller than | |
169 // allocationGranularity, because HeapObjectHeader is used as a header | |
170 // for a freed entry. Given that the smallest entry size is | |
171 // allocationGranularity, HeapObjectHeader must fit into that size. | |
172 static_assert(sizeof(HeapObjectHeader) <= allocationGranularity, "size of HeapObjectHeader must be smaller than allocationGranularity"); | |
173 #if CPU(64BIT) | |
174 static_assert(sizeof(HeapObjectHeader) == 8, "size of HeapObjectHeader must be 8 byte aligned"); | |
175 #endif | |
176 | |
177 ASSERT(gcInfoIndex < GCInfoTable::maxIndex); | |
178 ASSERT(size < nonLargeObjectPageSizeMax); | |
179 ASSERT(!(size & allocationMask)); | |
180 m_encoded = (gcInfoIndex << headerGCInfoIndexShift) | size | (gcInfoIndex ? 0 : headerFreedBitMask); | |
181 } | |
182 | |
183 NO_SANITIZE_ADDRESS | |
184 bool isFree() const { return m_encoded & headerFreedBitMask; } | |
185 NO_SANITIZE_ADDRESS | |
186 bool isPromptlyFreed() const { return (m_encoded & headerPromptlyFreedBitMask) == headerPromptlyFreedBitMask; } | |
187 NO_SANITIZE_ADDRESS | |
188 void markPromptlyFreed() { m_encoded |= headerPromptlyFreedBitMask; } | |
189 size_t size() const; | |
190 | |
191 NO_SANITIZE_ADDRESS | |
192 size_t gcInfoIndex() const { return (m_encoded & headerGCInfoIndexMask) >> headerGCInfoIndexShift; } | |
193 NO_SANITIZE_ADDRESS | |
194 void setSize(size_t size) { m_encoded = size | (m_encoded & ~headerSizeMask); } | |
195 bool isMarked() const; | |
196 void mark(); | |
197 void unmark(); | |
198 void markDead(); | |
199 bool isDead() const; | |
200 | |
201 Address payload(); | |
202 size_t payloadSize(); | |
203 Address payloadEnd(); | |
204 | |
205 #if ENABLE(ASSERT) | |
206 bool checkHeader() const; | |
207 // Zap magic number with a new magic number that means there was once an | |
208 // object allocated here, but it was freed because nobody marked it during | |
209 // GC. | |
210 void zapMagic(); | |
211 #endif | |
212 | |
213 void finalize(Address, size_t); | |
214 static HeapObjectHeader* fromPayload(const void*); | |
215 | |
216 static const uint16_t magic = 0xfff1; | |
217 static const uint16_t zappedMagic = 0x4321; | |
218 | |
219 #if ENABLE(GC_PROFILING) | |
220 NO_SANITIZE_ADDRESS | |
221 size_t encodedSize() const { return m_encoded; } | |
222 | |
223 NO_SANITIZE_ADDRESS | |
224 size_t age() const { return m_age; } | |
225 | |
226 NO_SANITIZE_ADDRESS | |
227 void incrementAge() | |
228 { | |
229 if (m_age < maxHeapObjectAge) | |
230 m_age++; | |
231 } | |
232 #endif | |
233 | |
234 #if !ENABLE(ASSERT) && !ENABLE(GC_PROFILING) && CPU(64BIT) | |
235 // This method is needed just to prevent compilers from removing m_padding. | |
236 uint64_t unusedMethod() const { return m_padding; } | |
237 #endif | |
238 | |
239 private: | |
240 uint32_t m_encoded; | |
241 #if ENABLE(ASSERT) | |
242 uint16_t m_magic; | |
243 #endif | |
244 #if ENABLE(GC_PROFILING) | |
245 uint8_t m_age; | |
246 #endif | |
247 | |
248 // On 64 bit architectures, we intentionally add 4 byte padding immediately | |
249 // after the HeapObjectHeader. This is because: | |
250 // | |
251 // | HeapObjectHeader (4 byte) | padding (4 byte) | object payload (8 * n byte) | | |
252 // ^8 byte aligned ^8 byte aligned | |
253 // | |
254 // is better than: | |
255 // | |
256 // | HeapObjectHeader (4 byte) | object payload (8 * n byte) | padding (4 byte) | | |
257 // ^4 byte aligned ^8 byte aligned ^4 byte aligned | |
258 // | |
259 // since the former layout aligns both the header and the payload to 8 bytes. | |
260 #if !ENABLE(ASSERT) && !ENABLE(GC_PROFILING) && CPU(64BIT) | |
261 uint32_t m_padding; | |
262 #endif | |
263 }; | |
264 | |
265 class FreeListEntry final : public HeapObjectHeader { | |
266 public: | |
267 NO_SANITIZE_ADDRESS | |
268 explicit FreeListEntry(size_t size) | |
269 : HeapObjectHeader(size, gcInfoIndexForFreeListHeader) | |
270 , m_next(nullptr) | |
271 { | |
272 #if ENABLE(ASSERT) | |
273 ASSERT(size >= sizeof(HeapObjectHeader)); | |
274 zapMagic(); | |
275 #endif | |
276 } | |
277 | |
278 Address address() { return reinterpret_cast<Address>(this); } | |
279 | |
280 NO_SANITIZE_ADDRESS | |
281 void unlink(FreeListEntry** prevNext) | |
282 { | |
283 *prevNext = m_next; | |
284 m_next = nullptr; | |
285 } | |
286 | |
287 NO_SANITIZE_ADDRESS | |
288 void link(FreeListEntry** prevNext) | |
289 { | |
290 m_next = *prevNext; | |
291 *prevNext = this; | |
292 } | |
293 | |
294 NO_SANITIZE_ADDRESS | |
295 FreeListEntry* next() const { return m_next; } | |
296 | |
297 NO_SANITIZE_ADDRESS | |
298 void append(FreeListEntry* next) | |
299 { | |
300 ASSERT(!m_next); | |
301 m_next = next; | |
302 } | |
303 | |
304 private: | |
305 FreeListEntry* m_next; | |
306 }; | |
307 | |
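FreeListEntry's link/unlink use the pointer-to-pointer idiom: both take the address of the slot that currently points at the entry (the list head or a previous entry's m_next), so one code path covers both positions. A tiny standalone sketch of the idiom, using a generic Node rather than the real FreeListEntry:

    #include <cassert>

    struct Node {
        Node* next = nullptr;

        void link(Node** prevNext)     // insert *this at the position prevNext points into
        {
            next = *prevNext;
            *prevNext = this;
        }

        void unlink(Node** prevNext)   // remove *this, where *prevNext == this
        {
            *prevNext = next;
            next = nullptr;
        }
    };

    int main()
    {
        Node a, b;
        Node* head = nullptr;

        a.link(&head);          // list: a
        b.link(&head);          // list: b -> a
        assert(head == &b && b.next == &a);

        b.unlink(&head);        // list: a
        assert(head == &a && b.next == nullptr);
        return 0;
    }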
308 // Blink heap pages are set up with a guard page before and after the payload. | |
309 inline size_t blinkPagePayloadSize() | |
310 { | |
311 return blinkPageSize - 2 * blinkGuardPageSize; | |
312 } | |
313 | |
314 // Blink heap pages are aligned to the Blink heap page size. | |
315 // Therefore, the start of a Blink page can be obtained by | |
316 // rounding down to the Blink page size. | |
317 inline Address roundToBlinkPageStart(Address address) | |
318 { | |
319 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask); | |
320 } | |
321 | |
322 inline Address roundToBlinkPageEnd(Address address) | |
323 { | |
324 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address - 1) & blinkPageBaseMask) + blinkPageSize; | |
325 } | |
326 | |
327 // Masks an address down to the enclosing blink page base address. | |
328 inline Address blinkPageAddress(Address address) | |
329 { | |
330 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask); | |
331 } | |
332 | |
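With the 128 KB blinkPageSize, the rounding helpers above behave as follows: any interior address maps back to its page base, and roundToBlinkPageEnd leaves an exact page boundary where it is because of the address - 1 adjustment. A standalone sketch using plain integers in place of Address:

    #include <cassert>
    #include <cstdint>

    const uintptr_t kBlinkPageSize = 1 << 17;             // 128 KB
    const uintptr_t kBlinkPageOffsetMask = kBlinkPageSize - 1;
    const uintptr_t kBlinkPageBaseMask = ~kBlinkPageOffsetMask;

    uintptr_t roundToPageStart(uintptr_t address) { return address & kBlinkPageBaseMask; }
    uintptr_t roundToPageEnd(uintptr_t address) { return ((address - 1) & kBlinkPageBaseMask) + kBlinkPageSize; }

    int main()
    {
        uintptr_t base = 5 * kBlinkPageSize;              // some page-aligned base
        assert(roundToPageStart(base + 12345) == base);   // interior address -> page start
        assert(roundToPageEnd(base + 12345) == base + kBlinkPageSize);
        assert(roundToPageEnd(base + kBlinkPageSize) == base + kBlinkPageSize); // exact end stays put
        return 0;
    }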
333 inline bool vTableInitialized(void* objectPointer) | |
334 { | |
335 return !!(*reinterpret_cast<Address*>(objectPointer)); | |
336 } | |
337 | |
338 #if ENABLE(ASSERT) | |
339 // Sanity check for a page header address: the address of the page | |
340 // header should be OS page size away from being Blink page size | |
341 // aligned. | |
342 inline bool isPageHeaderAddress(Address address) | |
343 { | |
344 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - blinkGuardPageSize); | |
345 } | |
346 #endif | |
347 | |
348 // BasePage is a base class for NormalPage and LargeObjectPage. | |
349 // | |
350 // - NormalPage is a page whose size is |blinkPageSize|. NormalPage can contain | |
351 // multiple objects in the page. An object whose size is smaller than | |
352 // |largeObjectSizeThreshold| is stored in NormalPage. | |
353 // | |
354 // - LargeObjectPage is a page that contains only one object. The object size | |
355 // is arbitrary. An object whose size is larger than |blinkPageSize| is stored | |
356 // as a single object in LargeObjectPage. | |
357 // | |
358 // Note: An object whose size is between |largeObjectSizeThreshold| and | |
359 // |blinkPageSize| can go to either of NormalPage or LargeObjectPage. | |
360 class BasePage { | |
361 public: | |
362 BasePage(PageMemory*, BaseHeap*); | |
363 virtual ~BasePage() { } | |
364 | |
365 void link(BasePage** previousNext) | |
366 { | |
367 m_next = *previousNext; | |
368 *previousNext = this; | |
369 } | |
370 void unlink(BasePage** previousNext) | |
371 { | |
372 *previousNext = m_next; | |
373 m_next = nullptr; | |
374 } | |
375 BasePage* next() const { return m_next; } | |
376 | |
377 // Virtual methods are slow, so performance-sensitive methods | |
378 // should be defined as non-virtual methods on NormalPage and LargeObjectPage. | |
379 // The following methods are not performance-sensitive. | |
380 virtual size_t objectPayloadSizeForTesting() = 0; | |
381 virtual bool isEmpty() = 0; | |
382 virtual void removeFromHeap() = 0; | |
383 virtual void sweep() = 0; | |
384 virtual void makeConsistentForGC() = 0; | |
385 virtual void makeConsistentForMutator() = 0; | |
386 | |
387 #if defined(ADDRESS_SANITIZER) | |
388 virtual void poisonObjects(ThreadState::ObjectsToPoison, ThreadState::Poisoning) = 0; | |
389 #endif | |
390 // Check if the given address points to an object in this | |
391 // heap page. If so, find the start of that object and mark it | |
392 // using the given Visitor. Otherwise do nothing. The pointer must | |
393 // be within the same blinkPageSize-aligned region as this page. | |
394 // | |
395 // This is used during conservative stack scanning to | |
396 // conservatively mark all objects that could be referenced from | |
397 // the stack. | |
398 virtual void checkAndMarkPointer(Visitor*, Address) = 0; | |
399 virtual void markOrphaned(); | |
400 | |
401 virtual void takeSnapshot(String dumpBaseName, size_t pageIndex, ThreadState::GCSnapshotInfo&, size_t* outFreeSize, size_t* outFreeCount) = 0; | |
402 #if ENABLE(GC_PROFILING) | |
403 virtual const GCInfo* findGCInfo(Address) = 0; | |
404 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) = 0; | |
405 virtual void incrementMarkedObjectsAge() = 0; | |
406 virtual void countMarkedObjects(ClassAgeCountsMap&) = 0; | |
407 virtual void countObjectsToSweep(ClassAgeCountsMap&) = 0; | |
408 #endif | |
409 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) | |
410 virtual bool contains(Address) = 0; | |
411 #endif | |
412 virtual size_t size() = 0; | |
413 virtual bool isLargeObjectPage() { return false; } | |
414 | |
415 Address address() { return reinterpret_cast<Address>(this); } | |
416 PageMemory* storage() const { return m_storage; } | |
417 BaseHeap* heap() const { return m_heap; } | |
418 bool orphaned() { return !m_heap; } | |
419 bool terminating() { return m_terminating; } | |
420 void setTerminating() { m_terminating = true; } | |
421 | |
422 // Returns true if this page has been swept by the ongoing lazy sweep. | |
423 bool hasBeenSwept() const { return m_swept; } | |
424 | |
425 void markAsSwept() | |
426 { | |
427 ASSERT(!m_swept); | |
428 m_swept = true; | |
429 } | |
430 | |
431 void markAsUnswept() | |
432 { | |
433 ASSERT(m_swept); | |
434 m_swept = false; | |
435 } | |
436 | |
437 private: | |
438 PageMemory* m_storage; | |
439 BaseHeap* m_heap; | |
440 BasePage* m_next; | |
441 // Whether the page is part of a terminating thread or not. | |
442 bool m_terminating; | |
443 | |
444 // Track the sweeping state of a page. Set to true once | |
445 // the lazy sweep has processed it. | |
446 // | |
447 // Set to false at the start of a sweep, true upon completion | |
448 // of lazy sweeping. | |
449 bool m_swept; | |
450 friend class BaseHeap; | |
451 }; | |
452 | |
453 class NormalPage final : public BasePage { | |
454 public: | |
455 NormalPage(PageMemory*, BaseHeap*); | |
456 | |
457 Address payload() | |
458 { | |
459 return address() + pageHeaderSize(); | |
460 } | |
461 size_t payloadSize() | |
462 { | |
463 return (blinkPagePayloadSize() - pageHeaderSize()) & ~allocationMask; | |
464 } | |
465 Address payloadEnd() { return payload() + payloadSize(); } | |
466 bool containedInObjectPayload(Address address) | |
467 { | |
468 return payload() <= address && address < payloadEnd(); | |
469 } | |
470 | |
471 size_t objectPayloadSizeForTesting() override; | |
472 bool isEmpty() override; | |
473 void removeFromHeap() override; | |
474 void sweep() override; | |
475 void makeConsistentForGC() override; | |
476 void makeConsistentForMutator() override; | |
477 #if defined(ADDRESS_SANITIZER) | |
478 void poisonObjects(ThreadState::ObjectsToPoison, ThreadState::Poisoning) override; | |
479 #endif | |
480 void checkAndMarkPointer(Visitor*, Address) override; | |
481 void markOrphaned() override; | |
482 | |
483 void takeSnapshot(String dumpBaseName, size_t pageIndex, ThreadState::GCSnapshotInfo&, size_t* outFreeSize, size_t* outFreeCount) override; | |
484 #if ENABLE(GC_PROFILING) | |
485 const GCInfo* findGCInfo(Address) override; | |
486 void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override; | |
487 void incrementMarkedObjectsAge() override; | |
488 void countMarkedObjects(ClassAgeCountsMap&) override; | |
489 void countObjectsToSweep(ClassAgeCountsMap&) override; | |
490 #endif | |
491 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) | |
492 // Returns true for the whole blinkPageSize page that the page is on, even | |
493 // for the header, and the unmapped guard page at the start. That ensures | |
494 // the result can be used to populate the negative page cache. | |
495 bool contains(Address) override; | |
496 #endif | |
497 size_t size() override { return blinkPageSize; } | |
498 static size_t pageHeaderSize() | |
499 { | |
500 // Compute the amount of padding we have to add to a header to make | |
501 // the size of the header plus the padding a multiple of 8 bytes. | |
502 size_t paddingSize = (sizeof(NormalPage) + allocationGranularity - (sizeof(HeapObjectHeader) % allocationGranularity)) % allocationGranularity; | |
503 return sizeof(NormalPage) + paddingSize; | |
504 } | |
505 | |
506 | |
507 NormalPageHeap* heapForNormalPage(); | |
508 void clearObjectStartBitMap(); | |
509 | |
510 private: | |
511 HeapObjectHeader* findHeaderFromAddress(Address); | |
512 void populateObjectStartBitMap(); | |
513 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } | |
514 | |
515 bool m_objectStartBitMapComputed; | |
516 uint8_t m_objectStartBitMap[reservedForObjectBitMap]; | |
517 }; | |
518 | |
519 // Large allocations are allocated as separate objects and linked in a list. | |
520 // | |
521 // In order to use the same memory allocation routines for everything allocated | |
522 // in the heap, large objects are considered heap pages containing only one | |
523 // object. | |
524 class LargeObjectPage final : public BasePage { | |
525 public: | |
526 LargeObjectPage(PageMemory*, BaseHeap*, size_t); | |
527 | |
528 Address payload() { return heapObjectHeader()->payload(); } | |
529 size_t payloadSize() { return m_payloadSize; } | |
530 Address payloadEnd() { return payload() + payloadSize(); } | |
531 bool containedInObjectPayload(Address address) | |
532 { | |
533 return payload() <= address && address < payloadEnd(); | |
534 } | |
535 | |
536 size_t objectPayloadSizeForTesting() override; | |
537 bool isEmpty() override; | |
538 void removeFromHeap() override; | |
539 void sweep() override; | |
540 void makeConsistentForGC() override; | |
541 void makeConsistentForMutator() override; | |
542 #if defined(ADDRESS_SANITIZER) | |
543 void poisonObjects(ThreadState::ObjectsToPoison, ThreadState::Poisoning) override; | |
544 #endif | |
545 void checkAndMarkPointer(Visitor*, Address) override; | |
546 void markOrphaned() override; | |
547 | |
548 void takeSnapshot(String dumpBaseName, size_t pageIndex, ThreadState::GCSnapshotInfo&, size_t* outFreeSize, size_t* outFreeCount) override; | |
549 #if ENABLE(GC_PROFILING) | |
550 const GCInfo* findGCInfo(Address) override; | |
551 void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override; | |
552 void incrementMarkedObjectsAge() override; | |
553 void countMarkedObjects(ClassAgeCountsMap&) override; | |
554 void countObjectsToSweep(ClassAgeCountsMap&) override; | |
555 #endif | |
556 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) | |
557 // Returns true for any address that is on one of the pages that this | |
558 // large object uses. That ensures that we can use a negative result to | |
559 // populate the negative page cache. | |
560 bool contains(Address) override; | |
561 #endif | |
562 virtual size_t size() | |
563 { | |
564 return pageHeaderSize() + sizeof(HeapObjectHeader) + m_payloadSize; | |
565 } | |
566 static size_t pageHeaderSize() | |
567 { | |
568 // Compute the amount of padding we have to add to a header to make | |
569 // the size of the header plus the padding a multiple of 8 bytes. | |
570 size_t paddingSize = (sizeof(LargeObjectPage) + allocationGranularity - (sizeof(HeapObjectHeader) % allocationGranularity)) % allocationGranularity; | |
571 return sizeof(LargeObjectPage) + paddingSize; | |
572 } | |
573 bool isLargeObjectPage() override { return true; } | |
574 | |
575 HeapObjectHeader* heapObjectHeader() | |
576 { | |
577 Address headerAddress = address() + pageHeaderSize(); | |
578 return reinterpret_cast<HeapObjectHeader*>(headerAddress); | |
579 } | |
580 | |
581 #ifdef ANNOTATE_CONTIGUOUS_CONTAINER | |
582 void setIsVectorBackingPage() { m_isVectorBackingPage = true; } | |
583 bool isVectorBackingPage() const { return m_isVectorBackingPage; } | |
584 #endif | |
585 | |
586 private: | |
587 | |
588 size_t m_payloadSize; | |
589 #ifdef ANNOTATE_CONTIGUOUS_CONTAINER | |
590 bool m_isVectorBackingPage; | |
591 #endif | |
592 }; | |
593 | |
594 // A HeapDoesNotContainCache provides a fast way of taking an arbitrary | |
595 // pointer-sized word, and determining whether it cannot be interpreted as a | |
596 // pointer to an area that is managed by the garbage collected Blink heap. This | |
597 // is a cache of 'pages' that have previously been determined to be wholly | |
598 // outside of the heap. The size of these pages must be smaller than the | |
599 // allocation alignment of the heap pages. We determine off-heap-ness by | |
600 // rounding down the pointer to the nearest page and looking up the page in the | |
601 // cache. If there is a miss in the cache we can determine the status of the | |
602 // pointer precisely using the heap RegionTree. | |
603 // | |
604 // The HeapDoesNotContainCache is a negative cache, so it must be flushed when | |
605 // memory is added to the heap. | |
606 class HeapDoesNotContainCache { | |
607 public: | |
608 HeapDoesNotContainCache() | |
609 : m_entries(adoptArrayPtr(new Address[HeapDoesNotContainCache::numberOfEntries])) | |
610 , m_hasEntries(false) | |
611 { | |
612 // Start by flushing the cache in a non-empty state to initialize all the cache entries. | |
613 for (int i = 0; i < numberOfEntries; ++i) | |
614 m_entries[i] = nullptr; | |
615 } | |
616 | |
617 void flush(); | |
618 bool isEmpty() { return !m_hasEntries; } | |
619 | |
620 // Perform a lookup in the cache. | |
621 // | |
622 // If lookup returns false, the argument address was not found in | |
623 // the cache and it is unknown if the address is in the Blink | |
624 // heap. | |
625 // | |
626 // If lookup returns true, the argument address was found in the | |
627 // cache which means the address is not in the heap. | |
628 PLATFORM_EXPORT bool lookup(Address); | |
629 | |
630 // Add an entry to the cache. | |
631 PLATFORM_EXPORT void addEntry(Address); | |
632 | |
633 private: | |
634 static const int numberOfEntriesLog2 = 12; | |
635 static const int numberOfEntries = 1 << numberOfEntriesLog2; | |
636 | |
637 static size_t hash(Address); | |
638 | |
639 WTF::OwnPtr<Address[]> m_entries; | |
640 bool m_hasEntries; | |
641 }; | |
642 | |
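The negative-cache idea can be pictured as a small direct-mapped table: hash the address's page bits to a slot, record the page base after a slow lookup misses, and treat a matching slot as "definitely not in the heap". The sketch below is a toy model only; the real class uses its own entry count, hash, and flush policy.

    #include <cstdint>
    #include <cstring>

    // Toy negative cache: remembers page-sized regions known to be outside the heap.
    class ToyNotInHeapCache {
    public:
        ToyNotInHeapCache() { memset(m_entries, 0, sizeof(m_entries)); }

        // True means "this address was previously recorded as not in the heap".
        bool lookup(uintptr_t address) const
        {
            uintptr_t page = address & ~kPageMask;
            return m_entries[slot(page)] == page && page;
        }

        // Record a page as not belonging to the heap (called after a slow check misses).
        void addEntry(uintptr_t address)
        {
            uintptr_t page = address & ~kPageMask;
            m_entries[slot(page)] = page;
        }

        void flush() { memset(m_entries, 0, sizeof(m_entries)); } // needed whenever heap pages are added

    private:
        static const uintptr_t kPageMask = (1 << 12) - 1;  // 4 KB "pages" for the cache
        static const int kNumEntries = 1 << 12;

        static int slot(uintptr_t page) { return static_cast<int>((page >> 12) & (kNumEntries - 1)); }

        uintptr_t m_entries[kNumEntries];
    };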
643 class FreeList { | |
644 public: | |
645 FreeList(); | |
646 | |
647 void addToFreeList(Address, size_t); | |
648 void clear(); | |
649 | |
650 // Returns a bucket number for inserting a FreeListEntry of a given size. | |
651 // All FreeListEntries in the given bucket, n, have size >= 2^n. | |
652 static int bucketIndexForSize(size_t); | |
653 | |
654 // Returns true if the freelist snapshot is captured. | |
655 bool takeSnapshot(const String& dumpBaseName); | |
656 | |
657 #if ENABLE(GC_PROFILING) | |
658 struct PerBucketFreeListStats { | |
659 size_t entryCount; | |
660 size_t freeSize; | |
661 | |
662 PerBucketFreeListStats() : entryCount(0), freeSize(0) { } | |
663 }; | |
664 | |
665 void getFreeSizeStats(PerBucketFreeListStats bucketStats[], size_t& totalSize) const; | |
666 #endif | |
667 | |
668 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) | |
669 static void zapFreedMemory(Address, size_t); | |
670 #endif | |
671 | |
672 private: | |
673 int m_biggestFreeListIndex; | |
674 | |
675 // All FreeListEntries in the nth list have size >= 2^n. | |
676 FreeListEntry* m_freeLists[blinkPageSizeLog2]; | |
677 | |
678 friend class NormalPageHeap; | |
679 }; | |
680 | |
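bucketIndexForSize groups entries into power-of-two size classes: every entry placed in bucket n is at least 2^n bytes, so the index is essentially floor(log2(size)). A hedged sketch of that computation (the real implementation may differ in detail):

    #include <cassert>
    #include <cstddef>

    // floor(log2(size)): every entry placed in bucket n is at least 2^n bytes.
    int bucketIndexForSize(size_t size)
    {
        assert(size > 0);
        int index = -1;
        while (size) {
            size >>= 1;
            ++index;
        }
        return index;
    }

    int main()
    {
        assert(bucketIndexForSize(8) == 3);      // smallest free entry: one granule
        assert(bucketIndexForSize(24) == 4);     // 16 <= 24 < 32, so bucket 4
        assert(bucketIndexForSize(1 << 16) == 16);
        return 0;
    }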
681 // Each thread has a number of thread heaps (e.g., Generic heaps, | |
682 // typed heaps for Node, heaps for collection backings, etc.), | |
683 // and BaseHeap represents each thread heap. | |
684 // | |
685 // BaseHeap is a parent class of NormalPageHeap and LargeObjectHeap. | |
686 // NormalPageHeap represents a heap that contains NormalPages | |
687 // and LargeObjectHeap represents a heap that contains LargeObjectPages. | |
688 class PLATFORM_EXPORT BaseHeap { | |
689 public: | |
690 BaseHeap(ThreadState*, int); | |
691 virtual ~BaseHeap(); | |
692 void cleanupPages(); | |
693 | |
694 void takeSnapshot(const String& dumpBaseName, ThreadState::GCSnapshotInfo&); | |
695 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) | |
696 BasePage* findPageFromAddress(Address); | |
697 #endif | |
698 virtual void takeFreelistSnapshot(const String& dumpBaseName) { } | |
699 #if ENABLE(GC_PROFILING) | |
700 void snapshot(TracedValue*, ThreadState::SnapshotInfo*); | |
701 virtual void snapshotFreeList(TracedValue&) { } | |
702 | |
703 void countMarkedObjects(ClassAgeCountsMap&) const; | |
704 void countObjectsToSweep(ClassAgeCountsMap&) const; | |
705 void incrementMarkedObjectsAge(); | |
706 #endif | |
707 | |
708 virtual void clearFreeLists() { } | |
709 void makeConsistentForGC(); | |
710 void makeConsistentForMutator(); | |
711 #if ENABLE(ASSERT) | |
712 virtual bool isConsistentForGC() = 0; | |
713 #endif | |
714 size_t objectPayloadSizeForTesting(); | |
715 void prepareHeapForTermination(); | |
716 void prepareForSweep(); | |
717 #if defined(ADDRESS_SANITIZER) | |
718 void poisonHeap(ThreadState::ObjectsToPoison, ThreadState::Poisoning); | |
719 #endif | |
720 Address lazySweep(size_t, size_t gcInfoIndex); | |
721 void sweepUnsweptPage(); | |
722 // Returns true if we have swept all pages within the deadline. | |
723 // Returns false otherwise. | |
724 bool lazySweepWithDeadline(double deadlineSeconds); | |
725 void completeSweep(); | |
726 | |
727 ThreadState* threadState() { return m_threadState; } | |
728 int heapIndex() const { return m_index; } | |
729 | |
730 protected: | |
731 BasePage* m_firstPage; | |
732 BasePage* m_firstUnsweptPage; | |
733 | |
734 private: | |
735 virtual Address lazySweepPages(size_t, size_t gcInfoIndex) = 0; | |
736 | |
737 ThreadState* m_threadState; | |
738 | |
739 // Index into the page pools. This is used to ensure that the pages of the | |
740 // same type go into the correct page pool and thus avoid type confusion. | |
741 int m_index; | |
742 }; | |
743 | |
744 class PLATFORM_EXPORT NormalPageHeap final : public BaseHeap { | |
745 public: | |
746 NormalPageHeap(ThreadState*, int); | |
747 void addToFreeList(Address address, size_t size) | |
748 { | |
749 ASSERT(findPageFromAddress(address)); | |
750 ASSERT(findPageFromAddress(address + size - 1)); | |
751 m_freeList.addToFreeList(address, size); | |
752 } | |
753 void clearFreeLists() override; | |
754 #if ENABLE(ASSERT) | |
755 bool isConsistentForGC() override; | |
756 bool pagesToBeSweptContains(Address); | |
757 #endif | |
758 void takeFreelistSnapshot(const String& dumpBaseName) override; | |
759 #if ENABLE(GC_PROFILING) | |
760 void snapshotFreeList(TracedValue&) override; | |
761 #endif | |
762 | |
763 Address allocateObject(size_t allocationSize, size_t gcInfoIndex); | |
764 | |
765 void freePage(NormalPage*); | |
766 | |
767 bool coalesce(); | |
768 void promptlyFreeObject(HeapObjectHeader*); | |
769 bool expandObject(HeapObjectHeader*, size_t); | |
770 bool shrinkObject(HeapObjectHeader*, size_t); | |
771 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; } | |
772 | |
773 private: | |
774 void allocatePage(); | |
775 Address lazySweepPages(size_t, size_t gcInfoIndex) override; | |
776 Address outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex); | |
777 Address currentAllocationPoint() const { return m_currentAllocationPoint; } | |
778 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } | |
779 bool hasCurrentAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } | |
780 void setAllocationPoint(Address, size_t); | |
781 void updateRemainingAllocationSize(); | |
782 Address allocateFromFreeList(size_t, size_t gcInfoIndex); | |
783 | |
784 FreeList m_freeList; | |
785 Address m_currentAllocationPoint; | |
786 size_t m_remainingAllocationSize; | |
787 size_t m_lastRemainingAllocationSize; | |
788 | |
789 // The size of promptly freed objects in the heap. | |
790 size_t m_promptlyFreedSize; | |
791 | |
792 #if ENABLE(GC_PROFILING) | |
793 size_t m_cumulativeAllocationSize; | |
794 size_t m_allocationCount; | |
795 size_t m_inlineAllocationCount; | |
796 #endif | |
797 }; | |
798 | |
799 class LargeObjectHeap final : public BaseHeap { | |
800 public: | |
801 LargeObjectHeap(ThreadState*, int); | |
802 Address allocateLargeObjectPage(size_t, size_t gcInfoIndex); | |
803 void freeLargeObjectPage(LargeObjectPage*); | |
804 #if ENABLE(ASSERT) | |
805 bool isConsistentForGC() override { return true; } | |
806 #endif | |
807 private: | |
808 Address doAllocateLargeObjectPage(size_t, size_t gcInfoIndex); | |
809 Address lazySweepPages(size_t, size_t gcInfoIndex) override; | |
810 }; | |
811 | |
812 // Mask an address down to the enclosing oilpan heap base page. All oilpan heap | |
813 // pages are aligned at blinkPageBase plus the size of a guard page. | |
814 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our | |
815 // typed heaps. This is only exported to enable tests in HeapTest.cpp. | |
816 PLATFORM_EXPORT inline BasePage* pageFromObject(const void* object) | |
817 { | |
818 Address address = reinterpret_cast<Address>(const_cast<void*>(object)); | |
819 BasePage* page = reinterpret_cast<BasePage*>(blinkPageAddress(address) + blinkGuardPageSize); | |
820 ASSERT(page->contains(address)); | |
821 return page; | |
822 } | |
823 | |
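pageFromObject and HeapObjectHeader::fromPayload (defined further down) are two halves of the same address arithmetic: a page header lives one guard page past the blinkPageSize-aligned base, and an object header lives sizeof(HeapObjectHeader) bytes before its payload. A standalone sketch with plain integers, assuming 4 KB guard pages and the 8-byte header of a 64-bit release build:

    #include <cassert>
    #include <cstdint>

    // Plain-integer model of the relationships used by pageFromObject() above and
    // HeapObjectHeader::fromPayload() below. The 4 KB guard page and the 8-byte
    // header size are assumptions matching a 64-bit release configuration.
    const uintptr_t kBlinkPageSize = 1 << 17;
    const uintptr_t kGuardPageSize = 4096;
    const uintptr_t kHeaderSize = 8;

    uintptr_t toyPageFromObject(uintptr_t object)
    {
        return (object & ~(kBlinkPageSize - 1)) + kGuardPageSize;
    }

    uintptr_t toyHeaderFromPayload(uintptr_t payload)
    {
        return payload - kHeaderSize;
    }

    int main()
    {
        uintptr_t pageBase = 7 * kBlinkPageSize;     // a blinkPageSize-aligned region
        uintptr_t page = pageBase + kGuardPageSize;  // the BasePage sits after the guard page
        uintptr_t payload = page + 1024;             // some object payload inside the page

        assert(toyPageFromObject(payload) == page);
        assert(toyHeaderFromPayload(payload) == payload - 8);
        return 0;
    }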
824 template<typename T, bool = NeedsAdjustAndMark<T>::value> class ObjectAliveTrait; | 46 template<typename T, bool = NeedsAdjustAndMark<T>::value> class ObjectAliveTrait; |
825 | 47 |
826 template<typename T> | 48 template<typename T> |
827 class ObjectAliveTrait<T, false> { | 49 class ObjectAliveTrait<T, false> { |
828 public: | 50 public: |
829 static bool isHeapObjectAlive(T* object) | 51 static bool isHeapObjectAlive(T* object) |
830 { | 52 { |
831 static_assert(sizeof(T), "T must be fully defined"); | 53 static_assert(sizeof(T), "T must be fully defined"); |
832 return HeapObjectHeader::fromPayload(object)->isMarked(); | 54 return HeapObjectHeader::fromPayload(object)->isMarked(); |
833 } | 55 } |
(...skipping 394 matching lines...) |
1228 #else | 450 #else |
1229 #define EAGERLY_FINALIZE() typedef int IsEagerlyFinalizedMarker | 451 #define EAGERLY_FINALIZE() typedef int IsEagerlyFinalizedMarker |
1230 #endif | 452 #endif |
1231 | 453 |
1232 #if !ENABLE(OILPAN) && ENABLE(LAZY_SWEEPING) | 454 #if !ENABLE(OILPAN) && ENABLE(LAZY_SWEEPING) |
1233 #define EAGERLY_FINALIZE_WILL_BE_REMOVED() EAGERLY_FINALIZE() | 455 #define EAGERLY_FINALIZE_WILL_BE_REMOVED() EAGERLY_FINALIZE() |
1234 #else | 456 #else |
1235 #define EAGERLY_FINALIZE_WILL_BE_REMOVED() | 457 #define EAGERLY_FINALIZE_WILL_BE_REMOVED() |
1236 #endif | 458 #endif |
1237 | 459 |
1238 NO_SANITIZE_ADDRESS inline | |
1239 size_t HeapObjectHeader::size() const | |
1240 { | |
1241 size_t result = m_encoded & headerSizeMask; | |
1242 // Large objects should not refer to header->size(). | |
1243 // The actual size of a large object is stored in | |
1244 // LargeObjectPage::m_payloadSize. | |
1245 ASSERT(result != largeObjectSizeInHeader); | |
1246 ASSERT(!pageFromObject(this)->isLargeObjectPage()); | |
1247 return result; | |
1248 } | |
1249 | |
1250 #if ENABLE(ASSERT) | |
1251 NO_SANITIZE_ADDRESS inline | |
1252 bool HeapObjectHeader::checkHeader() const | |
1253 { | |
1254 return !pageFromObject(this)->orphaned() && m_magic == magic; | |
1255 } | |
1256 #endif | |
1257 | |
1258 inline Address HeapObjectHeader::payload() | |
1259 { | |
1260 return reinterpret_cast<Address>(this) + sizeof(HeapObjectHeader); | |
1261 } | |
1262 | |
1263 inline Address HeapObjectHeader::payloadEnd() | |
1264 { | |
1265 return reinterpret_cast<Address>(this) + size(); | |
1266 } | |
1267 | |
1268 NO_SANITIZE_ADDRESS inline | |
1269 size_t HeapObjectHeader::payloadSize() | |
1270 { | |
1271 size_t size = m_encoded & headerSizeMask; | |
1272 if (UNLIKELY(size == largeObjectSizeInHeader)) { | |
1273 ASSERT(pageFromObject(this)->isLargeObjectPage()); | |
1274 return static_cast<LargeObjectPage*>(pageFromObject(this))->payloadSize(); | |
1275 } | |
1276 ASSERT(!pageFromObject(this)->isLargeObjectPage()); | |
1277 return size - sizeof(HeapObjectHeader); | |
1278 } | |
1279 | |
1280 inline HeapObjectHeader* HeapObjectHeader::fromPayload(const void* payload) | |
1281 { | |
1282 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload)); | |
1283 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(addr - sizeof(HeapObjectHeader)); | |
1284 ASSERT(header->checkHeader()); | |
1285 return header; | |
1286 } | |
1287 | |
1288 NO_SANITIZE_ADDRESS inline | |
1289 bool HeapObjectHeader::isMarked() const | |
1290 { | |
1291 ASSERT(checkHeader()); | |
1292 return m_encoded & headerMarkBitMask; | |
1293 } | |
1294 | |
1295 NO_SANITIZE_ADDRESS inline | |
1296 void HeapObjectHeader::mark() | |
1297 { | |
1298 ASSERT(checkHeader()); | |
1299 ASSERT(!isMarked()); | |
1300 m_encoded = m_encoded | headerMarkBitMask; | |
1301 } | |
1302 | |
1303 NO_SANITIZE_ADDRESS inline | |
1304 void HeapObjectHeader::unmark() | |
1305 { | |
1306 ASSERT(checkHeader()); | |
1307 ASSERT(isMarked()); | |
1308 m_encoded &= ~headerMarkBitMask; | |
1309 } | |
1310 | |
1311 NO_SANITIZE_ADDRESS inline | |
1312 bool HeapObjectHeader::isDead() const | |
1313 { | |
1314 ASSERT(checkHeader()); | |
1315 return m_encoded & headerDeadBitMask; | |
1316 } | |
1317 | |
1318 NO_SANITIZE_ADDRESS inline | |
1319 void HeapObjectHeader::markDead() | |
1320 { | |
1321 ASSERT(checkHeader()); | |
1322 ASSERT(!isMarked()); | |
1323 m_encoded |= headerDeadBitMask; | |
1324 } | |
1325 | |
1326 inline Address NormalPageHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex) | |
1327 { | |
1328 #if ENABLE(GC_PROFILING) | |
1329 m_cumulativeAllocationSize += allocationSize; | |
1330 ++m_allocationCount; | |
1331 #endif | |
1332 | |
1333 if (LIKELY(allocationSize <= m_remainingAllocationSize)) { | |
1334 #if ENABLE(GC_PROFILING) | |
1335 ++m_inlineAllocationCount; | |
1336 #endif | |
1337 Address headerAddress = m_currentAllocationPoint; | |
1338 m_currentAllocationPoint += allocationSize; | |
1339 m_remainingAllocationSize -= allocationSize; | |
1340 ASSERT(gcInfoIndex > 0); | |
1341 new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex); | |
1342 Address result = headerAddress + sizeof(HeapObjectHeader); | |
1343 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | |
1344 | |
1345 SET_MEMORY_ACCESSIBLE(result, allocationSize - sizeof(HeapObjectHeader)); | |
1346 ASSERT(findPageFromAddress(headerAddress + allocationSize - 1)); | |
1347 return result; | |
1348 } | |
1349 return outOfLineAllocate(allocationSize, gcInfoIndex); | |
1350 } | |
1351 | |
1352 template<typename Derived> | |
1353 template<typename T> | |
1354 void VisitorHelper<Derived>::handleWeakCell(Visitor* self, void* object) | |
1355 { | |
1356 T** cell = reinterpret_cast<T**>(object); | |
1357 if (*cell && !ObjectAliveTrait<T>::isHeapObjectAlive(*cell)) | |
1358 *cell = nullptr; | |
1359 } | |
1360 | |
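handleWeakCell above is the weak-reference half of marking: after tracing, each registered weak slot is revisited and cleared if its referent was never marked. A toy model of that step, independent of the real Visitor machinery:

    #include <cassert>

    // Toy model of weak-cell processing: if the referent was not marked during
    // tracing, the weak pointer slot is cleared.
    struct ToyObject { bool marked = false; };

    static void processWeakCell(ToyObject** cell)
    {
        if (*cell && !(*cell)->marked)
            *cell = nullptr;
    }

    int main()
    {
        ToyObject live, dead;
        live.marked = true;

        ToyObject* weakToLive = &live;
        ToyObject* weakToDead = &dead;

        processWeakCell(&weakToLive);
        processWeakCell(&weakToDead);

        assert(weakToLive == &live);   // survives: referent was marked
        assert(weakToDead == nullptr); // cleared: referent was garbage
        return 0;
    }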
1361 inline Address Heap::allocateOnHeapIndex(ThreadState* state, size_t size, int heapIndex, size_t gcInfoIndex) | 460 inline Address Heap::allocateOnHeapIndex(ThreadState* state, size_t size, int heapIndex, size_t gcInfoIndex) |
1362 { | 461 { |
1363 ASSERT(state->isAllocationAllowed()); | 462 ASSERT(state->isAllocationAllowed()); |
1364 ASSERT(heapIndex != ThreadState::LargeObjectHeapIndex); | 463 ASSERT(heapIndex != ThreadState::LargeObjectHeapIndex); |
1365 NormalPageHeap* heap = static_cast<NormalPageHeap*>(state->heap(heapIndex)); | 464 NormalPageHeap* heap = static_cast<NormalPageHeap*>(state->heap(heapIndex)); |
1366 return heap->allocateObject(allocationSizeFromSize(size), gcInfoIndex); | 465 return heap->allocateObject(allocationSizeFromSize(size), gcInfoIndex); |
1367 } | 466 } |
1368 | 467 |
1369 template<typename T> | 468 template<typename T> |
1370 Address Heap::allocate(size_t size, bool eagerlySweep) | 469 Address Heap::allocate(size_t size, bool eagerlySweep) |
(...skipping 28 matching lines...) |
1399 ASSERT(!Heap::gcInfo(previousHeader->gcInfoIndex())->hasFinalizer()); | 498 ASSERT(!Heap::gcInfo(previousHeader->gcInfoIndex())->hasFinalizer()); |
1400 ASSERT(previousHeader->gcInfoIndex() == GCInfoTrait<T>::index()); | 499 ASSERT(previousHeader->gcInfoIndex() == GCInfoTrait<T>::index()); |
1401 Address address = Heap::allocateOnHeapIndex(state, size, heapIndex, GCInfoTrait<T>::index()); | 500 Address address = Heap::allocateOnHeapIndex(state, size, heapIndex, GCInfoTrait<T>::index()); |
1402 size_t copySize = previousHeader->payloadSize(); | 501 size_t copySize = previousHeader->payloadSize(); |
1403 if (copySize > size) | 502 if (copySize > size) |
1404 copySize = size; | 503 copySize = size; |
1405 memcpy(address, previous, copySize); | 504 memcpy(address, previous, copySize); |
1406 return address; | 505 return address; |
1407 } | 506 } |
1408 | 507 |
| 508 template<typename Derived> |
| 509 template<typename T> |
| 510 void VisitorHelper<Derived>::handleWeakCell(Visitor* self, void* object) |
| 511 { |
| 512 T** cell = reinterpret_cast<T**>(object); |
| 513 if (*cell && !ObjectAliveTrait<T>::isHeapObjectAlive(*cell)) |
| 514 *cell = nullptr; |
| 515 } |
| 516 |
1409 } // namespace blink | 517 } // namespace blink |
1410 | 518 |
1411 #endif // Heap_h | 519 #endif // Heap_h |