| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 71 matching lines...) |
| 82 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 82 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
| 83 #define FILL_ZERO_IF_PRODUCTION(address, size) do { } while (false) | 83 #define FILL_ZERO_IF_PRODUCTION(address, size) do { } while (false) |
| 84 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) memset((address), 0, (size)) | 84 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) memset((address), 0, (size)) |
| 85 #else | 85 #else |
| 86 #define FILL_ZERO_IF_PRODUCTION(address, size) memset((address), 0, (size)) | 86 #define FILL_ZERO_IF_PRODUCTION(address, size) memset((address), 0, (size)) |
| 87 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) do { } while (false) | 87 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) do { } while (false) |
| 88 #endif | 88 #endif |
| 89 | 89 |
| 90 class CallbackStack; | 90 class CallbackStack; |
| 91 class PageMemory; | 91 class PageMemory; |
| 92 class NormalPageHeap; | 92 class ThreadHeapForHeapPage; |
| 93 template<ThreadAffinity affinity> class ThreadLocalPersistents; | 93 template<ThreadAffinity affinity> class ThreadLocalPersistents; |
| 94 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity>> class Persistent; | 94 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity>> class Persistent; |
| 95 | 95 |
| 96 #if ENABLE(GC_PROFILING) | 96 #if ENABLE(GC_PROFILING) |
| 97 class TracedValue; | 97 class TracedValue; |
| 98 #endif | 98 #endif |
| 99 | 99 |
| 100 // HeapObjectHeader is a 4-byte (32-bit) header with the following layout: | 100 // HeapObjectHeader is a 4-byte (32-bit) header with the following layout: |
| 101 // | 101 // |
| 102 // | gcInfoIndex (15 bit) | size (14 bit) | dead bit (1 bit) | freed bit (1 bit) | mark bit (1 bit) | | 102 // | gcInfoIndex (15 bit) | size (14 bit) | dead bit (1 bit) | freed bit (1 bit) | mark bit (1 bit) | |
| 103 // | 103 // |
| 104 // - For non-large objects, 14 bits are enough for |size| because the Blink | 104 // - For non-large objects, 14 bits are enough for |size| because the Blink |
| 105 // page size is 2^17 bytes and each object is guaranteed to be aligned at | 105 // page size is 2^17 bytes and each object is guaranteed to be aligned at |
| 106 // an 8-byte (2^3) boundary. | 106 // an 8-byte (2^3) boundary. |
| 107 // - For large objects, |size| is 0. The actual size of a large object is | 107 // - For large objects, |size| is 0. The actual size of a large object is |
| 108 // stored in LargeObjectPage::m_payloadSize. | 108 // stored in LargeObject::m_payloadSize. |
| 109 // - 15 bits are enough for gcInfoIndex because there are fewer than | 109 // - 15 bits are enough for gcInfoIndex because there are fewer than |
| 110 // 2^15 types in Blink. | 110 // 2^15 types in Blink. |
| 111 const size_t headerGCInfoIndexShift = 17; | 111 const size_t headerGCInfoIndexShift = 17; |
| 112 const size_t headerGCInfoIndexMask = (static_cast<size_t>((1 << 15) - 1)) << headerGCInfoIndexShift; | 112 const size_t headerGCInfoIndexMask = (static_cast<size_t>((1 << 15) - 1)) << headerGCInfoIndexShift; |
| 113 const size_t headerSizeMask = (static_cast<size_t>((1 << 14) - 1)) << 3; | 113 const size_t headerSizeMask = (static_cast<size_t>((1 << 14) - 1)) << 3; |
| 114 const size_t headerMarkBitMask = 1; | 114 const size_t headerMarkBitMask = 1; |
| 115 const size_t headerFreedBitMask = 2; | 115 const size_t headerFreedBitMask = 2; |
| 116 // The dead bit is used for objects that have gone through GC marking, but did | 116 // The dead bit is used for objects that have gone through GC marking, but did |
| 117 // not get swept before a new GC started. In that case we set the dead bit on | 117 // not get swept before a new GC started. In that case we set the dead bit on |
| 118 // objects that were not marked in the previous GC to ensure we are not tracing | 118 // objects that were not marked in the previous GC to ensure we are not tracing |
| 119 // them via a conservatively found pointer. Tracing dead objects could lead to | 119 // them via a conservatively found pointer. Tracing dead objects could lead to |
| 120 // tracing of already finalized objects in another thread's heap, which is a | 120 // tracing of already finalized objects in another thread's heap, which is a |
| 121 // use-after-free situation. | 121 // use-after-free situation. |
| 122 const size_t headerDeadBitMask = 4; | 122 const size_t headerDeadBitMask = 4; |
| 123 // On free-list entries we reuse the dead bit to distinguish a normal free-list | 123 // On free-list entries we reuse the dead bit to distinguish a normal free-list |
| 124 // entry from one that has been promptly freed. | 124 // entry from one that has been promptly freed. |
| 125 const size_t headerPromptlyFreedBitMask = headerFreedBitMask | headerDeadBitMask; | 125 const size_t headerPromptlyFreedBitMask = headerFreedBitMask | headerDeadBitMask; |
| 126 const size_t largeObjectSizeInHeader = 0; | 126 const size_t largeObjectSizeInHeader = 0; |
| 127 const size_t gcInfoIndexForFreeListHeader = 0; | 127 const size_t gcInfoIndexForFreeListHeader = 0; |
| 128 const size_t nonLargeObjectPageSizeMax = 1 << 17; | 128 const size_t nonLargeObjectSizeMax = 1 << 17; |
| 129 | 129 |
| 130 static_assert(nonLargeObjectPageSizeMax >= blinkPageSize, "max size supported by HeapObjectHeader must at least be blinkPageSize"); | 130 static_assert(nonLargeObjectSizeMax >= blinkPageSize, "max size supported by HeapObjectHeader must at least be blinkPageSize"); |
| 131 | 131 |
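A worked example of the header encoding above may help. This is a self-contained sketch (ours, not part of the patch) that mirrors the shift and mask constants defined in this file; the index and size values are made up:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const uint32_t headerGCInfoIndexShift = 17;
        const uint32_t headerGCInfoIndexMask = ((1u << 15) - 1) << headerGCInfoIndexShift;
        const uint32_t headerSizeMask = ((1u << 14) - 1) << 3;

        uint32_t gcInfoIndex = 42; // hypothetical type index
        uint32_t size = 64;        // 8-byte aligned, so the low 3 bits are zero
        uint32_t encoded = (gcInfoIndex << headerGCInfoIndexShift) | size; // 0x540040

        assert((encoded & headerSizeMask) == 64);                                    // size decodes back
        assert(((encoded & headerGCInfoIndexMask) >> headerGCInfoIndexShift) == 42); // so does the index
        return 0;
    }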
| 132 class PLATFORM_EXPORT HeapObjectHeader { | 132 class PLATFORM_EXPORT HeapObjectHeader { |
| 133 public: | 133 public: |
| 134 // If gcInfoIndex is 0, this header is interpreted as a free list header. | 134 // If gcInfoIndex is 0, this header is interpreted as a free list header. |
| 135 NO_SANITIZE_ADDRESS | 135 NO_SANITIZE_ADDRESS |
| 136 HeapObjectHeader(size_t size, size_t gcInfoIndex) | 136 HeapObjectHeader(size_t size, size_t gcInfoIndex) |
| 137 { | 137 { |
| 138 #if ENABLE(ASSERT) | 138 #if ENABLE(ASSERT) |
| 139 m_magic = magic; | 139 m_magic = magic; |
| 140 #endif | 140 #endif |
| 141 #if ENABLE(GC_PROFILING) | 141 #if ENABLE(GC_PROFILING) |
| 142 m_age = 0; | 142 m_age = 0; |
| 143 #endif | 143 #endif |
| 144 // sizeof(HeapObjectHeader) must be equal to or smaller than | 144 // sizeof(HeapObjectHeader) must be equal to or smaller than |
| 145 // allocationGranularity, because HeapObjectHeader is used as a header | 145 // allocationGranularity, because HeapObjectHeader is used as a header |
| 146 // for a freed entry. Given that the smallest entry size is | 146 // for a freed entry. Given that the smallest entry size is |
| 147 // allocationGranularity, HeapObjectHeader must fit into that size. | 147 // allocationGranularity, HeapObjectHeader must fit into that size. |
| 148 static_assert(sizeof(HeapObjectHeader) <= allocationGranularity, "size of HeapObjectHeader must be smaller than allocationGranularity"); | 148 static_assert(sizeof(HeapObjectHeader) <= allocationGranularity, "size of HeapObjectHeader must be smaller than allocationGranularity"); |
| 149 #if CPU(64BIT) | 149 #if CPU(64BIT) |
| 150 static_assert(sizeof(HeapObjectHeader) == 8, "size of HeapObjectHeader must be 8 byte aligned"); | 150 static_assert(sizeof(HeapObjectHeader) == 8, "size of HeapObjectHeader must be 8 byte aligned"); |
| 151 #endif | 151 #endif |
| 152 | 152 |
| 153 ASSERT(gcInfoIndex < GCInfoTable::maxIndex); | 153 ASSERT(gcInfoIndex < GCInfoTable::maxIndex); |
| 154 ASSERT(size < nonLargeObjectPageSizeMax); | 154 ASSERT(size < nonLargeObjectSizeMax); |
| 155 ASSERT(!(size & allocationMask)); | 155 ASSERT(!(size & allocationMask)); |
| 156 m_encoded = (gcInfoIndex << headerGCInfoIndexShift) | size | (gcInfoIndex ? 0 : headerFreedBitMask); | 156 m_encoded = (gcInfoIndex << headerGCInfoIndexShift) | size | (gcInfoIndex ? 0 : headerFreedBitMask); |
| 157 } | 157 } |
| 158 | 158 |
| 159 NO_SANITIZE_ADDRESS | 159 NO_SANITIZE_ADDRESS |
| 160 bool isFree() const { return m_encoded & headerFreedBitMask; } | 160 bool isFree() const { return m_encoded & headerFreedBitMask; } |
| 161 NO_SANITIZE_ADDRESS | 161 NO_SANITIZE_ADDRESS |
| 162 bool isPromptlyFreed() const { return (m_encoded & headerPromptlyFreedBitMask) == headerPromptlyFreedBitMask; } | 162 bool isPromptlyFreed() const { return (m_encoded & headerPromptlyFreedBitMask) == headerPromptlyFreedBitMask; } |
| 163 NO_SANITIZE_ADDRESS | 163 NO_SANITIZE_ADDRESS |
| 164 void markPromptlyFreed() { m_encoded |= headerPromptlyFreedBitMask; } | 164 void markPromptlyFreed() { m_encoded |= headerPromptlyFreedBitMask; } |
| (...skipping 185 matching lines...) |
| 350 // header should be OS page size away from being Blink page size | 350 // header should be OS page size away from being Blink page size |
| 351 // aligned. | 351 // aligned. |
| 352 inline bool isPageHeaderAddress(Address address) | 352 inline bool isPageHeaderAddress(Address address) |
| 353 { | 353 { |
| 354 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - WTF::kSystemPageSize); | 354 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - WTF::kSystemPageSize); |
| 355 } | 355 } |
| 356 #endif | 356 #endif |
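To make isPageHeaderAddress() concrete, a worked example (values ours, assuming a 4 KB OS page, so WTF::kSystemPageSize == 0x1000):

    // 0x5A341000: offset within its blink page is 0x1000 == kSystemPageSize -> header address
    // 0x5A342000: offset within its blink page is 0x2000 != kSystemPageSize -> not a header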
| 357 | 357 |
| 358 // FIXME: Add a good comment about the heap layout once heap relayout work | 358 // FIXME: Add a good comment about the heap layout once heap relayout work |
| 359 // is done. | 359 // is done. |
| 360 class BasePage { | 360 class BaseHeapPage { |
| 361 public: | 361 public: |
| 362 BasePage(PageMemory*, BaseHeap*); | 362 BaseHeapPage(PageMemory*, ThreadHeap*); |
| 363 virtual ~BasePage() { } | 363 virtual ~BaseHeapPage() { } |
| 364 | 364 |
| 365 void link(BasePage** previousNext) | 365 void link(BaseHeapPage** previousNext) |
| 366 { | 366 { |
| 367 m_next = *previousNext; | 367 m_next = *previousNext; |
| 368 *previousNext = this; | 368 *previousNext = this; |
| 369 } | 369 } |
| 370 void unlink(BasePage** previousNext) | 370 void unlink(BaseHeapPage** previousNext) |
| 371 { | 371 { |
| 372 *previousNext = m_next; | 372 *previousNext = m_next; |
| 373 m_next = nullptr; | 373 m_next = nullptr; |
| 374 } | 374 } |
| 375 BasePage* next() const { return m_next; } | 375 BaseHeapPage* next() const { return m_next; } |
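link() and unlink() use the pointer-to-next idiom: by passing the address of the head pointer (or of any m_next field), the same code can push to or splice out of any position in the singly linked page list. A minimal standalone sketch (ours, not part of the patch):

    struct Node {
        Node* m_next = nullptr;
        void link(Node** previousNext) { m_next = *previousNext; *previousNext = this; }
        void unlink(Node** previousNext) { *previousNext = m_next; m_next = nullptr; }
    };

    void example()
    {
        Node a, b;
        Node* head = nullptr;
        b.link(&head);   // head -> b
        a.link(&head);   // head -> a -> b
        a.unlink(&head); // head -> b, and a.m_next is cleared
    }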
| 376 | 376 |
| 377 // Virtual methods are slow, so performance-sensitive methods | 377 // Virtual methods are slow, so performance-sensitive methods |
| 378 // should be defined as non-virtual methods on NormalPage and LargeObjectPage. | 378 // should be defined as non-virtual methods on HeapPage and LargeObject. |
| 379 // The following methods are not performance-sensitive. | 379 // The following methods are not performance-sensitive. |
| 380 virtual size_t objectPayloadSizeForTesting() = 0; | 380 virtual size_t objectPayloadSizeForTesting() = 0; |
| 381 virtual bool isEmpty() = 0; | 381 virtual bool isEmpty() = 0; |
| 382 virtual void removeFromHeap() = 0; | 382 virtual void removeFromHeap() = 0; |
| 383 virtual void sweep() = 0; | 383 virtual void sweep() = 0; |
| 384 virtual void markUnmarkedObjectsDead() = 0; | 384 virtual void markUnmarkedObjectsDead() = 0; |
| 385 // Check if the given address points to an object in this | 385 // Check if the given address points to an object in this |
| 386 // heap page. If so, find the start of that object and mark it | 386 // heap page. If so, find the start of that object and mark it |
| 387 // using the given Visitor. Otherwise do nothing. The pointer must | 387 // using the given Visitor. Otherwise do nothing. The pointer must |
| 388 // be within the same blinkPageSize-aligned region as the this-pointer. | 388 // be within the same blinkPageSize-aligned region as the this-pointer. |
| 389 // | 389 // |
| 390 // This is used during conservative stack scanning to | 390 // This is used during conservative stack scanning to |
| 391 // conservatively mark all objects that could be referenced from | 391 // conservatively mark all objects that could be referenced from |
| 392 // the stack. | 392 // the stack. |
| 393 virtual void checkAndMarkPointer(Visitor*, Address) = 0; | 393 virtual void checkAndMarkPointer(Visitor*, Address) = 0; |
| 394 virtual void markOrphaned(); | 394 virtual void markOrphaned(); |
| 395 #if ENABLE(GC_PROFILING) | 395 #if ENABLE(GC_PROFILING) |
| 396 virtual const GCInfo* findGCInfo(Address) = 0; | 396 virtual const GCInfo* findGCInfo(Address) = 0; |
| 397 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) = 0; | 397 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) = 0; |
| 398 #endif | 398 #endif |
| 399 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) | 399 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) |
| 400 virtual bool contains(Address) = 0; | 400 virtual bool contains(Address) = 0; |
| 401 #endif | 401 #endif |
| 402 virtual size_t size() = 0; | 402 virtual size_t size() = 0; |
| 403 virtual bool isLargeObjectPage() { return false; } | 403 virtual bool isLargeObject() { return false; } |
| 404 | 404 |
| 405 Address address() { return reinterpret_cast<Address>(this); } | 405 Address address() { return reinterpret_cast<Address>(this); } |
| 406 PageMemory* storage() const { return m_storage; } | 406 PageMemory* storage() const { return m_storage; } |
| 407 BaseHeap* heap() const { return m_heap; } | 407 ThreadHeap* heap() const { return m_heap; } |
| 408 bool orphaned() { return !m_heap; } | 408 bool orphaned() { return !m_heap; } |
| 409 bool terminating() { return m_terminating; } | 409 bool terminating() { return m_terminating; } |
| 410 void setTerminating() { m_terminating = true; } | 410 void setTerminating() { m_terminating = true; } |
| 411 | 411 |
| 412 // Returns true if this page has been swept by the ongoing lazy sweep. | 412 // Returns true if this page has been swept by the ongoing lazy sweep. |
| 413 bool hasBeenSwept() const { return m_swept; } | 413 bool hasBeenSwept() const { return m_swept; } |
| 414 | 414 |
| 415 void markAsSwept() | 415 void markAsSwept() |
| 416 { | 416 { |
| 417 ASSERT(!m_swept); | 417 ASSERT(!m_swept); |
| 418 m_swept = true; | 418 m_swept = true; |
| 419 } | 419 } |
| 420 | 420 |
| 421 void markAsUnswept() | 421 void markAsUnswept() |
| 422 { | 422 { |
| 423 ASSERT(m_swept); | 423 ASSERT(m_swept); |
| 424 m_swept = false; | 424 m_swept = false; |
| 425 } | 425 } |
| 426 | 426 |
| 427 private: | 427 private: |
| 428 PageMemory* m_storage; | 428 PageMemory* m_storage; |
| 429 BaseHeap* m_heap; | 429 ThreadHeap* m_heap; |
| 430 BasePage* m_next; | 430 BaseHeapPage* m_next; |
| 431 // Whether the page is part of a terminating thread or not. | 431 // Whether the page is part of a terminating thread or not. |
| 432 bool m_terminating; | 432 bool m_terminating; |
| 433 | 433 |
| 434 // Track the sweeping state of a page. Set to true once | 434 // Track the sweeping state of a page. Set to true once |
| 435 // lazy sweeping has processed it. | 435 // lazy sweeping has processed it. |
| 436 // | 436 // |
| 437 // Set to false at the start of a sweep, true upon completion | 437 // Set to false at the start of a sweep, true upon completion |
| 438 // of lazy sweeping. | 438 // of lazy sweeping. |
| 439 bool m_swept; | 439 bool m_swept; |
| 440 friend class BaseHeap; | 440 friend class ThreadHeap; |
| 441 }; | 441 }; |
| 442 | 442 |
| 443 class NormalPage final : public BasePage { | 443 class HeapPage final : public BaseHeapPage { |
| 444 public: | 444 public: |
| 445 NormalPage(PageMemory*, BaseHeap*); | 445 HeapPage(PageMemory*, ThreadHeap*); |
| 446 | 446 |
| 447 Address payload() | 447 Address payload() |
| 448 { | 448 { |
| 449 return address() + sizeof(NormalPage) + headerPadding(); | 449 return address() + sizeof(HeapPage) + headerPadding(); |
| 450 } | 450 } |
| 451 size_t payloadSize() | 451 size_t payloadSize() |
| 452 { | 452 { |
| 453 return (blinkPagePayloadSize() - sizeof(NormalPage) - headerPadding()) & ~allocationMask; | 453 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding()) & ~allocationMask; |
| 454 } | 454 } |
| 455 Address payloadEnd() { return payload() + payloadSize(); } | 455 Address payloadEnd() { return payload() + payloadSize(); } |
| 456 bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); } | 456 bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); } |
| 457 | 457 |
| 458 virtual size_t objectPayloadSizeForTesting() override; | 458 virtual size_t objectPayloadSizeForTesting() override; |
| 459 virtual bool isEmpty() override; | 459 virtual bool isEmpty() override; |
| 460 virtual void removeFromHeap() override; | 460 virtual void removeFromHeap() override; |
| 461 virtual void sweep() override; | 461 virtual void sweep() override; |
| 462 virtual void markUnmarkedObjectsDead() override; | 462 virtual void markUnmarkedObjectsDead() override; |
| 463 virtual void checkAndMarkPointer(Visitor*, Address) override; | 463 virtual void checkAndMarkPointer(Visitor*, Address) override; |
| 464 virtual void markOrphaned() override | 464 virtual void markOrphaned() override |
| 465 { | 465 { |
| 466 // Zap the payload with a recognizable value to detect any incorrect | 466 // Zap the payload with a recognizable value to detect any incorrect |
| 467 // cross thread pointer usage. | 467 // cross thread pointer usage. |
| 468 #if defined(ADDRESS_SANITIZER) | 468 #if defined(ADDRESS_SANITIZER) |
| 469 // This needs to zap poisoned memory as well. | 469 // This needs to zap poisoned memory as well. |
| 470 // Force unpoison memory before memset. | 470 // Force unpoison memory before memset. |
| 471 ASAN_UNPOISON_MEMORY_REGION(payload(), payloadSize()); | 471 ASAN_UNPOISON_MEMORY_REGION(payload(), payloadSize()); |
| 472 #endif | 472 #endif |
| 473 memset(payload(), orphanedZapValue, payloadSize()); | 473 memset(payload(), orphanedZapValue, payloadSize()); |
| 474 BasePage::markOrphaned(); | 474 BaseHeapPage::markOrphaned(); |
| 475 } | 475 } |
| 476 #if ENABLE(GC_PROFILING) | 476 #if ENABLE(GC_PROFILING) |
| 477 const GCInfo* findGCInfo(Address) override; | 477 const GCInfo* findGCInfo(Address) override; |
| 478 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*); | 478 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*); |
| 479 void incrementMarkedObjectsAge(); | 479 void incrementMarkedObjectsAge(); |
| 480 void countMarkedObjects(ClassAgeCountsMap&); | 480 void countMarkedObjects(ClassAgeCountsMap&); |
| 481 void countObjectsToSweep(ClassAgeCountsMap&); | 481 void countObjectsToSweep(ClassAgeCountsMap&); |
| 482 #endif | 482 #endif |
| 483 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) | 483 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) |
| 484 // Returns true for the whole blinkPageSize page that the page is on, even | 484 // Returns true for the whole blinkPageSize page that the page is on, even |
| 485 // for the header, and the unmapped guard page at the start. That ensures | 485 // for the header, and the unmapped guard page at the start. That ensures |
| 486 // the result can be used to populate the negative page cache. | 486 // the result can be used to populate the negative page cache. |
| 487 virtual bool contains(Address addr) override | 487 virtual bool contains(Address addr) override |
| 488 { | 488 { |
| 489 Address blinkPageStart = roundToBlinkPageStart(address()); | 489 Address blinkPageStart = roundToBlinkPageStart(address()); |
| 490 ASSERT(blinkPageStart == address() - WTF::kSystemPageSize); // Page is at aligned address plus guard page size. | 490 ASSERT(blinkPageStart == address() - WTF::kSystemPageSize); // Page is at aligned address plus guard page size. |
| 491 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; | 491 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; |
| 492 } | 492 } |
| 493 #endif | 493 #endif |
| 494 virtual size_t size() override { return blinkPageSize; } | 494 virtual size_t size() override { return blinkPageSize; } |
| 495 | 495 |
| 496 NormalPageHeap* heapForNormalPage(); | 496 ThreadHeapForHeapPage* heapForHeapPage(); |
| 497 void clearObjectStartBitMap(); | 497 void clearObjectStartBitMap(); |
| 498 | 498 |
| 499 #if defined(ADDRESS_SANITIZER) | 499 #if defined(ADDRESS_SANITIZER) |
| 500 void poisonUnmarkedObjects(); | 500 void poisonUnmarkedObjects(); |
| 501 #endif | 501 #endif |
| 502 | 502 |
| 503 // This method exists just to keep compilers from removing m_padding. | 503 // This method exists just to keep compilers from removing m_padding. |
| 504 uint64_t unusedMethod() const { return m_padding; } | 504 uint64_t unusedMethod() const { return m_padding; } |
| 505 | 505 |
| 506 private: | 506 private: |
| 507 HeapObjectHeader* findHeaderFromAddress(Address); | 507 HeapObjectHeader* findHeaderFromAddress(Address); |
| 508 void populateObjectStartBitMap(); | 508 void populateObjectStartBitMap(); |
| 509 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } | 509 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } |
| 510 | 510 |
| 511 bool m_objectStartBitMapComputed; | 511 bool m_objectStartBitMapComputed; |
| 512 uint8_t m_objectStartBitMap[reservedForObjectBitMap]; | 512 uint8_t m_objectStartBitMap[reservedForObjectBitMap]; |
| 513 uint64_t m_padding; // Preserve 8-byte alignment on 32-bit systems. | 513 uint64_t m_padding; // Preserve 8-byte alignment on 32-bit systems. |
| 514 }; | 514 }; |
| 515 | 515 |
| 516 // Large allocations are allocated as separate objects and linked in a list. | 516 // Large allocations are allocated as separate objects and linked in a list. |
| 517 // | 517 // |
| 518 // In order to use the same memory allocation routines for everything allocated | 518 // In order to use the same memory allocation routines for everything allocated |
| 519 // in the heap, large objects are considered heap pages containing only one | 519 // in the heap, large objects are considered heap pages containing only one |
| 520 // object. | 520 // object. |
| 521 class LargeObjectPage final : public BasePage { | 521 class LargeObject final : public BaseHeapPage { |
| 522 public: | 522 public: |
| 523 LargeObjectPage(PageMemory* storage, BaseHeap* heap, size_t payloadSize) | 523 LargeObject(PageMemory* storage, ThreadHeap* heap, size_t payloadSize) |
| 524 : BasePage(storage, heap) | 524 : BaseHeapPage(storage, heap) |
| 525 , m_payloadSize(payloadSize) | 525 , m_payloadSize(payloadSize) |
| 526 { | 526 { |
| 527 } | 527 } |
| 528 | 528 |
| 529 Address payload() { return heapObjectHeader()->payload(); } | 529 Address payload() { return heapObjectHeader()->payload(); } |
| 530 size_t payloadSize() { return m_payloadSize; } | 530 size_t payloadSize() { return m_payloadSize; } |
| 531 Address payloadEnd() { return payload() + payloadSize(); } | 531 Address payloadEnd() { return payload() + payloadSize(); } |
| 532 bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); } | 532 bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); } |
| 533 | 533 |
| 534 virtual size_t objectPayloadSizeForTesting() override; | 534 virtual size_t objectPayloadSizeForTesting() override; |
| 535 virtual bool isEmpty() override; | 535 virtual bool isEmpty() override; |
| 536 virtual void removeFromHeap() override; | 536 virtual void removeFromHeap() override; |
| 537 virtual void sweep() override; | 537 virtual void sweep() override; |
| 538 virtual void markUnmarkedObjectsDead() override; | 538 virtual void markUnmarkedObjectsDead() override; |
| 539 virtual void checkAndMarkPointer(Visitor*, Address) override; | 539 virtual void checkAndMarkPointer(Visitor*, Address) override; |
| 540 virtual void markOrphaned() override | 540 virtual void markOrphaned() override |
| 541 { | 541 { |
| 542 // Zap the payload with a recognizable value to detect any incorrect | 542 // Zap the payload with a recognizable value to detect any incorrect |
| 543 // cross thread pointer usage. | 543 // cross thread pointer usage. |
| 544 memset(payload(), orphanedZapValue, payloadSize()); | 544 memset(payload(), orphanedZapValue, payloadSize()); |
| 545 BasePage::markOrphaned(); | 545 BaseHeapPage::markOrphaned(); |
| 546 } | 546 } |
| 547 | 547 |
| 548 #if ENABLE(GC_PROFILING) | 548 #if ENABLE(GC_PROFILING) |
| 549 virtual const GCInfo* findGCInfo(Address) override; | 549 virtual const GCInfo* findGCInfo(Address) override; |
| 550 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override; | 550 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override; |
| 551 void incrementMarkedObjectsAge(); | 551 void incrementMarkedObjectsAge(); |
| 552 void countMarkedObjects(ClassAgeCountsMap&); | 552 void countMarkedObjects(ClassAgeCountsMap&); |
| 553 void countObjectsToSweep(ClassAgeCountsMap&); | 553 void countObjectsToSweep(ClassAgeCountsMap&); |
| 554 #endif | 554 #endif |
| 555 | 555 |
| 556 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) | 556 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) |
| 557 // Returns true for any address that is on one of the pages that this | 557 // Returns true for any address that is on one of the pages that this |
| 558 // large object uses. That ensures that we can use a negative result to | 558 // large object uses. That ensures that we can use a negative result to |
| 559 // populate the negative page cache. | 559 // populate the negative page cache. |
| 560 virtual bool contains(Address object) override | 560 virtual bool contains(Address object) override |
| 561 { | 561 { |
| 562 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size()); | 562 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size()); |
| 563 } | 563 } |
| 564 #endif | 564 #endif |
| 565 virtual size_t size() | 565 virtual size_t size() |
| 566 { | 566 { |
| 567 return sizeof(LargeObjectPage) + headerPadding() + sizeof(HeapObjectHeader) + m_payloadSize; | 567 return sizeof(LargeObject) + headerPadding() + sizeof(HeapObjectHeader) + m_payloadSize; |
| 568 } | 568 } |
| 569 virtual bool isLargeObjectPage() override { return true; } | 569 virtual bool isLargeObject() override { return true; } |
| 570 | 570 |
| 571 HeapObjectHeader* heapObjectHeader() | 571 HeapObjectHeader* heapObjectHeader() |
| 572 { | 572 { |
| 573 Address headerAddress = address() + sizeof(LargeObjectPage) + headerPadding(); | 573 Address headerAddress = address() + sizeof(LargeObject) + headerPadding(); |
| 574 return reinterpret_cast<HeapObjectHeader*>(headerAddress); | 574 return reinterpret_cast<HeapObjectHeader*>(headerAddress); |
| 575 } | 575 } |
| 576 | 576 |
| 577 // This method exists just to keep compilers from removing m_padding. | 577 // This method exists just to keep compilers from removing m_padding. |
| 578 uint64_t unusedMethod() const { return m_padding; } | 578 uint64_t unusedMethod() const { return m_padding; } |
| 579 | 579 |
| 580 private: | 580 private: |
| 581 size_t m_payloadSize; | 581 size_t m_payloadSize; |
| 582 uint64_t m_padding; // Preserve 8-byte alignment on 32-bit systems. | 582 uint64_t m_padding; // Preserve 8-byte alignment on 32-bit systems. |
| 583 }; | 583 }; |
| (...skipping 77 matching lines...) |
| 661 class FreePagePool : public PagePool<PageMemory> { | 661 class FreePagePool : public PagePool<PageMemory> { |
| 662 public: | 662 public: |
| 663 ~FreePagePool(); | 663 ~FreePagePool(); |
| 664 void addFreePage(int, PageMemory*); | 664 void addFreePage(int, PageMemory*); |
| 665 PageMemory* takeFreePage(int); | 665 PageMemory* takeFreePage(int); |
| 666 | 666 |
| 667 private: | 667 private: |
| 668 Mutex m_mutex[NumberOfHeaps]; | 668 Mutex m_mutex[NumberOfHeaps]; |
| 669 }; | 669 }; |
| 670 | 670 |
| 671 class OrphanedPagePool : public PagePool<BasePage> { | 671 class OrphanedPagePool : public PagePool<BaseHeapPage> { |
| 672 public: | 672 public: |
| 673 ~OrphanedPagePool(); | 673 ~OrphanedPagePool(); |
| 674 void addOrphanedPage(int, BasePage*); | 674 void addOrphanedPage(int, BaseHeapPage*); |
| 675 void decommitOrphanedPages(); | 675 void decommitOrphanedPages(); |
| 676 #if ENABLE(ASSERT) | 676 #if ENABLE(ASSERT) |
| 677 bool contains(void*); | 677 bool contains(void*); |
| 678 #endif | 678 #endif |
| 679 private: | 679 private: |
| 680 void clearMemory(PageMemory*); | 680 void clearMemory(PageMemory*); |
| 681 }; | 681 }; |
| 682 | 682 |
| 683 class FreeList { | 683 class FreeList { |
| 684 public: | 684 public: |
| (...skipping 16 matching lines...) |
| 701 | 701 |
| 702 void getFreeSizeStats(PerBucketFreeListStats bucketStats[], size_t& totalSize) const; | 702 void getFreeSizeStats(PerBucketFreeListStats bucketStats[], size_t& totalSize) const; |
| 703 #endif | 703 #endif |
| 704 | 704 |
| 705 private: | 705 private: |
| 706 int m_biggestFreeListIndex; | 706 int m_biggestFreeListIndex; |
| 707 | 707 |
| 708 // All FreeListEntries in the nth list have size >= 2^n. | 708 // All FreeListEntries in the nth list have size >= 2^n. |
| 709 FreeListEntry* m_freeLists[blinkPageSizeLog2]; | 709 FreeListEntry* m_freeLists[blinkPageSizeLog2]; |
| 710 | 710 |
| 711 friend class NormalPageHeap; | 711 friend class ThreadHeapForHeapPage; |
| 712 }; | 712 }; |
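Given the invariant that every entry in the nth list has size >= 2^n, an entry of |size| belongs in bucket floor(log2(size)). A sketch of that computation (ours; the real free list's bucketing code is not shown in this diff):

    #include <cstddef>

    int bucketIndexForSize(size_t size)
    {
        int index = -1;
        while (size) { // strip bits until none remain; counts the position of the highest set bit
            size >>= 1;
            ++index;
        }
        return index; // e.g. size 96 -> bucket 6, since 2^6 = 64 <= 96 < 128
    }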
| 713 | 713 |
| 714 // Thread heaps represent a part of the per-thread Blink heap. | 714 // Thread heaps represent a part of the per-thread Blink heap. |
| 715 // | 715 // |
| 716 // Each Blink thread has a number of thread heaps: one general heap | 716 // Each Blink thread has a number of thread heaps: one general heap |
| 717 // that contains any type of object and a number of heaps specialized | 717 // that contains any type of object and a number of heaps specialized |
| 718 // for specific object types (such as Node). | 718 // for specific object types (such as Node). |
| 719 // | 719 // |
| 720 // Each thread heap contains the functionality to allocate new objects | 720 // Each thread heap contains the functionality to allocate new objects |
| 721 // (potentially adding new pages to the heap), to find and mark | 721 // (potentially adding new pages to the heap), to find and mark |
| 722 // objects during conservative stack scanning and to sweep the set of | 722 // objects during conservative stack scanning and to sweep the set of |
| 723 // pages after a GC. | 723 // pages after a GC. |
| 724 class PLATFORM_EXPORT BaseHeap { | 724 class PLATFORM_EXPORT ThreadHeap { |
| 725 public: | 725 public: |
| 726 BaseHeap(ThreadState*, int); | 726 ThreadHeap(ThreadState*, int); |
| 727 virtual ~BaseHeap(); | 727 virtual ~ThreadHeap(); |
| 728 void cleanupPages(); | 728 void cleanupPages(); |
| 729 | 729 |
| 730 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) | 730 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) |
| 731 BasePage* findPageFromAddress(Address); | 731 BaseHeapPage* findPageFromAddress(Address); |
| 732 #endif | 732 #endif |
| 733 #if ENABLE(GC_PROFILING) | 733 #if ENABLE(GC_PROFILING) |
| 734 void snapshot(TracedValue*, ThreadState::SnapshotInfo*); | 734 void snapshot(TracedValue*, ThreadState::SnapshotInfo*); |
| 735 void incrementMarkedObjectsAge(); | 735 void incrementMarkedObjectsAge(); |
| 736 #endif | 736 #endif |
| 737 | 737 |
| 738 virtual void clearFreeLists() { } | 738 virtual void clearFreeLists() { } |
| 739 void makeConsistentForSweeping(); | 739 void makeConsistentForSweeping(); |
| 740 #if ENABLE(ASSERT) | 740 #if ENABLE(ASSERT) |
| 741 virtual bool isConsistentForSweeping() = 0; | 741 virtual bool isConsistentForSweeping() = 0; |
| 742 #endif | 742 #endif |
| 743 size_t objectPayloadSizeForTesting(); | 743 size_t objectPayloadSizeForTesting(); |
| 744 void prepareHeapForTermination(); | 744 void prepareHeapForTermination(); |
| 745 void prepareForSweep(); | 745 void prepareForSweep(); |
| 746 Address lazySweep(size_t, size_t gcInfoIndex); | 746 Address lazySweep(size_t, size_t gcInfoIndex); |
| 747 void completeSweep(); | 747 void completeSweep(); |
| 748 | 748 |
| 749 ThreadState* threadState() { return m_threadState; } | 749 ThreadState* threadState() { return m_threadState; } |
| 750 int heapIndex() const { return m_index; } | 750 int heapIndex() const { return m_index; } |
| 751 inline static size_t allocationSizeFromSize(size_t); | 751 inline static size_t allocationSizeFromSize(size_t); |
| 752 inline static size_t roundedAllocationSize(size_t size) | 752 inline static size_t roundedAllocationSize(size_t size) |
| 753 { | 753 { |
| 754 return allocationSizeFromSize(size) - sizeof(HeapObjectHeader); | 754 return allocationSizeFromSize(size) - sizeof(HeapObjectHeader); |
| 755 } | 755 } |
| 756 | 756 |
| 757 protected: | 757 protected: |
| 758 BasePage* m_firstPage; | 758 BaseHeapPage* m_firstPage; |
| 759 BasePage* m_firstUnsweptPage; | 759 BaseHeapPage* m_firstUnsweptPage; |
| 760 | 760 |
| 761 private: | 761 private: |
| 762 virtual Address lazySweepPages(size_t, size_t gcInfoIndex) = 0; | 762 virtual Address lazySweepPages(size_t, size_t gcInfoIndex) = 0; |
| 763 | 763 |
| 764 ThreadState* m_threadState; | 764 ThreadState* m_threadState; |
| 765 | 765 |
| 766 // Index into the page pools. This is used to ensure that the pages of the | 766 // Index into the page pools. This is used to ensure that the pages of the |
| 767 // same type go into the correct page pool and thus avoid type confusion. | 767 // same type go into the correct page pool and thus avoid type confusion. |
| 768 int m_index; | 768 int m_index; |
| 769 }; | 769 }; |
| 770 | 770 |
| 771 class PLATFORM_EXPORT NormalPageHeap final : public BaseHeap { | 771 class PLATFORM_EXPORT ThreadHeapForHeapPage final : public ThreadHeap { |
| 772 public: | 772 public: |
| 773 NormalPageHeap(ThreadState*, int); | 773 ThreadHeapForHeapPage(ThreadState*, int); |
| 774 void addToFreeList(Address address, size_t size) | 774 void addToFreeList(Address address, size_t size) |
| 775 { | 775 { |
| 776 ASSERT(findPageFromAddress(address)); | 776 ASSERT(findPageFromAddress(address)); |
| 777 ASSERT(findPageFromAddress(address + size - 1)); | 777 ASSERT(findPageFromAddress(address + size - 1)); |
| 778 m_freeList.addToFreeList(address, size); | 778 m_freeList.addToFreeList(address, size); |
| 779 } | 779 } |
| 780 virtual void clearFreeLists() override; | 780 virtual void clearFreeLists() override; |
| 781 #if ENABLE(ASSERT) | 781 #if ENABLE(ASSERT) |
| 782 virtual bool isConsistentForSweeping() override; | 782 virtual bool isConsistentForSweeping() override; |
| 783 bool pagesToBeSweptContains(Address); | 783 bool pagesToBeSweptContains(Address); |
| 784 #endif | 784 #endif |
| 785 | 785 |
| 786 inline Address allocate(size_t payloadSize, size_t gcInfoIndex); | 786 inline Address allocate(size_t payloadSize, size_t gcInfoIndex); |
| 787 inline Address allocateObject(size_t allocationSize, size_t gcInfoIndex); | 787 inline Address allocateObject(size_t allocationSize, size_t gcInfoIndex); |
| 788 | 788 |
| 789 void freePage(NormalPage*); | 789 void freePage(HeapPage*); |
| 790 | 790 |
| 791 bool coalesce(); | 791 bool coalesce(); |
| 792 void promptlyFreeObject(HeapObjectHeader*); | 792 void promptlyFreeObject(HeapObjectHeader*); |
| 793 bool expandObject(HeapObjectHeader*, size_t); | 793 bool expandObject(HeapObjectHeader*, size_t); |
| 794 void shrinkObject(HeapObjectHeader*, size_t); | 794 void shrinkObject(HeapObjectHeader*, size_t); |
| 795 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; } | 795 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; } |
| 796 | 796 |
| 797 #if ENABLE(GC_PROFILING) | 797 #if ENABLE(GC_PROFILING) |
| 798 void snapshotFreeList(TracedValue&); | 798 void snapshotFreeList(TracedValue&); |
| 799 | 799 |
| (...skipping 20 matching lines...) |
| 820 // The size of promptly freed objects in the heap. | 820 // The size of promptly freed objects in the heap. |
| 821 size_t m_promptlyFreedSize; | 821 size_t m_promptlyFreedSize; |
| 822 | 822 |
| 823 #if ENABLE(GC_PROFILING) | 823 #if ENABLE(GC_PROFILING) |
| 824 size_t m_cumulativeAllocationSize; | 824 size_t m_cumulativeAllocationSize; |
| 825 size_t m_allocationCount; | 825 size_t m_allocationCount; |
| 826 size_t m_inlineAllocationCount; | 826 size_t m_inlineAllocationCount; |
| 827 #endif | 827 #endif |
| 828 }; | 828 }; |
| 829 | 829 |
| 830 class LargeObjectHeap final : public BaseHeap { | 830 class ThreadHeapForLargeObject final : public ThreadHeap { |
| 831 public: | 831 public: |
| 832 LargeObjectHeap(ThreadState*, int); | 832 ThreadHeapForLargeObject(ThreadState*, int); |
| 833 Address allocateLargeObjectPage(size_t, size_t gcInfoIndex); | 833 Address allocateLargeObject(size_t, size_t gcInfoIndex); |
| 834 void freeLargeObjectPage(LargeObjectPage*); | 834 void freeLargeObject(LargeObject*); |
| 835 #if ENABLE(ASSERT) | 835 #if ENABLE(ASSERT) |
| 836 virtual bool isConsistentForSweeping() override { return true; } | 836 virtual bool isConsistentForSweeping() override { return true; } |
| 837 #endif | 837 #endif |
| 838 private: | 838 private: |
| 839 Address doAllocateLargeObjectPage(size_t, size_t gcInfoIndex); | 839 Address doAllocateLargeObject(size_t, size_t gcInfoIndex); |
| 840 virtual Address lazySweepPages(size_t, size_t gcInfoIndex) override; | 840 virtual Address lazySweepPages(size_t, size_t gcInfoIndex) override; |
| 841 }; | 841 }; |
| 842 | 842 |
| 843 // Mask an address down to the enclosing oilpan heap base page. All oilpan heap | 843 // Mask an address down to the enclosing oilpan heap base page. All oilpan heap |
| 844 // pages are aligned at blinkPageBase plus an OS page size. | 844 // pages are aligned at blinkPageBase plus an OS page size. |
| 845 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our | 845 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our |
| 846 // typed heaps. This is only exported to enable tests in HeapTest.cpp. | 846 // typed heaps. This is only exported to enable tests in HeapTest.cpp. |
| 847 PLATFORM_EXPORT inline BasePage* pageFromObject(const void* object) | 847 PLATFORM_EXPORT inline BaseHeapPage* pageFromObject(const void* object) |
| 848 { | 848 { |
| 849 Address address = reinterpret_cast<Address>(const_cast<void*>(object)); | 849 Address address = reinterpret_cast<Address>(const_cast<void*>(object)); |
| 850 BasePage* page = reinterpret_cast<BasePage*>(blinkPageAddress(address) + WTF::kSystemPageSize); | 850 BaseHeapPage* page = reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + WTF::kSystemPageSize); |
| 851 ASSERT(page->contains(address)); | 851 ASSERT(page->contains(address)); |
| 852 return page; | 852 return page; |
| 853 } | 853 } |
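A worked example of the masking above (values ours, assuming blinkPageSize == 2^17 and a 4 KB OS page):

    #include <cstdint>

    // An object at 0x5A34C568 lies in the blink page based at 0x5A340000;
    // its page header sits one guard page past that base.
    uintptr_t object = 0x5A34C568;
    uintptr_t base   = object & ~((uintptr_t(1) << 17) - 1); // 0x5A340000 (blinkPageAddress)
    uintptr_t header = base + 0x1000;                        // 0x5A341000, the page object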
| 854 | 854 |
| 855 class PLATFORM_EXPORT Heap { | 855 class PLATFORM_EXPORT Heap { |
| 856 public: | 856 public: |
| 857 static void init(); | 857 static void init(); |
| 858 static void shutdown(); | 858 static void shutdown(); |
| 859 static void doShutdown(); | 859 static void doShutdown(); |
| 860 | 860 |
| 861 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) | 861 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) |
| 862 static BasePage* findPageFromAddress(Address); | 862 static BaseHeapPage* findPageFromAddress(Address); |
| 863 static BasePage* findPageFromAddress(void* pointer) { return findPageFromAddress(reinterpret_cast<Address>(pointer)); } | 863 static BaseHeapPage* findPageFromAddress(void* pointer) { return findPageFromAddress(reinterpret_cast<Address>(pointer)); } |
| 864 static bool containedInHeapOrOrphanedPage(void*); | 864 static bool containedInHeapOrOrphanedPage(void*); |
| 865 #endif | 865 #endif |
| 866 | 866 |
| 867 // Is the finalizable GC object still alive, but slated for lazy sweeping? | 867 // Is the finalizable GC object still alive, but slated for lazy sweeping? |
| 868 // If a lazy sweep is in progress, returns true if the object was found | 868 // If a lazy sweep is in progress, returns true if the object was found |
| 869 // to be not reachable during the marking phase, but it has yet to be swept | 869 // to be not reachable during the marking phase, but it has yet to be swept |
| 870 // and finalized. The predicate returns false in all other cases. | 870 // and finalized. The predicate returns false in all other cases. |
| 871 // | 871 // |
| 872 // Holding a reference to an already-dead object is not a valid state | 872 // Holding a reference to an already-dead object is not a valid state |
| 873 // to be in; willObjectBeLazilySwept() has undefined behavior if passed | 873 // to be in; willObjectBeLazilySwept() has undefined behavior if passed |
| 874 // such a reference. | 874 // such a reference. |
| 875 template<typename T> | 875 template<typename T> |
| 876 static bool willObjectBeLazilySwept(const T* objectPointer) | 876 static bool willObjectBeLazilySwept(const T* objectPointer) |
| 877 { | 877 { |
| 878 static_assert(IsGarbageCollectedType<T>::value, "only objects deriving f
rom GarbageCollected can be used."); | 878 static_assert(IsGarbageCollectedType<T>::value, "only objects deriving f
rom GarbageCollected can be used."); |
| 879 #if ENABLE(OILPAN) | 879 #if ENABLE(OILPAN) |
| 880 BasePage* page = pageFromObject(objectPointer); | 880 BaseHeapPage* page = pageFromObject(objectPointer); |
| 881 if (page->hasBeenSwept()) | 881 if (page->hasBeenSwept()) |
| 882 return false; | 882 return false; |
| 883 ASSERT(page->heap()->threadState()->isSweepingInProgress()); | 883 ASSERT(page->heap()->threadState()->isSweepingInProgress()); |
| 884 | 884 |
| 885 return !ObjectAliveTrait<T>::isHeapObjectAlive(s_markingVisitor, const_cast<T*>(objectPointer)); | 885 return !ObjectAliveTrait<T>::isHeapObjectAlive(s_markingVisitor, const_cast<T*>(objectPointer)); |
| 886 #else | 886 #else |
| 887 // FIXME: remove when lazy sweeping is always on | 887 // FIXME: remove when lazy sweeping is always on |
| 888 // (cf. ThreadState::postGCProcessing()). | 888 // (cf. ThreadState::postGCProcessing()). |
| 889 return false; | 889 return false; |
| 890 #endif | 890 #endif |
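A hypothetical usage sketch (ours; MyObject, m_sibling, and notifyDisposed are made-up names) showing where the predicate earns its keep, in a finalizer that must not touch an object the ongoing lazy sweep has already condemned:

    void MyObject::dispose()
    {
        // m_sibling may be unmarked and merely awaiting lazy sweeping;
        // notifying it from this finalizer would touch a dead object.
        if (m_sibling && !Heap::willObjectBeLazilySwept(m_sibling))
            m_sibling->notifyDisposed(this);
    }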
| (...skipping 82 matching lines...) |
| 973 // Return true if the last GC found a pointer into a heap page | 973 // Return true if the last GC found a pointer into a heap page |
| 974 // during conservative scanning. | 974 // during conservative scanning. |
| 975 static bool lastGCWasConservative() { return s_lastGCWasConservative; } | 975 static bool lastGCWasConservative() { return s_lastGCWasConservative; } |
| 976 | 976 |
| 977 static FreePagePool* freePagePool() { return s_freePagePool; } | 977 static FreePagePool* freePagePool() { return s_freePagePool; } |
| 978 static OrphanedPagePool* orphanedPagePool() { return s_orphanedPagePool; } | 978 static OrphanedPagePool* orphanedPagePool() { return s_orphanedPagePool; } |
| 979 | 979 |
| 980 // This look-up uses the region search tree and a negative contains cache to | 980 // This look-up uses the region search tree and a negative contains cache to |
| 981 // provide an efficient mapping from arbitrary addresses to the containing | 981 // provide an efficient mapping from arbitrary addresses to the containing |
| 982 // heap-page if one exists. | 982 // heap-page if one exists. |
| 983 static BasePage* lookup(Address); | 983 static BaseHeapPage* lookup(Address); |
| 984 static void addPageMemoryRegion(PageMemoryRegion*); | 984 static void addPageMemoryRegion(PageMemoryRegion*); |
| 985 static void removePageMemoryRegion(PageMemoryRegion*); | 985 static void removePageMemoryRegion(PageMemoryRegion*); |
| 986 | 986 |
| 987 static const GCInfo* gcInfo(size_t gcInfoIndex) | 987 static const GCInfo* gcInfo(size_t gcInfoIndex) |
| 988 { | 988 { |
| 989 ASSERT(gcInfoIndex >= 1); | 989 ASSERT(gcInfoIndex >= 1); |
| 990 ASSERT(gcInfoIndex < GCInfoTable::maxIndex); | 990 ASSERT(gcInfoIndex < GCInfoTable::maxIndex); |
| 991 ASSERT(s_gcInfoTable); | 991 ASSERT(s_gcInfoTable); |
| 992 const GCInfo* info = s_gcInfoTable[gcInfoIndex]; | 992 const GCInfo* info = s_gcInfoTable[gcInfoIndex]; |
| 993 ASSERT(info); | 993 ASSERT(info); |
| (...skipping 268 matching lines...) |
| 1262 #define STACK_ALLOCATED() DISALLOW_ALLOCATION() | 1262 #define STACK_ALLOCATED() DISALLOW_ALLOCATION() |
| 1263 #define GC_PLUGIN_IGNORE(bug) | 1263 #define GC_PLUGIN_IGNORE(bug) |
| 1264 #endif | 1264 #endif |
| 1265 | 1265 |
| 1266 NO_SANITIZE_ADDRESS inline | 1266 NO_SANITIZE_ADDRESS inline |
| 1267 size_t HeapObjectHeader::size() const | 1267 size_t HeapObjectHeader::size() const |
| 1268 { | 1268 { |
| 1269 size_t result = m_encoded & headerSizeMask; | 1269 size_t result = m_encoded & headerSizeMask; |
| 1270 // Large objects should not refer to header->size(). | 1270 // Large objects should not refer to header->size(). |
| 1271 // The actual size of a large object is stored in | 1271 // The actual size of a large object is stored in |
| 1272 // LargeObjectPage::m_payloadSize. | 1272 // LargeObject::m_payloadSize. |
| 1273 ASSERT(result != largeObjectSizeInHeader); | 1273 ASSERT(result != largeObjectSizeInHeader); |
| 1274 ASSERT(!pageFromObject(this)->isLargeObjectPage()); | 1274 ASSERT(!pageFromObject(this)->isLargeObject()); |
| 1275 return result; | 1275 return result; |
| 1276 } | 1276 } |
| 1277 | 1277 |
| 1278 NO_SANITIZE_ADDRESS | 1278 NO_SANITIZE_ADDRESS |
| 1279 void HeapObjectHeader::checkHeader() const | 1279 void HeapObjectHeader::checkHeader() const |
| 1280 { | 1280 { |
| 1281 ASSERT(pageFromObject(this)->orphaned() || m_magic == magic); | 1281 ASSERT(pageFromObject(this)->orphaned() || m_magic == magic); |
| 1282 } | 1282 } |
| 1283 | 1283 |
| 1284 Address HeapObjectHeader::payload() | 1284 Address HeapObjectHeader::payload() |
| 1285 { | 1285 { |
| 1286 return reinterpret_cast<Address>(this) + sizeof(HeapObjectHeader); | 1286 return reinterpret_cast<Address>(this) + sizeof(HeapObjectHeader); |
| 1287 } | 1287 } |
| 1288 | 1288 |
| 1289 Address HeapObjectHeader::payloadEnd() | 1289 Address HeapObjectHeader::payloadEnd() |
| 1290 { | 1290 { |
| 1291 return reinterpret_cast<Address>(this) + size(); | 1291 return reinterpret_cast<Address>(this) + size(); |
| 1292 } | 1292 } |
| 1293 | 1293 |
| 1294 NO_SANITIZE_ADDRESS inline | 1294 NO_SANITIZE_ADDRESS inline |
| 1295 size_t HeapObjectHeader::payloadSize() | 1295 size_t HeapObjectHeader::payloadSize() |
| 1296 { | 1296 { |
| 1297 size_t size = m_encoded & headerSizeMask; | 1297 size_t size = m_encoded & headerSizeMask; |
| 1298 if (UNLIKELY(size == largeObjectSizeInHeader)) { | 1298 if (UNLIKELY(size == largeObjectSizeInHeader)) { |
| 1299 ASSERT(pageFromObject(this)->isLargeObjectPage()); | 1299 ASSERT(pageFromObject(this)->isLargeObject()); |
| 1300 return static_cast<LargeObjectPage*>(pageFromObject(this))->payloadSize(); | 1300 return static_cast<LargeObject*>(pageFromObject(this))->payloadSize(); |
| 1301 } | 1301 } |
| 1302 ASSERT(!pageFromObject(this)->isLargeObjectPage()); | 1302 ASSERT(!pageFromObject(this)->isLargeObject()); |
| 1303 return size - sizeof(HeapObjectHeader); | 1303 return size - sizeof(HeapObjectHeader); |
| 1304 } | 1304 } |
| 1305 | 1305 |
| 1306 NO_SANITIZE_ADDRESS inline | 1306 NO_SANITIZE_ADDRESS inline |
| 1307 bool HeapObjectHeader::isMarked() const | 1307 bool HeapObjectHeader::isMarked() const |
| 1308 { | 1308 { |
| 1309 checkHeader(); | 1309 checkHeader(); |
| 1310 return m_encoded & headerMarkBitMask; | 1310 return m_encoded & headerMarkBitMask; |
| 1311 } | 1311 } |
| 1312 | 1312 |
| (...skipping 21 matching lines...) |
| 1334 } | 1334 } |
| 1335 | 1335 |
| 1336 NO_SANITIZE_ADDRESS inline | 1336 NO_SANITIZE_ADDRESS inline |
| 1337 void HeapObjectHeader::markDead() | 1337 void HeapObjectHeader::markDead() |
| 1338 { | 1338 { |
| 1339 checkHeader(); | 1339 checkHeader(); |
| 1340 ASSERT(!isMarked()); | 1340 ASSERT(!isMarked()); |
| 1341 m_encoded |= headerDeadBitMask; | 1341 m_encoded |= headerDeadBitMask; |
| 1342 } | 1342 } |
| 1343 | 1343 |
| 1344 size_t BaseHeap::allocationSizeFromSize(size_t size) | 1344 size_t ThreadHeap::allocationSizeFromSize(size_t size) |
| 1345 { | 1345 { |
| 1346 // Check the size before computing the actual allocation size. The | 1346 // Check the size before computing the actual allocation size. The |
| 1347 // allocation size calculation can overflow for large sizes and the check | 1347 // allocation size calculation can overflow for large sizes and the check |
| 1348 // therefore has to happen before any calculation on the size. | 1348 // therefore has to happen before any calculation on the size. |
| 1349 RELEASE_ASSERT(size < maxHeapObjectSize); | 1349 RELEASE_ASSERT(size < maxHeapObjectSize); |
| 1350 | 1350 |
| 1351 // Add space for header. | 1351 // Add space for header. |
| 1352 size_t allocationSize = size + sizeof(HeapObjectHeader); | 1352 size_t allocationSize = size + sizeof(HeapObjectHeader); |
| 1353 // Align size with allocation granularity. | 1353 // Align size with allocation granularity. |
| 1354 allocationSize = (allocationSize + allocationMask) & ~allocationMask; | 1354 allocationSize = (allocationSize + allocationMask) & ~allocationMask; |
| 1355 return allocationSize; | 1355 return allocationSize; |
| 1356 } | 1356 } |
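A worked example (ours) of the rounding, assuming the usual 8-byte allocation granularity (allocationMask == 7):

    // allocationSizeFromSize(20):
    //   20 + sizeof(HeapObjectHeader) = 20 + 8 = 28
    //   (28 + 7) & ~7                 = 32
    // roundedAllocationSize(20) = 32 - 8 = 24 usable payload bytes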
| 1357 | 1357 |
| 1358 Address NormalPageHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex) | 1358 Address ThreadHeapForHeapPage::allocateObject(size_t allocationSize, size_t gcInfoIndex) |
| 1359 { | 1359 { |
| 1360 #if ENABLE(GC_PROFILING) | 1360 #if ENABLE(GC_PROFILING) |
| 1361 m_cumulativeAllocationSize += allocationSize; | 1361 m_cumulativeAllocationSize += allocationSize; |
| 1362 ++m_allocationCount; | 1362 ++m_allocationCount; |
| 1363 #endif | 1363 #endif |
| 1364 | 1364 |
| 1365 if (LIKELY(allocationSize <= m_remainingAllocationSize)) { | 1365 if (LIKELY(allocationSize <= m_remainingAllocationSize)) { |
| 1366 #if ENABLE(GC_PROFILING) | 1366 #if ENABLE(GC_PROFILING) |
| 1367 ++m_inlineAllocationCount; | 1367 ++m_inlineAllocationCount; |
| 1368 #endif | 1368 #endif |
| 1369 Address headerAddress = m_currentAllocationPoint; | 1369 Address headerAddress = m_currentAllocationPoint; |
| 1370 m_currentAllocationPoint += allocationSize; | 1370 m_currentAllocationPoint += allocationSize; |
| 1371 m_remainingAllocationSize -= allocationSize; | 1371 m_remainingAllocationSize -= allocationSize; |
| 1372 ASSERT(gcInfoIndex > 0); | 1372 ASSERT(gcInfoIndex > 0); |
| 1373 new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex); | 1373 new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex); |
| 1374 Address result = headerAddress + sizeof(HeapObjectHeader); | 1374 Address result = headerAddress + sizeof(HeapObjectHeader); |
| 1375 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 1375 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
| 1376 | 1376 |
| 1377 // Unpoison the memory used for the object (payload). | 1377 // Unpoison the memory used for the object (payload). |
| 1378 ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(HeapObjectHeader)); | 1378 ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(HeapObjectHeader)); |
| 1379 FILL_ZERO_IF_NOT_PRODUCTION(result, allocationSize - sizeof(HeapObjectHeader)); | 1379 FILL_ZERO_IF_NOT_PRODUCTION(result, allocationSize - sizeof(HeapObjectHeader)); |
| 1380 ASSERT(findPageFromAddress(headerAddress + allocationSize - 1)); | 1380 ASSERT(findPageFromAddress(headerAddress + allocationSize - 1)); |
| 1381 return result; | 1381 return result; |
| 1382 } | 1382 } |
| 1383 return outOfLineAllocate(allocationSize, gcInfoIndex); | 1383 return outOfLineAllocate(allocationSize, gcInfoIndex); |
| 1384 } | 1384 } |
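The fast path above is a classic bump-pointer allocator. A condensed standalone sketch (ours) of just that logic, with the allocation point and remaining size passed explicitly instead of living in the heap object, and header construction omitted:

    #include <cstddef>
    #include <cstdint>

    using Address = uint8_t*;
    const size_t headerSize = 8; // sizeof(HeapObjectHeader) on 64-bit

    // Carve |allocationSize| off the front of the current contiguous run,
    // or return nullptr so the caller can take the out-of-line slow path.
    Address bumpAllocate(size_t allocationSize, Address& currentPoint, size_t& remaining)
    {
        if (allocationSize > remaining)
            return nullptr;
        Address headerAddress = currentPoint;
        currentPoint += allocationSize;
        remaining -= allocationSize;
        return headerAddress + headerSize; // payload starts after the header
    }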
| 1385 | 1385 |
| 1386 Address NormalPageHeap::allocate(size_t size, size_t gcInfoIndex) | 1386 Address ThreadHeapForHeapPage::allocate(size_t size, size_t gcInfoIndex) |
| 1387 { | 1387 { |
| 1388 return allocateObject(allocationSizeFromSize(size), gcInfoIndex); | 1388 return allocateObject(allocationSizeFromSize(size), gcInfoIndex); |
| 1389 } | 1389 } |
| 1390 | 1390 |
| 1391 template<typename T> | 1391 template<typename T> |
| 1392 struct HeapIndexTrait { | 1392 struct HeapIndexTrait { |
| 1393 static int index() { return NormalPageHeapIndex; }; | 1393 static int index() { return GeneralHeap; }; |
| 1394 }; | 1394 }; |
| 1395 | 1395 |
| 1396 // FIXME: The forward declaration is layering violation. | 1396 // FIXME: The forward declaration is layering violation. |
| 1397 #define DEFINE_TYPED_HEAP_TRAIT(Type) \ | 1397 #define DEFINE_TYPED_HEAP_TRAIT(Type) \ |
| 1398 class Type; \ | 1398 class Type; \ |
| 1399 template<> \ | 1399 template<> \ |
| 1400 struct HeapIndexTrait<class Type> { \ | 1400 struct HeapIndexTrait<class Type> { \ |
| 1401 static int index() { return Type##Heap; }; \ | 1401 static int index() { return Type##Heap; }; \ |
| 1402 }; | 1402 }; |
| 1403 FOR_EACH_TYPED_HEAP(DEFINE_TYPED_HEAP_TRAIT) | 1403 FOR_EACH_TYPED_HEAP(DEFINE_TYPED_HEAP_TRAIT) |
| 1404 #undef DEFINE_TYPED_HEAP_TRAIT | 1404 #undef DEFINE_TYPED_HEAP_TRAIT |
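For reference, DEFINE_TYPED_HEAP_TRAIT(Node) expands to the following (assuming Node is among the FOR_EACH_TYPED_HEAP types); this is what routes Heap::allocate<Node>() to the Node-specific heap index instead of the general one:

    class Node;
    template<>
    struct HeapIndexTrait<class Node> {
        static int index() { return NodeHeap; };
    };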
| 1405 | 1405 |
| 1406 template<typename T> | 1406 template<typename T> |
| 1407 Address Heap::allocateOnHeapIndex(size_t size, int heapIndex, size_t gcInfoIndex) | 1407 Address Heap::allocateOnHeapIndex(size_t size, int heapIndex, size_t gcInfoIndex) |
| 1408 { | 1408 { |
| 1409 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); | 1409 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); |
| 1410 ASSERT(state->isAllocationAllowed()); | 1410 ASSERT(state->isAllocationAllowed()); |
| 1411 return static_cast<NormalPageHeap*>(state->heap(heapIndex))->allocate(size, gcInfoIndex); | 1411 return static_cast<ThreadHeapForHeapPage*>(state->heap(heapIndex))->allocate(size, gcInfoIndex); |
| 1412 } | 1412 } |
| 1413 | 1413 |
| 1414 template<typename T> | 1414 template<typename T> |
| 1415 Address Heap::allocate(size_t size) | 1415 Address Heap::allocate(size_t size) |
| 1416 { | 1416 { |
| 1417 return allocateOnHeapIndex<T>(size, HeapIndexTrait<T>::index(), GCInfoTrait<T>::index()); | 1417 return allocateOnHeapIndex<T>(size, HeapIndexTrait<T>::index(), GCInfoTrait<T>::index()); |
| 1418 } | 1418 } |
| 1419 | 1419 |
| 1420 template<typename T> | 1420 template<typename T> |
| 1421 Address Heap::reallocate(void* previous, size_t size) | 1421 Address Heap::reallocate(void* previous, size_t size) |
| (...skipping 18 matching lines...) |
| 1440 memcpy(address, previous, copySize); | 1440 memcpy(address, previous, copySize); |
| 1441 return address; | 1441 return address; |
| 1442 } | 1442 } |
| 1443 | 1443 |
| 1444 class HeapAllocatorQuantizer { | 1444 class HeapAllocatorQuantizer { |
| 1445 public: | 1445 public: |
| 1446 template<typename T> | 1446 template<typename T> |
| 1447 static size_t quantizedSize(size_t count) | 1447 static size_t quantizedSize(size_t count) |
| 1448 { | 1448 { |
| 1449 RELEASE_ASSERT(count <= kMaxUnquantizedAllocation / sizeof(T)); | 1449 RELEASE_ASSERT(count <= kMaxUnquantizedAllocation / sizeof(T)); |
| 1450 return BaseHeap::roundedAllocationSize(count * sizeof(T)); | 1450 return ThreadHeap::roundedAllocationSize(count * sizeof(T)); |
| 1451 } | 1451 } |
| 1452 static const size_t kMaxUnquantizedAllocation = maxHeapObjectSize; | 1452 static const size_t kMaxUnquantizedAllocation = maxHeapObjectSize; |
| 1453 }; | 1453 }; |
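A worked example (ours) for a 4-byte element type, reusing the numbers from the allocationSizeFromSize() example above:

    // quantizedSize<int32_t>(5):
    //   5 * 4 = 20 payload bytes -> allocation rounds to 32 (8-byte header + alignment)
    //   -> 32 - 8 = 24 usable bytes, i.e. room for 6 elements, not 5.
    // Collections use this to grow capacity to the quantized size for free.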
| 1454 | 1454 |
| 1455 // This is a static-only class used as a trait on collections to make them heap | 1455 // This is a static-only class used as a trait on collections to make them heap |
| 1456 // allocated. However see also HeapListHashSetAllocator. | 1456 // allocated. However see also HeapListHashSetAllocator. |
| 1457 class HeapAllocator { | 1457 class HeapAllocator { |
| 1458 public: | 1458 public: |
| 1459 using Quantizer = HeapAllocatorQuantizer; | 1459 using Quantizer = HeapAllocatorQuantizer; |
| 1460 using Visitor = blink::Visitor; | 1460 using Visitor = blink::Visitor; |
| 1461 static const bool isGarbageCollected = true; | 1461 static const bool isGarbageCollected = true; |
| 1462 | 1462 |
| 1463 template <typename T> | 1463 template <typename T> |
| 1464 static T* allocateVectorBacking(size_t size) | 1464 static T* allocateVectorBacking(size_t size) |
| 1465 { | 1465 { |
| 1466 size_t gcInfoIndex = GCInfoTrait<HeapVectorBacking<T, VectorTraits<T>>>::index(); | 1466 size_t gcInfoIndex = GCInfoTrait<HeapVectorBacking<T, VectorTraits<T>>>::index(); |
| 1467 return reinterpret_cast<T*>(Heap::allocateOnHeapIndex<T>(size, VectorHeapIndex, gcInfoIndex)); | 1467 return reinterpret_cast<T*>(Heap::allocateOnHeapIndex<T>(size, VectorBackingHeap, gcInfoIndex)); |
| 1468 } | 1468 } |
| 1469 PLATFORM_EXPORT static void freeVectorBacking(void* address); | 1469 PLATFORM_EXPORT static void freeVectorBacking(void* address); |
| 1470 PLATFORM_EXPORT static bool expandVectorBacking(void*, size_t); | 1470 PLATFORM_EXPORT static bool expandVectorBacking(void*, size_t); |
| 1471 static inline bool shrinkVectorBacking(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize) | 1471 static inline bool shrinkVectorBacking(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize) |
| 1472 { | 1472 { |
| 1473 shrinkVectorBackingInternal(address, quantizedCurrentSize, quantizedShrunkSize); | 1473 shrinkVectorBackingInternal(address, quantizedCurrentSize, quantizedShrunkSize); |
| 1474 return true; | 1474 return true; |
| 1475 } | 1475 } |
| 1476 template <typename T> | 1476 template <typename T> |
| 1477 static T* allocateInlineVectorBacking(size_t size) | 1477 static T* allocateInlineVectorBacking(size_t size) |
| 1478 { | 1478 { |
| 1479 size_t gcInfoIndex = GCInfoTrait<HeapVectorBacking<T, VectorTraits<T>>>:
:index(); | 1479 size_t gcInfoIndex = GCInfoTrait<HeapVectorBacking<T, VectorTraits<T>>>:
:index(); |
| 1480 return reinterpret_cast<T*>(Heap::allocateOnHeapIndex<T>(size, InlineVec
torHeapIndex, gcInfoIndex)); | 1480 return reinterpret_cast<T*>(Heap::allocateOnHeapIndex<T>(size, InlineVec
torBackingHeap, gcInfoIndex)); |
| 1481 } | 1481 } |
| 1482 PLATFORM_EXPORT static void freeInlineVectorBacking(void* address); | 1482 PLATFORM_EXPORT static void freeInlineVectorBacking(void* address); |
| 1483 PLATFORM_EXPORT static bool expandInlineVectorBacking(void*, size_t); | 1483 PLATFORM_EXPORT static bool expandInlineVectorBacking(void*, size_t); |
| 1484 static inline bool shrinkInlineVectorBacking(void* address, size_t quantizedCurrentSize, size_t quantizedShrinkedSize) | 1484 static inline bool shrinkInlineVectorBacking(void* address, size_t quantizedCurrentSize, size_t quantizedShrinkedSize) |
| 1485 { | 1485 { |
| 1486 shrinkInlineVectorBackingInternal(address, quantizedCurrentSize, quantizedShrinkedSize); | 1486 shrinkInlineVectorBackingInternal(address, quantizedCurrentSize, quantizedShrinkedSize); |
| 1487 return true; | 1487 return true; |
| 1488 } | 1488 } |
| 1489 | 1489 |
| 1490 | 1490 |
| 1491 template <typename T, typename HashTable> | 1491 template <typename T, typename HashTable> |
| 1492 static T* allocateHashTableBacking(size_t size) | 1492 static T* allocateHashTableBacking(size_t size) |
| 1493 { | 1493 { |
| 1494 size_t gcInfoIndex = GCInfoTrait<HeapHashTableBacking<HashTable>>::index(); | 1494 size_t gcInfoIndex = GCInfoTrait<HeapHashTableBacking<HashTable>>::index(); |
| 1495 return reinterpret_cast<T*>(Heap::allocateOnHeapIndex<T>(size, HashTableHeapIndex, gcInfoIndex)); | 1495 return reinterpret_cast<T*>(Heap::allocateOnHeapIndex<T>(size, HashTableBackingHeap, gcInfoIndex)); |
| 1496 } | 1496 } |
| 1497 template <typename T, typename HashTable> | 1497 template <typename T, typename HashTable> |
| 1498 static T* allocateZeroedHashTableBacking(size_t size) | 1498 static T* allocateZeroedHashTableBacking(size_t size) |
| 1499 { | 1499 { |
| 1500 return allocateHashTableBacking<T, HashTable>(size); | 1500 return allocateHashTableBacking<T, HashTable>(size); |
| 1501 } | 1501 } |
| 1502 PLATFORM_EXPORT static void freeHashTableBacking(void* address); | 1502 PLATFORM_EXPORT static void freeHashTableBacking(void* address); |
| 1503 PLATFORM_EXPORT static bool expandHashTableBacking(void*, size_t); | 1503 PLATFORM_EXPORT static bool expandHashTableBacking(void*, size_t); |
| 1504 | 1504 |
| 1505 template <typename Return, typename Metadata> | 1505 template <typename Return, typename Metadata> |
| (...skipping 949 matching lines...) |
| 2455 template<typename T, size_t inlineCapacity> | 2455 template<typename T, size_t inlineCapacity> |
| 2456 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { }; | 2456 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { }; |
| 2457 template<typename T, size_t inlineCapacity> | 2457 template<typename T, size_t inlineCapacity> |
| 2458 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { }; | 2458 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { }; |
| 2459 template<typename T, typename U, typename V> | 2459 template<typename T, typename U, typename V> |
| 2460 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { }; | 2460 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { }; |
| 2461 | 2461 |
| 2462 } // namespace blink | 2462 } // namespace blink |
| 2463 | 2463 |
| 2464 #endif // Heap_h | 2464 #endif // Heap_h |