OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 85 matching lines...) |
96 #if defined(MEMORY_SANITIZER) | 96 #if defined(MEMORY_SANITIZER) |
97 // TODO(kojii): We actually need __msan_poison/unpoison here, but it'll be | 97 // TODO(kojii): We actually need __msan_poison/unpoison here, but it'll be |
98 // added later. | 98 // added later. |
99 #define SET_MEMORY_INACCESSIBLE(address, size) \ | 99 #define SET_MEMORY_INACCESSIBLE(address, size) \ |
100 FreeList::zapFreedMemory(address, size); | 100 FreeList::zapFreedMemory(address, size); |
101 #define SET_MEMORY_ACCESSIBLE(address, size) memset((address), 0, (size)) | 101 #define SET_MEMORY_ACCESSIBLE(address, size) memset((address), 0, (size)) |
102 #define CHECK_MEMORY_INACCESSIBLE(address, size) \ | 102 #define CHECK_MEMORY_INACCESSIBLE(address, size) \ |
103 ASAN_UNPOISON_MEMORY_REGION(address, size); \ | 103 ASAN_UNPOISON_MEMORY_REGION(address, size); \ |
104 FreeList::checkFreedMemoryIsZapped(address, size); \ | 104 FreeList::checkFreedMemoryIsZapped(address, size); \ |
105 ASAN_POISON_MEMORY_REGION(address, size) | 105 ASAN_POISON_MEMORY_REGION(address, size) |
106 #elif ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 106 #elif DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
107 #define SET_MEMORY_INACCESSIBLE(address, size) \ | 107 #define SET_MEMORY_INACCESSIBLE(address, size) \ |
108 FreeList::zapFreedMemory(address, size); \ | 108 FreeList::zapFreedMemory(address, size); \ |
109 ASAN_POISON_MEMORY_REGION(address, size) | 109 ASAN_POISON_MEMORY_REGION(address, size) |
110 #define SET_MEMORY_ACCESSIBLE(address, size) \ | 110 #define SET_MEMORY_ACCESSIBLE(address, size) \ |
111 ASAN_UNPOISON_MEMORY_REGION(address, size); \ | 111 ASAN_UNPOISON_MEMORY_REGION(address, size); \ |
112 memset((address), 0, (size)) | 112 memset((address), 0, (size)) |
113 #define CHECK_MEMORY_INACCESSIBLE(address, size) \ | 113 #define CHECK_MEMORY_INACCESSIBLE(address, size) \ |
114 ASAN_UNPOISON_MEMORY_REGION(address, size); \ | 114 ASAN_UNPOISON_MEMORY_REGION(address, size); \ |
115 FreeList::checkFreedMemoryIsZapped(address, size); \ | 115 FreeList::checkFreedMemoryIsZapped(address, size); \ |
116 ASAN_POISON_MEMORY_REGION(address, size) | 116 ASAN_POISON_MEMORY_REGION(address, size) |
117 #else | 117 #else |
118 #define SET_MEMORY_INACCESSIBLE(address, size) memset((address), 0, (size)) | 118 #define SET_MEMORY_INACCESSIBLE(address, size) memset((address), 0, (size)) |
119 #define SET_MEMORY_ACCESSIBLE(address, size) \ | 119 #define SET_MEMORY_ACCESSIBLE(address, size) \ |
120 do { \ | 120 do { \ |
121 } while (false) | 121 } while (false) |
122 #define CHECK_MEMORY_INACCESSIBLE(address, size) \ | 122 #define CHECK_MEMORY_INACCESSIBLE(address, size) \ |
123 do { \ | 123 do { \ |
124 } while (false) | 124 } while (false) |
125 #endif | 125 #endif |
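The three macros above bracket an entry's lifetime on the free list: zap and/or poison on release, verify-then-unpoison on reuse. A minimal sketch of that pairing, with hypothetical caller names (the real call sites are elsewhere in this file):

    // Hypothetical caller: retiring a block to the free list.
    void retireBlock(Address address, size_t size) {
      SET_MEMORY_INACCESSIBLE(address, size);  // zap/poison (or just zero in release)
    }

    // Hypothetical caller: handing the block back out for allocation.
    Address reuseBlock(Address address, size_t size) {
      CHECK_MEMORY_INACCESSIBLE(address, size);  // debug-only: verify it stayed zapped
      SET_MEMORY_ACCESSIBLE(address, size);      // unpoison and zero before reuse
      return address;
    }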
126 | 126 |
127 #if !ENABLE(ASSERT) && CPU(64BIT) | 127 #if !DCHECK_IS_ON() && CPU(64BIT) |
128 #define USE_4BYTE_HEADER_PADDING 1 | 128 #define USE_4BYTE_HEADER_PADDING 1 |
129 #else | 129 #else |
130 #define USE_4BYTE_HEADER_PADDING 0 | 130 #define USE_4BYTE_HEADER_PADDING 0 |
131 #endif | 131 #endif |
132 | 132 |
133 class NormalPageArena; | 133 class NormalPageArena; |
134 class PageMemory; | 134 class PageMemory; |
135 | 135 |
136 // HeapObjectHeader is a 4-byte (32-bit) header that has the following layout: | 136 // HeapObjectHeader is a 4-byte (32-bit) header that has the following layout: |
137 // | 137 // |
(...skipping 38 matching lines...) |
176 nonLargeObjectPageSizeMax >= blinkPageSize, | 176 nonLargeObjectPageSizeMax >= blinkPageSize, |
177 "max size supported by HeapObjectHeader must at least be blinkPageSize"); | 177 "max size supported by HeapObjectHeader must at least be blinkPageSize"); |
178 | 178 |
179 class PLATFORM_EXPORT HeapObjectHeader { | 179 class PLATFORM_EXPORT HeapObjectHeader { |
180 DISALLOW_NEW_EXCEPT_PLACEMENT_NEW(); | 180 DISALLOW_NEW_EXCEPT_PLACEMENT_NEW(); |
181 | 181 |
182 public: | 182 public: |
183 // If gcInfoIndex is 0, this header is interpreted as a free list header. | 183 // If gcInfoIndex is 0, this header is interpreted as a free list header. |
184 NO_SANITIZE_ADDRESS | 184 NO_SANITIZE_ADDRESS |
185 HeapObjectHeader(size_t size, size_t gcInfoIndex) { | 185 HeapObjectHeader(size_t size, size_t gcInfoIndex) { |
186 #if ENABLE(ASSERT) | 186 #if DCHECK_IS_ON() |
187 m_magic = magic; | 187 m_magic = magic; |
188 #endif | 188 #endif |
189 // sizeof(HeapObjectHeader) must be equal to or smaller than | 189 // sizeof(HeapObjectHeader) must be equal to or smaller than |
190 // allocationGranularity, because HeapObjectHeader is used as a header | 190 // allocationGranularity, because HeapObjectHeader is used as a header |
191 // for a freed entry. Given that the smallest entry size is | 191 // for a freed entry. Given that the smallest entry size is |
192 // allocationGranularity, HeapObjectHeader must fit into the size. | 192 // allocationGranularity, HeapObjectHeader must fit into the size. |
193 static_assert( | 193 static_assert( |
194 sizeof(HeapObjectHeader) <= allocationGranularity, | 194 sizeof(HeapObjectHeader) <= allocationGranularity, |
195 "size of HeapObjectHeader must be smaller than allocationGranularity"); | 195 "size of HeapObjectHeader must be smaller than allocationGranularity"); |
196 #if CPU(64BIT) | 196 #if CPU(64BIT) |
(...skipping 35 matching lines...) |
232 bool isMarked() const; | 232 bool isMarked() const; |
233 void mark(); | 233 void mark(); |
234 void unmark(); | 234 void unmark(); |
235 void markDead(); | 235 void markDead(); |
236 bool isDead() const; | 236 bool isDead() const; |
237 | 237 |
238 Address payload(); | 238 Address payload(); |
239 size_t payloadSize(); | 239 size_t payloadSize(); |
240 Address payloadEnd(); | 240 Address payloadEnd(); |
241 | 241 |
242 #if ENABLE(ASSERT) | 242 #if DCHECK_IS_ON() |
243 bool checkHeader() const; | 243 bool checkHeader() const; |
244 // Zap magic number with a new magic number that means there was once an | 244 // Zap magic number with a new magic number that means there was once an |
245 // object allocated here, but it was freed because nobody marked it during | 245 // object allocated here, but it was freed because nobody marked it during |
246 // GC. | 246 // GC. |
247 void zapMagic(); | 247 void zapMagic(); |
248 #endif | 248 #endif |
249 | 249 |
250 void finalize(Address, size_t); | 250 void finalize(Address, size_t); |
251 static HeapObjectHeader* fromPayload(const void*); | 251 static HeapObjectHeader* fromPayload(const void*); |
252 | 252 |
253 static const uint16_t magic = 0xfff1; | 253 static const uint16_t magic = 0xfff1; |
254 static const uint16_t zappedMagic = 0x4321; | 254 static const uint16_t zappedMagic = 0x4321; |
255 | 255 |
256 private: | 256 private: |
257 uint32_t m_encoded; | 257 uint32_t m_encoded; |
258 #if ENABLE(ASSERT) | 258 #if DCHECK_IS_ON() |
259 uint16_t m_magic; | 259 uint16_t m_magic; |
260 #endif | 260 #endif |
261 | 261 |
262 // On 64-bit architectures, we intentionally add 4-byte padding immediately | 262 // On 64-bit architectures, we intentionally add 4-byte padding immediately |
263 // after the HeapObjectHeader. This is because: | 263 // after the HeapObjectHeader. This is because: |
264 // | 264 // |
265 // | HeapObjectHeader (4 byte) | <- 8 byte aligned | 265 // | HeapObjectHeader (4 byte) | <- 8 byte aligned |
266 // | padding (4 byte) | | 266 // | padding (4 byte) | |
267 // | object payload (8 * n byte) | <- 8 byte aligned | 267 // | object payload (8 * n byte) | <- 8 byte aligned |
268 // | 268 // |
269 // is better than: | 269 // is better than: |
270 // | 270 // |
271 // | HeapObjectHeader (4 byte) | <- 4 byte aligned | 271 // | HeapObjectHeader (4 byte) | <- 4 byte aligned |
272 // | object payload (8 * n byte) | <- 8 byte aligned | 272 // | object payload (8 * n byte) | <- 8 byte aligned |
273 // | padding (4 byte) | <- 4 byte aligned | 273 // | padding (4 byte) | <- 4 byte aligned |
274 // | 274 // |
275 // since the former layout aligns both header and payload to 8 byte. | 275 // since the former layout aligns both header and payload to 8 byte. |
276 #if USE_4BYTE_HEADER_PADDING | 276 #if USE_4BYTE_HEADER_PADDING |
277 public: | 277 public: |
278 uint32_t m_padding; | 278 uint32_t m_padding; |
279 #endif | 279 #endif |
280 }; | 280 }; |
281 | 281 |
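The payoff of the first layout above is plain modular arithmetic: 4 header bytes plus 4 padding bytes return the payload to an 8-byte boundary. A self-contained sketch of the check, using illustrative constants that mirror the comment (not the header's real ones):

    #include <cstddef>
    constexpr std::size_t kHeaderSize = 4;   // HeapObjectHeader
    constexpr std::size_t kPaddingSize = 4;  // m_padding
    constexpr std::size_t kAlignment = 8;    // allocation granularity on 64-bit
    static_assert((kHeaderSize + kPaddingSize) % kAlignment == 0,
                  "padded header keeps the payload 8-byte aligned");
    static_assert(kHeaderSize % kAlignment != 0,
                  "without padding the payload would start at offset 4");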
282 class FreeListEntry final : public HeapObjectHeader { | 282 class FreeListEntry final : public HeapObjectHeader { |
283 public: | 283 public: |
284 NO_SANITIZE_ADDRESS | 284 NO_SANITIZE_ADDRESS |
285 explicit FreeListEntry(size_t size) | 285 explicit FreeListEntry(size_t size) |
286 : HeapObjectHeader(size, gcInfoIndexForFreeListHeader), m_next(nullptr) { | 286 : HeapObjectHeader(size, gcInfoIndexForFreeListHeader), m_next(nullptr) { |
287 #if ENABLE(ASSERT) | 287 #if DCHECK_IS_ON() |
288 ASSERT(size >= sizeof(HeapObjectHeader)); | 288 ASSERT(size >= sizeof(HeapObjectHeader)); |
289 zapMagic(); | 289 zapMagic(); |
290 #endif | 290 #endif |
291 } | 291 } |
292 | 292 |
293 Address getAddress() { return reinterpret_cast<Address>(this); } | 293 Address getAddress() { return reinterpret_cast<Address>(this); } |
294 | 294 |
295 NO_SANITIZE_ADDRESS | 295 NO_SANITIZE_ADDRESS |
296 void unlink(FreeListEntry** prevNext) { | 296 void unlink(FreeListEntry** prevNext) { |
297 *prevNext = m_next; | 297 *prevNext = m_next; |
(...skipping 41 matching lines...) |
339 // Masks an address down to the enclosing blink page base address. | 339 // Masks an address down to the enclosing blink page base address. |
340 inline Address blinkPageAddress(Address address) { | 340 inline Address blinkPageAddress(Address address) { |
341 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & | 341 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & |
342 blinkPageBaseMask); | 342 blinkPageBaseMask); |
343 } | 343 } |
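A worked example of the masking, assuming the usual Blink constants (blinkPageSize == 1 << 17 and blinkPageBaseMask == ~(blinkPageSize - 1); the concrete address is made up):

    #include <cassert>
    #include <cstdint>
    int main() {
      const uintptr_t pageSize = uintptr_t(1) << 17;  // assumed blinkPageSize
      const uintptr_t baseMask = ~(pageSize - 1);     // assumed blinkPageBaseMask
      uintptr_t address = 0x20000 + 0x5678;           // somewhere inside a page
      assert((address & baseMask) == 0x20000);        // masked to the page base
      return 0;
    }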
344 | 344 |
345 inline bool vTableInitialized(void* objectPointer) { | 345 inline bool vTableInitialized(void* objectPointer) { |
346 return !!(*reinterpret_cast<Address*>(objectPointer)); | 346 return !!(*reinterpret_cast<Address*>(objectPointer)); |
347 } | 347 } |
348 | 348 |
349 #if ENABLE(ASSERT) | 349 #if DCHECK_IS_ON() |
350 // Sanity check for a page header address: the address of the page | 350 // Sanity check for a page header address: the address of the page |
351 // header should be OS page size away from being Blink page size | 351 // header should be OS page size away from being Blink page size |
352 // aligned. | 352 // aligned. |
353 inline bool isPageHeaderAddress(Address address) { | 353 inline bool isPageHeaderAddress(Address address) { |
354 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - | 354 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - |
355 blinkGuardPageSize); | 355 blinkGuardPageSize); |
356 } | 356 } |
357 #endif | 357 #endif |
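In other words, a page header must sit exactly one guard page past the Blink page base. A worked example, assuming a 4 KiB guard page and the 128 KiB Blink page from the sketch above:

    // offset-within-page == blinkGuardPageSize identifies a header address:
    //   0x21000 & 0x1ffff == 0x1000 == guard size  -> plausible header address
    //   0x25678 & 0x1ffff == 0x5678 != 0x1000      -> not a header address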
358 | 358 |
359 // BasePage is a base class for NormalPage and LargeObjectPage. | 359 // BasePage is a base class for NormalPage and LargeObjectPage. |
(...skipping 54 matching lines...) |
414 STACK_ALLOCATED(); | 414 STACK_ALLOCATED(); |
415 | 415 |
416 public: | 416 public: |
417 size_t freeCount = 0; | 417 size_t freeCount = 0; |
418 size_t freeSize = 0; | 418 size_t freeSize = 0; |
419 }; | 419 }; |
420 | 420 |
421 virtual void takeSnapshot(base::trace_event::MemoryAllocatorDump*, | 421 virtual void takeSnapshot(base::trace_event::MemoryAllocatorDump*, |
422 ThreadState::GCSnapshotInfo&, | 422 ThreadState::GCSnapshotInfo&, |
423 HeapSnapshotInfo&) = 0; | 423 HeapSnapshotInfo&) = 0; |
424 #if ENABLE(ASSERT) | 424 #if DCHECK_IS_ON() |
425 virtual bool contains(Address) = 0; | 425 virtual bool contains(Address) = 0; |
426 #endif | 426 #endif |
427 virtual size_t size() = 0; | 427 virtual size_t size() = 0; |
428 virtual bool isLargeObjectPage() { return false; } | 428 virtual bool isLargeObjectPage() { return false; } |
429 | 429 |
430 Address getAddress() { return reinterpret_cast<Address>(this); } | 430 Address getAddress() { return reinterpret_cast<Address>(this); } |
431 PageMemory* storage() const { return m_storage; } | 431 PageMemory* storage() const { return m_storage; } |
432 BaseArena* arena() const { return m_arena; } | 432 BaseArena* arena() const { return m_arena; } |
433 bool orphaned() { return !m_arena; } | 433 bool orphaned() { return !m_arena; } |
434 bool terminating() { return m_terminating; } | 434 bool terminating() { return m_terminating; } |
(...skipping 52 matching lines...) |
487 } | 487 } |
488 #if defined(ADDRESS_SANITIZER) | 488 #if defined(ADDRESS_SANITIZER) |
489 void poisonUnmarkedObjects() override; | 489 void poisonUnmarkedObjects() override; |
490 #endif | 490 #endif |
491 void checkAndMarkPointer(Visitor*, Address) override; | 491 void checkAndMarkPointer(Visitor*, Address) override; |
492 void markOrphaned() override; | 492 void markOrphaned() override; |
493 | 493 |
494 void takeSnapshot(base::trace_event::MemoryAllocatorDump*, | 494 void takeSnapshot(base::trace_event::MemoryAllocatorDump*, |
495 ThreadState::GCSnapshotInfo&, | 495 ThreadState::GCSnapshotInfo&, |
496 HeapSnapshotInfo&) override; | 496 HeapSnapshotInfo&) override; |
497 #if ENABLE(ASSERT) | 497 #if DCHECK_IS_ON() |
498 // Returns true for the whole blinkPageSize page that the page is on, even | 498 // Returns true for the whole blinkPageSize page that the page is on, even |
499 // for the header, and the unmapped guard page at the start. That ensures | 499 // for the header, and the unmapped guard page at the start. That ensures |
500 // the result can be used to populate the negative page cache. | 500 // the result can be used to populate the negative page cache. |
501 bool contains(Address) override; | 501 bool contains(Address) override; |
502 #endif | 502 #endif |
503 size_t size() override { return blinkPageSize; } | 503 size_t size() override { return blinkPageSize; } |
504 static size_t pageHeaderSize() { | 504 static size_t pageHeaderSize() { |
505 // Compute the amount of padding we have to add to a header to make | 505 // Compute the amount of padding we have to add to a header to make |
506 // the size of the header plus the padding a multiple of 8 bytes. | 506 // the size of the header plus the padding a multiple of 8 bytes. |
507 size_t paddingSize = (sizeof(NormalPage) + allocationGranularity - | 507 size_t paddingSize = (sizeof(NormalPage) + allocationGranularity - |
(...skipping 57 matching lines...) |
565 void invalidateObjectStartBitmap() override {} | 565 void invalidateObjectStartBitmap() override {} |
566 #if defined(ADDRESS_SANITIZER) | 566 #if defined(ADDRESS_SANITIZER) |
567 void poisonUnmarkedObjects() override; | 567 void poisonUnmarkedObjects() override; |
568 #endif | 568 #endif |
569 void checkAndMarkPointer(Visitor*, Address) override; | 569 void checkAndMarkPointer(Visitor*, Address) override; |
570 void markOrphaned() override; | 570 void markOrphaned() override; |
571 | 571 |
572 void takeSnapshot(base::trace_event::MemoryAllocatorDump*, | 572 void takeSnapshot(base::trace_event::MemoryAllocatorDump*, |
573 ThreadState::GCSnapshotInfo&, | 573 ThreadState::GCSnapshotInfo&, |
574 HeapSnapshotInfo&) override; | 574 HeapSnapshotInfo&) override; |
575 #if ENABLE(ASSERT) | 575 #if DCHECK_IS_ON() |
576 // Returns true for any address that is on one of the pages that this | 576 // Returns true for any address that is on one of the pages that this |
577 // large object uses. That ensures that we can use a negative result to | 577 // large object uses. That ensures that we can use a negative result to |
578 // populate the negative page cache. | 578 // populate the negative page cache. |
579 bool contains(Address) override; | 579 bool contains(Address) override; |
580 #endif | 580 #endif |
581 virtual size_t size() { | 581 virtual size_t size() { |
582 return pageHeaderSize() + sizeof(HeapObjectHeader) + m_payloadSize; | 582 return pageHeaderSize() + sizeof(HeapObjectHeader) + m_payloadSize; |
583 } | 583 } |
584 static size_t pageHeaderSize() { | 584 static size_t pageHeaderSize() { |
585 // Compute the amount of padding we have to add to a header to make | 585 // Compute the amount of padding we have to add to a header to make |
(...skipping 80 matching lines...) |
666 void addToFreeList(Address, size_t); | 666 void addToFreeList(Address, size_t); |
667 void clear(); | 667 void clear(); |
668 | 668 |
669 // Returns a bucket number for inserting a FreeListEntry of a given size. | 669 // Returns a bucket number for inserting a FreeListEntry of a given size. |
670 // All FreeListEntries in the given bucket, n, have size >= 2^n. | 670 // All FreeListEntries in the given bucket, n, have size >= 2^n. |
671 static int bucketIndexForSize(size_t); | 671 static int bucketIndexForSize(size_t); |
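That invariant puts each entry in the bucket of its size's highest set bit, i.e. bucket n holds sizes in [2^n, 2^(n+1)). A hedged sketch of the computation (an illustration, not the actual implementation):

    // Illustrative only: floor(log2(size)) via shifting.
    int bucketIndexForSizeSketch(size_t size) {
      int index = -1;
      while (size) {
        size >>= 1;
        ++index;
      }
      return index;
    }

For example, sizes 32 through 63 all land in bucket 5, so any entry pulled from bucket 5 is guaranteed to be at least 32 bytes.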
672 | 672 |
673 // Returns true if the freelist snapshot is captured. | 673 // Returns true if the freelist snapshot is captured. |
674 bool takeSnapshot(const String& dumpBaseName); | 674 bool takeSnapshot(const String& dumpBaseName); |
675 | 675 |
676 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \ | 676 #if DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \ |
677 defined(MEMORY_SANITIZER) | 677 defined(MEMORY_SANITIZER) |
678 static void zapFreedMemory(Address, size_t); | 678 static void zapFreedMemory(Address, size_t); |
679 static void checkFreedMemoryIsZapped(Address, size_t); | 679 static void checkFreedMemoryIsZapped(Address, size_t); |
680 #endif | 680 #endif |
681 | 681 |
682 private: | 682 private: |
683 int m_biggestFreeListIndex; | 683 int m_biggestFreeListIndex; |
684 | 684 |
685 // All FreeListEntries in the nth list have size >= 2^n. | 685 // All FreeListEntries in the nth list have size >= 2^n. |
686 FreeListEntry* m_freeLists[blinkPageSizeLog2]; | 686 FreeListEntry* m_freeLists[blinkPageSizeLog2]; |
(...skipping 13 matching lines...) |
700 // LargeObjectPages. | 700 // LargeObjectPages. |
701 class PLATFORM_EXPORT BaseArena { | 701 class PLATFORM_EXPORT BaseArena { |
702 USING_FAST_MALLOC(BaseArena); | 702 USING_FAST_MALLOC(BaseArena); |
703 | 703 |
704 public: | 704 public: |
705 BaseArena(ThreadState*, int); | 705 BaseArena(ThreadState*, int); |
706 virtual ~BaseArena(); | 706 virtual ~BaseArena(); |
707 void cleanupPages(); | 707 void cleanupPages(); |
708 | 708 |
709 void takeSnapshot(const String& dumpBaseName, ThreadState::GCSnapshotInfo&); | 709 void takeSnapshot(const String& dumpBaseName, ThreadState::GCSnapshotInfo&); |
710 #if ENABLE(ASSERT) | 710 #if DCHECK_IS_ON() |
711 BasePage* findPageFromAddress(Address); | 711 BasePage* findPageFromAddress(Address); |
712 #endif | 712 #endif |
713 virtual void takeFreelistSnapshot(const String& dumpBaseName) {} | 713 virtual void takeFreelistSnapshot(const String& dumpBaseName) {} |
714 virtual void clearFreeLists() {} | 714 virtual void clearFreeLists() {} |
715 void makeConsistentForGC(); | 715 void makeConsistentForGC(); |
716 void makeConsistentForMutator(); | 716 void makeConsistentForMutator(); |
717 #if ENABLE(ASSERT) | 717 #if DCHECK_IS_ON() |
718 virtual bool isConsistentForGC() = 0; | 718 virtual bool isConsistentForGC() = 0; |
719 #endif | 719 #endif |
720 size_t objectPayloadSizeForTesting(); | 720 size_t objectPayloadSizeForTesting(); |
721 void prepareHeapForTermination(); | 721 void prepareHeapForTermination(); |
722 void prepareForSweep(); | 722 void prepareForSweep(); |
723 #if defined(ADDRESS_SANITIZER) | 723 #if defined(ADDRESS_SANITIZER) |
724 void poisonArena(); | 724 void poisonArena(); |
725 #endif | 725 #endif |
726 Address lazySweep(size_t, size_t gcInfoIndex); | 726 Address lazySweep(size_t, size_t gcInfoIndex); |
727 void sweepUnsweptPage(); | 727 void sweepUnsweptPage(); |
(...skipping 25 matching lines...) |
753 | 753 |
754 class PLATFORM_EXPORT NormalPageArena final : public BaseArena { | 754 class PLATFORM_EXPORT NormalPageArena final : public BaseArena { |
755 public: | 755 public: |
756 NormalPageArena(ThreadState*, int); | 756 NormalPageArena(ThreadState*, int); |
757 void addToFreeList(Address address, size_t size) { | 757 void addToFreeList(Address address, size_t size) { |
758 ASSERT(findPageFromAddress(address)); | 758 ASSERT(findPageFromAddress(address)); |
759 ASSERT(findPageFromAddress(address + size - 1)); | 759 ASSERT(findPageFromAddress(address + size - 1)); |
760 m_freeList.addToFreeList(address, size); | 760 m_freeList.addToFreeList(address, size); |
761 } | 761 } |
762 void clearFreeLists() override; | 762 void clearFreeLists() override; |
763 #if ENABLE(ASSERT) | 763 #if DCHECK_IS_ON() |
764 bool isConsistentForGC() override; | 764 bool isConsistentForGC() override; |
765 bool pagesToBeSweptContains(Address); | 765 bool pagesToBeSweptContains(Address); |
766 #endif | 766 #endif |
767 void takeFreelistSnapshot(const String& dumpBaseName) override; | 767 void takeFreelistSnapshot(const String& dumpBaseName) override; |
768 | 768 |
769 Address allocateObject(size_t allocationSize, size_t gcInfoIndex); | 769 Address allocateObject(size_t allocationSize, size_t gcInfoIndex); |
770 | 770 |
771 void freePage(NormalPage*); | 771 void freePage(NormalPage*); |
772 | 772 |
773 bool coalesce(); | 773 bool coalesce(); |
(...skipping 41 matching lines...) |
815 size_t m_promptlyFreedSize; | 815 size_t m_promptlyFreedSize; |
816 | 816 |
817 bool m_isLazySweeping; | 817 bool m_isLazySweeping; |
818 }; | 818 }; |
819 | 819 |
820 class LargeObjectArena final : public BaseArena { | 820 class LargeObjectArena final : public BaseArena { |
821 public: | 821 public: |
822 LargeObjectArena(ThreadState*, int); | 822 LargeObjectArena(ThreadState*, int); |
823 Address allocateLargeObjectPage(size_t, size_t gcInfoIndex); | 823 Address allocateLargeObjectPage(size_t, size_t gcInfoIndex); |
824 void freeLargeObjectPage(LargeObjectPage*); | 824 void freeLargeObjectPage(LargeObjectPage*); |
825 #if ENABLE(ASSERT) | 825 #if DCHECK_IS_ON() |
826 bool isConsistentForGC() override { return true; } | 826 bool isConsistentForGC() override { return true; } |
827 #endif | 827 #endif |
828 private: | 828 private: |
829 Address doAllocateLargeObjectPage(size_t, size_t gcInfoIndex); | 829 Address doAllocateLargeObjectPage(size_t, size_t gcInfoIndex); |
830 Address lazySweepPages(size_t, size_t gcInfoIndex) override; | 830 Address lazySweepPages(size_t, size_t gcInfoIndex) override; |
831 }; | 831 }; |
832 | 832 |
833 // Mask an address down to the enclosing oilpan heap base page. All oilpan heap | 833 // Mask an address down to the enclosing oilpan heap base page. All oilpan heap |
834 // pages are aligned at blinkPageBase plus the size of a guard page. | 834 // pages are aligned at blinkPageBase plus the size of a guard page. |
835 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our | 835 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our |
836 // typed arenas. This is only exported to enable tests in HeapTest.cpp. | 836 // typed arenas. This is only exported to enable tests in HeapTest.cpp. |
837 PLATFORM_EXPORT inline BasePage* pageFromObject(const void* object) { | 837 PLATFORM_EXPORT inline BasePage* pageFromObject(const void* object) { |
838 Address address = reinterpret_cast<Address>(const_cast<void*>(object)); | 838 Address address = reinterpret_cast<Address>(const_cast<void*>(object)); |
839 BasePage* page = reinterpret_cast<BasePage*>(blinkPageAddress(address) + | 839 BasePage* page = reinterpret_cast<BasePage*>(blinkPageAddress(address) + |
840 blinkGuardPageSize); | 840 blinkGuardPageSize); |
841 ASSERT(page->contains(address)); | 841 ASSERT(page->contains(address)); |
842 return page; | 842 return page; |
843 } | 843 } |
844 | 844 |
845 NO_SANITIZE_ADDRESS inline size_t HeapObjectHeader::size() const { | 845 NO_SANITIZE_ADDRESS inline size_t HeapObjectHeader::size() const { |
846 size_t result = m_encoded & headerSizeMask; | 846 size_t result = m_encoded & headerSizeMask; |
847 // Large objects should not refer to header->size(). | 847 // Large objects should not refer to header->size(). |
848 // The actual size of a large object is stored in | 848 // The actual size of a large object is stored in |
849 // LargeObjectPage::m_payloadSize. | 849 // LargeObjectPage::m_payloadSize. |
850 ASSERT(result != largeObjectSizeInHeader); | 850 ASSERT(result != largeObjectSizeInHeader); |
851 ASSERT(!pageFromObject(this)->isLargeObjectPage()); | 851 ASSERT(!pageFromObject(this)->isLargeObjectPage()); |
852 return result; | 852 return result; |
853 } | 853 } |
854 | 854 |
855 #if ENABLE(ASSERT) | 855 #if DCHECK_IS_ON() |
856 NO_SANITIZE_ADDRESS inline bool HeapObjectHeader::checkHeader() const { | 856 NO_SANITIZE_ADDRESS inline bool HeapObjectHeader::checkHeader() const { |
857 return !pageFromObject(this)->orphaned() && m_magic == magic; | 857 return !pageFromObject(this)->orphaned() && m_magic == magic; |
858 } | 858 } |
859 #endif | 859 #endif |
860 | 860 |
861 inline Address HeapObjectHeader::payload() { | 861 inline Address HeapObjectHeader::payload() { |
862 return reinterpret_cast<Address>(this) + sizeof(HeapObjectHeader); | 862 return reinterpret_cast<Address>(this) + sizeof(HeapObjectHeader); |
863 } | 863 } |
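fromPayload() (declared above) is the inverse of this computation; a minimal sketch under that assumption:

    // Illustrative: step back over the header from a payload pointer.
    HeapObjectHeader* fromPayloadSketch(const void* payload) {
      Address address = reinterpret_cast<Address>(const_cast<void*>(payload));
      return reinterpret_cast<HeapObjectHeader*>(address - sizeof(HeapObjectHeader));
    }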
864 | 864 |
865 inline Address HeapObjectHeader::payloadEnd() { | 865 inline Address HeapObjectHeader::payloadEnd() { |
(...skipping 85 matching lines...) |
951 return outOfLineAllocate(allocationSize, gcInfoIndex); | 951 return outOfLineAllocate(allocationSize, gcInfoIndex); |
952 } | 952 } |
953 | 953 |
954 inline NormalPageArena* NormalPage::arenaForNormalPage() const { | 954 inline NormalPageArena* NormalPage::arenaForNormalPage() const { |
955 return static_cast<NormalPageArena*>(arena()); | 955 return static_cast<NormalPageArena*>(arena()); |
956 } | 956 } |
957 | 957 |
958 } // namespace blink | 958 } // namespace blink |
959 | 959 |
960 #endif // HeapPage_h | 960 #endif // HeapPage_h |