Chromium Code Reviews

Unified Diff: third_party/WebKit/Source/platform/heap/HeapPage.h

Issue 2619493003: Replace ASSERTs in platform/heap/ with DCHECKs
Patch Set: temp Created 3 years, 11 months ago
/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above

(...skipping 85 matching lines...)
#if defined(MEMORY_SANITIZER)
// TODO(kojii): We actually need __msan_poison/unpoison here, but it'll be
// added later.
#define SET_MEMORY_INACCESSIBLE(address, size) \
  FreeList::zapFreedMemory(address, size);
#define SET_MEMORY_ACCESSIBLE(address, size) memset((address), 0, (size))
#define CHECK_MEMORY_INACCESSIBLE(address, size)     \
  ASAN_UNPOISON_MEMORY_REGION(address, size);        \
  FreeList::checkFreedMemoryIsZapped(address, size); \
  ASAN_POISON_MEMORY_REGION(address, size)
-#elif ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
+#elif DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
#define SET_MEMORY_INACCESSIBLE(address, size) \
  FreeList::zapFreedMemory(address, size);     \
  ASAN_POISON_MEMORY_REGION(address, size)
#define SET_MEMORY_ACCESSIBLE(address, size)  \
  ASAN_UNPOISON_MEMORY_REGION(address, size); \
  memset((address), 0, (size))
#define CHECK_MEMORY_INACCESSIBLE(address, size)     \
  ASAN_UNPOISON_MEMORY_REGION(address, size);        \
  FreeList::checkFreedMemoryIsZapped(address, size); \
  ASAN_POISON_MEMORY_REGION(address, size)
#else
#define SET_MEMORY_INACCESSIBLE(address, size) memset((address), 0, (size))
#define SET_MEMORY_ACCESSIBLE(address, size) \
  do {                                       \
  } while (false)
#define CHECK_MEMORY_INACCESSIBLE(address, size) \
  do {                                           \
  } while (false)
#endif

-#if !ENABLE(ASSERT) && CPU(64BIT)
+#if !DCHECK_IS_ON() && CPU(64BIT)
#define USE_4BYTE_HEADER_PADDING 1
#else
#define USE_4BYTE_HEADER_PADDING 0
#endif

class NormalPageArena;
class PageMemory;

// HeapObjectHeader is 4 byte (32 bit) that has the following layout:
//
(...skipping 38 matching lines...)
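Note on the hunk above: ENABLE(ASSERT) was WTF's debug-assert toggle, and DCHECK_IS_ON() is the base/logging predicate it is being replaced with, so the zap/poison machinery still compiles in debug and sanitizer builds. A minimal sketch of how callers are expected to pair these macros; the function names here are hypothetical, not part of HeapPage.h, and Address is Blink's byte-pointer typedef:

  // Hypothetical caller, for illustration only.
  void retireFreedRange(Address address, size_t size) {
    // Debug/sanitizer builds zap the bytes and (under ASAN) poison the
    // region; plain release builds expand this to memset(address, 0, size).
    SET_MEMORY_INACCESSIBLE(address, size);
  }

  Address reuseFreedRange(Address address, size_t size) {
    // Debug/sanitizer builds unpoison and zero-fill; plain release builds
    // expand to a no-op because the range was already zeroed when freed.
    SET_MEMORY_ACCESSIBLE(address, size);
    return address;
  }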
    nonLargeObjectPageSizeMax >= blinkPageSize,
    "max size supported by HeapObjectHeader must at least be blinkPageSize");

class PLATFORM_EXPORT HeapObjectHeader {
  DISALLOW_NEW_EXCEPT_PLACEMENT_NEW();

 public:
  // If gcInfoIndex is 0, this header is interpreted as a free list header.
  NO_SANITIZE_ADDRESS
  HeapObjectHeader(size_t size, size_t gcInfoIndex) {
-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
    m_magic = magic;
#endif
    // sizeof(HeapObjectHeader) must be equal to or smaller than
    // allocationGranurarity, because HeapObjectHeader is used as a header
    // for an freed entry. Given that the smallest entry size is
    // allocationGranurarity, HeapObjectHeader must fit into the size.
    static_assert(
        sizeof(HeapObjectHeader) <= allocationGranularity,
        "size of HeapObjectHeader must be smaller than allocationGranularity");
#if CPU(64BIT)
    static_assert(sizeof(HeapObjectHeader) == 8,
                  "size of HeapObjectHeader must be 8 byte aligned");
#endif

-    ASSERT(gcInfoIndex < GCInfoTable::maxIndex);
-    ASSERT(size < nonLargeObjectPageSizeMax);
-    ASSERT(!(size & allocationMask));
+    DCHECK_LT(gcInfoIndex, gcInfoMaxIndex);
+    DCHECK_LT(size, nonLargeObjectPageSizeMax);
+    DCHECK(!(size & allocationMask));
    m_encoded = static_cast<uint32_t>(
        (gcInfoIndex << headerGCInfoIndexShift) | size |
        (gcInfoIndex == gcInfoIndexForFreeListHeader ? headerFreedBitMask : 0));
  }

  NO_SANITIZE_ADDRESS
  bool isFree() const { return m_encoded & headerFreedBitMask; }
  NO_SANITIZE_ADDRESS
  bool isPromptlyFreed() const {
    return (m_encoded & headerPromptlyFreedBitMask) ==
           headerPromptlyFreedBitMask;
  }
  NO_SANITIZE_ADDRESS
  void markPromptlyFreed() { m_encoded |= headerPromptlyFreedBitMask; }
  size_t size() const;

  NO_SANITIZE_ADDRESS
  size_t gcInfoIndex() const {
    return (m_encoded & headerGCInfoIndexMask) >> headerGCInfoIndexShift;
  }
  NO_SANITIZE_ADDRESS
  void setSize(size_t size) {
-    ASSERT(size < nonLargeObjectPageSizeMax);
+    DCHECK_LT(size, nonLargeObjectPageSizeMax);
    m_encoded = static_cast<uint32_t>(size) | (m_encoded & ~headerSizeMask);
  }
  bool isWrapperHeaderMarked() const;
  void markWrapperHeader();
  void unmarkWrapperHeader();
  bool isMarked() const;
  void mark();
  void unmark();
  void markDead();
  bool isDead() const;

  Address payload();
  size_t payloadSize();
  Address payloadEnd();

-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
  bool checkHeader() const;
  // Zap magic number with a new magic number that means there was once an
  // object allocated here, but it was freed because nobody marked it during
  // GC.
  void zapMagic();
#endif

  void finalize(Address, size_t);
  static HeapObjectHeader* fromPayload(const void*);

  static const uint16_t magic = 0xfff1;
  static const uint16_t zappedMagic = 0x4321;

 private:
  uint32_t m_encoded;
-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
  uint16_t m_magic;
#endif

  // In 64 bit architectures, we intentionally add 4 byte padding immediately
  // after the HeapObjectHeader. This is because:
  //
  // | HeapObjectHeader (4 byte)   | <- 8 byte aligned
  // | padding (4 byte)            |
  // | object payload (8 * n byte) | <- 8 byte aligned
  //
  // is better than:
  //
  // | HeapObjectHeader (4 byte)   | <- 4 byte aligned
  // | object payload (8 * n byte) | <- 8 byte aligned
  // | padding (4 byte)            | <- 4 byte aligned
  //
  // since the former layout aligns both header and payload to 8 byte.
#if USE_4BYTE_HEADER_PADDING
 public:
  uint32_t m_padding;
#endif
};

class FreeListEntry final : public HeapObjectHeader {
 public:
  NO_SANITIZE_ADDRESS
  explicit FreeListEntry(size_t size)
      : HeapObjectHeader(size, gcInfoIndexForFreeListHeader), m_next(nullptr) {
-#if ENABLE(ASSERT)
-    ASSERT(size >= sizeof(HeapObjectHeader));
+#if DCHECK_IS_ON()
+    DCHECK_GE(size, sizeof(HeapObjectHeader));
    zapMagic();
#endif
  }

  Address getAddress() { return reinterpret_cast<Address>(this); }

  NO_SANITIZE_ADDRESS
  void unlink(FreeListEntry** prevNext) {
    *prevNext = m_next;
    m_next = nullptr;
  }

  NO_SANITIZE_ADDRESS
  void link(FreeListEntry** prevNext) {
    m_next = *prevNext;
    *prevNext = this;
  }

  NO_SANITIZE_ADDRESS
  FreeListEntry* next() const { return m_next; }

  NO_SANITIZE_ADDRESS
  void append(FreeListEntry* next) {
-    ASSERT(!m_next);
+    DCHECK(!m_next);
    m_next = next;
  }

 private:
  FreeListEntry* m_next;
};

// Blink heap pages are set up with a guard page before and after the payload.
inline size_t blinkPagePayloadSize() {
  return blinkPageSize - 2 * blinkGuardPageSize;
(...skipping 16 matching lines...)
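Note on the constructor above: gcInfoIndex, size, and a freed flag are packed into the single 32-bit m_encoded field. A self-contained sketch of that packing, with placeholder shifts and masks; the real headerGCInfoIndexShift / headerSizeMask / headerFreedBitMask values are defined elsewhere in platform/heap and also reserve mark/dead bits:

  #include <cstdint>

  constexpr uint32_t kGCInfoIndexShift = 20;      // assumed bit position
  constexpr uint32_t kSizeMask = (1u << 20) - 8;  // assumed: 8-byte-granular size
  constexpr uint32_t kFreedBitMask = 1u;          // assumed flag bit

  // Mirrors the constructor: index in the high bits, size in the low bits,
  // plus a flag bit marking free-list headers.
  uint32_t encodeHeader(uint32_t gcInfoIndex, uint32_t size, bool freed) {
    return (gcInfoIndex << kGCInfoIndexShift) | size |
           (freed ? kFreedBitMask : 0);
  }

  uint32_t decodeSize(uint32_t encoded) { return encoded & kSizeMask; }
  uint32_t decodeGCInfoIndex(uint32_t encoded) {
    return encoded >> kGCInfoIndexShift;
  }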
// Masks an address down to the enclosing blink page base address.
inline Address blinkPageAddress(Address address) {
  return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) &
                                   blinkPageBaseMask);
}

inline bool vTableInitialized(void* objectPointer) {
  return !!(*reinterpret_cast<Address*>(objectPointer));
}

-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
// Sanity check for a page header address: the address of the page
// header should be OS page size away from being Blink page size
// aligned.
inline bool isPageHeaderAddress(Address address) {
  return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) -
           blinkGuardPageSize);
}
#endif

// BasePage is a base class for NormalPage and LargeObjectPage.
(...skipping 54 matching lines...)
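blinkPageAddress above recovers a page's base from any interior pointer with a single mask. A worked example, under the assumption of 128 KiB Blink pages (blinkPageSizeLog2 == 17; the actual constants are defined elsewhere in platform/heap):

  #include <cstdint>

  constexpr uintptr_t kBlinkPageSize = uintptr_t{1} << 17;  // assumption
  constexpr uintptr_t kBlinkPageBaseMask = ~(kBlinkPageSize - 1);

  // Clears the low 17 bits: e.g. 0x2A8F3 & kBlinkPageBaseMask == 0x20000.
  uintptr_t blinkPageBaseOf(uintptr_t address) {
    return address & kBlinkPageBaseMask;
  }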
    STACK_ALLOCATED();

   public:
    size_t freeCount = 0;
    size_t freeSize = 0;
  };

  virtual void takeSnapshot(base::trace_event::MemoryAllocatorDump*,
                            ThreadState::GCSnapshotInfo&,
                            HeapSnapshotInfo&) = 0;
-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
  virtual bool contains(Address) = 0;
#endif
  virtual size_t size() = 0;
  virtual bool isLargeObjectPage() { return false; }

  Address getAddress() { return reinterpret_cast<Address>(this); }
  PageMemory* storage() const { return m_storage; }
  BaseArena* arena() const { return m_arena; }
  bool orphaned() { return !m_arena; }
  bool terminating() { return m_terminating; }
  void setTerminating() { m_terminating = true; }

  // Returns true if this page has been swept by the ongoing lazy sweep.
  bool hasBeenSwept() const { return m_swept; }

  void markAsSwept() {
-    ASSERT(!m_swept);
+    DCHECK(!m_swept);
    m_swept = true;
  }

  void markAsUnswept() {
-    ASSERT(m_swept);
+    DCHECK(m_swept);
    m_swept = false;
  }

 private:
  PageMemory* m_storage;
  BaseArena* m_arena;
  BasePage* m_next;
  // Whether the page is part of a terminating thread or not.
  bool m_terminating;

(...skipping 30 matching lines...)
  }
#if defined(ADDRESS_SANITIZER)
  void poisonUnmarkedObjects() override;
#endif
  void checkAndMarkPointer(Visitor*, Address) override;
  void markOrphaned() override;

  void takeSnapshot(base::trace_event::MemoryAllocatorDump*,
                    ThreadState::GCSnapshotInfo&,
                    HeapSnapshotInfo&) override;
-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
  // Returns true for the whole blinkPageSize page that the page is on, even
  // for the header, and the unmapped guard page at the start. That ensures
  // the result can be used to populate the negative page cache.
  bool contains(Address) override;
#endif
  size_t size() override { return blinkPageSize; }
  static size_t pageHeaderSize() {
    // Compute the amount of padding we have to add to a header to make
    // the size of the header plus the padding a multiple of 8 bytes.
    size_t paddingSize = (sizeof(NormalPage) + allocationGranularity -
(...skipping 57 matching lines...)
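pageHeaderSize (truncated above) pads sizeof(NormalPage) so that the header plus padding is a multiple of allocationGranularity, keeping the first object 8-byte aligned. The underlying round-up idiom, sketched with an assumed granularity of 8:

  #include <cstddef>

  constexpr size_t kAllocationGranularity = 8;  // assumption for the sketch

  // Round size up to the next multiple of kAllocationGranularity.
  constexpr size_t roundUpToGranularity(size_t size) {
    return (size + kAllocationGranularity - 1) & ~(kAllocationGranularity - 1);
  }

  static_assert(roundUpToGranularity(13) == 16, "13 rounds up to 16");
  static_assert(roundUpToGranularity(16) == 16, "multiples are unchanged");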
  void invalidateObjectStartBitmap() override {}
#if defined(ADDRESS_SANITIZER)
  void poisonUnmarkedObjects() override;
#endif
  void checkAndMarkPointer(Visitor*, Address) override;
  void markOrphaned() override;

  void takeSnapshot(base::trace_event::MemoryAllocatorDump*,
                    ThreadState::GCSnapshotInfo&,
                    HeapSnapshotInfo&) override;
-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
  // Returns true for any address that is on one of the pages that this
  // large object uses. That ensures that we can use a negative result to
  // populate the negative page cache.
  bool contains(Address) override;
#endif
  virtual size_t size() {
    return pageHeaderSize() + sizeof(HeapObjectHeader) + m_payloadSize;
  }
  static size_t pageHeaderSize() {
    // Compute the amount of padding we have to add to a header to make
(...skipping 80 matching lines...)
  void addToFreeList(Address, size_t);
  void clear();

  // Returns a bucket number for inserting a FreeListEntry of a given size.
  // All FreeListEntries in the given bucket, n, have size >= 2^n.
  static int bucketIndexForSize(size_t);

  // Returns true if the freelist snapshot is captured.
  bool takeSnapshot(const String& dumpBaseName);

-#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
+#if DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
    defined(MEMORY_SANITIZER)
  static void zapFreedMemory(Address, size_t);
  static void checkFreedMemoryIsZapped(Address, size_t);
#endif

 private:
  int m_biggestFreeListIndex;

  // All FreeListEntries in the nth list have size >= 2^n.
  FreeListEntry* m_freeLists[blinkPageSizeLog2];
(...skipping 13 matching lines...)
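Per the comment above, bucket n holds FreeListEntries with size >= 2^n, so bucketIndexForSize is effectively floor(log2(size)). A minimal sketch; the real implementation may use different bit tricks:

  #include <cstddef>

  int bucketIndexForSizeSketch(size_t size) {
    // Index of the highest set bit, i.e. floor(log2(size)); requires size > 0.
    int index = -1;
    while (size) {
      ++index;
      size >>= 1;
    }
    return index;  // e.g. size 48 -> bucket 5, since 2^5 <= 48 < 2^6
  }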
// LargeObjectPages.
class PLATFORM_EXPORT BaseArena {
  USING_FAST_MALLOC(BaseArena);

 public:
  BaseArena(ThreadState*, int);
  virtual ~BaseArena();
  void cleanupPages();

  void takeSnapshot(const String& dumpBaseName, ThreadState::GCSnapshotInfo&);
-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
  BasePage* findPageFromAddress(Address);
#endif
  virtual void takeFreelistSnapshot(const String& dumpBaseName) {}
  virtual void clearFreeLists() {}
  void makeConsistentForGC();
  void makeConsistentForMutator();
-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
  virtual bool isConsistentForGC() = 0;
#endif
  size_t objectPayloadSizeForTesting();
  void prepareHeapForTermination();
  void prepareForSweep();
#if defined(ADDRESS_SANITIZER)
  void poisonArena();
#endif
  Address lazySweep(size_t, size_t gcInfoIndex);
  void sweepUnsweptPage();
(...skipping 20 matching lines...)

  // Index into the page pools. This is used to ensure that the pages of the
  // same type go into the correct page pool and thus avoid type confusion.
  int m_index;
};

class PLATFORM_EXPORT NormalPageArena final : public BaseArena {
 public:
  NormalPageArena(ThreadState*, int);
  void addToFreeList(Address address, size_t size) {
-    ASSERT(findPageFromAddress(address));
-    ASSERT(findPageFromAddress(address + size - 1));
+    DCHECK(findPageFromAddress(address));
+    DCHECK(findPageFromAddress(address + size - 1));
    m_freeList.addToFreeList(address, size);
  }
  void clearFreeLists() override;
-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
  bool isConsistentForGC() override;
  bool pagesToBeSweptContains(Address);
#endif
  void takeFreelistSnapshot(const String& dumpBaseName) override;

  Address allocateObject(size_t allocationSize, size_t gcInfoIndex);

  void freePage(NormalPage*);

  bool coalesce();
(...skipping 41 matching lines...)
  size_t m_promptlyFreedSize;

  bool m_isLazySweeping;
};

class LargeObjectArena final : public BaseArena {
 public:
  LargeObjectArena(ThreadState*, int);
  Address allocateLargeObjectPage(size_t, size_t gcInfoIndex);
  void freeLargeObjectPage(LargeObjectPage*);
-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
  bool isConsistentForGC() override { return true; }
#endif
 private:
  Address doAllocateLargeObjectPage(size_t, size_t gcInfoIndex);
  Address lazySweepPages(size_t, size_t gcInfoIndex) override;
};

// Mask an address down to the enclosing oilpan heap base page. All oilpan heap
// pages are aligned at blinkPageBase plus the size of a guard size.
// FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our
// typed arenas. This is only exported to enable tests in HeapTest.cpp.
PLATFORM_EXPORT inline BasePage* pageFromObject(const void* object) {
  Address address = reinterpret_cast<Address>(const_cast<void*>(object));
  BasePage* page = reinterpret_cast<BasePage*>(blinkPageAddress(address) +
                                               blinkGuardPageSize);
-  ASSERT(page->contains(address));
+  DCHECK(page->contains(address));
  return page;
}

NO_SANITIZE_ADDRESS inline size_t HeapObjectHeader::size() const {
  size_t result = m_encoded & headerSizeMask;
  // Large objects should not refer to header->size().
  // The actual size of a large object is stored in
  // LargeObjectPage::m_payloadSize.
-  ASSERT(result != largeObjectSizeInHeader);
-  ASSERT(!pageFromObject(this)->isLargeObjectPage());
+  DCHECK_NE(result, largeObjectSizeInHeader);
+  DCHECK(!pageFromObject(this)->isLargeObjectPage());
  return result;
}

-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
NO_SANITIZE_ADDRESS inline bool HeapObjectHeader::checkHeader() const {
  return !pageFromObject(this)->orphaned() && m_magic == magic;
}
#endif

inline Address HeapObjectHeader::payload() {
  return reinterpret_cast<Address>(this) + sizeof(HeapObjectHeader);
}

inline Address HeapObjectHeader::payloadEnd() {
  return reinterpret_cast<Address>(this) + size();
}

NO_SANITIZE_ADDRESS inline size_t HeapObjectHeader::payloadSize() {
  size_t size = m_encoded & headerSizeMask;
  if (UNLIKELY(size == largeObjectSizeInHeader)) {
-    ASSERT(pageFromObject(this)->isLargeObjectPage());
+    DCHECK(pageFromObject(this)->isLargeObjectPage());
    return static_cast<LargeObjectPage*>(pageFromObject(this))->payloadSize();
  }
-  ASSERT(!pageFromObject(this)->isLargeObjectPage());
+  DCHECK(!pageFromObject(this)->isLargeObjectPage());
  return size - sizeof(HeapObjectHeader);
}

inline HeapObjectHeader* HeapObjectHeader::fromPayload(const void* payload) {
  Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
  HeapObjectHeader* header =
      reinterpret_cast<HeapObjectHeader*>(addr - sizeof(HeapObjectHeader));
-  ASSERT(header->checkHeader());
+  DCHECK(header->checkHeader());
  return header;
}

NO_SANITIZE_ADDRESS inline bool HeapObjectHeader::isWrapperHeaderMarked()
    const {
-  ASSERT(checkHeader());
+  DCHECK(checkHeader());
  return m_encoded & headerWrapperMarkBitMask;
}

NO_SANITIZE_ADDRESS inline void HeapObjectHeader::markWrapperHeader() {
-  ASSERT(checkHeader());
-  ASSERT(!isWrapperHeaderMarked());
+  DCHECK(checkHeader());
+  DCHECK(!isWrapperHeaderMarked());
  m_encoded |= headerWrapperMarkBitMask;
}

NO_SANITIZE_ADDRESS inline void HeapObjectHeader::unmarkWrapperHeader() {
-  ASSERT(checkHeader());
-  ASSERT(isWrapperHeaderMarked());
+  DCHECK(checkHeader());
+  DCHECK(isWrapperHeaderMarked());
  m_encoded &= ~headerWrapperMarkBitMask;
}

NO_SANITIZE_ADDRESS inline bool HeapObjectHeader::isMarked() const {
-  ASSERT(checkHeader());
+  DCHECK(checkHeader());
  return m_encoded & headerMarkBitMask;
}

NO_SANITIZE_ADDRESS inline void HeapObjectHeader::mark() {
-  ASSERT(checkHeader());
-  ASSERT(!isMarked());
+  DCHECK(checkHeader());
+  DCHECK(!isMarked());
  m_encoded = m_encoded | headerMarkBitMask;
}

NO_SANITIZE_ADDRESS inline void HeapObjectHeader::unmark() {
-  ASSERT(checkHeader());
-  ASSERT(isMarked());
+  DCHECK(checkHeader());
+  DCHECK(isMarked());
  m_encoded &= ~headerMarkBitMask;
}

NO_SANITIZE_ADDRESS inline bool HeapObjectHeader::isDead() const {
-  ASSERT(checkHeader());
+  DCHECK(checkHeader());
  return m_encoded & headerDeadBitMask;
}

NO_SANITIZE_ADDRESS inline void HeapObjectHeader::markDead() {
-  ASSERT(checkHeader());
-  ASSERT(!isMarked());
+  DCHECK(checkHeader());
+  DCHECK(!isMarked());
  m_encoded |= headerDeadBitMask;
}

inline Address NormalPageArena::allocateObject(size_t allocationSize,
                                               size_t gcInfoIndex) {
  if (LIKELY(allocationSize <= m_remainingAllocationSize)) {
    Address headerAddress = m_currentAllocationPoint;
    m_currentAllocationPoint += allocationSize;
    m_remainingAllocationSize -= allocationSize;
-    ASSERT(gcInfoIndex > 0);
+    DCHECK_GT(gcInfoIndex, 0UL);
    new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex);
    Address result = headerAddress + sizeof(HeapObjectHeader);
-    ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
+    DCHECK(!(reinterpret_cast<uintptr_t>(result) & allocationMask));

    SET_MEMORY_ACCESSIBLE(result, allocationSize - sizeof(HeapObjectHeader));
-    ASSERT(findPageFromAddress(headerAddress + allocationSize - 1));
+    DCHECK(findPageFromAddress(headerAddress + allocationSize - 1));
    return result;
  }
  return outOfLineAllocate(allocationSize, gcInfoIndex);
}

inline NormalPageArena* NormalPage::arenaForNormalPage() const {
  return static_cast<NormalPageArena*>(arena());
}

}  // namespace blink

#endif  // HeapPage_h
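NormalPageArena::allocateObject above is a classic bump-pointer fast path: claim the current allocation point, advance it, placement-new a header, and fall back to outOfLineAllocate when the linear buffer runs out. Stripped of Blink's checking and sanitizer macros, the shape is roughly as follows (a sketch, not the real implementation):

  #include <cstddef>

  struct BumpAllocator {
    char* currentAllocationPoint = nullptr;
    size_t remainingAllocationSize = 0;

    // Fast path only; the real code stamps a HeapObjectHeader at the start
    // of the block and returns the payload address just after the header.
    char* allocate(size_t allocationSize) {
      if (allocationSize <= remainingAllocationSize) {
        char* result = currentAllocationPoint;
        currentAllocationPoint += allocationSize;
        remainingAllocationSize -= allocationSize;
        return result;
      }
      return nullptr;  // real code: outOfLineAllocate(allocationSize, ...)
    }
  };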