Chromium Code Reviews

Side by Side Diff: Source/platform/heap/Heap.h

Issue 906213002: Oilpan: Rename heap classes (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 5 years, 10 months ago
OLD | NEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 71 matching lines...)
82 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) 82 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
83 #define FILL_ZERO_IF_PRODUCTION(address, size) do { } while (false) 83 #define FILL_ZERO_IF_PRODUCTION(address, size) do { } while (false)
84 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) memset((address), 0, (size)) 84 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) memset((address), 0, (size))
85 #else 85 #else
86 #define FILL_ZERO_IF_PRODUCTION(address, size) memset((address), 0, (size)) 86 #define FILL_ZERO_IF_PRODUCTION(address, size) memset((address), 0, (size))
87 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) do { } while (false) 87 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) do { } while (false)
88 #endif 88 #endif
89 89
90 class CallbackStack; 90 class CallbackStack;
91 class PageMemory; 91 class PageMemory;
92 class ThreadHeapForHeapPage; 92 class NormalPageHeap;
93 template<ThreadAffinity affinity> class ThreadLocalPersistents; 93 template<ThreadAffinity affinity> class ThreadLocalPersistents;
94 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity>> class Persistent; 94 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity>> class Persistent;
95 95
96 #if ENABLE(GC_PROFILING) 96 #if ENABLE(GC_PROFILING)
97 class TracedValue; 97 class TracedValue;
98 #endif 98 #endif
99 99
100 // HeapObjectHeader is a 4-byte (32-bit) header with the following layout: 100 // HeapObjectHeader is a 4-byte (32-bit) header with the following layout:
101 // 101 //
102 // | gcInfoIndex (15 bit) | size (14 bit) | dead bit (1 bit) | freed bit (1 bit) | mark bit (1 bit) | 102 // | gcInfoIndex (15 bit) | size (14 bit) | dead bit (1 bit) | freed bit (1 bit) | mark bit (1 bit) |
103 // 103 //
104 // - For non-large objects, 14 bits are enough for |size| because the Blink 104 // - For non-large objects, 14 bits are enough for |size| because the Blink
105 // page size is 2^17 bytes and each object is guaranteed to be aligned at 105 // page size is 2^17 bytes and each object is guaranteed to be aligned at
106 // a 2^3-byte boundary. 106 // a 2^3-byte boundary.
107 // - For large objects, |size| is 0. The actual size of a large object is 107 // - For large objects, |size| is 0. The actual size of a large object is
108 // stored in LargeObject::m_payloadSize. 108 // stored in LargeObjectPage::m_payloadSize.
109 // - 15 bits are enough for gcInfoIndex because there are fewer than 2^15 types 109 // - 15 bits are enough for gcInfoIndex because there are fewer than 2^15 types
110 // in Blink. 110 // in Blink.
111 const size_t headerGCInfoIndexShift = 17; 111 const size_t headerGCInfoIndexShift = 17;
112 const size_t headerGCInfoIndexMask = (static_cast<size_t>((1 << 15) - 1)) << headerGCInfoIndexShift; 112 const size_t headerGCInfoIndexMask = (static_cast<size_t>((1 << 15) - 1)) << headerGCInfoIndexShift;
113 const size_t headerSizeMask = (static_cast<size_t>((1 << 14) - 1)) << 3; 113 const size_t headerSizeMask = (static_cast<size_t>((1 << 14) - 1)) << 3;
114 const size_t headerMarkBitMask = 1; 114 const size_t headerMarkBitMask = 1;
115 const size_t headerFreedBitMask = 2; 115 const size_t headerFreedBitMask = 2;
116 // The dead bit is used for objects that have gone through a GC marking, but did 116 // The dead bit is used for objects that have gone through a GC marking, but did
117 // not get swept before a new GC started. In that case we set the dead bit on 117 // not get swept before a new GC started. In that case we set the dead bit on
118 // objects that were not marked in the previous GC to ensure we are not tracing 118 // objects that were not marked in the previous GC to ensure we are not tracing
119 // them via a conservatively found pointer. Tracing dead objects could lead to 119 // them via a conservatively found pointer. Tracing dead objects could lead to
120 // tracing of already finalized objects in another thread's heap which is a 120 // tracing of already finalized objects in another thread's heap which is a
121 // use-after-free situation. 121 // use-after-free situation.
122 const size_t headerDeadBitMask = 4; 122 const size_t headerDeadBitMask = 4;
123 // On free-list entries we reuse the dead bit to distinguish a normal free-list 123 // On free-list entries we reuse the dead bit to distinguish a normal free-list
124 // entry from one that has been promptly freed. 124 // entry from one that has been promptly freed.
125 const size_t headerPromptlyFreedBitMask = headerFreedBitMask | headerDeadBitMask; 125 const size_t headerPromptlyFreedBitMask = headerFreedBitMask | headerDeadBitMask;
126 const size_t largeObjectSizeInHeader = 0; 126 const size_t largeObjectSizeInHeader = 0;
127 const size_t gcInfoIndexForFreeListHeader = 0; 127 const size_t gcInfoIndexForFreeListHeader = 0;
128 const size_t nonLargeObjectSizeMax = 1 << 17; 128 const size_t nonLargeObjectPageSizeMax = 1 << 17;
129 129
130 static_assert(nonLargeObjectSizeMax >= blinkPageSize, "max size supported by HeapObjectHeader must at least be blinkPageSize"); 130 static_assert(nonLargeObjectPageSizeMax >= blinkPageSize, "max size supported by HeapObjectHeader must at least be blinkPageSize");
131 131
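To make the encoding concrete, here is a minimal decode sketch using the mask and shift constants above. It is illustrative only and not part of this patch; the function and local names are made up.

    // Sketch only: decoding one HeapObjectHeader encoded word.
    void decodeHeader(size_t encoded)
    {
        size_t gcInfoIndex = (encoded & headerGCInfoIndexMask) >> headerGCInfoIndexShift;
        size_t size = encoded & headerSizeMask;       // largeObjectSizeInHeader (0) for large objects
        bool isMarked = encoded & headerMarkBitMask;  // survived the last marking phase
        bool isFree = encoded & headerFreedBitMask;   // header marks a free-list entry
        bool isDead = encoded & headerDeadBitMask;    // unmarked when a new GC started
    }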
132 class PLATFORM_EXPORT HeapObjectHeader { 132 class PLATFORM_EXPORT HeapObjectHeader {
133 public: 133 public:
134 // If gcInfoIndex is 0, this header is interpreted as a free list header. 134 // If gcInfoIndex is 0, this header is interpreted as a free list header.
135 NO_SANITIZE_ADDRESS 135 NO_SANITIZE_ADDRESS
136 HeapObjectHeader(size_t size, size_t gcInfoIndex) 136 HeapObjectHeader(size_t size, size_t gcInfoIndex)
137 { 137 {
138 #if ENABLE(ASSERT) 138 #if ENABLE(ASSERT)
139 m_magic = magic; 139 m_magic = magic;
140 #endif 140 #endif
141 #if ENABLE(GC_PROFILING) 141 #if ENABLE(GC_PROFILING)
142 m_age = 0; 142 m_age = 0;
143 #endif 143 #endif
144 // sizeof(HeapObjectHeader) must be equal to or smaller than 144 // sizeof(HeapObjectHeader) must be equal to or smaller than
145 // allocationGranularity, because HeapObjectHeader is used as a header 145 // allocationGranularity, because HeapObjectHeader is used as a header
146 // for a freed entry. Given that the smallest entry size is 146 // for a freed entry. Given that the smallest entry size is
147 // allocationGranularity, HeapObjectHeader must fit into that size. 147 // allocationGranularity, HeapObjectHeader must fit into that size.
148 static_assert(sizeof(HeapObjectHeader) <= allocationGranularity, "size of HeapObjectHeader must be smaller than allocationGranularity"); 148 static_assert(sizeof(HeapObjectHeader) <= allocationGranularity, "size of HeapObjectHeader must be smaller than allocationGranularity");
149 #if CPU(64BIT) 149 #if CPU(64BIT)
150 static_assert(sizeof(HeapObjectHeader) == 8, "size of HeapObjectHeader must be 8 byte aligned"); 150 static_assert(sizeof(HeapObjectHeader) == 8, "size of HeapObjectHeader must be 8 byte aligned");
151 #endif 151 #endif
152 152
153 ASSERT(gcInfoIndex < GCInfoTable::maxIndex); 153 ASSERT(gcInfoIndex < GCInfoTable::maxIndex);
154 ASSERT(size < nonLargeObjectSizeMax); 154 ASSERT(size < nonLargeObjectPageSizeMax);
155 ASSERT(!(size & allocationMask)); 155 ASSERT(!(size & allocationMask));
156 m_encoded = (gcInfoIndex << headerGCInfoIndexShift) | size | (gcInfoIndex ? 0 : headerFreedBitMask); 156 m_encoded = (gcInfoIndex << headerGCInfoIndexShift) | size | (gcInfoIndex ? 0 : headerFreedBitMask);
157 } 157 }
158 158
159 NO_SANITIZE_ADDRESS 159 NO_SANITIZE_ADDRESS
160 bool isFree() const { return m_encoded & headerFreedBitMask; } 160 bool isFree() const { return m_encoded & headerFreedBitMask; }
161 NO_SANITIZE_ADDRESS 161 NO_SANITIZE_ADDRESS
162 bool isPromptlyFreed() const { return (m_encoded & headerPromptlyFreedBitMask) == headerPromptlyFreedBitMask; } 162 bool isPromptlyFreed() const { return (m_encoded & headerPromptlyFreedBitMask) == headerPromptlyFreedBitMask; }
163 NO_SANITIZE_ADDRESS 163 NO_SANITIZE_ADDRESS
164 void markPromptlyFreed() { m_encoded |= headerPromptlyFreedBitMask; } 164 void markPromptlyFreed() { m_encoded |= headerPromptlyFreedBitMask; }
(...skipping 178 matching lines...)
343 // header should be OS page size away from being Blink page size 343 // header should be OS page size away from being Blink page size
344 // aligned. 344 // aligned.
345 inline bool isPageHeaderAddress(Address address) 345 inline bool isPageHeaderAddress(Address address)
346 { 346 {
347 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - WTF::kSystemPageSize); 347 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - WTF::kSystemPageSize);
348 } 348 }
349 #endif 349 #endif
350 350
351 // FIXME: Add a good comment about the heap layout once heap relayout work 351 // FIXME: Add a good comment about the heap layout once heap relayout work
352 // is done. 352 // is done.
353 class BaseHeapPage { 353 class BasePage {
354 public: 354 public:
355 BaseHeapPage(PageMemory*, ThreadHeap*); 355 BasePage(PageMemory*, BaseHeap*);
356 virtual ~BaseHeapPage() { } 356 virtual ~BasePage() { }
357 357
358 void link(BaseHeapPage** previousNext) 358 void link(BasePage** previousNext)
359 { 359 {
360 m_next = *previousNext; 360 m_next = *previousNext;
361 *previousNext = this; 361 *previousNext = this;
362 } 362 }
363 void unlink(BaseHeapPage** previousNext) 363 void unlink(BasePage** previousNext)
364 { 364 {
365 *previousNext = m_next; 365 *previousNext = m_next;
366 m_next = nullptr; 366 m_next = nullptr;
367 } 367 }
368 BaseHeapPage* next() const { return m_next; } 368 BasePage* next() const { return m_next; }
369 369
370 // virtual methods are slow. So performance-sensitive methods 370 // virtual methods are slow. So performance-sensitive methods
371 // should be defined as non-virtual methods on HeapPage and LargeObject. 371 // should be defined as non-virtual methods on NormalPage and LargeObjectPage.
372 // The following methods are not performance-sensitive. 372 // The following methods are not performance-sensitive.
373 virtual size_t objectPayloadSizeForTesting() = 0; 373 virtual size_t objectPayloadSizeForTesting() = 0;
374 virtual bool isEmpty() = 0; 374 virtual bool isEmpty() = 0;
375 virtual void removeFromHeap() = 0; 375 virtual void removeFromHeap() = 0;
376 virtual void sweep() = 0; 376 virtual void sweep() = 0;
377 virtual void markUnmarkedObjectsDead() = 0; 377 virtual void markUnmarkedObjectsDead() = 0;
378 // Check if the given address points to an object in this 378 // Check if the given address points to an object in this
379 // heap page. If so, find the start of that object and mark it 379 // heap page. If so, find the start of that object and mark it
380 // using the given Visitor. Otherwise do nothing. The pointer must 380 // using the given Visitor. Otherwise do nothing. The pointer must
381 // be within the same aligned blinkPageSize as the this-pointer. 381 // be within the same aligned blinkPageSize as the this-pointer.
382 // 382 //
383 // This is used during conservative stack scanning to 383 // This is used during conservative stack scanning to
384 // conservatively mark all objects that could be referenced from 384 // conservatively mark all objects that could be referenced from
385 // the stack. 385 // the stack.
386 virtual void checkAndMarkPointer(Visitor*, Address) = 0; 386 virtual void checkAndMarkPointer(Visitor*, Address) = 0;
387 virtual void markOrphaned(); 387 virtual void markOrphaned();
388 #if ENABLE(GC_PROFILING) 388 #if ENABLE(GC_PROFILING)
389 virtual const GCInfo* findGCInfo(Address) = 0; 389 virtual const GCInfo* findGCInfo(Address) = 0;
390 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) = 0; 390 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) = 0;
391 #endif 391 #endif
392 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) 392 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING)
393 virtual bool contains(Address) = 0; 393 virtual bool contains(Address) = 0;
394 #endif 394 #endif
395 virtual size_t size() = 0; 395 virtual size_t size() = 0;
396 virtual bool isLargeObject() { return false; } 396 virtual bool isLargeObjectPage() { return false; }
397 397
398 Address address() { return reinterpret_cast<Address>(this); } 398 Address address() { return reinterpret_cast<Address>(this); }
399 PageMemory* storage() const { return m_storage; } 399 PageMemory* storage() const { return m_storage; }
400 ThreadHeap* heap() const { return m_heap; } 400 BaseHeap* heap() const { return m_heap; }
401 bool orphaned() { return !m_heap; } 401 bool orphaned() { return !m_heap; }
402 bool terminating() { return m_terminating; } 402 bool terminating() { return m_terminating; }
403 void setTerminating() { m_terminating = true; } 403 void setTerminating() { m_terminating = true; }
404 404
405 // Returns true if this page has been swept by the ongoing lazy sweep. 405 // Returns true if this page has been swept by the ongoing lazy sweep.
406 bool hasBeenSwept() const { return m_swept; } 406 bool hasBeenSwept() const { return m_swept; }
407 407
408 void markAsSwept() 408 void markAsSwept()
409 { 409 {
410 ASSERT(!m_swept); 410 ASSERT(!m_swept);
411 m_swept = true; 411 m_swept = true;
412 } 412 }
413 413
414 void markAsUnswept() 414 void markAsUnswept()
415 { 415 {
416 ASSERT(m_swept); 416 ASSERT(m_swept);
417 m_swept = false; 417 m_swept = false;
418 } 418 }
419 419
420 private: 420 private:
421 PageMemory* m_storage; 421 PageMemory* m_storage;
422 ThreadHeap* m_heap; 422 BaseHeap* m_heap;
423 BaseHeapPage* m_next; 423 BasePage* m_next;
424 // Whether the page is part of a terminating thread or not. 424 // Whether the page is part of a terminating thread or not.
425 bool m_terminating; 425 bool m_terminating;
426 426
427 // Track the sweeping state of a page. Set to true once 427 // Track the sweeping state of a page. Set to true once
428 // the lazy sweep has processed it. 428 // the lazy sweep has processed it.
429 // 429 //
430 // Set to false at the start of a sweep, true upon completion 430 // Set to false at the start of a sweep, true upon completion
431 // of lazy sweeping. 431 // of lazy sweeping.
432 bool m_swept; 432 bool m_swept;
433 friend class ThreadHeap; 433 friend class BaseHeap;
434 }; 434 };
435 435
436 class HeapPage final : public BaseHeapPage { 436 class NormalPage final : public BasePage {
437 public: 437 public:
438 HeapPage(PageMemory*, ThreadHeap*); 438 NormalPage(PageMemory*, BaseHeap*);
439 439
440 Address payload() 440 Address payload()
441 { 441 {
442 return address() + sizeof(HeapPage) + headerPadding(); 442 return address() + sizeof(NormalPage) + headerPadding();
443 } 443 }
444 size_t payloadSize() 444 size_t payloadSize()
445 { 445 {
446 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding()) & ~allocationMask; 446 return (blinkPagePayloadSize() - sizeof(NormalPage) - headerPadding()) & ~allocationMask;
447 } 447 }
448 Address payloadEnd() { return payload() + payloadSize(); } 448 Address payloadEnd() { return payload() + payloadSize(); }
449 bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); } 449 bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); }
450 450
451 virtual size_t objectPayloadSizeForTesting() override; 451 virtual size_t objectPayloadSizeForTesting() override;
452 virtual bool isEmpty() override; 452 virtual bool isEmpty() override;
453 virtual void removeFromHeap() override; 453 virtual void removeFromHeap() override;
454 virtual void sweep() override; 454 virtual void sweep() override;
455 virtual void markUnmarkedObjectsDead() override; 455 virtual void markUnmarkedObjectsDead() override;
456 virtual void checkAndMarkPointer(Visitor*, Address) override; 456 virtual void checkAndMarkPointer(Visitor*, Address) override;
457 virtual void markOrphaned() override 457 virtual void markOrphaned() override
458 { 458 {
459 // Zap the payload with a recognizable value to detect any incorrect 459 // Zap the payload with a recognizable value to detect any incorrect
460 // cross thread pointer usage. 460 // cross thread pointer usage.
461 #if defined(ADDRESS_SANITIZER) 461 #if defined(ADDRESS_SANITIZER)
462 // This needs to zap poisoned memory as well. 462 // This needs to zap poisoned memory as well.
463 // Force unpoison memory before memset. 463 // Force unpoison memory before memset.
464 ASAN_UNPOISON_MEMORY_REGION(payload(), payloadSize()); 464 ASAN_UNPOISON_MEMORY_REGION(payload(), payloadSize());
465 #endif 465 #endif
466 memset(payload(), orphanedZapValue, payloadSize()); 466 memset(payload(), orphanedZapValue, payloadSize());
467 BaseHeapPage::markOrphaned(); 467 BasePage::markOrphaned();
468 } 468 }
469 #if ENABLE(GC_PROFILING) 469 #if ENABLE(GC_PROFILING)
470 const GCInfo* findGCInfo(Address) override; 470 const GCInfo* findGCInfo(Address) override;
471 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*); 471 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*);
472 void incrementMarkedObjectsAge(); 472 void incrementMarkedObjectsAge();
473 void countMarkedObjects(ClassAgeCountsMap&); 473 void countMarkedObjects(ClassAgeCountsMap&);
474 void countObjectsToSweep(ClassAgeCountsMap&); 474 void countObjectsToSweep(ClassAgeCountsMap&);
475 #endif 475 #endif
476 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) 476 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING)
477 // Returns true for the whole blinkPageSize page that the page is on, even 477 // Returns true for the whole blinkPageSize page that the page is on, even
478 // for the header, and the unmapped guard page at the start. That ensures 478 // for the header, and the unmapped guard page at the start. That ensures
479 // the result can be used to populate the negative page cache. 479 // the result can be used to populate the negative page cache.
480 virtual bool contains(Address addr) override 480 virtual bool contains(Address addr) override
481 { 481 {
482 Address blinkPageStart = roundToBlinkPageStart(address()); 482 Address blinkPageStart = roundToBlinkPageStart(address());
483 ASSERT(blinkPageStart == address() - WTF::kSystemPageSize); // Page is at aligned address plus guard page size. 483 ASSERT(blinkPageStart == address() - WTF::kSystemPageSize); // Page is at aligned address plus guard page size.
484 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; 484 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize;
485 } 485 }
486 #endif 486 #endif
487 virtual size_t size() override { return blinkPageSize; } 487 virtual size_t size() override { return blinkPageSize; }
488 // Compute the amount of padding we have to add to a header to make 488 // Compute the amount of padding we have to add to a header to make
489 // the size of the header plus the padding a multiple of 8 bytes. 489 // the size of the header plus the padding a multiple of 8 bytes.
490 static size_t headerPadding() 490 static size_t headerPadding()
491 { 491 {
492 return (sizeof(HeapPage) + allocationGranularity - (sizeof(HeapObjectHeader) % allocationGranularity)) % allocationGranularity; 492 return (sizeof(NormalPage) + allocationGranularity - (sizeof(HeapObjectHeader) % allocationGranularity)) % allocationGranularity;
493 } 493 }
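As a worked instance of this formula, with hypothetical sizes (allocationGranularity == 8 and sizeof(HeapObjectHeader) == 8, per the 64-bit static_assert earlier; suppose sizeof(NormalPage) == 120): padding = (120 + 8 - (8 % 8)) % 8 = 128 % 8 = 0, so the first object header starts immediately after the page header, already 8-byte aligned.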
494 494
495 495
496 ThreadHeapForHeapPage* heapForHeapPage(); 496 NormalPageHeap* heapForNormalPage();
497 void clearObjectStartBitMap(); 497 void clearObjectStartBitMap();
498 498
499 #if defined(ADDRESS_SANITIZER) 499 #if defined(ADDRESS_SANITIZER)
500 void poisonUnmarkedObjects(); 500 void poisonUnmarkedObjects();
501 #endif 501 #endif
502 502
503 private: 503 private:
504 HeapObjectHeader* findHeaderFromAddress(Address); 504 HeapObjectHeader* findHeaderFromAddress(Address);
505 void populateObjectStartBitMap(); 505 void populateObjectStartBitMap();
506 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } 506 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; }
507 507
508 bool m_objectStartBitMapComputed; 508 bool m_objectStartBitMapComputed;
509 uint8_t m_objectStartBitMap[reservedForObjectBitMap]; 509 uint8_t m_objectStartBitMap[reservedForObjectBitMap];
510 }; 510 };
511 511
512 // Large allocations are allocated as separate objects and linked in a list. 512 // Large allocations are allocated as separate objects and linked in a list.
513 // 513 //
514 // In order to use the same memory allocation routines for everything allocated 514 // In order to use the same memory allocation routines for everything allocated
515 // in the heap, large objects are considered heap pages containing only one 515 // in the heap, large objects are considered heap pages containing only one
516 // object. 516 // object.
517 class LargeObject final : public BaseHeapPage { 517 class LargeObjectPage final : public BasePage {
518 public: 518 public:
519 LargeObject(PageMemory* storage, ThreadHeap* heap, size_t payloadSize) 519 LargeObjectPage(PageMemory* storage, BaseHeap* heap, size_t payloadSize)
520 : BaseHeapPage(storage, heap) 520 : BasePage(storage, heap)
521 , m_payloadSize(payloadSize) 521 , m_payloadSize(payloadSize)
522 { 522 {
523 } 523 }
524 524
525 Address payload() { return heapObjectHeader()->payload(); } 525 Address payload() { return heapObjectHeader()->payload(); }
526 size_t payloadSize() { return m_payloadSize; } 526 size_t payloadSize() { return m_payloadSize; }
527 Address payloadEnd() { return payload() + payloadSize(); } 527 Address payloadEnd() { return payload() + payloadSize(); }
528 bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); } 528 bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); }
529 529
530 virtual size_t objectPayloadSizeForTesting() override; 530 virtual size_t objectPayloadSizeForTesting() override;
531 virtual bool isEmpty() override; 531 virtual bool isEmpty() override;
532 virtual void removeFromHeap() override; 532 virtual void removeFromHeap() override;
533 virtual void sweep() override; 533 virtual void sweep() override;
534 virtual void markUnmarkedObjectsDead() override; 534 virtual void markUnmarkedObjectsDead() override;
535 virtual void checkAndMarkPointer(Visitor*, Address) override; 535 virtual void checkAndMarkPointer(Visitor*, Address) override;
536 virtual void markOrphaned() override 536 virtual void markOrphaned() override
537 { 537 {
538 // Zap the payload with a recognizable value to detect any incorrect 538 // Zap the payload with a recognizable value to detect any incorrect
539 // cross thread pointer usage. 539 // cross thread pointer usage.
540 memset(payload(), orphanedZapValue, payloadSize()); 540 memset(payload(), orphanedZapValue, payloadSize());
541 BaseHeapPage::markOrphaned(); 541 BasePage::markOrphaned();
542 } 542 }
543 543
544 #if ENABLE(GC_PROFILING) 544 #if ENABLE(GC_PROFILING)
545 virtual const GCInfo* findGCInfo(Address) override; 545 virtual const GCInfo* findGCInfo(Address) override;
546 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override; 546 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override;
547 void incrementMarkedObjectsAge(); 547 void incrementMarkedObjectsAge();
548 void countMarkedObjects(ClassAgeCountsMap&); 548 void countMarkedObjects(ClassAgeCountsMap&);
549 void countObjectsToSweep(ClassAgeCountsMap&); 549 void countObjectsToSweep(ClassAgeCountsMap&);
550 #endif 550 #endif
551 551
552 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) 552 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING)
553 // Returns true for any address that is on one of the pages that this 553 // Returns true for any address that is on one of the pages that this
554 // large object uses. That ensures that we can use a negative result to 554 // large object uses. That ensures that we can use a negative result to
555 // populate the negative page cache. 555 // populate the negative page cache.
556 virtual bool contains(Address object) override 556 virtual bool contains(Address object) override
557 { 557 {
558 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size()); 558 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size());
559 } 559 }
560 #endif 560 #endif
561 virtual size_t size() 561 virtual size_t size()
562 { 562 {
563 return sizeof(LargeObject) + headerPadding() + sizeof(HeapObjectHeader) + m_payloadSize; 563 return sizeof(LargeObjectPage) + headerPadding() + sizeof(HeapObjectHeader) + m_payloadSize;
564 } 564 }
565 // Compute the amount of padding we have to add to a header to make 565 // Compute the amount of padding we have to add to a header to make
566 // the size of the header plus the padding a multiple of 8 bytes. 566 // the size of the header plus the padding a multiple of 8 bytes.
567 static size_t headerPadding() 567 static size_t headerPadding()
568 { 568 {
569 return (sizeof(LargeObject) + allocationGranularity - (sizeof(HeapObjectHeader) % allocationGranularity)) % allocationGranularity; 569 return (sizeof(LargeObjectPage) + allocationGranularity - (sizeof(HeapObjectHeader) % allocationGranularity)) % allocationGranularity;
570 } 570 }
571 virtual bool isLargeObject() override { return true; } 571 virtual bool isLargeObjectPage() override { return true; }
572 572
573 HeapObjectHeader* heapObjectHeader() 573 HeapObjectHeader* heapObjectHeader()
574 { 574 {
575 Address headerAddress = address() + sizeof(LargeObject) + headerPadding(); 575 Address headerAddress = address() + sizeof(LargeObjectPage) + headerPadding();
576 return reinterpret_cast<HeapObjectHeader*>(headerAddress); 576 return reinterpret_cast<HeapObjectHeader*>(headerAddress);
577 } 577 }
578 578
579 private: 579 private:
580 580
581 size_t m_payloadSize; 581 size_t m_payloadSize;
582 }; 582 };
583 583
584 // A HeapDoesNotContainCache provides a fast way of taking an arbitrary 584 // A HeapDoesNotContainCache provides a fast way of taking an arbitrary
585 // pointer-sized word, and determining whether it cannot be interpreted as a 585 // pointer-sized word, and determining whether it cannot be interpreted as a
(...skipping 74 matching lines...)
660 class FreePagePool : public PagePool<PageMemory> { 660 class FreePagePool : public PagePool<PageMemory> {
661 public: 661 public:
662 ~FreePagePool(); 662 ~FreePagePool();
663 void addFreePage(int, PageMemory*); 663 void addFreePage(int, PageMemory*);
664 PageMemory* takeFreePage(int); 664 PageMemory* takeFreePage(int);
665 665
666 private: 666 private:
667 Mutex m_mutex[NumberOfHeaps]; 667 Mutex m_mutex[NumberOfHeaps];
668 }; 668 };
669 669
670 class OrphanedPagePool : public PagePool<BaseHeapPage> { 670 class OrphanedPagePool : public PagePool<BasePage> {
671 public: 671 public:
672 ~OrphanedPagePool(); 672 ~OrphanedPagePool();
673 void addOrphanedPage(int, BaseHeapPage*); 673 void addOrphanedPage(int, BasePage*);
674 void decommitOrphanedPages(); 674 void decommitOrphanedPages();
675 #if ENABLE(ASSERT) 675 #if ENABLE(ASSERT)
676 bool contains(void*); 676 bool contains(void*);
677 #endif 677 #endif
678 private: 678 private:
679 void clearMemory(PageMemory*); 679 void clearMemory(PageMemory*);
680 }; 680 };
681 681
682 class FreeList { 682 class FreeList {
683 public: 683 public:
(...skipping 16 matching lines...)
700 700
701 void getFreeSizeStats(PerBucketFreeListStats bucketStats[], size_t& totalSize) const; 701 void getFreeSizeStats(PerBucketFreeListStats bucketStats[], size_t& totalSize) const;
702 #endif 702 #endif
703 703
704 private: 704 private:
705 int m_biggestFreeListIndex; 705 int m_biggestFreeListIndex;
706 706
707 // All FreeListEntries in the nth list have size >= 2^n. 707 // All FreeListEntries in the nth list have size >= 2^n.
708 FreeListEntry* m_freeLists[blinkPageSizeLog2]; 708 FreeListEntry* m_freeLists[blinkPageSizeLog2];
709 709
710 friend class ThreadHeapForHeapPage; 710 friend class NormalPageHeap;
711 }; 711 };
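Given the invariant above (bucket n holds entries of size >= 2^n), the bucket for a free block is floor(log2(size)). A minimal sketch of that mapping; the helper name is hypothetical and not part of this patch:

    // Illustrative only: which of m_freeLists[] a free block of |size| belongs in.
    static int bucketIndexForSize(size_t size)
    {
        ASSERT(size > 0);
        int index = -1;
        while (size) {
            size >>= 1;
            ++index;
        }
        return index; // floor(log2(size)), always < blinkPageSizeLog2
    }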
712 712
713 // Thread heaps represent a part of the per-thread Blink heap. 713 // Thread heaps represent a part of the per-thread Blink heap.
714 // 714 //
715 // Each Blink thread has a number of thread heaps: one general heap 715 // Each Blink thread has a number of thread heaps: one general heap
716 // that contains any type of object and a number of heaps specialized 716 // that contains any type of object and a number of heaps specialized
717 // for specific object types (such as Node). 717 // for specific object types (such as Node).
718 // 718 //
719 // Each thread heap contains the functionality to allocate new objects 719 // Each thread heap contains the functionality to allocate new objects
720 // (potentially adding new pages to the heap), to find and mark 720 // (potentially adding new pages to the heap), to find and mark
721 // objects during conservative stack scanning and to sweep the set of 721 // objects during conservative stack scanning and to sweep the set of
722 // pages after a GC. 722 // pages after a GC.
723 class PLATFORM_EXPORT ThreadHeap { 723 class PLATFORM_EXPORT BaseHeap {
724 public: 724 public:
725 ThreadHeap(ThreadState*, int); 725 BaseHeap(ThreadState*, int);
726 virtual ~ThreadHeap(); 726 virtual ~BaseHeap();
727 void cleanupPages(); 727 void cleanupPages();
728 728
729 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) 729 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING)
730 BaseHeapPage* findPageFromAddress(Address); 730 BasePage* findPageFromAddress(Address);
731 #endif 731 #endif
732 #if ENABLE(GC_PROFILING) 732 #if ENABLE(GC_PROFILING)
733 void snapshot(TracedValue*, ThreadState::SnapshotInfo*); 733 void snapshot(TracedValue*, ThreadState::SnapshotInfo*);
734 void incrementMarkedObjectsAge(); 734 void incrementMarkedObjectsAge();
735 #endif 735 #endif
736 736
737 virtual void clearFreeLists() { } 737 virtual void clearFreeLists() { }
738 void makeConsistentForSweeping(); 738 void makeConsistentForSweeping();
739 #if ENABLE(ASSERT) 739 #if ENABLE(ASSERT)
740 virtual bool isConsistentForSweeping() = 0; 740 virtual bool isConsistentForSweeping() = 0;
741 #endif 741 #endif
742 size_t objectPayloadSizeForTesting(); 742 size_t objectPayloadSizeForTesting();
743 void prepareHeapForTermination(); 743 void prepareHeapForTermination();
744 void prepareForSweep(); 744 void prepareForSweep();
745 Address lazySweep(size_t, size_t gcInfoIndex); 745 Address lazySweep(size_t, size_t gcInfoIndex);
746 void completeSweep(); 746 void completeSweep();
747 747
748 ThreadState* threadState() { return m_threadState; } 748 ThreadState* threadState() { return m_threadState; }
749 int heapIndex() const { return m_index; } 749 int heapIndex() const { return m_index; }
750 inline static size_t allocationSizeFromSize(size_t); 750 inline static size_t allocationSizeFromSize(size_t);
751 inline static size_t roundedAllocationSize(size_t size) 751 inline static size_t roundedAllocationSize(size_t size)
752 { 752 {
753 return allocationSizeFromSize(size) - sizeof(HeapObjectHeader); 753 return allocationSizeFromSize(size) - sizeof(HeapObjectHeader);
754 } 754 }
755 755
756 protected: 756 protected:
757 BaseHeapPage* m_firstPage; 757 BasePage* m_firstPage;
758 BaseHeapPage* m_firstUnsweptPage; 758 BasePage* m_firstUnsweptPage;
759 759
760 private: 760 private:
761 virtual Address lazySweepPages(size_t, size_t gcInfoIndex) = 0; 761 virtual Address lazySweepPages(size_t, size_t gcInfoIndex) = 0;
762 762
763 ThreadState* m_threadState; 763 ThreadState* m_threadState;
764 764
765 // Index into the page pools. This is used to ensure that the pages of the 765 // Index into the page pools. This is used to ensure that the pages of the
766 // same type go into the correct page pool and thus avoid type confusion. 766 // same type go into the correct page pool and thus avoid type confusion.
767 int m_index; 767 int m_index;
768 }; 768 };
769 769
770 class PLATFORM_EXPORT ThreadHeapForHeapPage final : public ThreadHeap { 770 class PLATFORM_EXPORT NormalPageHeap final : public BaseHeap {
771 public: 771 public:
772 ThreadHeapForHeapPage(ThreadState*, int); 772 NormalPageHeap(ThreadState*, int);
773 void addToFreeList(Address address, size_t size) 773 void addToFreeList(Address address, size_t size)
774 { 774 {
775 ASSERT(findPageFromAddress(address)); 775 ASSERT(findPageFromAddress(address));
776 ASSERT(findPageFromAddress(address + size - 1)); 776 ASSERT(findPageFromAddress(address + size - 1));
777 m_freeList.addToFreeList(address, size); 777 m_freeList.addToFreeList(address, size);
778 } 778 }
779 virtual void clearFreeLists() override; 779 virtual void clearFreeLists() override;
780 #if ENABLE(ASSERT) 780 #if ENABLE(ASSERT)
781 virtual bool isConsistentForSweeping() override; 781 virtual bool isConsistentForSweeping() override;
782 bool pagesToBeSweptContains(Address); 782 bool pagesToBeSweptContains(Address);
783 #endif 783 #endif
784 784
785 inline Address allocate(size_t payloadSize, size_t gcInfoIndex); 785 inline Address allocate(size_t payloadSize, size_t gcInfoIndex);
786 inline Address allocateObject(size_t allocationSize, size_t gcInfoIndex); 786 inline Address allocateObject(size_t allocationSize, size_t gcInfoIndex);
787 787
788 void freePage(HeapPage*); 788 void freePage(NormalPage*);
789 789
790 bool coalesce(); 790 bool coalesce();
791 void promptlyFreeObject(HeapObjectHeader*); 791 void promptlyFreeObject(HeapObjectHeader*);
792 bool expandObject(HeapObjectHeader*, size_t); 792 bool expandObject(HeapObjectHeader*, size_t);
793 void shrinkObject(HeapObjectHeader*, size_t); 793 void shrinkObject(HeapObjectHeader*, size_t);
794 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; } 794 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; }
795 795
796 #if ENABLE(GC_PROFILING) 796 #if ENABLE(GC_PROFILING)
797 void snapshotFreeList(TracedValue&); 797 void snapshotFreeList(TracedValue&);
798 798
(...skipping 20 matching lines...)
819 // The size of promptly freed objects in the heap. 819 // The size of promptly freed objects in the heap.
820 size_t m_promptlyFreedSize; 820 size_t m_promptlyFreedSize;
821 821
822 #if ENABLE(GC_PROFILING) 822 #if ENABLE(GC_PROFILING)
823 size_t m_cumulativeAllocationSize; 823 size_t m_cumulativeAllocationSize;
824 size_t m_allocationCount; 824 size_t m_allocationCount;
825 size_t m_inlineAllocationCount; 825 size_t m_inlineAllocationCount;
826 #endif 826 #endif
827 }; 827 };
828 828
829 class ThreadHeapForLargeObject final : public ThreadHeap { 829 class LargeObjectHeap final : public BaseHeap {
830 public: 830 public:
831 ThreadHeapForLargeObject(ThreadState*, int); 831 LargeObjectHeap(ThreadState*, int);
832 Address allocateLargeObject(size_t, size_t gcInfoIndex); 832 Address allocateLargeObjectPage(size_t, size_t gcInfoIndex);
833 void freeLargeObject(LargeObject*); 833 void freeLargeObjectPage(LargeObjectPage*);
834 #if ENABLE(ASSERT) 834 #if ENABLE(ASSERT)
835 virtual bool isConsistentForSweeping() override { return true; } 835 virtual bool isConsistentForSweeping() override { return true; }
836 #endif 836 #endif
837 private: 837 private:
838 Address doAllocateLargeObject(size_t, size_t gcInfoIndex); 838 Address doAllocateLargeObjectPage(size_t, size_t gcInfoIndex);
839 virtual Address lazySweepPages(size_t, size_t gcInfoIndex) override; 839 virtual Address lazySweepPages(size_t, size_t gcInfoIndex) override;
840 }; 840 };
841 841
842 // Mask an address down to the enclosing oilpan heap base page. All oilpan heap 842 // Mask an address down to the enclosing oilpan heap base page. All oilpan heap
843 // pages are aligned at blinkPageBase plus an OS page size. 843 // pages are aligned at blinkPageBase plus an OS page size.
844 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our 844 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our
845 // typed heaps. This is only exported to enable tests in HeapTest.cpp. 845 // typed heaps. This is only exported to enable tests in HeapTest.cpp.
846 PLATFORM_EXPORT inline BaseHeapPage* pageFromObject(const void* object) 846 PLATFORM_EXPORT inline BasePage* pageFromObject(const void* object)
847 { 847 {
848 Address address = reinterpret_cast<Address>(const_cast<void*>(object)); 848 Address address = reinterpret_cast<Address>(const_cast<void*>(object));
849 BaseHeapPage* page = reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + WTF::kSystemPageSize); 849 BasePage* page = reinterpret_cast<BasePage*>(blinkPageAddress(address) + WTF::kSystemPageSize);
850 ASSERT(page->contains(address)); 850 ASSERT(page->contains(address));
851 return page; 851 return page;
852 } 852 }
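For instance (addresses hypothetical): with blinkPageSize == 2^17, an object at 0x7f2a00025c40 masks down to the page base 0x7f2a00020000, and the BasePage header sits one OS page (WTF::kSystemPageSize) past that base, just after the unmapped guard page; the contains() ASSERT then validates that the object really lies on that page.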
853 853
854 class PLATFORM_EXPORT Heap { 854 class PLATFORM_EXPORT Heap {
855 public: 855 public:
856 static void init(); 856 static void init();
857 static void shutdown(); 857 static void shutdown();
858 static void doShutdown(); 858 static void doShutdown();
859 859
860 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) 860 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING)
861 static BaseHeapPage* findPageFromAddress(Address); 861 static BasePage* findPageFromAddress(Address);
862 static BaseHeapPage* findPageFromAddress(void* pointer) { return findPageFromAddress(reinterpret_cast<Address>(pointer)); } 862 static BasePage* findPageFromAddress(void* pointer) { return findPageFromAddress(reinterpret_cast<Address>(pointer)); }
863 static bool containedInHeapOrOrphanedPage(void*); 863 static bool containedInHeapOrOrphanedPage(void*);
864 #endif 864 #endif
865 865
866 // Is the finalizable GC object still alive, but slated for lazy sweeping? 866 // Is the finalizable GC object still alive, but slated for lazy sweeping?
867 // If a lazy sweep is in progress, returns true if the object was found 867 // If a lazy sweep is in progress, returns true if the object was found
868 // to be not reachable during the marking phase, but it has yet to be swept 868 // to be not reachable during the marking phase, but it has yet to be swept
869 // and finalized. The predicate returns false in all other cases. 869 // and finalized. The predicate returns false in all other cases.
870 // 870 //
871 // Holding a reference to an already-dead object is not a valid state 871 // Holding a reference to an already-dead object is not a valid state
872 // to be in; willObjectBeLazilySwept() has undefined behavior if passed 872 // to be in; willObjectBeLazilySwept() has undefined behavior if passed
873 // such a reference. 873 // such a reference.
874 template<typename T> 874 template<typename T>
875 static bool willObjectBeLazilySwept(const T* objectPointer) 875 static bool willObjectBeLazilySwept(const T* objectPointer)
876 { 876 {
877 static_assert(IsGarbageCollectedType<T>::value, "only objects deriving from GarbageCollected can be used."); 877 static_assert(IsGarbageCollectedType<T>::value, "only objects deriving from GarbageCollected can be used.");
878 #if ENABLE(OILPAN) 878 #if ENABLE(OILPAN)
879 BaseHeapPage* page = pageFromObject(objectPointer); 879 BasePage* page = pageFromObject(objectPointer);
880 if (page->hasBeenSwept()) 880 if (page->hasBeenSwept())
881 return false; 881 return false;
882 ASSERT(page->heap()->threadState()->isSweepingInProgress()); 882 ASSERT(page->heap()->threadState()->isSweepingInProgress());
883 883
884 return !ObjectAliveTrait<T>::isHeapObjectAlive(s_markingVisitor, const_cast<T*>(objectPointer)); 884 return !ObjectAliveTrait<T>::isHeapObjectAlive(s_markingVisitor, const_cast<T*>(objectPointer));
885 #else 885 #else
886 // FIXME: remove when lazy sweeping is always on 886 // FIXME: remove when lazy sweeping is always on
887 // (cf. ThreadState::postGCProcessing()). 887 // (cf. ThreadState::postGCProcessing()).
888 return false; 888 return false;
889 #endif 889 #endif
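A hedged usage sketch (the surrounding class and callback are hypothetical, not from this patch): the predicate lets code that runs during lazy sweeping avoid touching objects that marking already found dead:

    // Illustrative only: skip dead-but-not-yet-swept objects.
    void Registry::notify(Peer* peer)
    {
        if (Heap::willObjectBeLazilySwept(peer))
            return; // unreachable at the last marking; awaiting sweep/finalization
        peer->handleEvent();
    }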
(...skipping 82 matching lines...)
972 // Return true if the last GC found a pointer into a heap page 972 // Return true if the last GC found a pointer into a heap page
973 // during conservative scanning. 973 // during conservative scanning.
974 static bool lastGCWasConservative() { return s_lastGCWasConservative; } 974 static bool lastGCWasConservative() { return s_lastGCWasConservative; }
975 975
976 static FreePagePool* freePagePool() { return s_freePagePool; } 976 static FreePagePool* freePagePool() { return s_freePagePool; }
977 static OrphanedPagePool* orphanedPagePool() { return s_orphanedPagePool; } 977 static OrphanedPagePool* orphanedPagePool() { return s_orphanedPagePool; }
978 978
979 // This look-up uses the region search tree and a negative contains cache to 979 // This look-up uses the region search tree and a negative contains cache to
980 // provide an efficient mapping from arbitrary addresses to the containing 980 // provide an efficient mapping from arbitrary addresses to the containing
981 // heap-page if one exists. 981 // heap-page if one exists.
982 static BaseHeapPage* lookup(Address); 982 static BasePage* lookup(Address);
983 static void addPageMemoryRegion(PageMemoryRegion*); 983 static void addPageMemoryRegion(PageMemoryRegion*);
984 static void removePageMemoryRegion(PageMemoryRegion*); 984 static void removePageMemoryRegion(PageMemoryRegion*);
985 985
986 static const GCInfo* gcInfo(size_t gcInfoIndex) 986 static const GCInfo* gcInfo(size_t gcInfoIndex)
987 { 987 {
988 ASSERT(gcInfoIndex >= 1); 988 ASSERT(gcInfoIndex >= 1);
989 ASSERT(gcInfoIndex < GCInfoTable::maxIndex); 989 ASSERT(gcInfoIndex < GCInfoTable::maxIndex);
990 ASSERT(s_gcInfoTable); 990 ASSERT(s_gcInfoTable);
991 const GCInfo* info = s_gcInfoTable[gcInfoIndex]; 991 const GCInfo* info = s_gcInfoTable[gcInfoIndex];
992 ASSERT(info); 992 ASSERT(info);
(...skipping 268 matching lines...)
1261 #define STACK_ALLOCATED() DISALLOW_ALLOCATION() 1261 #define STACK_ALLOCATED() DISALLOW_ALLOCATION()
1262 #define GC_PLUGIN_IGNORE(bug) 1262 #define GC_PLUGIN_IGNORE(bug)
1263 #endif 1263 #endif
1264 1264
1265 NO_SANITIZE_ADDRESS inline 1265 NO_SANITIZE_ADDRESS inline
1266 size_t HeapObjectHeader::size() const 1266 size_t HeapObjectHeader::size() const
1267 { 1267 {
1268 size_t result = m_encoded & headerSizeMask; 1268 size_t result = m_encoded & headerSizeMask;
1269 // Large objects should not refer to header->size(). 1269 // Large objects should not refer to header->size().
1270 // The actual size of a large object is stored in 1270 // The actual size of a large object is stored in
1271 // LargeObject::m_payloadSize. 1271 // LargeObjectPage::m_payloadSize.
1272 ASSERT(result != largeObjectSizeInHeader); 1272 ASSERT(result != largeObjectSizeInHeader);
1273 ASSERT(!pageFromObject(this)->isLargeObject()); 1273 ASSERT(!pageFromObject(this)->isLargeObjectPage());
1274 return result; 1274 return result;
1275 } 1275 }
1276 1276
1277 NO_SANITIZE_ADDRESS 1277 NO_SANITIZE_ADDRESS
1278 void HeapObjectHeader::checkHeader() const 1278 void HeapObjectHeader::checkHeader() const
1279 { 1279 {
1280 ASSERT(pageFromObject(this)->orphaned() || m_magic == magic); 1280 ASSERT(pageFromObject(this)->orphaned() || m_magic == magic);
1281 } 1281 }
1282 1282
1283 Address HeapObjectHeader::payload() 1283 Address HeapObjectHeader::payload()
1284 { 1284 {
1285 return reinterpret_cast<Address>(this) + sizeof(HeapObjectHeader); 1285 return reinterpret_cast<Address>(this) + sizeof(HeapObjectHeader);
1286 } 1286 }
1287 1287
1288 Address HeapObjectHeader::payloadEnd() 1288 Address HeapObjectHeader::payloadEnd()
1289 { 1289 {
1290 return reinterpret_cast<Address>(this) + size(); 1290 return reinterpret_cast<Address>(this) + size();
1291 } 1291 }
1292 1292
1293 NO_SANITIZE_ADDRESS inline 1293 NO_SANITIZE_ADDRESS inline
1294 size_t HeapObjectHeader::payloadSize() 1294 size_t HeapObjectHeader::payloadSize()
1295 { 1295 {
1296 size_t size = m_encoded & headerSizeMask; 1296 size_t size = m_encoded & headerSizeMask;
1297 if (UNLIKELY(size == largeObjectSizeInHeader)) { 1297 if (UNLIKELY(size == largeObjectSizeInHeader)) {
1298 ASSERT(pageFromObject(this)->isLargeObject()); 1298 ASSERT(pageFromObject(this)->isLargeObjectPage());
1299 return static_cast<LargeObject*>(pageFromObject(this))->payloadSize(); 1299 return static_cast<LargeObjectPage*>(pageFromObject(this))->payloadSize();
1300 } 1300 }
1301 ASSERT(!pageFromObject(this)->isLargeObject()); 1301 ASSERT(!pageFromObject(this)->isLargeObjectPage());
1302 return size - sizeof(HeapObjectHeader); 1302 return size - sizeof(HeapObjectHeader);
1303 } 1303 }
1304 1304
1305 NO_SANITIZE_ADDRESS inline 1305 NO_SANITIZE_ADDRESS inline
1306 bool HeapObjectHeader::isMarked() const 1306 bool HeapObjectHeader::isMarked() const
1307 { 1307 {
1308 checkHeader(); 1308 checkHeader();
1309 return m_encoded & headerMarkBitMask; 1309 return m_encoded & headerMarkBitMask;
1310 } 1310 }
1311 1311
(...skipping 21 matching lines...)
1333 } 1333 }
1334 1334
1335 NO_SANITIZE_ADDRESS inline 1335 NO_SANITIZE_ADDRESS inline
1336 void HeapObjectHeader::markDead() 1336 void HeapObjectHeader::markDead()
1337 { 1337 {
1338 checkHeader(); 1338 checkHeader();
1339 ASSERT(!isMarked()); 1339 ASSERT(!isMarked());
1340 m_encoded |= headerDeadBitMask; 1340 m_encoded |= headerDeadBitMask;
1341 } 1341 }
1342 1342
1343 size_t ThreadHeap::allocationSizeFromSize(size_t size) 1343 size_t BaseHeap::allocationSizeFromSize(size_t size)
1344 { 1344 {
1345 // Check the size before computing the actual allocation size. The 1345 // Check the size before computing the actual allocation size. The
1346 // allocation size calculation can overflow for large sizes and the check 1346 // allocation size calculation can overflow for large sizes and the check
1347 // therefore has to happen before any calculation on the size. 1347 // therefore has to happen before any calculation on the size.
1348 RELEASE_ASSERT(size < maxHeapObjectSize); 1348 RELEASE_ASSERT(size < maxHeapObjectSize);
1349 1349
1350 // Add space for header. 1350 // Add space for header.
1351 size_t allocationSize = size + sizeof(HeapObjectHeader); 1351 size_t allocationSize = size + sizeof(HeapObjectHeader);
1352 // Align size with allocation granularity. 1352 // Align size with allocation granularity.
1353 allocationSize = (allocationSize + allocationMask) & ~allocationMask; 1353 allocationSize = (allocationSize + allocationMask) & ~allocationMask;
1354 return allocationSize; 1354 return allocationSize;
1355 } 1355 }
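A worked example of this quantization (assuming allocationGranularity == 8, so allocationMask == 7, and sizeof(HeapObjectHeader) == 8 on 64-bit): a 20-byte request becomes 20 + 8 = 28 bytes with its header, then (28 + 7) & ~7 = 32, so the allocation occupies 32 bytes and roundedAllocationSize(20) reports 32 - 8 = 24 usable payload bytes.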
1356 1356
1357 Address ThreadHeapForHeapPage::allocateObject(size_t allocationSize, size_t gcInfoIndex) 1357 Address NormalPageHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex)
1358 { 1358 {
1359 #if ENABLE(GC_PROFILING) 1359 #if ENABLE(GC_PROFILING)
1360 m_cumulativeAllocationSize += allocationSize; 1360 m_cumulativeAllocationSize += allocationSize;
1361 ++m_allocationCount; 1361 ++m_allocationCount;
1362 #endif 1362 #endif
1363 1363
1364 if (LIKELY(allocationSize <= m_remainingAllocationSize)) { 1364 if (LIKELY(allocationSize <= m_remainingAllocationSize)) {
1365 #if ENABLE(GC_PROFILING) 1365 #if ENABLE(GC_PROFILING)
1366 ++m_inlineAllocationCount; 1366 ++m_inlineAllocationCount;
1367 #endif 1367 #endif
1368 Address headerAddress = m_currentAllocationPoint; 1368 Address headerAddress = m_currentAllocationPoint;
1369 m_currentAllocationPoint += allocationSize; 1369 m_currentAllocationPoint += allocationSize;
1370 m_remainingAllocationSize -= allocationSize; 1370 m_remainingAllocationSize -= allocationSize;
1371 ASSERT(gcInfoIndex > 0); 1371 ASSERT(gcInfoIndex > 0);
1372 new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex); 1372 new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex);
1373 Address result = headerAddress + sizeof(HeapObjectHeader); 1373 Address result = headerAddress + sizeof(HeapObjectHeader);
1374 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); 1374 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
1375 1375
1376 // Unpoison the memory used for the object (payload). 1376 // Unpoison the memory used for the object (payload).
1377 ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(HeapObjectHeader)); 1377 ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(HeapObjectHeader));
1378 FILL_ZERO_IF_NOT_PRODUCTION(result, allocationSize - sizeof(HeapObjectHeader)); 1378 FILL_ZERO_IF_NOT_PRODUCTION(result, allocationSize - sizeof(HeapObjectHeader));
1379 ASSERT(findPageFromAddress(headerAddress + allocationSize - 1)); 1379 ASSERT(findPageFromAddress(headerAddress + allocationSize - 1));
1380 return result; 1380 return result;
1381 } 1381 }
1382 return outOfLineAllocate(allocationSize, gcInfoIndex); 1382 return outOfLineAllocate(allocationSize, gcInfoIndex);
1383 } 1383 }
1384 1384
1385 Address ThreadHeapForHeapPage::allocate(size_t size, size_t gcInfoIndex) 1385 Address NormalPageHeap::allocate(size_t size, size_t gcInfoIndex)
1386 { 1386 {
1387 return allocateObject(allocationSizeFromSize(size), gcInfoIndex); 1387 return allocateObject(allocationSizeFromSize(size), gcInfoIndex);
1388 } 1388 }
1389 1389
1390 template<typename T> 1390 template<typename T>
1391 struct HeapIndexTrait { 1391 struct HeapIndexTrait {
1392 static int index() { return GeneralHeap; }; 1392 static int index() { return NormalPageHeapIndex; };
1393 }; 1393 };
1394 1394
1395 // FIXME: The forward declaration is layering violation. 1395 // FIXME: The forward declaration is layering violation.
1396 #define DEFINE_TYPED_HEAP_TRAIT(Type) \ 1396 #define DEFINE_TYPED_HEAP_TRAIT(Type) \
1397 class Type; \ 1397 class Type; \
1398 template<> \ 1398 template<> \
1399 struct HeapIndexTrait<class Type> { \ 1399 struct HeapIndexTrait<class Type> { \
1400 static int index() { return Type##Heap; }; \ 1400 static int index() { return Type##Heap; }; \
1401 }; 1401 };
1402 FOR_EACH_TYPED_HEAP(DEFINE_TYPED_HEAP_TRAIT) 1402 FOR_EACH_TYPED_HEAP(DEFINE_TYPED_HEAP_TRAIT)
1403 #undef DEFINE_TYPED_HEAP_TRAIT 1403 #undef DEFINE_TYPED_HEAP_TRAIT
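For illustration, expanding the macro for one typed heap (using Node, the example type mentioned in the thread-heap comment earlier; this assumes Node appears in FOR_EACH_TYPED_HEAP and that NodeHeap is its heap-index constant):

    // DEFINE_TYPED_HEAP_TRAIT(Node) expands to roughly:
    class Node;
    template<>
    struct HeapIndexTrait<class Node> {
        static int index() { return NodeHeap; };
    };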
1404 1404
1405 template<typename T> 1405 template<typename T>
1406 Address Heap::allocateOnHeapIndex(size_t size, int heapIndex, size_t gcInfoIndex) 1406 Address Heap::allocateOnHeapIndex(size_t size, int heapIndex, size_t gcInfoIndex)
1407 { 1407 {
1408 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); 1408 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
1409 ASSERT(state->isAllocationAllowed()); 1409 ASSERT(state->isAllocationAllowed());
1410 return static_cast<ThreadHeapForHeapPage*>(state->heap(heapIndex))->allocate(size, gcInfoIndex); 1410 return static_cast<NormalPageHeap*>(state->heap(heapIndex))->allocate(size, gcInfoIndex);
1411 } 1411 }
1412 1412
1413 template<typename T> 1413 template<typename T>
1414 Address Heap::allocate(size_t size) 1414 Address Heap::allocate(size_t size)
1415 { 1415 {
1416 return allocateOnHeapIndex<T>(size, HeapIndexTrait<T>::index(), GCInfoTrait<T>::index()); 1416 return allocateOnHeapIndex<T>(size, HeapIndexTrait<T>::index(), GCInfoTrait<T>::index());
1417 } 1417 }
1418 1418
1419 template<typename T> 1419 template<typename T>
1420 Address Heap::reallocate(void* previous, size_t size) 1420 Address Heap::reallocate(void* previous, size_t size)
(...skipping 18 matching lines...)
1439 memcpy(address, previous, copySize); 1439 memcpy(address, previous, copySize);
1440 return address; 1440 return address;
1441 } 1441 }
1442 1442
1443 class HeapAllocatorQuantizer { 1443 class HeapAllocatorQuantizer {
1444 public: 1444 public:
1445 template<typename T> 1445 template<typename T>
1446 static size_t quantizedSize(size_t count) 1446 static size_t quantizedSize(size_t count)
1447 { 1447 {
1448 RELEASE_ASSERT(count <= kMaxUnquantizedAllocation / sizeof(T)); 1448 RELEASE_ASSERT(count <= kMaxUnquantizedAllocation / sizeof(T));
1449 return ThreadHeap::roundedAllocationSize(count * sizeof(T)); 1449 return BaseHeap::roundedAllocationSize(count * sizeof(T));
1450 } 1450 }
1451 static const size_t kMaxUnquantizedAllocation = maxHeapObjectSize; 1451 static const size_t kMaxUnquantizedAllocation = maxHeapObjectSize;
1452 }; 1452 };
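Under the same size assumptions as the earlier worked example (8-byte granularity and header, 4-byte int): quantizedSize<int>(9) quantizes a 36-byte request to ((36 + 8 + 7) & ~7) - 8 = 40 usable bytes, so a vector asking for 9 ints actually gets backing capacity for 10; quantizedSize<int>(10) likewise returns 40.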
1453 1453
1454 // This is a static-only class used as a trait on collections to make them heap 1454 // This is a static-only class used as a trait on collections to make them heap
1455 // allocated. However see also HeapListHashSetAllocator. 1455 // allocated. However see also HeapListHashSetAllocator.
1456 class HeapAllocator { 1456 class HeapAllocator {
1457 public: 1457 public:
1458 using Quantizer = HeapAllocatorQuantizer; 1458 using Quantizer = HeapAllocatorQuantizer;
1459 using Visitor = blink::Visitor; 1459 using Visitor = blink::Visitor;
1460 static const bool isGarbageCollected = true; 1460 static const bool isGarbageCollected = true;
1461 1461
1462 template <typename T> 1462 template <typename T>
1463 static T* allocateVectorBacking(size_t size) 1463 static T* allocateVectorBacking(size_t size)
1464 { 1464 {
1465 size_t gcInfoIndex = GCInfoTrait<HeapVectorBacking<T, VectorTraits<T>>>::index(); 1465 size_t gcInfoIndex = GCInfoTrait<HeapVectorBacking<T, VectorTraits<T>>>::index();
1466 return reinterpret_cast<T*>(Heap::allocateOnHeapIndex<T>(size, VectorBackingHeap, gcInfoIndex)); 1466 return reinterpret_cast<T*>(Heap::allocateOnHeapIndex<T>(size, VectorHeapIndex, gcInfoIndex));
1467 } 1467 }
1468 PLATFORM_EXPORT static void freeVectorBacking(void* address); 1468 PLATFORM_EXPORT static void freeVectorBacking(void* address);
1469 PLATFORM_EXPORT static bool expandVectorBacking(void*, size_t); 1469 PLATFORM_EXPORT static bool expandVectorBacking(void*, size_t);
1470 static inline bool shrinkVectorBacking(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize) 1470 static inline bool shrinkVectorBacking(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize)
1471 { 1471 {
1472 shrinkVectorBackingInternal(address, quantizedCurrentSize, quantizedShrunkSize); 1472 shrinkVectorBackingInternal(address, quantizedCurrentSize, quantizedShrunkSize);
1473 return true; 1473 return true;
1474 } 1474 }
1475 template <typename T> 1475 template <typename T>
1476 static T* allocateInlineVectorBacking(size_t size) 1476 static T* allocateInlineVectorBacking(size_t size)
1477 { 1477 {
1478 size_t gcInfoIndex = GCInfoTrait<HeapVectorBacking<T, VectorTraits<T>>>::index(); 1478 size_t gcInfoIndex = GCInfoTrait<HeapVectorBacking<T, VectorTraits<T>>>::index();
1479 return reinterpret_cast<T*>(Heap::allocateOnHeapIndex<T>(size, InlineVectorBackingHeap, gcInfoIndex)); 1479 return reinterpret_cast<T*>(Heap::allocateOnHeapIndex<T>(size, InlineVectorHeapIndex, gcInfoIndex));
1480 } 1480 }
1481 PLATFORM_EXPORT static void freeInlineVectorBacking(void* address); 1481 PLATFORM_EXPORT static void freeInlineVectorBacking(void* address);
1482 PLATFORM_EXPORT static bool expandInlineVectorBacking(void*, size_t); 1482 PLATFORM_EXPORT static bool expandInlineVectorBacking(void*, size_t);
1483 static inline bool shrinkInlineVectorBacking(void* address, size_t quantizedCurrentSize, size_t quantizedShrinkedSize) 1483 static inline bool shrinkInlineVectorBacking(void* address, size_t quantizedCurrentSize, size_t quantizedShrinkedSize)
1484 { 1484 {
1485 shrinkInlineVectorBackingInternal(address, quantizedCurrentSize, quantizedShrinkedSize); 1485 shrinkInlineVectorBackingInternal(address, quantizedCurrentSize, quantizedShrinkedSize);
1486 return true; 1486 return true;
1487 } 1487 }
1488 1488
1489 1489
1490 template <typename T, typename HashTable> 1490 template <typename T, typename HashTable>
1491 static T* allocateHashTableBacking(size_t size) 1491 static T* allocateHashTableBacking(size_t size)
1492 { 1492 {
1493 size_t gcInfoIndex = GCInfoTrait<HeapHashTableBacking<HashTable>>::index(); 1493 size_t gcInfoIndex = GCInfoTrait<HeapHashTableBacking<HashTable>>::index();
1494 return reinterpret_cast<T*>(Heap::allocateOnHeapIndex<T>(size, HashTableBackingHeap, gcInfoIndex)); 1494 return reinterpret_cast<T*>(Heap::allocateOnHeapIndex<T>(size, HashTableHeapIndex, gcInfoIndex));
1495 } 1495 }
1496 template <typename T, typename HashTable> 1496 template <typename T, typename HashTable>
1497 static T* allocateZeroedHashTableBacking(size_t size) 1497 static T* allocateZeroedHashTableBacking(size_t size)
1498 { 1498 {
1499 return allocateHashTableBacking<T, HashTable>(size); 1499 return allocateHashTableBacking<T, HashTable>(size);
1500 } 1500 }
1501 PLATFORM_EXPORT static void freeHashTableBacking(void* address); 1501 PLATFORM_EXPORT static void freeHashTableBacking(void* address);
1502 PLATFORM_EXPORT static bool expandHashTableBacking(void*, size_t); 1502 PLATFORM_EXPORT static bool expandHashTableBacking(void*, size_t);
1503 1503
1504 template <typename Return, typename Metadata> 1504 template <typename Return, typename Metadata>
(...skipping 949 matching lines...)
2454 template<typename T, size_t inlineCapacity> 2454 template<typename T, size_t inlineCapacity>
2455 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { }; 2455 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { };
2456 template<typename T, size_t inlineCapacity> 2456 template<typename T, size_t inlineCapacity>
2457 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { }; 2457 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { };
2458 template<typename T, typename U, typename V> 2458 template<typename T, typename U, typename V>
2459 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { }; 2459 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { };
2460 2460
2461 } // namespace blink 2461 } // namespace blink
2462 2462
2463 #endif // Heap_h 2463 #endif // Heap_h