OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 71 matching lines...)
82 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 82 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
83 #define FILL_ZERO_IF_PRODUCTION(address, size) do { } while (false) | 83 #define FILL_ZERO_IF_PRODUCTION(address, size) do { } while (false) |
84 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) memset((address), 0, (size)) | 84 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) memset((address), 0, (size)) |
85 #else | 85 #else |
86 #define FILL_ZERO_IF_PRODUCTION(address, size) memset((address), 0, (size)) | 86 #define FILL_ZERO_IF_PRODUCTION(address, size) memset((address), 0, (size)) |
87 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) do { } while (false) | 87 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) do { } while (false) |
88 #endif | 88 #endif |
89 | 89 |
90 class CallbackStack; | 90 class CallbackStack; |
91 class PageMemory; | 91 class PageMemory; |
92 class ThreadHeapForHeapPage; | |
93 template<ThreadAffinity affinity> class ThreadLocalPersistents; | 92 template<ThreadAffinity affinity> class ThreadLocalPersistents; |
94 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity>> class Persistent; | 93 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity>> class Persistent; |
95 | 94 |
96 #if ENABLE(GC_PROFILING) | 95 #if ENABLE(GC_PROFILING) |
97 class TracedValue; | 96 class TracedValue; |
98 #endif | 97 #endif |
99 | 98 |
100 // HeapObjectHeader is a 4 byte (32 bit) header that has the following layout: | 99 // HeapObjectHeader is a 4 byte (32 bit) header that has the following layout: |
101 // | 100 // |
102 // | gcInfoIndex (15 bit) | size (14 bit) | dead bit (1 bit) | freed bit (1 bit) | mark bit (1 bit) | | 101 // | gcInfoIndex (15 bit) | size (14 bit) | dead bit (1 bit) | freed bit (1 bit) | mark bit (1 bit) |
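
A minimal sketch of packing and unpacking fields in that 32-bit layout, assuming gcInfoIndex occupies the high 15 bits and the mark bit the lowest bit; the actual masks and shifts are defined further down in this header and may differ:

    #include <cassert>
    #include <cstdint>

    // Hypothetical field positions, high to low:
    // [ gcInfoIndex:15 ][ size:14 ][ dead:1 ][ freed:1 ][ mark:1 ]
    inline uint32_t encodeHeader(uint32_t gcInfoIndex, uint32_t size)
    {
        assert(gcInfoIndex < (1u << 15));
        assert(size < (1u << 14));
        return (gcInfoIndex << 17) | (size << 3); // flag bits start cleared
    }

    inline uint32_t gcInfoIndexOf(uint32_t header) { return header >> 17; }
    inline uint32_t sizeOf(uint32_t header) { return (header >> 3) & 0x3fffu; }
    inline bool isMarked(uint32_t header) { return header & 1u; }
    inline uint32_t withMark(uint32_t header) { return header | 1u; }
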
(...skipping 252 matching lines...)
355 } | 354 } |
356 #endif | 355 #endif |
357 | 356 |
358 // FIXME: Add a good comment about the heap layout once heap relayout work | 357 // FIXME: Add a good comment about the heap layout once heap relayout work |
359 // is done. | 358 // is done. |
360 class BaseHeapPage { | 359 class BaseHeapPage { |
361 public: | 360 public: |
362 BaseHeapPage(PageMemory*, ThreadHeap*); | 361 BaseHeapPage(PageMemory*, ThreadHeap*); |
363 virtual ~BaseHeapPage() { } | 362 virtual ~BaseHeapPage() { } |
364 | 363 |
365 void link(BaseHeapPage** previousNext) | |
366 { | |
367 m_next = *previousNext; | |
368 *previousNext = this; | |
369 } | |
370 void unlink(BaseHeapPage** previousNext) | |
371 { | |
372 *previousNext = m_next; | |
373 m_next = nullptr; | |
374 } | |
375 BaseHeapPage* next() const { return m_next; } | |
376 | |
377 // virtual methods are slow. So performance-sensitive methods | 364 // virtual methods are slow. So performance-sensitive methods |
378 // should be defined as non-virtual methods on HeapPage and LargeObject. | 365 // should be defined as non-virtual methods on HeapPage and LargeObject. |
379 // The following methods are not performance-sensitive. | 366 // The following methods are not performance-sensitive. |
380 virtual size_t objectPayloadSizeForTesting() = 0; | 367 virtual size_t objectPayloadSizeForTesting() = 0; |
381 virtual bool isEmpty() = 0; | 368 virtual bool isEmpty() = 0; |
382 virtual void removeFromHeap() = 0; | 369 virtual void removeFromHeap(ThreadHeap*) = 0; |
383 virtual void sweep() = 0; | 370 virtual void sweep() = 0; |
384 virtual void markUnmarkedObjectsDead() = 0; | 371 virtual void markUnmarkedObjectsDead() = 0; |
385 // Check if the given address points to an object in this | 372 // Check if the given address points to an object in this |
386 // heap page. If so, find the start of that object and mark it | 373 // heap page. If so, find the start of that object and mark it |
387 // using the given Visitor. Otherwise do nothing. The pointer must | 374 // using the given Visitor. Otherwise do nothing. The pointer must |
388 // be within the same aligned blinkPageSize as the this-pointer. | 375 // be within the same aligned blinkPageSize as the this-pointer. |
389 // | 376 // |
390 // This is used during conservative stack scanning to | 377 // This is used during conservative stack scanning to |
391 // conservatively mark all objects that could be referenced from | 378 // conservatively mark all objects that could be referenced from |
392 // the stack. | 379 // the stack. |
(...skipping 27 matching lines...)
420 | 407 |
421 void markAsUnswept() | 408 void markAsUnswept() |
422 { | 409 { |
423 ASSERT(m_swept); | 410 ASSERT(m_swept); |
424 m_swept = false; | 411 m_swept = false; |
425 } | 412 } |
426 | 413 |
427 private: | 414 private: |
428 PageMemory* m_storage; | 415 PageMemory* m_storage; |
429 ThreadHeap* m_heap; | 416 ThreadHeap* m_heap; |
430 BaseHeapPage* m_next; | |
431 // Whether the page is part of a terminating thread or not. | 417 // Whether the page is part of a terminating thread or not. |
432 bool m_terminating; | 418 bool m_terminating; |
433 | 419 |
434 // Track the sweeping state of a page. Set to true once | 420 // Track the sweeping state of a page. Set to true once |
435 // lazy sweeping has processed it. | 421 // lazy sweeping has processed it.
436 // | 422 // |
437 // Set to false at the start of a sweep, true upon completion | 423 // Set to false at the start of a sweep, true upon completion |
438 // of lazy sweeping. | 424 // of lazy sweeping. |
439 bool m_swept; | 425 bool m_swept; |
440 friend class ThreadHeap; | |
441 }; | 426 }; |
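
The checkAndMarkPointer() contract above is what conservative stack scanning relies on. A rough, assumption-laden sketch of such a driver (the real one lives in Heap and ThreadState and uses the page lookup caches) might look like:

    // Sketch only: findPageForAddress() is a hypothetical stand-in for the
    // real page lookup (region tree / negative cache); Visitor, Address and
    // BaseHeapPage are the types declared in this header.
    BaseHeapPage* findPageForAddress(Address); // hypothetical helper

    inline void conservativelyScanRange(Visitor* visitor, Address* from, Address* to)
    {
        for (Address* slot = from; slot < to; ++slot) {
            Address candidate = *slot;
            if (BaseHeapPage* page = findPageForAddress(candidate))
                page->checkAndMarkPointer(visitor, candidate);
        }
    }
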
442 | 427 |
443 class HeapPage final : public BaseHeapPage { | 428 class HeapPage final : public BaseHeapPage { |
444 public: | 429 public: |
445 HeapPage(PageMemory*, ThreadHeap*); | 430 HeapPage(PageMemory*, ThreadHeap*); |
446 | 431 |
447 Address payload() | 432 Address payload() |
448 { | 433 { |
449 return address() + sizeof(HeapPage) + headerPadding(); | 434 return address() + sizeof(HeapPage) + headerPadding(); |
450 } | 435 } |
451 size_t payloadSize() | 436 size_t payloadSize() |
452 { | 437 { |
453 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding()) & ~allocationMask; | 438 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding()) & ~allocationMask; |
454 } | 439 } |
455 Address payloadEnd() { return payload() + payloadSize(); } | 440 Address payloadEnd() { return payload() + payloadSize(); } |
456 bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); } | 441 bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); } |
457 | 442 |
| 443 void link(HeapPage** previousNext) |
| 444 { |
| 445 m_next = *previousNext; |
| 446 *previousNext = this; |
| 447 } |
| 448 |
| 449 void unlink(HeapPage** previousNext) |
| 450 { |
| 451 *previousNext = m_next; |
| 452 m_next = nullptr; |
| 453 } |
| 454 |
458 virtual size_t objectPayloadSizeForTesting() override; | 455 virtual size_t objectPayloadSizeForTesting() override; |
459 virtual bool isEmpty() override; | 456 virtual bool isEmpty() override; |
460 virtual void removeFromHeap() override; | 457 virtual void removeFromHeap(ThreadHeap*) override; |
461 virtual void sweep() override; | 458 virtual void sweep() override; |
462 virtual void markUnmarkedObjectsDead() override; | 459 virtual void markUnmarkedObjectsDead() override; |
463 virtual void checkAndMarkPointer(Visitor*, Address) override; | 460 virtual void checkAndMarkPointer(Visitor*, Address) override; |
464 virtual void markOrphaned() override | 461 virtual void markOrphaned() override |
465 { | 462 { |
466 // Zap the payload with a recognizable value to detect any incorrect | 463 // Zap the payload with a recognizable value to detect any incorrect |
467 // cross thread pointer usage. | 464 // cross thread pointer usage. |
468 #if defined(ADDRESS_SANITIZER) | 465 #if defined(ADDRESS_SANITIZER) |
469 // This needs to zap poisoned memory as well. | 466 // This needs to zap poisoned memory as well. |
470 // Force unpoison memory before memset. | 467 // Force unpoison memory before memset. |
(...skipping 15 matching lines...)
486 // the result can be used to populate the negative page cache. | 483 // the result can be used to populate the negative page cache. |
487 virtual bool contains(Address addr) override | 484 virtual bool contains(Address addr) override |
488 { | 485 { |
489 Address blinkPageStart = roundToBlinkPageStart(address()); | 486 Address blinkPageStart = roundToBlinkPageStart(address()); |
490 ASSERT(blinkPageStart == address() - WTF::kSystemPageSize); // Page is at aligned address plus guard page size. | 487 ASSERT(blinkPageStart == address() - WTF::kSystemPageSize); // Page is at aligned address plus guard page size. |
491 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; | 488 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; |
492 } | 489 } |
493 #endif | 490 #endif |
494 virtual size_t size() override { return blinkPageSize; } | 491 virtual size_t size() override { return blinkPageSize; } |
495 | 492 |
496 ThreadHeapForHeapPage* heapForHeapPage(); | 493 HeapPage* next() { return m_next; } |
| 494 |
497 void clearObjectStartBitMap(); | 495 void clearObjectStartBitMap(); |
498 | 496 |
499 #if defined(ADDRESS_SANITIZER) | 497 #if defined(ADDRESS_SANITIZER) |
500 void poisonUnmarkedObjects(); | 498 void poisonUnmarkedObjects(); |
501 #endif | 499 #endif |
502 | 500 |
503 // This method is needed just to keep compilers from removing m_padding. | 501 // This method is needed just to keep compilers from removing m_padding. |
504 uint64_t unusedMethod() const { return m_padding; } | 502 uint64_t unusedMethod() const { return m_padding; } |
505 | 503 |
506 private: | 504 private: |
507 HeapObjectHeader* findHeaderFromAddress(Address); | 505 HeapObjectHeader* findHeaderFromAddress(Address); |
508 void populateObjectStartBitMap(); | 506 void populateObjectStartBitMap(); |
509 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } | 507 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } |
510 | 508 |
| 509 HeapPage* m_next; |
511 bool m_objectStartBitMapComputed; | 510 bool m_objectStartBitMapComputed; |
512 uint8_t m_objectStartBitMap[reservedForObjectBitMap]; | 511 uint8_t m_objectStartBitMap[reservedForObjectBitMap]; |
513 uint64_t m_padding; // Preserve 8-byte alignment on 32-bit systems. | 512 uint64_t m_padding; // Preserve 8-byte alignment on 32-bit systems. |
| 513 |
| 514 friend class ThreadHeap; |
514 }; | 515 }; |
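
The link()/unlink() pair that moves onto HeapPage above uses the classic pointer-to-pointer idiom for an intrusive singly linked list; a self-contained sketch of the same idiom:

    #include <cassert>

    struct ListNode {
        ListNode* m_next = nullptr;

        void link(ListNode** previousNext)
        {
            m_next = *previousNext;   // take over whatever the slot pointed to
            *previousNext = this;     // and make the slot point at this node
        }
        void unlink(ListNode** previousNext)
        {
            assert(*previousNext == this); // caller passes &predecessor->m_next (or &head)
            *previousNext = m_next;
            m_next = nullptr;
        }
    };

    // Usage: pages are pushed onto a list head with page->link(&m_firstPage)
    // and removed by passing the slot that currently points at the page.
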
515 | 516 |
516 // Large allocations are allocated as separate objects and linked in a list. | 517 // Large allocations are allocated as separate objects and linked in a list. |
517 // | 518 // |
518 // In order to use the same memory allocation routines for everything allocated | 519 // In order to use the same memory allocation routines for everything allocated |
519 // in the heap, large objects are considered heap pages containing only one | 520 // in the heap, large objects are considered heap pages containing only one |
520 // object. | 521 // object. |
521 class LargeObject final : public BaseHeapPage { | 522 class LargeObject final : public BaseHeapPage { |
522 public: | 523 public: |
523 LargeObject(PageMemory* storage, ThreadHeap* heap, size_t payloadSize) | 524 LargeObject(PageMemory* storage, ThreadHeap* heap, size_t payloadSize) |
524 : BaseHeapPage(storage, heap) | 525 : BaseHeapPage(storage, heap) |
525 , m_payloadSize(payloadSize) | 526 , m_payloadSize(payloadSize) |
526 { | 527 { |
527 } | 528 } |
528 | 529 |
529 Address payload() { return heapObjectHeader()->payload(); } | 530 Address payload() { return heapObjectHeader()->payload(); } |
530 size_t payloadSize() { return m_payloadSize; } | 531 size_t payloadSize() { return m_payloadSize; } |
531 Address payloadEnd() { return payload() + payloadSize(); } | 532 Address payloadEnd() { return payload() + payloadSize(); } |
532 bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); } | 533 bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); } |
533 | 534 |
534 virtual size_t objectPayloadSizeForTesting() override; | 535 virtual size_t objectPayloadSizeForTesting() override; |
535 virtual bool isEmpty() override; | 536 virtual bool isEmpty() override; |
536 virtual void removeFromHeap() override; | 537 virtual void removeFromHeap(ThreadHeap*) override; |
537 virtual void sweep() override; | 538 virtual void sweep() override; |
538 virtual void markUnmarkedObjectsDead() override; | 539 virtual void markUnmarkedObjectsDead() override; |
539 virtual void checkAndMarkPointer(Visitor*, Address) override; | 540 virtual void checkAndMarkPointer(Visitor*, Address) override; |
540 virtual void markOrphaned() override | 541 virtual void markOrphaned() override |
541 { | 542 { |
542 // Zap the payload with a recognizable value to detect any incorrect | 543 // Zap the payload with a recognizable value to detect any incorrect |
543 // cross thread pointer usage. | 544 // cross thread pointer usage. |
544 memset(payload(), orphanedZapValue, payloadSize()); | 545 memset(payload(), orphanedZapValue, payloadSize()); |
545 BaseHeapPage::markOrphaned(); | 546 BaseHeapPage::markOrphaned(); |
546 } | 547 } |
(...skipping 14 matching lines...)
561 { | 562 { |
562 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size()); | 563 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size()); |
563 } | 564 } |
564 #endif | 565 #endif |
565 virtual size_t size() | 566 virtual size_t size() |
566 { | 567 { |
567 return sizeof(LargeObject) + headerPadding() + sizeof(HeapObjectHeader) + m_payloadSize; | 568 return sizeof(LargeObject) + headerPadding() + sizeof(HeapObjectHeader) + m_payloadSize; |
568 } | 569 } |
569 virtual bool isLargeObject() override { return true; } | 570 virtual bool isLargeObject() override { return true; } |
570 | 571 |
| 572 void link(LargeObject** previousNext) |
| 573 { |
| 574 m_next = *previousNext; |
| 575 *previousNext = this; |
| 576 } |
| 577 |
| 578 void unlink(LargeObject** previousNext) |
| 579 { |
| 580 *previousNext = m_next; |
| 581 m_next = nullptr; |
| 582 } |
| 583 |
| 584 LargeObject* next() |
| 585 { |
| 586 return m_next; |
| 587 } |
| 588 |
571 HeapObjectHeader* heapObjectHeader() | 589 HeapObjectHeader* heapObjectHeader() |
572 { | 590 { |
573 Address headerAddress = address() + sizeof(LargeObject) + headerPadding(); | 591 Address headerAddress = address() + sizeof(LargeObject) + headerPadding(); |
574 return reinterpret_cast<HeapObjectHeader*>(headerAddress); | 592 return reinterpret_cast<HeapObjectHeader*>(headerAddress); |
575 } | 593 } |
576 | 594 |
577 // This method is needed just to keep compilers from removing m_padding. | 595 // This method is needed just to keep compilers from removing m_padding. |
578 uint64_t unusedMethod() const { return m_padding; } | 596 uint64_t unusedMethod() const { return m_padding; } |
579 | 597 |
580 private: | 598 private: |
| 599 friend class ThreadHeap; |
| 600 LargeObject* m_next; |
581 size_t m_payloadSize; | 601 size_t m_payloadSize; |
582 uint64_t m_padding; // Preserve 8-byte alignment on 32-bit systems. | 602 uint64_t m_padding; // Preserve 8-byte alignment on 32-bit systems. |
583 }; | 603 }; |
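
The size() and heapObjectHeader() arithmetic above implies the following on-page layout for a large allocation; offsets depend on the platform-specific headerPadding(), so this is only a schematic:

    // address()
    // |-- LargeObject                 (the page header object itself)
    // |-- headerPadding()             platform-dependent alignment padding
    // |-- HeapObjectHeader            returned by heapObjectHeader()
    // |-- payload, m_payloadSize bytes  spanning payload() .. payloadEnd()
    //
    // size() == sizeof(LargeObject) + headerPadding()
    //           + sizeof(HeapObjectHeader) + m_payloadSize
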
584 | 604 |
585 // A HeapDoesNotContainCache provides a fast way of taking an arbitrary | 605 // A HeapDoesNotContainCache provides a fast way of taking an arbitrary |
586 // pointer-sized word, and determining whether it cannot be interpreted as a | 606 // pointer-sized word, and determining whether it cannot be interpreted as a |
587 // pointer to an area that is managed by the garbage collected Blink heap. This | 607 // pointer to an area that is managed by the garbage collected Blink heap. This |
588 // is a cache of 'pages' that have previously been determined to be wholly | 608 // is a cache of 'pages' that have previously been determined to be wholly |
589 // outside of the heap. The size of these pages must be smaller than the | 609 // outside of the heap. The size of these pages must be smaller than the |
590 // allocation alignment of the heap pages. We determine off-heap-ness by | 610 // allocation alignment of the heap pages. We determine off-heap-ness by |
(...skipping 110 matching lines...)
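
A minimal sketch of the kind of negative cache described above: a small direct-mapped table of cache-page-granular addresses already known to be outside the heap. The table size, cache page size and hash below are assumptions, not the real implementation:

    #include <cstddef>
    #include <cstdint>

    class DoesNotContainCacheSketch {
    public:
        DoesNotContainCacheSketch()
        {
            for (size_t i = 0; i < kNumEntries; ++i)
                m_entries[i] = kEmptyEntry;
        }

        // True if this address was previously recorded as being outside the heap.
        bool lookup(uintptr_t address) const
        {
            uintptr_t page = address & ~(kCachePageSize - 1);
            return m_entries[slotFor(page)] == page;
        }

        // Record an address that a full lookup determined to be off-heap.
        void addEntry(uintptr_t address)
        {
            uintptr_t page = address & ~(kCachePageSize - 1);
            m_entries[slotFor(page)] = page;
        }

    private:
        static const uintptr_t kCachePageSize = 1 << 12; // must stay below heap page alignment
        static const uintptr_t kEmptyEntry = 1;          // never a page-aligned address
        static const size_t kNumEntries = 1 << 10;
        static size_t slotFor(uintptr_t page) { return (page >> 12) & (kNumEntries - 1); }

        uintptr_t m_entries[kNumEntries];
    };
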
701 | 721 |
702 void getFreeSizeStats(PerBucketFreeListStats bucketStats[], size_t& totalSize) const; | 722 void getFreeSizeStats(PerBucketFreeListStats bucketStats[], size_t& totalSize) const; |
703 #endif | 723 #endif |
704 | 724 |
705 private: | 725 private: |
706 int m_biggestFreeListIndex; | 726 int m_biggestFreeListIndex; |
707 | 727 |
708 // All FreeListEntries in the nth list have size >= 2^n. | 728 // All FreeListEntries in the nth list have size >= 2^n. |
709 FreeListEntry* m_freeLists[blinkPageSizeLog2]; | 729 FreeListEntry* m_freeLists[blinkPageSizeLog2]; |
710 | 730 |
711 friend class ThreadHeapForHeapPage; | 731 friend class ThreadHeap; |
712 }; | 732 }; |
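
A worked example of the bucketing rule in the comment above ("all FreeListEntries in the nth list have size >= 2^n"): the bucket index is simply the floor of log2 of the entry size.

    #include <cstddef>

    // Assumed helper equivalent to what the real FreeList computes internally.
    inline int bucketIndexForSize(size_t size)
    {
        int index = -1;
        while (size) {
            size >>= 1;
            ++index;
        }
        return index;
    }
    // bucketIndexForSize(8)   == 3   (8   >= 2^3)
    // bucketIndexForSize(96)  == 6   (96  >= 2^6, < 2^7)
    // bucketIndexForSize(512) == 9   (512 >= 2^9)
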
713 | 733 |
714 // Thread heaps represent a part of the per-thread Blink heap. | 734 // Thread heaps represent a part of the per-thread Blink heap. |
715 // | 735 // |
716 // Each Blink thread has a number of thread heaps: one general heap | 736 // Each Blink thread has a number of thread heaps: one general heap |
717 // that contains any type of object and a number of heaps specialized | 737 // that contains any type of object and a number of heaps specialized |
718 // for specific object types (such as Node). | 738 // for specific object types (such as Node). |
719 // | 739 // |
720 // Each thread heap contains the functionality to allocate new objects | 740 // Each thread heap contains the functionality to allocate new objects |
721 // (potentially adding new pages to the heap), to find and mark | 741 // (potentially adding new pages to the heap), to find and mark |
722 // objects during conservative stack scanning and to sweep the set of | 742 // objects during conservative stack scanning and to sweep the set of |
723 // pages after a GC. | 743 // pages after a GC. |
724 class PLATFORM_EXPORT ThreadHeap { | 744 class PLATFORM_EXPORT ThreadHeap final { |
725 public: | 745 public: |
726 ThreadHeap(ThreadState*, int); | 746 ThreadHeap(ThreadState*, int); |
727 virtual ~ThreadHeap(); | 747 ~ThreadHeap(); |
728 void cleanupPages(); | 748 void cleanupPages(); |
729 | 749 |
730 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) | 750 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) |
731 BaseHeapPage* findPageFromAddress(Address); | 751 BaseHeapPage* findPageFromAddress(Address); |
732 #endif | 752 #endif |
733 #if ENABLE(GC_PROFILING) | 753 #if ENABLE(GC_PROFILING) |
734 void snapshot(TracedValue*, ThreadState::SnapshotInfo*); | 754 void snapshot(TracedValue*, ThreadState::SnapshotInfo*); |
735 void incrementMarkedObjectsAge(); | 755 void incrementMarkedObjectsAge(); |
736 #endif | 756 #endif |
737 | 757 |
738 virtual void clearFreeLists() { } | 758 void clearFreeLists(); |
739 void makeConsistentForSweeping(); | 759 void makeConsistentForSweeping(); |
740 #if ENABLE(ASSERT) | 760 #if ENABLE(ASSERT) |
741 virtual bool isConsistentForSweeping() = 0; | 761 bool isConsistentForSweeping(); |
742 #endif | 762 #endif |
743 size_t objectPayloadSizeForTesting(); | 763 size_t objectPayloadSizeForTesting(); |
744 void prepareHeapForTermination(); | |
745 void prepareForSweep(); | |
746 Address lazySweep(size_t, size_t gcInfoIndex); | |
747 void completeSweep(); | |
748 | 764 |
749 ThreadState* threadState() { return m_threadState; } | 765 ThreadState* threadState() { return m_threadState; } |
750 int heapIndex() const { return m_index; } | |
751 inline static size_t allocationSizeFromSize(size_t); | |
752 inline static size_t roundedAllocationSize(size_t size) | |
753 { | |
754 return allocationSizeFromSize(size) - sizeof(HeapObjectHeader); | |
755 } | |
756 | 766 |
757 protected: | |
758 BaseHeapPage* m_firstPage; | |
759 BaseHeapPage* m_firstUnsweptPage; | |
760 | |
761 private: | |
762 virtual Address lazySweepPages(size_t, size_t gcInfoIndex) = 0; | |
763 | |
764 ThreadState* m_threadState; | |
765 | |
766 // Index into the page pools. This is used to ensure that the pages of the | |
767 // same type go into the correct page pool and thus avoid type confusion. | |
768 int m_index; | |
769 }; | |
770 | |
771 class PLATFORM_EXPORT ThreadHeapForHeapPage final : public ThreadHeap { | |
772 public: | |
773 ThreadHeapForHeapPage(ThreadState*, int); | |
774 void addToFreeList(Address address, size_t size) | 767 void addToFreeList(Address address, size_t size) |
775 { | 768 { |
776 ASSERT(findPageFromAddress(address)); | 769 ASSERT(findPageFromAddress(address)); |
777 ASSERT(findPageFromAddress(address + size - 1)); | 770 ASSERT(findPageFromAddress(address + size - 1)); |
778 m_freeList.addToFreeList(address, size); | 771 m_freeList.addToFreeList(address, size); |
779 } | 772 } |
780 virtual void clearFreeLists() override; | |
781 #if ENABLE(ASSERT) | |
782 virtual bool isConsistentForSweeping() override; | |
783 bool pagesToBeSweptContains(Address); | |
784 #endif | |
785 | 773 |
786 inline Address allocate(size_t payloadSize, size_t gcInfoIndex); | 774 inline Address allocate(size_t payloadSize, size_t gcInfoIndex); |
787 inline Address allocateObject(size_t allocationSize, size_t gcInfoIndex); | 775 inline static size_t roundedAllocationSize(size_t size) |
| 776 { |
| 777 return allocationSizeFromSize(size) - sizeof(HeapObjectHeader); |
| 778 } |
| 779 inline static size_t allocationSizeFromSize(size_t); |
| 780 |
| 781 void prepareHeapForTermination(); |
| 782 void prepareForSweep(); |
| 783 void completeSweep(); |
788 | 784 |
789 void freePage(HeapPage*); | 785 void freePage(HeapPage*); |
| 786 void freeLargeObject(LargeObject*); |
790 | 787 |
791 bool coalesce(); | |
792 void promptlyFreeObject(HeapObjectHeader*); | 788 void promptlyFreeObject(HeapObjectHeader*); |
793 bool expandObject(HeapObjectHeader*, size_t); | 789 bool expandObject(HeapObjectHeader*, size_t); |
794 void shrinkObject(HeapObjectHeader*, size_t); | 790 void shrinkObject(HeapObjectHeader*, size_t); |
795 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; } | 791 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; } |
796 | 792 |
797 #if ENABLE(GC_PROFILING) | 793 #if ENABLE(GC_PROFILING) |
798 void snapshotFreeList(TracedValue&); | 794 void snapshotFreeList(TracedValue&); |
799 | 795 |
800 void countMarkedObjects(ClassAgeCountsMap&) const; | 796 void countMarkedObjects(ClassAgeCountsMap&) const; |
801 void countObjectsToSweep(ClassAgeCountsMap&) const; | 797 void countObjectsToSweep(ClassAgeCountsMap&) const; |
802 #endif | 798 #endif |
803 | 799 |
804 private: | 800 private: |
805 void allocatePage(); | |
806 virtual Address lazySweepPages(size_t, size_t gcInfoIndex) override; | |
807 Address outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex); | 801 Address outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex); |
808 Address currentAllocationPoint() const { return m_currentAllocationPoint; } | 802 Address currentAllocationPoint() const { return m_currentAllocationPoint; } |
809 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } | 803 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } |
810 bool hasCurrentAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } | 804 bool hasCurrentAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } |
811 inline void setAllocationPoint(Address, size_t); | 805 inline void setAllocationPoint(Address, size_t); |
812 void updateRemainingAllocationSize(); | 806 void updateRemainingAllocationSize(); |
813 Address allocateFromFreeList(size_t, size_t gcInfoIndex); | 807 Address allocateFromFreeList(size_t, size_t gcInfoIndex); |
| 808 Address lazySweepPages(size_t, size_t gcInfoIndex); |
| 809 bool lazySweepLargeObjects(size_t); |
814 | 810 |
815 FreeList m_freeList; | 811 void allocatePage(); |
| 812 Address allocateLargeObject(size_t, size_t gcInfoIndex); |
| 813 |
| 814 inline Address allocateObject(size_t allocationSize, size_t gcInfoIndex); |
| 815 |
| 816 #if ENABLE(ASSERT) |
| 817 bool pagesToBeSweptContains(Address); |
| 818 #endif |
| 819 |
| 820 bool coalesce(); |
| 821 void preparePagesForSweeping(); |
| 822 |
816 Address m_currentAllocationPoint; | 823 Address m_currentAllocationPoint; |
817 size_t m_remainingAllocationSize; | 824 size_t m_remainingAllocationSize; |
818 size_t m_lastRemainingAllocationSize; | 825 size_t m_lastRemainingAllocationSize; |
819 | 826 |
| 827 HeapPage* m_firstPage; |
| 828 LargeObject* m_firstLargeObject; |
| 829 HeapPage* m_firstUnsweptPage; |
| 830 LargeObject* m_firstUnsweptLargeObject; |
| 831 |
| 832 ThreadState* m_threadState; |
| 833 |
| 834 FreeList m_freeList; |
| 835 |
| 836 // Index into the page pools. This is used to ensure that the pages of the |
| 837 // same type go into the correct page pool and thus avoid type confusion. |
| 838 int m_index; |
| 839 |
820 // The size of promptly freed objects in the heap. | 840 // The size of promptly freed objects in the heap. |
821 size_t m_promptlyFreedSize; | 841 size_t m_promptlyFreedSize; |
822 | 842 |
823 #if ENABLE(GC_PROFILING) | 843 #if ENABLE(GC_PROFILING) |
824 size_t m_cumulativeAllocationSize; | 844 size_t m_cumulativeAllocationSize; |
825 size_t m_allocationCount; | 845 size_t m_allocationCount; |
826 size_t m_inlineAllocationCount; | 846 size_t m_inlineAllocationCount; |
827 #endif | 847 #endif |
828 }; | 848 }; |
829 | 849 |
830 class ThreadHeapForLargeObject final : public ThreadHeap { | |
831 public: | |
832 ThreadHeapForLargeObject(ThreadState*, int); | |
833 Address allocateLargeObject(size_t, size_t gcInfoIndex); | |
834 void freeLargeObject(LargeObject*); | |
835 #if ENABLE(ASSERT) | |
836 virtual bool isConsistentForSweeping() override { return true; } | |
837 #endif | |
838 private: | |
839 Address doAllocateLargeObject(size_t, size_t gcInfoIndex); | |
840 virtual Address lazySweepPages(size_t, size_t gcInfoIndex) override; | |
841 }; | |
842 | |
843 // Mask an address down to the enclosing oilpan heap base page. All oilpan heap | 850 // Mask an address down to the enclosing oilpan heap base page. All oilpan heap |
844 // pages are aligned at blinkPageBase plus an OS page size. | 851 // pages are aligned at blinkPageBase plus an OS page size. |
845 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our | 852 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our |
846 // typed heaps. This is only exported to enable tests in HeapTest.cpp. | 853 // typed heaps. This is only exported to enable tests in HeapTest.cpp. |
847 PLATFORM_EXPORT inline BaseHeapPage* pageFromObject(const void* object) | 854 PLATFORM_EXPORT inline BaseHeapPage* pageFromObject(const void* object) |
848 { | 855 { |
849 Address address = reinterpret_cast<Address>(const_cast<void*>(object)); | 856 Address address = reinterpret_cast<Address>(const_cast<void*>(object)); |
850 BaseHeapPage* page = reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + WTF::kSystemPageSize); | 857 BaseHeapPage* page = reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + WTF::kSystemPageSize); |
851 ASSERT(page->contains(address)); | 858 ASSERT(page->contains(address)); |
852 return page; | 859 return page; |
(...skipping 495 matching lines...)
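
The masking in pageFromObject() above relies on every page header sitting one OS (guard) page past an aligned blink page boundary. A sketch of the arithmetic, with the constants as assumptions standing in for blinkPageSize and WTF::kSystemPageSize:

    #include <cstdint>

    const uintptr_t kBlinkPageSize = uintptr_t(1) << 17; // assumption: 128 KiB
    const uintptr_t kSystemPageSize = 4096;              // assumption: 4 KiB guard page

    // Round an interior object address down to its blink page base.
    inline uintptr_t blinkPageBase(uintptr_t address)
    {
        return address & ~(kBlinkPageSize - 1);
    }

    // The page header (BaseHeapPage) lives just past the guard page, so any
    // interior pointer maps back to it with:
    inline uintptr_t pageHeaderAddressFor(uintptr_t objectAddress)
    {
        return blinkPageBase(objectAddress) + kSystemPageSize;
    }
    // e.g. an object at 0x20000 + 0x1000 + 0x40 maps back to 0x20000 + 0x1000.
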
1348 // therefore has to happen before any calculation on the size. | 1355 // therefore has to happen before any calculation on the size. |
1349 RELEASE_ASSERT(size < maxHeapObjectSize); | 1356 RELEASE_ASSERT(size < maxHeapObjectSize); |
1350 | 1357 |
1351 // Add space for header. | 1358 // Add space for header. |
1352 size_t allocationSize = size + sizeof(HeapObjectHeader); | 1359 size_t allocationSize = size + sizeof(HeapObjectHeader); |
1353 // Align size with allocation granularity. | 1360 // Align size with allocation granularity. |
1354 allocationSize = (allocationSize + allocationMask) & ~allocationMask; | 1361 allocationSize = (allocationSize + allocationMask) & ~allocationMask; |
1355 return allocationSize; | 1362 return allocationSize; |
1356 } | 1363 } |
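
A worked instance of the rounding above, assuming a 4-byte HeapObjectHeader and an 8-byte allocation granularity (allocationMask == 7); the real constants are defined elsewhere in this header:

    #include <cstddef>

    inline size_t allocationSizeFromSizeSketch(size_t size)
    {
        const size_t kHeaderSize = 4;      // assumption: sizeof(HeapObjectHeader)
        const size_t kAllocationMask = 7;  // assumption: 8-byte granularity
        size_t allocationSize = size + kHeaderSize;                              // add space for header
        allocationSize = (allocationSize + kAllocationMask) & ~kAllocationMask;  // align up
        return allocationSize;
    }
    // allocationSizeFromSizeSketch(21) == 32: 21 + 4 = 25, rounded up to 32.
    // allocationSizeFromSizeSketch(28) == 32: 28 + 4 = 32, already aligned.
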
1357 | 1364 |
1358 Address ThreadHeapForHeapPage::allocateObject(size_t allocationSize, size_t gcInfoIndex) | 1365 Address ThreadHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex) |
1359 { | 1366 { |
1360 #if ENABLE(GC_PROFILING) | 1367 #if ENABLE(GC_PROFILING) |
1361 m_cumulativeAllocationSize += allocationSize; | 1368 m_cumulativeAllocationSize += allocationSize; |
1362 ++m_allocationCount; | 1369 ++m_allocationCount; |
1363 #endif | 1370 #endif |
1364 | 1371 |
1365 if (LIKELY(allocationSize <= m_remainingAllocationSize)) { | 1372 if (LIKELY(allocationSize <= m_remainingAllocationSize)) { |
1366 #if ENABLE(GC_PROFILING) | 1373 #if ENABLE(GC_PROFILING) |
1367 ++m_inlineAllocationCount; | 1374 ++m_inlineAllocationCount; |
1368 #endif | 1375 #endif |
1369 Address headerAddress = m_currentAllocationPoint; | 1376 Address headerAddress = m_currentAllocationPoint; |
1370 m_currentAllocationPoint += allocationSize; | 1377 m_currentAllocationPoint += allocationSize; |
1371 m_remainingAllocationSize -= allocationSize; | 1378 m_remainingAllocationSize -= allocationSize; |
1372 ASSERT(gcInfoIndex > 0); | 1379 ASSERT(gcInfoIndex > 0); |
1373 new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex); | 1380 new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex); |
1374 Address result = headerAddress + sizeof(HeapObjectHeader); | 1381 Address result = headerAddress + sizeof(HeapObjectHeader); |
1375 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 1382 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
1376 | 1383 |
1377 // Unpoison the memory used for the object (payload). | 1384 // Unpoison the memory used for the object (payload). |
1378 ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(HeapObjectHeader)); | 1385 ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(HeapObjectHeader)); |
1379 FILL_ZERO_IF_NOT_PRODUCTION(result, allocationSize - sizeof(HeapObjectHeader)); | 1386 FILL_ZERO_IF_NOT_PRODUCTION(result, allocationSize - sizeof(HeapObjectHeader)); |
1380 ASSERT(findPageFromAddress(headerAddress + allocationSize - 1)); | 1387 ASSERT(findPageFromAddress(headerAddress + allocationSize - 1)); |
1381 return result; | 1388 return result; |
1382 } | 1389 } |
1383 return outOfLineAllocate(allocationSize, gcInfoIndex); | 1390 return outOfLineAllocate(allocationSize, gcInfoIndex); |
1384 } | 1391 } |
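
Stripped of the header construction and sanitizer bookkeeping, the fast path above is plain bump-pointer allocation; a minimal standalone sketch:

    #include <cstddef>

    struct BumpAllocationArea {
        char* currentAllocationPoint = nullptr;
        size_t remainingAllocationSize = 0;

        // Returns nullptr when the area is exhausted and the caller must take
        // the out-of-line (slow) path, mirroring outOfLineAllocate() above.
        void* tryAllocate(size_t allocationSize)
        {
            if (allocationSize > remainingAllocationSize)
                return nullptr;
            char* result = currentAllocationPoint;
            currentAllocationPoint += allocationSize;
            remainingAllocationSize -= allocationSize;
            return result;
        }
    };
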
1385 | 1392 |
1386 Address ThreadHeapForHeapPage::allocate(size_t size, size_t gcInfoIndex) | 1393 Address ThreadHeap::allocate(size_t size, size_t gcInfoIndex) |
1387 { | 1394 { |
1388 return allocateObject(allocationSizeFromSize(size), gcInfoIndex); | 1395 return allocateObject(allocationSizeFromSize(size), gcInfoIndex); |
1389 } | 1396 } |
1390 | 1397 |
1391 template<typename T> | 1398 template<typename T> |
1392 struct HeapIndexTrait { | 1399 struct HeapIndexTrait { |
1393 static int index() { return GeneralHeap; }; | 1400 static int index() { return GeneralHeap; }; |
1394 }; | 1401 }; |
1395 | 1402 |
1396 // FIXME: The forward declaration is layering violation. | 1403 // FIXME: The forward declaration is layering violation. |
1397 #define DEFINE_TYPED_HEAP_TRAIT(Type) \ | 1404 #define DEFINE_TYPED_HEAP_TRAIT(Type) \ |
1398 class Type; \ | 1405 class Type; \ |
1399 template<> \ | 1406 template<> \ |
1400 struct HeapIndexTrait<class Type> { \ | 1407 struct HeapIndexTrait<class Type> { \ |
1401 static int index() { return Type##Heap; }; \ | 1408 static int index() { return Type##Heap; }; \ |
1402 }; | 1409 }; |
1403 FOR_EACH_TYPED_HEAP(DEFINE_TYPED_HEAP_TRAIT) | 1410 FOR_EACH_TYPED_HEAP(DEFINE_TYPED_HEAP_TRAIT) |
1404 #undef DEFINE_TYPED_HEAP_TRAIT | 1411 #undef DEFINE_TYPED_HEAP_TRAIT |
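
For one of the typed heaps (Node is the example used in the ThreadHeap comment earlier), DEFINE_TYPED_HEAP_TRAIT expands to roughly the following, assuming NodeHeap is the matching heap-index enumerator supplied alongside FOR_EACH_TYPED_HEAP:

    // Approximate expansion of DEFINE_TYPED_HEAP_TRAIT(Node); illustrative only.
    class Node;
    template<>
    struct HeapIndexTrait<class Node> {
        static int index() { return NodeHeap; };
    };
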
1405 | 1412 |
1406 template<typename T> | 1413 template<typename T> |
1407 Address Heap::allocateOnHeapIndex(size_t size, int heapIndex, size_t gcInfoIndex) | 1414 Address Heap::allocateOnHeapIndex(size_t size, int heapIndex, size_t gcInfoIndex) |
1408 { | 1415 { |
1409 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); | 1416 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); |
1410 ASSERT(state->isAllocationAllowed()); | 1417 ASSERT(state->isAllocationAllowed()); |
1411 return static_cast<ThreadHeapForHeapPage*>(state->heap(heapIndex))->allocate(size, gcInfoIndex); | 1418 return state->heap(heapIndex)->allocate(size, gcInfoIndex); |
1412 } | 1419 } |
1413 | 1420 |
1414 template<typename T> | 1421 template<typename T> |
1415 Address Heap::allocate(size_t size) | 1422 Address Heap::allocate(size_t size) |
1416 { | 1423 { |
1417 return allocateOnHeapIndex<T>(size, HeapIndexTrait<T>::index(), GCInfoTrait<T>::index()); | 1424 return allocateOnHeapIndex<T>(size, HeapIndexTrait<T>::index(), GCInfoTrait<T>::index()); |
1418 } | 1425 } |
1419 | 1426 |
1420 template<typename T> | 1427 template<typename T> |
1421 Address Heap::reallocate(void* previous, size_t size) | 1428 Address Heap::reallocate(void* previous, size_t size) |
(...skipping 1033 matching lines...)
2455 template<typename T, size_t inlineCapacity> | 2462 template<typename T, size_t inlineCapacity> |
2456 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { }; | 2463 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { }; |
2457 template<typename T, size_t inlineCapacity> | 2464 template<typename T, size_t inlineCapacity> |
2458 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { }; | 2465 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { }; |
2459 template<typename T, typename U, typename V> | 2466 template<typename T, typename U, typename V> |
2460 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { }; | 2467 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { }; |
2461 | 2468 |
2462 } // namespace blink | 2469 } // namespace blink |
2463 | 2470 |
2464 #endif // Heap_h | 2471 #endif // Heap_h |