| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 494 matching lines...) |
| 505 // Compute the amount of padding we have to add to a header to make | 505 // Compute the amount of padding we have to add to a header to make |
| 506 // the size of the header plus the padding a multiple of 8 bytes. | 506 // the size of the header plus the padding a multiple of 8 bytes. |
| 507 size_t paddingSize = (sizeof(NormalPage) + allocationGranularity - | 507 size_t paddingSize = (sizeof(NormalPage) + allocationGranularity - |
| 508 (sizeof(HeapObjectHeader) % allocationGranularity)) % | 508 (sizeof(HeapObjectHeader) % allocationGranularity)) % |
| 509 allocationGranularity; | 509 allocationGranularity; |
| 510 return sizeof(NormalPage) + paddingSize; | 510 return sizeof(NormalPage) + paddingSize; |
| 511 } | 511 } |
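Worked through with concrete numbers, the padding formula above is easier to see: it pads `sizeof(NormalPage)` so that the page header plus the first `HeapObjectHeader` ends on an `allocationGranularity` boundary, i.e. the first object's payload is aligned. The sketch below is illustration only; the byte sizes are made-up stand-ins (the real `sizeof(NormalPage)` and `sizeof(HeapObjectHeader)` are platform-dependent), and `allocationGranularity` is assumed to be 8.

```cpp
#include <cstdio>
#include <cstddef>

int main() {
  // Made-up stand-ins for the real, platform-dependent sizes.
  const std::size_t allocationGranularity = 8;
  const std::size_t pageClassSize = 120;  // plays sizeof(NormalPage)
  const std::size_t headerSize = 4;       // plays sizeof(HeapObjectHeader)

  // Same expression as pageHeaderSize() above.
  std::size_t paddingSize =
      (pageClassSize + allocationGranularity -
       (headerSize % allocationGranularity)) %
      allocationGranularity;

  // (120 + 8 - 4) % 8 == 4, so the page header grows to 124 bytes, and
  // 124 + 4 == 128: the payload after the first object header starts on
  // a granularity-aligned boundary.
  std::printf("padding = %zu, page header = %zu\n", paddingSize,
              pageClassSize + paddingSize);
  return 0;
}
```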
| 512 | 512 |
| 513 inline NormalPageArena* arenaForNormalPage() const; | 513 inline NormalPageArena* arenaForNormalPage() const; |
| 514 | 514 |
| 515 // Context object holding the state of the arena page compaction pass, | |
| 516 // passed in when compacting individual pages. | |
| 517 class CompactionContext { | |
| 518 STACK_ALLOCATED(); | |
| 519 | |
| 520 public: | |
| 521 // Page being compacted into. |
| 522 NormalPage* m_currentPage = nullptr; | |
| 523 // Offset into |m_currentPage| to the next free address. | |
| 524 size_t m_allocationPoint = 0; | |
| 525 // Chain of available pages to use for compaction. Page compaction | |
| 526 // picks the next one when the current one is exhausted. | |
| 527 BasePage* m_availablePages = nullptr; | |
| 528 // Chain of pages that have been compacted. Page compaction adds |
| 529 // the current page to this chain once it becomes exhausted. |
| 530 BasePage** m_compactedPages = nullptr; | |
| 531 }; | |
| 532 | |
| 533 void sweepAndCompact(CompactionContext&); | |
| 534 | |
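Taken together, the fields above describe a cursor that a compaction pass threads through an arena: each call to sweepAndCompact() keeps sliding live objects into `m_currentPage` at `m_allocationPoint`, pulls the next page from `m_availablePages` when the current target fills up, and retires full targets onto the `*m_compactedPages` chain. A simplified standalone sketch of that bookkeeping, with stand-in types in place of the real NormalPage/BasePage classes:

```cpp
#include <cstddef>

// Stand-in types for illustration; the real NormalPage/BasePage
// classes are declared elsewhere in this header.
struct Page {
  Page* next = nullptr;
  std::size_t capacity = 0;
};

struct Context {
  Page* currentPage = nullptr;     // page being compacted into
  std::size_t allocationPoint = 0; // next free offset in currentPage
  Page* availablePages = nullptr;  // pages still available as targets
  Page** compactedPages = nullptr; // chain that retired targets join
};

// Slide one live object of `size` bytes into the compaction target,
// moving on to the next available page when the current one is full.
// Assumes availablePages can always supply another page, which holds
// because compaction never needs more pages than it is freeing.
void relocate(Context& ctx, std::size_t size) {
  if (ctx.allocationPoint + size > ctx.currentPage->capacity) {
    // Current target is exhausted: retire it onto the compacted chain.
    ctx.currentPage->next = *ctx.compactedPages;
    *ctx.compactedPages = ctx.currentPage;
    ctx.currentPage = ctx.availablePages;
    ctx.availablePages = ctx.availablePages->next;
    ctx.allocationPoint = 0;
  }
  // A real pass would memmove the object's payload here.
  ctx.allocationPoint += size;
}
```

The real pass also moves each object's payload and fixes up interior pointers; the sketch keeps only the page-chaining logic that the context fields exist to support.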
| 535 private: | 515 private: |
| 536 HeapObjectHeader* findHeaderFromAddress(Address); | 516 HeapObjectHeader* findHeaderFromAddress(Address); |
| 537 void populateObjectStartBitMap(); | 517 void populateObjectStartBitMap(); |
| 538 | 518 |
| 539 bool m_objectStartBitMapComputed; | 519 bool m_objectStartBitMapComputed; |
| 540 uint8_t m_objectStartBitMap[reservedForObjectBitMap]; | 520 uint8_t m_objectStartBitMap[reservedForObjectBitMap]; |
| 541 }; | 521 }; |
| 542 | 522 |
| 543 // Large allocations are allocated as separate objects and linked in a list. | 523 // Large allocations are allocated as separate objects and linked in a list. |
| 544 // | 524 // |
| (...skipping 133 matching lines...) |
| 678 static void zapFreedMemory(Address, size_t); | 658 static void zapFreedMemory(Address, size_t); |
| 679 static void checkFreedMemoryIsZapped(Address, size_t); | 659 static void checkFreedMemoryIsZapped(Address, size_t); |
| 680 #endif | 660 #endif |
| 681 | 661 |
| 682 private: | 662 private: |
| 683 int m_biggestFreeListIndex; | 663 int m_biggestFreeListIndex; |
| 684 | 664 |
| 685 // All FreeListEntries in the nth list have size >= 2^n. | 665 // All FreeListEntries in the nth list have size >= 2^n. |
| 686 FreeListEntry* m_freeLists[blinkPageSizeLog2]; | 666 FreeListEntry* m_freeLists[blinkPageSizeLog2]; |
| 687 | 667 |
| 688 size_t freeListSize() const; | |
| 689 | |
| 690 friend class NormalPageArena; | 668 friend class NormalPageArena; |
| 691 }; | 669 }; |
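The invariant in the comment above pins down the bucket choice: a free block of size s belongs in bucket floor(log2(s)), so every entry filed under index n is at least 2^n bytes. A small illustrative sketch of that index computation (not the actual FreeList code, which may well use a bit-scan intrinsic):

```cpp
#include <cassert>
#include <cstddef>

// Bucket index for a free block: the position of its highest set bit.
// Every block filed under index n then has size >= (1 << n), matching
// the invariant documented for m_freeLists.
int bucketIndexForSize(std::size_t size) {
  assert(size > 0);
  int index = -1;
  while (size > 0) {
    size >>= 1;
    ++index;
  }
  return index;
}

// Example: a 48-byte block lands in bucket 5, since 32 = 2^5 <= 48 < 64.
```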
| 692 | 670 |
| 693 // Each thread has a number of thread arenas (e.g., Generic arenas, | 671 // Each thread has a number of thread arenas (e.g., Generic arenas, |
| 694 // typed arenas for Node, arenas for collection backings, etc.), | 672 // typed arenas for Node, arenas for collection backings, etc.), |
| 695 // and BaseArena represents each such arena. | 673 // and BaseArena represents each such arena. |
| 696 // | 674 // |
| 697 // BaseArena is a parent class of NormalPageArena and LargeObjectArena. | 675 // BaseArena is a parent class of NormalPageArena and LargeObjectArena. |
| 698 // NormalPageArena represents a part of a heap that contains NormalPages | 676 // NormalPageArena represents a part of a heap that contains NormalPages |
| 699 // and LargeObjectArena represents a part of a heap that contains | 677 // and LargeObjectArena represents a part of a heap that contains |
| (...skipping 76 matching lines...) |
| 776 bool shrinkObject(HeapObjectHeader*, size_t); | 754 bool shrinkObject(HeapObjectHeader*, size_t); |
| 777 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; } | 755 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; } |
| 778 | 756 |
| 779 bool isObjectAllocatedAtAllocationPoint(HeapObjectHeader* header) { | 757 bool isObjectAllocatedAtAllocationPoint(HeapObjectHeader* header) { |
| 780 return header->payloadEnd() == m_currentAllocationPoint; | 758 return header->payloadEnd() == m_currentAllocationPoint; |
| 781 } | 759 } |
| 782 | 760 |
| 783 bool isLazySweeping() const { return m_isLazySweeping; } | 761 bool isLazySweeping() const { return m_isLazySweeping; } |
| 784 void setIsLazySweeping(bool flag) { m_isLazySweeping = flag; } | 762 void setIsLazySweeping(bool flag) { m_isLazySweeping = flag; } |
| 785 | 763 |
| 786 size_t arenaSize(); | |
| 787 size_t freeListSize(); | |
| 788 | |
| 789 void sweepAndCompact(); | |
| 790 | |
| 791 private: | 764 private: |
| 792 void allocatePage(); | 765 void allocatePage(); |
| 793 | |
| 794 Address outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex); | 766 Address outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex); |
| 795 Address allocateFromFreeList(size_t, size_t gcInfoIndex); | 767 Address allocateFromFreeList(size_t, size_t gcInfoIndex); |
| 796 | 768 |
| 797 Address lazySweepPages(size_t, size_t gcInfoIndex) override; | 769 Address lazySweepPages(size_t, size_t gcInfoIndex) override; |
| 798 | 770 |
| 799 Address currentAllocationPoint() const { return m_currentAllocationPoint; } | 771 Address currentAllocationPoint() const { return m_currentAllocationPoint; } |
| 800 bool hasCurrentAllocationArea() const { | 772 bool hasCurrentAllocationArea() const { |
| 801 return currentAllocationPoint() && remainingAllocationSize(); | 773 return currentAllocationPoint() && remainingAllocationSize(); |
| 802 } | 774 } |
| 803 void setAllocationPoint(Address, size_t); | 775 void setAllocationPoint(Address, size_t); |
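The accessors above describe a classic bump-pointer allocation area: the half-open window [currentAllocationPoint, currentAllocationPoint + remainingAllocationSize) from which the inline fast path carves objects, falling back to outOfLineAllocate() (as in the allocate() tail shown further below) when the window is too small. A minimal sketch of that fast path, using stand-in names rather than NormalPageArena's real fields:

```cpp
#include <cstddef>

using Address = char*;

// Stand-in for NormalPageArena's bump-allocation state.
struct BumpArea {
  Address currentAllocationPoint = nullptr;
  std::size_t remainingAllocationSize = 0;

  // Fast path: carve the object out of the current area by bumping the
  // pointer; return nullptr so the caller can take the out-of-line
  // slow path (free list, lazy sweeping, or a new page).
  Address tryAllocate(std::size_t allocationSize) {
    if (allocationSize > remainingAllocationSize)
      return nullptr;
    Address result = currentAllocationPoint;
    currentAllocationPoint += allocationSize;
    remainingAllocationSize -= allocationSize;
    return result;
  }
};
```

This also explains isObjectAllocatedAtAllocationPoint() above: an object whose payloadEnd() equals the current allocation point was the most recent bump, so it can plausibly be expanded or shrunk in place by just moving the pointer.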
| (...skipping 144 matching lines...) |
| 948 return outOfLineAllocate(allocationSize, gcInfoIndex); | 920 return outOfLineAllocate(allocationSize, gcInfoIndex); |
| 949 } | 921 } |
| 950 | 922 |
| 951 inline NormalPageArena* NormalPage::arenaForNormalPage() const { | 923 inline NormalPageArena* NormalPage::arenaForNormalPage() const { |
| 952 return static_cast<NormalPageArena*>(arena()); | 924 return static_cast<NormalPageArena*>(arena()); |
| 953 } | 925 } |
| 954 | 926 |
| 955 } // namespace blink | 927 } // namespace blink |
| 956 | 928 |
| 957 #endif // HeapPage_h | 929 #endif // HeapPage_h |