| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 494 matching lines...) |
| 505 // Compute the amount of padding we have to add to a header to make | 505 // Compute the amount of padding we have to add to a header to make |
| 506 // the size of the header plus the padding a multiple of 8 bytes. | 506 // the size of the header plus the padding a multiple of 8 bytes. |
| 507 size_t paddingSize = (sizeof(NormalPage) + allocationGranularity - | 507 size_t paddingSize = (sizeof(NormalPage) + allocationGranularity - |
| 508 (sizeof(HeapObjectHeader) % allocationGranularity)) % | 508 (sizeof(HeapObjectHeader) % allocationGranularity)) % |
| 509 allocationGranularity; | 509 allocationGranularity; |
| 510 return sizeof(NormalPage) + paddingSize; | 510 return sizeof(NormalPage) + paddingSize; |
| 511 } | 511 } |
| 512 | 512 |
| 513 inline NormalPageArena* arenaForNormalPage() const; | 513 inline NormalPageArena* arenaForNormalPage() const; |
| 514 | 514 |
| 515 // Context object holding the state of the arena page compaction pass, |
| 516 // passed in when compacting individual pages. |
| 517 class CompactionContext { |
| 518 STACK_ALLOCATED(); |
| 519 |
| 520 public: |
| 521 // Page currently being compacted into. |
| 522 NormalPage* m_currentPage = nullptr; |
| 523 // Offset into |m_currentPage| to the next free address. |
| 524 size_t m_allocationPoint = 0; |
| 525 // Chain of available pages to use for compaction. Page compaction |
| 526 // picks the next one when the current one is exhausted. |
| 527 BasePage* m_availablePages = nullptr; |
| 528 // Chain of pages that have been compacted. Page compaction will |
| 529 // add compacted pages once the current one becomes exhausted. |
| 530 BasePage** m_compactedPages = nullptr; |
| 531 }; |
| 532 |
| 533 void sweepAndCompact(CompactionContext&); |
| 534 |
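The field comments above describe the state a compaction pass carries between pages. Below is a minimal, self-contained sketch of how those fields could interact; the Page/Context types and the relocate() helper are illustrative stand-ins, not code from this CL, whose per-page logic is declared as sweepAndCompact(CompactionContext&) above.

```cpp
// Simplified model of the CompactionContext fields; types and sizes are
// stand-ins, not the actual Blink code.
#include <cassert>
#include <cstddef>
#include <cstring>

struct Page {
  static const size_t payloadSize = 4096;
  char payload[payloadSize];
  Page* next = nullptr;
};

struct Context {
  Page* currentPage = nullptr;      // page being compacted into
  size_t allocationPoint = 0;       // offset of the next free byte
  Page* availablePages = nullptr;   // pages still free to compact into
  Page** compactedPages = nullptr;  // head of the chain of finished pages
};

// Copy one live object into the context's current page; when that page is
// exhausted, retire it onto the compacted-pages chain and pick the next
// available page, mirroring the field comments in the CL.
void relocate(Context& ctx, const char* object, size_t size) {
  assert(size <= Page::payloadSize);
  if (ctx.allocationPoint + size > Page::payloadSize) {
    assert(ctx.availablePages);  // a real pass would allocate a fresh page
    Page* exhausted = ctx.currentPage;
    ctx.currentPage = ctx.availablePages;
    ctx.availablePages = ctx.availablePages->next;
    ctx.allocationPoint = 0;
    exhausted->next = *ctx.compactedPages;
    *ctx.compactedPages = exhausted;
  }
  std::memcpy(ctx.currentPage->payload + ctx.allocationPoint, object, size);
  ctx.allocationPoint += size;
}
```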
| 515 private: | 535 private: |
| 516 HeapObjectHeader* findHeaderFromAddress(Address); | 536 HeapObjectHeader* findHeaderFromAddress(Address); |
| 517 void populateObjectStartBitMap(); | 537 void populateObjectStartBitMap(); |
| 518 | 538 |
| 519 bool m_objectStartBitMapComputed; | 539 bool m_objectStartBitMapComputed; |
| 520 uint8_t m_objectStartBitMap[reservedForObjectBitMap]; | 540 uint8_t m_objectStartBitMap[reservedForObjectBitMap]; |
| 521 }; | 541 }; |
| 522 | 542 |
| 523 // Large allocations are allocated as separate objects and linked in a list. | 543 // Large allocations are allocated as separate objects and linked in a list. |
| 524 // | 544 // |
| (...skipping 133 matching lines...) |
| 658 static void zapFreedMemory(Address, size_t); | 678 static void zapFreedMemory(Address, size_t); |
| 659 static void checkFreedMemoryIsZapped(Address, size_t); | 679 static void checkFreedMemoryIsZapped(Address, size_t); |
| 660 #endif | 680 #endif |
| 661 | 681 |
| 662 private: | 682 private: |
| 663 int m_biggestFreeListIndex; | 683 int m_biggestFreeListIndex; |
| 664 | 684 |
| 665 // All FreeListEntries in the nth list have size >= 2^n. | 685 // All FreeListEntries in the nth list have size >= 2^n. |
| 666 FreeListEntry* m_freeLists[blinkPageSizeLog2]; | 686 FreeListEntry* m_freeLists[blinkPageSizeLog2]; |
| 667 | 687 |
| 688 size_t freeListSize() const; |
| 689 |
| 668 friend class NormalPageArena; | 690 friend class NormalPageArena; |
| 669 }; | 691 }; |
| 670 | 692 |
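The invariant documented above (entries in the nth list have size >= 2^n) implies a simple mapping from entry size to bucket: the position of the highest set bit. A small sketch of that mapping; the function name is illustrative and may not match the actual helper in HeapPage.cpp.

```cpp
#include <cassert>
#include <cstddef>

// Bucket index for a free-list entry of |size| bytes: the largest n with
// 2^n <= size, so every entry placed in m_freeLists[n] is at least 2^n bytes.
int bucketIndexForSize(size_t size) {
  assert(size > 0);
  int index = -1;
  while (size) {
    ++index;
    size >>= 1;
  }
  return index;
}

// Example: sizes 32..63 all land in bucket 5 and are all >= 2^5 = 32,
// sizes 64..127 land in bucket 6, and so on.
```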
| 671 // Each thread has a number of thread arenas (e.g., Generic arenas, | 693 // Each thread has a number of thread arenas (e.g., Generic arenas, |
| 672 // typed arenas for Node, arenas for collection backings etc) | 694 // typed arenas for Node, arenas for collection backings etc) |
| 673 // and BaseArena represents each thread arena. | 695 // and BaseArena represents each thread arena. |
| 674 // | 696 // |
| 675 // BaseArena is a parent class of NormalPageArena and LargeObjectArena. | 697 // BaseArena is a parent class of NormalPageArena and LargeObjectArena. |
| 676 // NormalPageArena represents a part of a heap that contains NormalPages | 698 // NormalPageArena represents a part of a heap that contains NormalPages |
| 677 // and LargeObjectArena represents a part of a heap that contains | 699 // and LargeObjectArena represents a part of a heap that contains |
| (...skipping 76 matching lines...) |
| 754 bool shrinkObject(HeapObjectHeader*, size_t); | 776 bool shrinkObject(HeapObjectHeader*, size_t); |
| 755 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; } | 777 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; } |
| 756 | 778 |
| 757 bool isObjectAllocatedAtAllocationPoint(HeapObjectHeader* header) { | 779 bool isObjectAllocatedAtAllocationPoint(HeapObjectHeader* header) { |
| 758 return header->payloadEnd() == m_currentAllocationPoint; | 780 return header->payloadEnd() == m_currentAllocationPoint; |
| 759 } | 781 } |
| 760 | 782 |
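isObjectAllocatedAtAllocationPoint() is true exactly for the object that was bump-allocated last, since its payload ends where the next allocation would begin. A plausible use, sketched below with simplified stand-in types (an assumption about how the arena exploits the check, not code from the CL), is to reclaim such an object by rewinding the bump pointer instead of free-listing it.

```cpp
#include <cstddef>
#include <cstdint>

// Simplified stand-in for the arena's bump-allocation state.
struct ArenaSketch {
  uint8_t* currentAllocationPoint = nullptr;
  size_t remainingAllocationSize = 0;

  // The object whose payload ends at the allocation point is the one
  // allocated most recently.
  bool isAtAllocationPoint(uint8_t* payloadEnd) const {
    return payloadEnd == currentAllocationPoint;
  }

  // Freeing that object can simply rewind the bump pointer; any other
  // object would have to be free-listed or left for the sweeper (not shown).
  void promptlyFree(uint8_t* payloadEnd, size_t objectSize) {
    if (isAtAllocationPoint(payloadEnd)) {
      currentAllocationPoint -= objectSize;
      remainingAllocationSize += objectSize;
    }
  }
};
```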
| 761 bool isLazySweeping() const { return m_isLazySweeping; } | 783 bool isLazySweeping() const { return m_isLazySweeping; } |
| 762 void setIsLazySweeping(bool flag) { m_isLazySweeping = flag; } | 784 void setIsLazySweeping(bool flag) { m_isLazySweeping = flag; } |
| 763 | 785 |
| 786 size_t arenaSize(); |
| 787 size_t freeListSize(); |
| 788 |
| 789 void sweepAndCompact(); |
| 790 |
| 764 private: | 791 private: |
| 765 void allocatePage(); | 792 void allocatePage(); |
| 793 |
| 766 Address outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex); | 794 Address outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex); |
| 767 Address allocateFromFreeList(size_t, size_t gcInfoIndex); | 795 Address allocateFromFreeList(size_t, size_t gcInfoIndex); |
| 768 | 796 |
| 769 Address lazySweepPages(size_t, size_t gcInfoIndex) override; | 797 Address lazySweepPages(size_t, size_t gcInfoIndex) override; |
| 770 | 798 |
| 771 Address currentAllocationPoint() const { return m_currentAllocationPoint; } | 799 Address currentAllocationPoint() const { return m_currentAllocationPoint; } |
| 772 bool hasCurrentAllocationArea() const { | 800 bool hasCurrentAllocationArea() const { |
| 773 return currentAllocationPoint() && remainingAllocationSize(); | 801 return currentAllocationPoint() && remainingAllocationSize(); |
| 774 } | 802 } |
| 775 void setAllocationPoint(Address, size_t); | 803 void setAllocationPoint(Address, size_t); |
| (...skipping 144 matching lines...) |
| 920 return outOfLineAllocate(allocationSize, gcInfoIndex); | 948 return outOfLineAllocate(allocationSize, gcInfoIndex); |
| 921 } | 949 } |
| 922 | 950 |
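The two lines above are the tail of the inlined allocation routine: when the fast path (elided by the diff) cannot satisfy the request, it falls through to outOfLineAllocate(). A minimal sketch of that bump-pointer shape with stand-in types; the real routine also writes a HeapObjectHeader and carries a gcInfoIndex, which this sketch omits.

```cpp
#include <cstddef>
#include <cstdint>

// Stand-in showing the shape of the inlined fast path: bump-allocate from
// the current allocation area when it is big enough, otherwise defer to the
// out-of-line slow path (free lists, new pages, lazy sweeping).
struct BumpAllocatorSketch {
  uint8_t* currentAllocationPoint = nullptr;
  size_t remainingAllocationSize = 0;

  // Slow-path stub; the real outOfLineAllocate refills the allocation area.
  uint8_t* outOfLineAllocate(size_t) { return nullptr; }

  uint8_t* allocate(size_t allocationSize) {
    if (allocationSize <= remainingAllocationSize) {
      uint8_t* result = currentAllocationPoint;
      currentAllocationPoint += allocationSize;
      remainingAllocationSize -= allocationSize;
      return result;
    }
    return outOfLineAllocate(allocationSize);
  }
};
```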
| 923 inline NormalPageArena* NormalPage::arenaForNormalPage() const { | 951 inline NormalPageArena* NormalPage::arenaForNormalPage() const { |
| 924 return static_cast<NormalPageArena*>(arena()); | 952 return static_cast<NormalPageArena*>(arena()); |
| 925 } | 953 } |
| 926 | 954 |
| 927 } // namespace blink | 955 } // namespace blink |
| 928 | 956 |
| 929 #endif // HeapPage_h | 957 #endif // HeapPage_h |