Chromium Code Reviews
| Index: src/heap/spaces.h |
| diff --git a/src/heap/spaces.h b/src/heap/spaces.h |
| index 7890de345d6f14221c0368841ab6440160958724..982ceb3e9f19fec40f14e29c3b2c4fd4c8c76377 100644 |
| --- a/src/heap/spaces.h |
| +++ b/src/heap/spaces.h |
| @@ -27,6 +27,7 @@ class FreeList; |
| class Isolate; |
| class MemoryAllocator; |
| class MemoryChunk; |
| +class Page; |
| class PagedSpace; |
| class SemiSpace; |
| class SkipList; |
| @@ -285,6 +286,113 @@ class Bitmap { |
| } |
| }; |
| +enum FreeListCategoryType { |
| + kTiniest, |
| + kTiny, |
| + kSmall, |
| + kMedium, |
| + kLarge, |
| + kHuge, |
| + |
| + kFirstCategory = kTiniest, |
| + kLastCategory = kHuge, |
| + kNumberOfCategories = kLastCategory + 1, |
| + kInvalidCategory |
| +}; |
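For orientation, the categories partition free blocks by size. A minimal sketch of the mapping, with placeholder thresholds (the real bounds are the k*ListMax constants defined on FreeList further down in this file; the numbers below are illustrative only):

    // Sketch only; thresholds are illustrative, not the actual constants.
    FreeListCategoryType SelectCategoryForSize(size_t size_in_bytes) {
      if (size_in_bytes <= 10 * kPointerSize) return kTiniest;
      if (size_in_bytes <= 32 * kPointerSize) return kTiny;
      if (size_in_bytes <= 256 * kPointerSize) return kSmall;
      if (size_in_bytes <= 2048 * kPointerSize) return kMedium;
      if (size_in_bytes <= 16384 * kPointerSize) return kLarge;
      return kHuge;
    }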
| + |
| +enum FreeMode { kLinkCategory, kDoNotLinkCategory }; |
| + |
| +// A free list category maintains a linked list of free memory blocks. |
| +class FreeListCategory { |
| + public: |
| + static const int kSize = kIntSize + // FreeListCategoryType type_ |
| + kIntSize + // int available_ |
| + kPointerSize + // FreeSpace* top_ |
| + kPointerSize + // FreeListCategory* prev_ |
| + kPointerSize; // FreeListCategory* next_ |
| + |
| + FreeListCategory() |
| + : type_(kInvalidCategory), |
| + available_(0), |
| + top_(nullptr), |
| + prev_(nullptr), |
| + next_(nullptr) {} |
| + |
| + void Initialize(FreeListCategoryType type) { |
| + type_ = type; |
| + available_ = 0; |
| + top_ = nullptr; |
| + prev_ = nullptr; |
| + next_ = nullptr; |
| + } |
| + |
| + void Invalidate(); |
| + |
| + void Reset(); |
| + |
| + void ResetStats() { Reset(); } |
| + |
| + void RepairFreeList(Heap* heap); |
| + |
| + // Relinks the category into the currently owning free list. Requires that the |
| + // category is currently unlinked. |
| + void Relink(); |
| + |
| + bool Free(FreeSpace* node, int size_in_bytes, FreeMode mode); |
|
Hannes Payer (out of office)
2016/03/11 09:56:24
Why not templatize on mode?
Michael Lippautz
2016/03/11 10:19:12
Templatizing FreeListCategory::Free would then req
|
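For clarity, the reviewer's suggestion would roughly take the following shape (hypothetical sketch, not part of this patch):

    // Hypothetical: the mode becomes a compile-time parameter instead of a
    // runtime argument.
    template <FreeMode mode>
    bool Free(FreeSpace* node, int size_in_bytes);
    // Call sites would then read: category->Free<kLinkCategory>(node, size);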
| + |
| + // Picks a node from the list and stores its size in |node_size|. Returns |
| + // nullptr if the category is empty. |
| + FreeSpace* PickNodeFromList(int* node_size); |
| + |
| + // Performs a single try to pick a node of at least |minimum_size| from the |
| + // category. Stores the actual size in |node_size|. Returns nullptr if no |
| + // node is found. |
| + FreeSpace* TryPickNodeFromList(int minimum_size, int* node_size); |
| + |
| + // Picks a node of at least |minimum_size| from the category. Stores the |
| + // actual size in |node_size|. Returns nullptr if no node is found. |
| + FreeSpace* SearchForNodeInList(int minimum_size, int* node_size); |
| + |
| + inline FreeList* owner(); |
| + inline bool is_linked(); |
| + bool is_empty() { return top() == nullptr; } |
| + int available() const { return available_; } |
| + |
| +#ifdef DEBUG |
| + intptr_t SumFreeList(); |
| + int FreeListLength(); |
| +#endif |
| + |
| + private: |
| + // For debug builds we accurately compute free list lengths up until |
| + // {kVeryLongFreeList} by manually walking the list. |
| + static const int kVeryLongFreeList = 500; |
| + |
| + inline Page* page(); |
| + |
| + FreeSpace* top() { return top_; } |
| + void set_top(FreeSpace* top) { top_ = top; } |
| + FreeListCategory* prev() { return prev_; } |
| + void set_prev(FreeListCategory* prev) { prev_ = prev; } |
| + FreeListCategory* next() { return next_; } |
| + void set_next(FreeListCategory* next) { next_ = next; } |
| + |
| + // |type_|: The type of this free list category. |
| + FreeListCategoryType type_; |
| + |
| + // |available_|: Total available bytes in all blocks of this free list |
| + // category. |
|
Hannes Payer (out of office)
2016/03/11 09:56:24
remove space
Michael Lippautz
2016/03/11 10:19:12
Done.
|
| + int available_; |
| + |
| + // |top_|: Points to the top FreeSpace* in the free list category. |
| + FreeSpace* top_; |
| + |
| + FreeListCategory* prev_; |
| + FreeListCategory* next_; |
| + |
| + friend class FreeList; |
| + friend class PagedSpace; |
| +}; |
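To make the list structure concrete: each category is a singly linked stack of FreeSpace blocks headed by |top_|. A conceptual sketch of PickNodeFromList (simplified; the real implementation lives in spaces.cc, and the FreeSpace accessors next()/Size() are assumed from context):

    FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
      FreeSpace* node = top();
      if (node == nullptr) return nullptr;   // category is empty
      set_top(node->next());                 // pop the head block
      *node_size = node->Size();             // report the block's size
      available_ -= *node_size;              // fewer free bytes remain here
      return node;
    }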
| // MemoryChunk represents a memory region owned by a specific space. |
| // It is divided into the header and the body. Chunk start is always |
| @@ -407,7 +515,9 @@ class MemoryChunk { |
| + kPointerSize // AtomicValue parallel_compaction_ |
| + 2 * kPointerSize // AtomicNumber free-list statistics |
| + kPointerSize // AtomicValue next_chunk_ |
| - + kPointerSize; // AtomicValue prev_chunk_ |
| + + kPointerSize // AtomicValue prev_chunk_ |
| + // FreeListCategory categories_[kNumberOfCategories] |
| + + FreeListCategory::kSize * kNumberOfCategories; |
| // We add some more space to the computed header size to account for missing |
| // alignment requirements in our computation. |
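As a rough footprint check (assuming a 64-bit build where kIntSize is 4 and kPointerSize is 8):

    // FreeListCategory::kSize = 4 + 4 + 8 + 8 + 8 = 32 bytes per category.
    // With kNumberOfCategories == 6, the chunk header grows by 6 * 32 = 192
    // bytes, before the alignment slack mentioned in the comment above.
    static_assert(FreeListCategory::kSize == 2 * kIntSize + 3 * kPointerSize,
                  "per-category header footprint");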
| @@ -589,19 +699,6 @@ class MemoryChunk { |
| return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE); |
| } |
| - void MarkEvacuationCandidate() { |
| - DCHECK(!IsFlagSet(NEVER_EVACUATE)); |
| - DCHECK_NULL(old_to_old_slots_); |
| - DCHECK_NULL(typed_old_to_old_slots_); |
| - SetFlag(EVACUATION_CANDIDATE); |
| - } |
| - |
| - void ClearEvacuationCandidate() { |
| - DCHECK_NULL(old_to_old_slots_); |
| - DCHECK_NULL(typed_old_to_old_slots_); |
| - ClearFlag(EVACUATION_CANDIDATE); |
| - } |
| - |
| bool ShouldSkipEvacuationSlotRecording() { |
| return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0; |
| } |
| @@ -712,6 +809,8 @@ class MemoryChunk { |
| // prev_chunk_ holds a pointer of type MemoryChunk |
| AtomicValue<MemoryChunk*> prev_chunk_; |
| + FreeListCategory categories_[kNumberOfCategories]; |
| + |
| private: |
| void InitializeReservedMemory() { reservation_.Reset(); } |
| @@ -719,19 +818,6 @@ class MemoryChunk { |
| friend class MemoryChunkValidator; |
| }; |
| -enum FreeListCategoryType { |
| - kTiniest, |
| - kTiny, |
| - kSmall, |
| - kMedium, |
| - kLarge, |
| - kHuge, |
| - |
| - kFirstCategory = kTiniest, |
| - kLastCategory = kHuge, |
| - kNumberOfCategories = kLastCategory + 1 |
| -}; |
| - |
| // ----------------------------------------------------------------------------- |
| // A page is a memory chunk of a size 1MB. Large object pages may be larger. |
| // |
| @@ -835,6 +921,17 @@ class Page : public MemoryChunk { |
| available_in_free_list()); |
| } |
| + template <typename Callback> |
| + inline void ForAllFreeListCategories(Callback callback) { |
| + for (int i = kFirstCategory; i < kNumberOfCategories; i++) { |
| + callback(&categories_[i]); |
| + } |
| + } |
| + |
| + FreeListCategory* free_list_category(FreeListCategoryType type) { |
| + return &categories_[type]; |
| + } |
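Usage sketch for the per-page iteration above (lambda and variable names are illustrative):

    intptr_t free_bytes_on_page = 0;
    page->ForAllFreeListCategories([&free_bytes_on_page](FreeListCategory* c) {
      free_bytes_on_page += c->available();
    });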
| + |
| #define FRAGMENTATION_STATS_ACCESSORS(type, name) \ |
| type name() { return name##_.Value(); } \ |
| void set_##name(type name) { name##_.SetValue(name); } \ |
| @@ -849,6 +946,13 @@ class Page : public MemoryChunk { |
| void Print(); |
| #endif // DEBUG |
| + inline void MarkNeverAllocateForTesting(); |
| + inline void MarkEvacuationCandidate(); |
| + inline void ClearEvacuationCandidate(); |
| + |
| + private: |
| + inline void InitializeFreeListCategories(); |
| + |
| friend class MemoryAllocator; |
| }; |
| @@ -1488,12 +1592,6 @@ class AllocationStats BASE_EMBEDDED { |
| void ClearSize() { size_ = capacity_; } |
| - // Reset the allocation statistics (i.e., available = capacity with no wasted |
| - // or allocated bytes). |
| - void Reset() { |
| - size_ = 0; |
| - } |
| - |
| // Accessors for the allocation statistics. |
| intptr_t Capacity() { return capacity_; } |
| intptr_t MaxCapacity() { return max_capacity_; } |
| @@ -1520,13 +1618,13 @@ class AllocationStats BASE_EMBEDDED { |
| void ShrinkSpace(int size_in_bytes) { |
| capacity_ -= size_in_bytes; |
| size_ -= size_in_bytes; |
| - CHECK(size_ >= 0); |
| + CHECK_GE(size_, 0); |
| } |
| // Allocate from available bytes (available -> size). |
| void AllocateBytes(intptr_t size_in_bytes) { |
| size_ += size_in_bytes; |
| - CHECK(size_ >= 0); |
| + CHECK_GE(size_, 0); |
| } |
| // Free allocated bytes, making them available (size -> available). |
| @@ -1565,80 +1663,6 @@ class AllocationStats BASE_EMBEDDED { |
| intptr_t size_; |
| }; |
| - |
| -// A free list category maintains a linked list of free memory blocks. |
| -class FreeListCategory { |
| - public: |
| - FreeListCategory() : top_(nullptr), end_(nullptr), available_(0) {} |
| - |
| - void Initialize(FreeList* owner, FreeListCategoryType type) { |
| - owner_ = owner; |
| - type_ = type; |
| - } |
| - |
| - // Concatenates {category} into {this}. |
| - // |
| - // Note: Thread-safe. |
| - intptr_t Concatenate(FreeListCategory* category); |
| - |
| - void Reset(); |
| - |
| - void Free(FreeSpace* node, int size_in_bytes); |
| - |
| - // Pick a node from the list. |
| - FreeSpace* PickNodeFromList(int* node_size); |
| - |
| - // Pick a node from the list and compare it against {size_in_bytes}. If the |
| - // node's size is greater or equal return the node and null otherwise. |
| - FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size); |
| - |
| - // Search for a node of size {size_in_bytes}. |
| - FreeSpace* SearchForNodeInList(int size_in_bytes, int* node_size); |
| - |
| - intptr_t EvictFreeListItemsInList(Page* p); |
| - bool ContainsPageFreeListItemsInList(Page* p); |
| - |
| - void RepairFreeList(Heap* heap); |
| - |
| - bool IsEmpty() { return top() == nullptr; } |
| - |
| - FreeList* owner() { return owner_; } |
| - int available() const { return available_; } |
| - |
| -#ifdef DEBUG |
| - intptr_t SumFreeList(); |
| - int FreeListLength(); |
| - bool IsVeryLong(); |
| -#endif |
| - |
| - private: |
| - // For debug builds we accurately compute free lists lengths up until |
| - // {kVeryLongFreeList} by manually walking the list. |
| - static const int kVeryLongFreeList = 500; |
| - |
| - FreeSpace* top() { return top_.Value(); } |
| - void set_top(FreeSpace* top) { top_.SetValue(top); } |
| - |
| - FreeSpace* end() const { return end_; } |
| - void set_end(FreeSpace* end) { end_ = end; } |
| - |
| - // |type_|: The type of this free list category. |
| - FreeListCategoryType type_; |
| - |
| - // |top_|: Points to the top FreeSpace* in the free list category. |
| - AtomicValue<FreeSpace*> top_; |
| - |
| - // |end_|: Points to the end FreeSpace* in the free list category. |
| - FreeSpace* end_; |
| - |
| - // |available_|: Total available bytes in all blocks of this free list |
| - // category. |
| - int available_; |
| - |
| - // |owner_|: The owning free list of this category. |
| - FreeList* owner_; |
| -}; |
| - |
| // A free list maintaining free blocks of memory. The free list is organized in |
| // a way to encourage objects allocated around the same time to be near each |
| // other. The normal way to allocate is intended to be by bumping a 'top' |
| @@ -1683,19 +1707,13 @@ class FreeList { |
| explicit FreeList(PagedSpace* owner); |
| - // The method concatenates {other} into {this} and returns the added bytes, |
| - // including waste. |
| - // |
| - // Note: Thread-safe. |
| - intptr_t Concatenate(FreeList* other); |
| - |
| // Adds a node on the free list. The block of size {size_in_bytes} starting |
| // at {start} is placed on the free list. The return value is the number of |
| // bytes that were not added to the free list, because the freed memory block |
| // was too small. Bookkeeping information will be written to the block, i.e., |
| // its contents will be destroyed. The start address should be word aligned, |
| // and the size should be a non-zero multiple of the word size. |
| - int Free(Address start, int size_in_bytes); |
| + int Free(Address start, int size_in_bytes, FreeMode mode); |
| // Allocate a block of size {size_in_bytes} from the free list. The block is |
| // uninitialized. A failure is returned if no block is available. The size |
| @@ -1705,49 +1723,83 @@ class FreeList { |
| // Clear the free list. |
| void Reset(); |
| - void ResetStats() { wasted_bytes_ = 0; } |
| + void ResetStats() { |
| + wasted_bytes_.SetValue(0); |
| + ForAllFreeListCategories( |
| + [](FreeListCategory* category) { category->ResetStats(); }); |
| + } |
| // Return the number of bytes available on the free list. |
| intptr_t Available() { |
| intptr_t available = 0; |
| - for (int i = kFirstCategory; i < kNumberOfCategories; i++) { |
| - available += category_[i].available(); |
| - } |
| + ForAllFreeListCategories([&available](FreeListCategory* category) { |
| + available += category->available(); |
| + }); |
| return available; |
| } |
| - // The method tries to find a {FreeSpace} node of at least {size_in_bytes} |
| - // size in the free list category exactly matching the size. If no suitable |
| - // node could be found, the method falls back to retrieving a {FreeSpace} |
| - // from the large or huge free list category. |
| - // |
| - // Can be used concurrently. |
| - MUST_USE_RESULT FreeSpace* TryRemoveMemory(intptr_t hint_size_in_bytes); |
| - |
| bool IsEmpty() { |
| - for (int i = kFirstCategory; i < kNumberOfCategories; i++) { |
| - if (!category_[i].IsEmpty()) return false; |
| - } |
| - return true; |
| + bool empty = true; |
| + ForAllFreeListCategories([&empty](FreeListCategory* category) { |
| + if (!category->is_empty()) empty = false; |
| + }); |
| + return empty; |
| } |
| // Used after booting the VM. |
| void RepairLists(Heap* heap); |
| - intptr_t EvictFreeListItems(Page* p); |
| - bool ContainsPageFreeListItems(Page* p); |
| + intptr_t EvictFreeListItems(Page* page); |
| + bool ContainsPageFreeListItems(Page* page); |
| PagedSpace* owner() { return owner_; } |
| - intptr_t wasted_bytes() { return wasted_bytes_; } |
| - base::Mutex* mutex() { return &mutex_; } |
| + intptr_t wasted_bytes() { return wasted_bytes_.Value(); } |
| + |
| + template <typename Callback> |
| + void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) { |
| + FreeListCategory* current = categories_[type]; |
| + while (current != nullptr) { |
| + FreeListCategory* next = current->next(); |
| + callback(current); |
| + current = next; |
| + } |
| + } |
| + |
| + template <typename Callback> |
| + void ForAllFreeListCategories(Callback callback) { |
| + for (int i = kFirstCategory; i < kNumberOfCategories; i++) { |
| + ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback); |
| + } |
| + } |
| + |
| + bool AddCategory(FreeListCategory* category); |
| + void RemoveCategory(FreeListCategory* category); |
| + void PrintCategories(FreeListCategoryType type); |
| #ifdef DEBUG |
| - void Zap(); |
| intptr_t SumFreeLists(); |
| bool IsVeryLong(); |
| #endif |
| private: |
| + class FreeListCategoryIterator { |
| + public: |
| + FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type) |
| + : current_(free_list->categories_[type]) {} |
| + |
| + bool HasNext() { return current_ != nullptr; } |
| + |
| + FreeListCategory* Next() { |
| + DCHECK(HasNext()); |
| + FreeListCategory* tmp = current_; |
| + current_ = current_->next(); |
| + return tmp; |
| + } |
| + |
| + private: |
| + FreeListCategory* current_; |
| + }; |
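Usage sketch for the iterator (only reachable from within FreeList, since it is private; the loop body is illustrative):

    FreeListCategoryIterator it(this, kSmall);
    while (it.HasNext()) {
      FreeListCategory* current = it.Next();
      // e.g. try to pick a node from |current|, or evict its items.
    }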
| + |
| // The size range of blocks, in bytes. |
| static const int kMinBlockSize = 3 * kPointerSize; |
| static const int kMaxBlockSize = Page::kAllocatableMemory; |
| @@ -1763,11 +1815,19 @@ class FreeList { |
| static const int kLargeAllocationMax = kMediumListMax; |
| FreeSpace* FindNodeFor(int size_in_bytes, int* node_size); |
| - FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size); |
| - FreeListCategory* GetFreeListCategory(FreeListCategoryType category) { |
| - return &category_[category]; |
| - } |
| + // Walks all available categories for a given |type| and tries to retrieve |
| + // a node. Returns nullptr if the category is empty. |
| + FreeSpace* FindNodeIn(FreeListCategoryType type, int* node_size); |
| + |
| + // Tries to retrieve a node from the first category in a given |type|. |
| + // Returns nullptr if the category is empty. |
| + FreeSpace* TryFindNodeIn(FreeListCategoryType type, int* node_size, |
| + int minimum_size); |
| + |
| + // Searches a given |type| for a node of at least |minimum_size|. |
| + FreeSpace* SearchForNodeInList(FreeListCategoryType type, int* node_size, |
| + int minimum_size); |
| FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) { |
| if (size_in_bytes <= kTiniestListMax) { |
| @@ -1797,10 +1857,13 @@ class FreeList { |
| return kHuge; |
| } |
| + FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; } |
| + |
| PagedSpace* owner_; |
| - base::Mutex mutex_; |
| - intptr_t wasted_bytes_; |
| - FreeListCategory category_[kNumberOfCategories]; |
| + AtomicNumber<intptr_t> wasted_bytes_; |
| + FreeListCategory* categories_[kNumberOfCategories]; |
| + |
| + friend class FreeListCategory; |
| DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList); |
| }; |
| @@ -1902,7 +1965,6 @@ class LocalAllocationBuffer { |
| AllocationInfo allocation_info_; |
| }; |
| - |
| class PagedSpace : public Space { |
| public: |
| static const intptr_t kCompactionMemoryWanted = 500 * KB; |
| @@ -1959,11 +2021,6 @@ class PagedSpace : public Space { |
| ResetFreeListStatistics(); |
| } |
| - // Increases the number of available bytes of that space. |
| - void AddToAccountingStats(intptr_t bytes) { |
| - accounting_stats_.DeallocateBytes(bytes); |
| - } |
| - |
| // Available bytes without growing. These are the bytes on the free list. |
| // The bytes in the linear allocation area are not included in this total |
| // because updating the stats would slow down allocation. New pages are |
| @@ -2019,11 +2076,16 @@ class PagedSpace : public Space { |
| // If add_to_freelist is false then just accounting stats are updated and |
| // no attempt to add area to free list is made. |
| int Free(Address start, int size_in_bytes) { |
| - int wasted = free_list_.Free(start, size_in_bytes); |
| + int wasted = free_list_.Free(start, size_in_bytes, kLinkCategory); |
| accounting_stats_.DeallocateBytes(size_in_bytes); |
| return size_in_bytes - wasted; |
| } |
| + int UnaccountedFree(Address start, int size_in_bytes) { |
| + int wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory); |
| + return size_in_bytes - wasted; |
| + } |
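The two entry points differ only in bookkeeping: Free() passes kLinkCategory and charges the bytes to accounting_stats_, while UnaccountedFree() passes kDoNotLinkCategory and leaves both linking and accounting to the caller. A hedged usage sketch (the caller code is hypothetical and not shown in this hunk):

    space->Free(start, size_in_bytes);             // link category + update stats
    int usable = space->UnaccountedFree(start, size_in_bytes);
    // ... the caller later reconciles, presumably via something like
    // space->RelinkFreeListCategories(page) and its returned byte count.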
| + |
| void ResetFreeList() { free_list_.Reset(); } |
| // Set space allocation info. |
| @@ -2048,7 +2110,7 @@ class PagedSpace : public Space { |
| void IncreaseCapacity(int size); |
| // Releases an unused page and shrinks the space. |
| - void ReleasePage(Page* page, bool evict_free_list_items); |
| + void ReleasePage(Page* page); |
| // The dummy page that anchors the linked list of pages. |
| Page* anchor() { return &anchor_; } |
| @@ -2104,17 +2166,18 @@ class PagedSpace : public Space { |
| // sweeper. |
| virtual void RefillFreeList(); |
| - protected: |
| - void AddMemory(Address start, intptr_t size); |
| + FreeList* free_list() { return &free_list_; } |
| - void MoveOverFreeMemory(PagedSpace* other); |
| + base::Mutex* mutex() { return &space_mutex_; } |
| + inline void UnlinkFreeListCategories(Page* page); |
| + inline intptr_t RelinkFreeListCategories(Page* page); |
| + |
| + protected: |
| // PagedSpaces that should be included in snapshots have different, i.e., |
| // smaller, initial pages. |
| virtual bool snapshotable() { return true; } |
| - FreeList* free_list() { return &free_list_; } |
| - |
| bool HasPages() { return anchor_.next_page() != &anchor_; } |
| // Cleans up the space, frees all pages in this space except those belonging |
| @@ -2811,8 +2874,6 @@ class CompactionSpace : public PagedSpace { |
| bool is_local() override { return true; } |
| - void RefillFreeList() override; |
| - |
| protected: |
| // The space is temporary and not included in any snapshots. |
| bool snapshotable() override { return false; } |