Chromium Code Reviews| Index: src/spaces.h |
| =================================================================== |
| --- src/spaces.h (revision 10407) |
| +++ src/spaces.h (working copy) |
| @@ -502,11 +502,9 @@ |
| static const int kObjectStartOffset = kBodyOffset - 1 + |
| (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); |
| - size_t size() const { return size_; } |
| + intptr_t size() const { return size_; } |
| - void set_size(size_t size) { |
| - size_ = size; |
| - } |
| + void set_size(size_t size) { size_ = size; } |
| Executability executable() { |
| return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; |
| @@ -658,7 +656,7 @@ |
| Address ObjectAreaStart() { return address() + kObjectStartOffset; } |
| // Returns the end address (exclusive) of the object area in this page. |
| - Address ObjectAreaEnd() { return address() + Page::kPageSize; } |
| + Address ObjectAreaEnd() { return address() + size(); } |
| // Checks whether an address is page aligned. |
| static bool IsAlignedToPageSize(Address a) { |
| @@ -677,6 +675,10 @@ |
| return address() + offset; |
| } |
| + // Expand the committed area for pages that are small. This |
| + // happens primarily when the VM is newly booted. |
| + void CommitMore(intptr_t space_needed); |
| + |
| // --------------------------------------------------------------------- |
| // Page size in bytes. This must be a multiple of the OS page size. |
| @@ -846,12 +848,10 @@ |
| FreeBlock(Address start_arg, size_t size_arg) |
| : start(start_arg), size(size_arg) { |
| ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); |
| - ASSERT(size >= static_cast<size_t>(Page::kPageSize)); |
| } |
| FreeBlock(void* start_arg, size_t size_arg) |
| : start(static_cast<Address>(start_arg)), size(size_arg) { |
| ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); |
| - ASSERT(size >= static_cast<size_t>(Page::kPageSize)); |
| } |
| Address start; |
| @@ -947,7 +947,9 @@ |
| void TearDown(); |
| - Page* AllocatePage(PagedSpace* owner, Executability executable); |
| + Page* AllocatePage(intptr_t object_area_size, |
| + PagedSpace* owner, |
| + Executability executable); |
| LargePage* AllocateLargePage(intptr_t object_size, |
| Executability executable, |
| @@ -956,10 +958,14 @@ |
| void Free(MemoryChunk* chunk); |
| // Returns the maximum available bytes of heaps. |
| - intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; } |
| + intptr_t Available() { |
| + return capacity_ < memory_allocator_reserved_ ? |
| + 0 : |
| + capacity_ - memory_allocator_reserved_; |
| + } |
| // Returns allocated spaces in bytes. |
| - intptr_t Size() { return size_; } |
| + intptr_t Size() { return memory_allocator_reserved_; } |
| // Returns the maximum available executable bytes of heaps. |
| intptr_t AvailableExecutable() { |
| @@ -981,6 +987,7 @@ |
| #endif |
| MemoryChunk* AllocateChunk(intptr_t body_size, |
| + intptr_t committed_body_size, |
| Executability executable, |
| Space* space); |
| @@ -988,6 +995,7 @@ |
| size_t alignment, |
| VirtualMemory* controller); |
| Address AllocateAlignedMemory(size_t requested, |
| + size_t committed, |
| size_t alignment, |
| Executability executable, |
| VirtualMemory* controller); |
| @@ -1007,6 +1015,12 @@ |
| // and false otherwise. |
| bool UncommitBlock(Address start, size_t size); |
| + void AllocationBookkeeping(Space* owner, |
| + Address base, |
| + intptr_t reserved_size, |
| + intptr_t committed_size, |
| + Executability executable); |
| + |
| // Zaps a contiguous block of memory [start..(start+size)[ thus |
| // filling it up with a recognizable non-NULL bit pattern. |
| void ZapBlock(Address start, size_t size); |
| @@ -1034,7 +1048,7 @@ |
| size_t capacity_executable_; |
| // Allocated space size in bytes. |
| - size_t size_; |
| + size_t memory_allocator_reserved_; |
| // Allocated executable space size in bytes. |
| size_t size_executable_; |
| @@ -1379,9 +1393,15 @@ |
| static const int kMinBlockSize = 3 * kPointerSize; |
| static const int kMaxBlockSize = Page::kMaxHeapObjectSize; |
| - FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size); |
| + FreeListNode* PickNodeFromList(FreeListNode** list, |
| + int* node_size, |
| + int minimum_size); |
| - FreeListNode* FindNodeFor(int size_in_bytes, int* node_size); |
| + FreeListNode* FindNodeFor(int size_in_bytes, int* node_size, Address limit); |
| + FreeListNode* FindAbuttingNode(int size_in_bytes, |
| + int* node_size, |
| + Address limit, |
| + FreeListNode** list_head); |
| PagedSpace* owner_; |
| Heap* heap_; |
| @@ -1481,6 +1501,8 @@ |
| // free bytes that were not found at all due to lazy sweeping. |
| virtual intptr_t Waste() { return accounting_stats_.Waste(); } |
| + virtual int ObjectAlignment() { return kPointerSize; } |
|
Vyacheslav Egorov (Chromium)
2012/01/16 17:02:54
kObjectAlignment instead of kPointerSize?
Erik Corry
2012/01/17 11:37:22
Done.
|
| + |
| // Returns the allocation pointer in this space. |
| Address top() { return allocation_info_.top; } |
| Address limit() { return allocation_info_.limit; } |
| @@ -1495,7 +1517,7 @@ |
| // the free list or accounted as waste. |
| // If add_to_freelist is false then just accounting stats are updated and |
| // no attempt to add area to free list is made. |
| - int Free(Address start, int size_in_bytes) { |
| + int AddToFreeLists(Address start, int size_in_bytes) { |
| int wasted = free_list_.Free(start, size_in_bytes); |
| accounting_stats_.DeallocateBytes(size_in_bytes - wasted); |
| return size_in_bytes - wasted; |
| @@ -1503,6 +1525,7 @@ |
| // Set space allocation info. |
| void SetTop(Address top, Address limit) { |
| + ASSERT(top == NULL || top >= Page::FromAddress(top - 1)->ObjectAreaStart()); |
| ASSERT(top == limit || |
| Page::FromAddress(top) == Page::FromAddress(limit - 1)); |
| allocation_info_.top = top; |
| @@ -1573,6 +1596,7 @@ |
| return !first_unswept_page_->is_valid(); |
| } |
| + inline bool HasAPage() { return anchor_.next_page() != &anchor_; } |
| Page* FirstPage() { return anchor_.next_page(); } |
| Page* LastPage() { return anchor_.prev_page(); } |
| @@ -1645,12 +1669,6 @@ |
| // Normal allocation information. |
| AllocationInfo allocation_info_; |
| - // Bytes of each page that cannot be allocated. Possibly non-zero |
| - // for pages in spaces with only fixed-size objects. Always zero |
| - // for pages in spaces with variable sized objects (those pages are |
| - // padded with free-list nodes). |
| - int page_extra_; |
| - |
| bool was_swept_conservatively_; |
| // The first page to be swept when the lazy sweeper advances. Is set |
| @@ -1662,10 +1680,11 @@ |
| // done conservatively. |
| intptr_t unswept_free_bytes_; |
| - // Expands the space by allocating a fixed number of pages. Returns false if |
| - // it cannot allocate requested number of pages from OS, or if the hard heap |
| - // size limit has been hit. |
| - bool Expand(); |
| + // Expands the space by allocating a page. Returns false if it cannot |
| + // allocate a page from OS, or if the hard heap size limit has been hit. The |
| + // new page will have at least enough committed space to satisfy the object |
| size indicated by the allocation_size argument. |
| + bool Expand(intptr_t allocation_size); |
| // Generic fast case allocation function that tries linear allocation at the |
| // address denoted by top in allocation_info_. |
| @@ -2325,15 +2344,8 @@ |
| intptr_t max_capacity, |
| AllocationSpace id, |
| Executability executable) |
| - : PagedSpace(heap, max_capacity, id, executable) { |
| - page_extra_ = 0; |
| - } |
| + : PagedSpace(heap, max_capacity, id, executable) { } |
| - // The limit of allocation for a page in this space. |
| - virtual Address PageAllocationLimit(Page* page) { |
| - return page->ObjectAreaEnd(); |
| - } |
| - |
| public: |
| TRACK_MEMORY("OldSpace") |
| }; |
| @@ -2359,17 +2371,12 @@ |
| const char* name) |
| : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE), |
| object_size_in_bytes_(object_size_in_bytes), |
| - name_(name) { |
| - page_extra_ = Page::kObjectAreaSize % object_size_in_bytes; |
| - } |
| + name_(name) { } |
| - // The limit of allocation for a page in this space. |
| - virtual Address PageAllocationLimit(Page* page) { |
| - return page->ObjectAreaEnd() - page_extra_; |
| - } |
| - |
| int object_size_in_bytes() { return object_size_in_bytes_; } |
| + virtual int ObjectAlignment() { return object_size_in_bytes_; } |
|
Vyacheslav Egorov (Chromium)
2012/01/16 17:02:54
for this to be object alignment we have to guarant
Erik Corry
2012/01/17 11:37:22
No, the alignment is relative to the ObjectAreaSta
|
| + |
| // Prepares for a mark-compact GC. |
| virtual void PrepareForMarkCompact(); |