Index: src/spaces.h
===================================================================
--- src/spaces.h (revision 4449)
+++ src/spaces.h (working copy)
@@ -167,9 +167,18 @@
return 0 == (OffsetFrom(a) & kPageAlignmentMask);
}
+ // True if this page was in use before the current compaction started.
+ // The result is valid only for pages owned by paged spaces and
+ // only after PagedSpace::PrepareForMarkCompact was called.
+ inline bool WasInUseBeforeMC();
+
+ inline void SetWasInUseBeforeMC(bool was_in_use);
+
// True if this page is a large object page.
- bool IsLargeObjectPage() { return (is_normal_page & 0x1) == 0; }
+ inline bool IsLargeObjectPage();
+ inline void SetIsLargeObjectPage(bool is_large_object_page);
+
// Returns the offset of a given address to this page.
INLINE(int Offset(Address a)) {
int offset = static_cast<int>(a - address());
@@ -244,6 +253,14 @@
// Maximum object size that fits in a page.
static const int kMaxHeapObjectSize = kObjectAreaSize;
+ enum PageFlag {
+ IS_NORMAL_PAGE = 1 << 0,
+ WAS_IN_USE_BEFORE_MC = 1 << 1
+ };
+
+ inline bool GetPageFlag(PageFlag flag);
+ inline void SetPageFlag(PageFlag flag, bool value);
+
//---------------------------------------------------------------------------
// Page header description.
//
@@ -262,7 +279,8 @@
// second word *may* (if the page start and large object chunk start are
// the same) contain the large object chunk size. In either case, the
// low-order bit for large object pages will be cleared.
- int is_normal_page;
+ // For normal pages this word is used to store various page flags.
+ int flags;
// The following fields may overlap with remembered set, they can only
// be used in the mark-compact collector when remembered set is not
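
A minimal sketch, not part of this patch, of how the flag-based accessors declared above could sit on top of the new flags word; the actual definitions would live with the other Page inlines, presumably in spaces-inl.h:

bool Page::GetPageFlag(PageFlag flag) {
  return (flags & flag) != 0;
}

void Page::SetPageFlag(PageFlag flag, bool value) {
  if (value) {
    flags |= flag;
  } else {
    flags &= ~flag;
  }
}

bool Page::WasInUseBeforeMC() {
  return GetPageFlag(WAS_IN_USE_BEFORE_MC);
}

void Page::SetWasInUseBeforeMC(bool was_in_use) {
  SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use);
}

bool Page::IsLargeObjectPage() {
  // Large object pages keep the low-order bit of this word cleared (see
  // the page header description above), so IS_NORMAL_PAGE is inverted.
  return !GetPageFlag(IS_NORMAL_PAGE);
}

void Page::SetIsLargeObjectPage(bool is_large_object_page) {
  SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
}
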
@@ -407,7 +425,14 @@
//
// The memory allocator also allocates chunks for the large object space, but
// they are managed by the space itself. The new space does not expand.
+//
+// The fact that pages for paged spaces are allocated and deallocated in chunks
+// induces a constraint on the order of pages in linked lists. We say that
+// pages are linked in chunk order if and only if every two pages that are
+// consecutive in the same chunk are also consecutive in the linked list.
+//
+
class MemoryAllocator : public AllStatic {
public:
// Initializes its internal bookkeeping structures.
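
To make the chunk-order definition concrete, here is an illustrative checker, not part of the patch, written only against declarations that already exist in this header (Page::next_page(), Page::kPageSize, MemoryAllocator::FindLastPageInSameChunk()) and assuming, as elsewhere in this file, that pages of a chunk are laid out contiguously at Page::kPageSize intervals:

// True if the page list starting at 'first' is linked in chunk order:
// whenever a page is not the last page of its chunk, the page that
// physically follows it inside the chunk also follows it in the list.
static bool IsChunkOrdered(Page* first) {
  for (Page* p = first; p->is_valid(); p = p->next_page()) {
    if (p != MemoryAllocator::FindLastPageInSameChunk(p)) {
      Page* next_in_chunk = Page::FromAddress(p->address() + Page::kPageSize);
      if (p->next_page() != next_in_chunk) return false;
    }
  }
  return true;
}
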
@@ -466,13 +491,18 @@
static Page* AllocatePages(int requested_pages, int* allocated_pages,
PagedSpace* owner);
- // Frees pages from a given page and after. If 'p' is the first page
- // of a chunk, pages from 'p' are freed and this function returns an
- // invalid page pointer. Otherwise, the function searches a page
- // after 'p' that is the first page of a chunk. Pages after the
- // found page are freed and the function returns 'p'.
+ // Frees pages from a given page and after. Requires pages to be
+ // linked in chunk order (see the class comment).
+ // If 'p' is the first page of a chunk, pages from 'p' are freed
+ // and this function returns an invalid page pointer.
+ // Otherwise, the function searches for a page after 'p' that is
+ // the first page of a chunk. Pages after the found page
+ // are freed and the function returns 'p'.
static Page* FreePages(Page* p);
+ // Frees all pages owned by the given space.
+ static void FreeAllPages(PagedSpace* space);
+
// Allocates and frees raw memory of certain size.
// These are just thin wrappers around OS::Allocate and OS::Free,
// but keep track of allocated bytes as part of heap.
@@ -511,6 +541,15 @@
static Page* FindFirstPageInSameChunk(Page* p);
static Page* FindLastPageInSameChunk(Page* p);
+ // Relinks the list of pages owned by the space to make it chunk-ordered.
+ // Returns the new first and last pages of the space. Also returns the
+ // last page in the relinked list which has the WasInUseBeforeMC flag set.
+ static void RelinkPageListInChunkOrder(PagedSpace* space,
+ Page** first_page,
+ Page** last_page,
+ Page** last_page_in_use);
+
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect a block of memory by marking it read-only/writable.
static inline void Protect(Address start, size_t size);
@@ -599,6 +638,12 @@
// used as a marking stack and its page headers are destroyed.
static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
PagedSpace* owner);
+
+ static Page* RelinkPagesInChunk(int chunk_id,
+ Address chunk_start,
+ int chunk_size,
+ Page* prev,
+ Page** last_page_in_use);
};
@@ -880,7 +925,7 @@
void ClearRSet();
// Prepares for a mark-compact GC.
- virtual void PrepareForMarkCompact(bool will_compact) = 0;
+ virtual void PrepareForMarkCompact(bool will_compact);
virtual Address PageAllocationTop(Page* page) = 0;
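
PrepareForMarkCompact now has a default implementation in the base class; its body lives in spaces.cc and is not shown in this patch. A plausible sketch, assuming it simply restores chunk order using the declarations added elsewhere in this diff (RelinkPageListInChunkOrder, page_list_is_chunk_ordered_, first_page_/last_page_), could look like:

void PagedSpace::PrepareForMarkCompact(bool will_compact) {
  if (will_compact) {
    // A compacting collection releases pages in whole chunks, so relink
    // the page list into chunk order and remember which pages were in
    // use before the collection (via the WasInUseBeforeMC page flag).
    Page* last_in_use = NULL;
    MemoryAllocator::RelinkPageListInChunkOrder(this,
                                                &first_page_,
                                                &last_page_,
                                                &last_in_use);
    // 'last_in_use' marks where allocation stopped; a full implementation
    // could use it to reset the allocation top (e.g. via SetTop below).
    page_list_is_chunk_ordered_ = true;
  }
}
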
@@ -920,6 +965,16 @@
// Used by ReserveSpace.
virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
+ // Frees all pages in the range from prev (exclusive) to last (inclusive).
+ // Freed pages are moved to the end of the page list.
+ void FreePages(Page* prev, Page* last);
+
+ // Sets the space allocation info.
+ void SetTop(Address top, Address limit) {
+ allocation_info_.top = top;
+ allocation_info_.limit = limit;
+ }
+
// ---------------------------------------------------------------------------
// Mark-compact collection support functions
@@ -968,6 +1023,9 @@
static void ResetCodeStatistics();
#endif
+ // Returns the page of the allocation pointer.
+ Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
+
protected:
// Maximum capacity of this space.
int max_capacity_;
@@ -982,6 +1040,10 @@
// Expand and Shrink.
Page* last_page_;
+ // True if pages owned by this space are linked in chunk order.
+ // See the comment for class MemoryAllocator for the definition of
+ // chunk order.
+ bool page_list_is_chunk_ordered_;
+
// Normal allocation information.
AllocationInfo allocation_info_;
@@ -1043,8 +1105,6 @@
void DoPrintRSet(const char* space_name);
#endif
private:
- // Returns the page of the allocation pointer.
- Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
// Returns a pointer to the page of the relocation pointer.
Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
@@ -1671,10 +1731,15 @@
// Give a block of memory to the space's free list. It might be added to
// the free list or accounted as waste.
- void Free(Address start, int size_in_bytes) {
- int wasted_bytes = free_list_.Free(start, size_in_bytes);
+ // If add_to_freelist is false, only the accounting stats are updated;
+ // no attempt is made to add the area to the free list.
+ void Free(Address start, int size_in_bytes, bool add_to_freelist) {
accounting_stats_.DeallocateBytes(size_in_bytes);
- accounting_stats_.WasteBytes(wasted_bytes);
+
+ if (add_to_freelist) {
+ int wasted_bytes = free_list_.Free(start, size_in_bytes);
+ accounting_stats_.WasteBytes(wasted_bytes);
+ }
}
// Prepare for full garbage collection. Resets the relocation pointer and
@@ -1736,8 +1801,12 @@
int object_size_in_bytes() { return object_size_in_bytes_; }
// Give a fixed sized block of memory to the space's free list.
- void Free(Address start) {
- free_list_.Free(start);
+ // If add_to_freelist is false, only the accounting stats are updated;
+ // no attempt is made to add the area to the free list.
+ void Free(Address start, bool add_to_freelist) {
+ if (add_to_freelist) {
+ free_list_.Free(start);
+ }
accounting_stats_.DeallocateBytes(object_size_in_bytes_);
}
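
A short usage sketch for the new add_to_freelist parameter; the caller and variable names here are illustrative, not from the patch:

// 'space' is an OldSpace*; 'start' and 'size_in_bytes' describe a dead
// region found while sweeping a page.
space->Free(start, size_in_bytes, true);   // region becomes reusable

// If the page holding the region is about to be released wholesale
// (e.g. through PagedSpace::FreePages), adding the region to the free
// list would leave dangling free-list entries, so only fix the accounting:
space->Free(start, size_in_bytes, false);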