Index: src/spaces.h
===================================================================
--- src/spaces.h	(revision 7267)
+++ src/spaces.h	(working copy)
@@ -34,6 +34,8 @@
 namespace v8 {
 namespace internal {
 
+class Isolate;
+
 // -----------------------------------------------------------------------------
 // Heap structures:
 //
@@ -241,7 +243,7 @@
   static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
 
   static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
-                                     kIntSize + kPointerSize;
+                                     kIntSize + kPointerSize + kPointerSize;
 
   // The start offset of the object area in a page. Aligned to both maps and
   // code alignment to be suitable for both.
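
The extra kPointerSize accounts for the `Heap* heap_` field added to the page header further down in this patch. A back-of-envelope check of the constant, as a sketch with illustrative names only (V8's own STATIC_CHECK macro would serve the same purpose):

    // Hand-maintained sum of the page header fields; a compile-time check
    // catches drift when a field such as heap_ is added. Illustrative only.
    const int kPointerSize = sizeof(void*);  // 8 on x64, 4 on ia32
    const int kIntSize = sizeof(int);

    // Pre-patch: three pointers and two ints. Post-patch: one more pointer.
    const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
                                kIntSize + kPointerSize + kPointerSize;

    // C++03-era compile-time assertion (same spirit as V8's STATIC_CHECK):
    typedef char PageHeaderSizeChecked[
        kPageHeaderSize == 4 * kPointerSize + 2 * kIntSize ? 1 : -1];
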
@@ -286,7 +288,7 @@
   // This invariant guarantees that after flipping flag meaning at the
   // beginning of scavenge all pages in use will be marked as having valid
   // watermark.
-  static inline void FlipMeaningOfInvalidatedWatermarkFlag();
+  static inline void FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap);
 
   // Returns true if the page allocation watermark was not altered during
   // scavenge.
@@ -312,11 +314,6 @@
   STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
                kAllocationWatermarkOffsetBits);
 
-  // This field contains the meaning of the WATERMARK_INVALIDATED flag.
-  // Instead of clearing this flag from all pages we just flip
-  // its meaning at the beginning of a scavenge.
-  static intptr_t watermark_invalidated_mark_;
-
   //---------------------------------------------------------------------------
   // Page header description.
   //
@@ -353,6 +350,8 @@
   // During scavenge collection this field is used to store allocation watermark
   // if it is altered during scavenge.
   Address mc_first_forwarded;
+
+  Heap* heap_;
 };
 
 
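The static `watermark_invalidated_mark_` is deleted here because the current meaning of the WATERMARK_INVALIDATED bit becomes per-heap state, which is why the flip function now takes a `Heap*`. A minimal sketch of the trick the removed comment describes, with invented names rather than the V8 implementation:

    #include <stdint.h>

    struct HeapLike {
      // Bit pattern that currently MEANS "watermark invalidated";
      // flipped once per scavenge instead of rewriting every page.
      uintptr_t watermark_invalidated_mark;
    };

    struct PageLike {
      static const uintptr_t kWatermarkInvalidatedBit = 1u << 0;
      uintptr_t flags;

      bool IsWatermarkValid(const HeapLike* heap) const {
        return (flags & kWatermarkInvalidatedBit) !=
               heap->watermark_invalidated_mark;
      }
      void InvalidateWatermark(const HeapLike* heap) {
        flags = (flags & ~kWatermarkInvalidatedBit) |
                heap->watermark_invalidated_mark;
      }
    };

    // If every page in use leaves scavenge storing the "invalidated" pattern,
    // one XOR at the start of the next scavenge revalidates all of them.
    inline void FlipMeaningOfInvalidatedWatermarkFlag(HeapLike* heap) {
      heap->watermark_invalidated_mark ^= PageLike::kWatermarkInvalidatedBit;
    }
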
@@ -360,11 +359,13 @@
 // Space is the abstract superclass for all allocation spaces.
 class Space : public Malloced {
  public:
-  Space(AllocationSpace id, Executability executable)
-      : id_(id), executable_(executable) {}
+  Space(Heap* heap, AllocationSpace id, Executability executable)
+      : heap_(heap), id_(id), executable_(executable) {}
 
   virtual ~Space() {}
 
+  Heap* heap() const { return heap_; }
+
   // Does the space need executable memory?
   Executability executable() { return executable_; }
 
@@ -397,6 +398,7 @@
   virtual bool ReserveSpace(int bytes) = 0;
 
  private:
+  Heap* heap_;
   AllocationSpace id_;
   Executability executable_;
 };
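
This is the pivot of the whole patch: every space is constructed against an owning `Heap` and exposes it via `heap()`, so code holding a space can reach per-isolate services without a process-wide singleton. A toy sketch of the threading pattern (stand-in names, not the V8 classes):

    class Heap;  // forward declaration, as spaces.h does for Isolate

    class SpaceLike {
     public:
      explicit SpaceLike(Heap* heap) : heap_(heap) {}
      virtual ~SpaceLike() {}
      Heap* heap() const { return heap_; }
     private:
      Heap* heap_;  // the owning heap, injected at construction
    };

    // Each subclass constructor grows a leading Heap* parameter and forwards
    // it, just as the SemiSpace/NewSpace/OldSpace constructors below do.
    class SemiSpaceLike : public SpaceLike {
     public:
      explicit SemiSpaceLike(Heap* heap) : SpaceLike(heap) {}
    };
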
@@ -409,19 +411,19 @@
 // displacements cover the entire 4GB virtual address space. On 64-bit
 // platforms, we support this using the CodeRange object, which reserves and
 // manages a range of virtual memory.
-class CodeRange : public AllStatic {
+class CodeRange {
  public:
   // Reserves a range of virtual memory, but does not commit any of it.
   // Can only be called once, at heap initialization time.
   // Returns false on failure.
-  static bool Setup(const size_t requested_size);
+  bool Setup(const size_t requested_size);
 
   // Frees the range of virtual memory, and frees the data structures used to
   // manage it.
-  static void TearDown();
+  void TearDown();
 
-  static bool exists() { return code_range_ != NULL; }
-  static bool contains(Address address) {
+  bool exists() { return code_range_ != NULL; }
+  bool contains(Address address) {
     if (code_range_ == NULL) return false;
     Address start = static_cast<Address>(code_range_->address());
     return start <= address && address < start + code_range_->size();
@@ -430,13 +432,15 @@
   // Allocates a chunk of memory from the large-object portion of
   // the code range. On platforms with no separate code range, should
   // not be called.
-  MUST_USE_RESULT static void* AllocateRawMemory(const size_t requested,
-                                                 size_t* allocated);
-  static void FreeRawMemory(void* buf, size_t length);
+  MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
+                                          size_t* allocated);
+  void FreeRawMemory(void* buf, size_t length);
 
  private:
+  CodeRange();
+
   // The reserved range of virtual memory that all code objects are put in.
-  static VirtualMemory* code_range_;
+  VirtualMemory* code_range_;
   // Plain old data class, just a struct plus a constructor.
   class FreeBlock {
    public:
@@ -452,20 +456,26 @@
   // Freed blocks of memory are added to the free list. When the allocation
   // list is exhausted, the free list is sorted and merged to make the new
   // allocation list.
-  static List<FreeBlock> free_list_;
+  List<FreeBlock> free_list_;
   // Memory is allocated from the free blocks on the allocation list.
   // The block at current_allocation_block_index_ is the current block.
-  static List<FreeBlock> allocation_list_;
-  static int current_allocation_block_index_;
+  List<FreeBlock> allocation_list_;
+  int current_allocation_block_index_;
 
   // Finds a block on the allocation list that contains at least the
   // requested amount of memory. If none is found, sorts and merges
   // the existing free memory blocks, and searches again.
   // If none can be found, terminates V8 with FatalProcessOutOfMemory.
-  static void GetNextAllocationBlock(size_t requested);
+  void GetNextAllocationBlock(size_t requested);
   // Compares the start addresses of two free blocks.
   static int CompareFreeBlockAddress(const FreeBlock* left,
                                      const FreeBlock* right);
+
+  friend class Isolate;
+
+  Isolate* isolate_;
+
+  DISALLOW_COPY_AND_ASSIGN(CodeRange);
 };
 
 
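The private constructor plus `friend class Isolate` means only an Isolate can create a CodeRange, replacing the old `AllStatic` process-wide usage with one reservation per isolate. A compilable sketch of that ownership shape; the `code_range()` accessor name is an assumption, not quoted from the patch:

    #include <stddef.h>

    class CodeRangeLike {
     public:
      bool Setup(size_t requested_size) { return requested_size != 0; }
      void TearDown() {}
     private:
      friend class IsolateLike;  // mirrors `friend class Isolate` above
      CodeRangeLike() {}         // private: only the isolate constructs one
    };

    class IsolateLike {
     public:
      CodeRangeLike* code_range() { return &code_range_; }
     private:
      CodeRangeLike code_range_;  // one code reservation per isolate
    };

    // Call sites migrate from CodeRange::contains(addr)
    // to isolate->code_range()->contains(addr).
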
@@ -493,14 +503,14 @@
 //
 
 
-class MemoryAllocator : public AllStatic {
+class MemoryAllocator {
  public:
   // Initializes its internal bookkeeping structures.
   // Max capacity of the total space and executable memory limit.
-  static bool Setup(intptr_t max_capacity, intptr_t capacity_executable);
+  bool Setup(intptr_t max_capacity, intptr_t capacity_executable);
 
   // Deletes valid chunks.
-  static void TearDown();
+  void TearDown();
 
   // Reserves an initial address range of virtual memory to be split between
   // the two new space semispaces, the old space, and the map space. The
@@ -511,7 +521,7 @@
   // address of the initial chunk if successful, with the side effect of
   // setting the initial chunk, or else NULL if unsuccessful and leaves the
   // initial chunk NULL.
-  static void* ReserveInitialChunk(const size_t requested);
+  void* ReserveInitialChunk(const size_t requested);
 
   // Commits pages from an as-yet-unmanaged block of virtual memory into a
   // paged space. The block should be part of the initial chunk reserved via
@@ -520,24 +530,24 @@
   // address is non-null and that it is big enough to hold at least one
   // page-aligned page. The call always succeeds, and num_pages is always
   // greater than zero.
-  static Page* CommitPages(Address start, size_t size, PagedSpace* owner,
-                           int* num_pages);
+  Page* CommitPages(Address start, size_t size, PagedSpace* owner,
+                    int* num_pages);
 
   // Commit a contiguous block of memory from the initial chunk. Assumes that
   // the address is not NULL, the size is greater than zero, and that the
   // block is contained in the initial chunk. Returns true if it succeeded
   // and false otherwise.
-  static bool CommitBlock(Address start, size_t size, Executability executable);
+  bool CommitBlock(Address start, size_t size, Executability executable);
 
   // Uncommit a contiguous block of memory [start..(start+size)[.
   // start is not NULL, the size is greater than zero, and the
   // block is contained in the initial chunk. Returns true if it succeeded
   // and false otherwise.
-  static bool UncommitBlock(Address start, size_t size);
+  bool UncommitBlock(Address start, size_t size);
 
   // Zaps a contiguous block of memory [start..(start+size)[ thus
   // filling it up with a recognizable non-NULL bit pattern.
-  static void ZapBlock(Address start, size_t size);
+  void ZapBlock(Address start, size_t size);
 
   // Attempts to allocate the requested (non-zero) number of pages from the
   // OS. Fewer pages might be allocated than requested. If it fails to
@@ -548,8 +558,8 @@
   // number of allocated pages is returned in the output parameter
   // allocated_pages. If the PagedSpace owner is executable and there is
   // a code range, the pages are allocated from the code range.
-  static Page* AllocatePages(int requested_pages, int* allocated_pages,
-                             PagedSpace* owner);
+  Page* AllocatePages(int requested_pages, int* allocated_pages,
+                      PagedSpace* owner);
 
   // Frees pages from a given page and after. Requires pages to be
   // linked in chunk-order (see comment for class).
@@ -558,10 +568,10 @@
   // Otherwise, the function searches a page after 'p' that is
   // the first page of a chunk. Pages after the found page
   // are freed and the function returns 'p'.
-  static Page* FreePages(Page* p);
+  Page* FreePages(Page* p);
 
   // Frees all pages owned by given space.
-  static void FreeAllPages(PagedSpace* space);
+  void FreeAllPages(PagedSpace* space);
 
   // Allocates and frees raw memory of certain size.
   // These are just thin wrappers around OS::Allocate and OS::Free,
@@ -569,96 +579,83 @@
   // If the flag is EXECUTABLE and a code range exists, the requested
   // memory is allocated from the code range. If a code range exists
   // and the freed memory is in it, the code range manages the freed memory.
-  MUST_USE_RESULT static void* AllocateRawMemory(const size_t requested,
-                                                 size_t* allocated,
-                                                 Executability executable);
-  static void FreeRawMemory(void* buf,
-                            size_t length,
-                            Executability executable);
-  static void PerformAllocationCallback(ObjectSpace space,
-                                        AllocationAction action,
-                                        size_t size);
+  MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
+                                          size_t* allocated,
+                                          Executability executable);
+  void FreeRawMemory(void* buf,
+                     size_t length,
+                     Executability executable);
+  void PerformAllocationCallback(ObjectSpace space,
+                                 AllocationAction action,
+                                 size_t size);
 
-  static void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
-                                          ObjectSpace space,
-                                          AllocationAction action);
-  static void RemoveMemoryAllocationCallback(
-      MemoryAllocationCallback callback);
-  static bool MemoryAllocationCallbackRegistered(
-      MemoryAllocationCallback callback);
+  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
+                                   ObjectSpace space,
+                                   AllocationAction action);
+  void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
+  bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
 
   // Returns the maximum available bytes of heaps.
-  static intptr_t Available() {
-    return capacity_ < size_ ? 0 : capacity_ - size_;
-  }
+  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
 
   // Returns allocated spaces in bytes.
-  static intptr_t Size() { return size_; }
+  intptr_t Size() { return size_; }
 
   // Returns the maximum available executable bytes of heaps.
-  static intptr_t AvailableExecutable() {
+  intptr_t AvailableExecutable() {
     if (capacity_executable_ < size_executable_) return 0;
     return capacity_executable_ - size_executable_;
   }
 
   // Returns allocated executable spaces in bytes.
-  static intptr_t SizeExecutable() { return size_executable_; }
+  intptr_t SizeExecutable() { return size_executable_; }
 
   // Returns maximum available bytes that the old space can have.
-  static intptr_t MaxAvailable() {
+  intptr_t MaxAvailable() {
     return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
   }
 
-  // Sanity check on a pointer.
-  static bool SafeIsInAPageChunk(Address addr);
-
   // Links two pages.
-  static inline void SetNextPage(Page* prev, Page* next);
+  inline void SetNextPage(Page* prev, Page* next);
 
   // Returns the next page of a given page.
-  static inline Page* GetNextPage(Page* p);
+  inline Page* GetNextPage(Page* p);
 
   // Checks whether a page belongs to a space.
-  static inline bool IsPageInSpace(Page* p, PagedSpace* space);
+  inline bool IsPageInSpace(Page* p, PagedSpace* space);
 
   // Returns the space that owns the given page.
-  static inline PagedSpace* PageOwner(Page* page);
+  inline PagedSpace* PageOwner(Page* page);
 
   // Finds the first/last page in the same chunk as a given page.
-  static Page* FindFirstPageInSameChunk(Page* p);
-  static Page* FindLastPageInSameChunk(Page* p);
+  Page* FindFirstPageInSameChunk(Page* p);
+  Page* FindLastPageInSameChunk(Page* p);
 
   // Relinks list of pages owned by space to make it chunk-ordered.
   // Returns new first and last pages of space.
   // Also returns last page in relinked list which has WasInUsedBeforeMC
   // flag set.
-  static void RelinkPageListInChunkOrder(PagedSpace* space,
-                                         Page** first_page,
-                                         Page** last_page,
-                                         Page** last_page_in_use);
+  void RelinkPageListInChunkOrder(PagedSpace* space,
+                                  Page** first_page,
+                                  Page** last_page,
+                                  Page** last_page_in_use);
 
 #ifdef ENABLE_HEAP_PROTECTION
   // Protect/unprotect a block of memory by marking it read-only/writable.
-  static inline void Protect(Address start, size_t size);
-  static inline void Unprotect(Address start, size_t size,
-                               Executability executable);
+  inline void Protect(Address start, size_t size);
+  inline void Unprotect(Address start, size_t size,
+                        Executability executable);
 
   // Protect/unprotect a chunk given a page in the chunk.
-  static inline void ProtectChunkFromPage(Page* page);
-  static inline void UnprotectChunkFromPage(Page* page);
+  inline void ProtectChunkFromPage(Page* page);
+  inline void UnprotectChunkFromPage(Page* page);
 #endif
 
 #ifdef DEBUG
   // Reports statistic info of the space.
-  static void ReportStatistics();
+  void ReportStatistics();
 #endif
 
-  static void AddToAllocatedChunks(Address addr, intptr_t size);
-  static void RemoveFromAllocatedChunks(Address addr, intptr_t size);
-  // Note: This only checks the regular chunks, not the odd-sized initial
-  // chunk.
-  static bool InAllocatedChunks(Address addr);
-
   // Due to encoding limitation, we can only have 8K chunks.
   static const int kMaxNofChunks = 1 << kPageSizeBits;
   // If a chunk has at least 16 pages, the maximum heap size is about
@@ -678,29 +675,21 @@
 #endif
 
  private:
+  MemoryAllocator();
+
   static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
   static const int kChunkSizeLog2 = kPagesPerChunkLog2 + kPageSizeBits;
-  static const int kChunkTableTopLevelEntries =
-      1 << (sizeof(intptr_t) * kBitsPerByte - kChunkSizeLog2 -
-            (kChunkTableLevels - 1) * kChunkTableBitsPerLevel);
 
-  // The chunks are not chunk-size aligned so for a given chunk-sized area of
-  // memory there can be two chunks that cover it.
-  static const int kChunkTableFineGrainedWordsPerEntry = 2;
-  static const uintptr_t kUnusedChunkTableEntry = 0;
-
   // Maximum space size in bytes.
-  static intptr_t capacity_;
+  intptr_t capacity_;
   // Maximum subset of capacity_ that can be executable
-  static intptr_t capacity_executable_;
+  intptr_t capacity_executable_;
 
-  // Top level table to track whether memory is part of a chunk or not.
-  static uintptr_t chunk_table_[kChunkTableTopLevelEntries];
+  // Allocated space size in bytes.
+  intptr_t size_;
 
-  // Allocated space size in bytes.
-  static intptr_t size_;
   // Allocated executable space size in bytes.
-  static intptr_t size_executable_;
+  intptr_t size_executable_;
 
   struct MemoryAllocationCallbackRegistration {
     MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
@@ -713,11 +702,11 @@
     AllocationAction action;
   };
   // A List of callback that are triggered when memory is allocated or free'd
-  static List<MemoryAllocationCallbackRegistration>
+  List<MemoryAllocationCallbackRegistration>
       memory_allocation_callbacks_;
 
   // The initial chunk of virtual memory.
-  static VirtualMemory* initial_chunk_;
+  VirtualMemory* initial_chunk_;
 
   // Allocated chunk info: chunk start address, chunk size, and owning space.
   class ChunkInfo BASE_EMBEDDED {
@@ -725,7 +714,8 @@
     ChunkInfo() : address_(NULL),
                   size_(0),
                   owner_(NULL),
-                  executable_(NOT_EXECUTABLE) {}
+                  executable_(NOT_EXECUTABLE),
+                  owner_identity_(FIRST_SPACE) {}
     inline void init(Address a, size_t s, PagedSpace* o);
     Address address() { return address_; }
     size_t size() { return size_; }
@@ -733,74 +723,60 @@
     // We save executability of the owner to allow using it
     // when collecting stats after the owner has been destroyed.
     Executability executable() const { return executable_; }
+    AllocationSpace owner_identity() const { return owner_identity_; }
 
    private:
     Address address_;
     size_t size_;
     PagedSpace* owner_;
     Executability executable_;
+    AllocationSpace owner_identity_;
   };
 
   // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
-  static List<ChunkInfo> chunks_;
-  static List<int> free_chunk_ids_;
-  static int max_nof_chunks_;
-  static int top_;
+  List<ChunkInfo> chunks_;
+  List<int> free_chunk_ids_;
+  int max_nof_chunks_;
+  int top_;
 
   // Push/pop a free chunk id onto/from the stack.
-  static void Push(int free_chunk_id);
-  static int Pop();
-  static bool OutOfChunkIds() { return top_ == 0; }
+  void Push(int free_chunk_id);
+  int Pop();
+  bool OutOfChunkIds() { return top_ == 0; }
 
   // Frees a chunk.
-  static void DeleteChunk(int chunk_id);
+  void DeleteChunk(int chunk_id);
 
-  // Helpers to maintain and query the chunk tables.
-  static void AddChunkUsingAddress(
-      uintptr_t chunk_start,        // Where the chunk starts.
-      uintptr_t chunk_index_base);  // Used to place the chunk in the tables.
-  static void RemoveChunkFoundUsingAddress(
-      uintptr_t chunk_start,        // Where the chunk starts.
-      uintptr_t chunk_index_base);  // Used to locate the entry in the tables.
-  // Controls whether the lookup creates intermediate levels of tables as
-  // needed.
-  enum CreateTables { kDontCreateTables, kCreateTablesAsNeeded };
-  static uintptr_t* AllocatedChunksFinder(uintptr_t* table,
-                                          uintptr_t address,
-                                          int bit_position,
-                                          CreateTables create_as_needed);
-  static void FreeChunkTables(uintptr_t* array, int length, int level);
-  static int FineGrainedIndexForAddress(uintptr_t address) {
-    int index = ((address >> kChunkSizeLog2) &
-                 ((1 << kChunkTableBitsPerLevel) - 1));
-    return index * kChunkTableFineGrainedWordsPerEntry;
-  }
-
-
   // Basic check whether a chunk id is in the valid range.
-  static inline bool IsValidChunkId(int chunk_id);
+  inline bool IsValidChunkId(int chunk_id);
 
   // Checks whether a chunk id identifies an allocated chunk.
-  static inline bool IsValidChunk(int chunk_id);
+  inline bool IsValidChunk(int chunk_id);
 
   // Returns the chunk id that a page belongs to.
-  static inline int GetChunkId(Page* p);
+  inline int GetChunkId(Page* p);
 
   // True if the address lies in the initial chunk.
-  static inline bool InInitialChunk(Address address);
+  inline bool InInitialChunk(Address address);
 
   // Initializes pages in a chunk. Returns the first page address.
   // This function and GetChunkId() are provided for the mark-compact
   // collector to rebuild page headers in the from space, which is
   // used as a marking stack and its page headers are destroyed.
-  static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
-                                      PagedSpace* owner);
+  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
+                               PagedSpace* owner);
 
-  static Page* RelinkPagesInChunk(int chunk_id,
-                                  Address chunk_start,
-                                  size_t chunk_size,
-                                  Page* prev,
-                                  Page** last_page_in_use);
+  Page* RelinkPagesInChunk(int chunk_id,
+                           Address chunk_start,
+                           size_t chunk_size,
+                           Page* prev,
+                           Page** last_page_in_use);
+
+  friend class Isolate;
+
+  Isolate* isolate_;
+
+  DISALLOW_COPY_AND_ASSIGN(MemoryAllocator);
 };
 
 
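Like CodeRange, MemoryAllocator loses `AllStatic`: a private constructor, `friend class Isolate`, and DISALLOW_COPY_AND_ASSIGN make it a per-isolate component. Its callback list is worth a sketch, since the declarations above only imply the dispatch protocol; the stand-in types below, and the bit-mask filter matching (consistent with the public embedder API), are assumptions rather than quotes from V8:

    #include <stddef.h>
    #include <vector>

    // Minimal stand-ins; V8's real ObjectSpace/AllocationAction are richer
    // bit-flag enums and the container is V8's own List<>.
    enum ObjectSpaceFlag      { kOldSpaceFlag = 1 << 0, kCodeSpaceFlag = 1 << 1 };
    enum AllocationActionFlag { kAllocate = 1 << 0, kFree = 1 << 1 };

    typedef void (*AllocCallback)(int space, int action, size_t size);

    struct Registration {
      AllocCallback callback;
      int space_filter;   // bit mask of spaces the callback cares about
      int action_filter;  // bit mask of actions the callback cares about
    };

    class AllocatorLike {
     public:
      void AddMemoryAllocationCallback(AllocCallback cb,
                                       int spaces, int actions) {
        Registration r = { cb, spaces, actions };
        callbacks_.push_back(r);
      }
      // Fires every registered callback whose space/action filters match.
      void PerformAllocationCallback(int space, int action, size_t size) {
        for (size_t i = 0; i < callbacks_.size(); ++i) {
          if ((callbacks_[i].space_filter & space) != 0 &&
              (callbacks_[i].action_filter & action) != 0) {
            callbacks_[i].callback(space, action, size);
          }
        }
      }
     private:
      std::vector<Registration> callbacks_;  // per-allocator, hence per-isolate
    };

    // Usage: allocator.AddMemoryAllocationCallback(&OnAlloc,
    //            kOldSpaceFlag | kCodeSpaceFlag, kAllocate);
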
@@ -1048,7 +1024,8 @@
 class PagedSpace : public Space {
  public:
   // Creates a space with a maximum capacity, and an id.
-  PagedSpace(intptr_t max_capacity,
+  PagedSpace(Heap* heap,
+             intptr_t max_capacity,
              AllocationSpace id,
              Executability executable);
 
@@ -1341,7 +1318,7 @@
 class SemiSpace : public Space {
  public:
   // Constructor.
-  SemiSpace() :Space(NEW_SPACE, NOT_EXECUTABLE) {
+  explicit SemiSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE) {
     start_ = NULL;
     age_mark_ = NULL;
   }
@@ -1508,7 +1485,10 @@
 class NewSpace : public Space {
  public:
   // Constructor.
-  NewSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {}
+  explicit NewSpace(Heap* heap)
+      : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
+        to_space_(heap),
+        from_space_(heap) {}
 
   // Sets up the new space using the given chunk.
   bool Setup(Address start, int size);
@@ -1909,10 +1889,11 @@
  public:
   // Creates an old space object with a given maximum capacity.
   // The constructor does not allocate pages from OS.
-  explicit OldSpace(intptr_t max_capacity,
-                    AllocationSpace id,
-                    Executability executable)
-      : PagedSpace(max_capacity, id, executable), free_list_(id) {
+  OldSpace(Heap* heap,
+           intptr_t max_capacity,
+           AllocationSpace id,
+           Executability executable)
+      : PagedSpace(heap, max_capacity, id, executable), free_list_(id) {
     page_extra_ = 0;
   }
 
@@ -1981,11 +1962,12 @@
 
 class FixedSpace : public PagedSpace {
  public:
-  FixedSpace(intptr_t max_capacity,
+  FixedSpace(Heap* heap,
+             intptr_t max_capacity,
              AllocationSpace id,
              int object_size_in_bytes,
              const char* name)
-      : PagedSpace(max_capacity, id, NOT_EXECUTABLE),
+      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
         object_size_in_bytes_(object_size_in_bytes),
         name_(name),
         free_list_(id, object_size_in_bytes) {
@@ -2059,8 +2041,11 @@
 class MapSpace : public FixedSpace {
  public:
   // Creates a map space object with a maximum capacity.
-  MapSpace(intptr_t max_capacity, int max_map_space_pages, AllocationSpace id)
-      : FixedSpace(max_capacity, id, Map::kSize, "map"),
+  MapSpace(Heap* heap,
+           intptr_t max_capacity,
+           int max_map_space_pages,
+           AllocationSpace id)
+      : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
         max_map_space_pages_(max_map_space_pages) {
     ASSERT(max_map_space_pages < kMaxMapPageIndex);
   }
@@ -2170,8 +2155,9 @@
 class CellSpace : public FixedSpace {
  public:
   // Creates a property cell space object with a maximum capacity.
-  CellSpace(intptr_t max_capacity, AllocationSpace id)
-      : FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}
+  CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
+      : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
+  {}
 
  protected:
 #ifdef DEBUG
@@ -2246,7 +2232,7 @@
 
 class LargeObjectSpace : public Space {
  public:
-  explicit LargeObjectSpace(AllocationSpace id);
+  LargeObjectSpace(Heap* heap, AllocationSpace id);
   virtual ~LargeObjectSpace() {}
 
   // Initializes internal data structures.
@@ -2263,9 +2249,7 @@
   MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes);
 
   // Available bytes for objects in this space.
-  intptr_t Available() {
-    return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
-  }
+  inline intptr_t Available();
 
   virtual intptr_t Size() {
     return size_;
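
Available() can no longer be defined in the class body: it has to reach the allocator through the owning heap's isolate, whose full definition spaces.h deliberately avoids pulling in (note the `class Isolate;` forward declaration at the top of the patch). The out-of-line definition plausibly looks like the sketch below; the isolate() and memory_allocator() accessor names are assumptions, not quoted from this patch:

    // Likely shape of the definition, e.g. in spaces-inl.h: the same
    // computation as the removed body, routed through the per-isolate
    // allocator instead of a static MemoryAllocator.
    intptr_t LargeObjectSpace::Available() {
      return LargeObjectChunk::ObjectSizeFor(
          heap()->isolate()->memory_allocator()->Available());
    }
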
@@ -2357,6 +2341,22 @@
 };
 
 
+#ifdef DEBUG
+struct CommentStatistic {
+  const char* comment;
+  int size;
+  int count;
+  void Clear() {
+    comment = NULL;
+    size = 0;
+    count = 0;
+  }
+  // Must be small, since an iteration is used for lookup.
+  static const int kMaxComments = 64;
+};
+#endif
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_SPACES_H_
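
The new CommentStatistic table is capped at kMaxComments entries precisely so that lookup can stay a linear scan, which is plenty for a DEBUG-only statistic. A hypothetical helper (not part of the patch) showing what such a scan over the struct defined above could look like:

    #include <stddef.h>  // NULL
    #include <string.h>  // strcmp

    // Assumes the CommentStatistic struct added by this patch, and a table
    // of CommentStatistic::kMaxComments entries owned by the caller.
    CommentStatistic* FindOrAddComment(CommentStatistic table[],
                                       const char* comment) {
      CommentStatistic* free_slot = NULL;
      for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
        if (table[i].comment != NULL && strcmp(table[i].comment, comment) == 0) {
          return &table[i];  // already tracked: caller bumps size/count
        }
        if (table[i].comment == NULL && free_slot == NULL) {
          free_slot = &table[i];  // remember the first empty slot
        }
      }
      if (free_slot != NULL) {
        free_slot->comment = comment;  // claim a slot for the new comment
        return free_slot;
      }
      return NULL;  // table full; caller can aggregate into a catch-all bucket
    }
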
|