Index: src/heap/spaces.h |
diff --git a/src/heap/spaces.h b/src/heap/spaces.h |
index a2e92ff533fa62c9cd48ff22662d05479681ad5e..7a2be4c1cbf92c26ef1edd994a9cf58bcd41faa2 100644 |
--- a/src/heap/spaces.h |
+++ b/src/heap/spaces.h |
@@ -1230,12 +1230,31 @@ class MemoryAllocator { |
kRegular, |
kPooled, |
}; |
+ |
enum FreeMode { |
kFull, |
kPreFreeAndQueue, |
kPooledAndQueue, |
}; |
+ static int CodePageGuardStartOffset(); |
[Review comment — Michael Lippautz, 2016/10/04 19:44:50] |
Just moved up. size_t'ing these constants is an exercise for a separate CL. |
|
+ |
+ static int CodePageGuardSize(); |
+ |
+ static int CodePageAreaStartOffset(); |
+ |
+ static int CodePageAreaEndOffset(); |
+ |
+ static int CodePageAreaSize() { |
+ return CodePageAreaEndOffset() - CodePageAreaStartOffset(); |
+ } |
+ |
+ static int PageAreaSize(AllocationSpace space) { |
+ DCHECK_NE(LO_SPACE, space); |
+ return (space == CODE_SPACE) ? CodePageAreaSize() |
+ : Page::kAllocatableMemory; |
+ } |
+ |
explicit MemoryAllocator(Isolate* isolate); |
// Initializes its internal bookkeeping structures. |
@@ -1261,26 +1280,26 @@ class MemoryAllocator { |
bool CanFreeMemoryChunk(MemoryChunk* chunk); |
// Returns allocated spaces in bytes. |
- intptr_t Size() { return size_.Value(); } |
+ size_t Size() { return size_.Value(); } |
// Returns allocated executable spaces in bytes. |
- intptr_t SizeExecutable() { return size_executable_.Value(); } |
+ size_t SizeExecutable() { return size_executable_.Value(); } |
// Returns the maximum available bytes of heaps. |
- intptr_t Available() { |
- intptr_t size = Size(); |
+ size_t Available() { |
+ const size_t size = Size(); |
return capacity_ < size ? 0 : capacity_ - size; |
} |
// Returns the maximum available executable bytes of heaps. |
- intptr_t AvailableExecutable() { |
- intptr_t executable_size = SizeExecutable(); |
+ size_t AvailableExecutable() { |
+ const size_t executable_size = SizeExecutable(); |
if (capacity_executable_ < executable_size) return 0; |
return capacity_executable_ - executable_size; |
} |
// Returns maximum available bytes that the old space can have. |
- intptr_t MaxAvailable() { |
+ size_t MaxAvailable() { |
return (Available() / Page::kPageSize) * Page::kAllocatableMemory; |
} |
@@ -1291,11 +1310,6 @@ class MemoryAllocator { |
address >= highest_ever_allocated_.Value(); |
} |
-#ifdef DEBUG |
- // Reports statistic info of the space. |
- void ReportStatistics(); |
-#endif |
- |
// Returns a MemoryChunk in which the memory region from commit_area_size to |
// reserve_area_size of the chunk area is reserved but not committed, it |
// could be committed later by calling MemoryChunk::CommitArea. |
@@ -1333,24 +1347,6 @@ class MemoryAllocator { |
// filling it up with a recognizable non-NULL bit pattern. |
void ZapBlock(Address start, size_t size); |
- static int CodePageGuardStartOffset(); |
- |
- static int CodePageGuardSize(); |
- |
- static int CodePageAreaStartOffset(); |
- |
- static int CodePageAreaEndOffset(); |
- |
- static int CodePageAreaSize() { |
- return CodePageAreaEndOffset() - CodePageAreaStartOffset(); |
- } |
- |
- static int PageAreaSize(AllocationSpace space) { |
- DCHECK_NE(LO_SPACE, space); |
- return (space == CODE_SPACE) ? CodePageAreaSize() |
- : Page::kAllocatableMemory; |
- } |
- |
MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm, |
Address start, size_t commit_size, |
size_t reserved_size); |
@@ -1358,6 +1354,11 @@ class MemoryAllocator { |
CodeRange* code_range() { return code_range_; } |
Unmapper* unmapper() { return &unmapper_; } |
+#ifdef DEBUG |
+ // Reports statistic info of the space. |
+ void ReportStatistics(); |
+#endif |
+ |
private: |
// PreFree logically frees the object, i.e., it takes care of the size |
// bookkeeping and calls the allocation callback. |
@@ -1371,28 +1372,6 @@ class MemoryAllocator { |
template <typename SpaceType> |
MemoryChunk* AllocatePagePooled(SpaceType* owner); |
- Isolate* isolate_; |
- |
- CodeRange* code_range_; |
- |
- // Maximum space size in bytes. |
- intptr_t capacity_; |
- // Maximum subset of capacity_ that can be executable |
- intptr_t capacity_executable_; |
- |
- // Allocated space size in bytes. |
- base::AtomicNumber<intptr_t> size_; |
- // Allocated executable space size in bytes. |
- base::AtomicNumber<intptr_t> size_executable_; |
- |
- // We keep the lowest and highest addresses allocated as a quick way |
- // of determining that pointers are outside the heap. The estimate is |
- // conservative, i.e. not all addrsses in 'allocated' space are allocated |
- // to our heap. The range is [lowest, highest[, inclusive on the low end |
- // and exclusive on the high end. |
- base::AtomicValue<void*> lowest_ever_allocated_; |
- base::AtomicValue<void*> highest_ever_allocated_; |
- |
// Initializes pages in a chunk. Returns the first page address. |
// This function and GetChunkId() are provided for the mark-compact |
// collector to rebuild page headers in the from space, which is |
@@ -1413,6 +1392,27 @@ class MemoryAllocator { |
} while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high)); |
} |
+ Isolate* isolate_; |
+ CodeRange* code_range_; |
+ |
+ // Maximum space size in bytes. |
+ size_t capacity_; |
+ // Maximum subset of capacity_ that can be executable |
+ size_t capacity_executable_; |
+ |
+ // Allocated space size in bytes. |
+ base::AtomicNumber<size_t> size_; |
+ // Allocated executable space size in bytes. |
+ base::AtomicNumber<size_t> size_executable_; |
+ |
+ // We keep the lowest and highest addresses allocated as a quick way |
+ // of determining that pointers are outside the heap. The estimate is |
+ // conservative, i.e. not all addresses in 'allocated' space are allocated |
+ // to our heap. The range is [lowest, highest[, inclusive on the low end |
+ // and exclusive on the high end. |
+ base::AtomicValue<void*> lowest_ever_allocated_; |
+ base::AtomicValue<void*> highest_ever_allocated_; |
+ |
base::VirtualMemory last_chunk_; |
Unmapper unmapper_; |