Index: src/heap/spaces.cc
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 604d32152f1356678f856ba2e4f6df2b92201436..374189532277cd494d71b5166799c314362f390c 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -20,6 +20,12 @@
 namespace v8 {
 namespace internal {
+intptr_t GetCommitPageSize() {
    [Review comment by Michael Lippautz, 2016/11/14 09:24:44: "For consistency, this should go as a static functi…"]
+  if (FLAG_target_os_page_size != 0)
+    return FLAG_target_os_page_size;
+  else
+    return base::OS::CommitPageSize();
+}
 // ----------------------------------------------------------------------------
 // HeapObjectIterator
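
The new helper routes every commit-page-size query through one place: a non-zero --target_os_page_size flag overrides the value reported by the host OS, presumably so a build can size heap structures for a target platform whose page size differs from the host's. A minimal standalone sketch of the same pattern, not V8 code: target_os_page_size and DefaultCommitPageSize() below are stand-ins for FLAG_target_os_page_size and base::OS::CommitPageSize().

    #include <cstdint>
    #include <cstdio>

    // Stand-in for FLAG_target_os_page_size; 0 means "no override".
    static intptr_t target_os_page_size = 0;

    // Stand-in for base::OS::CommitPageSize().
    static intptr_t DefaultCommitPageSize() { return 4096; }

    static intptr_t GetCommitPageSize() {
      // A non-zero flag value wins over the host OS page size.
      if (target_os_page_size != 0) return target_os_page_size;
      return DefaultCommitPageSize();
    }

    int main() {
      std::printf("host page size: %ld\n",
                  static_cast<long>(GetCommitPageSize()));
      target_os_page_size = 16384;  // pretend the target uses 16 KiB pages
      std::printf("target page size: %ld\n",
                  static_cast<long>(GetCommitPageSize()));
      return 0;
    }
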
@@ -106,8 +112,7 @@ bool CodeRange::SetUp(size_t requested) {
     requested = kMinimumCodeRangeSize;
   }
-  const size_t reserved_area =
-      kReservedCodeRangePages * base::OS::CommitPageSize();
+  const size_t reserved_area = kReservedCodeRangePages * GetCommitPageSize();
   if (requested < (kMaximalCodeRangeSize - reserved_area))
     requested += reserved_area;
@@ -545,10 +550,9 @@ bool MemoryChunk::CommitArea(size_t requested) {
   size_t guard_size =
       IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
   size_t header_size = area_start() - address() - guard_size;
-  size_t commit_size =
-      RoundUp(header_size + requested, base::OS::CommitPageSize());
-  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
-                                  base::OS::CommitPageSize());
+  size_t commit_size = RoundUp(header_size + requested, GetCommitPageSize());
+  size_t committed_size =
+      RoundUp(header_size + (area_end() - area_start()), GetCommitPageSize());
   if (commit_size > committed_size) {
     // Commit size should be less than or equal to the reserved size.
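
As the hunk above shows, CommitArea() rounds both the requested commit size and the already-committed size up to whole commit pages, so the flag effectively changes the commit granularity. A standalone sketch of that rounding; RoundUpTo is a simplified stand-in for V8's RoundUp helper, and the header and request sizes are made up.

    #include <cstddef>
    #include <cstdio>
    #include <initializer_list>

    // Round x up to the next multiple of `multiple` (assumed > 0).
    static size_t RoundUpTo(size_t x, size_t multiple) {
      return ((x + multiple - 1) / multiple) * multiple;
    }

    int main() {
      const size_t header_size = 160;  // hypothetical chunk header bytes
      const size_t requested = 10000;  // bytes the caller wants committed
      for (size_t page_size : {size_t{4096}, size_t{16384}}) {
        std::printf("page %6zu -> commit_size %6zu\n", page_size,
                    RoundUpTo(header_size + requested, page_size));
      }
      return 0;
    }
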
@@ -616,8 +620,8 @@ void MemoryChunk::Unlink() {
 }
 void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
-  DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize()));
-  DCHECK_EQ(0, bytes_to_shrink % base::OS::CommitPageSize());
+  DCHECK_GE(bytes_to_shrink, static_cast<size_t>(GetCommitPageSize()));
+  DCHECK_EQ(0, bytes_to_shrink % GetCommitPageSize());
   Address free_start = chunk->area_end_ - bytes_to_shrink;
   // Don't adjust the size of the page. The area is just uncommitted but not
   // released.
@@ -627,7 +631,7 @@ void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
     if (chunk->reservation_.IsReserved())
       chunk->reservation_.Guard(chunk->area_end_);
     else
-      base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize());
+      base::OS::Guard(chunk->area_end_, GetCommitPageSize());
   }
 }
@@ -676,7 +680,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
   if (executable == EXECUTABLE) {
     chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
-                         base::OS::CommitPageSize()) +
+                         GetCommitPageSize()) +
                  CodePageGuardSize();
     // Check executable memory limit.
@@ -688,7 +692,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
     // Size of header (not executable) plus area (executable).
     size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
-                                 base::OS::CommitPageSize());
+                                 GetCommitPageSize());
     // Allocate executable memory either from code range or from the
     // OS.
 #ifdef V8_TARGET_ARCH_MIPS64
@@ -724,10 +728,10 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
       area_end = area_start + commit_area_size;
   } else {
     chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
-                         base::OS::CommitPageSize());
+                         GetCommitPageSize());
     size_t commit_size =
         RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
-                base::OS::CommitPageSize());
+                GetCommitPageSize());
     base =
         AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                               executable, &reservation);
@@ -804,7 +808,7 @@ size_t Page::ShrinkToHighWaterMark() {
   size_t unused = RoundDown(
       static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
-      base::OS::CommitPageSize());
+      GetCommitPageSize());
   if (unused > 0) {
     if (FLAG_trace_gc_verbose) {
       PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
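
Note the asymmetry with CommitArea(): Page::ShrinkToHighWaterMark() rounds the unused tail down, because only whole commit pages past the live data (and the FreeSpace filler it keeps) can be uncommitted, whereas committing rounds up. A standalone sketch with made-up sizes:

    #include <cstddef>
    #include <cstdio>

    // Round x down to a multiple of `multiple` (assumed > 0).
    static size_t RoundDownTo(size_t x, size_t multiple) {
      return (x / multiple) * multiple;
    }

    int main() {
      const size_t page_size = 4096;
      const size_t tail_bytes = 10000;  // bytes between filler and area_end()
      // Only two full pages (8192 bytes) can be released; the partial
      // third page must stay committed.
      std::printf("releasable: %zu of %zu bytes\n",
                  RoundDownTo(tail_bytes, page_size), tail_bytes);
      return 0;
    }
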
@@ -1002,11 +1006,11 @@ void MemoryAllocator::ReportStatistics() {
 size_t MemoryAllocator::CodePageGuardStartOffset() {
   // We are guarding code pages: the first OS page after the header
   // will be protected as non-writable.
-  return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
+  return RoundUp(Page::kObjectStartOffset, GetCommitPageSize());
 }
 size_t MemoryAllocator::CodePageGuardSize() {
-  return static_cast<int>(base::OS::CommitPageSize());
+  return static_cast<int>(GetCommitPageSize());
 }
 size_t MemoryAllocator::CodePageAreaStartOffset() {
@@ -1018,7 +1022,7 @@ size_t MemoryAllocator::CodePageAreaStartOffset() {
 size_t MemoryAllocator::CodePageAreaEndOffset() {
   // We are guarding code pages: the last OS page will be protected as
   // non-writable.
-  return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
+  return Page::kPageSize - static_cast<int>(GetCommitPageSize());
 }
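
Taken together, the CodePage*Offset() functions describe the layout of an executable chunk: header, then a one-page non-writable guard, then the code area, then a trailing guard page at the end of the chunk. A standalone sketch of that arithmetic; the constants are illustrative stand-ins, not V8's actual kObjectStartOffset or kPageSize.

    #include <cstddef>
    #include <cstdio>

    static const size_t kCommitPageSize = 4096;    // GetCommitPageSize() stand-in
    static const size_t kObjectStartOffset = 256;  // header size stand-in
    static const size_t kPageSize = 512 * 1024;    // whole-chunk size stand-in

    static size_t RoundUpTo(size_t x, size_t m) {
      return ((x + m - 1) / m) * m;
    }

    static size_t GuardStartOffset() {
      // First OS page after the header is protected as non-writable.
      return RoundUpTo(kObjectStartOffset, kCommitPageSize);
    }
    static size_t GuardSize() { return kCommitPageSize; }
    static size_t AreaStartOffset() { return GuardStartOffset() + GuardSize(); }
    static size_t AreaEndOffset() {
      // The last OS page of the chunk is also protected.
      return kPageSize - kCommitPageSize;
    }

    int main() {
      std::printf("guard start %zu, area [%zu, %zu), usable %zu bytes\n",
                  GuardStartOffset(), AreaStartOffset(), AreaEndOffset(),
                  AreaEndOffset() - AreaStartOffset());
      return 0;
    }
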
@@ -2892,7 +2896,7 @@ Address LargePage::GetAddressToShrink() {
     return 0;
   }
   size_t used_size = RoundUp((object->address() - address()) + object->Size(),
-                             base::OS::CommitPageSize());
+                             GetCommitPageSize());
   if (used_size < CommittedPhysicalMemory()) {
     return address() + used_size;
   }
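
Finally, LargePage::GetAddressToShrink() rounds the single large object's end up to a commit-page boundary; whatever is committed past that boundary is eligible to be released. A standalone sketch with made-up numbers, using plain offsets instead of V8 Address values:

    #include <cstddef>
    #include <cstdio>

    static size_t RoundUpTo(size_t x, size_t m) {
      return ((x + m - 1) / m) * m;
    }

    int main() {
      const size_t page_size = 4096;
      const size_t object_offset = 2048;  // object start relative to the page
      const size_t object_size = 150000;  // bytes occupied by the object
      const size_t committed = 163840;    // currently committed bytes
      size_t used = RoundUpTo(object_offset + object_size, page_size);
      if (used < committed) {
        std::printf("shrink from offset %zu, freeing %zu bytes\n", used,
                    committed - used);
      } else {
        std::printf("nothing to shrink\n");
      }
      return 0;
    }
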