Index: src/spaces.cc
===================================================================
--- src/spaces.cc (revision 13384)
+++ src/spaces.cc (working copy)
@@ -207,9 +207,10 @@
 }
-
 Address CodeRange::AllocateRawMemory(const size_t requested,
-                                     size_t* allocated) {
+                                     size_t* allocated,
+                                     size_t initial_commit_size) {
+  ASSERT(initial_commit_size <= requested);
   ASSERT(current_allocation_block_index_ < allocation_list_.length());
   if (requested > allocation_list_[current_allocation_block_index_].size) {
     // Find an allocation block large enough. This function call may
@@ -229,7 +230,7 @@
   ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
   if (!MemoryAllocator::CommitCodePage(code_range_,
                                        current.start,
-                                       *allocated)) {
+                                       initial_commit_size)) {
     *allocated = 0;
     return NULL;
   }
@@ -242,6 +243,19 @@
 }
+bool CodeRange::RecommitRawMemory(Address start, size_t size) {
[danno 2013/01/16 14:48:54] If you make the changes in MemoryChunk to support …
[haitao.feng 2013/01/17 05:34:01] Done.
+  ASSERT(reinterpret_cast<int64_t>(start) % MemoryChunk::kAlignment ==
+         MemoryAllocator::CodePageAreaStartOffset());
+
+  // Recommit page body (executable).
+  if (!code_range_->Commit(start, size, true)) return false;
+  // Append a guard page.
+  if (!code_range_->Guard(start + size)) return false;
[danno 2013/01/16 14:48:54] Guard pages are managed by the MemoryChunk (not he…
[haitao.feng 2013/01/17 05:34:01] Done.
+
+  return true;
+}
+
+
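Per the exchange above ("Done."), the guard-page call was removed from this
method in a later patch set. A minimal sketch of what the resolved version
might look like, assuming the owning MemoryChunk places the guard page itself
(illustrative, not the landed code):

    // Sketch only: recommit the body as executable and leave guard-page
    // management to the MemoryChunk that owns this region.
    bool CodeRange::RecommitRawMemory(Address start, size_t size) {
      ASSERT(reinterpret_cast<int64_t>(start) % MemoryChunk::kAlignment ==
             MemoryAllocator::CodePageAreaStartOffset());
      return code_range_->Commit(start, size, true);
    }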
 void CodeRange::FreeRawMemory(Address address, size_t length) {
   ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
   free_list_.Add(FreeBlock(address, length));
@@ -346,8 +360,7 @@
   if (!reservation.IsReserved()) return NULL;
   size_ += reservation.size();
-  Address base = RoundUp(static_cast<Address>(reservation.address()),
-                         alignment);
+  Address base = static_cast<Address>(reservation.address());
   controller->TakeControl(&reservation);
   return base;
 }
@@ -355,18 +368,20 @@
 Address MemoryAllocator::AllocateAlignedMemory(size_t size,
                                                size_t alignment,
+                                               size_t initial_commit_size,
                                                Executability executable,
                                                VirtualMemory* controller) {
+  ASSERT(initial_commit_size <= size);
   VirtualMemory reservation;
   Address base = ReserveAlignedMemory(size, alignment, &reservation);
   if (base == NULL) return NULL;
   if (executable == EXECUTABLE) {
-    if (!CommitCodePage(&reservation, base, size)) {
+    if (!CommitCodePage(&reservation, base, initial_commit_size)) {
[danno 2013/01/16 14:48:54] CommitCodePage should take both size and initial_c…
[haitao.feng 2013/01/17 05:34:01] Done.
       base = NULL;
     }
   } else {
-    if (!reservation.Commit(base, size, false)) {
+    if (!reservation.Commit(base, initial_commit_size, false)) {
       base = NULL;
     }
   }
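danno's comment suggests that CommitCodePage carry both the full reserved size
and the initially committed size, since the full size is what positions the
trailing guard page. A hedged sketch of such a signature (hypothetical; the
follow-up patch set is not shown on this page):

    // Hypothetical revised helper: size places the guard page, while
    // initial_commit_size bounds the pages committed up front.
    static bool CommitCodePage(VirtualMemory* vm,
                               Address start,
                               size_t size,
                               size_t initial_commit_size);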
@@ -470,6 +485,35 @@
 }
+bool MemoryChunk::RecommitBody(size_t body_size, Executability executable) {
[danno 2013/01/16 14:48:54] I think I'd prefer that the MemoryChunk remembered …
[haitao.feng 2013/01/17 05:34:01] I am using area_end_ - area_start_ to remember "co…
+  ASSERT(area_start_ + body_size <= area_end_);
+  if (reservation_.IsReserved()) {
+    if (executable == EXECUTABLE) {
+      // Recommit page body (executable).
+      if (!reservation_.Commit(area_start_, body_size, true)) return false;
+      // Append a guard page.
+      if (!reservation_.Guard(area_start_ + body_size)) return false;
[danno 2013/01/16 14:48:54] As mentioned above, the guard page should always b…
[haitao.feng 2013/01/17 05:34:01] Done.
+    } else {
+      if (!reservation_.Commit(area_start_, body_size, false)) return false;
+    }
+  } else {
+    ASSERT(heap_->isolate()->code_range()->exists() &&
+           (executable == EXECUTABLE));
+    // Recommit page body (executable).
+    if (!heap_->isolate()->code_range()->RecommitRawMemory(area_start_,
[danno 2013/01/16 14:48:54] If you change this method to be called "CommitBody…
[haitao.feng 2013/01/17 05:34:01] Done.
+                                                           body_size)) {
+      return false;
+    }
+  }
+
+  if (Heap::ShouldZapGarbage()) {
[danno 2013/01/16 14:48:54] Just zap the delta of uncommitted pages.
[haitao.feng 2013/01/17 05:34:01] Done.
+    heap_->isolate()->memory_allocator()->ZapBlock(area_start_, body_size);
+  }
+
+  return true;
+}
+
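Taken together, the review threads on this method ask for a rename to
CommitBody, for the committed size to be remembered as area_end_ - area_start_,
for guard pages to stay under MemoryChunk's control, and for zapping only the
delta. A sketch of what that might look like, assuming commit-page-aligned
sizes (an illustration of the suggestions, not the landed patch):

    // Sketch only: CommitBody per the review comments above. The committed
    // body size is tracked implicitly as area_end_ - area_start_.
    bool MemoryChunk::CommitBody(size_t body_size, Executability executable) {
      size_t committed = static_cast<size_t>(area_end_ - area_start_);
      if (body_size <= committed) return true;  // Already committed.

      Address start = area_start_ + committed;
      size_t delta = body_size - committed;
      if (reservation_.IsReserved()) {
        if (!reservation_.Commit(start, delta, executable == EXECUTABLE)) {
          return false;
        }
      } else {
        ASSERT(heap_->isolate()->code_range()->exists() &&
               (executable == EXECUTABLE));
        if (!heap_->isolate()->code_range()->RecommitRawMemory(start, delta)) {
          return false;
        }
      }

      if (Heap::ShouldZapGarbage()) {
        // Zap only the delta of freshly committed pages.
        heap_->isolate()->memory_allocator()->ZapBlock(start, delta);
      }

      area_end_ = area_start_ + body_size;  // Remember the new committed size.
      return true;
    }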
+
 void MemoryChunk::InsertAfter(MemoryChunk* other) {
   next_chunk_ = other->next_chunk_;
   prev_chunk_ = other;
@@ -491,9 +535,12 @@
 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
+                                            intptr_t commit_size,
                                             Executability executable,
                                             Space* owner) {
-  size_t chunk_size;
+  ASSERT(commit_size <= body_size);
+
+  size_t chunk_size, initial_commit_size;
[danno 2013/01/16 14:48:54] nit: declare initial_commit_size in the scope that …
   Heap* heap = isolate_->heap();
   Address base = NULL;
   VirtualMemory reservation;
@@ -512,10 +559,15 @@
       return NULL;
     }
+    initial_commit_size = RoundUp(CodePageAreaStartOffset() + commit_size,
[danno 2013/01/16 14:48:54] I don't think you need to round this up if you mak…
[danno 2013/01/16 14:48:54] As noted above: size_t initial_commit_size = ...
[haitao.feng 2013/01/17 05:34:01] Done.
[haitao.feng 2013/01/17 05:34:01] Done.
+                                  OS::CommitPageSize()) + CodePageGuardSize();
+
     // Allocate executable memory either from code range or from the
     // OS.
     if (isolate_->code_range()->exists()) {
-      base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
+      base = isolate_->code_range()->AllocateRawMemory(chunk_size,
+                                                       &chunk_size,
+                                                       initial_commit_size);
       ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
                        MemoryChunk::kAlignment));
       if (base == NULL) return NULL;
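On the nit above, a sketch with initial_commit_size declared in the scope that
uses it; the RoundUp is kept here only because the rest of danno's comment on
it is truncated (illustrative only):

    // Sketch: declare the commit size locally in the executable branch.
    size_t initial_commit_size =
        RoundUp(CodePageAreaStartOffset() + commit_size, OS::CommitPageSize()) +
        CodePageGuardSize();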
@@ -525,6 +577,7 @@
     } else {
       base = AllocateAlignedMemory(chunk_size,
                                    MemoryChunk::kAlignment,
+                                   initial_commit_size,
                                    executable,
                                    &reservation);
       if (base == NULL) return NULL;
@@ -534,22 +587,24 @@
     if (Heap::ShouldZapGarbage()) {
       ZapBlock(base, CodePageGuardStartOffset());
-      ZapBlock(base + CodePageAreaStartOffset(), body_size);
+      ZapBlock(base + CodePageAreaStartOffset(), commit_size);
     }
     area_start = base + CodePageAreaStartOffset();
     area_end = area_start + body_size;
   } else {
     chunk_size = MemoryChunk::kObjectStartOffset + body_size;
+    initial_commit_size = MemoryChunk::kObjectStartOffset + commit_size;
[danno 2013/01/16 14:48:54] As noted above: size_t initial_commit_size = ...
[haitao.feng 2013/01/17 05:34:01] Done.
     base = AllocateAlignedMemory(chunk_size,
                                  MemoryChunk::kAlignment,
+                                 initial_commit_size,
                                  executable,
                                  &reservation);
     if (base == NULL) return NULL;
     if (Heap::ShouldZapGarbage()) {
-      ZapBlock(base, chunk_size);
+      ZapBlock(base, commit_size);
     }
     area_start = base + Page::kObjectStartOffset;
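The same nit applies in the non-executable branch; the scoped form would be
simply (a sketch of the suggested cleanup):

    // Sketch: declared where it is used, per the review nit.
    size_t initial_commit_size = MemoryChunk::kObjectStartOffset + commit_size;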
@@ -580,7 +635,7 @@
 Page* MemoryAllocator::AllocatePage(intptr_t size,
                                     PagedSpace* owner,
                                     Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(size, executable, owner);
+  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
   if (chunk == NULL) return NULL;
@@ -591,7 +646,10 @@
 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                               Space* owner,
                                               Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
+  MemoryChunk* chunk = AllocateChunk(object_size,
+                                     object_size,
+                                     executable,
+                                     owner);
   if (chunk == NULL) return NULL;
   return LargePage::Initialize(isolate_->heap(), chunk);
 }
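Both call sites above pass the same value for the body and commit sizes, so
pages and large pages stay fully committed up front. A caller wanting lazy
commitment would pass a smaller second argument; a hypothetical use (names
illustrative, not from the patch):

    // Hypothetical: reserve body_size bytes but commit only first_commit of
    // them; later growth would go through the recommit path shown earlier.
    MemoryChunk* chunk = AllocateChunk(body_size, first_commit,
                                       EXECUTABLE, owner);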