Chromium Code Reviews

Index: src/spaces.cc
===================================================================
--- src/spaces.cc (revision 13384)
+++ src/spaces.cc (working copy)
@@ -207,17 +207,18 @@
 }
 
-
-Address CodeRange::AllocateRawMemory(const size_t requested,
+Address CodeRange::AllocateRawMemory(const size_t requested_size,
+                                     const size_t commit_size,
                                      size_t* allocated) {
+  ASSERT(commit_size <= requested_size);
   ASSERT(current_allocation_block_index_ < allocation_list_.length());
-  if (requested > allocation_list_[current_allocation_block_index_].size) {
+  if (requested_size > allocation_list_[current_allocation_block_index_].size) {
     // Find an allocation block large enough.  This function call may
     // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
-    GetNextAllocationBlock(requested);
+    GetNextAllocationBlock(requested_size);
   }
   // Commit the requested memory at the start of the current allocation block.
-  size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
+  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
   FreeBlock current = allocation_list_[current_allocation_block_index_];
   if (aligned_requested >= (current.size - Page::kPageSize)) {
     // Don't leave a small free block, useless for a large object or chunk.
@@ -227,9 +228,10 @@
   }
   ASSERT(*allocated <= current.size);
   ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
-  if (!MemoryAllocator::CommitCodePage(code_range_,
-                                       current.start,
-                                       *allocated)) {
+  if (!MemoryAllocator::CommitExecutableMemory(code_range_,
+                                               current.start,
+                                               commit_size,
+                                               *allocated)) {
     *allocated = 0;
     return NULL;
   }
@@ -242,6 +244,12 @@
 }
 
 
+bool CodeRange::CommitRawMemory(Address start, size_t length) {
+  // Commit page body (executable).
+  return code_range_->Commit(start, length, true);
+}
+
+
 void CodeRange::FreeRawMemory(Address address, size_t length) {
   ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
   free_list_.Add(FreeBlock(address, length));
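
The hunks above split CodeRange allocation into two sizes: address space for requested_size is claimed up front, but only commit_size of it is backed by accessible memory, and the new CodeRange::CommitRawMemory backs more of the range later on demand. As a standalone illustration of that reserve/commit split, here is a minimal POSIX/Linux sketch; mmap and mprotect stand in for V8's VirtualMemory, and the names are this sketch's, not V8's:

    #include <sys/mman.h>
    #include <cassert>
    #include <cstddef>

    // Reserve reserve_size bytes of address space, but make only the first
    // commit_size bytes usable. PROT_NONE pages consume address space, not RAM.
    void* ReserveAndCommit(size_t reserve_size, size_t commit_size) {
      assert(commit_size <= reserve_size);
      void* base = mmap(NULL, reserve_size, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (base == MAP_FAILED) return NULL;
      // Commit only the prefix that is needed immediately.
      if (mprotect(base, commit_size, PROT_READ | PROT_WRITE) != 0) {
        munmap(base, reserve_size);
        return NULL;
      }
      return base;
    }

A PROT_NONE mapping costs address space but no physical memory, which is exactly the property this patch relies on to make large reservations cheap.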
@@ -346,27 +354,31 @@
   if (!reservation.IsReserved()) return NULL;
   size_ += reservation.size();
-  Address base = RoundUp(static_cast<Address>(reservation.address()),
-                         alignment);
+  Address base = static_cast<Address>(reservation.address());
   controller->TakeControl(&reservation);
   return base;
 }
 
 
-Address MemoryAllocator::AllocateAlignedMemory(size_t size,
+Address MemoryAllocator::AllocateAlignedMemory(size_t requested_size,

danno, 2013/01/17 16:00:55:
s/requested_size/reserve_size/

haitao.feng, 2013/01/18 12:59:26:
Done.

+                                               size_t commit_size,
                                                size_t alignment,
                                                Executability executable,
                                                VirtualMemory* controller) {
+  ASSERT(commit_size <= requested_size);
   VirtualMemory reservation;
-  Address base = ReserveAlignedMemory(size, alignment, &reservation);
+  Address base = ReserveAlignedMemory(requested_size, alignment, &reservation);
   if (base == NULL) return NULL;
 
   if (executable == EXECUTABLE) {
-    if (!CommitCodePage(&reservation, base, size)) {
+    if (!CommitExecutableMemory(&reservation,
+                                base,
+                                commit_size,
+                                requested_size)) {
       base = NULL;
     }
   } else {
-    if (!reservation.Commit(base, size, false)) {
+    if (!reservation.Commit(base, commit_size, false)) {
       base = NULL;
     }
   }
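
Two details in this range are worth noting. The new ASSERT makes the contract explicit (you can never commit more than you reserved), and the first hunk drops the caller-side RoundUp of the reservation base, so ReserveAlignedMemory is now expected to hand back an already-aligned address. A common way to produce one, sketched here under that assumption (this is not necessarily what V8's platform layer does), is to over-reserve by the alignment and trim the misaligned head and spare tail:

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdint>

    // Reserve `size` bytes aligned to `alignment` (a power of two that is a
    // multiple of the page size) by reserving size + alignment bytes and
    // unmapping the misaligned head and the leftover tail.
    void* ReserveAligned(size_t size, size_t alignment) {
      size_t padded = size + alignment;
      void* raw = mmap(NULL, padded, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (raw == MAP_FAILED) return NULL;
      uintptr_t start = reinterpret_cast<uintptr_t>(raw);
      uintptr_t base = (start + alignment - 1) & ~(alignment - 1);
      size_t head = base - start;
      size_t tail = padded - head - size;
      if (head != 0) munmap(raw, head);  // drop the unaligned head
      if (tail != 0) munmap(reinterpret_cast<void*>(base + size), tail);
      return reinterpret_cast<void*>(base);
    }

Trimming works because the alignment is a multiple of the page size, so the head and tail are themselves page-aligned and can be unmapped independently.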
@@ -470,6 +482,34 @@
 }
 
 
+bool MemoryChunk::CommitBody(size_t body_size, Executability executable) {

danno, 2013/01/17 16:00:55:
I don't think that CommitBody should take the executable parameter.

+  ASSERT(body_size <= size_ - (area_start_ - address()) -
+         (executable == EXECUTABLE ? MemoryAllocator::CodePageGuardSize() : 0));
+
+  // Already committed, no uncommitment.
+  if (body_size <= (area_end_ - area_start_)) return true;

danno, 2013/01/17 16:00:55:
I think you should actually call UnCommit if the body shrinks.

haitao.feng, 2013/01/18 12:59:26:
Done.

+
+  size_t length = body_size - (area_end_ - area_start_);
+  if (reservation_.IsReserved()) {
+    if (!reservation_.Commit(area_end_, length, executable == EXECUTABLE)) {
+      return false;
+    }
+  } else {
+    CodeRange* code_range = heap_->isolate()->code_range();
+    ASSERT(code_range->exists() && (executable == EXECUTABLE));
+    if (!code_range->CommitRawMemory(area_end_, length)) return false;
+  }
+
+  if (Heap::ShouldZapGarbage()) {
+    heap_->isolate()->memory_allocator()->ZapBlock(area_end_, length);
+  }
+
+  area_end_ = area_start_ + body_size;
+
+  return true;
+}
+
+
 void MemoryChunk::InsertAfter(MemoryChunk* other) {
   next_chunk_ = other->next_chunk_;
   prev_chunk_ = other;
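
CommitBody is the growth path that the reserve/commit split enables: it commits only the bytes between the old and the new area_end_, either through the chunk's own VirtualMemory reservation or, for code chunks carved out of the CodeRange, through the new CommitRawMemory. The POSIX analogue of the step it performs is a single mprotect on the tail being grown (a sketch, not V8 code):

    #include <sys/mman.h>
    #include <cstddef>

    // Make `length` more bytes usable, starting at the old end of the
    // committed body, inside a range previously reserved with PROT_NONE.
    bool GrowCommittedBody(void* old_area_end, size_t length, bool executable) {
      int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
      return mprotect(old_area_end, length, prot) == 0;
    }

Zapping the newly committed bytes afterwards, as the patch does under Heap::ShouldZapGarbage, helps catch reads of uninitialized heap memory.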
@@ -490,9 +530,12 @@
 }
 
 
-MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
+MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_body_size,
+                                            intptr_t commit_body_size,
                                             Executability executable,
                                             Space* owner) {
+  ASSERT(commit_body_size <= reserve_body_size);
+
   size_t chunk_size;
   Heap* heap = isolate_->heap();
   Address base = NULL;
@@ -501,7 +544,7 @@
   Address area_end = NULL;
 
   if (executable == EXECUTABLE) {
-    chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
+    chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_body_size,
                          OS::CommitPageSize()) + CodePageGuardSize();
 
     // Check executable memory limit.
@@ -515,7 +558,9 @@
     // Allocate executable memory either from code range or from the
     // OS.
     if (isolate_->code_range()->exists()) {
-      base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
+      base = isolate_->code_range()->AllocateRawMemory(chunk_size,
+                                                       commit_body_size,
+                                                       &chunk_size);
       ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
                        MemoryChunk::kAlignment));
       if (base == NULL) return NULL;
@@ -524,6 +569,7 @@
       size_executable_ += chunk_size;
     } else {
       base = AllocateAlignedMemory(chunk_size,
+                                   commit_body_size,
                                    MemoryChunk::kAlignment,
                                    executable,
                                    &reservation);
@@ -534,14 +580,16 @@
     if (Heap::ShouldZapGarbage()) {
       ZapBlock(base, CodePageGuardStartOffset());
-      ZapBlock(base + CodePageAreaStartOffset(), body_size);
+      ZapBlock(base + CodePageAreaStartOffset(), commit_body_size);
     }
 
     area_start = base + CodePageAreaStartOffset();
-    area_end = area_start + body_size;
+    area_end = area_start + commit_body_size;
   } else {
-    chunk_size = MemoryChunk::kObjectStartOffset + body_size;
+    chunk_size = MemoryChunk::kObjectStartOffset + reserve_body_size;
+    size_t commit_size = MemoryChunk::kObjectStartOffset + commit_body_size;
     base = AllocateAlignedMemory(chunk_size,
+                                 commit_size,
                                  MemoryChunk::kAlignment,
                                  executable,
                                  &reservation);
@@ -549,13 +597,15 @@
     if (base == NULL) return NULL;
 
     if (Heap::ShouldZapGarbage()) {
-      ZapBlock(base, chunk_size);
+      ZapBlock(base, MemoryChunk::kObjectStartOffset + commit_body_size);
     }
 
     area_start = base + Page::kObjectStartOffset;
-    area_end = base + chunk_size;
+    area_end = area_start + commit_body_size;
   }
 
+  // Use chunk_size for statistics and callbacks because we assume that they
+  // treat reserved but not-yet-committed memory regions of chunks as allocated.
   isolate_->counters()->memory_allocated()->
       Increment(static_cast<int>(chunk_size));
| @@ -580,7 +630,7 @@ |
| Page* MemoryAllocator::AllocatePage(intptr_t size, |
| PagedSpace* owner, |
| Executability executable) { |
| - MemoryChunk* chunk = AllocateChunk(size, executable, owner); |
| + MemoryChunk* chunk = AllocateChunk(size, size, executable, owner); |
| if (chunk == NULL) return NULL; |
| @@ -591,7 +641,10 @@ |
| LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, |
| Space* owner, |
| Executability executable) { |
| - MemoryChunk* chunk = AllocateChunk(object_size, executable, owner); |
| + MemoryChunk* chunk = AllocateChunk(object_size, |
| + object_size, |
| + executable, |
| + owner); |
| if (chunk == NULL) return NULL; |
| return LargePage::Initialize(isolate_->heap(), chunk); |
| } |
@@ -733,9 +786,10 @@
 }
 
 
-bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
-                                     Address start,
-                                     size_t size) {
+bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
+                                             Address start,
+                                             size_t commit_size,
+                                             size_t reserved_size) {
   // Commit page header (not executable).
   if (!vm->Commit(start,
                   CodePageGuardStartOffset(),
@@ -749,15 +803,14 @@
   }
 
   // Commit page body (executable).
-  size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
   if (!vm->Commit(start + CodePageAreaStartOffset(),
-                  area_size,
+                  commit_size,
                   true)) {
     return false;
   }
 
-  // Create guard page after the allocatable area.
-  if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
+  // Create guard page before the end.
+  if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
     return false;
   }