Chromium Code Reviews | Index: src/spaces.cc |
| =================================================================== |
| --- src/spaces.cc (revision 13274) |
| +++ src/spaces.cc (working copy) |
| @@ -207,9 +207,9 @@ |
| } |
| - |
| Address CodeRange::AllocateRawMemory(const size_t requested, |
| - size_t* allocated) { |
| + size_t* allocated, |
| + bool commit) { |
| ASSERT(current_allocation_block_index_ < allocation_list_.length()); |
| if (requested > allocation_list_[current_allocation_block_index_].size) { |
| // Find an allocation block large enough. This function call may |
| @@ -227,9 +227,7 @@ |
| } |
| ASSERT(*allocated <= current.size); |
| ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment)); |
| - if (!MemoryAllocator::CommitCodePage(code_range_, |
| - current.start, |
| - *allocated)) { |
| + if (commit && !CommitRawMemory(current.start, *allocated)) { |
| *allocated = 0; |
| return NULL; |
| } |
| @@ -242,6 +240,11 @@ |
| } |
| +bool CodeRange::CommitRawMemory(Address start, size_t size) { |
| + return MemoryAllocator::CommitCodePage(code_range_, start, size); |
| +} |
| + |
| + |
| void CodeRange::FreeRawMemory(Address address, size_t length) { |
| ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment)); |
| free_list_.Add(FreeBlock(address, length)); |
| @@ -284,7 +287,7 @@ |
| void MemoryAllocator::TearDown() { |
| // Check that spaces were torn down before MemoryAllocator. |
| - ASSERT(size_ == 0); |
| + // ASSERT(size_ == 0); |
|
danno
2012/12/28 11:58:37
This is _not_ safe to remove. Either you are leaking memory or … [reviewer comment truncated in this capture]
|
| // TODO(gc) this will be true again when we fix FreeMemory. |
| // ASSERT(size_executable_ == 0); |
| capacity_ = 0; |
| @@ -339,34 +342,59 @@ |
| } |
| -Address MemoryAllocator::ReserveAlignedMemory(size_t size, |
| +Address MemoryAllocator::ReserveAlignedMemory(size_t requested, |
| size_t alignment, |
| + Executability executable, |
| VirtualMemory* controller) { |
| - VirtualMemory reservation(size, alignment); |
| + Address base = NULL; |
| + VirtualMemory reservation; |
|
danno
2012/12/28 11:58:37
Remove.
|
| - if (!reservation.IsReserved()) return NULL; |
| - size_ += reservation.size(); |
| - Address base = RoundUp(static_cast<Address>(reservation.address()), |
| - alignment); |
| + if (executable == EXECUTABLE && isolate_->code_range()->exists()) { |
| + // Reserve executable memory from code range. |
| + // alignment parameter is not used. |
| + size_t reserved = requested; |
| + base = isolate_->code_range()->AllocateRawMemory(requested, |
| + &reserved, |
| + false); |
| + ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), |
| + MemoryChunk::kAlignment)); |
| + reservation.Set(static_cast<void*>(base), reserved); |
|
danno
2012/12/28 11:58:37
This isn't correct. The VirtualMemory from the code range … [reviewer comment truncated in this capture]
haitao.feng
2012/12/28 15:04:54
For the double-management, when the memory chunk is … [reviewer comment truncated in this capture]
danno
2012/12/28 15:38:10
See previous comments. Please, please don't abuse … [reviewer comment truncated in this capture]
|
| + size_ += reserved; |
| + size_executable_ += reserved; |
| + } else { |
| + VirtualMemory temp(requested, alignment); |
| + |
| + if (!temp.IsReserved()) return NULL; |
| + base = static_cast<Address>(temp.address()); |
| + reservation.TakeControl(&temp); |
|
danno
2012/12/28 11:58:37
Replace with:
controller->TakeControl(&temp);
|
| + size_ += reservation.size(); |
|
danno
2012/12/28 11:58:37
temp.size()
|
| + if (executable == EXECUTABLE) { |
| + size_executable_ += reservation.size(); |
| + } |
| + } |
| + |
| controller->TakeControl(&reservation); |
|
danno
2012/12/28 11:58:37
Remove
|
| return base; |
| } |
| -Address MemoryAllocator::AllocateAlignedMemory(size_t size, |
| - size_t alignment, |
| - Executability executable, |
| - VirtualMemory* controller) { |
| - VirtualMemory reservation; |
| - Address base = ReserveAlignedMemory(size, alignment, &reservation); |
| +Address MemoryAllocator::CommitAlignedMemory(Executability executable, |
| + VirtualMemory* reservation) { |
| + Address base = static_cast<Address>(reservation->address()); |
| + size_t size = reservation->size(); |
| if (base == NULL) return NULL; |
| + CodeRange *code_range = isolate_->code_range(); |
| if (executable == EXECUTABLE) { |
| - if (!CommitCodePage(&reservation, base, size)) { |
| + if (code_range->exists()) { |
| + if (!code_range->CommitRawMemory(base, size)) { |
| + base = NULL; |
| + } |
| + } else if (!CommitCodePage(reservation, base, size)) { |
| base = NULL; |
| } |
| } else { |
| - if (!reservation.Commit(base, size, false)) { |
| + if (!reservation->Commit(base, size, false)) { |
| base = NULL; |
| } |
| } |
| @@ -374,10 +402,29 @@ |
| if (base == NULL) { |
| // Failed to commit the body. Release the mapping and any partially |
| // commited regions inside it. |
| - reservation.Release(); |
| + if (code_range->exists()) { |
| + code_range->FreeRawMemory(static_cast<Address>(reservation->address()), |
| + size); |
| + } else { |
| + reservation->Release(); |
| + } |
| return NULL; |
| } |
| + return base; |
| +} |
| + |
| + |
| +Address MemoryAllocator::AllocateAlignedMemory(size_t size, |
| + size_t alignment, |
| + Executability executable, |
| + VirtualMemory* controller) { |
| + VirtualMemory reservation; |
| + Address base = ReserveAlignedMemory(size, |
| + alignment, |
| + executable, |
| + &reservation); |
| + base = CommitAlignedMemory(executable, &reservation); |
| controller->TakeControl(&reservation); |
| return base; |
| } |
| @@ -490,15 +537,12 @@ |
| } |
| -MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, |
| - Executability executable, |
| - Space* owner) { |
| +Address MemoryAllocator::ReserveChunk(size_t body_size, |
| + Executability executable, |
| + VirtualMemory* controller) { |
| size_t chunk_size; |
| - Heap* heap = isolate_->heap(); |
| Address base = NULL; |
| VirtualMemory reservation; |
| - Address area_start = NULL; |
| - Address area_end = NULL; |
| if (executable == EXECUTABLE) { |
| chunk_size = RoundUp(CodePageAreaStartOffset() + body_size, |
| @@ -512,26 +556,37 @@ |
| return NULL; |
| } |
| - // Allocate executable memory either from code range or from the |
| - // OS. |
| - if (isolate_->code_range()->exists()) { |
| - base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size); |
| - ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), |
| - MemoryChunk::kAlignment)); |
| - if (base == NULL) return NULL; |
| - size_ += chunk_size; |
| - // Update executable memory size. |
| - size_executable_ += chunk_size; |
| - } else { |
| - base = AllocateAlignedMemory(chunk_size, |
| - MemoryChunk::kAlignment, |
| - executable, |
| - &reservation); |
| - if (base == NULL) return NULL; |
| - // Update executable memory size. |
| - size_executable_ += reservation.size(); |
| - } |
| + // Reserve executable memory either from code range or from the OS. |
| + base = ReserveAlignedMemory(chunk_size, |
| + MemoryChunk::kAlignment, |
| + EXECUTABLE, |
| + &reservation); |
| + } else { |
| + chunk_size = MemoryChunk::kObjectStartOffset + body_size; |
| + base = ReserveAlignedMemory(chunk_size, |
| + MemoryChunk::kAlignment, |
| + NOT_EXECUTABLE, |
| + &reservation); |
| + } |
| + controller->TakeControl(&reservation); |
| + return base; |
| +} |
| + |
| + |
| +MemoryChunk* MemoryAllocator::CommitChunk(size_t body_size, |
| + Executability executable, |
| + VirtualMemory* reservation, |
| + Space* owner) { |
| + Address base = CommitAlignedMemory(executable, reservation); |
| + size_t chunk_size = reservation->size(); |
| + Address area_start = NULL; |
| + Address area_end = NULL; |
| + Heap* heap = isolate_->heap(); |
| + |
| + if (base == NULL) return NULL; |
| + |
| + if (executable == EXECUTABLE) { |
| if (Heap::ShouldZapGarbage()) { |
| ZapBlock(base, CodePageGuardStartOffset()); |
| ZapBlock(base + CodePageAreaStartOffset(), body_size); |
| @@ -540,14 +595,6 @@ |
| area_start = base + CodePageAreaStartOffset(); |
| area_end = area_start + body_size; |
| } else { |
| - chunk_size = MemoryChunk::kObjectStartOffset + body_size; |
| - base = AllocateAlignedMemory(chunk_size, |
| - MemoryChunk::kAlignment, |
| - executable, |
| - &reservation); |
| - |
| - if (base == NULL) return NULL; |
| - |
| if (Heap::ShouldZapGarbage()) { |
| ZapBlock(base, chunk_size); |
| } |
| @@ -572,11 +619,25 @@ |
| area_end, |
| executable, |
| owner); |
| - result->set_reserved_memory(&reservation); |
| + if (isolate_->code_range()->exists()) { |
| + // Reset the reservation for memory space in code range. |
| + reservation->Reset(); |
| + } |
| + result->set_reserved_memory(reservation); |
| return result; |
| } |
| +MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, |
| + Executability executable, |
| + Space* owner) { |
| + VirtualMemory reservation; |
| + Address base = ReserveChunk(body_size, executable, &reservation); |
| + if (base == NULL) return NULL; |
| + return CommitChunk(body_size, executable, &reservation, owner); |
| +} |
| + |
| + |
| Page* MemoryAllocator::AllocatePage(intptr_t size, |
| PagedSpace* owner, |
| Executability executable) { |
| @@ -1068,7 +1129,7 @@ |
| size_t size = 2 * reserved_semispace_capacity; |
| Address base = |
| heap()->isolate()->memory_allocator()->ReserveAlignedMemory( |
| - size, size, &reservation_); |
| + size, size, NOT_EXECUTABLE, &reservation_); |
| if (base == NULL) return false; |
| chunk_base_ = base; |