Index: src/spaces.cc
===================================================================
--- src/spaces.cc (revision 13290)
+++ src/spaces.cc (working copy)
@@ -207,9 +207,9 @@
 }


-
 Address CodeRange::AllocateRawMemory(const size_t requested,
-                                     size_t* allocated) {
+                                     size_t* allocated,
+                                     bool commit) {
   ASSERT(current_allocation_block_index_ < allocation_list_.length());
   if (requested > allocation_list_[current_allocation_block_index_].size) {
     // Find an allocation block large enough.  This function call may
@@ -227,9 +227,7 @@
   }
   ASSERT(*allocated <= current.size);
   ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
-  if (!MemoryAllocator::CommitCodePage(code_range_,
-                                       current.start,
-                                       *allocated)) {
+  if (commit && !CommitRawMemory(current.start, *allocated)) {
     *allocated = 0;
     return NULL;
   }
@@ -242,6 +240,11 @@
 }


+bool CodeRange::CommitRawMemory(Address start, size_t size) {
+  return MemoryAllocator::CommitCodePage(code_range_, start, size);
+}
+
+
 void CodeRange::FreeRawMemory(Address address, size_t length) {
   ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
   free_list_.Add(FreeBlock(address, length));
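
The commit parameter added above lets a caller reserve a block in the code range without committing it, and commit later through the new CodeRange::CommitRawMemory. A minimal caller sketch using only the functions in this patch (the code_range pointer and size variable are assumptions for illustration, not part of the patch):

    size_t allocated = 0;
    // Reserve a block in the code range but defer committing it.
    Address base = code_range->AllocateRawMemory(size, &allocated, false);
    if (base != NULL && !code_range->CommitRawMemory(base, allocated)) {
      // Commit failed: hand the block back to the code range's free list.
      code_range->FreeRawMemory(base, allocated);
      base = NULL;
    }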
@@ -285,8 +288,7 @@ |
void MemoryAllocator::TearDown() { |
// Check that spaces were torn down before MemoryAllocator. |
ASSERT(size_ == 0); |
- // TODO(gc) this will be true again when we fix FreeMemory. |
- // ASSERT(size_executable_ == 0); |
+ ASSERT(size_executable_ == 0); |
capacity_ = 0; |
capacity_executable_ = 0; |
} |
@@ -339,34 +341,37 @@ |
} |
-Address MemoryAllocator::ReserveAlignedMemory(size_t size, |
+Address MemoryAllocator::ReserveAlignedMemory(size_t requested, |
size_t alignment, |
+ Executability executable, |
VirtualMemory* controller) { |
- VirtualMemory reservation(size, alignment); |
+ VirtualMemory reservation(requested, alignment); |
+ if (!reservation.IsReserved()) return NULL; |
- if (!reservation.IsReserved()) return NULL; |
+ Address base = static_cast<Address>(reservation.address()); |
size_ += reservation.size(); |
- Address base = RoundUp(static_cast<Address>(reservation.address()), |
- alignment); |
+ if (executable == EXECUTABLE) { |
danno
2013/01/15 09:41:18
It seems that you've removed the logic that rounds the base address up to the requested alignment.

haitao.feng
2013/01/16 13:02:06
This RoundUp is done in the reservation(requested, alignment) constructor now.
+    size_executable_ += reservation.size();
+  }
+
   controller->TakeControl(&reservation);
   return base;
 }
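
The reserve/commit split that ReserveAlignedMemory now participates in follows the usual two-phase virtual-memory pattern: reserve address space first, commit backing pages later. A self-contained POSIX sketch of that pattern, not V8 code (V8's VirtualMemory wraps the platform equivalents, and per the exchange above its two-argument constructor already hands back an alignment-rounded reservation):

    #include <stddef.h>
    #include <sys/mman.h>

    // Reserve address space without committing physical pages.
    static void* Reserve(size_t size) {
      void* base = mmap(NULL, size, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      return base == MAP_FAILED ? NULL : base;
    }

    // Commit a previously reserved range with the requested protection.
    static bool Commit(void* base, size_t size, bool executable) {
      int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
      return mprotect(base, size, prot) == 0;
    }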
-Address MemoryAllocator::AllocateAlignedMemory(size_t size,
-                                               size_t alignment,
-                                               Executability executable,
-                                               VirtualMemory* controller) {
-  VirtualMemory reservation;
-  Address base = ReserveAlignedMemory(size, alignment, &reservation);
+Address MemoryAllocator::CommitAlignedMemory(VirtualMemory* reservation,
+                                             Executability executable) {
+  Address base = static_cast<Address>(reservation->address());
   if (base == NULL) return NULL;
+  size_t size = reservation->size();
+
   if (executable == EXECUTABLE) {
-    if (!CommitCodePage(&reservation, base, size)) {
+    if (!CommitCodePage(reservation, base, size)) {
       base = NULL;
     }
   } else {
-    if (!reservation.Commit(base, size, false)) {
+    if (!reservation->Commit(base, size, false)) {
       base = NULL;
     }
   }
@@ -374,10 +379,24 @@
   if (base == NULL) {
     // Failed to commit the body. Release the mapping and any partially
     // commited regions inside it.
-    reservation.Release();
+    reservation->Release();
     return NULL;
   }

+  return base;
+}
+
+
+Address MemoryAllocator::AllocateAlignedMemory(size_t size,
+                                               size_t alignment,
+                                               Executability executable,
+                                               VirtualMemory* controller) {
+  VirtualMemory reservation;
+  Address base = ReserveAlignedMemory(size,
+                                      alignment,
+                                      executable,
+                                      &reservation);
+  base = CommitAlignedMemory(&reservation, executable);
   controller->TakeControl(&reservation);
   return base;
 }
@@ -490,15 +509,12 @@
 }


-MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
-                                            Executability executable,
-                                            Space* owner) {
+Address MemoryAllocator::ReserveChunk(size_t body_size,
+                                      Executability executable,
+                                      VirtualMemory* controller) {
   size_t chunk_size;
-  Heap* heap = isolate_->heap();
   Address base = NULL;
   VirtualMemory reservation;
-  Address area_start = NULL;
-  Address area_end = NULL;

   if (executable == EXECUTABLE) {
     chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
@@ -512,50 +528,47 @@
       return NULL;
     }

-    // Allocate executable memory either from code range or from the
-    // OS.
-    if (isolate_->code_range()->exists()) {
-      base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
-      ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
-                       MemoryChunk::kAlignment));
-      if (base == NULL) return NULL;
-      size_ += chunk_size;
-      // Update executable memory size.
-      size_executable_ += chunk_size;
-    } else {
-      base = AllocateAlignedMemory(chunk_size,
-                                   MemoryChunk::kAlignment,
-                                   executable,
-                                   &reservation);
-      if (base == NULL) return NULL;
-      // Update executable memory size.
-      size_executable_ += reservation.size();
-    }
-
-    if (Heap::ShouldZapGarbage()) {
-      ZapBlock(base, CodePageGuardStartOffset());
-      ZapBlock(base + CodePageAreaStartOffset(), body_size);
-    }
-
-    area_start = base + CodePageAreaStartOffset();
-    area_end = area_start + body_size;
+    // Reserve executable chunk from the OS.
+    base = ReserveAlignedMemory(chunk_size,
+                                MemoryChunk::kAlignment,
+                                EXECUTABLE,
+                                &reservation);
   } else {
     chunk_size = MemoryChunk::kObjectStartOffset + body_size;
-    base = AllocateAlignedMemory(chunk_size,
-                                 MemoryChunk::kAlignment,
-                                 executable,
-                                 &reservation);
+    base = ReserveAlignedMemory(chunk_size,
danno
2013/01/15 09:41:18
Please move the Reserve call after the if {} else block.
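A sketch of the restructuring danno asks for, hoisting the single Reserve call out of the branches so that only the chunk_size computation differs (hypothetical; the executable branch's capacity check is elided here):

    if (executable == EXECUTABLE) {
      chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
                           OS::CommitPageSize()) + CodePageGuardSize();
    } else {
      chunk_size = MemoryChunk::kObjectStartOffset + body_size;
    }
    base = ReserveAlignedMemory(chunk_size,
                                MemoryChunk::kAlignment,
                                executable,
                                &reservation);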
+                                MemoryChunk::kAlignment,
+                                NOT_EXECUTABLE,
+                                &reservation);
+  }

-    if (base == NULL) return NULL;
+  controller->TakeControl(&reservation);
+  return base;
+}

-    if (Heap::ShouldZapGarbage()) {
-      ZapBlock(base, chunk_size);
-    }
-
-    area_start = base + Page::kObjectStartOffset;
-    area_end = base + chunk_size;
-  }


+Address MemoryAllocator::ReserveChunk(size_t body_size, size_t* reserved) {
+  // Reserve chunk from the code range.
+  ASSERT(isolate_->code_range()->exists());
+  size_t chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
+                              OS::CommitPageSize()) + CodePageGuardSize();
+  Address base = isolate_->code_range()->AllocateRawMemory(chunk_size,
+                                                           reserved,
+                                                           false);
+  ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
+  size_ += *reserved;
+  size_executable_ += *reserved;
+  return base;
+}
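
The chunk_size computation above rounds the area-start offset plus the body up to whole commit pages, then appends the guard region. A worked example with assumed sizes (4 KB commit pages and 4 KB offsets chosen purely for illustration; the real values come from CodePageAreaStartOffset(), OS::CommitPageSize() and CodePageGuardSize()):

    #include <cstddef>
    #include <cstdio>

    // RoundUp as used in the patch: round x up to a multiple of m (m a power of two).
    static size_t RoundUp(size_t x, size_t m) { return (x + m - 1) & ~(m - 1); }

    int main() {
      const size_t kPageSize = 4096, kAreaStart = 4096, kGuardSize = 4096;
      size_t body_size = 10000;
      size_t chunk_size = RoundUp(kAreaStart + body_size, kPageSize) + kGuardSize;
      std::printf("%zu\n", chunk_size);  // 20480: five 4 KB pages for a 10000-byte body
      return 0;
    }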
+
+
+MemoryChunk* MemoryAllocator::CommitChunkShared(Heap* heap,
+                                                Address base,
+                                                size_t chunk_size,
+                                                Address area_start,
+                                                Address area_end,
+                                                Executability executable,
+                                                Space* owner,
+                                                VirtualMemory* reservation) {
   isolate_->counters()->memory_allocated()->
       Increment(static_cast<int>(chunk_size));
@@ -572,11 +585,102 @@
                                                area_end,
                                                executable,
                                                owner);
-  result->set_reserved_memory(&reservation);
+  result->set_reserved_memory(reservation);
   return result;
 }


+MemoryChunk* MemoryAllocator::CommitChunk(size_t body_size,
+                                          Executability executable,
+                                          VirtualMemory* reservation,
+                                          Space* owner) {
+  Address base = CommitAlignedMemory(reservation, executable);
+  if (base == NULL) return NULL;
+
+  size_t chunk_size = reservation->size();
+  Address area_start = NULL;
+  Address area_end = NULL;
+  Heap* heap = isolate_->heap();
+
+  if (executable == EXECUTABLE) {
+    if (Heap::ShouldZapGarbage()) {
+      ZapBlock(base, CodePageGuardStartOffset());
+      ZapBlock(base + CodePageAreaStartOffset(), body_size);
+    }
+
+    area_start = base + CodePageAreaStartOffset();
+    area_end = area_start + body_size;
+  } else {
+    if (Heap::ShouldZapGarbage()) {
+      ZapBlock(base, chunk_size);
+    }
+
+    area_start = base + Page::kObjectStartOffset;
+    area_end = base + chunk_size;
+  }
+
+  return CommitChunkShared(heap,
+                           base,
+                           chunk_size,
+                           area_start,
+                           area_end,
+                           executable,
+                           owner,
+                           reservation);
+}
+
+
+MemoryChunk* MemoryAllocator::CommitChunk(size_t body_size,
+                                          Address base,
+                                          size_t chunk_size,
+                                          Space* owner) {
+  ASSERT(isolate_->code_range()->exists());
+  if (base == NULL) return NULL;
+
+  if (!isolate_->code_range()->CommitRawMemory(base, chunk_size)) {
+    return NULL;
+  }
+
+  Address area_start = NULL;
+  Address area_end = NULL;
+  Heap* heap = isolate_->heap();
+  VirtualMemory empty;
+
+  if (Heap::ShouldZapGarbage()) {
+    ZapBlock(base, CodePageGuardStartOffset());
+    ZapBlock(base + CodePageAreaStartOffset(), body_size);
+  }
+
+  area_start = base + CodePageAreaStartOffset();
+  area_end = area_start + body_size;
+
+  return CommitChunkShared(heap,
+                           base,
+                           chunk_size,
+                           area_start,
+                           area_end,
+                           EXECUTABLE,
+                           owner,
+                           &empty);
+}
+
+
+MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
+                                            Executability executable,
+                                            Space* owner) {
+  if ((executable == EXECUTABLE) && isolate_->code_range()->exists()) {
+    size_t chunk_size;
+    Address base = ReserveChunk(body_size, &chunk_size);
+    return CommitChunk(body_size, base, chunk_size, owner);
+  } else {
+    VirtualMemory reservation;
+    Address base = ReserveChunk(body_size, executable, &reservation);
+    if (base == NULL) return NULL;
+    return CommitChunk(body_size, executable, &reservation, owner);
+  }
+}
+
+
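With the split in place, AllocateChunk is just reserve-then-commit along one of two paths. A hypothetical caller sketch using only the patch's entry point (the allocator, body_size and owner variables are assumptions for illustration, not part of the patch):

    // Executable chunks come from the code range when one exists,
    // otherwise from an OS-level VirtualMemory reservation.
    MemoryChunk* code_chunk =
        allocator->AllocateChunk(body_size, EXECUTABLE, owner);
    // Non-executable chunks always take the VirtualMemory path.
    MemoryChunk* data_chunk =
        allocator->AllocateChunk(body_size, NOT_EXECUTABLE, owner);
    // NULL from either call means the reservation or the commit failed.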
 Page* MemoryAllocator::AllocatePage(intptr_t size,
                                     PagedSpace* owner,
                                     Executability executable) {
@@ -1068,7 +1172,7 @@
   size_t size = 2 * reserved_semispace_capacity;
   Address base =
       heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
-          size, size, &reservation_);
+          size, size, NOT_EXECUTABLE, &reservation_);
   if (base == NULL) return false;
   chunk_base_ = base;