Index: src/spaces.cc |
=================================================================== |
--- src/spaces.cc (revision 13214) |
+++ src/spaces.cc (working copy) |
@@ -207,9 +207,9 @@ |
} |
- |
Address CodeRange::AllocateRawMemory(const size_t requested, |
- size_t* allocated) { |
+ size_t* allocated, |
+ bool committed) { |
danno
2012/12/20 10:18:35
nit: s/committed/commit
|
ASSERT(current_allocation_block_index_ < allocation_list_.length()); |
if (requested > allocation_list_[current_allocation_block_index_].size) { |
// Find an allocation block large enough. This function call may |
@@ -227,9 +227,7 @@ |
} |
ASSERT(*allocated <= current.size); |
ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment)); |
- if (!MemoryAllocator::CommitCodePage(code_range_, |
- current.start, |
- *allocated)) { |
+ if (committed && !CommitRawMemory(current.start, *allocated)) { |
*allocated = 0; |
return NULL; |
} |
@@ -242,6 +240,21 @@ |
} |
+bool CodeRange::CommitRawMemory(Address start, size_t size) { |
+ return MemoryAllocator::CommitCodePage(code_range_, start, size); |
+} |
+ |
+ |
+Address CodeRange::ReserveChunk(size_t body_size, size_t *reserved) { |
+ size_t chunk_size= RoundUp(MemoryAllocator::CodePageAreaStartOffset() + |
+ body_size, OS::CommitPageSize()) + |
+ MemoryAllocator::CodePageGuardSize(); |
+ Address base = AllocateRawMemory(chunk_size, reserved, false); |
+ ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment)); |
+ return base; |
+} |
+ |
+ |
void CodeRange::FreeRawMemory(Address address, size_t length) { |
ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment)); |
free_list_.Add(FreeBlock(address, length)); |
@@ -284,7 +297,7 @@ |
void MemoryAllocator::TearDown() { |
// Check that spaces were torn down before MemoryAllocator. |
- ASSERT(size_ == 0); |
+ // ASSERT(size_ == 0); |
danno
2012/12/20 10:18:35
Why this change?
haitao.feng
2012/12/24 14:46:46
"make x64.debug.check" has some errors. When I deb
danno
2012/12/28 11:58:37
The point of this comment is that pages allocated
|
// TODO(gc) this will be true again when we fix FreeMemory. |
// ASSERT(size_executable_ == 0); |
capacity_ = 0; |
@@ -490,6 +503,50 @@ |
} |
+MemoryChunk* MemoryAllocator::CommitChunkInCodeRange(Address start, |
danno
2012/12/20 10:18:35
Yikes! This is copy-pasted from MemoryAllocator::A
|
+ size_t body_size, |
+ size_t reserved_size) { |
+ CodeRange* code_range = isolate_->code_range(); |
+ ASSERT(code_range->exists() && code_range->contains(start)); |
+ Address area_start = NULL; |
+ Address area_end = NULL; |
+ |
+ if (size_executable_ + reserved_size > capacity_executable_) { |
+ LOG(isolate_, |
+ StringEvent("MemoryAllocator::AllocateRawMemory", |
+ "V8 Executable Allocation capacity exceeded")); |
+ return NULL; |
+ } |
+ |
+ if (code_range->CommitRawMemory(start, reserved_size)) { |
+ size_ += reserved_size; |
+ size_executable_ += reserved_size; |
+ if (Heap::ShouldZapGarbage()) { |
+ ZapBlock(start, CodePageGuardStartOffset()); |
+ ZapBlock(start + CodePageAreaStartOffset(), body_size); |
+ } |
+ |
+ area_start = start + CodePageAreaStartOffset(); |
+ area_end = area_start + body_size; |
+ isolate_->counters()->memory_allocated()-> |
+ Increment(static_cast<int>(reserved_size)); |
+ |
+ LOG(isolate_, NewEvent("MemoryChunk", start, reserved_size)); |
+ |
+ MemoryChunk* result = MemoryChunk::Initialize(isolate_->heap(), |
+ start, |
+ reserved_size, |
+ area_start, |
+ area_end, |
+ EXECUTABLE, |
+ NULL); |
+ return result; |
+ } else { |
+ return NULL; |
+ } |
+} |
+ |
+ |
MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, |
Executability executable, |
Space* owner) { |