Index: src/heap/spaces.cc
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 30b141fcd32965637bd6ed1a2cc29284dff33750..51a0ef68e0fb72b18d671e20fddc57451f81b805 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -93,7 +93,8 @@ CodeRange::CodeRange(Isolate* isolate)
       code_range_(NULL),
       free_list_(0),
       allocation_list_(0),
-      current_allocation_block_index_(0) {}
+      current_allocation_block_index_(0),
+      emergency_block_() {}
 
 
 bool CodeRange::SetUp(size_t requested) {
@@ -144,6 +145,7 @@ bool CodeRange::SetUp(size_t requested) {
   current_allocation_block_index_ = 0;
 
   LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
+  ReserveEmergencyBlock();
   return true;
 }
 
@@ -202,35 +204,20 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                      const size_t commit_size,
                                      size_t* allocated) {
   DCHECK(commit_size <= requested_size);
-  DCHECK(allocation_list_.length() == 0 ||
-         current_allocation_block_index_ < allocation_list_.length());
-  if (allocation_list_.length() == 0 ||
-      requested_size > allocation_list_[current_allocation_block_index_].size) {
-    // Find an allocation block large enough.
-    if (!GetNextAllocationBlock(requested_size)) return NULL;
-  }
-  // Commit the requested memory at the start of the current allocation block.
-  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
-  FreeBlock current = allocation_list_[current_allocation_block_index_];
-  if (aligned_requested >= (current.size - Page::kPageSize)) {
-    // Don't leave a small free block, useless for a large object or chunk.
-    *allocated = current.size;
-  } else {
-    *allocated = aligned_requested;
+  FreeBlock current;
+  if (!ReserveBlock(requested_size, &current)) {
+    *allocated = 0;
+    return NULL;
   }
+  *allocated = current.size;
   DCHECK(*allocated <= current.size);
   DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
   if (!isolate_->memory_allocator()->CommitExecutableMemory(
           code_range_, current.start, commit_size, *allocated)) {
     *allocated = 0;
+    ReleaseBlock(&current);
     return NULL;
   }
-  allocation_list_[current_allocation_block_index_].start += *allocated;
-  allocation_list_[current_allocation_block_index_].size -= *allocated;
-  if (*allocated == current.size) {
-    // This block is used up, get the next one.
-    GetNextAllocationBlock(0);
-  }
   return current.start;
 }
 
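
The hunk above rewrites AllocateRawMemory() in terms of the
ReserveBlock()/ReleaseBlock() pair defined in the next hunk. Because
ReserveBlock() detaches the block from allocation_list_ up front, the
commit-failure path must now hand the block back explicitly rather than
simply returning NULL. The sketch below models that pattern with stand-in
types; CodeRangeModel, kPageSize, and kAlignment are illustrative names,
not the V8 API, and the model keeps only the last allocation block "live"
where the real code tracks current_allocation_block_index_. Parts 1-3 of
the sketch in these notes compose into one small compilable program.

    // Sketch, part 1 of 3: a reserve/release free-list model (not V8 code).
    #include <cstddef>
    #include <cstdio>
    #include <vector>
    using std::size_t;

    typedef size_t Address;  // stand-in for v8::internal::Address
    struct FreeBlock {
      Address start;
      size_t size;
    };
    const size_t kPageSize = 4096;   // stand-in for Page::kPageSize
    const size_t kAlignment = 4096;  // stand-in for MemoryChunk::kAlignment

    static size_t RoundUp(size_t x, size_t a) {
      return (x + a - 1) & ~(a - 1);  // a must be a power of two
    }

    struct CodeRangeModel {
      std::vector<FreeBlock> allocation_list_;
      std::vector<FreeBlock> free_list_;

      // Simplified GetNextAllocationBlock(): recycle released blocks. The
      // real code also merges adjacent blocks and picks one big enough.
      bool GetNextAllocationBlock(size_t requested) {
        allocation_list_.insert(allocation_list_.end(), free_list_.begin(),
                                free_list_.end());
        free_list_.clear();
        return !allocation_list_.empty() &&
               allocation_list_.back().size >= requested;
      }

      // Detach a block from the allocation list, as ReserveBlock() does.
      // Blocks are assumed to be at least one page large.
      bool ReserveBlock(size_t requested, FreeBlock* block) {
        if (allocation_list_.empty() ||
            allocation_list_.back().size < requested) {
          if (!GetNextAllocationBlock(requested)) return false;
        }
        size_t aligned = RoundUp(requested, kAlignment);
        *block = allocation_list_.back();
        // Don't leave a free tail smaller than a page behind.
        if (aligned < block->size - kPageSize) block->size = aligned;
        allocation_list_.back().start += block->size;
        allocation_list_.back().size -= block->size;
        if (allocation_list_.back().size == 0) allocation_list_.pop_back();
        return true;
      }

      // The commit-failure path of AllocateRawMemory(): hand the block back.
      void ReleaseBlock(const FreeBlock* block) {
        free_list_.push_back(*block);
      }
    };
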
@@ -260,6 +247,49 @@ void CodeRange::TearDown() {
 }
 
 
+bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
+  DCHECK(allocation_list_.length() == 0 ||
+         current_allocation_block_index_ < allocation_list_.length());
+  if (allocation_list_.length() == 0 ||
+      requested_size > allocation_list_[current_allocation_block_index_].size) {
+    // Find an allocation block large enough.
+    if (!GetNextAllocationBlock(requested_size)) return false;
+  }
+  // Commit the requested memory at the start of the current allocation block.
+  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
+  *block = allocation_list_[current_allocation_block_index_];
+  // Don't leave a small free block, useless for a large object or chunk.
+  if (aligned_requested < (block->size - Page::kPageSize)) {
+    block->size = aligned_requested;
+  }
+  DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
+  allocation_list_[current_allocation_block_index_].start += block->size;
+  allocation_list_[current_allocation_block_index_].size -= block->size;
+  return true;
+}
+
+
+void CodeRange::ReleaseBlock(const FreeBlock* block) { free_list_.Add(*block); }
+
+
+void CodeRange::ReserveEmergencyBlock() {
+  const size_t requested_size = MemoryAllocator::CodePageAreaSize();
+  if (emergency_block_.size == 0) {
+    ReserveBlock(requested_size, &emergency_block_);
+  } else {
+    DCHECK(emergency_block_.size >= requested_size);
+  }
+}
+
+
+void CodeRange::ReleaseEmergencyBlock() {
+  if (emergency_block_.size != 0) {
+    ReleaseBlock(&emergency_block_);
+    emergency_block_.size = 0;
+  }
+}
+
+
 // -----------------------------------------------------------------------------
 // MemoryAllocator
 //
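
ReserveEmergencyBlock() and ReleaseEmergencyBlock() above park a single
CodePageAreaSize()-sized block at SetUp() time so that the collector can
still obtain one code page even when the rest of the code range has been
handed out. Continuing the sketch (Parachute is an illustrative name, not
a V8 class):

    // Sketch, part 2 of 3: the emergency-block life cycle.
    struct Parachute {
      FreeBlock emergency_;  // size == 0 means "nothing parked"
      Parachute() : emergency_{0, 0} {}

      // SetUp() path: park one block; later calls keep what is parked.
      void Reserve(CodeRangeModel* range, size_t page_area_size) {
        if (emergency_.size == 0) {
          range->ReserveBlock(page_area_size, &emergency_);
        }
      }

      // GC path: return the parked block to the free list so that the next
      // ReserveBlock() call can succeed even on an exhausted range.
      void Release(CodeRangeModel* range) {
        if (emergency_.size != 0) {
          range->ReleaseBlock(&emergency_);
          emergency_.size = 0;
        }
      }
    };
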
@@ -1106,6 +1136,14 @@ void PagedSpace::ReleasePage(Page* page) {
 
 
 void PagedSpace::CreateEmergencyMemory() {
+  if (identity() == CODE_SPACE) {
+    // Make the emergency block available to the allocator.
+    CodeRange* code_range = heap()->isolate()->code_range();
+    if (code_range != NULL && code_range->valid()) {
+      code_range->ReleaseEmergencyBlock();
+    }
+    DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize());
+  }
   emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
       AreaSize(), AreaSize(), executable(), this);
 }
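
The final hunk wires the mechanism into PagedSpace::CreateEmergencyMemory():
for the code space, the parked block is released first so that the
AllocateChunk() call can be satisfied from the code range even when it is
otherwise full, and the DCHECK records that one CodePageAreaSize() block is
exactly the page area the space needs. In terms of the sketch (the control
flow only, not the real call chain):

    // Sketch, part 3 of 3: a driver mirroring the CODE_SPACE branch.
    int main() {
      CodeRangeModel range;
      range.allocation_list_.push_back({0, 8 * kPageSize});  // CodeRange::SetUp()
      Parachute parachute;
      parachute.Reserve(&range, kPageSize);  // ReserveEmergencyBlock()

      FreeBlock scratch;
      while (range.ReserveBlock(kPageSize, &scratch)) {
        // Exhaust the range, as a long-running program eventually might.
      }

      parachute.Release(&range);  // CreateEmergencyMemory() for CODE_SPACE
      FreeBlock page;
      bool ok = range.ReserveBlock(kPageSize, &page);  // the emergency page
      std::printf("emergency page available: %s\n", ok ? "yes" : "no");
      return ok ? 0 : 1;
    }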
|