Index: src/deoptimizer.cc
===================================================================
--- src/deoptimizer.cc (revision 13522)
+++ src/deoptimizer.cc (working copy)
@@ -44,8 +44,18 @@
   eager_deoptimization_entry_code_entries_ = -1;
   lazy_deoptimization_entry_code_entries_ = -1;
   size_t deopt_table_size = Deoptimizer::GetMaxDeoptTableSize();
-  eager_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
-  lazy_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
+  MemoryAllocator* allocator = Isolate::Current()->memory_allocator();
+  size_t initial_commit_size = OS::CommitPageSize();
+  eager_deoptimization_entry_code_ =
+      allocator->AllocateChunk(deopt_table_size,
+                               initial_commit_size,
+                               EXECUTABLE,
+                               NULL);
+  lazy_deoptimization_entry_code_ =
+      allocator->AllocateChunk(deopt_table_size,
+                               initial_commit_size,
+                               EXECUTABLE,
+                               NULL);
   current_ = NULL;
   deoptimizing_code_list_ = NULL;
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -55,9 +65,11 @@
 
 
 DeoptimizerData::~DeoptimizerData() {
-  delete eager_deoptimization_entry_code_;
+  Isolate::Current()->memory_allocator()->Free(
+      eager_deoptimization_entry_code_);
   eager_deoptimization_entry_code_ = NULL;
-  delete lazy_deoptimization_entry_code_;
+  Isolate::Current()->memory_allocator()->Free(
+      lazy_deoptimization_entry_code_);
   lazy_deoptimization_entry_code_ = NULL;
 
   DeoptimizingCodeListNode* current = deoptimizing_code_list_;
@@ -619,7 +631,7 @@
                                             GetEntryMode mode) {
   ASSERT(id >= 0);
   if (id >= kMaxNumberOfEntries) return NULL;
-  VirtualMemory* base = NULL;
+  MemoryChunk* base = NULL;
   if (mode == ENSURE_ENTRY_CODE) {
     EnsureCodeForDeoptimizationEntry(type, id);
   } else {
@@ -631,28 +643,27 @@
   } else {
     base = data->lazy_deoptimization_entry_code_;
   }
-  return
-      static_cast<Address>(base->address()) + (id * table_entry_size_);
+  return base->area_start() + (id * table_entry_size_);
 }
 
 
 int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
-  VirtualMemory* base = NULL;
+  MemoryChunk* base = NULL;
   DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
   if (type == EAGER) {
     base = data->eager_deoptimization_entry_code_;
   } else {
     base = data->lazy_deoptimization_entry_code_;
   }
-  Address base_casted = reinterpret_cast<Address>(base->address());
-  if (base == NULL ||
-      addr < base->address() ||
-      addr >= base_casted + (kMaxNumberOfEntries * table_entry_size_)) {
+  if (base == NULL) return kNotDeoptimizationEntry;
+  Address start = base->area_start();
+  if (addr < start ||
+      addr >= start + (kMaxNumberOfEntries * table_entry_size_)) {
     return kNotDeoptimizationEntry;
   }
   ASSERT_EQ(0,
-            static_cast<int>(addr - base_casted) % table_entry_size_);
-  return static_cast<int>(addr - base_casted) / table_entry_size_;
+            static_cast<int>(addr - start) % table_entry_size_);
+  return static_cast<int>(addr - start) / table_entry_size_;
 }
 
 
@@ -1571,14 +1582,14 @@
   CodeDesc desc;
   masm.GetCode(&desc);
 
-  VirtualMemory* memory = type == EAGER
+  MemoryChunk* chunk = type == EAGER
       ? data->eager_deoptimization_entry_code_
       : data->lazy_deoptimization_entry_code_;
-  size_t table_size = Deoptimizer::GetMaxDeoptTableSize();
-  ASSERT(static_cast<int>(table_size) >= desc.instr_size);
-  memory->Commit(memory->address(), table_size, true);
-  memcpy(memory->address(), desc.buffer, desc.instr_size);
-  CPU::FlushICache(memory->address(), desc.instr_size);
+  ASSERT(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
+         desc.instr_size);
+  chunk->CommitArea(desc.instr_size);
+  memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
+  CPU::FlushICache(chunk->area_start(), desc.instr_size);
 
   if (type == EAGER) {
     data->eager_deoptimization_entry_code_entries_ = entry_count;
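
Net effect of the patch: instead of two fully-committed VirtualMemory blocks, the deopt entry tables live in MemoryAllocator chunks that reserve the maximum table size but commit only one OS page up front; EnsureCodeForDeoptimizationEntry then grows the committed range to the generated table's actual size via MemoryChunk::CommitArea before copying the code in. Below is a minimal standalone sketch of that reserve-then-commit pattern, using POSIX mmap/mprotect in place of V8's MemoryAllocator; ReserveDeoptTable and CommitArea are illustrative names for this sketch, not code from the V8 tree.

// Sketch only: reserve a large executable region, commit pages on demand.
// Assumes a POSIX system with a power-of-two page size.
#include <sys/mman.h>
#include <unistd.h>

#include <cassert>
#include <cstddef>

struct ReservedRegion {
  char* base;        // start of the reservation
  size_t reserved;   // total reserved bytes (address space only)
  size_t committed;  // bytes currently backed by accessible memory
};

ReservedRegion ReserveDeoptTable(size_t max_table_size) {
  // PROT_NONE reserves address space without committing physical memory,
  // the moral equivalent of AllocateChunk's reserve_area_size.
  void* base = mmap(NULL, max_table_size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(base != MAP_FAILED);
  return ReservedRegion{static_cast<char*>(base), max_table_size, 0};
}

void CommitArea(ReservedRegion* region, size_t requested) {
  // Round the request up to the OS page size, as any committer must.
  size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  size_t aligned = (requested + page - 1) & ~(page - 1);
  assert(aligned <= region->reserved);
  if (aligned <= region->committed) return;  // already committed enough
  // RWX mirrors the EXECUTABLE chunk the deopt table is copied into.
  int rc = mprotect(region->base + region->committed,
                    aligned - region->committed,
                    PROT_READ | PROT_WRITE | PROT_EXEC);
  assert(rc == 0);
  region->committed = aligned;
}

int main() {
  size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  ReservedRegion table = ReserveDeoptTable(16 * page);
  CommitArea(&table, page);           // initial_commit_size analogue
  CommitArea(&table, 3 * page + 100); // grows to 4 pages on demand
  munmap(table.base, table.reserved);
  return 0;
}

Entry lookup stays plain stride arithmetic over the committed area: entry id sits at area_start() + id * table_entry_size_, and GetDeoptimizationId inverts it, which is what the divisibility ASSERT_EQ verifies.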