Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(183)

Unified Diff: src/deoptimizer.cc

Issue 11566011: Use MemoryChunk-based allocation for deoptimization entry code (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 8 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: src/deoptimizer.cc
===================================================================
--- src/deoptimizer.cc (revision 13274)
+++ src/deoptimizer.cc (working copy)
@@ -44,8 +44,27 @@
eager_deoptimization_entry_code_entries_ = -1;
lazy_deoptimization_entry_code_entries_ = -1;
size_t deopt_table_size = Deoptimizer::GetMaxDeoptTableSize();
- eager_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
- lazy_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
+ eager_deoptimization_entry_code_ = new VirtualMemory();
+ lazy_deoptimization_entry_code_ = new VirtualMemory();
+ MemoryAllocator* allocator = Isolate::Current()->memory_allocator();
+ eager_deoptimization_entry_start_ = allocator->ReserveChunk(deopt_table_size,
danno 2012/12/28 11:58:37 Please don't store into a member variable and chan
haitao.feng 2012/12/28 15:04:54 In this CL, I am using eager_deoptimization_entry_
+ EXECUTABLE,
+ eager_deoptimization_entry_code_);
+ if (eager_deoptimization_entry_start_ == NULL) {
+ V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
+ }
+ eager_deoptimization_entry_start_ +=
+ MemoryAllocator::CodePageAreaStartOffset();
+ lazy_deoptimization_entry_start_ = allocator->ReserveChunk(deopt_table_size,
+ EXECUTABLE,
+ lazy_deoptimization_entry_code_);
+ if (lazy_deoptimization_entry_start_ == NULL) {
+ V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
+ }
+ lazy_deoptimization_entry_start_ +=
+ MemoryAllocator::CodePageAreaStartOffset();
+ eager_deoptimization_chunk_ = NULL;
+ lazy_deoptimization_chunk_ = NULL;
current_ = NULL;
deoptimizing_code_list_ = NULL;
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -59,6 +78,16 @@
eager_deoptimization_entry_code_ = NULL;
delete lazy_deoptimization_entry_code_;
lazy_deoptimization_entry_code_ = NULL;
+ if (eager_deoptimization_chunk_ != NULL) {
+ Isolate::Current()->memory_allocator()->Free(
+ eager_deoptimization_chunk_);
+ eager_deoptimization_chunk_ = NULL;
+ }
+ if (lazy_deoptimization_chunk_ != NULL) {
+ Isolate::Current()->memory_allocator()->Free(
+ lazy_deoptimization_chunk_);
+ lazy_deoptimization_chunk_ = NULL;
+ }
DeoptimizingCodeListNode* current = deoptimizing_code_list_;
while (current != NULL) {
@@ -617,7 +646,7 @@
GetEntryMode mode) {
ASSERT(id >= 0);
if (id >= kMaxNumberOfEntries) return NULL;
- VirtualMemory* base = NULL;
+ Address base = NULL;
if (mode == ENSURE_ENTRY_CODE) {
EnsureCodeForDeoptimizationEntry(type, id);
} else {
@@ -625,32 +654,31 @@
}
DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
if (type == EAGER) {
- base = data->eager_deoptimization_entry_code_;
+ base = data->eager_deoptimization_entry_start_;
} else {
- base = data->lazy_deoptimization_entry_code_;
+ base = data->lazy_deoptimization_entry_start_;
}
return
- static_cast<Address>(base->address()) + (id * table_entry_size_);
+ base + (id * table_entry_size_);
}
int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
- VirtualMemory* base = NULL;
+ Address base = NULL;
DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
if (type == EAGER) {
- base = data->eager_deoptimization_entry_code_;
+ base = data->eager_deoptimization_entry_start_;
} else {
- base = data->lazy_deoptimization_entry_code_;
+ base = data->lazy_deoptimization_entry_start_;
}
- Address base_casted = reinterpret_cast<Address>(base->address());
if (base == NULL ||
- addr < base->address() ||
- addr >= base_casted + (kMaxNumberOfEntries * table_entry_size_)) {
+ addr < base ||
+ addr >= base + (kMaxNumberOfEntries * table_entry_size_)) {
return kNotDeoptimizationEntry;
}
ASSERT_EQ(0,
- static_cast<int>(addr - base_casted) % table_entry_size_);
- return static_cast<int>(addr - base_casted) / table_entry_size_;
+ static_cast<int>(addr - base) % table_entry_size_);
+ return static_cast<int>(addr - base) / table_entry_size_;
}
@@ -1574,10 +1602,20 @@
: data->lazy_deoptimization_entry_code_;
size_t table_size = Deoptimizer::GetMaxDeoptTableSize();
ASSERT(static_cast<int>(table_size) >= desc.instr_size);
- memory->Commit(memory->address(), table_size, true);
- memcpy(memory->address(), desc.buffer, desc.instr_size);
- CPU::FlushICache(memory->address(), desc.instr_size);
+ MemoryAllocator* allocator = Isolate::Current()->memory_allocator();
+ MemoryChunk** chunk = type == EAGER
danno 2012/12/28 11:58:37 Can you please not use the conditional operator here
+ ? &data->eager_deoptimization_chunk_
+ : &data->lazy_deoptimization_chunk_;
+ if (*chunk == NULL) {
danno 2012/12/28 11:58:37 I don't think this is right. If the deopt table gr
haitao.feng 2012/12/28 15:04:54 If I read the code right, table_size is Deoptimiz
+ *chunk = allocator->CommitChunk(table_size, EXECUTABLE, memory, NULL);
+ if (*chunk == NULL) {
+ V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
+ }
+ }
+ memcpy((*chunk)->area_start(), desc.buffer, desc.instr_size);
+ CPU::FlushICache((*chunk)->area_start(), desc.instr_size);
+
if (type == EAGER) {
data->eager_deoptimization_entry_code_entries_ = entry_count;
} else {

Powered by Google App Engine
This is Rietveld 408576698