Index: runtime/vm/flow_graph_compiler.cc |
diff --git a/runtime/vm/flow_graph_compiler.cc b/runtime/vm/flow_graph_compiler.cc |
index ab31e943ccd049516ce48b17aa8bec12cbfc41d5..27c3d068c507ace4e5aef9063b7fc73736d78e2a 100644 |
--- a/runtime/vm/flow_graph_compiler.cc |
+++ b/runtime/vm/flow_graph_compiler.cc |
@@ -33,6 +33,7 @@ FlowGraphCompiler::FlowGraphCompiler( |
const ParsedFunction& parsed_function, |
const GrowableArray<BlockEntryInstr*>& block_order, |
bool is_optimizing, |
+ bool is_ssa, |
bool is_leaf) |
: assembler_(assembler), |
parsed_function_(parsed_function), |
@@ -44,12 +45,14 @@ FlowGraphCompiler::FlowGraphCompiler( |
block_info_(block_order.length()), |
deopt_stubs_(), |
is_optimizing_(is_optimizing), |
+ is_ssa_(is_ssa), |
is_dart_leaf_(is_leaf), |
bool_true_(Bool::ZoneHandle(Bool::True())), |
bool_false_(Bool::ZoneHandle(Bool::False())), |
double_class_(Class::ZoneHandle( |
Isolate::Current()->object_store()->double_class())), |
- frame_register_allocator_(this, is_optimizing) { |
+ frame_register_allocator_(this, is_optimizing, is_ssa), |
+ parallel_move_resolver_(this) { |
ASSERT(assembler != NULL); |
[Review comment — srdjan, 2012/07/10 23:27:54]
    You could also ASSERT(is_optimizing_ || !is_ssa_);
[Reply — Vyacheslav Egorov (Google), 2012/07/11 13:27:58]
    Done.
} |
@@ -101,9 +104,14 @@ void FlowGraphCompiler::VisitBlocks() { |
for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) { |
instr = it.Current(); |
if (FLAG_code_comments) EmitComment(instr); |
- ASSERT(instr->locs() != NULL); |
- EmitInstructionPrologue(instr); |
- instr->EmitNativeCode(this); |
+ if (instr->IsParallelMove()) { |
+ parallel_move_resolver_.EmitNativeCode(instr->AsParallelMove()); |
+ } else { |
+ ASSERT(instr->locs() != NULL); |
+ EmitInstructionPrologue(instr); |
+ pending_deoptimization_env_ = instr->env(); |
+ instr->EmitNativeCode(this); |
+ } |
} |
if (instr->next() != NULL) { |
BlockEntryInstr* successor = instr->next()->AsBlockEntry(); |
@@ -189,10 +197,14 @@ Label* FlowGraphCompiler::AddDeoptStub(intptr_t deopt_id, |
Register reg3) { |
DeoptimizationStub* stub = |
new DeoptimizationStub(deopt_id, deopt_token_pos, try_index, reason); |
- frame_register_allocator()->SpillInDeoptStub(stub); |
- if (reg1 != kNoRegister) stub->Push(reg1); |
- if (reg2 != kNoRegister) stub->Push(reg2); |
- if (reg3 != kNoRegister) stub->Push(reg3); |
+ if (pending_deoptimization_env_ == NULL) { |
+ frame_register_allocator()->SpillInDeoptStub(stub); |
+ if (reg1 != kNoRegister) stub->Push(reg1); |
+ if (reg2 != kNoRegister) stub->Push(reg2); |
+ if (reg3 != kNoRegister) stub->Push(reg3); |
+ } else { |
+ stub->set_deoptimization_env(pending_deoptimization_env_); |
+ } |
deopt_stubs_.Add(stub); |
return stub->entry_label(); |
} |
@@ -476,6 +488,8 @@ void FrameRegisterAllocator::SpillRegister(Register reg) { |
void FrameRegisterAllocator::AllocateRegisters(Instruction* instr) { |
+ if (is_ssa_) return; |
+ |
LocationSummary* locs = instr->locs(); |
bool blocked_registers[kNumberOfCpuRegisters]; |
@@ -492,7 +506,7 @@ void FrameRegisterAllocator::AllocateRegisters(Instruction* instr) { |
// Mark all fixed input, temp and output registers as used. |
for (intptr_t i = 0; i < locs->input_count(); i++) { |
Location loc = locs->in(i); |
- if (loc.kind() == Location::kRegister) { |
+ if (loc.IsRegister()) { |
ASSERT(!blocked_registers[loc.reg()]); |
blocked_registers[loc.reg()] = true; |
if (registers_[loc.reg()] != NULL) { |
@@ -508,14 +522,14 @@ void FrameRegisterAllocator::AllocateRegisters(Instruction* instr) { |
for (intptr_t i = 0; i < locs->temp_count(); i++) { |
Location loc = locs->temp(i); |
- if (loc.kind() == Location::kRegister) { |
+ if (loc.IsRegister()) { |
ASSERT(!blocked_registers[loc.reg()]); |
blocked_registers[loc.reg()] = true; |
blocked_temp_registers[loc.reg()] = true; |
} |
} |
- if (locs->out().kind() == Location::kRegister) { |
+ if (locs->out().IsRegister()) { |
// Fixed output registers are allowed to overlap with |
// temps and inputs. |
blocked_registers[locs->out().reg()] = true; |
@@ -533,9 +547,9 @@ void FrameRegisterAllocator::AllocateRegisters(Instruction* instr) { |
for (intptr_t i = locs->input_count() - 1; i >= 0; i--) { |
Location loc = locs->in(i); |
Register reg = kNoRegister; |
- if (loc.kind() == Location::kRegister) { |
+ if (loc.IsRegister()) { |
reg = loc.reg(); |
- } else if (loc.kind() == Location::kUnallocated) { |
+ } else if (loc.IsUnallocated()) { |
ASSERT(loc.policy() == Location::kRequiresRegister); |
if ((stack_.length() > 0) && !blocked_temp_registers[stack_.Last()]) { |
reg = stack_.Last(); |
@@ -558,7 +572,7 @@ void FrameRegisterAllocator::AllocateRegisters(Instruction* instr) { |
// Allocate all unallocated temp locations. |
for (intptr_t i = 0; i < locs->temp_count(); i++) { |
Location loc = locs->temp(i); |
- if (loc.kind() == Location::kUnallocated) { |
+ if (loc.IsUnallocated()) { |
ASSERT(loc.policy() == Location::kRequiresRegister); |
loc = Location::RegisterLocation( |
AllocateFreeRegister(blocked_registers)); |
@@ -568,7 +582,7 @@ void FrameRegisterAllocator::AllocateRegisters(Instruction* instr) { |
} |
Location result_location = locs->out(); |
- if (result_location.kind() == Location::kUnallocated) { |
+ if (result_location.IsUnallocated()) { |
switch (result_location.policy()) { |
case Location::kRequiresRegister: |
result_location = Location::RegisterLocation( |
@@ -581,13 +595,15 @@ void FrameRegisterAllocator::AllocateRegisters(Instruction* instr) { |
locs->set_out(result_location); |
} |
- if (result_location.kind() == Location::kRegister) { |
+ if (result_location.IsRegister()) { |
SpillRegister(result_location.reg()); |
} |
} |
void FrameRegisterAllocator::Pop(Register dst, Value* val) { |
+ if (is_ssa_) return; |
+ |
if (stack_.length() > 0) { |
ASSERT(keep_values_in_registers_); |
Register src = stack_.Last(); |
@@ -602,6 +618,8 @@ void FrameRegisterAllocator::Pop(Register dst, Value* val) { |
void FrameRegisterAllocator::Push(Register reg, BindInstr* val) { |
+ if (is_ssa_) return; |
+ |
ASSERT(registers_[reg] == NULL); |
if (keep_values_in_registers_) { |
registers_[reg] = val; |
@@ -613,6 +631,8 @@ void FrameRegisterAllocator::Push(Register reg, BindInstr* val) { |
void FrameRegisterAllocator::Spill() { |
+ if (is_ssa_) return; |
+ |
for (int i = 0; i < stack_.length(); i++) { |
Register r = stack_[i]; |
registers_[r] = NULL; |
@@ -623,6 +643,8 @@ void FrameRegisterAllocator::Spill() { |
void FrameRegisterAllocator::SpillInDeoptStub(DeoptimizationStub* stub) { |
+ if (is_ssa_) return; |
+ |
for (int i = 0; i < stack_.length(); i++) { |
stub->Push(stack_[i]); |
} |