Index: runtime/vm/flow_graph_compiler_x64.cc
diff --git a/runtime/vm/flow_graph_compiler_x64.cc b/runtime/vm/flow_graph_compiler_x64.cc
index bfc547d799e1f43bf1596939da941407ebf53a44..86ae510d6661aca3092ece3c7625cdeec12b836e 100644
--- a/runtime/vm/flow_graph_compiler_x64.cc
+++ b/runtime/vm/flow_graph_compiler_x64.cc
@@ -28,11 +28,34 @@ void DeoptimizationStub::GenerateCode(FlowGraphCompiler* compiler) {
 #define __ assem->
   __ Comment("Deopt stub for id %d", deopt_id_);
   __ Bind(entry_label());
-  for (intptr_t i = 0; i < registers_.length(); i++) {
-    if (registers_[i] != kNoRegister) {
-      __ pushq(registers_[i]);
+
+  if (deoptimization_env_ == NULL) {
+    for (intptr_t i = 0; i < registers_.length(); i++) {
+      if (registers_[i] != kNoRegister) {
+        __ pushq(registers_[i]);
+      }
+    }
+  } else {
+    // We have a deoptimization environment; we have to tear down the
+    // optimized frame and recreate the non-optimized one.
+    __ leaq(RSP,
+            Address(RBP, ParsedFunction::kFirstLocalSlotIndex * kWordSize));
srdjan (2012/07/10 23:27:54):
How do you handle incoming parameters and incoming
Vyacheslav Egorov (Google) (2012/07/11 13:27:58):
Positional parameters are in the environment as we
+
+    const ZoneGrowableArray<Value*>& values = deoptimization_env_->values();
+    const GrowableArray<Location>* locations = deoptimization_env_->locations();
+
+    for (intptr_t i = 0; i < values.length(); i++) {
+      Location loc = (*locations)[i];
+      if (loc.IsInvalid()) {
+        ASSERT(values[i]->IsConstant());
+        __ PushObject(values[i]->AsConstant()->value());
+      } else {
+        ASSERT(loc.IsRegister());
+        __ pushq(loc.reg());
+      }
     }
   }
+
   if (compiler->IsLeaf()) {
     Label L;
     __ call(&L);
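As a reading aid, here is a small standalone sketch of what the else branch above does conceptually: RSP is pointed at the first local slot and the environment is then walked in order, pushing constants (PushObject) and register-allocated values (pushq) to recreate the non-optimized frame contents. It is not part of this CL; EnvValue, the register file, and the concrete values are invented for illustration.

#include <cstdint>
#include <iostream>
#include <vector>

// Invented stand-in for one environment slot: either a constant or a value
// currently living in a register.
enum Kind { kConstant, kRegister };
struct EnvValue {
  Kind kind;
  int64_t constant;  // used when kind == kConstant
  int reg;           // register index, used when kind == kRegister
};

int main() {
  // Pretend register file: regs[i] is the live value of register i.
  const int64_t regs[4] = {10, 20, 30, 40};

  // Environment: local 0 holds the constant 7, local 1 lives in register 2.
  const std::vector<EnvValue> env = {{kConstant, 7, -1}, {kRegister, 0, 2}};

  // Push each slot in order, as the stub does after resetting RSP.
  std::vector<int64_t> frame;
  for (const EnvValue& v : env) {
    frame.push_back(v.kind == kConstant ? v.constant : regs[v.reg]);
  }

  for (size_t i = 0; i < frame.size(); ++i) {
    std::cout << "local slot " << i << " = " << frame[i] << "\n";
  }
  return 0;
}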
@@ -1080,6 +1103,164 @@ void FlowGraphCompiler::LoadDoubleOrSmiToXmm(XmmRegister result,
 }
+ParallelMoveResolver::ParallelMoveResolver(FlowGraphCompiler* compiler)
+    : compiler_(compiler), moves_(32) {}
srdjan (2012/07/10 23:27:54):
Some methods should be shared, I guess.
Vyacheslav Egorov (Google) (2012/07/11 13:27:58):
Yes this is the intent. Done when porting to ia32.
+
+
+void ParallelMoveResolver::EmitNativeCode(ParallelMoveInstr* parallel_move) {
+  ASSERT(moves_.is_empty());
+  // Build up a worklist of moves.
+  BuildInitialMoveList(parallel_move);
+
+  for (int i = 0; i < moves_.length(); ++i) {
+    MoveOperands move = moves_[i];
+    // Skip constants to perform them last. They don't block other moves
+    // and skipping such moves with register destinations keeps those
+    // registers free for the whole algorithm.
+    if (!move.IsEliminated() && !move.src().IsConstant()) PerformMove(i);
+  }
+
+  // Perform the moves with constant sources.
+  for (int i = 0; i < moves_.length(); ++i) {
+    if (!moves_[i].IsEliminated()) {
+      ASSERT(moves_[i].src().IsConstant());
+      EmitMove(i);
+    }
+  }
+
+  moves_.Clear();
+}
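For illustration, a small standalone sketch (not VM code; the register names and starting values are made up) of the order in which the two passes above emit code for the parallel move {RAX -> RBX, constant 1 -> RAX}: the register move comes out of the first pass and the constant load out of the second, so RBX receives RAX's original value before RAX is overwritten.

// Toy model: regs[0] stands for RAX, regs[1] for RBX.
#include <cstdio>

int main() {
  long regs[2] = {42, 0};

  // Pass 1: moves with non-constant sources (movq RBX, RAX).
  regs[1] = regs[0];
  // Pass 2: moves with constant sources (movq RAX, Immediate(1)).
  regs[0] = 1;

  std::printf("RAX = %ld, RBX = %ld\n", regs[0], regs[1]);  // RAX = 1, RBX = 42
  return 0;
}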
+
+
+void ParallelMoveResolver::BuildInitialMoveList(
+  ParallelMoveInstr* parallel_move) {
srdjan (2012/07/10 23:27:54):
indent 4 spaces
Vyacheslav Egorov (Google) (2012/07/11 13:27:58):
Done.
+  // Perform a linear sweep of the moves to add them to the initial list of
+  // moves to perform, ignoring any move that is redundant (the source is
+  // the same as the destination, the destination is ignored and
+  // unallocated, or the move was already eliminated).
+  const GrowableArray<MoveOperands>& moves = parallel_move->moves();
+  for (int i = 0; i < moves.length(); ++i) {
+    MoveOperands move = moves[i];
+    if (!move.IsRedundant()) moves_.Add(move);
+  }
+}
+
+
+void ParallelMoveResolver::PerformMove(int index) {
+  // Each call to this function performs a move and deletes it from the move
+  // graph. We first recursively perform any move blocking this one. We
+  // mark a move as "pending" on entry to PerformMove in order to detect
+  // cycles in the move graph. We use operand swaps to resolve cycles,
+  // which means that a call to PerformMove could change any source operand
+  // in the move graph.
+
+  ASSERT(!moves_[index].IsPending());
+  ASSERT(!moves_[index].IsRedundant());
+
+  // Clear this move's destination to indicate a pending move. The actual
+  // destination is saved in a stack-allocated local. Recursion may allow
+  // multiple moves to be pending.
+  ASSERT(!moves_[index].src().IsInvalid());
+  Location destination = moves_[index].MarkPending();
+
+  // Perform a depth-first traversal of the move graph to resolve
+  // dependencies. Any unperformed, unpending move with a source the same
+  // as this one's destination blocks this one so recursively perform all
+  // such moves.
+  for (int i = 0; i < moves_.length(); ++i) {
+    MoveOperands other_move = moves_[i];
+    if (other_move.Blocks(destination) && !other_move.IsPending()) {
+      // Though PerformMove can change any source operand in the move graph,
+      // this call cannot create a blocking move via a swap (this loop does
+      // not miss any). Assume there is a non-blocking move with source A
+      // and this move is blocked on source B and there is a swap of A and
+      // B. Then A and B must be involved in the same cycle (or they would
+      // not be swapped). Since this move's destination is B and there is
+      // only a single incoming edge to an operand, this move must also be
+      // involved in the same cycle. In that case, the blocking move will
+      // be created but will be "pending" when we return from PerformMove.
+      PerformMove(i);
+    }
+  }
+
+  // We are about to resolve this move and don't need it marked as
+  // pending, so restore its destination.
+  moves_[index].ClearPending(destination);
+
+  // This move's source may have changed due to swaps to resolve cycles and
+  // so it may now be the last move in the cycle. If so remove it.
+  if (moves_[index].src().Equals(destination)) {
+    moves_[index].Eliminate();
+    return;
+  }
+
+  // The move may be blocked on a (at most one) pending move, in which case
+  // we have a cycle. Search for such a blocking move and perform a swap to
+  // resolve it.
+  for (int i = 0; i < moves_.length(); ++i) {
+    MoveOperands other_move = moves_[i];
+    if (other_move.Blocks(destination)) {
+      ASSERT(other_move.IsPending());
+      EmitSwap(index);
+      return;
+    }
+  }
+
+  // This move is not blocked.
+  EmitMove(index);
+}
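A condensed standalone model of the recursion above, using plain ints instead of the VM's MoveOperands/Location classes (all names and values here are hypothetical), showing how the cycle {r0 -> r1, r1 -> r0} is detected through the pending flag and broken with a single swap:

#include <algorithm>
#include <cstdio>
#include <vector>

struct Move { int src; int dst; bool pending; bool done; };

std::vector<Move> moves;
long regs[2] = {111, 222};

void PerformMove(size_t index) {
  const int dst = moves[index].dst;
  moves[index].pending = true;

  // Recursively perform unperformed, unpending moves that read from this
  // move's destination.
  for (size_t i = 0; i < moves.size(); ++i) {
    if (!moves[i].done && !moves[i].pending && moves[i].src == dst) {
      PerformMove(i);
    }
  }
  moves[index].pending = false;

  // A swap further down the recursion may already have put the right value
  // in place; if so this move is now trivial.
  if (moves[index].src == dst) { moves[index].done = true; return; }

  // If a pending move still reads from our destination we are in a cycle:
  // break it with a swap and patch the sources of the remaining moves.
  for (size_t i = 0; i < moves.size(); ++i) {
    if (!moves[i].done && moves[i].src == dst) {
      const int src = moves[index].src;
      std::swap(regs[src], regs[dst]);          // the xchgq in EmitSwap
      moves[index].done = true;
      for (size_t j = 0; j < moves.size(); ++j) {
        if (moves[j].done) continue;
        if (moves[j].src == src) moves[j].src = dst;
        else if (moves[j].src == dst) moves[j].src = src;
      }
      return;
    }
  }

  regs[dst] = regs[moves[index].src];           // the movq in EmitMove
  moves[index].done = true;
}

int main() {
  moves = {{0, 1, false, false}, {1, 0, false, false}};  // r0 <-> r1 cycle
  for (size_t i = 0; i < moves.size(); ++i) {
    if (!moves[i].done) PerformMove(i);
  }
  std::printf("r0 = %ld, r1 = %ld\n", regs[0], regs[1]);  // r0 = 222, r1 = 111
  return 0;
}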
+
+#undef __
+
+#undef __
srdjan (2012/07/10 23:27:54):
One undef too many.
Vyacheslav Egorov (Google) (2012/07/11 13:27:58):
Done.
+#define __ compiler_->assembler()->
+
+void ParallelMoveResolver::EmitMove(int index) {
+  Location source = moves_[index].src();
+  Location destination = moves_[index].dest();
+
+  ASSERT(destination.IsRegister());
+  if (source.IsRegister()) {
+  __ movq(destination.reg(), source.reg());
srdjan (2012/07/10 23:27:54):
indent
Vyacheslav Egorov (Google) (2012/07/11 13:27:58):
Done.
+  } else {
+    ASSERT(source.IsConstant());
+    const Object& value = source.constant();
+    if (value.IsSmi()) {
Vyacheslav Egorov (Google) (2012/07/11 13:27:58):
This is apparently not needed assembler does the s
+      int64_t imm = reinterpret_cast<int64_t>(value.raw());
+      __ movq(destination.reg(), Immediate(imm));
+    } else {
+      __ LoadObject(destination.reg(), value);
+    }
+  }
+  moves_[index].Eliminate();
+}
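Background on the Smi branch above, as a sketch of the tagging scheme rather than the VM's actual headers or constants: a Smi keeps its integer value shifted left past a single zero tag bit, which is why the raw pointer bits can be used directly as an immediate. The helper names below are hypothetical.

#include <cassert>
#include <cstdint>

// Hypothetical helpers mirroring a one-bit, zero-valued Smi tag.
int64_t EncodeSmi(int64_t value) { return value * 2; }  // value shifted past the tag bit
int64_t DecodeSmi(int64_t raw) { return raw / 2; }

int main() {
  const int64_t raw = EncodeSmi(42);
  assert((raw & 1) == 0);           // low bit clear marks a Smi
  assert(DecodeSmi(raw) == 42);
  assert(DecodeSmi(EncodeSmi(-7)) == -7);
  return 0;
}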
+
+
+void ParallelMoveResolver::EmitSwap(int index) {
+  Location source = moves_[index].src();
+  Location destination = moves_[index].dest();
+
+  ASSERT(source.IsRegister() && destination.IsRegister());
+  __ xchgq(destination.reg(), source.reg());
+
+  // The swap of source and destination has executed a move from source to
+  // destination.
+  moves_[index].Eliminate();
+
+  // Any unperformed (including pending) move with a source of either
+  // this move's source or destination needs to have its source
+  // changed to reflect the state of affairs after the swap.
+  for (int i = 0; i < moves_.length(); ++i) {
+    MoveOperands other_move = moves_[i];
+    if (other_move.Blocks(source)) {
+      moves_[i].set_src(destination);
+    } else if (other_move.Blocks(destination)) {
+      moves_[i].set_src(source);
+    }
+  }
+}
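A tiny standalone check of the fixup rule above (register numbering and values are made up): for the parallel move {r0 -> r1, r1 -> r0, r1 -> r2}, the resolver emits xchg r0, r1 for the cycle, and the still-unperformed move r1 -> r2 is rewritten to read from r0, where r1's original value now lives.

#include <cassert>
#include <utility>

int main() {
  long r0 = 5, r1 = 6, r2 = 0;
  const long old_r0 = r0, old_r1 = r1;

  std::swap(r0, r1);   // xchgq resolving the two-register cycle
  r2 = r0;             // source patched from r1 to r0 by the loop above

  assert(r0 == old_r1 && r1 == old_r0 && r2 == old_r1);
  return 0;
}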
+
+
 #undef __
 }  // namespace dart