// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/gap-resolver.h"

#include <algorithm>
#include <functional>
#include <set>

namespace v8 {
namespace internal {
namespace compiler {

typedef ZoneList<MoveOperands>::iterator op_iterator;

#ifdef ENABLE_SLOW_ASSERTS
// TODO(svenpanne) Brush up InstructionOperand with comparison?
struct InstructionOperandComparator {
  bool operator()(const InstructionOperand* x, const InstructionOperand* y) {
    return (x->kind() < y->kind()) ||
           (x->kind() == y->kind() && x->index() < y->index());
  }
};
#endif

// No operand should be the destination for more than one move.
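// For example (operand names illustrative): a parallel move {r0 -> r2,
// r1 -> r2} would leave r2's final value ambiguous, so it trips the slow
// assertion below in debug builds.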
static void VerifyMovesAreInjective(ZoneList<MoveOperands>* moves) {
#ifdef ENABLE_SLOW_ASSERTS
  std::set<InstructionOperand*, InstructionOperandComparator> seen;
  for (op_iterator i = moves->begin(); i != moves->end(); ++i) {
    SLOW_ASSERT(seen.find(i->destination()) == seen.end());
    seen.insert(i->destination());
  }
#endif
}


void GapResolver::Resolve(ParallelMove* parallel_move) const {
  ZoneList<MoveOperands>* moves = parallel_move->move_operands();
  // TODO(svenpanne) Use the member version of remove_if when we use real lists.
  op_iterator end =
      std::remove_if(moves->begin(), moves->end(),
                     std::mem_fun_ref(&MoveOperands::IsRedundant));
  moves->Rewind(static_cast<int>(end - moves->begin()));

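  // Only non-redundant moves remain at this point; e.g. (names illustrative)
  // a move r0 -> r0 with equal source and destination has been dropped above.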
  VerifyMovesAreInjective(moves);

  for (op_iterator move = moves->begin(); move != moves->end(); ++move) {
    if (!move->IsEliminated()) PerformMove(moves, &*move);
  }
}


void GapResolver::PerformMove(ZoneList<MoveOperands>* moves,
                              MoveOperands* move) const {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We mark a
  // move as "pending" on entry to PerformMove in order to detect cycles in the
  // move graph. We use operand swaps to resolve cycles, which means that a
  // call to PerformMove could change any source operand in the move graph.
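  // Illustrative trace (operand names hypothetical): for the cycle
  // {r0 -> r1, r1 -> r0}, PerformMove(r0 -> r1) marks itself pending and
  // recurses into r1 -> r0, which finds its destination blocked by the
  // pending move, emits a swap of r0 and r1, and updates the pending move's
  // source to r1. Back in the outer call, source equals destination, so the
  // original move is simply eliminated.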
  ASSERT(!move->IsPending());
  ASSERT(!move->IsRedundant());

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved on the side.
  ASSERT_NOT_NULL(move->source());  // Or else it will look eliminated.
  InstructionOperand* destination = move->destination();
  move->set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve dependencies.
  // Any unperformed, unpending move with a source equal to this move's
  // destination blocks it, so we recursively perform all such moves first.
  for (op_iterator other = moves->begin(); other != moves->end(); ++other) {
    if (other->Blocks(destination) && !other->IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move that this loop would miss via
      // a swap. Suppose there were a non-blocking move with source A, this
      // move were blocked on source B, and the recursion swapped A and B.
      // Then A and B must be involved in the same cycle (or they would not be
      // swapped). Since this move's destination is B and each operand has
      // only a single incoming edge, this move must also be involved in that
      // same cycle. In that case, the blocking move will be created but will
      // be "pending" when we return from PerformMove.
      PerformMove(moves, other);
    }
  }

  // We are about to resolve this move and don't need it marked as pending, so
  // restore its destination.
  move->set_destination(destination);

  // This move's source may have changed due to swaps to resolve cycles, so it
  // may now be the last move in the cycle. If so, remove it.
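  // Illustrative example (hypothetical operands): in the cycle
  // {r0 -> r1, r1 -> r2, r2 -> r0}, the recursive calls resolve the cycle
  // with two swaps, and the fix-ups below rewrite this move's source until
  // it equals its destination r1, at which point the move is dropped here.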
  InstructionOperand* source = move->source();
  if (source->Equals(destination)) {
    move->Eliminate();
    return;
  }

  // This move may be blocked on at most one pending move, in which case we
  // have a cycle. Search for such a blocking move and perform a swap to
  // resolve it.
  op_iterator blocker = std::find_if(
      moves->begin(), moves->end(),
      std::bind2nd(std::mem_fun_ref(&MoveOperands::Blocks), destination));
  if (blocker == moves->end()) {
    // The easy case: This move is not blocked.
    assembler_->AssembleMove(source, destination);
    move->Eliminate();
    return;
  }

  ASSERT(blocker->IsPending());
  // Ensure source is a register or both are stack slots, to limit swap cases.
  if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
    std::swap(source, destination);
  }
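  // After this normalization, either the source is a register or both
  // operands are stack slots, so AssembleSwap never sees a stack-slot source
  // paired with a register destination. This roughly halves the operand
  // combinations the assembler has to support.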
  assembler_->AssembleSwap(source, destination);
  move->Eliminate();

  // Any unperformed (including pending) move with a source of either this
  // move's source or destination needs its source changed to reflect the
  // state of affairs after the swap.
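  // E.g. (hypothetical operands): if this swap exchanged r0 and r1, a
  // remaining move r0 -> r2 must become r1 -> r2, since the value it wants
  // now lives in r1.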
  for (op_iterator other = moves->begin(); other != moves->end(); ++other) {
    if (other->Blocks(source)) {
      other->set_source(destination);
    } else if (other->Blocks(destination)) {
      other->set_source(source);
    }
  }
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8