Index: src/compiler/move-optimizer.cc
diff --git a/src/compiler/move-optimizer.cc b/src/compiler/move-optimizer.cc
index 330f32f65d584fba17d082f3d72924c9d97a6f6c..74fcabe161aec71c761b05e1218aa30475f1ce63 100644
--- a/src/compiler/move-optimizer.cc
+++ b/src/compiler/move-optimizer.cc
@@ -8,69 +8,70 @@ namespace v8 {
 namespace internal {
 namespace compiler {
+namespace {
+
+struct MoveKey {
Benedikt Meurer
2015/01/28 05:18:51
Once the issues mentioned below are addressed, how
dcarney
2015/02/02 09:23:33
Done.

+  InstructionOperand src;
+  InstructionOperand dst;
+};
+
+struct OperandLess {
Benedikt Meurer
2015/01/28 05:18:51
Can you add an operator< (and operator>) to InstructionOperand
dcarney
2015/02/02 09:23:33
now that it's 64 bits, it might be a little weird,

+  bool operator()(const InstructionOperand a,
+                  const InstructionOperand b) const {
+    if (a.kind() == b.kind()) return a.index() < b.index();
+    return a.kind() < b.kind();
+  }
+};
+
+struct MoveKeyLess {
+  bool operator()(const MoveKey& a, const MoveKey& b) const {
+    OperandLess operand_less;
+    if (a.src.Equals(&b.src)) return operand_less(a.dst, b.dst);
+    return operand_less(a.src, b.src);
+  }
+};
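
For reference, a minimal sketch of the operator< suggested above, assuming it simply mirrors OperandLess and the kind()/index() accessors the patch already relies on (whether this stays readable once the operand is a 64-bit encoding is the concern raised in the reply):

    // Hypothetical sketch only -- not part of this patch. Orders operands by
    // kind first, then by index, exactly as OperandLess does.
    inline bool operator<(const InstructionOperand& a,
                          const InstructionOperand& b) {
      if (a.kind() != b.kind()) return a.kind() < b.kind();
      return a.index() < b.index();
    }
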
+
+typedef std::set<InstructionOperand, std::less<InstructionOperand>,
Benedikt Meurer
2015/01/28 05:18:51
I guess this shouldn't be std::less<InstructionOperand>
dcarney
2015/02/02 09:23:33
Done.

+                 zone_allocator<InstructionOperand>> OperandSetSuper;
+typedef std::map<MoveKey, unsigned, MoveKeyLess,
Benedikt Meurer
2015/01/28 05:18:51
How about adding ZoneSet and ZoneMap to zone-containers.h?
dcarney
2015/02/02 09:23:33
Done.

+                 zone_allocator<std::pair<const MoveKey, unsigned>>>
+    MoveMapSuper;
+
+
+class MoveMap : public MoveMapSuper {
+ public:
+  explicit MoveMap(Zone* zone)
+      : MoveMapSuper(key_compare(), allocator_type(zone)) {}
+};
+
+
+class OperandSet : public OperandSetSuper {
+ public:
+  explicit OperandSet(Zone* zone)
+      : OperandSetSuper(key_compare(), allocator_type(zone)) {}
+};
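
A minimal sketch of the ZoneSet/ZoneMap helpers the "Done." refers to, assuming they land in zone-containers.h and wrap std::set/std::map with zone_allocator as above; V8's actual definitions may differ:

    // Hypothetical sketch only. Zone-allocated drop-ins for std::set/std::map.
    #include <map>
    #include <set>

    #include "src/zone-allocator.h"

    template <typename K, typename Compare = std::less<K>>
    class ZoneSet : public std::set<K, Compare, zone_allocator<K>> {
     public:
      explicit ZoneSet(Zone* zone)
          : std::set<K, Compare, zone_allocator<K>>(Compare(),
                                                    zone_allocator<K>(zone)) {}
    };

    template <typename K, typename V, typename Compare = std::less<K>>
    class ZoneMap
        : public std::map<K, V, Compare, zone_allocator<std::pair<const K, V>>> {
     public:
      explicit ZoneMap(Zone* zone)
          : std::map<K, V, Compare, zone_allocator<std::pair<const K, V>>>(
                Compare(), zone_allocator<std::pair<const K, V>>(zone)) {}
    };

With those aliases in place, MoveMap and OperandSet above reduce to ZoneMap<MoveKey, unsigned, MoveKeyLess> and ZoneSet<InstructionOperand, OperandLess>.
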
+
+}  // namespace
+
+
 MoveOptimizer::MoveOptimizer(Zone* local_zone, InstructionSequence* code)
     : local_zone_(local_zone),
       code_(code),
+      to_finalize_(local_zone),
       temp_vector_0_(local_zone),
       temp_vector_1_(local_zone) {}
 void MoveOptimizer::Run() {
-  // First smash all consecutive moves into the left most move slot.
   for (auto* block : code()->instruction_blocks()) {
-    GapInstruction* prev_gap = nullptr;
-    for (int index = block->code_start(); index < block->code_end(); ++index) {
-      auto instr = code()->instructions()[index];
-      if (!instr->IsGapMoves()) {
-        if (instr->IsSourcePosition() || instr->IsNop()) continue;
-        FinalizeMoves(&temp_vector_0_, &temp_vector_1_, prev_gap);
-        prev_gap = nullptr;
-        continue;
-      }
-      auto gap = GapInstruction::cast(instr);
-      // Find first non-empty slot.
-      int i = GapInstruction::FIRST_INNER_POSITION;
-      for (; i <= GapInstruction::LAST_INNER_POSITION; i++) {
-        auto move = gap->parallel_moves()[i];
-        if (move == nullptr) continue;
-        auto move_ops = move->move_operands();
-        auto op = move_ops->begin();
-        for (; op != move_ops->end(); ++op) {
-          if (!op->IsRedundant()) break;
-        }
-        if (op == move_ops->end()) {
-          move_ops->Rewind(0);  // Clear this redundant move.
-        } else {
-          break;  // Found index of first non-redundant move.
-        }
-      }
-      // Nothing to do here.
-      if (i == GapInstruction::LAST_INNER_POSITION + 1) {
-        if (prev_gap != nullptr) {
-          // Slide prev_gap down so we always know where to look for it.
-          std::swap(prev_gap->parallel_moves()[0], gap->parallel_moves()[0]);
-          prev_gap = gap;
-        }
-        continue;
-      }
-      // Move the first non-empty gap to position 0.
-      std::swap(gap->parallel_moves()[0], gap->parallel_moves()[i]);
-      auto left = gap->parallel_moves()[0];
-      // Compress everything into position 0.
-      for (++i; i <= GapInstruction::LAST_INNER_POSITION; ++i) {
-        auto move = gap->parallel_moves()[i];
-        if (move == nullptr) continue;
-        CompressMoves(&temp_vector_0_, left, move);
-      }
-      if (prev_gap != nullptr) {
-        // Smash left into prev_gap, killing left.
-        auto pred_moves = prev_gap->parallel_moves()[0];
-        CompressMoves(&temp_vector_0_, pred_moves, left);
-        std::swap(prev_gap->parallel_moves()[0], gap->parallel_moves()[0]);
-      }
-      prev_gap = gap;
-    }
-    FinalizeMoves(&temp_vector_0_, &temp_vector_1_, prev_gap);
+    CompressBlock(block);
+  }
+  for (auto block : code()->instruction_blocks()) {
+    if (block->PredecessorCount() <= 1) continue;
+    OptimizeMerge(block);
+  }
+  for (auto gap : to_finalize_) {
+    FinalizeMoves(gap);
   }
 }
@@ -106,21 +107,21 @@ void MoveOptimizer::CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
                                   ParallelMove* right) {
   DCHECK(eliminated->empty());
   auto move_ops = right->move_operands();
-  // Modify the right moves in place and collect moves that will be killed by
-  // merging the two gaps.
-  for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
-    if (op->IsRedundant()) continue;
-    MoveOperands* to_eliminate = PrepareInsertAfter(left, op, code_zone());
-    if (to_eliminate != nullptr) {
-      eliminated->push_back(to_eliminate);
+  if (!left->move_operands()->is_empty()) {
+    // Modify the right moves in place and collect moves that will be killed by
+    // merging the two gaps.
+    for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
+      if (op->IsRedundant()) continue;
+      MoveOperands* to_eliminate = PrepareInsertAfter(left, op, code_zone());
+      if (to_eliminate != nullptr) eliminated->push_back(to_eliminate);
     }
+    // Eliminate dead moves. Must happen before insertion of new moves as the
+    // contents of eliminated are pointers into a list.
+    for (auto to_eliminate : *eliminated) {
+      to_eliminate->Eliminate();
+    }
+    eliminated->clear();
   }
-  // Eliminate dead moves. Must happen before insertion of new moves as the
-  // contents of eliminated are pointers into a list.
-  for (auto to_eliminate : *eliminated) {
-    to_eliminate->Eliminate();
-  }
-  eliminated->clear();
   // Add all possibly modified moves from right side.
   for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
     if (op->IsRedundant()) continue;
@@ -131,13 +132,185 @@ void MoveOptimizer::CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
 }
-void MoveOptimizer::FinalizeMoves(MoveOpVector* loads, MoveOpVector* new_moves,
-                                  GapInstruction* gap) {
-  DCHECK(loads->empty());
-  DCHECK(new_moves->empty());
-  if (gap == nullptr) return;
-  // Split multiple loads of the same constant or stack slot off into the second
-  // slot and keep remaining moves in the first slot.
+static bool GapsCanMoveOver(Instruction* instr) {
Benedikt Meurer
2015/01/28 05:18:51
Nit: use anonymous namespace instead of static.

+  DCHECK(!instr->IsGapMoves());
+  return instr->IsSourcePosition() || instr->IsNop();
+}
+
+
+static int FindFirstNonEmptySlot(GapInstruction* gap) {
Benedikt Meurer
2015/01/28 05:18:51
Nit: use anonymous namespace instead of static.
dcarney
2015/02/02 09:23:33
Done.

+  int i = GapInstruction::FIRST_INNER_POSITION;
+  for (; i <= GapInstruction::LAST_INNER_POSITION; i++) {
+    auto move = gap->parallel_moves()[i];
+    if (move == nullptr) continue;
+    auto move_ops = move->move_operands();
+    auto op = move_ops->begin();
+    for (; op != move_ops->end(); ++op) {
+      if (!op->IsRedundant()) break;
+      op->Eliminate();
+    }
+    if (op != move_ops->end()) break;  // Found non-redundant move.
+    move_ops->Rewind(0);  // Clear this redundant move.
+  }
+  return i;
+}
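
For the two nits above the fix is mechanical: drop the static keyword and let both helpers live in the anonymous namespace already opened at the top of the file. A sketch for GapsCanMoveOver (FindFirstNonEmptySlot follows the same pattern):

    namespace {

    // True when gap moves may be accumulated across this instruction: only
    // source positions and nops are safe to move over.
    bool GapsCanMoveOver(Instruction* instr) {
      DCHECK(!instr->IsGapMoves());
      return instr->IsSourcePosition() || instr->IsNop();
    }

    }  // namespace
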
+
+
+// Smash all consecutive moves into the left most move slot and accumulate them
+// as much as possible across instructions.
+void MoveOptimizer::CompressBlock(InstructionBlock* block) {
+  auto temp_vector = temp_vector_0();
+  DCHECK(temp_vector.empty());
+  GapInstruction* prev_gap = nullptr;
+  for (int index = block->code_start(); index < block->code_end(); ++index) {
+    auto instr = code()->instructions()[index];
+    if (!instr->IsGapMoves()) {
+      if (GapsCanMoveOver(instr)) continue;
+      if (prev_gap != nullptr) to_finalize_.push_back(prev_gap);
+      prev_gap = nullptr;
+      continue;
+    }
+    auto gap = GapInstruction::cast(instr);
+    int i = FindFirstNonEmptySlot(gap);
+    // Nothing to do here.
+    if (i == GapInstruction::LAST_INNER_POSITION + 1) {
+      if (prev_gap != nullptr) {
+        // Slide prev_gap down so we always know where to look for it.
+        std::swap(prev_gap->parallel_moves()[0], gap->parallel_moves()[0]);
+        prev_gap = gap;
+      }
+      continue;
+    }
+    // Move the first non-empty gap to position 0.
+    std::swap(gap->parallel_moves()[0], gap->parallel_moves()[i]);
+    auto left = gap->parallel_moves()[0];
+    // Compress everything into position 0.
+    for (++i; i <= GapInstruction::LAST_INNER_POSITION; ++i) {
+      auto move = gap->parallel_moves()[i];
+      if (move == nullptr) continue;
+      CompressMoves(&temp_vector, left, move);
+    }
+    if (prev_gap != nullptr) {
+      // Smash left into prev_gap, killing left.
+      auto pred_moves = prev_gap->parallel_moves()[0];
+      CompressMoves(&temp_vector, pred_moves, left);
+      // Slide prev_gap down so we always know where to look for it.
+      std::swap(prev_gap->parallel_moves()[0], gap->parallel_moves()[0]);
+    }
+    prev_gap = gap;
+  }
+  if (prev_gap != nullptr) to_finalize_.push_back(prev_gap);
+}
+
+
+GapInstruction* MoveOptimizer::LastGap(InstructionBlock* block) {
+  int gap_index = block->last_instruction_index() - 1;
+  auto instr = code()->instructions()[gap_index];
+  return GapInstruction::cast(instr);
+}
+
+
+void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
+  DCHECK(block->PredecessorCount() > 1);
+  // Ensure that the last instruction in all incoming blocks doesn't contain
+  // things that would prevent moving gap moves across it.
+  for (auto pred_index : block->predecessors()) {
+    auto pred = code()->InstructionBlockAt(pred_index);
+    auto last_instr = code()->instructions()[pred->last_instruction_index()];
+    DCHECK(!last_instr->IsGapMoves());
+    if (last_instr->IsSourcePosition()) continue;
+    if (last_instr->IsCall()) return;
+    if (last_instr->TempCount() != 0) return;
+    if (last_instr->OutputCount() != 0) return;
+    for (size_t i = 0; i < last_instr->InputCount(); ++i) {
+      auto op = last_instr->InputAt(i);
+      if (!op->IsConstant() && !op->IsImmediate()) return;
+    }
+  }
+  // TODO(dcarney): pass a ZonePool down for this?
+  MoveMap move_map(local_zone());
+  size_t correct_counts = 0;
+  // Accumulate set of shared moves.
+  for (auto pred_index : block->predecessors()) {
+    auto pred = code()->InstructionBlockAt(pred_index);
+    auto gap = LastGap(pred);
+    if (gap->parallel_moves()[0] == nullptr ||
+        gap->parallel_moves()[0]->move_operands()->is_empty()) {
+      return;
+    }
+    auto move_ops = gap->parallel_moves()[0]->move_operands();
+    for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
+      if (op->IsRedundant()) continue;
+      auto src = *op->source();
+      auto dst = *op->destination();
+      MoveKey key = {src, dst};
+      auto res = move_map.insert(std::make_pair(key, 1));
+      if (!res.second) {
+        res.first->second++;
+        if (res.first->second == block->PredecessorCount()) {
+          correct_counts++;
+        }
+      }
+    }
+  }
+  if (move_map.empty() || correct_counts != move_map.size()) return;
+  // Find insertion point.
+  GapInstruction* gap = nullptr;
+  for (int i = block->first_instruction_index();
+       i <= block->last_instruction_index(); ++i) {
+    auto instr = code()->instructions()[i];
+    if (instr->IsGapMoves()) {
+      gap = GapInstruction::cast(instr);
+      continue;
+    }
+    if (!GapsCanMoveOver(instr)) break;
+  }
+  DCHECK(gap != nullptr);
+  bool gap_initialized = true;
+  if (gap->parallel_moves()[0] == nullptr ||
+      gap->parallel_moves()[0]->move_operands()->is_empty()) {
+    to_finalize_.push_back(gap);
+  } else {
+    // Will compress after insertion.
+    gap_initialized = false;
+    std::swap(gap->parallel_moves()[0], gap->parallel_moves()[1]);
+  }
+  auto move = gap->GetOrCreateParallelMove(
+      static_cast<GapInstruction::InnerPosition>(0), code_zone());
+  // Delete relevant entries in predecessors and move everything to block.
+  bool first_iteration = true;
+  for (auto pred_index : block->predecessors()) {
+    auto pred = code()->InstructionBlockAt(pred_index);
+    auto gap = LastGap(pred);
+    auto move_ops = gap->parallel_moves()[0]->move_operands();
+    for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
+      if (op->IsRedundant()) continue;
+      MoveKey key = {*op->source(), *op->destination()};
+      auto it = move_map.find(key);
+      DCHECK(it != move_map.end());
+      if (first_iteration) {
+        move->AddMove(op->source(), op->destination(), code_zone());
+      }
+      op->Eliminate();
+    }
+    first_iteration = false;
+  }
+  // Compress.
+  if (!gap_initialized) {
+    MoveOpVector temp_vector(local_zone());
+    CompressMoves(&temp_vector, gap->parallel_moves()[0],
+                  gap->parallel_moves()[1]);
+  }
+}
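
To make OptimizeMerge concrete, a hypothetical example (block and operand names are illustrative, not from the patch): a gap move that ends every predecessor of a merge block is deleted from the predecessors and inserted once at the head of the merge block:

    Before:                          After:
      B1: gap: rax = [sp+8]            B1: gap: (empty)
          jmp B3                           jmp B3
      B2: gap: rax = [sp+8]            B2: gap: (empty)
          jmp B3                           jmp B3
      B3: (merge)                      B3: gap: rax = [sp+8]

The hoist only fires when every accumulated move occurs in all predecessors (correct_counts == move_map.size()) and each predecessor's final instruction has no outputs, no temps, and only constant or immediate inputs, so the gap can safely move across it.
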
+
+
+// Split multiple loads of the same constant or stack slot off into the second
+// slot and keep remaining moves in the first slot.
+void MoveOptimizer::FinalizeMoves(GapInstruction* gap) {
+  auto loads = temp_vector_0();
+  DCHECK(loads.empty());
+  auto new_moves = temp_vector_1();
+  DCHECK(new_moves.empty());
   auto move_ops = gap->parallel_moves()[0]->move_operands();
   for (auto move = move_ops->begin(); move != move_ops->end(); ++move) {
     if (move->IsRedundant()) {
@@ -149,7 +322,7 @@ void MoveOptimizer::FinalizeMoves(MoveOpVector* loads, MoveOpVector* new_moves,
       continue;
     // Search for existing move to this slot.
     MoveOperands* found = nullptr;
-    for (auto load : *loads) {
+    for (auto load : loads) {
       if (load->source()->Equals(move->source())) {
         found = load;
         break;
@@ -157,7 +330,7 @@ void MoveOptimizer::FinalizeMoves(MoveOpVector* loads, MoveOpVector* new_moves,
     }
     // Not found so insert.
     if (found == nullptr) {
-      loads->push_back(move);
+      loads.push_back(move);
       // Replace source with copy for later use.
       auto dest = move->destination();
       move->set_destination(new (code_zone())
@@ -180,24 +353,24 @@ void MoveOptimizer::FinalizeMoves(MoveOpVector* loads, MoveOpVector* new_moves,
     }
     // move from load destination.
     move->set_source(found->destination());
-    new_moves->push_back(move);
+    new_moves.push_back(move);
   }
-  loads->clear();
-  if (new_moves->empty()) return;
+  loads.clear();
+  if (new_moves.empty()) return;
   // Insert all new moves into slot 1.
   auto slot_1 = gap->GetOrCreateParallelMove(
       static_cast<GapInstruction::InnerPosition>(1), code_zone());
   DCHECK(slot_1->move_operands()->is_empty());
   slot_1->move_operands()->AddBlock(MoveOperands(nullptr, nullptr),
-                                    static_cast<int>(new_moves->size()),
+                                    static_cast<int>(new_moves.size()),
                                     code_zone());
   auto it = slot_1->move_operands()->begin();
-  for (auto new_move : *new_moves) {
+  for (auto new_move : new_moves) {
     std::swap(*new_move, *it);
     ++it;
   }
   DCHECK_EQ(it, slot_1->move_operands()->end());
-  new_moves->clear();
+  new_moves.clear();
 }
} // namespace compiler |
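
And a sketch of the split FinalizeMoves performs, again with illustrative operands: repeated loads of the same constant keep one load in slot 0, and the remaining destinations become register-to-register moves in slot 1, sourced from the first load's destination:

    Before (slot 0):                 After (slot 0):      After (slot 1):
      r1 = const(42)                   r1 = const(42)       r2 = r1
      r2 = const(42)                                        r3 = r1
      r3 = const(42)
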