| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/move-optimizer.h" | 5 #include "src/compiler/move-optimizer.h" |
| 6 | 6 |
| 7 namespace v8 { | 7 namespace v8 { |
| 8 namespace internal { | 8 namespace internal { |
| 9 namespace compiler { | 9 namespace compiler { |
| 10 | 10 |
| (...skipping 47 matching lines...) |
| 58 if (operands.count(move->source()) > 0 || | 58 if (operands.count(move->source()) > 0 || |
| 59 operands.count(move->destination()) > 0) { | 59 operands.count(move->destination()) > 0) { |
| 60 return false; | 60 return false; |
| 61 } | 61 } |
| 62 } | 62 } |
| 63 } | 63 } |
| 64 return true; | 64 return true; |
| 65 } | 65 } |
| 66 | 66 |
| 67 | 67 |
| 68 int FindFirstNonEmptySlot(Instruction* instr) { | 68 int FindFirstNonEmptySlot(const Instruction* instr) { |
| 69 int i = Instruction::FIRST_GAP_POSITION; | 69 int i = Instruction::FIRST_GAP_POSITION; |
| 70 for (; i <= Instruction::LAST_GAP_POSITION; i++) { | 70 for (; i <= Instruction::LAST_GAP_POSITION; i++) { |
| 71 auto moves = instr->parallel_moves()[i]; | 71 ParallelMove* moves = instr->parallel_moves()[i]; |
| 72 if (moves == nullptr) continue; | 72 if (moves == nullptr) continue; |
| 73 for (auto move : *moves) { | 73 for (MoveOperands* move : *moves) { |
| 74 if (!move->IsRedundant()) return i; | 74 if (!move->IsRedundant()) return i; |
| 75 move->Eliminate(); | 75 move->Eliminate(); |
| 76 } | 76 } |
| 77 moves->clear(); // Clear these redundant moves. | 77 moves->clear(); // Clear these redundant moves. |
| 78 } | 78 } |
| 79 return i; | 79 return i; |
| 80 } | 80 } |
| 81 | 81 |
| 82 } // namespace | 82 } // namespace |
| 83 | 83 |
| 84 | 84 |
| 85 MoveOptimizer::MoveOptimizer(Zone* local_zone, InstructionSequence* code) | 85 MoveOptimizer::MoveOptimizer(Zone* local_zone, InstructionSequence* code) |
| 86 : local_zone_(local_zone), | 86 : local_zone_(local_zone), |
| 87 code_(code), | 87 code_(code), |
| 88 to_finalize_(local_zone), | 88 to_finalize_(local_zone), |
| 89 temp_vector_0_(local_zone), | 89 temp_vector_0_(local_zone), |
| 90 temp_vector_1_(local_zone) {} | 90 temp_vector_1_(local_zone) {} |
| 91 | 91 |
| 92 | 92 |
| 93 void MoveOptimizer::Run() { | 93 void MoveOptimizer::Run() { |
| 94 for (InstructionBlock* block : code()->instruction_blocks()) { | 94 for (InstructionBlock* block : code()->instruction_blocks()) { |
| 95 CompressBlock(block); | 95 CompressBlock(block); |
| 96 } | 96 } |
| 97 for (InstructionBlock* block : code()->instruction_blocks()) { | 97 for (InstructionBlock* block : code()->instruction_blocks()) { |
| 98 if (block->PredecessorCount() <= 1) continue; | 98 if (block->PredecessorCount() <= 1) continue; |
| 99 if (!block->IsDeferred()) { | 99 if (!block->IsDeferred()) { |
| 100 bool has_only_deferred = true; | 100 bool has_only_deferred = true; |
| 101 for (RpoNumber pred_id : block->predecessors()) { | 101 for (RpoNumber& pred_id : block->predecessors()) { |
| 102 if (!code()->InstructionBlockAt(pred_id)->IsDeferred()) { | 102 if (!code()->InstructionBlockAt(pred_id)->IsDeferred()) { |
| 103 has_only_deferred = false; | 103 has_only_deferred = false; |
| 104 break; | 104 break; |
| 105 } | 105 } |
| 106 } | 106 } |
| 107 // OptimizeMerge would pull common moves down into this block. If | 107 // OptimizeMerge would pull common moves down into this block. If |
| 108 // those moves occur in deferred blocks and the closest common | 108 // those moves occur in deferred blocks and the closest common |
| 109 // successor is not deferred, we would lose the optimization of only | 109 // successor is not deferred, we would lose the optimization of only |
| 110 // spilling/filling in deferred blocks. | 110 // spilling/filling in deferred blocks. |
| 111 if (has_only_deferred) continue; | 111 if (has_only_deferred) continue; |
| 112 } | 112 } |
| 113 OptimizeMerge(block); | 113 OptimizeMerge(block); |
| 114 } | 114 } |
| 115 for (auto gap : to_finalize_) { | 115 for (Instruction* gap : to_finalize_) { |
| 116 FinalizeMoves(gap); | 116 FinalizeMoves(gap); |
| 117 } | 117 } |
| 118 } | 118 } |
| 119 | 119 |
| 120 | 120 |
| 121 void MoveOptimizer::CompressMoves(MoveOpVector* eliminated, ParallelMove* left, | 121 void MoveOptimizer::CompressMoves(MoveOpVector* eliminated, ParallelMove* left, |
| 122 ParallelMove* right) { | 122 ParallelMove* right) { |
| 123 DCHECK(eliminated->empty()); | 123 DCHECK(eliminated->empty()); |
| 124 if (!left->empty()) { | 124 if (!left->empty()) { |
| 125 // Modify the right moves in place and collect moves that will be killed by | 125 // Modify the right moves in place and collect moves that will be killed by |
| 126 // merging the two gaps. | 126 // merging the two gaps. |
| 127 for (auto move : *right) { | 127 for (MoveOperands* move : *right) { |
| 128 if (move->IsRedundant()) continue; | 128 if (move->IsRedundant()) continue; |
| 129 auto to_eliminate = left->PrepareInsertAfter(move); | 129 MoveOperands* to_eliminate = left->PrepareInsertAfter(move); |
| 130 if (to_eliminate != nullptr) eliminated->push_back(to_eliminate); | 130 if (to_eliminate != nullptr) eliminated->push_back(to_eliminate); |
| 131 } | 131 } |
| 132 // Eliminate dead moves. | 132 // Eliminate dead moves. |
| 133 for (auto to_eliminate : *eliminated) { | 133 for (MoveOperands* to_eliminate : *eliminated) { |
| 134 to_eliminate->Eliminate(); | 134 to_eliminate->Eliminate(); |
| 135 } | 135 } |
| 136 eliminated->clear(); | 136 eliminated->clear(); |
| 137 } | 137 } |
| 138 // Add all possibly modified moves from right side. | 138 // Add all possibly modified moves from right side. |
| 139 for (auto move : *right) { | 139 for (MoveOperands* move : *right) { |
| 140 if (move->IsRedundant()) continue; | 140 if (move->IsRedundant()) continue; |
| 141 left->push_back(move); | 141 left->push_back(move); |
| 142 } | 142 } |
| 143 // Nuke right. | 143 // Nuke right. |
| 144 right->clear(); | 144 right->clear(); |
| 145 } | 145 } |
| 146 | 146 |
| 147 | 147 |
| 148 // Smash all consecutive moves into the leftmost move slot and accumulate them | 148 // Smash all consecutive moves into the leftmost move slot and accumulate them |
| 149 // as much as possible across instructions. | 149 // as much as possible across instructions. |
| 150 void MoveOptimizer::CompressBlock(InstructionBlock* block) { | 150 void MoveOptimizer::CompressBlock(InstructionBlock* block) { |
| 151 auto temp_vector = temp_vector_0(); | 151 MoveOpVector& temp_vector = temp_vector_0(); |
| 152 DCHECK(temp_vector.empty()); | 152 DCHECK(temp_vector.empty()); |
| 153 Instruction* prev_instr = nullptr; | 153 Instruction* prev_instr = nullptr; |
| 154 for (int index = block->code_start(); index < block->code_end(); ++index) { | 154 for (int index = block->code_start(); index < block->code_end(); ++index) { |
| 155 auto instr = code()->instructions()[index]; | 155 Instruction* instr = code()->instructions()[index]; |
| 156 int i = FindFirstNonEmptySlot(instr); | 156 int i = FindFirstNonEmptySlot(instr); |
| 157 if (i <= Instruction::LAST_GAP_POSITION) { | 157 if (i <= Instruction::LAST_GAP_POSITION) { |
| 158 // Move the first non-empty gap to position 0. | 158 // Move the first non-empty gap to position 0. |
| 159 std::swap(instr->parallel_moves()[0], instr->parallel_moves()[i]); | 159 std::swap(instr->parallel_moves()[0], instr->parallel_moves()[i]); |
| 160 auto left = instr->parallel_moves()[0]; | 160 ParallelMove* left = instr->parallel_moves()[0]; |
| 161 // Compress everything into position 0. | 161 // Compress everything into position 0. |
| 162 for (++i; i <= Instruction::LAST_GAP_POSITION; ++i) { | 162 for (++i; i <= Instruction::LAST_GAP_POSITION; ++i) { |
| 163 auto move = instr->parallel_moves()[i]; | 163 ParallelMove* move = instr->parallel_moves()[i]; |
| 164 if (move == nullptr) continue; | 164 if (move == nullptr) continue; |
| 165 CompressMoves(&temp_vector, left, move); | 165 CompressMoves(&temp_vector, left, move); |
| 166 } | 166 } |
| 167 if (prev_instr != nullptr) { | 167 if (prev_instr != nullptr) { |
| 168 // Smash left into prev_instr, killing left. | 168 // Smash left into prev_instr, killing left. |
| 169 auto pred_moves = prev_instr->parallel_moves()[0]; | 169 ParallelMove* pred_moves = prev_instr->parallel_moves()[0]; |
| 170 CompressMoves(&temp_vector, pred_moves, left); | 170 CompressMoves(&temp_vector, pred_moves, left); |
| 171 } | 171 } |
| 172 } | 172 } |
| 173 if (prev_instr != nullptr) { | 173 if (prev_instr != nullptr) { |
| 174 // Slide prev_instr down so we always know where to look for it. | 174 // Slide prev_instr down so we always know where to look for it. |
| 175 std::swap(prev_instr->parallel_moves()[0], instr->parallel_moves()[0]); | 175 std::swap(prev_instr->parallel_moves()[0], instr->parallel_moves()[0]); |
| 176 } | 176 } |
| 177 prev_instr = instr->parallel_moves()[0] == nullptr ? nullptr : instr; | 177 prev_instr = instr->parallel_moves()[0] == nullptr ? nullptr : instr; |
| 178 if (GapsCanMoveOver(instr, local_zone())) continue; | 178 if (GapsCanMoveOver(instr, local_zone())) continue; |
| 179 if (prev_instr != nullptr) { | 179 if (prev_instr != nullptr) { |
| 180 to_finalize_.push_back(prev_instr); | 180 to_finalize_.push_back(prev_instr); |
| 181 prev_instr = nullptr; | 181 prev_instr = nullptr; |
| 182 } | 182 } |
| 183 } | 183 } |
| 184 if (prev_instr != nullptr) { | 184 if (prev_instr != nullptr) { |
| 185 to_finalize_.push_back(prev_instr); | 185 to_finalize_.push_back(prev_instr); |
| 186 } | 186 } |
| 187 } | 187 } |
| 188 | 188 |
| 189 | 189 |
| 190 Instruction* MoveOptimizer::LastInstruction(InstructionBlock* block) { | 190 const Instruction* MoveOptimizer::LastInstruction( |
| 191 const InstructionBlock* block) const { |
| 191 return code()->instructions()[block->last_instruction_index()]; | 192 return code()->instructions()[block->last_instruction_index()]; |
| 192 } | 193 } |
| 193 | 194 |
| 194 | 195 |
| 195 void MoveOptimizer::OptimizeMerge(InstructionBlock* block) { | 196 void MoveOptimizer::OptimizeMerge(InstructionBlock* block) { |
| 196 DCHECK(block->PredecessorCount() > 1); | 197 DCHECK(block->PredecessorCount() > 1); |
| 197 // Ensure that the last instruction in each incoming block doesn't | 198 // Ensure that the last instruction in each incoming block doesn't |
| 198 // contain anything that would prevent moving gap moves across it. | 199 // contain anything that would prevent moving gap moves across it. |
| 199 for (auto pred_index : block->predecessors()) { | 200 for (RpoNumber& pred_index : block->predecessors()) { |
| 200 auto pred = code()->InstructionBlockAt(pred_index); | 201 const InstructionBlock* pred = code()->InstructionBlockAt(pred_index); |
| 201 auto last_instr = code()->instructions()[pred->last_instruction_index()]; | 202 const Instruction* last_instr = |
| 203 code()->instructions()[pred->last_instruction_index()]; |
| 202 if (last_instr->IsCall()) return; | 204 if (last_instr->IsCall()) return; |
| 203 if (last_instr->TempCount() != 0) return; | 205 if (last_instr->TempCount() != 0) return; |
| 204 if (last_instr->OutputCount() != 0) return; | 206 if (last_instr->OutputCount() != 0) return; |
| 205 for (size_t i = 0; i < last_instr->InputCount(); ++i) { | 207 for (size_t i = 0; i < last_instr->InputCount(); ++i) { |
| 206 auto op = last_instr->InputAt(i); | 208 const InstructionOperand* op = last_instr->InputAt(i); |
| 207 if (!op->IsConstant() && !op->IsImmediate()) return; | 209 if (!op->IsConstant() && !op->IsImmediate()) return; |
| 208 } | 210 } |
| 209 } | 211 } |
| 210 // TODO(dcarney): pass a ZonePool down for this? | 212 // TODO(dcarney): pass a ZonePool down for this? |
| 211 MoveMap move_map(local_zone()); | 213 MoveMap move_map(local_zone()); |
| 212 size_t correct_counts = 0; | 214 size_t correct_counts = 0; |
| 213 // Accumulate set of shared moves. | 215 // Accumulate set of shared moves. |
| 214 for (auto pred_index : block->predecessors()) { | 216 for (RpoNumber& pred_index : block->predecessors()) { |
| 215 auto pred = code()->InstructionBlockAt(pred_index); | 217 const InstructionBlock* pred = code()->InstructionBlockAt(pred_index); |
| 216 auto instr = LastInstruction(pred); | 218 const Instruction* instr = LastInstruction(pred); |
| 217 if (instr->parallel_moves()[0] == nullptr || | 219 if (instr->parallel_moves()[0] == nullptr || |
| 218 instr->parallel_moves()[0]->empty()) { | 220 instr->parallel_moves()[0]->empty()) { |
| 219 return; | 221 return; |
| 220 } | 222 } |
| 221 for (auto move : *instr->parallel_moves()[0]) { | 223 for (const MoveOperands* move : *instr->parallel_moves()[0]) { |
| 222 if (move->IsRedundant()) continue; | 224 if (move->IsRedundant()) continue; |
| 223 auto src = move->source(); | 225 InstructionOperand src = move->source(); |
| 224 auto dst = move->destination(); | 226 InstructionOperand dst = move->destination(); |
| 225 MoveKey key = {src, dst}; | 227 MoveKey key = {src, dst}; |
| 226 auto res = move_map.insert(std::make_pair(key, 1)); | 228 auto res = move_map.insert(std::make_pair(key, 1)); |
| 227 if (!res.second) { | 229 if (!res.second) { |
| 228 res.first->second++; | 230 res.first->second++; |
| 229 if (res.first->second == block->PredecessorCount()) { | 231 if (res.first->second == block->PredecessorCount()) { |
| 230 correct_counts++; | 232 correct_counts++; |
| 231 } | 233 } |
| 232 } | 234 } |
| 233 } | 235 } |
| 234 } | 236 } |
| 235 if (move_map.empty() || correct_counts != move_map.size()) return; | 237 if (move_map.empty() || correct_counts != move_map.size()) return; |
| 236 // Find insertion point. | 238 // Find insertion point. |
| 237 Instruction* instr = nullptr; | 239 Instruction* instr = nullptr; |
| 238 for (int i = block->first_instruction_index(); | 240 for (int i = block->first_instruction_index(); |
| 239 i <= block->last_instruction_index(); ++i) { | 241 i <= block->last_instruction_index(); ++i) { |
| 240 instr = code()->instructions()[i]; | 242 instr = code()->instructions()[i]; |
| 241 if (!GapsCanMoveOver(instr, local_zone()) || !instr->AreMovesRedundant()) | 243 if (!GapsCanMoveOver(instr, local_zone()) || !instr->AreMovesRedundant()) |
| 242 break; | 244 break; |
| 243 } | 245 } |
| 244 DCHECK(instr != nullptr); | 246 DCHECK(instr != nullptr); |
| 245 bool gap_initialized = true; | 247 bool gap_initialized = true; |
| 246 if (instr->parallel_moves()[0] == nullptr || | 248 if (instr->parallel_moves()[0] == nullptr || |
| 247 instr->parallel_moves()[0]->empty()) { | 249 instr->parallel_moves()[0]->empty()) { |
| 248 to_finalize_.push_back(instr); | 250 to_finalize_.push_back(instr); |
| 249 } else { | 251 } else { |
| 250 // Will compress after insertion. | 252 // Will compress after insertion. |
| 251 gap_initialized = false; | 253 gap_initialized = false; |
| 252 std::swap(instr->parallel_moves()[0], instr->parallel_moves()[1]); | 254 std::swap(instr->parallel_moves()[0], instr->parallel_moves()[1]); |
| 253 } | 255 } |
| 254 auto moves = instr->GetOrCreateParallelMove( | 256 ParallelMove* moves = instr->GetOrCreateParallelMove( |
| 255 static_cast<Instruction::GapPosition>(0), code_zone()); | 257 static_cast<Instruction::GapPosition>(0), code_zone()); |
| 256 // Delete relevant entries in predecessors and move everything to block. | 258 // Delete relevant entries in predecessors and move everything to block. |
| 257 bool first_iteration = true; | 259 bool first_iteration = true; |
| 258 for (auto pred_index : block->predecessors()) { | 260 for (RpoNumber& pred_index : block->predecessors()) { |
| 259 auto pred = code()->InstructionBlockAt(pred_index); | 261 const InstructionBlock* pred = code()->InstructionBlockAt(pred_index); |
| 260 for (auto move : *LastInstruction(pred)->parallel_moves()[0]) { | 262 for (MoveOperands* move : *LastInstruction(pred)->parallel_moves()[0]) { |
| 261 if (move->IsRedundant()) continue; | 263 if (move->IsRedundant()) continue; |
| 262 MoveKey key = {move->source(), move->destination()}; | 264 MoveKey key = {move->source(), move->destination()}; |
| 263 auto it = move_map.find(key); | 265 auto it = move_map.find(key); |
| 264 USE(it); | 266 USE(it); |
| 265 DCHECK(it != move_map.end()); | 267 DCHECK(it != move_map.end()); |
| 266 if (first_iteration) { | 268 if (first_iteration) { |
| 267 moves->AddMove(move->source(), move->destination()); | 269 moves->AddMove(move->source(), move->destination()); |
| 268 } | 270 } |
| 269 move->Eliminate(); | 271 move->Eliminate(); |
| 270 } | 272 } |
| (...skipping 22 matching lines...) |
| 293 if (!IsSlot(a->destination()) && IsSlot(b->destination())) return true; | 295 if (!IsSlot(a->destination()) && IsSlot(b->destination())) return true; |
| 294 return a->destination().CompareCanonicalized(b->destination()); | 296 return a->destination().CompareCanonicalized(b->destination()); |
| 295 } | 297 } |
| 296 | 298 |
| 297 } // namespace | 299 } // namespace |
| 298 | 300 |
| 299 | 301 |
| 300 // Split multiple loads of the same constant or stack slot off into the second | 302 // Split multiple loads of the same constant or stack slot off into the second |
| 301 // slot and keep remaining moves in the first slot. | 303 // slot and keep remaining moves in the first slot. |
| 302 void MoveOptimizer::FinalizeMoves(Instruction* instr) { | 304 void MoveOptimizer::FinalizeMoves(Instruction* instr) { |
| 303 auto loads = temp_vector_0(); | 305 MoveOpVector& loads = temp_vector_0(); |
| 304 DCHECK(loads.empty()); | 306 DCHECK(loads.empty()); |
| 305 // Find all the loads. | 307 // Find all the loads. |
| 306 for (auto move : *instr->parallel_moves()[0]) { | 308 for (MoveOperands* move : *instr->parallel_moves()[0]) { |
| 307 if (move->IsRedundant()) continue; | 309 if (move->IsRedundant()) continue; |
| 308 if (move->source().IsConstant() || IsSlot(move->source())) { | 310 if (move->source().IsConstant() || IsSlot(move->source())) { |
| 309 loads.push_back(move); | 311 loads.push_back(move); |
| 310 } | 312 } |
| 311 } | 313 } |
| 312 if (loads.empty()) return; | 314 if (loads.empty()) return; |
| 313 // Group the loads by source, moving the preferred destination to the | 315 // Group the loads by source, moving the preferred destination to the |
| 314 // beginning of the group. | 316 // beginning of the group. |
| 315 std::sort(loads.begin(), loads.end(), LoadCompare); | 317 std::sort(loads.begin(), loads.end(), LoadCompare); |
| 316 MoveOperands* group_begin = nullptr; | 318 MoveOperands* group_begin = nullptr; |
| 317 for (auto load : loads) { | 319 for (MoveOperands* load : loads) { |
| 318 // New group. | 320 // New group. |
| 319 if (group_begin == nullptr || | 321 if (group_begin == nullptr || |
| 320 !load->source().EqualsCanonicalized(group_begin->source())) { | 322 !load->source().EqualsCanonicalized(group_begin->source())) { |
| 321 group_begin = load; | 323 group_begin = load; |
| 322 continue; | 324 continue; |
| 323 } | 325 } |
| 324 // Nothing to be gained from splitting here. | 326 // Nothing to be gained from splitting here. |
| 325 if (IsSlot(group_begin->destination())) continue; | 327 if (IsSlot(group_begin->destination())) continue; |
| 326 // Insert new move into slot 1. | 328 // Insert new move into slot 1. |
| 327 auto slot_1 = instr->GetOrCreateParallelMove( | 329 ParallelMove* slot_1 = instr->GetOrCreateParallelMove( |
| 328 static_cast<Instruction::GapPosition>(1), code_zone()); | 330 static_cast<Instruction::GapPosition>(1), code_zone()); |
| 329 slot_1->AddMove(group_begin->destination(), load->destination()); | 331 slot_1->AddMove(group_begin->destination(), load->destination()); |
| 330 load->Eliminate(); | 332 load->Eliminate(); |
| 331 } | 333 } |
| 332 loads.clear(); | 334 loads.clear(); |
| 333 } | 335 } |
| 334 | 336 |
| 335 } // namespace compiler | 337 } // namespace compiler |
| 336 } // namespace internal | 338 } // namespace internal |
| 337 } // namespace v8 | 339 } // namespace v8 |
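
A note for readers following the diff: the core of CompressMoves above is ParallelMove::PrepareInsertAfter, which rewrites an incoming move's source through the earlier gap's writes and kills any earlier move whose destination the incoming move overwrites. Below is a minimal standalone sketch of that merge rule, using simplified stand-in types (plain ints as operand slots, a vector as the parallel move) rather than V8's actual InstructionOperand/MoveOperands API:

#include <cstdio>
#include <vector>

// Stand-in for one pending move between abstract operand slots.
struct Move {
  int src;
  int dst;
  bool eliminated;
  bool IsRedundant() const { return eliminated || src == dst; }
  void Eliminate() { eliminated = true; }
};

using ParallelMove = std::vector<Move>;

// Fold `right` (the later gap) into `left` (the earlier gap).
void CompressMoves(ParallelMove* left, ParallelMove* right) {
  for (Move& incoming : *right) {
    if (incoming.IsRedundant()) continue;
    // `left` runs first: if a left move already wrote incoming's source
    // slot, incoming effectively reads that move's source instead.
    for (Move& existing : *left) {
      if (!existing.IsRedundant() && existing.dst == incoming.src) {
        incoming.src = existing.src;
        break;
      }
    }
    // A left move writing the slot that incoming overwrites is dead.
    for (Move& existing : *left) {
      if (!existing.IsRedundant() && existing.dst == incoming.dst) {
        existing.Eliminate();
        break;
      }
    }
    left->push_back(incoming);
  }
  right->clear();
}

int main() {
  ParallelMove left = {{0, 1, false}};                  // r0 -> r1
  ParallelMove right = {{1, 2, false}, {3, 1, false}};  // r1 -> r2, r3 -> r1
  CompressMoves(&left, &right);
  for (const Move& m : left) {
    if (!m.IsRedundant()) std::printf("r%d -> r%d\n", m.src, m.dst);
  }
  // Prints "r0 -> r2" and "r3 -> r1"; the original r0 -> r1 is killed
  // because the merged gap overwrites r1 anyway.
  return 0;
}

The break in each inner scan is safe because a well-formed parallel move writes each destination at most once.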
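FinalizeMoves, for its part, groups loads by source and rewrites duplicate loads of the same constant or stack slot as copies from the first load's destination, emitted into the instruction's second gap slot. Here is a simplified sketch of that grouping, again with illustrative stand-in types; the real code also prefers a non-slot destination as the group leader (LoadCompare) and skips the split when the leader's destination is itself a stack slot:

#include <algorithm>
#include <string>
#include <vector>

// Stand-in move with string operands ("K" for a constant, "r1", ...).
struct Move {
  std::string src;
  std::string dst;
};

// Keep the first load of each source group in slot 0; turn later loads
// of the same source into copies from the leader's destination, placed
// in slot 1.
void SplitLoads(std::vector<Move>* slot0, std::vector<Move>* slot1) {
  // Group loads by source; tie-break on destination for determinism.
  std::sort(slot0->begin(), slot0->end(),
            [](const Move& a, const Move& b) {
              return a.src != b.src ? a.src < b.src : a.dst < b.dst;
            });
  std::vector<Move> kept;
  bool have_group = false;
  Move group_begin;
  for (const Move& load : *slot0) {
    if (!have_group || load.src != group_begin.src) {
      group_begin = load;  // new group: materialize this load once
      have_group = true;
      kept.push_back(load);
      continue;
    }
    slot1->push_back({group_begin.dst, load.dst});
  }
  *slot0 = kept;
}

int main() {
  std::vector<Move> slot0 = {{"K", "r1"}, {"K", "r2"}, {"K", "r3"}};
  std::vector<Move> slot1;
  SplitLoads(&slot0, &slot1);
  // slot0 now holds only K -> r1; slot1 holds r1 -> r2 and r1 -> r3,
  // so the constant is loaded once and then copied register-to-register.
  return 0;
}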