| OLD | NEW |
| 1 // Copyright 2015 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/instruction-scheduler.h" | 5 #include "src/compiler/instruction-scheduler.h" |
| 6 | 6 |
| 7 #include "src/base/adapters.h" | 7 #include "src/base/adapters.h" |
| 8 #include "src/base/utils/random-number-generator.h" | 8 #include "src/base/utils/random-number-generator.h" |
| 9 | 9 |
| 10 namespace v8 { | 10 namespace v8 { |
| (...skipping 65 matching lines...) |
| 76 | 76 |
| 77 | 77 |
| 78 InstructionScheduler::InstructionScheduler(Zone* zone, | 78 InstructionScheduler::InstructionScheduler(Zone* zone, |
| 79 InstructionSequence* sequence) | 79 InstructionSequence* sequence) |
| 80 : zone_(zone), | 80 : zone_(zone), |
| 81 sequence_(sequence), | 81 sequence_(sequence), |
| 82 graph_(zone), | 82 graph_(zone), |
| 83 last_side_effect_instr_(nullptr), | 83 last_side_effect_instr_(nullptr), |
| 84 pending_loads_(zone), | 84 pending_loads_(zone), |
| 85 last_live_in_reg_marker_(nullptr), | 85 last_live_in_reg_marker_(nullptr), |
| 86 last_deopt_(nullptr) { | 86 last_deopt_(nullptr), |
| 87 } | 87 operands_map_(zone) {} |
| 88 | 88 |
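For context on the new initializer: operands_map_ is presumably declared in instruction-scheduler.h roughly as follows (the header is not part of this diff, so the exact declaration is an assumption):

    // Presumed member declaration (not shown in this diff): a zone-allocated
    // map from a virtual register to the last node in the current basic
    // block that defines it.
    ZoneMap<int32_t, ScheduleGraphNode*> operands_map_;

Zone allocation matches the scheduler's other per-block containers (graph_, pending_loads_), so the map's storage is reclaimed with the zone rather than entry by entry.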
| 89 | 89 |
| 90 void InstructionScheduler::StartBlock(RpoNumber rpo) { | 90 void InstructionScheduler::StartBlock(RpoNumber rpo) { |
| 91 DCHECK(graph_.empty()); | 91 DCHECK(graph_.empty()); |
| 92 DCHECK(last_side_effect_instr_ == nullptr); | 92 DCHECK(last_side_effect_instr_ == nullptr); |
| 93 DCHECK(pending_loads_.empty()); | 93 DCHECK(pending_loads_.empty()); |
| 94 DCHECK(last_live_in_reg_marker_ == nullptr); | 94 DCHECK(last_live_in_reg_marker_ == nullptr); |
| 95 DCHECK(last_deopt_ == nullptr); | 95 DCHECK(last_deopt_ == nullptr); |
| 96 DCHECK(operands_map_.empty()); |
| 96 sequence()->StartBlock(rpo); | 97 sequence()->StartBlock(rpo); |
| 97 } | 98 } |
| 98 | 99 |
| 99 | 100 |
| 100 void InstructionScheduler::EndBlock(RpoNumber rpo) { | 101 void InstructionScheduler::EndBlock(RpoNumber rpo) { |
| 101 if (FLAG_turbo_stress_instruction_scheduling) { | 102 if (FLAG_turbo_stress_instruction_scheduling) { |
| 102 ScheduleBlock<StressSchedulerQueue>(); | 103 ScheduleBlock<StressSchedulerQueue>(); |
| 103 } else { | 104 } else { |
| 104 ScheduleBlock<CriticalPathFirstQueue>(); | 105 ScheduleBlock<CriticalPathFirstQueue>(); |
| 105 } | 106 } |
| 106 sequence()->EndBlock(rpo); | 107 sequence()->EndBlock(rpo); |
| 107 graph_.clear(); | 108 graph_.clear(); |
| 108 last_side_effect_instr_ = nullptr; | 109 last_side_effect_instr_ = nullptr; |
| 109 pending_loads_.clear(); | 110 pending_loads_.clear(); |
| 110 last_live_in_reg_marker_ = nullptr; | 111 last_live_in_reg_marker_ = nullptr; |
| 111 last_deopt_ = nullptr; | 112 last_deopt_ = nullptr; |
| 113 operands_map_.clear(); |
| 112 } | 114 } |
| 113 | 115 |
| 114 | 116 |
| 115 void InstructionScheduler::AddInstruction(Instruction* instr) { | 117 void InstructionScheduler::AddInstruction(Instruction* instr) { |
| 116 ScheduleGraphNode* new_node = new (zone()) ScheduleGraphNode(zone(), instr); | 118 ScheduleGraphNode* new_node = new (zone()) ScheduleGraphNode(zone(), instr); |
| 117 | 119 |
| 118 if (IsBlockTerminator(instr)) { | 120 if (IsBlockTerminator(instr)) { |
| 119 // Make sure that basic block terminators are not moved by adding them | 121 // Make sure that basic block terminators are not moved by adding them |
| 120 // as a successor of every instruction. | 122 // as a successor of every instruction. |
| 121 for (ScheduleGraphNode* node : graph_) { | 123 for (ScheduleGraphNode* node : graph_) { |
| (...skipping 36 matching lines...) |
| 158 } else if (instr->IsDeoptimizeCall()) { | 160 } else if (instr->IsDeoptimizeCall()) { |
| 159 // Ensure that deopts are not reordered with respect to side-effect | 161 // Ensure that deopts are not reordered with respect to side-effect |
| 160 // instructions. | 162 // instructions. |
| 161 if (last_side_effect_instr_ != nullptr) { | 163 if (last_side_effect_instr_ != nullptr) { |
| 162 last_side_effect_instr_->AddSuccessor(new_node); | 164 last_side_effect_instr_->AddSuccessor(new_node); |
| 163 } | 165 } |
| 164 last_deopt_ = new_node; | 166 last_deopt_ = new_node; |
| 165 } | 167 } |
| 166 | 168 |
| 167 // Look for operand dependencies. | 169 // Look for operand dependencies. |
| 168 for (ScheduleGraphNode* node : graph_) { | 170 for (size_t i = 0; i < instr->InputCount(); ++i) { |
| 169 if (HasOperandDependency(node->instruction(), instr)) { | 171 const InstructionOperand* input = instr->InputAt(i); |
| 170 node->AddSuccessor(new_node); | 172 if (input->IsUnallocated()) { |
| 173 int32_t vreg = UnallocatedOperand::cast(input)->virtual_register(); |
| 174 auto it = operands_map_.find(vreg); |
| 175 if (it != operands_map_.end()) { |
| 176 it->second->AddSuccessor(new_node); |
| 177 } |
| 178 } |
| 179 } |
| 180 |
| 181 // Record the virtual registers defined by this instruction. |
| 182 for (size_t i = 0; i < instr->OutputCount(); ++i) { |
| 183 const InstructionOperand* output = instr->OutputAt(i); |
| 184 if (output->IsUnallocated()) { |
| 185 operands_map_[UnallocatedOperand::cast(output)->virtual_register()] = |
| 186 new_node; |
| 187 } else if (output->IsConstant()) { |
| 188 operands_map_[ConstantOperand::cast(output)->virtual_register()] = |
| 189 new_node; |
| 171 } | 190 } |
| 172 } | 191 } |
| 173 } | 192 } |
| 174 | 193 |
| 175 graph_.push_back(new_node); | 194 graph_.push_back(new_node); |
| 176 } | 195 } |
| 177 | 196 |
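The heart of this change: instead of testing each new instruction against every node already in the graph (the removed HasOperandDependency below, a quadratic number of operand comparisons per block), each input operand is now looked up directly in a map from virtual register to defining node, and both output kinds the old helper handled (UnallocatedOperand and ConstantOperand) are still recorded as definitions. A self-contained sketch of the technique, using standard containers in place of V8's zone-allocated ones (illustrative names, not the V8 API):

    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    // Minimal stand-in for the scheduler's graph node, for illustration only.
    struct Node {
      std::vector<int32_t> inputs;   // virtual registers this instruction reads
      std::vector<int32_t> outputs;  // virtual registers it writes
      std::vector<Node*> successors;
    };

    // Adds a node to the dependency graph in O(#operands) expected time:
    // each input creates a read-after-write edge from the last definition
    // of that virtual register, then the node's own outputs become the
    // latest definitions.
    void AddNode(std::vector<Node*>& graph,
                 std::unordered_map<int32_t, Node*>& def_map, Node* node) {
      for (int32_t vreg : node->inputs) {
        auto it = def_map.find(vreg);
        if (it != def_map.end()) it->second->successors.push_back(node);
      }
      for (int32_t vreg : node->outputs) def_map[vreg] = node;
      graph.push_back(node);
    }

Keeping only the most recent definition per virtual register suffices because the instruction sequence is still SSA-like at this stage: a virtual register has a single definition in the block, so every reader depends on that one node.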
| 178 | 197 |
| 179 template <typename QueueType> | 198 template <typename QueueType> |
| 180 void InstructionScheduler::ScheduleBlock() { | 199 void InstructionScheduler::ScheduleBlock() { |
| (...skipping 129 matching lines...) |
| 310 TARGET_ARCH_OPCODE_LIST(CASE) | 329 TARGET_ARCH_OPCODE_LIST(CASE) |
| 311 #undef CASE | 330 #undef CASE |
| 312 return GetTargetInstructionFlags(instr); | 331 return GetTargetInstructionFlags(instr); |
| 313 } | 332 } |
| 314 | 333 |
| 315 UNREACHABLE(); | 334 UNREACHABLE(); |
| 316 return kNoOpcodeFlags; | 335 return kNoOpcodeFlags; |
| 317 } | 336 } |
| 318 | 337 |
| 319 | 338 |
| 320 bool InstructionScheduler::HasOperandDependency( | |
| 321 const Instruction* instr1, const Instruction* instr2) const { | |
| 322 for (size_t i = 0; i < instr1->OutputCount(); ++i) { | |
| 323 for (size_t j = 0; j < instr2->InputCount(); ++j) { | |
| 324 const InstructionOperand* output = instr1->OutputAt(i); | |
| 325 const InstructionOperand* input = instr2->InputAt(j); | |
| 326 | |
| 327 if (output->IsUnallocated() && input->IsUnallocated() && | |
| 328 (UnallocatedOperand::cast(output)->virtual_register() == | |
| 329 UnallocatedOperand::cast(input)->virtual_register())) { | |
| 330 return true; | |
| 331 } | |
| 332 | |
| 333 if (output->IsConstant() && input->IsUnallocated() && | |
| 334 (ConstantOperand::cast(output)->virtual_register() == | |
| 335 UnallocatedOperand::cast(input)->virtual_register())) { | |
| 336 return true; | |
| 337 } | |
| 338 } | |
| 339 } | |
| 340 | |
| 341 // TODO(bafsa): Do we need to look for anti-dependencies/output-dependencies? | |
| 342 | |
| 343 return false; | |
| 344 } | |
| 345 | |
| 346 | |
| 347 bool InstructionScheduler::IsBlockTerminator(const Instruction* instr) const { | 339 bool InstructionScheduler::IsBlockTerminator(const Instruction* instr) const { |
| 348 return ((GetInstructionFlags(instr) & kIsBlockTerminator) || | 340 return ((GetInstructionFlags(instr) & kIsBlockTerminator) || |
| 349 (instr->flags_mode() == kFlags_branch)); | 341 (instr->flags_mode() == kFlags_branch)); |
| 350 } | 342 } |
| 351 | 343 |
| 352 | 344 |
| 353 void InstructionScheduler::ComputeTotalLatencies() { | 345 void InstructionScheduler::ComputeTotalLatencies() { |
| 354 for (ScheduleGraphNode* node : base::Reversed(graph_)) { | 346 for (ScheduleGraphNode* node : base::Reversed(graph_)) { |
| 355 int max_latency = 0; | 347 int max_latency = 0; |
| 356 | 348 |
| 357 for (ScheduleGraphNode* successor : node->successors()) { | 349 for (ScheduleGraphNode* successor : node->successors()) { |
| 358 DCHECK(successor->total_latency() != -1); | 350 DCHECK(successor->total_latency() != -1); |
| 359 if (successor->total_latency() > max_latency) { | 351 if (successor->total_latency() > max_latency) { |
| 360 max_latency = successor->total_latency(); | 352 max_latency = successor->total_latency(); |
| 361 } | 353 } |
| 362 } | 354 } |
| 363 | 355 |
| 364 node->set_total_latency(max_latency + node->latency()); | 356 node->set_total_latency(max_latency + node->latency()); |
| 365 } | 357 } |
| 366 } | 358 } |
| 367 | 359 |
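A note on ComputeTotalLatencies: graph_ holds nodes in emission order and every dependency edge points forward, so the reverse iteration guarantees each successor's total latency is computed before its predecessors'. The result is the length of the longest latency path from a node to the end of the block, which the critical-path-first queue can use as its priority. A minimal sketch of the same recurrence (illustrative types, not the V8 API):

    #include <algorithm>
    #include <vector>

    struct SchedNode {
      int latency = 1;         // estimated cost of this instruction
      int total_latency = -1;  // longest latency path from here to block end
      std::vector<SchedNode*> successors;
    };

    // total_latency(n) = latency(n) + max over successors s of total_latency(s);
    // visiting nodes last-to-first ensures successors are already computed.
    void ComputeTotalLatencies(std::vector<SchedNode*>& graph) {
      for (auto it = graph.rbegin(); it != graph.rend(); ++it) {
        int max_latency = 0;
        for (SchedNode* s : (*it)->successors)
          max_latency = std::max(max_latency, s->total_latency);
        (*it)->total_latency = max_latency + (*it)->latency;
      }
    }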
| 368 } // namespace compiler | 360 } // namespace compiler |
| 369 } // namespace internal | 361 } // namespace internal |
| 370 } // namespace v8 | 362 } // namespace v8 |