OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
| 5 #include <deque> |
| 6 #include <queue> |
| 7 |
5 #include "src/compiler/scheduler.h" | 8 #include "src/compiler/scheduler.h" |
6 | 9 |
7 #include "src/compiler/graph.h" | 10 #include "src/compiler/graph.h" |
8 #include "src/compiler/graph-inl.h" | 11 #include "src/compiler/graph-inl.h" |
9 #include "src/compiler/node.h" | 12 #include "src/compiler/node.h" |
10 #include "src/compiler/node-properties.h" | 13 #include "src/compiler/node-properties.h" |
11 #include "src/compiler/node-properties-inl.h" | 14 #include "src/compiler/node-properties-inl.h" |
12 #include "src/data-flow.h" | 15 #include "src/data-flow.h" |
13 | 16 |
14 namespace v8 { | 17 namespace v8 { |
15 namespace internal { | 18 namespace internal { |
16 namespace compiler { | 19 namespace compiler { |
17 | 20 |
18 static inline void Trace(const char* msg, ...) { | 21 static inline void Trace(const char* msg, ...) { |
19 if (FLAG_trace_turbo_scheduler) { | 22 if (FLAG_trace_turbo_scheduler) { |
20 va_list arguments; | 23 va_list arguments; |
21 va_start(arguments, msg); | 24 va_start(arguments, msg); |
22 base::OS::VPrint(msg, arguments); | 25 base::OS::VPrint(msg, arguments); |
23 va_end(arguments); | 26 va_end(arguments); |
24 } | 27 } |
25 } | 28 } |
26 | 29 |
27 | 30 |
28 // Internal class to build a control flow graph (i.e. the basic blocks and edges | 31 // Internal class to build a control flow graph (i.e. the basic blocks and edges |
29 // between them within a Schedule) from the node graph. | 32 // between them within a Schedule) from the node graph. |
30 // Visits the graph backwards from end, following control and data edges. | 33 // Visits the control edges of the graph backwards from end in order to find |
31 class CFGBuilder : public NullNodeVisitor { | 34 // the connected control subgraph, needed for scheduling. |
| 35 class CFGBuilder { |
32 public: | 36 public: |
| 37 Scheduler* scheduler_; |
33 Schedule* schedule_; | 38 Schedule* schedule_; |
| 39 std::queue<Node*, std::deque<Node*, NodePtrZoneAllocator> > queue_; |
| 40 NodeVector control_; |
34 | 41 |
35 explicit CFGBuilder(Schedule* schedule) : schedule_(schedule) {} | 42 CFGBuilder(Zone* zone, Scheduler* scheduler) |
| 43 : scheduler_(scheduler), |
| 44 schedule_(scheduler->schedule_), |
| 45 queue_(std::deque<Node*, NodePtrZoneAllocator>( |
| 46 NodePtrZoneAllocator(zone))), |
| 47 control_(NodeVector::allocator_type(zone)) {} |
36 | 48 |
37 // Create the blocks for the schedule in pre-order. | 49 // Run the control flow graph construction algorithm by walking the graph |
38 void PreEdge(Node* from, int index, Node* node) { | 50 // backwards from end through control edges, building and connecting the |
| 51 // basic blocks for control nodes. |
| 52 void Run() { |
| 53 Graph* graph = scheduler_->graph_; |
| 54 FixNode(schedule_->start(), graph->start()); |
| 55 Queue(graph->end()); |
| 56 |
| 57 while (!queue_.empty()) { // Breadth-first backwards traversal. |
| 58 Node* node = queue_.front(); |
| 59 queue_.pop(); |
| 60 int max = NodeProperties::PastControlIndex(node); |
| 61 for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) { |
| 62 Queue(node->InputAt(i)); |
| 63 } |
| 64 } |
| 65 |
| 66 for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) { |
| 67 ConnectBlocks(*i); // Connect block to its predecessor/successors. |
| 68 } |
| 69 |
| 70 FixNode(schedule_->end(), graph->end()); |
| 71 } |
| 72 |
| 73 void FixNode(BasicBlock* block, Node* node) { |
| 74 schedule_->AddNode(block, node); |
| 75 scheduler_->GetData(node)->is_connected_control_ = true; |
| 76 scheduler_->GetData(node)->placement_ = Scheduler::kFixed; |
| 77 } |
| 78 |
| 79 void Queue(Node* node) { |
| 80     // Mark the connected control nodes as they are queued. |
| 81 Scheduler::SchedulerData* data = scheduler_->GetData(node); |
| 82 if (!data->is_connected_control_) { |
| 83 BuildBlocks(node); |
| 84 queue_.push(node); |
| 85 control_.push_back(node); |
| 86 data->is_connected_control_ = true; |
| 87 } |
| 88 } |
| 89 |
| 90 void BuildBlocks(Node* node) { |
39 switch (node->opcode()) { | 91 switch (node->opcode()) { |
40 case IrOpcode::kLoop: | 92 case IrOpcode::kLoop: |
41 case IrOpcode::kMerge: | 93 case IrOpcode::kMerge: |
42 BuildBlockForNode(node); | 94 BuildBlockForNode(node); |
43 break; | 95 break; |
44 case IrOpcode::kBranch: | 96 case IrOpcode::kBranch: |
45 BuildBlocksForSuccessors(node, IrOpcode::kIfTrue, IrOpcode::kIfFalse); | 97 BuildBlocksForSuccessors(node, IrOpcode::kIfTrue, IrOpcode::kIfFalse); |
46 break; | 98 break; |
47 case IrOpcode::kCall: | 99 case IrOpcode::kCall: |
48 if (OperatorProperties::CanLazilyDeoptimize(node->op())) { | 100 if (OperatorProperties::CanLazilyDeoptimize(node->op())) { |
49 BuildBlocksForSuccessors(node, IrOpcode::kContinuation, | 101 BuildBlocksForSuccessors(node, IrOpcode::kContinuation, |
50 IrOpcode::kLazyDeoptimization); | 102 IrOpcode::kLazyDeoptimization); |
51 } | 103 } |
52 break; | 104 break; |
53 default: | 105 default: |
54 break; | 106 break; |
55 } | 107 } |
56 } | 108 } |
57 | 109 |
58 // Connect the blocks after nodes have been visited in post-order. | 110 void ConnectBlocks(Node* node) { |
59 GenericGraphVisit::Control Post(Node* node) { | |
60 switch (node->opcode()) { | 111 switch (node->opcode()) { |
61 case IrOpcode::kLoop: | 112 case IrOpcode::kLoop: |
62 case IrOpcode::kMerge: | 113 case IrOpcode::kMerge: |
63 ConnectMerge(node); | 114 ConnectMerge(node); |
64 break; | 115 break; |
65 case IrOpcode::kBranch: | 116 case IrOpcode::kBranch: |
| 117 scheduler_->schedule_root_nodes_.push_back(node); |
66 ConnectBranch(node); | 118 ConnectBranch(node); |
67 break; | 119 break; |
68 case IrOpcode::kDeoptimize: | 120 case IrOpcode::kDeoptimize: |
| 121 scheduler_->schedule_root_nodes_.push_back(node); |
69       ConnectDeoptimize(node); |      122       ConnectDeoptimize(node); |
|        break; |
70     case IrOpcode::kCall: |      123     case IrOpcode::kCall: |
71 if (OperatorProperties::CanLazilyDeoptimize(node->op())) { | 124 if (OperatorProperties::CanLazilyDeoptimize(node->op())) { |
| 125 scheduler_->schedule_root_nodes_.push_back(node); |
72 ConnectCall(node); | 126 ConnectCall(node); |
73 } | 127 } |
74 break; | 128 break; |
75 case IrOpcode::kReturn: | 129 case IrOpcode::kReturn: |
| 130 scheduler_->schedule_root_nodes_.push_back(node); |
76 ConnectReturn(node); | 131 ConnectReturn(node); |
77 break; | 132 break; |
78 default: | 133 default: |
79 break; | 134 break; |
80 } | 135 } |
81 return GenericGraphVisit::CONTINUE; | |
82 } | 136 } |
83 | 137 |
84 void BuildBlockForNode(Node* node) { | 138 void BuildBlockForNode(Node* node) { |
85 if (schedule_->block(node) == NULL) { | 139 if (schedule_->block(node) == NULL) { |
86 BasicBlock* block = schedule_->NewBasicBlock(); | 140 BasicBlock* block = schedule_->NewBasicBlock(); |
87 Trace("Create block B%d for node %d (%s)\n", block->id(), node->id(), | 141 Trace("Create block B%d for #%d:%s\n", block->id(), node->id(), |
88 IrOpcode::Mnemonic(node->opcode())); | 142 node->op()->mnemonic()); |
89 schedule_->AddNode(block, node); | 143 FixNode(block, node); |
90 } | 144 } |
91 } | 145 } |
92 | 146 |
93 void BuildBlocksForSuccessors(Node* node, IrOpcode::Value a, | 147 void BuildBlocksForSuccessors(Node* node, IrOpcode::Value a, |
94 IrOpcode::Value b) { | 148 IrOpcode::Value b) { |
95 Node* successors[2]; | 149 Node* successors[2]; |
96 CollectSuccessorProjections(node, successors, a, b); | 150 CollectSuccessorProjections(node, successors, a, b); |
97 BuildBlockForNode(successors[0]); | 151 BuildBlockForNode(successors[0]); |
98 BuildBlockForNode(successors[1]); | 152 BuildBlockForNode(successors[1]); |
99 } | 153 } |
(...skipping 86 matching lines...) |
186 TraceConnect(call, call_block, successor_blocks[0]); | 240 TraceConnect(call, call_block, successor_blocks[0]); |
187 TraceConnect(call, call_block, successor_blocks[1]); | 241 TraceConnect(call, call_block, successor_blocks[1]); |
188 | 242 |
189 schedule_->AddCall(call_block, call, successor_blocks[0], | 243 schedule_->AddCall(call_block, call, successor_blocks[0], |
190 successor_blocks[1]); | 244 successor_blocks[1]); |
191 } | 245 } |
192 | 246 |
193 void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) { | 247 void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) { |
194 DCHECK_NE(NULL, block); | 248 DCHECK_NE(NULL, block); |
195 if (succ == NULL) { | 249 if (succ == NULL) { |
196 Trace("node %d (%s) in block %d -> end\n", node->id(), | 250 Trace("Connect #%d:%s, B%d -> end\n", node->id(), node->op()->mnemonic(), |
197 IrOpcode::Mnemonic(node->opcode()), block->id()); | 251 block->id()); |
198 } else { | 252 } else { |
199 Trace("node %d (%s) in block %d -> block %d\n", node->id(), | 253 Trace("Connect #%d:%s, B%d -> B%d\n", node->id(), node->op()->mnemonic(), |
200 IrOpcode::Mnemonic(node->opcode()), block->id(), succ->id()); | 254 block->id(), succ->id()); |
201 } | 255 } |
202 } | 256 } |
203 }; | 257 }; |
204 | 258 |
205 | 259 |
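To make the control-flow walk above concrete, here is a minimal standalone sketch (simplified types, not the V8 API) of what CFGBuilder::Run and Queue do together: start from the end node, follow only control inputs, and record each control node exactly once. The std::set stands in for the is_connected_control_ flag.

    #include <cstddef>
    #include <queue>
    #include <set>
    #include <vector>

    struct Node {
      int id;
      std::vector<Node*> control_inputs;  // the FirstControlIndex..PastControlIndex range
    };

    // Breadth-first backwards traversal from 'end', collecting the connected
    // control subgraph in discovery order (the role of control_ above).
    std::vector<Node*> CollectConnectedControl(Node* end) {
      std::vector<Node*> control;
      std::set<int> seen;
      std::queue<Node*> queue;
      queue.push(end);
      seen.insert(end->id);
      while (!queue.empty()) {
        Node* node = queue.front();
        queue.pop();
        control.push_back(node);
        for (std::size_t i = 0; i < node->control_inputs.size(); i++) {
          Node* input = node->control_inputs[i];
          if (seen.insert(input->id).second) queue.push(input);
        }
      }
      return control;
    }

One difference from this sketch: the real builder also calls BuildBlocks on each node as it is queued, so block creation happens during the same traversal, and ConnectBlocks runs as a second pass over the collected nodes.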
206 Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule) | 260 Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule) |
207 : zone_(zone), | 261 : zone_(zone), |
208 graph_(graph), | 262 graph_(graph), |
209 schedule_(schedule), | 263 schedule_(schedule), |
210 unscheduled_uses_(IntVector::allocator_type(zone)), | |
211 scheduled_nodes_(NodeVectorVector::allocator_type(zone)), | 264 scheduled_nodes_(NodeVectorVector::allocator_type(zone)), |
212 schedule_root_nodes_(NodeVector::allocator_type(zone)), | 265 schedule_root_nodes_(NodeVector::allocator_type(zone)), |
213 schedule_early_rpo_index_(IntVector::allocator_type(zone)) {} | 266 node_data_(zone_allocator<SchedulerData>(zone)), |
| 267 has_floating_control_(false) { |
| 268   // Initialize per-node data. |
| 269 SchedulerData zero = {0, 0, false, false, kUnknown}; |
| 270 node_data_.resize(graph_->NodeCount(), zero); |
| 271 } |
214 | 272 |
215 | 273 |
216 Schedule* Scheduler::ComputeSchedule(Graph* graph) { | 274 Schedule* Scheduler::ComputeSchedule(Graph* graph) { |
217 Zone tmp_zone(graph->zone()->isolate()); | 275 Schedule* schedule; |
218 Schedule* schedule = new (graph->zone()) Schedule(graph->zone()); | 276 bool had_floating_control = false; |
| 277 do { |
| 278 Zone tmp_zone(graph->zone()->isolate()); |
| 279 schedule = new (graph->zone()) Schedule(graph->zone()); |
| 280 Scheduler scheduler(&tmp_zone, graph, schedule); |
219 | 281 |
220 Scheduler::ComputeCFG(graph, schedule); | 282 scheduler.BuildCFG(); |
221 | 283 |
222 Scheduler scheduler(&tmp_zone, graph, schedule); | 284 Scheduler::ComputeSpecialRPO(schedule); |
| 285 scheduler.GenerateImmediateDominatorTree(); |
223 | 286 |
224 scheduler.PrepareAuxiliaryNodeData(); | 287 scheduler.PrepareUses(); |
| 288 scheduler.ScheduleEarly(); |
| 289 scheduler.ScheduleLate(); |
225 | 290 |
226 scheduler.PrepareAuxiliaryBlockData(); | 291 had_floating_control = scheduler.ConnectFloatingControl(); |
227 | 292 } while (had_floating_control); |
228 Scheduler::ComputeSpecialRPO(schedule); | |
229 scheduler.GenerateImmediateDominatorTree(); | |
230 | |
231 scheduler.PrepareUses(); | |
232 scheduler.ScheduleEarly(); | |
233 scheduler.ScheduleLate(); | |
234 | 293 |
235 return schedule; | 294 return schedule; |
236 } | 295 } |
237 | 296 |
238 | 297 |
239 bool Scheduler::IsBasicBlockBegin(Node* node) { | 298 Scheduler::Placement Scheduler::GetPlacement(Node* node) { |
240 return OperatorProperties::IsBasicBlockBegin(node->op()); | 299 SchedulerData* data = GetData(node); |
| 300 if (data->placement_ == kUnknown) { // Compute placement, once, on demand. |
| 301 switch (node->opcode()) { |
| 302 case IrOpcode::kParameter: |
| 303 // Parameters are always fixed to the start node. |
| 304 data->placement_ = kFixed; |
| 305 break; |
| 306 case IrOpcode::kPhi: |
| 307 case IrOpcode::kEffectPhi: { |
| 308 // Phis and effect phis are fixed if their control inputs are. |
| 309 data->placement_ = GetPlacement(NodeProperties::GetControlInput(node)); |
| 310 break; |
| 311 } |
| 312 #define DEFINE_FLOATING_CONTROL_CASE(V) case IrOpcode::k##V: |
| 313 CONTROL_OP_LIST(DEFINE_FLOATING_CONTROL_CASE) |
| 314 #undef DEFINE_FLOATING_CONTROL_CASE |
| 315 { |
| 316 // Control nodes that were not control-reachable from end may float. |
| 317 data->placement_ = kSchedulable; |
| 318 if (!data->is_connected_control_) { |
| 319 data->is_floating_control_ = true; |
| 320 has_floating_control_ = true; |
| 321 Trace("Floating control found: #%d:%s\n", node->id(), |
| 322 node->op()->mnemonic()); |
| 323 } |
| 324 break; |
| 325 } |
| 326 default: |
| 327 data->placement_ = kSchedulable; |
| 328 break; |
| 329 } |
| 330 } |
| 331 return data->placement_; |
241 } | 332 } |
242 | 333 |
243 | 334 |
244 bool Scheduler::HasFixedSchedulePosition(Node* node) { | 335 void Scheduler::BuildCFG() { |
245 IrOpcode::Value opcode = node->opcode(); | 336 Trace("---------------- CREATING CFG ------------------\n"); |
246 return (IrOpcode::IsControlOpcode(opcode)) || | 337 CFGBuilder cfg_builder(zone_, this); |
247 opcode == IrOpcode::kParameter || opcode == IrOpcode::kEffectPhi || | 338 cfg_builder.Run(); |
248 opcode == IrOpcode::kPhi; | 339 // Initialize per-block data. |
| 340 scheduled_nodes_.resize(schedule_->BasicBlockCount(), |
| 341 NodeVector(NodeVector::allocator_type(zone_))); |
249 } | 342 } |
250 | 343 |
251 | 344 |
252 bool Scheduler::IsScheduleRoot(Node* node) { | |
253 IrOpcode::Value opcode = node->opcode(); | |
254 return opcode == IrOpcode::kEnd || opcode == IrOpcode::kEffectPhi || | |
255 opcode == IrOpcode::kPhi; | |
256 } | |
257 | |
258 | |
259 void Scheduler::ComputeCFG(Graph* graph, Schedule* schedule) { | |
260 CFGBuilder cfg_builder(schedule); | |
261 Trace("---------------- CREATING CFG ------------------\n"); | |
262 schedule->AddNode(schedule->start(), graph->start()); | |
263 graph->VisitNodeInputsFromEnd(&cfg_builder); | |
264 schedule->AddNode(schedule->end(), graph->end()); | |
265 } | |
266 | |
267 | |
268 void Scheduler::PrepareAuxiliaryNodeData() { | |
269 unscheduled_uses_.resize(graph_->NodeCount(), 0); | |
270 schedule_early_rpo_index_.resize(graph_->NodeCount(), 0); | |
271 } | |
272 | |
273 | |
274 void Scheduler::PrepareAuxiliaryBlockData() { | |
275 Zone* zone = schedule_->zone(); | |
276 scheduled_nodes_.resize(schedule_->BasicBlockCount(), | |
277 NodeVector(NodeVector::allocator_type(zone))); | |
278 schedule_->immediate_dominator_.resize(schedule_->BasicBlockCount(), NULL); | |
279 } | |
280 | |
281 | |
282 BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) { | 345 BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) { |
283 while (b1 != b2) { | 346 while (b1 != b2) { |
284 int b1_rpo = GetRPONumber(b1); | 347 int b1_rpo = GetRPONumber(b1); |
285 int b2_rpo = GetRPONumber(b2); | 348 int b2_rpo = GetRPONumber(b2); |
286 DCHECK(b1_rpo != b2_rpo); | 349 DCHECK(b1_rpo != b2_rpo); |
287 if (b1_rpo < b2_rpo) { | 350 if (b1_rpo < b2_rpo) { |
288 b2 = schedule_->immediate_dominator_[b2->id()]; | 351 b2 = b2->dominator_; |
289 } else { | 352 } else { |
290 b1 = schedule_->immediate_dominator_[b1->id()]; | 353 b1 = b1->dominator_; |
291 } | 354 } |
292 } | 355 } |
293 return b1; | 356 return b1; |
294 } | 357 } |
295 | 358 |
296 | 359 |
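GetCommonDominator leans on the invariant that a dominator always has a smaller RPO number than every block it dominates, so whichever of the two blocks is deeper can safely step up its dominator chain until the walks meet. A self-contained sketch of the same two-pointer walk, with assumed field names:

    struct Block {
      int rpo_number;    // reverse post-order index; dominators have smaller ones
      Block* dominator;  // immediate dominator, set for every block but start
    };

    Block* CommonDominator(Block* b1, Block* b2) {
      while (b1 != b2) {
        // Step the deeper block (larger RPO number) up one level.
        if (b1->rpo_number < b2->rpo_number) {
          b2 = b2->dominator;
        } else {
          b1 = b1->dominator;
        }
      }
      return b1;
    }

For example, for a diamond start -> {then, else} -> merge, CommonDominator(then, else) steps both sides up once and returns start.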
297 void Scheduler::GenerateImmediateDominatorTree() { | 360 void Scheduler::GenerateImmediateDominatorTree() { |
298 // Build the dominator graph. TODO(danno): consider using Lengauer & Tarjan's | 361 // Build the dominator graph. TODO(danno): consider using Lengauer & Tarjan's |
299 // if this becomes really slow. | 362 // if this becomes really slow. |
300 Trace("------------ IMMEDIATE BLOCK DOMINATORS -----------\n"); | 363 Trace("------------ IMMEDIATE BLOCK DOMINATORS -----------\n"); |
(...skipping 11 matching lines...) |
312 // dominator is found. | 375 // dominator is found. |
313 int current_rpo_pos = GetRPONumber(current_rpo); | 376 int current_rpo_pos = GetRPONumber(current_rpo); |
314 while (current_pred != end) { | 377 while (current_pred != end) { |
315     // Don't examine backwards edges. |      378     // Don't examine backwards edges. |
316 BasicBlock* pred = *current_pred; | 379 BasicBlock* pred = *current_pred; |
317 if (GetRPONumber(pred) < current_rpo_pos) { | 380 if (GetRPONumber(pred) < current_rpo_pos) { |
318 dominator = GetCommonDominator(dominator, *current_pred); | 381 dominator = GetCommonDominator(dominator, *current_pred); |
319 } | 382 } |
320 ++current_pred; | 383 ++current_pred; |
321 } | 384 } |
322 schedule_->immediate_dominator_[current_rpo->id()] = dominator; | 385 current_rpo->dominator_ = dominator; |
323 Trace("Block %d's idom is %d\n", current_rpo->id(), dominator->id()); | 386 Trace("Block %d's idom is %d\n", current_rpo->id(), dominator->id()); |
324 } | 387 } |
325 } | 388 } |
326 } | 389 } |
327 | 390 |
328 | 391 |
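The loop above is the usual iterative construction: process blocks in RPO and, for each block, intersect the dominators of its already-processed predecessors, skipping back edges so every predecessor consulted has its dominator set. A hedged standalone sketch (assumed types; the real code seeds the intersection from the first predecessor rather than from NULL):

    #include <cstddef>
    #include <vector>

    struct Block {
      int rpo_number;
      Block* dominator;  // filled in below; rpo_order[0] is the start block
      std::vector<Block*> predecessors;
    };

    static Block* Intersect(Block* b1, Block* b2) {
      while (b1 != b2) {  // same walk as GetCommonDominator above
        if (b1->rpo_number < b2->rpo_number) {
          b2 = b2->dominator;
        } else {
          b1 = b1->dominator;
        }
      }
      return b1;
    }

    void ComputeImmediateDominators(const std::vector<Block*>& rpo_order) {
      for (std::size_t i = 1; i < rpo_order.size(); i++) {
        Block* block = rpo_order[i];
        Block* dom = NULL;
        for (std::size_t p = 0; p < block->predecessors.size(); p++) {
          Block* pred = block->predecessors[p];
          if (pred->rpo_number >= block->rpo_number) continue;  // back edge
          dom = (dom == NULL) ? pred : Intersect(dom, pred);
        }
        block->dominator = dom;  // every reachable block has a forward pred
      }
    }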
329 class ScheduleEarlyNodeVisitor : public NullNodeVisitor { | 392 class ScheduleEarlyNodeVisitor : public NullNodeVisitor { |
330 public: | 393 public: |
331 explicit ScheduleEarlyNodeVisitor(Scheduler* scheduler) | 394 explicit ScheduleEarlyNodeVisitor(Scheduler* scheduler) |
332 : has_changed_rpo_constraints_(true), | 395 : has_changed_rpo_constraints_(true), |
333 scheduler_(scheduler), | 396 scheduler_(scheduler), |
334 schedule_(scheduler->schedule_) {} | 397 schedule_(scheduler->schedule_) {} |
335 | 398 |
336 GenericGraphVisit::Control Pre(Node* node) { | 399 GenericGraphVisit::Control Pre(Node* node) { |
337 int id = node->id(); | |
338 int max_rpo = 0; | 400 int max_rpo = 0; |
339 // Fixed nodes already know their schedule early position. | 401 // Fixed nodes already know their schedule early position. |
340 if (scheduler_->HasFixedSchedulePosition(node)) { | 402 if (scheduler_->GetPlacement(node) == Scheduler::kFixed) { |
341 BasicBlock* block = schedule_->block(node); | 403 BasicBlock* block = schedule_->block(node); |
342 DCHECK(block != NULL); | 404 DCHECK(block != NULL); |
343 max_rpo = block->rpo_number_; | 405 max_rpo = block->rpo_number_; |
344 if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) { | 406 if (scheduler_->GetData(node)->minimum_rpo_ != max_rpo) { |
345 has_changed_rpo_constraints_ = true; | 407 has_changed_rpo_constraints_ = true; |
346 } | 408 } |
347 scheduler_->schedule_early_rpo_index_[id] = max_rpo; | 409 scheduler_->GetData(node)->minimum_rpo_ = max_rpo; |
348 Trace("Node %d pre-scheduled early at rpo limit %d\n", id, max_rpo); | 410 Trace("Preschedule #%d:%s minimum_rpo = %d\n", node->id(), |
| 411 node->op()->mnemonic(), max_rpo); |
349 } | 412 } |
350 return GenericGraphVisit::CONTINUE; | 413 return GenericGraphVisit::CONTINUE; |
351 } | 414 } |
352 | 415 |
353 GenericGraphVisit::Control Post(Node* node) { | 416 GenericGraphVisit::Control Post(Node* node) { |
354 int id = node->id(); | |
355 int max_rpo = 0; | 417 int max_rpo = 0; |
356 // Otherwise, the minimum rpo for the node is the max of all of the inputs. | 418 // Otherwise, the minimum rpo for the node is the max of all of the inputs. |
357 if (!scheduler_->HasFixedSchedulePosition(node)) { | 419 if (scheduler_->GetPlacement(node) != Scheduler::kFixed) { |
358 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); | 420 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); |
359 ++i) { | 421 ++i) { |
360 int control_rpo = scheduler_->schedule_early_rpo_index_[(*i)->id()]; | 422 int control_rpo = scheduler_->GetData(*i)->minimum_rpo_; |
361 if (control_rpo > max_rpo) { | 423 if (control_rpo > max_rpo) { |
362 max_rpo = control_rpo; | 424 max_rpo = control_rpo; |
363 } | 425 } |
364 } | 426 } |
365 if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) { | 427 if (scheduler_->GetData(node)->minimum_rpo_ != max_rpo) { |
366 has_changed_rpo_constraints_ = true; | 428 has_changed_rpo_constraints_ = true; |
367 } | 429 } |
368 scheduler_->schedule_early_rpo_index_[id] = max_rpo; | 430 scheduler_->GetData(node)->minimum_rpo_ = max_rpo; |
369 Trace("Node %d post-scheduled early at rpo limit %d\n", id, max_rpo); | 431 Trace("Postschedule #%d:%s minimum_rpo = %d\n", node->id(), |
| 432 node->op()->mnemonic(), max_rpo); |
370 } | 433 } |
371 return GenericGraphVisit::CONTINUE; | 434 return GenericGraphVisit::CONTINUE; |
372 } | 435 } |
373 | 436 |
374 // TODO(mstarzinger): Dirty hack to unblock others, schedule early should be | 437 // TODO(mstarzinger): Dirty hack to unblock others, schedule early should be |
375 // rewritten to use a pre-order traversal from the start instead. | 438 // rewritten to use a pre-order traversal from the start instead. |
376 bool has_changed_rpo_constraints_; | 439 bool has_changed_rpo_constraints_; |
377 | 440 |
378 private: | 441 private: |
379 Scheduler* scheduler_; | 442 Scheduler* scheduler_; |
(...skipping 15 matching lines...) |
395 Trace("It took %d iterations to determine fixpoint\n", fixpoint_count); | 458 Trace("It took %d iterations to determine fixpoint\n", fixpoint_count); |
396 } | 459 } |
397 | 460 |
398 | 461 |
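The quantity being iterated to a fixpoint above follows one rule: a fixed node's minimum RPO is its block's RPO number, and any other node's minimum RPO is the maximum over its inputs, because a node cannot be placed before anything it consumes. As a worked example with hypothetical nodes: an Add fed by a Parameter (fixed in the start block, minimum_rpo 0) and a Phi fixed at a loop header with rpo_number 3 gets minimum_rpo = max(0, 3) = 3, so ScheduleLate may hoist it no earlier than that header. A minimal sketch of one propagation pass:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    struct SNode {
      bool fixed;                 // placement already pinned to a block
      int block_rpo;              // meaningful only when fixed
      int minimum_rpo;            // the schedule-early bound being computed
      std::vector<SNode*> inputs;
    };

    // One sweep; the caller repeats it until no minimum_rpo changes, which is
    // the fixpoint loop the TODO above wants to replace with a single
    // pre-order walk from the start.
    bool PropagateMinimumRPO(std::vector<SNode*>& nodes) {
      bool changed = false;
      for (std::size_t i = 0; i < nodes.size(); i++) {
        SNode* node = nodes[i];
        int rpo = node->fixed ? node->block_rpo : 0;
        if (!node->fixed) {
          for (std::size_t j = 0; j < node->inputs.size(); j++) {
            rpo = std::max(rpo, node->inputs[j]->minimum_rpo);
          }
        }
        if (node->minimum_rpo != rpo) {
          node->minimum_rpo = rpo;
          changed = true;
        }
      }
      return changed;
    }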
399 class PrepareUsesVisitor : public NullNodeVisitor { | 462 class PrepareUsesVisitor : public NullNodeVisitor { |
400 public: | 463 public: |
401 explicit PrepareUsesVisitor(Scheduler* scheduler) | 464 explicit PrepareUsesVisitor(Scheduler* scheduler) |
402 : scheduler_(scheduler), schedule_(scheduler->schedule_) {} | 465 : scheduler_(scheduler), schedule_(scheduler->schedule_) {} |
403 | 466 |
404 GenericGraphVisit::Control Pre(Node* node) { | 467 GenericGraphVisit::Control Pre(Node* node) { |
405 // Some nodes must be scheduled explicitly to ensure they are in exactly the | 468 if (scheduler_->GetPlacement(node) == Scheduler::kFixed) { |
406 // right place; it's a convenient place during the preparation of use counts | 469 // Fixed nodes are always roots for schedule late. |
407 // to schedule them. | |
408 if (!schedule_->IsScheduled(node) && | |
409 scheduler_->HasFixedSchedulePosition(node)) { | |
410 Trace("Fixed position node %d is unscheduled, scheduling now\n", | |
411 node->id()); | |
412 IrOpcode::Value opcode = node->opcode(); | |
413 BasicBlock* block = | |
414 opcode == IrOpcode::kParameter | |
415 ? schedule_->start() | |
416 : schedule_->block(NodeProperties::GetControlInput(node)); | |
417 DCHECK(block != NULL); | |
418 schedule_->AddNode(block, node); | |
419 } | |
420 | |
421 if (scheduler_->IsScheduleRoot(node)) { | |
422 scheduler_->schedule_root_nodes_.push_back(node); | 470 scheduler_->schedule_root_nodes_.push_back(node); |
| 471 if (!schedule_->IsScheduled(node)) { |
| 472 // Make sure root nodes are scheduled in their respective blocks. |
| 473 Trace(" Scheduling fixed position node #%d:%s\n", node->id(), |
| 474 node->op()->mnemonic()); |
| 475 IrOpcode::Value opcode = node->opcode(); |
| 476 BasicBlock* block = |
| 477 opcode == IrOpcode::kParameter |
| 478 ? schedule_->start() |
| 479 : schedule_->block(NodeProperties::GetControlInput(node)); |
| 480 DCHECK(block != NULL); |
| 481 schedule_->AddNode(block, node); |
| 482 } |
423 } | 483 } |
424 | 484 |
425 return GenericGraphVisit::CONTINUE; | 485 return GenericGraphVisit::CONTINUE; |
426 } | 486 } |
427 | 487 |
428 void PostEdge(Node* from, int index, Node* to) { | 488 void PostEdge(Node* from, int index, Node* to) { |
429 // If the edge is from an unscheduled node, then tally it in the use count | 489 // If the edge is from an unscheduled node, then tally it in the use count |
430 // for all of its inputs. The same criterion will be used in ScheduleLate | 490 // for all of its inputs. The same criterion will be used in ScheduleLate |
431 // for decrementing use counts. | 491 // for decrementing use counts. |
432 if (!schedule_->IsScheduled(from)) { | 492 if (!schedule_->IsScheduled(from)) { |
433 DCHECK(!scheduler_->HasFixedSchedulePosition(from)); | 493 DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from)); |
434 ++scheduler_->unscheduled_uses_[to->id()]; | 494 ++(scheduler_->GetData(to)->unscheduled_count_); |
435 Trace("Incrementing uses of node %d from %d to %d\n", to->id(), | 495 Trace(" Use count of #%d:%s (used by #%d:%s)++ = %d\n", to->id(), |
436 from->id(), scheduler_->unscheduled_uses_[to->id()]); | 496 to->op()->mnemonic(), from->id(), from->op()->mnemonic(), |
| 497 scheduler_->GetData(to)->unscheduled_count_); |
437 } | 498 } |
438 } | 499 } |
439 | 500 |
440 private: | 501 private: |
441 Scheduler* scheduler_; | 502 Scheduler* scheduler_; |
442 Schedule* schedule_; | 503 Schedule* schedule_; |
443 }; | 504 }; |
444 | 505 |
445 | 506 |
446 void Scheduler::PrepareUses() { | 507 void Scheduler::PrepareUses() { |
447 Trace("------------------- PREPARE USES ------------------\n"); | 508 Trace("------------------- PREPARE USES ------------------\n"); |
448   // Count the uses of every node; it will be used to ensure that all of a |      509   // Count the uses of every node; it will be used to ensure that all of a |
449 // node's uses are scheduled before the node itself. | 510 // node's uses are scheduled before the node itself. |
450 PrepareUsesVisitor prepare_uses(this); | 511 PrepareUsesVisitor prepare_uses(this); |
451 graph_->VisitNodeInputsFromEnd(&prepare_uses); | 512 graph_->VisitNodeInputsFromEnd(&prepare_uses); |
452 } | 513 } |
453 | 514 |
454 | 515 |
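The net effect of PrepareUses is one counter per node: every input edge from a not-yet-scheduled user increments the input's unscheduled_count_, and ScheduleNode later decrements the same counters, so a node becomes eligible for late scheduling exactly when its count reaches zero. A small sketch of that handshake (assumed shapes, not the V8 API):

    #include <cstddef>
    #include <vector>

    struct UNode {
      int unscheduled_count;      // pending uses, in PrepareUses' sense
      bool scheduled;
      std::vector<UNode*> inputs;
    };

    // PrepareUses: each input edge of an unscheduled node is a pending use.
    void CountUses(std::vector<UNode*>& nodes) {
      for (std::size_t i = 0; i < nodes.size(); i++) {
        if (nodes[i]->scheduled) continue;
        for (std::size_t j = 0; j < nodes[i]->inputs.size(); j++) {
          nodes[i]->inputs[j]->unscheduled_count++;
        }
      }
    }

    // ScheduleNode's counterpart: placing 'node' releases each input whose
    // last pending use this was.
    void OnScheduled(UNode* node, std::vector<UNode*>* now_eligible) {
      node->scheduled = true;
      for (std::size_t j = 0; j < node->inputs.size(); j++) {
        UNode* input = node->inputs[j];
        if (--input->unscheduled_count == 0 && !input->scheduled) {
          now_eligible->push_back(input);  // all uses placed; schedulable now
        }
      }
    }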
455 class ScheduleLateNodeVisitor : public NullNodeVisitor { | 516 class ScheduleLateNodeVisitor : public NullNodeVisitor { |
456 public: | 517 public: |
457 explicit ScheduleLateNodeVisitor(Scheduler* scheduler) | 518 explicit ScheduleLateNodeVisitor(Scheduler* scheduler) |
458 : scheduler_(scheduler), schedule_(scheduler_->schedule_) {} | 519 : scheduler_(scheduler), schedule_(scheduler_->schedule_) {} |
459 | 520 |
460 GenericGraphVisit::Control Pre(Node* node) { | 521 GenericGraphVisit::Control Pre(Node* node) { |
461 // Don't schedule nodes that are already scheduled. | 522 // Don't schedule nodes that are already scheduled. |
462 if (schedule_->IsScheduled(node)) { | 523 if (schedule_->IsScheduled(node)) { |
463 return GenericGraphVisit::CONTINUE; | 524 return GenericGraphVisit::CONTINUE; |
464 } | 525 } |
465 DCHECK(!scheduler_->HasFixedSchedulePosition(node)); | 526 Scheduler::SchedulerData* data = scheduler_->GetData(node); |
| 527 DCHECK_EQ(Scheduler::kSchedulable, data->placement_); |
466 | 528 |
467 // If all the uses of a node have been scheduled, then the node itself can | 529 // If all the uses of a node have been scheduled, then the node itself can |
468 // be scheduled. | 530 // be scheduled. |
469 bool eligible = scheduler_->unscheduled_uses_[node->id()] == 0; | 531 bool eligible = data->unscheduled_count_ == 0; |
470 Trace("Testing for schedule eligibility for node %d -> %s\n", node->id(), | 532 Trace("Testing for schedule eligibility for #%d:%s = %s\n", node->id(), |
471 eligible ? "true" : "false"); | 533 node->op()->mnemonic(), eligible ? "true" : "false"); |
472 if (!eligible) return GenericGraphVisit::DEFER; | 534 if (!eligible) return GenericGraphVisit::DEFER; |
473 | 535 |
474 // Determine the dominating block for all of the uses of this node. It is | 536 // Determine the dominating block for all of the uses of this node. It is |
475 // the latest block that this node can be scheduled in. | 537 // the latest block that this node can be scheduled in. |
476 BasicBlock* block = NULL; | 538 BasicBlock* block = NULL; |
477 for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end(); | 539 for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end(); |
478 ++i) { | 540 ++i) { |
479 BasicBlock* use_block = GetBlockForUse(i.edge()); | 541 BasicBlock* use_block = GetBlockForUse(i.edge()); |
480 block = block == NULL ? use_block : use_block == NULL | 542 block = block == NULL ? use_block : use_block == NULL |
481 ? block | 543 ? block |
482 : scheduler_->GetCommonDominator( | 544 : scheduler_->GetCommonDominator( |
483 block, use_block); | 545 block, use_block); |
484 } | 546 } |
485 DCHECK(block != NULL); | 547 DCHECK(block != NULL); |
486 | 548 |
487 int min_rpo = scheduler_->schedule_early_rpo_index_[node->id()]; | 549 int min_rpo = data->minimum_rpo_; |
488 Trace( | 550 Trace( |
489 "Schedule late conservative for node %d is block %d at " | 551 "Schedule late conservative for #%d:%s is B%d at loop depth %d, " |
490 "loop depth %d, min rpo = %d\n", | 552 "minimum_rpo = %d\n", |
491 node->id(), block->id(), block->loop_depth_, min_rpo); | 553 node->id(), node->op()->mnemonic(), block->id(), block->loop_depth_, |
| 554 min_rpo); |
492 // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively | 555 // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively |
493     // into enlcosing loop pre-headers until they would preceed their |      556     // into enclosing loop pre-headers until they would precede their |
494 // ScheduleEarly position. | 557 // ScheduleEarly position. |
495 BasicBlock* hoist_block = block; | 558 BasicBlock* hoist_block = block; |
496 while (hoist_block != NULL && hoist_block->rpo_number_ >= min_rpo) { | 559 while (hoist_block != NULL && hoist_block->rpo_number_ >= min_rpo) { |
497 if (hoist_block->loop_depth_ < block->loop_depth_) { | 560 if (hoist_block->loop_depth_ < block->loop_depth_) { |
498 block = hoist_block; | 561 block = hoist_block; |
499 Trace("Hoisting node %d to block %d\n", node->id(), block->id()); | 562 Trace(" hoisting #%d:%s to block %d\n", node->id(), |
| 563 node->op()->mnemonic(), block->id()); |
500 } | 564 } |
501 // Try to hoist to the pre-header of the loop header. | 565 // Try to hoist to the pre-header of the loop header. |
502 hoist_block = hoist_block->loop_header(); | 566 hoist_block = hoist_block->loop_header(); |
503 if (hoist_block != NULL) { | 567 if (hoist_block != NULL) { |
504 BasicBlock* pre_header = schedule_->dominator(hoist_block); | 568 BasicBlock* pre_header = hoist_block->dominator_; |
505 DCHECK(pre_header == NULL || | 569 DCHECK(pre_header == NULL || |
506 *hoist_block->predecessors().begin() == pre_header); | 570 *hoist_block->predecessors().begin() == pre_header); |
507 Trace( | 571 Trace( |
508 "Try hoist to pre-header block %d of loop header block %d," | 572 " hoist to pre-header B%d of loop header B%d, depth would be %d\n", |
509 " depth would be %d\n", | |
510 pre_header->id(), hoist_block->id(), pre_header->loop_depth_); | 573 pre_header->id(), hoist_block->id(), pre_header->loop_depth_); |
511 hoist_block = pre_header; | 574 hoist_block = pre_header; |
512 } | 575 } |
513 } | 576 } |
514 | 577 |
515 ScheduleNode(block, node); | 578 ScheduleNode(block, node); |
516 | 579 |
517 return GenericGraphVisit::CONTINUE; | 580 return GenericGraphVisit::CONTINUE; |
518 } | 581 } |
519 | 582 |
520 private: | 583 private: |
521 BasicBlock* GetBlockForUse(Node::Edge edge) { | 584 BasicBlock* GetBlockForUse(Node::Edge edge) { |
522 Node* use = edge.from(); | 585 Node* use = edge.from(); |
523 IrOpcode::Value opcode = use->opcode(); | 586 IrOpcode::Value opcode = use->opcode(); |
524 // If the use is a phi, forward through the phi to the basic block | |
525 // corresponding to the phi's input. | |
526 if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) { | 587 if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) { |
| 588 // If the use is from a fixed (i.e. non-floating) phi, use the block |
| 589 // of the corresponding control input to the merge. |
527 int index = edge.index(); | 590 int index = edge.index(); |
528 Trace("Use %d is input %d to a phi\n", use->id(), index); | 591 if (scheduler_->GetPlacement(use) == Scheduler::kFixed) { |
529 use = NodeProperties::GetControlInput(use, 0); | 592 Trace(" input@%d into a fixed phi #%d:%s\n", index, use->id(), |
530 opcode = use->opcode(); | 593 use->op()->mnemonic()); |
531 DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop); | 594 Node* merge = NodeProperties::GetControlInput(use, 0); |
532 use = NodeProperties::GetControlInput(use, index); | 595 opcode = merge->opcode(); |
| 596 DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop); |
| 597 use = NodeProperties::GetControlInput(merge, index); |
| 598 } |
533 } | 599 } |
534 BasicBlock* result = schedule_->block(use); | 600 BasicBlock* result = schedule_->block(use); |
535 if (result == NULL) return NULL; | 601 if (result == NULL) return NULL; |
536 Trace("Must dominate use %d in block %d\n", use->id(), result->id()); | 602 Trace(" must dominate use #%d:%s in B%d\n", use->id(), |
| 603 use->op()->mnemonic(), result->id()); |
537 return result; | 604 return result; |
538 } | 605 } |
539 | 606 |
540 void ScheduleNode(BasicBlock* block, Node* node) { | 607 void ScheduleNode(BasicBlock* block, Node* node) { |
541 schedule_->PlanNode(block, node); | 608 schedule_->PlanNode(block, node); |
542 scheduler_->scheduled_nodes_[block->id()].push_back(node); | 609 scheduler_->scheduled_nodes_[block->id()].push_back(node); |
543 | 610 |
544 // Reduce the use count of the node's inputs to potentially make them | 611 // Reduce the use count of the node's inputs to potentially make them |
545 // scheduable. | 612 // schedulable. |
546 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) { | 613 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) { |
547 DCHECK(scheduler_->unscheduled_uses_[(*i)->id()] > 0); | 614 Scheduler::SchedulerData* data = scheduler_->GetData(*i); |
548 --scheduler_->unscheduled_uses_[(*i)->id()]; | 615 DCHECK(data->unscheduled_count_ > 0); |
| 616 --data->unscheduled_count_; |
549 if (FLAG_trace_turbo_scheduler) { | 617 if (FLAG_trace_turbo_scheduler) { |
550 Trace("Decrementing use count for node %d from node %d (now %d)\n", | 618 Trace(" Use count for #%d:%s (used by #%d:%s)-- = %d\n", (*i)->id(), |
551 (*i)->id(), i.edge().from()->id(), | 619 (*i)->op()->mnemonic(), i.edge().from()->id(), |
552 scheduler_->unscheduled_uses_[(*i)->id()]); | 620 i.edge().from()->op()->mnemonic(), data->unscheduled_count_); |
553 if (scheduler_->unscheduled_uses_[(*i)->id()] == 0) { | 621 if (data->unscheduled_count_ == 0) { |
554 Trace("node %d is now eligible for scheduling\n", (*i)->id()); | 622 Trace(" newly eligible #%d:%s\n", (*i)->id(), |
| 623 (*i)->op()->mnemonic()); |
555 } | 624 } |
556 } | 625 } |
557 } | 626 } |
558 } | 627 } |
559 | 628 |
560 Scheduler* scheduler_; | 629 Scheduler* scheduler_; |
561 Schedule* schedule_; | 630 Schedule* schedule_; |
562 }; | 631 }; |
563 | 632 |
564 | 633 |
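Two decisions in the visitor above deserve restating. The latest legal home for a node is the common dominator of all its use blocks; from there the node is hoisted into enclosing loop pre-headers as long as the candidate's RPO number stays at or above the node's schedule-early bound. With assumed numbers: uses all in a loop body block (rpo 5, depth 1) and minimum_rpo 0 give block = 5; hoisting then tries the loop's pre-header (rpo 2, depth 0), and since 2 >= 0 the node lands outside the loop. A condensed sketch of the hoisting loop:

    struct Block {
      int rpo_number;
      int loop_depth;
      Block* loop_header;  // innermost containing loop header, NULL if none
      Block* dominator;    // for a loop header, its unique pre-header
    };

    // 'latest' is the common dominator of all uses; 'min_rpo' comes from
    // schedule early. Mirrors the while-loop in ScheduleLateNodeVisitor::Pre.
    Block* HoistOutOfLoops(Block* latest, int min_rpo) {
      Block* block = latest;
      Block* hoist = latest;
      while (hoist != NULL && hoist->rpo_number >= min_rpo) {
        if (hoist->loop_depth < block->loop_depth) block = hoist;
        hoist = hoist->loop_header;                   // enclosing loop header
        if (hoist != NULL) hoist = hoist->dominator;  // then its pre-header
      }
      return block;
    }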
565 void Scheduler::ScheduleLate() { | 634 void Scheduler::ScheduleLate() { |
566 Trace("------------------- SCHEDULE LATE -----------------\n"); | 635 Trace("------------------- SCHEDULE LATE -----------------\n"); |
| 636 if (FLAG_trace_turbo_scheduler) { |
| 637 Trace("roots: "); |
| 638 for (NodeVectorIter i = schedule_root_nodes_.begin(); |
| 639 i != schedule_root_nodes_.end(); ++i) { |
| 640 Trace("#%d:%s ", (*i)->id(), (*i)->op()->mnemonic()); |
| 641 } |
| 642 Trace("\n"); |
| 643 } |
567 | 644 |
568   // Schedule: place each node in the dominator block of all its uses. |      645   // Schedule: place each node in the dominator block of all its uses. |
569 ScheduleLateNodeVisitor schedule_late_visitor(this); | 646 ScheduleLateNodeVisitor schedule_late_visitor(this); |
570 | 647 |
571 { | 648 { |
572 Zone zone(zone_->isolate()); | 649 Zone zone(zone_->isolate()); |
573 GenericGraphVisit::Visit<ScheduleLateNodeVisitor, | 650 GenericGraphVisit::Visit<ScheduleLateNodeVisitor, |
574 NodeInputIterationTraits<Node> >( | 651 NodeInputIterationTraits<Node> >( |
575 graph_, &zone, schedule_root_nodes_.begin(), schedule_root_nodes_.end(), | 652 graph_, &zone, schedule_root_nodes_.begin(), schedule_root_nodes_.end(), |
576 &schedule_late_visitor); | 653 &schedule_late_visitor); |
577 } | 654 } |
578 | 655 |
579 // Add collected nodes for basic blocks to their blocks in the right order. | 656 // Add collected nodes for basic blocks to their blocks in the right order. |
580 int block_num = 0; | 657 int block_num = 0; |
581 for (NodeVectorVectorIter i = scheduled_nodes_.begin(); | 658 for (NodeVectorVectorIter i = scheduled_nodes_.begin(); |
582 i != scheduled_nodes_.end(); ++i) { | 659 i != scheduled_nodes_.end(); ++i) { |
583 for (NodeVectorRIter j = i->rbegin(); j != i->rend(); ++j) { | 660 for (NodeVectorRIter j = i->rbegin(); j != i->rend(); ++j) { |
584 schedule_->AddNode(schedule_->all_blocks_.at(block_num), *j); | 661 schedule_->AddNode(schedule_->all_blocks_.at(block_num), *j); |
585 } | 662 } |
586 block_num++; | 663 block_num++; |
587 } | 664 } |
588 } | 665 } |
589 | 666 |
590 | 667 |
| 668 bool Scheduler::ConnectFloatingControl() { |
| 669 if (!has_floating_control_) return false; |
| 670 |
| 671 Trace("Connecting floating control...\n"); |
| 672 |
| 673 // Process blocks and instructions backwards to find and connect floating |
| 674 // control nodes into the control graph according to the block they were |
| 675 // scheduled into. |
| 676 int max = static_cast<int>(schedule_->rpo_order()->size()); |
| 677 for (int i = max - 1; i >= 0; i--) { |
| 678 BasicBlock* block = schedule_->rpo_order()->at(i); |
| 679 for (int j = static_cast<int>(block->nodes_.size()) - 1; j >= 0; j--) { |
| 680 Node* node = block->nodes_[j]; |
| 681 SchedulerData* data = GetData(node); |
| 682 if (data->is_floating_control_ && !data->is_connected_control_) { |
| 683 Trace(" Floating control #%d:%s was scheduled in B%d\n", node->id(), |
| 684 node->op()->mnemonic(), block->id()); |
| 685 ConnectFloatingControlSubgraph(block, node); |
| 686 } |
| 687 } |
| 688 } |
| 689 |
| 690 return true; |
| 691 } |
| 692 |
| 693 |
| 694 void Scheduler::ConnectFloatingControlSubgraph(BasicBlock* block, Node* end) { |
| 695 Node* block_start = block->nodes_[0]; |
| 696 DCHECK(IrOpcode::IsControlOpcode(block_start->opcode())); |
| 697 // Find the current "control successor" of the node that starts the block |
| 698 // by searching the control uses for a control input edge from a connected |
| 699 // control node. |
| 700 Node* control_succ = NULL; |
| 701 for (UseIter i = block_start->uses().begin(); i != block_start->uses().end(); |
| 702 ++i) { |
| 703 Node::Edge edge = i.edge(); |
| 704 if (NodeProperties::IsControlEdge(edge) && |
| 705 GetData(edge.from())->is_connected_control_) { |
| 706 DCHECK_EQ(NULL, control_succ); |
| 707 control_succ = edge.from(); |
| 708 control_succ->ReplaceInput(edge.index(), end); |
| 709 } |
| 710 } |
| 711 DCHECK_NE(NULL, control_succ); |
| 712 Trace(" Inserting floating control end %d:%s between %d:%s -> %d:%s\n", |
| 713 end->id(), end->op()->mnemonic(), control_succ->id(), |
| 714 control_succ->op()->mnemonic(), block_start->id(), |
| 715 block_start->op()->mnemonic()); |
| 716 |
| 717 // Find the "start" node of the control subgraph, which should be the |
| 718 // unique node that is itself floating control but has a control input that |
| 719 // is not floating. |
| 720 Node* start = NULL; |
| 721 std::queue<Node*, std::deque<Node*, NodePtrZoneAllocator> > queue( |
| 722 (std::deque<Node*, NodePtrZoneAllocator>(NodePtrZoneAllocator(zone_)))); |
| 723 queue.push(end); |
| 724 GetData(end)->is_connected_control_ = true; |
| 725 while (!queue.empty()) { |
| 726 Node* node = queue.front(); |
| 727 queue.pop(); |
| 728 Trace(" Search #%d:%s for control subgraph start\n", node->id(), |
| 729 node->op()->mnemonic()); |
| 730 int max = NodeProperties::PastControlIndex(node); |
| 731 for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) { |
| 732 Node* input = node->InputAt(i); |
| 733 SchedulerData* data = GetData(input); |
| 734 if (data->is_floating_control_) { |
| 735 // {input} is floating control. |
| 736 if (!data->is_connected_control_) { |
| 737 // First time seeing {input} during this traversal, queue it. |
| 738 queue.push(input); |
| 739 data->is_connected_control_ = true; |
| 740 } |
| 741 } else { |
| 742 // Otherwise, {node} is the start node, because it is floating control |
| 743 // but is connected to {input} that is not floating control. |
| 744 DCHECK_EQ(NULL, start); // There can be only one. |
| 745 start = node; |
| 746 } |
| 747 } |
| 748 } |
| 749 |
| 750 DCHECK_NE(NULL, start); |
| 751 start->ReplaceInput(NodeProperties::FirstControlIndex(start), block_start); |
| 752 |
| 753 Trace(" Connecting floating control start %d:%s to %d:%s\n", start->id(), |
| 754 start->op()->mnemonic(), block_start->id(), |
| 755 block_start->op()->mnemonic()); |
| 756 } |
| 757 |
| 758 |
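In miniature, connecting one floating control subgraph is two input rewrites: the subgraph's end node is spliced in front of the block's current control successor, and the subgraph's unique start node (the one floating node with a non-floating control input) is rewired to consume the node that begins the block. A schematic sketch under those assumptions, with simplified nodes that carry only control inputs:

    #include <cstddef>
    #include <vector>

    struct CNode {
      std::vector<CNode*> control_inputs;
    };

    // After the splice, control flows block_start -> start ... end -> succ,
    // mirroring the two ReplaceInput calls above.
    void SpliceFloatingControl(CNode* block_start, CNode* succ,
                               CNode* start, CNode* end) {
      // succ consumed block_start directly; make it consume 'end' instead.
      for (std::size_t i = 0; i < succ->control_inputs.size(); i++) {
        if (succ->control_inputs[i] == block_start) {
          succ->control_inputs[i] = end;
        }
      }
      // 'start' hung off some non-floating control node; point its control
      // input at the node that begins the block instead.
      start->control_inputs[0] = block_start;
    }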
591 // Numbering for BasicBlockData.rpo_number_ for this block traversal: | 759 // Numbering for BasicBlockData.rpo_number_ for this block traversal: |
592 static const int kBlockOnStack = -2; | 760 static const int kBlockOnStack = -2; |
593 static const int kBlockVisited1 = -3; | 761 static const int kBlockVisited1 = -3; |
594 static const int kBlockVisited2 = -4; | 762 static const int kBlockVisited2 = -4; |
595 static const int kBlockUnvisited1 = -1; | 763 static const int kBlockUnvisited1 = -1; |
596 static const int kBlockUnvisited2 = kBlockVisited1; | 764 static const int kBlockUnvisited2 = kBlockVisited1; |
597 | 765 |
598 struct SpecialRPOStackFrame { | 766 struct SpecialRPOStackFrame { |
599 BasicBlock* block; | 767 BasicBlock* block; |
600 int index; | 768 int index; |
(...skipping 349 matching lines...) |
950 ++i) { | 1118 ++i) { |
951 BasicBlock* current = *i; | 1119 BasicBlock* current = *i; |
952 current->loop_header_ = current_header; | 1120 current->loop_header_ = current_header; |
953 if (current->IsLoopHeader()) { | 1121 if (current->IsLoopHeader()) { |
954 loop_depth++; | 1122 loop_depth++; |
955 current_loop = &loops[current->loop_end_]; | 1123 current_loop = &loops[current->loop_end_]; |
956 BlockList* end = current_loop->end; | 1124 BlockList* end = current_loop->end; |
957 current->loop_end_ = end == NULL ? static_cast<int>(final_order->size()) | 1125 current->loop_end_ = end == NULL ? static_cast<int>(final_order->size()) |
958 : end->block->rpo_number_; | 1126 : end->block->rpo_number_; |
959 current_header = current_loop->header; | 1127 current_header = current_loop->header; |
960 Trace("Block %d is a loop header, increment loop depth to %d\n", | 1128 Trace("B%d is a loop header, increment loop depth to %d\n", current->id(), |
961 current->id(), loop_depth); | 1129 loop_depth); |
962 } else { | 1130 } else { |
963 while (current_header != NULL && | 1131 while (current_header != NULL && |
964 current->rpo_number_ >= current_header->loop_end_) { | 1132 current->rpo_number_ >= current_header->loop_end_) { |
965 DCHECK(current_header->IsLoopHeader()); | 1133 DCHECK(current_header->IsLoopHeader()); |
966 DCHECK(current_loop != NULL); | 1134 DCHECK(current_loop != NULL); |
967 current_loop = current_loop->prev; | 1135 current_loop = current_loop->prev; |
968 current_header = current_loop == NULL ? NULL : current_loop->header; | 1136 current_header = current_loop == NULL ? NULL : current_loop->header; |
969 --loop_depth; | 1137 --loop_depth; |
970 } | 1138 } |
971 } | 1139 } |
972 current->loop_depth_ = loop_depth; | 1140 current->loop_depth_ = loop_depth; |
973 Trace("Block %d's loop header is block %d, loop depth %d\n", current->id(), | 1141 if (current->loop_header_ == NULL) { |
974 current->loop_header_ == NULL ? -1 : current->loop_header_->id(), | 1142 Trace("B%d is not in a loop (depth == %d)\n", current->id(), |
975 current->loop_depth_); | 1143 current->loop_depth_); |
| 1144 } else { |
| 1145 Trace("B%d has loop header B%d, (depth == %d)\n", current->id(), |
| 1146 current->loop_header_->id(), current->loop_depth_); |
| 1147 } |
976 } | 1148 } |
977 | 1149 |
978 #if DEBUG | 1150 #if DEBUG |
979 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order); | 1151 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order); |
980 VerifySpecialRPO(num_loops, loops, final_order); | 1152 VerifySpecialRPO(num_loops, loops, final_order); |
981 #endif | 1153 #endif |
982 return final_order; | 1154 return final_order; |
983 } | 1155 } |
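The loop-depth bookkeeping in the tail of ComputeSpecialRPO reads naturally as a stack discipline over the final order: a loop header opens a loop (depth grows by one, counting the header itself), and a loop closes once the walk reaches a block whose RPO index is at or past the recorded loop_end_. A compact sketch under those assumptions (the real code threads a linked list of loop descriptors instead of a stack):

    #include <cstddef>
    #include <vector>

    struct Block {
      bool is_loop_header;
      int rpo_number;
      int loop_end;    // RPO index just past the loop; set for headers only
      int loop_depth;  // filled in below
    };

    // One pass over blocks in special RPO order; 'open' holds the headers of
    // the loops currently being traversed, innermost last.
    void AssignLoopDepths(std::vector<Block*>& order) {
      std::vector<Block*> open;
      for (std::size_t i = 0; i < order.size(); i++) {
        Block* current = order[i];
        while (!open.empty() && current->rpo_number >= open.back()->loop_end) {
          open.pop_back();  // walked past the innermost open loop
        }
        if (current->is_loop_header) open.push_back(current);
        current->loop_depth = static_cast<int>(open.size());
      }
    }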
984 } | 1156 } |
985 } | 1157 } |
986 } // namespace v8::internal::compiler | 1158 } // namespace v8::internal::compiler |