OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/compiler/scheduler.h" | 5 #include "src/compiler/scheduler.h" |
6 | 6 |
7 #include "src/bit-vector.h" | 7 #include "src/bit-vector.h" |
8 #include "src/compiler/common-operator.h" | 8 #include "src/compiler/common-operator.h" |
9 #include "src/compiler/control-equivalence.h" | 9 #include "src/compiler/control-equivalence.h" |
10 #include "src/compiler/graph.h" | 10 #include "src/compiler/graph.h" |
(...skipping 200 matching lines...)
211 // Phase 1: Build control-flow graph. | 211 // Phase 1: Build control-flow graph. |
212 | 212 |
213 | 213 |
214 // Internal class to build a control flow graph (i.e. the basic blocks and edges | 214 // Internal class to build a control flow graph (i.e. the basic blocks and edges |
215 // between them within a Schedule) from the node graph. Visits control edges of | 215 // between them within a Schedule) from the node graph. Visits control edges of |
216 // the graph backwards from an end node in order to find the connected control | 216 // the graph backwards from an end node in order to find the connected control |
217 // subgraph, needed for scheduling. | 217 // subgraph, needed for scheduling. |
218 class CFGBuilder : public ZoneObject { | 218 class CFGBuilder : public ZoneObject { |
219 public: | 219 public: |
220 CFGBuilder(Zone* zone, Scheduler* scheduler) | 220 CFGBuilder(Zone* zone, Scheduler* scheduler) |
221 : scheduler_(scheduler), | 221 : zone_(zone), |
| 222 scheduler_(scheduler), |
222 schedule_(scheduler->schedule_), | 223 schedule_(scheduler->schedule_), |
223 queued_(scheduler->graph_, 2), | 224 queued_(scheduler->graph_, 2), |
224 queue_(zone), | 225 queue_(zone), |
225 control_(zone), | 226 control_(zone), |
226 component_entry_(NULL), | 227 component_entry_(NULL), |
227 component_start_(NULL), | 228 component_start_(NULL), |
228 component_end_(NULL) {} | 229 component_end_(NULL) {} |
229 | 230 |
230 // Run the control flow graph construction algorithm by walking the graph | 231 // Run the control flow graph construction algorithm by walking the graph |
231 // backwards from end through control edges, building and connecting the | 232 // backwards from end through control edges, building and connecting the |
(...skipping 77 matching lines...)
309 FixNode(schedule_->end(), node); | 310 FixNode(schedule_->end(), node); |
310 break; | 311 break; |
311 case IrOpcode::kStart: | 312 case IrOpcode::kStart: |
312 FixNode(schedule_->start(), node); | 313 FixNode(schedule_->start(), node); |
313 break; | 314 break; |
314 case IrOpcode::kLoop: | 315 case IrOpcode::kLoop: |
315 case IrOpcode::kMerge: | 316 case IrOpcode::kMerge: |
316 BuildBlockForNode(node); | 317 BuildBlockForNode(node); |
317 break; | 318 break; |
318 case IrOpcode::kBranch: | 319 case IrOpcode::kBranch: |
319 BuildBlocksForSuccessors(node, IrOpcode::kIfTrue, IrOpcode::kIfFalse); | 320 case IrOpcode::kSwitch: |
| 321 BuildBlocksForSuccessors(node); |
320 break; | 322 break; |
321 default: | 323 default: |
322 break; | 324 break; |
323 } | 325 } |
324 } | 326 } |
325 | 327 |
326 void ConnectBlocks(Node* node) { | 328 void ConnectBlocks(Node* node) { |
327 switch (node->opcode()) { | 329 switch (node->opcode()) { |
328 case IrOpcode::kLoop: | 330 case IrOpcode::kLoop: |
329 case IrOpcode::kMerge: | 331 case IrOpcode::kMerge: |
330 ConnectMerge(node); | 332 ConnectMerge(node); |
331 break; | 333 break; |
332 case IrOpcode::kBranch: | 334 case IrOpcode::kBranch: |
333 scheduler_->UpdatePlacement(node, Scheduler::kFixed); | 335 scheduler_->UpdatePlacement(node, Scheduler::kFixed); |
334 ConnectBranch(node); | 336 ConnectBranch(node); |
335 break; | 337 break; |
| 338 case IrOpcode::kSwitch: |
| 339 scheduler_->UpdatePlacement(node, Scheduler::kFixed); |
| 340 ConnectSwitch(node); |
| 341 break; |
336 case IrOpcode::kReturn: | 342 case IrOpcode::kReturn: |
337 scheduler_->UpdatePlacement(node, Scheduler::kFixed); | 343 scheduler_->UpdatePlacement(node, Scheduler::kFixed); |
338 ConnectReturn(node); | 344 ConnectReturn(node); |
339 break; | 345 break; |
340 case IrOpcode::kThrow: | 346 case IrOpcode::kThrow: |
341 scheduler_->UpdatePlacement(node, Scheduler::kFixed); | 347 scheduler_->UpdatePlacement(node, Scheduler::kFixed); |
342 ConnectThrow(node); | 348 ConnectThrow(node); |
343 break; | 349 break; |
344 default: | 350 default: |
345 break; | 351 break; |
346 } | 352 } |
347 } | 353 } |
348 | 354 |
349 BasicBlock* BuildBlockForNode(Node* node) { | 355 BasicBlock* BuildBlockForNode(Node* node) { |
350 BasicBlock* block = schedule_->block(node); | 356 BasicBlock* block = schedule_->block(node); |
351 if (block == NULL) { | 357 if (block == NULL) { |
352 block = schedule_->NewBasicBlock(); | 358 block = schedule_->NewBasicBlock(); |
353 Trace("Create block B%d for #%d:%s\n", block->id().ToInt(), node->id(), | 359 Trace("Create block B%d for #%d:%s\n", block->id().ToInt(), node->id(), |
354 node->op()->mnemonic()); | 360 node->op()->mnemonic()); |
355 FixNode(block, node); | 361 FixNode(block, node); |
356 } | 362 } |
357 return block; | 363 return block; |
358 } | 364 } |
359 | 365 |
360 void BuildBlocksForSuccessors(Node* node, IrOpcode::Value a, | 366 void BuildBlocksForSuccessors(Node* node) { |
361 IrOpcode::Value b) { | 367 size_t const successor_count = node->op()->ControlOutputCount(); |
362 Node* successors[2]; | 368 Node** successors = |
363 CollectSuccessorProjections(node, successors, a, b); | 369 zone_->NewArray<Node*>(static_cast<int>(successor_count)); |
364 BuildBlockForNode(successors[0]); | 370 CollectSuccessorProjections(node, successors, successor_count); |
365 BuildBlockForNode(successors[1]); | 371 for (size_t index = 0; index < successor_count; ++index) { |
| 372 BuildBlockForNode(successors[index]); |
| 373 } |
366 } | 374 } |
367 | 375 |
368 // Collect the branch-related projections from a node, such as IfTrue, | 376 // Collect the branch-related projections from a node, such as IfTrue, |
369 // IfFalse. | 377 // IfFalse, and Case. |
370 // TODO(titzer): consider moving this to node.h | 378 void CollectSuccessorProjections(Node* node, Node** successors, |
371 void CollectSuccessorProjections(Node* node, Node** buffer, | 379 size_t successor_count) { |
372 IrOpcode::Value true_opcode, | 380 #ifdef DEBUG |
373 IrOpcode::Value false_opcode) { | 381 DCHECK_EQ(static_cast<int>(successor_count), node->UseCount()); |
374 buffer[0] = NULL; | 382 std::memset(successors, 0, sizeof(*successors) * successor_count); |
375 buffer[1] = NULL; | 383 #endif |
376 for (Node* use : node->uses()) { | 384 for (Node* const use : node->uses()) { |
377 if (use->opcode() == true_opcode) { | 385 size_t index; |
378 DCHECK(!buffer[0]); | 386 switch (use->opcode()) { |
379 buffer[0] = use; | 387 default: |
| 388 UNREACHABLE(); |
| 389 // Fall through. |
| 390 case IrOpcode::kIfTrue: |
| 391 DCHECK_EQ(IrOpcode::kBranch, node->opcode()); |
| 392 index = 0; |
| 393 break; |
| 394 case IrOpcode::kIfFalse: |
| 395 DCHECK_EQ(IrOpcode::kBranch, node->opcode()); |
| 396 index = 1; |
| 397 break; |
| 398 case IrOpcode::kCase: |
| 399 DCHECK_EQ(IrOpcode::kSwitch, node->opcode()); |
| 400 index = CaseIndexOf(use->op()); |
| 401 break; |
380 } | 402 } |
381 if (use->opcode() == false_opcode) { | 403 DCHECK_LT(index, successor_count); |
382 DCHECK(!buffer[1]); | 404 DCHECK(successors[index] == nullptr); |
383 buffer[1] = use; | 405 successors[index] = use; |
384 } | |
385 } | 406 } |
386 DCHECK(buffer[0]); | 407 #ifdef DEBUG |
387 DCHECK(buffer[1]); | 408 for (size_t index = 0; index < successor_count; ++index) { |
| 409 DCHECK_NOT_NULL(successors[index]); |
| 410 } |
| 411 #endif |
388 } | 412 } |
389 | 413 |
390 void CollectSuccessorBlocks(Node* node, BasicBlock** buffer, | 414 void CollectSuccessorBlocks(Node* node, BasicBlock** successor_blocks, |
391 IrOpcode::Value true_opcode, | 415 size_t successor_count) { |
392 IrOpcode::Value false_opcode) { | 416 Node** successors = reinterpret_cast<Node**>(successor_blocks); |
393 Node* successors[2]; | 417 CollectSuccessorProjections(node, successors, successor_count); |
394 CollectSuccessorProjections(node, successors, true_opcode, false_opcode); | 418 for (size_t index = 0; index < successor_count; ++index) { |
395 buffer[0] = schedule_->block(successors[0]); | 419 successor_blocks[index] = schedule_->block(successors[index]); |
396 buffer[1] = schedule_->block(successors[1]); | 420 } |
397 } | 421 } |
398 | 422 |
399 void ConnectBranch(Node* branch) { | 423 void ConnectBranch(Node* branch) { |
400 BasicBlock* successor_blocks[2]; | 424 BasicBlock* successor_blocks[2]; |
401 CollectSuccessorBlocks(branch, successor_blocks, IrOpcode::kIfTrue, | 425 CollectSuccessorBlocks(branch, successor_blocks, |
402 IrOpcode::kIfFalse); | 426 arraysize(successor_blocks)); |
403 | 427 |
404 // Consider branch hints. | 428 // Consider branch hints. |
405 switch (BranchHintOf(branch->op())) { | 429 switch (BranchHintOf(branch->op())) { |
406 case BranchHint::kNone: | 430 case BranchHint::kNone: |
407 break; | 431 break; |
408 case BranchHint::kTrue: | 432 case BranchHint::kTrue: |
409 successor_blocks[1]->set_deferred(true); | 433 successor_blocks[1]->set_deferred(true); |
410 break; | 434 break; |
411 case BranchHint::kFalse: | 435 case BranchHint::kFalse: |
412 successor_blocks[0]->set_deferred(true); | 436 successor_blocks[0]->set_deferred(true); |
413 break; | 437 break; |
414 } | 438 } |
415 | 439 |
416 if (branch == component_entry_) { | 440 if (branch == component_entry_) { |
417 TraceConnect(branch, component_start_, successor_blocks[0]); | 441 TraceConnect(branch, component_start_, successor_blocks[0]); |
418 TraceConnect(branch, component_start_, successor_blocks[1]); | 442 TraceConnect(branch, component_start_, successor_blocks[1]); |
419 schedule_->InsertBranch(component_start_, component_end_, branch, | 443 schedule_->InsertBranch(component_start_, component_end_, branch, |
420 successor_blocks[0], successor_blocks[1]); | 444 successor_blocks[0], successor_blocks[1]); |
421 } else { | 445 } else { |
422 Node* branch_block_node = NodeProperties::GetControlInput(branch); | 446 Node* branch_block_node = NodeProperties::GetControlInput(branch); |
423 BasicBlock* branch_block = schedule_->block(branch_block_node); | 447 BasicBlock* branch_block = schedule_->block(branch_block_node); |
424 DCHECK(branch_block != NULL); | 448 DCHECK_NOT_NULL(branch_block); |
425 | 449 |
426 TraceConnect(branch, branch_block, successor_blocks[0]); | 450 TraceConnect(branch, branch_block, successor_blocks[0]); |
427 TraceConnect(branch, branch_block, successor_blocks[1]); | 451 TraceConnect(branch, branch_block, successor_blocks[1]); |
428 schedule_->AddBranch(branch_block, branch, successor_blocks[0], | 452 schedule_->AddBranch(branch_block, branch, successor_blocks[0], |
429 successor_blocks[1]); | 453 successor_blocks[1]); |
430 } | 454 } |
431 } | 455 } |
432 | 456 |
| 457 void ConnectSwitch(Node* sw) { |
| 458 size_t const successor_count = sw->op()->ControlOutputCount(); |
| 459 BasicBlock** successor_blocks = |
| 460 zone_->NewArray<BasicBlock*>(static_cast<int>(successor_count)); |
| 461 CollectSuccessorBlocks(sw, successor_blocks, successor_count); |
| 462 |
| 463 if (sw == component_entry_) { |
| 464 for (size_t index = 0; index < successor_count; ++index) { |
| 465 TraceConnect(sw, component_start_, successor_blocks[index]); |
| 466 } |
| 467 schedule_->InsertSwitch(component_start_, component_end_, sw, |
| 468 successor_blocks, successor_count); |
| 469 } else { |
| 470 Node* sw_block_node = NodeProperties::GetControlInput(sw); |
| 471 BasicBlock* sw_block = schedule_->block(sw_block_node); |
| 472 DCHECK_NOT_NULL(sw_block); |
| 473 |
| 474 for (size_t index = 0; index < successor_count; ++index) { |
| 475 TraceConnect(sw, sw_block, successor_blocks[index]); |
| 476 } |
| 477 schedule_->AddSwitch(sw_block, sw, successor_blocks, successor_count); |
| 478 } |
| 479 } |
| 480 |
433 void ConnectMerge(Node* merge) { | 481 void ConnectMerge(Node* merge) { |
434 // Don't connect the special merge at the end to its predecessors. | 482 // Don't connect the special merge at the end to its predecessors. |
435 if (IsFinalMerge(merge)) return; | 483 if (IsFinalMerge(merge)) return; |
436 | 484 |
437 BasicBlock* block = schedule_->block(merge); | 485 BasicBlock* block = schedule_->block(merge); |
438 DCHECK(block != NULL); | 486 DCHECK_NOT_NULL(block); |
439 // For all of the merge's control inputs, add a goto at the end to the | 487 // For all of the merge's control inputs, add a goto at the end to the |
440 // merge's basic block. | 488 // merge's basic block. |
441 for (Node* const input : merge->inputs()) { | 489 for (Node* const input : merge->inputs()) { |
442 BasicBlock* predecessor_block = schedule_->block(input); | 490 BasicBlock* predecessor_block = schedule_->block(input); |
443 TraceConnect(merge, predecessor_block, block); | 491 TraceConnect(merge, predecessor_block, block); |
444 schedule_->AddGoto(predecessor_block, block); | 492 schedule_->AddGoto(predecessor_block, block); |
445 } | 493 } |
446 } | 494 } |
447 | 495 |
448 void ConnectReturn(Node* ret) { | 496 void ConnectReturn(Node* ret) { |
449 Node* return_block_node = NodeProperties::GetControlInput(ret); | 497 Node* return_block_node = NodeProperties::GetControlInput(ret); |
450 BasicBlock* return_block = schedule_->block(return_block_node); | 498 BasicBlock* return_block = schedule_->block(return_block_node); |
451 TraceConnect(ret, return_block, NULL); | 499 TraceConnect(ret, return_block, NULL); |
452 schedule_->AddReturn(return_block, ret); | 500 schedule_->AddReturn(return_block, ret); |
453 } | 501 } |
454 | 502 |
455 void ConnectThrow(Node* thr) { | 503 void ConnectThrow(Node* thr) { |
456 Node* throw_block_node = NodeProperties::GetControlInput(thr); | 504 Node* throw_block_node = NodeProperties::GetControlInput(thr); |
457 BasicBlock* throw_block = schedule_->block(throw_block_node); | 505 BasicBlock* throw_block = schedule_->block(throw_block_node); |
458 TraceConnect(thr, throw_block, NULL); | 506 TraceConnect(thr, throw_block, NULL); |
459 schedule_->AddThrow(throw_block, thr); | 507 schedule_->AddThrow(throw_block, thr); |
460 } | 508 } |
461 | 509 |
462 void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) { | 510 void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) { |
463 DCHECK(block); | 511 DCHECK_NOT_NULL(block); |
464 if (succ == NULL) { | 512 if (succ == NULL) { |
465 Trace("Connect #%d:%s, B%d -> end\n", node->id(), node->op()->mnemonic(), | 513 Trace("Connect #%d:%s, B%d -> end\n", node->id(), node->op()->mnemonic(), |
466 block->id().ToInt()); | 514 block->id().ToInt()); |
467 } else { | 515 } else { |
468 Trace("Connect #%d:%s, B%d -> B%d\n", node->id(), node->op()->mnemonic(), | 516 Trace("Connect #%d:%s, B%d -> B%d\n", node->id(), node->op()->mnemonic(), |
469 block->id().ToInt(), succ->id().ToInt()); | 517 block->id().ToInt(), succ->id().ToInt()); |
470 } | 518 } |
471 } | 519 } |
472 | 520 |
473 bool IsFinalMerge(Node* node) { | 521 bool IsFinalMerge(Node* node) { |
474 return (node->opcode() == IrOpcode::kMerge && | 522 return (node->opcode() == IrOpcode::kMerge && |
475 node == scheduler_->graph_->end()->InputAt(0)); | 523 node == scheduler_->graph_->end()->InputAt(0)); |
476 } | 524 } |
477 | 525 |
478 bool IsSingleEntrySingleExitRegion(Node* entry, Node* exit) const { | 526 bool IsSingleEntrySingleExitRegion(Node* entry, Node* exit) const { |
479 size_t entry_class = scheduler_->equivalence_->ClassOf(entry); | 527 size_t entry_class = scheduler_->equivalence_->ClassOf(entry); |
480 size_t exit_class = scheduler_->equivalence_->ClassOf(exit); | 528 size_t exit_class = scheduler_->equivalence_->ClassOf(exit); |
481 return entry != exit && entry_class == exit_class; | 529 return entry != exit && entry_class == exit_class; |
482 } | 530 } |
483 | 531 |
484 void ResetDataStructures() { | 532 void ResetDataStructures() { |
485 control_.clear(); | 533 control_.clear(); |
486 DCHECK(queue_.empty()); | 534 DCHECK(queue_.empty()); |
487 DCHECK(control_.empty()); | 535 DCHECK(control_.empty()); |
488 } | 536 } |
489 | 537 |
| 538 Zone* zone_; |
490 Scheduler* scheduler_; | 539 Scheduler* scheduler_; |
491 Schedule* schedule_; | 540 Schedule* schedule_; |
492 NodeMarker<bool> queued_; // Mark indicating whether node is queued. | 541 NodeMarker<bool> queued_; // Mark indicating whether node is queued. |
493 ZoneQueue<Node*> queue_; // Queue used for breadth-first traversal. | 542 ZoneQueue<Node*> queue_; // Queue used for breadth-first traversal. |
494 NodeVector control_; // List of encountered control nodes. | 543 NodeVector control_; // List of encountered control nodes. |
495 Node* component_entry_; // Component single-entry node. | 544 Node* component_entry_; // Component single-entry node. |
496 BasicBlock* component_start_; // Component single-entry block. | 545 BasicBlock* component_start_; // Component single-entry block. |
497 BasicBlock* component_end_; // Component single-exit block. | 546 BasicBlock* component_end_; // Component single-exit block. |
498 }; | 547 }; |
499 | 548 |
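
Reviewer note: below is a minimal standalone C++ sketch (not V8 code; the Opcode enum, Projection struct, and case_index field are simplified stand-ins for IrOpcode, the projection nodes, and CaseIndexOf) of the successor-indexing rule that the generalized CollectSuccessorProjections above encodes: a Branch fills slots 0/1 from its IfTrue/IfFalse uses, while a Switch fills slot CaseIndexOf(use) for each Case use, and every slot ends up filled exactly once.

// Toy model only: "Opcode", "Projection", and "case_index" are simplified
// stand-ins for V8's IrOpcode, control projections, and CaseIndexOf().
#include <cassert>
#include <cstddef>
#include <iostream>
#include <vector>

enum class Opcode { kBranch, kSwitch, kIfTrue, kIfFalse, kCase };

struct Projection {
  Opcode opcode;
  size_t case_index;  // Only meaningful for kCase uses.
};

// Mirrors the indexing rule in CollectSuccessorProjections: each control
// projection of a Branch or Switch claims exactly one successor slot.
std::vector<const Projection*> CollectSuccessors(
    Opcode owner, const std::vector<Projection>& uses) {
  std::vector<const Projection*> successors(uses.size(), nullptr);
  for (const Projection& use : uses) {
    size_t index = 0;
    switch (use.opcode) {
      case Opcode::kIfTrue:
        assert(owner == Opcode::kBranch);
        index = 0;
        break;
      case Opcode::kIfFalse:
        assert(owner == Opcode::kBranch);
        index = 1;
        break;
      case Opcode::kCase:
        assert(owner == Opcode::kSwitch);
        index = use.case_index;
        break;
      default:
        assert(false && "not a control projection");
    }
    assert(index < successors.size() && successors[index] == nullptr);
    successors[index] = &use;
  }
  return successors;  // Every slot filled exactly once.
}

int main() {
  // A Switch whose Case uses appear in arbitrary order still lands each one
  // in the slot given by its case index.
  std::vector<Projection> cases = {{Opcode::kCase, 2},
                                   {Opcode::kCase, 0},
                                   {Opcode::kCase, 1}};
  std::vector<const Projection*> slots = CollectSuccessors(Opcode::kSwitch, cases);
  for (size_t i = 0; i < slots.size(); ++i) {
    std::cout << "slot " << i << " <- Case " << slots[i]->case_index << "\n";
  }
}
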
(...skipping 1100 matching lines...)
1600 for (Node* const node : *nodes) { | 1649 for (Node* const node : *nodes) { |
1601 schedule_->SetBlockForNode(to, node); | 1650 schedule_->SetBlockForNode(to, node); |
1602 scheduled_nodes_[to->id().ToSize()].push_back(node); | 1651 scheduled_nodes_[to->id().ToSize()].push_back(node); |
1603 } | 1652 } |
1604 nodes->clear(); | 1653 nodes->clear(); |
1605 } | 1654 } |
1606 | 1655 |
1607 } // namespace compiler | 1656 } // namespace compiler |
1608 } // namespace internal | 1657 } // namespace internal |
1609 } // namespace v8 | 1658 } // namespace v8 |
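
Design note on the new zone_ member: a Branch always has exactly two successors, so ConnectBranch keeps its fixed-size stack array, whereas a Switch's case count is only known at runtime, which is why CFGBuilder now stores the construction Zone and arena-allocates the successor arrays via zone_->NewArray in BuildBlocksForSuccessors and ConnectSwitch. The sketch below only illustrates that allocation pattern; "Arena" and its NewArray are illustrative stand-ins, not the real v8::internal::Zone API, and alignment/chunking are deliberately ignored.

// Illustrative only: "Arena" / "NewArray" stand in for v8::internal::Zone /
// Zone::NewArray; alignment and chunked allocation are deliberately ignored.
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>

class Arena {
 public:
  template <typename T>
  T* NewArray(size_t length) {
    // Arena-style ownership: every allocation stays alive until the arena is
    // destroyed, so the CFG builder never frees its temporary successor
    // arrays individually.
    blocks_.push_back(std::make_unique<uint8_t[]>(length * sizeof(T)));
    return reinterpret_cast<T*>(blocks_.back().get());
  }

 private:
  std::vector<std::unique_ptr<uint8_t[]>> blocks_;
};

struct BasicBlock {};

int main() {
  Arena arena;
  // A Switch with five Case projections needs a successor array sized at
  // runtime; a Branch would just use a BasicBlock*[2] on the stack.
  BasicBlock** successors = arena.NewArray<BasicBlock*>(5);
  for (size_t i = 0; i < 5; ++i) successors[i] = nullptr;
  return 0;
}
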