Chromium Code Reviews

Unified Diff: src/compiler/scheduler.cc

Issue 928213003: Model exceptional edges from call nodes in TurboFan. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Rebased. Created 5 years, 10 months ago
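For orientation: the patch teaches the scheduler's CFG builder that a call which can throw ends its basic block and has two control successors, the IfSuccess projection for the normal continuation and the IfException projection for the handler, with the exception path treated as deferred. The snippet below is only a standalone sketch of that successor mapping; Node, Opcode and the helper names are simplified stand-ins for illustration, not the real TurboFan classes.

// Illustration only: simplified stand-ins for TurboFan's Node and IrOpcode.
#include <cassert>
#include <vector>

enum class Opcode { kCall, kIfSuccess, kIfException, kOther };

struct Node {
  Opcode opcode;
  std::vector<Node*> uses;  // nodes that consume this node's outputs
};

// Mirrors IsExceptionalCall() from the patch: a call is "exceptional" if one
// of its uses is an IfException projection.
bool IsExceptionalCall(const Node& call) {
  for (const Node* use : call.uses) {
    if (use->opcode == Opcode::kIfException) return true;
  }
  return false;
}

// Mirrors the new cases in CollectSuccessorProjections(): IfSuccess maps to
// successor 0, IfException to successor 1.
void CollectCallSuccessors(const Node& call, const Node* successors[2]) {
  for (const Node* use : call.uses) {
    if (use->opcode == Opcode::kIfSuccess) successors[0] = use;
    if (use->opcode == Opcode::kIfException) successors[1] = use;
  }
}

int main() {
  Node if_success{Opcode::kIfSuccess, {}};
  Node if_exception{Opcode::kIfException, {}};
  Node call{Opcode::kCall, {&if_success, &if_exception}};

  assert(IsExceptionalCall(call));
  const Node* successors[2] = {nullptr, nullptr};
  CollectCallSuccessors(call, successors);
  assert(successors[0] == &if_success);    // normal continuation
  assert(successors[1] == &if_exception);  // exception handler, deferred
  return 0;
}

In the patch itself, ConnectCall() then marks successor 1 as deferred and splits the block via Schedule::AddCall(), as visible in the hunks below.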
 // Copyright 2013 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/compiler/scheduler.h"

 #include "src/bit-vector.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/control-equivalence.h"
 #include "src/compiler/graph.h"
(...skipping 268 matching lines...)
       }
     }
     DCHECK(component_entry_);

     for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
       ConnectBlocks(*i);  // Connect block to its predecessor/successors.
     }
   }

  private:
-  // TODO(mstarzinger): Only for Scheduler::FuseFloatingControl.
+  friend class ScheduleLateNodeVisitor;
   friend class Scheduler;

   void FixNode(BasicBlock* block, Node* node) {
     schedule_->AddNode(block, node);
     scheduler_->UpdatePlacement(node, Scheduler::kFixed);
   }

   void Queue(Node* node) {
     // Mark the connected control nodes as they are queued.
     if (!queued_.Get(node)) {
(...skipping 13 matching lines...)
         FixNode(schedule_->start(), node);
         break;
       case IrOpcode::kLoop:
       case IrOpcode::kMerge:
         BuildBlockForNode(node);
         break;
       case IrOpcode::kBranch:
       case IrOpcode::kSwitch:
         BuildBlocksForSuccessors(node);
         break;
+      case IrOpcode::kCall:
+        if (IsExceptionalCall(node)) {
+          BuildBlocksForSuccessors(node);
+        }
+        break;
       default:
         break;
     }
   }

   void ConnectBlocks(Node* node) {
     switch (node->opcode()) {
       case IrOpcode::kLoop:
       case IrOpcode::kMerge:
         ConnectMerge(node);
         break;
       case IrOpcode::kBranch:
         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
         ConnectBranch(node);
         break;
       case IrOpcode::kSwitch:
         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
         ConnectSwitch(node);
         break;
       case IrOpcode::kReturn:
         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
         ConnectReturn(node);
         break;
       case IrOpcode::kThrow:
         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
         ConnectThrow(node);
         break;
+      case IrOpcode::kCall:
+        if (IsExceptionalCall(node)) {
+          scheduler_->UpdatePlacement(node, Scheduler::kFixed);
+          ConnectCall(node);
+        }
+        break;
       default:
         break;
     }
   }

   BasicBlock* BuildBlockForNode(Node* node) {
     BasicBlock* block = schedule_->block(node);
     if (block == NULL) {
       block = schedule_->NewBasicBlock();
       Trace("Create block B%d for #%d:%s\n", block->id().ToInt(), node->id(),
             node->op()->mnemonic());
       FixNode(block, node);
     }
     return block;
   }

   void BuildBlocksForSuccessors(Node* node) {
     size_t const successor_count = node->op()->ControlOutputCount();
     Node** successors = zone_->NewArray<Node*>(successor_count);
     CollectSuccessorProjections(node, successors, successor_count);
     for (size_t index = 0; index < successor_count; ++index) {
       BuildBlockForNode(successors[index]);
     }
   }

   // Collect the branch-related projections from a node, such as IfTrue,
-  // IfFalse, Case and Default.
+  // IfFalse, IfSuccess, IfException, IfValue and IfDefault.
   void CollectSuccessorProjections(Node* node, Node** successors,
                                    size_t successor_count) {
 #ifdef DEBUG
-    DCHECK_EQ(static_cast<int>(successor_count), node->UseCount());
+    DCHECK_LE(static_cast<int>(successor_count), node->UseCount());
     std::memset(successors, 0, sizeof(*successors) * successor_count);
 #endif
     size_t if_value_index = 0;
     for (Node* const use : node->uses()) {
       size_t index;
       switch (use->opcode()) {
-        default:
-          UNREACHABLE();
-        // Fall through.
         case IrOpcode::kIfTrue:
           DCHECK_EQ(IrOpcode::kBranch, node->opcode());
           index = 0;
           break;
         case IrOpcode::kIfFalse:
           DCHECK_EQ(IrOpcode::kBranch, node->opcode());
           index = 1;
           break;
+        case IrOpcode::kIfSuccess:
+          DCHECK_EQ(IrOpcode::kCall, node->opcode());
+          index = 0;
+          break;
+        case IrOpcode::kIfException:
+          DCHECK_EQ(IrOpcode::kCall, node->opcode());
+          index = 1;
+          break;
         case IrOpcode::kIfValue:
           DCHECK_EQ(IrOpcode::kSwitch, node->opcode());
           index = if_value_index++;
           break;
         case IrOpcode::kIfDefault:
           DCHECK_EQ(IrOpcode::kSwitch, node->opcode());
           index = successor_count - 1;
           break;
+        default:
+          continue;
       }
       DCHECK_LT(if_value_index, successor_count);
       DCHECK_LT(index, successor_count);
       DCHECK_NULL(successors[index]);
       successors[index] = use;
     }
 #ifdef DEBUG
     for (size_t index = 0; index < successor_count; ++index) {
       DCHECK_NOT_NULL(successors[index]);
     }
 #endif
   }

   void CollectSuccessorBlocks(Node* node, BasicBlock** successor_blocks,
                               size_t successor_count) {
     Node** successors = reinterpret_cast<Node**>(successor_blocks);
     CollectSuccessorProjections(node, successors, successor_count);
     for (size_t index = 0; index < successor_count; ++index) {
       successor_blocks[index] = schedule_->block(successors[index]);
     }
   }

+  BasicBlock* FindPredecessorBlock(Node* node) {
+    BasicBlock* predecessor_block = nullptr;
+    while (true) {
+      predecessor_block = schedule_->block(node);
+      if (predecessor_block != nullptr) break;
+      node = NodeProperties::GetControlInput(node);
+    }
+    return predecessor_block;
+  }
+
+  void ConnectCall(Node* call) {
+    BasicBlock* successor_blocks[2];
+    CollectSuccessorBlocks(call, successor_blocks, arraysize(successor_blocks));
+
+    // Consider the exception continuation to be deferred.
+    successor_blocks[1]->set_deferred(true);
+
+    Node* call_control = NodeProperties::GetControlInput(call);
+    BasicBlock* call_block = FindPredecessorBlock(call_control);
+    TraceConnect(call, call_block, successor_blocks[0]);
+    TraceConnect(call, call_block, successor_blocks[1]);
+    schedule_->AddCall(call_block, call, successor_blocks[0],
+                       successor_blocks[1]);
+  }
+
   void ConnectBranch(Node* branch) {
     BasicBlock* successor_blocks[2];
     CollectSuccessorBlocks(branch, successor_blocks,
                            arraysize(successor_blocks));

     // Consider branch hints.
     switch (BranchHintOf(branch->op())) {
       case BranchHint::kNone:
         break;
       case BranchHint::kTrue:
         successor_blocks[1]->set_deferred(true);
         break;
       case BranchHint::kFalse:
         successor_blocks[0]->set_deferred(true);
         break;
     }

     if (branch == component_entry_) {
       TraceConnect(branch, component_start_, successor_blocks[0]);
       TraceConnect(branch, component_start_, successor_blocks[1]);
       schedule_->InsertBranch(component_start_, component_end_, branch,
                               successor_blocks[0], successor_blocks[1]);
     } else {
-      Node* branch_block_node = NodeProperties::GetControlInput(branch);
-      BasicBlock* branch_block = schedule_->block(branch_block_node);
-      DCHECK_NOT_NULL(branch_block);
-
+      Node* branch_control = NodeProperties::GetControlInput(branch);
+      BasicBlock* branch_block = FindPredecessorBlock(branch_control);
       TraceConnect(branch, branch_block, successor_blocks[0]);
       TraceConnect(branch, branch_block, successor_blocks[1]);
       schedule_->AddBranch(branch_block, branch, successor_blocks[0],
                            successor_blocks[1]);
     }
   }

   void ConnectSwitch(Node* sw) {
     size_t const successor_count = sw->op()->ControlOutputCount();
     BasicBlock** successor_blocks =
         zone_->NewArray<BasicBlock*>(successor_count);
     CollectSuccessorBlocks(sw, successor_blocks, successor_count);

     if (sw == component_entry_) {
       for (size_t index = 0; index < successor_count; ++index) {
         TraceConnect(sw, component_start_, successor_blocks[index]);
       }
       schedule_->InsertSwitch(component_start_, component_end_, sw,
                               successor_blocks, successor_count);
     } else {
-      Node* sw_block_node = NodeProperties::GetControlInput(sw);
-      BasicBlock* sw_block = schedule_->block(sw_block_node);
-      DCHECK_NOT_NULL(sw_block);
-
+      Node* switch_control = NodeProperties::GetControlInput(sw);
+      BasicBlock* switch_block = FindPredecessorBlock(switch_control);
       for (size_t index = 0; index < successor_count; ++index) {
-        TraceConnect(sw, sw_block, successor_blocks[index]);
+        TraceConnect(sw, switch_block, successor_blocks[index]);
       }
-      schedule_->AddSwitch(sw_block, sw, successor_blocks, successor_count);
+      schedule_->AddSwitch(switch_block, sw, successor_blocks, successor_count);
     }
   }

   void ConnectMerge(Node* merge) {
     // Don't connect the special merge at the end to its predecessors.
     if (IsFinalMerge(merge)) return;

     BasicBlock* block = schedule_->block(merge);
     DCHECK_NOT_NULL(block);
     // For all of the merge's control inputs, add a goto at the end to the
     // merge's basic block.
     for (Node* const input : merge->inputs()) {
-      BasicBlock* predecessor_block = schedule_->block(input);
+      BasicBlock* predecessor_block = FindPredecessorBlock(input);
       TraceConnect(merge, predecessor_block, block);
       schedule_->AddGoto(predecessor_block, block);
     }
   }

   void ConnectReturn(Node* ret) {
-    Node* return_block_node = NodeProperties::GetControlInput(ret);
-    BasicBlock* return_block = schedule_->block(return_block_node);
+    Node* return_control = NodeProperties::GetControlInput(ret);
+    BasicBlock* return_block = FindPredecessorBlock(return_control);
     TraceConnect(ret, return_block, NULL);
     schedule_->AddReturn(return_block, ret);
   }

   void ConnectThrow(Node* thr) {
-    Node* throw_block_node = NodeProperties::GetControlInput(thr);
-    BasicBlock* throw_block = schedule_->block(throw_block_node);
+    Node* throw_control = NodeProperties::GetControlInput(thr);
+    BasicBlock* throw_block = FindPredecessorBlock(throw_control);
     TraceConnect(thr, throw_block, NULL);
     schedule_->AddThrow(throw_block, thr);
   }

   void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
     DCHECK_NOT_NULL(block);
     if (succ == NULL) {
       Trace("Connect #%d:%s, B%d -> end\n", node->id(), node->op()->mnemonic(),
             block->id().ToInt());
     } else {
       Trace("Connect #%d:%s, B%d -> B%d\n", node->id(), node->op()->mnemonic(),
             block->id().ToInt(), succ->id().ToInt());
     }
   }

+  bool IsExceptionalCall(Node* node) {
+    for (Node* const use : node->uses()) {
+      if (use->opcode() == IrOpcode::kIfException) return true;
+    }
+    return false;
+  }
+
   bool IsFinalMerge(Node* node) {
     return (node->opcode() == IrOpcode::kMerge &&
             node == scheduler_->graph_->end()->InputAt(0));
   }

   bool IsSingleEntrySingleExitRegion(Node* entry, Node* exit) const {
     size_t entry_class = scheduler_->equivalence_->ClassOf(entry);
     size_t exit_class = scheduler_->equivalence_->ClassOf(exit);
     return entry != exit && entry_class == exit_class;
   }
(...skipping 825 matching lines...)
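The 825 skipped lines are scheduler phases this patch does not touch; the next hunk is inside the schedule-late visitor (ScheduleLateNodeVisitor). Both hunks lean on the new FindPredecessorBlock() helper above: the direct control input of a return, branch, switch or merge input may no longer be a node that starts a block (it can be, for instance, a call sitting in the middle of one), so the helper walks up the control-input chain until it reaches a node that already has a block assigned. The following standalone sketch of that walk uses stand-in types; the map plays the role of Schedule::block() and the single control input is a simplification.

// Illustration only: stand-in types for the FindPredecessorBlock() walk.
#include <cassert>
#include <unordered_map>

struct BasicBlock {};

struct Node {
  Node* control_input = nullptr;  // single control input, for simplicity
};

// Walk up the control chain until some node already has a block assigned,
// mirroring CFGBuilder::FindPredecessorBlock() in the patch.
BasicBlock* FindPredecessorBlock(
    const Node* node,
    const std::unordered_map<const Node*, BasicBlock*>& block_of) {
  while (true) {
    auto it = block_of.find(node);
    if (it != block_of.end()) return it->second;
    node = node->control_input;
  }
}

int main() {
  BasicBlock entry;
  Node merge;         // starts a block
  Node call{&merge};  // call inside that block, no block of its own
  Node ret{&call};    // return whose control input is the call

  std::unordered_map<const Node*, BasicBlock*> block_of = {{&merge, &entry}};
  // The return's direct control input (the call) has no block of its own;
  // the walk stops at the merge, which does.
  assert(FindPredecessorBlock(ret.control_input, block_of) == &entry);
  return 0;
}

ScheduleLateNodeVisitor reaches the same helper through scheduler_->control_flow_builder_, which is presumably why the TODO comment on the friend declarations is replaced by a friend declaration for ScheduleLateNodeVisitor in the first hunk.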
         block = hoist_block;
         hoist_block = GetPreHeader(hoist_block);
       } while (hoist_block &&
                hoist_block->dominator_depth() >= min_block->dominator_depth());
     } else if (scheduler_->flags_ & Scheduler::kSplitNodes) {
       // Split the {node} if beneficial and return the new {block} for it.
       block = SplitNode(block, node);
     }

     // Schedule the node or a floating control structure.
-    if (NodeProperties::IsControl(node)) {
+    if (IrOpcode::IsMergeOpcode(node->opcode())) {
       ScheduleFloatingControl(block, node);
     } else {
       ScheduleNode(block, node);
     }
   }

   // Mark {block} and push its non-marked predecessor on the marking queue.
   void MarkBlock(BasicBlock* block) {
     DCHECK_LT(block->id().ToSize(), marked_.size());
     marked_[block->id().ToSize()] = true;
(...skipping 103 matching lines...)
     for (Edge edge : node->use_edges()) {
       BasicBlock* use_block = GetBlockForUse(edge);
       block = block == NULL ? use_block : use_block == NULL
                                               ? block
                                               : BasicBlock::GetCommonDominator(
                                                     block, use_block);
     }
     return block;
   }

+  BasicBlock* FindPredecessorBlock(Node* node) {
+    return scheduler_->control_flow_builder_->FindPredecessorBlock(node);
+  }
+
   BasicBlock* GetBlockForUse(Edge edge) {
     Node* use = edge.from();
-    IrOpcode::Value opcode = use->opcode();
-    if (IrOpcode::IsPhiOpcode(opcode)) {
+    if (IrOpcode::IsPhiOpcode(use->opcode())) {
       // If the use is from a coupled (i.e. floating) phi, compute the common
       // dominator of its uses. This will not recurse more than one level.
       if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
         Trace("  inspecting uses of coupled #%d:%s\n", use->id(),
               use->op()->mnemonic());
         DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
         return GetCommonDominatorOfUses(use);
       }
-      // If the use is from a fixed (i.e. non-floating) phi, use the block
-      // of the corresponding control input to the merge.
+      // If the use is from a fixed (i.e. non-floating) phi, we use the
+      // predecessor block of the corresponding control input to the merge.
       if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
         Trace("  input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(),
               use->op()->mnemonic());
         Node* merge = NodeProperties::GetControlInput(use, 0);
-        opcode = merge->opcode();
-        DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
-        use = NodeProperties::GetControlInput(merge, edge.index());
+        DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
+        Node* input = NodeProperties::GetControlInput(merge, edge.index());
+        return FindPredecessorBlock(input);
+      }
+    } else if (IrOpcode::IsMergeOpcode(use->opcode())) {
+      // If the use is from a fixed (i.e. non-floating) merge, we use the
+      // predecessor block of the current input to the merge.
+      if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
+        Trace("  input@%d into a fixed merge #%d:%s\n", edge.index(), use->id(),
+              use->op()->mnemonic());
+        return FindPredecessorBlock(edge.to());
       }
     }
     BasicBlock* result = schedule_->block(use);
     if (result == NULL) return NULL;
     Trace("  must dominate use #%d:%s in B%d\n", use->id(),
           use->op()->mnemonic(), result->id().ToInt());
     return result;
   }

   void ScheduleFloatingControl(BasicBlock* block, Node* node) {
(...skipping 128 matching lines...)
   for (Node* const node : *nodes) {
     schedule_->SetBlockForNode(to, node);
     scheduled_nodes_[to->id().ToSize()].push_back(node);
   }
   nodes->clear();
 }

 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8