Chromium Code Reviews

Issue 504143003: Support Int32 representation for selected binary operations. (Closed)
Base URL: https://dart.googlecode.com/svn/branches/bleeding_edge/dart

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a | 
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. | 
| 4 | 4 | 
| 5 #include "vm/flow_graph_optimizer.h" | 5 #include "vm/flow_graph_optimizer.h" | 
| 6 | 6 | 
| 7 #include "vm/bit_vector.h" | 7 #include "vm/bit_vector.h" | 
| 8 #include "vm/cha.h" | 8 #include "vm/cha.h" | 
| 9 #include "vm/cpu.h" | 9 #include "vm/cpu.h" | 
| 10 #include "vm/dart_entry.h" | 10 #include "vm/dart_entry.h" | 
| (...skipping 24 matching lines...) | |
| 35 " otherwise use megamorphic dispatch."); | 35 " otherwise use megamorphic dispatch."); | 
| 36 DEFINE_FLAG(bool, remove_redundant_phis, true, "Remove redundant phis."); | 36 DEFINE_FLAG(bool, remove_redundant_phis, true, "Remove redundant phis."); | 
| 37 DEFINE_FLAG(bool, trace_constant_propagation, false, | 37 DEFINE_FLAG(bool, trace_constant_propagation, false, | 
| 38 "Print constant propagation and useless code elimination."); | 38 "Print constant propagation and useless code elimination."); | 
| 39 DEFINE_FLAG(bool, trace_load_optimization, false, | 39 DEFINE_FLAG(bool, trace_load_optimization, false, | 
| 40 "Print live sets for load optimization pass."); | 40 "Print live sets for load optimization pass."); | 
| 41 DEFINE_FLAG(bool, trace_optimization, false, "Print optimization details."); | 41 DEFINE_FLAG(bool, trace_optimization, false, "Print optimization details."); | 
| 42 DEFINE_FLAG(bool, truncating_left_shift, true, | 42 DEFINE_FLAG(bool, truncating_left_shift, true, | 
| 43 "Optimize left shift to truncate if possible"); | 43 "Optimize left shift to truncate if possible"); | 
| 44 DEFINE_FLAG(bool, use_cha, true, "Use class hierarchy analysis."); | 44 DEFINE_FLAG(bool, use_cha, true, "Use class hierarchy analysis."); | 
| 45 #if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_IA32) | |
| 46 DEFINE_FLAG(bool, trace_smi_widening, false, "Trace Smi->Int32 widening pass."); | |
| 47 #endif | |
| 45 DECLARE_FLAG(bool, enable_type_checks); | 48 DECLARE_FLAG(bool, enable_type_checks); | 
| 46 DECLARE_FLAG(bool, source_lines); | 49 DECLARE_FLAG(bool, source_lines); | 
| 47 DECLARE_FLAG(bool, trace_type_check_elimination); | 50 DECLARE_FLAG(bool, trace_type_check_elimination); | 
| 48 DECLARE_FLAG(bool, warn_on_javascript_compatibility); | 51 DECLARE_FLAG(bool, warn_on_javascript_compatibility); | 
| 49 | 52 | 
| 50 // Quick access to the locally defined isolate() method. | 53 // Quick access to the locally defined isolate() method. | 
| 51 #define I (isolate()) | 54 #define I (isolate()) | 
| 52 | 55 | 
| 53 static bool ShouldInlineSimd() { | 56 static bool ShouldInlineSimd() { | 
| 54 return FlowGraphCompiler::SupportsUnboxedSimd128(); | 57 return FlowGraphCompiler::SupportsUnboxedSimd128(); | 
| (...skipping 519 matching lines...) | |
| 574 ASSERT((replacement == NULL) || current->IsDefinition()); | 577 ASSERT((replacement == NULL) || current->IsDefinition()); | 
| 575 ReplaceCurrentInstruction(&it, current, replacement, flow_graph_); | 578 ReplaceCurrentInstruction(&it, current, replacement, flow_graph_); | 
| 576 changed = true; | 579 changed = true; | 
| 577 } | 580 } | 
| 578 } | 581 } | 
| 579 } | 582 } | 
| 580 return changed; | 583 return changed; | 
| 581 } | 584 } | 
| 582 | 585 | 
| 583 | 586 | 
| 587 static bool IsUnboxedInteger(Representation rep) { | |
| 588 return (rep == kUnboxedInt32) || | |
| 589 (rep == kUnboxedUint32) || | |
| 590 (rep == kUnboxedMint); | |
| 591 } | |
| 592 | |
| 593 | |
| 584 void FlowGraphOptimizer::InsertConversion(Representation from, | 594 void FlowGraphOptimizer::InsertConversion(Representation from, | 
| 585 Representation to, | 595 Representation to, | 
| 586 Value* use, | 596 Value* use, | 
| 587 bool is_environment_use) { | 597 bool is_environment_use) { | 
| 588 Instruction* insert_before; | 598 Instruction* insert_before; | 
| 589 Instruction* deopt_target; | 599 Instruction* deopt_target; | 
| 590 PhiInstr* phi = use->instruction()->AsPhi(); | 600 PhiInstr* phi = use->instruction()->AsPhi(); | 
| 591 if (phi != NULL) { | 601 if (phi != NULL) { | 
| 592 ASSERT(phi->is_alive()); | 602 ASSERT(phi->is_alive()); | 
| 593 // For phis conversions have to be inserted in the predecessor. | 603 // For phis conversions have to be inserted in the predecessor. | 
| 594 insert_before = | 604 insert_before = | 
| 595 phi->block()->PredecessorAt(use->use_index())->last_instruction(); | 605 phi->block()->PredecessorAt(use->use_index())->last_instruction(); | 
| 596 deopt_target = NULL; | 606 deopt_target = NULL; | 
| 597 } else { | 607 } else { | 
| 598 deopt_target = insert_before = use->instruction(); | 608 deopt_target = insert_before = use->instruction(); | 
| 599 } | 609 } | 
| 600 | 610 | 
| 601 Definition* converted = NULL; | 611 Definition* converted = NULL; | 
| 602 if ((from == kTagged) && (to == kUnboxedMint)) { | 612 if ((from == kTagged) && (to == kUnboxedMint)) { | 
| 603 ASSERT((deopt_target != NULL) || | 613 ASSERT((deopt_target != NULL) || | 
| 604 (use->Type()->ToCid() == kUnboxedMint)); | 614 (use->Type()->ToCid() == kUnboxedMint)); | 
| 605 const intptr_t deopt_id = (deopt_target != NULL) ? | 615 const intptr_t deopt_id = (deopt_target != NULL) ? | 
| 606 deopt_target->DeoptimizationTarget() : Isolate::kNoDeoptId; | 616 deopt_target->DeoptimizationTarget() : Isolate::kNoDeoptId; | 
| 607 converted = new(I) UnboxIntegerInstr(use->CopyWithType(), deopt_id); | 617 converted = new(I) UnboxIntegerInstr(use->CopyWithType(), deopt_id); | 
| 608 } else if ((from == kUnboxedMint) && (to == kTagged)) { | 618 } else if ((from == kUnboxedMint) && (to == kTagged)) { | 
| 609 converted = new(I) BoxIntegerInstr(use->CopyWithType()); | 619 converted = new(I) BoxIntegerInstr(use->CopyWithType()); | 
| 610 } else if ((from == kUnboxedMint) && (to == kUnboxedUint32)) { | 620 } else if (IsUnboxedInteger(from) && IsUnboxedInteger(to)) { | 
| 611 converted = new(I) UnboxedIntConverterInstr(from, to, use->CopyWithType()); | 621 const intptr_t deopt_id = (to == kUnboxedInt32) && (deopt_target != NULL) ? | 
| 612 } else if ((from == kUnboxedUint32) && (to == kUnboxedMint)) { | 622 deopt_target->DeoptimizationTarget() : Isolate::kNoDeoptId; | 
| 613 converted = new(I) UnboxedIntConverterInstr(from, to, use->CopyWithType()); | 623 converted = new(I) UnboxedIntConverterInstr(from, | 
| 624 to, | |
| 625 use->CopyWithType(), | |
| 626 deopt_id); | |
srdjan (2014/08/27 16:17:13): Why does the case (to == kUnboxedUin32 && from == …
Vyacheslav Egorov (Google) (2014/08/28 20:48:37): Uint32 representation is kinda special right now i…
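As I read the new branch above and this exchange, the rule is: among the unboxed-integer conversions handled by UnboxedIntConverterInstr, only the ones narrowing into kUnboxedInt32 can observe a value that does not fit and therefore carry a deoptimization target; conversions into kUnboxedUint32 are truncating and conversions into kUnboxedMint always fit. A minimal sketch of that decision — the enum and helper below are illustrative stand-ins, not the VM's API:

```cpp
// Illustrative stand-in, not code from this CL.
enum Representation { kUnboxedInt32, kUnboxedUint32, kUnboxedMint };

// Mint->Int32 and Uint32->Int32 may see values outside the signed 32-bit
// range, so only the kUnboxedInt32 destination needs a deopt target;
// ->Uint32 truncates by definition and ->Mint always succeeds.
static bool ConversionNeedsDeoptTarget(Representation to) {
  return to == kUnboxedInt32;
}
```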
| 627 } else if ((from == kUnboxedInt32) && (to == kUnboxedDouble)) { | |
| 628 converted = new Int32ToDoubleInstr(use->CopyWithType()); | |
| 629 } else if ((from == kTagged) && (to == kUnboxedInt32)) { | |
| 630 const intptr_t deopt_id = (deopt_target != NULL) ? | |
| 631 deopt_target->DeoptimizationTarget() : Isolate::kNoDeoptId; | |
| 632 converted = new UnboxInt32Instr(use->CopyWithType(), deopt_id); | |
| 633 } else if ((from == kUnboxedInt32) && (to == kTagged)) { | |
| 634 converted = new BoxInt32Instr(use->CopyWithType()); | |
| 614 } else if ((from == kTagged) && (to == kUnboxedUint32)) { | 635 } else if ((from == kTagged) && (to == kUnboxedUint32)) { | 
| 615 const intptr_t deopt_id = (deopt_target != NULL) ? | 636 const intptr_t deopt_id = (deopt_target != NULL) ? | 
| 616 deopt_target->DeoptimizationTarget() : Isolate::kNoDeoptId; | 637 deopt_target->DeoptimizationTarget() : Isolate::kNoDeoptId; | 
| 617 converted = new UnboxUint32Instr(use->CopyWithType(), deopt_id); | 638 converted = new UnboxUint32Instr(use->CopyWithType(), deopt_id); | 
| 618 } else if (from == kUnboxedMint && to == kUnboxedDouble) { | 639 } else if (from == kUnboxedMint && to == kUnboxedDouble) { | 
| 619 ASSERT(CanUnboxDouble()); | 640 ASSERT(CanUnboxDouble()); | 
| 620 // Convert by boxing/unboxing. | 641 // Convert by boxing/unboxing. | 
| 621 // TODO(fschneider): Implement direct unboxed mint-to-double conversion. | 642 // TODO(fschneider): Implement direct unboxed mint-to-double conversion. | 
| 622 BoxIntegerInstr* boxed = | 643 BoxIntegerInstr* boxed = | 
| 623 new(I) BoxIntegerInstr(use->CopyWithType()); | 644 new(I) BoxIntegerInstr(use->CopyWithType()); | 
| (...skipping 60 matching lines...) | |
| 684 if (from == kUnboxedDouble) { | 705 if (from == kUnboxedDouble) { | 
| 685 boxed = new(I) BoxDoubleInstr(use->CopyWithType()); | 706 boxed = new(I) BoxDoubleInstr(use->CopyWithType()); | 
| 686 } else if (from == kUnboxedInt32x4) { | 707 } else if (from == kUnboxedInt32x4) { | 
| 687 boxed = new(I) BoxInt32x4Instr(use->CopyWithType()); | 708 boxed = new(I) BoxInt32x4Instr(use->CopyWithType()); | 
| 688 } else if (from == kUnboxedFloat32x4) { | 709 } else if (from == kUnboxedFloat32x4) { | 
| 689 boxed = new(I) BoxFloat32x4Instr(use->CopyWithType()); | 710 boxed = new(I) BoxFloat32x4Instr(use->CopyWithType()); | 
| 690 } else if (from == kUnboxedMint) { | 711 } else if (from == kUnboxedMint) { | 
| 691 boxed = new(I) BoxIntegerInstr(use->CopyWithType()); | 712 boxed = new(I) BoxIntegerInstr(use->CopyWithType()); | 
| 692 } else if (from == kUnboxedFloat64x2) { | 713 } else if (from == kUnboxedFloat64x2) { | 
| 693 boxed = new(I) BoxFloat64x2Instr(use->CopyWithType()); | 714 boxed = new(I) BoxFloat64x2Instr(use->CopyWithType()); | 
| 715 } else if (from == kUnboxedInt32) { | |
| 716 boxed = new(I) BoxInt32Instr(use->CopyWithType()); | |
| 717 } else if (from == kUnboxedUint32) { | |
| 718 boxed = new(I) BoxUint32Instr(use->CopyWithType()); | |
| 694 } else { | 719 } else { | 
| 695 UNIMPLEMENTED(); | 720 UNIMPLEMENTED(); | 
| 696 } | 721 } | 
| 697 use->BindTo(boxed); | 722 use->BindTo(boxed); | 
| 698 InsertBefore(insert_before, boxed, NULL, FlowGraph::kValue); | 723 InsertBefore(insert_before, boxed, NULL, FlowGraph::kValue); | 
| 699 Value* to_value = new(I) Value(boxed); | 724 Value* to_value = new(I) Value(boxed); | 
| 700 if (to == kUnboxedDouble) { | 725 if (to == kUnboxedDouble) { | 
| 701 converted = new(I) UnboxDoubleInstr(to_value, deopt_id); | 726 converted = new(I) UnboxDoubleInstr(to_value, deopt_id); | 
| 702 } else if (to == kUnboxedInt32x4) { | 727 } else if (to == kUnboxedInt32x4) { | 
| 703 converted = new(I) UnboxInt32x4Instr(to_value, deopt_id); | 728 converted = new(I) UnboxInt32x4Instr(to_value, deopt_id); | 
| 704 } else if (to == kUnboxedFloat32x4) { | 729 } else if (to == kUnboxedFloat32x4) { | 
| 705 converted = new(I) UnboxFloat32x4Instr(to_value, deopt_id); | 730 converted = new(I) UnboxFloat32x4Instr(to_value, deopt_id); | 
| 706 } else if (to == kUnboxedMint) { | 731 } else if (to == kUnboxedMint) { | 
| 707 converted = new(I) UnboxIntegerInstr(to_value, deopt_id); | 732 converted = new(I) UnboxIntegerInstr(to_value, deopt_id); | 
| 708 } else if (to == kUnboxedFloat64x2) { | 733 } else if (to == kUnboxedFloat64x2) { | 
| 709 converted = new(I) UnboxFloat64x2Instr(to_value, deopt_id); | 734 converted = new(I) UnboxFloat64x2Instr(to_value, deopt_id); | 
| 735 } else if (to == kUnboxedInt32) { | |
| 736 boxed = new(I) UnboxInt32Instr(use->CopyWithType(), deopt_id); | |
| 737 } else if (to == kUnboxedUint32) { | |
| 738 boxed = new(I) UnboxUint32Instr(use->CopyWithType(), deopt_id); | |
| 710 } else { | 739 } else { | 
| 711 UNIMPLEMENTED(); | 740 UNIMPLEMENTED(); | 
| 712 } | 741 } | 
| 713 } | 742 } | 
| 714 ASSERT(converted != NULL); | 743 ASSERT(converted != NULL); | 
| 715 InsertBefore(insert_before, converted, use->instruction()->env(), | 744 InsertBefore(insert_before, converted, use->instruction()->env(), | 
| 716 FlowGraph::kValue); | 745 FlowGraph::kValue); | 
| 717 if (is_environment_use) { | 746 if (is_environment_use) { | 
| 718 use->BindToEnvironment(converted); | 747 use->BindToEnvironment(converted); | 
| 719 } else { | 748 } else { | 
| 720 use->BindTo(converted); | 749 use->BindTo(converted); | 
| 721 } | 750 } | 
| 751 | |
| 752 if ((to == kUnboxedInt32) && (phi != NULL)) { | |
| 753 // Int32 phis are unboxed optimistically. Ensure that unboxing | |
| 754 // has a deoptimization target attached from the goto instruction. | |
| 755 flow_graph_->CopyDeoptTarget(converted, insert_before); | |
| 756 } | |
| 722 } | 757 } | 
| 723 | 758 | 
| 724 | 759 | 
| 725 void FlowGraphOptimizer::ConvertUse(Value* use, Representation from_rep) { | 760 void FlowGraphOptimizer::ConvertUse(Value* use, Representation from_rep) { | 
| 726 const Representation to_rep = | 761 const Representation to_rep = | 
| 727 use->instruction()->RequiredInputRepresentation(use->use_index()); | 762 use->instruction()->RequiredInputRepresentation(use->use_index()); | 
| 728 if (from_rep == to_rep || to_rep == kNoRepresentation) { | 763 if (from_rep == to_rep || to_rep == kNoRepresentation) { | 
| 729 return; | 764 return; | 
| 730 } | 765 } | 
| 731 InsertConversion(from_rep, to_rep, use, /*is_environment_use=*/ false); | 766 InsertConversion(from_rep, to_rep, use, /*is_environment_use=*/ false); | 
| (...skipping 26 matching lines...) | |
| 758 if (use->instruction()->MayThrow() && | 793 if (use->instruction()->MayThrow() && | 
| 759 use->instruction()->GetBlock()->InsideTryBlock()) { | 794 use->instruction()->GetBlock()->InsideTryBlock()) { | 
| 760 // Environment uses at calls inside try-blocks must be converted to | 795 // Environment uses at calls inside try-blocks must be converted to | 
| 761 // tagged representation. | 796 // tagged representation. | 
| 762 ConvertEnvironmentUse(it.Current(), from_rep); | 797 ConvertEnvironmentUse(it.Current(), from_rep); | 
| 763 } | 798 } | 
| 764 } | 799 } | 
| 765 } | 800 } | 
| 766 | 801 | 
| 767 | 802 | 
| 768 // Returns true if phi's representation was changed. | 803 static void UnboxPhi(PhiInstr* phi) { | 
| 769 static bool UnboxPhi(PhiInstr* phi) { | 804 Representation unboxed = phi->representation(); | 
| 770 Representation current = phi->representation(); | |
| 771 Representation unboxed = current; | |
| 772 | 805 | 
| 773 switch (phi->Type()->ToCid()) { | 806 switch (phi->Type()->ToCid()) { | 
| 774 case kDoubleCid: | 807 case kDoubleCid: | 
| 775 if (CanUnboxDouble()) { | 808 if (CanUnboxDouble()) { | 
| 776 unboxed = kUnboxedDouble; | 809 unboxed = kUnboxedDouble; | 
| 777 } | 810 } | 
| 778 break; | 811 break; | 
| 779 case kFloat32x4Cid: | 812 case kFloat32x4Cid: | 
| 780 if (ShouldInlineSimd()) { | 813 if (ShouldInlineSimd()) { | 
| 781 unboxed = kUnboxedFloat32x4; | 814 unboxed = kUnboxedFloat32x4; | 
| 782 } | 815 } | 
| 783 break; | 816 break; | 
| 784 case kInt32x4Cid: | 817 case kInt32x4Cid: | 
| 785 if (ShouldInlineSimd()) { | 818 if (ShouldInlineSimd()) { | 
| 786 unboxed = kUnboxedInt32x4; | 819 unboxed = kUnboxedInt32x4; | 
| 787 } | 820 } | 
| 788 break; | 821 break; | 
| 789 case kFloat64x2Cid: | 822 case kFloat64x2Cid: | 
| 790 if (ShouldInlineSimd()) { | 823 if (ShouldInlineSimd()) { | 
| 791 unboxed = kUnboxedFloat64x2; | 824 unboxed = kUnboxedFloat64x2; | 
| 792 } | 825 } | 
| 793 break; | 826 break; | 
| 794 } | 827 } | 
| 795 | 828 | 
| 796 if (unboxed != current) { | 829 phi->set_representation(unboxed); | 
| 797 phi->set_representation(unboxed); | |
| 798 return true; | |
| 799 } | |
| 800 | |
| 801 return false; | |
| 802 } | 830 } | 
| 803 | 831 | 
| 804 | 832 | 
| 805 void FlowGraphOptimizer::SelectRepresentations() { | 833 void FlowGraphOptimizer::SelectRepresentations() { | 
| 806 // Conservatively unbox all phis that were proven to be of Double, | 834 // Conservatively unbox all phis that were proven to be of Double, | 
| 807 // Float32x4, or Int32x4 type. | 835 // Float32x4, or Int32x4 type. | 
| 808 for (intptr_t i = 0; i < block_order_.length(); ++i) { | 836 for (intptr_t i = 0; i < block_order_.length(); ++i) { | 
| 809 JoinEntryInstr* join_entry = block_order_[i]->AsJoinEntry(); | 837 JoinEntryInstr* join_entry = block_order_[i]->AsJoinEntry(); | 
| 810 if (join_entry != NULL) { | 838 if (join_entry != NULL) { | 
| 811 for (PhiIterator it(join_entry); !it.Done(); it.Advance()) { | 839 for (PhiIterator it(join_entry); !it.Done(); it.Advance()) { | 
| (...skipping 3774 matching lines...) | |
| 4586 } | 4614 } | 
| 4587 | 4615 | 
| 4588 // Discard the environment from the original instruction because the store | 4616 // Discard the environment from the original instruction because the store | 
| 4589 // can't deoptimize. | 4617 // can't deoptimize. | 
| 4590 instr->RemoveEnvironment(); | 4618 instr->RemoveEnvironment(); | 
| 4591 ReplaceCall(instr, store); | 4619 ReplaceCall(instr, store); | 
| 4592 return true; | 4620 return true; | 
| 4593 } | 4621 } | 
| 4594 | 4622 | 
| 4595 | 4623 | 
| 4624 #if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_IA32) | |
| 4625 // The Smi widening pass is only meaningful on platforms where a Smi | |
| 4626 // is smaller than 32 bits. For now it is only supported on ARM and ia32. | |
| 4627 | |
| 4628 class DefinitionWorklist { | |
srdjan (2014/08/27 16:17:13): public ValueObject
Vyacheslav Egorov (Google) (2014/08/28 20:48:37): Done.
| 4629 public: | |
| 4630 DefinitionWorklist(FlowGraph* flow_graph, | |
| 4631 intptr_t initial_capacity) | |
| 4632 : defs_(initial_capacity), | |
| 4633 contains_(new BitVector(flow_graph->current_ssa_temp_index())) { | |
| 4634 } | |
| 4635 | |
| 4636 void Add(Definition* defn) { | |
| 4637 if (!Contains(defn)) { | |
| 4638 defs_.Add(defn); | |
| 4639 contains_->Add(defn->ssa_temp_index()); | |
| 4640 } | |
| 4641 } | |
| 4642 | |
| 4643 bool Contains(Definition* defn) const { | |
| 4644 return (defn->ssa_temp_index() >= 0) && | |
| 4645 contains_->Contains(defn->ssa_temp_index()); | |
| 4646 } | |
| 4647 | |
| 4648 const GrowableArray<Definition*>& definitions() const { return defs_; } | |
| 4649 BitVector* contains() const { return contains_; } | |
| 4650 | |
| 4651 void Clear() { | |
| 4652 defs_.TruncateTo(0); | |
| 4653 contains_->Clear(); | |
| 4654 } | |
| 4655 | |
| 4656 private: | |
| 4657 GrowableArray<Definition*> defs_; | |
| 4658 BitVector* contains_; | |
| 
srdjan
2014/08/27 16:17:13
I think it is slightly confusing to have contains(
 
Vyacheslav Egorov (Google)
2014/08/28 20:48:37
Done.
 | |
| 4659 }; | |
| 4660 | |
| 4661 | |
| 4662 static bool CanBeWidened(BinarySmiOpInstr* smi_op) { | |
| 4663 return BinaryInt32OpInstr::IsSupported(smi_op->op_kind(), | |
| 4664 smi_op->left(), | |
| 4665 smi_op->right()); | |
| 4666 } | |
| 4667 | |
| 4668 | |
| 4669 static bool BenefitsFromWidening(BinarySmiOpInstr* smi_op) { | |
| 4670 // TODO(vegorov): when shifts with non-constant shift counts are supported, | |
| 4671 // add them here as we save untagging of the count. | |
| 4672 switch (smi_op->op_kind()) { | |
| 4673 case Token::kMUL: | |
| 4674 case Token::kSHR: | |
| 4675 // For kMUL we save untagging of the argument; for kSHR | |
| 4676 // we save tagging of the result. | |
| 4677 return true; | |
srdjan (2014/08/27 16:17:13): Why not kSHL?
Vyacheslav Egorov (Google) (2014/08/28 20:48:37): Shift Left by a *constant* does not need to untag…
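A hedged sketch of the tagging arithmetic behind this exchange and the comment above, assuming the usual one-bit Smi tag (a Smi n is stored as n << 1 with a zero tag bit) and ignoring overflow checks; the helper names are made up for illustration, not the VM's Smi API:

```cpp
#include <cstdint>

// Illustration only -- not code from this CL.
static intptr_t SmiTag(intptr_t n)   { return n << 1; }
static intptr_t SmiUntag(intptr_t t) { return t >> 1; }

// kMUL: one operand must be untagged, otherwise the product would carry
// two tag shifts: (a << 1) * (b << 1) == (a * b) << 2.
static intptr_t SmiMul(intptr_t tagged_a, intptr_t tagged_b) {
  return tagged_a * SmiUntag(tagged_b);      // result is already tagged
}

// kSHR: the shift must act on the untagged value and the result must be
// re-tagged, so widening saves the tagging of the result.
static intptr_t SmiShr(intptr_t tagged_a, intptr_t count) {
  return SmiTag(SmiUntag(tagged_a) >> count);
}

// kSHL by a constant: shifting the tagged value directly keeps the tag bit
// zero, so there is no tag/untag to save -- which matches why kSHL is
// absent from BenefitsFromWidening above.
static intptr_t SmiShlByConstant(intptr_t tagged_a, int count) {
  return tagged_a << count;                  // still a valid Smi (overflow ignored)
}
```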
| 4678 | |
| 4679 default: | |
| 4680 return false; | |
| 4681 } | |
| 4682 } | |
| 4683 | |
| 4684 | |
| 4685 void FlowGraphOptimizer::WidenSmiToInt32() { | |
| 4686 GrowableArray<BinarySmiOpInstr*> candidates; | |
| 4687 | |
| 4688 // Step 1. Collect all instructions that potentially benefit from widening of | |
| 4689 // their operands (or their result) into int32 range. | |
| 4690 for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator(); | |
| 4691 !block_it.Done(); | |
| 4692 block_it.Advance()) { | |
| 4693 for (ForwardInstructionIterator instr_it(block_it.Current()); | |
| 4694 !instr_it.Done(); | |
| 4695 instr_it.Advance()) { | |
| 4696 BinarySmiOpInstr* smi_op = instr_it.Current()->AsBinarySmiOp(); | |
| 4697 if (smi_op != NULL && | |
srdjan (2014/08/27 16:17:13): add parentheses
Vyacheslav Egorov (Google) (2014/08/28 20:48:37): Done.
| 4698 BenefitsFromWidening(smi_op) && | |
| 4699 CanBeWidened(smi_op)) { | |
| 4700 candidates.Add(smi_op); | |
| 4701 } | |
| 4702 } | |
| 4703 } | |
| 4704 | |
| 4705 if (candidates.length() == 0) { | |
srdjan (2014/08/27 16:17:13): candidates.is_empty()
Vyacheslav Egorov (Google) (2014/08/28 20:48:37): Done.
| 4706 return; | |
| 4707 } | |
| 4708 | |
| 4709 // Step 2. For each block in the graph compute which loop it belongs to. | |
| 4710 // We will use this information later during computation of the widening's | |
| 4711 // gain: we are going to assume that only conversions occurring inside the | |
| 4712 // same loop should be counted against the gain; all other conversions | |
| 4713 // can be hoisted and thus cost nothing compared to the loop cost itself. | |
| 4714 const ZoneGrowableArray<BlockEntryInstr*>& loop_headers = | |
| 4715 flow_graph()->loop_headers(); | |
srdjan (2014/08/27 16:17:13): 4 spaces indent
Vyacheslav Egorov (Google) (2014/08/28 20:48:37): Done.
| 4716 | |
| 4717 GrowableArray<intptr_t> loops(flow_graph_->preorder().length()); | |
| 4718 for (intptr_t i = 0; i < flow_graph_->preorder().length(); i++) { | |
| 4719 loops.Add(-1); | |
| 4720 } | |
| 4721 | |
| 4722 for (intptr_t loop_id = 0; loop_id < loop_headers.length(); ++loop_id) { | |
| 4723 for (BitVector::Iterator loop_it(loop_headers[loop_id]->loop_info()); | |
| 4724 !loop_it.Done(); | |
| 4725 loop_it.Advance()) { | |
| 4726 loops[loop_it.Current()] = loop_id; | |
| 4727 } | |
| 4728 } | |
| 4729 | |
| 4730 // Step 3. For each candidate transitively collect all other BinarySmiOpInstr | |
| 4731 // and PhiInstr that depend on it and that it depends on, and count the number of | |
| 4732 // untagging operations that we save under the assumption that this whole graph of | |
| 4733 // values is using kUnboxedInt32 representation instead of kTagged. | |
| 4734 // Convert those graphs that have positive gain to kUnboxedInt32. | |
| 4735 | |
| 4736 // BitVector containing SSA indexes of all processed definitions. Used to skip | |
| 4737 // those candidates that belong to the dependency graph of another candidate. | |
| 4738 BitVector* processed = new BitVector(flow_graph_->current_ssa_temp_index()); | |
| 4739 | |
| 4740 // Worklist used to collect dependency graph. | |
| 4741 DefinitionWorklist worklist(flow_graph_, candidates.length()); | |
| 4742 for (intptr_t i = 0; i < candidates.length(); i++) { | |
| 4743 BinarySmiOpInstr* op = candidates[i]; | |
| 4744 if (op->WasEliminated() || processed->Contains(op->ssa_temp_index())) { | |
| 4745 continue; | |
| 4746 } | |
| 4747 | |
| 4748 if (FLAG_trace_smi_widening) { | |
| 4749 OS::Print("analysing candidate: %s\n", op->ToCString()); | |
| 4750 } | |
| 4751 worklist.Clear(); | |
| 4752 worklist.Add(op); | |
| 4753 | |
| 4754 // Collect dependency graph. Note: more items are added to worklist | |
| 4755 // inside this loop. | |
| 4756 intptr_t gain = 0; | |
| 4757 for (intptr_t j = 0; j < worklist.definitions().length(); j++) { | |
| 4758 Definition* defn = worklist.definitions()[j]; | |
| 4759 | |
| 4760 if (FLAG_trace_smi_widening) { | |
| 4761 OS::Print("> %s\n", defn->ToCString()); | |
| 4762 } | |
| 4763 | |
| 4764 if (defn->IsBinarySmiOp() && | |
| 4765 BenefitsFromWidening(defn->AsBinarySmiOp())) { | |
| 4766 gain++; | |
| 4767 if (FLAG_trace_smi_widening) { | |
| 4768 OS::Print("^ [%d] (o) %s\n", gain, defn->ToCString()); | |
| 4769 } | |
| 4770 } | |
| 4771 | |
| 4772 const intptr_t defn_loop = loops[defn->GetBlock()->preorder_number()]; | |
| 4773 | |
| 4774 // Process all inputs. | |
| 4775 for (intptr_t k = 0; k < defn->InputCount(); k++) { | |
| 4776 Definition* input = defn->InputAt(k)->definition(); | |
| 4777 if (input->IsBinarySmiOp() && | |
| 4778 CanBeWidened(input->AsBinarySmiOp())) { | |
| 4779 worklist.Add(input); | |
| 4780 } else if (input->IsPhi() && input->Type()->ToCid() == kSmiCid) { | |
srdjan (2014/08/27 16:17:13): Add parentheses
Vyacheslav Egorov (Google) (2014/08/28 20:48:37): Done.
| 4781 worklist.Add(input); | |
| 4782 } else if (input->IsBinaryMintOp()) { | |
| 4783 // A Mint operation produces an untagged result. We avoid tagging. | |
| 4784 gain++; | |
| 4785 if (FLAG_trace_smi_widening) { | |
| 4786 OS::Print("^ [%d] (i) %s\n", gain, input->ToCString()); | |
| 4787 } | |
| 4788 } else if (defn_loop == loops[input->GetBlock()->preorder_number()] && | |
| 4789 (input->Type()->ToCid() == kSmiCid)) { | |
| 4790 // Input comes from the same loop, is known to be smi and requires | |
| 4791 // untagging. | |
| 4792 // TODO(vegorov) this heuristic assumes that values that are not | |
| 4793 // known to be smi have to be checked and this check can be | |
| 4794 // coalesced with untagging. Start coalescing them. | |
| 4795 gain--; | |
| 4796 if (FLAG_trace_smi_widening) { | |
| 4797 OS::Print("v [%d] (i) %s\n", gain, input->ToCString()); | |
| 4798 } | |
| 4799 } | |
| 4800 } | |
| 4801 | |
| 4802 // Process all uses. | |
| 4803 for (Value* use = defn->input_use_list(); | |
| 4804 use != NULL; | |
| 4805 use = use->next_use()) { | |
| 4806 Instruction* instr = use->instruction(); | |
| 4807 Definition* use_defn = instr->AsDefinition(); | |
| 4808 if (use_defn == NULL) { | |
| 4809 // We assume that tagging before returning or pushing an argument costs | |
| 4810 // very little compared to the cost of the return/call itself. | |
| 4811 if (!instr->IsReturn() && !instr->IsPushArgument()) { | |
| 4812 gain--; | |
| 4813 if (FLAG_trace_smi_widening) { | |
| 4814 OS::Print("v [%d] (u) %s\n", | |
| 4815 gain, use->instruction()->ToCString()); | |
| 4816 } | |
| 4817 } | |
| 4818 continue; | |
| 4819 } else if (use_defn->IsBinarySmiOp() && | |
| 4820 CanBeWidened(use_defn->AsBinarySmiOp())) { | |
| 4821 worklist.Add(use_defn); | |
| 4822 } else if (use_defn->IsPhi() && | |
| 4823 use_defn->AsPhi()->Type()->ToCid() == kSmiCid) { | |
| 4824 worklist.Add(use_defn); | |
| 4825 } else if (use_defn->IsBinaryMintOp()) { | |
| 4826 // BinaryMintOp requires untagging of its inputs. | |
| 4827 // Converting kUnboxedInt32 to kUnboxedMint is essentially a zero-cost | |
| 4828 // sign extension operation. | |
| 4829 gain++; | |
| 4830 if (FLAG_trace_smi_widening) { | |
| 4831 OS::Print("^ [%d] (u) %s\n", gain, use->instruction()->ToCString()); | |
| 4832 } | |
| 4833 } else if (defn_loop == loops[instr->GetBlock()->preorder_number()]) { | |
| 4834 gain--; | |
| 4835 if (FLAG_trace_smi_widening) { | |
| 4836 OS::Print("v [%d] (u) %s\n", gain, use->instruction()->ToCString()); | |
| 4837 } | |
| 4838 } | |
| 4839 } | |
| 4840 } | |
| 4841 | |
| 4842 processed->AddAll(worklist.contains()); | |
| 4843 | |
| 4844 if (FLAG_trace_smi_widening) { | |
| 4845 OS::Print("~ %s gain %d\n", op->ToCString(), gain); | |
| 4846 } | |
| 4847 | |
| 4848 if (gain > 0) { | |
| 4849 // We have positive gain from widening. Convert all BinarySmiOpInstr into | |
| 4850 // BinaryInt32OpInstr and set representation of all phis to kUnboxedInt32. | |
| 4851 for (intptr_t j = 0; j < worklist.definitions().length(); j++) { | |
| 4852 Definition* defn = worklist.definitions()[j]; | |
| 4853 ASSERT(defn->IsPhi() || defn->IsBinarySmiOp()); | |
| 4854 | |
| 4855 if (defn->IsBinarySmiOp()) { | |
| 4856 BinarySmiOpInstr* smi_op = defn->AsBinarySmiOp(); | |
| 4857 BinaryInt32OpInstr* int32_op = new(I) BinaryInt32OpInstr( | |
| 4858 smi_op->op_kind(), | |
| 4859 smi_op->left()->CopyWithType(), | |
| 4860 smi_op->right()->CopyWithType(), | |
| 4861 smi_op->DeoptimizationTarget()); | |
| 4862 | |
| 4863 smi_op->ReplaceWith(int32_op, NULL); | |
| 4864 } else if (defn->IsPhi()) { | |
| 4865 defn->AsPhi()->set_representation(kUnboxedInt32); | |
| 4866 } | |
| 4867 } | |
| 4868 } | |
| 4869 } | |
| 4870 } | |
| 4871 #else | |
| 4872 void FlowGraphOptimizer::WidenSmiToInt32() { | |
| 4873 // TODO(vegorov) ideally on 64-bit platforms we would like to narrow smi | |
| 4874 // operations to 32-bit where it saves tagging and untagging and allows | |
| 4875 // us to use shorter (and faster) instructions. But we currently don't | |
| 4876 // save enough range information in the ICData to drive this decision. | |
| 4877 } | |
| 4878 #endif | |
| 4879 | |
| 4596 void FlowGraphOptimizer::InferIntRanges() { | 4880 void FlowGraphOptimizer::InferIntRanges() { | 
| 4597 RangeAnalysis range_analysis(flow_graph_); | 4881 RangeAnalysis range_analysis(flow_graph_); | 
| 4598 range_analysis.Analyze(); | 4882 range_analysis.Analyze(); | 
| 4599 } | 4883 } | 
| 4600 | 4884 | 
| 4601 | 4885 | 
| 4602 void TryCatchAnalyzer::Optimize(FlowGraph* flow_graph) { | 4886 void TryCatchAnalyzer::Optimize(FlowGraph* flow_graph) { | 
| 4603 // For every catch-block: Iterate over all call instructions inside the | 4887 // For every catch-block: Iterate over all call instructions inside the | 
| 4604 // corresponding try-block and figure out for each environment value if it | 4888 // corresponding try-block and figure out for each environment value if it | 
| 4605 // is the same constant at all calls. If yes, replace the initial definition | 4889 // is the same constant at all calls. If yes, replace the initial definition | 
| (...skipping 87 matching lines...) | |
| 4693 // speculatively hoisting checks. | 4977 // speculatively hoisting checks. | 
| 4694 if (FLAG_trace_optimization) { | 4978 if (FLAG_trace_optimization) { | 
| 4695 OS::Print("Hoisting instruction %s:%" Pd " from B%" Pd " to B%" Pd "\n", | 4979 OS::Print("Hoisting instruction %s:%" Pd " from B%" Pd " to B%" Pd "\n", | 
| 4696 current->DebugName(), | 4980 current->DebugName(), | 
| 4697 current->GetDeoptId(), | 4981 current->GetDeoptId(), | 
| 4698 current->GetBlock()->block_id(), | 4982 current->GetBlock()->block_id(), | 
| 4699 pre_header->block_id()); | 4983 pre_header->block_id()); | 
| 4700 } | 4984 } | 
| 4701 // Move the instruction out of the loop. | 4985 // Move the instruction out of the loop. | 
| 4702 current->RemoveEnvironment(); | 4986 current->RemoveEnvironment(); | 
| 4703 it->RemoveCurrentFromGraph(); | 4987 if (it != NULL) { | 
| 4988 it->RemoveCurrentFromGraph(); | |
| 4989 } else { | |
| 4990 current->RemoveFromGraph(); | |
| 4991 } | |
| 4704 GotoInstr* last = pre_header->last_instruction()->AsGoto(); | 4992 GotoInstr* last = pre_header->last_instruction()->AsGoto(); | 
| 4705 // Using kind kEffect will not assign a fresh ssa temporary index. | 4993 // Using kind kEffect will not assign a fresh ssa temporary index. | 
| 4706 flow_graph()->InsertBefore(last, current, last->env(), FlowGraph::kEffect); | 4994 flow_graph()->InsertBefore(last, current, last->env(), FlowGraph::kEffect); | 
| 4707 current->deopt_id_ = last->GetDeoptId(); | 4995 current->deopt_id_ = last->GetDeoptId(); | 
| 4708 } | 4996 } | 
| 4709 | 4997 | 
| 4710 | 4998 | 
| 4711 void LICM::TryHoistCheckSmiThroughPhi(ForwardInstructionIterator* it, | 4999 void LICM::TrySpecializeSmiPhi(PhiInstr* phi, | 
| 4712 BlockEntryInstr* header, | 5000 BlockEntryInstr* header, | 
| 4713 BlockEntryInstr* pre_header, | 5001 BlockEntryInstr* pre_header) { | 
| 4714 CheckSmiInstr* current) { | 5002 if (phi->Type()->ToCid() == kSmiCid) { | 
| 4715 PhiInstr* phi = current->value()->definition()->AsPhi(); | |
| 4716 if (!header->loop_info()->Contains(phi->block()->preorder_number())) { | |
| 4717 return; | 5003 return; | 
| 4718 } | 5004 } | 
| 4719 | 5005 | 
| 4720 if (phi->Type()->ToCid() == kSmiCid) { | |
| 4721 it->RemoveCurrentFromGraph(); | |
| 4722 return; | |
| 4723 } | |
| 4724 | |
| 4725 // Check if there is only a single kDynamicCid input to the phi that | 5006 // Check if there is only a single kDynamicCid input to the phi that | 
| 4726 // comes from the pre-header. | 5007 // comes from the pre-header. | 
| 4727 const intptr_t kNotFound = -1; | 5008 const intptr_t kNotFound = -1; | 
| 4728 intptr_t non_smi_input = kNotFound; | 5009 intptr_t non_smi_input = kNotFound; | 
| 4729 for (intptr_t i = 0; i < phi->InputCount(); ++i) { | 5010 for (intptr_t i = 0; i < phi->InputCount(); ++i) { | 
| 4730 Value* input = phi->InputAt(i); | 5011 Value* input = phi->InputAt(i); | 
| 4731 if (input->Type()->ToCid() != kSmiCid) { | 5012 if (input->Type()->ToCid() != kSmiCid) { | 
| 4732 if ((non_smi_input != kNotFound) || | 5013 if ((non_smi_input != kNotFound) || | 
| 4733 (input->Type()->ToCid() != kDynamicCid)) { | 5014 (input->Type()->ToCid() != kDynamicCid)) { | 
| 4734 // There are multiple kDynamicCid inputs or there is an input that is | 5015 // There are multiple kDynamicCid inputs or there is an input that is | 
| 4735 // known to be non-smi. | 5016 // known to be non-smi. | 
| 4736 return; | 5017 return; | 
| 4737 } else { | 5018 } else { | 
| 4738 non_smi_input = i; | 5019 non_smi_input = i; | 
| 4739 } | 5020 } | 
| 4740 } | 5021 } | 
| 4741 } | 5022 } | 
| 4742 | 5023 | 
| 4743 if ((non_smi_input == kNotFound) || | 5024 if ((non_smi_input == kNotFound) || | 
| 4744 (phi->block()->PredecessorAt(non_smi_input) != pre_header)) { | 5025 (phi->block()->PredecessorAt(non_smi_input) != pre_header)) { | 
| 4745 return; | 5026 return; | 
| 4746 } | 5027 } | 
| 4747 | 5028 | 
| 5029 CheckSmiInstr* check = NULL; | |
| 5030 for (Value* use = phi->input_use_list(); | |
| 5031 (use != NULL) && (check == NULL); | |
| 5032 use = use->next_use()) { | |
| 5033 check = use->instruction()->AsCheckSmi(); | |
| 5034 } | |
| 5035 | |
| 5036 if (check == NULL) { | |
| 5037 return; | |
| 5038 } | |
| 5039 | |
| 4748 // Hoist the CheckSmi instruction and make this phi a smi one. | 5040 // Hoist the CheckSmi instruction and make this phi a smi one. | 
| 4749 Hoist(it, pre_header, current); | 5041 Hoist(NULL, pre_header, check); | 
| 4750 | 5042 | 
| 4751 // Replace value we are checking with phi's input. | 5043 // Replace value we are checking with phi's input. | 
| 4752 current->value()->BindTo(phi->InputAt(non_smi_input)->definition()); | 5044 check->value()->BindTo(phi->InputAt(non_smi_input)->definition()); | 
| 4753 | 5045 | 
| 4754 phi->UpdateType(CompileType::FromCid(kSmiCid)); | 5046 phi->UpdateType(CompileType::FromCid(kSmiCid)); | 
| 4755 } | 5047 } | 
| 4756 | 5048 | 
| 4757 | 5049 | 
| 4758 // Load instructions handled by load elimination. | 5050 // Load instructions handled by load elimination. | 
| 4759 static bool IsLoadEliminationCandidate(Instruction* instr) { | 5051 static bool IsLoadEliminationCandidate(Instruction* instr) { | 
| 4760 return instr->IsLoadField() | 5052 return instr->IsLoadField() | 
| 4761 || instr->IsLoadIndexed() | 5053 || instr->IsLoadIndexed() | 
| 4762 || instr->IsLoadStaticField() | 5054 || instr->IsLoadStaticField() | 
| 4763 || instr->IsCurrentContext(); | 5055 || instr->IsCurrentContext(); | 
| 4764 } | 5056 } | 
| 4765 | 5057 | 
| 4766 | 5058 | 
| 4767 static bool IsLoopInvariantLoad(ZoneGrowableArray<BitVector*>* sets, | 5059 static bool IsLoopInvariantLoad(ZoneGrowableArray<BitVector*>* sets, | 
| 4768 intptr_t loop_header_index, | 5060 intptr_t loop_header_index, | 
| 4769 Instruction* instr) { | 5061 Instruction* instr) { | 
| 4770 return IsLoadEliminationCandidate(instr) && | 5062 return IsLoadEliminationCandidate(instr) && | 
| 4771 (sets != NULL) && | 5063 (sets != NULL) && | 
| 4772 instr->HasPlaceId() && | 5064 instr->HasPlaceId() && | 
| 4773 ((*sets)[loop_header_index] != NULL) && | 5065 ((*sets)[loop_header_index] != NULL) && | 
| 4774 (*sets)[loop_header_index]->Contains(instr->place_id()); | 5066 (*sets)[loop_header_index]->Contains(instr->place_id()); | 
| 4775 } | 5067 } | 
| 4776 | 5068 | 
| 4777 | 5069 | 
| 5070 void LICM::OptimisticallySpecializeSmiPhis() { | |
| 5071 if (!flow_graph()->parsed_function().function(). | |
| 5072 allows_hoisting_check_class()) { | |
| 5073 // Do not hoist any. | |
| 5074 return; | |
| 5075 } | |
| 5076 | |
| 5077 const ZoneGrowableArray<BlockEntryInstr*>& loop_headers = | |
| 5078 flow_graph()->loop_headers(); | |
| 5079 | |
| 5080 for (intptr_t i = 0; i < loop_headers.length(); ++i) { | |
| 5081 JoinEntryInstr* header = loop_headers[i]->AsJoinEntry(); | |
| 5082 // Skip loops that don't have a pre-header block. | |
| 5083 BlockEntryInstr* pre_header = FindPreHeader(header); | |
| 5084 if (pre_header == NULL) continue; | |
| 5085 | |
| 5086 for (PhiIterator it(header); !it.Done(); it.Advance()) { | |
| 5087 TrySpecializeSmiPhi(it.Current(), header, pre_header); | |
| 5088 } | |
| 5089 } | |
| 5090 } | |
| 5091 | |
| 5092 | |
| 4778 void LICM::Optimize() { | 5093 void LICM::Optimize() { | 
| 4779 if (!flow_graph()->parsed_function().function(). | 5094 if (!flow_graph()->parsed_function().function(). | 
| 4780 allows_hoisting_check_class()) { | 5095 allows_hoisting_check_class()) { | 
| 4781 // Do not hoist any. | 5096 // Do not hoist any. | 
| 4782 return; | 5097 return; | 
| 4783 } | 5098 } | 
| 4784 | 5099 | 
| 4785 const ZoneGrowableArray<BlockEntryInstr*>& loop_headers = | 5100 const ZoneGrowableArray<BlockEntryInstr*>& loop_headers = | 
| 4786 flow_graph()->loop_headers(); | 5101 flow_graph()->loop_headers(); | 
| 4787 | 5102 | 
| (...skipping 26 matching lines...) | |
| 4814 inputs_loop_invariant = false; | 5129 inputs_loop_invariant = false; | 
| 4815 break; | 5130 break; | 
| 4816 } | 5131 } | 
| 4817 } | 5132 } | 
| 4818 if (inputs_loop_invariant && | 5133 if (inputs_loop_invariant && | 
| 4819 !current->IsAssertAssignable() && | 5134 !current->IsAssertAssignable() && | 
| 4820 !current->IsAssertBoolean()) { | 5135 !current->IsAssertBoolean()) { | 
| 4821 // TODO(fschneider): Enable hoisting of Assert-instructions | 5136 // TODO(fschneider): Enable hoisting of Assert-instructions | 
| 4822 // if it safe to do. | 5137 // if it safe to do. | 
| 4823 Hoist(&it, pre_header, current); | 5138 Hoist(&it, pre_header, current); | 
| 4824 } else if (current->IsCheckSmi() && | |
| 4825 current->InputAt(0)->definition()->IsPhi()) { | |
| 4826 TryHoistCheckSmiThroughPhi( | |
| 4827 &it, header, pre_header, current->AsCheckSmi()); | |
| 4828 } | 5139 } | 
| 4829 } | 5140 } | 
| 4830 } | 5141 } | 
| 4831 } | 5142 } | 
| 4832 } | 5143 } | 
| 4833 } | 5144 } | 
| 4834 | 5145 | 
| 4835 | 5146 | 
| 4836 // Place describes an abstract location (e.g. field) that IR can load | 5147 // Place describes an abstract location (e.g. field) that IR can load | 
| 4837 // from or store to. | 5148 // from or store to. | 
| (...skipping 3092 matching lines...) | |
| 7930 ASSERT(!result.IsNull()); | 8241 ASSERT(!result.IsNull()); | 
| 7931 SetValue(defn, result); | 8242 SetValue(defn, result); | 
| 7932 } | 8243 } | 
| 7933 | 8244 | 
| 7934 | 8245 | 
| 7935 void ConstantPropagator::VisitBinarySmiOp(BinarySmiOpInstr* instr) { | 8246 void ConstantPropagator::VisitBinarySmiOp(BinarySmiOpInstr* instr) { | 
| 7936 HandleBinaryOp(instr, instr->op_kind(), *instr->left(), *instr->right()); | 8247 HandleBinaryOp(instr, instr->op_kind(), *instr->left(), *instr->right()); | 
| 7937 } | 8248 } | 
| 7938 | 8249 | 
| 7939 | 8250 | 
| 8251 void ConstantPropagator::VisitBinaryInt32Op(BinaryInt32OpInstr* instr) { | |
| 8252 HandleBinaryOp(instr, instr->op_kind(), *instr->left(), *instr->right()); | |
| 8253 } | |
| 8254 | |
| 8255 | |
| 7940 void ConstantPropagator::VisitBoxInteger(BoxIntegerInstr* instr) { | 8256 void ConstantPropagator::VisitBoxInteger(BoxIntegerInstr* instr) { | 
| 7941 // TODO(kmillikin): Handle box operation. | 8257 // TODO(kmillikin): Handle box operation. | 
| 7942 SetValue(instr, non_constant_); | 8258 SetValue(instr, non_constant_); | 
| 7943 } | 8259 } | 
| 7944 | 8260 | 
| 7945 | 8261 | 
| 7946 void ConstantPropagator::VisitUnboxInteger(UnboxIntegerInstr* instr) { | 8262 void ConstantPropagator::VisitUnboxInteger(UnboxIntegerInstr* instr) { | 
| 7947 // TODO(kmillikin): Handle unbox operation. | 8263 // TODO(kmillikin): Handle unbox operation. | 
| 7948 SetValue(instr, non_constant_); | 8264 SetValue(instr, non_constant_); | 
| 7949 } | 8265 } | 
| (...skipping 41 matching lines...) | |
| 7991 const Object& value = instr->value()->definition()->constant_value(); | 8307 const Object& value = instr->value()->definition()->constant_value(); | 
| 7992 if (IsConstant(value) && value.IsInteger()) { | 8308 if (IsConstant(value) && value.IsInteger()) { | 
| 7993 SetValue(instr, Double::Handle(I, | 8309 SetValue(instr, Double::Handle(I, | 
| 7994 Double::New(Integer::Cast(value).AsDoubleValue(), Heap::kOld))); | 8310 Double::New(Integer::Cast(value).AsDoubleValue(), Heap::kOld))); | 
| 7995 } else if (IsNonConstant(value)) { | 8311 } else if (IsNonConstant(value)) { | 
| 7996 SetValue(instr, non_constant_); | 8312 SetValue(instr, non_constant_); | 
| 7997 } | 8313 } | 
| 7998 } | 8314 } | 
| 7999 | 8315 | 
| 8000 | 8316 | 
| 8317 void ConstantPropagator::VisitInt32ToDouble(Int32ToDoubleInstr* instr) { | |
| 8318 const Object& value = instr->value()->definition()->constant_value(); | |
| 8319 if (IsConstant(value) && value.IsInteger()) { | |
| 8320 SetValue(instr, Double::Handle(I, | |
| 8321 Double::New(Integer::Cast(value).AsDoubleValue(), Heap::kOld))); | |
| 8322 } else if (IsNonConstant(value)) { | |
| 8323 SetValue(instr, non_constant_); | |
| 8324 } | |
| 8325 } | |
| 8326 | |
| 8327 | |
| 8001 void ConstantPropagator::VisitDoubleToInteger(DoubleToIntegerInstr* instr) { | 8328 void ConstantPropagator::VisitDoubleToInteger(DoubleToIntegerInstr* instr) { | 
| 8002 // TODO(kmillikin): Handle conversion. | 8329 // TODO(kmillikin): Handle conversion. | 
| 8003 SetValue(instr, non_constant_); | 8330 SetValue(instr, non_constant_); | 
| 8004 } | 8331 } | 
| 8005 | 8332 | 
| 8006 | 8333 | 
| 8007 void ConstantPropagator::VisitDoubleToSmi(DoubleToSmiInstr* instr) { | 8334 void ConstantPropagator::VisitDoubleToSmi(DoubleToSmiInstr* instr) { | 
| 8008 // TODO(kmillikin): Handle conversion. | 8335 // TODO(kmillikin): Handle conversion. | 
| 8009 SetValue(instr, non_constant_); | 8336 SetValue(instr, non_constant_); | 
| 8010 } | 8337 } | 
| (...skipping 364 matching lines...) | |
| 8375 SetValue(instr, non_constant_); | 8702 SetValue(instr, non_constant_); | 
| 8376 } | 8703 } | 
| 8377 | 8704 | 
| 8378 | 8705 | 
| 8379 void ConstantPropagator::VisitUnboxUint32(UnboxUint32Instr* instr) { | 8706 void ConstantPropagator::VisitUnboxUint32(UnboxUint32Instr* instr) { | 
| 8380 // TODO(kmillikin): Handle unbox operation. | 8707 // TODO(kmillikin): Handle unbox operation. | 
| 8381 SetValue(instr, non_constant_); | 8708 SetValue(instr, non_constant_); | 
| 8382 } | 8709 } | 
| 8383 | 8710 | 
| 8384 | 8711 | 
| 8712 void ConstantPropagator::VisitBoxInt32(BoxInt32Instr* instr) { | |
| 8713 // TODO(kmillikin): Handle box operation. | |
| 8714 SetValue(instr, non_constant_); | |
| 8715 } | |
| 8716 | |
| 8717 | |
| 8718 void ConstantPropagator::VisitUnboxInt32(UnboxInt32Instr* instr) { | |
| 8719 // TODO(kmillikin): Handle unbox operation. | |
| 8720 SetValue(instr, non_constant_); | |
| 8721 } | |
| 8722 | |
| 8723 | |
| 8385 void ConstantPropagator::VisitUnboxedIntConverter( | 8724 void ConstantPropagator::VisitUnboxedIntConverter( | 
| 8386 UnboxedIntConverterInstr* instr) { | 8725 UnboxedIntConverterInstr* instr) { | 
| 8387 SetValue(instr, non_constant_); | 8726 SetValue(instr, non_constant_); | 
| 8388 } | 8727 } | 
| 8389 | 8728 | 
| 8390 | 8729 | 
| 8391 void ConstantPropagator::VisitBinaryUint32Op(BinaryUint32OpInstr* instr) { | 8730 void ConstantPropagator::VisitBinaryUint32Op(BinaryUint32OpInstr* instr) { | 
| 8392 HandleBinaryOp(instr, instr->op_kind(), *instr->left(), *instr->right()); | 8731 HandleBinaryOp(instr, instr->op_kind(), *instr->left(), *instr->right()); | 
| 8393 TruncateInteger(instr, static_cast<int64_t>(0xFFFFFFFF)); | 8732 TruncateInteger(instr, static_cast<int64_t>(0xFFFFFFFF)); | 
| 8394 } | 8733 } | 
| (...skipping 1220 matching lines...) | |
| 9615 | 9954 | 
| 9616 // Insert materializations at environment uses. | 9955 // Insert materializations at environment uses. | 
| 9617 for (intptr_t i = 0; i < exits_collector_.exits().length(); i++) { | 9956 for (intptr_t i = 0; i < exits_collector_.exits().length(); i++) { | 
| 9618 CreateMaterializationAt( | 9957 CreateMaterializationAt( | 
| 9619 exits_collector_.exits()[i], alloc, alloc->cls(), *slots); | 9958 exits_collector_.exits()[i], alloc, alloc->cls(), *slots); | 
| 9620 } | 9959 } | 
| 9621 } | 9960 } | 
| 9622 | 9961 | 
| 9623 | 9962 | 
| 9624 } // namespace dart | 9963 } // namespace dart | 