Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
| 4 | 4 |
| 5 #include "vm/flow_graph_optimizer.h" | 5 #include "vm/flow_graph_optimizer.h" |
| 6 | 6 |
| 7 #include "vm/bit_vector.h" | 7 #include "vm/bit_vector.h" |
| 8 #include "vm/cha.h" | 8 #include "vm/cha.h" |
| 9 #include "vm/cpu.h" | 9 #include "vm/cpu.h" |
| 10 #include "vm/dart_entry.h" | 10 #include "vm/dart_entry.h" |
| (...skipping 24 matching lines...) Expand all Loading... | |
| 35 " otherwise use megamorphic dispatch."); | 35 " otherwise use megamorphic dispatch."); |
| 36 DEFINE_FLAG(bool, remove_redundant_phis, true, "Remove redundant phis."); | 36 DEFINE_FLAG(bool, remove_redundant_phis, true, "Remove redundant phis."); |
| 37 DEFINE_FLAG(bool, trace_constant_propagation, false, | 37 DEFINE_FLAG(bool, trace_constant_propagation, false, |
| 38 "Print constant propagation and useless code elimination."); | 38 "Print constant propagation and useless code elimination."); |
| 39 DEFINE_FLAG(bool, trace_load_optimization, false, | 39 DEFINE_FLAG(bool, trace_load_optimization, false, |
| 40 "Print live sets for load optimization pass."); | 40 "Print live sets for load optimization pass."); |
| 41 DEFINE_FLAG(bool, trace_optimization, false, "Print optimization details."); | 41 DEFINE_FLAG(bool, trace_optimization, false, "Print optimization details."); |
| 42 DEFINE_FLAG(bool, truncating_left_shift, true, | 42 DEFINE_FLAG(bool, truncating_left_shift, true, |
| 43 "Optimize left shift to truncate if possible"); | 43 "Optimize left shift to truncate if possible"); |
| 44 DEFINE_FLAG(bool, use_cha, true, "Use class hierarchy analysis."); | 44 DEFINE_FLAG(bool, use_cha, true, "Use class hierarchy analysis."); |
| 45 #if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_IA32) | |
| 46 DEFINE_FLAG(bool, trace_smi_widening, false, "Trace Smi->Int32 widening pass."); | |
| 47 #endif | |
| 45 DECLARE_FLAG(bool, enable_type_checks); | 48 DECLARE_FLAG(bool, enable_type_checks); |
| 46 DECLARE_FLAG(bool, source_lines); | 49 DECLARE_FLAG(bool, source_lines); |
| 47 DECLARE_FLAG(bool, trace_type_check_elimination); | 50 DECLARE_FLAG(bool, trace_type_check_elimination); |
| 48 DECLARE_FLAG(bool, warn_on_javascript_compatibility); | 51 DECLARE_FLAG(bool, warn_on_javascript_compatibility); |
| 49 | 52 |
| 50 // Quick access to the locally defined isolate() method. | 53 // Quick access to the locally defined isolate() method. |
| 51 #define I (isolate()) | 54 #define I (isolate()) |
| 52 | 55 |
| 53 static bool ShouldInlineSimd() { | 56 static bool ShouldInlineSimd() { |
| 54 return FlowGraphCompiler::SupportsUnboxedSimd128(); | 57 return FlowGraphCompiler::SupportsUnboxedSimd128(); |
| (...skipping 519 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 574 ASSERT((replacement == NULL) || current->IsDefinition()); | 577 ASSERT((replacement == NULL) || current->IsDefinition()); |
| 575 ReplaceCurrentInstruction(&it, current, replacement, flow_graph_); | 578 ReplaceCurrentInstruction(&it, current, replacement, flow_graph_); |
| 576 changed = true; | 579 changed = true; |
| 577 } | 580 } |
| 578 } | 581 } |
| 579 } | 582 } |
| 580 return changed; | 583 return changed; |
| 581 } | 584 } |
| 582 | 585 |
| 583 | 586 |
| 587 static bool IsUnboxedInteger(Representation rep) { | |
| 588 return (rep == kUnboxedInt32) || | |
| 589 (rep == kUnboxedUint32) || | |
| 590 (rep == kUnboxedMint); | |
| 591 } | |
| 592 | |
| 593 | |
| 584 void FlowGraphOptimizer::InsertConversion(Representation from, | 594 void FlowGraphOptimizer::InsertConversion(Representation from, |
| 585 Representation to, | 595 Representation to, |
| 586 Value* use, | 596 Value* use, |
| 587 bool is_environment_use) { | 597 bool is_environment_use) { |
| 588 Instruction* insert_before; | 598 Instruction* insert_before; |
| 589 Instruction* deopt_target; | 599 Instruction* deopt_target; |
| 590 PhiInstr* phi = use->instruction()->AsPhi(); | 600 PhiInstr* phi = use->instruction()->AsPhi(); |
| 591 if (phi != NULL) { | 601 if (phi != NULL) { |
|
Florian Schneider
2014/08/27 09:36:51
Can you simplify the computation of deopt_target a
Vyacheslav Egorov (Google)
2014/08/27 11:45:37
Done.
| |
| 592 ASSERT(phi->is_alive()); | 602 ASSERT(phi->is_alive()); |
| 593 // For phis conversions have to be inserted in the predecessor. | 603 // For phis conversions have to be inserted in the predecessor. |
| 594 insert_before = | 604 insert_before = |
| 595 phi->block()->PredecessorAt(use->use_index())->last_instruction(); | 605 phi->block()->PredecessorAt(use->use_index())->last_instruction(); |
| 596 deopt_target = NULL; | 606 deopt_target = NULL; |
| 597 } else { | 607 } else { |
| 598 deopt_target = insert_before = use->instruction(); | 608 deopt_target = insert_before = use->instruction(); |
| 599 } | 609 } |
| 600 | 610 |
| 611 bool force_inherit_deopt_target = false; | |
| 612 | |
| 601 Definition* converted = NULL; | 613 Definition* converted = NULL; |
| 602 if ((from == kTagged) && (to == kUnboxedMint)) { | 614 if ((from == kTagged) && (to == kUnboxedMint)) { |
| 603 ASSERT((deopt_target != NULL) || | 615 ASSERT((deopt_target != NULL) || |
| 604 (use->Type()->ToCid() == kUnboxedMint)); | 616 (use->Type()->ToCid() == kUnboxedMint)); |
| 605 const intptr_t deopt_id = (deopt_target != NULL) ? | 617 const intptr_t deopt_id = (deopt_target != NULL) ? |
| 606 deopt_target->DeoptimizationTarget() : Isolate::kNoDeoptId; | 618 deopt_target->DeoptimizationTarget() : Isolate::kNoDeoptId; |
| 607 converted = new(I) UnboxIntegerInstr(use->CopyWithType(), deopt_id); | 619 converted = new(I) UnboxIntegerInstr(use->CopyWithType(), deopt_id); |
| 608 } else if ((from == kUnboxedMint) && (to == kTagged)) { | 620 } else if ((from == kUnboxedMint) && (to == kTagged)) { |
| 609 converted = new(I) BoxIntegerInstr(use->CopyWithType()); | 621 converted = new(I) BoxIntegerInstr(use->CopyWithType()); |
| 610 } else if ((from == kUnboxedMint) && (to == kUnboxedUint32)) { | 622 } else if (IsUnboxedInteger(from) && IsUnboxedInteger(to)) { |
| 611 converted = new(I) UnboxedIntConverterInstr(from, to, use->CopyWithType()); | 623 const intptr_t deopt_id = (to == kUnboxedInt32) && (deopt_target != NULL) ? |
| 612 } else if ((from == kUnboxedUint32) && (to == kUnboxedMint)) { | 624 deopt_target->DeoptimizationTarget() : Isolate::kNoDeoptId; |
| 613 converted = new(I) UnboxedIntConverterInstr(from, to, use->CopyWithType()); | 625 force_inherit_deopt_target = (to == kUnboxedInt32) && (phi != NULL); |
| 626 converted = new(I) UnboxedIntConverterInstr(from, | |
| 627 to, | |
| 628 use->CopyWithType(), | |
| 629 deopt_id); | |
| 630 } else if ((from == kUnboxedInt32) && (to == kUnboxedDouble)) { | |
| 631 converted = new Int32ToDoubleInstr(use->CopyWithType()); | |
| 632 } else if ((from == kTagged) && (to == kUnboxedInt32)) { | |
| 633 const intptr_t deopt_id = (deopt_target != NULL) ? | |
| 634 deopt_target->DeoptimizationTarget() : Isolate::kNoDeoptId; | |
| 635 force_inherit_deopt_target = (phi != NULL); | |
| 636 converted = new UnboxInt32Instr(use->CopyWithType(), deopt_id); | |
| 637 } else if ((from == kUnboxedInt32) && (to == kTagged)) { | |
| 638 converted = new BoxInt32Instr(use->CopyWithType()); | |
| 614 } else if ((from == kTagged) && (to == kUnboxedUint32)) { | 639 } else if ((from == kTagged) && (to == kUnboxedUint32)) { |
| 615 const intptr_t deopt_id = (deopt_target != NULL) ? | 640 const intptr_t deopt_id = (deopt_target != NULL) ? |
| 616 deopt_target->DeoptimizationTarget() : Isolate::kNoDeoptId; | 641 deopt_target->DeoptimizationTarget() : Isolate::kNoDeoptId; |
| 617 converted = new UnboxUint32Instr(use->CopyWithType(), deopt_id); | 642 converted = new UnboxUint32Instr(use->CopyWithType(), deopt_id); |
| 618 } else if (from == kUnboxedMint && to == kUnboxedDouble) { | 643 } else if (from == kUnboxedMint && to == kUnboxedDouble) { |
| 619 ASSERT(CanUnboxDouble()); | 644 ASSERT(CanUnboxDouble()); |
| 620 // Convert by boxing/unboxing. | 645 // Convert by boxing/unboxing. |
| 621 // TODO(fschneider): Implement direct unboxed mint-to-double conversion. | 646 // TODO(fschneider): Implement direct unboxed mint-to-double conversion. |
| 622 BoxIntegerInstr* boxed = | 647 BoxIntegerInstr* boxed = |
| 623 new(I) BoxIntegerInstr(use->CopyWithType()); | 648 new(I) BoxIntegerInstr(use->CopyWithType()); |
| (...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 684 if (from == kUnboxedDouble) { | 709 if (from == kUnboxedDouble) { |
| 685 boxed = new(I) BoxDoubleInstr(use->CopyWithType()); | 710 boxed = new(I) BoxDoubleInstr(use->CopyWithType()); |
| 686 } else if (from == kUnboxedInt32x4) { | 711 } else if (from == kUnboxedInt32x4) { |
| 687 boxed = new(I) BoxInt32x4Instr(use->CopyWithType()); | 712 boxed = new(I) BoxInt32x4Instr(use->CopyWithType()); |
| 688 } else if (from == kUnboxedFloat32x4) { | 713 } else if (from == kUnboxedFloat32x4) { |
| 689 boxed = new(I) BoxFloat32x4Instr(use->CopyWithType()); | 714 boxed = new(I) BoxFloat32x4Instr(use->CopyWithType()); |
| 690 } else if (from == kUnboxedMint) { | 715 } else if (from == kUnboxedMint) { |
| 691 boxed = new(I) BoxIntegerInstr(use->CopyWithType()); | 716 boxed = new(I) BoxIntegerInstr(use->CopyWithType()); |
| 692 } else if (from == kUnboxedFloat64x2) { | 717 } else if (from == kUnboxedFloat64x2) { |
| 693 boxed = new(I) BoxFloat64x2Instr(use->CopyWithType()); | 718 boxed = new(I) BoxFloat64x2Instr(use->CopyWithType()); |
| 719 } else if (from == kUnboxedInt32) { | |
| 720 boxed = new(I) BoxInt32Instr(use->CopyWithType()); | |
| 721 } else if (from == kUnboxedUint32) { | |
| 722 boxed = new(I) BoxUint32Instr(use->CopyWithType()); | |
| 694 } else { | 723 } else { |
| 695 UNIMPLEMENTED(); | 724 UNIMPLEMENTED(); |
| 696 } | 725 } |
| 697 use->BindTo(boxed); | 726 use->BindTo(boxed); |
| 698 InsertBefore(insert_before, boxed, NULL, FlowGraph::kValue); | 727 InsertBefore(insert_before, boxed, NULL, FlowGraph::kValue); |
| 699 Value* to_value = new(I) Value(boxed); | 728 Value* to_value = new(I) Value(boxed); |
| 700 if (to == kUnboxedDouble) { | 729 if (to == kUnboxedDouble) { |
| 701 converted = new(I) UnboxDoubleInstr(to_value, deopt_id); | 730 converted = new(I) UnboxDoubleInstr(to_value, deopt_id); |
| 702 } else if (to == kUnboxedInt32x4) { | 731 } else if (to == kUnboxedInt32x4) { |
| 703 converted = new(I) UnboxInt32x4Instr(to_value, deopt_id); | 732 converted = new(I) UnboxInt32x4Instr(to_value, deopt_id); |
| 704 } else if (to == kUnboxedFloat32x4) { | 733 } else if (to == kUnboxedFloat32x4) { |
| 705 converted = new(I) UnboxFloat32x4Instr(to_value, deopt_id); | 734 converted = new(I) UnboxFloat32x4Instr(to_value, deopt_id); |
| 706 } else if (to == kUnboxedMint) { | 735 } else if (to == kUnboxedMint) { |
| 707 converted = new(I) UnboxIntegerInstr(to_value, deopt_id); | 736 converted = new(I) UnboxIntegerInstr(to_value, deopt_id); |
| 708 } else if (to == kUnboxedFloat64x2) { | 737 } else if (to == kUnboxedFloat64x2) { |
| 709 converted = new(I) UnboxFloat64x2Instr(to_value, deopt_id); | 738 converted = new(I) UnboxFloat64x2Instr(to_value, deopt_id); |
| 739 } else if (to == kUnboxedInt32) { | |
| 740 boxed = new(I) UnboxInt32Instr(use->CopyWithType(), deopt_id); | |
| 741 } else if (to == kUnboxedUint32) { | |
| 742 boxed = new(I) UnboxUint32Instr(use->CopyWithType(), deopt_id); | |
| 710 } else { | 743 } else { |
| 711 UNIMPLEMENTED(); | 744 UNIMPLEMENTED(); |
| 712 } | 745 } |
| 713 } | 746 } |
| 714 ASSERT(converted != NULL); | 747 ASSERT(converted != NULL); |
| 715 InsertBefore(insert_before, converted, use->instruction()->env(), | 748 InsertBefore(insert_before, converted, use->instruction()->env(), |
| 716 FlowGraph::kValue); | 749 FlowGraph::kValue); |
| 717 if (is_environment_use) { | 750 if (is_environment_use) { |
| 718 use->BindToEnvironment(converted); | 751 use->BindToEnvironment(converted); |
| 719 } else { | 752 } else { |
| 720 use->BindTo(converted); | 753 use->BindTo(converted); |
| 721 } | 754 } |
| 755 | |
| 756 if (force_inherit_deopt_target) { | |
| 757 flow_graph_->CopyDeoptTarget(converted, insert_before); | |
| 758 } | |
| 722 } | 759 } |
| 723 | 760 |
| 724 | 761 |
| 725 void FlowGraphOptimizer::ConvertUse(Value* use, Representation from_rep) { | 762 void FlowGraphOptimizer::ConvertUse(Value* use, Representation from_rep) { |
| 726 const Representation to_rep = | 763 const Representation to_rep = |
| 727 use->instruction()->RequiredInputRepresentation(use->use_index()); | 764 use->instruction()->RequiredInputRepresentation(use->use_index()); |
| 728 if (from_rep == to_rep || to_rep == kNoRepresentation) { | 765 if (from_rep == to_rep || to_rep == kNoRepresentation) { |
| 729 return; | 766 return; |
| 730 } | 767 } |
| 731 InsertConversion(from_rep, to_rep, use, /*is_environment_use=*/ false); | 768 InsertConversion(from_rep, to_rep, use, /*is_environment_use=*/ false); |
| (...skipping 26 matching lines...) Expand all Loading... | |
| 758 if (use->instruction()->MayThrow() && | 795 if (use->instruction()->MayThrow() && |
| 759 use->instruction()->GetBlock()->InsideTryBlock()) { | 796 use->instruction()->GetBlock()->InsideTryBlock()) { |
| 760 // Environment uses at calls inside try-blocks must be converted to | 797 // Environment uses at calls inside try-blocks must be converted to |
| 761 // tagged representation. | 798 // tagged representation. |
| 762 ConvertEnvironmentUse(it.Current(), from_rep); | 799 ConvertEnvironmentUse(it.Current(), from_rep); |
| 763 } | 800 } |
| 764 } | 801 } |
| 765 } | 802 } |
| 766 | 803 |
| 767 | 804 |
| 768 // Returns true if phi's representation was changed. | 805 static void UnboxPhi(PhiInstr* phi) { |
| 769 static bool UnboxPhi(PhiInstr* phi) { | 806 Representation unboxed = phi->representation(); |
| 770 Representation current = phi->representation(); | |
| 771 Representation unboxed = current; | |
| 772 | 807 |
| 773 switch (phi->Type()->ToCid()) { | 808 switch (phi->Type()->ToCid()) { |
| 774 case kDoubleCid: | 809 case kDoubleCid: |
| 775 if (CanUnboxDouble()) { | 810 if (CanUnboxDouble()) { |
| 776 unboxed = kUnboxedDouble; | 811 unboxed = kUnboxedDouble; |
| 777 } | 812 } |
| 778 break; | 813 break; |
| 779 case kFloat32x4Cid: | 814 case kFloat32x4Cid: |
| 780 if (ShouldInlineSimd()) { | 815 if (ShouldInlineSimd()) { |
| 781 unboxed = kUnboxedFloat32x4; | 816 unboxed = kUnboxedFloat32x4; |
| 782 } | 817 } |
| 783 break; | 818 break; |
| 784 case kInt32x4Cid: | 819 case kInt32x4Cid: |
| 785 if (ShouldInlineSimd()) { | 820 if (ShouldInlineSimd()) { |
| 786 unboxed = kUnboxedInt32x4; | 821 unboxed = kUnboxedInt32x4; |
| 787 } | 822 } |
| 788 break; | 823 break; |
| 789 case kFloat64x2Cid: | 824 case kFloat64x2Cid: |
| 790 if (ShouldInlineSimd()) { | 825 if (ShouldInlineSimd()) { |
| 791 unboxed = kUnboxedFloat64x2; | 826 unboxed = kUnboxedFloat64x2; |
| 792 } | 827 } |
| 793 break; | 828 break; |
| 794 } | 829 } |
| 795 | 830 |
| 796 if (unboxed != current) { | 831 phi->set_representation(unboxed); |
| 797 phi->set_representation(unboxed); | |
| 798 return true; | |
| 799 } | |
| 800 | |
| 801 return false; | |
| 802 } | 832 } |
| 803 | 833 |
| 804 | 834 |
| 805 void FlowGraphOptimizer::SelectRepresentations() { | 835 void FlowGraphOptimizer::SelectRepresentations() { |
| 806 // Conservatively unbox all phis that were proven to be of Double, | 836 // Conservatively unbox all phis that were proven to be of Double, |
| 807 // Float32x4, or Int32x4 type. | 837 // Float32x4, or Int32x4 type. |
| 808 for (intptr_t i = 0; i < block_order_.length(); ++i) { | 838 for (intptr_t i = 0; i < block_order_.length(); ++i) { |
| 809 JoinEntryInstr* join_entry = block_order_[i]->AsJoinEntry(); | 839 JoinEntryInstr* join_entry = block_order_[i]->AsJoinEntry(); |
| 810 if (join_entry != NULL) { | 840 if (join_entry != NULL) { |
| 811 for (PhiIterator it(join_entry); !it.Done(); it.Advance()) { | 841 for (PhiIterator it(join_entry); !it.Done(); it.Advance()) { |
| (...skipping 3774 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 4586 } | 4616 } |
| 4587 | 4617 |
| 4588 // Discard the environment from the original instruction because the store | 4618 // Discard the environment from the original instruction because the store |
| 4589 // can't deoptimize. | 4619 // can't deoptimize. |
| 4590 instr->RemoveEnvironment(); | 4620 instr->RemoveEnvironment(); |
| 4591 ReplaceCall(instr, store); | 4621 ReplaceCall(instr, store); |
| 4592 return true; | 4622 return true; |
| 4593 } | 4623 } |
| 4594 | 4624 |
| 4595 | 4625 |
| 4626 #if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_IA32) | |
| 4627 // Smi widening pass is only meaningful on platforms where Smi | |
| 4628 // is smaller than 32bit. For now only support it on ARM and ia32. | |
| 4629 | |
| 4630 class DefinitionWorklist { | |
| 4631 public: | |
| 4632 DefinitionWorklist(FlowGraph* flow_graph, | |
| 4633 intptr_t initial_capacity) | |
| 4634 : defs_(initial_capacity), | |
| 4635 contains_(new BitVector(flow_graph->current_ssa_temp_index())) { | |
| 4636 } | |
| 4637 | |
| 4638 void Add(Definition* defn) { | |
| 4639 if (!Contains(defn)) { | |
| 4640 defs_.Add(defn); | |
| 4641 contains_->Add(defn->ssa_temp_index()); | |
| 4642 } | |
| 4643 } | |
| 4644 | |
| 4645 bool Contains(Definition* defn) const { | |
| 4646 return (defn->ssa_temp_index() >= 0) && | |
| 4647 contains_->Contains(defn->ssa_temp_index()); | |
| 4648 } | |
| 4649 | |
| 4650 const GrowableArray<Definition*>& definitions() const { return defs_; } | |
| 4651 BitVector* contains() const { return contains_; } | |
| 4652 | |
| 4653 void Clear() { | |
| 4654 defs_.TruncateTo(0); | |
| 4655 contains_->Clear(); | |
| 4656 } | |
| 4657 | |
| 4658 private: | |
| 4659 GrowableArray<Definition*> defs_; | |
| 4660 BitVector* contains_; | |
| 4661 }; | |
| 4662 | |
| 4663 | |
| 4664 static bool CanBeWidened(BinarySmiOpInstr* smi_op) { | |
| 4665 return BinaryInt32OpInstr::IsSupported(smi_op->op_kind(), | |
| 4666 smi_op->left(), | |
| 4667 smi_op->right()); | |
| 4668 } | |
| 4669 | |
| 4670 | |
| 4671 static bool BenefitsFromWidening(BinarySmiOpInstr* smi_op) { | |
| 4672 switch (smi_op->op_kind()) { | |
| 4673 case Token::kMUL: | |
| 4674 case Token::kSHR: | |
| 4675 // For kMUL we save untagging of the argument; for kSHR | |
| 4676 // we save tagging of the result. | |
| 4677 return true; | |
|
Florian Schneider
2014/08/27 09:36:51
For left and right shift we can save untagging of
Vyacheslav Egorov (Google)
2014/08/27 11:45:37
Done.
| |
| 4678 default: | |
| 4679 return false; | |
| 4680 } | |
| 4681 } | |
| 4682 | |
| 4683 | |
| 4684 void FlowGraphOptimizer::WidenSmiToInt32() { | |
| 4685 GrowableArray<BinarySmiOpInstr*> candidates; | |
| 4686 | |
| 4687 // Step 1. Collect all instructions that potentially benefit from widening of | |
| 4688 // their operands (or their result) into int32 range. | |
| 4689 for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator(); | |
| 4690 !block_it.Done(); | |
| 4691 block_it.Advance()) { | |
| 4692 for (ForwardInstructionIterator instr_it(block_it.Current()); | |
| 4693 !instr_it.Done(); | |
| 4694 instr_it.Advance()) { | |
| 4695 BinarySmiOpInstr* smi_op = instr_it.Current()->AsBinarySmiOp(); | |
| 4696 if (smi_op != NULL && | |
| 4697 BenefitsFromWidening(smi_op) && | |
| 4698 CanBeWidened(smi_op)) { | |
| 4699 candidates.Add(smi_op); | |
| 4700 } | |
| 4701 } | |
| 4702 } | |
| 4703 | |
| 4704 if (candidates.length() == 0) { | |
| 4705 return; | |
| 4706 } | |
| 4707 | |
| 4708 // Step 2. For each block in the graph compute which loop it belongs to. | |
| 4709 // We will use this information later during computation of the widening's | |
| 4710 // gain: we are going to assume that only conversions occurring inside the | |
| 4711 // same loop should be counted against the gain, all other conversions | |
| 4712 // can be hoisted and thus cost nothing compared to the loop cost itself. | |
| 4713 const ZoneGrowableArray<BlockEntryInstr*>& loop_headers = | |
| 4714 flow_graph()->loop_headers(); | |
| 4715 | |
| 4716 GrowableArray<intptr_t> loops(flow_graph_->preorder().length()); | |
| 4717 for (intptr_t i = 0; i < flow_graph_->preorder().length(); i++) { | |
| 4718 loops.Add(-1); | |
| 4719 } | |
| 4720 | |
| 4721 for (intptr_t loop_id = 0; loop_id < loop_headers.length(); ++loop_id) { | |
| 4722 for (BitVector::Iterator loop_it(loop_headers[loop_id]->loop_info()); | |
| 4723 !loop_it.Done(); | |
| 4724 loop_it.Advance()) { | |
| 4725 loops[loop_it.Current()] = loop_id; | |
| 4726 } | |
| 4727 } | |
| 4728 | |
| 4729 // Step 3. For each candidate transitively collect all other BinarySmiOpInstr | |
| 4730 // and PhiInstr that depend on it and that it depends on and count amount of | |
| 4731 // untagging operations that we save on the assumption that this whole graph of | |
| 4732 // values is using kUnboxedInt32 representation instead of kTagged. | |
|
Florian Schneider
2014/08/27 09:36:51
s/representaion/representation/
Vyacheslav Egorov (Google)
2014/08/27 11:45:37
Done.
| |
| 4733 // Convert those graphs that have positive gain to kUnboxedInt32. | |
| 4734 | |
| 4735 // BitVector containing SSA indexes of all processed definitions. Used to skip | |
| 4736 // those candidates that belong to dependency graph of another candidate. | |
| 4737 BitVector* processed = new BitVector(flow_graph_->current_ssa_temp_index()); | |
| 4738 | |
| 4739 // Worklist used to collect dependency graph. | |
| 4740 DefinitionWorklist worklist(flow_graph_, candidates.length()); | |
| 4741 for (intptr_t i = 0; i < candidates.length(); i++) { | |
| 4742 BinarySmiOpInstr* op = candidates[i]; | |
| 4743 if (op->WasEliminated() || processed->Contains(op->ssa_temp_index())) { | |
| 4744 continue; | |
| 4745 } | |
| 4746 | |
| 4747 if (FLAG_trace_smi_widening) { | |
| 4748 OS::Print("analysing candidate: %s\n", op->ToCString()); | |
| 4749 } | |
| 4750 worklist.Clear(); | |
| 4751 worklist.Add(op); | |
| 4752 | |
| 4753 // Collect dependency graph. Note: more items are added to worklist | |
| 4754 // inside this loop. | |
| 4755 intptr_t gain = 0; | |
| 4756 for (intptr_t j = 0; j < worklist.definitions().length(); j++) { | |
| 4757 Definition* defn = worklist.definitions()[j]; | |
| 4758 | |
| 4759 if (FLAG_trace_smi_widening) { | |
| 4760 OS::Print("> %s\n", defn->ToCString()); | |
| 4761 } | |
| 4762 | |
| 4763 if (defn->IsBinarySmiOp() && | |
| 4764 BenefitsFromWidening(defn->AsBinarySmiOp())) { | |
| 4765 gain++; | |
| 4766 if (FLAG_trace_smi_widening) { | |
| 4767 OS::Print("^ [%d] (o) %s\n", gain, defn->ToCString()); | |
| 4768 } | |
| 4769 } | |
| 4770 | |
| 4771 const intptr_t defn_loop = loops[defn->GetBlock()->preorder_number()]; | |
| 4772 | |
| 4773 // Process all inputs. | |
| 4774 for (intptr_t k = 0; k < defn->InputCount(); k++) { | |
| 4775 Definition* input = defn->InputAt(k)->definition(); | |
| 4776 if (input->IsBinarySmiOp() && | |
| 4777 CanBeWidened(input->AsBinarySmiOp())) { | |
| 4778 worklist.Add(input); | |
| 4779 } else if (input->IsPhi() && input->Type()->ToCid() == kSmiCid) { | |
| 4780 worklist.Add(input); | |
| 4781 } else if (input->IsBinaryMintOp()) { | |
| 4782 // Mint operation produces untagged result. We avoid tagging. | |
| 4783 gain++; | |
| 4784 if (FLAG_trace_smi_widening) { | |
| 4785 OS::Print("^ [%d] (i) %s\n", gain, input->ToCString()); | |
| 4786 } | |
| 4787 } else if (defn_loop == loops[input->GetBlock()->preorder_number()] && | |
| 4788 (input->Type()->ToCid() == kSmiCid)) { | |
| 4789 // Input comes from the same loop, is known to be smi and requires | |
| 4790 // untagging. | |
| 4791 // TODO(vegorov) this heuristic assumes that values that are not | |
| 4792 // known to be smi have to be checked and this check can be | |
| 4793 // coalesced with untagging. Start coalescing them. | |
| 4794 gain--; | |
| 4795 if (FLAG_trace_smi_widening) { | |
| 4796 OS::Print("v [%d] (i) %s\n", gain, input->ToCString()); | |
| 4797 } | |
| 4798 } | |
| 4799 } | |
| 4800 | |
| 4801 // Process all uses. | |
| 4802 for (Value* use = defn->input_use_list(); | |
| 4803 use != NULL; | |
| 4804 use = use->next_use()) { | |
| 4805 Instruction* instr = use->instruction(); | |
| 4806 Definition* use_defn = instr->AsDefinition(); | |
| 4807 if (use_defn == NULL) { | |
| 4808 // We assume that tagging before returning or pushing argument costs | |
| 4809 // very little compared to the cost of the return/call itself. | |
| 4810 if (!instr->IsReturn() && !instr->IsPushArgument()) { | |
| 4811 gain--; | |
| 4812 if (FLAG_trace_smi_widening) { | |
| 4813 OS::Print("v [%d] (u) %s\n", | |
| 4814 gain, use->instruction()->ToCString()); | |
| 4815 } | |
| 4816 } | |
| 4817 continue; | |
| 4818 } else if (use_defn->IsBinarySmiOp() && | |
| 4819 CanBeWidened(use_defn->AsBinarySmiOp())) { | |
| 4820 worklist.Add(use_defn); | |
| 4821 } else if (use_defn->IsPhi() && | |
| 4822 use_defn->AsPhi()->Type()->ToCid() == kSmiCid) { | |
| 4823 worklist.Add(use_defn); | |
| 4824 } else if (use_defn->IsBinaryMintOp()) { | |
| 4825 // BinaryMintOp requires untagging of its inputs. | |
| 4826 // Converting kUnboxedInt32 to kUnboxedMint is essentially zero cost | |
| 4827 // sign extension operation. | |
| 4828 gain++; | |
| 4829 if (FLAG_trace_smi_widening) { | |
| 4830 OS::Print("^ [%d] (u) %s\n", gain, use->instruction()->ToCString()); | |
| 4831 } | |
| 4832 } else if (defn_loop == loops[instr->GetBlock()->preorder_number()]) { | |
| 4833 gain--; | |
| 4834 if (FLAG_trace_smi_widening) { | |
| 4835 OS::Print("v [%d] (u) %s\n", gain, use->instruction()->ToCString()); | |
| 4836 } | |
| 4837 } | |
| 4838 } | |
| 4839 } | |
| 4840 | |
| 4841 processed->AddAll(worklist.contains()); | |
| 4842 | |
| 4843 if (FLAG_trace_smi_widening) { | |
| 4844 OS::Print("~ %s gain %d\n", op->ToCString(), gain); | |
| 4845 } | |
| 4846 | |
| 4847 if (gain > 0) { | |
| 4848 // We have positive gain from widening. Convert all BinarySmiOpInstr into | |
| 4849 // BinaryInt32OpInstr and set representation of all phis to kUnboxedInt32. | |
| 4850 for (intptr_t j = 0; j < worklist.definitions().length(); j++) { | |
| 4851 Definition* defn = worklist.definitions()[j]; | |
| 4852 ASSERT(defn->IsPhi() || defn->IsBinarySmiOp()); | |
| 4853 | |
| 4854 if (defn->IsBinarySmiOp()) { | |
| 4855 BinarySmiOpInstr* smi_op = defn->AsBinarySmiOp(); | |
| 4856 BinaryInt32OpInstr* int32_op = new(I) BinaryInt32OpInstr( | |
| 4857 smi_op->op_kind(), | |
| 4858 smi_op->left()->CopyWithType(), | |
| 4859 smi_op->right()->CopyWithType(), | |
| 4860 smi_op->DeoptimizationTarget()); | |
| 4861 | |
| 4862 smi_op->ReplaceWith(int32_op, NULL); | |
| 4863 } else if (defn->IsPhi()) { | |
| 4864 defn->AsPhi()->set_representation(kUnboxedInt32); | |
| 4865 } | |
| 4866 } | |
| 4867 } | |
| 4868 } | |
| 4869 } | |
| 4870 #else | |
| 4871 void FlowGraphOptimizer::WidenSmiToInt32() { | |
| 4872 // TODO(vegorov) ideally on 64-bit platforms we would like to narrow smi | |
| 4873 // operations to 32-bit where it saves tagging and untagging and allows | |
| 4874 // to use shorter (and faster) instructions. But we currently don't | |
| 4875 // save enough range information in the ICData to drive this decision. | |
| 4876 } | |
| 4877 #endif | |
| 4878 | |
| 4596 void FlowGraphOptimizer::InferIntRanges() { | 4879 void FlowGraphOptimizer::InferIntRanges() { |
| 4597 RangeAnalysis range_analysis(flow_graph_); | 4880 RangeAnalysis range_analysis(flow_graph_); |
| 4598 range_analysis.Analyze(); | 4881 range_analysis.Analyze(); |
| 4599 } | 4882 } |
| 4600 | 4883 |
| 4601 | 4884 |
| 4602 void TryCatchAnalyzer::Optimize(FlowGraph* flow_graph) { | 4885 void TryCatchAnalyzer::Optimize(FlowGraph* flow_graph) { |
| 4603 // For every catch-block: Iterate over all call instructions inside the | 4886 // For every catch-block: Iterate over all call instructions inside the |
| 4604 // corresponding try-block and figure out for each environment value if it | 4887 // corresponding try-block and figure out for each environment value if it |
| 4605 // is the same constant at all calls. If yes, replace the initial definition | 4888 // is the same constant at all calls. If yes, replace the initial definition |
| (...skipping 87 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 4693 // speculatively hoisting checks. | 4976 // speculatively hoisting checks. |
| 4694 if (FLAG_trace_optimization) { | 4977 if (FLAG_trace_optimization) { |
| 4695 OS::Print("Hoisting instruction %s:%" Pd " from B%" Pd " to B%" Pd "\n", | 4978 OS::Print("Hoisting instruction %s:%" Pd " from B%" Pd " to B%" Pd "\n", |
| 4696 current->DebugName(), | 4979 current->DebugName(), |
| 4697 current->GetDeoptId(), | 4980 current->GetDeoptId(), |
| 4698 current->GetBlock()->block_id(), | 4981 current->GetBlock()->block_id(), |
| 4699 pre_header->block_id()); | 4982 pre_header->block_id()); |
| 4700 } | 4983 } |
| 4701 // Move the instruction out of the loop. | 4984 // Move the instruction out of the loop. |
| 4702 current->RemoveEnvironment(); | 4985 current->RemoveEnvironment(); |
| 4703 it->RemoveCurrentFromGraph(); | 4986 if (it != NULL) { |
| 4987 it->RemoveCurrentFromGraph(); | |
| 4988 } else { | |
| 4989 current->RemoveFromGraph(); | |
| 4990 } | |
| 4704 GotoInstr* last = pre_header->last_instruction()->AsGoto(); | 4991 GotoInstr* last = pre_header->last_instruction()->AsGoto(); |
| 4705 // Using kind kEffect will not assign a fresh ssa temporary index. | 4992 // Using kind kEffect will not assign a fresh ssa temporary index. |
| 4706 flow_graph()->InsertBefore(last, current, last->env(), FlowGraph::kEffect); | 4993 flow_graph()->InsertBefore(last, current, last->env(), FlowGraph::kEffect); |
| 4707 current->deopt_id_ = last->GetDeoptId(); | 4994 current->deopt_id_ = last->GetDeoptId(); |
| 4708 } | 4995 } |
| 4709 | 4996 |
| 4710 | 4997 |
// Optimistically specialize a loop phi to Smi. If every input of the phi is
// already known to be Smi except the single input flowing in from the loop
// pre-header, and some use of the phi inside the loop is guarded by a
// CheckSmi, hoist that CheckSmi into the pre-header and retarget it at the
// incoming input. The phi can then be typed as Smi.
// NOTE(review): `header` is currently unused here — membership in the loop
// is implied by the phi belonging to a loop header's join; confirm.
void LICM::TrySpecializeSmiPhi(PhiInstr* phi,
                               BlockEntryInstr* header,
                               BlockEntryInstr* pre_header) {
  if (phi->Type()->ToCid() == kSmiCid) {
    // Already known to be Smi; nothing to specialize.
    return;
  }

  // Check if there is only a single kDynamicCid input to the phi that
  // comes from the pre-header. Any input known to be non-smi (neither
  // kSmiCid nor kDynamicCid) makes specialization impossible.
  const intptr_t kNotFound = -1;
  intptr_t non_smi_input = kNotFound;
  for (intptr_t i = 0; i < phi->InputCount(); ++i) {
    Value* input = phi->InputAt(i);
    if (input->Type()->ToCid() != kSmiCid) {
      if ((non_smi_input != kNotFound) ||
          (input->Type()->ToCid() != kDynamicCid)) {
        // There are multiple kDynamicCid inputs or there is an input that is
        // known to be non-smi.
        return;
      } else {
        non_smi_input = i;
      }
    }
  }

  // The single dynamic input must be the one arriving from the pre-header;
  // inputs arriving along back edges cannot be checked before the loop.
  if ((non_smi_input == kNotFound) ||
      (phi->block()->PredecessorAt(non_smi_input) != pre_header)) {
    return;
  }

  // Find a CheckSmi that guards a use of this phi (first one found wins).
  CheckSmiInstr* check = NULL;
  for (Value* use = phi->input_use_list();
       (use != NULL) && (check == NULL);
       use = use->next_use()) {
    check = use->instruction()->AsCheckSmi();
  }

  if (check == NULL) {
    // No CheckSmi guards the phi; nothing to hoist.
    return;
  }

  // Hoist the CheckSmi instruction and make this phi a smi one.
  Hoist(NULL, pre_header, check);

  // Replace value we are checking with phi's input: after hoisting, the
  // check must validate the only potentially non-smi incoming value.
  check->value()->BindTo(phi->InputAt(non_smi_input)->definition());

  phi->UpdateType(CompileType::FromCid(kSmiCid));
}
| 4756 | 5047 |
| 4757 | 5048 |
| 4758 // Load instructions handled by load elimination. | 5049 // Load instructions handled by load elimination. |
| 4759 static bool IsLoadEliminationCandidate(Instruction* instr) { | 5050 static bool IsLoadEliminationCandidate(Instruction* instr) { |
| 4760 return instr->IsLoadField() | 5051 return instr->IsLoadField() |
| 4761 || instr->IsLoadIndexed() | 5052 || instr->IsLoadIndexed() |
| 4762 || instr->IsLoadStaticField() | 5053 || instr->IsLoadStaticField() |
| 4763 || instr->IsCurrentContext(); | 5054 || instr->IsCurrentContext(); |
| 4764 } | 5055 } |
| 4765 | 5056 |
| 4766 | 5057 |
| 4767 static bool IsLoopInvariantLoad(ZoneGrowableArray<BitVector*>* sets, | 5058 static bool IsLoopInvariantLoad(ZoneGrowableArray<BitVector*>* sets, |
| 4768 intptr_t loop_header_index, | 5059 intptr_t loop_header_index, |
| 4769 Instruction* instr) { | 5060 Instruction* instr) { |
| 4770 return IsLoadEliminationCandidate(instr) && | 5061 return IsLoadEliminationCandidate(instr) && |
| 4771 (sets != NULL) && | 5062 (sets != NULL) && |
| 4772 instr->HasPlaceId() && | 5063 instr->HasPlaceId() && |
| 4773 ((*sets)[loop_header_index] != NULL) && | 5064 ((*sets)[loop_header_index] != NULL) && |
| 4774 (*sets)[loop_header_index]->Contains(instr->place_id()); | 5065 (*sets)[loop_header_index]->Contains(instr->place_id()); |
| 4775 } | 5066 } |
| 4776 | 5067 |
| 4777 | 5068 |
// Walks every loop in the flow graph and attempts to specialize its phis to
// Smi by hoisting guarding CheckSmi instructions into the loop pre-header
// (see TrySpecializeSmiPhi).
void LICM::OptimisticallySpecializeSmiPhis() {
  if (!flow_graph()->parsed_function().function().
      allows_hoisting_check_class()) {
    // Do not hoist any.
    return;
  }

  const ZoneGrowableArray<BlockEntryInstr*>& loop_headers =
      flow_graph()->loop_headers();

  for (intptr_t i = 0; i < loop_headers.length(); ++i) {
    JoinEntryInstr* header = loop_headers[i]->AsJoinEntry();
    // Skip loops that don't have a pre-header block.
    BlockEntryInstr* pre_header = FindPreHeader(header);
    if (pre_header == NULL) continue;

    for (PhiIterator it(header); !it.Done(); it.Advance()) {
      TrySpecializeSmiPhi(it.Current(), header, pre_header);
    }
  }
}
| 5090 | |
| 5091 | |
| 4778 void LICM::Optimize() { | 5092 void LICM::Optimize() { |
| 4779 if (!flow_graph()->parsed_function().function(). | 5093 if (!flow_graph()->parsed_function().function(). |
| 4780 allows_hoisting_check_class()) { | 5094 allows_hoisting_check_class()) { |
| 4781 // Do not hoist any. | 5095 // Do not hoist any. |
| 4782 return; | 5096 return; |
| 4783 } | 5097 } |
| 4784 | 5098 |
| 4785 const ZoneGrowableArray<BlockEntryInstr*>& loop_headers = | 5099 const ZoneGrowableArray<BlockEntryInstr*>& loop_headers = |
| 4786 flow_graph()->loop_headers(); | 5100 flow_graph()->loop_headers(); |
| 4787 | 5101 |
| (...skipping 26 matching lines...) Expand all Loading... | |
| 4814 inputs_loop_invariant = false; | 5128 inputs_loop_invariant = false; |
| 4815 break; | 5129 break; |
| 4816 } | 5130 } |
| 4817 } | 5131 } |
| 4818 if (inputs_loop_invariant && | 5132 if (inputs_loop_invariant && |
| 4819 !current->IsAssertAssignable() && | 5133 !current->IsAssertAssignable() && |
| 4820 !current->IsAssertBoolean()) { | 5134 !current->IsAssertBoolean()) { |
| 4821 // TODO(fschneider): Enable hoisting of Assert-instructions | 5135 // TODO(fschneider): Enable hoisting of Assert-instructions |
| 4822 // if it safe to do. | 5136 // if it safe to do. |
| 4823 Hoist(&it, pre_header, current); | 5137 Hoist(&it, pre_header, current); |
| 4824 } else if (current->IsCheckSmi() && | |
| 4825 current->InputAt(0)->definition()->IsPhi()) { | |
| 4826 TryHoistCheckSmiThroughPhi( | |
| 4827 &it, header, pre_header, current->AsCheckSmi()); | |
| 4828 } | 5138 } |
| 4829 } | 5139 } |
| 4830 } | 5140 } |
| 4831 } | 5141 } |
| 4832 } | 5142 } |
| 4833 } | 5143 } |
| 4834 | 5144 |
| 4835 | 5145 |
| 4836 // Place describes an abstract location (e.g. field) that IR can load | 5146 // Place describes an abstract location (e.g. field) that IR can load |
| 4837 // from or store to. | 5147 // from or store to. |
| (...skipping 3092 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 7930 ASSERT(!result.IsNull()); | 8240 ASSERT(!result.IsNull()); |
| 7931 SetValue(defn, result); | 8241 SetValue(defn, result); |
| 7932 } | 8242 } |
| 7933 | 8243 |
| 7934 | 8244 |
| 7935 void ConstantPropagator::VisitBinarySmiOp(BinarySmiOpInstr* instr) { | 8245 void ConstantPropagator::VisitBinarySmiOp(BinarySmiOpInstr* instr) { |
| 7936 HandleBinaryOp(instr, instr->op_kind(), *instr->left(), *instr->right()); | 8246 HandleBinaryOp(instr, instr->op_kind(), *instr->left(), *instr->right()); |
| 7937 } | 8247 } |
| 7938 | 8248 |
| 7939 | 8249 |
| 8250 void ConstantPropagator::VisitBinaryInt32Op(BinaryInt32OpInstr* instr) { | |
| 8251 HandleBinaryOp(instr, instr->op_kind(), *instr->left(), *instr->right()); | |
| 8252 } | |
| 8253 | |
| 8254 | |
// Box operations are not folded yet; conservatively mark non-constant.
void ConstantPropagator::VisitBoxInteger(BoxIntegerInstr* instr) {
  // TODO(kmillikin): Handle box operation.
  SetValue(instr, non_constant_);
}


// Unbox operations are not folded yet; conservatively mark non-constant.
void ConstantPropagator::VisitUnboxInteger(UnboxIntegerInstr* instr) {
  // TODO(kmillikin): Handle unbox operation.
  SetValue(instr, non_constant_);
}
| (...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 7991 const Object& value = instr->value()->definition()->constant_value(); | 8306 const Object& value = instr->value()->definition()->constant_value(); |
| 7992 if (IsConstant(value) && value.IsInteger()) { | 8307 if (IsConstant(value) && value.IsInteger()) { |
| 7993 SetValue(instr, Double::Handle(I, | 8308 SetValue(instr, Double::Handle(I, |
| 7994 Double::New(Integer::Cast(value).AsDoubleValue(), Heap::kOld))); | 8309 Double::New(Integer::Cast(value).AsDoubleValue(), Heap::kOld))); |
| 7995 } else if (IsNonConstant(value)) { | 8310 } else if (IsNonConstant(value)) { |
| 7996 SetValue(instr, non_constant_); | 8311 SetValue(instr, non_constant_); |
| 7997 } | 8312 } |
| 7998 } | 8313 } |
| 7999 | 8314 |
| 8000 | 8315 |
| 8316 void ConstantPropagator::VisitInt32ToDouble(Int32ToDoubleInstr* instr) { | |
| 8317 const Object& value = instr->value()->definition()->constant_value(); | |
| 8318 if (IsConstant(value) && value.IsInteger()) { | |
| 8319 SetValue(instr, Double::Handle(I, | |
| 8320 Double::New(Integer::Cast(value).AsDoubleValue(), Heap::kOld))); | |
| 8321 } else if (IsNonConstant(value)) { | |
| 8322 SetValue(instr, non_constant_); | |
| 8323 } | |
| 8324 } | |
| 8325 | |
| 8326 | |
// Double-to-integer conversion is not folded yet; mark non-constant.
void ConstantPropagator::VisitDoubleToInteger(DoubleToIntegerInstr* instr) {
  // TODO(kmillikin): Handle conversion.
  SetValue(instr, non_constant_);
}


// Double-to-smi conversion is not folded yet; mark non-constant.
void ConstantPropagator::VisitDoubleToSmi(DoubleToSmiInstr* instr) {
  // TODO(kmillikin): Handle conversion.
  SetValue(instr, non_constant_);
}
| (...skipping 364 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 8375 SetValue(instr, non_constant_); | 8701 SetValue(instr, non_constant_); |
| 8376 } | 8702 } |
| 8377 | 8703 |
| 8378 | 8704 |
// Uint32 unbox is not folded yet; conservatively mark non-constant.
void ConstantPropagator::VisitUnboxUint32(UnboxUint32Instr* instr) {
  // TODO(kmillikin): Handle unbox operation.
  SetValue(instr, non_constant_);
}


// Int32 box is not folded yet; conservatively mark non-constant.
void ConstantPropagator::VisitBoxInt32(BoxInt32Instr* instr) {
  // TODO(kmillikin): Handle box operation.
  SetValue(instr, non_constant_);
}


// Int32 unbox is not folded yet; conservatively mark non-constant.
void ConstantPropagator::VisitUnboxInt32(UnboxInt32Instr* instr) {
  // TODO(kmillikin): Handle unbox operation.
  SetValue(instr, non_constant_);
}


// Conversions between unboxed integer representations are not folded.
void ConstantPropagator::VisitUnboxedIntConverter(
    UnboxedIntConverterInstr* instr) {
  SetValue(instr, non_constant_);
}


// Fold a uint32 binary op via the shared handler, then truncate the folded
// result to 32 bits to match uint32 wrap-around semantics.
void ConstantPropagator::VisitBinaryUint32Op(BinaryUint32OpInstr* instr) {
  HandleBinaryOp(instr, instr->op_kind(), *instr->left(), *instr->right());
  TruncateInteger(instr, static_cast<int64_t>(0xFFFFFFFF));
}
| (...skipping 1220 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 9615 | 9953 |
| 9616 // Insert materializations at environment uses. | 9954 // Insert materializations at environment uses. |
| 9617 for (intptr_t i = 0; i < exits_collector_.exits().length(); i++) { | 9955 for (intptr_t i = 0; i < exits_collector_.exits().length(); i++) { |
| 9618 CreateMaterializationAt( | 9956 CreateMaterializationAt( |
| 9619 exits_collector_.exits()[i], alloc, alloc->cls(), *slots); | 9957 exits_collector_.exits()[i], alloc, alloc->cls(), *slots); |
| 9620 } | 9958 } |
| 9621 } | 9959 } |
| 9622 | 9960 |
| 9623 | 9961 |
| 9624 } // namespace dart | 9962 } // namespace dart |
| OLD | NEW |