Chromium Code Reviews

Side by Side Diff: runtime/vm/compiler.cc

Issue 1678203002: Remove more feature in product mode (Closed)
Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: Created 4 years, 10 months ago
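
This patch set wraps the compiler's timeline instrumentation (VMTagScope, TimelineDurationScope, and the compiler_timeline stream they use) in #ifndef PRODUCT guards and gates AST printing behind FLAG_support_ast_printer, so neither is compiled into product-mode binaries. Every guarded site in the diff below has the same shape; a minimal sketch of the pattern, assembled from the hunks in this file ("SomePass" and DoPass() are placeholders, not VM identifiers):

  #ifndef PRODUCT
    TimelineDurationScope tds(thread(),
                              compiler_timeline,   // stream local is guarded the same way
                              "SomePass");         // "SomePass" is a placeholder label
  #endif  // !PRODUCT
    DoPass(flow_graph);  // DoPass() is a placeholder; the compiler pass itself still
                         // runs in every build mode, only the timing scope is removed
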
1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/compiler.h" 5 #include "vm/compiler.h"
6 6
7 #include "vm/assembler.h" 7 #include "vm/assembler.h"
8 8
9 #include "vm/ast_printer.h" 9 #include "vm/ast_printer.h"
10 #include "vm/block_scheduler.h" 10 #include "vm/block_scheduler.h"
(...skipping 284 matching lines...)
295 Thread* thread = Thread::Current(); 295 Thread* thread = Thread::Current();
296 Error& error = Error::Handle(thread->zone()); 296 Error& error = Error::Handle(thread->zone());
297 error = thread->sticky_error(); 297 error = thread->sticky_error();
298 thread->clear_sticky_error(); 298 thread->clear_sticky_error();
299 return error.raw(); 299 return error.raw();
300 } 300 }
301 } 301 }
302 302
303 Thread* const thread = Thread::Current(); 303 Thread* const thread = Thread::Current();
304 StackZone zone(thread); 304 StackZone zone(thread);
305 // We remember all the classes that are being compiled in these lists. This 305 #ifndef PRODUCT
306 // also allows us to reset the marked_for_parsing state in case we see an
307 // error.
308 VMTagScope tagScope(thread, VMTag::kCompileClassTagId); 306 VMTagScope tagScope(thread, VMTag::kCompileClassTagId);
309 TimelineDurationScope tds(thread, 307 TimelineDurationScope tds(thread,
310 thread->isolate()->GetCompilerStream(), 308 thread->isolate()->GetCompilerStream(),
311 "CompileClass"); 309 "CompileClass");
312 if (tds.enabled()) { 310 if (tds.enabled()) {
313 tds.SetNumArguments(1); 311 tds.SetNumArguments(1);
314 tds.CopyArgument(0, "class", cls.ToCString()); 312 tds.CopyArgument(0, "class", cls.ToCString());
315 } 313 }
314 #endif // !PRODUCT
316 315
316 // We remember all the classes that are being compiled in these lists. This
317 // also allows us to reset the marked_for_parsing state in case we see an
318 // error.
317 GrowableHandlePtrArray<const Class> parse_list(thread->zone(), 4); 319 GrowableHandlePtrArray<const Class> parse_list(thread->zone(), 4);
318 GrowableHandlePtrArray<const Class> patch_list(thread->zone(), 4); 320 GrowableHandlePtrArray<const Class> patch_list(thread->zone(), 4);
319 321
320 // Parse the class and all the interfaces it implements and super classes. 322 // Parse the class and all the interfaces it implements and super classes.
321 LongJumpScope jump; 323 LongJumpScope jump;
322 if (setjmp(*jump.Set()) == 0) { 324 if (setjmp(*jump.Set()) == 0) {
323 if (FLAG_trace_compiler) { 325 if (FLAG_trace_compiler) {
324 THR_Print("Compiling Class '%s'\n", cls.ToCString()); 326 THR_Print("Compiling Class '%s'\n", cls.ToCString());
325 } 327 }
326 328
(...skipping 266 matching lines...)
593 // Return false if bailed out. 595 // Return false if bailed out.
594 // If optimized_result_code is not NULL then it is caller's responsibility 596 // If optimized_result_code is not NULL then it is caller's responsibility
595 // to install code. 597 // to install code.
596 bool CompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) { 598 bool CompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
597 const Function& function = parsed_function()->function(); 599 const Function& function = parsed_function()->function();
598 if (optimized() && !function.IsOptimizable()) { 600 if (optimized() && !function.IsOptimizable()) {
599 return false; 601 return false;
600 } 602 }
601 bool is_compiled = false; 603 bool is_compiled = false;
602 Zone* const zone = thread()->zone(); 604 Zone* const zone = thread()->zone();
605 #ifndef PRODUCT
603 TimelineStream* compiler_timeline = isolate()->GetCompilerStream(); 606 TimelineStream* compiler_timeline = isolate()->GetCompilerStream();
607 #endif
604 CSTAT_TIMER_SCOPE(thread(), codegen_timer); 608 CSTAT_TIMER_SCOPE(thread(), codegen_timer);
605 HANDLESCOPE(thread()); 609 HANDLESCOPE(thread());
606 610
607 // We may reattempt compilation if the function needs to be assembled using 611 // We may reattempt compilation if the function needs to be assembled using
608 // far branches on ARM and MIPS. In the else branch of the setjmp call, 612 // far branches on ARM and MIPS. In the else branch of the setjmp call,
609 // done is set to false, and use_far_branches is set to true if there is a 613 // done is set to false, and use_far_branches is set to true if there is a
610 // longjmp from the ARM or MIPS assemblers. In all other paths through this 614 // longjmp from the ARM or MIPS assemblers. In all other paths through this
611 // while loop, done is set to true. use_far_branches is always false on ia32 615 // while loop, done is set to true. use_far_branches is always false on ia32
612 // and x64. 616 // and x64.
613 bool done = false; 617 bool done = false;
(...skipping 39 matching lines...)
653 if (FLAG_print_ic_data_map) { 657 if (FLAG_print_ic_data_map) {
654 for (intptr_t i = 0; i < ic_data_array->length(); i++) { 658 for (intptr_t i = 0; i < ic_data_array->length(); i++) {
655 if ((*ic_data_array)[i] != NULL) { 659 if ((*ic_data_array)[i] != NULL) {
656 THR_Print("%" Pd " ", i); 660 THR_Print("%" Pd " ", i);
657 FlowGraphPrinter::PrintICData(*(*ic_data_array)[i]); 661 FlowGraphPrinter::PrintICData(*(*ic_data_array)[i]);
658 } 662 }
659 } 663 }
660 } 664 }
661 } 665 }
662 666
667 #ifndef PRODUCT
663 TimelineDurationScope tds(thread(), 668 TimelineDurationScope tds(thread(),
664 compiler_timeline, 669 compiler_timeline,
665 "BuildFlowGraph"); 670 "BuildFlowGraph");
671 #endif // !PRODUCT
666 flow_graph = pipeline->BuildFlowGraph(zone, 672 flow_graph = pipeline->BuildFlowGraph(zone,
667 parsed_function(), 673 parsed_function(),
668 *ic_data_array, 674 *ic_data_array,
669 osr_id()); 675 osr_id());
670 } 676 }
671 677
672 const bool print_flow_graph = 678 const bool print_flow_graph =
673 (FLAG_print_flow_graph || 679 (FLAG_print_flow_graph ||
674 (optimized() && FLAG_print_flow_graph_optimized)) && 680 (optimized() && FLAG_print_flow_graph_optimized)) &&
675 FlowGraphPrinter::ShouldPrint(function); 681 FlowGraphPrinter::ShouldPrint(function);
676 682
677 if (print_flow_graph) { 683 if (print_flow_graph) {
678 if (osr_id() == Compiler::kNoOSRDeoptId) { 684 if (osr_id() == Compiler::kNoOSRDeoptId) {
679 FlowGraphPrinter::PrintGraph("Before Optimizations", flow_graph); 685 FlowGraphPrinter::PrintGraph("Before Optimizations", flow_graph);
680 } else { 686 } else {
681 FlowGraphPrinter::PrintGraph("For OSR", flow_graph); 687 FlowGraphPrinter::PrintGraph("For OSR", flow_graph);
682 } 688 }
683 } 689 }
684 690
685 BlockScheduler block_scheduler(flow_graph); 691 BlockScheduler block_scheduler(flow_graph);
686 const bool reorder_blocks = 692 const bool reorder_blocks =
687 FlowGraph::ShouldReorderBlocks(function, optimized()); 693 FlowGraph::ShouldReorderBlocks(function, optimized());
688 if (reorder_blocks) { 694 if (reorder_blocks) {
695 #ifndef PRODUCT
689 TimelineDurationScope tds(thread(), 696 TimelineDurationScope tds(thread(),
690 compiler_timeline, 697 compiler_timeline,
691 "BlockScheduler::AssignEdgeWeights"); 698 "BlockScheduler::AssignEdgeWeights");
699 #endif // !PRODUCT
692 block_scheduler.AssignEdgeWeights(); 700 block_scheduler.AssignEdgeWeights();
693 } 701 }
694 702
695 if (optimized()) { 703 if (optimized()) {
704 #ifndef PRODUCT
696 TimelineDurationScope tds(thread(), 705 TimelineDurationScope tds(thread(),
697 compiler_timeline, 706 compiler_timeline,
698 "ComputeSSA"); 707 "ComputeSSA");
708 #endif // !PRODUCT
699 CSTAT_TIMER_SCOPE(thread(), ssa_timer); 709 CSTAT_TIMER_SCOPE(thread(), ssa_timer);
700 // Transform to SSA (virtual register 0 and no inlining arguments). 710 // Transform to SSA (virtual register 0 and no inlining arguments).
701 flow_graph->ComputeSSA(0, NULL); 711 flow_graph->ComputeSSA(0, NULL);
702 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 712 DEBUG_ASSERT(flow_graph->VerifyUseLists());
703 if (print_flow_graph) { 713 if (print_flow_graph) {
704 FlowGraphPrinter::PrintGraph("After SSA", flow_graph); 714 FlowGraphPrinter::PrintGraph("After SSA", flow_graph);
705 } 715 }
706 } 716 }
707 717
708 // Maps inline_id_to_function[inline_id] -> function. Top scope 718 // Maps inline_id_to_function[inline_id] -> function. Top scope
709 // function has inline_id 0. The map is populated by the inliner. 719 // function has inline_id 0. The map is populated by the inliner.
710 GrowableArray<const Function*> inline_id_to_function; 720 GrowableArray<const Function*> inline_id_to_function;
711 // For a given inlining-id(index) specifies the caller's inlining-id. 721 // For a given inlining-id(index) specifies the caller's inlining-id.
712 GrowableArray<intptr_t> caller_inline_id; 722 GrowableArray<intptr_t> caller_inline_id;
713 // Collect all instance fields that are loaded in the graph and 723 // Collect all instance fields that are loaded in the graph and
714 // have non-generic type feedback attached to them that can 724 // have non-generic type feedback attached to them that can
715 // potentially affect optimizations. 725 // potentially affect optimizations.
716 if (optimized()) { 726 if (optimized()) {
727 #ifndef PRODUCT
717 TimelineDurationScope tds(thread(), 728 TimelineDurationScope tds(thread(),
718 compiler_timeline, 729 compiler_timeline,
719 "OptimizationPasses"); 730 "OptimizationPasses");
731 #endif // !PRODUCT
720 inline_id_to_function.Add(&function); 732 inline_id_to_function.Add(&function);
721 // Top scope function has no caller (-1). 733 // Top scope function has no caller (-1).
722 caller_inline_id.Add(-1); 734 caller_inline_id.Add(-1);
723 CSTAT_TIMER_SCOPE(thread(), graphoptimizer_timer); 735 CSTAT_TIMER_SCOPE(thread(), graphoptimizer_timer);
724 736
725 FlowGraphOptimizer optimizer(flow_graph, 737 FlowGraphOptimizer optimizer(flow_graph,
726 use_speculative_inlining, 738 use_speculative_inlining,
727 &inlining_black_list); 739 &inlining_black_list);
728 if (FLAG_precompilation) { 740 if (FLAG_precompilation) {
729 optimizer.PopulateWithICData(); 741 optimizer.PopulateWithICData();
730 742
731 optimizer.ApplyClassIds(); 743 optimizer.ApplyClassIds();
732 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 744 DEBUG_ASSERT(flow_graph->VerifyUseLists());
733 745
734 FlowGraphTypePropagator::Propagate(flow_graph); 746 FlowGraphTypePropagator::Propagate(flow_graph);
735 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 747 DEBUG_ASSERT(flow_graph->VerifyUseLists());
736 } 748 }
737 optimizer.ApplyICData(); 749 optimizer.ApplyICData();
738 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 750 DEBUG_ASSERT(flow_graph->VerifyUseLists());
739 751
740 // Optimize (a << b) & c patterns, merge operations. 752 // Optimize (a << b) & c patterns, merge operations.
741 // Run early in order to have more opportunity to optimize left shifts. 753 // Run early in order to have more opportunity to optimize left shifts.
742 optimizer.TryOptimizePatterns(); 754 optimizer.TryOptimizePatterns();
743 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 755 DEBUG_ASSERT(flow_graph->VerifyUseLists());
744 756
745 FlowGraphInliner::SetInliningId(flow_graph, 0); 757 FlowGraphInliner::SetInliningId(flow_graph, 0);
746 758
747 // Inlining (mutates the flow graph) 759 // Inlining (mutates the flow graph)
748 if (FLAG_use_inlining) { 760 if (FLAG_use_inlining) {
761 #ifndef PRODUCT
749 TimelineDurationScope tds2(thread(), 762 TimelineDurationScope tds2(thread(),
750 compiler_timeline, 763 compiler_timeline,
751 "Inlining"); 764 "Inlining");
765 #endif // !PRODUCT
752 CSTAT_TIMER_SCOPE(thread(), graphinliner_timer); 766 CSTAT_TIMER_SCOPE(thread(), graphinliner_timer);
753 // Propagate types to create more inlining opportunities. 767 // Propagate types to create more inlining opportunities.
754 FlowGraphTypePropagator::Propagate(flow_graph); 768 FlowGraphTypePropagator::Propagate(flow_graph);
755 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 769 DEBUG_ASSERT(flow_graph->VerifyUseLists());
756 770
757 // Use propagated class-ids to create more inlining opportunities. 771 // Use propagated class-ids to create more inlining opportunities.
758 optimizer.ApplyClassIds(); 772 optimizer.ApplyClassIds();
759 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 773 DEBUG_ASSERT(flow_graph->VerifyUseLists());
760 774
761 FlowGraphInliner inliner(flow_graph, 775 FlowGraphInliner inliner(flow_graph,
762 &inline_id_to_function, 776 &inline_id_to_function,
763 &caller_inline_id, 777 &caller_inline_id,
764 use_speculative_inlining, 778 use_speculative_inlining,
765 &inlining_black_list); 779 &inlining_black_list);
766 inliner.Inline(); 780 inliner.Inline();
767 // Use lists are maintained and validated by the inliner. 781 // Use lists are maintained and validated by the inliner.
768 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 782 DEBUG_ASSERT(flow_graph->VerifyUseLists());
769 } 783 }
770 784
771 // Propagate types and eliminate more type tests. 785 // Propagate types and eliminate more type tests.
772 FlowGraphTypePropagator::Propagate(flow_graph); 786 FlowGraphTypePropagator::Propagate(flow_graph);
773 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 787 DEBUG_ASSERT(flow_graph->VerifyUseLists());
774 788
775 { 789 {
790 #ifndef PRODUCT
776 TimelineDurationScope tds2(thread(), 791 TimelineDurationScope tds2(thread(),
777 compiler_timeline, 792 compiler_timeline,
778 "ApplyClassIds"); 793 "ApplyClassIds");
794 #endif // !PRODUCT
779 // Use propagated class-ids to optimize further. 795 // Use propagated class-ids to optimize further.
780 optimizer.ApplyClassIds(); 796 optimizer.ApplyClassIds();
781 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 797 DEBUG_ASSERT(flow_graph->VerifyUseLists());
782 } 798 }
783 799
784 // Propagate types for potentially newly added instructions by 800 // Propagate types for potentially newly added instructions by
785 // ApplyClassIds(). Must occur before canonicalization. 801 // ApplyClassIds(). Must occur before canonicalization.
786 FlowGraphTypePropagator::Propagate(flow_graph); 802 FlowGraphTypePropagator::Propagate(flow_graph);
787 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 803 DEBUG_ASSERT(flow_graph->VerifyUseLists());
788 804
789 // Do optimizations that depend on the propagated type information. 805 // Do optimizations that depend on the propagated type information.
790 if (optimizer.Canonicalize()) { 806 if (optimizer.Canonicalize()) {
791 // Invoke Canonicalize twice in order to fully canonicalize patterns 807 // Invoke Canonicalize twice in order to fully canonicalize patterns
792 // like "if (a & const == 0) { }". 808 // like "if (a & const == 0) { }".
793 optimizer.Canonicalize(); 809 optimizer.Canonicalize();
794 } 810 }
795 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 811 DEBUG_ASSERT(flow_graph->VerifyUseLists());
796 812
797 { 813 {
814 #ifndef PRODUCT
798 TimelineDurationScope tds2(thread(), 815 TimelineDurationScope tds2(thread(),
799 compiler_timeline, 816 compiler_timeline,
800 "BranchSimplifier"); 817 "BranchSimplifier");
818 #endif // !PRODUCT
801 BranchSimplifier::Simplify(flow_graph); 819 BranchSimplifier::Simplify(flow_graph);
802 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 820 DEBUG_ASSERT(flow_graph->VerifyUseLists());
803 821
804 IfConverter::Simplify(flow_graph); 822 IfConverter::Simplify(flow_graph);
805 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 823 DEBUG_ASSERT(flow_graph->VerifyUseLists());
806 } 824 }
807 825
808 if (FLAG_constant_propagation) { 826 if (FLAG_constant_propagation) {
827 #ifndef PRODUCT
809 TimelineDurationScope tds2(thread(), 828 TimelineDurationScope tds2(thread(),
810 compiler_timeline, 829 compiler_timeline,
811 "ConstantPropagation"); 830 "ConstantPropagation");
831 #endif // !PRODUCT
812 ConstantPropagator::Optimize(flow_graph); 832 ConstantPropagator::Optimize(flow_graph);
813 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 833 DEBUG_ASSERT(flow_graph->VerifyUseLists());
814 // A canonicalization pass to remove e.g. smi checks on smi constants. 834 // A canonicalization pass to remove e.g. smi checks on smi constants.
815 optimizer.Canonicalize(); 835 optimizer.Canonicalize();
816 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 836 DEBUG_ASSERT(flow_graph->VerifyUseLists());
817 // Canonicalization introduced more opportunities for constant 837 // Canonicalization introduced more opportunities for constant
818 // propagation. 838 // propagation.
819 ConstantPropagator::Optimize(flow_graph); 839 ConstantPropagator::Optimize(flow_graph);
820 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 840 DEBUG_ASSERT(flow_graph->VerifyUseLists());
821 } 841 }
822 842
823 // Optimistically convert loop phis that have a single non-smi input 843 // Optimistically convert loop phis that have a single non-smi input
824 // coming from the loop pre-header into smi-phis. 844 // coming from the loop pre-header into smi-phis.
825 if (FLAG_loop_invariant_code_motion) { 845 if (FLAG_loop_invariant_code_motion) {
826 LICM licm(flow_graph); 846 LICM licm(flow_graph);
827 licm.OptimisticallySpecializeSmiPhis(); 847 licm.OptimisticallySpecializeSmiPhis();
828 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 848 DEBUG_ASSERT(flow_graph->VerifyUseLists());
829 } 849 }
830 850
831 // Propagate types and eliminate even more type tests. 851 // Propagate types and eliminate even more type tests.
832 // Recompute types after constant propagation to infer more precise 852 // Recompute types after constant propagation to infer more precise
833 // types for uses that were previously reached by now eliminated phis. 853 // types for uses that were previously reached by now eliminated phis.
834 FlowGraphTypePropagator::Propagate(flow_graph); 854 FlowGraphTypePropagator::Propagate(flow_graph);
835 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 855 DEBUG_ASSERT(flow_graph->VerifyUseLists());
836 856
837 { 857 {
858 #ifndef PRODUCT
838 TimelineDurationScope tds2(thread(), 859 TimelineDurationScope tds2(thread(),
839 compiler_timeline, 860 compiler_timeline,
840 "SelectRepresentations"); 861 "SelectRepresentations");
862 #endif // !PRODUCT
841 // Where beneficial convert Smi operations into Int32 operations. 863 // Where beneficial convert Smi operations into Int32 operations.
842 // Only meanigful for 32bit platforms right now. 864 // Only meanigful for 32bit platforms right now.
843 optimizer.WidenSmiToInt32(); 865 optimizer.WidenSmiToInt32();
844 866
845 // Unbox doubles. Performed after constant propagation to minimize 867 // Unbox doubles. Performed after constant propagation to minimize
846 // interference from phis merging double values and tagged 868 // interference from phis merging double values and tagged
847 // values coming from dead paths. 869 // values coming from dead paths.
848 optimizer.SelectRepresentations(); 870 optimizer.SelectRepresentations();
849 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 871 DEBUG_ASSERT(flow_graph->VerifyUseLists());
850 } 872 }
851 873
852 { 874 {
875 #ifndef PRODUCT
853 TimelineDurationScope tds2(thread(), 876 TimelineDurationScope tds2(thread(),
854 compiler_timeline, 877 compiler_timeline,
855 "CommonSubexpressionElinination"); 878 "CommonSubexpressionElinination");
879 #endif // !PRODUCT
856 if (FLAG_common_subexpression_elimination || 880 if (FLAG_common_subexpression_elimination ||
857 FLAG_loop_invariant_code_motion) { 881 FLAG_loop_invariant_code_motion) {
858 flow_graph->ComputeBlockEffects(); 882 flow_graph->ComputeBlockEffects();
859 } 883 }
860 884
861 if (FLAG_common_subexpression_elimination) { 885 if (FLAG_common_subexpression_elimination) {
862 if (DominatorBasedCSE::Optimize(flow_graph)) { 886 if (DominatorBasedCSE::Optimize(flow_graph)) {
863 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 887 DEBUG_ASSERT(flow_graph->VerifyUseLists());
864 optimizer.Canonicalize(); 888 optimizer.Canonicalize();
865 // Do another round of CSE to take secondary effects into account: 889 // Do another round of CSE to take secondary effects into account:
(...skipping 17 matching lines...)
883 flow_graph->RemoveRedefinitions(); 907 flow_graph->RemoveRedefinitions();
884 } 908 }
885 909
886 // Optimize (a << b) & c patterns, merge operations. 910 // Optimize (a << b) & c patterns, merge operations.
887 // Run after CSE in order to have more opportunity to merge 911 // Run after CSE in order to have more opportunity to merge
888 // instructions that have same inputs. 912 // instructions that have same inputs.
889 optimizer.TryOptimizePatterns(); 913 optimizer.TryOptimizePatterns();
890 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 914 DEBUG_ASSERT(flow_graph->VerifyUseLists());
891 915
892 { 916 {
917 #ifndef PRODUCT
893 TimelineDurationScope tds2(thread(), 918 TimelineDurationScope tds2(thread(),
894 compiler_timeline, 919 compiler_timeline,
895 "DeadStoreElimination"); 920 "DeadStoreElimination");
921 #endif // !PRODUCT
896 DeadStoreElimination::Optimize(flow_graph); 922 DeadStoreElimination::Optimize(flow_graph);
897 } 923 }
898 924
899 if (FLAG_range_analysis) { 925 if (FLAG_range_analysis) {
926 #ifndef PRODUCT
900 TimelineDurationScope tds2(thread(), 927 TimelineDurationScope tds2(thread(),
901 compiler_timeline, 928 compiler_timeline,
902 "RangeAnalysis"); 929 "RangeAnalysis");
930 #endif // !PRODUCT
903 // Propagate types after store-load-forwarding. Some phis may have 931 // Propagate types after store-load-forwarding. Some phis may have
904 // become smi phis that can be processed by range analysis. 932 // become smi phis that can be processed by range analysis.
905 FlowGraphTypePropagator::Propagate(flow_graph); 933 FlowGraphTypePropagator::Propagate(flow_graph);
906 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 934 DEBUG_ASSERT(flow_graph->VerifyUseLists());
907 935
908 // We have to perform range analysis after LICM because it 936 // We have to perform range analysis after LICM because it
909 // optimistically moves CheckSmi through phis into loop preheaders 937 // optimistically moves CheckSmi through phis into loop preheaders
910 // making some phis smi. 938 // making some phis smi.
911 optimizer.InferIntRanges(); 939 optimizer.InferIntRanges();
912 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 940 DEBUG_ASSERT(flow_graph->VerifyUseLists());
913 } 941 }
914 942
915 if (FLAG_constant_propagation) { 943 if (FLAG_constant_propagation) {
944 #ifndef PRODUCT
916 TimelineDurationScope tds2(thread(), 945 TimelineDurationScope tds2(thread(),
917 compiler_timeline, 946 compiler_timeline,
918 "ConstantPropagator::OptimizeBranches"); 947 "ConstantPropagator::OptimizeBranches");
948 #endif // !PRODUCT
919 // Constant propagation can use information from range analysis to 949 // Constant propagation can use information from range analysis to
920 // find unreachable branch targets and eliminate branches that have 950 // find unreachable branch targets and eliminate branches that have
921 // the same true- and false-target. 951 // the same true- and false-target.
922 ConstantPropagator::OptimizeBranches(flow_graph); 952 ConstantPropagator::OptimizeBranches(flow_graph);
923 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 953 DEBUG_ASSERT(flow_graph->VerifyUseLists());
924 } 954 }
925 955
926 // Recompute types after code movement was done to ensure correct 956 // Recompute types after code movement was done to ensure correct
927 // reaching types for hoisted values. 957 // reaching types for hoisted values.
928 FlowGraphTypePropagator::Propagate(flow_graph); 958 FlowGraphTypePropagator::Propagate(flow_graph);
929 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 959 DEBUG_ASSERT(flow_graph->VerifyUseLists());
930 960
931 { 961 {
962 #ifndef PRODUCT
932 TimelineDurationScope tds2(thread(), 963 TimelineDurationScope tds2(thread(),
933 compiler_timeline, 964 compiler_timeline,
934 "TryCatchAnalyzer::Optimize"); 965 "TryCatchAnalyzer::Optimize");
966 #endif // !PRODUCT
935 // Optimize try-blocks. 967 // Optimize try-blocks.
936 TryCatchAnalyzer::Optimize(flow_graph); 968 TryCatchAnalyzer::Optimize(flow_graph);
937 } 969 }
938 970
939 // Detach environments from the instructions that can't deoptimize. 971 // Detach environments from the instructions that can't deoptimize.
940 // Do it before we attempt to perform allocation sinking to minimize 972 // Do it before we attempt to perform allocation sinking to minimize
941 // amount of materializations it has to perform. 973 // amount of materializations it has to perform.
942 optimizer.EliminateEnvironments(); 974 optimizer.EliminateEnvironments();
943 975
944 { 976 {
977 #ifndef PRODUCT
945 TimelineDurationScope tds2(thread(), 978 TimelineDurationScope tds2(thread(),
946 compiler_timeline, 979 compiler_timeline,
947 "EliminateDeadPhis"); 980 "EliminateDeadPhis");
981 #endif // !PRODUCT
948 DeadCodeElimination::EliminateDeadPhis(flow_graph); 982 DeadCodeElimination::EliminateDeadPhis(flow_graph);
949 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 983 DEBUG_ASSERT(flow_graph->VerifyUseLists());
950 } 984 }
951 985
952 if (optimizer.Canonicalize()) { 986 if (optimizer.Canonicalize()) {
953 optimizer.Canonicalize(); 987 optimizer.Canonicalize();
954 } 988 }
955 989
956 // Attempt to sink allocations of temporary non-escaping objects to 990 // Attempt to sink allocations of temporary non-escaping objects to
957 // the deoptimization path. 991 // the deoptimization path.
958 AllocationSinking* sinking = NULL; 992 AllocationSinking* sinking = NULL;
959 if (FLAG_allocation_sinking && 993 if (FLAG_allocation_sinking &&
960 (flow_graph->graph_entry()->SuccessorCount() == 1)) { 994 (flow_graph->graph_entry()->SuccessorCount() == 1)) {
995 #ifndef PRODUCT
961 TimelineDurationScope tds2(thread(), 996 TimelineDurationScope tds2(thread(),
962 compiler_timeline, 997 compiler_timeline,
963 "AllocationSinking::Optimize"); 998 "AllocationSinking::Optimize");
999 #endif // !PRODUCT
964 // TODO(fschneider): Support allocation sinking with try-catch. 1000 // TODO(fschneider): Support allocation sinking with try-catch.
965 sinking = new AllocationSinking(flow_graph); 1001 sinking = new AllocationSinking(flow_graph);
966 sinking->Optimize(); 1002 sinking->Optimize();
967 } 1003 }
968 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 1004 DEBUG_ASSERT(flow_graph->VerifyUseLists());
969 1005
970 DeadCodeElimination::EliminateDeadPhis(flow_graph); 1006 DeadCodeElimination::EliminateDeadPhis(flow_graph);
971 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 1007 DEBUG_ASSERT(flow_graph->VerifyUseLists());
972 1008
973 FlowGraphTypePropagator::Propagate(flow_graph); 1009 FlowGraphTypePropagator::Propagate(flow_graph);
974 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 1010 DEBUG_ASSERT(flow_graph->VerifyUseLists());
975 1011
976 { 1012 {
1013 #ifndef PRODUCT
977 TimelineDurationScope tds2(thread(), 1014 TimelineDurationScope tds2(thread(),
978 compiler_timeline, 1015 compiler_timeline,
979 "SelectRepresentations"); 1016 "SelectRepresentations");
1017 #endif // !PRODUCT
980 // Ensure that all phis inserted by optimization passes have 1018 // Ensure that all phis inserted by optimization passes have
981 // consistent representations. 1019 // consistent representations.
982 optimizer.SelectRepresentations(); 1020 optimizer.SelectRepresentations();
983 } 1021 }
984 1022
985 if (optimizer.Canonicalize()) { 1023 if (optimizer.Canonicalize()) {
986 // To fully remove redundant boxing (e.g. BoxDouble used only in 1024 // To fully remove redundant boxing (e.g. BoxDouble used only in
987 // environments and UnboxDouble instructions) instruction we 1025 // environments and UnboxDouble instructions) instruction we
988 // first need to replace all their uses and then fold them away. 1026 // first need to replace all their uses and then fold them away.
989 // For now we just repeat Canonicalize twice to do that. 1027 // For now we just repeat Canonicalize twice to do that.
990 // TODO(vegorov): implement a separate representation folding pass. 1028 // TODO(vegorov): implement a separate representation folding pass.
991 optimizer.Canonicalize(); 1029 optimizer.Canonicalize();
992 } 1030 }
993 DEBUG_ASSERT(flow_graph->VerifyUseLists()); 1031 DEBUG_ASSERT(flow_graph->VerifyUseLists());
994 1032
995 if (sinking != NULL) { 1033 if (sinking != NULL) {
1034 #ifndef PRODUCT
996 TimelineDurationScope tds2( 1035 TimelineDurationScope tds2(
997 thread(), 1036 thread(),
998 compiler_timeline, 1037 compiler_timeline,
999 "AllocationSinking::DetachMaterializations"); 1038 "AllocationSinking::DetachMaterializations");
1039 #endif // !PRODUCT
1000 // Remove all MaterializeObject instructions inserted by allocation 1040 // Remove all MaterializeObject instructions inserted by allocation
1001 // sinking from the flow graph and let them float on the side 1041 // sinking from the flow graph and let them float on the side
1002 // referenced only from environments. Register allocator will consider 1042 // referenced only from environments. Register allocator will consider
1003 // them as part of a deoptimization environment. 1043 // them as part of a deoptimization environment.
1004 sinking->DetachMaterializations(); 1044 sinking->DetachMaterializations();
1005 } 1045 }
1006 1046
1007 // Compute and store graph informations (call & instruction counts) 1047 // Compute and store graph informations (call & instruction counts)
1008 // to be later used by the inliner. 1048 // to be later used by the inliner.
1009 FlowGraphInliner::CollectGraphInfo(flow_graph, true); 1049 FlowGraphInliner::CollectGraphInfo(flow_graph, true);
1010 1050
1011 { 1051 {
1052 #ifndef PRODUCT
1012 TimelineDurationScope tds2(thread(), 1053 TimelineDurationScope tds2(thread(),
1013 compiler_timeline, 1054 compiler_timeline,
1014 "AllocateRegisters"); 1055 "AllocateRegisters");
1056 #endif // !PRODUCT
1015 // Perform register allocation on the SSA graph. 1057 // Perform register allocation on the SSA graph.
1016 FlowGraphAllocator allocator(*flow_graph); 1058 FlowGraphAllocator allocator(*flow_graph);
1017 allocator.AllocateRegisters(); 1059 allocator.AllocateRegisters();
1018 } 1060 }
1019 1061
1020 if (reorder_blocks) { 1062 if (reorder_blocks) {
1063 #ifndef PRODUCT
1021 TimelineDurationScope tds(thread(), 1064 TimelineDurationScope tds(thread(),
1022 compiler_timeline, 1065 compiler_timeline,
1023 "BlockScheduler::ReorderBlocks"); 1066 "BlockScheduler::ReorderBlocks");
1067 #endif // !PRODUCT
1024 block_scheduler.ReorderBlocks(); 1068 block_scheduler.ReorderBlocks();
1025 } 1069 }
1026 1070
1027 if (print_flow_graph) { 1071 if (print_flow_graph) {
1028 FlowGraphPrinter::PrintGraph("After Optimizations", flow_graph); 1072 FlowGraphPrinter::PrintGraph("After Optimizations", flow_graph);
1029 } 1073 }
1030 } 1074 }
1031 1075
1032 ASSERT(inline_id_to_function.length() == caller_inline_id.length()); 1076 ASSERT(inline_id_to_function.length() == caller_inline_id.length());
1033 Assembler assembler(use_far_branches); 1077 Assembler assembler(use_far_branches);
1034 FlowGraphCompiler graph_compiler(&assembler, flow_graph, 1078 FlowGraphCompiler graph_compiler(&assembler, flow_graph,
1035 *parsed_function(), optimized(), 1079 *parsed_function(), optimized(),
1036 inline_id_to_function, 1080 inline_id_to_function,
1037 caller_inline_id); 1081 caller_inline_id);
1038 { 1082 {
1039 CSTAT_TIMER_SCOPE(thread(), graphcompiler_timer); 1083 CSTAT_TIMER_SCOPE(thread(), graphcompiler_timer);
1084 #ifndef PRODUCT
1040 TimelineDurationScope tds(thread(), 1085 TimelineDurationScope tds(thread(),
1041 compiler_timeline, 1086 compiler_timeline,
1042 "CompileGraph"); 1087 "CompileGraph");
1088 #endif // !PRODUCT
1043 graph_compiler.CompileGraph(); 1089 graph_compiler.CompileGraph();
1044 pipeline->FinalizeCompilation(); 1090 pipeline->FinalizeCompilation();
1045 } 1091 }
1046 { 1092 {
1093 #ifndef PRODUCT
1047 TimelineDurationScope tds(thread(), 1094 TimelineDurationScope tds(thread(),
1048 compiler_timeline, 1095 compiler_timeline,
1049 "FinalizeCompilation"); 1096 "FinalizeCompilation");
1097 #endif // !PRODUCT
1050 if (thread()->IsMutatorThread()) { 1098 if (thread()->IsMutatorThread()) {
1051 FinalizeCompilation(&assembler, &graph_compiler, flow_graph); 1099 FinalizeCompilation(&assembler, &graph_compiler, flow_graph);
1052 } else { 1100 } else {
1053 // This part of compilation must be at a safepoint. 1101 // This part of compilation must be at a safepoint.
1054 // Stop mutator thread before creating the instruction object and 1102 // Stop mutator thread before creating the instruction object and
1055 // installing code. 1103 // installing code.
1056 // Mutator thread may not run code while we are creating the 1104 // Mutator thread may not run code while we are creating the
1057 // instruction object, since the creation of instruction object 1105 // instruction object, since the creation of instruction object
1058 // changes code page access permissions (makes them temporary not 1106 // changes code page access permissions (makes them temporary not
1059 // executable). 1107 // executable).
(...skipping 572 matching lines...)
1632 } 1680 }
1633 1681
1634 1682
1635 1683
1636 RawObject* Compiler::ExecuteOnce(SequenceNode* fragment) { 1684 RawObject* Compiler::ExecuteOnce(SequenceNode* fragment) {
1637 LongJumpScope jump; 1685 LongJumpScope jump;
1638 if (setjmp(*jump.Set()) == 0) { 1686 if (setjmp(*jump.Set()) == 0) {
1639 Thread* const thread = Thread::Current(); 1687 Thread* const thread = Thread::Current();
1640 if (FLAG_trace_compiler) { 1688 if (FLAG_trace_compiler) {
1641 THR_Print("compiling expression: "); 1689 THR_Print("compiling expression: ");
1642 AstPrinter::PrintNode(fragment); 1690 if (FLAG_support_ast_printer) {
1691 AstPrinter::PrintNode(fragment);
1692 }
1643 } 1693 }
1644 1694
1645 // Create a dummy function object for the code generator. 1695 // Create a dummy function object for the code generator.
1646 // The function needs to be associated with a named Class: the interface 1696 // The function needs to be associated with a named Class: the interface
1647 // Function fits the bill. 1697 // Function fits the bill.
1648 const char* kEvalConst = "eval_const"; 1698 const char* kEvalConst = "eval_const";
1649 const Function& func = Function::ZoneHandle(Function::New( 1699 const Function& func = Function::ZoneHandle(Function::New(
1650 String::Handle(Symbols::New(kEvalConst)), 1700 String::Handle(Symbols::New(kEvalConst)),
1651 RawFunction::kRegularFunction, 1701 RawFunction::kRegularFunction,
1652 true, // static function 1702 true, // static function
(...skipping 400 matching lines...)
2053 } 2103 }
2054 2104
2055 2105
2056 void BackgroundCompiler::EnsureInit(Thread* thread) { 2106 void BackgroundCompiler::EnsureInit(Thread* thread) {
2057 UNREACHABLE(); 2107 UNREACHABLE();
2058 } 2108 }
2059 2109
2060 #endif // DART_PRECOMPILED_RUNTIME 2110 #endif // DART_PRECOMPILED_RUNTIME
2061 2111
2062 } // namespace dart 2112 } // namespace dart
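
Note that the ExecuteOnce hunk above takes a different route: the AstPrinter::PrintNode() call is wrapped in if (FLAG_support_ast_printer) rather than an #ifndef, so the call site stays syntactically checked in every configuration. Presumably the flag becomes a compile-time constant in product builds, letting the compiler discard the guarded call; a rough illustration of that idea, with a hand-written stand-in for the VM's real flag machinery:

  #if defined(PRODUCT)
  const bool FLAG_support_ast_printer = false;  // constant false: the guarded branch
                                                // below can be dead-stripped
  #else
  bool FLAG_support_ast_printer = true;         // ordinary flag outside product mode
  #endif

  if (FLAG_trace_compiler) {
    THR_Print("compiling expression: ");
    if (FLAG_support_ast_printer) {
      AstPrinter::PrintNode(fragment);          // never emitted in product builds
    }
  }
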