Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(207)

Side by Side Diff: runtime/vm/precompiler.cc

Issue 1663163003: Initial split of precompilation code from compiler.cc (Closed) Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: rebased Created 4 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « runtime/vm/precompiler.h ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2015, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2015, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/precompiler.h" 5 #include "vm/precompiler.h"
6 6
7 #include "vm/assembler.h"
8 #include "vm/ast_printer.h"
9 #include "vm/branch_optimizer.h"
7 #include "vm/cha.h" 10 #include "vm/cha.h"
11 #include "vm/code_generator.h"
8 #include "vm/code_patcher.h" 12 #include "vm/code_patcher.h"
9 #include "vm/compiler.h" 13 #include "vm/compiler.h"
14 #include "vm/constant_propagator.h"
15 #include "vm/dart_entry.h"
16 #include "vm/disassembler.h"
17 #include "vm/exceptions.h"
18 #include "vm/flags.h"
19 #include "vm/flow_graph.h"
20 #include "vm/flow_graph_allocator.h"
21 #include "vm/flow_graph_builder.h"
22 #include "vm/flow_graph_compiler.h"
23 #include "vm/flow_graph_inliner.h"
24 #include "vm/flow_graph_optimizer.h"
25 #include "vm/flow_graph_type_propagator.h"
10 #include "vm/hash_table.h" 26 #include "vm/hash_table.h"
27 #include "vm/il_printer.h"
11 #include "vm/isolate.h" 28 #include "vm/isolate.h"
12 #include "vm/log.h" 29 #include "vm/log.h"
13 #include "vm/longjump.h" 30 #include "vm/longjump.h"
14 #include "vm/object.h" 31 #include "vm/object.h"
15 #include "vm/object_store.h" 32 #include "vm/object_store.h"
33 #include "vm/os.h"
34 #include "vm/parser.h"
35 #include "vm/redundancy_elimination.h"
36 #include "vm/regexp_assembler.h"
37 #include "vm/regexp_parser.h"
16 #include "vm/resolver.h" 38 #include "vm/resolver.h"
17 #include "vm/symbols.h" 39 #include "vm/symbols.h"
40 #include "vm/tags.h"
41 #include "vm/timer.h"
18 42
19 namespace dart { 43 namespace dart {
20 44
21 45
// Shorthand accessors used throughout this file. They expand against the
// enclosing object's thread()/isolate()/zone() member accessors.
#define T (thread())
#define I (isolate())
#define Z (zone())
26 50
27 DEFINE_FLAG(bool, collect_dynamic_function_names, false, 51 DEFINE_FLAG(bool, collect_dynamic_function_names, false,
28 "In precompilation collects all dynamic function names in order to" 52 "In precompilation collects all dynamic function names in order to"
29 " identify unique targets"); 53 " identify unique targets");
30 DEFINE_FLAG(bool, print_unique_targets, false, "Print unique dynaic targets"); 54 DEFINE_FLAG(bool, print_unique_targets, false, "Print unique dynaic targets");
31 DEFINE_FLAG(bool, trace_precompiler, false, "Trace precompiler."); 55 DEFINE_FLAG(bool, trace_precompiler, false, "Trace precompiler.");
56 DEFINE_FLAG(int, max_speculative_inlining_attempts, 1,
57 "Max number of attempts with speculative inlining (precompilation only)");
58
59 DECLARE_FLAG(bool, allocation_sinking);
60 DECLARE_FLAG(bool, common_subexpression_elimination);
61 DECLARE_FLAG(bool, constant_propagation);
62 DECLARE_FLAG(bool, disassemble);
63 DECLARE_FLAG(bool, disassemble_optimized);
64 DECLARE_FLAG(bool, loop_invariant_code_motion);
65 DECLARE_FLAG(bool, print_flow_graph);
66 DECLARE_FLAG(bool, print_flow_graph_optimized);
67 DECLARE_FLAG(bool, range_analysis);
68 DECLARE_FLAG(bool, trace_compiler);
69 DECLARE_FLAG(bool, trace_optimizing_compiler);
70 DECLARE_FLAG(bool, trace_bailout);
71 DECLARE_FLAG(bool, use_inlining);
72 DECLARE_FLAG(bool, verify_compiler);
73 DECLARE_FLAG(bool, precompilation);
74 DECLARE_FLAG(bool, huge_method_cutoff_in_code_size);
75 DECLARE_FLAG(bool, load_deferred_eagerly);
76 DECLARE_FLAG(bool, trace_failed_optimization_attempts);
77 DECLARE_FLAG(bool, trace_inlining_intervals);
78 DECLARE_FLAG(bool, trace_irregexp);
79
80 #ifdef DART_PRECOMPILER
81
// Drives compilation of a single already-parsed function during
// precompilation, optionally with optimizations enabled. Stack-allocated
// (ValueObject); captures the current thread at construction.
class PrecompileParsedFunctionHelper : public ValueObject {
 public:
  PrecompileParsedFunctionHelper(ParsedFunction* parsed_function,
                                 bool optimized)
      : parsed_function_(parsed_function),
        optimized_(optimized),
        thread_(Thread::Current()) {
  }

  // Runs |pipeline| over the parsed function. Returns false on bailout.
  bool Compile(CompilationPipeline* pipeline);

 private:
  ParsedFunction* parsed_function() const { return parsed_function_; }
  bool optimized() const { return optimized_; }
  Thread* thread() const { return thread_; }
  Isolate* isolate() const { return thread_->isolate(); }

  // Installs the generated code (deopt info, metadata tables, Code object)
  // on the function after a successful compile.
  void FinalizeCompilation(Assembler* assembler,
                           FlowGraphCompiler* graph_compiler,
                           FlowGraph* flow_graph);

  ParsedFunction* parsed_function_;
  const bool optimized_;
  Thread* const thread_;

  DISALLOW_COPY_AND_ASSIGN(PrecompileParsedFunctionHelper);
};
32 109
33 110
// Aborts the current compilation attempt by long-jumping to the enclosing
// setjmp site, propagating |error| (retrieved there via the sticky error).
static void Jump(const Error& error) {
  Thread::Current()->long_jump_base()->Jump(1, error);
}
37 114
38 115
39 RawError* Precompiler::CompileAll( 116 RawError* Precompiler::CompileAll(
40 Dart_QualifiedFunctionName embedder_entry_points[], 117 Dart_QualifiedFunctionName embedder_entry_points[],
41 bool reset_fields) { 118 bool reset_fields) {
(...skipping 358 matching lines...) Expand 10 before | Expand all | Expand 10 after
400 THR_Print("Precompiling %" Pd " %s (%s, %s)\n", 477 THR_Print("Precompiling %" Pd " %s (%s, %s)\n",
401 function_count_, 478 function_count_,
402 function.ToLibNamePrefixedQualifiedCString(), 479 function.ToLibNamePrefixedQualifiedCString(),
403 function.token_pos().ToCString(), 480 function.token_pos().ToCString(),
404 Function::KindToCString(function.kind())); 481 Function::KindToCString(function.kind()));
405 } 482 }
406 483
407 ASSERT(!function.is_abstract()); 484 ASSERT(!function.is_abstract());
408 ASSERT(!function.IsRedirectingFactory()); 485 ASSERT(!function.IsRedirectingFactory());
409 486
410 error_ = Compiler::CompileFunction(thread_, function); 487 error_ = CompileFunction(thread_, function);
411 if (!error_.IsNull()) { 488 if (!error_.IsNull()) {
412 Jump(error_); 489 Jump(error_);
413 } 490 }
414 } else { 491 } else {
415 if (FLAG_trace_precompiler) { 492 if (FLAG_trace_precompiler) {
416 // This function was compiled from somewhere other than Precompiler, 493 // This function was compiled from somewhere other than Precompiler,
417 // such as const constructors compiled by the parser. 494 // such as const constructors compiled by the parser.
418 THR_Print("Already has code: %s (%s, %s)\n", 495 THR_Print("Already has code: %s (%s, %s)\n",
419 function.ToLibNamePrefixedQualifiedCString(), 496 function.ToLibNamePrefixedQualifiedCString(),
420 function.token_pos().ToCString(), 497 function.token_pos().ToCString(),
(...skipping 163 matching lines...) Expand 10 before | Expand all | Expand 10 after
584 661
585 const bool is_initialized = value.raw() != Object::sentinel().raw(); 662 const bool is_initialized = value.raw() != Object::sentinel().raw();
586 if (is_initialized && !reset_fields_) return; 663 if (is_initialized && !reset_fields_) return;
587 664
588 if (!field.HasPrecompiledInitializer()) { 665 if (!field.HasPrecompiledInitializer()) {
589 if (FLAG_trace_precompiler) { 666 if (FLAG_trace_precompiler) {
590 THR_Print("Precompiling initializer for %s\n", field.ToCString()); 667 THR_Print("Precompiling initializer for %s\n", field.ToCString());
591 } 668 }
592 ASSERT(!Dart::IsRunningPrecompiledCode()); 669 ASSERT(!Dart::IsRunningPrecompiledCode());
593 field.SetStaticValue(Instance::Handle(field.SavedInitialStaticValue())); 670 field.SetStaticValue(Instance::Handle(field.SavedInitialStaticValue()));
594 Compiler::CompileStaticInitializer(field); 671 const Function& initializer =
672 Function::Handle(CompileStaticInitializer(field));
673 if (!initializer.IsNull()) {
674 field.SetPrecompiledInitializer(initializer);
675 }
595 } 676 }
596 677
597 const Function& function = 678 const Function& function =
598 Function::Handle(Z, field.PrecompiledInitializer()); 679 Function::Handle(Z, field.PrecompiledInitializer());
599 AddCalleesOf(function); 680 AddCalleesOf(function);
600 } 681 }
601 } 682 }
602 } 683 }
603 684
604 685
686 RawFunction* Precompiler::CompileStaticInitializer(const Field& field) {
687 ASSERT(field.is_static());
688 if (field.HasPrecompiledInitializer()) {
689 // TODO(rmacnak): Investigate why this happens for _enum_names.
690 OS::Print("Warning: Ignoring repeated request for initializer for %s\n",
691 field.ToCString());
692 return Function::null();
693 }
694 Thread* thread = Thread::Current();
695 StackZone zone(thread);
696
697 ParsedFunction* parsed_function = Parser::ParseStaticFieldInitializer(field);
698
699 parsed_function->AllocateVariables();
700 // Non-optimized code generator.
701 DartCompilationPipeline pipeline;
702 PrecompileParsedFunctionHelper helper(parsed_function,
703 /* optimized = */ false);
704 helper.Compile(&pipeline);
705 return parsed_function->function().raw();
706 }
707
708
// Evaluates the initializer of static |field| and returns the resulting
// value, or an Error object if compilation/evaluation long-jumped.
RawObject* Precompiler::EvaluateStaticInitializer(const Field& field) {
  ASSERT(field.is_static());
  // The VM sets the field's value to transition_sentinel prior to
  // evaluating the initializer value.
  ASSERT(field.StaticValue() == Object::transition_sentinel().raw());
  LongJumpScope jump;
  if (setjmp(*jump.Set()) == 0) {
    // Under precompilation, the initializer may have already been compiled, in
    // which case use it. Under lazy compilation or early in precompilation, the
    // initializer has not yet been created, so create it now, but don't bother
    // remembering it because it won't be used again.
    Function& initializer = Function::Handle();
    if (!field.HasPrecompiledInitializer()) {
      initializer = CompileStaticInitializer(field);
      // Synthetic initializer code is never debugged: attach empty var
      // descriptors so they are not computed lazily later.
      Code::Handle(initializer.unoptimized_code()).set_var_descriptors(
          Object::empty_var_descriptors());
    } else {
      initializer ^= field.PrecompiledInitializer();
    }
    // Invoke the function to evaluate the expression.
    return DartEntry::InvokeFunction(initializer, Object::empty_array());
  } else {
    // A longjmp landed here: surface the thread's sticky error as the result.
    Thread* const thread = Thread::Current();
    StackZone zone(thread);
    const Error& error =
        Error::Handle(thread->zone(), thread->sticky_error());
    thread->clear_sticky_error();
    return error.raw();
  }
  UNREACHABLE();
  return Object::null();
}
741
742
// Compiles and runs a one-off expression |fragment| by wrapping it in a
// synthetic static function ("eval_const"). Returns the evaluation result,
// or the sticky error object if compilation/execution long-jumped.
RawObject* Precompiler::ExecuteOnce(SequenceNode* fragment) {
  LongJumpScope jump;
  if (setjmp(*jump.Set()) == 0) {
    Thread* const thread = Thread::Current();
    if (FLAG_trace_compiler) {
      THR_Print("compiling expression: ");
      AstPrinter::PrintNode(fragment);
    }

    // Create a dummy function object for the code generator.
    // The function needs to be associated with a named Class: the interface
    // Function fits the bill.
    const char* kEvalConst = "eval_const";
    const Function& func = Function::ZoneHandle(Function::New(
        String::Handle(Symbols::New(kEvalConst)),
        RawFunction::kRegularFunction,
        true,  // static function
        false,  // not const function
        false,  // not abstract
        false,  // not external
        false,  // not native
        Class::Handle(Type::Handle(Type::Function()).type_class()),
        fragment->token_pos()));

    func.set_result_type(Object::dynamic_type());
    func.set_num_fixed_parameters(0);
    func.SetNumOptionalParameters(0, true);
    // Manually generated AST, do not recompile.
    func.SetIsOptimizable(false);
    func.set_is_debuggable(false);

    // We compile the function here, even though InvokeFunction() below
    // would compile func automatically. We are checking fewer invariants
    // here.
    ParsedFunction* parsed_function = new ParsedFunction(thread, func);
    parsed_function->SetNodeSequence(fragment);
    fragment->scope()->AddVariable(parsed_function->EnsureExpressionTemp());
    fragment->scope()->AddVariable(
        parsed_function->current_context_var());
    parsed_function->AllocateVariables();

    // Non-optimized code generator.
    DartCompilationPipeline pipeline;
    PrecompileParsedFunctionHelper helper(parsed_function,
                                          /* optimized = */ false);
    helper.Compile(&pipeline);
    // Synthetic eval code is never debugged: attach empty var descriptors.
    Code::Handle(func.unoptimized_code()).set_var_descriptors(
        Object::empty_var_descriptors());

    const Object& result = PassiveObject::Handle(
        DartEntry::InvokeFunction(func, Object::empty_array()));
    return result.raw();
  } else {
    // A longjmp landed here: surface the thread's sticky error as the result.
    Thread* const thread = Thread::Current();
    const Object& result =
        PassiveObject::Handle(thread->sticky_error());
    thread->clear_sticky_error();
    return result.raw();
  }
  UNREACHABLE();
  return Object::null();
}
805
806
605 void Precompiler::AddFunction(const Function& function) { 807 void Precompiler::AddFunction(const Function& function) {
606 if (enqueued_functions_.Lookup(&function) != NULL) return; 808 if (enqueued_functions_.Lookup(&function) != NULL) return;
607 809
608 enqueued_functions_.Insert(&Function::ZoneHandle(Z, function.raw())); 810 enqueued_functions_.Insert(&Function::ZoneHandle(Z, function.raw()));
609 pending_functions_.Add(function); 811 pending_functions_.Add(function);
610 changed_ = true; 812 changed_ = true;
611 } 813 }
612 814
613 815
614 bool Precompiler::IsSent(const String& selector) { 816 bool Precompiler::IsSent(const String& selector) {
(...skipping 727 matching lines...) Expand 10 before | Expand all | Expand 10 after
1342 while (it.HasNext()) { 1544 while (it.HasNext()) {
1343 cls = it.GetNextClass(); 1545 cls = it.GetNextClass();
1344 if (cls.IsDynamicClass()) { 1546 if (cls.IsDynamicClass()) {
1345 continue; // class 'dynamic' is in the read-only VM isolate. 1547 continue; // class 'dynamic' is in the read-only VM isolate.
1346 } 1548 }
1347 cls.set_is_allocated(false); 1549 cls.set_is_allocated(false);
1348 } 1550 }
1349 } 1551 }
1350 } 1552 }
1351 1553
1554
// Turns the assembled instructions into a Code object, attaches all
// compiler-produced metadata (deopt info, inlining maps, pc descriptors,
// stackmaps, handlers), and installs the code on the function.
void PrecompileParsedFunctionHelper::FinalizeCompilation(
    Assembler* assembler,
    FlowGraphCompiler* graph_compiler,
    FlowGraph* flow_graph) {
  const Function& function = parsed_function()->function();
  Zone* const zone = thread()->zone();

  CSTAT_TIMER_SCOPE(thread(), codefinalizer_timer);
  // CreateDeoptInfo uses the object pool and needs to be done before
  // FinalizeCode.
  const Array& deopt_info_array =
      Array::Handle(zone, graph_compiler->CreateDeoptInfo(assembler));
  INC_STAT(thread(), total_code_size,
           deopt_info_array.Length() * sizeof(uword));
  // Allocates instruction object. Since this occurs only at safepoint,
  // there can be no concurrent access to the instruction page.
  const Code& code = Code::Handle(
      Code::FinalizeCode(function, assembler, optimized()));
  code.set_is_optimized(optimized());
  code.set_owner(function);
  if (!function.IsOptimizable()) {
    // A function with huge unoptimized code can become non-optimizable
    // after generating unoptimized code.
    function.set_usage_counter(INT_MIN);
  }

  // Record inlining metadata (code intervals, inline-id -> function map,
  // caller inlining-id map) used to attribute frames to inlined functions.
  const Array& intervals = graph_compiler->inlined_code_intervals();
  INC_STAT(thread(), total_code_size,
           intervals.Length() * sizeof(uword));
  code.SetInlinedIntervals(intervals);

  const Array& inlined_id_array =
      Array::Handle(zone, graph_compiler->InliningIdToFunction());
  INC_STAT(thread(), total_code_size,
           inlined_id_array.Length() * sizeof(uword));
  code.SetInlinedIdToFunction(inlined_id_array);

  const Array& caller_inlining_id_map_array =
      Array::Handle(zone, graph_compiler->CallerInliningIdMap());
  INC_STAT(thread(), total_code_size,
           caller_inlining_id_map_array.Length() * sizeof(uword));
  code.SetInlinedCallerIdMap(caller_inlining_id_map_array);

  graph_compiler->FinalizePcDescriptors(code);
  code.set_deopt_info_array(deopt_info_array);

  graph_compiler->FinalizeStackmaps(code);
  graph_compiler->FinalizeVarDescriptors(code);
  graph_compiler->FinalizeExceptionHandlers(code);
  graph_compiler->FinalizeStaticCallTargetsTable(code);

  if (optimized()) {
    // Installs code while at safepoint.
    ASSERT(thread()->IsMutatorThread());
    function.InstallOptimizedCode(code, /* is_osr = */ false);
  } else {  // not optimized.
    function.set_unoptimized_code(code);
    function.AttachCode(code);
  }
  // Precompilation loads all deferred libraries eagerly, so no deferred
  // prefixes should remain at this point.
  ASSERT(!parsed_function()->HasDeferredPrefixes());
  ASSERT(FLAG_load_deferred_eagerly);
}
1617
1618
1619 // Return false if bailed out.
1620 // If optimized_result_code is not NULL then it is caller's responsibility
1621 // to install code.
1622 bool PrecompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
1623 ASSERT(FLAG_precompilation);
1624 const Function& function = parsed_function()->function();
1625 if (optimized() && !function.IsOptimizable()) {
1626 return false;
1627 }
1628 bool is_compiled = false;
1629 Zone* const zone = thread()->zone();
1630 TimelineStream* compiler_timeline = isolate()->GetCompilerStream();
1631 CSTAT_TIMER_SCOPE(thread(), codegen_timer);
1632 HANDLESCOPE(thread());
1633
1634 // We may reattempt compilation if the function needs to be assembled using
1635 // far branches on ARM and MIPS. In the else branch of the setjmp call,
1636 // done is set to false, and use_far_branches is set to true if there is a
1637 // longjmp from the ARM or MIPS assemblers. In all other paths through this
1638 // while loop, done is set to true. use_far_branches is always false on ia32
1639 // and x64.
1640 bool done = false;
1641 // volatile because the variable may be clobbered by a longjmp.
1642 volatile bool use_far_branches = false;
1643 volatile bool use_speculative_inlining =
1644 FLAG_max_speculative_inlining_attempts > 0;
1645 GrowableArray<intptr_t> inlining_black_list;
1646
1647 while (!done) {
1648 const intptr_t prev_deopt_id = thread()->deopt_id();
1649 thread()->set_deopt_id(0);
1650 LongJumpScope jump;
1651 const intptr_t val = setjmp(*jump.Set());
1652 if (val == 0) {
1653 FlowGraph* flow_graph = NULL;
1654
1655 // Class hierarchy analysis is registered with the isolate in the
1656 // constructor and unregisters itself upon destruction.
1657 CHA cha(thread());
1658
1659 // TimerScope needs an isolate to be properly terminated in case of a
1660 // LongJump.
1661 {
1662 CSTAT_TIMER_SCOPE(thread(), graphbuilder_timer);
1663 ZoneGrowableArray<const ICData*>* ic_data_array =
1664 new(zone) ZoneGrowableArray<const ICData*>();
1665 TimelineDurationScope tds(thread(),
1666 compiler_timeline,
1667 "BuildFlowGraph");
1668 flow_graph = pipeline->BuildFlowGraph(zone,
1669 parsed_function(),
1670 *ic_data_array,
1671 Compiler::kNoOSRDeoptId);
1672 }
1673
1674 const bool print_flow_graph =
1675 (FLAG_print_flow_graph ||
1676 (optimized() && FLAG_print_flow_graph_optimized)) &&
1677 FlowGraphPrinter::ShouldPrint(function);
1678
1679 if (print_flow_graph) {
1680 FlowGraphPrinter::PrintGraph("Before Optimizations", flow_graph);
1681 }
1682
1683 if (optimized()) {
1684 TimelineDurationScope tds(thread(),
1685 compiler_timeline,
1686 "ComputeSSA");
1687 CSTAT_TIMER_SCOPE(thread(), ssa_timer);
1688 // Transform to SSA (virtual register 0 and no inlining arguments).
1689 flow_graph->ComputeSSA(0, NULL);
1690 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1691 if (print_flow_graph) {
1692 FlowGraphPrinter::PrintGraph("After SSA", flow_graph);
1693 }
1694 }
1695
1696 // Maps inline_id_to_function[inline_id] -> function. Top scope
1697 // function has inline_id 0. The map is populated by the inliner.
1698 GrowableArray<const Function*> inline_id_to_function;
1699 // For a given inlining-id(index) specifies the caller's inlining-id.
1700 GrowableArray<intptr_t> caller_inline_id;
1701 // Collect all instance fields that are loaded in the graph and
1702 // have non-generic type feedback attached to them that can
1703 // potentially affect optimizations.
1704 if (optimized()) {
1705 TimelineDurationScope tds(thread(),
1706 compiler_timeline,
1707 "OptimizationPasses");
1708 inline_id_to_function.Add(&function);
1709 // Top scope function has no caller (-1).
1710 caller_inline_id.Add(-1);
1711 CSTAT_TIMER_SCOPE(thread(), graphoptimizer_timer);
1712
1713 FlowGraphOptimizer optimizer(flow_graph,
1714 use_speculative_inlining,
1715 &inlining_black_list);
1716 optimizer.PopulateWithICData();
1717
1718 optimizer.ApplyClassIds();
1719 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1720
1721 FlowGraphTypePropagator::Propagate(flow_graph);
1722 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1723
1724 optimizer.ApplyICData();
1725 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1726
1727 // Optimize (a << b) & c patterns, merge operations.
1728 // Run early in order to have more opportunity to optimize left shifts.
1729 optimizer.TryOptimizePatterns();
1730 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1731
1732 FlowGraphInliner::SetInliningId(flow_graph, 0);
1733
1734 // Inlining (mutates the flow graph)
1735 if (FLAG_use_inlining) {
1736 TimelineDurationScope tds2(thread(),
1737 compiler_timeline,
1738 "Inlining");
1739 CSTAT_TIMER_SCOPE(thread(), graphinliner_timer);
1740 // Propagate types to create more inlining opportunities.
1741 FlowGraphTypePropagator::Propagate(flow_graph);
1742 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1743
1744 // Use propagated class-ids to create more inlining opportunities.
1745 optimizer.ApplyClassIds();
1746 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1747
1748 FlowGraphInliner inliner(flow_graph,
1749 &inline_id_to_function,
1750 &caller_inline_id,
1751 use_speculative_inlining,
1752 &inlining_black_list);
1753 inliner.Inline();
1754 // Use lists are maintained and validated by the inliner.
1755 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1756 }
1757
1758 // Propagate types and eliminate more type tests.
1759 FlowGraphTypePropagator::Propagate(flow_graph);
1760 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1761
1762 {
1763 TimelineDurationScope tds2(thread(),
1764 compiler_timeline,
1765 "ApplyClassIds");
1766 // Use propagated class-ids to optimize further.
1767 optimizer.ApplyClassIds();
1768 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1769 }
1770
1771 // Propagate types for potentially newly added instructions by
1772 // ApplyClassIds(). Must occur before canonicalization.
1773 FlowGraphTypePropagator::Propagate(flow_graph);
1774 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1775
1776 // Do optimizations that depend on the propagated type information.
1777 if (optimizer.Canonicalize()) {
1778 // Invoke Canonicalize twice in order to fully canonicalize patterns
1779 // like "if (a & const == 0) { }".
1780 optimizer.Canonicalize();
1781 }
1782 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1783
1784 {
1785 TimelineDurationScope tds2(thread(),
1786 compiler_timeline,
1787 "BranchSimplifier");
1788 BranchSimplifier::Simplify(flow_graph);
1789 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1790
1791 IfConverter::Simplify(flow_graph);
1792 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1793 }
1794
1795 if (FLAG_constant_propagation) {
1796 TimelineDurationScope tds2(thread(),
1797 compiler_timeline,
1798 "ConstantPropagation");
1799 ConstantPropagator::Optimize(flow_graph);
1800 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1801 // A canonicalization pass to remove e.g. smi checks on smi constants.
1802 optimizer.Canonicalize();
1803 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1804 // Canonicalization introduced more opportunities for constant
1805 // propagation.
1806 ConstantPropagator::Optimize(flow_graph);
1807 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1808 }
1809
1810 // Optimistically convert loop phis that have a single non-smi input
1811 // coming from the loop pre-header into smi-phis.
1812 if (FLAG_loop_invariant_code_motion) {
1813 LICM licm(flow_graph);
1814 licm.OptimisticallySpecializeSmiPhis();
1815 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1816 }
1817
1818 // Propagate types and eliminate even more type tests.
1819 // Recompute types after constant propagation to infer more precise
1820 // types for uses that were previously reached by now eliminated phis.
1821 FlowGraphTypePropagator::Propagate(flow_graph);
1822 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1823
1824 {
1825 TimelineDurationScope tds2(thread(),
1826 compiler_timeline,
1827 "SelectRepresentations");
1828 // Where beneficial convert Smi operations into Int32 operations.
1829 // Only meanigful for 32bit platforms right now.
1830 optimizer.WidenSmiToInt32();
1831
1832 // Unbox doubles. Performed after constant propagation to minimize
1833 // interference from phis merging double values and tagged
1834 // values coming from dead paths.
1835 optimizer.SelectRepresentations();
1836 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1837 }
1838
1839 {
1840 TimelineDurationScope tds2(thread(),
1841 compiler_timeline,
1842 "CommonSubexpressionElinination");
1843 if (FLAG_common_subexpression_elimination ||
1844 FLAG_loop_invariant_code_motion) {
1845 flow_graph->ComputeBlockEffects();
1846 }
1847
1848 if (FLAG_common_subexpression_elimination) {
1849 if (DominatorBasedCSE::Optimize(flow_graph)) {
1850 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1851 optimizer.Canonicalize();
1852 // Do another round of CSE to take secondary effects into account:
1853 // e.g. when eliminating dependent loads (a.x[0] + a.x[0])
1854 // TODO(fschneider): Change to a one-pass optimization pass.
1855 if (DominatorBasedCSE::Optimize(flow_graph)) {
1856 optimizer.Canonicalize();
1857 }
1858 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1859 }
1860 }
1861
1862 // Run loop-invariant code motion right after load elimination since
1863 // it depends on the numbering of loads from the previous
1864 // load-elimination.
1865 if (FLAG_loop_invariant_code_motion) {
1866 LICM licm(flow_graph);
1867 licm.Optimize();
1868 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1869 }
1870 flow_graph->RemoveRedefinitions();
1871 }
1872
1873 // Optimize (a << b) & c patterns, merge operations.
1874 // Run after CSE in order to have more opportunity to merge
1875 // instructions that have same inputs.
1876 optimizer.TryOptimizePatterns();
1877 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1878
1879 {
1880 TimelineDurationScope tds2(thread(),
1881 compiler_timeline,
1882 "DeadStoreElimination");
1883 DeadStoreElimination::Optimize(flow_graph);
1884 }
1885
1886 if (FLAG_range_analysis) {
1887 TimelineDurationScope tds2(thread(),
1888 compiler_timeline,
1889 "RangeAnalysis");
1890 // Propagate types after store-load-forwarding. Some phis may have
1891 // become smi phis that can be processed by range analysis.
1892 FlowGraphTypePropagator::Propagate(flow_graph);
1893 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1894
1895 // We have to perform range analysis after LICM because it
1896 // optimistically moves CheckSmi through phis into loop preheaders
1897 // making some phis smi.
1898 optimizer.InferIntRanges();
1899 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1900 }
1901
1902 if (FLAG_constant_propagation) {
1903 TimelineDurationScope tds2(thread(),
1904 compiler_timeline,
1905 "ConstantPropagator::OptimizeBranches");
1906 // Constant propagation can use information from range analysis to
1907 // find unreachable branch targets and eliminate branches that have
1908 // the same true- and false-target.
1909 ConstantPropagator::OptimizeBranches(flow_graph);
1910 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1911 }
1912
1913 // Recompute types after code movement was done to ensure correct
1914 // reaching types for hoisted values.
1915 FlowGraphTypePropagator::Propagate(flow_graph);
1916 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1917
1918 {
1919 TimelineDurationScope tds2(thread(),
1920 compiler_timeline,
1921 "TryCatchAnalyzer::Optimize");
1922 // Optimize try-blocks.
1923 TryCatchAnalyzer::Optimize(flow_graph);
1924 }
1925
1926 // Detach environments from the instructions that can't deoptimize.
1927 // Do it before we attempt to perform allocation sinking to minimize
1928 // amount of materializations it has to perform.
1929 optimizer.EliminateEnvironments();
1930
1931 {
1932 TimelineDurationScope tds2(thread(),
1933 compiler_timeline,
1934 "EliminateDeadPhis");
1935 DeadCodeElimination::EliminateDeadPhis(flow_graph);
1936 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1937 }
1938
1939 if (optimizer.Canonicalize()) {
1940 optimizer.Canonicalize();
1941 }
1942
1943 // Attempt to sink allocations of temporary non-escaping objects to
1944 // the deoptimization path.
1945 AllocationSinking* sinking = NULL;
1946 if (FLAG_allocation_sinking &&
1947 (flow_graph->graph_entry()->SuccessorCount() == 1)) {
1948 TimelineDurationScope tds2(thread(),
1949 compiler_timeline,
1950 "AllocationSinking::Optimize");
1951 // TODO(fschneider): Support allocation sinking with try-catch.
1952 sinking = new AllocationSinking(flow_graph);
1953 sinking->Optimize();
1954 }
1955 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1956
1957 DeadCodeElimination::EliminateDeadPhis(flow_graph);
1958 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1959
1960 FlowGraphTypePropagator::Propagate(flow_graph);
1961 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1962
1963 {
1964 TimelineDurationScope tds2(thread(),
1965 compiler_timeline,
1966 "SelectRepresentations");
1967 // Ensure that all phis inserted by optimization passes have
1968 // consistent representations.
1969 optimizer.SelectRepresentations();
1970 }
1971
1972 if (optimizer.Canonicalize()) {
1973 // To fully remove redundant boxing (e.g. BoxDouble used only in
1974 // environments and UnboxDouble instructions) instruction we
1975 // first need to replace all their uses and then fold them away.
1976 // For now we just repeat Canonicalize twice to do that.
1977 // TODO(vegorov): implement a separate representation folding pass.
1978 optimizer.Canonicalize();
1979 }
1980 DEBUG_ASSERT(flow_graph->VerifyUseLists());
1981
1982 if (sinking != NULL) {
1983 TimelineDurationScope tds2(
1984 thread(),
1985 compiler_timeline,
1986 "AllocationSinking::DetachMaterializations");
1987 // Remove all MaterializeObject instructions inserted by allocation
1988 // sinking from the flow graph and let them float on the side
1989 // referenced only from environments. Register allocator will consider
1990 // them as part of a deoptimization environment.
1991 sinking->DetachMaterializations();
1992 }
1993
1994 // Compute and store graph informations (call & instruction counts)
1995 // to be later used by the inliner.
1996 FlowGraphInliner::CollectGraphInfo(flow_graph, true);
1997
1998 {
1999 TimelineDurationScope tds2(thread(),
2000 compiler_timeline,
2001 "AllocateRegisters");
2002 // Perform register allocation on the SSA graph.
2003 FlowGraphAllocator allocator(*flow_graph);
2004 allocator.AllocateRegisters();
2005 }
2006
2007 if (print_flow_graph) {
2008 FlowGraphPrinter::PrintGraph("After Optimizations", flow_graph);
2009 }
2010 }
2011
2012 ASSERT(inline_id_to_function.length() == caller_inline_id.length());
2013 Assembler assembler(use_far_branches);
2014 FlowGraphCompiler graph_compiler(&assembler, flow_graph,
2015 *parsed_function(), optimized(),
2016 inline_id_to_function,
2017 caller_inline_id);
2018 {
2019 CSTAT_TIMER_SCOPE(thread(), graphcompiler_timer);
2020 TimelineDurationScope tds(thread(),
2021 compiler_timeline,
2022 "CompileGraph");
2023 graph_compiler.CompileGraph();
2024 pipeline->FinalizeCompilation();
2025 }
2026 {
2027 TimelineDurationScope tds(thread(),
2028 compiler_timeline,
2029 "FinalizeCompilation");
2030 ASSERT(thread()->IsMutatorThread());
2031 FinalizeCompilation(&assembler, &graph_compiler, flow_graph);
2032 }
2033 // Mark that this isolate now has compiled code.
2034 isolate()->set_has_compiled_code(true);
2035 // Exit the loop and the function with the correct result value.
2036 is_compiled = true;
2037 done = true;
2038 } else {
2039 // We bailed out or we encountered an error.
2040 const Error& error = Error::Handle(thread()->sticky_error());
2041
2042 if (error.raw() == Object::branch_offset_error().raw()) {
2043 // Compilation failed due to an out of range branch offset in the
2044 // assembler. We try again (done = false) with far branches enabled.
2045 done = false;
2046 ASSERT(!use_far_branches);
2047 use_far_branches = true;
2048 } else if (error.raw() == Object::speculative_inlining_error().raw()) {
2049 // The return value of setjmp is the deopt id of the check instruction
2050 // that caused the bailout.
2051 done = false;
2052 #if defined(DEBUG)
2053 ASSERT(use_speculative_inlining);
2054 for (intptr_t i = 0; i < inlining_black_list.length(); ++i) {
2055 ASSERT(inlining_black_list[i] != val);
2056 }
2057 #endif
2058 inlining_black_list.Add(val);
2059 const intptr_t max_attempts = FLAG_max_speculative_inlining_attempts;
2060 if (inlining_black_list.length() >= max_attempts) {
2061 use_speculative_inlining = false;
2062 if (FLAG_trace_compiler || FLAG_trace_optimizing_compiler) {
2063 THR_Print("Disabled speculative inlining after %" Pd " attempts.\n",
2064 inlining_black_list.length());
2065 }
2066 }
2067 } else {
2068 // If the error isn't due to an out of range branch offset, we don't
2069 // try again (done = true), and indicate that we did not finish
2070 // compiling (is_compiled = false).
2071 if (FLAG_trace_bailout) {
2072 THR_Print("%s\n", error.ToErrorCString());
2073 }
2074 done = true;
2075 }
2076
2077 // Clear the error if it was not a real error, but just a bailout.
2078 if (error.IsLanguageError() &&
2079 (LanguageError::Cast(error).kind() == Report::kBailout)) {
2080 thread()->clear_sticky_error();
2081 }
2082 is_compiled = false;
2083 }
2084 // Reset global isolate state.
2085 thread()->set_deopt_id(prev_deopt_id);
2086 }
2087 return is_compiled;
2088 }
2089
2090
2091 static RawError* PrecompileFunctionHelper(CompilationPipeline* pipeline,
2092 const Function& function,
2093 bool optimized) {
2094 // Check that we optimize, except if the function is not optimizable.
2095 ASSERT(FLAG_precompilation);
2096 ASSERT(!function.IsOptimizable() || optimized);
2097 ASSERT(!function.HasCode());
2098 LongJumpScope jump;
2099 if (setjmp(*jump.Set()) == 0) {
2100 Thread* const thread = Thread::Current();
2101 StackZone stack_zone(thread);
2102 Zone* const zone = stack_zone.GetZone();
2103 const bool trace_compiler =
2104 FLAG_trace_compiler ||
2105 (FLAG_trace_optimizing_compiler && optimized);
2106 Timer per_compile_timer(trace_compiler, "Compilation time");
2107 per_compile_timer.Start();
2108
2109 ParsedFunction* parsed_function = new(zone) ParsedFunction(
2110 thread, Function::ZoneHandle(zone, function.raw()));
2111 if (trace_compiler) {
2112 THR_Print(
2113 "Precompiling %sfunction: '%s' @ token %" Pd ", size %" Pd "\n",
2114 (optimized ? "optimized " : ""),
2115 function.ToFullyQualifiedCString(),
2116 function.token_pos().Pos(),
2117 (function.end_token_pos().Pos() - function.token_pos().Pos()));
2118 }
2119 INC_STAT(thread, num_functions_compiled, 1);
2120 if (optimized) {
2121 INC_STAT(thread, num_functions_optimized, 1);
2122 }
2123 {
2124 HANDLESCOPE(thread);
2125 const int64_t num_tokens_before = STAT_VALUE(thread, num_tokens_consumed);
2126 pipeline->ParseFunction(parsed_function);
2127 const int64_t num_tokens_after = STAT_VALUE(thread, num_tokens_consumed);
2128 INC_STAT(thread,
2129 num_func_tokens_compiled,
2130 num_tokens_after - num_tokens_before);
2131 }
2132
2133 PrecompileParsedFunctionHelper helper(parsed_function, optimized);
2134 const bool success = helper.Compile(pipeline);
2135 if (!success) {
2136 // Encountered error.
2137 Error& error = Error::Handle();
2138 // We got an error during compilation.
2139 error = thread->sticky_error();
2140 thread->clear_sticky_error();
2141 ASSERT(error.IsLanguageError() &&
2142 LanguageError::Cast(error).kind() != Report::kBailout);
2143 return error.raw();
2144 }
2145
2146 per_compile_timer.Stop();
2147
2148 if (trace_compiler && success) {
2149 THR_Print("--> '%s' entry: %#" Px " size: %" Pd " time: %" Pd64 " us\n",
2150 function.ToFullyQualifiedCString(),
2151 Code::Handle(function.CurrentCode()).EntryPoint(),
2152 Code::Handle(function.CurrentCode()).Size(),
2153 per_compile_timer.TotalElapsedTime());
2154 }
2155
2156 if (FLAG_disassemble && FlowGraphPrinter::ShouldPrint(function)) {
2157 Disassembler::DisassembleCode(function, optimized);
2158 } else if (FLAG_disassemble_optimized &&
2159 optimized &&
2160 FlowGraphPrinter::ShouldPrint(function)) {
2161 // TODO(fschneider): Print unoptimized code along with the optimized code.
2162 THR_Print("*** BEGIN CODE\n");
2163 Disassembler::DisassembleCode(function, true);
2164 THR_Print("*** END CODE\n");
2165 }
2166 return Error::null();
2167 } else {
2168 Thread* const thread = Thread::Current();
2169 StackZone stack_zone(thread);
2170 Error& error = Error::Handle();
2171 // We got an error during compilation.
2172 error = thread->sticky_error();
2173 thread->clear_sticky_error();
2174 // Precompilation may encounter compile-time errors.
2175 // Do not attempt to optimize functions that can cause errors.
2176 function.set_is_optimizable(false);
2177 return error.raw();
2178 }
2179 UNREACHABLE();
2180 return Error::null();
2181 }
2182
2183
2184 RawError* Precompiler::CompileFunction(Thread* thread,
2185 const Function& function) {
2186 VMTagScope tagScope(thread, VMTag::kCompileUnoptimizedTagId);
2187 TIMELINE_FUNCTION_COMPILATION_DURATION(thread, "Function", function);
2188
2189 CompilationPipeline* pipeline =
2190 CompilationPipeline::New(thread->zone(), function);
2191
2192 ASSERT(FLAG_precompilation);
2193 const bool optimized = function.IsOptimizable(); // False for natives.
2194 return PrecompileFunctionHelper(pipeline, function, optimized);
2195 }
2196
2197 #endif // DART_PRECOMPILER
2198
1352 } // namespace dart 2199 } // namespace dart
OLDNEW
« no previous file with comments | « runtime/vm/precompiler.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698