OLD | NEW |
---|---|
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_XXX. | 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_XXX. |
6 | 6 |
7 #include "vm/flow_graph_compiler.h" | 7 #include "vm/flow_graph_compiler.h" |
8 | 8 |
9 #include "vm/bit_vector.h" | 9 #include "vm/bit_vector.h" |
10 #include "vm/cha.h" | 10 #include "vm/cha.h" |
(...skipping 183 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
194 : thread_(Thread::Current()), | 194 : thread_(Thread::Current()), |
195 zone_(Thread::Current()->zone()), | 195 zone_(Thread::Current()->zone()), |
196 assembler_(assembler), | 196 assembler_(assembler), |
197 parsed_function_(parsed_function), | 197 parsed_function_(parsed_function), |
198 flow_graph_(*flow_graph), | 198 flow_graph_(*flow_graph), |
199 block_order_(*flow_graph->CodegenBlockOrder(is_optimizing)), | 199 block_order_(*flow_graph->CodegenBlockOrder(is_optimizing)), |
200 current_block_(NULL), | 200 current_block_(NULL), |
201 exception_handlers_list_(NULL), | 201 exception_handlers_list_(NULL), |
202 pc_descriptors_list_(NULL), | 202 pc_descriptors_list_(NULL), |
203 stackmap_table_builder_(NULL), | 203 stackmap_table_builder_(NULL), |
204 code_source_map_builder_(NULL), | |
205 saved_code_size_(0), | |
204 block_info_(block_order_.length()), | 206 block_info_(block_order_.length()), |
205 deopt_infos_(), | 207 deopt_infos_(), |
206 static_calls_target_table_(), | 208 static_calls_target_table_(), |
207 is_optimizing_(is_optimizing), | 209 is_optimizing_(is_optimizing), |
208 may_reoptimize_(false), | 210 may_reoptimize_(false), |
209 intrinsic_mode_(false), | 211 intrinsic_mode_(false), |
210 double_class_(Class::ZoneHandle( | 212 double_class_(Class::ZoneHandle( |
211 isolate()->object_store()->double_class())), | 213 isolate()->object_store()->double_class())), |
212 mint_class_(Class::ZoneHandle( | 214 mint_class_(Class::ZoneHandle( |
213 isolate()->object_store()->mint_class())), | 215 isolate()->object_store()->mint_class())), |
(...skipping 310 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
524 | 526 |
525 #if defined(DEBUG) | 527 #if defined(DEBUG) |
526 if (!is_optimizing()) { | 528 if (!is_optimizing()) { |
527 FrameStateClear(); | 529 FrameStateClear(); |
528 } | 530 } |
529 #endif | 531 #endif |
530 | 532 |
531 LoopInfoComment(assembler(), *entry, *loop_headers); | 533 LoopInfoComment(assembler(), *entry, *loop_headers); |
532 | 534 |
533 entry->set_offset(assembler()->CodeSize()); | 535 entry->set_offset(assembler()->CodeSize()); |
536 SaveCodeSize(); | |
534 entry->EmitNativeCode(this); | 537 entry->EmitNativeCode(this); |
538 MaybeEmitCodeSourceMapEntry(entry->token_pos()); | |
535 // Compile all successors until an exit, branch, or a block entry. | 539 // Compile all successors until an exit, branch, or a block entry. |
536 for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) { | 540 for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) { |
537 Instruction* instr = it.Current(); | 541 Instruction* instr = it.Current(); |
538 // Compose intervals. | 542 // Compose intervals. |
539 if (instr->has_inlining_id() && is_optimizing()) { | 543 if (instr->has_inlining_id() && is_optimizing()) { |
540 if (prev_inlining_id != instr->inlining_id()) { | 544 if (prev_inlining_id != instr->inlining_id()) { |
541 intervals.Add(IntervalStruct(prev_offset, prev_inlining_id)); | 545 intervals.Add(IntervalStruct(prev_offset, prev_inlining_id)); |
542 prev_offset = assembler()->CodeSize(); | 546 prev_offset = assembler()->CodeSize(); |
543 prev_inlining_id = instr->inlining_id(); | 547 prev_inlining_id = instr->inlining_id(); |
544 if (prev_inlining_id > max_inlining_id) { | 548 if (prev_inlining_id > max_inlining_id) { |
545 max_inlining_id = prev_inlining_id; | 549 max_inlining_id = prev_inlining_id; |
546 } | 550 } |
547 } | 551 } |
548 } | 552 } |
549 if (FLAG_code_comments || | 553 if (FLAG_code_comments || |
550 FLAG_disassemble || FLAG_disassemble_optimized) { | 554 FLAG_disassemble || FLAG_disassemble_optimized) { |
551 if (FLAG_source_lines) { | 555 if (FLAG_source_lines) { |
552 EmitSourceLine(instr); | 556 EmitSourceLine(instr); |
553 } | 557 } |
554 EmitComment(instr); | 558 EmitComment(instr); |
555 } | 559 } |
556 if (instr->IsParallelMove()) { | 560 if (instr->IsParallelMove()) { |
557 parallel_move_resolver_.EmitNativeCode(instr->AsParallelMove()); | 561 parallel_move_resolver_.EmitNativeCode(instr->AsParallelMove()); |
558 } else { | 562 } else { |
563 SaveCodeSize(); | |
559 EmitInstructionPrologue(instr); | 564 EmitInstructionPrologue(instr); |
560 ASSERT(pending_deoptimization_env_ == NULL); | 565 ASSERT(pending_deoptimization_env_ == NULL); |
561 pending_deoptimization_env_ = instr->env(); | 566 pending_deoptimization_env_ = instr->env(); |
562 instr->EmitNativeCode(this); | 567 instr->EmitNativeCode(this); |
563 pending_deoptimization_env_ = NULL; | 568 pending_deoptimization_env_ = NULL; |
564 EmitInstructionEpilogue(instr); | 569 EmitInstructionEpilogue(instr); |
570 MaybeEmitCodeSourceMapEntry(instr->token_pos()); | |
565 } | 571 } |
566 | 572 |
567 #if defined(DEBUG) | 573 #if defined(DEBUG) |
568 if (!is_optimizing()) { | 574 if (!is_optimizing()) { |
569 FrameStateUpdateWith(instr); | 575 FrameStateUpdateWith(instr); |
570 } | 576 } |
571 #endif | 577 #endif |
572 } | 578 } |
573 | 579 |
574 #if defined(DEBUG) | 580 #if defined(DEBUG) |
(...skipping 156 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
731 } | 737 } |
732 | 738 |
733 | 739 |
734 void FlowGraphCompiler::AddSlowPathCode(SlowPathCode* code) { | 740 void FlowGraphCompiler::AddSlowPathCode(SlowPathCode* code) { |
735 slow_path_code_.Add(code); | 741 slow_path_code_.Add(code); |
736 } | 742 } |
737 | 743 |
738 | 744 |
739 void FlowGraphCompiler::GenerateDeferredCode() { | 745 void FlowGraphCompiler::GenerateDeferredCode() { |
740 for (intptr_t i = 0; i < slow_path_code_.length(); i++) { | 746 for (intptr_t i = 0; i < slow_path_code_.length(); i++) { |
747 SaveCodeSize(); | |
741 slow_path_code_[i]->GenerateCode(this); | 748 slow_path_code_[i]->GenerateCode(this); |
749 MaybeEmitCodeSourceMapEntry(TokenPosition::kDeferredSlowPath); | |
742 } | 750 } |
743 for (intptr_t i = 0; i < deopt_infos_.length(); i++) { | 751 for (intptr_t i = 0; i < deopt_infos_.length(); i++) { |
752 SaveCodeSize(); | |
744 deopt_infos_[i]->GenerateCode(this, i); | 753 deopt_infos_[i]->GenerateCode(this, i); |
754 MaybeEmitCodeSourceMapEntry(TokenPosition::kDeferredDeoptInfo); | |
745 } | 755 } |
746 } | 756 } |
747 | 757 |
748 | 758 |
749 void FlowGraphCompiler::AddExceptionHandler(intptr_t try_index, | 759 void FlowGraphCompiler::AddExceptionHandler(intptr_t try_index, |
750 intptr_t outer_try_index, | 760 intptr_t outer_try_index, |
751 intptr_t pc_offset, | 761 intptr_t pc_offset, |
752 const Array& handler_types, | 762 const Array& handler_types, |
753 bool needs_stacktrace) { | 763 bool needs_stacktrace) { |
754 exception_handlers_list_->AddHandler(try_index, | 764 exception_handlers_list_->AddHandler(try_index, |
(...skipping 676 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1431 // and skipping such moves with register destinations keeps those | 1441 // and skipping such moves with register destinations keeps those |
1432 // registers free for the whole algorithm. | 1442 // registers free for the whole algorithm. |
1433 if (!move.IsEliminated() && !move.src().IsConstant()) PerformMove(i); | 1443 if (!move.IsEliminated() && !move.src().IsConstant()) PerformMove(i); |
1434 } | 1444 } |
1435 | 1445 |
1436 // Perform the moves with constant sources. | 1446 // Perform the moves with constant sources. |
1437 for (int i = 0; i < moves_.length(); ++i) { | 1447 for (int i = 0; i < moves_.length(); ++i) { |
1438 const MoveOperands& move = *moves_[i]; | 1448 const MoveOperands& move = *moves_[i]; |
1439 if (!move.IsEliminated()) { | 1449 if (!move.IsEliminated()) { |
1440 ASSERT(move.src().IsConstant()); | 1450 ASSERT(move.src().IsConstant()); |
1451 compiler_->SaveCodeSize(); | |
1441 EmitMove(i); | 1452 EmitMove(i); |
1453 compiler_->MaybeEmitCodeSourceMapEntry(TokenPosition::kParallelMove); | |
1442 } | 1454 } |
1443 } | 1455 } |
1444 | 1456 |
1445 moves_.Clear(); | 1457 moves_.Clear(); |
1446 } | 1458 } |
1447 | 1459 |
1448 | 1460 |
1449 void ParallelMoveResolver::BuildInitialMoveList( | 1461 void ParallelMoveResolver::BuildInitialMoveList( |
1450 ParallelMoveInstr* parallel_move) { | 1462 ParallelMoveInstr* parallel_move) { |
1451 // Perform a linear sweep of the moves to add them to the initial list of | 1463 // Perform a linear sweep of the moves to add them to the initial list of |
(...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1507 return; | 1519 return; |
1508 } | 1520 } |
1509 | 1521 |
1510 // The move may be blocked on a (at most one) pending move, in which case | 1522 // The move may be blocked on a (at most one) pending move, in which case |
1511 // we have a cycle. Search for such a blocking move and perform a swap to | 1523 // we have a cycle. Search for such a blocking move and perform a swap to |
1512 // resolve it. | 1524 // resolve it. |
1513 for (int i = 0; i < moves_.length(); ++i) { | 1525 for (int i = 0; i < moves_.length(); ++i) { |
1514 const MoveOperands& other_move = *moves_[i]; | 1526 const MoveOperands& other_move = *moves_[i]; |
1515 if (other_move.Blocks(destination)) { | 1527 if (other_move.Blocks(destination)) { |
1516 ASSERT(other_move.IsPending()); | 1528 ASSERT(other_move.IsPending()); |
1529 compiler_->SaveCodeSize(); | |
1517 EmitSwap(index); | 1530 EmitSwap(index); |
1531 compiler_->MaybeEmitCodeSourceMapEntry(TokenPosition::kParallelMove); | |
1518 return; | 1532 return; |
1519 } | 1533 } |
1520 } | 1534 } |
1521 | 1535 |
1522 // This move is not blocked. | 1536 // This move is not blocked. |
1537 compiler_->SaveCodeSize(); | |
1523 EmitMove(index); | 1538 EmitMove(index); |
1539 compiler_->MaybeEmitCodeSourceMapEntry(TokenPosition::kParallelMove); | |
1524 } | 1540 } |
1525 | 1541 |
1526 | 1542 |
1527 bool ParallelMoveResolver::IsScratchLocation(Location loc) { | 1543 bool ParallelMoveResolver::IsScratchLocation(Location loc) { |
1528 for (int i = 0; i < moves_.length(); ++i) { | 1544 for (int i = 0; i < moves_.length(); ++i) { |
1529 if (moves_[i]->Blocks(loc)) { | 1545 if (moves_[i]->Blocks(loc)) { |
1530 return false; | 1546 return false; |
1531 } | 1547 } |
1532 } | 1548 } |
1533 | 1549 |
(...skipping 237 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1771 Array::New(caller_inline_id_.length(), Heap::kOld)); | 1787 Array::New(caller_inline_id_.length(), Heap::kOld)); |
1772 Smi& smi = Smi::Handle(); | 1788 Smi& smi = Smi::Handle(); |
1773 for (intptr_t i = 0; i < caller_inline_id_.length(); i++) { | 1789 for (intptr_t i = 0; i < caller_inline_id_.length(); i++) { |
1774 smi = Smi::New(caller_inline_id_[i]); | 1790 smi = Smi::New(caller_inline_id_[i]); |
1775 res.SetAt(i, smi); | 1791 res.SetAt(i, smi); |
1776 } | 1792 } |
1777 return res.raw(); | 1793 return res.raw(); |
1778 } | 1794 } |
1779 | 1795 |
1780 | 1796 |
1797 void FlowGraphCompiler::SaveCodeSize() { | |
1798 // Remember how many bytes of code we emitted so far. This function |
rmacnak
2016/02/25 23:04:40
NOT_IN_PRODUCT
rmacnak
2016/02/25 23:04:40
emitted so far
Cutch
2016/02/26 15:59:22
Done.
Cutch
2016/02/26 15:59:22
Done.
| |
1799 // is called before we call into an instruction's EmitNativeCode. | |
1800 saved_code_size_ = assembler()->CodeSize(); | |
1801 } | |
1802 | |
1803 | |
1804 bool FlowGraphCompiler::MaybeEmitCodeSourceMapEntry(TokenPosition token_pos) { | |
1805 // This function is called after each instruction's EmitNativeCode. |
rmacnak
2016/02/25 23:04:40
NOT_IN_PRODUCT
Cutch
2016/02/26 15:59:22
Done.
| |
1806 if (saved_code_size_ < assembler()->CodeSize()) { | |
1807 // We emitted code, now associate the emitted code chunk with |token_pos|. | |
1808 code_source_map_builder()->AddEntry(saved_code_size_, token_pos); | |
1809 SaveCodeSize(); | |
1810 return true; | |
1811 } | |
1812 return false; | |
1813 } | |
1814 | |
1815 | |
1781 void FlowGraphCompiler::EmitPolymorphicInstanceCall( | 1816 void FlowGraphCompiler::EmitPolymorphicInstanceCall( |
1782 const ICData& ic_data, | 1817 const ICData& ic_data, |
1783 intptr_t argument_count, | 1818 intptr_t argument_count, |
1784 const Array& argument_names, | 1819 const Array& argument_names, |
1785 intptr_t deopt_id, | 1820 intptr_t deopt_id, |
1786 TokenPosition token_pos, | 1821 TokenPosition token_pos, |
1787 LocationSummary* locs) { | 1822 LocationSummary* locs) { |
1788 if (FLAG_polymorphic_with_deopt) { | 1823 if (FLAG_polymorphic_with_deopt) { |
1789 Label* deopt = AddDeoptStub(deopt_id, | 1824 Label* deopt = AddDeoptStub(deopt_id, |
1790 ICData::kDeoptPolymorphicInstanceCallTestFail); | 1825 ICData::kDeoptPolymorphicInstanceCallTestFail); |
(...skipping 90 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1881 | 1916 |
1882 | 1917 |
1883 void FlowGraphCompiler::FrameStateClear() { | 1918 void FlowGraphCompiler::FrameStateClear() { |
1884 ASSERT(!is_optimizing()); | 1919 ASSERT(!is_optimizing()); |
1885 frame_state_.TruncateTo(0); | 1920 frame_state_.TruncateTo(0); |
1886 } | 1921 } |
1887 #endif | 1922 #endif |
1888 | 1923 |
1889 | 1924 |
1890 } // namespace dart | 1925 } // namespace dart |
OLD | NEW |