| OLD | NEW |
| 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
| 4 | 4 |
| 5 #include "vm/flow_graph_optimizer.h" | 5 #include "vm/flow_graph_optimizer.h" |
| 6 | 6 |
| 7 #include "vm/bit_vector.h" | 7 #include "vm/bit_vector.h" |
| 8 #include "vm/cha.h" | 8 #include "vm/cha.h" |
| 9 #include "vm/cpu.h" | 9 #include "vm/cpu.h" |
| 10 #include "vm/dart_entry.h" | 10 #include "vm/dart_entry.h" |
| 11 #include "vm/exceptions.h" | 11 #include "vm/exceptions.h" |
| 12 #include "vm/flow_graph_builder.h" | 12 #include "vm/flow_graph_builder.h" |
| 13 #include "vm/flow_graph_compiler.h" | 13 #include "vm/flow_graph_compiler.h" |
| 14 #include "vm/flow_graph_range_analysis.h" |
| 14 #include "vm/hash_map.h" | 15 #include "vm/hash_map.h" |
| 15 #include "vm/il_printer.h" | 16 #include "vm/il_printer.h" |
| 16 #include "vm/intermediate_language.h" | 17 #include "vm/intermediate_language.h" |
| 17 #include "vm/object_store.h" | 18 #include "vm/object_store.h" |
| 18 #include "vm/parser.h" | 19 #include "vm/parser.h" |
| 19 #include "vm/resolver.h" | 20 #include "vm/resolver.h" |
| 20 #include "vm/scopes.h" | 21 #include "vm/scopes.h" |
| 21 #include "vm/stack_frame.h" | 22 #include "vm/stack_frame.h" |
| 22 #include "vm/symbols.h" | 23 #include "vm/symbols.h" |
| 23 | 24 |
| 24 namespace dart { | 25 namespace dart { |
| 25 | 26 |
| 26 DEFINE_FLAG(bool, array_bounds_check_elimination, true, | |
| 27 "Eliminate redundant bounds checks."); | |
| 28 DEFINE_FLAG(int, getter_setter_ratio, 13, | 27 DEFINE_FLAG(int, getter_setter_ratio, 13, |
| 29 "Ratio of getter/setter usage used for double field unboxing heuristics"); | 28 "Ratio of getter/setter usage used for double field unboxing heuristics"); |
| 30 DEFINE_FLAG(bool, load_cse, true, "Use redundant load elimination."); | 29 DEFINE_FLAG(bool, load_cse, true, "Use redundant load elimination."); |
| 31 DEFINE_FLAG(bool, dead_store_elimination, true, "Eliminate dead stores"); | 30 DEFINE_FLAG(bool, dead_store_elimination, true, "Eliminate dead stores"); |
| 32 DEFINE_FLAG(int, max_polymorphic_checks, 4, | 31 DEFINE_FLAG(int, max_polymorphic_checks, 4, |
| 33 "Maximum number of polymorphic check, otherwise it is megamorphic."); | 32 "Maximum number of polymorphic check, otherwise it is megamorphic."); |
| 34 DEFINE_FLAG(int, max_equality_polymorphic_checks, 32, | 33 DEFINE_FLAG(int, max_equality_polymorphic_checks, 32, |
| 35 "Maximum number of polymorphic checks in equality operator," | 34 "Maximum number of polymorphic checks in equality operator," |
| 36 " otherwise use megamorphic dispatch."); | 35 " otherwise use megamorphic dispatch."); |
| 37 DEFINE_FLAG(bool, remove_redundant_phis, true, "Remove redundant phis."); | 36 DEFINE_FLAG(bool, remove_redundant_phis, true, "Remove redundant phis."); |
| 38 DEFINE_FLAG(bool, trace_constant_propagation, false, | 37 DEFINE_FLAG(bool, trace_constant_propagation, false, |
| 39 "Print constant propagation and useless code elimination."); | 38 "Print constant propagation and useless code elimination."); |
| 40 DEFINE_FLAG(bool, trace_load_optimization, false, | 39 DEFINE_FLAG(bool, trace_load_optimization, false, |
| 41 "Print live sets for load optimization pass."); | 40 "Print live sets for load optimization pass."); |
| 42 DEFINE_FLAG(bool, trace_optimization, false, "Print optimization details."); | 41 DEFINE_FLAG(bool, trace_optimization, false, "Print optimization details."); |
| 43 DEFINE_FLAG(bool, trace_range_analysis, false, "Trace range analysis progress"); | |
| 44 DEFINE_FLAG(bool, truncating_left_shift, true, | 42 DEFINE_FLAG(bool, truncating_left_shift, true, |
| 45 "Optimize left shift to truncate if possible"); | 43 "Optimize left shift to truncate if possible"); |
| 46 DEFINE_FLAG(bool, use_cha, true, "Use class hierarchy analysis."); | 44 DEFINE_FLAG(bool, use_cha, true, "Use class hierarchy analysis."); |
| 47 DEFINE_FLAG(bool, trace_integer_ir_selection, false, | |
| 48 "Print integer IR selection optimization pass."); | |
| 49 DECLARE_FLAG(bool, enable_type_checks); | 45 DECLARE_FLAG(bool, enable_type_checks); |
| 50 DECLARE_FLAG(bool, source_lines); | 46 DECLARE_FLAG(bool, source_lines); |
| 51 DECLARE_FLAG(bool, trace_type_check_elimination); | 47 DECLARE_FLAG(bool, trace_type_check_elimination); |
| 52 DECLARE_FLAG(bool, warn_on_javascript_compatibility); | 48 DECLARE_FLAG(bool, warn_on_javascript_compatibility); |
| 53 | 49 |
| 54 // Quick access to the locally defined isolate() method. | 50 // Quick access to the locally defined isolate() method. |
| 55 #define I (isolate()) | 51 #define I (isolate()) |
| 56 | 52 |
| 57 static bool ShouldInlineSimd() { | 53 static bool ShouldInlineSimd() { |
| 58 return FlowGraphCompiler::SupportsUnboxedSimd128(); | 54 return FlowGraphCompiler::SupportsUnboxedSimd128(); |
| (...skipping 4433 matching lines...) | |
| 4492 } | 4488 } |
| 4493 | 4489 |
| 4494 // Discard the environment from the original instruction because the store | 4490 // Discard the environment from the original instruction because the store |
| 4495 // can't deoptimize. | 4491 // can't deoptimize. |
| 4496 instr->RemoveEnvironment(); | 4492 instr->RemoveEnvironment(); |
| 4497 ReplaceCall(instr, store); | 4493 ReplaceCall(instr, store); |
| 4498 return true; | 4494 return true; |
| 4499 } | 4495 } |
| 4500 | 4496 |
| 4501 | 4497 |
| 4502 // Replaces Mint IL instructions with Uint32 IL instructions | |
| 4503 // when possible. Uses output of RangeAnalysis. | |
| 4504 class IntegerInstructionSelector : public ValueObject { | |
| 4505 public: | |
| 4506 explicit IntegerInstructionSelector(FlowGraph* flow_graph) | |
| 4507 : flow_graph_(flow_graph), | |
| 4508 isolate_(NULL) { | |
| 4509 ASSERT(flow_graph_ != NULL); | |
| 4510 isolate_ = flow_graph_->isolate(); | |
| 4511 ASSERT(isolate_ != NULL); | |
| 4512 selected_uint32_defs_ = | |
| 4513 new(I) BitVector(flow_graph_->current_ssa_temp_index()); | |
| 4514 } | |
| 4515 | |
| 4516 void Select(); | |
| 4517 | |
| 4518 private: | |
| 4519 bool IsPotentialUint32Definition(Definition* def); | |
| 4520 void FindPotentialUint32Definitions(); | |
| 4521 bool IsUint32NarrowingDefinition(Definition* def); | |
| 4522 void FindUint32NarrowingDefinitions(); | |
| 4523 bool AllUsesAreUint32Narrowing(Value* list_head); | |
| 4524 bool CanBecomeUint32(Definition* def); | |
| 4525 void Propagate(); | |
| 4526 Definition* ConstructReplacementFor(Definition* def); | |
| 4527 void ReplaceInstructions(); | |
| 4528 | |
| 4529 Isolate* isolate() const { return isolate_; } | |
| 4530 | |
| 4531 GrowableArray<Definition*> potential_uint32_defs_; | |
| 4532 BitVector* selected_uint32_defs_; | |
| 4533 | |
| 4534 FlowGraph* flow_graph_; | |
| 4535 Isolate* isolate_; | |
| 4536 }; | |
| 4537 | |
| 4538 | |
| 4539 void IntegerInstructionSelector::Select() { | |
| 4540 if (FLAG_trace_integer_ir_selection) { | |
| 4541 OS::Print("---- starting integer ir selection -------\n"); | |
| 4542 } | |
| 4543 FindPotentialUint32Definitions(); | |
| 4544 FindUint32NarrowingDefinitions(); | |
| 4545 Propagate(); | |
| 4546 ReplaceInstructions(); | |
| 4547 if (FLAG_trace_integer_ir_selection) { | |
| 4548 OS::Print("---- after integer ir selection -------\n"); | |
| 4549 FlowGraphPrinter printer(*flow_graph_); | |
| 4550 printer.PrintBlocks(); | |
| 4551 } | |
| 4552 } | |
| 4553 | |
| 4554 | |
| 4555 bool IntegerInstructionSelector::IsPotentialUint32Definition(Definition* def) { | |
| 4556 // TODO(johnmccutchan): Consider Smi operations, to avoid unnecessary tagging | |
| 4557 // & untagged of intermediate results. | |
| 4558 // TODO(johnmccutchan): Consider phis. | |
| 4559 return def->IsBoxInteger() || // BoxMint. | |
| 4560 def->IsUnboxInteger() || // UnboxMint. | |
| 4561 def->IsBinaryMintOp() || | |
| 4562 def->IsShiftMintOp() || | |
| 4563 def->IsUnaryMintOp(); | |
| 4564 } | |
| 4565 | |
| 4566 | |
| 4567 void IntegerInstructionSelector::FindPotentialUint32Definitions() { | |
| 4568 if (FLAG_trace_integer_ir_selection) { | |
| 4569 OS::Print("++++ Finding potential Uint32 definitions:\n"); | |
| 4570 } | |
| 4571 | |
| 4572 for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator(); | |
| 4573 !block_it.Done(); | |
| 4574 block_it.Advance()) { | |
| 4575 BlockEntryInstr* block = block_it.Current(); | |
| 4576 | |
| 4577 for (ForwardInstructionIterator instr_it(block); | |
| 4578 !instr_it.Done(); | |
| 4579 instr_it.Advance()) { | |
| 4580 Instruction* current = instr_it.Current(); | |
| 4581 Definition* defn = current->AsDefinition(); | |
| 4582 if ((defn != NULL) && (defn->ssa_temp_index() != -1)) { | |
| 4583 if (IsPotentialUint32Definition(defn)) { | |
| 4584 if (FLAG_trace_integer_ir_selection) { | |
| 4585 OS::Print("Adding %s\n", current->ToCString()); | |
| 4586 } | |
| 4587 potential_uint32_defs_.Add(defn); | |
| 4588 } | |
| 4589 } | |
| 4590 } | |
| 4591 } | |
| 4592 } | |
| 4593 | |
| 4594 | |
| 4595 // BinaryMintOp masks and stores into unsigned typed arrays both truncate | |
| 4596 // the value into the Uint32 range. | |
| 4597 bool IntegerInstructionSelector::IsUint32NarrowingDefinition(Definition* def) { | |
| 4598 if (def->IsBinaryMintOp()) { | |
| 4599 BinaryMintOpInstr* op = def->AsBinaryMintOp(); | |
| 4600 // Must be a mask operation. | |
| 4601 if (op->op_kind() != Token::kBIT_AND) { | |
| 4602 return false; | |
| 4603 } | |
| 4604 Range* range = op->range(); | |
| 4605 if ((range == NULL) || | |
| 4606 !range->IsWithin(0, static_cast<int64_t>(kMaxUint32))) { | |
| 4607 return false; | |
| 4608 } | |
| 4609 return true; | |
| 4610 } | |
| 4611 // TODO(johnmccutchan): Add typed array stores. | |
| 4612 return false; | |
| 4613 } | |
| 4614 | |
| 4615 | |
| 4616 void IntegerInstructionSelector::FindUint32NarrowingDefinitions() { | |
| 4617 ASSERT(selected_uint32_defs_ != NULL); | |
| 4618 if (FLAG_trace_integer_ir_selection) { | |
| 4619 OS::Print("++++ Selecting Uint32 definitions:\n"); | |
| 4620 OS::Print("++++ Initial set:\n"); | |
| 4621 } | |
| 4622 for (intptr_t i = 0; i < potential_uint32_defs_.length(); i++) { | |
| 4623 Definition* defn = potential_uint32_defs_[i]; | |
| 4624 if (IsUint32NarrowingDefinition(defn)) { | |
| 4625 if (FLAG_trace_integer_ir_selection) { | |
| 4626 OS::Print("Adding %s\n", defn->ToCString()); | |
| 4627 } | |
| 4628 selected_uint32_defs_->Add(defn->ssa_temp_index()); | |
| 4629 } | |
| 4630 } | |
| 4631 } | |
| 4632 | |
| 4633 | |
| 4634 bool IntegerInstructionSelector::AllUsesAreUint32Narrowing(Value* list_head) { | |
| 4635 for (Value::Iterator it(list_head); | |
| 4636 !it.Done(); | |
| 4637 it.Advance()) { | |
| 4638 Value* use = it.Current(); | |
| 4639 Definition* defn = use->instruction()->AsDefinition(); | |
| 4640 if ((defn == NULL) || | |
| 4641 (defn->ssa_temp_index() == -1) || | |
| 4642 !selected_uint32_defs_->Contains(defn->ssa_temp_index())) { | |
| 4643 return false; | |
| 4644 } | |
| 4645 } | |
| 4646 return true; | |
| 4647 } | |
| 4648 | |
| 4649 | |
| 4650 bool IntegerInstructionSelector::CanBecomeUint32(Definition* def) { | |
| 4651 ASSERT(IsPotentialUint32Definition(def)); | |
| 4652 if (def->IsBoxInteger()) { | |
| 4653 // If a BoxInteger's input is a candidate, the box is a candidate. | |
| 4654 BoxIntegerInstr* box = def->AsBoxInteger(); | |
| 4655 Definition* box_input = box->value()->definition(); | |
| 4656 return selected_uint32_defs_->Contains(box_input->ssa_temp_index()); | |
| 4657 } | |
| 4658 // A right shift with an input outside of Uint32 range cannot be converted | |
| 4659 // because we need the high bits. | |
| 4660 if (def->IsShiftMintOp()) { | |
| 4661 ShiftMintOpInstr* op = def->AsShiftMintOp(); | |
| 4662 if (op->op_kind() == Token::kSHR) { | |
| 4663 Definition* shift_input = op->left()->definition(); | |
| 4664 ASSERT(shift_input != NULL); | |
| 4665 Range* range = shift_input->range(); | |
| 4666 if ((range == NULL) || | |
| 4667 !range->IsWithin(0, static_cast<int64_t>(kMaxUint32))) { | |
| 4668 return false; | |
| 4669 } | |
| 4670 } | |
| 4671 } | |
| 4672 if (!def->HasUses()) { | |
| 4673 // No uses, skip. | |
| 4674 return false; | |
| 4675 } | |
| 4676 return AllUsesAreUint32Narrowing(def->input_use_list()) && | |
| 4677 AllUsesAreUint32Narrowing(def->env_use_list()); | |
| 4678 } | |
| 4679 | |
| 4680 | |
| 4681 void IntegerInstructionSelector::Propagate() { | |
| 4682 ASSERT(selected_uint32_defs_ != NULL); | |
| 4683 bool changed = true; | |
| 4684 intptr_t iteration = 0; | |
| 4685 while (changed) { | |
| 4686 if (FLAG_trace_integer_ir_selection) { | |
| 4687 OS::Print("+++ Iteration: %" Pd "\n", iteration++); | |
| 4688 } | |
| 4689 changed = false; | |
| 4690 for (intptr_t i = 0; i < potential_uint32_defs_.length(); i++) { | |
| 4691 Definition* defn = potential_uint32_defs_[i]; | |
| 4692 if (selected_uint32_defs_->Contains(defn->ssa_temp_index())) { | |
| 4693 // Already marked as a candidate, skip. | |
| 4694 continue; | |
| 4695 } | |
| 4696 if (defn->IsConstant()) { | |
| 4697 // Skip constants. | |
| 4698 continue; | |
| 4699 } | |
| 4700 if (CanBecomeUint32(defn)) { | |
| 4701 if (FLAG_trace_integer_ir_selection) { | |
| 4702 OS::Print("Adding %s\n", defn->ToCString()); | |
| 4703 } | |
| 4704 // Found a new candidate. | |
| 4705 selected_uint32_defs_->Add(defn->ssa_temp_index()); | |
| 4706 // Haven't reached fixed point yet. | |
| 4707 changed = true; | |
| 4708 } | |
| 4709 } | |
| 4710 } | |
| 4711 if (FLAG_trace_integer_ir_selection) { | |
| 4712 OS::Print("Reached fixed point\n"); | |
| 4713 } | |
| 4714 } | |
| 4715 | |
| 4716 | |
| 4717 Definition* IntegerInstructionSelector::ConstructReplacementFor( | |
| 4718 Definition* def) { | |
| 4719 // Should only see mint definitions. | |
| 4720 ASSERT(IsPotentialUint32Definition(def)); | |
| 4721 // Should not see constant instructions. | |
| 4722 ASSERT(!def->IsConstant()); | |
| 4723 if (def->IsBinaryMintOp()) { | |
| 4724 BinaryMintOpInstr* op = def->AsBinaryMintOp(); | |
| 4725 Token::Kind op_kind = op->op_kind(); | |
| 4726 Value* left = op->left()->CopyWithType(); | |
| 4727 Value* right = op->right()->CopyWithType(); | |
| 4728 intptr_t deopt_id = op->DeoptimizationTarget(); | |
| 4729 return new(I) BinaryUint32OpInstr(op_kind, left, right, deopt_id); | |
| 4730 } else if (def->IsBoxInteger()) { | |
| 4731 BoxIntegerInstr* box = def->AsBoxInteger(); | |
| 4732 Value* value = box->value()->CopyWithType(); | |
| 4733 return new(I) BoxUint32Instr(value); | |
| 4734 } else if (def->IsUnboxInteger()) { | |
| 4735 UnboxIntegerInstr* unbox = def->AsUnboxInteger(); | |
| 4736 Value* value = unbox->value()->CopyWithType(); | |
| 4737 intptr_t deopt_id = unbox->deopt_id(); | |
| 4738 return new(I) UnboxUint32Instr(value, deopt_id); | |
| 4739 } else if (def->IsUnaryMintOp()) { | |
| 4740 UnaryMintOpInstr* op = def->AsUnaryMintOp(); | |
| 4741 Token::Kind op_kind = op->op_kind(); | |
| 4742 Value* value = op->value()->CopyWithType(); | |
| 4743 intptr_t deopt_id = op->DeoptimizationTarget(); | |
| 4744 return new(I) UnaryUint32OpInstr(op_kind, value, deopt_id); | |
| 4745 } else if (def->IsShiftMintOp()) { | |
| 4746 ShiftMintOpInstr* op = def->AsShiftMintOp(); | |
| 4747 Token::Kind op_kind = op->op_kind(); | |
| 4748 Value* left = op->left()->CopyWithType(); | |
| 4749 Value* right = op->right()->CopyWithType(); | |
| 4750 intptr_t deopt_id = op->DeoptimizationTarget(); | |
| 4751 return new(I) ShiftUint32OpInstr(op_kind, left, right, deopt_id); | |
| 4752 } | |
| 4753 UNREACHABLE(); | |
| 4754 return NULL; | |
| 4755 } | |
| 4756 | |
| 4757 | |
| 4758 void IntegerInstructionSelector::ReplaceInstructions() { | |
| 4759 if (FLAG_trace_integer_ir_selection) { | |
| 4760 OS::Print("++++ Replacing instructions:\n"); | |
| 4761 } | |
| 4762 for (intptr_t i = 0; i < potential_uint32_defs_.length(); i++) { | |
| 4763 Definition* defn = potential_uint32_defs_[i]; | |
| 4764 if (!selected_uint32_defs_->Contains(defn->ssa_temp_index())) { | |
| 4765 // Not a candidate. | |
| 4766 continue; | |
| 4767 } | |
| 4768 Definition* replacement = ConstructReplacementFor(defn); | |
| 4769 ASSERT(replacement != NULL); | |
| 4770 if (FLAG_trace_integer_ir_selection) { | |
| 4771 OS::Print("Replacing %s with %s\n", defn->ToCString(), | |
| 4772 replacement->ToCString()); | |
| 4773 } | |
| 4774 defn->ReplaceWith(replacement, NULL); | |
| 4775 ASSERT(flow_graph_->VerifyUseLists()); | |
| 4776 } | |
| 4777 } | |
| 4778 | |
| 4779 void FlowGraphOptimizer::SelectIntegerInstructions() { | |
| 4780 IntegerInstructionSelector iis(flow_graph_); | |
| 4781 iis.Select(); | |
| 4782 } | |
| 4783 | |
| 4784 | |
| 4785 // Range analysis for integer values. | |
| 4786 class RangeAnalysis : public ValueObject { | |
| 4787 public: | |
| 4788 explicit RangeAnalysis(FlowGraph* flow_graph) | |
| 4789 : flow_graph_(flow_graph), | |
| 4790 marked_defns_(NULL) { } | |
| 4791 | |
| 4792 // Infer ranges for all values and remove overflow checks from binary smi | |
| 4793 // operations when proven redundant. | |
| 4794 void Analyze(); | |
| 4795 | |
| 4796 private: | |
| 4797 // Collect all values that were proven to be smi in smi_values_ array and all | |
| 4798 // CheckSmi instructions in smi_check_ array. | |
| 4799 void CollectValues(); | |
| 4800 | |
| 4801 // Iterate over smi values and constrain them at branch successors. | |
| 4802 // Additionally constrain values after CheckSmi instructions. | |
| 4803 void InsertConstraints(); | |
| 4804 | |
| 4805 // Iterate over uses of the given definition and discover branches that | |
| 4806 // constrain it. Insert appropriate Constraint instructions at the true | |
| 4807 // and false successors and rename all dominated uses to refer to a | |
| 4808 // Constraint instead of this definition. | |
| 4809 void InsertConstraintsFor(Definition* defn); | |
| 4810 | |
| 4811 // Create a constraint for defn, insert it after the given instruction and | |
| 4812 // rename all uses that are dominated by it. | |
| 4813 ConstraintInstr* InsertConstraintFor(Definition* defn, | |
| 4814 Range* constraint, | |
| 4815 Instruction* after); | |
| 4816 | |
| 4817 void ConstrainValueAfterBranch(Definition* defn, Value* use); | |
| 4818 void ConstrainValueAfterCheckArrayBound(Definition* defn, | |
| 4819 CheckArrayBoundInstr* check, | |
| 4820 intptr_t use_index); | |
| 4821 | |
| 4822 // Replace uses of the definition def that are dominated by instruction dom | |
| 4823 // with uses of other definition. | |
| 4824 void RenameDominatedUses(Definition* def, | |
| 4825 Instruction* dom, | |
| 4826 Definition* other); | |
| 4827 | |
| 4828 | |
| 4829 // Walk the dominator tree and infer ranges for smi values. | |
| 4830 void InferRanges(); | |
| 4831 void InferRangesRecursive(BlockEntryInstr* block); | |
| 4832 | |
| 4833 enum Direction { | |
| 4834 kUnknown, | |
| 4835 kPositive, | |
| 4836 kNegative, | |
| 4837 kBoth | |
| 4838 }; | |
| 4839 | |
| 4840 Range* InferInductionVariableRange(JoinEntryInstr* loop_header, | |
| 4841 PhiInstr* var); | |
| 4842 | |
| 4843 void ResetWorklist(); | |
| 4844 void MarkDefinition(Definition* defn); | |
| 4845 | |
| 4846 static Direction ToDirection(Value* val); | |
| 4847 | |
| 4848 static Direction Invert(Direction direction) { | |
| 4849 return (direction == kPositive) ? kNegative : kPositive; | |
| 4850 } | |
| 4851 | |
| 4852 static void UpdateDirection(Direction* direction, | |
| 4853 Direction new_direction) { | |
| 4854 if (*direction != new_direction) { | |
| 4855 if (*direction != kUnknown) new_direction = kBoth; | |
| 4856 *direction = new_direction; | |
| 4857 } | |
| 4858 } | |
| 4859 | |
| 4860 // Remove artificial Constraint instructions and replace them with actual | |
| 4861 // unconstrained definitions. | |
| 4862 void RemoveConstraints(); | |
| 4863 | |
| 4864 Range* ConstraintRange(Token::Kind op, Definition* boundary); | |
| 4865 | |
| 4866 Isolate* isolate() const { return flow_graph_->isolate(); } | |
| 4867 | |
| 4868 FlowGraph* flow_graph_; | |
| 4869 | |
| 4870 // Values that are known to be smi or mint. | |
| 4871 GrowableArray<Definition*> values_; | |
| 4872 // All CheckSmi instructions. | |
| 4873 GrowableArray<CheckSmiInstr*> smi_checks_; | |
| 4874 | |
| 4875 // All Constraints inserted during InsertConstraints phase. They are treated | |
| 4876 // as smi values. | |
| 4877 GrowableArray<ConstraintInstr*> constraints_; | |
| 4878 | |
| 4879 // Bitvector for a quick filtering of known smi or mint values. | |
| 4880 BitVector* definitions_; | |
| 4881 | |
| 4882 // Worklist for induction variables analysis. | |
| 4883 GrowableArray<Definition*> worklist_; | |
| 4884 BitVector* marked_defns_; | |
| 4885 | |
| 4886 DISALLOW_COPY_AND_ASSIGN(RangeAnalysis); | |
| 4887 }; | |
| 4888 | |
| 4889 | |
| 4890 void RangeAnalysis::Analyze() { | |
| 4891 CollectValues(); | |
| 4892 InsertConstraints(); | |
| 4893 InferRanges(); | |
| 4894 IntegerInstructionSelector iis(flow_graph_); | |
| 4895 iis.Select(); | |
| 4896 RemoveConstraints(); | |
| 4897 } | |
| 4898 | |
| 4899 | |
| 4900 void RangeAnalysis::CollectValues() { | |
| 4901 const GrowableArray<Definition*>& initial = | |
| 4902 *flow_graph_->graph_entry()->initial_definitions(); | |
| 4903 for (intptr_t i = 0; i < initial.length(); ++i) { | |
| 4904 Definition* current = initial[i]; | |
| 4905 if (current->Type()->ToCid() == kSmiCid) { | |
| 4906 values_.Add(current); | |
| 4907 } else if (current->IsMintDefinition()) { | |
| 4908 values_.Add(current); | |
| 4909 } | |
| 4910 } | |
| 4911 | |
| 4912 for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator(); | |
| 4913 !block_it.Done(); | |
| 4914 block_it.Advance()) { | |
| 4915 BlockEntryInstr* block = block_it.Current(); | |
| 4916 | |
| 4917 | |
| 4918 if (block->IsGraphEntry() || block->IsCatchBlockEntry()) { | |
| 4919 const GrowableArray<Definition*>& initial = block->IsGraphEntry() | |
| 4920 ? *block->AsGraphEntry()->initial_definitions() | |
| 4921 : *block->AsCatchBlockEntry()->initial_definitions(); | |
| 4922 for (intptr_t i = 0; i < initial.length(); ++i) { | |
| 4923 Definition* current = initial[i]; | |
| 4924 if (current->Type()->ToCid() == kSmiCid) { | |
| 4925 values_.Add(current); | |
| 4926 } else if (current->IsMintDefinition()) { | |
| 4927 values_.Add(current); | |
| 4928 } | |
| 4929 } | |
| 4930 } | |
| 4931 | |
| 4932 JoinEntryInstr* join = block->AsJoinEntry(); | |
| 4933 if (join != NULL) { | |
| 4934 for (PhiIterator phi_it(join); !phi_it.Done(); phi_it.Advance()) { | |
| 4935 PhiInstr* current = phi_it.Current(); | |
| 4936 if (current->Type()->ToCid() == kSmiCid) { | |
| 4937 values_.Add(current); | |
| 4938 } | |
| 4939 } | |
| 4940 } | |
| 4941 | |
| 4942 for (ForwardInstructionIterator instr_it(block); | |
| 4943 !instr_it.Done(); | |
| 4944 instr_it.Advance()) { | |
| 4945 Instruction* current = instr_it.Current(); | |
| 4946 Definition* defn = current->AsDefinition(); | |
| 4947 if (defn != NULL) { | |
| 4948 if ((defn->Type()->ToCid() == kSmiCid) && | |
| 4949 (defn->ssa_temp_index() != -1)) { | |
| 4950 values_.Add(defn); | |
| 4951 } else if ((defn->IsMintDefinition()) && | |
| 4952 (defn->ssa_temp_index() != -1)) { | |
| 4953 values_.Add(defn); | |
| 4954 } | |
| 4955 } else if (current->IsCheckSmi()) { | |
| 4956 smi_checks_.Add(current->AsCheckSmi()); | |
| 4957 } | |
| 4958 } | |
| 4959 } | |
| 4960 } | |
| 4961 | |
| 4962 | |
| 4963 // Returns true if use is dominated by the given instruction. | |
| 4964 // Note: uses that occur at the instruction itself are not dominated by it. | |
| 4965 static bool IsDominatedUse(Instruction* dom, Value* use) { | |
| 4966 BlockEntryInstr* dom_block = dom->GetBlock(); | |
| 4967 | |
| 4968 Instruction* instr = use->instruction(); | |
| 4969 | |
| 4970 PhiInstr* phi = instr->AsPhi(); | |
| 4971 if (phi != NULL) { | |
| 4972 return dom_block->Dominates(phi->block()->PredecessorAt(use->use_index())); | |
| 4973 } | |
| 4974 | |
| 4975 BlockEntryInstr* use_block = instr->GetBlock(); | |
| 4976 if (use_block == dom_block) { | |
| 4977 // Fast path for the case of block entry. | |
| 4978 if (dom_block == dom) return true; | |
| 4979 | |
| 4980 for (Instruction* curr = dom->next(); curr != NULL; curr = curr->next()) { | |
| 4981 if (curr == instr) return true; | |
| 4982 } | |
| 4983 | |
| 4984 return false; | |
| 4985 } | |
| 4986 | |
| 4987 return dom_block->Dominates(use_block); | |
| 4988 } | |
| 4989 | |
| 4990 | |
| 4991 void RangeAnalysis::RenameDominatedUses(Definition* def, | |
| 4992 Instruction* dom, | |
| 4993 Definition* other) { | |
| 4994 for (Value::Iterator it(def->input_use_list()); | |
| 4995 !it.Done(); | |
| 4996 it.Advance()) { | |
| 4997 Value* use = it.Current(); | |
| 4998 | |
| 4999 // Skip dead phis. | |
| 5000 PhiInstr* phi = use->instruction()->AsPhi(); | |
| 5001 ASSERT((phi == NULL) || phi->is_alive()); | |
| 5002 if (IsDominatedUse(dom, use)) { | |
| 5003 use->BindTo(other); | |
| 5004 } | |
| 5005 } | |
| 5006 } | |
| 5007 | |
| 5008 | |
| 5009 // For a comparison operation return an operation for the equivalent flipped | |
| 5010 // comparison: a (op) b === b (op') a. | |
| 5011 static Token::Kind FlipComparison(Token::Kind op) { | |
| 5012 switch (op) { | |
| 5013 case Token::kEQ: return Token::kEQ; | |
| 5014 case Token::kNE: return Token::kNE; | |
| 5015 case Token::kLT: return Token::kGT; | |
| 5016 case Token::kGT: return Token::kLT; | |
| 5017 case Token::kLTE: return Token::kGTE; | |
| 5018 case Token::kGTE: return Token::kLTE; | |
| 5019 default: | |
| 5020 UNREACHABLE(); | |
| 5021 return Token::kILLEGAL; | |
| 5022 } | |
| 5023 } | |
| 5024 | |
| 5025 | |
| 5026 // Given a boundary (right operand) and a comparison operation return | |
| 5027 // a symbolic range constraint for the left operand of the comparison assuming | |
| 5028 // that it evaluated to true. | |
| 5029 // For example, for the comparison a < b, symbol a is constrained with range | |
| 5030 // [Smi::kMinValue, b - 1]. | |
| 5031 Range* RangeAnalysis::ConstraintRange(Token::Kind op, Definition* boundary) { | |
| 5032 switch (op) { | |
| 5033 case Token::kEQ: | |
| 5034 return new(I) Range(RangeBoundary::FromDefinition(boundary), | |
| 5035 RangeBoundary::FromDefinition(boundary)); | |
| 5036 case Token::kNE: | |
| 5037 return Range::Unknown(); | |
| 5038 case Token::kLT: | |
| 5039 return new(I) Range(RangeBoundary::MinSmi(), | |
| 5040 RangeBoundary::FromDefinition(boundary, -1)); | |
| 5041 case Token::kGT: | |
| 5042 return new(I) Range(RangeBoundary::FromDefinition(boundary, 1), | |
| 5043 RangeBoundary::MaxSmi()); | |
| 5044 case Token::kLTE: | |
| 5045 return new(I) Range(RangeBoundary::MinSmi(), | |
| 5046 RangeBoundary::FromDefinition(boundary)); | |
| 5047 case Token::kGTE: | |
| 5048 return new(I) Range(RangeBoundary::FromDefinition(boundary), | |
| 5049 RangeBoundary::MaxSmi()); | |
| 5050 default: | |
| 5051 UNREACHABLE(); | |
| 5052 return Range::Unknown(); | |
| 5053 } | |
| 5054 } | |
| 5055 | |
| 5056 | |
| 5057 ConstraintInstr* RangeAnalysis::InsertConstraintFor(Definition* defn, | |
| 5058 Range* constraint_range, | |
| 5059 Instruction* after) { | |
| 5060 // No need to constrain constants. | |
| 5061 if (defn->IsConstant()) return NULL; | |
| 5062 | |
| 5063 ConstraintInstr* constraint = new(I) ConstraintInstr( | |
| 5064 new(I) Value(defn), constraint_range); | |
| 5065 flow_graph_->InsertAfter(after, constraint, NULL, FlowGraph::kValue); | |
| 5066 RenameDominatedUses(defn, constraint, constraint); | |
| 5067 constraints_.Add(constraint); | |
| 5068 return constraint; | |
| 5069 } | |
| 5070 | |
| 5071 | |
| 5072 void RangeAnalysis::ConstrainValueAfterBranch(Definition* defn, Value* use) { | |
| 5073 BranchInstr* branch = use->instruction()->AsBranch(); | |
| 5074 RelationalOpInstr* rel_op = branch->comparison()->AsRelationalOp(); | |
| 5075 if ((rel_op != NULL) && (rel_op->operation_cid() == kSmiCid)) { | |
| 5076 // Found comparison of two smis. Constrain defn at true and false | |
| 5077 // successors using the other operand as a boundary. | |
| 5078 Definition* boundary; | |
| 5079 Token::Kind op_kind; | |
| 5080 if (use->use_index() == 0) { // Left operand. | |
| 5081 boundary = rel_op->InputAt(1)->definition(); | |
| 5082 op_kind = rel_op->kind(); | |
| 5083 } else { | |
| 5084 ASSERT(use->use_index() == 1); // Right operand. | |
| 5085 boundary = rel_op->InputAt(0)->definition(); | |
| 5086 // InsertConstraintFor assumes that defn is the left operand of a | |
| 5087 // comparison; if it is the right operand, flip the comparison. | |
| 5088 op_kind = FlipComparison(rel_op->kind()); | |
| 5089 } | |
| 5090 | |
| 5091 // Constrain definition at the true successor. | |
| 5092 ConstraintInstr* true_constraint = | |
| 5093 InsertConstraintFor(defn, | |
| 5094 ConstraintRange(op_kind, boundary), | |
| 5095 branch->true_successor()); | |
| 5096 // Mark true_constraint as an artificial use of boundary. This ensures | |
| 5097 // that the constraint's range is recalculated if boundary's range changes. | |
| 5098 if (true_constraint != NULL) { | |
| 5099 true_constraint->AddDependency(boundary); | |
| 5100 true_constraint->set_target(branch->true_successor()); | |
| 5101 } | |
| 5102 | |
| 5103 // Constrain definition with a negated condition at the false successor. | |
| 5104 ConstraintInstr* false_constraint = | |
| 5105 InsertConstraintFor( | |
| 5106 defn, | |
| 5107 ConstraintRange(Token::NegateComparison(op_kind), boundary), | |
| 5108 branch->false_successor()); | |
| 5109 // Mark false_constraint as an artificial use of boundary. This ensures | |
| 5110 // that the constraint's range is recalculated if boundary's range changes. | |
| 5111 if (false_constraint != NULL) { | |
| 5112 false_constraint->AddDependency(boundary); | |
| 5113 false_constraint->set_target(branch->false_successor()); | |
| 5114 } | |
| 5115 } | |
| 5116 } | |
| 5117 | |
| 5118 | |
| 5119 void RangeAnalysis::InsertConstraintsFor(Definition* defn) { | |
| 5120 for (Value* use = defn->input_use_list(); | |
| 5121 use != NULL; | |
| 5122 use = use->next_use()) { | |
| 5123 if (use->instruction()->IsBranch()) { | |
| 5124 ConstrainValueAfterBranch(defn, use); | |
| 5125 } else if (use->instruction()->IsCheckArrayBound()) { | |
| 5126 ConstrainValueAfterCheckArrayBound( | |
| 5127 defn, | |
| 5128 use->instruction()->AsCheckArrayBound(), | |
| 5129 use->use_index()); | |
| 5130 } | |
| 5131 } | |
| 5132 } | |
| 5133 | |
| 5134 | |
| 5135 void RangeAnalysis::ConstrainValueAfterCheckArrayBound( | |
| 5136 Definition* defn, CheckArrayBoundInstr* check, intptr_t use_index) { | |
| 5137 Range* constraint_range = NULL; | |
| 5138 if (use_index == CheckArrayBoundInstr::kIndexPos) { | |
| 5139 Definition* length = check->length()->definition(); | |
| 5140 constraint_range = new(I) Range( | |
| 5141 RangeBoundary::FromConstant(0), | |
| 5142 RangeBoundary::FromDefinition(length, -1)); | |
| 5143 } else { | |
| 5144 ASSERT(use_index == CheckArrayBoundInstr::kLengthPos); | |
| 5145 Definition* index = check->index()->definition(); | |
| 5146 constraint_range = new(I) Range( | |
| 5147 RangeBoundary::FromDefinition(index, 1), | |
| 5148 RangeBoundary::MaxSmi()); | |
| 5149 } | |
| 5150 InsertConstraintFor(defn, constraint_range, check); | |
| 5151 } | |
| 5152 | |
| 5153 | |
| 5154 void RangeAnalysis::InsertConstraints() { | |
| 5155 for (intptr_t i = 0; i < smi_checks_.length(); i++) { | |
| 5156 CheckSmiInstr* check = smi_checks_[i]; | |
| 5157 ConstraintInstr* constraint = | |
| 5158 InsertConstraintFor(check->value()->definition(), | |
| 5159 Range::UnknownSmi(), | |
| 5160 check); | |
| 5161 if (constraint == NULL) { | |
| 5162 // No constraint was needed. | |
| 5163 continue; | |
| 5164 } | |
| 5165 // Mark the constraint's value's reaching type as smi. | |
| 5166 CompileType* smi_compile_type = | |
| 5167 ZoneCompileType::Wrap(CompileType::FromCid(kSmiCid)); | |
| 5168 constraint->value()->SetReachingType(smi_compile_type); | |
| 5169 } | |
| 5170 | |
| 5171 for (intptr_t i = 0; i < values_.length(); i++) { | |
| 5172 InsertConstraintsFor(values_[i]); | |
| 5173 } | |
| 5174 | |
| 5175 for (intptr_t i = 0; i < constraints_.length(); i++) { | |
| 5176 InsertConstraintsFor(constraints_[i]); | |
| 5177 } | |
| 5178 } | |
| 5179 | |
| 5180 | |
| 5181 void RangeAnalysis::ResetWorklist() { | |
| 5182 if (marked_defns_ == NULL) { | |
| 5183 marked_defns_ = new(I) BitVector(flow_graph_->current_ssa_temp_index()); | |
| 5184 } else { | |
| 5185 marked_defns_->Clear(); | |
| 5186 } | |
| 5187 worklist_.Clear(); | |
| 5188 } | |
| 5189 | |
| 5190 | |
| 5191 void RangeAnalysis::MarkDefinition(Definition* defn) { | |
| 5192 // Unwrap constrained value. | |
| 5193 while (defn->IsConstraint()) { | |
| 5194 defn = defn->AsConstraint()->value()->definition(); | |
| 5195 } | |
| 5196 | |
| 5197 if (!marked_defns_->Contains(defn->ssa_temp_index())) { | |
| 5198 worklist_.Add(defn); | |
| 5199 marked_defns_->Add(defn->ssa_temp_index()); | |
| 5200 } | |
| 5201 } | |
| 5202 | |
| 5203 | |
| 5204 RangeAnalysis::Direction RangeAnalysis::ToDirection(Value* val) { | |
| 5205 if (val->BindsToConstant()) { | |
| 5206 return (Smi::Cast(val->BoundConstant()).Value() >= 0) ? kPositive | |
| 5207 : kNegative; | |
| 5208 } else if (val->definition()->range() != NULL) { | |
| 5209 Range* range = val->definition()->range(); | |
| 5210 if (Range::ConstantMin(range).ConstantValue() >= 0) { | |
| 5211 return kPositive; | |
| 5212 } else if (Range::ConstantMax(range).ConstantValue() <= 0) { | |
| 5213 return kNegative; | |
| 5214 } | |
| 5215 } | |
| 5216 return kUnknown; | |
| 5217 } | |
| 5218 | |
| 5219 | |
| 5220 Range* RangeAnalysis::InferInductionVariableRange(JoinEntryInstr* loop_header, | |
| 5221 PhiInstr* var) { | |
| 5222 BitVector* loop_info = loop_header->loop_info(); | |
| 5223 | |
| 5224 Definition* initial_value = NULL; | |
| 5225 Direction direction = kUnknown; | |
| 5226 | |
| 5227 ResetWorklist(); | |
| 5228 MarkDefinition(var); | |
| 5229 while (!worklist_.is_empty()) { | |
| 5230 Definition* defn = worklist_.RemoveLast(); | |
| 5231 | |
| 5232 if (defn->IsPhi()) { | |
| 5233 PhiInstr* phi = defn->AsPhi(); | |
| 5234 for (intptr_t i = 0; i < phi->InputCount(); i++) { | |
| 5235 Definition* defn = phi->InputAt(i)->definition(); | |
| 5236 | |
| 5237 if (!loop_info->Contains(defn->GetBlock()->preorder_number())) { | |
| 5238 // The value is coming from outside of the loop. | |
| 5239 if (initial_value == NULL) { | |
| 5240 initial_value = defn; | |
| 5241 continue; | |
| 5242 } else if (initial_value == defn) { | |
| 5243 continue; | |
| 5244 } else { | |
| 5245 return NULL; | |
| 5246 } | |
| 5247 } | |
| 5248 | |
| 5249 MarkDefinition(defn); | |
| 5250 } | |
| 5251 } else if (defn->IsBinarySmiOp()) { | |
| 5252 BinarySmiOpInstr* binary_op = defn->AsBinarySmiOp(); | |
| 5253 | |
| 5254 switch (binary_op->op_kind()) { | |
| 5255 case Token::kADD: { | |
| 5256 const Direction growth_right = | |
| 5257 ToDirection(binary_op->right()); | |
| 5258 if (growth_right != kUnknown) { | |
| 5259 UpdateDirection(&direction, growth_right); | |
| 5260 MarkDefinition(binary_op->left()->definition()); | |
| 5261 break; | |
| 5262 } | |
| 5263 | |
| 5264 const Direction growth_left = | |
| 5265 ToDirection(binary_op->left()); | |
| 5266 if (growth_left != kUnknown) { | |
| 5267 UpdateDirection(&direction, growth_left); | |
| 5268 MarkDefinition(binary_op->right()->definition()); | |
| 5269 break; | |
| 5270 } | |
| 5271 | |
| 5272 return NULL; | |
| 5273 } | |
| 5274 | |
| 5275 case Token::kSUB: { | |
| 5276 const Direction growth_right = | |
| 5277 ToDirection(binary_op->right()); | |
| 5278 if (growth_right != kUnknown) { | |
| 5279 UpdateDirection(&direction, Invert(growth_right)); | |
| 5280 MarkDefinition(binary_op->left()->definition()); | |
| 5281 break; | |
| 5282 } | |
| 5283 return NULL; | |
| 5284 } | |
| 5285 | |
| 5286 default: | |
| 5287 return NULL; | |
| 5288 } | |
| 5289 } else { | |
| 5290 return NULL; | |
| 5291 } | |
| 5292 } | |
| 5293 | |
| 5294 | |
| 5295 // We transitively discovered all dependencies of the given phi | |
| 5296 // and confirmed that it depends on a single value coming from outside of | |
| 5297 // the loop and some linear combinations of itself. | |
| 5298 // Compute the range based on initial value and the direction of the growth. | |
| 5299 switch (direction) { | |
| 5300 case kPositive: | |
| 5301 return new(I) Range(RangeBoundary::FromDefinition(initial_value), | |
| 5302 RangeBoundary::MaxSmi()); | |
| 5303 | |
| 5304 case kNegative: | |
| 5305 return new(I) Range(RangeBoundary::MinSmi(), | |
| 5306 RangeBoundary::FromDefinition(initial_value)); | |
| 5307 | |
| 5308 case kUnknown: | |
| 5309 case kBoth: | |
| 5310 return Range::UnknownSmi(); | |
| 5311 } | |
| 5312 | |
| 5313 UNREACHABLE(); | |
| 5314 return NULL; | |
| 5315 } | |
| 5316 | |
| 5317 | |
| 5318 void RangeAnalysis::InferRangesRecursive(BlockEntryInstr* block) { | |
| 5319 JoinEntryInstr* join = block->AsJoinEntry(); | |
| 5320 if (join != NULL) { | |
| 5321 const bool is_loop_header = (join->loop_info() != NULL); | |
| 5322 for (PhiIterator it(join); !it.Done(); it.Advance()) { | |
| 5323 PhiInstr* phi = it.Current(); | |
| 5324 if (definitions_->Contains(phi->ssa_temp_index())) { | |
| 5325 if (is_loop_header) { | |
| 5326 // Try recognizing simple induction variables. | |
| 5327 Range* range = InferInductionVariableRange(join, phi); | |
| 5328 if (range != NULL) { | |
| 5329 phi->range_ = range; | |
| 5330 continue; | |
| 5331 } | |
| 5332 } | |
| 5333 | |
| 5334 phi->InferRange(); | |
| 5335 } | |
| 5336 } | |
| 5337 } | |
| 5338 | |
| 5339 for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) { | |
| 5340 Instruction* current = it.Current(); | |
| 5341 | |
| 5342 Definition* defn = current->AsDefinition(); | |
| 5343 if ((defn != NULL) && | |
| 5344 (defn->ssa_temp_index() != -1) && | |
| 5345 definitions_->Contains(defn->ssa_temp_index())) { | |
| 5346 defn->InferRange(); | |
| 5347 } else if (FLAG_array_bounds_check_elimination && | |
| 5348 current->IsCheckArrayBound()) { | |
| 5349 CheckArrayBoundInstr* check = current->AsCheckArrayBound(); | |
| 5350 RangeBoundary array_length = | |
| 5351 RangeBoundary::FromDefinition(check->length()->definition()); | |
| 5352 if (check->IsRedundant(array_length)) { | |
| 5353 it.RemoveCurrentFromGraph(); | |
| 5354 } | |
| 5355 } | |
| 5356 } | |
| 5357 | |
| 5358 for (intptr_t i = 0; i < block->dominated_blocks().length(); ++i) { | |
| 5359 InferRangesRecursive(block->dominated_blocks()[i]); | |
| 5360 } | |
| 5361 } | |
| 5362 | |
| 5363 | |
| 5364 void RangeAnalysis::InferRanges() { | |
| 5365 if (FLAG_trace_range_analysis) { | |
| 5366 OS::Print("---- before range analysis -------\n"); | |
| 5367 FlowGraphPrinter printer(*flow_graph_); | |
| 5368 printer.PrintBlocks(); | |
| 5369 } | |
| 5370 // Initialize bitvector for quick filtering of int values. | |
| 5371 definitions_ = | |
| 5372 new(I) BitVector(flow_graph_->current_ssa_temp_index()); | |
| 5373 for (intptr_t i = 0; i < values_.length(); i++) { | |
| 5374 definitions_->Add(values_[i]->ssa_temp_index()); | |
| 5375 } | |
| 5376 for (intptr_t i = 0; i < constraints_.length(); i++) { | |
| 5377 definitions_->Add(constraints_[i]->ssa_temp_index()); | |
| 5378 } | |
| 5379 | |
| 5380 // Infer initial values of ranges. | |
| 5381 const GrowableArray<Definition*>& initial = | |
| 5382 *flow_graph_->graph_entry()->initial_definitions(); | |
| 5383 for (intptr_t i = 0; i < initial.length(); ++i) { | |
| 5384 Definition* definition = initial[i]; | |
| 5385 if (definitions_->Contains(definition->ssa_temp_index())) { | |
| 5386 definition->InferRange(); | |
| 5387 } | |
| 5388 } | |
| 5389 InferRangesRecursive(flow_graph_->graph_entry()); | |
| 5390 | |
| 5391 if (FLAG_trace_range_analysis) { | |
| 5392 OS::Print("---- after range analysis -------\n"); | |
| 5393 FlowGraphPrinter printer(*flow_graph_); | |
| 5394 printer.PrintBlocks(); | |
| 5395 } | |
| 5396 } | |
| 5397 | |
| 5398 | |
| 5399 void RangeAnalysis::RemoveConstraints() { | |
| 5400 for (intptr_t i = 0; i < constraints_.length(); i++) { | |
| 5401 Definition* def = constraints_[i]->value()->definition(); | |
| 5402 // Some constraints might be constraining constraints. Unwind the chain of | |
| 5403 // constraints until we reach the actual definition. | |
| 5404 while (def->IsConstraint()) { | |
| 5405 def = def->AsConstraint()->value()->definition(); | |
| 5406 } | |
| 5407 constraints_[i]->ReplaceUsesWith(def); | |
| 5408 constraints_[i]->RemoveFromGraph(); | |
| 5409 } | |
| 5410 } | |
| 5411 | |
| 5412 | |
| 5413 void FlowGraphOptimizer::InferIntRanges() { | 4498 void FlowGraphOptimizer::InferIntRanges() { |
| 5414 RangeAnalysis range_analysis(flow_graph_); | 4499 RangeAnalysis range_analysis(flow_graph_); |
| 5415 range_analysis.Analyze(); | 4500 range_analysis.Analyze(); |
| 5416 } | 4501 } |
| 5417 | 4502 |
| 5418 | 4503 |
| 5419 void TryCatchAnalyzer::Optimize(FlowGraph* flow_graph) { | 4504 void TryCatchAnalyzer::Optimize(FlowGraph* flow_graph) { |
| 5420 // For every catch-block: Iterate over all call instructions inside the | 4505 // For every catch-block: Iterate over all call instructions inside the |
| 5421 // corresponding try-block and figure out for each environment value if it | 4506 // corresponding try-block and figure out for each environment value if it |
| 5422 // is the same constant at all calls. If yes, replace the initial definition | 4507 // is the same constant at all calls. If yes, replace the initial definition |
| (...skipping 4992 matching lines...) | |
| 10415 | 9500 |
| 10416 // Insert materializations at environment uses. | 9501 // Insert materializations at environment uses. |
| 10417 for (intptr_t i = 0; i < exits_collector_.exits().length(); i++) { | 9502 for (intptr_t i = 0; i < exits_collector_.exits().length(); i++) { |
| 10418 CreateMaterializationAt( | 9503 CreateMaterializationAt( |
| 10419 exits_collector_.exits()[i], alloc, alloc->cls(), *slots); | 9504 exits_collector_.exits()[i], alloc, alloc->cls(), *slots); |
| 10420 } | 9505 } |
| 10421 } | 9506 } |
| 10422 | 9507 |
| 10423 | 9508 |
| 10424 } // namespace dart | 9509 } // namespace dart |
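A closing note on why the Uint32 narrowing performed by the IntegerInstructionSelector shown in the diff above is sound: once RangeAnalysis proves that a kBIT_AND result lies within [0, kMaxUint32], a chain of Mint operations whose uses all flow into such narrowing definitions can be computed on the low 32 bits of its inputs alone, because masking the result to 32 bits makes the high input bits irrelevant for addition, subtraction, multiplication, the bitwise operations, and left shifts. The sketch below demonstrates that identity in isolation; it is illustrative only, not VM code, and the helper names MaskedAdd64 and NarrowedAdd32 are invented for the example.

```cpp
// Minimal standalone sketch (not VM code) of the invariant behind the
// Uint32 narrowing: if the 64-bit result is masked to 32 bits, the same
// operation performed on the low 32 bits of the inputs gives the same value.
#include <cstdint>
#include <cstdio>

// 64-bit add followed by the kind of kBIT_AND mask that
// IsUint32NarrowingDefinition recognizes (result within [0, kMaxUint32]).
static uint64_t MaskedAdd64(int64_t a, int64_t b) {
  return (static_cast<uint64_t>(a) + static_cast<uint64_t>(b)) & 0xFFFFFFFFu;
}

// The equivalent narrowed computation on unsigned 32-bit values.
static uint32_t NarrowedAdd32(int64_t a, int64_t b) {
  return static_cast<uint32_t>(a) + static_cast<uint32_t>(b);
}

int main() {
  const int64_t samples[] = {0, 1, -1, 0x7FFFFFFFLL, -0x80000000LL,
                             0x123456789ALL, -0x123456789ALL};
  for (int64_t a : samples) {
    for (int64_t b : samples) {
      // Both paths compute (a + b) mod 2^32, so they must agree.
      if (MaskedAdd64(a, b) != NarrowedAdd32(a, b)) {
        std::printf("mismatch for %lld and %lld\n",
                    static_cast<long long>(a), static_cast<long long>(b));
        return 1;
      }
    }
  }
  std::printf("masked 64-bit adds match 32-bit unsigned adds on all samples\n");
  return 0;
}
```

An unsigned right shift is the exception, since input bits above position 31 move down into the low 32 bits of the result; that is consistent with CanBecomeUint32 in the diff only range-checking the kSHR case.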