| Index: runtime/vm/flow_graph_optimizer.cc
|
| diff --git a/runtime/vm/flow_graph_optimizer.cc b/runtime/vm/flow_graph_optimizer.cc
|
| index cf16ff95ac10a9cf9effa5667c2d50d690769ca6..e9d92410e0df39ee9cf256fb19281416052a2e75 100644
|
| --- a/runtime/vm/flow_graph_optimizer.cc
|
| +++ b/runtime/vm/flow_graph_optimizer.cc
|
| @@ -11,6 +11,7 @@
|
| #include "vm/exceptions.h"
|
| #include "vm/flow_graph_builder.h"
|
| #include "vm/flow_graph_compiler.h"
|
| +#include "vm/flow_graph_range_analysis.h"
|
| #include "vm/hash_map.h"
|
| #include "vm/il_printer.h"
|
| #include "vm/intermediate_language.h"
|
| @@ -23,8 +24,6 @@
|
|
|
| namespace dart {
|
|
|
| -DEFINE_FLAG(bool, array_bounds_check_elimination, true,
|
| - "Eliminate redundant bounds checks.");
|
| DEFINE_FLAG(int, getter_setter_ratio, 13,
|
| "Ratio of getter/setter usage used for double field unboxing heuristics");
|
| DEFINE_FLAG(bool, load_cse, true, "Use redundant load elimination.");
|
| @@ -40,12 +39,9 @@ DEFINE_FLAG(bool, trace_constant_propagation, false,
|
| DEFINE_FLAG(bool, trace_load_optimization, false,
|
| "Print live sets for load optimization pass.");
|
| DEFINE_FLAG(bool, trace_optimization, false, "Print optimization details.");
|
| -DEFINE_FLAG(bool, trace_range_analysis, false, "Trace range analysis progress");
|
| DEFINE_FLAG(bool, truncating_left_shift, true,
|
| "Optimize left shift to truncate if possible");
|
| DEFINE_FLAG(bool, use_cha, true, "Use class hierarchy analysis.");
|
| -DEFINE_FLAG(bool, trace_integer_ir_selection, false,
|
| - "Print integer IR selection optimization pass.");
|
| DECLARE_FLAG(bool, enable_type_checks);
|
| DECLARE_FLAG(bool, source_lines);
|
| DECLARE_FLAG(bool, trace_type_check_elimination);
|
| @@ -4499,917 +4495,6 @@ bool FlowGraphOptimizer::TryInlineInstanceSetter(InstanceCallInstr* instr,
|
| }
|
|
|
|
|
| -// Replaces Mint IL instructions with Uint32 IL instructions
|
| -// when possible. Uses output of RangeAnalysis.
|
| -class IntegerInstructionSelector : public ValueObject {
|
| - public:
|
| - explicit IntegerInstructionSelector(FlowGraph* flow_graph)
|
| - : flow_graph_(flow_graph),
|
| - isolate_(NULL) {
|
| - ASSERT(flow_graph_ != NULL);
|
| - isolate_ = flow_graph_->isolate();
|
| - ASSERT(isolate_ != NULL);
|
| - selected_uint32_defs_ =
|
| - new(I) BitVector(flow_graph_->current_ssa_temp_index());
|
| - }
|
| -
|
| - void Select();
|
| -
|
| - private:
|
| - bool IsPotentialUint32Definition(Definition* def);
|
| - void FindPotentialUint32Definitions();
|
| - bool IsUint32NarrowingDefinition(Definition* def);
|
| - void FindUint32NarrowingDefinitions();
|
| - bool AllUsesAreUint32Narrowing(Value* list_head);
|
| - bool CanBecomeUint32(Definition* def);
|
| - void Propagate();
|
| - Definition* ConstructReplacementFor(Definition* def);
|
| - void ReplaceInstructions();
|
| -
|
| - Isolate* isolate() const { return isolate_; }
|
| -
|
| - GrowableArray<Definition*> potential_uint32_defs_;
|
| - BitVector* selected_uint32_defs_;
|
| -
|
| - FlowGraph* flow_graph_;
|
| - Isolate* isolate_;
|
| -};
|
| -
|
| -
|
| -void IntegerInstructionSelector::Select() {
|
| - if (FLAG_trace_integer_ir_selection) {
|
| - OS::Print("---- starting integer ir selection -------\n");
|
| - }
|
| - FindPotentialUint32Definitions();
|
| - FindUint32NarrowingDefinitions();
|
| - Propagate();
|
| - ReplaceInstructions();
|
| - if (FLAG_trace_integer_ir_selection) {
|
| - OS::Print("---- after integer ir selection -------\n");
|
| - FlowGraphPrinter printer(*flow_graph_);
|
| - printer.PrintBlocks();
|
| - }
|
| -}
|
| -
|
| -
|
| -bool IntegerInstructionSelector::IsPotentialUint32Definition(Definition* def) {
|
| - // TODO(johnmccutchan): Consider Smi operations, to avoid unnecessary tagging
|
| - // & untagging of intermediate results.
|
| - // TODO(johnmccutchan): Consider phis.
|
| - return def->IsBoxInteger() || // BoxMint.
|
| - def->IsUnboxInteger() || // UnboxMint.
|
| - def->IsBinaryMintOp() ||
|
| - def->IsShiftMintOp() ||
|
| - def->IsUnaryMintOp();
|
| -}
|
| -
|
| -
|
| -void IntegerInstructionSelector::FindPotentialUint32Definitions() {
|
| - if (FLAG_trace_integer_ir_selection) {
|
| - OS::Print("++++ Finding potential Uint32 definitions:\n");
|
| - }
|
| -
|
| - for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator();
|
| - !block_it.Done();
|
| - block_it.Advance()) {
|
| - BlockEntryInstr* block = block_it.Current();
|
| -
|
| - for (ForwardInstructionIterator instr_it(block);
|
| - !instr_it.Done();
|
| - instr_it.Advance()) {
|
| - Instruction* current = instr_it.Current();
|
| - Definition* defn = current->AsDefinition();
|
| - if ((defn != NULL) && (defn->ssa_temp_index() != -1)) {
|
| - if (IsPotentialUint32Definition(defn)) {
|
| - if (FLAG_trace_integer_ir_selection) {
|
| - OS::Print("Adding %s\n", current->ToCString());
|
| - }
|
| - potential_uint32_defs_.Add(defn);
|
| - }
|
| - }
|
| - }
|
| - }
|
| -}
|
| -
|
| -
|
| -// A Uint32 narrowing definition is a BinaryMintOp mask (or a store into an
|
| -// unsigned typed array) that truncates the value into the Uint32 range.
|
| -bool IntegerInstructionSelector::IsUint32NarrowingDefinition(Definition* def) {
|
| - if (def->IsBinaryMintOp()) {
|
| - BinaryMintOpInstr* op = def->AsBinaryMintOp();
|
| - // Must be a mask operation.
|
| - if (op->op_kind() != Token::kBIT_AND) {
|
| - return false;
|
| - }
|
| - Range* range = op->range();
|
| - if ((range == NULL) ||
|
| - !range->IsWithin(0, static_cast<int64_t>(kMaxUint32))) {
|
| - return false;
|
| - }
|
| - return true;
|
| - }
|
| - // TODO(johnmccutchan): Add typed array stores.
|
| - return false;
|
| -}
|
| -
|
| -
|
| -void IntegerInstructionSelector::FindUint32NarrowingDefinitions() {
|
| - ASSERT(selected_uint32_defs_ != NULL);
|
| - if (FLAG_trace_integer_ir_selection) {
|
| - OS::Print("++++ Selecting Uint32 definitions:\n");
|
| - OS::Print("++++ Initial set:\n");
|
| - }
|
| - for (intptr_t i = 0; i < potential_uint32_defs_.length(); i++) {
|
| - Definition* defn = potential_uint32_defs_[i];
|
| - if (IsUint32NarrowingDefinition(defn)) {
|
| - if (FLAG_trace_integer_ir_selection) {
|
| - OS::Print("Adding %s\n", defn->ToCString());
|
| - }
|
| - selected_uint32_defs_->Add(defn->ssa_temp_index());
|
| - }
|
| - }
|
| -}
|
| -
|
| -
|
| -bool IntegerInstructionSelector::AllUsesAreUint32Narrowing(Value* list_head) {
|
| - for (Value::Iterator it(list_head);
|
| - !it.Done();
|
| - it.Advance()) {
|
| - Value* use = it.Current();
|
| - Definition* defn = use->instruction()->AsDefinition();
|
| - if ((defn == NULL) ||
|
| - (defn->ssa_temp_index() == -1) ||
|
| - !selected_uint32_defs_->Contains(defn->ssa_temp_index())) {
|
| - return false;
|
| - }
|
| - }
|
| - return true;
|
| -}
|
| -
|
| -
|
| -bool IntegerInstructionSelector::CanBecomeUint32(Definition* def) {
|
| - ASSERT(IsPotentialUint32Definition(def));
|
| - if (def->IsBoxInteger()) {
|
| - // If a BoxInteger's input is a candidate, the box is a candidate.
|
| - BoxIntegerInstr* box = def->AsBoxInteger();
|
| - Definition* box_input = box->value()->definition();
|
| - return selected_uint32_defs_->Contains(box_input->ssa_temp_index());
|
| - }
|
| - // A right shift with an input outside of Uint32 range cannot be converted
|
| - // because we need the high bits.
|
| - if (def->IsShiftMintOp()) {
|
| - ShiftMintOpInstr* op = def->AsShiftMintOp();
|
| - if (op->op_kind() == Token::kSHR) {
|
| - Definition* shift_input = op->left()->definition();
|
| - ASSERT(shift_input != NULL);
|
| - Range* range = shift_input->range();
|
| - if ((range == NULL) ||
|
| - !range->IsWithin(0, static_cast<int64_t>(kMaxUint32))) {
|
| - return false;
|
| - }
|
| - }
|
| - }
|
| - if (!def->HasUses()) {
|
| - // No uses, skip.
|
| - return false;
|
| - }
|
| - return AllUsesAreUint32Narrowing(def->input_use_list()) &&
|
| - AllUsesAreUint32Narrowing(def->env_use_list());
|
| -}
|
| -
|
| -
|
| -void IntegerInstructionSelector::Propagate() {
|
| - ASSERT(selected_uint32_defs_ != NULL);
|
| - bool changed = true;
|
| - intptr_t iteration = 0;
|
| - while (changed) {
|
| - if (FLAG_trace_integer_ir_selection) {
|
| - OS::Print("+++ Iteration: %" Pd "\n", iteration++);
|
| - }
|
| - changed = false;
|
| - for (intptr_t i = 0; i < potential_uint32_defs_.length(); i++) {
|
| - Definition* defn = potential_uint32_defs_[i];
|
| - if (selected_uint32_defs_->Contains(defn->ssa_temp_index())) {
|
| - // Already marked as a candidate, skip.
|
| - continue;
|
| - }
|
| - if (defn->IsConstant()) {
|
| - // Skip constants.
|
| - continue;
|
| - }
|
| - if (CanBecomeUint32(defn)) {
|
| - if (FLAG_trace_integer_ir_selection) {
|
| - OS::Print("Adding %s\n", defn->ToCString());
|
| - }
|
| - // Found a new candidate.
|
| - selected_uint32_defs_->Add(defn->ssa_temp_index());
|
| - // Haven't reached fixed point yet.
|
| - changed = true;
|
| - }
|
| - }
|
| - }
|
| - if (FLAG_trace_integer_ir_selection) {
|
| - OS::Print("Reached fixed point\n");
|
| - }
|
| -}
|
| -
|
| -
|
| -Definition* IntegerInstructionSelector::ConstructReplacementFor(
|
| - Definition* def) {
|
| - // Should only see mint definitions.
|
| - ASSERT(IsPotentialUint32Definition(def));
|
| - // Should not see constant instructions.
|
| - ASSERT(!def->IsConstant());
|
| - if (def->IsBinaryMintOp()) {
|
| - BinaryMintOpInstr* op = def->AsBinaryMintOp();
|
| - Token::Kind op_kind = op->op_kind();
|
| - Value* left = op->left()->CopyWithType();
|
| - Value* right = op->right()->CopyWithType();
|
| - intptr_t deopt_id = op->DeoptimizationTarget();
|
| - return new(I) BinaryUint32OpInstr(op_kind, left, right, deopt_id);
|
| - } else if (def->IsBoxInteger()) {
|
| - BoxIntegerInstr* box = def->AsBoxInteger();
|
| - Value* value = box->value()->CopyWithType();
|
| - return new(I) BoxUint32Instr(value);
|
| - } else if (def->IsUnboxInteger()) {
|
| - UnboxIntegerInstr* unbox = def->AsUnboxInteger();
|
| - Value* value = unbox->value()->CopyWithType();
|
| - intptr_t deopt_id = unbox->deopt_id();
|
| - return new(I) UnboxUint32Instr(value, deopt_id);
|
| - } else if (def->IsUnaryMintOp()) {
|
| - UnaryMintOpInstr* op = def->AsUnaryMintOp();
|
| - Token::Kind op_kind = op->op_kind();
|
| - Value* value = op->value()->CopyWithType();
|
| - intptr_t deopt_id = op->DeoptimizationTarget();
|
| - return new(I) UnaryUint32OpInstr(op_kind, value, deopt_id);
|
| - } else if (def->IsShiftMintOp()) {
|
| - ShiftMintOpInstr* op = def->AsShiftMintOp();
|
| - Token::Kind op_kind = op->op_kind();
|
| - Value* left = op->left()->CopyWithType();
|
| - Value* right = op->right()->CopyWithType();
|
| - intptr_t deopt_id = op->DeoptimizationTarget();
|
| - return new(I) ShiftUint32OpInstr(op_kind, left, right, deopt_id);
|
| - }
|
| - UNREACHABLE();
|
| - return NULL;
|
| -}
|
| -
|
| -
|
| -void IntegerInstructionSelector::ReplaceInstructions() {
|
| - if (FLAG_trace_integer_ir_selection) {
|
| - OS::Print("++++ Replacing instructions:\n");
|
| - }
|
| - for (intptr_t i = 0; i < potential_uint32_defs_.length(); i++) {
|
| - Definition* defn = potential_uint32_defs_[i];
|
| - if (!selected_uint32_defs_->Contains(defn->ssa_temp_index())) {
|
| - // Not a candidate.
|
| - continue;
|
| - }
|
| - Definition* replacement = ConstructReplacementFor(defn);
|
| - ASSERT(replacement != NULL);
|
| - if (FLAG_trace_integer_ir_selection) {
|
| - OS::Print("Replacing %s with %s\n", defn->ToCString(),
|
| - replacement->ToCString());
|
| - }
|
| - defn->ReplaceWith(replacement, NULL);
|
| - ASSERT(flow_graph_->VerifyUseLists());
|
| - }
|
| -}
|
| -
|
| -void FlowGraphOptimizer::SelectIntegerInstructions() {
|
| - IntegerInstructionSelector iis(flow_graph_);
|
| - iis.Select();
|
| -}
|
| -
|
| -
|
| -// Range analysis for integer values.
|
| -class RangeAnalysis : public ValueObject {
|
| - public:
|
| - explicit RangeAnalysis(FlowGraph* flow_graph)
|
| - : flow_graph_(flow_graph),
|
| - marked_defns_(NULL) { }
|
| -
|
| - // Infer ranges for all values and remove overflow checks from binary smi
|
| - // operations when proven redundant.
|
| - void Analyze();
|
| -
|
| - private:
|
| - // Collect all values that were proven to be smi or mint into the values_
|
| - // array and all CheckSmi instructions into the smi_checks_ array.
|
| - void CollectValues();
|
| -
|
| - // Iterate over smi values and constrain them at branch successors.
|
| - // Additionally constrain values after CheckSmi instructions.
|
| - void InsertConstraints();
|
| -
|
| - // Iterate over uses of the given definition and discover branches that
|
| - // constrain it. Insert appropriate Constraint instructions at the true
|
| - // and false successors and rename all dominated uses to refer to a
|
| - // Constraint instead of this definition.
|
| - void InsertConstraintsFor(Definition* defn);
|
| -
|
| - // Create a constraint for defn, insert it after the given instruction, and
|
| - // rename all uses that are dominated by it.
|
| - ConstraintInstr* InsertConstraintFor(Definition* defn,
|
| - Range* constraint,
|
| - Instruction* after);
|
| -
|
| - void ConstrainValueAfterBranch(Definition* defn, Value* use);
|
| - void ConstrainValueAfterCheckArrayBound(Definition* defn,
|
| - CheckArrayBoundInstr* check,
|
| - intptr_t use_index);
|
| -
|
| - // Replace uses of the definition def that are dominated by instruction dom
|
| - // with uses of the definition other.
|
| - void RenameDominatedUses(Definition* def,
|
| - Instruction* dom,
|
| - Definition* other);
|
| -
|
| -
|
| - // Walk the dominator tree and infer ranges for smi values.
|
| - void InferRanges();
|
| - void InferRangesRecursive(BlockEntryInstr* block);
|
| -
|
| - enum Direction {
|
| - kUnknown,
|
| - kPositive,
|
| - kNegative,
|
| - kBoth
|
| - };
|
| -
|
| - Range* InferInductionVariableRange(JoinEntryInstr* loop_header,
|
| - PhiInstr* var);
|
| -
|
| - void ResetWorklist();
|
| - void MarkDefinition(Definition* defn);
|
| -
|
| - static Direction ToDirection(Value* val);
|
| -
|
| - static Direction Invert(Direction direction) {
|
| - return (direction == kPositive) ? kNegative : kPositive;
|
| - }
|
| -
|
| - static void UpdateDirection(Direction* direction,
|
| - Direction new_direction) {
|
| - if (*direction != new_direction) {
|
| - if (*direction != kUnknown) new_direction = kBoth;
|
| - *direction = new_direction;
|
| - }
|
| - }
|
| -
|
| - // Remove artificial Constraint instructions and replace them with actual
|
| - // unconstrained definitions.
|
| - void RemoveConstraints();
|
| -
|
| - Range* ConstraintRange(Token::Kind op, Definition* boundary);
|
| -
|
| - Isolate* isolate() const { return flow_graph_->isolate(); }
|
| -
|
| - FlowGraph* flow_graph_;
|
| -
|
| - // Values that are known to be smi or mint.
|
| - GrowableArray<Definition*> values_;
|
| - // All CheckSmi instructions.
|
| - GrowableArray<CheckSmiInstr*> smi_checks_;
|
| -
|
| - // All Constraints inserted during InsertConstraints phase. They are treated
|
| - // as smi values.
|
| - GrowableArray<ConstraintInstr*> constraints_;
|
| -
|
| - // Bitvector for quick filtering of known smi or mint values.
|
| - BitVector* definitions_;
|
| -
|
| - // Worklist for induction variables analysis.
|
| - GrowableArray<Definition*> worklist_;
|
| - BitVector* marked_defns_;
|
| -
|
| - DISALLOW_COPY_AND_ASSIGN(RangeAnalysis);
|
| -};
|
| -
|
| -
|
| -void RangeAnalysis::Analyze() {
|
| - CollectValues();
|
| - InsertConstraints();
|
| - InferRanges();
|
| - IntegerInstructionSelector iis(flow_graph_);
|
| - iis.Select();
|
| - RemoveConstraints();
|
| -}
|
| -
|
| -
|
| -void RangeAnalysis::CollectValues() {
|
| - const GrowableArray<Definition*>& initial =
|
| - *flow_graph_->graph_entry()->initial_definitions();
|
| - for (intptr_t i = 0; i < initial.length(); ++i) {
|
| - Definition* current = initial[i];
|
| - if (current->Type()->ToCid() == kSmiCid) {
|
| - values_.Add(current);
|
| - } else if (current->IsMintDefinition()) {
|
| - values_.Add(current);
|
| - }
|
| - }
|
| -
|
| - for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator();
|
| - !block_it.Done();
|
| - block_it.Advance()) {
|
| - BlockEntryInstr* block = block_it.Current();
|
| -
|
| -
|
| - if (block->IsGraphEntry() || block->IsCatchBlockEntry()) {
|
| - const GrowableArray<Definition*>& initial = block->IsGraphEntry()
|
| - ? *block->AsGraphEntry()->initial_definitions()
|
| - : *block->AsCatchBlockEntry()->initial_definitions();
|
| - for (intptr_t i = 0; i < initial.length(); ++i) {
|
| - Definition* current = initial[i];
|
| - if (current->Type()->ToCid() == kSmiCid) {
|
| - values_.Add(current);
|
| - } else if (current->IsMintDefinition()) {
|
| - values_.Add(current);
|
| - }
|
| - }
|
| - }
|
| -
|
| - JoinEntryInstr* join = block->AsJoinEntry();
|
| - if (join != NULL) {
|
| - for (PhiIterator phi_it(join); !phi_it.Done(); phi_it.Advance()) {
|
| - PhiInstr* current = phi_it.Current();
|
| - if (current->Type()->ToCid() == kSmiCid) {
|
| - values_.Add(current);
|
| - }
|
| - }
|
| - }
|
| -
|
| - for (ForwardInstructionIterator instr_it(block);
|
| - !instr_it.Done();
|
| - instr_it.Advance()) {
|
| - Instruction* current = instr_it.Current();
|
| - Definition* defn = current->AsDefinition();
|
| - if (defn != NULL) {
|
| - if ((defn->Type()->ToCid() == kSmiCid) &&
|
| - (defn->ssa_temp_index() != -1)) {
|
| - values_.Add(defn);
|
| - } else if ((defn->IsMintDefinition()) &&
|
| - (defn->ssa_temp_index() != -1)) {
|
| - values_.Add(defn);
|
| - }
|
| - } else if (current->IsCheckSmi()) {
|
| - smi_checks_.Add(current->AsCheckSmi());
|
| - }
|
| - }
|
| - }
|
| -}
|
| -
|
| -
|
| -// Returns true if use is dominated by the given instruction.
|
| -// Note: uses that occur at the instruction itself are not dominated by it.
|
| -static bool IsDominatedUse(Instruction* dom, Value* use) {
|
| - BlockEntryInstr* dom_block = dom->GetBlock();
|
| -
|
| - Instruction* instr = use->instruction();
|
| -
|
| - PhiInstr* phi = instr->AsPhi();
|
| - if (phi != NULL) {
|
| - return dom_block->Dominates(phi->block()->PredecessorAt(use->use_index()));
|
| - }
|
| -
|
| - BlockEntryInstr* use_block = instr->GetBlock();
|
| - if (use_block == dom_block) {
|
| - // Fast path for the case of block entry.
|
| - if (dom_block == dom) return true;
|
| -
|
| - for (Instruction* curr = dom->next(); curr != NULL; curr = curr->next()) {
|
| - if (curr == instr) return true;
|
| - }
|
| -
|
| - return false;
|
| - }
|
| -
|
| - return dom_block->Dominates(use_block);
|
| -}
|
| -
|
| -
|
| -void RangeAnalysis::RenameDominatedUses(Definition* def,
|
| - Instruction* dom,
|
| - Definition* other) {
|
| - for (Value::Iterator it(def->input_use_list());
|
| - !it.Done();
|
| - it.Advance()) {
|
| - Value* use = it.Current();
|
| -
|
| - // Skip dead phis.
|
| - PhiInstr* phi = use->instruction()->AsPhi();
|
| - ASSERT((phi == NULL) || phi->is_alive());
|
| - if (IsDominatedUse(dom, use)) {
|
| - use->BindTo(other);
|
| - }
|
| - }
|
| -}
|
| -
|
| -
|
| -// For a comparison operation return an operation for the equivalent flipped
|
| -// comparison: a (op) b === b (op') a.
|
| -static Token::Kind FlipComparison(Token::Kind op) {
|
| - switch (op) {
|
| - case Token::kEQ: return Token::kEQ;
|
| - case Token::kNE: return Token::kNE;
|
| - case Token::kLT: return Token::kGT;
|
| - case Token::kGT: return Token::kLT;
|
| - case Token::kLTE: return Token::kGTE;
|
| - case Token::kGTE: return Token::kLTE;
|
| - default:
|
| - UNREACHABLE();
|
| - return Token::kILLEGAL;
|
| - }
|
| -}
|
| -
|
| -
|
| -// Given a boundary (the right operand) and a comparison operation, return
|
| -// a symbolic range constraint for the left operand of the comparison, assuming
|
| -// that it evaluated to true.
|
| -// For example, for the comparison a < b, the symbol a is constrained with the
|
| -// range [Smi::kMinValue, b - 1].
|
| -Range* RangeAnalysis::ConstraintRange(Token::Kind op, Definition* boundary) {
|
| - switch (op) {
|
| - case Token::kEQ:
|
| - return new(I) Range(RangeBoundary::FromDefinition(boundary),
|
| - RangeBoundary::FromDefinition(boundary));
|
| - case Token::kNE:
|
| - return Range::Unknown();
|
| - case Token::kLT:
|
| - return new(I) Range(RangeBoundary::MinSmi(),
|
| - RangeBoundary::FromDefinition(boundary, -1));
|
| - case Token::kGT:
|
| - return new(I) Range(RangeBoundary::FromDefinition(boundary, 1),
|
| - RangeBoundary::MaxSmi());
|
| - case Token::kLTE:
|
| - return new(I) Range(RangeBoundary::MinSmi(),
|
| - RangeBoundary::FromDefinition(boundary));
|
| - case Token::kGTE:
|
| - return new(I) Range(RangeBoundary::FromDefinition(boundary),
|
| - RangeBoundary::MaxSmi());
|
| - default:
|
| - UNREACHABLE();
|
| - return Range::Unknown();
|
| - }
|
| -}
|
| -
|
| -
|
| -ConstraintInstr* RangeAnalysis::InsertConstraintFor(Definition* defn,
|
| - Range* constraint_range,
|
| - Instruction* after) {
|
| - // No need to constrain constants.
|
| - if (defn->IsConstant()) return NULL;
|
| -
|
| - ConstraintInstr* constraint = new(I) ConstraintInstr(
|
| - new(I) Value(defn), constraint_range);
|
| - flow_graph_->InsertAfter(after, constraint, NULL, FlowGraph::kValue);
|
| - RenameDominatedUses(defn, constraint, constraint);
|
| - constraints_.Add(constraint);
|
| - return constraint;
|
| -}
|
| -
|
| -
|
| -void RangeAnalysis::ConstrainValueAfterBranch(Definition* defn, Value* use) {
|
| - BranchInstr* branch = use->instruction()->AsBranch();
|
| - RelationalOpInstr* rel_op = branch->comparison()->AsRelationalOp();
|
| - if ((rel_op != NULL) && (rel_op->operation_cid() == kSmiCid)) {
|
| - // Found comparison of two smis. Constrain defn at true and false
|
| - // successors using the other operand as a boundary.
|
| - Definition* boundary;
|
| - Token::Kind op_kind;
|
| - if (use->use_index() == 0) { // Left operand.
|
| - boundary = rel_op->InputAt(1)->definition();
|
| - op_kind = rel_op->kind();
|
| - } else {
|
| - ASSERT(use->use_index() == 1); // Right operand.
|
| - boundary = rel_op->InputAt(0)->definition();
|
| - // InsertConstraintFor assumes that defn is the left operand of the
|
| - // comparison; if it is the right operand, flip the comparison.
|
| - op_kind = FlipComparison(rel_op->kind());
|
| - }
|
| -
|
| - // Constrain definition at the true successor.
|
| - ConstraintInstr* true_constraint =
|
| - InsertConstraintFor(defn,
|
| - ConstraintRange(op_kind, boundary),
|
| - branch->true_successor());
|
| - // Mark true_constraint an artificial use of boundary. This ensures
|
| - // that constraint's range is recalculated if boundary's range changes.
|
| - if (true_constraint != NULL) {
|
| - true_constraint->AddDependency(boundary);
|
| - true_constraint->set_target(branch->true_successor());
|
| - }
|
| -
|
| - // Constrain definition with a negated condition at the false successor.
|
| - ConstraintInstr* false_constraint =
|
| - InsertConstraintFor(
|
| - defn,
|
| - ConstraintRange(Token::NegateComparison(op_kind), boundary),
|
| - branch->false_successor());
|
| - // Mark false_constraint an artificial use of boundary. This ensures
|
| - // that constraint's range is recalculated if boundary's range changes.
|
| - if (false_constraint != NULL) {
|
| - false_constraint->AddDependency(boundary);
|
| - false_constraint->set_target(branch->false_successor());
|
| - }
|
| - }
|
| -}
|
| -
|
| -
|
| -void RangeAnalysis::InsertConstraintsFor(Definition* defn) {
|
| - for (Value* use = defn->input_use_list();
|
| - use != NULL;
|
| - use = use->next_use()) {
|
| - if (use->instruction()->IsBranch()) {
|
| - ConstrainValueAfterBranch(defn, use);
|
| - } else if (use->instruction()->IsCheckArrayBound()) {
|
| - ConstrainValueAfterCheckArrayBound(
|
| - defn,
|
| - use->instruction()->AsCheckArrayBound(),
|
| - use->use_index());
|
| - }
|
| - }
|
| -}
|
| -
|
| -
|
| -void RangeAnalysis::ConstrainValueAfterCheckArrayBound(
|
| - Definition* defn, CheckArrayBoundInstr* check, intptr_t use_index) {
|
| - Range* constraint_range = NULL;
|
| - if (use_index == CheckArrayBoundInstr::kIndexPos) {
|
| - Definition* length = check->length()->definition();
|
| - constraint_range = new(I) Range(
|
| - RangeBoundary::FromConstant(0),
|
| - RangeBoundary::FromDefinition(length, -1));
|
| - } else {
|
| - ASSERT(use_index == CheckArrayBoundInstr::kLengthPos);
|
| - Definition* index = check->index()->definition();
|
| - constraint_range = new(I) Range(
|
| - RangeBoundary::FromDefinition(index, 1),
|
| - RangeBoundary::MaxSmi());
|
| - }
|
| - InsertConstraintFor(defn, constraint_range, check);
|
| -}
|
| -
|
| -
|
| -void RangeAnalysis::InsertConstraints() {
|
| - for (intptr_t i = 0; i < smi_checks_.length(); i++) {
|
| - CheckSmiInstr* check = smi_checks_[i];
|
| - ConstraintInstr* constraint =
|
| - InsertConstraintFor(check->value()->definition(),
|
| - Range::UnknownSmi(),
|
| - check);
|
| - if (constraint == NULL) {
|
| - // No constraint was needed.
|
| - continue;
|
| - }
|
| - // Mark the constraint's value's reaching type as smi.
|
| - CompileType* smi_compile_type =
|
| - ZoneCompileType::Wrap(CompileType::FromCid(kSmiCid));
|
| - constraint->value()->SetReachingType(smi_compile_type);
|
| - }
|
| -
|
| - for (intptr_t i = 0; i < values_.length(); i++) {
|
| - InsertConstraintsFor(values_[i]);
|
| - }
|
| -
|
| - for (intptr_t i = 0; i < constraints_.length(); i++) {
|
| - InsertConstraintsFor(constraints_[i]);
|
| - }
|
| -}
|
| -
|
| -
|
| -void RangeAnalysis::ResetWorklist() {
|
| - if (marked_defns_ == NULL) {
|
| - marked_defns_ = new(I) BitVector(flow_graph_->current_ssa_temp_index());
|
| - } else {
|
| - marked_defns_->Clear();
|
| - }
|
| - worklist_.Clear();
|
| -}
|
| -
|
| -
|
| -void RangeAnalysis::MarkDefinition(Definition* defn) {
|
| - // Unwrap constrained value.
|
| - while (defn->IsConstraint()) {
|
| - defn = defn->AsConstraint()->value()->definition();
|
| - }
|
| -
|
| - if (!marked_defns_->Contains(defn->ssa_temp_index())) {
|
| - worklist_.Add(defn);
|
| - marked_defns_->Add(defn->ssa_temp_index());
|
| - }
|
| -}
|
| -
|
| -
|
| -RangeAnalysis::Direction RangeAnalysis::ToDirection(Value* val) {
|
| - if (val->BindsToConstant()) {
|
| - return (Smi::Cast(val->BoundConstant()).Value() >= 0) ? kPositive
|
| - : kNegative;
|
| - } else if (val->definition()->range() != NULL) {
|
| - Range* range = val->definition()->range();
|
| - if (Range::ConstantMin(range).ConstantValue() >= 0) {
|
| - return kPositive;
|
| - } else if (Range::ConstantMax(range).ConstantValue() <= 0) {
|
| - return kNegative;
|
| - }
|
| - }
|
| - return kUnknown;
|
| -}
|
| -
|
| -
|
| -Range* RangeAnalysis::InferInductionVariableRange(JoinEntryInstr* loop_header,
|
| - PhiInstr* var) {
|
| - BitVector* loop_info = loop_header->loop_info();
|
| -
|
| - Definition* initial_value = NULL;
|
| - Direction direction = kUnknown;
|
| -
|
| - ResetWorklist();
|
| - MarkDefinition(var);
|
| - while (!worklist_.is_empty()) {
|
| - Definition* defn = worklist_.RemoveLast();
|
| -
|
| - if (defn->IsPhi()) {
|
| - PhiInstr* phi = defn->AsPhi();
|
| - for (intptr_t i = 0; i < phi->InputCount(); i++) {
|
| - Definition* defn = phi->InputAt(i)->definition();
|
| -
|
| - if (!loop_info->Contains(defn->GetBlock()->preorder_number())) {
|
| - // The value is coming from outside of the loop.
|
| - if (initial_value == NULL) {
|
| - initial_value = defn;
|
| - continue;
|
| - } else if (initial_value == defn) {
|
| - continue;
|
| - } else {
|
| - return NULL;
|
| - }
|
| - }
|
| -
|
| - MarkDefinition(defn);
|
| - }
|
| - } else if (defn->IsBinarySmiOp()) {
|
| - BinarySmiOpInstr* binary_op = defn->AsBinarySmiOp();
|
| -
|
| - switch (binary_op->op_kind()) {
|
| - case Token::kADD: {
|
| - const Direction growth_right =
|
| - ToDirection(binary_op->right());
|
| - if (growth_right != kUnknown) {
|
| - UpdateDirection(&direction, growth_right);
|
| - MarkDefinition(binary_op->left()->definition());
|
| - break;
|
| - }
|
| -
|
| - const Direction growth_left =
|
| - ToDirection(binary_op->left());
|
| - if (growth_left != kUnknown) {
|
| - UpdateDirection(&direction, growth_left);
|
| - MarkDefinition(binary_op->right()->definition());
|
| - break;
|
| - }
|
| -
|
| - return NULL;
|
| - }
|
| -
|
| - case Token::kSUB: {
|
| - const Direction growth_right =
|
| - ToDirection(binary_op->right());
|
| - if (growth_right != kUnknown) {
|
| - UpdateDirection(&direction, Invert(growth_right));
|
| - MarkDefinition(binary_op->left()->definition());
|
| - break;
|
| - }
|
| - return NULL;
|
| - }
|
| -
|
| - default:
|
| - return NULL;
|
| - }
|
| - } else {
|
| - return NULL;
|
| - }
|
| - }
|
| -
|
| -
|
| - // We transitively discovered all dependencies of the given phi
|
| - // and confirmed that it depends on a single value coming from outside of
|
| - // the loop and some linear combinations of itself.
|
| - // Compute the range based on initial value and the direction of the growth.
|
| - switch (direction) {
|
| - case kPositive:
|
| - return new(I) Range(RangeBoundary::FromDefinition(initial_value),
|
| - RangeBoundary::MaxSmi());
|
| -
|
| - case kNegative:
|
| - return new(I) Range(RangeBoundary::MinSmi(),
|
| - RangeBoundary::FromDefinition(initial_value));
|
| -
|
| - case kUnknown:
|
| - case kBoth:
|
| - return Range::UnknownSmi();
|
| - }
|
| -
|
| - UNREACHABLE();
|
| - return NULL;
|
| -}
|
| -
|
| -
|
| -void RangeAnalysis::InferRangesRecursive(BlockEntryInstr* block) {
|
| - JoinEntryInstr* join = block->AsJoinEntry();
|
| - if (join != NULL) {
|
| - const bool is_loop_header = (join->loop_info() != NULL);
|
| - for (PhiIterator it(join); !it.Done(); it.Advance()) {
|
| - PhiInstr* phi = it.Current();
|
| - if (definitions_->Contains(phi->ssa_temp_index())) {
|
| - if (is_loop_header) {
|
| - // Try recognizing simple induction variables.
|
| - Range* range = InferInductionVariableRange(join, phi);
|
| - if (range != NULL) {
|
| - phi->range_ = range;
|
| - continue;
|
| - }
|
| - }
|
| -
|
| - phi->InferRange();
|
| - }
|
| - }
|
| - }
|
| -
|
| - for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
|
| - Instruction* current = it.Current();
|
| -
|
| - Definition* defn = current->AsDefinition();
|
| - if ((defn != NULL) &&
|
| - (defn->ssa_temp_index() != -1) &&
|
| - definitions_->Contains(defn->ssa_temp_index())) {
|
| - defn->InferRange();
|
| - } else if (FLAG_array_bounds_check_elimination &&
|
| - current->IsCheckArrayBound()) {
|
| - CheckArrayBoundInstr* check = current->AsCheckArrayBound();
|
| - RangeBoundary array_length =
|
| - RangeBoundary::FromDefinition(check->length()->definition());
|
| - if (check->IsRedundant(array_length)) {
|
| - it.RemoveCurrentFromGraph();
|
| - }
|
| - }
|
| - }
|
| -
|
| - for (intptr_t i = 0; i < block->dominated_blocks().length(); ++i) {
|
| - InferRangesRecursive(block->dominated_blocks()[i]);
|
| - }
|
| -}
|
| -
|
| -
|
| -void RangeAnalysis::InferRanges() {
|
| - if (FLAG_trace_range_analysis) {
|
| - OS::Print("---- before range analysis -------\n");
|
| - FlowGraphPrinter printer(*flow_graph_);
|
| - printer.PrintBlocks();
|
| - }
|
| - // Initialize bitvector for quick filtering of int values.
|
| - definitions_ =
|
| - new(I) BitVector(flow_graph_->current_ssa_temp_index());
|
| - for (intptr_t i = 0; i < values_.length(); i++) {
|
| - definitions_->Add(values_[i]->ssa_temp_index());
|
| - }
|
| - for (intptr_t i = 0; i < constraints_.length(); i++) {
|
| - definitions_->Add(constraints_[i]->ssa_temp_index());
|
| - }
|
| -
|
| - // Infer initial values of ranges.
|
| - const GrowableArray<Definition*>& initial =
|
| - *flow_graph_->graph_entry()->initial_definitions();
|
| - for (intptr_t i = 0; i < initial.length(); ++i) {
|
| - Definition* definition = initial[i];
|
| - if (definitions_->Contains(definition->ssa_temp_index())) {
|
| - definition->InferRange();
|
| - }
|
| - }
|
| - InferRangesRecursive(flow_graph_->graph_entry());
|
| -
|
| - if (FLAG_trace_range_analysis) {
|
| - OS::Print("---- after range analysis -------\n");
|
| - FlowGraphPrinter printer(*flow_graph_);
|
| - printer.PrintBlocks();
|
| - }
|
| -}
|
| -
|
| -
|
| -void RangeAnalysis::RemoveConstraints() {
|
| - for (intptr_t i = 0; i < constraints_.length(); i++) {
|
| - Definition* def = constraints_[i]->value()->definition();
|
| - // Some constraints might be constraining constraints. Unwind the chain of
|
| - // constraints until we reach the actual definition.
|
| - while (def->IsConstraint()) {
|
| - def = def->AsConstraint()->value()->definition();
|
| - }
|
| - constraints_[i]->ReplaceUsesWith(def);
|
| - constraints_[i]->RemoveFromGraph();
|
| - }
|
| -}
|
| -
|
| -
|
| void FlowGraphOptimizer::InferIntRanges() {
|
| RangeAnalysis range_analysis(flow_graph_);
|
| range_analysis.Analyze();
|
|
|