Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file |
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
| 4 | 4 |
| 5 #include "vm/flow_graph_range_analysis.h" | 5 #include "vm/flow_graph_range_analysis.h" |
| 6 | 6 |
| 7 #include "vm/bit_vector.h" | 7 #include "vm/bit_vector.h" |
| 8 #include "vm/il_printer.h" | 8 #include "vm/il_printer.h" |
| 9 | 9 |
| 10 namespace dart { | 10 namespace dart { |
| 11 | 11 |
| 12 DEFINE_FLAG(bool, array_bounds_check_elimination, true, | 12 DEFINE_FLAG(bool, array_bounds_check_elimination, true, |
| 13 "Eliminate redundant bounds checks."); | 13 "Eliminate redundant bounds checks."); |
| 14 DEFINE_FLAG(bool, trace_range_analysis, false, "Trace range analysis progress"); | 14 DEFINE_FLAG(bool, trace_range_analysis, false, "Trace range analysis progress"); |
| 15 DEFINE_FLAG(bool, trace_integer_ir_selection, false, | 15 DEFINE_FLAG(bool, trace_integer_ir_selection, false, |
| 16 "Print integer IR selection optimization pass."); | 16 "Print integer IR selection optimization pass."); |
| 17 DECLARE_FLAG(bool, trace_constant_propagation); | 17 DECLARE_FLAG(bool, trace_constant_propagation); |
| 18 | 18 |
| 19 // Quick access to the locally defined isolate() method. | 19 // Quick access to the locally defined isolate() method. |
| 20 #define I (isolate()) | 20 #define I (isolate()) |
| 21 | 21 |
| 22 void RangeAnalysis::Analyze() { | 22 void RangeAnalysis::Analyze() { |
| 23 CollectValues(); | 23 CollectValues(); |
| 24 | |
| 25 if (FLAG_trace_range_analysis) { | |
| 26 FlowGraphPrinter::PrintGraph("Range Analysis (BBB)", flow_graph_); | |
| 27 } | |
| 28 | |
| 29 InsertConstraints(); | 24 InsertConstraints(); |
| 25 DiscoverSimpleInductionVariables(); | |
| 30 InferRanges(); | 26 InferRanges(); |
| 31 EliminateRedundantBoundsChecks(); | 27 EliminateRedundantBoundsChecks(); |
| 32 MarkUnreachableBlocks(); | 28 MarkUnreachableBlocks(); |
| 33 | 29 |
| 34 NarrowMintToInt32(); | 30 NarrowMintToInt32(); |
| 35 | 31 |
| 36 IntegerInstructionSelector iis(flow_graph_); | 32 IntegerInstructionSelector iis(flow_graph_); |
| 37 iis.Select(); | 33 iis.Select(); |
| 38 | 34 |
| 39 RemoveConstraints(); | 35 RemoveConstraints(); |
| 40 } | 36 } |
| 41 | 37 |
| 42 | 38 |
| 39 static Definition* UnwrapConstraint(Definition* defn) { | |
| 40 while (defn->IsConstraint()) { | |
| 41 defn = defn->AsConstraint()->value()->definition(); | |
| 42 } | |
| 43 return defn; | |
| 44 } | |
| 45 | |
| 46 | |
| 47 // Simple induction variable is a variable that satisfies the following pattern: | |
| 48 // | |
| 49 // v1 <- phi(v0, v1 + 1) | |
| 50 // | |
| 51 // If there are two simple induction variables in the same block and one of | |
| 52 // them is constrained - then another one is constrained as well, e.g. | |
| 53 // from | |
| 54 // | |
| 55 // B1: | |
| 56 // v3 <- phi(v0, v3 + 1) | |
| 57 // v4 <- phi(v2, v4 + 1) | |
| 58 // Bx: | |
| 59 // v3 is constrained to [v0, v1] | |
| 60 // | |
| 61 // it follows that | |
| 62 // | |
| 63 // Bx: | |
| 64 // v4 is constrained to [v2, v2 + (v1 - v0)] | |
| 65 // | |
| 66 // This pass essentially pattern matches induction variables introduced | |
| 67 // like this: | |
| 68 // | |
| 69 // for (var i = i0, j = j0; i < L; i++, j++) { | |
| 70 // j is known to be within [j0, j0 + (L - i0 - 1)] | |
| 71 // } | |
| 72 // | |
| 73 class InductionVariableInfo : public ZoneAllocated { | |
| 74 public: | |
| 75 InductionVariableInfo(PhiInstr* phi, | |
| 76 Definition* initial_value, | |
| 77 BinarySmiOpInstr* increment, | |
| 78 ConstraintInstr* limit) | |
| 79 : phi_(phi), | |
| 80 initial_value_(initial_value), | |
| 81 increment_(increment), | |
| 82 limit_(limit), | |
| 83 bound_(NULL) { } | |
| 84 | |
| 85 PhiInstr* phi() const { return phi_; } | |
| 86 Definition* initial_value() const { return initial_value_; } | |
| 87 BinarySmiOpInstr* increment() const { return increment_; } | |
| 88 | |
| 89 // Outermost constraint that constrains this induction variable into | |
| 90 // [-inf, X] range. | |
| 91 ConstraintInstr* limit() const { return limit_; } | |
| 92 | |
| 93 // Induction variable from the same join block that has limiting constraint. | |
| 94 PhiInstr* bound() const { return bound_; } | |
| 95 void set_bound(PhiInstr* bound) { bound_ = bound; } | |
| 96 | |
| 97 private: | |
| 98 PhiInstr* phi_; | |
| 99 Definition* initial_value_; | |
| 100 BinarySmiOpInstr* increment_; | |
| 101 ConstraintInstr* limit_; | |
| 102 | |
| 103 PhiInstr* bound_; | |
| 104 }; | |
| 105 | |
| 106 | |
| 107 static ConstraintInstr* FindBoundingConstraint(PhiInstr* phi, | |
| 108 Definition* defn) { | |
| 109 ConstraintInstr* limit = NULL; | |
| 110 for (ConstraintInstr* constraint = defn->AsConstraint(); | |
| 111 constraint != NULL; | |
| 112 constraint = constraint->value()->definition()->AsConstraint()) { | |
| 113 if (constraint->target() == NULL) { | |
| 114 continue; // Only interested in non-artificial constraints. | |
| 115 } | |
| 116 | |
| 117 Range* constraining_range = constraint->constraint(); | |
| 118 if (constraining_range->min().Equals(RangeBoundary::MinSmi()) && | |
| 119 (constraining_range->max().IsSymbol() && | |
| 120 phi->IsDominatedBy(constraining_range->max().symbol()))) { | |
| 121 limit = constraint; | |
| 122 } | |
| 123 } | |
| 124 | |
| 125 return limit; | |
| 126 } | |
| 127 | |
| 128 | |
| 129 static InductionVariableInfo* DetectSimpleInductionVariable(PhiInstr* phi) { | |
| 130 if (phi->Type()->ToCid() != kSmiCid) { | |
| 131 return false; | |
| 132 } | |
| 133 | |
| 134 if (phi->InputCount() != 2) { | |
| 135 return false; | |
| 136 } | |
| 137 | |
| 138 BitVector* loop_info = phi->block()->loop_info(); | |
| 139 | |
| 140 const intptr_t backedge_idx = | |
| 141 loop_info->Contains(phi->block()->PredecessorAt(0)->preorder_number()) | |
| 142 ? 0 : 1; | |
| 143 | |
| 144 Definition* initial_value = | |
| 145 phi->InputAt(1 - backedge_idx)->definition(); | |
| 146 | |
| 147 BinarySmiOpInstr* increment = | |
| 148 UnwrapConstraint(phi->InputAt(backedge_idx)->definition())-> | |
| 149 AsBinarySmiOp(); | |
| 150 | |
| 151 if ((increment != NULL) && | |
| 152 (increment->op_kind() == Token::kADD) && | |
| 153 (UnwrapConstraint(increment->left()->definition()) == phi) && | |
| 154 increment->right()->BindsToConstant() && | |
| 155 increment->right()->BoundConstant().IsSmi() && | |
| 156 (Smi::Cast(increment->right()->BoundConstant()).Value() == 1)) { | |
| 157 return new InductionVariableInfo( | |
| 158 phi, | |
| 159 initial_value, | |
| 160 increment, | |
| 161 FindBoundingConstraint(phi, increment->left()->definition())); | |
| 162 } | |
| 163 | |
| 164 return NULL; | |
| 165 } | |
| 166 | |
| 167 | |
| 168 void RangeAnalysis::DiscoverSimpleInductionVariables() { | |
| 169 GrowableArray<InductionVariableInfo*> loop_variables; | |
| 170 | |
| 171 for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator(); | |
| 172 !block_it.Done(); | |
| 173 block_it.Advance()) { | |
| 174 BlockEntryInstr* block = block_it.Current(); | |
| 175 | |
| 176 JoinEntryInstr* join = block->AsJoinEntry(); | |
| 177 if (join != NULL && join->loop_info() != NULL) { | |
| 178 loop_variables.Clear(); | |
| 179 | |
| 180 for (PhiIterator phi_it(join); !phi_it.Done(); phi_it.Advance()) { | |
| 181 PhiInstr* current = phi_it.Current(); | |
| 182 | |
| 183 InductionVariableInfo* info = DetectSimpleInductionVariable(current); | |
| 184 if (info != NULL) { | |
| 185 if (FLAG_trace_range_analysis) { | |
| 186 OS::Print("Simple loop variable: %s bound <%s>\n", | |
| 187 current->ToCString(), | |
| 188 info->limit() != NULL ? | |
| 189 info->limit()->ToCString() : "?"); | |
| 190 } | |
| 191 | |
| 192 loop_variables.Add(info); | |
| 193 } | |
| 194 } | |
| 195 } | |
| 196 | |
| 197 InductionVariableInfo* bound = NULL; | |
| 198 for (intptr_t i = 0; i < loop_variables.length(); i++) { | |
| 199 if (loop_variables[i]->limit() != NULL) { | |
| 200 bound = loop_variables[i]; | |
| 201 break; | |
| 202 } | |
| 203 } | |
| 204 | |
| 205 if (bound != NULL) { | |
| 206 for (intptr_t i = 0; i < loop_variables.length(); i++) { | |
| 207 InductionVariableInfo* info = loop_variables[i]; | |
| 208 info->set_bound(bound->phi()); | |
| 209 info->phi()->set_induction_variable_info(info); | |
| 210 } | |
| 211 } | |
| 212 } | |
| 213 } | |
| 214 | |
| 215 | |
| 43 void RangeAnalysis::CollectValues() { | 216 void RangeAnalysis::CollectValues() { |
| 44 const GrowableArray<Definition*>& initial = | 217 const GrowableArray<Definition*>& initial = |
| 45 *flow_graph_->graph_entry()->initial_definitions(); | 218 *flow_graph_->graph_entry()->initial_definitions(); |
| 46 for (intptr_t i = 0; i < initial.length(); ++i) { | 219 for (intptr_t i = 0; i < initial.length(); ++i) { |
| 47 Definition* current = initial[i]; | 220 Definition* current = initial[i]; |
| 48 if (current->Type()->ToCid() == kSmiCid) { | 221 if (IsIntegerDefinition(current)) { |
| 49 values_.Add(current); | |
| 50 } else if (current->IsMintDefinition()) { | |
| 51 values_.Add(current); | |
| 52 } else if (current->IsInt32Definition()) { | |
| 53 values_.Add(current); | 222 values_.Add(current); |
| 54 } | 223 } |
| 55 } | 224 } |
| 56 | 225 |
| 57 for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator(); | 226 for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator(); |
| 58 !block_it.Done(); | 227 !block_it.Done(); |
| 59 block_it.Advance()) { | 228 block_it.Advance()) { |
| 60 BlockEntryInstr* block = block_it.Current(); | 229 BlockEntryInstr* block = block_it.Current(); |
| 61 | 230 |
| 62 | 231 |
| 63 if (block->IsGraphEntry() || block->IsCatchBlockEntry()) { | 232 if (block->IsGraphEntry() || block->IsCatchBlockEntry()) { |
| 64 const GrowableArray<Definition*>& initial = block->IsGraphEntry() | 233 const GrowableArray<Definition*>& initial = block->IsGraphEntry() |
| 65 ? *block->AsGraphEntry()->initial_definitions() | 234 ? *block->AsGraphEntry()->initial_definitions() |
| 66 : *block->AsCatchBlockEntry()->initial_definitions(); | 235 : *block->AsCatchBlockEntry()->initial_definitions(); |
| 67 for (intptr_t i = 0; i < initial.length(); ++i) { | 236 for (intptr_t i = 0; i < initial.length(); ++i) { |
| 68 Definition* current = initial[i]; | 237 Definition* current = initial[i]; |
| 69 if (current->Type()->ToCid() == kSmiCid) { | 238 if (IsIntegerDefinition(current)) { |
| 70 values_.Add(current); | |
| 71 } else if (current->IsMintDefinition()) { | |
| 72 values_.Add(current); | |
| 73 } else if (current->IsInt32Definition()) { | |
| 74 values_.Add(current); | 239 values_.Add(current); |
| 75 } | 240 } |
| 76 } | 241 } |
| 77 } | 242 } |
| 78 | 243 |
| 79 JoinEntryInstr* join = block->AsJoinEntry(); | 244 JoinEntryInstr* join = block->AsJoinEntry(); |
| 80 if (join != NULL) { | 245 if (join != NULL) { |
| 81 for (PhiIterator phi_it(join); !phi_it.Done(); phi_it.Advance()) { | 246 for (PhiIterator phi_it(join); !phi_it.Done(); phi_it.Advance()) { |
| 82 PhiInstr* current = phi_it.Current(); | 247 PhiInstr* current = phi_it.Current(); |
| 83 if (current->Type()->ToCid() == kSmiCid) { | 248 if ((current->Type()->ToCid() == kSmiCid) || |
| 84 values_.Add(current); | 249 (current->representation() == kUnboxedInt32)) { |
| 85 } else if (current->representation() == kUnboxedInt32) { | |
| 86 values_.Add(current); | 250 values_.Add(current); |
| 87 } | 251 } |
| 88 } | 252 } |
| 89 } | 253 } |
| 90 | 254 |
| 91 for (ForwardInstructionIterator instr_it(block); | 255 for (ForwardInstructionIterator instr_it(block); |
| 92 !instr_it.Done(); | 256 !instr_it.Done(); |
| 93 instr_it.Advance()) { | 257 instr_it.Advance()) { |
| 94 Instruction* current = instr_it.Current(); | 258 Instruction* current = instr_it.Current(); |
| 95 Definition* defn = current->AsDefinition(); | 259 Definition* defn = current->AsDefinition(); |
| 96 if (defn != NULL) { | 260 if (defn != NULL) { |
| 97 if ((defn->Type()->ToCid() == kSmiCid) && | 261 if (defn->HasSSATemp() && IsIntegerDefinition(defn)) { |
| 98 (defn->ssa_temp_index() != -1)) { | |
| 99 values_.Add(defn); | |
| 100 } else if ((defn->IsMintDefinition()) && | |
| 101 (defn->ssa_temp_index() != -1)) { | |
| 102 values_.Add(defn); | 262 values_.Add(defn); |
| 103 if (defn->IsBinaryMintOp()) { | 263 if (defn->IsBinaryMintOp()) { |
| 104 binary_mint_ops_.Add(defn->AsBinaryMintOp()); | 264 binary_mint_ops_.Add(defn->AsBinaryMintOp()); |
| 105 } else if (defn->IsShiftMintOp()) { | 265 } else if (defn->IsShiftMintOp()) { |
| 106 shift_mint_ops_.Add(defn->AsShiftMintOp()); | 266 shift_mint_ops_.Add(defn->AsShiftMintOp()); |
| 107 } | 267 } |
| 108 } else if (defn->IsInt32Definition()) { | |
| 109 values_.Add(defn); | |
| 110 } | 268 } |
| 111 } else if (current->IsCheckArrayBound()) { | 269 } else if (current->IsCheckArrayBound()) { |
| 112 bounds_checks_.Add(current->AsCheckArrayBound()); | 270 bounds_checks_.Add(current->AsCheckArrayBound()); |
| 113 } | 271 } |
| 114 } | 272 } |
| 115 } | 273 } |
| 116 } | 274 } |
| 117 | 275 |
| 118 | 276 |
| 119 // Returns true if use is dominated by the given instruction. | 277 // Returns true if use is dominated by the given instruction. |
| (...skipping 111 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 231 constraint = new(I) ConstraintInstr( | 389 constraint = new(I) ConstraintInstr( |
| 232 use->CopyWithType(), constraint_range); | 390 use->CopyWithType(), constraint_range); |
| 233 | 391 |
| 234 flow_graph_->InsertAfter(after, constraint, NULL, FlowGraph::kValue); | 392 flow_graph_->InsertAfter(after, constraint, NULL, FlowGraph::kValue); |
| 235 RenameDominatedUses(defn, constraint, constraint); | 393 RenameDominatedUses(defn, constraint, constraint); |
| 236 constraints_.Add(constraint); | 394 constraints_.Add(constraint); |
| 237 return constraint; | 395 return constraint; |
| 238 } | 396 } |
| 239 | 397 |
| 240 | 398 |
| 241 void RangeAnalysis::ConstrainValueAfterBranch(Value* use, Definition* defn) { | 399 bool RangeAnalysis::ConstrainValueAfterBranch(Value* use, Definition* defn) { |
| 242 BranchInstr* branch = use->instruction()->AsBranch(); | 400 BranchInstr* branch = use->instruction()->AsBranch(); |
| 243 RelationalOpInstr* rel_op = branch->comparison()->AsRelationalOp(); | 401 RelationalOpInstr* rel_op = branch->comparison()->AsRelationalOp(); |
| 244 if ((rel_op != NULL) && (rel_op->operation_cid() == kSmiCid)) { | 402 if ((rel_op != NULL) && (rel_op->operation_cid() == kSmiCid)) { |
| 245 // Found comparison of two smis. Constrain defn at true and false | 403 // Found comparison of two smis. Constrain defn at true and false |
| 246 // successors using the other operand as a boundary. | 404 // successors using the other operand as a boundary. |
| 247 Definition* boundary; | 405 Definition* boundary; |
| 248 Token::Kind op_kind; | 406 Token::Kind op_kind; |
| 249 if (use->use_index() == 0) { // Left operand. | 407 if (use->use_index() == 0) { // Left operand. |
| 250 boundary = rel_op->InputAt(1)->definition(); | 408 boundary = rel_op->InputAt(1)->definition(); |
| 251 op_kind = rel_op->kind(); | 409 op_kind = rel_op->kind(); |
| 252 } else { | 410 } else { |
| 253 ASSERT(use->use_index() == 1); // Right operand. | 411 ASSERT(use->use_index() == 1); // Right operand. |
| 254 boundary = rel_op->InputAt(0)->definition(); | 412 boundary = rel_op->InputAt(0)->definition(); |
| 255 // InsertConstraintFor assumes that defn is left operand of a | 413 // InsertConstraintFor assumes that defn is left operand of a |
| 256 // comparison; if it is right operand, flip the comparison. | 414 // comparison; if it is right operand, flip the comparison. |
| 257 op_kind = FlipComparison(rel_op->kind()); | 415 op_kind = FlipComparison(rel_op->kind()); |
| 258 } | 416 } |
| 259 | 417 |
| 260 // Constrain definition at the true successor. | 418 // Constrain definition at the true successor. |
| 261 ConstraintInstr* true_constraint = | 419 ConstraintInstr* true_constraint = |
| 262 InsertConstraintFor(use, | 420 InsertConstraintFor(use, |
| 263 defn, | 421 defn, |
| 264 ConstraintSmiRange(op_kind, boundary), | 422 ConstraintSmiRange(op_kind, boundary), |
| 265 branch->true_successor()); | 423 branch->true_successor()); |
| 266 // Mark true_constraint an artificial use of boundary. This ensures | |
| 267 // that constraint's range is recalculated if boundary's range changes. | |
| 268 if (true_constraint != NULL) { | 424 if (true_constraint != NULL) { |
| 269 true_constraint->AddDependency(boundary); | |
| 270 true_constraint->set_target(branch->true_successor()); | 425 true_constraint->set_target(branch->true_successor()); |
| 271 } | 426 } |
| 272 | 427 |
| 273 // Constrain definition with a negated condition at the false successor. | 428 // Constrain definition with a negated condition at the false successor. |
| 274 ConstraintInstr* false_constraint = | 429 ConstraintInstr* false_constraint = |
| 275 InsertConstraintFor( | 430 InsertConstraintFor( |
| 276 use, | 431 use, |
| 277 defn, | 432 defn, |
| 278 ConstraintSmiRange(Token::NegateComparison(op_kind), boundary), | 433 ConstraintSmiRange(Token::NegateComparison(op_kind), boundary), |
| 279 branch->false_successor()); | 434 branch->false_successor()); |
| 280 // Mark false_constraint an artificial use of boundary. This ensures | |
| 281 // that constraint's range is recalculated if boundary's range changes. | |
| 282 if (false_constraint != NULL) { | 435 if (false_constraint != NULL) { |
| 283 false_constraint->AddDependency(boundary); | |
| 284 false_constraint->set_target(branch->false_successor()); | 436 false_constraint->set_target(branch->false_successor()); |
| 285 } | 437 } |
| 438 | |
| 439 return true; | |
| 286 } | 440 } |
| 441 | |
| 442 return false; | |
| 287 } | 443 } |
| 288 | 444 |
| 289 | 445 |
| 290 void RangeAnalysis::InsertConstraintsFor(Definition* defn) { | 446 void RangeAnalysis::InsertConstraintsFor(Definition* defn) { |
| 291 for (Value* use = defn->input_use_list(); | 447 for (Value* use = defn->input_use_list(); |
| 292 use != NULL; | 448 use != NULL; |
| 293 use = use->next_use()) { | 449 use = use->next_use()) { |
| 294 if (use->instruction()->IsBranch()) { | 450 if (use->instruction()->IsBranch()) { |
| 295 ConstrainValueAfterBranch(use, defn); | 451 if (ConstrainValueAfterBranch(use, defn)) { |
| 452 Value* other_value = use->instruction()->InputAt(1 - use->use_index()); | |
| 453 if (!IsIntegerDefinition(other_value->definition())) { | |
| 454 ConstrainValueAfterBranch(other_value, other_value->definition()); | |
| 455 } | |
| 456 } | |
| 296 } else if (use->instruction()->IsCheckArrayBound()) { | 457 } else if (use->instruction()->IsCheckArrayBound()) { |
| 297 ConstrainValueAfterCheckArrayBound(use, defn); | 458 ConstrainValueAfterCheckArrayBound(use, defn); |
| 298 } | 459 } |
| 299 } | 460 } |
| 300 } | 461 } |
| 301 | 462 |
| 302 | 463 |
| 303 void RangeAnalysis::ConstrainValueAfterCheckArrayBound( | 464 void RangeAnalysis::ConstrainValueAfterCheckArrayBound( |
| 304 Value* use, | 465 Value* use, |
| 305 Definition* defn) { | 466 Definition* defn) { |
| (...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 349 // and will have a range assigned to it. | 510 // and will have a range assigned to it. |
| 350 // Note: that we can't return NULL here because it is used as lattice's | 511 // Note: that we can't return NULL here because it is used as lattice's |
| 351 // bottom element to indicate that the range was not computed *yet*. | 512 // bottom element to indicate that the range was not computed *yet*. |
| 352 return &smi_range_; | 513 return &smi_range_; |
| 353 } | 514 } |
| 354 | 515 |
| 355 return range; | 516 return range; |
| 356 } | 517 } |
| 357 | 518 |
| 358 | 519 |
| 359 static Definition* UnwrapConstraint(Definition* defn) { | |
| 360 while (defn->IsConstraint()) { | |
| 361 defn = defn->AsConstraint()->value()->definition(); | |
| 362 } | |
| 363 return defn; | |
| 364 } | |
| 365 | |
| 366 | |
| 367 static bool AreEqualDefinitions(Definition* a, Definition* b) { | 520 static bool AreEqualDefinitions(Definition* a, Definition* b) { |
| 368 a = UnwrapConstraint(a); | 521 a = UnwrapConstraint(a); |
| 369 b = UnwrapConstraint(b); | 522 b = UnwrapConstraint(b); |
| 370 return (a == b) || | 523 return (a == b) || |
| 371 (a->AllowsCSE() && | 524 (a->AllowsCSE() && |
| 372 a->Dependencies().IsNone() && | 525 a->Dependencies().IsNone() && |
| 373 b->AllowsCSE() && | 526 b->AllowsCSE() && |
| 374 b->Dependencies().IsNone() && | 527 b->Dependencies().IsNone() && |
| 375 a->Equals(b)); | 528 a->Equals(b)); |
| 376 } | 529 } |
| (...skipping 142 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 519 } | 672 } |
| 520 defn->set_range(range); | 673 defn->set_range(range); |
| 521 return true; | 674 return true; |
| 522 } | 675 } |
| 523 } | 676 } |
| 524 | 677 |
| 525 return false; | 678 return false; |
| 526 } | 679 } |
| 527 | 680 |
| 528 | 681 |
| 529 void RangeAnalysis::CollectDefinitions(BlockEntryInstr* block, BitVector* set) { | 682 void RangeAnalysis::CollectDefinitions(BitVector* set) { |
| 530 for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator(); | 683 for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator(); |
| 531 !block_it.Done(); | 684 !block_it.Done(); |
| 532 block_it.Advance()) { | 685 block_it.Advance()) { |
| 533 BlockEntryInstr* block = block_it.Current(); | 686 BlockEntryInstr* block = block_it.Current(); |
| 534 | 687 |
| 535 JoinEntryInstr* join = block->AsJoinEntry(); | 688 JoinEntryInstr* join = block->AsJoinEntry(); |
| 536 if (join != NULL) { | 689 if (join != NULL) { |
| 537 for (PhiIterator it(join); !it.Done(); it.Advance()) { | 690 for (PhiIterator it(join); !it.Done(); it.Advance()) { |
| 538 PhiInstr* phi = it.Current(); | 691 PhiInstr* phi = it.Current(); |
| 539 if (set->Contains(phi->ssa_temp_index())) { | 692 if (set->Contains(phi->ssa_temp_index())) { |
| (...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 590 // postorder. This improves convergence speed compared to iterating | 743 // postorder. This improves convergence speed compared to iterating |
| 591 // values_ and constraints_ array separately. | 744 // values_ and constraints_ array separately. |
| 592 const GrowableArray<Definition*>& initial = | 745 const GrowableArray<Definition*>& initial = |
| 593 *flow_graph_->graph_entry()->initial_definitions(); | 746 *flow_graph_->graph_entry()->initial_definitions(); |
| 594 for (intptr_t i = 0; i < initial.length(); ++i) { | 747 for (intptr_t i = 0; i < initial.length(); ++i) { |
| 595 Definition* definition = initial[i]; | 748 Definition* definition = initial[i]; |
| 596 if (set->Contains(definition->ssa_temp_index())) { | 749 if (set->Contains(definition->ssa_temp_index())) { |
| 597 definitions_.Add(definition); | 750 definitions_.Add(definition); |
| 598 } | 751 } |
| 599 } | 752 } |
| 600 CollectDefinitions(flow_graph_->graph_entry(), set); | 753 CollectDefinitions(set); |
| 601 | 754 |
| 602 // Perform an iteration of range inference just propagating ranges | 755 // Perform an iteration of range inference just propagating ranges |
| 603 // through the graph as-is without applying widening or narrowing. | 756 // through the graph as-is without applying widening or narrowing. |
| 604 // This helps to improve precision of initial bounds. | 757 // This helps to improve precision of initial bounds. |
| 605 Iterate(NONE, 1); | 758 Iterate(NONE, 1); |
| 606 | 759 |
| 607 // Perform fix-point iteration of range inference applying widening | 760 // Perform fix-point iteration of range inference applying widening |
| 608 // operator to phis to ensure fast convergence. | 761 // operator to phis to ensure fast convergence. |
| 609 // Widening simply maps growing bounds to the respective range bound. | 762 // Widening simply maps growing bounds to the respective range bound. |
| 610 Iterate(WIDEN, kMaxInt32); | 763 Iterate(WIDEN, kMaxInt32); |
| 611 | 764 |
| 612 if (FLAG_trace_range_analysis) { | 765 if (FLAG_trace_range_analysis) { |
| 613 FlowGraphPrinter::PrintGraph("Range Analysis (WIDEN)", flow_graph_); | 766 FlowGraphPrinter::PrintGraph("Range Analysis (WIDEN)", flow_graph_); |
| 614 } | 767 } |
| 615 | 768 |
| 616 // Perform fix-point iteration of range inference applying narrowing | 769 // Perform fix-point iteration of range inference applying narrowing |
| 617 // to phis to compute more accurate range. | 770 // to phis to compute more accurate range. |
| 618 // Narrowing only improves those boundaries that were widened up to | 771 // Narrowing only improves those boundaries that were widened up to |
| 619 // range boundary and leaves other boundaries intact. | 772 // range boundary and leaves other boundaries intact. |
| 620 Iterate(NARROW, kMaxInt32); | 773 Iterate(NARROW, kMaxInt32); |
| 621 | 774 |
| 622 if (FLAG_trace_range_analysis) { | 775 if (FLAG_trace_range_analysis) { |
| 623 FlowGraphPrinter::PrintGraph("Range Analysis (AFTER)", flow_graph_); | 776 FlowGraphPrinter::PrintGraph("Range Analysis (AFTER)", flow_graph_); |
| 624 } | 777 } |
| 625 } | 778 } |
| 626 | 779 |
| 627 | 780 |
| 781 void RangeAnalysis::AssignRangesRecursively(Definition* defn) { | |
| 782 if (!Range::IsUnknown(defn->range())) { | |
| 783 return; | |
| 784 } | |
| 785 | |
| 786 if (!IsIntegerDefinition(defn)) { | |
| 787 return; | |
| 788 } | |
| 789 | |
| 790 for (intptr_t i = 0; i < defn->InputCount(); i++) { | |
| 791 Definition* input_defn = defn->InputAt(i)->definition(); | |
| 792 if (!input_defn->HasSSATemp() || input_defn->IsConstant()) { | |
| 793 AssignRangesRecursively(input_defn); | |
| 794 } | |
| 795 } | |
| 796 | |
| 797 Range new_range; | |
| 798 defn->InferRange(this, &new_range); | |
| 799 if (!Range::IsUnknown(&new_range)) { | |
| 800 defn->set_range(new_range); | |
| 801 } | |
| 802 } | |
| 803 | |
| 804 | |
| 805 // Scheduler is a helper class that inserts floating control-flow less | |
| 806 // subgraphs into the flow graph. | |
| 807 // It always attempts to schedule instructions into the loop preheader in the | |
| 808 // way similar to LICM optimization pass. | |
| 809 // Scheduler supports rollback - that is it keeps track of instructions it | |
| 810 // schedules and can remove all instructions it inserted from the graph. | |
| 811 class Scheduler { | |
| 812 public: | |
| 813 explicit Scheduler(FlowGraph* flow_graph) | |
| 814 : flow_graph_(flow_graph), | |
| 815 loop_headers_(flow_graph->LoopHeaders()), | |
| 816 pre_headers_(loop_headers_.length()) { | |
| 817 for (intptr_t i = 0; i < loop_headers_.length(); i++) { | |
| 818 pre_headers_.Add(loop_headers_[i]->ImmediateDominator()); | |
| 819 } | |
| 820 } | |
| 821 | |
| 822 // Clear the list of emitted instructions. | |
| 823 void Start() { | |
| 824 emitted_.Clear(); | |
| 825 } | |
| 826 | |
| 827 // Given the floating instruction attempt to schedule it into one of the | |
| 828 // loop preheaders that dominates given post_dominator instruction. | |
| 829 // Some of the instruction inputs can potentially be unscheduled as well. | |
| 830 // Returns NULL if the scheduling fails (e.g. inputs are not invariant for | |
| 831 // any loop containing post_dominator). | |
| 832 // Resulting schedule should be equivalent to one obtained by inserting | |
| 833 // instructions right before post_dominator and running CSE and LICM passes. | |
| 834 template<typename T> | |
| 835 T* Emit(T* instruction, Instruction* post_dominator) { | |
|
Florian Schneider
2014/10/07 11:40:48
Maybe rename post_dominator to sink?
Vyacheslav Egorov (Google)
2014/10/07 18:57:27
While I really like the name 'sink' and use it int
| |
| 836 return static_cast<T*>(EmitRecursively(instruction, post_dominator)); | |
| 837 } | |
| 838 | |
| 839 // Undo all insertions recorded in the list of emitted instructions. | |
| 840 void Rollback() { | |
| 841 for (intptr_t i = emitted_.length() - 1; i >= 0; i--) { | |
| 842 emitted_[i]->RemoveFromGraph(); | |
| 843 } | |
| 844 emitted_.Clear(); | |
| 845 } | |
| 846 | |
| 847 private: | |
| 848 typedef DirectChainedHashMap<PointerKeyValueTrait<Instruction> > Map; | |
| 849 | |
| 850 Instruction* EmitRecursively(Instruction* instruction, Instruction* sink) { | |
| 851 // Schedule all unscheduled inputs and unwrap all constrained inputs. | |
| 852 for (intptr_t i = 0; i < instruction->InputCount(); i++) { | |
| 853 Definition* defn = instruction->InputAt(i)->definition(); | |
| 854 if (defn->ssa_temp_index() == -1) { | |
|
Florian Schneider
2014/10/07 11:40:48
!HasSSATemp()
Vyacheslav Egorov (Google)
2014/10/07 18:57:27
Done.
| |
| 855 Definition* scheduled = Emit(defn, sink); | |
| 856 if (scheduled == NULL) { | |
| 857 return NULL; | |
| 858 } | |
| 859 instruction->InputAt(i)->set_definition(scheduled); | |
|
Florian Schneider
2014/10/07 11:40:48
Do we care about stale entries in the use-list of
Vyacheslav Egorov (Google)
2014/10/07 18:57:27
Instruction itself is not in the graph - which mea
| |
| 860 } else if (defn->IsConstraint()) { | |
| 861 instruction->InputAt(i)->set_definition(UnwrapConstraint(defn)); | |
|
Florian Schneider
2014/10/07 11:40:48
Same issue as above.
Vyacheslav Egorov (Google)
2014/10/07 18:57:27
Same answer as above :)
| |
| 862 } | |
| 863 } | |
| 864 | |
| 865 // Attempt to find equivalent instruction that was already scheduled. | |
| 866 // If the instruction is still in the graph (it could have been | |
| 867 // un-scheduled by a rollback action) and it dominates the sink - use it. | |
| 868 Instruction* emitted = map_.Lookup(instruction); | |
| 869 if (emitted != NULL && | |
| 870 !emitted->WasEliminated() && | |
| 871 sink->IsDominatedBy(emitted)) { | |
| 872 return emitted; | |
| 873 } | |
| 874 | |
| 875 // Attempt to find suitable pre-header. Iterate loop headers backwards to | |
| 876 // attempt scheduling into the outermost loop first. | |
| 877 for (intptr_t i = loop_headers_.length() - 1; i >= 0; i--) { | |
| 878 BlockEntryInstr* header = loop_headers_[i]; | |
| 879 BlockEntryInstr* pre_header = pre_headers_[i]; | |
| 880 | |
| 881 if (pre_header == NULL) { | |
| 882 continue; | |
| 883 } | |
| 884 | |
| 885 if (!sink->IsDominatedBy(header)) { | |
| 886 continue; | |
| 887 } | |
| 888 | |
| 889 Instruction* last = pre_header->last_instruction(); | |
| 890 | |
| 891 bool inputs_are_invariant = true; | |
| 892 for (intptr_t j = 0; j < instruction->InputCount(); j++) { | |
| 893 Definition* defn = instruction->InputAt(j)->definition(); | |
| 894 if (!last->IsDominatedBy(defn)) { | |
| 895 inputs_are_invariant = false; | |
| 896 break; | |
| 897 } | |
| 898 } | |
| 899 | |
| 900 if (inputs_are_invariant) { | |
| 901 EmitTo(pre_header, instruction); | |
| 902 return instruction; | |
| 903 } | |
| 904 } | |
| 905 | |
| 906 return NULL; | |
| 907 } | |
| 908 | |
| 909 void EmitTo(BlockEntryInstr* block, Instruction* instr) { | |
| 910 GotoInstr* last = block->last_instruction()->AsGoto(); | |
| 911 flow_graph_->InsertBefore(last, | |
| 912 instr, | |
| 913 last->env(), | |
| 914 instr->IsDefinition() ? FlowGraph::kValue | |
| 915 : FlowGraph::kEffect); | |
| 916 instr->deopt_id_ = last->GetDeoptId(); | |
| 917 instr->env()->set_deopt_id(instr->deopt_id_); | |
| 918 | |
| 919 map_.Insert(instr); | |
| 920 emitted_.Add(instr); | |
| 921 } | |
| 922 | |
| 923 FlowGraph* flow_graph_; | |
| 924 Map map_; | |
| 925 const ZoneGrowableArray<BlockEntryInstr*>& loop_headers_; | |
| 926 GrowableArray<BlockEntryInstr*> pre_headers_; | |
| 927 GrowableArray<Instruction*> emitted_; | |
| 928 }; | |
| 929 | |
| 930 | |
| 931 // If bounds check 0 <= index < length is not redundant we attempt to | |
| 932 // replace it with a sequence of checks that guarantee | |
| 933 // | |
| 934 // 0 <= LowerBound(index) < UpperBound(index) < length | |
| 935 // | |
| 936 // and hoist all of those checks out of the enclosing loop. | |
| 937 // | |
| 938 // Upper/Lower bounds are symbolic arithmetic expressions with +, -, * | |
| 939 // operations. | |
| 940 class BoundsCheckGeneralizer { | |
| 941 public: | |
  // |range_analysis| supplies range and constraint information;
  // |flow_graph| is used for constant lookup, instruction insertion and
  // the embedded Scheduler.
  BoundsCheckGeneralizer(RangeAnalysis* range_analysis,
                         FlowGraph* flow_graph)
      : range_analysis_(range_analysis),
        flow_graph_(flow_graph),
        scheduler_(flow_graph) { }
| 947 | |
  // Attempts to replace the non-redundant bounds check |check| with a
  // generalized, loop-invariant sequence of checks implying
  //
  //     0 <= LowerBound(index)  and  UpperBound(index) < length
  //
  // plus positivity preconditions for any symbols whose sign cannot be
  // proven statically. On any failure the scheduler state is rolled back
  // and the original check is left untouched.
  void TryGeneralize(CheckArrayBoundInstr* check,
                     const RangeBoundary& array_length) {
    Definition* upper_bound =
        ConstructUpperBound(check->index()->definition(), check);
    if (upper_bound == UnwrapConstraint(check->index()->definition())) {
      // Unable to construct upper bound for the index.
      if (FLAG_trace_range_analysis) {
        OS::Print("Failed to construct upper bound for %s index\n",
                  check->ToCString());
      }
      return;
    }

    // Re-associate subexpressions inside upper_bound to collect all constants
    // together. This will expose more redundancies when we are going to emit
    // upper bound through scheduler.
    if (!Simplify(&upper_bound, NULL)) {
      if (FLAG_trace_range_analysis) {
        OS::Print("Failed to simplify upper bound for %s index\n",
                  check->ToCString());
      }
      return;
    }
    upper_bound = ApplyConstraints(upper_bound, check);
    range_analysis_->AssignRangesRecursively(upper_bound);

    // We are going to constrain any symbols participating in + and *
    // operations to guarantee that they are positive. Find all symbols that
    // need constraining. If there is a subtraction subexpression with
    // non-positive range give up on generalization for simplicity.
    GrowableArray<Definition*> non_positive_symbols;
    if (!FindNonPositiveSymbols(&non_positive_symbols, upper_bound)) {
      if (FLAG_trace_range_analysis) {
        OS::Print("Failed to generalize %s index to %s"
                  " (can't ensure positivity)\n",
                  check->ToCString(),
                  IndexBoundToCString(upper_bound));
      }
      return;
    }

    // Check that we can statically prove that lower bound of the index is
    // non-negative under the assumption that all potentially non-positive
    // symbols are positive.
    GrowableArray<ConstraintInstr*> positive_constraints(
        non_positive_symbols.length());
    Range* positive_range = new Range(
        RangeBoundary::FromConstant(0),
        RangeBoundary::MaxConstant(RangeBoundary::kRangeBoundarySmi));
    for (intptr_t i = 0; i < non_positive_symbols.length(); i++) {
      Definition* symbol = non_positive_symbols[i];
      // These constraints are floating: applied below via ApplyConstraints
      // but never inserted into the graph.
      positive_constraints.Add(new ConstraintInstr(
          new Value(symbol),
          positive_range));
    }

    Definition* lower_bound =
        ConstructLowerBound(check->index()->definition(), check);
    // No need to simplify lower bound before applying constraints as
    // we are not going to emit it.
    lower_bound = ApplyConstraints(lower_bound, check, &positive_constraints);
    range_analysis_->AssignRangesRecursively(lower_bound);

    if (!RangeUtils::IsPositive(lower_bound->range())) {
      // Can't prove that lower bound is positive even with additional checks
      // against potentially non-positive symbols. Give up.
      if (FLAG_trace_range_analysis) {
        OS::Print("Failed to generalize %s index to %s"
                  " (lower bound is not positive)\n",
                  check->ToCString(),
                  IndexBoundToCString(upper_bound));
      }
      return;
    }

    if (FLAG_trace_range_analysis) {
      OS::Print("For %s computed index bounds [%s, %s]\n",
                check->ToCString(),
                IndexBoundToCString(lower_bound),
                IndexBoundToCString(upper_bound));
    }

    // At this point we know that 0 <= index < UpperBound(index) under
    // certain preconditions. Start by emitting these preconditions.
    scheduler_.Start();

    // For every symbol that is not provably positive emit a generalized
    // check 0 <= symbol <= Smi::kMaxValue.
    ConstantInstr* max_smi =
        flow_graph_->GetConstant(Smi::Handle(Smi::New(Smi::kMaxValue)));
    for (intptr_t i = 0; i < non_positive_symbols.length(); i++) {
      CheckArrayBoundInstr* precondition = new CheckArrayBoundInstr(
          new Value(max_smi),
          new Value(non_positive_symbols[i]),
          Isolate::kNoDeoptId);
      precondition->mark_generalized();
      precondition = scheduler_.Emit(precondition, check);
      if (precondition == NULL) {
        if (FLAG_trace_range_analysis) {
          OS::Print(" => failed to insert positivity constraint\n");
        }
        scheduler_.Rollback();
        return;
      }
    }

    // The generalized check: UpperBound(index) < length.
    CheckArrayBoundInstr* new_check = new CheckArrayBoundInstr(
        new Value(UnwrapConstraint(check->length()->definition())),
        new Value(upper_bound),
        Isolate::kNoDeoptId);
    new_check->mark_generalized();
    if (new_check->IsRedundant(array_length)) {
      if (FLAG_trace_range_analysis) {
        OS::Print(" => generalized check is redundant\n");
      }
      RemoveGeneralizedCheck(check);
      return;
    }

    new_check = scheduler_.Emit(new_check, check);
    if (new_check != NULL) {
      if (FLAG_trace_range_analysis) {
        OS::Print(" => generalized check was hoisted into B%" Pd "\n",
                  new_check->GetBlock()->block_id());
      }
      RemoveGeneralizedCheck(check);
    } else {
      if (FLAG_trace_range_analysis) {
        OS::Print(" => generalized check can't be hoisted\n");
      }
      scheduler_.Rollback();
    }
  }
| 1079 | |
| 1080 static void RemoveGeneralizedCheck(CheckArrayBoundInstr* check) { | |
| 1081 BinarySmiOpInstr* binary_op = | |
| 1082 check->index()->definition()->AsBinarySmiOp(); | |
| 1083 if (binary_op != NULL) { | |
| 1084 binary_op->set_can_overflow(false); | |
| 1085 } | |
| 1086 check->RemoveFromGraph(); | |
| 1087 } | |
| 1088 | |
| 1089 private: | |
  // Creates a floating (not yet inserted) Smi binary operation over the two
  // operands. No deopt id is assigned here; the scheduler's EmitTo fills it
  // in when the instruction is actually emitted.
  BinarySmiOpInstr* MakeBinaryOp(Token::Kind op_kind,
                                 Definition* left,
                                 Definition* right) {
    return new BinarySmiOpInstr(op_kind,
                                new Value(left),
                                new Value(right),
                                Isolate::kNoDeoptId);
  }


  // Convenience overload: the right operand is a Smi constant.
  BinarySmiOpInstr* MakeBinaryOp(Token::Kind op_kind,
                                 Definition* left,
                                 intptr_t right) {
    ConstantInstr* constant_right =
        flow_graph_->GetConstant(Smi::Handle(Smi::New(right)));
    return MakeBinaryOp(op_kind, left, constant_right);
  }
| 1107 | |
| 1108 Definition* RangeBoundaryToDefinition(const RangeBoundary& bound) { | |
| 1109 Definition* symbol = UnwrapConstraint(bound.symbol()); | |
| 1110 if (bound.offset() == 0) { | |
| 1111 return symbol; | |
| 1112 } else { | |
| 1113 return MakeBinaryOp(Token::kADD, symbol, bound.offset()); | |
| 1114 } | |
| 1115 } | |
| 1116 | |
  // Signature shared by the per-phi bound constructors
  // (InductionVariableLowerBound / InductionVariableUpperBound).
  typedef Definition* (BoundsCheckGeneralizer::*PhiBoundFunc)(
      PhiInstr*, Instruction*);

  // Construct symbolic lower bound for a value at the given point.
  Definition* ConstructLowerBound(Definition* value, Instruction* point) {
    return ConstructBound(&BoundsCheckGeneralizer::InductionVariableLowerBound,
                          value,
                          point);
  }

  // Construct symbolic upper bound for a value at the given point.
  Definition* ConstructUpperBound(Definition* value, Instruction* point) {
    return ConstructBound(&BoundsCheckGeneralizer::InductionVariableUpperBound,
                          value,
                          point);
  }
| 1133 | |
  // Construct symbolic bound for a value at the given point:
  //
  //    1. if value is an induction variable use its bounds;
  //    2. if value is addition or multiplication construct bounds for left
  //       and right hand sides separately and use addition/multiplication
  //       of bounds as a bound (addition and multiplication are monotone
  //       operations for both operands);
  //    3. if value is a subtraction then construct bound for the left hand
  //       side and use subtraction of the right hand side from the left hand
  //       side bound as a bound for an expression (subtraction is monotone
  //       for the left hand side operand).
  //
  // If no better bound can be derived, the value itself is returned.
  Definition* ConstructBound(PhiBoundFunc phi_bound_func,
                             Definition* value,
                             Instruction* point) {
    value = UnwrapConstraint(value);
    if (PhiInstr* phi = value->AsPhi()) {
      if (phi->induction_variable_info() != NULL) {
        return (this->*phi_bound_func)(phi, point);
      }
    } else if (BinarySmiOpInstr* bin_op = value->AsBinarySmiOp()) {
      if ((bin_op->op_kind() == Token::kADD) ||
          (bin_op->op_kind() == Token::kMUL) ||
          (bin_op->op_kind() == Token::kSUB)) {
        Definition* new_left =
            ConstructBound(phi_bound_func, bin_op->left()->definition(), point);
        // For subtraction only the left operand is bounded (monotonicity
        // holds only in the left operand); the right one is used as-is.
        Definition* new_right = (bin_op->op_kind() != Token::kSUB)
            ? ConstructBound(phi_bound_func,
                             bin_op->right()->definition(),
                             point)
            : UnwrapConstraint(bin_op->right()->definition());

        // Only materialize a new floating operation when bounding actually
        // changed one of the operands.
        if ((new_left != UnwrapConstraint(bin_op->left()->definition())) ||
            (new_right != UnwrapConstraint(bin_op->right()->definition()))) {
          return MakeBinaryOp(bin_op->op_kind(), new_left, new_right);
        }
      }
    }

    return value;
  }
| 1175 | |
  // Upper bound for an induction variable: the limit of its dominating
  // constraint, or - for a variable bounded through another induction
  // variable - its initial value plus that variable's trip count. Returns
  // the phi itself when no dominating limit is found.
  Definition* InductionVariableUpperBound(PhiInstr* phi,
                                          Instruction* point) {
    const InductionVariableInfo& info = *phi->induction_variable_info();
    if (info.bound() == phi) {
      if (point->IsDominatedBy(info.limit())) {
        // Given induction variable
        //
        //          x <- phi(x0, x + 1)
        //
        // and a constraint x <= M that dominates the given
        // point we conclude that M is an upper bound for x.
        return RangeBoundaryToDefinition(info.limit()->constraint()->max());
      }
    } else {
      const InductionVariableInfo& bound_info =
          *info.bound()->induction_variable_info();
      if (point->IsDominatedBy(bound_info.limit())) {
        // Given two induction variables
        //
        //          x <- phi(x0, x + 1)
        //          y <- phi(y0, y + 1)
        //
        // and a constraint x <= M that dominates the given
        // point we can conclude that
        //
        //          y <= y0 + (M - x0)
        //
        Definition* limit = RangeBoundaryToDefinition(
            bound_info.limit()->constraint()->max());
        // loop_length = M - x0; both sides are bounded recursively so the
        // result is itself a valid upper bound.
        BinarySmiOpInstr* loop_length =
            MakeBinaryOp(Token::kSUB,
                         ConstructUpperBound(limit, point),
                         ConstructLowerBound(bound_info.initial_value(),
                                             point));
        return MakeBinaryOp(Token::kADD,
                            ConstructUpperBound(info.initial_value(), point),
                            loop_length);
      }
    }

    return phi;
  }
| 1218 | |
  // Lower bound of an increasing induction variable is (the lower bound of)
  // its initial value:
  //
  //          x <- phi(x0, x + 1)
  //
  // implies LowerBound(x) == LowerBound(x0).
  Definition* InductionVariableLowerBound(PhiInstr* phi,
                                          Instruction* point) {
    const InductionVariableInfo& info = *phi->induction_variable_info();
    return ConstructLowerBound(info.initial_value(), point);
  }
| 1229 | |
| 1230 // Try to re-associate binary operations in the floating DAG of operations | |
| 1231 // to collect all constants together, e.g. x + C0 + y + C1 is simplified into | |
| 1232 // x + y + (C0 + C1). | |
| 1233 bool Simplify(Definition** defn, intptr_t* constant) { | |
| 1234 if (BinarySmiOpInstr* binary_op = (*defn)->AsBinarySmiOp()) { | |
| 1235 Definition* left = binary_op->left()->definition(); | |
| 1236 Definition* right = binary_op->right()->definition(); | |
| 1237 | |
| 1238 if (binary_op->op_kind() == Token::kADD) { | |
| 1239 intptr_t left_const = 0; | |
| 1240 intptr_t right_const = 0; | |
| 1241 if (!Simplify(&left, &left_const) || !Simplify(&right, &right_const)) { | |
| 1242 return false; | |
| 1243 } | |
| 1244 | |
| 1245 const intptr_t c = left_const + right_const; | |
| 1246 if (Utils::WillAddOverflow(left_const, right_const) || | |
| 1247 !Smi::IsValid(c)) { | |
| 1248 return false; // Abort. | |
| 1249 } | |
| 1250 | |
| 1251 if (constant != NULL) { | |
| 1252 *constant = c; | |
| 1253 } | |
| 1254 | |
| 1255 if (left == NULL && right == NULL) { | |
| 1256 if (constant != NULL) { | |
| 1257 *defn = NULL; | |
| 1258 } else { | |
| 1259 *defn = flow_graph_->GetConstant(Smi::Handle(Smi::New(c))); | |
| 1260 } | |
| 1261 return true; | |
| 1262 } | |
| 1263 | |
| 1264 if (left == NULL) { | |
| 1265 if (constant != NULL || c == 0) { | |
| 1266 *defn = right; | |
| 1267 return true; | |
| 1268 } else { | |
| 1269 left = right; | |
| 1270 right = NULL; | |
| 1271 } | |
| 1272 } | |
| 1273 | |
| 1274 if (right == NULL) { | |
| 1275 if (constant != NULL || c == 0) { | |
| 1276 *defn = right; | |
| 1277 return true; | |
| 1278 } else { | |
| 1279 right = flow_graph_->GetConstant(Smi::Handle(Smi::New(c))); | |
| 1280 } | |
| 1281 } | |
| 1282 | |
| 1283 } else if (binary_op->op_kind() == Token::kSUB) { | |
| 1284 intptr_t left_const = 0; | |
| 1285 intptr_t right_const = 0; | |
| 1286 if (!Simplify(&left, &left_const) || !Simplify(&right, &right_const)) { | |
| 1287 return false; | |
| 1288 } | |
| 1289 | |
| 1290 const intptr_t c = (left_const - right_const); | |
| 1291 if (Utils::WillSubOverflow(left_const, right_const) || | |
| 1292 !Smi::IsValid(c)) { | |
| 1293 return false; // Abort. | |
| 1294 } | |
| 1295 | |
| 1296 if (constant != NULL) { | |
| 1297 *constant = c; | |
| 1298 } | |
| 1299 | |
| 1300 if (left == NULL && right == NULL) { | |
| 1301 if (constant != NULL) { | |
| 1302 *defn = NULL; | |
| 1303 } else { | |
| 1304 *defn = flow_graph_->GetConstant(Smi::Handle(Smi::New(c))); | |
| 1305 } | |
| 1306 return true; | |
| 1307 } | |
| 1308 | |
| 1309 if (left == NULL) { | |
| 1310 left = flow_graph_->GetConstant(Smi::Handle(Smi::New(0))); | |
| 1311 } | |
| 1312 | |
| 1313 if (right == NULL) { | |
| 1314 if (constant != NULL || c == 0) { | |
| 1315 *defn = left; | |
| 1316 return true; | |
| 1317 } else { | |
| 1318 right = flow_graph_->GetConstant(Smi::Handle(Smi::New(-c))); | |
| 1319 } | |
| 1320 } | |
| 1321 | |
| 1322 } else { | |
| 1323 if (!Simplify(&left, NULL) || !Simplify(&right, NULL)) { | |
| 1324 return false; | |
| 1325 } | |
| 1326 } | |
| 1327 | |
| 1328 ASSERT(left != NULL); | |
| 1329 ASSERT(right != NULL); | |
| 1330 | |
| 1331 const bool left_changed = (left != binary_op->left()->definition()); | |
| 1332 const bool right_changed = (right != binary_op->right()->definition()); | |
| 1333 if (left_changed || right_changed) { | |
| 1334 if ((*defn)->ssa_temp_index() == -1) { | |
| 1335 if (left_changed) binary_op->left()->set_definition(left); | |
| 1336 if (right_changed) binary_op->right()->set_definition(right); | |
| 1337 *defn = binary_op; | |
| 1338 } else { | |
| 1339 *defn = MakeBinaryOp(binary_op->op_kind(), | |
| 1340 UnwrapConstraint(left), | |
| 1341 UnwrapConstraint(right)); | |
| 1342 } | |
| 1343 } | |
| 1344 } else if (ConstantInstr* constant_defn = (*defn)->AsConstant()) { | |
| 1345 if ((constant != NULL) && constant_defn->value().IsSmi()) { | |
| 1346 *defn = NULL; | |
| 1347 *constant = Smi::Cast(constant_defn->value()).Value(); | |
| 1348 } | |
| 1349 } | |
| 1350 | |
| 1351 return true; | |
| 1352 } | |
| 1353 | |
  // If possible find a set of symbols that need to be non-negative to
  // guarantee that expression as whole is non-negative. Returns false when
  // no such set exists (e.g. the expression contains a subtraction whose
  // range is not provably positive).
  bool FindNonPositiveSymbols(GrowableArray<Definition*>* symbols,
                              Definition* defn) {
    if (defn->IsConstant()) {
      // A constant contributes no symbols: it must itself be a non-negative
      // Smi for the expression to stay non-negative.
      const Object& value = defn->AsConstant()->value();
      return value.IsSmi() && (Smi::Cast(value).Value() >= 0);
    } else if (defn->HasSSATemp()) {
      // A materialized value is an opaque symbol: collect it unless its
      // range already proves positivity.
      if (!RangeUtils::IsPositive(defn->range())) {
        symbols->Add(defn);
      }
      return true;
    } else if (defn->IsBinarySmiOp()) {
      // A floating expression built by ConstructBound/Simplify; only
      // +, - and * can occur here.
      BinarySmiOpInstr* binary_op = defn->AsBinarySmiOp();
      ASSERT((binary_op->op_kind() == Token::kADD) ||
             (binary_op->op_kind() == Token::kSUB) ||
             (binary_op->op_kind() == Token::kMUL));

      if (RangeUtils::IsPositive(defn->range())) {
        // We can statically prove that this subexpression is always positive.
        // No need to inspect its subexpressions.
        return true;
      }

      if (binary_op->op_kind() == Token::kSUB) {
        // For addition and multiplication it's enough to ensure that
        // lhs and rhs are positive to guarantee that defn as whole is
        // positive. This does not work for subtraction so just give up.
        return false;
      }

      return FindNonPositiveSymbols(symbols, binary_op->left()->definition()) &&
          FindNonPositiveSymbols(symbols, binary_op->right()->definition());
    }
    UNREACHABLE();
    return false;
  }
| 1391 | |
  // Find innermost constraint for the given definition dominating given
  // instruction. Walks the definition's use list looking for a
  // ConstraintInstr that dominates |post_dominator| and recurses on it,
  // because constraints can be chained (a constraint whose input is itself
  // a constraint). Returns |defn| unchanged when no such constraint exists.
  static Definition* FindInnermostConstraint(Definition* defn,
                                             Instruction* post_dominator) {
    for (Value* use = defn->input_use_list();
         use != NULL;
         use = use->next_use()) {
      ConstraintInstr* constraint = use->instruction()->AsConstraint();
      if ((constraint != NULL) && post_dominator->IsDominatedBy(constraint)) {
        return FindInnermostConstraint(constraint, post_dominator);
      }
    }
    return defn;
  }
| 1406 | |
| 1407 // Replace symbolic parts of the boundary with respective constraints | |
| 1408 // that hold at the given point in the flow graph signified by | |
| 1409 // post_dominator. | |
| 1410 // Constraints array allows to provide a set of additional floating | |
| 1411 // constraints that were not inserted into the graph. | |
  // Replace symbolic parts of the boundary with respective constraints
  // that hold at the given point in the flow graph signified by
  // post_dominator.
  // The optional |constraints| array provides additional floating
  // constraints that were not inserted into the graph.
  // NOTE(review): |post_dominator| is typically the CheckArrayBoundInstr
  // being generalized, but any instruction dominated by the constraints
  // works, hence the generic name (see review thread).
  static Definition* ApplyConstraints(
      Definition* defn,
      Instruction* post_dominator,
      GrowableArray<ConstraintInstr*>* constraints = NULL) {
    if (defn->HasSSATemp()) {
      // A materialized definition: substitute the innermost dominating
      // constraint, preferring a matching floating constraint if given.
      defn = FindInnermostConstraint(defn, post_dominator);
      if (constraints != NULL) {
        for (intptr_t i = 0; i < constraints->length(); i++) {
          ConstraintInstr* constraint = (*constraints)[i];
          if (constraint->value()->definition() == defn) {
            return constraint;
          }
        }
      }
      return defn;
    }

    // A floating (not yet inserted) expression: rewrite its inputs in place.
    for (intptr_t i = 0; i < defn->InputCount(); i++) {
      defn->InputAt(i)->set_definition(
          ApplyConstraints(defn->InputAt(i)->definition(),
                           post_dominator,
                           constraints));
    }

    return defn;
  }
| 1438 | |
  // Recursively renders |index_bound| as a parenthesized infix expression,
  // annotating every node with its computed range. Used only for tracing.
  static void PrettyPrintIndexBoundRecursively(BufferFormatter* f,
                                               Definition* index_bound) {
    BinarySmiOpInstr* binary_op = index_bound->AsBinarySmiOp();
    if (binary_op != NULL) {
      f->Print("(");
      PrettyPrintIndexBoundRecursively(f, binary_op->left()->definition());
      f->Print(" %s ", Token::Str(binary_op->op_kind()));
      PrettyPrintIndexBoundRecursively(f, binary_op->right()->definition());
      f->Print(")");
    } else if (index_bound->IsConstant()) {
      f->Print("%" Pd "",
               Smi::Cast(index_bound->AsConstant()->value()).Value());
    } else {
      // Anything else is a materialized SSA value: print its temp index.
      f->Print("v%" Pd "", index_bound->ssa_temp_index());
    }
    f->Print(" {%s}", Range::ToCString(index_bound->range()));
  }
| 1456 | |
| 1457 | |
  // Formats |index_bound| into a zone-allocated string for trace output.
  // Uses a fixed 1KB stack buffer; output exceeding it is presumably
  // truncated by BufferFormatter - acceptable for tracing.
  static const char* IndexBoundToCString(Definition* index_bound) {
    char buffer[1024];
    BufferFormatter f(buffer, sizeof(buffer));
    PrettyPrintIndexBoundRecursively(&f, index_bound);
    return Isolate::Current()->current_zone()->MakeCopyOfString(buffer);
  }
| 1464 | |
| 1465 RangeAnalysis* range_analysis_; | |
| 1466 FlowGraph* flow_graph_; | |
| 1467 Scheduler scheduler_; | |
| 1468 }; | |
| 1469 | |
| 1470 | |
// Removes bounds checks that computed ranges prove redundant; for the
// remaining checks - when the function permits it - attempts to generalize
// them into hoistable checks via BoundsCheckGeneralizer.
void RangeAnalysis::EliminateRedundantBoundsChecks() {
  if (FLAG_array_bounds_check_elimination) {
    const Function& function = flow_graph_->parsed_function().function();
    // Generalization is gated per-function.
    const bool try_generalization =
        function.allows_bounds_check_generalization();

    BoundsCheckGeneralizer generalizer(this, flow_graph_);

    for (intptr_t i = 0; i < bounds_checks_.length(); i++) {
      CheckArrayBoundInstr* check = bounds_checks_[i];
      RangeBoundary array_length =
          RangeBoundary::FromDefinition(check->length()->definition());
      if (check->IsRedundant(array_length)) {
        check->RemoveFromGraph();
      } else if (try_generalization) {
        generalizer.TryGeneralize(check, array_length);
      }
    }

    if (FLAG_trace_range_analysis) {
      FlowGraphPrinter::PrintGraph("RangeAnalysis (ABCE)", flow_graph_);
    }
  }
}
| 640 | 1495 |
| 641 | 1496 |
| 642 void RangeAnalysis::MarkUnreachableBlocks() { | 1497 void RangeAnalysis::MarkUnreachableBlocks() { |
| 643 for (intptr_t i = 0; i < constraints_.length(); i++) { | 1498 for (intptr_t i = 0; i < constraints_.length(); i++) { |
| 644 if (Range::IsUnknown(constraints_[i]->range())) { | 1499 if (Range::IsUnknown(constraints_[i]->range())) { |
| 645 TargetEntryInstr* target = constraints_[i]->target(); | 1500 TargetEntryInstr* target = constraints_[i]->target(); |
| 646 if (target == NULL) { | 1501 if (target == NULL) { |
| (...skipping 746 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1393 } | 2248 } |
| 1394 return max().UpperBound().ConstantValue() >= 0; | 2249 return max().UpperBound().ConstantValue() >= 0; |
| 1395 } | 2250 } |
| 1396 | 2251 |
| 1397 | 2252 |
| 1398 bool Range::OnlyLessThanOrEqualTo(int64_t val) const { | 2253 bool Range::OnlyLessThanOrEqualTo(int64_t val) const { |
| 1399 if (max().IsPositiveInfinity()) { | 2254 if (max().IsPositiveInfinity()) { |
| 1400 // Cannot be true. | 2255 // Cannot be true. |
| 1401 return false; | 2256 return false; |
| 1402 } | 2257 } |
| 1403 if (max().UpperBound().ConstantValue() > val) { | 2258 return max().UpperBound().ConstantValue() <= val; |
| 1404 // Not true. | |
| 1405 return false; | |
| 1406 } | |
| 1407 return true; | |
| 1408 } | 2259 } |
| 1409 | 2260 |
| 1410 | 2261 |
| 1411 bool Range::OnlyGreaterThanOrEqualTo(int64_t val) const { | 2262 bool Range::OnlyGreaterThanOrEqualTo(int64_t val) const { |
| 1412 if (min().IsNegativeInfinity()) { | 2263 if (min().IsNegativeInfinity()) { |
| 1413 return false; | 2264 return false; |
| 1414 } | 2265 } |
| 1415 if (min().LowerBound().ConstantValue() < val) { | 2266 return min().LowerBound().ConstantValue() >= val; |
| 1416 return false; | |
| 1417 } | |
| 1418 return true; | |
| 1419 } | 2267 } |
| 1420 | 2268 |
| 1421 | 2269 |
| 1422 // Inclusive. | 2270 // Inclusive. |
| 1423 bool Range::IsWithin(int64_t min_int, int64_t max_int) const { | 2271 bool Range::IsWithin(int64_t min_int, int64_t max_int) const { |
| 1424 RangeBoundary lower_min = min().LowerBound(); | 2272 RangeBoundary lower_min = min().LowerBound(); |
| 1425 if (lower_min.IsNegativeInfinity() || (lower_min.ConstantValue() < min_int)) { | 2273 if (lower_min.IsNegativeInfinity() || (lower_min.ConstantValue() < min_int)) { |
| 1426 return false; | 2274 return false; |
| 1427 } | 2275 } |
| 1428 RangeBoundary upper_max = max().UpperBound(); | 2276 RangeBoundary upper_max = max().UpperBound(); |
| 1429 if (upper_max.IsPositiveInfinity() || (upper_max.ConstantValue() > max_int)) { | 2277 return !upper_max.IsPositiveInfinity() && |
| 1430 return false; | 2278 (upper_max.ConstantValue() <= max_int); |
| 1431 } | |
| 1432 return true; | |
| 1433 } | 2279 } |
| 1434 | 2280 |
| 1435 | 2281 |
// Returns true when this range and [min_int, max_int] share at least one
// value. Infinite boundaries are widened to the full representable range.
bool Range::Overlaps(int64_t min_int, int64_t max_int) const {
  RangeBoundary lower = min().LowerBound();
  RangeBoundary upper = max().UpperBound();
  const int64_t this_min = lower.IsNegativeInfinity() ?
      RangeBoundary::kMin : lower.ConstantValue();
  const int64_t this_max = upper.IsPositiveInfinity() ?
      RangeBoundary::kMax : upper.ConstantValue();
  // Either endpoint of the other interval lies inside this range ...
  if ((this_min <= min_int) && (min_int <= this_max)) return true;
  if ((this_min <= max_int) && (max_int <= this_max)) return true;
  // ... or the other interval strictly encloses this range.
  if ((min_int < this_min) && (max_int > this_max)) return true;
  return false;
}
| 1448 | 2294 |
| 1449 | 2295 |
// Returns true when no value can satisfy this range, i.e. it is empty.
bool Range::IsUnsatisfiable() const {
  // Infinity case: [+inf, ...] || [..., -inf]
  if (min().IsPositiveInfinity() || max().IsNegativeInfinity()) {
    return true;
  }
  // Constant case: For example [0, -1].
  if (Range::ConstantMin(this).ConstantValue() >
      Range::ConstantMax(this).ConstantValue()) {
    return true;
  }
  // Symbol case: For example [v+1, v].
  return DependOnSameSymbol(min(), max()) && min().offset() > max().offset();
}
| 1466 | 2309 |
| 1467 | 2310 |
// Clamps both boundaries of this range to the representable interval of
// the given range size (e.g. Smi, int32, int64).
void Range::Clamp(RangeBoundary::RangeSize size) {
  min_ = min_.Clamp(size);
  max_ = max_.Clamp(size);
}
| 1472 | 2315 |
| 1473 | 2316 |
| 1474 void Range::Shl(const Range* left, | 2317 void Range::Shl(const Range* left, |
| (...skipping 666 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 2141 *range = Range::Full(RangeBoundary::kRangeBoundaryInt64); | 2984 *range = Range::Full(RangeBoundary::kRangeBoundaryInt64); |
| 2142 } | 2985 } |
| 2143 } | 2986 } |
| 2144 | 2987 |
| 2145 | 2988 |
| 2146 bool CheckArrayBoundInstr::IsRedundant(const RangeBoundary& length) { | 2989 bool CheckArrayBoundInstr::IsRedundant(const RangeBoundary& length) { |
| 2147 Range* index_range = index()->definition()->range(); | 2990 Range* index_range = index()->definition()->range(); |
| 2148 | 2991 |
| 2149 // Range of the index is unknown can't decide if the check is redundant. | 2992 // Range of the index is unknown can't decide if the check is redundant. |
| 2150 if (index_range == NULL) { | 2993 if (index_range == NULL) { |
| 2151 return false; | 2994 if (!(index()->BindsToConstant() && index()->BoundConstant().IsSmi())) { |
| 2995 return false; | |
| 2996 } | |
| 2997 | |
| 2998 Range range; | |
| 2999 index()->definition()->InferRange(NULL, &range); | |
| 3000 ASSERT(!Range::IsUnknown(&range)); | |
| 3001 index()->definition()->set_range(range); | |
| 3002 index_range = index()->definition()->range(); | |
| 2152 } | 3003 } |
| 2153 | 3004 |
| 2154 // Range of the index is not positive. Check can't be redundant. | 3005 // Range of the index is not positive. Check can't be redundant. |
| 2155 if (Range::ConstantMinSmi(index_range).ConstantValue() < 0) { | 3006 if (Range::ConstantMinSmi(index_range).ConstantValue() < 0) { |
| 2156 return false; | 3007 return false; |
| 2157 } | 3008 } |
| 2158 | 3009 |
| 2159 RangeBoundary max = CanonicalizeBoundary(index_range->max(), | 3010 RangeBoundary max = CanonicalizeBoundary( |
| 2160 RangeBoundary::PositiveInfinity()); | 3011 RangeBoundary::FromDefinition(index()->definition()), |
| 3012 RangeBoundary::PositiveInfinity()); | |
| 2161 | 3013 |
| 2162 if (max.OverflowedSmi()) { | 3014 if (max.OverflowedSmi()) { |
| 2163 return false; | 3015 return false; |
| 2164 } | 3016 } |
| 2165 | 3017 |
| 2166 | 3018 |
| 2167 RangeBoundary max_upper = max.UpperBound(); | 3019 RangeBoundary max_upper = max.UpperBound(); |
| 2168 RangeBoundary length_lower = length.LowerBound(); | 3020 RangeBoundary length_lower = length.LowerBound(); |
| 2169 | 3021 |
| 2170 if (max_upper.OverflowedSmi() || length_lower.OverflowedSmi()) { | 3022 if (max_upper.OverflowedSmi() || length_lower.OverflowedSmi()) { |
| (...skipping 18 matching lines...) Expand all Loading... | |
| 2189 } | 3041 } |
| 2190 } while (CanonicalizeMaxBoundary(&max) || | 3042 } while (CanonicalizeMaxBoundary(&max) || |
| 2191 CanonicalizeMinBoundary(&canonical_length)); | 3043 CanonicalizeMinBoundary(&canonical_length)); |
| 2192 | 3044 |
| 2193 // Failed to prove that maximum is bounded with array length. | 3045 // Failed to prove that maximum is bounded with array length. |
| 2194 return false; | 3046 return false; |
| 2195 } | 3047 } |
| 2196 | 3048 |
| 2197 | 3049 |
| 2198 } // namespace dart | 3050 } // namespace dart |
| OLD | NEW |