| OLD | NEW | 
| --- | --- | 
|      1 // Copyright (c) 2013, the Dart project authors.  Please see the AUTHORS file |      1 // Copyright (c) 2013, the Dart project authors.  Please see the AUTHORS file | 
|      2 // for details. All rights reserved. Use of this source code is governed by a |      2 // for details. All rights reserved. Use of this source code is governed by a | 
|      3 // BSD-style license that can be found in the LICENSE file. |      3 // BSD-style license that can be found in the LICENSE file. | 
|      4  |      4  | 
|      5 #include "vm/flow_graph_optimizer.h" |      5 #include "vm/constant_propagator.h" | 
|      6  |      6  | 
|      7 #include "vm/bit_vector.h" |      7 #include "vm/bit_vector.h" | 
|      8 #include "vm/cha.h" |  | 
|      9 #include "vm/cpu.h" |  | 
|     10 #include "vm/dart_entry.h" |  | 
|     11 #include "vm/exceptions.h" |  | 
|     12 #include "vm/flow_graph_builder.h" |      8 #include "vm/flow_graph_builder.h" | 
|     13 #include "vm/flow_graph_compiler.h" |      9 #include "vm/flow_graph_compiler.h" | 
|     14 #include "vm/flow_graph_range_analysis.h" |     10 #include "vm/flow_graph_range_analysis.h" | 
|     15 #include "vm/hash_map.h" |  | 
|     16 #include "vm/il_printer.h" |     11 #include "vm/il_printer.h" | 
|     17 #include "vm/intermediate_language.h" |     12 #include "vm/intermediate_language.h" | 
|     18 #include "vm/object_store.h" |  | 
|     19 #include "vm/parser.h" |     13 #include "vm/parser.h" | 
|     20 #include "vm/resolver.h" |  | 
|     21 #include "vm/scopes.h" |  | 
|     22 #include "vm/stack_frame.h" |  | 
|     23 #include "vm/symbols.h" |     14 #include "vm/symbols.h" | 
|     24  |     15  | 
|     25 namespace dart { |     16 namespace dart { | 
|     26  |     17  | 
|     27 DEFINE_FLAG(int, getter_setter_ratio, 13, |  | 
|     28     "Ratio of getter/setter usage used for double field unboxing heuristics"); |  | 
|     29 DEFINE_FLAG(bool, load_cse, true, "Use redundant load elimination."); |  | 
|     30 DEFINE_FLAG(bool, dead_store_elimination, true, "Eliminate dead stores"); |  | 
|     31 DEFINE_FLAG(int, max_polymorphic_checks, 4, |  | 
|     32     "Maximum number of polymorphic check, otherwise it is megamorphic."); |  | 
|     33 DEFINE_FLAG(int, max_equality_polymorphic_checks, 32, |  | 
|     34     "Maximum number of polymorphic checks in equality operator," |  | 
|     35     " otherwise use megamorphic dispatch."); |  | 
|     36 DEFINE_FLAG(bool, remove_redundant_phis, true, "Remove redundant phis."); |     18 DEFINE_FLAG(bool, remove_redundant_phis, true, "Remove redundant phis."); | 
|     37 DEFINE_FLAG(bool, trace_constant_propagation, false, |     19 DEFINE_FLAG(bool, trace_constant_propagation, false, | 
|     38     "Print constant propagation and useless code elimination."); |     20     "Print constant propagation and useless code elimination."); | 
|     39 DEFINE_FLAG(bool, trace_load_optimization, false, |  | 
|     40     "Print live sets for load optimization pass."); |  | 
|     41 DEFINE_FLAG(bool, trace_optimization, false, "Print optimization details."); |  | 
|     42 DEFINE_FLAG(bool, truncating_left_shift, true, |  | 
|     43     "Optimize left shift to truncate if possible"); |  | 
|     44 DEFINE_FLAG(bool, use_cha, true, "Use class hierarchy analysis."); |  | 
|     45 #if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_IA32) |  | 
|     46 DEFINE_FLAG(bool, trace_smi_widening, false, "Trace Smi->Int32 widening pass."); |  | 
|     47 #endif |  | 
|     48 DECLARE_FLAG(bool, enable_type_checks); |  | 
|     49 DECLARE_FLAG(bool, source_lines); |  | 
|     50 DECLARE_FLAG(bool, trace_type_check_elimination); |  | 
|     51 DECLARE_FLAG(bool, warn_on_javascript_compatibility); |  | 
|     52  |     21  | 
|     53 // Quick access to the locally defined isolate() method. |     22 // Quick access to the locally defined isolate() method. | 
|     54 #define I (isolate()) |     23 #define I (isolate()) | 
|     55  |     24  | 
|     56 static bool ShouldInlineSimd() { |  | 
|     57   return FlowGraphCompiler::SupportsUnboxedSimd128(); |  | 
|     58 } |  | 
|     59  |  | 
|     60  |  | 
|     61 static bool CanUnboxDouble() { |  | 
|     62   return FlowGraphCompiler::SupportsUnboxedDoubles(); |  | 
|     63 } |  | 
|     64  |  | 
|     65  |  | 
|     66 static bool ShouldInlineInt64ArrayOps() { |  | 
|     67 #if defined(TARGET_ARCH_X64) |  | 
|     68   return true; |  | 
|     69 #endif |  | 
|     70   return false; |  | 
|     71 } |  | 
|     72  |  | 
|     73 static bool CanConvertUnboxedMintToDouble() { |  | 
|     74 #if defined(TARGET_ARCH_IA32) |  | 
|     75   return true; |  | 
|     76 #else |  | 
|     77   // ARM does not have a short instruction sequence for converting int64 to |  | 
|     78   // double. |  | 
|     79   // TODO(johnmccutchan): Investigate possibility on MIPS once |  | 
|     80   // mints are implemented there. |  | 
|     81   return false; |  | 
|     82 #endif |  | 
|     83 } |  | 
|     84  |  | 
|     85  |  | 
|     86 // Optimize instance calls using ICData. |  | 
|     87 void FlowGraphOptimizer::ApplyICData() { |  | 
|     88   VisitBlocks(); |  | 
|     89 } |  | 
|     90  |  | 
|     91  |  | 
|     92 // Optimize instance calls using cids.  This is called after the optimizer |  | 
|     93 // has converted instance calls to instructions. Any remaining instance |  | 
|     94 // calls are either megamorphic, cannot be optimized, or have no runtime |  | 
|     95 // type feedback collected. |  | 
|     96 // Attempts to convert an instance call (IC call) using propagated class-ids, |  | 
|     97 // e.g., the receiver class id, a guarded cid, or guessed cids. |  | 
|     98 void FlowGraphOptimizer::ApplyClassIds() { |  | 
|     99   ASSERT(current_iterator_ == NULL); |  | 
|    100   for (intptr_t i = 0; i < block_order_.length(); ++i) { |  | 
|    101     BlockEntryInstr* entry = block_order_[i]; |  | 
|    102     ForwardInstructionIterator it(entry); |  | 
|    103     current_iterator_ = ⁢ |  | 
|    104     for (; !it.Done(); it.Advance()) { |  | 
|    105       Instruction* instr = it.Current(); |  | 
|    106       if (instr->IsInstanceCall()) { |  | 
|    107         InstanceCallInstr* call = instr->AsInstanceCall(); |  | 
|    108         if (call->HasICData()) { |  | 
|    109           if (TryCreateICData(call)) { |  | 
|    110             VisitInstanceCall(call); |  | 
|    111           } |  | 
|    112         } |  | 
|    113       } else if (instr->IsPolymorphicInstanceCall()) { |  | 
|    114         SpecializePolymorphicInstanceCall(instr->AsPolymorphicInstanceCall()); |  | 
|    115       } else if (instr->IsStrictCompare()) { |  | 
|    116         VisitStrictCompare(instr->AsStrictCompare()); |  | 
|    117       } else if (instr->IsBranch()) { |  | 
|    118         ComparisonInstr* compare = instr->AsBranch()->comparison(); |  | 
|    119         if (compare->IsStrictCompare()) { |  | 
|    120           VisitStrictCompare(compare->AsStrictCompare()); |  | 
|    121         } |  | 
|    122       } |  | 
|    123     } |  | 
|    124     current_iterator_ = NULL; |  | 
|    125   } |  | 
|    126 } |  | 
|    127  |  | 
|    128  |  | 
|    129 // TODO(srdjan): Test/support other number types as well. |  | 
|    130 static bool IsNumberCid(intptr_t cid) { |  | 
|    131   return (cid == kSmiCid) || (cid == kDoubleCid); |  | 
|    132 } |  | 
|    133  |  | 
|    134  |  | 
|    135 // Attempt to build ICData for call using propagated class-ids. |  | 
|    136 bool FlowGraphOptimizer::TryCreateICData(InstanceCallInstr* call) { |  | 
|    137   ASSERT(call->HasICData()); |  | 
|    138   if (call->ic_data()->NumberOfUsedChecks() > 0) { |  | 
|    139     // This occurs when an instance call has too many checks and will be |  | 
|    140     // converted to a megamorphic call. |  | 
|    141     return false; |  | 
|    142   } |  | 
|    143   if (FLAG_warn_on_javascript_compatibility) { |  | 
|    144     // Do not make the instance call megamorphic if the callee needs to decode |  | 
|    145     // the calling code sequence to look up the ic data and verify whether a |  | 
|    146     // warning has already been issued. |  | 
|    147     // TryCreateICData is only invoked if the ic_data target has not been called |  | 
|    148     // yet, so no warning can possibly have been issued. |  | 
|    149     ASSERT(!call->ic_data()->IssuedJSWarning()); |  | 
|    150     if (call->ic_data()->MayCheckForJSWarning()) { |  | 
|    151       return false; |  | 
|    152     } |  | 
|    153   } |  | 
|    154   GrowableArray<intptr_t> class_ids(call->ic_data()->NumArgsTested()); |  | 
|    155   ASSERT(call->ic_data()->NumArgsTested() <= call->ArgumentCount()); |  | 
|    156   for (intptr_t i = 0; i < call->ic_data()->NumArgsTested(); i++) { |  | 
|    157     const intptr_t cid = call->PushArgumentAt(i)->value()->Type()->ToCid(); |  | 
|    158     class_ids.Add(cid); |  | 
|    159   } |  | 
|    160  |  | 
|    161   const Token::Kind op_kind = call->token_kind(); |  | 
|    162   if (Token::IsRelationalOperator(op_kind) || |  | 
|    163       Token::IsEqualityOperator(op_kind) || |  | 
|    164       Token::IsBinaryOperator(op_kind)) { |  | 
|    165     // Guess the cid: if one of the inputs is a number, assume that the other |  | 
|    166     // is a number of the same type. |  | 
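|        // Illustrative note (editor's addition, not in the original source): |  | 
|        // for "a + 0.5" where a's cid is unknown, class_ids starts out as |  | 
|        // {kDynamicCid, kDoubleCid}, so the receiver is guessed to be kDoubleCid. |  | 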
|    167     const intptr_t cid_0 = class_ids[0]; |  | 
|    168     const intptr_t cid_1 = class_ids[1]; |  | 
|    169     if ((cid_0 == kDynamicCid) && (IsNumberCid(cid_1))) { |  | 
|    170       class_ids[0] = cid_1; |  | 
|    171     } else if (IsNumberCid(cid_0) && (cid_1 == kDynamicCid)) { |  | 
|    172       class_ids[1] = cid_0; |  | 
|    173     } |  | 
|    174   } |  | 
|    175  |  | 
|    176   for (intptr_t i = 0; i < class_ids.length(); i++) { |  | 
|    177     if (class_ids[i] == kDynamicCid) { |  | 
|    178       // Not all cids are known. |  | 
|    179       return false; |  | 
|    180     } |  | 
|    181   } |  | 
|    182  |  | 
|    183   const Array& args_desc_array = Array::Handle(I, |  | 
|    184       ArgumentsDescriptor::New(call->ArgumentCount(), call->argument_names())); |  | 
|    185   ArgumentsDescriptor args_desc(args_desc_array); |  | 
|    186   const Class& receiver_class = Class::Handle(I, |  | 
|    187       isolate()->class_table()->At(class_ids[0])); |  | 
|    188   const Function& function = Function::Handle(I, |  | 
|    189       Resolver::ResolveDynamicForReceiverClass( |  | 
|    190           receiver_class, |  | 
|    191           call->function_name(), |  | 
|    192           args_desc)); |  | 
|    193   if (function.IsNull()) { |  | 
|    194     return false; |  | 
|    195   } |  | 
|    196   // Create a new ICData; do not modify the one attached to the instruction, |  | 
|    197   // since it is referenced from the assembly code itself. |  | 
|    198   // TODO(srdjan): Prevent modification of ICData object that is |  | 
|    199   // referenced in assembly code. |  | 
|    200   ICData& ic_data = ICData::ZoneHandle(I, ICData::New( |  | 
|    201       flow_graph_->parsed_function()->function(), |  | 
|    202       call->function_name(), |  | 
|    203       args_desc_array, |  | 
|    204       call->deopt_id(), |  | 
|    205       class_ids.length())); |  | 
|    206   if (class_ids.length() > 1) { |  | 
|    207     ic_data.AddCheck(class_ids, function); |  | 
|    208   } else { |  | 
|    209     ASSERT(class_ids.length() == 1); |  | 
|    210     ic_data.AddReceiverCheck(class_ids[0], function); |  | 
|    211   } |  | 
|    212   call->set_ic_data(&ic_data); |  | 
|    213   return true; |  | 
|    214 } |  | 
|    215  |  | 
|    216  |  | 
|    217 const ICData& FlowGraphOptimizer::TrySpecializeICData(const ICData& ic_data, |  | 
|    218                                                       intptr_t cid) { |  | 
|    219   ASSERT(ic_data.NumArgsTested() == 1); |  | 
|    220  |  | 
|    221   if ((ic_data.NumberOfUsedChecks() == 1) && ic_data.HasReceiverClassId(cid)) { |  | 
|    222     return ic_data;  // Nothing to do |  | 
|    223   } |  | 
|    224  |  | 
|    225   const Function& function = |  | 
|    226       Function::Handle(I, ic_data.GetTargetForReceiverClassId(cid)); |  | 
|    227   // TODO(fschneider): Try looking up the function on the class if it is |  | 
|    228   // not found in the ICData. |  | 
|    229   if (!function.IsNull()) { |  | 
|    230     const ICData& new_ic_data = ICData::ZoneHandle(I, ICData::New( |  | 
|    231         Function::Handle(I, ic_data.owner()), |  | 
|    232         String::Handle(I, ic_data.target_name()), |  | 
|    233         Object::empty_array(),  // Dummy argument descriptor. |  | 
|    234         ic_data.deopt_id(), |  | 
|    235         ic_data.NumArgsTested())); |  | 
|    236     new_ic_data.SetDeoptReasons(ic_data.DeoptReasons()); |  | 
|    237     new_ic_data.AddReceiverCheck(cid, function); |  | 
|    238     return new_ic_data; |  | 
|    239   } |  | 
|    240  |  | 
|    241   return ic_data; |  | 
|    242 } |  | 
|    243  |  | 
|    244  |  | 
|    245 void FlowGraphOptimizer::SpecializePolymorphicInstanceCall( |  | 
|    246     PolymorphicInstanceCallInstr* call) { |  | 
|    247   if (!call->with_checks()) { |  | 
|    248     return;  // Already specialized. |  | 
|    249   } |  | 
|    250  |  | 
|    251   const intptr_t receiver_cid = |  | 
|    252       call->PushArgumentAt(0)->value()->Type()->ToCid(); |  | 
|    253   if (receiver_cid == kDynamicCid) { |  | 
|    254     return;  // No information about the receiver was inferred. |  | 
|    255   } |  | 
|    256  |  | 
|    257   const ICData& ic_data = TrySpecializeICData(call->ic_data(), receiver_cid); |  | 
|    258   if (ic_data.raw() == call->ic_data().raw()) { |  | 
|    259     // No specialization. |  | 
|    260     return; |  | 
|    261   } |  | 
|    262  |  | 
|    263   const bool with_checks = false; |  | 
|    264   PolymorphicInstanceCallInstr* specialized = |  | 
|    265       new(I) PolymorphicInstanceCallInstr(call->instance_call(), |  | 
|    266                                           ic_data, |  | 
|    267                                           with_checks); |  | 
|    268   call->ReplaceWith(specialized, current_iterator()); |  | 
|    269 } |  | 
|    270  |  | 
|    271  |  | 
|    272 static BinarySmiOpInstr* AsSmiShiftLeftInstruction(Definition* d) { |  | 
|    273   BinarySmiOpInstr* instr = d->AsBinarySmiOp(); |  | 
|    274   if ((instr != NULL) && (instr->op_kind() == Token::kSHL)) { |  | 
|    275     return instr; |  | 
|    276   } |  | 
|    277   return NULL; |  | 
|    278 } |  | 
|    279  |  | 
|    280  |  | 
|    281 static bool IsPositiveOrZeroSmiConst(Definition* d) { |  | 
|    282   ConstantInstr* const_instr = d->AsConstant(); |  | 
|    283   if ((const_instr != NULL) && (const_instr->value().IsSmi())) { |  | 
|    284     return Smi::Cast(const_instr->value()).Value() >= 0; |  | 
|    285   } |  | 
|    286   return false; |  | 
|    287 } |  | 
|    288  |  | 
|    289  |  | 
|    290 void FlowGraphOptimizer::OptimizeLeftShiftBitAndSmiOp( |  | 
|    291     Definition* bit_and_instr, |  | 
|    292     Definition* left_instr, |  | 
|    293     Definition* right_instr) { |  | 
|    294   ASSERT(bit_and_instr != NULL); |  | 
|    295   ASSERT((left_instr != NULL) && (right_instr != NULL)); |  | 
|    296  |  | 
|    297   // Check for the pattern; smi_shift_left must be single-use. |  | 
|    298   bool is_positive_or_zero = IsPositiveOrZeroSmiConst(left_instr); |  | 
|    299   if (!is_positive_or_zero) { |  | 
|    300     is_positive_or_zero = IsPositiveOrZeroSmiConst(right_instr); |  | 
|    301   } |  | 
|    302   if (!is_positive_or_zero) return; |  | 
|    303  |  | 
|    304   BinarySmiOpInstr* smi_shift_left = NULL; |  | 
|    305   if (bit_and_instr->InputAt(0)->IsSingleUse()) { |  | 
|    306     smi_shift_left = AsSmiShiftLeftInstruction(left_instr); |  | 
|    307   } |  | 
|    308   if ((smi_shift_left == NULL) && (bit_and_instr->InputAt(1)->IsSingleUse())) { |  | 
|    309     smi_shift_left = AsSmiShiftLeftInstruction(right_instr); |  | 
|    310   } |  | 
|    311   if (smi_shift_left == NULL) return; |  | 
|    312  |  | 
|    313   // Pattern recognized. |  | 
|    314   smi_shift_left->mark_truncating(); |  | 
|    315   ASSERT(bit_and_instr->IsBinarySmiOp() || bit_and_instr->IsBinaryMintOp()); |  | 
|    316   if (bit_and_instr->IsBinaryMintOp()) { |  | 
|    317     // Replace Mint op with Smi op. |  | 
|    318     BinarySmiOpInstr* smi_op = new(I) BinarySmiOpInstr( |  | 
|    319         Token::kBIT_AND, |  | 
|    320         new(I) Value(left_instr), |  | 
|    321         new(I) Value(right_instr), |  | 
|    322         Isolate::kNoDeoptId);  // BIT_AND cannot deoptimize. |  | 
|    323     bit_and_instr->ReplaceWith(smi_op, current_iterator()); |  | 
|    324   } |  | 
|    325 } |  | 
|    326  |  | 
|    327  |  | 
|    328  |  | 
|    329 // Used by TryMergeDivMod. |  | 
|    330 // Inserts a load-indexed instruction between a TRUNCDIV or MOD instruction |  | 
|    331 // and the using instruction. This is an intermediate step before merging. |  | 
|    332 void FlowGraphOptimizer::AppendLoadIndexedForMerged(Definition* instr, |  | 
|    333                                                     intptr_t ix, |  | 
|    334                                                     intptr_t cid) { |  | 
|    335   const intptr_t index_scale = Instance::ElementSizeFor(cid); |  | 
|    336   ConstantInstr* index_instr = |  | 
|    337       flow_graph()->GetConstant(Smi::Handle(I, Smi::New(ix))); |  | 
|    338   LoadIndexedInstr* load = |  | 
|    339       new(I) LoadIndexedInstr(new(I) Value(instr), |  | 
|    340                               new(I) Value(index_instr), |  | 
|    341                               index_scale, |  | 
|    342                               cid, |  | 
|    343                               Isolate::kNoDeoptId, |  | 
|    344                               instr->token_pos()); |  | 
|    345   instr->ReplaceUsesWith(load); |  | 
|    346   flow_graph()->InsertAfter(instr, load, NULL, FlowGraph::kValue); |  | 
|    347 } |  | 
|    348  |  | 
|    349  |  | 
|    350 void FlowGraphOptimizer::AppendExtractNthOutputForMerged(Definition* instr, |  | 
|    351                                                          intptr_t index, |  | 
|    352                                                          Representation rep, |  | 
|    353                                                          intptr_t cid) { |  | 
|    354   ExtractNthOutputInstr* extract = |  | 
|    355       new(I) ExtractNthOutputInstr(new(I) Value(instr), index, rep, cid); |  | 
|    356   instr->ReplaceUsesWith(extract); |  | 
|    357   flow_graph()->InsertAfter(instr, extract, NULL, FlowGraph::kValue); |  | 
|    358 } |  | 
|    359  |  | 
|    360  |  | 
|    361 // Dart: |  | 
|    362 //  var x = d % 10; |  | 
|    363 //  var y = d ~/ 10; |  | 
|    364 //  var z = x + y; |  | 
|    365 // |  | 
|    366 // IL: |  | 
|    367 //  v4 <- %(v2, v3) |  | 
|    368 //  v5 <- ~/(v2, v3) |  | 
|    369 //  v6 <- +(v4, v5) |  | 
|    370 // |  | 
|    371 // IL optimized: |  | 
|    372 //  v4 <- DIVMOD(v2, v3); |  | 
|    373 //  v5 <- LoadIndexed(v4, 0); // ~/ result |  | 
|    374 //  v6 <- LoadIndexed(v4, 1); // % result |  | 
|    375 //  v7 <- +(v5, v6) |  | 
|    376 // Because of the environment it is important that the merged instruction |  | 
|    377 // replaces the first original instruction encountered. |  | 
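|        // Worked example (editor's addition, not in the original source): for |  | 
|        // d = 17, x = 17 % 10 = 7 and y = 17 ~/ 10 = 1, so z = 8. After merging, |  | 
|        // a single DIVMOD computes both results, and the two LoadIndexed |  | 
|        // instructions extract the quotient and the remainder from it. |  | 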
|    378 void FlowGraphOptimizer::TryMergeTruncDivMod( |  | 
|    379     GrowableArray<BinarySmiOpInstr*>* merge_candidates) { |  | 
|    380   if (merge_candidates->length() < 2) { |  | 
|    381     // Need at least a TRUNCDIV and a MOD. |  | 
|    382     return; |  | 
|    383   } |  | 
|    384   for (intptr_t i = 0; i < merge_candidates->length(); i++) { |  | 
|    385     BinarySmiOpInstr* curr_instr = (*merge_candidates)[i]; |  | 
|    386     if (curr_instr == NULL) { |  | 
|    387       // Instruction was merged already. |  | 
|    388       continue; |  | 
|    389     } |  | 
|    390     ASSERT((curr_instr->op_kind() == Token::kTRUNCDIV) || |  | 
|    391            (curr_instr->op_kind() == Token::kMOD)); |  | 
|    392     // Check if there is a kMOD/kTRUNCDIV binop with the same inputs. |  | 
|    393     const intptr_t other_kind = (curr_instr->op_kind() == Token::kTRUNCDIV) ? |  | 
|    394         Token::kMOD : Token::kTRUNCDIV; |  | 
|    395     Definition* left_def = curr_instr->left()->definition(); |  | 
|    396     Definition* right_def = curr_instr->right()->definition(); |  | 
|    397     for (intptr_t k = i + 1; k < merge_candidates->length(); k++) { |  | 
|    398       BinarySmiOpInstr* other_binop = (*merge_candidates)[k]; |  | 
|    399       // 'other_binop' can be NULL if it was already merged. |  | 
|    400       if ((other_binop != NULL) && |  | 
|    401           (other_binop->op_kind() == other_kind) && |  | 
|    402           (other_binop->left()->definition() == left_def) && |  | 
|    403           (other_binop->right()->definition() == right_def)) { |  | 
|    404         (*merge_candidates)[k] = NULL;  // Clear it. |  | 
|    405         ASSERT(curr_instr->HasUses()); |  | 
|    406         AppendExtractNthOutputForMerged( |  | 
|    407             curr_instr, |  | 
|    408             MergedMathInstr::OutputIndexOf(curr_instr->op_kind()), |  | 
|    409             kTagged, kSmiCid); |  | 
|    410         ASSERT(other_binop->HasUses()); |  | 
|    411         AppendExtractNthOutputForMerged( |  | 
|    412             other_binop, |  | 
|    413             MergedMathInstr::OutputIndexOf(other_binop->op_kind()), |  | 
|    414             kTagged, kSmiCid); |  | 
|    415  |  | 
|    416         ZoneGrowableArray<Value*>* args = new(I) ZoneGrowableArray<Value*>(2); |  | 
|    417         args->Add(new(I) Value(curr_instr->left()->definition())); |  | 
|    418         args->Add(new(I) Value(curr_instr->right()->definition())); |  | 
|    419  |  | 
|    420         // Replace with TruncDivMod. |  | 
|    421         MergedMathInstr* div_mod = new(I) MergedMathInstr( |  | 
|    422             args, |  | 
|    423             curr_instr->deopt_id(), |  | 
|    424             MergedMathInstr::kTruncDivMod); |  | 
|    425         curr_instr->ReplaceWith(div_mod, current_iterator()); |  | 
|    426         other_binop->ReplaceUsesWith(div_mod); |  | 
|    427         other_binop->RemoveFromGraph(); |  | 
|    428         // Only one merge is performed here. Because canonicalization |  | 
|    429         // happens later, more candidates may appear. |  | 
|    430         // TODO(srdjan): Allow merging of trunc-div/mod into truncDivMod. |  | 
|    431         break; |  | 
|    432       } |  | 
|    433     } |  | 
|    434   } |  | 
|    435 } |  | 
|    436  |  | 
|    437  |  | 
|    438 // Tries to merge MathUnary operations, in this case sine and cosine. |  | 
|    439 void FlowGraphOptimizer::TryMergeMathUnary( |  | 
|    440     GrowableArray<MathUnaryInstr*>* merge_candidates) { |  | 
|    441   if (!FlowGraphCompiler::SupportsSinCos() || !CanUnboxDouble()) { |  | 
|    442     return; |  | 
|    443   } |  | 
|    444   if (merge_candidates->length() < 2) { |  | 
|    445     // Need at least a SIN and a COS. |  | 
|    446     return; |  | 
|    447   } |  | 
|    448   for (intptr_t i = 0; i < merge_candidates->length(); i++) { |  | 
|    449     MathUnaryInstr* curr_instr = (*merge_candidates)[i]; |  | 
|    450     if (curr_instr == NULL) { |  | 
|    451       // Instruction was merged already. |  | 
|    452       continue; |  | 
|    453     } |  | 
|    454     const intptr_t kind = curr_instr->kind(); |  | 
|    455     ASSERT((kind == MathUnaryInstr::kSin) || |  | 
|    456            (kind == MathUnaryInstr::kCos)); |  | 
|    457     // Check if there is a sin/cos operation with the same input. |  | 
|    458     const intptr_t other_kind = (kind == MathUnaryInstr::kSin) ? |  | 
|    459         MathUnaryInstr::kCos : MathUnaryInstr::kSin; |  | 
|    460     Definition* def = curr_instr->value()->definition(); |  | 
|    461     for (intptr_t k = i + 1; k < merge_candidates->length(); k++) { |  | 
|    462       MathUnaryInstr* other_op = (*merge_candidates)[k]; |  | 
|    463       // 'other_op' can be NULL if it was already merged. |  | 
|    464       if ((other_op != NULL) && (other_op->kind() == other_kind) && |  | 
|    465           (other_op->value()->definition() == def)) { |  | 
|    466         (*merge_candidates)[k] = NULL;  // Clear it. |  | 
|    467         ASSERT(curr_instr->HasUses()); |  | 
|    468         AppendExtractNthOutputForMerged(curr_instr, |  | 
|    469                                         MergedMathInstr::OutputIndexOf(kind), |  | 
|    470                                         kUnboxedDouble, kDoubleCid); |  | 
|    471         ASSERT(other_op->HasUses()); |  | 
|    472         AppendExtractNthOutputForMerged( |  | 
|    473             other_op, |  | 
|    474             MergedMathInstr::OutputIndexOf(other_kind), |  | 
|    475             kUnboxedDouble, kDoubleCid); |  | 
|    476         ZoneGrowableArray<Value*>* args = new(I) ZoneGrowableArray<Value*>(1); |  | 
|    477         args->Add(new(I) Value(curr_instr->value()->definition())); |  | 
|    478         // Replace with SinCos. |  | 
|    479         MergedMathInstr* sin_cos = |  | 
|    480             new(I) MergedMathInstr(args, |  | 
|    481                                    curr_instr->DeoptimizationTarget(), |  | 
|    482                                    MergedMathInstr::kSinCos); |  | 
|    483         curr_instr->ReplaceWith(sin_cos, current_iterator()); |  | 
|    484         other_op->ReplaceUsesWith(sin_cos); |  | 
|    485         other_op->RemoveFromGraph(); |  | 
|    486         // Only one merge is performed here. Because canonicalization |  | 
|    487         // happens later, more candidates may appear. |  | 
|    488         // TODO(srdjan): Allow merging of sin/cos into sincos. |  | 
|    489         break; |  | 
|    490       } |  | 
|    491     } |  | 
|    492   } |  | 
|    493 } |  | 
|    494  |  | 
|    495  |  | 
|    496 // Optimize (a << b) & c pattern: if c is a positive Smi or zero, then the |  | 
|    497 // shift can be a truncating Smi shift-left and the result is always a Smi. |  | 
|    498 // Merging occurs only per basic-block. |  | 
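|        // Illustrative note (editor's addition, not in the original source): in |  | 
|        // "(a << b) & 0xFF" the mask 0xFF is a non-negative Smi, so the masked |  | 
|        // result fits in a Smi and the shift can safely truncate. |  | 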
|    499 void FlowGraphOptimizer::TryOptimizePatterns() { |  | 
|    500   if (!FLAG_truncating_left_shift) return; |  | 
|    501   ASSERT(current_iterator_ == NULL); |  | 
|    502   GrowableArray<BinarySmiOpInstr*> div_mod_merge; |  | 
|    503   GrowableArray<MathUnaryInstr*> sin_cos_merge; |  | 
|    504   for (intptr_t i = 0; i < block_order_.length(); ++i) { |  | 
|    505     // Merging only per basic-block. |  | 
|    506     div_mod_merge.Clear(); |  | 
|    507     sin_cos_merge.Clear(); |  | 
|    508     BlockEntryInstr* entry = block_order_[i]; |  | 
|    509     ForwardInstructionIterator it(entry); |  | 
|    510     current_iterator_ = ⁢ |  | 
|    511     for (; !it.Done(); it.Advance()) { |  | 
|    512       if (it.Current()->IsBinarySmiOp()) { |  | 
|    513         BinarySmiOpInstr* binop = it.Current()->AsBinarySmiOp(); |  | 
|    514         if (binop->op_kind() == Token::kBIT_AND) { |  | 
|    515           OptimizeLeftShiftBitAndSmiOp(binop, |  | 
|    516                                        binop->left()->definition(), |  | 
|    517                                        binop->right()->definition()); |  | 
|    518         } else if ((binop->op_kind() == Token::kTRUNCDIV) || |  | 
|    519                    (binop->op_kind() == Token::kMOD)) { |  | 
|    520           if (binop->HasUses()) { |  | 
|    521             div_mod_merge.Add(binop); |  | 
|    522           } |  | 
|    523         } |  | 
|    524       } else if (it.Current()->IsBinaryMintOp()) { |  | 
|    525         BinaryMintOpInstr* mintop = it.Current()->AsBinaryMintOp(); |  | 
|    526         if (mintop->op_kind() == Token::kBIT_AND) { |  | 
|    527           OptimizeLeftShiftBitAndSmiOp(mintop, |  | 
|    528                                        mintop->left()->definition(), |  | 
|    529                                        mintop->right()->definition()); |  | 
|    530         } |  | 
|    531       } else if (it.Current()->IsMathUnary()) { |  | 
|    532         MathUnaryInstr* math_unary = it.Current()->AsMathUnary(); |  | 
|    533         if ((math_unary->kind() == MathUnaryInstr::kSin) || |  | 
|    534             (math_unary->kind() == MathUnaryInstr::kCos)) { |  | 
|    535           if (math_unary->HasUses()) { |  | 
|    536             sin_cos_merge.Add(math_unary); |  | 
|    537           } |  | 
|    538         } |  | 
|    539       } |  | 
|    540     } |  | 
|    541     TryMergeTruncDivMod(&div_mod_merge); |  | 
|    542     TryMergeMathUnary(&sin_cos_merge); |  | 
|    543     current_iterator_ = NULL; |  | 
|    544   } |  | 
|    545 } |  | 
|    546  |  | 
|    547  |  | 
|    548 static void EnsureSSATempIndex(FlowGraph* graph, |  | 
|    549                                Definition* defn, |  | 
|    550                                Definition* replacement) { |  | 
|    551   if ((replacement->ssa_temp_index() == -1) && |  | 
|    552       (defn->ssa_temp_index() != -1)) { |  | 
|    553     replacement->set_ssa_temp_index(graph->alloc_ssa_temp_index()); |  | 
|    554   } |  | 
|    555 } |  | 
|    556  |  | 
|    557  |  | 
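|        // Descriptive note (editor's addition, not in the original source): |  | 
|        // replaces 'current' with 'replacement' (if any), redirecting all uses |  | 
|        // and carrying over the SSA temp index, then unlinks 'current' from |  | 
|        // the graph. |  | 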
|    558 static void ReplaceCurrentInstruction(ForwardInstructionIterator* iterator, |  | 
|    559                                       Instruction* current, |  | 
|    560                                       Instruction* replacement, |  | 
|    561                                       FlowGraph* graph) { |  | 
|    562   Definition* current_defn = current->AsDefinition(); |  | 
|    563   if ((replacement != NULL) && (current_defn != NULL)) { |  | 
|    564     Definition* replacement_defn = replacement->AsDefinition(); |  | 
|    565     ASSERT(replacement_defn != NULL); |  | 
|    566     current_defn->ReplaceUsesWith(replacement_defn); |  | 
|    567     EnsureSSATempIndex(graph, current_defn, replacement_defn); |  | 
|    568  |  | 
|    569     if (FLAG_trace_optimization) { |  | 
|    570       OS::Print("Replacing v%" Pd " with v%" Pd "\n", |  | 
|    571                 current_defn->ssa_temp_index(), |  | 
|    572                 replacement_defn->ssa_temp_index()); |  | 
|    573     } |  | 
|    574   } else if (FLAG_trace_optimization) { |  | 
|    575     if (current_defn == NULL) { |  | 
|    576       OS::Print("Removing %s\n", current->DebugName()); |  | 
|    577     } else { |  | 
|    578       ASSERT(!current_defn->HasUses()); |  | 
|    579       OS::Print("Removing v%" Pd ".\n", current_defn->ssa_temp_index()); |  | 
|    580     } |  | 
|    581   } |  | 
|    582   iterator->RemoveCurrentFromGraph(); |  | 
|    583 } |  | 
|    584  |  | 
|    585  |  | 
|    586 bool FlowGraphOptimizer::Canonicalize() { |  | 
|    587   bool changed = false; |  | 
|    588   for (intptr_t i = 0; i < block_order_.length(); ++i) { |  | 
|    589     BlockEntryInstr* entry = block_order_[i]; |  | 
|    590     for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) { |  | 
|    591       Instruction* current = it.Current(); |  | 
|    592       if (current->HasUnmatchedInputRepresentations()) { |  | 
|    593         // Can't canonicalize this instruction until all conversions for its |  | 
|    594         // inputs are inserted. |  | 
|    595         continue; |  | 
|    596       } |  | 
|    597  |  | 
|    598       Instruction* replacement = current->Canonicalize(flow_graph()); |  | 
|    599  |  | 
|    600       if (replacement != current) { |  | 
|    601         // For non-definitions Canonicalize should return either NULL or |  | 
|    602         // this. |  | 
|    603         ASSERT((replacement == NULL) || current->IsDefinition()); |  | 
|    604         ReplaceCurrentInstruction(&it, current, replacement, flow_graph_); |  | 
|    605         changed = true; |  | 
|    606       } |  | 
|    607     } |  | 
|    608   } |  | 
|    609   return changed; |  | 
|    610 } |  | 
|    611  |  | 
|    612  |  | 
|    613 static bool IsUnboxedInteger(Representation rep) { |  | 
|    614   return (rep == kUnboxedInt32) || |  | 
|    615          (rep == kUnboxedUint32) || |  | 
|    616          (rep == kUnboxedMint); |  | 
|    617 } |  | 
|    618  |  | 
|    619  |  | 
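|        // Descriptive note (editor's addition, not in the original source): |  | 
|        // inserts a conversion of 'use' from representation 'from' to 'to': |  | 
|        // an UnboxedIntConverter between unboxed integers, Int32ToDouble or |  | 
|        // MintToDouble for int-to-double, Box/Unbox to and from kTagged, and |  | 
|        // otherwise a Box+Unbox pair that deoptimizes if ever executed. |  | 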
|    620 void FlowGraphOptimizer::InsertConversion(Representation from, |  | 
|    621                                           Representation to, |  | 
|    622                                           Value* use, |  | 
|    623                                           bool is_environment_use) { |  | 
|    624   Instruction* insert_before; |  | 
|    625   Instruction* deopt_target; |  | 
|    626   PhiInstr* phi = use->instruction()->AsPhi(); |  | 
|    627   if (phi != NULL) { |  | 
|    628     ASSERT(phi->is_alive()); |  | 
|    629     // For phis, conversions have to be inserted in the predecessor. |  | 
|    630     insert_before = |  | 
|    631         phi->block()->PredecessorAt(use->use_index())->last_instruction(); |  | 
|    632     deopt_target = NULL; |  | 
|    633   } else { |  | 
|    634     deopt_target = insert_before = use->instruction(); |  | 
|    635   } |  | 
|    636  |  | 
|    637   Definition* converted = NULL; |  | 
|    638   if (IsUnboxedInteger(from) && IsUnboxedInteger(to)) { |  | 
|    639     const intptr_t deopt_id = (to == kUnboxedInt32) && (deopt_target != NULL) ? |  | 
|    640       deopt_target->DeoptimizationTarget() : Isolate::kNoDeoptId; |  | 
|    641     converted = new(I) UnboxedIntConverterInstr(from, |  | 
|    642                                                 to, |  | 
|    643                                                 use->CopyWithType(), |  | 
|    644                                                 deopt_id); |  | 
|    645   } else if ((from == kUnboxedInt32) && (to == kUnboxedDouble)) { |  | 
|    646     converted = new Int32ToDoubleInstr(use->CopyWithType()); |  | 
|    647   } else if ((from == kUnboxedMint) && |  | 
|    648              (to == kUnboxedDouble) && |  | 
|    649              CanConvertUnboxedMintToDouble()) { |  | 
|    650     const intptr_t deopt_id = (deopt_target != NULL) ? |  | 
|    651       deopt_target->DeoptimizationTarget() : Isolate::kNoDeoptId; |  | 
|    652     ASSERT(CanUnboxDouble()); |  | 
|    653     converted = new MintToDoubleInstr(use->CopyWithType(), deopt_id); |  | 
|    654   } else if ((from == kTagged) && Boxing::Supports(to)) { |  | 
|    655     const intptr_t deopt_id = (deopt_target != NULL) ? |  | 
|    656       deopt_target->DeoptimizationTarget() : Isolate::kNoDeoptId; |  | 
|    657     converted = UnboxInstr::Create(to, use->CopyWithType(), deopt_id); |  | 
|    658   } else if ((to == kTagged) && Boxing::Supports(from)) { |  | 
|    659     converted = BoxInstr::Create(from, use->CopyWithType()); |  | 
|    660   } else { |  | 
|    661     // We have failed to find a suitable conversion instruction. |  | 
|    662     // Insert two "dummy" conversion instructions with the correct |  | 
|    663     // "from" and "to" representation. The inserted instructions will |  | 
|    664     // trigger a deoptimization if executed. See #12417 for a discussion. |  | 
|    665     const intptr_t deopt_id = (deopt_target != NULL) ? |  | 
|    666       deopt_target->DeoptimizationTarget() : Isolate::kNoDeoptId; |  | 
|    667     ASSERT(Boxing::Supports(from)); |  | 
|    668     ASSERT(Boxing::Supports(to)); |  | 
|    669     Definition* boxed = BoxInstr::Create(from, use->CopyWithType()); |  | 
|    670     use->BindTo(boxed); |  | 
|    671     InsertBefore(insert_before, boxed, NULL, FlowGraph::kValue); |  | 
|    672     converted = UnboxInstr::Create(to, new(I) Value(boxed), deopt_id); |  | 
|    673   } |  | 
|    674   ASSERT(converted != NULL); |  | 
|    675   InsertBefore(insert_before, converted, use->instruction()->env(), |  | 
|    676                FlowGraph::kValue); |  | 
|    677   if (is_environment_use) { |  | 
|    678     use->BindToEnvironment(converted); |  | 
|    679   } else { |  | 
|    680     use->BindTo(converted); |  | 
|    681   } |  | 
|    682  |  | 
|    683   if ((to == kUnboxedInt32) && (phi != NULL)) { |  | 
|    684     // Int32 phis are unboxed optimistically. Ensure that unboxing |  | 
|    685     // has a deoptimization target attached from the goto instruction. |  | 
|    686     flow_graph_->CopyDeoptTarget(converted, insert_before); |  | 
|    687   } |  | 
|    688 } |  | 
|    689  |  | 
|    690  |  | 
|    691 void FlowGraphOptimizer::ConvertUse(Value* use, Representation from_rep) { |  | 
|    692   const Representation to_rep = |  | 
|    693       use->instruction()->RequiredInputRepresentation(use->use_index()); |  | 
|    694   if (from_rep == to_rep || to_rep == kNoRepresentation) { |  | 
|    695     return; |  | 
|    696   } |  | 
|    697   InsertConversion(from_rep, to_rep, use, /*is_environment_use=*/ false); |  | 
|    698 } |  | 
|    699  |  | 
|    700  |  | 
|    701 void FlowGraphOptimizer::ConvertEnvironmentUse(Value* use, |  | 
|    702                                                Representation from_rep) { |  | 
|    703   const Representation to_rep = kTagged; |  | 
|    704   if (from_rep == to_rep || to_rep == kNoRepresentation) { |  | 
|    705     return; |  | 
|    706   } |  | 
|    707   InsertConversion(from_rep, to_rep, use, /*is_environment_use=*/ true); |  | 
|    708 } |  | 
|    709  |  | 
|    710  |  | 
|    711 void FlowGraphOptimizer::InsertConversionsFor(Definition* def) { |  | 
|    712   const Representation from_rep = def->representation(); |  | 
|    713  |  | 
|    714   for (Value::Iterator it(def->input_use_list()); |  | 
|    715        !it.Done(); |  | 
|    716        it.Advance()) { |  | 
|    717     ConvertUse(it.Current(), from_rep); |  | 
|    718   } |  | 
|    719  |  | 
|    720   if (flow_graph()->graph_entry()->SuccessorCount() > 1) { |  | 
|    721     for (Value::Iterator it(def->env_use_list()); |  | 
|    722          !it.Done(); |  | 
|    723          it.Advance()) { |  | 
|    724       Value* use = it.Current(); |  | 
|    725       if (use->instruction()->MayThrow() && |  | 
|    726           use->instruction()->GetBlock()->InsideTryBlock()) { |  | 
|    727         // Environment uses at calls inside try-blocks must be converted to |  | 
|    728         // tagged representation. |  | 
|    729         ConvertEnvironmentUse(it.Current(), from_rep); |  | 
|    730       } |  | 
|    731     } |  | 
|    732   } |  | 
|    733 } |  | 
|    734  |  | 
|    735  |  | 
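|        // Descriptive note (editor's addition, not in the original source): |  | 
|        // picks an unboxed representation for 'phi' when its type is known to |  | 
|        // be Double, Float32x4, Int32x4, or Float64x2 (and unboxing/SIMD is |  | 
|        // supported), or kUnboxedInt32 for suitable int phis on 32-bit |  | 
|        // platforms. |  | 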
|    736 static void UnboxPhi(PhiInstr* phi) { |  | 
|    737   Representation unboxed = phi->representation(); |  | 
|    738  |  | 
|    739   switch (phi->Type()->ToCid()) { |  | 
|    740     case kDoubleCid: |  | 
|    741       if (CanUnboxDouble()) { |  | 
|    742         unboxed = kUnboxedDouble; |  | 
|    743       } |  | 
|    744       break; |  | 
|    745     case kFloat32x4Cid: |  | 
|    746       if (ShouldInlineSimd()) { |  | 
|    747         unboxed = kUnboxedFloat32x4; |  | 
|    748       } |  | 
|    749       break; |  | 
|    750     case kInt32x4Cid: |  | 
|    751       if (ShouldInlineSimd()) { |  | 
|    752         unboxed = kUnboxedInt32x4; |  | 
|    753       } |  | 
|    754       break; |  | 
|    755     case kFloat64x2Cid: |  | 
|    756       if (ShouldInlineSimd()) { |  | 
|    757         unboxed = kUnboxedFloat64x2; |  | 
|    758       } |  | 
|    759       break; |  | 
|    760   } |  | 
|    761  |  | 
|    762   if ((kSmiBits < 32) && |  | 
|    763       (unboxed == kTagged) && |  | 
|    764       phi->Type()->IsInt() && |  | 
|    765       RangeUtils::Fits(phi->range(), RangeBoundary::kRangeBoundaryInt32)) { |  | 
|    766     // On 32-bit platforms conservatively unbox phis that: |  | 
|    767     //   - are proven to be of type Int; |  | 
|    768     //   - fit into the 32-bit range; |  | 
|    769     //   - have either constants or Box() operations as inputs; |  | 
|    770     //   - have at least one Box() operation as an input; |  | 
|    771     //   - are used in at least 1 Unbox() operation. |  | 
|    772     bool should_unbox = false; |  | 
|    773     for (intptr_t i = 0; i < phi->InputCount(); i++) { |  | 
|    774       Definition* input = phi->InputAt(i)->definition(); |  | 
|    775       if (input->IsBox() && |  | 
|    776           RangeUtils::Fits(input->range(), |  | 
|    777                            RangeBoundary::kRangeBoundaryInt32)) { |  | 
|    778         should_unbox = true; |  | 
|    779       } else if (!input->IsConstant()) { |  | 
|    780         should_unbox = false; |  | 
|    781         break; |  | 
|    782       } |  | 
|    783     } |  | 
|    784  |  | 
|    785     if (should_unbox) { |  | 
|    786       // We checked inputs. Check if phi is used in at least one unbox |  | 
|    787       // operation. |  | 
|    788       bool has_unboxed_use = false; |  | 
|    789       for (Value* use = phi->input_use_list(); |  | 
|    790            use != NULL; |  | 
|    791            use = use->next_use()) { |  | 
|    792         Instruction* instr = use->instruction(); |  | 
|    793         if (instr->IsUnbox()) { |  | 
|    794           has_unboxed_use = true; |  | 
|    795           break; |  | 
|    796         } else if (IsUnboxedInteger( |  | 
|    797             instr->RequiredInputRepresentation(use->use_index()))) { |  | 
|    798           has_unboxed_use = true; |  | 
|    799           break; |  | 
|    800         } |  | 
|    801       } |  | 
|    802  |  | 
|    803       if (!has_unboxed_use) { |  | 
|    804         should_unbox = false; |  | 
|    805       } |  | 
|    806     } |  | 
|    807  |  | 
|    808     if (should_unbox) { |  | 
|    809       unboxed = kUnboxedInt32; |  | 
|    810     } |  | 
|    811   } |  | 
|    812  |  | 
|    813   phi->set_representation(unboxed); |  | 
|    814 } |  | 
|    815  |  | 
|    816  |  | 
|    817 void FlowGraphOptimizer::SelectRepresentations() { |  | 
|    818   // Conservatively unbox all phis that were proven to be of Double, |  | 
|    819   // Float32x4, Int32x4, or Float64x2 type. |  | 
|    820   for (intptr_t i = 0; i < block_order_.length(); ++i) { |  | 
|    821     JoinEntryInstr* join_entry = block_order_[i]->AsJoinEntry(); |  | 
|    822     if (join_entry != NULL) { |  | 
|    823       for (PhiIterator it(join_entry); !it.Done(); it.Advance()) { |  | 
|    824         PhiInstr* phi = it.Current(); |  | 
|    825         UnboxPhi(phi); |  | 
|    826       } |  | 
|    827     } |  | 
|    828   } |  | 
|    829  |  | 
|    830   // Process all instructions and insert conversions where needed. |  | 
|    831   GraphEntryInstr* graph_entry = block_order_[0]->AsGraphEntry(); |  | 
|    832  |  | 
|    833   // Visit incoming parameters and constants. |  | 
|    834   for (intptr_t i = 0; i < graph_entry->initial_definitions()->length(); i++) { |  | 
|    835     InsertConversionsFor((*graph_entry->initial_definitions())[i]); |  | 
|    836   } |  | 
|    837  |  | 
|    838   for (intptr_t i = 0; i < block_order_.length(); ++i) { |  | 
|    839     BlockEntryInstr* entry = block_order_[i]; |  | 
|    840     JoinEntryInstr* join_entry = entry->AsJoinEntry(); |  | 
|    841     if (join_entry != NULL) { |  | 
|    842       for (PhiIterator it(join_entry); !it.Done(); it.Advance()) { |  | 
|    843         PhiInstr* phi = it.Current(); |  | 
|    844         ASSERT(phi != NULL); |  | 
|    845         ASSERT(phi->is_alive()); |  | 
|    846         InsertConversionsFor(phi); |  | 
|    847       } |  | 
|    848     } |  | 
|    849     CatchBlockEntryInstr* catch_entry = entry->AsCatchBlockEntry(); |  | 
|    850     if (catch_entry != NULL) { |  | 
|    851       for (intptr_t i = 0; |  | 
|    852            i < catch_entry->initial_definitions()->length(); |  | 
|    853            i++) { |  | 
|    854         InsertConversionsFor((*catch_entry->initial_definitions())[i]); |  | 
|    855       } |  | 
|    856     } |  | 
|    857     for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) { |  | 
|    858       Definition* def = it.Current()->AsDefinition(); |  | 
|    859       if (def != NULL) { |  | 
|    860         InsertConversionsFor(def); |  | 
|    861       } |  | 
|    862     } |  | 
|    863   } |  | 
|    864 } |  | 
|    865  |  | 
|    866  |  | 
|    867 static bool ClassIdIsOneOf(intptr_t class_id, |  | 
|    868                            const GrowableArray<intptr_t>& class_ids) { |  | 
|    869   for (intptr_t i = 0; i < class_ids.length(); i++) { |  | 
|    870     ASSERT(class_ids[i] != kIllegalCid); |  | 
|    871     if (class_ids[i] == class_id) { |  | 
|    872       return true; |  | 
|    873     } |  | 
|    874   } |  | 
|    875   return false; |  | 
|    876 } |  | 
|    877  |  | 
|    878  |  | 
|    879 // Returns true if ICData tests two arguments and all ICData cids are in the |  | 
|    880 // required sets 'receiver_class_ids' or 'argument_class_ids', respectively. |  | 
|    881 static bool ICDataHasOnlyReceiverArgumentClassIds( |  | 
|    882     const ICData& ic_data, |  | 
|    883     const GrowableArray<intptr_t>& receiver_class_ids, |  | 
|    884     const GrowableArray<intptr_t>& argument_class_ids) { |  | 
|    885   if (ic_data.NumArgsTested() != 2) { |  | 
|    886     return false; |  | 
|    887   } |  | 
|    888   Function& target = Function::Handle(); |  | 
|    889   const intptr_t len = ic_data.NumberOfChecks(); |  | 
|    890   GrowableArray<intptr_t> class_ids; |  | 
|    891   for (intptr_t i = 0; i < len; i++) { |  | 
|    892     if (ic_data.IsUsedAt(i)) { |  | 
|    893       ic_data.GetCheckAt(i, &class_ids, &target); |  | 
|    894       ASSERT(class_ids.length() == 2); |  | 
|    895       if (!ClassIdIsOneOf(class_ids[0], receiver_class_ids) || |  | 
|    896           !ClassIdIsOneOf(class_ids[1], argument_class_ids)) { |  | 
|    897         return false; |  | 
|    898       } |  | 
|    899     } |  | 
|    900   } |  | 
|    901   return true; |  | 
|    902 } |  | 
|    903  |  | 
|    904  |  | 
|    905 static bool ICDataHasReceiverArgumentClassIds(const ICData& ic_data, |  | 
|    906                                               intptr_t receiver_class_id, |  | 
|    907                                               intptr_t argument_class_id) { |  | 
|    908   if (ic_data.NumArgsTested() != 2) { |  | 
|    909     return false; |  | 
|    910   } |  | 
|    911   Function& target = Function::Handle(); |  | 
|    912   const intptr_t len = ic_data.NumberOfChecks(); |  | 
|    913   for (intptr_t i = 0; i < len; i++) { |  | 
|    914     if (ic_data.IsUsedAt(i)) { |  | 
|    915       GrowableArray<intptr_t> class_ids; |  | 
|    916       ic_data.GetCheckAt(i, &class_ids, &target); |  | 
|    917       ASSERT(class_ids.length() == 2); |  | 
|    918       if ((class_ids[0] == receiver_class_id) && |  | 
|    919           (class_ids[1] == argument_class_id)) { |  | 
|    920         return true; |  | 
|    921       } |  | 
|    922     } |  | 
|    923   } |  | 
|    924   return false; |  | 
|    925 } |  | 
|    926  |  | 
|    927  |  | 
|    928 static bool HasOnlyOneSmi(const ICData& ic_data) { |  | 
|    929   return (ic_data.NumberOfUsedChecks() == 1) |  | 
|    930       && ic_data.HasReceiverClassId(kSmiCid); |  | 
|    931 } |  | 
|    932  |  | 
|    933  |  | 
|    934 static bool HasOnlySmiOrMint(const ICData& ic_data) { |  | 
|    935   if (ic_data.NumberOfUsedChecks() == 1) { |  | 
|    936     return ic_data.HasReceiverClassId(kSmiCid) |  | 
|    937         || ic_data.HasReceiverClassId(kMintCid); |  | 
|    938   } |  | 
|    939   return (ic_data.NumberOfUsedChecks() == 2) |  | 
|    940       && ic_data.HasReceiverClassId(kSmiCid) |  | 
|    941       && ic_data.HasReceiverClassId(kMintCid); |  | 
|    942 } |  | 
|    943  |  | 
|    944  |  | 
|    945 static bool HasOnlyTwoOf(const ICData& ic_data, intptr_t cid) { |  | 
|    946   if (ic_data.NumberOfUsedChecks() != 1) { |  | 
|    947     return false; |  | 
|    948   } |  | 
|    949   GrowableArray<intptr_t> first; |  | 
|    950   GrowableArray<intptr_t> second; |  | 
|    951   ic_data.GetUsedCidsForTwoArgs(&first, &second); |  | 
|    952   return (first[0] == cid) && (second[0] == cid); |  | 
|    953 } |  | 
|    954  |  | 
|    955 // Returns false if the ICData contains anything other than the 4 combinations |  | 
|    956 // of Mint and Smi for the receiver and argument classes. |  | 
|    957 static bool HasTwoMintOrSmi(const ICData& ic_data) { |  | 
|    958   GrowableArray<intptr_t> first; |  | 
|    959   GrowableArray<intptr_t> second; |  | 
|    960   ic_data.GetUsedCidsForTwoArgs(&first, &second); |  | 
|    961   for (intptr_t i = 0; i < first.length(); i++) { |  | 
|    962     if ((first[i] != kSmiCid) && (first[i] != kMintCid)) { |  | 
|    963       return false; |  | 
|    964     } |  | 
|    965     if ((second[i] != kSmiCid) && (second[i] != kMintCid)) { |  | 
|    966       return false; |  | 
|    967     } |  | 
|    968   } |  | 
|    969   return true; |  | 
|    970 } |  | 
|    971  |  | 
|    972  |  | 
|    973 // Returns false if the ICData contains anything other than the 4 combinations |  | 
|    974 // of Double and Smi for the receiver and argument classes. |  | 
|    975 static bool HasTwoDoubleOrSmi(const ICData& ic_data) { |  | 
|    976   GrowableArray<intptr_t> class_ids(2); |  | 
|    977   class_ids.Add(kSmiCid); |  | 
|    978   class_ids.Add(kDoubleCid); |  | 
|    979   return ICDataHasOnlyReceiverArgumentClassIds(ic_data, class_ids, class_ids); |  | 
|    980 } |  | 
|    981  |  | 
|    982  |  | 
|    983 static bool HasOnlyOneDouble(const ICData& ic_data) { |  | 
|    984   return (ic_data.NumberOfUsedChecks() == 1) |  | 
|    985       && ic_data.HasReceiverClassId(kDoubleCid); |  | 
|    986 } |  | 
|    987  |  | 
|    988  |  | 
|    989 static bool ShouldSpecializeForDouble(const ICData& ic_data) { |  | 
|    990   // Don't specialize for doubles if we can't unbox them. |  | 
|    991   if (!CanUnboxDouble()) { |  | 
|    992     return false; |  | 
|    993   } |  | 
|    994  |  | 
|    995   // An unboxed double operation can't handle the case of two Smis. |  | 
|    996   if (ICDataHasReceiverArgumentClassIds(ic_data, kSmiCid, kSmiCid)) { |  | 
|    997     return false; |  | 
|    998   } |  | 
|    999  |  | 
|   1000   // Check that it has seen only Smis and Doubles. |  | 
|   1001   return HasTwoDoubleOrSmi(ic_data); |  | 
|   1002 } |  | 
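|        // Illustrative note (editor's addition, not in the original source): |  | 
|        // an ICData that has seen (Double, Double) and (Double, Smi) qualifies; |  | 
|        // one that has recorded a (Smi, Smi) check does not, since an unboxed |  | 
|        // double operation can't handle the two-Smi case. |  | 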
|   1003  |  | 
|   1004  |  | 
|   1005 void FlowGraphOptimizer::ReplaceCall(Definition* call, |  | 
|   1006                                      Definition* replacement) { |  | 
|   1007   // Remove the original push arguments. |  | 
|   1008   for (intptr_t i = 0; i < call->ArgumentCount(); ++i) { |  | 
|   1009     PushArgumentInstr* push = call->PushArgumentAt(i); |  | 
|   1010     push->ReplaceUsesWith(push->value()->definition()); |  | 
|   1011     push->RemoveFromGraph(); |  | 
|   1012   } |  | 
|   1013   call->ReplaceWith(replacement, current_iterator()); |  | 
|   1014 } |  | 
|   1015  |  | 
|   1016  |  | 
|   1017 void FlowGraphOptimizer::AddCheckSmi(Definition* to_check, |  | 
|   1018                                      intptr_t deopt_id, |  | 
|   1019                                      Environment* deopt_environment, |  | 
|   1020                                      Instruction* insert_before) { |  | 
|   1021   if (to_check->Type()->ToCid() != kSmiCid) { |  | 
|   1022     InsertBefore(insert_before, |  | 
|   1023                  new(I) CheckSmiInstr(new(I) Value(to_check), |  | 
|   1024                                       deopt_id, |  | 
|   1025                                       insert_before->token_pos()), |  | 
|   1026                  deopt_environment, |  | 
|   1027                  FlowGraph::kEffect); |  | 
|   1028   } |  | 
|   1029 } |  | 
|   1030  |  | 
|   1031  |  | 
|   1032 Instruction* FlowGraphOptimizer::GetCheckClass(Definition* to_check, |  | 
|   1033                                                const ICData& unary_checks, |  | 
|   1034                                                intptr_t deopt_id, |  | 
|   1035                                                intptr_t token_pos) { |  | 
|   1036   if ((unary_checks.NumberOfUsedChecks() == 1) && |  | 
|   1037       unary_checks.HasReceiverClassId(kSmiCid)) { |  | 
|   1038     return new(I) CheckSmiInstr(new(I) Value(to_check), |  | 
|   1039                                 deopt_id, |  | 
|   1040                                 token_pos); |  | 
|   1041   } |  | 
|   1042   return new(I) CheckClassInstr( |  | 
|   1043       new(I) Value(to_check), deopt_id, unary_checks, token_pos); |  | 
|   1044 } |  | 
|   1045  |  | 
|   1046  |  | 
|   1047 void FlowGraphOptimizer::AddCheckClass(Definition* to_check, |  | 
|   1048                                        const ICData& unary_checks, |  | 
|   1049                                        intptr_t deopt_id, |  | 
|   1050                                        Environment* deopt_environment, |  | 
|   1051                                        Instruction* insert_before) { |  | 
|   1052   // Type propagation has not run yet, so we cannot eliminate the check. |  | 
|   1053   Instruction* check = GetCheckClass( |  | 
|   1054       to_check, unary_checks, deopt_id, insert_before->token_pos()); |  | 
|   1055   InsertBefore(insert_before, check, deopt_environment, FlowGraph::kEffect); |  | 
|   1056 } |  | 
|   1057  |  | 
|   1058  |  | 
|   1059 void FlowGraphOptimizer::AddReceiverCheck(InstanceCallInstr* call) { |  | 
|   1060   AddCheckClass(call->ArgumentAt(0), |  | 
|   1061                 ICData::ZoneHandle(I, call->ic_data()->AsUnaryClassChecks()), |  | 
|   1062                 call->deopt_id(), |  | 
|   1063                 call->env(), |  | 
|   1064                 call); |  | 
|   1065 } |  | 
|   1066  |  | 
|   1067  |  | 
|   1068 static bool ArgIsAlways(intptr_t cid, |  | 
|   1069                         const ICData& ic_data, |  | 
|   1070                         intptr_t arg_number) { |  | 
|   1071   ASSERT(ic_data.NumArgsTested() > arg_number); |  | 
|   1072   if (ic_data.NumberOfUsedChecks() == 0) { |  | 
|   1073     return false; |  | 
|   1074   } |  | 
|   1075   const intptr_t num_checks = ic_data.NumberOfChecks(); |  | 
|   1076   for (intptr_t i = 0; i < num_checks; i++) { |  | 
|   1077     if (ic_data.IsUsedAt(i) && ic_data.GetClassIdAt(i, arg_number) != cid) { |  | 
|   1078       return false; |  | 
|   1079     } |  | 
|   1080   } |  | 
|   1081   return true; |  | 
|   1082 } |  | 
|   1083  |  | 
|   1084  |  | 
|   1085 static bool CanUnboxInt32() { |  | 
|   1086   // Int32/Uint32 values can be unboxed if they fit into a Smi or the |  | 
|   1087   // platform supports unboxed Mints. |  | 
|   1088   return (kSmiBits >= 32) || FlowGraphCompiler::SupportsUnboxedMints(); |  | 
|   1089 } |  | 
|   1090  |  | 
|   1091  |  | 
|   1092 static intptr_t MethodKindToCid(MethodRecognizer::Kind kind) { |  | 
|   1093   switch (kind) { |  | 
|   1094     case MethodRecognizer::kImmutableArrayGetIndexed: |  | 
|   1095       return kImmutableArrayCid; |  | 
|   1096  |  | 
|   1097     case MethodRecognizer::kObjectArrayGetIndexed: |  | 
|   1098     case MethodRecognizer::kObjectArraySetIndexed: |  | 
|   1099       return kArrayCid; |  | 
|   1100  |  | 
|   1101     case MethodRecognizer::kGrowableArrayGetIndexed: |  | 
|   1102     case MethodRecognizer::kGrowableArraySetIndexed: |  | 
|   1103       return kGrowableObjectArrayCid; |  | 
|   1104  |  | 
|   1105     case MethodRecognizer::kFloat32ArrayGetIndexed: |  | 
|   1106     case MethodRecognizer::kFloat32ArraySetIndexed: |  | 
|   1107       return kTypedDataFloat32ArrayCid; |  | 
|   1108  |  | 
|   1109     case MethodRecognizer::kFloat64ArrayGetIndexed: |  | 
|   1110     case MethodRecognizer::kFloat64ArraySetIndexed: |  | 
|   1111       return kTypedDataFloat64ArrayCid; |  | 
|   1112  |  | 
|   1113     case MethodRecognizer::kInt8ArrayGetIndexed: |  | 
|   1114     case MethodRecognizer::kInt8ArraySetIndexed: |  | 
|   1115       return kTypedDataInt8ArrayCid; |  | 
|   1116  |  | 
|   1117     case MethodRecognizer::kUint8ArrayGetIndexed: |  | 
|   1118     case MethodRecognizer::kUint8ArraySetIndexed: |  | 
|   1119       return kTypedDataUint8ArrayCid; |  | 
|   1120  |  | 
|   1121     case MethodRecognizer::kUint8ClampedArrayGetIndexed: |  | 
|   1122     case MethodRecognizer::kUint8ClampedArraySetIndexed: |  | 
|   1123       return kTypedDataUint8ClampedArrayCid; |  | 
|   1124  |  | 
|   1125     case MethodRecognizer::kExternalUint8ArrayGetIndexed: |  | 
|   1126     case MethodRecognizer::kExternalUint8ArraySetIndexed: |  | 
|   1127       return kExternalTypedDataUint8ArrayCid; |  | 
|   1128  |  | 
|   1129     case MethodRecognizer::kExternalUint8ClampedArrayGetIndexed: |  | 
|   1130     case MethodRecognizer::kExternalUint8ClampedArraySetIndexed: |  | 
|   1131       return kExternalTypedDataUint8ClampedArrayCid; |  | 
|   1132  |  | 
|   1133     case MethodRecognizer::kInt16ArrayGetIndexed: |  | 
|   1134     case MethodRecognizer::kInt16ArraySetIndexed: |  | 
|   1135       return kTypedDataInt16ArrayCid; |  | 
|   1136  |  | 
|   1137     case MethodRecognizer::kUint16ArrayGetIndexed: |  | 
|   1138     case MethodRecognizer::kUint16ArraySetIndexed: |  | 
|   1139       return kTypedDataUint16ArrayCid; |  | 
|   1140  |  | 
|   1141     case MethodRecognizer::kInt32ArrayGetIndexed: |  | 
|   1142     case MethodRecognizer::kInt32ArraySetIndexed: |  | 
|   1143       return kTypedDataInt32ArrayCid; |  | 
|   1144  |  | 
|   1145     case MethodRecognizer::kUint32ArrayGetIndexed: |  | 
|   1146     case MethodRecognizer::kUint32ArraySetIndexed: |  | 
|   1147       return kTypedDataUint32ArrayCid; |  | 
|   1148  |  | 
|   1149     case MethodRecognizer::kInt64ArrayGetIndexed: |  | 
|   1150     case MethodRecognizer::kInt64ArraySetIndexed: |  | 
|   1151       return kTypedDataInt64ArrayCid; |  | 
|   1152  |  | 
|   1153     case MethodRecognizer::kFloat32x4ArrayGetIndexed: |  | 
|   1154     case MethodRecognizer::kFloat32x4ArraySetIndexed: |  | 
|   1155       return kTypedDataFloat32x4ArrayCid; |  | 
|   1156  |  | 
|   1157     case MethodRecognizer::kInt32x4ArrayGetIndexed: |  | 
|   1158     case MethodRecognizer::kInt32x4ArraySetIndexed: |  | 
|   1159       return kTypedDataInt32x4ArrayCid; |  | 
|   1160  |  | 
|   1161     case MethodRecognizer::kFloat64x2ArrayGetIndexed: |  | 
|   1162     case MethodRecognizer::kFloat64x2ArraySetIndexed: |  | 
|   1163       return kTypedDataFloat64x2ArrayCid; |  | 
|   1164  |  | 
|   1165     default: |  | 
|   1166       break; |  | 
|   1167   } |  | 
|   1168   return kIllegalCid; |  | 
|   1169 } |  | 
|   1170  |  | 
|   1171  |  | 
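|        // Attempts to replace a monomorphic []= instance call with the inlined |  | 
|        // store sequence built by TryInlineRecognizedMethod. |  | 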
|   1172 bool FlowGraphOptimizer::TryReplaceWithStoreIndexed(InstanceCallInstr* call) { |  | 
|   1173   // Check for monomorphic IC data. |  | 
|   1174   if (!call->HasICData()) return false; |  | 
|   1175   const ICData& ic_data = |  | 
|   1176       ICData::Handle(I, call->ic_data()->AsUnaryClassChecks()); |  | 
|   1177   if (ic_data.NumberOfChecks() != 1) { |  | 
|   1178     return false; |  | 
|   1179   } |  | 
|   1180   ASSERT(ic_data.NumberOfUsedChecks() == 1); |  | 
|   1181   ASSERT(ic_data.HasOneTarget()); |  | 
|   1182  |  | 
|   1183   const Function& target = Function::Handle(I, ic_data.GetTargetAt(0)); |  | 
|   1184   TargetEntryInstr* entry; |  | 
|   1185   Definition* last; |  | 
|   1186   if (!TryInlineRecognizedMethod(ic_data.GetReceiverClassIdAt(0), |  | 
|   1187                                  target, |  | 
|   1188                                  call, |  | 
|   1189                                  call->ArgumentAt(0), |  | 
|   1190                                  call->token_pos(), |  | 
|   1191                                  *call->ic_data(), |  | 
|   1192                                  &entry, &last)) { |  | 
|   1193     return false; |  | 
|   1194   } |  | 
|   1195   // Insert receiver class check. |  | 
|   1196   AddReceiverCheck(call); |  | 
|   1197   // Remove the original push arguments. |  | 
|   1198   for (intptr_t i = 0; i < call->ArgumentCount(); ++i) { |  | 
|   1199     PushArgumentInstr* push = call->PushArgumentAt(i); |  | 
|   1200     push->ReplaceUsesWith(push->value()->definition()); |  | 
|   1201     push->RemoveFromGraph(); |  | 
|   1202   } |  | 
|   1203   // Replace all uses of this definition with the result. |  | 
|   1204   call->ReplaceUsesWith(last); |  | 
|   1205   // Finally, insert the inlined sequence in place of this call in the |  | 
|   1206   // graph. |  | 
|   1207   call->previous()->LinkTo(entry->next()); |  | 
|   1208   entry->UnuseAllInputs();  // Entry block is not in the graph. |  | 
|   1209   last->LinkTo(call); |  | 
|   1210   // Remove through the iterator. |  | 
|   1211   ASSERT(current_iterator()->Current() == call); |  | 
|   1212   current_iterator()->RemoveCurrentFromGraph(); |  | 
|   1213   call->set_previous(NULL); |  | 
|   1214   call->set_next(NULL); |  | 
|   1215   return true; |  | 
|   1216 } |  | 
|   1217  |  | 
|   1218  |  | 
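|        // Builds the inlined code for a recognized []= operator: an optional |  | 
|        // type check of the stored value (in checked mode), index and bounds |  | 
|        // checks, value unboxing where needed, and the final indexed store. |  | 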
|   1219 bool FlowGraphOptimizer::InlineSetIndexed( |  | 
|   1220     MethodRecognizer::Kind kind, |  | 
|   1221     const Function& target, |  | 
|   1222     Instruction* call, |  | 
|   1223     Definition* receiver, |  | 
|   1224     intptr_t token_pos, |  | 
|   1225     const ICData* ic_data, |  | 
|   1226     const ICData& value_check, |  | 
|   1227     TargetEntryInstr** entry, |  | 
|   1228     Definition** last) { |  | 
|   1229   intptr_t array_cid = MethodKindToCid(kind); |  | 
|   1230   ASSERT(array_cid != kIllegalCid); |  | 
|   1231  |  | 
|   1232   Definition* array = receiver; |  | 
|   1233   Definition* index = call->ArgumentAt(1); |  | 
|   1234   Definition* stored_value = call->ArgumentAt(2); |  | 
|   1235  |  | 
|   1236   *entry = new(I) TargetEntryInstr(flow_graph()->allocate_block_id(), |  | 
|   1237                                    call->GetBlock()->try_index()); |  | 
|   1238   (*entry)->InheritDeoptTarget(I, call); |  | 
|   1239   Instruction* cursor = *entry; |  | 
|   1240   if (FLAG_enable_type_checks) { |  | 
|   1241     // Only type check the value. A type check for the index is not |  | 
|   1242     // needed here because we insert a deoptimizing smi-check that covers |  | 
|   1243     // the case where the index is not a smi. |  | 
|   1244     const AbstractType& value_type = |  | 
|   1245         AbstractType::ZoneHandle(I, target.ParameterTypeAt(2)); |  | 
|   1246     Definition* instantiator = NULL; |  | 
|   1247     Definition* type_args = NULL; |  | 
|   1248     switch (array_cid) { |  | 
|   1249       case kArrayCid: |  | 
|   1250       case kGrowableObjectArrayCid: { |  | 
|   1251         const Class& instantiator_class =  Class::Handle(I, target.Owner()); |  | 
|   1252         intptr_t type_arguments_field_offset = |  | 
|   1253             instantiator_class.type_arguments_field_offset(); |  | 
|   1254         LoadFieldInstr* load_type_args = |  | 
|   1255             new(I) LoadFieldInstr(new(I) Value(array), |  | 
|   1256                                   type_arguments_field_offset, |  | 
|   1257                                   Type::ZoneHandle(I),  // No type. |  | 
|   1258                                   call->token_pos()); |  | 
|   1259         cursor = flow_graph()->AppendTo(cursor, |  | 
|   1260                                         load_type_args, |  | 
|   1261                                         NULL, |  | 
|   1262                                         FlowGraph::kValue); |  | 
|   1263  |  | 
|   1264         instantiator = array; |  | 
|   1265         type_args = load_type_args; |  | 
|   1266         break; |  | 
|   1267       } |  | 
|   1268       case kTypedDataInt8ArrayCid: |  | 
|   1269       case kTypedDataUint8ArrayCid: |  | 
|   1270       case kTypedDataUint8ClampedArrayCid: |  | 
|   1271       case kExternalTypedDataUint8ArrayCid: |  | 
|   1272       case kExternalTypedDataUint8ClampedArrayCid: |  | 
|   1273       case kTypedDataInt16ArrayCid: |  | 
|   1274       case kTypedDataUint16ArrayCid: |  | 
|   1275       case kTypedDataInt32ArrayCid: |  | 
|   1276       case kTypedDataUint32ArrayCid: |  | 
|   1277       case kTypedDataInt64ArrayCid: |  | 
|   1278         ASSERT(value_type.IsIntType()); |  | 
|   1279         // Fall through. |  | 
|   1280       case kTypedDataFloat32ArrayCid: |  | 
|   1281       case kTypedDataFloat64ArrayCid: { |  | 
|   1282         type_args = instantiator = flow_graph_->constant_null(); |  | 
|   1283         ASSERT((array_cid != kTypedDataFloat32ArrayCid && |  | 
|   1284                 array_cid != kTypedDataFloat64ArrayCid) || |  | 
|   1285                value_type.IsDoubleType()); |  | 
|   1286         ASSERT(value_type.IsInstantiated()); |  | 
|   1287         break; |  | 
|   1288       } |  | 
|   1289       case kTypedDataFloat32x4ArrayCid: { |  | 
|   1290         type_args = instantiator = flow_graph_->constant_null(); |  | 
|   1291         ASSERT((array_cid != kTypedDataFloat32x4ArrayCid) || |  | 
|   1292                value_type.IsFloat32x4Type()); |  | 
|   1293         ASSERT(value_type.IsInstantiated()); |  | 
|   1294         break; |  | 
|   1295       } |  | 
|   1296       case kTypedDataFloat64x2ArrayCid: { |  | 
|   1297         type_args = instantiator = flow_graph_->constant_null(); |  | 
|   1298         ASSERT((array_cid != kTypedDataFloat64x2ArrayCid) || |  | 
|   1299                value_type.IsFloat64x2Type()); |  | 
|   1300         ASSERT(value_type.IsInstantiated()); |  | 
|   1301         break; |  | 
|   1302       } |  | 
|   1303       default: |  | 
|   1304         // TODO(fschneider): Add support for other array types. |  | 
|   1305         UNREACHABLE(); |  | 
|   1306     } |  | 
|   1307     AssertAssignableInstr* assert_value = |  | 
|   1308         new(I) AssertAssignableInstr(token_pos, |  | 
|   1309                                      new(I) Value(stored_value), |  | 
|   1310                                      new(I) Value(instantiator), |  | 
|   1311                                      new(I) Value(type_args), |  | 
|   1312                                      value_type, |  | 
|   1313                                      Symbols::Value(), |  | 
|   1314                                      call->deopt_id()); |  | 
|   1315     cursor = flow_graph()->AppendTo(cursor, |  | 
|   1316                                     assert_value, |  | 
|   1317                                     call->env(), |  | 
|   1318                                     FlowGraph::kValue); |  | 
|   1319   } |  | 
|   1320  |  | 
|   1321   array_cid = PrepareInlineIndexedOp(call, |  | 
|   1322                                      array_cid, |  | 
|   1323                                      &array, |  | 
|   1324                                      index, |  | 
|   1325                                      &cursor); |  | 
|   1326  |  | 
|   1327   // Check if store barrier is needed. Byte arrays don't need a store barrier. |  | 
|   1328   StoreBarrierType needs_store_barrier = |  | 
|   1329       (RawObject::IsTypedDataClassId(array_cid) || |  | 
|   1330        RawObject::IsTypedDataViewClassId(array_cid) || |  | 
|   1331        RawObject::IsExternalTypedDataClassId(array_cid)) ? kNoStoreBarrier |  | 
|   1332                                                          : kEmitStoreBarrier; |  | 
|   1333  |  | 
|   1334   // No need to class check stores to Int32 and Uint32 arrays because |  | 
|   1335   // we insert unboxing instructions below, which include a class check. |  | 
|   1336   if ((array_cid != kTypedDataUint32ArrayCid) && |  | 
|   1337       (array_cid != kTypedDataInt32ArrayCid) && |  | 
|   1338       !value_check.IsNull()) { |  | 
|   1339     // No store barrier is needed because the checked value is a smi, an |  | 
|   1340     // unboxed mint or double, an unboxed Float32x4, or an unboxed Int32x4. |  | 
|   1341     needs_store_barrier = kNoStoreBarrier; |  | 
|   1342     Instruction* check = GetCheckClass( |  | 
|   1343         stored_value, value_check, call->deopt_id(), call->token_pos()); |  | 
|   1344     cursor = flow_graph()->AppendTo(cursor, |  | 
|   1345                                     check, |  | 
|   1346                                     call->env(), |  | 
|   1347                                     FlowGraph::kEffect); |  | 
|   1348   } |  | 
|   1349  |  | 
|   1350   if (array_cid == kTypedDataFloat32ArrayCid) { |  | 
|   1351     stored_value = |  | 
|   1352         new(I) DoubleToFloatInstr( |  | 
|   1353             new(I) Value(stored_value), call->deopt_id()); |  | 
|   1354     cursor = flow_graph()->AppendTo(cursor, |  | 
|   1355                                     stored_value, |  | 
|   1356                                     NULL, |  | 
|   1357                                     FlowGraph::kValue); |  | 
|   1358   } else if (array_cid == kTypedDataInt32ArrayCid) { |  | 
|   1359     stored_value = new(I) UnboxInt32Instr( |  | 
|   1360         UnboxInt32Instr::kTruncate, |  | 
|   1361         new(I) Value(stored_value), |  | 
|   1362         call->deopt_id()); |  | 
|   1363     cursor = flow_graph()->AppendTo(cursor, |  | 
|   1364                                     stored_value, |  | 
|   1365                                     call->env(), |  | 
|   1366                                     FlowGraph::kValue); |  | 
|   1367   } else if (array_cid == kTypedDataUint32ArrayCid) { |  | 
|   1368     stored_value = new(I) UnboxUint32Instr( |  | 
|   1369         new(I) Value(stored_value), |  | 
|   1370         call->deopt_id()); |  | 
|   1371     ASSERT(stored_value->AsUnboxInteger()->is_truncating()); |  | 
|   1372     cursor = flow_graph()->AppendTo(cursor, |  | 
|   1373                                     stored_value, |  | 
|   1374                                     call->env(), |  | 
|   1375                                     FlowGraph::kValue); |  | 
|   1376   } |  | 
|   1377  |  | 
|   1378   const intptr_t index_scale = Instance::ElementSizeFor(array_cid); |  | 
|   1379   *last = new(I) StoreIndexedInstr(new(I) Value(array), |  | 
|   1380                                    new(I) Value(index), |  | 
|   1381                                    new(I) Value(stored_value), |  | 
|   1382                                    needs_store_barrier, |  | 
|   1383                                    index_scale, |  | 
|   1384                                    array_cid, |  | 
|   1385                                    call->deopt_id(), |  | 
|   1386                                    call->token_pos()); |  | 
|   1387   flow_graph()->AppendTo(cursor, |  | 
|   1388                          *last, |  | 
|   1389                          call->env(), |  | 
|   1390                          FlowGraph::kEffect); |  | 
|   1391   return true; |  | 
|   1392 } |  | 
|   1393  |  | 
|   1394  |  | 
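|        // Dispatches on the recognized method kind and inlines the matching |  | 
|        // graph fragment between *entry and *last. Returns false if the |  | 
|        // method is not recognized or cannot be inlined on this platform. |  | 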
|   1395 bool FlowGraphOptimizer::TryInlineRecognizedMethod(intptr_t receiver_cid, |  | 
|   1396                                                    const Function& target, |  | 
|   1397                                                    Instruction* call, |  | 
|   1398                                                    Definition* receiver, |  | 
|   1399                                                    intptr_t token_pos, |  | 
|   1400                                                    const ICData& ic_data, |  | 
|   1401                                                    TargetEntryInstr** entry, |  | 
|   1402                                                    Definition** last) { |  | 
|   1403   ICData& value_check = ICData::ZoneHandle(I); |  | 
|   1404   MethodRecognizer::Kind kind = MethodRecognizer::RecognizeKind(target); |  | 
|   1405   switch (kind) { |  | 
|   1406     // Recognized [] operators. |  | 
|   1407     case MethodRecognizer::kImmutableArrayGetIndexed: |  | 
|   1408     case MethodRecognizer::kObjectArrayGetIndexed: |  | 
|   1409     case MethodRecognizer::kGrowableArrayGetIndexed: |  | 
|   1410     case MethodRecognizer::kInt8ArrayGetIndexed: |  | 
|   1411     case MethodRecognizer::kUint8ArrayGetIndexed: |  | 
|   1412     case MethodRecognizer::kUint8ClampedArrayGetIndexed: |  | 
|   1413     case MethodRecognizer::kExternalUint8ArrayGetIndexed: |  | 
|   1414     case MethodRecognizer::kExternalUint8ClampedArrayGetIndexed: |  | 
|   1415     case MethodRecognizer::kInt16ArrayGetIndexed: |  | 
|   1416     case MethodRecognizer::kUint16ArrayGetIndexed: |  | 
|   1417       return InlineGetIndexed(kind, call, receiver, ic_data, entry, last); |  | 
|   1418     case MethodRecognizer::kFloat32ArrayGetIndexed: |  | 
|   1419     case MethodRecognizer::kFloat64ArrayGetIndexed: |  | 
|   1420       if (!CanUnboxDouble()) { |  | 
|   1421         return false; |  | 
|   1422       } |  | 
|   1423       return InlineGetIndexed(kind, call, receiver, ic_data, entry, last); |  | 
|   1424     case MethodRecognizer::kFloat32x4ArrayGetIndexed: |  | 
|   1425     case MethodRecognizer::kFloat64x2ArrayGetIndexed: |  | 
|   1426       if (!ShouldInlineSimd()) { |  | 
|   1427         return false; |  | 
|   1428       } |  | 
|   1429       return InlineGetIndexed(kind, call, receiver, ic_data, entry, last); |  | 
|   1430     case MethodRecognizer::kInt32ArrayGetIndexed: |  | 
|   1431     case MethodRecognizer::kUint32ArrayGetIndexed: |  | 
|   1432       if (!CanUnboxInt32()) return false; |  | 
|   1433       return InlineGetIndexed(kind, call, receiver, ic_data, entry, last); |  | 
|   1434  |  | 
|   1435     case MethodRecognizer::kInt64ArrayGetIndexed: |  | 
|   1436       if (!ShouldInlineInt64ArrayOps()) { |  | 
|   1437         return false; |  | 
|   1438       } |  | 
|   1439       return InlineGetIndexed(kind, call, receiver, ic_data, entry, last); |  | 
|   1440     // Recognized []= operators. |  | 
|   1441     case MethodRecognizer::kObjectArraySetIndexed: |  | 
|   1442     case MethodRecognizer::kGrowableArraySetIndexed: |  | 
|   1443       if (ArgIsAlways(kSmiCid, ic_data, 2)) { |  | 
|   1444         value_check = ic_data.AsUnaryClassChecksForArgNr(2); |  | 
|   1445       } |  | 
|   1446       return InlineSetIndexed(kind, target, call, receiver, token_pos, |  | 
|   1447                               &ic_data, value_check, entry, last); |  | 
|   1448     case MethodRecognizer::kInt8ArraySetIndexed: |  | 
|   1449     case MethodRecognizer::kUint8ArraySetIndexed: |  | 
|   1450     case MethodRecognizer::kUint8ClampedArraySetIndexed: |  | 
|   1451     case MethodRecognizer::kExternalUint8ArraySetIndexed: |  | 
|   1452     case MethodRecognizer::kExternalUint8ClampedArraySetIndexed: |  | 
|   1453     case MethodRecognizer::kInt16ArraySetIndexed: |  | 
|   1454     case MethodRecognizer::kUint16ArraySetIndexed: |  | 
|   1455       if (!ArgIsAlways(kSmiCid, ic_data, 2)) { |  | 
|   1456         return false; |  | 
|   1457       } |  | 
|   1458       value_check = ic_data.AsUnaryClassChecksForArgNr(2); |  | 
|   1459       return InlineSetIndexed(kind, target, call, receiver, token_pos, |  | 
|   1460                               &ic_data, value_check, entry, last); |  | 
|   1461     case MethodRecognizer::kInt32ArraySetIndexed: |  | 
|   1462     case MethodRecognizer::kUint32ArraySetIndexed: |  | 
|   1463       // Check that the value is always a smi or a mint. We use Int32/Uint32 |  | 
|   1464       // unboxing, which can only unbox these values. |  | 
|   1465       value_check = ic_data.AsUnaryClassChecksForArgNr(2); |  | 
|   1466       if (!HasOnlySmiOrMint(value_check)) { |  | 
|   1467         return false; |  | 
|   1468       } |  | 
|   1469       return InlineSetIndexed(kind, target, call, receiver, token_pos, |  | 
|   1470                               &ic_data, value_check, entry, last); |  | 
|   1471     case MethodRecognizer::kInt64ArraySetIndexed: |  | 
|   1472       if (!ShouldInlineInt64ArrayOps()) { |  | 
|   1473         return false; |  | 
|   1474       } |  | 
|   1475       return InlineSetIndexed(kind, target, call, receiver, token_pos, |  | 
|   1476                               &ic_data, value_check, entry, last); |  | 
|   1477     case MethodRecognizer::kFloat32ArraySetIndexed: |  | 
|   1478     case MethodRecognizer::kFloat64ArraySetIndexed: |  | 
|   1479       if (!CanUnboxDouble()) { |  | 
|   1480         return false; |  | 
|   1481       } |  | 
|   1482       // Check that value is always double. |  | 
|   1483       if (!ArgIsAlways(kDoubleCid, ic_data, 2)) { |  | 
|   1484         return false; |  | 
|   1485       } |  | 
|   1486       value_check = ic_data.AsUnaryClassChecksForArgNr(2); |  | 
|   1487       return InlineSetIndexed(kind, target, call, receiver, token_pos, |  | 
|   1488                               &ic_data, value_check, entry, last); |  | 
|   1489     case MethodRecognizer::kFloat32x4ArraySetIndexed: |  | 
|   1490       if (!ShouldInlineSimd()) { |  | 
|   1491         return false; |  | 
|   1492       } |  | 
|   1493       // Check that value is always a Float32x4. |  | 
|   1494       if (!ArgIsAlways(kFloat32x4Cid, ic_data, 2)) { |  | 
|   1495         return false; |  | 
|   1496       } |  | 
|   1497       value_check = ic_data.AsUnaryClassChecksForArgNr(2); |  | 
|   1498       return InlineSetIndexed(kind, target, call, receiver, token_pos, |  | 
|   1499                               &ic_data, value_check, entry, last); |  | 
|   1500     case MethodRecognizer::kFloat64x2ArraySetIndexed: |  | 
|   1501       if (!ShouldInlineSimd()) { |  | 
|   1502         return false; |  | 
|   1503       } |  | 
|   1504       // Check that value is always a Float64x2. |  | 
|   1505       if (!ArgIsAlways(kFloat64x2Cid, ic_data, 2)) { |  | 
|   1506         return false; |  | 
|   1507       } |  | 
|   1508       value_check = ic_data.AsUnaryClassChecksForArgNr(2); |  | 
|   1509       return InlineSetIndexed(kind, target, call, receiver, token_pos, |  | 
|   1510                               &ic_data, value_check, entry, last); |  | 
|   1511     case MethodRecognizer::kByteArrayBaseGetInt8: |  | 
|   1512       return InlineByteArrayViewLoad(call, receiver, receiver_cid, |  | 
|   1513                                      kTypedDataInt8ArrayCid, |  | 
|   1514                                      ic_data, entry, last); |  | 
|   1515     case MethodRecognizer::kByteArrayBaseGetUint8: |  | 
|   1516       return InlineByteArrayViewLoad(call, receiver, receiver_cid, |  | 
|   1517                                      kTypedDataUint8ArrayCid, |  | 
|   1518                                      ic_data, entry, last); |  | 
|   1519     case MethodRecognizer::kByteArrayBaseGetInt16: |  | 
|   1520       return InlineByteArrayViewLoad(call, receiver, receiver_cid, |  | 
|   1521                                      kTypedDataInt16ArrayCid, |  | 
|   1522                                      ic_data, entry, last); |  | 
|   1523     case MethodRecognizer::kByteArrayBaseGetUint16: |  | 
|   1524       return InlineByteArrayViewLoad(call, receiver, receiver_cid, |  | 
|   1525                                      kTypedDataUint16ArrayCid, |  | 
|   1526                                      ic_data, entry, last); |  | 
|   1527     case MethodRecognizer::kByteArrayBaseGetInt32: |  | 
|   1528       if (!CanUnboxInt32()) { |  | 
|   1529         return false; |  | 
|   1530       } |  | 
|   1531       return InlineByteArrayViewLoad(call, receiver, receiver_cid, |  | 
|   1532                                      kTypedDataInt32ArrayCid, |  | 
|   1533                                      ic_data, entry, last); |  | 
|   1534     case MethodRecognizer::kByteArrayBaseGetUint32: |  | 
|   1535       if (!CanUnboxInt32()) { |  | 
|   1536         return false; |  | 
|   1537       } |  | 
|   1538       return InlineByteArrayViewLoad(call, receiver, receiver_cid, |  | 
|   1539                                      kTypedDataUint32ArrayCid, |  | 
|   1540                                      ic_data, entry, last); |  | 
|   1541     case MethodRecognizer::kByteArrayBaseGetFloat32: |  | 
|   1542       if (!CanUnboxDouble()) { |  | 
|   1543         return false; |  | 
|   1544       } |  | 
|   1545       return InlineByteArrayViewLoad(call, receiver, receiver_cid, |  | 
|   1546                                      kTypedDataFloat32ArrayCid, |  | 
|   1547                                      ic_data, entry, last); |  | 
|   1548     case MethodRecognizer::kByteArrayBaseGetFloat64: |  | 
|   1549       if (!CanUnboxDouble()) { |  | 
|   1550         return false; |  | 
|   1551       } |  | 
|   1552       return InlineByteArrayViewLoad(call, receiver, receiver_cid, |  | 
|   1553                                      kTypedDataFloat64ArrayCid, |  | 
|   1554                                      ic_data, entry, last); |  | 
|   1555     case MethodRecognizer::kByteArrayBaseGetFloat32x4: |  | 
|   1556       if (!ShouldInlineSimd()) { |  | 
|   1557         return false; |  | 
|   1558       } |  | 
|   1559       return InlineByteArrayViewLoad(call, receiver, receiver_cid, |  | 
|   1560                                      kTypedDataFloat32x4ArrayCid, |  | 
|   1561                                      ic_data, entry, last); |  | 
|   1562     case MethodRecognizer::kByteArrayBaseGetInt32x4: |  | 
|   1563       if (!ShouldInlineSimd()) { |  | 
|   1564         return false; |  | 
|   1565       } |  | 
|   1566       return InlineByteArrayViewLoad(call, receiver, receiver_cid, |  | 
|   1567                                      kTypedDataInt32x4ArrayCid, |  | 
|   1568                                      ic_data, entry, last); |  | 
|   1569     case MethodRecognizer::kByteArrayBaseSetInt8: |  | 
|   1570       return InlineByteArrayViewStore(target, call, receiver, receiver_cid, |  | 
|   1571                                       kTypedDataInt8ArrayCid, |  | 
|   1572                                       ic_data, entry, last); |  | 
|   1573     case MethodRecognizer::kByteArrayBaseSetUint8: |  | 
|   1574       return InlineByteArrayViewStore(target, call, receiver, receiver_cid, |  | 
|   1575                                       kTypedDataUint8ArrayCid, |  | 
|   1576                                       ic_data, entry, last); |  | 
|   1577     case MethodRecognizer::kByteArrayBaseSetInt16: |  | 
|   1578       return InlineByteArrayViewStore(target, call, receiver, receiver_cid, |  | 
|   1579                                       kTypedDataInt16ArrayCid, |  | 
|   1580                                       ic_data, entry, last); |  | 
|   1581     case MethodRecognizer::kByteArrayBaseSetUint16: |  | 
|   1582       return InlineByteArrayViewStore(target, call, receiver, receiver_cid, |  | 
|   1583                                       kTypedDataUint16ArrayCid, |  | 
|   1584                                       ic_data, entry, last); |  | 
|   1585     case MethodRecognizer::kByteArrayBaseSetInt32: |  | 
|   1586       return InlineByteArrayViewStore(target, call, receiver, receiver_cid, |  | 
|   1587                                       kTypedDataInt32ArrayCid, |  | 
|   1588                                       ic_data, entry, last); |  | 
|   1589     case MethodRecognizer::kByteArrayBaseSetUint32: |  | 
|   1590       return InlineByteArrayViewStore(target, call, receiver, receiver_cid, |  | 
|   1591                                       kTypedDataUint32ArrayCid, |  | 
|   1592                                       ic_data, entry, last); |  | 
|   1593     case MethodRecognizer::kByteArrayBaseSetFloat32: |  | 
|   1594       if (!CanUnboxDouble()) { |  | 
|   1595         return false; |  | 
|   1596       } |  | 
|   1597       return InlineByteArrayViewStore(target, call, receiver, receiver_cid, |  | 
|   1598                                       kTypedDataFloat32ArrayCid, |  | 
|   1599                                       ic_data, entry, last); |  | 
|   1600     case MethodRecognizer::kByteArrayBaseSetFloat64: |  | 
|   1601       if (!CanUnboxDouble()) { |  | 
|   1602         return false; |  | 
|   1603       } |  | 
|   1604       return InlineByteArrayViewStore(target, call, receiver, receiver_cid, |  | 
|   1605                                       kTypedDataFloat64ArrayCid, |  | 
|   1606                                       ic_data, entry, last); |  | 
|   1607     case MethodRecognizer::kByteArrayBaseSetFloat32x4: |  | 
|   1608       if (!ShouldInlineSimd()) { |  | 
|   1609         return false; |  | 
|   1610       } |  | 
|   1611       return InlineByteArrayViewStore(target, call, receiver, receiver_cid, |  | 
|   1612                                       kTypedDataFloat32x4ArrayCid, |  | 
|   1613                                       ic_data, entry, last); |  | 
|   1614     case MethodRecognizer::kByteArrayBaseSetInt32x4: |  | 
|   1615       if (!ShouldInlineSimd()) { |  | 
|   1616         return false; |  | 
|   1617       } |  | 
|   1618       return InlineByteArrayViewStore(target, call, receiver, receiver_cid, |  | 
|   1619                                       kTypedDataInt32x4ArrayCid, |  | 
|   1620                                       ic_data, entry, last); |  | 
|   1621     case MethodRecognizer::kStringBaseCodeUnitAt: |  | 
|   1622       return InlineStringCodeUnitAt(call, receiver_cid, entry, last); |  | 
|   1623     case MethodRecognizer::kStringBaseCharAt: |  | 
|   1624       return InlineStringBaseCharAt(call, receiver_cid, entry, last); |  | 
|   1625     case MethodRecognizer::kDoubleAdd: |  | 
|   1626       return InlineDoubleOp(Token::kADD, call, entry, last); |  | 
|   1627     case MethodRecognizer::kDoubleSub: |  | 
|   1628       return InlineDoubleOp(Token::kSUB, call, entry, last); |  | 
|   1629     case MethodRecognizer::kDoubleMul: |  | 
|   1630       return InlineDoubleOp(Token::kMUL, call, entry, last); |  | 
|   1631     case MethodRecognizer::kDoubleDiv: |  | 
|   1632       return InlineDoubleOp(Token::kDIV, call, entry, last); |  | 
|   1633     default: |  | 
|   1634       return false; |  | 
|   1635   } |  | 
|   1636 } |  | 
|   1637  |  | 
|   1638  |  | 
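|        // Inserts the common prologue for inlined indexed accesses: a smi |  | 
|        // check for the index, a length load with bounds check, and, for |  | 
|        // growable or external arrays, a load of the backing store. Returns |  | 
|        // the class id of the array that is actually indexed. |  | 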
|   1639 intptr_t FlowGraphOptimizer::PrepareInlineIndexedOp(Instruction* call, |  | 
|   1640                                                     intptr_t array_cid, |  | 
|   1641                                                     Definition** array, |  | 
|   1642                                                     Definition* index, |  | 
|   1643                                                     Instruction** cursor) { |  | 
|   1644   // Insert index smi check. |  | 
|   1645   *cursor = flow_graph()->AppendTo( |  | 
|   1646       *cursor, |  | 
|   1647       new(I) CheckSmiInstr(new(I) Value(index), |  | 
|   1648                            call->deopt_id(), |  | 
|   1649                            call->token_pos()), |  | 
|   1650       call->env(), |  | 
|   1651       FlowGraph::kEffect); |  | 
|   1652  |  | 
|   1653   // Insert array length load and bounds check. |  | 
|   1654   LoadFieldInstr* length = |  | 
|   1655       new(I) LoadFieldInstr( |  | 
|   1656           new(I) Value(*array), |  | 
|   1657           CheckArrayBoundInstr::LengthOffsetFor(array_cid), |  | 
|   1658           Type::ZoneHandle(I, Type::SmiType()), |  | 
|   1659           call->token_pos()); |  | 
|   1660   length->set_is_immutable( |  | 
|   1661       CheckArrayBoundInstr::IsFixedLengthArrayType(array_cid)); |  | 
|   1662   length->set_result_cid(kSmiCid); |  | 
|   1663   length->set_recognized_kind( |  | 
|   1664       LoadFieldInstr::RecognizedKindFromArrayCid(array_cid)); |  | 
|   1665   *cursor = flow_graph()->AppendTo(*cursor, |  | 
|   1666                                    length, |  | 
|   1667                                    NULL, |  | 
|   1668                                    FlowGraph::kValue); |  | 
|   1669  |  | 
|   1670   *cursor = flow_graph()->AppendTo(*cursor, |  | 
|   1671                                    new(I) CheckArrayBoundInstr( |  | 
|   1672                                        new(I) Value(length), |  | 
|   1673                                        new(I) Value(index), |  | 
|   1674                                        call->deopt_id()), |  | 
|   1675                                    call->env(), |  | 
|   1676                                    FlowGraph::kEffect); |  | 
|   1677  |  | 
|   1678   if (array_cid == kGrowableObjectArrayCid) { |  | 
|   1679     // Insert data elements load. |  | 
|   1680     LoadFieldInstr* elements = |  | 
|   1681         new(I) LoadFieldInstr( |  | 
|   1682             new(I) Value(*array), |  | 
|   1683             GrowableObjectArray::data_offset(), |  | 
|   1684             Type::ZoneHandle(I, Type::DynamicType()), |  | 
|   1685             call->token_pos()); |  | 
|   1686     elements->set_result_cid(kArrayCid); |  | 
|   1687     *cursor = flow_graph()->AppendTo(*cursor, |  | 
|   1688                                      elements, |  | 
|   1689                                      NULL, |  | 
|   1690                                      FlowGraph::kValue); |  | 
|   1691     // Load the data from the backing store, which is a fixed-length array. |  | 
|   1692     *array = elements; |  | 
|   1693     array_cid = kArrayCid; |  | 
|   1694   } else if (RawObject::IsExternalTypedDataClassId(array_cid)) { |  | 
|   1695     LoadUntaggedInstr* elements = |  | 
|   1696         new(I) LoadUntaggedInstr(new(I) Value(*array), |  | 
|   1697                                  ExternalTypedData::data_offset()); |  | 
|   1698     *cursor = flow_graph()->AppendTo(*cursor, |  | 
|   1699                                      elements, |  | 
|   1700                                      NULL, |  | 
|   1701                                      FlowGraph::kValue); |  | 
|   1702     *array = elements; |  | 
|   1703   } |  | 
|   1704   return array_cid; |  | 
|   1705 } |  | 
|   1706  |  | 
|   1707  |  | 
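|        // Builds the inlined code for a recognized [] operator: index and |  | 
|        // bounds checks followed by an indexed load, widening float32 |  | 
|        // elements to double. |  | 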
|   1708 bool FlowGraphOptimizer::InlineGetIndexed(MethodRecognizer::Kind kind, |  | 
|   1709                                           Instruction* call, |  | 
|   1710                                           Definition* receiver, |  | 
|   1711                                           const ICData& ic_data, |  | 
|   1712                                           TargetEntryInstr** entry, |  | 
|   1713                                           Definition** last) { |  | 
|   1714   intptr_t array_cid = MethodKindToCid(kind); |  | 
|   1715   ASSERT(array_cid != kIllegalCid); |  | 
|   1716  |  | 
|   1717   Definition* array = receiver; |  | 
|   1718   Definition* index = call->ArgumentAt(1); |  | 
|   1719   *entry = new(I) TargetEntryInstr(flow_graph()->allocate_block_id(), |  | 
|   1720                                    call->GetBlock()->try_index()); |  | 
|   1721   (*entry)->InheritDeoptTarget(I, call); |  | 
|   1722   Instruction* cursor = *entry; |  | 
|   1723  |  | 
|   1724   array_cid = PrepareInlineIndexedOp(call, |  | 
|   1725                                      array_cid, |  | 
|   1726                                      &array, |  | 
|   1727                                      index, |  | 
|   1728                                      &cursor); |  | 
|   1729  |  | 
|   1730   intptr_t deopt_id = Isolate::kNoDeoptId; |  | 
|   1731   if ((array_cid == kTypedDataInt32ArrayCid) || |  | 
|   1732       (array_cid == kTypedDataUint32ArrayCid)) { |  | 
|   1733     // Deoptimization may be needed if the result does not always fit in a Smi. |  | 
|   1734     deopt_id = (kSmiBits >= 32) ? Isolate::kNoDeoptId : call->deopt_id(); |  | 
|   1735   } |  | 
|   1736  |  | 
|   1737   // Array load and return. |  | 
|   1738   intptr_t index_scale = Instance::ElementSizeFor(array_cid); |  | 
|   1739   *last = new(I) LoadIndexedInstr(new(I) Value(array), |  | 
|   1740                                   new(I) Value(index), |  | 
|   1741                                   index_scale, |  | 
|   1742                                   array_cid, |  | 
|   1743                                   deopt_id, |  | 
|   1744                                   call->token_pos()); |  | 
|   1745   cursor = flow_graph()->AppendTo( |  | 
|   1746       cursor, |  | 
|   1747       *last, |  | 
|   1748       deopt_id != Isolate::kNoDeoptId ? call->env() : NULL, |  | 
|   1749       FlowGraph::kValue); |  | 
|   1750  |  | 
|   1751   if (array_cid == kTypedDataFloat32ArrayCid) { |  | 
|   1752     *last = new(I) FloatToDoubleInstr(new(I) Value(*last), deopt_id); |  | 
|   1753     flow_graph()->AppendTo(cursor, |  | 
|   1754                            *last, |  | 
|   1755                            deopt_id != Isolate::kNoDeoptId ? call->env() : NULL, |  | 
|   1756                            FlowGraph::kValue); |  | 
|   1757   } |  | 
|   1758   return true; |  | 
|   1759 } |  | 
|   1760  |  | 
|   1761  |  | 
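|        // Attempts to replace a monomorphic [] instance call with the inlined |  | 
|        // load sequence built by TryInlineRecognizedMethod. |  | 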
|   1762 bool FlowGraphOptimizer::TryReplaceWithLoadIndexed(InstanceCallInstr* call) { |  | 
|   1763   // Check for monomorphic IC data. |  | 
|   1764   if (!call->HasICData()) return false; |  | 
|   1765   const ICData& ic_data = |  | 
|   1766       ICData::Handle(I, call->ic_data()->AsUnaryClassChecks()); |  | 
|   1767   if (ic_data.NumberOfChecks() != 1) { |  | 
|   1768     return false; |  | 
|   1769   } |  | 
|   1770   ASSERT(ic_data.NumberOfUsedChecks() == 1); |  | 
|   1771   ASSERT(ic_data.HasOneTarget()); |  | 
|   1772  |  | 
|   1773   const Function& target = Function::Handle(I, ic_data.GetTargetAt(0)); |  | 
|   1774   TargetEntryInstr* entry; |  | 
|   1775   Definition* last; |  | 
|   1776   if (!TryInlineRecognizedMethod(ic_data.GetReceiverClassIdAt(0), |  | 
|   1777                                  target, |  | 
|   1778                                  call, |  | 
|   1779                                  call->ArgumentAt(0), |  | 
|   1780                                  call->token_pos(), |  | 
|   1781                                  *call->ic_data(), |  | 
|   1782                                  &entry, &last)) { |  | 
|   1783     return false; |  | 
|   1784   } |  | 
|   1785  |  | 
|   1786   // Insert receiver class check. |  | 
|   1787   AddReceiverCheck(call); |  | 
|   1788   // Remove the original push arguments. |  | 
|   1789   for (intptr_t i = 0; i < call->ArgumentCount(); ++i) { |  | 
|   1790     PushArgumentInstr* push = call->PushArgumentAt(i); |  | 
|   1791     push->ReplaceUsesWith(push->value()->definition()); |  | 
|   1792     push->RemoveFromGraph(); |  | 
|   1793   } |  | 
|   1794   // Replace all uses of this definition with the result. |  | 
|   1795   call->ReplaceUsesWith(last); |  | 
|   1796   // Finally, insert the inlined sequence in place of this call in the |  | 
|   1797   // graph. |  | 
|   1798   call->previous()->LinkTo(entry->next()); |  | 
|   1799   entry->UnuseAllInputs();  // Entry block is not in the graph. |  | 
|   1800   last->LinkTo(call); |  | 
|   1801   // Remove through the iterator. |  | 
|   1802   ASSERT(current_iterator()->Current() == call); |  | 
|   1803   current_iterator()->RemoveCurrentFromGraph(); |  | 
|   1804   call->set_previous(NULL); |  | 
|   1805   call->set_next(NULL); |  | 
|   1806   return true; |  | 
|   1807 } |  | 
|   1808  |  | 
|   1809  |  | 
|   1810 // Returns true if d is a string of length one (either a constant or the |  | 
|   1811 // result of a string-from-char-code instruction). |  | 
|   1812 static bool IsLengthOneString(Definition* d) { |  | 
|   1813   if (d->IsConstant()) { |  | 
|   1814     const Object& obj = d->AsConstant()->value(); |  | 
|   1815     if (obj.IsString()) { |  | 
|   1816       return String::Cast(obj).Length() == 1; |  | 
|   1817     } else { |  | 
|   1818       return false; |  | 
|   1819     } |  | 
|   1820   } else { |  | 
|   1821     return d->IsStringFromCharCode(); |  | 
|   1822   } |  | 
|   1823 } |  | 
|   1824  |  | 
|   1825  |  | 
|   1826 // Returns true if the string comparison was converted into char-code |  | 
|   1827 // comparison. Conversion is only possible for strings of length one. |  | 
|   1828 // E.g., detect str[x] == "x" and use an integer comparison of char-codes. |  | 
|   1829 // TODO(srdjan): Expand for two-byte and external strings. |  | 
|   1830 bool FlowGraphOptimizer::TryStringLengthOneEquality(InstanceCallInstr* call, |  | 
|   1831                                                     Token::Kind op_kind) { |  | 
|   1832   ASSERT(HasOnlyTwoOf(*call->ic_data(), kOneByteStringCid)); |  | 
|   1833   // Check that left and right are length-one strings (either string constants |  | 
|   1834   // or results of string-from-char-code). |  | 
|   1835   Definition* left = call->ArgumentAt(0); |  | 
|   1836   Definition* right = call->ArgumentAt(1); |  | 
|   1837   Value* left_val = NULL; |  | 
|   1838   Definition* to_remove_left = NULL; |  | 
|   1839   if (IsLengthOneString(right)) { |  | 
|   1840     // Swap, since we know that both arguments are strings. |  | 
|   1841     Definition* temp = left; |  | 
|   1842     left = right; |  | 
|   1843     right = temp; |  | 
|   1844   } |  | 
|   1845   if (IsLengthOneString(left)) { |  | 
|   1846     // Optimize if left is a string of length one (either a constant or the |  | 
|   1847     // result of string-from-char-code). |  | 
|   1848     if (left->IsConstant()) { |  | 
|   1849       ConstantInstr* left_const = left->AsConstant(); |  | 
|   1850       const String& str = String::Cast(left_const->value()); |  | 
|   1851       ASSERT(str.Length() == 1); |  | 
|   1852       ConstantInstr* char_code_left = flow_graph()->GetConstant( |  | 
|   1853           Smi::ZoneHandle(I, Smi::New(static_cast<intptr_t>(str.CharAt(0))))); |  | 
|   1854       left_val = new(I) Value(char_code_left); |  | 
|   1855     } else if (left->IsStringFromCharCode()) { |  | 
|   1856       // Use the input of string-from-char-code as the left value. |  | 
|   1857       StringFromCharCodeInstr* instr = left->AsStringFromCharCode(); |  | 
|   1858       left_val = new(I) Value(instr->char_code()->definition()); |  | 
|   1859       to_remove_left = instr; |  | 
|   1860     } else { |  | 
|   1861       // IsLengthOneString(left) should have been false. |  | 
|   1862       UNREACHABLE(); |  | 
|   1863     } |  | 
|   1864  |  | 
|   1865     Definition* to_remove_right = NULL; |  | 
|   1866     Value* right_val = NULL; |  | 
|   1867     if (right->IsStringFromCharCode()) { |  | 
|   1868       // Skip string-from-char-code and use its input as the right value. |  | 
|   1869       StringFromCharCodeInstr* right_instr = right->AsStringFromCharCode(); |  | 
|   1870       right_val = new(I) Value(right_instr->char_code()->definition()); |  | 
|   1871       to_remove_right = right_instr; |  | 
|   1872     } else { |  | 
|   1873       const ICData& unary_checks_1 = |  | 
|   1874           ICData::ZoneHandle(I, call->ic_data()->AsUnaryClassChecksForArgNr(1)); |  | 
|   1875       AddCheckClass(right, |  | 
|   1876                     unary_checks_1, |  | 
|   1877                     call->deopt_id(), |  | 
|   1878                     call->env(), |  | 
|   1879                     call); |  | 
|   1880       // A string-to-char-code instruction returns -1 (an illegal char-code) |  | 
|   1881       // if the string is not of length one. |  | 
|   1882       StringToCharCodeInstr* char_code_right = |  | 
|   1883           new(I) StringToCharCodeInstr(new(I) Value(right), kOneByteStringCid); |  | 
|   1884       InsertBefore(call, char_code_right, call->env(), FlowGraph::kValue); |  | 
|   1885       right_val = new(I) Value(char_code_right); |  | 
|   1886     } |  | 
|   1887  |  | 
|   1888     // Comparing char-codes instead of strings. |  | 
|   1889     EqualityCompareInstr* comp = |  | 
|   1890         new(I) EqualityCompareInstr(call->token_pos(), |  | 
|   1891                                     op_kind, |  | 
|   1892                                     left_val, |  | 
|   1893                                     right_val, |  | 
|   1894                                     kSmiCid, |  | 
|   1895                                     call->deopt_id()); |  | 
|   1896     ReplaceCall(call, comp); |  | 
|   1897  |  | 
|   1898     // Remove dead instructions. |  | 
|   1899     if ((to_remove_left != NULL) && |  | 
|   1900         (to_remove_left->input_use_list() == NULL)) { |  | 
|   1901       to_remove_left->ReplaceUsesWith(flow_graph()->constant_null()); |  | 
|   1902       to_remove_left->RemoveFromGraph(); |  | 
|   1903     } |  | 
|   1904     if ((to_remove_right != NULL) && |  | 
|   1905         (to_remove_right->input_use_list() == NULL)) { |  | 
|   1906       to_remove_right->ReplaceUsesWith(flow_graph()->constant_null()); |  | 
|   1907       to_remove_right->RemoveFromGraph(); |  | 
|   1908     } |  | 
|   1909     return true; |  | 
|   1910   } |  | 
|   1911   return false; |  | 
|   1912 } |  | 
|   1913  |  | 
|   1914  |  | 
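|        // A double has a 53-bit mantissa, so smis of fewer than 53 bits convert |  | 
|        // to double without loss of precision. |  | 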
|   1915 static bool SmiFitsInDouble() { return kSmiBits < 53; } |  | 
|   1916  |  | 
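|        // Attempts to replace an equality instance call with an optimized |  | 
|        // comparison: char-codes for length-one strings, a specialized |  | 
|        // EqualityCompare for smi, mint, or double operands, or a strict |  | 
|        // compare when one operand is the null constant. |  | 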
|   1917 bool FlowGraphOptimizer::TryReplaceWithEqualityOp(InstanceCallInstr* call, |  | 
|   1918                                                   Token::Kind op_kind) { |  | 
|   1919   const ICData& ic_data = *call->ic_data(); |  | 
|   1920   ASSERT(ic_data.NumArgsTested() == 2); |  | 
|   1921  |  | 
|   1922   ASSERT(call->ArgumentCount() == 2); |  | 
|   1923   Definition* left = call->ArgumentAt(0); |  | 
|   1924   Definition* right = call->ArgumentAt(1); |  | 
|   1925  |  | 
|   1926   intptr_t cid = kIllegalCid; |  | 
|   1927   if (HasOnlyTwoOf(ic_data, kOneByteStringCid)) { |  | 
|   1928     if (TryStringLengthOneEquality(call, op_kind)) { |  | 
|   1929       return true; |  | 
|   1930     } else { |  | 
|   1931       return false; |  | 
|   1932     } |  | 
|   1933   } else if (HasOnlyTwoOf(ic_data, kSmiCid)) { |  | 
|   1934     InsertBefore(call, |  | 
|   1935                  new(I) CheckSmiInstr(new(I) Value(left), |  | 
|   1936                                       call->deopt_id(), |  | 
|   1937                                       call->token_pos()), |  | 
|   1938                  call->env(), |  | 
|   1939                  FlowGraph::kEffect); |  | 
|   1940     InsertBefore(call, |  | 
|   1941                  new(I) CheckSmiInstr(new(I) Value(right), |  | 
|   1942                                       call->deopt_id(), |  | 
|   1943                                       call->token_pos()), |  | 
|   1944                  call->env(), |  | 
|   1945                  FlowGraph::kEffect); |  | 
|   1946     cid = kSmiCid; |  | 
|   1947   } else if (HasTwoMintOrSmi(ic_data) && |  | 
|   1948              FlowGraphCompiler::SupportsUnboxedMints()) { |  | 
|   1949     cid = kMintCid; |  | 
|   1950   } else if (HasTwoDoubleOrSmi(ic_data) && CanUnboxDouble()) { |  | 
|   1951     // Use double comparison. |  | 
|   1952     if (SmiFitsInDouble()) { |  | 
|   1953       cid = kDoubleCid; |  | 
|   1954     } else { |  | 
|   1955       if (ICDataHasReceiverArgumentClassIds(ic_data, kSmiCid, kSmiCid)) { |  | 
|   1956         // We cannot use double comparison on two smis. Need polymorphic |  | 
|   1957         // call. |  | 
|   1958         return false; |  | 
|   1959       } else { |  | 
|   1960         InsertBefore(call, |  | 
|   1961                      new(I) CheckEitherNonSmiInstr( |  | 
|   1962                          new(I) Value(left), |  | 
|   1963                          new(I) Value(right), |  | 
|   1964                          call->deopt_id()), |  | 
|   1965                      call->env(), |  | 
|   1966                      FlowGraph::kEffect); |  | 
|   1967         cid = kDoubleCid; |  | 
|   1968       } |  | 
|   1969     } |  | 
|   1970   } else { |  | 
|   1971     // Check if the ICData contains checks with Smi/Null combinations. In |  | 
|   1972     // that case we can still emit the optimized Smi equality operation but |  | 
|   1973     // need to add checks for null or Smi. |  | 
|   1974     GrowableArray<intptr_t> smi_or_null(2); |  | 
|   1975     smi_or_null.Add(kSmiCid); |  | 
|   1976     smi_or_null.Add(kNullCid); |  | 
|   1977     if (ICDataHasOnlyReceiverArgumentClassIds(ic_data, |  | 
|   1978                                               smi_or_null, |  | 
|   1979                                               smi_or_null)) { |  | 
|   1980       const ICData& unary_checks_0 = |  | 
|   1981           ICData::ZoneHandle(I, call->ic_data()->AsUnaryClassChecks()); |  | 
|   1982       AddCheckClass(left, |  | 
|   1983                     unary_checks_0, |  | 
|   1984                     call->deopt_id(), |  | 
|   1985                     call->env(), |  | 
|   1986                     call); |  | 
|   1987  |  | 
|   1988       const ICData& unary_checks_1 = |  | 
|   1989           ICData::ZoneHandle(I, call->ic_data()->AsUnaryClassChecksForArgNr(1)); |  | 
|   1990       AddCheckClass(right, |  | 
|   1991                     unary_checks_1, |  | 
|   1992                     call->deopt_id(), |  | 
|   1993                     call->env(), |  | 
|   1994                     call); |  | 
|   1995       cid = kSmiCid; |  | 
|   1996     } else { |  | 
|   1997       // Shortcut for equality with null. |  | 
|   1998       ConstantInstr* right_const = right->AsConstant(); |  | 
|   1999       ConstantInstr* left_const = left->AsConstant(); |  | 
|   2000       if ((right_const != NULL && right_const->value().IsNull()) || |  | 
|   2001           (left_const != NULL && left_const->value().IsNull())) { |  | 
|   2002         StrictCompareInstr* comp = |  | 
|   2003             new(I) StrictCompareInstr(call->token_pos(), |  | 
|   2004                                       Token::kEQ_STRICT, |  | 
|   2005                                       new(I) Value(left), |  | 
|   2006                                       new(I) Value(right), |  | 
|   2007                                       false);  // No number check. |  | 
|   2008         ReplaceCall(call, comp); |  | 
|   2009         return true; |  | 
|   2010       } |  | 
|   2011       return false; |  | 
|   2012     } |  | 
|   2013   } |  | 
|   2014   ASSERT(cid != kIllegalCid); |  | 
|   2015   EqualityCompareInstr* comp = new(I) EqualityCompareInstr(call->token_pos(), |  | 
|   2016                                                            op_kind, |  | 
|   2017                                                            new(I) Value(left), |  | 
|   2018                                                            new(I) Value(right), |  | 
|   2019                                                            cid, |  | 
|   2020                                                            call->deopt_id()); |  | 
|   2021   ReplaceCall(call, comp); |  | 
|   2022   return true; |  | 
|   2023 } |  | 
|   2024  |  | 
|   2025  |  | 
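|        // Attempts to replace a relational instance call with a RelationalOp |  | 
|        // specialized for smi, mint, or double operands. |  | 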
|   2026 bool FlowGraphOptimizer::TryReplaceWithRelationalOp(InstanceCallInstr* call, |  | 
|   2027                                                     Token::Kind op_kind) { |  | 
|   2028   const ICData& ic_data = *call->ic_data(); |  | 
|   2029   ASSERT(ic_data.NumArgsTested() == 2); |  | 
|   2030  |  | 
|   2031   ASSERT(call->ArgumentCount() == 2); |  | 
|   2032   Definition* left = call->ArgumentAt(0); |  | 
|   2033   Definition* right = call->ArgumentAt(1); |  | 
|   2034  |  | 
|   2035   intptr_t cid = kIllegalCid; |  | 
|   2036   if (HasOnlyTwoOf(ic_data, kSmiCid)) { |  | 
|   2037     InsertBefore(call, |  | 
|   2038                  new(I) CheckSmiInstr(new(I) Value(left), |  | 
|   2039                                       call->deopt_id(), |  | 
|   2040                                       call->token_pos()), |  | 
|   2041                  call->env(), |  | 
|   2042                  FlowGraph::kEffect); |  | 
|   2043     InsertBefore(call, |  | 
|   2044                  new(I) CheckSmiInstr(new(I) Value(right), |  | 
|   2045                                       call->deopt_id(), |  | 
|   2046                                       call->token_pos()), |  | 
|   2047                  call->env(), |  | 
|   2048                  FlowGraph::kEffect); |  | 
|   2049     cid = kSmiCid; |  | 
|   2050   } else if (HasTwoMintOrSmi(ic_data) && |  | 
|   2051              FlowGraphCompiler::SupportsUnboxedMints()) { |  | 
|   2052     cid = kMintCid; |  | 
|   2053   } else if (HasTwoDoubleOrSmi(ic_data) && CanUnboxDouble()) { |  | 
|   2054     // Use double comparison. |  | 
|   2055     if (SmiFitsInDouble()) { |  | 
|   2056       cid = kDoubleCid; |  | 
|   2057     } else { |  | 
|   2058       if (ICDataHasReceiverArgumentClassIds(ic_data, kSmiCid, kSmiCid)) { |  | 
|   2059         // We cannot use double comparison on two smis. Need polymorphic |  | 
|   2060         // call. |  | 
|   2061         return false; |  | 
|   2062       } else { |  | 
|   2063         InsertBefore(call, |  | 
|   2064                      new(I) CheckEitherNonSmiInstr( |  | 
|   2065                          new(I) Value(left), |  | 
|   2066                          new(I) Value(right), |  | 
|   2067                          call->deopt_id()), |  | 
|   2068                      call->env(), |  | 
|   2069                      FlowGraph::kEffect); |  | 
|   2070         cid = kDoubleCid; |  | 
|   2071       } |  | 
|   2072     } |  | 
|   2073   } else { |  | 
|   2074     return false; |  | 
|   2075   } |  | 
|   2076   ASSERT(cid != kIllegalCid); |  | 
|   2077   RelationalOpInstr* comp = new(I) RelationalOpInstr(call->token_pos(), |  | 
|   2078                                                      op_kind, |  | 
|   2079                                                      new(I) Value(left), |  | 
|   2080                                                      new(I) Value(right), |  | 
|   2081                                                      cid, |  | 
|   2082                                                      call->deopt_id()); |  | 
|   2083   ReplaceCall(call, comp); |  | 
|   2084   return true; |  | 
|   2085 } |  | 
|   2086  |  | 
|   2087  |  | 
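|        // Attempts to replace a binary arithmetic, bitwise, or shift instance |  | 
|        // call with a specialized binary op, choosing the operand type from |  | 
|        // the collected IC data and past deoptimization reasons. |  | 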
|   2088 bool FlowGraphOptimizer::TryReplaceWithBinaryOp(InstanceCallInstr* call, |  | 
|   2089                                                 Token::Kind op_kind) { |  | 
|   2090   intptr_t operands_type = kIllegalCid; |  | 
|   2091   ASSERT(call->HasICData()); |  | 
|   2092   const ICData& ic_data = *call->ic_data(); |  | 
|   2093   switch (op_kind) { |  | 
|   2094     case Token::kADD: |  | 
|   2095     case Token::kSUB: |  | 
|   2096     case Token::kMUL: |  | 
|   2097       if (HasOnlyTwoOf(ic_data, kSmiCid)) { |  | 
|   2098         // Don't generate smi code if the IC data is marked because |  | 
|   2099         // of an overflow. |  | 
|   2100         operands_type = ic_data.HasDeoptReason(ICData::kDeoptBinarySmiOp) |  | 
|   2101             ? kMintCid |  | 
|   2102             : kSmiCid; |  | 
|   2103       } else if (HasTwoMintOrSmi(ic_data) && |  | 
|   2104                  FlowGraphCompiler::SupportsUnboxedMints()) { |  | 
|   2105         // Don't generate mint code if the IC data is marked because of an |  | 
|   2106         // overflow. |  | 
|   2107         if (ic_data.HasDeoptReason(ICData::kDeoptBinaryMintOp)) return false; |  | 
|   2108         operands_type = kMintCid; |  | 
|   2109       } else if (ShouldSpecializeForDouble(ic_data)) { |  | 
|   2110         operands_type = kDoubleCid; |  | 
|   2111       } else if (HasOnlyTwoOf(ic_data, kFloat32x4Cid)) { |  | 
|   2112         operands_type = kFloat32x4Cid; |  | 
|   2113       } else if (HasOnlyTwoOf(ic_data, kInt32x4Cid)) { |  | 
|   2114         ASSERT(op_kind != Token::kMUL);  // Int32x4 doesn't have a multiply op. |  | 
|   2115         operands_type = kInt32x4Cid; |  | 
|   2116       } else if (HasOnlyTwoOf(ic_data, kFloat64x2Cid)) { |  | 
|   2117         operands_type = kFloat64x2Cid; |  | 
|   2118       } else { |  | 
|   2119         return false; |  | 
|   2120       } |  | 
|   2121       break; |  | 
|   2122     case Token::kDIV: |  | 
|   2123       if (ShouldSpecializeForDouble(ic_data) || |  | 
|   2124           HasOnlyTwoOf(ic_data, kSmiCid)) { |  | 
|   2125         operands_type = kDoubleCid; |  | 
|   2126       } else if (HasOnlyTwoOf(ic_data, kFloat32x4Cid)) { |  | 
|   2127         operands_type = kFloat32x4Cid; |  | 
|   2128       } else if (HasOnlyTwoOf(ic_data, kFloat64x2Cid)) { |  | 
|   2129         operands_type = kFloat64x2Cid; |  | 
|   2130       } else { |  | 
|   2131         return false; |  | 
|   2132       } |  | 
|   2133       break; |  | 
|   2134     case Token::kBIT_AND: |  | 
|   2135     case Token::kBIT_OR: |  | 
|   2136     case Token::kBIT_XOR: |  | 
|   2137       if (HasOnlyTwoOf(ic_data, kSmiCid)) { |  | 
|   2138         operands_type = kSmiCid; |  | 
|   2139       } else if (HasTwoMintOrSmi(ic_data)) { |  | 
|   2140         operands_type = kMintCid; |  | 
|   2141       } else if (HasOnlyTwoOf(ic_data, kInt32x4Cid)) { |  | 
|   2142         operands_type = kInt32x4Cid; |  | 
|   2143       } else { |  | 
|   2144         return false; |  | 
|   2145       } |  | 
|   2146       break; |  | 
|   2147     case Token::kSHR: |  | 
|   2148     case Token::kSHL: |  | 
|   2149       if (HasOnlyTwoOf(ic_data, kSmiCid)) { |  | 
|   2150         // Left shift may overflow from smi into mint or big ints. |  | 
|   2151         // Don't generate smi code if the IC data is marked because |  | 
|   2152         // of an overflow. |  | 
|   2153         if (ic_data.HasDeoptReason(ICData::kDeoptBinaryMintOp)) { |  | 
|   2154           return false; |  | 
|   2155         } |  | 
|   2156         operands_type = ic_data.HasDeoptReason(ICData::kDeoptBinarySmiOp) |  | 
|   2157             ? kMintCid |  | 
|   2158             : kSmiCid; |  | 
|   2159       } else if (HasTwoMintOrSmi(ic_data) && |  | 
|   2160                  HasOnlyOneSmi(ICData::Handle(I, |  | 
|   2161                      ic_data.AsUnaryClassChecksForArgNr(1)))) { |  | 
|   2162         // Don't generate mint code if the IC data is marked because of an |  | 
|   2163         // overflow. |  | 
|   2164         if (ic_data.HasDeoptReason(ICData::kDeoptBinaryMintOp)) { |  | 
|   2165           return false; |  | 
|   2166         } |  | 
|   2167         // Check for smi/mint << smi or smi/mint >> smi. |  | 
|   2168         operands_type = kMintCid; |  | 
|   2169       } else { |  | 
|   2170         return false; |  | 
|   2171       } |  | 
|   2172       break; |  | 
|   2173     case Token::kMOD: |  | 
|   2174     case Token::kTRUNCDIV: |  | 
|   2175       if (HasOnlyTwoOf(ic_data, kSmiCid)) { |  | 
|   2176         if (ic_data.HasDeoptReason(ICData::kDeoptBinarySmiOp)) { |  | 
|   2177           return false; |  | 
|   2178         } |  | 
|   2179         operands_type = kSmiCid; |  | 
|   2180       } else { |  | 
|   2181         return false; |  | 
|   2182       } |  | 
|   2183       break; |  | 
|   2184     default: |  | 
|   2185       UNREACHABLE(); |  | 
|   2186   } |  | 
|   2187  |  | 
|   2188   ASSERT(call->ArgumentCount() == 2); |  | 
|   2189   Definition* left = call->ArgumentAt(0); |  | 
|   2190   Definition* right = call->ArgumentAt(1); |  | 
|   2191   if (operands_type == kDoubleCid) { |  | 
|   2192     if (!CanUnboxDouble()) { |  | 
|   2193       return false; |  | 
|   2194     } |  | 
|   2195     // Check that at least one of left or right is not a smi: the result |  | 
|   2196     // of a binary operation on two smis is a smi, not a double, except |  | 
|   2197     // for '/', which returns a double even for two smi operands. |  | 
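|            // (For example, 3 / 2 evaluates to 1.5 in Dart, so kDIV needs no |  | 
|            // non-smi check on its operands.) |  | 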
|   2198     if (op_kind != Token::kDIV) { |  | 
|   2199       InsertBefore(call, |  | 
|   2200                    new(I) CheckEitherNonSmiInstr( |  | 
|   2201                        new(I) Value(left), |  | 
|   2202                        new(I) Value(right), |  | 
|   2203                        call->deopt_id()), |  | 
|   2204                    call->env(), |  | 
|   2205                    FlowGraph::kEffect); |  | 
|   2206     } |  | 
|   2207  |  | 
|   2208     BinaryDoubleOpInstr* double_bin_op = |  | 
|   2209         new(I) BinaryDoubleOpInstr(op_kind, |  | 
|   2210                                    new(I) Value(left), |  | 
|   2211                                    new(I) Value(right), |  | 
|   2212                                    call->deopt_id(), call->token_pos()); |  | 
|   2213     ReplaceCall(call, double_bin_op); |  | 
|   2214   } else if (operands_type == kMintCid) { |  | 
|   2215     if (!FlowGraphCompiler::SupportsUnboxedMints()) return false; |  | 
|   2216     if ((op_kind == Token::kSHR) || (op_kind == Token::kSHL)) { |  | 
|   2217       ShiftMintOpInstr* shift_op = |  | 
|   2218           new(I) ShiftMintOpInstr( |  | 
|   2219               op_kind, new(I) Value(left), new(I) Value(right), |  | 
|   2220               call->deopt_id()); |  | 
|   2221       ReplaceCall(call, shift_op); |  | 
|   2222     } else { |  | 
|   2223       BinaryMintOpInstr* bin_op = |  | 
|   2224           new(I) BinaryMintOpInstr( |  | 
|   2225               op_kind, new(I) Value(left), new(I) Value(right), |  | 
|   2226               call->deopt_id()); |  | 
|   2227       ReplaceCall(call, bin_op); |  | 
|   2228     } |  | 
|   2229   } else if (operands_type == kFloat32x4Cid) { |  | 
|   2230     return InlineFloat32x4BinaryOp(call, op_kind); |  | 
|   2231   } else if (operands_type == kInt32x4Cid) { |  | 
|   2232     return InlineInt32x4BinaryOp(call, op_kind); |  | 
|   2233   } else if (operands_type == kFloat64x2Cid) { |  | 
|   2234     return InlineFloat64x2BinaryOp(call, op_kind); |  | 
|   2235   } else if (op_kind == Token::kMOD) { |  | 
|   2236     ASSERT(operands_type == kSmiCid); |  | 
|   2237     if (right->IsConstant()) { |  | 
|   2238       const Object& obj = right->AsConstant()->value(); |  | 
|   2239       if (obj.IsSmi() && Utils::IsPowerOfTwo(Smi::Cast(obj).Value())) { |  | 
|   2240         // Insert smi check and attach a copy of the original environment |  | 
|   2241         // because the smi operation can still deoptimize. |  | 
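|                // For a power-of-two divisor d, x % d == x & (d - 1) in two's- |  | 
|                // complement arithmetic (e.g. x % 8 == x & 7), and the result is |  | 
|                // non-negative, matching Dart's % for a positive divisor. |  | 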
|   2242         InsertBefore(call, |  | 
|   2243                      new(I) CheckSmiInstr(new(I) Value(left), |  | 
|   2244                                           call->deopt_id(), |  | 
|   2245                                           call->token_pos()), |  | 
|   2246                      call->env(), |  | 
|   2247                      FlowGraph::kEffect); |  | 
|   2248         ConstantInstr* constant = |  | 
|   2249             flow_graph()->GetConstant(Smi::Handle(I, |  | 
|   2250                 Smi::New(Smi::Cast(obj).Value() - 1))); |  | 
|   2251         BinarySmiOpInstr* bin_op = |  | 
|   2252             new(I) BinarySmiOpInstr(Token::kBIT_AND, |  | 
|   2253                                     new(I) Value(left), |  | 
|   2254                                     new(I) Value(constant), |  | 
|   2255                                     call->deopt_id()); |  | 
|   2256         ReplaceCall(call, bin_op); |  | 
|   2257         return true; |  | 
|   2258       } |  | 
|   2259     } |  | 
|   2260     // Insert two smi checks and attach a copy of the original |  | 
|   2261     // environment because the smi operation can still deoptimize. |  | 
|   2262     AddCheckSmi(left, call->deopt_id(), call->env(), call); |  | 
|   2263     AddCheckSmi(right, call->deopt_id(), call->env(), call); |  | 
|   2264     BinarySmiOpInstr* bin_op = |  | 
|   2265         new(I) BinarySmiOpInstr(op_kind, |  | 
|   2266                                 new(I) Value(left), |  | 
|   2267                                 new(I) Value(right), |  | 
|   2268                                 call->deopt_id()); |  | 
|   2269     ReplaceCall(call, bin_op); |  | 
|   2270   } else { |  | 
|   2271     ASSERT(operands_type == kSmiCid); |  | 
|   2272     // Insert two smi checks and attach a copy of the original |  | 
|   2273     // environment because the smi operation can still deoptimize. |  | 
|   2274     AddCheckSmi(left, call->deopt_id(), call->env(), call); |  | 
|   2275     AddCheckSmi(right, call->deopt_id(), call->env(), call); |  | 
|   2276     if (left->IsConstant() && |  | 
|   2277         ((op_kind == Token::kADD) || (op_kind == Token::kMUL))) { |  | 
|   2278       // Keep the constant on the right; kADD and kMUL are commutative. |  | 
|   2279       Definition* temp = left; |  | 
|   2280       left = right; |  | 
|   2281       right = temp; |  | 
|   2282     } |  | 
|   2283     BinarySmiOpInstr* bin_op = |  | 
|   2284         new(I) BinarySmiOpInstr( |  | 
|   2285             op_kind, |  | 
|   2286             new(I) Value(left), |  | 
|   2287             new(I) Value(right), |  | 
|   2288             call->deopt_id()); |  | 
|   2289     ReplaceCall(call, bin_op); |  | 
|   2290   } |  | 
|   2291   return true; |  | 
|   2292 } |  | 
|   2293  |  | 
|   2294  |  | 
|   2295 bool FlowGraphOptimizer::TryReplaceWithUnaryOp(InstanceCallInstr* call, |  | 
|   2296                                                Token::Kind op_kind) { |  | 
|   2297   ASSERT(call->ArgumentCount() == 1); |  | 
|   2298   Definition* input = call->ArgumentAt(0); |  | 
|   2299   Definition* unary_op = NULL; |  | 
|   2300   if (HasOnlyOneSmi(*call->ic_data())) { |  | 
|   2301     InsertBefore(call, |  | 
|   2302                  new(I) CheckSmiInstr(new(I) Value(input), |  | 
|   2303                                       call->deopt_id(), |  | 
|   2304                                       call->token_pos()), |  | 
|   2305                  call->env(), |  | 
|   2306                  FlowGraph::kEffect); |  | 
|   2307     unary_op = new(I) UnarySmiOpInstr( |  | 
|   2308         op_kind, new(I) Value(input), call->deopt_id()); |  | 
|   2309   } else if ((op_kind == Token::kBIT_NOT) && |  | 
|   2310              HasOnlySmiOrMint(*call->ic_data()) && |  | 
|   2311              FlowGraphCompiler::SupportsUnboxedMints()) { |  | 
|   2312     unary_op = new(I) UnaryMintOpInstr( |  | 
|   2313         op_kind, new(I) Value(input), call->deopt_id()); |  | 
|   2314   } else if (HasOnlyOneDouble(*call->ic_data()) && |  | 
|   2315              (op_kind == Token::kNEGATE) && |  | 
|   2316              CanUnboxDouble()) { |  | 
|   2317     AddReceiverCheck(call); |  | 
|   2318     unary_op = new(I) UnaryDoubleOpInstr( |  | 
|   2319         Token::kNEGATE, new(I) Value(input), call->deopt_id()); |  | 
|   2320   } else { |  | 
|   2321     return false; |  | 
|   2322   } |  | 
|   2323   ASSERT(unary_op != NULL); |  | 
|   2324   ReplaceCall(call, unary_op); |  | 
|   2325   return true; |  | 
|   2326 } |  | 
|   2327  |  | 
|   2328  |  | 
|   2329 // Looks up the instance field field_name on class_id's class, walking up |  | 
|        // the superclass chain; returns Field::null() if not found. |  | 
|   2330 static RawField* GetField(intptr_t class_id, const String& field_name) { |  | 
|   2331   Isolate* isolate = Isolate::Current(); |  | 
|   2332   Class& cls = Class::Handle(isolate, isolate->class_table()->At(class_id)); |  | 
|   2333   Field& field = Field::Handle(isolate); |  | 
|   2334   while (!cls.IsNull()) { |  | 
|   2335     field = cls.LookupInstanceField(field_name); |  | 
|   2336     if (!field.IsNull()) { |  | 
|   2337       return field.raw(); |  | 
|   2338     } |  | 
|   2339     cls = cls.SuperClass(); |  | 
|   2340   } |  | 
|   2341   return Field::null(); |  | 
|   2342 } |  | 
|   2343  |  | 
|   2344  |  | 
|   2345 // Use CHA to determine if the call needs a class check: if the callee's |  | 
|   2346 // receiver is the same as the caller's receiver and there are no overridden |  | 
|   2347 // callee functions, then no class check is needed. |  | 
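|        // For example, if class A defines foo() and no loaded subclass overrides |  | 
|        // it, a call to this.foo() from another method of A needs no class check. |  | 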
|   2348 bool FlowGraphOptimizer::InstanceCallNeedsClassCheck( |  | 
|   2349     InstanceCallInstr* call, RawFunction::Kind kind) const { |  | 
|   2350   if (!FLAG_use_cha) return true; |  | 
|   2351   Definition* callee_receiver = call->ArgumentAt(0); |  | 
|   2352   ASSERT(callee_receiver != NULL); |  | 
|   2353   const Function& function = flow_graph_->parsed_function()->function(); |  | 
|   2354   if (function.IsDynamicFunction() && |  | 
|   2355       callee_receiver->IsParameter() && |  | 
|   2356       (callee_receiver->AsParameter()->index() == 0)) { |  | 
|   2357     const String& name = (kind == RawFunction::kMethodExtractor) |  | 
|   2358         ? String::Handle(I, Field::NameFromGetter(call->function_name())) |  | 
|   2359         : call->function_name(); |  | 
|   2360     return isolate()->cha()->HasOverride(Class::Handle(I, function.Owner()), |  | 
|   2361                                          name); |  | 
|   2362   } |  | 
|   2363   return true; |  | 
|   2364 } |  | 
|   2365  |  | 
|   2366  |  | 
|   2367 void FlowGraphOptimizer::InlineImplicitInstanceGetter(InstanceCallInstr* call) { |  | 
|   2368   ASSERT(call->HasICData()); |  | 
|   2369   const ICData& ic_data = *call->ic_data(); |  | 
|   2370   ASSERT(ic_data.HasOneTarget()); |  | 
|   2371   Function& target = Function::Handle(I); |  | 
|   2372   GrowableArray<intptr_t> class_ids; |  | 
|   2373   ic_data.GetCheckAt(0, &class_ids, &target); |  | 
|   2374   ASSERT(class_ids.length() == 1); |  | 
|   2375   // Inline implicit instance getter. |  | 
|   2376   const String& field_name = |  | 
|   2377       String::Handle(I, Field::NameFromGetter(call->function_name())); |  | 
|   2378   const Field& field = |  | 
|   2379       Field::ZoneHandle(I, GetField(class_ids[0], field_name)); |  | 
|   2380   ASSERT(!field.IsNull()); |  | 
|   2381  |  | 
|   2382   if (InstanceCallNeedsClassCheck(call, RawFunction::kImplicitGetter)) { |  | 
|   2383     AddReceiverCheck(call); |  | 
|   2384   } |  | 
|   2385   LoadFieldInstr* load = new(I) LoadFieldInstr( |  | 
|   2386       new(I) Value(call->ArgumentAt(0)), |  | 
|   2387       &field, |  | 
|   2388       AbstractType::ZoneHandle(I, field.type()), |  | 
|   2389       call->token_pos()); |  | 
|   2390   load->set_is_immutable(field.is_final()); |  | 
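|          // A guarded field remembers the single class id ever stored into it, |  | 
|          // so the load can be typed with that cid while the guard holds; the |  | 
|          // field is registered so that this code is deoptimized if the guard is |  | 
|          // later invalidated. |  | 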
|   2391   if (field.guarded_cid() != kIllegalCid) { |  | 
|   2392     if (!field.is_nullable() || (field.guarded_cid() == kNullCid)) { |  | 
|   2393       load->set_result_cid(field.guarded_cid()); |  | 
|   2394     } |  | 
|   2395     FlowGraph::AddToGuardedFields(flow_graph_->guarded_fields(), &field); |  | 
|   2396   } |  | 
|   2397  |  | 
|   2398   // Discard the environment from the original instruction because the load |  | 
|   2399   // can't deoptimize. |  | 
|   2400   call->RemoveEnvironment(); |  | 
|   2401   ReplaceCall(call, load); |  | 
|   2402  |  | 
|   2403   if (load->result_cid() != kDynamicCid) { |  | 
|   2404     // Reset value types if guarded_cid was used. |  | 
|   2405     for (Value::Iterator it(load->input_use_list()); |  | 
|   2406          !it.Done(); |  | 
|   2407          it.Advance()) { |  | 
|   2408       it.Current()->SetReachingType(NULL); |  | 
|   2409     } |  | 
|   2410   } |  | 
|   2411 } |  | 
|   2412  |  | 
|   2413  |  | 
|   2414 bool FlowGraphOptimizer::InlineFloat32x4Getter(InstanceCallInstr* call, |  | 
|   2415                                                MethodRecognizer::Kind getter) { |  | 
|   2416   if (!ShouldInlineSimd()) { |  | 
|   2417     return false; |  | 
|   2418   } |  | 
|   2419   AddCheckClass(call->ArgumentAt(0), |  | 
|   2420                 ICData::ZoneHandle( |  | 
|   2421                     I, call->ic_data()->AsUnaryClassChecksForArgNr(0)), |  | 
|   2422                 call->deopt_id(), |  | 
|   2423                 call->env(), |  | 
|   2424                 call); |  | 
|   2425   intptr_t mask = 0; |  | 
|   2426   if ((getter == MethodRecognizer::kFloat32x4Shuffle) || |  | 
|   2427       (getter == MethodRecognizer::kFloat32x4ShuffleMix)) { |  | 
|   2428     // Extract shuffle mask. |  | 
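|            // The mask packs four 2-bit lane indices into a single byte, so only |  | 
|            // values 0..255 are valid. |  | 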
|   2429     Definition* mask_definition = NULL; |  | 
|   2430     if (getter == MethodRecognizer::kFloat32x4Shuffle) { |  | 
|   2431       ASSERT(call->ArgumentCount() == 2); |  | 
|   2432       mask_definition = call->ArgumentAt(1); |  | 
|   2433     } else { |  | 
|   2434       ASSERT(getter == MethodRecognizer::kFloat32x4ShuffleMix); |  | 
|   2435       ASSERT(call->ArgumentCount() == 3); |  | 
|   2436       mask_definition = call->ArgumentAt(2); |  | 
|   2437     } |  | 
|   2438     if (!mask_definition->IsConstant()) { |  | 
|   2439       return false; |  | 
|   2440     } |  | 
|   2441     ASSERT(mask_definition->IsConstant()); |  | 
|   2442     ConstantInstr* constant_instruction = mask_definition->AsConstant(); |  | 
|   2443     const Object& constant_mask = constant_instruction->value(); |  | 
|   2444     if (!constant_mask.IsSmi()) { |  | 
|   2445       return false; |  | 
|   2446     } |  | 
|   2447     ASSERT(constant_mask.IsSmi()); |  | 
|   2448     mask = Smi::Cast(constant_mask).Value(); |  | 
|   2449     if ((mask < 0) || (mask > 255)) { |  | 
|   2450       // Not a valid mask. |  | 
|   2451       return false; |  | 
|   2452     } |  | 
|   2453   } |  | 
|   2454   if (getter == MethodRecognizer::kFloat32x4GetSignMask) { |  | 
|   2455     Simd32x4GetSignMaskInstr* instr = new(I) Simd32x4GetSignMaskInstr( |  | 
|   2456         getter, |  | 
|   2457         new(I) Value(call->ArgumentAt(0)), |  | 
|   2458         call->deopt_id()); |  | 
|   2459     ReplaceCall(call, instr); |  | 
|   2460     return true; |  | 
|   2461   } else if (getter == MethodRecognizer::kFloat32x4ShuffleMix) { |  | 
|   2462     Simd32x4ShuffleMixInstr* instr = new(I) Simd32x4ShuffleMixInstr( |  | 
|   2463         getter, |  | 
|   2464         new(I) Value(call->ArgumentAt(0)), |  | 
|   2465         new(I) Value(call->ArgumentAt(1)), |  | 
|   2466         mask, |  | 
|   2467         call->deopt_id()); |  | 
|   2468     ReplaceCall(call, instr); |  | 
|   2469     return true; |  | 
|   2470   } else { |  | 
|   2471     ASSERT((getter == MethodRecognizer::kFloat32x4Shuffle)  || |  | 
|   2472            (getter == MethodRecognizer::kFloat32x4ShuffleX) || |  | 
|   2473            (getter == MethodRecognizer::kFloat32x4ShuffleY) || |  | 
|   2474            (getter == MethodRecognizer::kFloat32x4ShuffleZ) || |  | 
|   2475            (getter == MethodRecognizer::kFloat32x4ShuffleW)); |  | 
|   2476     Simd32x4ShuffleInstr* instr = new(I) Simd32x4ShuffleInstr( |  | 
|   2477         getter, |  | 
|   2478         new(I) Value(call->ArgumentAt(0)), |  | 
|   2479         mask, |  | 
|   2480         call->deopt_id()); |  | 
|   2481     ReplaceCall(call, instr); |  | 
|   2482     return true; |  | 
|   2483   } |  | 
|   2484   UNREACHABLE(); |  | 
|   2485   return false; |  | 
|   2486 } |  | 
|   2487  |  | 
|   2488  |  | 
|   2489 bool FlowGraphOptimizer::InlineFloat64x2Getter(InstanceCallInstr* call, |  | 
|   2490                                                MethodRecognizer::Kind getter) { |  | 
|   2491   if (!ShouldInlineSimd()) { |  | 
|   2492     return false; |  | 
|   2493   } |  | 
|   2494   AddCheckClass(call->ArgumentAt(0), |  | 
|   2495                 ICData::ZoneHandle( |  | 
|   2496                     I, call->ic_data()->AsUnaryClassChecksForArgNr(0)), |  | 
|   2497                 call->deopt_id(), |  | 
|   2498                 call->env(), |  | 
|   2499                 call); |  | 
|   2500   if ((getter == MethodRecognizer::kFloat64x2GetX) || |  | 
|   2501       (getter == MethodRecognizer::kFloat64x2GetY)) { |  | 
|   2502     Simd64x2ShuffleInstr* instr = new(I) Simd64x2ShuffleInstr( |  | 
|   2503         getter, |  | 
|   2504         new(I) Value(call->ArgumentAt(0)), |  | 
|   2505         0, |  | 
|   2506         call->deopt_id()); |  | 
|   2507     ReplaceCall(call, instr); |  | 
|   2508     return true; |  | 
|   2509   } |  | 
|   2510   UNREACHABLE(); |  | 
|   2511   return false; |  | 
|   2512 } |  | 
|   2513  |  | 
|   2514  |  | 
|   2515 bool FlowGraphOptimizer::InlineInt32x4Getter(InstanceCallInstr* call, |  | 
|   2516                                               MethodRecognizer::Kind getter) { |  | 
|   2517   if (!ShouldInlineSimd()) { |  | 
|   2518     return false; |  | 
|   2519   } |  | 
|   2520   AddCheckClass(call->ArgumentAt(0), |  | 
|   2521                 ICData::ZoneHandle( |  | 
|   2522                     I, call->ic_data()->AsUnaryClassChecksForArgNr(0)), |  | 
|   2523                 call->deopt_id(), |  | 
|   2524                 call->env(), |  | 
|   2525                 call); |  | 
|   2526   intptr_t mask = 0; |  | 
|   2527   if ((getter == MethodRecognizer::kInt32x4Shuffle) || |  | 
|   2528       (getter == MethodRecognizer::kInt32x4ShuffleMix)) { |  | 
|   2529     // Extract shuffle mask. |  | 
|   2530     Definition* mask_definition = NULL; |  | 
|   2531     if (getter == MethodRecognizer::kInt32x4Shuffle) { |  | 
|   2532       ASSERT(call->ArgumentCount() == 2); |  | 
|   2533       mask_definition = call->ArgumentAt(1); |  | 
|   2534     } else { |  | 
|   2535       ASSERT(getter == MethodRecognizer::kInt32x4ShuffleMix); |  | 
|   2536       ASSERT(call->ArgumentCount() == 3); |  | 
|   2537       mask_definition = call->ArgumentAt(2); |  | 
|   2538     } |  | 
|   2539     if (!mask_definition->IsConstant()) { |  | 
|   2540       return false; |  | 
|   2541     } |  | 
|   2542     ASSERT(mask_definition->IsConstant()); |  | 
|   2543     ConstantInstr* constant_instruction = mask_definition->AsConstant(); |  | 
|   2544     const Object& constant_mask = constant_instruction->value(); |  | 
|   2545     if (!constant_mask.IsSmi()) { |  | 
|   2546       return false; |  | 
|   2547     } |  | 
|   2548     ASSERT(constant_mask.IsSmi()); |  | 
|   2549     mask = Smi::Cast(constant_mask).Value(); |  | 
|   2550     if ((mask < 0) || (mask > 255)) { |  | 
|   2551       // Not a valid mask. |  | 
|   2552       return false; |  | 
|   2553     } |  | 
|   2554   } |  | 
|   2555   if (getter == MethodRecognizer::kInt32x4GetSignMask) { |  | 
|   2556     Simd32x4GetSignMaskInstr* instr = new(I) Simd32x4GetSignMaskInstr( |  | 
|   2557         getter, |  | 
|   2558         new(I) Value(call->ArgumentAt(0)), |  | 
|   2559         call->deopt_id()); |  | 
|   2560     ReplaceCall(call, instr); |  | 
|   2561     return true; |  | 
|   2562   } else if (getter == MethodRecognizer::kInt32x4ShuffleMix) { |  | 
|   2563     Simd32x4ShuffleMixInstr* instr = new(I) Simd32x4ShuffleMixInstr( |  | 
|   2564         getter, |  | 
|   2565         new(I) Value(call->ArgumentAt(0)), |  | 
|   2566         new(I) Value(call->ArgumentAt(1)), |  | 
|   2567         mask, |  | 
|   2568         call->deopt_id()); |  | 
|   2569     ReplaceCall(call, instr); |  | 
|   2570     return true; |  | 
|   2571   } else if (getter == MethodRecognizer::kInt32x4Shuffle) { |  | 
|   2572     Simd32x4ShuffleInstr* instr = new(I) Simd32x4ShuffleInstr( |  | 
|   2573         getter, |  | 
|   2574         new(I) Value(call->ArgumentAt(0)), |  | 
|   2575         mask, |  | 
|   2576         call->deopt_id()); |  | 
|   2577     ReplaceCall(call, instr); |  | 
|   2578     return true; |  | 
|   2579   } else { |  | 
|   2580     Int32x4GetFlagInstr* instr = new(I) Int32x4GetFlagInstr( |  | 
|   2581         getter, |  | 
|   2582         new(I) Value(call->ArgumentAt(0)), |  | 
|   2583         call->deopt_id()); |  | 
|   2584     ReplaceCall(call, instr); |  | 
|   2585     return true; |  | 
|   2586   } |  | 
|   2587 } |  | 
|   2588  |  | 
|   2589  |  | 
|   2590 bool FlowGraphOptimizer::InlineFloat32x4BinaryOp(InstanceCallInstr* call, |  | 
|   2591                                                  Token::Kind op_kind) { |  | 
|   2592   if (!ShouldInlineSimd()) { |  | 
|   2593     return false; |  | 
|   2594   } |  | 
|   2595   ASSERT(call->ArgumentCount() == 2); |  | 
|   2596   Definition* left = call->ArgumentAt(0); |  | 
|   2597   Definition* right = call->ArgumentAt(1); |  | 
|   2598   // Type check left. |  | 
|   2599   AddCheckClass(left, |  | 
|   2600                 ICData::ZoneHandle( |  | 
|   2601                     I, call->ic_data()->AsUnaryClassChecksForArgNr(0)), |  | 
|   2602                 call->deopt_id(), |  | 
|   2603                 call->env(), |  | 
|   2604                 call); |  | 
|   2605   // Type check right. |  | 
|   2606   AddCheckClass(right, |  | 
|   2607                 ICData::ZoneHandle( |  | 
|   2608                     I, call->ic_data()->AsUnaryClassChecksForArgNr(1)), |  | 
|   2609                 call->deopt_id(), |  | 
|   2610                 call->env(), |  | 
|   2611                 call); |  | 
|   2612   // Replace call. |  | 
|   2613   BinaryFloat32x4OpInstr* float32x4_bin_op = |  | 
|   2614       new(I) BinaryFloat32x4OpInstr( |  | 
|   2615           op_kind, new(I) Value(left), new(I) Value(right), |  | 
|   2616           call->deopt_id()); |  | 
|   2617   ReplaceCall(call, float32x4_bin_op); |  | 
|   2618  |  | 
|   2619   return true; |  | 
|   2620 } |  | 
|   2621  |  | 
|   2622  |  | 
|   2623 bool FlowGraphOptimizer::InlineInt32x4BinaryOp(InstanceCallInstr* call, |  | 
|   2624                                                 Token::Kind op_kind) { |  | 
|   2625   if (!ShouldInlineSimd()) { |  | 
|   2626     return false; |  | 
|   2627   } |  | 
|   2628   ASSERT(call->ArgumentCount() == 2); |  | 
|   2629   Definition* left = call->ArgumentAt(0); |  | 
|   2630   Definition* right = call->ArgumentAt(1); |  | 
|   2631   // Type check left. |  | 
|   2632   AddCheckClass(left, |  | 
|   2633                 ICData::ZoneHandle( |  | 
|   2634                     I, call->ic_data()->AsUnaryClassChecksForArgNr(0)), |  | 
|   2635                 call->deopt_id(), |  | 
|   2636                 call->env(), |  | 
|   2637                 call); |  | 
|   2638   // Type check right. |  | 
|   2639   AddCheckClass(right, |  | 
|   2640                 ICData::ZoneHandle(I, |  | 
|   2641                     call->ic_data()->AsUnaryClassChecksForArgNr(1)), |  | 
|   2642                 call->deopt_id(), |  | 
|   2643                 call->env(), |  | 
|   2644                 call); |  | 
|   2645   // Replace call. |  | 
|   2646   BinaryInt32x4OpInstr* int32x4_bin_op = |  | 
|   2647       new(I) BinaryInt32x4OpInstr( |  | 
|   2648           op_kind, new(I) Value(left), new(I) Value(right), |  | 
|   2649           call->deopt_id()); |  | 
|   2650   ReplaceCall(call, int32x4_bin_op); |  | 
|   2651   return true; |  | 
|   2652 } |  | 
|   2653  |  | 
|   2654  |  | 
|   2655 bool FlowGraphOptimizer::InlineFloat64x2BinaryOp(InstanceCallInstr* call, |  | 
|   2656                                                  Token::Kind op_kind) { |  | 
|   2657   if (!ShouldInlineSimd()) { |  | 
|   2658     return false; |  | 
|   2659   } |  | 
|   2660   ASSERT(call->ArgumentCount() == 2); |  | 
|   2661   Definition* left = call->ArgumentAt(0); |  | 
|   2662   Definition* right = call->ArgumentAt(1); |  | 
|   2663   // Type check left. |  | 
|   2664   AddCheckClass(left, |  | 
|   2665                 ICData::ZoneHandle( |  | 
|   2666                     call->ic_data()->AsUnaryClassChecksForArgNr(0)), |  | 
|   2667                 call->deopt_id(), |  | 
|   2668                 call->env(), |  | 
|   2669                 call); |  | 
|   2670   // Type check right. |  | 
|   2671   AddCheckClass(right, |  | 
|   2672                 ICData::ZoneHandle( |  | 
|   2673                     call->ic_data()->AsUnaryClassChecksForArgNr(1)), |  | 
|   2674                 call->deopt_id(), |  | 
|   2675                 call->env(), |  | 
|   2676                 call); |  | 
|   2677   // Replace call. |  | 
|   2678   BinaryFloat64x2OpInstr* float64x2_bin_op = |  | 
|   2679       new(I) BinaryFloat64x2OpInstr( |  | 
|   2680           op_kind, new(I) Value(left), new(I) Value(right), |  | 
|   2681           call->deopt_id()); |  | 
|   2682   ReplaceCall(call, float64x2_bin_op); |  | 
|   2683   return true; |  | 
|   2684 } |  | 
|   2685  |  | 
|   2686  |  | 
|   2687 // Currently only implicit instance getters with a single target are handled. |  | 
|   2688 bool FlowGraphOptimizer::TryInlineInstanceGetter(InstanceCallInstr* call) { |  | 
|   2689   ASSERT(call->HasICData()); |  | 
|   2690   const ICData& ic_data = *call->ic_data(); |  | 
|   2691   if (ic_data.NumberOfUsedChecks() == 0) { |  | 
|   2692     // No type feedback collected. |  | 
|   2693     return false; |  | 
|   2694   } |  | 
|   2695  |  | 
|   2696   if (!ic_data.HasOneTarget()) { |  | 
|   2697     // Polymorphic sites are inlined like normal methods by conventional |  | 
|   2698     // inlining in FlowGraphInliner. |  | 
|   2699     return false; |  | 
|   2700   } |  | 
|   2701  |  | 
|   2702   const Function& target = Function::Handle(I, ic_data.GetTargetAt(0)); |  | 
|   2703   if (target.kind() != RawFunction::kImplicitGetter) { |  | 
|   2704     // Non-implicit getters are inlined like normal methods by conventional |  | 
|   2705     // inlining in FlowGraphInliner. |  | 
|   2706     return false; |  | 
|   2707   } |  | 
|   2708   InlineImplicitInstanceGetter(call); |  | 
|   2709   return true; |  | 
|   2710 } |  | 
|   2711  |  | 
|   2712  |  | 
|   2713 bool FlowGraphOptimizer::TryReplaceInstanceCallWithInline( |  | 
|   2714     InstanceCallInstr* call) { |  | 
|   2715   ASSERT(call->HasICData()); |  | 
|   2716   Function& target = Function::Handle(I); |  | 
|   2717   GrowableArray<intptr_t> class_ids; |  | 
|   2718   call->ic_data()->GetCheckAt(0, &class_ids, &target); |  | 
|   2719   const intptr_t receiver_cid = class_ids[0]; |  | 
|   2720  |  | 
|   2721   TargetEntryInstr* entry; |  | 
|   2722   Definition* last; |  | 
|   2723   if (!TryInlineRecognizedMethod(receiver_cid, |  | 
|   2724                                  target, |  | 
|   2725                                  call, |  | 
|   2726                                  call->ArgumentAt(0), |  | 
|   2727                                  call->token_pos(), |  | 
|   2728                                  *call->ic_data(), |  | 
|   2729                                  &entry, &last)) { |  | 
|   2730     return false; |  | 
|   2731   } |  | 
|   2732  |  | 
|   2733   // Insert receiver class check. |  | 
|   2734   AddReceiverCheck(call); |  | 
|   2735   // Remove the original push arguments. |  | 
|   2736   for (intptr_t i = 0; i < call->ArgumentCount(); ++i) { |  | 
|   2737     PushArgumentInstr* push = call->PushArgumentAt(i); |  | 
|   2738     push->ReplaceUsesWith(push->value()->definition()); |  | 
|   2739     push->RemoveFromGraph(); |  | 
|   2740   } |  | 
|   2741   // Replace all uses of this definition with the result. |  | 
|   2742   call->ReplaceUsesWith(last); |  | 
|   2743   // Finally, insert the inlined instruction sequence in place of the call |  | 
|   2744   // in the graph. |  | 
|   2745   call->previous()->LinkTo(entry->next()); |  | 
|   2746   entry->UnuseAllInputs();  // Entry block is not in the graph. |  | 
|   2747   last->LinkTo(call); |  | 
|   2748   // Remove the call through the iterator. |  | 
|   2749   ASSERT(current_iterator()->Current() == call); |  | 
|   2750   current_iterator()->RemoveCurrentFromGraph(); |  | 
|   2751   call->set_previous(NULL); |  | 
|   2752   call->set_next(NULL); |  | 
|   2753   return true; |  | 
|   2754 } |  | 
|   2755  |  | 
|   2756  |  | 
|   2757 // Returns the LoadIndexedInstr. |  | 
|   2758 Definition* FlowGraphOptimizer::PrepareInlineStringIndexOp( |  | 
|   2759     Instruction* call, |  | 
|   2760     intptr_t cid, |  | 
|   2761     Definition* str, |  | 
|   2762     Definition* index, |  | 
|   2763     Instruction* cursor) { |  | 
|   2764  |  | 
|   2765   cursor = flow_graph()->AppendTo(cursor, |  | 
|   2766                                   new(I) CheckSmiInstr( |  | 
|   2767                                       new(I) Value(index), |  | 
|   2768                                       call->deopt_id(), |  | 
|   2769                                       call->token_pos()), |  | 
|   2770                                   call->env(), |  | 
|   2771                                   FlowGraph::kEffect); |  | 
|   2772  |  | 
|   2773   // Load the length of the string. |  | 
|   2774   // Treat length loads as mutable (i.e. affected by side effects) to avoid |  | 
|   2775   // hoisting them since we can't hoist the preceding class-check. This is |  | 
|   2776   // because externalization of a string changes its class id. |  | 
|   2777   LoadFieldInstr* length = new(I) LoadFieldInstr( |  | 
|   2778       new(I) Value(str), |  | 
|   2779       String::length_offset(), |  | 
|   2780       Type::ZoneHandle(I, Type::SmiType()), |  | 
|   2781       str->token_pos()); |  | 
|   2782   length->set_result_cid(kSmiCid); |  | 
|   2783   length->set_recognized_kind(MethodRecognizer::kStringBaseLength); |  | 
|   2784  |  | 
|   2785   cursor = flow_graph()->AppendTo(cursor, length, NULL, FlowGraph::kValue); |  | 
|   2786   // Bounds check. |  | 
|   2787   cursor = flow_graph()->AppendTo(cursor, |  | 
|   2788                                    new(I) CheckArrayBoundInstr( |  | 
|   2789                                        new(I) Value(length), |  | 
|   2790                                        new(I) Value(index), |  | 
|   2791                                        call->deopt_id()), |  | 
|   2792                                    call->env(), |  | 
|   2793                                    FlowGraph::kEffect); |  | 
|   2794  |  | 
|   2795   LoadIndexedInstr* load_indexed = new(I) LoadIndexedInstr( |  | 
|   2796       new(I) Value(str), |  | 
|   2797       new(I) Value(index), |  | 
|   2798       Instance::ElementSizeFor(cid), |  | 
|   2799       cid, |  | 
|   2800       Isolate::kNoDeoptId, |  | 
|   2801       call->token_pos()); |  | 
|   2802  |  | 
|   2803   cursor = flow_graph()->AppendTo(cursor, |  | 
|   2804                                   load_indexed, |  | 
|   2805                                   NULL, |  | 
|   2806                                   FlowGraph::kValue); |  | 
|   2807   ASSERT(cursor == load_indexed); |  | 
|   2808   return load_indexed; |  | 
|   2809 } |  | 
|   2810  |  | 
|   2811  |  | 
|   2812 bool FlowGraphOptimizer::InlineStringCodeUnitAt( |  | 
|   2813     Instruction* call, |  | 
|   2814     intptr_t cid, |  | 
|   2815     TargetEntryInstr** entry, |  | 
|   2816     Definition** last) { |  | 
|   2817   // TODO(johnmccutchan): Handle external strings in PrepareInlineStringIndexOp. |  | 
|   2818   if (RawObject::IsExternalStringClassId(cid)) { |  | 
|   2819     return false; |  | 
|   2820   } |  | 
|   2821  |  | 
|   2822   Definition* str = call->ArgumentAt(0); |  | 
|   2823   Definition* index = call->ArgumentAt(1); |  | 
|   2824  |  | 
|   2825   *entry = new(I) TargetEntryInstr(flow_graph()->allocate_block_id(), |  | 
|   2826                                    call->GetBlock()->try_index()); |  | 
|   2827   (*entry)->InheritDeoptTarget(I, call); |  | 
|   2828  |  | 
|   2829   *last = PrepareInlineStringIndexOp(call, cid, str, index, *entry); |  | 
|   2830  |  | 
|   2831   return true; |  | 
|   2832 } |  | 
|   2833  |  | 
|   2834  |  | 
|   2835 bool FlowGraphOptimizer::InlineStringBaseCharAt( |  | 
|   2836     Instruction* call, |  | 
|   2837     intptr_t cid, |  | 
|   2838     TargetEntryInstr** entry, |  | 
|   2839     Definition** last) { |  | 
|   2840   // TODO(johnmccutchan): Handle external strings in PrepareInlineStringIndexOp. |  | 
|   2841   if (RawObject::IsExternalStringClassId(cid) || cid != kOneByteStringCid) { |  | 
|   2842     return false; |  | 
|   2843   } |  | 
|   2844   Definition* str = call->ArgumentAt(0); |  | 
|   2845   Definition* index = call->ArgumentAt(1); |  | 
|   2846  |  | 
|   2847   *entry = new(I) TargetEntryInstr(flow_graph()->allocate_block_id(), |  | 
|   2848                                    call->GetBlock()->try_index()); |  | 
|   2849   (*entry)->InheritDeoptTarget(I, call); |  | 
|   2850  |  | 
|   2851   *last = PrepareInlineStringIndexOp(call, cid, str, index, *entry); |  | 
|   2852  |  | 
|   2853   StringFromCharCodeInstr* char_at = new(I) StringFromCharCodeInstr( |  | 
|   2854       new(I) Value(*last), cid); |  | 
|   2855  |  | 
|   2856   flow_graph()->AppendTo(*last, char_at, NULL, FlowGraph::kValue); |  | 
|   2857   *last = char_at; |  | 
|   2858  |  | 
|   2859   return true; |  | 
|   2860 } |  | 
|   2861  |  | 
|   2862  |  | 
|   2863 bool FlowGraphOptimizer::InlineDoubleOp( |  | 
|   2864     Token::Kind op_kind, |  | 
|   2865     Instruction* call, |  | 
|   2866     TargetEntryInstr** entry, |  | 
|   2867     Definition** last) { |  | 
|   2868   Definition* left = call->ArgumentAt(0); |  | 
|   2869   Definition* right = call->ArgumentAt(1); |  | 
|   2870  |  | 
|   2871   *entry = new(I) TargetEntryInstr(flow_graph()->allocate_block_id(), |  | 
|   2872                                    call->GetBlock()->try_index()); |  | 
|   2873   (*entry)->InheritDeoptTarget(I, call); |  | 
|   2874   // Arguments are checked. No need for class check. |  | 
|   2875   BinaryDoubleOpInstr* double_bin_op = |  | 
|   2876       new(I) BinaryDoubleOpInstr(op_kind, |  | 
|   2877                                  new(I) Value(left), |  | 
|   2878                                  new(I) Value(right), |  | 
|   2879                                  call->deopt_id(), call->token_pos()); |  | 
|   2880   flow_graph()->AppendTo(*entry, double_bin_op, call->env(), FlowGraph::kValue); |  | 
|   2881   *last = double_bin_op; |  | 
|   2882  |  | 
|   2883   return true; |  | 
|   2884 } |  | 
|   2885  |  | 
|   2886  |  | 
|   2887 void FlowGraphOptimizer::ReplaceWithMathCFunction( |  | 
|   2888     InstanceCallInstr* call, |  | 
|   2889     MethodRecognizer::Kind recognized_kind) { |  | 
|   2890   AddReceiverCheck(call); |  | 
|   2891   ZoneGrowableArray<Value*>* args = |  | 
|   2892       new(I) ZoneGrowableArray<Value*>(call->ArgumentCount()); |  | 
|   2893   for (intptr_t i = 0; i < call->ArgumentCount(); i++) { |  | 
|   2894     args->Add(new(I) Value(call->ArgumentAt(i))); |  | 
|   2895   } |  | 
|   2896   InvokeMathCFunctionInstr* invoke = |  | 
|   2897       new(I) InvokeMathCFunctionInstr(args, |  | 
|   2898                                       call->deopt_id(), |  | 
|   2899                                       recognized_kind, |  | 
|   2900                                       call->token_pos()); |  | 
|   2901   ReplaceCall(call, invoke); |  | 
|   2902 } |  | 
|   2903  |  | 
|   2904  |  | 
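|        // Typed-data class ids whose element loads and stores are inlined below. |  | 
|        // Note that 64-bit integer views are (presumably deliberately) absent. |  | 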
|   2905 static bool IsSupportedByteArrayViewCid(intptr_t cid) { |  | 
|   2906   switch (cid) { |  | 
|   2907     case kTypedDataInt8ArrayCid: |  | 
|   2908     case kTypedDataUint8ArrayCid: |  | 
|   2909     case kExternalTypedDataUint8ArrayCid: |  | 
|   2910     case kTypedDataUint8ClampedArrayCid: |  | 
|   2911     case kExternalTypedDataUint8ClampedArrayCid: |  | 
|   2912     case kTypedDataInt16ArrayCid: |  | 
|   2913     case kTypedDataUint16ArrayCid: |  | 
|   2914     case kTypedDataInt32ArrayCid: |  | 
|   2915     case kTypedDataUint32ArrayCid: |  | 
|   2916     case kTypedDataFloat32ArrayCid: |  | 
|   2917     case kTypedDataFloat64ArrayCid: |  | 
|   2918     case kTypedDataFloat32x4ArrayCid: |  | 
|   2919     case kTypedDataInt32x4ArrayCid: |  | 
|   2920       return true; |  | 
|   2921     default: |  | 
|   2922       return false; |  | 
|   2923   } |  | 
|   2924 } |  | 
|   2925  |  | 
|   2926  |  | 
|   2927 // Inline only simple, frequently called core library methods. |  | 
|   2928 bool FlowGraphOptimizer::TryInlineInstanceMethod(InstanceCallInstr* call) { |  | 
|   2929   ASSERT(call->HasICData()); |  | 
|   2930   const ICData& ic_data = *call->ic_data(); |  | 
|   2931   if ((ic_data.NumberOfUsedChecks() == 0) || !ic_data.HasOneTarget()) { |  | 
|   2932     // No type feedback collected or multiple targets found. |  | 
|   2933     return false; |  | 
|   2934   } |  | 
|   2935  |  | 
|   2936   Function& target = Function::Handle(I); |  | 
|   2937   GrowableArray<intptr_t> class_ids; |  | 
|   2938   ic_data.GetCheckAt(0, &class_ids, &target); |  | 
|   2939   MethodRecognizer::Kind recognized_kind = |  | 
|   2940       MethodRecognizer::RecognizeKind(target); |  | 
|   2941  |  | 
|   2942   if ((recognized_kind == MethodRecognizer::kGrowableArraySetData) && |  | 
|   2943       (ic_data.NumberOfChecks() == 1) && |  | 
|   2944       (class_ids[0] == kGrowableObjectArrayCid)) { |  | 
|   2945     // This is an internal method, no need to check argument types. |  | 
|   2946     Definition* array = call->ArgumentAt(0); |  | 
|   2947     Definition* value = call->ArgumentAt(1); |  | 
|   2948     StoreInstanceFieldInstr* store = new(I) StoreInstanceFieldInstr( |  | 
|   2949         GrowableObjectArray::data_offset(), |  | 
|   2950         new(I) Value(array), |  | 
|   2951         new(I) Value(value), |  | 
|   2952         kEmitStoreBarrier, |  | 
|   2953         call->token_pos()); |  | 
|   2954     ReplaceCall(call, store); |  | 
|   2955     return true; |  | 
|   2956   } |  | 
|   2957  |  | 
|   2958   if ((recognized_kind == MethodRecognizer::kGrowableArraySetLength) && |  | 
|   2959       (ic_data.NumberOfChecks() == 1) && |  | 
|   2960       (class_ids[0] == kGrowableObjectArrayCid)) { |  | 
|   2961     // This is an internal method; no need to check argument types or |  | 
|   2962     // range. |  | 
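|            // The new length is always a Smi, and smis are never heap-allocated, |  | 
|            // so (presumably for that reason) no write barrier is emitted. |  | 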
|   2963     Definition* array = call->ArgumentAt(0); |  | 
|   2964     Definition* value = call->ArgumentAt(1); |  | 
|   2965     StoreInstanceFieldInstr* store = new(I) StoreInstanceFieldInstr( |  | 
|   2966         GrowableObjectArray::length_offset(), |  | 
|   2967         new(I) Value(array), |  | 
|   2968         new(I) Value(value), |  | 
|   2969         kNoStoreBarrier, |  | 
|   2970         call->token_pos()); |  | 
|   2971     ReplaceCall(call, store); |  | 
|   2972     return true; |  | 
|   2973   } |  | 
|   2974  |  | 
|   2975   if (((recognized_kind == MethodRecognizer::kStringBaseCodeUnitAt) || |  | 
|   2976        (recognized_kind == MethodRecognizer::kStringBaseCharAt)) && |  | 
|   2977       (ic_data.NumberOfChecks() == 1) && |  | 
|   2978       ((class_ids[0] == kOneByteStringCid) || |  | 
|   2979        (class_ids[0] == kTwoByteStringCid))) { |  | 
|   2980     return TryReplaceInstanceCallWithInline(call); |  | 
|   2981   } |  | 
|   2982  |  | 
|   2983   if ((class_ids[0] == kOneByteStringCid) && (ic_data.NumberOfChecks() == 1)) { |  | 
|   2984     if (recognized_kind == MethodRecognizer::kOneByteStringSetAt) { |  | 
|   2985       // This is an internal method; no need to check argument types or |  | 
|   2986       // range. |  | 
|   2987       Definition* str = call->ArgumentAt(0); |  | 
|   2988       Definition* index = call->ArgumentAt(1); |  | 
|   2989       Definition* value = call->ArgumentAt(2); |  | 
|   2990       StoreIndexedInstr* store_op = new(I) StoreIndexedInstr( |  | 
|   2991           new(I) Value(str), |  | 
|   2992           new(I) Value(index), |  | 
|   2993           new(I) Value(value), |  | 
|   2994           kNoStoreBarrier, |  | 
|   2995           1,  // Index scale |  | 
|   2996           kOneByteStringCid, |  | 
|   2997           call->deopt_id(), |  | 
|   2998           call->token_pos()); |  | 
|   2999       ReplaceCall(call, store_op); |  | 
|   3000       return true; |  | 
|   3001     } |  | 
|   3002     return false; |  | 
|   3003   } |  | 
|   3004  |  | 
|   3005   if (CanUnboxDouble() && |  | 
|   3006       (recognized_kind == MethodRecognizer::kIntegerToDouble) && |  | 
|   3007       (ic_data.NumberOfChecks() == 1)) { |  | 
|   3008     if (class_ids[0] == kSmiCid) { |  | 
|   3009       AddReceiverCheck(call); |  | 
|   3010       ReplaceCall(call, |  | 
|   3011                   new(I) SmiToDoubleInstr( |  | 
|   3012                       new(I) Value(call->ArgumentAt(0)), |  | 
|   3013                       call->token_pos())); |  | 
|   3014       return true; |  | 
|   3015     } else if ((class_ids[0] == kMintCid) && CanConvertUnboxedMintToDouble()) { |  | 
|   3016       AddReceiverCheck(call); |  | 
|   3017       ReplaceCall(call, |  | 
|   3018                   new(I) MintToDoubleInstr(new(I) Value(call->ArgumentAt(0)), |  | 
|   3019                                            call->deopt_id())); |  | 
|   3020       return true; |  | 
|   3021     } |  | 
|   3022   } |  | 
|   3023  |  | 
|   3024   if (class_ids[0] == kDoubleCid) { |  | 
|   3025     if (!CanUnboxDouble()) { |  | 
|   3026       return false; |  | 
|   3027     } |  | 
|   3028     switch (recognized_kind) { |  | 
|   3029       case MethodRecognizer::kDoubleToInteger: { |  | 
|   3030         AddReceiverCheck(call); |  | 
|   3031         ASSERT(call->HasICData()); |  | 
|   3032         const ICData& ic_data = *call->ic_data(); |  | 
|   3033         Definition* input = call->ArgumentAt(0); |  | 
|   3034         Definition* d2i_instr = NULL; |  | 
|   3035         if (ic_data.HasDeoptReason(ICData::kDeoptDoubleToSmi)) { |  | 
|   3036           // Do not repeatedly deoptimize because result didn't fit into Smi. |  | 
|   3037           d2i_instr =  new(I) DoubleToIntegerInstr( |  | 
|   3038               new(I) Value(input), call); |  | 
|   3039         } else { |  | 
|   3040           // Optimistically assume result fits into Smi. |  | 
|   3041           d2i_instr = new(I) DoubleToSmiInstr( |  | 
|   3042               new(I) Value(input), call->deopt_id()); |  | 
|   3043         } |  | 
|   3044         ReplaceCall(call, d2i_instr); |  | 
|   3045         return true; |  | 
|   3046       } |  | 
|   3047       case MethodRecognizer::kDoubleMod: |  | 
|   3048       case MethodRecognizer::kDoubleRound: |  | 
|   3049         ReplaceWithMathCFunction(call, recognized_kind); |  | 
|   3050         return true; |  | 
|   3051       case MethodRecognizer::kDoubleTruncate: |  | 
|   3052       case MethodRecognizer::kDoubleFloor: |  | 
|   3053       case MethodRecognizer::kDoubleCeil: |  | 
|   3054         if (!TargetCPUFeatures::double_truncate_round_supported()) { |  | 
|   3055           ReplaceWithMathCFunction(call, recognized_kind); |  | 
|   3056         } else { |  | 
|   3057           AddReceiverCheck(call); |  | 
|   3058           DoubleToDoubleInstr* d2d_instr = |  | 
|   3059               new(I) DoubleToDoubleInstr(new(I) Value(call->ArgumentAt(0)), |  | 
|   3060                                          recognized_kind, call->deopt_id()); |  | 
|   3061           ReplaceCall(call, d2d_instr); |  | 
|   3062         } |  | 
|   3063         return true; |  | 
|   3064       case MethodRecognizer::kDoubleAdd: |  | 
|   3065       case MethodRecognizer::kDoubleSub: |  | 
|   3066       case MethodRecognizer::kDoubleMul: |  | 
|   3067       case MethodRecognizer::kDoubleDiv: |  | 
|   3068         return TryReplaceInstanceCallWithInline(call); |  | 
|   3069       default: |  | 
|   3070         // Unsupported method. |  | 
|   3071         return false; |  | 
|   3072     } |  | 
|   3073   } |  | 
|   3074  |  | 
|   3075   if (IsSupportedByteArrayViewCid(class_ids[0]) && |  | 
|   3076       (ic_data.NumberOfChecks() == 1)) { |  | 
|   3077     // 32-bit elements may not fit into a smi on all platforms; bail out |  | 
|   3078     // unless the platform can unbox them as int32. |  | 
|   3079     if ((recognized_kind == MethodRecognizer::kByteArrayBaseGetInt32) || |  | 
|   3080         (recognized_kind == MethodRecognizer::kByteArrayBaseGetUint32) || |  | 
|   3081         (recognized_kind == MethodRecognizer::kByteArrayBaseSetInt32) || |  | 
|   3082         (recognized_kind == MethodRecognizer::kByteArrayBaseSetUint32)) { |  | 
|   3083       if (!CanUnboxInt32()) { |  | 
|   3084         return false; |  | 
|   3085       } |  | 
|   3086     } |  | 
|   3087  |  | 
|   3088     if ((recognized_kind == MethodRecognizer::kByteArrayBaseGetFloat32) || |  | 
|   3089         (recognized_kind == MethodRecognizer::kByteArrayBaseGetFloat64) || |  | 
|   3090         (recognized_kind == MethodRecognizer::kByteArrayBaseSetFloat32) || |  | 
|   3091         (recognized_kind == MethodRecognizer::kByteArrayBaseSetFloat64)) { |  | 
|   3092       if (!CanUnboxDouble()) { |  | 
|   3093         return false; |  | 
|   3094       } |  | 
|   3095     } |  | 
|   3096  |  | 
|   3097     switch (recognized_kind) { |  | 
|   3098       // ByteArray getters. |  | 
|   3099       case MethodRecognizer::kByteArrayBaseGetInt8: |  | 
|   3100         return BuildByteArrayViewLoad(call, kTypedDataInt8ArrayCid); |  | 
|   3101       case MethodRecognizer::kByteArrayBaseGetUint8: |  | 
|   3102         return BuildByteArrayViewLoad(call, kTypedDataUint8ArrayCid); |  | 
|   3103       case MethodRecognizer::kByteArrayBaseGetInt16: |  | 
|   3104         return BuildByteArrayViewLoad(call, kTypedDataInt16ArrayCid); |  | 
|   3105       case MethodRecognizer::kByteArrayBaseGetUint16: |  | 
|   3106         return BuildByteArrayViewLoad(call, kTypedDataUint16ArrayCid); |  | 
|   3107       case MethodRecognizer::kByteArrayBaseGetInt32: |  | 
|   3108         return BuildByteArrayViewLoad(call, kTypedDataInt32ArrayCid); |  | 
|   3109       case MethodRecognizer::kByteArrayBaseGetUint32: |  | 
|   3110         return BuildByteArrayViewLoad(call, kTypedDataUint32ArrayCid); |  | 
|   3111       case MethodRecognizer::kByteArrayBaseGetFloat32: |  | 
|   3112         return BuildByteArrayViewLoad(call, kTypedDataFloat32ArrayCid); |  | 
|   3113       case MethodRecognizer::kByteArrayBaseGetFloat64: |  | 
|   3114         return BuildByteArrayViewLoad(call, kTypedDataFloat64ArrayCid); |  | 
|   3115       case MethodRecognizer::kByteArrayBaseGetFloat32x4: |  | 
|   3116         return BuildByteArrayViewLoad(call, kTypedDataFloat32x4ArrayCid); |  | 
|   3117       case MethodRecognizer::kByteArrayBaseGetInt32x4: |  | 
|   3118         return BuildByteArrayViewLoad(call, kTypedDataInt32x4ArrayCid); |  | 
|   3119  |  | 
|   3120       // ByteArray setters. |  | 
|   3121       case MethodRecognizer::kByteArrayBaseSetInt8: |  | 
|   3122         return BuildByteArrayViewStore(call, kTypedDataInt8ArrayCid); |  | 
|   3123       case MethodRecognizer::kByteArrayBaseSetUint8: |  | 
|   3124         return BuildByteArrayViewStore(call, kTypedDataUint8ArrayCid); |  | 
|   3125       case MethodRecognizer::kByteArrayBaseSetInt16: |  | 
|   3126         return BuildByteArrayViewStore(call, kTypedDataInt16ArrayCid); |  | 
|   3127       case MethodRecognizer::kByteArrayBaseSetUint16: |  | 
|   3128         return BuildByteArrayViewStore(call, kTypedDataUint16ArrayCid); |  | 
|   3129       case MethodRecognizer::kByteArrayBaseSetInt32: |  | 
|   3130         return BuildByteArrayViewStore(call, kTypedDataInt32ArrayCid); |  | 
|   3131       case MethodRecognizer::kByteArrayBaseSetUint32: |  | 
|   3132         return BuildByteArrayViewStore(call, kTypedDataUint32ArrayCid); |  | 
|   3133       case MethodRecognizer::kByteArrayBaseSetFloat32: |  | 
|   3134         return BuildByteArrayViewStore(call, kTypedDataFloat32ArrayCid); |  | 
|   3135       case MethodRecognizer::kByteArrayBaseSetFloat64: |  | 
|   3136         return BuildByteArrayViewStore(call, kTypedDataFloat64ArrayCid); |  | 
|   3137       case MethodRecognizer::kByteArrayBaseSetFloat32x4: |  | 
|   3138         return BuildByteArrayViewStore(call, kTypedDataFloat32x4ArrayCid); |  | 
|   3139       case MethodRecognizer::kByteArrayBaseSetInt32x4: |  | 
|   3140         return BuildByteArrayViewStore(call, kTypedDataInt32x4ArrayCid); |  | 
|   3141       default: |  | 
|   3142         // Unsupported method. |  | 
|   3143         return false; |  | 
|   3144     } |  | 
|   3145   } |  | 
|   3146  |  | 
|   3147   if ((class_ids[0] == kFloat32x4Cid) && (ic_data.NumberOfChecks() == 1)) { |  | 
|   3148     return TryInlineFloat32x4Method(call, recognized_kind); |  | 
|   3149   } |  | 
|   3150  |  | 
|   3151   if ((class_ids[0] == kInt32x4Cid) && (ic_data.NumberOfChecks() == 1)) { |  | 
|   3152     return TryInlineInt32x4Method(call, recognized_kind); |  | 
|   3153   } |  | 
|   3154  |  | 
|   3155   if ((class_ids[0] == kFloat64x2Cid) && (ic_data.NumberOfChecks() == 1)) { |  | 
|   3156     return TryInlineFloat64x2Method(call, recognized_kind); |  | 
|   3157   } |  | 
|   3158  |  | 
|   3159   if (recognized_kind == MethodRecognizer::kIntegerLeftShiftWithMask32) { |  | 
|   3160     ASSERT(call->ArgumentCount() == 3); |  | 
|   3161     ASSERT(ic_data.NumArgsTested() == 2); |  | 
|   3162     Definition* value = call->ArgumentAt(0); |  | 
|   3163     Definition* count = call->ArgumentAt(1); |  | 
|   3164     Definition* int32_mask = call->ArgumentAt(2); |  | 
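|            // The recognized helper computes (value << count) & int32_mask, where |  | 
|            // int32_mask is a compile-time constant of at most 32 bits. |  | 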
|   3165     if (HasOnlyTwoOf(ic_data, kSmiCid)) { |  | 
|   3166       if (ic_data.HasDeoptReason(ICData::kDeoptBinaryMintOp)) { |  | 
|   3167         return false; |  | 
|   3168       } |  | 
|   3169       // Both inputs must be Smis; the shift is truncating and the mask is |  | 
|              // checked below to fit in a Smi, so the result cannot overflow. |  | 
|   3170       AddCheckSmi(value, call->deopt_id(), call->env(), call); |  | 
|   3171       AddCheckSmi(count, call->deopt_id(), call->env(), call); |  | 
|   3172       ASSERT(int32_mask->IsConstant()); |  | 
|   3173       const Integer& mask_literal = Integer::Cast( |  | 
|   3174           int32_mask->AsConstant()->value()); |  | 
|   3175       const int64_t mask_value = mask_literal.AsInt64Value(); |  | 
|   3176       ASSERT(mask_value >= 0); |  | 
|   3177       if (mask_value > Smi::kMaxValue) { |  | 
|   3178         // The result will not be Smi. |  | 
|   3179         return false; |  | 
|   3180       } |  | 
|   3181       BinarySmiOpInstr* left_shift = |  | 
|   3182           new(I) BinarySmiOpInstr(Token::kSHL, |  | 
|   3183                                   new(I) Value(value), |  | 
|   3184                                   new(I) Value(count), |  | 
|   3185                                   call->deopt_id()); |  | 
|   3186       left_shift->mark_truncating(); |  | 
|   3187       if ((kBitsPerWord == 32) && (mask_value == 0xffffffffLL)) { |  | 
|   3188         // No BIT_AND operation needed. |  | 
|   3189         ReplaceCall(call, left_shift); |  | 
|   3190       } else { |  | 
|   3191         InsertBefore(call, left_shift, call->env(), FlowGraph::kValue); |  | 
|   3192         BinarySmiOpInstr* bit_and = |  | 
|   3193             new(I) BinarySmiOpInstr(Token::kBIT_AND, |  | 
|   3194                                     new(I) Value(left_shift), |  | 
|   3195                                     new(I) Value(int32_mask), |  | 
|   3196                                     call->deopt_id()); |  | 
|   3197         ReplaceCall(call, bit_and); |  | 
|   3198       } |  | 
|   3199       return true; |  | 
|   3200     } |  | 
|   3201  |  | 
|   3202     if (HasTwoMintOrSmi(ic_data) && |  | 
|   3203         HasOnlyOneSmi(ICData::Handle(I, |  | 
|   3204                                      ic_data.AsUnaryClassChecksForArgNr(1)))) { |  | 
|   3205       if (!FlowGraphCompiler::SupportsUnboxedMints() || |  | 
|   3206           ic_data.HasDeoptReason(ICData::kDeoptBinaryMintOp)) { |  | 
|   3207         return false; |  | 
|   3208       } |  | 
|   3209       ShiftMintOpInstr* left_shift = |  | 
|   3210           new(I) ShiftMintOpInstr(Token::kSHL, |  | 
|   3211                                   new(I) Value(value), |  | 
|   3212                                   new(I) Value(count), |  | 
|   3213                                   call->deopt_id()); |  | 
|   3214       InsertBefore(call, left_shift, call->env(), FlowGraph::kValue); |  | 
|   3215       BinaryMintOpInstr* bit_and = |  | 
|   3216           new(I) BinaryMintOpInstr(Token::kBIT_AND, |  | 
|   3217                                    new(I) Value(left_shift), |  | 
|   3218                                    new(I) Value(int32_mask), |  | 
|   3219                                    call->deopt_id()); |  | 
|   3220       ReplaceCall(call, bit_and); |  | 
|   3221       return true; |  | 
|   3222     } |  | 
|   3223   } |  | 
|   3224   return false; |  | 
|   3225 } |  | 
|   3226  |  | 
|   3227  |  | 
|   3228 bool FlowGraphOptimizer::TryInlineFloat32x4Constructor( |  | 
|   3229     StaticCallInstr* call, |  | 
|   3230     MethodRecognizer::Kind recognized_kind) { |  | 
|   3231   if (!ShouldInlineSimd()) { |  | 
|   3232     return false; |  | 
|   3233   } |  | 
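|          // For recognized factory constructors, ArgumentAt(0) is (presumably) |  | 
|          // the implicit type-argument vector, so user arguments start at 1. |  | 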
|   3234   if (recognized_kind == MethodRecognizer::kFloat32x4Zero) { |  | 
|   3235     Float32x4ZeroInstr* zero = new(I) Float32x4ZeroInstr(); |  | 
|   3236     ReplaceCall(call, zero); |  | 
|   3237     return true; |  | 
|   3238   } else if (recognized_kind == MethodRecognizer::kFloat32x4Splat) { |  | 
|   3239     Float32x4SplatInstr* splat = |  | 
|   3240         new(I) Float32x4SplatInstr( |  | 
|   3241             new(I) Value(call->ArgumentAt(1)), call->deopt_id()); |  | 
|   3242     ReplaceCall(call, splat); |  | 
|   3243     return true; |  | 
|   3244   } else if (recognized_kind == MethodRecognizer::kFloat32x4Constructor) { |  | 
|   3245     Float32x4ConstructorInstr* con = |  | 
|   3246         new(I) Float32x4ConstructorInstr( |  | 
|   3247             new(I) Value(call->ArgumentAt(1)), |  | 
|   3248             new(I) Value(call->ArgumentAt(2)), |  | 
|   3249             new(I) Value(call->ArgumentAt(3)), |  | 
|   3250             new(I) Value(call->ArgumentAt(4)), |  | 
|   3251             call->deopt_id()); |  | 
|   3252     ReplaceCall(call, con); |  | 
|   3253     return true; |  | 
|   3254   } else if (recognized_kind == MethodRecognizer::kFloat32x4FromInt32x4Bits) { |  | 
|   3255     Int32x4ToFloat32x4Instr* cast = |  | 
|   3256         new(I) Int32x4ToFloat32x4Instr( |  | 
|   3257             new(I) Value(call->ArgumentAt(1)), call->deopt_id()); |  | 
|   3258     ReplaceCall(call, cast); |  | 
|   3259     return true; |  | 
|   3260   } else if (recognized_kind == MethodRecognizer::kFloat32x4FromFloat64x2) { |  | 
|   3261     Float64x2ToFloat32x4Instr* cast = |  | 
|   3262         new(I) Float64x2ToFloat32x4Instr( |  | 
|   3263             new(I) Value(call->ArgumentAt(1)), call->deopt_id()); |  | 
|   3264     ReplaceCall(call, cast); |  | 
|   3265     return true; |  | 
|   3266   } |  | 
|   3267   return false; |  | 
|   3268 } |  | 
|   3269  |  | 
|   3270  |  | 
|   3271 bool FlowGraphOptimizer::TryInlineFloat64x2Constructor( |  | 
|   3272     StaticCallInstr* call, |  | 
|   3273     MethodRecognizer::Kind recognized_kind) { |  | 
|   3274   if (!ShouldInlineSimd()) { |  | 
|   3275     return false; |  | 
|   3276   } |  | 
|   3277   if (recognized_kind == MethodRecognizer::kFloat64x2Zero) { |  | 
|   3278     Float64x2ZeroInstr* zero = new(I) Float64x2ZeroInstr(); |  | 
|   3279     ReplaceCall(call, zero); |  | 
|   3280     return true; |  | 
|   3281   } else if (recognized_kind == MethodRecognizer::kFloat64x2Splat) { |  | 
|   3282     Float64x2SplatInstr* splat = |  | 
|   3283         new(I) Float64x2SplatInstr( |  | 
|   3284             new(I) Value(call->ArgumentAt(1)), call->deopt_id()); |  | 
|   3285     ReplaceCall(call, splat); |  | 
|   3286     return true; |  | 
|   3287   } else if (recognized_kind == MethodRecognizer::kFloat64x2Constructor) { |  | 
|   3288     Float64x2ConstructorInstr* con = |  | 
|   3289         new(I) Float64x2ConstructorInstr( |  | 
|   3290             new(I) Value(call->ArgumentAt(1)), |  | 
|   3291             new(I) Value(call->ArgumentAt(2)), |  | 
|   3292             call->deopt_id()); |  | 
|   3293     ReplaceCall(call, con); |  | 
|   3294     return true; |  | 
|   3295   } else if (recognized_kind == MethodRecognizer::kFloat64x2FromFloat32x4) { |  | 
|   3296     Float32x4ToFloat64x2Instr* cast = |  | 
|   3297         new(I) Float32x4ToFloat64x2Instr( |  | 
|   3298             new(I) Value(call->ArgumentAt(1)), call->deopt_id()); |  | 
|   3299     ReplaceCall(call, cast); |  | 
|   3300     return true; |  | 
|   3301   } |  | 
|   3302   return false; |  | 
|   3303 } |  | 
|   3304  |  | 
|   3305  |  | 
|   3306 bool FlowGraphOptimizer::TryInlineInt32x4Constructor( |  | 
|   3307     StaticCallInstr* call, |  | 
|   3308     MethodRecognizer::Kind recognized_kind) { |  | 
|   3309   if (!ShouldInlineSimd()) { |  | 
|   3310     return false; |  | 
|   3311   } |  | 
|   3312   if (recognized_kind == MethodRecognizer::kInt32x4BoolConstructor) { |  | 
|   3313     Int32x4BoolConstructorInstr* con = |  | 
|   3314         new(I) Int32x4BoolConstructorInstr( |  | 
|   3315             new(I) Value(call->ArgumentAt(1)), |  | 
|   3316             new(I) Value(call->ArgumentAt(2)), |  | 
|   3317             new(I) Value(call->ArgumentAt(3)), |  | 
|   3318             new(I) Value(call->ArgumentAt(4)), |  | 
|   3319             call->deopt_id()); |  | 
|   3320     ReplaceCall(call, con); |  | 
|   3321     return true; |  | 
|   3322   } else if (recognized_kind == MethodRecognizer::kInt32x4FromFloat32x4Bits) { |  | 
|   3323     Float32x4ToInt32x4Instr* cast = |  | 
|   3324         new(I) Float32x4ToInt32x4Instr( |  | 
|   3325             new(I) Value(call->ArgumentAt(1)), call->deopt_id()); |  | 
|   3326     ReplaceCall(call, cast); |  | 
|   3327     return true; |  | 
|   3328   } else if (recognized_kind == MethodRecognizer::kInt32x4Constructor) { |  | 
|   3329     Int32x4ConstructorInstr* con = |  | 
|   3330         new(I) Int32x4ConstructorInstr( |  | 
|   3331             new(I) Value(call->ArgumentAt(1)), |  | 
|   3332             new(I) Value(call->ArgumentAt(2)), |  | 
|   3333             new(I) Value(call->ArgumentAt(3)), |  | 
|   3334             new(I) Value(call->ArgumentAt(4)), |  | 
|   3335             call->deopt_id()); |  | 
|   3336     ReplaceCall(call, con); |  | 
|   3337     return true; |  | 
|   3338   } |  | 
|   3339   return false; |  | 
|   3340 } |  | 
|   3341  |  | 
|   3342  |  | 
|   3343 bool FlowGraphOptimizer::TryInlineFloat32x4Method( |  | 
|   3344     InstanceCallInstr* call, |  | 
|   3345     MethodRecognizer::Kind recognized_kind) { |  | 
|   3346   if (!ShouldInlineSimd()) { |  | 
|   3347     return false; |  | 
|   3348   } |  | 
|   3349   ASSERT(call->HasICData()); |  | 
|   3350   switch (recognized_kind) { |  | 
|   3351     case MethodRecognizer::kFloat32x4ShuffleX: |  | 
|   3352     case MethodRecognizer::kFloat32x4ShuffleY: |  | 
|   3353     case MethodRecognizer::kFloat32x4ShuffleZ: |  | 
|   3354     case MethodRecognizer::kFloat32x4ShuffleW: |  | 
|   3355     case MethodRecognizer::kFloat32x4GetSignMask: |  | 
|   3356       ASSERT(call->ic_data()->HasReceiverClassId(kFloat32x4Cid)); |  | 
|   3357       ASSERT(call->ic_data()->HasOneTarget()); |  | 
|   3358       return InlineFloat32x4Getter(call, recognized_kind); |  | 
|   3359  |  | 
|   3360     case MethodRecognizer::kFloat32x4Equal: |  | 
|   3361     case MethodRecognizer::kFloat32x4GreaterThan: |  | 
|   3362     case MethodRecognizer::kFloat32x4GreaterThanOrEqual: |  | 
|   3363     case MethodRecognizer::kFloat32x4LessThan: |  | 
|   3364     case MethodRecognizer::kFloat32x4LessThanOrEqual: |  | 
|   3365     case MethodRecognizer::kFloat32x4NotEqual: { |  | 
|   3366       Definition* left = call->ArgumentAt(0); |  | 
|   3367       Definition* right = call->ArgumentAt(1); |  | 
|   3368       // Type check left. |  | 
|   3369       AddCheckClass(left, |  | 
|   3370                     ICData::ZoneHandle( |  | 
|   3371                         I, call->ic_data()->AsUnaryClassChecksForArgNr(0)), |  | 
|   3372                     call->deopt_id(), |  | 
|   3373                     call->env(), |  | 
|   3374                     call); |  | 
|   3375       // Replace call. |  | 
|   3376       Float32x4ComparisonInstr* cmp = |  | 
|   3377           new(I) Float32x4ComparisonInstr(recognized_kind, |  | 
|   3378                                           new(I) Value(left), |  | 
|   3379                                           new(I) Value(right), |  | 
|   3380                                           call->deopt_id()); |  | 
|   3381       ReplaceCall(call, cmp); |  | 
|   3382       return true; |  | 
|   3383     } |  | 
|   3384     case MethodRecognizer::kFloat32x4Min: |  | 
|   3385     case MethodRecognizer::kFloat32x4Max: { |  | 
|   3386       Definition* left = call->ArgumentAt(0); |  | 
|   3387       Definition* right = call->ArgumentAt(1); |  | 
|   3388       // Type check left. |  | 
|   3389       AddCheckClass(left, |  | 
|   3390                     ICData::ZoneHandle( |  | 
|   3391                         I, call->ic_data()->AsUnaryClassChecksForArgNr(0)), |  | 
|   3392                     call->deopt_id(), |  | 
|   3393                     call->env(), |  | 
|   3394                     call); |  | 
|   3395       Float32x4MinMaxInstr* minmax = |  | 
|   3396           new(I) Float32x4MinMaxInstr( |  | 
|   3397               recognized_kind, |  | 
|   3398               new(I) Value(left), |  | 
|   3399               new(I) Value(right), |  | 
|   3400               call->deopt_id()); |  | 
|   3401       ReplaceCall(call, minmax); |  | 
|   3402       return true; |  | 
|   3403     } |  | 
|   3404     case MethodRecognizer::kFloat32x4Scale: { |  | 
|   3405       Definition* left = call->ArgumentAt(0); |  | 
|   3406       Definition* right = call->ArgumentAt(1); |  | 
|   3407       // Type check left. |  | 
|   3408       AddCheckClass(left, |  | 
|   3409                     ICData::ZoneHandle( |  | 
|   3410                         I, call->ic_data()->AsUnaryClassChecksForArgNr(0)), |  | 
|   3411                     call->deopt_id(), |  | 
|   3412                     call->env(), |  | 
|   3413                     call); |  | 
|   3414       // Left and right values are swapped when handed to the instruction |  | 
|   3415       // so that the double value is loaded into the output register and |  | 
|   3416       // can be destroyed. |  | 
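|              // Illustrative example: for v.scale(s), the operands below are |  | 
|              // passed as (s, v), letting the double s be loaded into the |  | 
|              // output register. |  | 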
|   3417       Float32x4ScaleInstr* scale = |  | 
|   3418           new(I) Float32x4ScaleInstr(recognized_kind, |  | 
|   3419                                      new(I) Value(right), |  | 
|   3420                                      new(I) Value(left), |  | 
|   3421                                      call->deopt_id()); |  | 
|   3422       ReplaceCall(call, scale); |  | 
|   3423       return true; |  | 
|   3424     } |  | 
|   3425     case MethodRecognizer::kFloat32x4Sqrt: |  | 
|   3426     case MethodRecognizer::kFloat32x4ReciprocalSqrt: |  | 
|   3427     case MethodRecognizer::kFloat32x4Reciprocal: { |  | 
|   3428       Definition* left = call->ArgumentAt(0); |  | 
|   3429       AddCheckClass(left, |  | 
|   3430                     ICData::ZoneHandle( |  | 
|   3431                         I, call->ic_data()->AsUnaryClassChecksForArgNr(0)), |  | 
|   3432                     call->deopt_id(), |  | 
|   3433                     call->env(), |  | 
|   3434                     call); |  | 
|   3435       Float32x4SqrtInstr* sqrt = |  | 
|   3436           new(I) Float32x4SqrtInstr(recognized_kind, |  | 
|   3437                                     new(I) Value(left), |  | 
|   3438                                     call->deopt_id()); |  | 
|   3439       ReplaceCall(call, sqrt); |  | 
|   3440       return true; |  | 
|   3441     } |  | 
|   3442     case MethodRecognizer::kFloat32x4WithX: |  | 
|   3443     case MethodRecognizer::kFloat32x4WithY: |  | 
|   3444     case MethodRecognizer::kFloat32x4WithZ: |  | 
|   3445     case MethodRecognizer::kFloat32x4WithW: { |  | 
|   3446       Definition* left = call->ArgumentAt(0); |  | 
|   3447       Definition* right = call->ArgumentAt(1); |  | 
|   3448       // Type check left. |  | 
|   3449       AddCheckClass(left, |  | 
|   3450                     ICData::ZoneHandle( |  | 
|   3451                         I, call->ic_data()->AsUnaryClassChecksForArgNr(0)), |  | 
|   3452                     call->deopt_id(), |  | 
|   3453                     call->env(), |  | 
|   3454                     call); |  | 
|   3455       Float32x4WithInstr* with = new(I) Float32x4WithInstr(recognized_kind, |  | 
|   3456                                                            new(I) Value(left), |  | 
|   3457                                                            new(I) Value(right), |  | 
|   3458                                                            call->deopt_id()); |  | 
|   3459       ReplaceCall(call, with); |  | 
|   3460       return true; |  | 
|   3461     } |  | 
|   3462     case MethodRecognizer::kFloat32x4Absolute: |  | 
|   3463     case MethodRecognizer::kFloat32x4Negate: { |  | 
|   3464       Definition* left = call->ArgumentAt(0); |  | 
|   3465       // Type check left. |  | 
|   3466       AddCheckClass(left, |  | 
|   3467                     ICData::ZoneHandle( |  | 
|   3468                         I, call->ic_data()->AsUnaryClassChecksForArgNr(0)), |  | 
|   3469                     call->deopt_id(), |  | 
|   3470                     call->env(), |  | 
|   3471                     call); |  | 
|   3472       Float32x4ZeroArgInstr* zeroArg = |  | 
|   3473           new(I) Float32x4ZeroArgInstr( |  | 
|   3474               recognized_kind, new(I) Value(left), call->deopt_id()); |  | 
|   3475       ReplaceCall(call, zeroArg); |  | 
|   3476       return true; |  | 
|   3477     } |  | 
|   3478     case MethodRecognizer::kFloat32x4Clamp: { |  | 
|   3479       Definition* left = call->ArgumentAt(0); |  | 
|   3480       Definition* lower = call->ArgumentAt(1); |  | 
|   3481       Definition* upper = call->ArgumentAt(2); |  | 
|   3482       // Type check left. |  | 
|   3483       AddCheckClass(left, |  | 
|   3484                     ICData::ZoneHandle( |  | 
|   3485                         I, call->ic_data()->AsUnaryClassChecksForArgNr(0)), |  | 
|   3486                     call->deopt_id(), |  | 
|   3487                     call->env(), |  | 
|   3488                     call); |  | 
|   3489       Float32x4ClampInstr* clamp = new(I) Float32x4ClampInstr( |  | 
|   3490           new(I) Value(left), |  | 
|   3491           new(I) Value(lower), |  | 
|   3492           new(I) Value(upper), |  | 
|   3493           call->deopt_id()); |  | 
|   3494       ReplaceCall(call, clamp); |  | 
|   3495       return true; |  | 
|   3496     } |  | 
|   3497     case MethodRecognizer::kFloat32x4ShuffleMix: |  | 
|   3498     case MethodRecognizer::kFloat32x4Shuffle: { |  | 
|   3499       return InlineFloat32x4Getter(call, recognized_kind); |  | 
|   3500     } |  | 
|   3501     default: |  | 
|   3502       return false; |  | 
|   3503   } |  | 
|   3504 } |  | 
|   3505  |  | 
|   3506  |  | 
|   3507 bool FlowGraphOptimizer::TryInlineFloat64x2Method( |  | 
|   3508     InstanceCallInstr* call, |  | 
|   3509     MethodRecognizer::Kind recognized_kind) { |  | 
|   3510   if (!ShouldInlineSimd()) { |  | 
|   3511     return false; |  | 
|   3512   } |  | 
|   3513   ASSERT(call->HasICData()); |  | 
|   3514   switch (recognized_kind) { |  | 
|   3515     case MethodRecognizer::kFloat64x2GetX: |  | 
|   3516     case MethodRecognizer::kFloat64x2GetY: |  | 
|   3517       ASSERT(call->ic_data()->HasReceiverClassId(kFloat64x2Cid)); |  | 
|   3518       ASSERT(call->ic_data()->HasOneTarget()); |  | 
|   3519       return InlineFloat64x2Getter(call, recognized_kind); |  | 
|   3520     case MethodRecognizer::kFloat64x2Negate: |  | 
|   3521     case MethodRecognizer::kFloat64x2Abs: |  | 
|   3522     case MethodRecognizer::kFloat64x2Sqrt: |  | 
|   3523     case MethodRecognizer::kFloat64x2GetSignMask: { |  | 
|   3524       Definition* left = call->ArgumentAt(0); |  | 
|   3525       // Type check left. |  | 
|   3526       AddCheckClass(left, |  | 
|   3527                     ICData::ZoneHandle( |  | 
|   3528                         I, call->ic_data()->AsUnaryClassChecksForArgNr(0)), |  | 
|   3529                     call->deopt_id(), |  | 
|   3530                     call->env(), |  | 
|   3531                     call); |  | 
|   3532       Float64x2ZeroArgInstr* zeroArg = |  | 
|   3533           new(I) Float64x2ZeroArgInstr( |  | 
|   3534               recognized_kind, new(I) Value(left), call->deopt_id()); |  | 
|   3535       ReplaceCall(call, zeroArg); |  | 
|   3536       return true; |  | 
|   3537     } |  | 
|   3538     case MethodRecognizer::kFloat64x2Scale: |  | 
|   3539     case MethodRecognizer::kFloat64x2WithX: |  | 
|   3540     case MethodRecognizer::kFloat64x2WithY: |  | 
|   3541     case MethodRecognizer::kFloat64x2Min: |  | 
|   3542     case MethodRecognizer::kFloat64x2Max: { |  | 
|   3543       Definition* left = call->ArgumentAt(0); |  | 
|   3544       Definition* right = call->ArgumentAt(1); |  | 
|   3545       // Type check left. |  | 
|   3546       AddCheckClass(left, |  | 
|   3547                     ICData::ZoneHandle( |  | 
|   3548                         I, call->ic_data()->AsUnaryClassChecksForArgNr(0)), |  | 
|   3549                     call->deopt_id(), |  | 
|   3550                     call->env(), |  | 
|   3551                     call); |  | 
|   3552       Float64x2OneArgInstr* one_arg = |  | 
|   3553           new(I) Float64x2OneArgInstr(recognized_kind, |  | 
|   3554                                       new(I) Value(left), |  | 
|   3555                                       new(I) Value(right), |  | 
|   3556                                       call->deopt_id()); |  | 
|   3557       ReplaceCall(call, one_arg); |  | 
|   3558       return true; |  | 
|   3559     } |  | 
|   3560     default: |  | 
|   3561       return false; |  | 
|   3562   } |  | 
|   3563 } |  | 
|   3564  |  | 
|   3565  |  | 
|   3566 bool FlowGraphOptimizer::TryInlineInt32x4Method( |  | 
|   3567     InstanceCallInstr* call, |  | 
|   3568     MethodRecognizer::Kind recognized_kind) { |  | 
|   3569   if (!ShouldInlineSimd()) { |  | 
|   3570     return false; |  | 
|   3571   } |  | 
|   3572   ASSERT(call->HasICData()); |  | 
|   3573   switch (recognized_kind) { |  | 
|   3574     case MethodRecognizer::kInt32x4ShuffleMix: |  | 
|   3575     case MethodRecognizer::kInt32x4Shuffle: |  | 
|   3576     case MethodRecognizer::kInt32x4GetFlagX: |  | 
|   3577     case MethodRecognizer::kInt32x4GetFlagY: |  | 
|   3578     case MethodRecognizer::kInt32x4GetFlagZ: |  | 
|   3579     case MethodRecognizer::kInt32x4GetFlagW: |  | 
|   3580     case MethodRecognizer::kInt32x4GetSignMask: |  | 
|   3581       ASSERT(call->ic_data()->HasReceiverClassId(kInt32x4Cid)); |  | 
|   3582       ASSERT(call->ic_data()->HasOneTarget()); |  | 
|   3583       return InlineInt32x4Getter(call, recognized_kind); |  | 
|   3584  |  | 
|   3585     case MethodRecognizer::kInt32x4Select: { |  | 
|   3586       Definition* mask = call->ArgumentAt(0); |  | 
|   3587       Definition* trueValue = call->ArgumentAt(1); |  | 
|   3588       Definition* falseValue = call->ArgumentAt(2); |  | 
|   3589       // Type check the mask. |  | 
|   3590       AddCheckClass(mask, |  | 
|   3591                     ICData::ZoneHandle( |  | 
|   3592                         I, call->ic_data()->AsUnaryClassChecksForArgNr(0)), |  | 
|   3593                     call->deopt_id(), |  | 
|   3594                     call->env(), |  | 
|   3595                     call); |  | 
|   3596       Int32x4SelectInstr* select = new(I) Int32x4SelectInstr( |  | 
|   3597           new(I) Value(mask), |  | 
|   3598           new(I) Value(trueValue), |  | 
|   3599           new(I) Value(falseValue), |  | 
|   3600           call->deopt_id()); |  | 
|   3601       ReplaceCall(call, select); |  | 
|   3602       return true; |  | 
|   3603     } |  | 
|   3604     case MethodRecognizer::kInt32x4WithFlagX: |  | 
|   3605     case MethodRecognizer::kInt32x4WithFlagY: |  | 
|   3606     case MethodRecognizer::kInt32x4WithFlagZ: |  | 
|   3607     case MethodRecognizer::kInt32x4WithFlagW: { |  | 
|   3608       Definition* left = call->ArgumentAt(0); |  | 
|   3609       Definition* flag = call->ArgumentAt(1); |  | 
|   3610       // Type check left. |  | 
|   3611       AddCheckClass(left, |  | 
|   3612                     ICData::ZoneHandle( |  | 
|   3613                         I, call->ic_data()->AsUnaryClassChecksForArgNr(0)), |  | 
|   3614                     call->deopt_id(), |  | 
|   3615                     call->env(), |  | 
|   3616                     call); |  | 
|   3617       Int32x4SetFlagInstr* setFlag = new(I) Int32x4SetFlagInstr( |  | 
|   3618           recognized_kind, |  | 
|   3619           new(I) Value(left), |  | 
|   3620           new(I) Value(flag), |  | 
|   3621           call->deopt_id()); |  | 
|   3622       ReplaceCall(call, setFlag); |  | 
|   3623       return true; |  | 
|   3624     } |  | 
|   3625     default: |  | 
|   3626       return false; |  | 
|   3627   } |  | 
|   3628 } |  | 
|   3629  |  | 
|   3630  |  | 
|   3631 bool FlowGraphOptimizer::InlineByteArrayViewLoad(Instruction* call, |  | 
|   3632                                                  Definition* receiver, |  | 
|   3633                                                  intptr_t array_cid, |  | 
|   3634                                                  intptr_t view_cid, |  | 
|   3635                                                  const ICData& ic_data, |  | 
|   3636                                                  TargetEntryInstr** entry, |  | 
|   3637                                                  Definition** last) { |  | 
|   3638   ASSERT(array_cid != kIllegalCid); |  | 
|   3639   Definition* array = receiver; |  | 
|   3640   Definition* index = call->ArgumentAt(1); |  | 
|   3641   *entry = new(I) TargetEntryInstr(flow_graph()->allocate_block_id(), |  | 
|   3642                                    call->GetBlock()->try_index()); |  | 
|   3643   (*entry)->InheritDeoptTarget(I, call); |  | 
|   3644   Instruction* cursor = *entry; |  | 
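|          // Note: the caller splices the fragment built here in place of the |  | 
|          // call; *entry receives control and *last produces the loaded value. |  | 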
|   3645  |  | 
|   3646   array_cid = PrepareInlineByteArrayViewOp(call, |  | 
|   3647                                            array_cid, |  | 
|   3648                                            view_cid, |  | 
|   3649                                            &array, |  | 
|   3650                                            index, |  | 
|   3651                                            &cursor); |  | 
|   3652  |  | 
|   3653   intptr_t deopt_id = Isolate::kNoDeoptId; |  | 
|   3654   if ((array_cid == kTypedDataInt32ArrayCid) || |  | 
|   3655       (array_cid == kTypedDataUint32ArrayCid)) { |  | 
|   3656     // Deoptimization may be needed if the result does not always fit in a Smi. |  | 
|   3657     deopt_id = (kSmiBits >= 32) ? Isolate::kNoDeoptId : call->deopt_id(); |  | 
|   3658   } |  | 
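|          // (A Smi payload is narrower than 32 bits on 32-bit platforms, so a |  | 
|          // loaded Int32/Uint32 value may not be representable as a Smi there; |  | 
|          // 64-bit platforms never need this deoptimization.) |  | 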
|   3659  |  | 
|   3660   *last = new(I) LoadIndexedInstr(new(I) Value(array), |  | 
|   3661                                   new(I) Value(index), |  | 
|   3662                                   1,  // Index scale |  | 
|   3663                                   view_cid, |  | 
|   3664                                   deopt_id, |  | 
|   3665                                   call->token_pos()); |  | 
|   3666   cursor = flow_graph()->AppendTo( |  | 
|   3667       cursor, |  | 
|   3668       *last, |  | 
|   3669       deopt_id != Isolate::kNoDeoptId ? call->env() : NULL, |  | 
|   3670       FlowGraph::kValue); |  | 
|   3671  |  | 
|   3672   if (view_cid == kTypedDataFloat32ArrayCid) { |  | 
|   3673     *last = new(I) FloatToDoubleInstr(new(I) Value(*last), deopt_id); |  | 
|   3674     flow_graph()->AppendTo(cursor, |  | 
|   3675                            *last, |  | 
|   3676                            deopt_id != Isolate::kNoDeoptId ? call->env() : NULL, |  | 
|   3677                            FlowGraph::kValue); |  | 
|   3678   } |  | 
|   3679   return true; |  | 
|   3680 } |  | 
|   3681  |  | 
|   3682  |  | 
|   3683 bool FlowGraphOptimizer::InlineByteArrayViewStore(const Function& target, |  | 
|   3684                                                   Instruction* call, |  | 
|   3685                                                   Definition* receiver, |  | 
|   3686                                                   intptr_t array_cid, |  | 
|   3687                                                   intptr_t view_cid, |  | 
|   3688                                                   const ICData& ic_data, |  | 
|   3689                                                   TargetEntryInstr** entry, |  | 
|   3690                                                   Definition** last) { |  | 
|   3691   ASSERT(array_cid != kIllegalCid); |  | 
|   3692   Definition* array = receiver; |  | 
|   3693   Definition* index = call->ArgumentAt(1); |  | 
|   3694   *entry = new(I) TargetEntryInstr(flow_graph()->allocate_block_id(), |  | 
|   3695                                    call->GetBlock()->try_index()); |  | 
|   3696   (*entry)->InheritDeoptTarget(I, call); |  | 
|   3697   Instruction* cursor = *entry; |  | 
|   3698  |  | 
|   3699   array_cid = PrepareInlineByteArrayViewOp(call, |  | 
|   3700                                            array_cid, |  | 
|   3701                                            view_cid, |  | 
|   3702                                            &array, |  | 
|   3703                                            index, |  | 
|   3704                                            &cursor); |  | 
|   3705  |  | 
|   3706   // Extract the instance call so we can use the function_name in the stored |  | 
|   3707   // value check ICData. |  | 
|   3708   InstanceCallInstr* i_call = NULL; |  | 
|   3709   if (call->IsPolymorphicInstanceCall()) { |  | 
|   3710     i_call = call->AsPolymorphicInstanceCall()->instance_call(); |  | 
|   3711   } else { |  | 
|   3712     ASSERT(call->IsInstanceCall()); |  | 
|   3713     i_call = call->AsInstanceCall(); |  | 
|   3714   } |  | 
|   3715   ASSERT(i_call != NULL); |  | 
|   3716   ICData& value_check = ICData::ZoneHandle(I); |  | 
|   3717   switch (view_cid) { |  | 
|   3718     case kTypedDataInt8ArrayCid: |  | 
|   3719     case kTypedDataUint8ArrayCid: |  | 
|   3720     case kTypedDataUint8ClampedArrayCid: |  | 
|   3721     case kExternalTypedDataUint8ArrayCid: |  | 
|   3722     case kExternalTypedDataUint8ClampedArrayCid: |  | 
|   3723     case kTypedDataInt16ArrayCid: |  | 
|   3724     case kTypedDataUint16ArrayCid: { |  | 
|   3725       // Check that the value is always a smi. |  | 
|   3726       value_check = ICData::New(flow_graph_->parsed_function()->function(), |  | 
|   3727                                 i_call->function_name(), |  | 
|   3728                                 Object::empty_array(),  // Dummy args. descr. |  | 
|   3729                                 Isolate::kNoDeoptId, |  | 
|   3730                                 1); |  | 
|   3731       value_check.AddReceiverCheck(kSmiCid, target); |  | 
|   3732       break; |  | 
|   3733     } |  | 
|   3734     case kTypedDataInt32ArrayCid: |  | 
|   3735     case kTypedDataUint32ArrayCid: |  | 
|   3736       // On 64-bit platforms assume that the stored value is always a smi. |  | 
|   3737       if (kSmiBits >= 32) { |  | 
|   3738         value_check = ICData::New(flow_graph_->parsed_function()->function(), |  | 
|   3739                                   i_call->function_name(), |  | 
|   3740                                   Object::empty_array(),  // Dummy args. descr. |  | 
|   3741                                   Isolate::kNoDeoptId, |  | 
|   3742                                   1); |  | 
|   3743         value_check.AddReceiverCheck(kSmiCid, target); |  | 
|   3744       } |  | 
|   3745       break; |  | 
|   3746     case kTypedDataFloat32ArrayCid: |  | 
|   3747     case kTypedDataFloat64ArrayCid: { |  | 
|   3748       // Check that value is always double. |  | 
|   3749       value_check = ICData::New(flow_graph_->parsed_function()->function(), |  | 
|   3750                                 i_call->function_name(), |  | 
|   3751                                 Object::empty_array(),  // Dummy args. descr. |  | 
|   3752                                 Isolate::kNoDeoptId, |  | 
|   3753                                 1); |  | 
|   3754       value_check.AddReceiverCheck(kDoubleCid, target); |  | 
|   3755       break; |  | 
|   3756     } |  | 
|   3757     case kTypedDataInt32x4ArrayCid: { |  | 
|   3758       // Check that value is always Int32x4. |  | 
|   3759       value_check = ICData::New(flow_graph_->parsed_function()->function(), |  | 
|   3760                                 i_call->function_name(), |  | 
|   3761                                 Object::empty_array(),  // Dummy args. descr. |  | 
|   3762                                 Isolate::kNoDeoptId, |  | 
|   3763                                 1); |  | 
|   3764       value_check.AddReceiverCheck(kInt32x4Cid, target); |  | 
|   3765       break; |  | 
|   3766     } |  | 
|   3767     case kTypedDataFloat32x4ArrayCid: { |  | 
|   3768       // Check that value is always Float32x4. |  | 
|   3769       value_check = ICData::New(flow_graph_->parsed_function()->function(), |  | 
|   3770                                 i_call->function_name(), |  | 
|   3771                                 Object::empty_array(),  // Dummy args. descr. |  | 
|   3772                                 Isolate::kNoDeoptId, |  | 
|   3773                                 1); |  | 
|   3774       value_check.AddReceiverCheck(kFloat32x4Cid, target); |  | 
|   3775       break; |  | 
|   3776     } |  | 
|   3777     default: |  | 
|   3778       // Array cids are already checked in the caller. |  | 
|   3779       UNREACHABLE(); |  | 
|   3780   } |  | 
|   3781  |  | 
|   3782   Definition* stored_value = call->ArgumentAt(2); |  | 
|   3783   if (!value_check.IsNull()) { |  | 
|   3784     AddCheckClass(stored_value, value_check, call->deopt_id(), call->env(), |  | 
|   3785                   call); |  | 
|   3786   } |  | 
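|          // Narrow or unbox the stored value as required by the view: Float32 |  | 
|          // stores are narrowed from double, and 32-bit integer stores use |  | 
|          // truncating unboxed representations. |  | 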
|   3787  |  | 
|   3788   if (view_cid == kTypedDataFloat32ArrayCid) { |  | 
|   3789     stored_value = new(I) DoubleToFloatInstr( |  | 
|   3790         new(I) Value(stored_value), call->deopt_id()); |  | 
|   3791     cursor = flow_graph()->AppendTo(cursor, |  | 
|   3792                                     stored_value, |  | 
|   3793                                     NULL, |  | 
|   3794                                     FlowGraph::kValue); |  | 
|   3795   } else if (view_cid == kTypedDataInt32ArrayCid) { |  | 
|   3796     stored_value = new(I) UnboxInt32Instr( |  | 
|   3797         UnboxInt32Instr::kTruncate, |  | 
|   3798         new(I) Value(stored_value), |  | 
|   3799         call->deopt_id()); |  | 
|   3800     cursor = flow_graph()->AppendTo(cursor, |  | 
|   3801                                     stored_value, |  | 
|   3802                                     call->env(), |  | 
|   3803                                     FlowGraph::kValue); |  | 
|   3804   } else if (view_cid == kTypedDataUint32ArrayCid) { |  | 
|   3805     stored_value = new(I) UnboxUint32Instr( |  | 
|   3806         new(I) Value(stored_value), |  | 
|   3807         call->deopt_id()); |  | 
|   3808     ASSERT(stored_value->AsUnboxInteger()->is_truncating()); |  | 
|   3809     cursor = flow_graph()->AppendTo(cursor, |  | 
|   3810                                     stored_value, |  | 
|   3811                                     call->env(), |  | 
|   3812                                     FlowGraph::kValue); |  | 
|   3813   } |  | 
|   3814  |  | 
|   3815   StoreBarrierType needs_store_barrier = kNoStoreBarrier; |  | 
|   3816   *last = new(I) StoreIndexedInstr(new(I) Value(array), |  | 
|   3817                                    new(I) Value(index), |  | 
|   3818                                    new(I) Value(stored_value), |  | 
|   3819                                    needs_store_barrier, |  | 
|   3820                                    1,  // Index scale |  | 
|   3821                                    view_cid, |  | 
|   3822                                    call->deopt_id(), |  | 
|   3823                                    call->token_pos()); |  | 
|   3824  |  | 
|   3825   flow_graph()->AppendTo(cursor, |  | 
|   3826                          *last, |  | 
|   3827                          call->deopt_id() != Isolate::kNoDeoptId ? |  | 
|   3828                             call->env() : NULL, |  | 
|   3829                          FlowGraph::kEffect); |  | 
|   3830   return true; |  | 
|   3831 } |  | 
|   3832  |  | 
|   3833  |  | 
|   3834  |  | 
|   3835 intptr_t FlowGraphOptimizer::PrepareInlineByteArrayViewOp( |  | 
|   3836     Instruction* call, |  | 
|   3837     intptr_t array_cid, |  | 
|   3838     intptr_t view_cid, |  | 
|   3839     Definition** array, |  | 
|   3840     Definition* byte_index, |  | 
|   3841     Instruction** cursor) { |  | 
|   3842   // Insert byte_index smi check. |  | 
|   3843   *cursor = flow_graph()->AppendTo(*cursor, |  | 
|   3844                                    new(I) CheckSmiInstr( |  | 
|   3845                                        new(I) Value(byte_index), |  | 
|   3846                                        call->deopt_id(), |  | 
|   3847                                        call->token_pos()), |  | 
|   3848                                    call->env(), |  | 
|   3849                                    FlowGraph::kEffect); |  | 
|   3850  |  | 
|   3851   LoadFieldInstr* length = |  | 
|   3852       new(I) LoadFieldInstr( |  | 
|   3853           new(I) Value(*array), |  | 
|   3854           CheckArrayBoundInstr::LengthOffsetFor(array_cid), |  | 
|   3855           Type::ZoneHandle(I, Type::SmiType()), |  | 
|   3856           call->token_pos()); |  | 
|   3857   length->set_is_immutable(true); |  | 
|   3858   length->set_result_cid(kSmiCid); |  | 
|   3859   length->set_recognized_kind( |  | 
|   3860       LoadFieldInstr::RecognizedKindFromArrayCid(array_cid)); |  | 
|   3861   *cursor = flow_graph()->AppendTo(*cursor, |  | 
|   3862                                    length, |  | 
|   3863                                    NULL, |  | 
|   3864                                    FlowGraph::kValue); |  | 
|   3865  |  | 
|   3866   intptr_t element_size = Instance::ElementSizeFor(array_cid); |  | 
|   3867   ConstantInstr* bytes_per_element = |  | 
|   3868       flow_graph()->GetConstant(Smi::Handle(I, Smi::New(element_size))); |  | 
|   3869   BinarySmiOpInstr* len_in_bytes = |  | 
|   3870       new(I) BinarySmiOpInstr(Token::kMUL, |  | 
|   3871                               new(I) Value(length), |  | 
|   3872                               new(I) Value(bytes_per_element), |  | 
|   3873                               call->deopt_id()); |  | 
|   3874   *cursor = flow_graph()->AppendTo(*cursor, len_in_bytes, call->env(), |  | 
|   3875                                    FlowGraph::kValue); |  | 
|   3876  |  | 
|   3877   // adjusted_length = len_in_bytes - (view element size - 1). |  | 
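|          // Illustrative example: a Float64 view (8-byte elements) over 16 |  | 
|          // bytes gives adjusted_length = 16 - 7 = 9, so byte indices 0 .. 8 |  | 
|          // are the valid starting points for an 8-byte access. |  | 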
|   3878   Definition* adjusted_length = len_in_bytes; |  | 
|   3879   intptr_t adjustment = Instance::ElementSizeFor(view_cid) - 1; |  | 
|   3880   if (adjustment > 0) { |  | 
|   3881     ConstantInstr* length_adjustment = |  | 
|   3882         flow_graph()->GetConstant(Smi::Handle(I, Smi::New(adjustment))); |  | 
|   3883     adjusted_length = |  | 
|   3884         new(I) BinarySmiOpInstr(Token::kSUB, |  | 
|   3885                                 new(I) Value(len_in_bytes), |  | 
|   3886                                 new(I) Value(length_adjustment), |  | 
|   3887                                 call->deopt_id()); |  | 
|   3888     *cursor = flow_graph()->AppendTo(*cursor, adjusted_length, call->env(), |  | 
|   3889                                      FlowGraph::kValue); |  | 
|   3890   } |  | 
|   3891  |  | 
|   3892   // Check adjusted_length > 0. |  | 
|   3893   ConstantInstr* zero = |  | 
|   3894       flow_graph()->GetConstant(Smi::Handle(I, Smi::New(0))); |  | 
|   3895   *cursor = flow_graph()->AppendTo(*cursor, |  | 
|   3896                                    new(I) CheckArrayBoundInstr( |  | 
|   3897                                        new(I) Value(adjusted_length), |  | 
|   3898                                        new(I) Value(zero), |  | 
|   3899                                        call->deopt_id()), |  | 
|   3900                                    call->env(), |  | 
|   3901                                    FlowGraph::kEffect); |  | 
|   3902   // Check 0 <= byte_index < adjusted_length. |  | 
|   3903   *cursor = flow_graph()->AppendTo(*cursor, |  | 
|   3904                                    new(I) CheckArrayBoundInstr( |  | 
|   3905                                        new(I) Value(adjusted_length), |  | 
|   3906                                        new(I) Value(byte_index), |  | 
|   3907                                        call->deopt_id()), |  | 
|   3908                                    call->env(), |  | 
|   3909                                    FlowGraph::kEffect); |  | 
|   3910  |  | 
|   3911   if (RawObject::IsExternalTypedDataClassId(array_cid)) { |  | 
|   3912     LoadUntaggedInstr* elements = |  | 
|   3913         new(I) LoadUntaggedInstr(new(I) Value(*array), |  | 
|   3914                                  ExternalTypedData::data_offset()); |  | 
|   3915     *cursor = flow_graph()->AppendTo(*cursor, |  | 
|   3916                                      elements, |  | 
|   3917                                      NULL, |  | 
|   3918                                      FlowGraph::kValue); |  | 
|   3919     *array = elements; |  | 
|   3920   } |  | 
|   3921   return array_cid; |  | 
|   3922 } |  | 
|   3923  |  | 
|   3924  |  | 
|   3925 bool FlowGraphOptimizer::BuildByteArrayViewLoad(InstanceCallInstr* call, |  | 
|   3926                                                 intptr_t view_cid) { |  | 
|   3927   const bool simd_view = (view_cid == kTypedDataFloat32x4ArrayCid) || |  | 
|   3928                          (view_cid == kTypedDataInt32x4ArrayCid); |  | 
|   3929   const bool float_view = (view_cid == kTypedDataFloat32ArrayCid) || |  | 
|   3930                           (view_cid == kTypedDataFloat64ArrayCid); |  | 
|   3931   if (float_view && !CanUnboxDouble()) { |  | 
|   3932     return false; |  | 
|   3933   } |  | 
|   3934   if (simd_view && !ShouldInlineSimd()) { |  | 
|   3935     return false; |  | 
|   3936   } |  | 
|   3937   return TryReplaceInstanceCallWithInline(call); |  | 
|   3938 } |  | 
|   3939  |  | 
|   3940  |  | 
|   3941 bool FlowGraphOptimizer::BuildByteArrayViewStore(InstanceCallInstr* call, |  | 
|   3942                                                  intptr_t view_cid) { |  | 
|   3943   const bool simd_view = (view_cid == kTypedDataFloat32x4ArrayCid) || |  | 
|   3944                          (view_cid == kTypedDataInt32x4ArrayCid); |  | 
|   3945   const bool float_view = (view_cid == kTypedDataFloat32ArrayCid) || |  | 
|   3946                           (view_cid == kTypedDataFloat64ArrayCid); |  | 
|   3947   if (float_view && !CanUnboxDouble()) { |  | 
|   3948     return false; |  | 
|   3949   } |  | 
|   3950   if (simd_view && !ShouldInlineSimd()) { |  | 
|   3951     return false; |  | 
|   3952   } |  | 
|   3953   return TryReplaceInstanceCallWithInline(call); |  | 
|   3954 } |  | 
|   3955  |  | 
|   3956  |  | 
|   3957 // If type tests specified by 'ic_data' do not depend on type arguments, |  | 
|   3958 // return the mapping cid->result in 'results' (i: cid, i + 1: result). |  | 
|   3959 // If all tests yield the same result, return it; otherwise Bool::null(). |  | 
|   3960 // If no mapping is possible, 'results' is empty. |  | 
|   3961 // An instance-of test returning all same results can be converted to a class |  | 
|   3962 // check. |  | 
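|        // Illustrative example: for `x is num` with receiver cids {kSmiCid, |  | 
|        // kDoubleCid}, 'results' becomes [kSmiCid, true, kDoubleCid, true] |  | 
|        // and Bool::True() is returned. |  | 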
|   3963 RawBool* FlowGraphOptimizer::InstanceOfAsBool( |  | 
|   3964     const ICData& ic_data, |  | 
|   3965     const AbstractType& type, |  | 
|   3966     ZoneGrowableArray<intptr_t>* results) const { |  | 
|   3967   ASSERT(results->is_empty()); |  | 
|   3968   ASSERT(ic_data.NumArgsTested() == 1);  // Unary checks only. |  | 
|   3969   if (!type.IsInstantiated() || type.IsMalformedOrMalbounded()) { |  | 
|   3970     return Bool::null(); |  | 
|   3971   } |  | 
|   3972   const Class& type_class = Class::Handle(I, type.type_class()); |  | 
|   3973   const intptr_t num_type_args = type_class.NumTypeArguments(); |  | 
|   3974   if (num_type_args > 0) { |  | 
|   3975     // Only raw types can be directly compared, thus disregarding type |  | 
|   3976     // arguments. |  | 
|   3977     const intptr_t num_type_params = type_class.NumTypeParameters(); |  | 
|   3978     const intptr_t from_index = num_type_args - num_type_params; |  | 
|   3979     const TypeArguments& type_arguments = |  | 
|   3980         TypeArguments::Handle(I, type.arguments()); |  | 
|   3981     const bool is_raw_type = type_arguments.IsNull() || |  | 
|   3982         type_arguments.IsRaw(from_index, num_type_params); |  | 
|   3983     if (!is_raw_type) { |  | 
|   3984       // Unknown result. |  | 
|   3985       return Bool::null(); |  | 
|   3986     } |  | 
|   3987   } |  | 
|   3988  |  | 
|   3989   const ClassTable& class_table = *isolate()->class_table(); |  | 
|   3990   Bool& prev = Bool::Handle(I); |  | 
|   3991   Class& cls = Class::Handle(I); |  | 
|   3992  |  | 
|   3993   bool results_differ = false; |  | 
|   3994   for (int i = 0; i < ic_data.NumberOfChecks(); i++) { |  | 
|   3995     cls = class_table.At(ic_data.GetReceiverClassIdAt(i)); |  | 
|   3996     if (cls.NumTypeArguments() > 0) { |  | 
|   3997       return Bool::null(); |  | 
|   3998     } |  | 
|   3999     const bool is_subtype = cls.IsSubtypeOf( |  | 
|   4000         TypeArguments::Handle(I), |  | 
|   4001         type_class, |  | 
|   4002         TypeArguments::Handle(I), |  | 
|   4003         NULL); |  | 
|   4004     results->Add(cls.id()); |  | 
|   4005     results->Add(is_subtype); |  | 
|   4006     if (prev.IsNull()) { |  | 
|   4007       prev = Bool::Get(is_subtype).raw(); |  | 
|   4008     } else { |  | 
|   4009       if (is_subtype != prev.value()) { |  | 
|   4010         results_differ = true; |  | 
|   4011       } |  | 
|   4012     } |  | 
|   4013   } |  | 
|   4014   return results_differ ? Bool::null() : prev.raw(); |  | 
|   4015 } |  | 
|   4016  |  | 
|   4017  |  | 
|   4018 // Returns true if checking against this type is a direct class id comparison. |  | 
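|        // Illustrative example: a test against a leaf class C (no subclasses, |  | 
|        // not implemented as an interface) reduces to comparing the receiver's |  | 
|        // class id with C's id. |  | 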
|   4019 bool FlowGraphOptimizer::TypeCheckAsClassEquality(const AbstractType& type) { |  | 
|   4020   ASSERT(type.IsFinalized() && !type.IsMalformedOrMalbounded()); |  | 
|   4021   // Requires CHA. |  | 
|   4022   if (!FLAG_use_cha) return false; |  | 
|   4023   if (!type.IsInstantiated()) return false; |  | 
|   4024   const Class& type_class = Class::Handle(type.type_class()); |  | 
|   4025   // Signature classes have different type checking rules. |  | 
|   4026   if (type_class.IsSignatureClass()) return false; |  | 
|   4027   // Could be an interface check? |  | 
|   4028   if (isolate()->cha()->IsImplemented(type_class)) return false; |  | 
|   4029   // Check if there are subclasses. |  | 
|   4030   if (isolate()->cha()->HasSubclasses(type_class)) return false; |  | 
|   4031   const intptr_t num_type_args = type_class.NumTypeArguments(); |  | 
|   4032   if (num_type_args > 0) { |  | 
|   4033     // Only raw types can be directly compared, thus disregarding type |  | 
|   4034     // arguments. |  | 
|   4035     const intptr_t num_type_params = type_class.NumTypeParameters(); |  | 
|   4036     const intptr_t from_index = num_type_args - num_type_params; |  | 
|   4037     const TypeArguments& type_arguments = |  | 
|   4038         TypeArguments::Handle(type.arguments()); |  | 
|   4039     const bool is_raw_type = type_arguments.IsNull() || |  | 
|   4040         type_arguments.IsRaw(from_index, num_type_params); |  | 
|   4041     return is_raw_type; |  | 
|   4042   } |  | 
|   4043   return true; |  | 
|   4044 } |  | 
|   4045  |  | 
|   4046  |  | 
|   4047 static bool CidTestResultsContains(const ZoneGrowableArray<intptr_t>& results, |  | 
|   4048                                    intptr_t test_cid) { |  | 
|   4049   for (intptr_t i = 0; i < results.length(); i += 2) { |  | 
|   4050     if (results[i] == test_cid) return true; |  | 
|   4051   } |  | 
|   4052   return false; |  | 
|   4053 } |  | 
|   4054  |  | 
|   4055  |  | 
|   4056 static void TryAddTest(ZoneGrowableArray<intptr_t>* results, |  | 
|   4057                        intptr_t test_cid, |  | 
|   4058                        bool result) { |  | 
|   4059   if (!CidTestResultsContains(*results, test_cid)) { |  | 
|   4060     results->Add(test_cid); |  | 
|   4061     results->Add(result); |  | 
|   4062   } |  | 
|   4063 } |  | 
|   4064  |  | 
|   4065  |  | 
|   4066 // Tries to add cid tests to 'results' so that no deoptimization is |  | 
|   4067 // necessary. |  | 
|   4068 // TODO(srdjan): Do also for other than 'int' type. |  | 
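|        // Illustrative example: for `x is int` the results are expanded so |  | 
|        // that kSmiCid, kMintCid and kBigintCid all map to true; the emitted |  | 
|        // TestCids instruction then never needs to deoptimize. |  | 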
|   4069 static bool TryExpandTestCidsResult(ZoneGrowableArray<intptr_t>* results, |  | 
|   4070                                     const AbstractType& type) { |  | 
|   4071   ASSERT(results->length() >= 2);  // At least one entry. |  | 
|   4072   const ClassTable& class_table = *Isolate::Current()->class_table(); |  | 
|   4073   if ((*results)[0] != kSmiCid) { |  | 
|   4074     const Class& cls = Class::Handle(class_table.At(kSmiCid)); |  | 
|   4075     const Class& type_class = Class::Handle(type.type_class()); |  | 
|   4076     const bool smi_is_subtype = cls.IsSubtypeOf(TypeArguments::Handle(), |  | 
|   4077                                                 type_class, |  | 
|   4078                                                 TypeArguments::Handle(), |  | 
|   4079                                                 NULL); |  | 
|   4080     results->Add((*results)[results->length() - 2]); |  | 
|   4081     results->Add((*results)[results->length() - 2]); |  | 
|   4082     for (intptr_t i = results->length() - 3; i > 1; --i) { |  | 
|   4083       (*results)[i] = (*results)[i - 2]; |  | 
|   4084     } |  | 
|   4085     (*results)[0] = kSmiCid; |  | 
|   4086     (*results)[1] = smi_is_subtype; |  | 
|   4087   } |  | 
|   4088  |  | 
|   4089   ASSERT(type.IsInstantiated() && !type.IsMalformedOrMalbounded()); |  | 
|   4090   ASSERT(results->length() >= 2); |  | 
|   4091   if (type.IsIntType()) { |  | 
|   4092     ASSERT((*results)[0] == kSmiCid); |  | 
|   4093     TryAddTest(results, kMintCid, true); |  | 
|   4094     TryAddTest(results, kBigintCid, true); |  | 
|   4095     // Cannot deoptimize since all tests returning true have been added. |  | 
|   4096     return false; |  | 
|   4097   } |  | 
|   4098  |  | 
|   4099   return true;  // May deoptimize since we have not identified all 'true' tests. |  | 
|   4100 } |  | 
|   4101  |  | 
|   4102  |  | 
|   4103 // TODO(srdjan): Use ICData to check if always true or false. |  | 
|   4104 void FlowGraphOptimizer::ReplaceWithInstanceOf(InstanceCallInstr* call) { |  | 
|   4105   ASSERT(Token::IsTypeTestOperator(call->token_kind())); |  | 
|   4106   Definition* left = call->ArgumentAt(0); |  | 
|   4107   Definition* instantiator = call->ArgumentAt(1); |  | 
|   4108   Definition* type_args = call->ArgumentAt(2); |  | 
|   4109   const AbstractType& type = |  | 
|   4110       AbstractType::Cast(call->ArgumentAt(3)->AsConstant()->value()); |  | 
|   4111   const bool negate = Bool::Cast( |  | 
|   4112       call->ArgumentAt(4)->OriginalDefinition()->AsConstant()->value()).value(); |  | 
|   4113   const ICData& unary_checks = |  | 
|   4114       ICData::ZoneHandle(I, call->ic_data()->AsUnaryClassChecks()); |  | 
|   4115   if (FLAG_warn_on_javascript_compatibility && |  | 
|   4116       !unary_checks.IssuedJSWarning() && |  | 
|   4117       (type.IsIntType() || type.IsDoubleType() || !type.IsInstantiated())) { |  | 
|   4118     // No warning was reported yet for this type check, either because it has |  | 
|   4119     // not been executed yet, or because no problematic combinations of instance |  | 
|   4120     // type and test type have been encountered so far. A warning may still be |  | 
|   4121     // reported, so do not replace the instance call. |  | 
|   4122     return; |  | 
|   4123   } |  | 
|   4124   if (unary_checks.NumberOfChecks() <= FLAG_max_polymorphic_checks) { |  | 
|   4125     ZoneGrowableArray<intptr_t>* results = |  | 
|   4126         new(I) ZoneGrowableArray<intptr_t>(unary_checks.NumberOfChecks() * 2); |  | 
|   4127     Bool& as_bool = |  | 
|   4128         Bool::ZoneHandle(I, InstanceOfAsBool(unary_checks, type, results)); |  | 
|   4129     if (as_bool.IsNull()) { |  | 
|   4130       if (results->length() == unary_checks.NumberOfChecks() * 2) { |  | 
|   4131         const bool can_deopt = TryExpandTestCidsResult(results, type); |  | 
|   4132         TestCidsInstr* test_cids = new(I) TestCidsInstr( |  | 
|   4133             call->token_pos(), |  | 
|   4134             negate ? Token::kISNOT : Token::kIS, |  | 
|   4135             new(I) Value(left), |  | 
|   4136             *results, |  | 
|   4137             can_deopt ? call->deopt_id() : Isolate::kNoDeoptId); |  | 
|   4138         // Remove type. |  | 
|   4139         ReplaceCall(call, test_cids); |  | 
|   4140         return; |  | 
|   4141       } |  | 
|   4142     } else { |  | 
|   4143       // TODO(srdjan): Use TestCidsInstr also for this case. |  | 
|   4144       // One result only. |  | 
|   4145       AddReceiverCheck(call); |  | 
|   4146       if (negate) { |  | 
|   4147         as_bool = Bool::Get(!as_bool.value()).raw(); |  | 
|   4148       } |  | 
|   4149       ConstantInstr* bool_const = flow_graph()->GetConstant(as_bool); |  | 
|   4150       for (intptr_t i = 0; i < call->ArgumentCount(); ++i) { |  | 
|   4151         PushArgumentInstr* push = call->PushArgumentAt(i); |  | 
|   4152         push->ReplaceUsesWith(push->value()->definition()); |  | 
|   4153         push->RemoveFromGraph(); |  | 
|   4154       } |  | 
|   4155       call->ReplaceUsesWith(bool_const); |  | 
|   4156       ASSERT(current_iterator()->Current() == call); |  | 
|   4157       current_iterator()->RemoveCurrentFromGraph(); |  | 
|   4158       return; |  | 
|   4159     } |  | 
|   4160   } |  | 
|   4161  |  | 
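|          // Fall back: use a direct class-id comparison when the type test is |  | 
|          // equivalent to a class check; otherwise emit a generic InstanceOf. |  | 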
|   4162   if (TypeCheckAsClassEquality(type)) { |  | 
|   4163     LoadClassIdInstr* left_cid = new(I) LoadClassIdInstr(new(I) Value(left)); |  | 
|   4164     InsertBefore(call, |  | 
|   4165                  left_cid, |  | 
|   4166                  NULL, |  | 
|   4167                  FlowGraph::kValue); |  | 
|   4168     const intptr_t type_cid = Class::Handle(I, type.type_class()).id(); |  | 
|   4169     ConstantInstr* cid = |  | 
|   4170         flow_graph()->GetConstant(Smi::Handle(I, Smi::New(type_cid))); |  | 
|   4171  |  | 
|   4172     StrictCompareInstr* check_cid = |  | 
|   4173         new(I) StrictCompareInstr( |  | 
|   4174             call->token_pos(), |  | 
|   4175             negate ? Token::kNE_STRICT : Token::kEQ_STRICT, |  | 
|   4176             new(I) Value(left_cid), |  | 
|   4177             new(I) Value(cid), |  | 
|   4178             false);  // No number check. |  | 
|   4179     ReplaceCall(call, check_cid); |  | 
|   4180     return; |  | 
|   4181   } |  | 
|   4182  |  | 
|   4183   InstanceOfInstr* instance_of = |  | 
|   4184       new(I) InstanceOfInstr(call->token_pos(), |  | 
|   4185                              new(I) Value(left), |  | 
|   4186                              new(I) Value(instantiator), |  | 
|   4187                              new(I) Value(type_args), |  | 
|   4188                              type, |  | 
|   4189                              negate, |  | 
|   4190                              call->deopt_id()); |  | 
|   4191   ReplaceCall(call, instance_of); |  | 
|   4192 } |  | 
|   4193  |  | 
|   4194  |  | 
|   4195 // TODO(srdjan): Apply optimizations as in ReplaceWithInstanceOf (TestCids). |  | 
|   4196 void FlowGraphOptimizer::ReplaceWithTypeCast(InstanceCallInstr* call) { |  | 
|   4197   ASSERT(Token::IsTypeCastOperator(call->token_kind())); |  | 
|   4198   Definition* left = call->ArgumentAt(0); |  | 
|   4199   Definition* instantiator = call->ArgumentAt(1); |  | 
|   4200   Definition* type_args = call->ArgumentAt(2); |  | 
|   4201   const AbstractType& type = |  | 
|   4202       AbstractType::Cast(call->ArgumentAt(3)->AsConstant()->value()); |  | 
|   4203   ASSERT(!type.IsMalformedOrMalbounded()); |  | 
|   4204   const ICData& unary_checks = |  | 
|   4205       ICData::ZoneHandle(I, call->ic_data()->AsUnaryClassChecks()); |  | 
|   4206   if (FLAG_warn_on_javascript_compatibility && |  | 
|   4207       !unary_checks.IssuedJSWarning() && |  | 
|   4208       (type.IsIntType() || type.IsDoubleType() || !type.IsInstantiated())) { |  | 
|   4209     // No warning was reported yet for this type check, either because it has |  | 
|   4210     // not been executed yet, or because no problematic combinations of instance |  | 
|   4211     // type and test type have been encountered so far. A warning may still be |  | 
|   4212     // reported, so do not replace the instance call. |  | 
|   4213     return; |  | 
|   4214   } |  | 
|   4215   if (unary_checks.NumberOfChecks() <= FLAG_max_polymorphic_checks) { |  | 
|   4216     ZoneGrowableArray<intptr_t>* results = |  | 
|   4217         new(I) ZoneGrowableArray<intptr_t>(unary_checks.NumberOfChecks() * 2); |  | 
|   4218     const Bool& as_bool = Bool::ZoneHandle(I, |  | 
|   4219         InstanceOfAsBool(unary_checks, type, results)); |  | 
|   4220     if (as_bool.raw() == Bool::True().raw()) { |  | 
|   4221       AddReceiverCheck(call); |  | 
|   4222       // Remove the original push arguments. |  | 
|   4223       for (intptr_t i = 0; i < call->ArgumentCount(); ++i) { |  | 
|   4224         PushArgumentInstr* push = call->PushArgumentAt(i); |  | 
|   4225         push->ReplaceUsesWith(push->value()->definition()); |  | 
|   4226         push->RemoveFromGraph(); |  | 
|   4227       } |  | 
|   4228       // Remove call, replace it with 'left'. |  | 
|   4229       call->ReplaceUsesWith(left); |  | 
|   4230       ASSERT(current_iterator()->Current() == call); |  | 
|   4231       current_iterator()->RemoveCurrentFromGraph(); |  | 
|   4232       return; |  | 
|   4233     } |  | 
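|          // The cast is not known to always succeed: emit an AssertAssignable, |  | 
|          // which throws a CastError at run time if the value does not conform. |  | 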
|   4234   } |  | 
|   4235   const String& dst_name = String::ZoneHandle(I, |  | 
|   4236       Symbols::New(Exceptions::kCastErrorDstName)); |  | 
|   4237   AssertAssignableInstr* assert_as = |  | 
|   4238       new(I) AssertAssignableInstr(call->token_pos(), |  | 
|   4239                                    new(I) Value(left), |  | 
|   4240                                    new(I) Value(instantiator), |  | 
|   4241                                    new(I) Value(type_args), |  | 
|   4242                                    type, |  | 
|   4243                                    dst_name, |  | 
|   4244                                    call->deopt_id()); |  | 
|   4245   ReplaceCall(call, assert_as); |  | 
|   4246 } |  | 
|   4247  |  | 
|   4248  |  | 
|   4249 // Tries to optimize instance call by replacing it with a faster instruction |  | 
|   4250 // (e.g., binary op, field load, ...). |  | 
|   4251 void FlowGraphOptimizer::VisitInstanceCall(InstanceCallInstr* instr) { |  | 
|   4252   if (!instr->HasICData() || (instr->ic_data()->NumberOfUsedChecks() == 0)) { |  | 
|   4253     return; |  | 
|   4254   } |  | 
|   4255  |  | 
|   4256   const Token::Kind op_kind = instr->token_kind(); |  | 
|   4257   // Type test is special as it always gets converted into inlined code. |  | 
|   4258   if (Token::IsTypeTestOperator(op_kind)) { |  | 
|   4259     ReplaceWithInstanceOf(instr); |  | 
|   4260     return; |  | 
|   4261   } |  | 
|   4262  |  | 
|   4263   if (Token::IsTypeCastOperator(op_kind)) { |  | 
|   4264     ReplaceWithTypeCast(instr); |  | 
|   4265     return; |  | 
|   4266   } |  | 
|   4267  |  | 
|   4268   const ICData& unary_checks = |  | 
|   4269       ICData::ZoneHandle(I, instr->ic_data()->AsUnaryClassChecks()); |  | 
|   4270  |  | 
|   4271   const intptr_t max_checks = (op_kind == Token::kEQ) |  | 
|   4272       ? FLAG_max_equality_polymorphic_checks |  | 
|   4273       : FLAG_max_polymorphic_checks; |  | 
|   4274   if ((unary_checks.NumberOfChecks() > max_checks) && |  | 
|   4275       InstanceCallNeedsClassCheck(instr, RawFunction::kRegularFunction)) { |  | 
|   4276     // Too many checks; the call will be megamorphic and needs unary checks. |  | 
|   4277     instr->set_ic_data(&unary_checks); |  | 
|   4278     return; |  | 
|   4279   } |  | 
|   4280  |  | 
|   4281   if ((op_kind == Token::kASSIGN_INDEX) && TryReplaceWithStoreIndexed(instr)) { |  | 
|   4282     return; |  | 
|   4283   } |  | 
|   4284   if ((op_kind == Token::kINDEX) && TryReplaceWithLoadIndexed(instr)) { |  | 
|   4285     return; |  | 
|   4286   } |  | 
|   4287  |  | 
|   4288   if (op_kind == Token::kEQ && TryReplaceWithEqualityOp(instr, op_kind)) { |  | 
|   4289     return; |  | 
|   4290   } |  | 
|   4291  |  | 
|   4292   if (Token::IsRelationalOperator(op_kind) && |  | 
|   4293       TryReplaceWithRelationalOp(instr, op_kind)) { |  | 
|   4294     return; |  | 
|   4295   } |  | 
|   4296  |  | 
|   4297   if (Token::IsBinaryOperator(op_kind) && |  | 
|   4298       TryReplaceWithBinaryOp(instr, op_kind)) { |  | 
|   4299     return; |  | 
|   4300   } |  | 
|   4301   if (Token::IsUnaryOperator(op_kind) && |  | 
|   4302       TryReplaceWithUnaryOp(instr, op_kind)) { |  | 
|   4303     return; |  | 
|   4304   } |  | 
|   4305   if ((op_kind == Token::kGET) && TryInlineInstanceGetter(instr)) { |  | 
|   4306     return; |  | 
|   4307   } |  | 
|   4308   if ((op_kind == Token::kSET) && |  | 
|   4309       TryInlineInstanceSetter(instr, unary_checks)) { |  | 
|   4310     return; |  | 
|   4311   } |  | 
|   4312   if (TryInlineInstanceMethod(instr)) { |  | 
|   4313     return; |  | 
|   4314   } |  | 
|   4315  |  | 
|   4316   bool has_one_target = unary_checks.HasOneTarget(); |  | 
|   4317  |  | 
|   4318   if (has_one_target) { |  | 
|   4319     // Check if the single target is a polymorphic target; if it is, |  | 
|   4320     // we don't have one target. |  | 
|   4321     const Function& target = |  | 
|   4322         Function::Handle(I, unary_checks.GetTargetAt(0)); |  | 
|   4323     const bool polymorphic_target = MethodRecognizer::PolymorphicTarget(target); |  | 
|   4324     has_one_target = !polymorphic_target; |  | 
|   4325   } |  | 
|   4326  |  | 
|   4327   if (has_one_target) { |  | 
|   4328     RawFunction::Kind function_kind = |  | 
|   4329         Function::Handle(I, unary_checks.GetTargetAt(0)).kind(); |  | 
|   4330     if (!InstanceCallNeedsClassCheck(instr, function_kind)) { |  | 
|   4331       const bool call_with_checks = false; |  | 
|   4332       PolymorphicInstanceCallInstr* call = |  | 
|   4333           new(I) PolymorphicInstanceCallInstr(instr, unary_checks, |  | 
|   4334                                               call_with_checks); |  | 
|   4335       instr->ReplaceWith(call, current_iterator()); |  | 
|   4336       return; |  | 
|   4337     } |  | 
|   4338   } |  | 
|   4339  |  | 
|   4340   if (unary_checks.NumberOfChecks() <= FLAG_max_polymorphic_checks) { |  | 
|   4341     bool call_with_checks; |  | 
|   4342     if (has_one_target) { |  | 
|   4343       // Type propagation has not run yet; we cannot eliminate the check. |  | 
|   4344       AddReceiverCheck(instr); |  | 
|   4345       // Call can still deoptimize, do not detach environment from instr. |  | 
|   4346       call_with_checks = false; |  | 
|   4347     } else { |  | 
|   4348       call_with_checks = true; |  | 
|   4349     } |  | 
|   4350     PolymorphicInstanceCallInstr* call = |  | 
|   4351         new(I) PolymorphicInstanceCallInstr(instr, unary_checks, |  | 
|   4352                                             call_with_checks); |  | 
|   4353     instr->ReplaceWith(call, current_iterator()); |  | 
|   4354   } |  | 
|   4355 } |  | 
|   4356  |  | 
|   4357  |  | 
|   4358 void FlowGraphOptimizer::VisitStaticCall(StaticCallInstr* call) { |  | 
|   4359   if (!CanUnboxDouble()) { |  | 
|   4360     return; |  | 
|   4361   } |  | 
|   4362   MethodRecognizer::Kind recognized_kind = |  | 
|   4363       MethodRecognizer::RecognizeKind(call->function()); |  | 
|   4364   MathUnaryInstr::MathUnaryKind unary_kind; |  | 
|   4365   switch (recognized_kind) { |  | 
|   4366     case MethodRecognizer::kMathSqrt: |  | 
|   4367       unary_kind = MathUnaryInstr::kSqrt; |  | 
|   4368       break; |  | 
|   4369     case MethodRecognizer::kMathSin: |  | 
|   4370       unary_kind = MathUnaryInstr::kSin; |  | 
|   4371       break; |  | 
|   4372     case MethodRecognizer::kMathCos: |  | 
|   4373       unary_kind = MathUnaryInstr::kCos; |  | 
|   4374       break; |  | 
|   4375     default: |  | 
|   4376       unary_kind = MathUnaryInstr::kIllegal; |  | 
|   4377       break; |  | 
|   4378   } |  | 
|   4379   if (unary_kind != MathUnaryInstr::kIllegal) { |  | 
|   4380     MathUnaryInstr* math_unary = |  | 
|   4381         new(I) MathUnaryInstr(unary_kind, |  | 
|   4382                               new(I) Value(call->ArgumentAt(0)), |  | 
|   4383                               call->deopt_id()); |  | 
|   4384     ReplaceCall(call, math_unary); |  | 
|   4385   } else if ((recognized_kind == MethodRecognizer::kFloat32x4Zero) || |  | 
|   4386              (recognized_kind == MethodRecognizer::kFloat32x4Splat) || |  | 
|   4387              (recognized_kind == MethodRecognizer::kFloat32x4Constructor) || |  | 
|   4388              (recognized_kind == MethodRecognizer::kFloat32x4FromFloat64x2)) { |  | 
|   4389     TryInlineFloat32x4Constructor(call, recognized_kind); |  | 
|   4390   } else if ((recognized_kind == MethodRecognizer::kFloat64x2Constructor) || |  | 
|   4391              (recognized_kind == MethodRecognizer::kFloat64x2Zero) || |  | 
|   4392              (recognized_kind == MethodRecognizer::kFloat64x2Splat) || |  | 
|   4393              (recognized_kind == MethodRecognizer::kFloat64x2FromFloat32x4)) { |  | 
|   4394     TryInlineFloat64x2Constructor(call, recognized_kind); |  | 
|   4395   } else if ((recognized_kind == MethodRecognizer::kInt32x4BoolConstructor) || |  | 
|   4396              (recognized_kind == MethodRecognizer::kInt32x4Constructor)) { |  | 
|   4397     TryInlineInt32x4Constructor(call, recognized_kind); |  | 
|   4398   } else if (recognized_kind == MethodRecognizer::kObjectConstructor) { |  | 
|   4399     // Remove the original push arguments. |  | 
|   4400     for (intptr_t i = 0; i < call->ArgumentCount(); ++i) { |  | 
|   4401       PushArgumentInstr* push = call->PushArgumentAt(i); |  | 
|   4402       push->ReplaceUsesWith(push->value()->definition()); |  | 
|   4403       push->RemoveFromGraph(); |  | 
|   4404     } |  | 
|   4405     // Manually replace call with global null constant. ReplaceCall can't |  | 
|   4406     // be used for definitions that are already in the graph. |  | 
|   4407     call->ReplaceUsesWith(flow_graph_->constant_null()); |  | 
|   4408     ASSERT(current_iterator()->Current() == call); |  | 
|   4409     current_iterator()->RemoveCurrentFromGraph(); |  | 
|   4410   } else if ((recognized_kind == MethodRecognizer::kMathMin) || |  | 
|   4411              (recognized_kind == MethodRecognizer::kMathMax)) { |  | 
|   4412     // We can handle only monomorphic min/max call sites with both arguments |  | 
|   4413     // being either doubles or smis. |  | 
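|            // E.g. a "math.min(x, y)" site that recorded a single |  | 
|            // (double, double) check gets result_cid = kDoubleCid and is |  | 
|            // replaced below by a class-checked MathMinMaxInstr. |  | 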
|   4414     if (call->HasICData() && (call->ic_data()->NumberOfChecks() == 1)) { |  | 
|   4415       const ICData& ic_data = *call->ic_data(); |  | 
|   4416       intptr_t result_cid = kIllegalCid; |  | 
|   4417       if (ICDataHasReceiverArgumentClassIds(ic_data, kDoubleCid, kDoubleCid)) { |  | 
|   4418         result_cid = kDoubleCid; |  | 
|   4419       } else if (ICDataHasReceiverArgumentClassIds(ic_data, kSmiCid, kSmiCid)) { |  | 
|   4420         result_cid = kSmiCid; |  | 
|   4421       } |  | 
|   4422       if (result_cid != kIllegalCid) { |  | 
|   4423         MathMinMaxInstr* min_max = new(I) MathMinMaxInstr( |  | 
|   4424             recognized_kind, |  | 
|   4425             new(I) Value(call->ArgumentAt(0)), |  | 
|   4426             new(I) Value(call->ArgumentAt(1)), |  | 
|   4427             call->deopt_id(), |  | 
|   4428             result_cid); |  | 
|   4429         const ICData& unary_checks = |  | 
|   4430             ICData::ZoneHandle(I, ic_data.AsUnaryClassChecks()); |  | 
|   4431         AddCheckClass(min_max->left()->definition(), |  | 
|   4432                       unary_checks, |  | 
|   4433                       call->deopt_id(), |  | 
|   4434                       call->env(), |  | 
|   4435                       call); |  | 
|   4436         AddCheckClass(min_max->right()->definition(), |  | 
|   4437                       unary_checks, |  | 
|   4438                       call->deopt_id(), |  | 
|   4439                       call->env(), |  | 
|   4440                       call); |  | 
|   4441         ReplaceCall(call, min_max); |  | 
|   4442       } |  | 
|   4443     } |  | 
|   4444   } else if (recognized_kind == MethodRecognizer::kMathDoublePow) { |  | 
|   4445     // We know that the first argument is a double and the second is a num. |  | 
|   4446     // InvokeMathCFunctionInstr requires unboxed doubles. UnboxDouble |  | 
|   4447     // instructions contain type checks and conversions to double. |  | 
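|            // E.g. a recognized pow() call is replaced below by an |  | 
|            // InvokeMathCFunctionInstr whose unboxed-double inputs carry the |  | 
|            // necessary checks and conversions. |  | 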
|   4448     ZoneGrowableArray<Value*>* args = |  | 
|   4449         new(I) ZoneGrowableArray<Value*>(call->ArgumentCount()); |  | 
|   4450     for (intptr_t i = 0; i < call->ArgumentCount(); i++) { |  | 
|   4451       args->Add(new(I) Value(call->ArgumentAt(i))); |  | 
|   4452     } |  | 
|   4453     InvokeMathCFunctionInstr* invoke = |  | 
|   4454         new(I) InvokeMathCFunctionInstr(args, |  | 
|   4455                                         call->deopt_id(), |  | 
|   4456                                         recognized_kind, |  | 
|   4457                                         call->token_pos()); |  | 
|   4458     ReplaceCall(call, invoke); |  | 
|   4459   } else if (recognized_kind == MethodRecognizer::kDoubleFromInteger) { |  | 
|   4460     if (call->HasICData() && (call->ic_data()->NumberOfChecks() == 1)) { |  | 
|   4461       const ICData& ic_data = *call->ic_data(); |  | 
|   4462       if (CanUnboxDouble()) { |  | 
|   4463         if (ArgIsAlways(kSmiCid, ic_data, 1)) { |  | 
|   4464           Definition* arg = call->ArgumentAt(1); |  | 
|   4465           AddCheckSmi(arg, call->deopt_id(), call->env(), call); |  | 
|   4466           ReplaceCall(call, |  | 
|   4467                       new(I) SmiToDoubleInstr(new(I) Value(arg), |  | 
|   4468                                               call->token_pos())); |  | 
|   4469         } else if (ArgIsAlways(kMintCid, ic_data, 1) && |  | 
|   4470                    CanConvertUnboxedMintToDouble()) { |  | 
|   4471           Definition* arg = call->ArgumentAt(1); |  | 
|   4472           ReplaceCall(call, |  | 
|   4473                       new(I) MintToDoubleInstr(new(I) Value(arg), |  | 
|   4474                                                call->deopt_id())); |  | 
|   4475         } |  | 
|   4476       } |  | 
|   4477     } |  | 
|   4478   } else if (call->function().IsFactory()) { |  | 
|   4479     const Class& function_class = |  | 
|   4480         Class::Handle(I, call->function().Owner()); |  | 
|   4481     if ((function_class.library() == Library::CoreLibrary()) || |  | 
|   4482         (function_class.library() == Library::TypedDataLibrary())) { |  | 
|   4483       intptr_t cid = FactoryRecognizer::ResultCid(call->function()); |  | 
|   4484       switch (cid) { |  | 
|   4485         case kArrayCid: { |  | 
|   4486           Value* type = new(I) Value(call->ArgumentAt(0)); |  | 
|   4487           Value* num_elements = new(I) Value(call->ArgumentAt(1)); |  | 
|   4488           if (num_elements->BindsToConstant() && |  | 
|   4489               num_elements->BoundConstant().IsSmi()) { |  | 
|   4490             intptr_t length = Smi::Cast(num_elements->BoundConstant()).Value(); |  | 
|   4491             if (length >= 0 && length <= Array::kMaxElements) { |  | 
|   4492               CreateArrayInstr* create_array = |  | 
|   4493                   new(I) CreateArrayInstr( |  | 
|   4494                       call->token_pos(), type, num_elements); |  | 
|   4495               ReplaceCall(call, create_array); |  | 
|   4496             } |  | 
|   4497           } |  | 
|   4498         } |  | 
|   4499         default: |  | 
|   4500           break; |  | 
|   4501       } |  | 
|   4502     } |  | 
|   4503   } |  | 
|   4504 } |  | 
|   4505  |  | 
|   4506  |  | 
|   4507 void FlowGraphOptimizer::VisitStoreInstanceField( |  | 
|   4508     StoreInstanceFieldInstr* instr) { |  | 
|   4509   if (instr->IsUnboxedStore()) { |  | 
|   4510     ASSERT(instr->is_initialization_); |  | 
|   4511     // Determine if this field should be unboxed based on the usage of getter |  | 
|   4512     // and setter functions: The heuristic requires that the setter has a |  | 
|   4513     // usage count of at least 1/kGetterSetterRatio of the getter usage count. |  | 
|   4514     // This is to avoid unboxing fields where the setter is never or rarely |  | 
|   4515     // executed. |  | 
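|            // E.g. with a ratio of 13, a field whose getter ran 1300 times |  | 
|            // stays an unboxing candidate only if its setter ran at least |  | 
|            // 100 times (13 * 100 >= 1300). |  | 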
|   4516     const Field& field = Field::ZoneHandle(I, instr->field().raw()); |  | 
|   4517     const String& field_name = String::Handle(I, field.name()); |  | 
|   4518     const Class& owner = Class::Handle(I, field.owner()); |  | 
|   4519     const Function& getter = |  | 
|   4520         Function::Handle(I, owner.LookupGetterFunction(field_name)); |  | 
|   4521     const Function& setter = |  | 
|   4522         Function::Handle(I, owner.LookupSetterFunction(field_name)); |  | 
|   4523     bool result = !getter.IsNull() |  | 
|   4524                && !setter.IsNull() |  | 
|   4525                && (setter.usage_counter() > 0) |  | 
|   4526                && (FLAG_getter_setter_ratio * setter.usage_counter() >= |  | 
|   4527                    getter.usage_counter()); |  | 
|   4528     if (!result) { |  | 
|   4529       if (FLAG_trace_optimization) { |  | 
|   4530         OS::Print("Disabling unboxing of %s\n", field.ToCString()); |  | 
|   4531       } |  | 
|   4532       field.set_is_unboxing_candidate(false); |  | 
|   4533       field.DeoptimizeDependentCode(); |  | 
|   4534     } else { |  | 
|   4535       FlowGraph::AddToGuardedFields(flow_graph_->guarded_fields(), &field); |  | 
|   4536     } |  | 
|   4537   } |  | 
|   4538 } |  | 
|   4539  |  | 
|   4540  |  | 
|   4541 void FlowGraphOptimizer::VisitAllocateContext(AllocateContextInstr* instr) { |  | 
|   4542   // Replace generic allocation with a sequence of inlined allocation and |  | 
|   4543   // explicit initializing stores. |  | 
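|          // Schematically, a context with N variables becomes: |  | 
|          //   v <- AllocateUninitializedContext |  | 
|          //   StoreInstanceField(v.parent, null) |  | 
|          //   StoreInstanceField(v[i], null)   // for each i in 0..N-1 |  | 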
|   4544   AllocateUninitializedContextInstr* replacement = |  | 
|   4545       new AllocateUninitializedContextInstr(instr->token_pos(), |  | 
|   4546                                             instr->num_context_variables()); |  | 
|   4547   instr->ReplaceWith(replacement, current_iterator()); |  | 
|   4548  |  | 
|   4549   StoreInstanceFieldInstr* store = |  | 
|   4550       new(I) StoreInstanceFieldInstr(Context::parent_offset(), |  | 
|   4551                                      new Value(replacement), |  | 
|   4552                                      new Value(flow_graph_->constant_null()), |  | 
|   4553                                      kNoStoreBarrier, |  | 
|   4554                                      instr->token_pos()); |  | 
|   4555   store->set_is_initialization(true);  // Won't be eliminated by DSE. |  | 
|   4556   flow_graph_->InsertAfter(replacement, store, NULL, FlowGraph::kEffect); |  | 
|   4557   Definition* cursor = store; |  | 
|   4558   for (intptr_t i = 0; i < instr->num_context_variables(); ++i) { |  | 
|   4559     store = |  | 
|   4560         new(I) StoreInstanceFieldInstr(Context::variable_offset(i), |  | 
|   4561                                        new Value(replacement), |  | 
|   4562                                        new Value(flow_graph_->constant_null()), |  | 
|   4563                                        kNoStoreBarrier, |  | 
|   4564                                        instr->token_pos()); |  | 
|   4565     store->set_is_initialization(true);  // Won't be eliminated by DSE. |  | 
|   4566     flow_graph_->InsertAfter(cursor, store, NULL, FlowGraph::kEffect); |  | 
|   4567     cursor = store; |  | 
|   4568   } |  | 
|   4569 } |  | 
|   4570  |  | 
|   4571  |  | 
|   4572 void FlowGraphOptimizer::VisitLoadCodeUnits(LoadCodeUnitsInstr* instr) { |  | 
|   4573   // TODO(zerny): Use kUnboxedUint32 once it is fully supported/optimized. |  | 
|   4574 #if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_ARM) |  | 
|   4575   if (!instr->can_pack_into_smi()) |  | 
|   4576     instr->set_representation(kUnboxedMint); |  | 
|   4577 #endif |  | 
|   4578 } |  | 
|   4579  |  | 
|   4580  |  | 
|   4581 bool FlowGraphOptimizer::TryInlineInstanceSetter(InstanceCallInstr* instr, |  | 
|   4582                                                  const ICData& unary_ic_data) { |  | 
|   4583   ASSERT((unary_ic_data.NumberOfChecks() > 0) && |  | 
|   4584       (unary_ic_data.NumArgsTested() == 1)); |  | 
|   4585   if (FLAG_enable_type_checks) { |  | 
|   4586     // Checked mode setters are inlined like normal methods by conventional |  | 
|   4587     // inlining. |  | 
|   4588     return false; |  | 
|   4589   } |  | 
|   4590  |  | 
|   4591   ASSERT(instr->HasICData()); |  | 
|   4592   if (unary_ic_data.NumberOfChecks() == 0) { |  | 
|   4593     // No type feedback collected. |  | 
|   4594     return false; |  | 
|   4595   } |  | 
|   4596   if (!unary_ic_data.HasOneTarget()) { |  | 
|   4597     // Polymorphic sites are inlined like normal method calls by conventional |  | 
|   4598     // inlining. |  | 
|   4599     return false; |  | 
|   4600   } |  | 
|   4601   Function& target = Function::Handle(I); |  | 
|   4602   intptr_t class_id; |  | 
|   4603   unary_ic_data.GetOneClassCheckAt(0, &class_id, &target); |  | 
|   4604   if (target.kind() != RawFunction::kImplicitSetter) { |  | 
|   4605     // Non-implicit setters are inlined like normal method calls. |  | 
|   4606     return false; |  | 
|   4607   } |  | 
|   4608   // Inline implicit instance setter. |  | 
|   4609   const String& field_name = |  | 
|   4610       String::Handle(I, Field::NameFromSetter(instr->function_name())); |  | 
|   4611   const Field& field = |  | 
|   4612       Field::ZoneHandle(I, GetField(class_id, field_name)); |  | 
|   4613   ASSERT(!field.IsNull()); |  | 
|   4614  |  | 
|   4615   if (InstanceCallNeedsClassCheck(instr, RawFunction::kImplicitSetter)) { |  | 
|   4616     AddReceiverCheck(instr); |  | 
|   4617   } |  | 
|   4618   StoreBarrierType needs_store_barrier = kEmitStoreBarrier; |  | 
|   4619   if (ArgIsAlways(kSmiCid, *instr->ic_data(), 1)) { |  | 
|   4620     InsertBefore(instr, |  | 
|   4621                  new(I) CheckSmiInstr( |  | 
|   4622                      new(I) Value(instr->ArgumentAt(1)), |  | 
|   4623                      instr->deopt_id(), |  | 
|   4624                      instr->token_pos()), |  | 
|   4625                  instr->env(), |  | 
|   4626                  FlowGraph::kEffect); |  | 
|   4627     needs_store_barrier = kNoStoreBarrier; |  | 
|   4628   } |  | 
|   4629  |  | 
|   4630   if (field.guarded_cid() != kDynamicCid) { |  | 
|   4631     InsertBefore(instr, |  | 
|   4632                  new(I) GuardFieldClassInstr( |  | 
|   4633                      new(I) Value(instr->ArgumentAt(1)), |  | 
|   4634                       field, |  | 
|   4635                       instr->deopt_id()), |  | 
|   4636                  instr->env(), |  | 
|   4637                  FlowGraph::kEffect); |  | 
|   4638   } |  | 
|   4639  |  | 
|   4640   if (field.needs_length_check()) { |  | 
|   4641     InsertBefore(instr, |  | 
|   4642                  new(I) GuardFieldLengthInstr( |  | 
|   4643                      new(I) Value(instr->ArgumentAt(1)), |  | 
|   4644                       field, |  | 
|   4645                       instr->deopt_id()), |  | 
|   4646                  instr->env(), |  | 
|   4647                  FlowGraph::kEffect); |  | 
|   4648   } |  | 
|   4649  |  | 
|   4650   // Field guard was detached. |  | 
|   4651   StoreInstanceFieldInstr* store = new(I) StoreInstanceFieldInstr( |  | 
|   4652       field, |  | 
|   4653       new(I) Value(instr->ArgumentAt(0)), |  | 
|   4654       new(I) Value(instr->ArgumentAt(1)), |  | 
|   4655       needs_store_barrier, |  | 
|   4656       instr->token_pos()); |  | 
|   4657  |  | 
|   4658   if (store->IsUnboxedStore()) { |  | 
|   4659     FlowGraph::AddToGuardedFields(flow_graph_->guarded_fields(), &field); |  | 
|   4660   } |  | 
|   4661  |  | 
|   4662   // Discard the environment from the original instruction because the store |  | 
|   4663   // can't deoptimize. |  | 
|   4664   instr->RemoveEnvironment(); |  | 
|   4665   ReplaceCall(instr, store); |  | 
|   4666   return true; |  | 
|   4667 } |  | 
|   4668  |  | 
|   4669  |  | 
|   4670 #if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_IA32) |  | 
|   4671 // The Smi widening pass is only meaningful on platforms where a Smi |  | 
|   4672 // is smaller than 32 bits. For now it is only supported on ARM and ia32. |  | 
|   4673 static bool CanBeWidened(BinarySmiOpInstr* smi_op) { |  | 
|   4674   return BinaryInt32OpInstr::IsSupported(smi_op->op_kind(), |  | 
|   4675                                          smi_op->left(), |  | 
|   4676                                          smi_op->right()); |  | 
|   4677 } |  | 
|   4678  |  | 
|   4679  |  | 
|   4680 static bool BenefitsFromWidening(BinarySmiOpInstr* smi_op) { |  | 
|   4681 // TODO(vegorov): when shifts with non-constant shift counts are supported, |  | 
|   4682 // add them here as we save untagging of the count. |  | 
|   4683   switch (smi_op->op_kind()) { |  | 
|   4684     case Token::kMUL: |  | 
|   4685     case Token::kSHR: |  | 
|   4686       // For kMUL we save untagging of the argument; for kSHR |  | 
|   4687       // we save tagging of the result. |  | 
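|              // E.g. a tagged Smi multiply must first untag one operand, |  | 
|              // while an Int32 multiply works on the raw values directly. |  | 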
|   4688       return true; |  | 
|   4689  |  | 
|   4690     default: |  | 
|   4691       return false; |  | 
|   4692   } |  | 
|   4693 } |  | 
|   4694  |  | 
|   4695  |  | 
|   4696 void FlowGraphOptimizer::WidenSmiToInt32() { |  | 
|   4697   GrowableArray<BinarySmiOpInstr*> candidates; |  | 
|   4698  |  | 
|   4699   // Step 1. Collect all instructions that potentially benefit from widening of |  | 
|   4700   // their operands (or their result) into int32 range. |  | 
|   4701   for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator(); |  | 
|   4702        !block_it.Done(); |  | 
|   4703        block_it.Advance()) { |  | 
|   4704     for (ForwardInstructionIterator instr_it(block_it.Current()); |  | 
|   4705          !instr_it.Done(); |  | 
|   4706          instr_it.Advance()) { |  | 
|   4707       BinarySmiOpInstr* smi_op = instr_it.Current()->AsBinarySmiOp(); |  | 
|   4708       if ((smi_op != NULL) && |  | 
|   4709           BenefitsFromWidening(smi_op) && |  | 
|   4710           CanBeWidened(smi_op)) { |  | 
|   4711         candidates.Add(smi_op); |  | 
|   4712       } |  | 
|   4713     } |  | 
|   4714   } |  | 
|   4715  |  | 
|   4716   if (candidates.is_empty()) { |  | 
|   4717     return; |  | 
|   4718   } |  | 
|   4719  |  | 
|   4720   // Step 2. For each block in the graph compute which loop it belongs to. |  | 
|   4721   // We will use this information later during computation of the widening's |  | 
|   4722   // gain: we are going to assume that only conversions occurring inside the |  | 
|   4723   // same loop should be counted against the gain; all other conversions |  | 
|   4724   // can be hoisted and thus cost nothing compared to the loop cost itself. |  | 
|   4725   const ZoneGrowableArray<BlockEntryInstr*>& loop_headers = |  | 
|   4726       flow_graph()->LoopHeaders(); |  | 
|   4727  |  | 
|   4728   GrowableArray<intptr_t> loops(flow_graph_->preorder().length()); |  | 
|   4729   for (intptr_t i = 0; i < flow_graph_->preorder().length(); i++) { |  | 
|   4730     loops.Add(-1); |  | 
|   4731   } |  | 
|   4732  |  | 
|   4733   for (intptr_t loop_id = 0; loop_id < loop_headers.length(); ++loop_id) { |  | 
|   4734     for (BitVector::Iterator loop_it(loop_headers[loop_id]->loop_info()); |  | 
|   4735          !loop_it.Done(); |  | 
|   4736          loop_it.Advance()) { |  | 
|   4737       loops[loop_it.Current()] = loop_id; |  | 
|   4738     } |  | 
|   4739   } |  | 
|   4740  |  | 
|   4741   // Step 3. For each candidate transitively collect all other BinarySmiOpInstr |  | 
|   4742   // and PhiInstr that depend on it or that it depends on, and count the |  | 
|   4743   // number of untagging operations saved under the assumption that this |  | 
|   4744   // whole graph of values uses kUnboxedInt32 instead of kTagged. |  | 
|   4745   // Convert those graphs that have positive gain to kUnboxedInt32. |  | 
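|          // E.g. a kSHR candidate whose operand comes from a BinaryMintOp and |  | 
|          // whose only use is a Return scores gain = 2 and gets converted. |  | 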
|   4746  |  | 
|   4747   // BitVector containing SSA indexes of all processed definitions. Used to skip |  | 
|   4748   // those candidates that belong to the dependency graph of another candidate. |  | 
|   4749   BitVector* processed = |  | 
|   4750       new(I) BitVector(I, flow_graph_->current_ssa_temp_index()); |  | 
|   4751  |  | 
|   4752   // Worklist used to collect dependency graph. |  | 
|   4753   DefinitionWorklist worklist(flow_graph_, candidates.length()); |  | 
|   4754   for (intptr_t i = 0; i < candidates.length(); i++) { |  | 
|   4755     BinarySmiOpInstr* op = candidates[i]; |  | 
|   4756     if (op->WasEliminated() || processed->Contains(op->ssa_temp_index())) { |  | 
|   4757       continue; |  | 
|   4758     } |  | 
|   4759  |  | 
|   4760     if (FLAG_trace_smi_widening) { |  | 
|   4761       OS::Print("analysing candidate: %s\n", op->ToCString()); |  | 
|   4762     } |  | 
|   4763     worklist.Clear(); |  | 
|   4764     worklist.Add(op); |  | 
|   4765  |  | 
|   4766     // Collect dependency graph. Note: more items are added to worklist |  | 
|   4767     // inside this loop. |  | 
|   4768     intptr_t gain = 0; |  | 
|   4769     for (intptr_t j = 0; j < worklist.definitions().length(); j++) { |  | 
|   4770       Definition* defn = worklist.definitions()[j]; |  | 
|   4771  |  | 
|   4772       if (FLAG_trace_smi_widening) { |  | 
|   4773         OS::Print("> %s\n", defn->ToCString()); |  | 
|   4774       } |  | 
|   4775  |  | 
|   4776       if (defn->IsBinarySmiOp() && |  | 
|   4777           BenefitsFromWidening(defn->AsBinarySmiOp())) { |  | 
|   4778         gain++; |  | 
|   4779         if (FLAG_trace_smi_widening) { |  | 
|   4780           OS::Print("^ [%" Pd "] (o) %s\n", gain, defn->ToCString()); |  | 
|   4781         } |  | 
|   4782       } |  | 
|   4783  |  | 
|   4784       const intptr_t defn_loop = loops[defn->GetBlock()->preorder_number()]; |  | 
|   4785  |  | 
|   4786       // Process all inputs. |  | 
|   4787       for (intptr_t k = 0; k < defn->InputCount(); k++) { |  | 
|   4788         Definition* input = defn->InputAt(k)->definition(); |  | 
|   4789         if (input->IsBinarySmiOp() && |  | 
|   4790             CanBeWidened(input->AsBinarySmiOp())) { |  | 
|   4791           worklist.Add(input); |  | 
|   4792         } else if (input->IsPhi() && (input->Type()->ToCid() == kSmiCid)) { |  | 
|   4793           worklist.Add(input); |  | 
|   4794         } else if (input->IsBinaryMintOp()) { |  | 
|   4795           // A Mint operation produces an untagged result, so we avoid tagging. |  | 
|   4796           gain++; |  | 
|   4797           if (FLAG_trace_smi_widening) { |  | 
|   4798             OS::Print("^ [%" Pd "] (i) %s\n", gain, input->ToCString()); |  | 
|   4799           } |  | 
|   4800         } else if (defn_loop == loops[input->GetBlock()->preorder_number()] && |  | 
|   4801                    (input->Type()->ToCid() == kSmiCid)) { |  | 
|   4802           // Input comes from the same loop, is known to be smi and requires |  | 
|   4803           // untagging. |  | 
|   4804           // TODO(vegorov) this heuristic assumes that values that are not |  | 
|   4805           // known to be smi have to be checked and this check can be |  | 
|   4806           // coalesced with untagging. Start coalescing them. |  | 
|   4807           gain--; |  | 
|   4808           if (FLAG_trace_smi_widening) { |  | 
|   4809             OS::Print("v [%" Pd "] (i) %s\n", gain, input->ToCString()); |  | 
|   4810           } |  | 
|   4811         } |  | 
|   4812       } |  | 
|   4813  |  | 
|   4814       // Process all uses. |  | 
|   4815       for (Value* use = defn->input_use_list(); |  | 
|   4816            use != NULL; |  | 
|   4817            use = use->next_use()) { |  | 
|   4818         Instruction* instr = use->instruction(); |  | 
|   4819         Definition* use_defn = instr->AsDefinition(); |  | 
|   4820         if (use_defn == NULL) { |  | 
|   4821           // We assume that tagging before returning or pushing an argument |  | 
|   4822           // costs very little compared to the cost of the return/call itself. |  | 
|   4823           if (!instr->IsReturn() && !instr->IsPushArgument()) { |  | 
|   4824             gain--; |  | 
|   4825             if (FLAG_trace_smi_widening) { |  | 
|   4826               OS::Print("v [%" Pd "] (u) %s\n", |  | 
|   4827                         gain, |  | 
|   4828                         use->instruction()->ToCString()); |  | 
|   4829             } |  | 
|   4830           } |  | 
|   4831           continue; |  | 
|   4832         } else if (use_defn->IsBinarySmiOp() && |  | 
|   4833                    CanBeWidened(use_defn->AsBinarySmiOp())) { |  | 
|   4834           worklist.Add(use_defn); |  | 
|   4835         } else if (use_defn->IsPhi() && |  | 
|   4836                    use_defn->AsPhi()->Type()->ToCid() == kSmiCid) { |  | 
|   4837           worklist.Add(use_defn); |  | 
|   4838         } else if (use_defn->IsBinaryMintOp()) { |  | 
|   4839           // BinaryMintOp requires untagging of its inputs. |  | 
|   4840           // Converting kUnboxedInt32 to kUnboxedMint is essentially a |  | 
|   4841           // zero-cost sign-extension operation. |  | 
|   4842           gain++; |  | 
|   4843           if (FLAG_trace_smi_widening) { |  | 
|   4844             OS::Print("^ [%" Pd "] (u) %s\n", |  | 
|   4845                       gain, |  | 
|   4846                       use->instruction()->ToCString()); |  | 
|   4847           } |  | 
|   4848         } else if (defn_loop == loops[instr->GetBlock()->preorder_number()]) { |  | 
|   4849           gain--; |  | 
|   4850           if (FLAG_trace_smi_widening) { |  | 
|   4851             OS::Print("v [%" Pd "] (u) %s\n", |  | 
|   4852                       gain, |  | 
|   4853                       use->instruction()->ToCString()); |  | 
|   4854           } |  | 
|   4855         } |  | 
|   4856       } |  | 
|   4857     } |  | 
|   4858  |  | 
|   4859     processed->AddAll(worklist.contains_vector()); |  | 
|   4860  |  | 
|   4861     if (FLAG_trace_smi_widening) { |  | 
|   4862       OS::Print("~ %s gain %" Pd "\n", op->ToCString(), gain); |  | 
|   4863     } |  | 
|   4864  |  | 
|   4865     if (gain > 0) { |  | 
|   4866       // We have positive gain from widening. Convert all BinarySmiOpInstr into |  | 
|   4867       // BinaryInt32OpInstr and set representation of all phis to kUnboxedInt32. |  | 
|   4868       for (intptr_t j = 0; j < worklist.definitions().length(); j++) { |  | 
|   4869         Definition* defn = worklist.definitions()[j]; |  | 
|   4870         ASSERT(defn->IsPhi() || defn->IsBinarySmiOp()); |  | 
|   4871  |  | 
|   4872         if (defn->IsBinarySmiOp()) { |  | 
|   4873           BinarySmiOpInstr* smi_op = defn->AsBinarySmiOp(); |  | 
|   4874           BinaryInt32OpInstr* int32_op = new(I) BinaryInt32OpInstr( |  | 
|   4875             smi_op->op_kind(), |  | 
|   4876             smi_op->left()->CopyWithType(), |  | 
|   4877             smi_op->right()->CopyWithType(), |  | 
|   4878             smi_op->DeoptimizationTarget()); |  | 
|   4879  |  | 
|   4880           smi_op->ReplaceWith(int32_op, NULL); |  | 
|   4881         } else if (defn->IsPhi()) { |  | 
|   4882           defn->AsPhi()->set_representation(kUnboxedInt32); |  | 
|   4883           ASSERT(defn->Type()->IsInt()); |  | 
|   4884         } |  | 
|   4885       } |  | 
|   4886     } |  | 
|   4887   } |  | 
|   4888 } |  | 
|   4889 #else |  | 
|   4890 void FlowGraphOptimizer::WidenSmiToInt32() { |  | 
|   4891   // TODO(vegorov) ideally on 64-bit platforms we would like to narrow smi |  | 
|   4892   // operations to 32-bit where it saves tagging and untagging and allows |  | 
|   4893   // us to use shorter (and faster) instructions. But we currently don't |  | 
|   4894   // save enough range information in the ICData to drive this decision. |  | 
|   4895 } |  | 
|   4896 #endif |  | 
|   4897  |  | 
|   4898 void FlowGraphOptimizer::InferIntRanges() { |  | 
|   4899   RangeAnalysis range_analysis(flow_graph_); |  | 
|   4900   range_analysis.Analyze(); |  | 
|   4901 } |  | 
|   4902  |  | 
|   4903  |  | 
|   4904 void TryCatchAnalyzer::Optimize(FlowGraph* flow_graph) { |  | 
|   4905   // For every catch-block: Iterate over all call instructions inside the |  | 
|   4906   // corresponding try-block and figure out, for each environment value, |  | 
|   4907   // whether it is the same constant at all calls. If so, replace the |  | 
|   4908   // initial definition at the catch-entry with this constant. |  | 
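|          // E.g. for (hypothetical) Dart code |  | 
|          //   var x = 5; |  | 
|          //   try { foo(); bar(); } catch (e) { print(x); } |  | 
|          // x is the constant 5 in the environment of every call that may |  | 
|          // throw, so the initial definition of x at the catch-entry can be |  | 
|          // replaced with that constant. |  | 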
|   4909   const GrowableArray<CatchBlockEntryInstr*>& catch_entries = |  | 
|   4910       flow_graph->graph_entry()->catch_entries(); |  | 
|   4911   intptr_t base = kFirstLocalSlotFromFp + flow_graph->num_non_copied_params(); |  | 
|   4912   for (intptr_t catch_idx = 0; |  | 
|   4913        catch_idx < catch_entries.length(); |  | 
|   4914        ++catch_idx) { |  | 
|   4915     CatchBlockEntryInstr* catch_entry = catch_entries[catch_idx]; |  | 
|   4916  |  | 
|   4917     // Initialize cdefs with the original initial definitions (ParameterInstr). |  | 
|   4918     // The following representation is used: |  | 
|   4919     // ParameterInstr => unknown |  | 
|   4920     // ConstantInstr => known constant |  | 
|   4921     // NULL => non-constant |  | 
|   4922     GrowableArray<Definition*>* idefs = catch_entry->initial_definitions(); |  | 
|   4923     GrowableArray<Definition*> cdefs(idefs->length()); |  | 
|   4924     cdefs.AddArray(*idefs); |  | 
|   4925  |  | 
|   4926     // exception_var and stacktrace_var are never constant. |  | 
|   4927     intptr_t ex_idx = base - catch_entry->exception_var().index(); |  | 
|   4928     intptr_t st_idx = base - catch_entry->stacktrace_var().index(); |  | 
|   4929     cdefs[ex_idx] = cdefs[st_idx] = NULL; |  | 
|   4930  |  | 
|   4931     for (BlockIterator block_it = flow_graph->reverse_postorder_iterator(); |  | 
|   4932          !block_it.Done(); |  | 
|   4933          block_it.Advance()) { |  | 
|   4934       BlockEntryInstr* block = block_it.Current(); |  | 
|   4935       if (block->try_index() == catch_entry->catch_try_index()) { |  | 
|   4936         for (ForwardInstructionIterator instr_it(block); |  | 
|   4937              !instr_it.Done(); |  | 
|   4938              instr_it.Advance()) { |  | 
|   4939           Instruction* current = instr_it.Current(); |  | 
|   4940           if (current->MayThrow()) { |  | 
|   4941             Environment* env = current->env()->Outermost(); |  | 
|   4942             ASSERT(env != NULL); |  | 
|   4943             for (intptr_t env_idx = 0; env_idx < cdefs.length(); ++env_idx) { |  | 
|   4944               if (cdefs[env_idx] != NULL && |  | 
|   4945                   env->ValueAt(env_idx)->BindsToConstant()) { |  | 
|   4946                 cdefs[env_idx] = env->ValueAt(env_idx)->definition(); |  | 
|   4947               } |  | 
|   4948               if (cdefs[env_idx] != env->ValueAt(env_idx)->definition()) { |  | 
|   4949                 cdefs[env_idx] = NULL; |  | 
|   4950               } |  | 
|   4951             } |  | 
|   4952           } |  | 
|   4953         } |  | 
|   4954       } |  | 
|   4955     } |  | 
|   4956     for (intptr_t j = 0; j < idefs->length(); ++j) { |  | 
|   4957       if (cdefs[j] != NULL && cdefs[j]->IsConstant()) { |  | 
|   4958         // TODO(fschneider): Use constants from the constant pool. |  | 
|   4959         Definition* old = (*idefs)[j]; |  | 
|   4960         ConstantInstr* orig = cdefs[j]->AsConstant(); |  | 
|   4961         ConstantInstr* copy = |  | 
|   4962             new(flow_graph->isolate()) ConstantInstr(orig->value()); |  | 
|   4963         copy->set_ssa_temp_index(flow_graph->alloc_ssa_temp_index()); |  | 
|   4964         old->ReplaceUsesWith(copy); |  | 
|   4965         (*idefs)[j] = copy; |  | 
|   4966       } |  | 
|   4967     } |  | 
|   4968   } |  | 
|   4969 } |  | 
|   4970  |  | 
|   4971  |  | 
|   4972 LICM::LICM(FlowGraph* flow_graph) : flow_graph_(flow_graph) { |  | 
|   4973   ASSERT(flow_graph->is_licm_allowed()); |  | 
|   4974 } |  | 
|   4975  |  | 
|   4976  |  | 
|   4977 void LICM::Hoist(ForwardInstructionIterator* it, |  | 
|   4978                  BlockEntryInstr* pre_header, |  | 
|   4979                  Instruction* current) { |  | 
|   4980   if (current->IsCheckClass()) { |  | 
|   4981     current->AsCheckClass()->set_licm_hoisted(true); |  | 
|   4982   } else if (current->IsCheckSmi()) { |  | 
|   4983     current->AsCheckSmi()->set_licm_hoisted(true); |  | 
|   4984   } else if (current->IsCheckEitherNonSmi()) { |  | 
|   4985     current->AsCheckEitherNonSmi()->set_licm_hoisted(true); |  | 
|   4986   } else if (current->IsCheckArrayBound()) { |  | 
|   4987     current->AsCheckArrayBound()->set_licm_hoisted(true); |  | 
|   4988   } |  | 
|   4989   if (FLAG_trace_optimization) { |  | 
|   4990     OS::Print("Hoisting instruction %s:%" Pd " from B%" Pd " to B%" Pd "\n", |  | 
|   4991               current->DebugName(), |  | 
|   4992               current->GetDeoptId(), |  | 
|   4993               current->GetBlock()->block_id(), |  | 
|   4994               pre_header->block_id()); |  | 
|   4995   } |  | 
|   4996   // Move the instruction out of the loop. |  | 
|   4997   current->RemoveEnvironment(); |  | 
|   4998   if (it != NULL) { |  | 
|   4999     it->RemoveCurrentFromGraph(); |  | 
|   5000   } else { |  | 
|   5001     current->RemoveFromGraph(); |  | 
|   5002   } |  | 
|   5003   GotoInstr* last = pre_header->last_instruction()->AsGoto(); |  | 
|   5004   // Using kind kEffect will not assign a fresh ssa temporary index. |  | 
|   5005   flow_graph()->InsertBefore(last, current, last->env(), FlowGraph::kEffect); |  | 
|   5006   current->CopyDeoptIdFrom(*last); |  | 
|   5007 } |  | 
|   5008  |  | 
|   5009  |  | 
|   5010 void LICM::TrySpecializeSmiPhi(PhiInstr* phi, |  | 
|   5011                                BlockEntryInstr* header, |  | 
|   5012                                BlockEntryInstr* pre_header) { |  | 
|   5013   if (phi->Type()->ToCid() == kSmiCid) { |  | 
|   5014     return; |  | 
|   5015   } |  | 
|   5016  |  | 
|   5017   // Check if there is only a single kDynamicCid input to the phi that |  | 
|   5018   // comes from the pre-header. |  | 
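|          // E.g. for a loop phi i = phi(i0, i + 1) where only the initial |  | 
|          // value i0 coming from the pre-header has an unknown class, an |  | 
|          // existing CheckSmi of the phi can be hoisted to the pre-header and |  | 
|          // redirected to check i0, after which the phi can be typed as smi. |  | 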
|   5019   const intptr_t kNotFound = -1; |  | 
|   5020   intptr_t non_smi_input = kNotFound; |  | 
|   5021   for (intptr_t i = 0; i < phi->InputCount(); ++i) { |  | 
|   5022     Value* input = phi->InputAt(i); |  | 
|   5023     if (input->Type()->ToCid() != kSmiCid) { |  | 
|   5024       if ((non_smi_input != kNotFound) || |  | 
|   5025           (input->Type()->ToCid() != kDynamicCid)) { |  | 
|   5026         // There are multiple kDynamicCid inputs or there is an input that is |  | 
|   5027         // known to be non-smi. |  | 
|   5028         return; |  | 
|   5029       } else { |  | 
|   5030         non_smi_input = i; |  | 
|   5031       } |  | 
|   5032     } |  | 
|   5033   } |  | 
|   5034  |  | 
|   5035   if ((non_smi_input == kNotFound) || |  | 
|   5036       (phi->block()->PredecessorAt(non_smi_input) != pre_header)) { |  | 
|   5037     return; |  | 
|   5038   } |  | 
|   5039  |  | 
|   5040   CheckSmiInstr* check = NULL; |  | 
|   5041   for (Value* use = phi->input_use_list(); |  | 
|   5042        (use != NULL) && (check == NULL); |  | 
|   5043        use = use->next_use()) { |  | 
|   5044     check = use->instruction()->AsCheckSmi(); |  | 
|   5045   } |  | 
|   5046  |  | 
|   5047   if (check == NULL) { |  | 
|   5048     return; |  | 
|   5049   } |  | 
|   5050  |  | 
|   5051   // Hoist the CheckSmi instruction and make this phi a smi one. |  | 
|   5052   Hoist(NULL, pre_header, check); |  | 
|   5053  |  | 
|   5054   // Replace the value we are checking with the phi's input. |  | 
|   5055   check->value()->BindTo(phi->InputAt(non_smi_input)->definition()); |  | 
|   5056  |  | 
|   5057   phi->UpdateType(CompileType::FromCid(kSmiCid)); |  | 
|   5058 } |  | 
|   5059  |  | 
|   5060  |  | 
|   5061 // Load instructions handled by load elimination. |  | 
|   5062 static bool IsLoadEliminationCandidate(Instruction* instr) { |  | 
|   5063   return instr->IsLoadField() |  | 
|   5064       || instr->IsLoadIndexed() |  | 
|   5065       || instr->IsLoadStaticField(); |  | 
|   5066 } |  | 
|   5067  |  | 
|   5068  |  | 
|   5069 static bool IsLoopInvariantLoad(ZoneGrowableArray<BitVector*>* sets, |  | 
|   5070                                 intptr_t loop_header_index, |  | 
|   5071                                 Instruction* instr) { |  | 
|   5072   return IsLoadEliminationCandidate(instr) && |  | 
|   5073       (sets != NULL) && |  | 
|   5074       instr->HasPlaceId() && |  | 
|   5075       ((*sets)[loop_header_index] != NULL) && |  | 
|   5076       (*sets)[loop_header_index]->Contains(instr->place_id()); |  | 
|   5077 } |  | 
|   5078  |  | 
|   5079  |  | 
|   5080 void LICM::OptimisticallySpecializeSmiPhis() { |  | 
|   5081   if (!flow_graph()->parsed_function()->function(). |  | 
|   5082           allows_hoisting_check_class()) { |  | 
|   5083     // Do not hoist anything. |  | 
|   5084     return; |  | 
|   5085   } |  | 
|   5086  |  | 
|   5087   const ZoneGrowableArray<BlockEntryInstr*>& loop_headers = |  | 
|   5088       flow_graph()->LoopHeaders(); |  | 
|   5089  |  | 
|   5090   for (intptr_t i = 0; i < loop_headers.length(); ++i) { |  | 
|   5091     JoinEntryInstr* header = loop_headers[i]->AsJoinEntry(); |  | 
|   5092     // Skip loops that don't have a pre-header block. |  | 
|   5093     BlockEntryInstr* pre_header = header->ImmediateDominator(); |  | 
|   5094     if (pre_header == NULL) continue; |  | 
|   5095  |  | 
|   5096     for (PhiIterator it(header); !it.Done(); it.Advance()) { |  | 
|   5097       TrySpecializeSmiPhi(it.Current(), header, pre_header); |  | 
|   5098     } |  | 
|   5099   } |  | 
|   5100 } |  | 
|   5101  |  | 
|   5102  |  | 
|   5103 void LICM::Optimize() { |  | 
|   5104   if (!flow_graph()->parsed_function()->function(). |  | 
|   5105           allows_hoisting_check_class()) { |  | 
|   5106     // Do not hoist anything. |  | 
|   5107     return; |  | 
|   5108   } |  | 
|   5109  |  | 
|   5110   const ZoneGrowableArray<BlockEntryInstr*>& loop_headers = |  | 
|   5111       flow_graph()->LoopHeaders(); |  | 
|   5112  |  | 
|   5113   ZoneGrowableArray<BitVector*>* loop_invariant_loads = |  | 
|   5114       flow_graph()->loop_invariant_loads(); |  | 
|   5115  |  | 
|   5116   BlockEffects* block_effects = flow_graph()->block_effects(); |  | 
|   5117  |  | 
|   5118   for (intptr_t i = 0; i < loop_headers.length(); ++i) { |  | 
|   5119     BlockEntryInstr* header = loop_headers[i]; |  | 
|   5120     // Skip loops that don't have a pre-header block. |  | 
|   5121     BlockEntryInstr* pre_header = header->ImmediateDominator(); |  | 
|   5122     if (pre_header == NULL) continue; |  | 
|   5123  |  | 
|   5124     for (BitVector::Iterator loop_it(header->loop_info()); |  | 
|   5125          !loop_it.Done(); |  | 
|   5126          loop_it.Advance()) { |  | 
|   5127       BlockEntryInstr* block = flow_graph()->preorder()[loop_it.Current()]; |  | 
|   5128       for (ForwardInstructionIterator it(block); |  | 
|   5129            !it.Done(); |  | 
|   5130            it.Advance()) { |  | 
|   5131         Instruction* current = it.Current(); |  | 
|   5132         if ((current->AllowsCSE() && |  | 
|   5133              block_effects->CanBeMovedTo(current, pre_header)) || |  | 
|   5134             IsLoopInvariantLoad(loop_invariant_loads, i, current)) { |  | 
|   5135           bool inputs_loop_invariant = true; |  | 
|   5136           for (int i = 0; i < current->InputCount(); ++i) { |  | 
|   5137             Definition* input_def = current->InputAt(i)->definition(); |  | 
|   5138             if (!input_def->GetBlock()->Dominates(pre_header)) { |  | 
|   5139               inputs_loop_invariant = false; |  | 
|   5140               break; |  | 
|   5141             } |  | 
|   5142           } |  | 
|   5143           if (inputs_loop_invariant && |  | 
|   5144               !current->IsAssertAssignable() && |  | 
|   5145               !current->IsAssertBoolean()) { |  | 
|   5146             // TODO(fschneider): Enable hoisting of Assert-instructions |  | 
|   5147             // if it is safe to do so. |  | 
|   5148             Hoist(&it, pre_header, current); |  | 
|   5149           } |  | 
|   5150         } |  | 
|   5151       } |  | 
|   5152     } |  | 
|   5153   } |  | 
|   5154 } |  | 
|   5155  |  | 
|   5156  |  | 
|   5157 // Place describes an abstract location (e.g. field) that IR can load |  | 
|   5158 // from or store to. |  | 
|   5159 // |  | 
|   5160 // Places are also used to describe wild-card locations, known as aliases, |  | 
|   5161 // that essentially represent sets of places that alias each other. Places |  | 
|   5162 // A and B alias each other if a store into A can affect a load from B. |  | 
|   5163 // |  | 
|   5164 // We distinguish the following aliases: |  | 
|   5165 // |  | 
|   5166 //   - for fields |  | 
|   5167 //     - *.f, *.@offs - field inside some object; |  | 
|   5168 //     - X.f, X.@offs - field inside an allocated object X; |  | 
|   5169 //   - for indexed accesses |  | 
|   5170 //     - *[*] - non-constant index inside some object; |  | 
|   5171 //     - *[C] - constant index inside some object; |  | 
|   5172 //     - X[*] - non-constant index inside an allocated object X; |  | 
|   5173 //     - X[C] - constant index inside an allocated object X. |  | 
|   5174 // |  | 
|   5175 // Separating allocations from other objects improves precision of the |  | 
|   5176 // load forwarding pass because of the following two properties: |  | 
|   5177 // |  | 
|   5178 //   - if X can be proven to have no aliases itself (i.e. there is no other SSA |  | 
|   5179 //     variable that points to X) then no place inside X can be aliased with any |  | 
|   5180 //     wildcard dependent place (*.f, *.@offs, *[*], *[C]); |  | 
|   5181 //   - given two allocations X and Y, no place inside X can be aliased with |  | 
|   5182 //     any place inside Y, even if either or both of them escape. |  | 
|   5183 // |  | 
|   5184 // It is important to realize that a single place can belong to multiple |  | 
|   5185 // aliases. For example, place X.f with an aliased allocation X belongs to |  | 
|   5186 // both the X.f and *.f aliases. Likewise X[C] with a non-aliased allocation |  | 
|   5187 // X belongs to both the X[C] and X[*] aliases. |  | 
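|        // |  | 
|        // For example, in the (hypothetical) Dart code |  | 
|        //   var a = new A();   // an allocation with no aliases |  | 
|        //   a.f = 1; |  | 
|        //   b.f = 2;           // b is, say, an incoming parameter |  | 
|        //   return a.f; |  | 
|        // the store to b.f belongs to the *.f alias, but the place a.f does |  | 
|        // not, so the load of a.f can be forwarded from "a.f = 1". |  | 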
|   5188 // |  | 
|   5189 class Place : public ValueObject { |  | 
|   5190  public: |  | 
|   5191   enum Kind { |  | 
|   5192     kNone, |  | 
|   5193  |  | 
|   5194     // Field location. For instance fields it is represented as a pair of |  | 
|   5195     // a Field object and an instance (SSA definition) that is being accessed. |  | 
|   5196     // For static fields instance is NULL. |  | 
|   5197     kField, |  | 
|   5198  |  | 
|   5199     // VMField location. Represented as a pair of an instance (SSA definition) |  | 
|   5200     // being accessed and the offset of the field within it. |  | 
|   5201     kVMField, |  | 
|   5202  |  | 
|   5203     // Indexed location with a non-constant index. |  | 
|   5204     kIndexed, |  | 
|   5205  |  | 
|   5206     // Indexed location with a constant index. |  | 
|   5207     kConstantIndexed, |  | 
|   5208   }; |  | 
|   5209  |  | 
|   5210   Place(const Place& other) |  | 
|   5211       : ValueObject(), |  | 
|   5212         kind_(other.kind_), |  | 
|   5213         representation_(other.representation_), |  | 
|   5214         instance_(other.instance_), |  | 
|   5215         raw_selector_(other.raw_selector_), |  | 
|   5216         id_(other.id_) { |  | 
|   5217   } |  | 
|   5218  |  | 
|   5219   // Construct a place from an instruction if the instruction accesses any |  | 
|   5220   // place. Otherwise construct a kNone place. |  | 
|   5221   Place(Instruction* instr, bool* is_load, bool* is_store) |  | 
|   5222       : kind_(kNone), |  | 
|   5223         representation_(kNoRepresentation), |  | 
|   5224         instance_(NULL), |  | 
|   5225         raw_selector_(0), |  | 
|   5226         id_(0) { |  | 
|   5227     switch (instr->tag()) { |  | 
|   5228       case Instruction::kLoadField: { |  | 
|   5229         LoadFieldInstr* load_field = instr->AsLoadField(); |  | 
|   5230         representation_ = load_field->representation(); |  | 
|   5231         instance_ = load_field->instance()->definition()->OriginalDefinition(); |  | 
|   5232         if (load_field->field() != NULL) { |  | 
|   5233           kind_ = kField; |  | 
|   5234           field_ = load_field->field(); |  | 
|   5235         } else { |  | 
|   5236           kind_ = kVMField; |  | 
|   5237           offset_in_bytes_ = load_field->offset_in_bytes(); |  | 
|   5238         } |  | 
|   5239         *is_load = true; |  | 
|   5240         break; |  | 
|   5241       } |  | 
|   5242  |  | 
|   5243       case Instruction::kStoreInstanceField: { |  | 
|   5244         StoreInstanceFieldInstr* store = |  | 
|   5245             instr->AsStoreInstanceField(); |  | 
|   5246         representation_ = store->RequiredInputRepresentation( |  | 
|   5247             StoreInstanceFieldInstr::kValuePos); |  | 
|   5248         instance_ = store->instance()->definition()->OriginalDefinition(); |  | 
|   5249         if (!store->field().IsNull()) { |  | 
|   5250           kind_ = kField; |  | 
|   5251           field_ = &store->field(); |  | 
|   5252         } else { |  | 
|   5253           kind_ = kVMField; |  | 
|   5254           offset_in_bytes_ = store->offset_in_bytes(); |  | 
|   5255         } |  | 
|   5256         *is_store = true; |  | 
|   5257         break; |  | 
|   5258       } |  | 
|   5259  |  | 
|   5260       case Instruction::kLoadStaticField: |  | 
|   5261         kind_ = kField; |  | 
|   5262         representation_ = instr->AsLoadStaticField()->representation(); |  | 
|   5263         field_ = &instr->AsLoadStaticField()->StaticField(); |  | 
|   5264         *is_load = true; |  | 
|   5265         break; |  | 
|   5266  |  | 
|   5267       case Instruction::kStoreStaticField: |  | 
|   5268         kind_ = kField; |  | 
|   5269         representation_ = instr->AsStoreStaticField()-> |  | 
|   5270             RequiredInputRepresentation(StoreStaticFieldInstr::kValuePos); |  | 
|   5271         field_ = &instr->AsStoreStaticField()->field(); |  | 
|   5272         *is_store = true; |  | 
|   5273         break; |  | 
|   5274  |  | 
|   5275       case Instruction::kLoadIndexed: { |  | 
|   5276         LoadIndexedInstr* load_indexed = instr->AsLoadIndexed(); |  | 
|   5277         representation_ = load_indexed->representation(); |  | 
|   5278         instance_ = load_indexed->array()->definition()->OriginalDefinition(); |  | 
|   5279         SetIndex(load_indexed->index()->definition()); |  | 
|   5280         *is_load = true; |  | 
|   5281         break; |  | 
|   5282       } |  | 
|   5283  |  | 
|   5284       case Instruction::kStoreIndexed: { |  | 
|   5285         StoreIndexedInstr* store_indexed = instr->AsStoreIndexed(); |  | 
|   5286         representation_ = store_indexed-> |  | 
|   5287             RequiredInputRepresentation(StoreIndexedInstr::kValuePos); |  | 
|   5288         instance_ = store_indexed->array()->definition()->OriginalDefinition(); |  | 
|   5289         SetIndex(store_indexed->index()->definition()); |  | 
|   5290         *is_store = true; |  | 
|   5291         break; |  | 
|   5292       } |  | 
|   5293  |  | 
|   5294       default: |  | 
|   5295         break; |  | 
|   5296     } |  | 
|   5297   } |  | 
|   5298  |  | 
|   5299   // Create object representing *[*] alias. |  | 
|   5300   static Place* CreateAnyInstanceAnyIndexAlias(Isolate* isolate, |  | 
|   5301                                                intptr_t id) { |  | 
|   5302     return Wrap(isolate, Place(kIndexed, NULL, 0), id); |  | 
|   5303   } |  | 
|   5304  |  | 
|   5305   // Return the least generic alias for this place. Given that aliases are |  | 
|   5306   // essentially sets of places, we define the least generic alias as the |  | 
|   5307   // smallest alias that contains this place. |  | 
|   5308   // |  | 
|   5309   // We obtain such alias by a simple transformation: |  | 
|   5310   // |  | 
|   5311   //    - for places that depend on an instance X.f, X.@offs, X[i], X[C] |  | 
|   5312   //      we drop X if X is not an allocation because in this case X does not |  | 
|   5313 //      possess an identity, obtaining aliases *.f, *.@offs, *[i] and *[C] |  | 
|   5314   //      respectively; |  | 
|   5315   //    - for non-constant indexed places X[i] we drop information about the |  | 
|   5316   //      index obtaining alias X[*]. |  | 
|   5317   // |  | 
|   5318   Place ToAlias() const { |  | 
|   5319     return Place( |  | 
|   5320         kind_, |  | 
|   5321         (DependsOnInstance() && IsAllocation(instance())) ? instance() : NULL, |  | 
|   5322         (kind() == kIndexed) ? 0 : raw_selector_); |  | 
|   5323   } |  | 
|   5324  |  | 
|   5325   bool DependsOnInstance() const { |  | 
|   5326     switch (kind()) { |  | 
|   5327       case kField: |  | 
|   5328       case kVMField: |  | 
|   5329       case kIndexed: |  | 
|   5330       case kConstantIndexed: |  | 
|   5331         return true; |  | 
|   5332  |  | 
|   5333       case kNone: |  | 
|   5334         return false; |  | 
|   5335     } |  | 
|   5336  |  | 
|   5337     UNREACHABLE(); |  | 
|   5338     return false; |  | 
|   5339   } |  | 
|   5340  |  | 
|   5341   // Given instance dependent alias X.f, X.@offs, X[C], X[*] return |  | 
|   5342   // wild-card dependent alias *.f, *.@offs, *[C] or *[*] respectively. |  | 
|   5343   Place CopyWithoutInstance() const { |  | 
|   5344     ASSERT(DependsOnInstance()); |  | 
|   5345     return Place(kind_, NULL, raw_selector_); |  | 
|   5346   } |  | 
|   5347  |  | 
|   5348   // Given alias X[C] or *[C] return X[*] and *[*] respectively. |  | 
|   5349   Place CopyWithoutIndex() const { |  | 
|   5350     ASSERT(kind_ == kConstantIndexed); |  | 
|   5351     return Place(kIndexed, instance_, 0); |  | 
|   5352   } |  | 
|   5353  |  | 
|   5354   intptr_t id() const { return id_; } |  | 
|   5355  |  | 
|   5356   Kind kind() const { return kind_; } |  | 
|   5357  |  | 
|   5358   Representation representation() const { return representation_; } |  | 
|   5359  |  | 
|   5360   Definition* instance() const { |  | 
|   5361     ASSERT(DependsOnInstance()); |  | 
|   5362     return instance_; |  | 
|   5363   } |  | 
|   5364  |  | 
|   5365   void set_instance(Definition* def) { |  | 
|   5366     ASSERT(DependsOnInstance()); |  | 
|   5367     instance_ = def->OriginalDefinition(); |  | 
|   5368   } |  | 
|   5369  |  | 
|   5370   const Field& field() const { |  | 
|   5371     ASSERT(kind_ == kField); |  | 
|   5372     return *field_; |  | 
|   5373   } |  | 
|   5374  |  | 
|   5375   intptr_t offset_in_bytes() const { |  | 
|   5376     ASSERT(kind_ == kVMField); |  | 
|   5377     return offset_in_bytes_; |  | 
|   5378   } |  | 
|   5379  |  | 
|   5380   Definition* index() const { |  | 
|   5381     ASSERT(kind_ == kIndexed); |  | 
|   5382     return index_; |  | 
|   5383   } |  | 
|   5384  |  | 
|   5385   intptr_t index_constant() const { |  | 
|   5386     ASSERT(kind_ == kConstantIndexed); |  | 
|   5387     return index_constant_; |  | 
|   5388   } |  | 
|   5389  |  | 
|   5390   static const char* DefinitionName(Definition* def) { |  | 
|   5391     if (def == NULL) { |  | 
|   5392       return "*"; |  | 
|   5393     } else { |  | 
|   5394       return Isolate::Current()->current_zone()->PrintToString( |  | 
|   5395             "v%" Pd, def->ssa_temp_index()); |  | 
|   5396     } |  | 
|   5397   } |  | 
|   5398  |  | 
|   5399   const char* ToCString() const { |  | 
|   5400     switch (kind_) { |  | 
|   5401       case kNone: |  | 
|   5402         return "<none>"; |  | 
|   5403  |  | 
|   5404       case kField: { |  | 
|   5405         const char* field_name = String::Handle(field().name()).ToCString(); |  | 
|   5406         if (field().is_static()) { |  | 
|   5407           return Isolate::Current()->current_zone()->PrintToString( |  | 
|   5408               "<%s>", field_name); |  | 
|   5409         } else { |  | 
|   5410           return Isolate::Current()->current_zone()->PrintToString( |  | 
|   5411               "<%s.%s>", DefinitionName(instance()), field_name); |  | 
|   5412         } |  | 
|   5413       } |  | 
|   5414  |  | 
|   5415       case kVMField: |  | 
|   5416         return Isolate::Current()->current_zone()->PrintToString( |  | 
|   5417             "<%s.@%" Pd ">", |  | 
|   5418             DefinitionName(instance()), |  | 
|   5419             offset_in_bytes()); |  | 
|   5420  |  | 
|   5421       case kIndexed: |  | 
|   5422         return Isolate::Current()->current_zone()->PrintToString( |  | 
|   5423             "<%s[%s]>", |  | 
|   5424             DefinitionName(instance()), |  | 
|   5425             DefinitionName(index())); |  | 
|   5426  |  | 
|   5427       case kConstantIndexed: |  | 
|   5428         return Isolate::Current()->current_zone()->PrintToString( |  | 
|   5429             "<%s[%" Pd "]>", |  | 
|   5430             DefinitionName(instance()), |  | 
|   5431             index_constant()); |  | 
|   5432     } |  | 
|   5433     UNREACHABLE(); |  | 
|   5434     return "<?>"; |  | 
|   5435   } |  | 
|   5436  |  | 
|   5437   bool IsFinalField() const { |  | 
|   5438     return (kind() == kField) && field().is_final(); |  | 
|   5439   } |  | 
|   5440  |  | 
|   5441   intptr_t Hashcode() const { |  | 
|   5442     return (kind_ * 63 + reinterpret_cast<intptr_t>(instance_)) * 31 + |  | 
|   5443         representation_ * 15 + FieldHashcode(); |  | 
|   5444   } |  | 
|   5445  |  | 
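|          // Note: places that compare Equals must produce the same Hashcode; |  | 
|          // both are derived from the kind, representation, instance and |  | 
|          // field/offset selector. |  | 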
|   5446   bool Equals(const Place* other) const { |  | 
|   5447     return (kind_ == other->kind_) && |  | 
|   5448         (representation_ == other->representation_) && |  | 
|   5449         (instance_ == other->instance_) && |  | 
|   5450         SameField(other); |  | 
|   5451   } |  | 
|   5452  |  | 
|   5453   // Create a zone allocated copy of this place and assign given id to it. |  | 
|   5454   static Place* Wrap(Isolate* isolate, const Place& place, intptr_t id); |  | 
|   5455  |  | 
|   5456   static bool IsAllocation(Definition* defn) { |  | 
|   5457     return (defn != NULL) && |  | 
|   5458         (defn->IsAllocateObject() || |  | 
|   5459          defn->IsCreateArray() || |  | 
|   5460          defn->IsAllocateUninitializedContext() || |  | 
|   5461          (defn->IsStaticCall() && |  | 
|   5462           defn->AsStaticCall()->IsRecognizedFactory())); |  | 
|   5463   } |  | 
|   5464  |  | 
|   5465  private: |  | 
|   5466   Place(Kind kind, Definition* instance, intptr_t selector) |  | 
|   5467       : kind_(kind), |  | 
|   5468         representation_(kNoRepresentation), |  | 
|   5469         instance_(instance), |  | 
|   5470         raw_selector_(selector), |  | 
|   5471         id_(0) { |  | 
|   5472   } |  | 
|   5473  |  | 
|   5474   bool SameField(const Place* other) const { |  | 
|   5475     return (kind_ == kField) ? (field().raw() == other->field().raw()) |  | 
|   5476                              : (offset_in_bytes_ == other->offset_in_bytes_); |  | 
|   5477   } |  | 
|   5478  |  | 
|   5479   intptr_t FieldHashcode() const { |  | 
|   5480     return (kind_ == kField) ? reinterpret_cast<intptr_t>(field().raw()) |  | 
|   5481                              : offset_in_bytes_; |  | 
|   5482   } |  | 
|   5483  |  | 
|   5484   void SetIndex(Definition* index) { |  | 
|   5485     ConstantInstr* index_constant = index->AsConstant(); |  | 
|   5486     if ((index_constant != NULL) && index_constant->value().IsSmi()) { |  | 
|   5487       kind_ = kConstantIndexed; |  | 
|   5488       index_constant_ = Smi::Cast(index_constant->value()).Value(); |  | 
|   5489     } else { |  | 
|   5490       kind_ = kIndexed; |  | 
|   5491       index_ = index; |  | 
|   5492     } |  | 
|   5493   } |  | 
|   5494  |  | 
|   5495   Kind kind_; |  | 
|   5496   Representation representation_; |  | 
|   5497   Definition* instance_; |  | 
|   5498   union { |  | 
|   5499     intptr_t raw_selector_; |  | 
|   5500     const Field* field_; |  | 
|   5501     intptr_t offset_in_bytes_; |  | 
|   5502     intptr_t index_constant_; |  | 
|   5503     Definition* index_; |  | 
|   5504   }; |  | 
|   5505  |  | 
|   5506   intptr_t id_; |  | 
|   5507 }; |  | 
|   5508  |  | 
|   5509  |  | 
|   5510 class ZonePlace : public ZoneAllocated { |  | 
|   5511  public: |  | 
|   5512   explicit ZonePlace(const Place& place) : place_(place) { } |  | 
|   5513  |  | 
|   5514   Place* place() { return &place_; } |  | 
|   5515  |  | 
|   5516  private: |  | 
|   5517   Place place_; |  | 
|   5518 }; |  | 
|   5519  |  | 
|   5520  |  | 
|   5521 Place* Place::Wrap(Isolate* isolate, const Place& place, intptr_t id) { |  | 
|   5522   Place* wrapped = (new(isolate) ZonePlace(place))->place(); |  | 
|   5523   wrapped->id_ = id; |  | 
|   5524   return wrapped; |  | 
|   5525 } |  | 
|   5526  |  | 
|   5527  |  | 
|   5528 // Correspondence between places connected through outgoing phi moves on the |  | 
|   5529 // edge that targets a join. |  | 
|   5530 class PhiPlaceMoves : public ZoneAllocated { |  | 
|   5531  public: |  | 
|   5532   // Record a move from the place with id |from| to the place with id |to| at |  | 
|   5533   // the given block. |  | 
|   5534   void CreateOutgoingMove(Isolate* isolate, |  | 
|   5535                           BlockEntryInstr* block, intptr_t from, intptr_t to) { |  | 
|   5536     const intptr_t block_num = block->preorder_number(); |  | 
|   5537     while (moves_.length() <= block_num) { |  | 
|   5538       moves_.Add(NULL); |  | 
|   5539     } |  | 
|   5540  |  | 
|   5541     if (moves_[block_num] == NULL) { |  | 
|   5542       moves_[block_num] = new(isolate) ZoneGrowableArray<Move>(5); |  | 
|   5543     } |  | 
|   5544  |  | 
|   5545     moves_[block_num]->Add(Move(from, to)); |  | 
|   5546   } |  | 
|   5547  |  | 
|   5548   class Move { |  | 
|   5549    public: |  | 
|   5550     Move(intptr_t from, intptr_t to) : from_(from), to_(to) { } |  | 
|   5551  |  | 
|   5552     intptr_t from() const { return from_; } |  | 
|   5553     intptr_t to() const { return to_; } |  | 
|   5554  |  | 
|   5555    private: |  | 
|   5556     intptr_t from_; |  | 
|   5557     intptr_t to_; |  | 
|   5558   }; |  | 
|   5559  |  | 
|   5560   typedef const ZoneGrowableArray<Move>* MovesList; |  | 
|   5561  |  | 
|   5562   MovesList GetOutgoingMoves(BlockEntryInstr* block) const { |  | 
|   5563     const intptr_t block_num = block->preorder_number(); |  | 
|   5564     return (block_num < moves_.length()) ? |  | 
|   5565         moves_[block_num] : NULL; |  | 
|   5566   } |  | 
|   5567  |  | 
|   5568  private: |  | 
|   5569   GrowableArray<ZoneGrowableArray<Move>* > moves_; |  | 
|   5570 }; |  | 
|   5571  |  | 
|   5572  |  | 
|   5573 // A map from aliases to a set of places sharing the alias. Additionally |  | 
|   5574 // carries a set of places that can be aliased by side-effects, essentially |  | 
|   5575 // those that are affected by calls. |  | 
|   5576 class AliasedSet : public ZoneAllocated { |  | 
|   5577  public: |  | 
|   5578   AliasedSet(Isolate* isolate, |  | 
|   5579              DirectChainedHashMap<PointerKeyValueTrait<Place> >* places_map, |  | 
|   5580              ZoneGrowableArray<Place*>* places, |  | 
|   5581              PhiPlaceMoves* phi_moves) |  | 
|   5582       : isolate_(isolate), |  | 
|   5583         places_map_(places_map), |  | 
|   5584         places_(*places), |  | 
|   5585         phi_moves_(phi_moves), |  | 
|   5586         aliases_(5), |  | 
|   5587         aliases_map_(), |  | 
|   5588         representatives_(), |  | 
|   5589         killed_(), |  | 
|   5590         aliased_by_effects_(new(isolate) BitVector(isolate, places->length())) { |  | 
|   5591     InsertAlias(Place::CreateAnyInstanceAnyIndexAlias(isolate_, |  | 
|   5592         kAnyInstanceAnyIndexAlias)); |  | 
|   5593     for (intptr_t i = 0; i < places_.length(); i++) { |  | 
|   5594       AddRepresentative(places_[i]); |  | 
|   5595     } |  | 
|   5596     ComputeKillSets(); |  | 
|   5597   } |  | 
|   5598  |  | 
|   5599   intptr_t LookupAliasId(const Place& alias) { |  | 
|   5600     const Place* result = aliases_map_.Lookup(&alias); |  | 
|   5601     return (result != NULL) ? result->id() : static_cast<intptr_t>(kNoAlias); |  | 
|   5602   } |  | 
|   5603  |  | 
|   5604   BitVector* GetKilledSet(intptr_t alias) { |  | 
|   5605     return (alias < killed_.length()) ? killed_[alias] : NULL; |  | 
|   5606   } |  | 
|   5607  |  | 
|   5608   intptr_t max_place_id() const { return places().length(); } |  | 
|   5609   bool IsEmpty() const { return max_place_id() == 0; } |  | 
|   5610  |  | 
|   5611   BitVector* aliased_by_effects() const { return aliased_by_effects_; } |  | 
|   5612  |  | 
|   5613   const ZoneGrowableArray<Place*>& places() const { |  | 
|   5614     return places_; |  | 
|   5615   } |  | 
|   5616  |  | 
|   5617   Place* LookupCanonical(Place* place) const { |  | 
|   5618     return places_map_->Lookup(place); |  | 
|   5619   } |  | 
|   5620  |  | 
|   5621   void PrintSet(BitVector* set) { |  | 
|   5622     bool comma = false; |  | 
|   5623     for (BitVector::Iterator it(set); |  | 
|   5624          !it.Done(); |  | 
|   5625          it.Advance()) { |  | 
|   5626       if (comma) { |  | 
|   5627         OS::Print(", "); |  | 
|   5628       } |  | 
|   5629       OS::Print("%s", places_[it.Current()]->ToCString()); |  | 
|   5630       comma = true; |  | 
|   5631     } |  | 
|   5632   } |  | 
|   5633  |  | 
|   5634   const PhiPlaceMoves* phi_moves() const { return phi_moves_; } |  | 
|   5635  |  | 
|   5636   void RollbackAliasedIdentites() { |  | 
|   5637     for (intptr_t i = 0; i < identity_rollback_.length(); ++i) { |  | 
|   5638       identity_rollback_[i]->SetIdentity(AliasIdentity::Unknown()); |  | 
|   5639     } |  | 
|   5640   } |  | 
|   5641  |  | 
|   5642   // Returns false if the result of an allocation instruction can't be aliased |  | 
|   5643   // by another SSA variable and true otherwise. |  | 
|   5644   bool CanBeAliased(Definition* alloc) { |  | 
|   5645     if (!Place::IsAllocation(alloc)) { |  | 
|   5646       return true; |  | 
|   5647     } |  | 
|   5648  |  | 
|   5649     if (alloc->Identity().IsUnknown()) { |  | 
|   5650       ComputeAliasing(alloc); |  | 
|   5651     } |  | 
|   5652  |  | 
|   5653     return !alloc->Identity().IsNotAliased(); |  | 
|   5654   } |  | 
|   5655  |  | 
|   5656   enum { |  | 
|   5657     kNoAlias = 0 |  | 
|   5658   }; |  | 
|   5659  |  | 
|   5660  private: |  | 
|   5661   enum { |  | 
|   5662     // Artificial alias that is used to collect all representatives of the |  | 
|   5663     // *[C], X[C] aliases for arbitrary C. |  | 
|   5664     kAnyConstantIndexedAlias = 1, |  | 
|   5665  |  | 
|   5666     // Artificial alias that is used to collect all representatives of |  | 
|   5667     // *[C] alias for arbitrary C. |  | 
|   5668     kUnknownInstanceConstantIndexedAlias = 2, |  | 
|   5669  |  | 
|   5670     // Artificial alias that is used to collect all representatives of |  | 
|   5671     // X[*] alias for all X. |  | 
|   5672     kAnyAllocationIndexedAlias = 3, |  | 
|   5673  |  | 
|   5674     // *[*] alias. |  | 
|   5675     kAnyInstanceAnyIndexAlias = 4 |  | 
|   5676   }; |  | 
|   5677  |  | 
|   5678   // Compute least generic alias for the place and assign alias id to it. |  | 
|   5679   void AddRepresentative(Place* place) { |  | 
|   5680     if (!place->IsFinalField()) { |  | 
|   5681       const Place* alias = CanonicalizeAlias(place->ToAlias()); |  | 
|   5682       EnsureSet(&representatives_, alias->id())->Add(place->id()); |  | 
|   5683  |  | 
|   5684       // Update cumulative representative sets that are used during |  | 
|   5685       // killed sets computation. |  | 
|   5686       if (alias->kind() == Place::kConstantIndexed) { |  | 
|   5687         if (CanBeAliased(alias->instance())) { |  | 
|   5688           EnsureSet(&representatives_, kAnyConstantIndexedAlias)-> |  | 
|   5689               Add(place->id()); |  | 
|   5690         } |  | 
|   5691  |  | 
|   5692         if (alias->instance() == NULL) { |  | 
|   5693           EnsureSet(&representatives_, kUnknownInstanceConstantIndexedAlias)-> |  | 
|   5694               Add(place->id()); |  | 
|   5695         } |  | 
|   5696       } else if ((alias->kind() == Place::kIndexed) && |  | 
|   5697                  CanBeAliased(place->instance())) { |  | 
|   5698         EnsureSet(&representatives_, kAnyAllocationIndexedAlias)-> |  | 
|   5699             Add(place->id()); |  | 
|   5700       } |  | 
|   5701  |  | 
|   5702       if (!IsIndependentFromEffects(place)) { |  | 
|   5703         aliased_by_effects_->Add(place->id()); |  | 
|   5704       } |  | 
|   5705     } |  | 
|   5706   } |  | 
|   5707  |  | 
|   5708   void ComputeKillSets() { |  | 
|   5709     for (intptr_t i = 0; i < aliases_.length(); ++i) { |  | 
|   5710       const Place* alias = aliases_[i]; |  | 
|   5711       // Add all representatives to the kill set. |  | 
|   5712       AddAllRepresentatives(alias->id(), alias->id()); |  | 
|   5713       ComputeKillSet(alias); |  | 
|   5714     } |  | 
|   5715  |  | 
|   5716     if (FLAG_trace_load_optimization) { |  | 
|   5717       OS::Print("Aliases KILL sets:\n"); |  | 
|   5718       for (intptr_t i = 0; i < aliases_.length(); ++i) { |  | 
|   5719         const Place* alias = aliases_[i]; |  | 
|   5720         BitVector* kill = GetKilledSet(alias->id()); |  | 
|   5721  |  | 
|   5722         OS::Print("%s: ", alias->ToCString()); |  | 
|   5723         if (kill != NULL) { |  | 
|   5724           PrintSet(kill); |  | 
|   5725         } |  | 
|   5726         OS::Print("\n"); |  | 
|   5727       } |  | 
|   5728     } |  | 
|   5729   } |  | 
|   5730  |  | 
|   5731   void InsertAlias(const Place* alias) { |  | 
|   5732     aliases_map_.Insert(alias); |  | 
|   5733     aliases_.Add(alias); |  | 
|   5734   } |  | 
|   5735  |  | 
|   5736   const Place* CanonicalizeAlias(const Place& alias) { |  | 
|   5737     const Place* canonical = aliases_map_.Lookup(&alias); |  | 
|   5738     if (canonical == NULL) { |  | 
|   5739       canonical = Place::Wrap(isolate_, |  | 
|   5740                               alias, |  | 
|   5741                               kAnyInstanceAnyIndexAlias + aliases_.length()); |  | 
|   5742       InsertAlias(canonical); |  | 
|   5743     } |  | 
|   5744     return canonical; |  | 
|   5745   } |  | 
|   5746  |  | 
|   5747   BitVector* GetRepresentativesSet(intptr_t alias) { |  | 
|   5748     return (alias < representatives_.length()) ? representatives_[alias] : NULL; |  | 
|   5749   } |  | 
|   5750  |  | 
|   5751   BitVector* EnsureSet(GrowableArray<BitVector*>* sets, |  | 
|   5752                        intptr_t alias) { |  | 
|   5753     while (sets->length() <= alias) { |  | 
|   5754       sets->Add(NULL); |  | 
|   5755     } |  | 
|   5756  |  | 
|   5757     BitVector* set = (*sets)[alias]; |  | 
|   5758     if (set == NULL) { |  | 
|   5759       (*sets)[alias] = set = new(isolate_) BitVector(isolate_, max_place_id()); |  | 
|   5760     } |  | 
|   5761     return set; |  | 
|   5762   } |  | 
|   5763  |  | 
|   5764   void AddAllRepresentatives(const Place* to, intptr_t from) { |  | 
|   5765     AddAllRepresentatives(to->id(), from); |  | 
|   5766   } |  | 
|   5767  |  | 
|   5768   void AddAllRepresentatives(intptr_t to, intptr_t from) { |  | 
|   5769     BitVector* from_set = GetRepresentativesSet(from); |  | 
|   5770     if (from_set != NULL) { |  | 
|   5771       EnsureSet(&killed_, to)->AddAll(from_set); |  | 
|   5772     } |  | 
|   5773   } |  | 
|   5774  |  | 
|   5775   void CrossAlias(const Place* to, const Place& from) { |  | 
|   5776     const intptr_t from_id = LookupAliasId(from); |  | 
|   5777     if (from_id == kNoAlias) { |  | 
|   5778       return; |  | 
|   5779     } |  | 
|   5780     CrossAlias(to, from_id); |  | 
|   5781   } |  | 
|   5782  |  | 
|   5783   void CrossAlias(const Place* to, intptr_t from) { |  | 
|   5784     AddAllRepresentatives(to->id(), from); |  | 
|   5785     AddAllRepresentatives(from, to->id()); |  | 
|   5786   } |  | 
|   5787  |  | 
|   5788   // When computing kill sets we let the less generic alias insert its |  | 
|   5789   // representatives into the more generic alias's kill set. For example, |  | 
|   5790   // when visiting the alias X[*], instead of searching for all aliases X[C] |  | 
|   5791   // and inserting their representatives into the kill set for X[*], we |  | 
|   5792   // update the kill set for X[*] each time we visit a new X[C] for some C. |  | 
|   5793   // There is an exception however: if both aliases are parametric, like *[C] |  | 
|   5794   // and X[*], which cross-alias when X is an aliased allocation, then we use |  | 
|   5795   // artificial aliases that contain all possible representatives for the |  | 
|   5796   // given alias for any value of the parameter to compute the kill set. |  | 
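|          // For example (a sketch): when X can be aliased, representatives of |  | 
|          // X[0] and X[1] are also recorded under kAnyConstantIndexedAlias, so |  | 
|          // the kill set for *[*] absorbs them through a single |  | 
|          // AddAllRepresentatives call instead of enumerating every X[C]. |  | 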
|   5797   void ComputeKillSet(const Place* alias) { |  | 
|   5798     switch (alias->kind()) { |  | 
|   5799       case Place::kIndexed:  // Either *[*] or X[*] alias. |  | 
|   5800         if (alias->instance() == NULL) { |  | 
|   5801           // *[*] aliases with X[*], X[C], *[C]. |  | 
|   5802           AddAllRepresentatives(alias, kAnyConstantIndexedAlias); |  | 
|   5803           AddAllRepresentatives(alias, kAnyAllocationIndexedAlias); |  | 
|   5804         } else if (CanBeAliased(alias->instance())) { |  | 
|   5805           // X[*] aliases with X[C]. |  | 
|   5806           // If X can be aliased then X[*] also aliases with *[C], *[*]. |  | 
|   5807           CrossAlias(alias, kAnyInstanceAnyIndexAlias); |  | 
|   5808           AddAllRepresentatives(alias, kUnknownInstanceConstantIndexedAlias); |  | 
|   5809         } |  | 
|   5810         break; |  | 
|   5811  |  | 
|   5812       case Place::kConstantIndexed:  // Either X[C] or *[C] alias. |  | 
|   5813         if (alias->instance() == NULL) { |  | 
|   5814           // *[C] aliases with X[C], X[*], *[*]. |  | 
|   5815           AddAllRepresentatives(alias, kAnyAllocationIndexedAlias); |  | 
|   5816           CrossAlias(alias, kAnyInstanceAnyIndexAlias); |  | 
|   5817         } else { |  | 
|   5818           // X[C] aliases with X[*]. |  | 
|   5819           // If X can be aliased then X[C] also aliases with *[C], *[*]. |  | 
|   5820           CrossAlias(alias, alias->CopyWithoutIndex()); |  | 
|   5821           if (CanBeAliased(alias->instance())) { |  | 
|   5822             CrossAlias(alias, alias->CopyWithoutInstance()); |  | 
|   5823             CrossAlias(alias, kAnyInstanceAnyIndexAlias); |  | 
|   5824           } |  | 
|   5825         } |  | 
|   5826         break; |  | 
|   5827  |  | 
|   5828       case Place::kField: |  | 
|   5829       case Place::kVMField: |  | 
|   5830         if (CanBeAliased(alias->instance())) { |  | 
|   5831           // X.f or X.@offs alias with *.f and *.@offs respectively. |  | 
|   5832           CrossAlias(alias, alias->CopyWithoutInstance()); |  | 
|   5833         } |  | 
|   5834         break; |  | 
|   5835  |  | 
|   5836       case Place::kNone: |  | 
|   5837         UNREACHABLE(); |  | 
|   5838     } |  | 
|   5839   } |  | 
|   5840  |  | 
|   5841   // Returns true if the given load is unaffected by external side-effects. |  | 
|   5842   // This essentially means that no stores to the same location can |  | 
|   5843   // occur in other functions. |  | 
|   5844   bool IsIndependentFromEffects(Place* place) { |  | 
|   5845     if (place->IsFinalField()) { |  | 
|   5846       // Note that we can't use LoadField's is_immutable attribute here because |  | 
|   5847       // some VM-fields (those that have no corresponding Field object and are |  | 
|   5848       // accessed through offset alone) can share an offset but have different |  | 
|   5849       // immutability properties. |  | 
|   5850       // One example is the length property of growable and fixed-size lists. |  | 
|   5851       // If loads of these two properties occur in the same function for the |  | 
|   5852       // same receiver then they will get the same expression number. However, |  | 
|   5853       // the immutability of a fixed-size list's length does not mean that a |  | 
|   5854       // growable list's length is immutable as well. Thus we make a |  | 
|   5855       // conservative assumption for the VM-properties. |  | 
|   5856       // TODO(vegorov): disambiguate immutable and non-immutable VM-fields with |  | 
|   5857       // the same offset e.g. through recognized kind. |  | 
|   5858       return true; |  | 
|   5859     } |  | 
|   5860  |  | 
|   5861     return ((place->kind() == Place::kField) || |  | 
|   5862          (place->kind() == Place::kVMField)) && |  | 
|   5863         !CanBeAliased(place->instance()); |  | 
|   5864   } |  | 
|   5865  |  | 
|   5866   // Returns true if there are direct loads from the given place. |  | 
|   5867   bool HasLoadsFromPlace(Definition* defn, const Place* place) { |  | 
|   5868     ASSERT((place->kind() == Place::kField) || |  | 
|   5869            (place->kind() == Place::kVMField)); |  | 
|   5870  |  | 
|   5871     for (Value* use = defn->input_use_list(); |  | 
|   5872          use != NULL; |  | 
|   5873          use = use->next_use()) { |  | 
|   5874       Instruction* instr = use->instruction(); |  | 
|   5875       if ((instr->IsRedefinition() || |  | 
|   5876            instr->IsAssertAssignable()) && |  | 
|   5877           HasLoadsFromPlace(instr->AsDefinition(), place)) { |  | 
|   5878         return true; |  | 
|   5879       } |  | 
|   5880       bool is_load = false, is_store = false; |  | 
|   5881       Place load_place(instr, &is_load, &is_store); |  | 
|   5882  |  | 
|   5883       if (is_load && load_place.Equals(place)) { |  | 
|   5884         return true; |  | 
|   5885       } |  | 
|   5886     } |  | 
|   5887  |  | 
|   5888     return false; |  | 
|   5889   } |  | 
|   5890  |  | 
|   5891   // Check if any use of the definition can create an alias. |  | 
|   5892   // Can add more objects into aliasing_worklist_. |  | 
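|          // (For example, passing the value as a call argument, storing it |  | 
|          // into an array or a static field, or merging it through a phi may |  | 
|          // each make the value aliased.) |  | 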
|   5893   bool AnyUseCreatesAlias(Definition* defn) { |  | 
|   5894     for (Value* use = defn->input_use_list(); |  | 
|   5895          use != NULL; |  | 
|   5896          use = use->next_use()) { |  | 
|   5897       Instruction* instr = use->instruction(); |  | 
|   5898       if (instr->IsPushArgument() || |  | 
|   5899           (instr->IsStoreIndexed() |  | 
|   5900            && (use->use_index() == StoreIndexedInstr::kValuePos)) || |  | 
|   5901           instr->IsStoreStaticField() || |  | 
|   5902           instr->IsPhi()) { |  | 
|   5903         return true; |  | 
|   5904       } else if ((instr->IsAssertAssignable() || instr->IsRedefinition()) && |  | 
|   5905                  AnyUseCreatesAlias(instr->AsDefinition())) { |  | 
|   5906         return true; |  | 
|   5907       } else if ((instr->IsStoreInstanceField() |  | 
|   5908            && (use->use_index() != StoreInstanceFieldInstr::kInstancePos))) { |  | 
|   5909         ASSERT(use->use_index() == StoreInstanceFieldInstr::kValuePos); |  | 
|   5910         // If we store this value into an object that is not aliased itself |  | 
|   5911         // and we never load again then the store does not create an alias. |  | 
|   5912         StoreInstanceFieldInstr* store = instr->AsStoreInstanceField(); |  | 
|   5913         Definition* instance = |  | 
|   5914             store->instance()->definition()->OriginalDefinition(); |  | 
|   5915         if (Place::IsAllocation(instance) && |  | 
|   5916             !instance->Identity().IsAliased()) { |  | 
|   5917           bool is_load = false, is_store = false; |  | 
|   5918           Place store_place(instr, &is_load, &is_store); |  | 
|   5919  |  | 
|   5920           if (!HasLoadsFromPlace(instance, &store_place)) { |  | 
|   5921             // No loads found that match this store. If it is not yet known |  | 
|   5922             // whether the object is aliased, optimistically assume that it is |  | 
|   5923             // not, but add it to the worklist to check its uses transitively. |  | 
|   5924             if (instance->Identity().IsUnknown()) { |  | 
|   5925               instance->SetIdentity(AliasIdentity::NotAliased()); |  | 
|   5926               aliasing_worklist_.Add(instance); |  | 
|   5927             } |  | 
|   5928             continue; |  | 
|   5929           } |  | 
|   5930         } |  | 
|   5931         return true; |  | 
|   5932       } |  | 
|   5933     } |  | 
|   5934     return false; |  | 
|   5935   } |  | 
|   5936  |  | 
|   5937   // Mark any value stored into the given object as potentially aliased. |  | 
|   5938   void MarkStoredValuesEscaping(Definition* defn) { |  | 
|   5939     // Find all stores into this object. |  | 
|   5940     for (Value* use = defn->input_use_list(); |  | 
|   5941          use != NULL; |  | 
|   5942          use = use->next_use()) { |  | 
|   5943       if (use->instruction()->IsRedefinition() || |  | 
|   5944           use->instruction()->IsAssertAssignable()) { |  | 
|   5945         MarkStoredValuesEscaping(use->instruction()->AsDefinition()); |  | 
|   5946         continue; |  | 
|   5947       } |  | 
|   5948       if ((use->use_index() == StoreInstanceFieldInstr::kInstancePos) && |  | 
|   5949           use->instruction()->IsStoreInstanceField()) { |  | 
|   5950         StoreInstanceFieldInstr* store = |  | 
|   5951             use->instruction()->AsStoreInstanceField(); |  | 
|   5952         Definition* value = store->value()->definition()->OriginalDefinition(); |  | 
|   5953         if (value->Identity().IsNotAliased()) { |  | 
|   5954           value->SetIdentity(AliasIdentity::Aliased()); |  | 
|   5955           identity_rollback_.Add(value); |  | 
|   5956  |  | 
|   5957           // Add to worklist to propagate the mark transitively. |  | 
|   5958           aliasing_worklist_.Add(value); |  | 
|   5959         } |  | 
|   5960       } |  | 
|   5961     } |  | 
|   5962   } |  | 
|   5963  |  | 
|   5964   // Determine if the given definition can't be aliased. |  | 
|   5965   void ComputeAliasing(Definition* alloc) { |  | 
|   5966     ASSERT(Place::IsAllocation(alloc)); |  | 
|   5967     ASSERT(alloc->Identity().IsUnknown()); |  | 
|   5968     ASSERT(aliasing_worklist_.is_empty()); |  | 
|   5969  |  | 
|   5970     alloc->SetIdentity(AliasIdentity::NotAliased()); |  | 
|   5971     aliasing_worklist_.Add(alloc); |  | 
|   5972  |  | 
|   5973     while (!aliasing_worklist_.is_empty()) { |  | 
|   5974       Definition* defn = aliasing_worklist_.RemoveLast(); |  | 
|   5975       ASSERT(Place::IsAllocation(defn)); |  | 
|   5976       // If the definition in the worklist was optimistically marked as |  | 
|   5977       // not-aliased, check that the optimistic assumption still holds, i.e. |  | 
|   5978       // that none of its uses can create an alias. |  | 
|   5979       if (!defn->Identity().IsAliased() && AnyUseCreatesAlias(defn)) { |  | 
|   5980         defn->SetIdentity(AliasIdentity::Aliased()); |  | 
|   5981         identity_rollback_.Add(defn); |  | 
|   5982       } |  | 
|   5983  |  | 
|   5984       // If the allocation site is marked as aliased conservatively mark |  | 
|   5985       // any values stored into the object aliased too. |  | 
|   5986       if (defn->Identity().IsAliased()) { |  | 
|   5987         MarkStoredValuesEscaping(defn); |  | 
|   5988       } |  | 
|   5989     } |  | 
|   5990   } |  | 
|   5991  |  | 
|   5992   Isolate* isolate_; |  | 
|   5993  |  | 
|   5994   DirectChainedHashMap<PointerKeyValueTrait<Place> >* places_map_; |  | 
|   5995  |  | 
|   5996   const ZoneGrowableArray<Place*>& places_; |  | 
|   5997  |  | 
|   5998   const PhiPlaceMoves* phi_moves_; |  | 
|   5999  |  | 
|   6000   // A list of all seen aliases and a map that allows looking up canonical |  | 
|   6001   // alias object. |  | 
|   6002   GrowableArray<const Place*> aliases_; |  | 
|   6003   DirectChainedHashMap<PointerKeyValueTrait<const Place> > aliases_map_; |  | 
|   6004  |  | 
|   6005   // Maps an alias id to the set of ids of places representing the alias. |  | 
|   6006   // A place represents an alias if this alias is the least generic alias |  | 
|   6007   // for the place (see ToAlias for the definition of the least generic |  | 
|   6008   // alias). |  | 
|   6009   GrowableArray<BitVector*> representatives_; |  | 
|   6010  |  | 
|   6011   // Maps alias id to set of ids of places aliased. |  | 
|   6012   GrowableArray<BitVector*> killed_; |  | 
|   6013  |  | 
|   6014   // Set of ids of places that can be affected by side-effects other than |  | 
|   6015   // explicit stores (i.e. through calls). |  | 
|   6016   BitVector* aliased_by_effects_; |  | 
|   6017  |  | 
|   6018   // Worklist used during alias analysis. |  | 
|   6019   GrowableArray<Definition*> aliasing_worklist_; |  | 
|   6020  |  | 
|   6021   // List of definitions that had their identity set to Aliased. At the end |  | 
|   6022   // of load optimization their identity will be rolled back to Unknown to |  | 
|   6023   // avoid treating them as Aliased at later stages without re-checking, |  | 
|   6024   // because optimizations can potentially eliminate the instructions that |  | 
|   6025   // lead to the aliasing. |  | 
|   6026   GrowableArray<Definition*> identity_rollback_; |  | 
|   6027 }; |  | 
|   6028  |  | 
|   6029  |  | 
|   6030 static Definition* GetStoredValue(Instruction* instr) { |  | 
|   6031   if (instr->IsStoreIndexed()) { |  | 
|   6032     return instr->AsStoreIndexed()->value()->definition(); |  | 
|   6033   } |  | 
|   6034  |  | 
|   6035   StoreInstanceFieldInstr* store_instance_field = instr->AsStoreInstanceField(); |  | 
|   6036   if (store_instance_field != NULL) { |  | 
|   6037     return store_instance_field->value()->definition(); |  | 
|   6038   } |  | 
|   6039  |  | 
|   6040   StoreStaticFieldInstr* store_static_field = instr->AsStoreStaticField(); |  | 
|   6041   if (store_static_field != NULL) { |  | 
|   6042     return store_static_field->value()->definition(); |  | 
|   6043   } |  | 
|   6044  |  | 
|   6045   UNREACHABLE();  // Should only be called for supported store instructions. |  | 
|   6046   return NULL; |  | 
|   6047 } |  | 
|   6048  |  | 
|   6049  |  | 
|   6050 static bool IsPhiDependentPlace(Place* place) { |  | 
|   6051   return ((place->kind() == Place::kField) || |  | 
|   6052           (place->kind() == Place::kVMField)) && |  | 
|   6053         (place->instance() != NULL) && |  | 
|   6054         place->instance()->IsPhi(); |  | 
|   6055 } |  | 
|   6056  |  | 
|   6057  |  | 
|   6058 // For each place that depends on a phi, ensure that the equivalent places |  | 
|   6059 // corresponding to the phi's inputs are numbered, and record outgoing phi |  | 
|   6060 // moves on each predecessor block's edge, establishing the correspondence |  | 
|   6061 // between the phi-dependent place and the incoming place of the phi input. |  | 
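|        // For example (a sketch): if the place v3.f is numbered and |  | 
|        // v3 = phi(v1, v2), then v1.f and v2.f are numbered as well and the |  | 
|        // moves v1.f -> v3.f and v2.f -> v3.f are recorded on the respective |  | 
|        // incoming edges. |  | 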
|   6062 static PhiPlaceMoves* ComputePhiMoves( |  | 
|   6063     DirectChainedHashMap<PointerKeyValueTrait<Place> >* map, |  | 
|   6064     ZoneGrowableArray<Place*>* places) { |  | 
|   6065   Isolate* isolate = Isolate::Current(); |  | 
|   6066   PhiPlaceMoves* phi_moves = new(isolate) PhiPlaceMoves(); |  | 
|   6067  |  | 
|   6068   for (intptr_t i = 0; i < places->length(); i++) { |  | 
|   6069     Place* place = (*places)[i]; |  | 
|   6070  |  | 
|   6071     if (IsPhiDependentPlace(place)) { |  | 
|   6072       PhiInstr* phi = place->instance()->AsPhi(); |  | 
|   6073       BlockEntryInstr* block = phi->GetBlock(); |  | 
|   6074  |  | 
|   6075       if (FLAG_trace_optimization) { |  | 
|   6076         OS::Print("phi dependent place %s\n", place->ToCString()); |  | 
|   6077       } |  | 
|   6078  |  | 
|   6079       Place input_place(*place); |  | 
|   6080       for (intptr_t j = 0; j < phi->InputCount(); j++) { |  | 
|   6081         input_place.set_instance(phi->InputAt(j)->definition()); |  | 
|   6082  |  | 
|   6083         Place* result = map->Lookup(&input_place); |  | 
|   6084         if (result == NULL) { |  | 
|   6085           result = Place::Wrap(isolate, input_place, places->length()); |  | 
|   6086           map->Insert(result); |  | 
|   6087           places->Add(result); |  | 
|   6088           if (FLAG_trace_optimization) { |  | 
|   6089             OS::Print("  adding place %s as %" Pd "\n", |  | 
|   6090                       result->ToCString(), |  | 
|   6091                       result->id()); |  | 
|   6092           } |  | 
|   6093         } |  | 
|   6094         phi_moves->CreateOutgoingMove(isolate, |  | 
|   6095                                       block->PredecessorAt(j), |  | 
|   6096                                       result->id(), |  | 
|   6097                                       place->id()); |  | 
|   6098       } |  | 
|   6099     } |  | 
|   6100   } |  | 
|   6101  |  | 
|   6102   return phi_moves; |  | 
|   6103 } |  | 
|   6104  |  | 
|   6105  |  | 
|   6106 enum CSEMode { |  | 
|   6107   kOptimizeLoads, |  | 
|   6108   kOptimizeStores |  | 
|   6109 }; |  | 
|   6110  |  | 
|   6111  |  | 
|   6112 static AliasedSet* NumberPlaces( |  | 
|   6113     FlowGraph* graph, |  | 
|   6114     DirectChainedHashMap<PointerKeyValueTrait<Place> >* map, |  | 
|   6115     CSEMode mode) { |  | 
|   6116   // Loads representing different expression ids will be collected and |  | 
|   6117   // used to build per-offset kill sets. |  | 
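|          // For example (a sketch), two loads of v0.f receive the same place |  | 
|          // id, while loads of v0.f and v0.g receive different ones. |  | 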
|   6118   Isolate* isolate = graph->isolate(); |  | 
|   6119   ZoneGrowableArray<Place*>* places = |  | 
|   6120       new(isolate) ZoneGrowableArray<Place*>(10); |  | 
|   6121  |  | 
|   6122   bool has_loads = false; |  | 
|   6123   bool has_stores = false; |  | 
|   6124   for (BlockIterator it = graph->reverse_postorder_iterator(); |  | 
|   6125        !it.Done(); |  | 
|   6126        it.Advance()) { |  | 
|   6127     BlockEntryInstr* block = it.Current(); |  | 
|   6128  |  | 
|   6129     for (ForwardInstructionIterator instr_it(block); |  | 
|   6130          !instr_it.Done(); |  | 
|   6131          instr_it.Advance()) { |  | 
|   6132       Instruction* instr = instr_it.Current(); |  | 
|   6133       Place place(instr, &has_loads, &has_stores); |  | 
|   6134       if (place.kind() == Place::kNone) { |  | 
|   6135         continue; |  | 
|   6136       } |  | 
|   6137  |  | 
|   6138       Place* result = map->Lookup(&place); |  | 
|   6139       if (result == NULL) { |  | 
|   6140         result = Place::Wrap(isolate, place, places->length()); |  | 
|   6141         map->Insert(result); |  | 
|   6142         places->Add(result); |  | 
|   6143  |  | 
|   6144         if (FLAG_trace_optimization) { |  | 
|   6145           OS::Print("numbering %s as %" Pd "\n", |  | 
|   6146                     result->ToCString(), |  | 
|   6147                     result->id()); |  | 
|   6148         } |  | 
|   6149       } |  | 
|   6150  |  | 
|   6151       instr->set_place_id(result->id()); |  | 
|   6152     } |  | 
|   6153   } |  | 
|   6154  |  | 
|   6155   if ((mode == kOptimizeLoads) && !has_loads) { |  | 
|   6156     return NULL; |  | 
|   6157   } |  | 
|   6158   if ((mode == kOptimizeStores) && !has_stores) { |  | 
|   6159     return NULL; |  | 
|   6160   } |  | 
|   6161  |  | 
|   6162   PhiPlaceMoves* phi_moves = ComputePhiMoves(map, places); |  | 
|   6163  |  | 
|   6164   // Build aliasing sets mapping aliases to loads. |  | 
|   6165   return new(isolate) AliasedSet(isolate, map, places, phi_moves); |  | 
|   6166 } |  | 
|   6167  |  | 
|   6168  |  | 
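|        // Eliminates redundant loads by forwarding previously loaded or stored |  | 
|        // values to subsequent loads. For example (a sketch), given |  | 
|        // "x.f <- v; u <- x.f", the load u is replaced with v when nothing in |  | 
|        // between can overwrite x.f. |  | 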
|   6169 class LoadOptimizer : public ValueObject { |  | 
|   6170  public: |  | 
|   6171   LoadOptimizer(FlowGraph* graph, AliasedSet* aliased_set) |  | 
|   6172       : graph_(graph), |  | 
|   6173         aliased_set_(aliased_set), |  | 
|   6174         in_(graph_->preorder().length()), |  | 
|   6175         out_(graph_->preorder().length()), |  | 
|   6176         gen_(graph_->preorder().length()), |  | 
|   6177         kill_(graph_->preorder().length()), |  | 
|   6178         exposed_values_(graph_->preorder().length()), |  | 
|   6179         out_values_(graph_->preorder().length()), |  | 
|   6180         phis_(5), |  | 
|   6181         worklist_(5), |  | 
|   6182         congruency_worklist_(6), |  | 
|   6183         in_worklist_(NULL), |  | 
|   6184         forwarded_(false) { |  | 
|   6185     const intptr_t num_blocks = graph_->preorder().length(); |  | 
|   6186     for (intptr_t i = 0; i < num_blocks; i++) { |  | 
|   6187       out_.Add(NULL); |  | 
|   6188       gen_.Add(new(I) BitVector(I, aliased_set_->max_place_id())); |  | 
|   6189       kill_.Add(new(I) BitVector(I, aliased_set_->max_place_id())); |  | 
|   6190       in_.Add(new(I) BitVector(I, aliased_set_->max_place_id())); |  | 
|   6191  |  | 
|   6192       exposed_values_.Add(NULL); |  | 
|   6193       out_values_.Add(NULL); |  | 
|   6194     } |  | 
|   6195   } |  | 
|   6196  |  | 
|   6197   ~LoadOptimizer() { |  | 
|   6198     aliased_set_->RollbackAliasedIdentites(); |  | 
|   6199   } |  | 
|   6200  |  | 
|   6201   Isolate* isolate() const { return graph_->isolate(); } |  | 
|   6202  |  | 
|   6203   static bool OptimizeGraph(FlowGraph* graph) { |  | 
|   6204     ASSERT(FLAG_load_cse); |  | 
|   6205     if (FLAG_trace_load_optimization) { |  | 
|   6206       FlowGraphPrinter::PrintGraph("Before LoadOptimizer", graph); |  | 
|   6207     } |  | 
|   6208  |  | 
|   6209     DirectChainedHashMap<PointerKeyValueTrait<Place> > map; |  | 
|   6210     AliasedSet* aliased_set = NumberPlaces(graph, &map, kOptimizeLoads); |  | 
|   6211     if ((aliased_set != NULL) && !aliased_set->IsEmpty()) { |  | 
|   6212       // If any loads were forwarded return true from Optimize to run load |  | 
|   6213       // forwarding again. This allows forwarding chains of loads. |  | 
|   6214       // This is especially important for context variables as they are built |  | 
|   6215       // as loads from a loaded context. |  | 
|   6216       // TODO(vegorov): renumber newly discovered congruences during the |  | 
|   6217       // forwarding to forward chains without running whole pass twice. |  | 
|   6218       LoadOptimizer load_optimizer(graph, aliased_set); |  | 
|   6219       return load_optimizer.Optimize(); |  | 
|   6220     } |  | 
|   6221     return false; |  | 
|   6222   } |  | 
|   6223  |  | 
|   6224  private: |  | 
|   6225   bool Optimize() { |  | 
|   6226     ComputeInitialSets(); |  | 
|   6227     ComputeOutSets(); |  | 
|   6228     ComputeOutValues(); |  | 
|   6229     if (graph_->is_licm_allowed()) { |  | 
|   6230       MarkLoopInvariantLoads(); |  | 
|   6231     } |  | 
|   6232     ForwardLoads(); |  | 
|   6233     EmitPhis(); |  | 
|   6234  |  | 
|   6235     if (FLAG_trace_load_optimization) { |  | 
|   6236       FlowGraphPrinter::PrintGraph("After LoadOptimizer", graph_); |  | 
|   6237     } |  | 
|   6238  |  | 
|   6239     return forwarded_; |  | 
|   6240   } |  | 
|   6241  |  | 
|   6242   // Compute sets of loads generated and killed by each block. |  | 
|   6243   // Additionally compute upwards exposed and generated loads for each block. |  | 
|   6244   // Exposed loads are those that can be replaced if a corresponding |  | 
|   6245   // reaching load is found. |  | 
|   6246   // Loads that are locally redundant will be replaced as we go through |  | 
|   6247   // instructions. |  | 
|   6248   void ComputeInitialSets() { |  | 
|   6249     for (BlockIterator block_it = graph_->reverse_postorder_iterator(); |  | 
|   6250          !block_it.Done(); |  | 
|   6251          block_it.Advance()) { |  | 
|   6252       BlockEntryInstr* block = block_it.Current(); |  | 
|   6253       const intptr_t preorder_number = block->preorder_number(); |  | 
|   6254  |  | 
|   6255       BitVector* kill = kill_[preorder_number]; |  | 
|   6256       BitVector* gen = gen_[preorder_number]; |  | 
|   6257  |  | 
|   6258       ZoneGrowableArray<Definition*>* exposed_values = NULL; |  | 
|   6259       ZoneGrowableArray<Definition*>* out_values = NULL; |  | 
|   6260  |  | 
|   6261       for (ForwardInstructionIterator instr_it(block); |  | 
|   6262            !instr_it.Done(); |  | 
|   6263            instr_it.Advance()) { |  | 
|   6264         Instruction* instr = instr_it.Current(); |  | 
|   6265  |  | 
|   6266         bool is_load = false, is_store = false; |  | 
|   6267         Place place(instr, &is_load, &is_store); |  | 
|   6268  |  | 
|   6269         BitVector* killed = NULL; |  | 
|   6270         if (is_store) { |  | 
|   6271           const intptr_t alias_id = |  | 
|   6272               aliased_set_->LookupAliasId(place.ToAlias()); |  | 
|   6273           if (alias_id != AliasedSet::kNoAlias) { |  | 
|   6274             killed = aliased_set_->GetKilledSet(alias_id); |  | 
|   6275           } else if (!place.IsFinalField()) { |  | 
|   6276             // We encountered an unknown alias: this means that intrablock load |  | 
|   6277             // forwarding refined a parameter of this store, for example |  | 
|   6278             // |  | 
|   6279             //     o   <- alloc() |  | 
|   6280             //     a.f <- o |  | 
|   6281             //     u   <- a.f |  | 
|   6282             //     u.x <- null ;; this store alias is *.x |  | 
|   6283             // |  | 
|   6284             // after intrablock load forwarding |  | 
|   6285             // |  | 
|   6286             //     o   <- alloc() |  | 
|   6287             //     a.f <- o |  | 
|   6288             //     o.x <- null ;; this store alias is o.x |  | 
|   6289             // |  | 
|   6290             // In this case we fall back to using the place id recorded in the |  | 
|   6291             // instruction, which still points to the old place with a more |  | 
|   6292             // generic alias. |  | 
|   6293             const intptr_t old_alias_id = aliased_set_->LookupAliasId( |  | 
|   6294                 aliased_set_->places()[instr->place_id()]->ToAlias()); |  | 
|   6295             killed = aliased_set_->GetKilledSet(old_alias_id); |  | 
|   6296           } |  | 
|   6297  |  | 
|   6298           if (killed != NULL) { |  | 
|   6299             kill->AddAll(killed); |  | 
|   6300             // There is no need to clear out_values when clearing GEN set |  | 
|   6301             // because only those values that are in the GEN set |  | 
|   6302             // will ever be used. |  | 
|   6303             gen->RemoveAll(killed); |  | 
|   6304           } |  | 
|   6305  |  | 
|   6306           // Only forward stores to normal arrays, float32/float64, and simd |  | 
|   6307           // arrays to loads, because other array stores (intXX/uintXX) may |  | 
|   6308           // implicitly convert the value stored. |  | 
|   6309           StoreIndexedInstr* array_store = instr->AsStoreIndexed(); |  | 
|   6310           if ((array_store == NULL) || |  | 
|   6311               (array_store->class_id() == kArrayCid) || |  | 
|   6312               (array_store->class_id() == kTypedDataFloat64ArrayCid) || |  | 
|   6313               (array_store->class_id() == kTypedDataFloat32ArrayCid) || |  | 
|   6314               (array_store->class_id() == kTypedDataFloat32x4ArrayCid)) { |  | 
|   6315             Place* canonical_place = aliased_set_->LookupCanonical(&place); |  | 
|   6316             if (canonical_place != NULL) { |  | 
|   6317               // Store has a corresponding numbered place that might have a |  | 
|   6318               // load. Try forwarding stored value to it. |  | 
|   6319               gen->Add(canonical_place->id()); |  | 
|   6320               if (out_values == NULL) out_values = CreateBlockOutValues(); |  | 
|   6321               (*out_values)[canonical_place->id()] = GetStoredValue(instr); |  | 
|   6322             } |  | 
|   6323           } |  | 
|   6324  |  | 
|   6325           ASSERT(!instr->IsDefinition() || |  | 
|   6326                  !IsLoadEliminationCandidate(instr->AsDefinition())); |  | 
|   6327           continue; |  | 
|   6328         } else if (is_load) { |  | 
|   6329           // Check if this load needs renumbering because of the intrablock |  | 
|   6330           // load forwarding. |  | 
|   6331           const Place* canonical = aliased_set_->LookupCanonical(&place); |  | 
|   6332           if ((canonical != NULL) && |  | 
|   6333               (canonical->id() != instr->AsDefinition()->place_id())) { |  | 
|   6334             instr->AsDefinition()->set_place_id(canonical->id()); |  | 
|   6335           } |  | 
|   6336         } |  | 
|   6337  |  | 
|   6338         // If instruction has effects then kill all loads affected. |  | 
|   6339         if (!instr->Effects().IsNone()) { |  | 
|   6340           kill->AddAll(aliased_set_->aliased_by_effects()); |  | 
|   6341           // There is no need to clear out_values when removing values from GEN |  | 
|   6342           // set because only those values that are in the GEN set |  | 
|   6343           // will ever be used. |  | 
|   6344           gen->RemoveAll(aliased_set_->aliased_by_effects()); |  | 
|   6345           continue; |  | 
|   6346         } |  | 
|   6347  |  | 
|   6348         Definition* defn = instr->AsDefinition(); |  | 
|   6349         if (defn == NULL) { |  | 
|   6350           continue; |  | 
|   6351         } |  | 
|   6352  |  | 
|   6353         // For an object allocation, forward the initial values of its |  | 
|   6354         // fields to subsequent loads, but skip final fields. Final fields |  | 
|   6355         // are initialized in a constructor that potentially is not inlined |  | 
|   6356         // into the function that we are currently optimizing, while at the |  | 
|   6357         // same time we assume that values of final fields can be forwarded |  | 
|   6358         // across side-effects. If we added 'null' as a known value for |  | 
|   6359         // these fields here, we would incorrectly propagate that null |  | 
|   6360         // across the constructor invocation. |  | 
|   6361         AllocateObjectInstr* alloc = instr->AsAllocateObject(); |  | 
|   6362         if (alloc != NULL) { |  | 
|   6363           for (Value* use = alloc->input_use_list(); |  | 
|   6364                use != NULL; |  | 
|   6365                use = use->next_use()) { |  | 
|   6366             // Look for all immediate loads from this object. |  | 
|   6367             if (use->use_index() != 0) { |  | 
|   6368               continue; |  | 
|   6369             } |  | 
|   6370  |  | 
|   6371             LoadFieldInstr* load = use->instruction()->AsLoadField(); |  | 
|   6372             if (load != NULL) { |  | 
|   6373               // Found a load. Initialize the current value of the field to |  | 
|   6374               // null for normal fields, or to the type arguments. |  | 
|   6375  |  | 
|   6376               // Forward all fields for non-escaping objects, but only |  | 
|   6377               // non-final fields and type arguments for escaping ones. |  | 
|   6378               if (aliased_set_->CanBeAliased(alloc) && |  | 
|   6379                   (load->field() != NULL) && |  | 
|   6380                   load->field()->is_final()) { |  | 
|   6381                 continue; |  | 
|   6382               } |  | 
|   6383  |  | 
|   6384               Definition* forward_def = graph_->constant_null(); |  | 
|   6385               if (alloc->ArgumentCount() > 0) { |  | 
|   6386                 ASSERT(alloc->ArgumentCount() == 1); |  | 
|   6387                 intptr_t type_args_offset = |  | 
|   6388                     alloc->cls().type_arguments_field_offset(); |  | 
|   6389                 if (load->offset_in_bytes() == type_args_offset) { |  | 
|   6390                   forward_def = alloc->PushArgumentAt(0)->value()->definition(); |  | 
|   6391                 } |  | 
|   6392               } |  | 
|   6393               gen->Add(load->place_id()); |  | 
|   6394               if (out_values == NULL) out_values = CreateBlockOutValues(); |  | 
|   6395               (*out_values)[load->place_id()] = forward_def; |  | 
|   6396             } |  | 
|   6397           } |  | 
|   6398           continue; |  | 
|   6399         } |  | 
|   6400  |  | 
|   6401         if (!IsLoadEliminationCandidate(defn)) { |  | 
|   6402           continue; |  | 
|   6403         } |  | 
|   6404  |  | 
|   6405         const intptr_t place_id = defn->place_id(); |  | 
|   6406         if (gen->Contains(place_id)) { |  | 
|   6407           // This is a locally redundant load. |  | 
|   6408           ASSERT((out_values != NULL) && ((*out_values)[place_id] != NULL)); |  | 
|   6409  |  | 
|   6410           Definition* replacement = (*out_values)[place_id]; |  | 
|   6411           EnsureSSATempIndex(graph_, defn, replacement); |  | 
|   6412           if (FLAG_trace_optimization) { |  | 
|   6413             OS::Print("Replacing load v%" Pd " with v%" Pd "\n", |  | 
|   6414                       defn->ssa_temp_index(), |  | 
|   6415                       replacement->ssa_temp_index()); |  | 
|   6416           } |  | 
|   6417  |  | 
|   6418           defn->ReplaceUsesWith(replacement); |  | 
|   6419           instr_it.RemoveCurrentFromGraph(); |  | 
|   6420           forwarded_ = true; |  | 
|   6421           continue; |  | 
|   6422         } else if (!kill->Contains(place_id)) { |  | 
|   6423           // This is an exposed load: it is the first representative of a |  | 
|   6424           // given expression id and it is not killed on the path from |  | 
|   6425           // the block entry. |  | 
|   6426           if (exposed_values == NULL) { |  | 
|   6427             static const intptr_t kMaxExposedValuesInitialSize = 5; |  | 
|   6428             exposed_values = new(I) ZoneGrowableArray<Definition*>( |  | 
|   6429                 Utils::Minimum(kMaxExposedValuesInitialSize, |  | 
|   6430                                aliased_set_->max_place_id())); |  | 
|   6431           } |  | 
|   6432  |  | 
|   6433           exposed_values->Add(defn); |  | 
|   6434         } |  | 
|   6435  |  | 
|   6436         gen->Add(place_id); |  | 
|   6437  |  | 
|   6438         if (out_values == NULL) out_values = CreateBlockOutValues(); |  | 
|   6439         (*out_values)[place_id] = defn; |  | 
|   6440       } |  | 
|   6441  |  | 
|   6442       exposed_values_[preorder_number] = exposed_values; |  | 
|   6443       out_values_[preorder_number] = out_values; |  | 
|   6444     } |  | 
|   6445   } |  | 
|   6446  |  | 
|   6447   static void PerformPhiMoves(PhiPlaceMoves::MovesList phi_moves, |  | 
|   6448                               BitVector* out, |  | 
|   6449                               BitVector* forwarded_loads) { |  | 
|   6450     forwarded_loads->Clear(); |  | 
|   6451  |  | 
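|            // First pass: record the move targets whose source place is |  | 
|            // available in |out|. |  | 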
|   6452     for (intptr_t i = 0; i < phi_moves->length(); i++) { |  | 
|   6453       const intptr_t from = (*phi_moves)[i].from(); |  | 
|   6454       const intptr_t to = (*phi_moves)[i].to(); |  | 
|   6455       if (from == to) continue; |  | 
|   6456  |  | 
|   6457       if (out->Contains(from)) { |  | 
|   6458         forwarded_loads->Add(to); |  | 
|   6459       } |  | 
|   6460     } |  | 
|   6461  |  | 
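|            // Second pass: remove every move target from |out|; targets that |  | 
|            // are fed by an available source are re-added below. Two passes |  | 
|            // keep parallel (chained or swapped) moves order-independent. |  | 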
|   6462     for (intptr_t i = 0; i < phi_moves->length(); i++) { |  | 
|   6463       const intptr_t from = (*phi_moves)[i].from(); |  | 
|   6464       const intptr_t to = (*phi_moves)[i].to(); |  | 
|   6465       if (from == to) continue; |  | 
|   6466  |  | 
|   6467       out->Remove(to); |  | 
|   6468     } |  | 
|   6469  |  | 
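|            // Finally, add back the targets that received an available value. |  | 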
|   6470     out->AddAll(forwarded_loads); |  | 
|   6471   } |  | 
|   6472  |  | 
|   6473   // Compute OUT sets by propagating them iteratively until a fixed point |  | 
|   6474   // is reached. |  | 
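|          // This is a standard forward dataflow iteration (a sketch of the |  | 
|          // equations the loop below computes): |  | 
|          //   IN(B)  = intersection of OUT(P) over all predecessors P, with |  | 
|          //            outgoing phi moves applied along each edge; |  | 
|          //   OUT(B) = (IN(B) - KILL(B)) + GEN(B). |  | 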
|   6475   void ComputeOutSets() { |  | 
|   6476     BitVector* temp = new(I) BitVector(I, aliased_set_->max_place_id()); |  | 
|   6477     BitVector* forwarded_loads = |  | 
|   6478         new(I) BitVector(I, aliased_set_->max_place_id()); |  | 
|   6479     BitVector* temp_out = new(I) BitVector(I, aliased_set_->max_place_id()); |  | 
|   6480  |  | 
|   6481     bool changed = true; |  | 
|   6482     while (changed) { |  | 
|   6483       changed = false; |  | 
|   6484  |  | 
|   6485       for (BlockIterator block_it = graph_->reverse_postorder_iterator(); |  | 
|   6486            !block_it.Done(); |  | 
|   6487            block_it.Advance()) { |  | 
|   6488         BlockEntryInstr* block = block_it.Current(); |  | 
|   6489  |  | 
|   6490         const intptr_t preorder_number = block->preorder_number(); |  | 
|   6491  |  | 
|   6492         BitVector* block_in = in_[preorder_number]; |  | 
|   6493         BitVector* block_out = out_[preorder_number]; |  | 
|   6494         BitVector* block_kill = kill_[preorder_number]; |  | 
|   6495         BitVector* block_gen = gen_[preorder_number]; |  | 
|   6496  |  | 
|   6497         // Compute block_in as the intersection of all out(p) where p |  | 
|   6498         // is a predecessor of the current block. |  | 
|   6499         if (block->IsGraphEntry()) { |  | 
|   6500           temp->Clear(); |  | 
|   6501         } else { |  | 
|   6502           temp->SetAll(); |  | 
|   6503           ASSERT(block->PredecessorCount() > 0); |  | 
|   6504           for (intptr_t i = 0; i < block->PredecessorCount(); i++) { |  | 
|   6505             BlockEntryInstr* pred = block->PredecessorAt(i); |  | 
|   6506             BitVector* pred_out = out_[pred->preorder_number()]; |  | 
|   6507             if (pred_out == NULL) continue; |  | 
|   6508             PhiPlaceMoves::MovesList phi_moves = |  | 
|   6509                 aliased_set_->phi_moves()->GetOutgoingMoves(pred); |  | 
|   6510             if (phi_moves != NULL) { |  | 
|   6511               // If there are phi moves, perform intersection with |  | 
|   6512               // a copy of pred_out where the phi moves are applied. |  | 
|   6513               temp_out->CopyFrom(pred_out); |  | 
|   6514               PerformPhiMoves(phi_moves, temp_out, forwarded_loads); |  | 
|   6515               pred_out = temp_out; |  | 
|   6516             } |  | 
|   6517             temp->Intersect(pred_out); |  | 
|   6518           } |  | 
|   6519         } |  | 
|   6520  |  | 
|   6521         if (!temp->Equals(*block_in) || (block_out == NULL)) { |  | 
|   6522           // If IN set has changed propagate the change to OUT set. |  | 
|   6523           block_in->CopyFrom(temp); |  | 
|   6524  |  | 
|   6525           temp->RemoveAll(block_kill); |  | 
|   6526           temp->AddAll(block_gen); |  | 
|   6527  |  | 
|   6528           if ((block_out == NULL) || !block_out->Equals(*temp)) { |  | 
|   6529             if (block_out == NULL) { |  | 
|   6530               block_out = out_[preorder_number] = |  | 
|   6531                   new(I) BitVector(I, aliased_set_->max_place_id()); |  | 
|   6532             } |  | 
|   6533             block_out->CopyFrom(temp); |  | 
|   6534             changed = true; |  | 
|   6535           } |  | 
|   6536         } |  | 
|   6537       } |  | 
|   6538     } |  | 
|   6539   } |  | 
|   6540  |  | 
|   6541   // Compute out_values mappings by propagating them in reverse postorder once |  | 
|   6542   // through the graph. Generate phis on back edges where an eager merge |  | 
|   6543   // is impossible. |  | 
|   6544   // No replacement is done at this point and thus any out_value[place_id] is |  | 
|   6545   // changed at most once: from NULL to an actual value. |  | 
|   6546   // When merging incoming loads we might need to create a phi. |  | 
|   6547   // These phis are not inserted into the graph immediately because some of |  | 
|   6548   // them might become redundant after load forwarding is done. |  | 
|   6549   void ComputeOutValues() { |  | 
|   6550     GrowableArray<PhiInstr*> pending_phis(5); |  | 
|   6551     ZoneGrowableArray<Definition*>* temp_forwarded_values = NULL; |  | 
|   6552  |  | 
|   6553     for (BlockIterator block_it = graph_->reverse_postorder_iterator(); |  | 
|   6554          !block_it.Done(); |  | 
|   6555          block_it.Advance()) { |  | 
|   6556       BlockEntryInstr* block = block_it.Current(); |  | 
|   6557  |  | 
|   6558       const bool can_merge_eagerly = CanMergeEagerly(block); |  | 
|   6559  |  | 
|   6560       const intptr_t preorder_number = block->preorder_number(); |  | 
|   6561  |  | 
|   6562       ZoneGrowableArray<Definition*>* block_out_values = |  | 
|   6563           out_values_[preorder_number]; |  | 
|   6564  |  | 
|   6565  |  | 
|   6566       // If the OUT set has changed then we have new values available out of |  | 
|   6567       // the block. Compute these values, creating phis where necessary. |  | 
|   6568       for (BitVector::Iterator it(out_[preorder_number]); |  | 
|   6569            !it.Done(); |  | 
|   6570            it.Advance()) { |  | 
|   6571         const intptr_t place_id = it.Current(); |  | 
|   6572  |  | 
|   6573         if (block_out_values == NULL) { |  | 
|   6574           out_values_[preorder_number] = block_out_values = |  | 
|   6575               CreateBlockOutValues(); |  | 
|   6576         } |  | 
|   6577  |  | 
|   6578         if ((*block_out_values)[place_id] == NULL) { |  | 
|   6579           ASSERT(block->PredecessorCount() > 0); |  | 
|   6580           Definition* in_value = can_merge_eagerly ? |  | 
|   6581               MergeIncomingValues(block, place_id) : NULL; |  | 
|   6582           if ((in_value == NULL) && |  | 
|   6583               (in_[preorder_number]->Contains(place_id))) { |  | 
|   6584             PhiInstr* phi = new(I) PhiInstr(block->AsJoinEntry(), |  | 
|   6585                                             block->PredecessorCount()); |  | 
|   6586             phi->set_place_id(place_id); |  | 
|   6587             pending_phis.Add(phi); |  | 
|   6588             in_value = phi; |  | 
|   6589           } |  | 
|   6590           (*block_out_values)[place_id] = in_value; |  | 
|   6591         } |  | 
|   6592       } |  | 
|   6593  |  | 
|   6594       // If the block has outgoing phi moves perform them. Use a temporary list |  | 
|   6595       // of values to ensure that cyclic moves are performed correctly. |  | 
|   6596       PhiPlaceMoves::MovesList phi_moves = |  | 
|   6597           aliased_set_->phi_moves()->GetOutgoingMoves(block); |  | 
|   6598       if ((phi_moves != NULL) && (block_out_values != NULL)) { |  | 
|   6599         if (temp_forwarded_values == NULL) { |  | 
|   6600           temp_forwarded_values = CreateBlockOutValues(); |  | 
|   6601         } |  | 
|   6602  |  | 
|   6603         for (intptr_t i = 0; i < phi_moves->length(); i++) { |  | 
|   6604           const intptr_t from = (*phi_moves)[i].from(); |  | 
|   6605           const intptr_t to = (*phi_moves)[i].to(); |  | 
|   6606           if (from == to) continue; |  | 
|   6607  |  | 
|   6608           (*temp_forwarded_values)[to] = (*block_out_values)[from]; |  | 
|   6609         } |  | 
|   6610  |  | 
|   6611         for (intptr_t i = 0; i < phi_moves->length(); i++) { |  | 
|   6612           const intptr_t from = (*phi_moves)[i].from(); |  | 
|   6613           const intptr_t to = (*phi_moves)[i].to(); |  | 
|   6614           if (from == to) continue; |  | 
|   6615  |  | 
|   6616           (*block_out_values)[to] = (*temp_forwarded_values)[to]; |  | 
|   6617         } |  | 
|   6618       } |  | 
|   6619  |  | 
|   6620       if (FLAG_trace_load_optimization) { |  | 
|   6621         OS::Print("B%" Pd "\n", block->block_id()); |  | 
|   6622         OS::Print("  IN: "); |  | 
|   6623         aliased_set_->PrintSet(in_[preorder_number]); |  | 
|   6624         OS::Print("\n"); |  | 
|   6625  |  | 
|   6626         OS::Print("  KILL: "); |  | 
|   6627         aliased_set_->PrintSet(kill_[preorder_number]); |  | 
|   6628         OS::Print("\n"); |  | 
|   6629  |  | 
|   6630         OS::Print("  OUT: "); |  | 
|   6631         aliased_set_->PrintSet(out_[preorder_number]); |  | 
|   6632         OS::Print("\n"); |  | 
|   6633       } |  | 
|   6634     } |  | 
|   6635  |  | 
|   6636     // All blocks were visited. Fill pending phis with inputs |  | 
|   6637     // that flow on back edges. |  | 
|   6638     for (intptr_t i = 0; i < pending_phis.length(); i++) { |  | 
|   6639       FillPhiInputs(pending_phis[i]); |  | 
|   6640     } |  | 
|   6641   } |  | 
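
The two move loops above read every source into the temporary array before writing any destination, which is what makes cyclic phi moves safe. A standalone sketch of the same two-pass idea (illustrative only, not from this CL):

    #include <utility>
    #include <vector>

    void PerformParallelMoves(std::vector<int>& values,
                              const std::vector<std::pair<int, int>>& moves) {
      std::vector<int> temp(values.size());
      for (const auto& m : moves) {
        if (m.first != m.second) temp[m.second] = values[m.first];   // read pass
      }
      for (const auto& m : moves) {
        if (m.first != m.second) values[m.second] = temp[m.second];  // write pass
      }
    }

With a single loop, a cyclic move set such as {0->1, 1->0} would clobber one of the values; with the scratch copy it performs a swap.
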
|   6642  |  | 
|   6643   bool CanMergeEagerly(BlockEntryInstr* block) { |  | 
|   6644     for (intptr_t i = 0; i < block->PredecessorCount(); i++) { |  | 
|   6645       BlockEntryInstr* pred = block->PredecessorAt(i); |  | 
|   6646       if (pred->postorder_number() < block->postorder_number()) { |  | 
|   6647         return false; |  | 
|   6648       } |  | 
|   6649     } |  | 
|   6650     return true; |  | 
|   6651   } |  | 
|   6652  |  | 
|   6653   void MarkLoopInvariantLoads() { |  | 
|   6654     const ZoneGrowableArray<BlockEntryInstr*>& loop_headers = |  | 
|   6655         graph_->LoopHeaders(); |  | 
|   6656  |  | 
|   6657     ZoneGrowableArray<BitVector*>* invariant_loads = |  | 
|   6658         new(I) ZoneGrowableArray<BitVector*>(loop_headers.length()); |  | 
|   6659  |  | 
|   6660     for (intptr_t i = 0; i < loop_headers.length(); i++) { |  | 
|   6661       BlockEntryInstr* header = loop_headers[i]; |  | 
|   6662       BlockEntryInstr* pre_header = header->ImmediateDominator(); |  | 
|   6663       if (pre_header == NULL) { |  | 
|   6664         invariant_loads->Add(NULL); |  | 
|   6665         continue; |  | 
|   6666       } |  | 
|   6667  |  | 
|   6668       BitVector* loop_gen = new(I) BitVector(I, aliased_set_->max_place_id()); |  | 
|   6669       for (BitVector::Iterator loop_it(header->loop_info()); |  | 
|   6670            !loop_it.Done(); |  | 
|   6671            loop_it.Advance()) { |  | 
|   6672         const intptr_t preorder_number = loop_it.Current(); |  | 
|   6673         loop_gen->AddAll(gen_[preorder_number]); |  | 
|   6674       } |  | 
|   6675  |  | 
|   6676       for (BitVector::Iterator loop_it(header->loop_info()); |  | 
|   6677            !loop_it.Done(); |  | 
|   6678            loop_it.Advance()) { |  | 
|   6679         const intptr_t preorder_number = loop_it.Current(); |  | 
|   6680         loop_gen->RemoveAll(kill_[preorder_number]); |  | 
|   6681       } |  | 
|   6682  |  | 
|   6683       if (FLAG_trace_optimization) { |  | 
|   6684         for (BitVector::Iterator it(loop_gen); !it.Done(); it.Advance()) { |  | 
|   6685           OS::Print("place %s is loop invariant for B%" Pd "\n", |  | 
|   6686                     aliased_set_->places()[it.Current()]->ToCString(), |  | 
|   6687                     header->block_id()); |  | 
|   6688         } |  | 
|   6689       } |  | 
|   6690  |  | 
|   6691       invariant_loads->Add(loop_gen); |  | 
|   6692     } |  | 
|   6693  |  | 
|   6694     graph_->set_loop_invariant_loads(invariant_loads); |  | 
|   6695   } |  | 
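
A standalone sketch (illustrative only, not from this CL) of the two loops above: a place is treated as loop invariant when some block of the loop generates it and no block of the loop kills it.

    #include <bitset>
    #include <vector>

    using Places = std::bitset<64>;

    Places LoopInvariantPlaces(const std::vector<Places>& gen,
                               const std::vector<Places>& kill,
                               const std::vector<int>& loop_blocks) {
      Places generated, killed;
      for (int b : loop_blocks) {
        generated |= gen[b];  // union of GEN over the loop body
        killed |= kill[b];    // union of KILL over the loop body
      }
      return generated & ~killed;
    }
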
|   6696  |  | 
|   6697   // Compute incoming value for the given expression id. |  | 
|   6698   // Will create a phi if different values are incoming from multiple |  | 
|   6699   // predecessors. |  | 
|   6700   Definition* MergeIncomingValues(BlockEntryInstr* block, intptr_t place_id) { |  | 
|   6701     // First check if the same value is coming in from all predecessors. |  | 
|   6702     static Definition* const kDifferentValuesMarker = |  | 
|   6703         reinterpret_cast<Definition*>(-1); |  | 
|   6704     Definition* incoming = NULL; |  | 
|   6705     for (intptr_t i = 0; i < block->PredecessorCount(); i++) { |  | 
|   6706       BlockEntryInstr* pred = block->PredecessorAt(i); |  | 
|   6707       ZoneGrowableArray<Definition*>* pred_out_values = |  | 
|   6708           out_values_[pred->preorder_number()]; |  | 
|   6709       if ((pred_out_values == NULL) || ((*pred_out_values)[place_id] == NULL)) { |  | 
|   6710         return NULL; |  | 
|   6711       } else if (incoming == NULL) { |  | 
|   6712         incoming = (*pred_out_values)[place_id]; |  | 
|   6713       } else if (incoming != (*pred_out_values)[place_id]) { |  | 
|   6714         incoming = kDifferentValuesMarker; |  | 
|   6715       } |  | 
|   6716     } |  | 
|   6717  |  | 
|   6718     if (incoming != kDifferentValuesMarker) { |  | 
|   6719       ASSERT(incoming != NULL); |  | 
|   6720       return incoming; |  | 
|   6721     } |  | 
|   6722  |  | 
|   6723     // Incoming values are different. Phi is required to merge. |  | 
|   6724     PhiInstr* phi = new(I) PhiInstr( |  | 
|   6725         block->AsJoinEntry(), block->PredecessorCount()); |  | 
|   6726     phi->set_place_id(place_id); |  | 
|   6727     FillPhiInputs(phi); |  | 
|   6728     return phi; |  | 
|   6729   } |  | 
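
A standalone sketch (illustrative only, not from this CL) of the sentinel technique above; here Def is a stand-in for Definition, and the "different values" case simply reports failure instead of building a phi.

    #include <vector>

    struct Def {};  // stand-in for Definition

    // Returns the single value flowing in from every path, or nullptr if
    // some path has no value or the paths disagree.
    Def* MergeIfAllEqual(const std::vector<Def*>& incoming) {
      static Def sentinel;
      Def* const kDifferentValuesMarker = &sentinel;

      Def* merged = nullptr;
      for (Def* value : incoming) {
        if (value == nullptr) return nullptr;  // absent on some path
        if (merged == nullptr) {
          merged = value;
        } else if (merged != value) {
          merged = kDifferentValuesMarker;     // paths disagree
        }
      }
      return (merged == kDifferentValuesMarker) ? nullptr : merged;
    }
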
|   6730  |  | 
|   6731   void FillPhiInputs(PhiInstr* phi) { |  | 
|   6732     BlockEntryInstr* block = phi->GetBlock(); |  | 
|   6733     const intptr_t place_id = phi->place_id(); |  | 
|   6734  |  | 
|   6735     for (intptr_t i = 0; i < block->PredecessorCount(); i++) { |  | 
|   6736       BlockEntryInstr* pred = block->PredecessorAt(i); |  | 
|   6737       ZoneGrowableArray<Definition*>* pred_out_values = |  | 
|   6738           out_values_[pred->preorder_number()]; |  | 
|   6739       ASSERT((*pred_out_values)[place_id] != NULL); |  | 
|   6740  |  | 
|   6741       // Sets of outgoing values are not linked into use lists so |  | 
|   6742       // they might contain values that were replaced and removed |  | 
|   6743       // from the graph by this iteration. |  | 
|   6744       // To prevent using them we additionally mark definitions themselves |  | 
|   6745       // as replaced and store a pointer to the replacement. |  | 
|   6746       Definition* replacement = (*pred_out_values)[place_id]->Replacement(); |  | 
|   6747       Value* input = new(I) Value(replacement); |  | 
|   6748       phi->SetInputAt(i, input); |  | 
|   6749       replacement->AddInputUse(input); |  | 
|   6750     } |  | 
|   6751  |  | 
|   6752     phi->set_ssa_temp_index(graph_->alloc_ssa_temp_index()); |  | 
|   6753     phis_.Add(phi);  // Postpone phi insertion until after load forwarding. |  | 
|   6754  |  | 
|   6755     if (FLAG_trace_load_optimization) { |  | 
|   6756       OS::Print("created pending phi %s for %s at B%" Pd "\n", |  | 
|   6757                 phi->ToCString(), |  | 
|   6758                 aliased_set_->places()[place_id]->ToCString(), |  | 
|   6759                 block->block_id()); |  | 
|   6760     } |  | 
|   6761   } |  | 
|   6762  |  | 
|   6763   // Iterate over basic blocks and replace exposed loads with incoming |  | 
|   6764   // values. |  | 
|   6765   void ForwardLoads() { |  | 
|   6766     for (BlockIterator block_it = graph_->reverse_postorder_iterator(); |  | 
|   6767          !block_it.Done(); |  | 
|   6768          block_it.Advance()) { |  | 
|   6769       BlockEntryInstr* block = block_it.Current(); |  | 
|   6770  |  | 
|   6771       ZoneGrowableArray<Definition*>* loads = |  | 
|   6772           exposed_values_[block->preorder_number()]; |  | 
|   6773       if (loads == NULL) continue;  // No exposed loads. |  | 
|   6774  |  | 
|   6775       BitVector* in = in_[block->preorder_number()]; |  | 
|   6776  |  | 
|   6777       for (intptr_t i = 0; i < loads->length(); i++) { |  | 
|   6778         Definition* load = (*loads)[i]; |  | 
|   6779         if (!in->Contains(load->place_id())) continue;  // No incoming value. |  | 
|   6780  |  | 
|   6781         Definition* replacement = MergeIncomingValues(block, load->place_id()); |  | 
|   6782         ASSERT(replacement != NULL); |  | 
|   6783  |  | 
|   6784         // Sets of outgoing values are not linked into use lists so |  | 
|   6785         // they might contain values that were replaced and removed |  | 
|   6786         // from the graph by this iteration. |  | 
|   6787         // To prevent using them we additionally mark definitions themselves |  | 
|   6788         // as replaced and store a pointer to the replacement. |  | 
|   6789         replacement = replacement->Replacement(); |  | 
|   6790  |  | 
|   6791         if (load != replacement) { |  | 
|   6792           EnsureSSATempIndex(graph_, load, replacement); |  | 
|   6793  |  | 
|   6794           if (FLAG_trace_optimization) { |  | 
|   6795             OS::Print("Replacing load v%" Pd " with v%" Pd "\n", |  | 
|   6796                       load->ssa_temp_index(), |  | 
|   6797                       replacement->ssa_temp_index()); |  | 
|   6798           } |  | 
|   6799  |  | 
|   6800           load->ReplaceUsesWith(replacement); |  | 
|   6801           load->RemoveFromGraph(); |  | 
|   6802           load->SetReplacement(replacement); |  | 
|   6803           forwarded_ = true; |  | 
|   6804         } |  | 
|   6805       } |  | 
|   6806     } |  | 
|   6807   } |  | 
|   6808  |  | 
|   6809   // Check if the given phi takes the same value on all code paths. |  | 
|   6810   // Eliminate it as redundant if this is the case. |  | 
|   6811   // When analyzing phi operands this assumes that only phis generated |  | 
|   6812   // during this load phase can be redundant. They can be distinguished |  | 
|   6813   // because they are not marked alive. |  | 
|   6814   // TODO(vegorov): move this into a separate phase over all phis. |  | 
|   6815   bool EliminateRedundantPhi(PhiInstr* phi) { |  | 
|   6816     Definition* value = NULL;  // Possible value of this phi. |  | 
|   6817  |  | 
|   6818     worklist_.Clear(); |  | 
|   6819     if (in_worklist_ == NULL) { |  | 
|   6820       in_worklist_ = new(I) BitVector(I, graph_->current_ssa_temp_index()); |  | 
|   6821     } else { |  | 
|   6822       in_worklist_->Clear(); |  | 
|   6823     } |  | 
|   6824  |  | 
|   6825     worklist_.Add(phi); |  | 
|   6826     in_worklist_->Add(phi->ssa_temp_index()); |  | 
|   6827  |  | 
|   6828     for (intptr_t i = 0; i < worklist_.length(); i++) { |  | 
|   6829       PhiInstr* phi = worklist_[i]; |  | 
|   6830  |  | 
|   6831       for (intptr_t i = 0; i < phi->InputCount(); i++) { |  | 
|   6832         Definition* input = phi->InputAt(i)->definition(); |  | 
|   6833         if (input == phi) continue; |  | 
|   6834  |  | 
|   6835         PhiInstr* phi_input = input->AsPhi(); |  | 
|   6836         if ((phi_input != NULL) && !phi_input->is_alive()) { |  | 
|   6837           if (!in_worklist_->Contains(phi_input->ssa_temp_index())) { |  | 
|   6838             worklist_.Add(phi_input); |  | 
|   6839             in_worklist_->Add(phi_input->ssa_temp_index()); |  | 
|   6840           } |  | 
|   6841           continue; |  | 
|   6842         } |  | 
|   6843  |  | 
|   6844         if (value == NULL) { |  | 
|   6845           value = input; |  | 
|   6846         } else if (value != input) { |  | 
|   6847           return false;  // This phi is not redundant. |  | 
|   6848         } |  | 
|   6849       } |  | 
|   6850     } |  | 
|   6851  |  | 
|   6852     // All phis in the worklist are redundant and have the same computed |  | 
|   6853     // value on all code paths. |  | 
|   6854     ASSERT(value != NULL); |  | 
|   6855     for (intptr_t i = 0; i < worklist_.length(); i++) { |  | 
|   6856       worklist_[i]->ReplaceUsesWith(value); |  | 
|   6857     } |  | 
|   6858  |  | 
|   6859     return true; |  | 
|   6860   } |  | 
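
A standalone sketch (illustrative types, not from this CL) of the redundancy test above: walk the network of phis reachable from the candidate and check that every non-phi operand is one and the same value. Unlike the code above, which only follows phis that are not yet marked alive, the sketch follows every phi operand.

    #include <cstddef>
    #include <unordered_set>
    #include <vector>

    struct PhiNode {
      bool is_phi = false;
      std::vector<PhiNode*> inputs;  // operands; only meaningful for phis
    };

    // Returns the unique value the phi network computes, or nullptr if the
    // candidate phi is not redundant.
    PhiNode* TryResolveRedundantPhi(PhiNode* phi) {
      std::vector<PhiNode*> worklist{phi};
      std::unordered_set<PhiNode*> in_worklist{phi};
      PhiNode* value = nullptr;
      for (std::size_t i = 0; i < worklist.size(); ++i) {
        for (PhiNode* input : worklist[i]->inputs) {
          if (input->is_phi) {  // follow phi operands (self-references too)
            if (in_worklist.insert(input).second) worklist.push_back(input);
            continue;
          }
          if (value == nullptr) {
            value = input;
          } else if (value != input) {
            return nullptr;  // two distinct values reach the phi: keep it
          }
        }
      }
      return value;
    }
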
|   6861  |  | 
|   6862   // Returns true if definitions are congruent assuming their inputs |  | 
|   6863   // are congruent. |  | 
|   6864   bool CanBeCongruent(Definition* a, Definition* b) { |  | 
|   6865     return (a->tag() == b->tag()) && |  | 
|   6866        ((a->IsPhi() && (a->GetBlock() == b->GetBlock())) || |  | 
|   6867         (a->AllowsCSE() && a->Dependencies().IsNone() && |  | 
|   6868          a->AttributesEqual(b))); |  | 
|   6869   } |  | 
|   6870  |  | 
|   6871   // Given two definitions check if they are congruent under the assumption |  | 
|   6872   // that their inputs will be proven congruent. If they are, add them to the |  | 
|   6873   // worklist to check their inputs' congruency. |  | 
|   6874   // Returns true if the pair was added to the worklist or is already in the |  | 
|   6875   // worklist, and false if a and b are not congruent. |  | 
|   6876   bool AddPairToCongruencyWorklist(Definition* a, Definition* b) { |  | 
|   6877     if (!CanBeCongruent(a, b)) { |  | 
|   6878       return false; |  | 
|   6879     } |  | 
|   6880  |  | 
|   6881     // If a is already in the worklist check if it is being compared to b. |  | 
|   6882     // Give up if it is not. |  | 
|   6883     if (in_worklist_->Contains(a->ssa_temp_index())) { |  | 
|   6884       for (intptr_t i = 0; i < congruency_worklist_.length(); i += 2) { |  | 
|   6885         if (a == congruency_worklist_[i]) { |  | 
|   6886           return (b == congruency_worklist_[i + 1]); |  | 
|   6887         } |  | 
|   6888       } |  | 
|   6889       UNREACHABLE(); |  | 
|   6890     } else if (in_worklist_->Contains(b->ssa_temp_index())) { |  | 
|   6891       return AddPairToCongruencyWorklist(b, a); |  | 
|   6892     } |  | 
|   6893  |  | 
|   6894     congruency_worklist_.Add(a); |  | 
|   6895     congruency_worklist_.Add(b); |  | 
|   6896     in_worklist_->Add(a->ssa_temp_index()); |  | 
|   6897     return true; |  | 
|   6898   } |  | 
|   6899  |  | 
|   6900   bool AreInputsCongruent(Definition* a, Definition* b) { |  | 
|   6901     ASSERT(a->tag() == b->tag()); |  | 
|   6902     ASSERT(a->InputCount() == b->InputCount()); |  | 
|   6903     for (intptr_t j = 0; j < a->InputCount(); j++) { |  | 
|   6904       Definition* inputA = a->InputAt(j)->definition(); |  | 
|   6905       Definition* inputB = b->InputAt(j)->definition(); |  | 
|   6906  |  | 
|   6907       if (inputA != inputB) { |  | 
|   6908         if (!AddPairToCongruencyWorklist(inputA, inputB)) { |  | 
|   6909           return false; |  | 
|   6910         } |  | 
|   6911       } |  | 
|   6912     } |  | 
|   6913     return true; |  | 
|   6914   } |  | 
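
The two helpers above implement an optimistic pairwise congruence test: a pair is assumed congruent, each node is allowed at most one partner, and inputs are compared until the worklist stops growing. A standalone sketch with illustrative types (not from this CL):

    #include <cstddef>
    #include <map>
    #include <utility>
    #include <vector>

    struct Expr {
      int op;                     // operation tag
      std::vector<Expr*> inputs;  // operands
    };

    bool AreCongruent(Expr* a, Expr* b) {
      std::vector<std::pair<Expr*, Expr*>> worklist;
      std::map<Expr*, Expr*> paired_left, paired_right;

      auto AddPair = [&](Expr* x, Expr* y) -> bool {
        if (x->op != y->op || x->inputs.size() != y->inputs.size()) {
          return false;
        }
        auto l = paired_left.find(x);
        if (l != paired_left.end()) return l->second == y;  // prior pairing
        auto r = paired_right.find(y);
        if (r != paired_right.end()) return r->second == x;
        paired_left[x] = y;
        paired_right[y] = x;
        worklist.emplace_back(x, y);
        return true;
      };

      if (!AddPair(a, b)) return false;
      for (std::size_t i = 0; i < worklist.size(); ++i) {  // list may grow
        Expr* x = worklist[i].first;
        Expr* y = worklist[i].second;
        for (std::size_t j = 0; j < x->inputs.size(); ++j) {
          if (x->inputs[j] != y->inputs[j] &&
              !AddPair(x->inputs[j], y->inputs[j])) {
            return false;
          }
        }
      }
      return true;
    }
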
|   6915  |  | 
|   6916   // Returns true if instruction dom dominates instruction other. |  | 
|   6917   static bool Dominates(Instruction* dom, Instruction* other) { |  | 
|   6918     BlockEntryInstr* dom_block = dom->GetBlock(); |  | 
|   6919     BlockEntryInstr* other_block = other->GetBlock(); |  | 
|   6920  |  | 
|   6921     if (dom_block == other_block) { |  | 
|   6922       for (Instruction* current = dom->next(); |  | 
|   6923            current != NULL; |  | 
|   6924            current = current->next()) { |  | 
|   6925         if (current == other) { |  | 
|   6926           return true; |  | 
|   6927         } |  | 
|   6928       } |  | 
|   6929       return false; |  | 
|   6930     } |  | 
|   6931  |  | 
|   6932     return dom_block->Dominates(other_block); |  | 
|   6933   } |  | 
|   6934  |  | 
|   6935   // Replace the given phi with another if they are congruent. |  | 
|   6936   // Returns true if succeeds. |  | 
|   6937   bool ReplacePhiWith(PhiInstr* phi, PhiInstr* replacement) { |  | 
|   6938     ASSERT(phi->InputCount() == replacement->InputCount()); |  | 
|   6939     ASSERT(phi->block() == replacement->block()); |  | 
|   6940  |  | 
|   6941     congruency_worklist_.Clear(); |  | 
|   6942     if (in_worklist_ == NULL) { |  | 
|   6943       in_worklist_ = new(I) BitVector(I, graph_->current_ssa_temp_index()); |  | 
|   6944     } else { |  | 
|   6945       in_worklist_->Clear(); |  | 
|   6946     } |  | 
|   6947  |  | 
|   6948     // During the comparison the worklist contains pairs of definitions |  | 
|   6949     // to be compared. |  | 
|   6950     if (!AddPairToCongruencyWorklist(phi, replacement)) { |  | 
|   6951       return false; |  | 
|   6952     } |  | 
|   6953  |  | 
|   6954     // Process the worklist. It might grow during each comparison step. |  | 
|   6955     for (intptr_t i = 0; i < congruency_worklist_.length(); i += 2) { |  | 
|   6956       if (!AreInputsCongruent(congruency_worklist_[i], |  | 
|   6957                               congruency_worklist_[i + 1])) { |  | 
|   6958         return false; |  | 
|   6959       } |  | 
|   6960     } |  | 
|   6961  |  | 
|   6962     // At this point the worklist contains pairs of congruent definitions. |  | 
|   6963     // Replace one member of each pair with the other, maintaining the proper |  | 
|   6964     // domination relation between definitions and uses. |  | 
|   6965     for (intptr_t i = 0; i < congruency_worklist_.length(); i += 2) { |  | 
|   6966       Definition* a = congruency_worklist_[i]; |  | 
|   6967       Definition* b = congruency_worklist_[i + 1]; |  | 
|   6968  |  | 
|   6969       // If these definitions are not phis then we need to pick the one |  | 
|   6970       // that dominates the other as the replacement: if a dominates b swap them. |  | 
|   6971       // Note: both a and b are used as a phi input at the same block B which |  | 
|   6972       // means a dominates B and b dominates B, which guarantees that either |  | 
|   6973       // a dominates b or b dominates a. |  | 
|   6974       if (!a->IsPhi()) { |  | 
|   6975         if (Dominates(a, b)) { |  | 
|   6976           Definition* t = a; |  | 
|   6977           a = b; |  | 
|   6978           b = t; |  | 
|   6979         } |  | 
|   6980         ASSERT(Dominates(b, a)); |  | 
|   6981       } |  | 
|   6982  |  | 
|   6983       if (FLAG_trace_load_optimization) { |  | 
|   6984         OS::Print("Replacing %s with congruent %s\n", |  | 
|   6985                   a->ToCString(), |  | 
|   6986                   b->ToCString()); |  | 
|   6987       } |  | 
|   6988  |  | 
|   6989       a->ReplaceUsesWith(b); |  | 
|   6990       if (a->IsPhi()) { |  | 
|   6991         // We might be replacing a phi introduced by the load forwarding |  | 
|   6992         // that is not inserted in the graph yet. |  | 
|   6993         ASSERT(b->IsPhi()); |  | 
|   6994         PhiInstr* phi_a = a->AsPhi(); |  | 
|   6995         if (phi_a->is_alive()) { |  | 
|   6996           phi_a->mark_dead(); |  | 
|   6997           phi_a->block()->RemovePhi(phi_a); |  | 
|   6998           phi_a->UnuseAllInputs(); |  | 
|   6999         } |  | 
|   7000       } else { |  | 
|   7001         a->RemoveFromGraph(); |  | 
|   7002       } |  | 
|   7003     } |  | 
|   7004  |  | 
|   7005     return true; |  | 
|   7006   } |  | 
|   7007  |  | 
|   7008   // Insert the given phi into the graph. Attempt to find an equal one in the |  | 
|   7009   // target block first. |  | 
|   7010   // Returns true if the phi was inserted and false if it was replaced. |  | 
|   7011   bool EmitPhi(PhiInstr* phi) { |  | 
|   7012     for (PhiIterator it(phi->block()); !it.Done(); it.Advance()) { |  | 
|   7013       if (ReplacePhiWith(phi, it.Current())) { |  | 
|   7014         return false; |  | 
|   7015       } |  | 
|   7016     } |  | 
|   7017  |  | 
|   7018     phi->mark_alive(); |  | 
|   7019     phi->block()->InsertPhi(phi); |  | 
|   7020     return true; |  | 
|   7021   } |  | 
|   7022  |  | 
|   7023   // Phis have not yet been inserted into the graph but they have uses of |  | 
|   7024   // their inputs.  Insert the non-redundant ones and clear the input uses |  | 
|   7025   // of the redundant ones. |  | 
|   7026   void EmitPhis() { |  | 
|   7027     // First eliminate all redundant phis. |  | 
|   7028     for (intptr_t i = 0; i < phis_.length(); i++) { |  | 
|   7029       PhiInstr* phi = phis_[i]; |  | 
|   7030       if (!phi->HasUses() || EliminateRedundantPhi(phi)) { |  | 
|   7031         phi->UnuseAllInputs(); |  | 
|   7032         phis_[i] = NULL; |  | 
|   7033       } |  | 
|   7034     } |  | 
|   7035  |  | 
|   7036     // Now emit phis or replace them with equal phis already present in the |  | 
|   7037     // graph. |  | 
|   7038     for (intptr_t i = 0; i < phis_.length(); i++) { |  | 
|   7039       PhiInstr* phi = phis_[i]; |  | 
|   7040       if ((phi != NULL) && (!phi->HasUses() || !EmitPhi(phi))) { |  | 
|   7041         phi->UnuseAllInputs(); |  | 
|   7042       } |  | 
|   7043     } |  | 
|   7044   } |  | 
|   7045  |  | 
|   7046   ZoneGrowableArray<Definition*>* CreateBlockOutValues() { |  | 
|   7047     ZoneGrowableArray<Definition*>* out = |  | 
|   7048         new(I) ZoneGrowableArray<Definition*>(aliased_set_->max_place_id()); |  | 
|   7049     for (intptr_t i = 0; i < aliased_set_->max_place_id(); i++) { |  | 
|   7050       out->Add(NULL); |  | 
|   7051     } |  | 
|   7052     return out; |  | 
|   7053   } |  | 
|   7054  |  | 
|   7055   FlowGraph* graph_; |  | 
|   7056   DirectChainedHashMap<PointerKeyValueTrait<Place> >* map_; |  | 
|   7057  |  | 
|   7058   // Mapping between field offsets in words and expression ids of loads from |  | 
|   7059   // that offset. |  | 
|   7060   AliasedSet* aliased_set_; |  | 
|   7061  |  | 
|   7062   // Per block sets of expression ids for loads that are: incoming (available |  | 
|   7063   // on the entry), outgoing (available on the exit), generated and killed. |  | 
|   7064   GrowableArray<BitVector*> in_; |  | 
|   7065   GrowableArray<BitVector*> out_; |  | 
|   7066   GrowableArray<BitVector*> gen_; |  | 
|   7067   GrowableArray<BitVector*> kill_; |  | 
|   7068  |  | 
|   7069   // Per block list of upwards exposed loads. |  | 
|   7070   GrowableArray<ZoneGrowableArray<Definition*>*> exposed_values_; |  | 
|   7071  |  | 
|   7072   // Per block mappings between expression ids and outgoing definitions that |  | 
|   7073   // represent those ids. |  | 
|   7074   GrowableArray<ZoneGrowableArray<Definition*>*> out_values_; |  | 
|   7075  |  | 
|   7076   // List of phis generated during ComputeOutValues and ForwardLoads. |  | 
|   7077   // Some of these phis might be redundant and thus a separate pass is |  | 
|   7078   // needed to emit only non-redundant ones. |  | 
|   7079   GrowableArray<PhiInstr*> phis_; |  | 
|   7080  |  | 
|   7081   // Auxiliary worklist used by redundant phi elimination. |  | 
|   7082   GrowableArray<PhiInstr*> worklist_; |  | 
|   7083   GrowableArray<Definition*> congruency_worklist_; |  | 
|   7084   BitVector* in_worklist_; |  | 
|   7085  |  | 
|   7086  |  | 
|   7087   // True if any load was eliminated. |  | 
|   7088   bool forwarded_; |  | 
|   7089  |  | 
|   7090   DISALLOW_COPY_AND_ASSIGN(LoadOptimizer); |  | 
|   7091 }; |  | 
|   7092  |  | 
|   7093  |  | 
|   7094 class StoreOptimizer : public LivenessAnalysis { |  | 
|   7095  public: |  | 
|   7096   StoreOptimizer(FlowGraph* graph, |  | 
|   7097                  AliasedSet* aliased_set, |  | 
|   7098                  DirectChainedHashMap<PointerKeyValueTrait<Place> >* map) |  | 
|   7099       : LivenessAnalysis(aliased_set->max_place_id(), graph->postorder()), |  | 
|   7100         graph_(graph), |  | 
|   7101         map_(map), |  | 
|   7102         aliased_set_(aliased_set), |  | 
|   7103         exposed_stores_(graph_->postorder().length()) { |  | 
|   7104     const intptr_t num_blocks = graph_->postorder().length(); |  | 
|   7105     for (intptr_t i = 0; i < num_blocks; i++) { |  | 
|   7106       exposed_stores_.Add(NULL); |  | 
|   7107     } |  | 
|   7108   } |  | 
|   7109  |  | 
|   7110   static void OptimizeGraph(FlowGraph* graph) { |  | 
|   7111     ASSERT(FLAG_load_cse); |  | 
|   7112     if (FLAG_trace_load_optimization) { |  | 
|   7113       FlowGraphPrinter::PrintGraph("Before StoreOptimizer", graph); |  | 
|   7114     } |  | 
|   7115  |  | 
|   7116     DirectChainedHashMap<PointerKeyValueTrait<Place> > map; |  | 
|   7117     AliasedSet* aliased_set = NumberPlaces(graph, &map, kOptimizeStores); |  | 
|   7118     if ((aliased_set != NULL) && !aliased_set->IsEmpty()) { |  | 
|   7119       StoreOptimizer store_optimizer(graph, aliased_set, &map); |  | 
|   7120       store_optimizer.Optimize(); |  | 
|   7121     } |  | 
|   7122   } |  | 
|   7123  |  | 
|   7124  private: |  | 
|   7125   void Optimize() { |  | 
|   7126     Analyze(); |  | 
|   7127     if (FLAG_trace_load_optimization) { |  | 
|   7128       Dump(); |  | 
|   7129     } |  | 
|   7130     EliminateDeadStores(); |  | 
|   7131     if (FLAG_trace_load_optimization) { |  | 
|   7132       FlowGraphPrinter::PrintGraph("After StoreOptimizer", graph_); |  | 
|   7133     } |  | 
|   7134   } |  | 
|   7135  |  | 
|   7136   bool CanEliminateStore(Instruction* instr) { |  | 
|   7137     switch (instr->tag()) { |  | 
|   7138       case Instruction::kStoreInstanceField: |  | 
|   7139         if (instr->AsStoreInstanceField()->is_initialization()) { |  | 
|   7140           // Can't eliminate stores that initialize unboxed fields. |  | 
|   7141           return false; |  | 
|   7142         } |  | 
|   7143       case Instruction::kStoreIndexed: |  | 
|   7144       case Instruction::kStoreStaticField: |  | 
|   7145         return true; |  | 
|   7146       default: |  | 
|   7147         UNREACHABLE(); |  | 
|   7148         return false; |  | 
|   7149     } |  | 
|   7150   } |  | 
|   7151  |  | 
|   7152   virtual void ComputeInitialSets() { |  | 
|   7153     Isolate* isolate = graph_->isolate(); |  | 
|   7154     BitVector* all_places = new(isolate) BitVector(isolate, |  | 
|   7155         aliased_set_->max_place_id()); |  | 
|   7156     all_places->SetAll(); |  | 
|   7157     for (BlockIterator block_it = graph_->postorder_iterator(); |  | 
|   7158          !block_it.Done(); |  | 
|   7159          block_it.Advance()) { |  | 
|   7160       BlockEntryInstr* block = block_it.Current(); |  | 
|   7161       const intptr_t postorder_number = block->postorder_number(); |  | 
|   7162  |  | 
|   7163       BitVector* kill = kill_[postorder_number]; |  | 
|   7164       BitVector* live_in = live_in_[postorder_number]; |  | 
|   7165       BitVector* live_out = live_out_[postorder_number]; |  | 
|   7166  |  | 
|   7167       ZoneGrowableArray<Instruction*>* exposed_stores = NULL; |  | 
|   7168  |  | 
|   7169       // Iterate backwards starting at the last instruction. |  | 
|   7170       for (BackwardInstructionIterator instr_it(block); |  | 
|   7171            !instr_it.Done(); |  | 
|   7172            instr_it.Advance()) { |  | 
|   7173         Instruction* instr = instr_it.Current(); |  | 
|   7174  |  | 
|   7175         bool is_load = false; |  | 
|   7176         bool is_store = false; |  | 
|   7177         Place place(instr, &is_load, &is_store); |  | 
|   7178         if (place.IsFinalField()) { |  | 
|   7179           // Loads/stores of final fields do not participate. |  | 
|   7180           continue; |  | 
|   7181         } |  | 
|   7182  |  | 
|   7183         // Handle stores. |  | 
|   7184         if (is_store) { |  | 
|   7185           if (kill->Contains(instr->place_id())) { |  | 
|   7186             if (!live_in->Contains(instr->place_id()) && |  | 
|   7187                 CanEliminateStore(instr)) { |  | 
|   7188               if (FLAG_trace_optimization) { |  | 
|   7189                 OS::Print( |  | 
|   7190                     "Removing dead store to place %" Pd " in block B%" Pd "\n", |  | 
|   7191                     instr->place_id(), block->block_id()); |  | 
|   7192               } |  | 
|   7193               instr_it.RemoveCurrentFromGraph(); |  | 
|   7194             } |  | 
|   7195           } else if (!live_in->Contains(instr->place_id())) { |  | 
|   7196             // Mark this store as downward exposed: such stores are the only |  | 
|   7197             // candidates for the global store elimination. |  | 
|   7198             if (exposed_stores == NULL) { |  | 
|   7199               const intptr_t kMaxExposedStoresInitialSize = 5; |  | 
|   7200               exposed_stores = new(isolate) ZoneGrowableArray<Instruction*>( |  | 
|   7201                   Utils::Minimum(kMaxExposedStoresInitialSize, |  | 
|   7202                                  aliased_set_->max_place_id())); |  | 
|   7203             } |  | 
|   7204             exposed_stores->Add(instr); |  | 
|   7205           } |  | 
|   7206           // Interfering stores kill only loads from the same place. |  | 
|   7207           kill->Add(instr->place_id()); |  | 
|   7208           live_in->Remove(instr->place_id()); |  | 
|   7209           continue; |  | 
|   7210         } |  | 
|   7211  |  | 
|   7212         // Handle side effects, deoptimization and function return. |  | 
|   7213         if (!instr->Effects().IsNone() || |  | 
|   7214             instr->CanDeoptimize() || |  | 
|   7215             instr->IsThrow() || |  | 
|   7216             instr->IsReThrow() || |  | 
|   7217             instr->IsReturn()) { |  | 
|   7218           // Instructions that return from the function, instructions with side |  | 
|   7219           // effects and instructions that can deoptimize are treated as |  | 
|   7220           // loads from all places. |  | 
|   7221           live_in->CopyFrom(all_places); |  | 
|   7222           if (instr->IsThrow() || instr->IsReThrow() || instr->IsReturn()) { |  | 
|   7223             // Initialize live-out for exit blocks since it won't be computed |  | 
|   7224             // otherwise during the fixed point iteration. |  | 
|   7225             live_out->CopyFrom(all_places); |  | 
|   7226           } |  | 
|   7227           continue; |  | 
|   7228         } |  | 
|   7229  |  | 
|   7230         // Handle loads. |  | 
|   7231         Definition* defn = instr->AsDefinition(); |  | 
|   7232         if ((defn != NULL) && IsLoadEliminationCandidate(defn)) { |  | 
|   7233           const intptr_t alias = aliased_set_->LookupAliasId(place.ToAlias()); |  | 
|   7234           live_in->AddAll(aliased_set_->GetKilledSet(alias)); |  | 
|   7235           continue; |  | 
|   7236         } |  | 
|   7237       } |  | 
|   7238       exposed_stores_[postorder_number] = exposed_stores; |  | 
|   7239     } |  | 
|   7240     if (FLAG_trace_load_optimization) { |  | 
|   7241       Dump(); |  | 
|   7242       OS::Print("---\n"); |  | 
|   7243     } |  | 
|   7244   } |  | 
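
A standalone sketch (illustrative types, not from this CL) of the backward walk above for one straight-line block: "killed" tracks places overwritten later in the block, "live" tracks places read later, and a store to a killed but not live place is dead.

    #include <bitset>
    #include <vector>

    using Places = std::bitset<64>;

    struct Access {
      bool is_store;  // otherwise a load
      int place;      // numbered place, as produced by NumberPlaces
    };

    // Returns indices of removable stores in one straight-line block.
    std::vector<int> FindDeadStores(const std::vector<Access>& block) {
      Places live, killed;
      std::vector<int> dead;
      for (int i = static_cast<int>(block.size()) - 1; i >= 0; --i) {
        const Access& a = block[i];
        if (a.is_store) {
          if (killed.test(a.place) && !live.test(a.place)) dead.push_back(i);
          killed.set(a.place);  // a later store already covers this place
          live.reset(a.place);
        } else {
          live.set(a.place);    // the place is read here
        }
      }
      return dead;
    }

The cross-block half of the pass is the same test applied to the downward-exposed stores collected above, using live_out instead of the running live set.
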
|   7245  |  | 
|   7246   void EliminateDeadStores() { |  | 
|   7247     // Iteration order does not matter here. |  | 
|   7248     for (BlockIterator block_it = graph_->postorder_iterator(); |  | 
|   7249          !block_it.Done(); |  | 
|   7250          block_it.Advance()) { |  | 
|   7251       BlockEntryInstr* block = block_it.Current(); |  | 
|   7252       const intptr_t postorder_number = block->postorder_number(); |  | 
|   7253  |  | 
|   7254       BitVector* live_out = live_out_[postorder_number]; |  | 
|   7255  |  | 
|   7256       ZoneGrowableArray<Instruction*>* exposed_stores = |  | 
|   7257         exposed_stores_[postorder_number]; |  | 
|   7258       if (exposed_stores == NULL) continue;  // No exposed stores. |  | 
|   7259  |  | 
|   7260       // Iterate over candidate stores. |  | 
|   7261       for (intptr_t i = 0; i < exposed_stores->length(); ++i) { |  | 
|   7262         Instruction* instr = (*exposed_stores)[i]; |  | 
|   7263         bool is_load = false; |  | 
|   7264         bool is_store = false; |  | 
|   7265         Place place(instr, &is_load, &is_store); |  | 
|   7266         ASSERT(!is_load && is_store); |  | 
|   7267         if (place.IsFinalField()) { |  | 
|   7268           // Final fields do not participate in dead store elimination. |  | 
|   7269           continue; |  | 
|   7270         } |  | 
|   7271         // Eliminate a downward exposed store if the corresponding place is not |  | 
|   7272         // in live-out. |  | 
|   7273         if (!live_out->Contains(instr->place_id()) && |  | 
|   7274             CanEliminateStore(instr)) { |  | 
|   7275           if (FLAG_trace_optimization) { |  | 
|   7276             OS::Print("Removing dead store to place %" Pd " block B%" Pd "\n", |  | 
|   7277                       instr->place_id(), block->block_id()); |  | 
|   7278           } |  | 
|   7279           instr->RemoveFromGraph(/* ignored */ false); |  | 
|   7280         } |  | 
|   7281       } |  | 
|   7282     } |  | 
|   7283   } |  | 
|   7284  |  | 
|   7285   FlowGraph* graph_; |  | 
|   7286   DirectChainedHashMap<PointerKeyValueTrait<Place> >* map_; |  | 
|   7287  |  | 
|   7288   // Mapping between field offsets in words and expression ids of loads from |  | 
|   7289   // that offset. |  | 
|   7290   AliasedSet* aliased_set_; |  | 
|   7291  |  | 
|   7292   // Per block list of downward exposed stores. |  | 
|   7293   GrowableArray<ZoneGrowableArray<Instruction*>*> exposed_stores_; |  | 
|   7294  |  | 
|   7295   DISALLOW_COPY_AND_ASSIGN(StoreOptimizer); |  | 
|   7296 }; |  | 
|   7297  |  | 
|   7298  |  | 
|   7299 void DeadStoreElimination::Optimize(FlowGraph* graph) { |  | 
|   7300   if (FLAG_dead_store_elimination) { |  | 
|   7301     StoreOptimizer::OptimizeGraph(graph); |  | 
|   7302   } |  | 
|   7303 } |  | 
|   7304  |  | 
|   7305  |  | 
|   7306 // Returns true iff this definition is used in a non-phi instruction. |  | 
|   7307 static bool HasRealUse(Definition* def) { |  | 
|   7308   // Environment uses are real (non-phi) uses. |  | 
|   7309   if (def->env_use_list() != NULL) return true; |  | 
|   7310  |  | 
|   7311   for (Value::Iterator it(def->input_use_list()); |  | 
|   7312        !it.Done(); |  | 
|   7313        it.Advance()) { |  | 
|   7314     if (!it.Current()->instruction()->IsPhi()) return true; |  | 
|   7315   } |  | 
|   7316   return false; |  | 
|   7317 } |  | 
|   7318  |  | 
|   7319  |  | 
|   7320 void DeadCodeElimination::EliminateDeadPhis(FlowGraph* flow_graph) { |  | 
|   7321   GrowableArray<PhiInstr*> live_phis; |  | 
|   7322   for (BlockIterator b = flow_graph->postorder_iterator(); |  | 
|   7323        !b.Done(); |  | 
|   7324        b.Advance()) { |  | 
|   7325     JoinEntryInstr* join = b.Current()->AsJoinEntry(); |  | 
|   7326     if (join != NULL) { |  | 
|   7327       for (PhiIterator it(join); !it.Done(); it.Advance()) { |  | 
|   7328         PhiInstr* phi = it.Current(); |  | 
|   7329         // Phis that have uses and phis inside try blocks are |  | 
|   7330         // marked as live. |  | 
|   7331         if (HasRealUse(phi) || join->InsideTryBlock()) { |  | 
|   7332           live_phis.Add(phi); |  | 
|   7333           phi->mark_alive(); |  | 
|   7334         } else { |  | 
|   7335           phi->mark_dead(); |  | 
|   7336         } |  | 
|   7337       } |  | 
|   7338     } |  | 
|   7339   } |  | 
|   7340  |  | 
|   7341   while (!live_phis.is_empty()) { |  | 
|   7342     PhiInstr* phi = live_phis.RemoveLast(); |  | 
|   7343     for (intptr_t i = 0; i < phi->InputCount(); i++) { |  | 
|   7344       Value* val = phi->InputAt(i); |  | 
|   7345       PhiInstr* used_phi = val->definition()->AsPhi(); |  | 
|   7346       if ((used_phi != NULL) && !used_phi->is_alive()) { |  | 
|   7347         used_phi->mark_alive(); |  | 
|   7348         live_phis.Add(used_phi); |  | 
|   7349       } |  | 
|   7350     } |  | 
|   7351   } |  | 
|   7352  |  | 
|   7353   for (BlockIterator it(flow_graph->postorder_iterator()); |  | 
|   7354        !it.Done(); |  | 
|   7355        it.Advance()) { |  | 
|   7356     JoinEntryInstr* join = it.Current()->AsJoinEntry(); |  | 
|   7357     if (join != NULL) { |  | 
|   7358       if (join->phis_ == NULL) continue; |  | 
|   7359  |  | 
|   7360       // Eliminate dead phis and compact the phis_ array of the block. |  | 
|   7361       intptr_t to_index = 0; |  | 
|   7362       for (intptr_t i = 0; i < join->phis_->length(); ++i) { |  | 
|   7363         PhiInstr* phi = (*join->phis_)[i]; |  | 
|   7364         if (phi != NULL) { |  | 
|   7365           if (!phi->is_alive()) { |  | 
|   7366             phi->ReplaceUsesWith(flow_graph->constant_null()); |  | 
|   7367             phi->UnuseAllInputs(); |  | 
|   7368             (*join->phis_)[i] = NULL; |  | 
|   7369             if (FLAG_trace_optimization) { |  | 
|   7370               OS::Print("Removing dead phi v%" Pd "\n", phi->ssa_temp_index()); |  | 
|   7371             } |  | 
|   7372           } else if (phi->IsRedundant()) { |  | 
|   7373             phi->ReplaceUsesWith(phi->InputAt(0)->definition()); |  | 
|   7374             phi->UnuseAllInputs(); |  | 
|   7375             (*join->phis_)[i] = NULL; |  | 
|   7376             if (FLAG_trace_optimization) { |  | 
|   7377               OS::Print("Removing redundant phi v%" Pd "\n", |  | 
|   7378                          phi->ssa_temp_index()); |  | 
|   7379             } |  | 
|   7380           } else { |  | 
|   7381             (*join->phis_)[to_index++] = phi; |  | 
|   7382           } |  | 
|   7383         } |  | 
|   7384       } |  | 
|   7385       if (to_index == 0) { |  | 
|   7386         join->phis_ = NULL; |  | 
|   7387       } else { |  | 
|   7388         join->phis_->TruncateTo(to_index); |  | 
|   7389       } |  | 
|   7390     } |  | 
|   7391   } |  | 
|   7392 } |  | 
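
A standalone sketch (illustrative types, not from this CL) of the marking above: phis with a real (non-phi) use seed the worklist, liveness propagates backwards to the phis feeding them, and anything left unmarked is dead.

    #include <unordered_set>
    #include <vector>

    struct PhiDef {
      std::vector<PhiDef*> phi_inputs;  // operands that are themselves phis
      bool has_real_use = false;        // used by some non-phi instruction
    };

    std::unordered_set<PhiDef*> MarkLivePhis(const std::vector<PhiDef*>& phis) {
      std::vector<PhiDef*> worklist;
      std::unordered_set<PhiDef*> live;
      for (PhiDef* phi : phis) {
        if (phi->has_real_use && live.insert(phi).second) {
          worklist.push_back(phi);  // seed: phis with real uses
        }
      }
      while (!worklist.empty()) {
        PhiDef* phi = worklist.back();
        worklist.pop_back();
        for (PhiDef* input : phi->phi_inputs) {
          if (live.insert(input).second) worklist.push_back(input);
        }
      }
      return live;
    }
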
|   7393  |  | 
|   7394  |  | 
|   7395 class CSEInstructionMap : public ValueObject { |  | 
|   7396  public: |  | 
|   7397   // Right now CSE and LICM track a single effect: possible externalization of |  | 
|   7398   // strings. |  | 
|   7399   // Other effects like modifications of fields are tracked in a separate load |  | 
|   7400   // forwarding pass via Alias structure. |  | 
|   7401   COMPILE_ASSERT(EffectSet::kLastEffect == 1); |  | 
|   7402  |  | 
|   7403   CSEInstructionMap() : independent_(), dependent_() { } |  | 
|   7404   explicit CSEInstructionMap(const CSEInstructionMap& other) |  | 
|   7405       : ValueObject(), |  | 
|   7406         independent_(other.independent_), |  | 
|   7407         dependent_(other.dependent_) { |  | 
|   7408   } |  | 
|   7409  |  | 
|   7410   void RemoveAffected(EffectSet effects) { |  | 
|   7411     if (!effects.IsNone()) { |  | 
|   7412       dependent_.Clear(); |  | 
|   7413     } |  | 
|   7414   } |  | 
|   7415  |  | 
|   7416   Instruction* Lookup(Instruction* other) const { |  | 
|   7417     return GetMapFor(other)->Lookup(other); |  | 
|   7418   } |  | 
|   7419  |  | 
|   7420   void Insert(Instruction* instr) { |  | 
|   7421     return GetMapFor(instr)->Insert(instr); |  | 
|   7422   } |  | 
|   7423  |  | 
|   7424  private: |  | 
|   7425   typedef DirectChainedHashMap<PointerKeyValueTrait<Instruction> >  Map; |  | 
|   7426  |  | 
|   7427   Map* GetMapFor(Instruction* instr) { |  | 
|   7428     return instr->Dependencies().IsNone() ? &independent_ : &dependent_; |  | 
|   7429   } |  | 
|   7430  |  | 
|   7431   const Map* GetMapFor(Instruction* instr) const { |  | 
|   7432     return instr->Dependencies().IsNone() ? &independent_ : &dependent_; |  | 
|   7433   } |  | 
|   7434  |  | 
|   7435   // All computations that are not affected by any side-effect. |  | 
|   7436   // The majority of computations are not affected by anything and will be |  | 
|   7437   // in this map. |  | 
|   7438   Map independent_; |  | 
|   7439  |  | 
|   7440   // All computations that are affected by side effects. |  | 
|   7441   Map dependent_; |  | 
|   7442 }; |  | 
|   7443  |  | 
|   7444  |  | 
|   7445 bool DominatorBasedCSE::Optimize(FlowGraph* graph) { |  | 
|   7446   bool changed = false; |  | 
|   7447   if (FLAG_load_cse) { |  | 
|   7448     changed = LoadOptimizer::OptimizeGraph(graph) || changed; |  | 
|   7449   } |  | 
|   7450  |  | 
|   7451   CSEInstructionMap map; |  | 
|   7452   changed = OptimizeRecursive(graph, graph->graph_entry(), &map) || changed; |  | 
|   7453  |  | 
|   7454   return changed; |  | 
|   7455 } |  | 
|   7456  |  | 
|   7457  |  | 
|   7458 bool DominatorBasedCSE::OptimizeRecursive( |  | 
|   7459     FlowGraph* graph, |  | 
|   7460     BlockEntryInstr* block, |  | 
|   7461     CSEInstructionMap* map) { |  | 
|   7462   bool changed = false; |  | 
|   7463   for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) { |  | 
|   7464     Instruction* current = it.Current(); |  | 
|   7465     if (current->AllowsCSE()) { |  | 
|   7466       Instruction* replacement = map->Lookup(current); |  | 
|   7467       if ((replacement != NULL) && |  | 
|   7468           graph->block_effects()->IsAvailableAt(replacement, block)) { |  | 
|   7469         // Replace current with lookup result. |  | 
|   7470         ReplaceCurrentInstruction(&it, current, replacement, graph); |  | 
|   7471         changed = true; |  | 
|   7472         continue; |  | 
|   7473       } |  | 
|   7474  |  | 
|   7475       // For simplicity we assume that an instruction either does not depend |  | 
|   7476       // on anything or does not affect anything. If this is not the case |  | 
|   7477       // then we should first remove affected instructions from the map and |  | 
|   7478       // then add the instruction to the map so that it does not kill itself. |  | 
|   7479       ASSERT(current->Effects().IsNone() || current->Dependencies().IsNone()); |  | 
|   7480       map->Insert(current); |  | 
|   7481     } |  | 
|   7482  |  | 
|   7483     map->RemoveAffected(current->Effects()); |  | 
|   7484   } |  | 
|   7485  |  | 
|   7486   // Process children in the dominator tree recursively. |  | 
|   7487   intptr_t num_children = block->dominated_blocks().length(); |  | 
|   7488   for (intptr_t i = 0; i < num_children; ++i) { |  | 
|   7489     BlockEntryInstr* child = block->dominated_blocks()[i]; |  | 
|   7490     if (i < num_children - 1) { |  | 
|   7491       // Copy map. |  | 
|   7492       CSEInstructionMap child_map(*map); |  | 
|   7493       changed = OptimizeRecursive(graph, child, &child_map) || changed; |  | 
|   7494     } else { |  | 
|   7495       // Reuse map for the last child. |  | 
|   7496       changed = OptimizeRecursive(graph, child, map) || changed; |  | 
|   7497     } |  | 
|   7498   } |  | 
|   7499   return changed; |  | 
|   7500 } |  | 
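
OptimizeRecursive threads the map of available instructions down the dominator tree, copying it for every child except the last and reusing it for the last one, which is safe because the parent is finished with the map by then. A standalone sketch with illustrative types (not from this CL):

    #include <cstddef>
    #include <set>
    #include <string>
    #include <vector>

    struct DomNode {
      std::vector<std::string> exprs;  // expression keys computed here
      std::vector<DomNode*> dominated; // children in the dominator tree
      std::vector<bool> is_repeat;     // output: expr already available
    };

    void Cse(DomNode* block, std::set<std::string>& available) {
      block->is_repeat.assign(block->exprs.size(), false);
      for (std::size_t i = 0; i < block->exprs.size(); ++i) {
        // An expression already in the map was computed on every path here,
        // because the map only ever flows along dominator-tree edges.
        block->is_repeat[i] = !available.insert(block->exprs[i]).second;
      }
      const std::size_t n = block->dominated.size();
      for (std::size_t i = 0; i < n; ++i) {
        if (i + 1 < n) {
          std::set<std::string> copy = available;  // protect the siblings
          Cse(block->dominated[i], copy);
        } else {
          Cse(block->dominated[i], available);     // reuse for the last child
        }
      }
    }
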
|   7501  |  | 
|   7502  |     25  | 
|   7503 ConstantPropagator::ConstantPropagator( |     26 ConstantPropagator::ConstantPropagator( | 
|   7504     FlowGraph* graph, |     27     FlowGraph* graph, | 
|   7505     const GrowableArray<BlockEntryInstr*>& ignored) |     28     const GrowableArray<BlockEntryInstr*>& ignored) | 
|   7506     : FlowGraphVisitor(ignored), |     29     : FlowGraphVisitor(ignored), | 
|   7507       graph_(graph), |     30       graph_(graph), | 
|   7508       unknown_(Object::unknown_constant()), |     31       unknown_(Object::unknown_constant()), | 
|   7509       non_constant_(Object::non_constant()), |     32       non_constant_(Object::non_constant()), | 
|   7510       reachable_(new(graph->isolate()) BitVector( |     33       reachable_(new(graph->isolate()) BitVector( | 
|   7511           graph->isolate(), graph->preorder().length())), |     34           graph->isolate(), graph->preorder().length())), | 
| (...skipping 683 matching lines...) | 
|   8195 void ConstantPropagator::VisitInstanceOf(InstanceOfInstr* instr) { |    718 void ConstantPropagator::VisitInstanceOf(InstanceOfInstr* instr) { | 
|   8196   const Definition* def = instr->value()->definition(); |    719   const Definition* def = instr->value()->definition(); | 
|   8197   const Object& value = def->constant_value(); |    720   const Object& value = def->constant_value(); | 
|   8198   if (IsNonConstant(value)) { |    721   if (IsNonConstant(value)) { | 
|   8199     const AbstractType& checked_type = instr->type(); |    722     const AbstractType& checked_type = instr->type(); | 
|   8200     intptr_t value_cid = instr->value()->Type()->ToCid(); |    723     intptr_t value_cid = instr->value()->Type()->ToCid(); | 
|   8201     Representation rep = def->representation(); |    724     Representation rep = def->representation(); | 
|   8202     if ((checked_type.IsFloat32x4Type() && (rep == kUnboxedFloat32x4)) || |    725     if ((checked_type.IsFloat32x4Type() && (rep == kUnboxedFloat32x4)) || | 
|   8203         (checked_type.IsInt32x4Type() && (rep == kUnboxedInt32x4)) || |    726         (checked_type.IsInt32x4Type() && (rep == kUnboxedInt32x4)) || | 
|   8204         (checked_type.IsDoubleType() && (rep == kUnboxedDouble) && |    727         (checked_type.IsDoubleType() && (rep == kUnboxedDouble) && | 
|   8205          CanUnboxDouble()) || |    728          FlowGraphCompiler::SupportsUnboxedDoubles()) || | 
|   8206         (checked_type.IsIntType() && (rep == kUnboxedMint))) { |    729         (checked_type.IsIntType() && (rep == kUnboxedMint))) { | 
|   8207       // Ensure that compile time type matches representation. |    730       // Ensure that compile time type matches representation. | 
|   8208       ASSERT(((rep == kUnboxedFloat32x4) && (value_cid == kFloat32x4Cid)) || |    731       ASSERT(((rep == kUnboxedFloat32x4) && (value_cid == kFloat32x4Cid)) || | 
|   8209              ((rep == kUnboxedInt32x4) && (value_cid == kInt32x4Cid)) || |    732              ((rep == kUnboxedInt32x4) && (value_cid == kInt32x4Cid)) || | 
|   8210              ((rep == kUnboxedDouble) && (value_cid == kDoubleCid)) || |    733              ((rep == kUnboxedDouble) && (value_cid == kDoubleCid)) || | 
|   8211              ((rep == kUnboxedMint) && (value_cid == kMintCid))); |    734              ((rep == kUnboxedMint) && (value_cid == kMintCid))); | 
|   8212       // The representation guarantees the type check to be true. |    735       // The representation guarantees the type check to be true. | 
|   8213       SetValue(instr, instr->negate_result() ? Bool::False() : Bool::True()); |    736       SetValue(instr, instr->negate_result() ? Bool::False() : Bool::True()); | 
|   8214     } else { |    737     } else { | 
|   8215       SetValue(instr, non_constant_); |    738       SetValue(instr, non_constant_); | 
| (...skipping 889 matching lines...) | 
|   9105   GrowableArray<BitVector*> dominance_frontier; |   1628   GrowableArray<BitVector*> dominance_frontier; | 
|   9106   graph_->ComputeDominators(&dominance_frontier); |   1629   graph_->ComputeDominators(&dominance_frontier); | 
|   9107  |   1630  | 
|   9108   if (FLAG_trace_constant_propagation) { |   1631   if (FLAG_trace_constant_propagation) { | 
|   9109     OS::Print("\n==== After constant propagation ====\n"); |   1632     OS::Print("\n==== After constant propagation ====\n"); | 
|   9110     FlowGraphPrinter printer(*graph_); |   1633     FlowGraphPrinter printer(*graph_); | 
|   9111     printer.PrintBlocks(); |   1634     printer.PrintBlocks(); | 
|   9112   } |   1635   } | 
|   9113 } |   1636 } | 
|   9114  |   1637  | 
|   9115  |  | 
|   9116 // Returns true if the given phi has a single input use and its environment |  | 
|   9117 // uses occur either at the corresponding block entry or at the same |  | 
|   9118 // instruction as that input use. |  | 
|   9119 static bool PhiHasSingleUse(PhiInstr* phi, Value* use) { |  | 
|   9120   if ((use->next_use() != NULL) || (phi->input_use_list() != use)) { |  | 
|   9121     return false; |  | 
|   9122   } |  | 
|   9123  |  | 
|   9124   BlockEntryInstr* block = phi->block(); |  | 
|   9125   for (Value* env_use = phi->env_use_list(); |  | 
|   9126        env_use != NULL; |  | 
|   9127        env_use = env_use->next_use()) { |  | 
|   9128     if ((env_use->instruction() != block) && |  | 
|   9129         (env_use->instruction() != use->instruction())) { |  | 
|   9130       return false; |  | 
|   9131     } |  | 
|   9132   } |  | 
|   9133  |  | 
|   9134   return true; |  | 
|   9135 } |  | 
|   9136  |  | 
|   9137  |  | 
|   9138 bool BranchSimplifier::Match(JoinEntryInstr* block) { |  | 
|   9139   // Match the pattern of a branch on a comparison whose left operand is a |  | 
|   9140   // phi from the same block, and whose right operand is a constant. |  | 
|   9141   // |  | 
|   9142   //   Branch(Comparison(kind, Phi, Constant)) |  | 
|   9143   // |  | 
|   9144   // These are the branches produced by inlining in a test context.  Also, |  | 
|   9145   // the phi has no other uses so it can simply be eliminated.  The block |  | 
|   9146   // has no other phis and no instructions intervening between the phi and |  | 
|   9147   // branch so the block can simply be eliminated. |  | 
|   9148   BranchInstr* branch = block->last_instruction()->AsBranch(); |  | 
|   9149   ASSERT(branch != NULL); |  | 
|   9150   ComparisonInstr* comparison = branch->comparison(); |  | 
|   9151   Value* left = comparison->left(); |  | 
|   9152   PhiInstr* phi = left->definition()->AsPhi(); |  | 
|   9153   Value* right = comparison->right(); |  | 
|   9154   ConstantInstr* constant = |  | 
|   9155       (right == NULL) ? NULL : right->definition()->AsConstant(); |  | 
|   9156   return (phi != NULL) && |  | 
|   9157       (constant != NULL) && |  | 
|   9158       (phi->GetBlock() == block) && |  | 
|   9159       PhiHasSingleUse(phi, left) && |  | 
|   9160       (block->next() == branch) && |  | 
|   9161       (block->phis()->length() == 1); |  | 
|   9162 } |  | 
|   9163  |  | 
|   9164  |  | 
|   9165 JoinEntryInstr* BranchSimplifier::ToJoinEntry(Isolate* isolate, |  | 
|   9166                                               TargetEntryInstr* target) { |  | 
|   9167   // Convert a target block into a join block.  Branches will be duplicated |  | 
|   9168   // so the former true and false targets become joins of the control flows |  | 
|   9169   // from all the duplicated branches. |  | 
|   9170   JoinEntryInstr* join = |  | 
|   9171       new(isolate) JoinEntryInstr(target->block_id(), target->try_index()); |  | 
|   9172   join->InheritDeoptTarget(isolate, target); |  | 
|   9173   join->LinkTo(target->next()); |  | 
|   9174   join->set_last_instruction(target->last_instruction()); |  | 
|   9175   target->UnuseAllInputs(); |  | 
|   9176   return join; |  | 
|   9177 } |  | 
|   9178  |  | 
|   9179  |  | 
|   9180 BranchInstr* BranchSimplifier::CloneBranch(Isolate* isolate, |  | 
|   9181                                            BranchInstr* branch, |  | 
|   9182                                            Value* new_left, |  | 
|   9183                                            Value* new_right) { |  | 
|   9184   ComparisonInstr* comparison = branch->comparison(); |  | 
|   9185   ComparisonInstr* new_comparison = |  | 
|   9186       comparison->CopyWithNewOperands(new_left, new_right); |  | 
|   9187   BranchInstr* new_branch = new(isolate) BranchInstr(new_comparison); |  | 
|   9188   new_branch->set_is_checked(branch->is_checked()); |  | 
|   9189   return new_branch; |  | 
|   9190 } |  | 
|   9191  |  | 
|   9192  |  | 
|   9193 void BranchSimplifier::Simplify(FlowGraph* flow_graph) { |  | 
|   9194   // Optimize some branches that test the value of a phi.  When it is safe |  | 
|   9195   // to do so, push the branch to each of the predecessor blocks.  This is |  | 
|   9196   // an optimization when (a) it can avoid materializing a boolean object at |  | 
|   9197   // the phi only to test its value, and (b) it can expose opportunities for |  | 
|   9198   // constant propagation and unreachable code elimination.  This |  | 
|   9199   // optimization is intended to run after inlining which creates |  | 
|   9200   // opportunities for optimization (a) and before constant folding which |  | 
|   9201   // can perform optimization (b). |  | 
|   9202  |  | 
|   9203   // Begin with a worklist of join blocks ending in branches.  They are |  | 
|   9204   // candidates for the pattern below. |  | 
|   9205   Isolate* isolate = flow_graph->isolate(); |  | 
|   9206   const GrowableArray<BlockEntryInstr*>& postorder = flow_graph->postorder(); |  | 
|   9207   GrowableArray<BlockEntryInstr*> worklist(postorder.length()); |  | 
|   9208   for (BlockIterator it(postorder); !it.Done(); it.Advance()) { |  | 
|   9209     BlockEntryInstr* block = it.Current(); |  | 
|   9210     if (block->IsJoinEntry() && block->last_instruction()->IsBranch()) { |  | 
|   9211       worklist.Add(block); |  | 
|   9212     } |  | 
|   9213   } |  | 
|   9214  |  | 
|   9215   // Rewrite until no more instances of the pattern exist. |  | 
|   9216   bool changed = false; |  | 
|   9217   while (!worklist.is_empty()) { |  | 
|   9218     // All blocks in the worklist are join blocks (ending with a branch). |  | 
|   9219     JoinEntryInstr* block = worklist.RemoveLast()->AsJoinEntry(); |  | 
|   9220     ASSERT(block != NULL); |  | 
|   9221  |  | 
|   9222     if (Match(block)) { |  | 
|   9223       changed = true; |  | 
|   9224  |  | 
|   9225       // The branch will be copied and pushed to all the join's |  | 
|   9226       // predecessors.  Convert the true and false target blocks into join |  | 
|   9227       // blocks to join the control flows from all of the true |  | 
|   9228       // (respectively, false) targets of the copied branches. |  | 
|   9229       // |  | 
|   9230       // The converted join block will have no phis, so it cannot be another |  | 
|   9231       // instance of the pattern.  There is thus no need to add it to the |  | 
|   9232       // worklist. |  | 
|   9233       BranchInstr* branch = block->last_instruction()->AsBranch(); |  | 
|   9234       ASSERT(branch != NULL); |  | 
|   9235       JoinEntryInstr* join_true = |  | 
|   9236           ToJoinEntry(isolate, branch->true_successor()); |  | 
|   9237       JoinEntryInstr* join_false = |  | 
|   9238           ToJoinEntry(isolate, branch->false_successor()); |  | 
|   9239  |  | 
|   9240       ComparisonInstr* comparison = branch->comparison(); |  | 
|   9241       PhiInstr* phi = comparison->left()->definition()->AsPhi(); |  | 
|   9242       ConstantInstr* constant = comparison->right()->definition()->AsConstant(); |  | 
|   9243       ASSERT(constant != NULL); |  | 
|   9244       // Copy the constant and branch and push it to all the predecessors. |  | 
|   9245       for (intptr_t i = 0, count = block->PredecessorCount(); i < count; ++i) { |  | 
|   9246         GotoInstr* old_goto = |  | 
|   9247             block->PredecessorAt(i)->last_instruction()->AsGoto(); |  | 
|   9248         ASSERT(old_goto != NULL); |  | 
|   9249  |  | 
|   9250         // Replace the goto in each predecessor with a branch rewritten |  |
|   9251         // to use the corresponding phi input instead of the phi. |  |
|   9252         Value* new_left = phi->InputAt(i)->Copy(isolate); |  | 
|   9253         Value* new_right = new(isolate) Value(constant); |  | 
|   9254         BranchInstr* new_branch = |  | 
|   9255             CloneBranch(isolate, branch, new_left, new_right); |  | 
|   9256         if (branch->env() == NULL) { |  | 
|   9257           new_branch->InheritDeoptTarget(isolate, old_goto); |  | 
|   9258         } else { |  | 
|   9259           // Take the environment from the branch if it has one. |  | 
|   9260           new_branch->InheritDeoptTarget(isolate, branch); |  | 
|   9261           // InheritDeoptTarget gave the new branch's comparison the same |  | 
|   9262           // deopt id that it gave the new branch.  The id should be the |  | 
|   9263           // deopt id of the original comparison. |  | 
|   9264           new_branch->comparison()->SetDeoptId(*comparison); |  | 
|   9265           // The phi can be used in the branch's environment.  Rename such |  | 
|   9266           // uses. |  | 
|   9267           for (Environment::DeepIterator it(new_branch->env()); |  | 
|   9268                !it.Done(); |  | 
|   9269                it.Advance()) { |  | 
|   9270             Value* use = it.CurrentValue(); |  | 
|   9271             if (use->definition() == phi) { |  | 
|   9272               Definition* replacement = phi->InputAt(i)->definition(); |  | 
|   9273               use->RemoveFromUseList(); |  | 
|   9274               use->set_definition(replacement); |  | 
|   9275               replacement->AddEnvUse(use); |  | 
|   9276             } |  | 
|   9277           } |  | 
|   9278         } |  | 
|   9279  |  | 
|   9280         new_branch->InsertBefore(old_goto); |  | 
|   9281         new_branch->set_next(NULL);  // Detaching the goto from the graph. |  | 
|   9282         old_goto->UnuseAllInputs(); |  | 
|   9283  |  | 
|   9284         // Update the predecessor block.  We may have created another |  | 
|   9285         // instance of the pattern so add it to the worklist if necessary. |  | 
|   9286         BlockEntryInstr* branch_block = new_branch->GetBlock(); |  | 
|   9287         branch_block->set_last_instruction(new_branch); |  | 
|   9288         if (branch_block->IsJoinEntry()) worklist.Add(branch_block); |  | 
|   9289  |  | 
|   9290         // Connect the branch to the true and false joins, via empty target |  | 
|   9291         // blocks. |  | 
|   9292         TargetEntryInstr* true_target = |  | 
|   9293             new(isolate) TargetEntryInstr(flow_graph->max_block_id() + 1, |  | 
|   9294                                           block->try_index()); |  | 
|   9295         true_target->InheritDeoptTarget(isolate, join_true); |  | 
|   9296         TargetEntryInstr* false_target = |  | 
|   9297             new(isolate) TargetEntryInstr(flow_graph->max_block_id() + 2, |  | 
|   9298                                           block->try_index()); |  | 
|   9299         false_target->InheritDeoptTarget(isolate, join_false); |  | 
|   9300         flow_graph->set_max_block_id(flow_graph->max_block_id() + 2); |  | 
|   9301         *new_branch->true_successor_address() = true_target; |  | 
|   9302         *new_branch->false_successor_address() = false_target; |  | 
|   9303         GotoInstr* goto_true = new(isolate) GotoInstr(join_true); |  | 
|   9304         goto_true->InheritDeoptTarget(isolate, join_true); |  | 
|   9305         true_target->LinkTo(goto_true); |  | 
|   9306         true_target->set_last_instruction(goto_true); |  | 
|   9307         GotoInstr* goto_false = new(isolate) GotoInstr(join_false); |  | 
|   9308         goto_false->InheritDeoptTarget(isolate, join_false); |  | 
|   9309         false_target->LinkTo(goto_false); |  | 
|   9310         false_target->set_last_instruction(goto_false); |  | 
|   9311       } |  | 
|   9312       // When all predecessors have been rewritten, the original block is |  | 
|   9313       // unreachable from the graph. |  | 
|   9314       phi->UnuseAllInputs(); |  | 
|   9315       branch->UnuseAllInputs(); |  | 
|   9316       block->UnuseAllInputs(); |  | 
|   9317       ASSERT(!phi->HasUses()); |  | 
|   9318     } |  | 
|   9319   } |  | 
|   9320  |  | 
|   9321   if (changed) { |  | 
|   9322     // We may have changed the block order and the dominator tree. |  | 
|   9323     flow_graph->DiscoverBlocks(); |  | 
|   9324     GrowableArray<BitVector*> dominance_frontier; |  | 
|   9325     flow_graph->ComputeDominators(&dominance_frontier); |  | 
|   9326   } |  | 
|   9327 } |  | 
|   9328  |  | 
|   9329  |  | 
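|        // A block is trivial here if it is a target entry with a single |  |
|        // predecessor that contains either nothing but its last instruction |  |
|        // or exactly `defn` followed by that last instruction. |  |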
|   9330 static bool IsTrivialBlock(BlockEntryInstr* block, Definition* defn) { |  | 
|   9331   return (block->IsTargetEntry() && (block->PredecessorCount() == 1)) && |  | 
|   9332     ((block->next() == block->last_instruction()) || |  | 
|   9333      ((block->next() == defn) && (defn->next() == block->last_instruction()))); |  | 
|   9334 } |  | 
|   9335  |  | 
|   9336  |  | 
|   9337 static void EliminateTrivialBlock(BlockEntryInstr* block, |  | 
|   9338                                   Definition* instr, |  | 
|   9339                                   IfThenElseInstr* before) { |  | 
|   9340   block->UnuseAllInputs(); |  | 
|   9341   block->last_instruction()->UnuseAllInputs(); |  | 
|   9342  |  | 
|   9343   if ((block->next() == instr) && |  | 
|   9344       (instr->next() == block->last_instruction())) { |  | 
|   9345     before->previous()->LinkTo(instr); |  | 
|   9346     instr->LinkTo(before); |  | 
|   9347   } |  | 
|   9348 } |  | 
|   9349  |  | 
|   9350  |  | 
|   9351 void IfConverter::Simplify(FlowGraph* flow_graph) { |  | 
|   9352   Isolate* isolate = flow_graph->isolate(); |  | 
|   9353   bool changed = false; |  | 
|   9354  |  | 
|   9355   const GrowableArray<BlockEntryInstr*>& postorder = flow_graph->postorder(); |  | 
|   9356   for (BlockIterator it(postorder); !it.Done(); it.Advance()) { |  | 
|   9357     BlockEntryInstr* block = it.Current(); |  | 
|   9358     JoinEntryInstr* join = block->AsJoinEntry(); |  | 
|   9359  |  | 
|   9360     // Detect a diamond control flow pattern which materializes a value |  |
|   9361     // depending on the result of the comparison: |  |
|   9362     // |  | 
|   9363     // B_pred: |  | 
|   9364     //   ... |  | 
|   9365     //   Branch if COMP goto (B_pred1, B_pred2) |  | 
|   9366     // B_pred1: -- trivial block that contains at most one definition |  | 
|   9367     //   v1 = Constant(...) |  | 
|   9368     //   goto B_block |  | 
|   9369     // B_pred2: -- trivial block that contains at most one definition |  | 
|   9370     //   v2 = Constant(...) |  | 
|   9371     //   goto B_block |  | 
|   9372     // B_block: |  | 
|   9373     //   v3 = phi(v1, v2) -- single phi |  | 
|   9374     // |  | 
|   9375     // and replace it with |  | 
|   9376     // |  | 
|   9377     // Ba: |  | 
|   9378     //   v3 = IfThenElse(COMP ? v1 : v2) |  | 
|   9379     // |  | 
|   9380     if ((join != NULL) && |  | 
|   9381         (join->phis() != NULL) && |  | 
|   9382         (join->phis()->length() == 1) && |  | 
|   9383         (block->PredecessorCount() == 2)) { |  | 
|   9384       BlockEntryInstr* pred1 = block->PredecessorAt(0); |  | 
|   9385       BlockEntryInstr* pred2 = block->PredecessorAt(1); |  | 
|   9386  |  | 
|   9387       PhiInstr* phi = (*join->phis())[0]; |  | 
|   9388       Value* v1 = phi->InputAt(0); |  | 
|   9389       Value* v2 = phi->InputAt(1); |  | 
|   9390  |  | 
|   9391       if (IsTrivialBlock(pred1, v1->definition()) && |  | 
|   9392           IsTrivialBlock(pred2, v2->definition()) && |  | 
|   9393           (pred1->PredecessorAt(0) == pred2->PredecessorAt(0))) { |  | 
|   9394         BlockEntryInstr* pred = pred1->PredecessorAt(0); |  | 
|   9395         BranchInstr* branch = pred->last_instruction()->AsBranch(); |  | 
|   9396         ComparisonInstr* comparison = branch->comparison(); |  | 
|   9397  |  | 
|   9398         // Check if the platform supports an efficient branchless |  |
|   9399         // IfThenElseInstr for the given combination of comparison and |  |
|   9400         // values flowing from the false and true paths. |  |
|   9401         if (IfThenElseInstr::Supports(comparison, v1, v2)) { |  | 
|   9402           Value* if_true = (pred1 == branch->true_successor()) ? v1 : v2; |  | 
|   9403           Value* if_false = (pred2 == branch->true_successor()) ? v1 : v2; |  | 
|   9404  |  | 
|   9405           ComparisonInstr* new_comparison = |  | 
|   9406               comparison->CopyWithNewOperands( |  | 
|   9407                   comparison->left()->Copy(isolate), |  | 
|   9408                   comparison->right()->Copy(isolate)); |  | 
|   9409           IfThenElseInstr* if_then_else = new(isolate) IfThenElseInstr( |  | 
|   9410               new_comparison, |  | 
|   9411               if_true->Copy(isolate), |  | 
|   9412               if_false->Copy(isolate)); |  | 
|   9413           flow_graph->InsertBefore(branch, |  | 
|   9414                                    if_then_else, |  | 
|   9415                                    NULL, |  | 
|   9416                                    FlowGraph::kValue); |  | 
|   9417  |  | 
|   9418           phi->ReplaceUsesWith(if_then_else); |  | 
|   9419  |  | 
|   9420           // Connect the IfThenElseInstr to the first instruction in |  |
|   9421           // the merge block, effectively eliminating the diamond. |  |
|   9422           // The current block as well as pred1 and pred2 are no longer |  |
|   9423           // in the graph at this point. |  |
|   9424           if_then_else->LinkTo(join->next()); |  | 
|   9425           pred->set_last_instruction(join->last_instruction()); |  | 
|   9426  |  | 
|   9427           // The resulting block must inherit its block id from the |  |
|   9428           // eliminated current block to guarantee that the ordering of |  |
|   9429           // phi operands in its successor stays consistent. |  |
|   9430           pred->set_block_id(block->block_id()); |  | 
|   9431  |  | 
|   9432           // If v1 and v2 were defined inside the eliminated blocks |  |
|   9433           // pred1/pred2, move them out before the inserted IfThenElse. |  |
|   9434           EliminateTrivialBlock(pred1, v1->definition(), if_then_else); |  | 
|   9435           EliminateTrivialBlock(pred2, v2->definition(), if_then_else); |  | 
|   9436  |  | 
|   9437           // Update use lists to reflect changes in the graph. |  | 
|   9438           phi->UnuseAllInputs(); |  | 
|   9439           branch->UnuseAllInputs(); |  | 
|   9440           block->UnuseAllInputs(); |  | 
|   9441  |  | 
|   9442           // The graph has changed. Recompute dominators and block orders after |  | 
|   9443           // this pass is finished. |  | 
|   9444           changed = true; |  | 
|   9445         } |  | 
|   9446       } |  | 
|   9447     } |  | 
|   9448   } |  | 
|   9449  |  | 
|   9450   if (changed) { |  | 
|   9451     // We may have changed the block order and the dominator tree. |  | 
|   9452     flow_graph->DiscoverBlocks(); |  | 
|   9453     GrowableArray<BitVector*> dominance_frontier; |  | 
|   9454     flow_graph->ComputeDominators(&dominance_frontier); |  | 
|   9455   } |  | 
|   9456 } |  | 
|   9457  |  | 
|   9458  |  | 
|   9459 void FlowGraphOptimizer::EliminateEnvironments() { |  | 
|   9460   // After this pass we can no longer perform LICM or hoist instructions |  |
|   9461   // that can deoptimize. |  |
|   9462  |  | 
|   9463   flow_graph_->disallow_licm(); |  | 
|   9464   for (intptr_t i = 0; i < block_order_.length(); ++i) { |  | 
|   9465     BlockEntryInstr* block = block_order_[i]; |  | 
|   9466     block->RemoveEnvironment(); |  | 
|   9467     for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) { |  | 
|   9468       Instruction* current = it.Current(); |  | 
|   9469       if (!current->CanDeoptimize()) { |  | 
|   9470         // TODO(srdjan): --source-lines needs deopt environments to get at |  |
|   9471         // the code for this instruction; however, leaving the environment |  |
|   9472         // in place changes the generated code. |  |
|   9473         current->RemoveEnvironment(); |  | 
|   9474       } |  | 
|   9475     } |  | 
|   9476   } |  | 
|   9477 } |  | 
|   9478  |  | 
|   9479  |  | 
|   9480 enum SafeUseCheck { kOptimisticCheck, kStrictCheck }; |  | 
|   9481  |  | 
|   9482 // Check if the use is safe for allocation sinking. Allocation sinking |  | 
|   9483 // candidates can only be used at store instructions: |  | 
|   9484 // |  | 
|   9485 //     - any store into the allocation candidate itself is unconditionally safe |  | 
|   9486 //       as it just changes the rematerialization state of this candidate; |  | 
|   9487 //     - a store into another object is only safe if that object is itself |  |
|   9488 //       an allocation sinking candidate. |  |
|   9489 // |  | 
|   9490 // We use a simple fix-point algorithm to discover the set of valid candidates |  |
|   9491 // (see the CollectCandidates method), which is why IsSafeUse can operate in |  |
|   9492 // two modes: |  |
|   9493 // |  | 
|   9494 //     - optimistic, when every allocation is assumed to be an allocation |  | 
|   9495 //       sinking candidate; |  | 
|   9496 //     - strict, when only marked allocations are assumed to be allocation |  | 
|   9497 //       sinking candidates. |  | 
|   9498 // |  | 
|   9499 // The fix-point algorithm in CollectCandidates first collects a set of |  |
|   9500 // allocations optimistically and then checks each collected candidate |  |
|   9501 // strictly, unmarking invalid ones transitively until only valid ones remain. |  |
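|        // |  |
|        // For example (Dart source, purely illustrative): |  |
|        // |  |
|        //     var p = new Point(x, y);  // candidate |  |
|        //     var l = new Line(p, p);   // candidate |  |
|        // |  |
|        // Storing p into l is safe only while l itself remains a candidate; |  |
|        // if l is unmarked, p must be unmarked as well, transitively. |  |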
|   9502 static bool IsSafeUse(Value* use, SafeUseCheck check_type) { |  | 
|   9503   if (use->instruction()->IsMaterializeObject()) { |  | 
|   9504     return true; |  | 
|   9505   } |  | 
|   9506  |  | 
|   9507   StoreInstanceFieldInstr* store = use->instruction()->AsStoreInstanceField(); |  | 
|   9508   if (store != NULL) { |  | 
|   9509     if (use == store->value()) { |  | 
|   9510       Definition* instance = store->instance()->definition(); |  | 
|   9511       return instance->IsAllocateObject() && |  | 
|   9512           ((check_type == kOptimisticCheck) || |  | 
|   9513            instance->Identity().IsAllocationSinkingCandidate()); |  | 
|   9514     } |  | 
|   9515     return true; |  | 
|   9516   } |  | 
|   9517  |  | 
|   9518   return false; |  | 
|   9519 } |  | 
|   9520  |  | 
|   9521  |  | 
|   9522 // Right now we are attempting to sink allocations only into |  |
|   9523 // deoptimization exits, so a candidate should only be used in |  |
|   9524 // StoreInstanceField instructions that write into fields of the allocated |  |
|   9525 // object. (Type arguments get their own slot; see InsertMaterializations.) |  |
|   9526 static bool IsAllocationSinkingCandidate(Definition* alloc, |  | 
|   9527                                          SafeUseCheck check_type) { |  | 
|   9528   for (Value* use = alloc->input_use_list(); |  | 
|   9529        use != NULL; |  | 
|   9530        use = use->next_use()) { |  | 
|   9531     if (!IsSafeUse(use, check_type)) { |  | 
|   9532       if (FLAG_trace_optimization) { |  | 
|   9533         OS::Print("use of %s at %s is unsafe for allocation sinking\n", |  | 
|   9534                   alloc->ToCString(), |  | 
|   9535                   use->instruction()->ToCString()); |  | 
|   9536       } |  | 
|   9537       return false; |  | 
|   9538     } |  | 
|   9539   } |  | 
|   9540  |  | 
|   9541   return true; |  | 
|   9542 } |  | 
|   9543  |  | 
|   9544  |  | 
|   9545 // If the given use is a store into an object then return the object we are |  |
|   9546 // storing into. |  |
|   9547 static Definition* StoreInto(Value* use) { |  | 
|   9548   StoreInstanceFieldInstr* store = use->instruction()->AsStoreInstanceField(); |  | 
|   9549   if (store != NULL) { |  | 
|   9550     return store->instance()->definition(); |  | 
|   9551   } |  | 
|   9552  |  | 
|   9553   return NULL; |  | 
|   9554 } |  | 
|   9555  |  | 
|   9556  |  | 
|   9557 // Remove the given allocation from the graph. It is not observable. |  | 
|   9558 // If deoptimization occurs, the object will be materialized. |  |
|   9559 void AllocationSinking::EliminateAllocation(Definition* alloc) { |  | 
|   9560   ASSERT(IsAllocationSinkingCandidate(alloc, kStrictCheck)); |  | 
|   9561  |  | 
|   9562   if (FLAG_trace_optimization) { |  | 
|   9563     OS::Print("removing allocation from the graph: v%" Pd "\n", |  | 
|   9564               alloc->ssa_temp_index()); |  | 
|   9565   } |  | 
|   9566  |  | 
|   9567   // As an allocation sinking candidate it is only used in stores to its own |  | 
|   9568   // fields. Remove these stores. |  | 
|   9569   for (Value* use = alloc->input_use_list(); |  | 
|   9570        use != NULL; |  | 
|   9571        use = alloc->input_use_list()) { |  | 
|   9572     use->instruction()->RemoveFromGraph(); |  | 
|   9573   } |  | 
|   9574  |  | 
|   9575   // There should be no environment uses. The pass replaced them with |  | 
|   9576   // MaterializeObject instructions. |  | 
|   9577 #ifdef DEBUG |  | 
|   9578   for (Value* use = alloc->env_use_list(); |  | 
|   9579        use != NULL; |  | 
|   9580        use = use->next_use()) { |  | 
|   9581     ASSERT(use->instruction()->IsMaterializeObject()); |  | 
|   9582   } |  | 
|   9583 #endif |  | 
|   9584   ASSERT(alloc->input_use_list() == NULL); |  | 
|   9585   alloc->RemoveFromGraph(); |  | 
|   9586   if (alloc->ArgumentCount() > 0) { |  | 
|   9587     ASSERT(alloc->ArgumentCount() == 1); |  | 
|   9588     for (intptr_t i = 0; i < alloc->ArgumentCount(); ++i) { |  | 
|   9589       alloc->PushArgumentAt(i)->RemoveFromGraph(); |  | 
|   9590     } |  | 
|   9591   } |  | 
|   9592 } |  | 
|   9593  |  | 
|   9594  |  | 
|   9595 // Find allocation instructions that can potentially be eliminated and |  |
|   9596 // rematerialized at deoptimization exits if needed. See IsSafeUse |  |
|   9597 // for a description of the algorithm used below. |  |
|   9598 void AllocationSinking::CollectCandidates() { |  | 
|   9599   // Optimistically collect all potential candidates. |  | 
|   9600   for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator(); |  | 
|   9601        !block_it.Done(); |  | 
|   9602        block_it.Advance()) { |  | 
|   9603     BlockEntryInstr* block = block_it.Current(); |  | 
|   9604     for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) { |  | 
|   9605       { AllocateObjectInstr* alloc = it.Current()->AsAllocateObject(); |  | 
|   9606         if ((alloc != NULL) && |  | 
|   9607             IsAllocationSinkingCandidate(alloc, kOptimisticCheck)) { |  | 
|   9608           alloc->SetIdentity(AliasIdentity::AllocationSinkingCandidate()); |  | 
|   9609           candidates_.Add(alloc); |  | 
|   9610         } |  | 
|   9611       } |  | 
|   9612       { AllocateUninitializedContextInstr* alloc = |  | 
|   9613             it.Current()->AsAllocateUninitializedContext(); |  | 
|   9614         if ((alloc != NULL) && |  | 
|   9615             IsAllocationSinkingCandidate(alloc, kOptimisticCheck)) { |  | 
|   9616           alloc->SetIdentity(AliasIdentity::AllocationSinkingCandidate()); |  | 
|   9617           candidates_.Add(alloc); |  | 
|   9618         } |  | 
|   9619       } |  | 
|   9620     } |  | 
|   9621   } |  | 
|   9622  |  | 
|   9623   // Transitively unmark all candidates that are not strictly valid. |  | 
|   9624   bool changed; |  | 
|   9625   do { |  | 
|   9626     changed = false; |  | 
|   9627     for (intptr_t i = 0; i < candidates_.length(); i++) { |  | 
|   9628       Definition* alloc = candidates_[i]; |  | 
|   9629       if (alloc->Identity().IsAllocationSinkingCandidate()) { |  | 
|   9630         if (!IsAllocationSinkingCandidate(alloc, kStrictCheck)) { |  | 
|   9631           alloc->SetIdentity(AliasIdentity::Unknown()); |  | 
|   9632           changed = true; |  | 
|   9633         } |  | 
|   9634       } |  | 
|   9635     } |  | 
|   9636   } while (changed); |  | 
|   9637  |  | 
|   9638   // Shrink the list of candidates, removing all unmarked ones. |  |
|   9639   intptr_t j = 0; |  | 
|   9640   for (intptr_t i = 0; i < candidates_.length(); i++) { |  | 
|   9641     Definition* alloc = candidates_[i]; |  | 
|   9642     if (alloc->Identity().IsAllocationSinkingCandidate()) { |  | 
|   9643       if (FLAG_trace_optimization) { |  | 
|   9644         OS::Print("discovered allocation sinking candidate: v%" Pd "\n", |  | 
|   9645                   alloc->ssa_temp_index()); |  | 
|   9646       } |  | 
|   9647  |  | 
|   9648       if (j != i) { |  | 
|   9649         candidates_[j] = alloc; |  | 
|   9650       } |  | 
|   9651       j++; |  | 
|   9652     } |  | 
|   9653   } |  | 
|   9654   candidates_.TruncateTo(j); |  | 
|   9655 } |  | 
|   9656  |  | 
|   9657  |  | 
|   9658 // If a materialization references an allocation sinking candidate then |  |
|   9659 // replace this reference with the materialization which should have been |  |
|   9660 // computed for this side-exit. CollectAllExits should have collected it. |  |
|   9661 void AllocationSinking::NormalizeMaterializations() { |  | 
|   9662   for (intptr_t i = 0; i < candidates_.length(); i++) { |  | 
|   9663     Definition* alloc = candidates_[i]; |  | 
|   9664  |  | 
|   9665     Value* next_use; |  | 
|   9666     for (Value* use = alloc->input_use_list(); |  | 
|   9667          use != NULL; |  | 
|   9668          use = next_use) { |  | 
|   9669       next_use = use->next_use(); |  | 
|   9670       if (use->instruction()->IsMaterializeObject()) { |  | 
|   9671         use->BindTo(MaterializationFor(alloc, use->instruction())); |  | 
|   9672       } |  | 
|   9673     } |  | 
|   9674   } |  | 
|   9675 } |  | 
|   9676  |  | 
|   9677  |  | 
|   9678 // We transitively insert materializations at each deoptimization exit that |  | 
|   9679 // might see the given allocation (see ExitsCollector). Some of these |  |
|   9680 // materializations are not actually used and some fail to compute because |  |
|   9681 // they are inserted in a block that is not dominated by the allocation. |  |
|   9682 // Remove these unused materializations from the graph. |  |
|   9683 void AllocationSinking::RemoveUnusedMaterializations() { |  | 
|   9684   intptr_t j = 0; |  | 
|   9685   for (intptr_t i = 0; i < materializations_.length(); i++) { |  | 
|   9686     MaterializeObjectInstr* mat = materializations_[i]; |  | 
|   9687     if ((mat->input_use_list() == NULL) && (mat->env_use_list() == NULL)) { |  | 
|   9688       // Check if this materialization failed to compute and remove any |  | 
|   9689       // unforwarded loads. There were no loads from any allocation sinking |  | 
|   9690       // candidate in the beginning, so it is safe to assume that any |  |
|   9691       // encountered load was inserted by CreateMaterializationAt. |  |
|   9692       for (intptr_t i = 0; i < mat->InputCount(); i++) { |  | 
|   9693         LoadFieldInstr* load = mat->InputAt(i)->definition()->AsLoadField(); |  | 
|   9694         if ((load != NULL) && |  | 
|   9695             (load->instance()->definition() == mat->allocation())) { |  | 
|   9696           load->ReplaceUsesWith(flow_graph_->constant_null()); |  | 
|   9697           load->RemoveFromGraph(); |  | 
|   9698         } |  | 
|   9699       } |  | 
|   9700       mat->RemoveFromGraph(); |  | 
|   9701     } else { |  | 
|   9702       if (j != i) { |  | 
|   9703         materializations_[j] = mat; |  | 
|   9704       } |  | 
|   9705       j++; |  | 
|   9706     } |  | 
|   9707   } |  | 
|   9708   materializations_.TruncateTo(j); |  | 
|   9709 } |  | 
|   9710  |  | 
|   9711  |  | 
|   9712 // Some candidates might stop being eligible for allocation sinking after |  |
|   9713 // load forwarding because they flow into phis that load forwarding |  |
|   9714 // inserts. Discover such allocations and remove them from the list |  |
|   9715 // of allocation sinking candidates, undoing all changes that we made |  |
|   9716 // in preparation for sinking these allocations. |  |
|   9717 void AllocationSinking::DiscoverFailedCandidates() { |  | 
|   9718   // Transitively unmark all candidates that are not strictly valid. |  | 
|   9719   bool changed; |  | 
|   9720   do { |  | 
|   9721     changed = false; |  | 
|   9722     for (intptr_t i = 0; i < candidates_.length(); i++) { |  | 
|   9723       Definition* alloc = candidates_[i]; |  | 
|   9724       if (alloc->Identity().IsAllocationSinkingCandidate()) { |  | 
|   9725         if (!IsAllocationSinkingCandidate(alloc, kStrictCheck)) { |  | 
|   9726           alloc->SetIdentity(AliasIdentity::Unknown()); |  | 
|   9727           changed = true; |  | 
|   9728         } |  | 
|   9729       } |  | 
|   9730     } |  | 
|   9731   } while (changed); |  | 
|   9732  |  | 
|   9733   // Remove all failed candidates from the candidates list. |  | 
|   9734   intptr_t j = 0; |  | 
|   9735   for (intptr_t i = 0; i < candidates_.length(); i++) { |  | 
|   9736     Definition* alloc = candidates_[i]; |  | 
|   9737     if (!alloc->Identity().IsAllocationSinkingCandidate()) { |  | 
|   9738       if (FLAG_trace_optimization) { |  | 
|   9739         OS::Print("allocation v%" Pd " can't be eliminated\n", |  | 
|   9740                   alloc->ssa_temp_index()); |  | 
|   9741       } |  | 
|   9742  |  | 
|   9743 #ifdef DEBUG |  | 
|   9744       for (Value* use = alloc->env_use_list(); |  | 
|   9745            use != NULL; |  | 
|   9746            use = use->next_use()) { |  | 
|   9747         ASSERT(use->instruction()->IsMaterializeObject()); |  | 
|   9748       } |  | 
|   9749 #endif |  | 
|   9750  |  | 
|   9751       // All materializations will be removed from the graph. Remove inserted |  | 
|   9752       // loads first and detach materializations from the allocation's |  |
|   9753       // environment use list: we will reconstruct it when we start removing |  |
|   9754       // materializations. |  | 
|   9755       alloc->set_env_use_list(NULL); |  | 
|   9756       for (Value* use = alloc->input_use_list(); |  | 
|   9757            use != NULL; |  | 
|   9758            use = use->next_use()) { |  | 
|   9759         if (use->instruction()->IsLoadField()) { |  | 
|   9760           LoadFieldInstr* load = use->instruction()->AsLoadField(); |  | 
|   9761           load->ReplaceUsesWith(flow_graph_->constant_null()); |  | 
|   9762           load->RemoveFromGraph(); |  | 
|   9763         } else { |  | 
|   9764           ASSERT(use->instruction()->IsMaterializeObject() || |  | 
|   9765                  use->instruction()->IsPhi() || |  | 
|   9766                  use->instruction()->IsStoreInstanceField()); |  | 
|   9767         } |  | 
|   9768       } |  | 
|   9769     } else { |  | 
|   9770       if (j != i) { |  | 
|   9771         candidates_[j] = alloc; |  | 
|   9772       } |  | 
|   9773       j++; |  | 
|   9774     } |  | 
|   9775   } |  | 
|   9776  |  | 
|   9777   if (j != candidates_.length()) {  // Something was removed from candidates. |  | 
|   9778     intptr_t k = 0; |  | 
|   9779     for (intptr_t i = 0; i < materializations_.length(); i++) { |  | 
|   9780       MaterializeObjectInstr* mat = materializations_[i]; |  | 
|   9781       if (!mat->allocation()->Identity().IsAllocationSinkingCandidate()) { |  | 
|   9782         // Restore environment uses of the allocation that were replaced |  | 
|   9783       // by this materialization and drop the materialization. |  |
|   9784         mat->ReplaceUsesWith(mat->allocation()); |  | 
|   9785         mat->RemoveFromGraph(); |  | 
|   9786       } else { |  | 
|   9787         if (k != i) { |  | 
|   9788           materializations_[k] = mat; |  | 
|   9789         } |  | 
|   9790         k++; |  | 
|   9791       } |  | 
|   9792     } |  | 
|   9793     materializations_.TruncateTo(k); |  | 
|   9794   } |  | 
|   9795  |  | 
|   9796   candidates_.TruncateTo(j); |  | 
|   9797 } |  | 
|   9798  |  | 
|   9799  |  | 
|   9800 void AllocationSinking::Optimize() { |  | 
|   9801   CollectCandidates(); |  | 
|   9802  |  | 
|   9803   // Insert MaterializeObject instructions that will describe the state of the |  | 
|   9804   // object at all deoptimization points. Each inserted materialization looks |  | 
|   9805   // like this (where v_0 is the allocation that we are going to eliminate): |  |
|   9806   //   v_1     <- LoadField(v_0, field_1) |  | 
|   9807   //           ... |  | 
|   9808   //   v_N     <- LoadField(v_0, field_N) |  | 
|   9809   //   v_{N+1} <- MaterializeObject(field_1 = v_1, ..., field_N = v_{N}) |  | 
|   9810   for (intptr_t i = 0; i < candidates_.length(); i++) { |  | 
|   9811     InsertMaterializations(candidates_[i]); |  | 
|   9812   } |  | 
|   9813  |  | 
|   9814   // Run load forwarding to eliminate LoadField instructions inserted above. |  | 
|   9815   // All loads will be successfully eliminated because: |  | 
|   9816   //   a) they use fields (not offsets) and thus provide precise aliasing |  | 
|   9817   //      information |  | 
|   9818   //   b) the candidate does not escape and thus its fields are not affected |  |
|   9819   //      by external effects from calls. |  |
|   9820   LoadOptimizer::OptimizeGraph(flow_graph_); |  | 
|   9821  |  | 
|   9822   NormalizeMaterializations(); |  | 
|   9823  |  | 
|   9824   RemoveUnusedMaterializations(); |  | 
|   9825  |  | 
|   9826   // If any candidates are no longer eligible for allocation sinking, abort |  |
|   9827   // the optimization for them and undo any changes made in preparation. |  |
|   9828   DiscoverFailedCandidates(); |  | 
|   9829  |  | 
|   9830   // At this point we have computed the state of the object at each deopt |  |
|   9831   // point and we can eliminate it. Loads inserted above were forwarded, so |  |
|   9832   // there are no uses of the allocation, just as at the start of the pass. |  |
|   9833   for (intptr_t i = 0; i < candidates_.length(); i++) { |  | 
|   9834     EliminateAllocation(candidates_[i]); |  | 
|   9835   } |  | 
|   9836  |  | 
|   9837   // Process materializations and unbox their arguments: materializations |  | 
|   9838   // are part of the environment and can materialize boxes for double/mint/simd |  | 
|   9839   // values when needed. |  | 
|   9840   // TODO(vegorov): handle all box types here. |  | 
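|          // E.g. (schematic): MaterializeObject(f = Box(v)) becomes |  |
|          // MaterializeObject(f = v); the box is recreated during |  |
|          // deoptimization when the object is actually materialized. |  |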
|   9841   for (intptr_t i = 0; i < materializations_.length(); i++) { |  | 
|   9842     MaterializeObjectInstr* mat = materializations_[i]; |  | 
|   9843     for (intptr_t j = 0; j < mat->InputCount(); j++) { |  | 
|   9844       Definition* defn = mat->InputAt(j)->definition(); |  | 
|   9845       if (defn->IsBox()) { |  | 
|   9846         mat->InputAt(j)->BindTo(defn->InputAt(0)->definition()); |  | 
|   9847       } |  | 
|   9848     } |  | 
|   9849   } |  | 
|   9850 } |  | 
|   9851  |  | 
|   9852  |  | 
|   9853 // Remove materializations from the graph. The register allocator will treat |  |
|   9854 // them as part of the environment, not as real instructions. |  |
|   9855 void AllocationSinking::DetachMaterializations() { |  | 
|   9856   for (intptr_t i = 0; i < materializations_.length(); i++) { |  | 
|   9857     materializations_[i]->previous()->LinkTo(materializations_[i]->next()); |  | 
|   9858   } |  | 
|   9859 } |  | 
|   9860  |  | 
|   9861  |  | 
|   9862 // Add a field/offset to the list of fields if it is not yet present there. |  | 
|   9863 static bool AddSlot(ZoneGrowableArray<const Object*>* slots, |  | 
|   9864                     const Object& slot) { |  | 
|   9865   for (intptr_t i = 0; i < slots->length(); i++) { |  | 
|   9866     if ((*slots)[i]->raw() == slot.raw()) { |  | 
|   9867       return false; |  | 
|   9868     } |  | 
|   9869   } |  | 
|   9870   slots->Add(&slot); |  | 
|   9871   return true; |  | 
|   9872 } |  | 
|   9873  |  | 
|   9874  |  | 
|   9875 // Find the deoptimization exit for the given materialization, assuming that |  |
|   9876 // all materializations are emitted right before the instruction which is a |  |
|   9877 // deoptimization exit. |  |
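|        // |  |
|        // Schematically (illustrative), the expected layout at an exit is |  |
|        // |  |
|        //     load_1 .. load_n  mat_1 .. mat_k  <deoptimizing instruction> |  |
|        // |  |
|        // so skipping forward over MaterializeObject instructions yields the |  |
|        // exit instruction itself. |  |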
|   9878 static Instruction* ExitForMaterialization(MaterializeObjectInstr* mat) { |  | 
|   9879   while (mat->next()->IsMaterializeObject()) { |  | 
|   9880     mat = mat->next()->AsMaterializeObject(); |  | 
|   9881   } |  | 
|   9882   return mat->next(); |  | 
|   9883 } |  | 
|   9884  |  | 
|   9885  |  | 
|   9886 // Given the deoptimization exit, find the first materialization that was |  |
|   9887 // inserted before it. |  |
|   9888 static Instruction* FirstMaterializationAt(Instruction* exit) { |  | 
|   9889   while (exit->previous()->IsMaterializeObject()) { |  | 
|   9890     exit = exit->previous(); |  | 
|   9891   } |  | 
|   9892   return exit; |  | 
|   9893 } |  | 
|   9894  |  | 
|   9895  |  | 
|   9896 // Given the allocation and the deoptimization exit, try to find the |  |
|   9897 // MaterializeObject instruction corresponding to this allocation at this exit. |  |
|   9898 MaterializeObjectInstr* AllocationSinking::MaterializationFor( |  | 
|   9899     Definition* alloc, Instruction* exit) { |  | 
|   9900   if (exit->IsMaterializeObject()) { |  | 
|   9901     exit = ExitForMaterialization(exit->AsMaterializeObject()); |  | 
|   9902   } |  | 
|   9903  |  | 
|   9904   for (MaterializeObjectInstr* mat = exit->previous()->AsMaterializeObject(); |  | 
|   9905        mat != NULL; |  | 
|   9906        mat = mat->previous()->AsMaterializeObject()) { |  | 
|   9907     if (mat->allocation() == alloc) { |  | 
|   9908       return mat; |  | 
|   9909     } |  | 
|   9910   } |  | 
|   9911  |  | 
|   9912   return NULL; |  | 
|   9913 } |  | 
|   9914  |  | 
|   9915  |  | 
|   9916 // Insert a MaterializeObject instruction for the given allocation before |  |
|   9917 // the given instruction that can deoptimize. |  |
|   9918 void AllocationSinking::CreateMaterializationAt( |  | 
|   9919     Instruction* exit, |  | 
|   9920     Definition* alloc, |  | 
|   9921     const ZoneGrowableArray<const Object*>& slots) { |  | 
|   9922   ZoneGrowableArray<Value*>* values = |  | 
|   9923       new(I) ZoneGrowableArray<Value*>(slots.length()); |  | 
|   9924  |  | 
|   9925   // All loads should be inserted before the first materialization so that |  |
|   9926   // the IR follows the pattern: loads, materializations, deoptimizing |  |
|   9927   // instruction. |  |
|   9928   Instruction* load_point = FirstMaterializationAt(exit); |  | 
|   9929  |  | 
|   9930   // Insert load instruction for every field. |  | 
|   9931   for (intptr_t i = 0; i < slots.length(); i++) { |  | 
|   9932     LoadFieldInstr* load = slots[i]->IsField() |  | 
|   9933         ? new(I) LoadFieldInstr( |  | 
|   9934             new(I) Value(alloc), |  | 
|   9935             &Field::Cast(*slots[i]), |  | 
|   9936             AbstractType::ZoneHandle(I), |  | 
|   9937             alloc->token_pos()) |  | 
|   9938         : new(I) LoadFieldInstr( |  | 
|   9939             new(I) Value(alloc), |  | 
|   9940             Smi::Cast(*slots[i]).Value(), |  | 
|   9941             AbstractType::ZoneHandle(I), |  | 
|   9942             alloc->token_pos()); |  | 
|   9943     flow_graph_->InsertBefore( |  | 
|   9944         load_point, load, NULL, FlowGraph::kValue); |  | 
|   9945     values->Add(new(I) Value(load)); |  | 
|   9946   } |  | 
|   9947  |  | 
|   9948   MaterializeObjectInstr* mat = NULL; |  | 
|   9949   if (alloc->IsAllocateObject()) { |  | 
|   9950     mat = new(I) MaterializeObjectInstr( |  | 
|   9951         alloc->AsAllocateObject(), slots, values); |  | 
|   9952   } else { |  | 
|   9953     ASSERT(alloc->IsAllocateUninitializedContext()); |  | 
|   9954     mat = new(I) MaterializeObjectInstr( |  | 
|   9955         alloc->AsAllocateUninitializedContext(), slots, values); |  | 
|   9956   } |  | 
|   9957  |  | 
|   9958   flow_graph_->InsertBefore(exit, mat, NULL, FlowGraph::kValue); |  | 
|   9959  |  | 
|   9960   // Replace all mentions of this allocation with a newly inserted |  | 
|   9961   // MaterializeObject instruction. |  | 
|   9962   // We must preserve the identity: all mentions are replaced by the same |  | 
|   9963   // materialization. |  | 
|   9964   for (Environment::DeepIterator env_it(exit->env()); |  | 
|   9965        !env_it.Done(); |  | 
|   9966        env_it.Advance()) { |  | 
|   9967     Value* use = env_it.CurrentValue(); |  | 
|   9968     if (use->definition() == alloc) { |  | 
|   9969       use->RemoveFromUseList(); |  | 
|   9970       use->set_definition(mat); |  | 
|   9971       mat->AddEnvUse(use); |  | 
|   9972     } |  | 
|   9973   } |  | 
|   9974  |  | 
|   9975   // Mark MaterializeObject as an environment use of this allocation. |  | 
|   9976   // This will allow us to discover it when we are looking for deoptimization |  | 
|   9977   // exits for another allocation that potentially flows into this one. |  | 
|   9978   Value* val = new(I) Value(alloc); |  | 
|   9979   val->set_instruction(mat); |  | 
|   9980   alloc->AddEnvUse(val); |  | 
|   9981  |  | 
|   9982   // Record inserted materialization. |  | 
|   9983   materializations_.Add(mat); |  | 
|   9984 } |  | 
|   9985  |  | 
|   9986  |  | 
|   9987 // Add the given instruction to the list of instructions if it is not yet |  |
|   9988 // present there. |  |
|   9989 template<typename T> |  | 
|   9990 void AddInstruction(GrowableArray<T*>* list, T* value) { |  | 
|   9991   ASSERT(!value->IsGraphEntry()); |  | 
|   9992   for (intptr_t i = 0; i < list->length(); i++) { |  | 
|   9993     if ((*list)[i] == value) { |  | 
|   9994       return; |  | 
|   9995     } |  | 
|   9996   } |  | 
|   9997   list->Add(value); |  | 
|   9998 } |  | 
|   9999  |  | 
|  10000  |  | 
|  10001 // Transitively collect all deoptimization exits that might need this |  |
|  10002 // allocation rematerialized. It is not enough to collect only environment |  |
|  10003 // uses of this allocation because it can flow into other objects that will |  |
|  10004 // be dematerialized and that are referenced by deopt environments that |  |
|  10005 // don't contain this allocation explicitly. |  |
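|        // |  |
|        // For instance (illustrative): if candidate A is stored into |  |
|        // candidate B, a deopt environment that mentions only B can still |  |
|        // observe A through B's fields, so A's exits must include B's exits. |  |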
|  10006 void AllocationSinking::ExitsCollector::Collect(Definition* alloc) { |  | 
|  10007   for (Value* use = alloc->env_use_list(); |  | 
|  10008        use != NULL; |  | 
|  10009        use = use->next_use()) { |  | 
|  10010     if (use->instruction()->IsMaterializeObject()) { |  | 
|  10011       AddInstruction(&exits_, ExitForMaterialization( |  | 
|  10012           use->instruction()->AsMaterializeObject())); |  | 
|  10013     } else { |  | 
|  10014       AddInstruction(&exits_, use->instruction()); |  | 
|  10015     } |  | 
|  10016   } |  | 
|  10017  |  | 
|  10018   // Check if this allocation is stored into any other allocation sinking |  | 
|  10019   // candidate and put it on the worklist so that we conservatively collect |  |
|  10020   // all exits for that candidate as well, because they might potentially see |  |
|  10021   // this object. |  | 
|  10022   for (Value* use = alloc->input_use_list(); |  | 
|  10023        use != NULL; |  | 
|  10024        use = use->next_use()) { |  | 
|  10025     Definition* obj = StoreInto(use); |  | 
|  10026     if ((obj != NULL) && (obj != alloc)) { |  | 
|  10027       AddInstruction(&worklist_, obj); |  | 
|  10028     } |  | 
|  10029   } |  | 
|  10030 } |  | 
|  10031  |  | 
|  10032  |  | 
|  10033 void AllocationSinking::ExitsCollector::CollectTransitively(Definition* alloc) { |  | 
|  10034   exits_.TruncateTo(0); |  | 
|  10035   worklist_.TruncateTo(0); |  | 
|  10036  |  | 
|  10037   worklist_.Add(alloc); |  | 
|  10038  |  | 
|  10039   // Note: the worklist may grow while we are iterating over it. |  |
|  10040   // We do not remove allocations from the worklist, to avoid wasting space |  |
|  10041   // on a side BitVector of already processed allocations: the worklist is |  |
|  10042   // expected to be very small, thus a linear search in it is just as |  |
|  10043   // efficient as a bitvector. |  |
|  10044   for (intptr_t i = 0; i < worklist_.length(); i++) { |  | 
|  10045     Collect(worklist_[i]); |  | 
|  10046   } |  | 
|  10047 } |  | 
|  10048  |  | 
|  10049  |  | 
|  10050 void AllocationSinking::InsertMaterializations(Definition* alloc) { |  | 
|  10051   // Collect all fields of this instance that are written to. |  |
|  10052   ZoneGrowableArray<const Object*>* slots = |  | 
|  10053       new(I) ZoneGrowableArray<const Object*>(5); |  | 
|  10054  |  | 
|  10055   for (Value* use = alloc->input_use_list(); |  | 
|  10056        use != NULL; |  | 
|  10057        use = use->next_use()) { |  | 
|  10058     StoreInstanceFieldInstr* store = use->instruction()->AsStoreInstanceField(); |  | 
|  10059     if ((store != NULL) && (store->instance()->definition() == alloc)) { |  | 
|  10060       if (!store->field().IsNull()) { |  | 
|  10061         AddSlot(slots, store->field()); |  | 
|  10062       } else { |  | 
|  10063         AddSlot(slots, Smi::ZoneHandle(I, Smi::New(store->offset_in_bytes()))); |  | 
|  10064       } |  | 
|  10065     } |  | 
|  10066   } |  | 
|  10067  |  | 
|  10068   if (alloc->ArgumentCount() > 0) { |  | 
|  10069     AllocateObjectInstr* alloc_object = alloc->AsAllocateObject(); |  | 
|  10070     ASSERT(alloc_object->ArgumentCount() == 1); |  | 
|  10071     intptr_t type_args_offset = |  | 
|  10072         alloc_object->cls().type_arguments_field_offset(); |  | 
|  10073     AddSlot(slots, Smi::ZoneHandle(I, Smi::New(type_args_offset))); |  | 
|  10074   } |  | 
|  10075  |  | 
|  10076   // Collect all instructions that mention this object in the environment. |  | 
|  10077   exits_collector_.CollectTransitively(alloc); |  | 
|  10078  |  | 
|  10079   // Insert materializations at environment uses. |  | 
|  10080   for (intptr_t i = 0; i < exits_collector_.exits().length(); i++) { |  | 
|  10081     CreateMaterializationAt( |  | 
|  10082         exits_collector_.exits()[i], alloc, *slots); |  | 
|  10083   } |  | 
|  10084 } |  | 
|  10085  |  | 
|  10086  |  | 
|  10087 }  // namespace dart |   1638 }  // namespace dart | 