| OLD | NEW | 
| (Empty) |  | 
 |     1 // Copyright 2011 the V8 project authors. All rights reserved. | 
 |     2 // Redistribution and use in source and binary forms, with or without | 
 |     3 // modification, are permitted provided that the following conditions are | 
 |     4 // met: | 
 |     5 // | 
 |     6 //     * Redistributions of source code must retain the above copyright | 
 |     7 //       notice, this list of conditions and the following disclaimer. | 
 |     8 //     * Redistributions in binary form must reproduce the above | 
 |     9 //       copyright notice, this list of conditions and the following | 
 |    10 //       disclaimer in the documentation and/or other materials provided | 
 |    11 //       with the distribution. | 
 |    12 //     * Neither the name of Google Inc. nor the names of its | 
 |    13 //       contributors may be used to endorse or promote products derived | 
 |    14 //       from this software without specific prior written permission. | 
 |    15 // | 
 |    16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 
 |    17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 
 |    18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 
 |    19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | 
 |    20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 
 |    21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 
 |    22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 
 |    23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 
 |    24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 
 |    25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 
 |    26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 
 |    27  | 
 |    28 #include "v8.h" | 
 |    29  | 
 |    30 #include "mips/lithium-codegen-mips.h" | 
 |    31 #include "mips/lithium-gap-resolver-mips.h" | 
 |    32 #include "code-stubs.h" | 
 |    33 #include "stub-cache.h" | 
 |    34  | 
 |    35 namespace v8 { | 
 |    36 namespace internal { | 
 |    37  | 
 |    38  | 
 |    39 class SafepointGenerator : public CallWrapper { | 
 |    40  public: | 
 |    41   SafepointGenerator(LCodeGen* codegen, | 
 |    42                      LPointerMap* pointers, | 
 |    43                      int deoptimization_index) | 
 |    44       : codegen_(codegen), | 
 |    45         pointers_(pointers), | 
 |    46         deoptimization_index_(deoptimization_index) { } | 
 |    47   virtual ~SafepointGenerator() { } | 
 |    48  | 
 |    49   virtual void BeforeCall(int call_size) const { | 
 |    50     ASSERT(call_size >= 0); | 
 |    51     // Ensure that we have enough space after the previous safepoint position | 
 |    52     // for the call that the deoptimizer may patch in on lazy bailout. | 
 |    53     int call_end = codegen_->masm()->pc_offset() + call_size; | 
 |    54     int prev_jump_end = | 
 |    55         codegen_->LastSafepointEnd() + Deoptimizer::patch_size(); | 
 |    56     if (call_end < prev_jump_end) { | 
 |    57       int padding_size = prev_jump_end - call_end; | 
 |    58       ASSERT_EQ(0, padding_size % Assembler::kInstrSize); | 
 |    59       while (padding_size > 0) { | 
 |    60         codegen_->masm()->nop(); | 
 |    61         padding_size -= Assembler::kInstrSize; | 
 |    62       } | 
 |    63     } | 
 |    64   } | 
 |    65  | 
 |    66   virtual void AfterCall() const { | 
 |    67     codegen_->RecordSafepoint(pointers_, deoptimization_index_); | 
 |    68   } | 
 |    69  | 
 |    70  private: | 
 |    71   LCodeGen* codegen_; | 
 |    72   LPointerMap* pointers_; | 
 |    73   int deoptimization_index_; | 
 |    74 }; | 
 |    75  | 
 |    76  | 
 |    77 #define __ masm()-> | 
 |    78  | 
 |    79 bool LCodeGen::GenerateCode() { | 
 |    80   HPhase phase("Code generation", chunk()); | 
 |    81   ASSERT(is_unused()); | 
 |    82   status_ = GENERATING; | 
 |    83   CpuFeatures::Scope scope(FPU); | 
 |    84  | 
 |    85   CodeStub::GenerateFPStubs(); | 
 |    86  | 
 |    87   // Open a frame scope to indicate that there is a frame on the stack.  The | 
 |    88   // NONE indicates that the scope shouldn't actually generate code to set up | 
 |    89   // the frame (that is done in GeneratePrologue). | 
 |    90   FrameScope frame_scope(masm_, StackFrame::NONE); | 
 |    91  | 
 |    92   return GeneratePrologue() && | 
 |    93       GenerateBody() && | 
 |    94       GenerateDeferredCode() && | 
 |    95       GenerateSafepointTable(); | 
 |    96 } | 
 |    97  | 
 |    98  | 
 |    99 void LCodeGen::FinishCode(Handle<Code> code) { | 
 |   100   ASSERT(is_done()); | 
 |   101   code->set_stack_slots(GetStackSlotCount()); | 
 |   102   code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); | 
 |   103   PopulateDeoptimizationData(code); | 
 |   104   Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code); | 
 |   105 } | 
 |   106  | 
 |   107  | 
 |   108 void LCodeGen::Abort(const char* format, ...) { | 
 |   109   if (FLAG_trace_bailout) { | 
 |   110     SmartArrayPointer<char> name( | 
 |   111         info()->shared_info()->DebugName()->ToCString()); | 
 |   112     PrintF("Aborting LCodeGen in @\"%s\": ", *name); | 
 |   113     va_list arguments; | 
 |   114     va_start(arguments, format); | 
 |   115     OS::VPrint(format, arguments); | 
 |   116     va_end(arguments); | 
 |   117     PrintF("\n"); | 
 |   118   } | 
 |   119   status_ = ABORTED; | 
 |   120 } | 
 |   121  | 
 |   122  | 
 |   123 void LCodeGen::Comment(const char* format, ...) { | 
 |   124   if (!FLAG_code_comments) return; | 
 |   125   char buffer[4 * KB]; | 
 |   126   StringBuilder builder(buffer, ARRAY_SIZE(buffer)); | 
 |   127   va_list arguments; | 
 |   128   va_start(arguments, format); | 
 |   129   builder.AddFormattedList(format, arguments); | 
 |   130   va_end(arguments); | 
 |   131  | 
 |   132   // Copy the string before recording it in the assembler to avoid | 
 |   133   // issues when the stack allocated buffer goes out of scope. | 
 |   134   size_t length = builder.position(); | 
 |   135   Vector<char> copy = Vector<char>::New(length + 1); | 
 |   136   memcpy(copy.start(), builder.Finalize(), copy.length()); | 
 |   137   masm()->RecordComment(copy.start()); | 
 |   138 } | 
 |   139  | 
 |   140  | 
 |   141 bool LCodeGen::GeneratePrologue() { | 
 |   142   ASSERT(is_generating()); | 
 |   143  | 
 |   144 #ifdef DEBUG | 
 |   145   if (strlen(FLAG_stop_at) > 0 && | 
 |   146       info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { | 
 |   147     __ stop("stop_at"); | 
 |   148   } | 
 |   149 #endif | 
 |   150  | 
 |   151   // a1: Callee's JS function. | 
 |   152   // cp: Callee's context. | 
 |   153   // fp: Caller's frame pointer. | 
 |   154   // lr: Caller's pc. | 
 |   155  | 
 |   156   // Strict mode functions and builtins need to replace the receiver | 
 |   157   // with undefined when called as functions (without an explicit | 
 |   158   // receiver object). t1 is zero for method calls and non-zero for | 
 |   159   // function calls. | 
 |   160   if (info_->is_strict_mode() || info_->is_native()) { | 
 |   161     Label ok; | 
 |   162     __ Branch(&ok, eq, t1, Operand(zero_reg)); | 
 |   163  | 
 |   164     int receiver_offset = scope()->num_parameters() * kPointerSize; | 
 |   165     __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); | 
 |   166     __ sw(a2, MemOperand(sp, receiver_offset)); | 
 |   167     __ bind(&ok); | 
 |   168   } | 
 |   169  | 
 |   170   __ Push(ra, fp, cp, a1); | 
 |   171   __ Addu(fp, sp, Operand(2 * kPointerSize));  // Adj. FP to point to saved FP. | 
 |   172  | 
 |   173   // Reserve space for the stack slots needed by the code. | 
 |   174   int slots = GetStackSlotCount(); | 
 |   175   if (slots > 0) { | 
 |   176     if (FLAG_debug_code) { | 
 |   177       __ li(a0, Operand(slots)); | 
 |   178       __ li(a2, Operand(kSlotsZapValue)); | 
 |   179       Label loop; | 
 |   180       __ bind(&loop); | 
 |   181       __ push(a2); | 
 |   182       __ Subu(a0, a0, 1); | 
 |   183       __ Branch(&loop, ne, a0, Operand(zero_reg)); | 
 |   184     } else { | 
 |   185       __ Subu(sp, sp, Operand(slots * kPointerSize)); | 
 |   186     } | 
 |   187   } | 
 |   188  | 
 |   189   // Possibly allocate a local context. | 
 |   190   int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; | 
 |   191   if (heap_slots > 0) { | 
 |   192     Comment(";;; Allocate local context"); | 
 |   193     // Argument to NewContext is the function, which is in a1. | 
 |   194     __ push(a1); | 
 |   195     if (heap_slots <= FastNewContextStub::kMaximumSlots) { | 
 |   196       FastNewContextStub stub(heap_slots); | 
 |   197       __ CallStub(&stub); | 
 |   198     } else { | 
 |   199       __ CallRuntime(Runtime::kNewFunctionContext, 1); | 
 |   200     } | 
 |   201     RecordSafepoint(Safepoint::kNoDeoptimizationIndex); | 
 |   202     // Context is returned in both v0 and cp.  It replaces the context | 
 |   203     // passed to us.  It is saved on the stack and kept live in cp. | 
 |   204     __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 
 |   205     // Copy any necessary parameters into the context. | 
 |   206     int num_parameters = scope()->num_parameters(); | 
 |   207     for (int i = 0; i < num_parameters; i++) { | 
 |   208       Variable* var = scope()->parameter(i); | 
 |   209       if (var->IsContextSlot()) { | 
 |   210         int parameter_offset = StandardFrameConstants::kCallerSPOffset + | 
 |   211             (num_parameters - 1 - i) * kPointerSize; | 
 |   212         // Load parameter from stack. | 
 |   213         __ lw(a0, MemOperand(fp, parameter_offset)); | 
 |   214         // Store it in the context. | 
 |   215         MemOperand target = ContextOperand(cp, var->index()); | 
 |   216         __ sw(a0, target); | 
 |   217         // Update the write barrier. This clobbers a3 and a0. | 
 |   218         __ RecordWriteContextSlot( | 
 |   219             cp, target.offset(), a0, a3, kRAHasBeenSaved, kSaveFPRegs); | 
 |   220       } | 
 |   221     } | 
 |   222     Comment(";;; End allocate local context"); | 
 |   223   } | 
 |   224  | 
 |   225   // Trace the call. | 
 |   226   if (FLAG_trace) { | 
 |   227     __ CallRuntime(Runtime::kTraceEnter, 0); | 
 |   228   } | 
 |   229   return !is_aborted(); | 
 |   230 } | 
 |   231  | 
 |   232  | 
 |   233 bool LCodeGen::GenerateBody() { | 
 |   234   ASSERT(is_generating()); | 
 |   235   bool emit_instructions = true; | 
 |   236   for (current_instruction_ = 0; | 
 |   237        !is_aborted() && current_instruction_ < instructions_->length(); | 
 |   238        current_instruction_++) { | 
 |   239     LInstruction* instr = instructions_->at(current_instruction_); | 
 |   240     if (instr->IsLabel()) { | 
 |   241       LLabel* label = LLabel::cast(instr); | 
 |   242       emit_instructions = !label->HasReplacement(); | 
 |   243     } | 
 |   244  | 
 |   245     if (emit_instructions) { | 
 |   246       Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic()); | 
 |   247       instr->CompileToNative(this); | 
 |   248     } | 
 |   249   } | 
 |   250   return !is_aborted(); | 
 |   251 } | 
 |   252  | 
 |   253  | 
 |   254 LInstruction* LCodeGen::GetNextInstruction() { | 
 |   255   if (current_instruction_ < instructions_->length() - 1) { | 
 |   256     return instructions_->at(current_instruction_ + 1); | 
 |   257   } else { | 
 |   258     return NULL; | 
 |   259   } | 
 |   260 } | 
 |   261  | 
 |   262  | 
 |   263 bool LCodeGen::GenerateDeferredCode() { | 
 |   264   ASSERT(is_generating()); | 
 |   265   if (deferred_.length() > 0) { | 
 |   266     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { | 
 |   267       LDeferredCode* code = deferred_[i]; | 
 |   268       __ bind(code->entry()); | 
 |   269       Comment(";;; Deferred code @%d: %s.", | 
 |   270               code->instruction_index(), | 
 |   271               code->instr()->Mnemonic()); | 
 |   272       code->Generate(); | 
 |   273       __ jmp(code->exit()); | 
 |   274     } | 
 |   275  | 
 |   276     // Pad code to ensure that the last piece of deferred code has | 
 |   277     // room for lazy bailout. | 
 |   278     while ((masm()->pc_offset() - LastSafepointEnd()) | 
 |   279            < Deoptimizer::patch_size()) { | 
 |   280       __ nop(); | 
 |   281     } | 
 |   282   } | 
 |   283   // Deferred code is the last part of the instruction sequence. Mark | 
 |   284   // the generated code as done unless we bailed out. | 
 |   285   if (!is_aborted()) status_ = DONE; | 
 |   286   return !is_aborted(); | 
 |   287 } | 
 |   288  | 
 |   289  | 
 |   290 bool LCodeGen::GenerateDeoptJumpTable() { | 
 |   291   // TODO(plind): not clear that this will have an advantage for MIPS. | 
 |   292   // Skipping it for now. Raised issue #100 for this. | 
 |   293   Abort("Unimplemented: %s", "GenerateDeoptJumpTable"); | 
 |   294   return false; | 
 |   295 } | 
 |   296  | 
 |   297  | 
 |   298 bool LCodeGen::GenerateSafepointTable() { | 
 |   299   ASSERT(is_done()); | 
 |   300   safepoints_.Emit(masm(), GetStackSlotCount()); | 
 |   301   return !is_aborted(); | 
 |   302 } | 
 |   303  | 
 |   304  | 
 |   305 Register LCodeGen::ToRegister(int index) const { | 
 |   306   return Register::FromAllocationIndex(index); | 
 |   307 } | 
 |   308  | 
 |   309  | 
 |   310 DoubleRegister LCodeGen::ToDoubleRegister(int index) const { | 
 |   311   return DoubleRegister::FromAllocationIndex(index); | 
 |   312 } | 
 |   313  | 
 |   314  | 
 |   315 Register LCodeGen::ToRegister(LOperand* op) const { | 
 |   316   ASSERT(op->IsRegister()); | 
 |   317   return ToRegister(op->index()); | 
 |   318 } | 
 |   319  | 
 |   320  | 
 |   321 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { | 
 |   322   if (op->IsRegister()) { | 
 |   323     return ToRegister(op->index()); | 
 |   324   } else if (op->IsConstantOperand()) { | 
 |   325     __ li(scratch, ToOperand(op)); | 
 |   326     return scratch; | 
 |   327   } else if (op->IsStackSlot() || op->IsArgument()) { | 
 |   328     __ lw(scratch, ToMemOperand(op)); | 
 |   329     return scratch; | 
 |   330   } | 
 |   331   UNREACHABLE(); | 
 |   332   return scratch; | 
 |   333 } | 
 |   334  | 
 |   335  | 
 |   336 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { | 
 |   337   ASSERT(op->IsDoubleRegister()); | 
 |   338   return ToDoubleRegister(op->index()); | 
 |   339 } | 
 |   340  | 
 |   341  | 
 |   342 DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, | 
 |   343                                                 FloatRegister flt_scratch, | 
 |   344                                                 DoubleRegister dbl_scratch) { | 
 |   345   if (op->IsDoubleRegister()) { | 
 |   346     return ToDoubleRegister(op->index()); | 
 |   347   } else if (op->IsConstantOperand()) { | 
 |   348     LConstantOperand* const_op = LConstantOperand::cast(op); | 
 |   349     Handle<Object> literal = chunk_->LookupLiteral(const_op); | 
 |   350     Representation r = chunk_->LookupLiteralRepresentation(const_op); | 
 |   351     if (r.IsInteger32()) { | 
 |   352       ASSERT(literal->IsNumber()); | 
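 |           // Load the int32 into a GPR, move it to the FPU, and convert word to double. | 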
 |   353       __ li(at, Operand(static_cast<int32_t>(literal->Number()))); | 
 |   354       __ mtc1(at, flt_scratch); | 
 |   355       __ cvt_d_w(dbl_scratch, flt_scratch); | 
 |   356       return dbl_scratch; | 
 |   357     } else if (r.IsDouble()) { | 
 |   358       Abort("unsupported double immediate"); | 
 |   359     } else if (r.IsTagged()) { | 
 |   360       Abort("unsupported tagged immediate"); | 
 |   361     } | 
 |   362   } else if (op->IsStackSlot() || op->IsArgument()) { | 
 |   363     MemOperand mem_op = ToMemOperand(op); | 
 |   364     __ ldc1(dbl_scratch, mem_op); | 
 |   365     return dbl_scratch; | 
 |   366   } | 
 |   367   UNREACHABLE(); | 
 |   368   return dbl_scratch; | 
 |   369 } | 
 |   370  | 
 |   371  | 
 |   372 int LCodeGen::ToInteger32(LConstantOperand* op) const { | 
 |   373   Handle<Object> value = chunk_->LookupLiteral(op); | 
 |   374   ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32()); | 
 |   375   ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) == | 
 |   376       value->Number()); | 
 |   377   return static_cast<int32_t>(value->Number()); | 
 |   378 } | 
 |   379  | 
 |   380  | 
 |   381 Operand LCodeGen::ToOperand(LOperand* op) { | 
 |   382   if (op->IsConstantOperand()) { | 
 |   383     LConstantOperand* const_op = LConstantOperand::cast(op); | 
 |   384     Handle<Object> literal = chunk_->LookupLiteral(const_op); | 
 |   385     Representation r = chunk_->LookupLiteralRepresentation(const_op); | 
 |   386     if (r.IsInteger32()) { | 
 |   387       ASSERT(literal->IsNumber()); | 
 |   388       return Operand(static_cast<int32_t>(literal->Number())); | 
 |   389     } else if (r.IsDouble()) { | 
 |   390       Abort("ToOperand Unsupported double immediate."); | 
 |   391     } | 
 |   392     ASSERT(r.IsTagged()); | 
 |   393     return Operand(literal); | 
 |   394   } else if (op->IsRegister()) { | 
 |   395     return Operand(ToRegister(op)); | 
 |   396   } else if (op->IsDoubleRegister()) { | 
 |   397     Abort("ToOperand IsDoubleRegister unimplemented"); | 
 |   398     return Operand(0); | 
 |   399   } | 
 |   400   // Stack slots not implemented, use ToMemOperand instead. | 
 |   401   UNREACHABLE(); | 
 |   402   return Operand(0); | 
 |   403 } | 
 |   404  | 
 |   405  | 
 |   406 MemOperand LCodeGen::ToMemOperand(LOperand* op) const { | 
 |   407   ASSERT(!op->IsRegister()); | 
 |   408   ASSERT(!op->IsDoubleRegister()); | 
 |   409   ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); | 
 |   410   int index = op->index(); | 
 |   411   if (index >= 0) { | 
 |   412     // Local or spill slot. Skip the frame pointer, function, and | 
 |   413     // context in the fixed part of the frame. | 
 |   414     return MemOperand(fp, -(index + 3) * kPointerSize); | 
 |   415   } else { | 
 |   416     // Incoming parameter. Skip the return address. | 
 |   417     return MemOperand(fp, -(index - 1) * kPointerSize); | 
 |   418   } | 
 |   419 } | 
 |   420  | 
 |   421  | 
 |   422 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const { | 
 |   423   ASSERT(op->IsDoubleStackSlot()); | 
 |   424   int index = op->index(); | 
 |   425   if (index >= 0) { | 
 |   426     // Local or spill slot. Skip the frame pointer, function, context, | 
 |   427     // and the first word of the double in the fixed part of the frame. | 
 |   428     return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize); | 
 |   429   } else { | 
 |   430     // Incoming parameter. Skip the return address and the first word of | 
 |   431     // the double. | 
 |   432     return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize); | 
 |   433   } | 
 |   434 } | 
 |   435  | 
 |   436  | 
 |   437 void LCodeGen::WriteTranslation(LEnvironment* environment, | 
 |   438                                 Translation* translation) { | 
 |   439   if (environment == NULL) return; | 
 |   440  | 
 |   441   // The translation includes one command per value in the environment. | 
 |   442   int translation_size = environment->values()->length(); | 
 |   443   // The output frame height does not include the parameters. | 
 |   444   int height = translation_size - environment->parameter_count(); | 
 |   445  | 
 |   446   WriteTranslation(environment->outer(), translation); | 
 |   447   int closure_id = DefineDeoptimizationLiteral(environment->closure()); | 
 |   448   translation->BeginFrame(environment->ast_id(), closure_id, height); | 
 |   449   for (int i = 0; i < translation_size; ++i) { | 
 |   450     LOperand* value = environment->values()->at(i); | 
 |   451     // spilled_registers_ and spilled_double_registers_ are either | 
 |   452     // both NULL or both set. | 
 |   453     if (environment->spilled_registers() != NULL && value != NULL) { | 
 |   454       if (value->IsRegister() && | 
 |   455           environment->spilled_registers()[value->index()] != NULL) { | 
 |   456         translation->MarkDuplicate(); | 
 |   457         AddToTranslation(translation, | 
 |   458                          environment->spilled_registers()[value->index()], | 
 |   459                          environment->HasTaggedValueAt(i)); | 
 |   460       } else if ( | 
 |   461           value->IsDoubleRegister() && | 
 |   462           environment->spilled_double_registers()[value->index()] != NULL) { | 
 |   463         translation->MarkDuplicate(); | 
 |   464         AddToTranslation( | 
 |   465             translation, | 
 |   466             environment->spilled_double_registers()[value->index()], | 
 |   467             false); | 
 |   468       } | 
 |   469     } | 
 |   470  | 
 |   471     AddToTranslation(translation, value, environment->HasTaggedValueAt(i)); | 
 |   472   } | 
 |   473 } | 
 |   474  | 
 |   475  | 
 |   476 void LCodeGen::AddToTranslation(Translation* translation, | 
 |   477                                 LOperand* op, | 
 |   478                                 bool is_tagged) { | 
 |   479   if (op == NULL) { | 
 |   480     // TODO(twuerthinger): Introduce marker operands to indicate that this value | 
 |   481     // is not present and must be reconstructed from the deoptimizer. Currently | 
 |   482     // this is only used for the arguments object. | 
 |   483     translation->StoreArgumentsObject(); | 
 |   484   } else if (op->IsStackSlot()) { | 
 |   485     if (is_tagged) { | 
 |   486       translation->StoreStackSlot(op->index()); | 
 |   487     } else { | 
 |   488       translation->StoreInt32StackSlot(op->index()); | 
 |   489     } | 
 |   490   } else if (op->IsDoubleStackSlot()) { | 
 |   491     translation->StoreDoubleStackSlot(op->index()); | 
 |   492   } else if (op->IsArgument()) { | 
 |   493     ASSERT(is_tagged); | 
 |   494     int src_index = GetStackSlotCount() + op->index(); | 
 |   495     translation->StoreStackSlot(src_index); | 
 |   496   } else if (op->IsRegister()) { | 
 |   497     Register reg = ToRegister(op); | 
 |   498     if (is_tagged) { | 
 |   499       translation->StoreRegister(reg); | 
 |   500     } else { | 
 |   501       translation->StoreInt32Register(reg); | 
 |   502     } | 
 |   503   } else if (op->IsDoubleRegister()) { | 
 |   504     DoubleRegister reg = ToDoubleRegister(op); | 
 |   505     translation->StoreDoubleRegister(reg); | 
 |   506   } else if (op->IsConstantOperand()) { | 
 |   507     Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op)); | 
 |   508     int src_index = DefineDeoptimizationLiteral(literal); | 
 |   509     translation->StoreLiteral(src_index); | 
 |   510   } else { | 
 |   511     UNREACHABLE(); | 
 |   512   } | 
 |   513 } | 
 |   514  | 
 |   515  | 
 |   516 void LCodeGen::CallCode(Handle<Code> code, | 
 |   517                         RelocInfo::Mode mode, | 
 |   518                         LInstruction* instr) { | 
 |   519   CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); | 
 |   520 } | 
 |   521  | 
 |   522  | 
 |   523 void LCodeGen::CallCodeGeneric(Handle<Code> code, | 
 |   524                                RelocInfo::Mode mode, | 
 |   525                                LInstruction* instr, | 
 |   526                                SafepointMode safepoint_mode) { | 
 |   527   ASSERT(instr != NULL); | 
 |   528   LPointerMap* pointers = instr->pointer_map(); | 
 |   529   RecordPosition(pointers->position()); | 
 |   530   __ Call(code, mode); | 
 |   531   RegisterLazyDeoptimization(instr, safepoint_mode); | 
 |   532 } | 
 |   533  | 
 |   534  | 
 |   535 void LCodeGen::CallRuntime(const Runtime::Function* function, | 
 |   536                            int num_arguments, | 
 |   537                            LInstruction* instr) { | 
 |   538   ASSERT(instr != NULL); | 
 |   539   LPointerMap* pointers = instr->pointer_map(); | 
 |   540   ASSERT(pointers != NULL); | 
 |   541   RecordPosition(pointers->position()); | 
 |   542  | 
 |   543   __ CallRuntime(function, num_arguments); | 
 |   544   RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT); | 
 |   545 } | 
 |   546  | 
 |   547  | 
 |   548 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, | 
 |   549                                        int argc, | 
 |   550                                        LInstruction* instr) { | 
 |   551   __ CallRuntimeSaveDoubles(id); | 
 |   552   RecordSafepointWithRegisters( | 
 |   553       instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex); | 
 |   554 } | 
 |   555  | 
 |   556  | 
 |   557 void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr, | 
 |   558                                           SafepointMode safepoint_mode) { | 
 |   559   // Create the environment to bail out to. If the call has side effects, | 
 |   560   // execution has to continue after the call; otherwise execution could | 
 |   561   // resume from an earlier bailout point and repeat the call. | 
 |   562   LEnvironment* deoptimization_environment; | 
 |   563   if (instr->HasDeoptimizationEnvironment()) { | 
 |   564     deoptimization_environment = instr->deoptimization_environment(); | 
 |   565   } else { | 
 |   566     deoptimization_environment = instr->environment(); | 
 |   567   } | 
 |   568  | 
 |   569   RegisterEnvironmentForDeoptimization(deoptimization_environment); | 
 |   570   if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { | 
 |   571     RecordSafepoint(instr->pointer_map(), | 
 |   572                     deoptimization_environment->deoptimization_index()); | 
 |   573   } else { | 
 |   574     ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); | 
 |   575     RecordSafepointWithRegisters( | 
 |   576         instr->pointer_map(), | 
 |   577         0, | 
 |   578         deoptimization_environment->deoptimization_index()); | 
 |   579   } | 
 |   580 } | 
 |   581  | 
 |   582  | 
 |   583 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) { | 
 |   584   if (!environment->HasBeenRegistered()) { | 
 |   585     // Physical stack frame layout: | 
 |   586     // -x ............. -4  0 ..................................... y | 
 |   587     // [incoming arguments] [spill slots] [pushed outgoing arguments] | 
 |   588  | 
 |   589     // Layout of the environment: | 
 |   590     // 0 ..................................................... size-1 | 
 |   591     // [parameters] [locals] [expression stack including arguments] | 
 |   592  | 
 |   593     // Layout of the translation: | 
 |   594     // 0 ........................................................ size - 1 + 4 | 
 |   595     // [expression stack including arguments] [locals] [4 words] [parameters] | 
 |   596     // |>------------  translation_size ------------<| | 
 |   597  | 
 |   598     int frame_count = 0; | 
 |   599     for (LEnvironment* e = environment; e != NULL; e = e->outer()) { | 
 |   600       ++frame_count; | 
 |   601     } | 
 |   602     Translation translation(&translations_, frame_count); | 
 |   603     WriteTranslation(environment, &translation); | 
 |   604     int deoptimization_index = deoptimizations_.length(); | 
 |   605     environment->Register(deoptimization_index, translation.index()); | 
 |   606     deoptimizations_.Add(environment); | 
 |   607   } | 
 |   608 } | 
 |   609  | 
 |   610  | 
 |   611 void LCodeGen::DeoptimizeIf(Condition cc, | 
 |   612                             LEnvironment* environment, | 
 |   613                             Register src1, | 
 |   614                             const Operand& src2) { | 
 |   615   RegisterEnvironmentForDeoptimization(environment); | 
 |   616   ASSERT(environment->HasBeenRegistered()); | 
 |   617   int id = environment->deoptimization_index(); | 
 |   618   Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER); | 
 |   619   ASSERT(entry != NULL); | 
 |   620   if (entry == NULL) { | 
 |   621     Abort("bailout was not prepared"); | 
 |   622     return; | 
 |   623   } | 
 |   624  | 
 |   625   ASSERT(FLAG_deopt_every_n_times < 2);  // Other values not supported on MIPS. | 
 |   626  | 
 |   627   if (FLAG_deopt_every_n_times == 1 && | 
 |   628       info_->shared_info()->opt_count() == id) { | 
 |   629     __ Jump(entry, RelocInfo::RUNTIME_ENTRY); | 
 |   630     return; | 
 |   631   } | 
 |   632  | 
 |   633   if (FLAG_trap_on_deopt) { | 
 |   634     Label skip; | 
 |   635     if (cc != al) { | 
 |   636       __ Branch(&skip, NegateCondition(cc), src1, src2); | 
 |   637     } | 
 |   638     __ stop("trap_on_deopt"); | 
 |   639     __ bind(&skip); | 
 |   640   } | 
 |   641  | 
 |   642   if (cc == al) { | 
 |   643     __ Jump(entry, RelocInfo::RUNTIME_ENTRY); | 
 |   644   } else { | 
 |   645     // TODO(plind): The ARM port is a little different here, due to its | 
 |   646     // deopt jump table, which is not used on MIPS yet. | 
 |   647     __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2); | 
 |   648   } | 
 |   649 } | 
 |   650  | 
 |   651  | 
 |   652 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { | 
 |   653   int length = deoptimizations_.length(); | 
 |   654   if (length == 0) return; | 
 |   655   ASSERT(FLAG_deopt); | 
 |   656   Handle<DeoptimizationInputData> data = | 
 |   657       factory()->NewDeoptimizationInputData(length, TENURED); | 
 |   658  | 
 |   659   Handle<ByteArray> translations = translations_.CreateByteArray(); | 
 |   660   data->SetTranslationByteArray(*translations); | 
 |   661   data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); | 
 |   662  | 
 |   663   Handle<FixedArray> literals = | 
 |   664       factory()->NewFixedArray(deoptimization_literals_.length(), TENURED); | 
 |   665   for (int i = 0; i < deoptimization_literals_.length(); i++) { | 
 |   666     literals->set(i, *deoptimization_literals_[i]); | 
 |   667   } | 
 |   668   data->SetLiteralArray(*literals); | 
 |   669  | 
 |   670   data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id())); | 
 |   671   data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_)); | 
 |   672  | 
 |   673   // Populate the deoptimization entries. | 
 |   674   for (int i = 0; i < length; i++) { | 
 |   675     LEnvironment* env = deoptimizations_[i]; | 
 |   676     data->SetAstId(i, Smi::FromInt(env->ast_id())); | 
 |   677     data->SetTranslationIndex(i, Smi::FromInt(env->translation_index())); | 
 |   678     data->SetArgumentsStackHeight(i, | 
 |   679                                   Smi::FromInt(env->arguments_stack_height())); | 
 |   680   } | 
 |   681   code->set_deoptimization_data(*data); | 
 |   682 } | 
 |   683  | 
 |   684  | 
 |   685 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) { | 
 |   686   int result = deoptimization_literals_.length(); | 
 |   687   for (int i = 0; i < deoptimization_literals_.length(); ++i) { | 
 |   688     if (deoptimization_literals_[i].is_identical_to(literal)) return i; | 
 |   689   } | 
 |   690   deoptimization_literals_.Add(literal); | 
 |   691   return result; | 
 |   692 } | 
 |   693  | 
 |   694  | 
 |   695 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { | 
 |   696   ASSERT(deoptimization_literals_.length() == 0); | 
 |   697  | 
 |   698   const ZoneList<Handle<JSFunction> >* inlined_closures = | 
 |   699       chunk()->inlined_closures(); | 
 |   700  | 
 |   701   for (int i = 0, length = inlined_closures->length(); | 
 |   702        i < length; | 
 |   703        i++) { | 
 |   704     DefineDeoptimizationLiteral(inlined_closures->at(i)); | 
 |   705   } | 
 |   706  | 
 |   707   inlined_function_count_ = deoptimization_literals_.length(); | 
 |   708 } | 
 |   709  | 
 |   710  | 
 |   711 void LCodeGen::RecordSafepoint( | 
 |   712     LPointerMap* pointers, | 
 |   713     Safepoint::Kind kind, | 
 |   714     int arguments, | 
 |   715     int deoptimization_index) { | 
 |   716   ASSERT(expected_safepoint_kind_ == kind); | 
 |   717  | 
 |   718   const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); | 
 |   719   Safepoint safepoint = safepoints_.DefineSafepoint(masm(), | 
 |   720       kind, arguments, deoptimization_index); | 
 |   721   for (int i = 0; i < operands->length(); i++) { | 
 |   722     LOperand* pointer = operands->at(i); | 
 |   723     if (pointer->IsStackSlot()) { | 
 |   724       safepoint.DefinePointerSlot(pointer->index()); | 
 |   725     } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { | 
 |   726       safepoint.DefinePointerRegister(ToRegister(pointer)); | 
 |   727     } | 
 |   728   } | 
 |   729   if (kind & Safepoint::kWithRegisters) { | 
 |   730     // Register cp always contains a pointer to the context. | 
 |   731     safepoint.DefinePointerRegister(cp); | 
 |   732   } | 
 |   733 } | 
 |   734  | 
 |   735  | 
 |   736 void LCodeGen::RecordSafepoint(LPointerMap* pointers, | 
 |   737                                int deoptimization_index) { | 
 |   738   RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index); | 
 |   739 } | 
 |   740  | 
 |   741  | 
 |   742 void LCodeGen::RecordSafepoint(int deoptimization_index) { | 
 |   743   LPointerMap empty_pointers(RelocInfo::kNoPosition); | 
 |   744   RecordSafepoint(&empty_pointers, deoptimization_index); | 
 |   745 } | 
 |   746  | 
 |   747  | 
 |   748 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, | 
 |   749                                             int arguments, | 
 |   750                                             int deoptimization_index) { | 
 |   751   RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, | 
 |   752       deoptimization_index); | 
 |   753 } | 
 |   754  | 
 |   755  | 
 |   756 void LCodeGen::RecordSafepointWithRegistersAndDoubles( | 
 |   757     LPointerMap* pointers, | 
 |   758     int arguments, | 
 |   759     int deoptimization_index) { | 
 |   760   RecordSafepoint(pointers, Safepoint::kWithRegistersAndDoubles, arguments, | 
 |   761       deoptimization_index); | 
 |   762 } | 
 |   763  | 
 |   764  | 
 |   765 void LCodeGen::RecordPosition(int position) { | 
 |   766   if (position == RelocInfo::kNoPosition) return; | 
 |   767   masm()->positions_recorder()->RecordPosition(position); | 
 |   768 } | 
 |   769  | 
 |   770  | 
 |   771 void LCodeGen::DoLabel(LLabel* label) { | 
 |   772   if (label->is_loop_header()) { | 
 |   773     Comment(";;; B%d - LOOP entry", label->block_id()); | 
 |   774   } else { | 
 |   775     Comment(";;; B%d", label->block_id()); | 
 |   776   } | 
 |   777   __ bind(label->label()); | 
 |   778   current_block_ = label->block_id(); | 
 |   779   DoGap(label); | 
 |   780 } | 
 |   781  | 
 |   782  | 
 |   783 void LCodeGen::DoParallelMove(LParallelMove* move) { | 
 |   784   resolver_.Resolve(move); | 
 |   785 } | 
 |   786  | 
 |   787  | 
 |   788 void LCodeGen::DoGap(LGap* gap) { | 
 |   789   for (int i = LGap::FIRST_INNER_POSITION; | 
 |   790        i <= LGap::LAST_INNER_POSITION; | 
 |   791        i++) { | 
 |   792     LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i); | 
 |   793     LParallelMove* move = gap->GetParallelMove(inner_pos); | 
 |   794     if (move != NULL) DoParallelMove(move); | 
 |   795   } | 
 |   796  | 
 |   797   LInstruction* next = GetNextInstruction(); | 
 |   798   if (next != NULL && next->IsLazyBailout()) { | 
 |   799     int pc = masm()->pc_offset(); | 
 |   800     safepoints_.SetPcAfterGap(pc); | 
 |   801   } | 
 |   802 } | 
 |   803  | 
 |   804  | 
 |   805 void LCodeGen::DoInstructionGap(LInstructionGap* instr) { | 
 |   806   DoGap(instr); | 
 |   807 } | 
 |   808  | 
 |   809  | 
 |   810 void LCodeGen::DoParameter(LParameter* instr) { | 
 |   811   // Nothing to do. | 
 |   812 } | 
 |   813  | 
 |   814  | 
 |   815 void LCodeGen::DoCallStub(LCallStub* instr) { | 
 |   816   ASSERT(ToRegister(instr->result()).is(v0)); | 
 |   817   switch (instr->hydrogen()->major_key()) { | 
 |   818     case CodeStub::RegExpConstructResult: { | 
 |   819       RegExpConstructResultStub stub; | 
 |   820       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 
 |   821       break; | 
 |   822     } | 
 |   823     case CodeStub::RegExpExec: { | 
 |   824       RegExpExecStub stub; | 
 |   825       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 
 |   826       break; | 
 |   827     } | 
 |   828     case CodeStub::SubString: { | 
 |   829       SubStringStub stub; | 
 |   830       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 
 |   831       break; | 
 |   832     } | 
 |   833     case CodeStub::NumberToString: { | 
 |   834       NumberToStringStub stub; | 
 |   835       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 
 |   836       break; | 
 |   837     } | 
 |   838     case CodeStub::StringAdd: { | 
 |   839       StringAddStub stub(NO_STRING_ADD_FLAGS); | 
 |   840       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 
 |   841       break; | 
 |   842     } | 
 |   843     case CodeStub::StringCompare: { | 
 |   844       StringCompareStub stub; | 
 |   845       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 
 |   846       break; | 
 |   847     } | 
 |   848     case CodeStub::TranscendentalCache: { | 
 |   849       __ lw(a0, MemOperand(sp, 0)); | 
 |   850       TranscendentalCacheStub stub(instr->transcendental_type(), | 
 |   851                                    TranscendentalCacheStub::TAGGED); | 
 |   852       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 
 |   853       break; | 
 |   854     } | 
 |   855     default: | 
 |   856       UNREACHABLE(); | 
 |   857   } | 
 |   858 } | 
 |   859  | 
 |   860  | 
 |   861 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { | 
 |   862   // Nothing to do. | 
 |   863 } | 
 |   864  | 
 |   865  | 
 |   866 void LCodeGen::DoModI(LModI* instr) { | 
 |   867   Register scratch = scratch0(); | 
 |   868   const Register left = ToRegister(instr->InputAt(0)); | 
 |   869   const Register result = ToRegister(instr->result()); | 
 |   870  | 
 |   871   // p2constant holds the right-hand value if it is a power-of-2 constant. | 
 |   872   // In all other cases it is 0. | 
 |   873   int32_t p2constant = 0; | 
 |   874  | 
 |   875   if (instr->InputAt(1)->IsConstantOperand()) { | 
 |   876     p2constant = ToInteger32(LConstantOperand::cast(instr->InputAt(1))); | 
 |   877     // Result always takes the sign of the dividend (left). | 
 |   878     p2constant = abs(p2constant); | 
 |   879     if (!IsPowerOf2(p2constant)) {  // Masking only works for powers of two. | 
 |   880       p2constant = 0; | 
 |   881     } | 
 |   882   } | 
 |   883  | 
 |   884   // div runs in the background while we check for special cases. | 
 |   885   Register right = EmitLoadRegister(instr->InputAt(1), scratch); | 
 |   886   __ div(left, right); | 
 |   887  | 
 |   888   // Check for x % 0. | 
 |   889   if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { | 
 |   890     DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg)); | 
 |   891   } | 
 |   892  | 
 |   893   Label skip_div, do_div; | 
 |   894   if (p2constant != 0) { | 
 |   895     // The masking trick is only correct for a non-negative dividend; fall | 
 |   896     // back to the result of the div instruction when left may be negative. | 
 |   897     __ Branch(&do_div, lt, left, Operand(zero_reg)); | 
 |   898     // Modulo by masking. | 
 |   899     __ And(scratch, left, p2constant - 1); | 
 |   900     __ Branch(&skip_div); | 
 |   901   } | 
 |   902  | 
 |   903   __ bind(&do_div); | 
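 |         // The div instruction leaves the remainder in the HI register. | 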
 |   904   __ mfhi(scratch); | 
 |   905   __ bind(&skip_div); | 
 |   906  | 
 |   907   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 
 |   908     // Result always takes the sign of the dividend (left). | 
 |   909     Label done; | 
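 |           // The mov below is placed in the branch delay slot and runs on both paths. | 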
 |   910     __ Branch(USE_DELAY_SLOT, &done, ge, left, Operand(zero_reg)); | 
 |   911     __ mov(result, scratch); | 
 |   912     DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg)); | 
 |   913     __ bind(&done); | 
 |   914   } else { | 
 |   915     __ Move(result, scratch); | 
 |   916   } | 
 |   917 } | 
 |   918  | 
 |   919  | 
 |   920 void LCodeGen::DoDivI(LDivI* instr) { | 
 |   921   const Register left = ToRegister(instr->InputAt(0)); | 
 |   922   const Register right = ToRegister(instr->InputAt(1)); | 
 |   923   const Register result = ToRegister(instr->result()); | 
 |   924  | 
 |   925   // On MIPS div is asynchronous: it runs in the background while we | 
 |   926   // check for special cases. | 
 |   927   __ div(left, right); | 
 |   928  | 
 |   929   // Check for x / 0. | 
 |   930   if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { | 
 |   931     DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg)); | 
 |   932   } | 
 |   933  | 
 |   934   // Check for (0 / -x) that will produce negative zero. | 
 |   935   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 
 |   936     Label left_not_zero; | 
 |   937     __ Branch(&left_not_zero, ne, left, Operand(zero_reg)); | 
 |   938     DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg)); | 
 |   939     __ bind(&left_not_zero); | 
 |   940   } | 
 |   941  | 
 |   942   // Check for (-kMinInt / -1). | 
 |   943   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { | 
 |   944     Label left_not_min_int; | 
 |   945     __ Branch(&left_not_min_int, ne, left, Operand(kMinInt)); | 
 |   946     DeoptimizeIf(eq, instr->environment(), right, Operand(-1)); | 
 |   947     __ bind(&left_not_min_int); | 
 |   948   } | 
 |   949  | 
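 |         // Deopt if the remainder (in HI) is non-zero, i.e. the division was inexact. | 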
 |   950   __ mfhi(result); | 
 |   951   DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg)); | 
 |   952   __ mflo(result); | 
 |   953 } | 
 |   954  | 
 |   955  | 
 |   956 void LCodeGen::DoMulI(LMulI* instr) { | 
 |   957   Register scratch = scratch0(); | 
 |   958   Register result = ToRegister(instr->result()); | 
 |   959   // Note that result may alias left. | 
 |   960   Register left = ToRegister(instr->InputAt(0)); | 
 |   961   LOperand* right_op = instr->InputAt(1); | 
 |   962  | 
 |   963   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 
 |   964   bool bailout_on_minus_zero = | 
 |   965     instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | 
 |   966  | 
 |   967   if (right_op->IsConstantOperand() && !can_overflow) { | 
 |   968     // Use optimized code for specific constants. | 
 |   969     int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); | 
 |   970  | 
 |   971     if (bailout_on_minus_zero && (constant < 0)) { | 
 |   972       // The case of a zero constant is handled separately. If the constant | 
 |   973       // is negative and left is zero, the result should be -0. | 
 |   974       DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg)); | 
 |   975     } | 
 |   976  | 
 |   977     switch (constant) { | 
 |   978       case -1: | 
 |   979         __ Subu(result, zero_reg, left); | 
 |   980         break; | 
 |   981       case 0: | 
 |   982         if (bailout_on_minus_zero) { | 
 |   983           // If left is strictly negative and the constant is zero, the | 
 |   984           // result is -0. Deoptimize if required, otherwise return 0. | 
 |   985           DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg)); | 
 |   986         } | 
 |   987         __ mov(result, zero_reg); | 
 |   988         break; | 
 |   989       case 1: | 
 |   990         // Nothing to do. | 
 |   991         __ Move(result, left); | 
 |   992         break; | 
 |   993       default: | 
 |   994         // Multiplying by powers of two and powers of two plus or minus | 
 |   995         // one can be done faster with shifted operands. | 
 |   996         // For other constants we emit standard code. | 
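 |               // Branch-free absolute value: mask is 0 for a non-negative constant, -1 otherwise. | 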
 |   997         int32_t mask = constant >> 31; | 
 |   998         uint32_t constant_abs = (constant + mask) ^ mask; | 
 |   999  | 
 |  1000         if (IsPowerOf2(constant_abs) || | 
 |  1001             IsPowerOf2(constant_abs - 1) || | 
 |  1002             IsPowerOf2(constant_abs + 1)) { | 
 |  1003           if (IsPowerOf2(constant_abs)) { | 
 |  1004             int32_t shift = WhichPowerOf2(constant_abs); | 
 |  1005             __ sll(result, left, shift); | 
 |  1006           } else if (IsPowerOf2(constant_abs - 1)) { | 
 |  1007             int32_t shift = WhichPowerOf2(constant_abs - 1); | 
 |  1008             __ sll(result, left, shift); | 
 |  1009             __ Addu(result, result, left); | 
 |  1010           } else if (IsPowerOf2(constant_abs + 1)) { | 
 |  1011             int32_t shift = WhichPowerOf2(constant_abs + 1); | 
 |  1012             __ sll(result, left, shift); | 
 |  1013             __ Subu(result, result, left); | 
 |  1014           } | 
 |  1015  | 
 |  1016           // Correct the sign of the result if the constant is negative. | 
 |  1017           if (constant < 0) { | 
 |  1018             __ Subu(result, zero_reg, result); | 
 |  1019           } | 
 |  1020  | 
 |  1021         } else { | 
 |  1022           // Generate standard code. | 
 |  1023           __ li(at, constant); | 
 |  1024           __ mul(result, left, at); | 
 |  1025         } | 
 |  1026     } | 
 |  1027  | 
 |  1028   } else { | 
 |  1029     Register right = EmitLoadRegister(right_op, scratch); | 
 |  1030     if (bailout_on_minus_zero) { | 
 |  1031       __ Or(ToRegister(instr->TempAt(0)), left, right); | 
 |  1032     } | 
 |  1033  | 
 |  1034     if (can_overflow) { | 
 |  1035       // hi:lo = left * right. | 
 |  1036       __ mult(left, right); | 
 |  1037       __ mfhi(scratch); | 
 |  1038       __ mflo(result); | 
 |  1039       __ sra(at, result, 31); | 
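 |             // Overflow occurred iff HI differs from the sign extension of LO. | 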
 |  1040       DeoptimizeIf(ne, instr->environment(), scratch, Operand(at)); | 
 |  1041     } else { | 
 |  1042       __ mul(result, left, right); | 
 |  1043     } | 
 |  1044  | 
 |  1045     if (bailout_on_minus_zero) { | 
 |  1046       // Bail out if the result is supposed to be negative zero. | 
 |  1047       Label done; | 
 |  1048       __ Branch(&done, ne, result, Operand(zero_reg)); | 
 |  1049       DeoptimizeIf(lt, | 
 |  1050                    instr->environment(), | 
 |  1051                    ToRegister(instr->TempAt(0)), | 
 |  1052                    Operand(zero_reg)); | 
 |  1053       __ bind(&done); | 
 |  1054     } | 
 |  1055   } | 
 |  1056 } | 
 |  1057  | 
 |  1058  | 
 |  1059 void LCodeGen::DoBitI(LBitI* instr) { | 
 |  1060   LOperand* left_op = instr->InputAt(0); | 
 |  1061   LOperand* right_op = instr->InputAt(1); | 
 |  1062   ASSERT(left_op->IsRegister()); | 
 |  1063   Register left = ToRegister(left_op); | 
 |  1064   Register result = ToRegister(instr->result()); | 
 |  1065   Operand right(no_reg); | 
 |  1066  | 
 |  1067   if (right_op->IsStackSlot() || right_op->IsArgument()) { | 
 |  1068     right = Operand(EmitLoadRegister(right_op, at)); | 
 |  1069   } else { | 
 |  1070     ASSERT(right_op->IsRegister() || right_op->IsConstantOperand()); | 
 |  1071     right = ToOperand(right_op); | 
 |  1072   } | 
 |  1073  | 
 |  1074   switch (instr->op()) { | 
 |  1075     case Token::BIT_AND: | 
 |  1076       __ And(result, left, right); | 
 |  1077       break; | 
 |  1078     case Token::BIT_OR: | 
 |  1079       __ Or(result, left, right); | 
 |  1080       break; | 
 |  1081     case Token::BIT_XOR: | 
 |  1082       __ Xor(result, left, right); | 
 |  1083       break; | 
 |  1084     default: | 
 |  1085       UNREACHABLE(); | 
 |  1086       break; | 
 |  1087   } | 
 |  1088 } | 
 |  1089  | 
 |  1090  | 
 |  1091 void LCodeGen::DoShiftI(LShiftI* instr) { | 
 |  1092   // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so | 
 |  1093   // result may alias either of them. | 
 |  1094   LOperand* right_op = instr->InputAt(1); | 
 |  1095   Register left = ToRegister(instr->InputAt(0)); | 
 |  1096   Register result = ToRegister(instr->result()); | 
 |  1097  | 
 |  1098   if (right_op->IsRegister()) { | 
 |  1099     // No need to mask the right operand on MIPS; masking is built into the | 
 |  1100     // variable shift instructions. | 
 |  1101     switch (instr->op()) { | 
 |  1102       case Token::SAR: | 
 |  1103         __ srav(result, left, ToRegister(right_op)); | 
 |  1104         break; | 
 |  1105       case Token::SHR: | 
 |  1106         __ srlv(result, left, ToRegister(right_op)); | 
 |  1107         if (instr->can_deopt()) { | 
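 |               // Deopt if the top bit is set: the unsigned result does not fit in an int32. | 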
 |  1108           DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg)); | 
 |  1109         } | 
 |  1110         break; | 
 |  1111       case Token::SHL: | 
 |  1112         __ sllv(result, left, ToRegister(right_op)); | 
 |  1113         break; | 
 |  1114       default: | 
 |  1115         UNREACHABLE(); | 
 |  1116         break; | 
 |  1117     } | 
 |  1118   } else { | 
 |  1119     // Mask the right_op operand. | 
 |  1120     int value = ToInteger32(LConstantOperand::cast(right_op)); | 
 |  1121     uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); | 
 |  1122     switch (instr->op()) { | 
 |  1123       case Token::SAR: | 
 |  1124         if (shift_count != 0) { | 
 |  1125           __ sra(result, left, shift_count); | 
 |  1126         } else { | 
 |  1127           __ Move(result, left); | 
 |  1128         } | 
 |  1129         break; | 
 |  1130       case Token::SHR: | 
 |  1131         if (shift_count != 0) { | 
 |  1132           __ srl(result, left, shift_count); | 
 |  1133         } else { | 
 |  1134           if (instr->can_deopt()) { | 
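 |                 // Even a zero shift count must deopt if the top bit is set, since | 
 |                 // the unsigned result would not fit in an int32. | 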
 |  1135             __ And(at, left, Operand(0x80000000)); | 
 |  1136             DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg)); | 
 |  1137           } | 
 |  1138           __ Move(result, left); | 
 |  1139         } | 
 |  1140         break; | 
 |  1141       case Token::SHL: | 
 |  1142         if (shift_count != 0) { | 
 |  1143           __ sll(result, left, shift_count); | 
 |  1144         } else { | 
 |  1145           __ Move(result, left); | 
 |  1146         } | 
 |  1147         break; | 
 |  1148       default: | 
 |  1149         UNREACHABLE(); | 
 |  1150         break; | 
 |  1151     } | 
 |  1152   } | 
 |  1153 } | 
 |  1154  | 
 |  1155  | 
 |  1156 void LCodeGen::DoSubI(LSubI* instr) { | 
 |  1157   LOperand* left = instr->InputAt(0); | 
 |  1158   LOperand* right = instr->InputAt(1); | 
 |  1159   LOperand* result = instr->result(); | 
 |  1160   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 
 |  1161  | 
 |  1162   if (!can_overflow) { | 
 |  1163     if (right->IsStackSlot() || right->IsArgument()) { | 
 |  1164       Register right_reg = EmitLoadRegister(right, at); | 
 |  1165       __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg)); | 
 |  1166     } else { | 
 |  1167       ASSERT(right->IsRegister() || right->IsConstantOperand()); | 
 |  1168       __ Subu(ToRegister(result), ToRegister(left), ToOperand(right)); | 
 |  1169     } | 
 |  1170   } else {  // can_overflow. | 
 |  1171     Register overflow = scratch0(); | 
 |  1172     Register scratch = scratch1(); | 
 |  1173     if (right->IsStackSlot() || | 
 |  1174         right->IsArgument() || | 
 |  1175         right->IsConstantOperand()) { | 
 |  1176       Register right_reg = EmitLoadRegister(right, scratch); | 
 |  1177       __ SubuAndCheckForOverflow(ToRegister(result), | 
 |  1178                                  ToRegister(left), | 
 |  1179                                  right_reg, | 
 |  1180                                  overflow);  // Register at is also used as a scratch. | 
 |  1181     } else { | 
 |  1182       ASSERT(right->IsRegister()); | 
 |  1183       // The overflow-check macros do not support constant operands, so the | 
 |  1184       // IsConstantOperand case is handled by the clause above. | 
 |  1185       __ SubuAndCheckForOverflow(ToRegister(result), | 
 |  1186                                  ToRegister(left), | 
 |  1187                                  ToRegister(right), | 
 |  1188                                  overflow);  // Register at is also used as a scratch. | 
 |  1189     } | 
 |  1190     DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg)); | 
 |  1191   } | 
 |  1192 } | 
 |  1193  | 
 |  1194  | 
 |  1195 void LCodeGen::DoConstantI(LConstantI* instr) { | 
 |  1196   ASSERT(instr->result()->IsRegister()); | 
 |  1197   __ li(ToRegister(instr->result()), Operand(instr->value())); | 
 |  1198 } | 
 |  1199  | 
 |  1200  | 
 |  1201 void LCodeGen::DoConstantD(LConstantD* instr) { | 
 |  1202   ASSERT(instr->result()->IsDoubleRegister()); | 
 |  1203   DoubleRegister result = ToDoubleRegister(instr->result()); | 
 |  1204   double v = instr->value(); | 
 |  1205   __ Move(result, v); | 
 |  1206 } | 
 |  1207  | 
 |  1208  | 
 |  1209 void LCodeGen::DoConstantT(LConstantT* instr) { | 
 |  1210   ASSERT(instr->result()->IsRegister()); | 
 |  1211   __ li(ToRegister(instr->result()), Operand(instr->value())); | 
 |  1212 } | 
 |  1213  | 
 |  1214  | 
 |  1215 void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) { | 
 |  1216   Register result = ToRegister(instr->result()); | 
 |  1217   Register array = ToRegister(instr->InputAt(0)); | 
 |  1218   __ lw(result, FieldMemOperand(array, JSArray::kLengthOffset)); | 
 |  1219 } | 
 |  1220  | 
 |  1221  | 
 |  1222 void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) { | 
 |  1223   Register result = ToRegister(instr->result()); | 
 |  1224   Register array = ToRegister(instr->InputAt(0)); | 
 |  1225   __ lw(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset)); | 
 |  1226 } | 
 |  1227  | 
 |  1228  | 
 |  1229 void LCodeGen::DoElementsKind(LElementsKind* instr) { | 
 |  1230   Register result = ToRegister(instr->result()); | 
 |  1231   Register input = ToRegister(instr->InputAt(0)); | 
 |  1232  | 
 |  1233   // Load map into |result|. | 
 |  1234   __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset)); | 
 |  1235   // Load the map's "bit field 2" into |result|. We only need the first byte, | 
 |  1236   // but the following bit field extraction takes care of that anyway. | 
 |  1237   __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset)); | 
 |  1238   // Retrieve elements_kind from bit field 2. | 
 |  1239   __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount); | 
 |  1240 } | 
 |  1241  | 
 |  1242  | 
 |  1243 void LCodeGen::DoValueOf(LValueOf* instr) { | 
 |  1244   Register input = ToRegister(instr->InputAt(0)); | 
 |  1245   Register result = ToRegister(instr->result()); | 
 |  1246   Register map = ToRegister(instr->TempAt(0)); | 
 |  1247   Label done; | 
 |  1248  | 
 |  1249   // If the object is a smi return the object. | 
 |  1250   __ Move(result, input); | 
 |  1251   __ JumpIfSmi(input, &done); | 
 |  1252  | 
 |  1253   // If the object is not a value type, return the object. | 
 |  1254   __ GetObjectType(input, map, map); | 
 |  1255   __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE)); | 
 |  1256   __ lw(result, FieldMemOperand(input, JSValue::kValueOffset)); | 
 |  1257  | 
 |  1258   __ bind(&done); | 
 |  1259 } | 
 |  1260  | 
 |  1261  | 
 |  1262 void LCodeGen::DoBitNotI(LBitNotI* instr) { | 
 |  1263   Register input = ToRegister(instr->InputAt(0)); | 
 |  1264   Register result = ToRegister(instr->result()); | 
 |  1265   __ Nor(result, zero_reg, Operand(input)); | 
 |  1266 } | 
 |  1267  | 
 |  1268  | 
 |  1269 void LCodeGen::DoThrow(LThrow* instr) { | 
 |  1270   Register input_reg = EmitLoadRegister(instr->InputAt(0), at); | 
 |  1271   __ push(input_reg); | 
 |  1272   CallRuntime(Runtime::kThrow, 1, instr); | 
 |  1273  | 
 |  1274   if (FLAG_debug_code) { | 
 |  1275     __ stop("Unreachable code."); | 
 |  1276   } | 
 |  1277 } | 
 |  1278  | 
 |  1279  | 
 |  1280 void LCodeGen::DoAddI(LAddI* instr) { | 
 |  1281   LOperand* left = instr->InputAt(0); | 
 |  1282   LOperand* right = instr->InputAt(1); | 
 |  1283   LOperand* result = instr->result(); | 
 |  1284   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 
 |  1285  | 
 |  1286   if (!can_overflow) { | 
 |  1287     if (right->IsStackSlot() || right->IsArgument()) { | 
 |  1288       Register right_reg = EmitLoadRegister(right, at); | 
 |  1289       __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg)); | 
 |  1290     } else { | 
 |  1291       ASSERT(right->IsRegister() || right->IsConstantOperand()); | 
 |  1292       __ Addu(ToRegister(result), ToRegister(left), ToOperand(right)); | 
 |  1293     } | 
 |  1294   } else {  // can_overflow. | 
 |  1295     Register overflow = scratch0(); | 
 |  1296     Register scratch = scratch1(); | 
 |  1297     if (right->IsStackSlot() || | 
 |  1298         right->IsArgument() || | 
 |  1299         right->IsConstantOperand()) { | 
 |  1300       Register right_reg = EmitLoadRegister(right, scratch); | 
 |  1301       __ AdduAndCheckForOverflow(ToRegister(result), | 
 |  1302                                  ToRegister(left), | 
 |  1303                                  right_reg, | 
 |  1304                                  overflow);  // Register at is also used as scratch. | 
 |  1305     } else { | 
 |  1306       ASSERT(right->IsRegister()); | 
 |  1307       // The overflow check macros do not support constant operands, so the | 
 |  1308       // IsConstantOperand case is handled in the previous clause. | 
 |  1309       __ AdduAndCheckForOverflow(ToRegister(result), | 
 |  1310                                  ToRegister(left), | 
 |  1311                                  ToRegister(right), | 
 |  1312                                  overflow);  // Register at is also used as scratch. | 
 |  1313     } | 
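 |           // AdduAndCheckForOverflow leaves a negative value in the overflow | 
 |           // register exactly when the addition overflowed, so deoptimize on | 
 |           // less-than-zero. | 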
 |  1314     DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg)); | 
 |  1315   } | 
 |  1316 } | 
 |  1317  | 
 |  1318  | 
 |  1319 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { | 
 |  1320   DoubleRegister left = ToDoubleRegister(instr->InputAt(0)); | 
 |  1321   DoubleRegister right = ToDoubleRegister(instr->InputAt(1)); | 
 |  1322   DoubleRegister result = ToDoubleRegister(instr->result()); | 
 |  1323   switch (instr->op()) { | 
 |  1324     case Token::ADD: | 
 |  1325       __ add_d(result, left, right); | 
 |  1326       break; | 
 |  1327     case Token::SUB: | 
 |  1328       __ sub_d(result, left, right); | 
 |  1329       break; | 
 |  1330     case Token::MUL: | 
 |  1331       __ mul_d(result, left, right); | 
 |  1332       break; | 
 |  1333     case Token::DIV: | 
 |  1334       __ div_d(result, left, right); | 
 |  1335       break; | 
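 |           // The FPU has no remainder instruction, so MOD is computed by calling | 
 |           // a C helper through double_fp_operation below. | 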
 |  1336     case Token::MOD: { | 
 |  1337       // Save a0-a3 on the stack. | 
 |  1338       RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit(); | 
 |  1339       __ MultiPush(saved_regs); | 
 |  1340  | 
 |  1341       __ PrepareCallCFunction(0, 2, scratch0()); | 
 |  1342       __ SetCallCDoubleArguments(left, right); | 
 |  1343       __ CallCFunction( | 
 |  1344           ExternalReference::double_fp_operation(Token::MOD, isolate()), | 
 |  1345           0, 2); | 
 |  1346       // Move the result into the double result register. | 
 |  1347       __ GetCFunctionDoubleResult(result); | 
 |  1348  | 
 |  1349       // Restore saved registers. | 
 |  1350       __ MultiPop(saved_regs); | 
 |  1351       break; | 
 |  1352     } | 
 |  1353     default: | 
 |  1354       UNREACHABLE(); | 
 |  1355       break; | 
 |  1356   } | 
 |  1357 } | 
 |  1358  | 
 |  1359  | 
 |  1360 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { | 
 |  1361   ASSERT(ToRegister(instr->InputAt(0)).is(a1)); | 
 |  1362   ASSERT(ToRegister(instr->InputAt(1)).is(a0)); | 
 |  1363   ASSERT(ToRegister(instr->result()).is(v0)); | 
 |  1364  | 
 |  1365   BinaryOpStub stub(instr->op(), NO_OVERWRITE); | 
 |  1366   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 
 |  1367   // Other architectures use a nop here to signal that there is no inlined | 
 |  1368   // patchable code. MIPS does not need the nop, since our marker | 
 |  1369   // instruction (andi zero_reg) will never be used in normal code. | 
 |  1370 } | 
 |  1371  | 
 |  1372  | 
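 |       // Returns the index of the next block that will actually be emitted, | 
 |       // skipping labels that have been replaced, or -1 if none follows. | 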
 |  1373 int LCodeGen::GetNextEmittedBlock(int block) { | 
 |  1374   for (int i = block + 1; i < graph()->blocks()->length(); ++i) { | 
 |  1375     LLabel* label = chunk_->GetLabel(i); | 
 |  1376     if (!label->HasReplacement()) return i; | 
 |  1377   } | 
 |  1378   return -1; | 
 |  1379 } | 
 |  1380  | 
 |  1381  | 
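 |       // Emits a conditional branch on (src1 cc src2). When one successor is the | 
 |       // next block to be emitted, a single branch (with the condition negated if | 
 |       // necessary) suffices; otherwise two branches are emitted. | 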
 |  1382 void LCodeGen::EmitBranch(int left_block, int right_block, | 
 |  1383                           Condition cc, Register src1, const Operand& src2) { | 
 |  1384   int next_block = GetNextEmittedBlock(current_block_); | 
 |  1385   right_block = chunk_->LookupDestination(right_block); | 
 |  1386   left_block = chunk_->LookupDestination(left_block); | 
 |  1387   if (right_block == left_block) { | 
 |  1388     EmitGoto(left_block); | 
 |  1389   } else if (left_block == next_block) { | 
 |  1390     __ Branch(chunk_->GetAssemblyLabel(right_block), | 
 |  1391               NegateCondition(cc), src1, src2); | 
 |  1392   } else if (right_block == next_block) { | 
 |  1393     __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2); | 
 |  1394   } else { | 
 |  1395     __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2); | 
 |  1396     __ Branch(chunk_->GetAssemblyLabel(right_block)); | 
 |  1397   } | 
 |  1398 } | 
 |  1399  | 
 |  1400  | 
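 |       // Floating-point variant of EmitBranch, comparing two FPU registers. | 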
 |  1401 void LCodeGen::EmitBranchF(int left_block, int right_block, | 
 |  1402                            Condition cc, FPURegister src1, FPURegister src2) { | 
 |  1403   int next_block = GetNextEmittedBlock(current_block_); | 
 |  1404   right_block = chunk_->LookupDestination(right_block); | 
 |  1405   left_block = chunk_->LookupDestination(left_block); | 
 |  1406   if (right_block == left_block) { | 
 |  1407     EmitGoto(left_block); | 
 |  1408   } else if (left_block == next_block) { | 
 |  1409     __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL, | 
 |  1410                NegateCondition(cc), src1, src2); | 
 |  1411   } else if (right_block == next_block) { | 
 |  1412     __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2); | 
 |  1413   } else { | 
 |  1414     __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2); | 
 |  1415     __ Branch(chunk_->GetAssemblyLabel(right_block)); | 
 |  1416   } | 
 |  1417 } | 
 |  1418  | 
 |  1419  | 
 |  1420 void LCodeGen::DoBranch(LBranch* instr) { | 
 |  1421   int true_block = chunk_->LookupDestination(instr->true_block_id()); | 
 |  1422   int false_block = chunk_->LookupDestination(instr->false_block_id()); | 
 |  1423  | 
 |  1424   Representation r = instr->hydrogen()->value()->representation(); | 
 |  1425   if (r.IsInteger32()) { | 
 |  1426     Register reg = ToRegister(instr->InputAt(0)); | 
 |  1427     EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg)); | 
 |  1428   } else if (r.IsDouble()) { | 
 |  1429     DoubleRegister reg = ToDoubleRegister(instr->InputAt(0)); | 
 |  1430     // Test the double value. Zero and NaN are false. | 
 |  1431     EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero); | 
 |  1432   } else { | 
 |  1433     ASSERT(r.IsTagged()); | 
 |  1434     Register reg = ToRegister(instr->InputAt(0)); | 
 |  1435     HType type = instr->hydrogen()->value()->type(); | 
 |  1436     if (type.IsBoolean()) { | 
 |  1437       __ LoadRoot(at, Heap::kTrueValueRootIndex); | 
 |  1438       EmitBranch(true_block, false_block, eq, reg, Operand(at)); | 
 |  1439     } else if (type.IsSmi()) { | 
 |  1440       EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg)); | 
 |  1441     } else { | 
 |  1442       Label* true_label = chunk_->GetAssemblyLabel(true_block); | 
 |  1443       Label* false_label = chunk_->GetAssemblyLabel(false_block); | 
 |  1444  | 
 |  1445       ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); | 
 |  1446       // Avoid deopts in the case where we've never executed this path before. | 
 |  1447       if (expected.IsEmpty()) expected = ToBooleanStub::all_types(); | 
 |  1448  | 
 |  1449       if (expected.Contains(ToBooleanStub::UNDEFINED)) { | 
 |  1450         // undefined -> false. | 
 |  1451         __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 
 |  1452         __ Branch(false_label, eq, reg, Operand(at)); | 
 |  1453       } | 
 |  1454       if (expected.Contains(ToBooleanStub::BOOLEAN)) { | 
 |  1455         // Boolean -> its value. | 
 |  1456         __ LoadRoot(at, Heap::kTrueValueRootIndex); | 
 |  1457         __ Branch(true_label, eq, reg, Operand(at)); | 
 |  1458         __ LoadRoot(at, Heap::kFalseValueRootIndex); | 
 |  1459         __ Branch(false_label, eq, reg, Operand(at)); | 
 |  1460       } | 
 |  1461       if (expected.Contains(ToBooleanStub::NULL_TYPE)) { | 
 |  1462         // 'null' -> false. | 
 |  1463         __ LoadRoot(at, Heap::kNullValueRootIndex); | 
 |  1464         __ Branch(false_label, eq, reg, Operand(at)); | 
 |  1465       } | 
 |  1466  | 
 |  1467       if (expected.Contains(ToBooleanStub::SMI)) { | 
 |  1468         // Smis: 0 -> false, all others -> true. | 
 |  1469         __ Branch(false_label, eq, reg, Operand(zero_reg)); | 
 |  1470         __ JumpIfSmi(reg, true_label); | 
 |  1471       } else if (expected.NeedsMap()) { | 
 |  1472         // If we need a map later and have a Smi -> deopt. | 
 |  1473         __ And(at, reg, Operand(kSmiTagMask)); | 
 |  1474         DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg)); | 
 |  1475       } | 
 |  1476  | 
 |  1477       const Register map = scratch0(); | 
 |  1478       if (expected.NeedsMap()) { | 
 |  1479         __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset)); | 
 |  1480         if (expected.CanBeUndetectable()) { | 
 |  1481           // Undetectable -> false. | 
 |  1482           __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); | 
 |  1483           __ And(at, at, Operand(1 << Map::kIsUndetectable)); | 
 |  1484           __ Branch(false_label, ne, at, Operand(zero_reg)); | 
 |  1485         } | 
 |  1486       } | 
 |  1487  | 
 |  1488       if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) { | 
 |  1489         // spec object -> true. | 
 |  1490         __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 
 |  1491         __ Branch(true_label, ge, at, Operand(FIRST_SPEC_OBJECT_TYPE)); | 
 |  1492       } | 
 |  1493  | 
 |  1494       if (expected.Contains(ToBooleanStub::STRING)) { | 
 |  1495         // String value -> false iff empty. | 
 |  1496         Label not_string; | 
 |  1497         __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 
 |  1498         __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE)); | 
 |  1499         __ lw(at, FieldMemOperand(reg, String::kLengthOffset)); | 
 |  1500         __ Branch(true_label, ne, at, Operand(zero_reg)); | 
 |  1501         __ Branch(false_label); | 
 |  1502         __ bind(&not_string); | 
 |  1503       } | 
 |  1504  | 
 |  1505       if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { | 
 |  1506         // heap number -> false iff +0, -0, or NaN. | 
 |  1507         DoubleRegister dbl_scratch = double_scratch0(); | 
 |  1508         Label not_heap_number; | 
 |  1509         __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 
 |  1510         __ Branch(&not_heap_number, ne, map, Operand(at)); | 
 |  1511         __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); | 
 |  1512         __ BranchF(true_label, false_label, ne, dbl_scratch, kDoubleRegZero); | 
 |  1513         // Falls through if dbl_scratch == 0. | 
 |  1514         __ Branch(false_label); | 
 |  1515         __ bind(&not_heap_number); | 
 |  1516       } | 
 |  1517  | 
 |  1518       // We've seen something for the first time -> deopt. | 
 |  1519       DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg)); | 
 |  1520     } | 
 |  1521   } | 
 |  1522 } | 
 |  1523  | 
 |  1524  | 
 |  1525 void LCodeGen::EmitGoto(int block) { | 
 |  1526   block = chunk_->LookupDestination(block); | 
 |  1527   int next_block = GetNextEmittedBlock(current_block_); | 
 |  1528   if (block != next_block) { | 
 |  1529     __ jmp(chunk_->GetAssemblyLabel(block)); | 
 |  1530   } | 
 |  1531 } | 
 |  1532  | 
 |  1533  | 
 |  1534 void LCodeGen::DoGoto(LGoto* instr) { | 
 |  1535   EmitGoto(instr->block_id()); | 
 |  1536 } | 
 |  1537  | 
 |  1538  | 
 |  1539 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { | 
 |  1540   Condition cond = kNoCondition; | 
 |  1541   switch (op) { | 
 |  1542     case Token::EQ: | 
 |  1543     case Token::EQ_STRICT: | 
 |  1544       cond = eq; | 
 |  1545       break; | 
 |  1546     case Token::LT: | 
 |  1547       cond = is_unsigned ? lo : lt; | 
 |  1548       break; | 
 |  1549     case Token::GT: | 
 |  1550       cond = is_unsigned ? hi : gt; | 
 |  1551       break; | 
 |  1552     case Token::LTE: | 
 |  1553       cond = is_unsigned ? ls : le; | 
 |  1554       break; | 
 |  1555     case Token::GTE: | 
 |  1556       cond = is_unsigned ? hs : ge; | 
 |  1557       break; | 
 |  1558     case Token::IN: | 
 |  1559     case Token::INSTANCEOF: | 
 |  1560     default: | 
 |  1561       UNREACHABLE(); | 
 |  1562   } | 
 |  1563   return cond; | 
 |  1564 } | 
 |  1565  | 
 |  1566  | 
 |  1567 void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) { | 
 |  1568   // This function must never be called on MIPS. It is just a compare, | 
 |  1569   // and it should be generated inline as part of the branch that uses | 
 |  1570   // it. It should therefore always remain an unimplemented function. | 
 |  1571   // The ARM equivalent would be: | 
 |  1572   // arm: __ cmp(ToRegister(left), ToRegister(right)); | 
 |  1573   Abort("Unimplemented: %s (line %d)", __func__, __LINE__); | 
 |  1574 } | 
 |  1575  | 
 |  1576  | 
 |  1577 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { | 
 |  1578   LOperand* left = instr->InputAt(0); | 
 |  1579   LOperand* right = instr->InputAt(1); | 
 |  1580   int false_block = chunk_->LookupDestination(instr->false_block_id()); | 
 |  1581   int true_block = chunk_->LookupDestination(instr->true_block_id()); | 
 |  1582  | 
 |  1583   Condition cc = TokenToCondition(instr->op(), instr->is_double()); | 
 |  1584  | 
 |  1585   if (instr->is_double()) { | 
 |  1586     // Compare left and right as doubles and load the | 
 |  1587     // resulting flags into the normal status register. | 
 |  1588     FPURegister left_reg = ToDoubleRegister(left); | 
 |  1589     FPURegister right_reg = ToDoubleRegister(right); | 
 |  1590  | 
 |  1591     // If a NaN is involved, i.e. the result is unordered, | 
 |  1592     // jump to the false block label. | 
 |  1593     __ BranchF(NULL, chunk_->GetAssemblyLabel(false_block), eq, | 
 |  1594                left_reg, right_reg); | 
 |  1595  | 
 |  1596     EmitBranchF(true_block, false_block, cc, left_reg, right_reg); | 
 |  1597   } else { | 
 |  1598     // EmitCmpI cannot be used on MIPS. | 
 |  1599     // EmitCmpI(left, right); | 
 |  1600     EmitBranch(true_block, | 
 |  1601                false_block, | 
 |  1602                cc, | 
 |  1603                ToRegister(left), | 
 |  1604                Operand(ToRegister(right))); | 
 |  1605   } | 
 |  1606 } | 
 |  1607  | 
 |  1608  | 
 |  1609 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { | 
 |  1610   Register left = ToRegister(instr->InputAt(0)); | 
 |  1611   Register right = ToRegister(instr->InputAt(1)); | 
 |  1612   int false_block = chunk_->LookupDestination(instr->false_block_id()); | 
 |  1613   int true_block = chunk_->LookupDestination(instr->true_block_id()); | 
 |  1614  | 
 |  1615   EmitBranch(true_block, false_block, eq, left, Operand(right)); | 
 |  1616 } | 
 |  1617  | 
 |  1618  | 
 |  1619 void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) { | 
 |  1620   Register left = ToRegister(instr->InputAt(0)); | 
 |  1621   int true_block = chunk_->LookupDestination(instr->true_block_id()); | 
 |  1622   int false_block = chunk_->LookupDestination(instr->false_block_id()); | 
 |  1623  | 
 |  1624   EmitBranch(true_block, false_block, eq, left, | 
 |  1625              Operand(instr->hydrogen()->right())); | 
 |  1626 } | 
 |  1627  | 
 |  1628  | 
 |  1630 void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) { | 
 |  1631   Register scratch = scratch0(); | 
 |  1632   Register reg = ToRegister(instr->InputAt(0)); | 
 |  1633   int false_block = chunk_->LookupDestination(instr->false_block_id()); | 
 |  1634  | 
 |  1635   // If the expression is known to be untagged or a smi, then it's definitely | 
 |  1636   // not null, and it can't be an undetectable object. | 
 |  1637   if (instr->hydrogen()->representation().IsSpecialization() || | 
 |  1638       instr->hydrogen()->type().IsSmi()) { | 
 |  1639     EmitGoto(false_block); | 
 |  1640     return; | 
 |  1641   } | 
 |  1642  | 
 |  1643   int true_block = chunk_->LookupDestination(instr->true_block_id()); | 
 |  1644  | 
 |  1645   Heap::RootListIndex nil_value = instr->nil() == kNullValue ? | 
 |  1646       Heap::kNullValueRootIndex : | 
 |  1647       Heap::kUndefinedValueRootIndex; | 
 |  1648   __ LoadRoot(at, nil_value); | 
 |  1649   if (instr->kind() == kStrictEquality) { | 
 |  1650     EmitBranch(true_block, false_block, eq, reg, Operand(at)); | 
 |  1651   } else { | 
 |  1652     Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ? | 
 |  1653         Heap::kUndefinedValueRootIndex : | 
 |  1654         Heap::kNullValueRootIndex; | 
 |  1655     Label* true_label = chunk_->GetAssemblyLabel(true_block); | 
 |  1656     Label* false_label = chunk_->GetAssemblyLabel(false_block); | 
 |  1657     __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at)); | 
 |  1658     __ LoadRoot(at, other_nil_value);  // In the delay slot. | 
 |  1659     __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at)); | 
 |  1660     __ JumpIfSmi(reg, false_label);  // In the delay slot. | 
 |  1661     // Check for undetectable objects by looking in the bit field in | 
 |  1662     // the map. The object has already been smi checked. | 
 |  1663     __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); | 
 |  1664     __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); | 
 |  1665     __ And(scratch, scratch, 1 << Map::kIsUndetectable); | 
 |  1666     EmitBranch(true_block, false_block, ne, scratch, Operand(zero_reg)); | 
 |  1667   } | 
 |  1668 } | 
 |  1669  | 
 |  1670  | 
 |  1671 Condition LCodeGen::EmitIsObject(Register input, | 
 |  1672                                  Register temp1, | 
 |  1673                                  Label* is_not_object, | 
 |  1674                                  Label* is_object) { | 
 |  1675   Register temp2 = scratch0(); | 
 |  1676   __ JumpIfSmi(input, is_not_object); | 
 |  1677  | 
 |  1678   __ LoadRoot(temp2, Heap::kNullValueRootIndex); | 
 |  1679   __ Branch(is_object, eq, input, Operand(temp2)); | 
 |  1680  | 
 |  1681   // Load map. | 
 |  1682   __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset)); | 
 |  1683   // Undetectable objects behave like undefined. | 
 |  1684   __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset)); | 
 |  1685   __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable)); | 
 |  1686   __ Branch(is_not_object, ne, temp2, Operand(zero_reg)); | 
 |  1687  | 
 |  1688   // Load instance type and check that it is in object type range. | 
 |  1689   __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset)); | 
 |  1690   __ Branch(is_not_object, | 
 |  1691             lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); | 
 |  1692  | 
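 |         // The caller completes the test by branching on the returned condition, | 
 |         // comparing the instance type left in scratch0() against | 
 |         // LAST_NONCALLABLE_SPEC_OBJECT_TYPE. | 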
 |  1693   return le; | 
 |  1694 } | 
 |  1695  | 
 |  1696  | 
 |  1697 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) { | 
 |  1698   Register reg = ToRegister(instr->InputAt(0)); | 
 |  1699   Register temp1 = ToRegister(instr->TempAt(0)); | 
 |  1700   Register temp2 = scratch0(); | 
 |  1701  | 
 |  1702   int true_block = chunk_->LookupDestination(instr->true_block_id()); | 
 |  1703   int false_block = chunk_->LookupDestination(instr->false_block_id()); | 
 |  1704   Label* true_label = chunk_->GetAssemblyLabel(true_block); | 
 |  1705   Label* false_label = chunk_->GetAssemblyLabel(false_block); | 
 |  1706  | 
 |  1707   Condition true_cond = | 
 |  1708       EmitIsObject(reg, temp1, false_label, true_label); | 
 |  1709  | 
 |  1710   EmitBranch(true_block, false_block, true_cond, temp2, | 
 |  1711              Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); | 
 |  1712 } | 
 |  1713  | 
 |  1714  | 
 |  1715 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { | 
 |  1716   int true_block = chunk_->LookupDestination(instr->true_block_id()); | 
 |  1717   int false_block = chunk_->LookupDestination(instr->false_block_id()); | 
 |  1718  | 
 |  1719   Register input_reg = EmitLoadRegister(instr->InputAt(0), at); | 
 |  1720   __ And(at, input_reg, kSmiTagMask); | 
 |  1721   EmitBranch(true_block, false_block, eq, at, Operand(zero_reg)); | 
 |  1722 } | 
 |  1723  | 
 |  1724  | 
 |  1725 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { | 
 |  1726   Register input = ToRegister(instr->InputAt(0)); | 
 |  1727   Register temp = ToRegister(instr->TempAt(0)); | 
 |  1728  | 
 |  1729   int true_block = chunk_->LookupDestination(instr->true_block_id()); | 
 |  1730   int false_block = chunk_->LookupDestination(instr->false_block_id()); | 
 |  1731  | 
 |  1732   __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block)); | 
 |  1733   __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset)); | 
 |  1734   __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); | 
 |  1735   __ And(at, temp, Operand(1 << Map::kIsUndetectable)); | 
 |  1736   EmitBranch(true_block, false_block, ne, at, Operand(zero_reg)); | 
 |  1737 } | 
 |  1738  | 
 |  1739  | 
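 |       // A HHasInstanceTypeAndBranch checks an inclusive [from, to] instance-type | 
 |       // range. TestType picks the single boundary type to compare against and | 
 |       // BranchCondition the matching condition, so one compare suffices. | 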
 |  1740 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { | 
 |  1741   InstanceType from = instr->from(); | 
 |  1742   InstanceType to = instr->to(); | 
 |  1743   if (from == FIRST_TYPE) return to; | 
 |  1744   ASSERT(from == to || to == LAST_TYPE); | 
 |  1745   return from; | 
 |  1746 } | 
 |  1747  | 
 |  1748  | 
 |  1749 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { | 
 |  1750   InstanceType from = instr->from(); | 
 |  1751   InstanceType to = instr->to(); | 
 |  1752   if (from == to) return eq; | 
 |  1753   if (to == LAST_TYPE) return hs; | 
 |  1754   if (from == FIRST_TYPE) return ls; | 
 |  1755   UNREACHABLE(); | 
 |  1756   return eq; | 
 |  1757 } | 
 |  1758  | 
 |  1759  | 
 |  1760 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { | 
 |  1761   Register scratch = scratch0(); | 
 |  1762   Register input = ToRegister(instr->InputAt(0)); | 
 |  1763  | 
 |  1764   int true_block = chunk_->LookupDestination(instr->true_block_id()); | 
 |  1765   int false_block = chunk_->LookupDestination(instr->false_block_id()); | 
 |  1766  | 
 |  1767   Label* false_label = chunk_->GetAssemblyLabel(false_block); | 
 |  1768  | 
 |  1769   __ JumpIfSmi(input, false_label); | 
 |  1770  | 
 |  1771   __ GetObjectType(input, scratch, scratch); | 
 |  1772   EmitBranch(true_block, | 
 |  1773              false_block, | 
 |  1774              BranchCondition(instr->hydrogen()), | 
 |  1775              scratch, | 
 |  1776              Operand(TestType(instr->hydrogen()))); | 
 |  1777 } | 
 |  1778  | 
 |  1779  | 
 |  1780 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { | 
 |  1781   Register input = ToRegister(instr->InputAt(0)); | 
 |  1782   Register result = ToRegister(instr->result()); | 
 |  1783  | 
 |  1784   if (FLAG_debug_code) { | 
 |  1785     __ AbortIfNotString(input); | 
 |  1786   } | 
 |  1787  | 
 |  1788   __ lw(result, FieldMemOperand(input, String::kHashFieldOffset)); | 
 |  1789   __ IndexFromHash(result, result); | 
 |  1790 } | 
 |  1791  | 
 |  1792  | 
 |  1793 void LCodeGen::DoHasCachedArrayIndexAndBranch( | 
 |  1794     LHasCachedArrayIndexAndBranch* instr) { | 
 |  1795   Register input = ToRegister(instr->InputAt(0)); | 
 |  1796   Register scratch = scratch0(); | 
 |  1797  | 
 |  1798   int true_block = chunk_->LookupDestination(instr->true_block_id()); | 
 |  1799   int false_block = chunk_->LookupDestination(instr->false_block_id()); | 
 |  1800  | 
 |  1801   __ lw(scratch, | 
 |  1802          FieldMemOperand(input, String::kHashFieldOffset)); | 
 |  1803   __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask)); | 
 |  1804   EmitBranch(true_block, false_block, eq, at, Operand(zero_reg)); | 
 |  1805 } | 
 |  1806  | 
 |  1807  | 
 |  1808 // Branches to a label or falls through with this instance class-name address | 
 |  1809 // returned in the temp register, available for comparison by the caller. | 
 |  1810 // Trashes the temp registers, but not the input. Only input and temp2 may alias. | 
 |  1811 void LCodeGen::EmitClassOfTest(Label* is_true, | 
 |  1812                                Label* is_false, | 
 |  1813                                Handle<String> class_name, | 
 |  1814                                Register input, | 
 |  1815                                Register temp, | 
 |  1816                                Register temp2) { | 
 |  1817   ASSERT(!input.is(temp)); | 
 |  1818   ASSERT(!temp.is(temp2));  // But input and temp2 may be the same register. | 
 |  1819   __ JumpIfSmi(input, is_false); | 
 |  1820  | 
 |  1821   if (class_name->IsEqualTo(CStrVector("Function"))) { | 
 |  1822     // Assuming the following assertions, we can use the same compares to test | 
 |  1823     // for both being a function type and being in the object type range. | 
 |  1824     STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); | 
 |  1825     STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == | 
 |  1826                   FIRST_SPEC_OBJECT_TYPE + 1); | 
 |  1827     STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == | 
 |  1828                   LAST_SPEC_OBJECT_TYPE - 1); | 
 |  1829     STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); | 
 |  1830  | 
 |  1831     __ GetObjectType(input, temp, temp2); | 
 |  1832     __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE)); | 
 |  1833     __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE)); | 
 |  1834     __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE)); | 
 |  1835   } else { | 
 |  1836     // Faster code path to avoid two compares: subtract lower bound from the | 
 |  1837     // actual type and do a signed compare with the width of the type range. | 
 |  1838     __ GetObjectType(input, temp, temp2); | 
 |  1839     __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); | 
 |  1840     __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - | 
 |  1841                                            FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); | 
 |  1842   } | 
 |  1843  | 
 |  1844   // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. | 
 |  1845   // Check if the constructor in the map is a function. | 
 |  1846   __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset)); | 
 |  1847  | 
 |  1848   // Objects with a non-function constructor have class 'Object'. | 
 |  1849   __ GetObjectType(temp, temp2, temp2); | 
 |  1850   if (class_name->IsEqualTo(CStrVector("Object"))) { | 
 |  1851     __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE)); | 
 |  1852   } else { | 
 |  1853     __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE)); | 
 |  1854   } | 
 |  1855  | 
 |  1856   // temp now contains the constructor function. Grab the | 
 |  1857   // instance class name from there. | 
 |  1858   __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset)); | 
 |  1859   __ lw(temp, FieldMemOperand(temp, | 
 |  1860                                SharedFunctionInfo::kInstanceClassNameOffset)); | 
 |  1861   // The class name we are testing against is a symbol because it's a literal. | 
 |  1862   // The name in the constructor is a symbol because of the way the context is | 
 |  1863   // booted.  This routine isn't expected to work for random API-created | 
 |  1864   // classes and it doesn't have to because you can't access it with natives | 
 |  1865   // syntax.  Since both sides are symbols it is sufficient to use an identity | 
 |  1866   // comparison. | 
 |  1867  | 
 |  1868   // End with the address of this class_name instance in the temp register. | 
 |  1869   // On MIPS, the caller must do the comparison with Handle<String> class_name. | 
 |  1870 } | 
 |  1871  | 
 |  1872  | 
 |  1873 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { | 
 |  1874   Register input = ToRegister(instr->InputAt(0)); | 
 |  1875   Register temp = scratch0(); | 
 |  1876   Register temp2 = ToRegister(instr->TempAt(0)); | 
 |  1877   Handle<String> class_name = instr->hydrogen()->class_name(); | 
 |  1878  | 
 |  1879   int true_block = chunk_->LookupDestination(instr->true_block_id()); | 
 |  1880   int false_block = chunk_->LookupDestination(instr->false_block_id()); | 
 |  1881  | 
 |  1882   Label* true_label = chunk_->GetAssemblyLabel(true_block); | 
 |  1883   Label* false_label = chunk_->GetAssemblyLabel(false_block); | 
 |  1884  | 
 |  1885   EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2); | 
 |  1886  | 
 |  1887   EmitBranch(true_block, false_block, eq, temp, Operand(class_name)); | 
 |  1888 } | 
 |  1889  | 
 |  1890  | 
 |  1891 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { | 
 |  1892   Register reg = ToRegister(instr->InputAt(0)); | 
 |  1893   Register temp = ToRegister(instr->TempAt(0)); | 
 |  1894   int true_block = instr->true_block_id(); | 
 |  1895   int false_block = instr->false_block_id(); | 
 |  1896  | 
 |  1897   __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset)); | 
 |  1898   EmitBranch(true_block, false_block, eq, temp, Operand(instr->map())); | 
 |  1899 } | 
 |  1900  | 
 |  1901  | 
 |  1902 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { | 
 |  1903   Label true_label, done; | 
 |  1904   ASSERT(ToRegister(instr->InputAt(0)).is(a0));  // Object is in a0. | 
 |  1905   ASSERT(ToRegister(instr->InputAt(1)).is(a1));  // Function is in a1. | 
 |  1906   Register result = ToRegister(instr->result()); | 
 |  1907   ASSERT(result.is(v0)); | 
 |  1908  | 
 |  1909   InstanceofStub stub(InstanceofStub::kArgsInRegisters); | 
 |  1910   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 
 |  1911  | 
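 |         // The stub leaves zero in the result register when the object is an | 
 |         // instance of the function, so branch on equality with zero. | 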
 |  1912   __ Branch(&true_label, eq, result, Operand(zero_reg)); | 
 |  1913   __ li(result, Operand(factory()->false_value())); | 
 |  1914   __ Branch(&done); | 
 |  1915   __ bind(&true_label); | 
 |  1916   __ li(result, Operand(factory()->true_value())); | 
 |  1917   __ bind(&done); | 
 |  1918 } | 
 |  1919  | 
 |  1920  | 
 |  1921 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { | 
 |  1922   class DeferredInstanceOfKnownGlobal: public LDeferredCode { | 
 |  1923    public: | 
 |  1924     DeferredInstanceOfKnownGlobal(LCodeGen* codegen, | 
 |  1925                                   LInstanceOfKnownGlobal* instr) | 
 |  1926         : LDeferredCode(codegen), instr_(instr) { } | 
 |  1927     virtual void Generate() { | 
 |  1928       codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_); | 
 |  1929     } | 
 |  1930     virtual LInstruction* instr() { return instr_; } | 
 |  1931     Label* map_check() { return &map_check_; } | 
 |  1932  | 
 |  1933    private: | 
 |  1934     LInstanceOfKnownGlobal* instr_; | 
 |  1935     Label map_check_; | 
 |  1936   }; | 
 |  1937  | 
 |  1938   DeferredInstanceOfKnownGlobal* deferred; | 
 |  1939   deferred = new DeferredInstanceOfKnownGlobal(this, instr); | 
 |  1940  | 
 |  1941   Label done, false_result; | 
 |  1942   Register object = ToRegister(instr->InputAt(0)); | 
 |  1943   Register temp = ToRegister(instr->TempAt(0)); | 
 |  1944   Register result = ToRegister(instr->result()); | 
 |  1945  | 
 |  1946   ASSERT(object.is(a0)); | 
 |  1947   ASSERT(result.is(v0)); | 
 |  1948  | 
 |  1949   // A Smi is not an instance of anything. | 
 |  1950   __ JumpIfSmi(object, &false_result); | 
 |  1951  | 
 |  1952   // This is the inlined call site instanceof cache. The two occurrences of the | 
 |  1953   // hole value will be patched to the last map/result pair generated by the | 
 |  1954   // instanceof stub. | 
 |  1955   Label cache_miss; | 
 |  1956   Register map = temp; | 
 |  1957   __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset)); | 
 |  1958  | 
 |  1959   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); | 
 |  1960   __ bind(deferred->map_check());  // Label for calculating code patching. | 
 |  1961   // We use Factory::the_hole_value() on purpose instead of loading from the | 
 |  1962   // root array to force relocation to be able to later patch with | 
 |  1963   // the cached map. | 
 |  1964   __ li(at, Operand(factory()->the_hole_value()), true); | 
 |  1965   __ Branch(&cache_miss, ne, map, Operand(at)); | 
 |  1966   // We use Factory::the_hole_value() on purpose instead of loading from the | 
 |  1967   // root array to force relocation to be able to later patch | 
 |  1968   // with true or false. | 
 |  1969   __ li(result, Operand(factory()->the_hole_value()), true); | 
 |  1970   __ Branch(&done); | 
 |  1971  | 
 |  1972   // The inlined call site cache did not match. Check null and string before | 
 |  1973   // calling the deferred code. | 
 |  1974   __ bind(&cache_miss); | 
 |  1975   // Null is not an instance of anything. | 
 |  1976   __ LoadRoot(temp, Heap::kNullValueRootIndex); | 
 |  1977   __ Branch(&false_result, eq, object, Operand(temp)); | 
 |  1978  | 
 |  1979   // String values are not instances of anything. | 
 |  1980   Condition cc = __ IsObjectStringType(object, temp, temp); | 
 |  1981   __ Branch(&false_result, cc, temp, Operand(zero_reg)); | 
 |  1982  | 
 |  1983   // Go to the deferred code. | 
 |  1984   __ Branch(deferred->entry()); | 
 |  1985  | 
 |  1986   __ bind(&false_result); | 
 |  1987   __ LoadRoot(result, Heap::kFalseValueRootIndex); | 
 |  1988  | 
 |  1989   // Here result has either true or false. Deferred code also produces true or | 
 |  1990   // false object. | 
 |  1991   __ bind(deferred->exit()); | 
 |  1992   __ bind(&done); | 
 |  1993 } | 
 |  1994  | 
 |  1995  | 
 |  1996 void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, | 
 |  1997                                                 Label* map_check) { | 
 |  1998   Register result = ToRegister(instr->result()); | 
 |  1999   ASSERT(result.is(v0)); | 
 |  2000  | 
 |  2001   InstanceofStub::Flags flags = InstanceofStub::kNoFlags; | 
 |  2002   flags = static_cast<InstanceofStub::Flags>( | 
 |  2003       flags | InstanceofStub::kArgsInRegisters); | 
 |  2004   flags = static_cast<InstanceofStub::Flags>( | 
 |  2005       flags | InstanceofStub::kCallSiteInlineCheck); | 
 |  2006   flags = static_cast<InstanceofStub::Flags>( | 
 |  2007       flags | InstanceofStub::kReturnTrueFalseObject); | 
 |  2008   InstanceofStub stub(flags); | 
 |  2009  | 
 |  2010   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); | 
 |  2011  | 
 |  2012   // Get the temp register reserved by the instruction. This needs to be t0, | 
 |  2013   // as its slot in the pushed safepoint register area is used to communicate | 
 |  2014   // the offset to the location of the map check. | 
 |  2015   Register temp = ToRegister(instr->TempAt(0)); | 
 |  2016   ASSERT(temp.is(t0)); | 
 |  2017   __ li(InstanceofStub::right(), Operand(instr->function())); | 
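 |         // kAdditionalDelta presumably accounts for the instructions emitted | 
 |         // between this point and the stub call; the scaled delta stored below | 
 |         // lets the stub locate the patchable map check. | 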
 |  2018   static const int kAdditionalDelta = 7; | 
 |  2019   int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta; | 
 |  2020   Label before_push_delta; | 
 |  2021   __ bind(&before_push_delta); | 
 |  2022   { | 
 |  2023     Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); | 
 |  2024     __ li(temp, Operand(delta * kPointerSize), true); | 
 |  2025     __ StoreToSafepointRegisterSlot(temp, temp); | 
 |  2026   } | 
 |  2027   CallCodeGeneric(stub.GetCode(), | 
 |  2028                   RelocInfo::CODE_TARGET, | 
 |  2029                   instr, | 
 |  2030                   RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); | 
 |  2031   // Put the result value into the result register slot and | 
 |  2032   // restore all registers. | 
 |  2033   __ StoreToSafepointRegisterSlot(result, result); | 
 |  2034 } | 
 |  2035  | 
 |  2036  | 
 |  2037 static Condition ComputeCompareCondition(Token::Value op) { | 
 |  2038   switch (op) { | 
 |  2039     case Token::EQ_STRICT: | 
 |  2040     case Token::EQ: | 
 |  2041       return eq; | 
 |  2042     case Token::LT: | 
 |  2043       return lt; | 
 |  2044     case Token::GT: | 
 |  2045       return gt; | 
 |  2046     case Token::LTE: | 
 |  2047       return le; | 
 |  2048     case Token::GTE: | 
 |  2049       return ge; | 
 |  2050     default: | 
 |  2051       UNREACHABLE(); | 
 |  2052       return kNoCondition; | 
 |  2053   } | 
 |  2054 } | 
 |  2055  | 
 |  2056  | 
 |  2057 void LCodeGen::DoCmpT(LCmpT* instr) { | 
 |  2058   Token::Value op = instr->op(); | 
 |  2059  | 
 |  2060   Handle<Code> ic = CompareIC::GetUninitialized(op); | 
 |  2061   CallCode(ic, RelocInfo::CODE_TARGET, instr); | 
 |  2062   // On MIPS there is no need for a "no inlined smi code" marker (nop). | 
 |  2063  | 
 |  2064   Condition condition = ComputeCompareCondition(op); | 
 |  2065   // A minor optimization that relies on LoadRoot always emitting one | 
 |  2066   // instruction. | 
 |  2067   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm()); | 
 |  2068   Label done; | 
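 |         // The true value is loaded in the branch delay slot, so it is | 
 |         // materialized whether or not the branch is taken; the fall-through | 
 |         // path immediately overwrites it with the false value. | 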
 |  2069   __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg)); | 
 |  2070   __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); | 
 |  2071   __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); | 
 |  2072   ASSERT_EQ(3, masm()->InstructionsGeneratedSince(&done)); | 
 |  2073   __ bind(&done); | 
 |  2074 } | 
 |  2075  | 
 |  2076  | 
 |  2077 void LCodeGen::DoReturn(LReturn* instr) { | 
 |  2078   if (FLAG_trace) { | 
 |  2079     // Push the return value on the stack as the parameter. | 
 |  2080     // Runtime::TraceExit returns its parameter in v0. | 
 |  2081     __ push(v0); | 
 |  2082     __ CallRuntime(Runtime::kTraceExit, 1); | 
 |  2083   } | 
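 |         // The +1 below accounts for the receiver slot. | 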
 |  2084   int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize; | 
 |  2085   __ mov(sp, fp); | 
 |  2086   __ Pop(ra, fp); | 
 |  2087   __ Addu(sp, sp, Operand(sp_delta)); | 
 |  2088   __ Jump(ra); | 
 |  2089 } | 
 |  2090  | 
 |  2091  | 
 |  2092 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { | 
 |  2093   Register result = ToRegister(instr->result()); | 
 |  2094   __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell()))); | 
 |  2095   __ lw(result, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset)); | 
 |  2096   if (instr->hydrogen()->RequiresHoleCheck()) { | 
 |  2097     __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 
 |  2098     DeoptimizeIf(eq, instr->environment(), result, Operand(at)); | 
 |  2099   } | 
 |  2100 } | 
 |  2101  | 
 |  2102  | 
 |  2103 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { | 
 |  2104   ASSERT(ToRegister(instr->global_object()).is(a0)); | 
 |  2105   ASSERT(ToRegister(instr->result()).is(v0)); | 
 |  2106  | 
 |  2107   __ li(a2, Operand(instr->name())); | 
 |  2108   RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET | 
 |  2109                                              : RelocInfo::CODE_TARGET_CONTEXT; | 
 |  2110   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); | 
 |  2111   CallCode(ic, mode, instr); | 
 |  2112 } | 
 |  2113  | 
 |  2114  | 
 |  2115 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { | 
 |  2116   Register value = ToRegister(instr->InputAt(0)); | 
 |  2117   Register scratch = scratch0(); | 
 |  2118   Register scratch2 = ToRegister(instr->TempAt(0)); | 
 |  2119  | 
 |  2120   // Load the cell. | 
 |  2121   __ li(scratch, Operand(Handle<Object>(instr->hydrogen()->cell()))); | 
 |  2122  | 
 |  2123   // If the cell we are storing to contains the hole it could have | 
 |  2124   // been deleted from the property dictionary. In that case, we need | 
 |  2125   // to update the property details in the property dictionary to mark | 
 |  2126   // it as no longer deleted. | 
 |  2127   if (instr->hydrogen()->RequiresHoleCheck()) { | 
 |  2128     __ lw(scratch2, | 
 |  2129           FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset)); | 
 |  2130     __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 
 |  2131     DeoptimizeIf(eq, instr->environment(), scratch2, Operand(at)); | 
 |  2132   } | 
 |  2133  | 
 |  2134   // Store the value. | 
 |  2135   __ sw(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset)); | 
 |  2136  | 
 |  2137   // Cells are always in the remembered set. | 
 |  2138   if (instr->hydrogen()->NeedsWriteBarrier()) { | 
 |  2139     HType type = instr->hydrogen()->value()->type(); | 
 |  2140     SmiCheck check_needed = | 
 |  2141         type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 
 |  2142     __ RecordWriteField(scratch, | 
 |  2143                         JSGlobalPropertyCell::kValueOffset, | 
 |  2144                         value, | 
 |  2145                         scratch2, | 
 |  2146                         kRAHasBeenSaved, | 
 |  2147                         kSaveFPRegs, | 
 |  2148                         OMIT_REMEMBERED_SET, | 
 |  2149                         check_needed); | 
 |  2150   } | 
 |  2151 } | 
 |  2152  | 
 |  2153  | 
 |  2154 void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) { | 
 |  2155   ASSERT(ToRegister(instr->global_object()).is(a1)); | 
 |  2156   ASSERT(ToRegister(instr->value()).is(a0)); | 
 |  2157  | 
 |  2158   __ li(a2, Operand(instr->name())); | 
 |  2159   Handle<Code> ic = instr->strict_mode() | 
 |  2160       ? isolate()->builtins()->StoreIC_Initialize_Strict() | 
 |  2161       : isolate()->builtins()->StoreIC_Initialize(); | 
 |  2162   CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr); | 
 |  2163 } | 
 |  2164  | 
 |  2165  | 
 |  2166 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { | 
 |  2167   Register context = ToRegister(instr->context()); | 
 |  2168   Register result = ToRegister(instr->result()); | 
 |  2169   __ lw(result, ContextOperand(context, instr->slot_index())); | 
 |  2170 } | 
 |  2171  | 
 |  2172  | 
 |  2173 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { | 
 |  2174   Register context = ToRegister(instr->context()); | 
 |  2175   Register value = ToRegister(instr->value()); | 
 |  2176   MemOperand target = ContextOperand(context, instr->slot_index()); | 
 |  2177   __ sw(value, target); | 
 |  2178   if (instr->hydrogen()->NeedsWriteBarrier()) { | 
 |  2179     HType type = instr->hydrogen()->value()->type(); | 
 |  2180     SmiCheck check_needed = | 
 |  2181         type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 
 |  2182     __ RecordWriteContextSlot(context, | 
 |  2183                               target.offset(), | 
 |  2184                               value, | 
 |  2185                               scratch0(), | 
 |  2186                               kRAHasBeenSaved, | 
 |  2187                               kSaveFPRegs, | 
 |  2188                               EMIT_REMEMBERED_SET, | 
 |  2189                               check_needed); | 
 |  2190   } | 
 |  2191 } | 
 |  2192  | 
 |  2193  | 
 |  2194 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { | 
 |  2195   Register object = ToRegister(instr->InputAt(0)); | 
 |  2196   Register result = ToRegister(instr->result()); | 
 |  2197   if (instr->hydrogen()->is_in_object()) { | 
 |  2198     __ lw(result, FieldMemOperand(object, instr->hydrogen()->offset())); | 
 |  2199   } else { | 
 |  2200     __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); | 
 |  2201     __ lw(result, FieldMemOperand(result, instr->hydrogen()->offset())); | 
 |  2202   } | 
 |  2203 } | 
 |  2204  | 
 |  2205  | 
 |  2206 void LCodeGen::EmitLoadFieldOrConstantFunction(Register result, | 
 |  2207                                                Register object, | 
 |  2208                                                Handle<Map> type, | 
 |  2209                                                Handle<String> name) { | 
 |  2210   LookupResult lookup(isolate()); | 
 |  2211   type->LookupInDescriptors(NULL, *name, &lookup); | 
 |  2212   ASSERT(lookup.IsProperty() && | 
 |  2213          (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION)); | 
 |  2214   if (lookup.type() == FIELD) { | 
 |  2215     int index = lookup.GetLocalFieldIndexFromMap(*type); | 
 |  2216     int offset = index * kPointerSize; | 
 |  2217     if (index < 0) { | 
 |  2218       // Negative property indices are in-object properties, indexed | 
 |  2219       // from the end of the fixed part of the object. | 
 |  2220       __ lw(result, FieldMemOperand(object, offset + type->instance_size())); | 
 |  2221     } else { | 
 |  2222       // Non-negative property indices are in the properties array. | 
 |  2223       __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); | 
 |  2224       __ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize)); | 
 |  2225     } | 
 |  2226   } else { | 
 |  2227     Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type)); | 
 |  2228     LoadHeapObject(result, Handle<HeapObject>::cast(function)); | 
 |  2229   } | 
 |  2230 } | 
 |  2231  | 
 |  2232  | 
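 |       // Dispatches on the receiver's map over the maps seen so far, loading the | 
 |       // field inline for each known map, and falls back to the generic LoadIC | 
 |       // (or deoptimizes) when the map is not among them. | 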
 |  2233 void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) { | 
 |  2234   Register object = ToRegister(instr->object()); | 
 |  2235   Register result = ToRegister(instr->result()); | 
 |  2236   Register scratch = scratch0(); | 
 |  2237   int map_count = instr->hydrogen()->types()->length(); | 
 |  2238   Handle<String> name = instr->hydrogen()->name(); | 
 |  2239   if (map_count == 0) { | 
 |  2240     ASSERT(instr->hydrogen()->need_generic()); | 
 |  2241     __ li(a2, Operand(name)); | 
 |  2242     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); | 
 |  2243     CallCode(ic, RelocInfo::CODE_TARGET, instr); | 
 |  2244   } else { | 
 |  2245     Label done; | 
 |  2246     __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); | 
 |  2247     for (int i = 0; i < map_count - 1; ++i) { | 
 |  2248       Handle<Map> map = instr->hydrogen()->types()->at(i); | 
 |  2249       Label next; | 
 |  2250       __ Branch(&next, ne, scratch, Operand(map)); | 
 |  2251       EmitLoadFieldOrConstantFunction(result, object, map, name); | 
 |  2252       __ Branch(&done); | 
 |  2253       __ bind(&next); | 
 |  2254     } | 
 |  2255     Handle<Map> map = instr->hydrogen()->types()->last(); | 
 |  2256     if (instr->hydrogen()->need_generic()) { | 
 |  2257       Label generic; | 
 |  2258       __ Branch(&generic, ne, scratch, Operand(map)); | 
 |  2259       EmitLoadFieldOrConstantFunction(result, object, map, name); | 
 |  2260       __ Branch(&done); | 
 |  2261       __ bind(&generic); | 
 |  2262       __ li(a2, Operand(name)); | 
 |  2263       Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); | 
 |  2264       CallCode(ic, RelocInfo::CODE_TARGET, instr); | 
 |  2265     } else { | 
 |  2266       DeoptimizeIf(ne, instr->environment(), scratch, Operand(map)); | 
 |  2267       EmitLoadFieldOrConstantFunction(result, object, map, name); | 
 |  2268     } | 
 |  2269     __ bind(&done); | 
 |  2270   } | 
 |  2271 } | 
 |  2272  | 
 |  2273  | 
 |  2274 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { | 
 |  2275   ASSERT(ToRegister(instr->object()).is(a0)); | 
 |  2276   ASSERT(ToRegister(instr->result()).is(v0)); | 
 |  2277  | 
 |  2278   // Name is always in a2. | 
 |  2279   __ li(a2, Operand(instr->name())); | 
 |  2280   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); | 
 |  2281   CallCode(ic, RelocInfo::CODE_TARGET, instr); | 
 |  2282 } | 
 |  2283  | 
 |  2284  | 
 |  2285 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { | 
 |  2286   Register scratch = scratch0(); | 
 |  2287   Register function = ToRegister(instr->function()); | 
 |  2288   Register result = ToRegister(instr->result()); | 
 |  2289  | 
 |  2290   // Check that the function really is a function. Load map into the | 
 |  2291   // result register. | 
 |  2292   __ GetObjectType(function, result, scratch); | 
 |  2293   DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE)); | 
 |  2294  | 
 |  2295   // Make sure that the function has an instance prototype. | 
 |  2296   Label non_instance; | 
 |  2297   __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); | 
 |  2298   __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype)); | 
 |  2299   __ Branch(&non_instance, ne, scratch, Operand(zero_reg)); | 
 |  2300  | 
 |  2301   // Get the prototype or initial map from the function. | 
 |  2302   __ lw(result, | 
 |  2303          FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 
 |  2304  | 
 |  2305   // Check that the function has a prototype or an initial map. | 
 |  2306   __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 
 |  2307   DeoptimizeIf(eq, instr->environment(), result, Operand(at)); | 
 |  2308  | 
 |  2309   // If the function does not have an initial map, we're done. | 
 |  2310   Label done; | 
 |  2311   __ GetObjectType(result, scratch, scratch); | 
 |  2312   __ Branch(&done, ne, scratch, Operand(MAP_TYPE)); | 
 |  2313  | 
 |  2314   // Get the prototype from the initial map. | 
 |  2315   __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset)); | 
 |  2316   __ Branch(&done); | 
 |  2317  | 
 |  2318   // Non-instance prototype: Fetch prototype from constructor field | 
 |  2319   // in initial map. | 
 |  2320   __ bind(&non_instance); | 
 |  2321   __ lw(result, FieldMemOperand(result, Map::kConstructorOffset)); | 
 |  2322  | 
 |  2323   // All done. | 
 |  2324   __ bind(&done); | 
 |  2325 } | 
 |  2326  | 
 |  2327  | 
 |  2328 void LCodeGen::DoLoadElements(LLoadElements* instr) { | 
 |  2329   Register result = ToRegister(instr->result()); | 
 |  2330   Register input = ToRegister(instr->InputAt(0)); | 
 |  2331   Register scratch = scratch0(); | 
 |  2332  | 
 |  2333   __ lw(result, FieldMemOperand(input, JSObject::kElementsOffset)); | 
 |  2334   if (FLAG_debug_code) { | 
 |  2335     Label done, fail; | 
 |  2336     __ lw(scratch, FieldMemOperand(result, HeapObject::kMapOffset)); | 
 |  2337     __ LoadRoot(at, Heap::kFixedArrayMapRootIndex); | 
 |  2338     __ Branch(USE_DELAY_SLOT, &done, eq, scratch, Operand(at)); | 
 |  2339     __ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);  // In the delay slot. | 
 |  2340     __ Branch(&done, eq, scratch, Operand(at)); | 
 |  2341     // |scratch| still contains |result|'s map. | 
 |  2342     __ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset)); | 
 |  2343     __ Ext(scratch, scratch, Map::kElementsKindShift, | 
 |  2344            Map::kElementsKindBitCount); | 
 |  2345     __ Branch(&done, eq, scratch, | 
 |  2346               Operand(FAST_ELEMENTS)); | 
 |  2347     __ Branch(&fail, lt, scratch, | 
 |  2348               Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND)); | 
 |  2349     __ Branch(&done, le, scratch, | 
 |  2350               Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND)); | 
 |  2351     __ bind(&fail); | 
 |  2352     __ Abort("Check for fast or external elements failed."); | 
 |  2353     __ bind(&done); | 
 |  2354   } | 
 |  2355 } | 
 |  2356  | 
 |  2357  | 
 |  2358 void LCodeGen::DoLoadExternalArrayPointer( | 
 |  2359     LLoadExternalArrayPointer* instr) { | 
 |  2360   Register to_reg = ToRegister(instr->result()); | 
 |  2361   Register from_reg = ToRegister(instr->InputAt(0)); | 
 |  2362   __ lw(to_reg, FieldMemOperand(from_reg, | 
 |  2363                                 ExternalArray::kExternalPointerOffset)); | 
 |  2364 } | 
 |  2365  | 
 |  2366  | 
 |  2367 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { | 
 |  2368   Register arguments = ToRegister(instr->arguments()); | 
 |  2369   Register length = ToRegister(instr->length()); | 
 |  2370   Register index = ToRegister(instr->index()); | 
 |  2371   Register result = ToRegister(instr->result()); | 
 |  2372  | 
 |  2373   // Bail out if index is not a valid argument index. The unsigned check | 
 |  2374   // catches negative indices for free. | 
 |  2375  | 
 |  2376   // TODO(plind): Should be optimized to do the sub before the DeoptimizeIf(), | 
 |  2377   // as is done on ARM. It will save us an instruction. | 
 |  2378   DeoptimizeIf(ls, instr->environment(), length, Operand(index)); | 
 |  2379  | 
 |  2380   // There are two words between the frame pointer and the last argument. | 
 |  2381   // Subtracting index from length accounts for one of them; add one more. | 
 |  2382   __ subu(length, length, index); | 
 |  2383   __ Addu(length, length, Operand(1)); | 
 |  2384   __ sll(length, length, kPointerSizeLog2); | 
 |  2385   __ Addu(at, arguments, Operand(length)); | 
 |  2386   __ lw(result, MemOperand(at, 0)); | 
 |  2387 } | 
 |  2388  | 
 |  2389  | 
 |  2390 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { | 
 |  2391   Register elements = ToRegister(instr->elements()); | 
 |  2392   Register key = EmitLoadRegister(instr->key(), scratch0()); | 
 |  2393   Register result = ToRegister(instr->result()); | 
 |  2394   Register scratch = scratch0(); | 
 |  2395  | 
 |  2396   // Load the result. | 
 |  2397   __ sll(scratch, key, kPointerSizeLog2);  // Key indexes words. | 
 |  2398   __ addu(scratch, elements, scratch); | 
 |  2399   __ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize)); | 
 |  2400  | 
 |  2401   // Check for the hole value. | 
 |  2402   if (instr->hydrogen()->RequiresHoleCheck()) { | 
 |  2403     __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); | 
 |  2404     DeoptimizeIf(eq, instr->environment(), result, Operand(scratch)); | 
 |  2405   } | 
 |  2406 } | 
 |  2407  | 
 |  2408  | 
 |  2409 void LCodeGen::DoLoadKeyedFastDoubleElement( | 
 |  2410     LLoadKeyedFastDoubleElement* instr) { | 
 |  2411   Register elements = ToRegister(instr->elements()); | 
 |  2412   bool key_is_constant = instr->key()->IsConstantOperand(); | 
 |  2413   Register key = no_reg; | 
 |  2414   DoubleRegister result = ToDoubleRegister(instr->result()); | 
 |  2415   Register scratch = scratch0(); | 
 |  2416  | 
 |  2417   int shift_size = | 
 |  2418       ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); | 
 |  2419   int constant_key = 0; | 
 |  2420   if (key_is_constant) { | 
 |  2421     constant_key = ToInteger32(LConstantOperand::cast(instr->key())); | 
 |  2422     if (constant_key & 0xF0000000) { | 
 |  2423       Abort("array index constant value too big."); | 
 |  2424     } | 
 |  2425   } else { | 
 |  2426     key = ToRegister(instr->key()); | 
 |  2427   } | 
 |  2428  | 
 |  2429   if (key_is_constant) { | 
 |  2430     __ Addu(elements, elements, Operand(constant_key * (1 << shift_size) + | 
 |  2431             FixedDoubleArray::kHeaderSize - kHeapObjectTag)); | 
 |  2432   } else { | 
 |  2433     __ sll(scratch, key, shift_size); | 
 |  2434     __ Addu(elements, elements, Operand(scratch)); | 
 |  2435     __ Addu(elements, elements, | 
 |  2436             Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); | 
 |  2437   } | 
 |  2438  | 
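 |         // The hole is a NaN with a distinguished upper word; load the upper | 
 |         // 32 bits of the element and deoptimize if they match it. | 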
 |  2439   __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); | 
 |  2440   DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32)); | 
 |  2441  | 
 |  2442   __ ldc1(result, MemOperand(elements)); | 
 |  2443 } | 
 |  2444  | 
 |  2445  | 
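 |       // Loads an element from an external (typed) array. Float and double loads | 
 |       // go through the FPU; integer loads select width and signedness in the | 
 |       // switch below, deoptimizing for uint32 values that do not fit in int32. | 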
 |  2446 void LCodeGen::DoLoadKeyedSpecializedArrayElement( | 
 |  2447     LLoadKeyedSpecializedArrayElement* instr) { | 
 |  2448   Register external_pointer = ToRegister(instr->external_pointer()); | 
 |  2449   Register key = no_reg; | 
 |  2450   ElementsKind elements_kind = instr->elements_kind(); | 
 |  2451   bool key_is_constant = instr->key()->IsConstantOperand(); | 
 |  2452   int constant_key = 0; | 
 |  2453   if (key_is_constant) { | 
 |  2454     constant_key = ToInteger32(LConstantOperand::cast(instr->key())); | 
 |  2455     if (constant_key & 0xF0000000) { | 
 |  2456       Abort("array index constant value too big."); | 
 |  2457     } | 
 |  2458   } else { | 
 |  2459     key = ToRegister(instr->key()); | 
 |  2460   } | 
 |  2461   int shift_size = ElementsKindToShiftSize(elements_kind); | 
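 |         // The shift size is the log2 of the element width: 0 for (unsigned) | 
 |         // bytes and pixels, 1 for (unsigned) shorts, 2 for (unsigned) ints | 
 |         // and floats, and 3 for doubles. | 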
 |  2462  | 
 |  2463   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || | 
 |  2464       elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { | 
 |  2465     FPURegister result = ToDoubleRegister(instr->result()); | 
 |  2466     if (key_is_constant) { | 
 |  2467       __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size)); | 
 |  2468     } else { | 
 |  2469       __ sll(scratch0(), key, shift_size); | 
 |  2470       __ Addu(scratch0(), scratch0(), external_pointer); | 
 |  2471     } | 
 |  2472  | 
 |  2473     if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { | 
 |  2474       __ lwc1(result, MemOperand(scratch0())); | 
 |  2475       __ cvt_d_s(result, result); | 
 |  2476     } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS | 
 |  2477       __ ldc1(result, MemOperand(scratch0())); | 
 |  2478     } | 
 |  2479   } else { | 
 |  2480     Register result = ToRegister(instr->result()); | 
 |  2481     Register scratch = scratch0(); | 
 |  2482     MemOperand mem_operand(zero_reg); | 
 |  2483     if (key_is_constant) { | 
 |  2484       mem_operand = MemOperand(external_pointer, | 
 |  2485                                constant_key * (1 << shift_size)); | 
 |  2486     } else { | 
 |  2487       __ sll(scratch, key, shift_size); | 
 |  2488       __ Addu(scratch, scratch, external_pointer); | 
 |  2489       mem_operand = MemOperand(scratch); | 
 |  2490     } | 
 |  2491     switch (elements_kind) { | 
 |  2492       case EXTERNAL_BYTE_ELEMENTS: | 
 |  2493         __ lb(result, mem_operand); | 
 |  2494         break; | 
 |  2495       case EXTERNAL_PIXEL_ELEMENTS: | 
 |  2496       case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: | 
 |  2497         __ lbu(result, mem_operand); | 
 |  2498         break; | 
 |  2499       case EXTERNAL_SHORT_ELEMENTS: | 
 |  2500         __ lh(result, mem_operand); | 
 |  2501         break; | 
 |  2502       case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: | 
 |  2503         __ lhu(result, mem_operand); | 
 |  2504         break; | 
 |  2505       case EXTERNAL_INT_ELEMENTS: | 
 |  2506         __ lw(result, mem_operand); | 
 |  2507         break; | 
 |  2508       case EXTERNAL_UNSIGNED_INT_ELEMENTS: | 
 |  2509         __ lw(result, mem_operand); | 
 |  2510         // TODO(danno): we could be more clever here, perhaps having a special | 
 |  2511         // version of the stub that detects if the overflow case actually | 
 |  2512         // happens, and generates code that returns a double rather than an int. | 
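 |               // An unsigned value with the sign bit set does not fit in a | 
 |               // signed 32-bit integer, so deoptimize in that case. | 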
 |  2513         DeoptimizeIf(Ugreater_equal, instr->environment(), | 
 |  2514             result, Operand(0x80000000)); | 
 |  2515         break; | 
 |  2516       case EXTERNAL_FLOAT_ELEMENTS: | 
 |  2517       case EXTERNAL_DOUBLE_ELEMENTS: | 
 |  2518       case FAST_DOUBLE_ELEMENTS: | 
 |  2519       case FAST_ELEMENTS: | 
 |  2520       case FAST_SMI_ONLY_ELEMENTS: | 
 |  2521       case DICTIONARY_ELEMENTS: | 
 |  2522       case NON_STRICT_ARGUMENTS_ELEMENTS: | 
 |  2523         UNREACHABLE(); | 
 |  2524         break; | 
 |  2525     } | 
 |  2526   } | 
 |  2527 } | 
 |  2528  | 
 |  2529  | 
 |  2530 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { | 
 |  2531   ASSERT(ToRegister(instr->object()).is(a1)); | 
 |  2532   ASSERT(ToRegister(instr->key()).is(a0)); | 
 |  2533  | 
 |  2534   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); | 
 |  2535   CallCode(ic, RelocInfo::CODE_TARGET, instr); | 
 |  2536 } | 
 |  2537  | 
 |  2538  | 
 |  2539 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { | 
 |  2540   Register scratch = scratch0(); | 
 |  2541   Register temp = scratch1(); | 
 |  2542   Register result = ToRegister(instr->result()); | 
 |  2543  | 
 |  2544   // Check if the calling frame is an arguments adaptor frame. | 
 |  2545   Label done, adapted; | 
 |  2546   __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 
 |  2547   __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset)); | 
 |  2548   __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 
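 |         // temp is zero iff the context slot holds the arguments adaptor | 
 |         // sentinel, i.e. iff the caller frame is an adaptor frame. | 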
 |  2549  | 
 |  2550   // The result is the frame pointer of this frame if it is not adapted, | 
 |  2551   // or the frame pointer of the real frame below the adaptor if it is. | 
 |  2552   __ movn(result, fp, temp);  // move only if temp is not equal to zero (ne) | 
 |  2553   __ movz(result, scratch, temp);  // move only if temp is equal to zero (eq) | 
 |  2554 } | 
 |  2555  | 
 |  2556  | 
 |  2557 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { | 
 |  2558   Register elem = ToRegister(instr->InputAt(0)); | 
 |  2559   Register result = ToRegister(instr->result()); | 
 |  2560  | 
 |  2561   Label done; | 
 |  2562  | 
 |  2563   // If there is no arguments adaptor frame, the number of arguments is fixed. | 
 |  2564   __ Addu(result, zero_reg, Operand(scope()->num_parameters())); | 
 |  2565   __ Branch(&done, eq, fp, Operand(elem)); | 
 |  2566  | 
 |  2567   // Arguments adaptor frame present. Get argument length from there. | 
 |  2568   __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 
 |  2569   __ lw(result, | 
 |  2570         MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 
 |  2571   __ SmiUntag(result); | 
 |  2572  | 
 |  2573   // Argument length is in result register. | 
 |  2574   __ bind(&done); | 
 |  2575 } | 
 |  2576  | 
 |  2577  | 
 |  2578 void LCodeGen::DoApplyArguments(LApplyArguments* instr) { | 
 |  2579   Register receiver = ToRegister(instr->receiver()); | 
 |  2580   Register function = ToRegister(instr->function()); | 
 |  2581   Register length = ToRegister(instr->length()); | 
 |  2582   Register elements = ToRegister(instr->elements()); | 
 |  2583   Register scratch = scratch0(); | 
 |  2584   ASSERT(receiver.is(a0));  // Used for parameter count. | 
 |  2585   ASSERT(function.is(a1));  // Required by InvokeFunction. | 
 |  2586   ASSERT(ToRegister(instr->result()).is(v0)); | 
 |  2587  | 
 |  2588   // If the receiver is null or undefined, we have to pass the global | 
 |  2589   // object as a receiver to normal functions. Values have to be | 
 |  2590   // passed unchanged to builtins and strict-mode functions. | 
 |  2591   Label global_object, receiver_ok; | 
 |  2592  | 
 |  2593   // Do not transform the receiver to object for strict mode | 
 |  2594   // functions. | 
 |  2595   __ lw(scratch, | 
 |  2596          FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); | 
 |  2597   __ lw(scratch, | 
 |  2598          FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); | 
 |  2599  | 
 |  2600   // Do not transform the receiver to object for builtins. | 
 |  2601   int32_t strict_mode_function_mask = | 
 |  2602                   1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize); | 
 |  2603   int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize); | 
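 |         // The compiler hints field holds a smi, so the bit indices are | 
 |         // shifted left by kSmiTagSize when testing the raw word. | 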
 |  2604   __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask)); | 
 |  2605   __ Branch(&receiver_ok, ne, scratch, Operand(zero_reg)); | 
 |  2606  | 
 |  2607   // Normal function. Replace undefined or null with global receiver. | 
 |  2608   __ LoadRoot(scratch, Heap::kNullValueRootIndex); | 
 |  2609   __ Branch(&global_object, eq, receiver, Operand(scratch)); | 
 |  2610   __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); | 
 |  2611   __ Branch(&global_object, eq, receiver, Operand(scratch)); | 
 |  2612  | 
 |  2613   // Deoptimize if the receiver is not a JS object. | 
 |  2614   __ And(scratch, receiver, Operand(kSmiTagMask)); | 
 |  2615   DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg)); | 
 |  2616  | 
 |  2617   __ GetObjectType(receiver, scratch, scratch); | 
 |  2618   DeoptimizeIf(lt, instr->environment(), | 
 |  2619                scratch, Operand(FIRST_SPEC_OBJECT_TYPE)); | 
 |  2620   __ Branch(&receiver_ok); | 
 |  2621  | 
 |  2622   __ bind(&global_object); | 
 |  2623   __ lw(receiver, GlobalObjectOperand()); | 
 |  2624   __ lw(receiver, | 
 |  2625          FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset)); | 
 |  2626   __ bind(&receiver_ok); | 
 |  2627  | 
 |  2628   // Copy the arguments to this function possibly from the | 
 |  2629   // adaptor frame below it. | 
 |  2630   const uint32_t kArgumentsLimit = 1 * KB; | 
 |  2631   DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit)); | 
 |  2632  | 
 |  2633   // Push the receiver and use the register to keep the original | 
 |  2634   // number of arguments. | 
 |  2635   __ push(receiver); | 
 |  2636   __ Move(receiver, length); | 
 |  2637   // The arguments start one pointer-size word past elements. | 
 |  2638   __ Addu(elements, elements, Operand(1 * kPointerSize)); | 
 |  2639  | 
 |  2640   // Loop through the arguments pushing them onto the execution | 
 |  2641   // stack. | 
 |  2642   Label invoke, loop; | 
 |  2643   // length is a small non-negative integer, due to the test above. | 
 |  2644   __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg)); | 
 |  2645   __ sll(scratch, length, 2); | 
 |  2646   __ bind(&loop); | 
 |  2647   __ Addu(scratch, elements, scratch); | 
 |  2648   __ lw(scratch, MemOperand(scratch)); | 
 |  2649   __ push(scratch); | 
 |  2650   __ Subu(length, length, Operand(1)); | 
 |  2651   __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg)); | 
 |  2652   __ sll(scratch, length, 2); | 
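 |         // Both shifts execute in branch delay slots, recomputing the byte | 
 |         // offset of the next argument while the branch resolves. | 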
 |  2653  | 
 |  2654   __ bind(&invoke); | 
 |  2655   ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); | 
 |  2656   LPointerMap* pointers = instr->pointer_map(); | 
 |  2657   LEnvironment* env = instr->deoptimization_environment(); | 
 |  2658   RecordPosition(pointers->position()); | 
 |  2659   RegisterEnvironmentForDeoptimization(env); | 
 |  2660   SafepointGenerator safepoint_generator(this, | 
 |  2661                                          pointers, | 
 |  2662                                          env->deoptimization_index()); | 
 |  2663   // The number of arguments is stored in receiver, which is a0, as | 
 |  2664   // expected by InvokeFunction. | 
 |  2665   v8::internal::ParameterCount actual(receiver); | 
 |  2666   __ InvokeFunction(function, actual, CALL_FUNCTION, | 
 |  2667                     safepoint_generator, CALL_AS_METHOD); | 
 |  2668   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 
 |  2669 } | 
 |  2670  | 
 |  2671  | 
 |  2672 void LCodeGen::DoPushArgument(LPushArgument* instr) { | 
 |  2673   LOperand* argument = instr->InputAt(0); | 
 |  2674   if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { | 
 |  2675     Abort("DoPushArgument not implemented for double type."); | 
 |  2676   } else { | 
 |  2677     Register argument_reg = EmitLoadRegister(argument, at); | 
 |  2678     __ push(argument_reg); | 
 |  2679   } | 
 |  2680 } | 
 |  2681  | 
 |  2682  | 
 |  2683 void LCodeGen::DoThisFunction(LThisFunction* instr) { | 
 |  2684   Register result = ToRegister(instr->result()); | 
 |  2685   __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); | 
 |  2686 } | 
 |  2687  | 
 |  2688  | 
 |  2689 void LCodeGen::DoContext(LContext* instr) { | 
 |  2690   Register result = ToRegister(instr->result()); | 
 |  2691   __ mov(result, cp); | 
 |  2692 } | 
 |  2693  | 
 |  2694  | 
 |  2695 void LCodeGen::DoOuterContext(LOuterContext* instr) { | 
 |  2696   Register context = ToRegister(instr->context()); | 
 |  2697   Register result = ToRegister(instr->result()); | 
 |  2698   __ lw(result, | 
 |  2699         MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX))); | 
 |  2700 } | 
 |  2701  | 
 |  2702  | 
 |  2703 void LCodeGen::DoGlobalObject(LGlobalObject* instr) { | 
 |  2704   Register context = ToRegister(instr->context()); | 
 |  2705   Register result = ToRegister(instr->result()); | 
 |  2706   __ lw(result, ContextOperand(context, Context::GLOBAL_INDEX)); | 
 |  2707 } | 
 |  2708  | 
 |  2709  | 
 |  2710 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) { | 
 |  2711   Register global = ToRegister(instr->global()); | 
 |  2712   Register result = ToRegister(instr->result()); | 
 |  2713   __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset)); | 
 |  2714 } | 
 |  2715  | 
 |  2716  | 
 |  2717 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, | 
 |  2718                                  int arity, | 
 |  2719                                  LInstruction* instr, | 
 |  2720                                  CallKind call_kind) { | 
 |  2721   // Change context if needed. | 
 |  2722   bool change_context = | 
 |  2723       (info()->closure()->context() != function->context()) || | 
 |  2724       scope()->contains_with() || | 
 |  2725       (scope()->num_heap_slots() > 0); | 
 |  2726   if (change_context) { | 
 |  2727     __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); | 
 |  2728   } | 
 |  2729  | 
 |  2730   // Set a0 to the arguments count if adaptation is not needed. Assumes a0 | 
 |  2731   // is available to write to at this point. | 
 |  2732   if (!function->NeedsArgumentsAdaption()) { | 
 |  2733     __ li(a0, Operand(arity)); | 
 |  2734   } | 
 |  2735  | 
 |  2736   LPointerMap* pointers = instr->pointer_map(); | 
 |  2737   RecordPosition(pointers->position()); | 
 |  2738  | 
 |  2739   // Invoke function. | 
 |  2740   __ SetCallKind(t1, call_kind); | 
 |  2741   __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); | 
 |  2742   __ Call(at); | 
 |  2743  | 
 |  2744   // Set up deoptimization. | 
 |  2745   RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT); | 
 |  2746  | 
 |  2747   // Restore context. | 
 |  2748   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 
 |  2749 } | 
 |  2750  | 
 |  2751  | 
 |  2752 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) { | 
 |  2753   ASSERT(ToRegister(instr->result()).is(v0)); | 
 |  2754   __ mov(a0, v0); | 
 |  2755   __ li(a1, Operand(instr->function())); | 
 |  2756   CallKnownFunction(instr->function(), instr->arity(), instr, CALL_AS_METHOD); | 
 |  2757 } | 
 |  2758  | 
 |  2759  | 
 |  2760 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { | 
 |  2761   Register input = ToRegister(instr->InputAt(0)); | 
 |  2762   Register result = ToRegister(instr->result()); | 
 |  2763   Register scratch = scratch0(); | 
 |  2764  | 
 |  2765   // Deoptimize if not a heap number. | 
 |  2766   __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 
 |  2767   __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 
 |  2768   DeoptimizeIf(ne, instr->environment(), scratch, Operand(at)); | 
 |  2769  | 
 |  2770   Label done; | 
 |  2771   Register exponent = scratch0(); | 
 |  2772   scratch = no_reg; | 
 |  2773   __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); | 
 |  2774   // Check the sign of the argument. If the argument is positive, just | 
 |  2775   // return it. | 
 |  2776   __ Move(result, input); | 
 |  2777   __ And(at, exponent, Operand(HeapNumber::kSignMask)); | 
 |  2778   __ Branch(&done, eq, at, Operand(zero_reg)); | 
 |  2779  | 
 |  2780   // Input is negative. Reverse its sign. | 
 |  2781   // Preserve the value of all registers. | 
 |  2782   { | 
 |  2783     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); | 
 |  2784  | 
 |  2785     // Registers were saved at the safepoint, so we can use | 
 |  2786     // many scratch registers. | 
 |  2787     Register tmp1 = input.is(a1) ? a0 : a1; | 
 |  2788     Register tmp2 = input.is(a2) ? a0 : a2; | 
 |  2789     Register tmp3 = input.is(a3) ? a0 : a3; | 
 |  2790     Register tmp4 = input.is(t0) ? a0 : t0; | 
 |  2791  | 
 |  2792     // exponent: floating point exponent value. | 
 |  2793  | 
 |  2794     Label allocated, slow; | 
 |  2795     __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex); | 
 |  2796     __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow); | 
 |  2797     __ Branch(&allocated); | 
 |  2798  | 
 |  2799     // Slow case: Call the runtime system to do the number allocation. | 
 |  2800     __ bind(&slow); | 
 |  2801  | 
 |  2802     CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); | 
 |  2803     // Set the pointer to the new heap number in tmp1. | 
 |  2804     if (!tmp1.is(v0)) __ mov(tmp1, v0); | 
 |  2806     // Restore input after call to runtime. | 
 |  2807     __ LoadFromSafepointRegisterSlot(input, input); | 
 |  2808     __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); | 
 |  2809  | 
 |  2810     __ bind(&allocated); | 
 |  2811     // exponent: floating point exponent value. | 
 |  2812     // tmp1: allocated heap number. | 
 |  2813     __ And(exponent, exponent, Operand(~HeapNumber::kSignMask)); | 
 |  2814     __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset)); | 
 |  2815     __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); | 
 |  2816     __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset)); | 
 |  2817  | 
 |  2818     __ StoreToSafepointRegisterSlot(tmp1, result); | 
 |  2819   } | 
 |  2820  | 
 |  2821   __ bind(&done); | 
 |  2822 } | 
 |  2823  | 
 |  2824  | 
 |  2825 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) { | 
 |  2826   Register input = ToRegister(instr->InputAt(0)); | 
 |  2827   Register result = ToRegister(instr->result()); | 
 |  2828   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); | 
 |  2829   Label done; | 
 |  2830   __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg)); | 
 |  2831   __ mov(result, input); | 
 |  2832   ASSERT_EQ(2, masm()->InstructionsGeneratedSince(&done)); | 
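 |         // The mov above sits in the branch delay slot; with the trampoline | 
 |         // pool blocked, the branch/delay-slot pair should be exactly the two | 
 |         // instructions the assert expects. | 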
 |  2833   __ subu(result, zero_reg, input); | 
 |  2834   // Overflow if result is still negative, i.e. 0x80000000 (INT_MIN). | 
 |  2835   DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg)); | 
 |  2836   __ bind(&done); | 
 |  2837 } | 
 |  2838  | 
 |  2839  | 
 |  2840 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { | 
 |  2841   // Class for deferred case. | 
 |  2842   class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { | 
 |  2843    public: | 
 |  2844     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, | 
 |  2845                                     LUnaryMathOperation* instr) | 
 |  2846         : LDeferredCode(codegen), instr_(instr) { } | 
 |  2847     virtual void Generate() { | 
 |  2848       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); | 
 |  2849     } | 
 |  2850     virtual LInstruction* instr() { return instr_; } | 
 |  2851    private: | 
 |  2852     LUnaryMathOperation* instr_; | 
 |  2853   }; | 
 |  2854  | 
 |  2855   Representation r = instr->hydrogen()->value()->representation(); | 
 |  2856   if (r.IsDouble()) { | 
 |  2857     FPURegister input = ToDoubleRegister(instr->InputAt(0)); | 
 |  2858     FPURegister result = ToDoubleRegister(instr->result()); | 
 |  2859     __ abs_d(result, input); | 
 |  2860   } else if (r.IsInteger32()) { | 
 |  2861     EmitIntegerMathAbs(instr); | 
 |  2862   } else { | 
 |  2863     // Representation is tagged. | 
 |  2864     DeferredMathAbsTaggedHeapNumber* deferred = | 
 |  2865         new DeferredMathAbsTaggedHeapNumber(this, instr); | 
 |  2866     Register input = ToRegister(instr->InputAt(0)); | 
 |  2867     // Smi check. | 
 |  2868     __ JumpIfNotSmi(input, deferred->entry()); | 
 |  2869     // If smi, handle it directly. | 
 |  2870     EmitIntegerMathAbs(instr); | 
 |  2871     __ bind(deferred->exit()); | 
 |  2872   } | 
 |  2873 } | 
 |  2874  | 
 |  2875  | 
 |  2876 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { | 
 |  2877   DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); | 
 |  2878   Register result = ToRegister(instr->result()); | 
 |  2879   FPURegister single_scratch = double_scratch0().low(); | 
 |  2880   Register scratch1 = scratch0(); | 
 |  2881   Register except_flag = ToRegister(instr->TempAt(0)); | 
 |  2882  | 
 |  2883   __ EmitFPUTruncate(kRoundToMinusInf, | 
 |  2884                      single_scratch, | 
 |  2885                      input, | 
 |  2886                      scratch1, | 
 |  2887                      except_flag); | 
 |  2888  | 
 |  2889   // Deopt if the operation did not succeed. | 
 |  2890   DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg)); | 
 |  2891  | 
 |  2892   // Load the result. | 
 |  2893   __ mfc1(result, single_scratch); | 
 |  2894  | 
 |  2895   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 
 |  2896     // Test for -0. | 
 |  2897     Label done; | 
 |  2898     __ Branch(&done, ne, result, Operand(zero_reg)); | 
 |  2899     __ mfc1(scratch1, input.high()); | 
 |  2900     __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | 
 |  2901     DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg)); | 
 |  2902     __ bind(&done); | 
 |  2903   } | 
 |  2904 } | 
 |  2905  | 
 |  2906  | 
 |  2907 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { | 
 |  2908   DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); | 
 |  2909   Register result = ToRegister(instr->result()); | 
 |  2910   Register scratch = scratch0(); | 
 |  2911   Label done, check_sign_on_zero; | 
 |  2912  | 
 |  2913   // Extract exponent bits. | 
 |  2914   __ mfc1(result, input.high()); | 
 |  2915   __ Ext(scratch, | 
 |  2916          result, | 
 |  2917          HeapNumber::kExponentShift, | 
 |  2918          HeapNumber::kExponentBits); | 
 |  2919  | 
 |  2920   // If the number is in ]-0.5, +0.5[, the result is +/- 0. | 
 |  2921   Label skip1; | 
 |  2922   __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2)); | 
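 |         // A biased exponent of at most kExponentBias - 2 means an unbiased | 
 |         // exponent of at most -2, i.e. |input| < 0.5. | 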
 |  2923   __ mov(result, zero_reg); | 
 |  2924   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 
 |  2925     __ Branch(&check_sign_on_zero); | 
 |  2926   } else { | 
 |  2927     __ Branch(&done); | 
 |  2928   } | 
 |  2929   __ bind(&skip1); | 
 |  2930  | 
 |  2931   // The following conversion will not work with numbers | 
 |  2932   // outside of ]-2^32, 2^32[. | 
 |  2933   DeoptimizeIf(ge, instr->environment(), scratch, | 
 |  2934                Operand(HeapNumber::kExponentBias + 32)); | 
 |  2935  | 
 |  2936   // Save the original sign for later comparison. | 
 |  2937   __ And(scratch, result, Operand(HeapNumber::kSignMask)); | 
 |  2938  | 
 |  2939   __ Move(double_scratch0(), 0.5); | 
 |  2940   __ add_d(input, input, double_scratch0()); | 
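 |         // Rounding is implemented as floor(input + 0.5), using the | 
 |         // round-to-minus-infinity truncation further below. | 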
 |  2941  | 
 |  2942   // Check the sign of the result: if the sign changed, the input | 
 |  2943   // value was in ]-0.5, 0[ and the result should be -0. | 
 |  2944   __ mfc1(result, input.high()); | 
 |  2945   __ Xor(result, result, Operand(scratch)); | 
 |  2946   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 
 |  2947     // ARM uses 'mi' here, which is 'lt' on MIPS. | 
 |  2948     DeoptimizeIf(lt, instr->environment(), result, | 
 |  2949                  Operand(zero_reg)); | 
 |  2950   } else { | 
 |  2951     Label skip2; | 
 |  2952     // ARM uses 'mi' here, which is 'lt' on MIPS. | 
 |  2953     // Negating it gives 'ge'. | 
 |  2954     __ Branch(&skip2, ge, result, Operand(zero_reg)); | 
 |  2955     __ mov(result, zero_reg); | 
 |  2956     __ Branch(&done); | 
 |  2957     __ bind(&skip2); | 
 |  2958   } | 
 |  2959  | 
 |  2960   Register except_flag = scratch; | 
 |  2961  | 
 |  2962   __ EmitFPUTruncate(kRoundToMinusInf, | 
 |  2963                      double_scratch0().low(), | 
 |  2964                      input, | 
 |  2965                      result, | 
 |  2966                      except_flag); | 
 |  2967  | 
 |  2968   DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg)); | 
 |  2969  | 
 |  2970   __ mfc1(result, double_scratch0().low()); | 
 |  2971  | 
 |  2972   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 
 |  2973     // Test for -0. | 
 |  2974     __ Branch(&done, ne, result, Operand(zero_reg)); | 
 |  2975     __ bind(&check_sign_on_zero); | 
 |  2976     __ mfc1(scratch, input.high()); | 
 |  2977     __ And(scratch, scratch, Operand(HeapNumber::kSignMask)); | 
 |  2978     DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg)); | 
 |  2979   } | 
 |  2980   __ bind(&done); | 
 |  2981 } | 
 |  2982  | 
 |  2983  | 
 |  2984 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { | 
 |  2985   DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); | 
 |  2986   DoubleRegister result = ToDoubleRegister(instr->result()); | 
 |  2987   __ sqrt_d(result, input); | 
 |  2988 } | 
 |  2989  | 
 |  2990  | 
 |  2991 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { | 
 |  2992   DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); | 
 |  2993   DoubleRegister result = ToDoubleRegister(instr->result()); | 
 |  2994   DoubleRegister double_scratch = double_scratch0(); | 
 |  2995  | 
 |  2996   // Add +0 to convert -0 to +0. | 
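 |         // IEEE 754 addition gives (-0) + (+0) == +0, so the square root | 
 |         // below returns +0 for a -0 input, as required for Math.pow(x, 0.5). | 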
 |  2997   __ mtc1(zero_reg, double_scratch.low()); | 
 |  2998   __ mtc1(zero_reg, double_scratch.high()); | 
 |  2999   __ add_d(result, input, double_scratch); | 
 |  3000   __ sqrt_d(result, result); | 
 |  3001 } | 
 |  3002  | 
 |  3003  | 
 |  3004 void LCodeGen::DoPower(LPower* instr) { | 
 |  3005   LOperand* left = instr->InputAt(0); | 
 |  3006   LOperand* right = instr->InputAt(1); | 
 |  3007   Register scratch = scratch0(); | 
 |  3008   DoubleRegister result_reg = ToDoubleRegister(instr->result()); | 
 |  3009   Representation exponent_type = instr->hydrogen()->right()->representation(); | 
 |  3010   if (exponent_type.IsDouble()) { | 
 |  3011     // Prepare arguments and call C function. | 
 |  3012     __ PrepareCallCFunction(0, 2, scratch); | 
 |  3013     __ SetCallCDoubleArguments(ToDoubleRegister(left), | 
 |  3014                                ToDoubleRegister(right)); | 
 |  3015     __ CallCFunction( | 
 |  3016         ExternalReference::power_double_double_function(isolate()), 0, 2); | 
 |  3017   } else if (exponent_type.IsInteger32()) { | 
 |  3018     ASSERT(ToRegister(right).is(a0)); | 
 |  3019     // Prepare arguments and call C function. | 
 |  3020     __ PrepareCallCFunction(1, 1, scratch); | 
 |  3021     __ SetCallCDoubleArguments(ToDoubleRegister(left), ToRegister(right)); | 
 |  3022     __ CallCFunction( | 
 |  3023         ExternalReference::power_double_int_function(isolate()), 1, 1); | 
 |  3024   } else { | 
 |  3025     ASSERT(exponent_type.IsTagged()); | 
 |  3026     ASSERT(instr->hydrogen()->left()->representation().IsDouble()); | 
 |  3027  | 
 |  3028     Register right_reg = ToRegister(right); | 
 |  3029  | 
 |  3030     // Check for smi on the right hand side. | 
 |  3031     Label non_smi, call; | 
 |  3032     __ JumpIfNotSmi(right_reg, &non_smi); | 
 |  3033  | 
 |  3034     // Untag smi and convert it to a double. | 
 |  3035     __ SmiUntag(right_reg); | 
 |  3036     FPURegister single_scratch = double_scratch0(); | 
 |  3037     __ mtc1(right_reg, single_scratch); | 
 |  3038     __ cvt_d_w(result_reg, single_scratch); | 
 |  3039     __ Branch(&call); | 
 |  3040  | 
 |  3041     // Heap number map check. | 
 |  3042     __ bind(&non_smi); | 
 |  3043     __ lw(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset)); | 
 |  3044     __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 
 |  3045     DeoptimizeIf(ne, instr->environment(), scratch, Operand(at)); | 
 |  3046     __ ldc1(result_reg, FieldMemOperand(right_reg, HeapNumber::kValueOffset)); | 
 |  3047  | 
 |  3048     // Prepare arguments and call C function. | 
 |  3049     __ bind(&call); | 
 |  3050     __ PrepareCallCFunction(0, 2, scratch); | 
 |  3051     __ SetCallCDoubleArguments(ToDoubleRegister(left), result_reg); | 
 |  3052     __ CallCFunction( | 
 |  3053         ExternalReference::power_double_double_function(isolate()), 0, 2); | 
 |  3054   } | 
 |  3055   // Store the result in the result register. | 
 |  3056   __ GetCFunctionDoubleResult(result_reg); | 
 |  3057 } | 
 |  3058  | 
 |  3059  | 
 |  3060 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { | 
 |  3061   ASSERT(ToDoubleRegister(instr->result()).is(f4)); | 
 |  3062   TranscendentalCacheStub stub(TranscendentalCache::LOG, | 
 |  3063                                TranscendentalCacheStub::UNTAGGED); | 
 |  3064   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 
 |  3065 } | 
 |  3066  | 
 |  3067  | 
 |  3068 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) { | 
 |  3069   ASSERT(ToDoubleRegister(instr->result()).is(f4)); | 
 |  3070   TranscendentalCacheStub stub(TranscendentalCache::COS, | 
 |  3071                                TranscendentalCacheStub::UNTAGGED); | 
 |  3072   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 
 |  3073 } | 
 |  3074  | 
 |  3075  | 
 |  3076 void LCodeGen::DoMathSin(LUnaryMathOperation* instr) { | 
 |  3077   ASSERT(ToDoubleRegister(instr->result()).is(f4)); | 
 |  3078   TranscendentalCacheStub stub(TranscendentalCache::SIN, | 
 |  3079                                TranscendentalCacheStub::UNTAGGED); | 
 |  3080   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 
 |  3081 } | 
 |  3082  | 
 |  3083  | 
 |  3084 void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) { | 
 |  3085   switch (instr->op()) { | 
 |  3086     case kMathAbs: | 
 |  3087       DoMathAbs(instr); | 
 |  3088       break; | 
 |  3089     case kMathFloor: | 
 |  3090       DoMathFloor(instr); | 
 |  3091       break; | 
 |  3092     case kMathRound: | 
 |  3093       DoMathRound(instr); | 
 |  3094       break; | 
 |  3095     case kMathSqrt: | 
 |  3096       DoMathSqrt(instr); | 
 |  3097       break; | 
 |  3098     case kMathPowHalf: | 
 |  3099       DoMathPowHalf(instr); | 
 |  3100       break; | 
 |  3101     case kMathCos: | 
 |  3102       DoMathCos(instr); | 
 |  3103       break; | 
 |  3104     case kMathSin: | 
 |  3105       DoMathSin(instr); | 
 |  3106       break; | 
 |  3107     case kMathLog: | 
 |  3108       DoMathLog(instr); | 
 |  3109       break; | 
 |  3110     default: | 
 |  3111       Abort("Unimplemented type of LUnaryMathOperation."); | 
 |  3112       UNREACHABLE(); | 
 |  3113   } | 
 |  3114 } | 
 |  3115  | 
 |  3116  | 
 |  3117 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { | 
 |  3118   ASSERT(ToRegister(instr->function()).is(a1)); | 
 |  3119   ASSERT(instr->HasPointerMap()); | 
 |  3120   ASSERT(instr->HasDeoptimizationEnvironment()); | 
 |  3121   LPointerMap* pointers = instr->pointer_map(); | 
 |  3122   LEnvironment* env = instr->deoptimization_environment(); | 
 |  3123   RecordPosition(pointers->position()); | 
 |  3124   RegisterEnvironmentForDeoptimization(env); | 
 |  3125   SafepointGenerator generator(this, pointers, env->deoptimization_index()); | 
 |  3126   ParameterCount count(instr->arity()); | 
 |  3127   __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD); | 
 |  3128   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 
 |  3129 } | 
 |  3130  | 
 |  3131  | 
 |  3132 void LCodeGen::DoCallKeyed(LCallKeyed* instr) { | 
 |  3133   ASSERT(ToRegister(instr->result()).is(v0)); | 
 |  3134  | 
 |  3135   int arity = instr->arity(); | 
 |  3136   Handle<Code> ic = | 
 |  3137       isolate()->stub_cache()->ComputeKeyedCallInitialize(arity); | 
 |  3138   CallCode(ic, RelocInfo::CODE_TARGET, instr); | 
 |  3139   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 
 |  3140 } | 
 |  3141  | 
 |  3142  | 
 |  3143 void LCodeGen::DoCallNamed(LCallNamed* instr) { | 
 |  3144   ASSERT(ToRegister(instr->result()).is(v0)); | 
 |  3145  | 
 |  3146   int arity = instr->arity(); | 
 |  3147   RelocInfo::Mode mode = RelocInfo::CODE_TARGET; | 
 |  3148   Handle<Code> ic = | 
 |  3149       isolate()->stub_cache()->ComputeCallInitialize(arity, mode); | 
 |  3150   __ li(a2, Operand(instr->name())); | 
 |  3151   CallCode(ic, mode, instr); | 
 |  3152   // Restore context register. | 
 |  3153   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 
 |  3154 } | 
 |  3155  | 
 |  3156  | 
 |  3157 void LCodeGen::DoCallFunction(LCallFunction* instr) { | 
 |  3158   ASSERT(ToRegister(instr->result()).is(v0)); | 
 |  3159  | 
 |  3160   int arity = instr->arity(); | 
 |  3161   CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS); | 
 |  3162   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 
 |  3163   __ Drop(1); | 
 |  3164   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 
 |  3165 } | 
 |  3166  | 
 |  3167  | 
 |  3168 void LCodeGen::DoCallGlobal(LCallGlobal* instr) { | 
 |  3169   ASSERT(ToRegister(instr->result()).is(v0)); | 
 |  3170  | 
 |  3171   int arity = instr->arity(); | 
 |  3172   RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT; | 
 |  3173   Handle<Code> ic = | 
 |  3174       isolate()->stub_cache()->ComputeCallInitialize(arity, mode); | 
 |  3175   __ li(a2, Operand(instr->name())); | 
 |  3176   CallCode(ic, mode, instr); | 
 |  3177   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 
 |  3178 } | 
 |  3179  | 
 |  3180  | 
 |  3181 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) { | 
 |  3182   ASSERT(ToRegister(instr->result()).is(v0)); | 
 |  3183   __ li(a1, Operand(instr->target())); | 
 |  3184   CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION); | 
 |  3185 } | 
 |  3186  | 
 |  3187  | 
 |  3188 void LCodeGen::DoCallNew(LCallNew* instr) { | 
 |  3189   ASSERT(ToRegister(instr->InputAt(0)).is(a1)); | 
 |  3190   ASSERT(ToRegister(instr->result()).is(v0)); | 
 |  3191  | 
 |  3192   Handle<Code> builtin = isolate()->builtins()->JSConstructCall(); | 
 |  3193   __ li(a0, Operand(instr->arity())); | 
 |  3194   CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr); | 
 |  3195 } | 
 |  3196  | 
 |  3197  | 
 |  3198 void LCodeGen::DoCallRuntime(LCallRuntime* instr) { | 
 |  3199   CallRuntime(instr->function(), instr->arity(), instr); | 
 |  3200 } | 
 |  3201  | 
 |  3202  | 
 |  3203 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { | 
 |  3204   Register object = ToRegister(instr->object()); | 
 |  3205   Register value = ToRegister(instr->value()); | 
 |  3206   Register scratch = scratch0(); | 
 |  3207   int offset = instr->offset(); | 
 |  3208  | 
 |  3209   ASSERT(!object.is(value)); | 
 |  3210  | 
 |  3211   if (!instr->transition().is_null()) { | 
 |  3212     __ li(scratch, Operand(instr->transition())); | 
 |  3213     __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); | 
 |  3214   } | 
 |  3215  | 
 |  3216   // Do the store. | 
 |  3217   HType type = instr->hydrogen()->value()->type(); | 
 |  3218   SmiCheck check_needed = | 
 |  3219       type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 
 |  3220   if (instr->is_in_object()) { | 
 |  3221     __ sw(value, FieldMemOperand(object, offset)); | 
 |  3222     if (instr->hydrogen()->NeedsWriteBarrier()) { | 
 |  3223       // Update the write barrier for the object for in-object properties. | 
 |  3224       __ RecordWriteField(object, | 
 |  3225                           offset, | 
 |  3226                           value, | 
 |  3227                           scratch, | 
 |  3228                           kRAHasBeenSaved, | 
 |  3229                           kSaveFPRegs, | 
 |  3230                           EMIT_REMEMBERED_SET, | 
 |  3231                           check_needed); | 
 |  3232     } | 
 |  3233   } else { | 
 |  3234     __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset)); | 
 |  3235     __ sw(value, FieldMemOperand(scratch, offset)); | 
 |  3236     if (instr->hydrogen()->NeedsWriteBarrier()) { | 
 |  3237       // Update the write barrier for the properties array. | 
 |  3238       // object is used as a scratch register. | 
 |  3239       __ RecordWriteField(scratch, | 
 |  3240                           offset, | 
 |  3241                           value, | 
 |  3242                           object, | 
 |  3243                           kRAHasBeenSaved, | 
 |  3244                           kSaveFPRegs, | 
 |  3245                           EMIT_REMEMBERED_SET, | 
 |  3246                           check_needed); | 
 |  3247     } | 
 |  3248   } | 
 |  3249 } | 
 |  3250  | 
 |  3251  | 
 |  3252 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { | 
 |  3253   ASSERT(ToRegister(instr->object()).is(a1)); | 
 |  3254   ASSERT(ToRegister(instr->value()).is(a0)); | 
 |  3255  | 
 |  3256   // Name is always in a2. | 
 |  3257   __ li(a2, Operand(instr->name())); | 
 |  3258   Handle<Code> ic = instr->strict_mode() | 
 |  3259       ? isolate()->builtins()->StoreIC_Initialize_Strict() | 
 |  3260       : isolate()->builtins()->StoreIC_Initialize(); | 
 |  3261   CallCode(ic, RelocInfo::CODE_TARGET, instr); | 
 |  3262 } | 
 |  3263  | 
 |  3264  | 
 |  3265 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { | 
 |  3266   DeoptimizeIf(hs, | 
 |  3267                instr->environment(), | 
 |  3268                ToRegister(instr->index()), | 
 |  3269                Operand(ToRegister(instr->length()))); | 
 |  3270 } | 
 |  3271  | 
 |  3272  | 
 |  3273 void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { | 
 |  3274   Register value = ToRegister(instr->value()); | 
 |  3275   Register elements = ToRegister(instr->object()); | 
 |  3276   Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; | 
 |  3277   Register scratch = scratch0(); | 
 |  3278  | 
 |  3279   // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS | 
 |  3280   // conversion, so it deopts in that case. | 
 |  3281   if (instr->hydrogen()->ValueNeedsSmiCheck()) { | 
 |  3282     __ And(at, value, Operand(kSmiTagMask)); | 
 |  3283     DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg)); | 
 |  3284   } | 
 |  3285  | 
 |  3286   // Do the store. | 
 |  3287   if (instr->key()->IsConstantOperand()) { | 
 |  3288     ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); | 
 |  3289     LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); | 
 |  3290     int offset = | 
 |  3291         ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize; | 
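 |           // E.g. a constant key of 2 stores at offset | 
 |           // 2 * kPointerSize + FixedArray::kHeaderSize within the tagged | 
 |           // object; FieldMemOperand subtracts the heap object tag. | 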
 |  3292     __ sw(value, FieldMemOperand(elements, offset)); | 
 |  3293   } else { | 
 |  3294     __ sll(scratch, key, kPointerSizeLog2); | 
 |  3295     __ addu(scratch, elements, scratch); | 
 |  3296     __ sw(value, FieldMemOperand(scratch, FixedArray::kHeaderSize)); | 
 |  3297   } | 
 |  3298  | 
 |  3299   if (instr->hydrogen()->NeedsWriteBarrier()) { | 
 |  3300     HType type = instr->hydrogen()->value()->type(); | 
 |  3301     SmiCheck check_needed = | 
 |  3302         type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 
 |  3303     // Compute address of modified element and store it into key register. | 
 |  3304     __ Addu(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 
 |  3305     __ RecordWrite(elements, | 
 |  3306                    key, | 
 |  3307                    value, | 
 |  3308                    kRAHasBeenSaved, | 
 |  3309                    kSaveFPRegs, | 
 |  3310                    EMIT_REMEMBERED_SET, | 
 |  3311                    check_needed); | 
 |  3312   } | 
 |  3313 } | 
 |  3314  | 
 |  3315  | 
 |  3316 void LCodeGen::DoStoreKeyedFastDoubleElement( | 
 |  3317     LStoreKeyedFastDoubleElement* instr) { | 
 |  3318   DoubleRegister value = ToDoubleRegister(instr->value()); | 
 |  3319   Register elements = ToRegister(instr->elements()); | 
 |  3320   Register key = no_reg; | 
 |  3321   Register scratch = scratch0(); | 
 |  3322   bool key_is_constant = instr->key()->IsConstantOperand(); | 
 |  3323   int constant_key = 0; | 
 |  3324   Label not_nan; | 
 |  3325  | 
 |  3326   // Calculate the effective address of the slot in the array to store the | 
 |  3327   // double value. | 
 |  3328   if (key_is_constant) { | 
 |  3329     constant_key = ToInteger32(LConstantOperand::cast(instr->key())); | 
 |  3330     if (constant_key & 0xF0000000) { | 
 |  3331       Abort("array index constant value too big."); | 
 |  3332     } | 
 |  3333   } else { | 
 |  3334     key = ToRegister(instr->key()); | 
 |  3335   } | 
 |  3336   int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); | 
 |  3337   if (key_is_constant) { | 
 |  3338     __ Addu(scratch, elements, Operand(constant_key * (1 << shift_size) + | 
 |  3339             FixedDoubleArray::kHeaderSize - kHeapObjectTag)); | 
 |  3340   } else { | 
 |  3341     __ sll(scratch, key, shift_size); | 
 |  3342     __ Addu(scratch, elements, Operand(scratch)); | 
 |  3343     __ Addu(scratch, scratch, | 
 |  3344             Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); | 
 |  3345   } | 
 |  3346  | 
 |  3347   Label is_nan; | 
 |  3348   // Check for NaN. All NaNs must be canonicalized. | 
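 |         // A floating-point equality compare of value with itself is | 
 |         // unordered exactly when value is NaN, so BranchF takes its NaN | 
 |         // target only in that case. | 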
 |  3349   __ BranchF(NULL, &is_nan, eq, value, value); | 
 |  3350   __ Branch(&not_nan); | 
 |  3351  | 
 |  3352   // Only load the canonical NaN if the branch above detected a NaN. | 
 |  3353   __ bind(&is_nan); | 
 |  3354   __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double()); | 
 |  3355  | 
 |  3356   __ bind(&not_nan); | 
 |  3357   __ sdc1(value, MemOperand(scratch)); | 
 |  3358 } | 
 |  3359  | 
 |  3360  | 
 |  3361 void LCodeGen::DoStoreKeyedSpecializedArrayElement( | 
 |  3362     LStoreKeyedSpecializedArrayElement* instr) { | 
 |  3363  | 
 |  3364   Register external_pointer = ToRegister(instr->external_pointer()); | 
 |  3365   Register key = no_reg; | 
 |  3366   ElementsKind elements_kind = instr->elements_kind(); | 
 |  3367   bool key_is_constant = instr->key()->IsConstantOperand(); | 
 |  3368   int constant_key = 0; | 
 |  3369   if (key_is_constant) { | 
 |  3370     constant_key = ToInteger32(LConstantOperand::cast(instr->key())); | 
 |  3371     if (constant_key & 0xF0000000) { | 
 |  3372       Abort("array index constant value too big."); | 
 |  3373     } | 
 |  3374   } else { | 
 |  3375     key = ToRegister(instr->key()); | 
 |  3376   } | 
 |  3377   int shift_size = ElementsKindToShiftSize(elements_kind); | 
 |  3378  | 
 |  3379   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || | 
 |  3380       elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { | 
 |  3381     FPURegister value(ToDoubleRegister(instr->value())); | 
 |  3382     if (key_is_constant) { | 
 |  3383       __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size)); | 
 |  3384     } else { | 
 |  3385       __ sll(scratch0(), key, shift_size); | 
 |  3386       __ Addu(scratch0(), scratch0(), external_pointer); | 
 |  3387     } | 
 |  3388  | 
 |  3389     if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { | 
 |  3390       __ cvt_s_d(double_scratch0(), value); | 
 |  3391       __ swc1(double_scratch0(), MemOperand(scratch0())); | 
 |  3392     } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS | 
 |  3393       __ sdc1(value, MemOperand(scratch0())); | 
 |  3394     } | 
 |  3395   } else { | 
 |  3396     Register value(ToRegister(instr->value())); | 
 |  3397     MemOperand mem_operand(zero_reg); | 
 |  3398     Register scratch = scratch0(); | 
 |  3399     if (key_is_constant) { | 
 |  3400       mem_operand = MemOperand(external_pointer, | 
 |  3401                                constant_key * (1 << shift_size)); | 
 |  3402     } else { | 
 |  3403       __ sll(scratch, key, shift_size); | 
 |  3404       __ Addu(scratch, scratch, external_pointer); | 
 |  3405       mem_operand = MemOperand(scratch); | 
 |  3406     } | 
 |  3407     switch (elements_kind) { | 
 |  3408       case EXTERNAL_PIXEL_ELEMENTS: | 
 |  3409       case EXTERNAL_BYTE_ELEMENTS: | 
 |  3410       case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: | 
 |  3411         __ sb(value, mem_operand); | 
 |  3412         break; | 
 |  3413       case EXTERNAL_SHORT_ELEMENTS: | 
 |  3414       case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: | 
 |  3415         __ sh(value, mem_operand); | 
 |  3416         break; | 
 |  3417       case EXTERNAL_INT_ELEMENTS: | 
 |  3418       case EXTERNAL_UNSIGNED_INT_ELEMENTS: | 
 |  3419         __ sw(value, mem_operand); | 
 |  3420         break; | 
 |  3421       case EXTERNAL_FLOAT_ELEMENTS: | 
 |  3422       case EXTERNAL_DOUBLE_ELEMENTS: | 
 |  3423       case FAST_DOUBLE_ELEMENTS: | 
 |  3424       case FAST_ELEMENTS: | 
 |  3425       case FAST_SMI_ONLY_ELEMENTS: | 
 |  3426       case DICTIONARY_ELEMENTS: | 
 |  3427       case NON_STRICT_ARGUMENTS_ELEMENTS: | 
 |  3428         UNREACHABLE(); | 
 |  3429         break; | 
 |  3430     } | 
 |  3431   } | 
 |  3432 } | 
 |  3433  | 
 |  3434 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { | 
 |  3435   ASSERT(ToRegister(instr->object()).is(a2)); | 
 |  3436   ASSERT(ToRegister(instr->key()).is(a1)); | 
 |  3437   ASSERT(ToRegister(instr->value()).is(a0)); | 
 |  3438  | 
 |  3439   Handle<Code> ic = instr->strict_mode() | 
 |  3440       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() | 
 |  3441       : isolate()->builtins()->KeyedStoreIC_Initialize(); | 
 |  3442   CallCode(ic, RelocInfo::CODE_TARGET, instr); | 
 |  3443 } | 
 |  3444  | 
 |  3445  | 
 |  3446 void LCodeGen::DoStringAdd(LStringAdd* instr) { | 
 |  3447   __ push(ToRegister(instr->left())); | 
 |  3448   __ push(ToRegister(instr->right())); | 
 |  3449   StringAddStub stub(NO_STRING_CHECK_IN_STUB); | 
 |  3450   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 
 |  3451 } | 
 |  3452  | 
 |  3453  | 
 |  3454 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { | 
 |  3455   class DeferredStringCharCodeAt: public LDeferredCode { | 
 |  3456    public: | 
 |  3457     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) | 
 |  3458         : LDeferredCode(codegen), instr_(instr) { } | 
 |  3459     virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); } | 
 |  3460     virtual LInstruction* instr() { return instr_; } | 
 |  3461    private: | 
 |  3462     LStringCharCodeAt* instr_; | 
 |  3463   }; | 
 |  3464  | 
 |  3465   Register temp = scratch1(); | 
 |  3466   Register string = ToRegister(instr->string()); | 
 |  3467   Register index = ToRegister(instr->index()); | 
 |  3468   Register result = ToRegister(instr->result()); | 
 |  3469   DeferredStringCharCodeAt* deferred = | 
 |  3470       new DeferredStringCharCodeAt(this, instr); | 
 |  3471  | 
 |  3472   // Fetch the instance type of the receiver into result register. | 
 |  3473   __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset)); | 
 |  3474   __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset)); | 
 |  3475  | 
 |  3476   // We need special handling for indirect strings. | 
 |  3477   Label check_sequential; | 
 |  3478   __ And(temp, result, kIsIndirectStringMask); | 
 |  3479   __ Branch(&check_sequential, eq, temp, Operand(zero_reg)); | 
 |  3480  | 
 |  3481   // Dispatch on the indirect string shape: slice or cons. | 
 |  3482   Label cons_string; | 
 |  3483   __ And(temp, result, kSlicedNotConsMask); | 
 |  3484   __ Branch(&cons_string, eq, temp, Operand(zero_reg)); | 
 |  3485  | 
 |  3486   // Handle slices. | 
 |  3487   Label indirect_string_loaded; | 
 |  3488   __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset)); | 
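 |         // The slice offset is a smi; the arithmetic shift by kSmiTagSize | 
 |         // untags it before it is added to the index. | 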
 |  3489   __ sra(temp, result, kSmiTagSize); | 
 |  3490   __ addu(index, index, temp); | 
 |  3491   __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset)); | 
 |  3492   __ jmp(&indirect_string_loaded); | 
 |  3493  | 
 |  3494   // Handle conses. | 
 |  3495   // Check whether the right-hand side is the empty string (i.e. if | 
 |  3496   // this is really a flat string in a cons string). If that is not | 
 |  3497   // the case, we would rather go to the runtime system now to flatten | 
 |  3498   // the string. | 
 |  3499   __ bind(&cons_string); | 
 |  3500   __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset)); | 
 |  3501   __ LoadRoot(temp, Heap::kEmptyStringRootIndex); | 
 |  3502   __ Branch(deferred->entry(), ne, result, Operand(temp)); | 
 |  3503   // Get the first of the two strings and load its instance type. | 
 |  3504   __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset)); | 
 |  3505  | 
 |  3506   __ bind(&indirect_string_loaded); | 
 |  3507   __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset)); | 
 |  3508   __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset)); | 
 |  3509  | 
 |  3510   // Check whether the string is sequential. The only non-sequential | 
 |  3511   // shapes we support have just been unwrapped above. | 
 |  3512   __ bind(&check_sequential); | 
 |  3513   STATIC_ASSERT(kSeqStringTag == 0); | 
 |  3514   __ And(temp, result, Operand(kStringRepresentationMask)); | 
 |  3515   __ Branch(deferred->entry(), ne, temp, Operand(zero_reg)); | 
 |  3516  | 
 |  3517   // Dispatch on the encoding: ASCII or two-byte. | 
 |  3518   Label ascii_string; | 
 |  3519   STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0); | 
 |  3520   STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); | 
 |  3521   __ And(temp, result, Operand(kStringEncodingMask)); | 
 |  3522   __ Branch(&ascii_string, ne, temp, Operand(zero_reg)); | 
 |  3523  | 
 |  3524   // Two-byte string. | 
 |  3525   // Load the two-byte character code into the result register. | 
 |  3526   Label done; | 
 |  3527   __ Addu(result, | 
 |  3528           string, | 
 |  3529           Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | 
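 |         // Two-byte characters are two bytes wide, so scale the index by 2. | 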
 |  3530   __ sll(temp, index, 1); | 
 |  3531   __ Addu(result, result, temp); | 
 |  3532   __ lhu(result, MemOperand(result, 0)); | 
 |  3533   __ Branch(&done); | 
 |  3534  | 
 |  3535   // ASCII string. | 
 |  3536   // Load the byte into the result register. | 
 |  3537   __ bind(&ascii_string); | 
 |  3538   __ Addu(result, | 
 |  3539           string, | 
 |  3540           Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); | 
 |  3541   __ Addu(result, result, index); | 
 |  3542   __ lbu(result, MemOperand(result, 0)); | 
 |  3543  | 
 |  3544   __ bind(&done); | 
 |  3545   __ bind(deferred->exit()); | 
 |  3546 } | 
 |  3547  | 
 |  3548  | 
 |  3549 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { | 
 |  3550   Register string = ToRegister(instr->string()); | 
 |  3551   Register result = ToRegister(instr->result()); | 
 |  3552   Register scratch = scratch0(); | 
 |  3553  | 
 |  3554   // TODO(3095996): Get rid of this. For now, we need to make the | 
 |  3555   // result register contain a valid pointer because it is already | 
 |  3556   // contained in the register pointer map. | 
 |  3557   __ mov(result, zero_reg); | 
 |  3558  | 
 |  3559   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); | 
 |  3560   __ push(string); | 
 |  3561   // Push the index as a smi. This is safe because of the checks in | 
 |  3562   // DoStringCharCodeAt above. | 
 |  3563   if (instr->index()->IsConstantOperand()) { | 
 |  3564     int const_index = ToInteger32(LConstantOperand::cast(instr->index())); | 
 |  3565     __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index))); | 
 |  3566     __ push(scratch); | 
 |  3567   } else { | 
 |  3568     Register index = ToRegister(instr->index()); | 
 |  3569     __ SmiTag(index); | 
 |  3570     __ push(index); | 
 |  3571   } | 
 |  3572   CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr); | 
 |  3573   if (FLAG_debug_code) { | 
 |  3574     __ AbortIfNotSmi(v0); | 
 |  3575   } | 
 |  3576   __ SmiUntag(v0); | 
 |  3577   __ StoreToSafepointRegisterSlot(v0, result); | 
 |  3578 } | 
 |  3579  | 
 |  3580  | 
 |  3581 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { | 
 |  3582   class DeferredStringCharFromCode: public LDeferredCode { | 
 |  3583    public: | 
 |  3584     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) | 
 |  3585         : LDeferredCode(codegen), instr_(instr) { } | 
 |  3586     virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); } | 
 |  3587     virtual LInstruction* instr() { return instr_; } | 
 |  3588    private: | 
 |  3589     LStringCharFromCode* instr_; | 
 |  3590   }; | 
 |  3591  | 
 |  3592   DeferredStringCharFromCode* deferred = | 
 |  3593       new DeferredStringCharFromCode(this, instr); | 
 |  3594  | 
 |  3595   ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); | 
 |  3596   Register char_code = ToRegister(instr->char_code()); | 
 |  3597   Register result = ToRegister(instr->result()); | 
 |  3598   Register scratch = scratch0(); | 
 |  3599   ASSERT(!char_code.is(result)); | 
 |  3600  | 
 |  3601   __ Branch(deferred->entry(), hi, | 
 |  3602             char_code, Operand(String::kMaxAsciiCharCode)); | 
 |  3603   __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); | 
 |  3604   __ sll(scratch, char_code, kPointerSizeLog2); | 
 |  3605   __ Addu(result, result, scratch); | 
 |  3606   __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize)); | 
 |  3607   __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); | 
 |  3608   __ Branch(deferred->entry(), eq, result, Operand(scratch)); | 
 |  3609   __ bind(deferred->exit()); | 
 |  3610 } | 
 |  3611  | 
 |  3612  | 
 |  3613 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { | 
 |  3614   Register char_code = ToRegister(instr->char_code()); | 
 |  3615   Register result = ToRegister(instr->result()); | 
 |  3616  | 
 |  3617   // TODO(3095996): Get rid of this. For now, we need to make the | 
 |  3618   // result register contain a valid pointer because it is already | 
 |  3619   // contained in the register pointer map. | 
 |  3620   __ mov(result, zero_reg); | 
 |  3621  | 
 |  3622   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); | 
 |  3623   __ SmiTag(char_code); | 
 |  3624   __ push(char_code); | 
 |  3625   CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr); | 
 |  3626   __ StoreToSafepointRegisterSlot(v0, result); | 
 |  3627 } | 
 |  3628  | 
 |  3629  | 
 |  3630 void LCodeGen::DoStringLength(LStringLength* instr) { | 
 |  3631   Register string = ToRegister(instr->InputAt(0)); | 
 |  3632   Register result = ToRegister(instr->result()); | 
 |  3633   __ lw(result, FieldMemOperand(string, String::kLengthOffset)); | 
 |  3634 } | 
 |  3635  | 
 |  3636  | 
 |  3637 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { | 
 |  3638   LOperand* input = instr->InputAt(0); | 
 |  3639   ASSERT(input->IsRegister() || input->IsStackSlot()); | 
 |  3640   LOperand* output = instr->result(); | 
 |  3641   ASSERT(output->IsDoubleRegister()); | 
 |  3642   FPURegister single_scratch = double_scratch0().low(); | 
 |  3643   if (input->IsStackSlot()) { | 
 |  3644     Register scratch = scratch0(); | 
 |  3645     __ lw(scratch, ToMemOperand(input)); | 
 |  3646     __ mtc1(scratch, single_scratch); | 
 |  3647   } else { | 
 |  3648     __ mtc1(ToRegister(input), single_scratch); | 
 |  3649   } | 
 |  3650   __ cvt_d_w(ToDoubleRegister(output), single_scratch); | 
 |  3651 } | 
 |  3652  | 
 |  3653  | 
 |  3654 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { | 
 |  3655   class DeferredNumberTagI: public LDeferredCode { | 
 |  3656    public: | 
 |  3657     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) | 
 |  3658         : LDeferredCode(codegen), instr_(instr) { } | 
 |  3659     virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); } | 
 |  3660     virtual LInstruction* instr() { return instr_; } | 
 |  3661    private: | 
 |  3662     LNumberTagI* instr_; | 
 |  3663   }; | 
 |  3664  | 
 |  3665   LOperand* input = instr->InputAt(0); | 
 |  3666   ASSERT(input->IsRegister() && input->Equals(instr->result())); | 
 |  3667   Register reg = ToRegister(input); | 
 |  3668   Register overflow = scratch0(); | 
 |  3669  | 
 |  3670   DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr); | 
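 |         // Try to tag the value as a smi; the deferred code allocates a heap | 
 |         // number if the tagging shift overflows. | 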
 |  3671   __ SmiTagCheckOverflow(reg, overflow); | 
 |  3672   __ BranchOnOverflow(deferred->entry(), overflow); | 
 |  3673   __ bind(deferred->exit()); | 
 |  3674 } | 
 |  3675  | 
 |  3676  | 
 |  3677 void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) { | 
 |  3678   Label slow; | 
 |  3679   Register reg = ToRegister(instr->InputAt(0)); | 
 |  3680   FPURegister dbl_scratch = double_scratch0(); | 
 |  3681  | 
 |  3682   // Preserve the value of all registers. | 
 |  3683   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); | 
 |  3684  | 
 |  3685   // There was overflow, so bits 30 and 31 of the original integer | 
 |  3686   // disagree. Try to allocate a heap number in new space and store | 
 |  3687   // the value there. If that fails, call the runtime system. | 
 |  3688   Label done; | 
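 |         // SmiUntag shifts the wrapped result back; flipping the sign bit then | 
 |         // restores the original integer value. | 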
 |  3689   __ SmiUntag(reg); | 
 |  3690   __ Xor(reg, reg, Operand(0x80000000)); | 
 |  3691   __ mtc1(reg, dbl_scratch); | 
 |  3692   __ cvt_d_w(dbl_scratch, dbl_scratch); | 
 |  3693   if (FLAG_inline_new) { | 
 |  3694     __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex); | 
 |  3695     __ AllocateHeapNumber(t1, a3, t0, t2, &slow); | 
 |  3696     if (!reg.is(t1)) __ mov(reg, t1); | 
 |  3697     __ Branch(&done); | 
 |  3698   } | 
 |  3699  | 
 |  3700   // Slow case: Call the runtime system to do the number allocation. | 
 |  3701   __ bind(&slow); | 
 |  3702  | 
 |  3703   // TODO(3095996): Put a valid pointer value in the stack slot where the result | 
 |  3704   // register is stored, as this register is in the pointer map, but contains an | 
 |  3705   // integer value. | 
 |  3706   __ StoreToSafepointRegisterSlot(zero_reg, reg); | 
 |  3707   CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); | 
 |  3708   if (!reg.is(v0)) __ mov(reg, v0); | 
 |  3709  | 
 |  3710   // Done. Store the double in dbl_scratch into the value field of the | 
 |  3711   // allocated heap number. | 
 |  3712   __ bind(&done); | 
 |  3713   __ sdc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); | 
 |  3714   __ StoreToSafepointRegisterSlot(reg, reg); | 
 |  3715 } | 
 |  3716  | 
 |  3717  | 
 |  3718 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { | 
 |  3719   class DeferredNumberTagD: public LDeferredCode { | 
 |  3720    public: | 
 |  3721     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) | 
 |  3722         : LDeferredCode(codegen), instr_(instr) { } | 
 |  3723     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } | 
 |  3724     virtual LInstruction* instr() { return instr_; } | 
 |  3725    private: | 
 |  3726     LNumberTagD* instr_; | 
 |  3727   }; | 
 |  3728  | 
 |  3729   DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0)); | 
 |  3730   Register scratch = scratch0(); | 
 |  3731   Register reg = ToRegister(instr->result()); | 
 |  3732   Register temp1 = ToRegister(instr->TempAt(0)); | 
 |  3733   Register temp2 = ToRegister(instr->TempAt(1)); | 
 |  3734  | 
 |  3735   DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr); | 
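 |         // Allocate the heap number inline when possible; the deferred code | 
 |         // falls back to a runtime call. | 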
 |  3736   if (FLAG_inline_new) { | 
 |  3737     __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); | 
 |  3738     __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry()); | 
 |  3739   } else { | 
 |  3740     __ Branch(deferred->entry()); | 
 |  3741   } | 
 |  3742   __ bind(deferred->exit()); | 
 |  3743   __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset)); | 
 |  3744 } | 
 |  3745  | 
 |  3746  | 
 |  3747 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { | 
 |  3748   // TODO(3095996): Get rid of this. For now, we need to make the | 
 |  3749   // result register contain a valid pointer because it is already | 
 |  3750   // contained in the register pointer map. | 
 |  3751   Register reg = ToRegister(instr->result()); | 
 |  3752   __ mov(reg, zero_reg); | 
 |  3753  | 
 |  3754   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); | 
 |  3755   CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); | 
 |  3756   __ StoreToSafepointRegisterSlot(v0, reg); | 
 |  3757 } | 
 |  3758  | 
 |  3759  | 
 |  3760 void LCodeGen::DoSmiTag(LSmiTag* instr) { | 
 |  3761   LOperand* input = instr->InputAt(0); | 
 |  3762   ASSERT(input->IsRegister() && input->Equals(instr->result())); | 
 |  3763   ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)); | 
 |  3764   __ SmiTag(ToRegister(input)); | 
 |  3765 } | 
 |  3766  | 
 |  3767  | 
 |  3768 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { | 
 |  3769   Register scratch = scratch0(); | 
 |  3770   LOperand* input = instr->InputAt(0); | 
 |  3771   ASSERT(input->IsRegister() && input->Equals(instr->result())); | 
 |  3772   if (instr->needs_check()) { | 
 |  3773     STATIC_ASSERT(kHeapObjectTag == 1); | 
 |  3774   // If the input is a HeapObject, the value of scratch will be non-zero. | 
 |  3775     __ And(scratch, ToRegister(input), Operand(kHeapObjectTag)); | 
 |  3776     __ SmiUntag(ToRegister(input)); | 
 |  3777     DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg)); | 
 |  3778   } else { | 
 |  3779     __ SmiUntag(ToRegister(input)); | 
 |  3780   } | 
 |  3781 } | 
 |  3782  | 
 |  3783  | 
 |  3784 void LCodeGen::EmitNumberUntagD(Register input_reg, | 
 |  3785                                 DoubleRegister result_reg, | 
 |  3786                                 bool deoptimize_on_undefined, | 
 |  3787                                 LEnvironment* env) { | 
 |  3788   Register scratch = scratch0(); | 
 |  3789  | 
 |  3790   Label load_smi, done; | 
 |  3791  | 
 |  3792   // Smi check. | 
 |  3793   __ JumpIfSmi(input_reg, &load_smi); | 
 |  3794  | 
 |  3795   // Heap number map check. | 
 |  3796   __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 
 |  3797   __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 
 |  3798   if (deoptimize_on_undefined) { | 
 |  3799     DeoptimizeIf(ne, env, scratch, Operand(at)); | 
 |  3800   } else { | 
 |  3801     Label heap_number; | 
 |  3802     __ Branch(&heap_number, eq, scratch, Operand(at)); | 
 |  3803  | 
 |  3804     __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 
 |  3805     DeoptimizeIf(ne, env, input_reg, Operand(at)); | 
 |  3806  | 
 |  3807     // Convert undefined to NaN. | 
 |  3808     __ LoadRoot(at, Heap::kNanValueRootIndex); | 
 |  3809     __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset)); | 
 |  3810     __ Branch(&done); | 
 |  3811  | 
 |  3812     __ bind(&heap_number); | 
 |  3813   } | 
 |  3814   // Heap number to double register conversion. | 
 |  3815   __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | 
 |  3816   __ Branch(&done); | 
 |  3817  | 
 |  3818   // Smi to double register conversion. | 
 |  3819   __ bind(&load_smi); | 
 |  3820   __ SmiUntag(input_reg);  // Untag smi before converting to float. | 
 |  3821   __ mtc1(input_reg, result_reg); | 
 |  3822   __ cvt_d_w(result_reg, result_reg); | 
 |  3823   __ SmiTag(input_reg);  // Retag smi. | 
 |  3824   __ bind(&done); | 
 |  3825 } | 
 |  3826  | 
 |  3827  | 
 |  3828 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { | 
 |  3829   Register input_reg = ToRegister(instr->InputAt(0)); | 
 |  3830   Register scratch1 = scratch0(); | 
 |  3831   Register scratch2 = ToRegister(instr->TempAt(0)); | 
 |  3832   DoubleRegister double_scratch = double_scratch0(); | 
 |  3833   FPURegister single_scratch = double_scratch.low(); | 
 |  3834  | 
 |  3835   ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2)); | 
 |  3836   ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1)); | 
 |  3837  | 
 |  3838   Label done; | 
 |  3839  | 
 |  3840   // The input is a tagged HeapObject. | 
 |  3841   // Heap number map check. | 
 |  3842   __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 
 |  3843   __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 
 |  3844   // The map in scratch1 and the heap number map in 'at' are used by the | 
 |  3845   // tests in both clauses of the if below. | 
 |  3846  | 
 |  3847   if (instr->truncating()) { | 
 |  3848     Register scratch3 = ToRegister(instr->TempAt(1)); | 
 |  3849     DoubleRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2)); | 
 |  3850     ASSERT(!scratch3.is(input_reg) && | 
 |  3851            !scratch3.is(scratch1) && | 
 |  3852            !scratch3.is(scratch2)); | 
 |  3853     // Performs a truncating conversion of a floating point number as used by | 
 |  3854     // the JS bitwise operations. | 
 |  3855     Label heap_number; | 
 |  3856     __ Branch(&heap_number, eq, scratch1, Operand(at));  // HeapNumber map? | 
 |  3857     // Check for undefined. Undefined is converted to zero for truncating | 
 |  3858     // conversions. | 
 |  3859     __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 
 |  3860     DeoptimizeIf(ne, instr->environment(), input_reg, Operand(at)); | 
 |  3861     ASSERT(ToRegister(instr->result()).is(input_reg)); | 
 |  3862     __ mov(input_reg, zero_reg); | 
 |  3863     __ Branch(&done); | 
 |  3864  | 
 |  3865     __ bind(&heap_number); | 
 |  3866     __ ldc1(double_scratch2, | 
 |  3867             FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | 
 |  3868     __ EmitECMATruncate(input_reg, | 
 |  3869                         double_scratch2, | 
 |  3870                         single_scratch, | 
 |  3871                         scratch1, | 
 |  3872                         scratch2, | 
 |  3873                         scratch3); | 
 |  3874   } else { | 
 |  3875     // Deoptimize if we don't have a heap number. | 
 |  3876     DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at)); | 
 |  3877  | 
 |  3878     // Load the double value. | 
 |  3879     __ ldc1(double_scratch, | 
 |  3880             FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | 
 |  3881  | 
 |  3882     Register except_flag = scratch2; | 
 |  3883     __ EmitFPUTruncate(kRoundToZero, | 
 |  3884                        single_scratch, | 
 |  3885                        double_scratch, | 
 |  3886                        scratch1, | 
 |  3887                        except_flag, | 
 |  3888                        kCheckForInexactConversion); | 
 |  3889  | 
 |  3890     // Deopt if the operation did not succeed. | 
 |  3891     DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg)); | 
 |  3892  | 
 |  3893     // Load the result. | 
 |  3894     __ mfc1(input_reg, single_scratch); | 
 |  3895  | 
 |  3896     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 
 |  3897       __ Branch(&done, ne, input_reg, Operand(zero_reg)); | 
 |  3898  | 
 |  3899       __ mfc1(scratch1, double_scratch.high()); | 
 |  3900       __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | 
 |  3901       DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg)); | 
 |  3902     } | 
 |  3903   } | 
 |  3904   __ bind(&done); | 
 |  3905 } | 
 |  3906  | 
 |  3907  | 
 |  3908 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { | 
 |  3909   class DeferredTaggedToI: public LDeferredCode { | 
 |  3910    public: | 
 |  3911     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) | 
 |  3912         : LDeferredCode(codegen), instr_(instr) { } | 
 |  3913     virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); } | 
 |  3914     virtual LInstruction* instr() { return instr_; } | 
 |  3915    private: | 
 |  3916     LTaggedToI* instr_; | 
 |  3917   }; | 
 |  3918  | 
 |  3919   LOperand* input = instr->InputAt(0); | 
 |  3920   ASSERT(input->IsRegister()); | 
 |  3921   ASSERT(input->Equals(instr->result())); | 
 |  3922  | 
 |  3923   Register input_reg = ToRegister(input); | 
 |  3924  | 
 |  3925   DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr); | 
 |  3926  | 
 |  3927   // Let the deferred code handle the HeapObject case. | 
 |  3928   __ JumpIfNotSmi(input_reg, deferred->entry()); | 
 |  3929  | 
 |  3930   // Smi to int32 conversion. | 
 |  3931   __ SmiUntag(input_reg); | 
 |  3932   __ bind(deferred->exit()); | 
 |  3933 } | 
 |  3934  | 
 |  3935  | 
 |  3936 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { | 
 |  3937   LOperand* input = instr->InputAt(0); | 
 |  3938   ASSERT(input->IsRegister()); | 
 |  3939   LOperand* result = instr->result(); | 
 |  3940   ASSERT(result->IsDoubleRegister()); | 
 |  3941  | 
 |  3942   Register input_reg = ToRegister(input); | 
 |  3943   DoubleRegister result_reg = ToDoubleRegister(result); | 
 |  3944  | 
 |  3945   EmitNumberUntagD(input_reg, result_reg, | 
 |  3946                    instr->hydrogen()->deoptimize_on_undefined(), | 
 |  3947                    instr->environment()); | 
 |  3948 } | 
 |  3949  | 
 |  3950  | 
 |  3951 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { | 
 |  3952   Register result_reg = ToRegister(instr->result()); | 
 |  3953   Register scratch1 = scratch0(); | 
 |  3954   Register scratch2 = ToRegister(instr->TempAt(0)); | 
 |  3955   DoubleRegister double_input = ToDoubleRegister(instr->InputAt(0)); | 
 |  3956   DoubleRegister double_scratch = double_scratch0(); | 
 |  3957   FPURegister single_scratch = double_scratch0().low(); | 
 |  3958  | 
 |  3959   if (instr->truncating()) { | 
 |  3960     Register scratch3 = ToRegister(instr->TempAt(1)); | 
 |  3961     __ EmitECMATruncate(result_reg, | 
 |  3962                         double_input, | 
 |  3963                         single_scratch, | 
 |  3964                         scratch1, | 
 |  3965                         scratch2, | 
 |  3966                         scratch3); | 
 |  3967   } else { | 
 |  3968     Register except_flag = scratch2; | 
 |  3969  | 
 |  3970     __ EmitFPUTruncate(kRoundToMinusInf, | 
 |  3971                        single_scratch, | 
 |  3972                        double_input, | 
 |  3973                        scratch1, | 
 |  3974                        except_flag, | 
 |  3975                        kCheckForInexactConversion); | 
 |  3976  | 
 |  3977     // Deopt if the operation did not succeed (except_flag != 0). | 
 |  3978     DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg)); | 
 |  3979  | 
 |  3980     // Load the result. | 
 |  3981     __ mfc1(result_reg, single_scratch); | 
 |  3982   } | 
 |  3983 } | 
 |  3984  | 
 |  3985  | 
 |  3986 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { | 
 |  3987   LOperand* input = instr->InputAt(0); | 
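 |         // Deoptimize if the low tag bit is set, i.e. the input is not a smi. | 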
 |  3988   __ And(at, ToRegister(input), Operand(kSmiTagMask)); | 
 |  3989   DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg)); | 
 |  3990 } | 
 |  3991  | 
 |  3992  | 
 |  3993 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { | 
 |  3994   LOperand* input = instr->InputAt(0); | 
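 |         // Deoptimize if the low tag bit is clear, i.e. the input is a smi. | 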
 |  3995   __ And(at, ToRegister(input), Operand(kSmiTagMask)); | 
 |  3996   DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg)); | 
 |  3997 } | 
 |  3998  | 
 |  3999  | 
 |  4000 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { | 
 |  4001   Register input = ToRegister(instr->InputAt(0)); | 
 |  4002   Register scratch = scratch0(); | 
 |  4003  | 
 |  4004   __ GetObjectType(input, scratch, scratch); | 
 |  4005  | 
 |  4006   if (instr->hydrogen()->is_interval_check()) { | 
 |  4007     InstanceType first; | 
 |  4008     InstanceType last; | 
 |  4009     instr->hydrogen()->GetCheckInterval(&first, &last); | 
 |  4010  | 
 |  4011     // If there is only one type in the interval, check for equality. | 
 |  4012     if (first == last) { | 
 |  4013       DeoptimizeIf(ne, instr->environment(), scratch, Operand(first)); | 
 |  4014     } else { | 
 |  4015       DeoptimizeIf(lo, instr->environment(), scratch, Operand(first)); | 
 |  4016       // Omit check for the last type. | 
 |  4017       if (last != LAST_TYPE) { | 
 |  4018         DeoptimizeIf(hi, instr->environment(), scratch, Operand(last)); | 
 |  4019       } | 
 |  4020     } | 
 |  4021   } else { | 
 |  4022     uint8_t mask; | 
 |  4023     uint8_t tag; | 
 |  4024     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); | 
 |  4025  | 
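 |           // A single-bit mask needs only an And: the tag is either zero (the | 
 |           // bit must be clear) or equal to the mask (the bit must be set). | 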
 |  4026     if (IsPowerOf2(mask)) { | 
 |  4027       ASSERT(tag == 0 || IsPowerOf2(tag)); | 
 |  4028       __ And(at, scratch, mask); | 
 |  4029       DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(), | 
 |  4030           at, Operand(zero_reg)); | 
 |  4031     } else { | 
 |  4032       __ And(scratch, scratch, Operand(mask)); | 
 |  4033       DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag)); | 
 |  4034     } | 
 |  4035   } | 
 |  4036 } | 
 |  4037  | 
 |  4038  | 
 |  4039 void LCodeGen::DoCheckFunction(LCheckFunction* instr) { | 
 |  4040   ASSERT(instr->InputAt(0)->IsRegister()); | 
 |  4041   Register reg = ToRegister(instr->InputAt(0)); | 
 |  4042   DeoptimizeIf(ne, instr->environment(), reg, | 
 |  4043                Operand(instr->hydrogen()->target())); | 
 |  4044 } | 
 |  4045  | 
 |  4046  | 
 |  4047 void LCodeGen::DoCheckMap(LCheckMap* instr) { | 
 |  4048   Register scratch = scratch0(); | 
 |  4049   LOperand* input = instr->InputAt(0); | 
 |  4050   ASSERT(input->IsRegister()); | 
 |  4051   Register reg = ToRegister(input); | 
 |  4052   __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); | 
 |  4053   DeoptimizeIf(ne, | 
 |  4054                instr->environment(), | 
 |  4055                scratch, | 
 |  4056                Operand(instr->hydrogen()->map())); | 
 |  4057 } | 
 |  4058  | 
 |  4059  | 
 |  4060 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 
 |  4061   DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); | 
 |  4062   Register result_reg = ToRegister(instr->result()); | 
 |  4063   DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0)); | 
 |  4064   __ ClampDoubleToUint8(result_reg, value_reg, temp_reg); | 
 |  4065 } | 
 |  4066  | 
 |  4067  | 
 |  4068 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { | 
 |  4069   Register unclamped_reg = ToRegister(instr->unclamped()); | 
 |  4070   Register result_reg = ToRegister(instr->result()); | 
 |  4071   __ ClampUint8(result_reg, unclamped_reg); | 
 |  4072 } | 
 |  4073  | 
 |  4074  | 
 |  4075 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { | 
 |  4076   Register scratch = scratch0(); | 
 |  4077   Register input_reg = ToRegister(instr->unclamped()); | 
 |  4078   Register result_reg = ToRegister(instr->result()); | 
 |  4079   DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0)); | 
 |  4080   Label is_smi, done, heap_number; | 
 |  4081  | 
 |  4082   // Both smi and heap number cases are handled. | 
 |  4083   __ JumpIfSmi(input_reg, &is_smi); | 
 |  4084  | 
 |  4085   // Check for heap number. | 
 |  4086   __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 
 |  4087   __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map())); | 
 |  4088  | 
 |  4089   // Check for undefined. Undefined is converted to zero for clamping | 
 |  4090   // conversions. | 
 |  4091   DeoptimizeIf(ne, instr->environment(), input_reg, | 
 |  4092                Operand(factory()->undefined_value())); | 
 |  4093   __ mov(result_reg, zero_reg); | 
 |  4094   __ jmp(&done); | 
 |  4095  | 
 |  4096   // Heap number: clamp the double value. | 
 |  4097   __ bind(&heap_number); | 
 |  4098   __ ldc1(double_scratch0(), FieldMemOperand(input_reg, | 
 |  4099                                              HeapNumber::kValueOffset)); | 
 |  4100   __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg); | 
 |  4101   __ jmp(&done); | 
 |  4102  | 
 |  4103   // Smi: untag and clamp the integer value. | 
 |  4104   __ bind(&is_smi); | 
 |  4105   __ SmiUntag(scratch, input_reg); | 
 |  4106   __ ClampUint8(result_reg, scratch); | 
 |  4107  | 
 |  4108   __ bind(&done); | 
 |  4109 } | 
 |  4110  | 
 |  4111  | 
 |  4112 void LCodeGen::LoadHeapObject(Register result, | 
 |  4113                               Handle<HeapObject> object) { | 
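 |         // Objects in new space may move during GC, so load them indirectly | 
 |         // through a JSGlobalPropertyCell instead of embedding the pointer. | 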
 |  4114   if (heap()->InNewSpace(*object)) { | 
 |  4115     Handle<JSGlobalPropertyCell> cell = | 
 |  4116         factory()->NewJSGlobalPropertyCell(object); | 
 |  4117     __ li(result, Operand(cell)); | 
 |  4118     __ lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset)); | 
 |  4119   } else { | 
 |  4120     __ li(result, Operand(object)); | 
 |  4121   } | 
 |  4122 } | 
 |  4123  | 
 |  4124  | 
 |  4125 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { | 
 |  4126   Register temp1 = ToRegister(instr->TempAt(0)); | 
 |  4127   Register temp2 = ToRegister(instr->TempAt(1)); | 
 |  4128  | 
 |  4129   Handle<JSObject> holder = instr->holder(); | 
 |  4130   Handle<JSObject> current_prototype = instr->prototype(); | 
 |  4131  | 
 |  4132   // Load prototype object. | 
 |  4133   LoadHeapObject(temp1, current_prototype); | 
 |  4134  | 
 |  4135   // Check prototype maps up to the holder. | 
 |  4136   while (!current_prototype.is_identical_to(holder)) { | 
 |  4137     __ lw(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset)); | 
 |  4138     DeoptimizeIf(ne, | 
 |  4139                  instr->environment(), | 
 |  4140                  temp2, | 
 |  4141                  Operand(Handle<Map>(current_prototype->map()))); | 
 |  4142     current_prototype = | 
 |  4143         Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype())); | 
 |  4144     // Load next prototype object. | 
 |  4145     LoadHeapObject(temp1, current_prototype); | 
 |  4146   } | 
 |  4147  | 
 |  4148   // Check the holder map. | 
 |  4149   __ lw(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset)); | 
 |  4150   DeoptimizeIf(ne, | 
 |  4151                instr->environment(), | 
 |  4152                temp2, | 
 |  4153                Operand(Handle<Map>(current_prototype->map()))); | 
 |  4154 } | 
 |  4155  | 
 |  4156  | 
 |  4157 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { | 
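 |         // Push the literals array, the literal index and the constant elements | 
 |         // as arguments for the stub or runtime call below. | 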
 |  4158   __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); | 
 |  4159   __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset)); | 
 |  4160   __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); | 
 |  4161   __ li(a1, Operand(instr->hydrogen()->constant_elements())); | 
 |  4162   __ Push(a3, a2, a1); | 
 |  4163  | 
 |  4164   // Pick the right runtime function or stub to call. | 
 |  4165   int length = instr->hydrogen()->length(); | 
 |  4166   if (instr->hydrogen()->IsCopyOnWrite()) { | 
 |  4167     ASSERT(instr->hydrogen()->depth() == 1); | 
 |  4168     FastCloneShallowArrayStub::Mode mode = | 
 |  4169         FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS; | 
 |  4170     FastCloneShallowArrayStub stub(mode, length); | 
 |  4171     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 
 |  4172   } else if (instr->hydrogen()->depth() > 1) { | 
 |  4173     CallRuntime(Runtime::kCreateArrayLiteral, 3, instr); | 
 |  4174   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { | 
 |  4175     CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr); | 
 |  4176   } else { | 
 |  4177     FastCloneShallowArrayStub::Mode mode = | 
 |  4178         FastCloneShallowArrayStub::CLONE_ELEMENTS; | 
 |  4179     FastCloneShallowArrayStub stub(mode, length); | 
 |  4180     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 
 |  4181   } | 
 |  4182 } | 
 |  4183  | 
 |  4184  | 
 |  4185 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { | 
 |  4186   ASSERT(ToRegister(instr->result()).is(v0)); | 
 |  4187   __ lw(t0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); | 
 |  4188   __ lw(t0, FieldMemOperand(t0, JSFunction::kLiteralsOffset)); | 
 |  4189   __ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); | 
 |  4190   __ li(a2, Operand(instr->hydrogen()->constant_properties())); | 
 |  4191   __ li(a1, Operand(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0))); | 
 |  4192   __ Push(t0, a3, a2, a1); | 
 |  4193  | 
 |  4194   // Pick the right runtime function to call. | 
 |  4195   if (instr->hydrogen()->depth() > 1) { | 
 |  4196     CallRuntime(Runtime::kCreateObjectLiteral, 4, instr); | 
 |  4197   } else { | 
 |  4198     CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr); | 
 |  4199   } | 
 |  4200 } | 
 |  4201  | 
 |  4202  | 
 |  4203 void LCodeGen::DoToFastProperties(LToFastProperties* instr) { | 
 |  4204   ASSERT(ToRegister(instr->InputAt(0)).is(a0)); | 
 |  4205   ASSERT(ToRegister(instr->result()).is(v0)); | 
 |  4206   __ push(a0); | 
 |  4207   CallRuntime(Runtime::kToFastProperties, 1, instr); | 
 |  4208 } | 
 |  4209  | 
 |  4210  | 
 |  4211 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { | 
 |  4212   Label materialized; | 
 |  4213   // Registers will be used as follows: | 
 |  4214   // a3 = JS function. | 
 |  4215   // t3 = literals array. | 
 |  4216   // a1 = regexp literal. | 
 |  4217   // a0 = regexp literal clone. | 
 |  4218   // a2 and t0-t2 are used as temporaries. | 
 |  4219   __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); | 
 |  4220   __ lw(t3, FieldMemOperand(a3, JSFunction::kLiteralsOffset)); | 
 |  4221   int literal_offset = FixedArray::kHeaderSize + | 
 |  4222       instr->hydrogen()->literal_index() * kPointerSize; | 
 |  4223   __ lw(a1, FieldMemOperand(t3, literal_offset)); | 
 |  4224   __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 
 |  4225   __ Branch(&materialized, ne, a1, Operand(at)); | 
 |  4226  | 
 |  4227   // Create the regexp literal using the runtime function. | 
 |  4228   // Result will be in v0. | 
 |  4229   __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); | 
 |  4230   __ li(t1, Operand(instr->hydrogen()->pattern())); | 
 |  4231   __ li(t0, Operand(instr->hydrogen()->flags())); | 
 |  4232   __ Push(t3, t2, t1, t0); | 
 |  4233   CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr); | 
 |  4234   __ mov(a1, v0); | 
 |  4235  | 
 |  4236   __ bind(&materialized); | 
 |  4237   int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; | 
 |  4238   Label allocated, runtime_allocate; | 
 |  4239  | 
 |  4240   __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT); | 
 |  4241   __ jmp(&allocated); | 
 |  4242  | 
 |  4243   __ bind(&runtime_allocate); | 
 |  4244   __ li(a0, Operand(Smi::FromInt(size))); | 
 |  4245   __ Push(a1, a0); | 
 |  4246   CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); | 
 |  4247   __ pop(a1); | 
 |  4248  | 
 |  4249   __ bind(&allocated); | 
 |  4250   // Copy the content into the newly allocated memory. | 
 |  4251   // (Unroll copy loop once for better throughput). | 
 |  4252   for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) { | 
 |  4253     __ lw(a3, FieldMemOperand(a1, i)); | 
 |  4254     __ lw(a2, FieldMemOperand(a1, i + kPointerSize)); | 
 |  4255     __ sw(a3, FieldMemOperand(v0, i)); | 
 |  4256     __ sw(a2, FieldMemOperand(v0, i + kPointerSize)); | 
 |  4257   } | 
 |  4258   if ((size % (2 * kPointerSize)) != 0) { | 
 |  4259     __ lw(a3, FieldMemOperand(a1, size - kPointerSize)); | 
 |  4260     __ sw(a3, FieldMemOperand(v0, size - kPointerSize)); | 
 |  4261   } | 
 |  4262 } | 
 |  4263  | 
 |  4264  | 
 |  4265 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { | 
 |  4266   // Use the fast case closure allocation code that allocates in new | 
 |  4267   // space for nested functions that don't need literals cloning. | 
 |  4268   Handle<SharedFunctionInfo> shared_info = instr->shared_info(); | 
 |  4269   bool pretenure = instr->hydrogen()->pretenure(); | 
 |  4270   if (!pretenure && shared_info->num_literals() == 0) { | 
 |  4271     FastNewClosureStub stub( | 
 |  4272         shared_info->strict_mode() ? kStrictMode : kNonStrictMode); | 
 |  4273     __ li(a1, Operand(shared_info)); | 
 |  4274     __ push(a1); | 
 |  4275     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 
 |  4276   } else { | 
 |  4277     __ li(a2, Operand(shared_info)); | 
 |  4278     __ li(a1, Operand(pretenure | 
 |  4279                        ? factory()->true_value() | 
 |  4280                        : factory()->false_value())); | 
 |  4281     __ Push(cp, a2, a1); | 
 |  4282     CallRuntime(Runtime::kNewClosure, 3, instr); | 
 |  4283   } | 
 |  4284 } | 
 |  4285  | 
 |  4286  | 
 |  4287 void LCodeGen::DoTypeof(LTypeof* instr) { | 
 |  4288   ASSERT(ToRegister(instr->result()).is(v0)); | 
 |  4289   Register input = ToRegister(instr->InputAt(0)); | 
 |  4290   __ push(input); | 
 |  4291   CallRuntime(Runtime::kTypeof, 1, instr); | 
 |  4292 } | 
 |  4293  | 
 |  4294  | 
 |  4295 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { | 
 |  4296   Register input = ToRegister(instr->InputAt(0)); | 
 |  4297   int true_block = chunk_->LookupDestination(instr->true_block_id()); | 
 |  4298   int false_block = chunk_->LookupDestination(instr->false_block_id()); | 
 |  4299   Label* true_label = chunk_->GetAssemblyLabel(true_block); | 
 |  4300   Label* false_label = chunk_->GetAssemblyLabel(false_block); | 
 |  4301  | 
 |  4302   Register cmp1 = no_reg; | 
 |  4303   Operand cmp2 = Operand(no_reg); | 
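 |         // EmitTypeofIs fills cmp1 and cmp2 with the operands of the final | 
 |         // comparison. | 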
 |  4304  | 
 |  4305   Condition final_branch_condition = EmitTypeofIs(true_label, | 
 |  4306                                                   false_label, | 
 |  4307                                                   input, | 
 |  4308                                                   instr->type_literal(), | 
 |  4309                                                   cmp1, | 
 |  4310                                                   cmp2); | 
 |  4311  | 
 |  4312   ASSERT(cmp1.is_valid()); | 
 |  4313   ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid()); | 
 |  4314  | 
 |  4315   if (final_branch_condition != kNoCondition) { | 
 |  4316     EmitBranch(true_block, false_block, final_branch_condition, cmp1, cmp2); | 
 |  4317   } | 
 |  4318 } | 
 |  4319  | 
 |  4320  | 
 |  4321 Condition LCodeGen::EmitTypeofIs(Label* true_label, | 
 |  4322                                  Label* false_label, | 
 |  4323                                  Register input, | 
 |  4324                                  Handle<String> type_name, | 
 |  4325                                  Register& cmp1, | 
 |  4326                                  Operand& cmp2) { | 
 |  4327   // This function makes heavy use of branch delay slots, filling them with | 
 |  4328   // loads of values that are safe regardless of the type of the input | 
 |  4329   // register. | 
 |  4330   Condition final_branch_condition = kNoCondition; | 
 |  4331   Register scratch = scratch0(); | 
 |  4332   if (type_name->Equals(heap()->number_symbol())) { | 
 |  4333     __ JumpIfSmi(input, true_label); | 
 |  4334     __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset)); | 
 |  4335     __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 
 |  4336     cmp1 = input; | 
 |  4337     cmp2 = Operand(at); | 
 |  4338     final_branch_condition = eq; | 
 |  4339  | 
 |  4340   } else if (type_name->Equals(heap()->string_symbol())) { | 
 |  4341     __ JumpIfSmi(input, false_label); | 
 |  4342     __ GetObjectType(input, input, scratch); | 
 |  4343     __ Branch(USE_DELAY_SLOT, false_label, | 
 |  4344               ge, scratch, Operand(FIRST_NONSTRING_TYPE)); | 
 |  4345     // input now holds the map, so the bit field load below is safe in the | 
 |  4346     // delay slot even when the branch above is taken. | 
 |  4347     __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset)); | 
 |  4348     __ And(at, at, 1 << Map::kIsUndetectable); | 
 |  4349     cmp1 = at; | 
 |  4350     cmp2 = Operand(zero_reg); | 
 |  4351     final_branch_condition = eq; | 
 |  4352  | 
 |  4353   } else if (type_name->Equals(heap()->boolean_symbol())) { | 
 |  4354     __ LoadRoot(at, Heap::kTrueValueRootIndex); | 
 |  4355     __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input)); | 
 |  4356     __ LoadRoot(at, Heap::kFalseValueRootIndex); | 
 |  4357     cmp1 = at; | 
 |  4358     cmp2 = Operand(input); | 
 |  4359     final_branch_condition = eq; | 
 |  4360  | 
 |  4361   } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) { | 
 |  4362     __ LoadRoot(at, Heap::kNullValueRootIndex); | 
 |  4363     cmp1 = at; | 
 |  4364     cmp2 = Operand(input); | 
 |  4365     final_branch_condition = eq; | 
 |  4366  | 
 |  4367   } else if (type_name->Equals(heap()->undefined_symbol())) { | 
 |  4368     __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 
 |  4369     __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input)); | 
 |  4370     // The first instruction of JumpIfSmi is an And, which is safe to | 
 |  4371     // execute in the delay slot. | 
 |  4372     __ JumpIfSmi(input, false_label); | 
 |  4373     // Check for undetectable objects => true. | 
 |  4374     __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset)); | 
 |  4375     __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset)); | 
 |  4376     __ And(at, at, 1 << Map::kIsUndetectable); | 
 |  4377     cmp1 = at; | 
 |  4378     cmp2 = Operand(zero_reg); | 
 |  4379     final_branch_condition = ne; | 
 |  4380  | 
 |  4381   } else if (type_name->Equals(heap()->function_symbol())) { | 
 |  4382     STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); | 
 |  4383     __ JumpIfSmi(input, false_label); | 
 |  4384     __ GetObjectType(input, scratch, input); | 
 |  4385     __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE)); | 
 |  4386     cmp1 = input; | 
 |  4387     cmp2 = Operand(JS_FUNCTION_PROXY_TYPE); | 
 |  4388     final_branch_condition = eq; | 
 |  4389  | 
 |  4390   } else if (type_name->Equals(heap()->object_symbol())) { | 
 |  4391     __ JumpIfSmi(input, false_label); | 
 |  4392     if (!FLAG_harmony_typeof) { | 
 |  4393       __ LoadRoot(at, Heap::kNullValueRootIndex); | 
 |  4394       __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input)); | 
 |  4395     } | 
 |  4396     // input is an object, so GetObjectType is safe in the delay slot. | 
 |  4397     __ GetObjectType(input, input, scratch); | 
 |  4398     __ Branch(USE_DELAY_SLOT, false_label, | 
 |  4399               lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); | 
 |  4400     // Still an object, so the InstanceType can be loaded. | 
 |  4401     __ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset)); | 
 |  4402     __ Branch(USE_DELAY_SLOT, false_label, | 
 |  4403               gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); | 
 |  4404     // Still an object, so the BitField can be loaded. | 
 |  4405     // Check for undetectable objects => false. | 
 |  4406     __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset)); | 
 |  4407     __ And(at, at, 1 << Map::kIsUndetectable); | 
 |  4408     cmp1 = at; | 
 |  4409     cmp2 = Operand(zero_reg); | 
 |  4410     final_branch_condition = eq; | 
 |  4411  | 
 |  4412   } else { | 
 |  4413     cmp1 = at; | 
 |  4414     cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion. | 
 |  4415     __ Branch(false_label); | 
 |  4416   } | 
 |  4417  | 
 |  4418   return final_branch_condition; | 
 |  4419 } | 
 |  4420  | 
 |  4421  | 
 |  4422 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) { | 
 |  4423   Register temp1 = ToRegister(instr->TempAt(0)); | 
 |  4424   int true_block = chunk_->LookupDestination(instr->true_block_id()); | 
 |  4425   int false_block = chunk_->LookupDestination(instr->false_block_id()); | 
 |  4426  | 
 |  4427   EmitIsConstructCall(temp1, scratch0()); | 
 |  4428  | 
 |  4429   EmitBranch(true_block, false_block, eq, temp1, | 
 |  4430              Operand(Smi::FromInt(StackFrame::CONSTRUCT))); | 
 |  4431 } | 
 |  4432  | 
 |  4433  | 
 |  4434 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) { | 
 |  4435   ASSERT(!temp1.is(temp2)); | 
 |  4436   // Get the frame pointer for the calling frame. | 
 |  4437   __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 
 |  4438  | 
 |  4439   // Skip the arguments adaptor frame if it exists. | 
 |  4440   Label check_frame_marker; | 
 |  4441   __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset)); | 
 |  4442   __ Branch(&check_frame_marker, ne, temp2, | 
 |  4443             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 
 |  4444   __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset)); | 
 |  4445  | 
 |  4446   // Check the marker in the calling frame. | 
 |  4447   __ bind(&check_frame_marker); | 
 |  4448   __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset)); | 
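 |         // temp1 now holds the frame marker; the caller compares it against | 
 |         // Smi::FromInt(StackFrame::CONSTRUCT). | 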
 |  4449 } | 
 |  4450  | 
 |  4451  | 
 |  4452 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { | 
 |  4453   // No code for lazy bailout instruction. Used to capture environment after a | 
 |  4454   // call for populating the safepoint data with deoptimization data. | 
 |  4455 } | 
 |  4456  | 
 |  4457  | 
 |  4458 void LCodeGen::DoDeoptimize(LDeoptimize* instr) { | 
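 |         // An 'al' condition with zero_reg == zero_reg deoptimizes | 
 |         // unconditionally. | 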
 |  4459   DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg)); | 
 |  4460 } | 
 |  4461  | 
 |  4462  | 
 |  4463 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) { | 
 |  4464   Register object = ToRegister(instr->object()); | 
 |  4465   Register key = ToRegister(instr->key()); | 
 |  4466   Register strict = scratch0(); | 
 |  4467   __ li(strict, Operand(Smi::FromInt(strict_mode_flag()))); | 
 |  4468   __ Push(object, key, strict); | 
 |  4469   ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); | 
 |  4470   LPointerMap* pointers = instr->pointer_map(); | 
 |  4471   LEnvironment* env = instr->deoptimization_environment(); | 
 |  4472   RecordPosition(pointers->position()); | 
 |  4473   RegisterEnvironmentForDeoptimization(env); | 
 |  4474   SafepointGenerator safepoint_generator(this, | 
 |  4475                                          pointers, | 
 |  4476                                          env->deoptimization_index()); | 
 |  4477   __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator); | 
 |  4478 } | 
 |  4479  | 
 |  4480  | 
 |  4481 void LCodeGen::DoIn(LIn* instr) { | 
 |  4482   Register obj = ToRegister(instr->object()); | 
 |  4483   Register key = ToRegister(instr->key()); | 
 |  4484   __ Push(key, obj); | 
 |  4485   ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); | 
 |  4486   LPointerMap* pointers = instr->pointer_map(); | 
 |  4487   LEnvironment* env = instr->deoptimization_environment(); | 
 |  4488   RecordPosition(pointers->position()); | 
 |  4489   RegisterEnvironmentForDeoptimization(env); | 
 |  4490   SafepointGenerator safepoint_generator(this, | 
 |  4491                                          pointers, | 
 |  4492                                          env->deoptimization_index()); | 
 |  4493   __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator); | 
 |  4494 } | 
 |  4495  | 
 |  4496  | 
 |  4497 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { | 
 |  4498   { | 
 |  4499     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); | 
 |  4500     __ CallRuntimeSaveDoubles(Runtime::kStackGuard); | 
 |  4501     RegisterLazyDeoptimization( | 
 |  4502         instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); | 
 |  4503   } | 
 |  4504  | 
 |  4505   // The gap code includes the restoring of the safepoint registers. | 
 |  4506   int pc = masm()->pc_offset(); | 
 |  4507   safepoints_.SetPcAfterGap(pc); | 
 |  4508 } | 
 |  4509  | 
 |  4510  | 
 |  4511 void LCodeGen::DoStackCheck(LStackCheck* instr) { | 
 |  4512   class DeferredStackCheck: public LDeferredCode { | 
 |  4513    public: | 
 |  4514     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) | 
 |  4515         : LDeferredCode(codegen), instr_(instr) { } | 
 |  4516     virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); } | 
 |  4517     virtual LInstruction* instr() { return instr_; } | 
 |  4518    private: | 
 |  4519     LStackCheck* instr_; | 
 |  4520   }; | 
 |  4521  | 
 |  4522   if (instr->hydrogen()->is_function_entry()) { | 
 |  4523     // Perform stack overflow check. | 
 |  4524     Label done; | 
 |  4525     __ LoadRoot(at, Heap::kStackLimitRootIndex); | 
 |  4526     __ Branch(&done, hs, sp, Operand(at)); | 
 |  4527     StackCheckStub stub; | 
 |  4528     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 
 |  4529     __ bind(&done); | 
 |  4530   } else { | 
 |  4531     ASSERT(instr->hydrogen()->is_backwards_branch()); | 
 |  4532     // Perform stack overflow check if this goto needs it before jumping. | 
 |  4533     DeferredStackCheck* deferred_stack_check = | 
 |  4534         new DeferredStackCheck(this, instr); | 
 |  4535     __ LoadRoot(at, Heap::kStackLimitRootIndex); | 
 |  4536     __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at)); | 
 |  4537     __ bind(instr->done_label()); | 
 |  4538     deferred_stack_check->SetExit(instr->done_label()); | 
 |  4539   } | 
 |  4540 } | 
 |  4541  | 
 |  4542  | 
 |  4543 void LCodeGen::DoOsrEntry(LOsrEntry* instr) { | 
 |  4544   // This is a pseudo-instruction that ensures that the environment here is | 
 |  4545   // properly registered for deoptimization and records the assembler's PC | 
 |  4546   // offset. | 
 |  4547   LEnvironment* environment = instr->environment(); | 
 |  4548   environment->SetSpilledRegisters(instr->SpilledRegisterArray(), | 
 |  4549                                    instr->SpilledDoubleRegisterArray()); | 
 |  4550  | 
 |  4551   // If the environment were already registered, we would have no way of | 
 |  4552   // backpatching it with the spill slot operands. | 
 |  4553   ASSERT(!environment->HasBeenRegistered()); | 
 |  4554   RegisterEnvironmentForDeoptimization(environment); | 
 |  4555   ASSERT(osr_pc_offset_ == -1); | 
 |  4556   osr_pc_offset_ = masm()->pc_offset(); | 
 |  4557 } | 
 |  4558  | 
 |  4559  | 
 |  4560 #undef __ | 
 |  4561  | 
 |  4562 } }  // namespace v8::internal | 