| OLD | NEW | 
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. | 
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without | 
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are | 
| 4 // met: | 4 // met: | 
| 5 // | 5 // | 
| 6 //     * Redistributions of source code must retain the above copyright | 6 //     * Redistributions of source code must retain the above copyright | 
| 7 //       notice, this list of conditions and the following disclaimer. | 7 //       notice, this list of conditions and the following disclaimer. | 
| 8 //     * Redistributions in binary form must reproduce the above | 8 //     * Redistributions in binary form must reproduce the above | 
| 9 //       copyright notice, this list of conditions and the following | 9 //       copyright notice, this list of conditions and the following | 
| 10 //       disclaimer in the documentation and/or other materials provided | 10 //       disclaimer in the documentation and/or other materials provided | 
| (...skipping 47 matching lines...) | |
| 58   Safepoint::DeoptMode deopt_mode_; | 58   Safepoint::DeoptMode deopt_mode_; | 
| 59 }; | 59 }; | 
| 60 | 60 | 
| 61 | 61 | 
| 62 #define __ masm()-> | 62 #define __ masm()-> | 
| 63 | 63 | 
| 64 bool LCodeGen::GenerateCode() { | 64 bool LCodeGen::GenerateCode() { | 
| 65   HPhase phase("Z_Code generation", chunk()); | 65   HPhase phase("Z_Code generation", chunk()); | 
| 66   ASSERT(is_unused()); | 66   ASSERT(is_unused()); | 
| 67   status_ = GENERATING; | 67   status_ = GENERATING; | 
|  | 68   CpuFeatures::Scope scope1(VFP3); | 
|  | 69   CpuFeatures::Scope scope2(ARMv7); | 
| 68 | 70 | 
| 69   CodeStub::GenerateFPStubs(); | 71   CodeStub::GenerateFPStubs(); | 
| 70 | 72 | 
| 71   // Open a frame scope to indicate that there is a frame on the stack.  The | 73   // Open a frame scope to indicate that there is a frame on the stack.  The | 
| 72   // NONE indicates that the scope shouldn't actually generate code to set up | 74   // NONE indicates that the scope shouldn't actually generate code to set up | 
| 73   // the frame (that is done in GeneratePrologue). | 75   // the frame (that is done in GeneratePrologue). | 
| 74   FrameScope frame_scope(masm_, StackFrame::NONE); | 76   FrameScope frame_scope(masm_, StackFrame::NONE); | 
| 75 | 77 | 
| 76   return GeneratePrologue() && | 78   return GeneratePrologue() && | 
| 77       GenerateBody() && | 79       GenerateBody() && | 
| (...skipping 31 matching lines...) | |
| 109   size_t length = builder.position(); | 111   size_t length = builder.position(); | 
| 110   Vector<char> copy = Vector<char>::New(length + 1); | 112   Vector<char> copy = Vector<char>::New(length + 1); | 
| 111   memcpy(copy.start(), builder.Finalize(), copy.length()); | 113   memcpy(copy.start(), builder.Finalize(), copy.length()); | 
| 112   masm()->RecordComment(copy.start()); | 114   masm()->RecordComment(copy.start()); | 
| 113 } | 115 } | 
| 114 | 116 | 
| 115 | 117 | 
| 116 bool LCodeGen::GeneratePrologue() { | 118 bool LCodeGen::GeneratePrologue() { | 
| 117   ASSERT(is_generating()); | 119   ASSERT(is_generating()); | 
| 118 | 120 | 
| 119   if (info()->IsOptimizing()) { | 121   ProfileEntryHookStub::MaybeCallEntryHook(masm_); | 
| 120     ProfileEntryHookStub::MaybeCallEntryHook(masm_); |  | 
| 121 | 122 | 
| 122 #ifdef DEBUG | 123 #ifdef DEBUG | 
| 123     if (strlen(FLAG_stop_at) > 0 && | 124   if (strlen(FLAG_stop_at) > 0 && | 
| 124         info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { | 125       info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { | 
| 125       __ stop("stop_at"); | 126     __ stop("stop_at"); | 
| 126     } | 127   } | 
| 127 #endif | 128 #endif | 
| 128 | 129 | 
| 129     // r1: Callee's JS function. | 130   // r1: Callee's JS function. | 
| 130     // cp: Callee's context. | 131   // cp: Callee's context. | 
| 131     // fp: Caller's frame pointer. | 132   // fp: Caller's frame pointer. | 
| 132     // lr: Caller's pc. | 133   // lr: Caller's pc. | 
| 133 | 134 | 
| 134     // Strict mode functions and builtins need to replace the receiver | 135   // Strict mode functions and builtins need to replace the receiver | 
| 135     // with undefined when called as functions (without an explicit | 136   // with undefined when called as functions (without an explicit | 
| 136     // receiver object). r5 is zero for method calls and non-zero for | 137   // receiver object). r5 is zero for method calls and non-zero for | 
| 137     // function calls. | 138   // function calls. | 
| 138     if (!info_->is_classic_mode() || info_->is_native()) { | 139   if (!info_->is_classic_mode() || info_->is_native()) { | 
| 139       Label ok; | 140     Label ok; | 
| 140       __ cmp(r5, Operand(0)); | 141     __ cmp(r5, Operand(0)); | 
| 141       __ b(eq, &ok); | 142     __ b(eq, &ok); | 
| 142       int receiver_offset = scope()->num_parameters() * kPointerSize; | 143     int receiver_offset = scope()->num_parameters() * kPointerSize; | 
| 143       __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); | 144     __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); | 
| 144       __ str(r2, MemOperand(sp, receiver_offset)); | 145     __ str(r2, MemOperand(sp, receiver_offset)); | 
| 145       __ bind(&ok); | 146     __ bind(&ok); | 
| 146     } |  | 
| 147   } | 147   } | 
| 148 | 148 | 
|  | 149 | 
| 149   info()->set_prologue_offset(masm_->pc_offset()); | 150   info()->set_prologue_offset(masm_->pc_offset()); | 
| 150   if (NeedsEagerFrame()) { | 151   { | 
| 151     PredictableCodeSizeScope predictible_code_size_scope( | 152     PredictableCodeSizeScope predictible_code_size_scope( | 
| 152         masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize); | 153         masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize); | 
| 153     // The following three instructions must remain together and unmodified | 154     // The following three instructions must remain together and unmodified | 
| 154     // for code aging to work properly. | 155     // for code aging to work properly. | 
| 155     __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); | 156     __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); | 
| 156     // Load undefined value here, so the value is ready for the loop | 157     // Load undefined value here, so the value is ready for the loop | 
| 157     // below. | 158     // below. | 
| 158     __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 159     __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 
| 159     // Adjust FP to point to saved FP. | 160     // Adjust FP to point to saved FP. | 
| 160     __ add(fp, sp, Operand(2 * kPointerSize)); | 161     __ add(fp, sp, Operand(2 * kPointerSize)); | 
| 161     frame_is_built_ = true; |  | 
| 162   } | 162   } | 
| 163 | 163 | 
| 164   // Reserve space for the stack slots needed by the code. | 164   // Reserve space for the stack slots needed by the code. | 
| 165   int slots = GetStackSlotCount(); | 165   int slots = GetStackSlotCount(); | 
| 166   if (slots > 0) { | 166   if (slots > 0) { | 
| 167     if (FLAG_debug_code) { | 167     if (FLAG_debug_code) { | 
| 168       __ mov(r0, Operand(slots)); | 168       __ mov(r0, Operand(slots)); | 
| 169       __ mov(r2, Operand(kSlotsZapValue)); | 169       __ mov(r2, Operand(kSlotsZapValue)); | 
| 170       Label loop; | 170       Label loop; | 
| 171       __ bind(&loop); | 171       __ bind(&loop); | 
| 172       __ push(r2); | 172       __ push(r2); | 
| 173       __ sub(r0, r0, Operand(1), SetCC); | 173       __ sub(r0, r0, Operand(1), SetCC); | 
| 174       __ b(ne, &loop); | 174       __ b(ne, &loop); | 
| 175     } else { | 175     } else { | 
| 176       __ sub(sp,  sp, Operand(slots * kPointerSize)); | 176       __ sub(sp,  sp, Operand(slots * kPointerSize)); | 
| 177     } | 177     } | 
| 178   } | 178   } | 
| 179 | 179 | 
| 180   // Possibly allocate a local context. | 180   // Possibly allocate a local context. | 
| 181   int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; | 181   int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; | 
| 182   if (heap_slots > 0) { | 182   if (heap_slots > 0) { | 
| 183     Comment(";;; Allocate local context"); | 183     Comment(";;; Allocate local context"); | 
| 184     // Argument to NewContext is the function, which is in r1. | 184     // Argument to NewContext is the function, which is in r1. | 
| 185     __ push(r1); | 185     __ push(r1); | 
| 186     if (heap_slots <= FastNewContextStub::kMaximumSlots) { | 186     if (heap_slots <= FastNewContextStub::kMaximumSlots) { | 
| 187       FastNewContextStub stub(heap_slots); | 187       FastNewContextStub stub(heap_slots); | 
| 188       __ CallStub(&stub); | 188       __ CallStub(&stub); | 
| 189     } else { | 189     } else { | 
| 190       __ CallRuntime(Runtime::kNewFunctionContext, 1); | 190       __ CallRuntime(Runtime::kNewFunctionContext, 1); | 
| 191     } | 191     } | 
| (...skipping 15 matching lines...) | |
| 207         __ str(r0, target); | 207         __ str(r0, target); | 
| 208         // Update the write barrier. This clobbers r3 and r0. | 208         // Update the write barrier. This clobbers r3 and r0. | 
| 209         __ RecordWriteContextSlot( | 209         __ RecordWriteContextSlot( | 
| 210             cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs); | 210             cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs); | 
| 211       } | 211       } | 
| 212     } | 212     } | 
| 213     Comment(";;; End allocate local context"); | 213     Comment(";;; End allocate local context"); | 
| 214   } | 214   } | 
| 215 | 215 | 
| 216   // Trace the call. | 216   // Trace the call. | 
| 217   if (FLAG_trace && info()->IsOptimizing()) { | 217   if (FLAG_trace) { | 
| 218     __ CallRuntime(Runtime::kTraceEnter, 0); | 218     __ CallRuntime(Runtime::kTraceEnter, 0); | 
| 219   } | 219   } | 
| 220   return !is_aborted(); | 220   return !is_aborted(); | 
| 221 } | 221 } | 
| 222 | 222 | 
| 223 | 223 | 
| 224 bool LCodeGen::GenerateBody() { | 224 bool LCodeGen::GenerateBody() { | 
| 225   ASSERT(is_generating()); | 225   ASSERT(is_generating()); | 
| 226   bool emit_instructions = true; | 226   bool emit_instructions = true; | 
| 227   for (current_instruction_ = 0; | 227   for (current_instruction_ = 0; | 
| (...skipping 37 matching lines...) | |
| 265   return !is_aborted(); | 265   return !is_aborted(); | 
| 266 } | 266 } | 
| 267 | 267 | 
| 268 | 268 | 
| 269 bool LCodeGen::GenerateDeferredCode() { | 269 bool LCodeGen::GenerateDeferredCode() { | 
| 270   ASSERT(is_generating()); | 270   ASSERT(is_generating()); | 
| 271   if (deferred_.length() > 0) { | 271   if (deferred_.length() > 0) { | 
| 272     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { | 272     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { | 
| 273       LDeferredCode* code = deferred_[i]; | 273       LDeferredCode* code = deferred_[i]; | 
| 274       __ bind(code->entry()); | 274       __ bind(code->entry()); | 
| 275       if (NeedsDeferredFrame()) { |  | 
| 276         Comment(";;; Deferred build frame", |  | 
| 277                 code->instruction_index(), |  | 
| 278                 code->instr()->Mnemonic()); |  | 
| 279         ASSERT(!frame_is_built_); |  | 
| 280         ASSERT(info()->IsStub()); |  | 
| 281         frame_is_built_ = true; |  | 
| 282         __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); |  | 
| 283         __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); |  | 
| 284         __ push(scratch0()); |  | 
| 285         __ add(fp, sp, Operand(2 * kPointerSize)); |  | 
| 286       } |  | 
| 287       Comment(";;; Deferred code @%d: %s.", | 275       Comment(";;; Deferred code @%d: %s.", | 
| 288               code->instruction_index(), | 276               code->instruction_index(), | 
| 289               code->instr()->Mnemonic()); | 277               code->instr()->Mnemonic()); | 
| 290       code->Generate(); | 278       code->Generate(); | 
| 291       if (NeedsDeferredFrame()) { |  | 
| 292         Comment(";;; Deferred destroy frame", |  | 
| 293                 code->instruction_index(), |  | 
| 294                 code->instr()->Mnemonic()); |  | 
| 295         ASSERT(frame_is_built_); |  | 
| 296         __ pop(ip); |  | 
| 297         __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit()); |  | 
| 298         frame_is_built_ = false; |  | 
| 299       } |  | 
| 300       __ jmp(code->exit()); | 279       __ jmp(code->exit()); | 
| 301     } | 280     } | 
| 302   } | 281   } | 
| 303 | 282 | 
| 304   // Force constant pool emission at the end of the deferred code to make | 283   // Force constant pool emission at the end of the deferred code to make | 
| 305   // sure that no constant pools are emitted after. | 284   // sure that no constant pools are emitted after. | 
| 306   masm()->CheckConstPool(true, false); | 285   masm()->CheckConstPool(true, false); | 
| 307 | 286 | 
| 308   return !is_aborted(); | 287   return !is_aborted(); | 
| 309 } | 288 } | 
| 310 | 289 | 
| 311 | 290 | 
| 312 bool LCodeGen::GenerateDeoptJumpTable() { | 291 bool LCodeGen::GenerateDeoptJumpTable() { | 
| 313   // Check that the jump table is accessible from everywhere in the function | 292   // Check that the jump table is accessible from everywhere in the function | 
| 314   // code, i.e. that offsets to the table can be encoded in the 24bit signed | 293   // code, i.e. that offsets to the table can be encoded in the 24bit signed | 
| 315   // immediate of a branch instruction. | 294   // immediate of a branch instruction. | 
| 316   // To simplify we consider the code size from the first instruction to the | 295   // To simplify we consider the code size from the first instruction to the | 
| 317   // end of the jump table. We also don't consider the pc load delta. | 296   // end of the jump table. We also don't consider the pc load delta. | 
| 318   // Each entry in the jump table generates one instruction and inlines one | 297   // Each entry in the jump table generates one instruction and inlines one | 
| 319   // 32bit data after it. | 298   // 32bit data after it. | 
| 320   if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) + | 299   if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) + | 
| 321       deopt_jump_table_.length() * 7)) { | 300       deopt_jump_table_.length() * 2)) { | 
| 322     Abort("Generated code is too large"); | 301     Abort("Generated code is too large"); | 
| 323   } | 302   } | 
| 324 | 303 | 
|  | 304   // Block the constant pool emission during the jump table emission. | 
|  | 305   __ BlockConstPoolFor(deopt_jump_table_.length()); | 
| 325   __ RecordComment("[ Deoptimisation jump table"); | 306   __ RecordComment("[ Deoptimisation jump table"); | 
| 326   Label table_start; | 307   Label table_start; | 
| 327   __ bind(&table_start); | 308   __ bind(&table_start); | 
| 328   Label needs_frame_not_call; |  | 
| 329   Label needs_frame_is_call; |  | 
| 330   for (int i = 0; i < deopt_jump_table_.length(); i++) { | 309   for (int i = 0; i < deopt_jump_table_.length(); i++) { | 
| 331     __ bind(&deopt_jump_table_[i].label); | 310     __ bind(&deopt_jump_table_[i].label); | 
| 332     Address entry = deopt_jump_table_[i].address; | 311     __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta)); | 
| 333     if (deopt_jump_table_[i].needs_frame) { | 312     __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address)); | 
| 334       __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry))); |  | 
| 335       if (deopt_jump_table_[i].is_lazy_deopt) { |  | 
| 336         if (needs_frame_is_call.is_bound()) { |  | 
| 337           __ b(&needs_frame_is_call); |  | 
| 338         } else { |  | 
| 339           __ bind(&needs_frame_is_call); |  | 
| 340           __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); |  | 
| 341           // This variant of deopt can only be used with stubs. Since we don't |  | 
| 342           // have a function pointer to install in the stack frame that we're |  | 
| 343           // building, install a special marker there instead. |  | 
| 344           ASSERT(info()->IsStub()); |  | 
| 345           __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); |  | 
| 346           __ push(scratch0()); |  | 
| 347           __ add(fp, sp, Operand(2 * kPointerSize)); |  | 
| 348           __ mov(lr, Operand(pc), LeaveCC, al); |  | 
| 349           __ mov(pc, ip); |  | 
| 350         } |  | 
| 351       } else { |  | 
| 352         if (needs_frame_not_call.is_bound()) { |  | 
| 353           __ b(&needs_frame_not_call); |  | 
| 354         } else { |  | 
| 355           __ bind(&needs_frame_not_call); |  | 
| 356           __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); |  | 
| 357           // This variant of deopt can only be used with stubs. Since we don't |  | 
| 358           // have a function pointer to install in the stack frame that we're |  | 
| 359           // building, install a special marker there instead. |  | 
| 360           ASSERT(info()->IsStub()); |  | 
| 361           __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); |  | 
| 362           __ push(scratch0()); |  | 
| 363           __ add(fp, sp, Operand(2 * kPointerSize)); |  | 
| 364           __ mov(pc, ip); |  | 
| 365         } |  | 
| 366       } |  | 
| 367     } else { |  | 
| 368       if (deopt_jump_table_[i].is_lazy_deopt) { |  | 
| 369         __ mov(lr, Operand(pc), LeaveCC, al); |  | 
| 370         __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry))); |  | 
| 371       } else { |  | 
| 372         __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry))); |  | 
| 373       } |  | 
| 374     } |  | 
| 375     masm()->CheckConstPool(false, false); |  | 
| 376   } | 313   } | 
|  | 314   ASSERT(masm()->InstructionsGeneratedSince(&table_start) == | 
|  | 315       deopt_jump_table_.length() * 2); | 
| 377   __ RecordComment("]"); | 316   __ RecordComment("]"); | 
| 378 | 317 | 
| 379   // Force constant pool emission at the end of the deopt jump table to make |  | 
| 380   // sure that no constant pools are emitted after. |  | 
| 381   masm()->CheckConstPool(true, false); |  | 
| 382 |  | 
| 383   // The deoptimization jump table is the last part of the instruction | 318   // The deoptimization jump table is the last part of the instruction | 
| 384   // sequence. Mark the generated code as done unless we bailed out. | 319   // sequence. Mark the generated code as done unless we bailed out. | 
| 385   if (!is_aborted()) status_ = DONE; | 320   if (!is_aborted()) status_ = DONE; | 
| 386   return !is_aborted(); | 321   return !is_aborted(); | 
| 387 } | 322 } | 
| 388 | 323 | 
| 389 | 324 | 
| 390 bool LCodeGen::GenerateSafepointTable() { | 325 bool LCodeGen::GenerateSafepointTable() { | 
| 391   ASSERT(is_done()); | 326   ASSERT(is_done()); | 
| 392   safepoints_.Emit(masm(), GetStackSlotCount()); | 327   safepoints_.Emit(masm(), GetStackSlotCount()); | 
| 393   return !is_aborted(); | 328   return !is_aborted(); | 
| 394 } | 329 } | 
| 395 | 330 | 
| 396 | 331 | 
| 397 Register LCodeGen::ToRegister(int index) const { | 332 Register LCodeGen::ToRegister(int index) const { | 
| 398   return Register::FromAllocationIndex(index); | 333   return Register::FromAllocationIndex(index); | 
| 399 } | 334 } | 
| 400 | 335 | 
| 401 | 336 | 
| 402 DwVfpRegister LCodeGen::ToDoubleRegister(int index) const { | 337 DoubleRegister LCodeGen::ToDoubleRegister(int index) const { | 
| 403   return DwVfpRegister::FromAllocationIndex(index); | 338   return DoubleRegister::FromAllocationIndex(index); | 
| 404 } | 339 } | 
| 405 | 340 | 
| 406 | 341 | 
| 407 Register LCodeGen::ToRegister(LOperand* op) const { | 342 Register LCodeGen::ToRegister(LOperand* op) const { | 
| 408   ASSERT(op->IsRegister()); | 343   ASSERT(op->IsRegister()); | 
| 409   return ToRegister(op->index()); | 344   return ToRegister(op->index()); | 
| 410 } | 345 } | 
| 411 | 346 | 
| 412 | 347 | 
| 413 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { | 348 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { | 
| (...skipping 20 matching lines...) | |
| 434     return scratch; | 369     return scratch; | 
| 435   } else if (op->IsStackSlot() || op->IsArgument()) { | 370   } else if (op->IsStackSlot() || op->IsArgument()) { | 
| 436     __ ldr(scratch, ToMemOperand(op)); | 371     __ ldr(scratch, ToMemOperand(op)); | 
| 437     return scratch; | 372     return scratch; | 
| 438   } | 373   } | 
| 439   UNREACHABLE(); | 374   UNREACHABLE(); | 
| 440   return scratch; | 375   return scratch; | 
| 441 } | 376 } | 
| 442 | 377 | 
| 443 | 378 | 
| 444 DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const { | 379 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { | 
| 445   ASSERT(op->IsDoubleRegister()); | 380   ASSERT(op->IsDoubleRegister()); | 
| 446   return ToDoubleRegister(op->index()); | 381   return ToDoubleRegister(op->index()); | 
| 447 } | 382 } | 
| 448 | 383 | 
| 449 | 384 | 
| 450 DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, | 385 DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, | 
| 451                                                SwVfpRegister flt_scratch, | 386                                                 SwVfpRegister flt_scratch, | 
| 452                                                DwVfpRegister dbl_scratch) { | 387                                                 DoubleRegister dbl_scratch) { | 
| 453   if (op->IsDoubleRegister()) { | 388   if (op->IsDoubleRegister()) { | 
| 454     return ToDoubleRegister(op->index()); | 389     return ToDoubleRegister(op->index()); | 
| 455   } else if (op->IsConstantOperand()) { | 390   } else if (op->IsConstantOperand()) { | 
| 456     LConstantOperand* const_op = LConstantOperand::cast(op); | 391     LConstantOperand* const_op = LConstantOperand::cast(op); | 
| 457     HConstant* constant = chunk_->LookupConstant(const_op); | 392     HConstant* constant = chunk_->LookupConstant(const_op); | 
| 458     Handle<Object> literal = constant->handle(); | 393     Handle<Object> literal = constant->handle(); | 
| 459     Representation r = chunk_->LookupLiteralRepresentation(const_op); | 394     Representation r = chunk_->LookupLiteralRepresentation(const_op); | 
| 460     if (r.IsInteger32()) { | 395     if (r.IsInteger32()) { | 
| 461       ASSERT(literal->IsNumber()); | 396       ASSERT(literal->IsNumber()); | 
| 462       __ mov(ip, Operand(static_cast<int32_t>(literal->Number()))); | 397       __ mov(ip, Operand(static_cast<int32_t>(literal->Number()))); | 
| (...skipping 115 matching lines...) | |
| 578   // arguments index points to the first element of a sequence of tagged | 513   // arguments index points to the first element of a sequence of tagged | 
| 579   // values on the stack that represent the arguments. This needs to be | 514   // values on the stack that represent the arguments. This needs to be | 
| 580   // kept in sync with the LArgumentsElements implementation. | 515   // kept in sync with the LArgumentsElements implementation. | 
| 581   *arguments_index = -environment->parameter_count(); | 516   *arguments_index = -environment->parameter_count(); | 
| 582   *arguments_count = environment->parameter_count(); | 517   *arguments_count = environment->parameter_count(); | 
| 583 | 518 | 
| 584   WriteTranslation(environment->outer(), | 519   WriteTranslation(environment->outer(), | 
| 585                    translation, | 520                    translation, | 
| 586                    arguments_index, | 521                    arguments_index, | 
| 587                    arguments_count); | 522                    arguments_count); | 
| 588   bool has_closure_id = !info()->closure().is_null() && | 523   int closure_id = *info()->closure() != *environment->closure() | 
| 589       *info()->closure() != *environment->closure(); |  | 
| 590   int closure_id = has_closure_id |  | 
| 591       ? DefineDeoptimizationLiteral(environment->closure()) | 524       ? DefineDeoptimizationLiteral(environment->closure()) | 
| 592       : Translation::kSelfLiteralId; | 525       : Translation::kSelfLiteralId; | 
| 593 | 526 | 
| 594   switch (environment->frame_type()) { | 527   switch (environment->frame_type()) { | 
| 595     case JS_FUNCTION: | 528     case JS_FUNCTION: | 
| 596       translation->BeginJSFrame(environment->ast_id(), closure_id, height); | 529       translation->BeginJSFrame(environment->ast_id(), closure_id, height); | 
| 597       break; | 530       break; | 
| 598     case JS_CONSTRUCT: | 531     case JS_CONSTRUCT: | 
| 599       translation->BeginConstructStubFrame(closure_id, translation_size); | 532       translation->BeginConstructStubFrame(closure_id, translation_size); | 
| 600       break; | 533       break; | 
| 601     case JS_GETTER: | 534     case JS_GETTER: | 
| 602       ASSERT(translation_size == 1); | 535       ASSERT(translation_size == 1); | 
| 603       ASSERT(height == 0); | 536       ASSERT(height == 0); | 
| 604       translation->BeginGetterStubFrame(closure_id); | 537       translation->BeginGetterStubFrame(closure_id); | 
| 605       break; | 538       break; | 
| 606     case JS_SETTER: | 539     case JS_SETTER: | 
| 607       ASSERT(translation_size == 2); | 540       ASSERT(translation_size == 2); | 
| 608       ASSERT(height == 0); | 541       ASSERT(height == 0); | 
| 609       translation->BeginSetterStubFrame(closure_id); | 542       translation->BeginSetterStubFrame(closure_id); | 
| 610       break; | 543       break; | 
| 611     case STUB: |  | 
| 612       translation->BeginCompiledStubFrame(); |  | 
| 613       break; |  | 
| 614     case ARGUMENTS_ADAPTOR: | 544     case ARGUMENTS_ADAPTOR: | 
| 615       translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); | 545       translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); | 
| 616       break; | 546       break; | 
| 617   } | 547   } | 
| 618 | 548 | 
| 619   // Inlined frames which push their arguments cause the index to be | 549   // Inlined frames which push their arguments cause the index to be | 
| 620   // bumped and a new stack area to be used for materialization. | 550   // bumped and a new stack area to be used for materialization. | 
| 621   if (environment->entry() != NULL && | 551   if (environment->entry() != NULL && | 
| 622       environment->entry()->arguments_pushed()) { | 552       environment->entry()->arguments_pushed()) { | 
| 623     *arguments_index = *arguments_index < 0 | 553     *arguments_index = *arguments_index < 0 | 
| (...skipping 175 matching lines...) | |
| 799                           (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); | 729                           (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); | 
| 800     deoptimizations_.Add(environment, zone()); | 730     deoptimizations_.Add(environment, zone()); | 
| 801   } | 731   } | 
| 802 } | 732 } | 
| 803 | 733 | 
| 804 | 734 | 
| 805 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { | 735 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { | 
| 806   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 736   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 
| 807   ASSERT(environment->HasBeenRegistered()); | 737   ASSERT(environment->HasBeenRegistered()); | 
| 808   int id = environment->deoptimization_index(); | 738   int id = environment->deoptimization_index(); | 
| 809 | 739   Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER); | 
| 810   Deoptimizer::BailoutType bailout_type = info()->IsStub() |  | 
| 811       ? Deoptimizer::LAZY |  | 
| 812       : Deoptimizer::EAGER; |  | 
| 813   Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type); |  | 
| 814   if (entry == NULL) { | 740   if (entry == NULL) { | 
| 815     Abort("bailout was not prepared"); | 741     Abort("bailout was not prepared"); | 
| 816     return; | 742     return; | 
| 817   } | 743   } | 
| 818 | 744 | 
| 819   ASSERT(FLAG_deopt_every_n_times < 2);  // Other values not supported on ARM. | 745   ASSERT(FLAG_deopt_every_n_times < 2);  // Other values not supported on ARM. | 
| 820 | 746 | 
| 821   if (FLAG_deopt_every_n_times == 1 && | 747   if (FLAG_deopt_every_n_times == 1 && | 
| 822       info_->shared_info()->opt_count() == id) { | 748       info_->shared_info()->opt_count() == id) { | 
| 823     __ Jump(entry, RelocInfo::RUNTIME_ENTRY); | 749     __ Jump(entry, RelocInfo::RUNTIME_ENTRY); | 
| 824     return; | 750     return; | 
| 825   } | 751   } | 
| 826 | 752 | 
| 827   if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc); | 753   if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc); | 
| 828 | 754 | 
| 829   bool needs_lazy_deopt = info()->IsStub(); | 755   if (cc == al) { | 
| 830   ASSERT(info()->IsStub() || frame_is_built_); |  | 
| 831   if (cc == al && !needs_lazy_deopt) { |  | 
| 832     __ Jump(entry, RelocInfo::RUNTIME_ENTRY); | 756     __ Jump(entry, RelocInfo::RUNTIME_ENTRY); | 
| 833   } else { | 757   } else { | 
| 834     // We often have several deopts to the same entry, reuse the last | 758     // We often have several deopts to the same entry, reuse the last | 
| 835     // jump entry if this is the case. | 759     // jump entry if this is the case. | 
| 836     if (deopt_jump_table_.is_empty() || | 760     if (deopt_jump_table_.is_empty() || | 
| 837         (deopt_jump_table_.last().address != entry) || | 761         (deopt_jump_table_.last().address != entry)) { | 
| 838         (deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) || | 762       deopt_jump_table_.Add(JumpTableEntry(entry), zone()); | 
| 839         (deopt_jump_table_.last().needs_frame != !frame_is_built_)) { |  | 
| 840       JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt); |  | 
| 841       deopt_jump_table_.Add(table_entry, zone()); |  | 
| 842     } | 763     } | 
| 843     __ b(cc, &deopt_jump_table_.last().label); | 764     __ b(cc, &deopt_jump_table_.last().label); | 
| 844   } | 765   } | 
| 845 } | 766 } | 
| 846 | 767 | 
| 847 | 768 | 
| 848 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { | 769 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { | 
| 849   int length = deoptimizations_.length(); | 770   int length = deoptimizations_.length(); | 
| 850   if (length == 0) return; | 771   if (length == 0) return; | 
| 851   Handle<DeoptimizationInputData> data = | 772   Handle<DeoptimizationInputData> data = | 
| (...skipping 588 matching lines...) | |
| 1440   __ cmp(remainder, Operand(0)); | 1361   __ cmp(remainder, Operand(0)); | 
| 1441   __ teq(remainder, Operand(divisor), ne); | 1362   __ teq(remainder, Operand(divisor), ne); | 
| 1442   __ sub(result, result, Operand(1), LeaveCC, mi); | 1363   __ sub(result, result, Operand(1), LeaveCC, mi); | 
| 1443 } | 1364 } | 
| 1444 | 1365 | 
| 1445 | 1366 | 
| 1446 void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map, | 1367 void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map, | 
| 1447                                       LOperand* left_argument, | 1368                                       LOperand* left_argument, | 
| 1448                                       LOperand* right_argument, | 1369                                       LOperand* right_argument, | 
| 1449                                       Token::Value op) { | 1370                                       Token::Value op) { | 
| 1450   CpuFeatures::Scope vfp_scope(VFP2); |  | 
| 1451   Register left = ToRegister(left_argument); | 1371   Register left = ToRegister(left_argument); | 
| 1452   Register right = ToRegister(right_argument); | 1372   Register right = ToRegister(right_argument); | 
| 1453 | 1373 | 
| 1454   PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles); | 1374   PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles); | 
| 1455   // Move left to r1 and right to r0 for the stub call. | 1375   // Move left to r1 and right to r0 for the stub call. | 
| 1456   if (left.is(r1)) { | 1376   if (left.is(r1)) { | 
| 1457     __ Move(r0, right); | 1377     __ Move(r0, right); | 
| 1458   } else if (left.is(r0) && right.is(r1)) { | 1378   } else if (left.is(r0) && right.is(r1)) { | 
| 1459     __ Swap(r0, r1, r2); | 1379     __ Swap(r0, r1, r2); | 
| 1460   } else if (left.is(r0)) { | 1380   } else if (left.is(r0)) { | 
| (...skipping 265 matching lines...) | |
| 1726 | 1646 | 
| 1727 void LCodeGen::DoConstantI(LConstantI* instr) { | 1647 void LCodeGen::DoConstantI(LConstantI* instr) { | 
| 1728   ASSERT(instr->result()->IsRegister()); | 1648   ASSERT(instr->result()->IsRegister()); | 
| 1729   __ mov(ToRegister(instr->result()), Operand(instr->value())); | 1649   __ mov(ToRegister(instr->result()), Operand(instr->value())); | 
| 1730 } | 1650 } | 
| 1731 | 1651 | 
| 1732 | 1652 | 
| 1733 void LCodeGen::DoConstantD(LConstantD* instr) { | 1653 void LCodeGen::DoConstantD(LConstantD* instr) { | 
| 1734   ASSERT(instr->result()->IsDoubleRegister()); | 1654   ASSERT(instr->result()->IsDoubleRegister()); | 
| 1735   DwVfpRegister result = ToDoubleRegister(instr->result()); | 1655   DwVfpRegister result = ToDoubleRegister(instr->result()); | 
| 1736   CpuFeatures::Scope scope(VFP2); |  | 
| 1737   double v = instr->value(); | 1656   double v = instr->value(); | 
| 1738   __ Vmov(result, v, scratch0()); | 1657   __ Vmov(result, v, scratch0()); | 
| 1739 } | 1658 } | 
| 1740 | 1659 | 
| 1741 | 1660 | 
| 1742 void LCodeGen::DoConstantT(LConstantT* instr) { | 1661 void LCodeGen::DoConstantT(LConstantT* instr) { | 
| 1743   Handle<Object> value = instr->value(); | 1662   Handle<Object> value = instr->value(); | 
| 1744   if (value->IsSmi()) { | 1663   if (value->IsSmi()) { | 
| 1745     __ mov(ToRegister(instr->result()), Operand(value)); | 1664     __ mov(ToRegister(instr->result()), Operand(value)); | 
| 1746   } else { | 1665   } else { | 
| (...skipping 157 matching lines...) | |
| 1904         ? ToOperand(right) | 1823         ? ToOperand(right) | 
| 1905         : Operand(EmitLoadRegister(right, ip)); | 1824         : Operand(EmitLoadRegister(right, ip)); | 
| 1906     Register result_reg = ToRegister(instr->result()); | 1825     Register result_reg = ToRegister(instr->result()); | 
| 1907     __ cmp(left_reg, right_op); | 1826     __ cmp(left_reg, right_op); | 
| 1908     if (!result_reg.is(left_reg)) { | 1827     if (!result_reg.is(left_reg)) { | 
| 1909       __ mov(result_reg, left_reg, LeaveCC, condition); | 1828       __ mov(result_reg, left_reg, LeaveCC, condition); | 
| 1910     } | 1829     } | 
| 1911     __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition)); | 1830     __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition)); | 
| 1912   } else { | 1831   } else { | 
| 1913     ASSERT(instr->hydrogen()->representation().IsDouble()); | 1832     ASSERT(instr->hydrogen()->representation().IsDouble()); | 
| 1914     CpuFeatures::Scope scope(VFP2); | 1833     DoubleRegister left_reg = ToDoubleRegister(left); | 
| 1915     DwVfpRegister left_reg = ToDoubleRegister(left); | 1834     DoubleRegister right_reg = ToDoubleRegister(right); | 
| 1916     DwVfpRegister right_reg = ToDoubleRegister(right); | 1835     DoubleRegister result_reg = ToDoubleRegister(instr->result()); | 
| 1917     DwVfpRegister result_reg = ToDoubleRegister(instr->result()); |  | 
| 1918     Label check_nan_left, check_zero, return_left, return_right, done; | 1836     Label check_nan_left, check_zero, return_left, return_right, done; | 
| 1919     __ VFPCompareAndSetFlags(left_reg, right_reg); | 1837     __ VFPCompareAndSetFlags(left_reg, right_reg); | 
| 1920     __ b(vs, &check_nan_left); | 1838     __ b(vs, &check_nan_left); | 
| 1921     __ b(eq, &check_zero); | 1839     __ b(eq, &check_zero); | 
| 1922     __ b(condition, &return_left); | 1840     __ b(condition, &return_left); | 
| 1923     __ b(al, &return_right); | 1841     __ b(al, &return_right); | 
| 1924 | 1842 | 
| 1925     __ bind(&check_zero); | 1843     __ bind(&check_zero); | 
| 1926     __ VFPCompareAndSetFlags(left_reg, 0.0); | 1844     __ VFPCompareAndSetFlags(left_reg, 0.0); | 
| 1927     __ b(ne, &return_left);  // left == right != 0. | 1845     __ b(ne, &return_left);  // left == right != 0. | 
| (...skipping 22 matching lines...) | |
| 1950     __ bind(&return_left); | 1868     __ bind(&return_left); | 
| 1951     if (!left_reg.is(result_reg)) { | 1869     if (!left_reg.is(result_reg)) { | 
| 1952       __ vmov(result_reg, left_reg); | 1870       __ vmov(result_reg, left_reg); | 
| 1953     } | 1871     } | 
| 1954     __ bind(&done); | 1872     __ bind(&done); | 
| 1955   } | 1873   } | 
| 1956 } | 1874 } | 
| 1957 | 1875 | 
| 1958 | 1876 | 
| 1959 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { | 1877 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { | 
| 1960   CpuFeatures::Scope scope(VFP2); | 1878   DoubleRegister left = ToDoubleRegister(instr->left()); | 
| 1961   DwVfpRegister left = ToDoubleRegister(instr->left()); | 1879   DoubleRegister right = ToDoubleRegister(instr->right()); | 
| 1962   DwVfpRegister right = ToDoubleRegister(instr->right()); | 1880   DoubleRegister result = ToDoubleRegister(instr->result()); | 
| 1963   DwVfpRegister result = ToDoubleRegister(instr->result()); |  | 
| 1964   switch (instr->op()) { | 1881   switch (instr->op()) { | 
| 1965     case Token::ADD: | 1882     case Token::ADD: | 
| 1966       __ vadd(result, left, right); | 1883       __ vadd(result, left, right); | 
| 1967       break; | 1884       break; | 
| 1968     case Token::SUB: | 1885     case Token::SUB: | 
| 1969       __ vsub(result, left, right); | 1886       __ vsub(result, left, right); | 
| 1970       break; | 1887       break; | 
| 1971     case Token::MUL: | 1888     case Token::MUL: | 
| 1972       __ vmul(result, left, right); | 1889       __ vmul(result, left, right); | 
| 1973       break; | 1890       break; | 
| (...skipping 67 matching lines...) | |
| 2041 void LCodeGen::DoBranch(LBranch* instr) { | 1958 void LCodeGen::DoBranch(LBranch* instr) { | 
| 2042   int true_block = chunk_->LookupDestination(instr->true_block_id()); | 1959   int true_block = chunk_->LookupDestination(instr->true_block_id()); | 
| 2043   int false_block = chunk_->LookupDestination(instr->false_block_id()); | 1960   int false_block = chunk_->LookupDestination(instr->false_block_id()); | 
| 2044 | 1961 | 
| 2045   Representation r = instr->hydrogen()->value()->representation(); | 1962   Representation r = instr->hydrogen()->value()->representation(); | 
| 2046   if (r.IsInteger32()) { | 1963   if (r.IsInteger32()) { | 
| 2047     Register reg = ToRegister(instr->value()); | 1964     Register reg = ToRegister(instr->value()); | 
| 2048     __ cmp(reg, Operand(0)); | 1965     __ cmp(reg, Operand(0)); | 
| 2049     EmitBranch(true_block, false_block, ne); | 1966     EmitBranch(true_block, false_block, ne); | 
| 2050   } else if (r.IsDouble()) { | 1967   } else if (r.IsDouble()) { | 
| 2051     CpuFeatures::Scope scope(VFP2); | 1968     DoubleRegister reg = ToDoubleRegister(instr->value()); | 
| 2052     DwVfpRegister reg = ToDoubleRegister(instr->value()); |  | 
| 2053     Register scratch = scratch0(); | 1969     Register scratch = scratch0(); | 
| 2054 | 1970 | 
| 2055     // Test the double value. Zero and NaN are false. | 1971     // Test the double value. Zero and NaN are false. | 
| 2056     __ VFPCompareAndLoadFlags(reg, 0.0, scratch); | 1972     __ VFPCompareAndLoadFlags(reg, 0.0, scratch); | 
| 2057     __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit)); | 1973     __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit)); | 
| 2058     EmitBranch(true_block, false_block, eq); | 1974     EmitBranch(true_block, false_block, eq); | 
| 2059   } else { | 1975   } else { | 
| 2060     ASSERT(r.IsTagged()); | 1976     ASSERT(r.IsTagged()); | 
| 2061     Register reg = ToRegister(instr->value()); | 1977     Register reg = ToRegister(instr->value()); | 
| 2062     HType type = instr->hydrogen()->value()->type(); | 1978     HType type = instr->hydrogen()->value()->type(); | 
| (...skipping 64 matching lines...) | |
| 2127         __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE); | 2043         __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE); | 
| 2128         __ b(ge, &not_string); | 2044         __ b(ge, &not_string); | 
| 2129         __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset)); | 2045         __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset)); | 
| 2130         __ cmp(ip, Operand(0)); | 2046         __ cmp(ip, Operand(0)); | 
| 2131         __ b(ne, true_label); | 2047         __ b(ne, true_label); | 
| 2132         __ b(false_label); | 2048         __ b(false_label); | 
| 2133         __ bind(&not_string); | 2049         __ bind(&not_string); | 
| 2134       } | 2050       } | 
| 2135 | 2051 | 
| 2136       if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { | 2052       if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { | 
| 2137         CpuFeatures::Scope scope(VFP2); |  | 
| 2138         // heap number -> false iff +0, -0, or NaN. | 2053         // heap number -> false iff +0, -0, or NaN. | 
| 2139         DwVfpRegister dbl_scratch = double_scratch0(); | 2054         DoubleRegister dbl_scratch = double_scratch0(); | 
| 2140         Label not_heap_number; | 2055         Label not_heap_number; | 
| 2141         __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); | 2056         __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); | 
| 2142         __ b(ne, &not_heap_number); | 2057         __ b(ne, &not_heap_number); | 
| 2143         __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); | 2058         __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); | 
| 2144         __ VFPCompareAndSetFlags(dbl_scratch, 0.0); | 2059         __ VFPCompareAndSetFlags(dbl_scratch, 0.0); | 
| 2145         __ b(vs, false_label);  // NaN -> false. | 2060         __ b(vs, false_label);  // NaN -> false. | 
| 2146         __ b(eq, false_label);  // +0, -0 -> false. | 2061         __ b(eq, false_label);  // +0, -0 -> false. | 
| 2147         __ b(true_label); | 2062         __ b(true_label); | 
| 2148         __ bind(&not_heap_number); | 2063         __ bind(&not_heap_number); | 
| 2149       } | 2064       } | 
| (...skipping 57 matching lines...) | |
| 2207   if (left->IsConstantOperand() && right->IsConstantOperand()) { | 2122   if (left->IsConstantOperand() && right->IsConstantOperand()) { | 
| 2208     // We can statically evaluate the comparison. | 2123     // We can statically evaluate the comparison. | 
| 2209     double left_val = ToDouble(LConstantOperand::cast(left)); | 2124     double left_val = ToDouble(LConstantOperand::cast(left)); | 
| 2210     double right_val = ToDouble(LConstantOperand::cast(right)); | 2125     double right_val = ToDouble(LConstantOperand::cast(right)); | 
| 2211     int next_block = | 2126     int next_block = | 
| 2212       EvalComparison(instr->op(), left_val, right_val) ? true_block | 2127       EvalComparison(instr->op(), left_val, right_val) ? true_block | 
| 2213                                                        : false_block; | 2128                                                        : false_block; | 
| 2214     EmitGoto(next_block); | 2129     EmitGoto(next_block); | 
| 2215   } else { | 2130   } else { | 
| 2216     if (instr->is_double()) { | 2131     if (instr->is_double()) { | 
| 2217       CpuFeatures::Scope scope(VFP2); |  | 
| 2218       // Compare left and right operands as doubles and load the | 2132       // Compare left and right operands as doubles and load the | 
| 2219       // resulting flags into the normal status register. | 2133       // resulting flags into the normal status register. | 
| 2220       __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right)); | 2134       __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right)); | 
| 2221       // If a NaN is involved, i.e. the result is unordered (V set), | 2135       // If a NaN is involved, i.e. the result is unordered (V set), | 
| 2222       // jump to false block label. | 2136       // jump to false block label. | 
| 2223       __ b(vs, chunk_->GetAssemblyLabel(false_block)); | 2137       __ b(vs, chunk_->GetAssemblyLabel(false_block)); | 
| 2224     } else { | 2138     } else { | 
| 2225       if (right->IsConstantOperand()) { | 2139       if (right->IsConstantOperand()) { | 
| 2226         __ cmp(ToRegister(left), | 2140         __ cmp(ToRegister(left), | 
| 2227                Operand(ToInteger32(LConstantOperand::cast(right)))); | 2141                Operand(ToInteger32(LConstantOperand::cast(right)))); | 
| (...skipping 518 matching lines...) | |
| 2746   __ LoadRoot(ToRegister(instr->result()), | 2660   __ LoadRoot(ToRegister(instr->result()), | 
| 2747               Heap::kTrueValueRootIndex, | 2661               Heap::kTrueValueRootIndex, | 
| 2748               condition); | 2662               condition); | 
| 2749   __ LoadRoot(ToRegister(instr->result()), | 2663   __ LoadRoot(ToRegister(instr->result()), | 
| 2750               Heap::kFalseValueRootIndex, | 2664               Heap::kFalseValueRootIndex, | 
| 2751               NegateCondition(condition)); | 2665               NegateCondition(condition)); | 
| 2752 } | 2666 } | 
| 2753 | 2667 | 
| 2754 | 2668 | 
| 2755 void LCodeGen::DoReturn(LReturn* instr) { | 2669 void LCodeGen::DoReturn(LReturn* instr) { | 
| 2756   if (FLAG_trace && info()->IsOptimizing()) { | 2670   if (FLAG_trace) { | 
| 2757     // Push the return value on the stack as the parameter. | 2671     // Push the return value on the stack as the parameter. | 
| 2758     // Runtime::TraceExit returns its parameter in r0. | 2672     // Runtime::TraceExit returns its parameter in r0. | 
| 2759     __ push(r0); | 2673     __ push(r0); | 
| 2760     __ CallRuntime(Runtime::kTraceExit, 1); | 2674     __ CallRuntime(Runtime::kTraceExit, 1); | 
| 2761   } | 2675   } | 
| 2762   if (NeedsEagerFrame()) { | 2676   int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize; | 
| 2763     int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize; | 2677   __ mov(sp, fp); | 
| 2764     __ mov(sp, fp); | 2678   __ ldm(ia_w, sp, fp.bit() | lr.bit()); | 
| 2765     __ ldm(ia_w, sp, fp.bit() | lr.bit()); | 2679   __ add(sp, sp, Operand(sp_delta)); | 
| 2766     __ add(sp, sp, Operand(sp_delta)); |  | 
| 2767   } |  | 
| 2768   if (info()->IsStub()) { |  | 
| 2769     __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |  | 
| 2770   } |  | 
| 2771   __ Jump(lr); | 2680   __ Jump(lr); | 
| 2772 } | 2681 } | 
| 2773 | 2682 | 
| 2774 | 2683 | 
| 2775 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { | 2684 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { | 
| 2776   Register result = ToRegister(instr->result()); | 2685   Register result = ToRegister(instr->result()); | 
| 2777   __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell()))); | 2686   __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell()))); | 
| 2778   __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset)); | 2687   __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset)); | 
| 2779   if (instr->hydrogen()->RequiresHoleCheck()) { | 2688   if (instr->hydrogen()->RequiresHoleCheck()) { | 
| 2780     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 2689     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 
| (...skipping 329 matching lines...) | |
| 3110   } else { | 3019   } else { | 
| 3111     key = ToRegister(instr->key()); | 3020     key = ToRegister(instr->key()); | 
| 3112   } | 3021   } | 
| 3113   int element_size_shift = ElementsKindToShiftSize(elements_kind); | 3022   int element_size_shift = ElementsKindToShiftSize(elements_kind); | 
| 3114   int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) | 3023   int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) | 
| 3115       ? (element_size_shift - kSmiTagSize) : element_size_shift; | 3024       ? (element_size_shift - kSmiTagSize) : element_size_shift; | 
| 3116   int additional_offset = instr->additional_index() << element_size_shift; | 3025   int additional_offset = instr->additional_index() << element_size_shift; | 
| 3117 | 3026 | 
| 3118   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || | 3027   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || | 
| 3119       elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { | 3028       elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { | 
|  | 3029     CpuFeatures::Scope scope(VFP3); | 
| 3120     DwVfpRegister result = ToDoubleRegister(instr->result()); | 3030     DwVfpRegister result = ToDoubleRegister(instr->result()); | 
| 3121     Operand operand = key_is_constant | 3031     Operand operand = key_is_constant | 
| 3122         ? Operand(constant_key << element_size_shift) | 3032         ? Operand(constant_key << element_size_shift) | 
| 3123         : Operand(key, LSL, shift_size); | 3033         : Operand(key, LSL, shift_size); | 
| 3124     __ add(scratch0(), external_pointer, operand); | 3034     __ add(scratch0(), external_pointer, operand); | 
| 3125     if (CpuFeatures::IsSupported(VFP2)) { | 3035     if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { | 
| 3126       CpuFeatures::Scope scope(VFP2); | 3036       __ vldr(result.low(), scratch0(), additional_offset); | 
| 3127       if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { | 3037       __ vcvt_f64_f32(result, result.low()); | 
| 3128         __ vldr(result.low(), scratch0(), additional_offset); | 3038     } else  {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS | 
| 3129         __ vcvt_f64_f32(result, result.low()); | 3039       __ vldr(result, scratch0(), additional_offset); | 
| 3130       } else  {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS |  | 
| 3131         __ vldr(result, scratch0(), additional_offset); |  | 
| 3132       } |  | 
| 3133     } else { |  | 
| 3134       if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |  | 
| 3135         Register value = external_pointer; |  | 
| 3136         __ ldr(value, MemOperand(scratch0(), additional_offset)); |  | 
| 3137         __ and_(sfpd_lo, value, Operand(kBinary32MantissaMask)); |  | 
| 3138 |  | 
| 3139         __ mov(scratch0(), Operand(value, LSR, kBinary32MantissaBits)); |  | 
| 3140         __ and_(scratch0(), scratch0(), |  | 
| 3141                 Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); |  | 
| 3142 |  | 
| 3143         Label exponent_rebiased; |  | 
| 3144         __ teq(scratch0(), Operand(0x00)); |  | 
| 3145         __ b(eq, &exponent_rebiased); |  | 
| 3146 |  | 
| 3147         __ teq(scratch0(), Operand(0xff)); |  | 
| 3148         __ mov(scratch0(), Operand(0x7ff), LeaveCC, eq); |  | 
| 3149         __ b(eq, &exponent_rebiased); |  | 
| 3150 |  | 
| 3151         // Rebias exponent. |  | 
| 3152         __ add(scratch0(), |  | 
| 3153                scratch0(), |  | 
| 3154                Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); |  | 
| 3155 |  | 
| 3156         __ bind(&exponent_rebiased); |  | 
| 3157         __ and_(sfpd_hi, value, Operand(kBinary32SignMask)); |  | 
| 3158         __ orr(sfpd_hi, sfpd_hi, |  | 
| 3159                Operand(scratch0(), LSL, HeapNumber::kMantissaBitsInTopWord)); |  | 
| 3160 |  | 
| 3161         // Shift mantissa. |  | 
| 3162         static const int kMantissaShiftForHiWord = |  | 
| 3163             kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; |  | 
| 3164 |  | 
| 3165         static const int kMantissaShiftForLoWord = |  | 
| 3166             kBitsPerInt - kMantissaShiftForHiWord; |  | 
| 3167 |  | 
| 3168         __ orr(sfpd_hi, sfpd_hi, |  | 
| 3169                Operand(sfpd_lo, LSR, kMantissaShiftForHiWord)); |  | 
| 3170         __ mov(sfpd_lo, Operand(sfpd_lo, LSL, kMantissaShiftForLoWord)); |  | 
| 3171 |  | 
| 3172       } else { |  | 
| 3173         __ ldr(sfpd_lo, MemOperand(scratch0(), additional_offset)); |  | 
| 3174         __ ldr(sfpd_hi, MemOperand(scratch0(), |  | 
| 3175                                    additional_offset + kPointerSize)); |  | 
| 3176       } |  | 
| 3177     } | 3040     } | 
| 3178   } else { | 3041   } else { | 
| 3179     Register result = ToRegister(instr->result()); | 3042     Register result = ToRegister(instr->result()); | 
| 3180     MemOperand mem_operand = PrepareKeyedOperand( | 3043     MemOperand mem_operand = PrepareKeyedOperand( | 
| 3181         key, external_pointer, key_is_constant, constant_key, | 3044         key, external_pointer, key_is_constant, constant_key, | 
| 3182         element_size_shift, shift_size, | 3045         element_size_shift, shift_size, | 
| 3183         instr->additional_index(), additional_offset); | 3046         instr->additional_index(), additional_offset); | 
| 3184     switch (elements_kind) { | 3047     switch (elements_kind) { | 
| 3185       case EXTERNAL_BYTE_ELEMENTS: | 3048       case EXTERNAL_BYTE_ELEMENTS: | 
| 3186         __ ldrsb(result, mem_operand); | 3049         __ ldrsb(result, mem_operand); | 
| (...skipping 48 matching lines...) | |
| 3235   int constant_key = 0; | 3098   int constant_key = 0; | 
| 3236   if (key_is_constant) { | 3099   if (key_is_constant) { | 
| 3237     constant_key = ToInteger32(LConstantOperand::cast(instr->key())); | 3100     constant_key = ToInteger32(LConstantOperand::cast(instr->key())); | 
| 3238     if (constant_key & 0xF0000000) { | 3101     if (constant_key & 0xF0000000) { | 
| 3239       Abort("array index constant value too big."); | 3102       Abort("array index constant value too big."); | 
| 3240     } | 3103     } | 
| 3241   } else { | 3104   } else { | 
| 3242     key = ToRegister(instr->key()); | 3105     key = ToRegister(instr->key()); | 
| 3243   } | 3106   } | 
| 3244 | 3107 | 
| 3245   int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) + | 3108   Operand operand = key_is_constant | 
| 3246       ((constant_key + instr->additional_index()) << element_size_shift); | 3109       ? Operand(((constant_key + instr->additional_index()) << | 
|  | 3110                  element_size_shift) + | 
|  | 3111                 FixedDoubleArray::kHeaderSize - kHeapObjectTag) | 
|  | 3112       : Operand(key, LSL, shift_size); | 
|  | 3113   __ add(elements, elements, operand); | 
| 3247   if (!key_is_constant) { | 3114   if (!key_is_constant) { | 
| 3248     __ add(elements, elements, Operand(key, LSL, shift_size)); | 3115     __ add(elements, elements, | 
|  | 3116            Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) + | 
|  | 3117                    (instr->additional_index() << element_size_shift))); | 
| 3249   } | 3118   } | 
| 3250   if (CpuFeatures::IsSupported(VFP2)) { | 3119 | 
| 3251     CpuFeatures::Scope scope(VFP2); | 3120   __ vldr(result, elements, 0); | 
| 3252     __ add(elements, elements, Operand(base_offset)); | 3121   if (instr->hydrogen()->RequiresHoleCheck()) { | 
| 3253     __ vldr(result, elements, 0); | 3122     __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); | 
| 3254     if (instr->hydrogen()->RequiresHoleCheck()) { | 3123     __ cmp(scratch, Operand(kHoleNanUpper32)); | 
| 3255       __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); | 3124     DeoptimizeIf(eq, instr->environment()); | 
| 3256       __ cmp(scratch, Operand(kHoleNanUpper32)); |  | 
| 3257       DeoptimizeIf(eq, instr->environment()); |  | 
| 3258     } |  | 
| 3259   } else { |  | 
| 3260       __ ldr(sfpd_hi, MemOperand(elements, base_offset + kPointerSize)); |  | 
| 3261       __ ldr(sfpd_lo, MemOperand(elements, base_offset)); |  | 
| 3262     if (instr->hydrogen()->RequiresHoleCheck()) { |  | 
| 3263       ASSERT(kPointerSize == sizeof(kHoleNanLower32)); |  | 
| 3264       __ cmp(sfpd_hi, Operand(kHoleNanUpper32)); |  | 
| 3265       DeoptimizeIf(eq, instr->environment()); |  | 
| 3266     } |  | 
| 3267   } | 3125   } | 
| 3268 } | 3126 } | 
| 3269 | 3127 | 
| 3270 | 3128 | 
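The hole check in the fixed-double-array load above reads only the upper word of the element (the word at offset `sizeof(kHoleNanLower32)`) and compares it against `kHoleNanUpper32`, since the hole is encoded as a NaN with a reserved bit pattern in its high 32 bits. A minimal host-side sketch of the same idea, assuming a little-endian layout and an illustrative sentinel value (the real `kHoleNanUpper32` constant lives in V8's headers and is not reproduced here):

```cpp
#include <cstdint>
#include <cstring>
#include <cstdio>

// Illustrative sentinel only: a quiet-NaN upper word standing in for V8's
// real kHoleNanUpper32, whose exact value is not shown in this diff.
const uint32_t kFakeHoleUpper32 = 0x7FF7ABCD;

// Build a double whose upper word carries the sentinel (lower word arbitrary).
double MakeHole() {
  uint64_t bits = ((uint64_t)kFakeHoleUpper32 << 32) | 0xFFFFFFFFu;
  double d;
  std::memcpy(&d, &bits, sizeof d);
  return d;
}

// Mirrors the generated check: read the word 4 bytes above the element's base
// (the high word on a little-endian target) and compare it to the sentinel.
bool IsHole(double d) {
  uint32_t words[2];
  std::memcpy(words, &d, sizeof words);
  return words[1] == kFakeHoleUpper32;
}

int main() {
  std::printf("%d %d\n", IsHole(MakeHole()), IsHole(1.5));  // prints "1 0"
}
```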
| 3271 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 3129 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 
| 3272   Register elements = ToRegister(instr->elements()); | 3130   Register elements = ToRegister(instr->elements()); | 
| 3273   Register result = ToRegister(instr->result()); | 3131   Register result = ToRegister(instr->result()); | 
| 3274   Register scratch = scratch0(); | 3132   Register scratch = scratch0(); | 
| 3275   Register store_base = scratch; | 3133   Register store_base = scratch; | 
| 3276   int offset = 0; | 3134   int offset = 0; | 
| (...skipping 415 matching lines...) |
| 3692   // We can make rsb conditional because the previous cmp instruction | 3550   // We can make rsb conditional because the previous cmp instruction | 
| 3693   // will clear the V (overflow) flag and rsb won't set this flag | 3551   // will clear the V (overflow) flag and rsb won't set this flag | 
| 3694   // if input is positive. | 3552   // if input is positive. | 
| 3695   __ rsb(result, input, Operand(0), SetCC, mi); | 3553   __ rsb(result, input, Operand(0), SetCC, mi); | 
| 3696   // Deoptimize on overflow. | 3554   // Deoptimize on overflow. | 
| 3697   DeoptimizeIf(vs, instr->environment()); | 3555   DeoptimizeIf(vs, instr->environment()); | 
| 3698 } | 3556 } | 
| 3699 | 3557 | 
| 3700 | 3558 | 
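The conditional `rsb` above is paired with a deoptimize-on-overflow check because exactly one int32 input has no representable absolute value: negating kMinInt (0x80000000) overflows and sets the V flag. A small host-side sketch of the case that deopt guards against (plain C++, not the generated ARM sequence):

```cpp
#include <cstdint>
#include <climits>
#include <cstdio>

// Returns false instead of a wrong result when |x| does not fit in int32_t,
// which is the overflow case the generated code deoptimizes on.
bool CheckedAbs(int32_t x, int32_t* out) {
  if (x == INT32_MIN) return false;   // -INT32_MIN is not representable
  *out = x < 0 ? -x : x;
  return true;
}

int main() {
  int32_t r;
  std::printf("%d\n", CheckedAbs(-7, &r));         // 1, r == 7
  std::printf("%d\n", CheckedAbs(INT32_MIN, &r));  // 0, would have overflowed
}
```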
| 3701 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { | 3559 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { | 
| 3702   CpuFeatures::Scope scope(VFP2); |  | 
| 3703   // Class for deferred case. | 3560   // Class for deferred case. | 
| 3704   class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { | 3561   class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { | 
| 3705    public: | 3562    public: | 
| 3706     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, | 3563     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, | 
| 3707                                     LUnaryMathOperation* instr) | 3564                                     LUnaryMathOperation* instr) | 
| 3708         : LDeferredCode(codegen), instr_(instr) { } | 3565         : LDeferredCode(codegen), instr_(instr) { } | 
| 3709     virtual void Generate() { | 3566     virtual void Generate() { | 
| 3710       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); | 3567       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); | 
| 3711     } | 3568     } | 
| 3712     virtual LInstruction* instr() { return instr_; } | 3569     virtual LInstruction* instr() { return instr_; } | 
| (...skipping 16 matching lines...) |
| 3729     // Smi check. | 3586     // Smi check. | 
| 3730     __ JumpIfNotSmi(input, deferred->entry()); | 3587     __ JumpIfNotSmi(input, deferred->entry()); | 
| 3731     // If smi, handle it directly. | 3588     // If smi, handle it directly. | 
| 3732     EmitIntegerMathAbs(instr); | 3589     EmitIntegerMathAbs(instr); | 
| 3733     __ bind(deferred->exit()); | 3590     __ bind(deferred->exit()); | 
| 3734   } | 3591   } | 
| 3735 } | 3592 } | 
| 3736 | 3593 | 
| 3737 | 3594 | 
| 3738 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { | 3595 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { | 
| 3739   CpuFeatures::Scope scope(VFP2); | 3596   DoubleRegister input = ToDoubleRegister(instr->value()); | 
| 3740   DwVfpRegister input = ToDoubleRegister(instr->value()); |  | 
| 3741   Register result = ToRegister(instr->result()); | 3597   Register result = ToRegister(instr->result()); | 
| 3742   Register scratch = scratch0(); | 3598   Register scratch = scratch0(); | 
| 3743 | 3599 | 
| 3744   __ EmitVFPTruncate(kRoundToMinusInf, | 3600   __ EmitVFPTruncate(kRoundToMinusInf, | 
| 3745                      result, | 3601                      result, | 
| 3746                      input, | 3602                      input, | 
| 3747                      scratch, | 3603                      scratch, | 
| 3748                      double_scratch0()); | 3604                      double_scratch0()); | 
| 3749   DeoptimizeIf(ne, instr->environment()); | 3605   DeoptimizeIf(ne, instr->environment()); | 
| 3750 | 3606 | 
| 3751   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3607   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 
| 3752     // Test for -0. | 3608     // Test for -0. | 
| 3753     Label done; | 3609     Label done; | 
| 3754     __ cmp(result, Operand(0)); | 3610     __ cmp(result, Operand(0)); | 
| 3755     __ b(ne, &done); | 3611     __ b(ne, &done); | 
| 3756     __ vmov(scratch, input.high()); | 3612     __ vmov(scratch, input.high()); | 
| 3757     __ tst(scratch, Operand(HeapNumber::kSignMask)); | 3613     __ tst(scratch, Operand(HeapNumber::kSignMask)); | 
| 3758     DeoptimizeIf(ne, instr->environment()); | 3614     DeoptimizeIf(ne, instr->environment()); | 
| 3759     __ bind(&done); | 3615     __ bind(&done); | 
| 3760   } | 3616   } | 
| 3761 } | 3617 } | 
| 3762 | 3618 | 
| 3763 | 3619 | 
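The kBailoutOnMinusZero path in DoMathFloor (and in DoMathRound below) cannot tell -0 from +0 by comparing the integer result, so it inspects the sign bit of the input's high word via HeapNumber::kSignMask. The same distinction in portable C++, for illustration only:

```cpp
#include <cmath>
#include <cstdio>

int main() {
  double minus_zero = -0.0;
  // An equality comparison cannot separate the two zeros...
  std::printf("%d\n", minus_zero == 0.0);         // 1
  // ...but the sign bit can, which is what the generated code tests.
  std::printf("%d\n", std::signbit(minus_zero));  // 1
  std::printf("%d\n", std::signbit(0.0));         // 0
}
```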
| 3764 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { | 3620 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { | 
| 3765   CpuFeatures::Scope scope(VFP2); | 3621   DoubleRegister input = ToDoubleRegister(instr->value()); | 
| 3766   DwVfpRegister input = ToDoubleRegister(instr->value()); |  | 
| 3767   Register result = ToRegister(instr->result()); | 3622   Register result = ToRegister(instr->result()); | 
| 3768   DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); | 3623   DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); | 
| 3769   Register scratch = scratch0(); | 3624   Register scratch = scratch0(); | 
| 3770   Label done, check_sign_on_zero; | 3625   Label done, check_sign_on_zero; | 
| 3771 | 3626 | 
| 3772   // Extract exponent bits. | 3627   // Extract exponent bits. | 
| 3773   __ vmov(result, input.high()); | 3628   __ vmov(result, input.high()); | 
| 3774   __ ubfx(scratch, | 3629   __ ubfx(scratch, | 
| 3775           result, | 3630           result, | 
| 3776           HeapNumber::kExponentShift, | 3631           HeapNumber::kExponentShift, | 
| (...skipping 44 matching lines...) |
| 3821     __ bind(&check_sign_on_zero); | 3676     __ bind(&check_sign_on_zero); | 
| 3822     __ vmov(scratch, input.high()); | 3677     __ vmov(scratch, input.high()); | 
| 3823     __ tst(scratch, Operand(HeapNumber::kSignMask)); | 3678     __ tst(scratch, Operand(HeapNumber::kSignMask)); | 
| 3824     DeoptimizeIf(ne, instr->environment()); | 3679     DeoptimizeIf(ne, instr->environment()); | 
| 3825   } | 3680   } | 
| 3826   __ bind(&done); | 3681   __ bind(&done); | 
| 3827 } | 3682 } | 
| 3828 | 3683 | 
| 3829 | 3684 | 
| 3830 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { | 3685 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { | 
| 3831   CpuFeatures::Scope scope(VFP2); | 3686   DoubleRegister input = ToDoubleRegister(instr->value()); | 
| 3832   DwVfpRegister input = ToDoubleRegister(instr->value()); | 3687   DoubleRegister result = ToDoubleRegister(instr->result()); | 
| 3833   DwVfpRegister result = ToDoubleRegister(instr->result()); |  | 
| 3834   __ vsqrt(result, input); | 3688   __ vsqrt(result, input); | 
| 3835 } | 3689 } | 
| 3836 | 3690 | 
| 3837 | 3691 | 
| 3838 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { | 3692 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { | 
| 3839   CpuFeatures::Scope scope(VFP2); | 3693   DoubleRegister input = ToDoubleRegister(instr->value()); | 
| 3840   DwVfpRegister input = ToDoubleRegister(instr->value()); | 3694   DoubleRegister result = ToDoubleRegister(instr->result()); | 
| 3841   DwVfpRegister result = ToDoubleRegister(instr->result()); | 3695   DoubleRegister temp = ToDoubleRegister(instr->temp()); | 
| 3842   DwVfpRegister temp = ToDoubleRegister(instr->temp()); |  | 
| 3843 | 3696 | 
| 3844   // Note that according to ECMA-262 15.8.2.13: | 3697   // Note that according to ECMA-262 15.8.2.13: | 
| 3845   // Math.pow(-Infinity, 0.5) == Infinity | 3698   // Math.pow(-Infinity, 0.5) == Infinity | 
| 3846   // Math.sqrt(-Infinity) == NaN | 3699   // Math.sqrt(-Infinity) == NaN | 
| 3847   Label done; | 3700   Label done; | 
| 3848   __ vmov(temp, -V8_INFINITY, scratch0()); | 3701   __ vmov(temp, -V8_INFINITY, scratch0()); | 
| 3849   __ VFPCompareAndSetFlags(input, temp); | 3702   __ VFPCompareAndSetFlags(input, temp); | 
| 3850   __ vneg(result, temp, eq); | 3703   __ vneg(result, temp, eq); | 
| 3851   __ b(&done, eq); | 3704   __ b(&done, eq); | 
| 3852 | 3705 | 
| 3853   // Add +0 to convert -0 to +0. | 3706   // Add +0 to convert -0 to +0. | 
| 3854   __ vadd(result, input, kDoubleRegZero); | 3707   __ vadd(result, input, kDoubleRegZero); | 
| 3855   __ vsqrt(result, result); | 3708   __ vsqrt(result, result); | 
| 3856   __ bind(&done); | 3709   __ bind(&done); | 
| 3857 } | 3710 } | 
| 3858 | 3711 | 
| 3859 | 3712 | 
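DoMathPowHalf special-cases -Infinity and adds +0 before the `vsqrt` because ECMA-262 15.8.2.13 requires Math.pow(-Infinity, 0.5) to be +Infinity and Math.pow(-0, 0.5) to be +0, while a raw square root yields NaN and -0 for those inputs. The same edge cases can be observed with the C math library, which follows the corresponding IEEE/C99 rules:

```cpp
#include <cmath>
#include <cstdio>

int main() {
  // pow gives the result the spec requires for -Infinity...
  std::printf("%f\n", std::pow(-INFINITY, 0.5));  // inf
  // ...while a raw sqrt does not:
  std::printf("%f\n", std::sqrt(-INFINITY));      // nan
  std::printf("%f\n", std::sqrt(-0.0));           // typically -0.000000
  // Adding +0 first normalizes -0 to +0, as the generated code does.
  std::printf("%f\n", std::sqrt(-0.0 + 0.0));     // 0.000000
}
```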
| 3860 void LCodeGen::DoPower(LPower* instr) { | 3713 void LCodeGen::DoPower(LPower* instr) { | 
| 3861   CpuFeatures::Scope scope(VFP2); |  | 
| 3862   Representation exponent_type = instr->hydrogen()->right()->representation(); | 3714   Representation exponent_type = instr->hydrogen()->right()->representation(); | 
| 3863   // Having marked this as a call, we can use any registers. | 3715   // Having marked this as a call, we can use any registers. | 
| 3864   // Just make sure that the input/output registers are the expected ones. | 3716   // Just make sure that the input/output registers are the expected ones. | 
| 3865   ASSERT(!instr->right()->IsDoubleRegister() || | 3717   ASSERT(!instr->right()->IsDoubleRegister() || | 
| 3866          ToDoubleRegister(instr->right()).is(d2)); | 3718          ToDoubleRegister(instr->right()).is(d2)); | 
| 3867   ASSERT(!instr->right()->IsRegister() || | 3719   ASSERT(!instr->right()->IsRegister() || | 
| 3868          ToRegister(instr->right()).is(r2)); | 3720          ToRegister(instr->right()).is(r2)); | 
| 3869   ASSERT(ToDoubleRegister(instr->left()).is(d1)); | 3721   ASSERT(ToDoubleRegister(instr->left()).is(d1)); | 
| 3870   ASSERT(ToDoubleRegister(instr->result()).is(d3)); | 3722   ASSERT(ToDoubleRegister(instr->result()).is(d3)); | 
| 3871 | 3723 | 
| (...skipping 12 matching lines...) |
| 3884     __ CallStub(&stub); | 3736     __ CallStub(&stub); | 
| 3885   } else { | 3737   } else { | 
| 3886     ASSERT(exponent_type.IsDouble()); | 3738     ASSERT(exponent_type.IsDouble()); | 
| 3887     MathPowStub stub(MathPowStub::DOUBLE); | 3739     MathPowStub stub(MathPowStub::DOUBLE); | 
| 3888     __ CallStub(&stub); | 3740     __ CallStub(&stub); | 
| 3889   } | 3741   } | 
| 3890 } | 3742 } | 
| 3891 | 3743 | 
| 3892 | 3744 | 
| 3893 void LCodeGen::DoRandom(LRandom* instr) { | 3745 void LCodeGen::DoRandom(LRandom* instr) { | 
| 3894   CpuFeatures::Scope scope(VFP2); |  | 
| 3895   class DeferredDoRandom: public LDeferredCode { | 3746   class DeferredDoRandom: public LDeferredCode { | 
| 3896    public: | 3747    public: | 
| 3897     DeferredDoRandom(LCodeGen* codegen, LRandom* instr) | 3748     DeferredDoRandom(LCodeGen* codegen, LRandom* instr) | 
| 3898         : LDeferredCode(codegen), instr_(instr) { } | 3749         : LDeferredCode(codegen), instr_(instr) { } | 
| 3899     virtual void Generate() { codegen()->DoDeferredRandom(instr_); } | 3750     virtual void Generate() { codegen()->DoDeferredRandom(instr_); } | 
| 3900     virtual LInstruction* instr() { return instr_; } | 3751     virtual LInstruction* instr() { return instr_; } | 
| 3901    private: | 3752    private: | 
| 3902     LRandom* instr_; | 3753     LRandom* instr_; | 
| 3903   }; | 3754   }; | 
| 3904 | 3755 | 
| (...skipping 58 matching lines...) |
| 3963 | 3814 | 
| 3964 | 3815 | 
| 3965 void LCodeGen::DoDeferredRandom(LRandom* instr) { | 3816 void LCodeGen::DoDeferredRandom(LRandom* instr) { | 
| 3966   __ PrepareCallCFunction(1, scratch0()); | 3817   __ PrepareCallCFunction(1, scratch0()); | 
| 3967   __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); | 3818   __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); | 
| 3968   // Return value is in r0. | 3819   // Return value is in r0. | 
| 3969 } | 3820 } | 
| 3970 | 3821 | 
| 3971 | 3822 | 
| 3972 void LCodeGen::DoMathExp(LMathExp* instr) { | 3823 void LCodeGen::DoMathExp(LMathExp* instr) { | 
| 3973   CpuFeatures::Scope scope(VFP2); | 3824   DoubleRegister input = ToDoubleRegister(instr->value()); | 
| 3974   DwVfpRegister input = ToDoubleRegister(instr->value()); | 3825   DoubleRegister result = ToDoubleRegister(instr->result()); | 
| 3975   DwVfpRegister result = ToDoubleRegister(instr->result()); | 3826   DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); | 
| 3976   DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); | 3827   DoubleRegister double_scratch2 = double_scratch0(); | 
| 3977   DwVfpRegister double_scratch2 = double_scratch0(); |  | 
| 3978   Register temp1 = ToRegister(instr->temp1()); | 3828   Register temp1 = ToRegister(instr->temp1()); | 
| 3979   Register temp2 = ToRegister(instr->temp2()); | 3829   Register temp2 = ToRegister(instr->temp2()); | 
| 3980 | 3830 | 
| 3981   MathExpGenerator::EmitMathExp( | 3831   MathExpGenerator::EmitMathExp( | 
| 3982       masm(), input, result, double_scratch1, double_scratch2, | 3832       masm(), input, result, double_scratch1, double_scratch2, | 
| 3983       temp1, temp2, scratch0()); | 3833       temp1, temp2, scratch0()); | 
| 3984 } | 3834 } | 
| 3985 | 3835 | 
| 3986 | 3836 | 
| 3987 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { | 3837 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { | 
| (...skipping 265 matching lines...) |
| 4253     } | 4103     } | 
| 4254     __ cmp(ip, ToRegister(instr->length())); | 4104     __ cmp(ip, ToRegister(instr->length())); | 
| 4255   } else { | 4105   } else { | 
| 4256     __ cmp(ToRegister(instr->index()), ToRegister(instr->length())); | 4106     __ cmp(ToRegister(instr->index()), ToRegister(instr->length())); | 
| 4257   } | 4107   } | 
| 4258   DeoptimizeIf(hs, instr->environment()); | 4108   DeoptimizeIf(hs, instr->environment()); | 
| 4259 } | 4109 } | 
| 4260 | 4110 | 
| 4261 | 4111 | 
| 4262 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { | 4112 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { | 
| 4263   CpuFeatures::Scope scope(VFP2); |  | 
| 4264   Register external_pointer = ToRegister(instr->elements()); | 4113   Register external_pointer = ToRegister(instr->elements()); | 
| 4265   Register key = no_reg; | 4114   Register key = no_reg; | 
| 4266   ElementsKind elements_kind = instr->elements_kind(); | 4115   ElementsKind elements_kind = instr->elements_kind(); | 
| 4267   bool key_is_constant = instr->key()->IsConstantOperand(); | 4116   bool key_is_constant = instr->key()->IsConstantOperand(); | 
| 4268   int constant_key = 0; | 4117   int constant_key = 0; | 
| 4269   if (key_is_constant) { | 4118   if (key_is_constant) { | 
| 4270     constant_key = ToInteger32(LConstantOperand::cast(instr->key())); | 4119     constant_key = ToInteger32(LConstantOperand::cast(instr->key())); | 
| 4271     if (constant_key & 0xF0000000) { | 4120     if (constant_key & 0xF0000000) { | 
| 4272       Abort("array index constant value too big."); | 4121       Abort("array index constant value too big."); | 
| 4273     } | 4122     } | 
| (...skipping 50 matching lines...) |
| 4324       case DICTIONARY_ELEMENTS: | 4173       case DICTIONARY_ELEMENTS: | 
| 4325       case NON_STRICT_ARGUMENTS_ELEMENTS: | 4174       case NON_STRICT_ARGUMENTS_ELEMENTS: | 
| 4326         UNREACHABLE(); | 4175         UNREACHABLE(); | 
| 4327         break; | 4176         break; | 
| 4328     } | 4177     } | 
| 4329   } | 4178   } | 
| 4330 } | 4179 } | 
| 4331 | 4180 | 
| 4332 | 4181 | 
| 4333 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { | 4182 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { | 
| 4334   CpuFeatures::Scope scope(VFP2); |  | 
| 4335   DwVfpRegister value = ToDoubleRegister(instr->value()); | 4183   DwVfpRegister value = ToDoubleRegister(instr->value()); | 
| 4336   Register elements = ToRegister(instr->elements()); | 4184   Register elements = ToRegister(instr->elements()); | 
| 4337   Register key = no_reg; | 4185   Register key = no_reg; | 
| 4338   Register scratch = scratch0(); | 4186   Register scratch = scratch0(); | 
| 4339   bool key_is_constant = instr->key()->IsConstantOperand(); | 4187   bool key_is_constant = instr->key()->IsConstantOperand(); | 
| 4340   int constant_key = 0; | 4188   int constant_key = 0; | 
| 4341 | 4189 | 
| 4342   // Calculate the effective address of the slot in the array to store the | 4190   // Calculate the effective address of the slot in the array to store the | 
| 4343   // double value. | 4191   // double value. | 
| 4344   if (key_is_constant) { | 4192   if (key_is_constant) { | 
| (...skipping 256 matching lines...) |
| 4601 | 4449 | 
| 4602 | 4450 | 
| 4603 void LCodeGen::DoStringLength(LStringLength* instr) { | 4451 void LCodeGen::DoStringLength(LStringLength* instr) { | 
| 4604   Register string = ToRegister(instr->string()); | 4452   Register string = ToRegister(instr->string()); | 
| 4605   Register result = ToRegister(instr->result()); | 4453   Register result = ToRegister(instr->result()); | 
| 4606   __ ldr(result, FieldMemOperand(string, String::kLengthOffset)); | 4454   __ ldr(result, FieldMemOperand(string, String::kLengthOffset)); | 
| 4607 } | 4455 } | 
| 4608 | 4456 | 
| 4609 | 4457 | 
| 4610 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { | 4458 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { | 
| 4611   CpuFeatures::Scope scope(VFP2); |  | 
| 4612   LOperand* input = instr->value(); | 4459   LOperand* input = instr->value(); | 
| 4613   ASSERT(input->IsRegister() || input->IsStackSlot()); | 4460   ASSERT(input->IsRegister() || input->IsStackSlot()); | 
| 4614   LOperand* output = instr->result(); | 4461   LOperand* output = instr->result(); | 
| 4615   ASSERT(output->IsDoubleRegister()); | 4462   ASSERT(output->IsDoubleRegister()); | 
| 4616   SwVfpRegister single_scratch = double_scratch0().low(); | 4463   SwVfpRegister single_scratch = double_scratch0().low(); | 
| 4617   if (input->IsStackSlot()) { | 4464   if (input->IsStackSlot()) { | 
| 4618     Register scratch = scratch0(); | 4465     Register scratch = scratch0(); | 
| 4619     __ ldr(scratch, ToMemOperand(input)); | 4466     __ ldr(scratch, ToMemOperand(input)); | 
| 4620     __ vmov(single_scratch, scratch); | 4467     __ vmov(single_scratch, scratch); | 
| 4621   } else { | 4468   } else { | 
| 4622     __ vmov(single_scratch, ToRegister(input)); | 4469     __ vmov(single_scratch, ToRegister(input)); | 
| 4623   } | 4470   } | 
| 4624   __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch); | 4471   __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch); | 
| 4625 } | 4472 } | 
| 4626 | 4473 | 
| 4627 | 4474 | 
| 4628 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { | 4475 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { | 
| 4629   CpuFeatures::Scope scope(VFP2); |  | 
| 4630   LOperand* input = instr->value(); | 4476   LOperand* input = instr->value(); | 
| 4631   LOperand* output = instr->result(); | 4477   LOperand* output = instr->result(); | 
| 4632 | 4478 | 
| 4633   SwVfpRegister flt_scratch = double_scratch0().low(); | 4479   SwVfpRegister flt_scratch = double_scratch0().low(); | 
| 4634   __ vmov(flt_scratch, ToRegister(input)); | 4480   __ vmov(flt_scratch, ToRegister(input)); | 
| 4635   __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch); | 4481   __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch); | 
| 4636 } | 4482 } | 
| 4637 | 4483 | 
| 4638 | 4484 | 
| 4639 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { | 4485 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { | 
| (...skipping 41 matching lines...) |
| 4681   Register reg = ToRegister(input); | 4527   Register reg = ToRegister(input); | 
| 4682 | 4528 | 
| 4683   DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr); | 4529   DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr); | 
| 4684   __ cmp(reg, Operand(Smi::kMaxValue)); | 4530   __ cmp(reg, Operand(Smi::kMaxValue)); | 
| 4685   __ b(hi, deferred->entry()); | 4531   __ b(hi, deferred->entry()); | 
| 4686   __ SmiTag(reg, reg); | 4532   __ SmiTag(reg, reg); | 
| 4687   __ bind(deferred->exit()); | 4533   __ bind(deferred->exit()); | 
| 4688 } | 4534 } | 
| 4689 | 4535 | 
| 4690 | 4536 | 
| 4691 // Convert unsigned integer with specified number of leading zeroes in binary |  | 
| 4692 // representation to IEEE 754 double. |  | 
| 4693 // Integer to convert is passed in register hiword. |  | 
| 4694 // Resulting double is returned in registers hiword:loword. |  | 
| 4695 // This function does not work correctly for 0. |  | 
| 4696 static void GenerateUInt2Double(MacroAssembler* masm, |  | 
| 4697                                 Register hiword, |  | 
| 4698                                 Register loword, |  | 
| 4699                                 Register scratch, |  | 
| 4700                                 int leading_zeroes) { |  | 
| 4701   const int meaningful_bits = kBitsPerInt - leading_zeroes - 1; |  | 
| 4702   const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits; |  | 
| 4703 |  | 
| 4704   const int mantissa_shift_for_hi_word = |  | 
| 4705       meaningful_bits - HeapNumber::kMantissaBitsInTopWord; |  | 
| 4706   const int mantissa_shift_for_lo_word = |  | 
| 4707       kBitsPerInt - mantissa_shift_for_hi_word; |  | 
| 4708   masm->mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift)); |  | 
| 4709   if (mantissa_shift_for_hi_word > 0) { |  | 
| 4710     masm->mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word)); |  | 
| 4711     masm->orr(hiword, scratch, |  | 
| 4712               Operand(hiword, LSR, mantissa_shift_for_hi_word)); |  | 
| 4713   } else { |  | 
| 4714     masm->mov(loword, Operand(0, RelocInfo::NONE)); |  | 
| 4715     masm->orr(hiword, scratch, |  | 
| 4716               Operand(hiword, LSL, -mantissa_shift_for_hi_word)); |  | 
| 4717   } |  | 
| 4718 |  | 
| 4719   // If least significant bit of biased exponent was not 1 it was corrupted |  | 
| 4720   // by most significant bit of mantissa so we should fix that. |  | 
| 4721   if (!(biased_exponent & 1)) { |  | 
| 4722     masm->bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift)); |  | 
| 4723   } |  | 
| 4724 } |  | 
| 4725 |  | 
| 4726 |  | 
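The deleted GenerateUInt2Double helper assembled the IEEE 754 bit pattern of an unsigned integer directly in a register pair for the non-VFP path, given a precomputed number of leading zeros. Below is a host-side re-derivation of its shifts, valid for the two leading_zeroes values its callers passed (0 and 1) and, like the original, not handling a zero input; an illustrative sketch, not V8 code:

```cpp
#include <cstdint>
#include <cstring>
#include <cstdio>

// Re-derivation of the deleted helper's bit math (illustrative, not V8 code).
uint64_t UInt2DoubleBits(uint32_t value, int leading_zeroes) {
  const int kExponentBias = 1023;
  const int kMantissaBitsInTopWord = 20;
  const int meaningful_bits = 32 - leading_zeroes - 1;
  const int biased_exponent = kExponentBias + meaningful_bits;
  const int shift_hi = meaningful_bits - kMantissaBitsInTopWord;  // 11 or 10
  const int shift_lo = 32 - shift_hi;

  uint32_t hi = (uint32_t)biased_exponent << 20;  // exponent field
  uint32_t lo = value << shift_lo;                // low mantissa word
  hi |= value >> shift_hi;                        // high mantissa bits + implicit 1
  if (!(biased_exponent & 1)) {
    // The implicit leading 1 landed on the exponent's least significant bit;
    // clear it again when that bit should be 0 (the original's bic).
    hi &= ~(1u << 20);
  }
  return ((uint64_t)hi << 32) | lo;
}

int main() {
  uint32_t v = 0x90000001u;  // bit 31 set, so leading_zeroes == 0
  uint64_t bits = UInt2DoubleBits(v, 0);
  double d;
  std::memcpy(&d, &bits, sizeof d);
  std::printf("%.1f vs %.1f\n", d, (double)v);  // both print 2415919105.0
}
```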
| 4727 void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, | 4537 void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, | 
| 4728                                     LOperand* value, | 4538                                     LOperand* value, | 
| 4729                                     IntegerSignedness signedness) { | 4539                                     IntegerSignedness signedness) { | 
| 4730   Label slow; | 4540   Label slow; | 
| 4731   Register src = ToRegister(value); | 4541   Register src = ToRegister(value); | 
| 4732   Register dst = ToRegister(instr->result()); | 4542   Register dst = ToRegister(instr->result()); | 
| 4733   DwVfpRegister dbl_scratch = double_scratch0(); | 4543   DoubleRegister dbl_scratch = double_scratch0(); | 
| 4734   SwVfpRegister flt_scratch = dbl_scratch.low(); | 4544   SwVfpRegister flt_scratch = dbl_scratch.low(); | 
| 4735 | 4545 | 
| 4736   // Preserve the value of all registers. | 4546   // Preserve the value of all registers. | 
| 4737   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); | 4547   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); | 
| 4738 | 4548 | 
| 4739   Label done; | 4549   Label done; | 
| 4740   if (signedness == SIGNED_INT32) { | 4550   if (signedness == SIGNED_INT32) { | 
| 4741     // There was overflow, so bits 30 and 31 of the original integer | 4551     // There was overflow, so bits 30 and 31 of the original integer | 
| 4742     // disagree. Try to allocate a heap number in new space and store | 4552     // disagree. Try to allocate a heap number in new space and store | 
| 4743     // the value in there. If that fails, call the runtime system. | 4553     // the value in there. If that fails, call the runtime system. | 
| 4744     if (dst.is(src)) { | 4554     if (dst.is(src)) { | 
| 4745       __ SmiUntag(src, dst); | 4555       __ SmiUntag(src, dst); | 
| 4746       __ eor(src, src, Operand(0x80000000)); | 4556       __ eor(src, src, Operand(0x80000000)); | 
| 4747     } | 4557     } | 
| 4748     if (CpuFeatures::IsSupported(VFP2)) { | 4558     __ vmov(flt_scratch, src); | 
| 4749       CpuFeatures::Scope scope(VFP2); | 4559     __ vcvt_f64_s32(dbl_scratch, flt_scratch); | 
| 4750       __ vmov(flt_scratch, src); |  | 
| 4751       __ vcvt_f64_s32(dbl_scratch, flt_scratch); |  | 
| 4752     } else { |  | 
| 4753       FloatingPointHelper::Destination dest = |  | 
| 4754           FloatingPointHelper::kCoreRegisters; |  | 
| 4755       FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, d0, |  | 
| 4756                                               sfpd_lo, sfpd_hi, |  | 
| 4757                                               scratch0(), s0); |  | 
| 4758     } |  | 
| 4759   } else { | 4560   } else { | 
| 4760     if (CpuFeatures::IsSupported(VFP2)) { | 4561     __ vmov(flt_scratch, src); | 
| 4761       CpuFeatures::Scope scope(VFP2); | 4562     __ vcvt_f64_u32(dbl_scratch, flt_scratch); | 
| 4762       __ vmov(flt_scratch, src); |  | 
| 4763       __ vcvt_f64_u32(dbl_scratch, flt_scratch); |  | 
| 4764     } else { |  | 
| 4765       Label no_leading_zero, done; |  | 
| 4766       __ tst(src, Operand(0x80000000)); |  | 
| 4767       __ b(ne, &no_leading_zero); |  | 
| 4768 |  | 
| 4769       // Integer has one leading zero. |  | 
| 4770       GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 1); |  | 
| 4771       __ b(&done); |  | 
| 4772 |  | 
| 4773       __ bind(&no_leading_zero); |  | 
| 4774       GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 0); |  | 
| 4775       __ b(&done); |  | 
| 4776     } |  | 
| 4777   } | 4563   } | 
| 4778 | 4564 | 
| 4779   if (FLAG_inline_new) { | 4565   if (FLAG_inline_new) { | 
| 4780     __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex); | 4566     __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | 
| 4781     __ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT); | 4567     __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT); | 
| 4782     __ Move(dst, r5); | 4568     __ Move(dst, r5); | 
| 4783     __ b(&done); | 4569     __ b(&done); | 
| 4784   } | 4570   } | 
| 4785 | 4571 | 
| 4786   // Slow case: Call the runtime system to do the number allocation. | 4572   // Slow case: Call the runtime system to do the number allocation. | 
| 4787   __ bind(&slow); | 4573   __ bind(&slow); | 
| 4788 | 4574 | 
| 4789   // TODO(3095996): Put a valid pointer value in the stack slot where the result | 4575   // TODO(3095996): Put a valid pointer value in the stack slot where the result | 
| 4790   // register is stored, as this register is in the pointer map, but contains an | 4576   // register is stored, as this register is in the pointer map, but contains an | 
| 4791   // integer value. | 4577   // integer value. | 
| 4792   __ mov(ip, Operand(0)); | 4578   __ mov(ip, Operand(0)); | 
| 4793   __ StoreToSafepointRegisterSlot(ip, dst); | 4579   __ StoreToSafepointRegisterSlot(ip, dst); | 
| 4794   CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); | 4580   CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); | 
| 4795   __ Move(dst, r0); | 4581   __ Move(dst, r0); | 
| 4796   __ sub(dst, dst, Operand(kHeapObjectTag)); | 4582   __ sub(dst, dst, Operand(kHeapObjectTag)); | 
| 4797 | 4583 | 
| 4798   // Done. Put the value in dbl_scratch into the value of the allocated heap | 4584   // Done. Put the value in dbl_scratch into the value of the allocated heap | 
| 4799   // number. | 4585   // number. | 
| 4800   __ bind(&done); | 4586   __ bind(&done); | 
| 4801   if (CpuFeatures::IsSupported(VFP2)) { | 4587   __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); | 
| 4802     CpuFeatures::Scope scope(VFP2); |  | 
| 4803     __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); |  | 
| 4804   } else { |  | 
| 4805     __ str(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset)); |  | 
| 4806     __ str(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset)); |  | 
| 4807   } |  | 
| 4808   __ add(dst, dst, Operand(kHeapObjectTag)); | 4588   __ add(dst, dst, Operand(kHeapObjectTag)); | 
| 4809   __ StoreToSafepointRegisterSlot(dst, dst); | 4589   __ StoreToSafepointRegisterSlot(dst, dst); | 
| 4810 } | 4590 } | 
| 4811 | 4591 | 
| 4812 | 4592 | 
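In the SIGNED_INT32 branch of DoDeferredNumberTagI above, "bits 30 and 31 disagree" is exactly the condition under which smi tagging (a left shift by one) wraps; untagging the wrapped value then differs from the original only in bit 31, which the `eor` with 0x80000000 restores. A host-side sketch of that recovery, assuming two's-complement wrap and an arithmetic right shift as on the ARM target:

```cpp
#include <cstdint>
#include <cassert>

// Illustrative smi helpers (not V8's): tag = shift left by one (may wrap),
// untag = arithmetic shift right by one.
int32_t SmiTagWrapping(int32_t x) { return (int32_t)((uint32_t)x << 1); }
int32_t SmiUntag(int32_t smi)     { return smi >> 1; }

int main() {
  int32_t x = 0x40000000;  // bits 30 and 31 disagree: too big for a 31-bit smi
  int32_t wrapped = SmiUntag(SmiTagWrapping(x));
  assert(wrapped == (int32_t)(x ^ 0x80000000));         // only bit 31 differs
  int32_t recovered = (int32_t)(wrapped ^ 0x80000000);  // the eor in the code
  assert(recovered == x);
  return 0;
}
```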
| 4813 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { | 4593 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { | 
| 4814   class DeferredNumberTagD: public LDeferredCode { | 4594   class DeferredNumberTagD: public LDeferredCode { | 
| 4815    public: | 4595    public: | 
| 4816     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) | 4596     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) | 
| 4817         : LDeferredCode(codegen), instr_(instr) { } | 4597         : LDeferredCode(codegen), instr_(instr) { } | 
| 4818     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } | 4598     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } | 
| 4819     virtual LInstruction* instr() { return instr_; } | 4599     virtual LInstruction* instr() { return instr_; } | 
| 4820    private: | 4600    private: | 
| 4821     LNumberTagD* instr_; | 4601     LNumberTagD* instr_; | 
| 4822   }; | 4602   }; | 
| 4823 | 4603 | 
| 4824   DwVfpRegister input_reg = ToDoubleRegister(instr->value()); | 4604   DoubleRegister input_reg = ToDoubleRegister(instr->value()); | 
| 4825   Register scratch = scratch0(); | 4605   Register scratch = scratch0(); | 
| 4826   Register reg = ToRegister(instr->result()); | 4606   Register reg = ToRegister(instr->result()); | 
| 4827   Register temp1 = ToRegister(instr->temp()); | 4607   Register temp1 = ToRegister(instr->temp()); | 
| 4828   Register temp2 = ToRegister(instr->temp2()); | 4608   Register temp2 = ToRegister(instr->temp2()); | 
| 4829 | 4609 | 
| 4830   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); | 4610   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); | 
| 4831   if (FLAG_inline_new) { | 4611   if (FLAG_inline_new) { | 
| 4832     __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); | 4612     __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); | 
| 4833     // We want the untagged address first for performance | 4613     // We want the untagged address first for performance | 
| 4834     __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(), | 4614     __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(), | 
| 4835                           DONT_TAG_RESULT); | 4615                           DONT_TAG_RESULT); | 
| 4836   } else { | 4616   } else { | 
| 4837     __ jmp(deferred->entry()); | 4617     __ jmp(deferred->entry()); | 
| 4838   } | 4618   } | 
| 4839   __ bind(deferred->exit()); | 4619   __ bind(deferred->exit()); | 
| 4840   if (CpuFeatures::IsSupported(VFP2)) { | 4620   __ vstr(input_reg, reg, HeapNumber::kValueOffset); | 
| 4841     CpuFeatures::Scope scope(VFP2); |  | 
| 4842     __ vstr(input_reg, reg, HeapNumber::kValueOffset); |  | 
| 4843   } else { |  | 
| 4844     __ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset)); |  | 
| 4845     __ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize)); |  | 
| 4846   } |  | 
| 4847   // Now that we have finished with the object's real address tag it | 4621   // Now that we have finished with the object's real address tag it | 
| 4848   __ add(reg, reg, Operand(kHeapObjectTag)); | 4622   __ add(reg, reg, Operand(kHeapObjectTag)); | 
| 4849 } | 4623 } | 
| 4850 | 4624 | 
| 4851 | 4625 | 
| 4852 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { | 4626 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { | 
| 4853   // TODO(3095996): Get rid of this. For now, we need to make the | 4627   // TODO(3095996): Get rid of this. For now, we need to make the | 
| 4854   // result register contain a valid pointer because it is already | 4628   // result register contain a valid pointer because it is already | 
| 4855   // contained in the register pointer map. | 4629   // contained in the register pointer map. | 
| 4856   Register reg = ToRegister(instr->result()); | 4630   Register reg = ToRegister(instr->result()); | 
| (...skipping 20 matching lines...) |
| 4877     // If the input is a HeapObject, SmiUntag will set the carry flag. | 4651     // If the input is a HeapObject, SmiUntag will set the carry flag. | 
| 4878     __ SmiUntag(result, input, SetCC); | 4652     __ SmiUntag(result, input, SetCC); | 
| 4879     DeoptimizeIf(cs, instr->environment()); | 4653     DeoptimizeIf(cs, instr->environment()); | 
| 4880   } else { | 4654   } else { | 
| 4881     __ SmiUntag(result, input); | 4655     __ SmiUntag(result, input); | 
| 4882   } | 4656   } | 
| 4883 } | 4657 } | 
| 4884 | 4658 | 
| 4885 | 4659 | 
| 4886 void LCodeGen::EmitNumberUntagD(Register input_reg, | 4660 void LCodeGen::EmitNumberUntagD(Register input_reg, | 
| 4887                                 DwVfpRegister result_reg, | 4661                                 DoubleRegister result_reg, | 
| 4888                                 bool deoptimize_on_undefined, | 4662                                 bool deoptimize_on_undefined, | 
| 4889                                 bool deoptimize_on_minus_zero, | 4663                                 bool deoptimize_on_minus_zero, | 
| 4890                                 LEnvironment* env) { | 4664                                 LEnvironment* env) { | 
| 4891   Register scratch = scratch0(); | 4665   Register scratch = scratch0(); | 
| 4892   SwVfpRegister flt_scratch = double_scratch0().low(); | 4666   SwVfpRegister flt_scratch = double_scratch0().low(); | 
| 4893   ASSERT(!result_reg.is(double_scratch0())); | 4667   ASSERT(!result_reg.is(double_scratch0())); | 
| 4894   CpuFeatures::Scope scope(VFP2); |  | 
| 4895 | 4668 | 
| 4896   Label load_smi, heap_number, done; | 4669   Label load_smi, heap_number, done; | 
| 4897 | 4670 | 
| 4898   // Smi check. | 4671   // Smi check. | 
| 4899   __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); | 4672   __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); | 
| 4900 | 4673 | 
| 4901   // Heap number map check. | 4674   // Heap number map check. | 
| 4902   __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 4675   __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 
| 4903   __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 4676   __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 
| 4904   __ cmp(scratch, Operand(ip)); | 4677   __ cmp(scratch, Operand(ip)); | 
| (...skipping 54 matching lines...) |
| 4959   // SmiUntag(heap_object, SetCC) | 4732   // SmiUntag(heap_object, SetCC) | 
| 4960   STATIC_ASSERT(kHeapObjectTag == 1); | 4733   STATIC_ASSERT(kHeapObjectTag == 1); | 
| 4961   __ adc(input_reg, input_reg, Operand(input_reg)); | 4734   __ adc(input_reg, input_reg, Operand(input_reg)); | 
| 4962 | 4735 | 
| 4963   // Heap number map check. | 4736   // Heap number map check. | 
| 4964   __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 4737   __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 
| 4965   __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 4738   __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 
| 4966   __ cmp(scratch1, Operand(ip)); | 4739   __ cmp(scratch1, Operand(ip)); | 
| 4967 | 4740 | 
| 4968   if (instr->truncating()) { | 4741   if (instr->truncating()) { | 
| 4969     CpuFeatures::Scope scope(VFP2); |  | 
| 4970     Register scratch3 = ToRegister(instr->temp2()); | 4742     Register scratch3 = ToRegister(instr->temp2()); | 
| 4971     ASSERT(!scratch3.is(input_reg) && | 4743     ASSERT(!scratch3.is(input_reg) && | 
| 4972            !scratch3.is(scratch1) && | 4744            !scratch3.is(scratch1) && | 
| 4973            !scratch3.is(scratch2)); | 4745            !scratch3.is(scratch2)); | 
| 4974     // Performs a truncating conversion of a floating point number as used by | 4746     // Performs a truncating conversion of a floating point number as used by | 
| 4975     // the JS bitwise operations. | 4747     // the JS bitwise operations. | 
| 4976     Label heap_number; | 4748     Label heap_number; | 
| 4977     __ b(eq, &heap_number); | 4749     __ b(eq, &heap_number); | 
| 4978     // Check for undefined. Undefined is converted to zero for truncating | 4750     // Check for undefined. Undefined is converted to zero for truncating | 
| 4979     // conversions. | 4751     // conversions. | 
| (...skipping 70 matching lines...) |
| 5050 } | 4822 } | 
| 5051 | 4823 | 
| 5052 | 4824 | 
| 5053 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { | 4825 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { | 
| 5054   LOperand* input = instr->value(); | 4826   LOperand* input = instr->value(); | 
| 5055   ASSERT(input->IsRegister()); | 4827   ASSERT(input->IsRegister()); | 
| 5056   LOperand* result = instr->result(); | 4828   LOperand* result = instr->result(); | 
| 5057   ASSERT(result->IsDoubleRegister()); | 4829   ASSERT(result->IsDoubleRegister()); | 
| 5058 | 4830 | 
| 5059   Register input_reg = ToRegister(input); | 4831   Register input_reg = ToRegister(input); | 
| 5060   DwVfpRegister result_reg = ToDoubleRegister(result); | 4832   DoubleRegister result_reg = ToDoubleRegister(result); | 
| 5061 | 4833 | 
| 5062   EmitNumberUntagD(input_reg, result_reg, | 4834   EmitNumberUntagD(input_reg, result_reg, | 
| 5063                    instr->hydrogen()->deoptimize_on_undefined(), | 4835                    instr->hydrogen()->deoptimize_on_undefined(), | 
| 5064                    instr->hydrogen()->deoptimize_on_minus_zero(), | 4836                    instr->hydrogen()->deoptimize_on_minus_zero(), | 
| 5065                    instr->environment()); | 4837                    instr->environment()); | 
| 5066 } | 4838 } | 
| 5067 | 4839 | 
| 5068 | 4840 | 
| 5069 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { | 4841 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { | 
| 5070   Register result_reg = ToRegister(instr->result()); | 4842   Register result_reg = ToRegister(instr->result()); | 
| (...skipping 127 matching lines...) |
| 5198     __ CompareMap(reg, scratch, map, &success, REQUIRE_EXACT_MAP); | 4970     __ CompareMap(reg, scratch, map, &success, REQUIRE_EXACT_MAP); | 
| 5199     __ b(eq, &success); | 4971     __ b(eq, &success); | 
| 5200   } | 4972   } | 
| 5201   Handle<Map> map = map_set->last(); | 4973   Handle<Map> map = map_set->last(); | 
| 5202   DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment()); | 4974   DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment()); | 
| 5203   __ bind(&success); | 4975   __ bind(&success); | 
| 5204 } | 4976 } | 
| 5205 | 4977 | 
| 5206 | 4978 | 
| 5207 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 4979 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 
| 5208   CpuFeatures::Scope vfp_scope(VFP2); | 4980   DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); | 
| 5209   DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); |  | 
| 5210   Register result_reg = ToRegister(instr->result()); | 4981   Register result_reg = ToRegister(instr->result()); | 
| 5211   DwVfpRegister temp_reg = ToDoubleRegister(instr->temp()); | 4982   DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); | 
| 5212   __ ClampDoubleToUint8(result_reg, value_reg, temp_reg); | 4983   __ ClampDoubleToUint8(result_reg, value_reg, temp_reg); | 
| 5213 } | 4984 } | 
| 5214 | 4985 | 
| 5215 | 4986 | 
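DoClampDToUint8 relies on the macro assembler's ClampDoubleToUint8 to squeeze an arbitrary double into the 0..255 range. A rough host-side model for intuition only: NaN and negative inputs clamp to 0, large inputs saturate at 255, and in-range values are rounded (the exact tie-breaking belongs to the real helper, so `nearbyint` under the current rounding mode stands in for it here):

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

// Rough model of clamping a double to a uint8 value (not the V8 helper).
uint8_t ClampDoubleToUint8Sketch(double d) {
  if (!(d > 0.0)) return 0;      // NaN and non-positive values clamp to 0
  if (d >= 255.0) return 255;    // saturate at the top of the range
  return (uint8_t)std::nearbyint(d);
}

int main() {
  std::printf("%d %d %d %d\n",
              ClampDoubleToUint8Sketch(-3.5),
              ClampDoubleToUint8Sketch(NAN),
              ClampDoubleToUint8Sketch(127.4),
              ClampDoubleToUint8Sketch(1000.0));  // prints "0 0 127 255"
}
```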
| 5216 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { | 4987 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { | 
| 5217   CpuFeatures::Scope scope(VFP2); |  | 
| 5218   Register unclamped_reg = ToRegister(instr->unclamped()); | 4988   Register unclamped_reg = ToRegister(instr->unclamped()); | 
| 5219   Register result_reg = ToRegister(instr->result()); | 4989   Register result_reg = ToRegister(instr->result()); | 
| 5220   __ ClampUint8(result_reg, unclamped_reg); | 4990   __ ClampUint8(result_reg, unclamped_reg); | 
| 5221 } | 4991 } | 
| 5222 | 4992 | 
| 5223 | 4993 | 
| 5224 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { | 4994 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { | 
| 5225   CpuFeatures::Scope scope(VFP2); |  | 
| 5226   Register scratch = scratch0(); | 4995   Register scratch = scratch0(); | 
| 5227   Register input_reg = ToRegister(instr->unclamped()); | 4996   Register input_reg = ToRegister(instr->unclamped()); | 
| 5228   Register result_reg = ToRegister(instr->result()); | 4997   Register result_reg = ToRegister(instr->result()); | 
| 5229   DwVfpRegister temp_reg = ToDoubleRegister(instr->temp()); | 4998   DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); | 
| 5230   Label is_smi, done, heap_number; | 4999   Label is_smi, done, heap_number; | 
| 5231 | 5000 | 
| 5232   // Both smi and heap number cases are handled. | 5001   // Both smi and heap number cases are handled. | 
| 5233   __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); | 5002   __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); | 
| 5234 | 5003 | 
| 5235   // Check for heap number | 5004   // Check for heap number | 
| 5236   __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 5005   __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 
| 5237   __ cmp(scratch, Operand(factory()->heap_number_map())); | 5006   __ cmp(scratch, Operand(factory()->heap_number_map())); | 
| 5238   __ b(eq, &heap_number); | 5007   __ b(eq, &heap_number); | 
| 5239 | 5008 | 
| (...skipping 556 matching lines...) |
| 5796   __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset)); | 5565   __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset)); | 
| 5797 | 5566 | 
| 5798   // Check the marker in the calling frame. | 5567   // Check the marker in the calling frame. | 
| 5799   __ bind(&check_frame_marker); | 5568   __ bind(&check_frame_marker); | 
| 5800   __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset)); | 5569   __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset)); | 
| 5801   __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT))); | 5570   __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT))); | 
| 5802 } | 5571 } | 
| 5803 | 5572 | 
| 5804 | 5573 | 
| 5805 void LCodeGen::EnsureSpaceForLazyDeopt() { | 5574 void LCodeGen::EnsureSpaceForLazyDeopt() { | 
| 5806   if (info()->IsStub()) return; |  | 
| 5807   // Ensure that we have enough space after the previous lazy-bailout | 5575   // Ensure that we have enough space after the previous lazy-bailout | 
| 5808   // instruction for patching the code here. | 5576   // instruction for patching the code here. | 
| 5809   int current_pc = masm()->pc_offset(); | 5577   int current_pc = masm()->pc_offset(); | 
| 5810   int patch_size = Deoptimizer::patch_size(); | 5578   int patch_size = Deoptimizer::patch_size(); | 
| 5811   if (current_pc < last_lazy_deopt_pc_ + patch_size) { | 5579   if (current_pc < last_lazy_deopt_pc_ + patch_size) { | 
| 5812     // Block literal pool emission for duration of padding. | 5580     // Block literal pool emission for duration of padding. | 
| 5813     Assembler::BlockConstPoolScope block_const_pool(masm()); | 5581     Assembler::BlockConstPoolScope block_const_pool(masm()); | 
| 5814     int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc; | 5582     int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc; | 
| 5815     ASSERT_EQ(0, padding_size % Assembler::kInstrSize); | 5583     ASSERT_EQ(0, padding_size % Assembler::kInstrSize); | 
| 5816     while (padding_size > 0) { | 5584     while (padding_size > 0) { | 
| (...skipping 211 matching lines...) |
| 6028   __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); | 5796   __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); | 
| 6029   __ ldr(result, FieldMemOperand(scratch, | 5797   __ ldr(result, FieldMemOperand(scratch, | 
| 6030                                  FixedArray::kHeaderSize - kPointerSize)); | 5798                                  FixedArray::kHeaderSize - kPointerSize)); | 
| 6031   __ bind(&done); | 5799   __ bind(&done); | 
| 6032 } | 5800 } | 
| 6033 | 5801 | 
| 6034 | 5802 | 
| 6035 #undef __ | 5803 #undef __ | 
| 6036 | 5804 | 
| 6037 } }  // namespace v8::internal | 5805 } }  // namespace v8::internal | 