OLD | NEW |
(Empty) | |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are |
| 4 // met: |
| 5 // |
| 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. |
| 12 // * Neither the name of Google Inc. nor the names of its |
| 13 // contributors may be used to endorse or promote products derived |
| 14 // from this software without specific prior written permission. |
| 15 // |
| 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 |
| 28 #include "v8.h" |
| 29 |
| 30 #include "mips/lithium-codegen-mips.h" |
| 31 #include "mips/lithium-gap-resolver-mips.h" |
| 32 #include "code-stubs.h" |
| 33 #include "stub-cache.h" |
| 34 |
| 35 namespace v8 { |
| 36 namespace internal { |
| 37 |
| 38 |
| 39 class SafepointGenerator : public CallWrapper { |
| 40 public: |
| 41 SafepointGenerator(LCodeGen* codegen, |
| 42 LPointerMap* pointers, |
| 43 int deoptimization_index) |
| 44 : codegen_(codegen), |
| 45 pointers_(pointers), |
| 46 deoptimization_index_(deoptimization_index) { } |
| 47 virtual ~SafepointGenerator() { } |
| 48 |
| 49 virtual void BeforeCall(int call_size) const { |
| 50 ASSERT(call_size >= 0); |
| 51     // Ensure that there is enough space after the previous safepoint
| 52     // position for the lazy deoptimization patch code generated there.
| 53 int call_end = codegen_->masm()->pc_offset() + call_size; |
| 54 int prev_jump_end = |
| 55 codegen_->LastSafepointEnd() + Deoptimizer::patch_size(); |
| 56 if (call_end < prev_jump_end) { |
| 57 int padding_size = prev_jump_end - call_end; |
| 58 ASSERT_EQ(0, padding_size % Assembler::kInstrSize); |
| 59 while (padding_size > 0) { |
| 60 codegen_->masm()->nop(); |
| 61 padding_size -= Assembler::kInstrSize; |
| 62 } |
| 63 } |
| 64 } |
| 65 |
| 66 virtual void AfterCall() const { |
| 67 codegen_->RecordSafepoint(pointers_, deoptimization_index_); |
| 68 } |
| 69 |
| 70 private: |
| 71 LCodeGen* codegen_; |
| 72 LPointerMap* pointers_; |
| 73 int deoptimization_index_; |
| 74 }; |
| 75 |
| 76 |
| 77 #define __ masm()-> |
| 78 |
| 79 bool LCodeGen::GenerateCode() { |
| 80 HPhase phase("Code generation", chunk()); |
| 81 ASSERT(is_unused()); |
| 82 status_ = GENERATING; |
| 83 CpuFeatures::Scope scope(FPU); |
| 84 |
| 85 CodeStub::GenerateFPStubs(); |
| 86 |
| 87 // Open a frame scope to indicate that there is a frame on the stack. The |
| 88 // NONE indicates that the scope shouldn't actually generate code to set up |
| 89 // the frame (that is done in GeneratePrologue). |
| 90 FrameScope frame_scope(masm_, StackFrame::NONE); |
| 91 |
| 92 return GeneratePrologue() && |
| 93 GenerateBody() && |
| 94 GenerateDeferredCode() && |
| 95 GenerateSafepointTable(); |
| 96 } |
| 97 |
| 98 |
| 99 void LCodeGen::FinishCode(Handle<Code> code) { |
| 100 ASSERT(is_done()); |
| 101 code->set_stack_slots(GetStackSlotCount()); |
| 102 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); |
| 103 PopulateDeoptimizationData(code); |
| 104 Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code); |
| 105 } |
| 106 |
| 107 |
| 108 void LCodeGen::Abort(const char* format, ...) { |
| 109 if (FLAG_trace_bailout) { |
| 110 SmartArrayPointer<char> name( |
| 111 info()->shared_info()->DebugName()->ToCString()); |
| 112 PrintF("Aborting LCodeGen in @\"%s\": ", *name); |
| 113 va_list arguments; |
| 114 va_start(arguments, format); |
| 115 OS::VPrint(format, arguments); |
| 116 va_end(arguments); |
| 117 PrintF("\n"); |
| 118 } |
| 119 status_ = ABORTED; |
| 120 } |
| 121 |
| 122 |
| 123 void LCodeGen::Comment(const char* format, ...) { |
| 124 if (!FLAG_code_comments) return; |
| 125 char buffer[4 * KB]; |
| 126 StringBuilder builder(buffer, ARRAY_SIZE(buffer)); |
| 127 va_list arguments; |
| 128 va_start(arguments, format); |
| 129 builder.AddFormattedList(format, arguments); |
| 130 va_end(arguments); |
| 131 |
| 132 // Copy the string before recording it in the assembler to avoid |
| 133 // issues when the stack allocated buffer goes out of scope. |
| 134 size_t length = builder.position(); |
| 135 Vector<char> copy = Vector<char>::New(length + 1); |
| 136 memcpy(copy.start(), builder.Finalize(), copy.length()); |
| 137 masm()->RecordComment(copy.start()); |
| 138 } |
| 139 |
| 140 |
| 141 bool LCodeGen::GeneratePrologue() { |
| 142 ASSERT(is_generating()); |
| 143 |
| 144 #ifdef DEBUG |
| 145 if (strlen(FLAG_stop_at) > 0 && |
| 146 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { |
| 147 __ stop("stop_at"); |
| 148 } |
| 149 #endif |
| 150 |
| 151 // a1: Callee's JS function. |
| 152 // cp: Callee's context. |
| 153 // fp: Caller's frame pointer. |
| 154   // ra: Caller's pc (return address).
| 155 |
| 156 // Strict mode functions and builtins need to replace the receiver |
| 157 // with undefined when called as functions (without an explicit |
| 158   // receiver object). t1 is zero for method calls and non-zero for
| 159 // function calls. |
| 160 if (info_->is_strict_mode() || info_->is_native()) { |
| 161 Label ok; |
| 162 __ Branch(&ok, eq, t1, Operand(zero_reg)); |
| 163 |
| 164 int receiver_offset = scope()->num_parameters() * kPointerSize; |
| 165 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); |
| 166 __ sw(a2, MemOperand(sp, receiver_offset)); |
| 167 __ bind(&ok); |
| 168 } |
| 169 |
| 170 __ Push(ra, fp, cp, a1); |
| 171 __ Addu(fp, sp, Operand(2 * kPointerSize)); // Adj. FP to point to saved FP. |
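|       // The frame now contains, from high to low address: ra, the caller's
|       // fp (which fp points to), cp, and the JS function (a1).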
| 172 |
| 173 // Reserve space for the stack slots needed by the code. |
| 174 int slots = GetStackSlotCount(); |
| 175 if (slots > 0) { |
| 176 if (FLAG_debug_code) { |
| 177 __ li(a0, Operand(slots)); |
| 178 __ li(a2, Operand(kSlotsZapValue)); |
| 179 Label loop; |
| 180 __ bind(&loop); |
| 181 __ push(a2); |
| 182 __ Subu(a0, a0, 1); |
| 183 __ Branch(&loop, ne, a0, Operand(zero_reg)); |
| 184 } else { |
| 185 __ Subu(sp, sp, Operand(slots * kPointerSize)); |
| 186 } |
| 187 } |
| 188 |
| 189 // Possibly allocate a local context. |
| 190 int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; |
| 191 if (heap_slots > 0) { |
| 192 Comment(";;; Allocate local context"); |
| 193 // Argument to NewContext is the function, which is in a1. |
| 194 __ push(a1); |
| 195 if (heap_slots <= FastNewContextStub::kMaximumSlots) { |
| 196 FastNewContextStub stub(heap_slots); |
| 197 __ CallStub(&stub); |
| 198 } else { |
| 199 __ CallRuntime(Runtime::kNewFunctionContext, 1); |
| 200 } |
| 201 RecordSafepoint(Safepoint::kNoDeoptimizationIndex); |
| 202 // Context is returned in both v0 and cp. It replaces the context |
| 203     // passed to us. It's saved on the stack and kept live in cp.
| 204 __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 205 // Copy any necessary parameters into the context. |
| 206 int num_parameters = scope()->num_parameters(); |
| 207 for (int i = 0; i < num_parameters; i++) { |
| 208 Variable* var = scope()->parameter(i); |
| 209 if (var->IsContextSlot()) { |
| 210 int parameter_offset = StandardFrameConstants::kCallerSPOffset + |
| 211 (num_parameters - 1 - i) * kPointerSize; |
| 212 // Load parameter from stack. |
| 213 __ lw(a0, MemOperand(fp, parameter_offset)); |
| 214 // Store it in the context. |
| 215 MemOperand target = ContextOperand(cp, var->index()); |
| 216 __ sw(a0, target); |
| 217 // Update the write barrier. This clobbers a3 and a0. |
| 218 __ RecordWriteContextSlot( |
| 219 cp, target.offset(), a0, a3, kRAHasBeenSaved, kSaveFPRegs); |
| 220 } |
| 221 } |
| 222 Comment(";;; End allocate local context"); |
| 223 } |
| 224 |
| 225 // Trace the call. |
| 226 if (FLAG_trace) { |
| 227 __ CallRuntime(Runtime::kTraceEnter, 0); |
| 228 } |
| 229 return !is_aborted(); |
| 230 } |
| 231 |
| 232 |
| 233 bool LCodeGen::GenerateBody() { |
| 234 ASSERT(is_generating()); |
| 235 bool emit_instructions = true; |
| 236 for (current_instruction_ = 0; |
| 237 !is_aborted() && current_instruction_ < instructions_->length(); |
| 238 current_instruction_++) { |
| 239 LInstruction* instr = instructions_->at(current_instruction_); |
| 240 if (instr->IsLabel()) { |
| 241 LLabel* label = LLabel::cast(instr); |
| 242 emit_instructions = !label->HasReplacement(); |
| 243 } |
| 244 |
| 245 if (emit_instructions) { |
| 246 Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic()); |
| 247 instr->CompileToNative(this); |
| 248 } |
| 249 } |
| 250 return !is_aborted(); |
| 251 } |
| 252 |
| 253 |
| 254 LInstruction* LCodeGen::GetNextInstruction() { |
| 255 if (current_instruction_ < instructions_->length() - 1) { |
| 256 return instructions_->at(current_instruction_ + 1); |
| 257 } else { |
| 258 return NULL; |
| 259 } |
| 260 } |
| 261 |
| 262 |
| 263 bool LCodeGen::GenerateDeferredCode() { |
| 264 ASSERT(is_generating()); |
| 265 if (deferred_.length() > 0) { |
| 266 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { |
| 267 LDeferredCode* code = deferred_[i]; |
| 268 __ bind(code->entry()); |
| 269 Comment(";;; Deferred code @%d: %s.", |
| 270 code->instruction_index(), |
| 271 code->instr()->Mnemonic()); |
| 272 code->Generate(); |
| 273 __ jmp(code->exit()); |
| 274 } |
| 275 |
| 276     // Pad code to ensure that the last piece of deferred code has
| 277 // room for lazy bailout. |
| 278 while ((masm()->pc_offset() - LastSafepointEnd()) |
| 279 < Deoptimizer::patch_size()) { |
| 280 __ nop(); |
| 281 } |
| 282 } |
| 283 // Deferred code is the last part of the instruction sequence. Mark |
| 284 // the generated code as done unless we bailed out. |
| 285 if (!is_aborted()) status_ = DONE; |
| 286 return !is_aborted(); |
| 287 } |
| 288 |
| 289 |
| 290 bool LCodeGen::GenerateDeoptJumpTable() { |
| 291   // TODO(plind): it is not clear that this will be an advantage for MIPS.
| 292 // Skipping it for now. Raised issue #100 for this. |
| 293 Abort("Unimplemented: %s", "GenerateDeoptJumpTable"); |
| 294 return false; |
| 295 } |
| 296 |
| 297 |
| 298 bool LCodeGen::GenerateSafepointTable() { |
| 299 ASSERT(is_done()); |
| 300 safepoints_.Emit(masm(), GetStackSlotCount()); |
| 301 return !is_aborted(); |
| 302 } |
| 303 |
| 304 |
| 305 Register LCodeGen::ToRegister(int index) const { |
| 306 return Register::FromAllocationIndex(index); |
| 307 } |
| 308 |
| 309 |
| 310 DoubleRegister LCodeGen::ToDoubleRegister(int index) const { |
| 311 return DoubleRegister::FromAllocationIndex(index); |
| 312 } |
| 313 |
| 314 |
| 315 Register LCodeGen::ToRegister(LOperand* op) const { |
| 316 ASSERT(op->IsRegister()); |
| 317 return ToRegister(op->index()); |
| 318 } |
| 319 |
| 320 |
| 321 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { |
| 322 if (op->IsRegister()) { |
| 323 return ToRegister(op->index()); |
| 324 } else if (op->IsConstantOperand()) { |
| 325 __ li(scratch, ToOperand(op)); |
| 326 return scratch; |
| 327 } else if (op->IsStackSlot() || op->IsArgument()) { |
| 328 __ lw(scratch, ToMemOperand(op)); |
| 329 return scratch; |
| 330 } |
| 331 UNREACHABLE(); |
| 332 return scratch; |
| 333 } |
| 334 |
| 335 |
| 336 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { |
| 337 ASSERT(op->IsDoubleRegister()); |
| 338 return ToDoubleRegister(op->index()); |
| 339 } |
| 340 |
| 341 |
| 342 DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, |
| 343 FloatRegister flt_scratch, |
| 344 DoubleRegister dbl_scratch) { |
| 345 if (op->IsDoubleRegister()) { |
| 346 return ToDoubleRegister(op->index()); |
| 347 } else if (op->IsConstantOperand()) { |
| 348 LConstantOperand* const_op = LConstantOperand::cast(op); |
| 349 Handle<Object> literal = chunk_->LookupLiteral(const_op); |
| 350 Representation r = chunk_->LookupLiteralRepresentation(const_op); |
| 351 if (r.IsInteger32()) { |
| 352 ASSERT(literal->IsNumber()); |
| 353 __ li(at, Operand(static_cast<int32_t>(literal->Number()))); |
| 354 __ mtc1(at, flt_scratch); |
| 355 __ cvt_d_w(dbl_scratch, flt_scratch); |
| 356 return dbl_scratch; |
| 357 } else if (r.IsDouble()) { |
| 358 Abort("unsupported double immediate"); |
| 359 } else if (r.IsTagged()) { |
| 360 Abort("unsupported tagged immediate"); |
| 361 } |
| 362 } else if (op->IsStackSlot() || op->IsArgument()) { |
| 363 MemOperand mem_op = ToMemOperand(op); |
| 364 __ ldc1(dbl_scratch, mem_op); |
| 365 return dbl_scratch; |
| 366 } |
| 367 UNREACHABLE(); |
| 368 return dbl_scratch; |
| 369 } |
| 370 |
| 371 |
| 372 int LCodeGen::ToInteger32(LConstantOperand* op) const { |
| 373 Handle<Object> value = chunk_->LookupLiteral(op); |
| 374 ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32()); |
| 375 ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) == |
| 376 value->Number()); |
| 377 return static_cast<int32_t>(value->Number()); |
| 378 } |
| 379 |
| 380 |
| 381 Operand LCodeGen::ToOperand(LOperand* op) { |
| 382 if (op->IsConstantOperand()) { |
| 383 LConstantOperand* const_op = LConstantOperand::cast(op); |
| 384 Handle<Object> literal = chunk_->LookupLiteral(const_op); |
| 385 Representation r = chunk_->LookupLiteralRepresentation(const_op); |
| 386 if (r.IsInteger32()) { |
| 387 ASSERT(literal->IsNumber()); |
| 388 return Operand(static_cast<int32_t>(literal->Number())); |
| 389 } else if (r.IsDouble()) { |
| 390 Abort("ToOperand Unsupported double immediate."); |
| 391 } |
| 392 ASSERT(r.IsTagged()); |
| 393 return Operand(literal); |
| 394 } else if (op->IsRegister()) { |
| 395 return Operand(ToRegister(op)); |
| 396 } else if (op->IsDoubleRegister()) { |
| 397 Abort("ToOperand IsDoubleRegister unimplemented"); |
| 398 return Operand(0); |
| 399 } |
| 400 // Stack slots not implemented, use ToMemOperand instead. |
| 401 UNREACHABLE(); |
| 402 return Operand(0); |
| 403 } |
| 404 |
| 405 |
| 406 MemOperand LCodeGen::ToMemOperand(LOperand* op) const { |
| 407 ASSERT(!op->IsRegister()); |
| 408 ASSERT(!op->IsDoubleRegister()); |
| 409 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); |
| 410 int index = op->index(); |
| 411 if (index >= 0) { |
| 412 // Local or spill slot. Skip the frame pointer, function, and |
| 413 // context in the fixed part of the frame. |
| 414 return MemOperand(fp, -(index + 3) * kPointerSize); |
| 415 } else { |
| 416 // Incoming parameter. Skip the return address. |
| 417 return MemOperand(fp, -(index - 1) * kPointerSize); |
| 418 } |
| 419 } |
| 420 |
| 421 |
| 422 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const { |
| 423 ASSERT(op->IsDoubleStackSlot()); |
| 424 int index = op->index(); |
| 425 if (index >= 0) { |
| 426 // Local or spill slot. Skip the frame pointer, function, context, |
| 427 // and the first word of the double in the fixed part of the frame. |
| 428 return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize); |
| 429 } else { |
| 430 // Incoming parameter. Skip the return address and the first word of |
| 431 // the double. |
| 432 return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize); |
| 433 } |
| 434 } |
| 435 |
| 436 |
| 437 void LCodeGen::WriteTranslation(LEnvironment* environment, |
| 438 Translation* translation) { |
| 439 if (environment == NULL) return; |
| 440 |
| 441 // The translation includes one command per value in the environment. |
| 442 int translation_size = environment->values()->length(); |
| 443 // The output frame height does not include the parameters. |
| 444 int height = translation_size - environment->parameter_count(); |
| 445 |
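|   // Outer environments are written first, so the deoptimizer can rebuild
|   // the frames from outermost to innermost.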
| 446 WriteTranslation(environment->outer(), translation); |
| 447 int closure_id = DefineDeoptimizationLiteral(environment->closure()); |
| 448 translation->BeginFrame(environment->ast_id(), closure_id, height); |
| 449 for (int i = 0; i < translation_size; ++i) { |
| 450 LOperand* value = environment->values()->at(i); |
| 451 // spilled_registers_ and spilled_double_registers_ are either |
| 452 // both NULL or both set. |
| 453 if (environment->spilled_registers() != NULL && value != NULL) { |
| 454 if (value->IsRegister() && |
| 455 environment->spilled_registers()[value->index()] != NULL) { |
| 456 translation->MarkDuplicate(); |
| 457 AddToTranslation(translation, |
| 458 environment->spilled_registers()[value->index()], |
| 459 environment->HasTaggedValueAt(i)); |
| 460 } else if ( |
| 461 value->IsDoubleRegister() && |
| 462 environment->spilled_double_registers()[value->index()] != NULL) { |
| 463 translation->MarkDuplicate(); |
| 464 AddToTranslation( |
| 465 translation, |
| 466 environment->spilled_double_registers()[value->index()], |
| 467 false); |
| 468 } |
| 469 } |
| 470 |
| 471 AddToTranslation(translation, value, environment->HasTaggedValueAt(i)); |
| 472 } |
| 473 } |
| 474 |
| 475 |
| 476 void LCodeGen::AddToTranslation(Translation* translation, |
| 477 LOperand* op, |
| 478 bool is_tagged) { |
| 479 if (op == NULL) { |
| 480 // TODO(twuerthinger): Introduce marker operands to indicate that this value |
| 481 // is not present and must be reconstructed from the deoptimizer. Currently |
| 482 // this is only used for the arguments object. |
| 483 translation->StoreArgumentsObject(); |
| 484 } else if (op->IsStackSlot()) { |
| 485 if (is_tagged) { |
| 486 translation->StoreStackSlot(op->index()); |
| 487 } else { |
| 488 translation->StoreInt32StackSlot(op->index()); |
| 489 } |
| 490 } else if (op->IsDoubleStackSlot()) { |
| 491 translation->StoreDoubleStackSlot(op->index()); |
| 492 } else if (op->IsArgument()) { |
| 493 ASSERT(is_tagged); |
| 494 int src_index = GetStackSlotCount() + op->index(); |
| 495 translation->StoreStackSlot(src_index); |
| 496 } else if (op->IsRegister()) { |
| 497 Register reg = ToRegister(op); |
| 498 if (is_tagged) { |
| 499 translation->StoreRegister(reg); |
| 500 } else { |
| 501 translation->StoreInt32Register(reg); |
| 502 } |
| 503 } else if (op->IsDoubleRegister()) { |
| 504 DoubleRegister reg = ToDoubleRegister(op); |
| 505 translation->StoreDoubleRegister(reg); |
| 506 } else if (op->IsConstantOperand()) { |
| 507 Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op)); |
| 508 int src_index = DefineDeoptimizationLiteral(literal); |
| 509 translation->StoreLiteral(src_index); |
| 510 } else { |
| 511 UNREACHABLE(); |
| 512 } |
| 513 } |
| 514 |
| 515 |
| 516 void LCodeGen::CallCode(Handle<Code> code, |
| 517 RelocInfo::Mode mode, |
| 518 LInstruction* instr) { |
| 519 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); |
| 520 } |
| 521 |
| 522 |
| 523 void LCodeGen::CallCodeGeneric(Handle<Code> code, |
| 524 RelocInfo::Mode mode, |
| 525 LInstruction* instr, |
| 526 SafepointMode safepoint_mode) { |
| 527 ASSERT(instr != NULL); |
| 528 LPointerMap* pointers = instr->pointer_map(); |
| 529 RecordPosition(pointers->position()); |
| 530 __ Call(code, mode); |
| 531 RegisterLazyDeoptimization(instr, safepoint_mode); |
| 532 } |
| 533 |
| 534 |
| 535 void LCodeGen::CallRuntime(const Runtime::Function* function, |
| 536 int num_arguments, |
| 537 LInstruction* instr) { |
| 538 ASSERT(instr != NULL); |
| 539 LPointerMap* pointers = instr->pointer_map(); |
| 540 ASSERT(pointers != NULL); |
| 541 RecordPosition(pointers->position()); |
| 542 |
| 543 __ CallRuntime(function, num_arguments); |
| 544 RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT); |
| 545 } |
| 546 |
| 547 |
| 548 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, |
| 549 int argc, |
| 550 LInstruction* instr) { |
| 551 __ CallRuntimeSaveDoubles(id); |
| 552 RecordSafepointWithRegisters( |
| 553 instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex); |
| 554 } |
| 555 |
| 556 |
| 557 void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr, |
| 558 SafepointMode safepoint_mode) { |
| 559   // Create the environment to bail out to. If the call has side effects,
| 560   // execution has to continue after the call; otherwise execution could
| 561   // resume from an earlier bailout point and repeat the call.
| 562 LEnvironment* deoptimization_environment; |
| 563 if (instr->HasDeoptimizationEnvironment()) { |
| 564 deoptimization_environment = instr->deoptimization_environment(); |
| 565 } else { |
| 566 deoptimization_environment = instr->environment(); |
| 567 } |
| 568 |
| 569 RegisterEnvironmentForDeoptimization(deoptimization_environment); |
| 570 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { |
| 571 RecordSafepoint(instr->pointer_map(), |
| 572 deoptimization_environment->deoptimization_index()); |
| 573 } else { |
| 574 ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); |
| 575 RecordSafepointWithRegisters( |
| 576 instr->pointer_map(), |
| 577 0, |
| 578 deoptimization_environment->deoptimization_index()); |
| 579 } |
| 580 } |
| 581 |
| 582 |
| 583 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) { |
| 584 if (!environment->HasBeenRegistered()) { |
| 585 // Physical stack frame layout: |
| 586 // -x ............. -4 0 ..................................... y |
| 587 // [incoming arguments] [spill slots] [pushed outgoing arguments] |
| 588 |
| 589 // Layout of the environment: |
| 590 // 0 ..................................................... size-1 |
| 591 // [parameters] [locals] [expression stack including arguments] |
| 592 |
| 593 // Layout of the translation: |
| 594 // 0 ........................................................ size - 1 + 4 |
| 595 // [expression stack including arguments] [locals] [4 words] [parameters] |
| 596 // |>------------ translation_size ------------<| |
| 597 |
| 598 int frame_count = 0; |
| 599 for (LEnvironment* e = environment; e != NULL; e = e->outer()) { |
| 600 ++frame_count; |
| 601 } |
| 602 Translation translation(&translations_, frame_count); |
| 603 WriteTranslation(environment, &translation); |
| 604 int deoptimization_index = deoptimizations_.length(); |
| 605 environment->Register(deoptimization_index, translation.index()); |
| 606 deoptimizations_.Add(environment); |
| 607 } |
| 608 } |
| 609 |
| 610 |
| 611 void LCodeGen::DeoptimizeIf(Condition cc, |
| 612 LEnvironment* environment, |
| 613 Register src1, |
| 614 const Operand& src2) { |
| 615 RegisterEnvironmentForDeoptimization(environment); |
| 616 ASSERT(environment->HasBeenRegistered()); |
| 617 int id = environment->deoptimization_index(); |
| 618 Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER); |
| 619 ASSERT(entry != NULL); |
| 620 if (entry == NULL) { |
| 621 Abort("bailout was not prepared"); |
| 622 return; |
| 623 } |
| 624 |
| 625 ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on MIPS. |
| 626 |
| 627 if (FLAG_deopt_every_n_times == 1 && |
| 628 info_->shared_info()->opt_count() == id) { |
| 629 __ Jump(entry, RelocInfo::RUNTIME_ENTRY); |
| 630 return; |
| 631 } |
| 632 |
| 633 if (FLAG_trap_on_deopt) { |
| 634 Label skip; |
| 635 if (cc != al) { |
| 636 __ Branch(&skip, NegateCondition(cc), src1, src2); |
| 637 } |
| 638 __ stop("trap_on_deopt"); |
| 639 __ bind(&skip); |
| 640 } |
| 641 |
| 642 if (cc == al) { |
| 643 __ Jump(entry, RelocInfo::RUNTIME_ENTRY); |
| 644 } else { |
| 645     // TODO(plind): The ARM port is a little different here, due to its
| 646     // deopt jump table, which is not used on MIPS yet.
| 647 __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2); |
| 648 } |
| 649 } |
| 650 |
| 651 |
| 652 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { |
| 653 int length = deoptimizations_.length(); |
| 654 if (length == 0) return; |
| 655 ASSERT(FLAG_deopt); |
| 656 Handle<DeoptimizationInputData> data = |
| 657 factory()->NewDeoptimizationInputData(length, TENURED); |
| 658 |
| 659 Handle<ByteArray> translations = translations_.CreateByteArray(); |
| 660 data->SetTranslationByteArray(*translations); |
| 661 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); |
| 662 |
| 663 Handle<FixedArray> literals = |
| 664 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED); |
| 665 for (int i = 0; i < deoptimization_literals_.length(); i++) { |
| 666 literals->set(i, *deoptimization_literals_[i]); |
| 667 } |
| 668 data->SetLiteralArray(*literals); |
| 669 |
| 670 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id())); |
| 671 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_)); |
| 672 |
| 673 // Populate the deoptimization entries. |
| 674 for (int i = 0; i < length; i++) { |
| 675 LEnvironment* env = deoptimizations_[i]; |
| 676 data->SetAstId(i, Smi::FromInt(env->ast_id())); |
| 677 data->SetTranslationIndex(i, Smi::FromInt(env->translation_index())); |
| 678 data->SetArgumentsStackHeight(i, |
| 679 Smi::FromInt(env->arguments_stack_height())); |
| 680 } |
| 681 code->set_deoptimization_data(*data); |
| 682 } |
| 683 |
| 684 |
| 685 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) { |
| 686 int result = deoptimization_literals_.length(); |
| 687 for (int i = 0; i < deoptimization_literals_.length(); ++i) { |
| 688 if (deoptimization_literals_[i].is_identical_to(literal)) return i; |
| 689 } |
| 690 deoptimization_literals_.Add(literal); |
| 691 return result; |
| 692 } |
| 693 |
| 694 |
| 695 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { |
| 696 ASSERT(deoptimization_literals_.length() == 0); |
| 697 |
| 698 const ZoneList<Handle<JSFunction> >* inlined_closures = |
| 699 chunk()->inlined_closures(); |
| 700 |
| 701 for (int i = 0, length = inlined_closures->length(); |
| 702 i < length; |
| 703 i++) { |
| 704 DefineDeoptimizationLiteral(inlined_closures->at(i)); |
| 705 } |
| 706 |
| 707 inlined_function_count_ = deoptimization_literals_.length(); |
| 708 } |
| 709 |
| 710 |
| 711 void LCodeGen::RecordSafepoint( |
| 712 LPointerMap* pointers, |
| 713 Safepoint::Kind kind, |
| 714 int arguments, |
| 715 int deoptimization_index) { |
| 716 ASSERT(expected_safepoint_kind_ == kind); |
| 717 |
| 718 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); |
| 719 Safepoint safepoint = safepoints_.DefineSafepoint(masm(), |
| 720 kind, arguments, deoptimization_index); |
| 721 for (int i = 0; i < operands->length(); i++) { |
| 722 LOperand* pointer = operands->at(i); |
| 723 if (pointer->IsStackSlot()) { |
| 724 safepoint.DefinePointerSlot(pointer->index()); |
| 725 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { |
| 726 safepoint.DefinePointerRegister(ToRegister(pointer)); |
| 727 } |
| 728 } |
| 729 if (kind & Safepoint::kWithRegisters) { |
| 730 // Register cp always contains a pointer to the context. |
| 731 safepoint.DefinePointerRegister(cp); |
| 732 } |
| 733 } |
| 734 |
| 735 |
| 736 void LCodeGen::RecordSafepoint(LPointerMap* pointers, |
| 737 int deoptimization_index) { |
| 738 RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index); |
| 739 } |
| 740 |
| 741 |
| 742 void LCodeGen::RecordSafepoint(int deoptimization_index) { |
| 743 LPointerMap empty_pointers(RelocInfo::kNoPosition); |
| 744 RecordSafepoint(&empty_pointers, deoptimization_index); |
| 745 } |
| 746 |
| 747 |
| 748 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, |
| 749 int arguments, |
| 750 int deoptimization_index) { |
| 751 RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, |
| 752 deoptimization_index); |
| 753 } |
| 754 |
| 755 |
| 756 void LCodeGen::RecordSafepointWithRegistersAndDoubles( |
| 757 LPointerMap* pointers, |
| 758 int arguments, |
| 759 int deoptimization_index) { |
| 760 RecordSafepoint(pointers, Safepoint::kWithRegistersAndDoubles, arguments, |
| 761 deoptimization_index); |
| 762 } |
| 763 |
| 764 |
| 765 void LCodeGen::RecordPosition(int position) { |
| 766 if (position == RelocInfo::kNoPosition) return; |
| 767 masm()->positions_recorder()->RecordPosition(position); |
| 768 } |
| 769 |
| 770 |
| 771 void LCodeGen::DoLabel(LLabel* label) { |
| 772 if (label->is_loop_header()) { |
| 773 Comment(";;; B%d - LOOP entry", label->block_id()); |
| 774 } else { |
| 775 Comment(";;; B%d", label->block_id()); |
| 776 } |
| 777 __ bind(label->label()); |
| 778 current_block_ = label->block_id(); |
| 779 DoGap(label); |
| 780 } |
| 781 |
| 782 |
| 783 void LCodeGen::DoParallelMove(LParallelMove* move) { |
| 784 resolver_.Resolve(move); |
| 785 } |
| 786 |
| 787 |
| 788 void LCodeGen::DoGap(LGap* gap) { |
| 789 for (int i = LGap::FIRST_INNER_POSITION; |
| 790 i <= LGap::LAST_INNER_POSITION; |
| 791 i++) { |
| 792 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i); |
| 793 LParallelMove* move = gap->GetParallelMove(inner_pos); |
| 794 if (move != NULL) DoParallelMove(move); |
| 795 } |
| 796 |
| 797 LInstruction* next = GetNextInstruction(); |
| 798 if (next != NULL && next->IsLazyBailout()) { |
| 799 int pc = masm()->pc_offset(); |
| 800 safepoints_.SetPcAfterGap(pc); |
| 801 } |
| 802 } |
| 803 |
| 804 |
| 805 void LCodeGen::DoInstructionGap(LInstructionGap* instr) { |
| 806 DoGap(instr); |
| 807 } |
| 808 |
| 809 |
| 810 void LCodeGen::DoParameter(LParameter* instr) { |
| 811 // Nothing to do. |
| 812 } |
| 813 |
| 814 |
| 815 void LCodeGen::DoCallStub(LCallStub* instr) { |
| 816 ASSERT(ToRegister(instr->result()).is(v0)); |
| 817 switch (instr->hydrogen()->major_key()) { |
| 818 case CodeStub::RegExpConstructResult: { |
| 819 RegExpConstructResultStub stub; |
| 820 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 821 break; |
| 822 } |
| 823 case CodeStub::RegExpExec: { |
| 824 RegExpExecStub stub; |
| 825 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 826 break; |
| 827 } |
| 828 case CodeStub::SubString: { |
| 829 SubStringStub stub; |
| 830 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 831 break; |
| 832 } |
| 833 case CodeStub::NumberToString: { |
| 834 NumberToStringStub stub; |
| 835 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 836 break; |
| 837 } |
| 838 case CodeStub::StringAdd: { |
| 839 StringAddStub stub(NO_STRING_ADD_FLAGS); |
| 840 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 841 break; |
| 842 } |
| 843 case CodeStub::StringCompare: { |
| 844 StringCompareStub stub; |
| 845 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 846 break; |
| 847 } |
| 848 case CodeStub::TranscendentalCache: { |
| 849 __ lw(a0, MemOperand(sp, 0)); |
| 850 TranscendentalCacheStub stub(instr->transcendental_type(), |
| 851 TranscendentalCacheStub::TAGGED); |
| 852 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 853 break; |
| 854 } |
| 855 default: |
| 856 UNREACHABLE(); |
| 857 } |
| 858 } |
| 859 |
| 860 |
| 861 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { |
| 862 // Nothing to do. |
| 863 } |
| 864 |
| 865 |
| 866 void LCodeGen::DoModI(LModI* instr) { |
| 867 Register scratch = scratch0(); |
| 868 const Register left = ToRegister(instr->InputAt(0)); |
| 869 const Register result = ToRegister(instr->result()); |
| 870 |
| 871   // p2constant holds the absolute value of the right side if it is a
| 872   // power of 2 constant. In other cases it is 0.
| 873 int32_t p2constant = 0; |
| 874 |
| 875 if (instr->InputAt(1)->IsConstantOperand()) { |
| 876 p2constant = ToInteger32(LConstantOperand::cast(instr->InputAt(1))); |
| 877     // Result always takes the sign of the dividend (left).
| 878     p2constant = abs(p2constant);
| 879     if (!IsPowerOf2(p2constant)) {
| 880       p2constant = 0;
| 881     }
| 882 } |
| 883 |
| 884 // div runs in the background while we check for special cases. |
| 885 Register right = EmitLoadRegister(instr->InputAt(1), scratch); |
| 886 __ div(left, right); |
| 887 |
| 888 // Check for x % 0. |
| 889 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { |
| 890 DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg)); |
| 891 } |
| 892 |
| 893 Label skip_div, do_div; |
| 894 if (p2constant != 0) { |
| 895 // Fall back to the result of the div instruction if we could have sign |
| 896 // problems. |
| 897 __ Branch(&do_div, lt, left, Operand(zero_reg)); |
| 898 // Modulo by masking. |
| 899 __ And(scratch, left, p2constant - 1); |
| 900 __ Branch(&skip_div); |
| 901 } |
| 902 |
| 903 __ bind(&do_div); |
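|   // Read the remainder of the division issued above from the hi register.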
| 904 __ mfhi(scratch); |
| 905 __ bind(&skip_div); |
| 906 |
| 907 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 908 // Result always takes the sign of the dividend (left). |
| 909 Label done; |
| 910 __ Branch(USE_DELAY_SLOT, &done, ge, left, Operand(zero_reg)); |
| 911 __ mov(result, scratch); |
| 912 DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg)); |
| 913 __ bind(&done); |
| 914 } else { |
| 915 __ Move(result, scratch); |
| 916 } |
| 917 } |
| 918 |
| 919 |
| 920 void LCodeGen::DoDivI(LDivI* instr) { |
| 921 const Register left = ToRegister(instr->InputAt(0)); |
| 922 const Register right = ToRegister(instr->InputAt(1)); |
| 923 const Register result = ToRegister(instr->result()); |
| 924 |
| 925   // On MIPS the div instruction runs in the background, so it can be
| 926   // issued now while we check for special cases.
| 927 __ div(left, right); |
| 928 |
| 929 // Check for x / 0. |
| 930 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { |
| 931 DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg)); |
| 932 } |
| 933 |
| 934 // Check for (0 / -x) that will produce negative zero. |
| 935 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 936 Label left_not_zero; |
| 937 __ Branch(&left_not_zero, ne, left, Operand(zero_reg)); |
| 938 DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg)); |
| 939 __ bind(&left_not_zero); |
| 940 } |
| 941 |
| 942 // Check for (-kMinInt / -1). |
| 943 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
| 944 Label left_not_min_int; |
| 945 __ Branch(&left_not_min_int, ne, left, Operand(kMinInt)); |
| 946 DeoptimizeIf(eq, instr->environment(), right, Operand(-1)); |
| 947 __ bind(&left_not_min_int); |
| 948 } |
| 949 |
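|   // The quotient must be exact: a non-zero remainder (in hi) means the
|   // result is not an integer, so deoptimize.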
| 950 __ mfhi(result); |
| 951 DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg)); |
| 952 __ mflo(result); |
| 953 } |
| 954 |
| 955 |
| 956 void LCodeGen::DoMulI(LMulI* instr) { |
| 957 Register scratch = scratch0(); |
| 958 Register result = ToRegister(instr->result()); |
| 959 // Note that result may alias left. |
| 960 Register left = ToRegister(instr->InputAt(0)); |
| 961 LOperand* right_op = instr->InputAt(1); |
| 962 |
| 963 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 964 bool bailout_on_minus_zero = |
| 965 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
| 966 |
| 967 if (right_op->IsConstantOperand() && !can_overflow) { |
| 968 // Use optimized code for specific constants. |
| 969 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); |
| 970 |
| 971 if (bailout_on_minus_zero && (constant < 0)) { |
| 972       // The case of a zero constant is handled separately below.
| 973       // If the constant is negative and left is zero, the result should be -0.
| 974 DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg)); |
| 975 } |
| 976 |
| 977 switch (constant) { |
| 978 case -1: |
| 979 __ Subu(result, zero_reg, left); |
| 980 break; |
| 981 case 0: |
| 982 if (bailout_on_minus_zero) { |
| 983           // If left is strictly negative and the constant is zero, the
| 984 // result is -0. Deoptimize if required, otherwise return 0. |
| 985 DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg)); |
| 986 } |
| 987 __ mov(result, zero_reg); |
| 988 break; |
| 989 case 1: |
| 990 // Nothing to do. |
| 991 __ Move(result, left); |
| 992 break; |
| 993 default: |
| 994 // Multiplying by powers of two and powers of two plus or minus |
| 995 // one can be done faster with shifted operands. |
| 996 // For other constants we emit standard code. |
| 997         int32_t mask = constant >> 31;  // 0 if constant >= 0, -1 otherwise.
| 998         uint32_t constant_abs = (constant + mask) ^ mask;  // Branchless abs.
| 999 |
| 1000 if (IsPowerOf2(constant_abs) || |
| 1001 IsPowerOf2(constant_abs - 1) || |
| 1002 IsPowerOf2(constant_abs + 1)) { |
| 1003 if (IsPowerOf2(constant_abs)) { |
| 1004 int32_t shift = WhichPowerOf2(constant_abs); |
| 1005 __ sll(result, left, shift); |
| 1006 } else if (IsPowerOf2(constant_abs - 1)) { |
| 1007 int32_t shift = WhichPowerOf2(constant_abs - 1); |
| 1008 __ sll(result, left, shift); |
| 1009 __ Addu(result, result, left); |
| 1010 } else if (IsPowerOf2(constant_abs + 1)) { |
| 1011 int32_t shift = WhichPowerOf2(constant_abs + 1); |
| 1012 __ sll(result, left, shift); |
| 1013 __ Subu(result, result, left); |
| 1014 } |
| 1015 |
| 1016           // Correct the sign of the result if the constant is negative.
| 1017 if (constant < 0) { |
| 1018 __ Subu(result, zero_reg, result); |
| 1019 } |
| 1020 |
| 1021 } else { |
| 1022 // Generate standard code. |
| 1023 __ li(at, constant); |
| 1024 __ mul(result, left, at); |
| 1025 } |
| 1026 } |
| 1027 |
| 1028 } else { |
| 1029 Register right = EmitLoadRegister(right_op, scratch); |
| 1030 if (bailout_on_minus_zero) { |
| 1031 __ Or(ToRegister(instr->TempAt(0)), left, right); |
| 1032 } |
| 1033 |
| 1034 if (can_overflow) { |
| 1035 // hi:lo = left * right. |
| 1036 __ mult(left, right); |
| 1037 __ mfhi(scratch); |
| 1038 __ mflo(result); |
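|       // The multiplication overflowed iff the high word is not the sign
|       // extension of the low word.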
| 1039 __ sra(at, result, 31); |
| 1040 DeoptimizeIf(ne, instr->environment(), scratch, Operand(at)); |
| 1041 } else { |
| 1042 __ mul(result, left, right); |
| 1043 } |
| 1044 |
| 1045 if (bailout_on_minus_zero) { |
| 1046 // Bail out if the result is supposed to be negative zero. |
| 1047 Label done; |
| 1048 __ Branch(&done, ne, result, Operand(zero_reg)); |
| 1049 DeoptimizeIf(lt, |
| 1050 instr->environment(), |
| 1051 ToRegister(instr->TempAt(0)), |
| 1052 Operand(zero_reg)); |
| 1053 __ bind(&done); |
| 1054 } |
| 1055 } |
| 1056 } |
| 1057 |
| 1058 |
| 1059 void LCodeGen::DoBitI(LBitI* instr) { |
| 1060 LOperand* left_op = instr->InputAt(0); |
| 1061 LOperand* right_op = instr->InputAt(1); |
| 1062 ASSERT(left_op->IsRegister()); |
| 1063 Register left = ToRegister(left_op); |
| 1064 Register result = ToRegister(instr->result()); |
| 1065 Operand right(no_reg); |
| 1066 |
| 1067 if (right_op->IsStackSlot() || right_op->IsArgument()) { |
| 1068 right = Operand(EmitLoadRegister(right_op, at)); |
| 1069 } else { |
| 1070 ASSERT(right_op->IsRegister() || right_op->IsConstantOperand()); |
| 1071 right = ToOperand(right_op); |
| 1072 } |
| 1073 |
| 1074 switch (instr->op()) { |
| 1075 case Token::BIT_AND: |
| 1076 __ And(result, left, right); |
| 1077 break; |
| 1078 case Token::BIT_OR: |
| 1079 __ Or(result, left, right); |
| 1080 break; |
| 1081 case Token::BIT_XOR: |
| 1082 __ Xor(result, left, right); |
| 1083 break; |
| 1084 default: |
| 1085 UNREACHABLE(); |
| 1086 break; |
| 1087 } |
| 1088 } |
| 1089 |
| 1090 |
| 1091 void LCodeGen::DoShiftI(LShiftI* instr) { |
| 1092 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so |
| 1093 // result may alias either of them. |
| 1094 LOperand* right_op = instr->InputAt(1); |
| 1095 Register left = ToRegister(instr->InputAt(0)); |
| 1096 Register result = ToRegister(instr->result()); |
| 1097 |
| 1098 if (right_op->IsRegister()) { |
| 1099 // No need to mask the right operand on MIPS, it is built into the variable |
| 1100 // shift instructions. |
| 1101 switch (instr->op()) { |
| 1102 case Token::SAR: |
| 1103 __ srav(result, left, ToRegister(right_op)); |
| 1104 break; |
| 1105 case Token::SHR: |
| 1106 __ srlv(result, left, ToRegister(right_op)); |
| 1107 if (instr->can_deopt()) { |
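|           // The result of a zero-fill right shift is unsigned; deopt if the
|           // sign bit is set, since the value cannot be represented as an int32.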
| 1108 DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg)); |
| 1109 } |
| 1110 break; |
| 1111 case Token::SHL: |
| 1112 __ sllv(result, left, ToRegister(right_op)); |
| 1113 break; |
| 1114 default: |
| 1115 UNREACHABLE(); |
| 1116 break; |
| 1117 } |
| 1118 } else { |
| 1119 // Mask the right_op operand. |
| 1120 int value = ToInteger32(LConstantOperand::cast(right_op)); |
| 1121 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); |
| 1122 switch (instr->op()) { |
| 1123 case Token::SAR: |
| 1124 if (shift_count != 0) { |
| 1125 __ sra(result, left, shift_count); |
| 1126 } else { |
| 1127 __ Move(result, left); |
| 1128 } |
| 1129 break; |
| 1130 case Token::SHR: |
| 1131 if (shift_count != 0) { |
| 1132 __ srl(result, left, shift_count); |
| 1133 } else { |
| 1134 if (instr->can_deopt()) { |
| 1135 __ And(at, left, Operand(0x80000000)); |
| 1136 DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg)); |
| 1137 } |
| 1138 __ Move(result, left); |
| 1139 } |
| 1140 break; |
| 1141 case Token::SHL: |
| 1142 if (shift_count != 0) { |
| 1143 __ sll(result, left, shift_count); |
| 1144 } else { |
| 1145 __ Move(result, left); |
| 1146 } |
| 1147 break; |
| 1148 default: |
| 1149 UNREACHABLE(); |
| 1150 break; |
| 1151 } |
| 1152 } |
| 1153 } |
| 1154 |
| 1155 |
| 1156 void LCodeGen::DoSubI(LSubI* instr) { |
| 1157 LOperand* left = instr->InputAt(0); |
| 1158 LOperand* right = instr->InputAt(1); |
| 1159 LOperand* result = instr->result(); |
| 1160 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 1161 |
| 1162 if (!can_overflow) { |
| 1163 if (right->IsStackSlot() || right->IsArgument()) { |
| 1164 Register right_reg = EmitLoadRegister(right, at); |
| 1165 __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg)); |
| 1166 } else { |
| 1167 ASSERT(right->IsRegister() || right->IsConstantOperand()); |
| 1168 __ Subu(ToRegister(result), ToRegister(left), ToOperand(right)); |
| 1169 } |
| 1170 } else { // can_overflow. |
| 1171 Register overflow = scratch0(); |
| 1172 Register scratch = scratch1(); |
| 1173 if (right->IsStackSlot() || |
| 1174 right->IsArgument() || |
| 1175 right->IsConstantOperand()) { |
| 1176 Register right_reg = EmitLoadRegister(right, scratch); |
| 1177 __ SubuAndCheckForOverflow(ToRegister(result), |
| 1178 ToRegister(left), |
| 1179 right_reg, |
| 1180 overflow); // Reg at also used as scratch. |
| 1181 } else { |
| 1182 ASSERT(right->IsRegister()); |
| 1183       // The overflow check macros do not support constant operands, so
| 1184       // the IsConstantOperand case is handled in the previous clause.
| 1185 __ SubuAndCheckForOverflow(ToRegister(result), |
| 1186 ToRegister(left), |
| 1187 ToRegister(right), |
| 1188 overflow); // Reg at also used as scratch. |
| 1189 } |
| 1190 DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg)); |
| 1191 } |
| 1192 } |
| 1193 |
| 1194 |
| 1195 void LCodeGen::DoConstantI(LConstantI* instr) { |
| 1196 ASSERT(instr->result()->IsRegister()); |
| 1197 __ li(ToRegister(instr->result()), Operand(instr->value())); |
| 1198 } |
| 1199 |
| 1200 |
| 1201 void LCodeGen::DoConstantD(LConstantD* instr) { |
| 1202 ASSERT(instr->result()->IsDoubleRegister()); |
| 1203 DoubleRegister result = ToDoubleRegister(instr->result()); |
| 1204 double v = instr->value(); |
| 1205 __ Move(result, v); |
| 1206 } |
| 1207 |
| 1208 |
| 1209 void LCodeGen::DoConstantT(LConstantT* instr) { |
| 1210 ASSERT(instr->result()->IsRegister()); |
| 1211 __ li(ToRegister(instr->result()), Operand(instr->value())); |
| 1212 } |
| 1213 |
| 1214 |
| 1215 void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) { |
| 1216 Register result = ToRegister(instr->result()); |
| 1217 Register array = ToRegister(instr->InputAt(0)); |
| 1218 __ lw(result, FieldMemOperand(array, JSArray::kLengthOffset)); |
| 1219 } |
| 1220 |
| 1221 |
| 1222 void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) { |
| 1223 Register result = ToRegister(instr->result()); |
| 1224 Register array = ToRegister(instr->InputAt(0)); |
| 1225 __ lw(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset)); |
| 1226 } |
| 1227 |
| 1228 |
| 1229 void LCodeGen::DoElementsKind(LElementsKind* instr) { |
| 1230 Register result = ToRegister(instr->result()); |
| 1231 Register input = ToRegister(instr->InputAt(0)); |
| 1232 |
| 1233 // Load map into |result|. |
| 1234 __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 1235 // Load the map's "bit field 2" into |result|. We only need the first byte, |
| 1236 // but the following bit field extraction takes care of that anyway. |
| 1237 __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset)); |
| 1238 // Retrieve elements_kind from bit field 2. |
| 1239 __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount); |
| 1240 } |
| 1241 |
| 1242 |
| 1243 void LCodeGen::DoValueOf(LValueOf* instr) { |
| 1244 Register input = ToRegister(instr->InputAt(0)); |
| 1245 Register result = ToRegister(instr->result()); |
| 1246 Register map = ToRegister(instr->TempAt(0)); |
| 1247 Label done; |
| 1248 |
| 1249 // If the object is a smi return the object. |
| 1250 __ Move(result, input); |
| 1251 __ JumpIfSmi(input, &done); |
| 1252 |
| 1253 // If the object is not a value type, return the object. |
| 1254 __ GetObjectType(input, map, map); |
| 1255 __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE)); |
| 1256 __ lw(result, FieldMemOperand(input, JSValue::kValueOffset)); |
| 1257 |
| 1258 __ bind(&done); |
| 1259 } |
| 1260 |
| 1261 |
| 1262 void LCodeGen::DoBitNotI(LBitNotI* instr) { |
| 1263 Register input = ToRegister(instr->InputAt(0)); |
| 1264 Register result = ToRegister(instr->result()); |
| 1265 __ Nor(result, zero_reg, Operand(input)); |
| 1266 } |
| 1267 |
| 1268 |
| 1269 void LCodeGen::DoThrow(LThrow* instr) { |
| 1270 Register input_reg = EmitLoadRegister(instr->InputAt(0), at); |
| 1271 __ push(input_reg); |
| 1272 CallRuntime(Runtime::kThrow, 1, instr); |
| 1273 |
| 1274 if (FLAG_debug_code) { |
| 1275 __ stop("Unreachable code."); |
| 1276 } |
| 1277 } |
| 1278 |
| 1279 |
| 1280 void LCodeGen::DoAddI(LAddI* instr) { |
| 1281 LOperand* left = instr->InputAt(0); |
| 1282 LOperand* right = instr->InputAt(1); |
| 1283 LOperand* result = instr->result(); |
| 1284 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 1285 |
| 1286 if (!can_overflow) { |
| 1287 if (right->IsStackSlot() || right->IsArgument()) { |
| 1288 Register right_reg = EmitLoadRegister(right, at); |
| 1289 __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg)); |
| 1290 } else { |
| 1291 ASSERT(right->IsRegister() || right->IsConstantOperand()); |
| 1292 __ Addu(ToRegister(result), ToRegister(left), ToOperand(right)); |
| 1293 } |
| 1294 } else { // can_overflow. |
| 1295 Register overflow = scratch0(); |
| 1296 Register scratch = scratch1(); |
| 1297 if (right->IsStackSlot() || |
| 1298 right->IsArgument() || |
| 1299 right->IsConstantOperand()) { |
| 1300 Register right_reg = EmitLoadRegister(right, scratch); |
| 1301 __ AdduAndCheckForOverflow(ToRegister(result), |
| 1302 ToRegister(left), |
| 1303 right_reg, |
| 1304 overflow); // Reg at also used as scratch. |
| 1305 } else { |
| 1306 ASSERT(right->IsRegister()); |
| 1307       // The overflow check macros do not support constant operands, so
| 1308       // the IsConstantOperand case is handled in the previous clause.
| 1309 __ AdduAndCheckForOverflow(ToRegister(result), |
| 1310 ToRegister(left), |
| 1311 ToRegister(right), |
| 1312 overflow); // Reg at also used as scratch. |
| 1313 } |
| 1314 DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg)); |
| 1315 } |
| 1316 } |
| 1317 |
| 1318 |
| 1319 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { |
| 1320 DoubleRegister left = ToDoubleRegister(instr->InputAt(0)); |
| 1321 DoubleRegister right = ToDoubleRegister(instr->InputAt(1)); |
| 1322 DoubleRegister result = ToDoubleRegister(instr->result()); |
| 1323 switch (instr->op()) { |
| 1324 case Token::ADD: |
| 1325 __ add_d(result, left, right); |
| 1326 break; |
| 1327 case Token::SUB: |
| 1328 __ sub_d(result, left, right); |
| 1329 break; |
| 1330 case Token::MUL: |
| 1331 __ mul_d(result, left, right); |
| 1332 break; |
| 1333 case Token::DIV: |
| 1334 __ div_d(result, left, right); |
| 1335 break; |
| 1336 case Token::MOD: { |
| 1337 // Save a0-a3 on the stack. |
| 1338 RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit(); |
| 1339 __ MultiPush(saved_regs); |
| 1340 |
| 1341 __ PrepareCallCFunction(0, 2, scratch0()); |
| 1342 __ SetCallCDoubleArguments(left, right); |
| 1343 __ CallCFunction( |
| 1344 ExternalReference::double_fp_operation(Token::MOD, isolate()), |
| 1345 0, 2); |
| 1346       // Move the result into the double result register.
| 1347       __ GetCFunctionDoubleResult(result);
| 1348 
| 1349       // Restore saved registers.
| 1350 __ MultiPop(saved_regs); |
| 1351 break; |
| 1352 } |
| 1353 default: |
| 1354 UNREACHABLE(); |
| 1355 break; |
| 1356 } |
| 1357 } |
| 1358 |
| 1359 |
| 1360 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { |
| 1361 ASSERT(ToRegister(instr->InputAt(0)).is(a1)); |
| 1362 ASSERT(ToRegister(instr->InputAt(1)).is(a0)); |
| 1363 ASSERT(ToRegister(instr->result()).is(v0)); |
| 1364 |
| 1365 BinaryOpStub stub(instr->op(), NO_OVERWRITE); |
| 1366 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 1367   // Other architectures use a nop here to signal that there is no inlined
| 1368   // patchable code. MIPS does not need the nop, since our marker
| 1369   // instruction (andi zero_reg) is never used in normal code.
| 1370 } |
| 1371 |
| 1372 |
| 1373 int LCodeGen::GetNextEmittedBlock(int block) { |
| 1374 for (int i = block + 1; i < graph()->blocks()->length(); ++i) { |
| 1375 LLabel* label = chunk_->GetLabel(i); |
| 1376 if (!label->HasReplacement()) return i; |
| 1377 } |
| 1378 return -1; |
| 1379 } |
| 1380 |
| 1381 |
| 1382 void LCodeGen::EmitBranch(int left_block, int right_block, |
| 1383 Condition cc, Register src1, const Operand& src2) { |
| 1384 int next_block = GetNextEmittedBlock(current_block_); |
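|   // Emit at most one branch: when a target block is the next one in the
|   // emitted order, simply fall through to it.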
| 1385 right_block = chunk_->LookupDestination(right_block); |
| 1386 left_block = chunk_->LookupDestination(left_block); |
| 1387 if (right_block == left_block) { |
| 1388 EmitGoto(left_block); |
| 1389 } else if (left_block == next_block) { |
| 1390 __ Branch(chunk_->GetAssemblyLabel(right_block), |
| 1391 NegateCondition(cc), src1, src2); |
| 1392 } else if (right_block == next_block) { |
| 1393 __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2); |
| 1394 } else { |
| 1395 __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2); |
| 1396 __ Branch(chunk_->GetAssemblyLabel(right_block)); |
| 1397 } |
| 1398 } |
| 1399 |
| 1400 |
| 1401 void LCodeGen::EmitBranchF(int left_block, int right_block, |
| 1402 Condition cc, FPURegister src1, FPURegister src2) { |
| 1403 int next_block = GetNextEmittedBlock(current_block_); |
| 1404 right_block = chunk_->LookupDestination(right_block); |
| 1405 left_block = chunk_->LookupDestination(left_block); |
| 1406 if (right_block == left_block) { |
| 1407 EmitGoto(left_block); |
| 1408 } else if (left_block == next_block) { |
| 1409 __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL, |
| 1410 NegateCondition(cc), src1, src2); |
| 1411 } else if (right_block == next_block) { |
| 1412 __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2); |
| 1413 } else { |
| 1414 __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2); |
| 1415 __ Branch(chunk_->GetAssemblyLabel(right_block)); |
| 1416 } |
| 1417 } |
| 1418 |
| 1419 |
| 1420 void LCodeGen::DoBranch(LBranch* instr) { |
| 1421 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
| 1422 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
| 1423 |
| 1424 Representation r = instr->hydrogen()->value()->representation(); |
| 1425 if (r.IsInteger32()) { |
| 1426 Register reg = ToRegister(instr->InputAt(0)); |
| 1427 EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg)); |
| 1428 } else if (r.IsDouble()) { |
| 1429 DoubleRegister reg = ToDoubleRegister(instr->InputAt(0)); |
| 1430 // Test the double value. Zero and NaN are false. |
| 1431 EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero); |
| 1432 } else { |
| 1433 ASSERT(r.IsTagged()); |
| 1434 Register reg = ToRegister(instr->InputAt(0)); |
| 1435 HType type = instr->hydrogen()->value()->type(); |
| 1436 if (type.IsBoolean()) { |
| 1437 __ LoadRoot(at, Heap::kTrueValueRootIndex); |
| 1438 EmitBranch(true_block, false_block, eq, reg, Operand(at)); |
| 1439 } else if (type.IsSmi()) { |
| 1440 EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg)); |
| 1441 } else { |
| 1442 Label* true_label = chunk_->GetAssemblyLabel(true_block); |
| 1443 Label* false_label = chunk_->GetAssemblyLabel(false_block); |
| 1444 |
| 1445 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); |
| 1446 // Avoid deopts in the case where we've never executed this path before. |
| 1447 if (expected.IsEmpty()) expected = ToBooleanStub::all_types(); |
| 1448 |
| 1449 if (expected.Contains(ToBooleanStub::UNDEFINED)) { |
| 1450 // undefined -> false. |
| 1451 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
| 1452 __ Branch(false_label, eq, reg, Operand(at)); |
| 1453 } |
| 1454 if (expected.Contains(ToBooleanStub::BOOLEAN)) { |
| 1455 // Boolean -> its value. |
| 1456 __ LoadRoot(at, Heap::kTrueValueRootIndex); |
| 1457 __ Branch(true_label, eq, reg, Operand(at)); |
| 1458 __ LoadRoot(at, Heap::kFalseValueRootIndex); |
| 1459 __ Branch(false_label, eq, reg, Operand(at)); |
| 1460 } |
| 1461 if (expected.Contains(ToBooleanStub::NULL_TYPE)) { |
| 1462 // 'null' -> false. |
| 1463 __ LoadRoot(at, Heap::kNullValueRootIndex); |
| 1464 __ Branch(false_label, eq, reg, Operand(at)); |
| 1465 } |
| 1466 |
| 1467 if (expected.Contains(ToBooleanStub::SMI)) { |
| 1468 // Smis: 0 -> false, all other -> true. |
| 1469 __ Branch(false_label, eq, reg, Operand(zero_reg)); |
| 1470 __ JumpIfSmi(reg, true_label); |
| 1471 } else if (expected.NeedsMap()) { |
| 1472 // If we need a map later and have a Smi -> deopt. |
| 1473 __ And(at, reg, Operand(kSmiTagMask)); |
| 1474 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg)); |
| 1475 } |
| 1476 |
| 1477 const Register map = scratch0(); |
| 1478 if (expected.NeedsMap()) { |
| 1479 __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 1480 if (expected.CanBeUndetectable()) { |
| 1481 // Undetectable -> false. |
| 1482 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); |
| 1483 __ And(at, at, Operand(1 << Map::kIsUndetectable)); |
| 1484 __ Branch(false_label, ne, at, Operand(zero_reg)); |
| 1485 } |
| 1486 } |
| 1487 |
| 1488 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) { |
| 1489 // spec object -> true. |
| 1490 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
| 1491 __ Branch(true_label, ge, at, Operand(FIRST_SPEC_OBJECT_TYPE)); |
| 1492 } |
| 1493 |
| 1494 if (expected.Contains(ToBooleanStub::STRING)) { |
| 1495 // String value -> false iff empty. |
| 1496 Label not_string; |
| 1497 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
| 1498         __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
| 1499 __ lw(at, FieldMemOperand(reg, String::kLengthOffset)); |
| 1500 __ Branch(true_label, ne, at, Operand(zero_reg)); |
| 1501 __ Branch(false_label); |
| 1502         __ bind(&not_string);
| 1503 } |
| 1504 |
| 1505 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { |
| 1506 // heap number -> false iff +0, -0, or NaN. |
| 1507 DoubleRegister dbl_scratch = double_scratch0(); |
| 1508 Label not_heap_number; |
| 1509 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
| 1510 __ Branch(¬_heap_number, ne, map, Operand(at)); |
| 1511 __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); |
| 1512 __ BranchF(true_label, false_label, ne, dbl_scratch, kDoubleRegZero); |
| 1513 // Falls through if dbl_scratch == 0. |
| 1514 __ Branch(false_label); |
| 1515 __ bind(¬_heap_number); |
| 1516 } |
| 1517 |
| 1518 // We've seen something for the first time -> deopt. |
| 1519 DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg)); |
| 1520 } |
| 1521 } |
| 1522 } |
| 1523 |
| 1524 |
| 1525 void LCodeGen::EmitGoto(int block) { |
| 1526 block = chunk_->LookupDestination(block); |
| 1527 int next_block = GetNextEmittedBlock(current_block_); |
| 1528 if (block != next_block) { |
| 1529 __ jmp(chunk_->GetAssemblyLabel(block)); |
| 1530 } |
| 1531 } |
| 1532 |
| 1533 |
| 1534 void LCodeGen::DoGoto(LGoto* instr) { |
| 1535 EmitGoto(instr->block_id()); |
| 1536 } |
| 1537 |
| 1538 |
| 1539 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { |
| 1540 Condition cond = kNoCondition; |
| 1541 switch (op) { |
| 1542 case Token::EQ: |
| 1543 case Token::EQ_STRICT: |
| 1544 cond = eq; |
| 1545 break; |
| 1546 case Token::LT: |
| 1547 cond = is_unsigned ? lo : lt; |
| 1548 break; |
| 1549 case Token::GT: |
| 1550 cond = is_unsigned ? hi : gt; |
| 1551 break; |
| 1552 case Token::LTE: |
| 1553 cond = is_unsigned ? ls : le; |
| 1554 break; |
| 1555 case Token::GTE: |
| 1556 cond = is_unsigned ? hs : ge; |
| 1557 break; |
| 1558 case Token::IN: |
| 1559 case Token::INSTANCEOF: |
| 1560 default: |
| 1561 UNREACHABLE(); |
| 1562 } |
| 1563 return cond; |
| 1564 } |
| 1565 |
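| // For reference, the mapping for e.g. "a < b" (Token::LT): the signed |
| // case yields 'lt' (slt-style compare) and the unsigned case yields 'lo' |
| // (sltu-style), so 0xFFFFFFFF compares above 1 when unsigned but below |
| // it when signed. |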
| 1566 |
| 1567 void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) { |
| 1568 // This function must never be called on MIPS. It is just a |
| 1569 // compare, so it should be generated inline as part of the |
| 1570 // branch that uses it, and it should always remain an |
| 1571 // unimplemented function. |
| 1572 // arm: __ cmp(ToRegister(left), ToRegister(right)); |
| 1573 Abort("Unimplemented: %s (line %d)", __func__, __LINE__); |
| 1574 } |
| 1575 |
| 1576 |
| 1577 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { |
| 1578 LOperand* left = instr->InputAt(0); |
| 1579 LOperand* right = instr->InputAt(1); |
| 1580 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
| 1581 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
| 1582 |
| 1583 Condition cc = TokenToCondition(instr->op(), instr->is_double()); |
| 1584 |
| 1585 if (instr->is_double()) { |
| 1586 // Compare left and right as doubles; the FPU compare sets the |
| 1587 // floating-point condition flag consumed by the branches below. |
| 1588 FPURegister left_reg = ToDoubleRegister(left); |
| 1589 FPURegister right_reg = ToDoubleRegister(right); |
| 1590 |
| 1591 // If a NaN is involved, i.e. the result is unordered, |
| 1592 // jump to false block label. |
| 1593 __ BranchF(NULL, chunk_->GetAssemblyLabel(false_block), eq, |
| 1594 left_reg, right_reg); |
| 1595 |
| 1596 EmitBranchF(true_block, false_block, cc, left_reg, right_reg); |
| 1597 } else { |
| 1598 // EmitCmpI cannot be used on MIPS. |
| 1599 // EmitCmpI(left, right); |
| 1600 EmitBranch(true_block, |
| 1601 false_block, |
| 1602 cc, |
| 1603 ToRegister(left), |
| 1604 Operand(ToRegister(right))); |
| 1605 } |
| 1606 } |
| 1607 |
| 1608 |
| 1609 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { |
| 1610 Register left = ToRegister(instr->InputAt(0)); |
| 1611 Register right = ToRegister(instr->InputAt(1)); |
| 1612 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
| 1613 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
| 1614 |
| 1615 EmitBranch(true_block, false_block, eq, left, Operand(right)); |
| 1616 } |
| 1617 |
| 1618 |
| 1619 void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) { |
| 1620 Register left = ToRegister(instr->InputAt(0)); |
| 1621 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
| 1622 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
| 1623 |
| 1624 EmitBranch(true_block, false_block, eq, left, |
| 1625 Operand(instr->hydrogen()->right())); |
| 1626 } |
| 1627 |
| 1628 |
| 1629 |
| 1630 void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) { |
| 1631 Register scratch = scratch0(); |
| 1632 Register reg = ToRegister(instr->InputAt(0)); |
| 1633 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
| 1634 |
| 1635 // If the expression is known to be untagged or a smi, then it's definitely |
| 1636 // not null, and it can't be an undetectable object. |
| 1637 if (instr->hydrogen()->representation().IsSpecialization() || |
| 1638 instr->hydrogen()->type().IsSmi()) { |
| 1639 EmitGoto(false_block); |
| 1640 return; |
| 1641 } |
| 1642 |
| 1643 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
| 1644 |
| 1645 Heap::RootListIndex nil_value = instr->nil() == kNullValue ? |
| 1646 Heap::kNullValueRootIndex : |
| 1647 Heap::kUndefinedValueRootIndex; |
| 1648 __ LoadRoot(at, nil_value); |
| 1649 if (instr->kind() == kStrictEquality) { |
| 1650 EmitBranch(true_block, false_block, eq, reg, Operand(at)); |
| 1651 } else { |
| 1652 Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ? |
| 1653 Heap::kUndefinedValueRootIndex : |
| 1654 Heap::kNullValueRootIndex; |
| 1655 Label* true_label = chunk_->GetAssemblyLabel(true_block); |
| 1656 Label* false_label = chunk_->GetAssemblyLabel(false_block); |
| 1657 __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at)); |
| 1658 __ LoadRoot(at, other_nil_value); // In the delay slot. |
| 1659 __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at)); |
| 1660 __ JumpIfSmi(reg, false_label); // In the delay slot. |
| 1661 // Check for undetectable objects by looking in the bit field in |
| 1662 // the map. The object has already been smi checked. |
| 1663 __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 1664 __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); |
| 1665 __ And(scratch, scratch, 1 << Map::kIsUndetectable); |
| 1666 EmitBranch(true_block, false_block, ne, scratch, Operand(zero_reg)); |
| 1667 } |
| 1668 } |
| 1669 |
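| // Note on USE_DELAY_SLOT above: on MIPS the instruction following a |
| // branch (its delay slot) executes whether or not the branch is taken, |
| // so the instruction placed there must be harmless on both paths. |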
| 1670 |
| 1671 Condition LCodeGen::EmitIsObject(Register input, |
| 1672 Register temp1, |
| 1673 Label* is_not_object, |
| 1674 Label* is_object) { |
| 1675 Register temp2 = scratch0(); |
| 1676 __ JumpIfSmi(input, is_not_object); |
| 1677 |
| 1678 __ LoadRoot(temp2, Heap::kNullValueRootIndex); |
| 1679 __ Branch(is_object, eq, input, Operand(temp2)); |
| 1680 |
| 1681 // Load map. |
| 1682 __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 1683 // Undetectable objects behave like undefined. |
| 1684 __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset)); |
| 1685 __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable)); |
| 1686 __ Branch(is_not_object, ne, temp2, Operand(zero_reg)); |
| 1687 |
| 1688 // Load instance type and check that it is in object type range. |
| 1689 __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset)); |
| 1690 __ Branch(is_not_object, |
| 1691 lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
| 1692 |
| 1693 return le; |
| 1694 } |
| 1695 |
| 1696 |
| 1697 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) { |
| 1698 Register reg = ToRegister(instr->InputAt(0)); |
| 1699 Register temp1 = ToRegister(instr->TempAt(0)); |
| 1700 Register temp2 = scratch0(); |
| 1701 |
| 1702 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
| 1703 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
| 1704 Label* true_label = chunk_->GetAssemblyLabel(true_block); |
| 1705 Label* false_label = chunk_->GetAssemblyLabel(false_block); |
| 1706 |
| 1707 Condition true_cond = |
| 1708 EmitIsObject(reg, temp1, false_label, true_label); |
| 1709 |
| 1710 EmitBranch(true_block, false_block, true_cond, temp2, |
| 1711 Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
| 1712 } |
| 1713 |
| 1714 |
| 1715 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { |
| 1716 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
| 1717 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
| 1718 |
| 1719 Register input_reg = EmitLoadRegister(instr->InputAt(0), at); |
| 1720 __ And(at, input_reg, kSmiTagMask); |
| 1721 EmitBranch(true_block, false_block, eq, at, Operand(zero_reg)); |
| 1722 } |
| 1723 |
| 1724 |
| 1725 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { |
| 1726 Register input = ToRegister(instr->InputAt(0)); |
| 1727 Register temp = ToRegister(instr->TempAt(0)); |
| 1728 |
| 1729 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
| 1730 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
| 1731 |
| 1732 __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block)); |
| 1733 __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 1734 __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); |
| 1735 __ And(at, temp, Operand(1 << Map::kIsUndetectable)); |
| 1736 EmitBranch(true_block, false_block, ne, at, Operand(zero_reg)); |
| 1737 } |
| 1738 |
| 1739 |
| 1740 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { |
| 1741 InstanceType from = instr->from(); |
| 1742 InstanceType to = instr->to(); |
| 1743 if (from == FIRST_TYPE) return to; |
| 1744 ASSERT(from == to || to == LAST_TYPE); |
| 1745 return from; |
| 1746 } |
| 1747 |
| 1748 |
| 1749 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { |
| 1750 InstanceType from = instr->from(); |
| 1751 InstanceType to = instr->to(); |
| 1752 if (from == to) return eq; |
| 1753 if (to == LAST_TYPE) return hs; |
| 1754 if (from == FIRST_TYPE) return ls; |
| 1755 UNREACHABLE(); |
| 1756 return eq; |
| 1757 } |
| 1758 |
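| // Together these helpers collapse the [from, to] instance type interval |
| // into a single compare: an exact type uses 'eq'; [from, LAST_TYPE] |
| // becomes 'hs' against from; [FIRST_TYPE, to] becomes 'ls' against to. |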
| 1759 |
| 1760 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { |
| 1761 Register scratch = scratch0(); |
| 1762 Register input = ToRegister(instr->InputAt(0)); |
| 1763 |
| 1764 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
| 1765 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
| 1766 |
| 1767 Label* false_label = chunk_->GetAssemblyLabel(false_block); |
| 1768 |
| 1769 __ JumpIfSmi(input, false_label); |
| 1770 |
| 1771 __ GetObjectType(input, scratch, scratch); |
| 1772 EmitBranch(true_block, |
| 1773 false_block, |
| 1774 BranchCondition(instr->hydrogen()), |
| 1775 scratch, |
| 1776 Operand(TestType(instr->hydrogen()))); |
| 1777 } |
| 1778 |
| 1779 |
| 1780 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { |
| 1781 Register input = ToRegister(instr->InputAt(0)); |
| 1782 Register result = ToRegister(instr->result()); |
| 1783 |
| 1784 if (FLAG_debug_code) { |
| 1785 __ AbortIfNotString(input); |
| 1786 } |
| 1787 |
| 1788 __ lw(result, FieldMemOperand(input, String::kHashFieldOffset)); |
| 1789 __ IndexFromHash(result, result); |
| 1790 } |
| 1791 |
| 1792 |
| 1793 void LCodeGen::DoHasCachedArrayIndexAndBranch( |
| 1794 LHasCachedArrayIndexAndBranch* instr) { |
| 1795 Register input = ToRegister(instr->InputAt(0)); |
| 1796 Register scratch = scratch0(); |
| 1797 |
| 1798 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
| 1799 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
| 1800 |
| 1801 __ lw(scratch, |
| 1802 FieldMemOperand(input, String::kHashFieldOffset)); |
| 1803 __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask)); |
| 1804 EmitBranch(true_block, false_block, eq, at, Operand(zero_reg)); |
| 1805 } |
| 1806 |
| 1807 |
| 1808 // Branches to a label or falls through with the address of this instance's |
| 1809 // class name in the temp register, available for comparison by the caller. |
| 1810 // Trashes the temp registers, but not the input. Only input and temp2 may alias. |
| 1811 void LCodeGen::EmitClassOfTest(Label* is_true, |
| 1812 Label* is_false, |
| 1813 Handle<String> class_name, |
| 1814 Register input, |
| 1815 Register temp, |
| 1816 Register temp2) { |
| 1817 ASSERT(!input.is(temp)); |
| 1818 ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register. |
| 1819 __ JumpIfSmi(input, is_false); |
| 1820 |
| 1821 if (class_name->IsEqualTo(CStrVector("Function"))) { |
| 1822 // Assuming the following assertions, we can use the same compares to test |
| 1823 // for both being a function type and being in the object type range. |
| 1824 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); |
| 1825 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == |
| 1826 FIRST_SPEC_OBJECT_TYPE + 1); |
| 1827 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == |
| 1828 LAST_SPEC_OBJECT_TYPE - 1); |
| 1829 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); |
| 1830 |
| 1831 __ GetObjectType(input, temp, temp2); |
| 1832 __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE)); |
| 1833 __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE)); |
| 1834 __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE)); |
| 1835 } else { |
| 1836 // Faster code path to avoid two compares: subtract lower bound from the |
| 1837 // actual type and do a signed compare with the width of the type range. |
| 1838 __ GetObjectType(input, temp, temp2); |
| 1839 __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
| 1840 __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - |
| 1841 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
| 1842 } |
| 1843 |
| 1844 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. |
| 1845 // Check if the constructor in the map is a function. |
| 1846 __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset)); |
| 1847 |
| 1848 // Objects with a non-function constructor have class 'Object'. |
| 1849 __ GetObjectType(temp, temp2, temp2); |
| 1850 if (class_name->IsEqualTo(CStrVector("Object"))) { |
| 1851 __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE)); |
| 1852 } else { |
| 1853 __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE)); |
| 1854 } |
| 1855 |
| 1856 // temp now contains the constructor function. Grab the |
| 1857 // instance class name from there. |
| 1858 __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset)); |
| 1859 __ lw(temp, FieldMemOperand(temp, |
| 1860 SharedFunctionInfo::kInstanceClassNameOffset)); |
| 1861 // The class name we are testing against is a symbol because it's a literal. |
| 1862 // The name in the constructor is a symbol because of the way the context is |
| 1863 // booted. This routine isn't expected to work for random API-created |
| 1864 // classes and it doesn't have to because you can't access it with natives |
| 1865 // syntax. Since both sides are symbols it is sufficient to use an identity |
| 1866 // comparison. |
| 1867 |
| 1868 // End with the address of this class_name instance in temp register. |
| 1869 // On MIPS, the caller must do the comparison with Handle<String> class_name. |
| 1870 } |
| 1871 |
| 1872 |
| 1873 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { |
| 1874 Register input = ToRegister(instr->InputAt(0)); |
| 1875 Register temp = scratch0(); |
| 1876 Register temp2 = ToRegister(instr->TempAt(0)); |
| 1877 Handle<String> class_name = instr->hydrogen()->class_name(); |
| 1878 |
| 1879 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
| 1880 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
| 1881 |
| 1882 Label* true_label = chunk_->GetAssemblyLabel(true_block); |
| 1883 Label* false_label = chunk_->GetAssemblyLabel(false_block); |
| 1884 |
| 1885 EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2); |
| 1886 |
| 1887 EmitBranch(true_block, false_block, eq, temp, Operand(class_name)); |
| 1888 } |
| 1889 |
| 1890 |
| 1891 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { |
| 1892 Register reg = ToRegister(instr->InputAt(0)); |
| 1893 Register temp = ToRegister(instr->TempAt(0)); |
| 1894 int true_block = instr->true_block_id(); |
| 1895 int false_block = instr->false_block_id(); |
| 1896 |
| 1897 __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 1898 EmitBranch(true_block, false_block, eq, temp, Operand(instr->map())); |
| 1899 } |
| 1900 |
| 1901 |
| 1902 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { |
| 1903 Label true_label, done; |
| 1904 ASSERT(ToRegister(instr->InputAt(0)).is(a0)); // Object is in a0. |
| 1905 ASSERT(ToRegister(instr->InputAt(1)).is(a1)); // Function is in a1. |
| 1906 Register result = ToRegister(instr->result()); |
| 1907 ASSERT(result.is(v0)); |
| 1908 |
| 1909 InstanceofStub stub(InstanceofStub::kArgsInRegisters); |
| 1910 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 1911 |
| 1912 __ Branch(&true_label, eq, result, Operand(zero_reg)); |
| 1913 __ li(result, Operand(factory()->false_value())); |
| 1914 __ Branch(&done); |
| 1915 __ bind(&true_label); |
| 1916 __ li(result, Operand(factory()->true_value())); |
| 1917 __ bind(&done); |
| 1918 } |
| 1919 |
| 1920 |
| 1921 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { |
| 1922 class DeferredInstanceOfKnownGlobal: public LDeferredCode { |
| 1923 public: |
| 1924 DeferredInstanceOfKnownGlobal(LCodeGen* codegen, |
| 1925 LInstanceOfKnownGlobal* instr) |
| 1926 : LDeferredCode(codegen), instr_(instr) { } |
| 1927 virtual void Generate() { |
| 1928 codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_); |
| 1929 } |
| 1930 virtual LInstruction* instr() { return instr_; } |
| 1931 Label* map_check() { return &map_check_; } |
| 1932 |
| 1933 private: |
| 1934 LInstanceOfKnownGlobal* instr_; |
| 1935 Label map_check_; |
| 1936 }; |
| 1937 |
| 1938 DeferredInstanceOfKnownGlobal* deferred; |
| 1939 deferred = new DeferredInstanceOfKnownGlobal(this, instr); |
| 1940 |
| 1941 Label done, false_result; |
| 1942 Register object = ToRegister(instr->InputAt(0)); |
| 1943 Register temp = ToRegister(instr->TempAt(0)); |
| 1944 Register result = ToRegister(instr->result()); |
| 1945 |
| 1946 ASSERT(object.is(a0)); |
| 1947 ASSERT(result.is(v0)); |
| 1948 |
| 1949 // A Smi is not an instance of anything. |
| 1950 __ JumpIfSmi(object, &false_result); |
| 1951 |
| 1952 // This is the inlined call site instanceof cache. The two occurrences of the |
| 1953 // hole value will be patched to the last map/result pair generated by the |
| 1954 // instanceof stub. |
| 1955 Label cache_miss; |
| 1956 Register map = temp; |
| 1957 __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 1958 |
| 1959 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); |
| 1960 __ bind(deferred->map_check()); // Label for calculating code patching. |
| 1961 // We use Factory::the_hole_value() on purpose instead of loading from the |
| 1962 // root array to force relocation to be able to later patch with |
| 1963 // the cached map. |
| 1964 __ li(at, Operand(factory()->the_hole_value()), true); |
| 1965 __ Branch(&cache_miss, ne, map, Operand(at)); |
| 1966 // We use Factory::the_hole_value() on purpose instead of loading from the |
| 1967 // root array to force relocation to be able to later patch |
| 1968 // with true or false. |
| 1969 __ li(result, Operand(factory()->the_hole_value()), true); |
| 1970 __ Branch(&done); |
| 1971 |
| 1972 // The inlined call site cache did not match. Check null and string before |
| 1973 // calling the deferred code. |
| 1974 __ bind(&cache_miss); |
| 1975 // Null is not an instance of anything. |
| 1976 __ LoadRoot(temp, Heap::kNullValueRootIndex); |
| 1977 __ Branch(&false_result, eq, object, Operand(temp)); |
| 1978 |
| 1979 // String values are not instances of anything. |
| 1980 Condition cc = __ IsObjectStringType(object, temp, temp); |
| 1981 __ Branch(&false_result, cc, temp, Operand(zero_reg)); |
| 1982 |
| 1983 // Go to the deferred code. |
| 1984 __ Branch(deferred->entry()); |
| 1985 |
| 1986 __ bind(&false_result); |
| 1987 __ LoadRoot(result, Heap::kFalseValueRootIndex); |
| 1988 |
| 1989 // Here result holds either the true or the false object. The deferred |
| 1990 // code also produces a true or a false object. |
| 1991 __ bind(deferred->exit()); |
| 1992 __ bind(&done); |
| 1993 } |
| 1994 |
| 1995 |
| 1996 void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, |
| 1997 Label* map_check) { |
| 1998 Register result = ToRegister(instr->result()); |
| 1999 ASSERT(result.is(v0)); |
| 2000 |
| 2001 InstanceofStub::Flags flags = InstanceofStub::kNoFlags; |
| 2002 flags = static_cast<InstanceofStub::Flags>( |
| 2003 flags | InstanceofStub::kArgsInRegisters); |
| 2004 flags = static_cast<InstanceofStub::Flags>( |
| 2005 flags | InstanceofStub::kCallSiteInlineCheck); |
| 2006 flags = static_cast<InstanceofStub::Flags>( |
| 2007 flags | InstanceofStub::kReturnTrueFalseObject); |
| 2008 InstanceofStub stub(flags); |
| 2009 |
| 2010 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
| 2011 |
| 2012 // Get the temp register reserved by the instruction. This needs to be t0 as |
| 2013 // its slot in the pushed safepoint register area is used to communicate the |
| 2014 // offset to the location of the map check. |
| 2015 Register temp = ToRegister(instr->TempAt(0)); |
| 2016 ASSERT(temp.is(t0)); |
| 2017 __ li(InstanceofStub::right(), Operand(instr->function())); |
| 2018 static const int kAdditionalDelta = 7; |
| 2019 int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta; |
| 2020 Label before_push_delta; |
| 2021 __ bind(&before_push_delta); |
| 2022 { |
| 2023 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); |
| 2024 __ li(temp, Operand(delta * kPointerSize), true); |
| 2025 __ StoreToSafepointRegisterSlot(temp, temp); |
| 2026 } |
| 2027 CallCodeGeneric(stub.GetCode(), |
| 2028 RelocInfo::CODE_TARGET, |
| 2029 instr, |
| 2030 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); |
| 2031 // Put the result value into the result register slot and |
| 2032 // restore all registers. |
| 2033 __ StoreToSafepointRegisterSlot(result, result); |
| 2034 } |
| 2035 |
| 2036 |
| 2037 static Condition ComputeCompareCondition(Token::Value op) { |
| 2038 switch (op) { |
| 2039 case Token::EQ_STRICT: |
| 2040 case Token::EQ: |
| 2041 return eq; |
| 2042 case Token::LT: |
| 2043 return lt; |
| 2044 case Token::GT: |
| 2045 return gt; |
| 2046 case Token::LTE: |
| 2047 return le; |
| 2048 case Token::GTE: |
| 2049 return ge; |
| 2050 default: |
| 2051 UNREACHABLE(); |
| 2052 return kNoCondition; |
| 2053 } |
| 2054 } |
| 2055 |
| 2056 |
| 2057 void LCodeGen::DoCmpT(LCmpT* instr) { |
| 2058 Token::Value op = instr->op(); |
| 2059 |
| 2060 Handle<Code> ic = CompareIC::GetUninitialized(op); |
| 2061 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 2062 // On MIPS there is no need for a "no inlined smi code" marker (nop). |
| 2063 |
| 2064 Condition condition = ComputeCompareCondition(op); |
| 2065 if (op == Token::GT || op == Token::LTE) { |
| 2066 condition = ReverseCondition(condition); |
| 2067 } |
| 2068 // A minor optimization that relies on LoadRoot always emitting one |
| 2069 // instruction. |
| 2070 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm()); |
| 2071 Label done; |
| 2072 __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg)); |
| 2073 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); |
| 2074 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); |
| 2075 ASSERT_EQ(3, masm()->InstructionsGeneratedSince(&done)); |
| 2076 __ bind(&done); |
| 2077 } |
| 2078 |
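| // The three instructions above form a compact select: the true-value |
| // LoadRoot sits in the branch delay slot, so it always executes; when |
| // the branch falls through, the false-value LoadRoot overwrites the |
| // result. This is why the code relies on LoadRoot emitting exactly one |
| // instruction, as checked by the ASSERT_EQ. |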
| 2079 |
| 2080 void LCodeGen::DoReturn(LReturn* instr) { |
| 2081 if (FLAG_trace) { |
| 2082 // Push the return value on the stack as the parameter. |
| 2083 // Runtime::TraceExit returns its parameter in v0. |
| 2084 __ push(v0); |
| 2085 __ CallRuntime(Runtime::kTraceExit, 1); |
| 2086 } |
| 2087 int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize; |
| 2088 __ mov(sp, fp); |
| 2089 __ Pop(ra, fp); |
| 2090 __ Addu(sp, sp, Operand(sp_delta)); |
| 2091 __ Jump(ra); |
| 2092 } |
| 2093 |
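| // Example: on MIPS32 kPointerSize is 4, so a function with two declared |
| // parameters computes sp_delta == (2 + 1) * 4 == 12, popping the two |
| // parameters plus the implicit receiver after the frame is torn down. |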
| 2094 |
| 2095 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { |
| 2096 Register result = ToRegister(instr->result()); |
| 2097 __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell()))); |
| 2098 __ lw(result, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset)); |
| 2099 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2100 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
| 2101 DeoptimizeIf(eq, instr->environment(), result, Operand(at)); |
| 2102 } |
| 2103 } |
| 2104 |
| 2105 |
| 2106 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { |
| 2107 ASSERT(ToRegister(instr->global_object()).is(a0)); |
| 2108 ASSERT(ToRegister(instr->result()).is(v0)); |
| 2109 |
| 2110 __ li(a2, Operand(instr->name())); |
| 2111 RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET |
| 2112 : RelocInfo::CODE_TARGET_CONTEXT; |
| 2113 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); |
| 2114 CallCode(ic, mode, instr); |
| 2115 } |
| 2116 |
| 2117 |
| 2118 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { |
| 2119 Register value = ToRegister(instr->InputAt(0)); |
| 2120 Register scratch = scratch0(); |
| 2121 Register scratch2 = ToRegister(instr->TempAt(0)); |
| 2122 |
| 2123 // Load the cell. |
| 2124 __ li(scratch, Operand(Handle<Object>(instr->hydrogen()->cell()))); |
| 2125 |
| 2126 // If the cell we are storing to contains the hole, it could have |
| 2127 // been deleted from the property dictionary. In that case, we need |
| 2128 // to update the property details in the property dictionary to mark |
| 2129 // it as no longer deleted. |
| 2130 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2131 __ lw(scratch2, |
| 2132 FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset)); |
| 2133 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
| 2134 DeoptimizeIf(eq, instr->environment(), scratch2, Operand(at)); |
| 2135 } |
| 2136 |
| 2137 // Store the value. |
| 2138 __ sw(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset)); |
| 2139 |
| 2140 // Cells are always in the remembered set. |
| 2141 if (instr->hydrogen()->NeedsWriteBarrier()) { |
| 2142 HType type = instr->hydrogen()->value()->type(); |
| 2143 SmiCheck check_needed = |
| 2144 type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| 2145 __ RecordWriteField(scratch, |
| 2146 JSGlobalPropertyCell::kValueOffset, |
| 2147 value, |
| 2148 scratch2, |
| 2149 kRAHasBeenSaved, |
| 2150 kSaveFPRegs, |
| 2151 OMIT_REMEMBERED_SET, |
| 2152 check_needed); |
| 2153 } |
| 2154 } |
| 2155 |
| 2156 |
| 2157 void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) { |
| 2158 ASSERT(ToRegister(instr->global_object()).is(a1)); |
| 2159 ASSERT(ToRegister(instr->value()).is(a0)); |
| 2160 |
| 2161 __ li(a2, Operand(instr->name())); |
| 2162 Handle<Code> ic = instr->strict_mode() |
| 2163 ? isolate()->builtins()->StoreIC_Initialize_Strict() |
| 2164 : isolate()->builtins()->StoreIC_Initialize(); |
| 2165 CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr); |
| 2166 } |
| 2167 |
| 2168 |
| 2169 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { |
| 2170 Register context = ToRegister(instr->context()); |
| 2171 Register result = ToRegister(instr->result()); |
| 2172 __ lw(result, ContextOperand(context, instr->slot_index())); |
| 2173 } |
| 2174 |
| 2175 |
| 2176 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { |
| 2177 Register context = ToRegister(instr->context()); |
| 2178 Register value = ToRegister(instr->value()); |
| 2179 MemOperand target = ContextOperand(context, instr->slot_index()); |
| 2180 __ sw(value, target); |
| 2181 if (instr->hydrogen()->NeedsWriteBarrier()) { |
| 2182 HType type = instr->hydrogen()->value()->type(); |
| 2183 SmiCheck check_needed = |
| 2184 type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| 2185 __ RecordWriteContextSlot(context, |
| 2186 target.offset(), |
| 2187 value, |
| 2188 scratch0(), |
| 2189 kRAHasBeenSaved, |
| 2190 kSaveFPRegs, |
| 2191 EMIT_REMEMBERED_SET, |
| 2192 check_needed); |
| 2193 } |
| 2194 } |
| 2195 |
| 2196 |
| 2197 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { |
| 2198 Register object = ToRegister(instr->InputAt(0)); |
| 2199 Register result = ToRegister(instr->result()); |
| 2200 if (instr->hydrogen()->is_in_object()) { |
| 2201 __ lw(result, FieldMemOperand(object, instr->hydrogen()->offset())); |
| 2202 } else { |
| 2203 __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); |
| 2204 __ lw(result, FieldMemOperand(result, instr->hydrogen()->offset())); |
| 2205 } |
| 2206 } |
| 2207 |
| 2208 |
| 2209 void LCodeGen::EmitLoadFieldOrConstantFunction(Register result, |
| 2210 Register object, |
| 2211 Handle<Map> type, |
| 2212 Handle<String> name) { |
| 2213 LookupResult lookup; |
| 2214 type->LookupInDescriptors(NULL, *name, &lookup); |
| 2215 ASSERT(lookup.IsProperty() && |
| 2216 (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION)); |
| 2217 if (lookup.type() == FIELD) { |
| 2218 int index = lookup.GetLocalFieldIndexFromMap(*type); |
| 2219 int offset = index * kPointerSize; |
| 2220 if (index < 0) { |
| 2221 // Negative property indices are in-object properties, indexed |
| 2222 // from the end of the fixed part of the object. |
| 2223 __ lw(result, FieldMemOperand(object, offset + type->instance_size())); |
| 2224 } else { |
| 2225 // Non-negative property indices are in the properties array. |
| 2226 __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); |
| 2227 __ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize)); |
| 2228 } |
| 2229 } else { |
| 2230 Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type)); |
| 2231 LoadHeapObject(result, Handle<HeapObject>::cast(function)); |
| 2232 } |
| 2233 } |
| 2234 |
| 2235 |
| 2236 void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) { |
| 2237 Register object = ToRegister(instr->object()); |
| 2238 Register result = ToRegister(instr->result()); |
| 2239 Register scratch = scratch0(); |
| 2240 int map_count = instr->hydrogen()->types()->length(); |
| 2241 Handle<String> name = instr->hydrogen()->name(); |
| 2242 if (map_count == 0) { |
| 2243 ASSERT(instr->hydrogen()->need_generic()); |
| 2244 __ li(a2, Operand(name)); |
| 2245 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); |
| 2246 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 2247 } else { |
| 2248 Label done; |
| 2249 __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 2250 for (int i = 0; i < map_count - 1; ++i) { |
| 2251 Handle<Map> map = instr->hydrogen()->types()->at(i); |
| 2252 Label next; |
| 2253 __ Branch(&next, ne, scratch, Operand(map)); |
| 2254 EmitLoadFieldOrConstantFunction(result, object, map, name); |
| 2255 __ Branch(&done); |
| 2256 __ bind(&next); |
| 2257 } |
| 2258 Handle<Map> map = instr->hydrogen()->types()->last(); |
| 2259 if (instr->hydrogen()->need_generic()) { |
| 2260 Label generic; |
| 2261 __ Branch(&generic, ne, scratch, Operand(map)); |
| 2262 EmitLoadFieldOrConstantFunction(result, object, map, name); |
| 2263 __ Branch(&done); |
| 2264 __ bind(&generic); |
| 2265 __ li(a2, Operand(name)); |
| 2266 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); |
| 2267 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 2268 } else { |
| 2269 DeoptimizeIf(ne, instr->environment(), scratch, Operand(map)); |
| 2270 EmitLoadFieldOrConstantFunction(result, object, map, name); |
| 2271 } |
| 2272 __ bind(&done); |
| 2273 } |
| 2274 } |
| 2275 |
| 2276 |
| 2277 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { |
| 2278 ASSERT(ToRegister(instr->object()).is(a0)); |
| 2279 ASSERT(ToRegister(instr->result()).is(v0)); |
| 2280 |
| 2281 // Name is always in a2. |
| 2282 __ li(a2, Operand(instr->name())); |
| 2283 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); |
| 2284 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 2285 } |
| 2286 |
| 2287 |
| 2288 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { |
| 2289 Register scratch = scratch0(); |
| 2290 Register function = ToRegister(instr->function()); |
| 2291 Register result = ToRegister(instr->result()); |
| 2292 |
| 2293 // Check that the function really is a function. Load map into the |
| 2294 // result register. |
| 2295 __ GetObjectType(function, result, scratch); |
| 2296 DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE)); |
| 2297 |
| 2298 // Make sure that the function has an instance prototype. |
| 2299 Label non_instance; |
| 2300 __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); |
| 2301 __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype)); |
| 2302 __ Branch(&non_instance, ne, scratch, Operand(zero_reg)); |
| 2303 |
| 2304 // Get the prototype or initial map from the function. |
| 2305 __ lw(result, |
| 2306 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
| 2307 |
| 2308 // Check that the function has a prototype or an initial map. |
| 2309 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
| 2310 DeoptimizeIf(eq, instr->environment(), result, Operand(at)); |
| 2311 |
| 2312 // If the function does not have an initial map, we're done. |
| 2313 Label done; |
| 2314 __ GetObjectType(result, scratch, scratch); |
| 2315 __ Branch(&done, ne, scratch, Operand(MAP_TYPE)); |
| 2316 |
| 2317 // Get the prototype from the initial map. |
| 2318 __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset)); |
| 2319 __ Branch(&done); |
| 2320 |
| 2321 // Non-instance prototype: Fetch prototype from constructor field |
| 2322 // in initial map. |
| 2323 __ bind(&non_instance); |
| 2324 __ lw(result, FieldMemOperand(result, Map::kConstructorOffset)); |
| 2325 |
| 2326 // All done. |
| 2327 __ bind(&done); |
| 2328 } |
| 2329 |
| 2330 |
| 2331 void LCodeGen::DoLoadElements(LLoadElements* instr) { |
| 2332 Register result = ToRegister(instr->result()); |
| 2333 Register input = ToRegister(instr->InputAt(0)); |
| 2334 Register scratch = scratch0(); |
| 2335 |
| 2336 __ lw(result, FieldMemOperand(input, JSObject::kElementsOffset)); |
| 2337 if (FLAG_debug_code) { |
| 2338 Label done, fail; |
| 2339 __ lw(scratch, FieldMemOperand(result, HeapObject::kMapOffset)); |
| 2340 __ LoadRoot(at, Heap::kFixedArrayMapRootIndex); |
| 2341 __ Branch(USE_DELAY_SLOT, &done, eq, scratch, Operand(at)); |
| 2342 __ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex); // In the delay slot. |
| 2343 __ Branch(&done, eq, scratch, Operand(at)); |
| 2344 // |scratch| still contains |input|'s map. |
| 2345 __ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset)); |
| 2346 __ Ext(scratch, scratch, Map::kElementsKindShift, |
| 2347 Map::kElementsKindBitCount); |
| 2348 __ Branch(&done, eq, scratch, |
| 2349 Operand(FAST_ELEMENTS)); |
| 2350 __ Branch(&fail, lt, scratch, |
| 2351 Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND)); |
| 2352 __ Branch(&done, le, scratch, |
| 2353 Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND)); |
| 2354 __ bind(&fail); |
| 2355 __ Abort("Check for fast or external elements failed."); |
| 2356 __ bind(&done); |
| 2357 } |
| 2358 } |
| 2359 |
| 2360 |
| 2361 void LCodeGen::DoLoadExternalArrayPointer( |
| 2362 LLoadExternalArrayPointer* instr) { |
| 2363 Register to_reg = ToRegister(instr->result()); |
| 2364 Register from_reg = ToRegister(instr->InputAt(0)); |
| 2365 __ lw(to_reg, FieldMemOperand(from_reg, |
| 2366 ExternalArray::kExternalPointerOffset)); |
| 2367 } |
| 2368 |
| 2369 |
| 2370 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { |
| 2371 Register arguments = ToRegister(instr->arguments()); |
| 2372 Register length = ToRegister(instr->length()); |
| 2373 Register index = ToRegister(instr->index()); |
| 2374 Register result = ToRegister(instr->result()); |
| 2375 |
| 2376 // Bail out if index is not a valid argument index. Use an unsigned |
| 2377 // check to get the negative check for free. |
| 2378 |
| 2379 // TODO(plind): Should be optimized to do the sub before the DeoptimizeIf(), |
| 2380 // as is done on ARM. It will save us an instruction. |
| 2381 DeoptimizeIf(ls, instr->environment(), length, Operand(index)); |
| 2382 |
| 2383 // There are two words between the frame pointer and the last argument. |
| 2384 // Subtracting from length accounts for one of them, add one more. |
| 2385 __ subu(length, length, index); |
| 2386 __ Addu(length, length, Operand(1)); |
| 2387 __ sll(length, length, kPointerSizeLog2); |
| 2388 __ Addu(at, arguments, Operand(length)); |
| 2389 __ lw(result, MemOperand(at, 0)); |
| 2390 } |
| 2391 |
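| // The single 'ls' (unsigned <=) compare above covers both failure modes: |
| // a negative index reinterpreted as unsigned is huge (e.g. -1 becomes |
| // 0xFFFFFFFF), so "length <= index" unsigned also rejects index < 0. |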
| 2392 |
| 2393 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { |
| 2394 Register elements = ToRegister(instr->elements()); |
| 2395 Register key = EmitLoadRegister(instr->key(), scratch0()); |
| 2396 Register result = ToRegister(instr->result()); |
| 2397 Register scratch = scratch0(); |
| 2398 |
| 2399 // Load the result. |
| 2400 __ sll(scratch, key, kPointerSizeLog2); // Key indexes words. |
| 2401 __ addu(scratch, elements, scratch); |
| 2402 __ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize)); |
| 2403 |
| 2404 // Check for the hole value. |
| 2405 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2406 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); |
| 2407 DeoptimizeIf(eq, instr->environment(), result, Operand(scratch)); |
| 2408 } |
| 2409 } |
| 2410 |
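| // The address computed above is elements + key * kPointerSize; the |
| // FieldMemOperand then adds FixedArray::kHeaderSize - kHeapObjectTag to |
| // skip the map/length header and remove the heap object tag. |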
| 2411 |
| 2412 void LCodeGen::DoLoadKeyedFastDoubleElement( |
| 2413 LLoadKeyedFastDoubleElement* instr) { |
| 2414 Register elements = ToRegister(instr->elements()); |
| 2415 bool key_is_constant = instr->key()->IsConstantOperand(); |
| 2416 Register key = no_reg; |
| 2417 DoubleRegister result = ToDoubleRegister(instr->result()); |
| 2418 Register scratch = scratch0(); |
| 2419 |
| 2420 int shift_size = |
| 2421 ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); |
| 2422 int constant_key = 0; |
| 2423 if (key_is_constant) { |
| 2424 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); |
| 2425 if (constant_key & 0xF0000000) { |
| 2426 Abort("array index constant value too big."); |
| 2427 } |
| 2428 } else { |
| 2429 key = ToRegister(instr->key()); |
| 2430 } |
| 2431 |
| 2432 if (key_is_constant) { |
| 2433 __ Addu(elements, elements, Operand(constant_key * (1 << shift_size) + |
| 2434 FixedDoubleArray::kHeaderSize - kHeapObjectTag)); |
| 2435 } else { |
| 2436 __ sll(scratch, key, shift_size); |
| 2437 __ Addu(elements, elements, Operand(scratch)); |
| 2438 __ Addu(elements, elements, |
| 2439 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); |
| 2440 } |
| 2441 |
| 2442 __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); |
| 2443 DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32)); |
| 2444 |
| 2445 __ ldc1(result, MemOperand(elements)); |
| 2446 } |
| 2447 |
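| // Hole check: each double element is eight bytes, and the hole is a NaN |
| // with a reserved bit pattern. The lw above reads the element's upper |
| // word (at offset sizeof(kHoleNanLower32)) and deopts when it equals |
| // kHoleNanUpper32. |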
| 2448 |
| 2449 void LCodeGen::DoLoadKeyedSpecializedArrayElement( |
| 2450 LLoadKeyedSpecializedArrayElement* instr) { |
| 2451 Register external_pointer = ToRegister(instr->external_pointer()); |
| 2452 Register key = no_reg; |
| 2453 ElementsKind elements_kind = instr->elements_kind(); |
| 2454 bool key_is_constant = instr->key()->IsConstantOperand(); |
| 2455 int constant_key = 0; |
| 2456 if (key_is_constant) { |
| 2457 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); |
| 2458 if (constant_key & 0xF0000000) { |
| 2459 Abort("array index constant value too big."); |
| 2460 } |
| 2461 } else { |
| 2462 key = ToRegister(instr->key()); |
| 2463 } |
| 2464 int shift_size = ElementsKindToShiftSize(elements_kind); |
| 2465 |
| 2466 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || |
| 2467 elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { |
| 2468 FPURegister result = ToDoubleRegister(instr->result()); |
| 2469 if (key_is_constant) { |
| 2470 __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size)); |
| 2471 } else { |
| 2472 __ sll(scratch0(), key, shift_size); |
| 2473 __ Addu(scratch0(), scratch0(), external_pointer); |
| 2474 } |
| 2475 |
| 2476 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
| 2477 __ lwc1(result, MemOperand(scratch0())); |
| 2478 __ cvt_d_s(result, result); |
| 2479 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS |
| 2480 __ ldc1(result, MemOperand(scratch0())); |
| 2481 } |
| 2482 } else { |
| 2483 Register result = ToRegister(instr->result()); |
| 2484 Register scratch = scratch0(); |
| 2485 MemOperand mem_operand(zero_reg); |
| 2486 if (key_is_constant) { |
| 2487 mem_operand = MemOperand(external_pointer, |
| 2488 constant_key * (1 << shift_size)); |
| 2489 } else { |
| 2490 __ sll(scratch, key, shift_size); |
| 2491 __ Addu(scratch, scratch, external_pointer); |
| 2492 mem_operand = MemOperand(scratch); |
| 2493 } |
| 2494 switch (elements_kind) { |
| 2495 case EXTERNAL_BYTE_ELEMENTS: |
| 2496 __ lb(result, mem_operand); |
| 2497 break; |
| 2498 case EXTERNAL_PIXEL_ELEMENTS: |
| 2499 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: |
| 2500 __ lbu(result, mem_operand); |
| 2501 break; |
| 2502 case EXTERNAL_SHORT_ELEMENTS: |
| 2503 __ lh(result, mem_operand); |
| 2504 break; |
| 2505 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: |
| 2506 __ lhu(result, mem_operand); |
| 2507 break; |
| 2508 case EXTERNAL_INT_ELEMENTS: |
| 2509 __ lw(result, mem_operand); |
| 2510 break; |
| 2511 case EXTERNAL_UNSIGNED_INT_ELEMENTS: |
| 2512 __ lw(result, mem_operand); |
| 2513 // TODO(danno): we could be more clever here, perhaps having a special |
| 2514 // version of the stub that detects if the overflow case actually |
| 2515 // happens, and generates code that returns a double rather than an int. |
| 2516 DeoptimizeIf(Ugreater_equal, instr->environment(), |
| 2517 result, Operand(0x80000000)); |
| 2518 break; |
| 2519 case EXTERNAL_FLOAT_ELEMENTS: |
| 2520 case EXTERNAL_DOUBLE_ELEMENTS: |
| 2521 case FAST_DOUBLE_ELEMENTS: |
| 2522 case FAST_ELEMENTS: |
| 2523 case FAST_SMI_ONLY_ELEMENTS: |
| 2524 case DICTIONARY_ELEMENTS: |
| 2525 case NON_STRICT_ARGUMENTS_ELEMENTS: |
| 2526 UNREACHABLE(); |
| 2527 break; |
| 2528 } |
| 2529 } |
| 2530 } |
| 2531 |
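| // The deopt for EXTERNAL_UNSIGNED_INT_ELEMENTS fires when bit 31 is set |
| // (value >= 0x80000000): such a uint32 does not fit as a non-negative |
| // value in the signed 32-bit result register, so the unoptimized code |
| // must represent it as a double instead. |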
| 2532 |
| 2533 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { |
| 2534 ASSERT(ToRegister(instr->object()).is(a1)); |
| 2535 ASSERT(ToRegister(instr->key()).is(a0)); |
| 2536 |
| 2537 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); |
| 2538 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 2539 } |
| 2540 |
| 2541 |
| 2542 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { |
| 2543 Register scratch = scratch0(); |
| 2544 Register temp = scratch1(); |
| 2545 Register result = ToRegister(instr->result()); |
| 2546 |
| 2547 // Check if the calling frame is an arguments adaptor frame. |
| 2548 Label done, adapted; |
| 2549 __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
| 2550 __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset)); |
| 2551 __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
| 2552 |
| 2553 // Result is the frame pointer for the frame if not adapted and for the real |
| 2554 // frame below the adaptor frame if adapted. |
| 2555 __ movn(result, fp, temp); // move only if temp is not equal to zero (ne) |
| 2556 __ movz(result, scratch, temp); // move only if temp is equal to zero (eq) |
| 2557 } |
| 2558 |
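| // The movn/movz pair is a branchless select: result = (temp != 0) ? fp |
| // : scratch, i.e. use the current frame pointer unless the caller frame |
| // is an arguments adaptor, in which case use the caller (adaptor) frame |
| // pointer loaded into scratch above. |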
| 2559 |
| 2560 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { |
| 2561 Register elem = ToRegister(instr->InputAt(0)); |
| 2562 Register result = ToRegister(instr->result()); |
| 2563 |
| 2564 Label done; |
| 2565 |
| 2566 // If there is no arguments adaptor frame, the number of arguments is fixed. |
| 2567 __ Addu(result, zero_reg, Operand(scope()->num_parameters())); |
| 2568 __ Branch(&done, eq, fp, Operand(elem)); |
| 2569 |
| 2570 // Arguments adaptor frame present. Get argument length from there. |
| 2571 __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
| 2572 __ lw(result, |
| 2573 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
| 2574 __ SmiUntag(result); |
| 2575 |
| 2576 // Argument length is in result register. |
| 2577 __ bind(&done); |
| 2578 } |
| 2579 |
| 2580 |
| 2581 void LCodeGen::DoApplyArguments(LApplyArguments* instr) { |
| 2582 Register receiver = ToRegister(instr->receiver()); |
| 2583 Register function = ToRegister(instr->function()); |
| 2584 Register length = ToRegister(instr->length()); |
| 2585 Register elements = ToRegister(instr->elements()); |
| 2586 Register scratch = scratch0(); |
| 2587 ASSERT(receiver.is(a0)); // Used for parameter count. |
| 2588 ASSERT(function.is(a1)); // Required by InvokeFunction. |
| 2589 ASSERT(ToRegister(instr->result()).is(v0)); |
| 2590 |
| 2591 // If the receiver is null or undefined, we have to pass the global |
| 2592 // object as a receiver to normal functions. Values have to be |
| 2593 // passed unchanged to builtins and strict-mode functions. |
| 2594 Label global_object, receiver_ok; |
| 2595 |
| 2596 // Do not transform the receiver to object for strict mode |
| 2597 // functions. |
| 2598 __ lw(scratch, |
| 2599 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); |
| 2600 __ lw(scratch, |
| 2601 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); |
| 2602 |
| 2603 // Do not transform the receiver to object for builtins. |
| 2604 int32_t strict_mode_function_mask = |
| 2605 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize); |
| 2606 int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize); |
| 2607 __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask)); |
| 2608 __ Branch(&receiver_ok, ne, scratch, Operand(zero_reg)); |
| 2609 |
| 2610 // Normal function. Replace undefined or null with global receiver. |
| 2611 __ LoadRoot(scratch, Heap::kNullValueRootIndex); |
| 2612 __ Branch(&global_object, eq, receiver, Operand(scratch)); |
| 2613 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); |
| 2614 __ Branch(&global_object, eq, receiver, Operand(scratch)); |
| 2615 |
| 2616 // Deoptimize if the receiver is not a JS object. |
| 2617 __ And(scratch, receiver, Operand(kSmiTagMask)); |
| 2618 DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg)); |
| 2619 |
| 2620 __ GetObjectType(receiver, scratch, scratch); |
| 2621 DeoptimizeIf(lt, instr->environment(), |
| 2622 scratch, Operand(FIRST_SPEC_OBJECT_TYPE)); |
| 2623 __ Branch(&receiver_ok); |
| 2624 |
| 2625 __ bind(&global_object); |
| 2626 __ lw(receiver, GlobalObjectOperand()); |
| 2627 __ lw(receiver, |
| 2628 FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset)); |
| 2629 __ bind(&receiver_ok); |
| 2630 |
| 2631 // Copy the arguments to this function possibly from the |
| 2632 // adaptor frame below it. |
| 2633 const uint32_t kArgumentsLimit = 1 * KB; |
| 2634 DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit)); |
| 2635 |
| 2636 // Push the receiver and use the register to keep the original |
| 2637 // number of arguments. |
| 2638 __ push(receiver); |
| 2639 __ Move(receiver, length); |
| 2640 // The arguments start one pointer size past the elements pointer. |
| 2641 __ Addu(elements, elements, Operand(1 * kPointerSize)); |
| 2642 |
| 2643 // Loop through the arguments pushing them onto the execution |
| 2644 // stack. |
| 2645 Label invoke, loop; |
| 2646 // length is a small non-negative integer, due to the test above. |
| 2647 __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg)); |
| 2648 __ sll(scratch, length, 2); |
| 2649 __ bind(&loop); |
| 2650 __ Addu(scratch, elements, scratch); |
| 2651 __ lw(scratch, MemOperand(scratch)); |
| 2652 __ push(scratch); |
| 2653 __ Subu(length, length, Operand(1)); |
| 2654 __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg)); |
| 2655 __ sll(scratch, length, 2); |
| 2656 |
| 2657 __ bind(&invoke); |
| 2658 ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); |
| 2659 LPointerMap* pointers = instr->pointer_map(); |
| 2660 LEnvironment* env = instr->deoptimization_environment(); |
| 2661 RecordPosition(pointers->position()); |
| 2662 RegisterEnvironmentForDeoptimization(env); |
| 2663 SafepointGenerator safepoint_generator(this, |
| 2664 pointers, |
| 2665 env->deoptimization_index()); |
| 2666 // The number of arguments is stored in receiver which is a0, as expected |
| 2667 // by InvokeFunction. |
| 2668 v8::internal::ParameterCount actual(receiver); |
| 2669 __ InvokeFunction(function, actual, CALL_FUNCTION, |
| 2670 safepoint_generator, CALL_AS_METHOD); |
| 2671 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 2672 } |
| 2673 |
| 2674 |
| 2675 void LCodeGen::DoPushArgument(LPushArgument* instr) { |
| 2676 LOperand* argument = instr->InputAt(0); |
| 2677 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { |
| 2678 Abort("DoPushArgument not implemented for double type."); |
| 2679 } else { |
| 2680 Register argument_reg = EmitLoadRegister(argument, at); |
| 2681 __ push(argument_reg); |
| 2682 } |
| 2683 } |
| 2684 |
| 2685 |
| 2686 void LCodeGen::DoThisFunction(LThisFunction* instr) { |
| 2687 Register result = ToRegister(instr->result()); |
| 2688 __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
| 2689 } |
| 2690 |
| 2691 |
| 2692 void LCodeGen::DoContext(LContext* instr) { |
| 2693 Register result = ToRegister(instr->result()); |
| 2694 __ mov(result, cp); |
| 2695 } |
| 2696 |
| 2697 |
| 2698 void LCodeGen::DoOuterContext(LOuterContext* instr) { |
| 2699 Register context = ToRegister(instr->context()); |
| 2700 Register result = ToRegister(instr->result()); |
| 2701 __ lw(result, |
| 2702 MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX))); |
| 2703 } |
| 2704 |
| 2705 |
| 2706 void LCodeGen::DoGlobalObject(LGlobalObject* instr) { |
| 2707 Register context = ToRegister(instr->context()); |
| 2708 Register result = ToRegister(instr->result()); |
| 2709 __ lw(result, ContextOperand(cp, Context::GLOBAL_INDEX)); |
| 2710 } |
| 2711 |
| 2712 |
| 2713 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) { |
| 2714 Register global = ToRegister(instr->global()); |
| 2715 Register result = ToRegister(instr->result()); |
| 2716 __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset)); |
| 2717 } |
| 2718 |
| 2719 |
| 2720 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, |
| 2721 int arity, |
| 2722 LInstruction* instr, |
| 2723 CallKind call_kind) { |
| 2724 // Change context if needed. |
| 2725 bool change_context = |
| 2726 (info()->closure()->context() != function->context()) || |
| 2727 scope()->contains_with() || |
| 2728 (scope()->num_heap_slots() > 0); |
| 2729 if (change_context) { |
| 2730 __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); |
| 2731 } |
| 2732 |
| 2733 // Set a0 to arguments count if adaptation is not needed. Assumes that a0 |
| 2734 // is available to write to at this point. |
| 2735 if (!function->NeedsArgumentsAdaption()) { |
| 2736 __ li(a0, Operand(arity)); |
| 2737 } |
| 2738 |
| 2739 LPointerMap* pointers = instr->pointer_map(); |
| 2740 RecordPosition(pointers->position()); |
| 2741 |
| 2742 // Invoke function. |
| 2743 __ SetCallKind(t1, call_kind); |
| 2744 __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); |
| 2745 __ Call(at); |
| 2746 |
| 2747 // Set up deoptimization. |
| 2748 RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT); |
| 2749 |
| 2750 // Restore context. |
| 2751 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 2752 } |
| 2753 |
| 2754 |
| 2755 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) { |
| 2756 ASSERT(ToRegister(instr->result()).is(v0)); |
| 2757 __ mov(a0, v0); |
| 2758 __ li(a1, Operand(instr->function())); |
| 2759 CallKnownFunction(instr->function(), instr->arity(), instr, CALL_AS_METHOD); |
| 2760 } |
| 2761 |
| 2762 |
| 2763 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { |
| 2764 Register input = ToRegister(instr->InputAt(0)); |
| 2765 Register result = ToRegister(instr->result()); |
| 2766 Register scratch = scratch0(); |
| 2767 |
| 2768 // Deoptimize if not a heap number. |
| 2769 __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 2770 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
| 2771 DeoptimizeIf(ne, instr->environment(), scratch, Operand(at)); |
| 2772 |
| 2773 Label done; |
| 2774 Register exponent = scratch0(); |
| 2775 scratch = no_reg; |
| 2776 __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); |
| 2777 // Check the sign of the argument. If the argument is positive, just |
| 2778 // return it. |
| 2779 __ Move(result, input); |
| 2780 __ And(at, exponent, Operand(HeapNumber::kSignMask)); |
| 2781 __ Branch(&done, eq, at, Operand(zero_reg)); |
| 2782 |
| 2783 // Input is negative. Reverse its sign. |
| 2784 // Preserve the value of all registers. |
| 2785 { |
| 2786 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
| 2787 |
| 2788 // Registers were saved at the safepoint, so we can use |
| 2789 // many scratch registers. |
| 2790 Register tmp1 = input.is(a1) ? a0 : a1; |
| 2791 Register tmp2 = input.is(a2) ? a0 : a2; |
| 2792 Register tmp3 = input.is(a3) ? a0 : a3; |
| 2793 Register tmp4 = input.is(t0) ? a0 : t0; |
| 2794 |
| 2795 // exponent: floating point exponent value. |
| 2796 |
| 2797 Label allocated, slow; |
| 2798 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex); |
| 2799 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow); |
| 2800 __ Branch(&allocated); |
| 2801 |
| 2802 // Slow case: Call the runtime system to do the number allocation. |
| 2803 __ bind(&slow); |
| 2804 |
| 2805 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); |
| 2806 // Set the pointer to the new heap number in tmp1. |
| 2807 if (!tmp1.is(v0)) |
| 2808 __ mov(tmp1, v0); |
| 2809 // Restore input_reg after call to runtime. |
| 2810 __ LoadFromSafepointRegisterSlot(input, input); |
| 2811 __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); |
| 2812 |
| 2813 __ bind(&allocated); |
| 2814 // exponent: floating point exponent value. |
| 2815 // tmp1: allocated heap number. |
| 2816 __ And(exponent, exponent, Operand(~HeapNumber::kSignMask)); |
| 2817 __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset)); |
| 2818 __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); |
| 2819 __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset)); |
| 2820 |
| 2821 __ StoreToSafepointRegisterSlot(tmp1, result); |
| 2822 } |
| 2823 |
| 2824 __ bind(&done); |
| 2825 } |
| 2826 |
| 2827 |
| 2828 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) { |
| 2829 Register input = ToRegister(instr->InputAt(0)); |
| 2830 Register result = ToRegister(instr->result()); |
| 2831 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); |
| 2832 Label done; |
| 2833 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg)); |
| 2834 __ mov(result, input); |
| 2835 ASSERT_EQ(2, masm()->InstructionsGeneratedSince(&done)); |
| 2836 __ subu(result, zero_reg, input); |
| 2837 // Overflow if result is still negative, i.e. 0x80000000. |
| 2838 DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg)); |
| 2839 __ bind(&done); |
| 2840 } |
| 2841 |
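| // Only kMinInt (0x80000000) can overflow here: negating it wraps back |
| // to 0x80000000, which is still negative, so the deopt above catches |
| // exactly that case. |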
| 2842 |
| 2843 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { |
| 2844 // Class for deferred case. |
| 2845 class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { |
| 2846 public: |
| 2847 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, |
| 2848 LUnaryMathOperation* instr) |
| 2849 : LDeferredCode(codegen), instr_(instr) { } |
| 2850 virtual void Generate() { |
| 2851 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); |
| 2852 } |
| 2853 virtual LInstruction* instr() { return instr_; } |
| 2854 private: |
| 2855 LUnaryMathOperation* instr_; |
| 2856 }; |
| 2857 |
| 2858 Representation r = instr->hydrogen()->value()->representation(); |
| 2859 if (r.IsDouble()) { |
| 2860 FPURegister input = ToDoubleRegister(instr->InputAt(0)); |
| 2861 FPURegister result = ToDoubleRegister(instr->result()); |
| 2862 __ abs_d(result, input); |
| 2863 } else if (r.IsInteger32()) { |
| 2864 EmitIntegerMathAbs(instr); |
| 2865 } else { |
| 2866 // Representation is tagged. |
| 2867 DeferredMathAbsTaggedHeapNumber* deferred = |
| 2868 new DeferredMathAbsTaggedHeapNumber(this, instr); |
| 2869 Register input = ToRegister(instr->InputAt(0)); |
| 2870 // Smi check. |
| 2871 __ JumpIfNotSmi(input, deferred->entry()); |
| 2872 // If smi, handle it directly. |
| 2873 EmitIntegerMathAbs(instr); |
| 2874 __ bind(deferred->exit()); |
| 2875 } |
| 2876 } |
| 2877 |
| 2878 |
| 2879 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { |
| 2880 DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); |
| 2881 Register result = ToRegister(instr->result()); |
| 2882 FPURegister single_scratch = double_scratch0().low(); |
| 2883 Register scratch1 = scratch0(); |
| 2884 Register except_flag = ToRegister(instr->TempAt(0)); |
| 2885 |
| 2886 __ EmitFPUTruncate(kRoundToMinusInf, |
| 2887 single_scratch, |
| 2888 input, |
| 2889 scratch1, |
| 2890 except_flag); |
| 2891 |
| 2892 // Deopt if the operation did not succeed. |
| 2893 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg)); |
| 2894 |
| 2895 // Load the result. |
| 2896 __ mfc1(result, single_scratch); |
| 2897 |
| 2898 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 2899 // Test for -0. |
| 2900 Label done; |
| 2901 __ Branch(&done, ne, result, Operand(zero_reg)); |
| 2902 __ mfc1(scratch1, input.high()); |
| 2903 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); |
| 2904 DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg)); |
| 2905 __ bind(&done); |
| 2906 } |
| 2907 } |
| 2908 |
| 2909 |
| 2910 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { |
| 2911 DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); |
| 2912 Register result = ToRegister(instr->result()); |
| 2913 Register scratch = scratch0(); |
| 2914 Label done, check_sign_on_zero; |
| 2915 |
| 2916 // Extract exponent bits. |
| 2917 __ mfc1(result, input.high()); |
| 2918 __ Ext(scratch, |
| 2919 result, |
| 2920 HeapNumber::kExponentShift, |
| 2921 HeapNumber::kExponentBits); |
| 2922 |
| 2923 // If the number is in ]-0.5, +0.5[, the result is +/- 0. |
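|      // (A biased exponent <= kExponentBias - 2 means an unbiased exponent of
|      // at most -2, i.e. |input| < 2^-1 = 0.5.)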
| 2924 Label skip1; |
| 2925 __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2)); |
| 2926 __ mov(result, zero_reg); |
| 2927 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 2928 __ Branch(&check_sign_on_zero); |
| 2929 } else { |
| 2930 __ Branch(&done); |
| 2931 } |
| 2932 __ bind(&skip1); |
| 2933 |
| 2934 // The following conversion will not work with numbers |
| 2935 // outside of ]-2^32, 2^32[. |
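|      // (A biased exponent of kExponentBias + 32 or more means the integral
|      // part of the value needs more than 32 bits.)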
| 2936 DeoptimizeIf(ge, instr->environment(), scratch, |
| 2937 Operand(HeapNumber::kExponentBias + 32)); |
| 2938 |
| 2939 // Save the original sign for later comparison. |
| 2940 __ And(scratch, result, Operand(HeapNumber::kSignMask)); |
| 2941 |
| 2942 __ Move(double_scratch0(), 0.5); |
| 2943 __ add_d(input, input, double_scratch0()); |
| 2944 |
| 2945 // Check sign of the result: if the sign changed, the input value
| 2946 // was in [-0.5, 0[ (e.g. -0.5 + 0.5 == +0.0) and the result should be -0.
| 2947 __ mfc1(result, input.high()); |
| 2948 __ Xor(result, result, Operand(scratch)); |
| 2949 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 2950 // ARM uses 'mi' here, which corresponds to 'lt' (result < 0) on MIPS.
| 2951 DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
| 2953 } else { |
| 2954 Label skip2; |
| 2955 // ARM uses 'mi' here, which corresponds to 'lt' (result < 0);
| 2956 // negating it gives 'ge'.
| 2957 __ Branch(&skip2, ge, result, Operand(zero_reg)); |
| 2958 __ mov(result, zero_reg); |
| 2959 __ Branch(&done); |
| 2960 __ bind(&skip2); |
| 2961 } |
| 2962 |
| 2963 Register except_flag = scratch; |
| 2964 |
| 2965 __ EmitFPUTruncate(kRoundToMinusInf, |
| 2966 double_scratch0().low(), |
| 2967 input, |
| 2968 result, |
| 2969 except_flag); |
| 2970 |
| 2971 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg)); |
| 2972 |
| 2973 __ mfc1(result, double_scratch0().low()); |
| 2974 |
| 2975 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 2976 // Test for -0. |
| 2977 __ Branch(&done, ne, result, Operand(zero_reg)); |
| 2978 __ bind(&check_sign_on_zero); |
| 2979 __ mfc1(scratch, input.high()); |
| 2980 __ And(scratch, scratch, Operand(HeapNumber::kSignMask)); |
| 2981 DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg)); |
| 2982 } |
| 2983 __ bind(&done); |
| 2984 } |
| 2985 |
| 2986 |
| 2987 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { |
| 2988 DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); |
| 2989 DoubleRegister result = ToDoubleRegister(instr->result()); |
| 2990 __ sqrt_d(result, input); |
| 2991 } |
| 2992 |
| 2993 |
| 2994 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { |
| 2995 DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); |
| 2996 DoubleRegister result = ToDoubleRegister(instr->result()); |
| 2997 DoubleRegister double_scratch = double_scratch0(); |
| 2998 |
| 2999 // Add +0 to convert -0 to +0. |
| 3000 __ mtc1(zero_reg, double_scratch.low()); |
| 3001 __ mtc1(zero_reg, double_scratch.high()); |
| 3002 __ add_d(result, input, double_scratch); |
| 3003 __ sqrt_d(result, result); |
| 3004 } |
| 3005 |
| 3006 |
| 3007 void LCodeGen::DoPower(LPower* instr) { |
| 3008 LOperand* left = instr->InputAt(0); |
| 3009 LOperand* right = instr->InputAt(1); |
| 3010 Register scratch = scratch0(); |
| 3011 DoubleRegister result_reg = ToDoubleRegister(instr->result()); |
| 3012 Representation exponent_type = instr->hydrogen()->right()->representation(); |
| 3013 if (exponent_type.IsDouble()) { |
| 3014 // Prepare arguments and call C function. |
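|      // 0 general-purpose arguments and 2 double arguments (base and exponent).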
| 3015 __ PrepareCallCFunction(0, 2, scratch); |
| 3016 __ SetCallCDoubleArguments(ToDoubleRegister(left), |
| 3017 ToDoubleRegister(right)); |
| 3018 __ CallCFunction( |
| 3019 ExternalReference::power_double_double_function(isolate()), 0, 2); |
| 3020 } else if (exponent_type.IsInteger32()) { |
| 3021 ASSERT(ToRegister(right).is(a0)); |
| 3022 // Prepare arguments and call C function. |
| 3023 __ PrepareCallCFunction(1, 1, scratch); |
| 3024 __ SetCallCDoubleArguments(ToDoubleRegister(left), ToRegister(right)); |
| 3025 __ CallCFunction( |
| 3026 ExternalReference::power_double_int_function(isolate()), 1, 1); |
| 3027 } else { |
| 3028 ASSERT(exponent_type.IsTagged()); |
| 3029 ASSERT(instr->hydrogen()->left()->representation().IsDouble()); |
| 3030 |
| 3031 Register right_reg = ToRegister(right); |
| 3032 |
| 3033 // Check for smi on the right hand side. |
| 3034 Label non_smi, call; |
| 3035 __ JumpIfNotSmi(right_reg, &non_smi); |
| 3036 |
| 3037 // Untag smi and convert it to a double. |
| 3038 __ SmiUntag(right_reg); |
| 3039 FPURegister single_scratch = double_scratch0(); |
| 3040 __ mtc1(right_reg, single_scratch); |
| 3041 __ cvt_d_w(result_reg, single_scratch); |
| 3042 __ Branch(&call); |
| 3043 |
| 3044 // Heap number map check. |
| 3045 __ bind(&non_smi); |
| 3046 __ lw(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset)); |
| 3047 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
| 3048 DeoptimizeIf(ne, instr->environment(), scratch, Operand(at)); |
| 3049 __ ldc1(result_reg, FieldMemOperand(right_reg, HeapNumber::kValueOffset)); |
| 3050 |
| 3051 // Prepare arguments and call C function. |
| 3052 __ bind(&call); |
| 3053 __ PrepareCallCFunction(0, 2, scratch); |
| 3054 __ SetCallCDoubleArguments(ToDoubleRegister(left), result_reg); |
| 3055 __ CallCFunction( |
| 3056 ExternalReference::power_double_double_function(isolate()), 0, 2); |
| 3057 } |
| 3058 // Store the result in the result register. |
| 3059 __ GetCFunctionDoubleResult(result_reg); |
| 3060 } |
| 3061 |
| 3062 |
| 3063 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { |
| 3064 ASSERT(ToDoubleRegister(instr->result()).is(f4)); |
| 3065 TranscendentalCacheStub stub(TranscendentalCache::LOG, |
| 3066 TranscendentalCacheStub::UNTAGGED); |
| 3067 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 3068 } |
| 3069 |
| 3070 |
| 3071 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) { |
| 3072 ASSERT(ToDoubleRegister(instr->result()).is(f4)); |
| 3073 TranscendentalCacheStub stub(TranscendentalCache::COS, |
| 3074 TranscendentalCacheStub::UNTAGGED); |
| 3075 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 3076 } |
| 3077 |
| 3078 |
| 3079 void LCodeGen::DoMathSin(LUnaryMathOperation* instr) { |
| 3080 ASSERT(ToDoubleRegister(instr->result()).is(f4)); |
| 3081 TranscendentalCacheStub stub(TranscendentalCache::SIN, |
| 3082 TranscendentalCacheStub::UNTAGGED); |
| 3083 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 3084 } |
| 3085 |
| 3086 |
| 3087 void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) { |
| 3088 switch (instr->op()) { |
| 3089 case kMathAbs: |
| 3090 DoMathAbs(instr); |
| 3091 break; |
| 3092 case kMathFloor: |
| 3093 DoMathFloor(instr); |
| 3094 break; |
| 3095 case kMathRound: |
| 3096 DoMathRound(instr); |
| 3097 break; |
| 3098 case kMathSqrt: |
| 3099 DoMathSqrt(instr); |
| 3100 break; |
| 3101 case kMathPowHalf: |
| 3102 DoMathPowHalf(instr); |
| 3103 break; |
| 3104 case kMathCos: |
| 3105 DoMathCos(instr); |
| 3106 break; |
| 3107 case kMathSin: |
| 3108 DoMathSin(instr); |
| 3109 break; |
| 3110 case kMathLog: |
| 3111 DoMathLog(instr); |
| 3112 break; |
| 3113 default: |
| 3114 Abort("Unimplemented type of LUnaryMathOperation."); |
| 3115 UNREACHABLE(); |
| 3116 } |
| 3117 } |
| 3118 |
| 3119 |
| 3120 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { |
| 3121 ASSERT(ToRegister(instr->function()).is(a1)); |
| 3122 ASSERT(instr->HasPointerMap()); |
| 3123 ASSERT(instr->HasDeoptimizationEnvironment()); |
| 3124 LPointerMap* pointers = instr->pointer_map(); |
| 3125 LEnvironment* env = instr->deoptimization_environment(); |
| 3126 RecordPosition(pointers->position()); |
| 3127 RegisterEnvironmentForDeoptimization(env); |
| 3128 SafepointGenerator generator(this, pointers, env->deoptimization_index()); |
| 3129 ParameterCount count(instr->arity()); |
| 3130 __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD); |
| 3131 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 3132 } |
| 3133 |
| 3134 |
| 3135 void LCodeGen::DoCallKeyed(LCallKeyed* instr) { |
| 3136 ASSERT(ToRegister(instr->result()).is(v0)); |
| 3137 |
| 3138 int arity = instr->arity(); |
| 3139 Handle<Code> ic = |
| 3140 isolate()->stub_cache()->ComputeKeyedCallInitialize(arity); |
| 3141 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 3142 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 3143 } |
| 3144 |
| 3145 |
| 3146 void LCodeGen::DoCallNamed(LCallNamed* instr) { |
| 3147 ASSERT(ToRegister(instr->result()).is(v0)); |
| 3148 |
| 3149 int arity = instr->arity(); |
| 3150 RelocInfo::Mode mode = RelocInfo::CODE_TARGET; |
| 3151 Handle<Code> ic = |
| 3152 isolate()->stub_cache()->ComputeCallInitialize(arity, mode); |
| 3153 __ li(a2, Operand(instr->name())); |
| 3154 CallCode(ic, mode, instr); |
| 3155 // Restore context register. |
| 3156 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 3157 } |
| 3158 |
| 3159 |
| 3160 void LCodeGen::DoCallFunction(LCallFunction* instr) { |
| 3161 ASSERT(ToRegister(instr->result()).is(v0)); |
| 3162 |
| 3163 int arity = instr->arity(); |
| 3164 CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS); |
| 3165 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 3166 __ Drop(1); |
| 3167 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 3168 } |
| 3169 |
| 3170 |
| 3171 void LCodeGen::DoCallGlobal(LCallGlobal* instr) { |
| 3172 ASSERT(ToRegister(instr->result()).is(v0)); |
| 3173 |
| 3174 int arity = instr->arity(); |
| 3175 RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT; |
| 3176 Handle<Code> ic = |
| 3177 isolate()->stub_cache()->ComputeCallInitialize(arity, mode); |
| 3178 __ li(a2, Operand(instr->name())); |
| 3179 CallCode(ic, mode, instr); |
| 3180 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 3181 } |
| 3182 |
| 3183 |
| 3184 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) { |
| 3185 ASSERT(ToRegister(instr->result()).is(v0)); |
| 3186 __ li(a1, Operand(instr->target())); |
| 3187 CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION); |
| 3188 } |
| 3189 |
| 3190 |
| 3191 void LCodeGen::DoCallNew(LCallNew* instr) { |
| 3192 ASSERT(ToRegister(instr->InputAt(0)).is(a1)); |
| 3193 ASSERT(ToRegister(instr->result()).is(v0)); |
| 3194 |
| 3195 Handle<Code> builtin = isolate()->builtins()->JSConstructCall(); |
| 3196 __ li(a0, Operand(instr->arity())); |
| 3197 CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr); |
| 3198 } |
| 3199 |
| 3200 |
| 3201 void LCodeGen::DoCallRuntime(LCallRuntime* instr) { |
| 3202 CallRuntime(instr->function(), instr->arity(), instr); |
| 3203 } |
| 3204 |
| 3205 |
| 3206 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { |
| 3207 Register object = ToRegister(instr->object()); |
| 3208 Register value = ToRegister(instr->value()); |
| 3209 Register scratch = scratch0(); |
| 3210 int offset = instr->offset(); |
| 3211 |
| 3212 ASSERT(!object.is(value)); |
| 3213 |
| 3214 if (!instr->transition().is_null()) { |
| 3215 __ li(scratch, Operand(instr->transition())); |
| 3216 __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 3217 } |
| 3218 |
| 3219 // Do the store. |
| 3220 HType type = instr->hydrogen()->value()->type(); |
| 3221 SmiCheck check_needed = |
| 3222 type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| 3223 if (instr->is_in_object()) { |
| 3224 __ sw(value, FieldMemOperand(object, offset)); |
| 3225 if (instr->hydrogen()->NeedsWriteBarrier()) { |
| 3226 // Update the write barrier for the object for in-object properties. |
| 3227 __ RecordWriteField(object, |
| 3228 offset, |
| 3229 value, |
| 3230 scratch, |
| 3231 kRAHasBeenSaved, |
| 3232 kSaveFPRegs, |
| 3233 EMIT_REMEMBERED_SET, |
| 3234 check_needed); |
| 3235 } |
| 3236 } else { |
| 3237 __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset)); |
| 3238 __ sw(value, FieldMemOperand(scratch, offset)); |
| 3239 if (instr->hydrogen()->NeedsWriteBarrier()) { |
| 3240 // Update the write barrier for the properties array. |
| 3241 // object is used as a scratch register. |
| 3242 __ RecordWriteField(scratch, |
| 3243 offset, |
| 3244 value, |
| 3245 object, |
| 3246 kRAHasBeenSaved, |
| 3247 kSaveFPRegs, |
| 3248 EMIT_REMEMBERED_SET, |
| 3249 check_needed); |
| 3250 } |
| 3251 } |
| 3252 } |
| 3253 |
| 3254 |
| 3255 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { |
| 3256 ASSERT(ToRegister(instr->object()).is(a1)); |
| 3257 ASSERT(ToRegister(instr->value()).is(a0)); |
| 3258 |
| 3259 // Name is always in a2. |
| 3260 __ li(a2, Operand(instr->name())); |
| 3261 Handle<Code> ic = instr->strict_mode() |
| 3262 ? isolate()->builtins()->StoreIC_Initialize_Strict() |
| 3263 : isolate()->builtins()->StoreIC_Initialize(); |
| 3264 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 3265 } |
| 3266 |
| 3267 |
| 3268 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { |
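|      // 'hs' is an unsigned comparison, so a negative index (seen as a large
|      // unsigned value) is out of bounds as well.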
| 3269 DeoptimizeIf(hs, |
| 3270 instr->environment(), |
| 3271 ToRegister(instr->index()), |
| 3272 Operand(ToRegister(instr->length()))); |
| 3273 } |
| 3274 |
| 3275 |
| 3276 void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { |
| 3277 Register value = ToRegister(instr->value()); |
| 3278 Register elements = ToRegister(instr->object()); |
| 3279 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; |
| 3280 Register scratch = scratch0(); |
| 3281 |
| 3282 // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS |
| 3283 // conversion, so it deopts in that case. |
| 3284 if (instr->hydrogen()->ValueNeedsSmiCheck()) { |
| 3285 __ And(at, value, Operand(kSmiTagMask)); |
| 3286 DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg)); |
| 3287 } |
| 3288 |
| 3289 // Do the store. |
| 3290 if (instr->key()->IsConstantOperand()) { |
| 3291 ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); |
| 3292 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); |
| 3293 int offset = |
| 3294 ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize; |
| 3295 __ sw(value, FieldMemOperand(elements, offset)); |
| 3296 } else { |
| 3297 __ sll(scratch, key, kPointerSizeLog2); |
| 3298 __ addu(scratch, elements, scratch); |
| 3299 __ sw(value, FieldMemOperand(scratch, FixedArray::kHeaderSize)); |
| 3300 } |
| 3301 |
| 3302 if (instr->hydrogen()->NeedsWriteBarrier()) { |
| 3303 HType type = instr->hydrogen()->value()->type(); |
| 3304 SmiCheck check_needed = |
| 3305 type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| 3306 // Compute address of modified element and store it into key register. |
| 3307 __ Addu(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 3308 __ RecordWrite(elements, |
| 3309 key, |
| 3310 value, |
| 3311 kRAHasBeenSaved, |
| 3312 kSaveFPRegs, |
| 3313 EMIT_REMEMBERED_SET, |
| 3314 check_needed); |
| 3315 } |
| 3316 } |
| 3317 |
| 3318 |
| 3319 void LCodeGen::DoStoreKeyedFastDoubleElement( |
| 3320 LStoreKeyedFastDoubleElement* instr) { |
| 3321 DoubleRegister value = ToDoubleRegister(instr->value()); |
| 3322 Register elements = ToRegister(instr->elements()); |
| 3323 Register key = no_reg; |
| 3324 Register scratch = scratch0(); |
| 3325 bool key_is_constant = instr->key()->IsConstantOperand(); |
| 3326 int constant_key = 0; |
| 3327 Label not_nan; |
| 3328 |
| 3329 // Calculate the effective address of the slot in the array to store the |
| 3330 // double value. |
| 3331 if (key_is_constant) { |
| 3332 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); |
| 3333 if (constant_key & 0xF0000000) { |
| 3334 Abort("array index constant value too big."); |
| 3335 } |
| 3336 } else { |
| 3337 key = ToRegister(instr->key()); |
| 3338 } |
| 3339 int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); |
| 3340 if (key_is_constant) { |
| 3341 __ Addu(scratch, elements, Operand(constant_key * (1 << shift_size) + |
| 3342 FixedDoubleArray::kHeaderSize - kHeapObjectTag)); |
| 3343 } else { |
| 3344 __ sll(scratch, key, shift_size); |
| 3345 __ Addu(scratch, elements, Operand(scratch)); |
| 3346 __ Addu(scratch, scratch, |
| 3347 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); |
| 3348 } |
| 3349 |
| 3350 Label is_nan; |
| 3351 // Check for NaN. All NaNs must be canonicalized. |
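|      // (The FAST_DOUBLE_ELEMENTS backing store reserves one particular NaN
|      // bit pattern to mark holes, so arbitrary NaN patterns must not be
|      // stored verbatim.)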
| 3352 __ BranchF(NULL, &is_nan, eq, value, value); |
| 3353 __ Branch(¬_nan); |
| 3354 |
| 3355 // Only load the canonical NaN if the comparison above found the value to be NaN.
| 3356 __ bind(&is_nan); |
| 3357 __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double()); |
| 3358 |
| 3359 __ bind(¬_nan); |
| 3360 __ sdc1(value, MemOperand(scratch)); |
| 3361 } |
| 3362 |
| 3363 |
| 3364 void LCodeGen::DoStoreKeyedSpecializedArrayElement( |
| 3365 LStoreKeyedSpecializedArrayElement* instr) { |
| 3366 |
| 3367 Register external_pointer = ToRegister(instr->external_pointer()); |
| 3368 Register key = no_reg; |
| 3369 ElementsKind elements_kind = instr->elements_kind(); |
| 3370 bool key_is_constant = instr->key()->IsConstantOperand(); |
| 3371 int constant_key = 0; |
| 3372 if (key_is_constant) { |
| 3373 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); |
| 3374 if (constant_key & 0xF0000000) { |
| 3375 Abort("array index constant value too big."); |
| 3376 } |
| 3377 } else { |
| 3378 key = ToRegister(instr->key()); |
| 3379 } |
| 3380 int shift_size = ElementsKindToShiftSize(elements_kind); |
| 3381 |
| 3382 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || |
| 3383 elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { |
| 3384 FPURegister value(ToDoubleRegister(instr->value())); |
| 3385 if (key_is_constant) { |
| 3386 __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size)); |
| 3387 } else { |
| 3388 __ sll(scratch0(), key, shift_size); |
| 3389 __ Addu(scratch0(), scratch0(), external_pointer); |
| 3390 } |
| 3391 |
| 3392 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
| 3393 __ cvt_s_d(double_scratch0(), value); |
| 3394 __ swc1(double_scratch0(), MemOperand(scratch0())); |
| 3395 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS |
| 3396 __ sdc1(value, MemOperand(scratch0())); |
| 3397 } |
| 3398 } else { |
| 3399 Register value(ToRegister(instr->value())); |
| 3400 MemOperand mem_operand(zero_reg); |
| 3401 Register scratch = scratch0(); |
| 3402 if (key_is_constant) { |
| 3403 mem_operand = MemOperand(external_pointer, |
| 3404 constant_key * (1 << shift_size)); |
| 3405 } else { |
| 3406 __ sll(scratch, key, shift_size); |
| 3407 __ Addu(scratch, scratch, external_pointer); |
| 3408 mem_operand = MemOperand(scratch); |
| 3409 } |
| 3410 switch (elements_kind) { |
| 3411 case EXTERNAL_PIXEL_ELEMENTS: |
| 3412 case EXTERNAL_BYTE_ELEMENTS: |
| 3413 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: |
| 3414 __ sb(value, mem_operand); |
| 3415 break; |
| 3416 case EXTERNAL_SHORT_ELEMENTS: |
| 3417 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: |
| 3418 __ sh(value, mem_operand); |
| 3419 break; |
| 3420 case EXTERNAL_INT_ELEMENTS: |
| 3421 case EXTERNAL_UNSIGNED_INT_ELEMENTS: |
| 3422 __ sw(value, mem_operand); |
| 3423 break; |
| 3424 case EXTERNAL_FLOAT_ELEMENTS: |
| 3425 case EXTERNAL_DOUBLE_ELEMENTS: |
| 3426 case FAST_DOUBLE_ELEMENTS: |
| 3427 case FAST_ELEMENTS: |
| 3428 case FAST_SMI_ONLY_ELEMENTS: |
| 3429 case DICTIONARY_ELEMENTS: |
| 3430 case NON_STRICT_ARGUMENTS_ELEMENTS: |
| 3431 UNREACHABLE(); |
| 3432 break; |
| 3433 } |
| 3434 } |
| 3435 } |
| 3436 
|      
| 3437 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { |
| 3438 ASSERT(ToRegister(instr->object()).is(a2)); |
| 3439 ASSERT(ToRegister(instr->key()).is(a1)); |
| 3440 ASSERT(ToRegister(instr->value()).is(a0)); |
| 3441 |
| 3442 Handle<Code> ic = instr->strict_mode() |
| 3443 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() |
| 3444 : isolate()->builtins()->KeyedStoreIC_Initialize(); |
| 3445 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 3446 } |
| 3447 |
| 3448 |
| 3449 void LCodeGen::DoStringAdd(LStringAdd* instr) { |
| 3450 __ push(ToRegister(instr->left())); |
| 3451 __ push(ToRegister(instr->right())); |
| 3452 StringAddStub stub(NO_STRING_CHECK_IN_STUB); |
| 3453 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 3454 } |
| 3455 |
| 3456 |
| 3457 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { |
| 3458 class DeferredStringCharCodeAt: public LDeferredCode { |
| 3459 public: |
| 3460 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) |
| 3461 : LDeferredCode(codegen), instr_(instr) { } |
| 3462 virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); } |
| 3463 virtual LInstruction* instr() { return instr_; } |
| 3464 private: |
| 3465 LStringCharCodeAt* instr_; |
| 3466 }; |
| 3467 |
| 3468 Register temp = scratch1(); |
| 3469 Register string = ToRegister(instr->string()); |
| 3470 Register index = ToRegister(instr->index()); |
| 3471 Register result = ToRegister(instr->result()); |
| 3472 DeferredStringCharCodeAt* deferred = |
| 3473 new DeferredStringCharCodeAt(this, instr); |
| 3474 |
| 3475 // Fetch the instance type of the receiver into result register. |
| 3476 __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset)); |
| 3477 __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset)); |
| 3478 |
| 3479 // We need special handling for indirect strings. |
| 3480 Label check_sequential; |
| 3481 __ And(temp, result, kIsIndirectStringMask); |
| 3482 __ Branch(&check_sequential, eq, temp, Operand(zero_reg)); |
| 3483 |
| 3484 // Dispatch on the indirect string shape: slice or cons. |
| 3485 Label cons_string; |
| 3486 __ And(temp, result, kSlicedNotConsMask); |
| 3487 __ Branch(&cons_string, eq, temp, Operand(zero_reg)); |
| 3488 |
| 3489 // Handle slices. |
| 3490 Label indirect_string_loaded; |
| 3491 __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset)); |
| 3492 __ sra(temp, result, kSmiTagSize); |
| 3493 __ addu(index, index, temp); |
| 3494 __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset)); |
| 3495 __ jmp(&indirect_string_loaded); |
| 3496 |
| 3497 // Handle conses. |
| 3498 // Check whether the right-hand side is the empty string (i.e. if
| 3499 // this is really a flat string in a cons string). If that is not
| 3500 // the case, we would rather go to the runtime system now to flatten
| 3501 // the string.
| 3502 __ bind(&cons_string); |
| 3503 __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset)); |
| 3504 __ LoadRoot(temp, Heap::kEmptyStringRootIndex); |
| 3505 __ Branch(deferred->entry(), ne, result, Operand(temp)); |
| 3506 // Get the first of the two strings and load its instance type. |
| 3507 __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset)); |
| 3508 |
| 3509 __ bind(&indirect_string_loaded); |
| 3510 __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset)); |
| 3511 __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset)); |
| 3512 |
| 3513 // Check whether the string is sequential. The only non-sequential |
| 3514 // shapes we support have just been unwrapped above. |
| 3515 __ bind(&check_sequential); |
| 3516 STATIC_ASSERT(kSeqStringTag == 0); |
| 3517 __ And(temp, result, Operand(kStringRepresentationMask)); |
| 3518 __ Branch(deferred->entry(), ne, temp, Operand(zero_reg)); |
| 3519 |
| 3520 // Dispatch on the encoding: ASCII or two-byte. |
| 3521 Label ascii_string; |
| 3522 STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0); |
| 3523 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); |
| 3524 __ And(temp, result, Operand(kStringEncodingMask)); |
| 3525 __ Branch(&ascii_string, ne, temp, Operand(zero_reg)); |
| 3526 |
| 3527 // Two-byte string. |
| 3528 // Load the two-byte character code into the result register. |
| 3529 Label done; |
| 3530 __ Addu(result, |
| 3531 string, |
| 3532 Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); |
| 3533 __ sll(temp, index, 1); |
| 3534 __ Addu(result, result, temp); |
| 3535 __ lhu(result, MemOperand(result, 0)); |
| 3536 __ Branch(&done); |
| 3537 |
| 3538 // ASCII string. |
| 3539 // Load the byte into the result register. |
| 3540 __ bind(&ascii_string); |
| 3541 __ Addu(result, |
| 3542 string, |
| 3543 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
| 3544 __ Addu(result, result, index); |
| 3545 __ lbu(result, MemOperand(result, 0)); |
| 3546 |
| 3547 __ bind(&done); |
| 3548 __ bind(deferred->exit()); |
| 3549 } |
| 3550 |
| 3551 |
| 3552 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { |
| 3553 Register string = ToRegister(instr->string()); |
| 3554 Register result = ToRegister(instr->result()); |
| 3555 Register scratch = scratch0(); |
| 3556 |
| 3557 // TODO(3095996): Get rid of this. For now, we need to make the |
| 3558 // result register contain a valid pointer because it is already |
| 3559 // contained in the register pointer map. |
| 3560 __ mov(result, zero_reg); |
| 3561 |
| 3562 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
| 3563 __ push(string); |
| 3564 // Push the index as a smi. This is safe because of the checks in |
| 3565 // DoStringCharCodeAt above. |
| 3566 if (instr->index()->IsConstantOperand()) { |
| 3567 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); |
| 3568 __ li(scratch, Operand(Smi::FromInt(const_index)));
| 3569 __ push(scratch); |
| 3570 } else { |
| 3571 Register index = ToRegister(instr->index()); |
| 3572 __ SmiTag(index); |
| 3573 __ push(index); |
| 3574 } |
| 3575 CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr); |
| 3576 if (FLAG_debug_code) { |
| 3577 __ AbortIfNotSmi(v0); |
| 3578 } |
| 3579 __ SmiUntag(v0); |
| 3580 __ StoreToSafepointRegisterSlot(v0, result); |
| 3581 } |
| 3582 |
| 3583 |
| 3584 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { |
| 3585 class DeferredStringCharFromCode: public LDeferredCode { |
| 3586 public: |
| 3587 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) |
| 3588 : LDeferredCode(codegen), instr_(instr) { } |
| 3589 virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); } |
| 3590 virtual LInstruction* instr() { return instr_; } |
| 3591 private: |
| 3592 LStringCharFromCode* instr_; |
| 3593 }; |
| 3594 |
| 3595 DeferredStringCharFromCode* deferred = |
| 3596 new DeferredStringCharFromCode(this, instr); |
| 3597 |
| 3598 ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); |
| 3599 Register char_code = ToRegister(instr->char_code()); |
| 3600 Register result = ToRegister(instr->result()); |
| 3601 Register scratch = scratch0(); |
| 3602 ASSERT(!char_code.is(result)); |
| 3603 |
| 3604 __ Branch(deferred->entry(), hi, |
| 3605 char_code, Operand(String::kMaxAsciiCharCode)); |
| 3606 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); |
| 3607 __ sll(scratch, char_code, kPointerSizeLog2); |
| 3608 __ Addu(result, result, scratch); |
| 3609 __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize)); |
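|      // The cache holds undefined for character codes with no cached
|      // single-character string; fall back to the deferred code in that case.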
| 3610 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); |
| 3611 __ Branch(deferred->entry(), eq, result, Operand(scratch)); |
| 3612 __ bind(deferred->exit()); |
| 3613 } |
| 3614 |
| 3615 |
| 3616 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { |
| 3617 Register char_code = ToRegister(instr->char_code()); |
| 3618 Register result = ToRegister(instr->result()); |
| 3619 |
| 3620 // TODO(3095996): Get rid of this. For now, we need to make the |
| 3621 // result register contain a valid pointer because it is already |
| 3622 // contained in the register pointer map. |
| 3623 __ mov(result, zero_reg); |
| 3624 |
| 3625 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
| 3626 __ SmiTag(char_code); |
| 3627 __ push(char_code); |
| 3628 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr); |
| 3629 __ StoreToSafepointRegisterSlot(v0, result); |
| 3630 } |
| 3631 |
| 3632 |
| 3633 void LCodeGen::DoStringLength(LStringLength* instr) { |
| 3634 Register string = ToRegister(instr->InputAt(0)); |
| 3635 Register result = ToRegister(instr->result()); |
| 3636 __ lw(result, FieldMemOperand(string, String::kLengthOffset)); |
| 3637 } |
| 3638 |
| 3639 |
| 3640 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { |
| 3641 LOperand* input = instr->InputAt(0); |
| 3642 ASSERT(input->IsRegister() || input->IsStackSlot()); |
| 3643 LOperand* output = instr->result(); |
| 3644 ASSERT(output->IsDoubleRegister()); |
| 3645 FPURegister single_scratch = double_scratch0().low(); |
| 3646 if (input->IsStackSlot()) { |
| 3647 Register scratch = scratch0(); |
| 3648 __ lw(scratch, ToMemOperand(input)); |
| 3649 __ mtc1(scratch, single_scratch); |
| 3650 } else { |
| 3651 __ mtc1(ToRegister(input), single_scratch); |
| 3652 } |
| 3653 __ cvt_d_w(ToDoubleRegister(output), single_scratch); |
| 3654 } |
| 3655 |
| 3656 |
| 3657 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { |
| 3658 class DeferredNumberTagI: public LDeferredCode { |
| 3659 public: |
| 3660 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) |
| 3661 : LDeferredCode(codegen), instr_(instr) { } |
| 3662 virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); } |
| 3663 virtual LInstruction* instr() { return instr_; } |
| 3664 private: |
| 3665 LNumberTagI* instr_; |
| 3666 }; |
| 3667 |
| 3668 LOperand* input = instr->InputAt(0); |
| 3669 ASSERT(input->IsRegister() && input->Equals(instr->result())); |
| 3670 Register reg = ToRegister(input); |
| 3671 Register overflow = scratch0(); |
| 3672 |
| 3673 DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr); |
| 3674 __ SmiTagCheckOverflow(reg, overflow); |
| 3675 __ BranchOnOverflow(deferred->entry(), overflow); |
| 3676 __ bind(deferred->exit()); |
| 3677 } |
| 3678 |
| 3679 |
| 3680 void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) { |
| 3681 Label slow; |
| 3682 Register reg = ToRegister(instr->InputAt(0)); |
| 3683 FPURegister dbl_scratch = double_scratch0(); |
| 3684 |
| 3685 // Preserve the value of all registers. |
| 3686 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
| 3687 |
| 3688 // There was overflow, so bits 30 and 31 of the original integer |
| 3689 // disagree. Try to allocate a heap number in new space and store |
| 3690 // the value in there. If that fails, call the runtime system. |
| 3691 Label done; |
| 3692 __ SmiUntag(reg); |
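|      // After an overflowing SmiTag, untagging (an arithmetic shift right by
|      // one) yields a value whose sign bit is inverted; Xor with 0x80000000
|      // restores the original bit 31.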
| 3693 __ Xor(reg, reg, Operand(0x80000000)); |
| 3694 __ mtc1(reg, dbl_scratch); |
| 3695 __ cvt_d_w(dbl_scratch, dbl_scratch); |
| 3696 if (FLAG_inline_new) { |
| 3697 __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex); |
| 3698 __ AllocateHeapNumber(t1, a3, t0, t2, &slow); |
| 3699 if (!reg.is(t1)) __ mov(reg, t1); |
| 3700 __ Branch(&done); |
| 3701 } |
| 3702 |
| 3703 // Slow case: Call the runtime system to do the number allocation. |
| 3704 __ bind(&slow); |
| 3705 |
| 3706 // TODO(3095996): Put a valid pointer value in the stack slot where the result |
| 3707 // register is stored, as this register is in the pointer map, but contains an |
| 3708 // integer value. |
| 3709 __ StoreToSafepointRegisterSlot(zero_reg, reg); |
| 3710 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); |
| 3711 if (!reg.is(v0)) __ mov(reg, v0); |
| 3712 |
| 3713 // Done. Put the value in dbl_scratch into the value of the allocated heap |
| 3714 // number. |
| 3715 __ bind(&done); |
| 3716 __ sdc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); |
| 3717 __ StoreToSafepointRegisterSlot(reg, reg); |
| 3718 } |
| 3719 |
| 3720 |
| 3721 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { |
| 3722 class DeferredNumberTagD: public LDeferredCode { |
| 3723 public: |
| 3724 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) |
| 3725 : LDeferredCode(codegen), instr_(instr) { } |
| 3726 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } |
| 3727 virtual LInstruction* instr() { return instr_; } |
| 3728 private: |
| 3729 LNumberTagD* instr_; |
| 3730 }; |
| 3731 |
| 3732 DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0)); |
| 3733 Register scratch = scratch0(); |
| 3734 Register reg = ToRegister(instr->result()); |
| 3735 Register temp1 = ToRegister(instr->TempAt(0)); |
| 3736 Register temp2 = ToRegister(instr->TempAt(1)); |
| 3737 |
| 3738 DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr); |
| 3739 if (FLAG_inline_new) { |
| 3740 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); |
| 3741 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry()); |
| 3742 } else { |
| 3743 __ Branch(deferred->entry()); |
| 3744 } |
| 3745 __ bind(deferred->exit()); |
| 3746 __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset)); |
| 3747 } |
| 3748 |
| 3749 |
| 3750 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { |
| 3751 // TODO(3095996): Get rid of this. For now, we need to make the |
| 3752 // result register contain a valid pointer because it is already |
| 3753 // contained in the register pointer map. |
| 3754 Register reg = ToRegister(instr->result()); |
| 3755 __ mov(reg, zero_reg); |
| 3756 |
| 3757 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
| 3758 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); |
| 3759 __ StoreToSafepointRegisterSlot(v0, reg); |
| 3760 } |
| 3761 |
| 3762 |
| 3763 void LCodeGen::DoSmiTag(LSmiTag* instr) { |
| 3764 LOperand* input = instr->InputAt(0); |
| 3765 ASSERT(input->IsRegister() && input->Equals(instr->result())); |
| 3766 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)); |
| 3767 __ SmiTag(ToRegister(input)); |
| 3768 } |
| 3769 |
| 3770 |
| 3771 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { |
| 3772 Register scratch = scratch0(); |
| 3773 LOperand* input = instr->InputAt(0); |
| 3774 ASSERT(input->IsRegister() && input->Equals(instr->result())); |
| 3775 if (instr->needs_check()) { |
| 3776 STATIC_ASSERT(kHeapObjectTag == 1); |
| 3777 // If the input is a HeapObject, the value of scratch will be non-zero.
| 3778 __ And(scratch, ToRegister(input), Operand(kHeapObjectTag)); |
| 3779 __ SmiUntag(ToRegister(input)); |
| 3780 DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg)); |
| 3781 } else { |
| 3782 __ SmiUntag(ToRegister(input)); |
| 3783 } |
| 3784 } |
| 3785 |
| 3786 |
| 3787 void LCodeGen::EmitNumberUntagD(Register input_reg, |
| 3788 DoubleRegister result_reg, |
| 3789 bool deoptimize_on_undefined, |
| 3790 LEnvironment* env) { |
| 3791 Register scratch = scratch0(); |
| 3792 |
| 3793 Label load_smi, heap_number, done; |
| 3794 |
| 3795 // Smi check. |
| 3796 __ JumpIfSmi(input_reg, &load_smi); |
| 3797 |
| 3798 // Heap number map check. |
| 3799 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
| 3800 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
| 3801 if (deoptimize_on_undefined) { |
| 3802 DeoptimizeIf(ne, env, scratch, Operand(at)); |
| 3803 } else { |
| 3804 Label heap_number; |
| 3805 __ Branch(&heap_number, eq, scratch, Operand(at)); |
| 3806 |
| 3807 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
| 3808 DeoptimizeIf(ne, env, input_reg, Operand(at)); |
| 3809 |
| 3810 // Convert undefined to NaN. |
| 3811 __ LoadRoot(at, Heap::kNanValueRootIndex); |
| 3812 __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset)); |
| 3813 __ Branch(&done); |
| 3814 |
| 3815 __ bind(&heap_number); |
| 3816 } |
| 3817 // Heap number to double register conversion. |
| 3818 __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
| 3819 __ Branch(&done); |
| 3820 |
| 3821 // Smi to double register conversion.
| 3822 __ bind(&load_smi); |
| 3823 __ SmiUntag(input_reg); // Untag smi before converting to float. |
| 3824 __ mtc1(input_reg, result_reg); |
| 3825 __ cvt_d_w(result_reg, result_reg); |
| 3826 __ SmiTag(input_reg); // Retag smi. |
| 3827 __ bind(&done); |
| 3828 } |
| 3829 |
| 3830 |
| 3831 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { |
| 3832 Register input_reg = ToRegister(instr->InputAt(0)); |
| 3833 Register scratch1 = scratch0(); |
| 3834 Register scratch2 = ToRegister(instr->TempAt(0)); |
| 3835 DoubleRegister double_scratch = double_scratch0(); |
| 3836 FPURegister single_scratch = double_scratch.low(); |
| 3837 |
| 3838 ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2)); |
| 3839 ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1)); |
| 3840 |
| 3841 Label done; |
| 3842 |
| 3843 // The input is a tagged HeapObject. |
| 3844 // Heap number map check. |
| 3845 __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
| 3846 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
| 3847 // The heap number map in 'at' and the object's map in scratch1 are used
| 3848 // by the tests in both clauses of the if below.
| 3849 |
| 3850 if (instr->truncating()) { |
| 3851 Register scratch3 = ToRegister(instr->TempAt(1)); |
| 3852 DoubleRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2)); |
| 3853 ASSERT(!scratch3.is(input_reg) && |
| 3854 !scratch3.is(scratch1) && |
| 3855 !scratch3.is(scratch2)); |
| 3856 // Performs a truncating conversion of a floating point number as used by |
| 3857 // the JS bitwise operations. |
| 3858 Label heap_number; |
| 3859 __ Branch(&heap_number, eq, scratch1, Operand(at)); // HeapNumber map? |
| 3860 // Check for undefined. Undefined is converted to zero for truncating |
| 3861 // conversions. |
| 3862 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
| 3863 DeoptimizeIf(ne, instr->environment(), input_reg, Operand(at)); |
| 3864 ASSERT(ToRegister(instr->result()).is(input_reg)); |
| 3865 __ mov(input_reg, zero_reg); |
| 3866 __ Branch(&done); |
| 3867 |
| 3868 __ bind(&heap_number); |
| 3869 __ ldc1(double_scratch2, |
| 3870 FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
| 3871 __ EmitECMATruncate(input_reg, |
| 3872 double_scratch2, |
| 3873 single_scratch, |
| 3874 scratch1, |
| 3875 scratch2, |
| 3876 scratch3); |
| 3877 } else { |
| 3878 // Deoptimize if we don't have a heap number. |
| 3879 DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at)); |
| 3880 |
| 3881 // Load the double value. |
| 3882 __ ldc1(double_scratch, |
| 3883 FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
| 3884 |
| 3885 Register except_flag = scratch2; |
| 3886 __ EmitFPUTruncate(kRoundToZero, |
| 3887 single_scratch, |
| 3888 double_scratch, |
| 3889 scratch1, |
| 3890 except_flag, |
| 3891 kCheckForInexactConversion); |
| 3892 |
| 3893 // Deopt if the operation did not succeed. |
| 3894 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg)); |
| 3895 |
| 3896 // Load the result. |
| 3897 __ mfc1(input_reg, single_scratch); |
| 3898 |
| 3899 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3900 __ Branch(&done, ne, input_reg, Operand(zero_reg)); |
| 3901 |
| 3902 __ mfc1(scratch1, double_scratch.high()); |
| 3903 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); |
| 3904 DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg)); |
| 3905 } |
| 3906 } |
| 3907 __ bind(&done); |
| 3908 } |
| 3909 |
| 3910 |
| 3911 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
| 3912 class DeferredTaggedToI: public LDeferredCode { |
| 3913 public: |
| 3914 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) |
| 3915 : LDeferredCode(codegen), instr_(instr) { } |
| 3916 virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); } |
| 3917 virtual LInstruction* instr() { return instr_; } |
| 3918 private: |
| 3919 LTaggedToI* instr_; |
| 3920 }; |
| 3921 |
| 3922 LOperand* input = instr->InputAt(0); |
| 3923 ASSERT(input->IsRegister()); |
| 3924 ASSERT(input->Equals(instr->result())); |
| 3925 |
| 3926 Register input_reg = ToRegister(input); |
| 3927 |
| 3928 DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr); |
| 3929 |
| 3930 // Let the deferred code handle the HeapObject case. |
| 3931 __ JumpIfNotSmi(input_reg, deferred->entry()); |
| 3932 |
| 3933 // Smi to int32 conversion. |
| 3934 __ SmiUntag(input_reg); |
| 3935 __ bind(deferred->exit()); |
| 3936 } |
| 3937 |
| 3938 |
| 3939 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { |
| 3940 LOperand* input = instr->InputAt(0); |
| 3941 ASSERT(input->IsRegister()); |
| 3942 LOperand* result = instr->result(); |
| 3943 ASSERT(result->IsDoubleRegister()); |
| 3944 |
| 3945 Register input_reg = ToRegister(input); |
| 3946 DoubleRegister result_reg = ToDoubleRegister(result); |
| 3947 |
| 3948 EmitNumberUntagD(input_reg, result_reg, |
| 3949 instr->hydrogen()->deoptimize_on_undefined(), |
| 3950 instr->environment()); |
| 3951 } |
| 3952 |
| 3953 |
| 3954 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { |
| 3955 Register result_reg = ToRegister(instr->result()); |
| 3956 Register scratch1 = scratch0(); |
| 3957 Register scratch2 = ToRegister(instr->TempAt(0)); |
| 3958 DoubleRegister double_input = ToDoubleRegister(instr->InputAt(0)); |
| 3959 DoubleRegister double_scratch = double_scratch0(); |
| 3960 FPURegister single_scratch = double_scratch0().low(); |
| 3961 |
| 3962 if (instr->truncating()) { |
| 3963 Register scratch3 = ToRegister(instr->TempAt(1)); |
| 3964 __ EmitECMATruncate(result_reg, |
| 3965 double_input, |
| 3966 single_scratch, |
| 3967 scratch1, |
| 3968 scratch2, |
| 3969 scratch3); |
| 3970 } else { |
| 3971 Register except_flag = scratch2; |
| 3972 |
| 3973 __ EmitFPUTruncate(kRoundToMinusInf, |
| 3974 single_scratch, |
| 3975 double_input, |
| 3976 scratch1, |
| 3977 except_flag, |
| 3978 kCheckForInexactConversion); |
| 3979 |
| 3980 // Deopt if the operation did not succeed (except_flag != 0). |
| 3981 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg)); |
| 3982 |
| 3983 // Load the result. |
| 3984 __ mfc1(result_reg, single_scratch); |
| 3985 } |
| 3986 } |
| 3987 |
| 3988 |
| 3989 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { |
| 3990 LOperand* input = instr->InputAt(0); |
| 3991 __ And(at, ToRegister(input), Operand(kSmiTagMask)); |
| 3992 DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg)); |
| 3993 } |
| 3994 |
| 3995 |
| 3996 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { |
| 3997 LOperand* input = instr->InputAt(0); |
| 3998 __ And(at, ToRegister(input), Operand(kSmiTagMask)); |
| 3999 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg)); |
| 4000 } |
| 4001 |
| 4002 |
| 4003 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { |
| 4004 Register input = ToRegister(instr->InputAt(0)); |
| 4005 Register scratch = scratch0(); |
| 4006 |
| 4007 __ GetObjectType(input, scratch, scratch); |
| 4008 |
| 4009 if (instr->hydrogen()->is_interval_check()) { |
| 4010 InstanceType first; |
| 4011 InstanceType last; |
| 4012 instr->hydrogen()->GetCheckInterval(&first, &last); |
| 4013 |
| 4014 // If there is only one type in the interval, check for equality.
| 4015 if (first == last) { |
| 4016 DeoptimizeIf(ne, instr->environment(), scratch, Operand(first)); |
| 4017 } else { |
| 4018 DeoptimizeIf(lo, instr->environment(), scratch, Operand(first)); |
| 4019 // Omit check for the last type. |
| 4020 if (last != LAST_TYPE) { |
| 4021 DeoptimizeIf(hi, instr->environment(), scratch, Operand(last)); |
| 4022 } |
| 4023 } |
| 4024 } else { |
| 4025 uint8_t mask; |
| 4026 uint8_t tag; |
| 4027 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); |
| 4028 |
| 4029 if (IsPowerOf2(mask)) { |
| 4030 ASSERT(tag == 0 || IsPowerOf2(tag)); |
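|      // With a single-bit mask the tag is either 0 or equal to the mask, so
|      // testing the isolated bit against zero suffices.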
| 4031 __ And(at, scratch, mask); |
| 4032 DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(), |
| 4033 at, Operand(zero_reg)); |
| 4034 } else { |
| 4035 __ And(scratch, scratch, Operand(mask)); |
| 4036 DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag)); |
| 4037 } |
| 4038 } |
| 4039 } |
| 4040 |
| 4041 |
| 4042 void LCodeGen::DoCheckFunction(LCheckFunction* instr) { |
| 4043 ASSERT(instr->InputAt(0)->IsRegister()); |
| 4044 Register reg = ToRegister(instr->InputAt(0)); |
| 4045 DeoptimizeIf(ne, instr->environment(), reg, |
| 4046 Operand(instr->hydrogen()->target())); |
| 4047 } |
| 4048 |
| 4049 |
| 4050 void LCodeGen::DoCheckMap(LCheckMap* instr) { |
| 4051 Register scratch = scratch0(); |
| 4052 LOperand* input = instr->InputAt(0); |
| 4053 ASSERT(input->IsRegister()); |
| 4054 Register reg = ToRegister(input); |
| 4055 __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 4056 DeoptimizeIf(ne, |
| 4057 instr->environment(), |
| 4058 scratch, |
| 4059 Operand(instr->hydrogen()->map())); |
| 4060 } |
| 4061 |
| 4062 |
| 4063 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
| 4064 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); |
| 4065 Register result_reg = ToRegister(instr->result()); |
| 4066 DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0)); |
| 4067 __ ClampDoubleToUint8(result_reg, value_reg, temp_reg); |
| 4068 } |
| 4069 |
| 4070 |
| 4071 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { |
| 4072 Register unclamped_reg = ToRegister(instr->unclamped()); |
| 4073 Register result_reg = ToRegister(instr->result()); |
| 4074 __ ClampUint8(result_reg, unclamped_reg); |
| 4075 } |
| 4076 |
| 4077 |
| 4078 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { |
| 4079 Register scratch = scratch0(); |
| 4080 Register input_reg = ToRegister(instr->unclamped()); |
| 4081 Register result_reg = ToRegister(instr->result()); |
| 4082 DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0)); |
| 4083 Label is_smi, done, heap_number; |
| 4084 |
| 4085 // Both smi and heap number cases are handled. |
| 4086 __ JumpIfSmi(input_reg, &is_smi); |
| 4087 |
| 4088 // Check for heap number |
| 4089 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
| 4090 __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map())); |
| 4091 |
| 4092 // Check for undefined. Undefined is converted to zero for clamping |
| 4093 // conversions. |
| 4094 DeoptimizeIf(ne, instr->environment(), input_reg, |
| 4095 Operand(factory()->undefined_value())); |
| 4096 __ mov(result_reg, zero_reg); |
| 4097 __ jmp(&done); |
| 4098 |
| 4099 // Heap number |
| 4100 __ bind(&heap_number); |
| 4101 __ ldc1(double_scratch0(), FieldMemOperand(input_reg, |
| 4102 HeapNumber::kValueOffset)); |
| 4103 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg); |
| 4104 __ jmp(&done); |
| 4105 |
| 4106 // smi |
| 4107 __ bind(&is_smi); |
| 4108 __ SmiUntag(scratch, input_reg); |
| 4109 __ ClampUint8(result_reg, scratch); |
| 4110 |
| 4111 __ bind(&done); |
| 4112 } |
| 4113 |
| 4114 |
| 4115 void LCodeGen::LoadHeapObject(Register result, |
| 4116 Handle<HeapObject> object) { |
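|      // Objects in new space may move during GC, so reference them through a
|      // global property cell (which the GC keeps up to date) instead of
|      // embedding the pointer directly.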
| 4117 if (heap()->InNewSpace(*object)) { |
| 4118 Handle<JSGlobalPropertyCell> cell = |
| 4119 factory()->NewJSGlobalPropertyCell(object); |
| 4120 __ li(result, Operand(cell)); |
| 4121 __ lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset)); |
| 4122 } else { |
| 4123 __ li(result, Operand(object)); |
| 4124 } |
| 4125 } |
| 4126 |
| 4127 |
| 4128 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { |
| 4129 Register temp1 = ToRegister(instr->TempAt(0)); |
| 4130 Register temp2 = ToRegister(instr->TempAt(1)); |
| 4131 |
| 4132 Handle<JSObject> holder = instr->holder(); |
| 4133 Handle<JSObject> current_prototype = instr->prototype(); |
| 4134 |
| 4135 // Load prototype object. |
| 4136 LoadHeapObject(temp1, current_prototype); |
| 4137 |
| 4138 // Check prototype maps up to the holder. |
| 4139 while (!current_prototype.is_identical_to(holder)) { |
| 4140 __ lw(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset)); |
| 4141 DeoptimizeIf(ne, |
| 4142 instr->environment(), |
| 4143 temp2, |
| 4144 Operand(Handle<Map>(current_prototype->map()))); |
| 4145 current_prototype = |
| 4146 Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype())); |
| 4147 // Load next prototype object. |
| 4148 LoadHeapObject(temp1, current_prototype); |
| 4149 } |
| 4150 |
| 4151 // Check the holder map. |
| 4152 __ lw(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset)); |
| 4153 DeoptimizeIf(ne, |
| 4154 instr->environment(), |
| 4155 temp2, |
| 4156 Operand(Handle<Map>(current_prototype->map()))); |
| 4157 } |
| 4158 |
| 4159 |
| 4160 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { |
| 4161 __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
| 4162 __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset)); |
| 4163 __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); |
| 4164 __ li(a1, Operand(instr->hydrogen()->constant_elements())); |
| 4165 __ Push(a3, a2, a1); |
| 4166 |
| 4167 // Pick the right runtime function or stub to call. |
| 4168 int length = instr->hydrogen()->length(); |
| 4169 if (instr->hydrogen()->IsCopyOnWrite()) { |
| 4170 ASSERT(instr->hydrogen()->depth() == 1); |
| 4171 FastCloneShallowArrayStub::Mode mode = |
| 4172 FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS; |
| 4173 FastCloneShallowArrayStub stub(mode, length); |
| 4174 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 4175 } else if (instr->hydrogen()->depth() > 1) { |
| 4176 CallRuntime(Runtime::kCreateArrayLiteral, 3, instr); |
| 4177 } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { |
| 4178 CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr); |
| 4179 } else { |
| 4180 FastCloneShallowArrayStub::Mode mode = |
| 4181 FastCloneShallowArrayStub::CLONE_ELEMENTS; |
| 4182 FastCloneShallowArrayStub stub(mode, length); |
| 4183 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 4184 } |
| 4185 } |
| 4186 |
| 4187 |
| 4188 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { |
| 4189 ASSERT(ToRegister(instr->result()).is(v0)); |
| 4190 __ lw(t0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
| 4191 __ lw(t0, FieldMemOperand(t0, JSFunction::kLiteralsOffset)); |
| 4192 __ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); |
| 4193 __ li(a2, Operand(instr->hydrogen()->constant_properties())); |
| 4194 __ li(a1, Operand(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0))); |
| 4195 __ Push(t0, a3, a2, a1); |
| 4196 |
| 4197 // Pick the right runtime function to call. |
| 4198 if (instr->hydrogen()->depth() > 1) { |
| 4199 CallRuntime(Runtime::kCreateObjectLiteral, 4, instr); |
| 4200 } else { |
| 4201 CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr); |
| 4202 } |
| 4203 } |
| 4204 |
| 4205 |
| 4206 void LCodeGen::DoToFastProperties(LToFastProperties* instr) { |
| 4207 ASSERT(ToRegister(instr->InputAt(0)).is(a0)); |
| 4208 ASSERT(ToRegister(instr->result()).is(v0)); |
| 4209 __ push(a0); |
| 4210 CallRuntime(Runtime::kToFastProperties, 1, instr); |
| 4211 } |
| 4212 |
| 4213 |
| 4214 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { |
| 4215 Label materialized; |
| 4216 // Registers will be used as follows: |
| 4217 // a3 = JS function. |
| 4218 // t3 = literals array. |
| 4219 // a1 = regexp literal. |
| 4220 // a0 = regexp literal clone. |
| 4221 // a2 and t0-t2 are used as temporaries. |
| 4222 __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
| 4223 __ lw(t3, FieldMemOperand(a3, JSFunction::kLiteralsOffset)); |
| 4224 int literal_offset = FixedArray::kHeaderSize + |
| 4225 instr->hydrogen()->literal_index() * kPointerSize; |
| 4226 __ lw(a1, FieldMemOperand(t3, literal_offset)); |
| 4227 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
| 4228 __ Branch(&materialized, ne, a1, Operand(at)); |
| 4229 |
| 4230 // Create regexp literal using runtime function |
| 4231 // Result will be in v0. |
| 4232 __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); |
| 4233 __ li(t1, Operand(instr->hydrogen()->pattern())); |
| 4234 __ li(t0, Operand(instr->hydrogen()->flags())); |
| 4235 __ Push(t3, t2, t1, t0); |
| 4236 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr); |
| 4237 __ mov(a1, v0); |
| 4238 |
| 4239 __ bind(&materialized); |
| 4240 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; |
| 4241 Label allocated, runtime_allocate; |
| 4242 |
| 4243 __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT); |
| 4244 __ jmp(&allocated); |
| 4245 |
| 4246 __ bind(&runtime_allocate); |
| 4247 __ li(a0, Operand(Smi::FromInt(size))); |
| 4248 __ Push(a1, a0); |
| 4249 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); |
| 4250 __ pop(a1); |
| 4251 |
| 4252 __ bind(&allocated); |
| 4253 // Copy the content into the newly allocated memory. |
| 4254 // (Unroll copy loop once for better throughput). |
| 4255 for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) { |
| 4256 __ lw(a3, FieldMemOperand(a1, i)); |
| 4257 __ lw(a2, FieldMemOperand(a1, i + kPointerSize)); |
| 4258 __ sw(a3, FieldMemOperand(v0, i)); |
| 4259 __ sw(a2, FieldMemOperand(v0, i + kPointerSize)); |
| 4260 } |
| 4261 if ((size % (2 * kPointerSize)) != 0) { |
| 4262 __ lw(a3, FieldMemOperand(a1, size - kPointerSize)); |
| 4263 __ sw(a3, FieldMemOperand(v0, size - kPointerSize)); |
| 4264 } |
| 4265 } |
| 4266 |
| 4267 |
| 4268 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { |
| 4269 // Use the fast case closure allocation code that allocates in new |
| 4270 // space for nested functions that don't need literals cloning. |
| 4271 Handle<SharedFunctionInfo> shared_info = instr->shared_info(); |
| 4272 bool pretenure = instr->hydrogen()->pretenure(); |
| 4273 if (!pretenure && shared_info->num_literals() == 0) { |
| 4274 FastNewClosureStub stub( |
| 4275 shared_info->strict_mode() ? kStrictMode : kNonStrictMode); |
| 4276 __ li(a1, Operand(shared_info)); |
| 4277 __ push(a1); |
| 4278 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 4279 } else { |
| 4280 __ li(a2, Operand(shared_info)); |
| 4281 __ li(a1, Operand(pretenure |
| 4282 ? factory()->true_value() |
| 4283 : factory()->false_value())); |
| 4284 __ Push(cp, a2, a1); |
| 4285 CallRuntime(Runtime::kNewClosure, 3, instr); |
| 4286 } |
| 4287 } |
| 4288 |
| 4289 |
| 4290 void LCodeGen::DoTypeof(LTypeof* instr) { |
| 4291 ASSERT(ToRegister(instr->result()).is(v0)); |
| 4292 Register input = ToRegister(instr->InputAt(0)); |
| 4293 __ push(input); |
| 4294 CallRuntime(Runtime::kTypeof, 1, instr); |
| 4295 } |
| 4296 |
| 4297 |
| 4298 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { |
| 4299 Register input = ToRegister(instr->InputAt(0)); |
| 4300 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
| 4301 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
| 4302 Label* true_label = chunk_->GetAssemblyLabel(true_block); |
| 4303 Label* false_label = chunk_->GetAssemblyLabel(false_block); |
| 4304 |
| 4305 Register cmp1 = no_reg; |
| 4306 Operand cmp2 = Operand(no_reg); |
| 4307 |
| 4308 Condition final_branch_condition = EmitTypeofIs(true_label, |
| 4309 false_label, |
| 4310 input, |
| 4311 instr->type_literal(), |
| 4312 cmp1, |
| 4313 cmp2); |
| 4314 |
| 4315 ASSERT(cmp1.is_valid()); |
| 4316 ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid()); |
| 4317 |
| 4318 if (final_branch_condition != kNoCondition) { |
| 4319 EmitBranch(true_block, false_block, final_branch_condition, cmp1, cmp2); |
| 4320 } |
| 4321 } |
| 4322 |
| 4323 |
| 4324 Condition LCodeGen::EmitTypeofIs(Label* true_label, |
| 4325 Label* false_label, |
| 4326 Register input, |
| 4327 Handle<String> type_name, |
| 4328 Register& cmp1, |
| 4329 Operand& cmp2) { |
| 4330 // This function makes heavy use of branch delay slots, which are used to
| 4331 // load values that are safe to use regardless of the type of the input
| 4332 // register.
| 4333 Condition final_branch_condition = kNoCondition; |
| 4334 Register scratch = scratch0(); |
| 4335 if (type_name->Equals(heap()->number_symbol())) { |
| 4336 __ JumpIfSmi(input, true_label); |
| 4337 __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 4338 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
| 4339 cmp1 = input; |
| 4340 cmp2 = Operand(at); |
| 4341 final_branch_condition = eq; |
| 4342 |
| 4343 } else if (type_name->Equals(heap()->string_symbol())) { |
| 4344 __ JumpIfSmi(input, false_label); |
| 4345 __ GetObjectType(input, input, scratch); |
| 4346 __ Branch(USE_DELAY_SLOT, false_label, |
| 4347 ge, scratch, Operand(FIRST_NONSTRING_TYPE)); |
| 4348 // input now holds the map, so the bit field load in the delay slot below
| 4349 // is safe even if the branch above is taken.
| 4350 __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset)); |
| 4351 __ And(at, at, 1 << Map::kIsUndetectable); |
| 4352 cmp1 = at; |
| 4353 cmp2 = Operand(zero_reg); |
| 4354 final_branch_condition = eq; |
| 4355 |
| 4356 } else if (type_name->Equals(heap()->boolean_symbol())) { |
| 4357 __ LoadRoot(at, Heap::kTrueValueRootIndex); |
| 4358 __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input)); |
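| // The LoadRoot below lands in the delay slot; loading the false value is
| // harmless even when the branch to true_label is taken.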
| 4359 __ LoadRoot(at, Heap::kFalseValueRootIndex); |
| 4360 cmp1 = at; |
| 4361 cmp2 = Operand(input); |
| 4362 final_branch_condition = eq; |
| 4363 |
| 4364 } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) { |
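| // No branch is emitted here; the caller's EmitBranch compares input
| // against the null value loaded below.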
| 4365 __ LoadRoot(at, Heap::kNullValueRootIndex); |
| 4366 cmp1 = at; |
| 4367 cmp2 = Operand(input); |
| 4368 final_branch_condition = eq; |
| 4369 |
| 4370 } else if (type_name->Equals(heap()->undefined_symbol())) { |
| 4371 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
| 4372 __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input)); |
| 4373 // The first instruction of JumpIfSmi is an And, so it is safe to execute
| 4374 // in the delay slot.
| 4375 __ JumpIfSmi(input, false_label); |
| 4376 // Check for undetectable objects => true. |
| 4377 __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 4378 __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset)); |
| 4379 __ And(at, at, 1 << Map::kIsUndetectable); |
| 4380 cmp1 = at; |
| 4381 cmp2 = Operand(zero_reg); |
| 4382 final_branch_condition = ne; |
| 4383 |
| 4384 } else if (type_name->Equals(heap()->function_symbol())) { |
| 4385 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); |
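| // typeof returns "function" for exactly two instance types:
| // JS_FUNCTION_TYPE is tested by the branch below, and
| // JS_FUNCTION_PROXY_TYPE by the caller through cmp1/cmp2.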
| 4386 __ JumpIfSmi(input, false_label); |
| 4387 __ GetObjectType(input, scratch, input); |
| 4388 __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE)); |
| 4389 cmp1 = input; |
| 4390 cmp2 = Operand(JS_FUNCTION_PROXY_TYPE); |
| 4391 final_branch_condition = eq; |
| 4392 |
| 4393 } else if (type_name->Equals(heap()->object_symbol())) { |
| 4394 __ JumpIfSmi(input, false_label); |
| 4395 if (!FLAG_harmony_typeof) { |
| 4396 __ LoadRoot(at, Heap::kNullValueRootIndex); |
| 4397 __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input)); |
| 4398 } |
| 4399 // input is an object, so it is safe to start GetObjectType in the delay slot.
| 4400 __ GetObjectType(input, input, scratch); |
| 4401 __ Branch(USE_DELAY_SLOT, false_label, |
| 4402 lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
| 4403 // Still an object, so the InstanceType can be loaded. |
| 4404 __ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset)); |
| 4405 __ Branch(USE_DELAY_SLOT, false_label, |
| 4406 gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
| 4407 // Still an object, so the BitField can be loaded. |
| 4408 // Check for undetectable objects => false. |
| 4409 __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset)); |
| 4410 __ And(at, at, 1 << Map::kIsUndetectable); |
| 4411 cmp1 = at; |
| 4412 cmp2 = Operand(zero_reg); |
| 4413 final_branch_condition = eq; |
| 4414 |
| 4415 } else { |
| 4416 cmp1 = at; |
| 4417 cmp2 = Operand(zero_reg); // Set to valid operands so the caller's assertions pass.
| 4418 __ Branch(false_label); |
| 4419 } |
| 4420 |
| 4421 return final_branch_condition; |
| 4422 } |
| 4423 |
| 4424 |
| 4425 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) { |
| 4426 Register temp1 = ToRegister(instr->TempAt(0)); |
| 4427 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
| 4428 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
| 4429 |
| 4430 EmitIsConstructCall(temp1, scratch0()); |
| 4431 |
| 4432 EmitBranch(true_block, false_block, eq, temp1, |
| 4433 Operand(Smi::FromInt(StackFrame::CONSTRUCT))); |
| 4434 } |
| 4435 |
| 4436 |
| 4437 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) { |
| 4438 ASSERT(!temp1.is(temp2)); |
| 4439 // Get the frame pointer for the calling frame. |
| 4440 __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
| 4441 |
| 4442 // Skip the arguments adaptor frame if it exists. |
| 4443 Label check_frame_marker; |
| 4444 __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset)); |
| 4445 __ Branch(&check_frame_marker, ne, temp2, |
| 4446 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
| 4447 __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset)); |
| 4448 |
| 4449 // Check the marker in the calling frame. |
| 4450 __ bind(&check_frame_marker); |
| 4451 __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset)); |
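| // The marker is left in temp1; DoIsConstructCallAndBranch compares it
| // against StackFrame::CONSTRUCT.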
| 4452 } |
| 4453 |
| 4454 |
| 4455 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { |
| 4456 // No code is emitted for the lazy bailout instruction. It only captures the
| 4457 // environment after a call so the safepoint data can carry deoptimization data.
| 4458 } |
| 4459 |
| 4460 |
| 4461 void LCodeGen::DoDeoptimize(LDeoptimize* instr) { |
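| // Deoptimize unconditionally: al always holds, and the zero_reg operands
| // are dummy values for the unconditional case.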
| 4462 DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg)); |
| 4463 } |
| 4464 |
| 4465 |
| 4466 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) { |
| 4467 Register object = ToRegister(instr->object()); |
| 4468 Register key = ToRegister(instr->key()); |
| 4469 Register strict = scratch0(); |
| 4470 __ li(strict, Operand(Smi::FromInt(strict_mode_flag()))); |
| 4471 __ Push(object, key, strict); |
| 4472 ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); |
| 4473 LPointerMap* pointers = instr->pointer_map(); |
| 4474 LEnvironment* env = instr->deoptimization_environment(); |
| 4475 RecordPosition(pointers->position()); |
| 4476 RegisterEnvironmentForDeoptimization(env); |
| 4477 SafepointGenerator safepoint_generator(this, |
| 4478 pointers, |
| 4479 env->deoptimization_index()); |
| 4480 __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator); |
| 4481 } |
| 4482 |
| 4483 |
| 4484 void LCodeGen::DoIn(LIn* instr) { |
| 4485 Register obj = ToRegister(instr->object()); |
| 4486 Register key = ToRegister(instr->key()); |
| 4487 __ Push(key, obj); |
| 4488 ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); |
| 4489 LPointerMap* pointers = instr->pointer_map(); |
| 4490 LEnvironment* env = instr->deoptimization_environment(); |
| 4491 RecordPosition(pointers->position()); |
| 4492 RegisterEnvironmentForDeoptimization(env); |
| 4493 SafepointGenerator safepoint_generator(this, |
| 4494 pointers, |
| 4495 env->deoptimization_index()); |
| 4496 __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator); |
| 4497 } |
| 4498 |
| 4499 |
| 4500 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { |
| 4501 { |
| 4502 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
| 4503 __ CallRuntimeSaveDoubles(Runtime::kStackGuard); |
| 4504 RegisterLazyDeoptimization( |
| 4505 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); |
| 4506 } |
| 4507 |
| 4508 // The gap code includes restoring the safepoint registers.
| 4509 int pc = masm()->pc_offset(); |
| 4510 safepoints_.SetPcAfterGap(pc); |
| 4511 } |
| 4512 |
| 4513 |
| 4514 void LCodeGen::DoStackCheck(LStackCheck* instr) { |
| 4515 class DeferredStackCheck: public LDeferredCode { |
| 4516 public: |
| 4517 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) |
| 4518 : LDeferredCode(codegen), instr_(instr) { } |
| 4519 virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); } |
| 4520 virtual LInstruction* instr() { return instr_; } |
| 4521 private: |
| 4522 LStackCheck* instr_; |
| 4523 }; |
| 4524 |
| 4525 if (instr->hydrogen()->is_function_entry()) { |
| 4526 // Perform stack overflow check. |
| 4527 Label done; |
| 4528 __ LoadRoot(at, Heap::kStackLimitRootIndex); |
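| // There is no overflow while sp stays at or above the stack limit
| // (hs is an unsigned comparison).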
| 4529 __ Branch(&done, hs, sp, Operand(at)); |
| 4530 StackCheckStub stub; |
| 4531 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 4532 __ bind(&done); |
| 4533 } else { |
| 4534 ASSERT(instr->hydrogen()->is_backwards_branch()); |
| 4535 // If this goto needs a stack overflow check, perform it before jumping.
| 4536 DeferredStackCheck* deferred_stack_check = |
| 4537 new DeferredStackCheck(this, instr); |
| 4538 __ LoadRoot(at, Heap::kStackLimitRootIndex); |
| 4539 __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at)); |
| 4540 __ bind(instr->done_label()); |
| 4541 deferred_stack_check->SetExit(instr->done_label()); |
| 4542 } |
| 4543 } |
| 4544 |
| 4545 |
| 4546 void LCodeGen::DoOsrEntry(LOsrEntry* instr) { |
| 4547 // This is a pseudo-instruction that ensures that the environment here is |
| 4548 // properly registered for deoptimization and records the assembler's PC |
| 4549 // offset. |
| 4550 LEnvironment* environment = instr->environment(); |
| 4551 environment->SetSpilledRegisters(instr->SpilledRegisterArray(), |
| 4552 instr->SpilledDoubleRegisterArray()); |
| 4553 |
| 4554 // If the environment were already registered, we would have no way of |
| 4555 // backpatching it with the spill slot operands. |
| 4556 ASSERT(!environment->HasBeenRegistered()); |
| 4557 RegisterEnvironmentForDeoptimization(environment); |
| 4558 ASSERT(osr_pc_offset_ == -1); |
| 4559 osr_pc_offset_ = masm()->pc_offset(); |
| 4560 } |
| 4561 |
| 4562 |
| 4563 #undef __ |
| 4564 |
| 4565 } } // namespace v8::internal |