| OLD | NEW |
| (Empty) | |
| 1 // Copyright 2010 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are |
| 4 // met: |
| 5 // |
| 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. |
| 12 // * Neither the name of Google Inc. nor the names of its |
| 13 // contributors may be used to endorse or promote products derived |
| 14 // from this software without specific prior written permission. |
| 15 // |
| 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 |
| 28 #include "arm/lithium-codegen-arm.h" |
| 29 #include "code-stubs.h" |
| 30 #include "stub-cache.h" |
| 31 |
| 32 namespace v8 { |
| 33 namespace internal { |
| 34 |
| 35 |
| 36 class SafepointGenerator : public PostCallGenerator { |
| 37 public: |
| 38 SafepointGenerator(LCodeGen* codegen, |
| 39 LPointerMap* pointers, |
| 40 int deoptimization_index) |
| 41 : codegen_(codegen), |
| 42 pointers_(pointers), |
| 43 deoptimization_index_(deoptimization_index) { } |
| 44 virtual ~SafepointGenerator() { } |
| 45 |
| 46 virtual void Generate() { |
| 47 codegen_->RecordSafepoint(pointers_, deoptimization_index_); |
| 48 } |
| 49 |
| 50 private: |
| 51 LCodeGen* codegen_; |
| 52 LPointerMap* pointers_; |
| 53 int deoptimization_index_; |
| 54 }; |
| 55 |
| 56 |
| 57 #define __ masm()-> |
| 58 |
| 59 bool LCodeGen::GenerateCode() { |
| 60 HPhase phase("Code generation", chunk()); |
| 61 ASSERT(is_unused()); |
| 62 status_ = GENERATING; |
| 63 CpuFeatures::Scope scope1(VFP3); |
| 64 CpuFeatures::Scope scope2(ARMv7); |
| 65 return GeneratePrologue() && |
| 66 GenerateBody() && |
| 67 GenerateDeferredCode() && |
| 68 GenerateSafepointTable(); |
| 69 } |
| 70 |
| 71 |
| 72 void LCodeGen::FinishCode(Handle<Code> code) { |
| 73 ASSERT(is_done()); |
| 74 code->set_stack_slots(StackSlotCount()); |
| 75 code->set_safepoint_table_start(safepoints_.GetCodeOffset()); |
| 76 PopulateDeoptimizationData(code); |
| 77 } |
| 78 |
| 79 |
| 80 void LCodeGen::Abort(const char* format, ...) { |
| 81 if (FLAG_trace_bailout) { |
| 82 SmartPointer<char> debug_name = graph()->debug_name()->ToCString(); |
| 83 PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name); |
| 84 va_list arguments; |
| 85 va_start(arguments, format); |
| 86 OS::VPrint(format, arguments); |
| 87 va_end(arguments); |
| 88 PrintF("\n"); |
| 89 } |
| 90 status_ = ABORTED; |
| 91 } |
| 92 |
| 93 |
| 94 void LCodeGen::Comment(const char* format, ...) { |
| 95 if (!FLAG_code_comments) return; |
| 96 char buffer[4 * KB]; |
| 97 StringBuilder builder(buffer, ARRAY_SIZE(buffer)); |
| 98 va_list arguments; |
| 99 va_start(arguments, format); |
| 100 builder.AddFormattedList(format, arguments); |
| 101 va_end(arguments); |
| 102 |
| 103 // Copy the string before recording it in the assembler to avoid |
| 104 // issues when the stack-allocated buffer goes out of scope. |
| 105 size_t length = builder.position(); |
| 106 Vector<char> copy = Vector<char>::New(length + 1); |
| 107 memcpy(copy.start(), builder.Finalize(), copy.length()); |
| 108 masm()->RecordComment(copy.start()); |
| 109 } |
| 110 |
| 111 |
| 112 bool LCodeGen::GeneratePrologue() { |
| 113 ASSERT(is_generating()); |
| 114 |
| 115 #ifdef DEBUG |
| 116 if (strlen(FLAG_stop_at) > 0 && |
| 117 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { |
| 118 __ stop("stop_at"); |
| 119 } |
| 120 #endif |
| 121 |
| 122 // r1: Callee's JS function. |
| 123 // cp: Callee's context. |
| 124 // fp: Caller's frame pointer. |
| 125 // lr: Caller's pc. |
| 126 |
| 127 __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); |
| 128 __ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP. |
| 129 |
| 130 // Reserve space for the stack slots needed by the code. |
| 131 int slots = StackSlotCount(); |
| 132 if (slots > 0) { |
| 133 if (FLAG_debug_code) { |
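| // Zap the reserved slots with a recognizable value so that stale slot |
| // contents are easy to spot when debugging generated code. |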
| 134 __ mov(r0, Operand(slots)); |
| 135 __ mov(r2, Operand(kSlotsZapValue)); |
| 136 Label loop; |
| 137 __ bind(&loop); |
| 138 __ push(r2); |
| 139 __ sub(r0, r0, Operand(1), SetCC); |
| 140 __ b(ne, &loop); |
| 141 } else { |
| 142 __ sub(sp, sp, Operand(slots * kPointerSize)); |
| 143 } |
| 144 } |
| 145 |
| 146 // Trace the call. |
| 147 if (FLAG_trace) { |
| 148 __ CallRuntime(Runtime::kTraceEnter, 0); |
| 149 } |
| 150 return !is_aborted(); |
| 151 } |
| 152 |
| 153 |
| 154 bool LCodeGen::GenerateBody() { |
| 155 ASSERT(is_generating()); |
| 156 bool emit_instructions = true; |
| 157 for (current_instruction_ = 0; |
| 158 !is_aborted() && current_instruction_ < instructions_->length(); |
| 159 current_instruction_++) { |
| 160 LInstruction* instr = instructions_->at(current_instruction_); |
| 161 if (instr->IsLabel()) { |
| 162 LLabel* label = LLabel::cast(instr); |
| 163 emit_instructions = !label->HasReplacement(); |
| 164 } |
| 165 |
| 166 if (emit_instructions) { |
| 167 Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic()); |
| 168 instr->CompileToNative(this); |
| 169 } |
| 170 } |
| 171 return !is_aborted(); |
| 172 } |
| 173 |
| 174 |
| 175 LInstruction* LCodeGen::GetNextInstruction() { |
| 176 if (current_instruction_ < instructions_->length() - 1) { |
| 177 return instructions_->at(current_instruction_ + 1); |
| 178 } else { |
| 179 return NULL; |
| 180 } |
| 181 } |
| 182 |
| 183 |
| 184 bool LCodeGen::GenerateDeferredCode() { |
| 185 ASSERT(is_generating()); |
| 186 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { |
| 187 LDeferredCode* code = deferred_[i]; |
| 188 __ bind(code->entry()); |
| 189 code->Generate(); |
| 190 __ jmp(code->exit()); |
| 191 } |
| 192 |
| 193 // Deferred code is the last part of the instruction sequence. Mark |
| 194 // the generated code as done unless we bailed out. |
| 195 if (!is_aborted()) status_ = DONE; |
| 196 return !is_aborted(); |
| 197 } |
| 198 |
| 199 |
| 200 bool LCodeGen::GenerateSafepointTable() { |
| 201 ASSERT(is_done()); |
| 202 safepoints_.Emit(masm(), StackSlotCount()); |
| 203 return !is_aborted(); |
| 204 } |
| 205 |
| 206 |
| 207 Register LCodeGen::ToRegister(int index) const { |
| 208 return Register::FromAllocationIndex(index); |
| 209 } |
| 210 |
| 211 |
| 212 DoubleRegister LCodeGen::ToDoubleRegister(int index) const { |
| 213 return DoubleRegister::FromAllocationIndex(index); |
| 214 } |
| 215 |
| 216 |
| 217 Register LCodeGen::ToRegister(LOperand* op) const { |
| 218 ASSERT(op->IsRegister()); |
| 219 return ToRegister(op->index()); |
| 220 } |
| 221 |
| 222 |
| 223 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { |
| 224 if (op->IsRegister()) { |
| 225 return ToRegister(op->index()); |
| 226 } else if (op->IsConstantOperand()) { |
| 227 __ mov(scratch, ToOperand(op)); |
| 228 return scratch; |
| 229 } else if (op->IsStackSlot() || op->IsArgument()) { |
| 230 __ ldr(scratch, ToMemOperand(op)); |
| 231 return scratch; |
| 232 } |
| 233 UNREACHABLE(); |
| 234 return scratch; |
| 235 } |
| 236 |
| 237 |
| 238 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { |
| 239 ASSERT(op->IsDoubleRegister()); |
| 240 return ToDoubleRegister(op->index()); |
| 241 } |
| 242 |
| 243 |
| 244 DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, |
| 245 SwVfpRegister flt_scratch, |
| 246 DoubleRegister dbl_scratch) { |
| 247 if (op->IsDoubleRegister()) { |
| 248 return ToDoubleRegister(op->index()); |
| 249 } else if (op->IsConstantOperand()) { |
| 250 LConstantOperand* const_op = LConstantOperand::cast(op); |
| 251 Handle<Object> literal = chunk_->LookupLiteral(const_op); |
| 252 Representation r = chunk_->LookupLiteralRepresentation(const_op); |
| 253 if (r.IsInteger32()) { |
| 254 ASSERT(literal->IsNumber()); |
| 255 __ mov(ip, Operand(static_cast<int32_t>(literal->Number()))); |
| 256 __ vmov(flt_scratch, ip); |
| 257 __ vcvt_f64_s32(dbl_scratch, flt_scratch); |
| 258 return dbl_scratch; |
| 259 } else if (r.IsDouble()) { |
| 260 Abort("unsupported double immediate"); |
| 261 } else if (r.IsTagged()) { |
| 262 Abort("unsupported tagged immediate"); |
| 263 } |
| 264 } else if (op->IsStackSlot() || op->IsArgument()) { |
| 265 // TODO(regis): Why is vldr not taking a MemOperand? |
| 266 // __ vldr(dbl_scratch, ToMemOperand(op)); |
| 267 MemOperand mem_op = ToMemOperand(op); |
| 268 __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset()); |
| 269 return dbl_scratch; |
| 270 } |
| 271 UNREACHABLE(); |
| 272 return dbl_scratch; |
| 273 } |
| 274 |
| 275 |
| 276 int LCodeGen::ToInteger32(LConstantOperand* op) const { |
| 277 Handle<Object> value = chunk_->LookupLiteral(op); |
| 278 ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32()); |
| 279 ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) == |
| 280 value->Number()); |
| 281 return static_cast<int32_t>(value->Number()); |
| 282 } |
| 283 |
| 284 |
| 285 Operand LCodeGen::ToOperand(LOperand* op) { |
| 286 if (op->IsConstantOperand()) { |
| 287 LConstantOperand* const_op = LConstantOperand::cast(op); |
| 288 Handle<Object> literal = chunk_->LookupLiteral(const_op); |
| 289 Representation r = chunk_->LookupLiteralRepresentation(const_op); |
| 290 if (r.IsInteger32()) { |
| 291 ASSERT(literal->IsNumber()); |
| 292 return Operand(static_cast<int32_t>(literal->Number())); |
| 293 } else if (r.IsDouble()) { |
| 294 Abort("ToOperand Unsupported double immediate."); |
| 295 } |
| 296 ASSERT(r.IsTagged()); |
| 297 return Operand(literal); |
| 298 } else if (op->IsRegister()) { |
| 299 return Operand(ToRegister(op)); |
| 300 } else if (op->IsDoubleRegister()) { |
| 301 Abort("ToOperand IsDoubleRegister unimplemented"); |
| 302 return Operand(0); |
| 303 } |
| 304 // Stack slots not implemented, use ToMemOperand instead. |
| 305 UNREACHABLE(); |
| 306 return Operand(0); |
| 307 } |
| 308 |
| 309 |
| 310 MemOperand LCodeGen::ToMemOperand(LOperand* op) const { |
| 311 // TODO(regis): Revisit. |
| 312 ASSERT(!op->IsRegister()); |
| 313 ASSERT(!op->IsDoubleRegister()); |
| 314 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); |
| 315 int index = op->index(); |
| 316 if (index >= 0) { |
| 317 // Local or spill slot. Skip the frame pointer, function, and |
| 318 // context in the fixed part of the frame. |
| 319 return MemOperand(fp, -(index + 3) * kPointerSize); |
| 320 } else { |
| 321 // Incoming parameter. Skip the return address. |
| 322 return MemOperand(fp, -(index - 1) * kPointerSize); |
| 323 } |
| 324 } |
| 325 |
| 326 |
| 327 void LCodeGen::AddToTranslation(Translation* translation, |
| 328 LOperand* op, |
| 329 bool is_tagged) { |
| 330 if (op == NULL) { |
| 331 // TODO(twuerthinger): Introduce marker operands to indicate that this value |
| 332 // is not present and must be reconstructed from the deoptimizer. Currently |
| 333 // this is only used for the arguments object. |
| 334 translation->StoreArgumentsObject(); |
| 335 } else if (op->IsStackSlot()) { |
| 336 if (is_tagged) { |
| 337 translation->StoreStackSlot(op->index()); |
| 338 } else { |
| 339 translation->StoreInt32StackSlot(op->index()); |
| 340 } |
| 341 } else if (op->IsDoubleStackSlot()) { |
| 342 translation->StoreDoubleStackSlot(op->index()); |
| 343 } else if (op->IsArgument()) { |
| 344 ASSERT(is_tagged); |
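| // Pushed arguments follow the spill slots in the frame, so their |
| // translation index is offset by the total number of stack slots. |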
| 345 int src_index = StackSlotCount() + op->index(); |
| 346 translation->StoreStackSlot(src_index); |
| 347 } else if (op->IsRegister()) { |
| 348 Register reg = ToRegister(op); |
| 349 if (is_tagged) { |
| 350 translation->StoreRegister(reg); |
| 351 } else { |
| 352 translation->StoreInt32Register(reg); |
| 353 } |
| 354 } else if (op->IsDoubleRegister()) { |
| 355 DoubleRegister reg = ToDoubleRegister(op); |
| 356 translation->StoreDoubleRegister(reg); |
| 357 } else if (op->IsConstantOperand()) { |
| 358 Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op)); |
| 359 int src_index = DefineDeoptimizationLiteral(literal); |
| 360 translation->StoreLiteral(src_index); |
| 361 } else { |
| 362 UNREACHABLE(); |
| 363 } |
| 364 } |
| 365 |
| 366 |
| 367 void LCodeGen::CallCode(Handle<Code> code, |
| 368 RelocInfo::Mode mode, |
| 369 LInstruction* instr) { |
| 370 if (instr != NULL) { |
| 371 LPointerMap* pointers = instr->pointer_map(); |
| 372 RecordPosition(pointers->position()); |
| 373 __ Call(code, mode); |
| 374 RegisterLazyDeoptimization(instr); |
| 375 } else { |
| 376 LPointerMap no_pointers(0); |
| 377 RecordPosition(no_pointers.position()); |
| 378 __ Call(code, mode); |
| 379 RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex); |
| 380 } |
| 381 } |
| 382 |
| 383 |
| 384 void LCodeGen::CallRuntime(const Runtime::Function* function, |
| 385 int num_arguments, |
| 386 LInstruction* instr) { |
| 387 ASSERT(instr != NULL); |
| 388 LPointerMap* pointers = instr->pointer_map(); |
| 389 ASSERT(pointers != NULL); |
| 390 RecordPosition(pointers->position()); |
| 391 |
| 392 __ CallRuntime(function, num_arguments); |
| 393 // Runtime calls to Throw are not supposed to ever return at the |
| 394 // call site, so don't register lazy deoptimization for these. We do |
| 395 // however have to record a safepoint since throwing exceptions can |
| 396 // cause garbage collections. |
| 397 if (!instr->IsThrow()) { |
| 398 RegisterLazyDeoptimization(instr); |
| 399 } else { |
| 400 RecordSafepoint(instr->pointer_map(), Safepoint::kNoDeoptimizationIndex); |
| 401 } |
| 402 } |
| 403 |
| 404 |
| 405 void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) { |
| 406 // Create the environment to bail out to. If the call has side effects, |
| 407 // execution has to continue after the call; otherwise execution can |
| 408 // continue from a previous bailout point, repeating the call. |
| 409 LEnvironment* deoptimization_environment; |
| 410 if (instr->HasDeoptimizationEnvironment()) { |
| 411 deoptimization_environment = instr->deoptimization_environment(); |
| 412 } else { |
| 413 deoptimization_environment = instr->environment(); |
| 414 } |
| 415 |
| 416 RegisterEnvironmentForDeoptimization(deoptimization_environment); |
| 417 RecordSafepoint(instr->pointer_map(), |
| 418 deoptimization_environment->deoptimization_index()); |
| 419 } |
| 420 |
| 421 |
| 422 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) { |
| 423 if (!environment->HasBeenRegistered()) { |
| 424 // Physical stack frame layout: |
| 425 // -x ............. -4 0 ..................................... y |
| 426 // [incoming arguments] [spill slots] [pushed outgoing arguments] |
| 427 |
| 428 // Layout of the environment: |
| 429 // 0 ..................................................... size-1 |
| 430 // [parameters] [locals] [expression stack including arguments] |
| 431 |
| 432 // Layout of the translation: |
| 433 // 0 ........................................................ size - 1 + 4 |
| 434 // [expression stack including arguments] [locals] [4 words] [parameters] |
| 435 // |>------------ translation_size ------------<| |
| 436 |
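| // One frame is needed per environment in the chain: the function itself |
| // plus one for every function inlined into it. |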
| 437 int frame_count = 0; |
| 438 for (LEnvironment* e = environment; e != NULL; e = e->outer()) { |
| 439 ++frame_count; |
| 440 } |
| 441 Translation translation(&translations_, frame_count); |
| 442 environment->WriteTranslation(this, &translation); |
| 443 int deoptimization_index = deoptimizations_.length(); |
| 444 environment->Register(deoptimization_index, translation.index()); |
| 445 deoptimizations_.Add(environment); |
| 446 } |
| 447 } |
| 448 |
| 449 |
| 450 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { |
| 451 RegisterEnvironmentForDeoptimization(environment); |
| 452 ASSERT(environment->HasBeenRegistered()); |
| 453 int id = environment->deoptimization_index(); |
| 454 Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER); |
| 455 ASSERT(entry != NULL); |
| 456 if (entry == NULL) { |
| 457 Abort("bailout was not prepared"); |
| 458 return; |
| 459 } |
| 460 |
| 461 ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM. |
| 462 |
| 463 if (FLAG_deopt_every_n_times == 1 && |
| 464 info_->shared_info()->opt_count() == id) { |
| 465 __ Jump(entry, RelocInfo::RUNTIME_ENTRY); |
| 466 return; |
| 467 } |
| 468 |
| 469 if (cc == no_condition) { |
| 470 if (FLAG_trap_on_deopt) __ stop("trap_on_deopt"); |
| 471 __ Jump(entry, RelocInfo::RUNTIME_ENTRY); |
| 472 } else { |
| 473 if (FLAG_trap_on_deopt) { |
| 474 Label done; |
| 475 __ b(&done, NegateCondition(cc)); |
| 476 __ stop("trap_on_deopt"); |
| 477 __ Jump(entry, RelocInfo::RUNTIME_ENTRY); |
| 478 __ bind(&done); |
| 479 } else { |
| 480 __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc); |
| 481 } |
| 482 } |
| 483 } |
| 484 |
| 485 |
| 486 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { |
| 487 int length = deoptimizations_.length(); |
| 488 if (length == 0) return; |
| 489 ASSERT(FLAG_deopt); |
| 490 Handle<DeoptimizationInputData> data = |
| 491 FACTORY->NewDeoptimizationInputData(length, TENURED); |
| 492 |
| 493 data->SetTranslationByteArray(*translations_.CreateByteArray()); |
| 494 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); |
| 495 |
| 496 Handle<FixedArray> literals = |
| 497 FACTORY->NewFixedArray(deoptimization_literals_.length(), TENURED); |
| 498 for (int i = 0; i < deoptimization_literals_.length(); i++) { |
| 499 literals->set(i, *deoptimization_literals_[i]); |
| 500 } |
| 501 data->SetLiteralArray(*literals); |
| 502 |
| 503 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id())); |
| 504 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_)); |
| 505 |
| 506 // Populate the deoptimization entries. |
| 507 for (int i = 0; i < length; i++) { |
| 508 LEnvironment* env = deoptimizations_[i]; |
| 509 data->SetAstId(i, Smi::FromInt(env->ast_id())); |
| 510 data->SetTranslationIndex(i, Smi::FromInt(env->translation_index())); |
| 511 data->SetArgumentsStackHeight(i, |
| 512 Smi::FromInt(env->arguments_stack_height())); |
| 513 } |
| 514 code->set_deoptimization_data(*data); |
| 515 } |
| 516 |
| 517 |
| 518 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) { |
| 519 int result = deoptimization_literals_.length(); |
| 520 for (int i = 0; i < deoptimization_literals_.length(); ++i) { |
| 521 if (deoptimization_literals_[i].is_identical_to(literal)) return i; |
| 522 } |
| 523 deoptimization_literals_.Add(literal); |
| 524 return result; |
| 525 } |
| 526 |
| 527 |
| 528 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { |
| 529 ASSERT(deoptimization_literals_.length() == 0); |
| 530 |
| 531 const ZoneList<Handle<JSFunction> >* inlined_closures = |
| 532 chunk()->inlined_closures(); |
| 533 |
| 534 for (int i = 0, length = inlined_closures->length(); |
| 535 i < length; |
| 536 i++) { |
| 537 DefineDeoptimizationLiteral(inlined_closures->at(i)); |
| 538 } |
| 539 |
| 540 inlined_function_count_ = deoptimization_literals_.length(); |
| 541 } |
| 542 |
| 543 |
| 544 void LCodeGen::RecordSafepoint(LPointerMap* pointers, |
| 545 int deoptimization_index) { |
| 546 const ZoneList<LOperand*>* operands = pointers->operands(); |
| 547 Safepoint safepoint = safepoints_.DefineSafepoint(masm(), |
| 548 deoptimization_index); |
| 549 for (int i = 0; i < operands->length(); i++) { |
| 550 LOperand* pointer = operands->at(i); |
| 551 if (pointer->IsStackSlot()) { |
| 552 safepoint.DefinePointerSlot(pointer->index()); |
| 553 } |
| 554 } |
| 555 } |
| 556 |
| 557 |
| 558 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, |
| 559 int arguments, |
| 560 int deoptimization_index) { |
| 561 const ZoneList<LOperand*>* operands = pointers->operands(); |
| 562 Safepoint safepoint = |
| 563 safepoints_.DefineSafepointWithRegisters( |
| 564 masm(), arguments, deoptimization_index); |
| 565 for (int i = 0; i < operands->length(); i++) { |
| 566 LOperand* pointer = operands->at(i); |
| 567 if (pointer->IsStackSlot()) { |
| 568 safepoint.DefinePointerSlot(pointer->index()); |
| 569 } else if (pointer->IsRegister()) { |
| 570 safepoint.DefinePointerRegister(ToRegister(pointer)); |
| 571 } |
| 572 } |
| 573 // Register cp always contains a pointer to the context. |
| 574 safepoint.DefinePointerRegister(cp); |
| 575 } |
| 576 |
| 577 |
| 578 void LCodeGen::RecordPosition(int position) { |
| 579 if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return; |
| 580 masm()->positions_recorder()->RecordPosition(position); |
| 581 } |
| 582 |
| 583 |
| 584 void LCodeGen::DoLabel(LLabel* label) { |
| 585 if (label->is_loop_header()) { |
| 586 Comment(";;; B%d - LOOP entry", label->block_id()); |
| 587 } else { |
| 588 Comment(";;; B%d", label->block_id()); |
| 589 } |
| 590 __ bind(label->label()); |
| 591 current_block_ = label->block_id(); |
| 592 LCodeGen::DoGap(label); |
| 593 } |
| 594 |
| 595 |
| 596 void LCodeGen::DoParallelMove(LParallelMove* move) { |
| 597 // d0 must always be a scratch register. |
| 598 DoubleRegister dbl_scratch = d0; |
| 599 LUnallocated marker_operand(LUnallocated::NONE); |
| 600 |
| 601 Register core_scratch = r9; |
| 602 bool destroys_core_scratch = false; |
| 603 |
| 604 LGapResolver resolver(move->move_operands(), &marker_operand); |
| 605 const ZoneList<LMoveOperands>* moves = resolver.ResolveInReverseOrder(); |
| 606 for (int i = moves->length() - 1; i >= 0; --i) { |
| 607 LMoveOperands move = moves->at(i); |
| 608 LOperand* from = move.from(); |
| 609 LOperand* to = move.to(); |
| 610 ASSERT(!from->IsDoubleRegister() || |
| 611 !ToDoubleRegister(from).is(dbl_scratch)); |
| 612 ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(dbl_scratch)); |
| 613 ASSERT(!from->IsRegister() || !ToRegister(from).is(core_scratch)); |
| 614 ASSERT(!to->IsRegister() || !ToRegister(to).is(core_scratch)); |
| 615 if (from == &marker_operand) { |
| 616 if (to->IsRegister()) { |
| 617 __ mov(ToRegister(to), core_scratch); |
| 618 ASSERT(destroys_core_scratch); |
| 619 } else if (to->IsStackSlot()) { |
| 620 __ str(core_scratch, ToMemOperand(to)); |
| 621 ASSERT(destroys_core_scratch); |
| 622 } else if (to->IsDoubleRegister()) { |
| 623 __ vmov(ToDoubleRegister(to), dbl_scratch); |
| 624 } else { |
| 625 ASSERT(to->IsDoubleStackSlot()); |
| 626 // TODO(regis): Why is vstr not taking a MemOperand? |
| 627 // __ vstr(dbl_scratch, ToMemOperand(to)); |
| 628 MemOperand to_operand = ToMemOperand(to); |
| 629 __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset()); |
| 630 } |
| 631 } else if (to == &marker_operand) { |
| 632 if (from->IsRegister() || from->IsConstantOperand()) { |
| 633 __ mov(core_scratch, ToOperand(from)); |
| 634 destroys_core_scratch = true; |
| 635 } else if (from->IsStackSlot()) { |
| 636 __ ldr(core_scratch, ToMemOperand(from)); |
| 637 destroys_core_scratch = true; |
| 638 } else if (from->IsDoubleRegister()) { |
| 639 __ vmov(dbl_scratch, ToDoubleRegister(from)); |
| 640 } else { |
| 641 ASSERT(from->IsDoubleStackSlot()); |
| 642 // TODO(regis): Why is vldr not taking a MemOperand? |
| 643 // __ vldr(dbl_scratch, ToMemOperand(from)); |
| 644 MemOperand from_operand = ToMemOperand(from); |
| 645 __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset()); |
| 646 } |
| 647 } else if (from->IsConstantOperand()) { |
| 648 if (to->IsRegister()) { |
| 649 __ mov(ToRegister(to), ToOperand(from)); |
| 650 } else { |
| 651 ASSERT(to->IsStackSlot()); |
| 652 __ mov(ip, ToOperand(from)); |
| 653 __ str(ip, ToMemOperand(to)); |
| 654 } |
| 655 } else if (from->IsRegister()) { |
| 656 if (to->IsRegister()) { |
| 657 __ mov(ToRegister(to), ToOperand(from)); |
| 658 } else { |
| 659 ASSERT(to->IsStackSlot()); |
| 660 __ str(ToRegister(from), ToMemOperand(to)); |
| 661 } |
| 662 } else if (to->IsRegister()) { |
| 663 ASSERT(from->IsStackSlot()); |
| 664 __ ldr(ToRegister(to), ToMemOperand(from)); |
| 665 } else if (from->IsStackSlot()) { |
| 666 ASSERT(to->IsStackSlot()); |
| 667 __ ldr(ip, ToMemOperand(from)); |
| 668 __ str(ip, ToMemOperand(to)); |
| 669 } else if (from->IsDoubleRegister()) { |
| 670 if (to->IsDoubleRegister()) { |
| 671 __ vmov(ToDoubleRegister(to), ToDoubleRegister(from)); |
| 672 } else { |
| 673 ASSERT(to->IsDoubleStackSlot()); |
| 674 // TODO(regis): Why is vstr not taking a MemOperand? |
| 675 // __ vstr(dbl_scratch, ToMemOperand(to)); |
| 676 MemOperand to_operand = ToMemOperand(to); |
| 677 __ vstr(ToDoubleRegister(from), to_operand.rn(), to_operand.offset()); |
| 678 } |
| 679 } else if (to->IsDoubleRegister()) { |
| 680 ASSERT(from->IsDoubleStackSlot()); |
| 681 // TODO(regis): Why is vldr not taking a MemOperand? |
| 682 // __ vldr(ToDoubleRegister(to), ToMemOperand(from)); |
| 683 MemOperand from_operand = ToMemOperand(from); |
| 684 __ vldr(ToDoubleRegister(to), from_operand.rn(), from_operand.offset()); |
| 685 } else { |
| 686 ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot()); |
| 687 // TODO(regis): Why is vldr not taking a MemOperand? |
| 688 // __ vldr(dbl_scratch, ToMemOperand(from)); |
| 689 MemOperand from_operand = ToMemOperand(from); |
| 690 __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset()); |
| 691 // TODO(regis): Why is vstr not taking a MemOperand? |
| 692 // __ vstr(dbl_scratch, ToMemOperand(to)); |
| 693 MemOperand to_operand = ToMemOperand(to); |
| 694 __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset()); |
| 695 } |
| 696 } |
| 697 |
| 698 if (destroys_core_scratch) { |
| 699 __ ldr(core_scratch, MemOperand(fp, -kPointerSize)); |
| 700 } |
| 701 |
| 702 LInstruction* next = GetNextInstruction(); |
| 703 if (next != NULL && next->IsLazyBailout()) { |
| 704 int pc = masm()->pc_offset(); |
| 705 safepoints_.SetPcAfterGap(pc); |
| 706 } |
| 707 } |
| 708 |
| 709 |
| 710 void LCodeGen::DoGap(LGap* gap) { |
| 711 for (int i = LGap::FIRST_INNER_POSITION; |
| 712 i <= LGap::LAST_INNER_POSITION; |
| 713 i++) { |
| 714 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i); |
| 715 LParallelMove* move = gap->GetParallelMove(inner_pos); |
| 716 if (move != NULL) DoParallelMove(move); |
| 717 } |
| 718 |
| 719 LInstruction* next = GetNextInstruction(); |
| 720 if (next != NULL && next->IsLazyBailout()) { |
| 721 int pc = masm()->pc_offset(); |
| 722 safepoints_.SetPcAfterGap(pc); |
| 723 } |
| 724 } |
| 725 |
| 726 |
| 727 void LCodeGen::DoParameter(LParameter* instr) { |
| 728 // Nothing to do. |
| 729 } |
| 730 |
| 731 |
| 732 void LCodeGen::DoCallStub(LCallStub* instr) { |
| 733 Abort("DoCallStub unimplemented."); |
| 734 } |
| 735 |
| 736 |
| 737 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { |
| 738 // Nothing to do. |
| 739 } |
| 740 |
| 741 |
| 742 void LCodeGen::DoModI(LModI* instr) { |
| 743 Abort("DoModI unimplemented."); |
| 744 } |
| 745 |
| 746 |
| 747 void LCodeGen::DoDivI(LDivI* instr) { |
| 748 Abort("DoDivI unimplemented."); |
| 749 } |
| 750 |
| 751 |
| 752 void LCodeGen::DoMulI(LMulI* instr) { |
| 753 Register left = ToRegister(instr->left()); |
| 754 Register scratch = r9; |
| 755 Register right = EmitLoadRegister(instr->right(), scratch); |
| 756 |
| 757 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero) && |
| 758 !instr->right()->IsConstantOperand()) { |
| 759 __ orr(ToRegister(instr->temp()), left, right); |
| 760 } |
| 761 |
| 762 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
| 763 // scratch:left = left * right. |
| 764 __ smull(left, scratch, left, right); |
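| // The product fits into 32 bits only if the high word (scratch) equals |
| // the sign extension of the low word (left); otherwise deoptimize. |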
| 765 __ mov(ip, Operand(left, ASR, 31)); |
| 766 __ cmp(ip, Operand(scratch)); |
| 767 DeoptimizeIf(ne, instr->environment()); |
| 768 } else { |
| 769 __ mul(left, left, right); |
| 770 } |
| 771 |
| 772 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 773 // Bail out if the result is supposed to be negative zero. |
| 774 Label done; |
| 775 __ tst(left, Operand(left)); |
| 776 __ b(ne, &done); |
| 777 if (instr->right()->IsConstantOperand()) { |
| 778 if (ToInteger32(LConstantOperand::cast(instr->right())) < 0) { |
| 779 DeoptimizeIf(no_condition, instr->environment()); |
| 780 } |
| 781 } else { |
| 782 // Test the non-zero operand for negative sign. |
| 783 __ cmp(ToRegister(instr->temp()), Operand(0)); |
| 784 DeoptimizeIf(mi, instr->environment()); |
| 785 } |
| 786 __ bind(&done); |
| 787 } |
| 788 } |
| 789 |
| 790 |
| 791 void LCodeGen::DoBitI(LBitI* instr) { |
| 792 LOperand* left = instr->left(); |
| 793 LOperand* right = instr->right(); |
| 794 ASSERT(left->Equals(instr->result())); |
| 795 ASSERT(left->IsRegister()); |
| 796 Register result = ToRegister(left); |
| 797 Register right_reg = EmitLoadRegister(right, ip); |
| 798 switch (instr->op()) { |
| 799 case Token::BIT_AND: |
| 800 __ and_(result, ToRegister(left), Operand(right_reg)); |
| 801 break; |
| 802 case Token::BIT_OR: |
| 803 __ orr(result, ToRegister(left), Operand(right_reg)); |
| 804 break; |
| 805 case Token::BIT_XOR: |
| 806 __ eor(result, ToRegister(left), Operand(right_reg)); |
| 807 break; |
| 808 default: |
| 809 UNREACHABLE(); |
| 810 break; |
| 811 } |
| 812 } |
| 813 |
| 814 |
| 815 void LCodeGen::DoShiftI(LShiftI* instr) { |
| 816 LOperand* left = instr->left(); |
| 817 LOperand* right = instr->right(); |
| 818 ASSERT(left->Equals(instr->result())); |
| 819 ASSERT(left->IsRegister()); |
| 820 Register result = ToRegister(left); |
| 821 if (right->IsRegister()) { |
| 822 // Mask the right operand. |
| 823 __ and_(r9, ToRegister(right), Operand(0x1F)); |
| 824 switch (instr->op()) { |
| 825 case Token::SAR: |
| 826 __ mov(result, Operand(result, ASR, r9)); |
| 827 break; |
| 828 case Token::SHR: |
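| // An unsigned shift by zero positions leaves the sign bit intact; such a |
| // result cannot be represented as a signed 32-bit integer and forces a deopt. |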
| 829 if (instr->can_deopt()) { |
| 830 __ mov(result, Operand(result, LSR, r9), SetCC); |
| 831 DeoptimizeIf(mi, instr->environment()); |
| 832 } else { |
| 833 __ mov(result, Operand(result, LSR, r9)); |
| 834 } |
| 835 break; |
| 836 case Token::SHL: |
| 837 __ mov(result, Operand(result, LSL, r9)); |
| 838 break; |
| 839 default: |
| 840 UNREACHABLE(); |
| 841 break; |
| 842 } |
| 843 } else { |
| 844 int value = ToInteger32(LConstantOperand::cast(right)); |
| 845 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); |
| 846 switch (instr->op()) { |
| 847 case Token::SAR: |
| 848 if (shift_count != 0) { |
| 849 __ mov(result, Operand(result, ASR, shift_count)); |
| 850 } |
| 851 break; |
| 852 case Token::SHR: |
| 853 if (shift_count == 0 && instr->can_deopt()) { |
| 854 __ tst(result, Operand(0x80000000)); |
| 855 DeoptimizeIf(ne, instr->environment()); |
| 856 } else { |
| 857 __ mov(result, Operand(result, LSR, shift_count)); |
| 858 } |
| 859 break; |
| 860 case Token::SHL: |
| 861 if (shift_count != 0) { |
| 862 __ mov(result, Operand(result, LSL, shift_count)); |
| 863 } |
| 864 break; |
| 865 default: |
| 866 UNREACHABLE(); |
| 867 break; |
| 868 } |
| 869 } |
| 870 } |
| 871 |
| 872 |
| 873 void LCodeGen::DoSubI(LSubI* instr) { |
| 874 Register left = ToRegister(instr->left()); |
| 875 Register right = EmitLoadRegister(instr->right(), ip); |
| 876 ASSERT(instr->left()->Equals(instr->result())); |
| 877 __ sub(left, left, right, SetCC); |
| 878 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
| 879 DeoptimizeIf(vs, instr->environment()); |
| 880 } |
| 881 } |
| 882 |
| 883 |
| 884 void LCodeGen::DoConstantI(LConstantI* instr) { |
| 885 ASSERT(instr->result()->IsRegister()); |
| 886 __ mov(ToRegister(instr->result()), Operand(instr->value())); |
| 887 } |
| 888 |
| 889 |
| 890 void LCodeGen::DoConstantD(LConstantD* instr) { |
| 891 Abort("DoConstantD unimplemented."); |
| 892 } |
| 893 |
| 894 |
| 895 void LCodeGen::DoConstantT(LConstantT* instr) { |
| 896 ASSERT(instr->result()->IsRegister()); |
| 897 __ mov(ToRegister(instr->result()), Operand(instr->value())); |
| 898 } |
| 899 |
| 900 |
| 901 void LCodeGen::DoArrayLength(LArrayLength* instr) { |
| 902 Register result = ToRegister(instr->result()); |
| 903 |
| 904 if (instr->hydrogen()->value()->IsLoadElements()) { |
| 905 // We load the length directly from the elements array. |
| 906 Register elements = ToRegister(instr->input()); |
| 907 __ ldr(result, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
| 908 } else { |
| 909 // Check that the receiver really is an array. |
| 910 Register array = ToRegister(instr->input()); |
| 911 Register temporary = ToRegister(instr->temporary()); |
| 912 __ CompareObjectType(array, temporary, temporary, JS_ARRAY_TYPE); |
| 913 DeoptimizeIf(ne, instr->environment()); |
| 914 |
| 915 // Load length directly from the array. |
| 916 __ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset)); |
| 917 } |
| 918 Abort("DoArrayLength untested."); |
| 919 } |
| 920 |
| 921 |
| 922 void LCodeGen::DoValueOf(LValueOf* instr) { |
| 923 Abort("DoValueOf unimplemented."); |
| 924 } |
| 925 |
| 926 |
| 927 void LCodeGen::DoBitNotI(LBitNotI* instr) { |
| 928 LOperand* input = instr->input(); |
| 929 ASSERT(input->Equals(instr->result())); |
| 930 __ mvn(ToRegister(input), Operand(ToRegister(input))); |
| 931 Abort("DoBitNotI untested."); |
| 932 } |
| 933 |
| 934 |
| 935 void LCodeGen::DoThrow(LThrow* instr) { |
| 936 Register input_reg = EmitLoadRegister(instr->input(), ip); |
| 937 __ push(input_reg); |
| 938 CallRuntime(Runtime::kThrow, 1, instr); |
| 939 |
| 940 if (FLAG_debug_code) { |
| 941 __ stop("Unreachable code."); |
| 942 } |
| 943 } |
| 944 |
| 945 |
| 946 void LCodeGen::DoAddI(LAddI* instr) { |
| 947 LOperand* left = instr->left(); |
| 948 LOperand* right = instr->right(); |
| 949 ASSERT(left->Equals(instr->result())); |
| 950 |
| 951 Register right_reg = EmitLoadRegister(right, ip); |
| 952 __ add(ToRegister(left), ToRegister(left), Operand(right_reg), SetCC); |
| 953 |
| 954 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
| 955 DeoptimizeIf(vs, instr->environment()); |
| 956 } |
| 957 } |
| 958 |
| 959 |
| 960 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { |
| 961 DoubleRegister left = ToDoubleRegister(instr->left()); |
| 962 DoubleRegister right = ToDoubleRegister(instr->right()); |
| 963 switch (instr->op()) { |
| 964 case Token::ADD: |
| 965 __ vadd(left, left, right); |
| 966 break; |
| 967 case Token::SUB: |
| 968 __ vsub(left, left, right); |
| 969 break; |
| 970 case Token::MUL: |
| 971 __ vmul(left, left, right); |
| 972 break; |
| 973 case Token::DIV: |
| 974 __ vdiv(left, left, right); |
| 975 break; |
| 976 case Token::MOD: { |
| 977 Abort("DoArithmeticD unimplemented for MOD."); |
| 978 break; |
| 979 } |
| 980 default: |
| 981 UNREACHABLE(); |
| 982 break; |
| 983 } |
| 984 } |
| 985 |
| 986 |
| 987 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { |
| 988 ASSERT(ToRegister(instr->left()).is(r1)); |
| 989 ASSERT(ToRegister(instr->right()).is(r0)); |
| 990 ASSERT(ToRegister(instr->result()).is(r0)); |
| 991 |
| 992 // TODO(regis): Implement TypeRecordingBinaryOpStub and replace current |
| 993 // GenericBinaryOpStub: |
| 994 // TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE); |
| 995 GenericBinaryOpStub stub(instr->op(), NO_OVERWRITE, r1, r0); |
| 996 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 997 } |
| 998 |
| 999 |
| 1000 int LCodeGen::GetNextEmittedBlock(int block) { |
| 1001 for (int i = block + 1; i < graph()->blocks()->length(); ++i) { |
| 1002 LLabel* label = chunk_->GetLabel(i); |
| 1003 if (!label->HasReplacement()) return i; |
| 1004 } |
| 1005 return -1; |
| 1006 } |
| 1007 |
| 1008 |
| 1009 void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) { |
| 1010 int next_block = GetNextEmittedBlock(current_block_); |
| 1011 right_block = chunk_->LookupDestination(right_block); |
| 1012 left_block = chunk_->LookupDestination(left_block); |
| 1013 |
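| // Avoid an explicit branch when the target block is emitted right after |
| // this one; simply fall through instead. |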
| 1014 if (right_block == left_block) { |
| 1015 EmitGoto(left_block); |
| 1016 } else if (left_block == next_block) { |
| 1017 __ b(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block)); |
| 1018 } else if (right_block == next_block) { |
| 1019 __ b(cc, chunk_->GetAssemblyLabel(left_block)); |
| 1020 } else { |
| 1021 __ b(cc, chunk_->GetAssemblyLabel(left_block)); |
| 1022 __ b(chunk_->GetAssemblyLabel(right_block)); |
| 1023 } |
| 1024 } |
| 1025 |
| 1026 |
| 1027 void LCodeGen::DoBranch(LBranch* instr) { |
| 1028 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
| 1029 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
| 1030 |
| 1031 Representation r = instr->hydrogen()->representation(); |
| 1032 if (r.IsInteger32()) { |
| 1033 Register reg = ToRegister(instr->input()); |
| 1034 __ cmp(reg, Operand(0)); |
| 1035 EmitBranch(true_block, false_block, nz); |
| 1036 } else if (r.IsDouble()) { |
| 1037 DoubleRegister reg = ToDoubleRegister(instr->input()); |
| 1038 __ vcmp(reg, 0.0); |
| __ vmrs(pc);  // Move vector status bits to normal status bits. |
| 1039 EmitBranch(true_block, false_block, ne); |
| 1040 } else { |
| 1041 ASSERT(r.IsTagged()); |
| 1042 Register reg = ToRegister(instr->input()); |
| 1043 if (instr->hydrogen()->type().IsBoolean()) { |
| 1044 __ LoadRoot(ip, Heap::kTrueValueRootIndex); |
| 1045 __ cmp(reg, ip); |
| 1046 EmitBranch(true_block, false_block, eq); |
| 1047 } else { |
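| // Handle the common values inline: undefined and false are false, true is |
| // true, the smi zero is false and any other smi is true. Heap numbers are |
| // compared against 0.0; everything else is handled by the ToBoolean stub. |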
| 1048 Label* true_label = chunk_->GetAssemblyLabel(true_block); |
| 1049 Label* false_label = chunk_->GetAssemblyLabel(false_block); |
| 1050 |
| 1051 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
| 1052 __ cmp(reg, ip); |
| 1053 __ b(eq, false_label); |
| 1054 __ LoadRoot(ip, Heap::kTrueValueRootIndex); |
| 1055 __ cmp(reg, ip); |
| 1056 __ b(eq, true_label); |
| 1057 __ LoadRoot(ip, Heap::kFalseValueRootIndex); |
| 1058 __ cmp(reg, ip); |
| 1059 __ b(eq, false_label); |
| 1060 __ cmp(reg, Operand(0)); |
| 1061 __ b(eq, false_label); |
| 1062 __ tst(reg, Operand(kSmiTagMask)); |
| 1063 __ b(eq, true_label); |
| 1064 |
| 1065 // Test for double values. Zero is false. |
| 1066 Label call_stub; |
| 1067 DoubleRegister dbl_scratch = d0; |
| 1068 Register core_scratch = r9; |
| 1069 ASSERT(!reg.is(core_scratch)); |
| 1070 __ ldr(core_scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 1071 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
| 1072 __ cmp(core_scratch, Operand(ip)); |
| 1073 __ b(ne, &call_stub); |
| 1074 __ sub(ip, reg, Operand(kHeapObjectTag)); |
| 1075 __ vldr(dbl_scratch, ip, HeapNumber::kValueOffset); |
| 1076 __ vcmp(dbl_scratch, 0.0); |
| __ vmrs(pc);  // Move vector status bits to normal status bits. |
| 1077 __ b(eq, false_label); |
| 1078 __ b(true_label); |
| 1079 |
| 1080 // The conversion stub doesn't cause garbage collections so it's |
| 1081 // safe to not record a safepoint after the call. |
| 1082 __ bind(&call_stub); |
| 1083 ToBooleanStub stub(reg); |
| 1084 RegList saved_regs = kJSCallerSaved | kCalleeSaved; |
| 1085 __ stm(db_w, sp, saved_regs); |
| 1086 __ CallStub(&stub); |
| 1087 __ cmp(reg, Operand(0)); |
| 1088 __ ldm(ia_w, sp, saved_regs); |
| 1089 EmitBranch(true_block, false_block, nz); |
| 1090 } |
| 1091 } |
| 1092 } |
| 1093 |
| 1094 |
| 1095 void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) { |
| 1096 // TODO(srdjan): Perform stack overflow check if this goto needs it |
| 1097 // before jumping. |
| 1098 block = chunk_->LookupDestination(block); |
| 1099 int next_block = GetNextEmittedBlock(current_block_); |
| 1100 if (block != next_block) { |
| 1101 __ jmp(chunk_->GetAssemblyLabel(block)); |
| 1102 } |
| 1103 } |
| 1104 |
| 1105 |
| 1106 void LCodeGen::DoDeferredStackCheck(LGoto* instr) { |
| 1107 UNIMPLEMENTED(); |
| 1108 } |
| 1109 |
| 1110 |
| 1111 void LCodeGen::DoGoto(LGoto* instr) { |
| 1112 // TODO(srdjan): Implement deferred stack check. |
| 1113 EmitGoto(instr->block_id(), NULL); |
| 1114 } |
| 1115 |
| 1116 |
| 1117 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { |
| 1118 Condition cond = no_condition; |
| 1119 switch (op) { |
| 1120 case Token::EQ: |
| 1121 case Token::EQ_STRICT: |
| 1122 cond = eq; |
| 1123 break; |
| 1124 case Token::LT: |
| 1125 cond = is_unsigned ? lo : lt; |
| 1126 break; |
| 1127 case Token::GT: |
| 1128 cond = is_unsigned ? hi : gt; |
| 1129 break; |
| 1130 case Token::LTE: |
| 1131 cond = is_unsigned ? ls : le; |
| 1132 break; |
| 1133 case Token::GTE: |
| 1134 cond = is_unsigned ? hs : ge; |
| 1135 break; |
| 1136 case Token::IN: |
| 1137 case Token::INSTANCEOF: |
| 1138 default: |
| 1139 UNREACHABLE(); |
| 1140 } |
| 1141 return cond; |
| 1142 } |
| 1143 |
| 1144 |
| 1145 void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) { |
| 1146 __ cmp(ToRegister(left), ToOperand(right)); |
| 1147 Abort("EmitCmpI untested."); |
| 1148 } |
| 1149 |
| 1150 |
| 1151 void LCodeGen::DoCmpID(LCmpID* instr) { |
| 1152 Abort("DoCmpID unimplemented."); |
| 1153 } |
| 1154 |
| 1155 |
| 1156 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { |
| 1157 Abort("DoCmpIDAndBranch unimplemented."); |
| 1158 } |
| 1159 |
| 1160 |
| 1161 void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) { |
| 1162 Register left = ToRegister(instr->left()); |
| 1163 Register right = ToRegister(instr->right()); |
| 1164 Register result = ToRegister(instr->result()); |
| 1165 |
| 1166 __ cmp(left, Operand(right)); |
| 1167 __ LoadRoot(result, Heap::kTrueValueRootIndex, eq); |
| 1168 __ LoadRoot(result, Heap::kFalseValueRootIndex, ne); |
| 1169 Abort("DoCmpJSObjectEq untested."); |
| 1170 } |
| 1171 |
| 1172 |
| 1173 void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) { |
| 1174 Abort("DoCmpJSObjectEqAndBranch unimplemented."); |
| 1175 } |
| 1176 |
| 1177 |
| 1178 void LCodeGen::DoIsNull(LIsNull* instr) { |
| 1179 Abort("DoIsNull unimplemented."); |
| 1180 } |
| 1181 |
| 1182 |
| 1183 void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) { |
| 1184 Register reg = ToRegister(instr->input()); |
| 1185 |
| 1186 // TODO(fsc): If the expression is known to be a smi, then it's |
| 1187 // definitely not null. Jump to the false block. |
| 1188 |
| 1189 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
| 1190 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
| 1191 |
| 1192 __ LoadRoot(ip, Heap::kNullValueRootIndex); |
| 1193 __ cmp(reg, ip); |
| 1194 if (instr->is_strict()) { |
| 1195 EmitBranch(true_block, false_block, eq); |
| 1196 } else { |
| 1197 Label* true_label = chunk_->GetAssemblyLabel(true_block); |
| 1198 Label* false_label = chunk_->GetAssemblyLabel(false_block); |
| 1199 __ b(eq, true_label); |
| 1200 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
| 1201 __ cmp(reg, ip); |
| 1202 __ b(eq, true_label); |
| 1203 __ tst(reg, Operand(kSmiTagMask)); |
| 1204 __ b(eq, false_label); |
| 1205 // Check for undetectable objects by looking in the bit field in |
| 1206 // the map. The object has already been smi checked. |
| 1207 Register scratch = ToRegister(instr->temp()); |
| 1208 __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 1209 __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); |
| 1210 __ tst(scratch, Operand(1 << Map::kIsUndetectable)); |
| 1211 EmitBranch(true_block, false_block, ne); |
| 1212 } |
| 1213 } |
| 1214 |
| 1215 |
| 1216 void LCodeGen::DoIsSmi(LIsSmi* instr) { |
| 1217 ASSERT(instr->hydrogen()->value()->representation().IsTagged()); |
| 1218 Register result = ToRegister(instr->result()); |
| 1219 Register input_reg = EmitLoadRegister(instr->input(), ip); |
| 1220 __ tst(input_reg, Operand(kSmiTagMask)); |
| 1221 __ LoadRoot(result, Heap::kTrueValueRootIndex); |
| 1222 Label done; |
| 1223 __ b(eq, &done); |
| 1224 __ LoadRoot(result, Heap::kFalseValueRootIndex); |
| 1225 __ bind(&done); |
| 1226 } |
| 1227 |
| 1228 |
| 1229 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { |
| 1230 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
| 1231 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
| 1232 |
| 1233 Register input_reg = EmitLoadRegister(instr->input(), ip); |
| 1234 __ tst(input_reg, Operand(kSmiTagMask)); |
| 1235 EmitBranch(true_block, false_block, eq); |
| 1236 } |
| 1237 |
| 1238 |
| 1239 InstanceType LHasInstanceType::TestType() { |
| 1240 InstanceType from = hydrogen()->from(); |
| 1241 InstanceType to = hydrogen()->to(); |
| 1242 if (from == FIRST_TYPE) return to; |
| 1243 ASSERT(from == to || to == LAST_TYPE); |
| 1244 return from; |
| 1245 } |
| 1246 |
| 1247 |
| 1248 Condition LHasInstanceType::BranchCondition() { |
| 1249 InstanceType from = hydrogen()->from(); |
| 1250 InstanceType to = hydrogen()->to(); |
| 1251 if (from == to) return eq; |
| 1252 if (to == LAST_TYPE) return hs; |
| 1253 if (from == FIRST_TYPE) return ls; |
| 1254 UNREACHABLE(); |
| 1255 return eq; |
| 1256 } |
| 1257 |
| 1258 |
| 1259 void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) { |
| 1260 Abort("DoHasInstanceType unimplemented."); |
| 1261 } |
| 1262 |
| 1263 |
| 1264 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { |
| 1265 Register input = ToRegister(instr->input()); |
| 1266 Register temp = ToRegister(instr->temp()); |
| 1267 |
| 1268 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
| 1269 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
| 1270 |
| 1271 Label* false_label = chunk_->GetAssemblyLabel(false_block); |
| 1272 |
| 1273 __ tst(input, Operand(kSmiTagMask)); |
| 1274 __ b(eq, false_label); |
| 1275 |
| 1276 __ CompareObjectType(input, temp, temp, instr->TestType()); |
| 1277 EmitBranch(true_block, false_block, instr->BranchCondition()); |
| 1278 } |
| 1279 |
| 1280 |
| 1281 void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) { |
| 1282 Abort("DoHasCachedArrayIndex unimplemented."); |
| 1283 } |
| 1284 |
| 1285 |
| 1286 void LCodeGen::DoHasCachedArrayIndexAndBranch( |
| 1287 LHasCachedArrayIndexAndBranch* instr) { |
| 1288 Abort("DoHasCachedArrayIndexAndBranch unimplemented."); |
| 1289 } |
| 1290 |
| 1291 |
| 1292 // Branches to a label or falls through with the answer in the z flag. Trashes |
| 1293 // the temp registers, but not the input. Only input and temp2 may alias. |
| 1294 void LCodeGen::EmitClassOfTest(Label* is_true, |
| 1295 Label* is_false, |
| 1296 Handle<String> class_name, |
| 1297 Register input, |
| 1298 Register temp, |
| 1299 Register temp2) { |
| 1300 Abort("EmitClassOfTest unimplemented."); |
| 1301 } |
| 1302 |
| 1303 |
| 1304 void LCodeGen::DoClassOfTest(LClassOfTest* instr) { |
| 1305 Abort("DoClassOfTest unimplemented."); |
| 1306 } |
| 1307 |
| 1308 |
| 1309 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { |
| 1310 Abort("DoClassOfTestAndBranch unimplemented."); |
| 1311 } |
| 1312 |
| 1313 |
| 1314 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { |
| 1315 Abort("DoCmpMapAndBranch unimplemented."); |
| 1316 } |
| 1317 |
| 1318 |
| 1319 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { |
| 1320 Abort("DoInstanceOf unimplemented."); |
| 1321 } |
| 1322 |
| 1323 |
| 1324 void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) { |
| 1325 Abort("DoInstanceOfAndBranch unimplemented."); |
| 1326 } |
| 1327 |
| 1328 |
| 1329 |
| 1330 static Condition ComputeCompareCondition(Token::Value op) { |
| 1331 switch (op) { |
| 1332 case Token::EQ_STRICT: |
| 1333 case Token::EQ: |
| 1334 return eq; |
| 1335 case Token::LT: |
| 1336 return lt; |
| 1337 case Token::GT: |
| 1338 return gt; |
| 1339 case Token::LTE: |
| 1340 return le; |
| 1341 case Token::GTE: |
| 1342 return ge; |
| 1343 default: |
| 1344 UNREACHABLE(); |
| 1345 return no_condition; |
| 1346 } |
| 1347 } |
| 1348 |
| 1349 |
| 1350 void LCodeGen::DoCmpT(LCmpT* instr) { |
| 1351 Token::Value op = instr->op(); |
| 1352 |
| 1353 Handle<Code> ic = CompareIC::GetUninitialized(op); |
| 1354 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 1355 |
| 1356 Condition condition = ComputeCompareCondition(op); |
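| // The stub computes GT and LTE with the operands swapped, so the condition |
| // applied to its result must be reversed for these two tokens. |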
| 1357 if (op == Token::GT || op == Token::LTE) { |
| 1358 condition = ReverseCondition(condition); |
| 1359 } |
| 1360 __ cmp(r0, Operand(0)); |
| 1361 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex, |
| 1362 condition); |
| 1363 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex, |
| 1364 NegateCondition(condition)); |
| 1365 } |
| 1366 |
| 1367 |
| 1368 void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) { |
| 1369 Abort("DoCmpTAndBranch unimplemented."); |
| 1370 } |
| 1371 |
| 1372 |
| 1373 void LCodeGen::DoReturn(LReturn* instr) { |
| 1374 if (FLAG_trace) { |
| 1375 // Push the return value on the stack as the parameter. |
| 1376 // Runtime::TraceExit returns its parameter in r0. |
| 1377 __ push(r0); |
| 1378 __ CallRuntime(Runtime::kTraceExit, 1); |
| 1379 } |
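| // Drop the incoming parameters plus the receiver (hence the + 1) after |
| // tearing down the frame. |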
| 1380 int32_t sp_delta = (ParameterCount() + 1) * kPointerSize; |
| 1381 __ mov(sp, fp); |
| 1382 __ ldm(ia_w, sp, fp.bit() | lr.bit()); |
| 1383 __ add(sp, sp, Operand(sp_delta)); |
| 1384 __ Jump(lr); |
| 1385 } |
| 1386 |
| 1387 |
| 1388 void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) { |
| 1389 Register result = ToRegister(instr->result()); |
| 1390 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell()))); |
| 1391 __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset)); |
| 1392 if (instr->hydrogen()->check_hole_value()) { |
| 1393 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| 1394 __ cmp(result, ip); |
| 1395 DeoptimizeIf(eq, instr->environment()); |
| 1396 } |
| 1397 } |
| 1398 |
| 1399 |
| 1400 void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) { |
| 1401 Register value = ToRegister(instr->input()); |
| 1402 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell()))); |
| 1403 __ str(value, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset)); |
| 1404 } |
| 1405 |
| 1406 |
| 1407 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { |
| 1408 Abort("DoLoadNamedField unimplemented."); |
| 1409 } |
| 1410 |
| 1411 |
| 1412 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { |
| 1413 ASSERT(ToRegister(instr->object()).is(r0)); |
| 1414 ASSERT(ToRegister(instr->result()).is(r0)); |
| 1415 |
| 1416 // Name is always in r2. |
| 1417 __ mov(r2, Operand(instr->name())); |
| 1418 Handle<Code> ic( |
| 1419 Isolate::Current()->builtins()->builtin(Builtins::LoadIC_Initialize)); |
| 1420 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 1421 } |
| 1422 |
| 1423 |
| 1424 void LCodeGen::DoLoadElements(LLoadElements* instr) { |
| 1425 Abort("DoLoadElements unimplemented."); |
| 1426 } |
| 1427 |
| 1428 |
| 1429 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { |
| 1430 Abort("DoAccessArgumentsAt unimplemented."); |
| 1431 } |
| 1432 |
| 1433 |
| 1434 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { |
| 1435 Abort("DoLoadKeyedFastElement unimplemented."); |
| 1436 } |
| 1437 |
| 1438 |
| 1439 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { |
| 1440 ASSERT(ToRegister(instr->object()).is(r1)); |
| 1441 ASSERT(ToRegister(instr->key()).is(r0)); |
| 1442 |
| 1443 Handle<Code> ic(Isolate::Current()->builtins()-> |
| 1444 builtin(Builtins::KeyedLoadIC_Initialize)); |
| 1445 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 1446 } |
| 1447 |
| 1448 |
| 1449 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { |
| 1450 Abort("DoArgumentsElements unimplemented."); |
| 1451 } |
| 1452 |
| 1453 |
| 1454 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { |
| 1455 Abort("DoArgumentsLength unimplemented."); |
| 1456 } |
| 1457 |
| 1458 |
| 1459 void LCodeGen::DoApplyArguments(LApplyArguments* instr) { |
| 1460 Abort("DoApplyArguments unimplemented."); |
| 1461 } |
| 1462 |
| 1463 |
| 1464 void LCodeGen::DoPushArgument(LPushArgument* instr) { |
| 1465 LOperand* argument = instr->input(); |
| 1466 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { |
| 1467 Abort("DoPushArgument not implemented for double type."); |
| 1468 } else { |
| 1469 Register argument_reg = EmitLoadRegister(argument, ip); |
| 1470 __ push(argument_reg); |
| 1471 } |
| 1472 } |
| 1473 |
| 1474 |
| 1475 void LCodeGen::DoGlobalObject(LGlobalObject* instr) { |
| 1476 Register result = ToRegister(instr->result()); |
| 1477 __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX)); |
| 1478 } |
| 1479 |
| 1480 |
| 1481 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) { |
| 1482 Register result = ToRegister(instr->result()); |
| 1483 __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX)); |
| 1484 __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset)); |
| 1485 } |
| 1486 |
| 1487 |
| 1488 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, |
| 1489 int arity, |
| 1490 LInstruction* instr) { |
| 1491 // Change context if needed. |
| 1492 bool change_context = |
| 1493 (graph()->info()->closure()->context() != function->context()) || |
| 1494 scope()->contains_with() || |
| 1495 (scope()->num_heap_slots() > 0); |
| 1496 if (change_context) { |
| 1497 __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); |
| 1498 } |
| 1499 |
| 1500 // Set r0 to arguments count if adaptation is not needed. Assumes that r0 |
| 1501 // is available to write to at this point. |
| 1502 if (!function->NeedsArgumentsAdaption()) { |
| 1503 __ mov(r0, Operand(arity)); |
| 1504 } |
| 1505 |
| 1506 LPointerMap* pointers = instr->pointer_map(); |
| 1507 RecordPosition(pointers->position()); |
| 1508 |
| 1509 // Invoke function. |
| 1510 __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); |
| 1511 __ Call(ip); |
| 1512 |
| 1513 // Set up deoptimization. |
| 1514 RegisterLazyDeoptimization(instr); |
| 1515 |
| 1516 // Restore context. |
| 1517 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 1518 } |
| 1519 |
| 1520 |
| 1521 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) { |
| 1522 Abort("DoCallConstantFunction unimplemented."); |
| 1523 } |
| 1524 |
| 1525 |
| 1526 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { |
| 1527 Abort("DoDeferredMathAbsTaggedHeapNumber unimplemented."); |
| 1528 } |
| 1529 |
| 1530 |
| 1531 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { |
| 1532 Abort("LUnaryMathOperation unimplemented."); |
| 1533 } |
| 1534 |
| 1535 |
| 1536 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { |
| 1537 Abort("DoMathFloor unimplemented."); |
| 1538 } |
| 1539 |
| 1540 |
| 1541 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { |
| 1542 Abort("DoMathSqrt unimplemented."); |
| 1543 } |
| 1544 |
| 1545 |
| 1546 void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) { |
| 1547 ASSERT(instr->op() == kMathFloor || |
| 1548 instr->op() == kMathAbs || instr->op() == kMathSqrt); |
| 1549 |
| 1550 switch (instr->op()) { |
| 1551 case kMathAbs: |
| 1552 DoMathAbs(instr); |
| 1553 break; |
| 1554 case kMathFloor: |
| 1555 DoMathFloor(instr); |
| 1556 break; |
| 1557 case kMathSqrt: |
| 1558 DoMathSqrt(instr); |
| 1559 break; |
| 1560 default: |
| 1561 UNREACHABLE(); |
| 1562 } |
| 1563 } |
| 1564 |
| 1565 |
| 1566 void LCodeGen::DoCallKeyed(LCallKeyed* instr) { |
| 1567 Abort("DoCallKeyed unimplemented."); |
| 1568 } |
| 1569 |
| 1570 |
| 1571 void LCodeGen::DoCallNamed(LCallNamed* instr) { |
| 1572 ASSERT(ToRegister(instr->result()).is(r0)); |
| 1573 |
| 1574 int arity = instr->arity(); |
| 1575 Handle<Code> ic = Isolate::Current()->stub_cache()-> |
| 1576 ComputeCallInitialize(arity, NOT_IN_LOOP); |
| 1577 __ mov(r2, Operand(instr->name())); |
| 1578 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 1579 // Restore context register. |
| 1580 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 1581 } |
| 1582 |
| 1583 |
| 1584 void LCodeGen::DoCallFunction(LCallFunction* instr) { |
| 1585 Abort("DoCallFunction unimplemented."); |
| 1586 } |
| 1587 |
| 1588 |
| 1589 void LCodeGen::DoCallGlobal(LCallGlobal* instr) { |
| 1590 Abort("DoCallGlobal unimplemented."); |
| 1591 } |
| 1592 |
| 1593 |
| 1594 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) { |
| 1595 ASSERT(ToRegister(instr->result()).is(r0)); |
| 1596 __ mov(r1, Operand(instr->target())); |
| 1597 CallKnownFunction(instr->target(), instr->arity(), instr); |
| 1598 } |
| 1599 |
| 1600 |
| 1601 void LCodeGen::DoCallNew(LCallNew* instr) { |
| 1602 ASSERT(ToRegister(instr->input()).is(r1)); |
| 1603 ASSERT(ToRegister(instr->result()).is(r0)); |
| 1604 |
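|   // The constructor is passed in r1 and the argument count in r0. |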
| 1605 Handle<Code> builtin(Isolate::Current()->builtins()-> |
| 1606 builtin(Builtins::JSConstructCall)); |
| 1607 __ mov(r0, Operand(instr->arity())); |
| 1608 CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr); |
| 1609 } |
| 1610 |
| 1611 |
| 1612 void LCodeGen::DoCallRuntime(LCallRuntime* instr) { |
| 1613 CallRuntime(instr->function(), instr->arity(), instr); |
| 1614 } |
| 1615 |
| 1616 |
| 1617 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { |
| 1618 Abort("DoStoreNamedField unimplemented."); |
| 1619 } |
| 1620 |
| 1621 |
| 1622 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { |
| 1623 ASSERT(ToRegister(instr->object()).is(r1)); |
| 1624 ASSERT(ToRegister(instr->value()).is(r0)); |
| 1625 |
| 1626 // Name is always in r2. |
| 1627 __ mov(r2, Operand(instr->name())); |
| 1628 Handle<Code> ic(Isolate::Current()->builtins()-> |
| 1629 builtin(Builtins::StoreIC_Initialize)); |
| 1630 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 1631 } |
| 1632 |
| 1633 |
| 1634 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { |
| 1635 Abort("DoBoundsCheck unimplemented."); |
| 1636 } |
| 1637 |
| 1638 |
| 1639 void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { |
| 1640 Abort("DoStoreKeyedFastElement unimplemented."); |
| 1641 } |
| 1642 |
| 1643 |
| 1644 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { |
| 1645 ASSERT(ToRegister(instr->object()).is(r2)); |
| 1646 ASSERT(ToRegister(instr->key()).is(r1)); |
| 1647 ASSERT(ToRegister(instr->value()).is(r0)); |
| 1648 |
| 1649 Handle<Code> ic(Isolate::Current()->builtins()-> |
| 1650 builtin(Builtins::KeyedStoreIC_Initialize)); |
| 1651 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 1652 } |
| 1653 |
| 1654 |
| 1655 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { |
| 1656 Abort("DoInteger32ToDouble unimplemented."); |
| 1657 } |
| 1658 |
| 1659 |
| 1660 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { |
| 1661 class DeferredNumberTagI: public LDeferredCode { |
| 1662 public: |
| 1663 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) |
| 1664 : LDeferredCode(codegen), instr_(instr) { } |
| 1665 virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); } |
| 1666 private: |
| 1667 LNumberTagI* instr_; |
| 1668 }; |
| 1669 |
| 1670 LOperand* input = instr->input(); |
| 1671 ASSERT(input->IsRegister() && input->Equals(instr->result())); |
| 1672 Register reg = ToRegister(input); |
| 1673 |
| 1674 DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr); |
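|   // Tagging is a left shift by one; with SetCC the overflow flag (vs) is |
|   // set when the value does not fit in a smi, in which case we branch to |
|   // the deferred heap-number allocation. |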
| 1675 __ SmiTag(reg, SetCC); |
| 1676 __ b(vs, deferred->entry()); |
| 1677 __ bind(deferred->exit()); |
| 1678 } |
| 1679 |
| 1680 |
| 1681 void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) { |
| 1682 Label slow; |
| 1683 Register reg = ToRegister(instr->input()); |
| 1684 DoubleRegister dbl_scratch = d0; |
| 1685 SwVfpRegister flt_scratch = s0; |
| 1686 |
| 1687 // Preserve the value of all registers. |
| 1688 __ PushSafepointRegisters(); |
| 1689 |
| 1690 // There was overflow, so bits 30 and 31 of the original integer |
| 1691 // disagree. Try to allocate a heap number in new space and store |
| 1692 // the value in there. If that fails, call the runtime system. |
| 1693 Label done; |
| 1694 __ SmiUntag(reg); |
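|   // Untagging yields the original value with the wrong sign bit; flipping |
|   // bit 31 restores it (e.g. 0x40000000 tags to 0x80000000 with overflow, |
|   // untags to 0xc0000000, and the eor recovers 0x40000000). |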
| 1695 __ eor(reg, reg, Operand(0x80000000)); |
| 1696 __ vmov(flt_scratch, reg); |
| 1697 __ vcvt_f64_s32(dbl_scratch, flt_scratch); |
| 1698 if (FLAG_inline_new) { |
| 1699 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| 1700 __ AllocateHeapNumber(r5, r3, r4, r6, &slow); |
| 1701 if (!reg.is(r5)) __ mov(reg, r5); |
| 1702 __ b(&done); |
| 1703 } |
| 1704 |
| 1705 // Slow case: Call the runtime system to do the number allocation. |
| 1706 __ bind(&slow); |
| 1707 |
| 1708 // TODO(3095996): Put a valid pointer value in the stack slot where the result |
| 1709 // register is stored, as this register is in the pointer map, but contains an |
| 1710 // integer value. |
| 1711 __ mov(ip, Operand(0)); |
| 1712 int reg_stack_index = __ SafepointRegisterStackIndex(reg.code()); |
| 1713 __ str(ip, MemOperand(sp, reg_stack_index * kPointerSize)); |
| 1714 |
| 1715 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); |
| 1716 RecordSafepointWithRegisters( |
| 1717 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); |
| 1718 if (!reg.is(r0)) __ mov(reg, r0); |
| 1719 |
| 1720   // Done. Store the value in dbl_scratch into the value field of the |
| 1721   // allocated heap number. |
| 1722 __ bind(&done); |
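|   // reg holds a tagged pointer; strip the tag so ip is the raw object |
|   // address used as the base for vstr. |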
| 1723 __ sub(ip, reg, Operand(kHeapObjectTag)); |
| 1724 __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset); |
| 1725 __ str(reg, MemOperand(sp, reg_stack_index * kPointerSize)); |
| 1726 __ PopSafepointRegisters(); |
| 1727 } |
| 1728 |
| 1729 |
| 1730 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { |
| 1731 class DeferredNumberTagD: public LDeferredCode { |
| 1732 public: |
| 1733 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) |
| 1734 : LDeferredCode(codegen), instr_(instr) { } |
| 1735 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } |
| 1736 private: |
| 1737 LNumberTagD* instr_; |
| 1738 }; |
| 1739 |
| 1740 DoubleRegister input_reg = ToDoubleRegister(instr->input()); |
| 1741 Register reg = ToRegister(instr->result()); |
| 1742 Register tmp = ToRegister(instr->temp()); |
| 1743 Register scratch = r9; |
| 1744 |
| 1745 DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr); |
| 1746 if (FLAG_inline_new) { |
| 1747 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); |
| 1748 __ AllocateHeapNumber(reg, tmp, ip, scratch, deferred->entry()); |
| 1749 } else { |
| 1750 __ jmp(deferred->entry()); |
| 1751 } |
| 1752 __ bind(deferred->exit()); |
| 1753 __ sub(ip, reg, Operand(kHeapObjectTag)); |
| 1754 __ vstr(input_reg, ip, HeapNumber::kValueOffset); |
| 1755 } |
| 1756 |
| 1757 |
| 1758 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { |
| 1759 // TODO(3095996): Get rid of this. For now, we need to make the |
| 1760 // result register contain a valid pointer because it is already |
| 1761 // contained in the register pointer map. |
| 1762 Register reg = ToRegister(instr->result()); |
| 1763 __ mov(reg, Operand(0)); |
| 1764 |
| 1765 __ PushSafepointRegisters(); |
| 1766 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); |
| 1767 RecordSafepointWithRegisters( |
| 1768 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); |
| 1769 int reg_stack_index = __ SafepointRegisterStackIndex(reg.code()); |
| 1770 __ str(r0, MemOperand(sp, reg_stack_index * kPointerSize)); |
| 1771 __ PopSafepointRegisters(); |
| 1772 } |
| 1773 |
| 1774 |
| 1775 void LCodeGen::DoSmiTag(LSmiTag* instr) { |
| 1776 LOperand* input = instr->input(); |
| 1777 ASSERT(input->IsRegister() && input->Equals(instr->result())); |
| 1778 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)); |
| 1779 __ SmiTag(ToRegister(input)); |
| 1780 } |
| 1781 |
| 1782 |
| 1783 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { |
| 1784 Abort("DoSmiUntag unimplemented."); |
| 1785 } |
| 1786 |
| 1787 |
| 1788 void LCodeGen::EmitNumberUntagD(Register input_reg, |
| 1789 DoubleRegister result_reg, |
| 1790 LEnvironment* env) { |
| 1791 Register core_scratch = r9; |
| 1792 ASSERT(!input_reg.is(core_scratch)); |
| 1793 SwVfpRegister flt_scratch = s0; |
| 1794 ASSERT(!result_reg.is(d0)); |
| 1795 |
| 1796 Label load_smi, heap_number, done; |
| 1797 |
| 1798 // Smi check. |
| 1799 __ tst(input_reg, Operand(kSmiTagMask)); |
| 1800 __ b(eq, &load_smi); |
| 1801 |
| 1802 // Heap number map check. |
| 1803 __ ldr(core_scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
| 1804 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
| 1805 __ cmp(core_scratch, Operand(ip)); |
| 1806 __ b(eq, &heap_number); |
| 1807 |
| 1808 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
| 1809 __ cmp(input_reg, Operand(ip)); |
| 1810 DeoptimizeIf(ne, env); |
| 1811 |
| 1812 // Convert undefined to NaN. |
| 1813 __ LoadRoot(ip, Heap::kNanValueRootIndex); |
| 1814 __ sub(ip, ip, Operand(kHeapObjectTag)); |
| 1815 __ vldr(result_reg, ip, HeapNumber::kValueOffset); |
| 1816 __ jmp(&done); |
| 1817 |
| 1818 // Heap number to double register conversion. |
| 1819 __ bind(&heap_number); |
| 1820 __ sub(ip, input_reg, Operand(kHeapObjectTag)); |
| 1821 __ vldr(result_reg, ip, HeapNumber::kValueOffset); |
| 1822 __ jmp(&done); |
| 1823 |
| 1824   // Smi to double register conversion. |
| 1825   __ bind(&load_smi); |
| 1826   __ SmiUntag(input_reg); // Untag smi before converting to double. |
| 1827 __ vmov(flt_scratch, input_reg); |
| 1828 __ vcvt_f64_s32(result_reg, flt_scratch); |
| 1829 __ SmiTag(input_reg); // Retag smi. |
| 1830 __ bind(&done); |
| 1831 } |
| 1832 |
| 1833 |
| 1834 class DeferredTaggedToI: public LDeferredCode { |
| 1835 public: |
| 1836 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) |
| 1837 : LDeferredCode(codegen), instr_(instr) { } |
| 1838 virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); } |
| 1839 private: |
| 1840 LTaggedToI* instr_; |
| 1841 }; |
| 1842 |
| 1843 |
| 1844 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { |
| 1845 Label done; |
| 1846 Register input_reg = ToRegister(instr->input()); |
| 1847 Register core_scratch = r9; |
| 1848 ASSERT(!input_reg.is(core_scratch)); |
| 1849 DoubleRegister dbl_scratch = d0; |
| 1850 SwVfpRegister flt_scratch = s0; |
| 1851 DoubleRegister dbl_tmp = ToDoubleRegister(instr->temp()); |
| 1852 |
| 1853 // Heap number map check. |
| 1854 __ ldr(core_scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
| 1855 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
| 1856 __ cmp(core_scratch, Operand(ip)); |
| 1857 |
| 1858 if (instr->truncating()) { |
| 1859 Label heap_number; |
| 1860 __ b(eq, &heap_number); |
| 1861 // Check for undefined. Undefined is converted to zero for truncating |
| 1862 // conversions. |
| 1863 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
| 1864 __ cmp(input_reg, Operand(ip)); |
| 1865 DeoptimizeIf(ne, instr->environment()); |
| 1866 __ mov(input_reg, Operand(0)); |
| 1867 __ b(&done); |
| 1868 |
| 1869 __ bind(&heap_number); |
| 1870 __ sub(ip, input_reg, Operand(kHeapObjectTag)); |
| 1871 __ vldr(dbl_tmp, ip, HeapNumber::kValueOffset); |
| 1872 __ vcmp(dbl_tmp, 0.0); // Sets overflow bit if NaN. |
| 1873 __ vcvt_s32_f64(flt_scratch, dbl_tmp); |
| 1874 __ vmov(input_reg, flt_scratch); // 32-bit result of conversion. |
| 1875 __ vmrs(pc); // Move vector status bits to normal status bits. |
| 1876     // Overflow bit is set if dbl_tmp is NaN. |
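|     // vcvt saturates out-of-range doubles to 0x7fffffff/0x80000000, so when |
|     // the comparison was ordered (vc) adding or subtracting 1 overflows |
|     // exactly in the saturated cases checked below. |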
| 1877 __ cmn(input_reg, Operand(1), vc); // 0x7fffffff + 1 -> overflow. |
| 1878 __ cmp(input_reg, Operand(1), vc); // 0x80000000 - 1 -> overflow. |
| 1879     DeoptimizeIf(vs, instr->environment());  // Saturation may have occurred. |
| 1880 |
| 1881 } else { |
| 1882 // Deoptimize if we don't have a heap number. |
| 1883 DeoptimizeIf(ne, instr->environment()); |
| 1884 |
| 1885 __ sub(ip, input_reg, Operand(kHeapObjectTag)); |
| 1886 __ vldr(dbl_tmp, ip, HeapNumber::kValueOffset); |
| 1887 __ vcvt_s32_f64(flt_scratch, dbl_tmp); |
| 1888 __ vmov(input_reg, flt_scratch); // 32-bit result of conversion. |
| 1889 // Non-truncating conversion means that we cannot lose bits, so we convert |
| 1890 // back to check; note that using non-overlapping s and d regs would be |
| 1891 // slightly faster. |
| 1892 __ vcvt_f64_s32(dbl_scratch, flt_scratch); |
| 1893 __ vcmp(dbl_scratch, dbl_tmp); |
| 1894 __ vmrs(pc); // Move vector status bits to normal status bits. |
| 1895 DeoptimizeIf(ne, instr->environment()); // Not equal or unordered. |
| 1896 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1897 __ tst(input_reg, Operand(input_reg)); |
| 1898 __ b(ne, &done); |
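|       // The converted result is zero; check the sign bit of the original |
|       // double (upper word) and deoptimize if the input was -0. |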
| 1899 __ vmov(lr, ip, dbl_tmp); |
| 1900 __ tst(ip, Operand(1 << 31)); // Test sign bit. |
| 1901 DeoptimizeIf(ne, instr->environment()); |
| 1902 } |
| 1903 } |
| 1904 __ bind(&done); |
| 1905 } |
| 1906 |
| 1907 |
| 1908 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
| 1909 LOperand* input = instr->input(); |
| 1910 ASSERT(input->IsRegister()); |
| 1911 ASSERT(input->Equals(instr->result())); |
| 1912 |
| 1913 Register input_reg = ToRegister(input); |
| 1914 |
| 1915 DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr); |
| 1916 |
| 1917 // Smi check. |
| 1918 __ tst(input_reg, Operand(kSmiTagMask)); |
| 1919 __ b(ne, deferred->entry()); |
| 1920 |
| 1921   // Smi to int32 conversion. |
| 1922 __ SmiUntag(input_reg); // Untag smi. |
| 1923 |
| 1924 __ bind(deferred->exit()); |
| 1925 } |
| 1926 |
| 1927 |
| 1928 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { |
| 1929 LOperand* input = instr->input(); |
| 1930 ASSERT(input->IsRegister()); |
| 1931 LOperand* result = instr->result(); |
| 1932 ASSERT(result->IsDoubleRegister()); |
| 1933 |
| 1934 Register input_reg = ToRegister(input); |
| 1935 DoubleRegister result_reg = ToDoubleRegister(result); |
| 1936 |
| 1937 EmitNumberUntagD(input_reg, result_reg, instr->environment()); |
| 1938 } |
| 1939 |
| 1940 |
| 1941 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { |
| 1942 Abort("DoDoubleToI unimplemented."); |
| 1943 } |
| 1944 |
| 1945 |
| 1946 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { |
| 1947 LOperand* input = instr->input(); |
| 1948 ASSERT(input->IsRegister()); |
| 1949 __ tst(ToRegister(input), Operand(kSmiTagMask)); |
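|   // The instruction's condition selects the deoptimizing case: eq deopts |
|   // when the value is a smi, ne when it is not. |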
| 1950 DeoptimizeIf(instr->condition(), instr->environment()); |
| 1951 } |
| 1952 |
| 1953 |
| 1954 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { |
| 1955 Abort("DoCheckInstanceType unimplemented."); |
| 1956 } |
| 1957 |
| 1958 |
| 1959 void LCodeGen::DoCheckFunction(LCheckFunction* instr) { |
| 1960 ASSERT(instr->input()->IsRegister()); |
| 1961 Register reg = ToRegister(instr->input()); |
| 1962 __ cmp(reg, Operand(instr->hydrogen()->target())); |
| 1963 DeoptimizeIf(ne, instr->environment()); |
| 1964 } |
| 1965 |
| 1966 |
| 1967 void LCodeGen::DoCheckMap(LCheckMap* instr) { |
| 1968 LOperand* input = instr->input(); |
| 1969 ASSERT(input->IsRegister()); |
| 1970 Register reg = ToRegister(input); |
| 1971 __ ldr(r9, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 1972 __ cmp(r9, Operand(instr->hydrogen()->map())); |
| 1973 DeoptimizeIf(ne, instr->environment()); |
| 1974 } |
| 1975 |
| 1976 |
| 1977 void LCodeGen::LoadPrototype(Register result, |
| 1978 Handle<JSObject> prototype) { |
| 1979 Abort("LoadPrototype unimplemented."); |
| 1980 } |
| 1981 |
| 1982 |
| 1983 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { |
| 1984 Abort("DoCheckPrototypeMaps unimplemented."); |
| 1985 } |
| 1986 |
| 1987 |
| 1988 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { |
| 1989 Abort("DoArrayLiteral unimplemented."); |
| 1990 } |
| 1991 |
| 1992 |
| 1993 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { |
| 1994 Abort("DoObjectLiteral unimplemented."); |
| 1995 } |
| 1996 |
| 1997 |
| 1998 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { |
| 1999 Abort("DoRegExpLiteral unimplemented."); |
| 2000 } |
| 2001 |
| 2002 |
| 2003 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { |
| 2004 Abort("DoFunctionLiteral unimplemented."); |
| 2005 } |
| 2006 |
| 2007 |
| 2008 void LCodeGen::DoTypeof(LTypeof* instr) { |
| 2009 Abort("DoTypeof unimplemented."); |
| 2010 } |
| 2011 |
| 2012 |
| 2013 void LCodeGen::DoTypeofIs(LTypeofIs* instr) { |
| 2014 Abort("DoTypeofIs unimplemented."); |
| 2015 } |
| 2016 |
| 2017 |
| 2018 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { |
| 2019 Register input = ToRegister(instr->input()); |
| 2020 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
| 2021 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
| 2022 Label* true_label = chunk_->GetAssemblyLabel(true_block); |
| 2023 Label* false_label = chunk_->GetAssemblyLabel(false_block); |
| 2024 |
| 2025 Condition final_branch_condition = EmitTypeofIs(true_label, |
| 2026 false_label, |
| 2027 input, |
| 2028 instr->type_literal()); |
| 2029 |
| 2030 EmitBranch(true_block, false_block, final_branch_condition); |
| 2031 } |
| 2032 |
| 2033 |
| 2034 Condition LCodeGen::EmitTypeofIs(Label* true_label, |
| 2035 Label* false_label, |
| 2036 Register input, |
| 2037 Handle<String> type_name) { |
| 2038 Condition final_branch_condition = no_condition; |
| 2039 Register core_scratch = r9; |
| 2040 ASSERT(!input.is(core_scratch)); |
| 2041 if (type_name->Equals(HEAP->number_symbol())) { |
| 2042 __ tst(input, Operand(kSmiTagMask)); |
| 2043 __ b(eq, true_label); |
| 2044 __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 2045 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
| 2046 __ cmp(input, Operand(ip)); |
| 2047 final_branch_condition = eq; |
| 2048 |
| 2049 } else if (type_name->Equals(HEAP->string_symbol())) { |
| 2050 __ tst(input, Operand(kSmiTagMask)); |
| 2051 __ b(eq, false_label); |
| 2052 __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 2053 __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset)); |
| 2054 __ tst(ip, Operand(1 << Map::kIsUndetectable)); |
| 2055 __ b(ne, false_label); |
| 2056 __ CompareInstanceType(input, core_scratch, FIRST_NONSTRING_TYPE); |
| 2057 final_branch_condition = lo; |
| 2058 |
| 2059 } else if (type_name->Equals(HEAP->boolean_symbol())) { |
| 2060 __ LoadRoot(ip, Heap::kTrueValueRootIndex); |
| 2061 __ cmp(input, ip); |
| 2062 __ b(eq, true_label); |
| 2063 __ LoadRoot(ip, Heap::kFalseValueRootIndex); |
| 2064 __ cmp(input, ip); |
| 2065 final_branch_condition = eq; |
| 2066 |
| 2067 } else if (type_name->Equals(HEAP->undefined_symbol())) { |
| 2068 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
| 2069 __ cmp(input, ip); |
| 2070 __ b(eq, true_label); |
| 2071 __ tst(input, Operand(kSmiTagMask)); |
| 2072 __ b(eq, false_label); |
| 2073 // Check for undetectable objects => true. |
| 2074 __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 2075 __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset)); |
| 2076 __ tst(ip, Operand(1 << Map::kIsUndetectable)); |
| 2077 final_branch_condition = ne; |
| 2078 |
| 2079 } else if (type_name->Equals(HEAP->function_symbol())) { |
| 2080 __ tst(input, Operand(kSmiTagMask)); |
| 2081 __ b(eq, false_label); |
| 2082 __ CompareObjectType(input, input, core_scratch, JS_FUNCTION_TYPE); |
| 2083 __ b(eq, true_label); |
| 2084 // Regular expressions => 'function' (they are callable). |
| 2085 __ CompareInstanceType(input, core_scratch, JS_REGEXP_TYPE); |
| 2086 final_branch_condition = eq; |
| 2087 |
| 2088 } else if (type_name->Equals(HEAP->object_symbol())) { |
| 2089 __ tst(input, Operand(kSmiTagMask)); |
| 2090 __ b(eq, false_label); |
| 2091 __ LoadRoot(ip, Heap::kNullValueRootIndex); |
| 2092 __ cmp(input, ip); |
| 2093 __ b(eq, true_label); |
| 2094 // Regular expressions => 'function', not 'object'. |
| 2095 __ CompareObjectType(input, input, core_scratch, JS_REGEXP_TYPE); |
| 2096 __ b(eq, false_label); |
| 2097 // Check for undetectable objects => false. |
| 2098 __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset)); |
| 2099 __ tst(ip, Operand(1 << Map::kIsUndetectable)); |
| 2100 __ b(ne, false_label); |
| 2101 // Check for JS objects => true. |
| 2102 __ CompareInstanceType(input, core_scratch, FIRST_JS_OBJECT_TYPE); |
| 2103 __ b(lo, false_label); |
| 2104 __ CompareInstanceType(input, core_scratch, LAST_JS_OBJECT_TYPE); |
| 2105 final_branch_condition = ls; |
| 2106 |
| 2107 } else { |
| 2108 final_branch_condition = ne; |
| 2109 __ b(false_label); |
| 2110 // A dead branch instruction will be generated after this point. |
| 2111 } |
| 2112 |
| 2113 return final_branch_condition; |
| 2114 } |
| 2115 |
| 2116 |
| 2117 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { |
| 2118 // No code for lazy bailout instruction. Used to capture environment after a |
| 2119 // call for populating the safepoint data with deoptimization data. |
| 2120 } |
| 2121 |
| 2122 |
| 2123 void LCodeGen::DoDeoptimize(LDeoptimize* instr) { |
| 2124 DeoptimizeIf(no_condition, instr->environment()); |
| 2125 } |
| 2126 |
| 2127 |
| 2128 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) { |
| 2129 Abort("DoDeleteProperty unimplemented."); |
| 2130 } |
| 2131 |
| 2132 |
| 2133 void LCodeGen::DoStackCheck(LStackCheck* instr) { |
| 2134 // Perform stack overflow check. |
| 2135 Label ok; |
| 2136 __ LoadRoot(ip, Heap::kStackLimitRootIndex); |
| 2137 __ cmp(sp, Operand(ip)); |
| 2138 __ b(hs, &ok); |
| 2139 StackCheckStub stub; |
| 2140 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 2141 __ bind(&ok); |
| 2142 } |
| 2143 |
| 2144 |
| 2145 void LCodeGen::DoOsrEntry(LOsrEntry* instr) { |
| 2146 Abort("DoOsrEntry unimplemented."); |
| 2147 } |
| 2148 |
| 2149 |
| 2150 #undef __ |
| 2151 |
| 2152 } } // namespace v8::internal |