OLD | NEW |
(Empty) | |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are |
| 4 // met: |
| 5 // |
| 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. |
| 12 // * Neither the name of Google Inc. nor the names of its |
| 13 // contributors may be used to endorse or promote products derived |
| 14 // from this software without specific prior written permission. |
| 15 // |
| 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 |
| 28 #include "v8.h" |
| 29 |
| 30 #include "a64/lithium-codegen-a64.h" |
| 31 #include "a64/lithium-gap-resolver-a64.h" |
| 32 #include "code-stubs.h" |
| 33 #include "stub-cache.h" |
| 34 #include "hydrogen-osr.h" |
| 35 |
| 36 namespace v8 { |
| 37 namespace internal { |
| 38 |
| 39 |
| 40 class SafepointGenerator V8_FINAL : public CallWrapper { |
| 41 public: |
| 42 SafepointGenerator(LCodeGen* codegen, |
| 43 LPointerMap* pointers, |
| 44 Safepoint::DeoptMode mode) |
| 45 : codegen_(codegen), |
| 46 pointers_(pointers), |
| 47 deopt_mode_(mode) { } |
| 48 virtual ~SafepointGenerator() { } |
| 49 |
| 50 virtual void BeforeCall(int call_size) const { } |
| 51 |
| 52 virtual void AfterCall() const { |
| 53 codegen_->RecordSafepoint(pointers_, deopt_mode_); |
| 54 } |
| 55 |
| 56 private: |
| 57 LCodeGen* codegen_; |
| 58 LPointerMap* pointers_; |
| 59 Safepoint::DeoptMode deopt_mode_; |
| 60 }; |
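Note: SafepointGenerator implements the CallWrapper hooks that the macro assembler invokes around a generated call. A minimal usage sketch, mirroring DoApplyArguments later in this file:

    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
    __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);

InvokeFunction calls AfterCall() right after emitting the call, so the safepoint is recorded at the return-address pc.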
| 61 |
| 62 |
| 63 #define __ masm()-> |
| 64 |
| 65 // Emit code to branch if the given condition holds. |
| 66 // The code generated here doesn't modify the flags, so they must have |
| 67 // been set by prior instructions. |
| 68 // |
| 69 // The EmitInverted function simply inverts the condition. |
| 70 class BranchOnCondition : public BranchGenerator { |
| 71 public: |
| 72 BranchOnCondition(LCodeGen* codegen, Condition cond) |
| 73 : BranchGenerator(codegen), |
| 74 cond_(cond) { } |
| 75 |
| 76 virtual void Emit(Label* label) const { |
| 77 __ B(cond_, label); |
| 78 } |
| 79 |
| 80 virtual void EmitInverted(Label* label) const { |
| 81 if (cond_ != al) { |
| 82 __ B(InvertCondition(cond_), label); |
| 83 } |
| 84 } |
| 85 |
| 86 private: |
| 87 Condition cond_; |
| 88 }; |
| 89 |
| 90 |
| 91 // Emit code to compare lhs and rhs and branch if the condition holds. |
| 92 // This uses MacroAssembler's CompareAndBranch function so it will handle |
| 93 // converting the comparison to Cbz/Cbnz if the right-hand side is 0. |
| 94 // |
| 95 // EmitInverted still compares the two operands but inverts the condition. |
| 96 class CompareAndBranch : public BranchGenerator { |
| 97 public: |
| 98 CompareAndBranch(LCodeGen* codegen, |
| 99 Condition cond, |
| 100 const Register& lhs, |
| 101 const Operand& rhs) |
| 102 : BranchGenerator(codegen), |
| 103 cond_(cond), |
| 104 lhs_(lhs), |
| 105 rhs_(rhs) { } |
| 106 |
| 107 virtual void Emit(Label* label) const { |
| 108 __ CompareAndBranch(lhs_, rhs_, cond_, label); |
| 109 } |
| 110 |
| 111 virtual void EmitInverted(Label* label) const { |
| 112 __ CompareAndBranch(lhs_, rhs_, InvertCondition(cond_), label); |
| 113 } |
| 114 |
| 115 private: |
| 116 Condition cond_; |
| 117 const Register& lhs_; |
| 118 const Operand& rhs_; |
| 119 }; |
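Note: the Cbz/Cbnz conversion saves an instruction and leaves the flags untouched. An illustrative sketch of the two forms CompareAndBranch can emit (assuming the MacroAssembler behaves as the comment above describes):

    // rhs is the immediate 0 and cond is eq:
    __ Cbz(lhs, label);       // single instruction, flags not modified
    // general case:
    __ Cmp(lhs, rhs);
    __ B(cond, label);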
| 120 |
| 121 |
| 122 // Test the input with the given mask and branch if the condition holds. |
| 123 // If the condition is 'eq' or 'ne' this will use MacroAssembler's |
| 124 // TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the |
| 125 // conversion to Tbz/Tbnz when possible. |
| 126 class TestAndBranch : public BranchGenerator { |
| 127 public: |
| 128 TestAndBranch(LCodeGen* codegen, |
| 129 Condition cond, |
| 130 const Register& value, |
| 131 uint64_t mask) |
| 132 : BranchGenerator(codegen), |
| 133 cond_(cond), |
| 134 value_(value), |
| 135 mask_(mask) { } |
| 136 |
| 137 virtual void Emit(Label* label) const { |
| 138 switch (cond_) { |
| 139 case eq: |
| 140 __ TestAndBranchIfAllClear(value_, mask_, label); |
| 141 break; |
| 142 case ne: |
| 143 __ TestAndBranchIfAnySet(value_, mask_, label); |
| 144 break; |
| 145 default: |
| 146 __ Tst(value_, mask_); |
| 147 __ B(cond_, label); |
| 148 } |
| 149 } |
| 150 |
| 151 virtual void EmitInverted(Label* label) const { |
| 152 // The inverse of "all clear" is "any set" and vice versa. |
| 153 switch (cond_) { |
| 154 case eq: |
| 155 __ TestAndBranchIfAnySet(value_, mask_, label); |
| 156 break; |
| 157 case ne: |
| 158 __ TestAndBranchIfAllClear(value_, mask_, label); |
| 159 break; |
| 160 default: |
| 161 __ Tst(value_, mask_); |
| 162 __ B(InvertCondition(cond_), label); |
| 163 } |
| 164 } |
| 165 |
| 166 private: |
| 167 Condition cond_; |
| 168 const Register& value_; |
| 169 uint64_t mask_; |
| 170 }; |
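Note: Tbz/Tbnz test a single bit and branch, so the eq/ne paths above can shrink to one instruction whenever the mask has exactly one bit set. A sketch under that assumption:

    // mask == (1UL << 5), ne ("any set"):
    __ Tbnz(value, 5, label);    // branch if bit 5 is set
    // mask == (1UL << 5), eq ("all clear"):
    __ Tbz(value, 5, label);     // branch if bit 5 is clear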
| 171 |
| 172 |
| 173 // Test the input and branch if it is non-zero and not a NaN. |
| 174 class BranchIfNonZeroNumber : public BranchGenerator { |
| 175 public: |
| 176 BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value, |
| 177 const FPRegister& scratch) |
| 178 : BranchGenerator(codegen), value_(value), scratch_(scratch) { } |
| 179 |
| 180 virtual void Emit(Label* label) const { |
| 181 __ Fabs(scratch_, value_); |
| 182 // Compare with 0.0. Because scratch_ is non-negative, the result can be |
| 183 // one of nZCv (equal), nzCv (greater) or nzCV (unordered). |
| 184 __ Fcmp(scratch_, 0.0); |
| 185 __ B(gt, label); |
| 186 } |
| 187 |
| 188 virtual void EmitInverted(Label* label) const { |
| 189 __ Fabs(scratch_, value_); |
| 190 __ Fcmp(scratch_, 0.0); |
| 191 __ B(le, label); |
| 192 } |
| 193 |
| 194 private: |
| 195 const FPRegister& value_; |
| 196 const FPRegister& scratch_; |
| 197 }; |
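Note: the Fabs/Fcmp pair works because fabs maps every ordered input onto a non-negative value, so the compare with 0.0 has only three outcomes: equal (the input was +/-0.0), greater (a non-zero number) or unordered (NaN). gt is taken only in the ordered strictly-greater case, and its inverse le covers both the equal and the unordered cases, which is exactly what EmitInverted needs.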
| 198 |
| 199 |
| 200 // Test the input and branch if it is a heap number. |
| 201 class BranchIfHeapNumber : public BranchGenerator { |
| 202 public: |
| 203 BranchIfHeapNumber(LCodeGen* codegen, const Register& value) |
| 204 : BranchGenerator(codegen), value_(value) { } |
| 205 |
| 206 virtual void Emit(Label* label) const { |
| 207 __ JumpIfHeapNumber(value_, label); |
| 208 } |
| 209 |
| 210 virtual void EmitInverted(Label* label) const { |
| 211 __ JumpIfNotHeapNumber(value_, label); |
| 212 } |
| 213 |
| 214 private: |
| 215 const Register& value_; |
| 216 }; |
| 217 |
| 218 |
| 219 // Test the input and branch if it is the specified root value. |
| 220 class BranchIfRoot : public BranchGenerator { |
| 221 public: |
| 222 BranchIfRoot(LCodeGen* codegen, const Register& value, |
| 223 Heap::RootListIndex index) |
| 224 : BranchGenerator(codegen), value_(value), index_(index) { } |
| 225 |
| 226 virtual void Emit(Label* label) const { |
| 227 __ JumpIfRoot(value_, index_, label); |
| 228 } |
| 229 |
| 230 virtual void EmitInverted(Label* label) const { |
| 231 __ JumpIfNotRoot(value_, index_, label); |
| 232 } |
| 233 |
| 234 private: |
| 235 const Register& value_; |
| 236 const Heap::RootListIndex index_; |
| 237 }; |
| 238 |
| 239 |
| 240 void LCodeGen::WriteTranslation(LEnvironment* environment, |
| 241 Translation* translation) { |
| 242 if (environment == NULL) return; |
| 243 |
| 244 // The translation includes one command per value in the environment. |
| 245 int translation_size = environment->translation_size(); |
| 246 // The output frame height does not include the parameters. |
| 247 int height = translation_size - environment->parameter_count(); |
| 248 |
| 249 WriteTranslation(environment->outer(), translation); |
| 250 bool has_closure_id = !info()->closure().is_null() && |
| 251 !info()->closure().is_identical_to(environment->closure()); |
| 252 int closure_id = has_closure_id |
| 253 ? DefineDeoptimizationLiteral(environment->closure()) |
| 254 : Translation::kSelfLiteralId; |
| 255 |
| 256 switch (environment->frame_type()) { |
| 257 case JS_FUNCTION: |
| 258 translation->BeginJSFrame(environment->ast_id(), closure_id, height); |
| 259 break; |
| 260 case JS_CONSTRUCT: |
| 261 translation->BeginConstructStubFrame(closure_id, translation_size); |
| 262 break; |
| 263 case JS_GETTER: |
| 264 ASSERT(translation_size == 1); |
| 265 ASSERT(height == 0); |
| 266 translation->BeginGetterStubFrame(closure_id); |
| 267 break; |
| 268 case JS_SETTER: |
| 269 ASSERT(translation_size == 2); |
| 270 ASSERT(height == 0); |
| 271 translation->BeginSetterStubFrame(closure_id); |
| 272 break; |
| 273 case STUB: |
| 274 translation->BeginCompiledStubFrame(); |
| 275 break; |
| 276 case ARGUMENTS_ADAPTOR: |
| 277 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); |
| 278 break; |
| 279 default: |
| 280 UNREACHABLE(); |
| 281 } |
| 282 |
| 283 int object_index = 0; |
| 284 int dematerialized_index = 0; |
| 285 for (int i = 0; i < translation_size; ++i) { |
| 286 LOperand* value = environment->values()->at(i); |
| 287 |
| 288 AddToTranslation(environment, |
| 289 translation, |
| 290 value, |
| 291 environment->HasTaggedValueAt(i), |
| 292 environment->HasUint32ValueAt(i), |
| 293 &object_index, |
| 294 &dematerialized_index); |
| 295 } |
| 296 } |
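Note: a worked example of the height computation, with illustrative numbers: if translation_size is 5 and the environment has 2 parameters, height is 5 - 2 = 3. Only the non-parameter values count towards the output frame height; the parameters are excluded, as the comment above states.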
| 297 |
| 298 |
| 299 void LCodeGen::AddToTranslation(LEnvironment* environment, |
| 300 Translation* translation, |
| 301 LOperand* op, |
| 302 bool is_tagged, |
| 303 bool is_uint32, |
| 304 int* object_index_pointer, |
| 305 int* dematerialized_index_pointer) { |
| 306 if (op == LEnvironment::materialization_marker()) { |
| 307 int object_index = (*object_index_pointer)++; |
| 308 if (environment->ObjectIsDuplicateAt(object_index)) { |
| 309 int dupe_of = environment->ObjectDuplicateOfAt(object_index); |
| 310 translation->DuplicateObject(dupe_of); |
| 311 return; |
| 312 } |
| 313 int object_length = environment->ObjectLengthAt(object_index); |
| 314 if (environment->ObjectIsArgumentsAt(object_index)) { |
| 315 translation->BeginArgumentsObject(object_length); |
| 316 } else { |
| 317 translation->BeginCapturedObject(object_length); |
| 318 } |
| 319 int dematerialized_index = *dematerialized_index_pointer; |
| 320 int env_offset = environment->translation_size() + dematerialized_index; |
| 321 *dematerialized_index_pointer += object_length; |
| 322 for (int i = 0; i < object_length; ++i) { |
| 323 LOperand* value = environment->values()->at(env_offset + i); |
| 324 AddToTranslation(environment, |
| 325 translation, |
| 326 value, |
| 327 environment->HasTaggedValueAt(env_offset + i), |
| 328 environment->HasUint32ValueAt(env_offset + i), |
| 329 object_index_pointer, |
| 330 dematerialized_index_pointer); |
| 331 } |
| 332 return; |
| 333 } |
| 334 |
| 335 if (op->IsStackSlot()) { |
| 336 if (is_tagged) { |
| 337 translation->StoreStackSlot(op->index()); |
| 338 } else if (is_uint32) { |
| 339 translation->StoreUint32StackSlot(op->index()); |
| 340 } else { |
| 341 translation->StoreInt32StackSlot(op->index()); |
| 342 } |
| 343 } else if (op->IsDoubleStackSlot()) { |
| 344 translation->StoreDoubleStackSlot(op->index()); |
| 345 } else if (op->IsArgument()) { |
| 346 ASSERT(is_tagged); |
| 347 int src_index = GetStackSlotCount() + op->index(); |
| 348 translation->StoreStackSlot(src_index); |
| 349 } else if (op->IsRegister()) { |
| 350 Register reg = ToRegister(op); |
| 351 if (is_tagged) { |
| 352 translation->StoreRegister(reg); |
| 353 } else if (is_uint32) { |
| 354 translation->StoreUint32Register(reg); |
| 355 } else { |
| 356 translation->StoreInt32Register(reg); |
| 357 } |
| 358 } else if (op->IsDoubleRegister()) { |
| 359 DoubleRegister reg = ToDoubleRegister(op); |
| 360 translation->StoreDoubleRegister(reg); |
| 361 } else if (op->IsConstantOperand()) { |
| 362 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op)); |
| 363 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate())); |
| 364 translation->StoreLiteral(src_index); |
| 365 } else { |
| 366 UNREACHABLE(); |
| 367 } |
| 368 } |
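Note: the materialization-marker path reads the fields of captured objects from the environment slots that follow the first translation_size entries. With illustrative numbers: if translation_size is 4 and the first captured object has length 2, its fields are values()->at(4) and values()->at(5), *dematerialized_index_pointer advances by 2, and a second object would start at env_offset 6. Objects nested inside those fields are handled by the recursive AddToTranslation calls.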
| 369 |
| 370 |
| 371 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) { |
| 372 int result = deoptimization_literals_.length(); |
| 373 for (int i = 0; i < deoptimization_literals_.length(); ++i) { |
| 374 if (deoptimization_literals_[i].is_identical_to(literal)) return i; |
| 375 } |
| 376 deoptimization_literals_.Add(literal, zone()); |
| 377 return result; |
| 378 } |
| 379 |
| 380 |
| 381 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, |
| 382 Safepoint::DeoptMode mode) { |
| 383 if (!environment->HasBeenRegistered()) { |
| 384 int frame_count = 0; |
| 385 int jsframe_count = 0; |
| 386 for (LEnvironment* e = environment; e != NULL; e = e->outer()) { |
| 387 ++frame_count; |
| 388 if (e->frame_type() == JS_FUNCTION) { |
| 389 ++jsframe_count; |
| 390 } |
| 391 } |
| 392 Translation translation(&translations_, frame_count, jsframe_count, zone()); |
| 393 WriteTranslation(environment, &translation); |
| 394 int deoptimization_index = deoptimizations_.length(); |
| 395 int pc_offset = masm()->pc_offset(); |
| 396 environment->Register(deoptimization_index, |
| 397 translation.index(), |
| 398 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); |
| 399 deoptimizations_.Add(environment, zone()); |
| 400 } |
| 401 } |
| 402 |
| 403 |
| 404 void LCodeGen::CallCode(Handle<Code> code, |
| 405 RelocInfo::Mode mode, |
| 406 LInstruction* instr) { |
| 407 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); |
| 408 } |
| 409 |
| 410 |
| 411 void LCodeGen::CallCodeGeneric(Handle<Code> code, |
| 412 RelocInfo::Mode mode, |
| 413 LInstruction* instr, |
| 414 SafepointMode safepoint_mode) { |
| 415 ASSERT(instr != NULL); |
| 416 |
| 417 Assembler::BlockConstPoolScope scope(masm_); |
| 418 __ Call(code, mode); |
| 419 RecordSafepointWithLazyDeopt(instr, safepoint_mode); |
| 420 |
| 421 if ((code->kind() == Code::BINARY_OP_IC) || |
| 422 (code->kind() == Code::COMPARE_IC)) { |
| 423 // Signal that we don't inline smi code before these stubs in the |
| 424 // optimizing code generator. |
| 425 InlineSmiCheckInfo::EmitNotInlined(masm()); |
| 426 } |
| 427 } |
| 428 |
| 429 |
| 430 void LCodeGen::DoCallFunction(LCallFunction* instr) { |
| 431 ASSERT(ToRegister(instr->context()).is(cp)); |
| 432 ASSERT(ToRegister(instr->function()).Is(x1)); |
| 433 ASSERT(ToRegister(instr->result()).Is(x0)); |
| 434 |
| 435 int arity = instr->arity(); |
| 436 CallFunctionStub stub(arity, instr->hydrogen()->function_flags()); |
| 437 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| 438 } |
| 439 |
| 440 |
| 441 void LCodeGen::DoCallNew(LCallNew* instr) { |
| 442 ASSERT(ToRegister(instr->context()).is(cp)); |
| 443 ASSERT(instr->IsMarkedAsCall()); |
| 444 ASSERT(ToRegister(instr->constructor()).is(x1)); |
| 445 |
| 446 __ Mov(x0, instr->arity()); |
| 447 // No cell in x2 for construct type feedback in optimized code. |
| 448 Handle<Object> undefined_value(isolate()->factory()->undefined_value()); |
| 449 __ Mov(x2, Operand(undefined_value)); |
| 450 |
| 451 CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); |
| 452 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); |
| 453 |
| 454 ASSERT(ToRegister(instr->result()).is(x0)); |
| 455 } |
| 456 |
| 457 |
| 458 void LCodeGen::DoCallNewArray(LCallNewArray* instr) { |
| 459 ASSERT(instr->IsMarkedAsCall()); |
| 460 ASSERT(ToRegister(instr->context()).is(cp)); |
| 461 ASSERT(ToRegister(instr->constructor()).is(x1)); |
| 462 |
| 463 __ Mov(x0, Operand(instr->arity())); |
| 464 __ Mov(x2, Operand(factory()->undefined_value())); |
| 465 |
| 466 ElementsKind kind = instr->hydrogen()->elements_kind(); |
| 467 AllocationSiteOverrideMode override_mode = |
| 468 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) |
| 469 ? DISABLE_ALLOCATION_SITES |
| 470 : DONT_OVERRIDE; |
| 471 |
| 472 if (instr->arity() == 0) { |
| 473 ArrayNoArgumentConstructorStub stub(kind, override_mode); |
| 474 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); |
| 475 } else if (instr->arity() == 1) { |
| 476 Label done; |
| 477 if (IsFastPackedElementsKind(kind)) { |
| 478 Label packed_case; |
| 479 |
| 480 // We might need to create a holey array; look at the first argument. |
| 481 __ Peek(x10, 0); |
| 482 __ Cbz(x10, &packed_case); |
| 483 |
| 484 ElementsKind holey_kind = GetHoleyElementsKind(kind); |
| 485 ArraySingleArgumentConstructorStub stub(holey_kind, override_mode); |
| 486 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); |
| 487 __ B(&done); |
| 488 __ Bind(&packed_case); |
| 489 } |
| 490 |
| 491 ArraySingleArgumentConstructorStub stub(kind, override_mode); |
| 492 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); |
| 493 __ Bind(&done); |
| 494 } else { |
| 495 ArrayNArgumentsConstructorStub stub(kind, override_mode); |
| 496 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); |
| 497 } |
| 498 |
| 499 ASSERT(ToRegister(instr->result()).is(x0)); |
| 500 } |
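Note on the single-argument case: constructing an array with one non-zero length argument (e.g. new Array(3)) must produce a holey array, since its elements start out as holes. So when the site expects a packed kind, the code peeks at the argument and stays on the packed path only when it is zero; Cbz works directly on the value because the Smi encoding of 0 is 0.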
| 501 |
| 502 |
| 503 void LCodeGen::CallRuntime(const Runtime::Function* function, |
| 504 int num_arguments, |
| 505 LInstruction* instr, |
| 506 SaveFPRegsMode save_doubles) { |
| 507 ASSERT(instr != NULL); |
| 508 |
| 509 __ CallRuntime(function, num_arguments, save_doubles); |
| 510 |
| 511 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); |
| 512 } |
| 513 |
| 514 |
| 515 void LCodeGen::LoadContextFromDeferred(LOperand* context) { |
| 516 if (context->IsRegister()) { |
| 517 __ Mov(cp, ToRegister(context)); |
| 518 } else if (context->IsStackSlot()) { |
| 519 __ Ldr(cp, ToMemOperand(context)); |
| 520 } else if (context->IsConstantOperand()) { |
| 521 HConstant* constant = |
| 522 chunk_->LookupConstant(LConstantOperand::cast(context)); |
| 523 __ LoadHeapObject(cp, |
| 524 Handle<HeapObject>::cast(constant->handle(isolate()))); |
| 525 } else { |
| 526 UNREACHABLE(); |
| 527 } |
| 528 } |
| 529 |
| 530 |
| 531 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, |
| 532 int argc, |
| 533 LInstruction* instr, |
| 534 LOperand* context) { |
| 535 LoadContextFromDeferred(context); |
| 536 __ CallRuntimeSaveDoubles(id); |
| 537 RecordSafepointWithRegisters( |
| 538 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); |
| 539 } |
| 540 |
| 541 |
| 542 void LCodeGen::RecordAndWritePosition(int position) { |
| 543 if (position == RelocInfo::kNoPosition) return; |
| 544 masm()->positions_recorder()->RecordPosition(position); |
| 545 masm()->positions_recorder()->WriteRecordedPositions(); |
| 546 } |
| 547 |
| 548 |
| 549 void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr, |
| 550 SafepointMode safepoint_mode) { |
| 551 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { |
| 552 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); |
| 553 } else { |
| 554 ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); |
| 555 RecordSafepointWithRegisters( |
| 556 instr->pointer_map(), 0, Safepoint::kLazyDeopt); |
| 557 } |
| 558 } |
| 559 |
| 560 |
| 561 void LCodeGen::RecordSafepoint(LPointerMap* pointers, |
| 562 Safepoint::Kind kind, |
| 563 int arguments, |
| 564 Safepoint::DeoptMode deopt_mode) { |
| 565 ASSERT(expected_safepoint_kind_ == kind); |
| 566 |
| 567 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); |
| 568 Safepoint safepoint = safepoints_.DefineSafepoint( |
| 569 masm(), kind, arguments, deopt_mode); |
| 570 |
| 571 for (int i = 0; i < operands->length(); i++) { |
| 572 LOperand* pointer = operands->at(i); |
| 573 if (pointer->IsStackSlot()) { |
| 574 safepoint.DefinePointerSlot(pointer->index(), zone()); |
| 575 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { |
| 576 safepoint.DefinePointerRegister(ToRegister(pointer), zone()); |
| 577 } |
| 578 } |
| 579 |
| 580 if (kind & Safepoint::kWithRegisters) { |
| 581 // Register cp always contains a pointer to the context. |
| 582 safepoint.DefinePointerRegister(cp, zone()); |
| 583 } |
| 584 } |
| 585 |
| 586 void LCodeGen::RecordSafepoint(LPointerMap* pointers, |
| 587 Safepoint::DeoptMode deopt_mode) { |
| 588 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode); |
| 589 } |
| 590 |
| 591 |
| 592 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) { |
| 593 LPointerMap empty_pointers(zone()); |
| 594 RecordSafepoint(&empty_pointers, deopt_mode); |
| 595 } |
| 596 |
| 597 |
| 598 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, |
| 599 int arguments, |
| 600 Safepoint::DeoptMode deopt_mode) { |
| 601 RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode); |
| 602 } |
| 603 |
| 604 |
| 605 void LCodeGen::RecordSafepointWithRegistersAndDoubles( |
| 606 LPointerMap* pointers, int arguments, Safepoint::DeoptMode deopt_mode) { |
| 607 RecordSafepoint( |
| 608 pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode); |
| 609 } |
| 610 |
| 611 |
| 612 bool LCodeGen::GenerateCode() { |
| 613 LPhase phase("Z_Code generation", chunk()); |
| 614 ASSERT(is_unused()); |
| 615 status_ = GENERATING; |
| 616 |
| 617 // Open a frame scope to indicate that there is a frame on the stack. The |
| 618 // NONE indicates that the scope shouldn't actually generate code to set up |
| 619 // the frame (that is done in GeneratePrologue). |
| 620 FrameScope frame_scope(masm_, StackFrame::NONE); |
| 621 |
| 622 return GeneratePrologue() && |
| 623 GenerateBody() && |
| 624 GenerateDeferredCode() && |
| 625 GenerateDeoptJumpTable() && |
| 626 GenerateSafepointTable(); |
| 627 } |
| 628 |
| 629 |
| 630 void LCodeGen::SaveCallerDoubles() { |
| 631 ASSERT(info()->saves_caller_doubles()); |
| 632 ASSERT(NeedsEagerFrame()); |
| 633 Comment(";;; Save clobbered caller double registers"); |
| 634 BitVector* doubles = chunk()->allocated_double_registers(); |
| 635 BitVector::Iterator iterator(doubles); |
| 636 int count = 0; |
| 637 while (!iterator.Done()) { |
| 638 // TODO(all): Is this supposed to save just the callee-saved doubles? It |
| 639 // looks like it's saving all of them. |
| 640 FPRegister value = FPRegister::FromAllocationIndex(iterator.Current()); |
| 641 __ Poke(value, count * kDoubleSize); |
| 642 iterator.Advance(); |
| 643 count++; |
| 644 } |
| 645 } |
| 646 |
| 647 |
| 648 void LCodeGen::RestoreCallerDoubles() { |
| 649 ASSERT(info()->saves_caller_doubles()); |
| 650 ASSERT(NeedsEagerFrame()); |
| 651 Comment(";;; Restore clobbered caller double registers"); |
| 652 BitVector* doubles = chunk()->allocated_double_registers(); |
| 653 BitVector::Iterator iterator(doubles); |
| 654 int count = 0; |
| 655 while (!iterator.Done()) { |
| 656 // TODO(all): Is this supposed to restore just the callee-saved doubles? It |
| 657 // looks like it's restoring all of them. |
| 658 FPRegister value = FPRegister::FromAllocationIndex(iterator.Current()); |
| 659 __ Peek(value, count * kDoubleSize); |
| 660 iterator.Advance(); |
| 661 count++; |
| 662 } |
| 663 } |
| 664 |
| 665 |
| 666 bool LCodeGen::GeneratePrologue() { |
| 667 ASSERT(is_generating()); |
| 668 |
| 669 if (info()->IsOptimizing()) { |
| 670 ProfileEntryHookStub::MaybeCallEntryHook(masm_); |
| 671 |
| 672 // TODO(all): Add support for stop_t FLAG in DEBUG mode. |
| 673 |
| 674 // Classic mode functions and builtins need to replace the receiver with the |
| 675 // global proxy when called as functions (without an explicit receiver |
| 676 // object). |
| 677 if (info_->this_has_uses() && |
| 678 info_->is_classic_mode() && |
| 679 !info_->is_native()) { |
| 680 Label ok; |
| 681 int receiver_offset = info_->scope()->num_parameters() * kXRegSizeInBytes; |
| 682 __ Peek(x10, receiver_offset); |
| 683 __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok); |
| 684 |
| 685 __ Ldr(x10, GlobalObjectMemOperand()); |
| 686 __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset)); |
| 687 __ Poke(x10, receiver_offset); |
| 688 |
| 689 __ Bind(&ok); |
| 690 } |
| 691 } |
| 692 |
| 693 ASSERT(__ StackPointer().Is(jssp)); |
| 694 info()->set_prologue_offset(masm_->pc_offset()); |
| 695 if (NeedsEagerFrame()) { |
| 696 __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME); |
| 697 frame_is_built_ = true; |
| 698 info_->AddNoFrameRange(0, masm_->pc_offset()); |
| 699 } |
| 700 |
| 701 // Reserve space for the stack slots needed by the code. |
| 702 int slots = GetStackSlotCount(); |
| 703 if (slots > 0) { |
| 704 __ Claim(slots, kPointerSize); |
| 705 } |
| 706 |
| 707 if (info()->saves_caller_doubles()) { |
| 708 SaveCallerDoubles(); |
| 709 } |
| 710 |
| 711 // Allocate a local context if needed. |
| 712 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; |
| 713 if (heap_slots > 0) { |
| 714 Comment(";;; Allocate local context"); |
| 715 // Argument to NewContext is the function, which is in x1. |
| 716 if (heap_slots <= FastNewContextStub::kMaximumSlots) { |
| 717 FastNewContextStub stub(heap_slots); |
| 718 __ CallStub(&stub); |
| 719 } else { |
| 720 __ Push(x1); |
| 721 __ CallRuntime(Runtime::kNewFunctionContext, 1); |
| 722 } |
| 723 RecordSafepoint(Safepoint::kNoLazyDeopt); |
| 724 // Context is returned in x0. It replaces the context passed to us. It's |
| 725 // saved in the stack and kept live in cp. |
| 726 __ Mov(cp, x0); |
| 727 __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 728 // Copy any necessary parameters into the context. |
| 729 int num_parameters = scope()->num_parameters(); |
| 730 for (int i = 0; i < num_parameters; i++) { |
| 731 Variable* var = scope()->parameter(i); |
| 732 if (var->IsContextSlot()) { |
| 733 Register value = x0; |
| 734 Register scratch = x3; |
| 735 |
| 736 int parameter_offset = StandardFrameConstants::kCallerSPOffset + |
| 737 (num_parameters - 1 - i) * kPointerSize; |
| 738 // Load parameter from stack. |
| 739 __ Ldr(value, MemOperand(fp, parameter_offset)); |
| 740 // Store it in the context. |
| 741 MemOperand target = ContextMemOperand(cp, var->index()); |
| 742 __ Str(value, target); |
| 743 // Update the write barrier. This clobbers value and scratch. |
| 744 __ RecordWriteContextSlot(cp, target.offset(), value, scratch, |
| 745 GetLinkRegisterState(), kSaveFPRegs); |
| 746 } |
| 747 } |
| 748 Comment(";;; End allocate local context"); |
| 749 } |
| 750 |
| 751 // Trace the call. |
| 752 if (FLAG_trace && info()->IsOptimizing()) { |
| 753 // We have not executed any compiled code yet, so cp still holds the |
| 754 // incoming context. |
| 755 __ CallRuntime(Runtime::kTraceEnter, 0); |
| 756 } |
| 757 |
| 758 return !is_aborted(); |
| 759 } |
| 760 |
| 761 |
| 762 void LCodeGen::GenerateOsrPrologue() { |
| 763 // Generate the OSR entry prologue at the first unknown OSR value, or if there |
| 764 // are none, at the OSR entrypoint instruction. |
| 765 if (osr_pc_offset_ >= 0) return; |
| 766 |
| 767 osr_pc_offset_ = masm()->pc_offset(); |
| 768 |
| 769 // Adjust the frame size, subsuming the unoptimized frame into the |
| 770 // optimized frame. |
| 771 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); |
| 772 ASSERT(slots >= 0); |
| 773 __ Claim(slots); |
| 774 } |
| 775 |
| 776 |
| 777 bool LCodeGen::GenerateDeferredCode() { |
| 778 ASSERT(is_generating()); |
| 779 if (deferred_.length() > 0) { |
| 780 for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) { |
| 781 LDeferredCode* code = deferred_[i]; |
| 782 |
| 783 HValue* value = |
| 784 instructions_->at(code->instruction_index())->hydrogen_value(); |
| 785 RecordAndWritePosition(value->position()); |
| 786 |
| 787 Comment(";;; <@%d,#%d> " |
| 788 "-------------------- Deferred %s --------------------", |
| 789 code->instruction_index(), |
| 790 code->instr()->hydrogen_value()->id(), |
| 791 code->instr()->Mnemonic()); |
| 792 |
| 793 __ Bind(code->entry()); |
| 794 |
| 795 if (NeedsDeferredFrame()) { |
| 796 Comment(";;; Build frame"); |
| 797 ASSERT(!frame_is_built_); |
| 798 ASSERT(info()->IsStub()); |
| 799 frame_is_built_ = true; |
| 800 __ Push(lr, fp, cp); |
| 801 __ Mov(fp, Operand(Smi::FromInt(StackFrame::STUB))); |
| 802 __ Push(fp); |
| 803 __ Add(fp, __ StackPointer(), |
| 804 StandardFrameConstants::kFixedFrameSizeFromFp); |
| 805 Comment(";;; Deferred code"); |
| 806 } |
| 807 |
| 808 code->Generate(); |
| 809 |
| 810 if (NeedsDeferredFrame()) { |
| 811 Comment(";;; Destroy frame"); |
| 812 ASSERT(frame_is_built_); |
| 813 __ Pop(xzr, cp, fp, lr); |
| 814 frame_is_built_ = false; |
| 815 } |
| 816 |
| 817 __ B(code->exit()); |
| 818 } |
| 819 } |
| 820 |
| 821 // Force constant pool emission at the end of the deferred code to make |
| 822 // sure that no constant pools are emitted after deferred code because |
| 823 // deferred code generation is the last step which generates code. The two |
| 824 // following steps will only output data used by Crankshaft. |
| 825 masm()->CheckConstPool(true, false); |
| 826 |
| 827 return !is_aborted(); |
| 828 } |
| 829 |
| 830 |
| 831 bool LCodeGen::GenerateDeoptJumpTable() { |
| 832 if (deopt_jump_table_.length() > 0) { |
| 833 Comment(";;; -------------------- Jump table --------------------"); |
| 834 } |
| 835 Label table_start; |
| 836 __ bind(&table_start); |
| 837 Label needs_frame; |
| 838 for (int i = 0; i < deopt_jump_table_.length(); i++) { |
| 839 __ Bind(&deopt_jump_table_[i].label); |
| 840 Address entry = deopt_jump_table_[i].address; |
| 841 Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type; |
| 842 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); |
| 843 if (id == Deoptimizer::kNotDeoptimizationEntry) { |
| 844 Comment(";;; jump table entry %d.", i); |
| 845 } else { |
| 846 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); |
| 847 } |
| 848 if (deopt_jump_table_[i].needs_frame) { |
| 849 ASSERT(!info()->saves_caller_doubles()); |
| 850 __ Mov(__ Tmp0(), Operand(ExternalReference::ForDeoptEntry(entry))); |
| 851 if (needs_frame.is_bound()) { |
| 852 __ B(&needs_frame); |
| 853 } else { |
| 854 __ Bind(&needs_frame); |
| 855 // This variant of deopt can only be used with stubs. Since we don't |
| 856 // have a function pointer to install in the stack frame that we're |
| 857 // building, install a special marker there instead. |
| 858 // TODO(jochen): Revisit the use of TmpX(). |
| 859 ASSERT(info()->IsStub()); |
| 860 __ Mov(__ Tmp1(), Operand(Smi::FromInt(StackFrame::STUB))); |
| 861 __ Push(lr, fp, cp, __ Tmp1()); |
| 862 __ Add(fp, __ StackPointer(), 2 * kPointerSize); |
| 863 __ Call(__ Tmp0()); |
| 864 } |
| 865 } else { |
| 866 if (info()->saves_caller_doubles()) { |
| 867 ASSERT(info()->IsStub()); |
| 868 RestoreCallerDoubles(); |
| 869 } |
| 870 __ Call(entry, RelocInfo::RUNTIME_ENTRY); |
| 871 } |
| 872 masm()->CheckConstPool(false, false); |
| 873 } |
| 874 |
| 875 // Force constant pool emission at the end of the deopt jump table to make |
| 876 // sure that no constant pools are emitted after. |
| 877 masm()->CheckConstPool(true, false); |
| 878 |
| 879 // The deoptimization jump table is the last part of the instruction |
| 880 // sequence. Mark the generated code as done unless we bailed out. |
| 881 if (!is_aborted()) status_ = DONE; |
| 882 return !is_aborted(); |
| 883 } |
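Note: the needs_frame label is bound once and shared across jump-table entries. The first frameless entry builds the marker frame (a Smi-encoded StackFrame::STUB pushed in place of a function pointer, as the comment explains) and calls through Tmp0(); every later frameless entry just loads its own deopt entry into Tmp0() and branches back to the shared sequence.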
| 884 |
| 885 |
| 886 bool LCodeGen::GenerateSafepointTable() { |
| 887 ASSERT(is_done()); |
| 888 safepoints_.Emit(masm(), GetStackSlotCount()); |
| 889 return !is_aborted(); |
| 890 } |
| 891 |
| 892 |
| 893 void LCodeGen::FinishCode(Handle<Code> code) { |
| 894 ASSERT(is_done()); |
| 895 code->set_stack_slots(GetStackSlotCount()); |
| 896 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); |
| 897 RegisterDependentCodeForEmbeddedMaps(code); |
| 898 PopulateDeoptimizationData(code); |
| 899 info()->CommitDependencies(code); |
| 900 } |
| 901 |
| 902 |
| 903 void LCodeGen::Abort(BailoutReason reason) { |
| 904 info()->set_bailout_reason(reason); |
| 905 status_ = ABORTED; |
| 906 } |
| 907 |
| 908 |
| 909 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { |
| 910 int length = deoptimizations_.length(); |
| 911 if (length == 0) return; |
| 912 |
| 913 Handle<DeoptimizationInputData> data = |
| 914 factory()->NewDeoptimizationInputData(length, TENURED); |
| 915 |
| 916 Handle<ByteArray> translations = |
| 917 translations_.CreateByteArray(isolate()->factory()); |
| 918 data->SetTranslationByteArray(*translations); |
| 919 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); |
| 920 |
| 921 Handle<FixedArray> literals = |
| 922 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED); |
| 923 { AllowDeferredHandleDereference copy_handles; |
| 924 for (int i = 0; i < deoptimization_literals_.length(); i++) { |
| 925 literals->set(i, *deoptimization_literals_[i]); |
| 926 } |
| 927 data->SetLiteralArray(*literals); |
| 928 } |
| 929 |
| 930 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt())); |
| 931 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_)); |
| 932 |
| 933 // Populate the deoptimization entries. |
| 934 for (int i = 0; i < length; i++) { |
| 935 LEnvironment* env = deoptimizations_[i]; |
| 936 data->SetAstId(i, env->ast_id()); |
| 937 data->SetTranslationIndex(i, Smi::FromInt(env->translation_index())); |
| 938 data->SetArgumentsStackHeight(i, |
| 939 Smi::FromInt(env->arguments_stack_height())); |
| 940 data->SetPc(i, Smi::FromInt(env->pc_offset())); |
| 941 } |
| 942 |
| 943 code->set_deoptimization_data(*data); |
| 944 } |
| 945 |
| 946 |
| 947 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { |
| 948 ASSERT(deoptimization_literals_.length() == 0); |
| 949 |
| 950 const ZoneList<Handle<JSFunction> >* inlined_closures = |
| 951 chunk()->inlined_closures(); |
| 952 |
| 953 for (int i = 0, length = inlined_closures->length(); i < length; i++) { |
| 954 DefineDeoptimizationLiteral(inlined_closures->at(i)); |
| 955 } |
| 956 |
| 957 inlined_function_count_ = deoptimization_literals_.length(); |
| 958 } |
| 959 |
| 960 |
| 961 Deoptimizer::BailoutType LCodeGen::DeoptimizeHeader( |
| 962 LEnvironment* environment, |
| 963 Deoptimizer::BailoutType* override_bailout_type) { |
| 964 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 965 ASSERT(environment->HasBeenRegistered()); |
| 966 ASSERT(info()->IsOptimizing() || info()->IsStub()); |
| 967 int id = environment->deoptimization_index(); |
| 968 Deoptimizer::BailoutType bailout_type = |
| 969 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER; |
| 970 if (override_bailout_type) bailout_type = *override_bailout_type; |
| 971 Address entry = |
| 972 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); |
| 973 |
| 974 if (entry == NULL) { |
| 975 Abort(kBailoutWasNotPrepared); |
| 976 return bailout_type; |
| 977 } |
| 978 |
| 979 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) { |
| 980 Label not_zero; |
| 981 ExternalReference count = ExternalReference::stress_deopt_count(isolate()); |
| 982 |
| 983 __ Push(x0, x1, x2); |
| 984 __ Mrs(x2, NZCV); |
| 985 __ Mov(x0, Operand(count)); |
| 986 __ Ldr(w1, MemOperand(x0)); |
| 987 __ Subs(x1, x1, 1); |
| 988 __ B(gt, ¬_zero); |
| 989 __ Mov(w1, FLAG_deopt_every_n_times); |
| 990 __ Str(w1, MemOperand(x0)); |
| 991 __ Pop(x0, x1, x2); |
| 992 ASSERT(frame_is_built_); |
| 993 __ Call(entry, RelocInfo::RUNTIME_ENTRY); |
| 994 __ Unreachable(); |
| 995 |
| 996 __ Bind(¬_zero); |
| 997 __ Str(w1, MemOperand(x0)); |
| 998 __ Msr(NZCV, x2); |
| 999 __ Pop(x0, x1, x2); |
| 1000 } |
| 1001 |
| 1002 return bailout_type; |
| 1003 } |
| 1004 |
| 1005 |
| 1006 void LCodeGen::Deoptimize(LEnvironment* environment, |
| 1007 Deoptimizer::BailoutType bailout_type) { |
| 1008 ASSERT(environment->HasBeenRegistered()); |
| 1009 ASSERT(info()->IsOptimizing() || info()->IsStub()); |
| 1010 int id = environment->deoptimization_index(); |
| 1011 Address entry = |
| 1012 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); |
| 1013 |
| 1014 if (info()->ShouldTrapOnDeopt()) { |
| 1015 __ Debug("trap_on_deopt", __LINE__, BREAK); |
| 1016 } |
| 1017 |
| 1018 ASSERT(info()->IsStub() || frame_is_built_); |
| 1019 // Go through the jump table if we need to build a frame or restore caller doubles. |
| 1020 if (frame_is_built_ && !info()->saves_caller_doubles()) { |
| 1021 __ Call(entry, RelocInfo::RUNTIME_ENTRY); |
| 1022 } else { |
| 1023 // We often have several deopts to the same entry, reuse the last |
| 1024 // jump entry if this is the case. |
| 1025 if (deopt_jump_table_.is_empty() || |
| 1026 (deopt_jump_table_.last().address != entry) || |
| 1027 (deopt_jump_table_.last().bailout_type != bailout_type) || |
| 1028 (deopt_jump_table_.last().needs_frame != !frame_is_built_)) { |
| 1029 Deoptimizer::JumpTableEntry table_entry(entry, |
| 1030 bailout_type, |
| 1031 !frame_is_built_); |
| 1032 deopt_jump_table_.Add(table_entry, zone()); |
| 1033 } |
| 1034 __ B(&deopt_jump_table_.last().label); |
| 1035 } |
| 1036 } |
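Note: consecutive deopts that share the same entry address, bailout type and frame requirement reuse the last jump-table slot, so a run of checks deopting for the same reason costs one table entry plus a short branch per check instead of a full call sequence each time.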
| 1037 |
| 1038 |
| 1039 void LCodeGen::Deoptimize(LEnvironment* environment) { |
| 1040 Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL); |
| 1041 Deoptimize(environment, bailout_type); |
| 1042 } |
| 1043 |
| 1044 |
| 1045 void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment) { |
| 1046 Label dont_deopt; |
| 1047 Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL); |
| 1048 __ B(InvertCondition(cond), &dont_deopt); |
| 1049 Deoptimize(environment, bailout_type); |
| 1050 __ Bind(&dont_deopt); |
| 1051 } |
| 1052 |
| 1053 |
| 1054 void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* environment) { |
| 1055 Label dont_deopt; |
| 1056 Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL); |
| 1057 __ Cbnz(rt, &dont_deopt); |
| 1058 Deoptimize(environment, bailout_type); |
| 1059 __ Bind(&dont_deopt); |
| 1060 } |
| 1061 |
| 1062 |
| 1063 void LCodeGen::DeoptimizeIfNegative(Register rt, LEnvironment* environment) { |
| 1064 Label dont_deopt; |
| 1065 Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL); |
| 1066 __ Tbz(rt, rt.Is64Bits() ? kXSignBit : kWSignBit, &dont_deopt); |
| 1067 Deoptimize(environment, bailout_type); |
| 1068 __ Bind(&dont_deopt); |
| 1069 } |
| 1070 |
| 1071 |
| 1072 void LCodeGen::DeoptimizeIfSmi(Register rt, |
| 1073 LEnvironment* environment) { |
| 1074 Label dont_deopt; |
| 1075 Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL); |
| 1076 __ JumpIfNotSmi(rt, &dont_deopt); |
| 1077 Deoptimize(environment, bailout_type); |
| 1078 __ Bind(&dont_deopt); |
| 1079 } |
| 1080 |
| 1081 |
| 1082 void LCodeGen::DeoptimizeIfNotSmi(Register rt, LEnvironment* environment) { |
| 1083 Label dont_deopt; |
| 1084 Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL); |
| 1085 __ JumpIfSmi(rt, &dont_deopt); |
| 1086 Deoptimize(environment, bailout_type); |
| 1087 __ Bind(&dont_deopt); |
| 1088 } |
| 1089 |
| 1090 |
| 1091 void LCodeGen::DeoptimizeIfRoot(Register rt, |
| 1092 Heap::RootListIndex index, |
| 1093 LEnvironment* environment) { |
| 1094 Label dont_deopt; |
| 1095 Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL); |
| 1096 __ JumpIfNotRoot(rt, index, &dont_deopt); |
| 1097 Deoptimize(environment, bailout_type); |
| 1098 __ Bind(&dont_deopt); |
| 1099 } |
| 1100 |
| 1101 |
| 1102 void LCodeGen::DeoptimizeIfNotRoot(Register rt, |
| 1103 Heap::RootListIndex index, |
| 1104 LEnvironment* environment) { |
| 1105 Label dont_deopt; |
| 1106 Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL); |
| 1107 __ JumpIfRoot(rt, index, &dont_deopt); |
| 1108 Deoptimize(environment, bailout_type); |
| 1109 __ Bind(&dont_deopt); |
| 1110 } |
| 1111 |
| 1112 |
| 1113 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { |
| 1114 if (!info()->IsStub()) { |
| 1115 // Ensure that we have enough space after the previous lazy-bailout |
| 1116 // instruction for patching the code here. |
| 1117 intptr_t current_pc = masm()->pc_offset(); |
| 1118 |
| 1119 if (current_pc < (last_lazy_deopt_pc_ + space_needed)) { |
| 1120 ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; |
| 1121 ASSERT((padding_size % kInstructionSize) == 0); |
| 1122 InstructionAccurateScope instruction_accurate( |
| 1123 masm(), padding_size / kInstructionSize); |
| 1124 |
| 1125 while (padding_size > 0) { |
| 1126 __ nop(); |
| 1127 padding_size -= kInstructionSize; |
| 1128 } |
| 1129 } |
| 1130 } |
| 1131 last_lazy_deopt_pc_ = masm()->pc_offset(); |
| 1132 } |
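Note: a worked example with illustrative numbers: if space_needed is 4 * kInstructionSize and only one instruction has been emitted since last_lazy_deopt_pc_, padding_size is 3 * kInstructionSize and three nops are emitted. The padding guarantees the patcher can later overwrite the region after the previous lazy-bailout point with a call sequence without clobbering the code that follows it.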
| 1133 |
| 1134 |
| 1135 Register LCodeGen::ToRegister(LOperand* op) const { |
| 1136 // TODO(all): support zero register results, as ToRegister32 does. |
| 1137 ASSERT((op != NULL) && op->IsRegister()); |
| 1138 return Register::FromAllocationIndex(op->index()); |
| 1139 } |
| 1140 |
| 1141 |
| 1142 Register LCodeGen::ToRegister32(LOperand* op) const { |
| 1143 ASSERT(op != NULL); |
| 1144 if (op->IsConstantOperand()) { |
| 1145 // If this is a constant operand, the result must be the zero register. |
| 1146 ASSERT(ToInteger32(LConstantOperand::cast(op)) == 0); |
| 1147 return wzr; |
| 1148 } else { |
| 1149 return ToRegister(op).W(); |
| 1150 } |
| 1151 } |
| 1152 |
| 1153 |
| 1154 Smi* LCodeGen::ToSmi(LConstantOperand* op) const { |
| 1155 HConstant* constant = chunk_->LookupConstant(op); |
| 1156 return Smi::FromInt(constant->Integer32Value()); |
| 1157 } |
| 1158 |
| 1159 |
| 1160 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { |
| 1161 ASSERT((op != NULL) && op->IsDoubleRegister()); |
| 1162 return DoubleRegister::FromAllocationIndex(op->index()); |
| 1163 } |
| 1164 |
| 1165 |
| 1166 Operand LCodeGen::ToOperand(LOperand* op) { |
| 1167 ASSERT(op != NULL); |
| 1168 if (op->IsConstantOperand()) { |
| 1169 LConstantOperand* const_op = LConstantOperand::cast(op); |
| 1170 HConstant* constant = chunk()->LookupConstant(const_op); |
| 1171 Representation r = chunk_->LookupLiteralRepresentation(const_op); |
| 1172 if (r.IsSmi()) { |
| 1173 ASSERT(constant->HasSmiValue()); |
| 1174 return Operand(Smi::FromInt(constant->Integer32Value())); |
| 1175 } else if (r.IsInteger32()) { |
| 1176 ASSERT(constant->HasInteger32Value()); |
| 1177 return Operand(constant->Integer32Value()); |
| 1178 } else if (r.IsDouble()) { |
| 1179 Abort(kToOperandUnsupportedDoubleImmediate); |
| 1180 } |
| 1181 ASSERT(r.IsTagged()); |
| 1182 return Operand(constant->handle(isolate())); |
| 1183 } else if (op->IsRegister()) { |
| 1184 return Operand(ToRegister(op)); |
| 1185 } else if (op->IsDoubleRegister()) { |
| 1186 Abort(kToOperandIsDoubleRegisterUnimplemented); |
| 1187 return Operand(0); |
| 1188 } |
| 1189 // Stack slots not implemented, use ToMemOperand instead. |
| 1190 UNREACHABLE(); |
| 1191 return Operand(0); |
| 1192 } |
| 1193 |
| 1194 |
| 1195 Operand LCodeGen::ToOperand32I(LOperand* op) { |
| 1196 return ToOperand32(op, SIGNED_INT32); |
| 1197 } |
| 1198 |
| 1199 |
| 1200 Operand LCodeGen::ToOperand32U(LOperand* op) { |
| 1201 return ToOperand32(op, UNSIGNED_INT32); |
| 1202 } |
| 1203 |
| 1204 |
| 1205 Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) { |
| 1206 ASSERT(op != NULL); |
| 1207 if (op->IsRegister()) { |
| 1208 return Operand(ToRegister32(op)); |
| 1209 } else if (op->IsConstantOperand()) { |
| 1210 LConstantOperand* const_op = LConstantOperand::cast(op); |
| 1211 HConstant* constant = chunk()->LookupConstant(const_op); |
| 1212 Representation r = chunk_->LookupLiteralRepresentation(const_op); |
| 1213 if (r.IsInteger32()) { |
| 1214 ASSERT(constant->HasInteger32Value()); |
| 1215 return Operand(signedness == SIGNED_INT32 |
| 1216 ? constant->Integer32Value() |
| 1217 : static_cast<uint32_t>(constant->Integer32Value())); |
| 1218 } else { |
| 1219 // Other constants not implemented. |
| 1220 Abort(kToOperand32UnsupportedImmediate); |
| 1221 } |
| 1222 } |
| 1223 // Other cases are not implemented. |
| 1224 UNREACHABLE(); |
| 1225 return Operand(0); |
| 1226 } |
| 1227 |
| 1228 |
| 1229 static ptrdiff_t ArgumentsOffsetWithoutFrame(ptrdiff_t index) { |
| 1230 ASSERT(index < 0); |
| 1231 return -(index + 1) * kPointerSize; |
| 1232 } |
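Note: a quick check of the formula: arguments are addressed with negative indices when there is no frame, and index -1 maps to offset 0 (the slot at the stack pointer) while index -2 maps to kPointerSize, so more negative indices address slots further up the stack.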
| 1233 |
| 1234 |
| 1235 MemOperand LCodeGen::ToMemOperand(LOperand* op) const { |
| 1236 ASSERT(op != NULL); |
| 1237 ASSERT(!op->IsRegister()); |
| 1238 ASSERT(!op->IsDoubleRegister()); |
| 1239 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); |
| 1240 if (NeedsEagerFrame()) { |
| 1241 return MemOperand(fp, StackSlotOffset(op->index())); |
| 1242 } else { |
| 1243 // There is no eager frame; retrieve the parameter relative to the |
| 1244 // stack pointer. |
| 1245 return MemOperand(masm()->StackPointer(), |
| 1246 ArgumentsOffsetWithoutFrame(op->index())); |
| 1247 } |
| 1248 } |
| 1249 |
| 1250 |
| 1251 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const { |
| 1252 HConstant* constant = chunk_->LookupConstant(op); |
| 1253 ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); |
| 1254 return constant->handle(isolate()); |
| 1255 } |
| 1256 |
| 1257 |
| 1258 bool LCodeGen::IsSmi(LConstantOperand* op) const { |
| 1259 return chunk_->LookupLiteralRepresentation(op).IsSmi(); |
| 1260 } |
| 1261 |
| 1262 |
| 1263 bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const { |
| 1264 return op->IsConstantOperand() && |
| 1265 chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32(); |
| 1266 } |
| 1267 |
| 1268 |
| 1269 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { |
| 1270 HConstant* constant = chunk_->LookupConstant(op); |
| 1271 return constant->Integer32Value(); |
| 1272 } |
| 1273 |
| 1274 |
| 1275 double LCodeGen::ToDouble(LConstantOperand* op) const { |
| 1276 HConstant* constant = chunk_->LookupConstant(op); |
| 1277 ASSERT(constant->HasDoubleValue()); |
| 1278 return constant->DoubleValue(); |
| 1279 } |
| 1280 |
| 1281 |
| 1282 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { |
| 1283 Condition cond = nv; |
| 1284 switch (op) { |
| 1285 case Token::EQ: |
| 1286 case Token::EQ_STRICT: |
| 1287 cond = eq; |
| 1288 break; |
| 1289 case Token::NE: |
| 1290 case Token::NE_STRICT: |
| 1291 cond = ne; |
| 1292 break; |
| 1293 case Token::LT: |
| 1294 cond = is_unsigned ? lo : lt; |
| 1295 break; |
| 1296 case Token::GT: |
| 1297 cond = is_unsigned ? hi : gt; |
| 1298 break; |
| 1299 case Token::LTE: |
| 1300 cond = is_unsigned ? ls : le; |
| 1301 break; |
| 1302 case Token::GTE: |
| 1303 cond = is_unsigned ? hs : ge; |
| 1304 break; |
| 1305 case Token::IN: |
| 1306 case Token::INSTANCEOF: |
| 1307 default: |
| 1308 UNREACHABLE(); |
| 1309 } |
| 1310 return cond; |
| 1311 } |
| 1312 |
| 1313 |
| 1314 template<class InstrType> |
| 1315 void LCodeGen::EmitBranchGeneric(InstrType instr, |
| 1316 const BranchGenerator& branch) { |
| 1317 int left_block = instr->TrueDestination(chunk_); |
| 1318 int right_block = instr->FalseDestination(chunk_); |
| 1319 |
| 1320 int next_block = GetNextEmittedBlock(); |
| 1321 |
| 1322 if (right_block == left_block) { |
| 1323 EmitGoto(left_block); |
| 1324 } else if (left_block == next_block) { |
| 1325 branch.EmitInverted(chunk_->GetAssemblyLabel(right_block)); |
| 1326 } else if (right_block == next_block) { |
| 1327 branch.Emit(chunk_->GetAssemblyLabel(left_block)); |
| 1328 } else { |
| 1329 branch.Emit(chunk_->GetAssemblyLabel(left_block)); |
| 1330 __ B(chunk_->GetAssemblyLabel(right_block)); |
| 1331 } |
| 1332 } |
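Note: this is the usual fall-through optimization over the linearized block order. If both destinations are the same block, an unconditional goto suffices; if the true block is emitted next, one inverted branch to the false block is enough and the true case falls through; symmetrically when the false block is next; only when neither successor is next do we pay for a conditional branch plus an unconditional one.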
| 1333 |
| 1334 |
| 1335 template<class InstrType> |
| 1336 void LCodeGen::EmitBranch(InstrType instr, Condition condition) { |
| 1337 ASSERT((condition != al) && (condition != nv)); |
| 1338 BranchOnCondition branch(this, condition); |
| 1339 EmitBranchGeneric(instr, branch); |
| 1340 } |
| 1341 |
| 1342 |
| 1343 template<class InstrType> |
| 1344 void LCodeGen::EmitCompareAndBranch(InstrType instr, |
| 1345 Condition condition, |
| 1346 const Register& lhs, |
| 1347 const Operand& rhs) { |
| 1348 ASSERT((condition != al) && (condition != nv)); |
| 1349 CompareAndBranch branch(this, condition, lhs, rhs); |
| 1350 EmitBranchGeneric(instr, branch); |
| 1351 } |
| 1352 |
| 1353 |
| 1354 template<class InstrType> |
| 1355 void LCodeGen::EmitTestAndBranch(InstrType instr, |
| 1356 Condition condition, |
| 1357 const Register& value, |
| 1358 uint64_t mask) { |
| 1359 ASSERT((condition != al) && (condition != nv)); |
| 1360 TestAndBranch branch(this, condition, value, mask); |
| 1361 EmitBranchGeneric(instr, branch); |
| 1362 } |
| 1363 |
| 1364 |
| 1365 template<class InstrType> |
| 1366 void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr, |
| 1367 const FPRegister& value, |
| 1368 const FPRegister& scratch) { |
| 1369 BranchIfNonZeroNumber branch(this, value, scratch); |
| 1370 EmitBranchGeneric(instr, branch); |
| 1371 } |
| 1372 |
| 1373 |
| 1374 template<class InstrType> |
| 1375 void LCodeGen::EmitBranchIfHeapNumber(InstrType instr, |
| 1376 const Register& value) { |
| 1377 BranchIfHeapNumber branch(this, value); |
| 1378 EmitBranchGeneric(instr, branch); |
| 1379 } |
| 1380 |
| 1381 |
| 1382 template<class InstrType> |
| 1383 void LCodeGen::EmitBranchIfRoot(InstrType instr, |
| 1384 const Register& value, |
| 1385 Heap::RootListIndex index) { |
| 1386 BranchIfRoot branch(this, value, index); |
| 1387 EmitBranchGeneric(instr, branch); |
| 1388 } |
| 1389 |
| 1390 |
| 1391 void LCodeGen::DoGap(LGap* gap) { |
| 1392 for (int i = LGap::FIRST_INNER_POSITION; |
| 1393 i <= LGap::LAST_INNER_POSITION; |
| 1394 i++) { |
| 1395 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i); |
| 1396 LParallelMove* move = gap->GetParallelMove(inner_pos); |
| 1397 if (move != NULL) { |
| 1398 resolver_.Resolve(move); |
| 1399 } |
| 1400 } |
| 1401 } |
| 1402 |
| 1403 |
| 1404 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { |
| 1405 // TODO(all): Try to improve this, like ARM r17925. |
| 1406 Register arguments = ToRegister(instr->arguments()); |
| 1407 Register result = ToRegister(instr->result()); |
| 1408 |
| 1409 if (instr->length()->IsConstantOperand() && |
| 1410 instr->index()->IsConstantOperand()) { |
| 1411 ASSERT(instr->temp() == NULL); |
| 1412 int index = ToInteger32(LConstantOperand::cast(instr->index())); |
| 1413 int length = ToInteger32(LConstantOperand::cast(instr->length())); |
| 1414 int offset = ((length - index) + 1) * kPointerSize; |
| 1415 __ Ldr(result, MemOperand(arguments, offset)); |
| 1416 } else { |
| 1417 ASSERT(instr->temp() != NULL); |
| 1418 Register temp = ToRegister32(instr->temp()); |
| 1419 Register length = ToRegister32(instr->length()); |
| 1420 Operand index = ToOperand32I(instr->index()); |
| 1421 // There are two words between the frame pointer and the last arguments. |
| 1422 // Subtracting from length accounts for only one, so we add one more. |
| 1423 __ Sub(temp, length, index); |
| 1424 __ Add(temp, temp, 1); |
| 1425 __ Ldr(result, MemOperand(arguments, temp, UXTW, kPointerSizeLog2)); |
| 1426 } |
| 1427 } |
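Note: the constant and register paths agree on the offset arithmetic. With illustrative values length 4 and index 1, the constant path computes ((4 - 1) + 1) * kPointerSize, i.e. 4 words above the arguments pointer, and the register path computes the same temp = length - index + 1 and scales it by kPointerSizeLog2 through the UXTW-extended load.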
| 1428 |
| 1429 |
| 1430 void LCodeGen::DoAddE(LAddE* instr) { |
| 1431 Register result = ToRegister(instr->result()); |
| 1432 Register left = ToRegister(instr->left()); |
| 1433 Operand right = (instr->right()->IsConstantOperand()) |
| 1434 ? ToInteger32(LConstantOperand::cast(instr->right())) |
| 1435 : Operand(ToRegister32(instr->right()), SXTW); |
| 1436 |
| 1437 ASSERT(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)); |
| 1438 __ Add(result, left, right); |
| 1439 } |
| 1440 |
| 1441 |
| 1442 void LCodeGen::DoAddI(LAddI* instr) { |
| 1443 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 1444 Register result = ToRegister32(instr->result()); |
| 1445 Register left = ToRegister32(instr->left()); |
| 1446 Operand right = ToOperand32I(instr->right()); |
| 1447 if (can_overflow) { |
| 1448 __ Adds(result, left, right); |
| 1449 DeoptimizeIf(vs, instr->environment()); |
| 1450 } else { |
| 1451 __ Add(result, left, right); |
| 1452 } |
| 1453 } |
| 1454 |
| 1455 |
| 1456 void LCodeGen::DoAddS(LAddS* instr) { |
| 1457 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 1458 Register result = ToRegister(instr->result()); |
| 1459 Register left = ToRegister(instr->left()); |
| 1460 Operand right = ToOperand(instr->right()); |
| 1461 if (can_overflow) { |
| 1462 __ Adds(result, left, right); |
| 1463 DeoptimizeIf(vs, instr->environment()); |
| 1464 } else { |
| 1465 __ Add(result, left, right); |
| 1466 } |
| 1467 } |
| 1468 |
| 1469 |
| 1470 void LCodeGen::DoAllocate(LAllocate* instr) { |
| 1471 class DeferredAllocate: public LDeferredCode { |
| 1472 public: |
| 1473 DeferredAllocate(LCodeGen* codegen, LAllocate* instr) |
| 1474 : LDeferredCode(codegen), instr_(instr) { } |
| 1475 virtual void Generate() { codegen()->DoDeferredAllocate(instr_); } |
| 1476 virtual LInstruction* instr() { return instr_; } |
| 1477 private: |
| 1478 LAllocate* instr_; |
| 1479 }; |
| 1480 |
| 1481 DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr); |
| 1482 |
| 1483 Register result = ToRegister(instr->result()); |
| 1484 Register temp1 = ToRegister(instr->temp1()); |
| 1485 Register temp2 = ToRegister(instr->temp2()); |
| 1486 |
| 1487 // Allocate memory for the object. |
| 1488 AllocationFlags flags = TAG_OBJECT; |
| 1489 if (instr->hydrogen()->MustAllocateDoubleAligned()) { |
| 1490 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); |
| 1491 } |
| 1492 |
| 1493 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { |
| 1494 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); |
| 1495 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); |
| 1496 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); |
| 1497 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { |
| 1498 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); |
| 1499 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE); |
| 1500 } |
| 1501 |
| 1502 if (instr->size()->IsConstantOperand()) { |
| 1503 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); |
| 1504 __ Allocate(size, result, temp1, temp2, deferred->entry(), flags); |
| 1505 } else { |
| 1506 Register size = ToRegister(instr->size()); |
| 1507 __ Allocate(size, result, temp1, temp2, deferred->entry(), flags); |
| 1508 } |
| 1509 |
| 1510 __ Bind(deferred->exit()); |
| 1511 |
| 1512 if (instr->hydrogen()->MustPrefillWithFiller()) { |
| 1513 if (instr->size()->IsConstantOperand()) { |
| 1514 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); |
| 1515 __ Mov(temp1, size - kPointerSize); |
| 1516 } else { |
| 1517 __ Sub(temp1, ToRegister(instr->size()), kPointerSize); |
| 1518 } |
| 1519 __ Sub(result, result, kHeapObjectTag); |
| 1520 |
| 1521 // TODO(jbramley): Optimize this loop using stp. |
| 1522 Label loop; |
| 1523 __ Bind(&loop); |
| 1524 __ Mov(temp2, Operand(isolate()->factory()->one_pointer_filler_map())); |
| 1525 __ Str(temp2, MemOperand(result, temp1)); |
| 1526 __ Subs(temp1, temp1, kPointerSize); |
| 1527 __ B(ge, &loop); |
| 1528 |
| 1529 __ Add(result, result, kHeapObjectTag); |
| 1530 } |
| 1531 } |
| 1532 |
| 1533 |
| 1534 void LCodeGen::DoDeferredAllocate(LAllocate* instr) { |
| 1535 // TODO(3095996): Get rid of this. For now, we need to make the |
| 1536 // result register contain a valid pointer because it is already |
| 1537 // contained in the register pointer map. |
| 1538 __ Mov(ToRegister(instr->result()), Operand(Smi::FromInt(0))); |
| 1539 |
| 1540 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
| 1541 // We're in a SafepointRegistersScope so we can use any scratch registers. |
| 1542 Register size = x0; |
| 1543 if (instr->size()->IsConstantOperand()) { |
| 1544 __ Mov(size, Operand(ToSmi(LConstantOperand::cast(instr->size())))); |
| 1545 } else { |
| 1546 __ SmiTag(size, ToRegister(instr->size())); |
| 1547 } |
| 1548 int flags = AllocateDoubleAlignFlag::encode( |
| 1549 instr->hydrogen()->MustAllocateDoubleAligned()); |
| 1550 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { |
| 1551 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); |
| 1552 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); |
| 1553 flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE); |
| 1554 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { |
| 1555 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); |
| 1556 flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE); |
| 1557 } else { |
| 1558 flags = AllocateTargetSpace::update(flags, NEW_SPACE); |
| 1559 } |
| 1560 __ Mov(x10, Operand(Smi::FromInt(flags))); |
| 1561 __ Push(size, x10); |
| 1562 |
| 1563 CallRuntimeFromDeferred( |
| 1564 Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); |
| 1565 __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result())); |
| 1566 } |
| 1567 |
| 1568 |
| 1569 void LCodeGen::DoApplyArguments(LApplyArguments* instr) { |
| 1570 Register receiver = ToRegister(instr->receiver()); |
| 1571 Register function = ToRegister(instr->function()); |
| 1572 Register length = ToRegister(instr->length()); |
| 1573 Register elements = ToRegister(instr->elements()); |
| 1574 Register scratch = x5; |
| 1575 ASSERT(receiver.Is(x0)); // Used for parameter count. |
| 1576 ASSERT(function.Is(x1)); // Required by InvokeFunction. |
| 1577 ASSERT(ToRegister(instr->result()).Is(x0)); |
| 1578 ASSERT(instr->IsMarkedAsCall()); |
| 1579 |
| 1580 // Copy the arguments to this function possibly from the |
| 1581 // adaptor frame below it. |
| 1582 const uint32_t kArgumentsLimit = 1 * KB; |
| 1583 __ Cmp(length, kArgumentsLimit); |
| 1584 DeoptimizeIf(hi, instr->environment()); |
| 1585 |
| 1586 // Push the receiver and use the register to keep the original |
| 1587 // number of arguments. |
| 1588 __ Push(receiver); |
| 1589 Register argc = receiver; |
| 1590 receiver = NoReg; |
| 1591 __ Mov(argc, length); |
| 1592 // The arguments are at an offset of one pointer size from elements.
| 1593 __ Add(elements, elements, 1 * kPointerSize); |
| 1594 |
| 1595 // Loop through the arguments pushing them onto the execution |
| 1596 // stack. |
| 1597 Label invoke, loop; |
| 1598 // length is a small non-negative integer, due to the test above. |
| 1599 __ Cbz(length, &invoke); |
| 1600 __ Bind(&loop); |
| 1601 __ Ldr(scratch, MemOperand(elements, length, LSL, kPointerSizeLog2)); |
| 1602 __ Push(scratch); |
| 1603 __ Subs(length, length, 1); |
| 1604 __ B(ne, &loop); |
| 1605 |
| 1606 __ Bind(&invoke); |
| 1607 ASSERT(instr->HasPointerMap()); |
| 1608 LPointerMap* pointers = instr->pointer_map(); |
| 1609 SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt); |
| 1610 // The number of arguments is stored in argc (receiver) which is x0, as |
| 1611 // expected by InvokeFunction. |
| 1612 ParameterCount actual(argc); |
| 1613 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator); |
| 1614 } |
| 1615 |
| 1616 |
| 1617 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { |
| 1618 Register result = ToRegister(instr->result()); |
| 1619 |
| 1620 if (instr->hydrogen()->from_inlined()) { |
| 1621 // When we are inside an inlined function, the arguments are the last things |
| 1622 // that have been pushed on the stack. Therefore the arguments array can be |
| 1623 // accessed directly from jssp. |
| 1624 // However, in the normal case the array is accessed via fp, and there are
| 1625 // two words on the stack between fp and the arguments (the saved lr and
| 1626 // fp), which the LAccessArgumentsAt implementation takes into account.
| 1627 // In the inlined case we need to subtract the size of those two words from
| 1628 // jssp to get a pointer that works with LAccessArgumentsAt.
| 1629 ASSERT(masm()->StackPointer().Is(jssp)); |
| 1630 __ Sub(result, jssp, 2 * kPointerSize); |
| 1631 } else { |
| 1632 ASSERT(instr->temp() != NULL); |
| 1633 Register previous_fp = ToRegister(instr->temp()); |
| 1634 |
| 1635 __ Ldr(previous_fp, |
| 1636 MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
| 1637 __ Ldr(result, |
| 1638 MemOperand(previous_fp, StandardFrameConstants::kContextOffset)); |
| 1639 __ Cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
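| // The Csel below selects previous_fp when the marker matches (eq), i.e.
| // when the caller frame is an arguments adaptor, and fp otherwise.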
| 1640 __ Csel(result, fp, previous_fp, ne); |
| 1641 } |
| 1642 } |
| 1643 |
| 1644 |
| 1645 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { |
| 1646 Register elements = ToRegister(instr->elements()); |
| 1647 Register result = ToRegister(instr->result()); |
| 1648 Label done; |
| 1649 |
| 1650 // If there is no arguments adaptor frame, the number of arguments is fixed.
| 1651 __ Cmp(fp, elements); |
| 1652 __ Mov(result, scope()->num_parameters()); |
| 1653 __ B(eq, &done); |
| 1654 |
| 1655 // Arguments adaptor frame present. Get argument length from there. |
| 1656 __ Ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
| 1657 __ Ldrsw(result, |
| 1658 UntagSmiMemOperand(result, |
| 1659 ArgumentsAdaptorFrameConstants::kLengthOffset)); |
| 1660 |
| 1661 // Argument length is in result register. |
| 1662 __ Bind(&done); |
| 1663 } |
| 1664 |
| 1665 |
| 1666 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { |
| 1667 DoubleRegister left = ToDoubleRegister(instr->left()); |
| 1668 DoubleRegister right = ToDoubleRegister(instr->right()); |
| 1669 DoubleRegister result = ToDoubleRegister(instr->result()); |
| 1670 |
| 1671 switch (instr->op()) { |
| 1672 case Token::ADD: __ Fadd(result, left, right); break; |
| 1673 case Token::SUB: __ Fsub(result, left, right); break; |
| 1674 case Token::MUL: __ Fmul(result, left, right); break; |
| 1675 case Token::DIV: __ Fdiv(result, left, right); break; |
| 1676 case Token::MOD: { |
| 1677 // The ECMA-262 remainder operator is the remainder from a truncating |
| 1678 // (round-towards-zero) division. Note that this differs from IEEE-754. |
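| // For example, 5 % 3 is 2 in JavaScript (truncating quotient 1), whereas
| // the IEEE-754 remainder rounds the quotient to the nearest integer (2
| // here) and so yields 5 - (2 * 3) = -1.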
| 1679 // |
| 1680 // TODO(jbramley): See if it's possible to do this inline, rather than by |
| 1681 // calling a helper function. With frintz (to produce the intermediate |
| 1682 // quotient) and fmsub (to calculate the remainder without loss of |
| 1683 // precision), it should be possible. However, we would need support for |
| 1684 // fdiv in round-towards-zero mode, and the A64 simulator doesn't support |
| 1685 // that yet. |
| 1686 ASSERT(left.Is(d0)); |
| 1687 ASSERT(right.Is(d1)); |
| 1688 __ CallCFunction( |
| 1689 ExternalReference::mod_two_doubles_operation(isolate()), |
| 1690 0, 2); |
| 1691 ASSERT(result.Is(d0)); |
| 1692 break; |
| 1693 } |
| 1694 default: |
| 1695 UNREACHABLE(); |
| 1696 break; |
| 1697 } |
| 1698 } |
| 1699 |
| 1700 |
| 1701 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { |
| 1702 ASSERT(ToRegister(instr->context()).is(cp)); |
| 1703 ASSERT(ToRegister(instr->left()).is(x1)); |
| 1704 ASSERT(ToRegister(instr->right()).is(x0)); |
| 1705 ASSERT(ToRegister(instr->result()).is(x0)); |
| 1706 |
| 1707 BinaryOpICStub stub(instr->op(), NO_OVERWRITE); |
| 1708 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| 1709 } |
| 1710 |
| 1711 |
| 1712 void LCodeGen::DoBitI(LBitI* instr) { |
| 1713 Register result = ToRegister32(instr->result()); |
| 1714 Register left = ToRegister32(instr->left()); |
| 1715 Operand right = ToOperand32U(instr->right()); |
| 1716 |
| 1717 switch (instr->op()) { |
| 1718 case Token::BIT_AND: __ And(result, left, right); break; |
| 1719 case Token::BIT_OR: __ Orr(result, left, right); break; |
| 1720 case Token::BIT_XOR: __ Eor(result, left, right); break; |
| 1721 default: |
| 1722 UNREACHABLE(); |
| 1723 break; |
| 1724 } |
| 1725 } |
| 1726 |
| 1727 |
| 1728 void LCodeGen::DoBitS(LBitS* instr) { |
| 1729 Register result = ToRegister(instr->result()); |
| 1730 Register left = ToRegister(instr->left()); |
| 1731 Operand right = ToOperand(instr->right()); |
| 1732 |
| 1733 switch (instr->op()) { |
| 1734 case Token::BIT_AND: __ And(result, left, right); break; |
| 1735 case Token::BIT_OR: __ Orr(result, left, right); break; |
| 1736 case Token::BIT_XOR: __ Eor(result, left, right); break; |
| 1737 default: |
| 1738 UNREACHABLE(); |
| 1739 break; |
| 1740 } |
| 1741 } |
| 1742 |
| 1743 |
| 1744 void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) { |
| 1745 if (FLAG_debug_code && check->hydrogen()->skip_check()) { |
| 1746 __ Assert(InvertCondition(cc), kEliminatedBoundsCheckFailed); |
| 1747 } else { |
| 1748 DeoptimizeIf(cc, check->environment()); |
| 1749 } |
| 1750 } |
| 1751 |
| 1752 |
| 1753 void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) { |
| 1754 if (instr->hydrogen()->skip_check()) return; |
| 1755 |
| 1756 Register length = ToRegister(instr->length()); |
| 1757 |
| 1758 if (instr->index()->IsConstantOperand()) { |
| 1759 int constant_index = |
| 1760 ToInteger32(LConstantOperand::cast(instr->index())); |
| 1761 |
| 1762 if (instr->hydrogen()->length()->representation().IsSmi()) { |
| 1763 __ Cmp(length, Operand(Smi::FromInt(constant_index))); |
| 1764 } else { |
| 1765 __ Cmp(length, Operand(constant_index)); |
| 1766 } |
| 1767 } else { |
| 1768 __ Cmp(length, ToRegister(instr->index())); |
| 1769 } |
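| // The flags now reflect (length compared with index). We deopt on ls
| // (length <= index) in the normal case, or on lo (length < index) when
| // index == length is permitted.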
| 1770 Condition condition = instr->hydrogen()->allow_equality() ? lo : ls; |
| 1771 ApplyCheckIf(condition, instr); |
| 1772 } |
| 1773 |
| 1774 |
| 1775 void LCodeGen::DoBranch(LBranch* instr) { |
| 1776 Representation r = instr->hydrogen()->value()->representation(); |
| 1777 Label* true_label = instr->TrueLabel(chunk_); |
| 1778 Label* false_label = instr->FalseLabel(chunk_); |
| 1779 |
| 1780 if (r.IsInteger32()) { |
| 1781 ASSERT(!info()->IsStub()); |
| 1782 EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0); |
| 1783 } else if (r.IsSmi()) { |
| 1784 ASSERT(!info()->IsStub()); |
| 1785 STATIC_ASSERT(kSmiTag == 0); |
| 1786 EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0); |
| 1787 } else if (r.IsDouble()) { |
| 1788 DoubleRegister value = ToDoubleRegister(instr->value()); |
| 1789 // Test the double value. Zero and NaN are false. |
| 1790 EmitBranchIfNonZeroNumber(instr, value, double_scratch()); |
| 1791 } else { |
| 1792 ASSERT(r.IsTagged()); |
| 1793 Register value = ToRegister(instr->value()); |
| 1794 HType type = instr->hydrogen()->value()->type(); |
| 1795 |
| 1796 if (type.IsBoolean()) { |
| 1797 ASSERT(!info()->IsStub()); |
| 1798 __ CompareRoot(value, Heap::kTrueValueRootIndex); |
| 1799 EmitBranch(instr, eq); |
| 1800 } else if (type.IsSmi()) { |
| 1801 ASSERT(!info()->IsStub()); |
| 1802 EmitCompareAndBranch(instr, ne, value, Operand(Smi::FromInt(0))); |
| 1803 } else if (type.IsJSArray()) { |
| 1804 ASSERT(!info()->IsStub()); |
| 1805 EmitGoto(instr->TrueDestination(chunk())); |
| 1806 } else if (type.IsHeapNumber()) { |
| 1807 ASSERT(!info()->IsStub()); |
| 1808 __ Ldr(double_scratch(), FieldMemOperand(value, |
| 1809 HeapNumber::kValueOffset)); |
| 1810 // Test the double value. Zero and NaN are false. |
| 1811 EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch()); |
| 1812 } else if (type.IsString()) { |
| 1813 ASSERT(!info()->IsStub()); |
| 1814 Register temp = ToRegister(instr->temp1()); |
| 1815 __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset)); |
| 1816 EmitCompareAndBranch(instr, ne, temp, 0); |
| 1817 } else { |
| 1818 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); |
| 1819 // Avoid deopts in the case where we've never executed this path before. |
| 1820 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); |
| 1821 |
| 1822 if (expected.Contains(ToBooleanStub::UNDEFINED)) { |
| 1823 // undefined -> false. |
| 1824 __ JumpIfRoot( |
| 1825 value, Heap::kUndefinedValueRootIndex, false_label); |
| 1826 } |
| 1827 |
| 1828 if (expected.Contains(ToBooleanStub::BOOLEAN)) { |
| 1829 // Boolean -> its value. |
| 1830 __ JumpIfRoot( |
| 1831 value, Heap::kTrueValueRootIndex, true_label); |
| 1832 __ JumpIfRoot( |
| 1833 value, Heap::kFalseValueRootIndex, false_label); |
| 1834 } |
| 1835 |
| 1836 if (expected.Contains(ToBooleanStub::NULL_TYPE)) { |
| 1837 // 'null' -> false. |
| 1838 __ JumpIfRoot( |
| 1839 value, Heap::kNullValueRootIndex, false_label); |
| 1840 } |
| 1841 |
| 1842 if (expected.Contains(ToBooleanStub::SMI)) { |
| 1843 // Smis: 0 -> false, all other -> true. |
| 1844 ASSERT(Smi::FromInt(0) == 0); |
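| // Smi zero is the all-zero word (both the tag and the payload are zero),
| // so Cbz can test for it directly.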
| 1845 __ Cbz(value, false_label); |
| 1846 __ JumpIfSmi(value, true_label); |
| 1847 } else if (expected.NeedsMap()) { |
| 1848 // If we need a map later and have a smi, deopt. |
| 1849 DeoptimizeIfSmi(value, instr->environment()); |
| 1850 } |
| 1851 |
| 1852 Register map = NoReg; |
| 1853 Register scratch = NoReg; |
| 1854 |
| 1855 if (expected.NeedsMap()) { |
| 1856 ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL)); |
| 1857 map = ToRegister(instr->temp1()); |
| 1858 scratch = ToRegister(instr->temp2()); |
| 1859 |
| 1860 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); |
| 1861 |
| 1862 if (expected.CanBeUndetectable()) { |
| 1863 // Undetectable -> false. |
| 1864 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); |
| 1865 __ TestAndBranchIfAnySet( |
| 1866 scratch, 1 << Map::kIsUndetectable, false_label); |
| 1867 } |
| 1868 } |
| 1869 |
| 1870 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) { |
| 1871 // spec object -> true. |
| 1872 __ CompareInstanceType(map, scratch, FIRST_SPEC_OBJECT_TYPE); |
| 1873 __ B(ge, true_label); |
| 1874 } |
| 1875 |
| 1876 if (expected.Contains(ToBooleanStub::STRING)) { |
| 1877 // String value -> false iff empty. |
| 1878 Label not_string; |
| 1879 __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE); |
| 1880 __ B(ge, ¬_string); |
| 1881 __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset)); |
| 1882 __ Cbz(scratch, false_label); |
| 1883 __ B(true_label); |
| 1884 __ Bind(¬_string); |
| 1885 } |
| 1886 |
| 1887 if (expected.Contains(ToBooleanStub::SYMBOL)) { |
| 1888 // Symbol value -> true. |
| 1889 __ CompareInstanceType(map, scratch, SYMBOL_TYPE); |
| 1890 __ B(eq, true_label); |
| 1891 } |
| 1892 |
| 1893 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { |
| 1894 Label not_heap_number; |
| 1895 __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, ¬_heap_number); |
| 1896 |
| 1897 __ Ldr(double_scratch(), |
| 1898 FieldMemOperand(value, HeapNumber::kValueOffset)); |
| 1899 __ Fcmp(double_scratch(), 0.0); |
| 1900 // If we got a NaN (overflow bit is set), jump to the false branch. |
| 1901 __ B(vs, false_label); |
| 1902 __ B(eq, false_label); |
| 1903 __ B(true_label); |
| 1904 __ Bind(¬_heap_number); |
| 1905 } |
| 1906 |
| 1907 if (!expected.IsGeneric()) { |
| 1908 // We've seen something for the first time -> deopt. |
| 1909 // This can only happen if we are not generic already. |
| 1910 Deoptimize(instr->environment()); |
| 1911 } |
| 1912 } |
| 1913 } |
| 1914 } |
| 1915 |
| 1916 |
| 1917 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, |
| 1918 int formal_parameter_count, |
| 1919 int arity, |
| 1920 LInstruction* instr, |
| 1921 Register function_reg) { |
| 1922 bool dont_adapt_arguments = |
| 1923 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; |
| 1924 bool can_invoke_directly = |
| 1925 dont_adapt_arguments || formal_parameter_count == arity; |
| 1926 |
| 1927 // The function interface relies on the following register assignments. |
| 1928 ASSERT(function_reg.Is(x1) || function_reg.IsNone()); |
| 1929 Register arity_reg = x0; |
| 1930 |
| 1931 LPointerMap* pointers = instr->pointer_map(); |
| 1932 |
| 1933 // If necessary, load the function object. |
| 1934 if (function_reg.IsNone()) { |
| 1935 function_reg = x1; |
| 1936 __ LoadObject(function_reg, function); |
| 1937 } |
| 1938 |
| 1939 if (FLAG_debug_code) { |
| 1940 Label is_not_smi; |
| 1941 // Try to confirm that function_reg (x1) is a tagged pointer. |
| 1942 __ JumpIfNotSmi(function_reg, &is_not_smi); |
| 1943 __ Abort(kExpectedFunctionObject); |
| 1944 __ Bind(&is_not_smi); |
| 1945 } |
| 1946 |
| 1947 if (can_invoke_directly) { |
| 1948 // Change context. |
| 1949 __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset)); |
| 1950 |
| 1951 // Set the arguments count if adaptation is not needed. Assumes that x0 is
| 1952 // available to write to at this point. |
| 1953 if (dont_adapt_arguments) { |
| 1954 __ Mov(arity_reg, arity); |
| 1955 } |
| 1956 |
| 1957 // Invoke function. |
| 1958 __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset)); |
| 1959 __ Call(x10); |
| 1960 |
| 1961 // Set up deoptimization. |
| 1962 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); |
| 1963 } else { |
| 1964 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); |
| 1965 ParameterCount count(arity); |
| 1966 ParameterCount expected(formal_parameter_count); |
| 1967 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator); |
| 1968 } |
| 1969 } |
| 1970 |
| 1971 |
| 1972 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { |
| 1973 ASSERT(instr->IsMarkedAsCall()); |
| 1974 ASSERT(ToRegister(instr->result()).Is(x0)); |
| 1975 |
| 1976 LPointerMap* pointers = instr->pointer_map(); |
| 1977 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); |
| 1978 |
| 1979 if (instr->target()->IsConstantOperand()) { |
| 1980 LConstantOperand* target = LConstantOperand::cast(instr->target()); |
| 1981 Handle<Code> code = Handle<Code>::cast(ToHandle(target)); |
| 1982 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); |
| 1983 // TODO(all): On ARM we use a call descriptor to specify a storage mode,
| 1984 // but on A64 we only have one storage mode so it isn't necessary. Check
| 1985 // that this understanding is correct.
| 1986 __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None()); |
| 1987 } else { |
| 1988 ASSERT(instr->target()->IsRegister()); |
| 1989 Register target = ToRegister(instr->target()); |
| 1990 generator.BeforeCall(__ CallSize(target)); |
| 1991 __ Add(target, target, Code::kHeaderSize - kHeapObjectTag); |
| 1992 __ Call(target); |
| 1993 } |
| 1994 generator.AfterCall(); |
| 1995 } |
| 1996 |
| 1997 |
| 1998 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) { |
| 1999 ASSERT(instr->IsMarkedAsCall()); |
| 2000 ASSERT(ToRegister(instr->function()).is(x1)); |
| 2001 |
| 2002 if (instr->hydrogen()->pass_argument_count()) { |
| 2003 __ Mov(x0, Operand(instr->arity())); |
| 2004 } |
| 2005 |
| 2006 // Change context. |
| 2007 __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset)); |
| 2008 |
| 2009 // Load the code entry address.
| 2010 __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset)); |
| 2011 __ Call(x10); |
| 2012 |
| 2013 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); |
| 2014 } |
| 2015 |
| 2016 |
| 2017 void LCodeGen::DoCallRuntime(LCallRuntime* instr) { |
| 2018 CallRuntime(instr->function(), instr->arity(), instr); |
| 2019 } |
| 2020 |
| 2021 |
| 2022 void LCodeGen::DoCallStub(LCallStub* instr) { |
| 2023 ASSERT(ToRegister(instr->context()).is(cp)); |
| 2024 ASSERT(ToRegister(instr->result()).is(x0)); |
| 2025 switch (instr->hydrogen()->major_key()) { |
| 2026 case CodeStub::RegExpExec: { |
| 2027 RegExpExecStub stub; |
| 2028 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| 2029 break; |
| 2030 } |
| 2031 case CodeStub::SubString: { |
| 2032 SubStringStub stub; |
| 2033 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| 2034 break; |
| 2035 } |
| 2036 case CodeStub::StringCompare: { |
| 2037 StringCompareStub stub; |
| 2038 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| 2039 break; |
| 2040 } |
| 2041 default: |
| 2042 UNREACHABLE(); |
| 2043 } |
| 2044 } |
| 2045 |
| 2046 |
| 2047 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { |
| 2048 GenerateOsrPrologue(); |
| 2049 } |
| 2050 |
| 2051 |
| 2052 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { |
| 2053 Register temp = ToRegister(instr->temp()); |
| 2054 { |
| 2055 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
| 2056 __ Push(object); |
| 2057 __ Mov(cp, 0); |
| 2058 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); |
| 2059 RecordSafepointWithRegisters( |
| 2060 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); |
| 2061 __ StoreToSafepointRegisterSlot(x0, temp); |
| 2062 } |
| 2063 DeoptimizeIfSmi(temp, instr->environment()); |
| 2064 } |
| 2065 |
| 2066 |
| 2067 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
| 2068 class DeferredCheckMaps: public LDeferredCode { |
| 2069 public: |
| 2070 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) |
| 2071 : LDeferredCode(codegen), instr_(instr), object_(object) { |
| 2072 SetExit(check_maps()); |
| 2073 } |
| 2074 virtual void Generate() { |
| 2075 codegen()->DoDeferredInstanceMigration(instr_, object_); |
| 2076 } |
| 2077 Label* check_maps() { return &check_maps_; } |
| 2078 virtual LInstruction* instr() { return instr_; } |
| 2079 private: |
| 2080 LCheckMaps* instr_; |
| 2081 Label check_maps_; |
| 2082 Register object_; |
| 2083 }; |
| 2084 |
| 2085 if (instr->hydrogen()->CanOmitMapChecks()) { |
| 2086 ASSERT(instr->value() == NULL); |
| 2087 ASSERT(instr->temp() == NULL); |
| 2088 return; |
| 2089 } |
| 2090 |
| 2091 Register object = ToRegister(instr->value()); |
| 2092 Register map_reg = ToRegister(instr->temp()); |
| 2093 |
| 2094 __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 2095 |
| 2096 DeferredCheckMaps* deferred = NULL; |
| 2097 if (instr->hydrogen()->has_migration_target()) { |
| 2098 deferred = new(zone()) DeferredCheckMaps(this, instr, object); |
| 2099 __ Bind(deferred->check_maps()); |
| 2100 } |
| 2101 |
| 2102 UniqueSet<Map> map_set = instr->hydrogen()->map_set(); |
| 2103 Label success; |
| 2104 for (int i = 0; i < map_set.size(); i++) { |
| 2105 Handle<Map> map = map_set.at(i).handle(); |
| 2106 __ CompareMap(map_reg, map, &success); |
| 2107 __ B(eq, &success); |
| 2108 } |
| 2109 |
| 2110 // We didn't match a map. |
| 2111 if (instr->hydrogen()->has_migration_target()) { |
| 2112 __ B(deferred->entry()); |
| 2113 } else { |
| 2114 Deoptimize(instr->environment()); |
| 2115 } |
| 2116 |
| 2117 __ Bind(&success); |
| 2118 } |
| 2119 |
| 2120 |
| 2121 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { |
| 2122 if (!instr->hydrogen()->value()->IsHeapObject()) { |
| 2123 // TODO(all): Depending on how we choose to implement the deopt, if we could
| 2124 // guarantee that we have a deopt handler reachable by a tbz instruction, |
| 2125 // we could use tbz here and produce less code to support this instruction. |
| 2126 DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment()); |
| 2127 } |
| 2128 } |
| 2129 |
| 2130 |
| 2131 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { |
| 2132 Register value = ToRegister(instr->value()); |
| 2133 ASSERT(!instr->result() || ToRegister(instr->result()).Is(value)); |
| 2134 // TODO(all): See DoCheckNonSmi for comments on use of tbz. |
| 2135 DeoptimizeIfNotSmi(value, instr->environment()); |
| 2136 } |
| 2137 |
| 2138 |
| 2139 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { |
| 2140 Register input = ToRegister(instr->value()); |
| 2141 Register scratch = ToRegister(instr->temp()); |
| 2142 |
| 2143 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 2144 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
| 2145 |
| 2146 if (instr->hydrogen()->is_interval_check()) { |
| 2147 InstanceType first, last; |
| 2148 instr->hydrogen()->GetCheckInterval(&first, &last); |
| 2149 |
| 2150 __ Cmp(scratch, first); |
| 2151 if (first == last) { |
| 2152 // If there is only one type in the interval check for equality. |
| 2153 DeoptimizeIf(ne, instr->environment()); |
| 2154 } else if (last == LAST_TYPE) { |
| 2155 // We don't need to compare with the higher bound of the interval. |
| 2156 DeoptimizeIf(lo, instr->environment()); |
| 2157 } else { |
| 2158 // If we are below the lower bound, set the C flag and clear the Z flag |
| 2159 // to force a deopt. |
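| // If 'hs' holds (scratch >= first), Ccmp compares scratch with 'last';
| // otherwise it sets the flags to CFlag (C set, Z clear), which satisfies
| // 'hi'. Either way, 'hi' means the type lies outside [first, last].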
| 2160 __ Ccmp(scratch, last, CFlag, hs); |
| 2161 DeoptimizeIf(hi, instr->environment()); |
| 2162 } |
| 2163 } else { |
| 2164 uint8_t mask; |
| 2165 uint8_t tag; |
| 2166 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); |
| 2167 |
| 2168 if (IsPowerOf2(mask)) { |
| 2169 ASSERT((tag == 0) || (tag == mask)); |
| 2170 // TODO(all): We might be able to use tbz/tbnz if we can guarantee that |
| 2171 // the deopt handler is reachable by a tbz instruction. |
| 2172 __ Tst(scratch, mask); |
| 2173 DeoptimizeIf(tag == 0 ? ne : eq, instr->environment()); |
| 2174 } else { |
| 2175 if (tag == 0) { |
| 2176 __ Tst(scratch, mask); |
| 2177 } else { |
| 2178 __ And(scratch, scratch, mask); |
| 2179 __ Cmp(scratch, tag); |
| 2180 } |
| 2181 DeoptimizeIf(ne, instr->environment()); |
| 2182 } |
| 2183 } |
| 2184 } |
| 2185 |
| 2186 |
| 2187 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
| 2188 DoubleRegister input = ToDoubleRegister(instr->unclamped()); |
| 2189 Register result = ToRegister(instr->result()); |
| 2190 __ ClampDoubleToUint8(result, input, double_scratch()); |
| 2191 } |
| 2192 |
| 2193 |
| 2194 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { |
| 2195 Register input = ToRegister32(instr->unclamped()); |
| 2196 Register result = ToRegister32(instr->result()); |
| 2197 __ ClampInt32ToUint8(result, input); |
| 2198 } |
| 2199 |
| 2200 |
| 2201 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { |
| 2202 Register input = ToRegister(instr->unclamped()); |
| 2203 Register result = ToRegister(instr->result()); |
| 2204 Register scratch = ToRegister(instr->temp1()); |
| 2205 Label done; |
| 2206 |
| 2207 // Both smi and heap number cases are handled. |
| 2208 Label is_not_smi; |
| 2209 __ JumpIfNotSmi(input, &is_not_smi); |
| 2210 __ SmiUntag(result, input); |
| 2211 __ ClampInt32ToUint8(result); |
| 2212 __ B(&done); |
| 2213 |
| 2214 __ Bind(&is_not_smi); |
| 2215 |
| 2216 // Check for heap number. |
| 2217 Label is_heap_number; |
| 2218 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 2219 __ JumpIfRoot(scratch, Heap::kHeapNumberMapRootIndex, &is_heap_number); |
| 2220 |
| 2221 // Check for undefined. Undefined is converted to zero for the clamping
| // conversion.
| 2222 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, |
| 2223 instr->environment()); |
| 2224 __ Mov(result, 0); |
| 2225 __ B(&done); |
| 2226 |
| 2227 // Heap number case. |
| 2228 __ Bind(&is_heap_number); |
| 2229 DoubleRegister dbl_scratch = double_scratch(); |
| 2230 DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp2()); |
| 2231 __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset)); |
| 2232 __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2); |
| 2233 |
| 2234 __ Bind(&done); |
| 2235 } |
| 2236 |
| 2237 |
| 2238 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { |
| 2239 Handle<String> class_name = instr->hydrogen()->class_name(); |
| 2240 Label* true_label = instr->TrueLabel(chunk_); |
| 2241 Label* false_label = instr->FalseLabel(chunk_); |
| 2242 Register input = ToRegister(instr->value()); |
| 2243 Register scratch1 = ToRegister(instr->temp1()); |
| 2244 Register scratch2 = ToRegister(instr->temp2()); |
| 2245 |
| 2246 __ JumpIfSmi(input, false_label); |
| 2247 |
| 2248 Register map = scratch2; |
| 2249 if (class_name->IsUtf8EqualTo(CStrVector("Function"))) { |
| 2250 // Assuming the following assertions, we can use the same compares to test |
| 2251 // for both being a function type and being in the object type range. |
| 2252 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); |
| 2253 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == |
| 2254 FIRST_SPEC_OBJECT_TYPE + 1); |
| 2255 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == |
| 2256 LAST_SPEC_OBJECT_TYPE - 1); |
| 2257 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); |
| 2258 |
| 2259 // We expect CompareObjectType to load the object instance type in scratch1. |
| 2260 __ CompareObjectType(input, map, scratch1, FIRST_SPEC_OBJECT_TYPE); |
| 2261 __ B(lt, false_label); |
| 2262 __ B(eq, true_label); |
| 2263 __ Cmp(scratch1, LAST_SPEC_OBJECT_TYPE); |
| 2264 __ B(eq, true_label); |
| 2265 } else { |
| 2266 __ IsObjectJSObjectType(input, map, scratch1, false_label); |
| 2267 } |
| 2268 |
| 2269 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. |
| 2270 // Check if the constructor in the map is a function. |
| 2271 __ Ldr(scratch1, FieldMemOperand(map, Map::kConstructorOffset)); |
| 2272 |
| 2273 // Objects with a non-function constructor have class 'Object'. |
| 2274 if (class_name->IsUtf8EqualTo(CStrVector("Object"))) { |
| 2275 __ JumpIfNotObjectType( |
| 2276 scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, true_label); |
| 2277 } else { |
| 2278 __ JumpIfNotObjectType( |
| 2279 scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, false_label); |
| 2280 } |
| 2281 |
| 2282 // The constructor function is in scratch1. Get its instance class name. |
| 2283 __ Ldr(scratch1, |
| 2284 FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset)); |
| 2285 __ Ldr(scratch1, |
| 2286 FieldMemOperand(scratch1, |
| 2287 SharedFunctionInfo::kInstanceClassNameOffset)); |
| 2288 |
| 2289 // The class name we are testing against is internalized since it's a literal. |
| 2290 // The name in the constructor is internalized because of the way the context |
| 2291 // is booted. This routine isn't expected to work for random API-created |
| 2292 // classes and it doesn't have to because you can't access it with natives |
| 2293 // syntax. Since both sides are internalized it is sufficient to use an |
| 2294 // identity comparison. |
| 2295 EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name)); |
| 2296 } |
| 2297 |
| 2298 |
| 2299 void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) { |
| 2300 ASSERT(instr->hydrogen()->representation().IsDouble()); |
| 2301 FPRegister object = ToDoubleRegister(instr->object()); |
| 2302 Register temp = ToRegister(instr->temp()); |
| 2303 |
| 2304 // If we don't have a NaN, we don't have the hole, so branch now to avoid the |
| 2305 // (relatively expensive) hole-NaN check. |
| 2306 __ Fcmp(object, object); |
| 2307 __ B(vc, instr->FalseLabel(chunk_)); |
| 2308 |
| 2309 // We have a NaN, but is it the hole? |
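| // The hole is a NaN with one specific bit pattern, so move the raw bits
| // into a core register and compare them with kHoleNanInt64 exactly.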
| 2310 __ Fmov(temp, object); |
| 2311 EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64); |
| 2312 } |
| 2313 |
| 2314 |
| 2315 void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) { |
| 2316 ASSERT(instr->hydrogen()->representation().IsTagged()); |
| 2317 Register object = ToRegister(instr->object()); |
| 2318 |
| 2319 EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex); |
| 2320 } |
| 2321 |
| 2322 |
| 2323 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { |
| 2324 Register value = ToRegister(instr->value()); |
| 2325 Register map = ToRegister(instr->temp()); |
| 2326 |
| 2327 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); |
| 2328 EmitCompareAndBranch(instr, eq, map, Operand(instr->map())); |
| 2329 } |
| 2330 |
| 2331 |
| 2332 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { |
| 2333 Representation rep = instr->hydrogen()->value()->representation(); |
| 2334 ASSERT(!rep.IsInteger32()); |
| 2335 Register scratch = ToRegister(instr->temp()); |
| 2336 |
| 2337 if (rep.IsDouble()) { |
| 2338 __ JumpIfMinusZero(ToDoubleRegister(instr->value()), |
| 2339 instr->TrueLabel(chunk())); |
| 2340 } else { |
| 2341 Register value = ToRegister(instr->value()); |
| 2342 __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex, |
| 2343 instr->FalseLabel(chunk()), DO_SMI_CHECK); |
| 2344 __ Ldr(double_scratch(), FieldMemOperand(value, HeapNumber::kValueOffset)); |
| 2345 __ JumpIfMinusZero(double_scratch(), instr->TrueLabel(chunk())); |
| 2346 } |
| 2347 EmitGoto(instr->FalseDestination(chunk())); |
| 2348 } |
| 2349 |
| 2350 |
| 2351 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { |
| 2352 LOperand* left = instr->left(); |
| 2353 LOperand* right = instr->right(); |
| 2354 Condition cond = TokenToCondition(instr->op(), false); |
| 2355 |
| 2356 if (left->IsConstantOperand() && right->IsConstantOperand()) { |
| 2357 // We can statically evaluate the comparison. |
| 2358 double left_val = ToDouble(LConstantOperand::cast(left)); |
| 2359 double right_val = ToDouble(LConstantOperand::cast(right)); |
| 2360 int next_block = EvalComparison(instr->op(), left_val, right_val) ? |
| 2361 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_); |
| 2362 EmitGoto(next_block); |
| 2363 } else { |
| 2364 if (instr->is_double()) { |
| 2365 if (right->IsConstantOperand()) { |
| 2366 __ Fcmp(ToDoubleRegister(left), |
| 2367 ToDouble(LConstantOperand::cast(right))); |
| 2368 } else if (left->IsConstantOperand()) { |
| 2369 // Transpose the operands and reverse the condition. |
| 2370 __ Fcmp(ToDoubleRegister(right), |
| 2371 ToDouble(LConstantOperand::cast(left))); |
| 2372 cond = ReverseConditionForCmp(cond); |
| 2373 } else { |
| 2374 __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right)); |
| 2375 } |
| 2376 |
| 2377 // If a NaN is involved, i.e. the result is unordered (V set), |
| 2378 // jump to false block label. |
| 2379 __ B(vs, instr->FalseLabel(chunk_)); |
| 2380 EmitBranch(instr, cond); |
| 2381 } else { |
| 2382 if (instr->hydrogen_value()->representation().IsInteger32()) { |
| 2383 if (right->IsConstantOperand()) { |
| 2384 EmitCompareAndBranch(instr, |
| 2385 cond, |
| 2386 ToRegister32(left), |
| 2387 ToOperand32I(right)); |
| 2388 } else { |
| 2389 // Transpose the operands and reverse the condition. |
| 2390 EmitCompareAndBranch(instr, |
| 2391 ReverseConditionForCmp(cond), |
| 2392 ToRegister32(right), |
| 2393 ToOperand32I(left)); |
| 2394 } |
| 2395 } else { |
| 2396 ASSERT(instr->hydrogen_value()->representation().IsSmi()); |
| 2397 if (right->IsConstantOperand()) { |
| 2398 int32_t value = ToInteger32(LConstantOperand::cast(right)); |
| 2399 EmitCompareAndBranch(instr, |
| 2400 cond, |
| 2401 ToRegister(left), |
| 2402 Operand(Smi::FromInt(value))); |
| 2403 } else if (left->IsConstantOperand()) { |
| 2404 // Transpose the operands and reverse the condition. |
| 2405 int32_t value = ToInteger32(LConstantOperand::cast(left)); |
| 2406 EmitCompareAndBranch(instr, |
| 2407 ReverseConditionForCmp(cond), |
| 2408 ToRegister(right), |
| 2409 Operand(Smi::FromInt(value))); |
| 2410 } else { |
| 2411 EmitCompareAndBranch(instr, |
| 2412 cond, |
| 2413 ToRegister(left), |
| 2414 ToRegister(right)); |
| 2415 } |
| 2416 } |
| 2417 } |
| 2418 } |
| 2419 } |
| 2420 |
| 2421 |
| 2422 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { |
| 2423 Register left = ToRegister(instr->left()); |
| 2424 Register right = ToRegister(instr->right()); |
| 2425 EmitCompareAndBranch(instr, eq, left, right); |
| 2426 } |
| 2427 |
| 2428 |
| 2429 void LCodeGen::DoCmpT(LCmpT* instr) { |
| 2430 ASSERT(ToRegister(instr->context()).is(cp)); |
| 2431 Token::Value op = instr->op(); |
| 2432 Condition cond = TokenToCondition(op, false); |
| 2433 |
| 2434 ASSERT(ToRegister(instr->left()).Is(x1)); |
| 2435 ASSERT(ToRegister(instr->right()).Is(x0)); |
| 2436 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); |
| 2437 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 2438 // Signal that we don't inline smi code before this stub. |
| 2439 InlineSmiCheckInfo::EmitNotInlined(masm()); |
| 2440 |
| 2441 // Return true or false depending on CompareIC result. |
| 2442 // This instruction is marked as call. We can clobber any register. |
| 2443 ASSERT(instr->IsMarkedAsCall()); |
| 2444 __ LoadTrueFalseRoots(x1, x2); |
| 2445 __ Cmp(x0, 0); |
| 2446 __ Csel(ToRegister(instr->result()), x1, x2, cond); |
| 2447 } |
| 2448 |
| 2449 |
| 2450 void LCodeGen::DoConstantD(LConstantD* instr) { |
| 2451 ASSERT(instr->result()->IsDoubleRegister()); |
| 2452 DoubleRegister result = ToDoubleRegister(instr->result()); |
| 2453 __ Fmov(result, instr->value()); |
| 2454 } |
| 2455 |
| 2456 |
| 2457 void LCodeGen::DoConstantE(LConstantE* instr) { |
| 2458 __ Mov(ToRegister(instr->result()), Operand(instr->value())); |
| 2459 } |
| 2460 |
| 2461 |
| 2462 void LCodeGen::DoConstantI(LConstantI* instr) { |
| 2463 __ Mov(ToRegister(instr->result()), instr->value()); |
| 2464 } |
| 2465 |
| 2466 |
| 2467 void LCodeGen::DoConstantS(LConstantS* instr) { |
| 2468 __ Mov(ToRegister(instr->result()), Operand(instr->value())); |
| 2469 } |
| 2470 |
| 2471 |
| 2472 void LCodeGen::DoConstantT(LConstantT* instr) { |
| 2473 Handle<Object> value = instr->value(isolate()); |
| 2474 AllowDeferredHandleDereference smi_check; |
| 2475 __ LoadObject(ToRegister(instr->result()), value); |
| 2476 } |
| 2477 |
| 2478 |
| 2479 void LCodeGen::DoContext(LContext* instr) { |
| 2480 // If there is a non-return use, the context must be moved to a register. |
| 2481 Register result = ToRegister(instr->result()); |
| 2482 if (info()->IsOptimizing()) { |
| 2483 __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 2484 } else { |
| 2485 // If there is no frame, the context must be in cp. |
| 2486 ASSERT(result.is(cp)); |
| 2487 } |
| 2488 } |
| 2489 |
| 2490 |
| 2491 void LCodeGen::DoCheckValue(LCheckValue* instr) { |
| 2492 Register reg = ToRegister(instr->value()); |
| 2493 Handle<HeapObject> object = instr->hydrogen()->object().handle(); |
| 2494 AllowDeferredHandleDereference smi_check; |
| 2495 if (isolate()->heap()->InNewSpace(*object)) { |
| 2496 Register temp = ToRegister(instr->temp()); |
| 2497 Handle<Cell> cell = isolate()->factory()->NewCell(object); |
| 2498 __ Mov(temp, Operand(Handle<Object>(cell))); |
| 2499 __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset)); |
| 2500 __ Cmp(reg, temp); |
| 2501 } else { |
| 2502 __ Cmp(reg, Operand(object)); |
| 2503 } |
| 2504 DeoptimizeIf(ne, instr->environment()); |
| 2505 } |
| 2506 |
| 2507 |
| 2508 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { |
| 2509 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); |
| 2510 ASSERT(instr->HasEnvironment()); |
| 2511 LEnvironment* env = instr->environment(); |
| 2512 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); |
| 2513 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); |
| 2514 } |
| 2515 |
| 2516 |
| 2517 void LCodeGen::DoDateField(LDateField* instr) { |
| 2518 Register object = ToRegister(instr->date()); |
| 2519 Register result = ToRegister(instr->result()); |
| 2520 Register temp1 = x10; |
| 2521 Register temp2 = x11; |
| 2522 Smi* index = instr->index(); |
| 2523 Label runtime, done, deopt, obj_ok; |
| 2524 |
| 2525 ASSERT(object.is(result) && object.Is(x0)); |
| 2526 ASSERT(instr->IsMarkedAsCall()); |
| 2527 |
| 2528 __ JumpIfSmi(object, &deopt); |
| 2529 __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE); |
| 2530 __ B(eq, &obj_ok); |
| 2531 |
| 2532 __ Bind(&deopt); |
| 2533 Deoptimize(instr->environment()); |
| 2534 |
| 2535 __ Bind(&obj_ok); |
| 2536 if (index->value() == 0) { |
| 2537 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); |
| 2538 } else { |
| 2539 if (index->value() < JSDate::kFirstUncachedField) { |
| 2540 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); |
| 2541 __ Mov(temp1, Operand(stamp)); |
| 2542 __ Ldr(temp1, MemOperand(temp1)); |
| 2543 __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset)); |
| 2544 __ Cmp(temp1, temp2); |
| 2545 __ B(ne, &runtime); |
| 2546 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset + |
| 2547 kPointerSize * index->value())); |
| 2548 __ B(&done); |
| 2549 } |
| 2550 |
| 2551 __ Bind(&runtime); |
| 2552 __ Mov(x1, Operand(index)); |
| 2553 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); |
| 2554 } |
| 2555 |
| 2556 __ Bind(&done); |
| 2557 } |
| 2558 |
| 2559 |
| 2560 void LCodeGen::DoDeoptimize(LDeoptimize* instr) { |
| 2561 Deoptimizer::BailoutType type = instr->hydrogen()->type(); |
| 2562 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the |
| 2563 // needed return address), even though the implementation of LAZY and EAGER is |
| 2564 // now identical. When LAZY is eventually completely folded into EAGER, remove |
| 2565 // the special case below. |
| 2566 if (info()->IsStub() && (type == Deoptimizer::EAGER)) { |
| 2567 type = Deoptimizer::LAZY; |
| 2568 } |
| 2569 |
| 2570 Comment(";;; deoptimize: %s", instr->hydrogen()->reason()); |
| 2571 DeoptimizeHeader(instr->environment(), &type); |
| 2572 Deoptimize(instr->environment(), type); |
| 2573 } |
| 2574 |
| 2575 |
| 2576 void LCodeGen::DoDivI(LDivI* instr) { |
| 2577 Register dividend = ToRegister32(instr->left()); |
| 2578 Register result = ToRegister32(instr->result()); |
| 2579 |
| 2580 bool has_power_of_2_divisor = instr->hydrogen()->RightIsPowerOf2(); |
| 2581 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 2582 bool bailout_on_minus_zero = |
| 2583 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
| 2584 bool can_be_div_by_zero = |
| 2585 instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero); |
| 2586 bool all_uses_truncating_to_int32 = |
| 2587 instr->hydrogen()->CheckFlag(HInstruction::kAllUsesTruncatingToInt32); |
| 2588 |
| 2589 if (has_power_of_2_divisor) { |
| 2590 ASSERT(instr->temp() == NULL); |
| 2591 int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right())); |
| 2592 int32_t power; |
| 2593 int32_t power_mask; |
| 2594 Label deopt, done; |
| 2595 |
| 2596 ASSERT(divisor != 0); |
| 2597 if (divisor > 0) { |
| 2598 power = WhichPowerOf2(divisor); |
| 2599 power_mask = divisor - 1; |
| 2600 } else { |
| 2601 // Check for (0 / -x) as that will produce negative zero. |
| 2602 if (bailout_on_minus_zero) { |
| 2603 if (all_uses_truncating_to_int32) { |
| 2604 // If all uses truncate, and the dividend is zero, the truncated |
| 2605 // result is zero. |
| 2606 __ Mov(result, 0); |
| 2607 __ Cbz(dividend, &done); |
| 2608 } else { |
| 2609 __ Cbz(dividend, &deopt); |
| 2610 } |
| 2611 } |
| 2612 // Check for (kMinInt / -1). |
| 2613 if ((divisor == -1) && can_overflow && !all_uses_truncating_to_int32) { |
| 2614 // Check for kMinInt by subtracting one and checking for overflow. |
| 2615 __ Cmp(dividend, 1); |
| 2616 __ B(vs, &deopt); |
| 2617 } |
| 2618 power = WhichPowerOf2(-divisor); |
| 2619 power_mask = -divisor - 1; |
| 2620 } |
| 2621 |
| 2622 if (power_mask != 0) { |
| 2623 if (all_uses_truncating_to_int32) { |
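| // Truncate towards zero by negating negative dividends before the shift
| // and restoring the sign afterwards. For example, with dividend = -7 and
| // power = 2, a plain Asr would give -2 (rounding towards -infinity),
| // whereas (7 >> 2) = 1 negated gives the correct -1.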
| 2624 __ Cmp(dividend, 0); |
| 2625 __ Cneg(result, dividend, lt); |
| 2626 __ Asr(result, result, power); |
| 2627 if (divisor > 0) __ Cneg(result, result, lt); |
| 2628 if (divisor < 0) __ Cneg(result, result, gt); |
| 2629 return; // Don't fall through to negation below. |
| 2630 } else { |
| 2631 // Deoptimize if the remainder is not 0. If the 'power' least-significant
| 2632 // bits aren't all 0, the dividend is not a multiple of 2^power, and
| 2633 // therefore there will be a remainder.
| 2634 __ TestAndBranchIfAnySet(dividend, power_mask, &deopt); |
| 2635 __ Asr(result, dividend, power); |
| 2636 if (divisor < 0) __ Neg(result, result); |
| 2637 } |
| 2638 } else { |
| 2639 ASSERT((divisor == 1) || (divisor == -1)); |
| 2640 if (divisor < 0) { |
| 2641 __ Neg(result, dividend); |
| 2642 } else { |
| 2643 __ Mov(result, dividend); |
| 2644 } |
| 2645 } |
| 2646 __ B(&done); |
| 2647 __ Bind(&deopt); |
| 2648 Deoptimize(instr->environment()); |
| 2649 __ Bind(&done); |
| 2650 } else { |
| 2651 Register divisor = ToRegister32(instr->right()); |
| 2652 |
| 2653 // Issue the division first, and then check for any deopt cases whilst the |
| 2654 // result is computed. |
| 2655 __ Sdiv(result, dividend, divisor); |
| 2656 |
| 2657 if (!all_uses_truncating_to_int32) { |
| 2658 Label deopt; |
| 2659 // Check for x / 0. |
| 2660 if (can_be_div_by_zero) { |
| 2661 __ Cbz(divisor, &deopt); |
| 2662 } |
| 2663 |
| 2664 // Check for (0 / -x) as that will produce negative zero. |
| 2665 if (bailout_on_minus_zero) { |
| 2666 __ Cmp(divisor, 0); |
| 2667 |
| 2668 // If the divisor < 0 (mi), compare the dividend, and deopt if it is |
| 2669 // zero, i.e. a zero dividend with a negative divisor deopts.
| 2670 // If the divisor >= 0 (pl, the opposite of mi), set the flags to
| 2671 // condition ne so we don't deopt, i.e. a positive divisor never deopts.
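| // For example, with dividend = 0 and divisor = -4: 'mi' holds, so Ccmp
| // compares the dividend with zero, 'eq' is set and we deopt.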
| 2672 __ Ccmp(dividend, 0, NoFlag, mi); |
| 2673 __ B(eq, &deopt); |
| 2674 } |
| 2675 |
| 2676 // Check for (kMinInt / -1). |
| 2677 if (can_overflow) { |
| 2678 // Test dividend for kMinInt by subtracting one (cmp) and checking for |
| 2679 // overflow. |
| 2680 __ Cmp(dividend, 1); |
| 2681 // If overflow is set, i.e. dividend == kMinInt, compare the divisor with
| 2682 // -1. If overflow is clear, set the flags for condition ne, as the
| 2683 // dividend isn't kMinInt, and thus we shouldn't deopt.
| 2684 __ Ccmp(divisor, -1, NoFlag, vs); |
| 2685 __ B(eq, &deopt); |
| 2686 } |
| 2687 |
| 2688 // Compute remainder and deopt if it's not zero. |
| 2689 Register remainder = ToRegister32(instr->temp()); |
| 2690 __ Msub(remainder, result, divisor, dividend); |
| 2691 __ Cbnz(remainder, &deopt); |
| 2692 |
| 2693 Label div_ok; |
| 2694 __ B(&div_ok); |
| 2695 __ Bind(&deopt); |
| 2696 Deoptimize(instr->environment()); |
| 2697 __ Bind(&div_ok); |
| 2698 } else { |
| 2699 ASSERT(instr->temp() == NULL); |
| 2700 } |
| 2701 } |
| 2702 } |
| 2703 |
| 2704 |
| 2705 void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) { |
| 2706 DoubleRegister input = ToDoubleRegister(instr->value()); |
| 2707 Register result = ToRegister32(instr->result()); |
| 2708 Label done, deopt; |
| 2709 |
| 2710 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 2711 __ JumpIfMinusZero(input, &deopt); |
| 2712 } |
| 2713 |
| 2714 __ TryConvertDoubleToInt32(result, input, double_scratch(), &done); |
| 2715 __ Bind(&deopt); |
| 2716 Deoptimize(instr->environment()); |
| 2717 __ Bind(&done); |
| 2718 |
| 2719 if (instr->tag_result()) { |
| 2720 __ SmiTag(result.X()); |
| 2721 } |
| 2722 } |
| 2723 |
| 2724 |
| 2725 void LCodeGen::DoDrop(LDrop* instr) { |
| 2726 __ Drop(instr->count()); |
| 2727 } |
| 2728 |
| 2729 |
| 2730 void LCodeGen::DoDummy(LDummy* instr) { |
| 2731 // Nothing to see here, move on! |
| 2732 } |
| 2733 |
| 2734 |
| 2735 void LCodeGen::DoDummyUse(LDummyUse* instr) { |
| 2736 // Nothing to see here, move on! |
| 2737 } |
| 2738 |
| 2739 |
| 2740 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { |
| 2741 ASSERT(ToRegister(instr->context()).is(cp)); |
| 2742 // The FunctionLiteral instruction is marked as a call; we can trash any
| // register.
| 2743 ASSERT(instr->IsMarkedAsCall()); |
| 2744 |
| 2745 // Use the fast case closure allocation code that allocates in new |
| 2746 // space for nested functions that don't need literals cloning. |
| 2747 bool pretenure = instr->hydrogen()->pretenure(); |
| 2748 if (!pretenure && instr->hydrogen()->has_no_literals()) { |
| 2749 FastNewClosureStub stub(instr->hydrogen()->language_mode(), |
| 2750 instr->hydrogen()->is_generator()); |
| 2751 __ Mov(x2, Operand(instr->hydrogen()->shared_info())); |
| 2752 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| 2753 } else { |
| 2754 __ Mov(x2, Operand(instr->hydrogen()->shared_info())); |
| 2755 __ Mov(x1, Operand(pretenure ? factory()->true_value() |
| 2756 : factory()->false_value())); |
| 2757 __ Push(cp, x2, x1); |
| 2758 CallRuntime(Runtime::kNewClosure, 3, instr); |
| 2759 } |
| 2760 } |
| 2761 |
| 2762 |
| 2763 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { |
| 2764 Register map = ToRegister(instr->map()); |
| 2765 Register result = ToRegister(instr->result()); |
| 2766 Label load_cache, done; |
| 2767 |
| 2768 __ EnumLengthUntagged(result, map); |
| 2769 __ Cbnz(result, &load_cache); |
| 2770 |
| 2771 __ Mov(result, Operand(isolate()->factory()->empty_fixed_array())); |
| 2772 __ B(&done); |
| 2773 |
| 2774 __ Bind(&load_cache); |
| 2775 __ LoadInstanceDescriptors(map, result); |
| 2776 __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); |
| 2777 __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); |
| 2778 DeoptimizeIfZero(result, instr->environment()); |
| 2779 |
| 2780 __ Bind(&done); |
| 2781 } |
| 2782 |
| 2783 |
| 2784 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { |
| 2785 Register object = ToRegister(instr->object()); |
| 2786 Register null_value = x5; |
| 2787 |
| 2788 ASSERT(instr->IsMarkedAsCall()); |
| 2789 ASSERT(object.Is(x0)); |
| 2790 |
| 2791 Label deopt; |
| 2792 |
| 2793 __ JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &deopt); |
| 2794 |
| 2795 __ LoadRoot(null_value, Heap::kNullValueRootIndex); |
| 2796 __ Cmp(object, null_value); |
| 2797 __ B(eq, &deopt); |
| 2798 |
| 2799 __ JumpIfSmi(object, &deopt); |
| 2800 |
| 2801 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); |
| 2802 __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE); |
| 2803 __ B(le, &deopt); |
| 2804 |
| 2805 Label use_cache, call_runtime; |
| 2806 __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime); |
| 2807 |
| 2808 __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 2809 __ B(&use_cache); |
| 2810 |
| 2811 __ Bind(&deopt); |
| 2812 Deoptimize(instr->environment()); |
| 2813 |
| 2814 // Get the set of properties to enumerate. |
| 2815 __ Bind(&call_runtime); |
| 2816 __ Push(object); |
| 2817 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); |
| 2818 |
| 2819 __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 2820 __ JumpIfNotRoot(x1, Heap::kMetaMapRootIndex, &deopt); |
| 2821 |
| 2822 __ Bind(&use_cache); |
| 2823 } |
| 2824 |
| 2825 |
| 2826 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { |
| 2827 Register input = ToRegister(instr->value()); |
| 2828 Register result = ToRegister(instr->result()); |
| 2829 |
| 2830 __ AssertString(input); |
| 2831 |
| 2832 // Assert that we can use a W register load to get the hash. |
| 2833 ASSERT((String::kHashShift + String::kArrayIndexValueBits) < kWRegSize); |
| 2834 __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset)); |
| 2835 __ IndexFromHash(result, result); |
| 2836 } |
| 2837 |
| 2838 |
| 2839 void LCodeGen::EmitGoto(int block) { |
| 2840 // Do not emit jump if we are emitting a goto to the next block. |
| 2841 if (!IsNextEmittedBlock(block)) { |
| 2842 __ B(chunk_->GetAssemblyLabel(LookupDestination(block))); |
| 2843 } |
| 2844 } |
| 2845 |
| 2846 |
| 2847 void LCodeGen::DoGoto(LGoto* instr) { |
| 2848 EmitGoto(instr->block_id()); |
| 2849 } |
| 2850 |
| 2851 |
| 2852 void LCodeGen::DoHasCachedArrayIndexAndBranch( |
| 2853 LHasCachedArrayIndexAndBranch* instr) { |
| 2854 Register input = ToRegister(instr->value()); |
| 2855 Register temp = ToRegister32(instr->temp()); |
| 2856 |
| 2857 // Assert that the cache status bits fit in a W register. |
| 2858 ASSERT(is_uint32(String::kContainsCachedArrayIndexMask)); |
| 2859 __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset)); |
| 2860 __ Tst(temp, String::kContainsCachedArrayIndexMask); |
| 2861 EmitBranch(instr, eq); |
| 2862 } |
| 2863 |
| 2864 |
| 2865 // The HHasInstanceTypeAndBranch instruction is built with an interval of
| 2866 // types to test, but it is only used in very restricted ways. The only
| 2867 // possible kinds of intervals are:
| 2868 // - [ FIRST_TYPE, instr->to() ]
| 2869 // - [ instr->from(), LAST_TYPE ]
| 2870 // - instr->from() == instr->to()
| 2871 //
| 2872 // These kinds of intervals can be checked with a single compare
| 2873 // instruction, provided the correct value and test condition are used.
| 2874 // |
| 2875 // TestType() will return the value to use in the compare instruction and |
| 2876 // BranchCondition() will return the condition to use depending on the kind |
| 2877 // of interval actually specified in the instruction. |
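| // For example:
| //  - [ FIRST_TYPE, to ]   is tested with Cmp(type, to) and condition ls,
| //  - [ from, LAST_TYPE ]  with Cmp(type, from) and condition hs,
| //  - from == to           with Cmp(type, from) and condition eq.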
| 2878 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { |
| 2879 InstanceType from = instr->from(); |
| 2880 InstanceType to = instr->to(); |
| 2881 if (from == FIRST_TYPE) return to; |
| 2882 ASSERT((from == to) || (to == LAST_TYPE)); |
| 2883 return from; |
| 2884 } |
| 2885 |
| 2886 |
| 2887 // See comment above TestType function for what this function does. |
| 2888 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { |
| 2889 InstanceType from = instr->from(); |
| 2890 InstanceType to = instr->to(); |
| 2891 if (from == to) return eq; |
| 2892 if (to == LAST_TYPE) return hs; |
| 2893 if (from == FIRST_TYPE) return ls; |
| 2894 UNREACHABLE(); |
| 2895 return eq; |
| 2896 } |
| 2897 |
| 2898 |
| 2899 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { |
| 2900 Register input = ToRegister(instr->value()); |
| 2901 Register scratch = ToRegister(instr->temp()); |
| 2902 |
| 2903 if (!instr->hydrogen()->value()->IsHeapObject()) { |
| 2904 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); |
| 2905 } |
| 2906 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen())); |
| 2907 EmitBranch(instr, BranchCondition(instr->hydrogen())); |
| 2908 } |
| 2909 |
| 2910 |
| 2911 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { |
| 2912 Register result = ToRegister(instr->result()); |
| 2913 Register base = ToRegister(instr->base_object()); |
| 2914 __ Add(result, base, ToOperand(instr->offset())); |
| 2915 } |
| 2916 |
| 2917 |
| 2918 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { |
| 2919 ASSERT(ToRegister(instr->context()).is(cp)); |
| 2920 // Assert that the arguments are in the registers expected by InstanceofStub. |
| 2921 ASSERT(ToRegister(instr->left()).Is(InstanceofStub::left())); |
| 2922 ASSERT(ToRegister(instr->right()).Is(InstanceofStub::right())); |
| 2923 |
| 2924 InstanceofStub stub(InstanceofStub::kArgsInRegisters); |
| 2925 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| 2926 |
| 2927 // InstanceofStub returns a result in x0: |
| 2928 // 0 => not an instance |
| 2929 // smi 1 => instance. |
| 2930 __ Cmp(x0, 0); |
| 2931 __ LoadTrueFalseRoots(x0, x1); |
| 2932 __ Csel(x0, x0, x1, eq); |
| 2933 } |
| 2934 |
| 2935 |
| 2936 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { |
| 2937 class DeferredInstanceOfKnownGlobal: public LDeferredCode { |
| 2938 public: |
| 2939 DeferredInstanceOfKnownGlobal(LCodeGen* codegen, |
| 2940 LInstanceOfKnownGlobal* instr) |
| 2941 : LDeferredCode(codegen), instr_(instr) { } |
| 2942 virtual void Generate() { |
| 2943 codegen()->DoDeferredInstanceOfKnownGlobal(instr_); |
| 2944 } |
| 2945 virtual LInstruction* instr() { return instr_; } |
| 2946 private: |
| 2947 LInstanceOfKnownGlobal* instr_; |
| 2948 }; |
| 2949 |
| 2950 DeferredInstanceOfKnownGlobal* deferred = |
| 2951 new(zone()) DeferredInstanceOfKnownGlobal(this, instr); |
| 2952 |
| 2953 Label map_check, return_false, cache_miss, done; |
| 2954 Register object = ToRegister(instr->value()); |
| 2955 Register result = ToRegister(instr->result()); |
| 2956 // x4 is expected in the associated deferred code and stub. |
| 2957 Register map_check_site = x4; |
| 2958 Register map = x5; |
| 2959 |
| 2960 // This instruction is marked as call. We can clobber any register. |
| 2961 ASSERT(instr->IsMarkedAsCall()); |
| 2962 |
| 2963 // We must take into account that object is in x11. |
| 2964 ASSERT(object.Is(x11)); |
| 2965 Register scratch = x10; |
| 2966 |
| 2967 // A Smi is not an instance of anything.
| 2968 __ JumpIfSmi(object, &return_false); |
| 2969 |
| 2970 // This is the inlined call site instanceof cache. The two occurrences of the
| 2971 // hole value will be patched to the last map/result pair generated by the |
| 2972 // instanceof stub. |
| 2973 __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 2974 { |
| 2975 // Below we use Factory::the_hole_value() on purpose instead of loading
| 2976 // from the root array, to force relocation so that we can later patch the
| 2977 // code with a custom value.
| 2978 InstructionAccurateScope scope(masm(), 5); |
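| // The scope guarantees that exactly five instructions are emitted here,
| // so the patcher can rely on fixed offsets from map_check.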
| 2979 __ bind(&map_check); |
| 2980 // Will be patched with the cached map. |
| 2981 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value()); |
| 2982 __ LoadRelocated(scratch, Operand(Handle<Object>(cell))); |
| 2983 __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset)); |
| 2984 __ cmp(map, Operand(scratch)); |
| 2985 __ b(&cache_miss, ne); |
| 2986 // The address of this instruction is computed relative to the map check |
| 2987 // above, so check the size of the code generated. |
| 2988 ASSERT(masm()->InstructionsGeneratedSince(&map_check) == 4); |
| 2989 // Will be patched with the cached result. |
| 2990 __ LoadRelocated(result, Operand(factory()->the_hole_value())); |
| 2991 } |
| 2992 __ B(&done); |
| 2993 |
| 2994 // The inlined call site cache did not match. |
| 2995 // Check null and string before calling the deferred code. |
| 2996 __ Bind(&cache_miss); |
| 2997 // Compute the address of the map check. It must not be clobbered until the |
| 2998 // InstanceOfStub has used it. |
| 2999 __ Adr(map_check_site, &map_check); |
| 3000 // Null is not an instance of anything.
| 3001 __ JumpIfRoot(object, Heap::kNullValueRootIndex, &return_false); |
| 3002 |
| 3003 // String values are not instances of anything. |
| 3004 // Return false if the object is a string. Otherwise, jump to the deferred |
| 3005 // code. |
| 3006 // Note that we can't jump directly to deferred code from |
| 3007 // IsObjectJSStringType, because it uses tbz for the jump and the deferred |
| 3008 // code can be out of range. |
| 3009 __ IsObjectJSStringType(object, scratch, NULL, &return_false); |
| 3010 __ B(deferred->entry()); |
| 3011 |
| 3012 __ Bind(&return_false); |
| 3013 __ LoadRoot(result, Heap::kFalseValueRootIndex); |
| 3014 |
| 3015 // Here result is either true or false. |
| 3016 __ Bind(deferred->exit()); |
| 3017 __ Bind(&done); |
| 3018 } |
| 3019 |
| 3020 |
| 3021 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { |
| 3022 Register result = ToRegister(instr->result()); |
| 3023 ASSERT(result.Is(x0)); // InstanceofStub returns its result in x0. |
| 3024 InstanceofStub::Flags flags = InstanceofStub::kNoFlags; |
| 3025 flags = static_cast<InstanceofStub::Flags>( |
| 3026 flags | InstanceofStub::kArgsInRegisters); |
| 3027 flags = static_cast<InstanceofStub::Flags>( |
| 3028 flags | InstanceofStub::kReturnTrueFalseObject); |
| 3029 flags = static_cast<InstanceofStub::Flags>( |
| 3030 flags | InstanceofStub::kCallSiteInlineCheck); |
| 3031 |
| 3032 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
| 3033 LoadContextFromDeferred(instr->context()); |
| 3034 |
| 3035 // Prepare InstanceofStub arguments. |
| 3036 ASSERT(ToRegister(instr->value()).Is(InstanceofStub::left())); |
| 3037 __ LoadObject(InstanceofStub::right(), instr->function()); |
| 3038 |
| 3039 InstanceofStub stub(flags); |
| 3040 CallCodeGeneric(stub.GetCode(isolate()), |
| 3041 RelocInfo::CODE_TARGET, |
| 3042 instr, |
| 3043 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); |
| 3044 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment(); |
| 3045 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); |
| 3046 |
| 3047 // Put the result value into the result register slot. |
| 3048 __ StoreToSafepointRegisterSlot(result, result); |
| 3049 } |
| 3050 |
| 3051 |
| 3052 void LCodeGen::DoInstructionGap(LInstructionGap* instr) { |
| 3053 DoGap(instr); |
| 3054 } |
| 3055 |
| 3056 |
| 3057 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { |
| 3058 Register value = ToRegister32(instr->value()); |
| 3059 DoubleRegister result = ToDoubleRegister(instr->result()); |
| 3060 __ Scvtf(result, value); |
| 3061 } |
| 3062 |
| 3063 |
| 3064 void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) { |
| 3065 // A64 smis can represent all Integer32 values, so this cannot deoptimize. |
| 3066 ASSERT(!instr->hydrogen()->value()->HasRange() || |
| 3067 instr->hydrogen()->value()->range()->IsInSmiRange()); |
| 3068 |
| 3069 Register value = ToRegister(instr->value()); |
| 3070 Register result = ToRegister(instr->result()); |
| 3071 __ SmiTag(result, value); |
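| // With kSmiShift == 32, the SmiTag above is a single logical shift left;
| // e.g. the int32 value 7 becomes the smi 0x0000000700000000.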
| 3072 } |
| 3073 |
| 3074 |
| 3075 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { |
| 3076 ASSERT(ToRegister(instr->context()).is(cp)); |
| 3077 // The function is required to be in x1. |
| 3078 ASSERT(ToRegister(instr->function()).is(x1)); |
| 3079 ASSERT(instr->HasPointerMap()); |
| 3080 |
| 3081 Handle<JSFunction> known_function = instr->hydrogen()->known_function(); |
| 3082 if (known_function.is_null()) { |
| 3083 LPointerMap* pointers = instr->pointer_map(); |
| 3084 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); |
| 3085 ParameterCount count(instr->arity()); |
| 3086 __ InvokeFunction(x1, count, CALL_FUNCTION, generator); |
| 3087 } else { |
| 3088 CallKnownFunction(known_function, |
| 3089 instr->hydrogen()->formal_parameter_count(), |
| 3090 instr->arity(), |
| 3091 instr, |
| 3092 x1); |
| 3093 } |
| 3094 } |
| 3095 |
| 3096 |
| 3097 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) { |
| 3098 Register temp1 = ToRegister(instr->temp1()); |
| 3099 Register temp2 = ToRegister(instr->temp2()); |
| 3100 |
| 3101 // Get the frame pointer for the calling frame. |
| 3102 __ Ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
| 3103 |
| 3104 // Skip the arguments adaptor frame if it exists. |
| 3105 Label check_frame_marker; |
| 3106 __ Ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset)); |
| 3107 __ Cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
| 3108 __ B(ne, &check_frame_marker); |
| 3109 __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset)); |
| 3110 |
| 3111 // Check the marker in the calling frame. |
| 3112 __ Bind(&check_frame_marker); |
| 3113 __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset)); |
| 3114 |
| 3115 EmitCompareAndBranch( |
| 3116 instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT))); |
| 3117 } |
| 3118 |
| 3119 |
| 3120 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) { |
| 3121 Label* is_object = instr->TrueLabel(chunk_); |
| 3122 Label* is_not_object = instr->FalseLabel(chunk_); |
| 3123 Register value = ToRegister(instr->value()); |
| 3124 Register map = ToRegister(instr->temp1()); |
| 3125 Register scratch = ToRegister(instr->temp2()); |
| 3126 |
| 3127 __ JumpIfSmi(value, is_not_object); |
| 3128 __ JumpIfRoot(value, Heap::kNullValueRootIndex, is_object); |
| 3129 |
| 3130 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); |
| 3131 |
| 3132 // Check for undetectable objects. |
| 3133 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); |
| 3134 __ TestAndBranchIfAnySet(scratch, 1 << Map::kIsUndetectable, is_not_object); |
| 3135 |
| 3136 // Check that instance type is in object type range. |
| 3137 __ IsInstanceJSObjectType(map, scratch, NULL); |
| 3138 // IsInstanceJSObjectType has updated the flags, so testing the "le"
| 3139 // condition now checks whether the object's instance type is in the
| 3140 // valid JS object range.
| 3141 EmitBranch(instr, le); |
| 3142 } |
| 3143 |
| 3144 |
| 3145 Condition LCodeGen::EmitIsString(Register input, |
| 3146 Register temp1, |
| 3147 Label* is_not_string, |
| 3148 SmiCheck check_needed = INLINE_SMI_CHECK) { |
| 3149 if (check_needed == INLINE_SMI_CHECK) { |
| 3150 __ JumpIfSmi(input, is_not_string); |
| 3151 } |
| 3152 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE); |
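| // String instance types all precede FIRST_NONSTRING_TYPE, so "lt" holds
| // if and only if the input is a string.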
| 3153 |
| 3154 return lt; |
| 3155 } |
| 3156 |
| 3157 |
| 3158 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { |
| 3159 Register val = ToRegister(instr->value()); |
| 3160 Register scratch = ToRegister(instr->temp()); |
| 3161 |
| 3162 SmiCheck check_needed = |
| 3163 instr->hydrogen()->value()->IsHeapObject() |
| 3164 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| 3165 Condition true_cond = |
| 3166 EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed); |
| 3167 |
| 3168 EmitBranch(instr, true_cond); |
| 3169 } |
| 3170 |
| 3171 |
| 3172 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { |
| 3173 Register value = ToRegister(instr->value()); |
| 3174 STATIC_ASSERT(kSmiTag == 0); |
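| // Since kSmiTag is 0, "eq" holds exactly when the tag bits selected by
| // kSmiTagMask are clear, i.e. when the value is a smi.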
| 3175 EmitTestAndBranch(instr, eq, value, kSmiTagMask); |
| 3176 } |
| 3177 |
| 3178 |
| 3179 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { |
| 3180 Register input = ToRegister(instr->value()); |
| 3181 Register temp = ToRegister(instr->temp()); |
| 3182 |
| 3183 if (!instr->hydrogen()->value()->IsHeapObject()) { |
| 3184 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); |
| 3185 } |
| 3186 __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 3187 __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); |
| 3188 |
| 3189 EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable); |
| 3190 } |
| 3191 |
| 3192 |
| 3193 static const char* LabelType(LLabel* label) { |
| 3194 if (label->is_loop_header()) return " (loop header)"; |
| 3195 if (label->is_osr_entry()) return " (OSR entry)"; |
| 3196 return ""; |
| 3197 } |
| 3198 |
| 3199 |
| 3200 void LCodeGen::DoLabel(LLabel* label) { |
| 3201 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", |
| 3202 current_instruction_, |
| 3203 label->hydrogen_value()->id(), |
| 3204 label->block_id(), |
| 3205 LabelType(label)); |
| 3206 |
| 3207 __ Bind(label->label()); |
| 3208 current_block_ = label->block_id(); |
| 3209 DoGap(label); |
| 3210 } |
| 3211 |
| 3212 |
| 3213 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { |
| 3214 Register context = ToRegister(instr->context()); |
| 3215 Register result = ToRegister(instr->result()); |
| 3216 __ Ldr(result, ContextMemOperand(context, instr->slot_index())); |
| 3217 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3218 if (instr->hydrogen()->DeoptimizesOnHole()) { |
| 3219 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, |
| 3220 instr->environment()); |
| 3221 } else { |
| 3222 Label not_the_hole; |
| 3223 __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, ¬_the_hole); |
| 3224 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); |
| 3225 __ Bind(¬_the_hole); |
| 3226 } |
| 3227 } |
| 3228 } |
| 3229 |
| 3230 |
| 3231 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { |
| 3232 Register function = ToRegister(instr->function()); |
| 3233 Register result = ToRegister(instr->result()); |
| 3234 Register temp = ToRegister(instr->temp()); |
| 3235 Label deopt; |
| 3236 |
| 3237 // Check that the function really is a function. Leaves map in the result |
| 3238 // register. |
| 3239 __ JumpIfNotObjectType(function, result, temp, JS_FUNCTION_TYPE, &deopt); |
| 3240 |
| 3241 // Make sure that the function has an instance prototype. |
| 3242 Label non_instance; |
| 3243 __ Ldrb(temp, FieldMemOperand(result, Map::kBitFieldOffset)); |
| 3244 __ Tbnz(temp, Map::kHasNonInstancePrototype, &non_instance); |
| 3245 |
| 3246 // Get the prototype or initial map from the function. |
| 3247 __ Ldr(result, FieldMemOperand(function, |
| 3248 JSFunction::kPrototypeOrInitialMapOffset)); |
| 3249 |
| 3250 // Check that the function has a prototype or an initial map. |
| 3251 __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &deopt); |
| 3252 |
| 3253 // If the function does not have an initial map, we're done. |
| 3254 Label done; |
| 3255 __ CompareObjectType(result, temp, temp, MAP_TYPE); |
| 3256 __ B(ne, &done); |
| 3257 |
| 3258 // Get the prototype from the initial map. |
| 3259 __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); |
| 3260 __ B(&done); |
| 3261 |
| 3262 // Non-instance prototype: fetch prototype from constructor field in initial |
| 3263 // map. |
| 3264 __ Bind(&non_instance); |
| 3265 __ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); |
| 3266 __ B(&done); |
| 3267 |
| 3268 // Deoptimize case. |
| 3269 __ Bind(&deopt); |
| 3270 Deoptimize(instr->environment()); |
| 3271 |
| 3272 // All done. |
| 3273 __ Bind(&done); |
| 3274 } |
| 3275 |
| 3276 |
| 3277 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { |
| 3278 Register result = ToRegister(instr->result()); |
| 3279 __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); |
| 3280 __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset)); |
| 3281 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3282 DeoptimizeIfRoot( |
| 3283 result, Heap::kTheHoleValueRootIndex, instr->environment()); |
| 3284 } |
| 3285 } |
| 3286 |
| 3287 |
| 3288 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { |
| 3289 ASSERT(ToRegister(instr->context()).is(cp)); |
| 3290 ASSERT(ToRegister(instr->global_object()).Is(x0)); |
| 3291 ASSERT(ToRegister(instr->result()).Is(x0)); |
| 3292 __ Mov(x2, Operand(instr->name())); |
| 3293 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL; |
| 3294 Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode); |
| 3295 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 3296 } |
| 3297 |
| 3298 |
| 3299 MemOperand LCodeGen::PrepareKeyedExternalArrayOperand( |
| 3300 Register key, |
| 3301 Register base, |
| 3302 Register scratch, |
| 3303 bool key_is_smi, |
| 3304 bool key_is_constant, |
| 3305 int constant_key, |
| 3306 ElementsKind elements_kind, |
| 3307 int additional_index) { |
| 3308 int element_size_shift = ElementsKindToShiftSize(elements_kind); |
| 3309 int additional_offset = IsFixedTypedArrayElementsKind(elements_kind) |
| 3310 ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag |
| 3311 : 0; |
| 3312 |
| 3313 if (key_is_constant) { |
| 3314 int base_offset = ((constant_key + additional_index) << element_size_shift); |
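| // For example, for FLOAT64_ELEMENTS (shift 3), constant_key 2 and
| // additional_index 1, base_offset is (2 + 1) << 3 = 24 bytes.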
| 3315 return MemOperand(base, base_offset + additional_offset); |
| 3316 } |
| 3317 |
| 3318 if (additional_index == 0) { |
| 3319 if (key_is_smi) { |
| 3320 // Key is smi: untag, and scale by element size. |
| 3321 __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift)); |
| 3322 return MemOperand(scratch, additional_offset); |
| 3323 } else { |
| 3324 // Key is not a smi: scale it by the element size.
| 3325 if (additional_offset == 0) { |
| 3326 return MemOperand(base, key, LSL, element_size_shift); |
| 3327 } else { |
| 3328 __ Add(scratch, base, Operand(key, LSL, element_size_shift)); |
| 3329 return MemOperand(scratch, additional_offset); |
| 3330 } |
| 3331 } |
| 3332 } else { |
| 3333 // TODO(all): Try to combine these cases a bit more intelligently. |
| 3334 if (additional_offset == 0) { |
| 3335 if (key_is_smi) { |
| 3336 __ SmiUntag(scratch, key); |
| 3337 __ Add(scratch, scratch, additional_index); |
| 3338 } else { |
| 3339 __ Add(scratch, key, additional_index); |
| 3340 } |
| 3341 return MemOperand(base, scratch, LSL, element_size_shift); |
| 3342 } else { |
| 3343 if (key_is_smi) { |
| 3344 __ Add(scratch, base, |
| 3345 Operand::UntagSmiAndScale(key, element_size_shift)); |
| 3346 } else { |
| 3347 __ Add(scratch, base, Operand(key, LSL, element_size_shift)); |
| 3348 } |
| 3349 return MemOperand( |
| 3350 scratch, |
| 3351 (additional_index << element_size_shift) + additional_offset); |
| 3352 } |
| 3353 } |
| 3354 } |
| 3355 |
| 3356 |
| 3357 void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) { |
| 3358 Register ext_ptr = ToRegister(instr->elements()); |
| 3359 Register scratch; |
| 3360 ElementsKind elements_kind = instr->elements_kind(); |
| 3361 |
| 3362 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); |
| 3363 bool key_is_constant = instr->key()->IsConstantOperand(); |
| 3364 Register key = no_reg; |
| 3365 int constant_key = 0; |
| 3366 if (key_is_constant) { |
| 3367 ASSERT(instr->temp() == NULL); |
| 3368 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); |
| 3369 if (constant_key & 0xf0000000) { |
| 3370 Abort(kArrayIndexConstantValueTooBig); |
| 3371 } |
| 3372 } else { |
| 3373 scratch = ToRegister(instr->temp()); |
| 3374 key = ToRegister(instr->key()); |
| 3375 } |
| 3376 |
| 3377 MemOperand mem_op = |
| 3378 PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi, |
| 3379 key_is_constant, constant_key, |
| 3380 elements_kind, |
| 3381 instr->additional_index()); |
| 3382 |
| 3383 if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) || |
| 3384 (elements_kind == FLOAT32_ELEMENTS)) { |
| 3385 DoubleRegister result = ToDoubleRegister(instr->result()); |
| 3386 __ Ldr(result.S(), mem_op); |
| 3387 __ Fcvt(result, result.S()); |
| 3388 } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) || |
| 3389 (elements_kind == FLOAT64_ELEMENTS)) { |
| 3390 DoubleRegister result = ToDoubleRegister(instr->result()); |
| 3391 __ Ldr(result, mem_op); |
| 3392 } else { |
| 3393 Register result = ToRegister(instr->result()); |
| 3394 |
| 3395 switch (elements_kind) { |
| 3396 case EXTERNAL_INT8_ELEMENTS: |
| 3397 case INT8_ELEMENTS: |
| 3398 __ Ldrsb(result, mem_op); |
| 3399 break; |
| 3400 case EXTERNAL_UINT8_CLAMPED_ELEMENTS: |
| 3401 case EXTERNAL_UINT8_ELEMENTS: |
| 3402 case UINT8_ELEMENTS: |
| 3403 case UINT8_CLAMPED_ELEMENTS: |
| 3404 __ Ldrb(result, mem_op); |
| 3405 break; |
| 3406 case EXTERNAL_INT16_ELEMENTS: |
| 3407 case INT16_ELEMENTS: |
| 3408 __ Ldrsh(result, mem_op); |
| 3409 break; |
| 3410 case EXTERNAL_UINT16_ELEMENTS: |
| 3411 case UINT16_ELEMENTS: |
| 3412 __ Ldrh(result, mem_op); |
| 3413 break; |
| 3414 case EXTERNAL_INT32_ELEMENTS: |
| 3415 case INT32_ELEMENTS: |
| 3416 __ Ldrsw(result, mem_op); |
| 3417 break; |
| 3418 case EXTERNAL_UINT32_ELEMENTS: |
| 3419 case UINT32_ELEMENTS: |
| 3420 __ Ldr(result.W(), mem_op); |
| 3421 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { |
| 3422 // Deopt if value >= 0x80000000, as it cannot be represented as an int32.
| 3423 __ Tst(result, 0xFFFFFFFF80000000); |
| 3424 DeoptimizeIf(ne, instr->environment()); |
| 3425 } |
| 3426 break; |
| 3427 case FLOAT32_ELEMENTS: |
| 3428 case FLOAT64_ELEMENTS: |
| 3429 case EXTERNAL_FLOAT32_ELEMENTS: |
| 3430 case EXTERNAL_FLOAT64_ELEMENTS: |
| 3431 case FAST_HOLEY_DOUBLE_ELEMENTS: |
| 3432 case FAST_HOLEY_ELEMENTS: |
| 3433 case FAST_HOLEY_SMI_ELEMENTS: |
| 3434 case FAST_DOUBLE_ELEMENTS: |
| 3435 case FAST_ELEMENTS: |
| 3436 case FAST_SMI_ELEMENTS: |
| 3437 case DICTIONARY_ELEMENTS: |
| 3438 case NON_STRICT_ARGUMENTS_ELEMENTS: |
| 3439 UNREACHABLE(); |
| 3440 break; |
| 3441 } |
| 3442 } |
| 3443 } |
| 3444 |
| 3445 |
| 3446 void LCodeGen::CalcKeyedArrayBaseRegister(Register base, |
| 3447 Register elements, |
| 3448 Register key, |
| 3449 bool key_is_tagged, |
| 3450 ElementsKind elements_kind) { |
| 3451 int element_size_shift = ElementsKindToShiftSize(elements_kind); |
| 3452 |
| 3453 // Even though the HLoad/StoreKeyed instructions force the input |
| 3454 // representation for the key to be an integer, the input gets replaced during |
| 3455 // bounds check elimination with the index argument to the bounds check, which |
| 3456 // can be tagged, so that case must be handled here, too. |
| 3457 if (key_is_tagged) { |
| 3458 __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift)); |
| 3459 } else { |
| 3460 // Sign extend key because it could be a 32-bit negative value and the |
| 3461 // address computation happens in 64-bit. |
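| // For example, a W-register key holding 0xffffffff denotes -1 and must
| // subtract (1 << element_size_shift) from the base; SXTW achieves this.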
| 3462 ASSERT((element_size_shift >= 0) && (element_size_shift <= 4)); |
| 3463 __ Add(base, elements, Operand(key, SXTW, element_size_shift)); |
| 3464 } |
| 3465 } |
| 3466 |
| 3467 |
| 3468 void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) { |
| 3469 Register elements = ToRegister(instr->elements()); |
| 3470 DoubleRegister result = ToDoubleRegister(instr->result()); |
| 3471 Register load_base; |
| 3472 int offset = 0; |
| 3473 |
| 3474 if (instr->key()->IsConstantOperand()) { |
| 3475 ASSERT(instr->hydrogen()->RequiresHoleCheck() || |
| 3476 (instr->temp() == NULL)); |
| 3477 |
| 3478 int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); |
| 3479 if (constant_key & 0xf0000000) { |
| 3480 Abort(kArrayIndexConstantValueTooBig); |
| 3481 } |
| 3482 offset = FixedDoubleArray::OffsetOfElementAt(constant_key + |
| 3483 instr->additional_index()); |
| 3484 load_base = elements; |
| 3485 } else { |
| 3486 load_base = ToRegister(instr->temp()); |
| 3487 Register key = ToRegister(instr->key()); |
| 3488 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi(); |
| 3489 CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged, |
| 3490 instr->hydrogen()->elements_kind()); |
| 3491 offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index()); |
| 3492 } |
| 3493 __ Ldr(result, FieldMemOperand(load_base, offset)); |
| 3494 |
| 3495 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3496 Register scratch = ToRegister(instr->temp()); |
| 3497 |
| 3498 // TODO(all): Is it faster to reload this value to an integer register, or |
| 3499 // move from fp to integer? |
| 3500 __ Fmov(scratch, result); |
| 3501 __ Cmp(scratch, kHoleNanInt64); |
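| // The hole is one specific NaN bit pattern, so the raw bits are compared
| // in an integer register; an FP compare could not isolate it, since NaNs
| // never compare equal.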
| 3502 DeoptimizeIf(eq, instr->environment()); |
| 3503 } |
| 3504 } |
| 3505 |
| 3506 |
| 3507 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) { |
| 3508 Register elements = ToRegister(instr->elements()); |
| 3509 Register result = ToRegister(instr->result()); |
| 3510 Register load_base; |
| 3511 int offset = 0; |
| 3512 |
| 3513 if (instr->key()->IsConstantOperand()) { |
| 3514 ASSERT(instr->temp() == NULL); |
| 3515 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); |
| 3516 offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) + |
| 3517 instr->additional_index()); |
| 3518 load_base = elements; |
| 3519 } else { |
| 3520 load_base = ToRegister(instr->temp()); |
| 3521 Register key = ToRegister(instr->key()); |
| 3522 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi(); |
| 3523 CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged, |
| 3524 instr->hydrogen()->elements_kind()); |
| 3525 offset = FixedArray::OffsetOfElementAt(instr->additional_index()); |
| 3526 } |
| 3527 Representation representation = instr->hydrogen()->representation(); |
| 3528 |
| 3529 if (representation.IsInteger32() && |
| 3530 instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS) { |
| 3531 STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0); |
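| // The smi payload lives in the upper 32 bits, so (assuming the usual
| // little-endian layout) UntagSmiFieldMemOperand can read the int32 value
| // with a single W-sized load of the high word.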
| 3532 __ Load(result, UntagSmiFieldMemOperand(load_base, offset), |
| 3533 Representation::Integer32()); |
| 3534 } else { |
| 3535 __ Load(result, FieldMemOperand(load_base, offset), |
| 3536 representation); |
| 3537 } |
| 3538 |
| 3539 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3540 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { |
| 3541 DeoptimizeIfNotSmi(result, instr->environment()); |
| 3542 } else { |
| 3543 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, |
| 3544 instr->environment()); |
| 3545 } |
| 3546 } |
| 3547 } |
| 3548 |
| 3549 |
| 3550 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { |
| 3551 ASSERT(ToRegister(instr->context()).is(cp)); |
| 3552 ASSERT(ToRegister(instr->object()).Is(x1)); |
| 3553 ASSERT(ToRegister(instr->key()).Is(x0)); |
| 3554 |
| 3555 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); |
| 3556 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 3557 |
| 3558 ASSERT(ToRegister(instr->result()).Is(x0)); |
| 3559 } |
| 3560 |
| 3561 |
| 3562 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { |
| 3563 HObjectAccess access = instr->hydrogen()->access(); |
| 3564 int offset = access.offset(); |
| 3565 Register object = ToRegister(instr->object()); |
| 3566 |
| 3567 if (access.IsExternalMemory()) { |
| 3568 Register result = ToRegister(instr->result()); |
| 3569 __ Load(result, MemOperand(object, offset), access.representation()); |
| 3570 return; |
| 3571 } |
| 3572 |
| 3573 if (instr->hydrogen()->representation().IsDouble()) { |
| 3574 FPRegister result = ToDoubleRegister(instr->result()); |
| 3575 __ Ldr(result, FieldMemOperand(object, offset)); |
| 3576 return; |
| 3577 } |
| 3578 |
| 3579 Register result = ToRegister(instr->result()); |
| 3580 Register source; |
| 3581 if (access.IsInobject()) { |
| 3582 source = object; |
| 3583 } else { |
| 3584 // Load the properties array, using result as a scratch register. |
| 3585 __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); |
| 3586 source = result; |
| 3587 } |
| 3588 |
| 3589 if (access.representation().IsSmi() && |
| 3590 instr->hydrogen()->representation().IsInteger32()) { |
| 3591     // Read the int32 value directly from the upper half of the smi.
| 3592 STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0); |
| 3593 __ Load(result, UntagSmiFieldMemOperand(source, offset), |
| 3594 Representation::Integer32()); |
| 3595 } else { |
| 3596 __ Load(result, FieldMemOperand(source, offset), access.representation()); |
| 3597 } |
| 3598 } |
| 3599 |
| 3600 |
| 3601 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { |
| 3602 ASSERT(ToRegister(instr->context()).is(cp)); |
| 3603 // LoadIC expects x2 to hold the name, and x0 to hold the receiver. |
| 3604 ASSERT(ToRegister(instr->object()).is(x0)); |
| 3605 __ Mov(x2, Operand(instr->name())); |
| 3606 |
| 3607 Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL); |
| 3608 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 3609 |
| 3610 ASSERT(ToRegister(instr->result()).is(x0)); |
| 3611 } |
| 3612 |
| 3613 |
| 3614 void LCodeGen::DoLoadRoot(LLoadRoot* instr) { |
| 3615 Register result = ToRegister(instr->result()); |
| 3616 __ LoadRoot(result, instr->index()); |
| 3617 } |
| 3618 |
| 3619 |
| 3620 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) { |
| 3621 Register result = ToRegister(instr->result()); |
| 3622 Register map = ToRegister(instr->value()); |
| 3623 __ EnumLengthSmi(result, map); |
| 3624 } |
| 3625 |
| 3626 |
| 3627 void LCodeGen::DoMathAbs(LMathAbs* instr) { |
| 3628 Representation r = instr->hydrogen()->value()->representation(); |
| 3629 if (r.IsDouble()) { |
| 3630 DoubleRegister input = ToDoubleRegister(instr->value()); |
| 3631 DoubleRegister result = ToDoubleRegister(instr->result()); |
| 3632 __ Fabs(result, input); |
| 3633 } else if (r.IsSmi() || r.IsInteger32()) { |
| 3634 Register input = r.IsSmi() ? ToRegister(instr->value()) |
| 3635 : ToRegister32(instr->value()); |
| 3636 Register result = r.IsSmi() ? ToRegister(instr->result()) |
| 3637 : ToRegister32(instr->result()); |
| 3638 Label done; |
| 3639 __ Abs(result, input, NULL, &done); |
| 3640 Deoptimize(instr->environment()); |
| 3641 __ Bind(&done); |
| 3642 } |
| 3643 } |
| 3644 |
| 3645 |
| 3646 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr, |
| 3647 Label* exit, |
| 3648 Label* allocation_entry) { |
| 3649 // Handle the tricky cases of MathAbsTagged: |
| 3650 // - HeapNumber inputs. |
| 3651 // - Negative inputs produce a positive result, so a new HeapNumber is |
| 3652 // allocated to hold it. |
| 3653 // - Positive inputs are returned as-is, since there is no need to allocate |
| 3654 // a new HeapNumber for the result. |
| 3655 // - The (smi) input -0x80000000 produces +0x80000000, which does not fit in
| 3656 //   a smi. In this case, the inline code sets the result and jumps directly
| 3657 // to the allocation_entry label. |
| 3658 ASSERT(instr->context() != NULL); |
| 3659 ASSERT(ToRegister(instr->context()).is(cp)); |
| 3660 Register input = ToRegister(instr->value()); |
| 3661 Register temp1 = ToRegister(instr->temp1()); |
| 3662 Register temp2 = ToRegister(instr->temp2()); |
| 3663 Register result_bits = ToRegister(instr->temp3()); |
| 3664 Register result = ToRegister(instr->result()); |
| 3665 |
| 3666 Label runtime_allocation; |
| 3667 |
| 3668 // Deoptimize if the input is not a HeapNumber. |
| 3669 __ Ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 3670 DeoptimizeIfNotRoot(temp1, Heap::kHeapNumberMapRootIndex, |
| 3671 instr->environment()); |
| 3672 |
| 3673 // If the argument is positive, we can return it as-is, without any need to |
| 3674 // allocate a new HeapNumber for the result. We have to do this in integer |
| 3675 // registers (rather than with fabs) because we need to be able to distinguish |
| 3676 // the two zeroes. |
| 3677 __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset)); |
| 3678 __ Mov(result, input); |
| 3679 __ Tbz(result_bits, kXSignBit, exit); |
| 3680 |
| 3681 // Calculate abs(input) by clearing the sign bit. |
| 3682 __ Bic(result_bits, result_bits, kXSignMask); |
| 3683 |
| 3684 // Allocate a new HeapNumber to hold the result. |
| 3685 // result_bits The bit representation of the (double) result. |
| 3686 __ Bind(allocation_entry); |
| 3687 __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2); |
| 3688 // The inline (non-deferred) code will store result_bits into result. |
| 3689 __ B(exit); |
| 3690 |
| 3691 __ Bind(&runtime_allocation); |
| 3692 if (FLAG_debug_code) { |
| 3693 // Because result is in the pointer map, we need to make sure it has a valid |
| 3694 // tagged value before we call the runtime. We speculatively set it to the |
| 3695 // input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it should already |
| 3696 // be valid. |
| 3697 Label result_ok; |
| 3698 Register input = ToRegister(instr->value()); |
| 3699 __ JumpIfSmi(result, &result_ok); |
| 3700 __ Cmp(input, result); |
| 3701 // TODO(all): Shouldn't we assert here? |
| 3702 DeoptimizeIf(ne, instr->environment()); |
| 3703 __ Bind(&result_ok); |
| 3704 } |
| 3705 |
| 3706 { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
| 3707 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, |
| 3708 instr->context()); |
| 3709 __ StoreToSafepointRegisterSlot(x0, result); |
| 3710 } |
| 3711 // The inline (non-deferred) code will store result_bits into result. |
| 3712 } |
| 3713 |
| 3714 |
| 3715 void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) { |
| 3716 // Class for deferred case. |
| 3717 class DeferredMathAbsTagged: public LDeferredCode { |
| 3718 public: |
| 3719 DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr) |
| 3720 : LDeferredCode(codegen), instr_(instr) { } |
| 3721 virtual void Generate() { |
| 3722 codegen()->DoDeferredMathAbsTagged(instr_, exit(), |
| 3723 allocation_entry()); |
| 3724 } |
| 3725 virtual LInstruction* instr() { return instr_; } |
| 3726 Label* allocation_entry() { return &allocation; } |
| 3727 private: |
| 3728 LMathAbsTagged* instr_; |
| 3729 Label allocation; |
| 3730 }; |
| 3731 |
| 3732 // TODO(jbramley): The early-exit mechanism would skip the new frame handling |
| 3733 // in GenerateDeferredCode. Tidy this up. |
| 3734 ASSERT(!NeedsDeferredFrame()); |
| 3735 |
| 3736 DeferredMathAbsTagged* deferred = |
| 3737 new(zone()) DeferredMathAbsTagged(this, instr); |
| 3738 |
| 3739 ASSERT(instr->hydrogen()->value()->representation().IsTagged() || |
| 3740 instr->hydrogen()->value()->representation().IsSmi()); |
| 3741 Register input = ToRegister(instr->value()); |
| 3742 Register result_bits = ToRegister(instr->temp3()); |
| 3743 Register result = ToRegister(instr->result()); |
| 3744 Label done; |
| 3745 |
| 3746 // Handle smis inline. |
| 3747 // We can treat smis as 64-bit integers, since the (low-order) tag bits will |
| 3748 // never get set by the negation. This is therefore the same as the Integer32 |
| 3749 // case in DoMathAbs, except that it operates on 64-bit values. |
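| // For example, the smi -5 is 0xfffffffb00000000; 64-bit negation yields
| // 0x0000000500000000, which is the smi 5 with the tag bits still clear.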
| 3750 STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0)); |
| 3751 |
| 3752 // TODO(jbramley): We can't use JumpIfNotSmi here because the tbz it uses |
| 3753 // doesn't always have enough range. Consider making a variant of it, or a |
| 3754 // TestIsSmi helper. |
| 3755 STATIC_ASSERT(kSmiTag == 0); |
| 3756 __ Tst(input, kSmiTagMask); |
| 3757 __ B(ne, deferred->entry()); |
| 3758 |
| 3759 __ Abs(result, input, NULL, &done); |
| 3760 |
| 3761 // The result is the magnitude (abs) of the smallest value a smi can |
| 3762 // represent, encoded as a double. |
| 3763 __ Mov(result_bits, double_to_rawbits(0x80000000)); |
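| // double_to_rawbits(0x80000000) is the bit pattern of the double
| // 2147483648.0, i.e. abs(Smi::kMinValue) for a 32-bit smi payload.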
| 3764 __ B(deferred->allocation_entry()); |
| 3765 |
| 3766 __ Bind(deferred->exit()); |
| 3767 __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset)); |
| 3768 |
| 3769 __ Bind(&done); |
| 3770 } |
| 3771 |
| 3772 |
| 3773 void LCodeGen::DoMathExp(LMathExp* instr) { |
| 3774 DoubleRegister input = ToDoubleRegister(instr->value()); |
| 3775 DoubleRegister result = ToDoubleRegister(instr->result()); |
| 3776 DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1()); |
| 3777 DoubleRegister double_temp2 = double_scratch(); |
| 3778 Register temp1 = ToRegister(instr->temp1()); |
| 3779 Register temp2 = ToRegister(instr->temp2()); |
| 3780 Register temp3 = ToRegister(instr->temp3()); |
| 3781 |
| 3782 MathExpGenerator::EmitMathExp(masm(), input, result, |
| 3783 double_temp1, double_temp2, |
| 3784 temp1, temp2, temp3); |
| 3785 } |
| 3786 |
| 3787 |
| 3788 void LCodeGen::DoMathFloor(LMathFloor* instr) { |
| 3789 // TODO(jbramley): If we could provide a double result, we could use frintm |
| 3790 // and produce a valid double result in a single instruction. |
| 3791 DoubleRegister input = ToDoubleRegister(instr->value()); |
| 3792 Register result = ToRegister(instr->result()); |
| 3793 Label deopt; |
| 3794 Label done; |
| 3795 |
| 3796 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3797 __ JumpIfMinusZero(input, &deopt); |
| 3798 } |
| 3799 |
| 3800 __ Fcvtms(result, input); |
| 3801 |
| 3802 // Check that the result fits into a 32-bit integer. |
| 3803 // - The result did not overflow. |
| 3804 __ Cmp(result, Operand(result, SXTW)); |
| 3805 // - The input was not NaN. |
| 3806 __ Fccmp(input, input, NoFlag, eq); |
| 3807 __ B(&done, eq); |
| 3808 |
| 3809 __ Bind(&deopt); |
| 3810 Deoptimize(instr->environment()); |
| 3811 |
| 3812 __ Bind(&done); |
| 3813 } |
| 3814 |
| 3815 |
| 3816 void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) { |
| 3817 Register result = ToRegister32(instr->result()); |
| 3818 Register left = ToRegister32(instr->left()); |
| 3819 Register right = ToRegister32(instr->right()); |
| 3820 Register remainder = ToRegister32(instr->temp()); |
| 3821 |
| 3822   // Sdiv does not trap on this architecture, so we can execute it
| 3823   // speculatively, before the divide-by-zero and overflow checks below.
| 3824 __ Sdiv(result, left, right); |
| 3825 |
| 3826 // Check for x / 0. |
| 3827 DeoptimizeIfZero(right, instr->environment()); |
| 3828 |
| 3829 // Check for (kMinInt / -1). |
| 3830 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
| 3831 // The V flag will be set iff left == kMinInt. |
| 3832 __ Cmp(left, 1); |
| 3833 __ Ccmp(right, -1, NoFlag, vs); |
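| // After the Ccmp, "eq" holds only when (left == kMinInt) && (right == -1),
| // the one combination whose quotient, -kMinInt, is not representable.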
| 3834 DeoptimizeIf(eq, instr->environment()); |
| 3835 } |
| 3836 |
| 3837 // Check for (0 / -x) that will produce negative zero. |
| 3838 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3839 __ Cmp(right, 0); |
| 3840 __ Ccmp(left, 0, ZFlag, mi); |
| 3841 // "right" can't be null because the code would have already been |
| 3842 // deoptimized. The Z flag is set only if (right < 0) and (left == 0). |
| 3843 // In this case we need to deoptimize to produce a -0. |
| 3844 DeoptimizeIf(eq, instr->environment()); |
| 3845 } |
| 3846 |
| 3847 Label done; |
| 3848 // If both operands have the same sign then we are done. |
| 3849 __ Eor(remainder, left, right); |
| 3850 __ Tbz(remainder, kWSignBit, &done); |
| 3851 |
| 3852 // Check if the result needs to be corrected. |
| 3853 __ Msub(remainder, result, right, left); |
| 3854 __ Cbz(remainder, &done); |
| 3855 __ Sub(result, result, 1); |
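| // For example, -7 / 2: Sdiv truncates towards zero and yields -3 with
| // remainder -1; the signs differ and the remainder is non-zero, so the
| // result is corrected to floor(-3.5) = -4.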
| 3856 |
| 3857 __ Bind(&done); |
| 3858 } |
| 3859 |
| 3860 |
| 3861 void LCodeGen::DoMathLog(LMathLog* instr) { |
| 3862 ASSERT(instr->IsMarkedAsCall()); |
| 3863 ASSERT(ToDoubleRegister(instr->value()).is(d0)); |
| 3864 __ CallCFunction(ExternalReference::math_log_double_function(isolate()), |
| 3865 0, 1); |
| 3866 ASSERT(ToDoubleRegister(instr->result()).Is(d0)); |
| 3867 } |
| 3868 |
| 3869 |
| 3870 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { |
| 3871 DoubleRegister input = ToDoubleRegister(instr->value()); |
| 3872 DoubleRegister result = ToDoubleRegister(instr->result()); |
| 3873 Label done; |
| 3874 |
| 3875 // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases: |
| 3876 // Math.pow(-Infinity, 0.5) == +Infinity |
| 3877 // Math.pow(-0.0, 0.5) == +0.0 |
| 3878 |
| 3879 // Catch -infinity inputs first. |
| 3880 // TODO(jbramley): A constant infinity register would be helpful here. |
| 3881 __ Fmov(double_scratch(), kFP64NegativeInfinity); |
| 3882 __ Fcmp(double_scratch(), input); |
| 3883 __ Fabs(result, input); |
| 3884 __ B(&done, eq); |
| 3885 |
| 3886 // Add +0.0 to convert -0.0 to +0.0. |
| 3887 // TODO(jbramley): A constant zero register would be helpful here. |
| 3888 __ Fmov(double_scratch(), 0.0); |
| 3889 __ Fadd(double_scratch(), input, double_scratch()); |
| 3890 __ Fsqrt(result, double_scratch()); |
| 3891 |
| 3892 __ Bind(&done); |
| 3893 } |
| 3894 |
| 3895 |
| 3896 void LCodeGen::DoPower(LPower* instr) { |
| 3897 Representation exponent_type = instr->hydrogen()->right()->representation(); |
| 3898 // Having marked this as a call, we can use any registers. |
| 3899 // Just make sure that the input/output registers are the expected ones. |
| 3900 ASSERT(!instr->right()->IsDoubleRegister() || |
| 3901 ToDoubleRegister(instr->right()).is(d1)); |
| 3902 ASSERT(exponent_type.IsInteger32() || !instr->right()->IsRegister() || |
| 3903 ToRegister(instr->right()).is(x11)); |
| 3904 ASSERT(!exponent_type.IsInteger32() || ToRegister(instr->right()).is(x12)); |
| 3905 ASSERT(ToDoubleRegister(instr->left()).is(d0)); |
| 3906 ASSERT(ToDoubleRegister(instr->result()).is(d0)); |
| 3907 |
| 3908 if (exponent_type.IsSmi()) { |
| 3909 MathPowStub stub(MathPowStub::TAGGED); |
| 3910 __ CallStub(&stub); |
| 3911 } else if (exponent_type.IsTagged()) { |
| 3912 Label no_deopt; |
| 3913 __ JumpIfSmi(x11, &no_deopt); |
| 3914 __ Ldr(x0, FieldMemOperand(x11, HeapObject::kMapOffset)); |
| 3915 DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex, |
| 3916 instr->environment()); |
| 3917 __ Bind(&no_deopt); |
| 3918 MathPowStub stub(MathPowStub::TAGGED); |
| 3919 __ CallStub(&stub); |
| 3920 } else if (exponent_type.IsInteger32()) { |
| 3921 MathPowStub stub(MathPowStub::INTEGER); |
| 3922 __ CallStub(&stub); |
| 3923 } else { |
| 3924 ASSERT(exponent_type.IsDouble()); |
| 3925 MathPowStub stub(MathPowStub::DOUBLE); |
| 3926 __ CallStub(&stub); |
| 3927 } |
| 3928 } |
| 3929 |
| 3930 |
| 3931 void LCodeGen::DoMathRound(LMathRound* instr) { |
| 3932 // TODO(jbramley): We could provide a double result here using frint. |
| 3933 DoubleRegister input = ToDoubleRegister(instr->value()); |
| 3934 DoubleRegister temp1 = ToDoubleRegister(instr->temp1()); |
| 3935 Register result = ToRegister(instr->result()); |
| 3936 Label try_rounding; |
| 3937 Label deopt; |
| 3938 Label done; |
| 3939 |
| 3940 // Math.round() rounds to the nearest integer, with ties going towards |
| 3941 // +infinity. This does not match any IEEE-754 rounding mode. |
| 3942 // - Infinities and NaNs are propagated unchanged, but cause deopts because |
| 3943 // they can't be represented as integers. |
| 3944 // - The sign of the result is the same as the sign of the input. This means |
| 3945 // that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a |
| 3946 // result of -0.0. |
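| // For example: Math.round(2.5) == 3, Math.round(-2.5) == -2, and
| // Math.round(-0.25) is -0.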
| 3947 |
| 3948 DoubleRegister dot_five = double_scratch(); |
| 3949 __ Fmov(dot_five, 0.5); |
| 3950 __ Fabs(temp1, input); |
| 3951 __ Fcmp(temp1, dot_five); |
| 3952 // If input is in [-0.5, -0], the result is -0. |
| 3953 // If input is in [+0, +0.5[, the result is +0. |
| 3954 // If the input is +0.5, the result is 1. |
| 3955 __ B(hi, &try_rounding); // hi so NaN will also branch. |
| 3956 |
| 3957 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3958 __ Fmov(result, input); |
| 3959 __ Cmp(result, 0); |
| 3960 DeoptimizeIf(mi, instr->environment()); // [-0.5, -0.0]. |
| 3961 } |
| 3962 __ Fcmp(input, dot_five); |
| 3963   __ Mov(result, 1);  // The +0.5 input rounds to 1.
| 3964   // The remaining inputs ([+0, +0.5[ if kBailoutOnMinusZero is set,
| 3965   // [-0.5, +0.5[ otherwise) produce 0 (xzr).
| 3966 __ Csel(result, result, xzr, eq); |
| 3967 __ B(&done); |
| 3968 |
| 3969 __ Bind(&deopt); |
| 3970 Deoptimize(instr->environment()); |
| 3971 |
| 3972 __ Bind(&try_rounding); |
| 3973 // Since we're providing a 32-bit result, we can implement ties-to-infinity by |
| 3974 // adding 0.5 to the input, then taking the floor of the result. This does not |
| 3975 // work for very large positive doubles because adding 0.5 would cause an |
| 3976 // intermediate rounding stage, so a different approach will be necessary if a |
| 3977 // double result is needed. |
| 3978 __ Fadd(temp1, input, dot_five); |
| 3979 __ Fcvtms(result, temp1); |
| 3980 |
| 3981 // Deopt if |
| 3982 // * the input was NaN |
| 3983 // * the result is not representable using a 32-bit integer. |
| 3984 __ Fcmp(input, 0.0); |
| 3985 __ Ccmp(result, Operand(result.W(), SXTW), NoFlag, vc); |
| 3986 __ B(ne, &deopt); |
| 3987 |
| 3988 __ Bind(&done); |
| 3989 } |
| 3990 |
| 3991 |
| 3992 void LCodeGen::DoMathSqrt(LMathSqrt* instr) { |
| 3993 DoubleRegister input = ToDoubleRegister(instr->value()); |
| 3994 DoubleRegister result = ToDoubleRegister(instr->result()); |
| 3995 __ Fsqrt(result, input); |
| 3996 } |
| 3997 |
| 3998 |
| 3999 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { |
| 4000 HMathMinMax::Operation op = instr->hydrogen()->operation(); |
| 4001 if (instr->hydrogen()->representation().IsInteger32()) { |
| 4002 Register result = ToRegister32(instr->result()); |
| 4003 Register left = ToRegister32(instr->left()); |
| 4004 Operand right = ToOperand32I(instr->right()); |
| 4005 |
| 4006 __ Cmp(left, right); |
| 4007 __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le); |
| 4008 } else if (instr->hydrogen()->representation().IsSmi()) { |
| 4009 Register result = ToRegister(instr->result()); |
| 4010 Register left = ToRegister(instr->left()); |
| 4011 Operand right = ToOperand(instr->right()); |
| 4012 |
| 4013 __ Cmp(left, right); |
| 4014 __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le); |
| 4015 } else { |
| 4016 ASSERT(instr->hydrogen()->representation().IsDouble()); |
| 4017 DoubleRegister result = ToDoubleRegister(instr->result()); |
| 4018 DoubleRegister left = ToDoubleRegister(instr->left()); |
| 4019 DoubleRegister right = ToDoubleRegister(instr->right()); |
| 4020 |
| 4021 if (op == HMathMinMax::kMathMax) { |
| 4022 __ Fmax(result, left, right); |
| 4023 } else { |
| 4024 ASSERT(op == HMathMinMax::kMathMin); |
| 4025 __ Fmin(result, left, right); |
| 4026 } |
| 4027 } |
| 4028 } |
| 4029 |
| 4030 |
| 4031 void LCodeGen::DoModI(LModI* instr) { |
| 4032 HMod* hmod = instr->hydrogen(); |
| 4033 HValue* hleft = hmod->left(); |
| 4034 HValue* hright = hmod->right(); |
| 4035 |
| 4036 Label done; |
| 4037 Register result = ToRegister32(instr->result()); |
| 4038 Register dividend = ToRegister32(instr->left()); |
| 4039 |
| 4040 bool need_minus_zero_check = (hmod->CheckFlag(HValue::kBailoutOnMinusZero) && |
| 4041 hleft->CanBeNegative() && hmod->CanBeZero()); |
| 4042 |
| 4043 if (hmod->RightIsPowerOf2()) { |
| 4044 // Note: The code below even works when right contains kMinInt. |
| 4045 int32_t divisor = Abs(hright->GetInteger32Constant()); |
| 4046 |
| 4047 if (hleft->CanBeNegative()) { |
| 4048 __ Cmp(dividend, 0); |
| 4049 __ Cneg(result, dividend, mi); |
| 4050 __ And(result, result, divisor - 1); |
| 4051 __ Cneg(result, result, mi); |
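| // For example, -7 mod 4: the dividend is negated to 7, masked to
| // 7 & 3 = 3, then negated back to -3, matching JS semantics where the
| // result takes the sign of the dividend.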
| 4052 if (need_minus_zero_check) { |
| 4053 __ Cbnz(result, &done); |
| 4054 // The result is 0. Deoptimize if the dividend was negative. |
| 4055 DeoptimizeIf(mi, instr->environment()); |
| 4056 } |
| 4057 } else { |
| 4058 __ And(result, dividend, divisor - 1); |
| 4059 } |
| 4060 |
| 4061 } else { |
| 4062 Label deopt; |
| 4063 Register divisor = ToRegister32(instr->right()); |
| 4064 // Compute: |
| 4065 // modulo = dividend - quotient * divisor |
| 4066 __ Sdiv(result, dividend, divisor); |
| 4067 if (hright->CanBeZero()) { |
| 4068 // Combine the deoptimization sites. |
| 4069 Label ok; |
| 4070 __ Cbnz(divisor, &ok); |
| 4071 __ Bind(&deopt); |
| 4072 Deoptimize(instr->environment()); |
| 4073 __ Bind(&ok); |
| 4074 } |
| 4075 __ Msub(result, result, divisor, dividend); |
| 4076 if (need_minus_zero_check) { |
| 4077 __ Cbnz(result, &done); |
| 4078 if (deopt.is_bound()) { |
| 4079 __ Tbnz(dividend, kWSignBit, &deopt); |
| 4080 } else { |
| 4081 DeoptimizeIfNegative(dividend, instr->environment()); |
| 4082 } |
| 4083 } |
| 4084 } |
| 4085 __ Bind(&done); |
| 4086 } |
| 4087 |
| 4088 |
| 4089 void LCodeGen::DoMulConstIS(LMulConstIS* instr) { |
| 4090 ASSERT(instr->hydrogen()->representation().IsSmiOrInteger32()); |
| 4091 bool is_smi = instr->hydrogen()->representation().IsSmi(); |
| 4092 Register result = |
| 4093 is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result()); |
| 4094 Register left = |
| 4095 is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left()) ; |
| 4096 int32_t right = ToInteger32(instr->right()); |
| 4097 |
| 4098 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 4099 bool bailout_on_minus_zero = |
| 4100 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
| 4101 |
| 4102 if (bailout_on_minus_zero) { |
| 4103 if (right < 0) { |
| 4104 // The result is -0 if right is negative and left is zero. |
| 4105 DeoptimizeIfZero(left, instr->environment()); |
| 4106 } else if (right == 0) { |
| 4107 // The result is -0 if the right is zero and the left is negative. |
| 4108 DeoptimizeIfNegative(left, instr->environment()); |
| 4109 } |
| 4110 } |
| 4111 |
| 4112 switch (right) { |
| 4113 // Cases which can detect overflow. |
| 4114 case -1: |
| 4115 if (can_overflow) { |
| 4116 // Only 0x80000000 can overflow here. |
| 4117 __ Negs(result, left); |
| 4118 DeoptimizeIf(vs, instr->environment()); |
| 4119 } else { |
| 4120 __ Neg(result, left); |
| 4121 } |
| 4122 break; |
| 4123 case 0: |
| 4124 // This case can never overflow. |
| 4125 __ Mov(result, 0); |
| 4126 break; |
| 4127 case 1: |
| 4128 // This case can never overflow. |
| 4129 __ Mov(result, left, kDiscardForSameWReg); |
| 4130 break; |
| 4131 case 2: |
| 4132 if (can_overflow) { |
| 4133 __ Adds(result, left, left); |
| 4134 DeoptimizeIf(vs, instr->environment()); |
| 4135 } else { |
| 4136 __ Add(result, left, left); |
| 4137 } |
| 4138 break; |
| 4139 |
| 4140     // For all other cases we do not detect overflow, since doing so would
| 4141     // probably be no faster than the smull-based method used in LMulI.
| 4142 // TODO(jbramley): Investigate this, and add overflow support if it would |
| 4143 // be useful. |
| 4144 default: |
| 4145 ASSERT(!can_overflow); |
| 4146 |
| 4147 // Multiplication by constant powers of two (and some related values) |
| 4148 // can be done efficiently with shifted operands. |
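| // For example, right == 5 hits the IsPowerOf2(right - 1) case below and
| // emits result = left + (left << 2).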
| 4149 if (right >= 0) { |
| 4150 if (IsPowerOf2(right)) { |
| 4151 // result = left << log2(right) |
| 4152 __ Lsl(result, left, WhichPowerOf2(right)); |
| 4153 } else if (IsPowerOf2(right - 1)) { |
| 4154 // result = left + left << log2(right - 1) |
| 4155 __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1))); |
| 4156 } else if (IsPowerOf2(right + 1)) { |
| 4157 // result = -left + left << log2(right + 1) |
| 4158 __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1))); |
| 4159 __ Neg(result, result); |
| 4160 } else { |
| 4161 UNREACHABLE(); |
| 4162 } |
| 4163 } else { |
| 4164 if (IsPowerOf2(-right)) { |
| 4165 // result = -left << log2(-right) |
| 4166 __ Neg(result, Operand(left, LSL, WhichPowerOf2(-right))); |
| 4167 } else if (IsPowerOf2(-right + 1)) { |
| 4168 // result = left - left << log2(-right + 1) |
| 4169 __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1))); |
| 4170 } else if (IsPowerOf2(-right - 1)) { |
| 4171 // result = -left - left << log2(-right - 1) |
| 4172 __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1))); |
| 4173 __ Neg(result, result); |
| 4174 } else { |
| 4175 UNREACHABLE(); |
| 4176 } |
| 4177 } |
| 4178 break; |
| 4179 } |
| 4180 } |
| 4181 |
| 4182 |
| 4183 void LCodeGen::DoMulI(LMulI* instr) { |
| 4184 Register result = ToRegister32(instr->result()); |
| 4185 Register left = ToRegister32(instr->left()); |
| 4186 Register right = ToRegister32(instr->right()); |
| 4187 |
| 4188 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 4189 bool bailout_on_minus_zero = |
| 4190 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
| 4191 |
| 4192 if (bailout_on_minus_zero) { |
| 4193 // If one operand is zero and the other is negative, the result is -0. |
| 4194     // - Set Z (eq) if either left or right (or both) is 0.
| 4195 __ Cmp(left, 0); |
| 4196 __ Ccmp(right, 0, ZFlag, ne); |
| 4197 // - If so (eq), set N (mi) if left + right is negative. |
| 4198 // - Otherwise, clear N. |
| 4199 __ Ccmn(left, right, NoFlag, eq); |
| 4200 DeoptimizeIf(mi, instr->environment()); |
| 4201 } |
| 4202 |
| 4203 if (can_overflow) { |
| 4204 __ Smull(result.X(), left, right); |
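| // Smull produces the exact 64-bit product; it fits in 32 bits if and only
| // if it equals the sign-extension of its own low word, checked below.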
| 4205 __ Cmp(result.X(), Operand(result, SXTW)); |
| 4206 DeoptimizeIf(ne, instr->environment()); |
| 4207 } else { |
| 4208 __ Mul(result, left, right); |
| 4209 } |
| 4210 } |
| 4211 |
| 4212 |
| 4213 void LCodeGen::DoMulS(LMulS* instr) { |
| 4214 Register result = ToRegister(instr->result()); |
| 4215 Register left = ToRegister(instr->left()); |
| 4216 Register right = ToRegister(instr->right()); |
| 4217 |
| 4218 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 4219 bool bailout_on_minus_zero = |
| 4220 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
| 4221 |
| 4222 if (bailout_on_minus_zero) { |
| 4223 // If one operand is zero and the other is negative, the result is -0. |
| 4224     // - Set Z (eq) if either left or right (or both) is 0.
| 4225 __ Cmp(left, 0); |
| 4226 __ Ccmp(right, 0, ZFlag, ne); |
| 4227 // - If so (eq), set N (mi) if left + right is negative. |
| 4228 // - Otherwise, clear N. |
| 4229 __ Ccmn(left, right, NoFlag, eq); |
| 4230 DeoptimizeIf(mi, instr->environment()); |
| 4231 } |
| 4232 |
| 4233 STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0)); |
| 4234 if (can_overflow) { |
| 4235 __ Smulh(result, left, right); |
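| // Both operands carry their payloads in the upper 32 bits (value * 2^32),
| // so the 128-bit product is (left * right) * 2^64 and Smulh's high half is
| // exactly the untagged result.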
| 4236 __ Cmp(result, Operand(result.W(), SXTW)); |
| 4237 __ SmiTag(result); |
| 4238 DeoptimizeIf(ne, instr->environment()); |
| 4239 } else { |
| 4240 // TODO(jbramley): This could be rewritten to support UseRegisterAtStart. |
| 4241 ASSERT(!AreAliased(result, right)); |
| 4242 __ SmiUntag(result, left); |
| 4243 __ Mul(result, result, right); |
| 4244 } |
| 4245 } |
| 4246 |
| 4247 |
| 4248 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { |
| 4249 // TODO(3095996): Get rid of this. For now, we need to make the |
| 4250 // result register contain a valid pointer because it is already |
| 4251 // contained in the register pointer map. |
| 4252 Register result = ToRegister(instr->result()); |
| 4253 __ Mov(result, 0); |
| 4254 |
| 4255 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
| 4256 // NumberTagU and NumberTagD use the context from the frame, rather than |
| 4257 // the environment's HContext or HInlinedContext value. |
| 4258 // They only call Runtime::kAllocateHeapNumber. |
| 4259 // The corresponding HChange instructions are added in a phase that does |
| 4260 // not have easy access to the local context. |
| 4261 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 4262 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); |
| 4263 RecordSafepointWithRegisters( |
| 4264 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); |
| 4265 __ StoreToSafepointRegisterSlot(x0, result); |
| 4266 } |
| 4267 |
| 4268 |
| 4269 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { |
| 4270 class DeferredNumberTagD: public LDeferredCode { |
| 4271 public: |
| 4272 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) |
| 4273 : LDeferredCode(codegen), instr_(instr) { } |
| 4274 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } |
| 4275 virtual LInstruction* instr() { return instr_; } |
| 4276 private: |
| 4277 LNumberTagD* instr_; |
| 4278 }; |
| 4279 |
| 4280 DoubleRegister input = ToDoubleRegister(instr->value()); |
| 4281 Register result = ToRegister(instr->result()); |
| 4282 Register temp1 = ToRegister(instr->temp1()); |
| 4283 Register temp2 = ToRegister(instr->temp2()); |
| 4284 |
| 4285 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); |
| 4286 if (FLAG_inline_new) { |
| 4287 __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2); |
| 4288 } else { |
| 4289 __ B(deferred->entry()); |
| 4290 } |
| 4291 |
| 4292 __ Bind(deferred->exit()); |
| 4293 __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset)); |
| 4294 } |
| 4295 |
| 4296 |
| 4297 void LCodeGen::DoDeferredNumberTagU(LInstruction* instr, |
| 4298 LOperand* value, |
| 4299 LOperand* temp1, |
| 4300 LOperand* temp2) { |
| 4301 Label slow, convert_and_store; |
| 4302 Register src = ToRegister32(value); |
| 4303 Register dst = ToRegister(instr->result()); |
| 4304 Register scratch1 = ToRegister(temp1); |
| 4305 |
| 4306 if (FLAG_inline_new) { |
| 4307 Register scratch2 = ToRegister(temp2); |
| 4308 __ AllocateHeapNumber(dst, &slow, scratch1, scratch2); |
| 4309 __ B(&convert_and_store); |
| 4310 } |
| 4311 |
| 4312 // Slow case: call the runtime system to do the number allocation. |
| 4313 __ Bind(&slow); |
| 4314 // TODO(3095996): Put a valid pointer value in the stack slot where the result |
| 4315 // register is stored, as this register is in the pointer map, but contains an |
| 4316 // integer value. |
| 4317 __ Mov(dst, 0); |
| 4318 { |
| 4319 // Preserve the value of all registers. |
| 4320 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
| 4321 |
| 4322 // NumberTagU and NumberTagD use the context from the frame, rather than |
| 4323 // the environment's HContext or HInlinedContext value. |
| 4324 // They only call Runtime::kAllocateHeapNumber. |
| 4325 // The corresponding HChange instructions are added in a phase that does |
| 4326 // not have easy access to the local context. |
| 4327 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 4328 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); |
| 4329 RecordSafepointWithRegisters( |
| 4330 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); |
| 4331 __ StoreToSafepointRegisterSlot(x0, dst); |
| 4332 } |
| 4333 |
| 4334 // Convert number to floating point and store in the newly allocated heap |
| 4335 // number. |
| 4336 __ Bind(&convert_and_store); |
| 4337 DoubleRegister dbl_scratch = double_scratch(); |
| 4338 __ Ucvtf(dbl_scratch, src); |
| 4339 __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset)); |
| 4340 } |
| 4341 |
| 4342 |
| 4343 void LCodeGen::DoNumberTagU(LNumberTagU* instr) { |
| 4344 class DeferredNumberTagU: public LDeferredCode { |
| 4345 public: |
| 4346 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) |
| 4347 : LDeferredCode(codegen), instr_(instr) { } |
| 4348 virtual void Generate() { |
| 4349 codegen()->DoDeferredNumberTagU(instr_, |
| 4350 instr_->value(), |
| 4351 instr_->temp1(), |
| 4352 instr_->temp2()); |
| 4353 } |
| 4354 virtual LInstruction* instr() { return instr_; } |
| 4355 private: |
| 4356 LNumberTagU* instr_; |
| 4357 }; |
| 4358 |
| 4359 Register value = ToRegister(instr->value()); |
| 4360 Register result = ToRegister(instr->result()); |
| 4361 |
| 4362 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr); |
| 4363 __ Cmp(value, Smi::kMaxValue); |
| 4364 __ B(hi, deferred->entry()); |
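| // The "hi" above is an unsigned comparison, so any value above
| // Smi::kMaxValue (2^31 - 1), including inputs with bit 31 set, takes the
| // deferred path.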
| 4365 __ SmiTag(result, value); |
| 4366 __ Bind(deferred->exit()); |
| 4367 } |
| 4368 |
| 4369 |
| 4370 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { |
| 4371 Register input = ToRegister(instr->value()); |
| 4372 Register scratch = ToRegister(instr->temp()); |
| 4373 DoubleRegister result = ToDoubleRegister(instr->result()); |
| 4374 bool can_convert_undefined_to_nan = |
| 4375 instr->hydrogen()->can_convert_undefined_to_nan(); |
| 4376 |
| 4377 Label done, load_smi; |
| 4378 |
| 4379 // Work out what untag mode we're working with. |
| 4380 HValue* value = instr->hydrogen()->value(); |
| 4381 NumberUntagDMode mode = value->representation().IsSmi() |
| 4382 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; |
| 4383 |
| 4384 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
| 4385 __ JumpIfSmi(input, &load_smi); |
| 4386 |
| 4387 Label convert_undefined, deopt; |
| 4388 |
| 4389 // Heap number map check. |
| 4390 Label* not_heap_number = can_convert_undefined_to_nan ? &convert_undefined |
| 4391 : &deopt; |
| 4392 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 4393 __ JumpIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex, not_heap_number); |
| 4394 |
| 4395 // Load heap number. |
| 4396 __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset)); |
| 4397 if (instr->hydrogen()->deoptimize_on_minus_zero()) { |
| 4398 __ JumpIfMinusZero(result, &deopt); |
| 4399 } |
| 4400 __ B(&done); |
| 4401 |
| 4402 if (can_convert_undefined_to_nan) { |
| 4403 __ Bind(&convert_undefined); |
| 4404 __ JumpIfNotRoot(input, Heap::kUndefinedValueRootIndex, &deopt); |
| 4405 |
| 4406 __ LoadRoot(scratch, Heap::kNanValueRootIndex); |
| 4407 __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset)); |
| 4408 __ B(&done); |
| 4409 } |
| 4410 |
| 4411 __ Bind(&deopt); |
| 4412 Deoptimize(instr->environment()); |
| 4413 } else { |
| 4414 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); |
| 4415 // Fall through to load_smi. |
| 4416 } |
| 4417 |
| 4418 // Smi to double register conversion. |
| 4419 __ Bind(&load_smi); |
| 4420 __ SmiUntagToDouble(result, input); |
| 4421 |
| 4422 __ Bind(&done); |
| 4423 } |
| 4424 |
| 4425 |
| 4426 void LCodeGen::DoOsrEntry(LOsrEntry* instr) { |
| 4427 // This is a pseudo-instruction that ensures that the environment here is |
| 4428 // properly registered for deoptimization and records the assembler's PC |
| 4429 // offset. |
| 4430 LEnvironment* environment = instr->environment(); |
| 4431 |
| 4432 // If the environment were already registered, we would have no way of |
| 4433 // backpatching it with the spill slot operands. |
| 4434 ASSERT(!environment->HasBeenRegistered()); |
| 4435 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 4436 |
| 4437 GenerateOsrPrologue(); |
| 4438 } |
| 4439 |
| 4440 |
| 4441 void LCodeGen::DoParameter(LParameter* instr) { |
| 4442 // Nothing to do. |
| 4443 } |
| 4444 |
| 4445 |
| 4446 void LCodeGen::DoPushArgument(LPushArgument* instr) { |
| 4447 LOperand* argument = instr->value(); |
| 4448 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { |
| 4449 Abort(kDoPushArgumentNotImplementedForDoubleType); |
| 4450 } else { |
| 4451 __ Push(ToRegister(argument)); |
| 4452 } |
| 4453 } |
| 4454 |
| 4455 |
| 4456 void LCodeGen::DoReturn(LReturn* instr) { |
| 4457 if (FLAG_trace && info()->IsOptimizing()) { |
| 4458 // Push the return value on the stack as the parameter. |
| 4459 // Runtime::TraceExit returns its parameter in x0. Because we're leaving
| 4460 // the code managed by the register allocator and tearing down the frame,
| 4461 // it's safe to write to the context register.
| 4462 __ Push(x0); |
| 4463 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 4464 __ CallRuntime(Runtime::kTraceExit, 1); |
| 4465 } |
| 4466 |
| 4467 if (info()->saves_caller_doubles()) { |
| 4468 RestoreCallerDoubles(); |
| 4469 } |
| 4470 |
| 4471 int no_frame_start = -1; |
| 4472 if (NeedsEagerFrame()) { |
| 4473 Register stack_pointer = masm()->StackPointer(); |
| 4474 __ Mov(stack_pointer, fp); |
| 4475 no_frame_start = masm_->pc_offset(); |
| 4476 __ Pop(fp, lr); |
| 4477 } |
| 4478 |
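| // Drop the parameters. The constant path also pops the receiver (the
| // "+ 1"); the dynamic path takes a smi-tagged count.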
| 4479 if (instr->has_constant_parameter_count()) { |
| 4480 int parameter_count = ToInteger32(instr->constant_parameter_count()); |
| 4481 __ Drop(parameter_count + 1); |
| 4482 } else { |
| 4483 Register parameter_count = ToRegister(instr->parameter_count()); |
| 4484 __ DropBySMI(parameter_count); |
| 4485 } |
| 4486 __ Ret(); |
| 4487 |
| 4488 if (no_frame_start != -1) { |
| 4489 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset()); |
| 4490 } |
| 4491 } |
| 4492 |
| 4493 |
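| // Build a MemOperand addressing the character at the given index in a
| // sequential string. A constant index is folded into the offset; a
| // variable index uses temp to hold the scaled base address.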
| 4494 MemOperand LCodeGen::BuildSeqStringOperand(Register string, |
| 4495 Register temp, |
| 4496 LOperand* index, |
| 4497 String::Encoding encoding) { |
| 4498 if (index->IsConstantOperand()) { |
| 4499 int offset = ToInteger32(LConstantOperand::cast(index)); |
| 4500 if (encoding == String::TWO_BYTE_ENCODING) { |
| 4501 offset *= kUC16Size; |
| 4502 } |
| 4503 STATIC_ASSERT(kCharSize == 1); |
| 4504 return FieldMemOperand(string, SeqString::kHeaderSize + offset); |
| 4505 } |
| 4506 ASSERT(!temp.is(string)); |
| 4507 ASSERT(!temp.is(ToRegister(index))); |
| 4508 if (encoding == String::ONE_BYTE_ENCODING) { |
| 4509 __ Add(temp, string, Operand(ToRegister(index))); |
| 4510 } else { |
| 4511 STATIC_ASSERT(kUC16Size == 2); |
| 4512 __ Add(temp, string, Operand(ToRegister(index), LSL, 1)); |
| 4513 } |
| 4514 return FieldMemOperand(temp, SeqString::kHeaderSize); |
| 4515 } |
| 4516 |
| 4517 |
| 4518 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { |
| 4519 String::Encoding encoding = instr->hydrogen()->encoding(); |
| 4520 Register string = ToRegister(instr->string()); |
| 4521 Register result = ToRegister(instr->result()); |
| 4522 Register temp = ToRegister(instr->temp()); |
| 4523 |
| 4524 if (FLAG_debug_code) { |
| 4525 __ Ldr(temp, FieldMemOperand(string, HeapObject::kMapOffset)); |
| 4526 __ Ldrb(temp, FieldMemOperand(temp, Map::kInstanceTypeOffset)); |
| 4527 |
| 4528 __ And(temp, temp, |
| 4529 Operand(kStringRepresentationMask | kStringEncodingMask)); |
| 4530 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; |
| 4531 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; |
| 4532 __ Cmp(temp, Operand(encoding == String::ONE_BYTE_ENCODING |
| 4533 ? one_byte_seq_type : two_byte_seq_type)); |
| 4534 __ Check(eq, kUnexpectedStringType); |
| 4535 } |
| 4536 |
| 4537 MemOperand operand = |
| 4538 BuildSeqStringOperand(string, temp, instr->index(), encoding); |
| 4539 if (encoding == String::ONE_BYTE_ENCODING) { |
| 4540 __ Ldrb(result, operand); |
| 4541 } else { |
| 4542 __ Ldrh(result, operand); |
| 4543 } |
| 4544 } |
| 4545 |
| 4546 |
| 4547 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { |
| 4548 String::Encoding encoding = instr->hydrogen()->encoding(); |
| 4549 Register string = ToRegister(instr->string()); |
| 4550 Register value = ToRegister(instr->value()); |
| 4551 Register temp = ToRegister(instr->temp()); |
| 4552 |
| 4553 if (FLAG_debug_code) { |
| 4554 ASSERT(ToRegister(instr->context()).is(cp)); |
| 4555 Register index = ToRegister(instr->index()); |
| 4556 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; |
| 4557 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; |
| 4558 int encoding_mask = |
| 4559 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING |
| 4560 ? one_byte_seq_type : two_byte_seq_type; |
| 4561 __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp, |
| 4562 encoding_mask); |
| 4563 } |
| 4564 MemOperand operand = |
| 4565 BuildSeqStringOperand(string, temp, instr->index(), encoding); |
| 4566 if (encoding == String::ONE_BYTE_ENCODING) { |
| 4567 __ Strb(value, operand); |
| 4568 } else { |
| 4569 __ Strh(value, operand); |
| 4570 } |
| 4571 } |
| 4572 |
| 4573 |
| 4574 void LCodeGen::DoSmiTag(LSmiTag* instr) { |
| 4575 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)); |
| 4576 __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value())); |
| 4577 } |
| 4578 |
| 4579 |
| 4580 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { |
| 4581 Register input = ToRegister(instr->value()); |
| 4582 Register result = ToRegister(instr->result()); |
| 4584 
| 4585 if (instr->needs_check()) {
| 4586 DeoptimizeIfNotSmi(input, instr->environment());
| 4587 }
| 4588 
| 4590 __ SmiUntag(result, input);
| 4592 } |
| 4593 |
| 4594 |
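| // 32-bit shifts. For SHR, shifting a negative value by zero would produce
| // a result outside the int32 range, so that case deoptimizes if required.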
| 4595 void LCodeGen::DoShiftI(LShiftI* instr) { |
| 4596 LOperand* right_op = instr->right(); |
| 4597 Register left = ToRegister32(instr->left()); |
| 4598 Register result = ToRegister32(instr->result()); |
| 4599 |
| 4600 if (right_op->IsRegister()) { |
| 4601 Register right = ToRegister32(instr->right()); |
| 4602 switch (instr->op()) { |
| 4603 case Token::ROR: __ Ror(result, left, right); break; |
| 4604 case Token::SAR: __ Asr(result, left, right); break; |
| 4605 case Token::SHL: __ Lsl(result, left, right); break; |
| 4606 case Token::SHR: |
| 4607 if (instr->can_deopt()) { |
| 4608 Label right_not_zero; |
| 4609 __ Cbnz(right, &right_not_zero); |
| 4610 DeoptimizeIfNegative(left, instr->environment()); |
| 4611 __ Bind(&right_not_zero); |
| 4612 } |
| 4613 __ Lsr(result, left, right); |
| 4614 break; |
| 4615 default: UNREACHABLE(); |
| 4616 } |
| 4617 } else { |
| 4618 ASSERT(right_op->IsConstantOperand()); |
| 4619 int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f; |
| 4620 if (shift_count == 0) { |
| 4621 if ((instr->op() == Token::SHR) && instr->can_deopt()) { |
| 4622 DeoptimizeIfNegative(left, instr->environment()); |
| 4623 } |
| 4624 __ Mov(result, left, kDiscardForSameWReg); |
| 4625 } else { |
| 4626 switch (instr->op()) { |
| 4627 case Token::ROR: __ Ror(result, left, shift_count); break; |
| 4628 case Token::SAR: __ Asr(result, left, shift_count); break; |
| 4629 case Token::SHL: __ Lsl(result, left, shift_count); break; |
| 4630 case Token::SHR: __ Lsr(result, left, shift_count); break; |
| 4631 default: UNREACHABLE(); |
| 4632 } |
| 4633 } |
| 4634 } |
| 4635 } |
| 4636 |
| 4637 |
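| // Shifts on smi-tagged values. When the shift amount is in a register it
| // is itself a smi, so Ubfx extracts its low five untagged bits.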
| 4638 void LCodeGen::DoShiftS(LShiftS* instr) { |
| 4639 LOperand* right_op = instr->right(); |
| 4640 Register left = ToRegister(instr->left()); |
| 4641 Register result = ToRegister(instr->result()); |
| 4642 |
| 4643 // Only ROR by register needs a temp. |
| 4644 ASSERT(((instr->op() == Token::ROR) && right_op->IsRegister()) || |
| 4645 (instr->temp() == NULL)); |
| 4646 |
| 4647 if (right_op->IsRegister()) { |
| 4648 Register right = ToRegister(instr->right()); |
| 4649 switch (instr->op()) { |
| 4650 case Token::ROR: { |
| 4651 Register temp = ToRegister(instr->temp()); |
| 4652 __ Ubfx(temp, right, kSmiShift, 5); |
| 4653 __ SmiUntag(result, left); |
| 4654 __ Ror(result.W(), result.W(), temp.W()); |
| 4655 __ SmiTag(result); |
| 4656 break; |
| 4657 } |
| 4658 case Token::SAR: |
| 4659 __ Ubfx(result, right, kSmiShift, 5); |
| 4660 __ Asr(result, left, result); |
| 4661 __ Bic(result, result, kSmiShiftMask); |
| 4662 break; |
| 4663 case Token::SHL: |
| 4664 __ Ubfx(result, right, kSmiShift, 5); |
| 4665 __ Lsl(result, left, result); |
| 4666 break; |
| 4667 case Token::SHR: |
| 4668 if (instr->can_deopt()) { |
| 4669 Label right_not_zero; |
| 4670 __ Cbnz(right, &right_not_zero); |
| 4671 DeoptimizeIfNegative(left, instr->environment()); |
| 4672 __ Bind(&right_not_zero); |
| 4673 } |
| 4674 __ Ubfx(result, right, kSmiShift, 5); |
| 4675 __ Lsr(result, left, result); |
| 4676 __ Bic(result, result, kSmiShiftMask); |
| 4677 break; |
| 4678 default: UNREACHABLE(); |
| 4679 } |
| 4680 } else { |
| 4681 ASSERT(right_op->IsConstantOperand()); |
| 4682 int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f; |
| 4683 if (shift_count == 0) { |
| 4684 if ((instr->op() == Token::SHR) && instr->can_deopt()) { |
| 4685 DeoptimizeIfNegative(left, instr->environment()); |
| 4686 } |
| 4687 __ Mov(result, left); |
| 4688 } else { |
| 4689 switch (instr->op()) { |
| 4690 case Token::ROR: |
| 4691 __ SmiUntag(result, left); |
| 4692 __ Ror(result.W(), result.W(), shift_count); |
| 4693 __ SmiTag(result); |
| 4694 break; |
| 4695 case Token::SAR: |
| 4696 __ Asr(result, left, shift_count); |
| 4697 __ Bic(result, result, kSmiShiftMask); |
| 4698 break; |
| 4699 case Token::SHL: |
| 4700 __ Lsl(result, left, shift_count); |
| 4701 break; |
| 4702 case Token::SHR: |
| 4703 __ Lsr(result, left, shift_count); |
| 4704 __ Bic(result, result, kSmiShiftMask); |
| 4705 break; |
| 4706 default: UNREACHABLE(); |
| 4707 } |
| 4708 } |
| 4709 } |
| 4710 } |
| 4711 |
| 4712 |
| 4713 void LCodeGen::DoDebugBreak(LDebugBreak* instr) { |
| 4714 __ Debug("LDebugBreak", 0, BREAK); |
| 4715 } |
| 4716 |
| 4717 |
| 4718 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { |
| 4719 ASSERT(ToRegister(instr->context()).is(cp)); |
| 4720 Register scratch1 = x5; |
| 4721 Register scratch2 = x6; |
| 4722 ASSERT(instr->IsMarkedAsCall()); |
| 4723 |
| 4724 ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals"); |
| 4725 // TODO(all): if Mov could handle objects in new space then it could be used
| 4726 // here.
| 4727 __ LoadHeapObject(scratch1, instr->hydrogen()->pairs()); |
| 4728 __ Mov(scratch2, Operand(Smi::FromInt(instr->hydrogen()->flags()))); |
| 4729 __ Push(cp, scratch1, scratch2); // The context is the first argument. |
| 4730 CallRuntime(Runtime::kDeclareGlobals, 3, instr); |
| 4731 } |
| 4732 |
| 4733 |
| 4734 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { |
| 4735 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
| 4736 LoadContextFromDeferred(instr->context()); |
| 4737 __ CallRuntimeSaveDoubles(Runtime::kStackGuard); |
| 4738 RecordSafepointWithLazyDeopt( |
| 4739 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); |
| 4740 ASSERT(instr->HasEnvironment()); |
| 4741 LEnvironment* env = instr->environment(); |
| 4742 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); |
| 4743 } |
| 4744 |
| 4745 |
| 4746 void LCodeGen::DoStackCheck(LStackCheck* instr) { |
| 4747 class DeferredStackCheck: public LDeferredCode { |
| 4748 public: |
| 4749 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) |
| 4750 : LDeferredCode(codegen), instr_(instr) { } |
| 4751 virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); } |
| 4752 virtual LInstruction* instr() { return instr_; } |
| 4753 private: |
| 4754 LStackCheck* instr_; |
| 4755 }; |
| 4756 |
| 4757 ASSERT(instr->HasEnvironment()); |
| 4758 LEnvironment* env = instr->environment(); |
| 4759 // There is no LLazyBailout instruction for stack-checks. We have to |
| 4760 // prepare for lazy deoptimization explicitly here. |
| 4761 if (instr->hydrogen()->is_function_entry()) { |
| 4762 // Perform stack overflow check. |
| 4763 Label done; |
| 4764 __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex); |
| 4765 __ B(hs, &done); |
| 4766 |
| 4767 PredictableCodeSizeScope predictable(masm_, |
| 4768 Assembler::kCallSizeWithRelocation); |
| 4769 ASSERT(instr->context()->IsRegister()); |
| 4770 ASSERT(ToRegister(instr->context()).is(cp)); |
| 4771 CallCode(isolate()->builtins()->StackCheck(), |
| 4772 RelocInfo::CODE_TARGET, |
| 4773 instr); |
| 4774 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); |
| 4775 |
| 4776 __ Bind(&done); |
| 4777 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); |
| 4778 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); |
| 4779 } else { |
| 4780 ASSERT(instr->hydrogen()->is_backwards_branch()); |
| 4781 // Perform stack overflow check if this goto needs it before jumping. |
| 4782 DeferredStackCheck* deferred_stack_check = |
| 4783 new(zone()) DeferredStackCheck(this, instr); |
| 4784 __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex); |
| 4785 __ B(lo, deferred_stack_check->entry()); |
| 4786 |
| 4787 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); |
| 4788 __ Bind(instr->done_label()); |
| 4789 deferred_stack_check->SetExit(instr->done_label()); |
| 4790 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); |
| 4791 // Don't record a deoptimization index for the safepoint here. |
| 4792 // This will be done explicitly when emitting the call and the safepoint in
| 4793 // the deferred code. |
| 4794 } |
| 4795 } |
| 4796 |
| 4797 |
| 4798 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { |
| 4799 Register function = ToRegister(instr->function()); |
| 4800 Register code_object = ToRegister(instr->code_object()); |
| 4801 Register temp = ToRegister(instr->temp()); |
| 4802 __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag); |
| 4803 __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset)); |
| 4804 } |
| 4805 |
| 4806 |
| 4807 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { |
| 4808 Register context = ToRegister(instr->context()); |
| 4809 Register value = ToRegister(instr->value()); |
| 4810 Register scratch = ToRegister(instr->temp()); |
| 4811 MemOperand target = ContextMemOperand(context, instr->slot_index()); |
| 4812 |
| 4813 Label skip_assignment; |
| 4814 |
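| // If the slot contains the hole, either deoptimize or skip the store,
| // depending on what the hydrogen instruction requires.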
| 4815 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 4816 __ Ldr(scratch, target); |
| 4817 if (instr->hydrogen()->DeoptimizesOnHole()) { |
| 4818 DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, |
| 4819 instr->environment()); |
| 4820 } else { |
| 4821 __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment); |
| 4822 } |
| 4823 } |
| 4824 |
| 4825 __ Str(value, target); |
| 4826 if (instr->hydrogen()->NeedsWriteBarrier()) { |
| 4827 SmiCheck check_needed = |
| 4828 instr->hydrogen()->value()->IsHeapObject() |
| 4829 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| 4830 __ RecordWriteContextSlot(context, |
| 4831 target.offset(), |
| 4832 value, |
| 4833 scratch, |
| 4834 GetLinkRegisterState(), |
| 4835 kSaveFPRegs, |
| 4836 EMIT_REMEMBERED_SET, |
| 4837 check_needed); |
| 4838 } |
| 4839 __ Bind(&skip_assignment); |
| 4840 } |
| 4841 |
| 4842 |
| 4843 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { |
| 4844 Register value = ToRegister(instr->value()); |
| 4845 Register cell = ToRegister(instr->temp1()); |
| 4846 |
| 4847 // Load the cell. |
| 4848 __ Mov(cell, Operand(instr->hydrogen()->cell().handle())); |
| 4849 |
| 4850 // If the cell we are storing to contains the hole, it could have
| 4851 // been deleted from the property dictionary. In that case, we need |
| 4852 // to update the property details in the property dictionary to mark |
| 4853 // it as no longer deleted. We deoptimize in that case. |
| 4854 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 4855 Register payload = ToRegister(instr->temp2()); |
| 4856 __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset)); |
| 4857 DeoptimizeIfRoot( |
| 4858 payload, Heap::kTheHoleValueRootIndex, instr->environment()); |
| 4859 } |
| 4860 |
| 4861 // Store the value. |
| 4862 __ Str(value, FieldMemOperand(cell, Cell::kValueOffset)); |
| 4863 // Cells are always rescanned, so no write barrier here. |
| 4864 } |
| 4865 |
| 4866 |
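| // Store an element into an external (typed) array. The elements kind
| // selects the store width; float32 stores narrow the double value first.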
| 4867 void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) { |
| 4868 Register ext_ptr = ToRegister(instr->elements()); |
| 4869 Register key = no_reg; |
| 4870 Register scratch; |
| 4871 ElementsKind elements_kind = instr->elements_kind(); |
| 4872 |
| 4873 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); |
| 4874 bool key_is_constant = instr->key()->IsConstantOperand(); |
| 4875 int constant_key = 0; |
| 4876 if (key_is_constant) { |
| 4877 ASSERT(instr->temp() == NULL); |
| 4878 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); |
| 4879 if (constant_key & 0xf0000000) { |
| 4880 Abort(kArrayIndexConstantValueTooBig); |
| 4881 } |
| 4882 } else { |
| 4883 key = ToRegister(instr->key()); |
| 4884 scratch = ToRegister(instr->temp()); |
| 4885 } |
| 4886 |
| 4887 MemOperand dst = |
| 4888 PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi, |
| 4889 key_is_constant, constant_key, |
| 4890 elements_kind, |
| 4891 instr->additional_index()); |
| 4892 |
| 4893 if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) || |
| 4894 (elements_kind == FLOAT32_ELEMENTS)) { |
| 4895 DoubleRegister value = ToDoubleRegister(instr->value()); |
| 4896 DoubleRegister dbl_scratch = double_scratch(); |
| 4897 __ Fcvt(dbl_scratch.S(), value); |
| 4898 __ Str(dbl_scratch.S(), dst); |
| 4899 } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) || |
| 4900 (elements_kind == FLOAT64_ELEMENTS)) { |
| 4901 DoubleRegister value = ToDoubleRegister(instr->value()); |
| 4902 __ Str(value, dst); |
| 4903 } else { |
| 4904 Register value = ToRegister(instr->value()); |
| 4905 |
| 4906 switch (elements_kind) { |
| 4907 case EXTERNAL_UINT8_CLAMPED_ELEMENTS: |
| 4908 case EXTERNAL_INT8_ELEMENTS: |
| 4909 case EXTERNAL_UINT8_ELEMENTS: |
| 4910 case UINT8_ELEMENTS: |
| 4911 case UINT8_CLAMPED_ELEMENTS: |
| 4912 case INT8_ELEMENTS: |
| 4913 __ Strb(value, dst); |
| 4914 break; |
| 4915 case EXTERNAL_INT16_ELEMENTS: |
| 4916 case EXTERNAL_UINT16_ELEMENTS: |
| 4917 case INT16_ELEMENTS: |
| 4918 case UINT16_ELEMENTS: |
| 4919 __ Strh(value, dst); |
| 4920 break; |
| 4921 case EXTERNAL_INT32_ELEMENTS: |
| 4922 case EXTERNAL_UINT32_ELEMENTS: |
| 4923 case INT32_ELEMENTS: |
| 4924 case UINT32_ELEMENTS: |
| 4925 __ Str(value.W(), dst); |
| 4926 break; |
| 4927 case FLOAT32_ELEMENTS: |
| 4928 case FLOAT64_ELEMENTS: |
| 4929 case EXTERNAL_FLOAT32_ELEMENTS: |
| 4930 case EXTERNAL_FLOAT64_ELEMENTS: |
| 4931 case FAST_DOUBLE_ELEMENTS: |
| 4932 case FAST_ELEMENTS: |
| 4933 case FAST_SMI_ELEMENTS: |
| 4934 case FAST_HOLEY_DOUBLE_ELEMENTS: |
| 4935 case FAST_HOLEY_ELEMENTS: |
| 4936 case FAST_HOLEY_SMI_ELEMENTS: |
| 4937 case DICTIONARY_ELEMENTS: |
| 4938 case NON_STRICT_ARGUMENTS_ELEMENTS: |
| 4939 UNREACHABLE(); |
| 4940 break; |
| 4941 } |
| 4942 } |
| 4943 } |
| 4944 |
| 4945 |
| 4946 void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) { |
| 4947 Register elements = ToRegister(instr->elements()); |
| 4948 DoubleRegister value = ToDoubleRegister(instr->value()); |
| 4949 Register store_base = ToRegister(instr->temp()); |
| 4950 int offset = 0; |
| 4951 |
| 4952 if (instr->key()->IsConstantOperand()) { |
| 4953 int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); |
| 4954 if (constant_key & 0xf0000000) { |
| 4955 Abort(kArrayIndexConstantValueTooBig); |
| 4956 } |
| 4957 offset = FixedDoubleArray::OffsetOfElementAt(constant_key + |
| 4958 instr->additional_index()); |
| 4959 store_base = elements; |
| 4960 } else { |
| 4961 Register key = ToRegister(instr->key()); |
| 4962 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi(); |
| 4963 CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged, |
| 4964 instr->hydrogen()->elements_kind()); |
| 4965 offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index()); |
| 4966 } |
| 4967 |
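| // Fmaxnm leaves numeric values unchanged but replaces any NaN input with
| // the canonical NaN, which cannot alias the hole representation.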
| 4968 if (instr->NeedsCanonicalization()) { |
| 4969 DoubleRegister dbl_scratch = double_scratch(); |
| 4970 __ Fmov(dbl_scratch, |
| 4971 FixedDoubleArray::canonical_not_the_hole_nan_as_double()); |
| 4972 __ Fmaxnm(dbl_scratch, dbl_scratch, value); |
| 4973 __ Str(dbl_scratch, FieldMemOperand(store_base, offset)); |
| 4974 } else { |
| 4975 __ Str(value, FieldMemOperand(store_base, offset)); |
| 4976 } |
| 4977 } |
| 4978 |
| 4979 |
| 4980 void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) { |
| 4981 Register value = ToRegister(instr->value()); |
| 4982 Register elements = ToRegister(instr->elements()); |
| 4983 Register store_base = ToRegister(instr->temp()); |
| 4984 Register key = no_reg; |
| 4985 int offset = 0; |
| 4986 |
| 4987 if (instr->key()->IsConstantOperand()) { |
| 4988 ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); |
| 4989 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); |
| 4990 offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) + |
| 4991 instr->additional_index()); |
| 4992 store_base = elements; |
| 4993 } else { |
| 4994 key = ToRegister(instr->key()); |
| 4995 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi(); |
| 4996 CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged, |
| 4997 instr->hydrogen()->elements_kind()); |
| 4998 offset = FixedArray::OffsetOfElementAt(instr->additional_index()); |
| 4999 } |
| 5000 Representation representation = instr->hydrogen()->value()->representation(); |
| 5001 if (representation.IsInteger32()) { |
| 5002 ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY); |
| 5003 ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS); |
| 5004 STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0); |
| 5005 __ Store(value, UntagSmiFieldMemOperand(store_base, offset), |
| 5006 Representation::Integer32()); |
| 5007 } else { |
| 5008 __ Store(value, FieldMemOperand(store_base, offset), representation); |
| 5009 } |
| 5010 |
| 5011 if (instr->hydrogen()->NeedsWriteBarrier()) { |
| 5012 SmiCheck check_needed = |
| 5013 instr->hydrogen()->value()->IsHeapObject() |
| 5014 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| 5015 // Compute the address of the modified element and store it in the key register.
| 5016 __ Add(key, store_base, offset - kHeapObjectTag); |
| 5017 __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs, |
| 5018 EMIT_REMEMBERED_SET, check_needed); |
| 5019 } |
| 5020 } |
| 5021 |
| 5022 |
| 5023 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { |
| 5024 ASSERT(ToRegister(instr->context()).is(cp)); |
| 5025 ASSERT(ToRegister(instr->object()).Is(x2)); |
| 5026 ASSERT(ToRegister(instr->key()).Is(x1)); |
| 5027 ASSERT(ToRegister(instr->value()).Is(x0)); |
| 5028 |
| 5029 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode) |
| 5030 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() |
| 5031 : isolate()->builtins()->KeyedStoreIC_Initialize(); |
| 5032 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 5033 } |
| 5034 |
| 5035 |
| 5036 // TODO(jbramley): Once the merge is done and we're tracking bleeding_edge, try |
| 5037 // to tidy up this function. |
| 5038 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { |
| 5039 Representation representation = instr->representation(); |
| 5040 |
| 5041 Register object = ToRegister(instr->object()); |
| 5042 Register temp0 = ToRegister(instr->temp0()); |
| 5043 Register temp1 = ToRegister(instr->temp1()); |
| 5044 HObjectAccess access = instr->hydrogen()->access(); |
| 5045 int offset = access.offset(); |
| 5046 |
| 5047 if (access.IsExternalMemory()) { |
| 5048 Register value = ToRegister(instr->value()); |
| 5049 __ Store(value, MemOperand(object, offset), representation); |
| 5050 return; |
| 5051 } |
| 5052 |
| 5053 Handle<Map> transition = instr->transition(); |
| 5054 |
| 5055 if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { |
| 5056 Register value = ToRegister(instr->value()); |
| 5057 if (!instr->hydrogen()->value()->type().IsHeapObject()) { |
| 5058 DeoptimizeIfSmi(value, instr->environment()); |
| 5059 } |
| 5060 } else if (representation.IsDouble()) { |
| 5061 ASSERT(transition.is_null()); |
| 5062 ASSERT(access.IsInobject()); |
| 5063 ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); |
| 5064 FPRegister value = ToDoubleRegister(instr->value()); |
| 5065 __ Str(value, FieldMemOperand(object, offset)); |
| 5066 return; |
| 5067 } |
| 5068 |
| 5069 if (!transition.is_null()) { |
| 5070 // Store the new map value. |
| 5071 Register new_map_value = temp0; |
| 5072 __ Mov(new_map_value, Operand(transition)); |
| 5073 __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 5074 if (instr->hydrogen()->NeedsWriteBarrierForMap()) { |
| 5075 // Update the write barrier for the map field. |
| 5076 __ RecordWriteField(object, |
| 5077 HeapObject::kMapOffset, |
| 5078 new_map_value, |
| 5079 temp1, |
| 5080 GetLinkRegisterState(), |
| 5081 kSaveFPRegs, |
| 5082 OMIT_REMEMBERED_SET, |
| 5083 OMIT_SMI_CHECK); |
| 5084 } |
| 5085 } |
| 5086 |
| 5087 // Do the store. |
| 5088 Register value = ToRegister(instr->value()); |
| 5089 Register destination; |
| 5090 SmiCheck check_needed = |
| 5091 instr->hydrogen()->value()->IsHeapObject() |
| 5092 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| 5093 if (access.IsInobject()) { |
| 5094 destination = object; |
| 5095 } else { |
| 5096 __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset)); |
| 5097 destination = temp0; |
| 5098 } |
| 5099 |
| 5100 if (representation.IsSmi() && |
| 5101 instr->hydrogen()->value()->representation().IsInteger32()) { |
| 5102 ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY); |
| 5103 STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0); |
| 5104 __ Store(value, UntagSmiFieldMemOperand(destination, offset), |
| 5105 Representation::Integer32()); |
| 5106 } else { |
| 5107 __ Store(value, FieldMemOperand(destination, offset), representation); |
| 5108 } |
| 5109 if (instr->hydrogen()->NeedsWriteBarrier()) { |
| 5110 __ RecordWriteField(destination, |
| 5111 offset, |
| 5112 value, // Clobbered. |
| 5113 temp1, // Clobbered. |
| 5114 GetLinkRegisterState(), |
| 5115 kSaveFPRegs, |
| 5116 EMIT_REMEMBERED_SET, |
| 5117 check_needed); |
| 5118 } |
| 5119 } |
| 5120 |
| 5121 |
| 5122 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { |
| 5123 ASSERT(ToRegister(instr->context()).is(cp)); |
| 5124 ASSERT(ToRegister(instr->value()).is(x0)); |
| 5125 ASSERT(ToRegister(instr->object()).is(x1)); |
| 5126 |
| 5127 // Name must be in x2. |
| 5128 __ Mov(x2, Operand(instr->name())); |
| 5129 Handle<Code> ic = StoreIC::initialize_stub(isolate(), |
| 5130 instr->strict_mode_flag()); |
| 5131 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 5132 } |
| 5133 |
| 5134 |
| 5135 void LCodeGen::DoStringAdd(LStringAdd* instr) { |
| 5136 ASSERT(ToRegister(instr->context()).is(cp)); |
| 5137 ASSERT(ToRegister(instr->left()).Is(x1)); |
| 5138 ASSERT(ToRegister(instr->right()).Is(x0)); |
| 5139 StringAddStub stub(instr->hydrogen()->flags(), |
| 5140 instr->hydrogen()->pretenure_flag()); |
| 5141 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| 5142 } |
| 5143 |
| 5144 |
| 5145 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { |
| 5146 class DeferredStringCharCodeAt: public LDeferredCode { |
| 5147 public: |
| 5148 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) |
| 5149 : LDeferredCode(codegen), instr_(instr) { } |
| 5150 virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); } |
| 5151 virtual LInstruction* instr() { return instr_; } |
| 5152 private: |
| 5153 LStringCharCodeAt* instr_; |
| 5154 }; |
| 5155 |
| 5156 DeferredStringCharCodeAt* deferred = |
| 5157 new(zone()) DeferredStringCharCodeAt(this, instr); |
| 5158 |
| 5159 StringCharLoadGenerator::Generate(masm(), |
| 5160 ToRegister(instr->string()), |
| 5161 ToRegister(instr->index()), |
| 5162 ToRegister(instr->result()), |
| 5163 deferred->entry()); |
| 5164 __ Bind(deferred->exit()); |
| 5165 } |
| 5166 |
| 5167 |
| 5168 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { |
| 5169 Register string = ToRegister(instr->string()); |
| 5170 Register result = ToRegister(instr->result()); |
| 5171 |
| 5172 // TODO(3095996): Get rid of this. For now, we need to make the |
| 5173 // result register contain a valid pointer because it is already |
| 5174 // contained in the register pointer map. |
| 5175 __ Mov(result, 0); |
| 5176 |
| 5177 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
| 5178 __ Push(string); |
| 5179 // Push the index as a smi. This is safe because of the checks in |
| 5180 // DoStringCharCodeAt above. |
| 5181 Register index = ToRegister(instr->index()); |
| 5182 __ SmiTag(index); |
| 5183 __ Push(index); |
| 5184 |
| 5185 CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr, |
| 5186 instr->context()); |
| 5187 __ AssertSmi(x0); |
| 5188 __ SmiUntag(x0); |
| 5189 __ StoreToSafepointRegisterSlot(x0, result); |
| 5190 } |
| 5191 |
| 5192 |
| 5193 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { |
| 5194 class DeferredStringCharFromCode: public LDeferredCode { |
| 5195 public: |
| 5196 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) |
| 5197 : LDeferredCode(codegen), instr_(instr) { } |
| 5198 virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); } |
| 5199 virtual LInstruction* instr() { return instr_; } |
| 5200 private: |
| 5201 LStringCharFromCode* instr_; |
| 5202 }; |
| 5203 |
| 5204 DeferredStringCharFromCode* deferred = |
| 5205 new(zone()) DeferredStringCharFromCode(this, instr); |
| 5206 |
| 5207 ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); |
| 5208 Register char_code = ToRegister(instr->char_code()); |
| 5209 Register result = ToRegister(instr->result()); |
| 5210 |
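| // Fast path: look the code up in the single-character string cache. Codes
| // above the one-byte range, or cache misses (undefined entries), fall
| // back to the deferred runtime call.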
| 5211 __ Cmp(char_code, Operand(String::kMaxOneByteCharCode)); |
| 5212 __ B(hi, deferred->entry()); |
| 5213 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); |
| 5214 __ Add(result, result, Operand(char_code, LSL, kPointerSizeLog2)); |
| 5215 __ Ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize)); |
| 5216 __ CompareRoot(result, Heap::kUndefinedValueRootIndex); |
| 5217 __ B(eq, deferred->entry()); |
| 5218 __ Bind(deferred->exit()); |
| 5219 } |
| 5220 |
| 5221 |
| 5222 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { |
| 5223 Register char_code = ToRegister(instr->char_code()); |
| 5224 Register result = ToRegister(instr->result()); |
| 5225 |
| 5226 // TODO(3095996): Get rid of this. For now, we need to make the |
| 5227 // result register contain a valid pointer because it is already |
| 5228 // contained in the register pointer map. |
| 5229 __ Mov(result, 0); |
| 5230 |
| 5231 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
| 5232 __ SmiTag(char_code); |
| 5233 __ Push(char_code); |
| 5234 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context()); |
| 5235 __ StoreToSafepointRegisterSlot(x0, result); |
| 5236 } |
| 5237 |
| 5238 |
| 5239 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { |
| 5240 ASSERT(ToRegister(instr->context()).is(cp)); |
| 5241 Token::Value op = instr->op(); |
| 5242 |
| 5243 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); |
| 5244 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 5245 InlineSmiCheckInfo::EmitNotInlined(masm()); |
| 5246 |
| 5247 Condition condition = TokenToCondition(op, false); |
| 5248 |
| 5249 EmitCompareAndBranch(instr, condition, x0, 0); |
| 5250 } |
| 5251 |
| 5252 |
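| // 32-bit integer subtraction. Subs sets the flags, so a signed overflow
| // (V set) triggers a deoptimization.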
| 5253 void LCodeGen::DoSubI(LSubI* instr) { |
| 5254 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 5255 Register result = ToRegister32(instr->result()); |
| 5256 Register left = ToRegister32(instr->left()); |
| 5257 Operand right = ToOperand32I(instr->right()); |
| 5258 if (can_overflow) { |
| 5259 __ Subs(result, left, right); |
| 5260 DeoptimizeIf(vs, instr->environment()); |
| 5261 } else { |
| 5262 __ Sub(result, left, right); |
| 5263 } |
| 5264 } |
| 5265 |
| 5266 |
| 5267 void LCodeGen::DoSubS(LSubS* instr) { |
| 5268 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 5269 Register result = ToRegister(instr->result()); |
| 5270 Register left = ToRegister(instr->left()); |
| 5271 Operand right = ToOperand(instr->right()); |
| 5272 if (can_overflow) { |
| 5273 __ Subs(result, left, right); |
| 5274 DeoptimizeIf(vs, instr->environment()); |
| 5275 } else { |
| 5276 __ Sub(result, left, right); |
| 5277 } |
| 5278 } |
| 5279 |
| 5280 |
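| // Deferred tagged-to-int32 conversion for non-smi inputs. Truncating
| // conversions also accept booleans and undefined; non-truncating ones
| // require a heap number whose value converts exactly to int32.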
| 5281 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, |
| 5282 LOperand* value, |
| 5283 LOperand* temp1, |
| 5284 LOperand* temp2) { |
| 5285 Register input = ToRegister(value); |
| 5286 Register scratch1 = ToRegister(temp1); |
| 5287 DoubleRegister dbl_scratch1 = double_scratch(); |
| 5288 |
| 5289 Label done; |
| 5290 |
| 5291 // Load heap object map. |
| 5292 __ Ldr(scratch1, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 5293 |
| 5294 if (instr->truncating()) { |
| 5295 Register output = ToRegister(instr->result()); |
| 5296 Register scratch2 = ToRegister(temp2); |
| 5297 Label check_bools; |
| 5298 |
| 5299 // If it's not a heap number, jump to the boolean and undefined checks.
| 5300 __ JumpIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, &check_bools); |
| 5301 |
| 5302 // A heap number: load the value and truncate it to int32.
| 5303 __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset)); |
| 5304 __ ECMA262ToInt32(output, dbl_scratch1, scratch1, scratch2); |
| 5305 __ B(&done); |
| 5306 |
| 5307 __ Bind(&check_bools); |
| 5308 |
| 5309 Register true_root = output; |
| 5310 Register false_root = scratch2; |
| 5311 __ LoadTrueFalseRoots(true_root, false_root); |
| 5312 __ Cmp(scratch1, true_root); |
| 5313 __ Cset(output, eq); |
| 5314 __ Ccmp(scratch1, false_root, ZFlag, ne); |
| 5315 __ B(eq, &done); |
| 5316 |
| 5317 // Output already contains zero; undefined is converted to zero for
| 5318 // truncating conversions.
| 5319 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, |
| 5320 instr->environment()); |
| 5321 } else { |
| 5322 Register output = ToRegister32(instr->result()); |
| 5323 |
| 5324 DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2); |
| 5325 Label converted; |
| 5326 |
| 5327 // Deoptimize if it's not a heap number.
| 5328 DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, |
| 5329 instr->environment()); |
| 5330 |
| 5331 // A heap number: load value and convert to int32 using non-truncating |
| 5332 // function. If the result is out of range, branch to deoptimize. |
| 5333 __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset)); |
| 5334 __ TryConvertDoubleToInt32(output, dbl_scratch1, dbl_scratch2, &converted); |
| 5335 Deoptimize(instr->environment()); |
| 5336 |
| 5337 __ Bind(&converted); |
| 5338 |
| 5339 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 5340 __ Cmp(output, 0); |
| 5341 __ B(ne, &done); |
| 5342 __ Fmov(scratch1, dbl_scratch1); |
| 5343 DeoptimizeIfNegative(scratch1, instr->environment()); |
| 5344 } |
| 5345 } |
| 5346 __ Bind(&done); |
| 5347 } |
| 5348 |
| 5349 |
| 5350 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
| 5351 class DeferredTaggedToI: public LDeferredCode { |
| 5352 public: |
| 5353 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) |
| 5354 : LDeferredCode(codegen), instr_(instr) { } |
| 5355 virtual void Generate() { |
| 5356 codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(), |
| 5357 instr_->temp2()); |
| 5358 } |
| 5359 |
| 5360 virtual LInstruction* instr() { return instr_; } |
| 5361 private: |
| 5362 LTaggedToI* instr_; |
| 5363 }; |
| 5364 |
| 5365 Register input = ToRegister(instr->value()); |
| 5366 Register output = ToRegister(instr->result()); |
| 5367 |
| 5368 if (instr->hydrogen()->value()->representation().IsSmi()) { |
| 5369 __ SmiUntag(input); |
| 5370 } else { |
| 5371 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); |
| 5372 |
| 5373 // TODO(jbramley): We can't use JumpIfNotSmi here because the tbz it uses |
| 5374 // doesn't always have enough range. Consider making a variant of it, or a |
| 5375 // TestIsSmi helper. |
| 5376 STATIC_ASSERT(kSmiTag == 0); |
| 5377 __ Tst(input, kSmiTagMask); |
| 5378 __ B(ne, deferred->entry()); |
| 5379 |
| 5380 __ SmiUntag(output, input); |
| 5381 __ Bind(deferred->exit()); |
| 5382 } |
| 5383 } |
| 5384 |
| 5385 |
| 5386 void LCodeGen::DoThisFunction(LThisFunction* instr) { |
| 5387 Register result = ToRegister(instr->result()); |
| 5388 __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
| 5389 } |
| 5390 |
| 5391 |
| 5392 void LCodeGen::DoToFastProperties(LToFastProperties* instr) { |
| 5393 ASSERT(ToRegister(instr->value()).Is(x0)); |
| 5394 ASSERT(ToRegister(instr->result()).Is(x0)); |
| 5395 __ Push(x0); |
| 5396 CallRuntime(Runtime::kToFastProperties, 1, instr); |
| 5397 } |
| 5398 |
| 5399 |
| 5400 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { |
| 5401 ASSERT(ToRegister(instr->context()).is(cp)); |
| 5402 Label materialized; |
| 5403 // Registers will be used as follows: |
| 5404 // x7 = literals array. |
| 5405 // x1 = regexp literal. |
| 5406 // x0 = regexp literal clone. |
| 5407 // x10-x12 are used as temporaries. |
| 5408 int literal_offset = |
| 5409 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index()); |
| 5410 __ LoadObject(x7, instr->hydrogen()->literals()); |
| 5411 __ Ldr(x1, FieldMemOperand(x7, literal_offset)); |
| 5412 __ JumpIfNotRoot(x1, Heap::kUndefinedValueRootIndex, &materialized); |
| 5413 |
| 5414 // Create the regexp literal using the runtime function.
| 5415 // The result will be in x0.
| 5416 __ Mov(x12, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); |
| 5417 __ Mov(x11, Operand(instr->hydrogen()->pattern())); |
| 5418 __ Mov(x10, Operand(instr->hydrogen()->flags())); |
| 5419 __ Push(x7, x12, x11, x10); |
| 5420 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr); |
| 5421 __ Mov(x1, x0); |
| 5422 |
| 5423 __ Bind(&materialized); |
| 5424 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; |
| 5425 Label allocated, runtime_allocate; |
| 5426 |
| 5427 __ Allocate(size, x0, x10, x11, &runtime_allocate, TAG_OBJECT); |
| 5428 __ B(&allocated); |
| 5429 |
| 5430 __ Bind(&runtime_allocate); |
| 5431 __ Mov(x0, Operand(Smi::FromInt(size))); |
| 5432 __ Push(x1, x0); |
| 5433 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); |
| 5434 __ Pop(x1); |
| 5435 |
| 5436 __ Bind(&allocated); |
| 5437 // Copy the content into the newly allocated memory. |
| 5438 __ CopyFields(x0, x1, CPURegList(x10, x11, x12), size / kPointerSize); |
| 5439 } |
| 5440 |
| 5441 |
| 5442 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { |
| 5443 Register object = ToRegister(instr->object()); |
| 5444 Register temp1 = ToRegister(instr->temp1()); |
| 5445 |
| 5446 Handle<Map> from_map = instr->original_map(); |
| 5447 Handle<Map> to_map = instr->transitioned_map(); |
| 5448 ElementsKind from_kind = instr->from_kind(); |
| 5449 ElementsKind to_kind = instr->to_kind(); |
| 5450 |
| 5451 Label not_applicable; |
| 5452 __ CheckMap(object, temp1, from_map, ¬_applicable, DONT_DO_SMI_CHECK); |
| 5453 |
| 5454 if (IsSimpleMapChangeTransition(from_kind, to_kind)) { |
| 5455 Register new_map = ToRegister(instr->temp2()); |
| 5456 __ Mov(new_map, Operand(to_map)); |
| 5457 __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 5458 // Write barrier. |
| 5459 __ RecordWriteField(object, HeapObject::kMapOffset, new_map, temp1, |
| 5460 GetLinkRegisterState(), kDontSaveFPRegs); |
| 5461 } else { |
| 5462 ASSERT(ToRegister(instr->context()).is(cp)); |
| 5463 PushSafepointRegistersScope scope( |
| 5464 this, Safepoint::kWithRegistersAndDoubles); |
| 5465 __ Mov(x0, object); |
| 5466 __ Mov(x1, Operand(to_map)); |
| 5467 TransitionElementsKindStub stub(from_kind, to_kind); |
| 5468 __ CallStub(&stub); |
| 5469 RecordSafepointWithRegistersAndDoubles( |
| 5470 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); |
| 5471 } |
| 5472 __ Bind(¬_applicable); |
| 5473 } |
| 5474 |
| 5475 |
| 5476 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { |
| 5477 Register object = ToRegister(instr->object()); |
| 5478 Register temp1 = ToRegister(instr->temp1()); |
| 5479 Register temp2 = ToRegister(instr->temp2()); |
| 5480 |
| 5481 Label no_memento_found; |
| 5482 __ JumpIfJSArrayHasAllocationMemento(object, temp1, temp2, &no_memento_found); |
| 5483 Deoptimize(instr->environment()); |
| 5484 __ Bind(&no_memento_found); |
| 5485 } |
| 5486 |
| 5487 |
| 5488 void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) { |
| 5489 DoubleRegister input = ToDoubleRegister(instr->value()); |
| 5490 Register result = ToRegister(instr->result()); |
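| // ECMA262ToInt32 performs the ECMA-262 ToInt32 truncation; the result is
| // either left as a 32-bit integer or smi-tagged, as requested.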
| 5491 __ ECMA262ToInt32(result, input, |
| 5492 ToRegister(instr->temp1()), |
| 5493 ToRegister(instr->temp2()), |
| 5494 instr->tag_result() |
| 5495 ? MacroAssembler::SMI |
| 5496 : MacroAssembler::INT32_IN_W); |
| 5497 } |
| 5498 |
| 5499 |
| 5500 void LCodeGen::DoTypeof(LTypeof* instr) { |
| 5501 Register input = ToRegister(instr->value()); |
| 5502 __ Push(input); |
| 5503 CallRuntime(Runtime::kTypeof, 1, instr); |
| 5504 } |
| 5505 |
| 5506 |
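| // Emit the branch for a typeof comparison, dispatching on the type
| // literal at compile time.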
| 5507 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { |
| 5508 Handle<String> type_name = instr->type_literal(); |
| 5509 Label* true_label = instr->TrueLabel(chunk_); |
| 5510 Label* false_label = instr->FalseLabel(chunk_); |
| 5511 Register value = ToRegister(instr->value()); |
| 5512 |
| 5513 if (type_name->Equals(heap()->number_string())) { |
| 5514 ASSERT(instr->temp1() != NULL); |
| 5515 Register map = ToRegister(instr->temp1()); |
| 5516 |
| 5517 __ JumpIfSmi(value, true_label); |
| 5518 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); |
| 5519 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); |
| 5520 EmitBranch(instr, eq); |
| 5521 |
| 5522 } else if (type_name->Equals(heap()->string_string())) { |
| 5523 ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL)); |
| 5524 Register map = ToRegister(instr->temp1()); |
| 5525 Register scratch = ToRegister(instr->temp2()); |
| 5526 |
| 5527 __ JumpIfSmi(value, false_label); |
| 5528 __ JumpIfObjectType( |
| 5529 value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge); |
| 5530 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); |
| 5531 EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable); |
| 5532 |
| 5533 } else if (type_name->Equals(heap()->symbol_string())) { |
| 5534 ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL)); |
| 5535 Register map = ToRegister(instr->temp1()); |
| 5536 Register scratch = ToRegister(instr->temp2()); |
| 5537 |
| 5538 __ JumpIfSmi(value, false_label); |
| 5539 __ CompareObjectType(value, map, scratch, SYMBOL_TYPE); |
| 5540 EmitBranch(instr, eq); |
| 5541 |
| 5542 } else if (type_name->Equals(heap()->boolean_string())) { |
| 5543 __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label); |
| 5544 __ CompareRoot(value, Heap::kFalseValueRootIndex); |
| 5545 EmitBranch(instr, eq); |
| 5546 |
| 5547 } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) { |
| 5548 __ CompareRoot(value, Heap::kNullValueRootIndex); |
| 5549 EmitBranch(instr, eq); |
| 5550 |
| 5551 } else if (type_name->Equals(heap()->undefined_string())) { |
| 5552 ASSERT(instr->temp1() != NULL); |
| 5553 Register scratch = ToRegister(instr->temp1()); |
| 5554 |
| 5555 __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label); |
| 5556 __ JumpIfSmi(value, false_label); |
| 5557 // Check for undetectable objects and jump to the true branch in this case. |
| 5558 __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); |
| 5559 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); |
| 5560 EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable); |
| 5561 |
| 5562 } else if (type_name->Equals(heap()->function_string())) { |
| 5563 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); |
| 5564 ASSERT(instr->temp1() != NULL); |
| 5565 Register type = ToRegister(instr->temp1()); |
| 5566 |
| 5567 __ JumpIfSmi(value, false_label); |
| 5568 __ JumpIfObjectType(value, type, type, JS_FUNCTION_TYPE, true_label); |
| 5569 // The HeapObject's type was loaded into the type register by JumpIfObjectType.
| 5570 EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE); |
| 5571 |
| 5572 } else if (type_name->Equals(heap()->object_string())) { |
| 5573 ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL)); |
| 5574 Register map = ToRegister(instr->temp1()); |
| 5575 Register scratch = ToRegister(instr->temp2()); |
| 5576 |
| 5577 __ JumpIfSmi(value, false_label); |
| 5578 if (!FLAG_harmony_typeof) { |
| 5579 __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label); |
| 5580 } |
| 5581 __ JumpIfObjectType(value, map, scratch, |
| 5582 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt); |
| 5583 __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); |
| 5584 __ B(gt, false_label); |
| 5585 // Check for undetectable objects => false. |
| 5586 __ Ldrb(scratch, FieldMemOperand(value, Map::kBitFieldOffset)); |
| 5587 EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable); |
| 5588 |
| 5589 } else { |
| 5590 __ B(false_label); |
| 5591 } |
| 5592 } |
| 5593 |
| 5594 |
| 5595 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { |
| 5596 __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value())); |
| 5597 } |
| 5598 |
| 5599 |
| 5600 void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) { |
| 5601 Register value = ToRegister(instr->value()); |
| 5602 Register result = ToRegister(instr->result()); |
| 5603 |
| 5604 if (!instr->hydrogen()->value()->HasRange() || |
| 5605 !instr->hydrogen()->value()->range()->IsInSmiRange() || |
| 5606 instr->hydrogen()->value()->range()->upper() == kMaxInt) { |
| 5607 // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32] |
| 5608 // interval, so we treat kMaxInt as a sentinel for this entire interval. |
| 5609 DeoptimizeIfNegative(value.W(), instr->environment()); |
| 5610 } |
| 5611 __ SmiTag(result, value); |
| 5612 } |
| 5613 |
| 5614 |
| 5615 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { |
| 5616 Register object = ToRegister(instr->value()); |
| 5617 Register map = ToRegister(instr->map()); |
| 5618 Register temp = ToRegister(instr->temp()); |
| 5619 __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 5620 __ Cmp(map, temp); |
| 5621 DeoptimizeIf(ne, instr->environment()); |
| 5622 } |
| 5623 |
| 5624 |
| 5625 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { |
| 5626 Register receiver = ToRegister(instr->receiver()); |
| 5627 Register function = ToRegister(instr->function()); |
| 5628 Register result = ToRegister(instr->result()); |
| 5629 |
| 5630 // If the receiver is null or undefined, we have to pass the global object as |
| 5631 // a receiver to normal functions. Values have to be passed unchanged to |
| 5632 // builtins and strict-mode functions. |
| 5633 Label global_object, done, deopt; |
| 5634 |
| 5635 if (!instr->hydrogen()->known_function()) { |
| 5636 __ Ldr(result, FieldMemOperand(function, |
| 5637 JSFunction::kSharedFunctionInfoOffset)); |
| 5638 |
| 5639 // CompilerHints is an int32 field. See objects.h. |
| 5640 __ Ldr(result.W(), |
| 5641 FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset)); |
| 5642 |
| 5643 // Do not transform the receiver to object for strict mode functions. |
| 5644 __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &done); |
| 5645 |
| 5646 // Do not transform the receiver to object for builtins. |
| 5647 __ Tbnz(result, SharedFunctionInfo::kNative, &done); |
| 5648 } |
| 5649 |
| 5650 // Normal function. Replace undefined or null with global receiver. |
| 5651 __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object); |
| 5652 __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object); |
| 5653 |
| 5654 // Deoptimize if the receiver is not a JS object. |
| 5655 __ JumpIfSmi(receiver, &deopt); |
| 5656 __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE); |
| 5657 __ B(ge, &done); |
| 5658 // Otherwise, fall through to deopt. |
| 5659 |
| 5660 __ Bind(&deopt); |
| 5661 Deoptimize(instr->environment()); |
| 5662 |
| 5663 __ Bind(&global_object); |
| 5664 // We could load directly into the result register here, but the additional
| 5665 // branches required are likely to be more time-consuming than one additional
| 5666 // move.
| 5667 __ Ldr(receiver, FieldMemOperand(function, JSFunction::kContextOffset)); |
| 5668 __ Ldr(receiver, ContextMemOperand(receiver, Context::GLOBAL_OBJECT_INDEX)); |
| 5669 __ Ldr(receiver, |
| 5670 FieldMemOperand(receiver, GlobalObject::kGlobalReceiverOffset)); |
| 5671 |
| 5672 __ Bind(&done); |
| 5673 __ Mov(result, receiver); |
| 5674 } |
| 5675 |
| 5676 |
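| // Load a field given its smi-tagged descriptor index: non-negative
| // indices address in-object fields, negative indices the out-of-object
| // property array.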
| 5677 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { |
| 5678 Register object = ToRegister(instr->object()); |
| 5679 Register index = ToRegister(instr->index()); |
| 5680 Register result = ToRegister(instr->result()); |
| 5681 |
| 5682 __ AssertSmi(index); |
| 5683 |
| 5684 Label out_of_object, done; |
| 5685 __ Cmp(index, Operand(Smi::FromInt(0))); |
| 5686 __ B(lt, &out_of_object); |
| 5687 |
| 5688 STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize); |
| 5689 __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2)); |
| 5690 __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize)); |
| 5691 |
| 5692 __ B(&done); |
| 5693 |
| 5694 __ Bind(&out_of_object); |
| 5695 __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); |
| 5696 // The index is equal to the negated out-of-object property index plus 1.
| 5697 __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2)); |
| 5698 __ Ldr(result, FieldMemOperand(result, |
| 5699 FixedArray::kHeaderSize - kPointerSize)); |
| 5700 __ Bind(&done); |
| 5701 } |
| 5702 |
| 5703 } } // namespace v8::internal |