OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "v8.h" | 5 #include "v8.h" |
6 | 6 |
7 #if V8_TARGET_ARCH_IA32 | 7 #if V8_TARGET_ARCH_IA32 |
8 | 8 |
9 #include "ia32/lithium-codegen-ia32.h" | 9 #include "ia32/lithium-codegen-ia32.h" |
10 #include "ic.h" | 10 #include "ic.h" |
11 #include "code-stubs.h" | 11 #include "code-stubs.h" |
12 #include "deoptimizer.h" | 12 #include "deoptimizer.h" |
13 #include "stub-cache.h" | 13 #include "stub-cache.h" |
14 #include "codegen.h" | 14 #include "codegen.h" |
15 #include "hydrogen-osr.h" | 15 #include "hydrogen-osr.h" |
16 | 16 |
17 namespace v8 { | 17 namespace v8 { |
18 namespace internal { | 18 namespace internal { |
19 | 19 |
20 | |
21 static SaveFPRegsMode GetSaveFPRegsMode(Isolate* isolate) { | |
22 // We don't need to save floating point regs when generating the snapshot | |
23 return CpuFeatures::IsSafeForSnapshot(isolate, SSE2) ? kSaveFPRegs | |
24 : kDontSaveFPRegs; | |
25 } | |
26 | |
27 | |
28 // When invoking builtins, we need to record the safepoint in the middle of | 20 // When invoking builtins, we need to record the safepoint in the middle of |
29 // the invoke instruction sequence generated by the macro assembler. | 21 // the invoke instruction sequence generated by the macro assembler. |
30 class SafepointGenerator V8_FINAL : public CallWrapper { | 22 class SafepointGenerator V8_FINAL : public CallWrapper { |
31 public: | 23 public: |
32 SafepointGenerator(LCodeGen* codegen, | 24 SafepointGenerator(LCodeGen* codegen, |
33 LPointerMap* pointers, | 25 LPointerMap* pointers, |
34 Safepoint::DeoptMode mode) | 26 Safepoint::DeoptMode mode) |
35 : codegen_(codegen), | 27 : codegen_(codegen), |
36 pointers_(pointers), | 28 pointers_(pointers), |
37 deopt_mode_(mode) {} | 29 deopt_mode_(mode) {} |
(...skipping 58 matching lines...)
96 __ mov(Operand(esp, offset), eax); | 88 __ mov(Operand(esp, offset), eax); |
97 } | 89 } |
98 } | 90 } |
99 #endif | 91 #endif |
100 | 92 |
101 | 93 |
102 void LCodeGen::SaveCallerDoubles() { | 94 void LCodeGen::SaveCallerDoubles() { |
103 ASSERT(info()->saves_caller_doubles()); | 95 ASSERT(info()->saves_caller_doubles()); |
104 ASSERT(NeedsEagerFrame()); | 96 ASSERT(NeedsEagerFrame()); |
105 Comment(";;; Save clobbered callee double registers"); | 97 Comment(";;; Save clobbered callee double registers"); |
106 CpuFeatureScope scope(masm(), SSE2); | |
107 int count = 0; | 98 int count = 0; |
108 BitVector* doubles = chunk()->allocated_double_registers(); | 99 BitVector* doubles = chunk()->allocated_double_registers(); |
109 BitVector::Iterator save_iterator(doubles); | 100 BitVector::Iterator save_iterator(doubles); |
110 while (!save_iterator.Done()) { | 101 while (!save_iterator.Done()) { |
111 __ movsd(MemOperand(esp, count * kDoubleSize), | 102 __ movsd(MemOperand(esp, count * kDoubleSize), |
112 XMMRegister::FromAllocationIndex(save_iterator.Current())); | 103 XMMRegister::FromAllocationIndex(save_iterator.Current())); |
113 save_iterator.Advance(); | 104 save_iterator.Advance(); |
114 count++; | 105 count++; |
115 } | 106 } |
116 } | 107 } |
117 | 108 |
118 | 109 |
119 void LCodeGen::RestoreCallerDoubles() { | 110 void LCodeGen::RestoreCallerDoubles() { |
120 ASSERT(info()->saves_caller_doubles()); | 111 ASSERT(info()->saves_caller_doubles()); |
121 ASSERT(NeedsEagerFrame()); | 112 ASSERT(NeedsEagerFrame()); |
122 Comment(";;; Restore clobbered callee double registers"); | 113 Comment(";;; Restore clobbered callee double registers"); |
123 CpuFeatureScope scope(masm(), SSE2); | |
124 BitVector* doubles = chunk()->allocated_double_registers(); | 114 BitVector* doubles = chunk()->allocated_double_registers(); |
125 BitVector::Iterator save_iterator(doubles); | 115 BitVector::Iterator save_iterator(doubles); |
126 int count = 0; | 116 int count = 0; |
127 while (!save_iterator.Done()) { | 117 while (!save_iterator.Done()) { |
128 __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()), | 118 __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()), |
129 MemOperand(esp, count * kDoubleSize)); | 119 MemOperand(esp, count * kDoubleSize)); |
130 save_iterator.Advance(); | 120 save_iterator.Advance(); |
131 count++; | 121 count++; |
132 } | 122 } |
133 } | 123 } |
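
For readers skimming the diff: SaveCallerDoubles/RestoreCallerDoubles walk a bit vector of allocated double registers and spill each one to the next kDoubleSize-byte stack slot, so slots are assigned densely in iteration order rather than by register index. A minimal standalone sketch of that layout logic (the bitset and plain array below are stand-ins for V8's BitVector and the XMM register file, not the real types):

    #include <bitset>
    #include <cstdio>

    constexpr int kNumDoubleRegs = 8;  // xmm0..xmm7 on ia32
    constexpr int kDoubleSize    = 8;  // bytes per double

    int main() {
      std::bitset<kNumDoubleRegs> allocated;  // plays the role of
      allocated.set(1);                       // chunk()->allocated_double_registers()
      allocated.set(4);
      allocated.set(6);

      double regs[kNumDoubleRegs] = {0};      // the "XMM register file"
      regs[1] = 1.5; regs[4] = 4.5; regs[6] = 6.5;

      double stack_area[kNumDoubleRegs];      // frame slots reserved for spills

      // Save: the slot index is the running count, not the register index.
      int count = 0;
      for (int i = 0; i < kNumDoubleRegs; i++) {
        if (!allocated.test(i)) continue;
        stack_area[count] = regs[i];          // movsd [esp + count*kDoubleSize], xmm_i
        count++;
      }

      // Restore replays the identical iteration, so register i always
      // meets the slot it was saved to.
      count = 0;
      for (int i = 0; i < kNumDoubleRegs; i++) {
        if (!allocated.test(i)) continue;
        regs[i] = stack_area[count];          // movsd xmm_i, [esp + count*kDoubleSize]
        count++;
      }
      std::printf("%g %g %g\n", regs[1], regs[4], regs[6]);  // 1.5 4.5 6.5
    }
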
(...skipping 112 matching lines...)
246 // Store dynamic frame alignment state in the first local. | 236 // Store dynamic frame alignment state in the first local. |
247 int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset; | 237 int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset; |
248 if (dynamic_frame_alignment_) { | 238 if (dynamic_frame_alignment_) { |
249 __ mov(Operand(ebp, offset), edx); | 239 __ mov(Operand(ebp, offset), edx); |
250 } else { | 240 } else { |
251 __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding)); | 241 __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding)); |
252 } | 242 } |
253 } | 243 } |
254 } | 244 } |
255 | 245 |
256 if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) { | 246 if (info()->saves_caller_doubles()) SaveCallerDoubles(); |
257 SaveCallerDoubles(); | |
258 } | |
259 } | 247 } |
260 | 248 |
261 // Possibly allocate a local context. | 249 // Possibly allocate a local context. |
262 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; | 250 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; |
263 if (heap_slots > 0) { | 251 if (heap_slots > 0) { |
264 Comment(";;; Allocate local context"); | 252 Comment(";;; Allocate local context"); |
265 // Argument to NewContext is the function, which is still in edi. | 253 // Argument to NewContext is the function, which is still in edi. |
266 if (heap_slots <= FastNewContextStub::kMaximumSlots) { | 254 if (heap_slots <= FastNewContextStub::kMaximumSlots) { |
267 FastNewContextStub stub(isolate(), heap_slots); | 255 FastNewContextStub stub(isolate(), heap_slots); |
268 __ CallStub(&stub); | 256 __ CallStub(&stub); |
(...skipping 91 matching lines...)
360 } | 348 } |
361 | 349 |
362 | 350 |
363 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { | 351 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { |
364 if (instr->IsCall()) { | 352 if (instr->IsCall()) { |
365 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); | 353 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); |
366 } | 354 } |
367 if (!instr->IsLazyBailout() && !instr->IsGap()) { | 355 if (!instr->IsLazyBailout() && !instr->IsGap()) { |
368 safepoints_.BumpLastLazySafepointIndex(); | 356 safepoints_.BumpLastLazySafepointIndex(); |
369 } | 357 } |
370 if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr); | |
371 } | |
372 | |
373 | |
374 void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { | |
375 if (!CpuFeatures::IsSupported(SSE2)) { | |
376 if (instr->IsGoto()) { | |
377 x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr)); | |
378 } else if (FLAG_debug_code && FLAG_enable_slow_asserts && | |
379 !instr->IsGap() && !instr->IsReturn()) { | |
380 if (instr->ClobbersDoubleRegisters(isolate())) { | |
381 if (instr->HasDoubleRegisterResult()) { | |
382 ASSERT_EQ(1, x87_stack_.depth()); | |
383 } else { | |
384 ASSERT_EQ(0, x87_stack_.depth()); | |
385 } | |
386 } | |
387 __ VerifyX87StackDepth(x87_stack_.depth()); | |
388 } | |
389 } | |
390 } | 358 } |
391 | 359 |
392 | 360 |
| 361 void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { } |
| 362 |
| 363 |
393 bool LCodeGen::GenerateJumpTable() { | 364 bool LCodeGen::GenerateJumpTable() { |
394 Label needs_frame; | 365 Label needs_frame; |
395 if (jump_table_.length() > 0) { | 366 if (jump_table_.length() > 0) { |
396 Comment(";;; -------------------- Jump table --------------------"); | 367 Comment(";;; -------------------- Jump table --------------------"); |
397 } | 368 } |
398 for (int i = 0; i < jump_table_.length(); i++) { | 369 for (int i = 0; i < jump_table_.length(); i++) { |
399 __ bind(&jump_table_[i].label); | 370 __ bind(&jump_table_[i].label); |
400 Address entry = jump_table_[i].address; | 371 Address entry = jump_table_[i].address; |
401 Deoptimizer::BailoutType type = jump_table_[i].bailout_type; | 372 Deoptimizer::BailoutType type = jump_table_[i].bailout_type; |
402 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); | 373 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); |
(...skipping 23 matching lines...)
426 __ call(&push_approx_pc); | 397 __ call(&push_approx_pc); |
427 __ bind(&push_approx_pc); | 398 __ bind(&push_approx_pc); |
428 // Push the continuation which was stashed where the ebp should | 399 // Push the continuation which was stashed where the ebp should |
429 // be. Replace it with the saved ebp. | 400 // be. Replace it with the saved ebp. |
430 __ push(MemOperand(esp, 3 * kPointerSize)); | 401 __ push(MemOperand(esp, 3 * kPointerSize)); |
431 __ mov(MemOperand(esp, 4 * kPointerSize), ebp); | 402 __ mov(MemOperand(esp, 4 * kPointerSize), ebp); |
432 __ lea(ebp, MemOperand(esp, 4 * kPointerSize)); | 403 __ lea(ebp, MemOperand(esp, 4 * kPointerSize)); |
433 __ ret(0); // Call the continuation without clobbering registers. | 404 __ ret(0); // Call the continuation without clobbering registers. |
434 } | 405 } |
435 } else { | 406 } else { |
436 if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) { | 407 if (info()->saves_caller_doubles()) RestoreCallerDoubles(); |
437 RestoreCallerDoubles(); | |
438 } | |
439 __ call(entry, RelocInfo::RUNTIME_ENTRY); | 408 __ call(entry, RelocInfo::RUNTIME_ENTRY); |
440 } | 409 } |
441 } | 410 } |
442 return !is_aborted(); | 411 return !is_aborted(); |
443 } | 412 } |
444 | 413 |
445 | 414 |
446 bool LCodeGen::GenerateDeferredCode() { | 415 bool LCodeGen::GenerateDeferredCode() { |
447 ASSERT(is_generating()); | 416 ASSERT(is_generating()); |
448 if (deferred_.length() > 0) { | 417 if (deferred_.length() > 0) { |
449 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { | 418 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { |
450 LDeferredCode* code = deferred_[i]; | 419 LDeferredCode* code = deferred_[i]; |
451 X87Stack copy(code->x87_stack()); | |
452 x87_stack_ = copy; | |
453 | 420 |
454 HValue* value = | 421 HValue* value = |
455 instructions_->at(code->instruction_index())->hydrogen_value(); | 422 instructions_->at(code->instruction_index())->hydrogen_value(); |
456 RecordAndWritePosition( | 423 RecordAndWritePosition( |
457 chunk()->graph()->SourcePositionToScriptPosition(value->position())); | 424 chunk()->graph()->SourcePositionToScriptPosition(value->position())); |
458 | 425 |
459 Comment(";;; <@%d,#%d> " | 426 Comment(";;; <@%d,#%d> " |
460 "-------------------- Deferred %s --------------------", | 427 "-------------------- Deferred %s --------------------", |
461 code->instruction_index(), | 428 code->instruction_index(), |
462 code->instr()->hydrogen_value()->id(), | 429 code->instr()->hydrogen_value()->id(), |
(...skipping 45 matching lines...)
508 safepoints_.Emit(masm(), GetStackSlotCount()); | 475 safepoints_.Emit(masm(), GetStackSlotCount()); |
509 return !is_aborted(); | 476 return !is_aborted(); |
510 } | 477 } |
511 | 478 |
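
GenerateDeferredCode above is the second half of V8's deferred-code pattern: fast paths in the main body only enqueue their slow paths, and the bodies are emitted in one batch after the instruction stream, each jumping back through its exit label. A toy sketch of that control flow (the struct and names are illustrative, not the real LDeferredCode interface):

    #include <cstdio>
    #include <functional>
    #include <vector>

    // Illustrative stand-in for LDeferredCode: a slow path that the fast
    // path branches to, emitted out of line after the main body.
    struct DeferredCode {
      int instruction_index;            // which instruction owns this slow path
      std::function<void()> generate;   // body emitted out of line
    };

    int main() {
      std::vector<DeferredCode> deferred;

      // "Main body" generation: fast paths enqueue their slow paths
      // instead of emitting them inline, keeping the hot code compact.
      for (int i = 0; i < 3; i++) {
        deferred.push_back({i, [i] { std::printf("  deferred body for @%d\n", i); }});
        std::printf("fast path for @%d (branch to deferred on slow case)\n", i);
      }

      // After the main body, emit every deferred body in one batch --
      // the analogue of GenerateDeferredCode() above.
      std::printf(";;; deferred section\n");
      for (const DeferredCode& code : deferred) code.generate();
    }
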
512 | 479 |
513 Register LCodeGen::ToRegister(int index) const { | 480 Register LCodeGen::ToRegister(int index) const { |
514 return Register::FromAllocationIndex(index); | 481 return Register::FromAllocationIndex(index); |
515 } | 482 } |
516 | 483 |
517 | 484 |
518 X87Register LCodeGen::ToX87Register(int index) const { | |
519 return X87Register::FromAllocationIndex(index); | |
520 } | |
521 | |
522 | |
523 XMMRegister LCodeGen::ToDoubleRegister(int index) const { | 485 XMMRegister LCodeGen::ToDoubleRegister(int index) const { |
524 return XMMRegister::FromAllocationIndex(index); | 486 return XMMRegister::FromAllocationIndex(index); |
525 } | 487 } |
526 | 488 |
527 | 489 |
528 void LCodeGen::X87LoadForUsage(X87Register reg) { | |
529 ASSERT(x87_stack_.Contains(reg)); | |
530 x87_stack_.Fxch(reg); | |
531 x87_stack_.pop(); | |
532 } | |
533 | |
534 | |
535 void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) { | |
536 ASSERT(x87_stack_.Contains(reg1)); | |
537 ASSERT(x87_stack_.Contains(reg2)); | |
538 x87_stack_.Fxch(reg1, 1); | |
539 x87_stack_.Fxch(reg2); | |
540 x87_stack_.pop(); | |
541 x87_stack_.pop(); | |
542 } | |
543 | |
544 | |
545 void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) { | |
546 ASSERT(is_mutable_); | |
547 ASSERT(Contains(reg) && stack_depth_ > other_slot); | |
548 int i = ArrayIndex(reg); | |
549 int st = st2idx(i); | |
550 if (st != other_slot) { | |
551 int other_i = st2idx(other_slot); | |
552 X87Register other = stack_[other_i]; | |
553 stack_[other_i] = reg; | |
554 stack_[i] = other; | |
555 if (st == 0) { | |
556 __ fxch(other_slot); | |
557 } else if (other_slot == 0) { | |
558 __ fxch(st); | |
559 } else { | |
560 __ fxch(st); | |
561 __ fxch(other_slot); | |
562 __ fxch(st); | |
563 } | |
564 } | |
565 } | |
566 | |
567 | |
568 int LCodeGen::X87Stack::st2idx(int pos) { | |
569 return stack_depth_ - pos - 1; | |
570 } | |
571 | |
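
The x87 bookkeeping above hinges on one mapping: stack_ grows upward in the array while the hardware numbers st(i) down from the top, so st(pos) lives at array index stack_depth_ - pos - 1. A minimal model of that correspondence (toy integers stand in for X87Register):

    #include <cassert>
    #include <cstdio>

    // Toy model of LCodeGen::X87Stack: stack_[0] is the oldest value,
    // stack_[depth_-1] is the hardware top-of-stack st(0).
    struct X87StackModel {
      int stack_[8];
      int depth_ = 0;

      void push(int reg) { stack_[depth_++] = reg; }

      // st2idx: hardware stack position -> array index.
      int st2idx(int pos) const { return depth_ - pos - 1; }

      // ArrayIndex: which array slot holds a given register.
      int ArrayIndex(int reg) const {
        for (int i = 0; i < depth_; i++)
          if (stack_[i] == reg) return i;
        assert(false); return -1;
      }
    };

    int main() {
      X87StackModel s;
      s.push(7);  // first value pushed ends up deepest...
      s.push(3);
      s.push(5);  // ...and the last push is st(0)
      assert(s.st2idx(0) == 2 && s.stack_[s.st2idx(0)] == 5);  // st(0) is reg 5
      assert(s.st2idx(2) == 0 && s.stack_[s.st2idx(2)] == 7);  // st(2) is reg 7
      assert(s.ArrayIndex(3) == 1);
      std::printf("st(0)=%d st(1)=%d st(2)=%d\n",
                  s.stack_[s.st2idx(0)], s.stack_[s.st2idx(1)],
                  s.stack_[s.st2idx(2)]);
    }
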
572 | |
573 int LCodeGen::X87Stack::ArrayIndex(X87Register reg) { | |
574 for (int i = 0; i < stack_depth_; i++) { | |
575 if (stack_[i].is(reg)) return i; | |
576 } | |
577 UNREACHABLE(); | |
578 return -1; | |
579 } | |
580 | |
581 | |
582 bool LCodeGen::X87Stack::Contains(X87Register reg) { | |
583 for (int i = 0; i < stack_depth_; i++) { | |
584 if (stack_[i].is(reg)) return true; | |
585 } | |
586 return false; | |
587 } | |
588 | |
589 | |
590 void LCodeGen::X87Stack::Free(X87Register reg) { | |
591 ASSERT(is_mutable_); | |
592 ASSERT(Contains(reg)); | |
593 int i = ArrayIndex(reg); | |
594 int st = st2idx(i); | |
595 if (st > 0) { | |
596 // keep track of how fstp(i) changes the order of elements | |
597 int tos_i = st2idx(0); | |
598 stack_[i] = stack_[tos_i]; | |
599 } | |
600 pop(); | |
601 __ fstp(st); | |
602 } | |
603 | |
604 | |
605 void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) { | |
606 if (x87_stack_.Contains(dst)) { | |
607 x87_stack_.Fxch(dst); | |
608 __ fstp(0); | |
609 } else { | |
610 x87_stack_.push(dst); | |
611 } | |
612 X87Fld(src, opts); | |
613 } | |
614 | |
615 | |
616 void LCodeGen::X87Fld(Operand src, X87OperandType opts) { | |
617 ASSERT(!src.is_reg_only()); | |
618 switch (opts) { | |
619 case kX87DoubleOperand: | |
620 __ fld_d(src); | |
621 break; | |
622 case kX87FloatOperand: | |
623 __ fld_s(src); | |
624 break; | |
625 case kX87IntOperand: | |
626 __ fild_s(src); | |
627 break; | |
628 default: | |
629 UNREACHABLE(); | |
630 } | |
631 } | |
632 | |
633 | |
634 void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) { | |
635 ASSERT(!dst.is_reg_only()); | |
636 x87_stack_.Fxch(src); | |
637 switch (opts) { | |
638 case kX87DoubleOperand: | |
639 __ fst_d(dst); | |
640 break; | |
641 case kX87IntOperand: | |
642 __ fist_s(dst); | |
643 break; | |
644 default: | |
645 UNREACHABLE(); | |
646 } | |
647 } | |
648 | |
649 | |
650 void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) { | |
651 ASSERT(is_mutable_); | |
652 if (Contains(reg)) { | |
653 Free(reg); | |
654 } | |
655 // Mark this register as the next register to write to | |
656 stack_[stack_depth_] = reg; | |
657 } | |
658 | |
659 | |
660 void LCodeGen::X87Stack::CommitWrite(X87Register reg) { | |
661 ASSERT(is_mutable_); | |
662 // Assert the reg is prepared to write, but not on the virtual stack yet | |
663 ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) && | |
664 stack_depth_ < X87Register::kNumAllocatableRegisters); | |
665 stack_depth_++; | |
666 } | |
667 | |
668 | |
669 void LCodeGen::X87PrepareBinaryOp( | |
670 X87Register left, X87Register right, X87Register result) { | |
671 // You need to use DefineSameAsFirst for x87 instructions | |
672 ASSERT(result.is(left)); | |
673 x87_stack_.Fxch(right, 1); | |
674 x87_stack_.Fxch(left); | |
675 } | |
676 | |
677 | |
678 void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) { | |
679 if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters(isolate())) { | |
680 bool double_inputs = instr->HasDoubleRegisterInput(); | |
681 | |
682 // Flush stack from tos down, since FreeX87() will mess with tos | |
683 for (int i = stack_depth_-1; i >= 0; i--) { | |
684 X87Register reg = stack_[i]; | |
685 // Skip registers which contain the inputs for the next instruction | |
686 // when flushing the stack | |
687 if (double_inputs && instr->IsDoubleInput(reg, cgen)) { | |
688 continue; | |
689 } | |
690 Free(reg); | |
691 if (i < stack_depth_-1) i++; | |
692 } | |
693 } | |
694 if (instr->IsReturn()) { | |
695 while (stack_depth_ > 0) { | |
696 __ fstp(0); | |
697 stack_depth_--; | |
698 } | |
699 if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0); | |
700 } | |
701 } | |
702 | |
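
The flushing loop above has a subtle step: Free() implements fstp(st) for a non-top slot by moving the current top value down into the freed slot, so the element that was at the old top must be revisited at index i — that is what the `if (i < stack_depth_-1) i++;` compensation does. A toy reproduction of that dance, assuming two live inputs that must survive the flush:

    #include <cstdio>
    #include <vector>

    // Toy model of X87Stack::FlushIfNecessary's freeing order. Free(i)
    // mimics fstp(st): freeing a slot below the hardware top moves the
    // old top value down into that slot before the stack shrinks.
    std::vector<int> stack_vals = {10, 20, 30, 40};  // last element is st(0)

    bool IsLiveInput(int v) { return v == 20 || v == 40; }  // pretend inputs

    void Free(int i) {
      int st = (int)stack_vals.size() - i - 1;          // st2idx
      if (st > 0) stack_vals[i] = stack_vals.back();    // top drops into slot i
      stack_vals.pop_back();
    }

    int main() {
      for (int i = (int)stack_vals.size() - 1; i >= 0; i--) {
        if (IsLiveInput(stack_vals[i])) continue;
        Free(i);
        // The old top may now occupy index i, so step back to revisit it;
        // this cancels the loop's i-- exactly as in the code above.
        if (i < (int)stack_vals.size() - 1) i++;
      }
      for (int v : stack_vals) std::printf("%d ", v);  // only 40 and 20 survive
      std::printf("\n");
    }
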
703 | |
704 void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) { | |
705 ASSERT(stack_depth_ <= 1); | |
706 // If ever used for new stubs producing two pairs of doubles joined into two | |
707 // phis, this assert hits. That situation is not handled, since the two stacks |
708 // might have st0 and st1 swapped. | |
709 if (current_block_id + 1 != goto_instr->block_id()) { | |
710 // If we have a value on the x87 stack on leaving a block, it must be a | |
711 // phi input. If the next block we compile is not the join block, we have | |
712 // to discard the stack state. | |
713 stack_depth_ = 0; | |
714 } | |
715 } | |
716 | |
717 | |
718 void LCodeGen::EmitFlushX87ForDeopt() { | |
719 // The deoptimizer does not support X87 registers. But as long as we |
720 // deopt from a stub it's not a problem, since we will re-materialize the |
721 // original stub inputs, which can't be double registers. | |
722 ASSERT(info()->IsStub()); | |
723 if (FLAG_debug_code && FLAG_enable_slow_asserts) { | |
724 __ pushfd(); | |
725 __ VerifyX87StackDepth(x87_stack_.depth()); | |
726 __ popfd(); | |
727 } | |
728 for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0); | |
729 } | |
730 | |
731 | |
732 Register LCodeGen::ToRegister(LOperand* op) const { | 490 Register LCodeGen::ToRegister(LOperand* op) const { |
733 ASSERT(op->IsRegister()); | 491 ASSERT(op->IsRegister()); |
734 return ToRegister(op->index()); | 492 return ToRegister(op->index()); |
735 } | 493 } |
736 | 494 |
737 | 495 |
738 X87Register LCodeGen::ToX87Register(LOperand* op) const { | |
739 ASSERT(op->IsDoubleRegister()); | |
740 return ToX87Register(op->index()); | |
741 } | |
742 | |
743 | |
744 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const { | 496 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const { |
745 ASSERT(op->IsDoubleRegister()); | 497 ASSERT(op->IsDoubleRegister()); |
746 return ToDoubleRegister(op->index()); | 498 return ToDoubleRegister(op->index()); |
747 } | 499 } |
748 | 500 |
749 | 501 |
750 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { | 502 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { |
751 return ToRepresentation(op, Representation::Integer32()); | 503 return ToRepresentation(op, Representation::Integer32()); |
752 } | 504 } |
753 | 505 |
(...skipping 331 matching lines...)
1085 __ pop(eax); | 837 __ pop(eax); |
1086 __ popfd(); | 838 __ popfd(); |
1087 ASSERT(frame_is_built_); | 839 ASSERT(frame_is_built_); |
1088 __ call(entry, RelocInfo::RUNTIME_ENTRY); | 840 __ call(entry, RelocInfo::RUNTIME_ENTRY); |
1089 __ bind(&no_deopt); | 841 __ bind(&no_deopt); |
1090 __ mov(Operand::StaticVariable(count), eax); | 842 __ mov(Operand::StaticVariable(count), eax); |
1091 __ pop(eax); | 843 __ pop(eax); |
1092 __ popfd(); | 844 __ popfd(); |
1093 } | 845 } |
1094 | 846 |
1095 // Before instructions which can deopt, we normally flush the x87 stack. But |
1096 // we can have inputs or outputs of the current instruction on the stack, | |
1097 // thus we need to flush them here from the physical stack to leave it in a | |
1098 // consistent state. | |
1099 if (x87_stack_.depth() > 0) { | |
1100 Label done; | |
1101 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); | |
1102 EmitFlushX87ForDeopt(); | |
1103 __ bind(&done); | |
1104 } | |
1105 | |
1106 if (info()->ShouldTrapOnDeopt()) { | 847 if (info()->ShouldTrapOnDeopt()) { |
1107 Label done; | 848 Label done; |
1108 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); | 849 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); |
1109 __ int3(); | 850 __ int3(); |
1110 __ bind(&done); | 851 __ bind(&done); |
1111 } | 852 } |
1112 | 853 |
1113 ASSERT(info()->IsStub() || frame_is_built_); | 854 ASSERT(info()->IsStub() || frame_is_built_); |
1114 if (cc == no_condition && frame_is_built_) { | 855 if (cc == no_condition && frame_is_built_) { |
1115 __ call(entry, RelocInfo::RUNTIME_ENTRY); | 856 __ call(entry, RelocInfo::RUNTIME_ENTRY); |
(...skipping 840 matching lines...)
1956 } | 1697 } |
1957 | 1698 |
1958 | 1699 |
1959 void LCodeGen::DoConstantD(LConstantD* instr) { | 1700 void LCodeGen::DoConstantD(LConstantD* instr) { |
1960 double v = instr->value(); | 1701 double v = instr->value(); |
1961 uint64_t int_val = BitCast<uint64_t, double>(v); | 1702 uint64_t int_val = BitCast<uint64_t, double>(v); |
1962 int32_t lower = static_cast<int32_t>(int_val); | 1703 int32_t lower = static_cast<int32_t>(int_val); |
1963 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); | 1704 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); |
1964 ASSERT(instr->result()->IsDoubleRegister()); | 1705 ASSERT(instr->result()->IsDoubleRegister()); |
1965 | 1706 |
1966 if (!CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) { | 1707 XMMRegister res = ToDoubleRegister(instr->result()); |
1967 __ push(Immediate(upper)); | 1708 if (int_val == 0) { |
1968 __ push(Immediate(lower)); | 1709 __ xorps(res, res); |
1969 X87Register reg = ToX87Register(instr->result()); | |
1970 X87Mov(reg, Operand(esp, 0)); | |
1971 __ add(Operand(esp), Immediate(kDoubleSize)); | |
1972 } else { | 1710 } else { |
1973 CpuFeatureScope scope1(masm(), SSE2); | 1711 Register temp = ToRegister(instr->temp()); |
1974 XMMRegister res = ToDoubleRegister(instr->result()); | 1712 if (CpuFeatures::IsSupported(SSE4_1)) { |
1975 if (int_val == 0) { | 1713 CpuFeatureScope scope2(masm(), SSE4_1); |
1976 __ xorps(res, res); | 1714 if (lower != 0) { |
| 1715 __ Move(temp, Immediate(lower)); |
| 1716 __ movd(res, Operand(temp)); |
| 1717 __ Move(temp, Immediate(upper)); |
| 1718 __ pinsrd(res, Operand(temp), 1); |
| 1719 } else { |
| 1720 __ xorps(res, res); |
| 1721 __ Move(temp, Immediate(upper)); |
| 1722 __ pinsrd(res, Operand(temp), 1); |
| 1723 } |
1977 } else { | 1724 } else { |
1978 Register temp = ToRegister(instr->temp()); | 1725 __ Move(temp, Immediate(upper)); |
1979 if (CpuFeatures::IsSupported(SSE4_1)) { | 1726 __ movd(res, Operand(temp)); |
1980 CpuFeatureScope scope2(masm(), SSE4_1); | 1727 __ psllq(res, 32); |
1981 if (lower != 0) { | 1728 if (lower != 0) { |
1982 __ Move(temp, Immediate(lower)); | 1729 XMMRegister xmm_scratch = double_scratch0(); |
1983 __ movd(res, Operand(temp)); | 1730 __ Move(temp, Immediate(lower)); |
1984 __ Move(temp, Immediate(upper)); | 1731 __ movd(xmm_scratch, Operand(temp)); |
1985 __ pinsrd(res, Operand(temp), 1); | 1732 __ orps(res, xmm_scratch); |
1986 } else { | |
1987 __ xorps(res, res); | |
1988 __ Move(temp, Immediate(upper)); | |
1989 __ pinsrd(res, Operand(temp), 1); | |
1990 } | |
1991 } else { | |
1992 __ Move(temp, Immediate(upper)); | |
1993 __ movd(res, Operand(temp)); | |
1994 __ psllq(res, 32); | |
1995 if (lower != 0) { | |
1996 XMMRegister xmm_scratch = double_scratch0(); | |
1997 __ Move(temp, Immediate(lower)); | |
1998 __ movd(xmm_scratch, Operand(temp)); | |
1999 __ orps(res, xmm_scratch); | |
2000 } | |
2001 } | 1733 } |
2002 } | 1734 } |
2003 } | 1735 } |
2004 } | 1736 } |
2005 | 1737 |
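
DoConstantD materializes a double from its raw 64-bit pattern: BitCast reinterprets the bytes, and the two 32-bit halves are then moved into the XMM register separately (pinsrd with SSE4.1, otherwise movd/psllq/orps). The bit-splitting in portable C++ (a sketch, not V8's BitCast helper):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double v = 1.5;

      // Equivalent of BitCast<uint64_t, double>(v): reinterpret the bytes.
      uint64_t int_val;
      std::memcpy(&int_val, &v, sizeof v);

      // The two halves DoConstantD feeds to movd/pinsrd.
      uint32_t lower = (uint32_t)int_val;          // bits 0..31
      uint32_t upper = (uint32_t)(int_val >> 32);  // bits 32..63 (sign/exponent)

      std::printf("lower=0x%08x upper=0x%08x\n", lower, upper);
      // 1.5 == 0x3FF8000000000000, so lower is 0 here -- which is why the
      // lower == 0 fast paths above can skip one register move.

      // Reassemble to show the round trip is lossless.
      uint64_t back = ((uint64_t)upper << 32) | lower;
      double w;
      std::memcpy(&w, &back, sizeof w);
      std::printf("%g\n", w);  // 1.5
    }
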
2006 | 1738 |
2007 void LCodeGen::DoConstantE(LConstantE* instr) { | 1739 void LCodeGen::DoConstantE(LConstantE* instr) { |
2008 __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value())); | 1740 __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value())); |
2009 } | 1741 } |
2010 | 1742 |
(...skipping 162 matching lines...)
2173 __ add(ToRegister(left), ToOperand(right)); | 1905 __ add(ToRegister(left), ToOperand(right)); |
2174 } | 1906 } |
2175 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { | 1907 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
2176 DeoptimizeIf(overflow, instr->environment()); | 1908 DeoptimizeIf(overflow, instr->environment()); |
2177 } | 1909 } |
2178 } | 1910 } |
2179 } | 1911 } |
2180 | 1912 |
2181 | 1913 |
2182 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { | 1914 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { |
2183 CpuFeatureScope scope(masm(), SSE2); | |
2184 LOperand* left = instr->left(); | 1915 LOperand* left = instr->left(); |
2185 LOperand* right = instr->right(); | 1916 LOperand* right = instr->right(); |
2186 ASSERT(left->Equals(instr->result())); | 1917 ASSERT(left->Equals(instr->result())); |
2187 HMathMinMax::Operation operation = instr->hydrogen()->operation(); | 1918 HMathMinMax::Operation operation = instr->hydrogen()->operation(); |
2188 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { | 1919 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { |
2189 Label return_left; | 1920 Label return_left; |
2190 Condition condition = (operation == HMathMinMax::kMathMin) | 1921 Condition condition = (operation == HMathMinMax::kMathMin) |
2191 ? less_equal | 1922 ? less_equal |
2192 : greater_equal; | 1923 : greater_equal; |
2193 if (right->IsConstantOperand()) { | 1924 if (right->IsConstantOperand()) { |
(...skipping 42 matching lines...)
2236 __ j(parity_even, &return_left, Label::kNear); // left == NaN. | 1967 __ j(parity_even, &return_left, Label::kNear); // left == NaN. |
2237 __ bind(&return_right); | 1968 __ bind(&return_right); |
2238 __ movaps(left_reg, right_reg); | 1969 __ movaps(left_reg, right_reg); |
2239 | 1970 |
2240 __ bind(&return_left); | 1971 __ bind(&return_left); |
2241 } | 1972 } |
2242 } | 1973 } |
2243 | 1974 |
2244 | 1975 |
2245 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { | 1976 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { |
2246 if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) { | 1977 XMMRegister left = ToDoubleRegister(instr->left()); |
2247 CpuFeatureScope scope(masm(), SSE2); | 1978 XMMRegister right = ToDoubleRegister(instr->right()); |
2248 XMMRegister left = ToDoubleRegister(instr->left()); | 1979 XMMRegister result = ToDoubleRegister(instr->result()); |
2249 XMMRegister right = ToDoubleRegister(instr->right()); | 1980 switch (instr->op()) { |
2250 XMMRegister result = ToDoubleRegister(instr->result()); | 1981 case Token::ADD: |
2251 switch (instr->op()) { | 1982 __ addsd(left, right); |
2252 case Token::ADD: | 1983 break; |
2253 __ addsd(left, right); | 1984 case Token::SUB: |
2254 break; | 1985 __ subsd(left, right); |
2255 case Token::SUB: | 1986 break; |
2256 __ subsd(left, right); | 1987 case Token::MUL: |
2257 break; | 1988 __ mulsd(left, right); |
2258 case Token::MUL: | 1989 break; |
2259 __ mulsd(left, right); | 1990 case Token::DIV: |
2260 break; | 1991 __ divsd(left, right); |
2261 case Token::DIV: | 1992 // Don't delete this mov. It may improve performance on some CPUs, |
2262 __ divsd(left, right); | 1993 // when there is a mulsd depending on the result |
2263 // Don't delete this mov. It may improve performance on some CPUs, | 1994 __ movaps(left, left); |
2264 // when there is a mulsd depending on the result | 1995 break; |
2265 __ movaps(left, left); | 1996 case Token::MOD: { |
2266 break; | 1997 // Pass two doubles as arguments on the stack. |
2267 case Token::MOD: { | 1998 __ PrepareCallCFunction(4, eax); |
2268 // Pass two doubles as arguments on the stack. | 1999 __ movsd(Operand(esp, 0 * kDoubleSize), left); |
2269 __ PrepareCallCFunction(4, eax); | 2000 __ movsd(Operand(esp, 1 * kDoubleSize), right); |
2270 __ movsd(Operand(esp, 0 * kDoubleSize), left); | 2001 __ CallCFunction( |
2271 __ movsd(Operand(esp, 1 * kDoubleSize), right); | 2002 ExternalReference::mod_two_doubles_operation(isolate()), |
2272 __ CallCFunction( | 2003 4); |
2273 ExternalReference::mod_two_doubles_operation(isolate()), | |
2274 4); | |
2275 | 2004 |
2276 // Return value is in st(0) on ia32. | 2005 // Return value is in st(0) on ia32. |
2277 // Store it into the result register. | 2006 // Store it into the result register. |
2278 __ sub(Operand(esp), Immediate(kDoubleSize)); | 2007 __ sub(Operand(esp), Immediate(kDoubleSize)); |
2279 __ fstp_d(Operand(esp, 0)); | 2008 __ fstp_d(Operand(esp, 0)); |
2280 __ movsd(result, Operand(esp, 0)); | 2009 __ movsd(result, Operand(esp, 0)); |
2281 __ add(Operand(esp), Immediate(kDoubleSize)); | 2010 __ add(Operand(esp), Immediate(kDoubleSize)); |
2282 break; | 2011 break; |
2283 } | |
2284 default: | |
2285 UNREACHABLE(); | |
2286 break; | |
2287 } | 2012 } |
2288 } else { | 2013 default: |
2289 X87Register left = ToX87Register(instr->left()); | 2014 UNREACHABLE(); |
2290 X87Register right = ToX87Register(instr->right()); | 2015 break; |
2291 X87Register result = ToX87Register(instr->result()); | |
2292 if (instr->op() != Token::MOD) { | |
2293 X87PrepareBinaryOp(left, right, result); | |
2294 } | |
2295 switch (instr->op()) { | |
2296 case Token::ADD: | |
2297 __ fadd_i(1); | |
2298 break; | |
2299 case Token::SUB: | |
2300 __ fsub_i(1); | |
2301 break; | |
2302 case Token::MUL: | |
2303 __ fmul_i(1); | |
2304 break; | |
2305 case Token::DIV: | |
2306 __ fdiv_i(1); | |
2307 break; | |
2308 case Token::MOD: { | |
2309 // Pass two doubles as arguments on the stack. | |
2310 __ PrepareCallCFunction(4, eax); | |
2311 X87Mov(Operand(esp, 1 * kDoubleSize), right); | |
2312 X87Mov(Operand(esp, 0), left); | |
2313 X87Free(right); | |
2314 ASSERT(left.is(result)); | |
2315 X87PrepareToWrite(result); | |
2316 __ CallCFunction( | |
2317 ExternalReference::mod_two_doubles_operation(isolate()), | |
2318 4); | |
2319 | |
2320 // Return value is in st(0) on ia32. | |
2321 X87CommitWrite(result); | |
2322 break; | |
2323 } | |
2324 default: | |
2325 UNREACHABLE(); | |
2326 break; | |
2327 } | |
2328 } | 2016 } |
2329 } | 2017 } |
2330 | 2018 |
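
Token::MOD is the one double operation above with no single SSE instruction, so the code calls out to C and retrieves the result from x87 st(0) via a stack slot. A sketch of what mod_two_doubles_operation is assumed to compute — a truncated-division remainder carrying the dividend's sign, i.e. the standard fmod:

    #include <cmath>
    #include <cstdio>

    // Assumed behavior of ExternalReference::mod_two_doubles_operation:
    // a plain C function taking two doubles on the stack and returning
    // the remainder (which the ia32 ABI hands back in x87 st(0)).
    double mod_two_doubles(double left, double right) {
      return std::fmod(left, right);  // truncated-division remainder
    }

    int main() {
      std::printf("%g\n", mod_two_doubles(5.5, 2.0));   // 1.5
      std::printf("%g\n", mod_two_doubles(-5.5, 2.0));  // -1.5, sign of dividend
      std::printf("%g\n", mod_two_doubles(5.5, 0.0));   // nan
    }
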
2331 | 2019 |
2332 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { | 2020 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { |
2333 ASSERT(ToRegister(instr->context()).is(esi)); | 2021 ASSERT(ToRegister(instr->context()).is(esi)); |
2334 ASSERT(ToRegister(instr->left()).is(edx)); | 2022 ASSERT(ToRegister(instr->left()).is(edx)); |
2335 ASSERT(ToRegister(instr->right()).is(eax)); | 2023 ASSERT(ToRegister(instr->right()).is(eax)); |
2336 ASSERT(ToRegister(instr->result()).is(eax)); | 2024 ASSERT(ToRegister(instr->result()).is(eax)); |
2337 | 2025 |
(...skipping 34 matching lines...)
2372 | 2060 |
2373 | 2061 |
2374 void LCodeGen::DoBranch(LBranch* instr) { | 2062 void LCodeGen::DoBranch(LBranch* instr) { |
2375 Representation r = instr->hydrogen()->value()->representation(); | 2063 Representation r = instr->hydrogen()->value()->representation(); |
2376 if (r.IsSmiOrInteger32()) { | 2064 if (r.IsSmiOrInteger32()) { |
2377 Register reg = ToRegister(instr->value()); | 2065 Register reg = ToRegister(instr->value()); |
2378 __ test(reg, Operand(reg)); | 2066 __ test(reg, Operand(reg)); |
2379 EmitBranch(instr, not_zero); | 2067 EmitBranch(instr, not_zero); |
2380 } else if (r.IsDouble()) { | 2068 } else if (r.IsDouble()) { |
2381 ASSERT(!info()->IsStub()); | 2069 ASSERT(!info()->IsStub()); |
2382 CpuFeatureScope scope(masm(), SSE2); | |
2383 XMMRegister reg = ToDoubleRegister(instr->value()); | 2070 XMMRegister reg = ToDoubleRegister(instr->value()); |
2384 XMMRegister xmm_scratch = double_scratch0(); | 2071 XMMRegister xmm_scratch = double_scratch0(); |
2385 __ xorps(xmm_scratch, xmm_scratch); | 2072 __ xorps(xmm_scratch, xmm_scratch); |
2386 __ ucomisd(reg, xmm_scratch); | 2073 __ ucomisd(reg, xmm_scratch); |
2387 EmitBranch(instr, not_equal); | 2074 EmitBranch(instr, not_equal); |
2388 } else { | 2075 } else { |
2389 ASSERT(r.IsTagged()); | 2076 ASSERT(r.IsTagged()); |
2390 Register reg = ToRegister(instr->value()); | 2077 Register reg = ToRegister(instr->value()); |
2391 HType type = instr->hydrogen()->value()->type(); | 2078 HType type = instr->hydrogen()->value()->type(); |
2392 if (type.IsBoolean()) { | 2079 if (type.IsBoolean()) { |
2393 ASSERT(!info()->IsStub()); | 2080 ASSERT(!info()->IsStub()); |
2394 __ cmp(reg, factory()->true_value()); | 2081 __ cmp(reg, factory()->true_value()); |
2395 EmitBranch(instr, equal); | 2082 EmitBranch(instr, equal); |
2396 } else if (type.IsSmi()) { | 2083 } else if (type.IsSmi()) { |
2397 ASSERT(!info()->IsStub()); | 2084 ASSERT(!info()->IsStub()); |
2398 __ test(reg, Operand(reg)); | 2085 __ test(reg, Operand(reg)); |
2399 EmitBranch(instr, not_equal); | 2086 EmitBranch(instr, not_equal); |
2400 } else if (type.IsJSArray()) { | 2087 } else if (type.IsJSArray()) { |
2401 ASSERT(!info()->IsStub()); | 2088 ASSERT(!info()->IsStub()); |
2402 EmitBranch(instr, no_condition); | 2089 EmitBranch(instr, no_condition); |
2403 } else if (type.IsHeapNumber()) { | 2090 } else if (type.IsHeapNumber()) { |
2404 ASSERT(!info()->IsStub()); | 2091 ASSERT(!info()->IsStub()); |
2405 CpuFeatureScope scope(masm(), SSE2); | |
2406 XMMRegister xmm_scratch = double_scratch0(); | 2092 XMMRegister xmm_scratch = double_scratch0(); |
2407 __ xorps(xmm_scratch, xmm_scratch); | 2093 __ xorps(xmm_scratch, xmm_scratch); |
2408 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); | 2094 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); |
2409 EmitBranch(instr, not_equal); | 2095 EmitBranch(instr, not_equal); |
2410 } else if (type.IsString()) { | 2096 } else if (type.IsString()) { |
2411 ASSERT(!info()->IsStub()); | 2097 ASSERT(!info()->IsStub()); |
2412 __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); | 2098 __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); |
2413 EmitBranch(instr, not_equal); | 2099 EmitBranch(instr, not_equal); |
2414 } else { | 2100 } else { |
2415 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); | 2101 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); |
(...skipping 65 matching lines...)
2481 __ CmpInstanceType(map, SYMBOL_TYPE); | 2167 __ CmpInstanceType(map, SYMBOL_TYPE); |
2482 __ j(equal, instr->TrueLabel(chunk_)); | 2168 __ j(equal, instr->TrueLabel(chunk_)); |
2483 } | 2169 } |
2484 | 2170 |
2485 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { | 2171 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { |
2486 // heap number -> false iff +0, -0, or NaN. | 2172 // heap number -> false iff +0, -0, or NaN. |
2487 Label not_heap_number; | 2173 Label not_heap_number; |
2488 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), | 2174 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), |
2489 factory()->heap_number_map()); | 2175 factory()->heap_number_map()); |
2490 __ j(not_equal, &not_heap_number, Label::kNear); | 2176 __ j(not_equal, &not_heap_number, Label::kNear); |
2491 if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) { | 2177 XMMRegister xmm_scratch = double_scratch0(); |
2492 CpuFeatureScope scope(masm(), SSE2); | 2178 __ xorps(xmm_scratch, xmm_scratch); |
2493 XMMRegister xmm_scratch = double_scratch0(); | 2179 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); |
2494 __ xorps(xmm_scratch, xmm_scratch); | |
2495 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); | |
2496 } else { | |
2497 __ fldz(); | |
2498 __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset)); | |
2499 __ FCmp(); | |
2500 } | |
2501 __ j(zero, instr->FalseLabel(chunk_)); | 2180 __ j(zero, instr->FalseLabel(chunk_)); |
2502 __ jmp(instr->TrueLabel(chunk_)); | 2181 __ jmp(instr->TrueLabel(chunk_)); |
2503 __ bind(&not_heap_number); | 2182 __ bind(&not_heap_number); |
2504 } | 2183 } |
2505 | 2184 |
2506 if (!expected.IsGeneric()) { | 2185 if (!expected.IsGeneric()) { |
2507 // We've seen something for the first time -> deopt. | 2186 // We've seen something for the first time -> deopt. |
2508 // This can only happen if we are not generic already. | 2187 // This can only happen if we are not generic already. |
2509 DeoptimizeIf(no_condition, instr->environment()); | 2188 DeoptimizeIf(no_condition, instr->environment()); |
2510 } | 2189 } |
2511 } | 2190 } |
2512 } | 2191 } |
2513 } | 2192 } |
2514 | 2193 |
2515 | 2194 |
2516 void LCodeGen::EmitGoto(int block) { | 2195 void LCodeGen::EmitGoto(int block) { |
2517 if (!IsNextEmittedBlock(block)) { | 2196 if (!IsNextEmittedBlock(block)) { |
2518 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); | 2197 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); |
2519 } | 2198 } |
2520 } | 2199 } |
2521 | 2200 |
2522 | 2201 |
2523 void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) { | |
2524 } | |
2525 | |
2526 | |
2527 void LCodeGen::DoGoto(LGoto* instr) { | 2202 void LCodeGen::DoGoto(LGoto* instr) { |
2528 EmitGoto(instr->block_id()); | 2203 EmitGoto(instr->block_id()); |
2529 } | 2204 } |
2530 | 2205 |
2531 | 2206 |
2532 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { | 2207 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { |
2533 Condition cond = no_condition; | 2208 Condition cond = no_condition; |
2534 switch (op) { | 2209 switch (op) { |
2535 case Token::EQ: | 2210 case Token::EQ: |
2536 case Token::EQ_STRICT: | 2211 case Token::EQ_STRICT: |
(...skipping 31 matching lines...)
2568 | 2243 |
2569 if (left->IsConstantOperand() && right->IsConstantOperand()) { | 2244 if (left->IsConstantOperand() && right->IsConstantOperand()) { |
2570 // We can statically evaluate the comparison. | 2245 // We can statically evaluate the comparison. |
2571 double left_val = ToDouble(LConstantOperand::cast(left)); | 2246 double left_val = ToDouble(LConstantOperand::cast(left)); |
2572 double right_val = ToDouble(LConstantOperand::cast(right)); | 2247 double right_val = ToDouble(LConstantOperand::cast(right)); |
2573 int next_block = EvalComparison(instr->op(), left_val, right_val) ? | 2248 int next_block = EvalComparison(instr->op(), left_val, right_val) ? |
2574 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_); | 2249 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_); |
2575 EmitGoto(next_block); | 2250 EmitGoto(next_block); |
2576 } else { | 2251 } else { |
2577 if (instr->is_double()) { | 2252 if (instr->is_double()) { |
2578 if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) { | 2253 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right)); |
2579 CpuFeatureScope scope(masm(), SSE2); | |
2580 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right)); | |
2581 } else { | |
2582 X87LoadForUsage(ToX87Register(right), ToX87Register(left)); | |
2583 __ FCmp(); | |
2584 } | |
2585 // Don't base result on EFLAGS when a NaN is involved. Instead | 2254 // Don't base result on EFLAGS when a NaN is involved. Instead |
2586 // jump to the false block. | 2255 // jump to the false block. |
2587 __ j(parity_even, instr->FalseLabel(chunk_)); | 2256 __ j(parity_even, instr->FalseLabel(chunk_)); |
2588 } else { | 2257 } else { |
2589 if (right->IsConstantOperand()) { | 2258 if (right->IsConstantOperand()) { |
2590 __ cmp(ToOperand(left), | 2259 __ cmp(ToOperand(left), |
2591 ToImmediate(right, instr->hydrogen()->representation())); | 2260 ToImmediate(right, instr->hydrogen()->representation())); |
2592 } else if (left->IsConstantOperand()) { | 2261 } else if (left->IsConstantOperand()) { |
2593 __ cmp(ToOperand(right), | 2262 __ cmp(ToOperand(right), |
2594 ToImmediate(left, instr->hydrogen()->representation())); | 2263 ToImmediate(left, instr->hydrogen()->representation())); |
(...skipping 23 matching lines...)
2618 | 2287 |
2619 | 2288 |
2620 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { | 2289 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { |
2621 if (instr->hydrogen()->representation().IsTagged()) { | 2290 if (instr->hydrogen()->representation().IsTagged()) { |
2622 Register input_reg = ToRegister(instr->object()); | 2291 Register input_reg = ToRegister(instr->object()); |
2623 __ cmp(input_reg, factory()->the_hole_value()); | 2292 __ cmp(input_reg, factory()->the_hole_value()); |
2624 EmitBranch(instr, equal); | 2293 EmitBranch(instr, equal); |
2625 return; | 2294 return; |
2626 } | 2295 } |
2627 | 2296 |
2628 bool use_sse2 = CpuFeatures::IsSupported(SSE2); | 2297 XMMRegister input_reg = ToDoubleRegister(instr->object()); |
2629 if (use_sse2) { | 2298 __ ucomisd(input_reg, input_reg); |
2630 CpuFeatureScope scope(masm(), SSE2); | 2299 EmitFalseBranch(instr, parity_odd); |
2631 XMMRegister input_reg = ToDoubleRegister(instr->object()); | |
2632 __ ucomisd(input_reg, input_reg); | |
2633 EmitFalseBranch(instr, parity_odd); | |
2634 } else { | |
2635 // Put the value to the top of stack | |
2636 X87Register src = ToX87Register(instr->object()); | |
2637 X87LoadForUsage(src); | |
2638 __ fld(0); | |
2639 __ fld(0); | |
2640 __ FCmp(); | |
2641 Label ok; | |
2642 __ j(parity_even, &ok, Label::kNear); | |
2643 __ fstp(0); | |
2644 EmitFalseBranch(instr, no_condition); | |
2645 __ bind(&ok); | |
2646 } | |
2647 | |
2648 | 2300 |
2649 __ sub(esp, Immediate(kDoubleSize)); | 2301 __ sub(esp, Immediate(kDoubleSize)); |
2650 if (use_sse2) { | 2302 __ movsd(MemOperand(esp, 0), input_reg); |
2651 CpuFeatureScope scope(masm(), SSE2); | |
2652 XMMRegister input_reg = ToDoubleRegister(instr->object()); | |
2653 __ movsd(MemOperand(esp, 0), input_reg); | |
2654 } else { | |
2655 __ fstp_d(MemOperand(esp, 0)); | |
2656 } | |
2657 | 2303 |
2658 __ add(esp, Immediate(kDoubleSize)); | 2304 __ add(esp, Immediate(kDoubleSize)); |
2659 int offset = sizeof(kHoleNanUpper32); | 2305 int offset = sizeof(kHoleNanUpper32); |
2660 __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32)); | 2306 __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32)); |
2661 EmitBranch(instr, equal); | 2307 EmitBranch(instr, equal); |
2662 } | 2308 } |
2663 | 2309 |
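
DoCmpHoleAndBranch tests in two steps: ucomisd of the value against itself filters out ordinary NaNs (only NaN is unordered with itself — hence the parity_odd false branch), then the value is spilled so that just its upper 32 bits can be compared with kHoleNanUpper32, the NaN payload V8 reserves for array holes. A standalone sketch (the hole bit pattern below is illustrative; the real constants come from V8's headers):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Illustrative stand-in for V8's hole-NaN pattern.
    const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;
    const uint32_t kHoleNanLower32 = 0xFFFFFFFF;

    bool IsHole(double v) {
      // Step 1 (ucomisd reg, reg): any non-NaN is definitely not the hole.
      if (!std::isnan(v)) return false;
      // Step 2: spill and compare only the upper 32 bits, exactly as the
      // movsd/cmp sequence above does through a stack slot.
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof v);
      return (uint32_t)(bits >> 32) == kHoleNanUpper32;
    }

    int main() {
      uint64_t hole_bits = ((uint64_t)kHoleNanUpper32 << 32) | kHoleNanLower32;
      double hole;
      std::memcpy(&hole, &hole_bits, sizeof hole);

      std::printf("%d %d %d\n", IsHole(hole),  // 1
                                IsHole(NAN),   // 0: ordinary quiet NaN
                                IsHole(1.0));  // 0
    }
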
2664 | 2310 |
2665 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { | 2311 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { |
2666 Representation rep = instr->hydrogen()->value()->representation(); | 2312 Representation rep = instr->hydrogen()->value()->representation(); |
2667 ASSERT(!rep.IsInteger32()); | 2313 ASSERT(!rep.IsInteger32()); |
2668 Register scratch = ToRegister(instr->temp()); | 2314 Register scratch = ToRegister(instr->temp()); |
2669 | 2315 |
2670 if (rep.IsDouble()) { | 2316 if (rep.IsDouble()) { |
2671 CpuFeatureScope use_sse2(masm(), SSE2); | |
2672 XMMRegister value = ToDoubleRegister(instr->value()); | 2317 XMMRegister value = ToDoubleRegister(instr->value()); |
2673 XMMRegister xmm_scratch = double_scratch0(); | 2318 XMMRegister xmm_scratch = double_scratch0(); |
2674 __ xorps(xmm_scratch, xmm_scratch); | 2319 __ xorps(xmm_scratch, xmm_scratch); |
2675 __ ucomisd(xmm_scratch, value); | 2320 __ ucomisd(xmm_scratch, value); |
2676 EmitFalseBranch(instr, not_equal); | 2321 EmitFalseBranch(instr, not_equal); |
2677 __ movmskpd(scratch, value); | 2322 __ movmskpd(scratch, value); |
2678 __ test(scratch, Immediate(1)); | 2323 __ test(scratch, Immediate(1)); |
2679 EmitBranch(instr, not_zero); | 2324 EmitBranch(instr, not_zero); |
2680 } else { | 2325 } else { |
2681 Register value = ToRegister(instr->value()); | 2326 Register value = ToRegister(instr->value()); |
(...skipping 282 matching lines...)
2964 __ bind(&true_value); | 2609 __ bind(&true_value); |
2965 __ mov(ToRegister(instr->result()), factory()->true_value()); | 2610 __ mov(ToRegister(instr->result()), factory()->true_value()); |
2966 __ bind(&done); | 2611 __ bind(&done); |
2967 } | 2612 } |
2968 | 2613 |
2969 | 2614 |
2970 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { | 2615 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { |
2971 class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode { | 2616 class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode { |
2972 public: | 2617 public: |
2973 DeferredInstanceOfKnownGlobal(LCodeGen* codegen, | 2618 DeferredInstanceOfKnownGlobal(LCodeGen* codegen, |
2974 LInstanceOfKnownGlobal* instr, | 2619 LInstanceOfKnownGlobal* instr) |
2975 const X87Stack& x87_stack) | 2620 : LDeferredCode(codegen), instr_(instr) { } |
2976 : LDeferredCode(codegen, x87_stack), instr_(instr) { } | |
2977 virtual void Generate() V8_OVERRIDE { | 2621 virtual void Generate() V8_OVERRIDE { |
2978 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_); | 2622 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_); |
2979 } | 2623 } |
2980 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 2624 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
2981 Label* map_check() { return &map_check_; } | 2625 Label* map_check() { return &map_check_; } |
2982 private: | 2626 private: |
2983 LInstanceOfKnownGlobal* instr_; | 2627 LInstanceOfKnownGlobal* instr_; |
2984 Label map_check_; | 2628 Label map_check_; |
2985 }; | 2629 }; |
2986 | 2630 |
2987 DeferredInstanceOfKnownGlobal* deferred; | 2631 DeferredInstanceOfKnownGlobal* deferred; |
2988 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_); | 2632 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr); |
2989 | 2633 |
2990 Label done, false_result; | 2634 Label done, false_result; |
2991 Register object = ToRegister(instr->value()); | 2635 Register object = ToRegister(instr->value()); |
2992 Register temp = ToRegister(instr->temp()); | 2636 Register temp = ToRegister(instr->temp()); |
2993 | 2637 |
2994 // A Smi is not an instance of anything. | 2638 // A Smi is not an instance of anything. |
2995 __ JumpIfSmi(object, &false_result, Label::kNear); | 2639 __ JumpIfSmi(object, &false_result, Label::kNear); |
2996 | 2640 |
2997 // This is the inlined call site instanceof cache. The two occurrences of the | 2641 // This is the inlined call site instanceof cache. The two occurrences of the |
2998 // hole value will be patched to the last map/result pair generated by the | 2642 // hole value will be patched to the last map/result pair generated by the |
(...skipping 128 matching lines...)
3127 void LCodeGen::DoReturn(LReturn* instr) { | 2771 void LCodeGen::DoReturn(LReturn* instr) { |
3128 if (FLAG_trace && info()->IsOptimizing()) { | 2772 if (FLAG_trace && info()->IsOptimizing()) { |
3129 // Preserve the return value on the stack and rely on the runtime call | 2773 // Preserve the return value on the stack and rely on the runtime call |
3130 // to return the value in the same register. We're leaving the code | 2774 // to return the value in the same register. We're leaving the code |
3131 // managed by the register allocator and tearing down the frame, it's | 2775 // managed by the register allocator and tearing down the frame, it's |
3132 // safe to write to the context register. | 2776 // safe to write to the context register. |
3133 __ push(eax); | 2777 __ push(eax); |
3134 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); | 2778 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); |
3135 __ CallRuntime(Runtime::kTraceExit, 1); | 2779 __ CallRuntime(Runtime::kTraceExit, 1); |
3136 } | 2780 } |
3137 if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) { | 2781 if (info()->saves_caller_doubles()) RestoreCallerDoubles(); |
3138 RestoreCallerDoubles(); | |
3139 } | |
3140 if (dynamic_frame_alignment_) { | 2782 if (dynamic_frame_alignment_) { |
3141 // Fetch the state of the dynamic frame alignment. | 2783 // Fetch the state of the dynamic frame alignment. |
3142 __ mov(edx, Operand(ebp, | 2784 __ mov(edx, Operand(ebp, |
3143 JavaScriptFrameConstants::kDynamicAlignmentStateOffset)); | 2785 JavaScriptFrameConstants::kDynamicAlignmentStateOffset)); |
3144 } | 2786 } |
3145 int no_frame_start = -1; | 2787 int no_frame_start = -1; |
3146 if (NeedsEagerFrame()) { | 2788 if (NeedsEagerFrame()) { |
3147 __ mov(esp, ebp); | 2789 __ mov(esp, ebp); |
3148 __ pop(ebp); | 2790 __ pop(ebp); |
3149 no_frame_start = masm_->pc_offset(); | 2791 no_frame_start = masm_->pc_offset(); |
(...skipping 94 matching lines...)
3244 if (instr->hydrogen()->NeedsWriteBarrier()) { | 2886 if (instr->hydrogen()->NeedsWriteBarrier()) { |
3245 SmiCheck check_needed = | 2887 SmiCheck check_needed = |
3246 instr->hydrogen()->value()->IsHeapObject() | 2888 instr->hydrogen()->value()->IsHeapObject() |
3247 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 2889 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
3248 Register temp = ToRegister(instr->temp()); | 2890 Register temp = ToRegister(instr->temp()); |
3249 int offset = Context::SlotOffset(instr->slot_index()); | 2891 int offset = Context::SlotOffset(instr->slot_index()); |
3250 __ RecordWriteContextSlot(context, | 2892 __ RecordWriteContextSlot(context, |
3251 offset, | 2893 offset, |
3252 value, | 2894 value, |
3253 temp, | 2895 temp, |
3254 GetSaveFPRegsMode(isolate()), | 2896 kSaveFPRegs, |
3255 EMIT_REMEMBERED_SET, | 2897 EMIT_REMEMBERED_SET, |
3256 check_needed); | 2898 check_needed); |
3257 } | 2899 } |
3258 | 2900 |
3259 __ bind(&skip_assignment); | 2901 __ bind(&skip_assignment); |
3260 } | 2902 } |
3261 | 2903 |
3262 | 2904 |
3263 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { | 2905 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { |
3264 HObjectAccess access = instr->hydrogen()->access(); | 2906 HObjectAccess access = instr->hydrogen()->access(); |
3265 int offset = access.offset(); | 2907 int offset = access.offset(); |
3266 | 2908 |
3267 if (access.IsExternalMemory()) { | 2909 if (access.IsExternalMemory()) { |
3268 Register result = ToRegister(instr->result()); | 2910 Register result = ToRegister(instr->result()); |
3269 MemOperand operand = instr->object()->IsConstantOperand() | 2911 MemOperand operand = instr->object()->IsConstantOperand() |
3270 ? MemOperand::StaticVariable(ToExternalReference( | 2912 ? MemOperand::StaticVariable(ToExternalReference( |
3271 LConstantOperand::cast(instr->object()))) | 2913 LConstantOperand::cast(instr->object()))) |
3272 : MemOperand(ToRegister(instr->object()), offset); | 2914 : MemOperand(ToRegister(instr->object()), offset); |
3273 __ Load(result, operand, access.representation()); | 2915 __ Load(result, operand, access.representation()); |
3274 return; | 2916 return; |
3275 } | 2917 } |
3276 | 2918 |
3277 Register object = ToRegister(instr->object()); | 2919 Register object = ToRegister(instr->object()); |
3278 if (instr->hydrogen()->representation().IsDouble()) { | 2920 if (instr->hydrogen()->representation().IsDouble()) { |
3279 if (CpuFeatures::IsSupported(SSE2)) { | 2921 XMMRegister result = ToDoubleRegister(instr->result()); |
3280 CpuFeatureScope scope(masm(), SSE2); | 2922 __ movsd(result, FieldOperand(object, offset)); |
3281 XMMRegister result = ToDoubleRegister(instr->result()); | |
3282 __ movsd(result, FieldOperand(object, offset)); | |
3283 } else { | |
3284 X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset)); | |
3285 } | |
3286 return; | 2923 return; |
3287 } | 2924 } |
3288 | 2925 |
3289 Register result = ToRegister(instr->result()); | 2926 Register result = ToRegister(instr->result()); |
3290 if (!access.IsInobject()) { | 2927 if (!access.IsInobject()) { |
3291 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset)); | 2928 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset)); |
3292 object = result; | 2929 object = result; |
3293 } | 2930 } |
3294 __ Load(result, FieldOperand(object, offset), access.representation()); | 2931 __ Load(result, FieldOperand(object, offset), access.representation()); |
3295 } | 2932 } |
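
DoLoadNamedField distinguishes two storage tiers: in-object fields sit at a fixed offset inside the object itself, while overflow properties need one extra load through the properties array. A schematic of that indirection (the layouts are illustrative, not V8's object model):

    #include <cstdio>

    // Illustrative two-tier object layout: a few fast in-object slots,
    // plus a separate backing store for properties that didn't fit.
    struct Properties { int slots[16]; };
    struct JSObjectModel {
      Properties* properties;  // JSObject::kPropertiesOffset, roughly
      int in_object[4];        // fast in-object fields
    };

    int LoadNamedField(const JSObjectModel* obj, bool in_object, int index) {
      if (in_object) return obj->in_object[index];  // one load
      return obj->properties->slots[index];         // extra indirection
    }

    int main() {
      Properties props = {{0}};
      props.slots[2] = 99;
      JSObjectModel obj = {&props, {10, 11, 12, 13}};
      std::printf("%d %d\n", LoadNamedField(&obj, true, 1),    // 11
                             LoadNamedField(&obj, false, 2));  // 99
    }
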
(...skipping 106 matching lines...)
3402 } | 3039 } |
3403 Operand operand(BuildFastArrayOperand( | 3040 Operand operand(BuildFastArrayOperand( |
3404 instr->elements(), | 3041 instr->elements(), |
3405 key, | 3042 key, |
3406 instr->hydrogen()->key()->representation(), | 3043 instr->hydrogen()->key()->representation(), |
3407 elements_kind, | 3044 elements_kind, |
3408 0, | 3045 0, |
3409 instr->additional_index())); | 3046 instr->additional_index())); |
3410 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || | 3047 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || |
3411 elements_kind == FLOAT32_ELEMENTS) { | 3048 elements_kind == FLOAT32_ELEMENTS) { |
3412 if (CpuFeatures::IsSupported(SSE2)) { | 3049 XMMRegister result(ToDoubleRegister(instr->result())); |
3413 CpuFeatureScope scope(masm(), SSE2); | 3050 __ movss(result, operand); |
3414 XMMRegister result(ToDoubleRegister(instr->result())); | 3051 __ cvtss2sd(result, result); |
3415 __ movss(result, operand); | |
3416 __ cvtss2sd(result, result); | |
3417 } else { | |
3418 X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand); | |
3419 } | |
3420 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS || | 3052 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS || |
3421 elements_kind == FLOAT64_ELEMENTS) { | 3053 elements_kind == FLOAT64_ELEMENTS) { |
3422 if (CpuFeatures::IsSupported(SSE2)) { | 3054 __ movsd(ToDoubleRegister(instr->result()), operand); |
3423 CpuFeatureScope scope(masm(), SSE2); | |
3424 __ movsd(ToDoubleRegister(instr->result()), operand); | |
3425 } else { | |
3426 X87Mov(ToX87Register(instr->result()), operand); | |
3427 } | |
3428 } else { | 3055 } else { |
3429 Register result(ToRegister(instr->result())); | 3056 Register result(ToRegister(instr->result())); |
3430 switch (elements_kind) { | 3057 switch (elements_kind) { |
3431 case EXTERNAL_INT8_ELEMENTS: | 3058 case EXTERNAL_INT8_ELEMENTS: |
3432 case INT8_ELEMENTS: | 3059 case INT8_ELEMENTS: |
3433 __ movsx_b(result, operand); | 3060 __ movsx_b(result, operand); |
3434 break; | 3061 break; |
3435 case EXTERNAL_UINT8_CLAMPED_ELEMENTS: | 3062 case EXTERNAL_UINT8_CLAMPED_ELEMENTS: |
3436 case EXTERNAL_UINT8_ELEMENTS: | 3063 case EXTERNAL_UINT8_ELEMENTS: |
3437 case UINT8_ELEMENTS: | 3064 case UINT8_ELEMENTS: |
(...skipping 53 matching lines...)
3491 DeoptimizeIf(equal, instr->environment()); | 3118 DeoptimizeIf(equal, instr->environment()); |
3492 } | 3119 } |
3493 | 3120 |
3494 Operand double_load_operand = BuildFastArrayOperand( | 3121 Operand double_load_operand = BuildFastArrayOperand( |
3495 instr->elements(), | 3122 instr->elements(), |
3496 instr->key(), | 3123 instr->key(), |
3497 instr->hydrogen()->key()->representation(), | 3124 instr->hydrogen()->key()->representation(), |
3498 FAST_DOUBLE_ELEMENTS, | 3125 FAST_DOUBLE_ELEMENTS, |
3499 FixedDoubleArray::kHeaderSize - kHeapObjectTag, | 3126 FixedDoubleArray::kHeaderSize - kHeapObjectTag, |
3500 instr->additional_index()); | 3127 instr->additional_index()); |
3501 if (CpuFeatures::IsSupported(SSE2)) { | 3128 XMMRegister result = ToDoubleRegister(instr->result()); |
3502 CpuFeatureScope scope(masm(), SSE2); | 3129 __ movsd(result, double_load_operand); |
3503 XMMRegister result = ToDoubleRegister(instr->result()); | |
3504 __ movsd(result, double_load_operand); | |
3505 } else { | |
3506 X87Mov(ToX87Register(instr->result()), double_load_operand); | |
3507 } | |
3508 } | 3130 } |
3509 | 3131 |
3510 | 3132 |
3511 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 3133 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { |
3512 Register result = ToRegister(instr->result()); | 3134 Register result = ToRegister(instr->result()); |
3513 | 3135 |
3514 // Load the result. | 3136 // Load the result. |
3515 __ mov(result, | 3137 __ mov(result, |
3516 BuildFastArrayOperand(instr->elements(), | 3138 BuildFastArrayOperand(instr->elements(), |
3517 instr->key(), | 3139 instr->key(), |
(...skipping 401 matching lines...)
3919 DeoptimizeIf(negative, instr->environment()); | 3541 DeoptimizeIf(negative, instr->environment()); |
3920 __ bind(&is_positive); | 3542 __ bind(&is_positive); |
3921 } | 3543 } |
3922 | 3544 |
3923 | 3545 |
3924 void LCodeGen::DoMathAbs(LMathAbs* instr) { | 3546 void LCodeGen::DoMathAbs(LMathAbs* instr) { |
3925 // Class for deferred case. | 3547 // Class for deferred case. |
3926 class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode { | 3548 class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode { |
3927 public: | 3549 public: |
3928 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, | 3550 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, |
3929 LMathAbs* instr, | 3551 LMathAbs* instr) |
3930 const X87Stack& x87_stack) | 3552 : LDeferredCode(codegen), instr_(instr) { } |
3931 : LDeferredCode(codegen, x87_stack), instr_(instr) { } | |
3932 virtual void Generate() V8_OVERRIDE { | 3553 virtual void Generate() V8_OVERRIDE { |
3933 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); | 3554 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); |
3934 } | 3555 } |
3935 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 3556 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
3936 private: | 3557 private: |
3937 LMathAbs* instr_; | 3558 LMathAbs* instr_; |
3938 }; | 3559 }; |
3939 | 3560 |
3940 ASSERT(instr->value()->Equals(instr->result())); | 3561 ASSERT(instr->value()->Equals(instr->result())); |
3941 Representation r = instr->hydrogen()->value()->representation(); | 3562 Representation r = instr->hydrogen()->value()->representation(); |
3942 | 3563 |
3943 CpuFeatureScope scope(masm(), SSE2); | |
3944 if (r.IsDouble()) { | 3564 if (r.IsDouble()) { |
3945 XMMRegister scratch = double_scratch0(); | 3565 XMMRegister scratch = double_scratch0(); |
3946 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3566 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
3947 __ xorps(scratch, scratch); | 3567 __ xorps(scratch, scratch); |
3948 __ subsd(scratch, input_reg); | 3568 __ subsd(scratch, input_reg); |
3949 __ andps(input_reg, scratch); | 3569 __ andps(input_reg, scratch); |
3950 } else if (r.IsSmiOrInteger32()) { | 3570 } else if (r.IsSmiOrInteger32()) { |
3951 EmitIntegerMathAbs(instr); | 3571 EmitIntegerMathAbs(instr); |
3952 } else { // Tagged case. | 3572 } else { // Tagged case. |
3953 DeferredMathAbsTaggedHeapNumber* deferred = | 3573 DeferredMathAbsTaggedHeapNumber* deferred = |
3954 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_); | 3574 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); |
3955 Register input_reg = ToRegister(instr->value()); | 3575 Register input_reg = ToRegister(instr->value()); |
3956 // Smi check. | 3576 // Smi check. |
3957 __ JumpIfNotSmi(input_reg, deferred->entry()); | 3577 __ JumpIfNotSmi(input_reg, deferred->entry()); |
3958 EmitIntegerMathAbs(instr); | 3578 EmitIntegerMathAbs(instr); |
3959 __ bind(deferred->exit()); | 3579 __ bind(deferred->exit()); |
3960 } | 3580 } |
3961 } | 3581 } |
3962 | 3582 |
3963 | 3583 |
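Note on the SSE2 path kept above: the xorps/subsd/andps sequence in DoMathAbs clears the IEEE-754 sign bit without a branch. Since x and 0.0 - x differ only in the sign bit, AND-ing their bit patterns yields |x|. A minimal standalone C++ sketch of the same trick (the BitwiseAbs helper and the memcpy-based bit access are illustrative, not part of this CL):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Computes |x| by AND-ing the bit patterns of x and 0.0 - x.
    // The two values differ only in the IEEE-754 sign bit, so the
    // AND keeps exponent/mantissa and forces the sign bit to zero,
    // mirroring the xorps/subsd/andps sequence above.
    static double BitwiseAbs(double x) {
      double neg = 0.0 - x;
      uint64_t a, b;
      std::memcpy(&a, &x, sizeof a);
      std::memcpy(&b, &neg, sizeof b);
      uint64_t r = a & b;
      double result;
      std::memcpy(&result, &r, sizeof result);
      return result;
    }

    int main() {
      assert(BitwiseAbs(-3.5) == 3.5);
      assert(BitwiseAbs(3.5) == 3.5);
      return 0;
    }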
3964 void LCodeGen::DoMathFloor(LMathFloor* instr) { | 3584 void LCodeGen::DoMathFloor(LMathFloor* instr) { |
3965 CpuFeatureScope scope(masm(), SSE2); | |
3966 XMMRegister xmm_scratch = double_scratch0(); | 3585 XMMRegister xmm_scratch = double_scratch0(); |
3967 Register output_reg = ToRegister(instr->result()); | 3586 Register output_reg = ToRegister(instr->result()); |
3968 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3587 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
3969 | 3588 |
3970 if (CpuFeatures::IsSupported(SSE4_1)) { | 3589 if (CpuFeatures::IsSupported(SSE4_1)) { |
3971 CpuFeatureScope scope(masm(), SSE4_1); | 3590 CpuFeatureScope scope(masm(), SSE4_1); |
3972 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3591 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
3973 // Deoptimize on negative zero. | 3592 // Deoptimize on negative zero. |
3974 Label non_zero; | 3593 Label non_zero; |
3975 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. | 3594 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. |
(...skipping 45 matching lines...)
4021 __ j(equal, &done, Label::kNear); | 3640 __ j(equal, &done, Label::kNear); |
4022 __ sub(output_reg, Immediate(1)); | 3641 __ sub(output_reg, Immediate(1)); |
4023 DeoptimizeIf(overflow, instr->environment()); | 3642 DeoptimizeIf(overflow, instr->environment()); |
4024 | 3643 |
4025 __ bind(&done); | 3644 __ bind(&done); |
4026 } | 3645 } |
4027 } | 3646 } |
4028 | 3647 |
4029 | 3648 |
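The non-SSE4.1 tail of DoMathFloor visible above computes floor by truncating toward zero (cvttsd2si), round-tripping back to double, and subtracting one when the comparison shows the input was negative and fractional. A small C++ sketch of that compensation scheme, with the range checks and the minus-zero deopt from the CL omitted:

    #include <cassert>

    // Floor via truncate-toward-zero plus compensation: the cast
    // truncates like cvttsd2si, and for negative non-integral
    // inputs the truncated result is one too large, matching the
    // sub(output_reg, Immediate(1)) above.
    static int FloorViaTruncation(double x) {
      int t = static_cast<int>(x);       // truncates toward zero
      if (static_cast<double>(t) > x) {  // negative and fractional?
        --t;                             // compensate
      }
      return t;
    }

    int main() {
      assert(FloorViaTruncation(2.7) == 2);
      assert(FloorViaTruncation(-2.7) == -3);
      assert(FloorViaTruncation(-2.0) == -2);
      return 0;
    }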
4030 void LCodeGen::DoMathRound(LMathRound* instr) { | 3649 void LCodeGen::DoMathRound(LMathRound* instr) { |
4031 CpuFeatureScope scope(masm(), SSE2); | |
4032 Register output_reg = ToRegister(instr->result()); | 3650 Register output_reg = ToRegister(instr->result()); |
4033 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3651 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
4034 XMMRegister xmm_scratch = double_scratch0(); | 3652 XMMRegister xmm_scratch = double_scratch0(); |
4035 XMMRegister input_temp = ToDoubleRegister(instr->temp()); | 3653 XMMRegister input_temp = ToDoubleRegister(instr->temp()); |
4036 ExternalReference one_half = ExternalReference::address_of_one_half(); | 3654 ExternalReference one_half = ExternalReference::address_of_one_half(); |
4037 ExternalReference minus_one_half = | 3655 ExternalReference minus_one_half = |
4038 ExternalReference::address_of_minus_one_half(); | 3656 ExternalReference::address_of_minus_one_half(); |
4039 | 3657 |
4040 Label done, round_to_zero, below_one_half, do_not_compensate; | 3658 Label done, round_to_zero, below_one_half, do_not_compensate; |
4041 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; | 3659 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; |
(...skipping 42 matching lines...)
4084 __ test(output_reg, Immediate(1)); | 3702 __ test(output_reg, Immediate(1)); |
4085 __ RecordComment("Minus zero"); | 3703 __ RecordComment("Minus zero"); |
4086 DeoptimizeIf(not_zero, instr->environment()); | 3704 DeoptimizeIf(not_zero, instr->environment()); |
4087 } | 3705 } |
4088 __ Move(output_reg, Immediate(0)); | 3706 __ Move(output_reg, Immediate(0)); |
4089 __ bind(&done); | 3707 __ bind(&done); |
4090 } | 3708 } |
4091 | 3709 |
4092 | 3710 |
4093 void LCodeGen::DoMathSqrt(LMathSqrt* instr) { | 3711 void LCodeGen::DoMathSqrt(LMathSqrt* instr) { |
4094 CpuFeatureScope scope(masm(), SSE2); | |
4095 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3712 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
4096 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); | 3713 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); |
4097 __ sqrtsd(input_reg, input_reg); | 3714 __ sqrtsd(input_reg, input_reg); |
4098 } | 3715 } |
4099 | 3716 |
4100 | 3717 |
4101 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { | 3718 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { |
4102 CpuFeatureScope scope(masm(), SSE2); | |
4103 XMMRegister xmm_scratch = double_scratch0(); | 3719 XMMRegister xmm_scratch = double_scratch0(); |
4104 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3720 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
4105 Register scratch = ToRegister(instr->temp()); | 3721 Register scratch = ToRegister(instr->temp()); |
4106 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); | 3722 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); |
4107 | 3723 |
4108 // Note that according to ECMA-262 15.8.2.13: | 3724 // Note that according to ECMA-262 15.8.2.13: |
4109 // Math.pow(-Infinity, 0.5) == Infinity | 3725 // Math.pow(-Infinity, 0.5) == Infinity |
4110 // Math.sqrt(-Infinity) == NaN | 3726 // Math.sqrt(-Infinity) == NaN |
4111 Label done, sqrt; | 3727 Label done, sqrt; |
4112 // Check base for -Infinity. According to IEEE-754, single-precision | 3728 // Check base for -Infinity. According to IEEE-754, single-precision |
(...skipping 47 matching lines...)
4160 __ CallStub(&stub); | 3776 __ CallStub(&stub); |
4161 } else { | 3777 } else { |
4162 ASSERT(exponent_type.IsDouble()); | 3778 ASSERT(exponent_type.IsDouble()); |
4163 MathPowStub stub(isolate(), MathPowStub::DOUBLE); | 3779 MathPowStub stub(isolate(), MathPowStub::DOUBLE); |
4164 __ CallStub(&stub); | 3780 __ CallStub(&stub); |
4165 } | 3781 } |
4166 } | 3782 } |
4167 | 3783 |
4168 | 3784 |
4169 void LCodeGen::DoMathLog(LMathLog* instr) { | 3785 void LCodeGen::DoMathLog(LMathLog* instr) { |
4170 CpuFeatureScope scope(masm(), SSE2); | |
4171 ASSERT(instr->value()->Equals(instr->result())); | 3786 ASSERT(instr->value()->Equals(instr->result())); |
4172 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3787 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
4173 XMMRegister xmm_scratch = double_scratch0(); | 3788 XMMRegister xmm_scratch = double_scratch0(); |
4174 Label positive, done, zero; | 3789 Label positive, done, zero; |
4175 __ xorps(xmm_scratch, xmm_scratch); | 3790 __ xorps(xmm_scratch, xmm_scratch); |
4176 __ ucomisd(input_reg, xmm_scratch); | 3791 __ ucomisd(input_reg, xmm_scratch); |
4177 __ j(above, &positive, Label::kNear); | 3792 __ j(above, &positive, Label::kNear); |
4178 __ j(not_carry, &zero, Label::kNear); | 3793 __ j(not_carry, &zero, Label::kNear); |
4179 ExternalReference nan = | 3794 ExternalReference nan = |
4180 ExternalReference::address_of_canonical_non_hole_nan(); | 3795 ExternalReference::address_of_canonical_non_hole_nan(); |
(...skipping 11 matching lines...)
4192 __ fld_d(Operand(esp, 0)); | 3807 __ fld_d(Operand(esp, 0)); |
4193 __ fyl2x(); | 3808 __ fyl2x(); |
4194 __ fstp_d(Operand(esp, 0)); | 3809 __ fstp_d(Operand(esp, 0)); |
4195 __ movsd(input_reg, Operand(esp, 0)); | 3810 __ movsd(input_reg, Operand(esp, 0)); |
4196 __ add(Operand(esp), Immediate(kDoubleSize)); | 3811 __ add(Operand(esp), Immediate(kDoubleSize)); |
4197 __ bind(&done); | 3812 __ bind(&done); |
4198 } | 3813 } |
4199 | 3814 |
4200 | 3815 |
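DoMathLog relies on fyl2x, which computes st(1) * log2(st(0)); assuming the elided lines above load ln(2) first (fldln2), the result is the natural log via the identity ln(x) = ln(2) * log2(x). A one-line check of that identity:

    #include <cassert>
    #include <cmath>

    // fyl2x computes st(1) * log2(st(0)); with ln(2) in st(1)
    // that is exactly the natural log.
    int main() {
      const double kLn2 = 0.6931471805599453;
      double x = 42.0;
      assert(std::fabs(kLn2 * std::log2(x) - std::log(x)) < 1e-12);
      return 0;
    }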
4201 void LCodeGen::DoMathClz32(LMathClz32* instr) { | 3816 void LCodeGen::DoMathClz32(LMathClz32* instr) { |
4202 CpuFeatureScope scope(masm(), SSE2); | |
4203 Register input = ToRegister(instr->value()); | 3817 Register input = ToRegister(instr->value()); |
4204 Register result = ToRegister(instr->result()); | 3818 Register result = ToRegister(instr->result()); |
4205 Label not_zero_input; | 3819 Label not_zero_input; |
4206 __ bsr(result, input); | 3820 __ bsr(result, input); |
4207 | 3821 |
4208 __ j(not_zero, ¬_zero_input); | 3822 __ j(not_zero, ¬_zero_input); |
4209 __ Move(result, Immediate(63)); // 63^31 == 32 | 3823 __ Move(result, Immediate(63)); // 63^31 == 32 |
4210 | 3824 |
4211 __ bind(¬_zero_input); | 3825 __ bind(¬_zero_input); |
4212 __ xor_(result, Immediate(31)); // for x in [0..31], 31^x == 31-x. | 3826 __ xor_(result, Immediate(31)); // for x in [0..31], 31^x == 31-x. |
4213 } | 3827 } |
4214 | 3828 |
4215 | 3829 |
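The clz32 sequence leans on two small identities, both noted in the code comments: for n in [0..31], 31 ^ n == 31 - n (xor against all-ones in the low five bits is a subtraction from 31), and the zero-input fixup value 63 satisfies 63 ^ 31 == 32. A portable C++ sketch where the shift loop stands in for bsr:

    #include <cassert>
    #include <cstdint>

    // Math.clz32 via the bsr/xor trick: bsr yields the index of
    // the highest set bit, and xor with 31 turns that into the
    // leading-zero count; 63 is the fixup for zero input.
    static int Clz32(uint32_t x) {
      int bsr = 63;                 // what Move(result, Immediate(63)) provides
      if (x != 0) {
        bsr = 31;
        while (!(x & 0x80000000u)) { x <<= 1; --bsr; }  // top set bit index
      }
      return bsr ^ 31;
    }

    int main() {
      assert(Clz32(0) == 32);
      assert(Clz32(1) == 31);
      assert(Clz32(0x80000000u) == 0);
      return 0;
    }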
4216 void LCodeGen::DoMathExp(LMathExp* instr) { | 3830 void LCodeGen::DoMathExp(LMathExp* instr) { |
4217 CpuFeatureScope scope(masm(), SSE2); | |
4218 XMMRegister input = ToDoubleRegister(instr->value()); | 3831 XMMRegister input = ToDoubleRegister(instr->value()); |
4219 XMMRegister result = ToDoubleRegister(instr->result()); | 3832 XMMRegister result = ToDoubleRegister(instr->result()); |
4220 XMMRegister temp0 = double_scratch0(); | 3833 XMMRegister temp0 = double_scratch0(); |
4221 Register temp1 = ToRegister(instr->temp1()); | 3834 Register temp1 = ToRegister(instr->temp1()); |
4222 Register temp2 = ToRegister(instr->temp2()); | 3835 Register temp2 = ToRegister(instr->temp2()); |
4223 | 3836 |
4224 MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2); | 3837 MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2); |
4225 } | 3838 } |
4226 | 3839 |
4227 | 3840 |
(...skipping 158 matching lines...)
4386 DeoptimizeIf(zero, instr->environment()); | 3999 DeoptimizeIf(zero, instr->environment()); |
4387 | 4000 |
4388 // We know now that value is not a smi, so we can omit the check below. | 4001 // We know now that value is not a smi, so we can omit the check below. |
4389 check_needed = OMIT_SMI_CHECK; | 4002 check_needed = OMIT_SMI_CHECK; |
4390 } | 4003 } |
4391 } | 4004 } |
4392 } else if (representation.IsDouble()) { | 4005 } else if (representation.IsDouble()) { |
4393 ASSERT(access.IsInobject()); | 4006 ASSERT(access.IsInobject()); |
4394 ASSERT(!instr->hydrogen()->has_transition()); | 4007 ASSERT(!instr->hydrogen()->has_transition()); |
4395 ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); | 4008 ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); |
4396 if (CpuFeatures::IsSupported(SSE2)) { | 4009 XMMRegister value = ToDoubleRegister(instr->value()); |
4397 CpuFeatureScope scope(masm(), SSE2); | 4010 __ movsd(FieldOperand(object, offset), value); |
4398 XMMRegister value = ToDoubleRegister(instr->value()); | |
4399 __ movsd(FieldOperand(object, offset), value); | |
4400 } else { | |
4401 X87Register value = ToX87Register(instr->value()); | |
4402 X87Mov(FieldOperand(object, offset), value); | |
4403 } | |
4404 return; | 4011 return; |
4405 } | 4012 } |
4406 | 4013 |
4407 if (instr->hydrogen()->has_transition()) { | 4014 if (instr->hydrogen()->has_transition()) { |
4408 Handle<Map> transition = instr->hydrogen()->transition_map(); | 4015 Handle<Map> transition = instr->hydrogen()->transition_map(); |
4409 AddDeprecationDependency(transition); | 4016 AddDeprecationDependency(transition); |
4410 if (!instr->hydrogen()->NeedsWriteBarrierForMap()) { | 4017 if (!instr->hydrogen()->NeedsWriteBarrierForMap()) { |
4411 __ mov(FieldOperand(object, HeapObject::kMapOffset), transition); | 4018 __ mov(FieldOperand(object, HeapObject::kMapOffset), transition); |
4412 } else { | 4019 } else { |
4413 Register temp = ToRegister(instr->temp()); | 4020 Register temp = ToRegister(instr->temp()); |
4414 Register temp_map = ToRegister(instr->temp_map()); | 4021 Register temp_map = ToRegister(instr->temp_map()); |
4415 __ mov(temp_map, transition); | 4022 __ mov(temp_map, transition); |
4416 __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map); | 4023 __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map); |
4417 // Update the write barrier for the map field. | 4024 // Update the write barrier for the map field. |
4418 __ RecordWriteField(object, | 4025 __ RecordWriteField(object, |
4419 HeapObject::kMapOffset, | 4026 HeapObject::kMapOffset, |
4420 temp_map, | 4027 temp_map, |
4421 temp, | 4028 temp, |
4422 GetSaveFPRegsMode(isolate()), | 4029 kSaveFPRegs, |
4423 OMIT_REMEMBERED_SET, | 4030 OMIT_REMEMBERED_SET, |
4424 OMIT_SMI_CHECK); | 4031 OMIT_SMI_CHECK); |
4425 } | 4032 } |
4426 } | 4033 } |
4427 | 4034 |
4428 // Do the store. | 4035 // Do the store. |
4429 Register write_register = object; | 4036 Register write_register = object; |
4430 if (!access.IsInobject()) { | 4037 if (!access.IsInobject()) { |
4431 write_register = ToRegister(instr->temp()); | 4038 write_register = ToRegister(instr->temp()); |
4432 __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset)); | 4039 __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset)); |
(...skipping 20 matching lines...)
4453 } | 4060 } |
4454 | 4061 |
4455 if (instr->hydrogen()->NeedsWriteBarrier()) { | 4062 if (instr->hydrogen()->NeedsWriteBarrier()) { |
4456 Register value = ToRegister(instr->value()); | 4063 Register value = ToRegister(instr->value()); |
4457 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object; | 4064 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object; |
4458 // Update the write barrier for the object for in-object properties. | 4065 // Update the write barrier for the object for in-object properties. |
4459 __ RecordWriteField(write_register, | 4066 __ RecordWriteField(write_register, |
4460 offset, | 4067 offset, |
4461 value, | 4068 value, |
4462 temp, | 4069 temp, |
4463 GetSaveFPRegsMode(isolate()), | 4070 kSaveFPRegs, |
4464 EMIT_REMEMBERED_SET, | 4071 EMIT_REMEMBERED_SET, |
4465 check_needed); | 4072 check_needed); |
4466 } | 4073 } |
4467 } | 4074 } |
4468 | 4075 |
4469 | 4076 |
4470 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { | 4077 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { |
4471 ASSERT(ToRegister(instr->context()).is(esi)); | 4078 ASSERT(ToRegister(instr->context()).is(esi)); |
4472 ASSERT(ToRegister(instr->object()).is(edx)); | 4079 ASSERT(ToRegister(instr->object()).is(edx)); |
4473 ASSERT(ToRegister(instr->value()).is(eax)); | 4080 ASSERT(ToRegister(instr->value()).is(eax)); |
(...skipping 39 matching lines...)
4513 } | 4120 } |
4514 Operand operand(BuildFastArrayOperand( | 4121 Operand operand(BuildFastArrayOperand( |
4515 instr->elements(), | 4122 instr->elements(), |
4516 key, | 4123 key, |
4517 instr->hydrogen()->key()->representation(), | 4124 instr->hydrogen()->key()->representation(), |
4518 elements_kind, | 4125 elements_kind, |
4519 0, | 4126 0, |
4520 instr->additional_index())); | 4127 instr->additional_index())); |
4521 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || | 4128 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || |
4522 elements_kind == FLOAT32_ELEMENTS) { | 4129 elements_kind == FLOAT32_ELEMENTS) { |
4523 if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) { | 4130 XMMRegister xmm_scratch = double_scratch0(); |
4524 CpuFeatureScope scope(masm(), SSE2); | 4131 __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value())); |
4525 XMMRegister xmm_scratch = double_scratch0(); | 4132 __ movss(operand, xmm_scratch); |
4526 __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value())); | |
4527 __ movss(operand, xmm_scratch); | |
4528 } else { | |
4529 __ fld(0); | |
4530 __ fstp_s(operand); | |
4531 } | |
4532 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS || | 4133 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS || |
4533 elements_kind == FLOAT64_ELEMENTS) { | 4134 elements_kind == FLOAT64_ELEMENTS) { |
4534 if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) { | 4135 __ movsd(operand, ToDoubleRegister(instr->value())); |
4535 CpuFeatureScope scope(masm(), SSE2); | |
4536 __ movsd(operand, ToDoubleRegister(instr->value())); | |
4537 } else { | |
4538 X87Mov(operand, ToX87Register(instr->value())); | |
4539 } | |
4540 } else { | 4136 } else { |
4541 Register value = ToRegister(instr->value()); | 4137 Register value = ToRegister(instr->value()); |
4542 switch (elements_kind) { | 4138 switch (elements_kind) { |
4543 case EXTERNAL_UINT8_CLAMPED_ELEMENTS: | 4139 case EXTERNAL_UINT8_CLAMPED_ELEMENTS: |
4544 case EXTERNAL_UINT8_ELEMENTS: | 4140 case EXTERNAL_UINT8_ELEMENTS: |
4545 case EXTERNAL_INT8_ELEMENTS: | 4141 case EXTERNAL_INT8_ELEMENTS: |
4546 case UINT8_ELEMENTS: | 4142 case UINT8_ELEMENTS: |
4547 case INT8_ELEMENTS: | 4143 case INT8_ELEMENTS: |
4548 case UINT8_CLAMPED_ELEMENTS: | 4144 case UINT8_CLAMPED_ELEMENTS: |
4549 __ mov_b(operand, value); | 4145 __ mov_b(operand, value); |
(...skipping 33 matching lines...)
4583 ExternalReference canonical_nan_reference = | 4179 ExternalReference canonical_nan_reference = |
4584 ExternalReference::address_of_canonical_non_hole_nan(); | 4180 ExternalReference::address_of_canonical_non_hole_nan(); |
4585 Operand double_store_operand = BuildFastArrayOperand( | 4181 Operand double_store_operand = BuildFastArrayOperand( |
4586 instr->elements(), | 4182 instr->elements(), |
4587 instr->key(), | 4183 instr->key(), |
4588 instr->hydrogen()->key()->representation(), | 4184 instr->hydrogen()->key()->representation(), |
4589 FAST_DOUBLE_ELEMENTS, | 4185 FAST_DOUBLE_ELEMENTS, |
4590 FixedDoubleArray::kHeaderSize - kHeapObjectTag, | 4186 FixedDoubleArray::kHeaderSize - kHeapObjectTag, |
4591 instr->additional_index()); | 4187 instr->additional_index()); |
4592 | 4188 |
4593 if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) { | 4189 XMMRegister value = ToDoubleRegister(instr->value()); |
4594 CpuFeatureScope scope(masm(), SSE2); | |
4595 XMMRegister value = ToDoubleRegister(instr->value()); | |
4596 | 4190 |
4597 if (instr->NeedsCanonicalization()) { | 4191 if (instr->NeedsCanonicalization()) { |
4598 Label have_value; | 4192 Label have_value; |
4599 | 4193 |
4600 __ ucomisd(value, value); | 4194 __ ucomisd(value, value); |
4601 __ j(parity_odd, &have_value, Label::kNear); // NaN. | 4195 __ j(parity_odd, &have_value, Label::kNear); // NaN. |
4602 | 4196 |
4603 __ movsd(value, Operand::StaticVariable(canonical_nan_reference)); | 4197 __ movsd(value, Operand::StaticVariable(canonical_nan_reference)); |
4604 __ bind(&have_value); | 4198 __ bind(&have_value); |
4605 } | 4199 } |
4606 | 4200 |
4607 __ movsd(double_store_operand, value); | 4201 __ movsd(double_store_operand, value); |
4608 } else { | |
4609 // Can't use SSE2 in the serializer | |
4610 if (instr->hydrogen()->IsConstantHoleStore()) { | |
4611 // This means we should store the (double) hole. No floating point | |
4612 // registers required. | |
4613 double nan_double = FixedDoubleArray::hole_nan_as_double(); | |
4614 uint64_t int_val = BitCast<uint64_t, double>(nan_double); | |
4615 int32_t lower = static_cast<int32_t>(int_val); | |
4616 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); | |
4617 | |
4618 __ mov(double_store_operand, Immediate(lower)); | |
4619 Operand double_store_operand2 = BuildFastArrayOperand( | |
4620 instr->elements(), | |
4621 instr->key(), | |
4622 instr->hydrogen()->key()->representation(), | |
4623 FAST_DOUBLE_ELEMENTS, | |
4624 FixedDoubleArray::kHeaderSize - kHeapObjectTag + kPointerSize, | |
4625 instr->additional_index()); | |
4626 __ mov(double_store_operand2, Immediate(upper)); | |
4627 } else { | |
4628 Label no_special_nan_handling; | |
4629 X87Register value = ToX87Register(instr->value()); | |
4630 X87Fxch(value); | |
4631 | |
4632 if (instr->NeedsCanonicalization()) { | |
4633 __ fld(0); | |
4634 __ fld(0); | |
4635 __ FCmp(); | |
4636 | |
4637 __ j(parity_odd, &no_special_nan_handling, Label::kNear); | |
4638 __ sub(esp, Immediate(kDoubleSize)); | |
4639 __ fst_d(MemOperand(esp, 0)); | |
4640 __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)), | |
4641 Immediate(kHoleNanUpper32)); | |
4642 __ add(esp, Immediate(kDoubleSize)); | |
4643 Label canonicalize; | |
4644 __ j(not_equal, &canonicalize, Label::kNear); | |
4645 __ jmp(&no_special_nan_handling, Label::kNear); | |
4646 __ bind(&canonicalize); | |
4647 __ fstp(0); | |
4648 __ fld_d(Operand::StaticVariable(canonical_nan_reference)); | |
4649 } | |
4650 | |
4651 __ bind(&no_special_nan_handling); | |
4652 __ fst_d(double_store_operand); | |
4653 } | |
4654 } | |
4655 } | 4202 } |
4656 | 4203 |
4657 | 4204 |
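The canonicalization block above guards FixedDoubleArray stores: the array format reserves one specific NaN bit pattern for the hole, so any other NaN must be rewritten before the store. The ucomisd self-compare detects NaN because NaN is the only value unequal to itself. A standalone sketch; the canonical bit pattern chosen here is illustrative, not necessarily V8's:

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <limits>

    // A value is NaN exactly when it does not compare equal to
    // itself (the unordered result of the ucomisd above).
    static const uint64_t kCanonicalNaN = 0x7FF8000000000000ULL;

    static double Canonicalize(double v) {
      if (v != v) {  // self-compare fails: v is some NaN
        std::memcpy(&v, &kCanonicalNaN, sizeof v);
      }
      return v;
    }

    int main() {
      double other_nan = -std::numeric_limits<double>::quiet_NaN();
      double c = Canonicalize(other_nan);
      uint64_t bits;
      std::memcpy(&bits, &c, sizeof bits);
      assert(bits == kCanonicalNaN);
      assert(Canonicalize(1.5) == 1.5);
      return 0;
    }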
4658 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { | 4205 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { |
4659 Register elements = ToRegister(instr->elements()); | 4206 Register elements = ToRegister(instr->elements()); |
4660 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; | 4207 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; |
4661 | 4208 |
4662 Operand operand = BuildFastArrayOperand( | 4209 Operand operand = BuildFastArrayOperand( |
4663 instr->elements(), | 4210 instr->elements(), |
4664 instr->key(), | 4211 instr->key(), |
(...skipping 20 matching lines...)
4685 Register value = ToRegister(instr->value()); | 4232 Register value = ToRegister(instr->value()); |
4686 ASSERT(!instr->key()->IsConstantOperand()); | 4233 ASSERT(!instr->key()->IsConstantOperand()); |
4687 SmiCheck check_needed = | 4234 SmiCheck check_needed = |
4688 instr->hydrogen()->value()->IsHeapObject() | 4235 instr->hydrogen()->value()->IsHeapObject() |
4689 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 4236 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
4690 // Compute address of modified element and store it into key register. | 4237 // Compute address of modified element and store it into key register. |
4691 __ lea(key, operand); | 4238 __ lea(key, operand); |
4692 __ RecordWrite(elements, | 4239 __ RecordWrite(elements, |
4693 key, | 4240 key, |
4694 value, | 4241 value, |
4695 GetSaveFPRegsMode(isolate()), | 4242 kSaveFPRegs, |
4696 EMIT_REMEMBERED_SET, | 4243 EMIT_REMEMBERED_SET, |
4697 check_needed); | 4244 check_needed); |
4698 } | 4245 } |
4699 } | 4246 } |
4700 | 4247 |
4701 | 4248 |
4702 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { | 4249 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { |
4703 // By cases...external, fast-double, fast | 4250 // By cases...external, fast-double, fast |
4704 if (instr->is_typed_elements()) { | 4251 if (instr->is_typed_elements()) { |
4705 DoStoreKeyedExternalArray(instr); | 4252 DoStoreKeyedExternalArray(instr); |
(...skipping 64 matching lines...)
4770 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); | 4317 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); |
4771 } | 4318 } |
4772 __ bind(¬_applicable); | 4319 __ bind(¬_applicable); |
4773 } | 4320 } |
4774 | 4321 |
4775 | 4322 |
4776 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { | 4323 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { |
4777 class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode { | 4324 class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode { |
4778 public: | 4325 public: |
4779 DeferredStringCharCodeAt(LCodeGen* codegen, | 4326 DeferredStringCharCodeAt(LCodeGen* codegen, |
4780 LStringCharCodeAt* instr, | 4327 LStringCharCodeAt* instr) |
4781 const X87Stack& x87_stack) | 4328 : LDeferredCode(codegen), instr_(instr) { } |
4782 : LDeferredCode(codegen, x87_stack), instr_(instr) { } | |
4783 virtual void Generate() V8_OVERRIDE { | 4329 virtual void Generate() V8_OVERRIDE { |
4784 codegen()->DoDeferredStringCharCodeAt(instr_); | 4330 codegen()->DoDeferredStringCharCodeAt(instr_); |
4785 } | 4331 } |
4786 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4332 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
4787 private: | 4333 private: |
4788 LStringCharCodeAt* instr_; | 4334 LStringCharCodeAt* instr_; |
4789 }; | 4335 }; |
4790 | 4336 |
4791 DeferredStringCharCodeAt* deferred = | 4337 DeferredStringCharCodeAt* deferred = |
4792 new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_); | 4338 new(zone()) DeferredStringCharCodeAt(this, instr); |
4793 | 4339 |
4794 StringCharLoadGenerator::Generate(masm(), | 4340 StringCharLoadGenerator::Generate(masm(), |
4795 factory(), | 4341 factory(), |
4796 ToRegister(instr->string()), | 4342 ToRegister(instr->string()), |
4797 ToRegister(instr->index()), | 4343 ToRegister(instr->index()), |
4798 ToRegister(instr->result()), | 4344 ToRegister(instr->result()), |
4799 deferred->entry()); | 4345 deferred->entry()); |
4800 __ bind(deferred->exit()); | 4346 __ bind(deferred->exit()); |
4801 } | 4347 } |
4802 | 4348 |
(...skipping 26 matching lines...)
4829 __ AssertSmi(eax); | 4375 __ AssertSmi(eax); |
4830 __ SmiUntag(eax); | 4376 __ SmiUntag(eax); |
4831 __ StoreToSafepointRegisterSlot(result, eax); | 4377 __ StoreToSafepointRegisterSlot(result, eax); |
4832 } | 4378 } |
4833 | 4379 |
4834 | 4380 |
4835 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { | 4381 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { |
4836 class DeferredStringCharFromCode V8_FINAL : public LDeferredCode { | 4382 class DeferredStringCharFromCode V8_FINAL : public LDeferredCode { |
4837 public: | 4383 public: |
4838 DeferredStringCharFromCode(LCodeGen* codegen, | 4384 DeferredStringCharFromCode(LCodeGen* codegen, |
4839 LStringCharFromCode* instr, | 4385 LStringCharFromCode* instr) |
4840 const X87Stack& x87_stack) | 4386 : LDeferredCode(codegen), instr_(instr) { } |
4841 : LDeferredCode(codegen, x87_stack), instr_(instr) { } | |
4842 virtual void Generate() V8_OVERRIDE { | 4387 virtual void Generate() V8_OVERRIDE { |
4843 codegen()->DoDeferredStringCharFromCode(instr_); | 4388 codegen()->DoDeferredStringCharFromCode(instr_); |
4844 } | 4389 } |
4845 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4390 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
4846 private: | 4391 private: |
4847 LStringCharFromCode* instr_; | 4392 LStringCharFromCode* instr_; |
4848 }; | 4393 }; |
4849 | 4394 |
4850 DeferredStringCharFromCode* deferred = | 4395 DeferredStringCharFromCode* deferred = |
4851 new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_); | 4396 new(zone()) DeferredStringCharFromCode(this, instr); |
4852 | 4397 |
4853 ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); | 4398 ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); |
4854 Register char_code = ToRegister(instr->char_code()); | 4399 Register char_code = ToRegister(instr->char_code()); |
4855 Register result = ToRegister(instr->result()); | 4400 Register result = ToRegister(instr->result()); |
4856 ASSERT(!char_code.is(result)); | 4401 ASSERT(!char_code.is(result)); |
4857 | 4402 |
4858 __ cmp(char_code, String::kMaxOneByteCharCode); | 4403 __ cmp(char_code, String::kMaxOneByteCharCode); |
4859 __ j(above, deferred->entry()); | 4404 __ j(above, deferred->entry()); |
4860 __ Move(result, Immediate(factory()->single_character_string_cache())); | 4405 __ Move(result, Immediate(factory()->single_character_string_cache())); |
4861 __ mov(result, FieldOperand(result, | 4406 __ mov(result, FieldOperand(result, |
(...skipping 31 matching lines...)
4893 instr->hydrogen()->pretenure_flag()); | 4438 instr->hydrogen()->pretenure_flag()); |
4894 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 4439 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
4895 } | 4440 } |
4896 | 4441 |
4897 | 4442 |
4898 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { | 4443 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { |
4899 LOperand* input = instr->value(); | 4444 LOperand* input = instr->value(); |
4900 LOperand* output = instr->result(); | 4445 LOperand* output = instr->result(); |
4901 ASSERT(input->IsRegister() || input->IsStackSlot()); | 4446 ASSERT(input->IsRegister() || input->IsStackSlot()); |
4902 ASSERT(output->IsDoubleRegister()); | 4447 ASSERT(output->IsDoubleRegister()); |
4903 if (CpuFeatures::IsSupported(SSE2)) { | 4448 __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input)); |
4904 CpuFeatureScope scope(masm(), SSE2); | |
4905 __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input)); | |
4906 } else if (input->IsRegister()) { | |
4907 Register input_reg = ToRegister(input); | |
4908 __ push(input_reg); | |
4909 X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand); | |
4910 __ pop(input_reg); | |
4911 } else { | |
4912 X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand); | |
4913 } | |
4914 } | 4449 } |
4915 | 4450 |
4916 | 4451 |
4917 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { | 4452 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { |
4918 LOperand* input = instr->value(); | 4453 LOperand* input = instr->value(); |
4919 LOperand* output = instr->result(); | 4454 LOperand* output = instr->result(); |
4920 if (CpuFeatures::IsSupported(SSE2)) { | 4455 LOperand* temp = instr->temp(); |
4921 CpuFeatureScope scope(masm(), SSE2); | 4456 __ LoadUint32(ToDoubleRegister(output), |
4922 LOperand* temp = instr->temp(); | 4457 ToRegister(input), |
4923 | 4458 ToDoubleRegister(temp)); |
4924 __ LoadUint32(ToDoubleRegister(output), | |
4925 ToRegister(input), | |
4926 ToDoubleRegister(temp)); | |
4927 } else { | |
4928 X87Register res = ToX87Register(output); | |
4929 X87PrepareToWrite(res); | |
4930 __ LoadUint32NoSSE2(ToRegister(input)); | |
4931 X87CommitWrite(res); | |
4932 } | |
4933 } | 4459 } |
4934 | 4460 |
4935 | 4461 |
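ia32 has no unsigned-integer-to-double convert, so LoadUint32 (its body is not shown in this hunk) has to work around cvtsi2sd being signed-only. One standard fixup, sketched here as an assumption about what such a helper does: convert as signed, then compensate by 2^32 when the top bit was set.

    #include <cassert>
    #include <cstdint>

    // Signed convert plus a 2^32 correction for values with the
    // high bit set -- one common way to emulate an unsigned
    // cvtsi2sd on 32-bit x86.
    static double Uint32ToDouble(uint32_t u) {
      double d = static_cast<double>(static_cast<int32_t>(u));
      if (static_cast<int32_t>(u) < 0) d += 4294967296.0;  // + 2^32
      return d;
    }

    int main() {
      assert(Uint32ToDouble(7u) == 7.0);
      assert(Uint32ToDouble(0x80000000u) == 2147483648.0);
      assert(Uint32ToDouble(0xFFFFFFFFu) == 4294967295.0);
      return 0;
    }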
4936 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { | 4462 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { |
4937 class DeferredNumberTagI V8_FINAL : public LDeferredCode { | 4463 class DeferredNumberTagI V8_FINAL : public LDeferredCode { |
4938 public: | 4464 public: |
4939 DeferredNumberTagI(LCodeGen* codegen, | 4465 DeferredNumberTagI(LCodeGen* codegen, |
4940 LNumberTagI* instr, | 4466 LNumberTagI* instr) |
4941 const X87Stack& x87_stack) | 4467 : LDeferredCode(codegen), instr_(instr) { } |
4942 : LDeferredCode(codegen, x87_stack), instr_(instr) { } | |
4943 virtual void Generate() V8_OVERRIDE { | 4468 virtual void Generate() V8_OVERRIDE { |
4944 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(), | 4469 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(), |
4945 NULL, SIGNED_INT32); | 4470 NULL, SIGNED_INT32); |
4946 } | 4471 } |
4947 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4472 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
4948 private: | 4473 private: |
4949 LNumberTagI* instr_; | 4474 LNumberTagI* instr_; |
4950 }; | 4475 }; |
4951 | 4476 |
4952 LOperand* input = instr->value(); | 4477 LOperand* input = instr->value(); |
4953 ASSERT(input->IsRegister() && input->Equals(instr->result())); | 4478 ASSERT(input->IsRegister() && input->Equals(instr->result())); |
4954 Register reg = ToRegister(input); | 4479 Register reg = ToRegister(input); |
4955 | 4480 |
4956 DeferredNumberTagI* deferred = | 4481 DeferredNumberTagI* deferred = |
4957 new(zone()) DeferredNumberTagI(this, instr, x87_stack_); | 4482 new(zone()) DeferredNumberTagI(this, instr); |
4958 __ SmiTag(reg); | 4483 __ SmiTag(reg); |
4959 __ j(overflow, deferred->entry()); | 4484 __ j(overflow, deferred->entry()); |
4960 __ bind(deferred->exit()); | 4485 __ bind(deferred->exit()); |
4961 } | 4486 } |
4962 | 4487 |
4963 | 4488 |
4964 void LCodeGen::DoNumberTagU(LNumberTagU* instr) { | 4489 void LCodeGen::DoNumberTagU(LNumberTagU* instr) { |
4965 class DeferredNumberTagU V8_FINAL : public LDeferredCode { | 4490 class DeferredNumberTagU V8_FINAL : public LDeferredCode { |
4966 public: | 4491 public: |
4967 DeferredNumberTagU(LCodeGen* codegen, | 4492 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) |
4968 LNumberTagU* instr, | 4493 : LDeferredCode(codegen), instr_(instr) { } |
4969 const X87Stack& x87_stack) | |
4970 : LDeferredCode(codegen, x87_stack), instr_(instr) { } | |
4971 virtual void Generate() V8_OVERRIDE { | 4494 virtual void Generate() V8_OVERRIDE { |
4972 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(), | 4495 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(), |
4973 instr_->temp2(), UNSIGNED_INT32); | 4496 instr_->temp2(), UNSIGNED_INT32); |
4974 } | 4497 } |
4975 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4498 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
4976 private: | 4499 private: |
4977 LNumberTagU* instr_; | 4500 LNumberTagU* instr_; |
4978 }; | 4501 }; |
4979 | 4502 |
4980 LOperand* input = instr->value(); | 4503 LOperand* input = instr->value(); |
4981 ASSERT(input->IsRegister() && input->Equals(instr->result())); | 4504 ASSERT(input->IsRegister() && input->Equals(instr->result())); |
4982 Register reg = ToRegister(input); | 4505 Register reg = ToRegister(input); |
4983 | 4506 |
4984 DeferredNumberTagU* deferred = | 4507 DeferredNumberTagU* deferred = |
4985 new(zone()) DeferredNumberTagU(this, instr, x87_stack_); | 4508 new(zone()) DeferredNumberTagU(this, instr); |
4986 __ cmp(reg, Immediate(Smi::kMaxValue)); | 4509 __ cmp(reg, Immediate(Smi::kMaxValue)); |
4987 __ j(above, deferred->entry()); | 4510 __ j(above, deferred->entry()); |
4988 __ SmiTag(reg); | 4511 __ SmiTag(reg); |
4989 __ bind(deferred->exit()); | 4512 __ bind(deferred->exit()); |
4990 } | 4513 } |
4991 | 4514 |
4992 | 4515 |
4993 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, | 4516 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, |
4994 LOperand* value, | 4517 LOperand* value, |
4995 LOperand* temp1, | 4518 LOperand* temp1, |
4996 LOperand* temp2, | 4519 LOperand* temp2, |
4997 IntegerSignedness signedness) { | 4520 IntegerSignedness signedness) { |
4998 Label done, slow; | 4521 Label done, slow; |
4999 Register reg = ToRegister(value); | 4522 Register reg = ToRegister(value); |
5000 Register tmp = ToRegister(temp1); | 4523 Register tmp = ToRegister(temp1); |
5001 XMMRegister xmm_scratch = double_scratch0(); | 4524 XMMRegister xmm_scratch = double_scratch0(); |
5002 | 4525 |
5003 if (signedness == SIGNED_INT32) { | 4526 if (signedness == SIGNED_INT32) { |
5004 // There was overflow, so bits 30 and 31 of the original integer | 4527 // There was overflow, so bits 30 and 31 of the original integer |
5005 // disagree. Try to allocate a heap number in new space and store | 4528 // disagree. Try to allocate a heap number in new space and store |
5006 // the value in there. If that fails, call the runtime system. | 4529 // the value in there. If that fails, call the runtime system. |
5007 __ SmiUntag(reg); | 4530 __ SmiUntag(reg); |
5008 __ xor_(reg, 0x80000000); | 4531 __ xor_(reg, 0x80000000); |
5009 if (CpuFeatures::IsSupported(SSE2)) { | 4532 __ Cvtsi2sd(xmm_scratch, Operand(reg)); |
5010 CpuFeatureScope feature_scope(masm(), SSE2); | |
5011 __ Cvtsi2sd(xmm_scratch, Operand(reg)); | |
5012 } else { | |
5013 __ push(reg); | |
5014 __ fild_s(Operand(esp, 0)); | |
5015 __ pop(reg); | |
5016 } | |
5017 } else { | 4533 } else { |
5018 if (CpuFeatures::IsSupported(SSE2)) { | 4534 __ LoadUint32(xmm_scratch, reg, ToDoubleRegister(temp2)); |
5019 CpuFeatureScope feature_scope(masm(), SSE2); | |
5020 __ LoadUint32(xmm_scratch, reg, ToDoubleRegister(temp2)); | |
5021 } else { | |
5022 // There's no fild variant for unsigned values, so zero-extend to a 64-bit | |
5023 // int manually. | |
5024 __ push(Immediate(0)); | |
5025 __ push(reg); | |
5026 __ fild_d(Operand(esp, 0)); | |
5027 __ pop(reg); | |
5028 __ pop(reg); | |
5029 } | |
5030 } | 4535 } |
5031 | 4536 |
5032 if (FLAG_inline_new) { | 4537 if (FLAG_inline_new) { |
5033 __ AllocateHeapNumber(reg, tmp, no_reg, &slow); | 4538 __ AllocateHeapNumber(reg, tmp, no_reg, &slow); |
5034 __ jmp(&done, Label::kNear); | 4539 __ jmp(&done, Label::kNear); |
5035 } | 4540 } |
5036 | 4541 |
5037 // Slow case: Call the runtime system to do the number allocation. | 4542 // Slow case: Call the runtime system to do the number allocation. |
5038 __ bind(&slow); | 4543 __ bind(&slow); |
5039 { | 4544 { |
(...skipping 13 matching lines...)
5053 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); | 4558 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); |
5054 __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber); | 4559 __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber); |
5055 RecordSafepointWithRegisters( | 4560 RecordSafepointWithRegisters( |
5056 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); | 4561 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); |
5057 __ StoreToSafepointRegisterSlot(reg, eax); | 4562 __ StoreToSafepointRegisterSlot(reg, eax); |
5058 } | 4563 } |
5059 | 4564 |
5060 // Done. Put the value in xmm_scratch into the value of the allocated heap | 4565 // Done. Put the value in xmm_scratch into the value of the allocated heap |
5061 // number. | 4566 // number. |
5062 __ bind(&done); | 4567 __ bind(&done); |
5063 if (CpuFeatures::IsSupported(SSE2)) { | 4568 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch); |
5064 CpuFeatureScope feature_scope(masm(), SSE2); | |
5065 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch); | |
5066 } else { | |
5067 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); | |
5068 } | |
5069 } | 4569 } |
5070 | 4570 |
5071 | 4571 |
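The SIGNED_INT32 branch of DoDeferredNumberTagIU recovers the original integer after a smi tag overflowed: tagging is a left shift by one, which overflows exactly when bits 30 and 31 of the value disagree (as the comment says), and after untagging, an xor with 0x80000000 flips the corrupted sign bit back. A C++ check of that round trip:

    #include <cassert>
    #include <cstdint>

    // Models SmiTag (shift left by one), SmiUntag (arithmetic
    // shift right), and the xor(reg, 0x80000000) recovery used
    // in the deferred code above.
    int main() {
      int32_t v = 0x40000000;                  // bit30 != bit31: tag overflows
      int32_t tagged = static_cast<int32_t>(
          static_cast<uint32_t>(v) << 1);      // wrapped, sign now wrong
      int32_t untagged = tagged >> 1;          // sign-extends the wrong bit
      assert(untagged != v);
      assert((untagged ^ static_cast<int32_t>(0x80000000)) == v);
      return 0;
    }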
5072 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { | 4572 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { |
5073 class DeferredNumberTagD V8_FINAL : public LDeferredCode { | 4573 class DeferredNumberTagD V8_FINAL : public LDeferredCode { |
5074 public: | 4574 public: |
5075 DeferredNumberTagD(LCodeGen* codegen, | 4575 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) |
5076 LNumberTagD* instr, | 4576 : LDeferredCode(codegen), instr_(instr) { } |
5077 const X87Stack& x87_stack) | |
5078 : LDeferredCode(codegen, x87_stack), instr_(instr) { } | |
5079 virtual void Generate() V8_OVERRIDE { | 4577 virtual void Generate() V8_OVERRIDE { |
5080 codegen()->DoDeferredNumberTagD(instr_); | 4578 codegen()->DoDeferredNumberTagD(instr_); |
5081 } | 4579 } |
5082 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4580 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
5083 private: | 4581 private: |
5084 LNumberTagD* instr_; | 4582 LNumberTagD* instr_; |
5085 }; | 4583 }; |
5086 | 4584 |
5087 Register reg = ToRegister(instr->result()); | 4585 Register reg = ToRegister(instr->result()); |
5088 | 4586 |
5089 bool use_sse2 = CpuFeatures::IsSupported(SSE2); | |
5090 if (!use_sse2) { | |
5091 // Put the value to the top of stack | |
5092 X87Register src = ToX87Register(instr->value()); | |
5093 X87LoadForUsage(src); | |
5094 } | |
5095 | |
5096 DeferredNumberTagD* deferred = | 4587 DeferredNumberTagD* deferred = |
5097 new(zone()) DeferredNumberTagD(this, instr, x87_stack_); | 4588 new(zone()) DeferredNumberTagD(this, instr); |
5098 if (FLAG_inline_new) { | 4589 if (FLAG_inline_new) { |
5099 Register tmp = ToRegister(instr->temp()); | 4590 Register tmp = ToRegister(instr->temp()); |
5100 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); | 4591 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); |
5101 } else { | 4592 } else { |
5102 __ jmp(deferred->entry()); | 4593 __ jmp(deferred->entry()); |
5103 } | 4594 } |
5104 __ bind(deferred->exit()); | 4595 __ bind(deferred->exit()); |
5105 if (use_sse2) { | 4596 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
5106 CpuFeatureScope scope(masm(), SSE2); | 4597 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); |
5107 XMMRegister input_reg = ToDoubleRegister(instr->value()); | |
5108 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); | |
5109 } else { | |
5110 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); | |
5111 } | |
5112 } | 4598 } |
5113 | 4599 |
5114 | 4600 |
5115 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { | 4601 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { |
5116 // TODO(3095996): Get rid of this. For now, we need to make the | 4602 // TODO(3095996): Get rid of this. For now, we need to make the |
5117 // result register contain a valid pointer because it is already | 4603 // result register contain a valid pointer because it is already |
5118 // contained in the register pointer map. | 4604 // contained in the register pointer map. |
5119 Register reg = ToRegister(instr->result()); | 4605 Register reg = ToRegister(instr->result()); |
5120 __ Move(reg, Immediate(0)); | 4606 __ Move(reg, Immediate(0)); |
5121 | 4607 |
(...skipping 34 matching lines...)
5156 if (instr->needs_check()) { | 4642 if (instr->needs_check()) { |
5157 __ test(result, Immediate(kSmiTagMask)); | 4643 __ test(result, Immediate(kSmiTagMask)); |
5158 DeoptimizeIf(not_zero, instr->environment()); | 4644 DeoptimizeIf(not_zero, instr->environment()); |
5159 } else { | 4645 } else { |
5160 __ AssertSmi(result); | 4646 __ AssertSmi(result); |
5161 } | 4647 } |
5162 __ SmiUntag(result); | 4648 __ SmiUntag(result); |
5163 } | 4649 } |
5164 | 4650 |
5165 | 4651 |
5166 void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg, | |
5167 Register temp_reg, | |
5168 X87Register res_reg, | |
5169 bool can_convert_undefined_to_nan, | |
5170 bool deoptimize_on_minus_zero, | |
5171 LEnvironment* env, | |
5172 NumberUntagDMode mode) { | |
5173 Label load_smi, done; | |
5174 | |
5175 X87PrepareToWrite(res_reg); | |
5176 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { | |
5177 // Smi check. | |
5178 __ JumpIfSmi(input_reg, &load_smi, Label::kNear); | |
5179 | |
5180 // Heap number map check. | |
5181 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), | |
5182 factory()->heap_number_map()); | |
5183 if (!can_convert_undefined_to_nan) { | |
5184 DeoptimizeIf(not_equal, env); | |
5185 } else { | |
5186 Label heap_number, convert; | |
5187 __ j(equal, &heap_number, Label::kNear); | |
5188 | |
5189 // Convert undefined (or hole) to NaN. | |
5190 __ cmp(input_reg, factory()->undefined_value()); | |
5191 DeoptimizeIf(not_equal, env); | |
5192 | |
5193 __ bind(&convert); | |
5194 ExternalReference nan = | |
5195 ExternalReference::address_of_canonical_non_hole_nan(); | |
5196 __ fld_d(Operand::StaticVariable(nan)); | |
5197 __ jmp(&done, Label::kNear); | |
5198 | |
5199 __ bind(&heap_number); | |
5200 } | |
5201 // Heap number to x87 conversion. | |
5202 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); | |
5203 if (deoptimize_on_minus_zero) { | |
5204 __ fldz(); | |
5205 __ FCmp(); | |
5206 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); | |
5207 __ j(not_zero, &done, Label::kNear); | |
5208 | |
5209 // Use general purpose registers to check if we have -0.0 | |
5210 __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset)); | |
5211 __ test(temp_reg, Immediate(HeapNumber::kSignMask)); | |
5212 __ j(zero, &done, Label::kNear); | |
5213 | |
5214 // Pop FPU stack before deoptimizing. | |
5215 __ fstp(0); | |
5216 DeoptimizeIf(not_zero, env); | |
5217 } | |
5218 __ jmp(&done, Label::kNear); | |
5219 } else { | |
5220 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); | |
5221 } | |
5222 | |
5223 __ bind(&load_smi); | |
5224 // Clobbering a temp is faster than re-tagging the | |
5225 // input register since we avoid dependencies. | |
5226 __ mov(temp_reg, input_reg); | |
5227 __ SmiUntag(temp_reg); // Untag smi before converting to float. | |
5228 __ push(temp_reg); | |
5229 __ fild_s(Operand(esp, 0)); | |
5230 __ add(esp, Immediate(kPointerSize)); | |
5231 __ bind(&done); | |
5232 X87CommitWrite(res_reg); | |
5233 } | |
5234 | |
5235 | |
5236 void LCodeGen::EmitNumberUntagD(Register input_reg, | 4652 void LCodeGen::EmitNumberUntagD(Register input_reg, |
5237 Register temp_reg, | 4653 Register temp_reg, |
5238 XMMRegister result_reg, | 4654 XMMRegister result_reg, |
5239 bool can_convert_undefined_to_nan, | 4655 bool can_convert_undefined_to_nan, |
5240 bool deoptimize_on_minus_zero, | 4656 bool deoptimize_on_minus_zero, |
5241 LEnvironment* env, | 4657 LEnvironment* env, |
5242 NumberUntagDMode mode) { | 4658 NumberUntagDMode mode) { |
5243 Label convert, load_smi, done; | 4659 Label convert, load_smi, done; |
5244 | 4660 |
5245 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { | 4661 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
(...skipping 95 matching lines...)
5341 __ jmp(done); | 4757 __ jmp(done); |
5342 __ bind(&bailout); | 4758 __ bind(&bailout); |
5343 DeoptimizeIf(no_condition, instr->environment()); | 4759 DeoptimizeIf(no_condition, instr->environment()); |
5344 } | 4760 } |
5345 } | 4761 } |
5346 | 4762 |
5347 | 4763 |
5348 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { | 4764 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
5349 class DeferredTaggedToI V8_FINAL : public LDeferredCode { | 4765 class DeferredTaggedToI V8_FINAL : public LDeferredCode { |
5350 public: | 4766 public: |
5351 DeferredTaggedToI(LCodeGen* codegen, | 4767 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) |
5352 LTaggedToI* instr, | 4768 : LDeferredCode(codegen), instr_(instr) { } |
5353 const X87Stack& x87_stack) | |
5354 : LDeferredCode(codegen, x87_stack), instr_(instr) { } | |
5355 virtual void Generate() V8_OVERRIDE { | 4769 virtual void Generate() V8_OVERRIDE { |
5356 codegen()->DoDeferredTaggedToI(instr_, done()); | 4770 codegen()->DoDeferredTaggedToI(instr_, done()); |
5357 } | 4771 } |
5358 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4772 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
5359 private: | 4773 private: |
5360 LTaggedToI* instr_; | 4774 LTaggedToI* instr_; |
5361 }; | 4775 }; |
5362 | 4776 |
5363 LOperand* input = instr->value(); | 4777 LOperand* input = instr->value(); |
5364 ASSERT(input->IsRegister()); | 4778 ASSERT(input->IsRegister()); |
5365 Register input_reg = ToRegister(input); | 4779 Register input_reg = ToRegister(input); |
5366 ASSERT(input_reg.is(ToRegister(instr->result()))); | 4780 ASSERT(input_reg.is(ToRegister(instr->result()))); |
5367 | 4781 |
5368 if (instr->hydrogen()->value()->representation().IsSmi()) { | 4782 if (instr->hydrogen()->value()->representation().IsSmi()) { |
5369 __ SmiUntag(input_reg); | 4783 __ SmiUntag(input_reg); |
5370 } else { | 4784 } else { |
5371 DeferredTaggedToI* deferred = | 4785 DeferredTaggedToI* deferred = |
5372 new(zone()) DeferredTaggedToI(this, instr, x87_stack_); | 4786 new(zone()) DeferredTaggedToI(this, instr); |
5373 // Optimistically untag the input. | 4787 // Optimistically untag the input. |
5374 // If the input is a HeapObject, SmiUntag will set the carry flag. | 4788 // If the input is a HeapObject, SmiUntag will set the carry flag. |
5375 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); | 4789 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); |
5376 __ SmiUntag(input_reg); | 4790 __ SmiUntag(input_reg); |
5377 // Branch to deferred code if the input was tagged. | 4791 // Branch to deferred code if the input was tagged. |
5378 // The deferred code will take care of restoring the tag. | 4792 // The deferred code will take care of restoring the tag. |
5379 __ j(carry, deferred->entry()); | 4793 __ j(carry, deferred->entry()); |
5380 __ bind(deferred->exit()); | 4794 __ bind(deferred->exit()); |
5381 } | 4795 } |
5382 } | 4796 } |
5383 | 4797 |
5384 | 4798 |
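DoTaggedToI untags optimistically: SmiUntag is an arithmetic right shift, the tag bit shifted out lands in the carry flag, and carry set means the input was a heap object pointer rather than a smi, sending control to the deferred path that restores the tag. A C++ model of the same decision; the struct and helper names are mine, and tag assignments follow kSmiTag == 0 with a one-bit tag:

    #include <cassert>
    #include <cstdint>

    struct UntagResult { int32_t value; bool was_heap_object; };

    // Shift first, and use the bit shifted out (the would-be
    // carry flag after sar on ia32) to detect a tagged pointer.
    static UntagResult OptimisticSmiUntag(int32_t tagged) {
      UntagResult r;
      r.was_heap_object = (tagged & 1) != 0;
      r.value = tagged >> 1;  // valid only if the input was a smi
      return r;
    }

    int main() {
      UntagResult smi = OptimisticSmiUntag(42 << 1);
      assert(!smi.was_heap_object && smi.value == 42);
      UntagResult obj = OptimisticSmiUntag(0x12345679);  // odd: pointer
      assert(obj.was_heap_object);
      return 0;
    }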
5385 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { | 4799 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { |
5386 LOperand* input = instr->value(); | 4800 LOperand* input = instr->value(); |
5387 ASSERT(input->IsRegister()); | 4801 ASSERT(input->IsRegister()); |
5388 LOperand* temp = instr->temp(); | 4802 LOperand* temp = instr->temp(); |
5389 ASSERT(temp->IsRegister()); | 4803 ASSERT(temp->IsRegister()); |
5390 LOperand* result = instr->result(); | 4804 LOperand* result = instr->result(); |
5391 ASSERT(result->IsDoubleRegister()); | 4805 ASSERT(result->IsDoubleRegister()); |
5392 | 4806 |
5393 Register input_reg = ToRegister(input); | 4807 Register input_reg = ToRegister(input); |
5394 bool deoptimize_on_minus_zero = | 4808 bool deoptimize_on_minus_zero = |
5395 instr->hydrogen()->deoptimize_on_minus_zero(); | 4809 instr->hydrogen()->deoptimize_on_minus_zero(); |
5396 Register temp_reg = ToRegister(temp); | 4810 Register temp_reg = ToRegister(temp); |
5397 | 4811 |
5398 HValue* value = instr->hydrogen()->value(); | 4812 HValue* value = instr->hydrogen()->value(); |
5399 NumberUntagDMode mode = value->representation().IsSmi() | 4813 NumberUntagDMode mode = value->representation().IsSmi() |
5400 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; | 4814 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; |
5401 | 4815 |
5402 if (CpuFeatures::IsSupported(SSE2)) { | 4816 XMMRegister result_reg = ToDoubleRegister(result); |
5403 CpuFeatureScope scope(masm(), SSE2); | 4817 EmitNumberUntagD(input_reg, |
5404 XMMRegister result_reg = ToDoubleRegister(result); | 4818 temp_reg, |
5405 EmitNumberUntagD(input_reg, | 4819 result_reg, |
5406 temp_reg, | 4820 instr->hydrogen()->can_convert_undefined_to_nan(), |
5407 result_reg, | 4821 deoptimize_on_minus_zero, |
5408 instr->hydrogen()->can_convert_undefined_to_nan(), | 4822 instr->environment(), |
5409 deoptimize_on_minus_zero, | 4823 mode); |
5410 instr->environment(), | |
5411 mode); | |
5412 } else { | |
5413 EmitNumberUntagDNoSSE2(input_reg, | |
5414 temp_reg, | |
5415 ToX87Register(instr->result()), | |
5416 instr->hydrogen()->can_convert_undefined_to_nan(), | |
5417 deoptimize_on_minus_zero, | |
5418 instr->environment(), | |
5419 mode); | |
5420 } | |
5421 } | 4824 } |
5422 | 4825 |
5423 | 4826 |
5424 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { | 4827 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { |
5425 LOperand* input = instr->value(); | 4828 LOperand* input = instr->value(); |
5426 ASSERT(input->IsDoubleRegister()); | 4829 ASSERT(input->IsDoubleRegister()); |
5427 LOperand* result = instr->result(); | 4830 LOperand* result = instr->result(); |
5428 ASSERT(result->IsRegister()); | 4831 ASSERT(result->IsRegister()); |
5429 Register result_reg = ToRegister(result); | 4832 Register result_reg = ToRegister(result); |
5430 | 4833 |
5431 if (instr->truncating()) { | 4834 if (instr->truncating()) { |
5432 if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) { | 4835 XMMRegister input_reg = ToDoubleRegister(input); |
5433 CpuFeatureScope scope(masm(), SSE2); | 4836 __ TruncateDoubleToI(result_reg, input_reg); |
5434 XMMRegister input_reg = ToDoubleRegister(input); | |
5435 __ TruncateDoubleToI(result_reg, input_reg); | |
5436 } else { | |
5437 X87Register input_reg = ToX87Register(input); | |
5438 X87Fxch(input_reg); | |
5439 __ TruncateX87TOSToI(result_reg); | |
5440 } | |
5441 } else { | 4837 } else { |
5442 Label bailout, done; | 4838 Label bailout, done; |
5443 if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) { | 4839 XMMRegister input_reg = ToDoubleRegister(input); |
5444 CpuFeatureScope scope(masm(), SSE2); | 4840 XMMRegister xmm_scratch = double_scratch0(); |
5445 XMMRegister input_reg = ToDoubleRegister(input); | 4841 __ DoubleToI(result_reg, input_reg, xmm_scratch, |
5446 XMMRegister xmm_scratch = double_scratch0(); | 4842 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); |
5447 __ DoubleToI(result_reg, input_reg, xmm_scratch, | |
5448 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); | |
5449 } else { | |
5450 X87Register input_reg = ToX87Register(input); | |
5451 X87Fxch(input_reg); | |
5452 __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(), | |
5453 &bailout, Label::kNear); | |
5454 } | |
5455 __ jmp(&done, Label::kNear); | 4843 __ jmp(&done, Label::kNear); |
5456 __ bind(&bailout); | 4844 __ bind(&bailout); |
5457 DeoptimizeIf(no_condition, instr->environment()); | 4845 DeoptimizeIf(no_condition, instr->environment()); |
5458 __ bind(&done); | 4846 __ bind(&done); |
5459 } | 4847 } |
5460 } | 4848 } |
5461 | 4849 |
5462 | 4850 |
5463 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { | 4851 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { |
5464 LOperand* input = instr->value(); | 4852 LOperand* input = instr->value(); |
5465 ASSERT(input->IsDoubleRegister()); | 4853 ASSERT(input->IsDoubleRegister()); |
5466 LOperand* result = instr->result(); | 4854 LOperand* result = instr->result(); |
5467 ASSERT(result->IsRegister()); | 4855 ASSERT(result->IsRegister()); |
5468 Register result_reg = ToRegister(result); | 4856 Register result_reg = ToRegister(result); |
5469 | 4857 |
5470 Label bailout, done; | 4858 Label bailout, done; |
5471 if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) { | 4859 XMMRegister input_reg = ToDoubleRegister(input); |
5472 CpuFeatureScope scope(masm(), SSE2); | 4860 XMMRegister xmm_scratch = double_scratch0(); |
5473 XMMRegister input_reg = ToDoubleRegister(input); | 4861 __ DoubleToI(result_reg, input_reg, xmm_scratch, |
5474 XMMRegister xmm_scratch = double_scratch0(); | 4862 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); |
5475 __ DoubleToI(result_reg, input_reg, xmm_scratch, | |
5476 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); | |
5477 } else { | |
5478 X87Register input_reg = ToX87Register(input); | |
5479 X87Fxch(input_reg); | |
5480 __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(), | |
5481 &bailout, Label::kNear); | |
5482 } | |
5483 __ jmp(&done, Label::kNear); | 4863 __ jmp(&done, Label::kNear); |
5484 __ bind(&bailout); | 4864 __ bind(&bailout); |
5485 DeoptimizeIf(no_condition, instr->environment()); | 4865 DeoptimizeIf(no_condition, instr->environment()); |
5486 __ bind(&done); | 4866 __ bind(&done); |
5487 | 4867 |
5488 __ SmiTag(result_reg); | 4868 __ SmiTag(result_reg); |
5489 DeoptimizeIf(overflow, instr->environment()); | 4869 DeoptimizeIf(overflow, instr->environment()); |
5490 } | 4870 } |
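
The final SmiTag/DeoptimizeIf(overflow) pair relies on ia32's smi representation: a smi is the 31-bit value shifted left one bit with tag bit 0, so tagging is an add-to-self that sets the overflow flag when the value needs more than 31 bits. A sketch, with an illustrative helper name:

// Sketch: ia32 smi tagging as used by DoDoubleToSmi (name illustrative).
#include <cstdint>

bool SmiTagSketch(int32_t value, int32_t* tagged) {
  // Representable smi range on ia32 is [-2^30, 2^30 - 1].
  if (value < -0x40000000 || value > 0x3FFFFFFF) {
    return false;  // the add would overflow -> DeoptimizeIf(overflow)
  }
  *tagged = value << 1;  // kSmiTagSize == 1, kSmiTag == 0
  return true;
}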
5491 | 4871 |
5492 | 4872 |
(...skipping 83 matching lines...)
5576 | 4956 |
5577 __ test(eax, Immediate(kSmiTagMask)); | 4957 __ test(eax, Immediate(kSmiTagMask)); |
5578 } | 4958 } |
5579 DeoptimizeIf(zero, instr->environment()); | 4959 DeoptimizeIf(zero, instr->environment()); |
5580 } | 4960 } |
5581 | 4961 |
5582 | 4962 |
5583 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 4963 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
5584 class DeferredCheckMaps V8_FINAL : public LDeferredCode { | 4964 class DeferredCheckMaps V8_FINAL : public LDeferredCode { |
5585 public: | 4965 public: |
5586 DeferredCheckMaps(LCodeGen* codegen, | 4966 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) |
5587 LCheckMaps* instr, | 4967 : LDeferredCode(codegen), instr_(instr), object_(object) { |
5588 Register object, | |
5589 const X87Stack& x87_stack) | |
5590 : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) { | |
5591 SetExit(check_maps()); | 4968 SetExit(check_maps()); |
5592 } | 4969 } |
5593 virtual void Generate() V8_OVERRIDE { | 4970 virtual void Generate() V8_OVERRIDE { |
5594 codegen()->DoDeferredInstanceMigration(instr_, object_); | 4971 codegen()->DoDeferredInstanceMigration(instr_, object_); |
5595 } | 4972 } |
5596 Label* check_maps() { return &check_maps_; } | 4973 Label* check_maps() { return &check_maps_; } |
5597 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4974 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
5598 private: | 4975 private: |
5599 LCheckMaps* instr_; | 4976 LCheckMaps* instr_; |
5600 Label check_maps_; | 4977 Label check_maps_; |
5601 Register object_; | 4978 Register object_; |
5602 }; | 4979 }; |
5603 | 4980 |
5604 if (instr->hydrogen()->IsStabilityCheck()) { | 4981 if (instr->hydrogen()->IsStabilityCheck()) { |
5605 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); | 4982 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); |
5606 for (int i = 0; i < maps->size(); ++i) { | 4983 for (int i = 0; i < maps->size(); ++i) { |
5607 AddStabilityDependency(maps->at(i).handle()); | 4984 AddStabilityDependency(maps->at(i).handle()); |
5608 } | 4985 } |
5609 return; | 4986 return; |
5610 } | 4987 } |
5611 | 4988 |
5612 LOperand* input = instr->value(); | 4989 LOperand* input = instr->value(); |
5613 ASSERT(input->IsRegister()); | 4990 ASSERT(input->IsRegister()); |
5614 Register reg = ToRegister(input); | 4991 Register reg = ToRegister(input); |
5615 | 4992 |
5616 DeferredCheckMaps* deferred = NULL; | 4993 DeferredCheckMaps* deferred = NULL; |
5617 if (instr->hydrogen()->HasMigrationTarget()) { | 4994 if (instr->hydrogen()->HasMigrationTarget()) { |
5618 deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_); | 4995 deferred = new(zone()) DeferredCheckMaps(this, instr, reg); |
5619 __ bind(deferred->check_maps()); | 4996 __ bind(deferred->check_maps()); |
5620 } | 4997 } |
5621 | 4998 |
5622 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); | 4999 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); |
5623 Label success; | 5000 Label success; |
5624 for (int i = 0; i < maps->size() - 1; i++) { | 5001 for (int i = 0; i < maps->size() - 1; i++) { |
5625 Handle<Map> map = maps->at(i).handle(); | 5002 Handle<Map> map = maps->at(i).handle(); |
5626 __ CompareMap(reg, map); | 5003 __ CompareMap(reg, map); |
5627 __ j(equal, &success, Label::kNear); | 5004 __ j(equal, &success, Label::kNear); |
5628 } | 5005 } |
5629 | 5006 |
5630 Handle<Map> map = maps->at(maps->size() - 1).handle(); | 5007 Handle<Map> map = maps->at(maps->size() - 1).handle(); |
5631 __ CompareMap(reg, map); | 5008 __ CompareMap(reg, map); |
5632 if (instr->hydrogen()->HasMigrationTarget()) { | 5009 if (instr->hydrogen()->HasMigrationTarget()) { |
5633 __ j(not_equal, deferred->entry()); | 5010 __ j(not_equal, deferred->entry()); |
5634 } else { | 5011 } else { |
5635 DeoptimizeIf(not_equal, instr->environment()); | 5012 DeoptimizeIf(not_equal, instr->environment()); |
5636 } | 5013 } |
5637 | 5014 |
5638 __ bind(&success); | 5015 __ bind(&success); |
5639 } | 5016 } |
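
In outline, the emitted check compares the object's map against each expected map and falls through to success on the first hit; only a miss on the last map deoptimizes or, when a migration target exists, jumps to the deferred migration and retries. A rough C++ rendering (types simplified, no codegen):

// Sketch of DoCheckMaps' control flow (simplified types).
bool CheckMapsSketch(const void* object_map, const void* const* maps,
                     int count, bool has_migration_target) {
  for (int i = 0; i < count - 1; i++) {
    if (object_map == maps[i]) return true;  // j(equal, &success)
  }
  if (object_map == maps[count - 1]) return true;
  if (has_migration_target) {
    // j(not_equal, deferred->entry()): DoDeferredInstanceMigration runs,
    // then control loops back to the check_maps label to re-check.
    return false;
  }
  // DeoptimizeIf(not_equal, instr->environment())
  return false;
}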
5640 | 5017 |
5641 | 5018 |
5642 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 5019 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
5643 CpuFeatureScope scope(masm(), SSE2); | |
5644 XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); | 5020 XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); |
5645 XMMRegister xmm_scratch = double_scratch0(); | 5021 XMMRegister xmm_scratch = double_scratch0(); |
5646 Register result_reg = ToRegister(instr->result()); | 5022 Register result_reg = ToRegister(instr->result()); |
5647 __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg); | 5023 __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg); |
5648 } | 5024 } |
5649 | 5025 |
5650 | 5026 |
5651 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { | 5027 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { |
5652 ASSERT(instr->unclamped()->Equals(instr->result())); | 5028 ASSERT(instr->unclamped()->Equals(instr->result())); |
5653 Register value_reg = ToRegister(instr->result()); | 5029 Register value_reg = ToRegister(instr->result()); |
5654 __ ClampUint8(value_reg); | 5030 __ ClampUint8(value_reg); |
5655 } | 5031 } |
5656 | 5032 |
5657 | 5033 |
5658 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { | 5034 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { |
5659 CpuFeatureScope scope(masm(), SSE2); | |
5660 | |
5661 ASSERT(instr->unclamped()->Equals(instr->result())); | 5035 ASSERT(instr->unclamped()->Equals(instr->result())); |
5662 Register input_reg = ToRegister(instr->unclamped()); | 5036 Register input_reg = ToRegister(instr->unclamped()); |
5663 XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm()); | 5037 XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm()); |
5664 XMMRegister xmm_scratch = double_scratch0(); | 5038 XMMRegister xmm_scratch = double_scratch0(); |
5665 Label is_smi, done, heap_number; | 5039 Label is_smi, done, heap_number; |
5666 | 5040 |
5667 __ JumpIfSmi(input_reg, &is_smi); | 5041 __ JumpIfSmi(input_reg, &is_smi); |
5668 | 5042 |
5669 // Check for heap number | 5043 // Check for heap number |
5670 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), | 5044 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), |
(...skipping 14 matching lines...)
5685 __ jmp(&done, Label::kNear); | 5059 __ jmp(&done, Label::kNear); |
5686 | 5060 |
5687 // smi | 5061 // smi |
5688 __ bind(&is_smi); | 5062 __ bind(&is_smi); |
5689 __ SmiUntag(input_reg); | 5063 __ SmiUntag(input_reg); |
5690 __ ClampUint8(input_reg); | 5064 __ ClampUint8(input_reg); |
5691 __ bind(&done); | 5065 __ bind(&done); |
5692 } | 5066 } |
5693 | 5067 |
5694 | 5068 |
5695 void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) { | |
5696 Register input_reg = ToRegister(instr->unclamped()); | |
5697 Register result_reg = ToRegister(instr->result()); | |
5698 Register scratch = ToRegister(instr->scratch()); | |
5699 Register scratch2 = ToRegister(instr->scratch2()); | |
5700 Register scratch3 = ToRegister(instr->scratch3()); | |
5701 Label is_smi, done, heap_number, valid_exponent, | |
5702 largest_value, zero_result, maybe_nan_or_infinity; | |
5703 | |
5704 __ JumpIfSmi(input_reg, &is_smi); | |
5705 | |
5706 // Check for heap number | |
5707 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), | |
5708 factory()->heap_number_map()); | |
5709 __ j(equal, &heap_number, Label::kNear); | |
5710 | |
5711 // Check for undefined. Undefined is converted to zero for clamping | |
5712 // conversions. | |
5713 __ cmp(input_reg, factory()->undefined_value()); | |
5714 DeoptimizeIf(not_equal, instr->environment()); | |
5715 __ jmp(&zero_result, Label::kNear); | |
5716 | |
5717 // Heap number | |
5718 __ bind(&heap_number); | |
5719 | |
5720 // Surprisingly, all of the hand-crafted bit manipulations below are much | 
5721 // faster than the x86 FPU's built-in rounding instruction, especially since | 
5722 // "banker's rounding" would additionally be very expensive. | 
5723 | |
5724 // Get exponent word. | |
5725 __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset)); | |
5726 __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); | |
5727 | |
5728 // Test for negative values --> clamp to zero | |
5729 __ test(scratch, scratch); | |
5730 __ j(negative, &zero_result, Label::kNear); | |
5731 | |
5732 // Get exponent alone in scratch2. | |
5733 __ mov(scratch2, scratch); | |
5734 __ and_(scratch2, HeapNumber::kExponentMask); | |
5735 __ shr(scratch2, HeapNumber::kExponentShift); | |
5736 __ j(zero, &zero_result, Label::kNear); | |
5737 __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1)); | |
5738 __ j(negative, &zero_result, Label::kNear); | |
5739 | |
5740 const uint32_t non_int8_exponent = 7; | |
5741 __ cmp(scratch2, Immediate(non_int8_exponent + 1)); | |
5742 // If the exponent is too big, check for special values. | |
5743 __ j(greater, &maybe_nan_or_infinity, Label::kNear); | |
5744 | |
5745 __ bind(&valid_exponent); | |
5746 // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent | 
5747 // < 7. The shift bias is the number of bits to shift the mantissa left such | 
5748 // that, with an exponent of 7, the top-most one ends up in bit 30, allowing | 
5749 // detection of the rounding overflow from 255.5 to 256 (bit 31 goes from 0 | 
5750 // to 1). | 
5751 int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1; | |
5752 __ lea(result_reg, MemOperand(scratch2, shift_bias)); | |
5753 // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the | |
5754 // top bits of the mantissa. | |
5755 __ and_(scratch, HeapNumber::kMantissaMask); | |
5756 // Put back the implicit 1 of the mantissa | |
5757 __ or_(scratch, 1 << HeapNumber::kExponentShift); | |
5758 // Shift up to round | |
5759 __ shl_cl(scratch); | |
5760 // Use "banker's rounding" to spec: If fractional part of number is 0.5, then | |
5761 // use the bit in the "ones" place and add it to the "halves" place, which has | |
5762 // the effect of rounding to even. | |
5763 __ mov(scratch2, scratch); | |
5764 const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8; | |
5765 const uint32_t one_bit_shift = one_half_bit_shift + 1; | |
5766 __ and_(scratch2, Immediate((1 << one_bit_shift) - 1)); | |
5767 __ cmp(scratch2, Immediate(1 << one_half_bit_shift)); | |
5768 Label no_round; | |
5769 __ j(less, &no_round, Label::kNear); | |
5770 Label round_up; | |
5771 __ mov(scratch2, Immediate(1 << one_half_bit_shift)); | |
5772 __ j(greater, &round_up, Label::kNear); | |
5773 __ test(scratch3, scratch3); | |
5774 __ j(not_zero, &round_up, Label::kNear); | |
5775 __ mov(scratch2, scratch); | |
5776 __ and_(scratch2, Immediate(1 << one_bit_shift)); | |
5777 __ shr(scratch2, 1); | |
5778 __ bind(&round_up); | |
5779 __ add(scratch, scratch2); | |
5780 __ j(overflow, &largest_value, Label::kNear); | |
5781 __ bind(&no_round); | |
5782 __ shr(scratch, 23); | |
5783 __ mov(result_reg, scratch); | |
5784 __ jmp(&done, Label::kNear); | |
5785 | |
5786 __ bind(&maybe_nan_or_infinity); | |
5787 // Check for NaN/Infinity; all other values map to 255. | 
5788 __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1)); | |
5789 __ j(not_equal, &largest_value, Label::kNear); | |
5790 | |
5791 // Check for NaN, which differs from Infinity in that at least one mantissa | |
5792 // bit is set. | |
5793 __ and_(scratch, HeapNumber::kMantissaMask); | |
5794 __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); | |
5795 __ j(not_zero, &zero_result, Label::kNear); // mantissa != 0 --> NaN -> 0 | 
5796 // Infinity -> Fall through to map to 255. | |
5797 | |
5798 __ bind(&largest_value); | |
5799 __ mov(result_reg, Immediate(255)); | |
5800 __ jmp(&done, Label::kNear); | |
5801 | |
5802 __ bind(&zero_result); | |
5803 __ xor_(result_reg, result_reg); | |
5804 __ jmp(&done, Label::kNear); | |
5805 | |
5806 // smi | |
5807 __ bind(&is_smi); | |
5808 if (!input_reg.is(result_reg)) { | |
5809 __ mov(result_reg, input_reg); | |
5810 } | |
5811 __ SmiUntag(result_reg); | |
5812 __ ClampUint8(result_reg); | |
5813 __ bind(&done); | |
5814 } | |
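
The integer sequence above reproduces clamp-to-[0, 255] with round-half-to-even entirely in general-purpose registers. The same result, stated with floating point for clarity (a sketch; the helper name is invented, and std::nearbyint is assumed to run in the default round-to-nearest-even mode):

// Sketch: the value DoClampTToUint8NoSSE2 computes for a heap number.
#include <cmath>
#include <cstdint>

uint8_t ClampDoubleToUint8Sketch(double value) {
  if (std::isnan(value) || value <= 0.0) return 0;  // NaN and negatives -> 0
  if (value >= 255.0) return 255;                   // Infinity and large -> 255
  // Default FP environment rounds to nearest, ties to even ("banker's").
  return static_cast<uint8_t>(std::nearbyint(value));
}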
5815 | |
5816 | |
5817 void LCodeGen::DoDoubleBits(LDoubleBits* instr) { | 5069 void LCodeGen::DoDoubleBits(LDoubleBits* instr) { |
5818 CpuFeatureScope scope(masm(), SSE2); | |
5819 XMMRegister value_reg = ToDoubleRegister(instr->value()); | 5070 XMMRegister value_reg = ToDoubleRegister(instr->value()); |
5820 Register result_reg = ToRegister(instr->result()); | 5071 Register result_reg = ToRegister(instr->result()); |
5821 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) { | 5072 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) { |
5822 if (CpuFeatures::IsSupported(SSE4_1)) { | 5073 if (CpuFeatures::IsSupported(SSE4_1)) { |
5823 CpuFeatureScope scope2(masm(), SSE4_1); | 5074 CpuFeatureScope scope2(masm(), SSE4_1); |
5824 __ pextrd(result_reg, value_reg, 1); | 5075 __ pextrd(result_reg, value_reg, 1); |
5825 } else { | 5076 } else { |
5826 XMMRegister xmm_scratch = double_scratch0(); | 5077 XMMRegister xmm_scratch = double_scratch0(); |
5827 __ pshufd(xmm_scratch, value_reg, 1); | 5078 __ pshufd(xmm_scratch, value_reg, 1); |
5828 __ movd(result_reg, xmm_scratch); | 5079 __ movd(result_reg, xmm_scratch); |
5829 } | 5080 } |
5830 } else { | 5081 } else { |
5831 __ movd(result_reg, value_reg); | 5082 __ movd(result_reg, value_reg); |
5832 } | 5083 } |
5833 } | 5084 } |
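
DoDoubleBits simply reads one 32-bit half of the double's IEEE-754 bit pattern: pextrd grabs lane 1 directly under SSE4.1, otherwise pshufd shuffles the high word down before the movd. The portable equivalent, as a sketch:

// Sketch: the word extraction DoDoubleBits performs.
#include <cstdint>
#include <cstring>

uint32_t DoubleBitsSketch(double value, bool high) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  // HIGH: pextrd(result, value, 1) or pshufd+movd; LOW: plain movd.
  return high ? static_cast<uint32_t>(bits >> 32)
              : static_cast<uint32_t>(bits);
}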
5834 | 5085 |
5835 | 5086 |
5836 void LCodeGen::DoConstructDouble(LConstructDouble* instr) { | 5087 void LCodeGen::DoConstructDouble(LConstructDouble* instr) { |
5837 Register hi_reg = ToRegister(instr->hi()); | 5088 Register hi_reg = ToRegister(instr->hi()); |
5838 Register lo_reg = ToRegister(instr->lo()); | 5089 Register lo_reg = ToRegister(instr->lo()); |
5839 XMMRegister result_reg = ToDoubleRegister(instr->result()); | 5090 XMMRegister result_reg = ToDoubleRegister(instr->result()); |
5840 CpuFeatureScope scope(masm(), SSE2); | |
5841 | 5091 |
5842 if (CpuFeatures::IsSupported(SSE4_1)) { | 5092 if (CpuFeatures::IsSupported(SSE4_1)) { |
5843 CpuFeatureScope scope2(masm(), SSE4_1); | 5093 CpuFeatureScope scope2(masm(), SSE4_1); |
5844 __ movd(result_reg, lo_reg); | 5094 __ movd(result_reg, lo_reg); |
5845 __ pinsrd(result_reg, hi_reg, 1); | 5095 __ pinsrd(result_reg, hi_reg, 1); |
5846 } else { | 5096 } else { |
5847 XMMRegister xmm_scratch = double_scratch0(); | 5097 XMMRegister xmm_scratch = double_scratch0(); |
5848 __ movd(result_reg, hi_reg); | 5098 __ movd(result_reg, hi_reg); |
5849 __ psllq(result_reg, 32); | 5099 __ psllq(result_reg, 32); |
5850 __ movd(xmm_scratch, lo_reg); | 5100 __ movd(xmm_scratch, lo_reg); |
5851 __ orps(result_reg, xmm_scratch); | 5101 __ orps(result_reg, xmm_scratch); |
5852 } | 5102 } |
5853 } | 5103 } |
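
DoConstructDouble is the inverse operation: the high word is placed in the upper lane (pinsrd under SSE4.1, or movd plus a 32-bit psllq shift) and the low word is OR-ed in. In portable C++, as a sketch:

// Sketch: packing hi/lo words into a double, as DoConstructDouble emits.
#include <cstdint>
#include <cstring>

double ConstructDoubleSketch(uint32_t hi, uint32_t lo) {
  // movd(result, hi); psllq(result, 32); movd(scratch, lo); orps(result, ...)
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double result;
  std::memcpy(&result, &bits, sizeof result);
  return result;
}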
5854 | 5104 |
5855 | 5105 |
5856 void LCodeGen::DoAllocate(LAllocate* instr) { | 5106 void LCodeGen::DoAllocate(LAllocate* instr) { |
5857 class DeferredAllocate V8_FINAL : public LDeferredCode { | 5107 class DeferredAllocate V8_FINAL : public LDeferredCode { |
5858 public: | 5108 public: |
5859 DeferredAllocate(LCodeGen* codegen, | 5109 DeferredAllocate(LCodeGen* codegen, LAllocate* instr) |
5860 LAllocate* instr, | 5110 : LDeferredCode(codegen), instr_(instr) { } |
5861 const X87Stack& x87_stack) | |
5862 : LDeferredCode(codegen, x87_stack), instr_(instr) { } | |
5863 virtual void Generate() V8_OVERRIDE { | 5111 virtual void Generate() V8_OVERRIDE { |
5864 codegen()->DoDeferredAllocate(instr_); | 5112 codegen()->DoDeferredAllocate(instr_); |
5865 } | 5113 } |
5866 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 5114 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
5867 private: | 5115 private: |
5868 LAllocate* instr_; | 5116 LAllocate* instr_; |
5869 }; | 5117 }; |
5870 | 5118 |
5871 DeferredAllocate* deferred = | 5119 DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr); |
5872 new(zone()) DeferredAllocate(this, instr, x87_stack_); | |
5873 | 5120 |
5874 Register result = ToRegister(instr->result()); | 5121 Register result = ToRegister(instr->result()); |
5875 Register temp = ToRegister(instr->temp()); | 5122 Register temp = ToRegister(instr->temp()); |
5876 | 5123 |
5877 // Allocate memory for the object. | 5124 // Allocate memory for the object. |
5878 AllocationFlags flags = TAG_OBJECT; | 5125 AllocationFlags flags = TAG_OBJECT; |
5879 if (instr->hydrogen()->MustAllocateDoubleAligned()) { | 5126 if (instr->hydrogen()->MustAllocateDoubleAligned()) { |
5880 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); | 5127 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); |
5881 } | 5128 } |
5882 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { | 5129 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { |
(...skipping 344 matching lines...)
6227 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); | 5474 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); |
6228 ASSERT(instr->HasEnvironment()); | 5475 ASSERT(instr->HasEnvironment()); |
6229 LEnvironment* env = instr->environment(); | 5476 LEnvironment* env = instr->environment(); |
6230 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); | 5477 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); |
6231 } | 5478 } |
6232 | 5479 |
6233 | 5480 |
6234 void LCodeGen::DoStackCheck(LStackCheck* instr) { | 5481 void LCodeGen::DoStackCheck(LStackCheck* instr) { |
6235 class DeferredStackCheck V8_FINAL : public LDeferredCode { | 5482 class DeferredStackCheck V8_FINAL : public LDeferredCode { |
6236 public: | 5483 public: |
6237 DeferredStackCheck(LCodeGen* codegen, | 5484 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) |
6238 LStackCheck* instr, | 5485 : LDeferredCode(codegen), instr_(instr) { } |
6239 const X87Stack& x87_stack) | |
6240 : LDeferredCode(codegen, x87_stack), instr_(instr) { } | |
6241 virtual void Generate() V8_OVERRIDE { | 5486 virtual void Generate() V8_OVERRIDE { |
6242 codegen()->DoDeferredStackCheck(instr_); | 5487 codegen()->DoDeferredStackCheck(instr_); |
6243 } | 5488 } |
6244 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 5489 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
6245 private: | 5490 private: |
6246 LStackCheck* instr_; | 5491 LStackCheck* instr_; |
6247 }; | 5492 }; |
6248 | 5493 |
6249 ASSERT(instr->HasEnvironment()); | 5494 ASSERT(instr->HasEnvironment()); |
6250 LEnvironment* env = instr->environment(); | 5495 LEnvironment* env = instr->environment(); |
(...skipping 10 matching lines...)
6261 ASSERT(instr->context()->IsRegister()); | 5506 ASSERT(instr->context()->IsRegister()); |
6262 ASSERT(ToRegister(instr->context()).is(esi)); | 5507 ASSERT(ToRegister(instr->context()).is(esi)); |
6263 CallCode(isolate()->builtins()->StackCheck(), | 5508 CallCode(isolate()->builtins()->StackCheck(), |
6264 RelocInfo::CODE_TARGET, | 5509 RelocInfo::CODE_TARGET, |
6265 instr); | 5510 instr); |
6266 __ bind(&done); | 5511 __ bind(&done); |
6267 } else { | 5512 } else { |
6268 ASSERT(instr->hydrogen()->is_backwards_branch()); | 5513 ASSERT(instr->hydrogen()->is_backwards_branch()); |
6269 // Perform stack overflow check if this goto needs it before jumping. | 5514 // Perform stack overflow check if this goto needs it before jumping. |
6270 DeferredStackCheck* deferred_stack_check = | 5515 DeferredStackCheck* deferred_stack_check = |
6271 new(zone()) DeferredStackCheck(this, instr, x87_stack_); | 5516 new(zone()) DeferredStackCheck(this, instr); |
6272 ExternalReference stack_limit = | 5517 ExternalReference stack_limit = |
6273 ExternalReference::address_of_stack_limit(isolate()); | 5518 ExternalReference::address_of_stack_limit(isolate()); |
6274 __ cmp(esp, Operand::StaticVariable(stack_limit)); | 5519 __ cmp(esp, Operand::StaticVariable(stack_limit)); |
6275 __ j(below, deferred_stack_check->entry()); | 5520 __ j(below, deferred_stack_check->entry()); |
6276 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); | 5521 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); |
6277 __ bind(instr->done_label()); | 5522 __ bind(instr->done_label()); |
6278 deferred_stack_check->SetExit(instr->done_label()); | 5523 deferred_stack_check->SetExit(instr->done_label()); |
6279 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); | 5524 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); |
6280 // Don't record a deoptimization index for the safepoint here. | 5525 // Don't record a deoptimization index for the safepoint here. |
6281 // This will be done explicitly when emitting call and the safepoint in | 5526 // This will be done explicitly when emitting call and the safepoint in |
(...skipping 93 matching lines...)
6375 __ StoreToSafepointRegisterSlot(object, eax); | 5620 __ StoreToSafepointRegisterSlot(object, eax); |
6376 } | 5621 } |
6377 | 5622 |
6378 | 5623 |
6379 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { | 5624 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { |
6380 class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode { | 5625 class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode { |
6381 public: | 5626 public: |
6382 DeferredLoadMutableDouble(LCodeGen* codegen, | 5627 DeferredLoadMutableDouble(LCodeGen* codegen, |
6383 LLoadFieldByIndex* instr, | 5628 LLoadFieldByIndex* instr, |
6384 Register object, | 5629 Register object, |
6385 Register index, | 5630 Register index) |
6386 const X87Stack& x87_stack) | 5631 : LDeferredCode(codegen), |
6387 : LDeferredCode(codegen, x87_stack), | |
6388 instr_(instr), | 5632 instr_(instr), |
6389 object_(object), | 5633 object_(object), |
6390 index_(index) { | 5634 index_(index) { |
6391 } | 5635 } |
6392 virtual void Generate() V8_OVERRIDE { | 5636 virtual void Generate() V8_OVERRIDE { |
6393 codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_); | 5637 codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_); |
6394 } | 5638 } |
6395 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 5639 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
6396 private: | 5640 private: |
6397 LLoadFieldByIndex* instr_; | 5641 LLoadFieldByIndex* instr_; |
6398 Register object_; | 5642 Register object_; |
6399 Register index_; | 5643 Register index_; |
6400 }; | 5644 }; |
6401 | 5645 |
6402 Register object = ToRegister(instr->object()); | 5646 Register object = ToRegister(instr->object()); |
6403 Register index = ToRegister(instr->index()); | 5647 Register index = ToRegister(instr->index()); |
6404 | 5648 |
6405 DeferredLoadMutableDouble* deferred; | 5649 DeferredLoadMutableDouble* deferred; |
6406 deferred = new(zone()) DeferredLoadMutableDouble( | 5650 deferred = new(zone()) DeferredLoadMutableDouble( |
6407 this, instr, object, index, x87_stack_); | 5651 this, instr, object, index); |
6408 | 5652 |
6409 Label out_of_object, done; | 5653 Label out_of_object, done; |
6410 __ test(index, Immediate(Smi::FromInt(1))); | 5654 __ test(index, Immediate(Smi::FromInt(1))); |
6411 __ j(not_zero, deferred->entry()); | 5655 __ j(not_zero, deferred->entry()); |
6412 | 5656 |
6413 __ sar(index, 1); | 5657 __ sar(index, 1); |
6414 | 5658 |
6415 __ cmp(index, Immediate(0)); | 5659 __ cmp(index, Immediate(0)); |
6416 __ j(less, &out_of_object, Label::kNear); | 5660 __ j(less, &out_of_object, Label::kNear); |
6417 __ mov(object, FieldOperand(object, | 5661 __ mov(object, FieldOperand(object, |
(...skipping 13 matching lines...)
6431 __ bind(deferred->exit()); | 5675 __ bind(deferred->exit()); |
6432 __ bind(&done); | 5676 __ bind(&done); |
6433 } | 5677 } |
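
The index reaching DoLoadFieldByIndex is a smi carrying two extra facts: its low value bit (tested via Smi::FromInt(1)) marks a mutable-double field that must take the deferred path, and after the sar untag a negative index selects the out-of-object properties array rather than an in-object field. A decoding sketch (the struct and names are illustrative, and the skipped lines are approximated):

// Sketch: decoding the smi-encoded field index (illustrative layout).
#include <cstdint>

struct FieldRefSketch {
  bool deferred_double_load;  // low value bit set -> deferred path
  bool out_of_object;         // negative index -> properties backing store
  int32_t index;
};

FieldRefSketch DecodeFieldIndexSketch(int32_t raw_smi) {
  FieldRefSketch ref;
  ref.deferred_double_load = (raw_smi & 2) != 0;  // test(index, Smi::FromInt(1))
  ref.index = raw_smi >> 1;                       // sar(index, 1)
  ref.out_of_object = ref.index < 0;
  return ref;
}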
6434 | 5678 |
6435 | 5679 |
6436 #undef __ | 5680 #undef __ |
6437 | 5681 |
6438 } } // namespace v8::internal | 5682 } } // namespace v8::internal |
6439 | 5683 |
6440 #endif // V8_TARGET_ARCH_IA32 | 5684 #endif // V8_TARGET_ARCH_IA32 |