Chromium Code Reviews

Unified Diff: src/ia32/lithium-codegen-ia32.cc

Issue 275433004: Require SSE2 support for the ia32 port. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 7 months ago
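
Context for the hunks below: the patch deletes the runtime SSE2 feature detection that, until now, gave every double operation in this file two code paths, an XMM path guarded by CpuFeatureScope and an x87 FPU fallback tracked through x87_stack_. A minimal sketch of the shape being removed (the function name is hypothetical; this is condensed from the hunks, not a verbatim excerpt):

void LCodeGen::DoSomeDoubleOp(LInstruction* instr) {  // hypothetical name
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope scope(masm(), SSE2);
    // ... emit SSE2 instructions on XMM registers ...
  } else {
    // ... emit x87 FPU instructions, tracked through x87_stack_ ...
  }
}

With SSE2 a hard requirement, only the XMM branch survives, so the guards, the per-function CpuFeatureScope lines, and the whole X87Stack bookkeeping go; write barriers likewise pass kSaveFPRegs unconditionally instead of GetSaveFPRegsMode(isolate()).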
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "v8.h"
 
 #if V8_TARGET_ARCH_IA32
 
 #include "ia32/lithium-codegen-ia32.h"
 #include "ic.h"
 #include "code-stubs.h"
 #include "deoptimizer.h"
 #include "stub-cache.h"
 #include "codegen.h"
 #include "hydrogen-osr.h"
 
 namespace v8 {
 namespace internal {
 
-
-static SaveFPRegsMode GetSaveFPRegsMode(Isolate* isolate) {
-  // We don't need to save floating point regs when generating the snapshot
-  return CpuFeatures::IsSafeForSnapshot(isolate, SSE2) ? kSaveFPRegs
-                                                       : kDontSaveFPRegs;
-}
-
-
 // When invoking builtins, we need to record the safepoint in the middle of
 // the invoke instruction sequence generated by the macro assembler.
 class SafepointGenerator V8_FINAL : public CallWrapper {
  public:
   SafepointGenerator(LCodeGen* codegen,
                      LPointerMap* pointers,
                      Safepoint::DeoptMode mode)
       : codegen_(codegen),
         pointers_(pointers),
         deopt_mode_(mode) {}
(...skipping 58 matching lines...)
     __ mov(Operand(esp, offset), eax);
   }
 }
 #endif
 
 
 void LCodeGen::SaveCallerDoubles() {
   ASSERT(info()->saves_caller_doubles());
   ASSERT(NeedsEagerFrame());
   Comment(";;; Save clobbered callee double registers");
-  CpuFeatureScope scope(masm(), SSE2);
   int count = 0;
   BitVector* doubles = chunk()->allocated_double_registers();
   BitVector::Iterator save_iterator(doubles);
   while (!save_iterator.Done()) {
     __ movsd(MemOperand(esp, count * kDoubleSize),
              XMMRegister::FromAllocationIndex(save_iterator.Current()));
     save_iterator.Advance();
     count++;
   }
 }
 
 
 void LCodeGen::RestoreCallerDoubles() {
   ASSERT(info()->saves_caller_doubles());
   ASSERT(NeedsEagerFrame());
   Comment(";;; Restore clobbered callee double registers");
-  CpuFeatureScope scope(masm(), SSE2);
   BitVector* doubles = chunk()->allocated_double_registers();
   BitVector::Iterator save_iterator(doubles);
   int count = 0;
   while (!save_iterator.Done()) {
     __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
              MemOperand(esp, count * kDoubleSize));
     save_iterator.Advance();
     count++;
   }
 }
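
The pair of functions above spill and reload every allocated XMM register to a fixed region of the frame; the slot index is simply the register's position in the BitVector iteration order, so save and restore walk the same sequence. A sketch of the emitted layout (xmm1/xmm3 are made-up examples; kDoubleSize is 8 on ia32):

// With, say, xmm1 and xmm3 allocated, SaveCallerDoubles emits:
//   movsd [esp + 0 * kDoubleSize], xmm1   // count == 0
//   movsd [esp + 1 * kDoubleSize], xmm3   // count == 1
// and RestoreCallerDoubles emits the mirror-image movsd loads.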
(...skipping 112 matching lines...)
         // Store dynamic frame alignment state in the first local.
         int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
         if (dynamic_frame_alignment_) {
           __ mov(Operand(ebp, offset), edx);
         } else {
           __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
         }
       }
     }
 
-    if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
-      SaveCallerDoubles();
-    }
+    if (info()->saves_caller_doubles()) SaveCallerDoubles();
   }
 
   // Possibly allocate a local context.
   int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment(";;; Allocate local context");
     // Argument to NewContext is the function, which is still in edi.
     if (heap_slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
(...skipping 91 matching lines...)
 }
 
 
 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
   if (instr->IsCall()) {
     EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
   }
   if (!instr->IsLazyBailout() && !instr->IsGap()) {
     safepoints_.BumpLastLazySafepointIndex();
   }
-  if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
-}
-
-
-void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
-  if (!CpuFeatures::IsSupported(SSE2)) {
-    if (instr->IsGoto()) {
-      x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
-    } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
-               !instr->IsGap() && !instr->IsReturn()) {
-      if (instr->ClobbersDoubleRegisters(isolate())) {
-        if (instr->HasDoubleRegisterResult()) {
-          ASSERT_EQ(1, x87_stack_.depth());
-        } else {
-          ASSERT_EQ(0, x87_stack_.depth());
-        }
-      }
-      __ VerifyX87StackDepth(x87_stack_.depth());
-    }
-  }
 }
 
 
+void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { }
+
+
 bool LCodeGen::GenerateJumpTable() {
   Label needs_frame;
   if (jump_table_.length() > 0) {
     Comment(";;; -------------------- Jump table --------------------");
   }
   for (int i = 0; i < jump_table_.length(); i++) {
     __ bind(&jump_table_[i].label);
     Address entry = jump_table_[i].address;
     Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
     int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
(...skipping 23 matching lines...)
         __ call(&push_approx_pc);
         __ bind(&push_approx_pc);
         // Push the continuation which was stashed were the ebp should
         // be. Replace it with the saved ebp.
         __ push(MemOperand(esp, 3 * kPointerSize));
         __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
         __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
         __ ret(0);  // Call the continuation without clobbering registers.
       }
     } else {
-      if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
-        RestoreCallerDoubles();
-      }
+      if (info()->saves_caller_doubles()) RestoreCallerDoubles();
       __ call(entry, RelocInfo::RUNTIME_ENTRY);
     }
   }
   return !is_aborted();
 }
 
 
 bool LCodeGen::GenerateDeferredCode() {
   ASSERT(is_generating());
   if (deferred_.length() > 0) {
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
-      X87Stack copy(code->x87_stack());
-      x87_stack_ = copy;
 
       HValue* value =
           instructions_->at(code->instruction_index())->hydrogen_value();
       RecordAndWritePosition(
           chunk()->graph()->SourcePositionToScriptPosition(value->position()));
 
       Comment(";;; <@%d,#%d> "
               "-------------------- Deferred %s --------------------",
               code->instruction_index(),
               code->instr()->hydrogen_value()->id(),
(...skipping 45 matching lines...)
   safepoints_.Emit(masm(), GetStackSlotCount());
   return !is_aborted();
 }
 
 
 Register LCodeGen::ToRegister(int index) const {
   return Register::FromAllocationIndex(index);
 }
 
 
-X87Register LCodeGen::ToX87Register(int index) const {
-  return X87Register::FromAllocationIndex(index);
-}
-
-
 XMMRegister LCodeGen::ToDoubleRegister(int index) const {
   return XMMRegister::FromAllocationIndex(index);
 }
 
 
-void LCodeGen::X87LoadForUsage(X87Register reg) {
-  ASSERT(x87_stack_.Contains(reg));
-  x87_stack_.Fxch(reg);
-  x87_stack_.pop();
-}
-
-
-void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
-  ASSERT(x87_stack_.Contains(reg1));
-  ASSERT(x87_stack_.Contains(reg2));
-  x87_stack_.Fxch(reg1, 1);
-  x87_stack_.Fxch(reg2);
-  x87_stack_.pop();
-  x87_stack_.pop();
-}
-
-
-void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
-  ASSERT(is_mutable_);
-  ASSERT(Contains(reg) && stack_depth_ > other_slot);
-  int i = ArrayIndex(reg);
-  int st = st2idx(i);
-  if (st != other_slot) {
-    int other_i = st2idx(other_slot);
-    X87Register other = stack_[other_i];
-    stack_[other_i] = reg;
-    stack_[i] = other;
-    if (st == 0) {
-      __ fxch(other_slot);
-    } else if (other_slot == 0) {
-      __ fxch(st);
-    } else {
-      __ fxch(st);
-      __ fxch(other_slot);
-      __ fxch(st);
-    }
-  }
-}
-
-
-int LCodeGen::X87Stack::st2idx(int pos) {
-  return stack_depth_ - pos - 1;
-}
-
-
-int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
-  for (int i = 0; i < stack_depth_; i++) {
-    if (stack_[i].is(reg)) return i;
-  }
-  UNREACHABLE();
-  return -1;
-}
-
-
-bool LCodeGen::X87Stack::Contains(X87Register reg) {
-  for (int i = 0; i < stack_depth_; i++) {
-    if (stack_[i].is(reg)) return true;
-  }
-  return false;
-}
-
-
-void LCodeGen::X87Stack::Free(X87Register reg) {
-  ASSERT(is_mutable_);
-  ASSERT(Contains(reg));
-  int i = ArrayIndex(reg);
-  int st = st2idx(i);
-  if (st > 0) {
-    // keep track of how fstp(i) changes the order of elements
-    int tos_i = st2idx(0);
-    stack_[i] = stack_[tos_i];
-  }
-  pop();
-  __ fstp(st);
-}
-
-
-void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
-  if (x87_stack_.Contains(dst)) {
-    x87_stack_.Fxch(dst);
-    __ fstp(0);
-  } else {
-    x87_stack_.push(dst);
-  }
-  X87Fld(src, opts);
-}
-
-
-void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
-  ASSERT(!src.is_reg_only());
-  switch (opts) {
-    case kX87DoubleOperand:
-      __ fld_d(src);
-      break;
-    case kX87FloatOperand:
-      __ fld_s(src);
-      break;
-    case kX87IntOperand:
-      __ fild_s(src);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
-  ASSERT(!dst.is_reg_only());
-  x87_stack_.Fxch(src);
-  switch (opts) {
-    case kX87DoubleOperand:
-      __ fst_d(dst);
-      break;
-    case kX87IntOperand:
-      __ fist_s(dst);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
-  ASSERT(is_mutable_);
-  if (Contains(reg)) {
-    Free(reg);
-  }
-  // Mark this register as the next register to write to
-  stack_[stack_depth_] = reg;
-}
-
-
-void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
-  ASSERT(is_mutable_);
-  // Assert the reg is prepared to write, but not on the virtual stack yet
-  ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) &&
-         stack_depth_ < X87Register::kNumAllocatableRegisters);
-  stack_depth_++;
-}
-
-
-void LCodeGen::X87PrepareBinaryOp(
-    X87Register left, X87Register right, X87Register result) {
-  // You need to use DefineSameAsFirst for x87 instructions
-  ASSERT(result.is(left));
-  x87_stack_.Fxch(right, 1);
-  x87_stack_.Fxch(left);
-}
-
-
-void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
-  if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters(isolate())) {
-    bool double_inputs = instr->HasDoubleRegisterInput();
-
-    // Flush stack from tos down, since FreeX87() will mess with tos
-    for (int i = stack_depth_-1; i >= 0; i--) {
-      X87Register reg = stack_[i];
-      // Skip registers which contain the inputs for the next instruction
-      // when flushing the stack
-      if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
-        continue;
-      }
-      Free(reg);
-      if (i < stack_depth_-1) i++;
-    }
-  }
-  if (instr->IsReturn()) {
-    while (stack_depth_ > 0) {
-      __ fstp(0);
-      stack_depth_--;
-    }
-    if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
-  }
-}
-
-
-void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) {
-  ASSERT(stack_depth_ <= 1);
-  // If ever used for new stubs producing two pairs of doubles joined into two
-  // phis this assert hits. That situation is not handled, since the two stacks
-  // might have st0 and st1 swapped.
-  if (current_block_id + 1 != goto_instr->block_id()) {
-    // If we have a value on the x87 stack on leaving a block, it must be a
-    // phi input. If the next block we compile is not the join block, we have
-    // to discard the stack state.
-    stack_depth_ = 0;
-  }
-}
-
-
-void LCodeGen::EmitFlushX87ForDeopt() {
-  // The deoptimizer does not support X87 Registers. But as long as we
-  // deopt from a stub its not a problem, since we will re-materialize the
-  // original stub inputs, which can't be double registers.
-  ASSERT(info()->IsStub());
-  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
-    __ pushfd();
-    __ VerifyX87StackDepth(x87_stack_.depth());
-    __ popfd();
-  }
-  for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0);
-}
-
-
 Register LCodeGen::ToRegister(LOperand* op) const {
   ASSERT(op->IsRegister());
   return ToRegister(op->index());
 }
 
 
-X87Register LCodeGen::ToX87Register(LOperand* op) const {
-  ASSERT(op->IsDoubleRegister());
-  return ToX87Register(op->index());
-}
-
-
 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
   ASSERT(op->IsDoubleRegister());
   return ToDoubleRegister(op->index());
 }
 
 
 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
   return ToRepresentation(op, Representation::Integer32());
 }
 
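For readers unfamiliar with the machinery deleted above: the X87Stack class kept a virtual model of the 8-slot x87 register stack so the code generator could find, swap (Fxch), and free values without redundant loads. A condensed sketch of the mapping it maintained (illustrative only, not the full deleted class):

// Virtual model of the physical x87 FPU stack (sketch of the deleted code).
struct X87StackModel {
  static const int kMaxDepth = 8;  // the hardware x87 stack has 8 slots
  int stack_[kMaxDepth];           // virtual register ids, bottom first
  int stack_depth_;
  // The value at array index i lives at physical slot st(stack_depth_ - i - 1);
  // st2idx() is its own inverse, so the same formula converts either way.
  int st2idx(int pos) const { return stack_depth_ - pos - 1; }
};

Because the deoptimizer never supported x87 registers (see the deleted EmitFlushX87ForDeopt), every deopt point also had to flush this stack, which is the other family of code this patch removes.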
(...skipping 331 matching lines...)
     __ pop(eax);
     __ popfd();
     ASSERT(frame_is_built_);
     __ call(entry, RelocInfo::RUNTIME_ENTRY);
     __ bind(&no_deopt);
     __ mov(Operand::StaticVariable(count), eax);
     __ pop(eax);
     __ popfd();
   }
 
-  // Before Instructions which can deopt, we normally flush the x87 stack. But
-  // we can have inputs or outputs of the current instruction on the stack,
-  // thus we need to flush them here from the physical stack to leave it in a
-  // consistent state.
-  if (x87_stack_.depth() > 0) {
-    Label done;
-    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
-    EmitFlushX87ForDeopt();
-    __ bind(&done);
-  }
-
   if (info()->ShouldTrapOnDeopt()) {
     Label done;
     if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
     __ int3();
     __ bind(&done);
   }
 
   ASSERT(info()->IsStub() || frame_is_built_);
   if (cc == no_condition && frame_is_built_) {
     __ call(entry, RelocInfo::RUNTIME_ENTRY);
(...skipping 840 matching lines...)
 }
 
 
 void LCodeGen::DoConstantD(LConstantD* instr) {
   double v = instr->value();
   uint64_t int_val = BitCast<uint64_t, double>(v);
   int32_t lower = static_cast<int32_t>(int_val);
   int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
   ASSERT(instr->result()->IsDoubleRegister());
 
-  if (!CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
-    __ push(Immediate(upper));
-    __ push(Immediate(lower));
-    X87Register reg = ToX87Register(instr->result());
-    X87Mov(reg, Operand(esp, 0));
-    __ add(Operand(esp), Immediate(kDoubleSize));
-  } else {
-    CpuFeatureScope scope1(masm(), SSE2);
-    XMMRegister res = ToDoubleRegister(instr->result());
-    if (int_val == 0) {
-      __ xorps(res, res);
-    } else {
-      Register temp = ToRegister(instr->temp());
-      if (CpuFeatures::IsSupported(SSE4_1)) {
-        CpuFeatureScope scope2(masm(), SSE4_1);
-        if (lower != 0) {
-          __ Move(temp, Immediate(lower));
-          __ movd(res, Operand(temp));
-          __ Move(temp, Immediate(upper));
-          __ pinsrd(res, Operand(temp), 1);
-        } else {
-          __ xorps(res, res);
-          __ Move(temp, Immediate(upper));
-          __ pinsrd(res, Operand(temp), 1);
-        }
-      } else {
-        __ Move(temp, Immediate(upper));
-        __ movd(res, Operand(temp));
-        __ psllq(res, 32);
-        if (lower != 0) {
-          XMMRegister xmm_scratch = double_scratch0();
-          __ Move(temp, Immediate(lower));
-          __ movd(xmm_scratch, Operand(temp));
-          __ orps(res, xmm_scratch);
-        }
-      }
-    }
-  }
+  XMMRegister res = ToDoubleRegister(instr->result());
+  if (int_val == 0) {
+    __ xorps(res, res);
+  } else {
+    Register temp = ToRegister(instr->temp());
+    if (CpuFeatures::IsSupported(SSE4_1)) {
+      CpuFeatureScope scope2(masm(), SSE4_1);
+      if (lower != 0) {
+        __ Move(temp, Immediate(lower));
+        __ movd(res, Operand(temp));
+        __ Move(temp, Immediate(upper));
+        __ pinsrd(res, Operand(temp), 1);
+      } else {
+        __ xorps(res, res);
+        __ Move(temp, Immediate(upper));
+        __ pinsrd(res, Operand(temp), 1);
+      }
+    } else {
+      __ Move(temp, Immediate(upper));
+      __ movd(res, Operand(temp));
+      __ psllq(res, 32);
+      if (lower != 0) {
+        XMMRegister xmm_scratch = double_scratch0();
+        __ Move(temp, Immediate(lower));
+        __ movd(xmm_scratch, Operand(temp));
+        __ orps(res, xmm_scratch);
+      }
+    }
+  }
 }
 
 
 void LCodeGen::DoConstantE(LConstantE* instr) {
   __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
 }
 
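As a worked example of the (lower, upper) split DoConstantD keeps using above: the constant's 64-bit IEEE-754 pattern is cut into two 32-bit halves, which the SSE4.1 path assembles with movd + pinsrd and the plain SSE2 path with movd + psllq + orps. A standalone sketch of the arithmetic (plain C++, independent of V8):

#include <cstdint>
#include <cstring>

int main() {
  double v = 1.0;                                // bit pattern 0x3FF0000000000000
  uint64_t int_val;
  std::memcpy(&int_val, &v, sizeof(int_val));    // same as BitCast<uint64_t, double>
  uint32_t lower = static_cast<uint32_t>(int_val);        // 0x00000000
  uint32_t upper = static_cast<uint32_t>(int_val >> 32);  // 0x3FF00000
  // SSE4.1 path: movd puts 'lower' in bits 0..31, pinsrd inserts 'upper'
  // into bits 32..63. SSE2 path: movd 'upper', psllq by 32, orps in 'lower'.
  return (lower == 0x00000000u && upper == 0x3FF00000u) ? 0 : 1;
}

Since lower is zero here, the generated SSE4.1 code would take the xorps + pinsrd shortcut and never materialize the low word.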
(...skipping 162 matching lines...)
       __ add(ToRegister(left), ToOperand(right));
     }
     if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
       DeoptimizeIf(overflow, instr->environment());
     }
   }
 }
 
 
 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
-  CpuFeatureScope scope(masm(), SSE2);
   LOperand* left = instr->left();
   LOperand* right = instr->right();
   ASSERT(left->Equals(instr->result()));
   HMathMinMax::Operation operation = instr->hydrogen()->operation();
   if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
     Label return_left;
     Condition condition = (operation == HMathMinMax::kMathMin)
         ? less_equal
         : greater_equal;
     if (right->IsConstantOperand()) {
(...skipping 42 matching lines...)
     __ j(parity_even, &return_left, Label::kNear);  // left == NaN.
     __ bind(&return_right);
     __ movaps(left_reg, right_reg);
 
     __ bind(&return_left);
   }
 }
 
 
 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
-  if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
-    CpuFeatureScope scope(masm(), SSE2);
-    XMMRegister left = ToDoubleRegister(instr->left());
-    XMMRegister right = ToDoubleRegister(instr->right());
-    XMMRegister result = ToDoubleRegister(instr->result());
-    switch (instr->op()) {
-      case Token::ADD:
-        __ addsd(left, right);
-        break;
-      case Token::SUB:
-        __ subsd(left, right);
-        break;
-      case Token::MUL:
-        __ mulsd(left, right);
-        break;
-      case Token::DIV:
-        __ divsd(left, right);
-        // Don't delete this mov. It may improve performance on some CPUs,
-        // when there is a mulsd depending on the result
-        __ movaps(left, left);
-        break;
-      case Token::MOD: {
-        // Pass two doubles as arguments on the stack.
-        __ PrepareCallCFunction(4, eax);
-        __ movsd(Operand(esp, 0 * kDoubleSize), left);
-        __ movsd(Operand(esp, 1 * kDoubleSize), right);
-        __ CallCFunction(
-            ExternalReference::mod_two_doubles_operation(isolate()),
-            4);
-
-        // Return value is in st(0) on ia32.
-        // Store it into the result register.
-        __ sub(Operand(esp), Immediate(kDoubleSize));
-        __ fstp_d(Operand(esp, 0));
-        __ movsd(result, Operand(esp, 0));
-        __ add(Operand(esp), Immediate(kDoubleSize));
-        break;
-      }
-      default:
-        UNREACHABLE();
-        break;
-    }
-  } else {
-    X87Register left = ToX87Register(instr->left());
-    X87Register right = ToX87Register(instr->right());
-    X87Register result = ToX87Register(instr->result());
-    if (instr->op() != Token::MOD) {
-      X87PrepareBinaryOp(left, right, result);
-    }
-    switch (instr->op()) {
-      case Token::ADD:
-        __ fadd_i(1);
-        break;
-      case Token::SUB:
-        __ fsub_i(1);
-        break;
-      case Token::MUL:
-        __ fmul_i(1);
-        break;
-      case Token::DIV:
-        __ fdiv_i(1);
-        break;
-      case Token::MOD: {
-        // Pass two doubles as arguments on the stack.
-        __ PrepareCallCFunction(4, eax);
-        X87Mov(Operand(esp, 1 * kDoubleSize), right);
-        X87Mov(Operand(esp, 0), left);
-        X87Free(right);
-        ASSERT(left.is(result));
-        X87PrepareToWrite(result);
-        __ CallCFunction(
-            ExternalReference::mod_two_doubles_operation(isolate()),
-            4);
-
-        // Return value is in st(0) on ia32.
-        X87CommitWrite(result);
-        break;
-      }
-      default:
-        UNREACHABLE();
-        break;
-    }
-  }
+  XMMRegister left = ToDoubleRegister(instr->left());
+  XMMRegister right = ToDoubleRegister(instr->right());
+  XMMRegister result = ToDoubleRegister(instr->result());
+  switch (instr->op()) {
+    case Token::ADD:
+      __ addsd(left, right);
+      break;
+    case Token::SUB:
+      __ subsd(left, right);
+      break;
+    case Token::MUL:
+      __ mulsd(left, right);
+      break;
+    case Token::DIV:
+      __ divsd(left, right);
+      // Don't delete this mov. It may improve performance on some CPUs,
+      // when there is a mulsd depending on the result
+      __ movaps(left, left);
+      break;
+    case Token::MOD: {
+      // Pass two doubles as arguments on the stack.
+      __ PrepareCallCFunction(4, eax);
+      __ movsd(Operand(esp, 0 * kDoubleSize), left);
+      __ movsd(Operand(esp, 1 * kDoubleSize), right);
+      __ CallCFunction(
+          ExternalReference::mod_two_doubles_operation(isolate()),
+          4);
+
+      // Return value is in st(0) on ia32.
+      // Store it into the result register.
+      __ sub(Operand(esp), Immediate(kDoubleSize));
+      __ fstp_d(Operand(esp, 0));
+      __ movsd(result, Operand(esp, 0));
+      __ add(Operand(esp), Immediate(kDoubleSize));
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
 }
 
 
 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
   ASSERT(ToRegister(instr->left()).is(edx));
   ASSERT(ToRegister(instr->right()).is(eax));
   ASSERT(ToRegister(instr->result()).is(eax));
 
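One non-obvious detail in the surviving MOD case of DoArithmeticD above: on ia32 the C calling convention returns a double in x87 st(0), never in an XMM register, which is why even the pure-SSE2 path must fstp_d the result to memory and movsd it back. The helper itself has fmod semantics; a behavioral sketch (assuming the runtime's modulo matches the C library here):

#include <cmath>

// What ExternalReference::mod_two_doubles_operation computes, behaviorally.
double ModTwoDoubles(double left, double right) {
  return std::fmod(left, right);  // e.g. ModTwoDoubles(5.5, 2.0) == 1.5
}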
(...skipping 34 matching lines...)
 
 
 void LCodeGen::DoBranch(LBranch* instr) {
   Representation r = instr->hydrogen()->value()->representation();
   if (r.IsSmiOrInteger32()) {
     Register reg = ToRegister(instr->value());
     __ test(reg, Operand(reg));
     EmitBranch(instr, not_zero);
   } else if (r.IsDouble()) {
     ASSERT(!info()->IsStub());
-    CpuFeatureScope scope(masm(), SSE2);
     XMMRegister reg = ToDoubleRegister(instr->value());
     XMMRegister xmm_scratch = double_scratch0();
     __ xorps(xmm_scratch, xmm_scratch);
     __ ucomisd(reg, xmm_scratch);
     EmitBranch(instr, not_equal);
   } else {
     ASSERT(r.IsTagged());
     Register reg = ToRegister(instr->value());
     HType type = instr->hydrogen()->value()->type();
     if (type.IsBoolean()) {
       ASSERT(!info()->IsStub());
       __ cmp(reg, factory()->true_value());
       EmitBranch(instr, equal);
     } else if (type.IsSmi()) {
       ASSERT(!info()->IsStub());
       __ test(reg, Operand(reg));
       EmitBranch(instr, not_equal);
     } else if (type.IsJSArray()) {
       ASSERT(!info()->IsStub());
       EmitBranch(instr, no_condition);
     } else if (type.IsHeapNumber()) {
       ASSERT(!info()->IsStub());
-      CpuFeatureScope scope(masm(), SSE2);
       XMMRegister xmm_scratch = double_scratch0();
       __ xorps(xmm_scratch, xmm_scratch);
       __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
       EmitBranch(instr, not_equal);
     } else if (type.IsString()) {
       ASSERT(!info()->IsStub());
       __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
       EmitBranch(instr, not_equal);
     } else {
       ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
(...skipping 65 matching lines...)
         __ CmpInstanceType(map, SYMBOL_TYPE);
         __ j(equal, instr->TrueLabel(chunk_));
       }
 
       if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
         // heap number -> false iff +0, -0, or NaN.
         Label not_heap_number;
         __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
                factory()->heap_number_map());
         __ j(not_equal, &not_heap_number, Label::kNear);
-        if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
-          CpuFeatureScope scope(masm(), SSE2);
-          XMMRegister xmm_scratch = double_scratch0();
-          __ xorps(xmm_scratch, xmm_scratch);
-          __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
-        } else {
-          __ fldz();
-          __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
-          __ FCmp();
-        }
+        XMMRegister xmm_scratch = double_scratch0();
+        __ xorps(xmm_scratch, xmm_scratch);
+        __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
         __ j(zero, instr->FalseLabel(chunk_));
         __ jmp(instr->TrueLabel(chunk_));
         __ bind(&not_heap_number);
       }
 
       if (!expected.IsGeneric()) {
         // We've seen something for the first time -> deopt.
         // This can only happen if we are not generic already.
         DeoptimizeIf(no_condition, instr->environment());
       }
     }
   }
 }
 
 
 void LCodeGen::EmitGoto(int block) {
   if (!IsNextEmittedBlock(block)) {
     __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
   }
 }
 
 
-void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {
-}
-
-
 void LCodeGen::DoGoto(LGoto* instr) {
   EmitGoto(instr->block_id());
 }
 
 
 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
   Condition cond = no_condition;
   switch (op) {
     case Token::EQ:
     case Token::EQ_STRICT:
(...skipping 31 matching lines...)
 
   if (left->IsConstantOperand() && right->IsConstantOperand()) {
     // We can statically evaluate the comparison.
     double left_val = ToDouble(LConstantOperand::cast(left));
     double right_val = ToDouble(LConstantOperand::cast(right));
     int next_block = EvalComparison(instr->op(), left_val, right_val) ?
         instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
     EmitGoto(next_block);
   } else {
     if (instr->is_double()) {
-      if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
-        CpuFeatureScope scope(masm(), SSE2);
-        __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
-      } else {
-        X87LoadForUsage(ToX87Register(right), ToX87Register(left));
-        __ FCmp();
-      }
+      __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
       // Don't base result on EFLAGS when a NaN is involved. Instead
       // jump to the false block.
       __ j(parity_even, instr->FalseLabel(chunk_));
     } else {
       if (right->IsConstantOperand()) {
         __ cmp(ToOperand(left),
                ToImmediate(right, instr->hydrogen()->representation()));
       } else if (left->IsConstantOperand()) {
         __ cmp(ToOperand(right),
                ToImmediate(left, instr->hydrogen()->representation()));
(...skipping 23 matching lines...)
 
 
 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
   if (instr->hydrogen()->representation().IsTagged()) {
     Register input_reg = ToRegister(instr->object());
     __ cmp(input_reg, factory()->the_hole_value());
     EmitBranch(instr, equal);
     return;
   }
 
-  bool use_sse2 = CpuFeatures::IsSupported(SSE2);
-  if (use_sse2) {
-    CpuFeatureScope scope(masm(), SSE2);
-    XMMRegister input_reg = ToDoubleRegister(instr->object());
-    __ ucomisd(input_reg, input_reg);
-    EmitFalseBranch(instr, parity_odd);
-  } else {
-    // Put the value to the top of stack
-    X87Register src = ToX87Register(instr->object());
-    X87LoadForUsage(src);
-    __ fld(0);
-    __ fld(0);
-    __ FCmp();
-    Label ok;
-    __ j(parity_even, &ok, Label::kNear);
-    __ fstp(0);
-    EmitFalseBranch(instr, no_condition);
-    __ bind(&ok);
-  }
-
+  XMMRegister input_reg = ToDoubleRegister(instr->object());
+  __ ucomisd(input_reg, input_reg);
+  EmitFalseBranch(instr, parity_odd);
 
   __ sub(esp, Immediate(kDoubleSize));
-  if (use_sse2) {
-    CpuFeatureScope scope(masm(), SSE2);
-    XMMRegister input_reg = ToDoubleRegister(instr->object());
-    __ movsd(MemOperand(esp, 0), input_reg);
-  } else {
-    __ fstp_d(MemOperand(esp, 0));
-  }
 
+  __ movsd(MemOperand(esp, 0), input_reg);
   __ add(esp, Immediate(kDoubleSize));
   int offset = sizeof(kHoleNanUpper32);
   __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
   EmitBranch(instr, equal);
 }
 
 
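The two-step test above relies on the hole being encoded as one specific NaN: ucomisd of a register against itself sets the parity flag only for NaN, so parity_odd rejects every ordinary double early, and only then are the upper 32 bits of the spilled value compared against kHoleNanUpper32. An equivalent C++ sketch of the predicate:

#include <cstdint>
#include <cstring>

// Sketch of the check DoCmpHoleAndBranch emits; kHoleNanUpper32 is the real
// V8 constant, passed in here rather than spelled out.
bool IsTheHoleNan(double value, uint32_t hole_nan_upper32) {
  if (value == value) return false;            // ucomisd reg, reg: not a NaN
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));    // the movsd-to-stack spill
  return static_cast<uint32_t>(bits >> 32) == hole_nan_upper32;
}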
 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
   Representation rep = instr->hydrogen()->value()->representation();
   ASSERT(!rep.IsInteger32());
   Register scratch = ToRegister(instr->temp());
 
   if (rep.IsDouble()) {
-    CpuFeatureScope use_sse2(masm(), SSE2);
     XMMRegister value = ToDoubleRegister(instr->value());
     XMMRegister xmm_scratch = double_scratch0();
     __ xorps(xmm_scratch, xmm_scratch);
     __ ucomisd(xmm_scratch, value);
     EmitFalseBranch(instr, not_equal);
     __ movmskpd(scratch, value);
     __ test(scratch, Immediate(1));
     EmitBranch(instr, not_zero);
   } else {
     Register value = ToRegister(instr->value());
(...skipping 282 matching lines...)
   __ bind(&true_value);
   __ mov(ToRegister(instr->result()), factory()->true_value());
   __ bind(&done);
 }
 
 
 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
    public:
     DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
-                                  LInstanceOfKnownGlobal* instr,
-                                  const X87Stack& x87_stack)
-        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+                                  LInstanceOfKnownGlobal* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() V8_OVERRIDE {
       codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
     }
     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
     Label* map_check() { return &map_check_; }
    private:
     LInstanceOfKnownGlobal* instr_;
     Label map_check_;
   };
 
   DeferredInstanceOfKnownGlobal* deferred;
-  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_);
+  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
 
   Label done, false_result;
   Register object = ToRegister(instr->value());
   Register temp = ToRegister(instr->temp());
 
   // A Smi is not an instance of anything.
   __ JumpIfSmi(object, &false_result, Label::kNear);
 
   // This is the inlined call site instanceof cache. The two occurences of the
   // hole value will be patched to the last map/result pair generated by the
(...skipping 128 matching lines...)
 void LCodeGen::DoReturn(LReturn* instr) {
   if (FLAG_trace && info()->IsOptimizing()) {
     // Preserve the return value on the stack and rely on the runtime call
     // to return the value in the same register. We're leaving the code
     // managed by the register allocator and tearing down the frame, it's
     // safe to write to the context register.
     __ push(eax);
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
-  if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
-    RestoreCallerDoubles();
-  }
+  if (info()->saves_caller_doubles()) RestoreCallerDoubles();
   if (dynamic_frame_alignment_) {
     // Fetch the state of the dynamic frame alignment.
     __ mov(edx, Operand(ebp,
         JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
   }
   int no_frame_start = -1;
   if (NeedsEagerFrame()) {
     __ mov(esp, ebp);
     __ pop(ebp);
     no_frame_start = masm_->pc_offset();
(...skipping 94 matching lines...)
   if (instr->hydrogen()->NeedsWriteBarrier()) {
     SmiCheck check_needed =
         instr->hydrogen()->value()->IsHeapObject()
             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     Register temp = ToRegister(instr->temp());
     int offset = Context::SlotOffset(instr->slot_index());
     __ RecordWriteContextSlot(context,
                               offset,
                               value,
                               temp,
-                              GetSaveFPRegsMode(isolate()),
+                              kSaveFPRegs,
                               EMIT_REMEMBERED_SET,
                               check_needed);
   }
 
   __ bind(&skip_assignment);
 }
 
 
 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
   HObjectAccess access = instr->hydrogen()->access();
   int offset = access.offset();
 
   if (access.IsExternalMemory()) {
     Register result = ToRegister(instr->result());
     MemOperand operand = instr->object()->IsConstantOperand()
         ? MemOperand::StaticVariable(ToExternalReference(
               LConstantOperand::cast(instr->object())))
         : MemOperand(ToRegister(instr->object()), offset);
     __ Load(result, operand, access.representation());
     return;
   }
 
   Register object = ToRegister(instr->object());
   if (instr->hydrogen()->representation().IsDouble()) {
-    if (CpuFeatures::IsSupported(SSE2)) {
-      CpuFeatureScope scope(masm(), SSE2);
-      XMMRegister result = ToDoubleRegister(instr->result());
-      __ movsd(result, FieldOperand(object, offset));
-    } else {
-      X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
-    }
+    XMMRegister result = ToDoubleRegister(instr->result());
+    __ movsd(result, FieldOperand(object, offset));
     return;
   }
 
   Register result = ToRegister(instr->result());
   if (!access.IsInobject()) {
     __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
     object = result;
   }
   __ Load(result, FieldOperand(object, offset), access.representation());
 }
(...skipping 106 matching lines...)
   }
   Operand operand(BuildFastArrayOperand(
       instr->elements(),
       key,
       instr->hydrogen()->key()->representation(),
       elements_kind,
       0,
       instr->additional_index()));
   if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
       elements_kind == FLOAT32_ELEMENTS) {
-    if (CpuFeatures::IsSupported(SSE2)) {
-      CpuFeatureScope scope(masm(), SSE2);
-      XMMRegister result(ToDoubleRegister(instr->result()));
-      __ movss(result, operand);
-      __ cvtss2sd(result, result);
-    } else {
-      X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
-    }
+    XMMRegister result(ToDoubleRegister(instr->result()));
+    __ movss(result, operand);
+    __ cvtss2sd(result, result);
   } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
              elements_kind == FLOAT64_ELEMENTS) {
-    if (CpuFeatures::IsSupported(SSE2)) {
-      CpuFeatureScope scope(masm(), SSE2);
-      __ movsd(ToDoubleRegister(instr->result()), operand);
-    } else {
-      X87Mov(ToX87Register(instr->result()), operand);
-    }
+    __ movsd(ToDoubleRegister(instr->result()), operand);
   } else {
     Register result(ToRegister(instr->result()));
     switch (elements_kind) {
       case EXTERNAL_INT8_ELEMENTS:
       case INT8_ELEMENTS:
         __ movsx_b(result, operand);
         break;
       case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
       case EXTERNAL_UINT8_ELEMENTS:
       case UINT8_ELEMENTS:
(...skipping 53 matching lines...)
     DeoptimizeIf(equal, instr->environment());
   }
 
   Operand double_load_operand = BuildFastArrayOperand(
       instr->elements(),
       instr->key(),
       instr->hydrogen()->key()->representation(),
       FAST_DOUBLE_ELEMENTS,
       FixedDoubleArray::kHeaderSize - kHeapObjectTag,
       instr->additional_index());
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatureScope scope(masm(), SSE2);
-    XMMRegister result = ToDoubleRegister(instr->result());
-    __ movsd(result, double_load_operand);
-  } else {
-    X87Mov(ToX87Register(instr->result()), double_load_operand);
-  }
+  XMMRegister result = ToDoubleRegister(instr->result());
+  __ movsd(result, double_load_operand);
 }
 
 
 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
   Register result = ToRegister(instr->result());
 
   // Load the result.
   __ mov(result,
          BuildFastArrayOperand(instr->elements(),
                                instr->key(),
(...skipping 401 matching lines...)
3919 DeoptimizeIf(negative, instr->environment()); 3541 DeoptimizeIf(negative, instr->environment());
3920 __ bind(&is_positive); 3542 __ bind(&is_positive);
3921 } 3543 }
3922 3544
3923 3545
3924 void LCodeGen::DoMathAbs(LMathAbs* instr) { 3546 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3925 // Class for deferred case. 3547 // Class for deferred case.
3926 class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode { 3548 class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
3927 public: 3549 public:
3928 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, 3550 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
3929 LMathAbs* instr, 3551 LMathAbs* instr)
3930 const X87Stack& x87_stack) 3552 : LDeferredCode(codegen), instr_(instr) { }
3931 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
3932 virtual void Generate() V8_OVERRIDE { 3553 virtual void Generate() V8_OVERRIDE {
3933 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); 3554 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3934 } 3555 }
3935 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 3556 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
3936 private: 3557 private:
3937 LMathAbs* instr_; 3558 LMathAbs* instr_;
3938 }; 3559 };
3939 3560
3940 ASSERT(instr->value()->Equals(instr->result())); 3561 ASSERT(instr->value()->Equals(instr->result()));
3941 Representation r = instr->hydrogen()->value()->representation(); 3562 Representation r = instr->hydrogen()->value()->representation();
3942 3563
3943 CpuFeatureScope scope(masm(), SSE2);
3944 if (r.IsDouble()) { 3564 if (r.IsDouble()) {
3945 XMMRegister scratch = double_scratch0(); 3565 XMMRegister scratch = double_scratch0();
3946 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3566 XMMRegister input_reg = ToDoubleRegister(instr->value());
3947 __ xorps(scratch, scratch); 3567 __ xorps(scratch, scratch);
3948 __ subsd(scratch, input_reg); 3568 __ subsd(scratch, input_reg);
3949 __ andps(input_reg, scratch); 3569 __ andps(input_reg, scratch);
3950 } else if (r.IsSmiOrInteger32()) { 3570 } else if (r.IsSmiOrInteger32()) {
3951 EmitIntegerMathAbs(instr); 3571 EmitIntegerMathAbs(instr);
3952 } else { // Tagged case. 3572 } else { // Tagged case.
3953 DeferredMathAbsTaggedHeapNumber* deferred = 3573 DeferredMathAbsTaggedHeapNumber* deferred =
3954 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_); 3574 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3955 Register input_reg = ToRegister(instr->value()); 3575 Register input_reg = ToRegister(instr->value());
3956 // Smi check. 3576 // Smi check.
3957 __ JumpIfNotSmi(input_reg, deferred->entry()); 3577 __ JumpIfNotSmi(input_reg, deferred->entry());
3958 EmitIntegerMathAbs(instr); 3578 EmitIntegerMathAbs(instr);
3959 __ bind(deferred->exit()); 3579 __ bind(deferred->exit());
3960 } 3580 }
3961 } 3581 }
3962 3582
3963 3583
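The double branch of DoMathAbs above is branch-free: for any value x, 0.0 - x can differ from x only in the sign bit, so ANDing the two clears the sign and keeps the magnitude. A minimal C++ sketch of the same bit trick, with illustrative names only:

  #include <cstdint>
  #include <cstring>

  // Branch-free fabs: x and (0.0 - x) agree on every bit except the sign,
  // so their bitwise AND clears the sign bit and preserves the magnitude.
  double AbsViaAnd(double x) {
    double neg = 0.0 - x;                        // subsd
    uint64_t bx, bn;
    std::memcpy(&bx, &x, sizeof(bx));
    std::memcpy(&bn, &neg, sizeof(bn));
    uint64_t babs = bx & bn;                     // andps
    double result;
    std::memcpy(&result, &babs, sizeof(result));
    return result;
  }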
3964 void LCodeGen::DoMathFloor(LMathFloor* instr) { 3584 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3965 CpuFeatureScope scope(masm(), SSE2);
3966 XMMRegister xmm_scratch = double_scratch0(); 3585 XMMRegister xmm_scratch = double_scratch0();
3967 Register output_reg = ToRegister(instr->result()); 3586 Register output_reg = ToRegister(instr->result());
3968 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3587 XMMRegister input_reg = ToDoubleRegister(instr->value());
3969 3588
3970 if (CpuFeatures::IsSupported(SSE4_1)) { 3589 if (CpuFeatures::IsSupported(SSE4_1)) {
3971 CpuFeatureScope scope(masm(), SSE4_1); 3590 CpuFeatureScope scope(masm(), SSE4_1);
3972 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3591 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3973 // Deoptimize on negative zero. 3592 // Deoptimize on negative zero.
3974 Label non_zero; 3593 Label non_zero;
3975 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. 3594 __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
(...skipping 45 matching lines...)
4021 __ j(equal, &done, Label::kNear); 3640 __ j(equal, &done, Label::kNear);
4022 __ sub(output_reg, Immediate(1)); 3641 __ sub(output_reg, Immediate(1));
4023 DeoptimizeIf(overflow, instr->environment()); 3642 DeoptimizeIf(overflow, instr->environment());
4024 3643
4025 __ bind(&done); 3644 __ bind(&done);
4026 } 3645 }
4027 } 3646 }
4028 3647
4029 3648
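Without SSE4.1's roundsd, the floor path (partly elided above) truncates toward zero and compensates afterwards: if converting the truncated result back does not reproduce a negative input, the truncation rounded up, so one is subtracted, with the overflow deopt shown above. A hedged C++ model, assuming the input fits in int32 (the generated code deoptimizes otherwise):

  #include <cstdint>

  // Floor via cvttsd2si-style truncation plus compensation for negatives.
  int32_t FloorViaTruncate(double input) {
    int32_t truncated = static_cast<int32_t>(input);   // rounds toward zero
    if (input < 0.0 && static_cast<double>(truncated) != input) {
      truncated -= 1;  // truncation rounded a negative value up; step down
    }
    return truncated;
  }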
4030 void LCodeGen::DoMathRound(LMathRound* instr) { 3649 void LCodeGen::DoMathRound(LMathRound* instr) {
4031 CpuFeatureScope scope(masm(), SSE2);
4032 Register output_reg = ToRegister(instr->result()); 3650 Register output_reg = ToRegister(instr->result());
4033 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3651 XMMRegister input_reg = ToDoubleRegister(instr->value());
4034 XMMRegister xmm_scratch = double_scratch0(); 3652 XMMRegister xmm_scratch = double_scratch0();
4035 XMMRegister input_temp = ToDoubleRegister(instr->temp()); 3653 XMMRegister input_temp = ToDoubleRegister(instr->temp());
4036 ExternalReference one_half = ExternalReference::address_of_one_half(); 3654 ExternalReference one_half = ExternalReference::address_of_one_half();
4037 ExternalReference minus_one_half = 3655 ExternalReference minus_one_half =
4038 ExternalReference::address_of_minus_one_half(); 3656 ExternalReference::address_of_minus_one_half();
4039 3657
4040 Label done, round_to_zero, below_one_half, do_not_compensate; 3658 Label done, round_to_zero, below_one_half, do_not_compensate;
4041 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; 3659 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
(...skipping 42 matching lines...)
4084 __ test(output_reg, Immediate(1)); 3702 __ test(output_reg, Immediate(1));
4085 __ RecordComment("Minus zero"); 3703 __ RecordComment("Minus zero");
4086 DeoptimizeIf(not_zero, instr->environment()); 3704 DeoptimizeIf(not_zero, instr->environment());
4087 } 3705 }
4088 __ Move(output_reg, Immediate(0)); 3706 __ Move(output_reg, Immediate(0));
4089 __ bind(&done); 3707 __ bind(&done);
4090 } 3708 }
4091 3709
4092 3710
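The round sequence (largely elided above) behaves as truncate(x + 0.5) on the upper range, while the visible tail rejects a -0 result when the minus-zero bailout is requested. A rough scalar model, ignoring the double-rounding edge case near 0.49999999999999994 that the compensation labels handle:

  #include <cmath>

  // Math.round as round-half-up; inputs in [-0.5, 0) yield -0, which the
  // minus-zero check above turns into a deopt instead of a plain zero.
  double RoundHalfUp(double x) {
    if (x >= -0.5 && x < 0.0) return -0.0;  // round_to_zero tail
    return std::floor(x + 0.5);
  }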
4093 void LCodeGen::DoMathSqrt(LMathSqrt* instr) { 3711 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
4094 CpuFeatureScope scope(masm(), SSE2);
4095 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3712 XMMRegister input_reg = ToDoubleRegister(instr->value());
4096 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); 3713 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
4097 __ sqrtsd(input_reg, input_reg); 3714 __ sqrtsd(input_reg, input_reg);
4098 } 3715 }
4099 3716
4100 3717
4101 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { 3718 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
4102 CpuFeatureScope scope(masm(), SSE2);
4103 XMMRegister xmm_scratch = double_scratch0(); 3719 XMMRegister xmm_scratch = double_scratch0();
4104 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3720 XMMRegister input_reg = ToDoubleRegister(instr->value());
4105 Register scratch = ToRegister(instr->temp()); 3721 Register scratch = ToRegister(instr->temp());
4106 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); 3722 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
4107 3723
4108 // Note that according to ECMA-262 15.8.2.13: 3724 // Note that according to ECMA-262 15.8.2.13:
4109 // Math.pow(-Infinity, 0.5) == Infinity 3725 // Math.pow(-Infinity, 0.5) == Infinity
4110 // Math.sqrt(-Infinity) == NaN 3726 // Math.sqrt(-Infinity) == NaN
4111 Label done, sqrt; 3727 Label done, sqrt;
4112 // Check base for -Infinity. According to IEEE-754, single-precision 3728 // Check base for -Infinity. According to IEEE-754, single-precision
(...skipping 47 matching lines...)
4160 __ CallStub(&stub); 3776 __ CallStub(&stub);
4161 } else { 3777 } else {
4162 ASSERT(exponent_type.IsDouble()); 3778 ASSERT(exponent_type.IsDouble());
4163 MathPowStub stub(isolate(), MathPowStub::DOUBLE); 3779 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
4164 __ CallStub(&stub); 3780 __ CallStub(&stub);
4165 } 3781 }
4166 } 3782 }
4167 3783
4168 3784
4169 void LCodeGen::DoMathLog(LMathLog* instr) { 3785 void LCodeGen::DoMathLog(LMathLog* instr) {
4170 CpuFeatureScope scope(masm(), SSE2);
4171 ASSERT(instr->value()->Equals(instr->result())); 3786 ASSERT(instr->value()->Equals(instr->result()));
4172 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3787 XMMRegister input_reg = ToDoubleRegister(instr->value());
4173 XMMRegister xmm_scratch = double_scratch0(); 3788 XMMRegister xmm_scratch = double_scratch0();
4174 Label positive, done, zero; 3789 Label positive, done, zero;
4175 __ xorps(xmm_scratch, xmm_scratch); 3790 __ xorps(xmm_scratch, xmm_scratch);
4176 __ ucomisd(input_reg, xmm_scratch); 3791 __ ucomisd(input_reg, xmm_scratch);
4177 __ j(above, &positive, Label::kNear); 3792 __ j(above, &positive, Label::kNear);
4178 __ j(not_carry, &zero, Label::kNear); 3793 __ j(not_carry, &zero, Label::kNear);
4179 ExternalReference nan = 3794 ExternalReference nan =
4180 ExternalReference::address_of_canonical_non_hole_nan(); 3795 ExternalReference::address_of_canonical_non_hole_nan();
(...skipping 11 matching lines...)
4192 __ fld_d(Operand(esp, 0)); 3807 __ fld_d(Operand(esp, 0));
4193 __ fyl2x(); 3808 __ fyl2x();
4194 __ fstp_d(Operand(esp, 0)); 3809 __ fstp_d(Operand(esp, 0));
4195 __ movsd(input_reg, Operand(esp, 0)); 3810 __ movsd(input_reg, Operand(esp, 0));
4196 __ add(Operand(esp), Immediate(kDoubleSize)); 3811 __ add(Operand(esp), Immediate(kDoubleSize));
4197 __ bind(&done); 3812 __ bind(&done);
4198 } 3813 }
4199 3814
4200 3815
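DoMathLog stays on the x87 stack even in the SSE2-only world: fldln2 (presumably in the elided lines) pushes ln 2, and fyl2x computes ST(1) * log2(ST(0)), which for ST(1) = ln 2 is exactly ln x. The identity, as a one-line C++ sketch:

  #include <cmath>

  const double kLn2 = 0.6931471805599453;  // what fldln2 pushes

  // fyl2x with y = ln(2): ln(x) == ln(2) * log2(x).
  double LogViaFyl2x(double x) { return kLn2 * std::log2(x); }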
4201 void LCodeGen::DoMathClz32(LMathClz32* instr) { 3816 void LCodeGen::DoMathClz32(LMathClz32* instr) {
4202 CpuFeatureScope scope(masm(), SSE2);
4203 Register input = ToRegister(instr->value()); 3817 Register input = ToRegister(instr->value());
4204 Register result = ToRegister(instr->result()); 3818 Register result = ToRegister(instr->result());
4205 Label not_zero_input; 3819 Label not_zero_input;
4206 __ bsr(result, input); 3820 __ bsr(result, input);
4207 3821
4208 __ j(not_zero, &not_zero_input); 3822 __ j(not_zero, &not_zero_input);
4209 __ Move(result, Immediate(63)); // 63^31 == 32 3823 __ Move(result, Immediate(63)); // 63^31 == 32
4210 3824
4211 __ bind(&not_zero_input); 3825 __ bind(&not_zero_input);
4212 __ xor_(result, Immediate(31)); // for x in [0..31], 31^x == 31-x. 3826 __ xor_(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
4213 } 3827 }
4214 3828
4215 3829
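DoMathClz32 leans on the xor identities spelled out in its comments: for idx in [0..31], 31 ^ idx == 31 - idx, and the zero-input case is routed around bsr's undefined result by loading 63, so that 63 ^ 31 == 32. A small C++ model with bsr emulated by a loop:

  #include <cstdint>

  // clz32 in the style of the generated code.
  uint32_t Clz32(uint32_t input) {
    uint32_t idx = 63;                                // the zero-input preset
    for (int i = 31; i >= 0; --i) {
      if (input & (1u << i)) { idx = i; break; }      // bsr: highest set bit
    }
    return idx ^ 31;  // 31 - idx for idx in [0..31]; 63 ^ 31 == 32 for zero
  }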
4216 void LCodeGen::DoMathExp(LMathExp* instr) { 3830 void LCodeGen::DoMathExp(LMathExp* instr) {
4217 CpuFeatureScope scope(masm(), SSE2);
4218 XMMRegister input = ToDoubleRegister(instr->value()); 3831 XMMRegister input = ToDoubleRegister(instr->value());
4219 XMMRegister result = ToDoubleRegister(instr->result()); 3832 XMMRegister result = ToDoubleRegister(instr->result());
4220 XMMRegister temp0 = double_scratch0(); 3833 XMMRegister temp0 = double_scratch0();
4221 Register temp1 = ToRegister(instr->temp1()); 3834 Register temp1 = ToRegister(instr->temp1());
4222 Register temp2 = ToRegister(instr->temp2()); 3835 Register temp2 = ToRegister(instr->temp2());
4223 3836
4224 MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2); 3837 MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
4225 } 3838 }
4226 3839
4227 3840
(...skipping 158 matching lines...)
4386 DeoptimizeIf(zero, instr->environment()); 3999 DeoptimizeIf(zero, instr->environment());
4387 4000
4388 // We know now that value is not a smi, so we can omit the check below. 4001 // We know now that value is not a smi, so we can omit the check below.
4389 check_needed = OMIT_SMI_CHECK; 4002 check_needed = OMIT_SMI_CHECK;
4390 } 4003 }
4391 } 4004 }
4392 } else if (representation.IsDouble()) { 4005 } else if (representation.IsDouble()) {
4393 ASSERT(access.IsInobject()); 4006 ASSERT(access.IsInobject());
4394 ASSERT(!instr->hydrogen()->has_transition()); 4007 ASSERT(!instr->hydrogen()->has_transition());
4395 ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); 4008 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4396 if (CpuFeatures::IsSupported(SSE2)) { 4009 XMMRegister value = ToDoubleRegister(instr->value());
4397 CpuFeatureScope scope(masm(), SSE2); 4010 __ movsd(FieldOperand(object, offset), value);
4398 XMMRegister value = ToDoubleRegister(instr->value());
4399 __ movsd(FieldOperand(object, offset), value);
4400 } else {
4401 X87Register value = ToX87Register(instr->value());
4402 X87Mov(FieldOperand(object, offset), value);
4403 }
4404 return; 4011 return;
4405 } 4012 }
4406 4013
4407 if (instr->hydrogen()->has_transition()) { 4014 if (instr->hydrogen()->has_transition()) {
4408 Handle<Map> transition = instr->hydrogen()->transition_map(); 4015 Handle<Map> transition = instr->hydrogen()->transition_map();
4409 AddDeprecationDependency(transition); 4016 AddDeprecationDependency(transition);
4410 if (!instr->hydrogen()->NeedsWriteBarrierForMap()) { 4017 if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
4411 __ mov(FieldOperand(object, HeapObject::kMapOffset), transition); 4018 __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
4412 } else { 4019 } else {
4413 Register temp = ToRegister(instr->temp()); 4020 Register temp = ToRegister(instr->temp());
4414 Register temp_map = ToRegister(instr->temp_map()); 4021 Register temp_map = ToRegister(instr->temp_map());
4415 __ mov(temp_map, transition); 4022 __ mov(temp_map, transition);
4416 __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map); 4023 __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
4417 // Update the write barrier for the map field. 4024 // Update the write barrier for the map field.
4418 __ RecordWriteField(object, 4025 __ RecordWriteField(object,
4419 HeapObject::kMapOffset, 4026 HeapObject::kMapOffset,
4420 temp_map, 4027 temp_map,
4421 temp, 4028 temp,
4422 GetSaveFPRegsMode(isolate()), 4029 kSaveFPRegs,
4423 OMIT_REMEMBERED_SET, 4030 OMIT_REMEMBERED_SET,
4424 OMIT_SMI_CHECK); 4031 OMIT_SMI_CHECK);
4425 } 4032 }
4426 } 4033 }
4427 4034
4428 // Do the store. 4035 // Do the store.
4429 Register write_register = object; 4036 Register write_register = object;
4430 if (!access.IsInobject()) { 4037 if (!access.IsInobject()) {
4431 write_register = ToRegister(instr->temp()); 4038 write_register = ToRegister(instr->temp());
4432 __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset)); 4039 __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
(...skipping 20 matching lines...)
4453 } 4060 }
4454 4061
4455 if (instr->hydrogen()->NeedsWriteBarrier()) { 4062 if (instr->hydrogen()->NeedsWriteBarrier()) {
4456 Register value = ToRegister(instr->value()); 4063 Register value = ToRegister(instr->value());
4457 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object; 4064 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
4458 // Update the write barrier for the object for in-object properties. 4065 // Update the write barrier for the object for in-object properties.
4459 __ RecordWriteField(write_register, 4066 __ RecordWriteField(write_register,
4460 offset, 4067 offset,
4461 value, 4068 value,
4462 temp, 4069 temp,
4463 GetSaveFPRegsMode(isolate()), 4070 kSaveFPRegs,
4464 EMIT_REMEMBERED_SET, 4071 EMIT_REMEMBERED_SET,
4465 check_needed); 4072 check_needed);
4466 } 4073 }
4467 } 4074 }
4468 4075
4469 4076
4470 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { 4077 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4471 ASSERT(ToRegister(instr->context()).is(esi)); 4078 ASSERT(ToRegister(instr->context()).is(esi));
4472 ASSERT(ToRegister(instr->object()).is(edx)); 4079 ASSERT(ToRegister(instr->object()).is(edx));
4473 ASSERT(ToRegister(instr->value()).is(eax)); 4080 ASSERT(ToRegister(instr->value()).is(eax));
(...skipping 39 matching lines...)
4513 } 4120 }
4514 Operand operand(BuildFastArrayOperand( 4121 Operand operand(BuildFastArrayOperand(
4515 instr->elements(), 4122 instr->elements(),
4516 key, 4123 key,
4517 instr->hydrogen()->key()->representation(), 4124 instr->hydrogen()->key()->representation(),
4518 elements_kind, 4125 elements_kind,
4519 0, 4126 0,
4520 instr->additional_index())); 4127 instr->additional_index()));
4521 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || 4128 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4522 elements_kind == FLOAT32_ELEMENTS) { 4129 elements_kind == FLOAT32_ELEMENTS) {
4523 if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) { 4130 XMMRegister xmm_scratch = double_scratch0();
4524 CpuFeatureScope scope(masm(), SSE2); 4131 __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
4525 XMMRegister xmm_scratch = double_scratch0(); 4132 __ movss(operand, xmm_scratch);
4526 __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
4527 __ movss(operand, xmm_scratch);
4528 } else {
4529 __ fld(0);
4530 __ fstp_s(operand);
4531 }
4532 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS || 4133 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4533 elements_kind == FLOAT64_ELEMENTS) { 4134 elements_kind == FLOAT64_ELEMENTS) {
4534 if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) { 4135 __ movsd(operand, ToDoubleRegister(instr->value()));
4535 CpuFeatureScope scope(masm(), SSE2);
4536 __ movsd(operand, ToDoubleRegister(instr->value()));
4537 } else {
4538 X87Mov(operand, ToX87Register(instr->value()));
4539 }
4540 } else { 4136 } else {
4541 Register value = ToRegister(instr->value()); 4137 Register value = ToRegister(instr->value());
4542 switch (elements_kind) { 4138 switch (elements_kind) {
4543 case EXTERNAL_UINT8_CLAMPED_ELEMENTS: 4139 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4544 case EXTERNAL_UINT8_ELEMENTS: 4140 case EXTERNAL_UINT8_ELEMENTS:
4545 case EXTERNAL_INT8_ELEMENTS: 4141 case EXTERNAL_INT8_ELEMENTS:
4546 case UINT8_ELEMENTS: 4142 case UINT8_ELEMENTS:
4547 case INT8_ELEMENTS: 4143 case INT8_ELEMENTS:
4548 case UINT8_CLAMPED_ELEMENTS: 4144 case UINT8_CLAMPED_ELEMENTS:
4549 __ mov_b(operand, value); 4145 __ mov_b(operand, value);
(...skipping 33 matching lines...)
4583 ExternalReference canonical_nan_reference = 4179 ExternalReference canonical_nan_reference =
4584 ExternalReference::address_of_canonical_non_hole_nan(); 4180 ExternalReference::address_of_canonical_non_hole_nan();
4585 Operand double_store_operand = BuildFastArrayOperand( 4181 Operand double_store_operand = BuildFastArrayOperand(
4586 instr->elements(), 4182 instr->elements(),
4587 instr->key(), 4183 instr->key(),
4588 instr->hydrogen()->key()->representation(), 4184 instr->hydrogen()->key()->representation(),
4589 FAST_DOUBLE_ELEMENTS, 4185 FAST_DOUBLE_ELEMENTS,
4590 FixedDoubleArray::kHeaderSize - kHeapObjectTag, 4186 FixedDoubleArray::kHeaderSize - kHeapObjectTag,
4591 instr->additional_index()); 4187 instr->additional_index());
4592 4188
4593 if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) { 4189 XMMRegister value = ToDoubleRegister(instr->value());
4594 CpuFeatureScope scope(masm(), SSE2);
4595 XMMRegister value = ToDoubleRegister(instr->value());
4596 4190
4597 if (instr->NeedsCanonicalization()) { 4191 if (instr->NeedsCanonicalization()) {
4598 Label have_value; 4192 Label have_value;
4599 4193
4600 __ ucomisd(value, value); 4194 __ ucomisd(value, value);
4601 __ j(parity_odd, &have_value, Label::kNear); // NaN. 4195 __ j(parity_odd, &have_value, Label::kNear); // NaN.
4602 4196
4603 __ movsd(value, Operand::StaticVariable(canonical_nan_reference)); 4197 __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
4604 __ bind(&have_value); 4198 __ bind(&have_value);
4605 } 4199 }
4606 4200
4607 __ movsd(double_store_operand, value); 4201 __ movsd(double_store_operand, value);
4608 } else {
4609 // Can't use SSE2 in the serializer
4610 if (instr->hydrogen()->IsConstantHoleStore()) {
4611 // This means we should store the (double) hole. No floating point
4612 // registers required.
4613 double nan_double = FixedDoubleArray::hole_nan_as_double();
4614 uint64_t int_val = BitCast<uint64_t, double>(nan_double);
4615 int32_t lower = static_cast<int32_t>(int_val);
4616 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
4617
4618 __ mov(double_store_operand, Immediate(lower));
4619 Operand double_store_operand2 = BuildFastArrayOperand(
4620 instr->elements(),
4621 instr->key(),
4622 instr->hydrogen()->key()->representation(),
4623 FAST_DOUBLE_ELEMENTS,
4624 FixedDoubleArray::kHeaderSize - kHeapObjectTag + kPointerSize,
4625 instr->additional_index());
4626 __ mov(double_store_operand2, Immediate(upper));
4627 } else {
4628 Label no_special_nan_handling;
4629 X87Register value = ToX87Register(instr->value());
4630 X87Fxch(value);
4631
4632 if (instr->NeedsCanonicalization()) {
4633 __ fld(0);
4634 __ fld(0);
4635 __ FCmp();
4636
4637 __ j(parity_odd, &no_special_nan_handling, Label::kNear);
4638 __ sub(esp, Immediate(kDoubleSize));
4639 __ fst_d(MemOperand(esp, 0));
4640 __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
4641 Immediate(kHoleNanUpper32));
4642 __ add(esp, Immediate(kDoubleSize));
4643 Label canonicalize;
4644 __ j(not_equal, &canonicalize, Label::kNear);
4645 __ jmp(&no_special_nan_handling, Label::kNear);
4646 __ bind(&canonicalize);
4647 __ fstp(0);
4648 __ fld_d(Operand::StaticVariable(canonical_nan_reference));
4649 }
4650
4651 __ bind(&no_special_nan_handling);
4652 __ fst_d(double_store_operand);
4653 }
4654 }
4655 } 4202 }
4656 4203
4657 4204
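The canonicalization above uses the fact that ucomisd raises the parity flag only on an unordered compare, i.e. only a NaN fails to equal itself; any such value is overwritten with the canonical NaN pattern before the store. A minimal C++ analogue (the quiet-NaN constant stands in for the external reference):

  #include <limits>

  // Replace any NaN payload with one canonical quiet NaN before storing.
  double CanonicalizeNaN(double value) {
    return (value != value)  // unordered self-compare: true only for NaN
        ? std::numeric_limits<double>::quiet_NaN()
        : value;
  }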
4658 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { 4205 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4659 Register elements = ToRegister(instr->elements()); 4206 Register elements = ToRegister(instr->elements());
4660 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; 4207 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
4661 4208
4662 Operand operand = BuildFastArrayOperand( 4209 Operand operand = BuildFastArrayOperand(
4663 instr->elements(), 4210 instr->elements(),
4664 instr->key(), 4211 instr->key(),
(...skipping 20 matching lines...)
4685 Register value = ToRegister(instr->value()); 4232 Register value = ToRegister(instr->value());
4686 ASSERT(!instr->key()->IsConstantOperand()); 4233 ASSERT(!instr->key()->IsConstantOperand());
4687 SmiCheck check_needed = 4234 SmiCheck check_needed =
4688 instr->hydrogen()->value()->IsHeapObject() 4235 instr->hydrogen()->value()->IsHeapObject()
4689 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; 4236 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4690 // Compute address of modified element and store it into key register. 4237 // Compute address of modified element and store it into key register.
4691 __ lea(key, operand); 4238 __ lea(key, operand);
4692 __ RecordWrite(elements, 4239 __ RecordWrite(elements,
4693 key, 4240 key,
4694 value, 4241 value,
4695 GetSaveFPRegsMode(isolate()), 4242 kSaveFPRegs,
4696 EMIT_REMEMBERED_SET, 4243 EMIT_REMEMBERED_SET,
4697 check_needed); 4244 check_needed);
4698 } 4245 }
4699 } 4246 }
4700 4247
4701 4248
4702 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { 4249 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4703 // By cases...external, fast-double, fast 4250 // By cases...external, fast-double, fast
4704 if (instr->is_typed_elements()) { 4251 if (instr->is_typed_elements()) {
4705 DoStoreKeyedExternalArray(instr); 4252 DoStoreKeyedExternalArray(instr);
(...skipping 73 matching lines...)
4779 ASSERT(ToRegister(instr->result()).is(eax)); 4326 ASSERT(ToRegister(instr->result()).is(eax));
4780 ArrayShiftStub stub(isolate(), instr->hydrogen()->kind()); 4327 ArrayShiftStub stub(isolate(), instr->hydrogen()->kind());
4781 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 4328 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4782 } 4329 }
4783 4330
4784 4331
4785 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { 4332 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4786 class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode { 4333 class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
4787 public: 4334 public:
4788 DeferredStringCharCodeAt(LCodeGen* codegen, 4335 DeferredStringCharCodeAt(LCodeGen* codegen,
4789 LStringCharCodeAt* instr, 4336 LStringCharCodeAt* instr)
4790 const X87Stack& x87_stack) 4337 : LDeferredCode(codegen), instr_(instr) { }
4791 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4792 virtual void Generate() V8_OVERRIDE { 4338 virtual void Generate() V8_OVERRIDE {
4793 codegen()->DoDeferredStringCharCodeAt(instr_); 4339 codegen()->DoDeferredStringCharCodeAt(instr_);
4794 } 4340 }
4795 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 4341 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4796 private: 4342 private:
4797 LStringCharCodeAt* instr_; 4343 LStringCharCodeAt* instr_;
4798 }; 4344 };
4799 4345
4800 DeferredStringCharCodeAt* deferred = 4346 DeferredStringCharCodeAt* deferred =
4801 new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_); 4347 new(zone()) DeferredStringCharCodeAt(this, instr);
4802 4348
4803 StringCharLoadGenerator::Generate(masm(), 4349 StringCharLoadGenerator::Generate(masm(),
4804 factory(), 4350 factory(),
4805 ToRegister(instr->string()), 4351 ToRegister(instr->string()),
4806 ToRegister(instr->index()), 4352 ToRegister(instr->index()),
4807 ToRegister(instr->result()), 4353 ToRegister(instr->result()),
4808 deferred->entry()); 4354 deferred->entry());
4809 __ bind(deferred->exit()); 4355 __ bind(deferred->exit());
4810 } 4356 }
4811 4357
(...skipping 26 matching lines...)
4838 __ AssertSmi(eax); 4384 __ AssertSmi(eax);
4839 __ SmiUntag(eax); 4385 __ SmiUntag(eax);
4840 __ StoreToSafepointRegisterSlot(result, eax); 4386 __ StoreToSafepointRegisterSlot(result, eax);
4841 } 4387 }
4842 4388
4843 4389
4844 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { 4390 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4845 class DeferredStringCharFromCode V8_FINAL : public LDeferredCode { 4391 class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
4846 public: 4392 public:
4847 DeferredStringCharFromCode(LCodeGen* codegen, 4393 DeferredStringCharFromCode(LCodeGen* codegen,
4848 LStringCharFromCode* instr, 4394 LStringCharFromCode* instr)
4849 const X87Stack& x87_stack) 4395 : LDeferredCode(codegen), instr_(instr) { }
4850 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4851 virtual void Generate() V8_OVERRIDE { 4396 virtual void Generate() V8_OVERRIDE {
4852 codegen()->DoDeferredStringCharFromCode(instr_); 4397 codegen()->DoDeferredStringCharFromCode(instr_);
4853 } 4398 }
4854 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 4399 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4855 private: 4400 private:
4856 LStringCharFromCode* instr_; 4401 LStringCharFromCode* instr_;
4857 }; 4402 };
4858 4403
4859 DeferredStringCharFromCode* deferred = 4404 DeferredStringCharFromCode* deferred =
4860 new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_); 4405 new(zone()) DeferredStringCharFromCode(this, instr);
4861 4406
4862 ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); 4407 ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
4863 Register char_code = ToRegister(instr->char_code()); 4408 Register char_code = ToRegister(instr->char_code());
4864 Register result = ToRegister(instr->result()); 4409 Register result = ToRegister(instr->result());
4865 ASSERT(!char_code.is(result)); 4410 ASSERT(!char_code.is(result));
4866 4411
4867 __ cmp(char_code, String::kMaxOneByteCharCode); 4412 __ cmp(char_code, String::kMaxOneByteCharCode);
4868 __ j(above, deferred->entry()); 4413 __ j(above, deferred->entry());
4869 __ Move(result, Immediate(factory()->single_character_string_cache())); 4414 __ Move(result, Immediate(factory()->single_character_string_cache()));
4870 __ mov(result, FieldOperand(result, 4415 __ mov(result, FieldOperand(result,
(...skipping 31 matching lines...)
4902 instr->hydrogen()->pretenure_flag()); 4447 instr->hydrogen()->pretenure_flag());
4903 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 4448 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4904 } 4449 }
4905 4450
4906 4451
4907 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { 4452 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4908 LOperand* input = instr->value(); 4453 LOperand* input = instr->value();
4909 LOperand* output = instr->result(); 4454 LOperand* output = instr->result();
4910 ASSERT(input->IsRegister() || input->IsStackSlot()); 4455 ASSERT(input->IsRegister() || input->IsStackSlot());
4911 ASSERT(output->IsDoubleRegister()); 4456 ASSERT(output->IsDoubleRegister());
4912 if (CpuFeatures::IsSupported(SSE2)) { 4457 __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
4913 CpuFeatureScope scope(masm(), SSE2);
4914 __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
4915 } else if (input->IsRegister()) {
4916 Register input_reg = ToRegister(input);
4917 __ push(input_reg);
4918 X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
4919 __ pop(input_reg);
4920 } else {
4921 X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
4922 }
4923 } 4458 }
4924 4459
4925 4460
4926 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { 4461 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4927 LOperand* input = instr->value(); 4462 LOperand* input = instr->value();
4928 LOperand* output = instr->result(); 4463 LOperand* output = instr->result();
4929 if (CpuFeatures::IsSupported(SSE2)) { 4464 LOperand* temp = instr->temp();
4930 CpuFeatureScope scope(masm(), SSE2); 4465 __ LoadUint32(ToDoubleRegister(output),
4931 LOperand* temp = instr->temp(); 4466 ToRegister(input),
4932 4467 ToDoubleRegister(temp));
4933 __ LoadUint32(ToDoubleRegister(output),
4934 ToRegister(input),
4935 ToDoubleRegister(temp));
4936 } else {
4937 X87Register res = ToX87Register(output);
4938 X87PrepareToWrite(res);
4939 __ LoadUint32NoSSE2(ToRegister(input));
4940 X87CommitWrite(res);
4941 }
4942 } 4468 }
4943 4469
4944 4470
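ia32 has no unsigned cvtsi2sd, so the LoadUint32 helper used above converts the bits as a signed int32 and, when the sign bit was set, adds 2^32 to land on the unsigned value. A hedged C++ version of that fix-up:

  #include <cstdint>

  // Unsigned-to-double through the signed converter plus a 2^32 bias,
  // mirroring cvtsi2sd + addsd in LoadUint32.
  double Uint32ToDouble(uint32_t value) {
    double converted = static_cast<double>(static_cast<int32_t>(value));
    if (static_cast<int32_t>(value) < 0) converted += 4294967296.0;  // 2^32
    return converted;
  }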
4945 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { 4471 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4946 class DeferredNumberTagI V8_FINAL : public LDeferredCode { 4472 class DeferredNumberTagI V8_FINAL : public LDeferredCode {
4947 public: 4473 public:
4948 DeferredNumberTagI(LCodeGen* codegen, 4474 DeferredNumberTagI(LCodeGen* codegen,
4949 LNumberTagI* instr, 4475 LNumberTagI* instr)
4950 const X87Stack& x87_stack) 4476 : LDeferredCode(codegen), instr_(instr) { }
4951 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4952 virtual void Generate() V8_OVERRIDE { 4477 virtual void Generate() V8_OVERRIDE {
4953 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(), 4478 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
4954 NULL, SIGNED_INT32); 4479 NULL, SIGNED_INT32);
4955 } 4480 }
4956 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 4481 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4957 private: 4482 private:
4958 LNumberTagI* instr_; 4483 LNumberTagI* instr_;
4959 }; 4484 };
4960 4485
4961 LOperand* input = instr->value(); 4486 LOperand* input = instr->value();
4962 ASSERT(input->IsRegister() && input->Equals(instr->result())); 4487 ASSERT(input->IsRegister() && input->Equals(instr->result()));
4963 Register reg = ToRegister(input); 4488 Register reg = ToRegister(input);
4964 4489
4965 DeferredNumberTagI* deferred = 4490 DeferredNumberTagI* deferred =
4966 new(zone()) DeferredNumberTagI(this, instr, x87_stack_); 4491 new(zone()) DeferredNumberTagI(this, instr);
4967 __ SmiTag(reg); 4492 __ SmiTag(reg);
4968 __ j(overflow, deferred->entry()); 4493 __ j(overflow, deferred->entry());
4969 __ bind(deferred->exit()); 4494 __ bind(deferred->exit());
4970 } 4495 }
4971 4496
4972 4497
4973 void LCodeGen::DoNumberTagU(LNumberTagU* instr) { 4498 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4974 class DeferredNumberTagU V8_FINAL : public LDeferredCode { 4499 class DeferredNumberTagU V8_FINAL : public LDeferredCode {
4975 public: 4500 public:
4976 DeferredNumberTagU(LCodeGen* codegen, 4501 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4977 LNumberTagU* instr, 4502 : LDeferredCode(codegen), instr_(instr) { }
4978 const X87Stack& x87_stack)
4979 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4980 virtual void Generate() V8_OVERRIDE { 4503 virtual void Generate() V8_OVERRIDE {
4981 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(), 4504 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4982 instr_->temp2(), UNSIGNED_INT32); 4505 instr_->temp2(), UNSIGNED_INT32);
4983 } 4506 }
4984 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 4507 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4985 private: 4508 private:
4986 LNumberTagU* instr_; 4509 LNumberTagU* instr_;
4987 }; 4510 };
4988 4511
4989 LOperand* input = instr->value(); 4512 LOperand* input = instr->value();
4990 ASSERT(input->IsRegister() && input->Equals(instr->result())); 4513 ASSERT(input->IsRegister() && input->Equals(instr->result()));
4991 Register reg = ToRegister(input); 4514 Register reg = ToRegister(input);
4992 4515
4993 DeferredNumberTagU* deferred = 4516 DeferredNumberTagU* deferred =
4994 new(zone()) DeferredNumberTagU(this, instr, x87_stack_); 4517 new(zone()) DeferredNumberTagU(this, instr);
4995 __ cmp(reg, Immediate(Smi::kMaxValue)); 4518 __ cmp(reg, Immediate(Smi::kMaxValue));
4996 __ j(above, deferred->entry()); 4519 __ j(above, deferred->entry());
4997 __ SmiTag(reg); 4520 __ SmiTag(reg);
4998 __ bind(deferred->exit()); 4521 __ bind(deferred->exit());
4999 } 4522 }
5000 4523
5001 4524
5002 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, 4525 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
5003 LOperand* value, 4526 LOperand* value,
5004 LOperand* temp1, 4527 LOperand* temp1,
5005 LOperand* temp2, 4528 LOperand* temp2,
5006 IntegerSignedness signedness) { 4529 IntegerSignedness signedness) {
5007 Label done, slow; 4530 Label done, slow;
5008 Register reg = ToRegister(value); 4531 Register reg = ToRegister(value);
5009 Register tmp = ToRegister(temp1); 4532 Register tmp = ToRegister(temp1);
5010 XMMRegister xmm_scratch = double_scratch0(); 4533 XMMRegister xmm_scratch = double_scratch0();
5011 4534
5012 if (signedness == SIGNED_INT32) { 4535 if (signedness == SIGNED_INT32) {
5013 // There was overflow, so bits 30 and 31 of the original integer 4536 // There was overflow, so bits 30 and 31 of the original integer
5014 // disagree. Try to allocate a heap number in new space and store 4537 // disagree. Try to allocate a heap number in new space and store
5015 // the value in there. If that fails, call the runtime system. 4538 // the value in there. If that fails, call the runtime system.
5016 __ SmiUntag(reg); 4539 __ SmiUntag(reg);
5017 __ xor_(reg, 0x80000000); 4540 __ xor_(reg, 0x80000000);
5018 if (CpuFeatures::IsSupported(SSE2)) { 4541 __ Cvtsi2sd(xmm_scratch, Operand(reg));
5019 CpuFeatureScope feature_scope(masm(), SSE2);
5020 __ Cvtsi2sd(xmm_scratch, Operand(reg));
5021 } else {
5022 __ push(reg);
5023 __ fild_s(Operand(esp, 0));
5024 __ pop(reg);
5025 }
5026 } else { 4542 } else {
5027 if (CpuFeatures::IsSupported(SSE2)) { 4543 __ LoadUint32(xmm_scratch, reg, ToDoubleRegister(temp2));
5028 CpuFeatureScope feature_scope(masm(), SSE2);
5029 __ LoadUint32(xmm_scratch, reg, ToDoubleRegister(temp2));
5030 } else {
5031 // There's no fild variant for unsigned values, so zero-extend to a 64-bit
5032 // int manually.
5033 __ push(Immediate(0));
5034 __ push(reg);
5035 __ fild_d(Operand(esp, 0));
5036 __ pop(reg);
5037 __ pop(reg);
5038 }
5039 } 4544 }
5040 4545
5041 if (FLAG_inline_new) { 4546 if (FLAG_inline_new) {
5042 __ AllocateHeapNumber(reg, tmp, no_reg, &slow); 4547 __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
5043 __ jmp(&done, Label::kNear); 4548 __ jmp(&done, Label::kNear);
5044 } 4549 }
5045 4550
5046 // Slow case: Call the runtime system to do the number allocation. 4551 // Slow case: Call the runtime system to do the number allocation.
5047 __ bind(&slow); 4552 __ bind(&slow);
5048 { 4553 {
(...skipping 13 matching lines...)
5062 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); 4567 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
5063 __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber); 4568 __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
5064 RecordSafepointWithRegisters( 4569 RecordSafepointWithRegisters(
5065 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); 4570 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
5066 __ StoreToSafepointRegisterSlot(reg, eax); 4571 __ StoreToSafepointRegisterSlot(reg, eax);
5067 } 4572 }
5068 4573
5069 // Done. Put the value in xmm_scratch into the value of the allocated heap 4574 // Done. Put the value in xmm_scratch into the value of the allocated heap
5070 // number. 4575 // number.
5071 __ bind(&done); 4576 __ bind(&done);
5072 if (CpuFeatures::IsSupported(SSE2)) { 4577 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
5073 CpuFeatureScope feature_scope(masm(), SSE2);
5074 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
5075 } else {
5076 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
5077 }
5078 } 4578 }
5079 4579
5080 4580
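The SIGNED_INT32 entry above undoes a smi tag that overflowed: the arithmetic shift back copies bit 30 into bit 31, and since overflow means those two bits disagreed, a single xor with 0x80000000 restores the original integer before it is boxed. A small C++ check of that recovery (arithmetic right shift assumed, as on ia32):

  #include <cstdint>

  // reg arrives holding the overflowed tag, i.e. (v << 1) mod 2^32.
  int32_t RecoverFromSmiOverflow(int32_t tagged) {
    int32_t untagged = tagged >> 1;  // SmiUntag: bit 31 := old bit 30
    return static_cast<int32_t>(static_cast<uint32_t>(untagged) ^ 0x80000000u);
  }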
5081 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { 4581 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
5082 class DeferredNumberTagD V8_FINAL : public LDeferredCode { 4582 class DeferredNumberTagD V8_FINAL : public LDeferredCode {
5083 public: 4583 public:
5084 DeferredNumberTagD(LCodeGen* codegen, 4584 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
5085 LNumberTagD* instr, 4585 : LDeferredCode(codegen), instr_(instr) { }
5086 const X87Stack& x87_stack)
5087 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5088 virtual void Generate() V8_OVERRIDE { 4586 virtual void Generate() V8_OVERRIDE {
5089 codegen()->DoDeferredNumberTagD(instr_); 4587 codegen()->DoDeferredNumberTagD(instr_);
5090 } 4588 }
5091 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 4589 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5092 private: 4590 private:
5093 LNumberTagD* instr_; 4591 LNumberTagD* instr_;
5094 }; 4592 };
5095 4593
5096 Register reg = ToRegister(instr->result()); 4594 Register reg = ToRegister(instr->result());
5097 4595
5098 bool use_sse2 = CpuFeatures::IsSupported(SSE2);
5099 if (!use_sse2) {
5100 // Put the value to the top of stack
5101 X87Register src = ToX87Register(instr->value());
5102 X87LoadForUsage(src);
5103 }
5104
5105 DeferredNumberTagD* deferred = 4596 DeferredNumberTagD* deferred =
5106 new(zone()) DeferredNumberTagD(this, instr, x87_stack_); 4597 new(zone()) DeferredNumberTagD(this, instr);
5107 if (FLAG_inline_new) { 4598 if (FLAG_inline_new) {
5108 Register tmp = ToRegister(instr->temp()); 4599 Register tmp = ToRegister(instr->temp());
5109 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); 4600 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
5110 } else { 4601 } else {
5111 __ jmp(deferred->entry()); 4602 __ jmp(deferred->entry());
5112 } 4603 }
5113 __ bind(deferred->exit()); 4604 __ bind(deferred->exit());
5114 if (use_sse2) { 4605 XMMRegister input_reg = ToDoubleRegister(instr->value());
5115 CpuFeatureScope scope(masm(), SSE2); 4606 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
5116 XMMRegister input_reg = ToDoubleRegister(instr->value());
5117 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
5118 } else {
5119 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
5120 }
5121 } 4607 }
5122 4608
5123 4609
5124 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { 4610 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
5125 // TODO(3095996): Get rid of this. For now, we need to make the 4611 // TODO(3095996): Get rid of this. For now, we need to make the
5126 // result register contain a valid pointer because it is already 4612 // result register contain a valid pointer because it is already
5127 // contained in the register pointer map. 4613 // contained in the register pointer map.
5128 Register reg = ToRegister(instr->result()); 4614 Register reg = ToRegister(instr->result());
5129 __ Move(reg, Immediate(0)); 4615 __ Move(reg, Immediate(0));
5130 4616
(...skipping 34 matching lines...)
5165 if (instr->needs_check()) { 4651 if (instr->needs_check()) {
5166 __ test(result, Immediate(kSmiTagMask)); 4652 __ test(result, Immediate(kSmiTagMask));
5167 DeoptimizeIf(not_zero, instr->environment()); 4653 DeoptimizeIf(not_zero, instr->environment());
5168 } else { 4654 } else {
5169 __ AssertSmi(result); 4655 __ AssertSmi(result);
5170 } 4656 }
5171 __ SmiUntag(result); 4657 __ SmiUntag(result);
5172 } 4658 }
5173 4659
5174 4660
5175 void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
5176 Register temp_reg,
5177 X87Register res_reg,
5178 bool can_convert_undefined_to_nan,
5179 bool deoptimize_on_minus_zero,
5180 LEnvironment* env,
5181 NumberUntagDMode mode) {
5182 Label load_smi, done;
5183
5184 X87PrepareToWrite(res_reg);
5185 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
5186 // Smi check.
5187 __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
5188
5189 // Heap number map check.
5190 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5191 factory()->heap_number_map());
5192 if (!can_convert_undefined_to_nan) {
5193 DeoptimizeIf(not_equal, env);
5194 } else {
5195 Label heap_number, convert;
5196 __ j(equal, &heap_number, Label::kNear);
5197
5198 // Convert undefined (or hole) to NaN.
5199 __ cmp(input_reg, factory()->undefined_value());
5200 DeoptimizeIf(not_equal, env);
5201
5202 __ bind(&convert);
5203 ExternalReference nan =
5204 ExternalReference::address_of_canonical_non_hole_nan();
5205 __ fld_d(Operand::StaticVariable(nan));
5206 __ jmp(&done, Label::kNear);
5207
5208 __ bind(&heap_number);
5209 }
5210 // Heap number to x87 conversion.
5211 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
5212 if (deoptimize_on_minus_zero) {
5213 __ fldz();
5214 __ FCmp();
5215 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
5216 __ j(not_zero, &done, Label::kNear);
5217
5218 // Use general purpose registers to check if we have -0.0
5219 __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
5220 __ test(temp_reg, Immediate(HeapNumber::kSignMask));
5221 __ j(zero, &done, Label::kNear);
5222
5223 // Pop FPU stack before deoptimizing.
5224 __ fstp(0);
5225 DeoptimizeIf(not_zero, env);
5226 }
5227 __ jmp(&done, Label::kNear);
5228 } else {
5229 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
5230 }
5231
5232 __ bind(&load_smi);
5233 // Clobbering a temp is faster than re-tagging the
5234 // input register since we avoid dependencies.
5235 __ mov(temp_reg, input_reg);
5236 __ SmiUntag(temp_reg); // Untag smi before converting to float.
5237 __ push(temp_reg);
5238 __ fild_s(Operand(esp, 0));
5239 __ add(esp, Immediate(kPointerSize));
5240 __ bind(&done);
5241 X87CommitWrite(res_reg);
5242 }
5243
5244
5245 void LCodeGen::EmitNumberUntagD(Register input_reg, 4661 void LCodeGen::EmitNumberUntagD(Register input_reg,
5246 Register temp_reg, 4662 Register temp_reg,
5247 XMMRegister result_reg, 4663 XMMRegister result_reg,
5248 bool can_convert_undefined_to_nan, 4664 bool can_convert_undefined_to_nan,
5249 bool deoptimize_on_minus_zero, 4665 bool deoptimize_on_minus_zero,
5250 LEnvironment* env, 4666 LEnvironment* env,
5251 NumberUntagDMode mode) { 4667 NumberUntagDMode mode) {
5252 Label convert, load_smi, done; 4668 Label convert, load_smi, done;
5253 4669
5254 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { 4670 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
(...skipping 95 matching lines...)
5350 __ jmp(done); 4766 __ jmp(done);
5351 __ bind(&bailout); 4767 __ bind(&bailout);
5352 DeoptimizeIf(no_condition, instr->environment()); 4768 DeoptimizeIf(no_condition, instr->environment());
5353 } 4769 }
5354 } 4770 }
5355 4771
5356 4772
5357 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { 4773 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5358 class DeferredTaggedToI V8_FINAL : public LDeferredCode { 4774 class DeferredTaggedToI V8_FINAL : public LDeferredCode {
5359 public: 4775 public:
5360 DeferredTaggedToI(LCodeGen* codegen, 4776 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
5361 LTaggedToI* instr, 4777 : LDeferredCode(codegen), instr_(instr) { }
5362 const X87Stack& x87_stack)
5363 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5364 virtual void Generate() V8_OVERRIDE { 4778 virtual void Generate() V8_OVERRIDE {
5365 codegen()->DoDeferredTaggedToI(instr_, done()); 4779 codegen()->DoDeferredTaggedToI(instr_, done());
5366 } 4780 }
5367 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 4781 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5368 private: 4782 private:
5369 LTaggedToI* instr_; 4783 LTaggedToI* instr_;
5370 }; 4784 };
5371 4785
5372 LOperand* input = instr->value(); 4786 LOperand* input = instr->value();
5373 ASSERT(input->IsRegister()); 4787 ASSERT(input->IsRegister());
5374 Register input_reg = ToRegister(input); 4788 Register input_reg = ToRegister(input);
5375 ASSERT(input_reg.is(ToRegister(instr->result()))); 4789 ASSERT(input_reg.is(ToRegister(instr->result())));
5376 4790
5377 if (instr->hydrogen()->value()->representation().IsSmi()) { 4791 if (instr->hydrogen()->value()->representation().IsSmi()) {
5378 __ SmiUntag(input_reg); 4792 __ SmiUntag(input_reg);
5379 } else { 4793 } else {
5380 DeferredTaggedToI* deferred = 4794 DeferredTaggedToI* deferred =
5381 new(zone()) DeferredTaggedToI(this, instr, x87_stack_); 4795 new(zone()) DeferredTaggedToI(this, instr);
5382 // Optimistically untag the input. 4796 // Optimistically untag the input.
5383 // If the input is a HeapObject, SmiUntag will set the carry flag. 4797 // If the input is a HeapObject, SmiUntag will set the carry flag.
5384 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); 4798 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
5385 __ SmiUntag(input_reg); 4799 __ SmiUntag(input_reg);
5386 // Branch to deferred code if the input was tagged. 4800 // Branch to deferred code if the input was tagged.
5387 // The deferred code will take care of restoring the tag. 4801 // The deferred code will take care of restoring the tag.
5388 __ j(carry, deferred->entry()); 4802 __ j(carry, deferred->entry());
5389 __ bind(deferred->exit()); 4803 __ bind(deferred->exit());
5390 } 4804 }
5391 } 4805 }
5392 4806
5393 4807
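The optimistic untag in DoTaggedToI works because SmiUntag is a one-bit shift right and the hardware parks the shifted-out tag bit in the carry flag, so a heap object (tag 1) is detected without a separate test instruction. The carry made explicit in C++:

  #include <cstdint>

  // Returns false when the deferred (heap object) path must run.
  bool TryUntagSmi(int32_t tagged, int32_t* value) {
    bool carry = (tagged & 1) != 0;  // tag bit: 0 = smi, 1 = heap object
    *value = tagged >> 1;            // SmiUntag
    return !carry;                   // j(carry, deferred->entry())
  }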
5394 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { 4808 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5395 LOperand* input = instr->value(); 4809 LOperand* input = instr->value();
5396 ASSERT(input->IsRegister()); 4810 ASSERT(input->IsRegister());
5397 LOperand* temp = instr->temp(); 4811 LOperand* temp = instr->temp();
5398 ASSERT(temp->IsRegister()); 4812 ASSERT(temp->IsRegister());
5399 LOperand* result = instr->result(); 4813 LOperand* result = instr->result();
5400 ASSERT(result->IsDoubleRegister()); 4814 ASSERT(result->IsDoubleRegister());
5401 4815
5402 Register input_reg = ToRegister(input); 4816 Register input_reg = ToRegister(input);
5403 bool deoptimize_on_minus_zero = 4817 bool deoptimize_on_minus_zero =
5404 instr->hydrogen()->deoptimize_on_minus_zero(); 4818 instr->hydrogen()->deoptimize_on_minus_zero();
5405 Register temp_reg = ToRegister(temp); 4819 Register temp_reg = ToRegister(temp);
5406 4820
5407 HValue* value = instr->hydrogen()->value(); 4821 HValue* value = instr->hydrogen()->value();
5408 NumberUntagDMode mode = value->representation().IsSmi() 4822 NumberUntagDMode mode = value->representation().IsSmi()
5409 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; 4823 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
5410 4824
5411 if (CpuFeatures::IsSupported(SSE2)) { 4825 XMMRegister result_reg = ToDoubleRegister(result);
5412 CpuFeatureScope scope(masm(), SSE2); 4826 EmitNumberUntagD(input_reg,
5413 XMMRegister result_reg = ToDoubleRegister(result); 4827 temp_reg,
5414 EmitNumberUntagD(input_reg, 4828 result_reg,
5415 temp_reg, 4829 instr->hydrogen()->can_convert_undefined_to_nan(),
5416 result_reg, 4830 deoptimize_on_minus_zero,
5417 instr->hydrogen()->can_convert_undefined_to_nan(), 4831 instr->environment(),
5418 deoptimize_on_minus_zero, 4832 mode);
5419 instr->environment(),
5420 mode);
5421 } else {
5422 EmitNumberUntagDNoSSE2(input_reg,
5423 temp_reg,
5424 ToX87Register(instr->result()),
5425 instr->hydrogen()->can_convert_undefined_to_nan(),
5426 deoptimize_on_minus_zero,
5427 instr->environment(),
5428 mode);
5429 }
5430 } 4833 }
5431 4834
5432 4835
5433 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { 4836 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5434 LOperand* input = instr->value(); 4837 LOperand* input = instr->value();
5435 ASSERT(input->IsDoubleRegister()); 4838 ASSERT(input->IsDoubleRegister());
5436 LOperand* result = instr->result(); 4839 LOperand* result = instr->result();
5437 ASSERT(result->IsRegister()); 4840 ASSERT(result->IsRegister());
5438 Register result_reg = ToRegister(result); 4841 Register result_reg = ToRegister(result);
5439 4842
5440 if (instr->truncating()) { 4843 if (instr->truncating()) {
5441 if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) { 4844 XMMRegister input_reg = ToDoubleRegister(input);
5442 CpuFeatureScope scope(masm(), SSE2); 4845 __ TruncateDoubleToI(result_reg, input_reg);
5443 XMMRegister input_reg = ToDoubleRegister(input);
5444 __ TruncateDoubleToI(result_reg, input_reg);
5445 } else {
5446 X87Register input_reg = ToX87Register(input);
5447 X87Fxch(input_reg);
5448 __ TruncateX87TOSToI(result_reg);
5449 }
5450 } else { 4846 } else {
5451 Label bailout, done; 4847 Label bailout, done;
5452 if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) { 4848 XMMRegister input_reg = ToDoubleRegister(input);
5453 CpuFeatureScope scope(masm(), SSE2); 4849 XMMRegister xmm_scratch = double_scratch0();
5454 XMMRegister input_reg = ToDoubleRegister(input); 4850 __ DoubleToI(result_reg, input_reg, xmm_scratch,
5455 XMMRegister xmm_scratch = double_scratch0(); 4851 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
5456 __ DoubleToI(result_reg, input_reg, xmm_scratch,
5457 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
5458 } else {
5459 X87Register input_reg = ToX87Register(input);
5460 X87Fxch(input_reg);
5461 __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
5462 &bailout, Label::kNear);
5463 }
5464 __ jmp(&done, Label::kNear); 4852 __ jmp(&done, Label::kNear);
5465 __ bind(&bailout); 4853 __ bind(&bailout);
5466 DeoptimizeIf(no_condition, instr->environment()); 4854 DeoptimizeIf(no_condition, instr->environment());
5467 __ bind(&done); 4855 __ bind(&done);
5468 } 4856 }
5469 } 4857 }
5470 4858
5471 4859
5472 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { 4860 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5473 LOperand* input = instr->value(); 4861 LOperand* input = instr->value();
5474 ASSERT(input->IsDoubleRegister()); 4862 ASSERT(input->IsDoubleRegister());
5475 LOperand* result = instr->result(); 4863 LOperand* result = instr->result();
5476 ASSERT(result->IsRegister()); 4864 ASSERT(result->IsRegister());
5477 Register result_reg = ToRegister(result); 4865 Register result_reg = ToRegister(result);
5478 4866
5479 Label bailout, done; 4867 Label bailout, done;
5480 if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) { 4868 XMMRegister input_reg = ToDoubleRegister(input);
5481 CpuFeatureScope scope(masm(), SSE2); 4869 XMMRegister xmm_scratch = double_scratch0();
5482 XMMRegister input_reg = ToDoubleRegister(input); 4870 __ DoubleToI(result_reg, input_reg, xmm_scratch,
5483 XMMRegister xmm_scratch = double_scratch0(); 4871 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
5484 __ DoubleToI(result_reg, input_reg, xmm_scratch,
5485 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
5486 } else {
5487 X87Register input_reg = ToX87Register(input);
5488 X87Fxch(input_reg);
5489 __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
5490 &bailout, Label::kNear);
5491 }
5492 __ jmp(&done, Label::kNear); 4872 __ jmp(&done, Label::kNear);
5493 __ bind(&bailout); 4873 __ bind(&bailout);
5494 DeoptimizeIf(no_condition, instr->environment()); 4874 DeoptimizeIf(no_condition, instr->environment());
5495 __ bind(&done); 4875 __ bind(&done);
5496 4876
5497 __ SmiTag(result_reg); 4877 __ SmiTag(result_reg);
5498 DeoptimizeIf(overflow, instr->environment()); 4878 DeoptimizeIf(overflow, instr->environment());
5499 } 4879 }
5500 4880
5501 4881
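After the double-to-int conversion, DoDoubleToSmi still has to smi-tag, and the tag shift only leaves 31 bits of payload, hence the final overflow deopt above. A sketch of that last check:

  #include <cstdint>

  // Smi-tag as on ia32 (value << 1); fails when the value needs > 31 bits.
  bool TrySmiTag(int32_t value, int32_t* tagged) {
    *tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
    return (*tagged >> 1) == value;  // mirrors DeoptimizeIf(overflow, ...)
  }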
(...skipping 83 matching lines...)
5585 4965
5586 __ test(eax, Immediate(kSmiTagMask)); 4966 __ test(eax, Immediate(kSmiTagMask));
5587 } 4967 }
5588 DeoptimizeIf(zero, instr->environment()); 4968 DeoptimizeIf(zero, instr->environment());
5589 } 4969 }
5590 4970
5591 4971
5592 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { 4972 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5593 class DeferredCheckMaps V8_FINAL : public LDeferredCode { 4973 class DeferredCheckMaps V8_FINAL : public LDeferredCode {
5594 public: 4974 public:
5595 DeferredCheckMaps(LCodeGen* codegen, 4975 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5596 LCheckMaps* instr, 4976 : LDeferredCode(codegen), instr_(instr), object_(object) {
5597 Register object,
5598 const X87Stack& x87_stack)
5599 : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
5600 SetExit(check_maps()); 4977 SetExit(check_maps());
5601 } 4978 }
5602 virtual void Generate() V8_OVERRIDE { 4979 virtual void Generate() V8_OVERRIDE {
5603 codegen()->DoDeferredInstanceMigration(instr_, object_); 4980 codegen()->DoDeferredInstanceMigration(instr_, object_);
5604 } 4981 }
5605 Label* check_maps() { return &check_maps_; } 4982 Label* check_maps() { return &check_maps_; }
5606 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 4983 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5607 private: 4984 private:
5608 LCheckMaps* instr_; 4985 LCheckMaps* instr_;
5609 Label check_maps_; 4986 Label check_maps_;
5610 Register object_; 4987 Register object_;
5611 }; 4988 };
5612 4989
5613 if (instr->hydrogen()->IsStabilityCheck()) { 4990 if (instr->hydrogen()->IsStabilityCheck()) {
5614 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); 4991 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5615 for (int i = 0; i < maps->size(); ++i) { 4992 for (int i = 0; i < maps->size(); ++i) {
5616 AddStabilityDependency(maps->at(i).handle()); 4993 AddStabilityDependency(maps->at(i).handle());
5617 } 4994 }
5618 return; 4995 return;
5619 } 4996 }
5620 4997
5621 LOperand* input = instr->value(); 4998 LOperand* input = instr->value();
5622 ASSERT(input->IsRegister()); 4999 ASSERT(input->IsRegister());
5623 Register reg = ToRegister(input); 5000 Register reg = ToRegister(input);
5624 5001
5625 DeferredCheckMaps* deferred = NULL; 5002 DeferredCheckMaps* deferred = NULL;
5626 if (instr->hydrogen()->HasMigrationTarget()) { 5003 if (instr->hydrogen()->HasMigrationTarget()) {
5627 deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_); 5004 deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
5628 __ bind(deferred->check_maps()); 5005 __ bind(deferred->check_maps());
5629 } 5006 }
5630 5007
5631 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); 5008 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5632 Label success; 5009 Label success;
5633 for (int i = 0; i < maps->size() - 1; i++) { 5010 for (int i = 0; i < maps->size() - 1; i++) {
5634 Handle<Map> map = maps->at(i).handle(); 5011 Handle<Map> map = maps->at(i).handle();
5635 __ CompareMap(reg, map); 5012 __ CompareMap(reg, map);
5636 __ j(equal, &success, Label::kNear); 5013 __ j(equal, &success, Label::kNear);
5637 } 5014 }
5638 5015
5639 Handle<Map> map = maps->at(maps->size() - 1).handle(); 5016 Handle<Map> map = maps->at(maps->size() - 1).handle();
5640 __ CompareMap(reg, map); 5017 __ CompareMap(reg, map);
5641 if (instr->hydrogen()->HasMigrationTarget()) { 5018 if (instr->hydrogen()->HasMigrationTarget()) {
5642 __ j(not_equal, deferred->entry()); 5019 __ j(not_equal, deferred->entry());
5643 } else { 5020 } else {
5644 DeoptimizeIf(not_equal, instr->environment()); 5021 DeoptimizeIf(not_equal, instr->environment());
5645 } 5022 }
5646 5023
5647 __ bind(&success); 5024 __ bind(&success);
5648 } 5025 }
5649 5026
5650 5027
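DoCheckMaps compares against every allowed map, short-circuiting to success; only the final compare can fail into the deopt (or, with a migration target, the deferred migration). The control flow, sketched with opaque pointers standing in for Map handles:

  #include <vector>

  // Illustrative only; the generated code deoptimizes or migrates on the
  // final mismatch instead of returning false.
  bool MapsMatch(const void* obj_map, const std::vector<const void*>& maps) {
    for (size_t i = 0; i + 1 < maps.size(); ++i) {
      if (obj_map == maps[i]) return true;  // j(equal, &success)
    }
    return obj_map == maps.back();  // last map: deopt on mismatch
  }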
5651 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { 5028 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5652 CpuFeatureScope scope(masm(), SSE2);
5653 XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); 5029 XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
5654 XMMRegister xmm_scratch = double_scratch0(); 5030 XMMRegister xmm_scratch = double_scratch0();
5655 Register result_reg = ToRegister(instr->result()); 5031 Register result_reg = ToRegister(instr->result());
5656 __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg); 5032 __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
5657 } 5033 }
5658 5034
5659 5035
5660 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { 5036 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5661 ASSERT(instr->unclamped()->Equals(instr->result())); 5037 ASSERT(instr->unclamped()->Equals(instr->result()));
5662 Register value_reg = ToRegister(instr->result()); 5038 Register value_reg = ToRegister(instr->result());
5663 __ ClampUint8(value_reg); 5039 __ ClampUint8(value_reg);
5664 } 5040 }
5665 5041
5666 5042
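All of the clamp instructions funnel a value into the [0, 255] range of a UINT8_CLAMPED element. The integer saturation that ClampUint8 performs, in plain C++ (the double variant additionally rounds, which is elided here):

  #include <cstdint>

  // Saturating clamp to a uint8 lane.
  uint8_t ClampToUint8(int32_t value) {
    if (value < 0) return 0;
    if (value > 255) return 255;
    return static_cast<uint8_t>(value);
  }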
5667 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { 5043 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5668 CpuFeatureScope scope(masm(), SSE2);
5669
5670 ASSERT(instr->unclamped()->Equals(instr->result())); 5044 ASSERT(instr->unclamped()->Equals(instr->result()));
5671 Register input_reg = ToRegister(instr->unclamped()); 5045 Register input_reg = ToRegister(instr->unclamped());
5672 XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm()); 5046 XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
5673 XMMRegister xmm_scratch = double_scratch0(); 5047 XMMRegister xmm_scratch = double_scratch0();
5674 Label is_smi, done, heap_number; 5048 Label is_smi, done, heap_number;
5675 5049
5676 __ JumpIfSmi(input_reg, &is_smi); 5050 __ JumpIfSmi(input_reg, &is_smi);
5677 5051
5678 // Check for heap number 5052 // Check for heap number
5679 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), 5053 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
(...skipping 14 matching lines...) Expand all
5694 __ jmp(&done, Label::kNear); 5068 __ jmp(&done, Label::kNear);
5695 5069
5696 // smi 5070 // smi
5697 __ bind(&is_smi); 5071 __ bind(&is_smi);
5698 __ SmiUntag(input_reg); 5072 __ SmiUntag(input_reg);
5699 __ ClampUint8(input_reg); 5073 __ ClampUint8(input_reg);
5700 __ bind(&done); 5074 __ bind(&done);
5701 } 5075 }
5702 5076
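The elided lines above convert the heap number and check for undefined; they are assumed here to mirror the NoSSE2 variant that follows. The overall dispatch, as a hedged sketch with illustrative stand-in types (Tagged and Kind are not V8 declarations):

#include <stdint.h>
#include <cmath>

enum class Kind { kSmi, kHeapNumber, kUndefined, kOther };
struct Tagged { Kind kind; int32_t smi; double number; };  // illustrative only

static uint8_t ClampTaggedToUint8(const Tagged& v, bool* deopt) {
  *deopt = false;
  switch (v.kind) {
    case Kind::kSmi: {                 // JumpIfSmi -> SmiUntag -> ClampUint8
      int32_t i = v.smi;
      return i < 0 ? 0 : i > 255 ? 255 : static_cast<uint8_t>(i);
    }
    case Kind::kHeapNumber: {          // load the double, then clamp it
      double d = v.number;
      if (!(d > 0.0)) return 0;        // NaN and non-positive values -> 0
      if (d > 255.0) return 255;
      // nearbyint() in the default FP environment rounds half to even,
      // the "banker's rounding" the NoSSE2 comments below describe.
      return static_cast<uint8_t>(std::nearbyint(d));
    }
    case Kind::kUndefined:             // undefined clamps to zero
      return 0;
    default:                           // any other value deoptimizes
      *deopt = true;
      return 0;
  }
}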
5703 5077
5704 void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
5705 Register input_reg = ToRegister(instr->unclamped());
5706 Register result_reg = ToRegister(instr->result());
5707 Register scratch = ToRegister(instr->scratch());
5708 Register scratch2 = ToRegister(instr->scratch2());
5709 Register scratch3 = ToRegister(instr->scratch3());
5710 Label is_smi, done, heap_number, valid_exponent,
5711 largest_value, zero_result, maybe_nan_or_infinity;
5712
5713 __ JumpIfSmi(input_reg, &is_smi);
5714
5715 // Check for heap number
5716 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5717 factory()->heap_number_map());
5718 __ j(equal, &heap_number, Label::kNear);
5719
5720 // Check for undefined. Undefined is converted to zero for clamping
5721 // conversions.
5722 __ cmp(input_reg, factory()->undefined_value());
5723 DeoptimizeIf(not_equal, instr->environment());
5724 __ jmp(&zero_result, Label::kNear);
5725
5726 // Heap number
5727 __ bind(&heap_number);
5728
5729 // Surprisingly, all of the hand-crafted bit manipulations below are much
5730 // faster than the x86 FPU built-in instruction, especially since "banker's
5731 // rounding" would otherwise be very expensive.
5732
5733 // Get exponent word.
5734 __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
5735 __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
5736
5737 // Test for negative values --> clamp to zero
5738 __ test(scratch, scratch);
5739 __ j(negative, &zero_result, Label::kNear);
5740
5741 // Get exponent alone in scratch2.
5742 __ mov(scratch2, scratch);
5743 __ and_(scratch2, HeapNumber::kExponentMask);
5744 __ shr(scratch2, HeapNumber::kExponentShift);
5745 __ j(zero, &zero_result, Label::kNear);
5746 __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
5747 __ j(negative, &zero_result, Label::kNear);
5748
5749 const uint32_t non_int8_exponent = 7;
5750 __ cmp(scratch2, Immediate(non_int8_exponent + 1));
5751 // If the exponent is too big, check for special values.
5752 __ j(greater, &maybe_nan_or_infinity, Label::kNear);
5753
5754 __ bind(&valid_exponent);
5755 // Exponent word in scratch, exponent in scratch2. We know that
5756 // -1 <= exponent <= 7. The shift bias is the number of bits to shift the
5757 // mantissa such that with an exponent of 7 the top-most one ends up in
5758 // bit 30, allowing detection of the rounding overflow from 255.5 to 256
5759 // (bit 31 goes from 0 to 1).
5760 int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
5761 __ lea(result_reg, MemOperand(scratch2, shift_bias));
5762 // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the
5763 // top bits of the mantissa.
5764 __ and_(scratch, HeapNumber::kMantissaMask);
5765 // Put back the implicit 1 of the mantissa
5766 __ or_(scratch, 1 << HeapNumber::kExponentShift);
5767 // Shift up to round
5768 __ shl_cl(scratch);
5769 // Use "banker's rounding" to spec: If fractional part of number is 0.5, then
5770 // use the bit in the "ones" place and add it to the "halves" place, which has
5771 // the effect of rounding to even.
5772 __ mov(scratch2, scratch);
5773 const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
5774 const uint32_t one_bit_shift = one_half_bit_shift + 1;
5775 __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
5776 __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
5777 Label no_round;
5778 __ j(less, &no_round, Label::kNear);
5779 Label round_up;
5780 __ mov(scratch2, Immediate(1 << one_half_bit_shift));
5781 __ j(greater, &round_up, Label::kNear);
5782 __ test(scratch3, scratch3);
5783 __ j(not_zero, &round_up, Label::kNear);
5784 __ mov(scratch2, scratch);
5785 __ and_(scratch2, Immediate(1 << one_bit_shift));
5786 __ shr(scratch2, 1);
5787 __ bind(&round_up);
5788 __ add(scratch, scratch2);
5789 __ j(overflow, &largest_value, Label::kNear);
5790 __ bind(&no_round);
5791 __ shr(scratch, 23);
5792 __ mov(result_reg, scratch);
5793 __ jmp(&done, Label::kNear);
5794
5795 __ bind(&maybe_nan_or_infinity);
5796 // Check for NaN/Infinity; all other values map to 255.
5797 __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
5798 __ j(not_equal, &largest_value, Label::kNear);
5799
5800 // Check for NaN, which differs from Infinity in that at least one mantissa
5801 // bit is set.
5802 __ and_(scratch, HeapNumber::kMantissaMask);
5803 __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
5804 __ j(not_zero, &zero_result, Label::kNear); // M!=0 --> NaN
5805 // Infinity -> Fall through to map to 255.
5806
5807 __ bind(&largest_value);
5808 __ mov(result_reg, Immediate(255));
5809 __ jmp(&done, Label::kNear);
5810
5811 __ bind(&zero_result);
5812 __ xor_(result_reg, result_reg);
5813 __ jmp(&done, Label::kNear);
5814
5815 // smi
5816 __ bind(&is_smi);
5817 if (!input_reg.is(result_reg)) {
5818 __ mov(result_reg, input_reg);
5819 }
5820 __ SmiUntag(result_reg);
5821 __ ClampUint8(result_reg);
5822 __ bind(&done);
5823 }
5824
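For reference, the same clamp written as standalone C++ over the raw IEEE-754 bit pattern; a sketch of what the hand-rolled sequence above computes, not a line-for-line transcription (it works on the full 64-bit word instead of the split exponent and mantissa words). Worked values: 0.5 and 254.5 round to 0 and 254 (ties go to even), 255.5 rounds up to 256 and is then caught by the overflow clamp, and NaN maps to 0:

#include <stdint.h>
#include <string.h>

static uint8_t ClampDoubleToUint8(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof bits);                      // reinterpret without UB
  if (bits >> 63) return 0;                            // sign bit set -> 0
  int biased = static_cast<int>((bits >> 52) & 0x7FF);
  uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);
  if (biased == 0x7FF) return mantissa ? 0 : 255;      // NaN -> 0, +Infinity -> 255
  int exponent = biased - 1023;
  if (biased == 0 || exponent < -1) return 0;          // zeros, denormals, < 0.5
  if (exponent > 7) return 255;                        // >= 256 saturates
  uint64_t m = mantissa | (uint64_t{1} << 52);         // put back the implicit 1
  int shift = 52 - exponent;                           // bits below the binary point
  uint64_t integer = m >> shift;
  uint64_t fraction = m & ((uint64_t{1} << shift) - 1);
  uint64_t half = uint64_t{1} << (shift - 1);
  if (fraction > half || (fraction == half && (integer & 1))) {
    ++integer;                                         // round half to even
  }
  return integer > 255 ? 255 : static_cast<uint8_t>(integer);
}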
5825
5826 void LCodeGen::DoDoubleBits(LDoubleBits* instr) { 5078 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5827 CpuFeatureScope scope(masm(), SSE2);
5828 XMMRegister value_reg = ToDoubleRegister(instr->value()); 5079 XMMRegister value_reg = ToDoubleRegister(instr->value());
5829 Register result_reg = ToRegister(instr->result()); 5080 Register result_reg = ToRegister(instr->result());
5830 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) { 5081 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5831 if (CpuFeatures::IsSupported(SSE4_1)) { 5082 if (CpuFeatures::IsSupported(SSE4_1)) {
5832 CpuFeatureScope scope2(masm(), SSE4_1); 5083 CpuFeatureScope scope2(masm(), SSE4_1);
5833 __ pextrd(result_reg, value_reg, 1); 5084 __ pextrd(result_reg, value_reg, 1);
5834 } else { 5085 } else {
5835 XMMRegister xmm_scratch = double_scratch0(); 5086 XMMRegister xmm_scratch = double_scratch0();
5836 __ pshufd(xmm_scratch, value_reg, 1); 5087 __ pshufd(xmm_scratch, value_reg, 1);
5837 __ movd(result_reg, xmm_scratch); 5088 __ movd(result_reg, xmm_scratch);
5838 } 5089 }
5839 } else { 5090 } else {
5840 __ movd(result_reg, value_reg); 5091 __ movd(result_reg, value_reg);
5841 } 5092 }
5842 } 5093 }
5843 5094
5844 5095
5845 void LCodeGen::DoConstructDouble(LConstructDouble* instr) { 5096 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5846 Register hi_reg = ToRegister(instr->hi()); 5097 Register hi_reg = ToRegister(instr->hi());
5847 Register lo_reg = ToRegister(instr->lo()); 5098 Register lo_reg = ToRegister(instr->lo());
5848 XMMRegister result_reg = ToDoubleRegister(instr->result()); 5099 XMMRegister result_reg = ToDoubleRegister(instr->result());
5849 CpuFeatureScope scope(masm(), SSE2);
5850 5100
5851 if (CpuFeatures::IsSupported(SSE4_1)) { 5101 if (CpuFeatures::IsSupported(SSE4_1)) {
5852 CpuFeatureScope scope2(masm(), SSE4_1); 5102 CpuFeatureScope scope2(masm(), SSE4_1);
5853 __ movd(result_reg, lo_reg); 5103 __ movd(result_reg, lo_reg);
5854 __ pinsrd(result_reg, hi_reg, 1); 5104 __ pinsrd(result_reg, hi_reg, 1);
5855 } else { 5105 } else {
5856 XMMRegister xmm_scratch = double_scratch0(); 5106 XMMRegister xmm_scratch = double_scratch0();
5857 __ movd(result_reg, hi_reg); 5107 __ movd(result_reg, hi_reg);
5858 __ psllq(result_reg, 32); 5108 __ psllq(result_reg, 32);
5859 __ movd(xmm_scratch, lo_reg); 5109 __ movd(xmm_scratch, lo_reg);
5860 __ orps(result_reg, xmm_scratch); 5110 __ orps(result_reg, xmm_scratch);
5861 } 5111 }
5862 } 5112 }
5863 5113
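DoDoubleBits and DoConstructDouble are inverses: one extracts a 32-bit half of a double's bit pattern (pextrd, or pshufd plus movd without SSE4.1), the other reassembles a double from two 32-bit halves (pinsrd, or psllq plus orps). The plain C++ equivalent, as an illustrative sketch:

#include <stdint.h>
#include <string.h>

static uint32_t DoubleBits(double d, bool high) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof bits);
  return static_cast<uint32_t>(high ? bits >> 32 : bits);   // HIGH vs. LOW half
}

static double ConstructDouble(uint32_t hi, uint32_t lo) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;   // psllq 32, then or
  double d;
  memcpy(&d, &bits, sizeof d);
  return d;
}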
5864 5114
5865 void LCodeGen::DoAllocate(LAllocate* instr) { 5115 void LCodeGen::DoAllocate(LAllocate* instr) {
5866 class DeferredAllocate V8_FINAL : public LDeferredCode { 5116 class DeferredAllocate V8_FINAL : public LDeferredCode {
5867 public: 5117 public:
5868 DeferredAllocate(LCodeGen* codegen, 5118 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5869 LAllocate* instr, 5119 : LDeferredCode(codegen), instr_(instr) { }
5870 const X87Stack& x87_stack)
5871 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5872 virtual void Generate() V8_OVERRIDE { 5120 virtual void Generate() V8_OVERRIDE {
5873 codegen()->DoDeferredAllocate(instr_); 5121 codegen()->DoDeferredAllocate(instr_);
5874 } 5122 }
5875 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 5123 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5876 private: 5124 private:
5877 LAllocate* instr_; 5125 LAllocate* instr_;
5878 }; 5126 };
5879 5127
5880 DeferredAllocate* deferred = 5128 DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);
5881 new(zone()) DeferredAllocate(this, instr, x87_stack_);
5882 5129
5883 Register result = ToRegister(instr->result()); 5130 Register result = ToRegister(instr->result());
5884 Register temp = ToRegister(instr->temp()); 5131 Register temp = ToRegister(instr->temp());
5885 5132
5886 // Allocate memory for the object. 5133 // Allocate memory for the object.
5887 AllocationFlags flags = TAG_OBJECT; 5134 AllocationFlags flags = TAG_OBJECT;
5888 if (instr->hydrogen()->MustAllocateDoubleAligned()) { 5135 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5889 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); 5136 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5890 } 5137 }
5891 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { 5138 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
(...skipping 344 matching lines...)
6236 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 5483 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
6237 ASSERT(instr->HasEnvironment()); 5484 ASSERT(instr->HasEnvironment());
6238 LEnvironment* env = instr->environment(); 5485 LEnvironment* env = instr->environment();
6239 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); 5486 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
6240 } 5487 }
6241 5488
6242 5489
6243 void LCodeGen::DoStackCheck(LStackCheck* instr) { 5490 void LCodeGen::DoStackCheck(LStackCheck* instr) {
6244 class DeferredStackCheck V8_FINAL : public LDeferredCode { 5491 class DeferredStackCheck V8_FINAL : public LDeferredCode {
6245 public: 5492 public:
6246 DeferredStackCheck(LCodeGen* codegen, 5493 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
6247 LStackCheck* instr, 5494 : LDeferredCode(codegen), instr_(instr) { }
6248 const X87Stack& x87_stack)
6249 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
6250 virtual void Generate() V8_OVERRIDE { 5495 virtual void Generate() V8_OVERRIDE {
6251 codegen()->DoDeferredStackCheck(instr_); 5496 codegen()->DoDeferredStackCheck(instr_);
6252 } 5497 }
6253 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 5498 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
6254 private: 5499 private:
6255 LStackCheck* instr_; 5500 LStackCheck* instr_;
6256 }; 5501 };
6257 5502
6258 ASSERT(instr->HasEnvironment()); 5503 ASSERT(instr->HasEnvironment());
6259 LEnvironment* env = instr->environment(); 5504 LEnvironment* env = instr->environment();
(...skipping 10 matching lines...)
6270 ASSERT(instr->context()->IsRegister()); 5515 ASSERT(instr->context()->IsRegister());
6271 ASSERT(ToRegister(instr->context()).is(esi)); 5516 ASSERT(ToRegister(instr->context()).is(esi));
6272 CallCode(isolate()->builtins()->StackCheck(), 5517 CallCode(isolate()->builtins()->StackCheck(),
6273 RelocInfo::CODE_TARGET, 5518 RelocInfo::CODE_TARGET,
6274 instr); 5519 instr);
6275 __ bind(&done); 5520 __ bind(&done);
6276 } else { 5521 } else {
6277 ASSERT(instr->hydrogen()->is_backwards_branch()); 5522 ASSERT(instr->hydrogen()->is_backwards_branch());
6278 // Perform stack overflow check if this goto needs it before jumping. 5523 // Perform stack overflow check if this goto needs it before jumping.
6279 DeferredStackCheck* deferred_stack_check = 5524 DeferredStackCheck* deferred_stack_check =
6280 new(zone()) DeferredStackCheck(this, instr, x87_stack_); 5525 new(zone()) DeferredStackCheck(this, instr);
6281 ExternalReference stack_limit = 5526 ExternalReference stack_limit =
6282 ExternalReference::address_of_stack_limit(isolate()); 5527 ExternalReference::address_of_stack_limit(isolate());
6283 __ cmp(esp, Operand::StaticVariable(stack_limit)); 5528 __ cmp(esp, Operand::StaticVariable(stack_limit));
6284 __ j(below, deferred_stack_check->entry()); 5529 __ j(below, deferred_stack_check->entry());
6285 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); 5530 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
6286 __ bind(instr->done_label()); 5531 __ bind(instr->done_label());
6287 deferred_stack_check->SetExit(instr->done_label()); 5532 deferred_stack_check->SetExit(instr->done_label());
6288 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); 5533 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
6289 // Don't record a deoptimization index for the safepoint here. 5534 // Don't record a deoptimization index for the safepoint here.
6290 // This will be done explicitly when emitting call and the safepoint in 5535 // This will be done explicitly when emitting call and the safepoint in
(...skipping 93 matching lines...)
6384 __ StoreToSafepointRegisterSlot(object, eax); 5629 __ StoreToSafepointRegisterSlot(object, eax);
6385 } 5630 }
6386 5631
6387 5632
6388 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { 5633 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
6389 class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode { 5634 class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
6390 public: 5635 public:
6391 DeferredLoadMutableDouble(LCodeGen* codegen, 5636 DeferredLoadMutableDouble(LCodeGen* codegen,
6392 LLoadFieldByIndex* instr, 5637 LLoadFieldByIndex* instr,
6393 Register object, 5638 Register object,
6394 Register index, 5639 Register index)
6395 const X87Stack& x87_stack) 5640 : LDeferredCode(codegen),
6396 : LDeferredCode(codegen, x87_stack),
6397 instr_(instr), 5641 instr_(instr),
6398 object_(object), 5642 object_(object),
6399 index_(index) { 5643 index_(index) {
6400 } 5644 }
6401 virtual void Generate() V8_OVERRIDE { 5645 virtual void Generate() V8_OVERRIDE {
6402 codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_); 5646 codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
6403 } 5647 }
6404 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 5648 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
6405 private: 5649 private:
6406 LLoadFieldByIndex* instr_; 5650 LLoadFieldByIndex* instr_;
6407 Register object_; 5651 Register object_;
6408 Register index_; 5652 Register index_;
6409 }; 5653 };
6410 5654
6411 Register object = ToRegister(instr->object()); 5655 Register object = ToRegister(instr->object());
6412 Register index = ToRegister(instr->index()); 5656 Register index = ToRegister(instr->index());
6413 5657
6414 DeferredLoadMutableDouble* deferred; 5658 DeferredLoadMutableDouble* deferred;
6415 deferred = new(zone()) DeferredLoadMutableDouble( 5659 deferred = new(zone()) DeferredLoadMutableDouble(
6416 this, instr, object, index, x87_stack_); 5660 this, instr, object, index);
6417 5661
6418 Label out_of_object, done; 5662 Label out_of_object, done;
6419 __ test(index, Immediate(Smi::FromInt(1))); 5663 __ test(index, Immediate(Smi::FromInt(1)));
6420 __ j(not_zero, deferred->entry()); 5664 __ j(not_zero, deferred->entry());
6421 5665
6422 __ sar(index, 1); 5666 __ sar(index, 1);
6423 5667
6424 __ cmp(index, Immediate(0)); 5668 __ cmp(index, Immediate(0));
6425 __ j(less, &out_of_object, Label::kNear); 5669 __ j(less, &out_of_object, Label::kNear);
6426 __ mov(object, FieldOperand(object, 5670 __ mov(object, FieldOperand(object,
(...skipping 13 matching lines...)
6440 __ bind(deferred->exit()); 5684 __ bind(deferred->exit());
6441 __ bind(&done); 5685 __ bind(&done);
6442 } 5686 }
6443 5687
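The index register arrives as a smi with extra encoding; from the visible checks, bit 0 of the smi value flags a mutable HeapNumber that the deferred path boxes, and the remaining bits form a signed field index whose negative range selects the out-of-object properties array (the exact operand arithmetic sits in the elided lines). A hedged decode sketch, inferred from those checks rather than from V8's declared layout:

#include <stdint.h>

static void DecodeFieldByIndex(int32_t raw, bool* mutable_double,
                               bool* in_object, int32_t* field_index) {
  *mutable_double = (raw & 1) != 0;   // the test against Smi::FromInt(1)
  *field_index = raw >> 1;            // mirrored by the generated sar(index, 1)
  *in_object = *field_index >= 0;     // cmp(index, 0); j(less, &out_of_object)
}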
6444 5688
6445 #undef __ 5689 #undef __
6446 5690
6447 } } // namespace v8::internal 5691 } } // namespace v8::internal
6448 5692
6449 #endif // V8_TARGET_ARCH_IA32 5693 #endif // V8_TARGET_ARCH_IA32