OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 129 matching lines...)
140 | 140 |
141 static void EmitIdenticalObjectComparison(MacroAssembler* masm, | 141 static void EmitIdenticalObjectComparison(MacroAssembler* masm, |
142 Label* slow, | 142 Label* slow, |
143 Condition cc); | 143 Condition cc); |
144 static void EmitSmiNonsmiComparison(MacroAssembler* masm, | 144 static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
145 Register lhs, | 145 Register lhs, |
146 Register rhs, | 146 Register rhs, |
147 Label* rhs_not_nan, | 147 Label* rhs_not_nan, |
148 Label* slow, | 148 Label* slow, |
149 bool strict); | 149 bool strict); |
150 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc); | |
151 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, | 150 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
152 Register lhs, | 151 Register lhs, |
153 Register rhs); | 152 Register rhs); |
154 | 153 |
155 | 154 |
156 // Check if the operand is a heap number. | 155 // Check if the operand is a heap number. |
157 static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand, | 156 static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand, |
158 Register scratch1, Register scratch2, | 157 Register scratch1, Register scratch2, |
159 Label* not_a_heap_number) { | 158 Label* not_a_heap_number) { |
160 __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset)); | 159 __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset)); |
(...skipping 348 matching lines...)
509 | 508 |
510 __ Ret(USE_DELAY_SLOT); | 509 __ Ret(USE_DELAY_SLOT); |
511 __ or_(exponent, exponent, source_); | 510 __ or_(exponent, exponent, source_); |
512 } | 511 } |
513 | 512 |
514 | 513 |
515 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, | 514 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, |
516 FloatingPointHelper::Destination destination, | 515 FloatingPointHelper::Destination destination, |
517 Register scratch1, | 516 Register scratch1, |
518 Register scratch2) { | 517 Register scratch2) { |
519 if (CpuFeatures::IsSupported(FPU)) { | 518 __ sra(scratch1, a0, kSmiTagSize); |
520 CpuFeatureScope scope(masm, FPU); | 519 __ mtc1(scratch1, f14); |
521 __ sra(scratch1, a0, kSmiTagSize); | 520 __ cvt_d_w(f14, f14); |
522 __ mtc1(scratch1, f14); | 521 __ sra(scratch1, a1, kSmiTagSize); |
523 __ cvt_d_w(f14, f14); | 522 __ mtc1(scratch1, f12); |
524 __ sra(scratch1, a1, kSmiTagSize); | 523 __ cvt_d_w(f12, f12); |
525 __ mtc1(scratch1, f12); | 524 if (destination == kCoreRegisters) { |
526 __ cvt_d_w(f12, f12); | 525 __ Move(a2, a3, f14); |
527 if (destination == kCoreRegisters) { | 526 __ Move(a0, a1, f12); |
528 __ Move(a2, a3, f14); | |
529 __ Move(a0, a1, f12); | |
530 } | |
531 } else { | |
532 ASSERT(destination == kCoreRegisters); | |
533 // Write Smi from a0 to a3 and a2 in double format. | |
534 __ mov(scratch1, a0); | |
535 ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2); | |
536 __ push(ra); | |
537 __ Call(stub1.GetCode(masm->isolate())); | |
538 // Write Smi from a1 to a1 and a0 in double format. | |
539 __ mov(scratch1, a1); | |
540 ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2); | |
541 __ Call(stub2.GetCode(masm->isolate())); | |
542 __ pop(ra); | |
543 } | 527 } |
544 } | 528 } |
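Note: the new code makes the FPU path of LoadSmis unconditional. A minimal C++ sketch of what the sequence above computes per operand, assuming kSmiTagSize == 1 as on 32-bit MIPS (the function name is illustrative, not V8's):

    #include <cstdint>

    // A smi stores its integer value shifted left by the tag size, so untagging
    // is an arithmetic shift right; the FPU then widens the int32 to a double.
    static const int kSmiTagSizeSketch = 1;  // assumed value of kSmiTagSize

    double SmiToDouble(int32_t tagged_smi) {
      int32_t untagged = tagged_smi >> kSmiTagSizeSketch;  // __ sra(scratch1, a0, kSmiTagSize)
      return static_cast<double>(untagged);                // __ mtc1 + __ cvt_d_w
    }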
545 | 529 |
546 | 530 |
547 void FloatingPointHelper::LoadNumber(MacroAssembler* masm, | 531 void FloatingPointHelper::LoadNumber(MacroAssembler* masm, |
548 Destination destination, | 532 Destination destination, |
549 Register object, | 533 Register object, |
550 FPURegister dst, | 534 FPURegister dst, |
551 Register dst1, | 535 Register dst1, |
552 Register dst2, | 536 Register dst2, |
553 Register heap_number_map, | 537 Register heap_number_map, |
554 Register scratch1, | 538 Register scratch1, |
555 Register scratch2, | 539 Register scratch2, |
556 Label* not_number) { | 540 Label* not_number) { |
557 __ AssertRootValue(heap_number_map, | 541 __ AssertRootValue(heap_number_map, |
558 Heap::kHeapNumberMapRootIndex, | 542 Heap::kHeapNumberMapRootIndex, |
559 "HeapNumberMap register clobbered."); | 543 "HeapNumberMap register clobbered."); |
560 | 544 |
561 Label is_smi, done; | 545 Label is_smi, done; |
562 | 546 |
563 // Smi-check | 547 // Smi-check |
564 __ UntagAndJumpIfSmi(scratch1, object, &is_smi); | 548 __ UntagAndJumpIfSmi(scratch1, object, &is_smi); |
565 // Heap number check | 549 // Heap number check |
566 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); | 550 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); |
567 | 551 |
568 // Handle loading a double from a heap number. | 552 // Handle loading a double from a heap number. |
569 if (CpuFeatures::IsSupported(FPU) && | 553 if (destination == kFPURegisters) { |
570 destination == kFPURegisters) { | |
571 CpuFeatureScope scope(masm, FPU); | |
572 // Load the double from tagged HeapNumber to double register. | 554 // Load the double from tagged HeapNumber to double register. |
573 | 555 |
574 // ARM uses a workaround here because of the unaligned HeapNumber | 556 // ARM uses a workaround here because of the unaligned HeapNumber |
575 // kValueOffset. On MIPS this workaround is built into ldc1 so there's no | 557 // kValueOffset. On MIPS this workaround is built into ldc1 so there's no |
576 // point in generating even more instructions. | 558 // point in generating even more instructions. |
577 __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset)); | 559 __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset)); |
578 } else { | 560 } else { |
579 ASSERT(destination == kCoreRegisters); | 561 ASSERT(destination == kCoreRegisters); |
580 // Load the double from heap number to dst1 and dst2 in double format. | 562 // Load the double from heap number to dst1 and dst2 in double format. |
581 __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset)); | 563 __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset)); |
582 __ lw(dst2, FieldMemOperand(object, | 564 __ lw(dst2, FieldMemOperand(object, |
583 HeapNumber::kValueOffset + kPointerSize)); | 565 HeapNumber::kValueOffset + kPointerSize)); |
584 } | 566 } |
585 __ Branch(&done); | 567 __ Branch(&done); |
586 | 568 |
587 // Handle loading a double from a smi. | 569 // Handle loading a double from a smi. |
588 __ bind(&is_smi); | 570 __ bind(&is_smi); |
589 if (CpuFeatures::IsSupported(FPU)) { | 571 // Convert smi to double using FPU instructions. |
590 CpuFeatureScope scope(masm, FPU); | 572 __ mtc1(scratch1, dst); |
591 // Convert smi to double using FPU instructions. | 573 __ cvt_d_w(dst, dst); |
592 __ mtc1(scratch1, dst); | 574 if (destination == kCoreRegisters) { |
593 __ cvt_d_w(dst, dst); | 575 // Load the converted smi to dst1 and dst2 in double format. |
594 if (destination == kCoreRegisters) { | 576 __ Move(dst1, dst2, dst); |
595 // Load the converted smi to dst1 and dst2 in double format. | |
596 __ Move(dst1, dst2, dst); | |
597 } | |
598 } else { | |
599 ASSERT(destination == kCoreRegisters); | |
600 // Write smi to dst1 and dst2 double format. | |
601 __ mov(scratch1, object); | |
602 ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2); | |
603 __ push(ra); | |
604 __ Call(stub.GetCode(masm->isolate())); | |
605 __ pop(ra); | |
606 } | 577 } |
607 | |
608 __ bind(&done); | 578 __ bind(&done); |
609 } | 579 } |
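Note: on the kCoreRegisters path of LoadNumber above, the HeapNumber's double payload is read as two 32-bit words (dst1 at kValueOffset, dst2 one pointer further). A hedged sketch of that split, assuming the little-endian word order used here:

    #include <cstdint>
    #include <cstring>

    // Split a double into the two words the stub loads with the paired lw's.
    void SplitDouble(double value, uint32_t* lo_word, uint32_t* hi_word) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      *lo_word = static_cast<uint32_t>(bits);        // dst1: low mantissa word
      *hi_word = static_cast<uint32_t>(bits >> 32);  // dst2: sign/exponent/top mantissa
    }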
610 | 580 |
611 | 581 |
612 void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm, | 582 void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm, |
613 Register object, | 583 Register object, |
614 Register dst, | 584 Register dst, |
615 Register heap_number_map, | 585 Register heap_number_map, |
616 Register scratch1, | 586 Register scratch1, |
617 Register scratch2, | 587 Register scratch2, |
(...skipping 35 matching lines...)
653 Destination destination, | 623 Destination destination, |
654 FPURegister double_dst, | 624 FPURegister double_dst, |
655 Register dst_mantissa, | 625 Register dst_mantissa, |
656 Register dst_exponent, | 626 Register dst_exponent, |
657 Register scratch2, | 627 Register scratch2, |
658 FPURegister single_scratch) { | 628 FPURegister single_scratch) { |
659 ASSERT(!int_scratch.is(scratch2)); | 629 ASSERT(!int_scratch.is(scratch2)); |
660 ASSERT(!int_scratch.is(dst_mantissa)); | 630 ASSERT(!int_scratch.is(dst_mantissa)); |
661 ASSERT(!int_scratch.is(dst_exponent)); | 631 ASSERT(!int_scratch.is(dst_exponent)); |
662 | 632 |
663 Label done; | 633 __ mtc1(int_scratch, single_scratch); |
664 | 634 __ cvt_d_w(double_dst, single_scratch); |
665 if (CpuFeatures::IsSupported(FPU)) { | 635 if (destination == kCoreRegisters) { |
666 CpuFeatureScope scope(masm, FPU); | 636 __ Move(dst_mantissa, dst_exponent, double_dst); |
667 __ mtc1(int_scratch, single_scratch); | |
668 __ cvt_d_w(double_dst, single_scratch); | |
669 if (destination == kCoreRegisters) { | |
670 __ Move(dst_mantissa, dst_exponent, double_dst); | |
671 } | |
672 } else { | |
673 Label fewer_than_20_useful_bits; | |
674 // Expected output: | |
675 // | dst_exponent | dst_mantissa | | |
676 // | s | exp | mantissa | | |
677 | |
678 // Check for zero. | |
679 __ mov(dst_exponent, int_scratch); | |
680 __ mov(dst_mantissa, int_scratch); | |
681 __ Branch(&done, eq, int_scratch, Operand(zero_reg)); | |
682 | |
683 // Preload the sign of the value. | |
684 __ And(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask)); | |
685 // Get the absolute value of the object (as an unsigned integer). | |
686 Label skip_sub; | |
687 __ Branch(&skip_sub, ge, dst_exponent, Operand(zero_reg)); | |
688 __ Subu(int_scratch, zero_reg, int_scratch); | |
689 __ bind(&skip_sub); | |
690 | |
691 // Get mantissa[51:20]. | |
692 | |
693 // Get the position of the first set bit. | |
694 __ Clz(dst_mantissa, int_scratch); | |
695 __ li(scratch2, 31); | |
696 __ Subu(dst_mantissa, scratch2, dst_mantissa); | |
697 | |
698 // Set the exponent. | |
699 __ Addu(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias)); | |
700 __ Ins(dst_exponent, scratch2, | |
701 HeapNumber::kExponentShift, HeapNumber::kExponentBits); | |
702 | |
703 // Clear the first non null bit. | |
704 __ li(scratch2, Operand(1)); | |
705 __ sllv(scratch2, scratch2, dst_mantissa); | |
706 __ li(at, -1); | |
707 __ Xor(scratch2, scratch2, at); | |
708 __ And(int_scratch, int_scratch, scratch2); | |
709 | |
710 // Get the number of bits to set in the lower part of the mantissa. | |
711 __ Subu(scratch2, dst_mantissa, | |
712 Operand(HeapNumber::kMantissaBitsInTopWord)); | |
713 __ Branch(&fewer_than_20_useful_bits, le, scratch2, Operand(zero_reg)); | |
714 // Set the higher 20 bits of the mantissa. | |
715 __ srlv(at, int_scratch, scratch2); | |
716 __ or_(dst_exponent, dst_exponent, at); | |
717 __ li(at, 32); | |
718 __ subu(scratch2, at, scratch2); | |
719 __ sllv(dst_mantissa, int_scratch, scratch2); | |
720 __ Branch(&done); | |
721 | |
722 __ bind(&fewer_than_20_useful_bits); | |
723 __ li(at, HeapNumber::kMantissaBitsInTopWord); | |
724 __ subu(scratch2, at, dst_mantissa); | |
725 __ sllv(scratch2, int_scratch, scratch2); | |
726 __ Or(dst_exponent, dst_exponent, scratch2); | |
727 // Set dst_mantissa to 0. | |
728 __ mov(dst_mantissa, zero_reg); | |
729 } | 637 } |
730 __ bind(&done); | |
731 } | 638 } |
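Note: the deleted soft-float branch of ConvertIntToDouble built the IEEE-754 bit pattern by hand (sign bit, biased exponent from the highest set bit found with Clz, remaining bits left-aligned into the mantissa). A compilable sketch of that packing, assuming a GCC/Clang-style __builtin_clz and the usual double layout (bias 1023, 52 mantissa bits):

    #include <cstdint>

    uint64_t Int32ToDoubleBits(int32_t value) {
      if (value == 0) return 0;                                   // the early zero check
      uint64_t sign = value < 0 ? (1ULL << 63) : 0;               // HeapNumber::kSignMask
      uint32_t abs_value = value < 0 ? 0u - static_cast<uint32_t>(value)
                                     : static_cast<uint32_t>(value);
      int highest_bit = 31 - __builtin_clz(abs_value);            // __ Clz + __ Subu
      uint64_t exponent = 1023 + highest_bit;                     // kExponentBias + bit position
      uint64_t mantissa = static_cast<uint64_t>(abs_value)
                          & ~(1ULL << highest_bit);               // clear the implicit leading 1
      mantissa <<= (52 - highest_bit);                            // left-align the remaining bits
      return sign | (exponent << 52) | mantissa;
    }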
732 | 639 |
733 | 640 |
734 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, | 641 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, |
735 Register object, | 642 Register object, |
736 Destination destination, | 643 Destination destination, |
737 DoubleRegister double_dst, | 644 DoubleRegister double_dst, |
738 DoubleRegister double_scratch, | 645 DoubleRegister double_scratch, |
739 Register dst_mantissa, | 646 Register dst_mantissa, |
740 Register dst_exponent, | 647 Register dst_exponent, |
(...skipping 16 matching lines...)
757 dst_exponent, scratch2, single_scratch); | 664 dst_exponent, scratch2, single_scratch); |
758 __ Branch(&done); | 665 __ Branch(&done); |
759 | 666 |
760 __ bind(&obj_is_not_smi); | 667 __ bind(&obj_is_not_smi); |
761 __ AssertRootValue(heap_number_map, | 668 __ AssertRootValue(heap_number_map, |
762 Heap::kHeapNumberMapRootIndex, | 669 Heap::kHeapNumberMapRootIndex, |
763 "HeapNumberMap register clobbered."); | 670 "HeapNumberMap register clobbered."); |
764 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); | 671 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); |
765 | 672 |
766 // Load the number. | 673 // Load the number. |
767 if (CpuFeatures::IsSupported(FPU)) { | 674 // Load the double value. |
768 CpuFeatureScope scope(masm, FPU); | 675 __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset)); |
769 // Load the double value. | |
770 __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset)); | |
771 | 676 |
772 Register except_flag = scratch2; | 677 Register except_flag = scratch2; |
773 __ EmitFPUTruncate(kRoundToZero, | 678 __ EmitFPUTruncate(kRoundToZero, |
774 scratch1, | 679 scratch1, |
775 double_dst, | 680 double_dst, |
776 at, | 681 at, |
777 double_scratch, | 682 double_scratch, |
778 except_flag, | 683 except_flag, |
779 kCheckForInexactConversion); | 684 kCheckForInexactConversion); |
780 | 685 |
781 // Jump to not_int32 if the operation did not succeed. | 686 // Jump to not_int32 if the operation did not succeed. |
782 __ Branch(not_int32, ne, except_flag, Operand(zero_reg)); | 687 __ Branch(not_int32, ne, except_flag, Operand(zero_reg)); |
783 | 688 if (destination == kCoreRegisters) { |
784 if (destination == kCoreRegisters) { | 689 __ Move(dst_mantissa, dst_exponent, double_dst); |
785 __ Move(dst_mantissa, dst_exponent, double_dst); | |
786 } | |
787 | |
788 } else { | |
789 ASSERT(!scratch1.is(object) && !scratch2.is(object)); | |
790 // Load the double value in the destination registers. | |
791 bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent); | |
792 if (save_registers) { | |
793 // Save both output registers, because the other one probably holds | |
794 // an important value too. | |
795 __ Push(dst_exponent, dst_mantissa); | |
796 } | |
797 if (object.is(dst_mantissa)) { | |
798 __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset)); | |
799 __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset)); | |
800 } else { | |
801 __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset)); | |
802 __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset)); | |
803 } | |
804 | |
805 // Check for 0 and -0. | |
806 Label zero; | |
807 __ And(scratch1, dst_exponent, Operand(~HeapNumber::kSignMask)); | |
808 __ Or(scratch1, scratch1, Operand(dst_mantissa)); | |
809 __ Branch(&zero, eq, scratch1, Operand(zero_reg)); | |
810 | |
811 // Check that the value can be exactly represented by a 32-bit integer. | |
812 // Jump to not_int32 if that's not the case. | |
813 Label restore_input_and_miss; | |
814 DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2, | |
815 &restore_input_and_miss); | |
816 | |
817 // dst_* were trashed. Reload the double value. | |
818 if (save_registers) { | |
819 __ Pop(dst_exponent, dst_mantissa); | |
820 } | |
821 if (object.is(dst_mantissa)) { | |
822 __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset)); | |
823 __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset)); | |
824 } else { | |
825 __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset)); | |
826 __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset)); | |
827 } | |
828 | |
829 __ Branch(&done); | |
830 | |
831 __ bind(&restore_input_and_miss); | |
832 if (save_registers) { | |
833 __ Pop(dst_exponent, dst_mantissa); | |
834 } | |
835 __ Branch(not_int32); | |
836 | |
837 __ bind(&zero); | |
838 if (save_registers) { | |
839 __ Drop(2); | |
840 } | |
841 } | 690 } |
842 | |
843 __ bind(&done); | 691 __ bind(&done); |
844 } | 692 } |
845 | 693 |
846 | 694 |
847 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, | 695 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, |
848 Register object, | 696 Register object, |
849 Register dst, | 697 Register dst, |
850 Register heap_number_map, | 698 Register heap_number_map, |
851 Register scratch1, | 699 Register scratch1, |
852 Register scratch2, | 700 Register scratch2, |
(...skipping 12 matching lines...)
865 __ UntagAndJumpIfSmi(dst, object, &done); | 713 __ UntagAndJumpIfSmi(dst, object, &done); |
866 | 714 |
867 __ AssertRootValue(heap_number_map, | 715 __ AssertRootValue(heap_number_map, |
868 Heap::kHeapNumberMapRootIndex, | 716 Heap::kHeapNumberMapRootIndex, |
869 "HeapNumberMap register clobbered."); | 717 "HeapNumberMap register clobbered."); |
870 | 718 |
871 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined); | 719 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined); |
872 | 720 |
873 // Object is a heap number. | 721 // Object is a heap number. |
874 // Convert the floating point value to a 32-bit integer. | 722 // Convert the floating point value to a 32-bit integer. |
875 if (CpuFeatures::IsSupported(FPU)) { | 723 // Load the double value. |
876 CpuFeatureScope scope(masm, FPU); | 724 __ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset)); |
877 // Load the double value. | |
878 __ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset)); | |
879 | 725 |
880 Register except_flag = scratch2; | 726 Register except_flag = scratch2; |
881 __ EmitFPUTruncate(kRoundToZero, | 727 __ EmitFPUTruncate(kRoundToZero, |
882 dst, | 728 dst, |
883 double_scratch0, | 729 double_scratch0, |
884 scratch1, | 730 scratch1, |
885 double_scratch1, | 731 double_scratch1, |
886 except_flag, | 732 except_flag, |
887 kCheckForInexactConversion); | 733 kCheckForInexactConversion); |
888 | 734 |
889 // Jump to not_int32 if the operation did not succeed. | 735 // Jump to not_int32 if the operation did not succeed. |
890 __ Branch(not_int32, ne, except_flag, Operand(zero_reg)); | 736 __ Branch(not_int32, ne, except_flag, Operand(zero_reg)); |
891 } else { | |
892 // Load the double value in the destination registers. | |
893 __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset)); | |
894 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset)); | |
895 | |
896 // Check for 0 and -0. | |
897 __ And(dst, scratch1, Operand(~HeapNumber::kSignMask)); | |
898 __ Or(dst, scratch2, Operand(dst)); | |
899 __ Branch(&done, eq, dst, Operand(zero_reg)); | |
900 | |
901 DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32); | |
902 | |
903 // Registers state after DoubleIs32BitInteger. | |
904 // dst: mantissa[51:20]. | |
905 // scratch2: 1 | |
906 | |
907 // Shift back the higher bits of the mantissa. | |
908 __ srlv(dst, dst, scratch3); | |
909 // Set the implicit first bit. | |
910 __ li(at, 32); | |
911 __ subu(scratch3, at, scratch3); | |
912 __ sllv(scratch2, scratch2, scratch3); | |
913 __ Or(dst, dst, scratch2); | |
914 // Set the sign. | |
915 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); | |
916 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | |
917 Label skip_sub; | |
918 __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg)); | |
919 __ Subu(dst, zero_reg, dst); | |
920 __ bind(&skip_sub); | |
921 } | |
922 __ Branch(&done); | 737 __ Branch(&done); |
923 | 738 |
924 __ bind(&maybe_undefined); | 739 __ bind(&maybe_undefined); |
925 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 740 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
926 __ Branch(not_int32, ne, object, Operand(at)); | 741 __ Branch(not_int32, ne, object, Operand(at)); |
927 // |undefined| is truncated to 0. | 742 // |undefined| is truncated to 0. |
928 __ li(dst, Operand(Smi::FromInt(0))); | 743 __ li(dst, Operand(Smi::FromInt(0))); |
929 // Fall through. | 744 // Fall through. |
930 | 745 |
931 __ bind(&done); | 746 __ bind(&done); |
932 } | 747 } |
933 | 748 |
934 | 749 |
935 void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, | |
936 Register src_exponent, | |
937 Register src_mantissa, | |
938 Register dst, | |
939 Register scratch, | |
940 Label* not_int32) { | |
941 // Get exponent alone in scratch. | |
942 __ Ext(scratch, | |
943 src_exponent, | |
944 HeapNumber::kExponentShift, | |
945 HeapNumber::kExponentBits); | |
946 | |
947 // Substract the bias from the exponent. | |
948 __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias)); | |
949 | |
950 // src1: higher (exponent) part of the double value. | |
951 // src2: lower (mantissa) part of the double value. | |
952 // scratch: unbiased exponent. | |
953 | |
954 // Fast cases. Check for obvious non 32-bit integer values. | |
955 // Negative exponent cannot yield 32-bit integers. | |
956 __ Branch(not_int32, lt, scratch, Operand(zero_reg)); | |
957 // Exponent greater than 31 cannot yield 32-bit integers. | |
958 // Also, a positive value with an exponent equal to 31 is outside of the | |
959 // signed 32-bit integer range. | |
960 // Another way to put it is that if (exponent - signbit) > 30 then the | |
961 // number cannot be represented as an int32. | |
962 Register tmp = dst; | |
963 __ srl(at, src_exponent, 31); | |
964 __ subu(tmp, scratch, at); | |
965 __ Branch(not_int32, gt, tmp, Operand(30)); | |
966 // - Bits [21:0] in the mantissa are not null. | |
967 __ And(tmp, src_mantissa, 0x3fffff); | |
968 __ Branch(not_int32, ne, tmp, Operand(zero_reg)); | |
969 | |
970 // Otherwise the exponent needs to be big enough to shift left all the | |
971 // non zero bits left. So we need the (30 - exponent) last bits of the | |
972 // 31 higher bits of the mantissa to be null. | |
973 // Because bits [21:0] are null, we can check instead that the | |
974 // (32 - exponent) last bits of the 32 higher bits of the mantissa are null. | |
975 | |
976 // Get the 32 higher bits of the mantissa in dst. | |
977 __ Ext(dst, | |
978 src_mantissa, | |
979 HeapNumber::kMantissaBitsInTopWord, | |
980 32 - HeapNumber::kMantissaBitsInTopWord); | |
981 __ sll(at, src_exponent, HeapNumber::kNonMantissaBitsInTopWord); | |
982 __ or_(dst, dst, at); | |
983 | |
984 // Create the mask and test the lower bits (of the higher bits). | |
985 __ li(at, 32); | |
986 __ subu(scratch, at, scratch); | |
987 __ li(src_mantissa, 1); | |
988 __ sllv(src_exponent, src_mantissa, scratch); | |
989 __ Subu(src_exponent, src_exponent, Operand(1)); | |
990 __ And(src_exponent, dst, src_exponent); | |
991 __ Branch(not_int32, ne, src_exponent, Operand(zero_reg)); | |
992 } | |
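Note: the deleted DoubleIs32BitInteger helper checked, on the raw exponent/mantissa words, that truncating the double to int32 would lose nothing (unbiased exponent in range, no mantissa bits below the integer part). The same predicate expressed with ordinary double arithmetic, as a sketch:

    #include <cmath>

    bool DoubleIs32BitInteger(double value) {
      // In [INT32_MIN, INT32_MAX] and with no fractional part.
      if (!(value >= -2147483648.0 && value <= 2147483647.0)) return false;
      return std::trunc(value) == value;
    }

(The assembly version also handles 0 and -0 separately in its callers, as seen above.)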
993 | |
994 | |
995 void FloatingPointHelper::CallCCodeForDoubleOperation( | 750 void FloatingPointHelper::CallCCodeForDoubleOperation( |
996 MacroAssembler* masm, | 751 MacroAssembler* masm, |
997 Token::Value op, | 752 Token::Value op, |
998 Register heap_number_result, | 753 Register heap_number_result, |
999 Register scratch) { | 754 Register scratch) { |
1000 // Using core registers: | 755 // Using core registers: |
1001 // a0: Left value (least significant part of mantissa). | 756 // a0: Left value (least significant part of mantissa). |
1002 // a1: Left value (sign, exponent, top of mantissa). | 757 // a1: Left value (sign, exponent, top of mantissa). |
1003 // a2: Right value (least significant part of mantissa). | 758 // a2: Right value (least significant part of mantissa). |
1004 // a3: Right value (sign, exponent, top of mantissa). | 759 // a3: Right value (sign, exponent, top of mantissa). |
1005 | 760 |
1006 // Assert that heap_number_result is saved. | 761 // Assert that heap_number_result is saved. |
1007 // We currently always use s0 to pass it. | 762 // We currently always use s0 to pass it. |
1008 ASSERT(heap_number_result.is(s0)); | 763 ASSERT(heap_number_result.is(s0)); |
1009 | 764 |
1010 // Push the current return address before the C call. | 765 // Push the current return address before the C call. |
1011 __ push(ra); | 766 __ push(ra); |
1012 __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments. | 767 __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments. |
1013 if (!IsMipsSoftFloatABI) { | 768 if (!IsMipsSoftFloatABI) { |
1014 CpuFeatureScope scope(masm, FPU); | |
1015 // We are not using MIPS FPU instructions, and parameters for the runtime | 769 // We are not using MIPS FPU instructions, and parameters for the runtime |
1016 // function call are prepared in a0-a3 registers, but the function we are | 770 // function call are prepared in a0-a3 registers, but the function we are |
1017 // calling is compiled with hard-float flag and expecting hard float ABI | 771 // calling is compiled with hard-float flag and expecting hard float ABI |
1018 // (parameters in f12/f14 registers). We need to copy parameters from | 772 // (parameters in f12/f14 registers). We need to copy parameters from |
1019 // a0-a3 registers to f12/f14 register pairs. | 773 // a0-a3 registers to f12/f14 register pairs. |
1020 __ Move(f12, a0, a1); | 774 __ Move(f12, a0, a1); |
1021 __ Move(f14, a2, a3); | 775 __ Move(f14, a2, a3); |
1022 } | 776 } |
1023 { | 777 { |
1024 AllowExternalCallThatCantCauseGC scope(masm); | 778 AllowExternalCallThatCantCauseGC scope(masm); |
1025 __ CallCFunction( | 779 __ CallCFunction( |
1026 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); | 780 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); |
1027 } | 781 } |
1028 // Store answer in the overwritable heap number. | 782 // Store answer in the overwritable heap number. |
1029 if (!IsMipsSoftFloatABI) { | 783 if (!IsMipsSoftFloatABI) { |
1030 CpuFeatureScope scope(masm, FPU); | |
1031 // Double returned in register f0. | 784 // Double returned in register f0. |
1032 __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); | 785 __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); |
1033 } else { | 786 } else { |
1034 // Double returned in registers v0 and v1. | 787 // Double returned in registers v0 and v1. |
1035 __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset)); | 788 __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset)); |
1036 __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset)); | 789 __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset)); |
1037 } | 790 } |
1038 // Place heap_number_result in v0 and return to the pushed return address. | 791 // Place heap_number_result in v0 and return to the pushed return address. |
1039 __ pop(ra); | 792 __ pop(ra); |
1040 __ Ret(USE_DELAY_SLOT); | 793 __ Ret(USE_DELAY_SLOT); |
(...skipping 202 matching lines...)
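Note: in CallCCodeForDoubleOperation above, the arithmetic itself is done by a C helper reached through ExternalReference::double_fp_operation. A sketch of the kind of helper that sits behind it (the real dispatch is on Token::Value; the char-based switch here is illustrative only):

    #include <cmath>

    double DoubleFpOperation(char op, double left, double right) {
      switch (op) {
        case '+': return left + right;
        case '-': return left - right;
        case '*': return left * right;
        case '/': return left / right;
        case '%': return std::fmod(left, right);  // Token::MOD uses the C call path
        default:  return 0.0;                     // unreachable for the ops this stub emits
      }
    }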
1243 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE)); | 996 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE)); |
1244 __ mov(v0, lhs); | 997 __ mov(v0, lhs); |
1245 } else { | 998 } else { |
1246 // Smi compared non-strictly with a non-Smi non-heap-number. Call | 999 // Smi compared non-strictly with a non-Smi non-heap-number. Call |
1247 // the runtime. | 1000 // the runtime. |
1248 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE)); | 1001 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE)); |
1249 } | 1002 } |
1250 | 1003 |
1251 // Rhs is a smi, lhs is a number. | 1004 // Rhs is a smi, lhs is a number. |
1252 // Convert smi rhs to double. | 1005 // Convert smi rhs to double. |
1253 if (CpuFeatures::IsSupported(FPU)) { | 1006 __ sra(at, rhs, kSmiTagSize); |
1254 CpuFeatureScope scope(masm, FPU); | 1007 __ mtc1(at, f14); |
1255 __ sra(at, rhs, kSmiTagSize); | 1008 __ cvt_d_w(f14, f14); |
1256 __ mtc1(at, f14); | 1009 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); |
1257 __ cvt_d_w(f14, f14); | |
1258 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | |
1259 } else { | |
1260 // Load lhs to a double in a2, a3. | |
1261 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4)); | |
1262 __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | |
1263 | |
1264 // Write Smi from rhs to a1 and a0 in double format. t5 is scratch. | |
1265 __ mov(t6, rhs); | |
1266 ConvertToDoubleStub stub1(a1, a0, t6, t5); | |
1267 __ push(ra); | |
1268 __ Call(stub1.GetCode(masm->isolate())); | |
1269 | |
1270 __ pop(ra); | |
1271 } | |
1272 | 1010 |
1273 // We now have both loaded as doubles. | 1011 // We now have both loaded as doubles. |
1274 __ jmp(both_loaded_as_doubles); | 1012 __ jmp(both_loaded_as_doubles); |
1275 | 1013 |
1276 __ bind(&lhs_is_smi); | 1014 __ bind(&lhs_is_smi); |
1277 // Lhs is a Smi. Check whether the non-smi is a heap number. | 1015 // Lhs is a Smi. Check whether the non-smi is a heap number. |
1278 __ GetObjectType(rhs, t4, t4); | 1016 __ GetObjectType(rhs, t4, t4); |
1279 if (strict) { | 1017 if (strict) { |
1280 // If lhs was not a number and rhs was a Smi then strict equality cannot | 1018 // If lhs was not a number and rhs was a Smi then strict equality cannot |
1281 // succeed. Return non-equal. | 1019 // succeed. Return non-equal. |
1282 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE)); | 1020 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE)); |
1283 __ li(v0, Operand(1)); | 1021 __ li(v0, Operand(1)); |
1284 } else { | 1022 } else { |
1285 // Smi compared non-strictly with a non-Smi non-heap-number. Call | 1023 // Smi compared non-strictly with a non-Smi non-heap-number. Call |
1286 // the runtime. | 1024 // the runtime. |
1287 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE)); | 1025 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE)); |
1288 } | 1026 } |
1289 | 1027 |
1290 // Lhs is a smi, rhs is a number. | 1028 // Lhs is a smi, rhs is a number. |
1291 // Convert smi lhs to double. | 1029 // Convert smi lhs to double. |
1292 if (CpuFeatures::IsSupported(FPU)) { | 1030 __ sra(at, lhs, kSmiTagSize); |
1293 CpuFeatureScope scope(masm, FPU); | 1031 __ mtc1(at, f12); |
1294 __ sra(at, lhs, kSmiTagSize); | 1032 __ cvt_d_w(f12, f12); |
1295 __ mtc1(at, f12); | 1033 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
1296 __ cvt_d_w(f12, f12); | |
1297 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | |
1298 } else { | |
1299 // Convert lhs to a double format. t5 is scratch. | |
1300 __ mov(t6, lhs); | |
1301 ConvertToDoubleStub stub2(a3, a2, t6, t5); | |
1302 __ push(ra); | |
1303 __ Call(stub2.GetCode(masm->isolate())); | |
1304 __ pop(ra); | |
1305 // Load rhs to a double in a1, a0. | |
1306 if (rhs.is(a0)) { | |
1307 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); | |
1308 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | |
1309 } else { | |
1310 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | |
1311 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); | |
1312 } | |
1313 } | |
1314 // Fall through to both_loaded_as_doubles. | 1034 // Fall through to both_loaded_as_doubles. |
1315 } | 1035 } |
1316 | 1036 |
1317 | 1037 |
1318 void EmitNanCheck(MacroAssembler* masm, Condition cc) { | |
1319 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); | |
1320 if (CpuFeatures::IsSupported(FPU)) { | |
1321 CpuFeatureScope scope(masm, FPU); | |
1322 // Lhs and rhs are already loaded to f12 and f14 register pairs. | |
1323 __ Move(t0, t1, f14); | |
1324 __ Move(t2, t3, f12); | |
1325 } else { | |
1326 // Lhs and rhs are already loaded to GP registers. | |
1327 __ mov(t0, a0); // a0 has LS 32 bits of rhs. | |
1328 __ mov(t1, a1); // a1 has MS 32 bits of rhs. | |
1329 __ mov(t2, a2); // a2 has LS 32 bits of lhs. | |
1330 __ mov(t3, a3); // a3 has MS 32 bits of lhs. | |
1331 } | |
1332 Register rhs_exponent = exp_first ? t0 : t1; | |
1333 Register lhs_exponent = exp_first ? t2 : t3; | |
1334 Register rhs_mantissa = exp_first ? t1 : t0; | |
1335 Register lhs_mantissa = exp_first ? t3 : t2; | |
1336 Label one_is_nan, neither_is_nan; | |
1337 Label lhs_not_nan_exp_mask_is_loaded; | |
1338 | |
1339 Register exp_mask_reg = t4; | |
1340 __ li(exp_mask_reg, HeapNumber::kExponentMask); | |
1341 __ and_(t5, lhs_exponent, exp_mask_reg); | |
1342 __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg)); | |
1343 | |
1344 __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord); | |
1345 __ Branch(&one_is_nan, ne, t5, Operand(zero_reg)); | |
1346 | |
1347 __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg)); | |
1348 | |
1349 __ li(exp_mask_reg, HeapNumber::kExponentMask); | |
1350 __ bind(&lhs_not_nan_exp_mask_is_loaded); | |
1351 __ and_(t5, rhs_exponent, exp_mask_reg); | |
1352 | |
1353 __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg)); | |
1354 | |
1355 __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord); | |
1356 __ Branch(&one_is_nan, ne, t5, Operand(zero_reg)); | |
1357 | |
1358 __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg)); | |
1359 | |
1360 __ bind(&one_is_nan); | |
1361 // NaN comparisons always fail. | |
1362 // Load whatever we need in v0 to make the comparison fail. | |
1363 | |
1364 if (cc == lt || cc == le) { | |
1365 __ li(v0, Operand(GREATER)); | |
1366 } else { | |
1367 __ li(v0, Operand(LESS)); | |
1368 } | |
1369 __ Ret(); | |
1370 | |
1371 __ bind(&neither_is_nan); | |
1372 } | |
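Note: the deleted EmitNanCheck worked on the split hi/lo words of each operand. A sketch of the NaN predicate it applied per operand (all exponent bits set and a non-zero mantissa), assuming the standard IEEE-754 high-word layout:

    #include <cstdint>

    bool IsNaNFromWords(uint32_t hi_word, uint32_t lo_word) {
      const uint32_t kExponentMask    = 0x7FF00000;  // HeapNumber::kExponentMask
      const uint32_t kTopMantissaMask = 0x000FFFFF;
      bool exponent_all_ones = (hi_word & kExponentMask) == kExponentMask;
      bool mantissa_nonzero  = (hi_word & kTopMantissaMask) != 0 || lo_word != 0;
      return exponent_all_ones && mantissa_nonzero;
    }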
1373 | |
1374 | |
1375 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) { | |
1376 // f12 and f14 have the two doubles. Neither is a NaN. | |
1377 // Call a native function to do a comparison between two non-NaNs. | |
1378 // Call C routine that may not cause GC or other trouble. | |
1379 // We use a call_was and return manually because we need arguments slots to | |
1380 // be freed. | |
1381 | |
1382 Label return_result_not_equal, return_result_equal; | |
1383 if (cc == eq) { | |
1384 // Doubles are not equal unless they have the same bit pattern. | |
1385 // Exception: 0 and -0. | |
1386 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); | |
1387 if (CpuFeatures::IsSupported(FPU)) { | |
1388 CpuFeatureScope scope(masm, FPU); | |
1389 // Lhs and rhs are already loaded to f12 and f14 register pairs. | |
1390 __ Move(t0, t1, f14); | |
1391 __ Move(t2, t3, f12); | |
1392 } else { | |
1393 // Lhs and rhs are already loaded to GP registers. | |
1394 __ mov(t0, a0); // a0 has LS 32 bits of rhs. | |
1395 __ mov(t1, a1); // a1 has MS 32 bits of rhs. | |
1396 __ mov(t2, a2); // a2 has LS 32 bits of lhs. | |
1397 __ mov(t3, a3); // a3 has MS 32 bits of lhs. | |
1398 } | |
1399 Register rhs_exponent = exp_first ? t0 : t1; | |
1400 Register lhs_exponent = exp_first ? t2 : t3; | |
1401 Register rhs_mantissa = exp_first ? t1 : t0; | |
1402 Register lhs_mantissa = exp_first ? t3 : t2; | |
1403 | |
1404 __ xor_(v0, rhs_mantissa, lhs_mantissa); | |
1405 __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg)); | |
1406 | |
1407 __ subu(v0, rhs_exponent, lhs_exponent); | |
1408 __ Branch(&return_result_equal, eq, v0, Operand(zero_reg)); | |
1409 // 0, -0 case. | |
1410 __ sll(rhs_exponent, rhs_exponent, kSmiTagSize); | |
1411 __ sll(lhs_exponent, lhs_exponent, kSmiTagSize); | |
1412 __ or_(t4, rhs_exponent, lhs_exponent); | |
1413 __ or_(t4, t4, rhs_mantissa); | |
1414 | |
1415 __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg)); | |
1416 | |
1417 __ bind(&return_result_equal); | |
1418 | |
1419 __ li(v0, Operand(EQUAL)); | |
1420 __ Ret(); | |
1421 } | |
1422 | |
1423 __ bind(&return_result_not_equal); | |
1424 | |
1425 if (!CpuFeatures::IsSupported(FPU)) { | |
1426 __ push(ra); | |
1427 __ PrepareCallCFunction(0, 2, t4); | |
1428 if (!IsMipsSoftFloatABI) { | |
1429 // We are not using MIPS FPU instructions, and parameters for the runtime | |
1430 // function call are prepared in a0-a3 registers, but the function we are | |
1431 // calling is compiled with hard-float flag and expecting hard float ABI | |
1432 // (parameters in f12/f14 registers). We need to copy parameters from | |
1433 // a0-a3 registers to f12/f14 register pairs. | |
1434 __ Move(f12, a0, a1); | |
1435 __ Move(f14, a2, a3); | |
1436 } | |
1437 | |
1438 AllowExternalCallThatCantCauseGC scope(masm); | |
1439 __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), | |
1440 0, 2); | |
1441 __ pop(ra); // Because this function returns int, result is in v0. | |
1442 __ Ret(); | |
1443 } else { | |
1444 CpuFeatureScope scope(masm, FPU); | |
1445 Label equal, less_than; | |
1446 __ BranchF(&equal, NULL, eq, f12, f14); | |
1447 __ BranchF(&less_than, NULL, lt, f12, f14); | |
1448 | |
1449 // Not equal, not less, not NaN, must be greater. | |
1450 | |
1451 __ li(v0, Operand(GREATER)); | |
1452 __ Ret(); | |
1453 | |
1454 __ bind(&equal); | |
1455 __ li(v0, Operand(EQUAL)); | |
1456 __ Ret(); | |
1457 | |
1458 __ bind(&less_than); | |
1459 __ li(v0, Operand(LESS)); | |
1460 __ Ret(); | |
1461 } | |
1462 } | |
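Note: for cc == eq the deleted helper above avoided an FPU compare by checking raw bits: two non-NaN doubles are equal iff their bit patterns match, except that +0.0 and -0.0 compare equal despite differing sign bits. A compact sketch of that rule:

    #include <cstdint>
    #include <cstring>

    bool NonNanDoublesEqual(double lhs, double rhs) {
      uint64_t lhs_bits, rhs_bits;
      std::memcpy(&lhs_bits, &lhs, sizeof(lhs_bits));
      std::memcpy(&rhs_bits, &rhs, sizeof(rhs_bits));
      if (lhs_bits == rhs_bits) return true;
      // +0.0 vs -0.0: everything except the sign bit is zero in both values.
      return ((lhs_bits | rhs_bits) & ~(1ULL << 63)) == 0;
    }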
1463 | |
1464 | |
1465 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, | 1038 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
1466 Register lhs, | 1039 Register lhs, |
1467 Register rhs) { | 1040 Register rhs) { |
1468 // If either operand is a JS object or an oddball value, then they are | 1041 // If either operand is a JS object or an oddball value, then they are |
1469 // not equal since their pointers are different. | 1042 // not equal since their pointers are different. |
1470 // There is no test for undetectability in strict equality. | 1043 // There is no test for undetectability in strict equality. |
1471 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); | 1044 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); |
1472 Label first_non_object; | 1045 Label first_non_object; |
1473 // Get the type of the first operand into a2 and compare it with | 1046 // Get the type of the first operand into a2 and compare it with |
1474 // FIRST_SPEC_OBJECT_TYPE. | 1047 // FIRST_SPEC_OBJECT_TYPE. |
(...skipping 34 matching lines...)
1509 Label* not_heap_numbers, | 1082 Label* not_heap_numbers, |
1510 Label* slow) { | 1083 Label* slow) { |
1511 __ GetObjectType(lhs, a3, a2); | 1084 __ GetObjectType(lhs, a3, a2); |
1512 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE)); | 1085 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE)); |
1513 __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset)); | 1086 __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset)); |
1514 // If first was a heap number & second wasn't, go to slow case. | 1087 // If first was a heap number & second wasn't, go to slow case. |
1515 __ Branch(slow, ne, a3, Operand(a2)); | 1088 __ Branch(slow, ne, a3, Operand(a2)); |
1516 | 1089 |
1517 // Both are heap numbers. Load them up then jump to the code we have | 1090 // Both are heap numbers. Load them up then jump to the code we have |
1518 // for that. | 1091 // for that. |
1519 if (CpuFeatures::IsSupported(FPU)) { | 1092 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); |
1520 CpuFeatureScope scope(masm, FPU); | 1093 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
1521 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | 1094 |
1522 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | |
1523 } else { | |
1524 __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | |
1525 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4)); | |
1526 if (rhs.is(a0)) { | |
1527 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); | |
1528 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | |
1529 } else { | |
1530 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | |
1531 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); | |
1532 } | |
1533 } | |
1534 __ jmp(both_loaded_as_doubles); | 1095 __ jmp(both_loaded_as_doubles); |
1535 } | 1096 } |
1536 | 1097 |
1537 | 1098 |
1538 // Fast negative check for internalized-to-internalized equality. | 1099 // Fast negative check for internalized-to-internalized equality. |
1539 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, | 1100 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, |
1540 Register lhs, | 1101 Register lhs, |
1541 Register rhs, | 1102 Register rhs, |
1542 Label* possible_strings, | 1103 Label* possible_strings, |
1543 Label* not_both_strings) { | 1104 Label* not_both_strings) { |
(...skipping 60 matching lines...)
1604 | 1165 |
1605 // Calculate the entry in the number string cache. The hash value in the | 1166 // Calculate the entry in the number string cache. The hash value in the |
1606 // number string cache for smis is just the smi value, and the hash for | 1167 // number string cache for smis is just the smi value, and the hash for |
1607 // doubles is the xor of the upper and lower words. See | 1168 // doubles is the xor of the upper and lower words. See |
1608 // Heap::GetNumberStringCache. | 1169 // Heap::GetNumberStringCache. |
1609 Isolate* isolate = masm->isolate(); | 1170 Isolate* isolate = masm->isolate(); |
1610 Label is_smi; | 1171 Label is_smi; |
1611 Label load_result_from_cache; | 1172 Label load_result_from_cache; |
1612 if (!object_is_smi) { | 1173 if (!object_is_smi) { |
1613 __ JumpIfSmi(object, &is_smi); | 1174 __ JumpIfSmi(object, &is_smi); |
1614 if (CpuFeatures::IsSupported(FPU)) { | 1175 __ CheckMap(object, |
1615 CpuFeatureScope scope(masm, FPU); | 1176 scratch1, |
1616 __ CheckMap(object, | 1177 Heap::kHeapNumberMapRootIndex, |
1617 scratch1, | 1178 not_found, |
1618 Heap::kHeapNumberMapRootIndex, | 1179 DONT_DO_SMI_CHECK); |
1619 not_found, | |
1620 DONT_DO_SMI_CHECK); | |
1621 | 1180 |
1622 STATIC_ASSERT(8 == kDoubleSize); | 1181 STATIC_ASSERT(8 == kDoubleSize); |
1623 __ Addu(scratch1, | 1182 __ Addu(scratch1, |
1624 object, | 1183 object, |
1625 Operand(HeapNumber::kValueOffset - kHeapObjectTag)); | 1184 Operand(HeapNumber::kValueOffset - kHeapObjectTag)); |
1626 __ lw(scratch2, MemOperand(scratch1, kPointerSize)); | 1185 __ lw(scratch2, MemOperand(scratch1, kPointerSize)); |
1627 __ lw(scratch1, MemOperand(scratch1, 0)); | 1186 __ lw(scratch1, MemOperand(scratch1, 0)); |
1628 __ Xor(scratch1, scratch1, Operand(scratch2)); | 1187 __ Xor(scratch1, scratch1, Operand(scratch2)); |
1629 __ And(scratch1, scratch1, Operand(mask)); | 1188 __ And(scratch1, scratch1, Operand(mask)); |
1630 | 1189 |
1631 // Calculate address of entry in string cache: each entry consists | 1190 // Calculate address of entry in string cache: each entry consists |
1632 // of two pointer sized fields. | 1191 // of two pointer sized fields. |
1633 __ sll(scratch1, scratch1, kPointerSizeLog2 + 1); | 1192 __ sll(scratch1, scratch1, kPointerSizeLog2 + 1); |
1634 __ Addu(scratch1, number_string_cache, scratch1); | 1193 __ Addu(scratch1, number_string_cache, scratch1); |
1635 | 1194 |
1636 Register probe = mask; | 1195 Register probe = mask; |
1637 __ lw(probe, | 1196 __ lw(probe, |
1638 FieldMemOperand(scratch1, FixedArray::kHeaderSize)); | 1197 FieldMemOperand(scratch1, FixedArray::kHeaderSize)); |
1639 __ JumpIfSmi(probe, not_found); | 1198 __ JumpIfSmi(probe, not_found); |
1640 __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset)); | 1199 __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset)); |
1641 __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset)); | 1200 __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset)); |
1642 __ BranchF(&load_result_from_cache, NULL, eq, f12, f14); | 1201 __ BranchF(&load_result_from_cache, NULL, eq, f12, f14); |
1643 __ Branch(not_found); | 1202 __ Branch(not_found); |
1644 } else { | |
1645 // Note that there is no cache check for non-FPU case, even though | |
1646 // it seems there could be. May be a tiny optimization for non-FPU | |
1647 // cores. | |
1648 __ Branch(not_found); | |
1649 } | |
1650 } | 1203 } |
1651 | 1204 |
1652 __ bind(&is_smi); | 1205 __ bind(&is_smi); |
1653 Register scratch = scratch1; | 1206 Register scratch = scratch1; |
1654 __ sra(scratch, object, 1); // Shift away the tag. | 1207 __ sra(scratch, object, 1); // Shift away the tag. |
1655 __ And(scratch, mask, Operand(scratch)); | 1208 __ And(scratch, mask, Operand(scratch)); |
1656 | 1209 |
1657 // Calculate address of entry in string cache: each entry consists | 1210 // Calculate address of entry in string cache: each entry consists |
1658 // of two pointer sized fields. | 1211 // of two pointer sized fields. |
1659 __ sll(scratch, scratch, kPointerSizeLog2 + 1); | 1212 __ sll(scratch, scratch, kPointerSizeLog2 + 1); |
(...skipping 97 matching lines...)
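Note: the number-string-cache lookup above indexes the cache with a cheap hash; per the comment in the code, smis hash to their own value and doubles to the xor of their two 32-bit words, masked to the cache size. A sketch of the double case:

    #include <cstdint>
    #include <cstring>

    uint32_t NumberStringCacheHash(double value, uint32_t mask) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      uint32_t lo_word = static_cast<uint32_t>(bits);
      uint32_t hi_word = static_cast<uint32_t>(bits >> 32);
      return (lo_word ^ hi_word) & mask;  // the __ Xor / __ And pair in the stub
    }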
1757 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU. | 1310 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU. |
1758 EmitSmiNonsmiComparison(masm, lhs, rhs, | 1311 EmitSmiNonsmiComparison(masm, lhs, rhs, |
1759 &both_loaded_as_doubles, &slow, strict()); | 1312 &both_loaded_as_doubles, &slow, strict()); |
1760 | 1313 |
1761 __ bind(&both_loaded_as_doubles); | 1314 __ bind(&both_loaded_as_doubles); |
1762 // f12, f14 are the double representations of the left hand side | 1315 // f12, f14 are the double representations of the left hand side |
1763 // and the right hand side if we have FPU. Otherwise a2, a3 represent | 1316 // and the right hand side if we have FPU. Otherwise a2, a3 represent |
1764 // left hand side and a0, a1 represent right hand side. | 1317 // left hand side and a0, a1 represent right hand side. |
1765 | 1318 |
1766 Isolate* isolate = masm->isolate(); | 1319 Isolate* isolate = masm->isolate(); |
1767 if (CpuFeatures::IsSupported(FPU)) { | 1320 Label nan; |
1768 CpuFeatureScope scope(masm, FPU); | 1321 __ li(t0, Operand(LESS)); |
1769 Label nan; | 1322 __ li(t1, Operand(GREATER)); |
1770 __ li(t0, Operand(LESS)); | 1323 __ li(t2, Operand(EQUAL)); |
1771 __ li(t1, Operand(GREATER)); | |
1772 __ li(t2, Operand(EQUAL)); | |
1773 | 1324 |
1774 // Check if either rhs or lhs is NaN. | 1325 // Check if either rhs or lhs is NaN. |
1775 __ BranchF(NULL, &nan, eq, f12, f14); | 1326 __ BranchF(NULL, &nan, eq, f12, f14); |
1776 | 1327 |
1777 // Check if LESS condition is satisfied. If true, move conditionally | 1328 // Check if LESS condition is satisfied. If true, move conditionally |
1778 // result to v0. | 1329 // result to v0. |
1779 __ c(OLT, D, f12, f14); | 1330 __ c(OLT, D, f12, f14); |
1780 __ Movt(v0, t0); | 1331 __ Movt(v0, t0); |
1781 // Use previous check to store conditionally to v0 opposite condition | 1332 // Use previous check to store conditionally to v0 opposite condition |
1782 // (GREATER). If rhs is equal to lhs, this will be corrected in next | 1333 // (GREATER). If rhs is equal to lhs, this will be corrected in next |
1783 // check. | 1334 // check. |
1784 __ Movf(v0, t1); | 1335 __ Movf(v0, t1); |
1785 // Check if EQUAL condition is satisfied. If true, move conditionally | 1336 // Check if EQUAL condition is satisfied. If true, move conditionally |
1786 // result to v0. | 1337 // result to v0. |
1787 __ c(EQ, D, f12, f14); | 1338 __ c(EQ, D, f12, f14); |
1788 __ Movt(v0, t2); | 1339 __ Movt(v0, t2); |
1789 | 1340 |
1790 __ Ret(); | 1341 __ Ret(); |
1791 | 1342 |
1792 __ bind(&nan); | 1343 __ bind(&nan); |
1793 // NaN comparisons always fail. | 1344 // NaN comparisons always fail. |
1794 // Load whatever we need in v0 to make the comparison fail. | 1345 // Load whatever we need in v0 to make the comparison fail. |
1795 if (cc == lt || cc == le) { | 1346 if (cc == lt || cc == le) { |
1796 __ li(v0, Operand(GREATER)); | 1347 __ li(v0, Operand(GREATER)); |
1797 } else { | |
1798 __ li(v0, Operand(LESS)); | |
1799 } | |
1800 __ Ret(); | |
1801 } else { | 1348 } else { |
1802 // Checks for NaN in the doubles we have loaded. Can return the answer or | 1349 __ li(v0, Operand(LESS)); |
1803 // fall through if neither is a NaN. Also binds rhs_not_nan. | |
1804 EmitNanCheck(masm, cc); | |
1805 | |
1806 // Compares two doubles that are not NaNs. Returns the answer. | |
1807 // Never falls through. | |
1808 EmitTwoNonNanDoubleComparison(masm, cc); | |
1809 } | 1350 } |
| 1351 __ Ret(); |
1810 | 1352 |
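Note: the c(OLT)/Movt/Movf/c(EQ)/Movt sequence above builds a three-way result in v0. A sketch of its semantics, with LESS/EQUAL/GREATER assumed to be -1/0/1 (the real values come from V8's headers) and the NaN outcome chosen so the caller's condition always fails:

    #include <cmath>

    int CompareDoubles(double lhs, double rhs, bool cc_is_lt_or_le) {
      const int kLess = -1, kEqual = 0, kGreater = 1;  // assumed LESS/EQUAL/GREATER
      if (std::isnan(lhs) || std::isnan(rhs)) {
        return cc_is_lt_or_le ? kGreater : kLess;      // make the comparison fail
      }
      if (lhs < rhs)  return kLess;
      if (lhs == rhs) return kEqual;
      return kGreater;
    }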
1811 __ bind(¬_smis); | 1353 __ bind(¬_smis); |
1812 // At this point we know we are dealing with two different objects, | 1354 // At this point we know we are dealing with two different objects, |
1813 // and neither of them is a Smi. The objects are in lhs_ and rhs_. | 1355 // and neither of them is a Smi. The objects are in lhs_ and rhs_. |
1814 if (strict()) { | 1356 if (strict()) { |
1815 // This returns non-equal for some object types, or falls through if it | 1357 // This returns non-equal for some object types, or falls through if it |
1816 // was not lucky. | 1358 // was not lucky. |
1817 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs); | 1359 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs); |
1818 } | 1360 } |
1819 | 1361 |
(...skipping 72 matching lines...)
1892 __ InvokeBuiltin(native, JUMP_FUNCTION); | 1434 __ InvokeBuiltin(native, JUMP_FUNCTION); |
1893 | 1435 |
1894 __ bind(&miss); | 1436 __ bind(&miss); |
1895 GenerateMiss(masm); | 1437 GenerateMiss(masm); |
1896 } | 1438 } |
1897 | 1439 |
1898 | 1440 |
1899 // The stub expects its argument in the tos_ register and returns its result in | 1441 // The stub expects its argument in the tos_ register and returns its result in |
1900 // it, too: zero for false, and a non-zero value for true. | 1442 // it, too: zero for false, and a non-zero value for true. |
1901 void ToBooleanStub::Generate(MacroAssembler* masm) { | 1443 void ToBooleanStub::Generate(MacroAssembler* masm) { |
1902 // This stub uses FPU instructions. | |
1903 CpuFeatureScope scope(masm, FPU); | |
1904 | |
1905 Label patch; | 1444 Label patch; |
1906 const Register map = t5.is(tos_) ? t3 : t5; | 1445 const Register map = t5.is(tos_) ? t3 : t5; |
1907 | 1446 |
1908 // undefined -> false. | 1447 // undefined -> false. |
1909 CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false); | 1448 CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false); |
1910 | 1449 |
1911 // Boolean -> its value. | 1450 // Boolean -> its value. |
1912 CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false); | 1451 CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false); |
1913 CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true); | 1452 CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true); |
1914 | 1453 |
(...skipping 93 matching lines...)
2008 1); | 1547 1); |
2009 } | 1548 } |
2010 | 1549 |
2011 | 1550 |
2012 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { | 1551 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { |
2013 // We don't allow a GC during a store buffer overflow so there is no need to | 1552 // We don't allow a GC during a store buffer overflow so there is no need to |
2014 // store the registers in any particular way, but we do have to store and | 1553 // store the registers in any particular way, but we do have to store and |
2015 // restore them. | 1554 // restore them. |
2016 __ MultiPush(kJSCallerSaved | ra.bit()); | 1555 __ MultiPush(kJSCallerSaved | ra.bit()); |
2017 if (save_doubles_ == kSaveFPRegs) { | 1556 if (save_doubles_ == kSaveFPRegs) { |
2018 CpuFeatureScope scope(masm, FPU); | |
2019 __ MultiPushFPU(kCallerSavedFPU); | 1557 __ MultiPushFPU(kCallerSavedFPU); |
2020 } | 1558 } |
2021 const int argument_count = 1; | 1559 const int argument_count = 1; |
2022 const int fp_argument_count = 0; | 1560 const int fp_argument_count = 0; |
2023 const Register scratch = a1; | 1561 const Register scratch = a1; |
2024 | 1562 |
2025 AllowExternalCallThatCantCauseGC scope(masm); | 1563 AllowExternalCallThatCantCauseGC scope(masm); |
2026 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); | 1564 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); |
2027 __ li(a0, Operand(ExternalReference::isolate_address())); | 1565 __ li(a0, Operand(ExternalReference::isolate_address())); |
2028 __ CallCFunction( | 1566 __ CallCFunction( |
2029 ExternalReference::store_buffer_overflow_function(masm->isolate()), | 1567 ExternalReference::store_buffer_overflow_function(masm->isolate()), |
2030 argument_count); | 1568 argument_count); |
2031 if (save_doubles_ == kSaveFPRegs) { | 1569 if (save_doubles_ == kSaveFPRegs) { |
2032 CpuFeatureScope scope(masm, FPU); | |
2033 __ MultiPopFPU(kCallerSavedFPU); | 1570 __ MultiPopFPU(kCallerSavedFPU); |
2034 } | 1571 } |
2035 | 1572 |
2036 __ MultiPop(kJSCallerSaved | ra.bit()); | 1573 __ MultiPop(kJSCallerSaved | ra.bit()); |
2037 __ Ret(); | 1574 __ Ret(); |
2038 } | 1575 } |
2039 | 1576 |
2040 | 1577 |
2041 void UnaryOpStub::PrintName(StringStream* stream) { | 1578 void UnaryOpStub::PrintName(StringStream* stream) { |
2042 const char* op_name = Token::Name(op_); | 1579 const char* op_name = Token::Name(op_); |
(...skipping 210 matching lines...)
2253 // This can't go slow-case because it's the same number we already | 1790 // This can't go slow-case because it's the same number we already |
2254 // converted once again. | 1791 // converted once again. |
2255 __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible); | 1792 __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible); |
2256 // Negate the result. | 1793 // Negate the result. |
2257 __ Xor(a1, a1, -1); | 1794 __ Xor(a1, a1, -1); |
2258 | 1795 |
2259 __ bind(&heapnumber_allocated); | 1796 __ bind(&heapnumber_allocated); |
2260 __ mov(v0, a2); // Move newly allocated heap number to v0. | 1797 __ mov(v0, a2); // Move newly allocated heap number to v0. |
2261 } | 1798 } |
2262 | 1799 |
2263 if (CpuFeatures::IsSupported(FPU)) { | 1800 // Convert the int32 in a1 to the heap number in v0. a2 is corrupted. |
2264 // Convert the int32 in a1 to the heap number in v0. a2 is corrupted. | 1801 __ mtc1(a1, f0); |
2265 CpuFeatureScope scope(masm, FPU); | 1802 __ cvt_d_w(f0, f0); |
2266 __ mtc1(a1, f0); | 1803 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); |
2267 __ cvt_d_w(f0, f0); | 1804 __ Ret(); |
2268 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); | |
2269 __ Ret(); | |
2270 } else { | |
2271 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not | |
2272 // have to set up a frame. | |
2273 WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3); | |
2274 __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); | |
2275 } | |
2276 | 1805 |
2277 __ bind(&impossible); | 1806 __ bind(&impossible); |
2278 if (FLAG_debug_code) { | 1807 if (FLAG_debug_code) { |
2279 __ stop("Incorrect assumption in bit-not stub"); | 1808 __ stop("Incorrect assumption in bit-not stub"); |
2280 } | 1809 } |
2281 } | 1810 } |
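Note: the BIT_NOT path above flips every bit of the untagged int32 and then boxes the result as a heap number; the __ Xor(a1, a1, -1) is the complement itself. As a one-line sketch:

    #include <cstdint>

    int32_t BitNot(int32_t x) { return x ^ -1; }  // identical to ~x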
2282 | 1811 |
2283 | 1812 |
2284 // TODO(svenpanne): Use virtual functions instead of switch. | 1813 // TODO(svenpanne): Use virtual functions instead of switch. |
2285 void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) { | 1814 void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) { |
(...skipping 41 matching lines...)
2327 case Token::BIT_NOT: | 1856 case Token::BIT_NOT: |
2328 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); | 1857 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); |
2329 break; | 1858 break; |
2330 default: | 1859 default: |
2331 UNREACHABLE(); | 1860 UNREACHABLE(); |
2332 } | 1861 } |
2333 } | 1862 } |
2334 | 1863 |
2335 | 1864 |
2336 void BinaryOpStub::Initialize() { | 1865 void BinaryOpStub::Initialize() { |
2337 platform_specific_bit_ = CpuFeatures::IsSupported(FPU); | 1866 platform_specific_bit_ = true; // FPU is a base requirement for V8. |
2338 } | 1867 } |
2339 | 1868 |
2340 | 1869 |
2341 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | 1870 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
2342 Label get_result; | 1871 Label get_result; |
2343 | 1872 |
2344 __ Push(a1, a0); | 1873 __ Push(a1, a0); |
2345 | 1874 |
2346 __ li(a2, Operand(Smi::FromInt(MinorKey()))); | 1875 __ li(a2, Operand(Smi::FromInt(MinorKey()))); |
2347 __ push(a2); | 1876 __ push(a2); |
(...skipping 206 matching lines...)
2554 Register heap_number_map = t2; | 2083 Register heap_number_map = t2; |
2555 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 2084 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
2556 | 2085 |
2557 switch (op) { | 2086 switch (op) { |
2558 case Token::ADD: | 2087 case Token::ADD: |
2559 case Token::SUB: | 2088 case Token::SUB: |
2560 case Token::MUL: | 2089 case Token::MUL: |
2561 case Token::DIV: | 2090 case Token::DIV: |
2562 case Token::MOD: { | 2091 case Token::MOD: { |
2563 // Load left and right operands into f12 and f14 or a0/a1 and a2/a3 | 2092 // Load left and right operands into f12 and f14 or a0/a1 and a2/a3 |
2564 // depending on whether FPU is available or not. | 2093 // depending on operation. |
2565 FloatingPointHelper::Destination destination = | 2094 FloatingPointHelper::Destination destination = |
2566 CpuFeatures::IsSupported(FPU) && | |
2567 op != Token::MOD ? | 2095 op != Token::MOD ? |
2568 FloatingPointHelper::kFPURegisters : | 2096 FloatingPointHelper::kFPURegisters : |
2569 FloatingPointHelper::kCoreRegisters; | 2097 FloatingPointHelper::kCoreRegisters; |
2570 | 2098 |
2571 // Allocate new heap number for result. | 2099 // Allocate new heap number for result. |
2572 Register result = s0; | 2100 Register result = s0; |
2573 BinaryOpStub_GenerateHeapResultAllocation( | 2101 BinaryOpStub_GenerateHeapResultAllocation( |
2574 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); | 2102 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); |
2575 | 2103 |
2576 // Load the operands. | 2104 // Load the operands. |
(...skipping 23 matching lines...)
2600 masm, destination, left, f12, a0, a1, heap_number_map, | 2128 masm, destination, left, f12, a0, a1, heap_number_map, |
2601 scratch1, scratch2, fail); | 2129 scratch1, scratch2, fail); |
2602 } | 2130 } |
2603 } | 2131 } |
2604 | 2132 |
2605 // Calculate the result. | 2133 // Calculate the result. |
2606 if (destination == FloatingPointHelper::kFPURegisters) { | 2134 if (destination == FloatingPointHelper::kFPURegisters) { |
2607 // Using FPU registers: | 2135 // Using FPU registers: |
2608 // f12: Left value. | 2136 // f12: Left value. |
2609 // f14: Right value. | 2137 // f14: Right value. |
2610 CpuFeatureScope scope(masm, FPU); | |
2611 switch (op) { | 2138 switch (op) { |
2612 case Token::ADD: | 2139 case Token::ADD: |
2613 __ add_d(f10, f12, f14); | 2140 __ add_d(f10, f12, f14); |
2614 break; | 2141 break; |
2615 case Token::SUB: | 2142 case Token::SUB: |
2616 __ sub_d(f10, f12, f14); | 2143 __ sub_d(f10, f12, f14); |
2617 break; | 2144 break; |
2618 case Token::MUL: | 2145 case Token::MUL: |
2619 __ mul_d(f10, f12, f14); | 2146 __ mul_d(f10, f12, f14); |
2620 break; | 2147 break; |
(...skipping 69 matching lines...)
2690 __ srav(a2, a3, a2); | 2217 __ srav(a2, a3, a2); |
2691 break; | 2218 break; |
2692 case Token::SHR: | 2219 case Token::SHR: |
2693 // Use only the 5 least significant bits of the shift count. | 2220 // Use only the 5 least significant bits of the shift count. |
2694 __ GetLeastBitsFromInt32(a2, a2, 5); | 2221 __ GetLeastBitsFromInt32(a2, a2, 5); |
2695 __ srlv(a2, a3, a2); | 2222 __ srlv(a2, a3, a2); |
2696 // SHR is special because it is required to produce a positive answer. | 2223 // SHR is special because it is required to produce a positive answer. |
2697 // The code below for writing into heap numbers isn't capable of | 2224 // The code below for writing into heap numbers isn't capable of |
2698 // writing the register as an unsigned int so we go to slow case if we | 2225 // writing the register as an unsigned int so we go to slow case if we |
2699 // hit this case. | 2226 // hit this case. |
2700 if (CpuFeatures::IsSupported(FPU)) { | 2227 __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg)); |
2701 __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg)); | |
2702 } else { | |
2703 __ Branch(not_numbers, lt, a2, Operand(zero_reg)); | |
2704 } | |
2705 break; | 2228 break; |
2706 case Token::SHL: | 2229 case Token::SHL: |
2707 // Use only the 5 least significant bits of the shift count. | 2230 // Use only the 5 least significant bits of the shift count. |
2708 __ GetLeastBitsFromInt32(a2, a2, 5); | 2231 __ GetLeastBitsFromInt32(a2, a2, 5); |
2709 __ sllv(a2, a3, a2); | 2232 __ sllv(a2, a3, a2); |
2710 break; | 2233 break; |
2711 default: | 2234 default: |
2712 UNREACHABLE(); | 2235 UNREACHABLE(); |
2713 } | 2236 } |
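The switch above encodes the ECMAScript shift semantics: only the five least significant bits of the count are used (GetLeastBitsFromInt32), SAR shifts copies of the sign bit in, SHL shifts zeros in, and SHR operates on the value reinterpreted as unsigned, which is why a result with the top bit set cannot simply be smi-tagged and instead takes the heap-number path. A small C++ sketch of those semantics, assuming two's-complement int32 operands; the helper names are mine, not from this file:

  #include <cstdint>

  // ECMAScript-style 32-bit shifts; the shift count is masked to 5 bits.
  int32_t JsSar(int32_t value, int32_t count) {   // x >> n, sign-propagating
    return value >> (count & 0x1f);
  }
  int32_t JsShl(int32_t value, int32_t count) {   // x << n
    return static_cast<int32_t>(static_cast<uint32_t>(value) << (count & 0x1f));
  }
  uint32_t JsShr(int32_t value, int32_t count) {  // x >>> n, result is unsigned
    return static_cast<uint32_t>(value) >> (count & 0x1f);
  }
  // JsShr(-1, 0) == 0xffffffff: too large for a signed smi, hence the
  // branch to result_not_a_smi above when the srlv result is negative.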
2714 // Check that the *signed* result fits in a smi. | 2237 // Check that the *signed* result fits in a smi. |
(...skipping 13 matching lines...)
2728 masm, result, heap_number_map, scratch1, scratch2, gc_required, | 2251 masm, result, heap_number_map, scratch1, scratch2, gc_required, |
2729 mode); | 2252 mode); |
2730 } | 2253 } |
2731 | 2254 |
2732 // a2: Answer as signed int32. | 2255 // a2: Answer as signed int32. |
2733 // t1: Heap number to write answer into. | 2256 // t1: Heap number to write answer into. |
2734 | 2257 |
2735 // Nothing can go wrong now, so move the heap number to v0, which is the | 2258 // Nothing can go wrong now, so move the heap number to v0, which is the |
2736 // result. | 2259 // result. |
2737 __ mov(v0, t1); | 2260 __ mov(v0, t1); |
2738 | 2261 // Convert the int32 in a2 to the heap number in a0. As |
2739 if (CpuFeatures::IsSupported(FPU)) { | 2262 // mentioned above SHR needs to always produce a positive result. |
2740 // Convert the int32 in a2 to the heap number in a0. As | 2263 __ mtc1(a2, f0); |
2741 // mentioned above SHR needs to always produce a positive result. | 2264 if (op == Token::SHR) { |
2742 CpuFeatureScope scope(masm, FPU); | 2265 __ Cvt_d_uw(f0, f0, f22); |
2743 __ mtc1(a2, f0); | |
2744 if (op == Token::SHR) { | |
2745 __ Cvt_d_uw(f0, f0, f22); | |
2746 } else { | |
2747 __ cvt_d_w(f0, f0); | |
2748 } | |
2749 // ARM uses a workaround here because of the unaligned HeapNumber | |
2750 // kValueOffset. On MIPS this workaround is built into sdc1 so | |
2751 // there's no point in generating even more instructions. | |
2752 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); | |
2753 __ Ret(); | |
2754 } else { | 2266 } else { |
2755 // Tail call that writes the int32 in a2 to the heap number in v0, using | 2267 __ cvt_d_w(f0, f0); |
2756 // a3 and a0 as scratch. v0 is preserved and returned. | |
2757 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0); | |
2758 __ TailCallStub(&stub); | |
2759 } | 2268 } |
| 2269 // ARM uses a workaround here because of the unaligned HeapNumber |
| 2270 // kValueOffset. On MIPS this workaround is built into sdc1 so |
| 2271 // there's no point in generating even more instructions. |
| 2272 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); |
| 2273 __ Ret(); |
2760 break; | 2274 break; |
2761 } | 2275 } |
2762 default: | 2276 default: |
2763 UNREACHABLE(); | 2277 UNREACHABLE(); |
2764 } | 2278 } |
2765 } | 2279 } |
2766 | 2280 |
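When the 32-bit result does not fit in a smi, the code above boxes it as a heap number; for SHR the raw bits must be read as an unsigned integer before the int-to-double conversion (Cvt_d_uw), otherwise any result with the top bit set would come out negative. A value-level sketch of that distinction in plain C++:

  #include <cstdint>

  // The double that ends up in the HeapNumber for a raw 32-bit result.
  double Int32ResultToDouble(int32_t bits, bool is_shr_result) {
    if (is_shr_result) {
      // Token::SHR: reinterpret as unsigned first (Cvt_d_uw above).
      return static_cast<double>(static_cast<uint32_t>(bits));
    }
    return static_cast<double>(bits);  // cvt_d_w above
  }
  // Example: bits 0x80000000 becomes 2147483648.0 for SHR but
  // -2147483648.0 for the signed operators.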
2767 | 2281 |
2768 // Generate the smi code. If the operation on smis is successful, this return is | 2282 // Generate the smi code. If the operation on smis is successful, this return is |
2769 // generated. If the result is not a smi and heap number allocation is not | 2283 // generated. If the result is not a smi and heap number allocation is not |
(...skipping 126 matching lines...)
2896 // again if this changes. | 2410 // again if this changes. |
2897 if (left_type_ == BinaryOpIC::SMI) { | 2411 if (left_type_ == BinaryOpIC::SMI) { |
2898 __ JumpIfNotSmi(left, &transition); | 2412 __ JumpIfNotSmi(left, &transition); |
2899 } | 2413 } |
2900 if (right_type_ == BinaryOpIC::SMI) { | 2414 if (right_type_ == BinaryOpIC::SMI) { |
2901 __ JumpIfNotSmi(right, &transition); | 2415 __ JumpIfNotSmi(right, &transition); |
2902 } | 2416 } |
2903 // Load both operands and check that they are 32-bit integers. | 2417 // Load both operands and check that they are 32-bit integers. |
2904 // Jump to type transition if they are not. The registers a0 and a1 (right | 2418 // Jump to type transition if they are not. The registers a0 and a1 (right |
2905 // and left) are preserved for the runtime call. | 2419 // and left) are preserved for the runtime call. |
2906 FloatingPointHelper::Destination destination = | 2420 FloatingPointHelper::Destination destination = (op_ != Token::MOD) |
2907 (CpuFeatures::IsSupported(FPU) && op_ != Token::MOD) | |
2908 ? FloatingPointHelper::kFPURegisters | 2421 ? FloatingPointHelper::kFPURegisters |
2909 : FloatingPointHelper::kCoreRegisters; | 2422 : FloatingPointHelper::kCoreRegisters; |
2910 | 2423 |
2911 FloatingPointHelper::LoadNumberAsInt32Double(masm, | 2424 FloatingPointHelper::LoadNumberAsInt32Double(masm, |
2912 right, | 2425 right, |
2913 destination, | 2426 destination, |
2914 f14, | 2427 f14, |
2915 f16, | 2428 f16, |
2916 a2, | 2429 a2, |
2917 a3, | 2430 a3, |
2918 heap_number_map, | 2431 heap_number_map, |
2919 scratch1, | 2432 scratch1, |
2920 scratch2, | 2433 scratch2, |
2921 f2, | 2434 f2, |
2922 &transition); | 2435 &transition); |
2923 FloatingPointHelper::LoadNumberAsInt32Double(masm, | 2436 FloatingPointHelper::LoadNumberAsInt32Double(masm, |
2924 left, | 2437 left, |
2925 destination, | 2438 destination, |
2926 f12, | 2439 f12, |
2927 f16, | 2440 f16, |
2928 t0, | 2441 t0, |
2929 t1, | 2442 t1, |
2930 heap_number_map, | 2443 heap_number_map, |
2931 scratch1, | 2444 scratch1, |
2932 scratch2, | 2445 scratch2, |
2933 f2, | 2446 f2, |
2934 &transition); | 2447 &transition); |
2935 | 2448 |
2936 if (destination == FloatingPointHelper::kFPURegisters) { | 2449 if (destination == FloatingPointHelper::kFPURegisters) { |
2937 CpuFeatureScope scope(masm, FPU); | |
2938 Label return_heap_number; | 2450 Label return_heap_number; |
2939 switch (op_) { | 2451 switch (op_) { |
2940 case Token::ADD: | 2452 case Token::ADD: |
2941 __ add_d(f10, f12, f14); | 2453 __ add_d(f10, f12, f14); |
2942 break; | 2454 break; |
2943 case Token::SUB: | 2455 case Token::SUB: |
2944 __ sub_d(f10, f12, f14); | 2456 __ sub_d(f10, f12, f14); |
2945 break; | 2457 break; |
2946 case Token::MUL: | 2458 case Token::MUL: |
2947 __ mul_d(f10, f12, f14); | 2459 __ mul_d(f10, f12, f14); |
(...skipping 148 matching lines...)
3096 __ And(a2, a2, Operand(0x1f)); | 2608 __ And(a2, a2, Operand(0x1f)); |
3097 __ srav(a2, a3, a2); | 2609 __ srav(a2, a3, a2); |
3098 break; | 2610 break; |
3099 case Token::SHR: | 2611 case Token::SHR: |
3100 __ And(a2, a2, Operand(0x1f)); | 2612 __ And(a2, a2, Operand(0x1f)); |
3101 __ srlv(a2, a3, a2); | 2613 __ srlv(a2, a3, a2); |
3102 // SHR is special because it is required to produce a positive answer. | 2614 // SHR is special because it is required to produce a positive answer. |
3103 // We only get a negative result if the shift value (a2) is 0. | 2615 // We only get a negative result if the shift value (a2) is 0. |
3104 // This result cannot be represented as a signed 32-bit integer, try | 2616 // This result cannot be represented as a signed 32-bit integer, try |
3105 // to return a heap number if we can. | 2617 // to return a heap number if we can. |
3106 // The non FPU code does not support this special case, so jump to | 2618 __ Branch((result_type_ <= BinaryOpIC::INT32) |
3107 // runtime if we don't support it. | 2619 ? &transition |
3108 if (CpuFeatures::IsSupported(FPU)) { | 2620 : &return_heap_number, |
3109 __ Branch((result_type_ <= BinaryOpIC::INT32) | 2621 lt, |
3110 ? &transition | 2622 a2, |
3111 : &return_heap_number, | 2623 Operand(zero_reg)); |
3112 lt, | |
3113 a2, | |
3114 Operand(zero_reg)); | |
3115 } else { | |
3116 __ Branch((result_type_ <= BinaryOpIC::INT32) | |
3117 ? &transition | |
3118 : &call_runtime, | |
3119 lt, | |
3120 a2, | |
3121 Operand(zero_reg)); | |
3122 } | |
3123 break; | 2624 break; |
3124 case Token::SHL: | 2625 case Token::SHL: |
3125 __ And(a2, a2, Operand(0x1f)); | 2626 __ And(a2, a2, Operand(0x1f)); |
3126 __ sllv(a2, a3, a2); | 2627 __ sllv(a2, a3, a2); |
3127 break; | 2628 break; |
3128 default: | 2629 default: |
3129 UNREACHABLE(); | 2630 UNREACHABLE(); |
3130 } | 2631 } |
3131 | 2632 |
3132 // Check if the result fits in a smi. | 2633 // Check if the result fits in a smi. |
3133 __ Addu(scratch1, a2, Operand(0x40000000)); | 2634 __ Addu(scratch1, a2, Operand(0x40000000)); |
3134 // If not try to return a heap number. (We know the result is an int32.) | 2635 // If not try to return a heap number. (We know the result is an int32.) |
3135 __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg)); | 2636 __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg)); |
3136 // Tag the result and return. | 2637 // Tag the result and return. |
3137 __ SmiTag(v0, a2); | 2638 __ SmiTag(v0, a2); |
3138 __ Ret(); | 2639 __ Ret(); |
3139 | 2640 |
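The smi check just above uses the usual 31-bit-range trick on this 32-bit port: a value fits in a smi iff it lies in [-2^30, 2^30), and adding 0x40000000 maps exactly that interval onto the non-negative signed range, so a single "less than zero" branch decides. A sketch of the same test in C++, done in unsigned arithmetic to avoid signed overflow; the function name is illustrative:

  #include <cstdint>

  // True iff value is representable as a 31-bit smi, mirroring the
  // Addu(scratch1, a2, 0x40000000) / branch-if-negative pair above.
  bool FitsInSmi(int32_t value) {
    uint32_t biased = static_cast<uint32_t>(value) + 0x40000000u;
    return static_cast<int32_t>(biased) >= 0;
  }
  // FitsInSmi(0x3fffffff) and FitsInSmi(-0x40000000) are true;
  // FitsInSmi(0x40000000) and FitsInSmi(-0x40000001) are false.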
3140 __ bind(&return_heap_number); | 2641 __ bind(&return_heap_number); |
3141 heap_number_result = t1; | 2642 heap_number_result = t1; |
3142 BinaryOpStub_GenerateHeapResultAllocation(masm, | 2643 BinaryOpStub_GenerateHeapResultAllocation(masm, |
3143 heap_number_result, | 2644 heap_number_result, |
3144 heap_number_map, | 2645 heap_number_map, |
3145 scratch1, | 2646 scratch1, |
3146 scratch2, | 2647 scratch2, |
3147 &call_runtime, | 2648 &call_runtime, |
3148 mode_); | 2649 mode_); |
3149 | 2650 |
3150 if (CpuFeatures::IsSupported(FPU)) { | 2651 if (op_ != Token::SHR) { |
3151 CpuFeatureScope scope(masm, FPU); | 2652 // Convert the result to a floating point value. |
| 2653 __ mtc1(a2, double_scratch); |
| 2654 __ cvt_d_w(double_scratch, double_scratch); |
| 2655 } else { |
| 2656 // The result must be interpreted as an unsigned 32-bit integer. |
| 2657 __ mtc1(a2, double_scratch); |
| 2658 __ Cvt_d_uw(double_scratch, double_scratch, single_scratch); |
| 2659 } |
3152 | 2660 |
3153 if (op_ != Token::SHR) { | 2661 // Store the result. |
3154 // Convert the result to a floating point value. | 2662 __ mov(v0, heap_number_result); |
3155 __ mtc1(a2, double_scratch); | 2663 __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset)); |
3156 __ cvt_d_w(double_scratch, double_scratch); | 2664 __ Ret(); |
3157 } else { | |
3158 // The result must be interpreted as an unsigned 32-bit integer. | |
3159 __ mtc1(a2, double_scratch); | |
3160 __ Cvt_d_uw(double_scratch, double_scratch, single_scratch); | |
3161 } | |
3162 | |
3163 // Store the result. | |
3164 __ mov(v0, heap_number_result); | |
3165 __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset)); | |
3166 __ Ret(); | |
3167 } else { | |
3168 // Tail call that writes the int32 in a2 to the heap number in v0, using | |
3169 // a3 and a0 as scratch. v0 is preserved and returned. | |
3170 __ mov(v0, t1); | |
3171 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0); | |
3172 __ TailCallStub(&stub); | |
3173 } | |
3174 | 2665 |
3175 break; | 2666 break; |
3176 } | 2667 } |
3177 | 2668 |
3178 default: | 2669 default: |
3179 UNREACHABLE(); | 2670 UNREACHABLE(); |
3180 } | 2671 } |
3181 | 2672 |
3182 // We never expect DIV to yield an integer result, so we always generate | 2673 // We never expect DIV to yield an integer result, so we always generate |
3183 // type transition code for DIV operations expecting an integer result: the | 2674 // type transition code for DIV operations expecting an integer result: the |
(...skipping 160 matching lines...)
3344 | 2835 |
3345 Label input_not_smi; | 2836 Label input_not_smi; |
3346 Label loaded; | 2837 Label loaded; |
3347 Label calculate; | 2838 Label calculate; |
3348 Label invalid_cache; | 2839 Label invalid_cache; |
3349 const Register scratch0 = t5; | 2840 const Register scratch0 = t5; |
3350 const Register scratch1 = t3; | 2841 const Register scratch1 = t3; |
3351 const Register cache_entry = a0; | 2842 const Register cache_entry = a0; |
3352 const bool tagged = (argument_type_ == TAGGED); | 2843 const bool tagged = (argument_type_ == TAGGED); |
3353 | 2844 |
3354 if (CpuFeatures::IsSupported(FPU)) { | 2845 if (tagged) { |
3355 CpuFeatureScope scope(masm, FPU); | 2846 // Argument is a number and is on stack and in a0. |
| 2847 // Load argument and check if it is a smi. |
| 2848 __ JumpIfNotSmi(a0, &input_not_smi); |
3356 | 2849 |
3357 if (tagged) { | 2850 // Input is a smi. Convert to double and load the low and high words |
3358 // Argument is a number and is on stack and in a0. | 2851 // of the double into a2, a3. |
3359 // Load argument and check if it is a smi. | 2852 __ sra(t0, a0, kSmiTagSize); |
3360 __ JumpIfNotSmi(a0, &input_not_smi); | 2853 __ mtc1(t0, f4); |
| 2854 __ cvt_d_w(f4, f4); |
| 2855 __ Move(a2, a3, f4); |
| 2856 __ Branch(&loaded); |
3361 | 2857 |
3362 // Input is a smi. Convert to double and load the low and high words | 2858 __ bind(&input_not_smi); |
3363 // of the double into a2, a3. | 2859 // Check if input is a HeapNumber. |
3364 __ sra(t0, a0, kSmiTagSize); | 2860 __ CheckMap(a0, |
3365 __ mtc1(t0, f4); | 2861 a1, |
3366 __ cvt_d_w(f4, f4); | 2862 Heap::kHeapNumberMapRootIndex, |
3367 __ Move(a2, a3, f4); | 2863 &calculate, |
3368 __ Branch(&loaded); | 2864 DONT_DO_SMI_CHECK); |
| 2865 // Input is a HeapNumber. Store the |
| 2866 // low and high words into a2, a3. |
| 2867 __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset)); |
| 2868 __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4)); |
| 2869 } else { |
| 2870 // Input is untagged double in f4. Output goes to f4. |
| 2871 __ Move(a2, a3, f4); |
| 2872 } |
| 2873 __ bind(&loaded); |
| 2874 // a2 = low 32 bits of double value. |
| 2875 // a3 = high 32 bits of double value. |
| 2876 // Compute hash (the shifts are arithmetic): |
| 2877 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); |
| 2878 __ Xor(a1, a2, a3); |
| 2879 __ sra(t0, a1, 16); |
| 2880 __ Xor(a1, a1, t0); |
| 2881 __ sra(t0, a1, 8); |
| 2882 __ Xor(a1, a1, t0); |
| 2883 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); |
| 2884 __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1)); |
3369 | 2885 |
3370 __ bind(&input_not_smi); | 2886 // a2 = low 32 bits of double value. |
3371 // Check if input is a HeapNumber. | 2887 // a3 = high 32 bits of double value. |
3372 __ CheckMap(a0, | 2888 // a1 = TranscendentalCache::hash(double value). |
3373 a1, | 2889 __ li(cache_entry, Operand( |
3374 Heap::kHeapNumberMapRootIndex, | 2890 ExternalReference::transcendental_cache_array_address( |
3375 &calculate, | 2891 masm->isolate()))); |
3376 DONT_DO_SMI_CHECK); | 2892 // a0 points to cache array. |
3377 // Input is a HeapNumber. Store the | 2893 __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof( |
3378 // low and high words into a2, a3. | 2894 Isolate::Current()->transcendental_cache()->caches_[0]))); |
3379 __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset)); | 2895 // a0 points to the cache for the type type_. |
3380 __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4)); | 2896 // If NULL, the cache hasn't been initialized yet, so go through runtime. |
3381 } else { | 2897 __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg)); |
3382 // Input is untagged double in f4. Output goes to f4. | |
3383 __ Move(a2, a3, f4); | |
3384 } | |
3385 __ bind(&loaded); | |
3386 // a2 = low 32 bits of double value. | |
3387 // a3 = high 32 bits of double value. | |
3388 // Compute hash (the shifts are arithmetic): | |
3389 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); | |
3390 __ Xor(a1, a2, a3); | |
3391 __ sra(t0, a1, 16); | |
3392 __ Xor(a1, a1, t0); | |
3393 __ sra(t0, a1, 8); | |
3394 __ Xor(a1, a1, t0); | |
3395 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); | |
3396 __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1)); | |
3397 | |
3398 // a2 = low 32 bits of double value. | |
3399 // a3 = high 32 bits of double value. | |
3400 // a1 = TranscendentalCache::hash(double value). | |
3401 __ li(cache_entry, Operand( | |
3402 ExternalReference::transcendental_cache_array_address( | |
3403 masm->isolate()))); | |
3404 // a0 points to cache array. | |
3405 __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof( | |
3406 Isolate::Current()->transcendental_cache()->caches_[0]))); | |
3407 // a0 points to the cache for the type type_. | |
3408 // If NULL, the cache hasn't been initialized yet, so go through runtime. | |
3409 __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg)); | |
3410 | 2898 |
3411 #ifdef DEBUG | 2899 #ifdef DEBUG |
3412 // Check that the layout of cache elements matches expectations. | 2900 // Check that the layout of cache elements matches expectations. |
3413 { TranscendentalCache::SubCache::Element test_elem[2]; | 2901 { TranscendentalCache::SubCache::Element test_elem[2]; |
3414 char* elem_start = reinterpret_cast<char*>(&test_elem[0]); | 2902 char* elem_start = reinterpret_cast<char*>(&test_elem[0]); |
3415 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); | 2903 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); |
3416 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); | 2904 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); |
3417 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); | 2905 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); |
3418 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); | 2906 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); |
3419 CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer. | 2907 CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer. |
3420 CHECK_EQ(0, elem_in0 - elem_start); | 2908 CHECK_EQ(0, elem_in0 - elem_start); |
3421 CHECK_EQ(kIntSize, elem_in1 - elem_start); | 2909 CHECK_EQ(kIntSize, elem_in1 - elem_start); |
3422 CHECK_EQ(2 * kIntSize, elem_out - elem_start); | 2910 CHECK_EQ(2 * kIntSize, elem_out - elem_start); |
3423 } | 2911 } |
3424 #endif | 2912 #endif |
3425 | 2913 |
3426 // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12]. | 2914 // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12]. |
3427 __ sll(t0, a1, 1); | 2915 __ sll(t0, a1, 1); |
3428 __ Addu(a1, a1, t0); | 2916 __ Addu(a1, a1, t0); |
3429 __ sll(t0, a1, 2); | 2917 __ sll(t0, a1, 2); |
3430 __ Addu(cache_entry, cache_entry, t0); | 2918 __ Addu(cache_entry, cache_entry, t0); |
3431 | 2919 |
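The probe above is easier to follow as ordinary C++: each cache element is two uint32 words holding the input double's bit pattern plus a pointer to the cached result (12 bytes, which is why the index is scaled by 12 via a1 + 2*a1 and then *4), and the index is the xor/shift hash of the two halves masked to the power-of-two cache size. A sketch with illustrative types; the real SubCache/Element definitions live elsewhere in V8, and the word order simply follows the platform's in-memory double layout:

  #include <cstdint>
  #include <cstring>

  struct CacheElement { uint32_t in[2]; void* output; };  // 12 bytes, as CHECKed in the DEBUG block above

  // h = (lo ^ hi); h ^= h >> 16; h ^= h >> 8; h &= cache_size - 1
  // (arithmetic shifts, as in the generated code).
  uint32_t TranscendentalHash(double input, uint32_t cache_size) {
    uint32_t words[2];
    std::memcpy(words, &input, sizeof(words));
    int32_t h = static_cast<int32_t>(words[0] ^ words[1]);
    h ^= h >> 16;
    h ^= h >> 8;
    return static_cast<uint32_t>(h) & (cache_size - 1);
  }

  // A hit requires both recorded input words to match exactly.
  void* Probe(CacheElement* cache, uint32_t cache_size, double input) {
    CacheElement& e = cache[TranscendentalHash(input, cache_size)];
    uint32_t words[2];
    std::memcpy(words, &input, sizeof(words));
    return (e.in[0] == words[0] && e.in[1] == words[1]) ? e.output : nullptr;
  }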
3432 // Check if cache matches: Double value is stored in uint32_t[2] array. | 2920 // Check if cache matches: Double value is stored in uint32_t[2] array. |
3433 __ lw(t0, MemOperand(cache_entry, 0)); | 2921 __ lw(t0, MemOperand(cache_entry, 0)); |
3434 __ lw(t1, MemOperand(cache_entry, 4)); | 2922 __ lw(t1, MemOperand(cache_entry, 4)); |
3435 __ lw(t2, MemOperand(cache_entry, 8)); | 2923 __ lw(t2, MemOperand(cache_entry, 8)); |
3436 __ Branch(&calculate, ne, a2, Operand(t0)); | 2924 __ Branch(&calculate, ne, a2, Operand(t0)); |
3437 __ Branch(&calculate, ne, a3, Operand(t1)); | 2925 __ Branch(&calculate, ne, a3, Operand(t1)); |
3438 // Cache hit. Load result, cleanup and return. | 2926 // Cache hit. Load result, cleanup and return. |
3439 Counters* counters = masm->isolate()->counters(); | 2927 Counters* counters = masm->isolate()->counters(); |
3440 __ IncrementCounter( | 2928 __ IncrementCounter( |
3441 counters->transcendental_cache_hit(), 1, scratch0, scratch1); | 2929 counters->transcendental_cache_hit(), 1, scratch0, scratch1); |
3442 if (tagged) { | 2930 if (tagged) { |
3443 // Pop input value from stack and load result into v0. | 2931 // Pop input value from stack and load result into v0. |
3444 __ Drop(1); | 2932 __ Drop(1); |
3445 __ mov(v0, t2); | 2933 __ mov(v0, t2); |
3446 } else { | 2934 } else { |
3447 // Load result into f4. | 2935 // Load result into f4. |
3448 __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset)); | 2936 __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset)); |
3449 } | 2937 } |
3450 __ Ret(); | 2938 __ Ret(); |
3451 } // if (CpuFeatures::IsSupported(FPU)) | |
3452 | 2939 |
3453 __ bind(&calculate); | 2940 __ bind(&calculate); |
3454 Counters* counters = masm->isolate()->counters(); | |
3455 __ IncrementCounter( | 2941 __ IncrementCounter( |
3456 counters->transcendental_cache_miss(), 1, scratch0, scratch1); | 2942 counters->transcendental_cache_miss(), 1, scratch0, scratch1); |
3457 if (tagged) { | 2943 if (tagged) { |
3458 __ bind(&invalid_cache); | 2944 __ bind(&invalid_cache); |
3459 __ TailCallExternalReference(ExternalReference(RuntimeFunction(), | 2945 __ TailCallExternalReference(ExternalReference(RuntimeFunction(), |
3460 masm->isolate()), | 2946 masm->isolate()), |
3461 1, | 2947 1, |
3462 1); | 2948 1); |
3463 } else { | 2949 } else { |
3464 ASSERT(CpuFeatures::IsSupported(FPU)); | |
3465 CpuFeatureScope scope(masm, FPU); | |
3466 | |
3467 Label no_update; | 2950 Label no_update; |
3468 Label skip_cache; | 2951 Label skip_cache; |
3469 | 2952 |
3470 // Call C function to calculate the result and update the cache. | 2953 // Call C function to calculate the result and update the cache. |
3471 // a0: precalculated cache entry address. | 2954 // a0: precalculated cache entry address. |
3472 // a2 and a3: parts of the double value. | 2955 // a2 and a3: parts of the double value. |
3473 // Store a0, a2 and a3 on stack for later before calling C function. | 2956 // Store a0, a2 and a3 on stack for later before calling C function. |
3474 __ Push(a3, a2, cache_entry); | 2957 __ Push(a3, a2, cache_entry); |
3475 GenerateCallCFunction(masm, scratch0); | 2958 GenerateCallCFunction(masm, scratch0); |
3476 __ GetCFunctionDoubleResult(f4); | 2959 __ GetCFunctionDoubleResult(f4); |
(...skipping 106 matching lines...)
3583 __ TailCallRuntime(Runtime::kStackGuard, 0, 1); | 3066 __ TailCallRuntime(Runtime::kStackGuard, 0, 1); |
3584 } | 3067 } |
3585 | 3068 |
3586 | 3069 |
3587 void InterruptStub::Generate(MacroAssembler* masm) { | 3070 void InterruptStub::Generate(MacroAssembler* masm) { |
3588 __ TailCallRuntime(Runtime::kInterrupt, 0, 1); | 3071 __ TailCallRuntime(Runtime::kInterrupt, 0, 1); |
3589 } | 3072 } |
3590 | 3073 |
3591 | 3074 |
3592 void MathPowStub::Generate(MacroAssembler* masm) { | 3075 void MathPowStub::Generate(MacroAssembler* masm) { |
3593 CpuFeatureScope fpu_scope(masm, FPU); | |
3594 const Register base = a1; | 3076 const Register base = a1; |
3595 const Register exponent = a2; | 3077 const Register exponent = a2; |
3596 const Register heapnumbermap = t1; | 3078 const Register heapnumbermap = t1; |
3597 const Register heapnumber = v0; | 3079 const Register heapnumber = v0; |
3598 const DoubleRegister double_base = f2; | 3080 const DoubleRegister double_base = f2; |
3599 const DoubleRegister double_exponent = f4; | 3081 const DoubleRegister double_exponent = f4; |
3600 const DoubleRegister double_result = f0; | 3082 const DoubleRegister double_result = f0; |
3601 const DoubleRegister double_scratch = f6; | 3083 const DoubleRegister double_scratch = f6; |
3602 const FPURegister single_scratch = f8; | 3084 const FPURegister single_scratch = f8; |
3603 const Register scratch = t5; | 3085 const Register scratch = t5; |
(...skipping 215 matching lines...)
3819 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { | 3301 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { |
3820 CEntryStub::GenerateAheadOfTime(isolate); | 3302 CEntryStub::GenerateAheadOfTime(isolate); |
3821 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate); | 3303 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate); |
3822 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); | 3304 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); |
3823 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); | 3305 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); |
3824 RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); | 3306 RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); |
3825 } | 3307 } |
3826 | 3308 |
3827 | 3309 |
3828 void CodeStub::GenerateFPStubs(Isolate* isolate) { | 3310 void CodeStub::GenerateFPStubs(Isolate* isolate) { |
3829 SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU) | 3311 SaveFPRegsMode mode = kSaveFPRegs; |
3830 ? kSaveFPRegs | |
3831 : kDontSaveFPRegs; | |
3832 CEntryStub save_doubles(1, mode); | 3312 CEntryStub save_doubles(1, mode); |
3833 StoreBufferOverflowStub stub(mode); | 3313 StoreBufferOverflowStub stub(mode); |
3834 // These stubs might already be in the snapshot, detect that and don't | 3314 // These stubs might already be in the snapshot, detect that and don't |
3835 // regenerate, which would lead to code stub initialization state being messed | 3315 // regenerate, which would lead to code stub initialization state being messed |
3836 // up. | 3316 // up. |
3837 Code* save_doubles_code; | 3317 Code* save_doubles_code; |
3838 if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) { | 3318 if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) { |
3839 save_doubles_code = *save_doubles.GetCode(isolate); | 3319 save_doubles_code = *save_doubles.GetCode(isolate); |
3840 } | 3320 } |
3841 Code* store_buffer_overflow_code; | 3321 Code* store_buffer_overflow_code; |
(...skipping 250 matching lines...)
4092 // a2: receiver | 3572 // a2: receiver |
4093 // a3: argc | 3573 // a3: argc |
4094 // | 3574 // |
4095 // Stack: | 3575 // Stack: |
4096 // 4 args slots | 3576 // 4 args slots |
4097 // args | 3577 // args |
4098 | 3578 |
4099 // Save callee saved registers on the stack. | 3579 // Save callee saved registers on the stack. |
4100 __ MultiPush(kCalleeSaved | ra.bit()); | 3580 __ MultiPush(kCalleeSaved | ra.bit()); |
4101 | 3581 |
4102 if (CpuFeatures::IsSupported(FPU)) { | 3582 // Save callee-saved FPU registers. |
4103 CpuFeatureScope scope(masm, FPU); | 3583 __ MultiPushFPU(kCalleeSavedFPU); |
4104 // Save callee-saved FPU registers. | 3584 // Set up the reserved register for 0.0. |
4105 __ MultiPushFPU(kCalleeSavedFPU); | 3585 __ Move(kDoubleRegZero, 0.0); |
4106 // Set up the reserved register for 0.0. | |
4107 __ Move(kDoubleRegZero, 0.0); | |
4108 } | |
4109 | 3586 |
4110 | 3587 |
4111 // Load argv in s0 register. | 3588 // Load argv in s0 register. |
4112 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; | 3589 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; |
4113 if (CpuFeatures::IsSupported(FPU)) { | 3590 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize; |
4114 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize; | |
4115 } | |
4116 | 3591 |
4117 __ InitializeRootRegister(); | 3592 __ InitializeRootRegister(); |
4118 __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize)); | 3593 __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize)); |
4119 | 3594 |
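The argv load above walks back over everything the prologue has pushed so far: ra plus the callee-saved GPRs, then the callee-saved FPU doubles, plus the reserved C argument slots (kCArgsSlotsSize). A sketch of that arithmetic with the platform constants passed in as parameters, since their concrete values come from the MIPS port headers rather than this file:

  // Offset from sp to the argv slot after pushing (num_callee_saved + 1)
  // pointers (the +1 is ra) and num_callee_saved_fpu doubles.
  int OffsetToArgv(int num_callee_saved, int num_callee_saved_fpu,
                   int pointer_size, int double_size, int c_args_slots_size) {
    int offset = (num_callee_saved + 1) * pointer_size;  // GPRs incl. ra
    offset += num_callee_saved_fpu * double_size;        // FPU callee-saved regs
    return offset + c_args_slots_size;                   // skip the C arg slots
  }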
4120 // We build an EntryFrame. | 3595 // We build an EntryFrame. |
4121 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used. | 3596 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used. |
4122 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; | 3597 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; |
4123 __ li(t2, Operand(Smi::FromInt(marker))); | 3598 __ li(t2, Operand(Smi::FromInt(marker))); |
4124 __ li(t1, Operand(Smi::FromInt(marker))); | 3599 __ li(t1, Operand(Smi::FromInt(marker))); |
4125 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress, | 3600 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress, |
(...skipping 115 matching lines...)
4241 | 3716 |
4242 // Restore the top frame descriptors from the stack. | 3717 // Restore the top frame descriptors from the stack. |
4243 __ pop(t1); | 3718 __ pop(t1); |
4244 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress, | 3719 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress, |
4245 isolate))); | 3720 isolate))); |
4246 __ sw(t1, MemOperand(t0)); | 3721 __ sw(t1, MemOperand(t0)); |
4247 | 3722 |
4248 // Reset the stack to the callee saved registers. | 3723 // Reset the stack to the callee saved registers. |
4249 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset); | 3724 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset); |
4250 | 3725 |
4251 if (CpuFeatures::IsSupported(FPU)) { | 3726 // Restore callee-saved fpu registers. |
4252 CpuFeatureScope scope(masm, FPU); | 3727 __ MultiPopFPU(kCalleeSavedFPU); |
4253 // Restore callee-saved fpu registers. | |
4254 __ MultiPopFPU(kCalleeSavedFPU); | |
4255 } | |
4256 | 3728 |
4257 // Restore callee saved registers from the stack. | 3729 // Restore callee saved registers from the stack. |
4258 __ MultiPop(kCalleeSaved | ra.bit()); | 3730 __ MultiPop(kCalleeSaved | ra.bit()); |
4259 // Return. | 3731 // Return. |
4260 __ Jump(ra); | 3732 __ Jump(ra); |
4261 } | 3733 } |
4262 | 3734 |
4263 | 3735 |
4264 // Uses registers a0 to t0. | 3736 // Uses registers a0 to t0. |
4265 // Expected input (depending on whether args are in registers or on the stack): | 3737 // Expected input (depending on whether args are in registers or on the stack): |
(...skipping 2718 matching lines...)
6984 Label miss; | 6456 Label miss; |
6985 | 6457 |
6986 if (left_ == CompareIC::SMI) { | 6458 if (left_ == CompareIC::SMI) { |
6987 __ JumpIfNotSmi(a1, &miss); | 6459 __ JumpIfNotSmi(a1, &miss); |
6988 } | 6460 } |
6989 if (right_ == CompareIC::SMI) { | 6461 if (right_ == CompareIC::SMI) { |
6990 __ JumpIfNotSmi(a0, &miss); | 6462 __ JumpIfNotSmi(a0, &miss); |
6991 } | 6463 } |
6992 | 6464 |
6993 // Inlining the double comparison and falling back to the general compare | 6465 // Inlining the double comparison and falling back to the general compare |
6994 // stub if NaN is involved or FPU is unsupported. | 6466 // stub if NaN is involved. |
6995 if (CpuFeatures::IsSupported(FPU)) { | 6467 // Load left and right operand. |
6996 CpuFeatureScope scope(masm, FPU); | 6468 Label done, left, left_smi, right_smi; |
| 6469 __ JumpIfSmi(a0, &right_smi); |
| 6470 __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, |
| 6471 DONT_DO_SMI_CHECK); |
| 6472 __ Subu(a2, a0, Operand(kHeapObjectTag)); |
| 6473 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset)); |
| 6474 __ Branch(&left); |
| 6475 __ bind(&right_smi); |
| 6476 __ SmiUntag(a2, a0); // Can't clobber a0 yet. |
| 6477 FPURegister single_scratch = f6; |
| 6478 __ mtc1(a2, single_scratch); |
| 6479 __ cvt_d_w(f2, single_scratch); |
6997 | 6480 |
6998 // Load left and right operand. | 6481 __ bind(&left); |
6999 Label done, left, left_smi, right_smi; | 6482 __ JumpIfSmi(a1, &left_smi); |
7000 __ JumpIfSmi(a0, &right_smi); | 6483 __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, |
7001 __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, | 6484 DONT_DO_SMI_CHECK); |
7002 DONT_DO_SMI_CHECK); | 6485 __ Subu(a2, a1, Operand(kHeapObjectTag)); |
7003 __ Subu(a2, a0, Operand(kHeapObjectTag)); | 6486 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset)); |
7004 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset)); | 6487 __ Branch(&done); |
7005 __ Branch(&left); | 6488 __ bind(&left_smi); |
7006 __ bind(&right_smi); | 6489 __ SmiUntag(a2, a1); // Can't clobber a1 yet. |
7007 __ SmiUntag(a2, a0); // Can't clobber a0 yet. | 6490 single_scratch = f8; |
7008 FPURegister single_scratch = f6; | 6491 __ mtc1(a2, single_scratch); |
7009 __ mtc1(a2, single_scratch); | 6492 __ cvt_d_w(f0, single_scratch); |
7010 __ cvt_d_w(f2, single_scratch); | |
7011 | 6493 |
7012 __ bind(&left); | 6494 __ bind(&done); |
7013 __ JumpIfSmi(a1, &left_smi); | |
7014 __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, | |
7015 DONT_DO_SMI_CHECK); | |
7016 __ Subu(a2, a1, Operand(kHeapObjectTag)); | |
7017 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset)); | |
7018 __ Branch(&done); | |
7019 __ bind(&left_smi); | |
7020 __ SmiUntag(a2, a1); // Can't clobber a1 yet. | |
7021 single_scratch = f8; | |
7022 __ mtc1(a2, single_scratch); | |
7023 __ cvt_d_w(f0, single_scratch); | |
7024 | 6495 |
7025 __ bind(&done); | 6496 // Return a result of -1, 0, or 1, or use CompareStub for NaNs. |
| 6497 Label fpu_eq, fpu_lt; |
| 6498 // Test if equal, and also handle the unordered/NaN case. |
| 6499 __ BranchF(&fpu_eq, &unordered, eq, f0, f2); |
7026 | 6500 |
7027 // Return a result of -1, 0, or 1, or use CompareStub for NaNs. | 6501 // Test if less (unordered case is already handled). |
7028 Label fpu_eq, fpu_lt; | 6502 __ BranchF(&fpu_lt, NULL, lt, f0, f2); |
7029 // Test if equal, and also handle the unordered/NaN case. | |
7030 __ BranchF(&fpu_eq, &unordered, eq, f0, f2); | |
7031 | 6503 |
7032 // Test if less (unordered case is already handled). | 6504 // Otherwise it's greater, so just fall thru, and return. |
7033 __ BranchF(&fpu_lt, NULL, lt, f0, f2); | 6505 __ li(v0, Operand(GREATER)); |
| 6506 __ Ret(); |
7034 | 6507 |
7035 // Otherwise it's greater, so just fall thru, and return. | 6508 __ bind(&fpu_eq); |
7036 __ li(v0, Operand(GREATER)); | 6509 __ li(v0, Operand(EQUAL)); |
7037 __ Ret(); | 6510 __ Ret(); |
7038 | 6511 |
7039 __ bind(&fpu_eq); | 6512 __ bind(&fpu_lt); |
7040 __ li(v0, Operand(EQUAL)); | 6513 __ li(v0, Operand(LESS)); |
7041 __ Ret(); | 6514 __ Ret(); |
7042 | |
7043 __ bind(&fpu_lt); | |
7044 __ li(v0, Operand(LESS)); | |
7045 __ Ret(); | |
7046 } | |
7047 | 6515 |
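The inlined comparison above yields the usual three-way ordering token: equal operands return EQUAL, left-less-than-right returns LESS, everything else falls through to GREATER, and any NaN takes the unordered branch so the generic stub can handle it. A plain C++ sketch of that decision; the enum values assume V8's -1/0/1 encoding for LESS/EQUAL/GREATER, and UNORDERED here is just an illustrative stand-in for the jump to the generic stub:

  #include <cmath>

  enum CompareOutcome { LESS = -1, EQUAL = 0, GREATER = 1, UNORDERED = 2 };

  // Mirrors the BranchF sequence above: NaN -> unordered, then eq, then lt.
  CompareOutcome CompareDoubles(double lhs, double rhs) {
    if (std::isnan(lhs) || std::isnan(rhs)) return UNORDERED;
    if (lhs == rhs) return EQUAL;
    if (lhs < rhs) return LESS;
    return GREATER;
  }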
7048 __ bind(&unordered); | 6516 __ bind(&unordered); |
7049 __ bind(&generic_stub); | 6517 __ bind(&generic_stub); |
7050 ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, | 6518 ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, |
7051 CompareIC::GENERIC); | 6519 CompareIC::GENERIC); |
7052 __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); | 6520 __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); |
7053 | 6521 |
7054 __ bind(&maybe_undefined1); | 6522 __ bind(&maybe_undefined1); |
7055 if (Token::IsOrderedRelationalCompareOp(op_)) { | 6523 if (Token::IsOrderedRelationalCompareOp(op_)) { |
7056 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 6524 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
(...skipping 642 matching lines...)
7699 entry->value, | 7167 entry->value, |
7700 entry->address, | 7168 entry->address, |
7701 entry->action, | 7169 entry->action, |
7702 kDontSaveFPRegs); | 7170 kDontSaveFPRegs); |
7703 stub.GetCode(isolate)->set_is_pregenerated(true); | 7171 stub.GetCode(isolate)->set_is_pregenerated(true); |
7704 } | 7172 } |
7705 } | 7173 } |
7706 | 7174 |
7707 | 7175 |
7708 bool CodeStub::CanUseFPRegisters() { | 7176 bool CodeStub::CanUseFPRegisters() { |
7709 return CpuFeatures::IsSupported(FPU); | 7177 return true; // FPU is a base requirement for V8. |
7710 } | 7178 } |
7711 | 7179 |
7712 | 7180 |
7713 // Takes the input in 3 registers: address_ value_ and object_. A pointer to | 7181 // Takes the input in 3 registers: address_ value_ and object_. A pointer to |
7714 // the value has just been written into the object, now this stub makes sure | 7182 // the value has just been written into the object, now this stub makes sure |
7715 // we keep the GC informed. The word in the object where the value has been | 7183 // we keep the GC informed. The word in the object where the value has been |
7716 // written is in the address register. | 7184 // written is in the address register. |
7717 void RecordWriteStub::Generate(MacroAssembler* masm) { | 7185 void RecordWriteStub::Generate(MacroAssembler* masm) { |
7718 Label skip_to_incremental_noncompacting; | 7186 Label skip_to_incremental_noncompacting; |
7719 Label skip_to_incremental_compacting; | 7187 Label skip_to_incremental_compacting; |
(...skipping 327 matching lines...)
8047 __ Pop(ra, t1, a1); | 7515 __ Pop(ra, t1, a1); |
8048 __ Ret(); | 7516 __ Ret(); |
8049 } | 7517 } |
8050 | 7518 |
8051 | 7519 |
8052 #undef __ | 7520 #undef __ |
8053 | 7521 |
8054 } } // namespace v8::internal | 7522 } } // namespace v8::internal |
8055 | 7523 |
8056 #endif // V8_TARGET_ARCH_MIPS | 7524 #endif // V8_TARGET_ARCH_MIPS |