OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 585 matching lines...)
596 // One of first or second must be non-Smi when entering. | 596 // One of first or second must be non-Smi when entering. |
597 static void NumbersToSmis(MacroAssembler* masm, | 597 static void NumbersToSmis(MacroAssembler* masm, |
598 Register first, | 598 Register first, |
599 Register second, | 599 Register second, |
600 Register scratch1, | 600 Register scratch1, |
601 Register scratch2, | 601 Register scratch2, |
602 Register scratch3, | 602 Register scratch3, |
603 Label* on_success, | 603 Label* on_success, |
604 Label* on_not_smis, | 604 Label* on_not_smis, |
605 ConvertUndefined convert_undefined); | 605 ConvertUndefined convert_undefined); |
| 606 |
| 607 // Checks that |operand| holds an int32 value. If |int32_result| differs |
| 608 // from |scratch|, |int32_result| will contain that int32 value. |
| 609 static void CheckSSE2OperandIsInt32(MacroAssembler* masm, |
| 610 Label* non_int32, |
| 611 XMMRegister operand, |
| 612 Register int32_result, |
| 613 Register scratch, |
| 614 XMMRegister xmm_scratch); |
606 }; | 615 }; |
607 | 616 |
608 | 617 |
609 void DoubleToIStub::Generate(MacroAssembler* masm) { | 618 void DoubleToIStub::Generate(MacroAssembler* masm) { |
610 Register input_reg = this->source(); | 619 Register input_reg = this->source(); |
611 Register final_result_reg = this->destination(); | 620 Register final_result_reg = this->destination(); |
612 ASSERT(is_truncating()); | 621 ASSERT(is_truncating()); |
613 | 622 |
614 Label check_negative, process_64_bits, done; | 623 Label check_negative, process_64_bits, done; |
615 | 624 |
(...skipping 98 matching lines...)
714 | 723 |
715 static void BinaryOpStub_GenerateSmiCode( | 724 static void BinaryOpStub_GenerateSmiCode( |
716 MacroAssembler* masm, | 725 MacroAssembler* masm, |
717 Label* slow, | 726 Label* slow, |
718 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, | 727 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, |
719 Token::Value op) { | 728 Token::Value op) { |
720 | 729 |
721 // Arguments to BinaryOpStub are in rdx and rax. | 730 // Arguments to BinaryOpStub are in rdx and rax. |
722 const Register left = rdx; | 731 const Register left = rdx; |
723 const Register right = rax; | 732 const Register right = rax; |
| 733 const Register shift_op_result = (SmiValuesAre32Bits() || op == Token::SAR) ? |
| 734 left : r9; |
724 | 735 |
725 // We only generate heapnumber answers for overflowing calculations | 736 // We only generate heapnumber answers for overflowing calculations. |
726 // for the four basic arithmetic operations and logical right shift by 0. | |
727 bool generate_inline_heapnumber_results = | 737 bool generate_inline_heapnumber_results = |
728 (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) && | 738 (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) && |
729 (op == Token::ADD || op == Token::SUB || | 739 MacroAssembler::IsUnsafeSmiOperator(op); |
730 op == Token::MUL || op == Token::DIV || op == Token::SHR); | |
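
Note on IsUnsafeSmiOperator: the explicit operator list from the old code moves into a MacroAssembler predicate whose definition is not part of this section. A hedged sketch of what it presumably returns, inferred from the list it replaces (ADD, SUB, MUL, DIV, SHR) plus SHL, which can also overflow once smis carry only 31 bits:

    // Sketch only; the authoritative definition lives in the macro assembler.
    // An operator is "unsafe" when its smi fast path can overflow the smi
    // range and therefore may need to produce a heap-number result.
    static bool IsUnsafeSmiOperator(Token::Value op) {
      return op == Token::ADD || op == Token::SUB ||
             op == Token::MUL || op == Token::DIV ||
             op == Token::SHR ||
             (SmiValuesAre31Bits() && op == Token::SHL);
    }
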
731 | 740 |
732 // Smi check of both operands. If op is BIT_OR, the check is delayed | 741 // Smi check of both operands. If op is BIT_OR, the check is delayed |
733 // until after the OR operation. | 742 // until after the OR operation. |
734 Label not_smis; | 743 Label not_smis; |
735 Label use_fp_on_smis; | 744 Label use_fp_on_smis; |
736 Label fail; | 745 Label fail; |
737 | 746 |
738 if (op != Token::BIT_OR) { | 747 if (op != Token::BIT_OR) { |
739 Comment smi_check_comment(masm, "-- Smi check arguments"); | 748 Comment smi_check_comment(masm, "-- Smi check arguments"); |
740 __ JumpIfNotBothSmi(left, right, ¬_smis); | 749 __ JumpIfNotBothSmi(left, right, ¬_smis); |
741 } | 750 } |
742 | 751 |
743 Label smi_values; | 752 Label smi_values; |
744 __ bind(&smi_values); | 753 __ bind(&smi_values); |
745 // Perform the operation. | 754 // Perform the operation. |
746 Comment perform_smi(masm, "-- Perform smi operation"); | 755 Comment perform_smi(masm, "-- Perform smi operation"); |
| 756 MacroAssembler::StrictSmiInstructionWrapper wrapper(masm, &use_fp_on_smis); |
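
StrictSmiInstructionWrapper is new in this CL and its declaration is not shown here; from its uses with SmiAdd and SmiSub below, it plausibly just bundles the assembler with the bailout label so the smi helpers can branch there when a result leaves the smi range. An assumed shape, for orientation only:

    // Assumed shape; see macro-assembler-x64.h in this CL for the real
    // declaration.
    class StrictSmiInstructionWrapper {
     public:
      StrictSmiInstructionWrapper(MacroAssembler* masm, Label* bailout)
          : masm_(masm), bailout_(bailout) {}
      Label* bailout_label() const { return bailout_; }
     private:
      MacroAssembler* masm_;
      Label* bailout_;
    };
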
747 switch (op) { | 757 switch (op) { |
748 case Token::ADD: | 758 case Token::ADD: |
749 ASSERT(right.is(rax)); | 759 ASSERT(right.is(rax)); |
750 __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative. | 760 __ SmiAdd(right, right, left, wrapper); // ADD is commutative. |
751 break; | 761 break; |
752 | 762 |
753 case Token::SUB: | 763 case Token::SUB: |
754 __ SmiSub(left, left, right, &use_fp_on_smis); | 764 __ SmiSub(left, left, right, wrapper); |
755 __ movq(rax, left); | 765 __ movq(rax, left); |
756 break; | 766 break; |
757 | 767 |
758 case Token::MUL: | 768 case Token::MUL: |
759 ASSERT(right.is(rax)); | 769 ASSERT(right.is(rax)); |
760 __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative. | 770 __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative. |
761 break; | 771 break; |
762 | 772 |
763 case Token::DIV: | 773 case Token::DIV: |
764 // SmiDiv will not accept left in rdx or right in rax. | 774 // SmiDiv will not accept left in rdx or right in rax. |
(...skipping 18 matching lines...)
783 ASSERT(right.is(rax)); | 793 ASSERT(right.is(rax)); |
784 __ SmiXor(right, right, left); // BIT_XOR is commutative. | 794 __ SmiXor(right, right, left); // BIT_XOR is commutative. |
785 break; | 795 break; |
786 | 796 |
787 case Token::BIT_AND: | 797 case Token::BIT_AND: |
788 ASSERT(right.is(rax)); | 798 ASSERT(right.is(rax)); |
789 __ SmiAnd(right, right, left); // BIT_AND is commutative. | 799 __ SmiAnd(right, right, left); // BIT_AND is commutative. |
790 break; | 800 break; |
791 | 801 |
792 case Token::SHL: | 802 case Token::SHL: |
793 __ SmiShiftLeft(left, left, right); | 803 __ SmiShiftLeft(shift_op_result, left, right, &use_fp_on_smis); |
794 __ movq(rax, left); | 804 __ movq(rax, shift_op_result); |
795 break; | 805 break; |
796 | 806 |
797 case Token::SAR: | 807 case Token::SAR: |
798 __ SmiShiftArithmeticRight(left, left, right); | 808 __ SmiShiftArithmeticRight(shift_op_result, left, right); |
799 __ movq(rax, left); | 809 __ movq(rax, shift_op_result); |
800 break; | 810 break; |
801 | 811 |
802 case Token::SHR: | 812 case Token::SHR: |
803 __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis); | 813 __ SmiShiftLogicalRight(shift_op_result, left, right, &use_fp_on_smis); |
804 __ movq(rax, left); | 814 __ movq(rax, shift_op_result); |
805 break; | 815 break; |
806 | 816 |
807 default: | 817 default: |
808 UNREACHABLE(); | 818 UNREACHABLE(); |
809 } | 819 } |
810 | 820 |
811 // 5. Emit return of result in rax. Some operations have registers pushed. | 821 // 5. Emit return of result in rax. Some operations have registers pushed. |
812 __ ret(0); | 822 __ ret(0); |
813 | 823 |
814 if (use_fp_on_smis.is_linked()) { | 824 if (use_fp_on_smis.is_linked()) { |
815 // 6. For some operations emit inline code to perform floating point | 825 // 6. For some operations emit inline code to perform floating point |
816 // operations on known smis (e.g., if the result of the operation | 826 // operations on known smis (e.g., if the result of the operation |
817 // overflowed the smi range). | 827 // overflowed the smi range). |
818 __ bind(&use_fp_on_smis); | 828 __ bind(&use_fp_on_smis); |
819 if (op == Token::DIV || op == Token::MOD) { | 829 if (op == Token::DIV || op == Token::MOD) { |
820 // Restore left and right to rdx and rax. | 830 // Restore left and right to rdx and rax. |
821 __ movq(rdx, rcx); | 831 __ movq(rdx, rcx); |
822 __ movq(rax, rbx); | 832 __ movq(rax, rbx); |
823 } | 833 } |
824 | 834 |
825 if (generate_inline_heapnumber_results) { | 835 if (generate_inline_heapnumber_results) { |
826 __ AllocateHeapNumber(rcx, rbx, slow); | 836 __ AllocateHeapNumber(rcx, rbx, slow); |
827 Comment perform_float(masm, "-- Perform float operation on smis"); | 837 Comment perform_float(masm, "-- Perform float operation on smis"); |
828 if (op == Token::SHR) { | 838 if (op == Token::SHR) { |
829 __ SmiToInteger32(left, left); | 839 if (SmiValuesAre32Bits()) { |
830 __ cvtqsi2sd(xmm0, left); | 840 __ SmiToInteger32(shift_op_result, shift_op_result); |
| 841 } |
| 842 __ cvtqsi2sd(xmm0, shift_op_result); |
| 843 } else if (op == Token::SHL) { |
| 844 ASSERT(SmiValuesAre31Bits()); |
| 845 __ cvtlsi2sd(xmm0, shift_op_result); |
831 } else { | 846 } else { |
832 FloatingPointHelper::LoadSSE2SmiOperands(masm); | 847 FloatingPointHelper::LoadSSE2SmiOperands(masm); |
833 switch (op) { | 848 switch (op) { |
834 case Token::ADD: __ addsd(xmm0, xmm1); break; | 849 case Token::ADD: __ addsd(xmm0, xmm1); break; |
835 case Token::SUB: __ subsd(xmm0, xmm1); break; | 850 case Token::SUB: __ subsd(xmm0, xmm1); break; |
836 case Token::MUL: __ mulsd(xmm0, xmm1); break; | 851 case Token::MUL: __ mulsd(xmm0, xmm1); break; |
837 case Token::DIV: __ divsd(xmm0, xmm1); break; | 852 case Token::DIV: __ divsd(xmm0, xmm1); break; |
838 default: UNREACHABLE(); | 853 default: UNREACHABLE(); |
839 } | 854 } |
840 } | 855 } |
841 __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); | 856 __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); |
842 __ movq(rax, rcx); | 857 __ movq(rax, rcx); |
843 __ ret(0); | 858 __ ret(0); |
844 } else { | 859 } else { |
845 __ jmp(&fail); | 860 __ jmp(&fail); |
846 } | 861 } |
847 } | 862 } |
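
The two conversions above are deliberately different: after SHR the result is a uint32 sitting zero-extended in a 64-bit register, so the 64-bit signed convert cvtqsi2sd reads it exactly, while the SHL path (reachable only with 31-bit smis) holds a signed int32, so the 32-bit convert cvtlsi2sd preserves the sign. A scalar sketch of the distinction:

    // Illustrative only; the stub emits cvtqsi2sd / cvtlsi2sd directly.
    double ShrResultToDouble(uint32_t result) {
      // Zero-extended uint32 reinterpreted as int64 converts exactly.
      return static_cast<double>(static_cast<int64_t>(result));
    }
    double ShlResultToDouble(int32_t result) {
      // Signed 32-bit convert keeps the sign.
      return static_cast<double>(result);
    }
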
848 | 863 |
(...skipping 20 matching lines...)
869 __ jmp(&smi_values); | 884 __ jmp(&smi_values); |
870 __ bind(&fail); | 885 __ bind(&fail); |
871 } | 886 } |
872 | 887 |
873 | 888 |
874 static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, | 889 static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, |
875 Label* alloc_failure, | 890 Label* alloc_failure, |
876 OverwriteMode mode); | 891 OverwriteMode mode); |
877 | 892 |
878 | 893 |
879 static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm, | 894 static void BinaryOpStub_GenerateFloatingPointCode( |
880 Label* allocation_failure, | 895 MacroAssembler* masm, |
881 Label* non_numeric_failure, | 896 Label* allocation_failure, |
882 Token::Value op, | 897 Label* non_numeric_failure, |
883 OverwriteMode mode) { | 898 Token::Value op, |
| 899 BinaryOpIC::TypeInfo result_type, |
| 900 Label* non_int32_failure, |
| 901 OverwriteMode mode) { |
884 switch (op) { | 902 switch (op) { |
885 case Token::ADD: | 903 case Token::ADD: |
886 case Token::SUB: | 904 case Token::SUB: |
887 case Token::MUL: | 905 case Token::MUL: |
888 case Token::DIV: { | 906 case Token::DIV: { |
889 FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure); | 907 FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure); |
890 | 908 |
891 switch (op) { | 909 switch (op) { |
892 case Token::ADD: __ addsd(xmm0, xmm1); break; | 910 case Token::ADD: __ addsd(xmm0, xmm1); break; |
893 case Token::SUB: __ subsd(xmm0, xmm1); break; | 911 case Token::SUB: __ subsd(xmm0, xmm1); break; |
894 case Token::MUL: __ mulsd(xmm0, xmm1); break; | 912 case Token::MUL: __ mulsd(xmm0, xmm1); break; |
895 case Token::DIV: __ divsd(xmm0, xmm1); break; | 913 case Token::DIV: __ divsd(xmm0, xmm1); break; |
896 default: UNREACHABLE(); | 914 default: UNREACHABLE(); |
897 } | 915 } |
| 916 |
| 917 if (SmiValuesAre31Bits() && non_int32_failure != NULL) { |
| 918 if (result_type <= BinaryOpIC::INT32) { |
| 919 FloatingPointHelper::CheckSSE2OperandIsInt32( |
| 920 masm, non_int32_failure, xmm0, rcx, rcx, xmm2); |
| 921 } |
| 922 } |
| 923 |
898 BinaryOpStub_GenerateHeapResultAllocation( | 924 BinaryOpStub_GenerateHeapResultAllocation( |
899 masm, allocation_failure, mode); | 925 masm, allocation_failure, mode); |
900 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); | 926 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
901 __ ret(0); | 927 __ ret(0); |
902 break; | 928 break; |
903 } | 929 } |
904 case Token::MOD: { | 930 case Token::MOD: { |
905 // For MOD we jump to the allocation_failure label, to call runtime. | 931 // For MOD we jump to the allocation_failure label, to call runtime. |
906 __ jmp(allocation_failure); | 932 __ jmp(allocation_failure); |
907 break; | 933 break; |
908 } | 934 } |
909 case Token::BIT_OR: | 935 case Token::BIT_OR: |
910 case Token::BIT_AND: | 936 case Token::BIT_AND: |
911 case Token::BIT_XOR: | 937 case Token::BIT_XOR: |
912 case Token::SAR: | 938 case Token::SAR: |
913 case Token::SHL: | 939 case Token::SHL: |
914 case Token::SHR: { | 940 case Token::SHR: { |
915 Label non_smi_shr_result; | 941 Label non_smi_result; |
916 Register heap_number_map = r9; | 942 Register heap_number_map = r9; |
| 943 Register saved_right = r11; |
| 944 if (SmiValuesAre31Bits() || (SmiValuesAre32Bits() && op == Token::SHR)) { |
| 945 __ movq(saved_right, rax); |
| 946 } |
917 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 947 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
918 FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure, | 948 FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure, |
919 heap_number_map); | 949 heap_number_map); |
920 switch (op) { | 950 switch (op) { |
921 case Token::BIT_OR: __ orl(rax, rcx); break; | 951 case Token::BIT_OR: __ orl(rax, rcx); break; |
922 case Token::BIT_AND: __ andl(rax, rcx); break; | 952 case Token::BIT_AND: __ andl(rax, rcx); break; |
923 case Token::BIT_XOR: __ xorl(rax, rcx); break; | 953 case Token::BIT_XOR: __ xorl(rax, rcx); break; |
924 case Token::SAR: __ sarl_cl(rax); break; | 954 case Token::SAR: __ sarl_cl(rax); break; |
925 case Token::SHL: __ shll_cl(rax); break; | 955 case Token::SHL: __ shll_cl(rax); break; |
926 case Token::SHR: { | 956 case Token::SHR: __ shrl_cl(rax); break; |
927 __ shrl_cl(rax); | |
928 // Check if result is negative. This can only happen for a shift | |
929 // by zero. | |
930 __ testl(rax, rax); | |
931 __ j(negative, &non_smi_shr_result); | |
932 break; | |
933 } | |
934 default: UNREACHABLE(); | 957 default: UNREACHABLE(); |
935 } | 958 } |
936 STATIC_ASSERT(kSmiValueSize == 32); | 959 |
| 960 if (op == Token::SHR) { |
| 961 __ JumpIfUIntNotValidSmiValue(rax, &non_smi_result, Label::kNear); |
| 962 } else { |
| 963 if (SmiValuesAre31Bits()) { |
| 964 __ JumpIfNotValidSmiValue(rax, &non_smi_result, Label::kNear); |
| 965 } |
| 966 } |
| 967 |
937 // Tag smi result and return. | 968 // Tag smi result and return. |
938 __ Integer32ToSmi(rax, rax); | 969 __ Integer32ToSmi(rax, rax); |
939 __ Ret(); | 970 __ Ret(); |
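
JumpIfUIntNotValidSmiValue and JumpIfNotValidSmiValue replace the old STATIC_ASSERT(kSmiValueSize == 32) plus the SHR-only sign test. A sketch of the ranges they presumably enforce, assuming a 31-bit smi payload of [-2^30, 2^30 - 1]:

    // Assumed range checks; the real macros emit compare-and-branch code.
    bool IsValidSmiValue(int32_t value) {
      if (SmiValuesAre32Bits()) return true;  // every int32 fits
      return value >= -(1 << 30) && value <= (1 << 30) - 1;
    }
    bool IsUIntValidSmiValue(uint32_t value) {
      // SHR yields an unsigned result; it must fit the non-negative range.
      return value <= (SmiValuesAre32Bits() ? 0x7FFFFFFFu : 0x3FFFFFFFu);
    }
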
940 | 971 |
941 // Logical shift right can produce an unsigned int32 that is not | 972 if (SmiValuesAre31Bits() || (SmiValuesAre32Bits() && op == Token::SHR)) { |
942 // an int32, and so is not in the smi range. Allocate a heap number | 973 __ bind(&non_smi_result); |
943 // in that case. | 974 __ movl(rbx, rax); // rbx holds result value. |
944 if (op == Token::SHR) { | |
945 __ bind(&non_smi_shr_result); | |
946 Label allocation_failed; | 975 Label allocation_failed; |
947 __ movl(rbx, rax); // rbx holds result value (uint32 value as int64). | 976 Label skip_allocation; |
948 // Allocate heap number in new space. | 977 // Allocate a heap number in new space if we cannot overwrite |
949 // Not using AllocateHeapNumber macro in order to reuse | 978 // the left or right operand. Not using the AllocateHeapNumber macro |
950 // already loaded heap_number_map. | 979 // in order to reuse the already loaded heap_number_map. |
951 __ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed, | 980 switch (mode) { |
952 TAG_OBJECT); | 981 case OVERWRITE_LEFT: |
953 // Set the map. | 982 __ movq(rax, rdx); |
954 __ AssertRootValue(heap_number_map, | 983 __ JumpIfNotSmi(rax, &skip_allocation); |
955 Heap::kHeapNumberMapRootIndex, | 984 __ Allocate(HeapNumber::kSize, rax, no_reg, no_reg, |
956 kHeapNumberMapRegisterClobbered); | 985 &allocation_failed, TAG_OBJECT); |
957 __ movq(FieldOperand(rax, HeapObject::kMapOffset), | 986 // Set the map. |
958 heap_number_map); | 987 __ AssertRootValue(heap_number_map, |
959 __ cvtqsi2sd(xmm0, rbx); | 988 Heap::kHeapNumberMapRootIndex, |
| 989 kHeapNumberMapRegisterClobbered); |
| 990 __ movq(FieldOperand(rax, HeapObject::kMapOffset), |
| 991 heap_number_map); |
| 992 __ bind(&skip_allocation); |
| 993 break; |
| 994 case OVERWRITE_RIGHT: |
| 995 __ movq(rax, saved_right); |
| 996 __ JumpIfNotSmi(rax, &skip_allocation); |
| 997 // Fall through! |
| 998 case NO_OVERWRITE: |
| 999 __ Allocate(HeapNumber::kSize, rax, no_reg, no_reg, |
| 1000 &allocation_failed, TAG_OBJECT); |
| 1001 // Set the map. |
| 1002 __ AssertRootValue(heap_number_map, |
| 1003 Heap::kHeapNumberMapRootIndex, |
| 1004 kHeapNumberMapRegisterClobbered); |
| 1005 __ movq(FieldOperand(rax, HeapObject::kMapOffset), |
| 1006 heap_number_map); |
| 1007 __ bind(&skip_allocation); |
| 1008 break; |
| 1009 default: UNREACHABLE(); |
| 1010 } |
| 1011 |
| 1012 if (op == Token::SHR) { |
| 1013 // Logical shift right can produce an unsigned int32 that is not |
| 1014 // an int32, or an int32 outside the smi range when smis are 31 bits wide. |
| 1015 __ cvtqsi2sd(xmm0, rbx); |
| 1016 } else { |
| 1017 // All other operations return a signed int32, so we |
| 1018 // use cvtlsi2sd here to retain the sign bit. |
| 1019 ASSERT(SmiValuesAre31Bits()); |
| 1020 __ cvtlsi2sd(xmm0, rbx); |
| 1021 } |
960 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); | 1022 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
961 __ Ret(); | 1023 __ Ret(); |
962 | 1024 |
963 __ bind(&allocation_failed); | 1025 __ bind(&allocation_failed); |
964 // We need tagged values in rdx and rax for the following code, | 1026 // We need tagged values in rdx and rax for the following code, |
965 // not int32 in rax and rcx. | 1027 // rdx is unchanged and rax was saved at the beginning. |
966 __ Integer32ToSmi(rax, rcx); | 1028 __ movq(rax, saved_right); |
967 __ Integer32ToSmi(rdx, rbx); | |
968 __ jmp(allocation_failure); | 1029 __ jmp(allocation_failure); |
969 } | 1030 } |
970 break; | 1031 break; |
971 } | 1032 } |
972 default: UNREACHABLE(); break; | 1033 default: UNREACHABLE(); break; |
973 } | 1034 } |
974 // No fall-through from this generated code. | 1035 // No fall-through from this generated code. |
975 if (FLAG_debug_code) { | 1036 if (FLAG_debug_code) { |
976 __ Abort(kUnexpectedFallThroughInBinaryStubGenerateFloatingPointCode); | 1037 __ Abort(kUnexpectedFallThroughInBinaryStubGenerateFloatingPointCode); |
977 } | 1038 } |
(...skipping 76 matching lines...)
1054 FrameScope scope(masm, StackFrame::INTERNAL); | 1115 FrameScope scope(masm, StackFrame::INTERNAL); |
1055 GenerateRegisterArgsPush(masm); | 1116 GenerateRegisterArgsPush(masm); |
1056 GenerateCallRuntime(masm); | 1117 GenerateCallRuntime(masm); |
1057 } | 1118 } |
1058 __ Ret(); | 1119 __ Ret(); |
1059 } | 1120 } |
1060 } | 1121 } |
1061 | 1122 |
1062 | 1123 |
1063 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { | 1124 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
1064 // The int32 case is identical to the Smi case. We avoid creating this | 1125 ASSERT(SmiValuesAre31Bits()); |
1065 // ic state on x64. | 1126 ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); |
1066 UNREACHABLE(); | 1127 |
| 1128 Label gc_required, not_number, not_int32; |
| 1129 BinaryOpStub_GenerateFloatingPointCode(masm, &gc_required, ¬_number, op_, |
| 1130 result_type_, ¬_int32, mode_); |
| 1131 |
| 1132 __ bind(¬_number); |
| 1133 __ bind(¬_int32); |
| 1134 GenerateTypeTransition(masm); |
| 1135 |
| 1136 __ bind(&gc_required); |
| 1137 { |
| 1138 FrameScope scope(masm, StackFrame::INTERNAL); |
| 1139 GenerateRegisterArgsPush(masm); |
| 1140 GenerateCallRuntime(masm); |
| 1141 } |
| 1142 __ Ret(); |
1067 } | 1143 } |
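
The guard result_type_ <= BinaryOpIC::INT32 in the new stub relies on the ordering of BinaryOpIC::TypeInfo, where SMI and INT32 precede the heavier states. For orientation (member list assumed from the v8 sources of this era; check src/ic.h at this revision):

    // Assumed ordering; only SMI < INT32 < (everything numeric and beyond)
    // matters for the comparison above.
    enum TypeInfo {
      UNINITIALIZED, SMI, INT32, NUMBER, ODDBALL, STRING, GENERIC
    };
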
1068 | 1144 |
1069 | 1145 |
1070 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { | 1146 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { |
1071 Label call_runtime; | 1147 Label call_runtime; |
1072 ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); | 1148 ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); |
1073 ASSERT(op_ == Token::ADD); | 1149 ASSERT(op_ == Token::ADD); |
1074 // If both arguments are strings, call the string add stub. | 1150 // If both arguments are strings, call the string add stub. |
1075 // Otherwise, do a transition. | 1151 // Otherwise, do a transition. |
1076 | 1152 |
(...skipping 85 matching lines...)
1162 // or the right operand. For precise type feedback, patch the IC | 1238 // or the right operand. For precise type feedback, patch the IC |
1163 // again if this changes. | 1239 // again if this changes. |
1164 if (left_type_ == BinaryOpIC::SMI) { | 1240 if (left_type_ == BinaryOpIC::SMI) { |
1165 BinaryOpStub_CheckSmiInput(masm, rdx, ¬_number); | 1241 BinaryOpStub_CheckSmiInput(masm, rdx, ¬_number); |
1166 } | 1242 } |
1167 if (right_type_ == BinaryOpIC::SMI) { | 1243 if (right_type_ == BinaryOpIC::SMI) { |
1168 BinaryOpStub_CheckSmiInput(masm, rax, ¬_number); | 1244 BinaryOpStub_CheckSmiInput(masm, rax, ¬_number); |
1169 } | 1245 } |
1170 | 1246 |
1171 BinaryOpStub_GenerateFloatingPointCode( | 1247 BinaryOpStub_GenerateFloatingPointCode( |
1172 masm, &gc_required, ¬_number, op_, mode_); | 1248 masm, &gc_required, ¬_number, op_, result_type_, NULL, mode_); |
1173 | 1249 |
1174 __ bind(¬_number); | 1250 __ bind(¬_number); |
1175 GenerateTypeTransition(masm); | 1251 GenerateTypeTransition(masm); |
1176 | 1252 |
1177 __ bind(&gc_required); | 1253 __ bind(&gc_required); |
1178 { | 1254 { |
1179 FrameScope scope(masm, StackFrame::INTERNAL); | 1255 FrameScope scope(masm, StackFrame::INTERNAL); |
1180 GenerateRegisterArgsPush(masm); | 1256 GenerateRegisterArgsPush(masm); |
1181 GenerateCallRuntime(masm); | 1257 GenerateCallRuntime(masm); |
1182 } | 1258 } |
1183 __ Ret(); | 1259 __ Ret(); |
1184 } | 1260 } |
1185 | 1261 |
1186 | 1262 |
1187 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { | 1263 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
1188 Label call_runtime, call_string_add_or_runtime; | 1264 Label call_runtime, call_string_add_or_runtime; |
1189 | 1265 |
1190 BinaryOpStub_GenerateSmiCode( | 1266 BinaryOpStub_GenerateSmiCode( |
1191 masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_); | 1267 masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_); |
1192 | 1268 |
1193 BinaryOpStub_GenerateFloatingPointCode( | 1269 BinaryOpStub_GenerateFloatingPointCode( |
1194 masm, &call_runtime, &call_string_add_or_runtime, op_, mode_); | 1270 masm, &call_runtime, &call_string_add_or_runtime, op_, |
| 1271 result_type_, NULL, mode_); |
1195 | 1272 |
1196 __ bind(&call_string_add_or_runtime); | 1273 __ bind(&call_string_add_or_runtime); |
1197 if (op_ == Token::ADD) { | 1274 if (op_ == Token::ADD) { |
1198 GenerateAddStrings(masm); | 1275 GenerateAddStrings(masm); |
1199 } | 1276 } |
1200 | 1277 |
1201 __ bind(&call_runtime); | 1278 __ bind(&call_runtime); |
1202 { | 1279 { |
1203 FrameScope scope(masm, StackFrame::INTERNAL); | 1280 FrameScope scope(masm, StackFrame::INTERNAL); |
1204 GenerateRegisterArgsPush(masm); | 1281 GenerateRegisterArgsPush(masm); |
(...skipping 530 matching lines...)
1735 // Convert HeapNumber to smi if possible. | 1812 // Convert HeapNumber to smi if possible. |
1736 __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset)); | 1813 __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset)); |
1737 __ movq(scratch2, xmm0); | 1814 __ movq(scratch2, xmm0); |
1738 __ cvttsd2siq(smi_result, xmm0); | 1815 __ cvttsd2siq(smi_result, xmm0); |
1739 // Check if conversion was successful by converting back and | 1816 // Check if conversion was successful by converting back and |
1740 // comparing to the original double's bits. | 1817 // comparing to the original double's bits. |
1741 __ cvtlsi2sd(xmm1, smi_result); | 1818 __ cvtlsi2sd(xmm1, smi_result); |
1742 __ movq(kScratchRegister, xmm1); | 1819 __ movq(kScratchRegister, xmm1); |
1743 __ cmpq(scratch2, kScratchRegister); | 1820 __ cmpq(scratch2, kScratchRegister); |
1744 __ j(not_equal, on_not_smis); | 1821 __ j(not_equal, on_not_smis); |
| 1822 __ JumpIfNotValidSmiValue(smi_result, on_not_smis); |
1745 __ Integer32ToSmi(first, smi_result); | 1823 __ Integer32ToSmi(first, smi_result); |
1746 | 1824 |
1747 __ bind(&first_done); | 1825 __ bind(&first_done); |
1748 __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done); | 1826 __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done); |
1749 __ bind(&first_smi); | 1827 __ bind(&first_smi); |
1750 __ AssertNotSmi(second); | 1828 __ AssertNotSmi(second); |
1751 __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map); | 1829 __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map); |
1752 __ j(not_equal, | 1830 __ j(not_equal, |
1753 (convert_undefined == CONVERT_UNDEFINED_TO_ZERO) | 1831 (convert_undefined == CONVERT_UNDEFINED_TO_ZERO) |
1754 ? &maybe_undefined_second | 1832 ? &maybe_undefined_second |
1755 : on_not_smis); | 1833 : on_not_smis); |
1756 // Convert second to smi, if possible. | 1834 // Convert second to smi, if possible. |
1757 __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset)); | 1835 __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset)); |
1758 __ movq(scratch2, xmm0); | 1836 __ movq(scratch2, xmm0); |
1759 __ cvttsd2siq(smi_result, xmm0); | 1837 __ cvttsd2siq(smi_result, xmm0); |
1760 __ cvtlsi2sd(xmm1, smi_result); | 1838 __ cvtlsi2sd(xmm1, smi_result); |
1761 __ movq(kScratchRegister, xmm1); | 1839 __ movq(kScratchRegister, xmm1); |
1762 __ cmpq(scratch2, kScratchRegister); | 1840 __ cmpq(scratch2, kScratchRegister); |
1763 __ j(not_equal, on_not_smis); | 1841 __ j(not_equal, on_not_smis); |
| 1842 __ JumpIfNotValidSmiValue(smi_result, on_not_smis); |
1764 __ Integer32ToSmi(second, smi_result); | 1843 __ Integer32ToSmi(second, smi_result); |
1765 if (on_success != NULL) { | 1844 if (on_success != NULL) { |
1766 __ jmp(on_success); | 1845 __ jmp(on_success); |
1767 } else { | 1846 } else { |
1768 __ jmp(&done); | 1847 __ jmp(&done); |
1769 } | 1848 } |
1770 | 1849 |
1771 __ bind(&maybe_undefined_first); | 1850 __ bind(&maybe_undefined_first); |
1772 __ CompareRoot(first, Heap::kUndefinedValueRootIndex); | 1851 __ CompareRoot(first, Heap::kUndefinedValueRootIndex); |
1773 __ j(not_equal, on_not_smis); | 1852 __ j(not_equal, on_not_smis); |
1774 __ xor_(first, first); | 1853 __ xor_(first, first); |
1775 __ jmp(&first_done); | 1854 __ jmp(&first_done); |
1776 | 1855 |
1777 __ bind(&maybe_undefined_second); | 1856 __ bind(&maybe_undefined_second); |
1778 __ CompareRoot(second, Heap::kUndefinedValueRootIndex); | 1857 __ CompareRoot(second, Heap::kUndefinedValueRootIndex); |
1779 __ j(not_equal, on_not_smis); | 1858 __ j(not_equal, on_not_smis); |
1780 __ xor_(second, second); | 1859 __ xor_(second, second); |
1781 if (on_success != NULL) { | 1860 if (on_success != NULL) { |
1782 __ jmp(on_success); | 1861 __ jmp(on_success); |
1783 } | 1862 } |
1784 // Else: fall through. | 1863 // Else: fall through. |
1785 | 1864 |
1786 __ bind(&done); | 1865 __ bind(&done); |
1787 } | 1866 } |
1788 | 1867 |
1789 | 1868 |
| 1869 void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm, |
| 1870 Label* non_int32, |
| 1871 XMMRegister operand, |
| 1872 Register int32_result, |
| 1873 Register scratch, |
| 1874 XMMRegister xmm_scratch) { |
| 1875 __ cvttsd2si(int32_result, operand); |
| 1876 __ cvtlsi2sd(xmm_scratch, int32_result); |
| 1877 __ pcmpeqd(xmm_scratch, operand); |
| 1878 __ movmskps(scratch, xmm_scratch); |
| 1879 // The two least significant bits should both be set. |
| 1880 __ notl(scratch); |
| 1881 __ testl(scratch, Immediate(3)); |
| 1882 __ j(not_zero, non_int32); |
| 1883 } |
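
CheckSSE2OperandIsInt32 is a bit-exact round-trip test: truncate to int32, convert back, then pcmpeqd compares the 32-bit lanes and movmskps collects their sign bits, so requiring the low two lanes to match amounts to comparing all 64 bits of the double. A scalar model (assuming x86 cvttsd2si semantics for out-of-range inputs, where plain C++ would be undefined):

    #include <cstdint>
    #include <cstring>

    // Scalar model of the check above; not the stub itself.
    bool IsInt32Double(double operand, int32_t* int32_result) {
      int32_t truncated = static_cast<int32_t>(operand);   // cvttsd2si
      double round_trip = static_cast<double>(truncated);  // cvtlsi2sd
      uint64_t a, b;
      std::memcpy(&a, &operand, sizeof(a));
      std::memcpy(&b, &round_trip, sizeof(b));
      *int32_result = truncated;
      // Bit-exact compare also rejects -0.0, which a value compare would not.
      return a == b;
    }
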
| 1884 |
| 1885 |
1790 void MathPowStub::Generate(MacroAssembler* masm) { | 1886 void MathPowStub::Generate(MacroAssembler* masm) { |
1791 const Register exponent = rdx; | 1887 const Register exponent = rdx; |
1792 const Register base = rax; | 1888 const Register base = rax; |
1793 const Register scratch = rcx; | 1889 const Register scratch = rcx; |
1794 const XMMRegister double_result = xmm3; | 1890 const XMMRegister double_result = xmm3; |
1795 const XMMRegister double_base = xmm2; | 1891 const XMMRegister double_base = xmm2; |
1796 const XMMRegister double_exponent = xmm1; | 1892 const XMMRegister double_exponent = xmm1; |
1797 const XMMRegister double_scratch = xmm4; | 1893 const XMMRegister double_scratch = xmm4; |
1798 | 1894 |
1799 Label call_runtime, done, exponent_not_smi, int_exponent; | 1895 Label call_runtime, done, exponent_not_smi, int_exponent; |
(...skipping 2762 matching lines...)
4562 // by the code above. | 4658 // by the code above. |
4563 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { | 4659 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { |
4564 __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset)); | 4660 __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset)); |
4565 __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset)); | 4661 __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset)); |
4566 } | 4662 } |
4567 // Get the instance types of the two strings as they will be needed soon. | 4663 // Get the instance types of the two strings as they will be needed soon. |
4568 __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset)); | 4664 __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset)); |
4569 __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset)); | 4665 __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset)); |
4570 | 4666 |
4571 // Look at the length of the result of adding the two strings. | 4667 // Look at the length of the result of adding the two strings. |
4572 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2); | 4668 MacroAssembler::StrictSmiInstructionWrapper wrapper(masm, &call_runtime); |
4573 __ SmiAdd(rbx, rbx, rcx); | 4669 __ SmiAdd(rbx, rbx, rcx, wrapper); |
| 4670 |
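
Background on why the STATIC_ASSERT was dropped: String::kMaxLength <= Smi::kMaxValue / 2 guaranteed that adding two string lengths could never overflow a 32-bit smi, so the old SmiAdd needed no bailout. With a 31-bit smi payload that bound no longer holds, hence the wrapper routes overflow to &call_runtime. Roughly, with constants assumed for the 31-bit case:

    // Sketch: with Smi::kMaxValue == (1 << 30) - 1, two valid string
    // lengths can sum past the smi range, so the check must be dynamic.
    bool LengthsFitSmi(int32_t len_a, int32_t len_b) {
      int64_t sum = static_cast<int64_t>(len_a) + len_b;
      return sum <= (int64_t{1} << 30) - 1;
    }
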
4574 // Use the string table when adding two one character strings, as it | 4671 // Use the string table when adding two one character strings, as it |
4575 // helps later optimizations to return an internalized string here. | 4672 // helps later optimizations to return an internalized string here. |
4576 __ SmiCompare(rbx, Smi::FromInt(2)); | 4673 __ SmiCompare(rbx, Smi::FromInt(2)); |
4577 __ j(not_equal, &longer_than_two); | 4674 __ j(not_equal, &longer_than_two); |
4578 | 4675 |
4579 // Check that both strings are non-external ASCII strings. | 4676 // Check that both strings are non-external ASCII strings. |
4580 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx, | 4677 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx, |
4581 &call_runtime); | 4678 &call_runtime); |
4582 | 4679 |
4583 // Get the two characters forming the sub string. | 4680 // Get the two characters forming the sub string. |
(...skipping 2227 matching lines...)
6811 __ bind(&fast_elements_case); | 6908 __ bind(&fast_elements_case); |
6812 GenerateCase(masm, FAST_ELEMENTS); | 6909 GenerateCase(masm, FAST_ELEMENTS); |
6813 } | 6910 } |
6814 | 6911 |
6815 | 6912 |
6816 #undef __ | 6913 #undef __ |
6817 | 6914 |
6818 } } // namespace v8::internal | 6915 } } // namespace v8::internal |
6819 | 6916 |
6820 #endif // V8_TARGET_ARCH_X64 | 6917 #endif // V8_TARGET_ARCH_X64 |