OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 705 matching lines...)
716 static void BinaryOpStub_GenerateSmiCode( | 716 static void BinaryOpStub_GenerateSmiCode( |
717 MacroAssembler* masm, | 717 MacroAssembler* masm, |
718 Label* slow, | 718 Label* slow, |
719 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, | 719 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, |
720 Token::Value op) { | 720 Token::Value op) { |
721 | 721 |
722 // Arguments to BinaryOpStub are in rdx and rax. | 722 // Arguments to BinaryOpStub are in rdx and rax. |
723 const Register left = rdx; | 723 const Register left = rdx; |
724 const Register right = rax; | 724 const Register right = rax; |
725 | 725 |
| 726 #if !V8_USE_31_BITS_SMI_VALUE |
726 // We only generate heapnumber answers for overflowing calculations | 727 // We only generate heapnumber answers for overflowing calculations |
727 // for the four basic arithmetic operations and logical right shift by 0. | 728 // for the four basic arithmetic operations and logical right shift by 0. |
728 bool generate_inline_heapnumber_results = | 729 bool generate_inline_heapnumber_results = |
729 (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) && | 730 (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) && |
730 (op == Token::ADD || op == Token::SUB || | 731 (op == Token::ADD || op == Token::SUB || |
731 op == Token::MUL || op == Token::DIV || op == Token::SHR); | 732 op == Token::MUL || op == Token::DIV || op == Token::SHR); |
| 733 #else |
| 734 bool generate_inline_heapnumber_results = |
| 735 (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) && |
| 736 (op == Token::ADD || op == Token::SUB || op == Token::SHL || |
| 737 op == Token::MUL || op == Token::DIV || op == Token::SHR); |
| 738 #endif |
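Why Token::SHL joins the list only under 31-bit smis: with a 32-bit smi payload every int32 produced by a 32-bit left shift can be tagged as a smi, but with a 31-bit payload the result can leave the smi range and needs a heap number. A minimal standalone sketch of the range reasoning, with the smi bounds written out as assumed constants:

    #include <cassert>
    #include <cstdint>

    // Assumed 31-bit smi bounds (V8's kSmiValueSize == 31 configuration).
    const int32_t kSmiMin31 = -(1 << 30);     // -2^30
    const int32_t kSmiMax31 = (1 << 30) - 1;  //  2^30 - 1

    bool FitsInSmi31(int32_t value) {
      return value >= kSmiMin31 && value <= kSmiMax31;
    }

    int main() {
      assert(FitsInSmi31(1 << 29));   // still a valid 31-bit smi
      assert(!FitsInSmi31(1 << 30));  // SHL result outside the smi range,
                                      // so the stub must allocate a heap number
      return 0;
    }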
732 | 739 |
733 // Smi check of both operands. If op is BIT_OR, the check is delayed | 740 // Smi check of both operands. If op is BIT_OR, the check is delayed |
734 // until after the OR operation. | 741 // until after the OR operation. |
735 Label not_smis; | 742 Label not_smis; |
736 Label use_fp_on_smis; | 743 Label use_fp_on_smis; |
737 Label fail; | 744 Label fail; |
738 | 745 |
739 if (op != Token::BIT_OR) { | 746 if (op != Token::BIT_OR) { |
740 Comment smi_check_comment(masm, "-- Smi check arguments"); | 747 Comment smi_check_comment(masm, "-- Smi check arguments"); |
741 __ JumpIfNotBothSmi(left, right, ¬_smis); | 748 __ JumpIfNotBothSmi(left, right, ¬_smis); |
(...skipping 42 matching lines...)
784 ASSERT(right.is(rax)); | 791 ASSERT(right.is(rax)); |
785 __ SmiXor(right, right, left); // BIT_XOR is commutative. | 792 __ SmiXor(right, right, left); // BIT_XOR is commutative. |
786 break; | 793 break; |
787 | 794 |
788 case Token::BIT_AND: | 795 case Token::BIT_AND: |
789 ASSERT(right.is(rax)); | 796 ASSERT(right.is(rax)); |
790 __ SmiAnd(right, right, left); // BIT_AND is commutative. | 797 __ SmiAnd(right, right, left); // BIT_AND is commutative. |
791 break; | 798 break; |
792 | 799 |
793 case Token::SHL: | 800 case Token::SHL: |
| 801 #if !V8_USE_31_BITS_SMI_VALUE |
794 __ SmiShiftLeft(left, left, right); | 802 __ SmiShiftLeft(left, left, right); |
| 803 #else |
| 804 __ push(left); |
| 805 __ push(right); |
| 806 __ SmiShiftLeft(left, left, right, &use_fp_on_smis); |
| 807 #endif |
795 __ movq(rax, left); | 808 __ movq(rax, left); |
796 break; | 809 break; |
797 | 810 |
798 case Token::SAR: | 811 case Token::SAR: |
799 __ SmiShiftArithmeticRight(left, left, right); | 812 __ SmiShiftArithmeticRight(left, left, right); |
800 __ movq(rax, left); | 813 __ movq(rax, left); |
801 break; | 814 break; |
802 | 815 |
803 case Token::SHR: | 816 case Token::SHR: |
| 817 #if V8_USE_31_BITS_SMI_VALUE |
| 818 __ push(left); |
| 819 __ push(right); |
| 820 #endif |
804 __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis); | 821 __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis); |
805 __ movq(rax, left); | 822 __ movq(rax, left); |
806 break; | 823 break; |
807 | 824 |
808 default: | 825 default: |
809 UNREACHABLE(); | 826 UNREACHABLE(); |
810 } | 827 } |
811 | 828 |
812 // 5. Emit return of result in rax. Some operations have registers pushed. | 829 // 5. Emit return of result in rax. Some operations have registers pushed. |
| 830 #if V8_USE_31_BITS_SMI_VALUE |
| 831 if (op == Token::SHL || op == Token::SHR) { |
| 832 // Drop arguments. |
| 833 __ addq(rsp, Immediate(2 * kRegisterSize)); |
| 834 } |
| 835 #endif |
813 __ ret(0); | 836 __ ret(0); |
814 | 837 |
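The addq(rsp, Immediate(2 * kRegisterSize)) above pairs with the push(left)/push(right) in the SHL and SHR cases: every exit from the fast path must leave rsp balanced. A small model of that discipline, with registers and smis reduced to plain ints for illustration:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
      std::vector<int32_t> stack;
      int32_t left = 3, right = 2;

      stack.push_back(left);   // __ push(left)
      stack.push_back(right);  // __ push(right)

      bool overflowed = (int64_t(left) << right) > ((1 << 30) - 1);
      if (!overflowed) {
        // Success: the saved copies are dead, so the stub drops them,
        // mirroring addq(rsp, Immediate(2 * kRegisterSize)) before ret(0).
        stack.pop_back();
        stack.pop_back();
      } else {
        // Bailout: restore the original operands for the FP/slow path,
        // mirroring pop(right); pop(left) in use_fp_on_smis.
        right = stack.back(); stack.pop_back();
        left = stack.back(); stack.pop_back();
      }
      assert(stack.empty());  // rsp is balanced on every exit
      return 0;
    }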
815 if (use_fp_on_smis.is_linked()) { | 838 if (use_fp_on_smis.is_linked()) { |
816 // 6. For some operations emit inline code to perform floating point | 839 // 6. For some operations emit inline code to perform floating point |
817 // operations on known smis (e.g., if the result of the operation | 840 // operations on known smis (e.g., if the result of the operation |
818 // overflowed the smi range). | 841 // overflowed the smi range). |
819 __ bind(&use_fp_on_smis); | 842 __ bind(&use_fp_on_smis); |
820 if (op == Token::DIV || op == Token::MOD) { | 843 if (op == Token::DIV || op == Token::MOD) { |
821 // Restore left and right to rdx and rax. | 844 // Restore left and right to rdx and rax. |
822 __ movq(rdx, rcx); | 845 __ movq(rdx, rcx); |
823 __ movq(rax, rbx); | 846 __ movq(rax, rbx); |
824 } | 847 } |
825 | 848 |
826 if (generate_inline_heapnumber_results) { | 849 if (generate_inline_heapnumber_results) { |
| 850 #if !V8_USE_31_BITS_SMI_VALUE |
827 __ AllocateHeapNumber(rcx, rbx, slow); | 851 __ AllocateHeapNumber(rcx, rbx, slow); |
828 Comment perform_float(masm, "-- Perform float operation on smis"); | 852 Comment perform_float(masm, "-- Perform float operation on smis"); |
829 if (op == Token::SHR) { | 853 if (op == Token::SHR) { |
830 __ SmiToInteger32(left, left); | 854 __ SmiToInteger32(left, left); |
831 __ cvtqsi2sd(xmm0, left); | 855 __ cvtqsi2sd(xmm0, left); |
832 } else { | 856 } else { |
| 857 #else |
| 858 Label goto_slow; |
| 859 __ AllocateHeapNumber(rcx, rbx, &goto_slow); |
| 860 Comment perform_float(masm, "-- Perform float operation on smis"); |
| 861 if (op == Token::SHL) { |
| 862 __ cvtlsi2sd(xmm0, left); |
| 863 // Drop arguments. |
| 864 __ addq(rsp, Immediate(2 * kRegisterSize)); |
| 865 } else if (op == Token::SHR) { |
| 866 // The value of left comes from MacroAssembler::SmiShiftLogicalRight, |
| 867 // which bails out to this path for a shift amount of: |
| 868 // 0 : the shift might turn a signed integer into an unsigned integer |
| 869 // 1 : the result might be above 2^30 - 1 |
| 870 __ cvtqsi2sd(xmm0, left); |
| 871 // Drop arguments. |
| 872 __ addq(rsp, Immediate(2 * kRegisterSize)); |
| 873 } else { |
| 874 #endif |
833 FloatingPointHelper::LoadSSE2SmiOperands(masm); | 875 FloatingPointHelper::LoadSSE2SmiOperands(masm); |
834 switch (op) { | 876 switch (op) { |
835 case Token::ADD: __ addsd(xmm0, xmm1); break; | 877 case Token::ADD: __ addsd(xmm0, xmm1); break; |
836 case Token::SUB: __ subsd(xmm0, xmm1); break; | 878 case Token::SUB: __ subsd(xmm0, xmm1); break; |
837 case Token::MUL: __ mulsd(xmm0, xmm1); break; | 879 case Token::MUL: __ mulsd(xmm0, xmm1); break; |
838 case Token::DIV: __ divsd(xmm0, xmm1); break; | 880 case Token::DIV: __ divsd(xmm0, xmm1); break; |
839 default: UNREACHABLE(); | 881 default: UNREACHABLE(); |
840 } | 882 } |
841 } | 883 } |
842 __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); | 884 __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); |
843 __ movq(rax, rcx); | 885 __ movq(rax, rcx); |
844 __ ret(0); | 886 __ ret(0); |
| 887 #if !V8_USE_31_BITS_SMI_VALUE |
845 } else { | 888 } else { |
| 889 #else |
| 890 __ bind(&goto_slow); |
| 891 if (op == Token::SHL || op == Token::SHR) { |
| 892 __ pop(right); |
| 893 __ pop(left); |
| 894 } |
| 895 __ jmp(slow); |
| 896 } else { |
| 897 // Restore the original left and right values. |
| 898 if (op == Token::SHL || op == Token::SHR) { |
| 899 __ pop(right); |
| 900 __ pop(left); |
| 901 } |
| 902 #endif |
846 __ jmp(&fail); | 903 __ jmp(&fail); |
847 } | 904 } |
848 } | 905 } |
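A JavaScript-level illustration of the two bailout cases the comments above describe for logical shift right: the operand is reinterpreted as uint32, so even shift amounts of 0 and 1 can produce values outside the 31-bit smi range. A sketch of both cases using C++ unsigned arithmetic:

    #include <cstdint>
    #include <cstdio>

    const uint32_t kSmiMax31 = (1u << 30) - 1;

    int main() {
      // Shift by 0: JS (-1 >>> 0) reinterprets -1 as uint32.
      uint32_t shr0 = uint32_t(-1) >> 0;  // 4294967295
      printf("-1 >>> 0 = %u, fits in smi: %d\n", shr0, shr0 <= kSmiMax31);

      // Shift by 1: the result fits in int32 but can still exceed 2^30 - 1.
      uint32_t shr1 = uint32_t(-2) >> 1;  // 2147483647
      printf("-2 >>> 1 = %u, fits in smi: %d\n", shr1, shr1 <= kSmiMax31);
      return 0;
    }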
849 | 906 |
850 // 7. Non-smi operands reach the end of the code generated by | 907 // 7. Non-smi operands reach the end of the code generated by |
851 // GenerateSmiCode, and fall through to subsequent code, | 908 // GenerateSmiCode, and fall through to subsequent code, |
852 // with the operands in rdx and rax. | 909 // with the operands in rdx and rax. |
853 // But first we check if non-smi values are HeapNumbers holding | 910 // But first we check if non-smi values are HeapNumbers holding |
854 // values that could be smi. | 911 // values that could be smi. |
855 __ bind(¬_smis); | 912 __ bind(¬_smis); |
(...skipping 18 matching lines...)
874 | 931 |
875 static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, | 932 static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, |
876 Label* alloc_failure, | 933 Label* alloc_failure, |
877 OverwriteMode mode); | 934 OverwriteMode mode); |
878 | 935 |
879 | 936 |
880 static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm, | 937 static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm, |
881 Label* allocation_failure, | 938 Label* allocation_failure, |
882 Label* non_numeric_failure, | 939 Label* non_numeric_failure, |
883 Token::Value op, | 940 Token::Value op, |
| 941 #if V8_USE_31_BITS_SMI_VALUE |
| 942 BinaryOpIC::TypeInfo |
| 943 result_type, |
| 944 Label* non_int32_failure, |
| 945 #endif |
884 OverwriteMode mode) { | 946 OverwriteMode mode) { |
885 switch (op) { | 947 switch (op) { |
886 case Token::ADD: | 948 case Token::ADD: |
887 case Token::SUB: | 949 case Token::SUB: |
888 case Token::MUL: | 950 case Token::MUL: |
889 case Token::DIV: { | 951 case Token::DIV: { |
890 FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure); | 952 FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure); |
891 | 953 |
892 switch (op) { | 954 switch (op) { |
893 case Token::ADD: __ addsd(xmm0, xmm1); break; | 955 case Token::ADD: __ addsd(xmm0, xmm1); break; |
894 case Token::SUB: __ subsd(xmm0, xmm1); break; | 956 case Token::SUB: __ subsd(xmm0, xmm1); break; |
895 case Token::MUL: __ mulsd(xmm0, xmm1); break; | 957 case Token::MUL: __ mulsd(xmm0, xmm1); break; |
896 case Token::DIV: __ divsd(xmm0, xmm1); break; | 958 case Token::DIV: __ divsd(xmm0, xmm1); break; |
897 default: UNREACHABLE(); | 959 default: UNREACHABLE(); |
898 } | 960 } |
| 961 #if V8_USE_31_BITS_SMI_VALUE |
| 962 if (non_int32_failure != NULL) { |
| 963 if (result_type <= BinaryOpIC::INT32) { |
| 964 __ cvttsd2si(kScratchRegister, xmm0); |
| 965 __ cvtlsi2sd(xmm2, kScratchRegister); |
| 966 __ pcmpeqd(xmm2, xmm0); |
| 967 __ movmskpd(rcx, xmm2); |
| 968 __ testl(rcx, Immediate(1)); |
| 969 __ j(zero, non_int32_failure); |
| 970 } |
| 971 } |
| 972 #endif |
899 BinaryOpStub_GenerateHeapResultAllocation( | 973 BinaryOpStub_GenerateHeapResultAllocation( |
900 masm, allocation_failure, mode); | 974 masm, allocation_failure, mode); |
901 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); | 975 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
902 __ ret(0); | 976 __ ret(0); |
903 break; | 977 break; |
904 } | 978 } |
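The non_int32_failure check added above is a truncate-and-convert-back round trip: cvttsd2si truncates the double result to int32, cvtlsi2sd converts it back, and pcmpeqd/movmskpd reject the value if any bits changed. A scalar C++ sketch of the same predicate (the stub compares packed bits; memcpy plays that role here):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // True when d survives a round trip through int32 bit-for-bit.
    bool IsInt32Double(double d) {
      int32_t truncated = static_cast<int32_t>(d);   // cvttsd2si
      double back = static_cast<double>(truncated);  // cvtlsi2sd
      uint64_t a, b;
      std::memcpy(&a, &d, sizeof(a));
      std::memcpy(&b, &back, sizeof(b));
      return a == b;                                 // pcmpeqd-style compare
    }

    int main() {
      assert(IsInt32Double(42.0));
      assert(!IsInt32Double(0.5));   // fraction is lost by truncation
      assert(!IsInt32Double(-0.0));  // -0.0 differs bitwise from +0.0
      return 0;
    }

Out-of-range doubles fail the same way in the stub, since cvttsd2si produces the integer-indefinite value for them.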
905 case Token::MOD: { | 979 case Token::MOD: { |
906 // For MOD we jump to the allocation_failure label, to call runtime. | 980 // For MOD we jump to the allocation_failure label, to call runtime. |
907 __ jmp(allocation_failure); | 981 __ jmp(allocation_failure); |
908 break; | 982 break; |
909 } | 983 } |
910 case Token::BIT_OR: | 984 case Token::BIT_OR: |
911 case Token::BIT_AND: | 985 case Token::BIT_AND: |
912 case Token::BIT_XOR: | 986 case Token::BIT_XOR: |
913 case Token::SAR: | 987 case Token::SAR: |
914 case Token::SHL: | 988 case Token::SHL: |
915 case Token::SHR: { | 989 case Token::SHR: { |
916 Label non_smi_shr_result; | 990 Label non_smi_shr_result; |
917 Register heap_number_map = r9; | 991 Register heap_number_map = r9; |
| 992 #if V8_USE_31_BITS_SMI_VALUE |
| 993 Label goto_non_numeric_failure; |
| 994 // Push the arguments on the stack. |
| 995 __ push(rdx); |
| 996 __ push(rax); |
| 997 #endif |
918 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 998 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 999 #if !V8_USE_31_BITS_SMI_VALUE |
919 FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure, | 1000 FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure, |
920 heap_number_map); | 1001 heap_number_map); |
| 1002 #else |
| 1003 FloatingPointHelper::LoadAsIntegers(masm, &goto_non_numeric_failure, |
| 1004 heap_number_map); |
| 1005 #endif |
921 switch (op) { | 1006 switch (op) { |
922 case Token::BIT_OR: __ orl(rax, rcx); break; | 1007 case Token::BIT_OR: __ orl(rax, rcx); break; |
923 case Token::BIT_AND: __ andl(rax, rcx); break; | 1008 case Token::BIT_AND: __ andl(rax, rcx); break; |
924 case Token::BIT_XOR: __ xorl(rax, rcx); break; | 1009 case Token::BIT_XOR: __ xorl(rax, rcx); break; |
925 case Token::SAR: __ sarl_cl(rax); break; | 1010 case Token::SAR: __ sarl_cl(rax); break; |
926 case Token::SHL: __ shll_cl(rax); break; | 1011 case Token::SHL: __ shll_cl(rax); break; |
927 case Token::SHR: { | 1012 case Token::SHR: { |
928 __ shrl_cl(rax); | 1013 __ shrl_cl(rax); |
| 1014 #if !V8_USE_31_BITS_SMI_VALUE |
929 // Check if result is negative. This can only happen for a shift | 1015 // Check if result is negative. This can only happen for a shift |
930 // by zero. | 1016 // by zero. |
931 __ testl(rax, rax); | 1017 __ testl(rax, rax); |
932 __ j(negative, &non_smi_shr_result); | 1018 __ j(negative, &non_smi_shr_result); |
| 1019 #endif |
933 break; | 1020 break; |
934 } | 1021 } |
935 default: UNREACHABLE(); | 1022 default: UNREACHABLE(); |
936 } | 1023 } |
| 1024 #if !V8_USE_31_BITS_SMI_VALUE |
937 STATIC_ASSERT(kSmiValueSize == 32); | 1025 STATIC_ASSERT(kSmiValueSize == 32); |
| 1026 #else |
| 1027 STATIC_ASSERT(kSmiValueSize == 31); |
| 1028 if (op == Token::SHR) { |
| 1029 __ testl(rax, Immediate(0xc0000000)); |
| 1030 __ j(not_zero, &non_smi_shr_result); |
| 1031 } else { |
| 1032 __ cmpl(rax, Immediate(0xc0000000)); |
| 1033 __ j(negative, &non_smi_shr_result, Label::kNear); |
| 1034 } |
| 1035 // Drop arguments. |
| 1036 __ addq(rsp, Immediate(2 * kRegisterSize)); |
| 1037 #endif |
938 // Tag smi result and return. | 1038 // Tag smi result and return. |
939 __ Integer32ToSmi(rax, rax); | 1039 __ Integer32ToSmi(rax, rax); |
940 __ Ret(); | 1040 __ Ret(); |
941 | 1041 |
| 1042 #if V8_USE_31_BITS_SMI_VALUE |
| 1043 __ bind(&goto_non_numeric_failure); |
| 1044 // Restore arguments from stack. |
| 1045 __ pop(rax); |
| 1046 __ pop(rdx); |
| 1047 __ jmp(non_numeric_failure); |
| 1048 #endif |
| 1049 |
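The two 0xc0000000 tests above encode the 31-bit smi range check. For the unsigned SHR result, fitting means the top two bits are clear (the value is below 2^30); for the signed results, subtracting 0xc0000000 is the same as adding 2^30 in 32-bit arithmetic, and the sign flag is clear exactly when the value lies in [-2^30, 2^30 - 1]. A scalar sketch of both predicates:

    #include <cassert>
    #include <cstdint>

    // Signed case: cmpl(rax, Immediate(0xc0000000)); j(negative, ...).
    bool SignedFitsSmi31(int32_t v) {
      return int32_t(uint32_t(v) - 0xc0000000u) >= 0;
    }

    // Unsigned SHR case: testl(rax, Immediate(0xc0000000)).
    bool UnsignedFitsSmi31(uint32_t v) {
      return (v & 0xc0000000u) == 0;
    }

    int main() {
      assert(SignedFitsSmi31(-(1 << 30)));       // smallest smi
      assert(SignedFitsSmi31((1 << 30) - 1));    // largest smi
      assert(!SignedFitsSmi31(1 << 30));         // one past the top
      assert(!SignedFitsSmi31(-(1 << 30) - 1));  // one past the bottom
      assert(UnsignedFitsSmi31(0x3fffffffu));
      assert(!UnsignedFitsSmi31(0x40000000u));
      return 0;
    }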
| 1050 #if !V8_USE_31_BITS_SMI_VALUE |
942 // Logical shift right can produce an unsigned int32 that is not | 1051 // Logical shift right can produce an unsigned int32 that is not |
943 // an int32, and so is not in the smi range. Allocate a heap number | 1052 // an int32, and so is not in the smi range. Allocate a heap number |
944 // in that case. | 1053 // in that case. |
945 if (op == Token::SHR) { | 1054 if (op == Token::SHR) { |
946 __ bind(&non_smi_shr_result); | 1055 __ bind(&non_smi_shr_result); |
947 Label allocation_failed; | 1056 Label allocation_failed; |
948 __ movl(rbx, rax); // rbx holds result value (uint32 value as int64). | 1057 __ movl(rbx, rax); // rbx holds result value (uint32 value as int64). |
949 // Allocate heap number in new space. | 1058 // Allocate heap number in new space. |
950 // Not using AllocateHeapNumber macro in order to reuse | 1059 // Not using AllocateHeapNumber macro in order to reuse |
951 // already loaded heap_number_map. | 1060 // already loaded heap_number_map. |
952 __ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed, | 1061 __ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed, |
953 TAG_OBJECT); | 1062 TAG_OBJECT); |
954 // Set the map. | 1063 // Set the map. |
955 __ AssertRootValue(heap_number_map, | 1064 __ AssertRootValue(heap_number_map, |
956 Heap::kHeapNumberMapRootIndex, | 1065 Heap::kHeapNumberMapRootIndex, |
957 "HeapNumberMap register clobbered."); | 1066 "HeapNumberMap register clobbered."); |
958 __ movq(FieldOperand(rax, HeapObject::kMapOffset), | 1067 __ movq(FieldOperand(rax, HeapObject::kMapOffset), |
959 heap_number_map); | 1068 heap_number_map); |
960 __ cvtqsi2sd(xmm0, rbx); | 1069 __ cvtqsi2sd(xmm0, rbx); |
961 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); | 1070 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
962 __ Ret(); | 1071 __ Ret(); |
963 | 1072 |
964 __ bind(&allocation_failed); | 1073 __ bind(&allocation_failed); |
965 // We need tagged values in rdx and rax for the following code, | 1074 // We need tagged values in rdx and rax for the following code, |
966 // not int32 in rax and rcx. | 1075 // not int32 in rax and rcx. |
967 __ Integer32ToSmi(rax, rcx); | 1076 __ Integer32ToSmi(rax, rcx); |
968 __ Integer32ToSmi(rdx, rbx); | 1077 __ Integer32ToSmi(rdx, rbx); |
969 __ jmp(allocation_failure); | 1078 __ jmp(allocation_failure); |
970 } | 1079 } |
| 1080 #else |
| 1081 __ bind(&non_smi_shr_result); |
| 1082 Label allocation_failed; |
| 1083 __ movl(rbx, rax); // rbx holds result value (uint32 value as int64). |
| 1084 // Allocate heap number in new space. |
| 1085 // Not using AllocateHeapNumber macro in order to reuse |
| 1086 // already loaded heap_number_map. |
| 1087 Label skip_allocation; |
| 1088 switch (mode) { |
| 1089 case OVERWRITE_LEFT: { |
| 1090 __ movq(rax, Operand(rsp, 1 * kRegisterSize)); |
| 1091 __ JumpIfNotSmi(rax, &skip_allocation); |
| 1092 __ Allocate(HeapNumber::kSize, rax, r8, no_reg, &allocation_failed, |
| 1093 TAG_OBJECT); |
| 1094 __ bind(&skip_allocation); |
| 1095 break; |
| 1096 } |
| 1097 case OVERWRITE_RIGHT: |
| 1098 __ movq(rax, Operand(rsp, 0 * kRegisterSize)); |
| 1099 __ JumpIfNotSmi(rax, &skip_allocation); |
| 1100 // Fall through! |
| 1101 case NO_OVERWRITE: |
| 1102 __ Allocate(HeapNumber::kSize, rax, r8, no_reg, &allocation_failed, |
| 1103 TAG_OBJECT); |
| 1104 __ bind(&skip_allocation); |
| 1105 break; |
| 1106 default: UNREACHABLE(); |
| 1107 } |
| 1108 // Set the map. |
| 1109 __ AssertRootValue(heap_number_map, |
| 1110 Heap::kHeapNumberMapRootIndex, |
| 1111 "HeapNumberMap register clobbered."); |
| 1112 __ movq(FieldOperand(rax, HeapObject::kMapOffset), |
| 1113 heap_number_map); |
| 1114 if (op == Token::SHR) { |
| 1115 __ cvtqsi2sd(xmm0, rbx); |
| 1116 } else { |
| 1117 // All other operations return a signed int32, so we use |
| 1118 // cvtlsi2sd here to retain the sign bit. |
| 1119 __ cvtlsi2sd(xmm0, rbx); |
| 1120 } |
| 1121 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
| 1122 // Drop arguments. |
| 1123 __ addq(rsp, Immediate(2 * kRegisterSize)); |
| 1124 __ Ret(); |
| 1125 |
| 1126 __ bind(&allocation_failed); |
| 1127 // Restore arguments from stack. |
| 1128 __ pop(rax); |
| 1129 __ pop(rdx); |
| 1130 __ jmp(allocation_failure); |
| 1131 #endif |
971 break; | 1132 break; |
972 } | 1133 } |
973 default: UNREACHABLE(); break; | 1134 default: UNREACHABLE(); break; |
974 } | 1135 } |
975 // No fall-through from this generated code. | 1136 // No fall-through from this generated code. |
976 if (FLAG_debug_code) { | 1137 if (FLAG_debug_code) { |
977 __ Abort("Unexpected fall-through in " | 1138 __ Abort("Unexpected fall-through in " |
978 "BinaryStub_GenerateFloatingPointCode."); | 1139 "BinaryStub_GenerateFloatingPointCode."); |
979 } | 1140 } |
980 } | 1141 } |
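In the 31-bit branch above, the overwrite-mode switch avoids a fresh allocation when the operand being overwritten is already a heap number: the tagged operands were pushed earlier (left at offset 1, right at offset 0), so the doomed operand can be reloaded and reused as the result object unless it is a smi. A simplified model of that skip_allocation decision:

    #include <cstdio>

    enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
    struct Value { bool is_smi; };

    // Reuse the doomed operand's heap number when it has one;
    // otherwise allocate a fresh HeapNumber (model of skip_allocation).
    const char* ResultObject(OverwriteMode mode, Value left, Value right) {
      switch (mode) {
        case OVERWRITE_LEFT:
          return left.is_smi ? "fresh allocation" : "reuse left";
        case OVERWRITE_RIGHT:
          return right.is_smi ? "fresh allocation" : "reuse right";
        case NO_OVERWRITE:
        default:
          return "fresh allocation";
      }
    }

    int main() {
      Value smi{true}, heap{false};
      printf("%s\n", ResultObject(OVERWRITE_LEFT, heap, smi));  // reuse left
      printf("%s\n", ResultObject(OVERWRITE_LEFT, smi, heap));  // fresh allocation
      printf("%s\n", ResultObject(NO_OVERWRITE, heap, heap));   // fresh allocation
      return 0;
    }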
(...skipping 75 matching lines...)
1056 FrameScope scope(masm, StackFrame::INTERNAL); | 1217 FrameScope scope(masm, StackFrame::INTERNAL); |
1057 GenerateRegisterArgsPush(masm); | 1218 GenerateRegisterArgsPush(masm); |
1058 GenerateCallRuntime(masm); | 1219 GenerateCallRuntime(masm); |
1059 } | 1220 } |
1060 __ Ret(); | 1221 __ Ret(); |
1061 } | 1222 } |
1062 } | 1223 } |
1063 | 1224 |
1064 | 1225 |
1065 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { | 1226 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
| 1227 #if !V8_USE_31_BITS_SMI_VALUE |
1066 // The int32 case is identical to the Smi case. We avoid creating this | 1228 // The int32 case is identical to the Smi case. We avoid creating this |
1067 // ic state on x64. | 1229 // ic state on x64. |
1068 UNREACHABLE(); | 1230 UNREACHABLE(); |
| 1231 #else |
| 1232 ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); |
| 1233 |
| 1234 Label gc_required, not_number, not_int32; |
| 1235 BinaryOpStub_GenerateFloatingPointCode(masm, &gc_required, ¬_number, |
| 1236 op_, result_type_, ¬_int32, mode_); |
| 1237 |
| 1238 __ bind(¬_number); |
| 1239 __ bind(¬_int32); |
| 1240 GenerateTypeTransition(masm); |
| 1241 |
| 1242 __ bind(&gc_required); |
| 1243 { |
| 1244 FrameScope scope(masm, StackFrame::INTERNAL); |
| 1245 GenerateRegisterArgsPush(masm); |
| 1246 GenerateCallRuntime(masm); |
| 1247 } |
| 1248 __ Ret(); |
| 1249 #endif |
1069 } | 1250 } |
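The reason GenerateInt32Stub was previously unreachable on x64: with 32-bit smi payloads every int32 tags as a smi, so the SMI and INT32 IC states coincide; with 31-bit payloads there are int32 values that are not smis, and the stub above handles them via the floating-point path plus the exact-int32 check. A small sketch of the distinction (state names mirror BinaryOpIC descriptively):

    #include <cstdint>
    #include <cstdio>

    enum TypeInfo { SMI, INT32 };

    TypeInfo ClassifyInt32(int32_t v, int smi_value_size) {
      if (smi_value_size == 32) return SMI;  // every int32 is a smi
      const int32_t kSmiMin31 = -(1 << 30);
      const int32_t kSmiMax31 = (1 << 30) - 1;
      return (v >= kSmiMin31 && v <= kSmiMax31) ? SMI : INT32;
    }

    int main() {
      // 2^30 is an int32 but not a 31-bit smi, so the 31-bit port
      // needs a real INT32 stub where the 32-bit port had UNREACHABLE().
      printf("%d\n", ClassifyInt32(1 << 30, 32));  // 0 (SMI)
      printf("%d\n", ClassifyInt32(1 << 30, 31));  // 1 (INT32)
      return 0;
    }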
1070 | 1251 |
1071 | 1252 |
1072 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { | 1253 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { |
1073 Label call_runtime; | 1254 Label call_runtime; |
1074 ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); | 1255 ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); |
1075 ASSERT(op_ == Token::ADD); | 1256 ASSERT(op_ == Token::ADD); |
1076 // If both arguments are strings, call the string add stub. | 1257 // If both arguments are strings, call the string add stub. |
1077 // Otherwise, do a transition. | 1258 // Otherwise, do a transition. |
1078 | 1259 |
(...skipping 84 matching lines...)
1163 // It could be that only SMIs have been seen at either the left | 1344 // It could be that only SMIs have been seen at either the left |
1164 // or the right operand. For precise type feedback, patch the IC | 1345 // or the right operand. For precise type feedback, patch the IC |
1165 // again if this changes. | 1346 // again if this changes. |
1166 if (left_type_ == BinaryOpIC::SMI) { | 1347 if (left_type_ == BinaryOpIC::SMI) { |
1167 BinaryOpStub_CheckSmiInput(masm, rdx, ¬_number); | 1348 BinaryOpStub_CheckSmiInput(masm, rdx, ¬_number); |
1168 } | 1349 } |
1169 if (right_type_ == BinaryOpIC::SMI) { | 1350 if (right_type_ == BinaryOpIC::SMI) { |
1170 BinaryOpStub_CheckSmiInput(masm, rax, ¬_number); | 1351 BinaryOpStub_CheckSmiInput(masm, rax, ¬_number); |
1171 } | 1352 } |
1172 | 1353 |
| 1354 #if !V8_USE_31_BITS_SMI_VALUE |
1173 BinaryOpStub_GenerateFloatingPointCode( | 1355 BinaryOpStub_GenerateFloatingPointCode( |
1174 masm, &gc_required, ¬_number, op_, mode_); | 1356 masm, &gc_required, ¬_number, op_, mode_); |
| 1357 #else |
| 1358 BinaryOpStub_GenerateFloatingPointCode( |
| 1359 masm, &gc_required, ¬_number, op_, result_type_, NULL, mode_); |
| 1360 #endif |
1175 | 1361 |
1176 __ bind(¬_number); | 1362 __ bind(¬_number); |
1177 GenerateTypeTransition(masm); | 1363 GenerateTypeTransition(masm); |
1178 | 1364 |
1179 __ bind(&gc_required); | 1365 __ bind(&gc_required); |
1180 { | 1366 { |
1181 FrameScope scope(masm, StackFrame::INTERNAL); | 1367 FrameScope scope(masm, StackFrame::INTERNAL); |
1182 GenerateRegisterArgsPush(masm); | 1368 GenerateRegisterArgsPush(masm); |
1183 GenerateCallRuntime(masm); | 1369 GenerateCallRuntime(masm); |
1184 } | 1370 } |
1185 __ Ret(); | 1371 __ Ret(); |
1186 } | 1372 } |
1187 | 1373 |
1188 | 1374 |
1189 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { | 1375 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
1190 Label call_runtime, call_string_add_or_runtime; | 1376 Label call_runtime, call_string_add_or_runtime; |
1191 | 1377 |
1192 BinaryOpStub_GenerateSmiCode( | 1378 BinaryOpStub_GenerateSmiCode( |
1193 masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_); | 1379 masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_); |
1194 | 1380 |
| 1381 #if !V8_USE_31_BITS_SMI_VALUE |
1195 BinaryOpStub_GenerateFloatingPointCode( | 1382 BinaryOpStub_GenerateFloatingPointCode( |
1196 masm, &call_runtime, &call_string_add_or_runtime, op_, mode_); | 1383 masm, &call_runtime, &call_string_add_or_runtime, op_, mode_); |
| 1384 #else |
| 1385 BinaryOpStub_GenerateFloatingPointCode( |
| 1386 masm, &call_runtime, &call_string_add_or_runtime, op_, |
| 1387 result_type_, NULL, mode_); |
| 1388 #endif |
1197 | 1389 |
1198 __ bind(&call_string_add_or_runtime); | 1390 __ bind(&call_string_add_or_runtime); |
1199 if (op_ == Token::ADD) { | 1391 if (op_ == Token::ADD) { |
1200 GenerateAddStrings(masm); | 1392 GenerateAddStrings(masm); |
1201 } | 1393 } |
1202 | 1394 |
1203 __ bind(&call_runtime); | 1395 __ bind(&call_runtime); |
1204 { | 1396 { |
1205 FrameScope scope(masm, StackFrame::INTERNAL); | 1397 FrameScope scope(masm, StackFrame::INTERNAL); |
1206 GenerateRegisterArgsPush(masm); | 1398 GenerateRegisterArgsPush(masm); |
(...skipping 530 matching lines...)
1737 // Convert HeapNumber to smi if possible. | 1929 // Convert HeapNumber to smi if possible. |
1738 __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset)); | 1930 __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset)); |
1739 __ movq(scratch2, xmm0); | 1931 __ movq(scratch2, xmm0); |
1740 __ cvttsd2siq(smi_result, xmm0); | 1932 __ cvttsd2siq(smi_result, xmm0); |
1741 // Check if conversion was successful by converting back and | 1933 // Check if conversion was successful by converting back and |
1742 // comparing to the original double's bits. | 1934 // comparing to the original double's bits. |
1743 __ cvtlsi2sd(xmm1, smi_result); | 1935 __ cvtlsi2sd(xmm1, smi_result); |
1744 __ movq(kScratchRegister, xmm1); | 1936 __ movq(kScratchRegister, xmm1); |
1745 __ cmpq(scratch2, kScratchRegister); | 1937 __ cmpq(scratch2, kScratchRegister); |
1746 __ j(not_equal, on_not_smis); | 1938 __ j(not_equal, on_not_smis); |
| 1939 #if V8_USE_31_BITS_SMI_VALUE |
| 1940 __ cmpl(smi_result, Immediate(0xc0000000)); |
| 1941 __ j(negative, on_not_smis); |
| 1942 #endif |
1747 __ Integer32ToSmi(first, smi_result); | 1943 __ Integer32ToSmi(first, smi_result); |
1748 | 1944 |
1749 __ bind(&first_done); | 1945 __ bind(&first_done); |
1750 __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done); | 1946 __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done); |
1751 __ bind(&first_smi); | 1947 __ bind(&first_smi); |
1752 __ AssertNotSmi(second); | 1948 __ AssertNotSmi(second); |
1753 __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map); | 1949 __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map); |
1754 __ j(not_equal, | 1950 __ j(not_equal, |
1755 (convert_undefined == CONVERT_UNDEFINED_TO_ZERO) | 1951 (convert_undefined == CONVERT_UNDEFINED_TO_ZERO) |
1756 ? &maybe_undefined_second | 1952 ? &maybe_undefined_second |
1757 : on_not_smis); | 1953 : on_not_smis); |
1758 // Convert second to smi, if possible. | 1954 // Convert second to smi, if possible. |
1759 __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset)); | 1955 __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset)); |
1760 __ movq(scratch2, xmm0); | 1956 __ movq(scratch2, xmm0); |
1761 __ cvttsd2siq(smi_result, xmm0); | 1957 __ cvttsd2siq(smi_result, xmm0); |
1762 __ cvtlsi2sd(xmm1, smi_result); | 1958 __ cvtlsi2sd(xmm1, smi_result); |
1763 __ movq(kScratchRegister, xmm1); | 1959 __ movq(kScratchRegister, xmm1); |
1764 __ cmpq(scratch2, kScratchRegister); | 1960 __ cmpq(scratch2, kScratchRegister); |
1765 __ j(not_equal, on_not_smis); | 1961 __ j(not_equal, on_not_smis); |
| 1962 #if V8_USE_31_BITS_SMI_VALUE |
| 1963 __ cmpl(smi_result, Immediate(0xc0000000)); |
| 1964 __ j(negative, on_not_smis); |
| 1965 #endif |
1766 __ Integer32ToSmi(second, smi_result); | 1966 __ Integer32ToSmi(second, smi_result); |
1767 if (on_success != NULL) { | 1967 if (on_success != NULL) { |
1768 __ jmp(on_success); | 1968 __ jmp(on_success); |
1769 } else { | 1969 } else { |
1770 __ jmp(&done); | 1970 __ jmp(&done); |
1771 } | 1971 } |
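The HeapNumber-to-smi conversions above are the integer flavor of the round trip: cvttsd2siq truncates, cvtlsi2sd converts the low 32 bits back, and a 64-bit bit compare rejects inexact values; the cmpl against 0xc0000000 added for 31-bit smis then rejects int32 values outside the smi range. A scalar sketch combining both steps (illustrative; assumes the input is within int64 range):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    bool DoubleToSmi31(double d, int32_t* out) {
      int64_t t = static_cast<int64_t>(d);   // cvttsd2siq
      int32_t v = static_cast<int32_t>(t);   // low 32 bits of the result
      double back = static_cast<double>(v);  // cvtlsi2sd on the low half
      uint64_t a, b;
      std::memcpy(&a, &d, sizeof(a));
      std::memcpy(&b, &back, sizeof(b));
      if (a != b) return false;              // not an exact int32
      if (int32_t(uint32_t(v) - 0xc0000000u) < 0) return false;  // smi range
      *out = v;
      return true;
    }

    int main() {
      int32_t v;
      assert(DoubleToSmi31(123.0, &v) && v == 123);
      assert(!DoubleToSmi31(0.5, &v));           // fractional
      assert(!DoubleToSmi31(-0.0, &v));          // -0.0 must stay a double
      assert(!DoubleToSmi31(1073741824.0, &v));  // 2^30: int32, not a smi
      return 0;
    }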
1772 | 1972 |
1773 __ bind(&maybe_undefined_first); | 1973 __ bind(&maybe_undefined_first); |
1774 __ CompareRoot(first, Heap::kUndefinedValueRootIndex); | 1974 __ CompareRoot(first, Heap::kUndefinedValueRootIndex); |
1775 __ j(not_equal, on_not_smis); | 1975 __ j(not_equal, on_not_smis); |
(...skipping 1477 matching lines...)
3253 Condition cc = GetCondition(); | 3453 Condition cc = GetCondition(); |
3254 Factory* factory = masm->isolate()->factory(); | 3454 Factory* factory = masm->isolate()->factory(); |
3255 | 3455 |
3256 Label miss; | 3456 Label miss; |
3257 CheckInputType(masm, rdx, left_, &miss); | 3457 CheckInputType(masm, rdx, left_, &miss); |
3258 CheckInputType(masm, rax, right_, &miss); | 3458 CheckInputType(masm, rax, right_, &miss); |
3259 | 3459 |
3260 // Compare two smis. | 3460 // Compare two smis. |
3261 Label non_smi, smi_done; | 3461 Label non_smi, smi_done; |
3262 __ JumpIfNotBothSmi(rax, rdx, &non_smi); | 3462 __ JumpIfNotBothSmi(rax, rdx, &non_smi); |
| 3463 #if !V8_USE_31_BITS_SMI_VALUE |
3263 __ subq(rdx, rax); | 3464 __ subq(rdx, rax); |
| 3465 #else |
| 3466 __ subl(rdx, rax); |
| 3467 #endif |
3264 __ j(no_overflow, &smi_done); | 3468 __ j(no_overflow, &smi_done); |
| 3469 #if !V8_USE_31_BITS_SMI_VALUE |
3265 __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here. | 3470 __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here. |
| 3471 #else |
| 3472 __ notl(rdx); |
| 3473 #endif |
3266 __ bind(&smi_done); | 3474 __ bind(&smi_done); |
| 3475 #if V8_USE_31_BITS_SMI_VALUE |
| 3476 __ movsxlq(rdx, rdx); |
| 3477 #endif |
3267 __ movq(rax, rdx); | 3478 __ movq(rax, rdx); |
3268 __ ret(0); | 3479 __ ret(0); |
3269 __ bind(&non_smi); | 3480 __ bind(&non_smi); |
3270 | 3481 |
3271 // The compare stub returns a positive, negative, or zero 64-bit integer | 3482 // The compare stub returns a positive, negative, or zero 64-bit integer |
3272 // value in rax, corresponding to result of comparing the two inputs. | 3483 // value in rax, corresponding to result of comparing the two inputs. |
3273 // NOTICE! This code is only reached after a smi-fast-case check, so | 3484 // NOTICE! This code is only reached after a smi-fast-case check, so |
3274 // it is certain that at least one operand isn't a smi. | 3485 // it is certain that at least one operand isn't a smi. |
3275 | 3486 |
3276 // Two identical objects are equal unless they are both NaN or undefined. | 3487 // Two identical objects are equal unless they are both NaN or undefined. |
(...skipping 1288 matching lines...)
4565 // by the code above. | 4776 // by the code above. |
4566 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { | 4777 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { |
4567 __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset)); | 4778 __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset)); |
4568 __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset)); | 4779 __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset)); |
4569 } | 4780 } |
4570 // Get the instance types of the two strings as they will be needed soon. | 4781 // Get the instance types of the two strings as they will be needed soon. |
4571 __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset)); | 4782 __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset)); |
4572 __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset)); | 4783 __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset)); |
4573 | 4784 |
4574 // Look at the length of the result of adding the two strings. | 4785 // Look at the length of the result of adding the two strings. |
| 4786 #if !V8_USE_31_BITS_SMI_VALUE |
4575 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2); | 4787 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2); |
4576 __ SmiAdd(rbx, rbx, rcx); | 4788 __ SmiAdd(rbx, rbx, rcx); |
| 4789 #else |
| 4790 __ SmiAdd(rbx, rbx, rcx, &call_runtime); |
| 4791 #endif |
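Why the 31-bit branch replaces the STATIC_ASSERT with a bailout label: the assert encodes that two maximal string lengths cannot overflow the smi range, which holds when Smi::kMaxValue is 2^31 - 1 but not necessarily when it shrinks to 2^30 - 1, so the addition itself must be checked at run time. A sketch of the checked add (constants assumed, mirroring SmiAdd with a bailout):

    #include <cassert>
    #include <cstdint>

    const int32_t kSmiMax31 = (1 << 30) - 1;

    // Mirrors SmiAdd(rbx, rbx, rcx, &call_runtime): false means the sum
    // left the 31-bit smi range and the stub must call the runtime.
    bool CheckedSmiAdd31(int32_t a, int32_t b, int32_t* sum) {
      int64_t wide = int64_t(a) + int64_t(b);  // lengths are non-negative
      if (wide > kSmiMax31) return false;      // would jump to call_runtime
      *sum = int32_t(wide);
      return true;
    }

    int main() {
      int32_t sum;
      assert(CheckedSmiAdd31(1000, 2000, &sum) && sum == 3000);
      assert(!CheckedSmiAdd31(kSmiMax31, 1, &sum));  // overflows the range
      return 0;
    }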
4577 // Use the string table when adding two one character strings, as it | 4792 // Use the string table when adding two one character strings, as it |
4578 // helps later optimizations to return an internalized string here. | 4793 // helps later optimizations to return an internalized string here. |
4579 __ SmiCompare(rbx, Smi::FromInt(2)); | 4794 __ SmiCompare(rbx, Smi::FromInt(2)); |
4580 __ j(not_equal, &longer_than_two); | 4795 __ j(not_equal, &longer_than_two); |
4581 | 4796 |
4582 // Check that both strings are non-external ASCII strings. | 4797 // Check that both strings are non-external ASCII strings. |
4583 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx, | 4798 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx, |
4584 &call_runtime); | 4799 &call_runtime); |
4585 | 4800 |
4586 // Get the two characters forming the sub string. | 4801 // Get the two characters forming the sub string. |
(...skipping 957 matching lines...)
5544 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { | 5759 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { |
5545 ASSERT(state_ == CompareIC::SMI); | 5760 ASSERT(state_ == CompareIC::SMI); |
5546 Label miss; | 5761 Label miss; |
5547 __ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear); | 5762 __ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear); |
5548 | 5763 |
5549 if (GetCondition() == equal) { | 5764 if (GetCondition() == equal) { |
5550 // For equality we do not care about the sign of the result. | 5765 // For equality we do not care about the sign of the result. |
5551 __ subq(rax, rdx); | 5766 __ subq(rax, rdx); |
5552 } else { | 5767 } else { |
5553 Label done; | 5768 Label done; |
| 5769 #if !V8_USE_31_BITS_SMI_VALUE |
5554 __ subq(rdx, rax); | 5770 __ subq(rdx, rax); |
| 5771 #else |
| 5772 __ subl(rdx, rax); |
| 5773 #endif |
5555 __ j(no_overflow, &done, Label::kNear); | 5774 __ j(no_overflow, &done, Label::kNear); |
| 5775 #if !V8_USE_31_BITS_SMI_VALUE |
5556 // Correct sign of result in case of overflow. | 5776 // Correct sign of result in case of overflow. |
5557 __ not_(rdx); | 5777 __ not_(rdx); |
| 5778 #else |
| 5779 __ notl(rdx); |
| 5780 #endif |
5558 __ bind(&done); | 5781 __ bind(&done); |
| 5782 #if V8_USE_31_BITS_SMI_VALUE |
| 5783 __ movsxlq(rdx, rdx); |
| 5784 #endif |
5559 __ movq(rax, rdx); | 5785 __ movq(rax, rdx); |
5560 } | 5786 } |
5561 __ ret(0); | 5787 __ ret(0); |
5562 | 5788 |
5563 __ bind(&miss); | 5789 __ bind(&miss); |
5564 GenerateMiss(masm); | 5790 GenerateMiss(masm); |
5565 } | 5791 } |
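The subl/notl/movsxlq sequence above (also used in the generic compare stub earlier) relies on smi tagging being order-preserving: both operands are value << 1, so subtracting the tagged words gives a difference with the correct sign unless the 32-bit subtraction overflows, in which case the computed result has the wrong sign and a bitwise NOT flips it while keeping it nonzero; movsxlq then widens the 32-bit result to the 64-bit value callers expect in rax. A model of the trick:

    #include <cassert>
    #include <cstdint>

    // Returns <0, ==0, >0 like the stub's smi fast path.
    int64_t SmiCompare31(int32_t left, int32_t right) {
      int32_t tagged_left = int32_t(uint32_t(left) << 1);    // smi tag
      int32_t tagged_right = int32_t(uint32_t(right) << 1);
      int64_t wide = int64_t(tagged_left) - int64_t(tagged_right);
      int32_t diff = int32_t(uint32_t(tagged_left) - uint32_t(tagged_right));
      if (wide != diff) diff = ~diff;  // notl: fix the sign on overflow
      return int64_t(diff);            // movsxlq
    }

    int main() {
      assert(SmiCompare31(3, 5) < 0);
      assert(SmiCompare31(5, 3) > 0);
      assert(SmiCompare31(7, 7) == 0);
      // The tagged subtraction overflows here, but NOT restores the sign.
      assert(SmiCompare31((1 << 30) - 1, -(1 << 30)) > 0);
      return 0;
    }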
5566 | 5792 |
5567 | 5793 |
5568 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { | 5794 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { |
(...skipping 1247 matching lines...)
6816 __ bind(&fast_elements_case); | 7042 __ bind(&fast_elements_case); |
6817 GenerateCase(masm, FAST_ELEMENTS); | 7043 GenerateCase(masm, FAST_ELEMENTS); |
6818 } | 7044 } |
6819 | 7045 |
6820 | 7046 |
6821 #undef __ | 7047 #undef __ |
6822 | 7048 |
6823 } } // namespace v8::internal | 7049 } } // namespace v8::internal |
6824 | 7050 |
6825 #endif // V8_TARGET_ARCH_X64 | 7051 #endif // V8_TARGET_ARCH_X64 |