Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 706 matching lines...) | |
| 717 MacroAssembler* masm, | 717 MacroAssembler* masm, |
| 718 Label* slow, | 718 Label* slow, |
| 719 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, | 719 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, |
| 720 Token::Value op) { | 720 Token::Value op) { |
| 721 | 721 |
| 722 // Arguments to BinaryOpStub are in rdx and rax. | 722 // Arguments to BinaryOpStub are in rdx and rax. |
| 723 const Register left = rdx; | 723 const Register left = rdx; |
| 724 const Register right = rax; | 724 const Register right = rax; |
| 725 | 725 |
| 726 // We only generate heapnumber answers for overflowing calculations | 726 // We only generate heapnumber answers for overflowing calculations |
| 727 // for the four basic arithmetic operations and logical right shift by 0. | 727 // for the four basic arithmetic operations and logical right shift by 0 |
| 728 // for 32-bit smi values; for 31-bit smi values, we additionally allow SHL | |
| 729 // and logical right shift by 1. | |
| 728 bool generate_inline_heapnumber_results = | 730 bool generate_inline_heapnumber_results = |
| 729 (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) && | 731 (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) && |
| 730 (op == Token::ADD || op == Token::SUB || | 732 (op == Token::ADD || op == Token::SUB || |
| 733 (kSmiValueSize == 31 && op == Token::SHL) || | |
|
danno
2013/08/01 16:45:41
I think you should turn this into a predicate: S…
haitao.feng
2013/08/02 09:35:51
Done. Add IsUnsafeSmiOperator function in the macr…
| |
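For context, a minimal sketch of what such a predicate could look like, based only on the condition in the diff above (the actual helper landed in the macro assembler and may differ in signature and placement):

```cpp
// Hypothetical sketch, not the committed code. "Unsafe" operators are
// those whose smi result can overflow the smi range and may therefore
// need a heap-number result; with a 31-bit payload, SHL joins the list.
static bool IsUnsafeSmiOperator(Token::Value op) {
  return op == Token::ADD || op == Token::SUB ||
         op == Token::MUL || op == Token::DIV || op == Token::SHR ||
         (kSmiValueSize == 31 && op == Token::SHL);
}
```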
| 731 op == Token::MUL || op == Token::DIV || op == Token::SHR); | 734 op == Token::MUL || op == Token::DIV || op == Token::SHR); |
| 732 | 735 |
| 733 // Smi check of both operands. If op is BIT_OR, the check is delayed | 736 // Smi check of both operands. If op is BIT_OR, the check is delayed |
| 734 // until after the OR operation. | 737 // until after the OR operation. |
| 735 Label not_smis; | 738 Label not_smis; |
| 736 Label use_fp_on_smis; | 739 Label use_fp_on_smis; |
| 737 Label fail; | 740 Label fail; |
| 738 | 741 |
| 739 if (op != Token::BIT_OR) { | 742 if (op != Token::BIT_OR) { |
| 740 Comment smi_check_comment(masm, "-- Smi check arguments"); | 743 Comment smi_check_comment(masm, "-- Smi check arguments"); |
| (...skipping 43 matching lines...) | |
| 784 ASSERT(right.is(rax)); | 787 ASSERT(right.is(rax)); |
| 785 __ SmiXor(right, right, left); // BIT_XOR is commutative. | 788 __ SmiXor(right, right, left); // BIT_XOR is commutative. |
| 786 break; | 789 break; |
| 787 | 790 |
| 788 case Token::BIT_AND: | 791 case Token::BIT_AND: |
| 789 ASSERT(right.is(rax)); | 792 ASSERT(right.is(rax)); |
| 790 __ SmiAnd(right, right, left); // BIT_AND is commutative. | 793 __ SmiAnd(right, right, left); // BIT_AND is commutative. |
| 791 break; | 794 break; |
| 792 | 795 |
| 793 case Token::SHL: | 796 case Token::SHL: |
| 794 __ SmiShiftLeft(left, left, right); | 797 if (kSmiValueSize == 31) { |
|
danno
2013/08/01 16:45:41
Why can't you put this extra stack manipulation in…
haitao.feng
2013/08/02 09:35:51
Done.
| |
| 798 __ push(left); | |
| 799 __ push(right); | |
| 800 } | |
| 801 __ SmiShiftLeft(left, left, right, &use_fp_on_smis); | |
| 795 __ movq(rax, left); | 802 __ movq(rax, left); |
| 803 if (kSmiValueSize == 31) { | |
| 804 __ addq(rsp, Immediate(2 * kRegisterSize)); | |
| 805 } | |
| 796 break; | 806 break; |
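The push/pop pair exists because, with a 31-bit smi payload, SmiShiftLeft can overflow the smi range and bail out to use_fp_on_smis, which then needs the original operands back from the stack. A standalone sketch (not V8 code) of why even a shift by one can leave the 31-bit range:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  const int32_t kSmiMaxValue31 = (1 << 30) - 1;  // 31-bit payload maximum
  int32_t value = 1 << 29;                       // a valid 31-bit smi
  int64_t shifted = static_cast<int64_t>(value) << 1;  // 2^30: out of range
  std::printf("still a smi: %s\n", shifted <= kSmiMaxValue31 ? "yes" : "no");
  return 0;
}
```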
| 797 | 807 |
| 798 case Token::SAR: | 808 case Token::SAR: |
| 799 __ SmiShiftArithmeticRight(left, left, right); | 809 __ SmiShiftArithmeticRight(left, left, right); |
| 800 __ movq(rax, left); | 810 __ movq(rax, left); |
| 801 break; | 811 break; |
| 802 | 812 |
| 803 case Token::SHR: | 813 case Token::SHR: |
| 814 if (kSmiValueSize == 31) { | |
|
danno
2013/08/01 16:45:41
Same here.
haitao.feng
2013/08/02 09:35:51
Done.
| |
| 815 __ push(left); | |
| 816 __ push(right); | |
| 817 } | |
| 804 __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis); | 818 __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis); |
| 805 __ movq(rax, left); | 819 __ movq(rax, left); |
| 820 if (kSmiValueSize == 31) { | |
| 821 __ addq(rsp, Immediate(2 * kRegisterSize)); | |
| 822 } | |
| 806 break; | 823 break; |
| 807 | 824 |
| 808 default: | 825 default: |
| 809 UNREACHABLE(); | 826 UNREACHABLE(); |
| 810 } | 827 } |
| 811 | 828 |
| 812 // 5. Emit return of result in rax. Some operations have registers pushed. | 829 // 5. Emit return of result in rax. Some operations have registers pushed. |
| 813 __ ret(0); | 830 __ ret(0); |
| 814 | 831 |
| 815 if (use_fp_on_smis.is_linked()) { | 832 if (use_fp_on_smis.is_linked()) { |
|
danno
2013/08/01 16:45:41
All of this use_fp_on_smis code can be encapsulate…
haitao.feng
2013/08/02 09:35:51
The X64 implementation utilizes the fact that when S…
| |
| 816 // 6. For some operations emit inline code to perform floating point | 833 // 6. For some operations emit inline code to perform floating point |
| 817 // operations on known smis (e.g., if the result of the operation | 834 // operations on known smis (e.g., if the result of the operation |
| 818 // overflowed the smi range). | 835 // overflowed the smi range). |
| 819 __ bind(&use_fp_on_smis); | 836 __ bind(&use_fp_on_smis); |
| 820 if (op == Token::DIV || op == Token::MOD) { | 837 if (op == Token::DIV || op == Token::MOD) { |
| 821 // Restore left and right to rdx and rax. | 838 // Restore left and right to rdx and rax. |
| 822 __ movq(rdx, rcx); | 839 __ movq(rdx, rcx); |
| 823 __ movq(rax, rbx); | 840 __ movq(rax, rbx); |
| 824 } | 841 } |
| 825 | 842 |
| 826 if (generate_inline_heapnumber_results) { | 843 if (generate_inline_heapnumber_results) { |
| 827 __ AllocateHeapNumber(rcx, rbx, slow); | 844 if (kSmiValueSize == 32) { |
| 828 Comment perform_float(masm, "-- Perform float operation on smis"); | 845 __ AllocateHeapNumber(rcx, rbx, slow); |
| 829 if (op == Token::SHR) { | 846 Comment perform_float(masm, "-- Perform float operation on smis"); |
| 830 __ SmiToInteger32(left, left); | 847 if (op == Token::SHR) { |
| 831 __ cvtqsi2sd(xmm0, left); | 848 __ SmiToInteger32(left, left); |
| 849 __ cvtqsi2sd(xmm0, left); | |
| 850 } else { | |
| 851 FloatingPointHelper::LoadSSE2SmiOperands(masm); | |
| 852 switch (op) { | |
| 853 case Token::ADD: __ addsd(xmm0, xmm1); break; | |
| 854 case Token::SUB: __ subsd(xmm0, xmm1); break; | |
| 855 case Token::MUL: __ mulsd(xmm0, xmm1); break; | |
| 856 case Token::DIV: __ divsd(xmm0, xmm1); break; | |
| 857 default: UNREACHABLE(); | |
| 858 } | |
| 859 } | |
| 860 __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); | |
| 861 __ movq(rax, rcx); | |
| 862 __ ret(0); | |
| 832 } else { | 863 } else { |
| 833 FloatingPointHelper::LoadSSE2SmiOperands(masm); | 864 ASSERT(kSmiValueSize == 31); |
| 834 switch (op) { | 865 Label goto_slow; |
| 835 case Token::ADD: __ addsd(xmm0, xmm1); break; | 866 __ AllocateHeapNumber(rcx, rbx, &goto_slow); |
| 836 case Token::SUB: __ subsd(xmm0, xmm1); break; | 867 Comment perform_float(masm, "-- Perform float operation on smis"); |
| 837 case Token::MUL: __ mulsd(xmm0, xmm1); break; | 868 if (op == Token::SHL) { |
| 838 case Token::DIV: __ divsd(xmm0, xmm1); break; | 869 __ cvtlsi2sd(xmm0, left); |
| 839 default: UNREACHABLE(); | 870 // Drop left and right from the stack. |
| 871 __ addq(rsp, Immediate(2 * kRegisterSize)); | |
| 872 } else if (op == Token::SHR) { | |
| 873 // The value in left comes from MacroAssembler::SmiShiftLogicalRight. | |
| 874 // We allow the logical shift amounts: | |
| 875 // 0 : might turn a signed integer into an unsigned integer | |
| 876 // 1 : the result might be above 2^30 - 1 | |
| 877 __ cvtqsi2sd(xmm0, left); | |
| 878 // Drop left and right from the stack. | |
| 879 __ addq(rsp, Immediate(2 * kRegisterSize)); | |
| 880 } else { | |
| 881 FloatingPointHelper::LoadSSE2SmiOperands(masm); | |
| 882 switch (op) { | |
| 883 case Token::ADD: __ addsd(xmm0, xmm1); break; | |
| 884 case Token::SUB: __ subsd(xmm0, xmm1); break; | |
| 885 case Token::MUL: __ mulsd(xmm0, xmm1); break; | |
| 886 case Token::DIV: __ divsd(xmm0, xmm1); break; | |
| 887 default: UNREACHABLE(); | |
|
danno
2013/08/01 16:45:41
Also, I think you can share much of this code with…
haitao.feng
2013/08/02 09:35:51
Done.
| |
| 888 } | |
| 840 } | 889 } |
| 890 __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); | |
| 891 __ movq(rax, rcx); | |
| 892 __ ret(0); | |
| 893 | |
| 894 __ bind(&goto_slow); | |
| 895 if (op == Token::SHL || op == Token::SHR) { | |
| 896 // Restore left and right from stack. | |
| 897 __ pop(right); | |
| 898 __ pop(left); | |
| 899 } | |
| 900 __ jmp(slow); | |
| 841 } | 901 } |
| 842 __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); | |
| 843 __ movq(rax, rcx); | |
| 844 __ ret(0); | |
| 845 } else { | 902 } else { |
| 903 if (kSmiValueSize == 31 && (op == Token::SHL || op == Token::SHR)) { | |
| 904 // Restore left and right from stack. | |
| 905 __ pop(right); | |
| 906 __ pop(left); | |
| 907 } | |
| 846 __ jmp(&fail); | 908 __ jmp(&fail); |
| 847 } | 909 } |
| 848 } | 910 } |
| 849 | 911 |
| 850 // 7. Non-smi operands reach the end of the code generated by | 912 // 7. Non-smi operands reach the end of the code generated by |
| 851 // GenerateSmiCode, and fall through to subsequent code, | 913 // GenerateSmiCode, and fall through to subsequent code, |
| 852 // with the operands in rdx and rax. | 914 // with the operands in rdx and rax. |
| 853 // But first we check if non-smi values are HeapNumbers holding | 915 // But first we check if non-smi values are HeapNumbers holding |
| 854 // values that could be smi. | 916 // values that could be smi. |
| 855 __ bind(¬_smis); | 917 __ bind(¬_smis); |
| (...skipping 14 matching lines...) | |
| 870 __ jmp(&smi_values); | 932 __ jmp(&smi_values); |
| 871 __ bind(&fail); | 933 __ bind(&fail); |
| 872 } | 934 } |
| 873 | 935 |
| 874 | 936 |
| 875 static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, | 937 static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, |
| 876 Label* alloc_failure, | 938 Label* alloc_failure, |
| 877 OverwriteMode mode); | 939 OverwriteMode mode); |
| 878 | 940 |
| 879 | 941 |
| 880 static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm, | 942 static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm, |
|
danno
2013/08/01 16:45:41
start the parameter list on the next line with a 4…
haitao.feng
2013/08/02 09:35:51
Done.
| |
| 881 Label* allocation_failure, | 943 Label* allocation_failure, |
| 882 Label* non_numeric_failure, | 944 Label* non_numeric_failure, |
| 883 Token::Value op, | 945 Token::Value op, |
| 946 BinaryOpIC::TypeInfo | |
| 947 result_type, | |
| 948 Label* non_int32_failure, | |
| 884 OverwriteMode mode) { | 949 OverwriteMode mode) { |
| 885 switch (op) { | 950 switch (op) { |
| 886 case Token::ADD: | 951 case Token::ADD: |
| 887 case Token::SUB: | 952 case Token::SUB: |
| 888 case Token::MUL: | 953 case Token::MUL: |
| 889 case Token::DIV: { | 954 case Token::DIV: { |
| 890 FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure); | 955 FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure); |
| 891 | 956 |
| 892 switch (op) { | 957 switch (op) { |
| 893 case Token::ADD: __ addsd(xmm0, xmm1); break; | 958 case Token::ADD: __ addsd(xmm0, xmm1); break; |
| 894 case Token::SUB: __ subsd(xmm0, xmm1); break; | 959 case Token::SUB: __ subsd(xmm0, xmm1); break; |
| 895 case Token::MUL: __ mulsd(xmm0, xmm1); break; | 960 case Token::MUL: __ mulsd(xmm0, xmm1); break; |
| 896 case Token::DIV: __ divsd(xmm0, xmm1); break; | 961 case Token::DIV: __ divsd(xmm0, xmm1); break; |
| 897 default: UNREACHABLE(); | 962 default: UNREACHABLE(); |
| 898 } | 963 } |
| 964 | |
| 965 if (kSmiValueSize == 31 && non_int32_failure != NULL) { | |
| 966 if (result_type <= BinaryOpIC::INT32) { | |
| 967 __ cvttsd2si(kScratchRegister, xmm0); | |
| 968 __ cvtlsi2sd(xmm2, kScratchRegister); | |
| 969 __ pcmpeqd(xmm2, xmm0); | |
| 970 __ movmskpd(rcx, xmm2); | |
| 971 __ testl(rcx, Immediate(1)); | |
| 972 __ j(zero, non_int32_failure); | |
| 973 } | |
| 974 } | |
| 975 | |
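The cvttsd2si / cvtlsi2sd / pcmpeqd sequence above is a round-trip exactness test: truncate the double to an int32, convert back, and compare bit patterns. A rough C++ equivalent (illustrative only; names are mine, and the stub compares raw bits rather than using operator==):

```cpp
#include <cstdint>
#include <cstring>

// True if d is exactly representable as an int32. Comparing bit patterns
// (as pcmpeqd does) also rejects -0.0, whose round trip yields +0.0.
// For out-of-range doubles the C++ cast is undefined; cvttsd2si instead
// yields 0x80000000, which then fails the compare, handling that case.
static bool IsInt32Double(double d) {
  int32_t i = static_cast<int32_t>(d);   // cvttsd2si: truncating convert
  double back = static_cast<double>(i);  // cvtlsi2sd: convert back
  uint64_t a, b;
  std::memcpy(&a, &d, sizeof(a));
  std::memcpy(&b, &back, sizeof(b));
  return a == b;                         // bitwise compare
}
```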
| 899 BinaryOpStub_GenerateHeapResultAllocation( | 976 BinaryOpStub_GenerateHeapResultAllocation( |
| 900 masm, allocation_failure, mode); | 977 masm, allocation_failure, mode); |
| 901 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); | 978 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
| 902 __ ret(0); | 979 __ ret(0); |
| 903 break; | 980 break; |
| 904 } | 981 } |
| 905 case Token::MOD: { | 982 case Token::MOD: { |
| 906 // For MOD we jump to the allocation_failure label, to call runtime. | 983 // For MOD we jump to the allocation_failure label, to call runtime. |
| 907 __ jmp(allocation_failure); | 984 __ jmp(allocation_failure); |
| 908 break; | 985 break; |
| 909 } | 986 } |
| 910 case Token::BIT_OR: | 987 case Token::BIT_OR: |
| 911 case Token::BIT_AND: | 988 case Token::BIT_AND: |
| 912 case Token::BIT_XOR: | 989 case Token::BIT_XOR: |
| 913 case Token::SAR: | 990 case Token::SAR: |
| 914 case Token::SHL: | 991 case Token::SHL: |
| 915 case Token::SHR: { | 992 case Token::SHR: { |
| 916 Label non_smi_shr_result; | 993 Label non_smi_result; |
| 994 Label goto_non_numeric_failure; | |
| 917 Register heap_number_map = r9; | 995 Register heap_number_map = r9; |
| 996 if (kSmiValueSize == 31) { | |
| 997 // Push the arguments on the stack. | |
| 998 __ push(rdx); | |
| 999 __ push(rax); | |
| 1000 } | |
| 918 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 1001 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 919 FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure, | 1002 if (kSmiValueSize == 32) { |
| 920 heap_number_map); | 1003 FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure, |
| 1004 heap_number_map); | |
| 1005 } else { | |
| 1006 ASSERT(kSmiValueSize == 31); | |
| 1007 FloatingPointHelper::LoadAsIntegers(masm, &goto_non_numeric_failure, | |
| 1008 heap_number_map); | |
| 1009 } | |
| 921 switch (op) { | 1010 switch (op) { |
| 922 case Token::BIT_OR: __ orl(rax, rcx); break; | 1011 case Token::BIT_OR: __ orl(rax, rcx); break; |
| 923 case Token::BIT_AND: __ andl(rax, rcx); break; | 1012 case Token::BIT_AND: __ andl(rax, rcx); break; |
| 924 case Token::BIT_XOR: __ xorl(rax, rcx); break; | 1013 case Token::BIT_XOR: __ xorl(rax, rcx); break; |
| 925 case Token::SAR: __ sarl_cl(rax); break; | 1014 case Token::SAR: __ sarl_cl(rax); break; |
| 926 case Token::SHL: __ shll_cl(rax); break; | 1015 case Token::SHL: __ shll_cl(rax); break; |
| 927 case Token::SHR: { | 1016 case Token::SHR: { |
| 928 __ shrl_cl(rax); | 1017 __ shrl_cl(rax); |
| 929 // Check if result is negative. This can only happen for a shift | 1018 if (kSmiValueSize == 32) { |
| 930 // by zero. | 1019 // Check if result is negative. This can only happen for a shift |
| 931 __ testl(rax, rax); | 1020 // by zero. |
| 932 __ j(negative, &non_smi_shr_result); | 1021 __ testl(rax, rax); |
| 1022 __ j(negative, &non_smi_result); | |
| 1023 } | |
| 933 break; | 1024 break; |
| 934 } | 1025 } |
| 935 default: UNREACHABLE(); | 1026 default: UNREACHABLE(); |
| 936 } | 1027 } |
| 937 STATIC_ASSERT(kSmiValueSize == 32); | 1028 |
| 1029 if (kSmiValueSize == 31) { | |
| 1030 if (op == Token::SHR) { | |
| 1031 __ JumpIfUIntNotValidSmiValue(rax, &non_smi_result, Label::kNear); | |
| 1032 } else { | |
| 1033 __ JumpIfNotValidSmiValue(rax, &non_smi_result, Label::kNear); | |
| 1034 } | |
| 1035 // Drop saved arguments. | |
| 1036 __ addq(rsp, Immediate(2 * kRegisterSize)); | |
| 1037 } | |
| 1038 | |
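With a 31-bit payload, a signed result must lie in [-2^30, 2^30 - 1] and SHR's unsigned result in [0, 2^30 - 1], which is what the two jump helpers test before tagging. Illustrative predicates (my naming, not the V8 helpers):

```cpp
#include <cstdint>

// What JumpIfNotValidSmiValue effectively checks for a 31-bit payload.
static bool IsValidSmi31(int32_t value) {
  return value >= -(1 << 30) && value <= (1 << 30) - 1;
}

// What JumpIfUIntNotValidSmiValue checks: SHR yields an unsigned value.
static bool IsValidUSmi31(uint32_t value) {
  return value <= static_cast<uint32_t>((1 << 30) - 1);
}
```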
| 938 // Tag smi result and return. | 1039 // Tag smi result and return. |
| 939 __ Integer32ToSmi(rax, rax); | 1040 __ Integer32ToSmi(rax, rax); |
| 940 __ Ret(); | 1041 __ Ret(); |
| 941 | 1042 |
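A sketch of what Integer32ToSmi does under each layout, assuming V8's usual schemes (payload in the upper 32 bits for 32-bit smis on x64, payload shifted left by one for 31-bit smis); this is an assumption for illustration, not the macro assembler code:

```cpp
#include <cstdint>

static int64_t TagSmi32(int32_t value) {
  return static_cast<int64_t>(value) << 32;  // payload in the high word
}

static int64_t TagSmi31(int32_t value) {
  return static_cast<int64_t>(value) << 1;   // payload shifted by one bit
}
```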
| 942 // Logical shift right can produce an unsigned int32 that is not | 1043 if (kSmiValueSize == 31) { |
| 943 // an int32, and so is not in the smi range. Allocate a heap number | 1044 __ bind(&goto_non_numeric_failure); |
| 944 // in that case. | 1045 // Restore arguments. |
| 945 if (op == Token::SHR) { | 1046 __ pop(rax); |
| 946 __ bind(&non_smi_shr_result); | 1047 __ pop(rdx); |
| 1048 __ jmp(non_numeric_failure); | |
| 1049 } | |
| 1050 | |
| 1051 if (kSmiValueSize == 32) { | |
| 1052 if (op == Token::SHR) { | |
| 1053 // Logical shift right can produce an unsigned int32 that is not | |
| 1054 // an int32, and so is not in the smi range. Allocate a heap number | |
| 1055 // in that case. | |
| 1056 __ bind(&non_smi_result); | |
| 1057 Label allocation_failed; | |
| 1058 __ movl(rbx, rax); // rbx holds result value (uint32 value as int64). | |
| 1059 // Allocate heap number in new space. | |
| 1060 // Not using AllocateHeapNumber macro in order to reuse | |
| 1061 // already loaded heap_number_map. | |
| 1062 __ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed, | |
| 1063 TAG_OBJECT); | |
| 1064 // Set the map. | |
| 1065 __ AssertRootValue(heap_number_map, | |
| 1066 Heap::kHeapNumberMapRootIndex, | |
| 1067 "HeapNumberMap register clobbered."); | |
| 1068 __ movq(FieldOperand(rax, HeapObject::kMapOffset), | |
| 1069 heap_number_map); | |
| 1070 __ cvtqsi2sd(xmm0, rbx); | |
| 1071 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); | |
| 1072 __ Ret(); | |
| 1073 | |
| 1074 __ bind(&allocation_failed); | |
| 1075 // We need tagged values in rdx and rax for the following code, | |
| 1076 // not int32 in rax and rcx. | |
| 1077 __ Integer32ToSmi(rax, rcx); | |
| 1078 __ Integer32ToSmi(rdx, rbx); | |
| 1079 __ jmp(allocation_failure); | |
| 1080 } | |
| 1081 } else { | |
| 1082 ASSERT(kSmiValueSize == 31); | |
| 1083 __ bind(&non_smi_result); | |
| 947 Label allocation_failed; | 1084 Label allocation_failed; |
| 948 __ movl(rbx, rax); // rbx holds result value (uint32 value as int64). | 1085 __ movl(rbx, rax); // rbx holds result value (uint32 value as int64). |
| 949 // Allocate heap number in new space. | 1086 // Allocate heap number in new space. |
| 950 // Not using AllocateHeapNumber macro in order to reuse | 1087 // Not using AllocateHeapNumber macro in order to reuse |
| 951 // already loaded heap_number_map. | 1088 // already loaded heap_number_map. |
| 952 __ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed, | 1089 Label skip_allocation; |
| 953 TAG_OBJECT); | 1090 switch (mode) { |
| 1091 case OVERWRITE_LEFT: { | |
| 1092 __ movq(rax, Operand(rsp, 1 * kRegisterSize)); | |
| 1093 __ JumpIfNotSmi(rax, &skip_allocation); | |
| 1094 __ Allocate(HeapNumber::kSize, rax, r8, no_reg, &allocation_failed, | |
| 1095 TAG_OBJECT); | |
| 1096 __ movq(FieldOperand(rax, HeapObject::kMapOffset), | |
| 1097 heap_number_map); | |
| 1098 __ bind(&skip_allocation); | |
| 1099 break; | |
| 1100 } | |
| 1101 case OVERWRITE_RIGHT: | |
| 1102 __ movq(rax, Operand(rsp, 0 * kRegisterSize)); | |
| 1103 __ JumpIfNotSmi(rax, &skip_allocation); | |
| 1104 // Fall through! | |
| 1105 case NO_OVERWRITE: | |
| 1106 __ Allocate(HeapNumber::kSize, rax, r8, no_reg, &allocation_failed, | |
| 1107 TAG_OBJECT); | |
| 1108 __ movq(FieldOperand(rax, HeapObject::kMapOffset), | |
| 1109 heap_number_map); | |
| 1110 __ bind(&skip_allocation); | |
| 1111 break; | |
| 1112 default: UNREACHABLE(); | |
| 1113 } | |
| 954 // Set the map. | 1114 // Set the map. |
| 955 __ AssertRootValue(heap_number_map, | 1115 __ AssertRootValue(heap_number_map, |
| 956 Heap::kHeapNumberMapRootIndex, | 1116 Heap::kHeapNumberMapRootIndex, |
| 957 "HeapNumberMap register clobbered."); | 1117 "HeapNumberMap register clobbered."); |
| 958 __ movq(FieldOperand(rax, HeapObject::kMapOffset), | 1118 if (op == Token::SHR) { |
| 959 heap_number_map); | 1119 __ cvtqsi2sd(xmm0, rbx); |
| 960 __ cvtqsi2sd(xmm0, rbx); | 1120 } else { |
| 1121 // All other operations return a signed int32, so we | |
| 1122 // use cvtlsi2sd here to retain the sign bit. | |
| 1123 __ cvtlsi2sd(xmm0, rbx); | |
| 1124 } | |
| 961 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); | 1125 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
| 1126 // Drop saved arguments. | |
| 1127 __ addq(rsp, Immediate(2 * kRegisterSize)); | |
| 962 __ Ret(); | 1128 __ Ret(); |
| 963 | 1129 |
| 964 __ bind(&allocation_failed); | 1130 __ bind(&allocation_failed); |
| 965 // We need tagged values in rdx and rax for the following code, | 1131 // Restore arguments from stack. |
| 966 // not int32 in rax and rcx. | 1132 __ pop(rax); |
| 967 __ Integer32ToSmi(rax, rcx); | 1133 __ pop(rdx); |
| 968 __ Integer32ToSmi(rdx, rbx); | |
| 969 __ jmp(allocation_failure); | 1134 __ jmp(allocation_failure); |
| 970 } | 1135 } |
| 971 break; | 1136 break; |
| 972 } | 1137 } |
| 973 default: UNREACHABLE(); break; | 1138 default: UNREACHABLE(); break; |
| 974 } | 1139 } |
| 975 // No fall-through from this generated code. | 1140 // No fall-through from this generated code. |
| 976 if (FLAG_debug_code) { | 1141 if (FLAG_debug_code) { |
| 977 __ Abort("Unexpected fall-through in " | 1142 __ Abort("Unexpected fall-through in " |
| 978 "BinaryStub_GenerateFloatingPointCode."); | 1143 "BinaryStub_GenerateFloatingPointCode."); |
| (...skipping 77 matching lines...) | |
| 1056 FrameScope scope(masm, StackFrame::INTERNAL); | 1221 FrameScope scope(masm, StackFrame::INTERNAL); |
| 1057 GenerateRegisterArgsPush(masm); | 1222 GenerateRegisterArgsPush(masm); |
| 1058 GenerateCallRuntime(masm); | 1223 GenerateCallRuntime(masm); |
| 1059 } | 1224 } |
| 1060 __ Ret(); | 1225 __ Ret(); |
| 1061 } | 1226 } |
| 1062 } | 1227 } |
| 1063 | 1228 |
| 1064 | 1229 |
| 1065 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { | 1230 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
| 1066 // The int32 case is identical to the Smi case. We avoid creating this | 1231 if (kSmiValueSize == 32) { |
| 1067 // ic state on x64. | 1232 // The int32 case is identical to the Smi case. We avoid creating this |
| 1068 UNREACHABLE(); | 1233 // ic state on x64. |
| 1234 UNREACHABLE(); | |
| 1235 } else { | |
| 1236 ASSERT(kSmiValueSize == 31); | |
| 1237 ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); | |
| 1238 | |
| 1239 Label gc_required, not_number, not_int32; | |
| 1240 BinaryOpStub_GenerateFloatingPointCode(masm, &gc_required, &not_number, | |
| 1241 op_, result_type_, &not_int32, mode_); | |
|
danno
2013/08/01 16:45:41
strange indentation
haitao.feng
2013/08/02 09:35:51
Done.
| |
| 1242 | |
| 1243 __ bind(¬_number); | |
| 1244 __ bind(¬_int32); | |
| 1245 GenerateTypeTransition(masm); | |
| 1246 | |
| 1247 __ bind(&gc_required); | |
| 1248 { | |
| 1249 FrameScope scope(masm, StackFrame::INTERNAL); | |
| 1250 GenerateRegisterArgsPush(masm); | |
| 1251 GenerateCallRuntime(masm); | |
| 1252 } | |
| 1253 __ Ret(); | |
| 1254 } | |
| 1069 } | 1255 } |
| 1070 | 1256 |
| 1071 | 1257 |
| 1072 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { | 1258 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { |
| 1073 Label call_runtime; | 1259 Label call_runtime; |
| 1074 ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); | 1260 ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); |
| 1075 ASSERT(op_ == Token::ADD); | 1261 ASSERT(op_ == Token::ADD); |
| 1076 // If both arguments are strings, call the string add stub. | 1262 // If both arguments are strings, call the string add stub. |
| 1077 // Otherwise, do a transition. | 1263 // Otherwise, do a transition. |
| 1078 | 1264 |
| (...skipping 85 matching lines...) | |
| 1164 // or the right operand. For precise type feedback, patch the IC | 1350 // or the right operand. For precise type feedback, patch the IC |
| 1165 // again if this changes. | 1351 // again if this changes. |
| 1166 if (left_type_ == BinaryOpIC::SMI) { | 1352 if (left_type_ == BinaryOpIC::SMI) { |
| 1167 BinaryOpStub_CheckSmiInput(masm, rdx, &not_number); | 1353 BinaryOpStub_CheckSmiInput(masm, rdx, &not_number); |
| 1168 } | 1354 } |
| 1169 if (right_type_ == BinaryOpIC::SMI) { | 1355 if (right_type_ == BinaryOpIC::SMI) { |
| 1170 BinaryOpStub_CheckSmiInput(masm, rax, &not_number); | 1356 BinaryOpStub_CheckSmiInput(masm, rax, &not_number); |
| 1171 } | 1357 } |
| 1172 | 1358 |
| 1173 BinaryOpStub_GenerateFloatingPointCode( | 1359 BinaryOpStub_GenerateFloatingPointCode( |
| 1174 masm, &gc_required, &not_number, op_, mode_); | 1360 masm, &gc_required, &not_number, op_, result_type_, NULL, mode_); |
| 1175 | 1361 |
| 1176 __ bind(¬_number); | 1362 __ bind(¬_number); |
| 1177 GenerateTypeTransition(masm); | 1363 GenerateTypeTransition(masm); |
| 1178 | 1364 |
| 1179 __ bind(&gc_required); | 1365 __ bind(&gc_required); |
| 1180 { | 1366 { |
| 1181 FrameScope scope(masm, StackFrame::INTERNAL); | 1367 FrameScope scope(masm, StackFrame::INTERNAL); |
| 1182 GenerateRegisterArgsPush(masm); | 1368 GenerateRegisterArgsPush(masm); |
| 1183 GenerateCallRuntime(masm); | 1369 GenerateCallRuntime(masm); |
| 1184 } | 1370 } |
| 1185 __ Ret(); | 1371 __ Ret(); |
| 1186 } | 1372 } |
| 1187 | 1373 |
| 1188 | 1374 |
| 1189 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { | 1375 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
| 1190 Label call_runtime, call_string_add_or_runtime; | 1376 Label call_runtime, call_string_add_or_runtime; |
| 1191 | 1377 |
| 1192 BinaryOpStub_GenerateSmiCode( | 1378 BinaryOpStub_GenerateSmiCode( |
| 1193 masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_); | 1379 masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_); |
| 1194 | 1380 |
| 1195 BinaryOpStub_GenerateFloatingPointCode( | 1381 BinaryOpStub_GenerateFloatingPointCode( |
| 1196 masm, &call_runtime, &call_string_add_or_runtime, op_, mode_); | 1382 masm, &call_runtime, &call_string_add_or_runtime, op_, |
| 1383 result_type_, NULL, mode_); | |
| 1197 | 1384 |
| 1198 __ bind(&call_string_add_or_runtime); | 1385 __ bind(&call_string_add_or_runtime); |
| 1199 if (op_ == Token::ADD) { | 1386 if (op_ == Token::ADD) { |
| 1200 GenerateAddStrings(masm); | 1387 GenerateAddStrings(masm); |
| 1201 } | 1388 } |
| 1202 | 1389 |
| 1203 __ bind(&call_runtime); | 1390 __ bind(&call_runtime); |
| 1204 { | 1391 { |
| 1205 FrameScope scope(masm, StackFrame::INTERNAL); | 1392 FrameScope scope(masm, StackFrame::INTERNAL); |
| 1206 GenerateRegisterArgsPush(masm); | 1393 GenerateRegisterArgsPush(masm); |
| (...skipping 530 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1737 // Convert HeapNumber to smi if possible. | 1924 // Convert HeapNumber to smi if possible. |
| 1738 __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset)); | 1925 __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset)); |
| 1739 __ movq(scratch2, xmm0); | 1926 __ movq(scratch2, xmm0); |
| 1740 __ cvttsd2siq(smi_result, xmm0); | 1927 __ cvttsd2siq(smi_result, xmm0); |
| 1741 // Check if conversion was successful by converting back and | 1928 // Check if conversion was successful by converting back and |
| 1742 // comparing to the original double's bits. | 1929 // comparing to the original double's bits. |
| 1743 __ cvtlsi2sd(xmm1, smi_result); | 1930 __ cvtlsi2sd(xmm1, smi_result); |
| 1744 __ movq(kScratchRegister, xmm1); | 1931 __ movq(kScratchRegister, xmm1); |
| 1745 __ cmpq(scratch2, kScratchRegister); | 1932 __ cmpq(scratch2, kScratchRegister); |
| 1746 __ j(not_equal, on_not_smis); | 1933 __ j(not_equal, on_not_smis); |
| 1934 __ JumpIfNotValidSmiValue(smi_result, on_not_smis); | |
| 1747 __ Integer32ToSmi(first, smi_result); | 1935 __ Integer32ToSmi(first, smi_result); |
| 1748 | 1936 |
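The new JumpIfNotValidSmiValue guard matters because a double can survive the round-trip comparison exactly and still exceed the 31-bit smi range. A compact sketch of the whole conversion (illustrative only, not V8 code):

```cpp
#include <cstdint>

// Double -> 31-bit smi payload, mirroring the sequence above: truncate
// (cvttsd2siq), round-trip compare, then the newly added range check.
static bool DoubleToSmi31(double d, int32_t* payload) {
  if (!(d >= -2147483648.0 && d < 2147483648.0)) return false;  // avoid UB
  int64_t i = static_cast<int64_t>(d);
  if (static_cast<double>(i) != d) return false;               // not exact
  if (i < -(1LL << 30) || i > (1LL << 30) - 1) return false;   // smi range
  *payload = static_cast<int32_t>(i);
  return true;
}
```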
| 1749 __ bind(&first_done); | 1937 __ bind(&first_done); |
| 1750 __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done); | 1938 __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done); |
| 1751 __ bind(&first_smi); | 1939 __ bind(&first_smi); |
| 1752 __ AssertNotSmi(second); | 1940 __ AssertNotSmi(second); |
| 1753 __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map); | 1941 __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map); |
| 1754 __ j(not_equal, | 1942 __ j(not_equal, |
| 1755 (convert_undefined == CONVERT_UNDEFINED_TO_ZERO) | 1943 (convert_undefined == CONVERT_UNDEFINED_TO_ZERO) |
| 1756 ? &maybe_undefined_second | 1944 ? &maybe_undefined_second |
| 1757 : on_not_smis); | 1945 : on_not_smis); |
| 1758 // Convert second to smi, if possible. | 1946 // Convert second to smi, if possible. |
| 1759 __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset)); | 1947 __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset)); |
| 1760 __ movq(scratch2, xmm0); | 1948 __ movq(scratch2, xmm0); |
| 1761 __ cvttsd2siq(smi_result, xmm0); | 1949 __ cvttsd2siq(smi_result, xmm0); |
| 1762 __ cvtlsi2sd(xmm1, smi_result); | 1950 __ cvtlsi2sd(xmm1, smi_result); |
| 1763 __ movq(kScratchRegister, xmm1); | 1951 __ movq(kScratchRegister, xmm1); |
| 1764 __ cmpq(scratch2, kScratchRegister); | 1952 __ cmpq(scratch2, kScratchRegister); |
| 1765 __ j(not_equal, on_not_smis); | 1953 __ j(not_equal, on_not_smis); |
| 1954 __ JumpIfNotValidSmiValue(smi_result, on_not_smis); | |
| 1766 __ Integer32ToSmi(second, smi_result); | 1955 __ Integer32ToSmi(second, smi_result); |
| 1767 if (on_success != NULL) { | 1956 if (on_success != NULL) { |
| 1768 __ jmp(on_success); | 1957 __ jmp(on_success); |
| 1769 } else { | 1958 } else { |
| 1770 __ jmp(&done); | 1959 __ jmp(&done); |
| 1771 } | 1960 } |
| 1772 | 1961 |
| 1773 __ bind(&maybe_undefined_first); | 1962 __ bind(&maybe_undefined_first); |
| 1774 __ CompareRoot(first, Heap::kUndefinedValueRootIndex); | 1963 __ CompareRoot(first, Heap::kUndefinedValueRootIndex); |
| 1775 __ j(not_equal, on_not_smis); | 1964 __ j(not_equal, on_not_smis); |
| (...skipping 2789 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 4565 // by the code above. | 4754 // by the code above. |
| 4566 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { | 4755 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { |
| 4567 __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset)); | 4756 __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset)); |
| 4568 __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset)); | 4757 __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset)); |
| 4569 } | 4758 } |
| 4570 // Get the instance types of the two strings as they will be needed soon. | 4759 // Get the instance types of the two strings as they will be needed soon. |
| 4571 __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset)); | 4760 __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset)); |
| 4572 __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset)); | 4761 __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset)); |
| 4573 | 4762 |
| 4574 // Look at the length of the result of adding the two strings. | 4763 // Look at the length of the result of adding the two strings. |
| 4575 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2); | 4764 if (kSmiValueSize == 32) { |
| 4576 __ SmiAdd(rbx, rbx, rcx); | 4765 ASSERT(String::kMaxLength <= Smi::kMaxValue / 2); |
| 4766 __ SmiAdd(rbx, rbx, rcx); | |
|
danno
2013/08/01 16:45:41
As noted above, I think you should change SmiAdd t…
haitao.feng
2013/08/02 09:35:51
There are two SmiAdd interfaces. SmiAdd (with no f…
| |
| 4767 } else { | |
| 4768 ASSERT(kSmiValueSize == 31); | |
| 4769 __ SmiAdd(rbx, rbx, rcx, &call_runtime); | |
| 4770 } | |
| 4771 | |
| 4577 // Use the string table when adding two one character strings, as it | 4772 // Use the string table when adding two one character strings, as it |
| 4578 // helps later optimizations to return an internalized string here. | 4773 // helps later optimizations to return an internalized string here. |
| 4579 __ SmiCompare(rbx, Smi::FromInt(2)); | 4774 __ SmiCompare(rbx, Smi::FromInt(2)); |
| 4580 __ j(not_equal, &longer_than_two); | 4775 __ j(not_equal, &longer_than_two); |
| 4581 | 4776 |
| 4582 // Check that both strings are non-external ASCII strings. | 4777 // Check that both strings are non-external ASCII strings. |
| 4583 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx, | 4778 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx, |
| 4584 &call_runtime); | 4779 &call_runtime); |
| 4585 | 4780 |
| 4586 // Get the two characters forming the sub string. | 4781 // Get the two characters forming the sub string. |
| (...skipping 934 matching lines...) | |
| 5521 __ IncrementCounter(counters->string_compare_native(), 1); | 5716 __ IncrementCounter(counters->string_compare_native(), 1); |
| 5522 __ ret(2 * kPointerSize); | 5717 __ ret(2 * kPointerSize); |
| 5523 | 5718 |
| 5524 __ bind(¬_same); | 5719 __ bind(¬_same); |
| 5525 | 5720 |
| 5526 // Check that both are sequential ASCII strings. | 5721 // Check that both are sequential ASCII strings. |
| 5527 __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime); | 5722 __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime); |
| 5528 | 5723 |
| 5529 // Inline comparison of ASCII strings. | 5724 // Inline comparison of ASCII strings. |
| 5530 __ IncrementCounter(counters->string_compare_native(), 1); | 5725 __ IncrementCounter(counters->string_compare_native(), 1); |
| 5531 // Drop arguments from the stack | 5726 // Drop saved arguments. |
| 5532 __ pop(rcx); | 5727 __ pop(rcx); |
| 5533 __ addq(rsp, Immediate(2 * kPointerSize)); | 5728 __ addq(rsp, Immediate(2 * kPointerSize)); |
| 5534 __ push(rcx); | 5729 __ push(rcx); |
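The pop / addq / push sequence above removes the two stack arguments while preserving the return address on top. A toy model of the same stack manipulation (stack top is the vector's back; purely illustrative):

```cpp
#include <cstdint>
#include <vector>

int main() {
  std::vector<uint64_t> stack = {/*arg1*/ 1, /*arg2*/ 2, /*ret*/ 0xBEEF};
  uint64_t ret = stack.back(); stack.pop_back();  // pop rcx
  stack.pop_back(); stack.pop_back();             // addq rsp, 2 * kPointerSize
  stack.push_back(ret);                           // push rcx
  return stack.back() == 0xBEEF ? 0 : 1;          // return address restored
}
```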
| 5535 GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8); | 5730 GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8); |
| 5536 | 5731 |
| 5537 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) | 5732 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) |
| 5538 // tagged as a small integer. | 5733 // tagged as a small integer. |
| 5539 __ bind(&runtime); | 5734 __ bind(&runtime); |
| 5540 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); | 5735 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); |
| 5541 } | 5736 } |
| (...skipping 1274 matching lines...) | |
| 6816 __ bind(&fast_elements_case); | 7011 __ bind(&fast_elements_case); |
| 6817 GenerateCase(masm, FAST_ELEMENTS); | 7012 GenerateCase(masm, FAST_ELEMENTS); |
| 6818 } | 7013 } |
| 6819 | 7014 |
| 6820 | 7015 |
| 6821 #undef __ | 7016 #undef __ |
| 6822 | 7017 |
| 6823 } } // namespace v8::internal | 7018 } } // namespace v8::internal |
| 6824 | 7019 |
| 6825 #endif // V8_TARGET_ARCH_X64 | 7020 #endif // V8_TARGET_ARCH_X64 |