Chromium Code Reviews

Unified diff: src/x64/code-stubs-x64.cc

Issue 21014003: Optionally use 31-bit SMI values on 64-bit systems (Closed). Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Introduce SmiFunctionInvoker to abstract the difference between FullCodeGen and LCodeGen. Created 7 years, 4 months ago.
 // Copyright 2013 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 703 matching lines...)
 
 static void BinaryOpStub_GenerateSmiCode(
     MacroAssembler* masm,
     Label* slow,
     BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
     Token::Value op) {
 
   // Arguments to BinaryOpStub are in rdx and rax.
   const Register left = rdx;
   const Register right = rax;
+  const Register shift_op_result = r9;
 
-  // We only generate heapnumber answers for overflowing calculations
-  // for the four basic arithmetic operations and logical right shift by 0.
+  // We only generate heapnumber answers for overflowing calculations.
   bool generate_inline_heapnumber_results =
       (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) &&
-      (op == Token::ADD || op == Token::SUB ||
-       op == Token::MUL || op == Token::DIV || op == Token::SHR);
+      MacroAssembler::IsUnsafeSmiOperator(op);
 
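For context on why these operators can leave the smi range: a smi stores a small signed integer in a tagged word, so a 31-bit payload only covers [-2^30, 2^30 - 1] instead of the 32-bit [-2^31, 2^31 - 1]. A standalone sketch of the range arithmetic (ordinary C++, not V8 source; SmiMin, SmiMax, and IsValidSmi are illustrative helpers, not V8 API):

    #include <cstdint>
    #include <cstdio>

    // Smallest/largest value representable in an n-bit signed smi payload.
    int64_t SmiMin(int bits) { return -(INT64_C(1) << (bits - 1)); }
    int64_t SmiMax(int bits) { return (INT64_C(1) << (bits - 1)) - 1; }

    bool IsValidSmi(int64_t v, int bits) {
      return v >= SmiMin(bits) && v <= SmiMax(bits);
    }

    int main() {
      // 1 << 30 fits a 32-bit payload but not a 31-bit one, which is why
      // SHL now needs the use_fp_on_smis bailout under 31-bit smis.
      int64_t v = INT64_C(1) << 30;
      printf("%lld fits 32-bit: %d, fits 31-bit: %d\n",
             (long long)v, IsValidSmi(v, 32), IsValidSmi(v, 31));
    }

This is also why the patch replaces the hard-coded operator list with a predicate: the set of "unsafe" operators grows once SHL can overflow.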
   // Smi check of both operands. If op is BIT_OR, the check is delayed
   // until after the OR operation.
   Label not_smis;
   Label use_fp_on_smis;
   Label fail;
 
   if (op != Token::BIT_OR) {
     Comment smi_check_comment(masm, "-- Smi check arguments");
     __ JumpIfNotBothSmi(left, right, &not_smis);
(...skipping 42 matching lines...)
       ASSERT(right.is(rax));
       __ SmiXor(right, right, left);  // BIT_XOR is commutative.
       break;
 
     case Token::BIT_AND:
       ASSERT(right.is(rax));
       __ SmiAnd(right, right, left);  // BIT_AND is commutative.
       break;
 
     case Token::SHL:
-      __ SmiShiftLeft(left, left, right);
-      __ movq(rax, left);
+      __ SmiShiftLeft(shift_op_result, left, right, &use_fp_on_smis);
+      __ movq(rax, shift_op_result);
       break;
 
     case Token::SAR:
-      __ SmiShiftArithmeticRight(left, left, right);
-      __ movq(rax, left);
+      __ SmiShiftArithmeticRight(shift_op_result, left, right);
+      __ movq(rax, shift_op_result);
       break;
 
     case Token::SHR:
-      __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
-      __ movq(rax, left);
+      __ SmiShiftLogicalRight(shift_op_result, left, right, &use_fp_on_smis);
+      __ movq(rax, shift_op_result);
       break;
 
     default:
       UNREACHABLE();
   }
 
   // 5. Emit return of result in rax. Some operations have registers pushed.
   __ ret(0);
 
   if (use_fp_on_smis.is_linked()) {
     // 6. For some operations emit inline code to perform floating point
     //    operations on known smis (e.g., if the result of the operation
     //    overflowed the smi range).
     __ bind(&use_fp_on_smis);
     if (op == Token::DIV || op == Token::MOD) {
       // Restore left and right to rdx and rax.
       __ movq(rdx, rcx);
       __ movq(rax, rbx);
     }
 
     if (generate_inline_heapnumber_results) {
       __ AllocateHeapNumber(rcx, rbx, slow);
       Comment perform_float(masm, "-- Perform float operation on smis");
       if (op == Token::SHR) {
-        __ SmiToInteger32(left, left);
-        __ cvtqsi2sd(xmm0, left);
+        __ cvtqsi2sd(xmm0, shift_op_result);
+      } else if (op == Token::SHL) {
+        ASSERT(kSmiValueSize == 31);
+        __ cvtlsi2sd(xmm0, shift_op_result);
       } else {
         FloatingPointHelper::LoadSSE2SmiOperands(masm);
         switch (op) {
           case Token::ADD: __ addsd(xmm0, xmm1); break;
           case Token::SUB: __ subsd(xmm0, xmm1); break;
           case Token::MUL: __ mulsd(xmm0, xmm1); break;
           case Token::DIV: __ divsd(xmm0, xmm1); break;
           default: UNREACHABLE();
         }
       }
       __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
       __ movq(rax, rcx);
       __ ret(0);
     } else {
       __ jmp(&fail);
     }
   }
 
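The choice of conversion instruction in this hunk matters: SHR yields an unsigned 32-bit value, so it must be widened to 64 bits before a signed integer-to-double conversion (cvtqsi2sd), while SHL under 31-bit smis yields a signed int32, where a 32-bit signed conversion (cvtlsi2sd) preserves the sign. A standalone sketch of the difference (ordinary C++, not V8 source; values are illustrative):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // SHR produces an unsigned 32-bit value; e.g. (-1) >>> 0 is 4294967295.
      uint32_t shr_result = 0xFFFFFFFFu;
      // SHL under 31-bit smis produces a signed int32; e.g. -1 << 1 is -2.
      int32_t shl_result = -2;

      // cvtqsi2sd on the zero-extended register amounts to a signed 64-bit
      // conversion of the widened value, giving +4294967295.0, not -1.0.
      double d_shr = (double)(int64_t)(uint64_t)shr_result;

      // cvtlsi2sd is a signed 32-bit conversion and keeps the sign bit.
      double d_shl = (double)shl_result;

      printf("%.1f %.1f\n", d_shr, d_shl);  // 4294967295.0 -2.0
    }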
(...skipping 20 matching lines...)
     __ jmp(&smi_values);
     __ bind(&fail);
 }
 
 
 static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
                                                       Label* alloc_failure,
                                                       OverwriteMode mode);
 
 
-static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
-                                                   Label* allocation_failure,
-                                                   Label* non_numeric_failure,
-                                                   Token::Value op,
-                                                   OverwriteMode mode) {
+static void BinaryOpStub_GenerateFloatingPointCode(
+    MacroAssembler* masm,
+    Label* allocation_failure,
+    Label* non_numeric_failure,
+    Token::Value op,
+    BinaryOpIC::TypeInfo result_type,
+    Label* non_int32_failure,
+    OverwriteMode mode) {
   switch (op) {
     case Token::ADD:
     case Token::SUB:
     case Token::MUL:
     case Token::DIV: {
       FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
 
       switch (op) {
         case Token::ADD: __ addsd(xmm0, xmm1); break;
         case Token::SUB: __ subsd(xmm0, xmm1); break;
         case Token::MUL: __ mulsd(xmm0, xmm1); break;
         case Token::DIV: __ divsd(xmm0, xmm1); break;
         default: UNREACHABLE();
       }
+
+      if (kSmiValueSize == 31 && non_int32_failure != NULL) {
+        if (result_type <= BinaryOpIC::INT32) {
danno 2013/08/07 18:41:26  Why is UNINITIALIZED also included? It's much cleare…
haitao.feng 2013/08/12 09:54:24  Actually this code was taken from https://chromium…
+          __ cvttsd2si(kScratchRegister, xmm0);
danno 2013/08/07 18:41:26  Don't you need to check that bit 30 and 31 of scra…
haitao.feng 2013/08/12 09:54:24  The test here is for Int32, instead of SMI.
+          __ cvtlsi2sd(xmm2, kScratchRegister);
+          __ pcmpeqd(xmm2, xmm0);
+          __ movmskpd(rcx, xmm2);
+          __ testl(rcx, Immediate(1));
+          __ j(zero, non_int32_failure);
danno 2013/08/07 18:41:26  Again, why is this non_int32_failure and not non_i…
haitao.feng 2013/08/12 09:54:24  The main logic is at https://code.google.com/p/v8/…
+        }
+      }
+
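The new guard asks whether the double in xmm0 is exactly representable as an int32: truncate (cvttsd2si), convert back (cvtlsi2sd), and compare bit patterns (pcmpeqd/movmskpd). A portable sketch of the same round-trip test (ordinary C++, not V8 source; the explicit range guard stands in for the hardware's out-of-range "integer indefinite" result, which never compares equal):

    #include <cstdint>
    #include <cstdio>

    // Round-trip test mirroring cvttsd2si / cvtlsi2sd / pcmpeqd above.
    bool IsExactInt32(double d) {
      if (!(d >= -2147483648.0 && d <= 2147483647.0)) return false;
      int32_t i = (int32_t)d;  // truncation toward zero, like cvttsd2si
      return (double)i == d;   // NB: numeric ==, so -0.0 passes here; the
                               // stub's bitwise pcmpeqd rejects -0.0.
    }

    int main() {
      printf("%d %d %d\n",
             IsExactInt32(42.0),   // 1
             IsExactInt32(42.5),   // 0: fractional part lost in truncation
             IsExactInt32(4e9));   // 0: outside int32 range
    }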
       BinaryOpStub_GenerateHeapResultAllocation(
           masm, allocation_failure, mode);
       __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
       __ ret(0);
       break;
     }
     case Token::MOD: {
       // For MOD we jump to the allocation_failure label, to call runtime.
       __ jmp(allocation_failure);
       break;
     }
     case Token::BIT_OR:
     case Token::BIT_AND:
     case Token::BIT_XOR:
     case Token::SAR:
     case Token::SHL:
     case Token::SHR: {
-      Label non_smi_shr_result;
+      Label non_smi_result;
       Register heap_number_map = r9;
+      if (kSmiValueSize == 31 || (kSmiValueSize == 32 && op == Token::SHR)) {
+        // Save rax in r11; rdx is unmodified below.
+        __ movq(r11, rax);
danno 2013/08/07 18:41:26  Can you give r11 an alias, like saved_right?
haitao.feng 2013/08/12 09:54:24  Done.
+      }
       __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
       FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
                                           heap_number_map);
       switch (op) {
         case Token::BIT_OR:  __ orl(rax, rcx);  break;
         case Token::BIT_AND: __ andl(rax, rcx); break;
         case Token::BIT_XOR: __ xorl(rax, rcx); break;
         case Token::SAR: __ sarl_cl(rax); break;
         case Token::SHL: __ shll_cl(rax); break;
-        case Token::SHR: {
-          __ shrl_cl(rax);
-          // Check if result is negative. This can only happen for a shift
-          // by zero.
-          __ testl(rax, rax);
-          __ j(negative, &non_smi_shr_result);
-          break;
-        }
+        case Token::SHR: __ shrl_cl(rax); break;
         default: UNREACHABLE();
       }
-      STATIC_ASSERT(kSmiValueSize == 32);
+
+      if (op == Token::SHR) {
+        __ JumpIfUIntNotValidSmiValue(rax, &non_smi_result, Label::kNear);
+      } else {
+        if (kSmiValueSize == 31) {
+          __ JumpIfNotValidSmiValue(rax, &non_smi_result, Label::kNear);
+        }
+      }
+
       // Tag smi result and return.
       __ Integer32ToSmi(rax, rax);
       __ Ret();
 
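JumpIfUIntNotValidSmiValue exists because an unsigned SHR result fits a smi only while it stays below 2^(payload - 1); with a 31-bit payload the signed results of the other bit operations need the analogous signed check. A standalone sketch of the unsigned test (ordinary C++, not V8 source; UIntFitsSmi is an illustrative helper, not the actual macro):

    #include <cstdint>
    #include <cstdio>

    // A uint32 fits an n-bit smi payload only if it is below 2^(n-1).
    bool UIntFitsSmi(uint32_t v, int payload_bits) {
      return (uint64_t)v < (UINT64_C(1) << (payload_bits - 1));
    }

    int main() {
      printf("%d %d %d\n",
             UIntFitsSmi(0x3FFFFFFFu, 31),   // 1: largest 31-bit smi
             UIntFitsSmi(0x40000000u, 31),   // 0: must box as a heap number
             UIntFitsSmi(0xFFFFFFFFu, 32));  // 0: (-1) >>> 0 overflows even
                                             //    a 32-bit payload
    }

The last case is the "logical right shift by zero" situation the old code singled out; the patch generalizes it to a range check shared by both payload sizes.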
-      // Logical shift right can produce an unsigned int32 that is not
-      // an int32, and so is not in the smi range. Allocate a heap number
-      // in that case.
-      if (op == Token::SHR) {
-        __ bind(&non_smi_shr_result);
+      if (kSmiValueSize == 31 || (kSmiValueSize == 32 && op == Token::SHR)) {
+        __ bind(&non_smi_result);
+        __ movl(rbx, rax);  // rbx holds result value.
         Label allocation_failed;
-        __ movl(rbx, rax);  // rbx holds result value (uint32 value as int64).
-        // Allocate heap number in new space.
-        // Not using AllocateHeapNumber macro in order to reuse
-        // already loaded heap_number_map.
-        __ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed,
-                    TAG_OBJECT);
-        // Set the map.
-        __ AssertRootValue(heap_number_map,
-                           Heap::kHeapNumberMapRootIndex,
-                           kHeapNumberMapRegisterClobbered);
-        __ movq(FieldOperand(rax, HeapObject::kMapOffset),
-                heap_number_map);
-        __ cvtqsi2sd(xmm0, rbx);
-        __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
-        __ Ret();
+        if (kSmiValueSize == 32) {
+          ASSERT(op == Token::SHR);
danno 2013/08/07 18:41:26  It looks like all three four below are identical (…
haitao.feng 2013/08/12 09:54:24  Done.
+          // Allocate heap number in new space.
+          // Not using AllocateHeapNumber macro in order to reuse
+          // already loaded heap_number_map.
+          __ Allocate(HeapNumber::kSize, rax, no_reg, no_reg,
+                      &allocation_failed, TAG_OBJECT);
+          // Set the map.
+          __ AssertRootValue(heap_number_map,
+                             Heap::kHeapNumberMapRootIndex,
+                             kHeapNumberMapRegisterClobbered);
+          __ movq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
+          // Logical shift right can produce an unsigned int32 that is not
+          // an int32, and so is not in the smi range.
+          __ cvtqsi2sd(xmm0, rbx);
+          __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+          __ Ret();
+        } else {
+          ASSERT(kSmiValueSize == 31);
+          Label skip_allocation;
+          switch (mode) {
+            case OVERWRITE_LEFT: {
+              __ movq(rax, rdx);
+              __ JumpIfNotSmi(rax, &skip_allocation);
+              __ Allocate(HeapNumber::kSize, rax, no_reg, no_reg,
+                          &allocation_failed, TAG_OBJECT);
+              // Set the map.
+              __ AssertRootValue(heap_number_map,
+                                 Heap::kHeapNumberMapRootIndex,
+                                 kHeapNumberMapRegisterClobbered);
+              __ movq(FieldOperand(rax, HeapObject::kMapOffset),
+                      heap_number_map);
+              __ bind(&skip_allocation);
+              break;
+            }
+            case OVERWRITE_RIGHT:
+              __ movq(rax, r11);
+              __ JumpIfNotSmi(rax, &skip_allocation);
+              // Fall through!
+            case NO_OVERWRITE:
+              __ Allocate(HeapNumber::kSize, rax, no_reg, no_reg,
+                          &allocation_failed, TAG_OBJECT);
+              // Set the map.
+              __ AssertRootValue(heap_number_map,
+                                 Heap::kHeapNumberMapRootIndex,
+                                 kHeapNumberMapRegisterClobbered);
+              __ movq(FieldOperand(rax, HeapObject::kMapOffset),
+                      heap_number_map);
+              __ bind(&skip_allocation);
+              break;
+            default: UNREACHABLE();
+          }
+
+          if (op == Token::SHR) {
+            // Logical shift right can produce an unsigned int32 that is not
+            // an int32, and so is not in the smi range.
+            __ cvtqsi2sd(xmm0, rbx);
+          } else {
+            // All other operations return a signed int32, so we
+            // use lsi2sd here to retain the sign bit.
+            __ cvtlsi2sd(xmm0, rbx);
+          }
+          __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+          __ Ret();
+        }
 
         __ bind(&allocation_failed);
         // We need tagged values in rdx and rax for the following code,
-        // not int32 in rax and rcx.
-        __ Integer32ToSmi(rax, rcx);
-        __ Integer32ToSmi(rdx, rbx);
+        // rdx is unchanged and rax is saved in r11.
+        __ movq(rax, r11);
         __ jmp(allocation_failure);
       }
       break;
     }
     default: UNREACHABLE(); break;
   }
   // No fall-through from this generated code.
   if (FLAG_debug_code) {
     __ Abort(kUnexpectedFallThroughInBinaryStubGenerateFloatingPointCode);
   }
(...skipping 76 matching lines...)
     FrameScope scope(masm, StackFrame::INTERNAL);
     GenerateRegisterArgsPush(masm);
     GenerateCallRuntime(masm);
   }
   __ Ret();
   }
 }
 
 
 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
-  // The int32 case is identical to the Smi case. We avoid creating this
-  // ic state on x64.
-  UNREACHABLE();
+  if (kSmiValueSize == 32) {
danno 2013/08/07 18:41:26  Remove the if and the "then" part of this code, ju…
haitao.feng 2013/08/12 09:54:24  Done.
+    // The int32 case is identical to the Smi case. We avoid creating this
+    // ic state on x64.
+    UNREACHABLE();
+  } else {
+    ASSERT(kSmiValueSize == 31);
+    ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
+
+    Label gc_required, not_number, not_int32;
+    BinaryOpStub_GenerateFloatingPointCode(masm, &gc_required, &not_number,
+                                           op_, result_type_, &not_int32,
+                                           mode_);
+
+    __ bind(&not_number);
+    __ bind(&not_int32);
+    GenerateTypeTransition(masm);
+
+    __ bind(&gc_required);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      GenerateRegisterArgsPush(masm);
+      GenerateCallRuntime(masm);
+    }
+    __ Ret();
+  }
 }
 
 
 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
   Label call_runtime;
   ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
   ASSERT(op_ == Token::ADD);
   // If both arguments are strings, call the string add stub.
   // Otherwise, do a transition.
 
(...skipping 85 matching lines...)
   // or the right operand. For precise type feedback, patch the IC
   // again if this changes.
   if (left_type_ == BinaryOpIC::SMI) {
     BinaryOpStub_CheckSmiInput(masm, rdx, &not_number);
   }
   if (right_type_ == BinaryOpIC::SMI) {
     BinaryOpStub_CheckSmiInput(masm, rax, &not_number);
   }
 
   BinaryOpStub_GenerateFloatingPointCode(
-      masm, &gc_required, &not_number, op_, mode_);
+      masm, &gc_required, &not_number, op_, result_type_, NULL, mode_);
 
   __ bind(&not_number);
   GenerateTypeTransition(masm);
 
   __ bind(&gc_required);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     GenerateRegisterArgsPush(masm);
     GenerateCallRuntime(masm);
   }
   __ Ret();
 }
 
 
 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
   Label call_runtime, call_string_add_or_runtime;
 
   BinaryOpStub_GenerateSmiCode(
       masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
 
   BinaryOpStub_GenerateFloatingPointCode(
-      masm, &call_runtime, &call_string_add_or_runtime, op_, mode_);
+      masm, &call_runtime, &call_string_add_or_runtime, op_,
+      result_type_, NULL, mode_);
 
   __ bind(&call_string_add_or_runtime);
   if (op_ == Token::ADD) {
     GenerateAddStrings(masm);
   }
 
   __ bind(&call_runtime);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     GenerateRegisterArgsPush(masm);
(...skipping 530 matching lines...)
   // Convert HeapNumber to smi if possible.
   __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
   __ movq(scratch2, xmm0);
   __ cvttsd2siq(smi_result, xmm0);
   // Check if conversion was successful by converting back and
   // comparing to the original double's bits.
   __ cvtlsi2sd(xmm1, smi_result);
   __ movq(kScratchRegister, xmm1);
   __ cmpq(scratch2, kScratchRegister);
   __ j(not_equal, on_not_smis);
+  __ JumpIfNotValidSmiValue(smi_result, on_not_smis);
   __ Integer32ToSmi(first, smi_result);
 
   __ bind(&first_done);
   __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
   __ bind(&first_smi);
   __ AssertNotSmi(second);
   __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
   __ j(not_equal,
        (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
            ? &maybe_undefined_second
            : on_not_smis);
   // Convert second to smi, if possible.
   __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
   __ movq(scratch2, xmm0);
   __ cvttsd2siq(smi_result, xmm0);
   __ cvtlsi2sd(xmm1, smi_result);
   __ movq(kScratchRegister, xmm1);
   __ cmpq(scratch2, kScratchRegister);
   __ j(not_equal, on_not_smis);
+  __ JumpIfNotValidSmiValue(smi_result, on_not_smis);
   __ Integer32ToSmi(second, smi_result);
   if (on_success != NULL) {
     __ jmp(on_success);
   } else {
     __ jmp(&done);
   }
 
   __ bind(&maybe_undefined_first);
   __ CompareRoot(first, Heap::kUndefinedValueRootIndex);
   __ j(not_equal, on_not_smis);
(...skipping 2788 matching lines...)
   // by the code above.
   if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
     __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
     __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
   }
   // Get the instance types of the two strings as they will be needed soon.
   __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
   __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
 
   // Look at the length of the result of adding the two strings.
-  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
-  __ SmiAdd(rbx, rbx, rcx);
+  if (kSmiValueSize == 32) {
+    ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
+    __ SmiAdd(rbx, rbx, rcx);
+  } else {
+    ASSERT(kSmiValueSize == 31);
+    __ SmiAdd(rbx, rbx, rcx, &call_runtime);
danno 2013/08/07 18:41:26  I still think it's OK to use the overflow-checking…
haitao.feng 2013/08/12 09:54:24  Done.
+  }
 
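The overflow-checking SmiAdd is needed here because with a 31-bit payload Smi::kMaxValue drops to 2^30 - 1, so the static guarantee String::kMaxLength <= Smi::kMaxValue / 2 that made the unchecked add safe no longer holds. A standalone sketch (ordinary C++, not V8 source; the constants mirror kSmiValueSize == 31 and 32, and SmiAddFits is an illustrative helper):

    #include <cstdint>
    #include <cstdio>

    const int64_t kSmiMaxValue31 = (INT64_C(1) << 30) - 1;  // 1073741823
    const int64_t kSmiMaxValue32 = (INT64_C(1) << 31) - 1;  // 2147483647

    // Does the sum of two lengths stay within the smi payload?
    bool SmiAddFits(int64_t a, int64_t b, int64_t max) {
      return a + b <= max;
    }

    int main() {
      int64_t a = 900000000, b = 900000000;
      printf("fits 32-bit: %d, fits 31-bit: %d\n",
             SmiAddFits(a, b, kSmiMaxValue32),   // 1
             SmiAddFits(a, b, kSmiMaxValue31));  // 0: fall back to runtime
    }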
   // Use the string table when adding two one character strings, as it
   // helps later optimizations to return an internalized string here.
   __ SmiCompare(rbx, Smi::FromInt(2));
   __ j(not_equal, &longer_than_two);
 
   // Check that both strings are non-external ASCII strings.
   __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
                                                   &call_runtime);
 
   // Get the two characters forming the sub string.
(...skipping 2227 matching lines...)
   __ bind(&fast_elements_case);
   GenerateCase(masm, FAST_ELEMENTS);
 }
 
 
 #undef __
 
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64