Chromium Code Reviews

Side by Side Diff: src/ia32/code-stubs-ia32.cc

Issue 6529055: [Isolates] Merge crankshaft (r5922 from bleeding_edge). (Closed)
Patch Set: Win32 port (created 9 years, 10 months ago)
1 // Copyright 2010 the V8 project authors. All rights reserved. 1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 47 matching lines...)
58 // Initialize the rest of the function. We don't have to update the 58 // Initialize the rest of the function. We don't have to update the
59 // write barrier because the allocated object is in new space. 59 // write barrier because the allocated object is in new space.
60 __ mov(ebx, Immediate(FACTORY->empty_fixed_array())); 60 __ mov(ebx, Immediate(FACTORY->empty_fixed_array()));
61 __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx); 61 __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
62 __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx); 62 __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
63 __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset), 63 __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
64 Immediate(FACTORY->the_hole_value())); 64 Immediate(FACTORY->the_hole_value()));
65 __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx); 65 __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
66 __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi); 66 __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
67 __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx); 67 __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
68 __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
69 Immediate(FACTORY->undefined_value()));
68 70
69 // Initialize the code pointer in the function to be the one 71 // Initialize the code pointer in the function to be the one
70 // found in the shared function info object. 72 // found in the shared function info object.
71 __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset)); 73 __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
72 __ lea(edx, FieldOperand(edx, Code::kHeaderSize)); 74 __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
73 __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx); 75 __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
74 76
75 // Return and remove the on-stack parameter. 77 // Return and remove the on-stack parameter.
76 __ ret(1 * kPointerSize); 78 __ ret(1 * kPointerSize);
77 79
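Aside: a minimal C++ sketch of the write-barrier rule the comment above relies
on (predicate and names are illustrative, not V8's API). A store only needs
recording when it creates an old-to-new pointer, and the JSFunction here was
just allocated in new space, so every store into it is exempt.

  // Hedged sketch: does obj->field = value need a write barrier?
  bool NeedsWriteBarrier(bool object_in_new_space, bool value_is_heap_object,
                         bool value_in_new_space) {
    // Only old-space objects acquiring pointers into new space must be
    // added to the remembered set for the next scavenge.
    return !object_in_new_space && value_is_heap_object && value_in_new_space;
  }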
(...skipping 363 matching lines...)
441 // Expects operands in edx, eax. 443 // Expects operands in edx, eax.
442 static void LoadFloatSmis(MacroAssembler* masm, Register scratch); 444 static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
443 445
444 // Test if operands are smi or number objects (fp). Requirements: 446 // Test if operands are smi or number objects (fp). Requirements:
445 // operand_1 in eax, operand_2 in edx; falls through on float 447 // operand_1 in eax, operand_2 in edx; falls through on float
446 // operands, jumps to the non_float label otherwise. 448 // operands, jumps to the non_float label otherwise.
447 static void CheckFloatOperands(MacroAssembler* masm, 449 static void CheckFloatOperands(MacroAssembler* masm,
448 Label* non_float, 450 Label* non_float,
449 Register scratch); 451 Register scratch);
450 452
453 // Checks that the two floating point numbers on top of the FPU stack
454 // have int32 values.
455 static void CheckFloatOperandsAreInt32(MacroAssembler* masm,
456 Label* non_int32);
457
451 // Takes the operands in edx and eax and loads them as integers in eax 458 // Takes the operands in edx and eax and loads them as integers in eax
452 // and ecx. 459 // and ecx.
453 static void LoadAsIntegers(MacroAssembler* masm, 460 static void LoadAsIntegers(MacroAssembler* masm,
454 TypeInfo type_info, 461 TypeInfo type_info,
455 bool use_sse3, 462 bool use_sse3,
456 Label* operand_conversion_failure); 463 Label* operand_conversion_failure);
457 static void LoadNumbersAsIntegers(MacroAssembler* masm, 464 static void LoadNumbersAsIntegers(MacroAssembler* masm,
458 TypeInfo type_info, 465 TypeInfo type_info,
459 bool use_sse3, 466 bool use_sse3,
460 Label* operand_conversion_failure); 467 Label* operand_conversion_failure);
461 static void LoadUnknownsAsIntegers(MacroAssembler* masm, 468 static void LoadUnknownsAsIntegers(MacroAssembler* masm,
462 bool use_sse3, 469 bool use_sse3,
463 Label* operand_conversion_failure); 470 Label* operand_conversion_failure);
464 471
465 // Test if operands are smis or heap numbers and load them 472 // Must only be called after LoadUnknownsAsIntegers. Assumes that the
466 // into xmm0 and xmm1 if they are. Operands are in edx and eax. 473 // operands are pushed on the stack, and that their conversions to int32
474 // are in eax and ecx. Checks that the original numbers were in the int32
475 // range.
476 static void CheckLoadedIntegersWereInt32(MacroAssembler* masm,
477 bool use_sse3,
478 Label* not_int32);
479
480 // Assumes that operands are smis or heap numbers and loads them
481 // into xmm0 and xmm1. Operands are in edx and eax.
467 // Leaves operands unchanged. 482 // Leaves operands unchanged.
468 static void LoadSSE2Operands(MacroAssembler* masm); 483 static void LoadSSE2Operands(MacroAssembler* masm);
469 484
470 // Test if operands are numbers (smi or HeapNumber objects), and load 485 // Test if operands are numbers (smi or HeapNumber objects), and load
471 // them into xmm0 and xmm1 if they are. Jump to label not_numbers if 486 // them into xmm0 and xmm1 if they are. Jump to label not_numbers if
472 // either operand is not a number. Operands are in edx and eax. 487 // either operand is not a number. Operands are in edx and eax.
473 // Leaves operands unchanged. 488 // Leaves operands unchanged.
474 static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers); 489 static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
475 490
476 // Similar to LoadSSE2Operands but assumes that both operands are smis. 491 // Similar to LoadSSE2Operands but assumes that both operands are smis.
477 // Expects operands in edx, eax. 492 // Expects operands in edx, eax.
478 static void LoadSSE2Smis(MacroAssembler* masm, Register scratch); 493 static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
494
495 // Checks that the two floating point numbers loaded into xmm0 and xmm1
496 // have int32 values.
497 static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
498 Label* non_int32,
499 Register scratch);
479 }; 500 };
480 501
481 502
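Aside: a minimal sketch of the smi representation these helpers tag and untag,
assuming the ia32 layout asserted elsewhere in this file (kSmiTag == 0,
kSmiTagSize == 1, kSmiTagMask == 1):

  #include <cstdint>

  int32_t SmiTag(int32_t value) { return value << 1; }   // payload in bits 31..1
  int32_t SmiUntag(int32_t smi) { return smi >> 1; }     // arithmetic shift keeps sign
  bool IsSmi(uint32_t word) { return (word & 1) == 0; }  // tag bit clear => smi

LoadSSE2Smis and LoadFloatSmis are then just SmiUntag followed by an
int-to-double conversion (cvtsi2sd, or fild via a stack slot).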
482 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { 503 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
483 // 1. Move arguments into edx, eax except for DIV and MOD, which need the 504 // 1. Move arguments into edx, eax except for DIV and MOD, which need the
484 // dividend in eax and edx free for the division. Use eax, ebx for those. 505 // dividend in eax and edx free for the division. Use eax, ebx for those.
485 Comment load_comment(masm, "-- Load arguments"); 506 Comment load_comment(masm, "-- Load arguments");
486 Register left = edx; 507 Register left = edx;
487 Register right = eax; 508 Register right = eax;
488 if (op_ == Token::DIV || op_ == Token::MOD) { 509 if (op_ == Token::DIV || op_ == Token::MOD) {
(...skipping 215 matching lines...)
704 // 5. Emit return of result in eax. 725 // 5. Emit return of result in eax.
705 GenerateReturn(masm); 726 GenerateReturn(masm);
706 727
707 // 6. For some operations emit inline code to perform floating point 728 // 6. For some operations emit inline code to perform floating point
708 // operations on known smis (e.g., if the result of the operation 729 // operations on known smis (e.g., if the result of the operation
709 // overflowed the smi range). 730 // overflowed the smi range).
710 switch (op_) { 731 switch (op_) {
711 case Token::SHL: { 732 case Token::SHL: {
712 Comment perform_float(masm, "-- Perform float operation on smis"); 733 Comment perform_float(masm, "-- Perform float operation on smis");
713 __ bind(&use_fp_on_smis); 734 __ bind(&use_fp_on_smis);
714 // Result we want is in left == edx, so we can put the allocated heap 735 if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
715 // number in eax. 736 // Result we want is in left == edx, so we can put the allocated heap
716 __ AllocateHeapNumber(eax, ecx, ebx, slow); 737 // number in eax.
717 // Store the result in the HeapNumber and return. 738 __ AllocateHeapNumber(eax, ecx, ebx, slow);
718 if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) { 739 // Store the result in the HeapNumber and return.
719 CpuFeatures::Scope use_sse2(SSE2); 740 if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
720 __ cvtsi2sd(xmm0, Operand(left)); 741 CpuFeatures::Scope use_sse2(SSE2);
721 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); 742 __ cvtsi2sd(xmm0, Operand(left));
743 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
744 } else {
745 // It's OK to overwrite the right argument on the stack because we
746 // are about to return.
747 __ mov(Operand(esp, 1 * kPointerSize), left);
748 __ fild_s(Operand(esp, 1 * kPointerSize));
749 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
750 }
751 GenerateReturn(masm);
722 } else { 752 } else {
723 // It's OK to overwrite the right argument on the stack because we 753 ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI);
724 // are about to return. 754 __ jmp(slow);
725 __ mov(Operand(esp, 1 * kPointerSize), left);
726 __ fild_s(Operand(esp, 1 * kPointerSize));
727 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
728 } 755 }
729 GenerateReturn(masm);
730 break; 756 break;
731 } 757 }
732 758
733 case Token::ADD: 759 case Token::ADD:
734 case Token::SUB: 760 case Token::SUB:
735 case Token::MUL: 761 case Token::MUL:
736 case Token::DIV: { 762 case Token::DIV: {
737 Comment perform_float(masm, "-- Perform float operation on smis"); 763 Comment perform_float(masm, "-- Perform float operation on smis");
738 __ bind(&use_fp_on_smis); 764 __ bind(&use_fp_on_smis);
739 // Restore arguments to edx, eax. 765 // Restore arguments to edx, eax.
(...skipping 12 matching lines...)
752 break; 778 break;
753 case Token::DIV: 779 case Token::DIV:
754 // Left was clobbered but a copy is in edi. Right is in ebx for 780 // Left was clobbered but a copy is in edi. Right is in ebx for
755 // division. 781 // division.
756 __ mov(edx, edi); 782 __ mov(edx, edi);
757 __ mov(eax, right); 783 __ mov(eax, right);
758 break; 784 break;
759 default: UNREACHABLE(); 785 default: UNREACHABLE();
760 break; 786 break;
761 } 787 }
762 __ AllocateHeapNumber(ecx, ebx, no_reg, slow); 788 if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
763 if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) { 789 __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
764 CpuFeatures::Scope use_sse2(SSE2); 790 if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
765 FloatingPointHelper::LoadSSE2Smis(masm, ebx); 791 CpuFeatures::Scope use_sse2(SSE2);
766 switch (op_) { 792 FloatingPointHelper::LoadSSE2Smis(masm, ebx);
767 case Token::ADD: __ addsd(xmm0, xmm1); break; 793 switch (op_) {
768 case Token::SUB: __ subsd(xmm0, xmm1); break; 794 case Token::ADD: __ addsd(xmm0, xmm1); break;
769 case Token::MUL: __ mulsd(xmm0, xmm1); break; 795 case Token::SUB: __ subsd(xmm0, xmm1); break;
770 case Token::DIV: __ divsd(xmm0, xmm1); break; 796 case Token::MUL: __ mulsd(xmm0, xmm1); break;
771 default: UNREACHABLE(); 797 case Token::DIV: __ divsd(xmm0, xmm1); break;
798 default: UNREACHABLE();
799 }
800 __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
801 } else { // SSE2 not available, use FPU.
802 FloatingPointHelper::LoadFloatSmis(masm, ebx);
803 switch (op_) {
804 case Token::ADD: __ faddp(1); break;
805 case Token::SUB: __ fsubp(1); break;
806 case Token::MUL: __ fmulp(1); break;
807 case Token::DIV: __ fdivp(1); break;
808 default: UNREACHABLE();
809 }
810 __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
772 } 811 }
773 __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0); 812 __ mov(eax, ecx);
774 } else { // SSE2 not available, use FPU. 813 GenerateReturn(masm);
775 FloatingPointHelper::LoadFloatSmis(masm, ebx); 814 } else {
776 switch (op_) { 815 ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI);
777 case Token::ADD: __ faddp(1); break; 816 __ jmp(slow);
778 case Token::SUB: __ fsubp(1); break;
779 case Token::MUL: __ fmulp(1); break;
780 case Token::DIV: __ fdivp(1); break;
781 default: UNREACHABLE();
782 }
783 __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
784 } 817 }
785 __ mov(eax, ecx);
786 GenerateReturn(masm);
787 break; 818 break;
788 } 819 }
789 820
790 default: 821 default:
791 break; 822 break;
792 } 823 }
793 824
794 // 7. Non-smi operands, fall out to the non-smi code with the operands in 825 // 7. Non-smi operands, fall out to the non-smi code with the operands in
795 // edx and eax. 826 // edx and eax.
796 Comment done_comment(masm, "-- Enter non-smi code"); 827 Comment done_comment(masm, "-- Enter non-smi code");
(...skipping 19 matching lines...)
816 break; 847 break;
817 } 848 }
818 } 849 }
819 850
820 851
821 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { 852 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
822 Label call_runtime; 853 Label call_runtime;
823 854
824 __ IncrementCounter(COUNTERS->generic_binary_stub_calls(), 1); 855 __ IncrementCounter(COUNTERS->generic_binary_stub_calls(), 1);
825 856
857 if (runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI) {
858 Label slow;
859 if (ShouldGenerateSmiCode()) GenerateSmiCode(masm, &slow);
860 __ bind(&slow);
861 GenerateTypeTransition(masm);
862 }
863
826 // Generate fast case smi code if requested. This flag is set when the fast 864 // Generate fast case smi code if requested. This flag is set when the fast
827 // case smi code is not generated by the caller. Generating it here will speed 865 // case smi code is not generated by the caller. Generating it here will speed
828 // up common operations. 866 // up common operations.
829 if (ShouldGenerateSmiCode()) { 867 if (ShouldGenerateSmiCode()) {
830 GenerateSmiCode(masm, &call_runtime); 868 GenerateSmiCode(masm, &call_runtime);
831 } else if (op_ != Token::MOD) { // MOD goes straight to runtime. 869 } else if (op_ != Token::MOD) { // MOD goes straight to runtime.
832 if (!HasArgsInRegisters()) { 870 if (!HasArgsInRegisters()) {
833 GenerateLoadArguments(masm); 871 GenerateLoadArguments(masm);
834 } 872 }
835 } 873 }
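Aside: what the new UNINIT_OR_SMI plumbing buys, as a hedged C++ sketch (only
UNINIT_OR_SMI is named in this patch; the other state names are illustrative).
While the IC has only ever observed smi operands, the stub refuses to allocate
HeapNumber results and instead falls through to GenerateTypeTransition, whose
kBinaryOp_Patch call installs a more general stub at the call site.

  // Illustrative state machine, not V8's real BinaryOpIC.
  enum BinaryOpState { UNINIT_OR_SMI, HEAP_NUMBERS, GENERIC };

  BinaryOpState NextState(BinaryOpState state, bool smi_path_failed) {
    if (state == UNINIT_OR_SMI && smi_path_failed) {
      return HEAP_NUMBERS;  // patched stub may allocate heap numbers inline
    }
    return state;
  }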
(...skipping 197 matching lines...)
1033 if (HasArgsReversed()) { 1071 if (HasArgsReversed()) {
1034 lhs = eax; 1072 lhs = eax;
1035 rhs = edx; 1073 rhs = edx;
1036 } else { 1074 } else {
1037 lhs = edx; 1075 lhs = edx;
1038 rhs = eax; 1076 rhs = eax;
1039 } 1077 }
1040 1078
1041 // Test if left operand is a string. 1079 // Test if left operand is a string.
1042 NearLabel lhs_not_string; 1080 NearLabel lhs_not_string;
1081 __ test(lhs, Immediate(kSmiTagMask));
1082 __ j(zero, &lhs_not_string);
1083 __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
1084 __ j(above_equal, &lhs_not_string);
1085
1086 StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
1087 __ TailCallStub(&string_add_left_stub);
1088
1089 NearLabel call_runtime_with_args;
1090 // Left operand is not a string, test right.
1091 __ bind(&lhs_not_string);
1092 __ test(rhs, Immediate(kSmiTagMask));
1093 __ j(zero, &call_runtime_with_args);
1094 __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
1095 __ j(above_equal, &call_runtime_with_args);
1096
1097 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
1098 __ TailCallStub(&string_add_right_stub);
1099
1100 // Neither argument is a string.
1101 __ bind(&call_runtime);
1102 if (HasArgsInRegisters()) {
1103 GenerateRegisterArgsPush(masm);
1104 }
1105 __ bind(&call_runtime_with_args);
1106 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
1107 break;
1108 }
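Aside: the two-step string test above, restated as a C++ sketch (the constant
value is assumed; in V8 all string instance types sort below
FIRST_NONSTRING_TYPE, which is what CmpObjectType exploits):

  bool IsHeapString(uint32_t word, uint8_t instance_type) {
    const uint32_t kSmiTagMask = 1;            // low bit clear => smi
    const uint8_t kFirstNonstringType = 0x80;  // illustrative, not the real value
    if ((word & kSmiTagMask) == 0) return false;  // j(zero, &lhs_not_string)
    return instance_type < kFirstNonstringType;   // j(above_equal, ...) falls out
  }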
1109 case Token::SUB:
1110 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
1111 break;
1112 case Token::MUL:
1113 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
1114 break;
1115 case Token::DIV:
1116 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
1117 break;
1118 case Token::MOD:
1119 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
1120 break;
1121 case Token::BIT_OR:
1122 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
1123 break;
1124 case Token::BIT_AND:
1125 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
1126 break;
1127 case Token::BIT_XOR:
1128 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
1129 break;
1130 case Token::SAR:
1131 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
1132 break;
1133 case Token::SHL:
1134 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
1135 break;
1136 case Token::SHR:
1137 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
1138 break;
1139 default:
1140 UNREACHABLE();
1141 }
1142 }
1143
1144
1145 void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
1146 Label* alloc_failure) {
1147 Label skip_allocation;
1148 OverwriteMode mode = mode_;
1149 if (HasArgsReversed()) {
1150 if (mode == OVERWRITE_RIGHT) {
1151 mode = OVERWRITE_LEFT;
1152 } else if (mode == OVERWRITE_LEFT) {
1153 mode = OVERWRITE_RIGHT;
1154 }
1155 }
1156 switch (mode) {
1157 case OVERWRITE_LEFT: {
1158 // If the argument in edx is already an object, we skip the
1159 // allocation of a heap number.
1160 __ test(edx, Immediate(kSmiTagMask));
1161 __ j(not_zero, &skip_allocation, not_taken);
1162 // Allocate a heap number for the result. Keep eax and edx intact
1163 // for the possible runtime call.
1164 __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
1165 // Now edx can be overwritten losing one of the arguments as we are
1166 // now done and will not need it any more.
1167 __ mov(edx, Operand(ebx));
1168 __ bind(&skip_allocation);
1169 // Use object in edx as a result holder
1170 __ mov(eax, Operand(edx));
1171 break;
1172 }
1173 case OVERWRITE_RIGHT:
1174 // If the argument in eax is already an object, we skip the
1175 // allocation of a heap number.
1176 __ test(eax, Immediate(kSmiTagMask));
1177 __ j(not_zero, &skip_allocation, not_taken);
1178 // Fall through!
1179 case NO_OVERWRITE:
1180 // Allocate a heap number for the result. Keep eax and edx intact
1181 // for the possible runtime call.
1182 __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
1183 // Now eax can be overwritten losing one of the arguments as we are
1184 // now done and will not need it any more.
1185 __ mov(eax, ebx);
1186 __ bind(&skip_allocation);
1187 break;
1188 default: UNREACHABLE();
1189 }
1190 }
1191
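Aside: GenerateHeapResultAllocation in one sentence: if the operand slated for
overwriting already holds a HeapNumber, reuse its box as the result; otherwise
allocate a fresh one (HasArgsReversed merely swaps which operand counts as
doomed). A hedged sketch:

  // Illustrative only; 'allocate' stands in for AllocateHeapNumber.
  struct TaggedValue { bool is_smi; void* heap_number; };

  void* ResultBox(TaggedValue doomed, void* (*allocate)()) {
    // A smi has no backing store to reuse; a HeapNumber operand is consumed
    // by the operation, so its storage can hold the result.
    return doomed.is_smi ? allocate() : doomed.heap_number;
  }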
1192
1193 void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
1194 // If arguments are not passed in registers read them from the stack.
1195 ASSERT(!HasArgsInRegisters());
1196 __ mov(eax, Operand(esp, 1 * kPointerSize));
1197 __ mov(edx, Operand(esp, 2 * kPointerSize));
1198 }
1199
1200
1201 void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
1202 // If arguments are not passed in registers remove them from the stack before
1203 // returning.
1204 if (!HasArgsInRegisters()) {
1205 __ ret(2 * kPointerSize); // Remove both operands
1206 } else {
1207 __ ret(0);
1208 }
1209 }
1210
1211
1212 void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
1213 ASSERT(HasArgsInRegisters());
1214 __ pop(ecx);
1215 if (HasArgsReversed()) {
1216 __ push(eax);
1217 __ push(edx);
1218 } else {
1219 __ push(edx);
1220 __ push(eax);
1221 }
1222 __ push(ecx);
1223 }
1224
1225
1226 void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
1227 // Ensure the operands are on the stack.
1228 if (HasArgsInRegisters()) {
1229 GenerateRegisterArgsPush(masm);
1230 }
1231
1232 __ pop(ecx); // Save return address.
1233
1234 // Left and right arguments are now on top.
1235 // Push this stub's key. Although the operation and the type info are
1236 // encoded into the key, the encoding is opaque, so push them too.
1237 __ push(Immediate(Smi::FromInt(MinorKey())));
1238 __ push(Immediate(Smi::FromInt(op_)));
1239 __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
1240
1241 __ push(ecx); // Push return address.
1242
1243 // Patch the caller to an appropriate specialized stub and return the
1244 // operation result to the caller of the stub.
1245 __ TailCallExternalReference(
1246 ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
1247 5,
1248 1);
1249 }
1250
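Aside: the stack seen by IC_Utility(IC::kBinaryOp_Patch) after the pushes
above, matching the 5-argument tail call (layout inferred from this function;
ia32 byte offsets):

  //   [esp +  0]  return address (pushed back last)
  //   [esp +  4]  runtime_operands_type_  (smi)
  //   [esp +  8]  op_                     (smi)
  //   [esp + 12]  MinorKey()              (smi)
  //   [esp + 16]  right operand
  //   [esp + 20]  left operand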
1251
1252 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
1253 GenericBinaryOpStub stub(key, type_info);
1254 return stub.GetCode();
1255 }
1256
1257
1258 Handle<Code> GetTypeRecordingBinaryOpStub(int key,
1259 TRBinaryOpIC::TypeInfo type_info,
1260 TRBinaryOpIC::TypeInfo result_type_info) {
1261 TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
1262 return stub.GetCode();
1263 }
1264
1265
1266 void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
1267 __ pop(ecx); // Save return address.
1268 __ push(edx);
1269 __ push(eax);
1270 // Left and right arguments are now on top.
1271 // Push this stub's key. Although the operation and the type info are
1272 // encoded into the key, the encoding is opaque, so push them too.
1273 __ push(Immediate(Smi::FromInt(MinorKey())));
1274 __ push(Immediate(Smi::FromInt(op_)));
1275 __ push(Immediate(Smi::FromInt(operands_type_)));
1276
1277 __ push(ecx); // Push return address.
1278
1279 // Patch the caller to an appropriate specialized stub and return the
1280 // operation result to the caller of the stub.
1281 __ TailCallExternalReference(
1282 ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
1283 5,
1284 1);
1285 }
1286
1287
1288 // Prepare for a type transition runtime call when the args are already on
1289 // the stack, under the return address.
1290 void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
1291 MacroAssembler* masm) {
1292 __ pop(ecx); // Save return address.
1293 // Left and right arguments are already on top of the stack.
1294 // Push this stub's key. Although the operation and the type info are
1295 // encoded into the key, the encoding is opaque, so push them too.
1296 __ push(Immediate(Smi::FromInt(MinorKey())));
1297 __ push(Immediate(Smi::FromInt(op_)));
1298 __ push(Immediate(Smi::FromInt(operands_type_)));
1299
1300 __ push(ecx); // Push return address.
1301
1302 // Patch the caller to an appropriate specialized stub and return the
1303 // operation result to the caller of the stub.
1304 __ TailCallExternalReference(
1305 ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
1306 5,
1307 1);
1308 }
1309
1310
1311 void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
1312 switch (operands_type_) {
1313 case TRBinaryOpIC::UNINITIALIZED:
1314 GenerateTypeTransition(masm);
1315 break;
1316 case TRBinaryOpIC::SMI:
1317 GenerateSmiStub(masm);
1318 break;
1319 case TRBinaryOpIC::INT32:
1320 GenerateInt32Stub(masm);
1321 break;
1322 case TRBinaryOpIC::HEAP_NUMBER:
1323 GenerateHeapNumberStub(masm);
1324 break;
1325 case TRBinaryOpIC::STRING:
1326 GenerateStringStub(masm);
1327 break;
1328 case TRBinaryOpIC::GENERIC:
1329 GenerateGeneric(masm);
1330 break;
1331 default:
1332 UNREACHABLE();
1333 }
1334 }
1335
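Aside: the operands_type_ dispatch above walks a one-way lattice; every
GenerateTypeTransition call re-patches the call site one step toward GENERIC
and never back. A hedged sketch of the ordering (state names are from
TRBinaryOpIC; the exact edges are assumed):

  // UNINITIALIZED -> SMI -> INT32 -> HEAP_NUMBER -> GENERIC
  //             \--> STRING (Token::ADD with a string operand) --> GENERIC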
1336
1337 const char* TypeRecordingBinaryOpStub::GetName() {
1338 if (name_ != NULL) return name_;
1339 const int kMaxNameLength = 100;
1340 name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
1341 kMaxNameLength);
1342 if (name_ == NULL) return "OOM";
1343 const char* op_name = Token::Name(op_);
1344 const char* overwrite_name;
1345 switch (mode_) {
1346 case NO_OVERWRITE: overwrite_name = "Alloc"; break;
1347 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
1348 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
1349 default: overwrite_name = "UnknownOverwrite"; break;
1350 }
1351
1352 OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
1353 "TypeRecordingBinaryOpStub_%s_%s_%s",
1354 op_name,
1355 overwrite_name,
1356 TRBinaryOpIC::GetName(operands_type_));
1357 return name_;
1358 }
1359
1360
1361 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
1362 Label* slow,
1363 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
1364 // 1. Move arguments into edx, eax except for DIV and MOD, which need the
1365 // dividend in eax and edx free for the division. Use eax, ebx for those.
1366 Comment load_comment(masm, "-- Load arguments");
1367 Register left = edx;
1368 Register right = eax;
1369 if (op_ == Token::DIV || op_ == Token::MOD) {
1370 left = eax;
1371 right = ebx;
1372 __ mov(ebx, eax);
1373 __ mov(eax, edx);
1374 }
1375
1376
1377 // 2. Prepare the smi check of both operands by oring them together.
1378 Comment smi_check_comment(masm, "-- Smi check arguments");
1379 Label not_smis;
1380 Register combined = ecx;
1381 ASSERT(!left.is(combined) && !right.is(combined));
1382 switch (op_) {
1383 case Token::BIT_OR:
1384 // Perform the operation into eax and smi check the result. Preserve
1385 // eax in case the result is not a smi.
1386 ASSERT(!left.is(ecx) && !right.is(ecx));
1387 __ mov(ecx, right);
1388 __ or_(right, Operand(left)); // Bitwise or is commutative.
1389 combined = right;
1390 break;
1391
1392 case Token::BIT_XOR:
1393 case Token::BIT_AND:
1394 case Token::ADD:
1395 case Token::SUB:
1396 case Token::MUL:
1397 case Token::DIV:
1398 case Token::MOD:
1399 __ mov(combined, right);
1400 __ or_(combined, Operand(left));
1401 break;
1402
1403 case Token::SHL:
1404 case Token::SAR:
1405 case Token::SHR:
1406 // Move the right operand into ecx for the shift operation, use eax
1407 // for the smi check register.
1408 ASSERT(!left.is(ecx) && !right.is(ecx));
1409 __ mov(ecx, right);
1410 __ or_(right, Operand(left));
1411 combined = right;
1412 break;
1413
1414 default:
1415 break;
1416 }
1417
1418 // 3. Perform the smi check of the operands.
1419 STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
1420 __ test(combined, Immediate(kSmiTagMask));
1421 __ j(not_zero, &not_smis, not_taken);
1422
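Aside: why a single test covers both operands (a minimal sketch, relying on
kSmiTag == 0): the tag bit of an OR is clear iff it is clear in both words.

  bool BothSmis(uint32_t left, uint32_t right) {
    const uint32_t kSmiTagMask = 1;
    return ((left | right) & kSmiTagMask) == 0;  // one branch instead of two
  }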
1423 // 4. Operands are both smis, perform the operation leaving the result in
1424 // eax and check the result if necessary.
1425 Comment perform_smi(masm, "-- Perform smi operation");
1426 Label use_fp_on_smis;
1427 switch (op_) {
1428 case Token::BIT_OR:
1429 // Nothing to do.
1430 break;
1431
1432 case Token::BIT_XOR:
1433 ASSERT(right.is(eax));
1434 __ xor_(right, Operand(left)); // Bitwise xor is commutative.
1435 break;
1436
1437 case Token::BIT_AND:
1438 ASSERT(right.is(eax));
1439 __ and_(right, Operand(left)); // Bitwise and is commutative.
1440 break;
1441
1442 case Token::SHL:
1443 // Remove tags from operands (but keep sign).
1444 __ SmiUntag(left);
1445 __ SmiUntag(ecx);
1446 // Perform the operation.
1447 __ shl_cl(left);
1448 // Check that the *signed* result fits in a smi.
1449 __ cmp(left, 0xc0000000);
1450 __ j(sign, &use_fp_on_smis, not_taken);
1451 // Tag the result and store it in register eax.
1452 __ SmiTag(left);
1453 __ mov(eax, left);
1454 break;
1455
1456 case Token::SAR:
1457 // Remove tags from operands (but keep sign).
1458 __ SmiUntag(left);
1459 __ SmiUntag(ecx);
1460 // Perform the operation.
1461 __ sar_cl(left);
1462 // Tag the result and store it in register eax.
1463 __ SmiTag(left);
1464 __ mov(eax, left);
1465 break;
1466
1467 case Token::SHR:
1468 // Remove tags from operands (but keep sign).
1469 __ SmiUntag(left);
1470 __ SmiUntag(ecx);
1471 // Perform the operation.
1472 __ shr_cl(left);
1473 // Check that the *unsigned* result fits in a smi.
1474 // Neither of the two high-order bits can be set:
1475 // - 0x80000000: high bit would be lost when smi tagging.
1476 // - 0x40000000: this number would convert to negative when
1477 // smi tagging. These two cases can only happen with shifts
1478 // by 0 or 1 when handed a valid smi.
1479 __ test(left, Immediate(0xc0000000));
1480 __ j(not_zero, slow, not_taken);
1481 // Tag the result and store it in register eax.
1482 __ SmiTag(left);
1483 __ mov(eax, left);
1484 break;
1485
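Aside: the two range checks used above, restated in C++. An untagged result
fits a 31-bit smi payload iff it lies in [-0x40000000, 0x3fffffff]; adding
0x40000000 (what "cmp left, 0xc0000000" computes) turns that into a single
sign test, and the unsigned SHR result must additionally clear both high bits.

  bool SignedFitsSmi(int32_t v) {
    return v >= -0x40000000 && v <= 0x3fffffff;  // cmp v, 0xc0000000; j(sign)
  }
  bool UnsignedFitsSmi(uint32_t v) {
    return (v & 0xc0000000u) == 0;               // test v, 0xc0000000
  }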
1486 case Token::ADD:
1487 ASSERT(right.is(eax));
1488 __ add(right, Operand(left)); // Addition is commutative.
1489 __ j(overflow, &use_fp_on_smis, not_taken);
1490 break;
1491
1492 case Token::SUB:
1493 __ sub(left, Operand(right));
1494 __ j(overflow, &use_fp_on_smis, not_taken);
1495 __ mov(eax, left);
1496 break;
1497
1498 case Token::MUL:
1499 // If the smi tag is 0 we can just leave the tag on one operand.
1500 STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
1501 // We can't revert the multiplication if the result is not a smi
1502 // so save the right operand.
1503 __ mov(ebx, right);
1504 // Remove tag from one of the operands (but keep sign).
1505 __ SmiUntag(right);
1506 // Do multiplication.
1507 __ imul(right, Operand(left)); // Multiplication is commutative.
1508 __ j(overflow, &use_fp_on_smis, not_taken);
1509 // Check for negative zero result. Use combined = left | right.
1510 __ NegativeZeroTest(right, combined, &use_fp_on_smis);
1511 break;
1512
1513 case Token::DIV:
1514 // We can't revert the division if the result is not a smi so
1515 // save the left operand.
1516 __ mov(edi, left);
1517 // Check for 0 divisor.
1518 __ test(right, Operand(right));
1519 __ j(zero, &use_fp_on_smis, not_taken);
1520 // Sign extend left into edx:eax.
1521 ASSERT(left.is(eax));
1522 __ cdq();
1523 // Divide edx:eax by right.
1524 __ idiv(right);
1525 // Check for the corner case of dividing the most negative smi by
1526 // -1. We cannot use the overflow flag, since it is not set by idiv
1527 // instruction.
1528 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
1529 __ cmp(eax, 0x40000000);
1530 __ j(equal, &use_fp_on_smis);
1531 // Check for negative zero result. Use combined = left | right.
1532 __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
1533 // Check that the remainder is zero.
1534 __ test(edx, Operand(edx));
1535 __ j(not_zero, &use_fp_on_smis);
1536 // Tag the result and store it in register eax.
1537 __ SmiTag(eax);
1538 break;
1539
1540 case Token::MOD:
1541 // Check for 0 divisor.
1542 __ test(right, Operand(right));
1543 __ j(zero, &not_smis, not_taken);
1544
1545 // Sign extend left into edx:eax.
1546 ASSERT(left.is(eax));
1547 __ cdq();
1548 // Divide edx:eax by right.
1549 __ idiv(right);
1550 // Check for negative zero result. Use combined = left | right.
1551 __ NegativeZeroTest(edx, combined, slow);
1552 // Move remainder to register eax.
1553 __ mov(eax, edx);
1554 break;
1555
1556 default:
1557 UNREACHABLE();
1558 }
1559
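Aside: the DIV corner cases above as a worked sketch on untagged int32 values.
(a) -0x40000000 / -1 = 0x40000000, one past the largest smi, and idiv does not
set the overflow flag, hence the explicit cmp. (b) A zero quotient stands for
-0.0 whenever either operand was negative (NegativeZeroTest's combined
register), and a smi cannot represent -0.0.

  bool DivNeedsFallback(int32_t left, int32_t right) {
    if (right == 0) return true;                          // division by zero
    if (left == -0x40000000 && right == -1) return true;  // quotient overflows
    int32_t quotient = left / right;
    if (quotient == 0 && (left | right) < 0) return true; // exact result is -0.0
    return left % right != 0;                             // inexact: needs a double
  }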
1560 // 5. Emit return of result in eax. Some operations have registers pushed.
1561 switch (op_) {
1562 case Token::ADD:
1563 case Token::SUB:
1564 case Token::MUL:
1565 case Token::DIV:
1566 __ ret(0);
1567 break;
1568 case Token::MOD:
1569 case Token::BIT_OR:
1570 case Token::BIT_AND:
1571 case Token::BIT_XOR:
1572 case Token::SAR:
1573 case Token::SHL:
1574 case Token::SHR:
1575 __ ret(2 * kPointerSize);
1576 break;
1577 default:
1578 UNREACHABLE();
1579 }
1580
1581 // 6. For some operations emit inline code to perform floating point
1582 // operations on known smis (e.g., if the result of the operation
1583 // overflowed the smi range).
1584 if (allow_heapnumber_results == NO_HEAPNUMBER_RESULTS) {
1585 __ bind(&use_fp_on_smis);
1586 switch (op_) {
1587 // Undo the effects of some operations, and some register moves.
1588 case Token::SHL:
1589 // The arguments are saved on the stack, and only used from there.
1590 break;
1591 case Token::ADD:
1592 // Revert right = right + left.
1593 __ sub(right, Operand(left));
1594 break;
1595 case Token::SUB:
1596 // Revert left = left - right.
1597 __ add(left, Operand(right));
1598 break;
1599 case Token::MUL:
1600 // Right was clobbered but a copy is in ebx.
1601 __ mov(right, ebx);
1602 break;
1603 case Token::DIV:
1604 // Left was clobbered but a copy is in edi. Right is in ebx for
1605 // division. They should be in eax, ebx for jump to not_smi.
1606 __ mov(eax, edi);
1607 break;
1608 default:
1609 // No other operators jump to use_fp_on_smis.
1610 break;
1611 }
1612 __ jmp(&not_smis);
1613 } else {
1614 ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS);
1615 switch (op_) {
1616 case Token::SHL: {
1617 Comment perform_float(masm, "-- Perform float operation on smis");
1618 __ bind(&use_fp_on_smis);
1619 // Result we want is in left == edx, so we can put the allocated heap
1620 // number in eax.
1621 __ AllocateHeapNumber(eax, ecx, ebx, slow);
1622 // Store the result in the HeapNumber and return.
1623 if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
1624 CpuFeatures::Scope use_sse2(SSE2);
1625 __ cvtsi2sd(xmm0, Operand(left));
1626 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1627 } else {
1628 // It's OK to overwrite the right argument on the stack because we
1629 // are about to return.
1630 __ mov(Operand(esp, 1 * kPointerSize), left);
1631 __ fild_s(Operand(esp, 1 * kPointerSize));
1632 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1633 }
1634 __ ret(2 * kPointerSize);
1635 break;
1636 }
1637
1638 case Token::ADD:
1639 case Token::SUB:
1640 case Token::MUL:
1641 case Token::DIV: {
1642 Comment perform_float(masm, "-- Perform float operation on smis");
1643 __ bind(&use_fp_on_smis);
1644 // Restore arguments to edx, eax.
1645 switch (op_) {
1646 case Token::ADD:
1647 // Revert right = right + left.
1648 __ sub(right, Operand(left));
1649 break;
1650 case Token::SUB:
1651 // Revert left = left - right.
1652 __ add(left, Operand(right));
1653 break;
1654 case Token::MUL:
1655 // Right was clobbered but a copy is in ebx.
1656 __ mov(right, ebx);
1657 break;
1658 case Token::DIV:
1659 // Left was clobbered but a copy is in edi. Right is in ebx for
1660 // division.
1661 __ mov(edx, edi);
1662 __ mov(eax, right);
1663 break;
1664 default: UNREACHABLE();
1665 break;
1666 }
1667 __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
1668 if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
1669 CpuFeatures::Scope use_sse2(SSE2);
1670 FloatingPointHelper::LoadSSE2Smis(masm, ebx);
1671 switch (op_) {
1672 case Token::ADD: __ addsd(xmm0, xmm1); break;
1673 case Token::SUB: __ subsd(xmm0, xmm1); break;
1674 case Token::MUL: __ mulsd(xmm0, xmm1); break;
1675 case Token::DIV: __ divsd(xmm0, xmm1); break;
1676 default: UNREACHABLE();
1677 }
1678 __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
1679 } else { // SSE2 not available, use FPU.
1680 FloatingPointHelper::LoadFloatSmis(masm, ebx);
1681 switch (op_) {
1682 case Token::ADD: __ faddp(1); break;
1683 case Token::SUB: __ fsubp(1); break;
1684 case Token::MUL: __ fmulp(1); break;
1685 case Token::DIV: __ fdivp(1); break;
1686 default: UNREACHABLE();
1687 }
1688 __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
1689 }
1690 __ mov(eax, ecx);
1691 __ ret(0);
1692 break;
1693 }
1694
1695 default:
1696 break;
1697 }
1698 }
1699
1700 // 7. Non-smi operands, fall out to the non-smi code with the operands in
1701 // edx and eax.
1702 Comment done_comment(masm, "-- Enter non-smi code");
1703 __ bind(&not_smis);
1704 switch (op_) {
1705 case Token::BIT_OR:
1706 case Token::SHL:
1707 case Token::SAR:
1708 case Token::SHR:
1709 // Right operand is saved in ecx and eax was destroyed by the smi
1710 // check.
1711 __ mov(eax, ecx);
1712 break;
1713
1714 case Token::DIV:
1715 case Token::MOD:
1716 // Operands are in eax, ebx at this point.
1717 __ mov(edx, eax);
1718 __ mov(eax, ebx);
1719 break;
1720
1721 default:
1722 break;
1723 }
1724 }
1725
1726
1727 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
1728 Label call_runtime;
1729
1730 switch (op_) {
1731 case Token::ADD:
1732 case Token::SUB:
1733 case Token::MUL:
1734 case Token::DIV:
1735 break;
1736 case Token::MOD:
1737 case Token::BIT_OR:
1738 case Token::BIT_AND:
1739 case Token::BIT_XOR:
1740 case Token::SAR:
1741 case Token::SHL:
1742 case Token::SHR:
1743 GenerateRegisterArgsPush(masm);
1744 break;
1745 default:
1746 UNREACHABLE();
1747 }
1748
1749 if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
1750 result_type_ == TRBinaryOpIC::SMI) {
1751 GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
1752 } else {
1753 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
1754 }
1755 __ bind(&call_runtime);
1756 switch (op_) {
1757 case Token::ADD:
1758 case Token::SUB:
1759 case Token::MUL:
1760 case Token::DIV:
1761 GenerateTypeTransition(masm);
1762 break;
1763 case Token::MOD:
1764 case Token::BIT_OR:
1765 case Token::BIT_AND:
1766 case Token::BIT_XOR:
1767 case Token::SAR:
1768 case Token::SHL:
1769 case Token::SHR:
1770 GenerateTypeTransitionWithSavedArgs(masm);
1771 break;
1772 default:
1773 UNREACHABLE();
1774 }
1775 }
1776
1777
1778
1779 void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
1780 Label call_runtime;
1781 ASSERT(operands_type_ == TRBinaryOpIC::STRING);
1782 ASSERT(op_ == Token::ADD);
1783 // If one of the arguments is a string, call the string add stub.
1784 // Otherwise, transition to the generic TRBinaryOpIC type.
1785
1786 // Registers containing left and right operands respectively.
1787 Register left = edx;
1788 Register right = eax;
1789
1790 // Test if left operand is a string.
1791 NearLabel left_not_string;
1792 __ test(left, Immediate(kSmiTagMask));
1793 __ j(zero, &left_not_string);
1794 __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
1795 __ j(above_equal, &left_not_string);
1796
1797 StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
1798 GenerateRegisterArgsPush(masm);
1799 __ TailCallStub(&string_add_left_stub);
1800
1801 // Left operand is not a string, test right.
1802 __ bind(&left_not_string);
1803 __ test(right, Immediate(kSmiTagMask));
1804 __ j(zero, &call_runtime);
1805 __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
1806 __ j(above_equal, &call_runtime);
1807
1808 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
1809 GenerateRegisterArgsPush(masm);
1810 __ TailCallStub(&string_add_right_stub);
1811
1812 // Neither argument is a string.
1813 __ bind(&call_runtime);
1814 GenerateTypeTransition(masm);
1815 }
1816
1817
1818 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
1819 Label call_runtime;
1820 ASSERT(operands_type_ == TRBinaryOpIC::INT32);
1821
1822 // Floating point case.
1823 switch (op_) {
1824 case Token::ADD:
1825 case Token::SUB:
1826 case Token::MUL:
1827 case Token::DIV: {
1828 Label not_floats;
1829 Label not_int32;
1830 if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
1831 CpuFeatures::Scope use_sse2(SSE2);
1832 FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
1833 FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
1834 switch (op_) {
1835 case Token::ADD: __ addsd(xmm0, xmm1); break;
1836 case Token::SUB: __ subsd(xmm0, xmm1); break;
1837 case Token::MUL: __ mulsd(xmm0, xmm1); break;
1838 case Token::DIV: __ divsd(xmm0, xmm1); break;
1839 default: UNREACHABLE();
1840 }
1841 // Check result type if it is currently Int32.
1842 if (result_type_ <= TRBinaryOpIC::INT32) {
1843 __ cvttsd2si(ecx, Operand(xmm0));
1844 __ cvtsi2sd(xmm2, Operand(ecx));
1845 __ ucomisd(xmm0, xmm2);
1846 __ j(not_zero, &not_int32);
1847 __ j(carry, &not_int32);
1848 }
1849 GenerateHeapResultAllocation(masm, &call_runtime);
1850 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1851 __ ret(0);
1852 } else { // SSE2 not available, use FPU.
1853 FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
1854 FloatingPointHelper::LoadFloatOperands(
1855 masm,
1856 ecx,
1857 FloatingPointHelper::ARGS_IN_REGISTERS);
1858 FloatingPointHelper::CheckFloatOperandsAreInt32(masm, &not_int32);
1859 switch (op_) {
1860 case Token::ADD: __ faddp(1); break;
1861 case Token::SUB: __ fsubp(1); break;
1862 case Token::MUL: __ fmulp(1); break;
1863 case Token::DIV: __ fdivp(1); break;
1864 default: UNREACHABLE();
1865 }
1866 Label after_alloc_failure;
1867 GenerateHeapResultAllocation(masm, &after_alloc_failure);
1868 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1869 __ ret(0);
1870 __ bind(&after_alloc_failure);
1871 __ ffree();
1872 __ jmp(&call_runtime);
1873 }
1874
1875 __ bind(&not_floats);
1876 __ bind(&not_int32);
1877 GenerateTypeTransition(masm);
1878 break;
1879 }
1880
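Aside: CheckSSE2OperandsAreInt32 and the inline result check above share one
idiom: truncate to int32, convert back, and require the doubles to compare
equal. A C++ sketch (the range guard stands in for cvttsd2si, which yields
0x80000000 for anything unrepresentable, and NaN failing the guard mirrors
j(carry, &not_int32) after ucomisd):

  #include <cstdint>

  bool IsInt32Double(double d) {
    if (!(d >= -2147483648.0 && d <= 2147483647.0)) return false;  // NaN fails too
    int32_t i = static_cast<int32_t>(d);  // cvttsd2si: truncate toward zero
    return static_cast<double>(i) == d;   // cvtsi2sd + ucomisd equality
  }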
1881 case Token::MOD: {
1882 // For MOD we go directly to runtime in the non-smi case.
1883 break;
1884 }
1885 case Token::BIT_OR:
1886 case Token::BIT_AND:
1887 case Token::BIT_XOR:
1888 case Token::SAR:
1889 case Token::SHL:
1890 case Token::SHR: {
1891 GenerateRegisterArgsPush(masm);
1892 Label not_floats;
1893 Label not_int32;
1894 Label non_smi_result;
1895 /* {
1896 CpuFeatures::Scope use_sse2(SSE2);
1897 FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
1898 FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
1899 }*/
1900 FloatingPointHelper::LoadUnknownsAsIntegers(masm,
1901 use_sse3_,
1902 &not_floats);
1903 FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
1904 &not_int32);
1905 switch (op_) {
1906 case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
1907 case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
1908 case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
1909 case Token::SAR: __ sar_cl(eax); break;
1910 case Token::SHL: __ shl_cl(eax); break;
1911 case Token::SHR: __ shr_cl(eax); break;
1912 default: UNREACHABLE();
1913 }
1914 if (op_ == Token::SHR) {
1915 // Check if result is non-negative and fits in a smi.
1916 __ test(eax, Immediate(0xc0000000));
1917 __ j(not_zero, &call_runtime);
1918 } else {
1919 // Check if result fits in a smi.
1920 __ cmp(eax, 0xc0000000);
1921 __ j(negative, &non_smi_result);
1922 }
1923 // Tag smi result and return.
1924 __ SmiTag(eax);
1925 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
1926
1927 // All ops except SHR return a signed int32 that we load in
1928 // a HeapNumber.
1929 if (op_ != Token::SHR) {
1930 __ bind(&non_smi_result);
1931 // Allocate a heap number if needed.
1932 __ mov(ebx, Operand(eax)); // ebx: result
1933 NearLabel skip_allocation;
1934 switch (mode_) {
1935 case OVERWRITE_LEFT:
1936 case OVERWRITE_RIGHT:
1937 // If the operand was an object, we skip the
1938 // allocation of a heap number.
1939 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1940 1 * kPointerSize : 2 * kPointerSize));
1941 __ test(eax, Immediate(kSmiTagMask));
1942 __ j(not_zero, &skip_allocation, not_taken);
1943 // Fall through!
1944 case NO_OVERWRITE:
1945 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
1946 __ bind(&skip_allocation);
1947 break;
1948 default: UNREACHABLE();
1949 }
1950 // Store the result in the HeapNumber and return.
1951 if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
1952 CpuFeatures::Scope use_sse2(SSE2);
1953 __ cvtsi2sd(xmm0, Operand(ebx));
1954 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1955 } else {
1956 __ mov(Operand(esp, 1 * kPointerSize), ebx);
1957 __ fild_s(Operand(esp, 1 * kPointerSize));
1958 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1959 }
1960 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
1961 }
1962
1963 __ bind(&not_floats);
1964 __ bind(&not_int32);
1965 GenerateTypeTransitionWithSavedArgs(masm);
1966 break;
1967 }
1968 default: UNREACHABLE(); break;
1969 }
1970
1971 // If an allocation fails, or SHR or MOD hit a hard case,
1972 // use the runtime system to get the correct result.
1973 __ bind(&call_runtime);
1974
1975 switch (op_) {
1976 case Token::ADD:
1977 GenerateRegisterArgsPush(masm);
1978 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
1979 break;
1980 case Token::SUB:
1981 GenerateRegisterArgsPush(masm);
1982 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
1983 break;
1984 case Token::MUL:
1985 GenerateRegisterArgsPush(masm);
1986 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
1987 break;
1988 case Token::DIV:
1989 GenerateRegisterArgsPush(masm);
1990 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
1991 break;
1992 case Token::MOD:
1993 GenerateRegisterArgsPush(masm);
1994 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
1995 break;
1996 case Token::BIT_OR:
1997 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
1998 break;
1999 case Token::BIT_AND:
2000 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
2001 break;
2002 case Token::BIT_XOR:
2003 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
2004 break;
2005 case Token::SAR:
2006 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
2007 break;
2008 case Token::SHL:
2009 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
2010 break;
2011 case Token::SHR:
2012 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
2013 break;
2014 default:
2015 UNREACHABLE();
2016 }
2017 }
2018
2019
2020 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
2021 Label call_runtime;
2022 ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER ||
2023 operands_type_ == TRBinaryOpIC::INT32);
2024
2025 // Floating point case.
2026 switch (op_) {
2027 case Token::ADD:
2028 case Token::SUB:
2029 case Token::MUL:
2030 case Token::DIV: {
2031 Label not_floats;
2032 if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
2033 CpuFeatures::Scope use_sse2(SSE2);
2034 FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
2035
2036 switch (op_) {
2037 case Token::ADD: __ addsd(xmm0, xmm1); break;
2038 case Token::SUB: __ subsd(xmm0, xmm1); break;
2039 case Token::MUL: __ mulsd(xmm0, xmm1); break;
2040 case Token::DIV: __ divsd(xmm0, xmm1); break;
2041 default: UNREACHABLE();
2042 }
2043 GenerateHeapResultAllocation(masm, &call_runtime);
2044 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2045 __ ret(0);
2046 } else { // SSE2 not available, use FPU.
2047 FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
2048 FloatingPointHelper::LoadFloatOperands(
2049 masm,
2050 ecx,
2051 FloatingPointHelper::ARGS_IN_REGISTERS);
2052 switch (op_) {
2053 case Token::ADD: __ faddp(1); break;
2054 case Token::SUB: __ fsubp(1); break;
2055 case Token::MUL: __ fmulp(1); break;
2056 case Token::DIV: __ fdivp(1); break;
2057 default: UNREACHABLE();
2058 }
2059 Label after_alloc_failure;
2060 GenerateHeapResultAllocation(masm, &after_alloc_failure);
2061 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2062 __ ret(0);
2063 __ bind(&after_alloc_failure);
2064 __ ffree();
2065 __ jmp(&call_runtime);
2066 }
2067
2068 __ bind(&not_floats);
2069 GenerateTypeTransition(masm);
2070 break;
2071 }
2072
2073 case Token::MOD: {
2074 // For MOD we go directly to runtime in the non-smi case.
2075 break;
2076 }
2077 case Token::BIT_OR:
2078 case Token::BIT_AND:
2079 case Token::BIT_XOR:
2080 case Token::SAR:
2081 case Token::SHL:
2082 case Token::SHR: {
2083 GenerateRegisterArgsPush(masm);
2084 Label not_floats;
2085 Label non_smi_result;
2086 FloatingPointHelper::LoadUnknownsAsIntegers(masm,
2087 use_sse3_,
2088 &not_floats);
2089 switch (op_) {
2090 case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
2091 case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
2092 case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
2093 case Token::SAR: __ sar_cl(eax); break;
2094 case Token::SHL: __ shl_cl(eax); break;
2095 case Token::SHR: __ shr_cl(eax); break;
2096 default: UNREACHABLE();
2097 }
2098 if (op_ == Token::SHR) {
2099 // Check if result is non-negative and fits in a smi.
2100 __ test(eax, Immediate(0xc0000000));
2101 __ j(not_zero, &call_runtime);
2102 } else {
2103 // Check if result fits in a smi.
2104 __ cmp(eax, 0xc0000000);
2105 __ j(negative, &non_smi_result);
2106 }
2107 // Tag smi result and return.
2108 __ SmiTag(eax);
2109 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
2110
2111 // All ops except SHR return a signed int32 that we load in
2112 // a HeapNumber.
2113 if (op_ != Token::SHR) {
2114 __ bind(&non_smi_result);
2115 // Allocate a heap number if needed.
2116 __ mov(ebx, Operand(eax)); // ebx: result
2117 NearLabel skip_allocation;
2118 switch (mode_) {
2119 case OVERWRITE_LEFT:
2120 case OVERWRITE_RIGHT:
2121 // If the operand was an object, we skip the
2122 // allocation of a heap number.
2123 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
2124 1 * kPointerSize : 2 * kPointerSize));
2125 __ test(eax, Immediate(kSmiTagMask));
2126 __ j(not_zero, &skip_allocation, not_taken);
2127 // Fall through!
2128 case NO_OVERWRITE:
2129 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
2130 __ bind(&skip_allocation);
2131 break;
2132 default: UNREACHABLE();
2133 }
2134 // Store the result in the HeapNumber and return.
2135 if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
2136 CpuFeatures::Scope use_sse2(SSE2);
2137 __ cvtsi2sd(xmm0, Operand(ebx));
2138 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2139 } else {
2140 __ mov(Operand(esp, 1 * kPointerSize), ebx);
2141 __ fild_s(Operand(esp, 1 * kPointerSize));
2142 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2143 }
2144 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
2145 }
2146
2147 __ bind(&not_floats);
2148 GenerateTypeTransitionWithSavedArgs(masm);
2149 break;
2150 }
2151 default: UNREACHABLE(); break;
2152 }
2153
2154 // If an allocation fails, or SHR or MOD hit a hard case,
2155 // use the runtime system to get the correct result.
2156 __ bind(&call_runtime);
2157
2158 switch (op_) {
2159 case Token::ADD:
2160 GenerateRegisterArgsPush(masm);
2161 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
2162 break;
2163 case Token::SUB:
2164 GenerateRegisterArgsPush(masm);
2165 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
2166 break;
2167 case Token::MUL:
2168 GenerateRegisterArgsPush(masm);
2169 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
2170 break;
2171 case Token::DIV:
2172 GenerateRegisterArgsPush(masm);
2173 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
2174 break;
2175 case Token::MOD:
2176 GenerateRegisterArgsPush(masm);
2177 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
2178 break;
2179 case Token::BIT_OR:
2180 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
2181 break;
2182 case Token::BIT_AND:
2183 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
2184 break;
2185 case Token::BIT_XOR:
2186 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
2187 break;
2188 case Token::SAR:
2189 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
2190 break;
2191 case Token::SHL:
2192 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
2193 break;
2194 case Token::SHR:
2195 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
2196 break;
2197 default:
2198 UNREACHABLE();
2199 }
2200 }
2201
2202
2203 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
2204 Label call_runtime;
2205
2206 __ IncrementCounter(COUNTERS->generic_binary_stub_calls(), 1);
2207
2208 switch (op_) {
2209 case Token::ADD:
2210 case Token::SUB:
2211 case Token::MUL:
2212 case Token::DIV:
2213 break;
2214 case Token::MOD:
2215 case Token::BIT_OR:
2216 case Token::BIT_AND:
2217 case Token::BIT_XOR:
2218 case Token::SAR:
2219 case Token::SHL:
2220 case Token::SHR:
2221 GenerateRegisterArgsPush(masm);
2222 break;
2223 default:
2224 UNREACHABLE();
2225 }
2226
2227 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
2228
2229 // Floating point case.
2230 switch (op_) {
2231 case Token::ADD:
2232 case Token::SUB:
2233 case Token::MUL:
2234 case Token::DIV: {
2235 Label not_floats;
2236 if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
2237 CpuFeatures::Scope use_sse2(SSE2);
2238 FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
2239
2240 switch (op_) {
2241 case Token::ADD: __ addsd(xmm0, xmm1); break;
2242 case Token::SUB: __ subsd(xmm0, xmm1); break;
2243 case Token::MUL: __ mulsd(xmm0, xmm1); break;
2244 case Token::DIV: __ divsd(xmm0, xmm1); break;
2245 default: UNREACHABLE();
2246 }
2247 GenerateHeapResultAllocation(masm, &call_runtime);
2248 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2249 __ ret(0);
2250 } else { // SSE2 not available, use FPU.
2251 FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
2252 FloatingPointHelper::LoadFloatOperands(
2253 masm,
2254 ecx,
2255 FloatingPointHelper::ARGS_IN_REGISTERS);
2256 switch (op_) {
2257 case Token::ADD: __ faddp(1); break;
2258 case Token::SUB: __ fsubp(1); break;
2259 case Token::MUL: __ fmulp(1); break;
2260 case Token::DIV: __ fdivp(1); break;
2261 default: UNREACHABLE();
2262 }
2263 Label after_alloc_failure;
2264 GenerateHeapResultAllocation(masm, &after_alloc_failure);
2265 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2266 __ ret(0);
2267 __ bind(&after_alloc_failure);
2268 __ ffree();
2269 __ jmp(&call_runtime);
2270 }
2271 __ bind(&not_floats);
2272 break;
2273 }
2274 case Token::MOD: {
2275 // For MOD we go directly to runtime in the non-smi case.
2276 break;
2277 }
2278 case Token::BIT_OR:
2279 case Token::BIT_AND:
2280 case Token::BIT_XOR:
2281 case Token::SAR:
2282 case Token::SHL:
2283 case Token::SHR: {
2284 Label non_smi_result;
2285 FloatingPointHelper::LoadUnknownsAsIntegers(masm,
2286 use_sse3_,
2287 &call_runtime);
2288 switch (op_) {
2289 case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
2290 case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
2291 case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
2292 case Token::SAR: __ sar_cl(eax); break;
2293 case Token::SHL: __ shl_cl(eax); break;
2294 case Token::SHR: __ shr_cl(eax); break;
2295 default: UNREACHABLE();
2296 }
2297 if (op_ == Token::SHR) {
2298 // Check if result is non-negative and fits in a smi.
2299 __ test(eax, Immediate(0xc0000000));
2300 __ j(not_zero, &call_runtime);
2301 } else {
2302 // Check if result fits in a smi.
2303 __ cmp(eax, 0xc0000000);
2304 __ j(negative, &non_smi_result);
2305 }
2306 // Tag smi result and return.
2307 __ SmiTag(eax);
2308 __ ret(2 * kPointerSize); // Drop the arguments from the stack.
2309
2310 // All ops except SHR return a signed int32 that we load in
2311 // a HeapNumber.
2312 if (op_ != Token::SHR) {
2313 __ bind(&non_smi_result);
2314 // Allocate a heap number if needed.
2315 __ mov(ebx, Operand(eax)); // ebx: result
2316 NearLabel skip_allocation;
2317 switch (mode_) {
2318 case OVERWRITE_LEFT:
2319 case OVERWRITE_RIGHT:
2320 // If the operand was an object, we skip the
2321 // allocation of a heap number.
2322 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
2323 1 * kPointerSize : 2 * kPointerSize));
2324 __ test(eax, Immediate(kSmiTagMask));
2325 __ j(not_zero, &skip_allocation, not_taken);
2326 // Fall through!
2327 case NO_OVERWRITE:
2328 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
2329 __ bind(&skip_allocation);
2330 break;
2331 default: UNREACHABLE();
2332 }
2333 // Store the result in the HeapNumber and return.
2334 if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
2335 CpuFeatures::Scope use_sse2(SSE2);
2336 __ cvtsi2sd(xmm0, Operand(ebx));
2337 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2338 } else {
2339 __ mov(Operand(esp, 1 * kPointerSize), ebx);
2340 __ fild_s(Operand(esp, 1 * kPointerSize));
2341 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2342 }
2343 __ ret(2 * kPointerSize);
2344 }
2345 break;
2346 }
2347 default: UNREACHABLE(); break;
2348 }
2349
2350 // If all else fails, use the runtime system to get the correct
2351 // result.
2352 __ bind(&call_runtime);
2353 switch (op_) {
2354 case Token::ADD: {
2355 GenerateRegisterArgsPush(masm);
2356 // Test for string arguments before calling runtime.
2357 // Registers containing left and right operands respectively.
2358 Register lhs, rhs;
2359 lhs = edx;
2360 rhs = eax;
2361
2362 // Test if left operand is a string.
2363 NearLabel lhs_not_string;
1043 __ test(lhs, Immediate(kSmiTagMask)); 2364 __ test(lhs, Immediate(kSmiTagMask));
1044 __ j(zero, &lhs_not_string); 2365 __ j(zero, &lhs_not_string);
1045 __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx); 2366 __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
1046 __ j(above_equal, &lhs_not_string); 2367 __ j(above_equal, &lhs_not_string);
1047 2368
1048 StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB); 2369 StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
1049 __ TailCallStub(&string_add_left_stub); 2370 __ TailCallStub(&string_add_left_stub);
1050 2371
1051 NearLabel call_runtime_with_args; 2372 NearLabel call_add_runtime;
1052 // Left operand is not a string, test right. 2373 // Left operand is not a string, test right.
1053 __ bind(&lhs_not_string); 2374 __ bind(&lhs_not_string);
1054 __ test(rhs, Immediate(kSmiTagMask)); 2375 __ test(rhs, Immediate(kSmiTagMask));
1055 __ j(zero, &call_runtime_with_args); 2376 __ j(zero, &call_add_runtime);
1056 __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx); 2377 __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
1057 __ j(above_equal, &call_runtime_with_args); 2378 __ j(above_equal, &call_add_runtime);
1058 2379
1059 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB); 2380 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
1060 __ TailCallStub(&string_add_right_stub); 2381 __ TailCallStub(&string_add_right_stub);
1061 2382
1062 // Neither argument is a string. 2383 // Neither argument is a string.
1063 __ bind(&call_runtime); 2384 __ bind(&call_add_runtime);
1064 if (HasArgsInRegisters()) {
1065 GenerateRegisterArgsPush(masm);
1066 }
1067 __ bind(&call_runtime_with_args);
1068 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); 2385 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
1069 break; 2386 break;
1070 } 2387 }
1071 case Token::SUB: 2388 case Token::SUB:
2389 GenerateRegisterArgsPush(masm);
1072 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); 2390 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
1073 break; 2391 break;
1074 case Token::MUL: 2392 case Token::MUL:
2393 GenerateRegisterArgsPush(masm);
1075 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); 2394 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
1076 break; 2395 break;
1077 case Token::DIV: 2396 case Token::DIV:
2397 GenerateRegisterArgsPush(masm);
1078 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); 2398 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
1079 break; 2399 break;
1080 case Token::MOD: 2400 case Token::MOD:
1081 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); 2401 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
1082 break; 2402 break;
1083 case Token::BIT_OR: 2403 case Token::BIT_OR:
1084 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); 2404 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
1085 break; 2405 break;
1086 case Token::BIT_AND: 2406 case Token::BIT_AND:
1087 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); 2407 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
1088 break; 2408 break;
1089 case Token::BIT_XOR: 2409 case Token::BIT_XOR:
1090 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); 2410 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
1091 break; 2411 break;
1092 case Token::SAR: 2412 case Token::SAR:
1093 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); 2413 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
1094 break; 2414 break;
1095 case Token::SHL: 2415 case Token::SHL:
1096 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); 2416 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
1097 break; 2417 break;
1098 case Token::SHR: 2418 case Token::SHR:
1099 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); 2419 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
1100 break; 2420 break;
1101 default: 2421 default:
1102 UNREACHABLE(); 2422 UNREACHABLE();
1103 } 2423 }
1104 } 2424 }
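A note on the 0xc0000000 checks in the bitwise case above: an ia32 smi keeps a 31-bit payload shifted left by one, so a 32-bit result can only be tagged if its top two bits agree (values in [-2^30, 2^30 - 1]); for SHR the result is unsigned, so both top bits must be clear. A minimal standalone sketch of the signed predicate (illustrative only, not part of this CL):

    #include <cstdint>

    // A 32-bit value fits in an ia32 smi iff its top two bits are equal,
    // which is what the cmp/test against 0xc0000000 above establishes.
    bool FitsInSmi(int32_t value) {
      // Shifting the valid range [-2^30, 2^30) up by 2^30 maps it onto
      // [0, 2^31), i.e. the top bit of the sum must be clear.
      return (static_cast<uint32_t>(value) + 0x40000000u) < 0x80000000u;
    }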
1105 2425
1106 2426
1107 void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm, 2427 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
1108 Label* alloc_failure) { 2428 MacroAssembler* masm,
2429 Label* alloc_failure) {
1109 Label skip_allocation; 2430 Label skip_allocation;
1110 OverwriteMode mode = mode_; 2431 OverwriteMode mode = mode_;
1111 if (HasArgsReversed()) {
1112 if (mode == OVERWRITE_RIGHT) {
1113 mode = OVERWRITE_LEFT;
1114 } else if (mode == OVERWRITE_LEFT) {
1115 mode = OVERWRITE_RIGHT;
1116 }
1117 }
1118 switch (mode) { 2432 switch (mode) {
1119 case OVERWRITE_LEFT: { 2433 case OVERWRITE_LEFT: {
1120 // If the argument in edx is already an object, we skip the 2434 // If the argument in edx is already an object, we skip the
1121 // allocation of a heap number. 2435 // allocation of a heap number.
1122 __ test(edx, Immediate(kSmiTagMask)); 2436 __ test(edx, Immediate(kSmiTagMask));
1123 __ j(not_zero, &skip_allocation, not_taken); 2437 __ j(not_zero, &skip_allocation, not_taken);
1124 // Allocate a heap number for the result. Keep eax and edx intact 2438 // Allocate a heap number for the result. Keep eax and edx intact
1125 // for the possible runtime call. 2439 // for the possible runtime call.
1126 __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure); 2440 __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
1127 // Now edx can be overwritten losing one of the arguments as we are 2441 // Now edx can be overwritten losing one of the arguments as we are
(...skipping 17 matching lines...)
1145 // Now eax can be overwritten losing one of the arguments as we are 2459 // Now eax can be overwritten losing one of the arguments as we are
1146 // now done and will not need it any more. 2460 // now done and will not need it any more.
1147 __ mov(eax, ebx); 2461 __ mov(eax, ebx);
1148 __ bind(&skip_allocation); 2462 __ bind(&skip_allocation);
1149 break; 2463 break;
1150 default: UNREACHABLE(); 2464 default: UNREACHABLE();
1151 } 2465 }
1152 } 2466 }
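In prose, the allocation policy above: when the overwrite mode permits reusing an operand and that operand is already a heap number (not a smi), its box is recycled for the result; otherwise a fresh HeapNumber is allocated. A hedged sketch of the decision (names are illustrative, and alloc() stands in for AllocateHeapNumber):

    enum Mode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

    void* HeapResultBox(Mode mode, void* left, void* right,
                        bool left_is_smi, bool right_is_smi,
                        void* (*alloc)()) {
      if (mode == OVERWRITE_LEFT && !left_is_smi) return left;    // reuse edx
      if (mode == OVERWRITE_RIGHT && !right_is_smi) return right; // reuse eax
      return alloc();  // NO_OVERWRITE, or the reusable operand was a smi
    }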
1153 2467
1154 2468
1155 void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) { 2469 void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
1156 // If arguments are not passed in registers read them from the stack.
1157 ASSERT(!HasArgsInRegisters());
1158 __ mov(eax, Operand(esp, 1 * kPointerSize));
1159 __ mov(edx, Operand(esp, 2 * kPointerSize));
1160 }
1161
1162
1163 void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
1164 // If arguments are not passed in registers remove them from the stack before
1165 // returning.
1166 if (!HasArgsInRegisters()) {
1167 __ ret(2 * kPointerSize); // Remove both operands
1168 } else {
1169 __ ret(0);
1170 }
1171 }
1172
1173
1174 void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
1175 ASSERT(HasArgsInRegisters());
1176 __ pop(ecx); 2470 __ pop(ecx);
1177 if (HasArgsReversed()) { 2471 __ push(edx);
1178 __ push(eax); 2472 __ push(eax);
1179 __ push(edx);
1180 } else {
1181 __ push(edx);
1182 __ push(eax);
1183 }
1184 __ push(ecx); 2473 __ push(ecx);
1185 } 2474 }
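The pop/push sequence above turns the register operands into stack arguments sitting beneath the return address, which is what a following InvokeBuiltin expects. A toy model of the effect (a sketch, not V8 code; the vector's back is the top of stack):

    #include <cstdint>
    #include <vector>

    void RegisterArgsPush(std::vector<uint32_t>& stack,
                          uint32_t edx, uint32_t eax) {
      uint32_t ret = stack.back();  // pop(ecx): save the return address
      stack.pop_back();
      stack.push_back(edx);         // push(edx): left operand
      stack.push_back(eax);         // push(eax): right operand
      stack.push_back(ret);         // push(ecx): put the return address back
    }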
1186 2475
1187 2476
1188 void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
1189 // Ensure the operands are on the stack.
1190 if (HasArgsInRegisters()) {
1191 GenerateRegisterArgsPush(masm);
1192 }
1193
1194 __ pop(ecx); // Save return address.
1195
1196 // Left and right arguments are now on top.
1197 // Push this stub's key. Although the operation and the type info are
1198 // encoded into the key, the encoding is opaque, so push them too.
1199 __ push(Immediate(Smi::FromInt(MinorKey())));
1200 __ push(Immediate(Smi::FromInt(op_)));
1201 __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
1202
1203 __ push(ecx); // Push return address.
1204
1205 // Patch the caller to an appropriate specialized stub and return the
1206 // operation result to the caller of the stub.
1207 __ TailCallExternalReference(
1208 ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
1209 5,
1210 1);
1211 }
1212
1213
1214 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
1215 GenericBinaryOpStub stub(key, type_info);
1216 return stub.GetCode();
1217 }
1218
1219
1220 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { 2477 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
1221 // Input on stack: 2478 // Input on stack:
1222 // esp[4]: argument (should be number). 2479 // esp[4]: argument (should be number).
1223 // esp[0]: return address. 2480 // esp[0]: return address.
1224 // Test that eax is a number. 2481 // Test that eax is a number.
1225 Label runtime_call; 2482 Label runtime_call;
1226 Label runtime_call_clear_stack; 2483 Label runtime_call_clear_stack;
1227 NearLabel input_not_smi; 2484 NearLabel input_not_smi;
1228 NearLabel loaded; 2485 NearLabel loaded;
1229 __ mov(eax, Operand(esp, kPointerSize)); 2486 __ mov(eax, Operand(esp, kPointerSize));
(...skipping 475 matching lines...)
1705 bool use_sse3, 2962 bool use_sse3,
1706 Label* conversion_failure) { 2963 Label* conversion_failure) {
1707 if (type_info.IsNumber()) { 2964 if (type_info.IsNumber()) {
1708 LoadNumbersAsIntegers(masm, type_info, use_sse3, conversion_failure); 2965 LoadNumbersAsIntegers(masm, type_info, use_sse3, conversion_failure);
1709 } else { 2966 } else {
1710 LoadUnknownsAsIntegers(masm, use_sse3, conversion_failure); 2967 LoadUnknownsAsIntegers(masm, use_sse3, conversion_failure);
1711 } 2968 }
1712 } 2969 }
1713 2970
1714 2971
2972 void FloatingPointHelper::CheckLoadedIntegersWereInt32(MacroAssembler* masm,
2973 bool use_sse3,
2974 Label* not_int32) {
2975 return;
2976 }
2977
2978
1715 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm, 2979 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
1716 Register number) { 2980 Register number) {
1717 NearLabel load_smi, done; 2981 NearLabel load_smi, done;
1718 2982
1719 __ test(number, Immediate(kSmiTagMask)); 2983 __ test(number, Immediate(kSmiTagMask));
1720 __ j(zero, &load_smi, not_taken); 2984 __ j(zero, &load_smi, not_taken);
1721 __ fld_d(FieldOperand(number, HeapNumber::kValueOffset)); 2985 __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
1722 __ jmp(&done); 2986 __ jmp(&done);
1723 2987
1724 __ bind(&load_smi); 2988 __ bind(&load_smi);
(...skipping 75 matching lines...)
1800 ASSERT(!scratch.is(right)); // We're about to clobber scratch. 3064 ASSERT(!scratch.is(right)); // We're about to clobber scratch.
1801 __ SmiUntag(scratch); 3065 __ SmiUntag(scratch);
1802 __ cvtsi2sd(xmm0, Operand(scratch)); 3066 __ cvtsi2sd(xmm0, Operand(scratch));
1803 3067
1804 __ mov(scratch, right); 3068 __ mov(scratch, right);
1805 __ SmiUntag(scratch); 3069 __ SmiUntag(scratch);
1806 __ cvtsi2sd(xmm1, Operand(scratch)); 3070 __ cvtsi2sd(xmm1, Operand(scratch));
1807 } 3071 }
1808 3072
1809 3073
3074 void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
3075 Label* non_int32,
3076 Register scratch) {
3077 __ cvttsd2si(scratch, Operand(xmm0));
3078 __ cvtsi2sd(xmm2, Operand(scratch));
3079 __ ucomisd(xmm0, xmm2);
3080 __ j(not_zero, non_int32);
3081 __ j(carry, non_int32);
3082 __ cvttsd2si(scratch, Operand(xmm1));
3083 __ cvtsi2sd(xmm2, Operand(scratch));
3084 __ ucomisd(xmm1, xmm2);
3085 __ j(not_zero, non_int32);
3086 __ j(carry, non_int32);
3087 }
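The check above is a truncate-and-reconvert round trip: a double holds a representable int32 exactly when cvttsd2si followed by cvtsi2sd reproduces the original value, and the carry exit after ucomisd rejects NaNs (unordered sets CF). An equivalent scalar sketch (illustrative only):

    #include <cmath>
    #include <cstdint>

    bool IsInt32Double(double d) {
      if (std::isnan(d)) return false;                   // unordered -> carry
      if (d < -2147483648.0 || d > 2147483647.0) return false;
      int32_t truncated = static_cast<int32_t>(d);       // like cvttsd2si
      return static_cast<double>(truncated) == d;        // cvtsi2sd + ucomisd
    }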
3088
3089
1810 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, 3090 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
1811 Register scratch, 3091 Register scratch,
1812 ArgLocation arg_location) { 3092 ArgLocation arg_location) {
1813 NearLabel load_smi_1, load_smi_2, done_load_1, done; 3093 NearLabel load_smi_1, load_smi_2, done_load_1, done;
1814 if (arg_location == ARGS_IN_REGISTERS) { 3094 if (arg_location == ARGS_IN_REGISTERS) {
1815 __ mov(scratch, edx); 3095 __ mov(scratch, edx);
1816 } else { 3096 } else {
1817 __ mov(scratch, Operand(esp, 2 * kPointerSize)); 3097 __ mov(scratch, Operand(esp, 2 * kPointerSize));
1818 } 3098 }
1819 __ test(scratch, Immediate(kSmiTagMask)); 3099 __ test(scratch, Immediate(kSmiTagMask));
(...skipping 63 matching lines...)
1883 __ j(zero, &done); // argument in eax is OK 3163 __ j(zero, &done); // argument in eax is OK
1884 __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset)); 3164 __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
1885 __ cmp(scratch, FACTORY->heap_number_map()); 3165 __ cmp(scratch, FACTORY->heap_number_map());
1886 __ j(not_equal, non_float); // argument in eax is not a number -> NaN 3166 __ j(not_equal, non_float); // argument in eax is not a number -> NaN
1887 3167
1888 // Fall-through: Both operands are numbers. 3168 // Fall-through: Both operands are numbers.
1889 __ bind(&done); 3169 __ bind(&done);
1890 } 3170 }
1891 3171
1892 3172
3173 void FloatingPointHelper::CheckFloatOperandsAreInt32(MacroAssembler* masm,
3174 Label* non_int32) {
3175 return;
3176 }
3177
3178
1893 void GenericUnaryOpStub::Generate(MacroAssembler* masm) { 3179 void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
1894 Label slow, done, undo; 3180 Label slow, done, undo;
1895 3181
1896 if (op_ == Token::SUB) { 3182 if (op_ == Token::SUB) {
1897 if (include_smi_code_) { 3183 if (include_smi_code_) {
1898 // Check whether the value is a smi. 3184 // Check whether the value is a smi.
1899 NearLabel try_float; 3185 NearLabel try_float;
1900 __ test(eax, Immediate(kSmiTagMask)); 3186 __ test(eax, Immediate(kSmiTagMask));
1901 __ j(not_zero, &try_float, not_taken); 3187 __ j(not_zero, &try_float, not_taken);
1902 3188
(...skipping 114 matching lines...)
2017 break; 3303 break;
2018 case Token::BIT_NOT: 3304 case Token::BIT_NOT:
2019 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); 3305 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
2020 break; 3306 break;
2021 default: 3307 default:
2022 UNREACHABLE(); 3308 UNREACHABLE();
2023 } 3309 }
2024 } 3310 }
2025 3311
2026 3312
3313 void MathPowStub::Generate(MacroAssembler* masm) {
3314 // Registers are used as follows:
3315 // edx = base
3316 // eax = exponent
3317 // ecx = temporary, result
3318
3319 CpuFeatures::Scope use_sse2(SSE2);
3320 Label allocate_return, call_runtime;
3321
3322 // Load input parameters.
3323 __ mov(edx, Operand(esp, 2 * kPointerSize));
3324 __ mov(eax, Operand(esp, 1 * kPointerSize));
3325
3326 // Save 1 in xmm3 - we need this several times later on.
3327 __ mov(ecx, Immediate(1));
3328 __ cvtsi2sd(xmm3, Operand(ecx));
3329
3330 Label exponent_nonsmi;
3331 Label base_nonsmi;
3332 // If the exponent is a heap number, go to that specific case.
3333 __ test(eax, Immediate(kSmiTagMask));
3334 __ j(not_zero, &exponent_nonsmi);
3335 __ test(edx, Immediate(kSmiTagMask));
3336 __ j(not_zero, &base_nonsmi);
3337
3338 // Optimized version when both the exponent and the base are smis.
3339 Label powi;
3340 __ SmiUntag(edx);
3341 __ cvtsi2sd(xmm0, Operand(edx));
3342 __ jmp(&powi);
3343 // The exponent is a smi and the base is a heap number.
3344 __ bind(&base_nonsmi);
3345 __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
3346 FACTORY->heap_number_map());
3347 __ j(not_equal, &call_runtime);
3348
3349 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
3350
3351 // Optimized version of pow if exponent is a smi.
3352 // xmm0 contains the base.
3353 __ bind(&powi);
3354 __ SmiUntag(eax);
3355
3356 // Save exponent in base as we need to check if exponent is negative later.
3357 // We know that base and exponent are in different registers.
3358 __ mov(edx, eax);
3359
3360 // Get absolute value of exponent.
3361 NearLabel no_neg;
3362 __ cmp(eax, 0);
3363 __ j(greater_equal, &no_neg);
3364 __ neg(eax);
3365 __ bind(&no_neg);
3366
3367 // Load xmm1 with 1.
3368 __ movsd(xmm1, xmm3);
3369 NearLabel while_true;
3370 NearLabel no_multiply;
3371
3372 __ bind(&while_true);
3373 __ shr(eax, 1);
3374 __ j(not_carry, &no_multiply);
3375 __ mulsd(xmm1, xmm0);
3376 __ bind(&no_multiply);
3377 __ test(eax, Operand(eax));
3378 __ mulsd(xmm0, xmm0);
3379 __ j(not_zero, &while_true);
3380
3381 // edx now holds the original exponent - if the exponent is
3382 // negative, return 1/result.
3383 __ test(edx, Operand(edx));
3384 __ j(positive, &allocate_return);
3385 // Special case if xmm1 has reached infinity.
3386 __ mov(ecx, Immediate(0x7FB00000));
3387 __ movd(xmm0, Operand(ecx));
3388 __ cvtss2sd(xmm0, xmm0);
3389 __ ucomisd(xmm0, xmm1);
3390 __ j(equal, &call_runtime);
3391 __ divsd(xmm3, xmm1);
3392 __ movsd(xmm1, xmm3);
3393 __ jmp(&allocate_return);
3394
3395 // The exponent (or both operands) is a heap number - in any case we
3396 // now work on doubles.
3397 __ bind(&exponent_nonsmi);
3398 __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
3399 FACTORY->heap_number_map());
3400 __ j(not_equal, &call_runtime);
3401 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
3402 // Test if the exponent is NaN.
3403 __ ucomisd(xmm1, xmm1);
3404 __ j(parity_even, &call_runtime);
3405
3406 NearLabel base_not_smi;
3407 NearLabel handle_special_cases;
3408 __ test(edx, Immediate(kSmiTagMask));
3409 __ j(not_zero, &base_not_smi);
3410 __ SmiUntag(edx);
3411 __ cvtsi2sd(xmm0, Operand(edx));
3412 __ jmp(&handle_special_cases);
3413
3414 __ bind(&base_not_smi);
3415 __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
3416 FACTORY->heap_number_map());
3417 __ j(not_equal, &call_runtime);
3418 __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
3419 __ and_(ecx, HeapNumber::kExponentMask);
3420 __ cmp(Operand(ecx), Immediate(HeapNumber::kExponentMask));
3421 // The base is NaN or +/-Infinity.
3422 __ j(greater_equal, &call_runtime);
3423 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
3424
3425 // base is in xmm0 and exponent is in xmm1.
3426 __ bind(&handle_special_cases);
3427 NearLabel not_minus_half;
3428 // Test for -0.5.
3429 // Load xmm2 with -0.5.
3430 __ mov(ecx, Immediate(0xBF000000));
3431 __ movd(xmm2, Operand(ecx));
3432 __ cvtss2sd(xmm2, xmm2);
3433 // xmm2 now has -0.5.
3434 __ ucomisd(xmm2, xmm1);
3435 __ j(not_equal, &not_minus_half);
3436
3437 // Calculates reciprocal of square root.
3438 // Note that 1/sqrt(x) = sqrt(1/x).
3439 __ divsd(xmm3, xmm0);
3440 __ movsd(xmm1, xmm3);
3441 __ sqrtsd(xmm1, xmm1);
3442 __ jmp(&allocate_return);
3443
3444 // Test for 0.5.
3445 __ bind(&not_minus_half);
3446 // Load xmm2 with 0.5.
3447 // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
3448 __ addsd(xmm2, xmm3);
3449 // xmm2 now has 0.5.
3450 __ ucomisd(xmm2, xmm1);
3451 __ j(not_equal, &call_runtime);
3452 // Calculates square root.
3453 __ movsd(xmm1, xmm0);
3454 __ sqrtsd(xmm1, xmm1);
3455
3456 __ bind(&allocate_return);
3457 __ AllocateHeapNumber(ecx, eax, edx, &call_runtime);
3458 __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm1);
3459 __ mov(eax, ecx);
3460 __ ret(2 * kPointerSize);
3461
3462 __ bind(&call_runtime);
3463 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
3464 }
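The smi-exponent fast path above is binary exponentiation: the base is squared on every round and multiplied into the accumulator whenever the bit shifted out of the exponent is set, with a final reciprocal for negative exponents. A scalar sketch (illustrative only; exponents here come from untagged smis, so the negation cannot overflow):

    double PowiSketch(double base, int exponent) {
      unsigned bits =
          static_cast<unsigned>(exponent < 0 ? -exponent : exponent);
      double result = 1.0;  // xmm1, seeded with 1 from xmm3
      double b = base;      // xmm0
      while (bits != 0) {
        if (bits & 1u) result *= b;  // carry after shr -> mulsd(xmm1, xmm0)
        bits >>= 1u;                 // shr(eax, 1)
        b *= b;                      // mulsd(xmm0, xmm0)
      }
      return exponent < 0 ? 1.0 / result : result;
    }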
3465
3466
2027 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { 3467 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
2028 // The key is in edx and the parameter count is in eax. 3468 // The key is in edx and the parameter count is in eax.
2029 3469
2030 // The displacement is used for skipping the frame pointer on the 3470 // The displacement is used for skipping the frame pointer on the
2031 // stack. It is the offset of the last parameter (if any) relative 3471 // stack. It is the offset of the last parameter (if any) relative
2032 // to the frame pointer. 3472 // to the frame pointer.
2033 static const int kDisplacement = 1 * kPointerSize; 3473 static const int kDisplacement = 1 * kPointerSize;
2034 3474
2035 // Check that the key is a smi. 3475 // Check that the key is a smi.
2036 Label slow; 3476 Label slow;
(...skipping 474 matching lines...)
2511 __ mov(eax, Operand(esp, kLastMatchInfoOffset)); 3951 __ mov(eax, Operand(esp, kLastMatchInfoOffset));
2512 __ ret(4 * kPointerSize); 3952 __ ret(4 * kPointerSize);
2513 3953
2514 // Do the runtime call to execute the regexp. 3954 // Do the runtime call to execute the regexp.
2515 __ bind(&runtime); 3955 __ bind(&runtime);
2516 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); 3956 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
2517 #endif // V8_INTERPRETED_REGEXP 3957 #endif // V8_INTERPRETED_REGEXP
2518 } 3958 }
2519 3959
2520 3960
3961 void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
3962 const int kMaxInlineLength = 100;
3963 Label slowcase;
3964 NearLabel done;
3965 __ mov(ebx, Operand(esp, kPointerSize * 3));
3966 __ test(ebx, Immediate(kSmiTagMask));
3967 __ j(not_zero, &slowcase);
3968 __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
3969 __ j(above, &slowcase);
3970 // Smi-tagging is equivalent to multiplying by 2.
3971 STATIC_ASSERT(kSmiTag == 0);
3972 STATIC_ASSERT(kSmiTagSize == 1);
3973 // Allocate RegExpResult followed by FixedArray with size in ebx.
3974 // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
3975 // Elements: [Map][Length][..elements..]
3976 __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
3977 times_half_pointer_size,
3978 ebx, // In: Number of elements (times 2, being a smi)
3979 eax, // Out: Start of allocation (tagged).
3980 ecx, // Out: End of allocation.
3981 edx, // Scratch register
3982 &slowcase,
3983 TAG_OBJECT);
3984 // eax: Start of allocated area, object-tagged.
3985
3986 // Set JSArray map to global.regexp_result_map().
3987 // Set empty properties FixedArray.
3988 // Set elements to point to FixedArray allocated right after the JSArray.
3989 // Interleave operations for better latency.
3990 __ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX));
3991 __ mov(ecx, Immediate(FACTORY->empty_fixed_array()));
3992 __ lea(ebx, Operand(eax, JSRegExpResult::kSize));
3993 __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset));
3994 __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
3995 __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
3996 __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
3997 __ mov(FieldOperand(eax, HeapObject::kMapOffset), edx);
3998
3999 // Set input, index and length fields from arguments.
4000 __ mov(ecx, Operand(esp, kPointerSize * 1));
4001 __ mov(FieldOperand(eax, JSRegExpResult::kInputOffset), ecx);
4002 __ mov(ecx, Operand(esp, kPointerSize * 2));
4003 __ mov(FieldOperand(eax, JSRegExpResult::kIndexOffset), ecx);
4004 __ mov(ecx, Operand(esp, kPointerSize * 3));
4005 __ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx);
4006
4007 // Fill out the elements FixedArray.
4008 // eax: JSArray.
4009 // ebx: FixedArray.
4010 // ecx: Number of elements in array, as smi.
4011
4012 // Set map.
4013 __ mov(FieldOperand(ebx, HeapObject::kMapOffset),
4014 Immediate(FACTORY->fixed_array_map()));
4015 // Set length.
4016 __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
4017 // Fill contents of fixed-array with the-hole.
4018 __ SmiUntag(ecx);
4019 __ mov(edx, Immediate(FACTORY->the_hole_value()));
4020 __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
4021 // Fill fixed array elements with hole.
4022 // eax: JSArray.
4023 // ecx: Number of elements to fill.
4024 // ebx: Start of elements in FixedArray.
4025 // edx: the hole.
4026 Label loop;
4027 __ test(ecx, Operand(ecx));
4028 __ bind(&loop);
4029 __ j(less_equal, &done); // Jump if ecx is negative or zero.
4030 __ sub(Operand(ecx), Immediate(1));
4031 __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
4032 __ jmp(&loop);
4033
4034 __ bind(&done);
4035 __ ret(3 * kPointerSize);
4036
4037 __ bind(&slowcase);
4038 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
4039 }
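The hole-filling loop above walks ecx down from the element count and stores the hole into each slot, exiting once the count reaches zero. The same loop in plain C++ (a sketch; the_hole stands in for FACTORY->the_hole_value()):

    void FillWithHole(void** elements, int count, void* the_hole) {
      // Matches the backwards loop: sub(ecx, 1), then store at [ebx + ecx*4].
      for (int i = count - 1; i >= 0; i--) {
        elements[i] = the_hole;
      }
    }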
4040
4041
2521 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, 4042 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
2522 Register object, 4043 Register object,
2523 Register result, 4044 Register result,
2524 Register scratch1, 4045 Register scratch1,
2525 Register scratch2, 4046 Register scratch2,
2526 bool object_is_smi, 4047 bool object_is_smi,
2527 Label* not_found) { 4048 Label* not_found) {
2528 // Use of registers. Register result is used as a temporary. 4049 // Use of registers. Register result is used as a temporary.
2529 Register number_string_cache = result; 4050 Register number_string_cache = result;
2530 Register mask = scratch1; 4051 Register mask = scratch1;
(...skipping 601 matching lines...)
3132 4653
3133 // Check for failure result. 4654 // Check for failure result.
3134 Label failure_returned; 4655 Label failure_returned;
3135 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); 4656 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
3136 __ lea(ecx, Operand(eax, 1)); 4657 __ lea(ecx, Operand(eax, 1));
3137 // Lower 2 bits of ecx are 0 iff eax has failure tag. 4658 // Lower 2 bits of ecx are 0 iff eax has failure tag.
3138 __ test(ecx, Immediate(kFailureTagMask)); 4659 __ test(ecx, Immediate(kFailureTagMask));
3139 __ j(zero, &failure_returned, not_taken); 4660 __ j(zero, &failure_returned, not_taken);
3140 4661
3141 // Exit the JavaScript to C++ exit frame. 4662 // Exit the JavaScript to C++ exit frame.
3142 __ LeaveExitFrame(); 4663 __ LeaveExitFrame(save_doubles_);
3143 __ ret(0); 4664 __ ret(0);
3144 4665
3145 // Handling of failure. 4666 // Handling of failure.
3146 __ bind(&failure_returned); 4667 __ bind(&failure_returned);
3147 4668
3148 Label retry; 4669 Label retry;
3149 // If the returned exception is RETRY_AFTER_GC, continue at the retry label. 4670 // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
3150 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); 4671 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
3151 __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); 4672 __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
3152 __ j(zero, &retry, taken); 4673 __ j(zero, &retry, taken);
(...skipping 81 matching lines...)
3234 // ebp: frame pointer (restored after C call) 4755 // ebp: frame pointer (restored after C call)
3235 // esp: stack pointer (restored after C call) 4756 // esp: stack pointer (restored after C call)
3236 // esi: current context (C callee-saved) 4757 // esi: current context (C callee-saved)
3237 // edi: JS function of the caller (C callee-saved) 4758 // edi: JS function of the caller (C callee-saved)
3238 4759
3239 // NOTE: Invocations of builtins may return failure objects instead 4760 // NOTE: Invocations of builtins may return failure objects instead
3240 // of a proper result. The builtin entry handles this by performing 4761 // of a proper result. The builtin entry handles this by performing
3241 // a garbage collection and retrying the builtin (twice). 4762 // a garbage collection and retrying the builtin (twice).
3242 4763
3243 // Enter the exit frame that transitions from JavaScript to C++. 4764 // Enter the exit frame that transitions from JavaScript to C++.
3244 __ EnterExitFrame(); 4765 __ EnterExitFrame(save_doubles_);
3245 4766
3246 // eax: result parameter for PerformGC, if any (set up below) 4767 // eax: result parameter for PerformGC, if any (set up below)
3247 // ebx: pointer to builtin function (C callee-saved) 4768 // ebx: pointer to builtin function (C callee-saved)
3248 // ebp: frame pointer (restored after C call) 4769 // ebp: frame pointer (restored after C call)
3249 // esp: stack pointer (restored after C call) 4770 // esp: stack pointer (restored after C call)
3250 // edi: number of arguments including receiver (C callee-saved) 4771 // edi: number of arguments including receiver (C callee-saved)
3251 // esi: argv pointer (C callee-saved) 4772 // esi: argv pointer (C callee-saved)
3252 4773
3253 Label throw_normal_exception; 4774 Label throw_normal_exception;
3254 Label throw_termination_exception; 4775 Label throw_termination_exception;
(...skipping 1330 matching lines...)
4585 __ add(Operand(esp), Immediate(2 * kPointerSize)); 6106 __ add(Operand(esp), Immediate(2 * kPointerSize));
4586 __ push(ecx); 6107 __ push(ecx);
4587 GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi); 6108 GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
4588 6109
4589 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) 6110 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
4590 // tagged as a small integer. 6111 // tagged as a small integer.
4591 __ bind(&runtime); 6112 __ bind(&runtime);
4592 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); 6113 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
4593 } 6114 }
4594 6115
6116
6117 void StringCharAtStub::Generate(MacroAssembler* masm) {
6118 // Expects two arguments (object, index) on the stack.
6119
6120 // Stack frame on entry.
6121 // esp[0]: return address
6122 // esp[4]: index
6123 // esp[8]: object
6124
6125 Register object = ebx;
6126 Register index = eax;
6127 Register scratch1 = ecx;
6128 Register scratch2 = edx;
6129 Register result = eax;
6130
6131 __ pop(scratch1); // Return address.
6132 __ pop(index);
6133 __ pop(object);
6134 __ push(scratch1);
6135
6136 Label need_conversion;
6137 Label index_out_of_range;
6138 Label done;
6139 StringCharAtGenerator generator(object,
6140 index,
6141 scratch1,
6142 scratch2,
6143 result,
6144 &need_conversion,
6145 &need_conversion,
6146 &index_out_of_range,
6147 STRING_INDEX_IS_NUMBER);
6148 generator.GenerateFast(masm);
6149 __ jmp(&done);
6150
6151 __ bind(&index_out_of_range);
6152 // When the index is out of range, the spec requires us to return
6153 // the empty string.
6154 __ Set(result, Immediate(FACTORY->empty_string()));
6155 __ jmp(&done);
6156
6157 __ bind(&need_conversion);
6158 // Move smi zero into the result register, which will trigger
6159 // conversion.
6160 __ Set(result, Immediate(Smi::FromInt(0)));
6161 __ jmp(&done);
6162
6163 StubRuntimeCallHelper call_helper;
6164 generator.GenerateSlow(masm, call_helper);
6165
6166 __ bind(&done);
6167 __ ret(0);
6168 }
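Behaviorally, the two slow-path labels above implement the charAt contract: an out-of-range index yields the empty string, and a non-number index is routed through conversion (triggered by seeding the result with smi zero). A rough scalar sketch of that contract (illustrative only; it elides the conversion case):

    #include <string>

    std::string CharAtSketch(const std::string& s, int index) {
      if (index < 0 || index >= static_cast<int>(s.size())) {
        return "";  // the index_out_of_range path
      }
      return std::string(1, s[index]);
    }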
6169
6170 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
6171 ASSERT(state_ == CompareIC::SMIS);
6172 NearLabel miss;
6173 __ mov(ecx, Operand(edx));
6174 __ or_(ecx, Operand(eax));
6175 __ test(ecx, Immediate(kSmiTagMask));
6176 __ j(not_zero, &miss, not_taken);
6177
6178 if (GetCondition() == equal) {
6179 // For equality we do not care about the sign of the result.
6180 __ sub(eax, Operand(edx));
6181 } else {
6182 NearLabel done;
6183 __ sub(edx, Operand(eax));
6184 __ j(no_overflow, &done);
6185 // Correct sign of result in case of overflow.
6186 __ not_(edx);
6187 __ bind(&done);
6188 __ mov(eax, edx);
6189 }
6190 __ ret(0);
6191
6192 __ bind(&miss);
6193 GenerateMiss(masm);
6194 }
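The subtraction trick above deserves a gloss: edx - eax can overflow when the operands have opposite signs, and in exactly that case the computed sign is the opposite of the true sign, so complementing the bits (not_) yields a nonzero value with the correct sign. A sketch of the same logic (uses a GCC/Clang builtin to expose the overflow flag):

    #include <cstdint>

    int32_t SmiCompareSketch(int32_t left, int32_t right) {
      int32_t diff;
      if (__builtin_sub_overflow(left, right, &diff)) {
        diff = ~diff;  // not_(edx): flips the sign, result stays nonzero
      }
      return diff;     // negative, zero, or positive, like eax above
    }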
6195
6196
6197 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
6198 ASSERT(state_ == CompareIC::HEAP_NUMBERS);
6199
6200 NearLabel generic_stub;
6201 NearLabel unordered;
6202 NearLabel miss;
6203 __ mov(ecx, Operand(edx));
6204 __ and_(ecx, Operand(eax));
6205 __ test(ecx, Immediate(kSmiTagMask));
6206 __ j(zero, &generic_stub, not_taken);
6207
6208 __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
6209 __ j(not_equal, &miss, not_taken);
6210 __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
6211 __ j(not_equal, &miss, not_taken);
6212
6213 // Inline the double comparison and fall back to the general compare
6214 // stub if NaN is involved or SSE2 or CMOV is unsupported.
6215 CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
6216 if (cpu_features->IsSupported(SSE2) && cpu_features->IsSupported(CMOV)) {
6217 CpuFeatures::Scope scope1(SSE2);
6218 CpuFeatures::Scope scope2(CMOV);
6219
6220 // Load the left and right operands.
6221 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
6222 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
6223
6224 // Compare operands
6225 __ ucomisd(xmm0, xmm1);
6226
6227 // Don't base result on EFLAGS when a NaN is involved.
6228 __ j(parity_even, &unordered, not_taken);
6229
6230 // Return a result of -1, 0, or 1, based on EFLAGS.
6231 // Set eax to zero with mov: xor would clobber the flags the cmovs read.
6232 __ mov(eax, 0); // equal
6233 __ mov(ecx, Immediate(Smi::FromInt(1)));
6234 __ cmov(above, eax, Operand(ecx));
6235 __ mov(ecx, Immediate(Smi::FromInt(-1)));
6236 __ cmov(below, eax, Operand(ecx));
6237 __ ret(0);
6238
6239 __ bind(&unordered);
6240 }
6241
6242 CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
6243 __ bind(&generic_stub);
6244 __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
6245
6246 __ bind(&miss);
6247 GenerateMiss(masm);
6248 }
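The branch-free result selection above maps the ucomisd flags onto a three-way value: equal leaves 0, "above" cmovs in +1 and "below" cmovs in -1 (both as smis in the stub), while NaN takes the unordered exit to the generic path. A scalar sketch (illustrative only):

    int CompareDoublesSketch(double left, double right, bool* unordered) {
      *unordered = (left != left) || (right != right);  // any NaN involved
      if (*unordered) return 0;     // parity_even: fall back to generic stub
      if (left > right) return 1;   // cmov(above)
      if (left < right) return -1;  // cmov(below)
      return 0;                     // mov(eax, 0): equal
    }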
6249
6250
6251 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
6252 ASSERT(state_ == CompareIC::OBJECTS);
6253 NearLabel miss;
6254 __ mov(ecx, Operand(edx));
6255 __ and_(ecx, Operand(eax));
6256 __ test(ecx, Immediate(kSmiTagMask));
6257 __ j(zero, &miss, not_taken);
6258
6259 __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
6260 __ j(not_equal, &miss, not_taken);
6261 __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
6262 __ j(not_equal, &miss, not_taken);
6263
6264 ASSERT(GetCondition() == equal);
6265 __ sub(eax, Operand(edx));
6266 __ ret(0);
6267
6268 __ bind(&miss);
6269 GenerateMiss(masm);
6270 }
6271
6272
6273 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
6274 // Save the registers.
6275 __ pop(ecx);
6276 __ push(edx);
6277 __ push(eax);
6278 __ push(ecx);
6279
6280 // Call the runtime system in a fresh internal frame.
6281 ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss));
6282 __ EnterInternalFrame();
6283 __ push(edx);
6284 __ push(eax);
6285 __ push(Immediate(Smi::FromInt(op_)));
6286 __ CallExternalReference(miss, 3);
6287 __ LeaveInternalFrame();
6288
6289 // Compute the entry point of the rewritten stub.
6290 __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
6291
6292 // Restore registers.
6293 __ pop(ecx);
6294 __ pop(eax);
6295 __ pop(edx);
6296 __ push(ecx);
6297
6298 // Do a tail call to the rewritten stub.
6299 __ jmp(Operand(edi));
6300 }
6301
6302
4595 #undef __ 6303 #undef __
4596 6304
4597 } } // namespace v8::internal 6305 } } // namespace v8::internal
4598 6306
4599 #endif // V8_TARGET_ARCH_IA32 6307 #endif // V8_TARGET_ARCH_IA32