Chromium Code Reviews

Side by Side Diff: src/ia32/code-stubs-ia32.cc

Issue 25571002: Revert "Hydrogenisation of binops" (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 2 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 275 matching lines...)
286 Isolate* isolate, 286 Isolate* isolate,
287 CodeStubInterfaceDescriptor* descriptor) { 287 CodeStubInterfaceDescriptor* descriptor) {
288 static Register registers[] = { eax, ebx, ecx, edx }; 288 static Register registers[] = { eax, ebx, ecx, edx };
289 descriptor->register_param_count_ = 4; 289 descriptor->register_param_count_ = 4;
290 descriptor->register_params_ = registers; 290 descriptor->register_params_ = registers;
291 descriptor->deoptimization_handler_ = 291 descriptor->deoptimization_handler_ =
292 FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss); 292 FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
293 } 293 }
294 294
295 295
296 void BinaryOpStub::InitializeInterfaceDescriptor(
297 Isolate* isolate,
298 CodeStubInterfaceDescriptor* descriptor) {
299 static Register registers[] = { edx, eax };
300 descriptor->register_param_count_ = 2;
301 descriptor->register_params_ = registers;
302 descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
303 descriptor->SetMissHandler(
304 ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
305 }
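The hunk above is deleted by this revert: it is the interface descriptor the hydrogenised BinaryOpStub registered, naming two register parameters, edx (left) and eax (right), and BinaryOpIC_Miss as the handler to fall back to when type feedback is wrong. A simplified sketch of what such a descriptor records, using made-up types rather than the real CodeStubInterfaceDescriptor:

    #include <cstdio>

    // Illustrative only: a stub advertises which registers carry its
    // parameters and which runtime function handles a type-feedback miss.
    struct InterfaceDescriptor {
      int register_param_count;
      const char* const* register_params;
      const char* miss_handler;  // stands in for FUNCTION_ADDR(...)
    };

    int main() {
      static const char* const params[] = { "edx", "eax" };
      InterfaceDescriptor d = { 2, params, "BinaryOpIC_Miss" };
      for (int i = 0; i < d.register_param_count; ++i)
        std::printf("param %d in %s\n", i, d.register_params[i]);
      return 0;
    }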
306
307
308 #define __ ACCESS_MASM(masm) 296 #define __ ACCESS_MASM(masm)
309 297
310 298
311 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { 299 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
312 // Update the static counter each time a new code stub is generated. 300 // Update the static counter each time a new code stub is generated.
313 Isolate* isolate = masm->isolate(); 301 Isolate* isolate = masm->isolate();
314 isolate->counters()->code_stubs()->Increment(); 302 isolate->counters()->code_stubs()->Increment();
315 303
316 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate); 304 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
317 int param_count = descriptor->register_param_count_; 305 int param_count = descriptor->register_param_count_;
(...skipping 167 matching lines...)
485 ARGS_ON_STACK, 473 ARGS_ON_STACK,
486 ARGS_IN_REGISTERS 474 ARGS_IN_REGISTERS
487 }; 475 };
488 476
489 // Code pattern for loading a floating point value. Input value must 477 // Code pattern for loading a floating point value. Input value must
490 // be either a smi or a heap number object (fp value). Requirements: 478 // be either a smi or a heap number object (fp value). Requirements:
491 // operand in register number. Returns operand as floating point number 479 // operand in register number. Returns operand as floating point number
492 // on FPU stack. 480 // on FPU stack.
493 static void LoadFloatOperand(MacroAssembler* masm, Register number); 481 static void LoadFloatOperand(MacroAssembler* masm, Register number);
494 482
483 // Code pattern for loading floating point values. Input values must
484 // be either smi or heap number objects (fp values). Requirements:
485 // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
486 // Returns operands as floating point numbers on FPU stack.
487 static void LoadFloatOperands(MacroAssembler* masm,
488 Register scratch,
489 ArgLocation arg_location = ARGS_ON_STACK);
490
491 // Similar to LoadFloatOperand but assumes that both operands are smis.
492 // Expects operands in edx, eax.
493 static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
494
495 // Test if operands are smi or number objects (fp). Requirements: 495 // Test if operands are smi or number objects (fp). Requirements:
496 // operand_1 in eax, operand_2 in edx; falls through on float 496 // operand_1 in eax, operand_2 in edx; falls through on float
497 // operands, jumps to the non_float label otherwise. 497 // operands, jumps to the non_float label otherwise.
498 static void CheckFloatOperands(MacroAssembler* masm, 498 static void CheckFloatOperands(MacroAssembler* masm,
499 Label* non_float, 499 Label* non_float,
500 Register scratch); 500 Register scratch);
501 501
502 // Takes the operands in edx and eax and loads them as integers in eax
503 // and ecx.
504 static void LoadUnknownsAsIntegers(MacroAssembler* masm,
505 bool use_sse3,
506 BinaryOpIC::TypeInfo left_type,
507 BinaryOpIC::TypeInfo right_type,
508 Label* operand_conversion_failure);
509
502 // Test if operands are numbers (smi or HeapNumber objects), and load 510 // Test if operands are numbers (smi or HeapNumber objects), and load
503 // them into xmm0 and xmm1 if they are. Jump to label not_numbers if 511 // them into xmm0 and xmm1 if they are. Jump to label not_numbers if
504 // either operand is not a number. Operands are in edx and eax. 512 // either operand is not a number. Operands are in edx and eax.
505 // Leaves operands unchanged. 513 // Leaves operands unchanged.
506 static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers); 514 static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
515
516 // Similar to LoadSSE2Operands but assumes that both operands are smis.
517 // Expects operands in edx, eax.
518 static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
519
520 // Checks that |operand| has an int32 value. If |int32_result| is different
521 // from |scratch|, it will contain that int32 value.
522 static void CheckSSE2OperandIsInt32(MacroAssembler* masm,
523 Label* non_int32,
524 XMMRegister operand,
525 Register int32_result,
526 Register scratch,
527 XMMRegister xmm_scratch);
507 }; 528 };
508 529
509 530
510 void DoubleToIStub::Generate(MacroAssembler* masm) { 531 void DoubleToIStub::Generate(MacroAssembler* masm) {
511 Register input_reg = this->source(); 532 Register input_reg = this->source();
512 Register final_result_reg = this->destination(); 533 Register final_result_reg = this->destination();
513 ASSERT(is_truncating()); 534 ASSERT(is_truncating());
514 535
515 Label check_negative, process_64_bits, done, done_no_stash; 536 Label check_negative, process_64_bits, done, done_no_stash;
516 537
(...skipping 123 matching lines...)
640 if (!final_result_reg.is(result_reg)) { 661 if (!final_result_reg.is(result_reg)) {
641 ASSERT(final_result_reg.is(ecx)); 662 ASSERT(final_result_reg.is(ecx));
642 __ mov(final_result_reg, result_reg); 663 __ mov(final_result_reg, result_reg);
643 } 664 }
644 __ pop(save_reg); 665 __ pop(save_reg);
645 __ pop(scratch1); 666 __ pop(scratch1);
646 __ ret(0); 667 __ ret(0);
647 } 668 }
648 669
649 670
671 void BinaryOpStub::Initialize() {
672 platform_specific_bit_ = CpuFeatures::IsSupported(SSE3);
673 }
674
675
676 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
677 __ pop(ecx); // Save return address.
678 __ push(edx);
679 __ push(eax);
680 // Left and right arguments are now on top.
681 __ push(Immediate(Smi::FromInt(MinorKey())));
682
683 __ push(ecx); // Push return address.
684
685 // Patch the caller to an appropriate specialized stub and return the
686 // operation result to the caller of the stub.
687 __ TailCallExternalReference(
688 ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
689 masm->isolate()),
690 3,
691 1);
692 }
693
694
695 // Prepare for a type transition runtime call when the args are already on
696 // the stack, under the return address.
697 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
698 __ pop(ecx); // Save return address.
699 // Left and right arguments are already on top of the stack.
700 __ push(Immediate(Smi::FromInt(MinorKey())));
701
702 __ push(ecx); // Push return address.
703
704 // Patch the caller to an appropriate specialized stub and return the
705 // operation result to the caller of the stub.
706 __ TailCallExternalReference(
707 ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
708 masm->isolate()),
709 3,
710 1);
711 }
712
713
714 static void BinaryOpStub_GenerateRegisterArgsPop(MacroAssembler* masm) {
715 __ pop(ecx);
716 __ pop(eax);
717 __ pop(edx);
718 __ push(ecx);
719 }
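GenerateTypeTransition, GenerateTypeTransitionWithSavedArgs, and BinaryOpStub_GenerateRegisterArgsPop above all rotate the stack around the return address: pop it into ecx, push or pop the arguments beneath it, then push ecx back so the following tail call or ret sees a conventional stack. A toy model of the two shuffles in plain C++ (made-up values stand in for the registers; not V8 code):

    #include <cassert>
    #include <string>
    #include <vector>

    // Top of an x86 stack == back of this vector.
    typedef std::vector<std::string> Stack;

    // GenerateTypeTransition: slip edx, eax and the stub's MinorKey in
    // under the return address, ready for a 3-argument tail call.
    void PushArgsUnderReturn(Stack* s) {
      std::string ret = s->back(); s->pop_back();  // __ pop(ecx)
      s->push_back("edx");                         // __ push(edx)
      s->push_back("eax");                         // __ push(eax)
      s->push_back("Smi(MinorKey)");               // __ push(Immediate(...))
      s->push_back(ret);                           // __ push(ecx)
    }

    // BinaryOpStub_GenerateRegisterArgsPop: drop the two saved arguments
    // back into registers while keeping the return address on top.
    void PopArgsUnderReturn(Stack* s, std::string* eax, std::string* edx) {
      std::string ret = s->back(); s->pop_back();  // __ pop(ecx)
      *eax = s->back(); s->pop_back();             // __ pop(eax)
      *edx = s->back(); s->pop_back();             // __ pop(edx)
      s->push_back(ret);                           // __ push(ecx)
    }

    int main() {
      Stack s;
      s.push_back("ret");
      PushArgsUnderReturn(&s);
      assert(s.size() == 4 && s.back() == "ret");

      Stack t;
      t.push_back("edx"); t.push_back("eax"); t.push_back("ret");
      std::string eax, edx;
      PopArgsUnderReturn(&t, &eax, &edx);
      assert(t.size() == 1 && t.back() == "ret" && eax == "eax");
      return 0;
    }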
720
721
722 static void BinaryOpStub_GenerateSmiCode(
723 MacroAssembler* masm,
724 Label* slow,
725 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
726 Token::Value op) {
727 // 1. Move arguments into edx, eax except for DIV and MOD, which need the
728 // dividend in eax and edx free for the division. Use eax, ebx for those.
729 Comment load_comment(masm, "-- Load arguments");
730 Register left = edx;
731 Register right = eax;
732 if (op == Token::DIV || op == Token::MOD) {
733 left = eax;
734 right = ebx;
735 __ mov(ebx, eax);
736 __ mov(eax, edx);
737 }
738
739
740 // 2. Prepare the smi check of both operands by OR-ing them together.
741 Comment smi_check_comment(masm, "-- Smi check arguments");
742 Label not_smis;
743 Register combined = ecx;
744 ASSERT(!left.is(combined) && !right.is(combined));
745 switch (op) {
746 case Token::BIT_OR:
747 // Perform the operation into eax and smi check the result. Preserve
748 // eax in case the result is not a smi.
749 ASSERT(!left.is(ecx) && !right.is(ecx));
750 __ mov(ecx, right);
751 __ or_(right, left); // Bitwise or is commutative.
752 combined = right;
753 break;
754
755 case Token::BIT_XOR:
756 case Token::BIT_AND:
757 case Token::ADD:
758 case Token::SUB:
759 case Token::MUL:
760 case Token::DIV:
761 case Token::MOD:
762 __ mov(combined, right);
763 __ or_(combined, left);
764 break;
765
766 case Token::SHL:
767 case Token::SAR:
768 case Token::SHR:
769 // Move the right operand into ecx for the shift operation, use eax
770 // for the smi check register.
771 ASSERT(!left.is(ecx) && !right.is(ecx));
772 __ mov(ecx, right);
773 __ or_(right, left);
774 combined = right;
775 break;
776
777 default:
778 break;
779 }
780
781 // 3. Perform the smi check of the operands.
782 STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
783 __ JumpIfNotSmi(combined, &not_smis);
784
785 // 4. Operands are both smis, perform the operation leaving the result in
786 // eax and check the result if necessary.
787 Comment perform_smi(masm, "-- Perform smi operation");
788 Label use_fp_on_smis;
789 switch (op) {
790 case Token::BIT_OR:
791 // Nothing to do.
792 break;
793
794 case Token::BIT_XOR:
795 ASSERT(right.is(eax));
796 __ xor_(right, left); // Bitwise xor is commutative.
797 break;
798
799 case Token::BIT_AND:
800 ASSERT(right.is(eax));
801 __ and_(right, left); // Bitwise and is commutative.
802 break;
803
804 case Token::SHL:
805 // Remove tags from operands (but keep sign).
806 __ SmiUntag(left);
807 __ SmiUntag(ecx);
808 // Perform the operation.
809 __ shl_cl(left);
810 // Check that the *signed* result fits in a smi.
811 __ cmp(left, 0xc0000000);
812 __ j(sign, &use_fp_on_smis);
813 // Tag the result and store it in register eax.
814 __ SmiTag(left);
815 __ mov(eax, left);
816 break;
817
818 case Token::SAR:
819 // Remove tags from operands (but keep sign).
820 __ SmiUntag(left);
821 __ SmiUntag(ecx);
822 // Perform the operation.
823 __ sar_cl(left);
824 // Tag the result and store it in register eax.
825 __ SmiTag(left);
826 __ mov(eax, left);
827 break;
828
829 case Token::SHR:
830 // Remove tags from operands (but keep sign).
831 __ SmiUntag(left);
832 __ SmiUntag(ecx);
833 // Perform the operation.
834 __ shr_cl(left);
835 // Check that the *unsigned* result fits in a smi.
836 // Neither of the two high-order bits can be set:
837 // - 0x80000000: high bit would be lost when smi tagging.
838 // - 0x40000000: this number would convert to negative when
839 // smi tagging. These two cases can only happen with shifts
840 // by 0 or 1 when handed a valid smi.
841 __ test(left, Immediate(0xc0000000));
842 __ j(not_zero, &use_fp_on_smis);
843 // Tag the result and store it in register eax.
844 __ SmiTag(left);
845 __ mov(eax, left);
846 break;
847
848 case Token::ADD:
849 ASSERT(right.is(eax));
850 __ add(right, left); // Addition is commutative.
851 __ j(overflow, &use_fp_on_smis);
852 break;
853
854 case Token::SUB:
855 __ sub(left, right);
856 __ j(overflow, &use_fp_on_smis);
857 __ mov(eax, left);
858 break;
859
860 case Token::MUL:
861 // If the smi tag is 0 we can just leave the tag on one operand.
862 STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
863 // We can't revert the multiplication if the result is not a smi
864 // so save the right operand.
865 __ mov(ebx, right);
866 // Remove tag from one of the operands (but keep sign).
867 __ SmiUntag(right);
868 // Do multiplication.
869 __ imul(right, left); // Multiplication is commutative.
870 __ j(overflow, &use_fp_on_smis);
871 // Check for negative zero result. Use combined = left | right.
872 __ NegativeZeroTest(right, combined, &use_fp_on_smis);
873 break;
874
875 case Token::DIV:
876 // We can't revert the division if the result is not a smi so
877 // save the left operand.
878 __ mov(edi, left);
879 // Check for 0 divisor.
880 __ test(right, right);
881 __ j(zero, &use_fp_on_smis);
882 // Sign extend left into edx:eax.
883 ASSERT(left.is(eax));
884 __ cdq();
885 // Divide edx:eax by right.
886 __ idiv(right);
887 // Check for the corner case of dividing the most negative smi by
888 // -1. We cannot use the overflow flag, since it is not set by the
889 // idiv instruction.
890 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
891 __ cmp(eax, 0x40000000);
892 __ j(equal, &use_fp_on_smis);
893 // Check for negative zero result. Use combined = left | right.
894 __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
895 // Check that the remainder is zero.
896 __ test(edx, edx);
897 __ j(not_zero, &use_fp_on_smis);
898 // Tag the result and store it in register eax.
899 __ SmiTag(eax);
900 break;
901
902 case Token::MOD:
903 // Check for 0 divisor.
904 __ test(right, right);
905 __ j(zero, &not_smis);
906
907 // Sign extend left into edx:eax.
908 ASSERT(left.is(eax));
909 __ cdq();
910 // Divide edx:eax by right.
911 __ idiv(right);
912 // Check for negative zero result. Use combined = left | right.
913 __ NegativeZeroTest(edx, combined, slow);
914 // Move remainder to register eax.
915 __ mov(eax, edx);
916 break;
917
918 default:
919 UNREACHABLE();
920 }
921
922 // 5. Emit return of result in eax. Some operations have registers pushed.
923 switch (op) {
924 case Token::ADD:
925 case Token::SUB:
926 case Token::MUL:
927 case Token::DIV:
928 __ ret(0);
929 break;
930 case Token::MOD:
931 case Token::BIT_OR:
932 case Token::BIT_AND:
933 case Token::BIT_XOR:
934 case Token::SAR:
935 case Token::SHL:
936 case Token::SHR:
937 __ ret(2 * kPointerSize);
938 break;
939 default:
940 UNREACHABLE();
941 }
942
943 // 6. For some operations emit inline code to perform floating point
944 // operations on known smis (e.g., if the result of the operation
945 // overflowed the smi range).
946 if (allow_heapnumber_results == BinaryOpStub::NO_HEAPNUMBER_RESULTS) {
947 __ bind(&use_fp_on_smis);
948 switch (op) {
949 // Undo the effects of some operations, and some register moves.
950 case Token::SHL:
951 // The arguments are saved on the stack, and only used from there.
952 break;
953 case Token::ADD:
954 // Revert right = right + left.
955 __ sub(right, left);
956 break;
957 case Token::SUB:
958 // Revert left = left - right.
959 __ add(left, right);
960 break;
961 case Token::MUL:
962 // Right was clobbered but a copy is in ebx.
963 __ mov(right, ebx);
964 break;
965 case Token::DIV:
966 // Left was clobbered but a copy is in edi. Right is in ebx for
967 // division. They should be in eax, ebx for jump to not_smi.
968 __ mov(eax, edi);
969 break;
970 default:
971 // No other operators jump to use_fp_on_smis.
972 break;
973 }
974 __ jmp(&not_smis);
975 } else {
976 ASSERT(allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS);
977 switch (op) {
978 case Token::SHL:
979 case Token::SHR: {
980 Comment perform_float(masm, "-- Perform float operation on smis");
981 __ bind(&use_fp_on_smis);
982 // The result we want is in left == edx, so we can put the allocated
983 // heap number in eax.
984 __ AllocateHeapNumber(eax, ecx, ebx, slow);
985 // Store the result in the HeapNumber and return.
986 // It's OK to overwrite the arguments on the stack because we
987 // are about to return.
988 if (op == Token::SHR) {
989 __ mov(Operand(esp, 1 * kPointerSize), left);
990 __ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
991 __ fild_d(Operand(esp, 1 * kPointerSize));
992 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
993 } else {
994 ASSERT_EQ(Token::SHL, op);
995 if (CpuFeatures::IsSupported(SSE2)) {
996 CpuFeatureScope use_sse2(masm, SSE2);
997 __ Cvtsi2sd(xmm0, left);
998 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
999 } else {
1000 __ mov(Operand(esp, 1 * kPointerSize), left);
1001 __ fild_s(Operand(esp, 1 * kPointerSize));
1002 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1003 }
1004 }
1005 __ ret(2 * kPointerSize);
1006 break;
1007 }
1008
1009 case Token::ADD:
1010 case Token::SUB:
1011 case Token::MUL:
1012 case Token::DIV: {
1013 Comment perform_float(masm, "-- Perform float operation on smis");
1014 __ bind(&use_fp_on_smis);
1015 // Restore arguments to edx, eax.
1016 switch (op) {
1017 case Token::ADD:
1018 // Revert right = right + left.
1019 __ sub(right, left);
1020 break;
1021 case Token::SUB:
1022 // Revert left = left - right.
1023 __ add(left, right);
1024 break;
1025 case Token::MUL:
1026 // Right was clobbered but a copy is in ebx.
1027 __ mov(right, ebx);
1028 break;
1029 case Token::DIV:
1030 // Left was clobbered but a copy is in edi. Right is in ebx for
1031 // division.
1032 __ mov(edx, edi);
1033 __ mov(eax, right);
1034 break;
1035 default: UNREACHABLE();
1036 break;
1037 }
1038 __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
1039 if (CpuFeatures::IsSupported(SSE2)) {
1040 CpuFeatureScope use_sse2(masm, SSE2);
1041 FloatingPointHelper::LoadSSE2Smis(masm, ebx);
1042 switch (op) {
1043 case Token::ADD: __ addsd(xmm0, xmm1); break;
1044 case Token::SUB: __ subsd(xmm0, xmm1); break;
1045 case Token::MUL: __ mulsd(xmm0, xmm1); break;
1046 case Token::DIV: __ divsd(xmm0, xmm1); break;
1047 default: UNREACHABLE();
1048 }
1049 __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
1050 } else { // SSE2 not available, use FPU.
1051 FloatingPointHelper::LoadFloatSmis(masm, ebx);
1052 switch (op) {
1053 case Token::ADD: __ faddp(1); break;
1054 case Token::SUB: __ fsubp(1); break;
1055 case Token::MUL: __ fmulp(1); break;
1056 case Token::DIV: __ fdivp(1); break;
1057 default: UNREACHABLE();
1058 }
1059 __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
1060 }
1061 __ mov(eax, ecx);
1062 __ ret(0);
1063 break;
1064 }
1065
1066 default:
1067 break;
1068 }
1069 }
1070
1071 // 7. Non-smi operands, fall out to the non-smi code with the operands in
1072 // edx and eax.
1073 Comment done_comment(masm, "-- Enter non-smi code");
1074 __ bind(&not_smis);
1075 switch (op) {
1076 case Token::BIT_OR:
1077 case Token::SHL:
1078 case Token::SAR:
1079 case Token::SHR:
1080 // Right operand is saved in ecx and eax was destroyed by the smi
1081 // check.
1082 __ mov(eax, ecx);
1083 break;
1084
1085 case Token::DIV:
1086 case Token::MOD:
1087 // Operands are in eax, ebx at this point.
1088 __ mov(edx, eax);
1089 __ mov(eax, ebx);
1090 break;
1091
1092 default:
1093 break;
1094 }
1095 }
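The smi fast path above leans entirely on the ia32 smi encoding (kSmiTag == 0, kSmiTagSize == 1): a smi is a 31-bit signed value stored shifted left by one bit. That explains the three magic-number checks: "cmp 0xc0000000" with j(sign) for signed results, "test 0xc0000000" for the unsigned SHR result, and "cmp eax, 0x40000000" for DIV, whose -0x40000000 / -1 corner case produces the one quotient that overflows the smi range without setting the overflow flag. A plain C++ model of these checks (a sketch of the encoding, not V8 code):

    #include <cassert>
    #include <cstdint>

    // ia32 smi encoding: 31-bit signed payload, shifted left by one bit.
    const int32_t kSmiMin = -0x40000000;
    const int32_t kSmiMax = 0x3fffffff;

    bool FitsInSmi(int32_t value) {
      return value >= kSmiMin && value <= kSmiMax;
    }

    // "cmp value, 0xc0000000; j(sign, ...)": value - 0xc0000000 has its
    // sign bit set exactly for the signed values that do NOT fit in a smi.
    bool SignedFits(uint32_t value) {
      return (value - 0xc0000000u) < 0x80000000u;  // sign flag clear
    }

    // SHR produces an *unsigned* result, so both high bits must be clear:
    // bit 31 would be lost by tagging, bit 30 would turn the smi negative.
    bool UnsignedFits(uint32_t value) {
      return (value & 0xc0000000u) == 0;
    }

    int main() {
      assert(SignedFits(0x3fffffffu) && SignedFits(0xc0000000u));
      assert(!SignedFits(0x40000000u) && !SignedFits(0xbfffffffu));
      assert(UnsignedFits(0x3fffffffu) && !UnsignedFits(0x40000000u));
      // DIV corner case: -0x40000000 / -1 == 0x40000000, which does not
      // fit; idiv sets no overflow flag, hence "cmp eax, 0x40000000".
      assert(!FitsInSmi(0x40000000));
      return 0;
    }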
1096
1097
1098 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
1099 Label right_arg_changed, call_runtime;
1100
1101 switch (op_) {
1102 case Token::ADD:
1103 case Token::SUB:
1104 case Token::MUL:
1105 case Token::DIV:
1106 break;
1107 case Token::MOD:
1108 case Token::BIT_OR:
1109 case Token::BIT_AND:
1110 case Token::BIT_XOR:
1111 case Token::SAR:
1112 case Token::SHL:
1113 case Token::SHR:
1114 GenerateRegisterArgsPush(masm);
1115 break;
1116 default:
1117 UNREACHABLE();
1118 }
1119
1120 if (op_ == Token::MOD && encoded_right_arg_.has_value) {
1121 // It is guaranteed that the value will fit into a Smi, because if it
1122 // didn't, we wouldn't be here; see BinaryOp_Patch.
1123 __ cmp(eax, Immediate(Smi::FromInt(fixed_right_arg_value())));
1124 __ j(not_equal, &right_arg_changed);
1125 }
1126
1127 if (result_type_ == BinaryOpIC::UNINITIALIZED ||
1128 result_type_ == BinaryOpIC::SMI) {
1129 BinaryOpStub_GenerateSmiCode(
1130 masm, &call_runtime, NO_HEAPNUMBER_RESULTS, op_);
1131 } else {
1132 BinaryOpStub_GenerateSmiCode(
1133 masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
1134 }
1135
1136 // Code falls through if the result is not returned as either a smi or a
1137 // heap number.
1138 __ bind(&right_arg_changed);
1139 switch (op_) {
1140 case Token::ADD:
1141 case Token::SUB:
1142 case Token::MUL:
1143 case Token::DIV:
1144 GenerateTypeTransition(masm);
1145 break;
1146 case Token::MOD:
1147 case Token::BIT_OR:
1148 case Token::BIT_AND:
1149 case Token::BIT_XOR:
1150 case Token::SAR:
1151 case Token::SHL:
1152 case Token::SHR:
1153 GenerateTypeTransitionWithSavedArgs(masm);
1154 break;
1155 default:
1156 UNREACHABLE();
1157 }
1158
1159 __ bind(&call_runtime);
1160 switch (op_) {
1161 case Token::ADD:
1162 case Token::SUB:
1163 case Token::MUL:
1164 case Token::DIV:
1165 break;
1166 case Token::MOD:
1167 case Token::BIT_OR:
1168 case Token::BIT_AND:
1169 case Token::BIT_XOR:
1170 case Token::SAR:
1171 case Token::SHL:
1172 case Token::SHR:
1173 BinaryOpStub_GenerateRegisterArgsPop(masm);
1174 break;
1175 default:
1176 UNREACHABLE();
1177 }
1178
1179 {
1180 FrameScope scope(masm, StackFrame::INTERNAL);
1181 __ push(edx);
1182 __ push(eax);
1183 GenerateCallRuntime(masm);
1184 }
1185 __ ret(0);
1186 }
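The encoded_right_arg_ guard above is what makes the MOD feedback useful: the IC records a fixed right argument, and the stub transitions the moment a different divisor shows up. The payoff comes in optimized code, where a divisor known to be a power of two can turn idiv into a mask. A sketch of that strength reduction (illustrative only; JavaScript's % takes the sign of the dividend, which the branch below preserves):

    #include <cassert>

    // x % (2^k) for int32 x, as optimized code could emit it once type
    // feedback pins the divisor. Ignores the INT_MIN corner case.
    int ModPowerOfTwo(int dividend, int divisor /* a positive 2^k */) {
      int mask = divisor - 1;
      return dividend >= 0 ? (dividend & mask) : -((-dividend) & mask);
    }

    int main() {
      assert(ModPowerOfTwo(13, 8) == 13 % 8);    // 5
      assert(ModPowerOfTwo(-13, 8) == -13 % 8);  // -5: sign of the dividend
      return 0;
    }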
1187
1188
1189 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
1190 Label call_runtime;
1191 ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
1192 ASSERT(op_ == Token::ADD);
1193 // If both arguments are strings, call the string add stub.
1194 // Otherwise, do a transition.
1195
1196 // Registers containing left and right operands respectively.
1197 Register left = edx;
1198 Register right = eax;
1199
1200 // Test if left operand is a string.
1201 __ JumpIfSmi(left, &call_runtime, Label::kNear);
1202 __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
1203 __ j(above_equal, &call_runtime, Label::kNear);
1204
1205 // Test if right operand is a string.
1206 __ JumpIfSmi(right, &call_runtime, Label::kNear);
1207 __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
1208 __ j(above_equal, &call_runtime, Label::kNear);
1209
1210 StringAddStub string_add_stub(
1211 (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
1212 GenerateRegisterArgsPush(masm);
1213 __ TailCallStub(&string_add_stub);
1214
1215 __ bind(&call_runtime);
1216 GenerateTypeTransition(masm);
1217 }
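GenerateBothStringStub is the specialization for sites whose feedback says both operands are strings: it re-validates that assumption cheaply (a smi test plus an instance-type compare against FIRST_NONSTRING_TYPE) and tail-calls the string add stub, falling back to a type transition otherwise. The shape of the guard in plain C++ (a sketch, not V8's real object model):

    struct Value { bool is_smi; int instance_type; };
    const int kFirstNonstringType = 0x80;  // illustrative constant

    // A value is a string only if it is a heap object (not a smi) whose
    // instance type precedes FIRST_NONSTRING_TYPE; CmpObjectType plus
    // j(above_equal) implements exactly this comparison.
    bool IsString(const Value& v) {
      return !v.is_smi && v.instance_type < kFirstNonstringType;
    }

    int main() {
      Value str = { false, 0x04 };
      Value smi = { true, 0 };
      return (IsString(str) && !IsString(smi)) ? 0 : 1;
    }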
1218
1219
1220 static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
1221 Label* alloc_failure,
1222 OverwriteMode mode);
1223
1224
1225 // Input:
1226 // edx: left operand (tagged)
1227 // eax: right operand (tagged)
1228 // Output:
1229 // eax: result (tagged)
1230 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
1231 Label call_runtime;
1232 ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
1233
1234 // Floating point case.
1235 switch (op_) {
1236 case Token::ADD:
1237 case Token::SUB:
1238 case Token::MUL:
1239 case Token::DIV:
1240 case Token::MOD: {
1241 Label not_floats, not_int32, right_arg_changed;
1242 if (CpuFeatures::IsSupported(SSE2)) {
1243 CpuFeatureScope use_sse2(masm, SSE2);
1244 // It could be that only SMIs have been seen at either the left
1245 // or the right operand. For precise type feedback, patch the IC
1246 // again if this changes.
1247 // In theory, we would need the same check in the non-SSE2 case,
1248 // but since we don't support Crankshaft on such hardware we can
1249 // afford not to care about precise type feedback.
1250 if (left_type_ == BinaryOpIC::SMI) {
1251 __ JumpIfNotSmi(edx, &not_int32);
1252 }
1253 if (right_type_ == BinaryOpIC::SMI) {
1254 __ JumpIfNotSmi(eax, &not_int32);
1255 }
1256 FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
1257 FloatingPointHelper::CheckSSE2OperandIsInt32(
1258 masm, &not_int32, xmm0, ebx, ecx, xmm2);
1259 FloatingPointHelper::CheckSSE2OperandIsInt32(
1260 masm, &not_int32, xmm1, edi, ecx, xmm2);
1261 if (op_ == Token::MOD) {
1262 if (encoded_right_arg_.has_value) {
1263 __ cmp(edi, Immediate(fixed_right_arg_value()));
1264 __ j(not_equal, &right_arg_changed);
1265 }
1266 GenerateRegisterArgsPush(masm);
1267 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
1268 } else {
1269 switch (op_) {
1270 case Token::ADD: __ addsd(xmm0, xmm1); break;
1271 case Token::SUB: __ subsd(xmm0, xmm1); break;
1272 case Token::MUL: __ mulsd(xmm0, xmm1); break;
1273 case Token::DIV: __ divsd(xmm0, xmm1); break;
1274 default: UNREACHABLE();
1275 }
1276 // Check result type if it is currently Int32.
1277 if (result_type_ <= BinaryOpIC::INT32) {
1278 FloatingPointHelper::CheckSSE2OperandIsInt32(
1279 masm, &not_int32, xmm0, ecx, ecx, xmm2);
1280 }
1281 BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
1282 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1283 __ ret(0);
1284 }
1285 } else { // SSE2 not available, use FPU.
1286 FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
1287 FloatingPointHelper::LoadFloatOperands(
1288 masm,
1289 ecx,
1290 FloatingPointHelper::ARGS_IN_REGISTERS);
1291 if (op_ == Token::MOD) {
1292 // The operands are now on the FPU stack, but we don't need them.
1293 __ fstp(0);
1294 __ fstp(0);
1295 GenerateRegisterArgsPush(masm);
1296 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
1297 } else {
1298 switch (op_) {
1299 case Token::ADD: __ faddp(1); break;
1300 case Token::SUB: __ fsubp(1); break;
1301 case Token::MUL: __ fmulp(1); break;
1302 case Token::DIV: __ fdivp(1); break;
1303 default: UNREACHABLE();
1304 }
1305 Label after_alloc_failure;
1306 BinaryOpStub_GenerateHeapResultAllocation(
1307 masm, &after_alloc_failure, mode_);
1308 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1309 __ ret(0);
1310 __ bind(&after_alloc_failure);
1311 __ fstp(0); // Pop FPU stack before calling runtime.
1312 __ jmp(&call_runtime);
1313 }
1314 }
1315
1316 __ bind(&not_floats);
1317 __ bind(&not_int32);
1318 __ bind(&right_arg_changed);
1319 GenerateTypeTransition(masm);
1320 break;
1321 }
1322
1323 case Token::BIT_OR:
1324 case Token::BIT_AND:
1325 case Token::BIT_XOR:
1326 case Token::SAR:
1327 case Token::SHL:
1328 case Token::SHR: {
1329 GenerateRegisterArgsPush(masm);
1330 Label not_floats;
1331 Label not_int32;
1332 Label non_smi_result;
1333 bool use_sse3 = platform_specific_bit_;
1334 FloatingPointHelper::LoadUnknownsAsIntegers(
1335 masm, use_sse3, left_type_, right_type_, &not_floats);
1336 switch (op_) {
1337 case Token::BIT_OR: __ or_(eax, ecx); break;
1338 case Token::BIT_AND: __ and_(eax, ecx); break;
1339 case Token::BIT_XOR: __ xor_(eax, ecx); break;
1340 case Token::SAR: __ sar_cl(eax); break;
1341 case Token::SHL: __ shl_cl(eax); break;
1342 case Token::SHR: __ shr_cl(eax); break;
1343 default: UNREACHABLE();
1344 }
1345 if (op_ == Token::SHR) {
1346 // Check if result is non-negative and fits in a smi.
1347 __ test(eax, Immediate(0xc0000000));
1348 __ j(not_zero, &call_runtime);
1349 } else {
1350 // Check if result fits in a smi.
1351 __ cmp(eax, 0xc0000000);
1352 __ j(negative, &non_smi_result, Label::kNear);
1353 }
1354 // Tag smi result and return.
1355 __ SmiTag(eax);
1356 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
1357
1358 // All ops except SHR return a signed int32 that we load in
1359 // a HeapNumber.
1360 if (op_ != Token::SHR) {
1361 __ bind(&non_smi_result);
1362 // Allocate a heap number if needed.
1363 __ mov(ebx, eax); // ebx: result
1364 Label skip_allocation;
1365 switch (mode_) {
1366 case OVERWRITE_LEFT:
1367 case OVERWRITE_RIGHT:
1368 // If the operand was an object, we skip the
1369 // allocation of a heap number.
1370 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1371 1 * kPointerSize : 2 * kPointerSize));
1372 __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
1373 // Fall through!
1374 case NO_OVERWRITE:
1375 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
1376 __ bind(&skip_allocation);
1377 break;
1378 default: UNREACHABLE();
1379 }
1380 // Store the result in the HeapNumber and return.
1381 if (CpuFeatures::IsSupported(SSE2)) {
1382 CpuFeatureScope use_sse2(masm, SSE2);
1383 __ Cvtsi2sd(xmm0, ebx);
1384 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1385 } else {
1386 __ mov(Operand(esp, 1 * kPointerSize), ebx);
1387 __ fild_s(Operand(esp, 1 * kPointerSize));
1388 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1389 }
1390 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
1391 }
1392
1393 __ bind(&not_floats);
1394 __ bind(&not_int32);
1395 GenerateTypeTransitionWithSavedArgs(masm);
1396 break;
1397 }
1398 default: UNREACHABLE(); break;
1399 }
1400
1401 // If an allocation fails, or SHR hits a hard case, use the runtime system to
1402 // get the correct result.
1403 __ bind(&call_runtime);
1404
1405 switch (op_) {
1406 case Token::ADD:
1407 case Token::SUB:
1408 case Token::MUL:
1409 case Token::DIV:
1410 break;
1411 case Token::MOD:
1412 return; // Handled above.
1413 case Token::BIT_OR:
1414 case Token::BIT_AND:
1415 case Token::BIT_XOR:
1416 case Token::SAR:
1417 case Token::SHL:
1418 case Token::SHR:
1419 BinaryOpStub_GenerateRegisterArgsPop(masm);
1420 break;
1421 default:
1422 UNREACHABLE();
1423 }
1424
1425 {
1426 FrameScope scope(masm, StackFrame::INTERNAL);
1427 __ push(edx);
1428 __ push(eax);
1429 GenerateCallRuntime(masm);
1430 }
1431 __ ret(0);
1432 }
1433
1434
1435 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
1436 if (op_ == Token::ADD) {
1437 // Handle string addition here, because it is the only operation
1438 // that does not do a ToNumber conversion on the operands.
1439 GenerateAddStrings(masm);
1440 }
1441
1442 Factory* factory = masm->isolate()->factory();
1443
1444 // Convert oddball arguments to numbers.
1445 Label check, done;
1446 __ cmp(edx, factory->undefined_value());
1447 __ j(not_equal, &check, Label::kNear);
1448 if (Token::IsBitOp(op_)) {
1449 __ xor_(edx, edx);
1450 } else {
1451 __ mov(edx, Immediate(factory->nan_value()));
1452 }
1453 __ jmp(&done, Label::kNear);
1454 __ bind(&check);
1455 __ cmp(eax, factory->undefined_value());
1456 __ j(not_equal, &done, Label::kNear);
1457 if (Token::IsBitOp(op_)) {
1458 __ xor_(eax, eax);
1459 } else {
1460 __ mov(eax, Immediate(factory->nan_value()));
1461 }
1462 __ bind(&done);
1463
1464 GenerateNumberStub(masm);
1465 }
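The shortcut above relies on a ToInt32 identity: for the arithmetic ops, undefined must become NaN (plain ToNumber), but for the bit ops the stub can load 0 directly, because ToInt32 maps NaN to 0 anyway. A minimal check of the equivalence in C++ (illustrative, not V8 code):

    #include <cassert>

    // For undefined the stub picks between two loads:
    //   arithmetic ops: ToNumber(undefined) == NaN -> mov reg, nan_value
    //   bit ops:        ToInt32(NaN) == 0          -> xor reg, reg
    // Both give the same observable bit-op result; in JS terms,
    // (undefined | 5) === 5 while undefined + 5 is NaN.
    int main() {
      int undefined_via_xor = 0;  // what "__ xor_(edx, edx)" produces
      assert((undefined_via_xor | 5) == 5);
      return 0;
    }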
1466
1467
1468 void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
1469 Label call_runtime;
1470
1471 // Floating point case.
1472 switch (op_) {
1473 case Token::ADD:
1474 case Token::SUB:
1475 case Token::MUL:
1476 case Token::DIV: {
1477 Label not_floats;
1478 if (CpuFeatures::IsSupported(SSE2)) {
1479 CpuFeatureScope use_sse2(masm, SSE2);
1480
1481 // It could be that only SMIs have been seen at either the left
1482 // or the right operand. For precise type feedback, patch the IC
1483 // again if this changes.
1484 // In theory, we would need the same check in the non-SSE2 case,
1485 // but since we don't support Crankshaft on such hardware we can
1486 // afford not to care about precise type feedback.
1487 if (left_type_ == BinaryOpIC::SMI) {
1488 __ JumpIfNotSmi(edx, &not_floats);
1489 }
1490 if (right_type_ == BinaryOpIC::SMI) {
1491 __ JumpIfNotSmi(eax, &not_floats);
1492 }
1493 FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
1494 if (left_type_ == BinaryOpIC::INT32) {
1495 FloatingPointHelper::CheckSSE2OperandIsInt32(
1496 masm, &not_floats, xmm0, ecx, ecx, xmm2);
1497 }
1498 if (right_type_ == BinaryOpIC::INT32) {
1499 FloatingPointHelper::CheckSSE2OperandIsInt32(
1500 masm, &not_floats, xmm1, ecx, ecx, xmm2);
1501 }
1502
1503 switch (op_) {
1504 case Token::ADD: __ addsd(xmm0, xmm1); break;
1505 case Token::SUB: __ subsd(xmm0, xmm1); break;
1506 case Token::MUL: __ mulsd(xmm0, xmm1); break;
1507 case Token::DIV: __ divsd(xmm0, xmm1); break;
1508 default: UNREACHABLE();
1509 }
1510 BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
1511 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1512 __ ret(0);
1513 } else { // SSE2 not available, use FPU.
1514 FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
1515 FloatingPointHelper::LoadFloatOperands(
1516 masm,
1517 ecx,
1518 FloatingPointHelper::ARGS_IN_REGISTERS);
1519 switch (op_) {
1520 case Token::ADD: __ faddp(1); break;
1521 case Token::SUB: __ fsubp(1); break;
1522 case Token::MUL: __ fmulp(1); break;
1523 case Token::DIV: __ fdivp(1); break;
1524 default: UNREACHABLE();
1525 }
1526 Label after_alloc_failure;
1527 BinaryOpStub_GenerateHeapResultAllocation(
1528 masm, &after_alloc_failure, mode_);
1529 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1530 __ ret(0);
1531 __ bind(&after_alloc_failure);
1532 __ fstp(0); // Pop FPU stack before calling runtime.
1533 __ jmp(&call_runtime);
1534 }
1535
1536 __ bind(&not_floats);
1537 GenerateTypeTransition(masm);
1538 break;
1539 }
1540
1541 case Token::MOD: {
1542 // For MOD we go directly to runtime in the non-smi case.
1543 break;
1544 }
1545 case Token::BIT_OR:
1546 case Token::BIT_AND:
1547 case Token::BIT_XOR:
1548 case Token::SAR:
1549 case Token::SHL:
1550 case Token::SHR: {
1551 GenerateRegisterArgsPush(masm);
1552 Label not_floats;
1553 Label non_smi_result;
1554 // We do not check the input arguments here, as any value is
1555 // unconditionally truncated to an int32 anyway. To get the
1556 // right optimized code, int32 type feedback is sufficient.
1557 bool use_sse3 = platform_specific_bit_;
1558 FloatingPointHelper::LoadUnknownsAsIntegers(
1559 masm, use_sse3, left_type_, right_type_, &not_floats);
1560 switch (op_) {
1561 case Token::BIT_OR: __ or_(eax, ecx); break;
1562 case Token::BIT_AND: __ and_(eax, ecx); break;
1563 case Token::BIT_XOR: __ xor_(eax, ecx); break;
1564 case Token::SAR: __ sar_cl(eax); break;
1565 case Token::SHL: __ shl_cl(eax); break;
1566 case Token::SHR: __ shr_cl(eax); break;
1567 default: UNREACHABLE();
1568 }
1569 if (op_ == Token::SHR) {
1570 // Check if result is non-negative and fits in a smi.
1571 __ test(eax, Immediate(0xc0000000));
1572 __ j(not_zero, &call_runtime);
1573 } else {
1574 // Check if result fits in a smi.
1575 __ cmp(eax, 0xc0000000);
1576 __ j(negative, &non_smi_result, Label::kNear);
1577 }
1578 // Tag smi result and return.
1579 __ SmiTag(eax);
1580 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
1581
1582 // All ops except SHR return a signed int32 that we load in
1583 // a HeapNumber.
1584 if (op_ != Token::SHR) {
1585 __ bind(&non_smi_result);
1586 // Allocate a heap number if needed.
1587 __ mov(ebx, eax); // ebx: result
1588 Label skip_allocation;
1589 switch (mode_) {
1590 case OVERWRITE_LEFT:
1591 case OVERWRITE_RIGHT:
1592 // If the operand was an object, we skip the
1593 // allocation of a heap number.
1594 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1595 1 * kPointerSize : 2 * kPointerSize));
1596 __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
1597 // Fall through!
1598 case NO_OVERWRITE:
1599 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
1600 __ bind(&skip_allocation);
1601 break;
1602 default: UNREACHABLE();
1603 }
1604 // Store the result in the HeapNumber and return.
1605 if (CpuFeatures::IsSupported(SSE2)) {
1606 CpuFeatureScope use_sse2(masm, SSE2);
1607 __ Cvtsi2sd(xmm0, ebx);
1608 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1609 } else {
1610 __ mov(Operand(esp, 1 * kPointerSize), ebx);
1611 __ fild_s(Operand(esp, 1 * kPointerSize));
1612 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1613 }
1614 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
1615 }
1616
1617 __ bind(&not_floats);
1618 GenerateTypeTransitionWithSavedArgs(masm);
1619 break;
1620 }
1621 default: UNREACHABLE(); break;
1622 }
1623
1624 // If an allocation fails, or SHR or MOD hit a hard case,
1625 // use the runtime system to get the correct result.
1626 __ bind(&call_runtime);
1627
1628 switch (op_) {
1629 case Token::ADD:
1630 case Token::SUB:
1631 case Token::MUL:
1632 case Token::DIV:
1633 case Token::MOD:
1634 break;
1635 case Token::BIT_OR:
1636 case Token::BIT_AND:
1637 case Token::BIT_XOR:
1638 case Token::SAR:
1639 case Token::SHL:
1640 case Token::SHR:
1641 BinaryOpStub_GenerateRegisterArgsPop(masm);
1642 break;
1643 default:
1644 UNREACHABLE();
1645 }
1646
1647 {
1648 FrameScope scope(masm, StackFrame::INTERNAL);
1649 __ push(edx);
1650 __ push(eax);
1651 GenerateCallRuntime(masm);
1652 }
1653 __ ret(0);
1654 }
1655
1656
1657 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
1658 Label call_runtime;
1659
1660 Counters* counters = masm->isolate()->counters();
1661 __ IncrementCounter(counters->generic_binary_stub_calls(), 1);
1662
1663 switch (op_) {
1664 case Token::ADD:
1665 case Token::SUB:
1666 case Token::MUL:
1667 case Token::DIV:
1668 break;
1669 case Token::MOD:
1670 case Token::BIT_OR:
1671 case Token::BIT_AND:
1672 case Token::BIT_XOR:
1673 case Token::SAR:
1674 case Token::SHL:
1675 case Token::SHR:
1676 GenerateRegisterArgsPush(masm);
1677 break;
1678 default:
1679 UNREACHABLE();
1680 }
1681
1682 BinaryOpStub_GenerateSmiCode(
1683 masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
1684
1685 // Floating point case.
1686 switch (op_) {
1687 case Token::ADD:
1688 case Token::SUB:
1689 case Token::MUL:
1690 case Token::DIV: {
1691 Label not_floats;
1692 if (CpuFeatures::IsSupported(SSE2)) {
1693 CpuFeatureScope use_sse2(masm, SSE2);
1694 FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
1695
1696 switch (op_) {
1697 case Token::ADD: __ addsd(xmm0, xmm1); break;
1698 case Token::SUB: __ subsd(xmm0, xmm1); break;
1699 case Token::MUL: __ mulsd(xmm0, xmm1); break;
1700 case Token::DIV: __ divsd(xmm0, xmm1); break;
1701 default: UNREACHABLE();
1702 }
1703 BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
1704 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1705 __ ret(0);
1706 } else { // SSE2 not available, use FPU.
1707 FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
1708 FloatingPointHelper::LoadFloatOperands(
1709 masm,
1710 ecx,
1711 FloatingPointHelper::ARGS_IN_REGISTERS);
1712 switch (op_) {
1713 case Token::ADD: __ faddp(1); break;
1714 case Token::SUB: __ fsubp(1); break;
1715 case Token::MUL: __ fmulp(1); break;
1716 case Token::DIV: __ fdivp(1); break;
1717 default: UNREACHABLE();
1718 }
1719 Label after_alloc_failure;
1720 BinaryOpStub_GenerateHeapResultAllocation(
1721 masm, &after_alloc_failure, mode_);
1722 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1723 __ ret(0);
1724 __ bind(&after_alloc_failure);
1725 __ fstp(0); // Pop FPU stack before calling runtime.
1726 __ jmp(&call_runtime);
1727 }
1728 __ bind(&not_floats);
1729 break;
1730 }
1731 case Token::MOD: {
1732 // For MOD we go directly to runtime in the non-smi case.
1733 break;
1734 }
1735 case Token::BIT_OR:
1736 case Token::BIT_AND:
1737 case Token::BIT_XOR:
1738 case Token::SAR:
1739 case Token::SHL:
1740 case Token::SHR: {
1741 Label non_smi_result;
1742 bool use_sse3 = platform_specific_bit_;
1743 FloatingPointHelper::LoadUnknownsAsIntegers(masm,
1744 use_sse3,
1745 BinaryOpIC::GENERIC,
1746 BinaryOpIC::GENERIC,
1747 &call_runtime);
1748 switch (op_) {
1749 case Token::BIT_OR: __ or_(eax, ecx); break;
1750 case Token::BIT_AND: __ and_(eax, ecx); break;
1751 case Token::BIT_XOR: __ xor_(eax, ecx); break;
1752 case Token::SAR: __ sar_cl(eax); break;
1753 case Token::SHL: __ shl_cl(eax); break;
1754 case Token::SHR: __ shr_cl(eax); break;
1755 default: UNREACHABLE();
1756 }
1757 if (op_ == Token::SHR) {
1758 // Check if result is non-negative and fits in a smi.
1759 __ test(eax, Immediate(0xc0000000));
1760 __ j(not_zero, &call_runtime);
1761 } else {
1762 // Check if result fits in a smi.
1763 __ cmp(eax, 0xc0000000);
1764 __ j(negative, &non_smi_result, Label::kNear);
1765 }
1766 // Tag smi result and return.
1767 __ SmiTag(eax);
1768 __ ret(2 * kPointerSize); // Drop the arguments from the stack.
1769
1770 // All ops except SHR return a signed int32 that we load in
1771 // a HeapNumber.
1772 if (op_ != Token::SHR) {
1773 __ bind(&non_smi_result);
1774 // Allocate a heap number if needed.
1775 __ mov(ebx, eax); // ebx: result
1776 Label skip_allocation;
1777 switch (mode_) {
1778 case OVERWRITE_LEFT:
1779 case OVERWRITE_RIGHT:
1780 // If the operand was an object, we skip the
1781 // allocation of a heap number.
1782 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1783 1 * kPointerSize : 2 * kPointerSize));
1784 __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
1785 // Fall through!
1786 case NO_OVERWRITE:
1787 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
1788 __ bind(&skip_allocation);
1789 break;
1790 default: UNREACHABLE();
1791 }
1792 // Store the result in the HeapNumber and return.
1793 if (CpuFeatures::IsSupported(SSE2)) {
1794 CpuFeatureScope use_sse2(masm, SSE2);
1795 __ Cvtsi2sd(xmm0, ebx);
1796 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1797 } else {
1798 __ mov(Operand(esp, 1 * kPointerSize), ebx);
1799 __ fild_s(Operand(esp, 1 * kPointerSize));
1800 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1801 }
1802 __ ret(2 * kPointerSize);
1803 }
1804 break;
1805 }
1806 default: UNREACHABLE(); break;
1807 }
1808
1809 // If all else fails, use the runtime system to get the correct
1810 // result.
1811 __ bind(&call_runtime);
1812 switch (op_) {
1813 case Token::ADD:
1814 GenerateAddStrings(masm);
1815 // Fall through.
1816 case Token::SUB:
1817 case Token::MUL:
1818 case Token::DIV:
1819 break;
1820 case Token::MOD:
1821 case Token::BIT_OR:
1822 case Token::BIT_AND:
1823 case Token::BIT_XOR:
1824 case Token::SAR:
1825 case Token::SHL:
1826 case Token::SHR:
1827 BinaryOpStub_GenerateRegisterArgsPop(masm);
1828 break;
1829 default:
1830 UNREACHABLE();
1831 }
1832
1833 {
1834 FrameScope scope(masm, StackFrame::INTERNAL);
1835 __ push(edx);
1836 __ push(eax);
1837 GenerateCallRuntime(masm);
1838 }
1839 __ ret(0);
1840 }
1841
1842
1843 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
1844 ASSERT(op_ == Token::ADD);
1845 Label left_not_string, call_runtime;
1846
1847 // Registers containing left and right operands respectively.
1848 Register left = edx;
1849 Register right = eax;
1850
1851 // Test if left operand is a string.
1852 __ JumpIfSmi(left, &left_not_string, Label::kNear);
1853 __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
1854 __ j(above_equal, &left_not_string, Label::kNear);
1855
1856 StringAddStub string_add_left_stub(
1857 (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
1858 GenerateRegisterArgsPush(masm);
1859 __ TailCallStub(&string_add_left_stub);
1860
1861 // Left operand is not a string, test right.
1862 __ bind(&left_not_string);
1863 __ JumpIfSmi(right, &call_runtime, Label::kNear);
1864 __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
1865 __ j(above_equal, &call_runtime, Label::kNear);
1866
1867 StringAddStub string_add_right_stub(
1868 (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
1869 GenerateRegisterArgsPush(masm);
1870 __ TailCallStub(&string_add_right_stub);
1871
1872 // Neither argument is a string.
1873 __ bind(&call_runtime);
1874 }
1875
1876
1877 static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
1878 Label* alloc_failure,
1879 OverwriteMode mode) {
1880 Label skip_allocation;
1881 switch (mode) {
1882 case OVERWRITE_LEFT: {
1883 // If the argument in edx is already an object, we skip the
1884 // allocation of a heap number.
1885 __ JumpIfNotSmi(edx, &skip_allocation, Label::kNear);
1886 // Allocate a heap number for the result. Keep eax and edx intact
1887 // for the possible runtime call.
1888 __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
1889 // Now edx can be overwritten, losing one of the arguments, as we are
1890 // done and will not need it any more.
1891 __ mov(edx, ebx);
1892 __ bind(&skip_allocation);
1893 // Use the object in edx as the result holder.
1894 __ mov(eax, edx);
1895 break;
1896 }
1897 case OVERWRITE_RIGHT:
1898 // If the argument in eax is already an object, we skip the
1899 // allocation of a heap number.
1900 __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
1901 // Fall through!
1902 case NO_OVERWRITE:
1903 // Allocate a heap number for the result. Keep eax and edx intact
1904 // for the possible runtime call.
1905 __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
1906 // Now eax can be overwritten, losing one of the arguments, as we are
1907 // done and will not need it any more.
1908 __ mov(eax, ebx);
1909 __ bind(&skip_allocation);
1910 break;
1911 default: UNREACHABLE();
1912 }
1913 }
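BinaryOpStub_GenerateHeapResultAllocation implements the OverwriteMode optimization: when one operand is a heap number that dies at this operation, its backing store can hold the result and the AllocateHeapNumber call is skipped. The decision, sketched in plain C++ (made-up value representation, not V8's heap types):

    #include <cassert>

    enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
    struct Value { bool is_smi; double* heap_number; };

    // Pick storage for the result: reuse a dying heap-number operand if
    // the mode allows, otherwise allocate (which may fail and branch to
    // alloc_failure in the real stub).
    double* ResultSlot(OverwriteMode mode, Value edx, Value eax,
                       double* (*allocate)()) {
      if (mode == OVERWRITE_LEFT && !edx.is_smi) return edx.heap_number;
      if (mode == OVERWRITE_RIGHT && !eax.is_smi) return eax.heap_number;
      return allocate();
    }

    static double fresh;
    static double* Allocate() { return &fresh; }

    int main() {
      double left_box = 1.0;
      Value left = { false, &left_box };
      Value right = { true, 0 };
      assert(ResultSlot(OVERWRITE_LEFT, left, right, Allocate) == &left_box);
      assert(ResultSlot(OVERWRITE_RIGHT, left, right, Allocate) == &fresh);
      return 0;
    }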
1914
1915
1916 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
1917 __ pop(ecx);
1918 __ push(edx);
1919 __ push(eax);
1920 __ push(ecx);
1921 }
1922
1923
650 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { 1924 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
651 // TAGGED case: 1925 // TAGGED case:
652 // Input: 1926 // Input:
653 // esp[4]: tagged number input argument (should be number). 1927 // esp[4]: tagged number input argument (should be number).
654 // esp[0]: return address. 1928 // esp[0]: return address.
655 // Output: 1929 // Output:
656 // eax: tagged double result. 1930 // eax: tagged double result.
657 // UNTAGGED case: 1931 // UNTAGGED case:
658 // Input: 1932 // Input:
659 // esp[0]: return address. 1933 // esp[0]: return address.
(...skipping 290 matching lines...)
950 __ bind(&done); 2224 __ bind(&done);
951 } else { 2225 } else {
952 ASSERT(type == TranscendentalCache::LOG); 2226 ASSERT(type == TranscendentalCache::LOG);
953 __ fldln2(); 2227 __ fldln2();
954 __ fxch(); 2228 __ fxch();
955 __ fyl2x(); 2229 __ fyl2x();
956 } 2230 }
957 } 2231 }
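The LOG branch visible above is the classic x87 idiom: fldln2 pushes ln 2, fxch brings the input back to the top of the stack, and fyl2x computes st1 * log2(st0), leaving ln(2) * log2(x) = ln(x). The identity it relies on, checked in plain C++:

    #include <cassert>
    #include <cmath>

    // fyl2x computes y * log2(x); with y = ln 2 this is the natural log,
    // since ln(x) = ln(2) * log2(x).
    int main() {
      double x = 123.456;
      double via_fyl2x = std::log(2.0) * std::log2(x);
      assert(std::fabs(via_fyl2x - std::log(x)) < 1e-12);
      return 0;
    }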
958 2232
959 2233
2234 // Input: edx, eax are the left and right objects of a bit op.
2235 // Output: eax, ecx are left and right integers for a bit op.
2236 // Warning: can clobber inputs even when it jumps to |conversion_failure|!
2237 void FloatingPointHelper::LoadUnknownsAsIntegers(
2238 MacroAssembler* masm,
2239 bool use_sse3,
2240 BinaryOpIC::TypeInfo left_type,
2241 BinaryOpIC::TypeInfo right_type,
2242 Label* conversion_failure) {
2243 // Check float operands.
2244 Label arg1_is_object, check_undefined_arg1;
2245 Label arg2_is_object, check_undefined_arg2;
2246 Label load_arg2, done;
2247
2248 // Test if arg1 is a Smi.
2249 if (left_type == BinaryOpIC::SMI) {
2250 __ JumpIfNotSmi(edx, conversion_failure);
2251 } else {
2252 __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear);
2253 }
2254
2255 __ SmiUntag(edx);
2256 __ jmp(&load_arg2);
2257
2258 // If the argument is undefined, it converts to zero (ECMA-262, section 9.5).
2259 __ bind(&check_undefined_arg1);
2260 Factory* factory = masm->isolate()->factory();
2261 __ cmp(edx, factory->undefined_value());
2262 __ j(not_equal, conversion_failure);
2263 __ mov(edx, Immediate(0));
2264 __ jmp(&load_arg2);
2265
2266 __ bind(&arg1_is_object);
2267 __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
2268 __ cmp(ebx, factory->heap_number_map());
2269 __ j(not_equal, &check_undefined_arg1);
2270
2271 __ TruncateHeapNumberToI(edx, edx);
2272
2273 // Here edx has the untagged integer, eax has a Smi or a heap number.
2274 __ bind(&load_arg2);
2275
2276 // Test if arg2 is a Smi.
2277 if (right_type == BinaryOpIC::SMI) {
2278 __ JumpIfNotSmi(eax, conversion_failure);
2279 } else {
2280 __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear);
2281 }
2282
2283 __ SmiUntag(eax);
2284 __ mov(ecx, eax);
2285 __ jmp(&done);
2286
2287 // If the argument is undefined, it converts to zero (ECMA-262, section 9.5).
2288 __ bind(&check_undefined_arg2);
2289 __ cmp(eax, factory->undefined_value());
2290 __ j(not_equal, conversion_failure);
2291 __ mov(ecx, Immediate(0));
2292 __ jmp(&done);
2293
2294 __ bind(&arg2_is_object);
2295 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
2296 __ cmp(ebx, factory->heap_number_map());
2297 __ j(not_equal, &check_undefined_arg2);
2298 // Get the untagged integer version of the eax heap number in ecx.
2299
2300 __ TruncateHeapNumberToI(ecx, eax);
2301
2302 __ bind(&done);
2303 __ mov(eax, edx);
2304 }
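LoadUnknownsAsIntegers is the assembly counterpart of ECMA-262 ToInt32 for the inputs a bit op can see: smis untag directly, heap numbers truncate through TruncateHeapNumberToI, undefined becomes 0, and anything else is a conversion failure. The numeric part of the conversion in portable C++ (a sketch of the spec algorithm, not of the code TruncateHeapNumberToI emits):

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // ECMA-262 ToInt32: NaN and infinities map to 0; finite values are
    // truncated toward zero, reduced modulo 2^32, and wrapped to signed.
    int32_t ToInt32(double d) {
      if (std::isnan(d) || std::isinf(d)) return 0;
      double t = std::trunc(d);
      double m = std::fmod(t, 4294967296.0);  // modulo 2^32, sign of t
      if (m < 0) m += 4294967296.0;
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }

    int main() {
      assert(ToInt32(std::nan("")) == 0);          // the undefined case
      assert(ToInt32(3.9) == 3);                   // truncation, not rounding
      assert(ToInt32(4294967296.0 + 5.0) == 5);    // modulo 2^32
      assert(ToInt32(2147483648.0) == INT32_MIN);  // wraps to signed
      return 0;
    }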
2305
2306
960 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm, 2307 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
961 Register number) { 2308 Register number) {
962 Label load_smi, done; 2309 Label load_smi, done;
963 2310
964 __ JumpIfSmi(number, &load_smi, Label::kNear); 2311 __ JumpIfSmi(number, &load_smi, Label::kNear);
965 __ fld_d(FieldOperand(number, HeapNumber::kValueOffset)); 2312 __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
966 __ jmp(&done, Label::kNear); 2313 __ jmp(&done, Label::kNear);
967 2314
968 __ bind(&load_smi); 2315 __ bind(&load_smi);
969 __ SmiUntag(number); 2316 __ SmiUntag(number);
(...skipping 29 matching lines...)
999 __ SmiUntag(eax); // Untag smi before converting to float. 2346 __ SmiUntag(eax); // Untag smi before converting to float.
1000 __ Cvtsi2sd(xmm1, eax); 2347 __ Cvtsi2sd(xmm1, eax);
1001 __ SmiTag(eax); // Retag smi for heap number overwriting test. 2348 __ SmiTag(eax); // Retag smi for heap number overwriting test.
1002 __ jmp(&done, Label::kNear); 2349 __ jmp(&done, Label::kNear);
1003 __ bind(&load_float_eax); 2350 __ bind(&load_float_eax);
1004 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); 2351 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
1005 __ bind(&done); 2352 __ bind(&done);
1006 } 2353 }
1007 2354
1008 2355
2356 void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
2357 Register scratch) {
2358 const Register left = edx;
2359 const Register right = eax;
2360 __ mov(scratch, left);
2361 ASSERT(!scratch.is(right)); // We're about to clobber scratch.
2362 __ SmiUntag(scratch);
2363 __ Cvtsi2sd(xmm0, scratch);
2364
2365 __ mov(scratch, right);
2366 __ SmiUntag(scratch);
2367 __ Cvtsi2sd(xmm1, scratch);
2368 }
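LoadSSE2Smis is the cheap specialization used when both operands are known smis: untag with an arithmetic right shift and convert with Cvtsi2sd, no type checks needed. LoadFloatSmis further down does the same work through the stack, because the x87 fild instruction can only read integers from memory. The equivalent scalar operation in plain C++ (sketch):

    #include <cassert>
    #include <cstdint>

    // Untag an ia32 smi (31-bit payload shifted left by one) and widen to
    // double, mirroring "__ SmiUntag(scratch); __ Cvtsi2sd(xmm, scratch)".
    double SmiToDouble(int32_t tagged) {
      return static_cast<double>(tagged >> 1);  // arithmetic shift keeps sign
    }

    int main() {
      assert(SmiToDouble(10 << 1) == 10.0);
      assert(SmiToDouble(-7 * 2) == -7.0);  // negative smis untag correctly
      return 0;
    }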
2369
2370
2371 void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm,
2372 Label* non_int32,
2373 XMMRegister operand,
2374 Register int32_result,
2375 Register scratch,
2376 XMMRegister xmm_scratch) {
2377 __ cvttsd2si(int32_result, Operand(operand));
2378 __ Cvtsi2sd(xmm_scratch, int32_result);
2379 __ pcmpeqd(xmm_scratch, operand);
2380 __ movmskps(scratch, xmm_scratch);
2381 // The two least significant bits should both be set.
2382 __ not_(scratch);
2383 __ test(scratch, Immediate(3));
2384 __ j(not_zero, non_int32);
2385 }
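CheckSSE2OperandIsInt32 decides int32-ness with a round trip: truncate with cvttsd2si, convert back with Cvtsi2sd, and compare bit patterns with pcmpeqd/movmskps (the not + test of the low two mask bits requires both 32-bit halves of the double to match). Comparing bits rather than values also rejects -0.0, which round-trips to +0.0. A portable C++ model, with cvttsd2si's out-of-range "integer indefinite" result made explicit:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // cvttsd2si truncates toward zero; NaN and out-of-range inputs yield
    // 0x80000000 ("integer indefinite").
    int32_t Cvttsd2si(double d) {
      if (d > -2147483649.0 && d < 2147483648.0) return static_cast<int32_t>(d);
      return INT32_MIN;
    }

    bool IsInt32(double d, int32_t* result) {
      int32_t i = Cvttsd2si(d);                   // __ cvttsd2si
      double back = static_cast<double>(i);       // __ Cvtsi2sd
      if (std::memcmp(&back, &d, sizeof d) != 0)  // pcmpeqd + movmskps
        return false;
      *result = i;
      return true;
    }

    int main() {
      int32_t v;
      assert(IsInt32(-42.0, &v) && v == -42);
      assert(!IsInt32(0.5, &v));           // fractional part would be lost
      assert(!IsInt32(-0.0, &v));          // -0.0 is not bitwise +0.0
      assert(!IsInt32(4294967296.0, &v));  // outside int32 range
      return 0;
    }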
2386
2387
2388 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
2389 Register scratch,
2390 ArgLocation arg_location) {
2391 Label load_smi_1, load_smi_2, done_load_1, done;
2392 if (arg_location == ARGS_IN_REGISTERS) {
2393 __ mov(scratch, edx);
2394 } else {
2395 __ mov(scratch, Operand(esp, 2 * kPointerSize));
2396 }
2397 __ JumpIfSmi(scratch, &load_smi_1, Label::kNear);
2398 __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
2399 __ bind(&done_load_1);
2400
2401 if (arg_location == ARGS_IN_REGISTERS) {
2402 __ mov(scratch, eax);
2403 } else {
2404 __ mov(scratch, Operand(esp, 1 * kPointerSize));
2405 }
2406 __ JumpIfSmi(scratch, &load_smi_2, Label::kNear);
2407 __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
2408 __ jmp(&done, Label::kNear);
2409
2410 __ bind(&load_smi_1);
2411 __ SmiUntag(scratch);
2412 __ push(scratch);
2413 __ fild_s(Operand(esp, 0));
2414 __ pop(scratch);
2415 __ jmp(&done_load_1);
2416
2417 __ bind(&load_smi_2);
2418 __ SmiUntag(scratch);
2419 __ push(scratch);
2420 __ fild_s(Operand(esp, 0));
2421 __ pop(scratch);
2422
2423 __ bind(&done);
2424 }
2425
2426
2427 void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
2428 Register scratch) {
2429 const Register left = edx;
2430 const Register right = eax;
2431 __ mov(scratch, left);
2432 ASSERT(!scratch.is(right)); // We're about to clobber scratch.
2433 __ SmiUntag(scratch);
2434 __ push(scratch);
2435 __ fild_s(Operand(esp, 0));
2436
2437 __ mov(scratch, right);
2438 __ SmiUntag(scratch);
2439 __ mov(Operand(esp, 0), scratch);
2440 __ fild_s(Operand(esp, 0));
2441 __ pop(scratch);
2442 }
2443
2444
1009 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm, 2445 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
1010 Label* non_float, 2446 Label* non_float,
1011 Register scratch) { 2447 Register scratch) {
1012 Label test_other, done; 2448 Label test_other, done;
1013 // Test if both operands are floats or smi -> scratch=k_is_float; 2449 // Test if both operands are floats or smi -> scratch=k_is_float;
1014 // Otherwise scratch = k_not_float. 2450 // Otherwise scratch = k_not_float.
1015 __ JumpIfSmi(edx, &test_other, Label::kNear); 2451 __ JumpIfSmi(edx, &test_other, Label::kNear);
1016 __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset)); 2452 __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
1017 Factory* factory = masm->isolate()->factory(); 2453 Factory* factory = masm->isolate()->factory();
1018 __ cmp(scratch, factory->heap_number_map()); 2454 __ cmp(scratch, factory->heap_number_map());
(...skipping 1892 matching lines...)
2911 4347
2912 4348
2913 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { 4349 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
2914 CEntryStub::GenerateAheadOfTime(isolate); 4350 CEntryStub::GenerateAheadOfTime(isolate);
2915 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); 4351 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
2916 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); 4352 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
2917 // It is important that the store buffer overflow stubs are generated first. 4353 // It is important that the store buffer overflow stubs are generated first.
2918 RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); 4354 RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
2919 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); 4355 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
2920 CreateAllocationSiteStub::GenerateAheadOfTime(isolate); 4356 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
2921 PlatformFeatureScope sse2(SSE2);
2922 BinaryOpStub::GenerateAheadOfTime(isolate);
2923 } 4357 }
2924 4358
2925 4359
2926 void CodeStub::GenerateFPStubs(Isolate* isolate) { 4360 void CodeStub::GenerateFPStubs(Isolate* isolate) {
2927 if (CpuFeatures::IsSupported(SSE2)) { 4361 if (CpuFeatures::IsSupported(SSE2)) {
2928 CEntryStub save_doubles(1, kSaveFPRegs); 4362 CEntryStub save_doubles(1, kSaveFPRegs);
2929 // Stubs might already be in the snapshot, detect that and don't regenerate, 4363 // Stubs might already be in the snapshot, detect that and don't regenerate,
2930 // which would lead to code stub initialization state being messed up. 4364 // which would lead to code stub initialization state being messed up.
2931 Code* save_doubles_code; 4365 Code* save_doubles_code;
2932 if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) { 4366 if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
(...skipping 3085 matching lines...)
6018 __ bind(&fast_elements_case); 7452 __ bind(&fast_elements_case);
6019 GenerateCase(masm, FAST_ELEMENTS); 7453 GenerateCase(masm, FAST_ELEMENTS);
6020 } 7454 }
6021 7455
6022 7456
6023 #undef __ 7457 #undef __
6024 7458
6025 } } // namespace v8::internal 7459 } } // namespace v8::internal
6026 7460
6027 #endif // V8_TARGET_ARCH_IA32 7461 #endif // V8_TARGET_ARCH_IA32