Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(155)

Side by Side Diff: src/ia32/code-stubs-ia32.cc

Issue 23618002: Hydrogenisation of binops (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: rebase Created 7 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/ia32/assembler-ia32.cc ('k') | src/ia32/deoptimizer-ia32.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 265 matching lines...) Expand 10 before | Expand all | Expand 10 after
276 Isolate* isolate, 276 Isolate* isolate,
277 CodeStubInterfaceDescriptor* descriptor) { 277 CodeStubInterfaceDescriptor* descriptor) {
278 static Register registers[] = { eax, ebx, ecx, edx }; 278 static Register registers[] = { eax, ebx, ecx, edx };
279 descriptor->register_param_count_ = 4; 279 descriptor->register_param_count_ = 4;
280 descriptor->register_params_ = registers; 280 descriptor->register_params_ = registers;
281 descriptor->deoptimization_handler_ = 281 descriptor->deoptimization_handler_ =
282 FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss); 282 FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
283 } 283 }
284 284
285 285
286 void BinaryOpStub::InitializeInterfaceDescriptor(
287 Isolate* isolate,
288 CodeStubInterfaceDescriptor* descriptor) {
289 static Register registers[] = { edx, eax };
290 descriptor->register_param_count_ = 2;
291 descriptor->register_params_ = registers;
292 descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
293 descriptor->SetMissHandler(
294 ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
295 }
296
297
286 #define __ ACCESS_MASM(masm) 298 #define __ ACCESS_MASM(masm)
287 299
288 300
289 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { 301 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
290 // Update the static counter each time a new code stub is generated. 302 // Update the static counter each time a new code stub is generated.
291 Isolate* isolate = masm->isolate(); 303 Isolate* isolate = masm->isolate();
292 isolate->counters()->code_stubs()->Increment(); 304 isolate->counters()->code_stubs()->Increment();
293 305
294 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate); 306 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
295 int param_count = descriptor->register_param_count_; 307 int param_count = descriptor->register_param_count_;
(...skipping 167 matching lines...) Expand 10 before | Expand all | Expand 10 after
463 ARGS_ON_STACK, 475 ARGS_ON_STACK,
464 ARGS_IN_REGISTERS 476 ARGS_IN_REGISTERS
465 }; 477 };
466 478
467 // Code pattern for loading a floating point value. Input value must 479 // Code pattern for loading a floating point value. Input value must
468 // be either a smi or a heap number object (fp value). Requirements: 480 // be either a smi or a heap number object (fp value). Requirements:
469 // operand in register number. Returns operand as floating point number 481 // operand in register number. Returns operand as floating point number
470 // on FPU stack. 482 // on FPU stack.
471 static void LoadFloatOperand(MacroAssembler* masm, Register number); 483 static void LoadFloatOperand(MacroAssembler* masm, Register number);
472 484
473 // Code pattern for loading floating point values. Input values must
474 // be either smi or heap number objects (fp values). Requirements:
475 // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
476 // Returns operands as floating point numbers on FPU stack.
477 static void LoadFloatOperands(MacroAssembler* masm,
478 Register scratch,
479 ArgLocation arg_location = ARGS_ON_STACK);
480
481 // Similar to LoadFloatOperand but assumes that both operands are smis.
482 // Expects operands in edx, eax.
483 static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
484
485 // Test if operands are smi or number objects (fp). Requirements: 485 // Test if operands are smi or number objects (fp). Requirements:
486 // operand_1 in eax, operand_2 in edx; falls through on float 486 // operand_1 in eax, operand_2 in edx; falls through on float
487 // operands, jumps to the non_float label otherwise. 487 // operands, jumps to the non_float label otherwise.
488 static void CheckFloatOperands(MacroAssembler* masm, 488 static void CheckFloatOperands(MacroAssembler* masm,
489 Label* non_float, 489 Label* non_float,
490 Register scratch); 490 Register scratch);
491 491
492 // Takes the operands in edx and eax and loads them as integers in eax
493 // and ecx.
494 static void LoadUnknownsAsIntegers(MacroAssembler* masm,
495 bool use_sse3,
496 BinaryOpIC::TypeInfo left_type,
497 BinaryOpIC::TypeInfo right_type,
498 Label* operand_conversion_failure);
499
500 // Test if operands are numbers (smi or HeapNumber objects), and load 492 // Test if operands are numbers (smi or HeapNumber objects), and load
501 // them into xmm0 and xmm1 if they are. Jump to label not_numbers if 493 // them into xmm0 and xmm1 if they are. Jump to label not_numbers if
502 // either operand is not a number. Operands are in edx and eax. 494 // either operand is not a number. Operands are in edx and eax.
503 // Leaves operands unchanged. 495 // Leaves operands unchanged.
504 static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers); 496 static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
505
506 // Similar to LoadSSE2Operands but assumes that both operands are smis.
507 // Expects operands in edx, eax.
508 static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
509
510 // Checks that |operand| has an int32 value. If |int32_result| is different
511 // from |scratch|, it will contain that int32 value.
512 static void CheckSSE2OperandIsInt32(MacroAssembler* masm,
513 Label* non_int32,
514 XMMRegister operand,
515 Register int32_result,
516 Register scratch,
517 XMMRegister xmm_scratch);
518 }; 497 };
519 498
520 499
521 void DoubleToIStub::Generate(MacroAssembler* masm) { 500 void DoubleToIStub::Generate(MacroAssembler* masm) {
522 Register input_reg = this->source(); 501 Register input_reg = this->source();
523 Register final_result_reg = this->destination(); 502 Register final_result_reg = this->destination();
524 ASSERT(is_truncating()); 503 ASSERT(is_truncating());
525 504
526 Label check_negative, process_64_bits, done, done_no_stash; 505 Label check_negative, process_64_bits, done, done_no_stash;
527 506
(...skipping 123 matching lines...) Expand 10 before | Expand all | Expand 10 after
651 if (!final_result_reg.is(result_reg)) { 630 if (!final_result_reg.is(result_reg)) {
652 ASSERT(final_result_reg.is(ecx)); 631 ASSERT(final_result_reg.is(ecx));
653 __ mov(final_result_reg, result_reg); 632 __ mov(final_result_reg, result_reg);
654 } 633 }
655 __ pop(save_reg); 634 __ pop(save_reg);
656 __ pop(scratch1); 635 __ pop(scratch1);
657 __ ret(0); 636 __ ret(0);
658 } 637 }
659 638
660 639
661 void BinaryOpStub::Initialize() {
662 platform_specific_bit_ = CpuFeatures::IsSupported(SSE3);
663 }
664
665
// Emits the slow path taken when the operands no longer match this stub's
// type specialization and the arguments are still in registers (edx/eax).
// Pushes both operands plus the stub's MinorKey under the return address,
// then tail-calls the BinaryOp_Patch IC utility, which installs a more
// general stub in the caller and computes the result.
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ pop(ecx);  // Save return address.
  __ push(edx);
  __ push(eax);
  // Left and right arguments are now on top.
  // MinorKey() encodes op/types so the patcher can pick the next stub.
  __ push(Immediate(Smi::FromInt(MinorKey())));

  __ push(ecx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                        masm->isolate()),
      3,   // Three arguments: left, right, MinorKey smi.
      1);  // Result size: one value.
}
683
684
// Prepare for a type transition runtime call when the args are already on
// the stack, under the return address. Same effect as
// GenerateTypeTransition, but skips re-pushing the operand registers.
void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
  __ pop(ecx);  // Save return address.
  // Left and right arguments are already on top of the stack.
  // MinorKey() encodes op/types so the patcher can pick the next stub.
  __ push(Immediate(Smi::FromInt(MinorKey())));

  __ push(ecx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                        masm->isolate()),
      3,   // Three arguments: left, right, MinorKey smi.
      1);  // Result size: one value.
}
702
703
// Pops the two stack-passed operands back into the register convention
// (right in eax, left in edx) while keeping the return address on top of
// the stack. Clobbers ecx.
static void BinaryOpStub_GenerateRegisterArgsPop(MacroAssembler* masm) {
  __ pop(ecx);   // Temporarily hold the return address.
  __ pop(eax);   // Right operand.
  __ pop(edx);   // Left operand.
  __ push(ecx);  // Restore the return address.
}
710
711
// Generates the fast path for a binary operation whose operands are both
// smis. On entry the operands are in edx (left) and eax (right); for the
// stack-argument ops (MOD and the bitwise/shift ops) callers have also
// pushed them under the return address, which is why those paths return
// with `ret(2 * kPointerSize)`.
//   slow                     - jumped to when heap-number allocation fails.
//   allow_heapnumber_results - whether overflowing smi results may be boxed
//                              into freshly allocated HeapNumbers here, or
//                              must fall out to |not_smis| handling instead.
//   op                       - the token of the binary operation to emit.
// Falls through at the end (label not_smis) with the operands restored to
// edx and eax for the caller's non-smi code.
static void BinaryOpStub_GenerateSmiCode(
    MacroAssembler* masm,
    Label* slow,
    BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
    Token::Value op) {
  // 1. Move arguments into edx, eax except for DIV and MOD, which need the
  // dividend in eax and edx free for the division. Use eax, ebx for those.
  Comment load_comment(masm, "-- Load arguments");
  Register left = edx;
  Register right = eax;
  if (op == Token::DIV || op == Token::MOD) {
    left = eax;
    right = ebx;
    __ mov(ebx, eax);
    __ mov(eax, edx);
  }


  // 2. Prepare the smi check of both operands by oring them together.
  // Since the smi tag is 0, the or is a smi iff both operands are smis.
  Comment smi_check_comment(masm, "-- Smi check arguments");
  Label not_smis;
  Register combined = ecx;
  ASSERT(!left.is(combined) && !right.is(combined));
  switch (op) {
    case Token::BIT_OR:
      // Perform the operation into eax and smi check the result. Preserve
      // eax in case the result is not a smi.
      ASSERT(!left.is(ecx) && !right.is(ecx));
      __ mov(ecx, right);
      __ or_(right, left);  // Bitwise or is commutative.
      combined = right;
      break;

    case Token::BIT_XOR:
    case Token::BIT_AND:
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
    case Token::MOD:
      __ mov(combined, right);
      __ or_(combined, left);
      break;

    case Token::SHL:
    case Token::SAR:
    case Token::SHR:
      // Move the right operand into ecx for the shift operation, use eax
      // for the smi check register.
      ASSERT(!left.is(ecx) && !right.is(ecx));
      __ mov(ecx, right);
      __ or_(right, left);
      combined = right;
      break;

    default:
      break;
  }

  // 3. Perform the smi check of the operands.
  STATIC_ASSERT(kSmiTag == 0);  // Adjust zero check if not the case.
  __ JumpIfNotSmi(combined, &not_smis);

  // 4. Operands are both smis, perform the operation leaving the result in
  // eax and check the result if necessary.
  Comment perform_smi(masm, "-- Perform smi operation");
  Label use_fp_on_smis;
  switch (op) {
    case Token::BIT_OR:
      // Nothing to do.
      break;

    case Token::BIT_XOR:
      ASSERT(right.is(eax));
      __ xor_(right, left);  // Bitwise xor is commutative.
      break;

    case Token::BIT_AND:
      ASSERT(right.is(eax));
      __ and_(right, left);  // Bitwise and is commutative.
      break;

    case Token::SHL:
      // Remove tags from operands (but keep sign).
      __ SmiUntag(left);
      __ SmiUntag(ecx);
      // Perform the operation.
      __ shl_cl(left);
      // Check that the *signed* result fits in a smi.
      // 0xc0000000 is the smallest value that overflows the 31-bit smi
      // range, so `result - 0xc0000000` is negative exactly when the result
      // still fits.
      __ cmp(left, 0xc0000000);
      __ j(sign, &use_fp_on_smis);
      // Tag the result and store it in register eax.
      __ SmiTag(left);
      __ mov(eax, left);
      break;

    case Token::SAR:
      // Remove tags from operands (but keep sign).
      __ SmiUntag(left);
      __ SmiUntag(ecx);
      // Perform the operation.
      // SAR of a smi-range value always stays in smi range, so no overflow
      // check is needed.
      __ sar_cl(left);
      // Tag the result and store it in register eax.
      __ SmiTag(left);
      __ mov(eax, left);
      break;

    case Token::SHR:
      // Remove tags from operands (but keep sign).
      __ SmiUntag(left);
      __ SmiUntag(ecx);
      // Perform the operation.
      __ shr_cl(left);
      // Check that the *unsigned* result fits in a smi.
      // Neither of the two high-order bits can be set:
      // - 0x80000000: high bit would be lost when smi tagging.
      // - 0x40000000: this number would convert to negative when
      // Smi tagging these two cases can only happen with shifts
      // by 0 or 1 when handed a valid smi.
      __ test(left, Immediate(0xc0000000));
      __ j(not_zero, &use_fp_on_smis);
      // Tag the result and store it in register eax.
      __ SmiTag(left);
      __ mov(eax, left);
      break;

    case Token::ADD:
      ASSERT(right.is(eax));
      __ add(right, left);  // Addition is commutative.
      __ j(overflow, &use_fp_on_smis);
      break;

    case Token::SUB:
      __ sub(left, right);
      __ j(overflow, &use_fp_on_smis);
      __ mov(eax, left);
      break;

    case Token::MUL:
      // If the smi tag is 0 we can just leave the tag on one operand.
      STATIC_ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
      // We can't revert the multiplication if the result is not a smi
      // so save the right operand.
      __ mov(ebx, right);
      // Remove tag from one of the operands (but keep sign).
      __ SmiUntag(right);
      // Do multiplication.
      __ imul(right, left);  // Multiplication is commutative.
      __ j(overflow, &use_fp_on_smis);
      // Check for negative zero result. Use combined = left | right.
      __ NegativeZeroTest(right, combined, &use_fp_on_smis);
      break;

    case Token::DIV:
      // We can't revert the division if the result is not a smi so
      // save the left operand.
      __ mov(edi, left);
      // Check for 0 divisor.
      __ test(right, right);
      __ j(zero, &use_fp_on_smis);
      // Sign extend left into edx:eax.
      ASSERT(left.is(eax));
      __ cdq();
      // Divide edx:eax by right.
      __ idiv(right);
      // Check for the corner case of dividing the most negative smi by
      // -1. We cannot use the overflow flag, since it is not set by idiv
      // instruction.
      STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
      __ cmp(eax, 0x40000000);
      __ j(equal, &use_fp_on_smis);
      // Check for negative zero result. Use combined = left | right.
      __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
      // Check that the remainder is zero.
      __ test(edx, edx);
      __ j(not_zero, &use_fp_on_smis);
      // Tag the result and store it in register eax.
      __ SmiTag(eax);
      break;

    case Token::MOD:
      // Check for 0 divisor.
      __ test(right, right);
      __ j(zero, &not_smis);

      // Sign extend left into edx:eax.
      ASSERT(left.is(eax));
      __ cdq();
      // Divide edx:eax by right.
      __ idiv(right);
      // Check for negative zero result. Use combined = left | right.
      __ NegativeZeroTest(edx, combined, slow);
      // Move remainder to register eax.
      __ mov(eax, edx);
      break;

    default:
      UNREACHABLE();
  }

  // 5. Emit return of result in eax. Some operations have registers pushed.
  switch (op) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
      __ ret(0);
      break;
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR:
      // These ops also received their arguments on the stack; drop them.
      __ ret(2 * kPointerSize);
      break;
    default:
      UNREACHABLE();
  }

  // 6. For some operations emit inline code to perform floating point
  // operations on known smis (e.g., if the result of the operation
  // overflowed the smi range).
  if (allow_heapnumber_results == BinaryOpStub::NO_HEAPNUMBER_RESULTS) {
    // Heap-number results are not allowed here: undo any clobbering done in
    // step 4 and fall out to the non-smi handling instead.
    __ bind(&use_fp_on_smis);
    switch (op) {
      // Undo the effects of some operations, and some register moves.
      case Token::SHL:
        // The arguments are saved on the stack, and only used from there.
        break;
      case Token::ADD:
        // Revert right = right + left.
        __ sub(right, left);
        break;
      case Token::SUB:
        // Revert left = left - right.
        __ add(left, right);
        break;
      case Token::MUL:
        // Right was clobbered but a copy is in ebx.
        __ mov(right, ebx);
        break;
      case Token::DIV:
        // Left was clobbered but a copy is in edi. Right is in ebx for
        // division. They should be in eax, ebx for jump to not_smi.
        __ mov(eax, edi);
        break;
      default:
        // No other operators jump to use_fp_on_smis.
        break;
    }
    __ jmp(&not_smis);
  } else {
    ASSERT(allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS);
    switch (op) {
      case Token::SHL:
      case Token::SHR: {
        Comment perform_float(masm, "-- Perform float operation on smis");
        __ bind(&use_fp_on_smis);
        // Result we want is in left == edx, so we can put the allocated heap
        // number in eax.
        __ AllocateHeapNumber(eax, ecx, ebx, slow);
        // Store the result in the HeapNumber and return.
        // It's OK to overwrite the arguments on the stack because we
        // are about to return.
        if (op == Token::SHR) {
          // SHR produces an unsigned 32-bit value; widen it to a 64-bit
          // integer (high word zero) so fild_d loads it unsigned.
          __ mov(Operand(esp, 1 * kPointerSize), left);
          __ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
          __ fild_d(Operand(esp, 1 * kPointerSize));
          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
        } else {
          ASSERT_EQ(Token::SHL, op);
          if (CpuFeatures::IsSupported(SSE2)) {
            CpuFeatureScope use_sse2(masm, SSE2);
            __ Cvtsi2sd(xmm0, left);
            __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
          } else {
            __ mov(Operand(esp, 1 * kPointerSize), left);
            __ fild_s(Operand(esp, 1 * kPointerSize));
            __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
          }
        }
        __ ret(2 * kPointerSize);
        break;
      }

      case Token::ADD:
      case Token::SUB:
      case Token::MUL:
      case Token::DIV: {
        Comment perform_float(masm, "-- Perform float operation on smis");
        __ bind(&use_fp_on_smis);
        // Restore arguments to edx, eax.
        switch (op) {
          case Token::ADD:
            // Revert right = right + left.
            __ sub(right, left);
            break;
          case Token::SUB:
            // Revert left = left - right.
            __ add(left, right);
            break;
          case Token::MUL:
            // Right was clobbered but a copy is in ebx.
            __ mov(right, ebx);
            break;
          case Token::DIV:
            // Left was clobbered but a copy is in edi. Right is in ebx for
            // division.
            __ mov(edx, edi);
            __ mov(eax, right);
            break;
          default: UNREACHABLE();
            break;
        }
        __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
        if (CpuFeatures::IsSupported(SSE2)) {
          CpuFeatureScope use_sse2(masm, SSE2);
          FloatingPointHelper::LoadSSE2Smis(masm, ebx);
          switch (op) {
            case Token::ADD: __ addsd(xmm0, xmm1); break;
            case Token::SUB: __ subsd(xmm0, xmm1); break;
            case Token::MUL: __ mulsd(xmm0, xmm1); break;
            case Token::DIV: __ divsd(xmm0, xmm1); break;
            default: UNREACHABLE();
          }
          __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
        } else {  // SSE2 not available, use FPU.
          FloatingPointHelper::LoadFloatSmis(masm, ebx);
          switch (op) {
            case Token::ADD: __ faddp(1); break;
            case Token::SUB: __ fsubp(1); break;
            case Token::MUL: __ fmulp(1); break;
            case Token::DIV: __ fdivp(1); break;
            default: UNREACHABLE();
          }
          __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
        }
        __ mov(eax, ecx);
        __ ret(0);
        break;
      }

      default:
        break;
    }
  }

  // 7. Non-smi operands, fall out to the non-smi code with the operands in
  // edx and eax.
  Comment done_comment(masm, "-- Enter non-smi code");
  __ bind(&not_smis);
  switch (op) {
    case Token::BIT_OR:
    case Token::SHL:
    case Token::SAR:
    case Token::SHR:
      // Right operand is saved in ecx and eax was destroyed by the smi
      // check.
      __ mov(eax, ecx);
      break;

    case Token::DIV:
    case Token::MOD:
      // Operands are in eax, ebx at this point.
      __ mov(edx, eax);
      __ mov(eax, ebx);
      break;

    default:
      break;
  }
}
1086
1087
// Stub body for the SMI type-feedback state. Tries the inline smi fast
// path first; on type mismatch it patches the IC via a type transition,
// and on allocation failure it boxes the operands and calls the runtime.
// Operands arrive in edx (left) and eax (right).
void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
  Label right_arg_changed, call_runtime;

  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
      break;
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR:
      // These ops expect their arguments on the stack as well.
      GenerateRegisterArgsPush(masm);
      break;
    default:
      UNREACHABLE();
  }

  if (op_ == Token::MOD && encoded_right_arg_.has_value) {
    // It is guaranteed that the value will fit into a Smi, because if it
    // didn't, we wouldn't be here, see BinaryOp_Patch.
    __ cmp(eax, Immediate(Smi::FromInt(fixed_right_arg_value())));
    __ j(not_equal, &right_arg_changed);
  }

  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
      result_type_ == BinaryOpIC::SMI) {
    // Only pure smi results are acceptable; overflow must transition.
    BinaryOpStub_GenerateSmiCode(
        masm, &call_runtime, NO_HEAPNUMBER_RESULTS, op_);
  } else {
    BinaryOpStub_GenerateSmiCode(
        masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
  }

  // Code falls through if the result is not returned as either a smi or heap
  // number.
  __ bind(&right_arg_changed);
  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
      GenerateTypeTransition(masm);
      break;
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR:
      // Arguments are already on the stack for these ops.
      GenerateTypeTransitionWithSavedArgs(masm);
      break;
    default:
      UNREACHABLE();
  }

  __ bind(&call_runtime);
  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
      break;
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR:
      // Restore the stack-passed arguments to edx/eax before the call.
      BinaryOpStub_GenerateRegisterArgsPop(masm);
      break;
    default:
      UNREACHABLE();
  }

  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ push(edx);
    __ push(eax);
    GenerateCallRuntime(masm);
  }
  __ ret(0);
}
1177
1178
// Stub body for ADD when type feedback says both operands are strings.
// Verifies the feedback at runtime and tail-calls the string-add stub;
// anything else triggers a type transition. Operands: edx (left),
// eax (right).
void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
  Label call_runtime;
  ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
  ASSERT(op_ == Token::ADD);
  // If both arguments are strings, call the string add stub.
  // Otherwise, do a transition.

  // Registers containing left and right operands respectively.
  Register left = edx;
  Register right = eax;

  // Test if left operand is a string.
  __ JumpIfSmi(left, &call_runtime, Label::kNear);
  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
  __ j(above_equal, &call_runtime, Label::kNear);

  // Test if right operand is a string.
  __ JumpIfSmi(right, &call_runtime, Label::kNear);
  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
  __ j(above_equal, &call_runtime, Label::kNear);

  // Types already checked above, so skip the stub's own checks.
  StringAddStub string_add_stub(
      (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
  GenerateRegisterArgsPush(masm);
  __ TailCallStub(&string_add_stub);

  // NOTE: despite the label name this transitions the IC rather than
  // calling the runtime directly.
  __ bind(&call_runtime);
  GenerateTypeTransition(masm);
}
1208
1209
// Forward declaration; defined later in this file. Produces the HeapNumber
// that will hold a floating-point result (callers below store through eax
// afterwards), honoring |mode| to possibly reuse an operand's heap number,
// and jumping to |alloc_failure| if allocation fails.
static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
                                                      Label* alloc_failure,
                                                      OverwriteMode mode);
1213
1214
// Stub body for the INT32 type-feedback state: operands are numbers whose
// values fit in an int32 (but may be heap numbers).
// Input:
//    edx: left operand (tagged)
//    eax: right operand (tagged)
// Output:
//    eax: result (tagged)
void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
  Label call_runtime;
  ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);

  // Floating point case.
  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
    case Token::MOD: {
      Label not_floats, not_int32, right_arg_changed;
      if (CpuFeatures::IsSupported(SSE2)) {
        CpuFeatureScope use_sse2(masm, SSE2);
        // It could be that only SMIs have been seen at either the left
        // or the right operand. For precise type feedback, patch the IC
        // again if this changes.
        // In theory, we would need the same check in the non-SSE2 case,
        // but since we don't support Crankshaft on such hardware we can
        // afford not to care about precise type feedback.
        if (left_type_ == BinaryOpIC::SMI) {
          __ JumpIfNotSmi(edx, &not_int32);
        }
        if (right_type_ == BinaryOpIC::SMI) {
          __ JumpIfNotSmi(eax, &not_int32);
        }
        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
        // Verify both operands really are int32s; their int32 values end
        // up in ebx (left) and edi (right).
        FloatingPointHelper::CheckSSE2OperandIsInt32(
            masm, &not_int32, xmm0, ebx, ecx, xmm2);
        FloatingPointHelper::CheckSSE2OperandIsInt32(
            masm, &not_int32, xmm1, edi, ecx, xmm2);
        if (op_ == Token::MOD) {
          if (encoded_right_arg_.has_value) {
            __ cmp(edi, Immediate(fixed_right_arg_value()));
            __ j(not_equal, &right_arg_changed);
          }
          GenerateRegisterArgsPush(masm);
          __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
        } else {
          switch (op_) {
            case Token::ADD: __ addsd(xmm0, xmm1); break;
            case Token::SUB: __ subsd(xmm0, xmm1); break;
            case Token::MUL: __ mulsd(xmm0, xmm1); break;
            case Token::DIV: __ divsd(xmm0, xmm1); break;
            default: UNREACHABLE();
          }
          // Check result type if it is currently Int32.
          if (result_type_ <= BinaryOpIC::INT32) {
            FloatingPointHelper::CheckSSE2OperandIsInt32(
                masm, &not_int32, xmm0, ecx, ecx, xmm2);
          }
          BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
          __ ret(0);
        }
      } else {  // SSE2 not available, use FPU.
        FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
        FloatingPointHelper::LoadFloatOperands(
            masm,
            ecx,
            FloatingPointHelper::ARGS_IN_REGISTERS);
        if (op_ == Token::MOD) {
          // The operands are now on the FPU stack, but we don't need them.
          __ fstp(0);
          __ fstp(0);
          GenerateRegisterArgsPush(masm);
          __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
        } else {
          switch (op_) {
            case Token::ADD: __ faddp(1); break;
            case Token::SUB: __ fsubp(1); break;
            case Token::MUL: __ fmulp(1); break;
            case Token::DIV: __ fdivp(1); break;
            default: UNREACHABLE();
          }
          Label after_alloc_failure;
          BinaryOpStub_GenerateHeapResultAllocation(
              masm, &after_alloc_failure, mode_);
          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
          __ ret(0);
          __ bind(&after_alloc_failure);
          __ fstp(0);  // Pop FPU stack before calling runtime.
          __ jmp(&call_runtime);
        }
      }

      __ bind(&not_floats);
      __ bind(&not_int32);
      __ bind(&right_arg_changed);
      GenerateTypeTransition(masm);
      break;
    }

    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR: {
      GenerateRegisterArgsPush(masm);
      Label not_floats;
      Label not_int32;
      Label non_smi_result;
      bool use_sse3 = platform_specific_bit_;
      // Converts the operands to int32s in eax and ecx.
      FloatingPointHelper::LoadUnknownsAsIntegers(
          masm, use_sse3, left_type_, right_type_, &not_floats);
      switch (op_) {
        case Token::BIT_OR:  __ or_(eax, ecx); break;
        case Token::BIT_AND: __ and_(eax, ecx); break;
        case Token::BIT_XOR: __ xor_(eax, ecx); break;
        case Token::SAR: __ sar_cl(eax); break;
        case Token::SHL: __ shl_cl(eax); break;
        case Token::SHR: __ shr_cl(eax); break;
        default: UNREACHABLE();
      }
      if (op_ == Token::SHR) {
        // Check if result is non-negative and fits in a smi.
        __ test(eax, Immediate(0xc0000000));
        __ j(not_zero, &call_runtime);
      } else {
        // Check if result fits in a smi.
        __ cmp(eax, 0xc0000000);
        __ j(negative, &non_smi_result, Label::kNear);
      }
      // Tag smi result and return.
      __ SmiTag(eax);
      __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.

      // All ops except SHR return a signed int32 that we load in
      // a HeapNumber.
      if (op_ != Token::SHR) {
        __ bind(&non_smi_result);
        // Allocate a heap number if needed.
        __ mov(ebx, eax);  // ebx: result
        Label skip_allocation;
        switch (mode_) {
          case OVERWRITE_LEFT:
          case OVERWRITE_RIGHT:
            // If the operand was an object, we skip the
            // allocation of a heap number.
            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
                                1 * kPointerSize : 2 * kPointerSize));
            __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
            // Fall through!
          case NO_OVERWRITE:
            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
            __ bind(&skip_allocation);
            break;
          default: UNREACHABLE();
        }
        // Store the result in the HeapNumber and return.
        if (CpuFeatures::IsSupported(SSE2)) {
          CpuFeatureScope use_sse2(masm, SSE2);
          __ Cvtsi2sd(xmm0, ebx);
          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
        } else {
          __ mov(Operand(esp, 1 * kPointerSize), ebx);
          __ fild_s(Operand(esp, 1 * kPointerSize));
          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
        }
        __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
      }

      __ bind(&not_floats);
      __ bind(&not_int32);
      GenerateTypeTransitionWithSavedArgs(masm);
      break;
    }
    default: UNREACHABLE(); break;
  }

  // If an allocation fails, or SHR hits a hard case, use the runtime system to
  // get the correct result.
  __ bind(&call_runtime);

  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
      break;
    case Token::MOD:
      return;  // Handled above.
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR:
      BinaryOpStub_GenerateRegisterArgsPop(masm);
      break;
    default:
      UNREACHABLE();
  }

  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ push(edx);
    __ push(eax);
    GenerateCallRuntime(masm);
  }
  __ ret(0);
}
1423
1424
// Stub body for the ODDBALL type-feedback state: one or both operands may
// be undefined. Replaces undefined with 0 (bitwise ops) or NaN (arithmetic
// ops), then delegates to the number stub. Operands: edx (left),
// eax (right).
void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
  if (op_ == Token::ADD) {
    // Handle string addition here, because it is the only operation
    // that does not do a ToNumber conversion on the operands.
    GenerateAddStrings(masm);
  }

  Factory* factory = masm->isolate()->factory();

  // Convert odd ball arguments to numbers.
  Label check, done;
  __ cmp(edx, factory->undefined_value());
  __ j(not_equal, &check, Label::kNear);
  if (Token::IsBitOp(op_)) {
    // Bitwise ops treat undefined as 0 (ToInt32(undefined)).
    __ xor_(edx, edx);
  } else {
    // Arithmetic ops treat undefined as NaN (ToNumber(undefined)).
    __ mov(edx, Immediate(factory->nan_value()));
  }
  __ jmp(&done, Label::kNear);
  __ bind(&check);
  __ cmp(eax, factory->undefined_value());
  __ j(not_equal, &done, Label::kNear);
  if (Token::IsBitOp(op_)) {
    __ xor_(eax, eax);
  } else {
    __ mov(eax, Immediate(factory->nan_value()));
  }
  __ bind(&done);

  // With oddballs normalized, the number stub can finish the job.
  GenerateNumberStub(masm);
}
1456
1457
// Stub entry for operands known (by type feedback) to be numbers.
// Operands arrive in edx (left) and eax (right). Arithmetic ops go
// through SSE2 when available, x87 FPU otherwise; bitwise/shift ops
// truncate both operands to int32 first. Falls back to a type
// transition on unexpected operand types and to the runtime on
// allocation failure or hard shift cases.
1458 void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
1459 Label call_runtime;
1460 
1461 // Floating point case.
1462 switch (op_) {
1463 case Token::ADD:
1464 case Token::SUB:
1465 case Token::MUL:
1466 case Token::DIV: {
1467 Label not_floats;
1468 if (CpuFeatures::IsSupported(SSE2)) {
1469 CpuFeatureScope use_sse2(masm, SSE2);
1470 
1471 // It could be that only SMIs have been seen at either the left
1472 // or the right operand. For precise type feedback, patch the IC
1473 // again if this changes.
1474 // In theory, we would need the same check in the non-SSE2 case,
1475 // but since we don't support Crankshaft on such hardware we can
1476 // afford not to care about precise type feedback.
1477 if (left_type_ == BinaryOpIC::SMI) {
1478 __ JumpIfNotSmi(edx, &not_floats);
1479 }
1480 if (right_type_ == BinaryOpIC::SMI) {
1481 __ JumpIfNotSmi(eax, &not_floats);
1482 }
// Loads left into xmm0, right into xmm1; bails out to not_floats for
// non-number operands.
1483 FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
// If feedback says INT32, verify the double value is actually a
// representable int32, otherwise re-patch via type transition.
1484 if (left_type_ == BinaryOpIC::INT32) {
1485 FloatingPointHelper::CheckSSE2OperandIsInt32(
1486 masm, &not_floats, xmm0, ecx, ecx, xmm2);
1487 }
1488 if (right_type_ == BinaryOpIC::INT32) {
1489 FloatingPointHelper::CheckSSE2OperandIsInt32(
1490 masm, &not_floats, xmm1, ecx, ecx, xmm2);
1491 }
1492 
1493 switch (op_) {
1494 case Token::ADD: __ addsd(xmm0, xmm1); break;
1495 case Token::SUB: __ subsd(xmm0, xmm1); break;
1496 case Token::MUL: __ mulsd(xmm0, xmm1); break;
1497 case Token::DIV: __ divsd(xmm0, xmm1); break;
1498 default: UNREACHABLE();
1499 }
// Result HeapNumber (fresh or overwritten operand) ends up in eax.
1500 BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
1501 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1502 __ ret(0);
1503 } else { // SSE2 not available, use FPU.
1504 FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
1505 FloatingPointHelper::LoadFloatOperands(
1506 masm,
1507 ecx,
1508 FloatingPointHelper::ARGS_IN_REGISTERS);
1509 switch (op_) {
1510 case Token::ADD: __ faddp(1); break;
1511 case Token::SUB: __ fsubp(1); break;
1512 case Token::MUL: __ fmulp(1); break;
1513 case Token::DIV: __ fdivp(1); break;
1514 default: UNREACHABLE();
1515 }
1516 Label after_alloc_failure;
1517 BinaryOpStub_GenerateHeapResultAllocation(
1518 masm, &after_alloc_failure, mode_);
1519 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1520 __ ret(0);
1521 __ bind(&after_alloc_failure);
1522 __ fstp(0); // Pop FPU stack before calling runtime.
1523 __ jmp(&call_runtime);
1524 }
1525 
// Operands were not the expected number types: re-patch the IC.
1526 __ bind(&not_floats);
1527 GenerateTypeTransition(masm);
1528 break;
1529 }
1530 
1531 case Token::MOD: {
1532 // For MOD we go directly to runtime in the non-smi case.
1533 break;
1534 }
1535 case Token::BIT_OR:
1536 case Token::BIT_AND:
1537 case Token::BIT_XOR:
1538 case Token::SAR:
1539 case Token::SHL:
1540 case Token::SHR: {
// Push edx/eax under the return address so the runtime fallback and
// type transition can find the original (tagged) arguments.
1541 GenerateRegisterArgsPush(masm);
1542 Label not_floats;
1543 Label non_smi_result;
1544 // We do not check the input arguments here, as any value is
1545 // unconditionally truncated to an int32 anyway. To get the
1546 // right optimized code, int32 type feedback is just right.
1547 bool use_sse3 = platform_specific_bit_;
// Leaves left int32 in eax and right int32 in ecx.
1548 FloatingPointHelper::LoadUnknownsAsIntegers(
1549 masm, use_sse3, left_type_, right_type_, &not_floats);
1550 switch (op_) {
1551 case Token::BIT_OR: __ or_(eax, ecx); break;
1552 case Token::BIT_AND: __ and_(eax, ecx); break;
1553 case Token::BIT_XOR: __ xor_(eax, ecx); break;
1554 case Token::SAR: __ sar_cl(eax); break;
1555 case Token::SHL: __ shl_cl(eax); break;
1556 case Token::SHR: __ shr_cl(eax); break;
1557 default: UNREACHABLE();
1558 }
1559 if (op_ == Token::SHR) {
1560 // Check if result is non-negative and fits in a smi.
1561 __ test(eax, Immediate(0xc0000000));
1562 __ j(not_zero, &call_runtime);
1563 } else {
1564 // Check if result fits in a smi.
1565 __ cmp(eax, 0xc0000000);
1566 __ j(negative, &non_smi_result, Label::kNear);
1567 }
1568 // Tag smi result and return.
1569 __ SmiTag(eax);
1570 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
1571 
1572 // All ops except SHR return a signed int32 that we load in
1573 // a HeapNumber.
1574 if (op_ != Token::SHR) {
1575 __ bind(&non_smi_result);
1576 // Allocate a heap number if needed.
1577 __ mov(ebx, eax); // ebx: result
1578 Label skip_allocation;
1579 switch (mode_) {
1580 case OVERWRITE_LEFT:
1581 case OVERWRITE_RIGHT:
1582 // If the operand was an object, we skip the
1583 // allocation of a heap number.
1584 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1585 1 * kPointerSize : 2 * kPointerSize));
1586 __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
1587 // Fall through!
1588 case NO_OVERWRITE:
1589 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
1590 __ bind(&skip_allocation);
1591 break;
1592 default: UNREACHABLE();
1593 }
1594 // Store the result in the HeapNumber and return.
1595 if (CpuFeatures::IsSupported(SSE2)) {
1596 CpuFeatureScope use_sse2(masm, SSE2);
1597 __ Cvtsi2sd(xmm0, ebx);
1598 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1599 } else {
// No SSE2: go int32 -> x87 -> double via a stack slot.
1600 __ mov(Operand(esp, 1 * kPointerSize), ebx);
1601 __ fild_s(Operand(esp, 1 * kPointerSize));
1602 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1603 }
1604 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
1605 }
1606 
1607 __ bind(&not_floats);
1608 GenerateTypeTransitionWithSavedArgs(masm);
1609 break;
1610 }
1611 default: UNREACHABLE(); break;
1612 }
1613 
1614 // If an allocation fails, or SHR or MOD hit a hard case,
1615 // use the runtime system to get the correct result.
1616 __ bind(&call_runtime);
1617 
// Bitwise/shift paths pushed their args above; pop them back into
// registers before the runtime call re-pushes them.
1618 switch (op_) {
1619 case Token::ADD:
1620 case Token::SUB:
1621 case Token::MUL:
1622 case Token::DIV:
1623 case Token::MOD:
1624 break;
1625 case Token::BIT_OR:
1626 case Token::BIT_AND:
1627 case Token::BIT_XOR:
1628 case Token::SAR:
1629 case Token::SHL:
1630 case Token::SHR:
1631 BinaryOpStub_GenerateRegisterArgsPop(masm);
1632 break;
1633 default:
1634 UNREACHABLE();
1635 }
1636 
1637 {
1638 FrameScope scope(masm, StackFrame::INTERNAL);
1639 __ push(edx);
1640 __ push(eax);
1641 GenerateCallRuntime(masm);
1642 }
1643 __ ret(0);
1644 }
1645
1646
// Fully generic stub entry: no type assumptions about the operands
// (edx = left, eax = right). Tries the smi fast path first, then the
// float paths, then int32 truncation for bitwise/shift ops, and
// finally string addition or the runtime.
1647 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
1648 Label call_runtime;
1649 
1650 Counters* counters = masm->isolate()->counters();
1651 __ IncrementCounter(counters->generic_binary_stub_calls(), 1);
1652 
// MOD and the bitwise/shift ops save the tagged args on the stack for
// the runtime fallback; plain arithmetic keeps them in registers.
1653 switch (op_) {
1654 case Token::ADD:
1655 case Token::SUB:
1656 case Token::MUL:
1657 case Token::DIV:
1658 break;
1659 case Token::MOD:
1660 case Token::BIT_OR:
1661 case Token::BIT_AND:
1662 case Token::BIT_XOR:
1663 case Token::SAR:
1664 case Token::SHL:
1665 case Token::SHR:
1666 GenerateRegisterArgsPush(masm);
1667 break;
1668 default:
1669 UNREACHABLE();
1670 }
1671 
// Smi fast path; falls through for non-smi operands.
1672 BinaryOpStub_GenerateSmiCode(
1673 masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
1674 
1675 // Floating point case.
1676 switch (op_) {
1677 case Token::ADD:
1678 case Token::SUB:
1679 case Token::MUL:
1680 case Token::DIV: {
1681 Label not_floats;
1682 if (CpuFeatures::IsSupported(SSE2)) {
1683 CpuFeatureScope use_sse2(masm, SSE2);
// Left -> xmm0, right -> xmm1; non-numbers bail to not_floats.
1684 FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
1685 
1686 switch (op_) {
1687 case Token::ADD: __ addsd(xmm0, xmm1); break;
1688 case Token::SUB: __ subsd(xmm0, xmm1); break;
1689 case Token::MUL: __ mulsd(xmm0, xmm1); break;
1690 case Token::DIV: __ divsd(xmm0, xmm1); break;
1691 default: UNREACHABLE();
1692 }
1693 BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
1694 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1695 __ ret(0);
1696 } else { // SSE2 not available, use FPU.
1697 FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
1698 FloatingPointHelper::LoadFloatOperands(
1699 masm,
1700 ecx,
1701 FloatingPointHelper::ARGS_IN_REGISTERS);
1702 switch (op_) {
1703 case Token::ADD: __ faddp(1); break;
1704 case Token::SUB: __ fsubp(1); break;
1705 case Token::MUL: __ fmulp(1); break;
1706 case Token::DIV: __ fdivp(1); break;
1707 default: UNREACHABLE();
1708 }
1709 Label after_alloc_failure;
1710 BinaryOpStub_GenerateHeapResultAllocation(
1711 masm, &after_alloc_failure, mode_);
1712 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1713 __ ret(0);
1714 __ bind(&after_alloc_failure);
1715 __ fstp(0); // Pop FPU stack before calling runtime.
1716 __ jmp(&call_runtime);
1717 }
// Generic stub has no narrower stub to transition to; fall through
// to the runtime fallback below.
1718 __ bind(&not_floats);
1719 break;
1720 }
1721 case Token::MOD: {
1722 // For MOD we go directly to runtime in the non-smi case.
1723 break;
1724 }
1725 case Token::BIT_OR:
1726 case Token::BIT_AND:
1727 case Token::BIT_XOR:
1728 case Token::SAR:
1729 case Token::SHL:
1730 case Token::SHR: {
1731 Label non_smi_result;
1732 bool use_sse3 = platform_specific_bit_;
// Truncate both operands to int32: left -> eax, right -> ecx.
// Conversion failure goes straight to the runtime.
1733 FloatingPointHelper::LoadUnknownsAsIntegers(masm,
1734 use_sse3,
1735 BinaryOpIC::GENERIC,
1736 BinaryOpIC::GENERIC,
1737 &call_runtime);
1738 switch (op_) {
1739 case Token::BIT_OR: __ or_(eax, ecx); break;
1740 case Token::BIT_AND: __ and_(eax, ecx); break;
1741 case Token::BIT_XOR: __ xor_(eax, ecx); break;
1742 case Token::SAR: __ sar_cl(eax); break;
1743 case Token::SHL: __ shl_cl(eax); break;
1744 case Token::SHR: __ shr_cl(eax); break;
1745 default: UNREACHABLE();
1746 }
1747 if (op_ == Token::SHR) {
1748 // Check if result is non-negative and fits in a smi.
1749 __ test(eax, Immediate(0xc0000000));
1750 __ j(not_zero, &call_runtime);
1751 } else {
1752 // Check if result fits in a smi.
1753 __ cmp(eax, 0xc0000000);
1754 __ j(negative, &non_smi_result, Label::kNear);
1755 }
1756 // Tag smi result and return.
1757 __ SmiTag(eax);
1758 __ ret(2 * kPointerSize); // Drop the arguments from the stack.
1759 
1760 // All ops except SHR return a signed int32 that we load in
1761 // a HeapNumber.
1762 if (op_ != Token::SHR) {
1763 __ bind(&non_smi_result);
1764 // Allocate a heap number if needed.
1765 __ mov(ebx, eax); // ebx: result
1766 Label skip_allocation;
1767 switch (mode_) {
1768 case OVERWRITE_LEFT:
1769 case OVERWRITE_RIGHT:
1770 // If the operand was an object, we skip the
1771 // allocation of a heap number.
1772 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1773 1 * kPointerSize : 2 * kPointerSize));
1774 __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
1775 // Fall through!
1776 case NO_OVERWRITE:
1777 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
1778 __ bind(&skip_allocation);
1779 break;
1780 default: UNREACHABLE();
1781 }
1782 // Store the result in the HeapNumber and return.
1783 if (CpuFeatures::IsSupported(SSE2)) {
1784 CpuFeatureScope use_sse2(masm, SSE2);
1785 __ Cvtsi2sd(xmm0, ebx);
1786 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1787 } else {
// No SSE2: go int32 -> x87 -> double via a stack slot.
1788 __ mov(Operand(esp, 1 * kPointerSize), ebx);
1789 __ fild_s(Operand(esp, 1 * kPointerSize));
1790 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1791 }
1792 __ ret(2 * kPointerSize);
1793 }
1794 break;
1795 }
1796 default: UNREACHABLE(); break;
1797 }
1798 
1799 // If all else fails, use the runtime system to get the correct
1800 // result.
1801 __ bind(&call_runtime);
1802 switch (op_) {
1803 case Token::ADD:
// ADD must try string concatenation before the numeric runtime call.
1804 GenerateAddStrings(masm);
1805 // Fall through.
1806 case Token::SUB:
1807 case Token::MUL:
1808 case Token::DIV:
1809 break;
1810 case Token::MOD:
1811 case Token::BIT_OR:
1812 case Token::BIT_AND:
1813 case Token::BIT_XOR:
1814 case Token::SAR:
1815 case Token::SHL:
1816 case Token::SHR:
// These ops pushed their args at entry; restore edx/eax and unwind
// the stack before calling out.
1817 BinaryOpStub_GenerateRegisterArgsPop(masm);
1818 break;
1819 default:
1820 UNREACHABLE();
1821 }
1822 
1823 {
1824 FrameScope scope(masm, StackFrame::INTERNAL);
1825 __ push(edx);
1826 __ push(eax);
1827 GenerateCallRuntime(masm);
1828 }
1829 __ ret(0);
1830 }
1831
1832
// Handles '+' when either operand is a string by tail-calling the
// appropriate StringAddStub. If neither operand is a string, simply
// falls through (call_runtime label at the end) so the caller's code
// continues with numeric handling.
1833 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
1834 ASSERT(op_ == Token::ADD);
1835 Label left_not_string, call_runtime;
1836 
1837 // Registers containing left and right operands respectively.
1838 Register left = edx;
1839 Register right = eax;
1840 
1841 // Test if left operand is a string.
// Smis are not strings; instance types below FIRST_NONSTRING_TYPE are.
1842 __ JumpIfSmi(left, &left_not_string, Label::kNear);
1843 __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
1844 __ j(above_equal, &left_not_string, Label::kNear);
1845 
// Left is a string: the string-add stub only needs to check the right
// operand (STRING_ADD_CHECK_RIGHT).
1846 StringAddStub string_add_left_stub(
1847 (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
1848 GenerateRegisterArgsPush(masm);
1849 __ TailCallStub(&string_add_left_stub);
1850 
1851 // Left operand is not a string, test right.
1852 __ bind(&left_not_string);
1853 __ JumpIfSmi(right, &call_runtime, Label::kNear);
1854 __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
1855 __ j(above_equal, &call_runtime, Label::kNear);
1856 
// Right is a string: only the left operand still needs checking.
1857 StringAddStub string_add_right_stub(
1858 (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
1859 GenerateRegisterArgsPush(masm);
1860 __ TailCallStub(&string_add_right_stub);
1861 
1862 // Neither argument is a string.
1863 __ bind(&call_runtime);
1864 }
1865
1866
// Produces a HeapNumber in eax to hold the result, either by reusing
// an operand that is already a heap object (per the overwrite mode) or
// by allocating a fresh one. eax and edx are kept intact up to the
// point of success so a runtime fallback can still see the original
// arguments; jumps to |alloc_failure| if allocation fails.
1867 static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
1868 Label* alloc_failure,
1869 OverwriteMode mode) {
1870 Label skip_allocation;
1871 switch (mode) {
1872 case OVERWRITE_LEFT: {
1873 // If the argument in edx is already an object, we skip the
1874 // allocation of a heap number.
1875 __ JumpIfNotSmi(edx, &skip_allocation, Label::kNear);
1876 // Allocate a heap number for the result. Keep eax and edx intact
1877 // for the possible runtime call.
1878 __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
1879 // Now edx can be overwritten losing one of the arguments as we are
1880 // now done and will not need it any more.
1881 __ mov(edx, ebx);
1882 __ bind(&skip_allocation);
1883 // Use object in edx as a result holder
1884 __ mov(eax, edx);
1885 break;
1886 }
1887 case OVERWRITE_RIGHT:
1888 // If the argument in eax is already an object, we skip the
1889 // allocation of a heap number.
1890 __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
1891 // Fall through!
1892 case NO_OVERWRITE:
1893 // Allocate a heap number for the result. Keep eax and edx intact
1894 // for the possible runtime call.
1895 __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
1896 // Now eax can be overwritten losing one of the arguments as we are
1897 // now done and will not need it any more.
1898 __ mov(eax, ebx);
1899 __ bind(&skip_allocation);
1900 break;
1901 default: UNREACHABLE();
1902 }
1903 }
1904
1905
// Pushes the operands edx (left) then eax (right) onto the stack
// underneath the return address: pop the return address into ecx,
// push the args, then re-push the return address.
1906 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
1907 __ pop(ecx);
1908 __ push(edx);
1909 __ push(eax);
1910 __ push(ecx);
1911 }
1912
1913
1914 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { 640 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
1915 // TAGGED case: 641 // TAGGED case:
1916 // Input: 642 // Input:
1917 // esp[4]: tagged number input argument (should be number). 643 // esp[4]: tagged number input argument (should be number).
1918 // esp[0]: return address. 644 // esp[0]: return address.
1919 // Output: 645 // Output:
1920 // eax: tagged double result. 646 // eax: tagged double result.
1921 // UNTAGGED case: 647 // UNTAGGED case:
1922 // Input:: 648 // Input::
1923 // esp[0]: return address. 649 // esp[0]: return address.
(...skipping 290 matching lines...) Expand 10 before | Expand all | Expand 10 after
2214 __ bind(&done); 940 __ bind(&done);
2215 } else { 941 } else {
2216 ASSERT(type == TranscendentalCache::LOG); 942 ASSERT(type == TranscendentalCache::LOG);
2217 __ fldln2(); 943 __ fldln2();
2218 __ fxch(); 944 __ fxch();
2219 __ fyl2x(); 945 __ fyl2x();
2220 } 946 }
2221 } 947 }
2222 948
2223 949
2224 // Input: edx, eax are the left and right objects of a bit op.
2225 // Output: eax, ecx are left and right integers for a bit op.
2226 // Warning: can clobber inputs even when it jumps to |conversion_failure|!
// Each operand may be a smi, a heap number (truncated to int32), or
// `undefined` (converted to 0); anything else jumps to
// |conversion_failure|. The |left_type|/|right_type| feedback lets the
// smi-only case skip the object checks entirely.
2227 void FloatingPointHelper::LoadUnknownsAsIntegers(
2228 MacroAssembler* masm,
2229 bool use_sse3,
2230 BinaryOpIC::TypeInfo left_type,
2231 BinaryOpIC::TypeInfo right_type,
2232 Label* conversion_failure) {
2233 // Check float operands.
2234 Label arg1_is_object, check_undefined_arg1;
2235 Label arg2_is_object, check_undefined_arg2;
2236 Label load_arg2, done;
2237 
2238 // Test if arg1 is a Smi.
2239 if (left_type == BinaryOpIC::SMI) {
2240 __ JumpIfNotSmi(edx, conversion_failure);
2241 } else {
2242 __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear);
2243 }
2244 
// Smi left operand: just untag in place.
2245 __ SmiUntag(edx);
2246 __ jmp(&load_arg2);
2247 
2248 // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
2249 __ bind(&check_undefined_arg1);
2250 Factory* factory = masm->isolate()->factory();
2251 __ cmp(edx, factory->undefined_value());
2252 __ j(not_equal, conversion_failure);
2253 __ mov(edx, Immediate(0));
2254 __ jmp(&load_arg2);
2255 
// Heap object left operand: must be a heap number, then truncate its
// double value to int32 (clobbers edx with the untagged result).
2256 __ bind(&arg1_is_object);
2257 __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
2258 __ cmp(ebx, factory->heap_number_map());
2259 __ j(not_equal, &check_undefined_arg1);
2260 
2261 __ TruncateHeapNumberToI(edx, edx);
2262 
2263 // Here edx has the untagged integer, eax has a Smi or a heap number.
2264 __ bind(&load_arg2);
2265 
2266 // Test if arg2 is a Smi.
2267 if (right_type == BinaryOpIC::SMI) {
2268 __ JumpIfNotSmi(eax, conversion_failure);
2269 } else {
2270 __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear);
2271 }
2272 
// Smi right operand: untagged value goes to ecx.
2273 __ SmiUntag(eax);
2274 __ mov(ecx, eax);
2275 __ jmp(&done);
2276 
2277 // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
2278 __ bind(&check_undefined_arg2);
2279 __ cmp(eax, factory->undefined_value());
2280 __ j(not_equal, conversion_failure);
2281 __ mov(ecx, Immediate(0));
2282 __ jmp(&done);
2283 
2284 __ bind(&arg2_is_object);
2285 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
2286 __ cmp(ebx, factory->heap_number_map());
2287 __ j(not_equal, &check_undefined_arg2);
2288 // Get the untagged integer version of the eax heap number in ecx.
2289 
2290 __ TruncateHeapNumberToI(ecx, eax);
2291 
// Move the left int32 from edx into eax to match the documented
// output contract (left in eax, right in ecx).
2292 __ bind(&done);
2293 __ mov(eax, edx);
2294 }
2295
2296
2297 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm, 950 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
2298 Register number) { 951 Register number) {
2299 Label load_smi, done; 952 Label load_smi, done;
2300 953
2301 __ JumpIfSmi(number, &load_smi, Label::kNear); 954 __ JumpIfSmi(number, &load_smi, Label::kNear);
2302 __ fld_d(FieldOperand(number, HeapNumber::kValueOffset)); 955 __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
2303 __ jmp(&done, Label::kNear); 956 __ jmp(&done, Label::kNear);
2304 957
2305 __ bind(&load_smi); 958 __ bind(&load_smi);
2306 __ SmiUntag(number); 959 __ SmiUntag(number);
(...skipping 29 matching lines...) Expand all
2336 __ SmiUntag(eax); // Untag smi before converting to float. 989 __ SmiUntag(eax); // Untag smi before converting to float.
2337 __ Cvtsi2sd(xmm1, eax); 990 __ Cvtsi2sd(xmm1, eax);
2338 __ SmiTag(eax); // Retag smi for heap number overwriting test. 991 __ SmiTag(eax); // Retag smi for heap number overwriting test.
2339 __ jmp(&done, Label::kNear); 992 __ jmp(&done, Label::kNear);
2340 __ bind(&load_float_eax); 993 __ bind(&load_float_eax);
2341 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); 994 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2342 __ bind(&done); 995 __ bind(&done);
2343 } 996 }
2344 997
2345 998
// Converts the two smi operands (left in edx, right in eax) to doubles
// in xmm0 and xmm1 respectively, using |scratch| for the untagged
// intermediate. The operand registers themselves are left untouched.
2346 void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
2347 Register scratch) {
2348 const Register left = edx;
2349 const Register right = eax;
2350 __ mov(scratch, left);
2351 ASSERT(!scratch.is(right)); // We're about to clobber scratch.
2352 __ SmiUntag(scratch);
2353 __ Cvtsi2sd(xmm0, scratch);
2354 
2355 __ mov(scratch, right);
2356 __ SmiUntag(scratch);
2357 __ Cvtsi2sd(xmm1, scratch);
2358 }
2359
2360
// Verifies that the double in |operand| is exactly representable as an
// int32: truncate to int32, convert back to double, and compare
// bitwise. Jumps to |non_int32| on mismatch (including the cvttsd2si
// overflow sentinel, which never round-trips); otherwise the int32
// value is left in |int32_result|.
2361 void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm,
2362 Label* non_int32,
2363 XMMRegister operand,
2364 Register int32_result,
2365 Register scratch,
2366 XMMRegister xmm_scratch) {
2367 __ cvttsd2si(int32_result, Operand(operand));
2368 __ Cvtsi2sd(xmm_scratch, int32_result);
// pcmpeqd compares the two 32-bit halves of the doubles; movmskps
// then gathers the per-lane results into the low bits of |scratch|.
2369 __ pcmpeqd(xmm_scratch, operand);
2370 __ movmskps(scratch, xmm_scratch);
2371 // Two least significant bits should be both set.
2372 __ not_(scratch);
2373 __ test(scratch, Immediate(3));
2374 __ j(not_zero, non_int32);
2375 }
2376
2377
// Pushes both operands onto the x87 FPU stack as doubles, left first
// then right. Operands come from edx/eax (ARGS_IN_REGISTERS) or from
// the stack slots above the return address (ARGS_ON_STACK); smis are
// untagged and loaded via fild_s, heap numbers via fld_d. |scratch|
// is clobbered.
2378 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
2379 Register scratch,
2380 ArgLocation arg_location) {
2381 Label load_smi_1, load_smi_2, done_load_1, done;
2382 if (arg_location == ARGS_IN_REGISTERS) {
2383 __ mov(scratch, edx);
2384 } else {
2385 __ mov(scratch, Operand(esp, 2 * kPointerSize));
2386 }
2387 __ JumpIfSmi(scratch, &load_smi_1, Label::kNear);
2388 __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
2389 __ bind(&done_load_1);
2390 
2391 if (arg_location == ARGS_IN_REGISTERS) {
2392 __ mov(scratch, eax);
2393 } else {
2394 __ mov(scratch, Operand(esp, 1 * kPointerSize));
2395 }
2396 __ JumpIfSmi(scratch, &load_smi_2, Label::kNear);
2397 __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
2398 __ jmp(&done, Label::kNear);
2399 
// Smi paths: spill the untagged int32 to the stack so fild_s can
// read it, then restore esp by popping back into scratch.
2400 __ bind(&load_smi_1);
2401 __ SmiUntag(scratch);
2402 __ push(scratch);
2403 __ fild_s(Operand(esp, 0));
2404 __ pop(scratch);
2405 __ jmp(&done_load_1);
2406 
2407 __ bind(&load_smi_2);
2408 __ SmiUntag(scratch);
2409 __ push(scratch);
2410 __ fild_s(Operand(esp, 0));
2411 __ pop(scratch);
2412 
2413 __ bind(&done);
2414 }
2415
2416
// Pushes the two smi operands (left in edx, right in eax) onto the x87
// FPU stack as doubles, left first. A single stack slot is reused for
// both fild_s loads and released at the end; the operand registers are
// left untouched, |scratch| is clobbered.
2417 void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
2418 Register scratch) {
2419 const Register left = edx;
2420 const Register right = eax;
2421 __ mov(scratch, left);
2422 ASSERT(!scratch.is(right)); // We're about to clobber scratch.
2423 __ SmiUntag(scratch);
2424 __ push(scratch);
2425 __ fild_s(Operand(esp, 0));
2426 
2427 __ mov(scratch, right);
2428 __ SmiUntag(scratch);
// Overwrite the same stack slot instead of pushing a second one.
2429 __ mov(Operand(esp, 0), scratch);
2430 __ fild_s(Operand(esp, 0));
2431 __ pop(scratch);
2432 }
2433
2434
2435 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm, 999 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
2436 Label* non_float, 1000 Label* non_float,
2437 Register scratch) { 1001 Register scratch) {
2438 Label test_other, done; 1002 Label test_other, done;
2439 // Test if both operands are floats or smi -> scratch=k_is_float; 1003 // Test if both operands are floats or smi -> scratch=k_is_float;
2440 // Otherwise scratch = k_not_float. 1004 // Otherwise scratch = k_not_float.
2441 __ JumpIfSmi(edx, &test_other, Label::kNear); 1005 __ JumpIfSmi(edx, &test_other, Label::kNear);
2442 __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset)); 1006 __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
2443 Factory* factory = masm->isolate()->factory(); 1007 Factory* factory = masm->isolate()->factory();
2444 __ cmp(scratch, factory->heap_number_map()); 1008 __ cmp(scratch, factory->heap_number_map());
(...skipping 1992 matching lines...) Expand 10 before | Expand all | Expand 10 after
4437 3001
4438 3002
4439 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { 3003 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
4440 CEntryStub::GenerateAheadOfTime(isolate); 3004 CEntryStub::GenerateAheadOfTime(isolate);
4441 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); 3005 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
4442 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); 3006 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
4443 // It is important that the store buffer overflow stubs are generated first. 3007 // It is important that the store buffer overflow stubs are generated first.
4444 RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); 3008 RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
4445 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); 3009 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
4446 CreateAllocationSiteStub::GenerateAheadOfTime(isolate); 3010 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
3011 BinaryOpStub::GenerateAheadOfTime(isolate);
3012 CpuFeatures::ForceFeature(SSE2);
3013 BinaryOpStub::GenerateAheadOfTime(isolate);
3014 CpuFeatures::ClearForcedFeatures();
4447 } 3015 }
4448 3016
4449 3017
4450 void CodeStub::GenerateFPStubs(Isolate* isolate) { 3018 void CodeStub::GenerateFPStubs(Isolate* isolate) {
4451 if (CpuFeatures::IsSupported(SSE2)) { 3019 if (CpuFeatures::IsSupported(SSE2)) {
4452 CEntryStub save_doubles(1, kSaveFPRegs); 3020 CEntryStub save_doubles(1, kSaveFPRegs);
4453 // Stubs might already be in the snapshot, detect that and don't regenerate, 3021 // Stubs might already be in the snapshot, detect that and don't regenerate,
4454 // which would lead to code stub initialization state being messed up. 3022 // which would lead to code stub initialization state being messed up.
4455 Code* save_doubles_code; 3023 Code* save_doubles_code;
4456 if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) { 3024 if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
(...skipping 3089 matching lines...) Expand 10 before | Expand all | Expand 10 after
7546 __ bind(&fast_elements_case); 6114 __ bind(&fast_elements_case);
7547 GenerateCase(masm, FAST_ELEMENTS); 6115 GenerateCase(masm, FAST_ELEMENTS);
7548 } 6116 }
7549 6117
7550 6118
7551 #undef __ 6119 #undef __
7552 6120
7553 } } // namespace v8::internal 6121 } } // namespace v8::internal
7554 6122
7555 #endif // V8_TARGET_ARCH_IA32 6123 #endif // V8_TARGET_ARCH_IA32
OLDNEW
« no previous file with comments | « src/ia32/assembler-ia32.cc ('k') | src/ia32/deoptimizer-ia32.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698