Chromium Code Reviews

Issue 1606019: Make binary op stubs in both r0-r1 and r1-r0 versions to reduce... (Closed)
Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/

| OLD | NEW |
|---|---|
| 1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. | 
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without | 
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are | 
| 4 // met: | 4 // met: | 
| 5 // | 5 // | 
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright | 
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. | 
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above | 
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following | 
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided | 
| (...skipping 734 matching lines...) | |
| 745 case Token::DIV: | 745 case Token::DIV: | 
| 746 case Token::MOD: | 746 case Token::MOD: | 
| 747 case Token::BIT_OR: | 747 case Token::BIT_OR: | 
| 748 case Token::BIT_AND: | 748 case Token::BIT_AND: | 
| 749 case Token::BIT_XOR: | 749 case Token::BIT_XOR: | 
| 750 case Token::SHL: | 750 case Token::SHL: | 
| 751 case Token::SHR: | 751 case Token::SHR: | 
| 752 case Token::SAR: { | 752 case Token::SAR: { | 
| 753 frame_->EmitPop(r0); // r0 : y | 753 frame_->EmitPop(r0); // r0 : y | 
| 754 frame_->EmitPop(r1); // r1 : x | 754 frame_->EmitPop(r1); // r1 : x | 
| 755 GenericBinaryOpStub stub(op, overwrite_mode, constant_rhs); | 755 GenericBinaryOpStub stub(op, overwrite_mode, r1, r0, constant_rhs); | 
| 756 frame_->CallStub(&stub, 0); | 756 frame_->CallStub(&stub, 0); | 
| 757 break; | 757 break; | 
| 758 } | 758 } | 
| 759 | 759 | 
| 760 case Token::COMMA: | 760 case Token::COMMA: | 
| 761 frame_->EmitPop(r0); | 761 frame_->EmitPop(r0); | 
| 762 // Simply discard left value. | 762 // Simply discard left value. | 
| 763 frame_->Drop(); | 763 frame_->Drop(); | 
| 764 break; | 764 break; | 
| 765 | 765 | 
| (...skipping 18 matching lines...) | |
| 784 case Token::SUB: // fall through. | 784 case Token::SUB: // fall through. | 
| 785 case Token::MUL: | 785 case Token::MUL: | 
| 786 case Token::DIV: | 786 case Token::DIV: | 
| 787 case Token::MOD: | 787 case Token::MOD: | 
| 788 case Token::BIT_OR: | 788 case Token::BIT_OR: | 
| 789 case Token::BIT_AND: | 789 case Token::BIT_AND: | 
| 790 case Token::BIT_XOR: | 790 case Token::BIT_XOR: | 
| 791 case Token::SHL: | 791 case Token::SHL: | 
| 792 case Token::SHR: | 792 case Token::SHR: | 
| 793 case Token::SAR: { | 793 case Token::SAR: { | 
| 794 frame_->PopToR1R0(); // Pop y to r0 and x to r1. | 794 Register rhs = frame_->PopToRegister(); | 
| 795 Register lhs = frame_->PopToRegister(rhs); // Don't pop to rhs register. | |
| 795 { | 796 { | 
| 796 VirtualFrame::SpilledScope spilled_scope(frame_); | 797 VirtualFrame::SpilledScope spilled_scope(frame_); | 
| 797 GenericBinaryOpStub stub(op, overwrite_mode, constant_rhs); | 798 GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs); | 
| 798 frame_->CallStub(&stub, 0); | 799 frame_->CallStub(&stub, 0); | 
| 799 } | 800 } | 
| 800 frame_->EmitPush(r0); | 801 frame_->EmitPush(r0); | 
| 801 break; | 802 break; | 
| 802 } | 803 } | 
| 803 | 804 | 
| 804 case Token::COMMA: { | 805 case Token::COMMA: { | 
| 805 Register scratch = frame_->PopToRegister(); | 806 Register scratch = frame_->PopToRegister(); | 
| 806 // Simply discard left value. | 807 // Simply discard left value. | 
| 807 frame_->Drop(); | 808 frame_->Drop(); | 
| (...skipping 29 matching lines...) | |
| 837 private: | 838 private: | 
| 838 Token::Value op_; | 839 Token::Value op_; | 
| 839 int value_; | 840 int value_; | 
| 840 bool reversed_; | 841 bool reversed_; | 
| 841 OverwriteMode overwrite_mode_; | 842 OverwriteMode overwrite_mode_; | 
| 842 Register tos_register_; | 843 Register tos_register_; | 
| 843 }; | 844 }; | 
| 844 | 845 | 
| 845 | 846 | 
| 846 void DeferredInlineSmiOperation::Generate() { | 847 void DeferredInlineSmiOperation::Generate() { | 
| 848 Register lhs = r1; | |
| 849 Register rhs = r0; | |
| 847 switch (op_) { | 850 switch (op_) { | 
| 848 case Token::ADD: { | 851 case Token::ADD: { | 
| 849 // Revert optimistic add. | 852 // Revert optimistic add. | 
| 850 if (reversed_) { | 853 if (reversed_) { | 
| 851 __ sub(r0, tos_register_, Operand(Smi::FromInt(value_))); | 854 __ sub(r0, tos_register_, Operand(Smi::FromInt(value_))); | 
| 852 __ mov(r1, Operand(Smi::FromInt(value_))); | 855 __ mov(r1, Operand(Smi::FromInt(value_))); | 
| 853 } else { | 856 } else { | 
| 854 __ sub(r1, tos_register_, Operand(Smi::FromInt(value_))); | 857 __ sub(r1, tos_register_, Operand(Smi::FromInt(value_))); | 
| 855 __ mov(r0, Operand(Smi::FromInt(value_))); | 858 __ mov(r0, Operand(Smi::FromInt(value_))); | 
| 856 } | 859 } | 
| (...skipping 13 matching lines...) | |
| 870 } | 873 } | 
| 871 | 874 | 
| 872 // For these operations there is no optimistic operation that needs to be | 875 // For these operations there is no optimistic operation that needs to be | 
| 873 // reverted. | 876 // reverted. | 
| 874 case Token::MUL: | 877 case Token::MUL: | 
| 875 case Token::MOD: | 878 case Token::MOD: | 
| 876 case Token::BIT_OR: | 879 case Token::BIT_OR: | 
| 877 case Token::BIT_XOR: | 880 case Token::BIT_XOR: | 
| 878 case Token::BIT_AND: { | 881 case Token::BIT_AND: { | 
| 879 if (reversed_) { | 882 if (reversed_) { | 
| 880 __ Move(r0, tos_register_); | 883 if (tos_register_.is(r0)) { | 
| 881 __ mov(r1, Operand(Smi::FromInt(value_))); | 884 __ mov(r1, Operand(Smi::FromInt(value_))); | 
| 885 } else { | |
| 886 ASSERT(tos_register_.is(r1)); | |
| 887 __ mov(r0, Operand(Smi::FromInt(value_))); | |
| 888 lhs = r0; | |
| 889 rhs = r1; | |
| 890 } | |
| 882 } else { | 891 } else { | 
| 883 __ Move(r1, tos_register_); | 892 if (tos_register_.is(r1)) { | 
| 884 __ mov(r0, Operand(Smi::FromInt(value_))); | 893 __ mov(r0, Operand(Smi::FromInt(value_))); | 
| 894 } else { | |
| 895 ASSERT(tos_register_.is(r0)); | |
| 896 __ mov(r1, Operand(Smi::FromInt(value_))); | |
| 897 lhs = r0; | |
| 898 rhs = r1; | |
| 899 } | |
| 885 } | 900 } | 
| 886 break; | 901 break; | 
| 887 } | 902 } | 
| 888 | 903 | 
| 889 case Token::SHL: | 904 case Token::SHL: | 
| 890 case Token::SHR: | 905 case Token::SHR: | 
| 891 case Token::SAR: { | 906 case Token::SAR: { | 
| 892 if (!reversed_) { | 907 if (!reversed_) { | 
| 893 __ Move(r1, tos_register_); | 908 if (tos_register_.is(r1)) { | 
| 894 __ mov(r0, Operand(Smi::FromInt(value_))); | 909 __ mov(r0, Operand(Smi::FromInt(value_))); | 
| 910 } else { | |
| 911 ASSERT(tos_register_.is(r0)); | |
| 912 __ mov(r1, Operand(Smi::FromInt(value_))); | |
| 913 lhs = r0; | |
| 914 rhs = r1; | |
| 915 } | |
| 895 } else { | 916 } else { | 
| 896 UNREACHABLE(); // Should have been handled in SmiOperation. | 917 UNREACHABLE(); // Should have been handled in SmiOperation. | 
| 897 } | 918 } | 
| 898 break; | 919 break; | 
| 899 } | 920 } | 
| 900 | 921 | 
| 901 default: | 922 default: | 
| 902 // Other cases should have been handled before this point. | 923 // Other cases should have been handled before this point. | 
| 903 UNREACHABLE(); | 924 UNREACHABLE(); | 
| 904 break; | 925 break; | 
| 905 } | 926 } | 
| 906 | 927 | 
| 907 GenericBinaryOpStub stub(op_, overwrite_mode_, value_); | 928 GenericBinaryOpStub stub(op_, overwrite_mode_, lhs, rhs, value_); | 
| 908 __ CallStub(&stub); | 929 __ CallStub(&stub); | 
| 909 // The generic stub returns its value in r0, but that's not | 930 // The generic stub returns its value in r0, but that's not | 
| 910 // necessarily what we want. We want whatever the inlined code | 931 // necessarily what we want. We want whatever the inlined code | 
| 911 // expected, which is that the answer is in the same register as | 932 // expected, which is that the answer is in the same register as | 
| 912 // the operand was. | 933 // the operand was. | 
| 913 __ Move(tos_register_, r0); | 934 __ Move(tos_register_, r0); | 
| 914 } | 935 } | 
| 915 | 936 | 
| 916 | 937 | 
| 917 static bool PopCountLessThanEqual2(unsigned int x) { | 938 static bool PopCountLessThanEqual2(unsigned int x) { | 
| (...skipping 60 matching lines...) | |
| 978 break; | 999 break; | 
| 979 } | 1000 } | 
| 980 default: { | 1001 default: { | 
| 981 something_to_inline = false; | 1002 something_to_inline = false; | 
| 982 break; | 1003 break; | 
| 983 } | 1004 } | 
| 984 } | 1005 } | 
| 985 | 1006 | 
| 986 if (!something_to_inline) { | 1007 if (!something_to_inline) { | 
| 987 if (!reversed) { | 1008 if (!reversed) { | 
| 988 // Move the lhs to r1. | 1009 Register rhs = frame_->GetTOSRegister(); | 
| 989 frame_->PopToR1(); | 1010 __ mov(rhs, Operand(value)); | 
| 990 // Flush any other registers to the stack. | 1011 frame_->EmitPush(rhs); | 
| 991 frame_->SpillAll(); | |
| 992 // Tell the virtual frame that TOS is in r1 (no code emitted). | |
| 993 frame_->EmitPush(r1); | |
| 994 // We know that r0 is free. | |
| 995 __ mov(r0, Operand(value)); | |
| 996 // Push r0 on the virtual frame (no code emitted). | |
| 997 frame_->EmitPush(r0); | |
| 998 // This likes having r1 and r0 on top of the stack. It pushes | |
| 999 // the answer on the virtual frame. | |
| 1000 VirtualFrameBinaryOperation(op, mode, int_value); | 1012 VirtualFrameBinaryOperation(op, mode, int_value); | 
| 1001 } else { | 1013 } else { | 
| 1002 // Move the rhs to r0. | 1014 // Move the rhs to r0. | 
| Søren Thygesen Gjesse (2010/04/09 13:22:36): Does this comment still hold? | |
| 1003 frame_->PopToR0(); | 1015 Register lhs = frame_->GetTOSRegister(); // Get reg for pushing. | 
| 1004 // Flush any other registers to the stack. | 1016 Register rhs = frame_->PopToRegister(lhs); // Don't use lhs for this. | 
| 1005 frame_->SpillAll(); | 1017 __ mov(lhs, Operand(value)); | 
| 1006 // We know that r1 is free. | 1018 frame_->EmitPush(lhs); | 
| 1007 __ mov(r1, Operand(value)); | 1019 frame_->EmitPush(rhs); | 
| 1008 // Tell the virtual frame that TOS is in r1 (no code emitted). | |
| 1009 frame_->EmitPush(r1); | |
| 1010 // Push r0 on the virtual frame (no code emitted). | |
| 1011 frame_->EmitPush(r0); | |
| 1012 // This likes having r1 and r0 on top of the stack. It pushes | |
| 1013 // the answer on the virtual frame. | |
| 1014 VirtualFrameBinaryOperation(op, mode, kUnknownIntValue); | 1020 VirtualFrameBinaryOperation(op, mode, kUnknownIntValue); | 
| 1015 } | 1021 } | 
| 1016 return; | 1022 return; | 
| 1017 } | 1023 } | 
| 1018 | 1024 | 
| 1019 // We move the top of stack to a register (normally no move is involved). | 1025 // We move the top of stack to a register (normally no move is involved). | 
| 1020 Register tos = frame_->PopToRegister(); | 1026 Register tos = frame_->PopToRegister(); | 
| 1021 // All other registers are spilled. The deferred code expects one argument | 1027 // All other registers are spilled. The deferred code expects one argument | 
| 1022 // in a register and all other values are flushed to the stack. The | 1028 // in a register and all other values are flushed to the stack. The | 
| 1023 // answer is returned in the same register that the top of stack argument was | 1029 // answer is returned in the same register that the top of stack argument was | 
| (...skipping 60 matching lines...) | |
| 1084 DeferredCode* deferred = | 1090 DeferredCode* deferred = | 
| 1085 new DeferredInlineSmiOperation(op, shift_value, false, mode, tos); | 1091 new DeferredInlineSmiOperation(op, shift_value, false, mode, tos); | 
| 1086 __ tst(tos, Operand(kSmiTagMask)); | 1092 __ tst(tos, Operand(kSmiTagMask)); | 
| 1087 deferred->Branch(ne); | 1093 deferred->Branch(ne); | 
| 1088 __ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // remove tags | 1094 __ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // remove tags | 
| 1089 switch (op) { | 1095 switch (op) { | 
| 1090 case Token::SHL: { | 1096 case Token::SHL: { | 
| 1091 if (shift_value != 0) { | 1097 if (shift_value != 0) { | 
| 1092 __ mov(scratch, Operand(scratch, LSL, shift_value)); | 1098 __ mov(scratch, Operand(scratch, LSL, shift_value)); | 
| 1093 } | 1099 } | 
| 1094 // check that the *unsigned* result fits in a smi | 1100 // check that the *signed* result fits in a smi | 
| 1095 __ add(scratch2, scratch, Operand(0x40000000), SetCC); | 1101 __ add(scratch2, scratch, Operand(0x40000000), SetCC); | 
| 1096 deferred->Branch(mi); | 1102 deferred->Branch(mi); | 
| 1097 break; | 1103 break; | 
| 1098 } | 1104 } | 
| 1099 case Token::SHR: { | 1105 case Token::SHR: { | 
| 1100 // LSR by immediate 0 means shifting 32 bits. | 1106 // LSR by immediate 0 means shifting 32 bits. | 
| 1101 if (shift_value != 0) { | 1107 if (shift_value != 0) { | 
| 1102 __ mov(scratch, Operand(scratch, LSR, shift_value)); | 1108 __ mov(scratch, Operand(scratch, LSR, shift_value)); | 
| 1103 } | 1109 } | 
| 1104 // check that the *unsigned* result fits in a smi | 1110 // check that the *unsigned* result fits in a smi | 
| 1105 // neither of the two high-order bits can be set: | 1111 // neither of the two high-order bits can be set: | 
| 1106 // - 0x80000000: high bit would be lost when smi tagging | 1112 // - 0x80000000: high bit would be lost when smi tagging | 
| 1107 // - 0x40000000: this number would convert to negative when | 1113 // - 0x40000000: this number would convert to negative when | 
| 1108 // smi tagging these two cases can only happen with shifts | 1114 // smi tagging these two cases can only happen with shifts | 
| 1109 // by 0 or 1 when handed a valid smi | 1115 // by 0 or 1 when handed a valid smi | 
| 1110 __ and_(scratch2, scratch, Operand(0xc0000000), SetCC); | 1116 __ tst(scratch, Operand(0xc0000000)); | 
| 1111 deferred->Branch(ne); | 1117 deferred->Branch(ne); | 
| 1112 break; | 1118 break; | 
| 1113 } | 1119 } | 
| 1114 case Token::SAR: { | 1120 case Token::SAR: { | 
| 1115 if (shift_value != 0) { | 1121 if (shift_value != 0) { | 
| 1116 // ASR by immediate 0 means shifting 32 bits. | 1122 // ASR by immediate 0 means shifting 32 bits. | 
| 1117 __ mov(scratch, Operand(scratch, ASR, shift_value)); | 1123 __ mov(scratch, Operand(scratch, ASR, shift_value)); | 
| 1118 } | 1124 } | 
| 1119 break; | 1125 break; | 
| 1120 } | 1126 } | 
| (...skipping 4652 matching lines...) | |
| 5773 __ InvokeBuiltin(native, JUMP_JS); | 5779 __ InvokeBuiltin(native, JUMP_JS); | 
| 5774 } | 5780 } | 
| 5775 | 5781 | 
| 5776 | 5782 | 
| 5777 // We fall into this code if the operands were Smis, but the result was | 5783 // We fall into this code if the operands were Smis, but the result was | 
| 5778 // not (eg. overflow). We branch into this code (to the not_smi label) if | 5784 // not (eg. overflow). We branch into this code (to the not_smi label) if | 
| 5779 // the operands were not both Smi. The operands are in r0 and r1. In order | 5785 // the operands were not both Smi. The operands are in r0 and r1. In order | 
| 5780 // to call the C-implemented binary fp operation routines we need to end up | 5786 // to call the C-implemented binary fp operation routines we need to end up | 
| 5781 // with the double precision floating point operands in r0 and r1 (for the | 5787 // with the double precision floating point operands in r0 and r1 (for the | 
| 5782 // value in r1) and r2 and r3 (for the value in r0). | 5788 // value in r1) and r2 and r3 (for the value in r0). | 
| 5783 void GenericBinaryOpStub::HandleBinaryOpSlowCases(MacroAssembler* masm, | 5789 void GenericBinaryOpStub::HandleBinaryOpSlowCases( | 
| 5784 Label* not_smi, | 5790 MacroAssembler* masm, | 
| 5785 const Builtins::JavaScript& builtin) { | 5791 Label* not_smi, | 
| 5792 Register lhs, | |
| 5793 Register rhs, | |
| 5794 const Builtins::JavaScript& builtin) { | |
| 5786 Label slow, slow_pop_2_first, do_the_call; | 5795 Label slow, slow_pop_2_first, do_the_call; | 
| 5787 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; | 5796 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; | 
| 5788 bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_; | 5797 bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_; | 
| 5789 | 5798 | 
| 5799 ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))); | |
| 5800 | |
| 5790 if (ShouldGenerateSmiCode()) { | 5801 if (ShouldGenerateSmiCode()) { | 
| 5791 // Smi-smi case (overflow). | 5802 // Smi-smi case (overflow). | 
| 5792 // Since both are Smis there is no heap number to overwrite, so allocate. | 5803 // Since both are Smis there is no heap number to overwrite, so allocate. | 
| 5793 // The new heap number is in r5. r6 and r7 are scratch. | 5804 // The new heap number is in r5. r6 and r7 are scratch. | 
| 5794 __ AllocateHeapNumber(r5, r6, r7, &slow); | 5805 __ AllocateHeapNumber(r5, r6, r7, &slow); | 
| 5795 | 5806 | 
| 5796 // If we have floating point hardware, inline ADD, SUB, MUL, and DIV, | 5807 // If we have floating point hardware, inline ADD, SUB, MUL, and DIV, | 
| 5797 // using registers d7 and d6 for the double values. | 5808 // using registers d7 and d6 for the double values. | 
| 5798 if (use_fp_registers) { | 5809 if (use_fp_registers) { | 
| 5799 CpuFeatures::Scope scope(VFP3); | 5810 CpuFeatures::Scope scope(VFP3); | 
| 5800 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); | 5811 __ mov(r7, Operand(rhs, ASR, kSmiTagSize)); | 
| 5801 __ vmov(s15, r7); | 5812 __ vmov(s15, r7); | 
| 5802 __ vcvt_f64_s32(d7, s15); | 5813 __ vcvt_f64_s32(d7, s15); | 
| 5803 __ mov(r7, Operand(r1, ASR, kSmiTagSize)); | 5814 __ mov(r7, Operand(lhs, ASR, kSmiTagSize)); | 
| 5804 __ vmov(s13, r7); | 5815 __ vmov(s13, r7); | 
| 5805 __ vcvt_f64_s32(d6, s13); | 5816 __ vcvt_f64_s32(d6, s13); | 
| 5806 } else { | 5817 } else { | 
| 5807 // Write Smi from r0 to r3 and r2 in double format. r6 is scratch. | 5818 // Write Smi from rhs to r3 and r2 in double format. r6 is scratch. | 
| 5808 __ mov(r7, Operand(r0)); | 5819 __ mov(r7, Operand(rhs)); | 
| 5809 ConvertToDoubleStub stub1(r3, r2, r7, r6); | 5820 ConvertToDoubleStub stub1(r3, r2, r7, r6); | 
| 5810 __ push(lr); | 5821 __ push(lr); | 
| 5811 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | 5822 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | 
| 5812 // Write Smi from r1 to r1 and r0 in double format. r6 is scratch. | 5823 // Write Smi from lhs to r1 and r0 in double format. r6 is scratch. | 
| 5813 __ mov(r7, Operand(r1)); | 5824 __ mov(r7, Operand(lhs)); | 
| 5814 ConvertToDoubleStub stub2(r1, r0, r7, r6); | 5825 ConvertToDoubleStub stub2(r1, r0, r7, r6); | 
| 5815 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | 5826 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | 
| 5816 __ pop(lr); | 5827 __ pop(lr); | 
| 5817 } | 5828 } | 
| 5818 __ jmp(&do_the_call); // Tail call. No return. | 5829 __ jmp(&do_the_call); // Tail call. No return. | 
| 5819 } | 5830 } | 
| 5820 | 5831 | 
| 5821 // We branch here if at least one of r0 and r1 is not a Smi. | 5832 // We branch here if at least one of r0 and r1 is not a Smi. | 
| 5822 __ bind(not_smi); | 5833 __ bind(not_smi); | 
| 5823 | 5834 | 
| 5835 if (lhs.is(r0)) { | |
| 5836 __ Swap(r0, r1, ip); | |
| 5837 } | |
| 5838 | |
| 5824 if (ShouldGenerateFPCode()) { | 5839 if (ShouldGenerateFPCode()) { | 
| 5825 if (runtime_operands_type_ == BinaryOpIC::DEFAULT) { | 5840 if (runtime_operands_type_ == BinaryOpIC::DEFAULT) { | 
| 5826 switch (op_) { | 5841 switch (op_) { | 
| 5827 case Token::ADD: | 5842 case Token::ADD: | 
| 5828 case Token::SUB: | 5843 case Token::SUB: | 
| 5829 case Token::MUL: | 5844 case Token::MUL: | 
| 5830 case Token::DIV: | 5845 case Token::DIV: | 
| 5831 GenerateTypeTransition(masm); | 5846 GenerateTypeTransition(masm); | 
| 5832 break; | 5847 break; | 
| 5833 | 5848 | 
| (...skipping 296 matching lines...) | |
| 6130 } | 6145 } | 
| 6131 __ bind(&done); | 6146 __ bind(&done); | 
| 6132 } | 6147 } | 
| 6133 | 6148 | 
| 6134 // For bitwise ops where the inputs are not both Smis we here try to determine | 6149 // For bitwise ops where the inputs are not both Smis we here try to determine | 
| 6135 // whether both inputs are either Smis or at least heap numbers that can be | 6150 // whether both inputs are either Smis or at least heap numbers that can be | 
| 6136 // represented by a 32 bit signed value. We truncate towards zero as required | 6151 // represented by a 32 bit signed value. We truncate towards zero as required | 
| 6137 // by the ES spec. If this is the case we do the bitwise op and see if the | 6152 // by the ES spec. If this is the case we do the bitwise op and see if the | 
| 6138 // result is a Smi. If so, great, otherwise we try to find a heap number to | 6153 // result is a Smi. If so, great, otherwise we try to find a heap number to | 
| 6139 // write the answer into (either by allocating or by overwriting). | 6154 // write the answer into (either by allocating or by overwriting). | 
| 6140 // On entry the operands are in r0 and r1. On exit the answer is in r0. | 6155 // On entry the operands are in lhs and rhs. On exit the answer is in r0. | 
| 6141 void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) { | 6156 void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm, | 
| 6157 Register lhs, | |
| 6158 Register rhs) { | |
| 6142 Label slow, result_not_a_smi; | 6159 Label slow, result_not_a_smi; | 
| 6143 Label r0_is_smi, r1_is_smi; | 6160 Label rhs_is_smi, lhs_is_smi; | 
| 6144 Label done_checking_r0, done_checking_r1; | 6161 Label done_checking_rhs, done_checking_lhs; | 
| 6145 | 6162 | 
| 6146 __ tst(r1, Operand(kSmiTagMask)); | 6163 __ tst(lhs, Operand(kSmiTagMask)); | 
| 6147 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. | 6164 __ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number. | 
| 6148 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE); | 6165 __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE); | 
| 6149 __ b(ne, &slow); | 6166 __ b(ne, &slow); | 
| 6150 GetInt32(masm, r1, r3, r5, r4, &slow); | 6167 GetInt32(masm, lhs, r3, r5, r4, &slow); | 
| 6151 __ jmp(&done_checking_r1); | 6168 __ jmp(&done_checking_lhs); | 
| 6152 __ bind(&r1_is_smi); | 6169 __ bind(&lhs_is_smi); | 
| 6153 __ mov(r3, Operand(r1, ASR, 1)); | 6170 __ mov(r3, Operand(lhs, ASR, 1)); | 
| 6154 __ bind(&done_checking_r1); | 6171 __ bind(&done_checking_lhs); | 
| 6155 | 6172 | 
| 6156 __ tst(r0, Operand(kSmiTagMask)); | 6173 __ tst(rhs, Operand(kSmiTagMask)); | 
| 6157 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number. | 6174 __ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number. | 
| 6158 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); | 6175 __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE); | 
| 6159 __ b(ne, &slow); | 6176 __ b(ne, &slow); | 
| 6160 GetInt32(masm, r0, r2, r5, r4, &slow); | 6177 GetInt32(masm, rhs, r2, r5, r4, &slow); | 
| 6161 __ jmp(&done_checking_r0); | 6178 __ jmp(&done_checking_rhs); | 
| 6162 __ bind(&r0_is_smi); | 6179 __ bind(&rhs_is_smi); | 
| 6163 __ mov(r2, Operand(r0, ASR, 1)); | 6180 __ mov(r2, Operand(rhs, ASR, 1)); | 
| 6164 __ bind(&done_checking_r0); | 6181 __ bind(&done_checking_rhs); | 
| 6182 | |
| 6183 ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)))); | |
| 6165 | 6184 | 
| 6166 // r0 and r1: Original operands (Smi or heap numbers). | 6185 // r0 and r1: Original operands (Smi or heap numbers). | 
| 6167 // r2 and r3: Signed int32 operands. | 6186 // r2 and r3: Signed int32 operands. | 
| 6168 switch (op_) { | 6187 switch (op_) { | 
| 6169 case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break; | 6188 case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break; | 
| 6170 case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break; | 6189 case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break; | 
| 6171 case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break; | 6190 case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break; | 
| 6172 case Token::SAR: | 6191 case Token::SAR: | 
| 6173 // Use only the 5 least significant bits of the shift count. | 6192 // Use only the 5 least significant bits of the shift count. | 
| 6174 __ and_(r2, r2, Operand(0x1f)); | 6193 __ and_(r2, r2, Operand(0x1f)); | 
| (...skipping 19 matching lines...) | |
| 6194 // check that the *signed* result fits in a smi | 6213 // check that the *signed* result fits in a smi | 
| 6195 __ add(r3, r2, Operand(0x40000000), SetCC); | 6214 __ add(r3, r2, Operand(0x40000000), SetCC); | 
| 6196 __ b(mi, &result_not_a_smi); | 6215 __ b(mi, &result_not_a_smi); | 
| 6197 __ mov(r0, Operand(r2, LSL, kSmiTagSize)); | 6216 __ mov(r0, Operand(r2, LSL, kSmiTagSize)); | 
| 6198 __ Ret(); | 6217 __ Ret(); | 
| 6199 | 6218 | 
| 6200 Label have_to_allocate, got_a_heap_number; | 6219 Label have_to_allocate, got_a_heap_number; | 
| 6201 __ bind(&result_not_a_smi); | 6220 __ bind(&result_not_a_smi); | 
| 6202 switch (mode_) { | 6221 switch (mode_) { | 
| 6203 case OVERWRITE_RIGHT: { | 6222 case OVERWRITE_RIGHT: { | 
| 6204 __ tst(r0, Operand(kSmiTagMask)); | 6223 __ tst(rhs, Operand(kSmiTagMask)); | 
| 6205 __ b(eq, &have_to_allocate); | 6224 __ b(eq, &have_to_allocate); | 
| 6206 __ mov(r5, Operand(r0)); | 6225 __ mov(r5, Operand(rhs)); | 
| 6207 break; | 6226 break; | 
| 6208 } | 6227 } | 
| 6209 case OVERWRITE_LEFT: { | 6228 case OVERWRITE_LEFT: { | 
| 6210 __ tst(r1, Operand(kSmiTagMask)); | 6229 __ tst(lhs, Operand(kSmiTagMask)); | 
| 6211 __ b(eq, &have_to_allocate); | 6230 __ b(eq, &have_to_allocate); | 
| 6212 __ mov(r5, Operand(r1)); | 6231 __ mov(r5, Operand(lhs)); | 
| 6213 break; | 6232 break; | 
| 6214 } | 6233 } | 
| 6215 case NO_OVERWRITE: { | 6234 case NO_OVERWRITE: { | 
| 6216 // Get a new heap number in r5. r6 and r7 are scratch. | 6235 // Get a new heap number in r5. r6 and r7 are scratch. | 
| 6217 __ AllocateHeapNumber(r5, r6, r7, &slow); | 6236 __ AllocateHeapNumber(r5, r6, r7, &slow); | 
| 6218 } | 6237 } | 
| 6219 default: break; | 6238 default: break; | 
| 6220 } | 6239 } | 
| 6221 __ bind(&got_a_heap_number); | 6240 __ bind(&got_a_heap_number); | 
| 6222 // r2: Answer as signed int32. | 6241 // r2: Answer as signed int32. | 
| (...skipping 10 matching lines...) | |
| 6233 | 6252 | 
| 6234 if (mode_ != NO_OVERWRITE) { | 6253 if (mode_ != NO_OVERWRITE) { | 
| 6235 __ bind(&have_to_allocate); | 6254 __ bind(&have_to_allocate); | 
| 6236 // Get a new heap number in r5. r6 and r7 are scratch. | 6255 // Get a new heap number in r5. r6 and r7 are scratch. | 
| 6237 __ AllocateHeapNumber(r5, r6, r7, &slow); | 6256 __ AllocateHeapNumber(r5, r6, r7, &slow); | 
| 6238 __ jmp(&got_a_heap_number); | 6257 __ jmp(&got_a_heap_number); | 
| 6239 } | 6258 } | 
| 6240 | 6259 | 
| 6241 // If all else failed then we go to the runtime system. | 6260 // If all else failed then we go to the runtime system. | 
| 6242 __ bind(&slow); | 6261 __ bind(&slow); | 
| 6243 __ push(r1); // restore stack | 6262 __ push(lhs); // restore stack | 
| 6244 __ push(r0); | 6263 __ push(rhs); | 
| 6245 switch (op_) { | 6264 switch (op_) { | 
| 6246 case Token::BIT_OR: | 6265 case Token::BIT_OR: | 
| 6247 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); | 6266 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); | 
| 6248 break; | 6267 break; | 
| 6249 case Token::BIT_AND: | 6268 case Token::BIT_AND: | 
| 6250 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS); | 6269 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS); | 
| 6251 break; | 6270 break; | 
| 6252 case Token::BIT_XOR: | 6271 case Token::BIT_XOR: | 
| 6253 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS); | 6272 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS); | 
| 6254 break; | 6273 break; | 
| (...skipping 109 matching lines...) | |
| 6364 "GenericBinaryOpStub_%s_%s%s", | 6383 "GenericBinaryOpStub_%s_%s%s", | 
| 6365 op_name, | 6384 op_name, | 
| 6366 overwrite_name, | 6385 overwrite_name, | 
| 6367 specialized_on_rhs_ ? "_ConstantRhs" : 0); | 6386 specialized_on_rhs_ ? "_ConstantRhs" : 0); | 
| 6368 return name_; | 6387 return name_; | 
| 6369 } | 6388 } | 
| 6370 | 6389 | 
| 6371 | 6390 | 
| 6372 | 6391 | 
| 6373 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { | 6392 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { | 
| 6374 // r1 : x | 6393 // lhs_ : x | 
| 6375 // r0 : y | 6394 // rhs_ : y | 
| 6376 // result : r0 | 6395 // r0 : result | 
| 6396 | |
| 6397 Register result = r0; | |
| 6398 Register lhs = lhs_; | |
| 6399 Register rhs = rhs_; | |
| 6377 | 6400 | 
| Søren Thygesen Gjesse (2010/04/09 13:22:36): How about having scratch registers here as well, a… | |
| 6378 // All ops need to know whether we are dealing with two Smis. Set up r2 to | 6401 // All ops need to know whether we are dealing with two Smis. Set up r2 to | 
| 6379 // tell us that. | 6402 // tell us that. | 
| 6380 if (ShouldGenerateSmiCode()) { | 6403 if (ShouldGenerateSmiCode()) { | 
| 6381 __ orr(r2, r1, Operand(r0)); // r2 = x | y; | 6404 __ orr(r2, lhs, Operand(rhs)); // r2 = x | y; | 
| 6382 } | 6405 } | 
| 6383 | 6406 | 
| 6384 switch (op_) { | 6407 switch (op_) { | 
| 6385 case Token::ADD: { | 6408 case Token::ADD: { | 
| 6386 Label not_smi; | 6409 Label not_smi; | 
| 6387 // Fast path. | 6410 // Fast path. | 
| 6388 if (ShouldGenerateSmiCode()) { | 6411 if (ShouldGenerateSmiCode()) { | 
| 6389 ASSERT(kSmiTag == 0); // Adjust code below. | 6412 ASSERT(kSmiTag == 0); // Adjust code below. | 
| 6413 // This code can't cope with other register allocations yet. | |
| Søren Thygesen Gjesse (2010/04/09 13:22:36): Doesn't this assert apply for the GenericBinaryOpS… | |
| 6414 ASSERT(result.is(r0) && | |
| 6415 ((lhs.is(r0) && rhs.is(r1)) || | |
| 6416 (lhs.is(r1) && rhs.is(r0)))); | |
| 6390 __ tst(r2, Operand(kSmiTagMask)); | 6417 __ tst(r2, Operand(kSmiTagMask)); | 
| 6391 __ b(ne, ¬_smi); | 6418 __ b(ne, ¬_smi); | 
| 6392 __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically. | 6419 __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically. | 
| 6393 // Return if no overflow. | 6420 // Return if no overflow. | 
| 6394 __ Ret(vc); | 6421 __ Ret(vc); | 
| 6395 __ sub(r0, r0, Operand(r1)); // Revert optimistic add. | 6422 __ sub(r0, r0, Operand(r1)); // Revert optimistic add. | 
| 6396 } | 6423 } | 
| 6397 HandleBinaryOpSlowCases(masm, ¬_smi, Builtins::ADD); | 6424 HandleBinaryOpSlowCases(masm, ¬_smi, lhs, rhs, Builtins::ADD); | 
| 6398 break; | 6425 break; | 
| 6399 } | 6426 } | 
| 6400 | 6427 | 
| 6401 case Token::SUB: { | 6428 case Token::SUB: { | 
| 6402 Label not_smi; | 6429 Label not_smi; | 
| 6403 // Fast path. | 6430 // Fast path. | 
| 6404 if (ShouldGenerateSmiCode()) { | 6431 if (ShouldGenerateSmiCode()) { | 
| 6405 ASSERT(kSmiTag == 0); // Adjust code below. | 6432 ASSERT(kSmiTag == 0); // Adjust code below. | 
| 6433 // This code can't cope with other register allocations yet. | |
| Søren Thygesen Gjesse (2010/04/09 13:22:36): Ditto. | |
| 6434 ASSERT(result.is(r0) && | |
| 6435 ((lhs.is(r0) && rhs.is(r1)) || | |
| 6436 (lhs.is(r1) && rhs.is(r0)))); | |
| 6406 __ tst(r2, Operand(kSmiTagMask)); | 6437 __ tst(r2, Operand(kSmiTagMask)); | 
| 6407 __ b(ne, ¬_smi); | 6438 __ b(ne, ¬_smi); | 
| 6408 __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically. | 6439 if (lhs.is(r1)) { | 
| Søren Thygesen Gjesse (2010/04/09 13:22:36): Can't you just drop the if/else and use: __ sub(r… | |
| 6409 // Return if no overflow. | 6440 __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically. | 
| 6410 __ Ret(vc); | 6441 // Return if no overflow. | 
| 6411 __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract. | 6442 __ Ret(vc); | 
| 6443 __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract. | |
| 6444 } else { | |
| 6445 __ sub(r0, r0, Operand(r1), SetCC); // Subtract y optimistically. | |
| 6446 // Return if no overflow. | |
| 6447 __ Ret(vc); | |
| 6448 __ add(r0, r0, Operand(r1)); // Revert optimistic subtract. | |
| 6449 } | |
| 6412 } | 6450 } | 
| 6413 HandleBinaryOpSlowCases(masm, ¬_smi, Builtins::SUB); | 6451 HandleBinaryOpSlowCases(masm, ¬_smi, lhs, rhs, Builtins::SUB); | 
| 6414 break; | 6452 break; | 
| 6415 } | 6453 } | 
| 6416 | 6454 | 
| 6417 case Token::MUL: { | 6455 case Token::MUL: { | 
| 6418 Label not_smi, slow; | 6456 Label not_smi, slow; | 
| 6419 if (ShouldGenerateSmiCode()) { | 6457 if (ShouldGenerateSmiCode()) { | 
| 6420 ASSERT(kSmiTag == 0); // adjust code below | 6458 ASSERT(kSmiTag == 0); // adjust code below | 
| 6421 __ tst(r2, Operand(kSmiTagMask)); | 6459 __ tst(r2, Operand(kSmiTagMask)); | 
| 6422 __ b(ne, ¬_smi); | 6460 __ b(ne, ¬_smi); | 
| 6423 // Remove tag from one operand (but keep sign), so that result is Smi. | 6461 // Remove tag from one operand (but keep sign), so that result is Smi. | 
| 6424 __ mov(ip, Operand(r0, ASR, kSmiTagSize)); | 6462 __ mov(ip, Operand(rhs, ASR, kSmiTagSize)); | 
| 6425 // Do multiplication | 6463 // Do multiplication | 
| 6426 __ smull(r3, r2, r1, ip); // r3 = lower 32 bits of ip*r1. | 6464 __ smull(r3, r2, lhs, ip); // r3 = lower 32 bits of ip*lhs. | 
| 6427 // Go slow on overflows (overflow bit is not set). | 6465 // Go slow on overflows (overflow bit is not set). | 
| 6428 __ mov(ip, Operand(r3, ASR, 31)); | 6466 __ mov(ip, Operand(r3, ASR, 31)); | 
| 6429 __ cmp(ip, Operand(r2)); // no overflow if higher 33 bits are identical | 6467 __ cmp(ip, Operand(r2)); // no overflow if higher 33 bits are identical | 
| 6430 __ b(ne, &slow); | 6468 __ b(ne, &slow); | 
| 6431 // Go slow on zero result to handle -0. | 6469 // Go slow on zero result to handle -0. | 
| 6432 __ tst(r3, Operand(r3)); | 6470 __ tst(r3, Operand(r3)); | 
| 6433 __ mov(r0, Operand(r3), LeaveCC, ne); | 6471 __ mov(result, Operand(r3), LeaveCC, ne); | 
| 6434 __ Ret(ne); | 6472 __ Ret(ne); | 
| 6435 // We need -0 if we were multiplying a negative number with 0 to get 0. | 6473 // We need -0 if we were multiplying a negative number with 0 to get 0. | 
| 6436 // We know one of them was zero. | 6474 // We know one of them was zero. | 
| 6437 __ add(r2, r0, Operand(r1), SetCC); | 6475 __ add(r2, rhs, Operand(lhs), SetCC); | 
| 6438 __ mov(r0, Operand(Smi::FromInt(0)), LeaveCC, pl); | 6476 __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl); | 
| 6439 __ Ret(pl); // Return Smi 0 if the non-zero one was positive. | 6477 __ Ret(pl); // Return Smi 0 if the non-zero one was positive. | 
| 6440 // Slow case. We fall through here if we multiplied a negative number | 6478 // Slow case. We fall through here if we multiplied a negative number | 
| 6441 // with 0, because that would mean we should produce -0. | 6479 // with 0, because that would mean we should produce -0. | 
| 6442 __ bind(&slow); | 6480 __ bind(&slow); | 
| 6443 } | 6481 } | 
| 6444 HandleBinaryOpSlowCases(masm, ¬_smi, Builtins::MUL); | 6482 HandleBinaryOpSlowCases(masm, ¬_smi, lhs, rhs, Builtins::MUL); | 
| 6445 break; | 6483 break; | 
| 6446 } | 6484 } | 
| 6447 | 6485 | 
| 6448 case Token::DIV: | 6486 case Token::DIV: | 
| 6449 case Token::MOD: { | 6487 case Token::MOD: { | 
| 6450 Label not_smi; | 6488 Label not_smi; | 
| 6451 if (ShouldGenerateSmiCode()) { | 6489 if (ShouldGenerateSmiCode() && specialized_on_rhs_) { | 
| 6452 Label smi_is_unsuitable; | 6490 Label smi_is_unsuitable; | 
| 6453 __ BranchOnNotSmi(r1, ¬_smi); | 6491 __ BranchOnNotSmi(lhs, ¬_smi); | 
| 6454 if (IsPowerOf2(constant_rhs_)) { | 6492 if (IsPowerOf2(constant_rhs_)) { | 
| 6455 if (op_ == Token::MOD) { | 6493 if (op_ == Token::MOD) { | 
| 6456 __ and_(r0, | 6494 __ and_(rhs, | 
| 6457 r1, | 6495 lhs, | 
| 6458 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)), | 6496 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)), | 
| 6459 SetCC); | 6497 SetCC); | 
| 6460 // We now have the answer, but if the input was negative we also | 6498 // We now have the answer, but if the input was negative we also | 
| 6461 // have the sign bit. Our work is done if the result is | 6499 // have the sign bit. Our work is done if the result is | 
| 6462 // positive or zero: | 6500 // positive or zero: | 
| 6501 if (!rhs.is(r0)) { | |
| 6502 __ mov(r0, rhs, LeaveCC, pl); | |
| 6503 } | |
| 6463 __ Ret(pl); | 6504 __ Ret(pl); | 
| 6464 // A mod of a negative left hand side must return a negative number. | 6505 // A mod of a negative left hand side must return a negative number. | 
| 6465 // Unfortunately if the answer is 0 then we must return -0. And we | 6506 // Unfortunately if the answer is 0 then we must return -0. And we | 
| 6466 // already optimistically trashed r0 so we may need to restore it. | 6507 // already optimistically trashed rhs so we may need to restore it. | 
| 6467 __ eor(r0, r0, Operand(0x80000000u), SetCC); | 6508 __ eor(rhs, rhs, Operand(0x80000000u), SetCC); | 
| 6468 // Next two instructions are conditional on the answer being -0. | 6509 // Next two instructions are conditional on the answer being -0. | 
| 6469 __ mov(r0, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq); | 6510 __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq); | 
| 6470 __ b(eq, &smi_is_unsuitable); | 6511 __ b(eq, &smi_is_unsuitable); | 
| 6471 // We need to subtract the dividend. Eg. -3 % 4 == -3. | 6512 // We need to subtract the dividend. Eg. -3 % 4 == -3. | 
| 6472 __ sub(r0, r0, Operand(Smi::FromInt(constant_rhs_))); | 6513 __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_))); | 
| 6473 } else { | 6514 } else { | 
| 6474 ASSERT(op_ == Token::DIV); | 6515 ASSERT(op_ == Token::DIV); | 
| 6475 __ tst(r1, | 6516 __ tst(lhs, | 
| 6476 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1))); | 6517 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1))); | 
| 6477 __ b(ne, &smi_is_unsuitable); // Go slow on negative or remainder. | 6518 __ b(ne, &smi_is_unsuitable); // Go slow on negative or remainder. | 
| 6478 int shift = 0; | 6519 int shift = 0; | 
| 6479 int d = constant_rhs_; | 6520 int d = constant_rhs_; | 
| 6480 while ((d & 1) == 0) { | 6521 while ((d & 1) == 0) { | 
| 6481 d >>= 1; | 6522 d >>= 1; | 
| 6482 shift++; | 6523 shift++; | 
| 6483 } | 6524 } | 
| 6484 __ mov(r0, Operand(r1, LSR, shift)); | 6525 __ mov(r0, Operand(lhs, LSR, shift)); | 
| 6485 __ bic(r0, r0, Operand(kSmiTagMask)); | 6526 __ bic(r0, r0, Operand(kSmiTagMask)); | 
| 6486 } | 6527 } | 
| 6487 } else { | 6528 } else { | 
| 6488 // Not a power of 2. | 6529 // Not a power of 2. | 
| 6489 __ tst(r1, Operand(0x80000000u)); | 6530 __ tst(lhs, Operand(0x80000000u)); | 
| 6490 __ b(ne, &smi_is_unsuitable); | 6531 __ b(ne, &smi_is_unsuitable); | 
| 6491 // Find a fixed point reciprocal of the divisor so we can divide by | 6532 // Find a fixed point reciprocal of the divisor so we can divide by | 
| 6492 // multiplying. | 6533 // multiplying. | 
| 6493 double divisor = 1.0 / constant_rhs_; | 6534 double divisor = 1.0 / constant_rhs_; | 
| 6494 int shift = 32; | 6535 int shift = 32; | 
| 6495 double scale = 4294967296.0; // 1 << 32. | 6536 double scale = 4294967296.0; // 1 << 32. | 
| 6496 uint32_t mul; | 6537 uint32_t mul; | 
| 6497 // Maximise the precision of the fixed point reciprocal. | 6538 // Maximise the precision of the fixed point reciprocal. | 
| 6498 while (true) { | 6539 while (true) { | 
| 6499 mul = static_cast<uint32_t>(scale * divisor); | 6540 mul = static_cast<uint32_t>(scale * divisor); | 
| 6500 if (mul >= 0x7fffffff) break; | 6541 if (mul >= 0x7fffffff) break; | 
| 6501 scale *= 2.0; | 6542 scale *= 2.0; | 
| 6502 shift++; | 6543 shift++; | 
| 6503 } | 6544 } | 
| 6504 mul++; | 6545 mul++; | 
| 6505 __ mov(r2, Operand(mul)); | 6546 __ mov(r2, Operand(mul)); | 
| 6506 __ umull(r3, r2, r2, r1); | 6547 __ umull(r3, r2, r2, lhs); | 
| 6507 __ mov(r2, Operand(r2, LSR, shift - 31)); | 6548 __ mov(r2, Operand(r2, LSR, shift - 31)); | 
| 6508 // r2 is r1 / rhs. r2 is not Smi tagged. | 6549 // r2 is lhs / rhs. r2 is not Smi tagged. | 
| 6509 // r0 is still the known rhs. r0 is Smi tagged. | 6550 // rhs is still the known rhs. rhs is Smi tagged. | 
| 6510 // r1 is still the unknown lhs. r1 is Smi tagged. | 6551 // lhs is still the unknown lhs. lhs is Smi tagged. | 
| 6511 int required_r4_shift = 0; // Including the Smi tag shift of 1. | 6552 int required_r4_shift = 0; // Including the Smi tag shift of 1. | 
| 6512 // r4 = r2 * r0. | 6553 // r4 = r2 * rhs. | 
| 6513 MultiplyByKnownInt2(masm, | 6554 MultiplyByKnownInt2(masm, | 
| 6514 r4, | 6555 r4, | 
| 6515 r2, | 6556 r2, | 
| 6516 r0, | 6557 rhs, | 
| 6517 constant_rhs_, | 6558 constant_rhs_, | 
| 6518 &required_r4_shift); | 6559 &required_r4_shift); | 
| 6519 // r4 << required_r4_shift is now the Smi tagged rhs * (r1 / rhs). | 6560 // r4 << required_r4_shift is now the Smi tagged rhs * (lhs / rhs). | 
| 6520 if (op_ == Token::DIV) { | 6561 if (op_ == Token::DIV) { | 
| 6521 __ sub(r3, r1, Operand(r4, LSL, required_r4_shift), SetCC); | 6562 __ sub(r3, lhs, Operand(r4, LSL, required_r4_shift), SetCC); | 
| 6522 __ b(ne, &smi_is_unsuitable); // There was a remainder. | 6563 __ b(ne, &smi_is_unsuitable); // There was a remainder. | 
| 6523 __ mov(r0, Operand(r2, LSL, kSmiTagSize)); | 6564 __ mov(result, Operand(r2, LSL, kSmiTagSize)); | 
| 6524 } else { | 6565 } else { | 
| 6525 ASSERT(op_ == Token::MOD); | 6566 ASSERT(op_ == Token::MOD); | 
| 6526 __ sub(r0, r1, Operand(r4, LSL, required_r4_shift)); | 6567 __ sub(result, lhs, Operand(r4, LSL, required_r4_shift)); | 
| 6527 } | 6568 } | 
| 6528 } | 6569 } | 
| 6529 __ Ret(); | 6570 __ Ret(); | 
| 6530 __ bind(&smi_is_unsuitable); | 6571 __ bind(&smi_is_unsuitable); | 
| 6531 } | 6572 } | 
| 6532 HandleBinaryOpSlowCases( | 6573 HandleBinaryOpSlowCases( | 
| 6533 masm, | 6574 masm, | 
| 6534 ¬_smi, | 6575 ¬_smi, | 
| 6576 lhs, | |
| 6577 rhs, | |
| 6535 op_ == Token::MOD ? Builtins::MOD : Builtins::DIV); | 6578 op_ == Token::MOD ? Builtins::MOD : Builtins::DIV); | 
| 6536 break; | 6579 break; | 
| 6537 } | 6580 } | 
| 6538 | 6581 | 
| 6539 case Token::BIT_OR: | 6582 case Token::BIT_OR: | 
| 6540 case Token::BIT_AND: | 6583 case Token::BIT_AND: | 
| 6541 case Token::BIT_XOR: | 6584 case Token::BIT_XOR: | 
| 6542 case Token::SAR: | 6585 case Token::SAR: | 
| 6543 case Token::SHR: | 6586 case Token::SHR: | 
| 6544 case Token::SHL: { | 6587 case Token::SHL: { | 
| 6545 Label slow; | 6588 Label slow; | 
| 6546 ASSERT(kSmiTag == 0); // adjust code below | 6589 ASSERT(kSmiTag == 0); // adjust code below | 
| 6547 __ tst(r2, Operand(kSmiTagMask)); | 6590 __ tst(r2, Operand(kSmiTagMask)); | 
| 6548 __ b(ne, &slow); | 6591 __ b(ne, &slow); | 
| 6549 switch (op_) { | 6592 switch (op_) { | 
| 6550 case Token::BIT_OR: __ orr(r0, r0, Operand(r1)); break; | 6593 case Token::BIT_OR: __ orr(result, rhs, Operand(lhs)); break; | 
| 6551 case Token::BIT_AND: __ and_(r0, r0, Operand(r1)); break; | 6594 case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break; | 
| 6552 case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break; | 6595 case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break; | 
| 6553 case Token::SAR: | 6596 case Token::SAR: | 
| 6554 // Remove tags from right operand. | 6597 // Remove tags from right operand. | 
| 6555 __ GetLeastBitsFromSmi(r2, r0, 5); | 6598 __ GetLeastBitsFromSmi(r2, rhs, 5); | 
| 6556 __ mov(r0, Operand(r1, ASR, r2)); | 6599 __ mov(result, Operand(lhs, ASR, r2)); | 
| 6557 // Smi tag result. | 6600 // Smi tag result. | 
| 6558 __ bic(r0, r0, Operand(kSmiTagMask)); | 6601 __ bic(result, result, Operand(kSmiTagMask)); | 
| 6559 break; | 6602 break; | 
| 6560 case Token::SHR: | 6603 case Token::SHR: | 
| 6561 // Remove tags from operands. We can't do this on a 31 bit number | 6604 // Remove tags from operands. We can't do this on a 31 bit number | 
| 6562 // because then the 0s get shifted into bit 30 instead of bit 31. | 6605 // because then the 0s get shifted into bit 30 instead of bit 31. | 
| 6563 __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x | 6606 __ mov(r3, Operand(lhs, ASR, kSmiTagSize)); // x | 
| 6564 __ GetLeastBitsFromSmi(r2, r0, 5); | 6607 __ GetLeastBitsFromSmi(r2, rhs, 5); | 
| 6565 __ mov(r3, Operand(r3, LSR, r2)); | 6608 __ mov(r3, Operand(r3, LSR, r2)); | 
| 6566 // Unsigned shift is not allowed to produce a negative number, so | 6609 // Unsigned shift is not allowed to produce a negative number, so | 
| 6567 // check the sign bit and the sign bit after Smi tagging. | 6610 // check the sign bit and the sign bit after Smi tagging. | 
| 6568 __ tst(r3, Operand(0xc0000000)); | 6611 __ tst(r3, Operand(0xc0000000)); | 
| 6569 __ b(ne, &slow); | 6612 __ b(ne, &slow); | 
| 6570 // Smi tag result. | 6613 // Smi tag result. | 
| 6571 __ mov(r0, Operand(r3, LSL, kSmiTagSize)); | 6614 __ mov(result, Operand(r3, LSL, kSmiTagSize)); | 
| 6572 break; | 6615 break; | 
| 6573 case Token::SHL: | 6616 case Token::SHL: | 
| 6574 // Remove tags from operands. | 6617 // Remove tags from operands. | 
| 6575 __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x | 6618 __ mov(r3, Operand(lhs, ASR, kSmiTagSize)); // x | 
| 6576 __ GetLeastBitsFromSmi(r2, r0, 5); | 6619 __ GetLeastBitsFromSmi(r2, rhs, 5); | 
| 6577 __ mov(r3, Operand(r3, LSL, r2)); | 6620 __ mov(r3, Operand(r3, LSL, r2)); | 
| 6578 // Check that the signed result fits in a Smi. | 6621 // Check that the signed result fits in a Smi. | 
| 6579 __ add(r2, r3, Operand(0x40000000), SetCC); | 6622 __ add(r2, r3, Operand(0x40000000), SetCC); | 
| 6580 __ b(mi, &slow); | 6623 __ b(mi, &slow); | 
| 6581 __ mov(r0, Operand(r3, LSL, kSmiTagSize)); | 6624 __ mov(result, Operand(r3, LSL, kSmiTagSize)); | 
| 6582 break; | 6625 break; | 
| 6583 default: UNREACHABLE(); | 6626 default: UNREACHABLE(); | 
| 6584 } | 6627 } | 
| 6585 __ Ret(); | 6628 __ Ret(); | 
| 6586 __ bind(&slow); | 6629 __ bind(&slow); | 
| 6587 HandleNonSmiBitwiseOp(masm); | 6630 HandleNonSmiBitwiseOp(masm, lhs, rhs); | 
| 6588 break; | 6631 break; | 
| 6589 } | 6632 } | 
| 6590 | 6633 | 
| 6591 default: UNREACHABLE(); | 6634 default: UNREACHABLE(); | 
| 6592 } | 6635 } | 
| 6593 // This code should be unreachable. | 6636 // This code should be unreachable. | 
| 6594 __ stop("Unreachable"); | 6637 __ stop("Unreachable"); | 
| 6595 | 6638 | 
| 6596 // Generate an unreachable reference to the DEFAULT stub so that it can be | 6639 // Generate an unreachable reference to the DEFAULT stub so that it can be | 
| 6597 // found at the end of this stub when clearing ICs at GC. | 6640 // found at the end of this stub when clearing ICs at GC. | 
| (...skipping 1731 matching lines...) | |
| 8329 | 8372 | 
| 8330 // Just jump to runtime to add the two strings. | 8373 // Just jump to runtime to add the two strings. | 
| 8331 __ bind(&string_add_runtime); | 8374 __ bind(&string_add_runtime); | 
| 8332 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); | 8375 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); | 
| 8333 } | 8376 } | 
| 8334 | 8377 | 
| 8335 | 8378 | 
| 8336 #undef __ | 8379 #undef __ | 
| 8337 | 8380 | 
| 8338 } } // namespace v8::internal | 8381 } } // namespace v8::internal | 
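
A note on the recurring Smi range checks in this diff. After an inlined SHL the code does `__ add(scratch2, scratch, Operand(0x40000000), SetCC)` and branches to the deferred case on `mi`: adding 2^30 leaves the sign bit clear exactly when a signed result fits the 31-bit Smi payload. After SHR it instead tests `0xc0000000`, since an unsigned result with either of the two high bits set would be lost or turn negative when tagged. A minimal standalone sketch of both predicates (plain C++; the helper names are illustrative, not V8 API):

```cpp
#include <cassert>
#include <cstdint>

// A Smi stores a 31-bit signed payload (kSmiTag == 0, kSmiTagSize == 1),
// so a value fits only if it lies in [-2^30, 2^30 - 1].

// Mirrors: add scratch2, scratch, #0x40000000, SetCC; b mi, <deferred>
bool SignedResultFitsSmi(int32_t v) {
  // Adding 2^30 (with wraparound) clears the sign bit exactly for
  // in-range values.
  return (static_cast<uint32_t>(v) + 0x40000000u) < 0x80000000u;
}

// Mirrors: tst scratch, #0xc0000000; b ne, <deferred>  (after SHR)
bool UnsignedResultFitsSmi(uint32_t v) {
  // Bit 31 would be lost when tagging; bit 30 would make the tagged
  // value negative.
  return (v & 0xc0000000u) == 0;
}

int main() {
  assert(SignedResultFitsSmi(0x3fffffff));      // 2^30 - 1 fits.
  assert(!SignedResultFitsSmi(0x40000000));     // 2^30 does not.
  assert(SignedResultFitsSmi(-0x40000000));     // -2^30 still fits.
  assert(!UnsignedResultFitsSmi(0x40000000u));  // Would tag to a negative.
  return 0;
}
```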
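
The Token::MUL fast path exploits the tag layout directly: a tagged Smi holds 2*n, so untagging one operand (`mov ip, Operand(rhs, ASR, kSmiTagSize)`) and multiplying by the other still-tagged operand yields an already-tagged product, while comparing the high word of `smull` against `lo >> 31` detects 32-bit overflow. A sketch of the same arithmetic under those assumptions (helper name illustrative):

```cpp
#include <cstdint>

// Tagged Smi multiply: (2*a) * b == 2*(a*b), i.e. the product of one tagged
// and one untagged operand is already tagged. Returns false where the stub
// takes its slow path.
bool TaggedSmiMul(int32_t lhs_tagged, int32_t rhs_tagged, int32_t* result) {
  int32_t untagged = rhs_tagged >> 1;  // Arithmetic shift keeps the sign.
  int64_t product = static_cast<int64_t>(lhs_tagged) * untagged;
  int32_t lo = static_cast<int32_t>(product);
  int32_t hi = static_cast<int32_t>(product >> 32);
  // smull overflow check: no overflow iff the high word is the sign
  // extension of the low word.
  if (hi != (lo >> 31)) return false;
  // A zero product also needs the extra sign check the stub performs next,
  // because 0 * negative must produce -0, which is not a Smi.
  if (lo == 0) return false;
  *result = lo;
  return true;
}
```

For example, the tagged forms of 3 and 5 (6 and 10) produce 30, the tagged form of 15, while a product outside 32 bits trips the sign-extension test and falls through to HandleBinaryOpSlowCases.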
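
For `x % d` with a power-of-two constant `d`, the stub masks the tagged left operand with `0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)`: for non-negative inputs the masked bits are already the tagged answer, while for negative inputs the sign bit survives the mask and the remainder must be adjusted, bailing out when the true answer is -0. The same logic in standalone form (assumes the 1-bit Smi tag; function name illustrative):

```cpp
#include <cstdint>

// Tagged Smi modulus by a known power-of-two divisor d. Returns false where
// the stub bails to the slow case (a -0 result, which is not a Smi).
bool TaggedSmiModPow2(int32_t lhs_tagged, int32_t d, int32_t* result) {
  // Sign bit plus the low bits of the tagged divisor range.
  uint32_t mask = 0x80000000u | ((static_cast<uint32_t>(d) << 1) - 1);
  int32_t r = static_cast<int32_t>(static_cast<uint32_t>(lhs_tagged) & mask);
  if (r >= 0) {  // Non-negative input: the masked bits are the answer.
    *result = r;
    return true;
  }
  r = static_cast<int32_t>(static_cast<uint32_t>(r) ^ 0x80000000u);
  if (r == 0) return false;  // True answer is -0: not representable.
  // A mod of a negative lhs is negative: subtract the tagged divisor.
  *result = r - (d << 1);
  return true;
}
```

So tagged -6 (the Smi for -3) with d == 4 masks to 0x80000002, clears to tagged 1, and adjusts to tagged -3, matching the comment "-3 % 4 == -3" in the diff.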
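
The non-power-of-two DIV/MOD path divides by multiplying with a fixed-point reciprocal of the constant divisor: `umull r3, r2, r2, lhs` keeps the high 32 bits of the product, and a multiply-back comparison sends any inexact division to the slow case (`q * divisor == x` can only hold when `divisor` divides `x`, since `q * divisor` is always a multiple of the divisor). Below is the same computation with the Smi tagging stripped out and `uint64_t` standing in for the register pair; `x` plays the role of the untagged lhs, which the stub guarantees is a non-negative Smi value, and the signature is illustrative, not V8 API:

```cpp
#include <cassert>
#include <cstdint>

// Division of a non-negative 32-bit value by a known constant via a
// fixed-point reciprocal. Returns false (slow path) when inexact.
bool DivideByKnownInt(uint32_t x, uint32_t divisor, uint32_t* quotient) {
  // Maximise the precision of the reciprocal, as in the stub's loop: grow
  // the scale until the multiplier saturates 31 bits.
  double inv = 1.0 / divisor;
  int shift = 32;
  double scale = 4294967296.0;  // 1 << 32.
  uint32_t mul;
  while (true) {
    mul = static_cast<uint32_t>(scale * inv);
    if (mul >= 0x7fffffff) break;
    scale *= 2.0;
    shift++;
  }
  mul++;  // Slight overestimate of 2^shift / divisor: q is never too small.
  // The stub's umull: high bits of a 32x32 -> 64-bit product.
  uint32_t q =
      static_cast<uint32_t>((static_cast<uint64_t>(x) * mul) >> shift);
  if (q * divisor != x) return false;  // Remainder: slow case.
  *quotient = q;
  return true;
}

int main() {
  uint32_t q;
  assert(DivideByKnownInt(98, 7, &q) && q == 14);  // Exact: fast path.
  assert(!DivideByKnownInt(100, 7, &q));           // 100 % 7 != 0: slow.
  return 0;
}
```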