OLD | NEW |
---|---|
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 189 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
200 // Code pattern for loading a floating point value. Input value must | 200 // Code pattern for loading a floating point value. Input value must |
201 // be either a smi or a heap number object (fp value). Requirements: | 201 // be either a smi or a heap number object (fp value). Requirements: |
202 // operand in src register. Returns operand as floating point number | 202 // operand in src register. Returns operand as floating point number |
203 // in XMM register | 203 // in XMM register |
204 static void LoadFloatOperand(MacroAssembler* masm, | 204 static void LoadFloatOperand(MacroAssembler* masm, |
205 Register src, | 205 Register src, |
206 XMMRegister dst); | 206 XMMRegister dst); |
207 | 207 |
208 // Code pattern for loading floating point values. Input values must | 208 // Code pattern for loading floating point values. Input values must |
209 // be either smi or heap number objects (fp values). Requirements: | 209 // be either smi or heap number objects (fp values). Requirements: |
210 // operand_1 on TOS+1 , operand_2 on TOS+2; Returns operands as | 210 // operand_1 in rdx, operand_2 in rax; Returns operands as |
211 // floating point numbers in XMM registers. | 211 // floating point numbers in XMM registers. |
212 static void LoadFloatOperands(MacroAssembler* masm, | 212 static void LoadFloatOperands(MacroAssembler* masm, |
213 XMMRegister dst1, | 213 XMMRegister dst1, |
214 XMMRegister dst2); | 214 XMMRegister dst2); |
215 | 215 |
216 // Similar to LoadFloatOperands, assumes that the operands are smis. | |
217 static void LoadFloatOperandsFromSmis(MacroAssembler* masm, | |
218 XMMRegister dst1, | |
219 XMMRegister dst2); | |
220 | |
216 // Code pattern for loading floating point values onto the fp stack. | 221 // Code pattern for loading floating point values onto the fp stack. |
217 // Input values must be either smi or heap number objects (fp values). | 222 // Input values must be either smi or heap number objects (fp values). |
218 // Requirements: | 223 // Requirements: |
219 // Register version: operands in registers lhs and rhs. | 224 // Register version: operands in registers lhs and rhs. |
220 // Stack version: operands on TOS+1 and TOS+2. | 225 // Stack version: operands on TOS+1 and TOS+2. |
221 // Returns operands as floating point numbers on fp stack. | 226 // Returns operands as floating point numbers on fp stack. |
222 static void LoadFloatOperands(MacroAssembler* masm); | |
223 static void LoadFloatOperands(MacroAssembler* masm, | 227 static void LoadFloatOperands(MacroAssembler* masm, |
224 Register lhs, | 228 Register lhs, |
225 Register rhs); | 229 Register rhs); |
226 | 230 |
227 // Test if operands are smi or number objects (fp). Requirements: | 231 // Test if operands are smi or number objects (fp). Requirements: |
228 // operand_1 in rax, operand_2 in rdx; falls through on float or smi | 232 // operand_1 in rax, operand_2 in rdx; falls through on float or smi |
229 // operands, jumps to the non_float label otherwise. | 233 // operands, jumps to the non_float label otherwise. |
230 static void CheckNumberOperands(MacroAssembler* masm, | 234 static void CheckNumberOperands(MacroAssembler* masm, |
231 Label* non_float); | 235 Label* non_float); |
232 | 236 |
(...skipping 4877 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
5110 | 5114 |
5111 if (left_is_smi && right_is_smi) { | 5115 if (left_is_smi && right_is_smi) { |
5112 // Compute the constant result at compile time, and leave it on the frame. | 5116 // Compute the constant result at compile time, and leave it on the frame. |
5113 int left_int = Smi::cast(*left.handle())->value(); | 5117 int left_int = Smi::cast(*left.handle())->value(); |
5114 int right_int = Smi::cast(*right.handle())->value(); | 5118 int right_int = Smi::cast(*right.handle())->value(); |
5115 if (FoldConstantSmis(op, left_int, right_int)) return; | 5119 if (FoldConstantSmis(op, left_int, right_int)) return; |
5116 } | 5120 } |
5117 | 5121 |
5118 Result answer; | 5122 Result answer; |
5119 if (left_is_non_smi || right_is_non_smi) { | 5123 if (left_is_non_smi || right_is_non_smi) { |
5120 // Go straight to the slow case, with no smi code | |
5121 frame_->Push(&left); | |
5122 frame_->Push(&right); | |
5123 GenericBinaryOpStub stub(op, overwrite_mode, NO_SMI_CODE_IN_STUB); | 5124 GenericBinaryOpStub stub(op, overwrite_mode, NO_SMI_CODE_IN_STUB); |
5124 answer = frame_->CallStub(&stub, 2); | 5125 answer = stub.GenerateCall(masm_, frame_, &left, &right); |
5125 } else if (right_is_smi) { | 5126 } else if (right_is_smi) { |
5126 answer = ConstantSmiBinaryOperation(op, &left, right.handle(), | 5127 answer = ConstantSmiBinaryOperation(op, &left, right.handle(), |
5127 type, false, overwrite_mode); | 5128 type, false, overwrite_mode); |
5128 } else if (left_is_smi) { | 5129 } else if (left_is_smi) { |
5129 answer = ConstantSmiBinaryOperation(op, &right, left.handle(), | 5130 answer = ConstantSmiBinaryOperation(op, &right, left.handle(), |
5130 type, true, overwrite_mode); | 5131 type, true, overwrite_mode); |
5131 } else { | 5132 } else { |
5132 // Set the flags based on the operation, type and loop nesting level. | 5133 // Set the flags based on the operation, type and loop nesting level. |
5133 // Bit operations always assume they likely operate on Smis. Still only | 5134 // Bit operations always assume they likely operate on Smis. Still only |
5134 // generate the inline Smi check code if this operation is part of a loop. | 5135 // generate the inline Smi check code if this operation is part of a loop. |
5135 // For all other operations only inline the Smi check code for likely smis | 5136 // For all other operations only inline the Smi check code for likely smis |
5136 // if the operation is part of a loop. | 5137 // if the operation is part of a loop. |
5137 if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) { | 5138 if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) { |
5138 answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode); | 5139 answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode); |
5139 } else { | 5140 } else { |
5140 frame_->Push(&left); | |
5141 frame_->Push(&right); | |
5142 GenericBinaryOpStub stub(op, overwrite_mode, NO_GENERIC_BINARY_FLAGS); | 5141 GenericBinaryOpStub stub(op, overwrite_mode, NO_GENERIC_BINARY_FLAGS); |
5143 answer = frame_->CallStub(&stub, 2); | 5142 answer = stub.GenerateCall(masm_, frame_, &left, &right); |
5144 } | 5143 } |
5145 } | 5144 } |
5146 frame_->Push(&answer); | 5145 frame_->Push(&answer); |
5147 } | 5146 } |
5148 | 5147 |
5149 | 5148 |
5150 // Emit a LoadIC call to get the value from receiver and leave it in | 5149 // Emit a LoadIC call to get the value from receiver and leave it in |
5151 // dst. The receiver register is restored after the call. | 5150 // dst. The receiver register is restored after the call. |
5152 class DeferredReferenceGetNamedValue: public DeferredCode { | 5151 class DeferredReferenceGetNamedValue: public DeferredCode { |
5153 public: | 5152 public: |
(...skipping 2340 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
7494 __ SmiToInteger32(src, src); | 7493 __ SmiToInteger32(src, src); |
7495 __ cvtlsi2sd(dst, src); | 7494 __ cvtlsi2sd(dst, src); |
7496 | 7495 |
7497 __ bind(&done); | 7496 __ bind(&done); |
7498 } | 7497 } |
7499 | 7498 |
7500 | 7499 |
7501 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, | 7500 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, |
7502 XMMRegister dst1, | 7501 XMMRegister dst1, |
7503 XMMRegister dst2) { | 7502 XMMRegister dst2) { |
7504 __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize)); | 7503 Register left = rdx; |
Mads Ager (chromium)
2010/01/28 12:32:09
Does it add much to rename to left and right here.
Vladislav Kaznacheev
2010/01/28 12:38:20
Done.
| |
7504 Register right = rax; | |
7505 __ movq(kScratchRegister, left); | |
7505 LoadFloatOperand(masm, kScratchRegister, dst1); | 7506 LoadFloatOperand(masm, kScratchRegister, dst1); |
7506 __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize)); | 7507 __ movq(kScratchRegister, right); |
7507 LoadFloatOperand(masm, kScratchRegister, dst2); | 7508 LoadFloatOperand(masm, kScratchRegister, dst2); |
7508 } | 7509 } |
7509 | 7510 |
7510 | 7511 |
7511 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) { | 7512 void FloatingPointHelper::LoadFloatOperandsFromSmis(MacroAssembler* masm, |
7512 Label load_smi_1, load_smi_2, done_load_1, done; | 7513 XMMRegister dst1, |
7513 __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize)); | 7514 XMMRegister dst2) { |
7514 __ JumpIfSmi(kScratchRegister, &load_smi_1); | 7515 Register left = rdx; |
Mads Ager (chromium)
2010/01/28 12:32:09
Same here.
Vladislav Kaznacheev
2010/01/28 12:38:20
Done.
| |
7515 __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset)); | 7516 Register right = rax; |
7516 __ bind(&done_load_1); | 7517 __ SmiToInteger32(kScratchRegister, left); |
7517 | 7518 __ cvtlsi2sd(dst1, kScratchRegister); |
7518 __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize)); | 7519 __ SmiToInteger32(kScratchRegister, right); |
7519 __ JumpIfSmi(kScratchRegister, &load_smi_2); | 7520 __ cvtlsi2sd(dst2, kScratchRegister); |
7520 __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset)); | |
7521 __ jmp(&done); | |
7522 | |
7523 __ bind(&load_smi_1); | |
7524 __ SmiToInteger32(kScratchRegister, kScratchRegister); | |
7525 __ push(kScratchRegister); | |
7526 __ fild_s(Operand(rsp, 0)); | |
7527 __ pop(kScratchRegister); | |
7528 __ jmp(&done_load_1); | |
7529 | |
7530 __ bind(&load_smi_2); | |
7531 __ SmiToInteger32(kScratchRegister, kScratchRegister); | |
7532 __ push(kScratchRegister); | |
7533 __ fild_s(Operand(rsp, 0)); | |
7534 __ pop(kScratchRegister); | |
7535 | |
7536 __ bind(&done); | |
7537 } | 7521 } |
7538 | 7522 |
7539 | 7523 |
7540 // Input: rdx, rax are the left and right objects of a bit op. | 7524 // Input: rdx, rax are the left and right objects of a bit op. |
7541 // Output: rax, rcx are left and right integers for a bit op. | 7525 // Output: rax, rcx are left and right integers for a bit op. |
7542 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, | 7526 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, |
7543 bool use_sse3, | 7527 bool use_sse3, |
7544 Label* conversion_failure) { | 7528 Label* conversion_failure) { |
7545 // Check float operands. | 7529 // Check float operands. |
7546 Label arg1_is_object, check_undefined_arg1; | 7530 Label arg1_is_object, check_undefined_arg1; |
(...skipping 233 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
7780 // Update flags to indicate that arguments are in registers. | 7764 // Update flags to indicate that arguments are in registers. |
7781 SetArgsInRegisters(); | 7765 SetArgsInRegisters(); |
7782 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); | 7766 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); |
7783 } | 7767 } |
7784 | 7768 |
7785 // Call the stub. | 7769 // Call the stub. |
7786 __ CallStub(this); | 7770 __ CallStub(this); |
7787 } | 7771 } |
7788 | 7772 |
7789 | 7773 |
7774 Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm, | |
7775 VirtualFrame* frame, | |
7776 Result* left, | |
7777 Result* right) { | |
7778 if (ArgsInRegistersSupported()) { | |
7779 SetArgsInRegisters(); | |
7780 return frame->CallStub(this, left, right); | |
7781 } else { | |
7782 frame->Push(left); | |
7783 frame->Push(right); | |
7784 return frame->CallStub(this, 2); | |
7785 } | |
7786 } | |
7787 | |
7788 | |
7790 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { | 7789 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { |
7791 // Perform fast-case smi code for the operation (rax <op> rbx) and | 7790 // 1. Move arguments into rdx, rax except for DIV and MOD, which need the |
7792 // leave result in register rax. | 7791 // dividend in rax and rdx free for the division. Use rax, rbx for those. |
7792 Comment load_comment(masm, "-- Load arguments"); | |
7793 Register left = rdx; | |
7794 Register right = rax; | |
7795 if (op_ == Token::DIV || op_ == Token::MOD) { | |
7796 left = rax; | |
7797 right = rbx; | |
7798 if (HasArgsInRegisters()) { | |
7799 __ movq(rbx, rax); | |
7800 __ movq(rax, rdx); | |
7801 } | |
7802 } | |
7803 if (!HasArgsInRegisters()) { | |
7804 __ movq(right, Operand(rsp, 1 * kPointerSize)); | |
7805 __ movq(left, Operand(rsp, 2 * kPointerSize)); | |
7806 } | |
7793 | 7807 |
7794 // Smi check both operands. | 7808 // 2. Smi check both operands. Skip the check for OR as it is better combined |
7795 __ JumpIfNotBothSmi(rax, rbx, slow); | 7809 // with the actual operation. |
7810 Label not_smis; | |
7811 if (op_ != Token::BIT_OR) { | |
7812 Comment smi_check_comment(masm, "-- Smi check arguments"); | |
7813 __ JumpIfNotBothSmi(left, right, ¬_smis); | |
7814 } | |
7796 | 7815 |
7816 // 3. Operands are both smis (except for OR), perform the operation leaving | |
7817 // the result in rax and check the result if necessary. | |
7818 Comment perform_smi(masm, "-- Perform smi operation"); | |
7819 Label use_fp_on_smis; | |
7797 switch (op_) { | 7820 switch (op_) { |
7798 case Token::ADD: { | 7821 case Token::ADD: { |
7799 __ SmiAdd(rax, rax, rbx, slow); | 7822 ASSERT(right.is(rax)); |
7823 __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative. | |
7800 break; | 7824 break; |
7801 } | 7825 } |
7802 | 7826 |
7803 case Token::SUB: { | 7827 case Token::SUB: { |
7804 __ SmiSub(rax, rax, rbx, slow); | 7828 __ SmiSub(left, left, right, &use_fp_on_smis); |
7829 __ movq(rax, left); | |
7805 break; | 7830 break; |
7806 } | 7831 } |
7807 | 7832 |
7808 case Token::MUL: | 7833 case Token::MUL: |
7809 __ SmiMul(rax, rax, rbx, slow); | 7834 ASSERT(right.is(rax)); |
7835 __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative. | |
7810 break; | 7836 break; |
7811 | 7837 |
7812 case Token::DIV: | 7838 case Token::DIV: |
7813 __ SmiDiv(rax, rax, rbx, slow); | 7839 ASSERT(left.is(rax)); |
7840 __ SmiDiv(left, left, right, &use_fp_on_smis); | |
7814 break; | 7841 break; |
7815 | 7842 |
7816 case Token::MOD: | 7843 case Token::MOD: |
7817 __ SmiMod(rax, rax, rbx, slow); | 7844 ASSERT(left.is(rax)); |
7845 __ SmiMod(left, left, right, slow); | |
7818 break; | 7846 break; |
7819 | 7847 |
7820 case Token::BIT_OR: | 7848 case Token::BIT_OR: |
7821 __ SmiOr(rax, rax, rbx); | 7849 ASSERT(right.is(rax)); |
7850 __ movq(rcx, right); // Save the right operand. | |
7851 __ SmiOr(right, right, left); // BIT_OR is commutative. | |
7852 __ testb(right, Immediate(kSmiTagMask)); | |
7853 __ j(not_zero, ¬_smis); | |
7822 break; | 7854 break; |
7823 | 7855 |
7824 case Token::BIT_AND: | 7856 case Token::BIT_AND: |
7825 __ SmiAnd(rax, rax, rbx); | 7857 ASSERT(right.is(rax)); |
7858 __ SmiAnd(right, right, left); // BIT_AND is commutative. | |
7826 break; | 7859 break; |
7827 | 7860 |
7828 case Token::BIT_XOR: | 7861 case Token::BIT_XOR: |
7829 __ SmiXor(rax, rax, rbx); | 7862 ASSERT(right.is(rax)); |
7863 __ SmiXor(right, right, left); // BIT_XOR is commutative. | |
7830 break; | 7864 break; |
7831 | 7865 |
7832 case Token::SHL: | 7866 case Token::SHL: |
7833 case Token::SHR: | 7867 case Token::SHR: |
7834 case Token::SAR: | 7868 case Token::SAR: |
7835 // Move the second operand into register rcx. | |
7836 __ movq(rcx, rbx); | |
7837 // Perform the operation. | |
7838 switch (op_) { | 7869 switch (op_) { |
7839 case Token::SAR: | 7870 case Token::SAR: |
7840 __ SmiShiftArithmeticRight(rax, rax, rcx); | 7871 __ SmiShiftArithmeticRight(left, left, right); |
7841 break; | 7872 break; |
7842 case Token::SHR: | 7873 case Token::SHR: |
7843 __ SmiShiftLogicalRight(rax, rax, rcx, slow); | 7874 __ SmiShiftLogicalRight(left, left, right, slow); |
7844 break; | 7875 break; |
7845 case Token::SHL: | 7876 case Token::SHL: |
7846 __ SmiShiftLeft(rax, rax, rcx, slow); | 7877 __ SmiShiftLeft(left, left, right, slow); |
7847 break; | 7878 break; |
7848 default: | 7879 default: |
7849 UNREACHABLE(); | 7880 UNREACHABLE(); |
7850 } | 7881 } |
7882 __ movq(rax, left); | |
7851 break; | 7883 break; |
7852 | 7884 |
7853 default: | 7885 default: |
7854 UNREACHABLE(); | 7886 UNREACHABLE(); |
7855 break; | 7887 break; |
7856 } | 7888 } |
7889 | |
7890 // 4. Emit return of result in rax. |
7891 GenerateReturn(masm); | |
7892 | |
7893 // 5. For some operations emit inline code to perform floating point | |
7894 // operations on known smis (e.g., if the result of the operation | |
7895 // overflowed the smi range). | |
7896 switch (op_) { | |
7897 case Token::ADD: | |
7898 case Token::SUB: | |
7899 case Token::MUL: | |
7900 case Token::DIV: { | |
7901 __ bind(&use_fp_on_smis); | |
7902 if (op_ == Token::DIV) { | |
7903 __ movq(rdx, rax); | |
7904 __ movq(rax, rbx); | |
7905 } | |
7906 // left is rdx, right is rax. | |
7907 __ AllocateHeapNumber(rbx, rcx, slow); | |
7908 FloatingPointHelper::LoadFloatOperandsFromSmis(masm, xmm4, xmm5); | |
7909 switch (op_) { | |
7910 case Token::ADD: __ addsd(xmm4, xmm5); break; | |
7911 case Token::SUB: __ subsd(xmm4, xmm5); break; | |
7912 case Token::MUL: __ mulsd(xmm4, xmm5); break; | |
7913 case Token::DIV: __ divsd(xmm4, xmm5); break; | |
7914 default: UNREACHABLE(); | |
7915 } | |
7916 __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm4); | |
7917 __ movq(rax, rbx); | |
7918 GenerateReturn(masm); | |
7919 } | |
7920 default: | |
7921 break; | |
7922 } | |
7923 | |
7924 // 6. Non-smi operands, fall out to the non-smi code with the operands in | |
7925 // rdx and rax. |
Mads Ager (chromium)
2010/01/28 12:32:09
edx -> rdx
eax -> rax
Vladislav Kaznacheev
2010/01/28 12:38:20
Done.
| |
7926 Comment done_comment(masm, "-- Enter non-smi code"); | |
7927 __ bind(¬_smis); | |
7928 | |
7929 switch (op_) { | |
7930 case Token::DIV: | |
7931 case Token::MOD: | |
7932 // Operands are in rax, rbx at this point. | |
7933 __ movq(rdx, rax); | |
7934 __ movq(rax, rbx); | |
7935 break; | |
7936 | |
7937 case Token::BIT_OR: | |
7938 // Right operand is saved in rcx and rax was destroyed by the smi | |
7939 // operation. | |
7940 __ movq(rax, rcx); | |
7941 break; | |
7942 | |
7943 default: | |
7944 break; | |
7945 } | |
7857 } | 7946 } |
7858 | 7947 |
7859 | 7948 |
7860 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { | 7949 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { |
7861 Label call_runtime; | 7950 Label call_runtime; |
7862 if (HasSmiCodeInStub()) { | 7951 if (HasSmiCodeInStub()) { |
7863 // The fast case smi code wasn't inlined in the stub caller | 7952 GenerateSmiCode(masm, &call_runtime); |
7864 // code. Generate it here to speed up common operations. | 7953 } else if (op_ != Token::MOD) { |
7865 Label slow; | 7954 GenerateLoadArguments(masm); |
7866 __ movq(rbx, Operand(rsp, 1 * kPointerSize)); // get y | |
7867 __ movq(rax, Operand(rsp, 2 * kPointerSize)); // get x | |
7868 GenerateSmiCode(masm, &slow); | |
7869 GenerateReturn(masm); | |
7870 | |
7871 // Too bad. The fast case smi code didn't succeed. | |
7872 __ bind(&slow); | |
7873 } | 7955 } |
7874 | |
7875 // Make sure the arguments are in rdx and rax. | |
7876 GenerateLoadArguments(masm); | |
7877 | |
7878 // Floating point case. | 7956 // Floating point case. |
7879 switch (op_) { | 7957 switch (op_) { |
7880 case Token::ADD: | 7958 case Token::ADD: |
7881 case Token::SUB: | 7959 case Token::SUB: |
7882 case Token::MUL: | 7960 case Token::MUL: |
7883 case Token::DIV: { | 7961 case Token::DIV: { |
7884 // rax: y | 7962 // rax: y |
7885 // rdx: x | 7963 // rdx: x |
7886 FloatingPointHelper::CheckNumberOperands(masm, &call_runtime); | 7964 FloatingPointHelper::CheckNumberOperands(masm, &call_runtime); |
7887 // Fast-case: Both operands are numbers. | 7965 // Fast-case: Both operands are numbers. |
7966 // xmm4 and xmm5 are volatile XMM registers. | |
7967 FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5); | |
7968 | |
7969 switch (op_) { | |
7970 case Token::ADD: __ addsd(xmm4, xmm5); break; | |
7971 case Token::SUB: __ subsd(xmm4, xmm5); break; | |
7972 case Token::MUL: __ mulsd(xmm4, xmm5); break; | |
7973 case Token::DIV: __ divsd(xmm4, xmm5); break; | |
7974 default: UNREACHABLE(); | |
7975 } | |
7888 // Allocate a heap number, if needed. | 7976 // Allocate a heap number, if needed. |
7889 Label skip_allocation; | 7977 Label skip_allocation; |
7890 switch (mode_) { | 7978 OverwriteMode mode = mode_; |
7979 if (HasArgsReversed()) { | |
7980 if (mode == OVERWRITE_RIGHT) { | |
7981 mode = OVERWRITE_LEFT; | |
7982 } else if (mode == OVERWRITE_LEFT) { | |
7983 mode = OVERWRITE_RIGHT; | |
7984 } | |
7985 } | |
7986 switch (mode) { | |
7891 case OVERWRITE_LEFT: | 7987 case OVERWRITE_LEFT: |
7988 __ JumpIfNotSmi(rdx, &skip_allocation); | |
7989 __ AllocateHeapNumber(rbx, rcx, &call_runtime); | |
7990 __ movq(rdx, rbx); | |
7991 __ bind(&skip_allocation); | |
7892 __ movq(rax, rdx); | 7992 __ movq(rax, rdx); |
7893 // Fall through! | 7993 break; |
7894 case OVERWRITE_RIGHT: | 7994 case OVERWRITE_RIGHT: |
7895 // If the argument in rax is already an object, we skip the | 7995 // If the argument in rax is already an object, we skip the |
7896 // allocation of a heap number. | 7996 // allocation of a heap number. |
7897 __ JumpIfNotSmi(rax, &skip_allocation); | 7997 __ JumpIfNotSmi(rax, &skip_allocation); |
7898 // Fall through! | 7998 // Fall through! |
7899 case NO_OVERWRITE: | 7999 case NO_OVERWRITE: |
7900 // Allocate a heap number for the result. Keep rax and rdx intact | 8000 // Allocate a heap number for the result. Keep rax and rdx intact |
7901 // for the possible runtime call. | 8001 // for the possible runtime call. |
7902 __ AllocateHeapNumber(rbx, rcx, &call_runtime); | 8002 __ AllocateHeapNumber(rbx, rcx, &call_runtime); |
7903 __ movq(rax, rbx); | 8003 __ movq(rax, rbx); |
7904 __ bind(&skip_allocation); | 8004 __ bind(&skip_allocation); |
7905 break; | 8005 break; |
7906 default: UNREACHABLE(); | 8006 default: UNREACHABLE(); |
7907 } | 8007 } |
7908 // xmm4 and xmm5 are volatile XMM registers. | |
7909 FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5); | |
7910 | |
7911 switch (op_) { | |
7912 case Token::ADD: __ addsd(xmm4, xmm5); break; | |
7913 case Token::SUB: __ subsd(xmm4, xmm5); break; | |
7914 case Token::MUL: __ mulsd(xmm4, xmm5); break; | |
7915 case Token::DIV: __ divsd(xmm4, xmm5); break; | |
7916 default: UNREACHABLE(); | |
7917 } | |
7918 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4); | 8008 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4); |
7919 GenerateReturn(masm); | 8009 GenerateReturn(masm); |
7920 } | 8010 } |
7921 case Token::MOD: { | 8011 case Token::MOD: { |
7922 // For MOD we go directly to runtime in the non-smi case. | 8012 // For MOD we go directly to runtime in the non-smi case. |
7923 break; | 8013 break; |
7924 } | 8014 } |
7925 case Token::BIT_OR: | 8015 case Token::BIT_OR: |
7926 case Token::BIT_AND: | 8016 case Token::BIT_AND: |
7927 case Token::BIT_XOR: | 8017 case Token::BIT_XOR: |
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
7974 __ movq(Operand(rsp, 1 * kPointerSize), rbx); | 8064 __ movq(Operand(rsp, 1 * kPointerSize), rbx); |
7975 __ fild_s(Operand(rsp, 1 * kPointerSize)); | 8065 __ fild_s(Operand(rsp, 1 * kPointerSize)); |
7976 __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset)); | 8066 __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset)); |
7977 GenerateReturn(masm); | 8067 GenerateReturn(masm); |
7978 } | 8068 } |
7979 | 8069 |
7980 // SHR should return uint32 - go to runtime for non-smi/negative result. | 8070 // SHR should return uint32 - go to runtime for non-smi/negative result. |
7981 if (op_ == Token::SHR) { | 8071 if (op_ == Token::SHR) { |
7982 __ bind(&non_smi_result); | 8072 __ bind(&non_smi_result); |
7983 } | 8073 } |
7984 __ movq(rax, Operand(rsp, 1 * kPointerSize)); | |
7985 __ movq(rdx, Operand(rsp, 2 * kPointerSize)); | |
7986 break; | 8074 break; |
7987 } | 8075 } |
7988 default: UNREACHABLE(); break; | 8076 default: UNREACHABLE(); break; |
7989 } | 8077 } |
7990 | 8078 |
7991 // If all else fails, use the runtime system to get the correct | 8079 // If all else fails, use the runtime system to get the correct |
7992 // result. If arguments was passed in registers now place them on the | 8080 // result. If arguments were passed in registers now place them on the |
7993 // stack in the correct order below the return address. | 8081 // stack in the correct order below the return address. |
7994 __ bind(&call_runtime); | 8082 __ bind(&call_runtime); |
7995 if (HasArgumentsInRegisters()) { | 8083 if (HasArgsInRegisters()) { |
7996 __ pop(rcx); | 8084 __ pop(rcx); |
7997 if (HasArgumentsReversed()) { | 8085 if (HasArgsReversed()) { |
7998 __ push(rax); | 8086 __ push(rax); |
7999 __ push(rdx); | 8087 __ push(rdx); |
8000 } else { | 8088 } else { |
8001 __ push(rdx); | 8089 __ push(rdx); |
8002 __ push(rax); | 8090 __ push(rax); |
8003 } | 8091 } |
8004 __ push(rcx); | 8092 __ push(rcx); |
8005 } | 8093 } |
8006 switch (op_) { | 8094 switch (op_) { |
8007 case Token::ADD: { | 8095 case Token::ADD: { |
8008 // Test for string arguments before calling runtime. | 8096 // Test for string arguments before calling runtime. |
8009 Label not_strings, both_strings, not_string1, string1; | 8097 Label not_strings, both_strings, not_string1, string1; |
8010 Condition is_smi; | 8098 Condition is_smi; |
8011 Result answer; | 8099 Result answer; |
8012 __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // First argument. | |
8013 __ movq(rax, Operand(rsp, 1 * kPointerSize)); // Second argument. | |
8014 is_smi = masm->CheckSmi(rdx); | 8100 is_smi = masm->CheckSmi(rdx); |
8015 __ j(is_smi, ¬_string1); | 8101 __ j(is_smi, ¬_string1); |
8016 __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rdx); | 8102 __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rdx); |
8017 __ j(above_equal, ¬_string1); | 8103 __ j(above_equal, ¬_string1); |
8018 | 8104 |
8019 // First argument is a a string, test second. | 8105 // First argument is a string, test second. |
8020 is_smi = masm->CheckSmi(rax); | 8106 is_smi = masm->CheckSmi(rax); |
8021 __ j(is_smi, &string1); | 8107 __ j(is_smi, &string1); |
8022 __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rax); | 8108 __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rax); |
8023 __ j(above_equal, &string1); | 8109 __ j(above_equal, &string1); |
8024 | 8110 |
8025 // First and second argument are strings. | 8111 // First and second argument are strings. |
8026 StringAddStub stub(NO_STRING_CHECK_IN_STUB); | 8112 StringAddStub stub(NO_STRING_CHECK_IN_STUB); |
8027 __ TailCallStub(&stub); | 8113 __ TailCallStub(&stub); |
8028 | 8114 |
8029 // Only first argument is a string. | 8115 // Only first argument is a string. |
8030 __ bind(&string1); | 8116 __ bind(&string1); |
8031 __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION); | 8117 __ InvokeBuiltin( |
8118 HasArgsReversed() ? | |
8119 Builtins::STRING_ADD_RIGHT : | |
8120 Builtins::STRING_ADD_LEFT, | |
8121 JUMP_FUNCTION); | |
8032 | 8122 |
8033 // First argument was not a string, test second. | 8123 // First argument was not a string, test second. |
8034 __ bind(¬_string1); | 8124 __ bind(¬_string1); |
8035 is_smi = masm->CheckSmi(rax); | 8125 is_smi = masm->CheckSmi(rax); |
8036 __ j(is_smi, ¬_strings); | 8126 __ j(is_smi, ¬_strings); |
8037 __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rax); | 8127 __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rax); |
8038 __ j(above_equal, ¬_strings); | 8128 __ j(above_equal, ¬_strings); |
8039 | 8129 |
8040 // Only second argument is a string. | 8130 // Only second argument is a string. |
8041 __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION); | 8131 __ InvokeBuiltin( |
8132 HasArgsReversed() ? | |
8133 Builtins::STRING_ADD_LEFT : | |
8134 Builtins::STRING_ADD_RIGHT, | |
8135 JUMP_FUNCTION); | |
8042 | 8136 |
8043 __ bind(¬_strings); | 8137 __ bind(¬_strings); |
8044 // Neither argument is a string. | 8138 // Neither argument is a string. |
8045 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); | 8139 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); |
8046 break; | 8140 break; |
8047 } | 8141 } |
8048 case Token::SUB: | 8142 case Token::SUB: |
8049 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); | 8143 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); |
8050 break; | 8144 break; |
8051 case Token::MUL: | 8145 case Token::MUL: |
8052 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); | 8146 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); |
8053 break; | 8147 break; |
8054 case Token::DIV: | 8148 case Token::DIV: |
8055 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); | 8149 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); |
8056 break; | 8150 break; |
8057 case Token::MOD: | 8151 case Token::MOD: |
8058 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); | 8152 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); |
8059 break; | 8153 break; |
8060 case Token::BIT_OR: | 8154 case Token::BIT_OR: |
8061 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); | 8155 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); |
8062 break; | 8156 break; |
8063 case Token::BIT_AND: | 8157 case Token::BIT_AND: |
(...skipping 12 matching lines...) Expand all Loading... | |
8076 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); | 8170 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); |
8077 break; | 8171 break; |
8078 default: | 8172 default: |
8079 UNREACHABLE(); | 8173 UNREACHABLE(); |
8080 } | 8174 } |
8081 } | 8175 } |
8082 | 8176 |
8083 | 8177 |
8084 void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) { | 8178 void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) { |
8085 // If arguments are not passed in registers read them from the stack. | 8179 // If arguments are not passed in registers read them from the stack. |
8086 if (!HasArgumentsInRegisters()) { | 8180 if (!HasArgsInRegisters()) { |
8087 __ movq(rax, Operand(rsp, 1 * kPointerSize)); | 8181 __ movq(rax, Operand(rsp, 1 * kPointerSize)); |
8088 __ movq(rdx, Operand(rsp, 2 * kPointerSize)); | 8182 __ movq(rdx, Operand(rsp, 2 * kPointerSize)); |
8089 } | 8183 } |
8090 } | 8184 } |
8091 | 8185 |
8092 | 8186 |
8093 void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) { | 8187 void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) { |
8094 // If arguments are not passed in registers remove them from the stack before | 8188 // If arguments are not passed in registers remove them from the stack before |
8095 // returning. | 8189 // returning. |
8096 if (!HasArgumentsInRegisters()) { | 8190 if (!HasArgsInRegisters()) { |
8097 __ ret(2 * kPointerSize); // Remove both operands | 8191 __ ret(2 * kPointerSize); // Remove both operands |
8098 } else { | 8192 } else { |
8099 __ ret(0); | 8193 __ ret(0); |
8100 } | 8194 } |
8101 } | 8195 } |
8102 | 8196 |
8103 | 8197 |
8104 int CompareStub::MinorKey() { | 8198 int CompareStub::MinorKey() { |
8105 // Encode the three parameters in a unique 16 bit value. | 8199 // Encode the three parameters in a unique 16 bit value. |
8106 ASSERT(static_cast<unsigned>(cc_) < (1 << 14)); | 8200 ASSERT(static_cast<unsigned>(cc_) < (1 << 14)); |
(...skipping 649 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
8756 // Call the function from C++. | 8850 // Call the function from C++. |
8757 return FUNCTION_CAST<ModuloFunction>(buffer); | 8851 return FUNCTION_CAST<ModuloFunction>(buffer); |
8758 } | 8852 } |
8759 | 8853 |
8760 #endif | 8854 #endif |
8761 | 8855 |
8762 | 8856 |
8763 #undef __ | 8857 #undef __ |
8764 | 8858 |
8765 } } // namespace v8::internal | 8859 } } // namespace v8::internal |
OLD | NEW |