| OLD | NEW |
| 1 // Copyright 2009 the V8 project authors. All rights reserved. | 1 // Copyright 2009 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 4653 matching lines...) |
| 4664 virtual void Generate(); | 4664 virtual void Generate(); |
| 4665 | 4665 |
| 4666 private: | 4666 private: |
| 4667 Register dst_; | 4667 Register dst_; |
| 4668 Smi* value_; | 4668 Smi* value_; |
| 4669 OverwriteMode overwrite_mode_; | 4669 OverwriteMode overwrite_mode_; |
| 4670 }; | 4670 }; |
| 4671 | 4671 |
| 4672 | 4672 |
| 4673 void DeferredInlineSmiAdd::Generate() { | 4673 void DeferredInlineSmiAdd::Generate() { |
| 4674 // Undo the optimistic add operation and call the shared stub. | |
| 4675 __ subq(dst_, Immediate(value_)); | |
| 4676 __ push(dst_); | 4674 __ push(dst_); |
| 4677 __ push(Immediate(value_)); | 4675 __ push(Immediate(value_)); |
| 4678 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED); | 4676 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED); |
| 4679 __ CallStub(&igostub); | 4677 __ CallStub(&igostub); |
| 4680 if (!dst_.is(rax)) __ movq(dst_, rax); | 4678 if (!dst_.is(rax)) __ movq(dst_, rax); |
| 4681 } | 4679 } |
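
The deferred code no longer undoes a speculative add because the rewritten inline path (the ADD case further down) only modifies the operand after the smi check and rolls itself back on overflow. For reference, a minimal self-contained sketch of the 32-bit smi encoding this code relies on (kSmiTag == 0, kSmiTagSize == 1, so a smi is its value shifted left by one); the helper names are illustrative, not V8's:

```cpp
#include <cassert>
#include <cstdint>

// Sketch of the 32-bit smi encoding assumed here (kSmiTag == 0,
// kSmiTagSize == 1): a smi is its value shifted left by one bit.
constexpr int32_t kSmiTagMask = 1;

int32_t TagSmi(int32_t value) { return value * 2; }  // same as value << 1
int32_t UntagSmi(int32_t smi) { return smi / 2; }    // smis are even, so this matches sar

int main() {
  int32_t a = TagSmi(7), b = TagSmi(-3);
  assert(((a | b) & kSmiTagMask) == 0);  // tag bit clear: both are smis
  // Because a tagged smi is just the value doubled, the sum of two tagged
  // smis is the tagged sum, so ADD/SUB can operate on tagged values directly;
  // only the input tags and 32-bit overflow need checking.
  assert(a + b == TagSmi(7 + -3));
  assert(UntagSmi(a + b) == 4);
  return 0;
}
```
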
| 4682 | 4680 |
| 4683 | 4681 |
| 4684 // The result of value + src is in dst. It either overflowed or was not | 4682 // The result of value + src is in dst. It either overflowed or was not |
| 4685 // smi tagged. Undo the speculative addition and call the appropriate | 4683 // smi tagged. Undo the speculative addition and call the appropriate |
| (...skipping 10 matching lines...) |
| 4696 virtual void Generate(); | 4694 virtual void Generate(); |
| 4697 | 4695 |
| 4698 private: | 4696 private: |
| 4699 Register dst_; | 4697 Register dst_; |
| 4700 Smi* value_; | 4698 Smi* value_; |
| 4701 OverwriteMode overwrite_mode_; | 4699 OverwriteMode overwrite_mode_; |
| 4702 }; | 4700 }; |
| 4703 | 4701 |
| 4704 | 4702 |
| 4705 void DeferredInlineSmiAddReversed::Generate() { | 4703 void DeferredInlineSmiAddReversed::Generate() { |
| 4706 // Undo the optimistic add operation and call the shared stub. | |
| 4707 __ subq(dst_, Immediate(value_)); | |
| 4708 __ push(Immediate(value_)); | 4704 __ push(Immediate(value_)); |
| 4709 __ push(dst_); | 4705 __ push(dst_); |
| 4710 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED); | 4706 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED); |
| 4711 __ CallStub(&igostub); | 4707 __ CallStub(&igostub); |
| 4712 if (!dst_.is(rax)) __ movq(dst_, rax); | 4708 if (!dst_.is(rax)) __ movq(dst_, rax); |
| 4713 } | 4709 } |
| 4714 | 4710 |
| 4715 | 4711 |
| 4716 // The result of src - value is in dst. It either overflowed or was not | 4712 // The result of src - value is in dst. It either overflowed or was not |
| 4717 // smi tagged. Undo the speculative subtraction and call the | 4713 // smi tagged. Undo the speculative subtraction and call the |
| (...skipping 11 matching lines...) |
| 4729 virtual void Generate(); | 4725 virtual void Generate(); |
| 4730 | 4726 |
| 4731 private: | 4727 private: |
| 4732 Register dst_; | 4728 Register dst_; |
| 4733 Smi* value_; | 4729 Smi* value_; |
| 4734 OverwriteMode overwrite_mode_; | 4730 OverwriteMode overwrite_mode_; |
| 4735 }; | 4731 }; |
| 4736 | 4732 |
| 4737 | 4733 |
| 4738 void DeferredInlineSmiSub::Generate() { | 4734 void DeferredInlineSmiSub::Generate() { |
| 4739 // Undo the optimistic sub operation and call the shared stub. | |
| 4740 __ addq(dst_, Immediate(value_)); | |
| 4741 __ push(dst_); | 4735 __ push(dst_); |
| 4742 __ push(Immediate(value_)); | 4736 __ push(Immediate(value_)); |
| 4743 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED); | 4737 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED); |
| 4744 __ CallStub(&igostub); | 4738 __ CallStub(&igostub); |
| 4745 if (!dst_.is(rax)) __ movq(dst_, rax); | 4739 if (!dst_.is(rax)) __ movq(dst_, rax); |
| 4746 } | 4740 } |
| 4747 | 4741 |
| 4748 | 4742 |
| 4749 void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, | 4743 void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, |
| 4750 Result* operand, | 4744 Result* operand, |
| (...skipping 21 matching lines...) |
| 4772 return; | 4766 return; |
| 4773 } | 4767 } |
| 4774 | 4768 |
| 4775 // Get the literal value. | 4769 // Get the literal value. |
| 4776 Smi* smi_value = Smi::cast(*value); | 4770 Smi* smi_value = Smi::cast(*value); |
| 4777 | 4771 |
| 4778 switch (op) { | 4772 switch (op) { |
| 4779 case Token::ADD: { | 4773 case Token::ADD: { |
| 4780 operand->ToRegister(); | 4774 operand->ToRegister(); |
| 4781 frame_->Spill(operand->reg()); | 4775 frame_->Spill(operand->reg()); |
| 4782 | |
| 4783 // Optimistically add. Call the specialized add stub if the | |
| 4784 // result is not a smi or overflows. | |
| 4785 DeferredCode* deferred = NULL; | 4776 DeferredCode* deferred = NULL; |
| 4786 if (reversed) { | 4777 if (reversed) { |
| 4787 deferred = new DeferredInlineSmiAddReversed(operand->reg(), | 4778 deferred = new DeferredInlineSmiAddReversed(operand->reg(), |
| 4788 smi_value, | 4779 smi_value, |
| 4789 overwrite_mode); | 4780 overwrite_mode); |
| 4790 } else { | 4781 } else { |
| 4791 deferred = new DeferredInlineSmiAdd(operand->reg(), | 4782 deferred = new DeferredInlineSmiAdd(operand->reg(), |
| 4792 smi_value, | 4783 smi_value, |
| 4793 overwrite_mode); | 4784 overwrite_mode); |
| 4794 } | 4785 } |
| 4795 __ movq(kScratchRegister, value, RelocInfo::NONE); | |
| 4796 __ addl(operand->reg(), kScratchRegister); | |
| 4797 deferred->Branch(overflow); | |
| 4798 __ testl(operand->reg(), Immediate(kSmiTagMask)); | 4786 __ testl(operand->reg(), Immediate(kSmiTagMask)); |
| 4799 deferred->Branch(not_zero); | 4787 deferred->Branch(not_zero); |
| | 4788 // A smi currently fits in a 32-bit Immediate. |
| | 4789 __ addl(operand->reg(), Immediate(smi_value)); |
| | 4790 Label add_success; |
| | 4791 __ j(no_overflow, &add_success); |
| | 4792 __ subl(operand->reg(), Immediate(smi_value)); |
| | 4793 __ movsxlq(operand->reg(), operand->reg()); |
| | 4794 deferred->Jump(); |
| | 4795 __ bind(&add_success); |
| | 4796 __ movsxlq(operand->reg(), operand->reg()); |
| 4800 deferred->BindExit(); | 4797 deferred->BindExit(); |
| 4801 frame_->Push(operand); | 4798 frame_->Push(operand); |
| 4802 break; | 4799 break; |
| 4803 } | 4800 } |
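
The rewritten ADD case checks the smi tag before touching the operand, adds the 32-bit immediate, and on overflow subtracts it back and sign-extends before jumping to the deferred code, so the deferred stub sees the original value. A rough C++ model of that control flow under the same 32-bit tagged-smi assumption; SlowPathAdd and InlineSmiAdd are hypothetical stand-ins for the deferred stub call and the generated sequence, and __builtin_add_overflow (GCC/Clang) plays the role of the overflow flag:

```cpp
#include <cstdint>
#include <iostream>

// Hypothetical stand-in for the deferred GenericBinaryOpStub call; the real
// stub also handles non-smi operands and heap-number results.
int64_t SlowPathAdd(int64_t a, int64_t b) { return a + b; }

// Models the rewritten inline ADD: smi-check first, then a 32-bit add that is
// undone on overflow so the slow path receives the original operand.
int64_t InlineSmiAdd(int32_t reg, int32_t tagged_immediate) {
  if ((reg & 1) != 0) {                      // testl + Branch(not_zero): not a smi
    return SlowPathAdd(reg, tagged_immediate);
  }
  int32_t sum;                               // __builtin_add_overflow mimics the OF flag
  if (__builtin_add_overflow(reg, tagged_immediate, &sum)) {
    // The generated code has already clobbered the register, so it subtracts
    // the immediate back and sign-extends before jumping to the deferred code.
    return SlowPathAdd(reg, tagged_immediate);
  }
  return static_cast<int64_t>(sum);          // movsxlq: sign-extend the 32-bit result
}

int main() {
  std::cout << InlineSmiAdd(7 * 2, 3 * 2) / 2 << "\n";  // prints 10
}
```
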
| 4804 // TODO(X64): Move other implementations from ia32 to here. | 4801 // TODO(X64): Move other implementations from ia32 to here. |
| 4805 default: { | 4802 default: { |
| 4806 Result constant_operand(value); | 4803 Result constant_operand(value); |
| 4807 if (reversed) { | 4804 if (reversed) { |
| 4808 LikelySmiBinaryOperation(op, &constant_operand, operand, | 4805 LikelySmiBinaryOperation(op, &constant_operand, operand, |
| 4809 overwrite_mode); | 4806 overwrite_mode); |
| (...skipping 265 matching lines...) |
| 5075 } else { | 5072 } else { |
| 5076 __ movq(answer.reg(), left->reg()); | 5073 __ movq(answer.reg(), left->reg()); |
| 5077 __ or_(answer.reg(), right->reg()); | 5074 __ or_(answer.reg(), right->reg()); |
| 5078 ASSERT(kSmiTag == 0); // Adjust test if not the case. | 5075 ASSERT(kSmiTag == 0); // Adjust test if not the case. |
| 5079 __ testl(answer.reg(), Immediate(kSmiTagMask)); | 5076 __ testl(answer.reg(), Immediate(kSmiTagMask)); |
| 5080 } | 5077 } |
| 5081 deferred->Branch(not_zero); | 5078 deferred->Branch(not_zero); |
| 5082 __ movq(answer.reg(), left->reg()); | 5079 __ movq(answer.reg(), left->reg()); |
| 5083 switch (op) { | 5080 switch (op) { |
| 5084 case Token::ADD: | 5081 case Token::ADD: |
| 5085 __ addl(answer.reg(), right->reg()); // Add optimistically. | 5082 __ addl(answer.reg(), right->reg()); |
| 5086 deferred->Branch(overflow); | 5083 deferred->Branch(overflow); |
| 5087 break; | 5084 break; |
| 5088 | 5085 |
| 5089 case Token::SUB: | 5086 case Token::SUB: |
| 5090 __ subl(answer.reg(), right->reg()); // Subtract optimistically. | 5087 __ subl(answer.reg(), right->reg()); |
| 5091 deferred->Branch(overflow); | 5088 deferred->Branch(overflow); |
| 5092 break; | 5089 break; |
| 5093 | 5090 |
| 5094 case Token::MUL: { | 5091 case Token::MUL: { |
| 5095 // If the smi tag is 0 we can just leave the tag on one operand. | 5092 // If the smi tag is 0 we can just leave the tag on one operand. |
| 5096 ASSERT(kSmiTag == 0); // Adjust code below if not the case. | 5093 ASSERT(kSmiTag == 0); // Adjust code below if not the case. |
| 5097 // Remove smi tag from the left operand (but keep sign). | 5094 // Remove smi tag from the left operand (but keep sign). |
| 5098 // Left-hand operand has been copied into answer. | 5095 // Left-hand operand has been copied into answer. |
| 5099 __ sar(answer.reg(), Immediate(kSmiTagSize)); | 5096 __ sar(answer.reg(), Immediate(kSmiTagSize)); |
| 5100 // Do multiplication of smis, leaving result in answer. | 5097 // Do multiplication of smis, leaving result in answer. |
| (...skipping 1392 matching lines...) |
| 6493 case Token::SHL: return "GenericBinaryOpStub_SHL"; | 6490 case Token::SHL: return "GenericBinaryOpStub_SHL"; |
| 6494 case Token::SHR: return "GenericBinaryOpStub_SHR"; | 6491 case Token::SHR: return "GenericBinaryOpStub_SHR"; |
| 6495 default: return "GenericBinaryOpStub"; | 6492 default: return "GenericBinaryOpStub"; |
| 6496 } | 6493 } |
| 6497 } | 6494 } |
| 6498 | 6495 |
| 6499 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { | 6496 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { |
| 6500 // Perform fast-case smi code for the operation (rax <op> rbx) and | 6497 // Perform fast-case smi code for the operation (rax <op> rbx) and |
| 6501 // leave result in register rax. | 6498 // leave result in register rax. |
| 6502 | 6499 |
| 6503 // Prepare the smi check of both operands by or'ing them together | 6500 // Smi check both operands. |
| 6504 // before checking against the smi mask. | |
| 6505 __ movq(rcx, rbx); | 6501 __ movq(rcx, rbx); |
| 6506 __ or_(rcx, rax); | 6502 __ or_(rcx, rax); |
| 6507 | |
| 6508 switch (op_) { | |
| 6509 case Token::ADD: | |
| 6510 __ addl(rax, rbx); // add optimistically | |
| 6511 __ j(overflow, slow); | |
| 6512 __ movsxlq(rax, rax); // Sign extend eax into rax. | |
| 6513 break; | |
| 6514 | |
| 6515 case Token::SUB: | |
| 6516 __ subl(rax, rbx); // subtract optimistically | |
| 6517 __ j(overflow, slow); | |
| 6518 __ movsxlq(rax, rax); // Sign extend eax into rax. | |
| 6519 break; | |
| 6520 | |
| 6521 case Token::DIV: | |
| 6522 case Token::MOD: | |
| 6523 // Sign extend rax into rdx:rax | |
| 6524 // (also sign extends eax into edx if eax is Smi). | |
| 6525 __ cqo(); | |
| 6526 // Check for 0 divisor. | |
| 6527 __ testq(rbx, rbx); | |
| 6528 __ j(zero, slow); | |
| 6529 break; | |
| 6530 | |
| 6531 default: | |
| 6532 // Fall-through to smi check. | |
| 6533 break; | |
| 6534 } | |
| 6535 | |
| 6536 // Perform the actual smi check. | |
| 6537 ASSERT(kSmiTag == 0); // adjust zero check if not the case | |
| 6538 __ testl(rcx, Immediate(kSmiTagMask)); | 6503 __ testl(rcx, Immediate(kSmiTagMask)); |
| 6539 __ j(not_zero, slow); | 6504 __ j(not_zero, slow); |
| 6540 | 6505 |
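
The smi check now sits at the top of GenerateSmiCode: OR-ing the two tagged words and testing the tag bit of the result checks both operands with a single test, because the tag bit of a | b is clear exactly when both tag bits are clear. A tiny self-contained illustration (names are illustrative):

```cpp
#include <cassert>
#include <cstdint>

constexpr int32_t kSmiTagMask = 1;  // low bit is the tag; 0 means smi

// One test instead of two: the tag bit of (a | b) is clear
// exactly when the tag bits of both a and b are clear.
bool BothSmis(int32_t a, int32_t b) {
  return ((a | b) & kSmiTagMask) == 0;
}

int main() {
  assert(BothSmis(4, -6));    // two tagged smis (even words)
  assert(!BothSmis(4, 7));    // 7 has the tag bit set: not a smi
  assert(!BothSmis(5, 7));    // neither is a smi
  return 0;
}
```
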
| 6541 switch (op_) { | 6506 switch (op_) { |
| 6542 case Token::ADD: | 6507 case Token::ADD: { |
| 6543 case Token::SUB: | 6508 __ addl(rax, rbx); |
| 6544 // Do nothing here. | 6509 __ j(overflow, slow); // The slow case rereads operands from the stack. |
| | 6510 __ movsxlq(rax, rax); // Sign extend eax into rax. |
| 6545 break; | 6511 break; |
| | 6512 } |
| | 6513 |
| | 6514 case Token::SUB: { |
| | 6515 __ subl(rax, rbx); |
| | 6516 __ j(overflow, slow); // The slow case rereads operands from the stack. |
| | 6517 __ movsxlq(rax, rax); // Sign extend eax into rax. |
| | 6518 break; |
| | 6519 } |
| 6546 | 6520 |
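
Because the stub still has both operands on the stack, the ADD and SUB fast paths can clobber their register copies and simply branch to the slow label on overflow; as the new comments note, the slow case rereads the operands from the stack, so no rollback is required. A schematic sketch under that assumption, with hypothetical names:

```cpp
#include <cstdint>
#include <vector>

// Hypothetical stand-in for the slow case: it rereads the operands from the
// stack, so the fast path never needs to restore its register copies.
int64_t SlowCase(const std::vector<int64_t>& stack) { return stack[0] + stack[1]; }

int64_t StubSmiAdd(const std::vector<int64_t>& stack) {
  int32_t rax = static_cast<int32_t>(stack[0]);        // register copies of the operands
  int32_t rbx = static_cast<int32_t>(stack[1]);
  if (((rax | rbx) & 1) != 0) return SlowCase(stack);  // combined smi check
  int32_t sum;
  if (__builtin_add_overflow(rax, rbx, &sum)) {
    return SlowCase(stack);                            // bail; nothing to undo
  }
  return static_cast<int64_t>(sum);                    // movsxlq of the 32-bit result
}

int main() {
  std::vector<int64_t> stack = {10 * 2, 4 * 2};        // two tagged smis
  return StubSmiAdd(stack) == 28 ? 0 : 1;              // tagged 14
}
```
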
| 6547 case Token::MUL: | 6521 case Token::MUL: |
| 6548 // If the smi tag is 0 we can just leave the tag on one operand. | 6522 // If the smi tag is 0 we can just leave the tag on one operand. |
| 6549 ASSERT(kSmiTag == 0); // adjust code below if not the case | 6523 ASSERT(kSmiTag == 0); // adjust code below if not the case |
| 6550 // Remove tag from one of the operands (but keep sign). | 6524 // Remove tag from one of the operands (but keep sign). |
| 6551 __ sar(rax, Immediate(kSmiTagSize)); | 6525 __ sar(rax, Immediate(kSmiTagSize)); |
| 6552 // Do multiplication. | 6526 // Do multiplication. |
| 6553 __ imull(rax, rbx); // multiplication of smis; result in eax | 6527 __ imull(rax, rbx); // multiplication of smis; result in eax |
| 6554 // Go slow on overflows. | 6528 // Go slow on overflows. |
| 6555 __ j(overflow, slow); | 6529 __ j(overflow, slow); |
| 6556 // Check for negative zero result. | 6530 // Check for negative zero result. |
| 6557 __ movsxlq(rax, rax); // Sign extend eax into rax. | 6531 __ movsxlq(rax, rax); // Sign extend eax into rax. |
| 6558 __ NegativeZeroTest(rax, rcx, slow); // use rcx = x | y | 6532 __ NegativeZeroTest(rax, rcx, slow); // use rcx = x | y |
| 6559 break; | 6533 break; |
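
The negative-zero check matters because JavaScript distinguishes -0 from +0: 0 * -5 must evaluate to -0, while the integer product is plain 0, so such results have to leave the fast path. A hedged sketch of the predicate implemented by NegativeZeroTest with rcx = x | y; NeedsNegativeZeroSlowCase is an illustrative name:

```cpp
#include <cassert>
#include <cstdint>

// JavaScript keeps -0 distinct from +0, and an integer multiply cannot
// represent -0, so a zero product coming from a negative factor must be
// handed to the slow path (which can allocate a heap-number -0).
bool NeedsNegativeZeroSlowCase(int32_t result, int32_t x, int32_t y) {
  return result == 0 && (x | y) < 0;  // zero result and some negative input
}

int main() {
  assert(NeedsNegativeZeroSlowCase(0 * -5, 0, -5));   // 0 * -5 is -0 in JS
  assert(!NeedsNegativeZeroSlowCase(0 * 5, 0, 5));    // +0 stays on the fast path
  assert(!NeedsNegativeZeroSlowCase(2 * -3, 2, -3));  // non-zero results are fine
  return 0;
}
```
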
| 6560 | 6534 |
| 6561 case Token::DIV: | 6535 case Token::DIV: |
| | 6536 // Sign extend rax into rdx:rax |
| | 6537 // (also sign extends eax into edx if eax is Smi). |
| | 6538 __ cqo(); |
| | 6539 // Check for 0 divisor. |
| | 6540 __ testq(rbx, rbx); |
| | 6541 __ j(zero, slow); |
| 6562 // Divide rdx:rax by rbx (where rdx:rax is equivalent to the smi in eax). | 6542 // Divide rdx:rax by rbx (where rdx:rax is equivalent to the smi in eax). |
| 6563 __ idiv(rbx); | 6543 __ idiv(rbx); |
| 6564 // Check that the remainder is zero. | 6544 // Check that the remainder is zero. |
| 6565 __ testq(rdx, rdx); | 6545 __ testq(rdx, rdx); |
| 6566 __ j(not_zero, slow); | 6546 __ j(not_zero, slow); |
| 6567 // Check for the corner case of dividing the most negative smi | 6547 // Check for the corner case of dividing the most negative smi |
| 6568 // by -1. We cannot use the overflow flag, since it is not set | 6548 // by -1. We cannot use the overflow flag, since it is not set |
| 6569 // by idiv instruction. | 6549 // by idiv instruction. |
| 6570 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); | 6550 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); |
| 6571 // TODO(X64): TODO(Smi): Smi implementation dependent constant. | 6551 // TODO(X64): TODO(Smi): Smi implementation dependent constant. |
| 6572 // Value is Smi::fromInt(-(1<<31)) / Smi::fromInt(-1) | 6552 // Value is Smi::fromInt(-(1<<31)) / Smi::fromInt(-1) |
| 6573 __ cmpq(rax, Immediate(0x40000000)); | 6553 __ cmpq(rax, Immediate(0x40000000)); |
| 6574 __ j(equal, slow); | 6554 __ j(equal, slow); |
| 6575 // Check for negative zero result. | 6555 // Check for negative zero result. |
| 6576 __ NegativeZeroTest(rax, rcx, slow); // use ecx = x | y | 6556 __ NegativeZeroTest(rax, rcx, slow); // use ecx = x | y |
| 6577 // Tag the result and store it in register rax. | 6557 // Tag the result and store it in register rax. |
| 6578 ASSERT(kSmiTagSize == times_2); // adjust code if not the case | 6558 ASSERT(kSmiTagSize == times_2); // adjust code if not the case |
| 6579 __ lea(rax, Operand(rax, rax, times_1, kSmiTag)); | 6559 __ lea(rax, Operand(rax, rax, times_1, kSmiTag)); |
| 6580 break; | 6560 break; |
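
idiv does not set the overflow flag, so the one quotient that cannot be re-tagged is checked explicitly. Assuming 32-bit smis with a one-bit tag, smi values span [-2^30, 2^30 - 1]; dividing the most negative smi by -1 gives +2^30, the 0x40000000 compared against above, which would overflow when shifted back into smi form. A small numeric check of that corner case under the same assumption:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // Assuming 32-bit smis with a one-bit tag: a smi is its value doubled,
  // so the representable value range is [-2^30, 2^30 - 1].
  const int64_t kMinSmiValue = -(int64_t{1} << 30);
  const int64_t kMaxSmiValue = (int64_t{1} << 30) - 1;

  // idiv of the tagged words: tagged(a) / tagged(b) == a / b (the tags cancel).
  int64_t tagged_dividend = kMinSmiValue * 2;  // 0x80000000 as a 32-bit pattern
  int64_t tagged_divisor = -1 * 2;
  int64_t quotient = tagged_dividend / tagged_divisor;

  // The quotient is +2^30 == 0x40000000, one past the largest smi value, so it
  // cannot be re-tagged into 32 bits; idiv sets no overflow flag, hence the
  // explicit cmp against 0x40000000 before tagging.
  assert(quotient == 0x40000000);
  assert(quotient > kMaxSmiValue);
  return 0;
}
```
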
| 6581 | 6561 |
| 6582 case Token::MOD: | 6562 case Token::MOD: |
| | 6563 // Sign extend rax into rdx:rax |
| | 6564 // (also sign extends eax into edx if eax is Smi). |
| | 6565 __ cqo(); |
| | 6566 // Check for 0 divisor. |
| | 6567 __ testq(rbx, rbx); |
| | 6568 __ j(zero, slow); |
| 6583 // Divide rdx:rax by rbx. | 6569 // Divide rdx:rax by rbx. |
| 6584 __ idiv(rbx); | 6570 __ idiv(rbx); |
| 6585 // Check for negative zero result. | 6571 // Check for negative zero result. |
| 6586 __ NegativeZeroTest(rdx, rcx, slow); // use ecx = x | y | 6572 __ NegativeZeroTest(rdx, rcx, slow); // use ecx = x | y |
| 6587 // Move remainder to register rax. | 6573 // Move remainder to register rax. |
| 6588 __ movq(rax, rdx); | 6574 __ movq(rax, rdx); |
| 6589 break; | 6575 break; |
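
MOD has its own negative-zero hazard: in JavaScript the sign of a % b follows the dividend, so a zero remainder from a negative dividend is -0, which the integer remainder in rdx cannot express. A brief illustration of the rule; the helper name is hypothetical, and the stub's actual test (x | y via rcx) is slightly more conservative, bailing whenever either operand is negative:

```cpp
#include <cassert>
#include <cstdint>

// In JavaScript the sign of a % b follows the dividend, so a zero remainder
// from a negative dividend is -0 and must be produced on the slow path.
bool RemainderMayBeNegativeZero(int32_t remainder, int32_t dividend) {
  return remainder == 0 && dividend < 0;
}

int main() {
  assert(RemainderMayBeNegativeZero(-4 % 2, -4));   // (-4) % 2 is -0 in JS
  assert(!RemainderMayBeNegativeZero(4 % 2, 4));    // 4 % 2 is +0
  assert(!RemainderMayBeNegativeZero(-5 % 2, -5));  // non-zero remainder is fine
  return 0;
}
```
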
| 6590 | 6576 |
| 6591 case Token::BIT_OR: | 6577 case Token::BIT_OR: |
| 6592 __ or_(rax, rbx); | 6578 __ or_(rax, rbx); |
| (...skipping 301 matching lines...) |
| 6894 int CompareStub::MinorKey() { | 6880 int CompareStub::MinorKey() { |
| 6895 // Encode the two parameters in a unique 16 bit value. | 6881 // Encode the two parameters in a unique 16 bit value. |
| 6896 ASSERT(static_cast<unsigned>(cc_) < (1 << 15)); | 6882 ASSERT(static_cast<unsigned>(cc_) < (1 << 15)); |
| 6897 return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0); | 6883 return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0); |
| 6898 } | 6884 } |
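
MinorKey packs the condition code and the strict flag into one small integer so every (cc, strict) pair maps to a distinct stub key, and the assert keeps cc_ within 15 bits. A tiny self-contained illustration of the packing and an illustrative inverse:

```cpp
#include <cassert>
#include <utility>

// Pack a condition code (< 2^15) and the strict flag into one 16-bit value,
// mirroring (cc_ << 1) | (strict_ ? 1 : 0) in CompareStub::MinorKey().
int PackKey(unsigned cc, bool strict) {
  assert(cc < (1u << 15));
  return static_cast<int>((cc << 1) | (strict ? 1 : 0));
}

std::pair<unsigned, bool> UnpackKey(int key) {  // illustrative inverse
  return {static_cast<unsigned>(key) >> 1, (key & 1) != 0};
}

int main() {
  int key = PackKey(7, true);
  auto [cc, strict] = UnpackKey(key);
  assert(cc == 7 && strict);
  return 0;
}
```
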
| 6899 | 6885 |
| 6900 | 6886 |
| 6901 #undef __ | 6887 #undef __ |
| 6902 | 6888 |
| 6903 } } // namespace v8::internal | 6889 } } // namespace v8::internal |
| OLD | NEW |