OLD | NEW |
---|---|
1 // Copyright 2006-2009 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 4695 matching lines...)
4706 __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset)); | 4706 __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset)); |
4707 __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + 4)); | 4707 __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + 4)); |
4708 #endif | 4708 #endif |
4709 __ mov(r0, Operand(r4)); | 4709 __ mov(r0, Operand(r4)); |
4710 // And we are done. | 4710 // And we are done. |
4711 __ pop(pc); | 4711 __ pop(pc); |
4712 } | 4712 } |
4713 | 4713 |
4714 | 4714 |
4715 // Tries to get a signed int32 out of a double precision floating point heap | 4715 // Tries to get a signed int32 out of a double precision floating point heap |
4716 // number. Rounds towards 0. Only succeeds for doubles that are in the ranges | 4716 // number. Rounds towards 0. Fastest for doubles that are in the ranges |
4717 // -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds | 4717 // -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds |
4718 // almost to the range of signed int32 values that are not Smis. Jumps to the | 4718 // almost to the range of signed int32 values that are not Smis. Jumps to the |
4719 // label if the double isn't in the range it can cope with. | 4719 // label if the double isn't in the range -0x80000000 to 0x80000000 (excluding |
4720 // the endpoints). | |
William Hesse, 2009/06/16 09:33:16: the label slow
4720 static void GetInt32(MacroAssembler* masm, | 4721 static void GetInt32(MacroAssembler* masm, |
4721 Register source, | 4722 Register source, |
4722 Register dest, | 4723 Register dest, |
4723 Register scratch, | 4724 Register scratch, |
4725 Register scratch2, | |
4724 Label* slow) { | 4726 Label* slow) { |
4725 Register scratch2 = dest; | 4727 Label right_exponent, done; |
4726 // Get exponent word. | 4728 // Get exponent word. |
4727 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset)); | 4729 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset)); |
4728 // Get exponent alone in scratch2. | 4730 // Get exponent alone in scratch2. |
4729 __ and_(scratch2, scratch, Operand(HeapNumber::kExponentMask)); | 4731 __ and_(scratch2, scratch, Operand(HeapNumber::kExponentMask)); |
4732 // Load dest with zero. We use this either for the final shift or | |
4733 // for the answer. | |
4734 __ mov(dest, Operand(0)); | |
4730 // Check whether the exponent matches a 32 bit signed int that is not a Smi. | 4735 // Check whether the exponent matches a 32 bit signed int that is not a Smi. |
4731 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). | 4736 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is |
4737 // the exponent that we are fastest at and also the highest exponent we can | |
4738 // handle here. | |
4732 const uint32_t non_smi_exponent = | 4739 const uint32_t non_smi_exponent = |
4733 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; | 4740 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; |
4734 __ cmp(scratch2, Operand(non_smi_exponent)); | 4741 __ cmp(scratch2, Operand(non_smi_exponent)); |
4735 // If not, then we go slow. | 4742 // If we have a match of the int32-but-not-Smi exponent then skip some logic. |
4736 __ b(ne, slow); | 4743 __ b(eq, &right_exponent); |
4744 // If the exponent is higher than that then go to slow case. This catches | |
4745 // numbers that don't fit in a signed int32, infinities and NaNs. | |
4746 __ b(gt, slow); | |
4747 | |
4748 // We know the exponent is smaller than 30 (biased). If it is less than | |
4749 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie | |
4750 // it rounds to zero. | |
4751 const uint32_t zero_exponent = | |
4752 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; | |
4753 __ sub(scratch2, scratch2, Operand(zero_exponent), SetCC); | |
4754 // Dest already has a Smi zero. | |
4755 __ b(lt, &done); | |
4756 // We have a shifted exponent between 0 and 30 in scratch2. | |
4757 __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift)); | |
4758 // We now have the exponent in dest. Subtract from 30 to get | |
4759 // how much to shift down. | |
4760 __ rsb(dest, dest, Operand(30)); | |
4761 | |
4762 __ bind(&right_exponent); | |
4737 // Get the top bits of the mantissa. | 4763 // Get the top bits of the mantissa. |
4738 __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask)); | 4764 __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask)); |
4739 // Put back the implicit 1. | 4765 // Put back the implicit 1. |
4740 __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift)); | 4766 __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift)); |
4741 // Shift up the mantissa bits to take up the space the exponent used to take. | 4767 // Shift up the mantissa bits to take up the space the exponent used to take. |
4742 // We just orred in the implicit bit so that took care of one and we want to | 4768 // We just orred in the implicit bit so that took care of one and we want to |
4743 // leave the sign bit 0 so we subtract 2 bits from the shift distance. | 4769 // leave the sign bit 0 so we subtract 2 bits from the shift distance. |
4744 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; | 4770 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; |
4745 __ mov(scratch2, Operand(scratch2, LSL, shift_distance)); | 4771 __ mov(scratch2, Operand(scratch2, LSL, shift_distance)); |
4746 // Put sign in zero flag. | 4772 // Put sign in zero flag. |
4747 __ tst(scratch, Operand(HeapNumber::kSignMask)); | 4773 __ tst(scratch, Operand(HeapNumber::kSignMask)); |
4748 // Get the second half of the double. | 4774 // Get the second half of the double. For some exponents we don't actually |
4775 // need this because the bits get shifted out again, but it's probably slower | |
4776 // to test than just to do it. | |
4749 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); | 4777 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); |
4750 // Shift down 22 bits to get the last 10 bits. | 4778 // Shift down 22 bits to get the last 10 bits. |
4751 __ orr(dest, scratch2, Operand(scratch, LSR, 32 - shift_distance)); | 4779 __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance)); |
4780 // Move down according to the exponent. | |
4781 __ mov(dest, Operand(scratch, LSR, dest)); | |
4752 // Fix sign if sign bit was set. | 4782 // Fix sign if sign bit was set. |
4753 __ rsb(dest, dest, Operand(0), LeaveCC, ne); | 4783 __ rsb(dest, dest, Operand(0), LeaveCC, ne); |
4784 __ bind(&done); | |
4754 } | 4785 } |
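For reference, a rough C++ model of what the rewritten GetInt32 computes (illustrative only: `GetInt32Model` is not V8 code, and the real stub reads a tagged heap number's two words rather than a C++ double). Returning false corresponds to jumping to the `slow` label:

```cpp
#include <cstdint>
#include <cstring>

// Sketch of the stub's double -> int32 truncation, assuming the IEEE-754
// layout the ARM code reads: high word = sign | 11 exponent bits | top
// 20 mantissa bits, low word = remaining 32 mantissa bits.
bool GetInt32Model(double value, int32_t* result) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  int32_t exponent = static_cast<int32_t>((hi >> 20) & 0x7FF) - 1023;
  if (exponent > 30) return false;                 // too large, Infinity or NaN
  if (exponent < 0) { *result = 0; return true; }  // |value| < 1 rounds to 0
  // Restore the implicit leading 1, then shift the 53-bit significand
  // down by (52 - exponent), which truncates towards zero.
  uint64_t significand = (bits & 0x000FFFFFFFFFFFFFull) | (1ull << 52);
  uint32_t magnitude = static_cast<uint32_t>(significand >> (52 - exponent));
  *result = (hi & 0x80000000u) ? -static_cast<int32_t>(magnitude)
                               : static_cast<int32_t>(magnitude);
  return true;
}
```

The generated code reaches the same result in two steps: it packs the top 31 significand bits into one register at the exponent-30 scale, then applies the remaining `LSR` by the `30 - exponent` amount held in `dest`; nested truncating shifts give the same value as the single shift above.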
4755 | 4786 |
4756 | 4787 |
4757 // For bitwise ops where the inputs are not both Smis we here try to determine | 4788 // For bitwise ops where the inputs are not both Smis we here try to determine |
4758 // whether both inputs are either Smis or at least heap numbers that can be | 4789 // whether both inputs are either Smis or at least heap numbers that can be |
4759 // represented by a 32 bit signed value. We truncate towards zero as required | 4790 // represented by a 32 bit signed value. We truncate towards zero as required |
4760 // by the ES spec. If this is the case we do the bitwise op and see if the | 4791 // by the ES spec. If this is the case we do the bitwise op and see if the |
4761 // result is a Smi. If so, great, otherwise we try to find a heap number to | 4792 // result is a Smi. If so, great, otherwise we try to find a heap number to |
4762 // write the answer into (either by allocating or by overwriting). | 4793 // write the answer into (either by allocating or by overwriting). |
4763 // On entry the operands are in r0 and r1. On exit the answer is in r0. | 4794 // On entry the operands are in r0 and r1. On exit the answer is in r0. |
4764 void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) { | 4795 void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) { |
4765 Label slow, result_not_a_smi; | 4796 Label slow, result_not_a_smi; |
4766 Label r0_is_smi, r1_is_smi; | 4797 Label r0_is_smi, r1_is_smi; |
4767 Label done_checking_r0, done_checking_r1; | 4798 Label done_checking_r0, done_checking_r1; |
4768 | 4799 |
4769 __ tst(r1, Operand(kSmiTagMask)); | 4800 __ tst(r1, Operand(kSmiTagMask)); |
4770 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. | 4801 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. |
4771 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE); | 4802 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE); |
4772 __ b(ne, &slow); | 4803 __ b(ne, &slow); |
4773 GetInt32(masm, r1, r3, r4, &slow); | 4804 GetInt32(masm, r1, r3, r4, r5, &slow); |
4774 __ jmp(&done_checking_r1); | 4805 __ jmp(&done_checking_r1); |
4775 __ bind(&r1_is_smi); | 4806 __ bind(&r1_is_smi); |
4776 __ mov(r3, Operand(r1, ASR, 1)); | 4807 __ mov(r3, Operand(r1, ASR, 1)); |
4777 __ bind(&done_checking_r1); | 4808 __ bind(&done_checking_r1); |
4778 | 4809 |
4779 __ tst(r0, Operand(kSmiTagMask)); | 4810 __ tst(r0, Operand(kSmiTagMask)); |
4780 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number. | 4811 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number. |
4781 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); | 4812 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); |
4782 __ b(ne, &slow); | 4813 __ b(ne, &slow); |
4783 GetInt32(masm, r0, r2, r4, &slow); | 4814 GetInt32(masm, r0, r2, r4, r5, &slow); |
4784 __ jmp(&done_checking_r0); | 4815 __ jmp(&done_checking_r0); |
4785 __ bind(&r0_is_smi); | 4816 __ bind(&r0_is_smi); |
4786 __ mov(r2, Operand(r0, ASR, 1)); | 4817 __ mov(r2, Operand(r0, ASR, 1)); |
4787 __ bind(&done_checking_r0); | 4818 __ bind(&done_checking_r0); |
4788 | 4819 |
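In the two checking blocks above, the Smi cases untag with a single arithmetic shift. As a hedged sketch (a hypothetical helper, not an actual MacroAssembler or V8 function), the check-and-untag done by `tst(rN, Operand(kSmiTagMask))` followed by `mov(rM, Operand(rN, ASR, 1))` amounts to:

```cpp
#include <cstdint>

// On 32-bit ARM at the time of this CL, a Smi is the integer shifted
// left one bit with a 0 tag bit, so kSmiTagMask is 1. Assumes the
// compiler emits an arithmetic right shift for signed values, as the
// ASR in the stub does.
bool UntagIfSmi(intptr_t tagged, int32_t* out) {
  if ((tagged & 1) == 0) {                     // tag bit clear: it is a Smi
    *out = static_cast<int32_t>(tagged >> 1);  // arithmetic shift drops tag
    return true;
  }
  return false;  // heap object: the stub takes the GetInt32 or slow path
}
```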
4789 // r0 and r1: Original operands (Smi or heap numbers). | 4820 // r0 and r1: Original operands (Smi or heap numbers). |
4790 // r2 and r3: Signed int32 operands. | 4821 // r2 and r3: Signed int32 operands. |
4791 switch (op_) { | 4822 switch (op_) { |
4792 case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break; | 4823 case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break; |
4793 case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break; | 4824 case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break; |
(...skipping 868 matching lines...)
5662 __ mov(r2, Operand(0)); | 5693 __ mov(r2, Operand(0)); |
5663 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); | 5694 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); |
5664 __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)), | 5695 __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)), |
5665 RelocInfo::CODE_TARGET); | 5696 RelocInfo::CODE_TARGET); |
5666 } | 5697 } |
5667 | 5698 |
5668 | 5699 |
5669 #undef __ | 5700 #undef __ |
5670 | 5701 |
5671 } } // namespace v8::internal | 5702 } } // namespace v8::internal |