OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 811 matching lines...) |
822 void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) { | 822 void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) { |
823 if (dst.code() < 16) { | 823 if (dst.code() < 16) { |
824 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code()); | 824 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code()); |
825 vmov(loc.low(), src); | 825 vmov(loc.low(), src); |
826 } else { | 826 } else { |
827 vmov(dst, VmovIndexLo, src); | 827 vmov(dst, VmovIndexLo, src); |
828 } | 828 } |
829 } | 829 } |
830 | 830 |
831 | 831 |
832 void MacroAssembler::ConvertNumberToInt32(Register object, | |
833 Register dst, | |
834 Register heap_number_map, | |
835 Register scratch1, | |
836 Register scratch2, | |
837 Register scratch3, | |
838 DwVfpRegister double_scratch1, | |
839 LowDwVfpRegister double_scratch2, | |
840 Label* not_number) { | |
841 Label done; | |
842 UntagAndJumpIfSmi(dst, object, &done); | |
843 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); | |
844 vldr(double_scratch1, FieldMemOperand(object, HeapNumber::kValueOffset)); | |
845 ECMAToInt32(dst, double_scratch1, | |
846 scratch1, scratch2, scratch3, double_scratch2); | |
847 | |
848 bind(&done); | |
849 } | |
850 | |
851 | |
852 void MacroAssembler::LoadNumber(Register object, | 832 void MacroAssembler::LoadNumber(Register object, |
853 LowDwVfpRegister dst, | 833 LowDwVfpRegister dst, |
854 Register heap_number_map, | 834 Register heap_number_map, |
855 Register scratch, | 835 Register scratch, |
856 Label* not_number) { | 836 Label* not_number) { |
857 Label is_smi, done; | 837 Label is_smi, done; |
858 | 838 |
859 UntagAndJumpIfSmi(scratch, object, &is_smi); | 839 UntagAndJumpIfSmi(scratch, object, &is_smi); |
860 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number); | 840 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number); |
861 | 841 |
(...skipping 1669 matching lines...) |
2531 // If x is a non-integer negative number, | 2511 // If x is a non-integer negative number, |
2532 // floor(x) <=> round_to_zero(x) - 1. | 2512 // floor(x) <=> round_to_zero(x) - 1. |
2533 bind(&negative); | 2513 bind(&negative); |
2534 sub(result, result, Operand(1), SetCC); | 2514 sub(result, result, Operand(1), SetCC); |
2535 // If the result is still negative, go to done; the result is correct. | 2515 // If the result is still negative, go to done; the result is correct. |
2536 // Otherwise we overflowed and fall through to the exception path. | 2516 // Otherwise we overflowed and fall through to the exception path. |
2537 b(mi, done); | 2517 b(mi, done); |
2538 bind(&exception); | 2518 bind(&exception); |
2539 } | 2519 } |
2540 | 2520 |
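Worked example for the floor comment above: for a negative non-integer x, floor(x) equals round_to_zero(x) - 1, and the SetCC + b(mi) pair catches the INT_MIN overflow. A minimal standalone check in C++ (illustrative only, not part of the patch):

    #include <cassert>
    #include <cmath>

    int main() {
      double x = -2.3;
      int trunc = static_cast<int>(x);     // round toward zero: -2
      assert(std::floor(x) == trunc - 1);  // floor(-2.3) == -3.0
      // The assembly's sub(..., SetCC) + b(mi, done) handles overflow:
      // when round_to_zero(x) is INT_MIN, subtracting 1 wraps to a
      // non-negative value, mi fails, and control reaches &exception.
      return 0;
    }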
2541 | 2521 void MacroAssembler::TryInlineTruncateDoubleToI(Register result, |
2542 void MacroAssembler::ECMAToInt32(Register result, | 2522 DwVfpRegister double_input, |
2543 DwVfpRegister double_input, | 2523 Label* done) { |
2544 Register scratch, | 2524 LowDwVfpRegister double_scratch = kScratchDoubleReg; |
2545 Register scratch_high, | |
2546 Register scratch_low, | |
2547 LowDwVfpRegister double_scratch) { | |
2548 ASSERT(!scratch_high.is(result)); | |
2549 ASSERT(!scratch_low.is(result)); | |
2550 ASSERT(!scratch_low.is(scratch_high)); | |
2551 ASSERT(!scratch.is(result) && | |
2552 !scratch.is(scratch_high) && | |
2553 !scratch.is(scratch_low)); | |
2554 ASSERT(!double_input.is(double_scratch)); | |
2555 | |
2556 Label out_of_range, only_low, negate, done; | |
2557 | |
2558 vcvt_s32_f64(double_scratch.low(), double_input); | 2525 vcvt_s32_f64(double_scratch.low(), double_input); |
2559 vmov(result, double_scratch.low()); | 2526 vmov(result, double_scratch.low()); |
2560 | 2527 |
2561 // If result is not saturated (0x7fffffff or 0x80000000), we are done. | 2528 // If result is not saturated (0x7fffffff or 0x80000000), we are done. |
2562 sub(scratch, result, Operand(1)); | 2529 sub(ip, result, Operand(1)); |
2563 cmp(scratch, Operand(0x7ffffffe)); | 2530 cmp(ip, Operand(0x7ffffffe)); |
2564 b(lt, &done); | 2531 b(lt, done); |
| 2532 } |
2565 | 2533 |
2566 vmov(scratch_low, scratch_high, double_input); | |
2567 Ubfx(scratch, scratch_high, | |
2568 HeapNumber::kExponentShift, HeapNumber::kExponentBits); | |
2569 // Load scratch with exponent - 1. This is faster than loading | |
2570 // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value. | |
2571 sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1)); | |
2572 // If the exponent is greater than or equal to 84, the 32 least |
2573 // significant bits are all 0s (the implicit 1 and 52 mantissa bits are |
2574 // followed by at least 32 zero bits), so the result is 0. |
2575 // Compare exponent with 84 (compare exponent - 1 with 83). | |
2576 cmp(scratch, Operand(83)); | |
2577 b(ge, &out_of_range); | |
2578 | 2534 |
2579 // If we reach this code, 31 <= exponent <= 83. | 2535 void MacroAssembler::TruncateDoubleToI(Register result, |
2580 // So, we don't have to handle cases where 0 <= exponent <= 20 for | 2536 DwVfpRegister double_input) { |
2581 // which we would need to shift right the high part of the mantissa. | 2537 Label done; |
2582 // Scratch contains exponent - 1. | |
2583 // Load scratch with 52 - exponent (load with 51 - (exponent - 1)). | |
2584 rsb(scratch, scratch, Operand(51), SetCC); | |
2585 b(ls, &only_low); | |
2586 // 21 <= exponent <= 51, shift scratch_low and scratch_high | |
2587 // to generate the result. | |
2588 mov(scratch_low, Operand(scratch_low, LSR, scratch)); | |
2589 // Scratch contains: 52 - exponent. | |
2590 // We need: exponent - 20. |
2591 // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20. | |
2592 rsb(scratch, scratch, Operand(32)); | |
2593 Ubfx(result, scratch_high, | |
2594 0, HeapNumber::kMantissaBitsInTopWord); | |
2595 // Set the implicit 1 before the mantissa part in scratch_high. | |
2596 orr(result, result, Operand(1 << HeapNumber::kMantissaBitsInTopWord)); | |
2597 orr(result, scratch_low, Operand(result, LSL, scratch)); | |
2598 b(&negate); | |
2599 | 2538 |
2600 bind(&out_of_range); | 2539 TryInlineTruncateDoubleToI(result, double_input, &done); |
2601 mov(result, Operand::Zero()); | |
2602 b(&done); | |
2603 | 2540 |
2604 bind(&only_low); | 2541 // If we fell through, the inline version didn't succeed; call the stub instead. |
2605 // 52 <= exponent <= 83, shift only scratch_low. | 2542 push(lr); |
2606 // On entry, scratch contains: 52 - exponent. | 2543 sub(sp, sp, Operand(kDoubleSize)); // Put input on stack. |
2607 rsb(scratch, scratch, Operand::Zero()); | 2544 vstr(double_input, MemOperand(sp, 0)); |
2608 mov(result, Operand(scratch_low, LSL, scratch)); | |
2609 | 2545 |
2610 bind(&negate); | 2546 DoubleToIStub stub(sp, result, 0, true, true); |
2611 // If the input was positive, scratch_high ASR 31 equals 0 and | 2547 CallStub(&stub); |
2612 // scratch_high LSR 31 equals 0. | 2548 |
2613 // New result = (result eor 0) + 0 = result. | 2549 add(sp, sp, Operand(kDoubleSize)); |
2614 // If the input was negative, we have to negate the result. | 2550 pop(lr); |
2615 // scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1. |
2616 // New result = (result eor 0xffffffff) + 1 = 0 - result. | |
2617 eor(result, result, Operand(scratch_high, ASR, 31)); | |
2618 add(result, result, Operand(scratch_high, LSR, 31)); | |
2619 | 2551 |
2620 bind(&done); | 2552 bind(&done); |
2621 } | 2553 } |
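For reference, the deleted ECMAToInt32 slow path computes ECMA-262 ToInt32 by hand: split the double into its two 32-bit words, branch on the unbiased exponent, keep only the low 32 bits of the truncated integer, then negate via the eor/add pair using the sign bit. A portable sketch of the same arithmetic (illustrative C++ under IEEE-754 assumptions; the helper name is mine, and it presumes the saturating-vcvt fast path already handled small exponents, as the deleted comments state):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static int32_t EcmaToInt32Slow(double input) {  // valid for exponent >= 21
      uint64_t bits;
      std::memcpy(&bits, &input, sizeof bits);
      uint32_t hi = static_cast<uint32_t>(bits >> 32);  // scratch_high
      uint32_t lo = static_cast<uint32_t>(bits);        // scratch_low
      int exponent = static_cast<int>((hi >> 20) & 0x7FF) - 1023;
      uint32_t result;
      if (exponent >= 84) {
        result = 0;  // the integer value is a multiple of 2^32
      } else if (exponent >= 52) {
        result = lo << (exponent - 52);  // only scratch_low contributes
      } else {  // 21 <= exponent <= 51: combine both mantissa words
        uint32_t mantissa_hi = (hi & 0xFFFFF) | (1u << 20);  // implicit 1
        result = (lo >> (52 - exponent)) | (mantissa_hi << (exponent - 20));
      }
      uint32_t sign = hi >> 31;
      return static_cast<int32_t>((result ^ (0u - sign)) + sign);  // eor/add
    }

    int main() {
      std::printf("%d\n", EcmaToInt32Slow(4294967301.0));   // 2^32 + 5 -> 5
      std::printf("%d\n", EcmaToInt32Slow(-2147483649.0));  // -> 2147483647
      return 0;
    }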
| 2554 |
| 2555 |
| 2556 void MacroAssembler::TruncateHeapNumberToI(Register result, |
| 2557 Register object) { |
| 2558 Label done; |
| 2559 LowDwVfpRegister double_scratch = kScratchDoubleReg; |
| 2560 ASSERT(!result.is(object)); |
| 2561 |
| 2562 vldr(double_scratch, |
| 2563 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag)); |
| 2564 TryInlineTruncateDoubleToI(result, double_scratch, &done); |
| 2565 |
| 2566 // If we fell through, the inline version didn't succeed; call the stub instead. |
| 2567 push(lr); |
| 2568 DoubleToIStub stub(object, |
| 2569 result, |
| 2570 HeapNumber::kValueOffset - kHeapObjectTag, |
| 2571 true, |
| 2572 true); |
| 2573 CallStub(&stub); |
| 2574 pop(lr); |
| 2575 |
| 2576 bind(&done); |
| 2577 } |
| 2578 |
| 2579 |
| 2580 void MacroAssembler::TruncateNumberToI(Register object, |
| 2581 Register result, |
| 2582 Register heap_number_map, |
| 2583 Register scratch1, |
| 2584 Label* not_number) { |
| 2585 Label done; |
| 2586 ASSERT(!result.is(object)); |
| 2587 |
| 2588 UntagAndJumpIfSmi(result, object, &done); |
| 2589 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); |
| 2590 TruncateHeapNumberToI(result, object); |
| 2591 |
| 2592 bind(&done); |
| 2593 } |
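The new helpers nest as TruncateNumberToI -> TruncateHeapNumberToI -> TryInlineTruncateDoubleToI, with DoubleToIStub as the out-of-line fallback. The inline attempt relies on vcvt_s32_f64 saturating out-of-range inputs to 0x7fffffff/0x80000000, so a single wrap-around subtract plus signed compare rejects exactly those two patterns. A standalone sketch of that check (illustrative C++, not V8 code):

    #include <cstdint>
    #include <cstdio>

    static bool InlineConversionSucceeded(int32_t result) {
      // Mirrors sub(ip, result, 1); cmp(ip, 0x7ffffffe); b(lt, done).
      // ARM's sub wraps modulo 2^32, so emulate it with uint32_t.
      int32_t adjusted =
          static_cast<int32_t>(static_cast<uint32_t>(result) - 1u);
      // Signed 'lt' fails only for 0x7fffffff and 0x80000000.
      return adjusted < 0x7ffffffe;
    }

    int main() {
      std::printf("%d %d %d\n",
                  InlineConversionSucceeded(42),          // 1: genuine result
                  InlineConversionSucceeded(INT32_MAX),   // 0: saturated high
                  InlineConversionSucceeded(INT32_MIN));  // 0: saturated low
      return 0;
    }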
2622 | 2594 |
2623 | 2595 |
2624 void MacroAssembler::GetLeastBitsFromSmi(Register dst, | 2596 void MacroAssembler::GetLeastBitsFromSmi(Register dst, |
2625 Register src, | 2597 Register src, |
2626 int num_least_bits) { | 2598 int num_least_bits) { |
2627 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) { | 2599 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) { |
2628 ubfx(dst, src, kSmiTagSize, num_least_bits); | 2600 ubfx(dst, src, kSmiTagSize, num_least_bits); |
2629 } else { | 2601 } else { |
2630 SmiUntag(dst, src); | 2602 SmiUntag(dst, src); |
2631 and_(dst, dst, Operand((1 << num_least_bits) - 1)); | 2603 and_(dst, dst, Operand((1 << num_least_bits) - 1)); |
(...skipping 1191 matching lines...) |
3823 ldr(ip, MemOperand(ip)); | 3795 ldr(ip, MemOperand(ip)); |
3824 cmp(scratch_reg, ip); | 3796 cmp(scratch_reg, ip); |
3825 b(gt, &no_memento_available); | 3797 b(gt, &no_memento_available); |
3826 ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize)); | 3798 ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize)); |
3827 cmp(scratch_reg, | 3799 cmp(scratch_reg, |
3828 Operand(Handle<Map>(isolate()->heap()->allocation_memento_map()))); | 3800 Operand(Handle<Map>(isolate()->heap()->allocation_memento_map()))); |
3829 bind(&no_memento_available); | 3801 bind(&no_memento_available); |
3830 } | 3802 } |
3831 | 3803 |
3832 | 3804 |
| 3805 Register GetRegisterThatIsNotOneOf(Register reg1, |
| 3806 Register reg2, |
| 3807 Register reg3, |
| 3808 Register reg4, |
| 3809 Register reg5, |
| 3810 Register reg6) { |
| 3811 RegList regs = 0; |
| 3812 if (reg1.is_valid()) regs |= reg1.bit(); |
| 3813 if (reg2.is_valid()) regs |= reg2.bit(); |
| 3814 if (reg3.is_valid()) regs |= reg3.bit(); |
| 3815 if (reg4.is_valid()) regs |= reg4.bit(); |
| 3816 if (reg5.is_valid()) regs |= reg5.bit(); |
| 3817 if (reg6.is_valid()) regs |= reg6.bit(); |
| 3818 |
| 3819 for (int i = 0; i < Register::NumAllocatableRegisters(); i++) { |
| 3820 Register candidate = Register::FromAllocationIndex(i); |
| 3821 if (regs & candidate.bit()) continue; |
| 3822 return candidate; |
| 3823 } |
| 3824 UNREACHABLE(); |
| 3825 return no_reg; |
| 3826 } |
| 3827 |
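GetRegisterThatIsNotOneOf folds the valid arguments into a RegList bit mask and returns the first allocatable register whose bit is clear. A standalone analogue of that scan (plain C++ with an integer mask standing in for V8's Register/RegList types):

    #include <cassert>
    #include <cstdint>

    // Return the first of num_candidates indices whose bit is clear in
    // used_mask; mimics the allocation-index loop above.
    static int FirstFreeIndex(uint32_t used_mask, int num_candidates) {
      for (int i = 0; i < num_candidates; i++) {
        if ((used_mask & (1u << i)) == 0) return i;
      }
      assert(false && "no free register");  // mirrors UNREACHABLE()
      return -1;
    }

    int main() {
      // Candidates 0, 1 and 3 are taken; the first free one is 2.
      assert(FirstFreeIndex((1u << 0) | (1u << 1) | (1u << 3), 8) == 2);
      return 0;
    }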
| 3828 |
3833 #ifdef DEBUG | 3829 #ifdef DEBUG |
3834 bool AreAliased(Register reg1, | 3830 bool AreAliased(Register reg1, |
3835 Register reg2, | 3831 Register reg2, |
3836 Register reg3, | 3832 Register reg3, |
3837 Register reg4, | 3833 Register reg4, |
3838 Register reg5, | 3834 Register reg5, |
3839 Register reg6) { | 3835 Register reg6) { |
3840 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + | 3836 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + |
3841 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid(); | 3837 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid(); |
3842 | 3838 |
(...skipping 45 matching lines...) |
3888 void CodePatcher::EmitCondition(Condition cond) { | 3884 void CodePatcher::EmitCondition(Condition cond) { |
3889 Instr instr = Assembler::instr_at(masm_.pc_); | 3885 Instr instr = Assembler::instr_at(masm_.pc_); |
3890 instr = (instr & ~kCondMask) | cond; | 3886 instr = (instr & ~kCondMask) | cond; |
3891 masm_.emit(instr); | 3887 masm_.emit(instr); |
3892 } | 3888 } |
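EmitCondition swaps only the 4-bit condition field of the instruction at the patch position (bits 31:28 on ARM) and re-emits the word. The same mask-and-merge in standalone form (illustrative C++; 0xF0000000 matches the ARM condition-field mask):

    #include <cstdint>
    #include <cstdio>

    static const uint32_t kCondMaskBits = 0xF0000000u;  // ARM cond, bits 31:28

    static uint32_t PatchCondition(uint32_t instr, uint32_t cond) {
      // Clear the old condition field, then install the new one.
      return (instr & ~kCondMaskBits) | (cond & kCondMaskBits);
    }

    int main() {
      uint32_t mov_al = 0xE1A00000u;  // mov r0, r0 with cond = AL (0b1110)
      uint32_t eq = 0x00000000u;      // EQ condition encoding (0b0000)
      std::printf("%08x\n", PatchCondition(mov_al, eq));  // prints 01a00000
      return 0;
    }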
3893 | 3889 |
3894 | 3890 |
3895 } } // namespace v8::internal | 3891 } } // namespace v8::internal |
3896 | 3892 |
3897 #endif // V8_TARGET_ARCH_ARM | 3893 #endif // V8_TARGET_ARCH_ARM |