| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 811 matching lines...) |
| 822 void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) { | 822 void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) { |
| 823 if (dst.code() < 16) { | 823 if (dst.code() < 16) { |
| 824 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code()); | 824 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code()); |
| 825 vmov(loc.low(), src); | 825 vmov(loc.low(), src); |
| 826 } else { | 826 } else { |
| 827 vmov(dst, VmovIndexLo, src); | 827 vmov(dst, VmovIndexLo, src); |
| 828 } | 828 } |
| 829 } | 829 } |
| 830 | 830 |
| 831 | 831 |
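VmovLow writes a core register into the low 32 bits of a D register. For d0-d15 the low word is addressable as a single-precision alias (d<n> overlays s<2n> and s<2n+1>), so a plain core-to-single vmov suffices; d16-d31 have no such alias, hence the indexed element move (VmovIndexLo). A minimal C++ model of the effect on the 64-bit register value (illustrative, not V8 code):

    #include <cstdint>

    // Model of VmovLow(dst, src): replace the low 32 bits of the 64-bit
    // D-register value and leave the high 32 bits untouched.
    uint64_t VmovLowModel(uint64_t dreg, uint32_t src) {
      return (dreg & 0xffffffff00000000ull) | src;
    }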
| 832 void MacroAssembler::ConvertNumberToInt32(Register object, | |
| 833 Register dst, | |
| 834 Register heap_number_map, | |
| 835 Register scratch1, | |
| 836 Register scratch2, | |
| 837 Register scratch3, | |
| 838 DwVfpRegister double_scratch1, | |
| 839 LowDwVfpRegister double_scratch2, | |
| 840 Label* not_number) { | |
| 841 Label done; | |
| 842 UntagAndJumpIfSmi(dst, object, &done); | |
| 843 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); | |
| 844 vldr(double_scratch1, FieldMemOperand(object, HeapNumber::kValueOffset)); | |
| 845 ECMAToInt32(dst, double_scratch1, | |
| 846 scratch1, scratch2, scratch3, double_scratch2); | |
| 847 | |
| 848 bind(&done); | |
| 849 } | |
| 850 | |
| 851 | |
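The deleted ConvertNumberToInt32, like the surviving LoadNumber below and the new TruncateNumberToI further down, begins with a smi fast path: on 32-bit ARM a smi is a 31-bit integer whose low tag bit is 0, so UntagAndJumpIfSmi amounts to a tag test plus an arithmetic shift. A simplified model of that fast path (illustrative, not V8's actual tagging code):

    #include <cstdint>

    // Smi fast path: a clear low bit marks a small integer, which untags
    // with one arithmetic shift; a set low bit marks a heap object, which
    // must take the heap-number (double) path instead.
    bool UntagIfSmi(intptr_t tagged, int32_t* out) {
      if ((tagged & 1) != 0) return false;       // heap object pointer
      *out = static_cast<int32_t>(tagged >> 1);  // shift out the tag bit
      return true;
    }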
| 852 void MacroAssembler::LoadNumber(Register object, | 832 void MacroAssembler::LoadNumber(Register object, |
| 853 LowDwVfpRegister dst, | 833 LowDwVfpRegister dst, |
| 854 Register heap_number_map, | 834 Register heap_number_map, |
| 855 Register scratch, | 835 Register scratch, |
| 856 Label* not_number) { | 836 Label* not_number) { |
| 857 Label is_smi, done; | 837 Label is_smi, done; |
| 858 | 838 |
| 859 UntagAndJumpIfSmi(scratch, object, &is_smi); | 839 UntagAndJumpIfSmi(scratch, object, &is_smi); |
| 860 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number); | 840 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number); |
| 861 | 841 |
| (...skipping 1655 matching lines...) |
| 2517 // If x is a non-integer negative number, | 2497 // If x is a non-integer negative number, |
| 2518 // floor(x) <=> round_to_zero(x) - 1. | 2498 // floor(x) <=> round_to_zero(x) - 1. |
| 2519 bind(&negative); | 2499 bind(&negative); |
| 2520 sub(result, result, Operand(1), SetCC); | 2500 sub(result, result, Operand(1), SetCC); |
| 2521 // If the result is still negative, go to done; the result is valid. | 2501 // If the result is still negative, go to done; the result is valid. |
| 2522 // Else we had an overflow and fall through to the exception label. | 2502 // Else we had an overflow and fall through to the exception label. |
| 2523 b(mi, done); | 2503 b(mi, done); |
| 2524 bind(&exception); | 2504 bind(&exception); |
| 2525 } | 2505 } |
| 2526 | 2506 |
| 2527 | 2507 void MacroAssembler::TryInlineTruncateDoubleToI(Register result, |
| 2528 void MacroAssembler::ECMAToInt32(Register result, | 2508 DwVfpRegister double_input, |
| 2529 DwVfpRegister double_input, | 2509 Label* done) { |
| 2530 Register scratch, | 2510 LowDwVfpRegister double_scratch = kScratchDoubleReg; |
| 2531 Register scratch_high, | |
| 2532 Register scratch_low, | |
| 2533 LowDwVfpRegister double_scratch) { | |
| 2534 ASSERT(!scratch_high.is(result)); | |
| 2535 ASSERT(!scratch_low.is(result)); | |
| 2536 ASSERT(!scratch_low.is(scratch_high)); | |
| 2537 ASSERT(!scratch.is(result) && | |
| 2538 !scratch.is(scratch_high) && | |
| 2539 !scratch.is(scratch_low)); | |
| 2540 ASSERT(!double_input.is(double_scratch)); | |
| 2541 | |
| 2542 Label out_of_range, only_low, negate, done; | |
| 2543 | |
| 2544 vcvt_s32_f64(double_scratch.low(), double_input); | 2511 vcvt_s32_f64(double_scratch.low(), double_input); |
| 2545 vmov(result, double_scratch.low()); | 2512 vmov(result, double_scratch.low()); |
| 2546 | 2513 |
| 2547 // If result is not saturated (0x7fffffff or 0x80000000), we are done. | 2514 // If result is not saturated (0x7fffffff or 0x80000000), we are done. |
| 2548 sub(scratch, result, Operand(1)); | 2515 sub(ip, result, Operand(1)); |
| 2549 cmp(scratch, Operand(0x7ffffffe)); | 2516 cmp(ip, Operand(0x7ffffffe)); |
| 2550 b(lt, &done); | 2517 b(lt, done); |
| 2518 } |
| 2551 | 2519 |
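The new TryInlineTruncateDoubleToI detects vcvt saturation with a single compare: vcvt_s32_f64 clamps out-of-range inputs to 0x7fffffff or 0x80000000, and subtracting 1 maps exactly those two values to something that fails a signed compare against 0x7ffffffe. A model of the sub/cmp/b(lt) sequence (illustrative, not V8 code):

    #include <cstdint>

    // True iff vcvt's result is not one of the two saturated values,
    // i.e. the inline truncation produced the exact answer.
    bool InlineTruncationSucceeded(int32_t result) {
      // Unsigned subtraction avoids signed-overflow UB for INT32_MIN.
      int32_t minus_one =
          static_cast<int32_t>(static_cast<uint32_t>(result) - 1u);
      // 0x7fffffff - 1 == 0x7ffffffe and 0x80000000 - 1 == 0x7fffffff both
      // fail the signed "less than 0x7ffffffe" test; everything else passes.
      return minus_one < 0x7ffffffe;
    }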
| 2552 vmov(scratch_low, scratch_high, double_input); | |
| 2553 Ubfx(scratch, scratch_high, | |
| 2554 HeapNumber::kExponentShift, HeapNumber::kExponentBits); | |
| 2555 // Load scratch with exponent - 1. This is faster than loading | |
| 2556 // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value. | |
| 2557 sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1)); | |
| 2558 // If the exponent is greater than or equal to 84, the 32 least significant | |
| 2559 // bits are 0s (with exponent 84 the 53 significand bits all sit above bit 31), | |
| 2560 // so the result is 0. | |
| 2561 // Compare exponent with 84 (compare exponent - 1 with 83). | |
| 2562 cmp(scratch, Operand(83)); | |
| 2563 b(ge, &out_of_range); | |
| 2564 | 2520 |
| 2565 // If we reach this code, 31 <= exponent <= 83. | 2521 void MacroAssembler::TruncateDoubleToI(Register result, |
| 2566 // So, we don't have to handle cases where 0 <= exponent <= 20 for | 2522 DwVfpRegister double_input) { |
| 2567 // which we would need to shift right the high part of the mantissa. | 2523 Label done; |
| 2568 // Scratch contains exponent - 1. | |
| 2569 // Load scratch with 52 - exponent (load with 51 - (exponent - 1)). | |
| 2570 rsb(scratch, scratch, Operand(51), SetCC); | |
| 2571 b(ls, &only_low); | |
| 2572 // 21 <= exponent <= 51, shift scratch_low and scratch_high | |
| 2573 // to generate the result. | |
| 2574 mov(scratch_low, Operand(scratch_low, LSR, scratch)); | |
| 2575 // Scratch contains: 52 - exponent. | |
| 2576 // We need: exponent - 20. | |
| 2577 // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20. | |
| 2578 rsb(scratch, scratch, Operand(32)); | |
| 2579 Ubfx(result, scratch_high, | |
| 2580 0, HeapNumber::kMantissaBitsInTopWord); | |
| 2581 // Set the implicit 1 before the mantissa part in scratch_high. | |
| 2582 orr(result, result, Operand(1 << HeapNumber::kMantissaBitsInTopWord)); | |
| 2583 orr(result, scratch_low, Operand(result, LSL, scratch)); | |
| 2584 b(&negate); | |
| 2585 | 2524 |
| 2586 bind(&out_of_range); | 2525 TryInlineTruncateDoubleToI(result, double_input, &done); |
| 2587 mov(result, Operand::Zero()); | |
| 2588 b(&done); | |
| 2589 | 2526 |
| 2590 bind(&only_low); | 2527 // If we fell through, the inline version didn't succeed; call the stub. |
| 2591 // 52 <= exponent <= 83, shift only scratch_low. | 2528 push(lr); |
| 2592 // On entry, scratch contains: 52 - exponent. | 2529 sub(sp, sp, Operand(kDoubleSize)); // Put input on stack. |
| 2593 rsb(scratch, scratch, Operand::Zero()); | 2530 vstr(double_input, MemOperand(sp, 0)); |
| 2594 mov(result, Operand(scratch_low, LSL, scratch)); | |
| 2595 | 2531 |
| 2596 bind(&negate); | 2532 DoubleToIStub stub(sp, result, 0, true, true); |
| 2597 // If the input was positive, scratch_high ASR 31 equals 0 and | |
| 2598 // scratch_high LSR 31 equals 0. | |
| 2599 // New result = (result eor 0) + 0 = result. | 2535 add(sp, sp, Operand(kDoubleSize)); |
| 2600 // If the input was negative, we have to negate the result. | 2536 pop(lr); |
| 2601 // scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1. | |
| 2602 // New result = (result eor 0xffffffff) + 1 = 0 - result. | |
| 2603 eor(result, result, Operand(scratch_high, ASR, 31)); | |
| 2604 add(result, result, Operand(scratch_high, LSR, 31)); | |
| 2605 | 2537 |
| 2606 bind(&done); | 2538 bind(&done); |
| 2607 } | 2539 } |
| 2540 |
| 2541 |
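For reference, the deleted ECMAToInt32 block above implemented the ECMA-262 ToInt32 truncation (double to int32, modulo 2^32) by hand; the assembly only ran its bit-twiddling for 31 <= exponent <= 83, since vcvt already covered smaller magnitudes. A plain-C++ rendering of the full computation, as a hedged sketch rather than V8 code:

    #include <cstdint>
    #include <cstring>

    // ECMA-262 ToInt32: truncate toward zero, keep the low 32 bits.
    // NaN, infinities and exponents >= 84 all come out as 0.
    int32_t EcmaToInt32Reference(double input) {
      uint64_t bits;
      std::memcpy(&bits, &input, sizeof bits);  // raw IEEE-754 bits
      int exponent = static_cast<int>((bits >> 52) & 0x7ff) - 1023;
      if (exponent < 0) return 0;    // |input| < 1 (or denormal): 0
      if (exponent >= 84) return 0;  // low 32 bits all zero; also NaN/inf
      // Implicit leading 1 plus the 52 stored mantissa bits.
      uint64_t mantissa =
          (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
      uint32_t result = exponent > 52
          ? static_cast<uint32_t>(mantissa << (exponent - 52))
          : static_cast<uint32_t>(mantissa >> (52 - exponent));
      if (bits >> 63) result = ~result + 1;  // two's-complement negate
      return static_cast<int32_t>(result);
    }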
| 2542 void MacroAssembler::TruncateDoubleToI(Register result, |
| 2543 Register input, |
| 2544 int offset) { |
| 2545 Label done; |
| 2546 LowDwVfpRegister double_scratch = kScratchDoubleReg; |
| 2547 ASSERT(!result.is(input)); |
| 2548 |
| 2549 vldr(double_scratch, MemOperand(input, offset)); |
| 2550 TryInlineTruncateDoubleToI(result, double_scratch, &done); |
| 2551 |
| 2552 // If we fell through, the inline version didn't succeed; call the stub. |
| 2553 push(lr); |
| 2554 DoubleToIStub stub(input, result, offset, true, true); |
| 2555 CallStub(&stub); |
| 2556 pop(lr); |
| 2557 |
| 2558 bind(&done); |
| 2559 } |
| 2560 |
| 2561 |
| 2562 void MacroAssembler::TruncateNumberToI(Register object, |
| 2563 Register result, |
| 2564 Register heap_number_map, |
| 2565 Register scratch1, |
| 2566 Label* not_number) { |
| 2567 Label done; |
| 2568 ASSERT(!result.is(object)); |
| 2569 |
| 2570 UntagAndJumpIfSmi(result, object, &done); |
| 2571 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); |
| 2572 TruncateDoubleToI(result, object, HeapNumber::kValueOffset - kHeapObjectTag); |
| 2573 |
| 2574 bind(&done); |
| 2575 } |
| 2608 | 2576 |
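Putting the new pieces together: TruncateNumberToI untags smis directly and sends heap numbers through TruncateDoubleToI, which tries the inline vcvt and falls back to DoubleToIStub. A hedged flow model, reusing the sketches above (the reference function stands in for both the inline path and the stub):

    // Illustrative dispatch only; UntagIfSmi and EcmaToInt32Reference are
    // the sketches above, not the emitted code paths themselves.
    int32_t TruncateNumberModel(intptr_t tagged, double heap_value) {
      int32_t result;
      if (UntagIfSmi(tagged, &result)) return result;  // smi: done
      return EcmaToInt32Reference(heap_value);         // heap number
    }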
| 2609 | 2577 |
| 2610 void MacroAssembler::GetLeastBitsFromSmi(Register dst, | 2578 void MacroAssembler::GetLeastBitsFromSmi(Register dst, |
| 2611 Register src, | 2579 Register src, |
| 2612 int num_least_bits) { | 2580 int num_least_bits) { |
| 2613 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) { | 2581 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) { |
| 2614 ubfx(dst, src, kSmiTagSize, num_least_bits); | 2582 ubfx(dst, src, kSmiTagSize, num_least_bits); |
| 2615 } else { | 2583 } else { |
| 2616 SmiUntag(dst, src); | 2584 SmiUntag(dst, src); |
| 2617 and_(dst, dst, Operand((1 << num_least_bits) - 1)); | 2585 and_(dst, dst, Operand((1 << num_least_bits) - 1)); |
| (...skipping 1191 matching lines...) |
| 3809 ldr(ip, MemOperand(ip)); | 3777 ldr(ip, MemOperand(ip)); |
| 3810 cmp(scratch_reg, ip); | 3778 cmp(scratch_reg, ip); |
| 3811 b(gt, &no_memento_available); | 3779 b(gt, &no_memento_available); |
| 3812 ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize)); | 3780 ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize)); |
| 3813 cmp(scratch_reg, | 3781 cmp(scratch_reg, |
| 3814 Operand(Handle<Map>(isolate()->heap()->allocation_memento_map()))); | 3782 Operand(Handle<Map>(isolate()->heap()->allocation_memento_map()))); |
| 3815 bind(&no_memento_available); | 3783 bind(&no_memento_available); |
| 3816 } | 3784 } |
| 3817 | 3785 |
| 3818 | 3786 |
| 3787 Register GetRegisterThatIsNotOneOf(Register reg1, |
| 3788 Register reg2, |
| 3789 Register reg3, |
| 3790 Register reg4, |
| 3791 Register reg5, |
| 3792 Register reg6) { |
| 3793 RegList regs = 0; |
| 3794 if (reg1.is_valid()) regs |= reg1.bit(); |
| 3795 if (reg2.is_valid()) regs |= reg2.bit(); |
| 3796 if (reg3.is_valid()) regs |= reg3.bit(); |
| 3797 if (reg4.is_valid()) regs |= reg4.bit(); |
| 3798 if (reg5.is_valid()) regs |= reg5.bit(); |
| 3799 if (reg6.is_valid()) regs |= reg6.bit(); |
| 3800 |
| 3801 for (int i = 0; i < Register::NumAllocatableRegisters(); i++) { |
| 3802 Register candidate = Register::FromAllocationIndex(i); |
| 3803 if (regs & candidate.bit()) continue; |
| 3804 return candidate; |
| 3805 } |
| 3806 UNREACHABLE(); |
| 3807 return no_reg; |
| 3808 } |
| 3809 |
| 3810 |
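The new GetRegisterThatIsNotOneOf builds a RegList bitmask from its arguments and returns the first allocatable register whose bit is clear. The same scan in plain C++, with integer codes standing in for Register (illustrative):

    #include <cstdint>

    // First register code absent from the used-set bitmask; -1 if none,
    // which the real function treats as unreachable.
    int FirstFreeCode(uint32_t used, int num_allocatable) {
      for (int code = 0; code < num_allocatable; ++code) {
        if ((used & (1u << code)) == 0) return code;
      }
      return -1;
    }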
| 3819 #ifdef DEBUG | 3811 #ifdef DEBUG |
| 3820 bool AreAliased(Register reg1, | 3812 bool AreAliased(Register reg1, |
| 3821 Register reg2, | 3813 Register reg2, |
| 3822 Register reg3, | 3814 Register reg3, |
| 3823 Register reg4, | 3815 Register reg4, |
| 3824 Register reg5, | 3816 Register reg5, |
| 3825 Register reg6) { | 3817 Register reg6) { |
| 3826 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + | 3818 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + |
| 3827 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid(); | 3819 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid(); |
| 3828 | 3820 |
| (...skipping 45 matching lines...) |
| 3874 void CodePatcher::EmitCondition(Condition cond) { | 3866 void CodePatcher::EmitCondition(Condition cond) { |
| 3875 Instr instr = Assembler::instr_at(masm_.pc_); | 3867 Instr instr = Assembler::instr_at(masm_.pc_); |
| 3876 instr = (instr & ~kCondMask) | cond; | 3868 instr = (instr & ~kCondMask) | cond; |
| 3877 masm_.emit(instr); | 3869 masm_.emit(instr); |
| 3878 } | 3870 } |
| 3879 | 3871 |
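CodePatcher::EmitCondition works because an ARM instruction carries its condition in bits 28-31 and V8's Condition values are pre-shifted into that field, so the patch is a mask-and-or. A small model, assuming kCondMask is the usual 0xF0000000 condition-field mask:

    #include <cstdint>

    // Replace the condition field (bits 28-31) of an ARM instruction word;
    // cond is assumed pre-shifted into those bits, as in V8's Condition enum.
    uint32_t PatchCondition(uint32_t instr, uint32_t cond) {
      const uint32_t kCondMask = 0xF0000000u;  // assumed field mask
      return (instr & ~kCondMask) | cond;
    }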
| 3880 | 3872 |
| 3881 } } // namespace v8::internal | 3873 } } // namespace v8::internal |
| 3882 | 3874 |
| 3883 #endif // V8_TARGET_ARCH_ARM | 3875 #endif // V8_TARGET_ARCH_ARM |