Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 616 matching lines...) | |
| 627 // Compute lower part of fraction (last 12 bits). | 627 // Compute lower part of fraction (last 12 bits). |
| 628 __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord)); | 628 __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord)); |
| 629 // And the top (top 20 bits). | 629 // And the top (top 20 bits). |
| 630 __ orr(exponent, | 630 __ orr(exponent, |
| 631 exponent, | 631 exponent, |
| 632 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord)); | 632 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord)); |
| 633 __ Ret(); | 633 __ Ret(); |
| 634 } | 634 } |
| 635 | 635 |
| 636 | 636 |
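For readers less fluent in ARM assembly, here is a minimal C++ sketch of the bit-packing the two instructions above perform. The standalone helper and its name are invented for illustration; this is not V8 code:

```cpp
#include <cstdint>

// kMantissaBitsInTopWord is 20 in V8: of the 52 mantissa bits, 20 live in
// the high word of the double and 32 in the low word.
constexpr int kMantissaBitsInTopWord = 20;

// Pack a 32-bit significand into the two words of an IEEE-754 double,
// assuming `high` already holds the sign and exponent bits.
void PackMantissa(uint32_t significand, uint32_t* high, uint32_t* low) {
  // Low word: the bottom 12 bits of the significand, left-aligned.
  *low = significand << kMantissaBitsInTopWord;
  // High word: the top 20 bits of the significand, below the exponent.
  *high |= significand >> (32 - kMantissaBitsInTopWord);
}
```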
| 637 void DoubleToIStub::Generate(MacroAssembler* masm) { | |
| 638 Label out_of_range, only_low, negate, done; | |
| 639 Register input_reg = source(); | |
| 640 Register result_reg = destination(); | |
| 641 | |
| 642 int double_offset = offset(); | |
| 643 // Account for saved regs if input is sp. | |
| 644 if (input_reg.is(sp)) double_offset += 2 * kPointerSize; | |
| 645 | |
| 646 // Immediate values for this stub fit in instructions, so it's safe to use ip. | |
| 647 Register scratch = ip; | |
| 648 Register scratch_low = | |
| 649 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch); | |
| 650 Register scratch_high = | |
| 651 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low); | |
| 652 LowDwVfpRegister double_scratch = kScratchDoubleReg; | |
| 653 | |
| 654 __ Push(scratch_high, scratch_low); | |
| 655 | |
| 656 if (!skip_fastpath()) { | |
| 657 // Load double input. | |
| 658 __ vldr(double_scratch, MemOperand(input_reg, double_offset)); | |
| 659 __ vmov(scratch_low, scratch_high, double_scratch); | |
| 660 | |
| 661 // Do fast-path convert from double to int. | |
| 662 __ vcvt_s32_f64(double_scratch.low(), double_scratch); | |
| 663 __ vmov(result_reg, double_scratch.low()); | |
| 664 | |
| 665 // If result is not saturated (0x7fffffff or 0x80000000), we are done. | |
| 666 __ sub(scratch, result_reg, Operand(1)); | |
| 667 __ cmp(scratch, Operand(0x7ffffffe)); | |
| 668 __ b(lt, &done); | |
| 669 } else { | |
| 670 // We've already done MacroAssembler::TryFastTruncatedDoubleToILoad, so we | |
| 671 // know exponent > 31, so we can skip the vcvt_s32_f64 which will saturate. | |
| 672 if (double_offset == 0) { | |
| 673 __ ldm(ia, input_reg, scratch_low.bit() | scratch_high.bit()); | |
| 674 } else { | |
| 675 __ ldr(scratch_low, MemOperand(input_reg, double_offset)); | |
| 676 __ ldr(scratch_high, MemOperand(input_reg, double_offset + kIntSize)); | |
| 677 } | |
| 678 } | |
| 679 | |
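The fast-path saturation test above (`sub`, `cmp`, `b lt`) is compact but subtle: `vcvt_s32_f64` saturates out-of-range inputs to 0x7fffffff or 0x80000000, and subtracting 1 maps exactly those two values to 0x7ffffffe and 0x7fffffff, the only int32 values not signed-less-than 0x7ffffffe. A hedged C++ equivalent, for illustration only:

```cpp
#include <cstdint>

// Returns true when vcvt did not saturate, i.e. the result is neither
// 0x7fffffff (INT32_MAX) nor 0x80000000 (INT32_MIN).
bool NotSaturated(int32_t result) {
  // Wrapping subtraction mirrors the ARM `sub`; the cast avoids signed
  // overflow UB in C++ when result == INT32_MIN.
  int32_t t = static_cast<int32_t>(static_cast<uint32_t>(result) - 1u);
  return t < 0x7ffffffe;  // signed less-than, like the `lt` condition
}
```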
| 680 __ Ubfx(scratch, scratch_high, | |
| 681 HeapNumber::kExponentShift, HeapNumber::kExponentBits); | |
| 682 // Load scratch with exponent - 1. This is faster than loading | |
| 683 // with exponent because Bias + 1 = 1024, which is an *ARM* immediate value. | |
Benedikt Meurer (2013/08/19 07:48:01): Please add a STATIC_ASSERT() for this.

rmcilroy (2013/08/19 12:35:24): Done.
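The assertion requested above is not shown in this diff; presumably it took a form like the following (a hypothetical reconstruction, not the landed code):

```cpp
// Hypothetical reconstruction: verify at compile time that the constant the
// comment relies on really is 1024 (0x400), an encodable ARM immediate.
STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
```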
| 684 __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1)); | |
| 685 // If the exponent is greater than or equal to 84, the 32 least | |
| 686 // significant bits of the value are zero (all 53 significant bits | |
| 687 // lie at or above bit 32), so the result is 0. | |
| 688 // Compare exponent with 84 (compare exponent - 1 with 83). | |
| 689 __ cmp(scratch, Operand(83)); | |
| 690 __ b(ge, &out_of_range); | |
| 691 | |
| 692 // If we reach this code, 31 <= exponent <= 83. | |
| 693 // So, we don't have to handle cases where 0 <= exponent <= 20 for | |
| 694 // which we would need to shift right the high part of the mantissa. | |
| 695 // Scratch contains exponent - 1. | |
| 696 // Load scratch with 52 - exponent (load with 51 - (exponent - 1)). | |
| 697 __ rsb(scratch, scratch, Operand(51), SetCC); | |
| 698 __ b(ls, &only_low); | |
| 699 // 21 <= exponent <= 51, shift scratch_low and scratch_high | |
| 700 // to generate the result. | |
| 701 __ mov(scratch_low, Operand(scratch_low, LSR, scratch)); | |
| 702 // Scratch contains: 52 - exponent. | |
| 703 // We need: exponent - 20. | |
| 704 // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20. | |
| 705 __ rsb(scratch, scratch, Operand(32)); | |
| 706 __ Ubfx(result_reg, scratch_high, | |
| 707 0, HeapNumber::kMantissaBitsInTopWord); | |
| 708 // Set the implicit 1 before the mantissa part in scratch_high. | |
| 709 __ orr(result_reg, result_reg, | |
| 710 Operand(1 << HeapNumber::kMantissaBitsInTopWord)); | |
| 711 __ orr(result_reg, scratch_low, Operand(result_reg, LSL, scratch)); | |
| 712 __ b(&negate); | |
| 713 | |
| 714 __ bind(&out_of_range); | |
| 715 __ mov(result_reg, Operand::Zero()); | |
| 716 __ b(&done); | |
| 717 | |
| 718 __ bind(&only_low); | |
| 719 // 52 <= exponent <= 83, shift only scratch_low. | |
| 720 // On entry, scratch contains: 52 - exponent. | |
| 721 __ rsb(scratch, scratch, Operand::Zero()); | |
| 722 __ mov(result_reg, Operand(scratch_low, LSL, scratch)); | |
| 723 | |
| 724 __ bind(&negate); | |
| 725 // If input was positive, scratch_high ASR 31 equals 0 and | |
| 726 // scratch_high LSR 31 equals zero. | |
| 727 // New result = (result eor 0) + 0 = result. | |
| 728 // If the input was negative, we have to negate the result. | |
| 729 // scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1. | |
| 730 // New result = (result eor 0xffffffff) + 1 = 0 - result. | |
| 731 __ eor(result_reg, result_reg, Operand(scratch_high, ASR, 31)); | |
| 732 __ add(result_reg, result_reg, Operand(scratch_high, LSR, 31)); | |
| 733 | |
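The `eor`/`add` pair above is a standard branchless conditional negate. A small C++ sketch of the same trick (illustrative, not V8 code):

```cpp
#include <cstdint>

// Negate `magnitude` iff the sign bit of `high_word` is set:
//   sign clear: (m ^ 0) + 0  == m
//   sign set:   (m ^ ~0) + 1 == -m (two's complement)
int32_t ApplySign(uint32_t magnitude, uint32_t high_word) {
  uint32_t asr = static_cast<uint32_t>(static_cast<int32_t>(high_word) >> 31);
  uint32_t lsr = high_word >> 31;  // 0 or 1
  return static_cast<int32_t>((magnitude ^ asr) + lsr);
}
```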
| 734 __ bind(&done); | |
| 735 | |
| 736 __ Pop(scratch_high, scratch_low); | |
| 737 __ Ret(); | |
| 738 } | |
| 739 | |
| 740 | |
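Taken as a whole, the slow path computes ECMAScript ToInt32 directly from the IEEE-754 bit pattern. The following self-contained C++ rendering of the same algorithm is for reference only: the function name is invented, it assumes two's-complement int32 conversion, and like the stub it relies on the exponent being at least 21 when the shift path is taken.

```cpp
#include <cstdint>
#include <cstring>

// Truncate a double to int32 per ECMAScript ToInt32, mirroring the stub's
// slow path. The stub only reaches this code after the hardware fast path
// (exponent <= 31) has been tried or skipped.
int32_t DoubleToInt32Slow(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));            // raw IEEE-754 bits
  uint32_t lo = static_cast<uint32_t>(bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);

  int exponent = static_cast<int>((hi >> 20) & 0x7ff) - 1023;
  if (exponent >= 84) return 0;  // all 53 significant bits lie above bit 31

  uint32_t mantissa_hi = (hi & 0xfffff) | (1u << 20);  // restore implicit 1
  uint32_t result;
  if (exponent >= 52) {
    result = lo << (exponent - 52);   // "only_low": shift the low word up
  } else {
    // 21 <= exponent <= 51: combine both mantissa words.
    result = (lo >> (52 - exponent)) | (mantissa_hi << (exponent - 20));
  }

  // Branchless sign application, as in the stub's "negate" block.
  uint32_t sign = static_cast<uint32_t>(static_cast<int32_t>(hi) >> 31);
  return static_cast<int32_t>((result ^ sign) + (hi >> 31));
}
```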
| 637 bool WriteInt32ToHeapNumberStub::IsPregenerated() { | 741 bool WriteInt32ToHeapNumberStub::IsPregenerated() { |
| 638 // These variants are compiled ahead of time. See next method. | 742 // These variants are compiled ahead of time. See next method. |
| 639 if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) { | 743 if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) { |
| 640 return true; | 744 return true; |
| 641 } | 745 } |
| 642 if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) { | 746 if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) { |
| 643 return true; | 747 return true; |
| 644 } | 748 } |
| 645 // Other register combinations are generated as and when they are needed, | 749 // Other register combinations are generated as and when they are needed, |
| 646 // so it is unsafe to call them from stubs (we can't generate a stub while | 750 // so it is unsafe to call them from stubs (we can't generate a stub while |
| (...skipping 937 matching lines...) | |
| 1584 bool smi_operands, | 1688 bool smi_operands, |
| 1585 Label* not_numbers, | 1689 Label* not_numbers, |
| 1586 Label* gc_required, | 1690 Label* gc_required, |
| 1587 Label* miss, | 1691 Label* miss, |
| 1588 Token::Value op, | 1692 Token::Value op, |
| 1589 OverwriteMode mode) { | 1693 OverwriteMode mode) { |
| 1590 Register left = r1; | 1694 Register left = r1; |
| 1591 Register right = r0; | 1695 Register right = r0; |
| 1592 Register scratch1 = r6; | 1696 Register scratch1 = r6; |
| 1593 Register scratch2 = r7; | 1697 Register scratch2 = r7; |
| 1594 Register scratch3 = r4; | |
| 1595 | 1698 |
| 1596 ASSERT(smi_operands || (not_numbers != NULL)); | 1699 ASSERT(smi_operands || (not_numbers != NULL)); |
| 1597 if (smi_operands) { | 1700 if (smi_operands) { |
| 1598 __ AssertSmi(left); | 1701 __ AssertSmi(left); |
| 1599 __ AssertSmi(right); | 1702 __ AssertSmi(right); |
| 1600 } | 1703 } |
| 1601 if (left_type == BinaryOpIC::SMI) { | 1704 if (left_type == BinaryOpIC::SMI) { |
| 1602 __ JumpIfNotSmi(left, miss); | 1705 __ JumpIfNotSmi(left, miss); |
| 1603 } | 1706 } |
| 1604 if (right_type == BinaryOpIC::SMI) { | 1707 if (right_type == BinaryOpIC::SMI) { |
| (...skipping 77 matching lines...) | |
| 1682 case Token::BIT_XOR: | 1785 case Token::BIT_XOR: |
| 1683 case Token::BIT_AND: | 1786 case Token::BIT_AND: |
| 1684 case Token::SAR: | 1787 case Token::SAR: |
| 1685 case Token::SHR: | 1788 case Token::SHR: |
| 1686 case Token::SHL: { | 1789 case Token::SHL: { |
| 1687 if (smi_operands) { | 1790 if (smi_operands) { |
| 1688 __ SmiUntag(r3, left); | 1791 __ SmiUntag(r3, left); |
| 1689 __ SmiUntag(r2, right); | 1792 __ SmiUntag(r2, right); |
| 1690 } else { | 1793 } else { |
| 1691 // Convert operands to 32-bit integers. Right in r2 and left in r3. | 1794 // Convert operands to 32-bit integers. Right in r2 and left in r3. |
| 1692 __ ConvertNumberToInt32( | 1795 __ TruncateNumberToI(left, r3, heap_number_map, scratch1, not_numbers); |
| 1693 left, r3, heap_number_map, | 1796 __ TruncateNumberToI(right, r2, heap_number_map, scratch1, not_numbers); |
| 1694 scratch1, scratch2, scratch3, d0, d1, not_numbers); | |
| 1695 __ ConvertNumberToInt32( | |
| 1696 right, r2, heap_number_map, | |
| 1697 scratch1, scratch2, scratch3, d0, d1, not_numbers); | |
| 1698 } | 1797 } |
| 1699 | 1798 |
| 1700 Label result_not_a_smi; | 1799 Label result_not_a_smi; |
| 1701 switch (op) { | 1800 switch (op) { |
| 1702 case Token::BIT_OR: | 1801 case Token::BIT_OR: |
| 1703 __ orr(r2, r3, Operand(r2)); | 1802 __ orr(r2, r3, Operand(r2)); |
| 1704 break; | 1803 break; |
| 1705 case Token::BIT_XOR: | 1804 case Token::BIT_XOR: |
| 1706 __ eor(r2, r3, Operand(r2)); | 1805 __ eor(r2, r3, Operand(r2)); |
| 1707 break; | 1806 break; |
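For context, the `TruncateNumberToI` calls introduced above must preserve ECMAScript ToInt32 semantics for the bitwise and shift operators: the operand is truncated toward zero, reduced modulo 2^32, and reinterpreted as signed (in JS, `(Math.pow(2, 32) + 5) | 0 === 5`). A minimal sketch of those semantics for a plain double follows; it illustrates the required behavior, not V8's implementation:

```cpp
#include <cmath>
#include <cstdint>

// ECMAScript ToInt32: NaN and infinities map to 0; otherwise truncate
// toward zero, reduce modulo 2^32, reinterpret as signed two's complement.
int32_t ToInt32(double d) {
  if (!std::isfinite(d)) return 0;
  double m = std::fmod(std::trunc(d), 4294967296.0);  // in (-2^32, 2^32)
  if (m < 0) m += 4294967296.0;                       // now in [0, 2^32)
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}
```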
| (...skipping 5455 matching lines...) | |
| 7163 __ bind(&fast_elements_case); | 7262 __ bind(&fast_elements_case); |
| 7164 GenerateCase(masm, FAST_ELEMENTS); | 7263 GenerateCase(masm, FAST_ELEMENTS); |
| 7165 } | 7264 } |
| 7166 | 7265 |
| 7167 | 7266 |
| 7168 #undef __ | 7267 #undef __ |
| 7169 | 7268 |
| 7170 } } // namespace v8::internal | 7269 } } // namespace v8::internal |
| 7171 | 7270 |
| 7172 #endif // V8_TARGET_ARCH_ARM | 7271 #endif // V8_TARGET_ARCH_ARM |