Chromium Code Reviews

Diff: src/arm/code-stubs-arm.cc

Issue 23129003: Arm support for DoubleToIStub (truncating). (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Fix CALL_GENERATED_CODE call when on native. Created 7 years, 4 months ago
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 616 matching lines...)
  // Compute lower part of fraction (last 12 bits).
  __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
  // And the top (top 20 bits).
  __ orr(exponent,
         exponent,
         Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
  __ Ret();
}

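For reference (not part of the patch): these stubs treat a double as two 32-bit words, and the HeapNumber constants used above and below simply decode the standard IEEE-754 layout:

    // high word: [ sign:1 | biased exponent:11 | top 20 mantissa bits ]
    // low word:  [ bottom 32 mantissa bits ]
    //
    // So HeapNumber::kMantissaBitsInTopWord == 20, kExponentBits == 11,
    // kExponentShift == 20, and kExponentBias == 1023 (hence the
    // STATIC_ASSERT below that Bias + 1 == 1024).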
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+  Label out_of_range, only_low, negate, done;
+  Register input_reg = source();
+  Register result_reg = destination();
+
+  int double_offset = offset();
+  // Account for saved regs if input is sp.
+  if (input_reg.is(sp)) double_offset += 2 * kPointerSize;
+
+  // Immediate values for this stub fit in instructions, so it's safe to use ip.
+  Register scratch = ip;
+  Register scratch_low =
+      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
+  Register scratch_high =
+      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
+  LowDwVfpRegister double_scratch = kScratchDoubleReg;
+
+  __ Push(scratch_high, scratch_low);
+
+  if (!skip_fastpath()) {
+    // Load double input.
+    __ vldr(double_scratch, MemOperand(input_reg, double_offset));
+    __ vmov(scratch_low, scratch_high, double_scratch);
+
+    // Do fast-path convert from double to int.
+    __ vcvt_s32_f64(double_scratch.low(), double_scratch);
+    __ vmov(result_reg, double_scratch.low());
+
+    // If result is not saturated (0x7fffffff or 0x80000000), we are done.
+    __ sub(scratch, result_reg, Operand(1));
+    __ cmp(scratch, Operand(0x7ffffffe));
+    __ b(lt, &done);
+  } else {
+    // We've already done MacroAssembler::TryFastTruncatedDoubleToILoad, so we
+    // know exponent > 31, so we can skip the vcvt_s32_f64 which will saturate.
+    if (double_offset == 0) {
+      __ ldm(ia, input_reg, scratch_low.bit() | scratch_high.bit());
+    } else {
+      __ ldr(scratch_low, MemOperand(input_reg, double_offset));
+      __ ldr(scratch_high, MemOperand(input_reg, double_offset + kIntSize));
+    }
+  }
+
+  __ Ubfx(scratch, scratch_high,
+          HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+  // Load scratch with exponent - 1. This is faster than loading
+  // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
+  STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
+  __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
+  // If exponent is greater than or equal to 84, the 32 least significant
+  // bits of the integer value are all 0s (84 = 52 mantissa bits + 32 bits
+  // shifted out), so the truncated result is 0.
+  // Compare exponent with 84 (compare exponent - 1 with 83).
+  __ cmp(scratch, Operand(83));
+  __ b(ge, &out_of_range);
+
+  // If we reach this code, 31 <= exponent <= 83.
+  // So, we don't have to handle cases where 0 <= exponent <= 20 for
+  // which we would need to shift right the high part of the mantissa.
+  // Scratch contains exponent - 1.
+  // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
+  __ rsb(scratch, scratch, Operand(51), SetCC);
+  __ b(ls, &only_low);
+  // 21 <= exponent <= 51, shift scratch_low and scratch_high
+  // to generate the result.
+  __ mov(scratch_low, Operand(scratch_low, LSR, scratch));
+  // Scratch contains: 52 - exponent.
+  // We need: exponent - 20.
+  // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
+  __ rsb(scratch, scratch, Operand(32));
+  __ Ubfx(result_reg, scratch_high,
+          0, HeapNumber::kMantissaBitsInTopWord);
+  // Set the implicit 1 before the mantissa part in scratch_high.
+  __ orr(result_reg, result_reg,
+         Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+  __ orr(result_reg, scratch_low, Operand(result_reg, LSL, scratch));
+  __ b(&negate);
+
+  __ bind(&out_of_range);
+  __ mov(result_reg, Operand::Zero());
+  __ b(&done);
+
+  __ bind(&only_low);
+  // 52 <= exponent <= 83, shift only scratch_low.
+  // On entry, scratch contains: 52 - exponent.
+  __ rsb(scratch, scratch, Operand::Zero());
+  __ mov(result_reg, Operand(scratch_low, LSL, scratch));
+
+  __ bind(&negate);
+  // If the input was positive, scratch_high ASR 31 equals 0 and
+  // scratch_high LSR 31 equals 0.
+  // New result = (result eor 0) + 0 = result.
+  // If the input was negative, we have to negate the result.
+  // scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
+  // New result = (result eor 0xffffffff) + 1 = 0 - result.
+  __ eor(result_reg, result_reg, Operand(scratch_high, ASR, 31));
+  __ add(result_reg, result_reg, Operand(scratch_high, LSR, 31));
+
+  __ bind(&done);
+
+  __ Pop(scratch_high, scratch_low);
+  __ Ret();
+}

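To make the new stub easier to audit, here is a host-side C++ sketch of the same truncation, written against the two 32-bit words the stub keeps in scratch_low and scratch_high. It is an illustration only, not V8 code: the initial cast stands in for the vcvt_s32_f64 fast path, and the function and variable names are invented.

    #include <cstdint>
    #include <cstring>

    // Sketch only -- mirrors the stub's logic on the host.
    int32_t TruncateDoubleToI(double input) {
      uint64_t bits;
      std::memcpy(&bits, &input, sizeof(bits));
      uint32_t lo = static_cast<uint32_t>(bits);        // scratch_low
      uint32_t hi = static_cast<uint32_t>(bits >> 32);  // scratch_high

      // Ubfx equivalent: the biased exponent is bits 20..30 of the high word.
      int32_t exponent = static_cast<int32_t>((hi >> 20) & 0x7FF) - 1023;

      // Stand-in for the vcvt_s32_f64 fast path: anything with |input| < 2^31
      // converts without saturating. (The stub instead detects saturation via
      // sub/cmp(0x7ffffffe): only 0x7fffffff and 0x80000000 land at or above
      // 0x7ffffffe after subtracting 1.)
      if (exponent <= 30) return static_cast<int32_t>(input);

      // out_of_range: for exponent >= 84 the 32 low bits of the integer value
      // are all zero (52 mantissa bits, shifted left by at least 32), so the
      // truncated result is 0. This also swallows Infinity and NaN (0x7FF).
      if (exponent >= 84) return 0;

      uint32_t result;
      int32_t right_shift = 52 - exponent;  // the stub's 'scratch' after rsb
      if (right_shift <= 0) {
        // only_low: 52 <= exponent <= 83, only the low mantissa word survives.
        result = lo << (exponent - 52);
      } else {
        // 31 <= exponent <= 51: combine both words; restore the implicit 1.
        uint32_t mantissa_high = (hi & 0x000FFFFF) | (1u << 20);
        result = (lo >> right_shift) | (mantissa_high << (32 - right_shift));
      }

      // negate: branchless conditional negate, the stub's eor/add pair.
      uint32_t sign_mask =
          static_cast<uint32_t>(static_cast<int32_t>(hi) >> 31);
      result = (result ^ sign_mask) + (hi >> 31);
      return static_cast<int32_t>(result);
    }

The eor/add pair at the end is a branchless conditional negate: for a positive input it XORs with 0 and adds 0, leaving the result unchanged; for a negative input it XORs with 0xffffffff and adds 1, i.e. two's-complement negation. Discarding the high bits of wide values is correct because ECMAScript's truncating ToInt32 is defined modulo 2^32.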
bool WriteInt32ToHeapNumberStub::IsPregenerated() {
  // These variants are compiled ahead of time. See next method.
  if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
    return true;
  }
  if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
    return true;
  }
  // Other register combinations are generated as and when they are needed,
  // so it is unsafe to call them from stubs (we can't generate a stub while
(...skipping 937 matching lines...)
                                     bool smi_operands,
                                     Label* not_numbers,
                                     Label* gc_required,
                                     Label* miss,
                                     Token::Value op,
                                     OverwriteMode mode) {
  Register left = r1;
  Register right = r0;
  Register scratch1 = r6;
  Register scratch2 = r7;
-  Register scratch3 = r4;

  ASSERT(smi_operands || (not_numbers != NULL));
  if (smi_operands) {
    __ AssertSmi(left);
    __ AssertSmi(right);
  }
  if (left_type == BinaryOpIC::SMI) {
    __ JumpIfNotSmi(left, miss);
  }
  if (right_type == BinaryOpIC::SMI) {
(...skipping 77 matching lines...)
    case Token::BIT_XOR:
    case Token::BIT_AND:
    case Token::SAR:
    case Token::SHR:
    case Token::SHL: {
      if (smi_operands) {
        __ SmiUntag(r3, left);
        __ SmiUntag(r2, right);
      } else {
        // Convert operands to 32-bit integers. Right in r2 and left in r3.
-        __ ConvertNumberToInt32(
-            left, r3, heap_number_map,
-            scratch1, scratch2, scratch3, d0, d1, not_numbers);
-        __ ConvertNumberToInt32(
-            right, r2, heap_number_map,
-            scratch1, scratch2, scratch3, d0, d1, not_numbers);
+        __ TruncateNumberToI(left, r3, heap_number_map, scratch1, not_numbers);
+        __ TruncateNumberToI(right, r2, heap_number_map, scratch1, not_numbers);
      }

      Label result_not_a_smi;
      switch (op) {
        case Token::BIT_OR:
          __ orr(r2, r3, Operand(r2));
          break;
        case Token::BIT_XOR:
          __ eor(r2, r3, Operand(r2));
          break;
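This hunk is the payoff of the new stub: the old ConvertNumberToInt32 needed three core scratch registers plus two VFP registers (d0, d1), while the replacement gets by with a single scratch, which is why scratch3 (r4, removed above) is no longer allocated. Inferring only from the call sites in this hunk, the new MacroAssembler helper presumably has a shape like the following; see src/arm/macro-assembler-arm.h in this patch for the real declaration:

    // Hypothetical shape, inferred from the call sites above: converts
    // 'object' (a Smi or HeapNumber) to an int32 in 'result', jumping to
    // 'not_number' if it is neither; presumably falls back to DoubleToIStub
    // for doubles outside int32 range.
    void TruncateNumberToI(Register object,
                           Register result,
                           Register heap_number_map,
                           Register scratch1,
                           Label* not_number);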
(...skipping 5455 matching lines...)
  __ bind(&fast_elements_case);
  GenerateCase(masm, FAST_ELEMENTS);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM
