Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 115 matching lines...) | |
| 126 | 126 |
| 127 static void EmitIdenticalObjectComparison(MacroAssembler* masm, | 127 static void EmitIdenticalObjectComparison(MacroAssembler* masm, |
| 128 Label* slow, | 128 Label* slow, |
| 129 Condition cond); | 129 Condition cond); |
| 130 static void EmitSmiNonsmiComparison(MacroAssembler* masm, | 130 static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
| 131 Register lhs, | 131 Register lhs, |
| 132 Register rhs, | 132 Register rhs, |
| 133 Label* lhs_not_nan, | 133 Label* lhs_not_nan, |
| 134 Label* slow, | 134 Label* slow, |
| 135 bool strict); | 135 bool strict); |
| 136 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond); | |
| 137 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, | 136 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
| 138 Register lhs, | 137 Register lhs, |
| 139 Register rhs); | 138 Register rhs); |
| 140 | 139 |
| 141 | 140 |
| 142 // Check if the operand is a heap number. | 141 // Check if the operand is a heap number. |
| 143 static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand, | 142 static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand, |
| 144 Register scratch1, Register scratch2, | 143 Register scratch1, Register scratch2, |
| 145 Label* not_a_heap_number) { | 144 Label* not_a_heap_number) { |
| 146 __ ldr(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset)); | 145 __ ldr(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset)); |
| (...skipping 496 matching lines...) | |
| 643 exponent, | 642 exponent, |
| 644 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord)); | 643 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord)); |
| 645 __ Ret(); | 644 __ Ret(); |
| 646 } | 645 } |
| 647 | 646 |
| 648 | 647 |
| 649 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, | 648 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, |
| 650 FloatingPointHelper::Destination destination, | 649 FloatingPointHelper::Destination destination, |
| 651 Register scratch1, | 650 Register scratch1, |
| 652 Register scratch2) { | 651 Register scratch2) { |
| 653 if (CpuFeatures::IsSupported(VFP2)) { | 652 __ mov(scratch1, Operand(r0, ASR, kSmiTagSize)); |
| 654 CpuFeatureScope scope(masm, VFP2); | 653 __ vmov(d7.high(), scratch1); |
| 655 __ mov(scratch1, Operand(r0, ASR, kSmiTagSize)); | 654 __ vcvt_f64_s32(d7, d7.high()); |
| 656 __ vmov(d7.high(), scratch1); | 655 __ mov(scratch1, Operand(r1, ASR, kSmiTagSize)); |
| 657 __ vcvt_f64_s32(d7, d7.high()); | 656 __ vmov(d6.high(), scratch1); |
| 658 __ mov(scratch1, Operand(r1, ASR, kSmiTagSize)); | 657 __ vcvt_f64_s32(d6, d6.high()); |
| 659 __ vmov(d6.high(), scratch1); | 658 if (destination == kCoreRegisters) { |
| 660 __ vcvt_f64_s32(d6, d6.high()); | 659 __ vmov(r2, r3, d7); |
| 661 if (destination == kCoreRegisters) { | 660 __ vmov(r0, r1, d6); |
| 662 __ vmov(r2, r3, d7); | |
| 663 __ vmov(r0, r1, d6); | |
| 664 } | |
| 665 } else { | |
| 666 ASSERT(destination == kCoreRegisters); | |
| 667 // Write Smi from r0 to r3 and r2 in double format. | |
| 668 __ mov(scratch1, Operand(r0)); | |
| 669 ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2); | |
| 670 __ push(lr); | |
| 671 __ Call(stub1.GetCode(masm->isolate())); | |
| 672 // Write Smi from r1 to r1 and r0 in double format. | |
| 673 __ mov(scratch1, Operand(r1)); | |
| 674 ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2); | |
| 675 __ Call(stub2.GetCode(masm->isolate())); | |
| 676 __ pop(lr); | |
| 677 } | 661 } |
| 678 } | 662 } |
| 679 | 663 |
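
For readers following the VFP path that now runs unconditionally: `LoadSmis` untags each smi with an arithmetic shift right by `kSmiTagSize` and widens the result to a double with `vcvt_f64_s32`. A minimal C++ sketch of the value-level effect, assuming the 32-bit smi encoding (tag bit 0 in the low bit, signed value above it):

```cpp
#include <cassert>
#include <cstdint>

constexpr int kSmiTagSize = 1;  // low bit is the tag; 0 marks a smi

double SmiToDouble(int32_t tagged) {
  assert((tagged & 1) == 0);                 // a smi, not a heap pointer
  int32_t untagged = tagged >> kSmiTagSize;  // ASR, as in Operand(r0, ASR, 1)
  return static_cast<double>(untagged);      // what vcvt_f64_s32 computes
}

int main() {
  int32_t tagged = -7 * 2;                   // smi encoding of -7
  assert(SmiToDouble(tagged) == -7.0);
}
```
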
| 680 | 664 |
| 681 void FloatingPointHelper::LoadNumber(MacroAssembler* masm, | 665 void FloatingPointHelper::LoadNumber(MacroAssembler* masm, |
| 682 Destination destination, | 666 Destination destination, |
| 683 Register object, | 667 Register object, |
| 684 DwVfpRegister dst, | 668 DwVfpRegister dst, |
| 685 Register dst1, | 669 Register dst1, |
| 686 Register dst2, | 670 Register dst2, |
| 687 Register heap_number_map, | 671 Register heap_number_map, |
| 688 Register scratch1, | 672 Register scratch1, |
| 689 Register scratch2, | 673 Register scratch2, |
| 690 Label* not_number) { | 674 Label* not_number) { |
| 691 __ AssertRootValue(heap_number_map, | 675 __ AssertRootValue(heap_number_map, |
| 692 Heap::kHeapNumberMapRootIndex, | 676 Heap::kHeapNumberMapRootIndex, |
| 693 "HeapNumberMap register clobbered."); | 677 "HeapNumberMap register clobbered."); |
| 694 | 678 |
| 695 Label is_smi, done; | 679 Label is_smi, done; |
| 696 | 680 |
| 697 // Smi-check | 681 // Smi-check |
| 698 __ UntagAndJumpIfSmi(scratch1, object, &is_smi); | 682 __ UntagAndJumpIfSmi(scratch1, object, &is_smi); |
| 699 // Heap number check | 683 // Heap number check |
| 700 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); | 684 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); |
| 701 | 685 |
| 702 // Handle loading a double from a heap number. | 686 // Handle loading a double from a heap number. |
| 703 if (CpuFeatures::IsSupported(VFP2) && | 687 if (destination == kVFPRegisters) { |
| 704 destination == kVFPRegisters) { | |
| 705 CpuFeatureScope scope(masm, VFP2); | |
| 706 // Load the double from tagged HeapNumber to double register. | 688 // Load the double from tagged HeapNumber to double register. |
| 707 __ sub(scratch1, object, Operand(kHeapObjectTag)); | 689 __ sub(scratch1, object, Operand(kHeapObjectTag)); |
| 708 __ vldr(dst, scratch1, HeapNumber::kValueOffset); | 690 __ vldr(dst, scratch1, HeapNumber::kValueOffset); |
| 709 } else { | 691 } else { |
| 710 ASSERT(destination == kCoreRegisters); | 692 ASSERT(destination == kCoreRegisters); |
| 711 // Load the double from heap number to dst1 and dst2 in double format. | 693 // Load the double from heap number to dst1 and dst2 in double format. |
| 712 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); | 694 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); |
| 713 } | 695 } |
| 714 __ jmp(&done); | 696 __ jmp(&done); |
| 715 | 697 |
| 716 // Handle loading a double from a smi. | 698 // Handle loading a double from a smi. |
| 717 __ bind(&is_smi); | 699 __ bind(&is_smi); |
| 718 if (CpuFeatures::IsSupported(VFP2)) { | 700 // Convert smi to double using VFP instructions. |
| 719 CpuFeatureScope scope(masm, VFP2); | 701 __ vmov(dst.high(), scratch1); |
| 720 // Convert smi to double using VFP instructions. | 702 __ vcvt_f64_s32(dst, dst.high()); |
| 721 __ vmov(dst.high(), scratch1); | 703 if (destination == kCoreRegisters) { |
| 722 __ vcvt_f64_s32(dst, dst.high()); | 704 // Load the converted smi to dst1 and dst2 in double format. |
| 723 if (destination == kCoreRegisters) { | 705 __ vmov(dst1, dst2, dst); |
| 724 // Load the converted smi to dst1 and dst2 in double format. | |
| 725 __ vmov(dst1, dst2, dst); | |
| 726 } | |
| 727 } else { | |
| 728 ASSERT(destination == kCoreRegisters); | |
| 729 // Write smi to dst1 and dst2 in double format. | |
| 730 __ mov(scratch1, Operand(object)); | |
| 731 ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2); | |
| 732 __ push(lr); | |
| 733 __ Call(stub.GetCode(masm->isolate())); | |
| 734 __ pop(lr); | |
| 735 } | 706 } |
| 736 | 707 |
| 737 __ bind(&done); | 708 __ bind(&done); |
| 738 } | 709 } |
| 739 | 710 |
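
Both branches of `LoadNumber` depend on the tagged-pointer layout: a heap pointer carries `kHeapObjectTag` (1), so the stub subtracts it before `vldr` reads the IEEE-754 payload at `HeapNumber::kValueOffset`. A hedged C++ model of that addressing; `FakeHeapNumber` is an illustrative stand-in, not V8's real object layout:

```cpp
#include <cstdint>

constexpr intptr_t kHeapObjectTag = 1;  // heap pointers are address + 1

// Hypothetical stand-in for a HeapNumber: a map word, then the payload.
struct FakeHeapNumber {
  intptr_t map;
  double value;  // plays the role of HeapNumber::kValueOffset
};

double LoadHeapNumberValue(intptr_t tagged) {
  // Mirrors: sub(scratch1, object, Operand(kHeapObjectTag)); then vldr.
  auto* obj = reinterpret_cast<const FakeHeapNumber*>(tagged - kHeapObjectTag);
  return obj->value;
}

int main() {
  FakeHeapNumber n{0, 3.25};
  intptr_t tagged = reinterpret_cast<intptr_t>(&n) + kHeapObjectTag;
  return LoadHeapNumberValue(tagged) == 3.25 ? 0 : 1;
}
```
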
| 740 | 711 |
| 741 void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm, | 712 void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm, |
| 742 Register object, | 713 Register object, |
| 743 Register dst, | 714 Register dst, |
| 744 Register heap_number_map, | 715 Register heap_number_map, |
| (...skipping 26 matching lines...) | |
| 771 Register dst_mantissa, | 742 Register dst_mantissa, |
| 772 Register dst_exponent, | 743 Register dst_exponent, |
| 773 Register scratch2, | 744 Register scratch2, |
| 774 SwVfpRegister single_scratch) { | 745 SwVfpRegister single_scratch) { |
| 775 ASSERT(!int_scratch.is(scratch2)); | 746 ASSERT(!int_scratch.is(scratch2)); |
| 776 ASSERT(!int_scratch.is(dst_mantissa)); | 747 ASSERT(!int_scratch.is(dst_mantissa)); |
| 777 ASSERT(!int_scratch.is(dst_exponent)); | 748 ASSERT(!int_scratch.is(dst_exponent)); |
| 778 | 749 |
| 779 Label done; | 750 Label done; |
| 780 | 751 |
| 781 if (CpuFeatures::IsSupported(VFP2)) { | 752 __ vmov(single_scratch, int_scratch); |
| 782 CpuFeatureScope scope(masm, VFP2); | 753 __ vcvt_f64_s32(double_dst, single_scratch); |
| 783 __ vmov(single_scratch, int_scratch); | 754 if (destination == kCoreRegisters) { |
| 784 __ vcvt_f64_s32(double_dst, single_scratch); | 755 __ vmov(dst_mantissa, dst_exponent, double_dst); |
| 785 if (destination == kCoreRegisters) { | |
| 786 __ vmov(dst_mantissa, dst_exponent, double_dst); | |
| 787 } | |
| 788 } else { | |
| 789 Label fewer_than_20_useful_bits; | |
| 790 // Expected output: | |
| 791 // | dst_exponent | dst_mantissa | | |
| 792 // | s | exp | mantissa | | |
| 793 | |
| 794 // Check for zero. | |
| 795 __ cmp(int_scratch, Operand::Zero()); | |
| 796 __ mov(dst_exponent, int_scratch); | |
| 797 __ mov(dst_mantissa, int_scratch); | |
| 798 __ b(eq, &done); | |
| 799 | |
| 800 // Preload the sign of the value. | |
| 801 __ and_(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask), SetCC); | |
| 802 // Get the absolute value of the object (as an unsigned integer). | |
| 803 __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi); | |
| 804 | |
| 805 // Get mantissa[51:20]. | |
| 806 | |
| 807 // Get the position of the first set bit. | |
| 808 __ CountLeadingZeros(dst_mantissa, int_scratch, scratch2); | |
| 809 __ rsb(dst_mantissa, dst_mantissa, Operand(31)); | |
| 810 | |
| 811 // Set the exponent. | |
| 812 __ add(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias)); | |
| 813 __ Bfi(dst_exponent, scratch2, scratch2, | |
| 814 HeapNumber::kExponentShift, HeapNumber::kExponentBits); | |
| 815 | |
| 816 // Clear the first non-null bit. | |
| 817 __ mov(scratch2, Operand(1)); | |
| 818 __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst_mantissa)); | |
| 819 | |
| 820 __ cmp(dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord)); | |
| 821 // Get the number of bits to set in the lower part of the mantissa. | |
| 822 __ sub(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord), | |
| 823 SetCC); | |
| 824 __ b(mi, &fewer_than_20_useful_bits); | |
| 825 // Set the higher 20 bits of the mantissa. | |
| 826 __ orr(dst_exponent, dst_exponent, Operand(int_scratch, LSR, scratch2)); | |
| 827 __ rsb(scratch2, scratch2, Operand(32)); | |
| 828 __ mov(dst_mantissa, Operand(int_scratch, LSL, scratch2)); | |
| 829 __ b(&done); | |
| 830 | |
| 831 __ bind(&fewer_than_20_useful_bits); | |
| 832 __ rsb(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord)); | |
| 833 __ mov(scratch2, Operand(int_scratch, LSL, scratch2)); | |
| 834 __ orr(dst_exponent, dst_exponent, scratch2); | |
| 835 // Set dst1 to 0. | |
| 836 __ mov(dst_mantissa, Operand::Zero()); | |
| 837 } | 756 } |
| 838 __ bind(&done); | 757 __ bind(&done); |
| 839 } | 758 } |
| 840 | 759 |
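
The deleted core-register path of `ConvertIntToDouble` built the IEEE-754 encoding by hand: sign from the input, exponent from the position of the highest set bit (via `CountLeadingZeros`), mantissa split across the two 32-bit words. A C++ sketch of the same construction, folded into one 64-bit word for clarity; it uses the GCC/Clang `__builtin_clz` and is a rough model of the removed code, not a drop-in replacement:

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

double IntToDoubleBits(int32_t v) {
  if (v == 0) return 0.0;  // the removed code's early-out for zero
  uint64_t sign = v < 0 ? 1ull << 63 : 0;
  uint32_t mag = v < 0 ? 0u - static_cast<uint32_t>(v) : static_cast<uint32_t>(v);
  int top = 31 - __builtin_clz(mag);  // CountLeadingZeros, then rsb(.., .., 31)
  uint64_t exponent = static_cast<uint64_t>(top) + 1023;  // kExponentBias
  uint64_t mantissa =                 // shift out the implicit leading 1
      (static_cast<uint64_t>(mag) << (52 - top)) & ((1ull << 52) - 1);
  uint64_t bits = sign | (exponent << 52) | mantissa;
  double d;
  std::memcpy(&d, &bits, sizeof d);
  return d;
}

int main() {
  for (int32_t v : {1, -1, 42, -123456789}) {
    assert(IntToDoubleBits(v) == static_cast<double>(v));
  }
}
```
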
| 841 | 760 |
| 842 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, | 761 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, |
| 843 Register object, | 762 Register object, |
| 844 Destination destination, | 763 Destination destination, |
| 845 DwVfpRegister double_dst, | 764 DwVfpRegister double_dst, |
| 846 DwVfpRegister double_scratch, | 765 DwVfpRegister double_scratch, |
| (...skipping 18 matching lines...) | |
| 865 dst_exponent, scratch2, single_scratch); | 784 dst_exponent, scratch2, single_scratch); |
| 866 __ b(&done); | 785 __ b(&done); |
| 867 | 786 |
| 868 __ bind(&obj_is_not_smi); | 787 __ bind(&obj_is_not_smi); |
| 869 __ AssertRootValue(heap_number_map, | 788 __ AssertRootValue(heap_number_map, |
| 870 Heap::kHeapNumberMapRootIndex, | 789 Heap::kHeapNumberMapRootIndex, |
| 871 "HeapNumberMap register clobbered."); | 790 "HeapNumberMap register clobbered."); |
| 872 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); | 791 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); |
| 873 | 792 |
| 874 // Load the number. | 793 // Load the number. |
| 875 if (CpuFeatures::IsSupported(VFP2)) { | 794 // Load the double value. |
| 876 CpuFeatureScope scope(masm, VFP2); | 795 __ sub(scratch1, object, Operand(kHeapObjectTag)); |
| 877 // Load the double value. | 796 __ vldr(double_dst, scratch1, HeapNumber::kValueOffset); |
| 878 __ sub(scratch1, object, Operand(kHeapObjectTag)); | |
| 879 __ vldr(double_dst, scratch1, HeapNumber::kValueOffset); | |
| 880 | 797 |
| 881 __ TestDoubleIsInt32(double_dst, double_scratch); | 798 __ TestDoubleIsInt32(double_dst, double_scratch); |
| 882 // Jump to not_int32 if the operation did not succeed. | 799 // Jump to not_int32 if the operation did not succeed. |
| 883 __ b(ne, not_int32); | 800 __ b(ne, not_int32); |
| 884 | 801 |
| 885 if (destination == kCoreRegisters) { | 802 if (destination == kCoreRegisters) { |
| 886 __ vmov(dst_mantissa, dst_exponent, double_dst); | 803 __ vmov(dst_mantissa, dst_exponent, double_dst); |
| 887 } | |
| 888 | |
| 889 } else { | |
| 890 ASSERT(!scratch1.is(object) && !scratch2.is(object)); | |
| 891 // Load the double value in the destination registers. | |
| 892 bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent); | |
| 893 if (save_registers) { | |
| 894 // Save both output registers, because the other one probably holds | |
| 895 // an important value too. | |
| 896 __ Push(dst_exponent, dst_mantissa); | |
| 897 } | |
| 898 __ Ldrd(dst_mantissa, dst_exponent, | |
| 899 FieldMemOperand(object, HeapNumber::kValueOffset)); | |
| 900 | |
| 901 // Check for 0 and -0. | |
| 902 Label zero; | |
| 903 __ bic(scratch1, dst_exponent, Operand(HeapNumber::kSignMask)); | |
| 904 __ orr(scratch1, scratch1, Operand(dst_mantissa)); | |
| 905 __ cmp(scratch1, Operand::Zero()); | |
| 906 __ b(eq, &zero); | |
| 907 | |
| 908 // Check that the value can be exactly represented by a 32-bit integer. | |
| 909 // Jump to not_int32 if that's not the case. | |
| 910 Label restore_input_and_miss; | |
| 911 DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2, | |
| 912 &restore_input_and_miss); | |
| 913 | |
| 914 // dst_* were trashed. Reload the double value. | |
| 915 if (save_registers) { | |
| 916 __ Pop(dst_exponent, dst_mantissa); | |
| 917 } | |
| 918 __ Ldrd(dst_mantissa, dst_exponent, | |
| 919 FieldMemOperand(object, HeapNumber::kValueOffset)); | |
| 920 __ b(&done); | |
| 921 | |
| 922 __ bind(&restore_input_and_miss); | |
| 923 if (save_registers) { | |
| 924 __ Pop(dst_exponent, dst_mantissa); | |
| 925 } | |
| 926 __ b(not_int32); | |
| 927 | |
| 928 __ bind(&zero); | |
| 929 if (save_registers) { | |
| 930 __ Drop(2); | |
| 931 } | |
| 932 } | 804 } |
| 933 | |
| 934 __ bind(&done); | 805 __ bind(&done); |
| 935 } | 806 } |
| 936 | 807 |
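
`TestDoubleIsInt32` accepts a double only if converting it to int32 and back reproduces it exactly, which rejects NaN, infinities, out-of-range magnitudes, and fractional values in one check. A rough C++ model of that round trip; the range guard is explicit here where the VFP sequence gets it implicitly from the conversion:

```cpp
#include <cassert>
#include <cmath>
#include <cstdint>

bool DoubleIsInt32(double d, int32_t* out) {
  if (!(d >= INT32_MIN && d <= INT32_MAX)) return false;  // also rejects NaN
  int32_t i = static_cast<int32_t>(d);       // vcvt_s32_f64, round toward zero
  if (static_cast<double>(i) != d) return false;  // vcvt_f64_s32 + compare
  *out = i;
  return true;
}

int main() {
  int32_t v;
  assert(DoubleIsInt32(-5.0, &v) && v == -5);
  assert(!DoubleIsInt32(0.5, &v));            // fractional
  assert(!DoubleIsInt32(4294967296.0, &v));   // 2^32, out of int32 range
  assert(!DoubleIsInt32(std::nan(""), &v));   // unordered
}
```
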
| 937 | 808 |
| 938 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, | 809 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, |
| 939 Register object, | 810 Register object, |
| 940 Register dst, | 811 Register dst, |
| 941 Register heap_number_map, | 812 Register heap_number_map, |
| 942 Register scratch1, | 813 Register scratch1, |
| 943 Register scratch2, | 814 Register scratch2, |
| (...skipping 12 matching lines...) | |
| 956 __ UntagAndJumpIfSmi(dst, object, &done); | 827 __ UntagAndJumpIfSmi(dst, object, &done); |
| 957 | 828 |
| 958 __ AssertRootValue(heap_number_map, | 829 __ AssertRootValue(heap_number_map, |
| 959 Heap::kHeapNumberMapRootIndex, | 830 Heap::kHeapNumberMapRootIndex, |
| 960 "HeapNumberMap register clobbered."); | 831 "HeapNumberMap register clobbered."); |
| 961 | 832 |
| 962 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined); | 833 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined); |
| 963 | 834 |
| 964 // Object is a heap number. | 835 // Object is a heap number. |
| 965 // Convert the floating point value to a 32-bit integer. | 836 // Convert the floating point value to a 32-bit integer. |
| 966 if (CpuFeatures::IsSupported(VFP2)) { | 837 // Load the double value. |
| 967 CpuFeatureScope scope(masm, VFP2); | 838 __ sub(scratch1, object, Operand(kHeapObjectTag)); |
| 839 __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset); | |
| 968 | 840 |
| 969 // Load the double value. | 841 __ TryDoubleToInt32Exact(dst, double_scratch0, double_scratch1); |
| 970 __ sub(scratch1, object, Operand(kHeapObjectTag)); | 842 // Jump to not_int32 if the operation did not succeed. |
| 971 __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset); | 843 __ b(ne, not_int32); |
| 972 | |
| 973 __ TryDoubleToInt32Exact(dst, double_scratch0, double_scratch1); | |
| 974 // Jump to not_int32 if the operation did not succeed. | |
| 975 __ b(ne, not_int32); | |
| 976 } else { | |
| 977 // Load the double value in the destination registers. | |
| 978 __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); | |
| 979 __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset)); | |
| 980 | |
| 981 // Check for 0 and -0. | |
| 982 __ bic(dst, scratch1, Operand(HeapNumber::kSignMask)); | |
| 983 __ orr(dst, scratch2, Operand(dst)); | |
| 984 __ cmp(dst, Operand::Zero()); | |
| 985 __ b(eq, &done); | |
| 986 | |
| 987 DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32); | |
| 988 | |
| 989 // Registers state after DoubleIs32BitInteger. | |
| 990 // dst: mantissa[51:20]. | |
| 991 // scratch2: 1 | |
| 992 | |
| 993 // Shift back the higher bits of the mantissa. | |
| 994 __ mov(dst, Operand(dst, LSR, scratch3)); | |
| 995 // Set the implicit first bit. | |
| 996 __ rsb(scratch3, scratch3, Operand(32)); | |
| 997 __ orr(dst, dst, Operand(scratch2, LSL, scratch3)); | |
| 998 // Set the sign. | |
| 999 __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); | |
| 1000 __ tst(scratch1, Operand(HeapNumber::kSignMask)); | |
| 1001 __ rsb(dst, dst, Operand::Zero(), LeaveCC, mi); | |
| 1002 } | |
| 1003 __ b(&done); | 844 __ b(&done); |
| 1004 | 845 |
| 1005 __ bind(&maybe_undefined); | 846 __ bind(&maybe_undefined); |
| 1006 __ CompareRoot(object, Heap::kUndefinedValueRootIndex); | 847 __ CompareRoot(object, Heap::kUndefinedValueRootIndex); |
| 1007 __ b(ne, not_int32); | 848 __ b(ne, not_int32); |
| 1008 // |undefined| is truncated to 0. | 849 // |undefined| is truncated to 0. |
| 1009 __ mov(dst, Operand(Smi::FromInt(0))); | 850 __ mov(dst, Operand(Smi::FromInt(0))); |
| 1010 // Fall through. | 851 // Fall through. |
| 1011 | 852 |
| 1012 __ bind(&done); | 853 __ bind(&done); |
| (...skipping 73 matching lines...) | |
| 1086 | 927 |
| 1087 // Assert that heap_number_result is callee-saved. | 928 // Assert that heap_number_result is callee-saved. |
| 1088 // We currently always use r5 to pass it. | 929 // We currently always use r5 to pass it. |
| 1089 ASSERT(heap_number_result.is(r5)); | 930 ASSERT(heap_number_result.is(r5)); |
| 1090 | 931 |
| 1091 // Push the current return address before the C call. Return will be | 932 // Push the current return address before the C call. Return will be |
| 1092 // through pop(pc) below. | 933 // through pop(pc) below. |
| 1093 __ push(lr); | 934 __ push(lr); |
| 1094 __ PrepareCallCFunction(0, 2, scratch); | 935 __ PrepareCallCFunction(0, 2, scratch); |
| 1095 if (masm->use_eabi_hardfloat()) { | 936 if (masm->use_eabi_hardfloat()) { |
| 1096 CpuFeatureScope scope(masm, VFP2); | |
| 1097 __ vmov(d0, r0, r1); | 937 __ vmov(d0, r0, r1); |
| 1098 __ vmov(d1, r2, r3); | 938 __ vmov(d1, r2, r3); |
| 1099 } | 939 } |
| 1100 { | 940 { |
| 1101 AllowExternalCallThatCantCauseGC scope(masm); | 941 AllowExternalCallThatCantCauseGC scope(masm); |
| 1102 __ CallCFunction( | 942 __ CallCFunction( |
| 1103 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); | 943 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); |
| 1104 } | 944 } |
| 1105 // Store answer in the overwritable heap number. Double returned in | 945 // Store answer in the overwritable heap number. Double returned in |
| 1106 // registers r0 and r1 or in d0. | 946 // registers r0 and r1 or in d0. |
| 1107 if (masm->use_eabi_hardfloat()) { | 947 if (masm->use_eabi_hardfloat()) { |
| 1108 CpuFeatureScope scope(masm, VFP2); | |
| 1109 __ vstr(d0, | 948 __ vstr(d0, |
| 1110 FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); | 949 FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); |
| 1111 } else { | 950 } else { |
| 1112 __ Strd(r0, r1, FieldMemOperand(heap_number_result, | 951 __ Strd(r0, r1, FieldMemOperand(heap_number_result, |
| 1113 HeapNumber::kValueOffset)); | 952 HeapNumber::kValueOffset)); |
| 1114 } | 953 } |
| 1115 // Place heap_number_result in r0 and return to the pushed return address. | 954 // Place heap_number_result in r0 and return to the pushed return address. |
| 1116 __ mov(r0, Operand(heap_number_result)); | 955 __ mov(r0, Operand(heap_number_result)); |
| 1117 __ pop(pc); | 956 __ pop(pc); |
| 1118 } | 957 } |
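
The `use_eabi_hardfloat()` branches exist because the two ARM EABI variants pass double arguments differently: softfloat uses core-register pairs (r0:r1, r2:r3), hardfloat uses d0/d1, and `vmov(d0, r0, r1)` simply moves the same 64 bits between the two views. A small C++ sketch of that reinterpretation, assuming little-endian word order:

```cpp
#include <cstdint>
#include <cstring>

struct RegPair { uint32_t lo, hi; };  // softfloat's r0:r1 view of a double

RegPair SplitDouble(double d) {       // what the caller's r0/r1 would hold
  RegPair p;
  std::memcpy(&p, &d, sizeof p);
  return p;
}

double JoinDouble(RegPair p) {        // what vmov(d0, r0, r1) produces
  double d;
  std::memcpy(&d, &p, sizeof d);
  return d;
}

int main() {
  return JoinDouble(SplitDouble(1.5)) == 1.5 ? 0 : 1;
}
```
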
| (...skipping 192 matching lines...) | |
| 1311 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); | 1150 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); |
| 1312 } | 1151 } |
| 1313 __ Ret(ne); | 1152 __ Ret(ne); |
| 1314 } else { | 1153 } else { |
| 1315 // Smi compared non-strictly with a non-Smi non-heap-number. Call | 1154 // Smi compared non-strictly with a non-Smi non-heap-number. Call |
| 1316 // the runtime. | 1155 // the runtime. |
| 1317 __ b(ne, slow); | 1156 __ b(ne, slow); |
| 1318 } | 1157 } |
| 1319 | 1158 |
| 1320 // Lhs is a smi, rhs is a number. | 1159 // Lhs is a smi, rhs is a number. |
| 1321 if (CpuFeatures::IsSupported(VFP2)) { | 1160 // Convert lhs to a double in d7. |
| 1322 // Convert lhs to a double in d7. | 1161 __ SmiToDoubleVFPRegister(lhs, d7, r7, s15); |
| 1323 CpuFeatureScope scope(masm, VFP2); | 1162 // Load the double from rhs, tagged HeapNumber r0, to d6. |
| 1324 __ SmiToDoubleVFPRegister(lhs, d7, r7, s15); | 1163 __ sub(r7, rhs, Operand(kHeapObjectTag)); |
| 1325 // Load the double from rhs, tagged HeapNumber r0, to d6. | 1164 __ vldr(d6, r7, HeapNumber::kValueOffset); |
| 1326 __ sub(r7, rhs, Operand(kHeapObjectTag)); | |
| 1327 __ vldr(d6, r7, HeapNumber::kValueOffset); | |
| 1328 } else { | |
| 1329 __ push(lr); | |
| 1330 // Convert lhs to a double in r2, r3. | |
| 1331 __ mov(r7, Operand(lhs)); | |
| 1332 ConvertToDoubleStub stub1(r3, r2, r7, r6); | |
| 1333 __ Call(stub1.GetCode(masm->isolate())); | |
| 1334 // Load rhs to a double in r0, r1. | |
| 1335 __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | |
| 1336 __ pop(lr); | |
| 1337 } | |
| 1338 | 1165 |
| 1339 // We now have both loaded as doubles but we can skip the lhs nan check | 1166 // We now have both loaded as doubles but we can skip the lhs nan check |
| 1340 // since it's a smi. | 1167 // since it's a smi. |
| 1341 __ jmp(lhs_not_nan); | 1168 __ jmp(lhs_not_nan); |
| 1342 | 1169 |
| 1343 __ bind(&rhs_is_smi); | 1170 __ bind(&rhs_is_smi); |
| 1344 // Rhs is a smi. Check whether the non-smi lhs is a heap number. | 1171 // Rhs is a smi. Check whether the non-smi lhs is a heap number. |
| 1345 __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE); | 1172 __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE); |
| 1346 if (strict) { | 1173 if (strict) { |
| 1347 // If lhs is not a number and rhs is a smi then strict equality cannot | 1174 // If lhs is not a number and rhs is a smi then strict equality cannot |
| 1348 // succeed. Return non-equal. | 1175 // succeed. Return non-equal. |
| 1349 // If lhs is r0 then there is already a non zero value in it. | 1176 // If lhs is r0 then there is already a non zero value in it. |
| 1350 if (!lhs.is(r0)) { | 1177 if (!lhs.is(r0)) { |
| 1351 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); | 1178 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); |
| 1352 } | 1179 } |
| 1353 __ Ret(ne); | 1180 __ Ret(ne); |
| 1354 } else { | 1181 } else { |
| 1355 // Smi compared non-strictly with a non-smi non-heap-number. Call | 1182 // Smi compared non-strictly with a non-smi non-heap-number. Call |
| 1356 // the runtime. | 1183 // the runtime. |
| 1357 __ b(ne, slow); | 1184 __ b(ne, slow); |
| 1358 } | 1185 } |
| 1359 | 1186 |
| 1360 // Rhs is a smi, lhs is a heap number. | 1187 // Rhs is a smi, lhs is a heap number. |
| 1361 if (CpuFeatures::IsSupported(VFP2)) { | 1188 // Load the double from lhs, tagged HeapNumber r1, to d7. |
| 1362 CpuFeatureScope scope(masm, VFP2); | 1189 __ sub(r7, lhs, Operand(kHeapObjectTag)); |
| 1363 // Load the double from lhs, tagged HeapNumber r1, to d7. | 1190 __ vldr(d7, r7, HeapNumber::kValueOffset); |
| 1364 __ sub(r7, lhs, Operand(kHeapObjectTag)); | 1191 // Convert rhs to a double in d6. |
| 1365 __ vldr(d7, r7, HeapNumber::kValueOffset); | 1192 __ SmiToDoubleVFPRegister(rhs, d6, r7, s13); |
| 1366 // Convert rhs to a double in d6. | |
| 1367 __ SmiToDoubleVFPRegister(rhs, d6, r7, s13); | |
| 1368 } else { | |
| 1369 __ push(lr); | |
| 1370 // Load lhs to a double in r2, r3. | |
| 1371 __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | |
| 1372 // Convert rhs to a double in r0, r1. | |
| 1373 __ mov(r7, Operand(rhs)); | |
| 1374 ConvertToDoubleStub stub2(r1, r0, r7, r6); | |
| 1375 __ Call(stub2.GetCode(masm->isolate())); | |
| 1376 __ pop(lr); | |
| 1377 } | |
| 1378 // Fall through to both_loaded_as_doubles. | 1193 // Fall through to both_loaded_as_doubles. |
| 1379 } | 1194 } |
| 1380 | 1195 |
| 1381 | 1196 |
| 1382 void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) { | 1197 void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) { |
| 1383 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); | 1198 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); |
| 1384 Register rhs_exponent = exp_first ? r0 : r1; | 1199 Register rhs_exponent = exp_first ? r0 : r1; |
| 1385 Register lhs_exponent = exp_first ? r2 : r3; | 1200 Register lhs_exponent = exp_first ? r2 : r3; |
| 1386 Register rhs_mantissa = exp_first ? r1 : r0; | 1201 Register rhs_mantissa = exp_first ? r1 : r0; |
| 1387 Register lhs_mantissa = exp_first ? r3 : r2; | 1202 Register lhs_mantissa = exp_first ? r3 : r2; |
| (...skipping 36 matching lines...) | |
| 1424 } else { | 1239 } else { |
| 1425 __ mov(r0, Operand(LESS)); | 1240 __ mov(r0, Operand(LESS)); |
| 1426 } | 1241 } |
| 1427 __ Ret(); | 1242 __ Ret(); |
| 1428 | 1243 |
| 1429 __ bind(&neither_is_nan); | 1244 __ bind(&neither_is_nan); |
| 1430 } | 1245 } |
| 1431 | 1246 |
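
The LESS/GREATER choice above encodes a JavaScript rule: every ordered comparison involving NaN is false, so when a NaN turns up the stub returns a sentinel on the "wrong side" of the requested condition, guaranteeing the caller's test fails. A sketch of the selection rule:

```cpp
#include <cassert>
#include <cmath>

enum CompareResult { LESS = -1, EQUAL = 0, GREATER = 1 };

// For lt/le sites return GREATER, otherwise LESS: whichever sentinel makes
// the caller's condition evaluate to false, since x < NaN, x <= NaN,
// x > NaN, and x >= NaN are all false.
CompareResult CompareDoubles(double a, double b, bool cond_is_lt_or_le) {
  if (std::isnan(a) || std::isnan(b)) {
    return cond_is_lt_or_le ? GREATER : LESS;
  }
  if (a == b) return EQUAL;
  return a < b ? LESS : GREATER;
}

int main() {
  double nan = std::nan("");
  assert(CompareDoubles(1.0, nan, true) == GREATER);   // 'result < 0' fails
  assert(CompareDoubles(1.0, nan, false) == LESS);     // 'result > 0' fails
}
```
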
| 1432 | 1247 |
| 1433 // See comment at call site. | 1248 // See comment at call site. |
| 1434 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, | |
| 1435 Condition cond) { | |
| 1436 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); | |
| 1437 Register rhs_exponent = exp_first ? r0 : r1; | |
| 1438 Register lhs_exponent = exp_first ? r2 : r3; | |
| 1439 Register rhs_mantissa = exp_first ? r1 : r0; | |
| 1440 Register lhs_mantissa = exp_first ? r3 : r2; | |
| 1441 | |
| 1442 // r0, r1, r2, r3 have the two doubles. Neither is a NaN. | |
| 1443 if (cond == eq) { | |
| 1444 // Doubles are not equal unless they have the same bit pattern. | |
| 1445 // Exception: 0 and -0. | |
| 1446 __ cmp(rhs_mantissa, Operand(lhs_mantissa)); | |
| 1447 __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne); | |
| 1448 // Return non-zero if the numbers are unequal. | |
| 1449 __ Ret(ne); | |
| 1450 | |
| 1451 __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC); | |
| 1452 // If exponents are equal then return 0. | |
| 1453 __ Ret(eq); | |
| 1454 | |
| 1455 // Exponents are unequal. The only way we can return that the numbers | |
| 1456 // are equal is if one is -0 and the other is 0. We already dealt | |
| 1457 // with the case where both are -0 or both are 0. | |
| 1458 // We start by seeing if the mantissas (that are equal) or the bottom | |
| 1459 // 31 bits of the rhs exponent are non-zero. If so we return not | |
| 1460 // equal. | |
| 1461 __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC); | |
| 1462 __ mov(r0, Operand(r4), LeaveCC, ne); | |
| 1463 __ Ret(ne); | |
| 1464 // Now they are equal if and only if the lhs exponent is zero in its | |
| 1465 // low 31 bits. | |
| 1466 __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize)); | |
| 1467 __ Ret(); | |
| 1468 } else { | |
| 1469 // Call a native function to do a comparison between two non-NaNs. | |
| 1470 // Call C routine that may not cause GC or other trouble. | |
| 1471 __ push(lr); | |
| 1472 __ PrepareCallCFunction(0, 2, r5); | |
| 1473 if (masm->use_eabi_hardfloat()) { | |
| 1474 CpuFeatureScope scope(masm, VFP2); | |
| 1475 __ vmov(d0, r0, r1); | |
| 1476 __ vmov(d1, r2, r3); | |
| 1477 } | |
| 1478 | |
| 1479 AllowExternalCallThatCantCauseGC scope(masm); | |
| 1480 __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), | |
| 1481 0, 2); | |
| 1482 __ pop(pc); // Return. | |
| 1483 } | |
| 1484 } | |
| 1485 | |
| 1486 | |
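
The routine removed above leaned on a bit-level fact worth keeping on record: two non-NaN doubles compare equal exactly when their bit patterns match, with the single exception of +0 and -0, which differ in the sign bit yet must compare equal. That exception is why the code special-cased equal mantissas with unequal exponents. A short C++ illustration:

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

uint64_t Bits(double d) {
  uint64_t b;
  std::memcpy(&b, &d, sizeof b);
  return b;
}

int main() {
  // Identical bits imply equality: the removed code's fast path.
  assert(Bits(1.25) == Bits(1.25) && 1.25 == 1.25);
  // The lone non-NaN exception: 0 and -0 differ only in the sign bit
  // but still compare equal, the extra case the removed code handled.
  assert(Bits(0.0) != Bits(-0.0));
  assert(0.0 == -0.0);
}
```
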
| 1487 // See comment at call site. | |
| 1488 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, | 1249 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
| 1489 Register lhs, | 1250 Register lhs, |
| 1490 Register rhs) { | 1251 Register rhs) { |
| 1491 ASSERT((lhs.is(r0) && rhs.is(r1)) || | 1252 ASSERT((lhs.is(r0) && rhs.is(r1)) || |
| 1492 (lhs.is(r1) && rhs.is(r0))); | 1253 (lhs.is(r1) && rhs.is(r0))); |
| 1493 | 1254 |
| 1494 // If either operand is a JS object or an oddball value, then they are | 1255 // If either operand is a JS object or an oddball value, then they are |
| 1495 // not equal since their pointers are different. | 1256 // not equal since their pointers are different. |
| 1496 // There is no test for undetectability in strict equality. | 1257 // There is no test for undetectability in strict equality. |
| 1497 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); | 1258 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); |
| (...skipping 42 matching lines...) | |
| 1540 (lhs.is(r1) && rhs.is(r0))); | 1301 (lhs.is(r1) && rhs.is(r0))); |
| 1541 | 1302 |
| 1542 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE); | 1303 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE); |
| 1543 __ b(ne, not_heap_numbers); | 1304 __ b(ne, not_heap_numbers); |
| 1544 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset)); | 1305 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset)); |
| 1545 __ cmp(r2, r3); | 1306 __ cmp(r2, r3); |
| 1546 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. | 1307 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. |
| 1547 | 1308 |
| 1548 // Both are heap numbers. Load them up then jump to the code we have | 1309 // Both are heap numbers. Load them up then jump to the code we have |
| 1549 // for that. | 1310 // for that. |
| 1550 if (CpuFeatures::IsSupported(VFP2)) { | 1311 __ sub(r7, rhs, Operand(kHeapObjectTag)); |
| 1551 CpuFeatureScope scope(masm, VFP2); | 1312 __ vldr(d6, r7, HeapNumber::kValueOffset); |
| 1552 __ sub(r7, rhs, Operand(kHeapObjectTag)); | 1313 __ sub(r7, lhs, Operand(kHeapObjectTag)); |
| 1553 __ vldr(d6, r7, HeapNumber::kValueOffset); | 1314 __ vldr(d7, r7, HeapNumber::kValueOffset); |
| 1554 __ sub(r7, lhs, Operand(kHeapObjectTag)); | |
| 1555 __ vldr(d7, r7, HeapNumber::kValueOffset); | |
| 1556 } else { | |
| 1557 __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | |
| 1558 __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | |
| 1559 } | |
| 1560 __ jmp(both_loaded_as_doubles); | 1315 __ jmp(both_loaded_as_doubles); |
| 1561 } | 1316 } |
| 1562 | 1317 |
| 1563 | 1318 |
| 1564 // Fast negative check for internalized-to-internalized equality. | 1319 // Fast negative check for internalized-to-internalized equality. |
| 1565 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, | 1320 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, |
| 1566 Register lhs, | 1321 Register lhs, |
| 1567 Register rhs, | 1322 Register rhs, |
| 1568 Label* possible_strings, | 1323 Label* possible_strings, |
| 1569 Label* not_both_strings) { | 1324 Label* not_both_strings) { |
| (...skipping 60 matching lines...) | |
| 1630 | 1385 |
| 1631 // Calculate the entry in the number string cache. The hash value in the | 1386 // Calculate the entry in the number string cache. The hash value in the |
| 1632 // number string cache for smis is just the smi value, and the hash for | 1387 // number string cache for smis is just the smi value, and the hash for |
| 1633 // doubles is the xor of the upper and lower words. See | 1388 // doubles is the xor of the upper and lower words. See |
| 1634 // Heap::GetNumberStringCache. | 1389 // Heap::GetNumberStringCache. |
| 1635 Isolate* isolate = masm->isolate(); | 1390 Isolate* isolate = masm->isolate(); |
| 1636 Label is_smi; | 1391 Label is_smi; |
| 1637 Label load_result_from_cache; | 1392 Label load_result_from_cache; |
| 1638 if (!object_is_smi) { | 1393 if (!object_is_smi) { |
| 1639 __ JumpIfSmi(object, &is_smi); | 1394 __ JumpIfSmi(object, &is_smi); |
| 1640 if (CpuFeatures::IsSupported(VFP2)) { | 1395 __ CheckMap(object, |
| 1641 CpuFeatureScope scope(masm, VFP2); | 1396 scratch1, |
| 1642 __ CheckMap(object, | 1397 Heap::kHeapNumberMapRootIndex, |
| 1643 scratch1, | 1398 not_found, |
| 1644 Heap::kHeapNumberMapRootIndex, | 1399 DONT_DO_SMI_CHECK); |
| 1645 not_found, | |
| 1646 DONT_DO_SMI_CHECK); | |
| 1647 | 1400 |
| 1648 STATIC_ASSERT(8 == kDoubleSize); | 1401 STATIC_ASSERT(8 == kDoubleSize); |
| 1649 __ add(scratch1, | 1402 __ add(scratch1, |
| 1650 object, | 1403 object, |
| 1651 Operand(HeapNumber::kValueOffset - kHeapObjectTag)); | 1404 Operand(HeapNumber::kValueOffset - kHeapObjectTag)); |
| 1652 __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit()); | 1405 __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit()); |
| 1653 __ eor(scratch1, scratch1, Operand(scratch2)); | 1406 __ eor(scratch1, scratch1, Operand(scratch2)); |
| 1654 __ and_(scratch1, scratch1, Operand(mask)); | 1407 __ and_(scratch1, scratch1, Operand(mask)); |
| 1655 | 1408 |
| 1656 // Calculate address of entry in string cache: each entry consists | 1409 // Calculate address of entry in string cache: each entry consists |
| 1657 // of two pointer sized fields. | 1410 // of two pointer sized fields. |
| 1658 __ add(scratch1, | 1411 __ add(scratch1, |
| 1659 number_string_cache, | 1412 number_string_cache, |
| 1660 Operand(scratch1, LSL, kPointerSizeLog2 + 1)); | 1413 Operand(scratch1, LSL, kPointerSizeLog2 + 1)); |
| 1661 | 1414 |
| 1662 Register probe = mask; | 1415 Register probe = mask; |
| 1663 __ ldr(probe, | 1416 __ ldr(probe, |
| 1664 FieldMemOperand(scratch1, FixedArray::kHeaderSize)); | 1417 FieldMemOperand(scratch1, FixedArray::kHeaderSize)); |
| 1665 __ JumpIfSmi(probe, not_found); | 1418 __ JumpIfSmi(probe, not_found); |
| 1666 __ sub(scratch2, object, Operand(kHeapObjectTag)); | 1419 __ sub(scratch2, object, Operand(kHeapObjectTag)); |
| 1667 __ vldr(d0, scratch2, HeapNumber::kValueOffset); | 1420 __ vldr(d0, scratch2, HeapNumber::kValueOffset); |
| 1668 __ sub(probe, probe, Operand(kHeapObjectTag)); | 1421 __ sub(probe, probe, Operand(kHeapObjectTag)); |
| 1669 __ vldr(d1, probe, HeapNumber::kValueOffset); | 1422 __ vldr(d1, probe, HeapNumber::kValueOffset); |
| 1670 __ VFPCompareAndSetFlags(d0, d1); | 1423 __ VFPCompareAndSetFlags(d0, d1); |
| 1671 __ b(ne, not_found); // The cache did not contain this value. | 1424 __ b(ne, not_found); // The cache did not contain this value. |
| 1672 __ b(&load_result_from_cache); | 1425 __ b(&load_result_from_cache); |
| 1673 } else { | 1426 } else { |

Rodolph Perfetta (2013/04/04 15:30:04): This was the else from CpuFeatures::IsSupported(VFP2).

danno (2013/04/08 19:45:33): Done.

| 1674 __ b(not_found); | 1427 __ b(not_found); |
| 1675 } | |
| 1676 } | 1428 } |
| 1677 | 1429 |
| 1678 __ bind(&is_smi); | 1430 __ bind(&is_smi); |
| 1679 Register scratch = scratch1; | 1431 Register scratch = scratch1; |
| 1680 __ and_(scratch, mask, Operand(object, ASR, 1)); | 1432 __ and_(scratch, mask, Operand(object, ASR, 1)); |
| 1681 // Calculate address of entry in string cache: each entry consists | 1433 // Calculate address of entry in string cache: each entry consists |
| 1682 // of two pointer sized fields. | 1434 // of two pointer sized fields. |
| 1683 __ add(scratch, | 1435 __ add(scratch, |
| 1684 number_string_cache, | 1436 number_string_cache, |
| 1685 Operand(scratch, LSL, kPointerSizeLog2 + 1)); | 1437 Operand(scratch, LSL, kPointerSizeLog2 + 1)); |
| (...skipping 94 matching lines...) | |
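
As the comment above notes, the number-string-cache hash is the smi value itself for smis and the xor of the double's two 32-bit words for heap numbers, masked to the power-of-two cache size. A hedged sketch of both hashes; `mask` and the entry layout are assumptions mirroring the comment, not V8's exact constants:

```cpp
#include <cstdint>
#include <cstring>

uint32_t DoubleHash(double d, uint32_t mask) {
  uint32_t words[2];
  std::memcpy(words, &d, sizeof words);   // the ldm of both value words
  return (words[0] ^ words[1]) & mask;    // eor, then and_ against the mask
}

uint32_t SmiHash(int32_t untagged, uint32_t mask) {
  // The stub computes this as and_(scratch, mask, Operand(object, ASR, 1)):
  // shifting the tagged smi right by one recovers the value.
  return static_cast<uint32_t>(untagged) & mask;
}

int main() {
  const uint32_t mask = 64 - 1;           // capacity must be a power of two
  return (DoubleHash(3.75, mask) <= mask && SmiHash(42, mask) == 42) ? 0 : 1;
}
```
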
| 1780 // In cases 3 and 4 we have found out we were dealing with a number-number | 1532 // In cases 3 and 4 we have found out we were dealing with a number-number |
| 1781 // comparison. If VFP3 is supported the double values of the numbers have | 1533 // comparison. If VFP3 is supported the double values of the numbers have |
| 1782 // been loaded into d7 and d6. Otherwise, the double values have been loaded | 1534 // been loaded into d7 and d6. Otherwise, the double values have been loaded |
| 1783 // into r0, r1, r2, and r3. | 1535 // into r0, r1, r2, and r3. |
| 1784 EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict()); | 1536 EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict()); |
| 1785 | 1537 |
| 1786 __ bind(&both_loaded_as_doubles); | 1538 __ bind(&both_loaded_as_doubles); |
| 1787 // The arguments have been converted to doubles and stored in d6 and d7, if | 1539 // The arguments have been converted to doubles and stored in d6 and d7, if |
| 1788 // VFP3 is supported, or in r0, r1, r2, and r3. | 1540 // VFP3 is supported, or in r0, r1, r2, and r3. |
| 1789 Isolate* isolate = masm->isolate(); | 1541 Isolate* isolate = masm->isolate(); |
| 1790 if (CpuFeatures::IsSupported(VFP2)) { | 1542 __ bind(&lhs_not_nan); |
| 1791 __ bind(&lhs_not_nan); | 1543 Label no_nan; |
| 1792 CpuFeatureScope scope(masm, VFP2); | 1544 // ARMv7 VFP3 instructions to implement double precision comparison. |
| 1793 Label no_nan; | 1545 __ VFPCompareAndSetFlags(d7, d6); |
| 1794 // ARMv7 VFP3 instructions to implement double precision comparison. | 1546 Label nan; |
| 1795 __ VFPCompareAndSetFlags(d7, d6); | 1547 __ b(vs, &nan); |
| 1796 Label nan; | 1548 __ mov(r0, Operand(EQUAL), LeaveCC, eq); |
| 1797 __ b(vs, &nan); | 1549 __ mov(r0, Operand(LESS), LeaveCC, lt); |
| 1798 __ mov(r0, Operand(EQUAL), LeaveCC, eq); | 1550 __ mov(r0, Operand(GREATER), LeaveCC, gt); |
| 1799 __ mov(r0, Operand(LESS), LeaveCC, lt); | 1551 __ Ret(); |
| 1800 __ mov(r0, Operand(GREATER), LeaveCC, gt); | |
| 1801 __ Ret(); | |
| 1802 | 1552 |
| 1803 __ bind(&nan); | 1553 __ bind(&nan); |
| 1804 // If one of the sides was a NaN then the v flag is set. Load r0 with | 1554 // If one of the sides was a NaN then the v flag is set. Load r0 with |
| 1805 // whatever it takes to make the comparison fail, since comparisons with NaN | 1555 // whatever it takes to make the comparison fail, since comparisons with NaN |
| 1806 // always fail. | 1556 // always fail. |
| 1807 if (cc == lt || cc == le) { | 1557 if (cc == lt || cc == le) { |
| 1808 __ mov(r0, Operand(GREATER)); | 1558 __ mov(r0, Operand(GREATER)); |
| 1809 } else { | |
| 1810 __ mov(r0, Operand(LESS)); | |
| 1811 } | |
| 1812 __ Ret(); | |
| 1813 } else { | 1559 } else { |
| 1814 // Checks for NaN in the doubles we have loaded. Can return the answer or | 1560 __ mov(r0, Operand(LESS)); |
| 1815 // fall through if neither is a NaN. Also binds lhs_not_nan. | |
| 1816 EmitNanCheck(masm, &lhs_not_nan, cc); | |
| 1817 // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the | |
| 1818 // answer. Never falls through. | |
| 1819 EmitTwoNonNanDoubleComparison(masm, cc); | |
| 1820 } | 1561 } |
| 1562 __ Ret(); | |
| 1821 | 1563 |
| 1822 __ bind(¬_smis); | 1564 __ bind(¬_smis); |
| 1823 // At this point we know we are dealing with two different objects, | 1565 // At this point we know we are dealing with two different objects, |
| 1824 // and neither of them is a Smi. The objects are in rhs_ and lhs_. | 1566 // and neither of them is a Smi. The objects are in rhs_ and lhs_. |
| 1825 if (strict()) { | 1567 if (strict()) { |
| 1826 // This returns non-equal for some object types, or falls through if it | 1568 // This returns non-equal for some object types, or falls through if it |
| 1827 // was not lucky. | 1569 // was not lucky. |
| 1828 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs); | 1570 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs); |
| 1829 } | 1571 } |
| 1830 | 1572 |
| (...skipping 76 matching lines...) | |
| 1907 } | 1649 } |
| 1908 | 1650 |
| 1909 | 1651 |
| 1910 // The stub expects its argument in the tos_ register and returns its result in | 1652 // The stub expects its argument in the tos_ register and returns its result in |
| 1911 // it, too: zero for false, and a non-zero value for true. | 1653 // it, too: zero for false, and a non-zero value for true. |
| 1912 void ToBooleanStub::Generate(MacroAssembler* masm) { | 1654 void ToBooleanStub::Generate(MacroAssembler* masm) { |
| 1913 // This stub overrides SometimesSetsUpAFrame() to return false. That means | 1655 // This stub overrides SometimesSetsUpAFrame() to return false. That means |
| 1914 // we cannot call anything that could cause a GC from this stub. | 1656 // we cannot call anything that could cause a GC from this stub. |
| 1915 Label patch; | 1657 Label patch; |
| 1916 const Register map = r9.is(tos_) ? r7 : r9; | 1658 const Register map = r9.is(tos_) ? r7 : r9; |
| 1917 const Register temp = map; | |
| 1918 | 1659 |
| 1919 // undefined -> false. | 1660 // undefined -> false. |
| 1920 CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false); | 1661 CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false); |
| 1921 | 1662 |
| 1922 // Boolean -> its value. | 1663 // Boolean -> its value. |
| 1923 CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false); | 1664 CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false); |
| 1924 CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true); | 1665 CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true); |
| 1925 | 1666 |
| 1926 // 'null' -> false. | 1667 // 'null' -> false. |
| 1927 CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false); | 1668 CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false); |
| (...skipping 22 matching lines...) Expand all Loading... | |
| 1950 | 1691 |
| 1951 if (types_.Contains(SPEC_OBJECT)) { | 1692 if (types_.Contains(SPEC_OBJECT)) { |
| 1952 // Spec object -> true. | 1693 // Spec object -> true. |
| 1953 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE); | 1694 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE); |
| 1954 // tos_ contains the correct non-zero return value already. | 1695 // tos_ contains the correct non-zero return value already. |
| 1955 __ Ret(ge); | 1696 __ Ret(ge); |
| 1956 } | 1697 } |
| 1957 | 1698 |
| 1958 if (types_.Contains(STRING)) { | 1699 if (types_.Contains(STRING)) { |
| 1959 // String value -> false iff empty. | 1700 // String value -> false iff empty. |
| 1960 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE); | 1701 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE); |
| 1961 __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset), lt); | 1702 __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset), lt); |
| 1962 __ Ret(lt); // the string length is OK as the return value | 1703 __ Ret(lt); // the string length is OK as the return value |
| 1963 } | 1704 } |
| 1964 | 1705 |
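
The string case above returns the length word itself: the stub's contract is only "zero means false, non-zero means true", and a string's smi-encoded length is zero exactly for the empty string, so no further test is needed. The same trick at the C++ level:

```cpp
#include <string>

int StringToBoolean(const std::string& s) {
  return static_cast<int>(s.size());  // 0 for "", non-zero otherwise
}

int main() {
  return (StringToBoolean("") == 0 && StringToBoolean("x") != 0) ? 0 : 1;
}
```
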
| 1965 if (types_.Contains(HEAP_NUMBER)) { | 1706 if (types_.Contains(HEAP_NUMBER)) { |
| 1966 // Heap number -> false iff +0, -0, or NaN. | 1707 // Heap number -> false iff +0, -0, or NaN. |
| 1967 Label not_heap_number; | 1708 Label not_heap_number; |
| 1968 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); | 1709 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); |
| 1969 __ b(ne, ¬_heap_number); | 1710 __ b(ne, ¬_heap_number); |
| 1970 | 1711 |
| 1971 if (CpuFeatures::IsSupported(VFP2)) { | 1712 __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset)); |
| 1972 CpuFeatureScope scope(masm, VFP2); | 1713 __ VFPCompareAndSetFlags(d1, 0.0); |
| 1973 | 1714 // "tos_" is a register, and contains a non zero value by default. |
| 1974 __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset)); | 1715 // Hence we only need to overwrite "tos_" with zero to return false for |
| 1975 __ VFPCompareAndSetFlags(d1, 0.0); | 1716 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. |
| 1976 // "tos_" is a register, and contains a non zero value by default. | 1717 __ mov(tos_, Operand::Zero(), LeaveCC, eq); // for FP_ZERO |
| 1977 // Hence we only need to overwrite "tos_" with zero to return false for | 1718 __ mov(tos_, Operand::Zero(), LeaveCC, vs); // for FP_NAN |
| 1978 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. | |
| 1979 __ mov(tos_, Operand::Zero(), LeaveCC, eq); // for FP_ZERO | |
| 1980 __ mov(tos_, Operand::Zero(), LeaveCC, vs); // for FP_NAN | |
| 1981 } else { | |
| 1982 Label done, not_nan, not_zero; | |
| 1983 __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset)); | |
| 1984 // -0 maps to false: | |
| 1985 __ bic( | |
| 1986 temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE32), SetCC); | |
| 1987 __ b(ne, ¬_zero); | |
| 1988 // If exponent word is zero then the answer depends on the mantissa word. | |
| 1989 __ ldr(tos_, FieldMemOperand(tos_, HeapNumber::kMantissaOffset)); | |
| 1990 __ jmp(&done); | |
| 1991 | |
| 1992 // Check for NaN. | |
| 1993 __ bind(¬_zero); | |
| 1994 // We already zeroed the sign bit, now shift out the mantissa so we only | |
| 1995 // have the exponent left. | |
| 1996 __ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord)); | |
| 1997 unsigned int shifted_exponent_mask = | |
| 1998 HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord; | |
| 1999 __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE32)); | |
| 2000 __ b(ne, ¬_nan); // If exponent is not 0x7ff then it can't be a NaN. | |
| 2001 | |
| 2002 // Reload exponent word. | |
| 2003 __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset)); | |
| 2004 __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE32)); | |
| 2005 // If mantissa is not zero then we have a NaN, so return 0. | |
| 2006 __ mov(tos_, Operand::Zero(), LeaveCC, ne); | |
| 2007 __ b(ne, &done); | |
| 2008 | |
| 2009 // Load mantissa word. | |
| 2010 __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset)); | |
| 2011 __ cmp(temp, Operand::Zero()); | |
| 2012 // If mantissa is not zero then we have a NaN, so return 0. | |
| 2013 __ mov(tos_, Operand::Zero(), LeaveCC, ne); | |
| 2014 __ b(ne, &done); | |
| 2015 | |
| 2016 __ bind(¬_nan); | |
| 2017 __ mov(tos_, Operand(1, RelocInfo::NONE32)); | |
| 2018 __ bind(&done); | |
| 2019 } | |
| 2020 __ Ret(); | 1719 __ Ret(); |
| 2021 __ bind(¬_heap_number); | 1720 __ bind(¬_heap_number); |
| 2022 } | 1721 } |
| 2023 | 1722 |
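
In the heap-number case above, one VFP compare against 0.0 covers every falsy double: `eq` fires for +0 and -0 (they compare equal), and `vs` fires for NaN (an unordered compare sets the V flag). The deleted fallback reached the same answer from the bit pattern: clear the sign, then the value is falsy iff the rest is zero or the exponent is all ones with a non-zero mantissa. A C++ sketch of both routes agreeing, folded into one 64-bit word where the removed ARM code used two 32-bit ones:

```cpp
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

bool NumberToBooleanVFP(double d) {   // the compare-against-0.0 route
  return !(d == 0.0) && !std::isnan(d);
}

bool NumberToBooleanBits(double d) {  // the removed bit-pattern route
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  bits &= ~(1ull << 63);                         // bic ..., kSignMask
  if (bits == 0) return false;                   // +0 or -0
  uint64_t exponent = bits >> 52;                // 11 exponent bits
  uint64_t mantissa = bits & ((1ull << 52) - 1);
  return !(exponent == 0x7ff && mantissa != 0);  // NaN is falsy
}

int main() {
  for (double d : {0.0, -0.0, std::nan(""), 1.0, -2.5}) {
    assert(NumberToBooleanVFP(d) == NumberToBooleanBits(d));
  }
}
```
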
| 2024 __ bind(&patch); | 1723 __ bind(&patch); |
| 2025 GenerateTypeTransition(masm); | 1724 GenerateTypeTransition(masm); |
| 2026 } | 1725 } |
| 2027 | 1726 |
| 2028 | 1727 |
| 2029 void ToBooleanStub::CheckOddball(MacroAssembler* masm, | 1728 void ToBooleanStub::CheckOddball(MacroAssembler* masm, |
| (...skipping 32 matching lines...) | |
| 2062 | 1761 |
| 2063 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { | 1762 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { |
| 2064 // We don't allow a GC during a store buffer overflow so there is no need to | 1763 // We don't allow a GC during a store buffer overflow so there is no need to |
| 2065 // store the registers in any particular way, but we do have to store and | 1764 // store the registers in any particular way, but we do have to store and |
| 2066 // restore them. | 1765 // restore them. |
| 2067 __ stm(db_w, sp, kCallerSaved | lr.bit()); | 1766 __ stm(db_w, sp, kCallerSaved | lr.bit()); |
| 2068 | 1767 |
| 2069 const Register scratch = r1; | 1768 const Register scratch = r1; |
| 2070 | 1769 |
| 2071 if (save_doubles_ == kSaveFPRegs) { | 1770 if (save_doubles_ == kSaveFPRegs) { |
| 2072 CpuFeatureScope scope(masm, VFP2); | |
| 2073 // Check CPU flags for number of registers, setting the Z condition flag. | 1771 // Check CPU flags for number of registers, setting the Z condition flag. |
| 2074 __ CheckFor32DRegs(scratch); | 1772 __ CheckFor32DRegs(scratch); |
| 2075 | 1773 |
| 2076 __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters)); | 1774 __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters)); |
| 2077 for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) { | 1775 for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) { |
| 2078 DwVfpRegister reg = DwVfpRegister::from_code(i); | 1776 DwVfpRegister reg = DwVfpRegister::from_code(i); |
| 2079 __ vstr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne); | 1777 __ vstr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne); |
| 2080 } | 1778 } |
| 2081 } | 1779 } |
| 2082 const int argument_count = 1; | 1780 const int argument_count = 1; |
| 2083 const int fp_argument_count = 0; | 1781 const int fp_argument_count = 0; |
| 2084 | 1782 |
| 2085 AllowExternalCallThatCantCauseGC scope(masm); | 1783 AllowExternalCallThatCantCauseGC scope(masm); |
| 2086 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); | 1784 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); |
| 2087 __ mov(r0, Operand(ExternalReference::isolate_address())); | 1785 __ mov(r0, Operand(ExternalReference::isolate_address())); |
| 2088 __ CallCFunction( | 1786 __ CallCFunction( |
| 2089 ExternalReference::store_buffer_overflow_function(masm->isolate()), | 1787 ExternalReference::store_buffer_overflow_function(masm->isolate()), |
| 2090 argument_count); | 1788 argument_count); |
| 2091 if (save_doubles_ == kSaveFPRegs) { | 1789 if (save_doubles_ == kSaveFPRegs) { |
| 2092 CpuFeatureScope scope(masm, VFP2); | |
| 2093 | |
| 2094 // Check CPU flags for number of registers, setting the Z condition flag. | 1790 // Check CPU flags for number of registers, setting the Z condition flag. |
| 2095 __ CheckFor32DRegs(scratch); | 1791 __ CheckFor32DRegs(scratch); |
| 2096 | 1792 |
| 2097 for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) { | 1793 for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) { |
| 2098 DwVfpRegister reg = DwVfpRegister::from_code(i); | 1794 DwVfpRegister reg = DwVfpRegister::from_code(i); |
| 2099 __ vldr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne); | 1795 __ vldr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne); |
| 2100 } | 1796 } |
| 2101 __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters)); | 1797 __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters)); |
| 2102 } | 1798 } |
| 2103 __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0). | 1799 __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0). |
| (...skipping 204 matching lines...) | |
| 2308 // Push the 31 high bits (bit 0 cleared to look like a smi). | 2004 // Push the 31 high bits (bit 0 cleared to look like a smi). |
| 2309 __ bic(r1, r1, Operand(1)); | 2005 __ bic(r1, r1, Operand(1)); |
| 2310 __ Push(r2, r1); | 2006 __ Push(r2, r1); |
| 2311 __ CallRuntime(Runtime::kNumberAlloc, 0); | 2007 __ CallRuntime(Runtime::kNumberAlloc, 0); |
| 2312 __ Pop(r2, r1); // Restore the result. | 2008 __ Pop(r2, r1); // Restore the result. |
| 2313 __ orr(r1, r1, Operand(r2, LSR, 31)); | 2009 __ orr(r1, r1, Operand(r2, LSR, 31)); |
| 2314 } | 2010 } |
| 2315 __ bind(&heapnumber_allocated); | 2011 __ bind(&heapnumber_allocated); |
| 2316 } | 2012 } |
| 2317 | 2013 |
| 2318 if (CpuFeatures::IsSupported(VFP2)) { | 2014 __ vmov(s0, r1); |
| 2319 // Convert the int32 in r1 to the heap number in r0. r2 is corrupted. | 2015 __ vcvt_f64_s32(d0, s0); |
| 2320 CpuFeatureScope scope(masm, VFP2); | 2016 __ vstr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
| 2321 __ vmov(s0, r1); | 2017 __ Ret(); |
| 2322 __ vcvt_f64_s32(d0, s0); | |
| 2323 __ vstr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); | |
| 2324 __ Ret(); | |
| 2325 } else { | |
| 2326 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not | |
| 2327 // have to set up a frame. | |
| 2328 WriteInt32ToHeapNumberStub stub(r1, r0, r2); | |
| 2329 __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); | |
| 2330 } | |
| 2331 } | 2018 } |
| 2332 | 2019 |
| 2333 | 2020 |
| 2334 // TODO(svenpanne): Use virtual functions instead of switch. | 2021 // TODO(svenpanne): Use virtual functions instead of switch. |
| 2335 void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) { | 2022 void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) { |
| 2336 switch (op_) { | 2023 switch (op_) { |
| 2337 case Token::SUB: | 2024 case Token::SUB: |
| 2338 GenerateGenericStubSub(masm); | 2025 GenerateGenericStubSub(masm); |
| 2339 break; | 2026 break; |
| 2340 case Token::BIT_NOT: | 2027 case Token::BIT_NOT: |
| (...skipping 35 matching lines...) | |
| 2376 case Token::BIT_NOT: | 2063 case Token::BIT_NOT: |
| 2377 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); | 2064 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); |
| 2378 break; | 2065 break; |
| 2379 default: | 2066 default: |
| 2380 UNREACHABLE(); | 2067 UNREACHABLE(); |
| 2381 } | 2068 } |
| 2382 } | 2069 } |
| 2383 | 2070 |
| 2384 | 2071 |
| 2385 void BinaryOpStub::Initialize() { | 2072 void BinaryOpStub::Initialize() { |
| 2386 platform_specific_bit_ = CpuFeatures::IsSupported(VFP2); | 2073 platform_specific_bit_ = true; // VFP2 is a base requirement for V8 |
| 2387 } | 2074 } |
| 2388 | 2075 |
| 2389 | 2076 |
| 2390 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | 2077 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
| 2391 Label get_result; | 2078 Label get_result; |
| 2392 | 2079 |
| 2393 __ Push(r1, r0); | 2080 __ Push(r1, r0); |
| 2394 | 2081 |
| 2395 __ mov(r2, Operand(Smi::FromInt(MinorKey()))); | 2082 __ mov(r2, Operand(Smi::FromInt(MinorKey()))); |
| 2396 __ push(r2); | 2083 __ push(r2); |
| (...skipping 258 matching lines...) | |
| 2655 | 2342 |
| 2656 switch (op) { | 2343 switch (op) { |
| 2657 case Token::ADD: | 2344 case Token::ADD: |
| 2658 case Token::SUB: | 2345 case Token::SUB: |
| 2659 case Token::MUL: | 2346 case Token::MUL: |
| 2660 case Token::DIV: | 2347 case Token::DIV: |
| 2661 case Token::MOD: { | 2348 case Token::MOD: { |
| 2662 // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 | 2349 // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 |
| 2663 // depending on whether VFP3 is available or not. | 2350 // depending on the operation: MOD uses core registers. |
| 2664 FloatingPointHelper::Destination destination = | 2351 FloatingPointHelper::Destination destination = |
| 2665 CpuFeatures::IsSupported(VFP2) && | |
| 2666 op != Token::MOD ? | 2352 op != Token::MOD ? |
| 2667 FloatingPointHelper::kVFPRegisters : | 2353 FloatingPointHelper::kVFPRegisters : |
| 2668 FloatingPointHelper::kCoreRegisters; | 2354 FloatingPointHelper::kCoreRegisters; |
| 2669 | 2355 |
| 2670 // Allocate new heap number for result. | 2356 // Allocate new heap number for result. |
| 2671 Register result = r5; | 2357 Register result = r5; |
| 2672 BinaryOpStub_GenerateHeapResultAllocation( | 2358 BinaryOpStub_GenerateHeapResultAllocation( |
| 2673 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); | 2359 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); |
| 2674 | 2360 |
| 2675 // Load the operands. | 2361 // Load the operands. |
| (...skipping 23 matching lines...) | |
| 2699 masm, destination, left, d6, r0, r1, heap_number_map, | 2385 masm, destination, left, d6, r0, r1, heap_number_map, |
| 2700 scratch1, scratch2, fail); | 2386 scratch1, scratch2, fail); |
| 2701 } | 2387 } |
| 2702 } | 2388 } |
| 2703 | 2389 |
| 2704 // Calculate the result. | 2390 // Calculate the result. |
| 2705 if (destination == FloatingPointHelper::kVFPRegisters) { | 2391 if (destination == FloatingPointHelper::kVFPRegisters) { |
| 2706 // Using VFP registers: | 2392 // Using VFP registers: |
| 2707 // d6: Left value | 2393 // d6: Left value |
| 2708 // d7: Right value | 2394 // d7: Right value |
| 2709 CpuFeatureScope scope(masm, VFP2); | |
| 2710 switch (op) { | 2395 switch (op) { |
| 2711 case Token::ADD: | 2396 case Token::ADD: |
| 2712 __ vadd(d5, d6, d7); | 2397 __ vadd(d5, d6, d7); |
| 2713 break; | 2398 break; |
| 2714 case Token::SUB: | 2399 case Token::SUB: |
| 2715 __ vsub(d5, d6, d7); | 2400 __ vsub(d5, d6, d7); |
| 2716 break; | 2401 break; |
| 2717 case Token::MUL: | 2402 case Token::MUL: |
| 2718 __ vmul(d5, d6, d7); | 2403 __ vmul(d5, d6, d7); |
| 2719 break; | 2404 break; |
| (...skipping 70 matching lines...) | |
| 2790 __ mov(r2, Operand(r3, ASR, r2)); | 2475 __ mov(r2, Operand(r3, ASR, r2)); |
| 2791 break; | 2476 break; |
| 2792 case Token::SHR: | 2477 case Token::SHR: |
| 2793 // Use only the 5 least significant bits of the shift count. | 2478 // Use only the 5 least significant bits of the shift count. |
| 2794 __ GetLeastBitsFromInt32(r2, r2, 5); | 2479 __ GetLeastBitsFromInt32(r2, r2, 5); |
| 2795 __ mov(r2, Operand(r3, LSR, r2), SetCC); | 2480 __ mov(r2, Operand(r3, LSR, r2), SetCC); |
| 2796 // SHR is special because it is required to produce a positive answer. | 2481 // SHR is special because it is required to produce a positive answer. |
| 2797 // The code below for writing into heap numbers isn't capable of | 2482 // A negative (mi) result does not fit in a signed int32, so we branch |
| 2798 // writing the register as an unsigned int so we go to slow case if we | 2483 // to the heap-number path below, which can write the register as an |
| 2799 // hit this case. | 2484 // unsigned int via vcvt_f64_u32. |
| 2800 if (CpuFeatures::IsSupported(VFP2)) { | 2485 __ b(mi, &result_not_a_smi); |
| 2801 __ b(mi, &result_not_a_smi); | |
| 2802 } else { | |
| 2803 __ b(mi, not_numbers); | |
| 2804 } | |
| 2805 break; | 2486 break; |
| 2806 case Token::SHL: | 2487 case Token::SHL: |
| 2807 // Use only the 5 least significant bits of the shift count. | 2488 // Use only the 5 least significant bits of the shift count. |
| 2808 __ GetLeastBitsFromInt32(r2, r2, 5); | 2489 __ GetLeastBitsFromInt32(r2, r2, 5); |
| 2809 __ mov(r2, Operand(r3, LSL, r2)); | 2490 __ mov(r2, Operand(r3, LSL, r2)); |
| 2810 break; | 2491 break; |
| 2811 default: | 2492 default: |
| 2812 UNREACHABLE(); | 2493 UNREACHABLE(); |
| 2813 } | 2494 } |
| 2814 | 2495 |
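The shift cases above encode two pieces of JS semantics: only the five least significant bits of the count are used, and >>> must yield an unsigned 32-bit value, which is why a set bit 31 (the mi condition) forces the heap-number path. A sketch of that behavior:

```cpp
#include <cassert>
#include <cstdint>

// Sketch of the JS shift semantics the cases above implement: only the five
// least significant bits of the count are used, and >>> (SHR here) yields an
// unsigned 32-bit value that may not fit in a signed int32.
uint32_t JsShr(int32_t lhs, int32_t count) {
  uint32_t c = static_cast<uint32_t>(count) & 0x1f;  // GetLeastBitsFromInt32
  return static_cast<uint32_t>(lhs) >> c;            // mov r2, r3, LSR r2
}

int main() {
  // -1 >>> 0 is 4294967295: bit 31 is set, so the SetCC/mi check above sees
  // a "negative" register and routes to the heap-number path.
  assert(JsShr(-1, 0) == 4294967295u);
  assert(JsShr(-1, 32) == 4294967295u);  // a count of 32 masks down to 0
  return 0;
}
```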
| (...skipping 15 matching lines...) | |
| 2830 mode); | 2511 mode); |
| 2831 } | 2512 } |
| 2832 | 2513 |
| 2833 // r2: Answer as signed int32. | 2514 // r2: Answer as signed int32. |
| 2834 // r5: Heap number to write answer into. | 2515 // r5: Heap number to write answer into. |
| 2835 | 2516 |
| 2836 // Nothing can go wrong now, so move the heap number to r0, which is the | 2517 // Nothing can go wrong now, so move the heap number to r0, which is the |
| 2837 // result. | 2518 // result. |
| 2838 __ mov(r0, Operand(r5)); | 2519 __ mov(r0, Operand(r5)); |
| 2839 | 2520 |
| 2840 if (CpuFeatures::IsSupported(VFP2)) { | 2521 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As |
| 2841 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As | 2522 // mentioned above SHR needs to always produce a positive result. |
| 2842 // mentioned above SHR needs to always produce a positive result. | 2523 __ vmov(s0, r2); |
| 2843 CpuFeatureScope scope(masm, VFP2); | 2524 if (op == Token::SHR) { |
| 2844 __ vmov(s0, r2); | 2525 __ vcvt_f64_u32(d0, s0); |
| 2845 if (op == Token::SHR) { | |
| 2846 __ vcvt_f64_u32(d0, s0); | |
| 2847 } else { | |
| 2848 __ vcvt_f64_s32(d0, s0); | |
| 2849 } | |
| 2850 __ sub(r3, r0, Operand(kHeapObjectTag)); | |
| 2851 __ vstr(d0, r3, HeapNumber::kValueOffset); | |
| 2852 __ Ret(); | |
| 2853 } else { | 2526 } else { |
| 2854 // Tail call that writes the int32 in r2 to the heap number in r0, using | 2527 __ vcvt_f64_s32(d0, s0); |
| 2855 // r3 as scratch. r0 is preserved and returned. | |
| 2856 WriteInt32ToHeapNumberStub stub(r2, r0, r3); | |
| 2857 __ TailCallStub(&stub); | |
| 2858 } | 2528 } |
| 2529 __ sub(r3, r0, Operand(kHeapObjectTag)); | |
| 2530 __ vstr(d0, r3, HeapNumber::kValueOffset); | |
| 2531 __ Ret(); | |
| 2859 break; | 2532 break; |
| 2860 } | 2533 } |
| 2861 default: | 2534 default: |
| 2862 UNREACHABLE(); | 2535 UNREACHABLE(); |
| 2863 } | 2536 } |
| 2864 } | 2537 } |
| 2865 | 2538 |
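The only difference between the SHR tail and the other bit ops above is how the same 32 result bits are read when converting to a double: unsigned for SHR (vcvt_f64_u32), signed otherwise (vcvt_f64_s32). A sketch:

```cpp
#include <cassert>
#include <cstdint>

// Sketch of the conversion choice above: the same 32 result bits become a
// double as unsigned for SHR, as signed for every other bit op.
double BitsToDouble(uint32_t bits, bool is_shr) {
  return is_shr ? static_cast<double>(bits)                        // vcvt_f64_u32
                : static_cast<double>(static_cast<int32_t>(bits)); // vcvt_f64_s32
}

int main() {
  uint32_t bits = 0x80000000u;  // bit 31 set
  assert(BitsToDouble(bits, /*is_shr=*/true) == 2147483648.0);
  assert(BitsToDouble(bits, /*is_shr=*/false) == -2147483648.0);
  return 0;
}
```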
| 2866 | 2539 |
| 2867 // Generate the smi code. If the operation on smis is successful, this return is | 2540 // Generate the smi code. If the operation on smis is successful, this return is |
| 2868 // generated. If the result is not a smi and heap number allocation is not | 2541 // generated. If the result is not a smi and heap number allocation is not |
| (...skipping 125 matching lines...) | |
| 2994 // again if this changes. | 2667 // again if this changes. |
| 2995 if (left_type_ == BinaryOpIC::SMI) { | 2668 if (left_type_ == BinaryOpIC::SMI) { |
| 2996 __ JumpIfNotSmi(left, &transition); | 2669 __ JumpIfNotSmi(left, &transition); |
| 2997 } | 2670 } |
| 2998 if (right_type_ == BinaryOpIC::SMI) { | 2671 if (right_type_ == BinaryOpIC::SMI) { |
| 2999 __ JumpIfNotSmi(right, &transition); | 2672 __ JumpIfNotSmi(right, &transition); |
| 3000 } | 2673 } |
| 3001 // Load both operands and check that they are 32-bit integers. | 2674 // Load both operands and check that they are 32-bit integers. |
| 3002 // Jump to type transition if they are not. The registers r0 and r1 (right | 2675 // Jump to type transition if they are not. The registers r0 and r1 (right |
| 3003 // and left) are preserved for the runtime call. | 2676 // and left) are preserved for the runtime call. |
| 3004 FloatingPointHelper::Destination destination = | 2677 FloatingPointHelper::Destination destination = (op_ != Token::MOD) |
| 3005 (CpuFeatures::IsSupported(VFP2) && op_ != Token::MOD) | |
| 3006 ? FloatingPointHelper::kVFPRegisters | 2678 ? FloatingPointHelper::kVFPRegisters |
| 3007 : FloatingPointHelper::kCoreRegisters; | 2679 : FloatingPointHelper::kCoreRegisters; |
| 3008 | 2680 |
| 3009 FloatingPointHelper::LoadNumberAsInt32Double(masm, | 2681 FloatingPointHelper::LoadNumberAsInt32Double(masm, |
| 3010 right, | 2682 right, |
| 3011 destination, | 2683 destination, |
| 3012 d7, | 2684 d7, |
| 3013 d8, | 2685 d8, |
| 3014 r2, | 2686 r2, |
| 3015 r3, | 2687 r3, |
| 3016 heap_number_map, | 2688 heap_number_map, |
| 3017 scratch1, | 2689 scratch1, |
| 3018 scratch2, | 2690 scratch2, |
| 3019 s0, | 2691 s0, |
| 3020 &transition); | 2692 &transition); |
| 3021 FloatingPointHelper::LoadNumberAsInt32Double(masm, | 2693 FloatingPointHelper::LoadNumberAsInt32Double(masm, |
| 3022 left, | 2694 left, |
| 3023 destination, | 2695 destination, |
| 3024 d6, | 2696 d6, |
| 3025 d8, | 2697 d8, |
| 3026 r4, | 2698 r4, |
| 3027 r5, | 2699 r5, |
| 3028 heap_number_map, | 2700 heap_number_map, |
| 3029 scratch1, | 2701 scratch1, |
| 3030 scratch2, | 2702 scratch2, |
| 3031 s0, | 2703 s0, |
| 3032 &transition); | 2704 &transition); |
| 3033 | 2705 |
| 3034 if (destination == FloatingPointHelper::kVFPRegisters) { | 2706 if (destination == FloatingPointHelper::kVFPRegisters) { |
| 3035 CpuFeatureScope scope(masm, VFP2); | |
| 3036 Label return_heap_number; | 2707 Label return_heap_number; |
| 3037 switch (op_) { | 2708 switch (op_) { |
| 3038 case Token::ADD: | 2709 case Token::ADD: |
| 3039 __ vadd(d5, d6, d7); | 2710 __ vadd(d5, d6, d7); |
| 3040 break; | 2711 break; |
| 3041 case Token::SUB: | 2712 case Token::SUB: |
| 3042 __ vsub(d5, d6, d7); | 2713 __ vsub(d5, d6, d7); |
| 3043 break; | 2714 break; |
| 3044 case Token::MUL: | 2715 case Token::MUL: |
| 3045 __ vmul(d5, d6, d7); | 2716 __ vmul(d5, d6, d7); |
| (...skipping 147 matching lines...) | |
| 3193 __ and_(r2, r2, Operand(0x1f)); | 2864 __ and_(r2, r2, Operand(0x1f)); |
| 3194 __ mov(r2, Operand(r3, ASR, r2)); | 2865 __ mov(r2, Operand(r3, ASR, r2)); |
| 3195 break; | 2866 break; |
| 3196 case Token::SHR: | 2867 case Token::SHR: |
| 3197 __ and_(r2, r2, Operand(0x1f)); | 2868 __ and_(r2, r2, Operand(0x1f)); |
| 3198 __ mov(r2, Operand(r3, LSR, r2), SetCC); | 2869 __ mov(r2, Operand(r3, LSR, r2), SetCC); |
| 3199 // SHR is special because it is required to produce a positive answer. | 2870 // SHR is special because it is required to produce a positive answer. |
| 3200 // We only get a negative result if the shift value (r2) is 0. | 2871 // We only get a negative result if the shift value (r2) is 0. |
| 3201 // This result cannot be represented as a signed 32-bit integer, try | 2872 // This result cannot be represented as a signed 32-bit integer, try |
| 3202 // to return a heap number if we can. | 2873 // to return a heap number if we can. |
| 3203 // The non vfp2 code does not support this special case, so jump to | 2874 __ b(mi, (result_type_ <= BinaryOpIC::INT32) |
| 3204 // runtime if we don't support it. | 2875 ? &transition |
| 3205 if (CpuFeatures::IsSupported(VFP2)) { | 2876 : &return_heap_number); |
| 3206 __ b(mi, (result_type_ <= BinaryOpIC::INT32) | |
| 3207 ? &transition | |
| 3208 : &return_heap_number); | |
| 3209 } else { | |
| 3210 __ b(mi, (result_type_ <= BinaryOpIC::INT32) | |
| 3211 ? &transition | |
| 3212 : &call_runtime); | |
| 3213 } | |
| 3214 break; | 2877 break; |
| 3215 case Token::SHL: | 2878 case Token::SHL: |
| 3216 __ and_(r2, r2, Operand(0x1f)); | 2879 __ and_(r2, r2, Operand(0x1f)); |
| 3217 __ mov(r2, Operand(r3, LSL, r2)); | 2880 __ mov(r2, Operand(r3, LSL, r2)); |
| 3218 break; | 2881 break; |
| 3219 default: | 2882 default: |
| 3220 UNREACHABLE(); | 2883 UNREACHABLE(); |
| 3221 } | 2884 } |
| 3222 | 2885 |
| 3223 // Check if the result fits in a smi. | 2886 // Check if the result fits in a smi. |
| 3224 __ add(scratch1, r2, Operand(0x40000000), SetCC); | 2887 __ add(scratch1, r2, Operand(0x40000000), SetCC); |
| 3225 // If not, try to return a heap number. (We know the result is an int32.) | 2888 // If not, try to return a heap number. (We know the result is an int32.) |
| 3226 __ b(mi, &return_heap_number); | 2889 __ b(mi, &return_heap_number); |
| 3227 // Tag the result and return. | 2890 // Tag the result and return. |
| 3228 __ SmiTag(r0, r2); | 2891 __ SmiTag(r0, r2); |
| 3229 __ Ret(); | 2892 __ Ret(); |
| 3230 | 2893 |
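The add-with-0x40000000 idiom above is a one-instruction smi range check. A sketch of why it works:

```cpp
#include <cassert>
#include <cstdint>

// A value fits in a smi iff it lies in [-2^30, 2^30). Adding 0x40000000
// shifts that window to [0, 2^31), so bit 31 of the sum -- the ARM N flag
// after "add ..., SetCC" -- is set exactly for the out-of-range values:
// one add instead of two compares.
bool FitsInSmi(int32_t v) {
  uint32_t sum = static_cast<uint32_t>(v) + 0x40000000u;
  return (sum & 0x80000000u) == 0;  // "pl" falls through to SmiTag
}

int main() {
  assert(FitsInSmi(0) && FitsInSmi((1 << 30) - 1) && FitsInSmi(-(1 << 30)));
  assert(!FitsInSmi(1 << 30) && !FitsInSmi(INT32_MIN));
  return 0;
}
```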
| 3231 __ bind(&return_heap_number); | 2894 __ bind(&return_heap_number); |
| 3232 heap_number_result = r5; | 2895 heap_number_result = r5; |
| 3233 BinaryOpStub_GenerateHeapResultAllocation(masm, | 2896 BinaryOpStub_GenerateHeapResultAllocation(masm, |
| 3234 heap_number_result, | 2897 heap_number_result, |
| 3235 heap_number_map, | 2898 heap_number_map, |
| 3236 scratch1, | 2899 scratch1, |
| 3237 scratch2, | 2900 scratch2, |
| 3238 &call_runtime, | 2901 &call_runtime, |
| 3239 mode_); | 2902 mode_); |
| 3240 | 2903 |
| 3241 if (CpuFeatures::IsSupported(VFP2)) { | 2904 if (op_ != Token::SHR) { |
| 3242 CpuFeatureScope scope(masm, VFP2); | 2905 // Convert the result to a floating point value. |
| 3243 if (op_ != Token::SHR) { | 2906 __ vmov(double_scratch.low(), r2); |
| 3244 // Convert the result to a floating point value. | 2907 __ vcvt_f64_s32(double_scratch, double_scratch.low()); |
| 3245 __ vmov(double_scratch.low(), r2); | 2908 } else { |
| 3246 __ vcvt_f64_s32(double_scratch, double_scratch.low()); | 2909 // The result must be interpreted as an unsigned 32-bit integer. |
| 3247 } else { | 2910 __ vmov(double_scratch.low(), r2); |
| 3248 // The result must be interpreted as an unsigned 32-bit integer. | 2911 __ vcvt_f64_u32(double_scratch, double_scratch.low()); |
| 3249 __ vmov(double_scratch.low(), r2); | 2912 } |
| 3250 __ vcvt_f64_u32(double_scratch, double_scratch.low()); | |
| 3251 } | |
| 3252 | 2913 |
| 3253 // Store the result. | 2914 // Store the result. |
| 3254 __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); | 2915 __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); |
| 3255 __ vstr(double_scratch, r0, HeapNumber::kValueOffset); | 2916 __ vstr(double_scratch, r0, HeapNumber::kValueOffset); |
| 3256 __ mov(r0, heap_number_result); | 2917 __ mov(r0, heap_number_result); |
| 3257 __ Ret(); | 2918 __ Ret(); |
| 3258 } else { | |
| 3259 // Tail call that writes the int32 in r2 to the heap number in r0, using | |
| 3260 // r3 as scratch. r0 is preserved and returned. | |
| 3261 __ mov(r0, r5); | |
| 3262 WriteInt32ToHeapNumberStub stub(r2, r0, r3); | |
| 3263 __ TailCallStub(&stub); | |
| 3264 } | |
| 3265 | 2919 |
| 3266 break; | 2920 break; |
| 3267 } | 2921 } |
| 3268 | 2922 |
| 3269 default: | 2923 default: |
| 3270 UNREACHABLE(); | 2924 UNREACHABLE(); |
| 3271 } | 2925 } |
| 3272 | 2926 |
| 3273 // We never expect DIV to yield an integer result, so we always generate | 2927 // We never expect DIV to yield an integer result, so we always generate |
| 3274 // type transition code for DIV operations expecting an integer result: the | 2928 // type transition code for DIV operations expecting an integer result: the |
| (...skipping 159 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 3434 | 3088 |
| 3435 Label input_not_smi; | 3089 Label input_not_smi; |
| 3436 Label loaded; | 3090 Label loaded; |
| 3437 Label calculate; | 3091 Label calculate; |
| 3438 Label invalid_cache; | 3092 Label invalid_cache; |
| 3439 const Register scratch0 = r9; | 3093 const Register scratch0 = r9; |
| 3440 const Register scratch1 = r7; | 3094 const Register scratch1 = r7; |
| 3441 const Register cache_entry = r0; | 3095 const Register cache_entry = r0; |
| 3442 const bool tagged = (argument_type_ == TAGGED); | 3096 const bool tagged = (argument_type_ == TAGGED); |
| 3443 | 3097 |
| 3444 if (CpuFeatures::IsSupported(VFP2)) { | 3098 if (tagged) { |
| 3445 CpuFeatureScope scope(masm, VFP2); | 3099 // Argument is a number and is on stack and in r0. |
| 3446 if (tagged) { | 3100 // Load argument and check if it is a smi. |
| 3447 // Argument is a number and is on stack and in r0. | 3101 __ JumpIfNotSmi(r0, &input_not_smi); |
| 3448 // Load argument and check if it is a smi. | |
| 3449 __ JumpIfNotSmi(r0, &input_not_smi); | |
| 3450 | 3102 |
| 3451 // Input is a smi. Convert to double and load the low and high words | 3103 // Input is a smi. Convert to double and load the low and high words |
| 3452 // of the double into r2, r3. | 3104 // of the double into r2, r3. |
| 3453 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); | 3105 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); |
| 3454 __ b(&loaded); | 3106 __ b(&loaded); |
| 3455 | 3107 |
| 3456 __ bind(&input_not_smi); | 3108 __ bind(&input_not_smi); |
| 3457 // Check if input is a HeapNumber. | 3109 // Check if input is a HeapNumber. |
| 3458 __ CheckMap(r0, | 3110 __ CheckMap(r0, |
| 3459 r1, | 3111 r1, |
| 3460 Heap::kHeapNumberMapRootIndex, | 3112 Heap::kHeapNumberMapRootIndex, |
| 3461 &calculate, | 3113 &calculate, |
| 3462 DONT_DO_SMI_CHECK); | 3114 DONT_DO_SMI_CHECK); |
| 3463 // Input is a HeapNumber. Load it to a double register and store the | 3115 // Input is a HeapNumber. Load it to a double register and store the |
| 3464 // low and high words into r2, r3. | 3116 // low and high words into r2, r3. |
| 3465 __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); | 3117 __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
| 3466 __ vmov(r2, r3, d0); | 3118 __ vmov(r2, r3, d0); |
| 3467 } else { | 3119 } else { |
| 3468 // Input is untagged double in d2. Output goes to d2. | 3120 // Input is untagged double in d2. Output goes to d2. |
| 3469 __ vmov(r2, r3, d2); | 3121 __ vmov(r2, r3, d2); |
| 3470 } | 3122 } |
| 3471 __ bind(&loaded); | 3123 __ bind(&loaded); |
| 3472 // r2 = low 32 bits of double value | 3124 // r2 = low 32 bits of double value |
| 3473 // r3 = high 32 bits of double value | 3125 // r3 = high 32 bits of double value |
| 3474 // Compute hash (the shifts are arithmetic): | 3126 // Compute hash (the shifts are arithmetic): |
| 3475 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); | 3127 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); |
| 3476 __ eor(r1, r2, Operand(r3)); | 3128 __ eor(r1, r2, Operand(r3)); |
| 3477 __ eor(r1, r1, Operand(r1, ASR, 16)); | 3129 __ eor(r1, r1, Operand(r1, ASR, 16)); |
| 3478 __ eor(r1, r1, Operand(r1, ASR, 8)); | 3130 __ eor(r1, r1, Operand(r1, ASR, 8)); |
| 3479 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); | 3131 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); |
| 3480 __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1)); | 3132 __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1)); |
| 3481 | 3133 |
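A plain C++ transcription of the hash computed above, assuming an illustrative power-of-two cache size (the real kCacheSize lives in TranscendentalCache::SubCache). The intermediate shifts are arithmetic, matching the ASR operands (well-defined in C++20 and what mainstream compilers do for signed types):

```cpp
#include <cassert>
#include <cstdint>

constexpr uint32_t kIllustrativeCacheSize = 512;  // assumption; a power of two

uint32_t TranscendentalCacheHash(uint32_t lo, uint32_t hi) {
  int32_t h = static_cast<int32_t>(lo ^ hi);  // eor r1, r2, r3
  h ^= h >> 16;                               // eor r1, r1, r1, ASR #16
  h ^= h >> 8;                                // eor r1, r1, r1, ASR #8
  return static_cast<uint32_t>(h) & (kIllustrativeCacheSize - 1);
}

int main() {
  // The high word of 1.0's bits is 0x3ff00000; any input must land in range.
  assert(TranscendentalCacheHash(0x00000000u, 0x3ff00000u) <
         kIllustrativeCacheSize);
  return 0;
}
```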
| 3482 // r2 = low 32 bits of double value. | 3134 // r2 = low 32 bits of double value. |
| 3483 // r3 = high 32 bits of double value. | 3135 // r3 = high 32 bits of double value. |
| 3484 // r1 = TranscendentalCache::hash(double value). | 3136 // r1 = TranscendentalCache::hash(double value). |
| 3485 Isolate* isolate = masm->isolate(); | 3137 Isolate* isolate = masm->isolate(); |
| 3486 ExternalReference cache_array = | 3138 ExternalReference cache_array = |
| 3487 ExternalReference::transcendental_cache_array_address(isolate); | 3139 ExternalReference::transcendental_cache_array_address(isolate); |
| 3488 __ mov(cache_entry, Operand(cache_array)); | 3140 __ mov(cache_entry, Operand(cache_array)); |
| 3489 // cache_entry points to cache array. | 3141 // cache_entry points to cache array. |
| 3490 int cache_array_index | 3142 int cache_array_index |
| 3491 = type_ * sizeof(isolate->transcendental_cache()->caches_[0]); | 3143 = type_ * sizeof(isolate->transcendental_cache()->caches_[0]); |
| 3492 __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index)); | 3144 __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index)); |
| 3493 // r0 points to the cache for the type type_. | 3145 // r0 points to the cache for the type type_. |
| 3494 // If NULL, the cache hasn't been initialized yet, so go through runtime. | 3146 // If NULL, the cache hasn't been initialized yet, so go through runtime. |
| 3495 __ cmp(cache_entry, Operand::Zero()); | 3147 __ cmp(cache_entry, Operand::Zero()); |
| 3496 __ b(eq, &invalid_cache); | 3148 __ b(eq, &invalid_cache); |
| 3497 | 3149 |
| 3498 #ifdef DEBUG | 3150 #ifdef DEBUG |
| 3499 // Check that the layout of cache elements matches expectations. | 3151 // Check that the layout of cache elements matches expectations. |
| 3500 { TranscendentalCache::SubCache::Element test_elem[2]; | 3152 { TranscendentalCache::SubCache::Element test_elem[2]; |
| 3501 char* elem_start = reinterpret_cast<char*>(&test_elem[0]); | 3153 char* elem_start = reinterpret_cast<char*>(&test_elem[0]); |
| 3502 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); | 3154 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); |
| 3503 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); | 3155 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); |
| 3504 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); | 3156 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); |
| 3505 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); | 3157 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); |
| 3506 CHECK_EQ(12, elem2_start - elem_start); // Two uint32_t's and a pointer. | 3158 CHECK_EQ(12, elem2_start - elem_start); // Two uint32_t's and a pointer. |
| 3507 CHECK_EQ(0, elem_in0 - elem_start); | 3159 CHECK_EQ(0, elem_in0 - elem_start); |
| 3508 CHECK_EQ(kIntSize, elem_in1 - elem_start); | 3160 CHECK_EQ(kIntSize, elem_in1 - elem_start); |
| 3509 CHECK_EQ(2 * kIntSize, elem_out - elem_start); | 3161 CHECK_EQ(2 * kIntSize, elem_out - elem_start); |
| 3510 } | 3162 } |
| 3511 #endif | 3163 #endif |
| 3512 | 3164 |
| 3513 // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12]. | 3165 // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12]. |
| 3514 __ add(r1, r1, Operand(r1, LSL, 1)); | 3166 __ add(r1, r1, Operand(r1, LSL, 1)); |
| 3515 __ add(cache_entry, cache_entry, Operand(r1, LSL, 2)); | 3167 __ add(cache_entry, cache_entry, Operand(r1, LSL, 2)); |
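Each cache element is 12 bytes (the DEBUG block above verifies this: two uint32_t inputs plus a pointer), and index*12 is formed without a multiply: the add-with-LSL#1 triples the index, then the LSL#2 in the addressing add quadruples it. As a sketch:

```cpp
#include <cassert>
#include <cstdint>

// Sketch of the addressing math above: index*12 built from shifts and adds.
uintptr_t EntryAddress(uintptr_t base, uint32_t index) {
  uint32_t tripled = index + (index << 1);  // add r1, r1, r1, LSL #1
  return base + (tripled << 2);             // add r0, r0, r1, LSL #2
}

int main() {
  assert(EntryAddress(0, 7) == 7 * 12);
  return 0;
}
```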
| 3516 // Check if cache matches: Double value is stored in uint32_t[2] array. | 3168 // Check if cache matches: Double value is stored in uint32_t[2] array. |
| 3517 __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit()); | 3169 __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit()); |
| 3518 __ cmp(r2, r4); | 3170 __ cmp(r2, r4); |
| 3519 __ cmp(r3, r5, eq); | 3171 __ cmp(r3, r5, eq); |
| 3520 __ b(ne, &calculate); | 3172 __ b(ne, &calculate); |
| 3521 // Cache hit. Load result, cleanup and return. | 3173 // Cache hit. Load result, cleanup and return. |
| 3522 Counters* counters = masm->isolate()->counters(); | 3174 Counters* counters = masm->isolate()->counters(); |
| 3523 __ IncrementCounter( | 3175 __ IncrementCounter( |
| 3524 counters->transcendental_cache_hit(), 1, scratch0, scratch1); | 3176 counters->transcendental_cache_hit(), 1, scratch0, scratch1); |
| 3525 if (tagged) { | 3177 if (tagged) { |
| 3526 // Pop input value from stack and load result into r0. | 3178 // Pop input value from stack and load result into r0. |
| 3527 __ pop(); | 3179 __ pop(); |
| 3528 __ mov(r0, Operand(r6)); | 3180 __ mov(r0, Operand(r6)); |
| 3529 } else { | 3181 } else { |
| 3530 // Load result into d2. | 3182 // Load result into d2. |
| 3531 __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); | 3183 __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); |
| 3532 } | 3184 } |
| 3533 __ Ret(); | 3185 __ Ret(); |
| 3534 } // if (CpuFeatures::IsSupported(VFP3)) | |
| 3535 | 3186 |
| 3536 __ bind(&calculate); | 3187 __ bind(&calculate); |
| 3537 Counters* counters = masm->isolate()->counters(); | |
| 3538 __ IncrementCounter( | 3188 __ IncrementCounter( |
| 3539 counters->transcendental_cache_miss(), 1, scratch0, scratch1); | 3189 counters->transcendental_cache_miss(), 1, scratch0, scratch1); |
| 3540 if (tagged) { | 3190 if (tagged) { |
| 3541 __ bind(&invalid_cache); | 3191 __ bind(&invalid_cache); |
| 3542 ExternalReference runtime_function = | 3192 ExternalReference runtime_function = |
| 3543 ExternalReference(RuntimeFunction(), masm->isolate()); | 3193 ExternalReference(RuntimeFunction(), masm->isolate()); |
| 3544 __ TailCallExternalReference(runtime_function, 1, 1); | 3194 __ TailCallExternalReference(runtime_function, 1, 1); |
| 3545 } else { | 3195 } else { |
| 3546 ASSERT(CpuFeatures::IsSupported(VFP2)); | |
| 3547 CpuFeatureScope scope(masm, VFP2); | |
| 3548 | |
| 3549 Label no_update; | 3196 Label no_update; |
| 3550 Label skip_cache; | 3197 Label skip_cache; |
| 3551 | 3198 |
| 3552 // Call C function to calculate the result and update the cache. | 3199 // Call C function to calculate the result and update the cache. |
| 3553 // r0: precalculated cache entry address. | 3200 // r0: precalculated cache entry address. |
| 3554 // r2 and r3: parts of the double value. | 3201 // r2 and r3: parts of the double value. |
| 3555 // Store r0, r2 and r3 on stack for later before calling C function. | 3202 // Store r0, r2 and r3 on stack for later before calling C function. |
| 3556 __ Push(r3, r2, cache_entry); | 3203 __ Push(r3, r2, cache_entry); |
| 3557 GenerateCallCFunction(masm, scratch0); | 3204 GenerateCallCFunction(masm, scratch0); |
| 3558 __ GetCFunctionDoubleResult(d2); | 3205 __ GetCFunctionDoubleResult(d2); |
| (...skipping 39 matching lines...) | |
| 3598 __ push(scratch0); | 3245 __ push(scratch0); |
| 3599 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); | 3246 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); |
| 3600 } | 3247 } |
| 3601 __ Ret(); | 3248 __ Ret(); |
| 3602 } | 3249 } |
| 3603 } | 3250 } |
| 3604 | 3251 |
| 3605 | 3252 |
| 3606 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, | 3253 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, |
| 3607 Register scratch) { | 3254 Register scratch) { |
| 3608 ASSERT(masm->IsEnabled(VFP2)); | |
| 3609 Isolate* isolate = masm->isolate(); | 3255 Isolate* isolate = masm->isolate(); |
| 3610 | 3256 |
| 3611 __ push(lr); | 3257 __ push(lr); |
| 3612 __ PrepareCallCFunction(0, 1, scratch); | 3258 __ PrepareCallCFunction(0, 1, scratch); |
| 3613 if (masm->use_eabi_hardfloat()) { | 3259 if (masm->use_eabi_hardfloat()) { |
| 3614 __ vmov(d0, d2); | 3260 __ vmov(d0, d2); |
| 3615 } else { | 3261 } else { |
| 3616 __ vmov(r0, r1, d2); | 3262 __ vmov(r0, r1, d2); |
| 3617 } | 3263 } |
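The use_eabi_hardfloat() split above reflects the two ARM EABI variants: with hardfloat the first double argument travels in d0, while with softfloat its raw bits go in the core pair r0/r1, low word in r0. A hedged sketch of the difference (not V8's MacroAssembler):

```cpp
#include <cstdint>
#include <cstring>

// Where the first double argument to a C function lands under each ABI.
struct DoubleArgSlots {
  double d0;        // vmov d0, d2        (hardfloat)
  uint32_t r0, r1;  // vmov r0, r1, d2    (softfloat: low word, high word)
};

DoubleArgSlots PassDouble(double value, bool hardfloat) {
  DoubleArgSlots slots = {0.0, 0, 0};
  if (hardfloat) {
    slots.d0 = value;
  } else {
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    slots.r0 = static_cast<uint32_t>(bits);        // low 32 bits
    slots.r1 = static_cast<uint32_t>(bits >> 32);  // high 32 bits
  }
  return slots;
}

int main() { PassDouble(1.5, /*hardfloat=*/false); return 0; }
```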
| 3618 AllowExternalCallThatCantCauseGC scope(masm); | 3264 AllowExternalCallThatCantCauseGC scope(masm); |
| (...skipping 40 matching lines...) | |
| 3659 __ TailCallRuntime(Runtime::kStackGuard, 0, 1); | 3305 __ TailCallRuntime(Runtime::kStackGuard, 0, 1); |
| 3660 } | 3306 } |
| 3661 | 3307 |
| 3662 | 3308 |
| 3663 void InterruptStub::Generate(MacroAssembler* masm) { | 3309 void InterruptStub::Generate(MacroAssembler* masm) { |
| 3664 __ TailCallRuntime(Runtime::kInterrupt, 0, 1); | 3310 __ TailCallRuntime(Runtime::kInterrupt, 0, 1); |
| 3665 } | 3311 } |
| 3666 | 3312 |
| 3667 | 3313 |
| 3668 void MathPowStub::Generate(MacroAssembler* masm) { | 3314 void MathPowStub::Generate(MacroAssembler* masm) { |
| 3669 CpuFeatureScope vfp2_scope(masm, VFP2); | |
| 3670 const Register base = r1; | 3315 const Register base = r1; |
| 3671 const Register exponent = r2; | 3316 const Register exponent = r2; |
| 3672 const Register heapnumbermap = r5; | 3317 const Register heapnumbermap = r5; |
| 3673 const Register heapnumber = r0; | 3318 const Register heapnumber = r0; |
| 3674 const DwVfpRegister double_base = d1; | 3319 const DwVfpRegister double_base = d1; |
| 3675 const DwVfpRegister double_exponent = d2; | 3320 const DwVfpRegister double_exponent = d2; |
| 3676 const DwVfpRegister double_result = d3; | 3321 const DwVfpRegister double_result = d3; |
| 3677 const DwVfpRegister double_scratch = d0; | 3322 const DwVfpRegister double_scratch = d0; |
| 3678 const SwVfpRegister single_scratch = s0; | 3323 const SwVfpRegister single_scratch = s0; |
| 3679 const Register scratch = r9; | 3324 const Register scratch = r9; |
| (...skipping 197 matching lines...) | |
| 3877 | 3522 |
| 3878 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { | 3523 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { |
| 3879 CEntryStub::GenerateAheadOfTime(isolate); | 3524 CEntryStub::GenerateAheadOfTime(isolate); |
| 3880 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate); | 3525 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate); |
| 3881 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); | 3526 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); |
| 3882 RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); | 3527 RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); |
| 3883 } | 3528 } |
| 3884 | 3529 |
| 3885 | 3530 |
| 3886 void CodeStub::GenerateFPStubs(Isolate* isolate) { | 3531 void CodeStub::GenerateFPStubs(Isolate* isolate) { |
| 3887 SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2) | 3532 SaveFPRegsMode mode = kSaveFPRegs; |
| 3888 ? kSaveFPRegs | |
| 3889 : kDontSaveFPRegs; | |
| 3890 CEntryStub save_doubles(1, mode); | 3533 CEntryStub save_doubles(1, mode); |
| 3891 StoreBufferOverflowStub stub(mode); | 3534 StoreBufferOverflowStub stub(mode); |
| 3892 // These stubs might already be in the snapshot; detect that and don't | 3535 // These stubs might already be in the snapshot; detect that and don't |
| 3893 // regenerate, since regenerating would leave the code stub initialization | 3536 // regenerate, since regenerating would leave the code stub initialization |
| 3894 // state inconsistent. | 3537 // state inconsistent. |
| 3895 Code* save_doubles_code; | 3538 Code* save_doubles_code; |
| 3896 if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) { | 3539 if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) { |
| 3897 save_doubles_code = *save_doubles.GetCode(isolate); | 3540 save_doubles_code = *save_doubles.GetCode(isolate); |
| 3898 save_doubles_code->set_is_pregenerated(true); | 3541 save_doubles_code->set_is_pregenerated(true); |
| 3899 | 3542 |
| (...skipping 239 matching lines...) | |
| 4139 // r3: argc | 3782 // r3: argc |
| 4140 // [sp+0]: argv | 3783 // [sp+0]: argv |
| 4141 | 3784 |
| 4142 Label invoke, handler_entry, exit; | 3785 Label invoke, handler_entry, exit; |
| 4143 | 3786 |
| 4144 // Called from C, so do not pop argc and args on exit (preserve sp) | 3787 // Called from C, so do not pop argc and args on exit (preserve sp) |
| 4145 // No need to save register-passed args | 3788 // No need to save register-passed args |
| 4146 // Save callee-saved registers (incl. cp and fp), sp, and lr | 3789 // Save callee-saved registers (incl. cp and fp), sp, and lr |
| 4147 __ stm(db_w, sp, kCalleeSaved | lr.bit()); | 3790 __ stm(db_w, sp, kCalleeSaved | lr.bit()); |
| 4148 | 3791 |
| 4149 if (CpuFeatures::IsSupported(VFP2)) { | 3792 // Save callee-saved vfp registers. |
| 4150 CpuFeatureScope scope(masm, VFP2); | 3793 __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); |
| 4151 // Save callee-saved vfp registers. | 3794 // Set up the reserved register for 0.0. |
| 4152 __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); | 3795 __ vmov(kDoubleRegZero, 0.0); |
| 4153 // Set up the reserved register for 0.0. | |
| 4154 __ vmov(kDoubleRegZero, 0.0); | |
| 4155 } | |
| 4156 | 3796 |
| 4157 // Get address of argv, see stm above. | 3797 // Get address of argv, see stm above. |
| 4158 // r0: code entry | 3798 // r0: code entry |
| 4159 // r1: function | 3799 // r1: function |
| 4160 // r2: receiver | 3800 // r2: receiver |
| 4161 // r3: argc | 3801 // r3: argc |
| 4162 | 3802 |
| 4163 // Set up argv in r4. | 3803 // Set up argv in r4. |
| 4164 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; | 3804 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; |
| 4165 if (CpuFeatures::IsSupported(VFP2)) { | 3805 offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize; |
| 4166 offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize; | |
| 4167 } | |
| 4168 __ ldr(r4, MemOperand(sp, offset_to_argv)); | 3806 __ ldr(r4, MemOperand(sp, offset_to_argv)); |
| 4169 | 3807 |
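offset_to_argv above accounts for everything the prologue just pushed: the core callee-saved set plus lr, then the callee-saved VFP registers (now pushed unconditionally). With illustrative register counts, the arithmetic is:

```cpp
// Sketch of the offset computed above; the register counts are assumptions
// standing in for the ARM port's kNumCalleeSaved / kNumDoubleCalleeSaved.
constexpr int kPointerSize = 4;                       // 32-bit ARM
constexpr int kDoubleSize = 8;
constexpr int kIllustrativeNumCalleeSaved = 7;        // assumption: r4-r10
constexpr int kIllustrativeNumDoubleCalleeSaved = 8;  // assumption: d8-d15

constexpr int OffsetToArgv() {
  return (kIllustrativeNumCalleeSaved + 1) * kPointerSize  // cores + lr
       + kIllustrativeNumDoubleCalleeSaved * kDoubleSize;  // VFP d-registers
}

static_assert(OffsetToArgv() == 96, "argv is 96 bytes up under these counts");

int main() { return OffsetToArgv() == 96 ? 0 : 1; }
```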
| 4170 // Push a frame with special values setup to mark it as an entry frame. | 3808 // Push a frame with special values setup to mark it as an entry frame. |
| 4171 // r0: code entry | 3809 // r0: code entry |
| 4172 // r1: function | 3810 // r1: function |
| 4173 // r2: receiver | 3811 // r2: receiver |
| 4174 // r3: argc | 3812 // r3: argc |
| 4175 // r4: argv | 3813 // r4: argv |
| 4176 Isolate* isolate = masm->isolate(); | 3814 Isolate* isolate = masm->isolate(); |
| 4177 __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used. | 3815 __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used. |
| (...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 4293 // Reset the stack to the callee saved registers. | 3931 // Reset the stack to the callee saved registers. |
| 4294 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); | 3932 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); |
| 4295 | 3933 |
| 4296 // Restore callee-saved registers and return. | 3934 // Restore callee-saved registers and return. |
| 4297 #ifdef DEBUG | 3935 #ifdef DEBUG |
| 4298 if (FLAG_debug_code) { | 3936 if (FLAG_debug_code) { |
| 4299 __ mov(lr, Operand(pc)); | 3937 __ mov(lr, Operand(pc)); |
| 4300 } | 3938 } |
| 4301 #endif | 3939 #endif |
| 4302 | 3940 |
| 4303 if (CpuFeatures::IsSupported(VFP2)) { | 3941 // Restore callee-saved vfp registers. |
| 4304 CpuFeatureScope scope(masm, VFP2); | 3942 __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); |
| 4305 // Restore callee-saved vfp registers. | |
| 4306 __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); | |
| 4307 } | |
| 4308 | 3943 |
| 4309 __ ldm(ia_w, sp, kCalleeSaved | pc.bit()); | 3944 __ ldm(ia_w, sp, kCalleeSaved | pc.bit()); |
| 4310 } | 3945 } |
| 4311 | 3946 |
| 4312 | 3947 |
| 4313 // Uses registers r0 to r4. | 3948 // Uses registers r0 to r4. |
| 4314 // Expected input (depending on whether args are in registers or on the stack): | 3949 // Expected input (depending on whether args are in registers or on the stack): |
| 4315 // * object: r0 or at sp + 1 * kPointerSize. | 3950 // * object: r0 or at sp + 1 * kPointerSize. |
| 4316 // * function: r1 or at sp. | 3951 // * function: r1 or at sp. |
| 4317 // | 3952 // |
| (...skipping 2684 matching lines...) | |
| 7002 Label miss; | 6637 Label miss; |
| 7003 | 6638 |
| 7004 if (left_ == CompareIC::SMI) { | 6639 if (left_ == CompareIC::SMI) { |
| 7005 __ JumpIfNotSmi(r1, &miss); | 6640 __ JumpIfNotSmi(r1, &miss); |
| 7006 } | 6641 } |
| 7007 if (right_ == CompareIC::SMI) { | 6642 if (right_ == CompareIC::SMI) { |
| 7008 __ JumpIfNotSmi(r0, &miss); | 6643 __ JumpIfNotSmi(r0, &miss); |
| 7009 } | 6644 } |
| 7010 | 6645 |
| 7011 // Inlining the double comparison and falling back to the general compare | 6646 // Inlining the double comparison and falling back to the general compare |
| 7012 // stub if NaN is involved or VFP2 is unsupported. | 6647 // stub if NaN is involved. |
| 7013 if (CpuFeatures::IsSupported(VFP2)) { | 6648 // Load left and right operand. |
| 7014 CpuFeatureScope scope(masm, VFP2); | 6649 Label done, left, left_smi, right_smi; |
| 6650 __ JumpIfSmi(r0, &right_smi); | |
| 6651 __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, | |
| 6652 DONT_DO_SMI_CHECK); | |
| 6653 __ sub(r2, r0, Operand(kHeapObjectTag)); | |
| 6654 __ vldr(d1, r2, HeapNumber::kValueOffset); | |
| 6655 __ b(&left); | |
| 6656 __ bind(&right_smi); | |
| 6657 __ SmiUntag(r2, r0); // Can't clobber r0 yet. | |
| 6658 SwVfpRegister single_scratch = d2.low(); | |
| 6659 __ vmov(single_scratch, r2); | |
| 6660 __ vcvt_f64_s32(d1, single_scratch); | |
| 7015 | 6661 |
| 7016 // Load left and right operand. | 6662 __ bind(&left); |
| 7017 Label done, left, left_smi, right_smi; | 6663 __ JumpIfSmi(r1, &left_smi); |
| 7018 __ JumpIfSmi(r0, &right_smi); | 6664 __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, |
| 7019 __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, | 6665 DONT_DO_SMI_CHECK); |
| 7020 DONT_DO_SMI_CHECK); | 6666 __ sub(r2, r1, Operand(kHeapObjectTag)); |
| 7021 __ sub(r2, r0, Operand(kHeapObjectTag)); | 6667 __ vldr(d0, r2, HeapNumber::kValueOffset); |
| 7022 __ vldr(d1, r2, HeapNumber::kValueOffset); | 6668 __ b(&done); |
| 7023 __ b(&left); | 6669 __ bind(&left_smi); |
| 7024 __ bind(&right_smi); | 6670 __ SmiUntag(r2, r1); // Can't clobber r1 yet. |
| 7025 __ SmiUntag(r2, r0); // Can't clobber r0 yet. | 6671 single_scratch = d3.low(); |
| 7026 SwVfpRegister single_scratch = d2.low(); | 6672 __ vmov(single_scratch, r2); |
| 7027 __ vmov(single_scratch, r2); | 6673 __ vcvt_f64_s32(d0, single_scratch); |
| 7028 __ vcvt_f64_s32(d1, single_scratch); | |
| 7029 | 6674 |
| 7030 __ bind(&left); | 6675 __ bind(&done); |
| 7031 __ JumpIfSmi(r1, &left_smi); | 6676 // Compare operands. |
| 7032 __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, | 6677 __ VFPCompareAndSetFlags(d0, d1); |
| 7033 DONT_DO_SMI_CHECK); | |
| 7034 __ sub(r2, r1, Operand(kHeapObjectTag)); | |
| 7035 __ vldr(d0, r2, HeapNumber::kValueOffset); | |
| 7036 __ b(&done); | |
| 7037 __ bind(&left_smi); | |
| 7038 __ SmiUntag(r2, r1); // Can't clobber r1 yet. | |
| 7039 single_scratch = d3.low(); | |
| 7040 __ vmov(single_scratch, r2); | |
| 7041 __ vcvt_f64_s32(d0, single_scratch); | |
| 7042 | 6678 |
| 7043 __ bind(&done); | 6679 // Don't base result on status bits when a NaN is involved. |
| 7044 // Compare operands. | 6680 __ b(vs, &unordered); |
| 7045 __ VFPCompareAndSetFlags(d0, d1); | |
| 7046 | 6681 |
| 7047 // Don't base result on status bits when a NaN is involved. | 6682 // Return a result of -1, 0, or 1, based on status bits. |
| 7048 __ b(vs, &unordered); | 6683 __ mov(r0, Operand(EQUAL), LeaveCC, eq); |
| 7049 | 6684 __ mov(r0, Operand(LESS), LeaveCC, lt); |
| 7050 // Return a result of -1, 0, or 1, based on status bits. | 6685 __ mov(r0, Operand(GREATER), LeaveCC, gt); |
| 7051 __ mov(r0, Operand(EQUAL), LeaveCC, eq); | 6686 __ Ret(); |
| 7052 __ mov(r0, Operand(LESS), LeaveCC, lt); | |
| 7053 __ mov(r0, Operand(GREATER), LeaveCC, gt); | |
| 7054 __ Ret(); | |
| 7055 } | |
| 7056 | 6687 |
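The VFPCompareAndSetFlags tail above is a NaN-aware three-way compare: the vs branch catches the unordered case, and the three conditional movs materialize the result. A sketch, with illustrative constants standing in for V8's LESS/EQUAL/GREATER values:

```cpp
#include <cassert>
#include <cmath>

enum CompareResult { kLess = -1, kEqual = 0, kGreater = 1, kUnordered = 2 };

CompareResult ThreeWayCompare(double left, double right) {
  if (std::isnan(left) || std::isnan(right)) return kUnordered;  // b vs
  if (left == right) return kEqual;  // mov r0, Operand(EQUAL), LeaveCC, eq
  if (left < right) return kLess;    // mov r0, Operand(LESS), LeaveCC, lt
  return kGreater;                   // mov r0, Operand(GREATER), LeaveCC, gt
}

int main() {
  assert(ThreeWayCompare(1.0, 2.0) == kLess);
  assert(ThreeWayCompare(2.0, 2.0) == kEqual);
  assert(ThreeWayCompare(std::nan(""), 2.0) == kUnordered);
  return 0;
}
```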
| 7057 __ bind(&unordered); | 6688 __ bind(&unordered); |
| 7058 __ bind(&generic_stub); | 6689 __ bind(&generic_stub); |
| 7059 ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, | 6690 ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, |
| 7060 CompareIC::GENERIC); | 6691 CompareIC::GENERIC); |
| 7061 __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); | 6692 __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); |
| 7062 | 6693 |
| 7063 __ bind(&maybe_undefined1); | 6694 __ bind(&maybe_undefined1); |
| 7064 if (Token::IsOrderedRelationalCompareOp(op_)) { | 6695 if (Token::IsOrderedRelationalCompareOp(op_)) { |
| 7065 __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); | 6696 __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); |
| (...skipping 616 matching lines...) | |
| 7682 entry->value, | 7313 entry->value, |
| 7683 entry->address, | 7314 entry->address, |
| 7684 entry->action, | 7315 entry->action, |
| 7685 kDontSaveFPRegs); | 7316 kDontSaveFPRegs); |
| 7686 stub.GetCode(isolate)->set_is_pregenerated(true); | 7317 stub.GetCode(isolate)->set_is_pregenerated(true); |
| 7687 } | 7318 } |
| 7688 } | 7319 } |
| 7689 | 7320 |
| 7690 | 7321 |
| 7691 bool CodeStub::CanUseFPRegisters() { | 7322 bool CodeStub::CanUseFPRegisters() { |
| 7692 return CpuFeatures::IsSupported(VFP2); | 7323 return true; // VFP2 is a base requirement for V8 |
| 7693 } | 7324 } |
| 7694 | 7325 |
| 7695 | 7326 |
| 7696 // Takes the input in 3 registers: address_, value_, and object_. A pointer to | 7327 // Takes the input in 3 registers: address_, value_, and object_. A pointer to |
| 7697 // the value has just been written into the object; now this stub makes sure | 7328 // the value has just been written into the object; now this stub makes sure |
| 7698 // we keep the GC informed. The word in the object where the value has been | 7329 // we keep the GC informed. The word in the object where the value has been |
| 7699 // written is in the address register. | 7330 // written is in the address register. |
| 7700 void RecordWriteStub::Generate(MacroAssembler* masm) { | 7331 void RecordWriteStub::Generate(MacroAssembler* masm) { |
| 7701 Label skip_to_incremental_noncompacting; | 7332 Label skip_to_incremental_noncompacting; |
| 7702 Label skip_to_incremental_compacting; | 7333 Label skip_to_incremental_compacting; |
| (...skipping 245 matching lines...) | |
| 7948 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); | 7579 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); |
| 7949 __ StoreNumberToDoubleElements(r0, r3, | 7580 __ StoreNumberToDoubleElements(r0, r3, |
| 7950 // Overwrites all regs after this. | 7581 // Overwrites all regs after this. |
| 7951 r5, r6, r7, r9, r2, | 7582 r5, r6, r7, r9, r2, |
| 7952 &slow_elements); | 7583 &slow_elements); |
| 7953 __ Ret(); | 7584 __ Ret(); |
| 7954 } | 7585 } |
| 7955 | 7586 |
| 7956 | 7587 |
| 7957 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { | 7588 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { |
| 7958 ASSERT(!Serializer::enabled()); | 7589 CEntryStub ces(1, kSaveFPRegs); |
| 7959 bool save_fp_regs = CpuFeatures::IsSupported(VFP2); | |
| 7960 CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs); | |
| 7961 __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); | 7590 __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); |
| 7962 int parameter_count_offset = | 7591 int parameter_count_offset = |
| 7963 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; | 7592 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; |
| 7964 __ ldr(r1, MemOperand(fp, parameter_count_offset)); | 7593 __ ldr(r1, MemOperand(fp, parameter_count_offset)); |
| 7965 if (function_mode_ == JS_FUNCTION_STUB_MODE) { | 7594 if (function_mode_ == JS_FUNCTION_STUB_MODE) { |
| 7966 __ add(r1, r1, Operand(1)); | 7595 __ add(r1, r1, Operand(1)); |
| 7967 } | 7596 } |
| 7968 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); | 7597 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); |
| 7969 __ mov(r1, Operand(r1, LSL, kPointerSizeLog2)); | 7598 __ mov(r1, Operand(r1, LSL, kPointerSizeLog2)); |
| 7970 __ add(sp, sp, r1); | 7599 __ add(sp, sp, r1); |
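The tail of the trampoline above pops the caller's stack parameters by scaling the count to bytes with a shift; JS_FUNCTION_STUB_MODE adds one slot for the receiver. A sketch, assuming 32-bit pointers (kPointerSizeLog2 == 2):

```cpp
#include <cassert>
#include <cstdint>

constexpr int kPointerSizeLog2 = 2;  // assumption: 32-bit ARM

uintptr_t PopParameters(uintptr_t sp, uint32_t count, bool js_function_mode) {
  if (js_function_mode) count += 1;         // add r1, r1, Operand(1): receiver
  return sp + (count << kPointerSizeLog2);  // add sp, sp, r1, LSL #2
}

int main() {
  assert(PopParameters(1000, 3, /*js_function_mode=*/true) == 1016);
  return 0;
}
```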
| (...skipping 58 matching lines...) | |
| 8029 | 7658 |
| 8030 __ Pop(lr, r5, r1); | 7659 __ Pop(lr, r5, r1); |
| 8031 __ Ret(); | 7660 __ Ret(); |
| 8032 } | 7661 } |
| 8033 | 7662 |
| 8034 #undef __ | 7663 #undef __ |
| 8035 | 7664 |
| 8036 } } // namespace v8::internal | 7665 } } // namespace v8::internal |
| 8037 | 7666 |
| 8038 #endif // V8_TARGET_ARCH_ARM | 7667 #endif // V8_TARGET_ARCH_ARM |