Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 2570 matching lines...) | |
| 2581 __ Ret(); | 2581 __ Ret(); |
| 2582 break; | 2582 break; |
| 2583 case Token::BIT_AND: | 2583 case Token::BIT_AND: |
| 2584 __ and_(right, left, Operand(right)); | 2584 __ and_(right, left, Operand(right)); |
| 2585 __ Ret(); | 2585 __ Ret(); |
| 2586 break; | 2586 break; |
| 2587 case Token::BIT_XOR: | 2587 case Token::BIT_XOR: |
| 2588 __ eor(right, left, Operand(right)); | 2588 __ eor(right, left, Operand(right)); |
| 2589 __ Ret(); | 2589 __ Ret(); |
| 2590 break; | 2590 break; |
| 2591 case Token::SAR: | |
| 2592 // Remove tags from right operand. | |
| 2593 __ GetLeastBitsFromSmi(scratch1, right, 5); | |
| 2594 __ mov(right, Operand(left, ASR, scratch1)); | |
| 2595 // Smi tag result. | |
| 2596 __ bic(right, right, Operand(kSmiTagMask)); | |
| 2597 __ Ret(); | |
| 2598 break; | |
| 2599 case Token::SHR: | |
| 2600 // Remove tags from operands. We can't do this on a 31-bit number | |
| 2601 // because then the 0s get shifted into bit 30 instead of bit 31. | |
| 2602 __ SmiUntag(scratch1, left); | |
| 2603 __ GetLeastBitsFromSmi(scratch2, right, 5); | |
| 2604 __ mov(scratch1, Operand(scratch1, LSR, scratch2)); | |
| 2605 // Unsigned shift is not allowed to produce a negative number, so | |
| 2606 // check the sign bit and the sign bit after Smi tagging. | |
| 2607 __ tst(scratch1, Operand(0xc0000000)); | |
| 2608 __ b(ne, &not_smi_result); | |
| 2609 // Smi tag result. | |
| 2610 __ SmiTag(right, scratch1); | |
| 2611 __ Ret(); | |
| 2612 break; | |
| 2613 case Token::SHL: | |
| 2614 // Remove tags from operands. | |
| 2615 __ SmiUntag(scratch1, left); | |
| 2616 __ GetLeastBitsFromSmi(scratch2, right, 5); | |
| 2617 __ mov(scratch1, Operand(scratch1, LSL, scratch2)); | |
| 2618 // Check that the signed result fits in a Smi. | |
| 2619 __ add(scratch2, scratch1, Operand(0x40000000), SetCC); | |
| 2620 __ b(mi, &not_smi_result); | |
| 2621 __ SmiTag(right, scratch1); | |
| 2622 __ Ret(); | |
| 2623 break; | |
| 2591 default: | 2624 default: |
| 2592 UNREACHABLE(); | 2625 UNREACHABLE(); |
| 2593 } | 2626 } |
| 2594 __ bind(&not_smi_result); | 2627 __ bind(&not_smi_result); |
| 2595 } | 2628 } |
| 2596 | 2629 |
| 2597 | 2630 |
| 2598 void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, | 2631 void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, |
| 2599 bool smi_operands, | 2632 bool smi_operands, |
| 2600 Label* not_numbers, | 2633 Label* not_numbers, |
| (...skipping 95 matching lines...) | |
| 2696 __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset)); | 2729 __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset)); |
| 2697 #endif | 2730 #endif |
| 2698 // Place result in r0 and return to the pushed return address. | 2731 // Place result in r0 and return to the pushed return address. |
| 2699 __ mov(r0, Operand(result)); | 2732 __ mov(r0, Operand(result)); |
| 2700 __ pop(pc); | 2733 __ pop(pc); |
| 2701 } | 2734 } |
| 2702 break; | 2735 break; |
| 2703 } | 2736 } |
| 2704 case Token::BIT_OR: | 2737 case Token::BIT_OR: |
| 2705 case Token::BIT_XOR: | 2738 case Token::BIT_XOR: |
| 2706 case Token::BIT_AND: { | 2739 case Token::BIT_AND: |
| 2740 case Token::SAR: | |
| 2741 case Token::SHR: | |
| 2742 case Token::SHL: { | |
| 2707 if (smi_operands) { | 2743 if (smi_operands) { |
| 2708 __ SmiUntag(r3, left); | 2744 __ SmiUntag(r3, left); |
| 2709 __ SmiUntag(r2, right); | 2745 __ SmiUntag(r2, right); |
| 2710 } else { | 2746 } else { |
| 2711 // Convert operands to 32-bit integers. Right in r2 and left in r3. | 2747 // Convert operands to 32-bit integers. Right in r2 and left in r3. |
| 2712 FloatingPointHelper::LoadNumberAsInteger(masm, | 2748 FloatingPointHelper::LoadNumberAsInteger(masm, |
| 2713 left, | 2749 left, |
| 2714 r3, | 2750 r3, |
| 2715 heap_number_map, | 2751 heap_number_map, |
| 2716 scratch1, | 2752 scratch1, |
| 2717 scratch2, | 2753 scratch2, |
| 2718 d0, | 2754 d0, |
| 2719 not_numbers); | 2755 not_numbers); |
| 2720 FloatingPointHelper::LoadNumberAsInteger(masm, | 2756 FloatingPointHelper::LoadNumberAsInteger(masm, |
| 2721 right, | 2757 right, |
| 2722 r2, | 2758 r2, |
| 2723 heap_number_map, | 2759 heap_number_map, |
| 2724 scratch1, | 2760 scratch1, |
| 2725 scratch2, | 2761 scratch2, |
| 2726 d0, | 2762 d0, |
| 2727 not_numbers); | 2763 not_numbers); |
| 2728 } | 2764 } |
| 2765 | |
| 2766 Label result_not_a_smi; | |
| 2729 switch (op_) { | 2767 switch (op_) { |
| 2730 case Token::BIT_OR: | 2768 case Token::BIT_OR: |
| 2731 __ orr(r2, r3, Operand(r2)); | 2769 __ orr(r2, r3, Operand(r2)); |
| 2732 break; | 2770 break; |
| 2733 case Token::BIT_XOR: | 2771 case Token::BIT_XOR: |
| 2734 __ eor(r2, r3, Operand(r2)); | 2772 __ eor(r2, r3, Operand(r2)); |
| 2735 break; | 2773 break; |
| 2736 case Token::BIT_AND: | 2774 case Token::BIT_AND: |
| 2737 __ and_(r2, r3, Operand(r2)); | 2775 __ and_(r2, r3, Operand(r2)); |
| 2738 break; | 2776 break; |
| 2777 case Token::SAR: | |
| 2778 // Use only the 5 least significant bits of the shift count. | |
Mads Ager (chromium), 2011/02/10 16:27:45: You have a macro assembler thing for this that you
Søren Thygesen Gjesse, 2011/02/10 20:22:17: The one above takes a smi, but here we have an int
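To make the exchange above concrete: `GetLeastBitsFromSmi` expects a tagged smi (on 32-bit targets a smi is the value shifted left by one tag bit), while `r2` at this point already holds an untagged int32, so the patch masks it directly. A minimal C++ sketch of the two paths, illustrative only (plain integers, not V8's actual helpers):

```cpp
#include <cstdint>
#include <cassert>

int main() {
  int32_t value = 37;                     // a shift count
  int32_t smi = value << 1;               // smi encoding: value << kSmiTagSize (1)
  // Roughly what GetLeastBitsFromSmi does: untag, then keep the low 5 bits.
  int32_t from_smi = (smi >> 1) & 0x1f;
  // What the reviewed code does: r2 is already a raw int32, so mask directly.
  int32_t from_int = value & 0x1f;
  assert(from_smi == from_int);           // both are 37 & 31 == 5
  return 0;
}
```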
| 2779 __ and_(r2, r2, Operand(0x1f)); | |
| 2780 __ mov(r2, Operand(r3, ASR, r2)); | |
| 2781 break; | |
| 2782 case Token::SHR: | |
| 2783 // Use only the 5 least significant bits of the shift count. | |
| 2784 __ and_(r2, r2, Operand(0x1f)); | |
Mads Ager (chromium), 2011/02/10 16:27:45: Ditto?
Søren Thygesen Gjesse, 2011/02/10 20:22:17: Ditto.
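For background on the repeated `and_(r2, r2, Operand(0x1f))`: ECMAScript takes shift counts modulo 32, so only the five least significant bits of the count are used. A tiny sketch of the JS-visible behavior (illustrative C++, not V8 code):

```cpp
#include <cstdint>
#include <cassert>

int main() {
  // JS: 1 << 33 === 2, because the count is taken mod 32 (33 & 31 == 1).
  int32_t count = 33;
  int32_t result = int32_t(1) << (count & 0x1f);
  assert(result == 2);
  return 0;
}
```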
| 2785 __ mov(r2, Operand(r3, LSR, r2), SetCC); | |
| 2786 // SHR is special because it is required to produce a positive answer. | |
| 2787 // The code below for writing into heap numbers isn't capable of | |
| 2788 // writing the register as an unsigned int so we go to slow case if we | |
| 2789 // hit this case. | |
| 2790 if (CpuFeatures::IsSupported(VFP3)) { | |
| 2791 __ b(mi, &result_not_a_smi); | |
| 2792 } else { | |
| 2793 __ b(mi, not_numbers); | |
| 2794 } | |
| 2795 break; | |
| 2796 case Token::SHL: | |
| 2797 // Use only the 5 least significant bits of the shift count. | |
| 2798 __ and_(r2, r2, Operand(0x1f)); | |
Mads Ager (chromium), 2011/02/10 16:27:45: And here.
Søren Thygesen Gjesse, 2011/02/10 20:22:17: Ditto.
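The SHR special-casing above is also what motivates the `vcvt_f64_u32` change further down: a JS `>>>` result is an unsigned 32-bit value, so when bit 31 ends up set it cannot be smi-tagged or stored through a signed conversion. An illustrative C++ sketch of the problem case (not V8 code):

```cpp
#include <cstdint>
#include <cassert>

int main() {
  int32_t x = -1;
  uint32_t shifted = uint32_t(x) >> 0;      // JS: -1 >>> 0
  // The bit pattern is 0xffffffff. Read as a signed int32 it is -1 (the "mi"
  // branch above); the correct JS result is 4294967295, so the heap number
  // must be produced by an unsigned conversion (vcvt_f64_u32).
  assert(shifted == 4294967295u);
  double as_heap_number = double(shifted);  // what the unsigned conversion yields
  assert(as_heap_number == 4294967295.0);
  return 0;
}
```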
| 2799 __ mov(r2, Operand(r3, LSL, r2)); | |
| 2800 break; | |
| 2739 default: | 2801 default: |
| 2740 UNREACHABLE(); | 2802 UNREACHABLE(); |
| 2741 } | 2803 } |
| 2742 | 2804 |
| 2743 Label result_not_a_smi; | |
| 2744 // Check that the *signed* result fits in a smi. | 2805 // Check that the *signed* result fits in a smi. |
| 2745 __ add(r3, r2, Operand(0x40000000), SetCC); | 2806 __ add(r3, r2, Operand(0x40000000), SetCC); |
| 2746 __ b(mi, &result_not_a_smi); | 2807 __ b(mi, &result_not_a_smi); |
| 2747 __ SmiTag(r0, r2); | 2808 __ SmiTag(r0, r2); |
| 2748 __ Ret(); | 2809 __ Ret(); |
| 2749 | 2810 |
| 2750 // Allocate new heap number for result. | 2811 // Allocate new heap number for result. |
| 2751 __ bind(&result_not_a_smi); | 2812 __ bind(&result_not_a_smi); |
| 2752 __ AllocateHeapNumber( | 2813 __ AllocateHeapNumber( |
| 2753 r5, scratch1, scratch2, heap_number_map, gc_required); | 2814 r5, scratch1, scratch2, heap_number_map, gc_required); |
| 2754 | 2815 |
| 2755 // r2: Answer as signed int32. | 2816 // r2: Answer as signed int32. |
| 2756 // r5: Heap number to write answer into. | 2817 // r5: Heap number to write answer into. |
| 2757 | 2818 |
| 2758 // Nothing can go wrong now, so move the heap number to r0, which is the | 2819 // Nothing can go wrong now, so move the heap number to r0, which is the |
| 2759 // result. | 2820 // result. |
| 2760 __ mov(r0, Operand(r5)); | 2821 __ mov(r0, Operand(r5)); |
| 2761 | 2822 |
| 2762 if (CpuFeatures::IsSupported(VFP3)) { | 2823 if (CpuFeatures::IsSupported(VFP3)) { |
| 2763 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. | 2824 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As |
| 2825 // mentioned above, SHR needs to always produce a positive result. | |
| 2764 CpuFeatures::Scope scope(VFP3); | 2826 CpuFeatures::Scope scope(VFP3); |
| 2765 __ vmov(s0, r2); | 2827 __ vmov(s0, r2); |
| 2766 __ vcvt_f64_s32(d0, s0); | 2828 if (op_ == Token::SHR) { |
| 2829 __ vcvt_f64_u32(d0, s0); | |
| 2830 } else { | |
| 2831 __ vcvt_f64_s32(d0, s0); | |
| 2832 } | |
| 2767 __ sub(r3, r0, Operand(kHeapObjectTag)); | 2833 __ sub(r3, r0, Operand(kHeapObjectTag)); |
| 2768 __ vstr(d0, r3, HeapNumber::kValueOffset); | 2834 __ vstr(d0, r3, HeapNumber::kValueOffset); |
| 2769 __ Ret(); | 2835 __ Ret(); |
| 2770 } else { | 2836 } else { |
| 2771 // Tail call that writes the int32 in r2 to the heap number in r0, using | 2837 // Tail call that writes the int32 in r2 to the heap number in r0, using |
| 2772 // r3 as scratch. r0 is preserved and returned. | 2838 // r3 as scratch. r0 is preserved and returned. |
| 2773 WriteInt32ToHeapNumberStub stub(r2, r0, r3); | 2839 WriteInt32ToHeapNumberStub stub(r2, r0, r3); |
| 2774 __ TailCallStub(&stub); | 2840 __ TailCallStub(&stub); |
| 2775 } | 2841 } |
| 2776 break; | 2842 break; |
| 2777 } | 2843 } |
| 2778 default: | 2844 default: |
| 2779 UNREACHABLE(); | 2845 UNREACHABLE(); |
| 2780 } | 2846 } |
| 2781 } | 2847 } |
| 2782 | 2848 |
| 2783 | 2849 |
| 2784 // Generate the smi code. If the operation on smis is successful this return is | 2850 // Generate the smi code. If the operation on smis is successful this return is |
| 2785 // generated. If the result is not a smi and heap number allocation is not | 2851 // generated. If the result is not a smi and heap number allocation is not |
| 2786 // requested the code falls through. If number allocation is requested but a | 2852 // requested the code falls through. If number allocation is requested but a |
| 2787 // heap number cannot be allocated the code jumps to the label gc_required. | 2853 // heap number cannot be allocated the code jumps to the label gc_required. |
| 2788 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, | 2854 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, |
| 2789 Label* gc_required, | 2855 Label* gc_required, |
| 2790 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { | 2856 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { |
| 2791 Label not_smis; | 2857 Label not_smis; |
| 2792 | 2858 |
| 2793 ASSERT(op_ == Token::ADD || | |
| 2794 op_ == Token::SUB || | |
| 2795 op_ == Token::MUL || | |
| 2796 op_ == Token::DIV || | |
| 2797 op_ == Token::MOD || | |
| 2798 op_ == Token::BIT_OR || | |
| 2799 op_ == Token::BIT_AND || | |
| 2800 op_ == Token::BIT_XOR); | |
| 2801 | |
| 2802 Register left = r1; | 2859 Register left = r1; |
| 2803 Register right = r0; | 2860 Register right = r0; |
| 2804 Register scratch1 = r7; | 2861 Register scratch1 = r7; |
| 2805 Register scratch2 = r9; | 2862 Register scratch2 = r9; |
| 2806 | 2863 |
| 2807 // Perform combined smi check on both operands. | 2864 // Perform combined smi check on both operands. |
| 2808 __ orr(scratch1, left, Operand(right)); | 2865 __ orr(scratch1, left, Operand(right)); |
| 2809 STATIC_ASSERT(kSmiTag == 0); | 2866 STATIC_ASSERT(kSmiTag == 0); |
| 2810 __ tst(scratch1, Operand(kSmiTagMask)); | 2867 __ tst(scratch1, Operand(kSmiTagMask)); |
| 2811 __ b(ne, &not_smis); | 2868 __ b(ne, &not_smis); |
| 2812 | 2869 |
| 2813 // If the smi-smi operation results in a smi, a return is generated. | 2870 // If the smi-smi operation results in a smi, a return is generated. |
| 2814 GenerateSmiSmiOperation(masm); | 2871 GenerateSmiSmiOperation(masm); |
| 2815 | 2872 |
| 2816 // If heap number results are possible generate the result in an allocated | 2873 // If heap number results are possible generate the result in an allocated |
| 2817 // heap number. | 2874 // heap number. |
| 2818 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { | 2875 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { |
| 2819 GenerateFPOperation(masm, true, NULL, gc_required); | 2876 GenerateFPOperation(masm, true, NULL, gc_required); |
| 2820 } | 2877 } |
| 2821 __ bind(&not_smis); | 2878 __ bind(&not_smis); |
| 2822 } | 2879 } |
| 2823 | 2880 |
| 2824 | 2881 |
| 2825 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { | 2882 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
| 2826 Label not_smis, call_runtime; | 2883 Label not_smis, call_runtime; |
| 2827 | 2884 |
| 2828 ASSERT(op_ == Token::ADD || | |
| 2829 op_ == Token::SUB || | |
| 2830 op_ == Token::MUL || | |
| 2831 op_ == Token::DIV || | |
| 2832 op_ == Token::MOD || | |
| 2833 op_ == Token::BIT_OR || | |
| 2834 op_ == Token::BIT_AND || | |
| 2835 op_ == Token::BIT_XOR); | |
| 2836 | |
| 2837 if (result_type_ == TRBinaryOpIC::UNINITIALIZED || | 2885 if (result_type_ == TRBinaryOpIC::UNINITIALIZED || |
| 2838 result_type_ == TRBinaryOpIC::SMI) { | 2886 result_type_ == TRBinaryOpIC::SMI) { |
| 2839 // Only allow smi results. | 2887 // Only allow smi results. |
| 2840 GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS); | 2888 GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS); |
| 2841 } else { | 2889 } else { |
| 2842 // Allow heap number result and don't make a transition if a heap number | 2890 // Allow heap number result and don't make a transition if a heap number |
| 2843 // cannot be allocated. | 2891 // cannot be allocated. |
| 2844 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); | 2892 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); |
| 2845 } | 2893 } |
| 2846 | 2894 |
| (...skipping 10 matching lines...) | |
| 2857 ASSERT(operands_type_ == TRBinaryOpIC::STRING); | 2905 ASSERT(operands_type_ == TRBinaryOpIC::STRING); |
| 2858 ASSERT(op_ == Token::ADD); | 2906 ASSERT(op_ == Token::ADD); |
| 2859 // Try to add arguments as strings, otherwise, transition to the generic | 2907 // Try to add arguments as strings, otherwise, transition to the generic |
| 2860 // TRBinaryOpIC type. | 2908 // TRBinaryOpIC type. |
| 2861 GenerateAddStrings(masm); | 2909 GenerateAddStrings(masm); |
| 2862 GenerateTypeTransition(masm); | 2910 GenerateTypeTransition(masm); |
| 2863 } | 2911 } |
| 2864 | 2912 |
| 2865 | 2913 |
| 2866 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { | 2914 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
| 2867 ASSERT(op_ == Token::ADD || | |
| 2868 op_ == Token::SUB || | |
| 2869 op_ == Token::MUL || | |
| 2870 op_ == Token::DIV || | |
| 2871 op_ == Token::MOD || | |
| 2872 op_ == Token::BIT_OR || | |
| 2873 op_ == Token::BIT_AND || | |
| 2874 op_ == Token::BIT_XOR); | |
| 2875 | |
| 2876 ASSERT(operands_type_ == TRBinaryOpIC::INT32); | 2915 ASSERT(operands_type_ == TRBinaryOpIC::INT32); |
| 2877 | 2916 |
| 2878 GenerateTypeTransition(masm); | 2917 GenerateTypeTransition(masm); |
| 2879 } | 2918 } |
| 2880 | 2919 |
| 2881 | 2920 |
| 2882 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { | 2921 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { |
| 2883 ASSERT(op_ == Token::ADD || | |
| 2884 op_ == Token::SUB || | |
| 2885 op_ == Token::MUL || | |
| 2886 op_ == Token::DIV || | |
| 2887 op_ == Token::MOD || | |
| 2888 op_ == Token::BIT_OR || | |
| 2889 op_ == Token::BIT_AND || | |
| 2890 op_ == Token::BIT_XOR); | |
| 2891 | |
| 2892 Label not_numbers, call_runtime; | 2922 Label not_numbers, call_runtime; |
| 2893 ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER); | 2923 ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER); |
| 2894 | 2924 |
| 2895 GenerateFPOperation(masm, false, &not_numbers, &call_runtime); | 2925 GenerateFPOperation(masm, false, &not_numbers, &call_runtime); |
| 2896 | 2926 |
| 2897 __ bind(&not_numbers); | 2927 __ bind(&not_numbers); |
| 2898 GenerateTypeTransition(masm); | 2928 GenerateTypeTransition(masm); |
| 2899 | 2929 |
| 2900 __ bind(&call_runtime); | 2930 __ bind(&call_runtime); |
| 2901 GenerateCallRuntime(masm); | 2931 GenerateCallRuntime(masm); |
| 2902 } | 2932 } |
| 2903 | 2933 |
| 2904 | 2934 |
| 2905 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { | 2935 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
| 2906 ASSERT(op_ == Token::ADD || | |
| 2907 op_ == Token::SUB || | |
| 2908 op_ == Token::MUL || | |
| 2909 op_ == Token::DIV || | |
| 2910 op_ == Token::MOD || | |
| 2911 op_ == Token::BIT_OR || | |
| 2912 op_ == Token::BIT_AND || | |
| 2913 op_ == Token::BIT_XOR); | |
| 2914 | |
| 2915 Label call_runtime; | 2936 Label call_runtime; |
| 2916 | 2937 |
| 2917 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); | 2938 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); |
| 2918 | 2939 |
| 2919 // If all else fails, use the runtime system to get the correct | 2940 // If all else fails, use the runtime system to get the correct |
| 2920 // result. | 2941 // result. |
| 2921 __ bind(&call_runtime); | 2942 __ bind(&call_runtime); |
| 2922 | 2943 |
| 2923 // Try to add strings before calling runtime. | 2944 // Try to add strings before calling runtime. |
| 2924 if (op_ == Token::ADD) { | 2945 if (op_ == Token::ADD) { |
| (...skipping 52 matching lines...) | |
| 2977 break; | 2998 break; |
| 2978 case Token::BIT_OR: | 2999 case Token::BIT_OR: |
| 2979 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); | 3000 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); |
| 2980 break; | 3001 break; |
| 2981 case Token::BIT_AND: | 3002 case Token::BIT_AND: |
| 2982 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS); | 3003 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS); |
| 2983 break; | 3004 break; |
| 2984 case Token::BIT_XOR: | 3005 case Token::BIT_XOR: |
| 2985 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS); | 3006 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS); |
| 2986 break; | 3007 break; |
| 3008 case Token::SAR: | |
| 3009 __ InvokeBuiltin(Builtins::SAR, JUMP_JS); | |
| 3010 break; | |
| 3011 case Token::SHR: | |
| 3012 __ InvokeBuiltin(Builtins::SHR, JUMP_JS); | |
| 3013 break; | |
| 3014 case Token::SHL: | |
| 3015 __ InvokeBuiltin(Builtins::SHL, JUMP_JS); | |
| 3016 break; | |
| 2987 default: | 3017 default: |
| 2988 UNREACHABLE(); | 3018 UNREACHABLE(); |
| 2989 } | 3019 } |
| 2990 } | 3020 } |
| 2991 | 3021 |
| 2992 | 3022 |
| 2993 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation( | 3023 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation( |
| 2994 MacroAssembler* masm, | 3024 MacroAssembler* masm, |
| 2995 Register result, | 3025 Register result, |
| 2996 Register heap_number_map, | 3026 Register heap_number_map, |
| (...skipping 2998 matching lines...) | |
| 5995 __ SmiTag(r0, scratch1); | 6025 __ SmiTag(r0, scratch1); |
| 5996 __ Ret(); | 6026 __ Ret(); |
| 5997 } | 6027 } |
| 5998 | 6028 |
| 5999 | 6029 |
| 6000 #undef __ | 6030 #undef __ |
| 6001 | 6031 |
| 6002 } } // namespace v8::internal | 6032 } } // namespace v8::internal |
| 6003 | 6033 |
| 6004 #endif // V8_TARGET_ARCH_ARM | 6034 #endif // V8_TARGET_ARCH_ARM |
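A closing note on the recurring `__ add(..., Operand(0x40000000), SetCC)` / `__ b(mi, ...)` pattern in this patch: a 32-bit smi holds signed values in [-2^30, 2^30 - 1], and adding 0x40000000 sets the sign bit exactly when the value lies outside that range. A hedged C++ sketch of the check (illustrative, not V8 code):

```cpp
#include <cstdint>
#include <cassert>

// True when v fits in a 32-bit V8 smi, i.e. v is in [-2^30, 2^30 - 1].
// Mirrors the generated "add rX, v, #0x40000000" followed by "b mi": the
// sum's sign bit is set exactly when v is out of smi range.
bool FitsInSmi(int32_t v) {
  return static_cast<int32_t>(static_cast<uint32_t>(v) + 0x40000000u) >= 0;
}

int main() {
  assert(FitsInSmi(0));
  assert(FitsInSmi((1 << 30) - 1));    //  2^30 - 1: largest smi
  assert(!FitsInSmi(1 << 30));         //  2^30: one too large
  assert(FitsInSmi(-(1 << 30)));       // -2^30: smallest smi
  assert(!FitsInSmi(-(1 << 30) - 1));  // one too small
  return 0;
}
```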