OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 2570 matching lines...) |
2581 __ Ret(); | 2581 __ Ret(); |
2582 break; | 2582 break; |
2583 case Token::BIT_AND: | 2583 case Token::BIT_AND: |
2584 __ and_(right, left, Operand(right)); | 2584 __ and_(right, left, Operand(right)); |
2585 __ Ret(); | 2585 __ Ret(); |
2586 break; | 2586 break; |
2587 case Token::BIT_XOR: | 2587 case Token::BIT_XOR: |
2588 __ eor(right, left, Operand(right)); | 2588 __ eor(right, left, Operand(right)); |
2589 __ Ret(); | 2589 __ Ret(); |
2590 break; | 2590 break; |
| 2591 case Token::SAR: |
| 2592 // Remove tags from right operand. |
| 2593 __ GetLeastBitsFromSmi(scratch1, right, 5); |
| 2594 __ mov(right, Operand(left, ASR, scratch1)); |
| 2595 // Smi tag result. |
| 2596 __ bic(right, right, Operand(kSmiTagMask)); |
| 2597 __ Ret(); |
| 2598 break; |
| 2599 case Token::SHR: |
| 2600 // Remove tags from operands. We can't do this on a 31 bit number |
| 2601 // because then the 0s get shifted into bit 30 instead of bit 31. |
| 2602 __ SmiUntag(scratch1, left); |
| 2603 __ GetLeastBitsFromSmi(scratch2, right, 5); |
| 2604 __ mov(scratch1, Operand(scratch1, LSR, scratch2)); |
| 2605 // Unsigned shift is not allowed to produce a negative number, so |
| 2606 // check the sign bit and the sign bit after Smi tagging. |
| 2607 __ tst(scratch1, Operand(0xc0000000)); |
| 2608 __ b(ne, &not_smi_result); |
| 2609 // Smi tag result. |
| 2610 __ SmiTag(right, scratch1); |
| 2611 __ Ret(); |
| 2612 break; |
| 2613 case Token::SHL: |
| 2614 // Remove tags from operands. |
| 2615 __ SmiUntag(scratch1, left); |
| 2616 __ GetLeastBitsFromSmi(scratch2, right, 5); |
| 2617 __ mov(scratch1, Operand(scratch1, LSL, scratch2)); |
| 2618 // Check that the signed result fits in a Smi. |
| 2619 __ add(scratch2, scratch1, Operand(0x40000000), SetCC); |
| 2620 __ b(mi, &not_smi_result); |
| 2621 __ SmiTag(right, scratch1); |
| 2622 __ Ret(); |
| 2623 break; |
2591 default: | 2624 default: |
2592 UNREACHABLE(); | 2625 UNREACHABLE(); |
2593 } | 2626 } |
2594 __ bind(&not_smi_result); | 2627 __ bind(&not_smi_result); |
2595 } | 2628 } |
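The smi-only fast paths above lean on the 32-bit smi encoding (value in bits 31..1, tag bit 0 equal to 0). As a reference for the bit tricks (SAR shifting the still-tagged word and clearing the tag with bic, SHR rejecting any result with either of the top two bits set, SHL using the wraparound add of 0x40000000 as a range check), here is a minimal C++ sketch over plain integers; the helper names are illustrative, not V8's:

#include <cassert>
#include <cstdint>

// Illustrative 32-bit smi encoding: value in bits 31..1, tag bit 0 is 0.
constexpr int32_t kSmiTagMask = 1;
int32_t SmiTag(int32_t v) {
  return static_cast<int32_t>(static_cast<uint32_t>(v) << 1);
}

// Token::SAR: shift the *tagged* word arithmetically, then clear the tag
// bit (the bic above); the result is already a correctly tagged smi.
int32_t SmiSar(int32_t left_smi, uint32_t count) {
  int32_t r = left_smi >> (count & 0x1f);  // ASR; arithmetic shift on common ABIs
  return r & ~kSmiTagMask;
}

// Token::SHR: the result only fits in a (non-negative) smi if bits 31 and
// 30 are both clear, which is what the tst with 0xc0000000 checks.
bool ShrResultIsSmi(uint32_t v) { return (v & 0xc0000000u) == 0; }

// Token::SHL: v fits in a smi iff v + 0x40000000 does not come out
// negative, i.e. v is in [-2^30, 2^30); the add/SetCC plus b(mi) pair.
bool FitsInSmi(int32_t v) {
  return static_cast<int32_t>(static_cast<uint32_t>(v) + 0x40000000u) >= 0;
}

int main() {
  assert(SmiSar(SmiTag(-8), 1) == SmiTag(-4));
  assert(ShrResultIsSmi(0x3fffffffu) && !ShrResultIsSmi(0x40000000u));
  assert(FitsInSmi(0x3fffffff) && !FitsInSmi(0x40000000));
}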
2596 | 2629 |
2597 | 2630 |
2598 void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, | 2631 void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, |
2599 bool smi_operands, | 2632 bool smi_operands, |
2600 Label* not_numbers, | 2633 Label* not_numbers, |
(...skipping 95 matching lines...) |
2696 __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset)); | 2729 __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset)); |
2697 #endif | 2730 #endif |
2698 // Place result in r0 and return to the pushed return address. | 2731 // Place result in r0 and return to the pushed return address. |
2699 __ mov(r0, Operand(result)); | 2732 __ mov(r0, Operand(result)); |
2700 __ pop(pc); | 2733 __ pop(pc); |
2701 } | 2734 } |
2702 break; | 2735 break; |
2703 } | 2736 } |
2704 case Token::BIT_OR: | 2737 case Token::BIT_OR: |
2705 case Token::BIT_XOR: | 2738 case Token::BIT_XOR: |
2706 case Token::BIT_AND: { | 2739 case Token::BIT_AND: |
| 2740 case Token::SAR: |
| 2741 case Token::SHR: |
| 2742 case Token::SHL: { |
2707 if (smi_operands) { | 2743 if (smi_operands) { |
2708 __ SmiUntag(r3, left); | 2744 __ SmiUntag(r3, left); |
2709 __ SmiUntag(r2, right); | 2745 __ SmiUntag(r2, right); |
2710 } else { | 2746 } else { |
2711 // Convert operands to 32-bit integers. Right in r2 and left in r3. | 2747 // Convert operands to 32-bit integers. Right in r2 and left in r3. |
2712 FloatingPointHelper::LoadNumberAsInteger(masm, | 2748 FloatingPointHelper::LoadNumberAsInteger(masm, |
2713 left, | 2749 left, |
2714 r3, | 2750 r3, |
2715 heap_number_map, | 2751 heap_number_map, |
2716 scratch1, | 2752 scratch1, |
2717 scratch2, | 2753 scratch2, |
2718 d0, | 2754 d0, |
2719 not_numbers); | 2755 not_numbers); |
2720 FloatingPointHelper::LoadNumberAsInteger(masm, | 2756 FloatingPointHelper::LoadNumberAsInteger(masm, |
2721 right, | 2757 right, |
2722 r2, | 2758 r2, |
2723 heap_number_map, | 2759 heap_number_map, |
2724 scratch1, | 2760 scratch1, |
2725 scratch2, | 2761 scratch2, |
2726 d0, | 2762 d0, |
2727 not_numbers); | 2763 not_numbers); |
2728 } | 2764 } |
| 2765 |
| 2766 Label result_not_a_smi; |
2729 switch (op_) { | 2767 switch (op_) { |
2730 case Token::BIT_OR: | 2768 case Token::BIT_OR: |
2731 __ orr(r2, r3, Operand(r2)); | 2769 __ orr(r2, r3, Operand(r2)); |
2732 break; | 2770 break; |
2733 case Token::BIT_XOR: | 2771 case Token::BIT_XOR: |
2734 __ eor(r2, r3, Operand(r2)); | 2772 __ eor(r2, r3, Operand(r2)); |
2735 break; | 2773 break; |
2736 case Token::BIT_AND: | 2774 case Token::BIT_AND: |
2737 __ and_(r2, r3, Operand(r2)); | 2775 __ and_(r2, r3, Operand(r2)); |
2738 break; | 2776 break; |
| 2777 case Token::SAR: |
| 2778 // Use only the 5 least significant bits of the shift count. |
| 2780 __ GetLeastBitsFromInt32(r2, r2, 5); |
| 2781 __ mov(r2, Operand(r3, ASR, r2)); |
| 2782 break; |
| 2783 case Token::SHR: |
| 2784 // Use only the 5 least significant bits of the shift count. |
| 2785 __ GetLeastBitsFromInt32(r2, r2, 5); |
| 2786 __ mov(r2, Operand(r3, LSR, r2), SetCC); |
| 2787 // SHR is special because it is required to produce a positive answer. |
| 2788 // The code below for writing into heap numbers isn't capable of |
| 2789 // writing the register as an unsigned int so we go to slow case if we |
| 2790 // hit this case. |
| 2791 if (CpuFeatures::IsSupported(VFP3)) { |
| 2792 __ b(mi, &result_not_a_smi); |
| 2793 } else { |
| 2794 __ b(mi, not_numbers); |
| 2795 } |
| 2796 break; |
| 2797 case Token::SHL: |
| 2798 // Use only the 5 least significant bits of the shift count. |
| 2799 __ GetLeastBitsFromInt32(r2, r2, 5); |
| 2800 __ mov(r2, Operand(r3, LSL, r2)); |
| 2801 break; |
2739 default: | 2802 default: |
2740 UNREACHABLE(); | 2803 UNREACHABLE(); |
2741 } | 2804 } |
2742 | 2805 |
2743 Label result_not_a_smi; | |
2744 // Check that the *signed* result fits in a smi. | 2806 // Check that the *signed* result fits in a smi. |
2745 __ add(r3, r2, Operand(0x40000000), SetCC); | 2807 __ add(r3, r2, Operand(0x40000000), SetCC); |
2746 __ b(mi, &result_not_a_smi); | 2808 __ b(mi, &result_not_a_smi); |
2747 __ SmiTag(r0, r2); | 2809 __ SmiTag(r0, r2); |
2748 __ Ret(); | 2810 __ Ret(); |
2749 | 2811 |
2750 // Allocate new heap number for result. | 2812 // Allocate new heap number for result. |
2751 __ bind(&result_not_a_smi); | 2813 __ bind(&result_not_a_smi); |
2752 __ AllocateHeapNumber( | 2814 __ AllocateHeapNumber( |
2753 r5, scratch1, scratch2, heap_number_map, gc_required); | 2815 r5, scratch1, scratch2, heap_number_map, gc_required); |
2754 | 2816 |
2755 // r2: Answer as signed int32. | 2817 // r2: Answer as signed int32. |
2756 // r5: Heap number to write answer into. | 2818 // r5: Heap number to write answer into. |
2757 | 2819 |
2758 // Nothing can go wrong now, so move the heap number to r0, which is the | 2820 // Nothing can go wrong now, so move the heap number to r0, which is the |
2759 // result. | 2821 // result. |
2760 __ mov(r0, Operand(r5)); | 2822 __ mov(r0, Operand(r5)); |
2761 | 2823 |
2762 if (CpuFeatures::IsSupported(VFP3)) { | 2824 if (CpuFeatures::IsSupported(VFP3)) { |
2763 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. | 2825 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As |
| 2826 // mentioned above, SHR always needs to produce a positive result. |
2764 CpuFeatures::Scope scope(VFP3); | 2827 CpuFeatures::Scope scope(VFP3); |
2765 __ vmov(s0, r2); | 2828 __ vmov(s0, r2); |
2766 __ vcvt_f64_s32(d0, s0); | 2829 if (op_ == Token::SHR) { |
| 2830 __ vcvt_f64_u32(d0, s0); |
| 2831 } else { |
| 2832 __ vcvt_f64_s32(d0, s0); |
| 2833 } |
2767 __ sub(r3, r0, Operand(kHeapObjectTag)); | 2834 __ sub(r3, r0, Operand(kHeapObjectTag)); |
2768 __ vstr(d0, r3, HeapNumber::kValueOffset); | 2835 __ vstr(d0, r3, HeapNumber::kValueOffset); |
2769 __ Ret(); | 2836 __ Ret(); |
2770 } else { | 2837 } else { |
2771 // Tail call that writes the int32 in r2 to the heap number in r0, using | 2838 // Tail call that writes the int32 in r2 to the heap number in r0, using |
2772 // r3 as scratch. r0 is preserved and returned. | 2839 // r3 as scratch. r0 is preserved and returned. |
2773 WriteInt32ToHeapNumberStub stub(r2, r0, r3); | 2840 WriteInt32ToHeapNumberStub stub(r2, r0, r3); |
2774 __ TailCallStub(&stub); | 2841 __ TailCallStub(&stub); |
2775 } | 2842 } |
2776 break; | 2843 break; |
2777 } | 2844 } |
2778 default: | 2845 default: |
2779 UNREACHABLE(); | 2846 UNREACHABLE(); |
2780 } | 2847 } |
2781 } | 2848 } |
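One subtlety in the heap-number path above: after SHR the 32-bit result is logically unsigned, so with VFP3 it is converted with vcvt_f64_u32 instead of vcvt_f64_s32, and without VFP3 the stub bails out early because WriteInt32ToHeapNumberStub only handles signed values. A minimal C++ sketch of why the signedness of that conversion matters:

#include <cassert>
#include <cstdint>

// If the SHR result has bit 31 set, converting it as a signed int32
// yields a negative double; JS requires the unsigned interpretation.
int main() {
  uint32_t shr_result = 0xfffffffeu;  // e.g. the JS expression (-2) >>> 0
  double as_signed = static_cast<int32_t>(shr_result);  // -2.0, wrong
  double as_unsigned = shr_result;    // 4294967294.0, what JS expects
  assert(as_signed < 0.0 && as_unsigned > 0.0);
  return 0;
}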
2782 | 2849 |
2783 | 2850 |
2784 // Generate the smi code. If the operation on smis is successful this return is | 2851 // Generate the smi code. If the operation on smis is successful this return is |
2785 // generated. If the result is not a smi and heap number allocation is not | 2852 // generated. If the result is not a smi and heap number allocation is not |
2786 // requested the code falls through. If number allocation is requested but a | 2853 // requested the code falls through. If number allocation is requested but a |
2787 // heap number cannot be allocated the code jumps to the label gc_required. | 2854 // heap number cannot be allocated the code jumps to the label gc_required. |
2788 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, | 2855 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, |
2789 Label* gc_required, | 2856 Label* gc_required, |
2790 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { | 2857 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { |
2791 Label not_smis; | 2858 Label not_smis; |
2792 | 2859 |
2793 ASSERT(op_ == Token::ADD || | |
2794 op_ == Token::SUB || | |
2795 op_ == Token::MUL || | |
2796 op_ == Token::DIV || | |
2797 op_ == Token::MOD || | |
2798 op_ == Token::BIT_OR || | |
2799 op_ == Token::BIT_AND || | |
2800 op_ == Token::BIT_XOR); | |
2801 | |
2802 Register left = r1; | 2860 Register left = r1; |
2803 Register right = r0; | 2861 Register right = r0; |
2804 Register scratch1 = r7; | 2862 Register scratch1 = r7; |
2805 Register scratch2 = r9; | 2863 Register scratch2 = r9; |
2806 | 2864 |
2807 // Perform combined smi check on both operands. | 2865 // Perform combined smi check on both operands. |
2808 __ orr(scratch1, left, Operand(right)); | 2866 __ orr(scratch1, left, Operand(right)); |
2809 STATIC_ASSERT(kSmiTag == 0); | 2867 STATIC_ASSERT(kSmiTag == 0); |
2810 __ tst(scratch1, Operand(kSmiTagMask)); | 2868 __ tst(scratch1, Operand(kSmiTagMask)); |
2811 __ b(ne, &not_smis); | 2869 __ b(ne, &not_smis); |
2812 | 2870 |
2813 // If the smi-smi operation results in a smi, a return is generated. | 2871 // If the smi-smi operation results in a smi, a return is generated. |
2814 GenerateSmiSmiOperation(masm); | 2872 GenerateSmiSmiOperation(masm); |
2815 | 2873 |
2816 // If heap number results are possible generate the result in an allocated | 2874 // If heap number results are possible generate the result in an allocated |
2817 // heap number. | 2875 // heap number. |
2818 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { | 2876 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { |
2819 GenerateFPOperation(masm, true, NULL, gc_required); | 2877 GenerateFPOperation(masm, true, NULL, gc_required); |
2820 } | 2878 } |
2821 __ bind(&not_smis); | 2879 __ bind(&not_smis); |
2822 } | 2880 } |
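The combined smi check in GenerateSmiCode works because kSmiTag is 0: or-ing the two tagged words leaves bit 0 set exactly when at least one operand is a heap object, so a single tst covers both operands. A quick C++ sketch (constants written out, names illustrative):

#include <cassert>
#include <cstdint>

constexpr int32_t kSmiTagMask = 1;  // bit 0: 0 for smis, 1 for heap objects

bool BothSmis(int32_t a, int32_t b) {
  return ((a | b) & kSmiTagMask) == 0;  // the orr + tst pair in the stub
}

int main() {
  int32_t smi_a = 42 << 1;     // SmiTag(42), bit 0 clear
  int32_t smi_b = -14;         // SmiTag(-7), bit 0 clear
  int32_t heap_obj = 0x40001;  // tagged heap pointer, bit 0 set
  assert(BothSmis(smi_a, smi_b));
  assert(!BothSmis(smi_a, heap_obj));
}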
2823 | 2881 |
2824 | 2882 |
2825 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { | 2883 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
2826 Label not_smis, call_runtime; | 2884 Label not_smis, call_runtime; |
2827 | 2885 |
2828 ASSERT(op_ == Token::ADD || | |
2829 op_ == Token::SUB || | |
2830 op_ == Token::MUL || | |
2831 op_ == Token::DIV || | |
2832 op_ == Token::MOD || | |
2833 op_ == Token::BIT_OR || | |
2834 op_ == Token::BIT_AND || | |
2835 op_ == Token::BIT_XOR); | |
2836 | |
2837 if (result_type_ == TRBinaryOpIC::UNINITIALIZED || | 2886 if (result_type_ == TRBinaryOpIC::UNINITIALIZED || |
2838 result_type_ == TRBinaryOpIC::SMI) { | 2887 result_type_ == TRBinaryOpIC::SMI) { |
2839 // Only allow smi results. | 2888 // Only allow smi results. |
2840 GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS); | 2889 GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS); |
2841 } else { | 2890 } else { |
2842 // Allow heap number result and don't make a transition if a heap number | 2891 // Allow heap number result and don't make a transition if a heap number |
2843 // cannot be allocated. | 2892 // cannot be allocated. |
2844 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); | 2893 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); |
2845 } | 2894 } |
2846 | 2895 |
(...skipping 10 matching lines...) |
2857 ASSERT(operands_type_ == TRBinaryOpIC::STRING); | 2906 ASSERT(operands_type_ == TRBinaryOpIC::STRING); |
2858 ASSERT(op_ == Token::ADD); | 2907 ASSERT(op_ == Token::ADD); |
2859 // Try to add arguments as strings, otherwise, transition to the generic | 2908 // Try to add arguments as strings, otherwise, transition to the generic |
2860 // TRBinaryOpIC type. | 2909 // TRBinaryOpIC type. |
2861 GenerateAddStrings(masm); | 2910 GenerateAddStrings(masm); |
2862 GenerateTypeTransition(masm); | 2911 GenerateTypeTransition(masm); |
2863 } | 2912 } |
2864 | 2913 |
2865 | 2914 |
2866 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { | 2915 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
2867 ASSERT(op_ == Token::ADD || | |
2868 op_ == Token::SUB || | |
2869 op_ == Token::MUL || | |
2870 op_ == Token::DIV || | |
2871 op_ == Token::MOD || | |
2872 op_ == Token::BIT_OR || | |
2873 op_ == Token::BIT_AND || | |
2874 op_ == Token::BIT_XOR); | |
2875 | |
2876 ASSERT(operands_type_ == TRBinaryOpIC::INT32); | 2916 ASSERT(operands_type_ == TRBinaryOpIC::INT32); |
2877 | 2917 |
2878 GenerateTypeTransition(masm); | 2918 GenerateTypeTransition(masm); |
2879 } | 2919 } |
2880 | 2920 |
2881 | 2921 |
2882 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { | 2922 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { |
2883 ASSERT(op_ == Token::ADD || | |
2884 op_ == Token::SUB || | |
2885 op_ == Token::MUL || | |
2886 op_ == Token::DIV || | |
2887 op_ == Token::MOD || | |
2888 op_ == Token::BIT_OR || | |
2889 op_ == Token::BIT_AND || | |
2890 op_ == Token::BIT_XOR); | |
2891 | |
2892 Label not_numbers, call_runtime; | 2923 Label not_numbers, call_runtime; |
2893 ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER); | 2924 ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER); |
2894 | 2925 |
2895 GenerateFPOperation(masm, false, &not_numbers, &call_runtime); | 2926 GenerateFPOperation(masm, false, &not_numbers, &call_runtime); |
2896 | 2927 |
2897 __ bind(&not_numbers); | 2928 __ bind(&not_numbers); |
2898 GenerateTypeTransition(masm); | 2929 GenerateTypeTransition(masm); |
2899 | 2930 |
2900 __ bind(&call_runtime); | 2931 __ bind(&call_runtime); |
2901 GenerateCallRuntime(masm); | 2932 GenerateCallRuntime(masm); |
2902 } | 2933 } |
2903 | 2934 |
2904 | 2935 |
2905 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { | 2936 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
2906 ASSERT(op_ == Token::ADD || | |
2907 op_ == Token::SUB || | |
2908 op_ == Token::MUL || | |
2909 op_ == Token::DIV || | |
2910 op_ == Token::MOD || | |
2911 op_ == Token::BIT_OR || | |
2912 op_ == Token::BIT_AND || | |
2913 op_ == Token::BIT_XOR); | |
2914 | |
2915 Label call_runtime; | 2937 Label call_runtime; |
2916 | 2938 |
2917 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); | 2939 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); |
2918 | 2940 |
2919 // If all else fails, use the runtime system to get the correct | 2941 // If all else fails, use the runtime system to get the correct |
2920 // result. | 2942 // result. |
2921 __ bind(&call_runtime); | 2943 __ bind(&call_runtime); |
2922 | 2944 |
2923 // Try to add strings before calling runtime. | 2945 // Try to add strings before calling runtime. |
2924 if (op_ == Token::ADD) { | 2946 if (op_ == Token::ADD) { |
(...skipping 52 matching lines...) |
2977 break; | 2999 break; |
2978 case Token::BIT_OR: | 3000 case Token::BIT_OR: |
2979 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); | 3001 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); |
2980 break; | 3002 break; |
2981 case Token::BIT_AND: | 3003 case Token::BIT_AND: |
2982 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS); | 3004 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS); |
2983 break; | 3005 break; |
2984 case Token::BIT_XOR: | 3006 case Token::BIT_XOR: |
2985 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS); | 3007 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS); |
2986 break; | 3008 break; |
| 3009 case Token::SAR: |
| 3010 __ InvokeBuiltin(Builtins::SAR, JUMP_JS); |
| 3011 break; |
| 3012 case Token::SHR: |
| 3013 __ InvokeBuiltin(Builtins::SHR, JUMP_JS); |
| 3014 break; |
| 3015 case Token::SHL: |
| 3016 __ InvokeBuiltin(Builtins::SHL, JUMP_JS); |
| 3017 break; |
2987 default: | 3018 default: |
2988 UNREACHABLE(); | 3019 UNREACHABLE(); |
2989 } | 3020 } |
2990 } | 3021 } |
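For reference, the SAR, SHR and SHL builtins that the generic stub falls back to implement the standard JS shift semantics, where the shift count is taken mod 32; that is also why every fast path above keeps only the 5 least significant bits of the count (GetLeastBitsFromSmi / GetLeastBitsFromInt32). A small C++ sketch of those semantics:

#include <cassert>
#include <cstdint>

// JS >>, >>> and << mask the shift count to its 5 low bits.
int32_t JsSar(int32_t x, uint32_t n) { return x >> (n & 31); }    // >>
uint32_t JsShr(uint32_t x, uint32_t n) { return x >> (n & 31); }  // >>>
int32_t JsShl(int32_t x, uint32_t n) {                            // <<
  return static_cast<int32_t>(static_cast<uint32_t>(x) << (n & 31));
}

int main() {
  assert(JsSar(-8, 33) == -4);           // count 33 wraps to 1
  assert(JsShr(0x80000000u, 31) == 1);
  assert(JsShl(1, 32) == 1);             // count 32 wraps to 0
}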
2991 | 3022 |
2992 | 3023 |
2993 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation( | 3024 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation( |
2994 MacroAssembler* masm, | 3025 MacroAssembler* masm, |
2995 Register result, | 3026 Register result, |
2996 Register heap_number_map, | 3027 Register heap_number_map, |
(...skipping 2998 matching lines...) |
5995 __ SmiTag(r0, scratch1); | 6026 __ SmiTag(r0, scratch1); |
5996 __ Ret(); | 6027 __ Ret(); |
5997 } | 6028 } |
5998 | 6029 |
5999 | 6030 |
6000 #undef __ | 6031 #undef __ |
6001 | 6032 |
6002 } } // namespace v8::internal | 6033 } } // namespace v8::internal |
6003 | 6034 |
6004 #endif // V8_TARGET_ARCH_ARM | 6035 #endif // V8_TARGET_ARCH_ARM |
OLD | NEW |