| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_MIPS | 7 #if V8_TARGET_ARCH_MIPS |
| 8 | 8 |
| 9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
| 10 #include "src/bootstrapper.h" | 10 #include "src/bootstrapper.h" |
| (...skipping 2834 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2845 CallFunctionNoFeedback(masm, | 2845 CallFunctionNoFeedback(masm, |
| 2846 arg_count(), | 2846 arg_count(), |
| 2847 true, | 2847 true, |
| 2848 CallAsMethod()); | 2848 CallAsMethod()); |
| 2849 | 2849 |
| 2850 // Unreachable. | 2850 // Unreachable. |
| 2851 __ stop("Unexpected code address"); | 2851 __ stop("Unexpected code address"); |
| 2852 } | 2852 } |
| 2853 | 2853 |
| 2854 | 2854 |
// Call IC for a direct call to the Math.round builtin.  Beyond completing the
// call, this stub watches the (single) argument and, when the call would
// produce -0, records a sentinel in the type feedback vector so optimized
// code knows it must preserve the sign of zero.
void CallIC_RoundStub::Generate(MacroAssembler* masm) {
  Register function = a1;  // Callee.
  Register vector = a2;    // Type feedback vector.
  Register slot = a3;      // Feedback slot id, as a Smi.

  Register temp1 = a0;
  Register temp2 = t0;
  DoubleRegister double_temp1 = f12;
  DoubleRegister double_temp2 = f14;
  Label tail, miss;

  // Ensure nobody has snuck in another function.
  __ BranchIfNotBuiltin(function, temp1, kMathRound, &miss);

  if (arg_count() > 0) {
    // Load the first argument from the stack; only HeapNumbers need the -0
    // analysis below (Smis and other objects go straight to the call).
    __ lw(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize));
    Handle<Map> map = isolate()->factory()->heap_number_map();
    __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
    __ ldc1(double_temp1, FieldMemOperand(temp1, HeapNumber::kValueOffset));

    // If the number is >0, it doesn't round to -0
    __ Move(double_temp2, 0.0);
    __ BranchF64(&tail, nullptr, gt, double_temp1, double_temp2);

    // If the number is <-.5, it doesn't round to -0
    __ Move(double_temp2, -.5);
    __ BranchF64(&tail, nullptr, lt, double_temp1, double_temp2);

    // +0 doesn't round to -0
    // NOTE(review): this compares the whole high word against 0x80000000, so
    // only an argument that is exactly -0 (or a denormal whose high mantissa
    // word is zero) falls through to the sentinel store.  Negative fractions
    // in [-0.5, 0) also round to -0 in JS; if those are meant to be recorded
    // too, a sign-bit-only test may have been intended -- confirm.
    __ FmoveHigh(temp1, double_temp1);
    __ Branch(&tail, ne, temp1, Operand(0x80000000));

    // Record the -0 outcome in the feedback vector.  The slot is a Smi
    // (value << 1); one more left shift scales it by kPointerSize (4 on
    // MIPS32).  The sentinel is stored one element past the slot itself.
    __ sll(temp1, slot, 1);
    __ Addu(temp1, temp1, vector);
    __ li(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
    __ sw(temp2,
          FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
  }

  __ bind(&tail);
  // The slow case, we need this no matter what to complete a call after a miss.
  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());

  // Unreachable.
  __ stop("Unreachable");

  __ bind(&miss);
  GenerateMiss(masm);
  __ Branch(&tail);
}
| 2905 |
| 2906 |
// Call IC for a direct call to the Math.floor builtin.  Beyond completing the
// call, this stub watches the (single) argument and, when the call would
// produce -0, records a sentinel in the type feedback vector so optimized
// code knows it must preserve the sign of zero.
void CallIC_FloorStub::Generate(MacroAssembler* masm) {
  Register function = a1;  // Callee.
  Register vector = a2;    // Type feedback vector.
  Register slot = a3;      // Feedback slot id, as a Smi.

  Register temp1 = a0;
  Register temp2 = t0;
  DoubleRegister double_temp = f12;
  Label tail, miss;

  // Ensure nobody has snuck in another function.
  __ BranchIfNotBuiltin(function, temp1, kMathFloor, &miss);

  if (arg_count() > 0) {
    // Load the first argument from the stack; only HeapNumbers need the -0
    // analysis below (Smis and other objects go straight to the call).
    __ lw(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize));
    Handle<Map> map = isolate()->factory()->heap_number_map();
    __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
    __ ldc1(double_temp, FieldMemOperand(temp1, HeapNumber::kValueOffset));

    // Only -0 floors to -0.
    // Check the full 64-bit pattern 0x80000000_00000000: high word must be
    // 0x80000000 (sign bit, zero exponent/mantissa) and low word must be 0.
    __ FmoveHigh(temp1, double_temp);
    __ Branch(&tail, ne, temp1, Operand(0x80000000));
    __ FmoveLow(temp1, double_temp);
    __ Branch(&tail, ne, temp1, Operand(zero_reg));

    // Record the -0 outcome in the feedback vector.  The slot is a Smi
    // (value << 1); one more left shift scales it by kPointerSize (4 on
    // MIPS32).  The sentinel is stored one element past the slot itself.
    __ sll(temp1, slot, 1);
    __ Addu(temp1, temp1, vector);
    __ li(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
    __ sw(temp2,
          FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
  }

  __ bind(&tail);
  // The slow case, we need this no matter what to complete a call after a miss.
  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());

  // Unreachable.
  __ stop("Unreachable");

  __ bind(&miss);
  GenerateMiss(masm);
  __ Branch(&tail);
}
| 2950 |
| 2951 |
// Call IC for a direct call to the Math.ceil builtin.  Beyond completing the
// call, this stub watches the (single) argument and, when the call would
// produce -0, records a sentinel in the type feedback vector so optimized
// code knows it must preserve the sign of zero.
void CallIC_CeilStub::Generate(MacroAssembler* masm) {
  Register function = a1;  // Callee.
  Register vector = a2;    // Type feedback vector.
  Register slot = a3;      // Feedback slot id, as a Smi.

  Register temp1 = a0;
  Register temp2 = t0;
  DoubleRegister double_temp1 = f12;
  DoubleRegister double_temp2 = f14;
  Label tail, miss;

  // Ensure nobody has snuck in another function.
  __ BranchIfNotBuiltin(function, temp1, kMathCeil, &miss);

  if (arg_count() > 0) {
    // Load the first argument from the stack; only HeapNumbers need the -0
    // analysis below (Smis and other objects go straight to the call).
    __ lw(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize));
    Handle<Map> map = isolate()->factory()->heap_number_map();
    __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK);
    __ ldc1(double_temp1, FieldMemOperand(temp1, HeapNumber::kValueOffset));

    // If the number is >0, it doesn't round to -0
    __ Move(double_temp2, 0.0);
    __ BranchF64(&tail, nullptr, gt, double_temp1, double_temp2);

    // If the number is <=-1, it doesn't round to -0
    __ Move(double_temp2, -1.0);
    __ BranchF64(&tail, nullptr, le, double_temp1, double_temp2);

    // +0 doesn't round to -0.
    // NOTE(review): this compares the whole high word against 0x80000000, so
    // only an argument that is exactly -0 (or a denormal whose high mantissa
    // word is zero) falls through to the sentinel store.  Any x in (-1, 0)
    // also ceils to -0 in JS; if those are meant to be recorded too, a
    // sign-bit-only test may have been intended -- confirm.
    __ FmoveHigh(temp1, double_temp1);
    __ Branch(&tail, ne, temp1, Operand(0x80000000));

    // Record the -0 outcome in the feedback vector.  The slot is a Smi
    // (value << 1); one more left shift scales it by kPointerSize (4 on
    // MIPS32).  The sentinel is stored one element past the slot itself.
    __ sll(temp1, slot, 1);
    __ Addu(temp1, temp1, vector);
    __ li(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel)));
    __ sw(temp2,
          FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize));
  }

  __ bind(&tail);
  // The slow case, we need this no matter what to complete a call after a miss.
  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());

  // Unreachable.
  __ stop("Unreachable");

  __ bind(&miss);
  GenerateMiss(masm);
  __ Branch(&tail);
}
| 3002 |
| 3003 |
| 2855 void CallICStub::Generate(MacroAssembler* masm) { | 3004 void CallICStub::Generate(MacroAssembler* masm) { |
| 2856 // a1 - function | 3005 // a1 - function |
| 2857 // a3 - slot id (Smi) | 3006 // a3 - slot id (Smi) |
| 2858 // a2 - vector | 3007 // a2 - vector |
| 2859 const int with_types_offset = | 3008 const int with_types_offset = |
| 2860 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex); | 3009 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex); |
| 2861 const int generic_offset = | 3010 const int generic_offset = |
| 2862 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex); | 3011 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex); |
| 2863 Label extra_checks_or_miss, slow_start; | 3012 Label extra_checks_or_miss, slow_start; |
| 2864 Label slow, non_function, wrap, cont; | 3013 Label slow, non_function, wrap, cont; |
| (...skipping 90 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2955 | 3104 |
| 2956 // Goto miss case if we do not have a function. | 3105 // Goto miss case if we do not have a function. |
| 2957 __ GetObjectType(a1, t0, t0); | 3106 __ GetObjectType(a1, t0, t0); |
| 2958 __ Branch(&miss, ne, t0, Operand(JS_FUNCTION_TYPE)); | 3107 __ Branch(&miss, ne, t0, Operand(JS_FUNCTION_TYPE)); |
| 2959 | 3108 |
| 2960 // Make sure the function is not the Array() function, which requires special | 3109 // Make sure the function is not the Array() function, which requires special |
| 2961 // behavior on MISS. | 3110 // behavior on MISS. |
| 2962 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0); | 3111 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0); |
| 2963 __ Branch(&miss, eq, a1, Operand(t0)); | 3112 __ Branch(&miss, eq, a1, Operand(t0)); |
| 2964 | 3113 |
| 3114 // Some builtin functions require special handling, miss to the runtime. |
| 3115 __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); |
| 3116 __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset)); |
| 3117 __ Branch(&miss, ne, t0, Operand(Smi::FromInt(0))); |
| 3118 |
| 2965 // Update stats. | 3119 // Update stats. |
| 2966 __ lw(t0, FieldMemOperand(a2, with_types_offset)); | 3120 __ lw(t0, FieldMemOperand(a2, with_types_offset)); |
| 2967 __ Addu(t0, t0, Operand(Smi::FromInt(1))); | 3121 __ Addu(t0, t0, Operand(Smi::FromInt(1))); |
| 2968 __ sw(t0, FieldMemOperand(a2, with_types_offset)); | 3122 __ sw(t0, FieldMemOperand(a2, with_types_offset)); |
| 2969 | 3123 |
| 2970 // Store the function. Use a stub since we need a frame for allocation. | 3124 // Store the function. Use a stub since we need a frame for allocation. |
| 2971 // a2 - vector | 3125 // a2 - vector |
| 2972 // a3 - slot | 3126 // a3 - slot |
| 2973 // a1 - function | 3127 // a1 - function |
| 2974 { | 3128 { |
| (...skipping 1608 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4583 } | 4737 } |
| 4584 | 4738 |
| 4585 | 4739 |
| 4586 void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) { | 4740 void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) { |
| 4587 EmitLoadTypeFeedbackVector(masm, a2); | 4741 EmitLoadTypeFeedbackVector(masm, a2); |
| 4588 CallIC_ArrayStub stub(isolate(), state()); | 4742 CallIC_ArrayStub stub(isolate(), state()); |
| 4589 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | 4743 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); |
| 4590 } | 4744 } |
| 4591 | 4745 |
| 4592 | 4746 |
| 4747 void CallIC_RoundTrampolineStub::Generate(MacroAssembler* masm) { |
| 4748 EmitLoadTypeFeedbackVector(masm, a2); |
| 4749 CallIC_RoundStub stub(isolate(), state()); |
| 4750 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); |
| 4751 } |
| 4752 |
| 4753 |
| 4754 void CallIC_FloorTrampolineStub::Generate(MacroAssembler* masm) { |
| 4755 EmitLoadTypeFeedbackVector(masm, a2); |
| 4756 CallIC_FloorStub stub(isolate(), state()); |
| 4757 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); |
| 4758 } |
| 4759 |
| 4760 |
| 4761 void CallIC_CeilTrampolineStub::Generate(MacroAssembler* masm) { |
| 4762 EmitLoadTypeFeedbackVector(masm, a2); |
| 4763 CallIC_CeilStub stub(isolate(), state()); |
| 4764 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); |
| 4765 } |
| 4766 |
| 4767 |
void VectorRawLoadStub::Generate(MacroAssembler* masm) {
  // Delegate to the shared implementation; false = the direct (non-trampoline)
  // entry point (GenerateForTrampoline passes true).
  GenerateImpl(masm, false);
}
| 4596 | 4771 |
| 4597 | 4772 |
void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
  // Delegate to the shared implementation; true = entered via the trampoline
  // (the direct Generate() entry passes false).
  GenerateImpl(masm, true);
}
| 4601 | 4776 |
| 4602 | 4777 |
| (...skipping 891 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5494 kStackUnwindSpace, kInvalidStackOffset, | 5669 kStackUnwindSpace, kInvalidStackOffset, |
| 5495 MemOperand(fp, 6 * kPointerSize), NULL); | 5670 MemOperand(fp, 6 * kPointerSize), NULL); |
| 5496 } | 5671 } |
| 5497 | 5672 |
| 5498 | 5673 |
| 5499 #undef __ | 5674 #undef __ |
| 5500 | 5675 |
| 5501 } } // namespace v8::internal | 5676 } } // namespace v8::internal |
| 5502 | 5677 |
| 5503 #endif // V8_TARGET_ARCH_MIPS | 5678 #endif // V8_TARGET_ARCH_MIPS |
| OLD | NEW |