OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #if V8_TARGET_ARCH_MIPS64 | 7 #if V8_TARGET_ARCH_MIPS64 |
8 | 8 |
9 #include "src/bootstrapper.h" | 9 #include "src/bootstrapper.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
(...skipping 2912 matching lines...)
2923 CallFunctionNoFeedback(masm, | 2923 CallFunctionNoFeedback(masm, |
2924 arg_count(), | 2924 arg_count(), |
2925 true, | 2925 true, |
2926 CallAsMethod()); | 2926 CallAsMethod()); |
2927 | 2927 |
2928 // Unreachable. | 2928 // Unreachable. |
2929 __ stop("Unexpected code address"); | 2929 __ stop("Unexpected code address"); |
2930 } | 2930 } |
2931 | 2931 |
2932 | 2932 |
2933 void CallIC_RoundStub::Generate(MacroAssembler* masm) { | |
2934 Register function = a1; | |
2935 Register vector = a2; | |
2936 Register slot = a3; | |
2937 | |
2938 Register temp1 = a0; | |
2939 Register temp2 = a4; | |
2940 DoubleRegister double_temp1 = f12; | |
2941 DoubleRegister double_temp2 = f14; | |
2942 Label tail, miss; | |
2943 | |
2944 // Ensure nobody has snuck in another function. | |
2945 __ BranchIfNotBuiltin(function, temp1, kMathRound, &miss); | |
2946 | |
2947 if (arg_count() > 0) { | |
2948 __ ld(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize)); | |
2949 Handle<Map> map = isolate()->factory()->heap_number_map(); | |
2950 __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK); | |
2951 __ ldc1(double_temp1, FieldMemOperand(temp1, HeapNumber::kValueOffset)); | |
2952 | |
2953 // If the number is > 0, it doesn't round to -0. |
2954 __ Move(double_temp2, 0.0); | |
2955 __ BranchF64(&tail, nullptr, gt, double_temp1, double_temp2); | |
2956 | |
2957 // If the number is < -0.5, it doesn't round to -0. |
2958 __ Move(double_temp2, -.5); | |
2959 __ BranchF64(&tail, nullptr, lt, double_temp1, double_temp2); | |
2960 | |
2961 // +0 doesn't round to -0. |
2962 __ FmoveHigh(temp1, double_temp1); | |
2963 __ Branch(&tail, ne, temp1, Operand(0xffffffff80000000)); | |
2964 | |
2965 __ SmiScale(temp1, slot, kPointerSizeLog2); | |
2966 __ Daddu(temp1, temp1, vector); | |
2967 __ li(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel))); | |
2968 __ sd(temp2, | |
2969 FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize)); | |
2970 } | |
2971 | |
2972 __ bind(&tail); | |
2973 // The slow case; we need this no matter what to complete a call after a miss. |
2974 CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod()); | |
2975 | |
2976 // Unreachable. | |
2977 __ stop("Unreachable"); | |
2978 | |
2979 __ bind(&miss); | |
2980 GenerateMiss(masm); | |
2981 __ Branch(&tail); | |
2982 } | |
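
For reference, the -0 guard that the comments in the removed stub describe can be written as a plain host-side predicate. This is an illustrative sketch, not V8 code, and both function names are hypothetical:

    #include <cstdint>
    #include <cstring>

    // Bit-exact test for -0: sign bit set, exponent and mantissa all zero.
    static bool IsMinusZero(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      return bits == 0x8000000000000000ull;
    }

    // Which inputs make Math.round produce -0: -0 itself, or any
    // negative number in [-0.5, 0). Everything else is safe.
    static bool RoundReturnsMinusZero(double x) {
      if (x != x) return false;    // NaN rounds to NaN
      if (x > 0.0) return false;   // BranchF64 gt: cannot produce -0
      if (x < -0.5) return false;  // BranchF64 lt: rounds to <= -1
      if (x == 0.0 && !IsMinusZero(x)) return false;  // +0 rounds to +0
      return true;                 // -0, or a negative in [-0.5, 0)
    }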
2983 | |
2984 | |
2985 void CallIC_FloorStub::Generate(MacroAssembler* masm) { | |
2986 Register function = a1; | |
2987 Register vector = a2; | |
2988 Register slot = a3; | |
2989 | |
2990 Register temp1 = a0; | |
2991 Register temp2 = a4; | |
2992 DoubleRegister double_temp = f12; | |
2993 Label tail, miss; | |
2994 | |
2995 // Ensure nobody has snuck in another function. | |
2996 __ BranchIfNotBuiltin(function, temp1, kMathFloor, &miss); | |
2997 | |
2998 if (arg_count() > 0) { | |
2999 __ ld(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize)); | |
3000 Handle<Map> map = isolate()->factory()->heap_number_map(); | |
3001 __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK); | |
3002 __ ldc1(double_temp, FieldMemOperand(temp1, HeapNumber::kValueOffset)); | |
3003 | |
3004 // Only -0 floors to -0. | |
3005 __ FmoveHigh(temp1, double_temp); | |
3006 __ Branch(&tail, ne, temp1, Operand(0xffffffff80000000)); | |
3007 __ FmoveLow(temp1, double_temp); | |
3008 __ Branch(&tail, ne, temp1, Operand(zero_reg)); | |
3009 | |
3010 __ SmiScale(temp1, slot, kPointerSizeLog2); | |
3011 __ Daddu(temp1, temp1, vector); | |
3012 __ li(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel))); | |
3013 __ sd(temp2, | |
3014 FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize)); | |
3015 } | |
3016 | |
3017 __ bind(&tail); | |
3018 // The slow case; we need this no matter what to complete a call after a miss. |
3019 CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod()); | |
3020 | |
3021 // Unreachable. | |
3022 __ stop("Unreachable"); | |
3023 | |
3024 __ bind(&miss); | |
3025 GenerateMiss(masm); | |
3026 __ Branch(&tail); | |
3027 } | |
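
The floor variant needs only the exact-bit check, since Math.floor returns -0 solely for an input of exactly -0 (Math.floor(-0.3) is -1, not -0). A sketch of the two-halves comparison the stub performs, with a hypothetical name:

    #include <cstdint>
    #include <cstring>

    // Compare the two 32-bit halves of the double against the bit
    // pattern of -0, just as the FmoveHigh/FmoveLow pair above does.
    static bool FloorReturnsMinusZero(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      uint32_t hi = static_cast<uint32_t>(bits >> 32);  // FmoveHigh
      uint32_t lo = static_cast<uint32_t>(bits);        // FmoveLow
      return hi == 0x80000000u && lo == 0u;             // exactly -0
    }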
3028 | |
3029 | |
3030 void CallIC_CeilStub::Generate(MacroAssembler* masm) { | |
3031 Register function = a1; | |
3032 Register vector = a2; | |
3033 Register slot = a3; | |
3034 | |
3035 Register temp1 = a0; | |
3036 Register temp2 = a4; | |
3037 DoubleRegister double_temp1 = f12; | |
3038 DoubleRegister double_temp2 = f14; | |
3039 Label tail, miss; | |
3040 | |
3041 // Ensure nobody has snuck in another function. | |
3042 __ BranchIfNotBuiltin(function, temp1, kMathCeil, &miss); | |
3043 | |
3044 if (arg_count() > 0) { | |
3045 __ ld(temp1, MemOperand(sp, (arg_count() - 1) * kPointerSize)); | |
3046 Handle<Map> map = isolate()->factory()->heap_number_map(); | |
3047 __ CheckMap(temp1, temp2, map, &tail, DO_SMI_CHECK); | |
3048 __ ldc1(double_temp1, FieldMemOperand(temp1, HeapNumber::kValueOffset)); | |
3049 | |
3050 // If the number is > 0, it doesn't round to -0. |
3051 __ Move(double_temp2, 0.0); | |
3052 __ BranchF64(&tail, nullptr, gt, double_temp1, double_temp2); | |
3053 | |
3054 // If the number is <= -1, it doesn't round to -0. |
3055 __ Move(double_temp2, -1.0); | |
3056 __ BranchF64(&tail, nullptr, le, double_temp1, double_temp2); | |
3057 | |
3058 // +0 doesn't round to -0. | |
3059 __ FmoveHigh(temp1, double_temp1); | |
3060 __ Branch(&tail, ne, temp1, Operand(0xffffffff80000000)); | |
3061 | |
3062 __ SmiScale(temp1, slot, kPointerSizeLog2); | |
3063 __ Daddu(temp1, temp1, vector); | |
3064 __ li(temp2, Operand(Smi::FromInt(kHasReturnedMinusZeroSentinel))); | |
3065 __ sd(temp2, | |
3066 FieldMemOperand(temp1, FixedArray::kHeaderSize + kPointerSize)); | |
3067 } | |
3068 | |
3069 __ bind(&tail); | |
3070 // The slow case; we need this no matter what to complete a call after a miss. |
3071 CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod()); | |
3072 | |
3073 // Unreachable. | |
3074 __ stop("Unreachable"); | |
3075 | |
3076 __ bind(&miss); | |
3077 GenerateMiss(masm); | |
3078 __ Branch(&tail); | |
3079 } | |
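
Ceil's guards mirror Round's (only -0 or a negative in (-1, 0) can ceil to -0). The bookkeeping all three removed stubs share is the sentinel store: scale the Smi slot index to a byte offset, add the vector base, and write one element past the call's own feedback slot. The address arithmetic amounts to the following sketch, with the relevant constants assumed rather than pulled from V8 headers:

    #include <cstdint>

    static const int kPointerSize = 8;                 // MIPS64 word size
    static const int kHeapObjectTag = 1;               // assumed V8 pointer tag
    static const int kHeaderSize = 2 * kPointerSize;   // FixedArray map + length (assumed)

    // temp1 = vector + slot * 8 after SmiScale/Daddu; the sd then targets
    // the feedback-vector element at index slot + 1.
    static uintptr_t MinusZeroSentinelAddress(uintptr_t vector, intptr_t slot) {
      uintptr_t scaled = vector + slot * kPointerSize;
      return scaled + kHeaderSize + kPointerSize - kHeapObjectTag;
    }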
3080 | |
3081 | |
3082 void CallICStub::Generate(MacroAssembler* masm) { | 2933 void CallICStub::Generate(MacroAssembler* masm) { |
3083 // a1 - function | 2934 // a1 - function |
3084 // a3 - slot id (Smi) | 2935 // a3 - slot id (Smi) |
3085 // a2 - vector | 2936 // a2 - vector |
3086 const int with_types_offset = | 2937 const int with_types_offset = |
3087 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex); | 2938 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex); |
3088 const int generic_offset = | 2939 const int generic_offset = |
3089 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex); | 2940 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex); |
3090 Label extra_checks_or_miss, slow_start; | 2941 Label extra_checks_or_miss, slow_start; |
3091 Label slow, non_function, wrap, cont; | 2942 Label slow, non_function, wrap, cont; |
(...skipping 90 matching lines...)
3182 | 3033 |
3183 // Goto miss case if we do not have a function. | 3034 // Goto miss case if we do not have a function. |
3184 __ GetObjectType(a1, a4, a4); | 3035 __ GetObjectType(a1, a4, a4); |
3185 __ Branch(&miss, ne, a4, Operand(JS_FUNCTION_TYPE)); | 3036 __ Branch(&miss, ne, a4, Operand(JS_FUNCTION_TYPE)); |
3186 | 3037 |
3187 // Make sure the function is not the Array() function, which requires special | 3038 // Make sure the function is not the Array() function, which requires special |
3188 // behavior on MISS. | 3039 // behavior on MISS. |
3189 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4); | 3040 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4); |
3190 __ Branch(&miss, eq, a1, Operand(a4)); | 3041 __ Branch(&miss, eq, a1, Operand(a4)); |
3191 | 3042 |
3192 // Some builtin functions require special handling; miss to the runtime. |
3193 __ ld(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); | |
3194 __ ld(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset)); | |
3195 __ Branch(&miss, ne, t0, Operand(Smi::FromInt(0))); | |
3196 | |
3197 // Update stats. | 3043 // Update stats. |
3198 __ ld(a4, FieldMemOperand(a2, with_types_offset)); | 3044 __ ld(a4, FieldMemOperand(a2, with_types_offset)); |
3199 __ Daddu(a4, a4, Operand(Smi::FromInt(1))); | 3045 __ Daddu(a4, a4, Operand(Smi::FromInt(1))); |
3200 __ sd(a4, FieldMemOperand(a2, with_types_offset)); | 3046 __ sd(a4, FieldMemOperand(a2, with_types_offset)); |
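
A note on the Daddu above: the counter is incremented without untagging because a Smi on MIPS64 keeps its 32-bit payload in the upper word, so adding two tagged values adds their payloads and leaves the tag bits clear. A minimal sketch, with the shift constant assumed:

    #include <cstdint>

    static const int kSmiShift = 32;  // assumed MIPS64 Smi layout

    static int64_t SmiFromInt(int32_t value) {
      return static_cast<int64_t>(value) << kSmiShift;
    }

    static int32_t SmiToInt(int64_t smi) {
      return static_cast<int32_t>(smi >> kSmiShift);
    }

    // The ld/Daddu/sd sequence is equivalent to a tagged increment:
    // SmiToInt(SmiFromInt(41) + SmiFromInt(1)) == 42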
3201 | 3047 |
3202 // Store the function. Use a stub since we need a frame for allocation. | 3048 // Store the function. Use a stub since we need a frame for allocation. |
3203 // a2 - vector | 3049 // a2 - vector |
3204 // a3 - slot | 3050 // a3 - slot |
3205 // a1 - function | 3051 // a1 - function |
3206 { | 3052 { |
(...skipping 1573 matching lines...)
4780 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | 4626 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); |
4781 } | 4627 } |
4782 | 4628 |
4783 | 4629 |
4784 void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) { | 4630 void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) { |
4785 EmitLoadTypeFeedbackVector(masm, a2); | 4631 EmitLoadTypeFeedbackVector(masm, a2); |
4786 CallIC_ArrayStub stub(isolate(), state()); | 4632 CallIC_ArrayStub stub(isolate(), state()); |
4787 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | 4633 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); |
4788 } | 4634 } |
4789 | 4635 |
4790 | |
4791 void CallIC_RoundTrampolineStub::Generate(MacroAssembler* masm) { | |
4792 EmitLoadTypeFeedbackVector(masm, a2); | |
4793 CallIC_RoundStub stub(isolate(), state()); | |
4794 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | |
4795 } | |
4796 | |
4797 | |
4798 void CallIC_FloorTrampolineStub::Generate(MacroAssembler* masm) { | |
4799 EmitLoadTypeFeedbackVector(masm, a2); | |
4800 CallIC_FloorStub stub(isolate(), state()); | |
4801 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | |
4802 } | |
4803 | |
4804 | |
4805 void CallIC_CeilTrampolineStub::Generate(MacroAssembler* masm) { | |
4806 EmitLoadTypeFeedbackVector(masm, a2); | |
4807 CallIC_CeilStub stub(isolate(), state()); | |
4808 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | |
4809 } | |
4810 | |
4811 | 4636 |
4812 void VectorRawLoadStub::Generate(MacroAssembler* masm) { | 4637 void VectorRawLoadStub::Generate(MacroAssembler* masm) { |
4813 GenerateImpl(masm, false); | 4638 GenerateImpl(masm, false); |
4814 } | 4639 } |
4815 | 4640 |
4816 | 4641 |
4817 void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) { | 4642 void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) { |
4818 GenerateImpl(masm, true); | 4643 GenerateImpl(masm, true); |
4819 } | 4644 } |
4820 | 4645 |
(...skipping 891 matching lines...)
5712 kStackUnwindSpace, kInvalidStackOffset, | 5537 kStackUnwindSpace, kInvalidStackOffset, |
5713 MemOperand(fp, 6 * kPointerSize), NULL); | 5538 MemOperand(fp, 6 * kPointerSize), NULL); |
5714 } | 5539 } |
5715 | 5540 |
5716 | 5541 |
5717 #undef __ | 5542 #undef __ |
5718 | 5543 |
5719 } } // namespace v8::internal | 5544 } } // namespace v8::internal |
5720 | 5545 |
5721 #endif // V8_TARGET_ARCH_MIPS64 | 5546 #endif // V8_TARGET_ARCH_MIPS64 |