| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #if V8_TARGET_ARCH_X64 | 5 #if V8_TARGET_ARCH_X64 |
| 6 | 6 |
| 7 #include "src/code-factory.h" | 7 #include "src/code-factory.h" |
| 8 #include "src/codegen.h" | 8 #include "src/codegen.h" |
| 9 #include "src/deoptimizer.h" | 9 #include "src/deoptimizer.h" |
| 10 #include "src/full-codegen/full-codegen.h" | 10 #include "src/full-codegen/full-codegen.h" |
| (...skipping 605 matching lines...) |
| 616 __ bind(&bytecode_array_loaded); | 616 __ bind(&bytecode_array_loaded); |
| 617 | 617 |
| 618 // Check whether we should continue to use the interpreter. | 618 // Check whether we should continue to use the interpreter. |
| 619 Label switch_to_different_code_kind; | 619 Label switch_to_different_code_kind; |
| 620 __ Move(rcx, masm->CodeObject()); // Self-reference to this code. | 620 __ Move(rcx, masm->CodeObject()); // Self-reference to this code. |
| 621 __ cmpp(rcx, FieldOperand(rax, SharedFunctionInfo::kCodeOffset)); | 621 __ cmpp(rcx, FieldOperand(rax, SharedFunctionInfo::kCodeOffset)); |
| 622 __ j(not_equal, &switch_to_different_code_kind); | 622 __ j(not_equal, &switch_to_different_code_kind); |
| 623 | 623 |
| 624 // Increment invocation count for the function. | 624 // Increment invocation count for the function. |
| 625 __ movp(rcx, FieldOperand(rdi, JSFunction::kFeedbackVectorOffset)); | 625 __ movp(rcx, FieldOperand(rdi, JSFunction::kFeedbackVectorOffset)); |
| | 626 __ movp(rcx, FieldOperand(rcx, Cell::kValueOffset)); |
| 626 __ SmiAddConstant( | 627 __ SmiAddConstant( |
| 627 FieldOperand(rcx, | 628 FieldOperand(rcx, |
| 628 TypeFeedbackVector::kInvocationCountIndex * kPointerSize + | 629 TypeFeedbackVector::kInvocationCountIndex * kPointerSize + |
| 629 TypeFeedbackVector::kHeaderSize), | 630 TypeFeedbackVector::kHeaderSize), |
| 630 Smi::FromInt(1)); | 631 Smi::FromInt(1)); |
| 631 | 632 |
| 632 // Check function data field is actually a BytecodeArray object. | 633 // Check function data field is actually a BytecodeArray object. |
| 633 if (FLAG_debug_code) { | 634 if (FLAG_debug_code) { |
| 634 __ AssertNotSmi(kInterpreterBytecodeArrayRegister); | 635 __ AssertNotSmi(kInterpreterBytecodeArrayRegister); |
| 635 __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE, | 636 __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE, |
| (...skipping 352 matching lines...) |
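Note on the hunk above: the NEW code adds one extra load because the feedback vector now sits behind a Cell — JSFunction::kFeedbackVectorOffset yields the cell, Cell::kValueOffset yields the vector, and the vector's invocation-count slot is then bumped by Smi(1). A minimal C++ paraphrase of that indirection (struct names and fields here are illustrative stand-ins, not V8's real tagged object layout):

```cpp
#include <cstdint>

// Illustrative stand-ins only; V8's heap objects are tagged, not plain structs.
struct FeedbackVector { int64_t invocation_count; };  // Smi-tagged slot in V8
struct Cell { FeedbackVector* value; };               // Cell::kValueOffset
struct JSFunction { Cell* feedback_vector_cell; };    // JSFunction::kFeedbackVectorOffset

// OLD: one load (closure -> vector). NEW: two loads (closure -> cell -> vector).
void BumpInvocationCount(JSFunction* closure) {
  FeedbackVector* vector = closure->feedback_vector_cell->value;
  vector->invocation_count += 1;  // the SmiAddConstant in the hunk above
}
```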
| 988 // -- rdi : target function (preserved for callee) | 989 // -- rdi : target function (preserved for callee) |
| 989 // ----------------------------------- | 990 // ----------------------------------- |
| 990 // First lookup code, maybe we don't need to compile! | 991 // First lookup code, maybe we don't need to compile! |
| 991 Label gotta_call_runtime; | 992 Label gotta_call_runtime; |
| 992 Label try_shared; | 993 Label try_shared; |
| 993 Label loop_top, loop_bottom; | 994 Label loop_top, loop_bottom; |
| 994 | 995 |
| 995 Register closure = rdi; | 996 Register closure = rdi; |
| 996 Register map = r8; | 997 Register map = r8; |
| 997 Register index = r9; | 998 Register index = r9; |
| | 999 |
| | 1000 // Do we have a valid feedback vector? |
| | 1001 __ movp(rbx, FieldOperand(closure, JSFunction::kFeedbackVectorOffset)); |
| | 1002 __ movp(rbx, FieldOperand(rbx, Cell::kValueOffset)); |
| | 1003 __ JumpIfRoot(rbx, Heap::kUndefinedValueRootIndex, &gotta_call_runtime); |
| | 1004 |
| 998 __ movp(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset)); | 1005 __ movp(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset)); |
| 999 __ movp(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset)); | 1006 __ movp(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset)); |
| 1000 __ SmiToInteger32(index, FieldOperand(map, FixedArray::kLengthOffset)); | 1007 __ SmiToInteger32(index, FieldOperand(map, FixedArray::kLengthOffset)); |
| 1001 __ cmpl(index, Immediate(2)); | 1008 __ cmpl(index, Immediate(2)); |
| 1002 __ j(less, &gotta_call_runtime); | 1009 __ j(less, &try_shared); |
| 1003 | 1010 |
| 1004 // Find literals. | |
| 1005 // r14 : native context | 1011 // r14 : native context |
| 1006 // r9 : length / index | 1012 // r9 : length / index |
| 1007 // r8 : optimized code map | 1013 // r8 : optimized code map |
| 1008 // rdx : new target | 1014 // rdx : new target |
| 1009 // rdi : closure | 1015 // rdi : closure |
| 1010 Register native_context = r14; | 1016 Register native_context = r14; |
| 1011 __ movp(native_context, NativeContextOperand()); | 1017 __ movp(native_context, NativeContextOperand()); |
| 1012 | 1018 |
| 1013 __ bind(&loop_top); | 1019 __ bind(&loop_top); |
| 1014 // Native context match? | 1020 // Native context match? |
| 1015 Register temp = r11; | 1021 Register temp = r11; |
| 1016 __ movp(temp, FieldOperand(map, index, times_pointer_size, | 1022 __ movp(temp, FieldOperand(map, index, times_pointer_size, |
| 1017 SharedFunctionInfo::kOffsetToPreviousContext)); | 1023 SharedFunctionInfo::kOffsetToPreviousContext)); |
| 1018 __ movp(temp, FieldOperand(temp, WeakCell::kValueOffset)); | 1024 __ movp(temp, FieldOperand(temp, WeakCell::kValueOffset)); |
| 1019 __ cmpp(temp, native_context); | 1025 __ cmpp(temp, native_context); |
| 1020 __ j(not_equal, &loop_bottom); | 1026 __ j(not_equal, &loop_bottom); |
| 1021 // Feedback vector available? | |
| 1022 __ movp(temp, FieldOperand(map, index, times_pointer_size, | |
| 1023 SharedFunctionInfo::kOffsetToPreviousLiterals)); | |
| 1024 __ movp(temp, FieldOperand(temp, WeakCell::kValueOffset)); | |
| 1025 __ JumpIfSmi(temp, &gotta_call_runtime); | |
| 1026 | |
| 1027 // Save the feedback vector in the closure. | |
| 1028 __ movp(FieldOperand(closure, JSFunction::kFeedbackVectorOffset), temp); | |
| 1029 __ movp(r15, index); | |
| 1030 __ RecordWriteField(closure, JSFunction::kFeedbackVectorOffset, temp, r15, | |
| 1031 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); | |
| 1032 | 1027 |
| 1033 // Code available? | 1028 // Code available? |
| 1034 Register entry = rcx; | 1029 Register entry = rcx; |
| 1035 __ movp(entry, FieldOperand(map, index, times_pointer_size, | 1030 __ movp(entry, FieldOperand(map, index, times_pointer_size, |
| 1036 SharedFunctionInfo::kOffsetToPreviousCachedCode)); | 1031 SharedFunctionInfo::kOffsetToPreviousCachedCode)); |
| 1037 __ movp(entry, FieldOperand(entry, WeakCell::kValueOffset)); | 1032 __ movp(entry, FieldOperand(entry, WeakCell::kValueOffset)); |
| 1038 __ JumpIfSmi(entry, &try_shared); | 1033 __ JumpIfSmi(entry, &try_shared); |
| 1039 | 1034 |
| 1040 // Found literals and code. Get them into the closure and return. | 1035 // Found code. Get it into the closure and return. |
| 1041 __ leap(entry, FieldOperand(entry, Code::kHeaderSize)); | 1036 __ leap(entry, FieldOperand(entry, Code::kHeaderSize)); |
| 1042 __ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry); | 1037 __ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry); |
| 1043 __ RecordWriteCodeEntryField(closure, entry, r15); | 1038 __ RecordWriteCodeEntryField(closure, entry, r15); |
| 1044 | 1039 |
| 1045 // Link the closure into the optimized function list. | 1040 // Link the closure into the optimized function list. |
| 1046 // rcx : code entry (entry) | 1041 // rcx : code entry (entry) |
| 1047 // r14 : native context | 1042 // r14 : native context |
| 1048 // rdx : new target | 1043 // rdx : new target |
| 1049 // rdi : closure | 1044 // rdi : closure |
| 1050 __ movp(rbx, | 1045 __ movp(rbx, |
| (...skipping 10 matching lines...) |
| 1061 __ RecordWriteContextSlot(native_context, function_list_offset, closure, r15, | 1056 __ RecordWriteContextSlot(native_context, function_list_offset, closure, r15, |
| 1062 kDontSaveFPRegs); | 1057 kDontSaveFPRegs); |
| 1063 __ movp(closure, rbx); | 1058 __ movp(closure, rbx); |
| 1064 __ jmp(entry); | 1059 __ jmp(entry); |
| 1065 | 1060 |
| 1066 __ bind(&loop_bottom); | 1061 __ bind(&loop_bottom); |
| 1067 __ subl(index, Immediate(SharedFunctionInfo::kEntryLength)); | 1062 __ subl(index, Immediate(SharedFunctionInfo::kEntryLength)); |
| 1068 __ cmpl(index, Immediate(1)); | 1063 __ cmpl(index, Immediate(1)); |
| 1069 __ j(greater, &loop_top); | 1064 __ j(greater, &loop_top); |
| 1070 | 1065 |
| 1071 // We found neither literals nor code. | 1066 // We found no code. |
| 1072 __ jmp(&gotta_call_runtime); | |
| 1073 | |
| 1074 __ bind(&try_shared); | 1067 __ bind(&try_shared); |
| 1075 __ movp(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset)); | 1068 __ movp(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset)); |
| 1076 // Is the shared function marked for tier up? | 1069 // Is the shared function marked for tier up? |
| 1077 __ testb(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset), | 1070 __ testb(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset), |
| 1078 Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte)); | 1071 Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte)); |
| 1079 __ j(not_zero, &gotta_call_runtime); | 1072 __ j(not_zero, &gotta_call_runtime); |
| 1080 | 1073 |
| 1081 // If SFI points to anything other than CompileLazy, install that. | 1074 // If SFI points to anything other than CompileLazy, install that. |
| 1082 __ movp(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset)); | 1075 __ movp(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset)); |
| 1083 __ Move(rbx, masm->CodeObject()); | 1076 __ Move(rbx, masm->CodeObject()); |
| (...skipping 2180 matching lines...) |
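For orientation, here is a hedged C++ paraphrase of the fast path the CompileLazy hunk above emits: bail to the runtime if the feedback vector is still undefined, scan the SharedFunctionInfo's optimized code map from the last entry down for one whose native context matches and whose code weak cell is still alive, and otherwise fall through to try_shared, which installs the SFI's own code unless the function is marked for tier-up. The types and lookup signature below are invented for illustration; the real builtin works on raw tagged fields:

```cpp
// Hypothetical flat view of one optimized-code-map entry.
struct Context {};
struct Code {};
struct WeakCell { void* value; };  // reads back as a Smi in V8 once cleared
struct Entry { WeakCell* context; WeakCell* code; };

// Sketch of the loop_top/loop_bottom/try_shared control flow in the hunk.
// Returns cached Code for this native context, or nullptr meaning "take the
// try_shared path" (which may still avoid the runtime call).
Code* LookupOptimizedCode(const Entry* entries, int count,
                          Context* native_context) {
  for (int i = count - 1; i >= 0; --i) {  // loop_top .. loop_bottom
    if (entries[i].context->value != native_context) continue;  // context match?
    if (entries[i].code->value == nullptr) return nullptr;  // cleared -> try_shared
    return static_cast<Code*>(entries[i].code->value);      // code available
  }
  return nullptr;  // scanned the whole map, no match -> try_shared
}
```

On a hit, the real builtin additionally writes the code entry into the closure (with a write barrier) and links the closure into the native context's optimized-function list, as the hunk shows.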
| 3264 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { | 3257 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { |
| 3265 Generate_OnStackReplacementHelper(masm, true); | 3258 Generate_OnStackReplacementHelper(masm, true); |
| 3266 } | 3259 } |
| 3267 | 3260 |
| 3268 #undef __ | 3261 #undef __ |
| 3269 | 3262 |
| 3270 } // namespace internal | 3263 } // namespace internal |
| 3271 } // namespace v8 | 3264 } // namespace v8 |
| 3272 | 3265 |
| 3273 #endif // V8_TARGET_ARCH_X64 | 3266 #endif // V8_TARGET_ARCH_X64 |