| OLD | NEW | 
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. | 
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be | 
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. | 
| 4 | 4 | 
| 5 #if V8_TARGET_ARCH_X64 | 5 #if V8_TARGET_ARCH_X64 | 
| 6 | 6 | 
| 7 #include "src/code-factory.h" | 7 #include "src/code-factory.h" | 
| 8 #include "src/codegen.h" | 8 #include "src/codegen.h" | 
| 9 #include "src/deoptimizer.h" | 9 #include "src/deoptimizer.h" | 
| 10 #include "src/full-codegen/full-codegen.h" | 10 #include "src/full-codegen/full-codegen.h" | 
| (...skipping 885 matching lines...) | |
| 896   // This simulates the initial call to bytecode handlers in interpreter entry | 896   // This simulates the initial call to bytecode handlers in interpreter entry | 
| 897   // trampoline. The return will never actually be taken, but our stack walker | 897   // trampoline. The return will never actually be taken, but our stack walker | 
| 898   // uses this address to determine whether a frame is interpreted. | 898   // uses this address to determine whether a frame is interpreted. | 
| 899   __ Push(masm->isolate()->builtins()->InterpreterEntryTrampoline()); | 899   __ Push(masm->isolate()->builtins()->InterpreterEntryTrampoline()); | 
| 900 | 900 | 
| 901   Generate_EnterBytecodeDispatch(masm); | 901   Generate_EnterBytecodeDispatch(masm); | 
| 902 } | 902 } | 
| 903 | 903 | 
| 904 | 904 | 
| 905 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { | 905 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { | 
| 906   // ----------- S t a t e ------------- |  | 
| 907   //  -- rdx : new target (preserved for callee) |  | 
| 908   //  -- rdi : target function (preserved for callee) |  | 
| 909   // ----------------------------------- |  | 
| 910   // First lookup code, maybe we don't need to compile! |  | 
| 911   Label gotta_call_runtime; |  | 
| 912   Label maybe_call_runtime; |  | 
| 913   Label try_shared; |  | 
| 914   Label loop_top, loop_bottom; |  | 
| 915 |  | 
| 916   Register closure = rdi; |  | 
| 917   Register map = r8; |  | 
| 918   Register index = r9; |  | 
| 919   __ movp(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset)); |  | 
| 920   __ movp(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset)); |  | 
| 921   __ SmiToInteger32(index, FieldOperand(map, FixedArray::kLengthOffset)); |  | 
| 922   __ cmpl(index, Immediate(2)); |  | 
| 923   __ j(less, &gotta_call_runtime); |  | 
| 924 |  | 
| 925   // Find literals. |  | 
| 926   // r14 : native context |  | 
| 927   // r9  : length / index |  | 
| 928   // r8  : optimized code map |  | 
| 929   // rdx : new target |  | 
| 930   // rdi : closure |  | 
| 931   Register native_context = r14; |  | 
| 932   __ movp(native_context, NativeContextOperand()); |  | 
| 933 |  | 
| 934   __ bind(&loop_top); |  | 
| 935   // Native context match? |  | 
| 936   Register temp = r11; |  | 
| 937   __ movp(temp, FieldOperand(map, index, times_pointer_size, |  | 
| 938                              SharedFunctionInfo::OffsetToPreviousContext())); |  | 
| 939   __ movp(temp, FieldOperand(temp, WeakCell::kValueOffset)); |  | 
| 940   __ cmpp(temp, native_context); |  | 
| 941   __ j(not_equal, &loop_bottom); |  | 
| 942   // OSR id set to none? |  | 
| 943   __ movp(temp, FieldOperand(map, index, times_pointer_size, |  | 
| 944                              SharedFunctionInfo::OffsetToPreviousOsrAstId())); |  | 
| 945   __ SmiToInteger32(temp, temp); |  | 
| 946   const int bailout_id = BailoutId::None().ToInt(); |  | 
| 947   __ cmpl(temp, Immediate(bailout_id)); |  | 
| 948   __ j(not_equal, &loop_bottom); |  | 
| 949   // Literals available? |  | 
| 950   __ movp(temp, FieldOperand(map, index, times_pointer_size, |  | 
| 951                              SharedFunctionInfo::OffsetToPreviousLiterals())); |  | 
| 952   __ movp(temp, FieldOperand(temp, WeakCell::kValueOffset)); |  | 
| 953   __ JumpIfSmi(temp, &gotta_call_runtime); |  | 
| 954 |  | 
| 955   // Save the literals in the closure. |  | 
| 956   __ movp(FieldOperand(closure, JSFunction::kLiteralsOffset), temp); |  | 
| 957   __ movp(rax, index); |  | 
| 958   __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, rax, |  | 
| 959                       kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); |  | 
| 960 |  | 
| 961   // Code available? |  | 
| 962   Register entry = rcx; |  | 
| 963   __ movp(entry, |  | 
| 964           FieldOperand(map, index, times_pointer_size, |  | 
| 965                        SharedFunctionInfo::OffsetToPreviousCachedCode())); |  | 
| 966   __ movp(entry, FieldOperand(entry, WeakCell::kValueOffset)); |  | 
| 967   __ JumpIfSmi(entry, &maybe_call_runtime); |  | 
| 968 |  | 
| 969   // Found literals and code. Get them into the closure and return. |  | 
| 970   __ leap(entry, FieldOperand(entry, Code::kHeaderSize)); |  | 
| 971 |  | 
| 972   Label install_optimized_code_and_tailcall; |  | 
| 973   __ bind(&install_optimized_code_and_tailcall); |  | 
| 974   __ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry); |  | 
| 975   __ RecordWriteCodeEntryField(closure, entry, rax); |  | 
| 976 |  | 
| 977   // Link the closure into the optimized function list. |  | 
| 978   // rcx : code entry (entry) |  | 
| 979   // r14 : native context |  | 
| 980   // rdx : new target |  | 
| 981   // rdi : closure |  | 
| 982   __ movp(rbx, |  | 
| 983           ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST)); |  | 
| 984   __ movp(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), rbx); |  | 
| 985   __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, rbx, rax, |  | 
| 986                       kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); |  | 
| 987   const int function_list_offset = |  | 
| 988       Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST); |  | 
| 989   __ movp(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), |  | 
| 990           closure); |  | 
| 991   // Save closure before the write barrier. |  | 
| 992   __ movp(rbx, closure); |  | 
| 993   __ RecordWriteContextSlot(native_context, function_list_offset, closure, rax, |  | 
| 994                             kDontSaveFPRegs); |  | 
| 995   __ movp(closure, rbx); |  | 
| 996   __ jmp(entry); |  | 
| 997 |  | 
| 998   __ bind(&loop_bottom); |  | 
| 999   __ subl(index, Immediate(SharedFunctionInfo::kEntryLength)); |  | 
| 1000   __ cmpl(index, Immediate(1)); |  | 
| 1001   __ j(greater, &loop_top); |  | 
| 1002 |  | 
| 1003   // We found neither literals nor code. |  | 
| 1004   __ jmp(&gotta_call_runtime); |  | 
| 1005 |  | 
| 1006   __ bind(&maybe_call_runtime); |  | 
| 1007 |  | 
| 1008   // Last possibility. Check the context free optimized code map entry. |  | 
| 1009   __ movp(entry, FieldOperand(map, FixedArray::kHeaderSize + |  | 
| 1010                                        SharedFunctionInfo::kSharedCodeIndex)); |  | 
| 1011   __ movp(entry, FieldOperand(entry, WeakCell::kValueOffset)); |  | 
| 1012   __ JumpIfSmi(entry, &try_shared); |  | 
| 1013 |  | 
| 1014   // Store code entry in the closure. |  | 
| 1015   __ leap(entry, FieldOperand(entry, Code::kHeaderSize)); |  | 
| 1016   __ jmp(&install_optimized_code_and_tailcall); |  | 
| 1017 |  | 
| 1018   __ bind(&try_shared); |  | 
| 1019   // Is the full code valid? |  | 
| 1020   __ movp(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset)); |  | 
| 1021   __ movp(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset)); |  | 
| 1022   __ movl(rbx, FieldOperand(entry, Code::kFlagsOffset)); |  | 
| 1023   __ andl(rbx, Immediate(Code::KindField::kMask)); |  | 
| 1024   __ shrl(rbx, Immediate(Code::KindField::kShift)); |  | 
| 1025   __ cmpl(rbx, Immediate(Code::BUILTIN)); |  | 
| 1026   __ j(equal, &gotta_call_runtime); |  | 
| 1027   // Yes, install the full code. |  | 
| 1028   __ leap(entry, FieldOperand(entry, Code::kHeaderSize)); |  | 
| 1029   __ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry); |  | 
| 1030   __ RecordWriteCodeEntryField(closure, entry, rax); |  | 
| 1031   __ jmp(entry); |  | 
| 1032 |  | 
| 1033   __ bind(&gotta_call_runtime); |  | 
| 1034   CallRuntimePassFunction(masm, Runtime::kCompileLazy); | 906   CallRuntimePassFunction(masm, Runtime::kCompileLazy); | 
| 1035   GenerateTailCallToReturnedCode(masm); | 907   GenerateTailCallToReturnedCode(masm); | 
| 1036 } | 908 } | 
| 1037 | 909 | 
| 1038 | 910 | 
| 1039 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { | 911 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { | 
| 1040   CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent); | 912   CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent); | 
| 1041   GenerateTailCallToReturnedCode(masm); | 913   GenerateTailCallToReturnedCode(masm); | 
| 1042 } | 914 } | 
| 1043 | 915 | 
| (...skipping 1938 matching lines...) | |
| 2982   __ ret(0); | 2854   __ ret(0); | 
| 2983 } | 2855 } | 
| 2984 | 2856 | 
| 2985 | 2857 | 
| 2986 #undef __ | 2858 #undef __ | 
| 2987 | 2859 | 
| 2988 }  // namespace internal | 2860 }  // namespace internal | 
| 2989 }  // namespace v8 | 2861 }  // namespace v8 | 
| 2990 | 2862 | 
| 2991 #endif  // V8_TARGET_ARCH_X64 | 2863 #endif  // V8_TARGET_ARCH_X64 | 
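For readers skimming the deleted half of `Generate_CompileLazy`: the label-and-jump assembly above is easier to follow as structured control flow. Below is a minimal C++ sketch of the fast path this CL removes, under the assumption that weak cells can be modelled as nullable pointers; every type and helper name here (`CodeMapEntry`, `OptimizedCodeMap`, `FindCachedCode`) is hypothetical and not real V8 API. The surviving version of the builtin skips this search entirely: it always calls `Runtime::kCompileLazy` and tail-calls whatever code the runtime returns.

```cpp
#include <vector>

// Hypothetical stand-ins for the V8 heap objects involved; all type and
// field names are illustrative, not the real V8 API.
struct Code {
  bool is_builtin;  // true while the function still points at a stub
};
struct Literals {};

struct SharedFunctionInfo {
  Code* code;  // the unoptimized (full-codegen) code object
};

// One record per native context, mirroring the strides the assembly walks
// with SharedFunctionInfo::kEntryLength. Weak references are modelled as
// pointers that become null when the GC clears the WeakCell.
struct CodeMapEntry {
  const void* native_context;
  int osr_ast_id;      // BailoutId::None() modelled as -1
  Literals* literals;  // null once cleared
  Code* code;          // null once cleared
};

struct OptimizedCodeMap {
  std::vector<CodeMapEntry> entries;
  Code* shared_code;  // the context-free kSharedCodeIndex slot
};

constexpr int kBailoutIdNone = -1;

// Returns the code object to install in the closure, or nullptr when the
// stub must fall through to Runtime::kCompileLazy (the assembly's
// "gotta_call_runtime" label).
Code* FindCachedCode(const OptimizedCodeMap& map, const void* native_context,
                     const SharedFunctionInfo& shared,
                     Literals** literals_out) {
  // loop_top / loop_bottom: scan the map from the most recent entry back.
  for (auto it = map.entries.rbegin(); it != map.entries.rend(); ++it) {
    if (it->native_context != native_context) continue;  // wrong context
    if (it->osr_ast_id != kBailoutIdNone) continue;      // OSR entry, skip
    if (it->literals == nullptr) return nullptr;         // literals cleared
    *literals_out = it->literals;  // "Save the literals in the closure."
    if (it->code != nullptr) return it->code;  // full hit: install, tail-call
    // maybe_call_runtime: the per-context code cell was cleared, so check
    // the context-free cached code slot next.
    if (map.shared_code != nullptr) return map.shared_code;
    // try_shared: fall back to the unoptimized code, unless the shared
    // function info still points at a builtin (never compiled).
    return shared.code->is_builtin ? nullptr : shared.code;
  }
  // No entry matched this native context at all: compile in the runtime.
  return nullptr;
}
```

Note that the model leaves out the bookkeeping the assembly performed on a hit: storing the literals and code entry into the closure behind write barriers (`RecordWriteField`, `RecordWriteCodeEntryField`) and linking the closure into the native context's `OPTIMIZED_FUNCTIONS_LIST` before tail-calling the entry point.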