OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_IA32 | 5 #if V8_TARGET_ARCH_IA32 |
6 | 6 |
7 #include "src/code-factory.h" | 7 #include "src/code-factory.h" |
8 #include "src/codegen.h" | 8 #include "src/codegen.h" |
9 #include "src/deoptimizer.h" | 9 #include "src/deoptimizer.h" |
10 #include "src/full-codegen/full-codegen.h" | 10 #include "src/full-codegen/full-codegen.h" |
(...skipping 824 matching lines...)
835 // This simulates the initial call to bytecode handlers in the interpreter entry | 835 // This simulates the initial call to bytecode handlers in the interpreter entry |
836 // trampoline. The return will never actually be taken, but our stack walker | 836 // trampoline. The return will never actually be taken, but our stack walker |
837 // uses this address to determine whether a frame is interpreted. | 837 // uses this address to determine whether a frame is interpreted. |
838 __ Push(masm->isolate()->builtins()->InterpreterEntryTrampoline()); | 838 __ Push(masm->isolate()->builtins()->InterpreterEntryTrampoline()); |
839 | 839 |
840 Generate_EnterBytecodeDispatch(masm); | 840 Generate_EnterBytecodeDispatch(masm); |
841 } | 841 } |
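
The pushed trampoline address above is never actually returned to; it exists only so the stack walker can classify the frame. A minimal sketch of that classification idea in plain C++, with hypothetical names (V8's real StackFrame logic is more involved than a bare address-range check):

    #include <cstdint>

    // Hypothetical helper: a frame whose saved "return address" points into
    // the InterpreterEntryTrampoline is treated as an interpreted frame.
    bool FrameLooksInterpreted(uintptr_t return_address,
                               uintptr_t trampoline_start,
                               uintptr_t trampoline_end) {
      return return_address >= trampoline_start &&
             return_address < trampoline_end;
    }
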
842 | 842 |
843 | 843 |
844 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { | 844 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { |
| 845 // ----------- S t a t e ------------- |
| 846 // -- eax : argument count (preserved for callee) |
| 847 // -- edx : new target (preserved for callee) |
| 848 // -- edi : target function (preserved for callee) |
| 849 // ----------------------------------- |
| 850 // First look up code; maybe we don't need to compile! |
| 851 Label gotta_call_runtime, gotta_call_runtime_no_stack; |
| 852 Label maybe_call_runtime; |
| 853 Label try_shared; |
| 854 Label loop_top, loop_bottom; |
| 855 |
| 856 Register closure = edi; |
| 857 Register new_target = edx; |
| 858 Register argument_count = eax; |
| 859 |
| 860 __ push(argument_count); |
| 861 __ push(new_target); |
| 862 __ push(closure); |
| 863 |
| 864 Register map = argument_count; |
| 865 Register index = ebx; |
| 866 __ mov(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset)); |
| 867 __ mov(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset)); |
| 868 __ mov(index, FieldOperand(map, FixedArray::kLengthOffset)); |
| 869 __ cmp(index, Immediate(Smi::FromInt(2))); |
| 870 __ j(less, &gotta_call_runtime); |
| 871 |
| 872 // Find literals. |
| 873 // edx : native context |
| 874 // ebx : length / index |
| 875 // eax : optimized code map |
| 876 // stack[0] : closure |
| 877 // stack[4] : new target |
| 878 Register native_context = edx; |
| 879 __ mov(native_context, NativeContextOperand()); |
| 880 |
| 881 __ bind(&loop_top); |
| 882 Register temp = edi; |
| 883 |
| 884 // Does the native context match? |
| 885 __ mov(temp, FieldOperand(map, index, times_half_pointer_size, |
| 886 SharedFunctionInfo::kOffsetToPreviousContext)); |
| 887 __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset)); |
| 888 __ cmp(temp, native_context); |
| 889 __ j(not_equal, &loop_bottom); |
| 890 // OSR id set to none? |
| 891 __ mov(temp, FieldOperand(map, index, times_half_pointer_size, |
| 892 SharedFunctionInfo::kOffsetToPreviousOsrAstId)); |
| 893 const int bailout_id = BailoutId::None().ToInt(); |
| 894 __ cmp(temp, Immediate(Smi::FromInt(bailout_id))); |
| 895 __ j(not_equal, &loop_bottom); |
| 896 // Literals available? |
| 897 __ mov(temp, FieldOperand(map, index, times_half_pointer_size, |
| 898 SharedFunctionInfo::kOffsetToPreviousLiterals)); |
| 899 __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset)); |
| 900 __ JumpIfSmi(temp, &gotta_call_runtime); |
| 901 |
| 902 // Save the literals in the closure. |
| 903 __ mov(ecx, Operand(esp, 0)); |
| 904 __ mov(FieldOperand(ecx, JSFunction::kLiteralsOffset), temp); |
| 905 __ push(index); |
| 906 __ RecordWriteField(ecx, JSFunction::kLiteralsOffset, temp, index, |
| 907 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); |
| 908 __ pop(index); |
| 909 |
| 910 // Code available? |
| 911 Register entry = ecx; |
| 912 __ mov(entry, FieldOperand(map, index, times_half_pointer_size, |
| 913 SharedFunctionInfo::kOffsetToPreviousCachedCode)); |
| 914 __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset)); |
| 915 __ JumpIfSmi(entry, &maybe_call_runtime); |
| 916 |
| 917 // Found literals and code. Get them into the closure and return. |
| 918 __ pop(closure); |
| 919 // Store code entry in the closure. |
| 920 __ lea(entry, FieldOperand(entry, Code::kHeaderSize)); |
| 921 |
| 922 Label install_optimized_code_and_tailcall; |
| 923 __ bind(&install_optimized_code_and_tailcall); |
| 924 __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry); |
| 925 __ RecordWriteCodeEntryField(closure, entry, eax); |
| 926 |
| 927 // Link the closure into the optimized function list. |
| 928 // ecx : code entry |
| 929 // edx : native context |
| 930 // edi : closure |
| 931 __ mov(ebx, |
| 932 ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST)); |
| 933 __ mov(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), ebx); |
| 934 __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, ebx, eax, |
| 935 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); |
| 936 const int function_list_offset = |
| 937 Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST); |
| 938 __ mov(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), |
| 939 closure); |
| 940 // Save closure before the write barrier. |
| 941 __ mov(ebx, closure); |
| 942 __ RecordWriteContextSlot(native_context, function_list_offset, closure, eax, |
| 943 kDontSaveFPRegs); |
| 944 __ mov(closure, ebx); |
| 945 __ pop(new_target); |
| 946 __ pop(argument_count); |
| 947 __ jmp(entry); |
| 948 |
| 949 __ bind(&loop_bottom); |
| 950 __ sub(index, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength))); |
| 951 __ cmp(index, Immediate(Smi::FromInt(1))); |
| 952 __ j(greater, &loop_top); |
| 953 |
| 954 // We found neither literals nor code. |
| 955 __ jmp(&gotta_call_runtime); |
| 956 |
| 957 __ bind(&maybe_call_runtime); |
| 958 __ pop(closure); |
| 959 |
| 960 // Last possibility. Check the context-free optimized code map entry. |
| 961 __ mov(entry, FieldOperand(map, FixedArray::kHeaderSize + |
| 962 SharedFunctionInfo::kSharedCodeIndex)); |
| 963 __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset)); |
| 964 __ JumpIfSmi(entry, &try_shared); |
| 965 |
| 966 // Store code entry in the closure. |
| 967 __ lea(entry, FieldOperand(entry, Code::kHeaderSize)); |
| 968 __ jmp(&install_optimized_code_and_tailcall); |
| 969 |
| 970 __ bind(&try_shared); |
| 971 __ pop(new_target); |
| 972 __ pop(argument_count); |
| 973 // Is the full code valid? |
| 974 __ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset)); |
| 975 __ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset)); |
| 976 __ mov(ebx, FieldOperand(entry, Code::kFlagsOffset)); |
| 977 __ and_(ebx, Code::KindField::kMask); |
| 978 __ shr(ebx, Code::KindField::kShift); |
| 979 __ cmp(ebx, Immediate(Code::BUILTIN)); |
| 980 __ j(equal, &gotta_call_runtime_no_stack); |
| 981 // Yes, install the full code. |
| 982 __ lea(entry, FieldOperand(entry, Code::kHeaderSize)); |
| 983 __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry); |
| 984 __ RecordWriteCodeEntryField(closure, entry, ebx); |
| 985 __ jmp(entry); |
| 986 |
| 987 __ bind(&gotta_call_runtime); |
| 988 __ pop(closure); |
| 989 __ pop(new_target); |
| 990 __ pop(argument_count); |
| 991 __ bind(&gotta_call_runtime_no_stack); |
| 992 |
845 GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); | 993 GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); |
846 } | 994 } |
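
For reviewers following the new fast path: the optimized code map is a FixedArray hung off the SharedFunctionInfo. Slot kSharedCodeIndex holds context-independent code, and each later entry is a kEntryLength-sized group of slots referencing (weakly) the native context the code was compiled for, the OSR ast id, the literals array, and the cached optimized code. The assembly walks the entries from the end of the array. A rough C++ sketch of the same lookup follows; Entry and ReadEntry are hypothetical stand-ins, not V8's real accessors:

    // Sketch only, under the assumptions named above.
    struct Entry {
      WeakCell* context;   // native context the code was compiled for
      Smi* osr_ast_id;     // BailoutId::None() for a regular entry
      WeakCell* literals;  // literals array
      WeakCell* code;      // optimized Code object
    };

    Code* LookupOptimizedCode(FixedArray* map, Context* native_context) {
      // Mirror loop_top/loop_bottom: newest entries sit at the end.
      for (int i = map->length() - SharedFunctionInfo::kEntryLength;
           i >= SharedFunctionInfo::kEntriesStart;
           i -= SharedFunctionInfo::kEntryLength) {
        Entry e = ReadEntry(map, i);  // hypothetical accessor
        if (e.context->value() != native_context) continue;  // wrong context
        if (e.osr_ast_id->value() != BailoutId::None().ToInt())
          continue;  // skip OSR entries
        if (e.literals->cleared()) return nullptr;  // gotta_call_runtime
        if (e.code->cleared()) {
          // maybe_call_runtime: one last look at the context-free slot.
          WeakCell* shared =
              WeakCell::cast(map->get(SharedFunctionInfo::kSharedCodeIndex));
          return shared->cleared() ? nullptr : Code::cast(shared->value());
        }
        return Code::cast(e.code->value());  // found literals and code
      }
      return nullptr;  // caller tail-calls Runtime::kCompileLazy
    }

Note the asymmetry the assembly encodes: a cleared literals cell goes straight to gotta_call_runtime, while a cleared code cell still consults the context-free kSharedCodeIndex slot and, failing that, try_shared installs the SharedFunctionInfo's full code as long as it is not the lazy-compile builtin.
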
847 | 995 |
848 | 996 |
849 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { | 997 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { |
850 GenerateTailCallToReturnedCode(masm, | 998 GenerateTailCallToReturnedCode(masm, |
851 Runtime::kCompileOptimized_NotConcurrent); | 999 Runtime::kCompileOptimized_NotConcurrent); |
852 } | 1000 } |
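
Both builtins funnel into GenerateTailCallToReturnedCode: call the runtime function (which compiles and returns a Code object in eax), restore the caller's eax/edx/edi, and jump to the returned code's entry point so the original arguments reach the freshly compiled function untouched. Schematically, assuming the helper follows the usual ia32 pattern (a sketch, not this file's verbatim helper; __ is the file's ACCESS_MASM macro):

    static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
                                               Runtime::FunctionId function_id) {
      {
        FrameScope scope(masm, StackFrame::INTERNAL);
        __ SmiTag(eax);    // preserve argument count as a smi
        __ push(eax);
        __ push(edi);      // preserve target function
        __ push(edx);      // preserve new target
        __ push(edi);      // function is also the runtime call's argument
        __ CallRuntime(function_id, 1);
        __ mov(ebx, eax);  // returned Code object
        __ pop(edx);       // restore new target
        __ pop(edi);       // restore target function
        __ pop(eax);
        __ SmiUntag(eax);  // restore argument count
      }
      __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
      __ jmp(ebx);         // tail call into the returned code's instructions
    }
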
853 | 1001 |
854 | 1002 |
(...skipping 1867 matching lines...)
2722 // And "return" to the OSR entry point of the function. | 2870 // And "return" to the OSR entry point of the function. |
2723 __ ret(0); | 2871 __ ret(0); |
2724 } | 2872 } |
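
The ret(0) above is a disguised indirect jump: the elided code computed the OSR entry address and stored it over this builtin's own return-address slot, so the ret pops it straight into eip. The idiom, assuming the entry address ended up in eax:

    __ mov(Operand(esp, 0), eax);  // overwrite our own return address
    __ ret(0);                     // "return" = jump to the OSR entry point
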
2725 | 2873 |
2726 | 2874 |
2727 #undef __ | 2875 #undef __ |
2728 } // namespace internal | 2876 } // namespace internal |
2729 } // namespace v8 | 2877 } // namespace v8 |
2730 | 2878 |
2731 #endif // V8_TARGET_ARCH_IA32 | 2879 #endif // V8_TARGET_ARCH_IA32 |