Index: src/mips64/builtins-mips64.cc
diff --git a/src/mips64/builtins-mips64.cc b/src/mips64/builtins-mips64.cc
index 95174da2347409df77a2fb4130cb376882856b3a..a4b144ecc1aa82de51c93c189b0e831df3b146ca 100644
--- a/src/mips64/builtins-mips64.cc
+++ b/src/mips64/builtins-mips64.cc
@@ -1215,6 +1215,154 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : argument count (preserved for callee)
+  //  -- a3 : new target (preserved for callee)
+  //  -- a1 : target function (preserved for callee)
+  // -----------------------------------
+  // First lookup code, maybe we don't need to compile!
+  Label gotta_call_runtime, gotta_call_runtime_no_stack;
+  Label maybe_call_runtime;
+  Label try_shared;
+  Label loop_top, loop_bottom;
+
+  Register argument_count = a0;
+  Register closure = a1;
+  Register new_target = a3;
+  __ push(argument_count);
+  __ push(new_target);
+  __ push(closure);
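+  // The pushed values are restored before any tail call out of this builtin.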
+
+  Register map = a0;
+  Register index = a2;
+  __ ld(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ ld(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ ld(index, FieldMemOperand(map, FixedArray::kLengthOffset));
+  __ Branch(&gotta_call_runtime, lt, index, Operand(Smi::FromInt(2)));
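+  // Fewer than two slots in the optimized code map means there is nothing
+  // cached to install, so the runtime has to compile.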
+
+  // Find literals.
+  // a3 : native context
+  // a2 : length / index
+  // a0 : optimized code map
+  // stack[0] : new target
+  // stack[8] : closure
+  Register native_context = a3;
+  __ ld(native_context, NativeContextMemOperand());
+
+  __ bind(&loop_top);
+  Register temp = a1;
+  Register array_pointer = a5;
+
+  // Does the native context match?
+  __ SmiScale(at, index, kPointerSizeLog2);
+  __ Daddu(array_pointer, map, Operand(at));
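+  // array_pointer now addresses the current code map entry; index is a Smi,
+  // so SmiScale was used to turn it into a byte offset.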
+  __ ld(temp, FieldMemOperand(array_pointer,
+                              SharedFunctionInfo::kOffsetToPreviousContext));
+  __ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ Branch(&loop_bottom, ne, temp, Operand(native_context));
+  // OSR id set to none?
+  __ ld(temp, FieldMemOperand(array_pointer,
+                              SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+  const int bailout_id = BailoutId::None().ToInt();
+  __ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id)));
+  // Literals available?
+  __ ld(temp, FieldMemOperand(array_pointer,
+                              SharedFunctionInfo::kOffsetToPreviousLiterals));
+  __ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(temp, &gotta_call_runtime);
+
+  // Save the literals in the closure.
+  __ ld(a4, MemOperand(sp, 0));
+  __ sd(temp, FieldMemOperand(a4, JSFunction::kLiteralsOffset));
+  __ push(index);
+  __ RecordWriteField(a4, JSFunction::kLiteralsOffset, temp, index,
+                      kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ pop(index);
+
+  // Code available?
+  Register entry = a4;
+  __ ld(entry,
+        FieldMemOperand(array_pointer,
+                        SharedFunctionInfo::kOffsetToPreviousCachedCode));
+  __ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &maybe_call_runtime);
+
+  // Found literals and code. Get them into the closure and return.
+  __ pop(closure);
+  // Store code entry in the closure.
+  __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
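+  // entry now holds the raw code entry address, Code::kHeaderSize past the
+  // tagged Code object.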
+
+  Label install_optimized_code_and_tailcall;
+  __ bind(&install_optimized_code_and_tailcall);
+  __ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(closure, entry, a5);
+
+  // Link the closure into the optimized function list.
+  // a4 : code entry
+  // a3 : native context
+  // a1 : closure
+  __ ld(a5,
+        ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  __ sd(a5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, a5, a0,
+                      kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  const int function_list_offset =
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+  __ sd(closure,
+        ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  // Save closure before the write barrier.
+  __ mov(a5, closure);
+  __ RecordWriteContextSlot(native_context, function_list_offset, closure, a0,
+                            kRAHasNotBeenSaved, kDontSaveFPRegs);
+  __ mov(closure, a5);
+  __ pop(new_target);
+  __ pop(argument_count);
+  __ Jump(entry);
+
+  __ bind(&loop_bottom);
+  __ Dsubu(index, index,
+           Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+  __ Branch(&loop_top, gt, index, Operand(Smi::FromInt(1)));
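+  // Each entry occupies SharedFunctionInfo::kEntryLength slots; the map is
+  // walked from its end towards its start.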
+
+  // We found neither literals nor code.
+  __ jmp(&gotta_call_runtime);
+
+  __ bind(&maybe_call_runtime);
+  __ pop(closure);
+
+  // Last possibility. Check the context free optimized code map entry.
+  __ ld(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
+                                        SharedFunctionInfo::kSharedCodeIndex));
+  __ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &try_shared);
+
+  // Store code entry in the closure.
+  __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ jmp(&install_optimized_code_and_tailcall);
+
+  __ bind(&try_shared);
+  __ pop(new_target);
+  __ pop(argument_count);
+  // Is the full code valid?
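+  // A builtin here (e.g. this lazy-compile stub) means the function has no
+  // real compiled code yet, so the runtime must be called.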
+  __ ld(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+  __ ld(a5, FieldMemOperand(entry, Code::kFlagsOffset));
+  __ And(a5, a5, Operand(Code::KindField::kMask));
+  __ dsrl(a5, a5, Code::KindField::kShift);
+  __ Branch(&gotta_call_runtime_no_stack, eq, a5, Operand(Code::BUILTIN));
+  // Yes, install the full code.
+  __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(closure, entry, a5);
+  __ Jump(entry);
+
+  __ bind(&gotta_call_runtime);
+  __ pop(closure);
+  __ pop(new_target);
+  __ pop(argument_count);
+  __ bind(&gotta_call_runtime_no_stack);
   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
 }
@@ -1224,7 +1372,6 @@ void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
   GenerateTailCallToReturnedCode(masm,
                                  Runtime::kCompileOptimized_NotConcurrent);
 }
-
 void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
   GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
 }