OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_MIPS | 5 #if V8_TARGET_ARCH_MIPS |
6 | 6 |
7 #include "src/codegen.h" | 7 #include "src/codegen.h" |
8 #include "src/debug/debug.h" | 8 #include "src/debug/debug.h" |
9 #include "src/deoptimizer.h" | 9 #include "src/deoptimizer.h" |
10 #include "src/full-codegen/full-codegen.h" | 10 #include "src/full-codegen/full-codegen.h" |
(...skipping 1105 matching lines...)
1116 Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT); | 1116 Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT); |
1117 } | 1117 } |
1118 | 1118 |
1119 | 1119 |
1120 void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) { | 1120 void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) { |
1121 Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY); | 1121 Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY); |
1122 } | 1122 } |
1123 | 1123 |
1124 | 1124 |
1125 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { | 1125 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { |
| 1126 // ----------- S t a t e ------------- |
| 1127 // -- a3 : new target (preserved for callee) |
| 1128 // -- a1 : target function (preserved for callee) |
| 1129 // ----------------------------------- |
| 1130 // First, look up code; maybe we don't need to compile! |
| 1131 Label gotta_call_runtime, gotta_call_runtime_no_stack; |
| 1132 Label maybe_call_runtime; |
| 1133 Label try_shared; |
| 1134 Label loop_top, loop_bottom; |
| 1135 |
| 1136 Register closure = a1; |
| 1137 Register new_target = a3; |
| 1138 __ push(new_target); |
| 1139 __ push(closure); |
| 1140 |
| 1141 Register map = a0; |
| 1142 Register index = a2; |
| 1143 __ lw(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); |
| 1144 __ lw(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset)); |
| 1145 __ lw(index, FieldMemOperand(map, FixedArray::kLengthOffset)); |
| 1146 __ Branch(&gotta_call_runtime, lt, index, Operand(Smi::FromInt(2))); |
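| // The code map keeps context-independent code in its first slot and |
| // per-context entries of (context, code, literals, osr ast id) after |
| // it; a length below 2 (a tagged Smi compare) means nothing is cached. |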
| 1147 |
| 1148 // Find literals. |
| 1149 // a3 : native context |
| 1150 // a2 : length / index |
| 1151 // a0 : optimized code map |
| 1152 // stack[0] : closure |
| 1153 // stack[4] : new target |
| 1154 Register native_context = a3; |
| 1155 __ lw(native_context, NativeContextMemOperand()); |
| 1156 |
| 1157 __ bind(&loop_top); |
| 1158 Register temp = a1; |
| 1159 Register array_pointer = t1; |
| 1160 |
| 1161 // Does the native context match? |
| 1162 __ sll(at, index, kPointerSizeLog2 - kSmiTagSize); |
| 1163 __ Addu(array_pointer, map, Operand(at)); |
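| // index is a Smi, so kPointerSizeLog2 - kSmiTagSize turns it straight |
| // into a byte offset; array_pointer lands just past the current entry |
| // and the OffsetToPrevious* accessors reach back into it. |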
| 1164 __ lw(temp, FieldMemOperand(array_pointer, |
| 1165 SharedFunctionInfo::OffsetToPreviousContext())); |
| 1166 __ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset)); |
| 1167 __ Branch(&loop_bottom, ne, temp, Operand(native_context)); |
| 1168 // OSR id set to none? |
| 1169 __ lw(temp, FieldMemOperand(array_pointer, |
| 1170 SharedFunctionInfo::OffsetToPreviousOsrAstId())); |
| 1171 const int bailout_id = BailoutId::None().ToInt(); |
| 1172 __ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id))); |
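| // An entry with a real OSR AST id holds on-stack-replacement code, |
| // which cannot serve as the function's regular entry point. |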
| 1173 // Literals available? |
| 1174 __ lw(temp, FieldMemOperand(array_pointer, |
| 1175 SharedFunctionInfo::OffsetToPreviousLiterals())); |
| 1176 __ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset)); |
| 1177 __ JumpIfSmi(temp, &gotta_call_runtime); |
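| // A cleared WeakCell reads as a Smi, meaning the cached literals have |
| // died; let the runtime rebuild everything. |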
| 1178 |
| 1179 // Save the literals in the closure. |
| 1180 __ lw(t0, MemOperand(sp, 0)); |
| 1181 __ sw(temp, FieldMemOperand(t0, JSFunction::kLiteralsOffset)); |
| 1182 __ push(index); |
| 1183 __ RecordWriteField(t0, JSFunction::kLiteralsOffset, temp, index, |
| 1184 kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, |
| 1185 OMIT_SMI_CHECK); |
| 1186 __ pop(index); |
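| // index (a2) doubles as the barrier's scratch register, which |
| // RecordWriteField clobbers, hence the save/restore around it. |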
| 1187 |
| 1188 // Code available? |
| 1189 Register entry = t0; |
| 1190 __ lw(entry, |
| 1191 FieldMemOperand(array_pointer, |
| 1192 SharedFunctionInfo::OffsetToPreviousCachedCode())); |
| 1193 __ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset)); |
| 1194 __ JumpIfSmi(entry, &maybe_call_runtime); |
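| // A cleared code cell only rules out this context's entry; the |
| // context-independent slot may still hold code, so check it before |
| // falling back to the runtime. |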
| 1195 |
| 1196 // Found literals and code. Get them into the closure and return. |
| 1197 __ pop(closure); |
| 1198 // Store code entry in the closure. |
| 1199 __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); |
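| // Code::kHeaderSize - kHeapObjectTag turns the tagged Code pointer |
| // into the address of its first instruction. |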
| 1200 |
| 1201 Label install_optimized_code_and_tailcall; |
| 1202 __ bind(&install_optimized_code_and_tailcall); |
| 1203 __ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset)); |
| 1204 |
| 1205 // Link the closure into the optimized function list. |
| 1206 // t0 : code entry |
| 1207 // a3 : native context |
| 1208 // a1 : closure |
| 1209 __ lw(t1, |
| 1210 ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST)); |
| 1211 __ sw(t1, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset)); |
| 1212 __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, t1, a0, |
| 1213 kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, |
| 1214 OMIT_SMI_CHECK); |
| 1215 const int function_list_offset = |
| 1216 Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST); |
| 1217 __ sw(closure, |
| 1218 ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST)); |
| 1219 // Save the closure; RecordWriteContextSlot clobbers the value register. |
| 1220 __ mov(t1, closure); |
| 1221 __ RecordWriteContextSlot(native_context, function_list_offset, closure, a0, |
| 1222 kRAHasNotBeenSaved, kDontSaveFPRegs); |
| 1223 __ mov(closure, t1); |
| 1224 __ pop(new_target); |
| 1225 __ Jump(entry); |
| 1226 |
| 1227 __ bind(&loop_bottom); |
| 1228 __ Subu(index, index, |
| 1229 Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength))); |
| 1230 __ Branch(&loop_top, gt, index, Operand(Smi::FromInt(1))); |
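| // Walk the map from back to front, one kEntryLength-sized entry per |
| // iteration; Smi 1 marks the start of the entries, just past the |
| // context-independent slot. |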
| 1231 |
| 1232 // We found neither literals nor code. |
| 1233 __ jmp(&gotta_call_runtime); |
| 1234 |
| 1235 __ bind(&maybe_call_runtime); |
| 1236 __ pop(closure); |
| 1237 |
| 1238 // Last possibility: check the context-free optimized code map entry. |
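| // Slot 0 of the map holds a weak cell with code that is not tied to |
| // any particular native context. |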
| 1239 __ lw(entry, FieldMemOperand(map, FixedArray::kHeaderSize)); |
| 1240 __ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset)); |
| 1241 __ JumpIfSmi(entry, &try_shared); |
| 1242 |
| 1243 // Store code entry in the closure. |
| 1244 __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 1245 __ jmp(&install_optimized_code_and_tailcall); |
| 1246 |
| 1247 __ bind(&try_shared); |
| 1248 __ pop(new_target); |
| 1249 // Is the full code valid? |
| 1250 __ lw(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); |
| 1251 __ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset)); |
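| // Until first compilation the shared code is the lazy-compile builtin |
| // itself; only install it if it is real, non-builtin code. |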
| 1252 __ lw(t1, FieldMemOperand(entry, Code::kFlagsOffset)); |
| 1253 __ And(t1, t1, Operand(Code::KindField::kMask)); |
| 1254 __ srl(t1, t1, Code::KindField::kShift); |
| 1255 __ Branch(&gotta_call_runtime_no_stack, eq, t1, Operand(Code::BUILTIN)); |
| 1256 // Yes, install the full code. |
| 1257 __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 1258 __ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset)); |
| 1259 __ Jump(entry); |
| 1260 |
| 1261 __ bind(&gotta_call_runtime); |
| 1262 __ pop(closure); |
| 1263 __ pop(new_target); |
| 1264 __ bind(&gotta_call_runtime_no_stack); |
1126 CallRuntimePassFunction(masm, Runtime::kCompileLazy); | 1265 CallRuntimePassFunction(masm, Runtime::kCompileLazy); |
1127 GenerateTailCallToReturnedCode(masm); | 1266 GenerateTailCallToReturnedCode(masm); |
1128 } | 1267 } |
1129 | 1268 |
1130 | 1269 |
1131 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { | 1270 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { |
1132 CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent); | 1271 CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent); |
1133 GenerateTailCallToReturnedCode(masm); | 1272 GenerateTailCallToReturnedCode(masm); |
1134 } | 1273 } |
1135 | 1274 |
(...skipping 1403 matching lines...)
2539 } | 2678 } |
2540 } | 2679 } |
2541 | 2680 |
2542 | 2681 |
2543 #undef __ | 2682 #undef __ |
2544 | 2683 |
2545 } // namespace internal | 2684 } // namespace internal |
2546 } // namespace v8 | 2685 } // namespace v8 |
2547 | 2686 |
2548 #endif // V8_TARGET_ARCH_MIPS | 2687 #endif // V8_TARGET_ARCH_MIPS |