OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_MIPS64 | 5 #if V8_TARGET_ARCH_MIPS64 |
6 | 6 |
7 #include "src/codegen.h" | 7 #include "src/codegen.h" |
8 #include "src/debug/debug.h" | 8 #include "src/debug/debug.h" |
9 #include "src/deoptimizer.h" | 9 #include "src/deoptimizer.h" |
10 #include "src/full-codegen/full-codegen.h" | 10 #include "src/full-codegen/full-codegen.h" |
(...skipping 1214 matching lines...)
1225 // This simulates the initial call to bytecode handlers in the interpreter entry | 1225 // This simulates the initial call to bytecode handlers in the interpreter entry |
1226 // trampoline. The return will never actually be taken, but our stack walker | 1226 // trampoline. The return will never actually be taken, but our stack walker |
1227 // uses this address to determine whether a frame is interpreted. | 1227 // uses this address to determine whether a frame is interpreted. |
1228 __ li(ra, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline())); | 1228 __ li(ra, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline())); |
1229 | 1229 |
1230 Generate_EnterBytecodeDispatch(masm); | 1230 Generate_EnterBytecodeDispatch(masm); |
1231 } | 1231 } |
1232 | 1232 |
1233 | 1233 |
1234 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { | 1234 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { |
1235 // ----------- S t a t e ------------- | |
1236 // -- a3 : new target (preserved for callee) | |
1237 // -- a1 : target function (preserved for callee) | |
1238 // ----------------------------------- | |
1239 // First lookup code, maybe we don't need to compile! | |
1240 Label gotta_call_runtime, gotta_call_runtime_no_stack; | |
1241 Label maybe_call_runtime; | |
1242 Label try_shared; | |
1243 Label loop_top, loop_bottom; | |
1244 | |
1245 Register closure = a1; | |
1246 Register new_target = a3; | |
1247 __ push(new_target); | |
1248 __ push(closure); | |
1249 | |
1250 Register map = a0; | |
1251 Register index = a2; | |
1252 __ ld(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); | |
1253 __ ld(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset)); | |
1254 __ ld(index, FieldMemOperand(map, FixedArray::kLengthOffset)); | |
1255 __ Branch(&gotta_call_runtime, lt, index, Operand(Smi::FromInt(2))); | |
1256 | |
1257 // Find literals. | |
1258 // a3 : native context | |
1259 // a2 : length / index | |
1260 // a0 : optimized code map | |
1261 // stack[0] : closure |
1262 // stack[8] : new target |
1263 Register native_context = a3; | |
1264 __ ld(native_context, NativeContextMemOperand()); | |
1265 | |
1266 __ bind(&loop_top); | |
1267 Register temp = a1; | |
1268 Register array_pointer = a5; | |
1269 | |
1270 // Does the native context match? | |
1271 __ SmiScale(at, index, kPointerSizeLog2); | |
1272 __ Daddu(array_pointer, map, Operand(at)); | |
1273 __ ld(temp, FieldMemOperand(array_pointer, | |
1274 SharedFunctionInfo::OffsetToPreviousContext())); | |
1275 __ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset)); | |
1276 __ Branch(&loop_bottom, ne, temp, Operand(native_context)); | |
1277 // OSR id set to none? | |
1278 __ ld(temp, FieldMemOperand(array_pointer, | |
1279 SharedFunctionInfo::OffsetToPreviousOsrAstId())); | |
1280 const int bailout_id = BailoutId::None().ToInt(); | |
1281 __ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id))); | |
1282 // Literals available? | |
1283 __ ld(temp, FieldMemOperand(array_pointer, | |
1284 SharedFunctionInfo::OffsetToPreviousLiterals())); | |
1285 __ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset)); | |
1286 __ JumpIfSmi(temp, &gotta_call_runtime); | |
1287 | |
1288 // Save the literals in the closure. | |
1289 __ ld(a4, MemOperand(sp, 0)); | |
1290 __ sd(temp, FieldMemOperand(a4, JSFunction::kLiteralsOffset)); | |
1291 __ push(index); | |
1292 __ RecordWriteField(a4, JSFunction::kLiteralsOffset, temp, index, | |
1293 kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, | |
1294 OMIT_SMI_CHECK); | |
1295 __ pop(index); | |
1296 | |
1297 // Code available? | |
1298 Register entry = a4; | |
1299 __ ld(entry, | |
1300 FieldMemOperand(array_pointer, | |
1301 SharedFunctionInfo::OffsetToPreviousCachedCode())); | |
1302 __ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset)); | |
1303 __ JumpIfSmi(entry, &maybe_call_runtime); | |
1304 | |
1305 // Found literals and code. Get them into the closure and return. | |
1306 __ pop(closure); | |
1307 // Store code entry in the closure. | |
1308 __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
1309 | |
1310 Label install_optimized_code_and_tailcall; | |
1311 __ bind(&install_optimized_code_and_tailcall); | |
1312 __ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset)); | |
1313 __ RecordWriteCodeEntryField(closure, entry, a5); | |
1314 | |
1315 // Link the closure into the optimized function list. | |
1316 // a4 : code entry | |
1317 // a3 : native context | |
1318 // a1 : closure | |
1319 __ ld(a5, | |
1320 ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST)); | |
1321 __ sd(a5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset)); | |
1322 __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, a5, a0, | |
1323 kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, | |
1324 OMIT_SMI_CHECK); | |
1325 const int function_list_offset = | |
1326 Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST); | |
1327 __ sd(closure, | |
1328 ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST)); | |
1329 // Save closure before the write barrier. | |
1330 __ mov(a5, closure); | |
1331 __ RecordWriteContextSlot(native_context, function_list_offset, closure, a0, | |
1332 kRAHasNotBeenSaved, kDontSaveFPRegs); | |
1333 __ mov(closure, a5); | |
1334 __ pop(new_target); | |
1335 __ Jump(entry); | |
1336 | |
1337 __ bind(&loop_bottom); | |
1338 __ Dsubu(index, index, | |
1339 Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength))); | |
1340 __ Branch(&loop_top, gt, index, Operand(Smi::FromInt(1))); | |
1341 | |
1342 // We found neither literals nor code. | |
1343 __ jmp(&gotta_call_runtime); | |
1344 | |
1345 __ bind(&maybe_call_runtime); | |
1346 __ pop(closure); | |
1347 | |
1348 // Last possibility. Check the context-free optimized code map entry. |
1349 __ ld(entry, FieldMemOperand(map, FixedArray::kHeaderSize + | |
1350 SharedFunctionInfo::kSharedCodeIndex)); | |
1351 __ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset)); | |
1352 __ JumpIfSmi(entry, &try_shared); | |
1353 | |
1354 // Store code entry in the closure. | |
1355 __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
1356 __ jmp(&install_optimized_code_and_tailcall); | |
1357 | |
1358 __ bind(&try_shared); | |
1359 __ pop(new_target); | |
1360 // Is the full code valid? | |
1361 __ ld(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); | |
1362 __ ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset)); | |
1363 __ ld(a5, FieldMemOperand(entry, Code::kFlagsOffset)); | |
1364 __ And(a5, a5, Operand(Code::KindField::kMask)); | |
1365 __ dsrl(a5, a5, Code::KindField::kShift); | |
1366 __ Branch(&gotta_call_runtime_no_stack, eq, a5, Operand(Code::BUILTIN)); | |
1367 // Yes, install the full code. | |
1368 __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
1369 __ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset)); | |
1370 __ RecordWriteCodeEntryField(closure, entry, a5); | |
1371 __ Jump(entry); | |
1372 | |
1373 __ bind(&gotta_call_runtime); | |
1374 __ pop(closure); | |
1375 __ pop(new_target); | |
1376 __ bind(&gotta_call_runtime_no_stack); | |
1377 CallRuntimePassFunction(masm, Runtime::kCompileLazy); | 1235 CallRuntimePassFunction(masm, Runtime::kCompileLazy); |
1378 GenerateTailCallToReturnedCode(masm); | 1236 GenerateTailCallToReturnedCode(masm); |
1379 } | 1237 } |
1380 | 1238 |
1381 | 1239 |
1382 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { | 1240 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { |
1383 CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent); | 1241 CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent); |
1384 GenerateTailCallToReturnedCode(masm); | 1242 GenerateTailCallToReturnedCode(masm); |
1385 } | 1243 } |
1386 | 1244 |
(...skipping 1515 matching lines...)
2902 } | 2760 } |
2903 } | 2761 } |
2904 | 2762 |
2905 | 2763 |
2906 #undef __ | 2764 #undef __ |
2907 | 2765 |
2908 } // namespace internal | 2766 } // namespace internal |
2909 } // namespace v8 | 2767 } // namespace v8 |
2910 | 2768 |
2911 #endif // V8_TARGET_ARCH_MIPS64 | 2769 #endif // V8_TARGET_ARCH_MIPS64 |