OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_ARM | 5 #if V8_TARGET_ARCH_ARM |
6 | 6 |
7 #include "src/codegen.h" | 7 #include "src/codegen.h" |
8 #include "src/debug/debug.h" | 8 #include "src/debug/debug.h" |
9 #include "src/deoptimizer.h" | 9 #include "src/deoptimizer.h" |
10 #include "src/full-codegen/full-codegen.h" | 10 #include "src/full-codegen/full-codegen.h" |
(...skipping 1212 matching lines...)
1223 // This simulates the initial call to bytecode handlers in interpreter entry | 1223 // This simulates the initial call to bytecode handlers in interpreter entry |
1224 // trampoline. The return will never actually be taken, but our stack walker | 1224 // trampoline. The return will never actually be taken, but our stack walker |
1225 // uses this address to determine whether a frame is interpreted. | 1225 // uses this address to determine whether a frame is interpreted. |
1226 __ Move(lr, masm->isolate()->builtins()->InterpreterEntryTrampoline()); | 1226 __ Move(lr, masm->isolate()->builtins()->InterpreterEntryTrampoline()); |
1227 | 1227 |
1228 Generate_EnterBytecodeDispatch(masm); | 1228 Generate_EnterBytecodeDispatch(masm); |
1229 } | 1229 } |
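A side note on the Move(lr, ...) above: the value is never used as an actual return address; it is planted so that, when the stack walker later inspects the frame, the saved return address points into the InterpreterEntryTrampoline and the frame is classified as interpreted. A minimal, self-contained C++ model of that check (the types and names below are illustrative stand-ins, not V8's real declarations):

  #include <cstdint>

  // Instruction range occupied by a builtin, here the InterpreterEntryTrampoline.
  struct CodeRange {
    std::uintptr_t start;
    std::uintptr_t size;
  };

  // A frame counts as interpreted when its saved return address (the value
  // planted in lr above) falls inside the trampoline's instruction range.
  bool IsInterpretedFramePc(std::uintptr_t return_pc, const CodeRange& trampoline) {
    return return_pc >= trampoline.start &&
           return_pc < trampoline.start + trampoline.size;
  }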
1230 | 1230 |
1231 | 1231 |
1232 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { | 1232 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { |
| 1233 // ----------- S t a t e ------------- |
| 1234 // -- r0 : argument count (preserved for callee) |
| 1235 // -- r3 : new target (preserved for callee) |
| 1236 // -- r1 : target function (preserved for callee) |
| 1237 // ----------------------------------- |
| 1238 // First, look up code; maybe we don't need to compile! |
| 1239 Label gotta_call_runtime, gotta_call_runtime_no_stack; |
| 1240 Label maybe_call_runtime; |
| 1241 Label try_shared; |
| 1242 Label loop_top, loop_bottom; |
| 1243 |
| 1244 Register argument_count = r0; |
| 1245 Register closure = r1; |
| 1246 Register new_target = r3; |
| 1247 __ push(argument_count); |
| 1248 __ push(new_target); |
| 1249 __ push(closure); |
| 1250 |
| 1251 Register map = argument_count; |
| 1252 Register index = r2; |
| 1253 __ ldr(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); |
| 1254 __ ldr(map, |
| 1255 FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset)); |
| 1256 __ ldr(index, FieldMemOperand(map, FixedArray::kLengthOffset)); |
| 1257 __ cmp(index, Operand(Smi::FromInt(2))); |
| 1258 __ b(lt, &gotta_call_runtime); |
| 1259 |
| 1260 // Find literals. |
| 1261 // r3 : native context |
| 1262 // r2 : length / index |
| 1263 // r0 : optimized code map |
| 1264 // stack[0] : closure |
| 1265 // stack[4] : new target |
| 1266 Register native_context = r3; |
| 1267 __ ldr(native_context, NativeContextMemOperand()); |
| 1268 |
| 1269 __ bind(&loop_top); |
| 1270 Register temp = r1; |
| 1271 Register array_pointer = r5; |
| 1272 |
| 1273 // Does the native context match? |
| 1274 __ add(array_pointer, map, Operand::PointerOffsetFromSmiKey(index)); |
| 1275 __ ldr(temp, FieldMemOperand(array_pointer, |
| 1276 SharedFunctionInfo::OffsetToPreviousContext())); |
| 1277 __ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset)); |
| 1278 __ cmp(temp, native_context); |
| 1279 __ b(ne, &loop_bottom); |
| 1280 // OSR id set to none? |
| 1281 __ ldr(temp, FieldMemOperand(array_pointer, |
| 1282 SharedFunctionInfo::OffsetToPreviousOsrAstId())); |
| 1283 const int bailout_id = BailoutId::None().ToInt(); |
| 1284 __ cmp(temp, Operand(Smi::FromInt(bailout_id))); |
| 1285 __ b(ne, &loop_bottom); |
| 1286 // Literals available? |
| 1287 __ ldr(temp, FieldMemOperand(array_pointer, |
| 1288 SharedFunctionInfo::OffsetToPreviousLiterals())); |
| 1289 __ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset)); |
| 1290 __ JumpIfSmi(temp, &gotta_call_runtime); |
| 1291 |
| 1292 // Save the literals in the closure. |
| 1293 __ ldr(r4, MemOperand(sp, 0)); |
| 1294 __ str(temp, FieldMemOperand(r4, JSFunction::kLiteralsOffset)); |
| 1295 __ push(index); |
| 1296 __ RecordWriteField(r4, JSFunction::kLiteralsOffset, temp, index, |
| 1297 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, |
| 1298 OMIT_SMI_CHECK); |
| 1299 __ pop(index); |
| 1300 |
| 1301 // Code available? |
| 1302 Register entry = r4; |
| 1303 __ ldr(entry, |
| 1304 FieldMemOperand(array_pointer, |
| 1305 SharedFunctionInfo::OffsetToPreviousCachedCode())); |
| 1306 __ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset)); |
| 1307 __ JumpIfSmi(entry, &maybe_call_runtime); |
| 1308 |
| 1309 // Found literals and code. Get them into the closure and return. |
| 1310 __ pop(closure); |
| 1311 // Store code entry in the closure. |
| 1312 __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 1313 |
| 1314 Label install_optimized_code_and_tailcall; |
| 1315 __ bind(&install_optimized_code_and_tailcall); |
| 1316 __ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset)); |
| 1317 __ RecordWriteCodeEntryField(closure, entry, r5); |
| 1318 |
| 1319 // Link the closure into the optimized function list. |
| 1320 // r4 : code entry |
| 1321 // r3 : native context |
| 1322 // r1 : closure |
| 1323 __ ldr(r5, |
| 1324 ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST)); |
| 1325 __ str(r5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset)); |
| 1326 __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r5, r0, |
| 1327 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, |
| 1328 OMIT_SMI_CHECK); |
| 1329 const int function_list_offset = |
| 1330 Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST); |
| 1331 __ str(closure, |
| 1332 ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST)); |
| 1333 // Save closure before the write barrier. |
| 1334 __ mov(r5, closure); |
| 1335 __ RecordWriteContextSlot(native_context, function_list_offset, closure, r0, |
| 1336 kLRHasNotBeenSaved, kDontSaveFPRegs); |
| 1337 __ mov(closure, r5); |
| 1338 __ pop(new_target); |
| 1339 __ pop(argument_count); |
| 1340 __ Jump(entry); |
| 1341 |
| 1342 __ bind(&loop_bottom); |
| 1343 __ sub(index, index, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength))); |
| 1344 __ cmp(index, Operand(Smi::FromInt(1))); |
| 1345 __ b(gt, &loop_top); |
| 1346 |
| 1347 // We found neither literals nor code. |
| 1348 __ jmp(&gotta_call_runtime); |
| 1349 |
| 1350 __ bind(&maybe_call_runtime); |
| 1351 __ pop(closure); |
| 1352 |
| 1353 // Last possibility. Check the context free optimized code map entry. |
| 1354 __ ldr(entry, FieldMemOperand(map, FixedArray::kHeaderSize + |
| 1355 SharedFunctionInfo::kSharedCodeIndex)); |
| 1356 __ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset)); |
| 1357 __ JumpIfSmi(entry, &try_shared); |
| 1358 |
| 1359 // Store code entry in the closure. |
| 1360 __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 1361 __ jmp(&install_optimized_code_and_tailcall); |
| 1362 |
| 1363 __ bind(&try_shared); |
| 1364 __ pop(new_target); |
| 1365 __ pop(argument_count); |
| 1366 // Is the full code valid? |
| 1367 __ ldr(entry, |
| 1368 FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); |
| 1369 __ ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset)); |
| 1370 __ ldr(r5, FieldMemOperand(entry, Code::kFlagsOffset)); |
| 1371 __ and_(r5, r5, Operand(Code::KindField::kMask)); |
| 1372 __ mov(r5, Operand(r5, LSR, Code::KindField::kShift)); |
| 1373 __ cmp(r5, Operand(Code::BUILTIN)); |
| 1374 __ b(eq, &gotta_call_runtime_no_stack); |
| 1375 // Yes, install the full code. |
| 1376 __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 1377 __ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset)); |
| 1378 __ RecordWriteCodeEntryField(closure, entry, r5); |
| 1379 __ Jump(entry); |
| 1380 |
| 1381 __ bind(&gotta_call_runtime); |
| 1382 __ pop(closure); |
| 1383 __ pop(new_target); |
| 1384 __ pop(argument_count); |
| 1385 __ bind(&gotta_call_runtime_no_stack); |
1233 GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); | 1386 GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); |
1234 } | 1387 } |
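To summarize the new fast path as a whole: before falling back to Runtime::kCompileLazy, the builtin walks the SharedFunctionInfo's optimized code map from the last entry backwards, looking for an entry whose native context matches, whose OSR ast id is BailoutId::None(), and whose literals weak cell is still alive. It installs those literals on the closure and, if the cached code weak cell is alive as well, installs that code, links the closure into the context's optimized-functions list, and jumps to it; if only the literals were found, it tries the context-independent kSharedCodeIndex slot and finally the SharedFunctionInfo's own code (as long as it is not a builtin) before giving up. A rough, self-contained C++ model of the map walk, using simplified stand-in types rather than the real V8 classes:

  #include <cstddef>
  #include <vector>

  struct NativeContext {};  // stand-in for a native context
  struct LiteralsArray {};  // stand-in for a literals array
  struct Code {};           // stand-in for a code object

  // One logical entry of the optimized code map; weak cells are modelled as
  // nullable pointers (nullptr == cleared by the GC).
  struct CodeMapEntry {
    const NativeContext* context;  // weak
    LiteralsArray* literals;       // weak
    Code* code;                    // weak
    int osr_ast_id;                // kNoOsrAstId plays the role of BailoutId::None()
  };
  constexpr int kNoOsrAstId = -1;

  struct FastPathResult {
    LiteralsArray* literals = nullptr;  // store into the closure when found
    Code* code = nullptr;               // install on the closure and jump to when found
    bool call_runtime = false;          // cleared literals: recompile via the runtime
  };

  // Walk the map from the end, mirroring the loop_top/loop_bottom loop above.
  FastPathResult LookupOptimizedCodeMap(const std::vector<CodeMapEntry>& map,
                                        const NativeContext* native_context) {
    FastPathResult result;
    for (std::size_t i = map.size(); i-- > 0;) {
      const CodeMapEntry& entry = map[i];
      if (entry.context != native_context) continue;  // wrong native context
      if (entry.osr_ast_id != kNoOsrAstId) continue;  // OSR entry, skip it
      if (entry.literals == nullptr) {                // literals were collected
        result.call_runtime = true;
        return result;
      }
      result.literals = entry.literals;
      result.code = entry.code;  // may be nullptr; caller then tries the shared slots
      return result;
    }
    result.call_runtime = true;  // nothing matched: go to the runtime
    return result;
  }

Note that in the assembly a cleared code cell does not go straight to the runtime: maybe_call_runtime first consults the map's context-independent kSharedCodeIndex slot, and try_shared then accepts the SharedFunctionInfo's code unless its kind is Code::BUILTIN.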
1235 | 1388 |
1236 | 1389 |
1237 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { | 1390 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { |
1238 GenerateTailCallToReturnedCode(masm, | 1391 GenerateTailCallToReturnedCode(masm, |
1239 Runtime::kCompileOptimized_NotConcurrent); | 1392 Runtime::kCompileOptimized_NotConcurrent); |
1240 } | 1393 } |
1241 | 1394 |
1242 | 1395 |
(...skipping 1379 matching lines...)
2622 } | 2775 } |
2623 } | 2776 } |
2624 | 2777 |
2625 | 2778 |
2626 #undef __ | 2779 #undef __ |
2627 | 2780 |
2628 } // namespace internal | 2781 } // namespace internal |
2629 } // namespace v8 | 2782 } // namespace v8 |
2630 | 2783 |
2631 #endif // V8_TARGET_ARCH_ARM | 2784 #endif // V8_TARGET_ARCH_ARM |