OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_ARM | 5 #if V8_TARGET_ARCH_ARM |
6 | 6 |
7 #include "src/codegen.h" | 7 #include "src/codegen.h" |
8 #include "src/debug/debug.h" | 8 #include "src/debug/debug.h" |
9 #include "src/deoptimizer.h" | 9 #include "src/deoptimizer.h" |
10 #include "src/full-codegen/full-codegen.h" | 10 #include "src/full-codegen/full-codegen.h" |
(...skipping 1210 matching lines...)
1221 // This simulates the initial call to bytecode handlers in interpreter entry | 1221 // This simulates the initial call to bytecode handlers in interpreter entry |
1222 // trampoline. The return will never actually be taken, but our stack walker | 1222 // trampoline. The return will never actually be taken, but our stack walker |
1223 // uses this address to determine whether a frame is interpreted. | 1223 // uses this address to determine whether a frame is interpreted. |
1224 __ Move(lr, masm->isolate()->builtins()->InterpreterEntryTrampoline()); | 1224 __ Move(lr, masm->isolate()->builtins()->InterpreterEntryTrampoline()); |
1225 | 1225 |
1226 Generate_EnterBytecodeDispatch(masm); | 1226 Generate_EnterBytecodeDispatch(masm); |
1227 } | 1227 } |
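The comment just above the __ Move(lr, ...) explains that the return address loaded into lr is never actually returned to; it only marks the frame so the stack walker can recognize it. A minimal sketch of how a walker could use that marker, assuming it has the InterpreterEntryTrampoline's Code object at hand (illustrative only; this is not V8's actual frame-walking code):

// Illustrative only -- not V8's real stack walker. Assumes `Address` is a
// byte pointer and `trampoline` is the InterpreterEntryTrampoline Code object.
static bool FrameLooksInterpreted(Address return_address, Code* trampoline) {
  Address start = trampoline->instruction_start();
  Address end = start + trampoline->instruction_size();
  // A saved return address inside the trampoline means the frame was entered
  // through the bytecode dispatch path, so treat it as interpreted.
  return return_address >= start && return_address < end;
}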
1228 | 1228 |
1229 | 1229 |
1230 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { | 1230 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { |
1231 // ----------- S t a t e ------------- | |
1232 // -- r0 : argument count (preserved for callee) | |
1233 // -- r3 : new target (preserved for callee) | |
1234 // -- r1 : target function (preserved for callee) | |
1235 // ----------------------------------- | |
1236 // First lookup code, maybe we don't need to compile! | |
1237 Label gotta_call_runtime, gotta_call_runtime_no_stack; | |
1238 Label maybe_call_runtime; | |
1239 Label try_shared; | |
1240 Label loop_top, loop_bottom; | |
1241 | |
1242 Register argument_count = r0; | |
1243 Register closure = r1; | |
1244 Register new_target = r3; | |
1245 __ push(argument_count); | |
1246 __ push(new_target); | |
1247 __ push(closure); | |
1248 | |
1249 Register map = argument_count; | |
1250 Register index = r2; | |
1251 __ ldr(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); | |
1252 __ ldr(map, | |
1253 FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset)); | |
1254 __ ldr(index, FieldMemOperand(map, FixedArray::kLengthOffset)); | |
1255 __ cmp(index, Operand(Smi::FromInt(2))); | |
1256 __ b(lt, &gotta_call_runtime); | |
1257 | |
1258 // Find literals. | |
1259 // r3 : native context | |
1260 // r2 : length / index | |
1261 // r0 : optimized code map | |
1262 // stack[0] : new target | |
1263 // stack[4] : closure | |
1264 Register native_context = r3; | |
1265 __ ldr(native_context, NativeContextMemOperand()); | |
1266 | |
1267 __ bind(&loop_top); | |
1268 Register temp = r1; | |
1269 Register array_pointer = r5; | |
1270 | |
1271 // Does the native context match? | |
1272 __ add(array_pointer, map, Operand::PointerOffsetFromSmiKey(index)); | |
1273 __ ldr(temp, FieldMemOperand(array_pointer, | |
1274 SharedFunctionInfo::kOffsetToPreviousContext)); | |
1275 __ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset)); | |
1276 __ cmp(temp, native_context); | |
1277 __ b(ne, &loop_bottom); | |
1278 // OSR id set to none? | |
1279 __ ldr(temp, FieldMemOperand(array_pointer, | |
1280 SharedFunctionInfo::kOffsetToPreviousOsrAstId)); | |
1281 const int bailout_id = BailoutId::None().ToInt(); | |
1282 __ cmp(temp, Operand(Smi::FromInt(bailout_id))); | |
1283 __ b(ne, &loop_bottom); | |
1284 // Literals available? | |
1285 __ ldr(temp, FieldMemOperand(array_pointer, | |
1286 SharedFunctionInfo::kOffsetToPreviousLiterals)); | |
1287 __ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset)); | |
1288 __ JumpIfSmi(temp, &gotta_call_runtime); | |
1289 | |
1290 // Save the literals in the closure. | |
1291 __ ldr(r4, MemOperand(sp, 0)); | |
1292 __ str(temp, FieldMemOperand(r4, JSFunction::kLiteralsOffset)); | |
1293 __ push(index); | |
1294 __ RecordWriteField(r4, JSFunction::kLiteralsOffset, temp, index, | |
1295 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, | |
1296 OMIT_SMI_CHECK); | |
1297 __ pop(index); | |
1298 | |
1299 // Code available? | |
1300 Register entry = r4; | |
1301 __ ldr(entry, | |
1302 FieldMemOperand(array_pointer, | |
1303 SharedFunctionInfo::kOffsetToPreviousCachedCode)); | |
1304 __ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset)); | |
1305 __ JumpIfSmi(entry, &maybe_call_runtime); | |
1306 | |
1307 // Found literals and code. Get them into the closure and return. | |
1308 __ pop(closure); | |
1309 // Store code entry in the closure. | |
1310 __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
1311 | |
1312 Label install_optimized_code_and_tailcall; | |
1313 __ bind(&install_optimized_code_and_tailcall); | |
1314 __ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset)); | |
1315 __ RecordWriteCodeEntryField(closure, entry, r5); | |
1316 | |
1317 // Link the closure into the optimized function list. | |
1318 // r4 : code entry | |
1319 // r3 : native context | |
1320 // r1 : closure | |
1321 __ ldr(r5, | |
1322 ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST)); | |
1323 __ str(r5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset)); | |
1324 __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r5, r0, | |
1325 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, | |
1326 OMIT_SMI_CHECK); | |
1327 const int function_list_offset = | |
1328 Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST); | |
1329 __ str(closure, | |
1330 ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST)); | |
1331 // Save closure before the write barrier. | |
1332 __ mov(r5, closure); | |
1333 __ RecordWriteContextSlot(native_context, function_list_offset, closure, r0, | |
1334 kLRHasNotBeenSaved, kDontSaveFPRegs); | |
1335 __ mov(closure, r5); | |
1336 __ pop(new_target); | |
1337 __ pop(argument_count); | |
1338 __ Jump(entry); | |
1339 | |
1340 __ bind(&loop_bottom); | |
1341 __ sub(index, index, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength))); | |
1342 __ cmp(index, Operand(Smi::FromInt(1))); | |
1343 __ b(gt, &loop_top); | |
1344 | |
1345 // We found neither literals nor code. | |
1346 __ jmp(&gotta_call_runtime); | |
1347 | |
1348 __ bind(&maybe_call_runtime); | |
1349 __ pop(closure); | |
1350 | |
1351 // Last possibility. Check the context free optimized code map entry. | |
1352 __ ldr(entry, FieldMemOperand(map, FixedArray::kHeaderSize + | |
1353 SharedFunctionInfo::kSharedCodeIndex)); | |
1354 __ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset)); | |
1355 __ JumpIfSmi(entry, &try_shared); | |
1356 | |
1357 // Store code entry in the closure. | |
1358 __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
1359 __ jmp(&install_optimized_code_and_tailcall); | |
1360 | |
1361 __ bind(&try_shared); | |
1362 __ pop(new_target); | |
1363 __ pop(argument_count); | |
1364 // Is the full code valid? | |
1365 __ ldr(entry, | |
1366 FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); | |
1367 __ ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset)); | |
1368 __ ldr(r5, FieldMemOperand(entry, Code::kFlagsOffset)); | |
1369 __ and_(r5, r5, Operand(Code::KindField::kMask)); | |
1370 __ mov(r5, Operand(r5, LSR, Code::KindField::kShift)); | |
1371 __ cmp(r5, Operand(Code::BUILTIN)); | |
1372 __ b(eq, &gotta_call_runtime_no_stack); | |
1373 // Yes, install the full code. | |
1374 __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
1375 __ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset)); | |
1376 __ RecordWriteCodeEntryField(closure, entry, r5); | |
1377 __ Jump(entry); | |
1378 | |
1379 __ bind(&gotta_call_runtime); | |
1380 __ pop(closure); | |
1381 __ pop(new_target); | |
1382 __ pop(argument_count); | |
1383 __ bind(&gotta_call_runtime_no_stack); | |
1384 GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); | 1231 GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); |
1385 } | 1232 } |
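The left (OLD) column above deletes the hand-written fast path that searched the SharedFunctionInfo's optimized code map before giving up and calling the runtime; the right (NEW) column keeps only the runtime tail call. Below is a rough C++-level restatement of the deleted assembly's control flow, offered as a sketch only: ContextAt, OsrAstIdAt, LiteralsAt, CachedCodeAt, InstallOptimizedCode, and TrySharedCodeThenFullCode are assumed names standing in for raw FixedArray reads and the register-level install/link sequence, not real V8 functions.

// Illustrative pseudocode, not code that exists in V8: a C++-level sketch of
// the assembly fast path removed above. All helpers are assumed names.
Code* TryGetCachedCodeForCompileLazy(JSFunction* closure,
                                     Context* native_context) {
  FixedArray* map = closure->shared()->optimized_code_map();
  // Walk the per-native-context entries from the back of the map.
  for (int i = map->length() - SharedFunctionInfo::kEntryLength; i > 0;
       i -= SharedFunctionInfo::kEntryLength) {
    if (ContextAt(map, i) != native_context) continue;  // different context
    if (OsrAstIdAt(map, i) != BailoutId::None().ToInt()) continue;  // OSR code
    Object* literals = LiteralsAt(map, i);  // a cleared WeakCell reads as a Smi
    if (literals->IsSmi()) return nullptr;  // no literals: go to the runtime
    closure->set_literals(LiteralsArray::cast(literals));
    Object* code = CachedCodeAt(map, i);
    if (!code->IsSmi()) {
      // Install the cached optimized code in the closure, link the closure
      // into the native context's OPTIMIZED_FUNCTIONS_LIST, and enter it.
      return InstallOptimizedCode(closure, Code::cast(code), native_context);
    }
    // Literals were found but the per-context code was cleared: try the
    // context-independent map slot, then the SharedFunctionInfo's own code
    // (unless that code is a builtin).
    return TrySharedCodeThenFullCode(closure, map, native_context);
  }
  return nullptr;  // nothing usable: tail-call Runtime::kCompileLazy
}

A null result corresponds to the gotta_call_runtime label: the builtin restores its saved registers and falls through to GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy), which is all that remains in the new version.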
1386 | 1233 |
1387 | 1234 |
1388 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { | 1235 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { |
1389 GenerateTailCallToReturnedCode(masm, | 1236 GenerateTailCallToReturnedCode(masm, |
1390 Runtime::kCompileOptimized_NotConcurrent); | 1237 Runtime::kCompileOptimized_NotConcurrent); |
1391 } | 1238 } |
1392 | 1239 |
1393 | 1240 |
(...skipping 1379 matching lines...)
2773 } | 2620 } |
2774 } | 2621 } |
2775 | 2622 |
2776 | 2623 |
2777 #undef __ | 2624 #undef __ |
2778 | 2625 |
2779 } // namespace internal | 2626 } // namespace internal |
2780 } // namespace v8 | 2627 } // namespace v8 |
2781 | 2628 |
2782 #endif // V8_TARGET_ARCH_ARM | 2629 #endif // V8_TARGET_ARCH_ARM |