OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_S390 | 5 #if V8_TARGET_ARCH_S390 |
6 | 6 |
7 #include "src/codegen.h" | 7 #include "src/codegen.h" |
8 #include "src/debug/debug.h" | 8 #include "src/debug/debug.h" |
9 #include "src/deoptimizer.h" | 9 #include "src/deoptimizer.h" |
10 #include "src/full-codegen/full-codegen.h" | 10 #include "src/full-codegen/full-codegen.h" |
(...skipping 1327 matching lines...)
1338 | 1338 |
1339 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { | 1339 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { |
1340 // ----------- S t a t e ------------- | 1340 // ----------- S t a t e ------------- |
1341 // -- r2 : argument count (preserved for callee) | 1341 // -- r2 : argument count (preserved for callee) |
1342 // -- r5 : new target (preserved for callee) | 1342 // -- r5 : new target (preserved for callee) |
1343 // -- r3 : target function (preserved for callee) | 1343 // -- r3 : target function (preserved for callee) |
1344 // ----------------------------------- | 1344 // ----------------------------------- |
1345 // First, look up code; maybe we don't need to compile! | 1345 // First, look up code; maybe we don't need to compile! |
1346 Label gotta_call_runtime; | 1346 Label gotta_call_runtime; |
1347 Label try_shared; | 1347 Label try_shared; |
| 1348 Label loop_top, loop_bottom; |
1348 | 1349 |
1349 Register closure = r3; | 1350 Register closure = r3; |
| 1351 Register map = r8; |
1350 Register index = r4; | 1352 Register index = r4; |
1351 | 1353 |
1352 // Do we have a valid feedback vector? | 1354 // Do we have a valid feedback vector? |
1353 __ LoadP(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset)); | 1355 __ LoadP(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset)); |
1354 __ LoadP(index, FieldMemOperand(index, Cell::kValueOffset)); | 1356 __ LoadP(index, FieldMemOperand(index, Cell::kValueOffset)); |
1355 __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime); | 1357 __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime); |
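
The guard above bails out to the runtime whenever the closure has no feedback vector yet. A pseudocode-style C++ sketch of the runtime-level check the emitted code performs; the accessors here are assumed simplifications, not V8's actual API:

    // Sketch only: feedback_vector_cell() / IsUndefined() are hypothetical helpers.
    Object* vector = closure->feedback_vector_cell()->value();
    if (vector->IsUndefined()) goto gotta_call_runtime;  // no vector yet, must compile
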
1356 | 1358 |
1357 // Is optimized code available in the feedback vector? | 1359 __ LoadP(map, |
| 1360 FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); |
| 1361 __ LoadP(map, |
| 1362 FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset)); |
| 1363 __ LoadP(index, FieldMemOperand(map, FixedArray::kLengthOffset)); |
| 1364 __ CmpSmiLiteral(index, Smi::FromInt(2), r0); |
| 1365 __ blt(&try_shared); |
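
The new version replaces the single optimized-code slot in the feedback vector with a search of the SharedFunctionInfo's optimized code map. Before entering the loop it compares the map's length against Smi 2; anything shorter holds no (context, code) entries. A rough sketch, with illustrative helper names:

    // Sketch: optimized_code_map() is an assumed accessor; the length is
    // Smi-tagged in the emitted code, shown untagged here for readability.
    FixedArray* code_map = shared_info->optimized_code_map();
    int index = code_map->length();
    if (index < 2) goto try_shared;  // map has no cached entries
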
| 1366 |
 | 1367 // Find cached code in the optimized code map. |
| 1368 // r9 : native context |
| 1369 // r4 : length / index |
| 1370 // r8 : optimized code map |
| 1371 // r5 : new target |
| 1372 // r3 : closure |
| 1373 Register native_context = r9; |
| 1374 __ LoadP(native_context, NativeContextMemOperand()); |
| 1375 |
| 1376 __ bind(&loop_top); |
| 1377 Register temp = r1; |
| 1378 Register array_pointer = r7; |
| 1379 |
| 1380 // Does the native context match? |
| 1381 __ SmiToPtrArrayOffset(array_pointer, index); |
| 1382 __ AddP(array_pointer, map, array_pointer); |
| 1383 __ LoadP(temp, FieldMemOperand(array_pointer, |
| 1384 SharedFunctionInfo::kOffsetToPreviousContext)); |
| 1385 __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset)); |
| 1386 __ CmpP(temp, native_context); |
| 1387 __ bne(&loop_bottom, Label::kNear); |
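
Each loop iteration addresses one entry of the code map (SmiToPtrArrayOffset scales the Smi index to a byte offset) and compares the entry's cached context, held through a WeakCell, against the current native context. Approximately, with get()/value() standing in for the raw field loads:

    // Sketch of one context-match check; the WeakCell may have been cleared.
    Object* cell = code_map->get(index + SharedFunctionInfo::kOffsetToPreviousContext);
    Object* cached_context = WeakCell::cast(cell)->value();
    if (cached_context != native_context) goto loop_bottom;  // try next entry
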
| 1388 |
| 1389 // Code available? |
1358 Register entry = r6; | 1390 Register entry = r6; |
1359 __ LoadP(entry, FieldMemOperand(index, FeedbackVector::kOptimizedCodeIndex * | 1391 __ LoadP(entry, |
1360 kPointerSize + | 1392 FieldMemOperand(array_pointer, |
1361 FeedbackVector::kHeaderSize)); | 1393 SharedFunctionInfo::kOffsetToPreviousCachedCode)); |
1362 __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset)); | 1394 __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset)); |
1363 __ JumpIfSmi(entry, &try_shared); | 1395 __ JumpIfSmi(entry, &try_shared); |
1364 | 1396 |
| 1397 // Found code. Get it into the closure and return. |
1365 // Store code entry in the closure. | 1398 // Store code entry in the closure. |
1366 __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); | 1399 __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); |
1367 __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0); | 1400 __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0); |
1368 __ RecordWriteCodeEntryField(closure, entry, r7); | 1401 __ RecordWriteCodeEntryField(closure, entry, r7); |
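
A live code object found in the map is installed directly into the closure: the tagged Code pointer is converted to the address of its first instruction (header size minus the heap-object tag) and written at kCodeEntryOffset, with a write barrier so the GC tracks the new reference. In effect:

    // Sketch: set_code_entry() is a hypothetical setter for kCodeEntryOffset.
    Address entry_addr =
        reinterpret_cast<Address>(code) + Code::kHeaderSize - kHeapObjectTag;
    closure->set_code_entry(entry_addr);
    RecordWriteCodeEntry(closure, entry_addr);  // GC write barrier
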
1369 | 1402 |
1370 // Load native context into r8. | |
1371 Register native_context = r8; | |
1372 __ LoadP(native_context, NativeContextMemOperand()); | |
1373 | |
1374 // Link the closure into the optimized function list. | 1403 // Link the closure into the optimized function list. |
| 1404 // r6 : code entry |
 | 1405 // r9 : native context |
| 1406 // r3 : closure |
1375 __ LoadP( | 1407 __ LoadP( |
1376 r7, ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST)); | 1408 r7, ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST)); |
1377 __ StoreP(r7, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset), | 1409 __ StoreP(r7, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset), |
1378 r0); | 1410 r0); |
1379 __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r7, r4, | 1411 __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r7, temp, |
1380 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, | 1412 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, |
1381 OMIT_SMI_CHECK); | 1413 OMIT_SMI_CHECK); |
1382 const int function_list_offset = | 1414 const int function_list_offset = |
1383 Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST); | 1415 Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST); |
1384 __ StoreP( | 1416 __ StoreP( |
1385 closure, | 1417 closure, |
1386 ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0); | 1418 ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0); |
1387 // Save closure before the write barrier. | 1419 // Save closure before the write barrier. |
1388 __ LoadRR(r7, closure); | 1420 __ LoadRR(r7, closure); |
1389 __ RecordWriteContextSlot(native_context, function_list_offset, r7, r4, | 1421 __ RecordWriteContextSlot(native_context, function_list_offset, r7, temp, |
1390 kLRHasNotBeenSaved, kDontSaveFPRegs); | 1422 kLRHasNotBeenSaved, kDontSaveFPRegs); |
1391 __ JumpToJSEntry(entry); | 1423 __ JumpToJSEntry(entry); |
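
Before tail-calling the optimized code, the closure is pushed onto the head of the native context's OPTIMIZED_FUNCTIONS_LIST; both stores need write barriers, and the closure is copied to r7 first because the barrier helper clobbers its register arguments. The list push, sketched with assumed accessors:

    // Sketch of the singly linked list push performed above.
    Object* head = native_context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
    closure->set_next_function_link(head);                            // barrier 1
    native_context->set(Context::OPTIMIZED_FUNCTIONS_LIST, closure);  // barrier 2
    // ...then JumpToJSEntry(entry) transfers control to the optimized code.
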
1392 | 1424 |
1393 // We found no optimized code. | 1425 __ bind(&loop_bottom); |
| 1426 __ SubSmiLiteral(index, index, Smi::FromInt(SharedFunctionInfo::kEntryLength), |
| 1427 r0); |
| 1428 __ CmpSmiLiteral(index, Smi::FromInt(1), r0); |
| 1429 __ bgt(&loop_top); |
| 1430 |
| 1431 // We found no code. |
| 1432 __ b(&gotta_call_runtime); |
| 1433 |
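
loop_bottom steps the Smi-tagged index back by one full entry and repeats while any entries remain; falling out of the loop means no cached code matched this context, so control joins gotta_call_runtime. The loop shape, roughly (plain integers stand in for the Smi arithmetic above):

    // Sketch of the overall scan; body checks are those at loop_top.
    for (int i = length; i > 1; i -= SharedFunctionInfo::kEntryLength) {
      // context match and code liveness checks from loop_top
    }
    goto gotta_call_runtime;  // nothing usable in the code map
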
1394 __ bind(&try_shared); | 1434 __ bind(&try_shared); |
1395 __ LoadP(entry, | 1435 __ LoadP(entry, |
1396 FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); | 1436 FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); |
1397 // Is the shared function marked for tier up? | 1437 // Is the shared function marked for tier up? |
1398 __ LoadlB(r7, FieldMemOperand( | 1438 __ LoadlB(temp, FieldMemOperand( |
1399 entry, SharedFunctionInfo::kMarkedForTierUpByteOffset)); | 1439 entry, SharedFunctionInfo::kMarkedForTierUpByteOffset)); |
1400 __ TestBit(r7, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0); | 1440 __ TestBit(temp, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0); |
1401 __ bne(&gotta_call_runtime); | 1441 __ bne(&gotta_call_runtime); |
1402 | 1442 |
1403 // If SFI points to anything other than CompileLazy, install that. | 1443 // If SFI points to anything other than CompileLazy, install that. |
1404 __ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset)); | 1444 __ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset)); |
1405 __ mov(r7, Operand(masm->CodeObject())); | 1445 __ mov(r7, Operand(masm->CodeObject())); |
1406 __ CmpP(entry, r7); | 1446 __ CmpP(entry, r7); |
1407 __ beq(&gotta_call_runtime); | 1447 __ beq(&gotta_call_runtime); |
1408 | 1448 |
1409 // Install the SFI's code entry. | 1449 // Install the SFI's code entry. |
1410 __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); | 1450 __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); |
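
The try_shared fallback installs the SharedFunctionInfo's own code instead, but only if the function is not marked for tier-up (which must go through the runtime) and the SFI code is not the CompileLazy stub itself, which would re-enter this builtin. Sketched, with illustrative predicate names:

    // Sketch of the fallback path; field names follow the diff above.
    if (shared_info->marked_for_tier_up()) goto gotta_call_runtime;
    Code* code = shared_info->code();
    if (code == masm_code_object) goto gotta_call_runtime;  // still lazy
    closure->set_code_entry(code->instruction_start());     // install, then jump
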
(...skipping 1676 matching lines...)
3087 // Now jump to the instructions of the returned code object. | 3127 // Now jump to the instructions of the returned code object. |
3088 __ Jump(ip); | 3128 __ Jump(ip); |
3089 } | 3129 } |
3090 | 3130 |
3091 #undef __ | 3131 #undef __ |
3092 | 3132 |
3093 } // namespace internal | 3133 } // namespace internal |
3094 } // namespace v8 | 3134 } // namespace v8 |
3095 | 3135 |
3096 #endif // V8_TARGET_ARCH_S390 | 3136 #endif // V8_TARGET_ARCH_S390 |