OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_S390 | 5 #if V8_TARGET_ARCH_S390 |
6 | 6 |
7 #include "src/codegen.h" | 7 #include "src/codegen.h" |
8 #include "src/debug/debug.h" | 8 #include "src/debug/debug.h" |
9 #include "src/deoptimizer.h" | 9 #include "src/deoptimizer.h" |
10 #include "src/full-codegen/full-codegen.h" | 10 #include "src/full-codegen/full-codegen.h" |
(...skipping 970 matching lines...) |
981 } | 981 } |
982 | 982 |
983 void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) { | 983 void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) { |
984 Generate_JSEntryTrampolineHelper(masm, false); | 984 Generate_JSEntryTrampolineHelper(masm, false); |
985 } | 985 } |
986 | 986 |
987 void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { | 987 void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { |
988 Generate_JSEntryTrampolineHelper(masm, true); | 988 Generate_JSEntryTrampolineHelper(masm, true); |
989 } | 989 } |
990 | 990 |
| 991 static void ReplaceClosureEntryWithOptimizedCode( |
| 992 MacroAssembler* masm, Register optimized_code_entry, Register closure, |
| 993 Register scratch1, Register scratch2, Register scratch3) { |
| 994 Register native_context = scratch1; |
| 995 // Store code entry in the closure. |
| 996 __ AddP(optimized_code_entry, optimized_code_entry, |
| 997 Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 998 __ StoreP(optimized_code_entry, |
| 999 FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0); |
| 1000 __ RecordWriteCodeEntryField(closure, optimized_code_entry, scratch2); |
| 1001 |
| 1002 // Link the closure into the optimized function list. |
| 1003 // optimized_code_entry : code entry |
| 1004 // scratch1 (native_context) : native context |
| 1005 // closure : closure |
| 1006 __ LoadP(native_context, NativeContextMemOperand()); |
| 1007 __ LoadP(scratch2, ContextMemOperand(native_context, |
| 1008 Context::OPTIMIZED_FUNCTIONS_LIST)); |
| 1009 __ StoreP(scratch2, |
| 1010 FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset), r0); |
| 1011 __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2, |
| 1012 scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs, |
| 1013 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); |
| 1014 const int function_list_offset = |
| 1015 Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST); |
| 1016 __ StoreP( |
| 1017 closure, |
| 1018 ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0); |
| 1019 // Save closure before the write barrier. |
| 1020 __ LoadRR(scratch2, closure); |
| 1021 __ RecordWriteContextSlot(native_context, function_list_offset, closure, |
| 1022 scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs); |
| 1023 __ LoadRR(closure, scratch2); |
| 1024 } |
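
The helper above factors out the closure-healing sequence that was previously duplicated in Generate_CompileLazy (the inline copy is replaced by a call to it further down in this change). As a rough mental model, here is a minimal C++ sketch of its logical effect, using hypothetical struct and field names; the real objects are tagged heap objects accessed via FieldMemOperand, and the write barriers (RecordWriteCodeEntryField, RecordWriteField, RecordWriteContextSlot) have no counterpart in the sketch:

    // Hypothetical, simplified layouts; names are illustrative only.
    struct Code { unsigned char* instruction_start; };  // code + Code::kHeaderSize - kHeapObjectTag
    struct JSFunction;
    struct Context { JSFunction* optimized_functions_list; };  // OPTIMIZED_FUNCTIONS_LIST slot
    struct JSFunction {
      unsigned char* code_entry;       // JSFunction::kCodeEntryOffset
      JSFunction* next_function_link;  // JSFunction::kNextFunctionLinkOffset
    };

    // Logical effect of the generated assembly, ignoring GC write barriers.
    void ReplaceClosureEntryWithOptimizedCodeSketch(Code* optimized_code,
                                                    JSFunction* closure,
                                                    Context* native_context) {
      // 1. Point the closure's code entry at the optimized instructions.
      closure->code_entry = optimized_code->instruction_start;
      // 2. Splice the closure onto the head of the native context's
      //    optimized-functions list.
      closure->next_function_link = native_context->optimized_functions_list;
      native_context->optimized_functions_list = closure;
    }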
| 1025 |
991 static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) { | 1026 static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) { |
992 Register args_count = scratch; | 1027 Register args_count = scratch; |
993 | 1028 |
994 // Get the arguments + receiver count. | 1029 // Get the arguments + receiver count. |
995 __ LoadP(args_count, | 1030 __ LoadP(args_count, |
996 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); | 1031 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); |
997 __ LoadlW(args_count, | 1032 __ LoadlW(args_count, |
998 FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset)); | 1033 FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset)); |
999 | 1034 |
1000 // Leave the frame (also dropping the register file). | 1035 // Leave the frame (also dropping the register file). |
(...skipping 20 matching lines...) |
1021 // frames.h for its layout. | 1056 // frames.h for its layout. |
1022 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { | 1057 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { |
1023 ProfileEntryHookStub::MaybeCallEntryHook(masm); | 1058 ProfileEntryHookStub::MaybeCallEntryHook(masm); |
1024 | 1059 |
1025 // Open a frame scope to indicate that there is a frame on the stack. The | 1060 // Open a frame scope to indicate that there is a frame on the stack. The |
1026 // MANUAL indicates that the scope shouldn't actually generate code to set up | 1061 // MANUAL indicates that the scope shouldn't actually generate code to set up |
1027 // the frame (that is done below). | 1062 // the frame (that is done below). |
1028 FrameScope frame_scope(masm, StackFrame::MANUAL); | 1063 FrameScope frame_scope(masm, StackFrame::MANUAL); |
1029 __ PushStandardFrame(r3); | 1064 __ PushStandardFrame(r3); |
1030 | 1065 |
| 1066 // First check if there is optimized code in the feedback vector which we |
| 1067 // could call instead. |
| 1068 Label switch_to_optimized_code; |
| 1069 |
| 1070 Register optimized_code_entry = r6; |
| 1071 __ LoadP(r2, FieldMemOperand(r3, JSFunction::kFeedbackVectorOffset)); |
| 1072 __ LoadP(r2, FieldMemOperand(r2, Cell::kValueOffset)); |
| 1073 __ LoadP( |
| 1074 optimized_code_entry, |
| 1075 FieldMemOperand(r2, FeedbackVector::kOptimizedCodeIndex * kPointerSize + |
| 1076 FeedbackVector::kHeaderSize)); |
| 1077 __ LoadP(optimized_code_entry, |
| 1078 FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset)); |
| 1079 __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code); |
| 1080 |
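
Before falling through to the bytecode path, the trampoline now peeks at the feedback vector's optimized-code slot. A minimal sketch of that lookup, with hypothetical types standing in for the tagged objects (Cell, FeedbackVector, WeakCell) and Smi tagging ignored:

    // Hypothetical layouts; names are illustrative only.
    struct WeakCellSketch { void* value; };                           // Smi when cleared/empty
    struct FeedbackVectorSketch { WeakCellSketch* optimized_code; };  // slot kOptimizedCodeIndex
    struct CellSketch { FeedbackVectorSketch* value; };               // JSFunction::kFeedbackVectorOffset
    struct ClosureSketch { CellSketch* feedback_vector_cell; };

    void* MaybeOptimizedCode(ClosureSketch* closure) {
      FeedbackVectorSketch* vector = closure->feedback_vector_cell->value;
      // The generated code takes the switch_to_optimized_code path only when
      // this value is a real heap object (JumpIfNotSmi), i.e. when the weak
      // cell still points at live optimized code.
      return vector->optimized_code->value;
    }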
1031 // Get the bytecode array from the function object (or from the DebugInfo if | 1081 // Get the bytecode array from the function object (or from the DebugInfo if |
1032 // it is present) and load it into kInterpreterBytecodeArrayRegister. | 1082 // it is present) and load it into kInterpreterBytecodeArrayRegister. |
1033 __ LoadP(r2, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset)); | 1083 __ LoadP(r2, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset)); |
1034 Label array_done; | 1084 Label array_done; |
1035 Register debug_info = r4; | 1085 Register debug_info = r4; |
1036 DCHECK(!debug_info.is(r2)); | 1086 DCHECK(!debug_info.is(r2)); |
1037 __ LoadP(debug_info, | 1087 __ LoadP(debug_info, |
1038 FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset)); | 1088 FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset)); |
1039 // Load original bytecode array or the debug copy. | 1089 // Load original bytecode array or the debug copy. |
1040 __ LoadP(kInterpreterBytecodeArrayRegister, | 1090 __ LoadP(kInterpreterBytecodeArrayRegister, |
(...skipping 99 matching lines...) |
1140 // function has been switched to a different kind of code and we heal the | 1190 // function has been switched to a different kind of code and we heal the |
1141 // closure by switching the code entry field over to the new code as well. | 1191 // closure by switching the code entry field over to the new code as well. |
1142 __ bind(&switch_to_different_code_kind); | 1192 __ bind(&switch_to_different_code_kind); |
1143 __ LeaveFrame(StackFrame::JAVA_SCRIPT); | 1193 __ LeaveFrame(StackFrame::JAVA_SCRIPT); |
1144 __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset)); | 1194 __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset)); |
1145 __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kCodeOffset)); | 1195 __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kCodeOffset)); |
1146 __ AddP(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag)); | 1196 __ AddP(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag)); |
1147 __ StoreP(r6, FieldMemOperand(r3, JSFunction::kCodeEntryOffset), r0); | 1197 __ StoreP(r6, FieldMemOperand(r3, JSFunction::kCodeEntryOffset), r0); |
1148 __ RecordWriteCodeEntryField(r3, r6, r7); | 1198 __ RecordWriteCodeEntryField(r3, r6, r7); |
1149 __ JumpToJSEntry(r6); | 1199 __ JumpToJSEntry(r6); |
| 1200 |
| 1201 // If there is optimized code in the feedback vector, check whether it is still |
| 1202 // valid to run; if so, self-heal the closure and tail call the optimized code. |
| 1203 __ bind(&switch_to_optimized_code); |
| 1204 __ LeaveFrame(StackFrame::JAVA_SCRIPT); |
| 1205 Label gotta_call_runtime; |
| 1206 |
| 1207 // Check if the optimized code is marked for deopt. |
| 1208 __ LoadlB(r7, FieldMemOperand(optimized_code_entry, |
| 1209 Code::kKindSpecificFlags1Offset)); |
| 1210 __ tmll(r7, Operand(Code::kMarkedForDeoptimizationBit)); |
| 1211 __ bne(&gotta_call_runtime); |
| 1212 |
| 1213 // Optimized code is good, get it into the closure and link the closure into |
| 1214 // the optimized functions list, then tail call the optimized code. |
| 1215 ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, r3, r8, r7, |
| 1216 r4); |
| 1217 __ JumpToJSEntry(optimized_code_entry); |
| 1218 |
| 1219 // Optimized code is marked for deopt, bail out to the EvictOptimizedCodeSlot |
| 1220 // runtime function, which will clear the feedback vector's optimized code slot. |
| 1221 __ bind(&gotta_call_runtime); |
| 1222 GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); |
1150 } | 1223 } |
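
The deoptimization check used in the switch_to_optimized_code path (and again in Generate_CompileLazy below) boils down to testing one bit of the code object's kind-specific flags. A minimal sketch, with a hypothetical layout and an assumed bit position:

    #include <cstdint>

    // Hypothetical layout; the bit position is assumed for illustration.
    struct CodeFlagsSketch {
      uint8_t kind_specific_flags1;  // byte at Code::kKindSpecificFlags1Offset
    };
    constexpr uint8_t kMarkedForDeoptBitSketch = 1 << 0;

    // tmll + bne(&gotta_call_runtime) branches exactly when this is true,
    // sending the closure to Runtime::kEvictOptimizedCodeSlot instead of
    // tail calling stale optimized code.
    bool IsMarkedForDeoptimization(const CodeFlagsSketch* code) {
      return (code->kind_specific_flags1 & kMarkedForDeoptBitSketch) != 0;
    }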
1151 | 1224 |
1152 static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args, | 1225 static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args, |
1153 Register scratch, | 1226 Register scratch, |
1154 Label* stack_overflow) { | 1227 Label* stack_overflow) { |
1155 // Check the stack for overflow. We are not trying to catch | 1228 // Check the stack for overflow. We are not trying to catch |
1156 // interruptions (e.g. debug break and preemption) here, so the "real stack | 1229 // interruptions (e.g. debug break and preemption) here, so the "real stack |
1157 // limit" is checked. | 1230 // limit" is checked. |
1158 __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex); | 1231 __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex); |
1159 // Make scratch the space we have left. The stack might already be overflowed | 1232 // Make scratch the space we have left. The stack might already be overflowed |
(...skipping 246 matching lines...) |
1406 __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset)); | 1479 __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset)); |
1407 __ JumpIfSmi(entry, &try_shared); | 1480 __ JumpIfSmi(entry, &try_shared); |
1408 | 1481 |
1409 // Found code, check if it is marked for deopt, if so call into runtime to | 1482 // Found code, check if it is marked for deopt, if so call into runtime to |
1410 // clear the optimized code slot. | 1483 // clear the optimized code slot. |
1411 __ LoadlB(r7, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset)); | 1484 __ LoadlB(r7, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset)); |
1412 __ tmll(r7, Operand(Code::kMarkedForDeoptimizationBit)); | 1485 __ tmll(r7, Operand(Code::kMarkedForDeoptimizationBit)); |
1413 __ bne(&gotta_call_runtime); | 1486 __ bne(&gotta_call_runtime); |
1414 | 1487 |
1415 // Code is good, get it into the closure and tail call. | 1488 // Code is good, get it into the closure and tail call. |
1416 __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); | 1489 ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r8, r7, r4); |
1417 __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0); | |
1418 __ RecordWriteCodeEntryField(closure, entry, r7); | |
1419 | |
1420 // Load native context into r8. | |
1421 Register native_context = r8; | |
1422 __ LoadP(native_context, NativeContextMemOperand()); | |
1423 | |
1424 // Link the closure into the optimized function list. | |
1425 __ LoadP( | |
1426 r7, ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST)); | |
1427 __ StoreP(r7, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset), | |
1428 r0); | |
1429 __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r7, r4, | |
1430 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, | |
1431 OMIT_SMI_CHECK); | |
1432 const int function_list_offset = | |
1433 Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST); | |
1434 __ StoreP( | |
1435 closure, | |
1436 ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0); | |
1437 // Save closure before the write barrier. | |
1438 __ LoadRR(r7, closure); | |
1439 __ RecordWriteContextSlot(native_context, function_list_offset, r7, r4, | |
1440 kLRHasNotBeenSaved, kDontSaveFPRegs); | |
1441 __ JumpToJSEntry(entry); | 1490 __ JumpToJSEntry(entry); |
1442 | 1491 |
1443 // We found no optimized code. | 1492 // We found no optimized code. |
1444 __ bind(&try_shared); | 1493 __ bind(&try_shared); |
1445 __ LoadP(entry, | 1494 __ LoadP(entry, |
1446 FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); | 1495 FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); |
1447 // Is the shared function marked for tier up? | 1496 // Is the shared function marked for tier up? |
1448 __ LoadlB(r7, FieldMemOperand( | 1497 __ LoadlB(r7, FieldMemOperand( |
1449 entry, SharedFunctionInfo::kMarkedForTierUpByteOffset)); | 1498 entry, SharedFunctionInfo::kMarkedForTierUpByteOffset)); |
1450 __ TestBit(r7, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0); | 1499 __ TestBit(r7, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0); |
(...skipping 1682 matching lines...) |
3133 // Now jump to the instructions of the returned code object. | 3182 // Now jump to the instructions of the returned code object. |
3134 __ Jump(ip); | 3183 __ Jump(ip); |
3135 } | 3184 } |
3136 | 3185 |
3137 #undef __ | 3186 #undef __ |
3138 | 3187 |
3139 } // namespace internal | 3188 } // namespace internal |
3140 } // namespace v8 | 3189 } // namespace v8 |
3141 | 3190 |
3142 #endif // V8_TARGET_ARCH_S390 | 3191 #endif // V8_TARGET_ARCH_S390 |