Chromium Code Reviews

Unified Diff: src/builtins/s390/builtins-s390.cc

Issue 2947903002: PPC/s390: [compiler] Drive optimizations with feedback vector (reland) (Closed)
Patch Set: fix ppc issue (created 3 years, 6 months ago)
 // Copyright 2014 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #if V8_TARGET_ARCH_S390

 #include "src/codegen.h"
 #include "src/debug/debug.h"
 #include "src/deoptimizer.h"
 #include "src/full-codegen/full-codegen.h"
(...skipping 406 matching lines...)
     __ LoadRR(r4, r2);

     // Restore target function and new target.
     __ Pop(r2, r3, r5);
     __ SmiUntag(r2);
   }
   __ AddP(ip, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ JumpToJSEntry(ip);
 }
 
-void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
-  // Checking whether the queued function is ready for install is optional,
-  // since we come across interrupts and stack checks elsewhere. However,
-  // not checking may delay installing ready functions, and always checking
-  // would be quite expensive. A good compromise is to first check against
-  // stack limit as a cue for an interrupt signal.
-  Label ok;
-  __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
-  __ bge(&ok, Label::kNear);
-
-  GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
-
-  __ bind(&ok);
-  GenerateTailCallToSharedCode(masm);
-}
-
 namespace {

 void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
   Label post_instantiation_deopt_entry;
   // ----------- S t a t e -------------
   //  -- r2 : number of arguments
   //  -- r3 : constructor function
   //  -- r5 : new target
   //  -- cp : context
   //  -- lr : return address
(...skipping 569 matching lines...)
             MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
   __ LoadlW(args_count,
             FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));

   // Leave the frame (also dropping the register file).
   __ LeaveFrame(StackFrame::JAVA_SCRIPT);

   __ AddP(sp, sp, args_count);
 }
 
+// Tail-call |function_id| if |smi_entry| == |marker|
+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
+                                          Register smi_entry,
+                                          OptimizationMarker marker,
+                                          Runtime::FunctionId function_id) {
+  Label no_match;
+  __ CmpSmiLiteral(smi_entry, Smi::FromEnum(marker), r0);
+  __ bne(&no_match);
+  GenerateTailCallToReturnedCode(masm, function_id);
+  __ bind(&no_match);
+}
+
+static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
+                                           Register feedback_vector,
+                                           Register scratch1, Register scratch2,
+                                           Register scratch3) {
+  // ----------- S t a t e -------------
+  //  -- r2 : argument count (preserved for callee if needed, and caller)
+  //  -- r5 : new target (preserved for callee if needed, and caller)
+  //  -- r3 : target function (preserved for callee if needed, and caller)
+  //  -- feedback vector (preserved for caller if needed)
+  // -----------------------------------
+  DCHECK(
+      !AreAliased(feedback_vector, r2, r3, r5, scratch1, scratch2, scratch3));
+
+  Label optimized_code_slot_is_cell, fallthrough;
+
+  Register closure = r3;
+  Register optimized_code_entry = scratch1;
+
+  const int kOptimizedCodeCellOffset =
+      FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+      FeedbackVector::kHeaderSize;
+  __ LoadP(optimized_code_entry,
+           FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
+
+  // Check if the code entry is a Smi. If yes, we interpret it as an
+  // optimization marker. Otherwise, interpret it as a weak cell to a code
+  // object.
+  __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+
+  {
+    // Optimized code slot is a Smi optimization marker.
+
+    // Fall through if no optimization trigger.
+    __ CmpSmiLiteral(optimized_code_entry,
+                     Smi::FromEnum(OptimizationMarker::kNone), r0);
+    __ beq(&fallthrough);
+
+    TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+                                  OptimizationMarker::kCompileOptimized,
+                                  Runtime::kCompileOptimized_NotConcurrent);
+    TailCallRuntimeIfMarkerEquals(
+        masm, optimized_code_entry,
+        OptimizationMarker::kCompileOptimizedConcurrent,
+        Runtime::kCompileOptimized_Concurrent);
+
+    {
+      // Otherwise, the marker is InOptimizationQueue.
+      if (FLAG_debug_code) {
+        __ CmpSmiLiteral(
+            optimized_code_entry,
+            Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0);
+        __ Assert(eq, kExpectedOptimizationSentinel);
+      }
+
+      // Checking whether the queued function is ready for install is optional,
+      // since we come across interrupts and stack checks elsewhere. However,
+      // not checking may delay installing ready functions, and always checking
+      // would be quite expensive. A good compromise is to first check against
+      // stack limit as a cue for an interrupt signal.
+      __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
+      __ bge(&fallthrough, Label::kNear);
+      GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+    }
+  }
+
+  {
+    // Optimized code slot is a WeakCell.
+    __ bind(&optimized_code_slot_is_cell);
+
+    __ LoadP(optimized_code_entry,
+             FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+    __ JumpIfSmi(optimized_code_entry, &fallthrough);
+
+    // Check if the optimized code is marked for deopt. If it is, call the
+    // runtime to clear it.
+    Label found_deoptimized_code;
+    __ LoadP(scratch2, FieldMemOperand(optimized_code_entry,
+                                       Code::kKindSpecificFlags1Offset));
+    __ TestBit(scratch2, Code::kMarkedForDeoptimizationBit, r0);
+    __ bne(&found_deoptimized_code);
+
+    // Optimized code is good, get it into the closure and link the closure
+    // into the optimized functions list, then tail call the optimized code.
+    // The feedback vector is no longer used, so re-use it as a scratch
+    // register.
+    ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
+                                         scratch2, scratch3, feedback_vector);
+    __ Jump(optimized_code_entry);
+
+    // Optimized code slot contains deoptimized code, evict it and re-enter
+    // the closure's code.
+    __ bind(&found_deoptimized_code);
+    GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+  }
+
+  // Fall-through if the optimized code cell is clear and there is no
+  // optimization marker.
+  __ bind(&fallthrough);
+}
+
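For orientation, the new helper boils down to a three-way decision on the slot contents. Below is a minimal, self-contained C++ sketch of that decision tree; the Marker enum, CodeObject struct, and the returned strings are hypothetical stand-ins for this illustration, not V8 types.

    // Hypothetical illustration of the dispatch implemented by
    // MaybeTailCallOptimizedCodeSlot; the types and strings are stand-ins,
    // not V8 APIs.
    #include <cstdio>
    #include <variant>

    enum class Marker {
      kNone,
      kCompileOptimized,
      kCompileOptimizedConcurrent,
      kInOptimizationQueue
    };
    struct CodeObject {
      bool marked_for_deoptimization;
    };
    // The slot holds either a Smi-like marker or a (possibly cleared) weak
    // reference to an optimized Code object.
    using OptimizedCodeSlot = std::variant<Marker, CodeObject*>;

    const char* Dispatch(const OptimizedCodeSlot& slot, bool stack_limit_hit) {
      if (const Marker* marker = std::get_if<Marker>(&slot)) {
        switch (*marker) {
          case Marker::kCompileOptimized:
            return "tail call Runtime::kCompileOptimized_NotConcurrent";
          case Marker::kCompileOptimizedConcurrent:
            return "tail call Runtime::kCompileOptimized_Concurrent";
          case Marker::kInOptimizationQueue:
            // Only poll for the queued result when the stack limit also
            // signals an interrupt; otherwise keep going in the interpreter.
            if (stack_limit_hit)
              return "tail call Runtime::kTryInstallOptimizedCode";
            return "fall through to the bytecode entry";
          case Marker::kNone:
            break;
        }
        return "fall through to the bytecode entry";
      }
      CodeObject* code = std::get<CodeObject*>(slot);
      if (code == nullptr) return "weak cell cleared: fall through";
      if (code->marked_for_deoptimization)
        return "tail call Runtime::kEvictOptimizedCodeSlot";
      return "link the code into the closure and jump to it";
    }

    int main() {
      std::puts(Dispatch(Marker::kCompileOptimizedConcurrent, false));
      CodeObject good{false};
      std::puts(Dispatch(&good, false));
      return 0;
    }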
 // Generate code for entering a JS function with the interpreter.
 // On entry to the function the receiver and arguments have been pushed on the
 // stack left to right. The actual argument count matches the formal parameter
 // count expected by the function.
 //
 // The live registers are:
 //   o r3: the JS function object being called.
 //   o r5: the new target
 //   o cp: our context
 //   o pp: the caller's constant pool pointer (if enabled)
 //   o fp: the caller's frame pointer
 //   o sp: stack pointer
 //   o lr: return address
 //
 // The function builds an interpreter frame. See InterpreterFrameConstants in
 // frames.h for its layout.
 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   ProfileEntryHookStub::MaybeCallEntryHook(masm);
 
+  Register closure = r3;
+  Register feedback_vector = r4;
+
+  // Load the feedback vector from the closure.
+  __ LoadP(feedback_vector,
+           FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+  __ LoadP(feedback_vector,
+           FieldMemOperand(feedback_vector, Cell::kValueOffset));
+  // Read off the optimized code slot in the feedback vector, and if there
+  // is optimized code or an optimization marker, call that instead.
+  MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
+
   // Open a frame scope to indicate that there is a frame on the stack. The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
   // the frame (that is done below).
   FrameScope frame_scope(masm, StackFrame::MANUAL);
-  __ PushStandardFrame(r3);
-
-  // First check if there is optimized code in the feedback vector which we
-  // could call instead.
-  Label switch_to_optimized_code;
-
-  Register optimized_code_entry = r6;
-  __ LoadP(r2, FieldMemOperand(r3, JSFunction::kFeedbackVectorOffset));
-  __ LoadP(r2, FieldMemOperand(r2, Cell::kValueOffset));
-  __ LoadP(
-      optimized_code_entry,
-      FieldMemOperand(r2, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
-                              FeedbackVector::kHeaderSize));
-  __ LoadP(optimized_code_entry,
-           FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
-  __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+  __ PushStandardFrame(closure);
 
   // Get the bytecode array from the function object (or from the DebugInfo if
   // it is present) and load it into kInterpreterBytecodeArrayRegister.
   Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
-  __ LoadP(r2, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(r2, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
   // Load original bytecode array or the debug copy.
   __ LoadP(kInterpreterBytecodeArrayRegister,
            FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
-  __ LoadP(r4, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
-
-  __ TestIfSmi(r4);
+  __ LoadP(r6, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
+  __ TestIfSmi(r6);
   __ bne(&maybe_load_debug_bytecode_array);
   __ bind(&bytecode_array_loaded);

   // Check whether we should continue to use the interpreter.
   // TODO(rmcilroy) Remove self healing once liveedit only has to deal with
   // Ignition bytecode.
   Label switch_to_different_code_kind;
   __ LoadP(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
   __ CmpP(r2, Operand(masm->CodeObject()));  // Self-reference to this code.
   __ bne(&switch_to_different_code_kind);

   // Increment invocation count for the function.
-  __ LoadP(r6, FieldMemOperand(r3, JSFunction::kFeedbackVectorOffset));
-  __ LoadP(r6, FieldMemOperand(r6, Cell::kValueOffset));
-  __ LoadP(r1, FieldMemOperand(
-                   r6, FeedbackVector::kInvocationCountIndex * kPointerSize +
-                           FeedbackVector::kHeaderSize));
+  __ LoadP(
+      r1, FieldMemOperand(feedback_vector,
+                          FeedbackVector::kInvocationCountIndex * kPointerSize +
+                              FeedbackVector::kHeaderSize));
   __ AddSmiLiteral(r1, r1, Smi::FromInt(1), r0);
-  __ StoreP(r1, FieldMemOperand(
-                    r6, FeedbackVector::kInvocationCountIndex * kPointerSize +
-                            FeedbackVector::kHeaderSize));
+  __ StoreP(
+      r1, FieldMemOperand(feedback_vector,
+                          FeedbackVector::kInvocationCountIndex * kPointerSize +
+                              FeedbackVector::kHeaderSize));
 
   // Check function data field is actually a BytecodeArray object.
   if (FLAG_debug_code) {
     __ TestIfSmi(kInterpreterBytecodeArrayRegister);
     __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
     __ CompareObjectType(kInterpreterBytecodeArrayRegister, r2, no_reg,
                          BYTECODE_ARRAY_TYPE);
     __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
(...skipping 58 matching lines...)
 
   // The return value is in r2.
   LeaveInterpreterFrame(masm, r4);
   __ Ret();

   // Load debug copy of the bytecode array if it exists.
   // kInterpreterBytecodeArrayRegister is already loaded with
   // SharedFunctionInfo::kFunctionDataOffset.
   Label done;
   __ bind(&maybe_load_debug_bytecode_array);
-  __ LoadP(ip, FieldMemOperand(r4, DebugInfo::kFlagsOffset));
+  __ LoadP(ip, FieldMemOperand(r6, DebugInfo::kFlagsOffset));
   __ SmiUntag(ip);
   __ tmll(ip, Operand(DebugInfo::kHasBreakInfo));
   __ beq(&done);
   __ LoadP(kInterpreterBytecodeArrayRegister,
-           FieldMemOperand(r4, DebugInfo::kDebugBytecodeArrayOffset));
+           FieldMemOperand(r6, DebugInfo::kDebugBytecodeArrayOffset));
   __ bind(&done);
   __ b(&bytecode_array_loaded);

   // If the shared code is no longer this entry trampoline, then the underlying
   // function has been switched to a different kind of code and we heal the
   // closure by switching the code entry field over to the new code as well.
   __ bind(&switch_to_different_code_kind);
   __ LeaveFrame(StackFrame::JAVA_SCRIPT);
-  __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(r6, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
   __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kCodeOffset));
   __ AddP(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ StoreP(r6, FieldMemOperand(r3, JSFunction::kCodeEntryOffset), r0);
-  __ RecordWriteCodeEntryField(r3, r6, r7);
+  __ StoreP(r6, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+  __ RecordWriteCodeEntryField(closure, r6, r7);
   __ JumpToJSEntry(r6);
-
-  // If there is optimized code on the type feedback vector, check if it is good
-  // to run, and if so, self heal the closure and call the optimized code.
-  __ bind(&switch_to_optimized_code);
-  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
-  Label gotta_call_runtime;
-
-  // Check if the optimized code is marked for deopt.
-  __ LoadlW(r7, FieldMemOperand(optimized_code_entry,
-                                Code::kKindSpecificFlags1Offset));
-  __ And(r0, r7, Operand(1 << Code::kMarkedForDeoptimizationBit));
-  __ bne(&gotta_call_runtime);
-
-  // Optimized code is good, get it into the closure and link the closure into
-  // the optimized functions list, then tail call the optimized code.
-  ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, r3, r8, r7,
-                                       r4);
-  __ JumpToJSEntry(optimized_code_entry);
-
-  // Optimized code is marked for deopt, bailout to the CompileLazy runtime
-  // function which will clear the feedback vector's optimized code slot.
-  __ bind(&gotta_call_runtime);
-  GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
 }
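A small aside on the invocation-count bump inside the trampoline above: AddSmiLiteral works because Smis are plain integers shifted left by the tag size, so adding a tagged 1 to a tagged count yields the tagged sum with no untagging. A standalone sketch, assuming a 1-bit Smi tag (the actual shift differs between 32-bit and 64-bit builds):

    // Illustration of the Smi arithmetic behind AddSmiLiteral(..., Smi::FromInt(1), ...).
    // The 1-bit tag layout is an assumption for this sketch, not the exact s390x encoding.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    constexpr int kSmiTagSize = 1;  // assumed: low bit 0 marks a small integer

    constexpr intptr_t SmiFromInt(intptr_t value) { return value << kSmiTagSize; }
    constexpr intptr_t SmiToInt(intptr_t smi) { return smi >> kSmiTagSize; }

    int main() {
      intptr_t invocation_count = SmiFromInt(41);  // tagged value loaded from the slot
      invocation_count += SmiFromInt(1);           // tagged addition, no untagging needed
      assert(SmiToInt(invocation_count) == 42);    // result is still a valid Smi
      std::printf("invocation count = %ld\n",
                  static_cast<long>(SmiToInt(invocation_count)));
      return 0;
    }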
 
 static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
                                         Register scratch,
                                         Label* stack_overflow) {
   // Check the stack for overflow. We are not trying to catch
   // interruptions (e.g. debug break and preemption) here, so the "real stack
   // limit" is checked.
   __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
   // Make scratch the space we have left. The stack might already be overflowed
(...skipping 213 matching lines...)
   __ StoreP(r4,
             MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));

   Generate_InterpreterEnterBytecode(masm);
 }

 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
   Generate_InterpreterEnterBytecode(masm);
 }
 
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : argument count (preserved for callee)
+  //  -- r5 : new target (preserved for callee)
+  //  -- r3 : target function (preserved for callee)
+  // -----------------------------------
+  Register closure = r3;
+
+  // Get the feedback vector.
+  Register feedback_vector = r4;
+  __ LoadP(feedback_vector,
+           FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+  __ LoadP(feedback_vector,
+           FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+  // The feedback vector must be defined.
+  if (FLAG_debug_code) {
+    __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
+    __ Assert(ne, BailoutReason::kExpectedFeedbackVector);
+  }
+
+  // Is there an optimization marker or optimized code in the feedback vector?
+  MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
+
+  // Otherwise, tail call the SFI code.
+  GenerateTailCallToSharedCode(masm);
+}
+
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r2 : argument count (preserved for callee)
   //  -- r5 : new target (preserved for callee)
   //  -- r3 : target function (preserved for callee)
   // -----------------------------------
   // First lookup code, maybe we don't need to compile!
   Label gotta_call_runtime;
-  Label try_shared;

   Register closure = r3;
-  Register index = r4;
+  Register feedback_vector = r4;

   // Do we have a valid feedback vector?
-  __ LoadP(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
-  __ LoadP(index, FieldMemOperand(index, Cell::kValueOffset));
-  __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+  __ LoadP(feedback_vector,
+           FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+  __ LoadP(feedback_vector,
+           FieldMemOperand(feedback_vector, Cell::kValueOffset));
+  __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
+                &gotta_call_runtime);

-  // Is optimized code available in the feedback vector?
-  Register entry = r6;
-  __ LoadP(entry, FieldMemOperand(index, FeedbackVector::kOptimizedCodeIndex *
-                                             kPointerSize +
-                                         FeedbackVector::kHeaderSize));
-  __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
-  __ JumpIfSmi(entry, &try_shared);
-
-  // Found code, check if it is marked for deopt, if so call into runtime to
-  // clear the optimized code slot.
-  __ LoadlW(r7, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
-  __ And(r0, r7, Operand(1 << Code::kMarkedForDeoptimizationBit));
-  __ bne(&gotta_call_runtime);
-
-  // Code is good, get it into the closure and tail call.
-  ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r8, r7, r4);
-  __ JumpToJSEntry(entry);
+  // Is there an optimization marker or optimized code in the feedback vector?
+  MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);

   // We found no optimized code.
-  __ bind(&try_shared);
+  Register entry = r6;
   __ LoadP(entry,
            FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-  // Is the shared function marked for tier up?
-  __ LoadlW(r7,
-            FieldMemOperand(entry, SharedFunctionInfo::kCompilerHintsOffset));
-  __ TestBit(r7, SharedFunctionInfo::MarkedForTierUpBit::kShift, r0);
-  __ bne(&gotta_call_runtime);

   // If SFI points to anything other than CompileLazy, install that.
   __ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
   __ mov(r7, Operand(masm->CodeObject()));
   __ CmpP(entry, r7);
   __ beq(&gotta_call_runtime);

   // Install the SFI's code entry.
   __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
   __ RecordWriteCodeEntryField(closure, entry, r7);
   __ JumpToJSEntry(entry);

   __ bind(&gotta_call_runtime);
   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
 }
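To summarize the control flow that Generate_CompileLazy has after this change, here is a plain C++ model of the decision order; JSFunction, SharedFunctionInfo, and the returned strings are simplified stand-ins for this sketch, not the real V8 classes.

    // Hypothetical model of the CompileLazy decision order above.
    #include <cstdio>
    #include <string>

    struct SharedFunctionInfo {
      std::string code;  // name of the code object the SFI currently points to
    };

    struct JSFunction {
      SharedFunctionInfo* shared;
      bool has_feedback_vector;
      bool has_optimized_code_or_marker;
    };

    std::string CompileLazy(const JSFunction& fn) {
      // No feedback vector yet: let the runtime allocate one and compile.
      if (!fn.has_feedback_vector) return "Runtime::kCompileLazy";
      // Optimized code or an optimization marker takes priority.
      if (fn.has_optimized_code_or_marker)
        return "handled by MaybeTailCallOptimizedCodeSlot";
      // If the SharedFunctionInfo already holds real code (for example the
      // interpreter entry trampoline), install it on the closure and jump to it.
      if (fn.shared->code != "CompileLazy")
        return "install the SFI code on the closure and JumpToJSEntry";
      // Otherwise we really do have to compile.
      return "Runtime::kCompileLazy";
    }

    int main() {
      SharedFunctionInfo sfi{"InterpreterEntryTrampoline"};
      JSFunction fn{&sfi, true, false};
      std::printf("%s\n", CompileLazy(fn).c_str());
      return 0;
    }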
 
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
-  GenerateTailCallToReturnedCode(masm,
-                                 Runtime::kCompileOptimized_NotConcurrent);
-}
-
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
-  GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
-}
-
 void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r2 : argument count (preserved for callee)
   //  -- r3 : new target (preserved for callee)
   //  -- r5 : target function (preserved for callee)
   // -----------------------------------
   Label failed;
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     // Preserve argument count for later compare.
(...skipping 1603 matching lines...)
   // Now jump to the instructions of the returned code object.
   __ Jump(ip);
 }

 #undef __

 }  // namespace internal
 }  // namespace v8

 #endif  // V8_TARGET_ARCH_S390