Chromium Code Reviews

Side by Side Diff: src/builtins/ppc/builtins-ppc.cc

Issue 2947903002: PPC/s390: [compiler] Drive optimizations with feedback vector (reland) (Closed)
Patch Set: fix ppc issue | Created 3 years, 6 months ago
1 // Copyright 2014 the V8 project authors. All rights reserved. 1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #if V8_TARGET_ARCH_PPC 5 #if V8_TARGET_ARCH_PPC
6 6
7 #include "src/codegen.h" 7 #include "src/codegen.h"
8 #include "src/debug/debug.h" 8 #include "src/debug/debug.h"
9 #include "src/deoptimizer.h" 9 #include "src/deoptimizer.h"
10 #include "src/full-codegen/full-codegen.h" 10 #include "src/full-codegen/full-codegen.h"
(...skipping 409 matching lines...)
420 __ mr(r5, r3); 420 __ mr(r5, r3);
421 421
422 // Restore target function and new target. 422 // Restore target function and new target.
423 __ Pop(r3, r4, r6); 423 __ Pop(r3, r4, r6);
424 __ SmiUntag(r3); 424 __ SmiUntag(r3);
425 } 425 }
426 __ addi(ip, r5, Operand(Code::kHeaderSize - kHeapObjectTag)); 426 __ addi(ip, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
427 __ JumpToJSEntry(ip); 427 __ JumpToJSEntry(ip);
428 } 428 }
429 429
430 void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
431 // Checking whether the queued function is ready for install is optional,
432 // since we come across interrupts and stack checks elsewhere. However,
433 // not checking may delay installing ready functions, and always checking
434 // would be quite expensive. A good compromise is to first check against
435 // stack limit as a cue for an interrupt signal.
436 Label ok;
437 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
438 __ cmpl(sp, ip);
439 __ bge(&ok);
440
441 GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
442
443 __ bind(&ok);
444 GenerateTailCallToSharedCode(masm);
445 }
446
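The standalone Generate_InOptimizationQueue builtin is deleted in this patch; its stack-limit heuristic survives in the kInOptimizationQueue branch of MaybeTailCallOptimizedCodeSlot further down in this file. A minimal sketch of that heuristic, with illustrative names only (not the V8 API):

#include <cstdint>

// Only poll the concurrent compiler for a finished job when the stack pointer
// is below the stack limit, i.e. when an interrupt/stack check is pending
// anyway; this mirrors the "cmpl sp, ip; bge &ok" sequence above.
bool ShouldTryInstallOptimizedCode(std::uintptr_t sp, std::uintptr_t stack_limit) {
  return sp < stack_limit;
}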
447 namespace { 430 namespace {
448 431
449 void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { 432 void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
450 Label post_instantiation_deopt_entry; 433 Label post_instantiation_deopt_entry;
451 // ----------- S t a t e ------------- 434 // ----------- S t a t e -------------
452 // -- r3 : number of arguments 435 // -- r3 : number of arguments
453 // -- r4 : constructor function 436 // -- r4 : constructor function
454 // -- r6 : new target 437 // -- r6 : new target
455 // -- cp : context 438 // -- cp : context
456 // -- lr : return address 439 // -- lr : return address
(...skipping 569 matching lines...)
1026 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); 1009 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1027 __ lwz(args_count, 1010 __ lwz(args_count,
1028 FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset)); 1011 FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
1029 1012
1030 // Leave the frame (also dropping the register file). 1013 // Leave the frame (also dropping the register file).
1031 __ LeaveFrame(StackFrame::JAVA_SCRIPT); 1014 __ LeaveFrame(StackFrame::JAVA_SCRIPT);
1032 1015
1033 __ add(sp, sp, args_count); 1016 __ add(sp, sp, args_count);
1034 } 1017 }
1035 1018
1019 // Tail-call |function_id| if |smi_entry| == |marker|
1020 static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
1021 Register smi_entry,
1022 OptimizationMarker marker,
1023 Runtime::FunctionId function_id) {
1024 Label no_match;
1025 __ CmpSmiLiteral(smi_entry, Smi::FromEnum(marker), r0);
1026 __ bne(&no_match);
1027 GenerateTailCallToReturnedCode(masm, function_id);
1028 __ bind(&no_match);
1029 }
1030
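TailCallRuntimeIfMarkerEquals emits a compare against a single Smi marker value and, on a match, tail-calls the given runtime function; otherwise control falls through past the bound no_match label to the next check. A hypothetical C++ analogue of that pattern, with invented names (C++17 for std::optional):

#include <optional>

enum class Marker { kCompileOptimized, kCompileOptimizedConcurrent, kOther };

// Returns the runtime function to "tail-call" on a match, or nothing to model
// falling through past the no_match label to the next check.
std::optional<const char*> TailCallRuntimeIfMarkerEquals(Marker entry,
                                                         Marker marker,
                                                         const char* runtime_fn) {
  if (entry == marker) return runtime_fn;
  return std::nullopt;
}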
1031 static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
1032 Register feedback_vector,
1033 Register scratch1, Register scratch2,
1034 Register scratch3) {
1035 // ----------- S t a t e -------------
1036 // -- r3 : argument count (preserved for callee if needed, and caller)
1037 // -- r6 : new target (preserved for callee if needed, and caller)
1038 // -- r4 : target function (preserved for callee if needed, and caller)
1039 // -- feedback vector (preserved for caller if needed)
1040 // -----------------------------------
1041 DCHECK(
1042 !AreAliased(feedback_vector, r3, r4, r6, scratch1, scratch2, scratch3));
1043
1044 Label optimized_code_slot_is_cell, fallthrough;
1045
1046 Register closure = r4;
1047 Register optimized_code_entry = scratch1;
1048
1049 const int kOptimizedCodeCellOffset =
1050 FeedbackVector::kOptimizedCodeIndex * kPointerSize +
1051 FeedbackVector::kHeaderSize;
1052 __ LoadP(optimized_code_entry,
1053 FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
1054
1055 // Check if the code entry is a Smi. If yes, we interpret it as an
1056 // optimization marker. Otherwise, interpret it as a weak cell to a code
1057 // object.
1058 __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
1059
1060 {
1061 // Optimized code slot is a Smi optimization marker.
1062
1063 // Fall through if no optimization trigger.
1064 __ CmpSmiLiteral(optimized_code_entry,
1065 Smi::FromEnum(OptimizationMarker::kNone), r0);
1066 __ beq(&fallthrough);
1067
1068 TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
1069 OptimizationMarker::kCompileOptimized,
1070 Runtime::kCompileOptimized_NotConcurrent);
1071 TailCallRuntimeIfMarkerEquals(
1072 masm, optimized_code_entry,
1073 OptimizationMarker::kCompileOptimizedConcurrent,
1074 Runtime::kCompileOptimized_Concurrent);
1075
1076 {
1077 // Otherwise, the marker is InOptimizationQueue.
1078 if (FLAG_debug_code) {
1079 __ CmpSmiLiteral(
1080 optimized_code_entry,
1081 Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0);
1082 __ Assert(eq, kExpectedOptimizationSentinel);
1083 }
1084
1085 // Checking whether the queued function is ready for install is optional,
1086 // since we come across interrupts and stack checks elsewhere. However,
1087 // not checking may delay installing ready functions, and always checking
1088 // would be quite expensive. A good compromise is to first check against
1089 // stack limit as a cue for an interrupt signal.
1090 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
1091 __ cmpl(sp, ip);
1092 __ bge(&fallthrough);
1093 GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
1094 }
1095 }
1096
1097 {
1098 // Optimized code slot is a WeakCell.
1099 __ bind(&optimized_code_slot_is_cell);
1100
1101 __ LoadP(optimized_code_entry,
1102 FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
1103 __ JumpIfSmi(optimized_code_entry, &fallthrough);
1104
1105 // Check if the optimized code is marked for deopt. If it is, call the
1106 // runtime to clear it.
1107 Label found_deoptimized_code;
1108 __ LoadP(scratch2, FieldMemOperand(optimized_code_entry,
1109 Code::kKindSpecificFlags1Offset));
1110 __ TestBit(scratch2, Code::kMarkedForDeoptimizationBit, r0);
1111 __ bne(&found_deoptimized_code, cr0);
1112
1113 // Optimized code is good, get it into the closure and link the closure into
1114 // the optimized functions list, then tail call the optimized code.
1115 // The feedback vector is no longer used, so re-use it as a scratch
1116 // register.
1117 ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
1118 scratch2, scratch3, feedback_vector);
1119 __ mr(ip, optimized_code_entry);
1120 __ Jump(optimized_code_entry);
1121
1122 // Optimized code slot contains deoptimized code, evict it and re-enter the
1123 // closure's code.
1124 __ bind(&found_deoptimized_code);
1125 GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
1126 }
1127
1128 // Fall-through if the optimized code cell is clear and there is no
1129 // optimization marker.
1130 __ bind(&fallthrough);
1131 }
1132
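MaybeTailCallOptimizedCodeSlot is the core of this change: the single optimized-code slot in the feedback vector now holds either a Smi optimization marker or a WeakCell pointing at optimized code, and this helper dispatches on both. A self-contained C++ model of that decision tree follows; the types and names are made up for illustration and are not the V8 API:

#include <cstdio>

enum class Marker {
  kNone, kCompileOptimized, kCompileOptimizedConcurrent, kInOptimizationQueue
};

struct Code { bool marked_for_deoptimization; };

// The slot is either a Smi marker or a weak reference to a Code object;
// code == nullptr models a cleared WeakCell.
struct OptimizedCodeSlot {
  bool is_marker;
  Marker marker;
  Code* code;
};

const char* Dispatch(const OptimizedCodeSlot& slot, bool interrupt_pending) {
  if (slot.is_marker) {
    switch (slot.marker) {
      case Marker::kNone:
        return "fall through to the existing code";
      case Marker::kCompileOptimized:
        return "tail-call Runtime::kCompileOptimized_NotConcurrent";
      case Marker::kCompileOptimizedConcurrent:
        return "tail-call Runtime::kCompileOptimized_Concurrent";
      case Marker::kInOptimizationQueue:
        // Only poll for the finished concurrent job when an interrupt is due,
        // matching the stack-limit check in the assembly above.
        return interrupt_pending
                   ? "tail-call Runtime::kTryInstallOptimizedCode"
                   : "fall through to the existing code";
    }
    return "unreachable";
  }
  if (slot.code == nullptr) return "fall through (WeakCell cleared)";
  if (slot.code->marked_for_deoptimization)
    return "tail-call Runtime::kEvictOptimizedCodeSlot";
  return "link the closure to the optimized code and tail-call it";
}

int main() {
  Code good{false};
  std::printf("%s\n", Dispatch({true, Marker::kCompileOptimized, nullptr}, false));
  std::printf("%s\n", Dispatch({false, Marker::kNone, &good}, false));
}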
1036 // Generate code for entering a JS function with the interpreter. 1133 // Generate code for entering a JS function with the interpreter.
1037 // On entry to the function the receiver and arguments have been pushed on the 1134 // On entry to the function the receiver and arguments have been pushed on the
1038 // stack left to right. The actual argument count matches the formal parameter 1135 // stack left to right. The actual argument count matches the formal parameter
1039 // count expected by the function. 1136 // count expected by the function.
1040 // 1137 //
1041 // The live registers are: 1138 // The live registers are:
1042 // o r4: the JS function object being called. 1139 // o r4: the JS function object being called.
1043 // o r6: the new target 1140 // o r6: the new target
1044 // o cp: our context 1141 // o cp: our context
1045 // o pp: the caller's constant pool pointer (if enabled) 1142 // o pp: the caller's constant pool pointer (if enabled)
1046 // o fp: the caller's frame pointer 1143 // o fp: the caller's frame pointer
1047 // o sp: stack pointer 1144 // o sp: stack pointer
1048 // o lr: return address 1145 // o lr: return address
1049 // 1146 //
1050 // The function builds an interpreter frame. See InterpreterFrameConstants in 1147 // The function builds an interpreter frame. See InterpreterFrameConstants in
1051 // frames.h for its layout. 1148 // frames.h for its layout.
1052 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { 1149 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
1053 ProfileEntryHookStub::MaybeCallEntryHook(masm); 1150 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1054 1151
1152 Register closure = r4;
1153 Register feedback_vector = r5;
1154
1155 // Load the feedback vector from the closure.
1156 __ LoadP(feedback_vector,
1157 FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
1158 __ LoadP(feedback_vector,
1159 FieldMemOperand(feedback_vector, Cell::kValueOffset));
1160 // Read off the optimized code slot in the feedback vector, and if there
1161 // is optimized code or an optimization marker, call that instead.
1162 MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
1163
1055 // Open a frame scope to indicate that there is a frame on the stack. The 1164 // Open a frame scope to indicate that there is a frame on the stack. The
1056 // MANUAL indicates that the scope shouldn't actually generate code to set up 1165 // MANUAL indicates that the scope shouldn't actually generate code to set up
1057 // the frame (that is done below). 1166 // the frame (that is done below).
1058 FrameScope frame_scope(masm, StackFrame::MANUAL); 1167 FrameScope frame_scope(masm, StackFrame::MANUAL);
1059 __ PushStandardFrame(r4); 1168 __ PushStandardFrame(closure);
1060
1061 // First check if there is optimized code in the feedback vector which we
1062 // could call instead.
1063 Label switch_to_optimized_code;
1064
1065 Register optimized_code_entry = r7;
1066 __ LoadP(r3, FieldMemOperand(r4, JSFunction::kFeedbackVectorOffset));
1067 __ LoadP(r3, FieldMemOperand(r3, Cell::kValueOffset));
1068 __ LoadP(
1069 optimized_code_entry,
1070 FieldMemOperand(r3, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
1071 FeedbackVector::kHeaderSize));
1072 __ LoadP(optimized_code_entry,
1073 FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
1074 __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
1075 1169
1076 // Get the bytecode array from the function object (or from the DebugInfo if 1170 // Get the bytecode array from the function object (or from the DebugInfo if
1077 // it is present) and load it into kInterpreterBytecodeArrayRegister. 1171 // it is present) and load it into kInterpreterBytecodeArrayRegister.
1078 Label maybe_load_debug_bytecode_array, bytecode_array_loaded; 1172 Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
1079 __ LoadP(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); 1173 __ LoadP(r3, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
1080 // Load original bytecode array or the debug copy. 1174 // Load original bytecode array or the debug copy.
1081 __ LoadP(kInterpreterBytecodeArrayRegister, 1175 __ LoadP(kInterpreterBytecodeArrayRegister,
1082 FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset)); 1176 FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
1083 __ LoadP(r5, FieldMemOperand(r3, SharedFunctionInfo::kDebugInfoOffset)); 1177 __ LoadP(r7, FieldMemOperand(r3, SharedFunctionInfo::kDebugInfoOffset));
1084 __ TestIfSmi(r5, r0); 1178 __ TestIfSmi(r7, r0);
1085 __ bne(&maybe_load_debug_bytecode_array, cr0); 1179 __ bne(&maybe_load_debug_bytecode_array, cr0);
1086 __ bind(&bytecode_array_loaded); 1180 __ bind(&bytecode_array_loaded);
1087 1181
1088 // Check whether we should continue to use the interpreter. 1182 // Check whether we should continue to use the interpreter.
1089 // TODO(rmcilroy) Remove self healing once liveedit only has to deal with 1183 // TODO(rmcilroy) Remove self healing once liveedit only has to deal with
1090 // Ignition bytecode. 1184 // Ignition bytecode.
1091 Label switch_to_different_code_kind; 1185 Label switch_to_different_code_kind;
1092 __ LoadP(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset)); 1186 __ LoadP(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
1093 __ mov(ip, Operand(masm->CodeObject())); // Self-reference to this code. 1187 __ mov(ip, Operand(masm->CodeObject())); // Self-reference to this code.
1094 __ cmp(r3, ip); 1188 __ cmp(r3, ip);
1095 __ bne(&switch_to_different_code_kind); 1189 __ bne(&switch_to_different_code_kind);
1096 1190
1097 // Increment invocation count for the function. 1191 // Increment invocation count for the function.
1098 __ LoadP(r7, FieldMemOperand(r4, JSFunction::kFeedbackVectorOffset)); 1192 __ LoadP(
1099 __ LoadP(r7, FieldMemOperand(r7, Cell::kValueOffset)); 1193 r8, FieldMemOperand(feedback_vector,
1100 __ LoadP(r8, FieldMemOperand( 1194 FeedbackVector::kInvocationCountIndex * kPointerSize +
1101 r7, FeedbackVector::kInvocationCountIndex * kPointerSize + 1195 FeedbackVector::kHeaderSize));
1102 FeedbackVector::kHeaderSize));
1103 __ AddSmiLiteral(r8, r8, Smi::FromInt(1), r0); 1196 __ AddSmiLiteral(r8, r8, Smi::FromInt(1), r0);
1104 __ StoreP(r8, FieldMemOperand( 1197 __ StoreP(
1105 r7, FeedbackVector::kInvocationCountIndex * kPointerSize + 1198 r8,
1106 FeedbackVector::kHeaderSize), 1199 FieldMemOperand(feedback_vector,
1107 r0); 1200 FeedbackVector::kInvocationCountIndex * kPointerSize +
1201 FeedbackVector::kHeaderSize),
1202 r0);
1108 1203
1109 // Check function data field is actually a BytecodeArray object. 1204 // Check function data field is actually a BytecodeArray object.
1110 1205
1111 if (FLAG_debug_code) { 1206 if (FLAG_debug_code) {
1112 __ TestIfSmi(kInterpreterBytecodeArrayRegister, r0); 1207 __ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
1113 __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, cr0); 1208 __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, cr0);
1114 __ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg, 1209 __ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
1115 BYTECODE_ARRAY_TYPE); 1210 BYTECODE_ARRAY_TYPE);
1116 __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry); 1211 __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
1117 } 1212 }
(...skipping 57 matching lines...)
1175 1270
1176 // The return value is in r3. 1271 // The return value is in r3.
1177 LeaveInterpreterFrame(masm, r5); 1272 LeaveInterpreterFrame(masm, r5);
1178 __ blr(); 1273 __ blr();
1179 1274
1180 // Load debug copy of the bytecode array if it exists. 1275 // Load debug copy of the bytecode array if it exists.
1181 // kInterpreterBytecodeArrayRegister is already loaded with 1276 // kInterpreterBytecodeArrayRegister is already loaded with
1182 // SharedFunctionInfo::kFunctionDataOffset. 1277 // SharedFunctionInfo::kFunctionDataOffset.
1183 Label done; 1278 Label done;
1184 __ bind(&maybe_load_debug_bytecode_array); 1279 __ bind(&maybe_load_debug_bytecode_array);
1185 __ LoadP(ip, FieldMemOperand(r5, DebugInfo::kFlagsOffset)); 1280 __ LoadP(ip, FieldMemOperand(r7, DebugInfo::kFlagsOffset));
1186 __ SmiUntag(ip); 1281 __ SmiUntag(ip);
1187 __ andi(r0, ip, Operand(DebugInfo::kHasBreakInfo)); 1282 __ andi(r0, ip, Operand(DebugInfo::kHasBreakInfo));
1188 __ beq(&done, cr0); 1283 __ beq(&done, cr0);
1189 __ LoadP(kInterpreterBytecodeArrayRegister, 1284 __ LoadP(kInterpreterBytecodeArrayRegister,
1190 FieldMemOperand(r5, DebugInfo::kDebugBytecodeArrayOffset)); 1285 FieldMemOperand(r7, DebugInfo::kDebugBytecodeArrayOffset));
1191 __ bind(&done); 1286 __ bind(&done);
1192 __ b(&bytecode_array_loaded); 1287 __ b(&bytecode_array_loaded);
1193 1288
1194 // If the shared code is no longer this entry trampoline, then the underlying 1289 // If the shared code is no longer this entry trampoline, then the underlying
1195 // function has been switched to a different kind of code and we heal the 1290 // function has been switched to a different kind of code and we heal the
1196 // closure by switching the code entry field over to the new code as well. 1291 // closure by switching the code entry field over to the new code as well.
1197 __ bind(&switch_to_different_code_kind); 1292 __ bind(&switch_to_different_code_kind);
1198 __ LeaveFrame(StackFrame::JAVA_SCRIPT); 1293 __ LeaveFrame(StackFrame::JAVA_SCRIPT);
1199 __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); 1294 __ LoadP(r7, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
1200 __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kCodeOffset)); 1295 __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kCodeOffset));
1201 __ addi(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); 1296 __ addi(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
1202 __ StoreP(r7, FieldMemOperand(r4, JSFunction::kCodeEntryOffset), r0); 1297 __ StoreP(r7, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
1203 __ RecordWriteCodeEntryField(r4, r7, r8); 1298 __ RecordWriteCodeEntryField(closure, r7, r8);
1204 __ JumpToJSEntry(r7); 1299 __ JumpToJSEntry(r7);
1205
1206 // If there is optimized code on the type feedback vector, check if it is good
1207 // to run, and if so, self heal the closure and call the optimized code.
1208 __ bind(&switch_to_optimized_code);
1209 __ LeaveFrame(StackFrame::JAVA_SCRIPT);
1210 Label gotta_call_runtime;
1211
1212 // Check if the optimized code is marked for deopt.
1213 __ lwz(r8, FieldMemOperand(optimized_code_entry,
1214 Code::kKindSpecificFlags1Offset));
1215 __ TestBit(r8, Code::kMarkedForDeoptimizationBit, r0);
1216 __ bne(&gotta_call_runtime, cr0);
1217
1218 // Optimized code is good, get it into the closure and link the closure into
1219 // the optimized functions list, then tail call the optimized code.
1220 ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, r4, r9, r8,
1221 r5);
1222 __ JumpToJSEntry(optimized_code_entry);
1223
1224 // Optimized code is marked for deopt, bailout to the CompileLazy runtime
1225 // function which will clear the feedback vector's optimized code slot.
1226 __ bind(&gotta_call_runtime);
1227 GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
1228 } 1300 }
1229 1301
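The interpreter entry trampoline no longer performs its own weak-cell check after PushStandardFrame (the removed switch_to_optimized_code path above): it now loads the feedback vector once, lets MaybeTailCallOptimizedCodeSlot divert to optimized code or a compile runtime call before any frame is built, and then reuses the vector register for the invocation-count bump. A hypothetical sketch of the reordered control flow (illustrative names, not V8 code):

#include <cstdio>

struct Closure { bool optimized_code_or_marker; };

void TailCallOptimizedOrCompile() {
  std::puts("divert before any interpreter frame exists");
}

void BuildInterpreterFrameAndDispatch() {
  std::puts("push standard frame, load bytecode, bump invocation count, dispatch");
}

void InterpreterEntryTrampoline(const Closure& closure) {
  // New order: consult the feedback vector first, so the fast path never has
  // to LeaveFrame() before jumping to optimized code.
  if (closure.optimized_code_or_marker) {
    TailCallOptimizedOrCompile();
    return;
  }
  BuildInterpreterFrameAndDispatch();
}

int main() {
  InterpreterEntryTrampoline(Closure{true});
  InterpreterEntryTrampoline(Closure{false});
}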
1230 static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args, 1302 static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
1231 Register scratch, 1303 Register scratch,
1232 Label* stack_overflow) { 1304 Label* stack_overflow) {
1233 // Check the stack for overflow. We are not trying to catch 1305 // Check the stack for overflow. We are not trying to catch
1234 // interruptions (e.g. debug break and preemption) here, so the "real stack 1306 // interruptions (e.g. debug break and preemption) here, so the "real stack
1235 // limit" is checked. 1307 // limit" is checked.
1236 __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex); 1308 __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
1237 // Make scratch the space we have left. The stack might already be overflowed 1309 // Make scratch the space we have left. The stack might already be overflowed
(...skipping 214 matching lines...)
1452 __ StoreP(r5, 1524 __ StoreP(r5,
1453 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); 1525 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1454 1526
1455 Generate_InterpreterEnterBytecode(masm); 1527 Generate_InterpreterEnterBytecode(masm);
1456 } 1528 }
1457 1529
1458 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) { 1530 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
1459 Generate_InterpreterEnterBytecode(masm); 1531 Generate_InterpreterEnterBytecode(masm);
1460 } 1532 }
1461 1533
1534 void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
1535 // ----------- S t a t e -------------
1536 // -- r3 : argument count (preserved for callee)
1537 // -- r6 : new target (preserved for callee)
1538 // -- r4 : target function (preserved for callee)
1539 // -----------------------------------
1540 Register closure = r4;
1541
1542 // Get the feedback vector.
1543 Register feedback_vector = r5;
1544 __ LoadP(feedback_vector,
1545 FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
1546 __ LoadP(feedback_vector,
1547 FieldMemOperand(feedback_vector, Cell::kValueOffset));
1548
1549 // The feedback vector must be defined.
1550 if (FLAG_debug_code) {
1551 __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
1552 __ Assert(ne, BailoutReason::kExpectedFeedbackVector);
1553 }
1554
1555 // Is there an optimization marker or optimized code in the feedback vector?
1556 MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
1557
1558 // Otherwise, tail call the SFI code.
1559 GenerateTailCallToSharedCode(masm);
1560 }
1561
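Generate_CheckOptimizationMarker is new in this patch: it loads the feedback vector from the closure, asserts under --debug-code that the vector exists, lets MaybeTailCallOptimizedCodeSlot act on any marker or optimized code, and otherwise tail-calls the SharedFunctionInfo code. A hypothetical model with invented names, not the V8 API:

#include <cassert>
#include <cstdio>

struct FeedbackVector {
  const char* divert;  // non-null models a marker or optimized code in the slot
};

void CheckOptimizationMarker(const FeedbackVector* vector) {
  assert(vector != nullptr);          // the vector must already have been allocated
  if (vector->divert != nullptr) {    // MaybeTailCallOptimizedCodeSlot
    std::printf("divert: %s\n", vector->divert);
    return;
  }
  std::puts("tail-call the SharedFunctionInfo code");  // GenerateTailCallToSharedCode
}

int main() {
  FeedbackVector with_marker{"Runtime::kCompileOptimized_Concurrent"};
  FeedbackVector empty{nullptr};
  CheckOptimizationMarker(&with_marker);
  CheckOptimizationMarker(&empty);
}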
1462 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { 1562 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
1463 // ----------- S t a t e ------------- 1563 // ----------- S t a t e -------------
1464 // -- r3 : argument count (preserved for callee) 1564 // -- r3 : argument count (preserved for callee)
1465 // -- r6 : new target (preserved for callee) 1565 // -- r6 : new target (preserved for callee)
1466 // -- r4 : target function (preserved for callee) 1566 // -- r4 : target function (preserved for callee)
1467 // ----------------------------------- 1567 // -----------------------------------
1468 // First lookup code, maybe we don't need to compile! 1568 // First lookup code, maybe we don't need to compile!
1469 Label gotta_call_runtime; 1569 Label gotta_call_runtime;
1470 Label try_shared;
1471 1570
1472 Register closure = r4; 1571 Register closure = r4;
1473 Register index = r5; 1572 Register feedback_vector = r5;
1474 1573
1475 // Do we have a valid feedback vector? 1574 // Do we have a valid feedback vector?
1476 __ LoadP(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset)); 1575 __ LoadP(feedback_vector,
1477 __ LoadP(index, FieldMemOperand(index, Cell::kValueOffset)); 1576 FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
1478 __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime); 1577 __ LoadP(feedback_vector,
1578 FieldMemOperand(feedback_vector, Cell::kValueOffset));
1579 __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
1580 &gotta_call_runtime);
1479 1581
1480 // Is optimized code available in the feedback vector? 1582 // Is there an optimization marker or optimized code in the feedback vector?
1481 Register entry = r7; 1583 MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
1482 __ LoadP(entry, FieldMemOperand(index, FeedbackVector::kOptimizedCodeIndex *
1483 kPointerSize +
1484 FeedbackVector::kHeaderSize));
1485 __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
1486 __ JumpIfSmi(entry, &try_shared);
1487
1488 // Found code, check if it is marked for deopt, if so call into runtime to
1489 // clear the optimized code slot.
1490 __ lwz(r8, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
1491 __ TestBit(r8, Code::kMarkedForDeoptimizationBit, r0);
1492 __ bne(&gotta_call_runtime, cr0);
1493
1494 // Code is good, get it into the closure and tail call.
1495 ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r9, r8, r5);
1496 __ JumpToJSEntry(entry);
1497 1584
1498 // We found no optimized code. 1585 // We found no optimized code.
1499 __ bind(&try_shared); 1586 Register entry = r7;
1500 __ LoadP(entry, 1587 __ LoadP(entry,
1501 FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); 1588 FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
1502 // Is the shared function marked for tier up?
1503 __ lwz(r8, FieldMemOperand(entry, SharedFunctionInfo::kCompilerHintsOffset));
1504 __ TestBit(r8, SharedFunctionInfo::MarkedForTierUpBit::kShift, r0);
1505 __ bne(&gotta_call_runtime, cr0);
1506 1589
1507 // If SFI points to anything other than CompileLazy, install that. 1590 // If SFI points to anything other than CompileLazy, install that.
1508 __ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset)); 1591 __ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
1509 __ mov(r8, Operand(masm->CodeObject())); 1592 __ mov(r8, Operand(masm->CodeObject()));
1510 __ cmp(entry, r8); 1593 __ cmp(entry, r8);
1511 __ beq(&gotta_call_runtime); 1594 __ beq(&gotta_call_runtime);
1512 1595
1513 // Install the SFI's code entry. 1596 // Install the SFI's code entry.
1514 __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); 1597 __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
1515 __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0); 1598 __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
1516 __ RecordWriteCodeEntryField(closure, entry, r8); 1599 __ RecordWriteCodeEntryField(closure, entry, r8);
1517 __ JumpToJSEntry(entry); 1600 __ JumpToJSEntry(entry);
1518 1601
1519 __ bind(&gotta_call_runtime); 1602 __ bind(&gotta_call_runtime);
1520 GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); 1603 GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
1521 } 1604 }
1522 1605
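Generate_CompileLazy is simplified accordingly: the per-closure deopt check and the MarkedForTierUpBit test on the SharedFunctionInfo are gone, since optimization markers and deopt eviction are now handled by MaybeTailCallOptimizedCodeSlot. A hypothetical model of the remaining flow (names invented for illustration, not the V8 API):

#include <cstdio>

struct SharedFunctionInfo { bool code_is_compile_lazy; };
struct JSFunction {
  bool has_feedback_vector;
  bool slot_diverts;              // optimized code or a marker is present
  SharedFunctionInfo shared;
};

const char* CompileLazy(const JSFunction& fn) {
  if (!fn.has_feedback_vector)           // no vector yet: must call the runtime
    return "Runtime::kCompileLazy";
  if (fn.slot_diverts)                   // MaybeTailCallOptimizedCodeSlot
    return "marker/optimized-code dispatch";
  if (!fn.shared.code_is_compile_lazy)   // SFI already has real code: install it
    return "install the SharedFunctionInfo code into the closure and jump";
  return "Runtime::kCompileLazy";
}

int main() {
  std::printf("%s\n", CompileLazy({false, false, {true}}));
  std::printf("%s\n", CompileLazy({true, true, {true}}));
  std::printf("%s\n", CompileLazy({true, false, {false}}));
}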
1523 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
1524 GenerateTailCallToReturnedCode(masm,
1525 Runtime::kCompileOptimized_NotConcurrent);
1526 }
1527
1528 void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
1529 GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
1530 }
1531
1532 void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) { 1606 void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
1533 // ----------- S t a t e ------------- 1607 // ----------- S t a t e -------------
1534 // -- r3 : argument count (preserved for callee) 1608 // -- r3 : argument count (preserved for callee)
1535 // -- r4 : new target (preserved for callee) 1609 // -- r4 : new target (preserved for callee)
1536 // -- r6 : target function (preserved for callee) 1610 // -- r6 : target function (preserved for callee)
1537 // ----------------------------------- 1611 // -----------------------------------
1538 Label failed; 1612 Label failed;
1539 { 1613 {
1540 FrameScope scope(masm, StackFrame::INTERNAL); 1614 FrameScope scope(masm, StackFrame::INTERNAL);
1541 // Preserve argument count for later compare. 1615 // Preserve argument count for later compare.
(...skipping 1593 matching lines...)
3135 } 3209 }
3136 // Now jump to the instructions of the returned code object. 3210 // Now jump to the instructions of the returned code object.
3137 __ Jump(r11); 3211 __ Jump(r11);
3138 } 3212 }
3139 3213
3140 #undef __ 3214 #undef __
3141 } // namespace internal 3215 } // namespace internal
3142 } // namespace v8 3216 } // namespace v8
3143 3217
3144 #endif // V8_TARGET_ARCH_PPC 3218 #endif // V8_TARGET_ARCH_PPC