Chromium Code Reviews

Unified Diff: src/builtins/ppc/builtins-ppc.cc

Issue 2897483002: PPC/s390: Reland: [Interpreter] Transition JSFunctions to call optimized code when possible. (Closed)
Patch Set: minor fix Created 3 years, 7 months ago
 // Copyright 2014 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #if V8_TARGET_ARCH_PPC

 #include "src/codegen.h"
 #include "src/debug/debug.h"
 #include "src/deoptimizer.h"
 #include "src/full-codegen/full-codegen.h"
(...skipping 976 matching lines...)
 }

 void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
   Generate_JSEntryTrampolineHelper(masm, false);
 }

 void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
   Generate_JSEntryTrampolineHelper(masm, true);
 }

+static void ReplaceClosureEntryWithOptimizedCode(
+    MacroAssembler* masm, Register optimized_code_entry, Register closure,
+    Register scratch1, Register scratch2, Register scratch3) {
+  Register native_context = scratch1;
+  // Store code entry in the closure.
+  __ addi(optimized_code_entry, optimized_code_entry,
+          Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ StoreP(optimized_code_entry,
+            FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+  __ RecordWriteCodeEntryField(closure, optimized_code_entry, scratch2);
+
+  // Link the closure into the optimized function list.
+  // r7 : code entry
+  // r10: native context
+  // r4 : closure
+  __ LoadP(native_context, NativeContextMemOperand());
+  __ LoadP(scratch2, ContextMemOperand(native_context,
+                                       Context::OPTIMIZED_FUNCTIONS_LIST));
+  __ StoreP(scratch2,
+            FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset), r0);
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
+                      scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  const int function_list_offset =
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+  __ StoreP(
+      closure,
+      ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
+  // Save closure before the write barrier.
+  __ mr(scratch2, closure);
+  __ RecordWriteContextSlot(native_context, function_list_offset, closure,
+                            scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ mr(closure, scratch2);
+}
+
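For readers less used to the MacroAssembler idiom, here is a minimal standalone C++ sketch of what the new helper does conceptually: point the closure at the optimized code's first instruction, then prepend the closure to the native context's optimized-functions list. The Closure, Code and NativeContext types below are invented purely for illustration (they are not V8's classes), and the sketch omits the write barriers the real builtin emits after each store.

#include <cstdint>
#include <iostream>

struct Code {
  uintptr_t instruction_start;  // stands in for "code + Code::kHeaderSize - kHeapObjectTag"
};

struct Closure {
  uintptr_t code_entry = 0;          // stands in for JSFunction::kCodeEntryOffset
  Closure* next_function = nullptr;  // stands in for JSFunction::kNextFunctionLinkOffset
};

struct NativeContext {
  Closure* optimized_functions_list = nullptr;  // OPTIMIZED_FUNCTIONS_LIST slot
};

// Same two steps as ReplaceClosureEntryWithOptimizedCode: install the entry
// point and link the closure at the head of the optimized-functions list.
void ReplaceClosureEntryWithOptimizedCode(Closure* closure, Code* code,
                                          NativeContext* context) {
  closure->code_entry = code->instruction_start;
  closure->next_function = context->optimized_functions_list;
  context->optimized_functions_list = closure;
}

int main() {
  NativeContext context;
  Closure f;
  Code optimized{0x1000};
  ReplaceClosureEntryWithOptimizedCode(&f, &optimized, &context);
  std::cout << std::hex << f.code_entry << "\n";  // prints 1000 (hex)
  return 0;
}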
 static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
   Register args_count = scratch;

   // Get the arguments + receiver count.
   __ LoadP(args_count,
            MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
   __ lwz(args_count,
          FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));

   // Leave the frame (also dropping the register file).
(...skipping 20 matching lines...)
 // frames.h for its layout.
 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   ProfileEntryHookStub::MaybeCallEntryHook(masm);

   // Open a frame scope to indicate that there is a frame on the stack. The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
   // the frame (that is done below).
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ PushStandardFrame(r4);

+  // First check if there is optimized code in the feedback vector which we
+  // could call instead.
+  Label switch_to_optimized_code;
+
+  Register optimized_code_entry = r7;
+  __ LoadP(r3, FieldMemOperand(r4, JSFunction::kFeedbackVectorOffset));
+  __ LoadP(r3, FieldMemOperand(r3, Cell::kValueOffset));
+  __ LoadP(
+      optimized_code_entry,
+      FieldMemOperand(r3, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+                              FeedbackVector::kHeaderSize));
+  __ LoadP(optimized_code_entry,
+           FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+  __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+
   // Get the bytecode array from the function object (or from the DebugInfo if
   // it is present) and load it into kInterpreterBytecodeArrayRegister.
   __ LoadP(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
   Label array_done;
   Register debug_info = r5;
   DCHECK(!debug_info.is(r3));
   __ LoadP(debug_info,
            FieldMemOperand(r3, SharedFunctionInfo::kDebugInfoOffset));
   // Load original bytecode array or the debug copy.
   __ LoadP(kInterpreterBytecodeArrayRegister,
(...skipping 100 matching lines...)
   // function has been switched to a different kind of code and we heal the
   // closure by switching the code entry field over to the new code as well.
   __ bind(&switch_to_different_code_kind);
   __ LeaveFrame(StackFrame::JAVA_SCRIPT);
   __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
   __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kCodeOffset));
   __ addi(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ StoreP(r7, FieldMemOperand(r4, JSFunction::kCodeEntryOffset), r0);
   __ RecordWriteCodeEntryField(r4, r7, r8);
   __ JumpToJSEntry(r7);
+
+  // If there is optimized code on the type feedback vector, check if it is
+  // good to run, and if so, self heal the closure and call the optimized code.
+  __ bind(&switch_to_optimized_code);
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+  Label gotta_call_runtime;
+
+  // Check if the optimized code is marked for deopt.
+  __ lbz(r8, FieldMemOperand(optimized_code_entry,
+                             Code::kKindSpecificFlags1Offset));
+  __ TestBit(r8, Code::kMarkedForDeoptimizationBit, r0);
+  __ bne(&gotta_call_runtime, cr0);
+
+  // Optimized code is good, get it into the closure and link the closure into
+  // the optimized functions list, then tail call the optimized code.
+  ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, r4, r9, r8,
+                                       r5);
+  __ JumpToJSEntry(optimized_code_entry);
+
+  // Optimized code is marked for deopt, bail out to the
+  // kEvictOptimizedCodeSlot runtime function, which will clear the feedback
+  // vector's optimized code slot.
+  __ bind(&gotta_call_runtime);
+  GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
 }

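A minimal sketch (not V8 code) of the decision the trampoline now makes on entry, before it ever touches the bytecode array. It assumes the feedback vector's optimized-code slot behaves like an optional value that is absent when the weak cell has been cleared; the type and function names below are invented for illustration.

#include <iostream>
#include <optional>

struct OptimizedCode {
  bool marked_for_deoptimization;  // models Code::kMarkedForDeoptimizationBit
};

enum class Action { kCallOptimizedCode, kEvictOptimizedCodeSlot, kRunBytecode };

// Mirrors the emitted checks: use the optimized code when it exists and is not
// marked for deoptimization, evict the slot when it is, otherwise interpret.
Action DispatchOnEntry(const std::optional<OptimizedCode>& slot) {
  if (slot.has_value()) {                    // weak cell not cleared
    if (!slot->marked_for_deoptimization) {  // kMarkedForDeoptimizationBit clear
      return Action::kCallOptimizedCode;     // ReplaceClosureEntry..., then jump
    }
    return Action::kEvictOptimizedCodeSlot;  // Runtime::kEvictOptimizedCodeSlot
  }
  return Action::kRunBytecode;               // fall through to the interpreter
}

int main() {
  std::cout << (DispatchOnEntry(OptimizedCode{false}) ==
                Action::kCallOptimizedCode)
            << "\n";                                                 // prints 1
  std::cout << (DispatchOnEntry(std::nullopt) == Action::kRunBytecode) << "\n";
  return 0;
}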
 static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
                                         Register scratch,
                                         Label* stack_overflow) {
   // Check the stack for overflow. We are not trying to catch
   // interruptions (e.g. debug break and preemption) here, so the "real stack
   // limit" is checked.
   __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
   // Make scratch the space we have left. The stack might already be overflowed
(...skipping 247 matching lines...)
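As an aside, here is a plain-C++ sketch of the comparison Generate_StackOverflowCheck performs, assuming a downward-growing stack where the "real stack limit" is the lowest address the stack may reach; the helper name and the sample numbers are hypothetical, not taken from V8.

#include <cstddef>
#include <cstdint>
#include <iostream>

constexpr std::size_t kPointerSize = sizeof(void*);

// True when pushing num_args pointer-sized slots would cross the real stack
// limit. Signed arithmetic keeps the check sane if sp is already below it.
bool WouldOverflowStack(std::uintptr_t sp, std::uintptr_t real_stack_limit,
                        std::size_t num_args) {
  std::intptr_t space_left = static_cast<std::intptr_t>(sp) -
                             static_cast<std::intptr_t>(real_stack_limit);
  return space_left < static_cast<std::intptr_t>(num_args * kPointerSize);
}

int main() {
  // Hypothetical addresses: 64 bytes of headroom, 16 arguments needed.
  std::cout << WouldOverflowStack(0x1040, 0x1000, 16) << "\n";  // 1 on a 64-bit build
  return 0;
}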
   __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
   __ JumpIfSmi(entry, &try_shared);

   // Found code, check if it is marked for deopt, if so call into runtime to
   // clear the optimized code slot.
   __ lbz(r8, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
   __ TestBit(r8, Code::kMarkedForDeoptimizationBit, r0);
   __ bne(&gotta_call_runtime, cr0);

   // Code is good, get it into the closure and tail call.
-  __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
-  __ RecordWriteCodeEntryField(closure, entry, r8);
-
-  // Load native context into r9.
-  Register native_context = r9;
-  __ LoadP(native_context, NativeContextMemOperand());
-
-  // Link the closure into the optimized function list.
-  __ LoadP(
-      r8, ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
-  __ StoreP(r8, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset),
-            r0);
-  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r8, r5,
-                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  const int function_list_offset =
-      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
-  __ StoreP(
-      closure,
-      ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
-  // Save closure before the write barrier.
-  __ mr(r8, closure);
-  __ RecordWriteContextSlot(native_context, function_list_offset, r8, r5,
-                            kLRHasNotBeenSaved, kDontSaveFPRegs);
+  ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r9, r8, r5);
   __ JumpToJSEntry(entry);

   // We found no optimized code.
   __ bind(&try_shared);
   __ LoadP(entry,
            FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
   // Is the shared function marked for tier up?
   __ lbz(r8, FieldMemOperand(entry,
                              SharedFunctionInfo::kMarkedForTierUpByteOffset));
   __ TestBit(r8, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0);
(...skipping 1672 matching lines...)
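Both the "marked for deoptimization" test above and the tier-up test use the same lbz + TestBit shape: load one flag byte, then test a single bit within it. Here is a small standalone sketch of that pattern, with placeholder bit positions rather than V8's real constants:

#include <cstdint>
#include <iostream>

constexpr int kMarkedForDeoptimizationBit = 0;    // placeholder, not V8's value
constexpr int kMarkedForTierUpBitWithinByte = 3;  // placeholder, not V8's value

// Equivalent of: lbz scratch, <flag byte>; TestBit(scratch, bit); bne <label>
bool TestBit(uint8_t flags_byte, int bit_within_byte) {
  return (flags_byte >> bit_within_byte) & 1;
}

int main() {
  uint8_t flags = 1 << kMarkedForTierUpBitWithinByte;
  std::cout << TestBit(flags, kMarkedForDeoptimizationBit) << "\n";  // prints 0
  std::cout << TestBit(flags, kMarkedForTierUpBitWithinByte) << "\n";  // prints 1
  return 0;
}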
   }
   // Now jump to the instructions of the returned code object.
   __ Jump(r11);
 }

 #undef __
 }  // namespace internal
 }  // namespace v8

 #endif  // V8_TARGET_ARCH_PPC