Chromium Code Reviews

Unified Diff: src/builtins/s390/builtins-s390.cc

Issue 2947903002: PPC/s390: [compiler] Drive optimizations with feedback vector (reland) (Closed)
Patch Set: fix ppc issue (created 3 years, 6 months ago)
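
For reviewers skimming the unified diff below: the core of this patch is a new MaybeTailCallOptimizedCodeSlot helper that replaces the ad-hoc optimized-code checks in Generate_InterpreterEntryTrampoline and Generate_CompileLazy and the now-deleted Generate_InOptimizationQueue / Generate_CompileOptimized* builtins. The optimized-code slot of the feedback vector now holds either a Smi optimization marker or a WeakCell pointing at optimized code, and the helper dispatches on that. The plain-C++ sketch below only models that control flow; the Slot struct, the TailCall* helpers and the stack_limit_hit flag are hypothetical stand-ins for what the generated s390 assembly actually reads, not V8 API.

// Illustrative model only: mirrors the control flow that the new
// MaybeTailCallOptimizedCodeSlot helper emits as s390 assembly. The
// OptimizationMarker values match V8's enum; Slot, the TailCall*
// helpers and stack_limit_hit are hypothetical stand-ins.
#include <cstdio>

enum class OptimizationMarker {
  kNone,
  kCompileOptimized,
  kCompileOptimizedConcurrent,
  kInOptimizationQueue,
};

// Models the feedback vector's optimized-code slot: either a Smi
// marker or a (possibly cleared) weak reference to a Code object.
struct Slot {
  bool is_marker;             // Smi marker vs. WeakCell
  OptimizationMarker marker;  // valid only when is_marker
  const void* code;           // nullptr models a cleared WeakCell
  bool marked_for_deopt;      // Code::kMarkedForDeoptimizationBit
};

// Stand-ins for GenerateTailCallToReturnedCode and the jump to
// optimized code that the builtin actually generates.
void TailCallRuntime(const char* function_id) {
  std::printf("tail call Runtime::k%s\n", function_id);
}
void TailCallOptimizedCode(const void*) {
  std::printf("link closure, then jump to optimized code\n");
}

// Returns true if it tail-called something, false on fall-through
// to the normal (interpreter) entry.
bool MaybeTailCallOptimizedCodeSlot(const Slot& slot, bool stack_limit_hit) {
  if (slot.is_marker) {
    // Smi optimization marker path.
    if (slot.marker == OptimizationMarker::kNone) return false;
    if (slot.marker == OptimizationMarker::kCompileOptimized) {
      TailCallRuntime("CompileOptimized_NotConcurrent");
      return true;
    }
    if (slot.marker == OptimizationMarker::kCompileOptimizedConcurrent) {
      TailCallRuntime("CompileOptimized_Concurrent");
      return true;
    }
    // Otherwise the marker is kInOptimizationQueue. Only poll for the
    // finished concurrent job when the stack-limit check fires anyway;
    // polling on every entry would be too expensive.
    if (stack_limit_hit) {
      TailCallRuntime("TryInstallOptimizedCode");
      return true;
    }
    return false;
  }
  // WeakCell path: a cleared cell or deoptimized code falls back.
  if (slot.code == nullptr) return false;
  if (slot.marked_for_deopt) {
    TailCallRuntime("EvictOptimizedCodeSlot");
    return true;
  }
  TailCallOptimizedCode(slot.code);
  return true;
}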
Index: src/builtins/s390/builtins-s390.cc
diff --git a/src/builtins/s390/builtins-s390.cc b/src/builtins/s390/builtins-s390.cc
index 8896977dc352c60b0f192dce3224fc2c1f56f2ac..ce3679cca854252c08758d5d025bd2e8795cfdba 100644
--- a/src/builtins/s390/builtins-s390.cc
+++ b/src/builtins/s390/builtins-s390.cc
@@ -424,22 +424,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ JumpToJSEntry(ip);
}
-void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- Label ok;
- __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
- __ bge(&ok, Label::kNear);
-
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
-
- __ bind(&ok);
- GenerateTailCallToSharedCode(masm);
-}
-
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@@ -1029,6 +1013,118 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ AddP(sp, sp, args_count);
}
+// Tail-call |function_id| if |smi_entry| == |marker|
+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
+ Register smi_entry,
+ OptimizationMarker marker,
+ Runtime::FunctionId function_id) {
+ Label no_match;
+ __ CmpSmiLiteral(smi_entry, Smi::FromEnum(marker), r0);
+ __ bne(&no_match);
+ GenerateTailCallToReturnedCode(masm, function_id);
+ __ bind(&no_match);
+}
+
+static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register feedback_vector,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ // ----------- S t a t e -------------
+ // -- r2 : argument count (preserved for callee if needed, and caller)
+ // -- r5 : new target (preserved for callee if needed, and caller)
+ // -- r3 : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -----------------------------------
+ DCHECK(
+ !AreAliased(feedback_vector, r2, r3, r5, scratch1, scratch2, scratch3));
+
+ Label optimized_code_slot_is_cell, fallthrough;
+
+ Register closure = r3;
+ Register optimized_code_entry = scratch1;
+
+ const int kOptimizedCodeCellOffset =
+ FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize;
+ __ LoadP(optimized_code_entry,
+ FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
+
+ // Check if the code entry is a Smi. If yes, we interpret it as an
+ // optimisation marker. Otherwise, interpret it as a weak cell to a code
+ // object.
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+
+ {
+ // Optimized code slot is a Smi optimization marker.
+
+ // Fall through if no optimization trigger.
+ __ CmpSmiLiteral(optimized_code_entry,
+ Smi::FromEnum(OptimizationMarker::kNone), r0);
+ __ beq(&fallthrough);
+
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ {
+ // Otherwise, the marker is InOptimizationQueue.
+ if (FLAG_debug_code) {
+ __ CmpSmiLiteral(
+ optimized_code_entry,
+ Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0);
+ __ Assert(eq, kExpectedOptimizationSentinel);
+ }
+
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
+ __ bge(&fallthrough, Label::kNear);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ }
+ }
+
+ {
+ // Optimized code slot is a WeakCell.
+ __ bind(&optimized_code_slot_is_cell);
+
+ __ LoadP(optimized_code_entry,
+ FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(optimized_code_entry, &fallthrough);
+
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ Label found_deoptimized_code;
+ __ LoadP(scratch2, FieldMemOperand(optimized_code_entry,
+ Code::kKindSpecificFlags1Offset));
+ __ TestBit(scratch2, Code::kMarkedForDeoptimizationBit, r0);
+ __ bne(&found_deoptimized_code);
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ // The feedback vector is no longer used, so re-use it as a scratch
+ // register.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
+ scratch2, scratch3, feedback_vector);
+ __ Jump(optimized_code_entry);
+
+ // Optimized code slot contains deoptimized code, evict it and re-enter the
+ // closure's code.
+ __ bind(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ }
+
+ // Fall-through if the optimized code cell is clear and there is no
+ // optimization marker.
+ __ bind(&fallthrough);
+}
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@@ -1048,37 +1144,33 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ Register closure = r3;
+ Register feedback_vector = r4;
+
+ // Load the feedback vector from the closure.
+ __ LoadP(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ // Read off the optimized code slot in the feedback vector, and if there
+ // is optimized code or an optimization marker, call that instead.
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ PushStandardFrame(r3);
-
- // First check if there is optimized code in the feedback vector which we
- // could call instead.
- Label switch_to_optimized_code;
-
- Register optimized_code_entry = r6;
- __ LoadP(r2, FieldMemOperand(r3, JSFunction::kFeedbackVectorOffset));
- __ LoadP(r2, FieldMemOperand(r2, Cell::kValueOffset));
- __ LoadP(
- optimized_code_entry,
- FieldMemOperand(r2, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ LoadP(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+ __ PushStandardFrame(closure);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
- __ LoadP(r2, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r2, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Load original bytecode array or the debug copy.
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
- __ LoadP(r4, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
-
- __ TestIfSmi(r4);
+ __ LoadP(r6, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
+ __ TestIfSmi(r6);
__ bne(&maybe_load_debug_bytecode_array);
__ bind(&bytecode_array_loaded);
@@ -1091,15 +1183,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bne(&switch_to_different_code_kind);
// Increment invocation count for the function.
- __ LoadP(r6, FieldMemOperand(r3, JSFunction::kFeedbackVectorOffset));
- __ LoadP(r6, FieldMemOperand(r6, Cell::kValueOffset));
- __ LoadP(r1, FieldMemOperand(
- r6, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ LoadP(
+ r1, FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ AddSmiLiteral(r1, r1, Smi::FromInt(1), r0);
- __ StoreP(r1, FieldMemOperand(
- r6, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ StoreP(
+ r1, FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
@@ -1178,12 +1270,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// SharedFunctionInfo::kFunctionDataOffset.
Label done;
__ bind(&maybe_load_debug_bytecode_array);
- __ LoadP(ip, FieldMemOperand(r4, DebugInfo::kFlagsOffset));
+ __ LoadP(ip, FieldMemOperand(r6, DebugInfo::kFlagsOffset));
__ SmiUntag(ip);
__ tmll(ip, Operand(DebugInfo::kHasBreakInfo));
__ beq(&done);
__ LoadP(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(r4, DebugInfo::kDebugBytecodeArrayOffset));
+ FieldMemOperand(r6, DebugInfo::kDebugBytecodeArrayOffset));
__ bind(&done);
__ b(&bytecode_array_loaded);
@@ -1192,35 +1284,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// closure by switching the code entry field over to the new code as well.
__ bind(&switch_to_different_code_kind);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
- __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r6, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kCodeOffset));
__ AddP(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ StoreP(r6, FieldMemOperand(r3, JSFunction::kCodeEntryOffset), r0);
- __ RecordWriteCodeEntryField(r3, r6, r7);
+ __ StoreP(r6, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+ __ RecordWriteCodeEntryField(closure, r6, r7);
__ JumpToJSEntry(r6);
-
- // If there is optimized code on the type feedback vector, check if it is good
- // to run, and if so, self heal the closure and call the optimized code.
- __ bind(&switch_to_optimized_code);
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
- Label gotta_call_runtime;
-
- // Check if the optimized code is marked for deopt.
- __ LoadlW(r7, FieldMemOperand(optimized_code_entry,
- Code::kKindSpecificFlags1Offset));
- __ And(r0, r7, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ bne(&gotta_call_runtime);
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, r3, r8, r7,
- r4);
- __ JumpToJSEntry(optimized_code_entry);
-
- // Optimized code is marked for deopt, bailout to the CompileLazy runtime
- // function which will clear the feedback vector's optimized code slot.
- __ bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1454,6 +1523,34 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : argument count (preserved for callee)
+ // -- r5 : new target (preserved for callee)
+ // -- r3 : target function (preserved for callee)
+ // -----------------------------------
+ Register closure = r3;
+
+ // Get the feedback vector.
+ Register feedback_vector = r4;
+ __ LoadP(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ // The feedback vector must be defined.
+ if (FLAG_debug_code) {
+ __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
+ __ Assert(ne, BailoutReason::kExpectedFeedbackVector);
+ }
+
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
+
+ // Otherwise, tail call the SFI code.
+ GenerateTailCallToSharedCode(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argument count (preserved for callee)
@@ -1462,43 +1559,25 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
- Label try_shared;
Register closure = r3;
- Register index = r4;
+ Register feedback_vector = r4;
// Do we have a valid feedback vector?
- __ LoadP(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
- __ LoadP(index, FieldMemOperand(index, Cell::kValueOffset));
- __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+ __ LoadP(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
+ &gotta_call_runtime);
- // Is optimized code available in the feedback vector?
- Register entry = r6;
- __ LoadP(entry, FieldMemOperand(index, FeedbackVector::kOptimizedCodeIndex *
- kPointerSize +
- FeedbackVector::kHeaderSize));
- __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Found code, check if it is marked for deopt, if so call into runtime to
- // clear the optimized code slot.
- __ LoadlW(r7, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
- __ And(r0, r7, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ bne(&gotta_call_runtime);
-
- // Code is good, get it into the closure and tail call.
- ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r8, r7, r4);
- __ JumpToJSEntry(entry);
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
// We found no optimized code.
- __ bind(&try_shared);
+ Register entry = r6;
__ LoadP(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- // Is the shared function marked for tier up?
- __ LoadlW(r7,
- FieldMemOperand(entry, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBit(r7, SharedFunctionInfo::MarkedForTierUpBit::kShift, r0);
- __ bne(&gotta_call_runtime);
// If SFI points to anything other than CompileLazy, install that.
__ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
@@ -1516,15 +1595,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm,
- Runtime::kCompileOptimized_NotConcurrent);
-}
-
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argument count (preserved for callee)
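A closing note on the Generate_CompileLazy hunks: the inline WeakCell check and the MarkedForTierUpBit test on the SharedFunctionInfo are gone; the builtin now defers entirely to MaybeTailCallOptimizedCodeSlot and otherwise installs whatever code the SFI holds. A rough, self-contained C++ model of that flow, with hypothetical types and a flag standing in for the dispatch sketched above, could look like this:

// Rough model of the new Generate_CompileLazy flow in this patch.
// The structs and the slot_tail_called flag are hypothetical; the flag
// stands for the outcome of the dispatch modelled in the earlier sketch.
#include <cstdio>

struct SharedFunctionInfo {
  bool code_is_compile_lazy;  // does kCodeOffset still hold the CompileLazy stub?
};

struct Closure {
  bool has_feedback_vector;   // false models an undefined feedback vector
  SharedFunctionInfo sfi;
};

void CompileLazy(const Closure& closure, bool slot_tail_called) {
  // No valid feedback vector yet: go straight to the runtime.
  if (!closure.has_feedback_vector) {
    std::printf("tail call Runtime::kCompileLazy\n");
    return;
  }
  // MaybeTailCallOptimizedCodeSlot runs first; if it tail-called
  // optimized code or a runtime function we never get here.
  if (slot_tail_called) return;
  // The SFI already holds real code (anything but the CompileLazy
  // stub): install it on the closure and tail call it.
  if (!closure.sfi.code_is_compile_lazy) {
    std::printf("install the SFI code on the closure and tail call it\n");
    return;
  }
  std::printf("tail call Runtime::kCompileLazy\n");
}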