Index: src/builtins/ppc/builtins-ppc.cc
diff --git a/src/builtins/ppc/builtins-ppc.cc b/src/builtins/ppc/builtins-ppc.cc
index 35713097dd00cd0eafd8a4a6e0e86e666bf1f539..f805818c8186d4d901b0b774c958f5a6ba50c7ac 100644
--- a/src/builtins/ppc/builtins-ppc.cc
+++ b/src/builtins/ppc/builtins-ppc.cc
@@ -427,23 +427,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
   __ JumpToJSEntry(ip);
 }
 
-void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
-  // Checking whether the queued function is ready for install is optional,
-  // since we come across interrupts and stack checks elsewhere.  However,
-  // not checking may delay installing ready functions, and always checking
-  // would be quite expensive.  A good compromise is to first check against
-  // stack limit as a cue for an interrupt signal.
-  Label ok;
-  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
-  __ cmpl(sp, ip);
-  __ bge(&ok);
-
-  GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
-
-  __ bind(&ok);
-  GenerateTailCallToSharedCode(masm);
-}
-
 namespace {
 
 void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@@ -1033,6 +1016,120 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
   __ add(sp, sp, args_count);
 }
 
+// Tail-call |function_id| if |smi_entry| == |marker|
+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
+                                          Register smi_entry,
+                                          OptimizationMarker marker,
+                                          Runtime::FunctionId function_id) {
+  Label no_match;
+  __ CmpSmiLiteral(smi_entry, Smi::FromEnum(marker), r0);
+  __ bne(&no_match);
+  GenerateTailCallToReturnedCode(masm, function_id);
+  __ bind(&no_match);
+}
+
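The helper compiles down to a compare-and-branch around a tail call. As a C++-flavoured sketch of the emitted logic (illustrative only, not V8 API; TailCallRuntime stands in for GenerateTailCallToReturnedCode's tail call and never returns here):

    // Sketch: runtime behaviour of the code emitted by the helper.
    if (smi_entry == Smi::FromEnum(marker)) {
      TailCallRuntime(function_id);  // leaves the builtin for good
    }
    // no_match: execution continues with the next marker check.

Callers chain several of these checks back to back, one per marker value.
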
+static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
+                                           Register feedback_vector,
+                                           Register scratch1, Register scratch2,
+                                           Register scratch3) {
+  // ----------- S t a t e -------------
+  //  -- r3 : argument count (preserved for callee if needed, and caller)
+  //  -- r6 : new target (preserved for callee if needed, and caller)
+  //  -- r4 : target function (preserved for callee if needed, and caller)
+  //  -- feedback vector (preserved for caller if needed)
+  // -----------------------------------
+  DCHECK(
+      !AreAliased(feedback_vector, r3, r4, r6, scratch1, scratch2, scratch3));
+
+  Label optimized_code_slot_is_cell, fallthrough;
+
+  Register closure = r4;
+  Register optimized_code_entry = scratch1;
+
+  const int kOptimizedCodeCellOffset =
+      FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+      FeedbackVector::kHeaderSize;
+  __ LoadP(optimized_code_entry,
+           FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
+
+  // Check if the code entry is a Smi. If yes, we interpret it as an
+  // optimization marker. Otherwise, interpret it as a weak cell to a code
+  // object.
+  __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+
+  {
+    // Optimized code slot is a Smi optimization marker.
+
+    // Fall through if no optimization trigger.
+    __ CmpSmiLiteral(optimized_code_entry,
+                     Smi::FromEnum(OptimizationMarker::kNone), r0);
+    __ beq(&fallthrough);
+
+    TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+                                  OptimizationMarker::kCompileOptimized,
+                                  Runtime::kCompileOptimized_NotConcurrent);
+    TailCallRuntimeIfMarkerEquals(
+        masm, optimized_code_entry,
+        OptimizationMarker::kCompileOptimizedConcurrent,
+        Runtime::kCompileOptimized_Concurrent);
+
+    {
+      // Otherwise, the marker is InOptimizationQueue.
+      if (FLAG_debug_code) {
+        __ CmpSmiLiteral(
+            optimized_code_entry,
+            Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0);
+        __ Assert(eq, kExpectedOptimizationSentinel);
+      }
+
+      // Checking whether the queued function is ready for install is optional,
+      // since we come across interrupts and stack checks elsewhere.  However,
+      // not checking may delay installing ready functions, and always checking
+      // would be quite expensive.  A good compromise is to first check against
+      // stack limit as a cue for an interrupt signal.
+      __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+      __ cmpl(sp, ip);
+      __ bge(&fallthrough);
+      GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+    }
+  }
+
+  {
+    // Optimized code slot is a WeakCell.
+    __ bind(&optimized_code_slot_is_cell);
+
+    __ LoadP(optimized_code_entry,
+             FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+    __ JumpIfSmi(optimized_code_entry, &fallthrough);
+
+    // Check if the optimized code is marked for deopt. If it is, call the
+    // runtime to clear it.
+    Label found_deoptimized_code;
+    __ LoadP(scratch2, FieldMemOperand(optimized_code_entry,
+                                       Code::kKindSpecificFlags1Offset));
+    __ TestBit(scratch2, Code::kMarkedForDeoptimizationBit, r0);
+    __ bne(&found_deoptimized_code, cr0);
+
+    // Optimized code is good, get it into the closure and link the closure into
+    // the optimized functions list, then tail call the optimized code.
+    // The feedback vector is no longer used, so re-use it as a scratch
+    // register.
+    ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
+                                         scratch2, scratch3, feedback_vector);
+    __ mr(ip, optimized_code_entry);
+    __ Jump(optimized_code_entry);
+
+    // Optimized code slot contains deoptimized code, evict it and re-enter the
+    // closure's code.
+    __ bind(&found_deoptimized_code);
+    GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+  }
+
+  // Fall-through if the optimized code cell is clear and there is no
+  // optimization marker.
+  __ bind(&fallthrough);
+}
+
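Taken as a whole, MaybeTailCallOptimizedCodeSlot emits the decision tree below. This is a C++-flavoured sketch under the assumption that every tail call or jump leaves the builtin; slot, value, and the free-standing helper names are illustrative, not real V8 API:

    // Sketch of the emitted fast path.
    Object* slot = OptimizedCodeCellOf(feedback_vector);
    if (slot->IsSmi()) {
      // Smi: an optimization marker.
      if (slot == kNone) goto fallthrough;
      if (slot == kCompileOptimized)
        TailCallRuntime(Runtime::kCompileOptimized_NotConcurrent);
      if (slot == kCompileOptimizedConcurrent)
        TailCallRuntime(Runtime::kCompileOptimized_Concurrent);
      // Remaining marker: kInOptimizationQueue. Use the stack limit as a
      // cheap cue for a pending interrupt before trying to install.
      if (sp < stack_limit) TailCallRuntime(Runtime::kTryInstallOptimizedCode);
      goto fallthrough;
    } else {
      // WeakCell: possibly holding optimized code.
      Object* value = WeakCell::cast(slot)->value();
      if (value->IsSmi()) goto fallthrough;  // cell was cleared by GC
      Code* code = Code::cast(value);
      if (code->marked_for_deoptimization())
        TailCallRuntime(Runtime::kEvictOptimizedCodeSlot);
      ReplaceClosureEntryWithOptimizedCode(closure, code);
      Jump(code);  // run the optimized code
    }
    fallthrough:;  // no code and no marker: continue in the caller
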
 // Generate code for entering a JS function with the interpreter.
 // On entry to the function the receiver and arguments have been pushed on the
 // stack left to right.  The actual argument count matches the formal parameter
@@ -1052,36 +1149,33 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch)
 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   ProfileEntryHookStub::MaybeCallEntryHook(masm);
 
+  Register closure = r4;
+  Register feedback_vector = r5;
+
+  // Load the feedback vector from the closure.
+  __ LoadP(feedback_vector,
+           FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+  __ LoadP(feedback_vector,
+           FieldMemOperand(feedback_vector, Cell::kValueOffset));
+  // Read off the optimized code slot in the feedback vector, and if there
+  // is optimized code or an optimization marker, call that instead.
+  MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
+
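Note the ordering: the optimized-code check now runs before PushStandardFrame below, so if MaybeTailCallOptimizedCodeSlot tail-calls away there is no interpreter frame to unwind yet. The new prologue order, as a sketch with illustrative accessor names:

    // Rewritten trampoline prologue, in order.
    feedback_vector = closure->feedback_vector_cell()->value();  // r5
    MaybeTailCallOptimizedCodeSlot(feedback_vector);  // may jump away
    PushStandardFrame(closure);                       // only now build a frame
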
   // Open a frame scope to indicate that there is a frame on the stack.  The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
   // the frame (that is done below).
   FrameScope frame_scope(masm, StackFrame::MANUAL);
-  __ PushStandardFrame(r4);
-
-  // First check if there is optimized code in the feedback vector which we
-  // could call instead.
-  Label switch_to_optimized_code;
-
-  Register optimized_code_entry = r7;
-  __ LoadP(r3, FieldMemOperand(r4, JSFunction::kFeedbackVectorOffset));
-  __ LoadP(r3, FieldMemOperand(r3, Cell::kValueOffset));
-  __ LoadP(
-      optimized_code_entry,
-      FieldMemOperand(r3, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
-                              FeedbackVector::kHeaderSize));
-  __ LoadP(optimized_code_entry,
-           FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
-  __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+  __ PushStandardFrame(closure);
 
   // Get the bytecode array from the function object (or from the DebugInfo if
   // it is present) and load it into kInterpreterBytecodeArrayRegister.
   Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
-  __ LoadP(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(r3, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
   // Load original bytecode array or the debug copy.
   __ LoadP(kInterpreterBytecodeArrayRegister,
            FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
-  __ LoadP(r5, FieldMemOperand(r3, SharedFunctionInfo::kDebugInfoOffset));
-  __ TestIfSmi(r5, r0);
+  __ LoadP(r7, FieldMemOperand(r3, SharedFunctionInfo::kDebugInfoOffset));
+  __ TestIfSmi(r7, r0);
   __ bne(&maybe_load_debug_bytecode_array, cr0);
   __ bind(&bytecode_array_loaded);
 
@@ -1095,16 +1189,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ bne(&switch_to_different_code_kind);
 
   // Increment invocation count for the function.
-  __ LoadP(r7, FieldMemOperand(r4, JSFunction::kFeedbackVectorOffset));
-  __ LoadP(r7, FieldMemOperand(r7, Cell::kValueOffset));
-  __ LoadP(r8, FieldMemOperand(
-                   r7, FeedbackVector::kInvocationCountIndex * kPointerSize +
-                           FeedbackVector::kHeaderSize));
+  __ LoadP(
+      r8, FieldMemOperand(feedback_vector,
+                          FeedbackVector::kInvocationCountIndex * kPointerSize +
+                              FeedbackVector::kHeaderSize));
   __ AddSmiLiteral(r8, r8, Smi::FromInt(1), r0);
-  __ StoreP(r8, FieldMemOperand(
-                    r7, FeedbackVector::kInvocationCountIndex * kPointerSize +
-                            FeedbackVector::kHeaderSize),
-            r0);
+  __ StoreP(
+      r8,
+      FieldMemOperand(feedback_vector,
+                      FeedbackVector::kInvocationCountIndex * kPointerSize +
+                          FeedbackVector::kHeaderSize),
+      r0);
 
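Both the optimized-code slot and the invocation count use the same addressing pattern: a fixed-array element offset of index * kPointerSize + FeedbackVector::kHeaderSize, with FieldMemOperand folding in the heap-object tag. A sketch of the arithmetic:

    // Untagged offset of element |index| in a FeedbackVector.
    int offset = index * kPointerSize + FeedbackVector::kHeaderSize;
    // FieldMemOperand(obj, offset) addresses obj + offset - kHeapObjectTag.
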
   // Check function data field is actually a BytecodeArray object.
 
@@ -1182,12 +1277,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // SharedFunctionInfo::kFunctionDataOffset.
   Label done;
   __ bind(&maybe_load_debug_bytecode_array);
-  __ LoadP(ip, FieldMemOperand(r5, DebugInfo::kFlagsOffset));
+  __ LoadP(ip, FieldMemOperand(r7, DebugInfo::kFlagsOffset));
   __ SmiUntag(ip);
   __ andi(r0, ip, Operand(DebugInfo::kHasBreakInfo));
   __ beq(&done, cr0);
   __ LoadP(kInterpreterBytecodeArrayRegister,
-           FieldMemOperand(r5, DebugInfo::kDebugBytecodeArrayOffset));
+           FieldMemOperand(r7, DebugInfo::kDebugBytecodeArrayOffset));
   __ bind(&done);
   __ b(&bytecode_array_loaded);
 
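The debug path (now using r7 for the DebugInfo, since r5 holds the feedback vector) selects the bytecode array roughly as follows; the accessor names are illustrative:

    // Sketch: use the debug copy only when break information is present.
    if (shared->debug_info()->flags() & DebugInfo::kHasBreakInfo) {
      bytecode_array = debug_info->debug_bytecode_array();
    }
    // Either way, continue at bytecode_array_loaded.
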
@@ -1196,35 +1291,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // closure by switching the code entry field over to the new code as well.
   __ bind(&switch_to_different_code_kind);
   __ LeaveFrame(StackFrame::JAVA_SCRIPT);
-  __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(r7, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
   __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kCodeOffset));
   __ addi(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ StoreP(r7, FieldMemOperand(r4, JSFunction::kCodeEntryOffset), r0);
-  __ RecordWriteCodeEntryField(r4, r7, r8);
+  __ StoreP(r7, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+  __ RecordWriteCodeEntryField(closure, r7, r8);
   __ JumpToJSEntry(r7);
-
-  // If there is optimized code on the type feedback vector, check if it is good
-  // to run, and if so, self heal the closure and call the optimized code.
-  __ bind(&switch_to_optimized_code);
-  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
-  Label gotta_call_runtime;
-
-  // Check if the optimized code is marked for deopt.
-  __ lwz(r8, FieldMemOperand(optimized_code_entry,
-                             Code::kKindSpecificFlags1Offset));
-  __ TestBit(r8, Code::kMarkedForDeoptimizationBit, r0);
-  __ bne(&gotta_call_runtime, cr0);
-
-  // Optimized code is good, get it into the closure and link the closure into
-  // the optimized functions list, then tail call the optimized code.
-  ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, r4, r9, r8,
-                                       r5);
-  __ JumpToJSEntry(optimized_code_entry);
-
-  // Optimized code is marked for deopt, bailout to the CompileLazy runtime
-  // function which will clear the feedback vector's optimized code slot.
-  __ bind(&gotta_call_runtime);
-  GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
 }
 
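The surviving switch_to_different_code_kind tail heals the closure in place. As a sketch (set_code_entry is illustrative; the real store goes through JSFunction::kCodeEntryOffset followed by the write barrier):

    // Point the closure at the SFI's current code and re-enter it.
    LeaveFrame();
    Code* code = closure->shared()->code();
    closure->set_code_entry(code->instruction_start());  // + write barrier
    JumpToJSEntry(code);
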
 static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1459,6 +1531,34 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
   Generate_InterpreterEnterBytecode(masm);
 }
 
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : argument count (preserved for callee)
+  //  -- r6 : new target (preserved for callee)
+  //  -- r4 : target function (preserved for callee)
+  // -----------------------------------
+  Register closure = r4;
+
+  // Get the feedback vector.
+  Register feedback_vector = r5;
+  __ LoadP(feedback_vector,
+           FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+  __ LoadP(feedback_vector,
+           FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+  // The feedback vector must be defined.
+  if (FLAG_debug_code) {
+    __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
+    __ Assert(ne, BailoutReason::kExpectedFeedbackVector);
+  }
+
+  // Is there an optimization marker or optimized code in the feedback vector?
+  MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
+
+  // Otherwise, tail call the SFI code.
+  GenerateTailCallToSharedCode(masm);
+}
+
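The new builtin is a thin dispatcher around the shared helper; in outline (sketch, illustrative names):

    // Control flow of Generate_CheckOptimizationMarker.
    vector = closure->feedback_vector_cell()->value();
    // Debug builds assert vector != undefined (see above).
    MaybeTailCallOptimizedCodeSlot(vector);  // may tail-call and not return
    TailCallToSharedCode(closure);           // otherwise run the SFI code
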
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r3 : argument count (preserved for callee)
@@ -1467,42 +1567,25 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   // -----------------------------------
   // First lookup code, maybe we don't need to compile!
   Label gotta_call_runtime;
-  Label try_shared;
 
   Register closure = r4;
-  Register index = r5;
+  Register feedback_vector = r5;
 
   // Do we have a valid feedback vector?
-  __ LoadP(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
-  __ LoadP(index, FieldMemOperand(index, Cell::kValueOffset));
-  __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+  __ LoadP(feedback_vector,
+           FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+  __ LoadP(feedback_vector,
+           FieldMemOperand(feedback_vector, Cell::kValueOffset));
+  __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
+                &gotta_call_runtime);
 
-  // Is optimized code available in the feedback vector?
-  Register entry = r7;
-  __ LoadP(entry, FieldMemOperand(index, FeedbackVector::kOptimizedCodeIndex *
-                                                 kPointerSize +
-                                             FeedbackVector::kHeaderSize));
-  __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
-  __ JumpIfSmi(entry, &try_shared);
-
-  // Found code, check if it is marked for deopt, if so call into runtime to
-  // clear the optimized code slot.
-  __ lwz(r8, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
-  __ TestBit(r8, Code::kMarkedForDeoptimizationBit, r0);
-  __ bne(&gotta_call_runtime, cr0);
-
-  // Code is good, get it into the closure and tail call.
-  ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r9, r8, r5);
-  __ JumpToJSEntry(entry);
+  // Is there an optimization marker or optimized code in the feedback vector?
+  MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
 
   // We found no optimized code.
-  __ bind(&try_shared);
+  Register entry = r7;
   __ LoadP(entry,
            FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-  // Is the shared function marked for tier up?
-  __ lwz(r8, FieldMemOperand(entry, SharedFunctionInfo::kCompilerHintsOffset));
-  __ TestBit(r8, SharedFunctionInfo::MarkedForTierUpBit::kShift, r0);
-  __ bne(&gotta_call_runtime, cr0);
 
   // If SFI points to anything other than CompileLazy, install that.
   __ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
@@ -1520,15 +1603,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
 }
 
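With the marker and deopt handling folded into MaybeTailCallOptimizedCodeSlot, CompileLazy reduces to the sequence below. This is a sketch; the install-and-jump step is the part elided between the last two hunks, and InstallAndJump is an illustrative name:

    // Sketch of the rewritten Generate_CompileLazy.
    vector = closure->feedback_vector_cell()->value();
    if (vector == undefined) goto gotta_call_runtime;
    MaybeTailCallOptimizedCodeSlot(vector);  // may tail-call away
    code = closure->shared()->code();
    if (code != CompileLazyBuiltin) InstallAndJump(closure, code);
    gotta_call_runtime:
    TailCallRuntime(Runtime::kCompileLazy);
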
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
-  GenerateTailCallToReturnedCode(masm,
-                                 Runtime::kCompileOptimized_NotConcurrent);
-}
-
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
-  GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
-}
-
 void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r3 : argument count (preserved for callee)
