| Index: src/builtins/ppc/builtins-ppc.cc
 | 
| diff --git a/src/builtins/ppc/builtins-ppc.cc b/src/builtins/ppc/builtins-ppc.cc
 | 
| index f84b6a0c03180811f6dcaad77464972f2622252e..fa97eb56b931cc91d08095b53e2011df9b591f21 100644
 | 
| --- a/src/builtins/ppc/builtins-ppc.cc
 | 
| +++ b/src/builtins/ppc/builtins-ppc.cc
 | 
| @@ -994,6 +994,41 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
 | 
|    Generate_JSEntryTrampolineHelper(masm, true);
 | 
|  }
 | 
|  
 | 
| +static void ReplaceClosureEntryWithOptimizedCode(
 | 
| +    MacroAssembler* masm, Register optimized_code_entry, Register closure,
 | 
| +    Register scratch1, Register scratch2, Register scratch3) {
 | 
| +  Register native_context = scratch1;
 | 
| +  // Store code entry in the closure.
 | 
| +  __ addi(optimized_code_entry, optimized_code_entry,
 | 
| +          Operand(Code::kHeaderSize - kHeapObjectTag));
 | 
| +  __ StoreP(optimized_code_entry,
 | 
| +            FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
 | 
| +  __ RecordWriteCodeEntryField(closure, optimized_code_entry, scratch2);
 | 
| +
 | 
| +  // Link the closure into the optimized function list.
 | 
| +  // r7 : code entry
 | 
| +  // r9 : native context (scratch1)
 | 
| +  // r4 : closure
 | 
| +  __ LoadP(native_context, NativeContextMemOperand());
 | 
| +  __ LoadP(scratch2, ContextMemOperand(native_context,
 | 
| +                                       Context::OPTIMIZED_FUNCTIONS_LIST));
 | 
| +  __ StoreP(scratch2,
 | 
| +            FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset), r0);
 | 
| +  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
 | 
| +                      scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs,
 | 
| +                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
 | 
| +  const int function_list_offset =
 | 
| +      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
 | 
| +  __ StoreP(
 | 
| +      closure,
 | 
| +      ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
 | 
| +  // Save the closure; the write barrier below may clobber the value register.
 | 
| +  __ mr(scratch2, closure);
 | 
| +  __ RecordWriteContextSlot(native_context, function_list_offset, closure,
 | 
| +                            scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs);
 | 
| +  __ mr(closure, scratch2);
 | 
| +}
 | 
| +
 | 
|  static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
 | 
|    Register args_count = scratch;
 | 
|  
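For readers less used to the MacroAssembler idiom, the new helper amounts to the following on a deliberately simplified data model. The structs and field names below are illustrative stand-ins, not V8's real object layout, and the write barriers (RecordWriteCodeEntryField, RecordWriteField, RecordWriteContextSlot) have no counterpart in this sketch:

    // Hypothetical, simplified model of ReplaceClosureEntryWithOptimizedCode.
    // None of these types match V8's real heap layout; they only mirror the
    // stores performed by the generated code above.
    #include <cstdint>

    struct Code {
      uint8_t* instruction_start;     // stands in for code + Code::kHeaderSize
    };

    struct JSFunction {
      uint8_t* code_entry;             // JSFunction::kCodeEntryOffset
      JSFunction* next_function_link;  // JSFunction::kNextFunctionLinkOffset
    };

    struct NativeContext {
      JSFunction* optimized_functions_list;  // Context::OPTIMIZED_FUNCTIONS_LIST
    };

    void ReplaceClosureEntryWithOptimizedCode(NativeContext* native_context,
                                              Code* optimized_code,
                                              JSFunction* closure) {
      // Store the code entry in the closure.
      closure->code_entry = optimized_code->instruction_start;
      // Link the closure into the optimized function list (push-front).
      closure->next_function_link = native_context->optimized_functions_list;
      native_context->optimized_functions_list = closure;
    }
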
 | 
| @@ -1034,6 +1069,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
 | 
|    FrameScope frame_scope(masm, StackFrame::MANUAL);
 | 
|    __ PushStandardFrame(r4);
 | 
|  
 | 
| +  // First, check whether the feedback vector holds optimized code that we
 | 
| +  // could call instead.
 | 
| +  Label switch_to_optimized_code;
 | 
| +
 | 
| +  Register optimized_code_entry = r7;
 | 
| +  __ LoadP(r3, FieldMemOperand(r4, JSFunction::kFeedbackVectorOffset));
 | 
| +  __ LoadP(r3, FieldMemOperand(r3, Cell::kValueOffset));
 | 
| +  __ LoadP(
 | 
| +      optimized_code_entry,
 | 
| +      FieldMemOperand(r3, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
 | 
| +                              FeedbackVector::kHeaderSize));
 | 
| +  __ LoadP(optimized_code_entry,
 | 
| +           FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
 | 
| +  __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
 | 
| +
 | 
|    // Get the bytecode array from the function object (or from the DebugInfo if
 | 
|    // it is present) and load it into kInterpreterBytecodeArrayRegister.
 | 
|    __ LoadP(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
 | 
| @@ -1154,6 +1204,29 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
 | 
|    __ StoreP(r7, FieldMemOperand(r4, JSFunction::kCodeEntryOffset), r0);
 | 
|    __ RecordWriteCodeEntryField(r4, r7, r8);
 | 
|    __ JumpToJSEntry(r7);
 | 
| +
 | 
| +  // If there is optimized code in the feedback vector, check whether it is
 | 
| +  // still valid; if so, self-heal the closure and call the optimized code.
 | 
| +  __ bind(&switch_to_optimized_code);
 | 
| +  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
 | 
| +  Label gotta_call_runtime;
 | 
| +
 | 
| +  // Check if the optimized code is marked for deopt.
 | 
| +  __ lbz(r8, FieldMemOperand(optimized_code_entry,
 | 
| +                             Code::kKindSpecificFlags1Offset));
 | 
| +  __ TestBit(r8, Code::kMarkedForDeoptimizationBit, r0);
 | 
| +  __ bne(&gotta_call_runtime, cr0);
 | 
| +
 | 
| +  // The optimized code is valid: install it in the closure, link the closure
 | 
| +  // into the optimized functions list, then tail call the optimized code.
 | 
| +  ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, r4, r9, r8,
 | 
| +                                       r5);
 | 
| +  __ JumpToJSEntry(optimized_code_entry);
 | 
| +
 | 
| +  // Optimized code is marked for deopt; bail out to EvictOptimizedCodeSlot,
 | 
| +  // which will clear the feedback vector's optimized code slot.
 | 
| +  __ bind(&gotta_call_runtime);
 | 
| +  GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
 | 
|  }
 | 
|  
 | 
|  static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
 | 
| @@ -1421,31 +1494,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
 | 
|    __ bne(&gotta_call_runtime, cr0);
 | 
|  
 | 
|    // Code is good, get it into the closure and tail call.
 | 
| -  __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
 | 
| -  __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
 | 
| -  __ RecordWriteCodeEntryField(closure, entry, r8);
 | 
| -
 | 
| -  // Load native context into r9.
 | 
| -  Register native_context = r9;
 | 
| -  __ LoadP(native_context, NativeContextMemOperand());
 | 
| -
 | 
| -  // Link the closure into the optimized function list.
 | 
| -  __ LoadP(
 | 
| -      r8, ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
 | 
| -  __ StoreP(r8, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset),
 | 
| -            r0);
 | 
| -  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r8, r5,
 | 
| -                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
 | 
| -                      OMIT_SMI_CHECK);
 | 
| -  const int function_list_offset =
 | 
| -      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
 | 
| -  __ StoreP(
 | 
| -      closure,
 | 
| -      ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
 | 
| -  // Save closure before the write barrier.
 | 
| -  __ mr(r8, closure);
 | 
| -  __ RecordWriteContextSlot(native_context, function_list_offset, r8, r5,
 | 
| -                            kLRHasNotBeenSaved, kDontSaveFPRegs);
 | 
| +  ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r9, r8, r5);
 | 
|    __ JumpToJSEntry(entry);
 | 
|  
 | 
|    // We found no optimized code.
 | 
| 
 |
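
Taken together, the interpreter entry trampoline now makes the following decision before dispatching to bytecode, while Generate_CompileLazy keeps its existing checks and only shares the closure self-healing step through the new helper. The sketch below is a hypothetical control-flow model only; the function and type names are placeholders for the paths taken by the generated code, not real V8 APIs, and nullptr stands in for the cleared WeakCell (a Smi) in the optimized-code slot:

    // Hypothetical control-flow model of the new fast path in
    // Generate_InterpreterEntryTrampoline. All names are placeholders.
    #include <cstdint>

    struct OptimizedCode {
      uint8_t* entry;
      bool marked_for_deoptimization;   // Code::kMarkedForDeoptimizationBit
    };

    struct FeedbackVector {
      // nullptr plays the role of a cleared WeakCell (a Smi in the real code).
      OptimizedCode* optimized_code_slot;
    };

    struct Closure {
      FeedbackVector* feedback_vector;
      uint8_t* code_entry;
    };

    // Placeholders for the targets reached by the generated code.
    void DispatchToBytecode(Closure* closure) {}             // interpreter path
    void EvictOptimizedCodeSlotRuntime(Closure* closure) {}  // Runtime::kEvictOptimizedCodeSlot
    void InstallAndLinkOptimizedCode(Closure* closure, OptimizedCode* code) {
      closure->code_entry = code->entry;  // see ReplaceClosureEntryWithOptimizedCode
    }
    void TailCall(uint8_t* entry) {}                         // JumpToJSEntry

    void InterpreterEntry(Closure* closure) {
      OptimizedCode* optimized = closure->feedback_vector->optimized_code_slot;
      if (optimized == nullptr) {
        // No optimized code in the feedback vector: run the bytecode.
        DispatchToBytecode(closure);
      } else if (optimized->marked_for_deoptimization) {
        // Stale code: let the runtime clear the slot and pick what to run.
        EvictOptimizedCodeSlotRuntime(closure);
      } else {
        // Valid code: self-heal the closure and jump straight into it.
        InstallAndLinkOptimizedCode(closure, optimized);
        TailCall(closure->code_entry);
      }
    }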