Index: runtime/vm/stub_code_mips.cc
diff --git a/runtime/vm/stub_code_mips.cc b/runtime/vm/stub_code_mips.cc
index 62f1c091d1c5b96caa7352beffcb9764eb190705..d6b55f8f4d20e60c8e8a25d5f06a111e8dc08829 100644
--- a/runtime/vm/stub_code_mips.cc
+++ b/runtime/vm/stub_code_mips.cc
@@ -41,13 +41,14 @@ void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) {
   const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
   const intptr_t argv_offset = NativeArguments::argv_offset();
   const intptr_t retval_offset = NativeArguments::retval_offset();
+  const intptr_t exitframe_last_param_slot_from_fp = 2;

   __ SetPrologueOffset();
   __ Comment("CallToRuntimeStub");
   __ EnterStubFrame();

-  COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << S2)) != 0);
-  __ LoadIsolate(S2);
+  COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << S6)) != 0);
+  __ LoadIsolate(S6);

   // Save exit frame information to enable stack walking as we are about
   // to transition to Dart VM C++ code.
@@ -56,7 +57,7 @@ void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) {
 #if defined(DEBUG)
   { Label ok;
     // Check that we are always entering from Dart code.
-    __ lw(T0, Address(S2, Isolate::vm_tag_offset()));
+    __ lw(T0, Address(S6, Isolate::vm_tag_offset()));
     __ BranchEqual(T0, Immediate(VMTag::kDartTagId), &ok);
     __ Stop("Not coming from Dart code.");
     __ Bind(&ok);
@@ -64,7 +65,7 @@ void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) {
 #endif

   // Mark that the isolate is executing VM code.
-  __ sw(S5, Address(S2, Isolate::vm_tag_offset()));
+  __ sw(S5, Address(S6, Isolate::vm_tag_offset()));

   // Reserve space for arguments and align frame before entering C++ world.
   // NativeArguments are passed in registers.
@@ -87,7 +88,7 @@ void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) {
   __ sll(A2, S4, 2);
   __ addu(A2, FP, A2);  // Compute argv.
   // Set argv in NativeArguments.
-  __ addiu(A2, A2, Immediate(kParamEndSlotFromFp * kWordSize));
+  __ addiu(A2, A2, Immediate(exitframe_last_param_slot_from_fp * kWordSize));


   // Call runtime or redirection via simulator.
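
// Illustrative sketch (not VM source, not part of this patch): the address
// arithmetic that the three instructions above perform when computing argv for
// NativeArguments. It assumes MIPS32 (kWordSize == 4) and that S4 carries the
// runtime argument count, as the scaling by kWordSize suggests; the helper and
// parameter names are invented for illustration.
static uword ComputeRuntimeArgvSketch(uword fp, intptr_t argc,
                                      intptr_t exitframe_last_param_slot) {
  uword a2 = static_cast<uword>(argc) << 2;  // sll A2, S4, 2   (argc * kWordSize)
  a2 = fp + a2;                              // addu A2, FP, A2 (compute argv)
  a2 += exitframe_last_param_slot * 4;       // addiu A2, A2, slot * kWordSize
  return a2;                                 // value stored as argv in NativeArguments
}
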
@@ -103,7 +104,7 @@ void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) {

   // Mark that the isolate is executing Dart code.
   __ LoadImmediate(A2, VMTag::kDartTagId);
-  __ sw(A2, Address(S2, Isolate::vm_tag_offset()));
+  __ sw(A2, Address(S6, Isolate::vm_tag_offset()));

   // Reset exit frame information in Isolate structure.
   __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset()));
@@ -147,8 +148,8 @@ void StubCode::GenerateCallNativeCFunctionStub(Assembler* assembler) {
   __ Comment("CallNativeCFunctionStub");
   __ EnterStubFrame();

-  COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << S2)) != 0);
-  __ LoadIsolate(S2);
+  COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << S6)) != 0);
+  __ LoadIsolate(S6);

   // Save exit frame information to enable stack walking as we are about
   // to transition to native code.
@@ -157,7 +158,7 @@ void StubCode::GenerateCallNativeCFunctionStub(Assembler* assembler) {
 #if defined(DEBUG)
   { Label ok;
     // Check that we are always entering from Dart code.
-    __ lw(T0, Address(S2, Isolate::vm_tag_offset()));
+    __ lw(T0, Address(S6, Isolate::vm_tag_offset()));
     __ BranchEqual(T0, Immediate(VMTag::kDartTagId), &ok);
     __ Stop("Not coming from Dart code.");
     __ Bind(&ok);
@@ -165,7 +166,7 @@ void StubCode::GenerateCallNativeCFunctionStub(Assembler* assembler) {
 #endif

   // Mark that the isolate is executing Native code.
-  __ sw(T5, Address(S2, Isolate::vm_tag_offset()));
+  __ sw(T5, Address(S6, Isolate::vm_tag_offset()));

   // Initialize NativeArguments structure and call native function.
   // Registers A0, A1, A2, and A3 are used.
@@ -183,8 +184,7 @@ void StubCode::GenerateCallNativeCFunctionStub(Assembler* assembler) {
   // Set argv in NativeArguments: A2 already contains argv.

   ASSERT(retval_offset == 3 * kWordSize);
-  // Set retval in NativeArgs.
-  __ addiu(A3, FP, Immediate(kCallerSpSlotFromFp * kWordSize));
+  __ addiu(A3, FP, Immediate(3 * kWordSize));  // Set retval in NativeArgs.

   // Passing the structure by value as in runtime calls would require changing
   // Dart API for native functions.
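
// Illustrative sketch (not VM source, not part of this patch): the four-word
// NativeArguments block that registers A0-A3 mirror at this point. Only the
// retval offset is asserted above (3 * kWordSize); the other fields and their
// order are assumed from the usual thread/argc_tag/argv/retval layout.
struct NativeArgumentsSketch {
  void* thread;       // A0 (assumed first field)
  intptr_t argc_tag;  // A1
  void* argv;         // A2
  void* retval;       // A3, set to FP + 3 * kWordSize (retval_offset == 3 * kWordSize)
};
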
@@ -213,7 +213,7 @@ void StubCode::GenerateCallNativeCFunctionStub(Assembler* assembler) {

   // Mark that the isolate is executing Dart code.
   __ LoadImmediate(A2, VMTag::kDartTagId);
-  __ sw(A2, Address(S2, Isolate::vm_tag_offset()));
+  __ sw(A2, Address(S6, Isolate::vm_tag_offset()));

   // Reset exit frame information in Isolate structure.
   __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset()));
@@ -238,8 +238,8 @@ void StubCode::GenerateCallBootstrapCFunctionStub(Assembler* assembler) {
   __ Comment("CallNativeCFunctionStub");
   __ EnterStubFrame();

-  COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << S2)) != 0);
-  __ LoadIsolate(S2);
+  COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << S6)) != 0);
+  __ LoadIsolate(S6);

   // Save exit frame information to enable stack walking as we are about
   // to transition to native code.
@@ -248,7 +248,7 @@ void StubCode::GenerateCallBootstrapCFunctionStub(Assembler* assembler) {
 #if defined(DEBUG)
   { Label ok;
     // Check that we are always entering from Dart code.
-    __ lw(T0, Address(S2, Isolate::vm_tag_offset()));
+    __ lw(T0, Address(S6, Isolate::vm_tag_offset()));
     __ BranchEqual(T0, Immediate(VMTag::kDartTagId), &ok);
     __ Stop("Not coming from Dart code.");
     __ Bind(&ok);
@@ -256,7 +256,7 @@ void StubCode::GenerateCallBootstrapCFunctionStub(Assembler* assembler) {
 #endif

   // Mark that the isolate is executing Native code.
-  __ sw(T5, Address(S2, Isolate::vm_tag_offset()));
+  __ sw(T5, Address(S6, Isolate::vm_tag_offset()));

   // Initialize NativeArguments structure and call native function.
   // Registers A0, A1, A2, and A3 are used.
@@ -274,8 +274,7 @@ void StubCode::GenerateCallBootstrapCFunctionStub(Assembler* assembler) {
   // Set argv in NativeArguments: A2 already contains argv.

   ASSERT(retval_offset == 3 * kWordSize);
-  // Set retval in NativeArgs.
-  __ addiu(A3, FP, Immediate(kCallerSpSlotFromFp * kWordSize));
+  __ addiu(A3, FP, Immediate(3 * kWordSize));  // Set retval in NativeArgs.

   // Passing the structure by value as in runtime calls would require changing
   // Dart API for native functions.
@@ -299,7 +298,7 @@ void StubCode::GenerateCallBootstrapCFunctionStub(Assembler* assembler) {

   // Mark that the isolate is executing Dart code.
   __ LoadImmediate(A2, VMTag::kDartTagId);
-  __ sw(A2, Address(S2, Isolate::vm_tag_offset()));
+  __ sw(A2, Address(S6, Isolate::vm_tag_offset()));

   // Reset exit frame information in Isolate structure.
   __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset()));
@@ -324,11 +323,11 @@ void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) {
   __ Comment("CallStaticFunctionStub return");

   // Get Code object result and restore arguments descriptor array.
-  __ lw(CODE_REG, Address(SP, 0 * kWordSize));
+  __ lw(T0, Address(SP, 0 * kWordSize));
   __ lw(S4, Address(SP, 1 * kWordSize));
   __ addiu(SP, SP, Immediate(2 * kWordSize));

-  __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset()));
+  __ lw(T0, FieldAddress(T0, Code::entry_point_offset()));

   // Remove the stub frame as we are about to jump to the dart function.
   __ LeaveStubFrameAndReturn(T0);
@@ -339,12 +338,9 @@ void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) {
 // (invalid because its function was optimized or deoptimized).
 // S4: arguments descriptor array.
 void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) {
-  // Load code pointer to this stub from the thread:
-  // The one that is passed in, is not correct - it points to the code object
-  // that needs to be replaced.
-  __ lw(CODE_REG, Address(THR, Thread::fix_callers_target_code_offset()));
   // Create a stub frame as we are pushing some objects on the stack before
   // calling into the runtime.
+  __ Comment("FixCallersTarget");
   __ EnterStubFrame();
   // Setup space on stack for return value and preserve arguments descriptor.
   __ addiu(SP, SP, Immediate(-2 * kWordSize));
@@ -353,12 +349,12 @@ void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) {
   __ sw(TMP, Address(SP, 0 * kWordSize));
   __ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
   // Get Code object result and restore arguments descriptor array.
-  __ lw(CODE_REG, Address(SP, 0 * kWordSize));
+  __ lw(T0, Address(SP, 0 * kWordSize));
   __ lw(S4, Address(SP, 1 * kWordSize));
   __ addiu(SP, SP, Immediate(2 * kWordSize));

   // Jump to the dart function.
-  __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset()));
+  __ lw(T0, FieldAddress(T0, Code::entry_point_offset()));

   // Remove the stub frame.
   __ LeaveStubFrameAndReturn(T0);
@@ -368,10 +364,7 @@ void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) {
 // Called from object allocate instruction when the allocation stub has been
 // disabled.
 void StubCode::GenerateFixAllocationStubTargetStub(Assembler* assembler) {
-  // Load code pointer to this stub from the thread:
-  // The one that is passed in, is not correct - it points to the code object
-  // that needs to be replaced.
-  __ lw(CODE_REG, Address(THR, Thread::fix_allocation_stub_code_offset()));
+  __ Comment("FixAllocationStubTarget");
   __ EnterStubFrame();
   // Setup space on stack for return value.
   __ addiu(SP, SP, Immediate(-1 * kWordSize));
@@ -379,11 +372,11 @@ void StubCode::GenerateFixAllocationStubTargetStub(Assembler* assembler) {
   __ sw(TMP, Address(SP, 0 * kWordSize));
   __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
   // Get Code object result.
-  __ lw(CODE_REG, Address(SP, 0 * kWordSize));
+  __ lw(T0, Address(SP, 0 * kWordSize));
   __ addiu(SP, SP, Immediate(1 * kWordSize));

   // Jump to the dart function.
-  __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset()));
+  __ lw(T0, FieldAddress(T0, Code::entry_point_offset()));

   // Remove the stub frame.
   __ LeaveStubFrameAndReturn(T0);
@@ -440,26 +433,34 @@ static void PushArgumentsArray(Assembler* assembler) {
 //   +------------------+
 //   | Saved PP         | <- TOS
 //   +------------------+
-//   | Saved CODE_REG   |
-//   +------------------+
 //   | Saved FP         | <- FP of stub
 //   +------------------+
 //   | Saved LR         |  (deoptimization point)
 //   +------------------+
-//   | Saved CODE_REG   |
+//   | PC marker        |
 //   +------------------+
 //   | ...              | <- SP of optimized frame
 //
 // Parts of the code cannot GC, part of the code can GC.
 static void GenerateDeoptimizationSequence(Assembler* assembler,
-                                           DeoptStubKind kind) {
+                                           bool preserve_result) {
   const intptr_t kPushedRegistersSize =
-      kNumberOfCpuRegisters * kWordSize + kNumberOfFRegisters * kWordSize;
+      kNumberOfCpuRegisters * kWordSize +
+      4 * kWordSize +  // PP, FP, RA, PC marker.
+      kNumberOfFRegisters * kWordSize;

   __ SetPrologueOffset();
   __ Comment("GenerateDeoptimizationSequence");
-  // DeoptimizeCopyFrame expects a Dart frame.
-  __ EnterStubFrame(kPushedRegistersSize);
+  // DeoptimizeCopyFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
+  // is no need to set the correct PC marker or load PP, since they get patched.
+  __ addiu(SP, SP, Immediate(-kPushedRegistersSize * kWordSize));
+  __ sw(ZR, Address(SP, kPushedRegistersSize - 1 * kWordSize));
+  __ sw(RA, Address(SP, kPushedRegistersSize - 2 * kWordSize));
+  __ sw(FP, Address(SP, kPushedRegistersSize - 3 * kWordSize));
+  __ sw(PP, Address(SP, kPushedRegistersSize - 4 * kWordSize));
+  __ addiu(FP, SP, Immediate(kPushedRegistersSize - 3 * kWordSize));
+
+  __ LoadPoolPointer();

   // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
   // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
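
// Illustrative sketch (not VM source, not part of this patch): how the block
// reserved above is sized and where the frame-linkage words land, assuming
// MIPS32 (kWordSize == 4) with 32 CPU and 32 FPU registers. The names are
// invented ("Sketch" suffix) so they do not clash with the real constants.
const intptr_t kSketchWordSize = 4;
const intptr_t kSketchPushedRegistersSize =
    32 * kSketchWordSize +   // CPU registers.
    4 * kSketchWordSize +    // PP, FP, RA, PC marker.
    32 * kSketchWordSize;    // FPU registers.  Total: 272 bytes.
// Offsets of the linkage words relative to the lowered SP, per the stores above:
//   PC marker: kPushedRegistersSize - 1 * kWordSize  (268)
//   RA:        kPushedRegistersSize - 2 * kWordSize  (264)
//   FP:        kPushedRegistersSize - 3 * kWordSize  (260, the new FP points here)
//   PP:        kPushedRegistersSize - 4 * kWordSize  (256)
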
@@ -470,38 +471,27 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
   // Push registers in their enumeration order: lowest register number at
   // lowest address.
   for (int i = 0; i < kNumberOfCpuRegisters; i++) {
-    const int slot = kNumberOfCpuRegisters - i;
+    const int slot = 4 + kNumberOfCpuRegisters - i;
     Register reg = static_cast<Register>(i);
-    if (reg == CODE_REG) {
-      // Save the original value of CODE_REG pushed before invoking this stub
-      // instead of the value used to call this stub.
-      COMPILE_ASSERT(TMP < CODE_REG);  // Assert TMP is pushed first.
-      __ lw(TMP, Address(FP, kCallerSpSlotFromFp * kWordSize));
-      __ sw(TMP, Address(SP, kPushedRegistersSize - slot * kWordSize));
-    } else {
-      __ sw(reg, Address(SP, kPushedRegistersSize - slot * kWordSize));
-    }
+    __ sw(reg, Address(SP, kPushedRegistersSize - slot * kWordSize));
   }
   for (int i = 0; i < kNumberOfFRegisters; i++) {
     // These go below the CPU registers.
-    const int slot = kNumberOfCpuRegisters + kNumberOfFRegisters - i;
+    const int slot = 4 + kNumberOfCpuRegisters + kNumberOfFRegisters - i;
     FRegister reg = static_cast<FRegister>(i);
     __ swc1(reg, Address(SP, kPushedRegistersSize - slot * kWordSize));
   }

   __ mov(A0, SP);  // Pass address of saved registers block.
-  __ LoadImmediate(A1, (kind == kLazyDeopt) ? 1 : 0);
   __ ReserveAlignedFrameSpace(1 * kWordSize);
-  __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2);
+  __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 1);
   // Result (V0) is stack-size (FP - SP) in bytes, incl. the return address.

-  const bool preserve_result = (kind == kLazyDeopt);
   if (preserve_result) {
     // Restore result into T1 temporarily.
     __ lw(T1, Address(FP, saved_result_slot_from_fp * kWordSize));
   }

-  __ RestoreCodePointer();
   __ LeaveDartFrame();
   __ subu(SP, FP, V0);

@@ -520,7 +510,6 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
     __ lw(T1, Address(FP, kFirstLocalSlotFromFp * kWordSize));
   }
   // Code above cannot cause GC.
-  __ RestoreCodePointer();
   __ LeaveStubFrame();

   // Frame is fully rewritten at this point and it is safe to perform a GC.
@@ -550,16 +539,13 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
 void StubCode::GenerateDeoptimizeLazyStub(Assembler* assembler) {
   // Correct return address to point just after the call that is being
   // deoptimized.
-  __ AddImmediate(RA, -CallPattern::kDeoptCallLengthInBytes);
-  // Push zap value instead of CODE_REG for lazy deopt.
-  __ LoadImmediate(TMP, 0xf1f1f1f1);
-  __ Push(TMP);
-  GenerateDeoptimizationSequence(assembler, kLazyDeopt);
+  __ AddImmediate(RA, -CallPattern::kFixedLengthInBytes);
+  GenerateDeoptimizationSequence(assembler, true);  // Preserve V0.
 }


 void StubCode::GenerateDeoptimizeStub(Assembler* assembler) {
-  GenerateDeoptimizationSequence(assembler, kEagerDeopt);
+  GenerateDeoptimizationSequence(assembler, false);  // Don't preserve V0.
 }


@@ -629,7 +615,6 @@ void StubCode::GenerateMegamorphicMissStub(Assembler* assembler) {
   __ lw(S5, Address(SP, 5 * kWordSize));  // Restore IC data.
   __ addiu(SP, SP, Immediate(6 * kWordSize));

-  __ RestoreCodePointer();
   __ LeaveStubFrame();

   if (!FLAG_lazy_dispatchers) {
@@ -638,7 +623,6 @@ void StubCode::GenerateMegamorphicMissStub(Assembler* assembler) {
     __ Bind(&call_target_function);
   }

-  __ lw(CODE_REG, FieldAddress(T0, Function::code_offset()));
   __ lw(T2, FieldAddress(T0, Function::entry_point_offset()));
   __ jr(T2);
 }
@@ -796,7 +780,7 @@ void StubCode::GenerateAllocateArrayStub(Assembler* assembler) {
 // Called when invoking Dart code from C++ (VM code).
 // Input parameters:
 //   RA : points to return address.
-//   A0 : code object of the Dart function to call.
+//   A0 : entrypoint of the Dart function to call.
 //   A1 : arguments descriptor array.
 //   A2 : arguments array.
 //   A3 : current thread.
@@ -829,9 +813,10 @@ void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) {
     __ swc1(r, Address(SP, slot * kWordSize));
   }

-  // We now load the pool pointer(PP) with a GC safe value as we are about
-  // to invoke dart code.
-  __ LoadImmediate(PP, 0);
+  // We now load the pool pointer(PP) as we are about to invoke dart code and we
+  // could potentially invoke some intrinsic functions which need the PP to be
+  // set up.
+  __ LoadPoolPointer();

   // Set up THR, which caches the current thread in Dart code.
   if (THR != A3) {
@@ -890,19 +875,17 @@ void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) {
   // Call the Dart code entrypoint.
   // We are calling into Dart code, here, so there is no need to call through
   // T9 to match the ABI.
-  __ lw(CODE_REG, Address(A0, VMHandles::kOffsetOfRawPtrInHandle));
-  __ lw(A0, FieldAddress(CODE_REG, Code::entry_point_offset()));
   __ jalr(A0);  // S4 is the arguments descriptor array.
   __ Comment("InvokeDartCodeStub return");

   // Get rid of arguments pushed on the stack.
   __ AddImmediate(SP, FP, kExitLinkSlotFromEntryFp * kWordSize);

-  __ LoadIsolate(S2);
+  __ LoadIsolate(S6);

   // Restore the current VMTag from the stack.
   __ lw(T1, Address(SP, 2 * kWordSize));
-  __ sw(T1, Address(S2, Isolate::vm_tag_offset()));
+  __ sw(T1, Address(S6, Isolate::vm_tag_offset()));

   // Restore the saved top resource and top exit frame info back into the
   // Isolate structure. Uses T0 as a temporary register for this.
@@ -1125,9 +1108,13 @@ void StubCode::GenerateUpdateStoreBufferStub(Assembler* assembler) {
 // Input parameters:
 //   RA : return address.
 //   SP + 0 : type arguments object (only if class is parameterized).
-void StubCode::GenerateAllocationStubForClass(Assembler* assembler,
-                                              const Class& cls) {
+// Returns patch_code_pc offset where patching code for disabling the stub
+// has been generated (similar to regularly generated Dart code).
+void StubCode::GenerateAllocationStubForClass(
+    Assembler* assembler, const Class& cls,
+    uword* entry_patch_offset, uword* patch_code_pc_offset) {
   __ Comment("AllocationStubForClass");
+  *entry_patch_offset = assembler->CodeSize();
   // The generated code is different if the class is parameterized.
   const bool is_cls_parameterized = cls.NumTypeArguments() > 0;
   ASSERT(!is_cls_parameterized ||
@@ -1250,6 +1237,8 @@ void StubCode::GenerateAllocationStubForClass(Assembler* assembler,
   // V0: new object
   // Restore the frame pointer and return.
   __ LeaveStubFrameAndReturn(RA);
+  *patch_code_pc_offset = assembler->CodeSize();
+  __ BranchPatchable(*StubCode::FixAllocationStubTarget_entry());
 }


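// Hypothetical caller sketch, not part of this patch: only the signature, the
// CodeSize() recordings, and the trailing BranchPatchable come from the hunks
// above; the wrapper name and surrounding code are invented for illustration.
static void GenerateAndRecordAllocationStubSketch(Assembler* assembler,
                                                  const Class& cls) {
  uword entry_patch_offset = 0;
  uword patch_code_pc_offset = 0;
  StubCode::GenerateAllocationStubForClass(
      assembler, cls, &entry_patch_offset, &patch_code_pc_offset);
  // entry_patch_offset:   offset of the stub's patchable entry, recorded at
  //                       the top of the stub via assembler->CodeSize().
  // patch_code_pc_offset: offset of the trailing patchable branch to
  //                       *StubCode::FixAllocationStubTarget_entry().
}
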
@@ -1569,9 +1558,6 @@ void StubCode::GenerateNArgsCheckInlineCacheStub(
   // Remove the call arguments pushed earlier, including the IC data object
   // and the arguments descriptor array.
   __ addiu(SP, SP, Immediate(num_slots * kWordSize));
-  if (range_collection_mode == kCollectRanges) {
-    __ RestoreCodePointer();
-  }
   __ LeaveStubFrame();

   Label call_target_function;
@@ -1619,7 +1605,6 @@ void StubCode::GenerateNArgsCheckInlineCacheStub(
     if (num_args == 2) {
       __ sw(T1, Address(SP, 1 * kWordSize));
     }
-    __ lw(CODE_REG, FieldAddress(T0, Function::code_offset()));
     __ jalr(T4);
     __ lw(S5, Address(SP, (frame_size - 2) * kWordSize));
     __ lw(RA, Address(SP, (frame_size - 1) * kWordSize));
@@ -1627,11 +1612,9 @@ void StubCode::GenerateNArgsCheckInlineCacheStub(
     __ UpdateRangeFeedback(V0, 2, S5, T1, &done);
     __ Bind(&done);
     __ addiu(SP, SP, Immediate(frame_size * kWordSize));
-    __ RestoreCodePointer();
     __ LeaveStubFrame();
     __ Ret();
   } else {
-    __ lw(CODE_REG, FieldAddress(T0, Function::code_offset()));
     __ jr(T4);
   }

@@ -1646,7 +1629,6 @@ void StubCode::GenerateNArgsCheckInlineCacheStub(
     __ lw(RA, Address(SP, 0 * kWordSize));
     __ lw(S5, Address(SP, 1 * kWordSize));
     __ addiu(SP, SP, Immediate(2 * kWordSize));
-    __ RestoreCodePointer();
     __ LeaveStubFrame();
     __ b(&done_stepping);
   }
@@ -1792,7 +1774,6 @@ void StubCode::GenerateZeroArgsUnoptimizedStaticCallStub(Assembler* assembler) {

   // Get function and call it, if possible.
   __ lw(T0, Address(T0, target_offset));
-  __ lw(CODE_REG, FieldAddress(T0, Function::code_offset()));
   __ lw(T4, FieldAddress(T0, Function::entry_point_offset()));
   __ jr(T4);

@@ -1807,7 +1788,6 @@ void StubCode::GenerateZeroArgsUnoptimizedStaticCallStub(Assembler* assembler) {
     __ lw(RA, Address(SP, 0 * kWordSize));
     __ lw(S5, Address(SP, 1 * kWordSize));
     __ addiu(SP, SP, Immediate(2 * kWordSize));
-    __ RestoreCodePointer();
     __ LeaveStubFrame();
     __ b(&done_stepping);
   }
@@ -1847,7 +1827,6 @@ void StubCode::GenerateLazyCompileStub(Assembler* assembler) {
   __ addiu(SP, SP, Immediate(3 * kWordSize));
   __ LeaveStubFrame();

-  __ lw(CODE_REG, FieldAddress(T0, Function::code_offset()));
   __ lw(T2, FieldAddress(T0, Function::entry_point_offset()));
   __ jr(T2);
 }
@@ -1865,10 +1844,9 @@ void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) {
   __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);

   __ lw(S5, Address(SP, 1 * kWordSize));
-  __ lw(CODE_REG, Address(SP, 0 * kWordSize));
+  __ lw(T0, Address(SP, 0 * kWordSize));
   __ addiu(SP, SP, Immediate(2 * kWordSize));
   __ LeaveStubFrame();
-  __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset()));
   __ jr(T0);
 }

@@ -1882,10 +1860,9 @@ void StubCode::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {

   __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);

-  __ lw(CODE_REG, Address(SP, 0 * kWordSize));
+  __ lw(T0, Address(SP, 0 * kWordSize));
   __ addiu(SP, SP, Immediate(3 * kWordSize));
   __ LeaveStubFrame();
-  __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset()));
   __ jr(T0);
 }

@@ -2076,11 +2053,11 @@ void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) {
   __ sw(T0, Address(SP, 0 * kWordSize));
   __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
   __ Comment("OptimizeFunctionStub return");
-  __ lw(CODE_REG, Address(SP, 1 * kWordSize));  // Get Code object
+  __ lw(T0, Address(SP, 1 * kWordSize));  // Get Code object
   __ lw(S4, Address(SP, 2 * kWordSize));  // Restore argument descriptor.
   __ addiu(SP, SP, Immediate(3 * kWordSize));  // Discard argument.

-  __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset()));
+  __ lw(T0, FieldAddress(T0, Code::entry_point_offset()));
   __ LeaveStubFrameAndReturn(T0);
   __ break_(0);
 }
@@ -2201,7 +2178,6 @@ void StubCode::GenerateUnoptimizedIdenticalWithNumberCheckStub(
     __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
     __ lw(RA, Address(SP, 0 * kWordSize));
     __ addiu(SP, SP, Immediate(1 * kWordSize));
-    __ RestoreCodePointer();
     __ LeaveStubFrame();
     __ b(&done_stepping);
   }
@@ -2262,7 +2238,6 @@ void StubCode::EmitMegamorphicLookup(
   __ addu(T1, T2, T1);
   __ lw(T0, FieldAddress(T1, base + kWordSize));

-  __ lw(CODE_REG, FieldAddress(T0, Function::code_offset()));
   __ lw(target, FieldAddress(T0, Function::entry_point_offset()));
 }
