Chromium Code Reviews

Unified Diff: runtime/vm/stub_code_arm.cc

Issue 1343373003: Revert "VM: New calling convention for generated code." (Closed)
Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: Created 5 years, 3 months ago
Index: runtime/vm/stub_code_arm.cc
diff --git a/runtime/vm/stub_code_arm.cc b/runtime/vm/stub_code_arm.cc
index 42276dabe545a02c30917a3e323045f8a1afe130..9d2883b58d65b194d1cb612ba2591b19a494a377 100644
--- a/runtime/vm/stub_code_arm.cc
+++ b/runtime/vm/stub_code_arm.cc
@@ -42,11 +42,12 @@ void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) {
const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
const intptr_t argv_offset = NativeArguments::argv_offset();
const intptr_t retval_offset = NativeArguments::retval_offset();
+ const intptr_t exitframe_last_param_slot_from_fp = 2;
__ EnterStubFrame();
- COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R7)) != 0);
- __ LoadIsolate(R7);
+ COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R9)) != 0);
+ __ LoadIsolate(R9);
// Save exit frame information to enable stack walking as we are about
// to transition to Dart VM C++ code.
@@ -55,7 +56,7 @@ void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) {
#if defined(DEBUG)
{ Label ok;
// Check that we are always entering from Dart code.
- __ LoadFromOffset(kWord, R6, R7, Isolate::vm_tag_offset());
+ __ LoadFromOffset(kWord, R6, R9, Isolate::vm_tag_offset());
__ CompareImmediate(R6, VMTag::kDartTagId);
__ b(&ok, EQ);
__ Stop("Not coming from Dart code.");
@@ -64,7 +65,7 @@ void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) {
#endif
// Mark that the isolate is executing VM code.
- __ StoreToOffset(kWord, R5, R7, Isolate::vm_tag_offset());
+ __ StoreToOffset(kWord, R5, R9, Isolate::vm_tag_offset());
// Reserve space for arguments and align frame before entering C++ world.
// NativeArguments are passed in registers.
@@ -86,7 +87,7 @@ void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) {
ASSERT(argv_offset == 2 * kWordSize);
__ add(R2, FP, Operand(R4, LSL, 2)); // Compute argv.
// Set argv in NativeArguments.
- __ AddImmediate(R2, kParamEndSlotFromFp * kWordSize);
+ __ AddImmediate(R2, exitframe_last_param_slot_from_fp * kWordSize);
ASSERT(retval_offset == 3 * kWordSize);
__ add(R3, R2, Operand(kWordSize)); // Retval is next to 1st argument.
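
As a worked example of the argv/retval computation above (a sketch of the arithmetic only, assuming R4 carries the argument count on a 32-bit target where kWordSize == 4; not patch code):

    // With argc = 3 (and exitframe_last_param_slot_from_fp = 2):
    //   R2 (argv)   = FP + (3 << 2) + 2 * kWordSize = FP + 20
    //   R3 (retval) = R2 + kWordSize = FP + 24   // "next to 1st argument"
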
@@ -96,7 +97,7 @@ void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) {
// Mark that the isolate is executing Dart code.
__ LoadImmediate(R2, VMTag::kDartTagId);
- __ StoreToOffset(kWord, R2, R7, Isolate::vm_tag_offset());
+ __ StoreToOffset(kWord, R2, R9, Isolate::vm_tag_offset());
// Reset exit frame information in Isolate structure.
__ LoadImmediate(R2, 0);
@@ -140,8 +141,8 @@ void StubCode::GenerateCallNativeCFunctionStub(Assembler* assembler) {
__ EnterStubFrame();
- COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R7)) != 0);
- __ LoadIsolate(R7);
+ COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R9)) != 0);
+ __ LoadIsolate(R9);
// Save exit frame information to enable stack walking as we are about
// to transition to native code.
@@ -150,7 +151,7 @@ void StubCode::GenerateCallNativeCFunctionStub(Assembler* assembler) {
#if defined(DEBUG)
{ Label ok;
// Check that we are always entering from Dart code.
- __ LoadFromOffset(kWord, R6, R7, Isolate::vm_tag_offset());
+ __ LoadFromOffset(kWord, R6, R9, Isolate::vm_tag_offset());
__ CompareImmediate(R6, VMTag::kDartTagId);
__ b(&ok, EQ);
__ Stop("Not coming from Dart code.");
@@ -159,7 +160,7 @@ void StubCode::GenerateCallNativeCFunctionStub(Assembler* assembler) {
#endif
// Mark that the isolate is executing Native code.
- __ StoreToOffset(kWord, R5, R7, Isolate::vm_tag_offset());
+ __ StoreToOffset(kWord, R5, R9, Isolate::vm_tag_offset());
// Reserve space for the native arguments structure passed on the stack (the
// outgoing pointer parameter to the native arguments structure is passed in
@@ -182,8 +183,7 @@ void StubCode::GenerateCallNativeCFunctionStub(Assembler* assembler) {
// Set argv in NativeArguments: R2 already contains argv.
ASSERT(retval_offset == 3 * kWordSize);
- // Set retval in NativeArgs.
- __ add(R3, FP, Operand(kCallerSpSlotFromFp * kWordSize));
+ __ add(R3, FP, Operand(3 * kWordSize)); // Set retval in NativeArgs.
// Passing the structure by value as in runtime calls would require changing
// Dart API for native functions.
@@ -195,19 +195,17 @@ void StubCode::GenerateCallNativeCFunctionStub(Assembler* assembler) {
// Call native function invocation wrapper or redirection via simulator.
#if defined(USING_SIMULATOR)
uword entry = reinterpret_cast<uword>(NativeEntry::NativeCallWrapper);
- const ExternalLabel label(Simulator::RedirectExternalReference(
- entry, Simulator::kNativeCall, NativeEntry::kNumCallWrapperArguments));
- __ LoadExternalLabel(R2, &label, kNotPatchable);
+ entry = Simulator::RedirectExternalReference(
+ entry, Simulator::kNativeCall, NativeEntry::kNumCallWrapperArguments);
+ __ LoadImmediate(R2, entry);
__ blx(R2);
#else
- __ LoadExternalLabel(
- LR, &NativeEntry::NativeCallWrapperLabel(), kNotPatchable);
- __ blx(LR);
+ __ BranchLink(&NativeEntry::NativeCallWrapperLabel(), kNotPatchable);
#endif
// Mark that the isolate is executing Dart code.
__ LoadImmediate(R2, VMTag::kDartTagId);
- __ StoreToOffset(kWord, R2, R7, Isolate::vm_tag_offset());
+ __ StoreToOffset(kWord, R2, R9, Isolate::vm_tag_offset());
// Reset exit frame information in Isolate structure.
__ LoadImmediate(R2, 0);
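
A note on the simulator branch in the hunk above: under USING_SIMULATOR the call target is not the host function itself. RedirectExternalReference returns a trampoline address the simulator recognizes at call time, tagged with the call kind and argument count so it can marshal the call out to the real NativeCallWrapper. Condensed, with annotations (mirroring the patch; comments added):

    uword entry = reinterpret_cast<uword>(NativeEntry::NativeCallWrapper);
    entry = Simulator::RedirectExternalReference(   // yields a trampoline the
        entry, Simulator::kNativeCall,              // simulator intercepts,
        NativeEntry::kNumCallWrapperArguments);     // with arg-count metadata
    __ LoadImmediate(R2, entry);  // materialize the redirected address
    __ blx(R2);                   // simulator traps this and calls the host
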
@@ -232,8 +230,8 @@ void StubCode::GenerateCallBootstrapCFunctionStub(Assembler* assembler) {
__ EnterStubFrame();
- COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R7)) != 0);
- __ LoadIsolate(R7);
+ COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R9)) != 0);
+ __ LoadIsolate(R9);
// Save exit frame information to enable stack walking as we are about
// to transition to native code.
@@ -242,7 +240,7 @@ void StubCode::GenerateCallBootstrapCFunctionStub(Assembler* assembler) {
#if defined(DEBUG)
{ Label ok;
// Check that we are always entering from Dart code.
- __ LoadFromOffset(kWord, R6, R7, Isolate::vm_tag_offset());
+ __ LoadFromOffset(kWord, R6, R9, Isolate::vm_tag_offset());
__ CompareImmediate(R6, VMTag::kDartTagId);
__ b(&ok, EQ);
__ Stop("Not coming from Dart code.");
@@ -251,7 +249,7 @@ void StubCode::GenerateCallBootstrapCFunctionStub(Assembler* assembler) {
#endif
// Mark that the isolate is executing Native code.
- __ StoreToOffset(kWord, R5, R7, Isolate::vm_tag_offset());
+ __ StoreToOffset(kWord, R5, R9, Isolate::vm_tag_offset());
// Reserve space for the native arguments structure passed on the stack (the
// outgoing pointer parameter to the native arguments structure is passed in
@@ -274,8 +272,7 @@ void StubCode::GenerateCallBootstrapCFunctionStub(Assembler* assembler) {
// Set argv in NativeArguments: R2 already contains argv.
ASSERT(retval_offset == 3 * kWordSize);
- // Set retval in NativeArgs.
- __ add(R3, FP, Operand(kCallerSpSlotFromFp * kWordSize));
+ __ add(R3, FP, Operand(3 * kWordSize)); // Set retval in NativeArgs.
// Passing the structure by value as in runtime calls would require changing
// Dart API for native functions.
@@ -288,7 +285,7 @@ void StubCode::GenerateCallBootstrapCFunctionStub(Assembler* assembler) {
// Mark that the isolate is executing Dart code.
__ LoadImmediate(R2, VMTag::kDartTagId);
- __ StoreToOffset(kWord, R2, R7, Isolate::vm_tag_offset());
+ __ StoreToOffset(kWord, R2, R9, Isolate::vm_tag_offset());
// Reset exit frame information in Isolate structure.
__ LoadImmediate(R2, 0);
@@ -314,7 +311,6 @@ void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) {
// Remove the stub frame.
__ LeaveStubFrame();
// Jump to the dart function.
- __ mov(CODE_REG, Operand(R0));
__ ldr(R0, FieldAddress(R0, Code::entry_point_offset()));
__ bx(R0);
}
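
The tail-jump pattern above, repeated in the two Fix*Target stubs below, no longer materializes CODE_REG: it loads the entry point out of the Code object in R0 and branches. One detail worth spelling out (a sketch; the detag arithmetic is the VM's usual FieldAddress convention):

    // FieldAddress compensates for the heap-object tag on R0, i.e. this reads
    // [R0 + Code::entry_point_offset() - kHeapObjectTag].
    __ ldr(R0, FieldAddress(R0, Code::entry_point_offset()));  // R0 = entry
    __ bx(R0);  // tail call; no stub frame remains at this point
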
@@ -324,10 +320,6 @@ void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) {
// (invalid because its function was optimized or deoptimized).
// R4: arguments descriptor array.
void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) {
- // Load code pointer to this stub from the thread:
- // The one that is passed in, is not correct - it points to the code object
- // that needs to be replaced.
- __ ldr(CODE_REG, Address(THR, Thread::fix_callers_target_code_offset()));
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
@@ -340,7 +332,6 @@ void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) {
// Remove the stub frame.
__ LeaveStubFrame();
// Jump to the dart function.
- __ mov(CODE_REG, Operand(R0));
__ ldr(R0, FieldAddress(R0, Code::entry_point_offset()));
__ bx(R0);
}
@@ -349,10 +340,6 @@ void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) {
// Called from object allocate instruction when the allocation stub has been
// disabled.
void StubCode::GenerateFixAllocationStubTargetStub(Assembler* assembler) {
- // Load code pointer to this stub from the thread:
- // The one that is passed in, is not correct - it points to the code object
- // that needs to be replaced.
- __ ldr(CODE_REG, Address(THR, Thread::fix_allocation_stub_code_offset()));
__ EnterStubFrame();
// Setup space on stack for return value.
__ LoadObject(R0, Object::null_object());
@@ -363,7 +350,6 @@ void StubCode::GenerateFixAllocationStubTargetStub(Assembler* assembler) {
// Remove the stub frame.
__ LeaveStubFrame();
// Jump to the dart function.
- __ mov(CODE_REG, Operand(R0));
__ ldr(R0, FieldAddress(R0, Code::entry_point_offset()));
__ bx(R0);
}
@@ -417,15 +403,13 @@ static void PushArgumentsArray(Assembler* assembler) {
// +------------------+
// | Saved LR | (deoptimization point)
// +------------------+
-// | pc marker |
-// +------------------+
-// | Saved CODE_REG |
+// | PC marker |
// +------------------+
// | ... | <- SP of optimized frame
//
// Parts of the code cannot GC, part of the code can GC.
static void GenerateDeoptimizationSequence(Assembler* assembler,
- DeoptStubKind kind) {
+ bool preserve_result) {
// DeoptimizeCopyFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
// is no need to set the correct PC marker or load PP, since they get patched.
@@ -435,10 +419,21 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
__ eor(LR, IP, Operand(LR));
__ eor(IP, IP, Operand(LR));
- // Set up the frame manually with return address now stored in IP.
- __ EnterFrame((1 << PP) | (1 << CODE_REG) | (1 << FP) | (1 << IP), 0);
+ // Set up the frame manually. We can't use EnterFrame because we can't
+ // clobber LR (or any other register) with 0, yet.
+ __ sub(SP, SP, Operand(kWordSize)); // Make room for PC marker of 0.
+ __ Push(IP); // Push return address.
+ __ Push(FP);
+ __ mov(FP, Operand(SP));
+ __ Push(PP);
+
__ LoadPoolPointer();
+ // Now that IP holding the return address has been written to the stack,
+ // we can clobber it with 0 to write the null PC marker.
+ __ mov(IP, Operand(0));
+ __ str(IP, Address(SP, +3 * kWordSize));
+
// The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
// and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
const intptr_t saved_result_slot_from_fp =
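
To make the manual frame setup above concrete, the stack after the sub/push sequence looks as follows (same diagram convention as earlier in this file), which is why the zeroed PC marker lands at SP + 3 * kWordSize:

    // +-------------------+
    // | (PC marker slot)  |  <- SP + 3*kWordSize, zeroed via IP just after
    // +-------------------+
    // | return address    |  (pushed from IP after the LR/IP swap)
    // +-------------------+
    // | saved FP          |  <- FP
    // +-------------------+
    // | saved PP          |  <- SP
    // +-------------------+
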
@@ -447,17 +442,7 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// Push registers in their enumeration order: lowest register number at
// lowest address.
- for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
- if (i == CODE_REG) {
- // Save the original value of CODE_REG pushed before invoking this stub
- // instead of the value used to call this stub.
- COMPILE_ASSERT(IP > CODE_REG); // Assert IP is pushed first.
- __ ldr(IP, Address(FP, kCallerSpSlotFromFp * kWordSize));
- __ Push(IP);
- } else {
- __ Push(static_cast<Register>(i));
- }
- }
+ __ PushList(kAllCpuRegistersList);
if (TargetCPUFeatures::vfp_supported()) {
ASSERT(kFpuRegisterSize == 4 * kWordSize);
@@ -472,18 +457,15 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
}
__ mov(R0, Operand(SP)); // Pass address of saved registers block.
- __ mov(R1, Operand(kind == kLazyDeopt ? 1 : 0));
__ ReserveAlignedFrameSpace(0);
- __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2);
+ __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 1);
// Result (R0) is stack-size (FP - SP) in bytes.
- const bool preserve_result = (kind == kLazyDeopt);
if (preserve_result) {
// Restore result into R1 temporarily.
__ ldr(R1, Address(FP, saved_result_slot_from_fp * kWordSize));
}
- __ RestoreCodePointer();
__ LeaveDartFrame();
__ sub(SP, FP, Operand(R0));
@@ -501,7 +483,6 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
__ ldr(R1, Address(FP, kFirstLocalSlotFromFp * kWordSize));
}
// Code above cannot cause GC.
- __ RestoreCodePointer();
__ LeaveStubFrame();
// Frame is fully rewritten at this point and it is safe to perform a GC.
@@ -530,16 +511,13 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
void StubCode::GenerateDeoptimizeLazyStub(Assembler* assembler) {
// Correct return address to point just after the call that is being
// deoptimized.
- __ AddImmediate(LR, -CallPattern::DeoptCallPatternLengthInBytes());
- // Push zap value instead of CODE_REG for lazy deopt.
- __ LoadImmediate(IP, 0xf1f1f1f1);
- __ Push(IP);
- GenerateDeoptimizationSequence(assembler, kLazyDeopt);
+ __ AddImmediate(LR, -CallPattern::LengthInBytes());
+ GenerateDeoptimizationSequence(assembler, true); // Preserve R0.
}
void StubCode::GenerateDeoptimizeStub(Assembler* assembler) {
- GenerateDeoptimizationSequence(assembler, kEagerDeopt);
+ GenerateDeoptimizationSequence(assembler, false); // Don't preserve R0.
}
@@ -595,7 +573,6 @@ void StubCode::GenerateMegamorphicMissStub(Assembler* assembler) {
// Restore IC data and arguments descriptor.
__ PopList((1 << R4) | (1 << R5));
- __ RestoreCodePointer();
__ LeaveStubFrame();
if (!FLAG_lazy_dispatchers) {
@@ -605,7 +582,6 @@ void StubCode::GenerateMegamorphicMissStub(Assembler* assembler) {
}
// Tail-call to target function.
- __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
__ ldr(R2, FieldAddress(R0, Function::entry_point_offset()));
__ bx(R2);
}
@@ -644,24 +620,24 @@ void StubCode::GenerateAllocateArrayStub(Assembler* assembler) {
/* inline_isolate = */ false);
const intptr_t fixed_size = sizeof(RawArray) + kObjectAlignment - 1;
- __ LoadImmediate(R5, fixed_size);
- __ add(R5, R5, Operand(R3, LSL, 1)); // R3 is a Smi.
+ __ LoadImmediate(R9, fixed_size);
+ __ add(R9, R9, Operand(R3, LSL, 1)); // R3 is a Smi.
ASSERT(kSmiTagShift == 1);
- __ bic(R5, R5, Operand(kObjectAlignment - 1));
+ __ bic(R9, R9, Operand(kObjectAlignment - 1));
- // R5: Allocation size.
+ // R9: Allocation size.
Heap::Space space = Heap::SpaceForAllocation(cid);
__ LoadIsolate(R6);
__ ldr(R6, Address(R6, Isolate::heap_offset()));
// Potential new object start.
__ ldr(R0, Address(R6, Heap::TopOffset(space)));
- __ adds(R7, R0, Operand(R5)); // Potential next object start.
+ __ adds(R7, R0, Operand(R9)); // Potential next object start.
__ b(&slow_case, CS); // Branch if unsigned overflow.
// Check if the allocation fits into the remaining space.
// R0: potential new object start.
// R7: potential next object start.
- // R5: allocation size.
+ // R9: allocation size.
__ ldr(R3, Address(R6, Heap::EndOffset(space)));
__ cmp(R7, Operand(R3));
__ b(&slow_case, CS);
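
The fast path above is a standard bump-pointer allocation with an unsigned-overflow guard; in C-like pseudocode (a sketch with illustrative names, not VM source):

    // R3 holds the length as a Smi (value << 1), so LSL 1 yields length * 4.
    uword size = (fixed_size + (length << 2)) & ~(kObjectAlignment - 1);
    uword top  = heap->top(space);                 // R0: new object start
    uword next = top + size;                       // R7: next object start
    if (next < top) goto slow_case;                // adds / b CS: overflow
    if (next >= heap->end(space)) goto slow_case;  // cmp / b CS: out of space
    // success: later code publishes `next` as the new top and tags R0
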
@@ -676,12 +652,12 @@ void StubCode::GenerateAllocateArrayStub(Assembler* assembler) {
// R0: new object start as a tagged pointer.
// R3: allocation stats address.
// R7: new object end address.
- // R5: allocation size.
+ // R9: allocation size.
{
const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2;
- __ CompareImmediate(R5, RawObject::SizeTag::kMaxSizeTag);
- __ mov(R6, Operand(R5, LSL, shift), LS);
+ __ CompareImmediate(R9, RawObject::SizeTag::kMaxSizeTag);
+ __ mov(R6, Operand(R9, LSL, shift), LS);
__ mov(R6, Operand(0), HI);
// Get the class index and insert it into the tags.
@@ -710,13 +686,13 @@ void StubCode::GenerateAllocateArrayStub(Assembler* assembler) {
// R6: iterator which initially points to the start of the variable
// data area to be initialized.
// R7: new object end address.
- // R5: allocation size.
- __ IncrementAllocationStatsWithSize(R3, R5, space);
+ // R9: allocation size.
__ LoadObject(R4, Object::null_object());
__ mov(R5, Operand(R4));
__ AddImmediate(R6, R0, sizeof(RawArray) - kHeapObjectTag);
__ InitializeFieldsNoBarrier(R0, R6, R7, R4, R5);
+ __ IncrementAllocationStatsWithSize(R3, R9, space);
__ Ret(); // Returns the newly allocated object in R0.
// Unable to allocate the array using the fast inline code, just call
// into the runtime.
@@ -741,7 +717,7 @@ void StubCode::GenerateAllocateArrayStub(Assembler* assembler) {
// Called when invoking Dart code from C++ (VM code).
// Input parameters:
// LR : points to return address.
-// R0 : code object of the Dart function to call.
+// R0 : entrypoint of the Dart function to call.
// R1 : arguments descriptor array.
// R2 : arguments array.
// R3 : current thread.
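
Restated from the C++ side, the revert means the invocation stub receives a raw entry point again. A hypothetical typedef capturing the register assignments listed above (illustrative; not the VM's actual declaration):

    // Hypothetical signature sketch; argument types simplified.
    typedef int32_t (*InvokeDartCodeStub)(uword entry_point,   // R0
                                          void* args_desc,     // R1
                                          void* args_array,    // R2
                                          Thread* thread);     // R3
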
@@ -761,19 +737,24 @@ void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) {
__ sub(SP, SP, Operand(kAbiPreservedFpuRegCount * kFpuRegisterSize));
}
+ // We now load the pool pointer (PP) as we are about to invoke Dart code and
+ // we could potentially invoke some intrinsic functions which need the PP to
+ // be set up.
+ __ LoadPoolPointer();
+
// Set up THR, which caches the current thread in Dart code.
if (THR != R3) {
__ mov(THR, Operand(R3));
}
- __ LoadIsolate(R7);
+ __ LoadIsolate(R9);
// Save the current VMTag on the stack.
- __ LoadFromOffset(kWord, R5, R7, Isolate::vm_tag_offset());
+ __ LoadFromOffset(kWord, R5, R9, Isolate::vm_tag_offset());
__ Push(R5);
// Mark that the isolate is executing Dart code.
__ LoadImmediate(R5, VMTag::kDartTagId);
- __ StoreToOffset(kWord, R5, R7, Isolate::vm_tag_offset());
+ __ StoreToOffset(kWord, R5, R9, Isolate::vm_tag_offset());
// Save top resource and top exit frame info. Use R4-6 as temporary registers.
// StackFrameIterator reads the top exit frame info saved in this frame.
@@ -815,15 +796,12 @@ void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) {
__ Bind(&done_push_arguments);
// Call the Dart code entrypoint.
- __ LoadImmediate(PP, 0); // GC safe value into PP.
- __ ldr(CODE_REG, Address(R0, VMHandles::kOffsetOfRawPtrInHandle));
- __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset()));
__ blx(R0); // R4 is the arguments descriptor array.
// Get rid of arguments pushed on the stack.
__ AddImmediate(SP, FP, kExitLinkSlotFromEntryFp * kWordSize);
- __ LoadIsolate(R7);
+ __ LoadIsolate(R9);
// Restore the saved top exit frame info and top resource back into the
// Isolate structure. Uses R5 as a temporary register for this.
__ Pop(R5);
@@ -833,7 +811,7 @@ void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) {
// Restore the current VMTag from the stack.
__ Pop(R4);
- __ StoreToOffset(kWord, R4, R7, Isolate::vm_tag_offset());
+ __ StoreToOffset(kWord, R4, R9, Isolate::vm_tag_offset());
// Restore C++ ABI callee-saved registers.
if (TargetCPUFeatures::vfp_supported()) {
@@ -1052,11 +1030,12 @@ void StubCode::GenerateUpdateStoreBufferStub(Assembler* assembler) {
// Input parameters:
// LR : return address.
// SP + 0 : type arguments object (only if class is parameterized).
-void StubCode::GenerateAllocationStubForClass(Assembler* assembler,
- const Class& cls) {
- // Must load pool pointer before being able to patch.
- Register new_pp = R7;
- __ LoadPoolPointer(new_pp);
+// Returns patch_code_pc offset where patching code for disabling the stub
+// has been generated (similar to regularly generated Dart code).
+void StubCode::GenerateAllocationStubForClass(
+ Assembler* assembler, const Class& cls,
+ uword* entry_patch_offset, uword* patch_code_pc_offset) {
+ *entry_patch_offset = assembler->CodeSize();
// The generated code is different if the class is parameterized.
const bool is_cls_parameterized = cls.NumTypeArguments() > 0;
ASSERT(!is_cls_parameterized ||
@@ -1182,6 +1161,8 @@ void StubCode::GenerateAllocationStubForClass(Assembler* assembler,
// Restore the frame pointer.
__ LeaveStubFrame();
__ Ret();
+ *patch_code_pc_offset = assembler->CodeSize();
+ __ BranchPatchable(*StubCode::FixAllocationStubTarget_entry());
}
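
The two out-parameters introduced above bracket the stub for later patching: *entry_patch_offset is recorded at the top of generation and *patch_code_pc_offset marks the trailing patchable branch to FixAllocationStubTarget, so a stale allocation stub can be disabled in place. A hypothetical call site (names illustrative):

    // Hypothetical caller; mirrors the signature introduced in this hunk.
    uword entry_patch_offset = 0;
    uword patch_code_pc_offset = 0;
    StubCode::GenerateAllocationStubForClass(
        assembler, cls, &entry_patch_offset, &patch_code_pc_offset);
    // Both values are byte offsets into the generated stub, usable later to
    // locate the patchable branch.
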
@@ -1336,7 +1317,6 @@ void StubCode::GenerateNArgsCheckInlineCacheStub(
Token::Kind kind,
RangeCollectionMode range_collection_mode,
bool optimized) {
- __ CheckCodePointer();
ASSERT(num_args > 0);
#if defined(DEBUG)
{ Label ok;
@@ -1461,9 +1441,6 @@ void StubCode::GenerateNArgsCheckInlineCacheStub(
// Pop returned function object into R0.
// Restore arguments descriptor array and IC data array.
__ PopList((1 << R0) | (1 << R4) | (1 << R5));
- if (range_collection_mode == kCollectRanges) {
- __ RestoreCodePointer();
- }
__ LeaveStubFrame();
Label call_target_function;
if (!FLAG_lazy_dispatchers) {
@@ -1501,18 +1478,15 @@ void StubCode::GenerateNArgsCheckInlineCacheStub(
} else {
__ PushList((1 << R1) | (1 << R5));
}
- __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
__ blx(R2);
Label done;
__ ldr(R5, Address(FP, kFirstLocalSlotFromFp * kWordSize));
__ UpdateRangeFeedback(R0, 2, R5, R1, R4, &done);
__ Bind(&done);
- __ RestoreCodePointer();
__ LeaveStubFrame();
__ Ret();
} else {
- __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
__ bx(R2);
}
@@ -1522,7 +1496,6 @@ void StubCode::GenerateNArgsCheckInlineCacheStub(
__ Push(R5); // Preserve IC data.
__ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
__ Pop(R5);
- __ RestoreCodePointer();
__ LeaveStubFrame();
__ b(&done_stepping);
}
@@ -1673,7 +1646,6 @@ void StubCode::GenerateZeroArgsUnoptimizedStaticCallStub(Assembler* assembler) {
// Get function and call it, if possible.
__ LoadFromOffset(kWord, R0, R6, target_offset);
- __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
__ ldr(R2, FieldAddress(R0, Function::entry_point_offset()));
__ bx(R2);
@@ -1683,7 +1655,6 @@ void StubCode::GenerateZeroArgsUnoptimizedStaticCallStub(Assembler* assembler) {
__ Push(R5); // Preserve IC data.
__ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
__ Pop(R5);
- __ RestoreCodePointer();
__ LeaveStubFrame();
__ b(&done_stepping);
}
@@ -1720,7 +1691,6 @@ void StubCode::GenerateLazyCompileStub(Assembler* assembler) {
__ PopList((1 << R4) | (1 << R5)); // Restore arg desc. and IC data.
__ LeaveStubFrame();
- __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
__ ldr(R2, FieldAddress(R0, Function::entry_point_offset()));
__ bx(R2);
}
@@ -1735,8 +1705,6 @@ void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) {
__ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
__ PopList((1 << R0) | (1 << R5));
__ LeaveStubFrame();
- __ mov(CODE_REG, Operand(R0));
- __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset()));
__ bx(R0);
}
@@ -1747,9 +1715,8 @@ void StubCode::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
// Make room for result.
__ PushList((1 << R0));
__ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
- __ PopList((1 << CODE_REG));
+ __ PopList((1 << R0));
__ LeaveStubFrame();
- __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset()));
__ bx(R0);
}
@@ -1929,9 +1896,8 @@ void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) {
__ Pop(R0); // Discard argument.
__ Pop(R0); // Get Code object
__ Pop(R4); // Restore argument descriptor.
- __ LeaveStubFrame();
- __ mov(CODE_REG, Operand(R0));
__ ldr(R0, FieldAddress(R0, Code::entry_point_offset()));
+ __ LeaveStubFrame();
__ bx(R0);
__ bkpt(0);
}
@@ -2033,7 +1999,6 @@ void StubCode::GenerateUnoptimizedIdenticalWithNumberCheckStub(
__ Bind(&stepping);
__ EnterStubFrame();
__ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
- __ RestoreCodePointer();
__ LeaveStubFrame();
__ b(&done_stepping);
}
@@ -2093,7 +2058,6 @@ void StubCode::EmitMegamorphicLookup(
// be invoked as a normal Dart function.
__ add(IP, R2, Operand(R3, LSL, 2));
__ ldr(R0, FieldAddress(IP, base + kWordSize));
- __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
__ ldr(target, FieldAddress(R0, Function::entry_point_offset()));
}
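
Finally, the megamorphic-lookup epilogue above indexes the cache's backing array and jumps straight to the hit function's entry point, again without reloading CODE_REG; in C-like pseudocode (a sketch, names illustrative):

    // IP = R2 + (index << 2) addresses the cache entry; the function sits
    // one word past `base` inside it.
    RawFunction* fn = entry_at(cache, index);  // loaded into R0
    uword target = fn->entry_point_;           // Function::entry_point_offset()
    jump(target);                              // proceeds as a normal Dart call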