Index: runtime/vm/assembler_arm.cc
diff --git a/runtime/vm/assembler_arm.cc b/runtime/vm/assembler_arm.cc
index b7cec21232e184d11b7473e0dbb40f1390145f9d..71a960831e0aff959f3d0d7829966d4a7599e2c9 100644
--- a/runtime/vm/assembler_arm.cc
+++ b/runtime/vm/assembler_arm.cc
@@ -2678,46 +2678,29 @@ void Assembler::Vdivqs(QRegister qd, QRegister qn, QRegister qm) {
 }
 
 
-void Assembler::Branch(const ExternalLabel* label, Condition cond) {
-  LoadImmediate(IP, label->address(), cond);  // Address is never patched.
-  bx(IP, cond);
-}
-
-
 void Assembler::Branch(const StubEntry& stub_entry, Condition cond) {
-  const ExternalLabel label(stub_entry.EntryPoint());
-  Branch(&label, cond);
+  // Address is never patched.
+  LoadImmediate(IP, stub_entry.label().address(), cond);
+  bx(IP, cond);
 }
 
 
-void Assembler::BranchPatchable(const ExternalLabel* label) {
+void Assembler::BranchPatchable(const StubEntry& stub_entry) {
   // Use a fixed size code sequence, since a function prologue may be patched
   // with this branch sequence.
   // In contrast to BranchLinkPatchable, BranchPatchable requires an
   // instruction cache flush upon patching.
-  LoadPatchableImmediate(IP, label->address());
+  LoadPatchableImmediate(IP, stub_entry.label().address());
   bx(IP);
 }
 
 
-void Assembler::BranchPatchable(const StubEntry& stub_entry) {
-  const ExternalLabel label(stub_entry.EntryPoint());
-  BranchPatchable(&label);
-}
-
-
 void Assembler::BranchLink(const ExternalLabel* label) {
   LoadImmediate(LR, label->address());  // Target address is never patched.
   blx(LR);  // Use blx instruction so that the return branch prediction works.
 }
 
 
-void Assembler::BranchLink(const StubEntry& stub_entry) {
-  const ExternalLabel label(stub_entry.EntryPoint());
-  BranchLink(&label);
-}
-
-
 void Assembler::BranchLink(const ExternalLabel* label, Patchability patchable) {
   // Make sure that class CallPattern is able to patch the label referred
   // to by this code sequence.
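Both branch families above now read the target through StubEntry's label accessor. A minimal sketch of the StubEntry shape these call sites assume, inferred only from the usage in this hunk (label() and EntryPoint()), not from the real declaration:

// Hypothetical sketch, for orientation only.
class StubEntry {
 public:
  explicit StubEntry(uword entry_point) : label_(entry_point) {}

  // Branch emitters take this label by reference instead of constructing
  // a temporary ExternalLabel from EntryPoint() at every call site.
  const ExternalLabel& label() const { return label_; }
  uword EntryPoint() const { return label_.address(); }

 private:
  ExternalLabel label_;
};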
@@ -2732,19 +2715,12 @@ void Assembler::BranchLink(const ExternalLabel* label, Patchability patchable) {
 
 
 void Assembler::BranchLink(const StubEntry& stub_entry,
                            Patchability patchable) {
-  const ExternalLabel label(stub_entry.EntryPoint());
-  BranchLink(&label, patchable);
-}
-
-
-void Assembler::BranchLinkPatchable(const ExternalLabel* label) {
-  BranchLink(label, kPatchable);
+  BranchLink(&stub_entry.label(), patchable);
 }
 
 
 void Assembler::BranchLinkPatchable(const StubEntry& stub_entry) {
-  const ExternalLabel label(stub_entry.EntryPoint());
-  BranchLinkPatchable(&label);
+  BranchLink(&stub_entry.label(), kPatchable);
 }
 
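Every patchable branch-and-link now funnels through BranchLink(&stub_entry.label(), kPatchable), whose emitted sequence must keep a fixed size so that class CallPattern can later locate and rewrite the embedded address. A sketch of the underlying idea, assuming an ARMv7 target where a patchable 32-bit constant is always materialized as a movw/movt pair (the real LoadPatchableImmediate also has non-ARMv7 paths):

void Assembler::LoadPatchableImmediate(Register rd, int32_t value,
                                       Condition cond) {
  // Always emit exactly these two instructions -- never a shorter mov or a
  // literal-pool load -- so the patcher can rely on the layout.
  movw(rd, Utils::Low16Bits(value), cond);   // rd = value & 0xffff.
  movt(rd, Utils::High16Bits(value), cond);  // rd[31:16] = value >> 16.
}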
@@ -3256,8 +3232,9 @@ void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
 
 
 void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) {
-  // Preserve volatile CPU registers.
-  EnterFrame(kDartVolatileCpuRegs | (1 << FP), 0);
+  // Preserve volatile CPU registers and PP.
+  EnterFrame(kDartVolatileCpuRegs | (1 << PP) | (1 << FP), 0);
+  COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0);
 
   // Preserve all volatile FPU registers.
   if (TargetCPUFeatures::vfp_supported()) {
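The runtime frame now saves the caller's PP alongside the volatile registers, and the COMPILE_ASSERT keeps the bookkeeping honest: if PP ever joined kDartVolatileCpuRegs it would be pushed once but counted twice by the frame-size arithmetic in LeaveCallRuntimeFrame below. A self-contained illustration of the invariants, with a hypothetical volatile set (the real constants live in constants_arm.h; only PP = R5 and FP = R11 are meant to match the ARM port):

#include <cstdint>

typedef uint32_t RegList;

// ARM register numbers for the example.
enum Register { R0, R1, R2, R3, R4, PP = 5, FP = 11, LR = 14 };

// Hypothetical volatile set {R0..R3, LR}.
const RegList kDartVolatileCpuRegs =
    (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3) | (1 << LR);

// The same checks the patch expresses with COMPILE_ASSERT: PP is saved
// explicitly, so it must not also be in the volatile set, and it must be
// pushed below the saved FP.
static_assert((kDartVolatileCpuRegs & (1 << PP)) == 0,
              "PP would be pushed once but counted twice");
static_assert(PP < FP, "PP must sit below FP in the push order");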
@@ -3272,6 +3249,8 @@
     }
   }
 
+  LoadPoolPointer();
+
   ReserveAlignedFrameSpace(frame_space);
 }
 
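With the caller's PP saved, the prologue re-establishes its own pool pointer before reserving argument space. Roughly what LoadPoolPointer does in this ARM port, sketched from the surrounding code's conventions -- treat the exact offset expression as an assumption, not the verified implementation:

void Assembler::LoadPoolPointer() {
  // The object pool pointer is stored at a fixed offset inside the
  // Instructions object containing the code being generated, so PP can be
  // reloaded PC-relative.  PC reads as (current pc + 8) on ARM.
  const intptr_t object_pool_pc_dist =
      Instructions::HeaderSize() - Instructions::object_pool_offset() +
      CodeSize() + Instr::kPCReadOffset;
  ldr(PP, Address(PC, -object_pool_pc_dist));
}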
@@ -3284,10 +3263,12 @@
       TargetCPUFeatures::vfp_supported() ?
       kDartVolatileFpuRegCount * kFpuRegisterSize : 0;
 
-  // We subtract one from the volatile cpu register count because, even though
-  // LR is volatile, it is pushed ahead of FP.
+  COMPILE_ASSERT(PP < FP);
+  COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0);
+  // kDartVolatileCpuRegCount +1 for PP, -1 because, even though LR is
+  // volatile, it is pushed ahead of FP.
   const intptr_t kPushedRegistersSize =
-      (kDartVolatileCpuRegCount - 1) * kWordSize + kPushedFpuRegisterSize;
+      kDartVolatileCpuRegCount * kWordSize + kPushedFpuRegisterSize;
   AddImmediate(SP, FP, -kPushedRegistersSize);
 
   // Restore all volatile FPU registers.
@@ -3304,7 +3285,7 @@
     }
   }
 
   // Restore volatile CPU registers.
-  LeaveFrame(kDartVolatileCpuRegs | (1 << FP));
+  LeaveFrame(kDartVolatileCpuRegs | (1 << PP) | (1 << FP));
 }
 
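A worked instance of the frame-size arithmetic above, with kWordSize == 4 and the same hypothetical five-register volatile set {R0..R3, LR} as before (the real count is in constants_arm.h):

// stmdb stores ascending register numbers at ascending addresses, so the
// runtime frame lays out as (higher addresses up):
//
//   | saved LR |   <- pushed ahead of FP (LR is volatile)
//   | saved FP |   <- FP points here
//   | saved PP |
//   | saved R3 |
//   | ...      |
//   | saved R0 |
//   | FPU regs |   <- SP after the prologue's register saves
//
// Words below FP: 4 volatile registers (5 - 1, LR excluded) + 1 for PP
// = 5 = kDartVolatileCpuRegCount, plus the FPU block -- which is exactly
// the new expression:
const intptr_t kPushedFpuRegisterSize = 0;  // Assume no VFP for the example.
const intptr_t kPushedRegistersSize =
    5 * 4 + kPushedFpuRegisterSize;  // kDartVolatileCpuRegCount * kWordSize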