Chromium Code Reviews

Unified Diff: runtime/vm/assembler_arm.cc

Issue 1314883002: VM: Use constant pool also for leaf runtime calls on x64, arm, arm64 and mips. (Closed) Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: fixed non-simulated arm/mips build (created 5 years, 4 months ago)
Index: runtime/vm/assembler_arm.cc
diff --git a/runtime/vm/assembler_arm.cc b/runtime/vm/assembler_arm.cc
index b7cec21232e184d11b7473e0dbb40f1390145f9d..142c0ac583b51c5ee260c3621fdc96b3432a0c7b 100644
--- a/runtime/vm/assembler_arm.cc
+++ b/runtime/vm/assembler_arm.cc
@@ -2678,31 +2678,21 @@ void Assembler::Vdivqs(QRegister qd, QRegister qn, QRegister qm) {
}
-void Assembler::Branch(const ExternalLabel* label, Condition cond) {
- LoadImmediate(IP, label->address(), cond); // Address is never patched.
- bx(IP, cond);
-}
-
-
void Assembler::Branch(const StubEntry& stub_entry, Condition cond) {
const ExternalLabel label(stub_entry.EntryPoint());
- Branch(&label, cond);
+ LoadImmediate(IP, label.address(), cond); // Address is never patched.
rmacnak 2015/08/26 23:55:24 Shouldn't this be LoadExternalLabel? We still need
Florian Schneider 2015/08/27 08:27:19 Yes, using LoadExternalLabel is necessary here and
Florian Schneider 2015/08/27 08:38:09 Correction: Actually, it won't work right away, si
+ bx(IP, cond);
}
-void Assembler::BranchPatchable(const ExternalLabel* label) {
+void Assembler::BranchPatchable(const StubEntry& stub_entry) {
// Use a fixed size code sequence, since a function prologue may be patched
// with this branch sequence.
// Contrarily to BranchLinkPatchable, BranchPatchable requires an instruction
// cache flush upon patching.
- LoadPatchableImmediate(IP, label->address());
- bx(IP);
-}
-
-
-void Assembler::BranchPatchable(const StubEntry& stub_entry) {
const ExternalLabel label(stub_entry.EntryPoint());
- BranchPatchable(&label);
+ LoadPatchableImmediate(IP, label.address());
rmacnak 2015/08/26 23:55:24 Ditto
Florian Schneider 2015/08/27 08:27:19 Yes, but not the purpose of this CL (see descripti
+ bx(IP);
}
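
The comment kept in BranchPatchable explains the constraint driving this code: the sequence may overwrite a function prologue later, so it must have a fixed size, and rewriting instructions requires an instruction-cache flush. As a rough illustration only, here is a minimal sketch of such a fixed-size, in-place patchable address load, assuming an ARMv7-style movw/movt pair; the encoder and helper names are hypothetical and this is not the VM's LoadPatchableImmediate:

#include <cstdint>

// Encode "movw rd, #lo16" / "movt rd, #hi16" with condition AL (0xE), ARM
// encoding A1. Together they load an arbitrary 32-bit value into rd in a
// fixed two-instruction sequence.
std::uint32_t EncodeMovw(int rd, std::uint16_t imm16) {
  return 0xE3000000u | ((imm16 >> 12) << 16) | (rd << 12) | (imm16 & 0xFFFu);
}
std::uint32_t EncodeMovt(int rd, std::uint16_t imm16) {
  return 0xE3400000u | ((imm16 >> 12) << 16) | (rd << 12) | (imm16 & 0xFFFu);
}

// Emit the sequence that loads 'target' into register 'rd'.
void EmitPatchableLoad(std::uint32_t* code, int rd, std::uint32_t target) {
  code[0] = EncodeMovw(rd, static_cast<std::uint16_t>(target & 0xFFFF));
  code[1] = EncodeMovt(rd, static_cast<std::uint16_t>(target >> 16));
}

// Patching rewrites exactly the same two slots with a new address: the length
// of the sequence never changes, and the rewritten instructions must be made
// visible with an instruction-cache flush (omitted here).
void PatchLoad(std::uint32_t* code, int rd, std::uint32_t new_target) {
  EmitPatchableLoad(code, rd, new_target);
}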
@@ -2712,12 +2702,6 @@ void Assembler::BranchLink(const ExternalLabel* label) {
}
-void Assembler::BranchLink(const StubEntry& stub_entry) {
- const ExternalLabel label(stub_entry.EntryPoint());
- BranchLink(&label);
-}
-
-
void Assembler::BranchLink(const ExternalLabel* label, Patchability patchable) {
// Make sure that class CallPattern is able to patch the label referred
// to by this code sequence.
@@ -2737,14 +2721,9 @@ void Assembler::BranchLink(const StubEntry& stub_entry,
}
-void Assembler::BranchLinkPatchable(const ExternalLabel* label) {
- BranchLink(label, kPatchable);
-}
-
-
void Assembler::BranchLinkPatchable(const StubEntry& stub_entry) {
const ExternalLabel label(stub_entry.EntryPoint());
- BranchLinkPatchable(&label);
+ BranchLink(&label, kPatchable);
}
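
For context on the CL's title, the surviving BranchLink(&label, kPatchable) path is the one that routes calls through the constant pool: the target address lives in a pool slot addressed off PP, so repatching a call rewrites data rather than instructions and, unlike BranchPatchable, needs no instruction-cache flush. A hedged sketch of that idea follows; Pool, Add and PatchCallTarget are illustrative names, not the VM's ObjectPool API:

#include <cstdint>
#include <vector>

// A pool of call targets addressed off the pool-pointer register.
struct Pool {
  std::vector<std::uintptr_t> slots;   // one word per entry
  int Add(std::uintptr_t target) {     // returns the new entry's index
    slots.push_back(target);
    return static_cast<int>(slots.size()) - 1;
  }
};

// Conceptually, the call site emits something like
//   ldr IP, [PP, #(index * kWordSize)]   ; load target address from the pool
//   blx IP                               ; call it
// and repatching the call is just a data store into the pool slot:
void PatchCallTarget(Pool* pool, int index, std::uintptr_t new_target) {
  pool->slots[index] = new_target;  // no instruction rewrite, no i-cache flush
}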
@@ -3256,8 +3235,9 @@ void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) {
- // Preserve volatile CPU registers.
- EnterFrame(kDartVolatileCpuRegs | (1 << FP), 0);
+ // Preserve volatile CPU registers and PP.
+ EnterFrame(kDartVolatileCpuRegs | (1 << PP) | (1 << FP), 0);
+ COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0);
// Preserve all volatile FPU registers.
if (TargetCPUFeatures::vfp_supported()) {
@@ -3272,6 +3252,8 @@ void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) {
}
}
+ LoadPoolPointer();
+
ReserveAlignedFrameSpace(frame_space);
}
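
EnterCallRuntimeFrame now saves PP alongside the volatile registers and loads the pool pointer before reserving frame space, consistent with the CL's goal of routing leaf runtime calls through the constant pool. A small compile-time sketch of the invariants the new COMPILE_ASSERTs encode; the register numbers and the volatile-register mask below are hypothetical placeholders, not the VM's constants:

#include <cstdint>

constexpr int PP = 5;    // hypothetical pool-pointer register number
constexpr int FP = 11;   // hypothetical frame-pointer register number
constexpr std::uint32_t kDartVolatileCpuRegs = 0x500F;  // e.g. R0-R3, IP, LR

// Mirrors COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0): PP is
// saved explicitly, so it must not already be counted in the volatile set,
// otherwise the frame-size math in LeaveCallRuntimeFrame would be off by one.
static_assert((kDartVolatileCpuRegs & (1u << PP)) == 0,
              "PP must not be part of the volatile register mask");

// Mirrors COMPILE_ASSERT(PP < FP): PUSH stores registers in ascending
// register-number order, so PP < FP guarantees PP lands in the block below FP
// that LeaveCallRuntimeFrame measures with kPushedRegistersSize.
static_assert(PP < FP, "PP must be pushed below FP");

// The register mask this CL passes to EnterFrame:
constexpr std::uint32_t kRuntimeFrameRegs =
    kDartVolatileCpuRegs | (1u << PP) | (1u << FP);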
@@ -3284,10 +3266,12 @@ void Assembler::LeaveCallRuntimeFrame() {
TargetCPUFeatures::vfp_supported() ?
kDartVolatileFpuRegCount * kFpuRegisterSize : 0;
- // We subtract one from the volatile cpu register count because, even though
- // LR is volatile, it is pushed ahead of FP.
+ COMPILE_ASSERT(PP < FP);
+ COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0);
+ // kVolatileCpuRegCount +1 for PP, -1 because even though LR is volatile,
+ // it is pushed ahead of FP.
const intptr_t kPushedRegistersSize =
- (kDartVolatileCpuRegCount - 1) * kWordSize + kPushedFpuRegisterSize;
+ kDartVolatileCpuRegCount * kWordSize + kPushedFpuRegisterSize;
AddImmediate(SP, FP, -kPushedRegistersSize);
// Restore all volatile FPU registers.
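
The updated size computation folds two adjustments together: +1 word because PP is now saved below FP, and -1 word because LR, although volatile, is pushed above FP; the corrections cancel, leaving kDartVolatileCpuRegCount as the multiplier. A throwaway arithmetic sketch with illustrative constants (not the VM's real register counts):

#include <cstdio>

int main() {
  const int kWordSize = 4;                 // 32-bit ARM word
  const int kDartVolatileCpuRegCount = 6;  // hypothetical count, includes LR
  const int kPushedFpuRegisterSize = 0;    // ignore the FPU save area here

  // Old formula: only the volatile registers below FP count, so LR (which is
  // pushed above FP) is subtracted.
  const int old_size =
      (kDartVolatileCpuRegCount - 1) * kWordSize + kPushedFpuRegisterSize;

  // New formula: PP is also saved below FP (+1) while LR is still excluded
  // (-1), so the two corrections cancel out.
  const int new_size =
      kDartVolatileCpuRegCount * kWordSize + kPushedFpuRegisterSize;

  printf("old=%d bytes, new=%d bytes (one extra word for PP)\n",
         old_size, new_size);
  return 0;
}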
@@ -3304,7 +3288,7 @@ void Assembler::LeaveCallRuntimeFrame() {
}
// Restore volatile CPU registers.
- LeaveFrame(kDartVolatileCpuRegs | (1 << FP));
+ LeaveFrame(kDartVolatileCpuRegs | (1 << PP) | (1 << FP));
}
