Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(469)

Side by Side Diff: runtime/vm/assembler_arm.cc

Issue 1314883002: VM: Use constant pool also for leaf runtime calls on x64, arm, arm64 and mips. (Closed) Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: use StubEntry::label directly Created 5 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « runtime/vm/assembler_arm.h ('k') | runtime/vm/assembler_arm64.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // NOLINT 5 #include "vm/globals.h" // NOLINT
6 #if defined(TARGET_ARCH_ARM) 6 #if defined(TARGET_ARCH_ARM)
7 7
8 #include "vm/assembler.h" 8 #include "vm/assembler.h"
9 #include "vm/cpu.h" 9 #include "vm/cpu.h"
10 #include "vm/longjump.h" 10 #include "vm/longjump.h"
(...skipping 2660 matching lines...) Expand 10 before | Expand all | Expand 10 after
2671 void Assembler::Vdivqs(QRegister qd, QRegister qn, QRegister qm) { 2671 void Assembler::Vdivqs(QRegister qd, QRegister qn, QRegister qm) {
2672 ASSERT(qd != QTMP); 2672 ASSERT(qd != QTMP);
2673 ASSERT(qn != QTMP); 2673 ASSERT(qn != QTMP);
2674 ASSERT(qm != QTMP); 2674 ASSERT(qm != QTMP);
2675 2675
2676 Vreciprocalqs(qd, qm); 2676 Vreciprocalqs(qd, qm);
2677 vmulqs(qd, qn, qd); 2677 vmulqs(qd, qn, qd);
2678 } 2678 }
2679 2679
2680 2680
2681 void Assembler::Branch(const ExternalLabel* label, Condition cond) { 2681 void Assembler::Branch(const StubEntry& stub_entry, Condition cond) {
2682 LoadImmediate(IP, label->address(), cond); // Address is never patched. 2682 // Address is never patched.
2683 LoadImmediate(IP, stub_entry.label().address(), cond);
2683 bx(IP, cond); 2684 bx(IP, cond);
2684 } 2685 }
2685 2686
2686 2687
2687 void Assembler::Branch(const StubEntry& stub_entry, Condition cond) { 2688 void Assembler::BranchPatchable(const StubEntry& stub_entry) {
2688 const ExternalLabel label(stub_entry.EntryPoint());
2689 Branch(&label, cond);
2690 }
2691
2692
2693 void Assembler::BranchPatchable(const ExternalLabel* label) {
2694 // Use a fixed size code sequence, since a function prologue may be patched 2689 // Use a fixed size code sequence, since a function prologue may be patched
2695 // with this branch sequence. 2690 // with this branch sequence.
2696 // Contrarily to BranchLinkPatchable, BranchPatchable requires an instruction 2691 // Contrarily to BranchLinkPatchable, BranchPatchable requires an instruction
2697 // cache flush upon patching. 2692 // cache flush upon patching.
2698 LoadPatchableImmediate(IP, label->address()); 2693 LoadPatchableImmediate(IP, stub_entry.label().address());
2699 bx(IP); 2694 bx(IP);
2700 } 2695 }
2701 2696
2702 2697
2703 void Assembler::BranchPatchable(const StubEntry& stub_entry) {
2704 const ExternalLabel label(stub_entry.EntryPoint());
2705 BranchPatchable(&label);
2706 }
2707
2708
2709 void Assembler::BranchLink(const ExternalLabel* label) { 2698 void Assembler::BranchLink(const ExternalLabel* label) {
2710 LoadImmediate(LR, label->address()); // Target address is never patched. 2699 LoadImmediate(LR, label->address()); // Target address is never patched.
2711 blx(LR); // Use blx instruction so that the return branch prediction works. 2700 blx(LR); // Use blx instruction so that the return branch prediction works.
2712 } 2701 }
2713 2702
2714 2703
2715 void Assembler::BranchLink(const StubEntry& stub_entry) {
2716 const ExternalLabel label(stub_entry.EntryPoint());
2717 BranchLink(&label);
2718 }
2719
2720
2721 void Assembler::BranchLink(const ExternalLabel* label, Patchability patchable) { 2704 void Assembler::BranchLink(const ExternalLabel* label, Patchability patchable) {
2722 // Make sure that class CallPattern is able to patch the label referred 2705 // Make sure that class CallPattern is able to patch the label referred
2723 // to by this code sequence. 2706 // to by this code sequence.
2724 // For added code robustness, use 'blx lr' in a patchable sequence and 2707 // For added code robustness, use 'blx lr' in a patchable sequence and
2725 // use 'blx ip' in a non-patchable sequence (see other BranchLink flavors). 2708 // use 'blx ip' in a non-patchable sequence (see other BranchLink flavors).
2726 const int32_t offset = ObjectPool::element_offset( 2709 const int32_t offset = ObjectPool::element_offset(
2727 object_pool_wrapper_.FindExternalLabel(label, patchable)); 2710 object_pool_wrapper_.FindExternalLabel(label, patchable));
2728 LoadWordFromPoolOffset(LR, offset - kHeapObjectTag); 2711 LoadWordFromPoolOffset(LR, offset - kHeapObjectTag);
2729 blx(LR); // Use blx instruction so that the return branch prediction works. 2712 blx(LR); // Use blx instruction so that the return branch prediction works.
2730 } 2713 }
2731 2714
2732 2715
2733 void Assembler::BranchLink(const StubEntry& stub_entry, 2716 void Assembler::BranchLink(const StubEntry& stub_entry,
2734 Patchability patchable) { 2717 Patchability patchable) {
2735 const ExternalLabel label(stub_entry.EntryPoint()); 2718 BranchLink(&stub_entry.label(), patchable);
2736 BranchLink(&label, patchable);
2737 }
2738
2739
2740 void Assembler::BranchLinkPatchable(const ExternalLabel* label) {
2741 BranchLink(label, kPatchable);
2742 } 2719 }
2743 2720
2744 2721
2745 void Assembler::BranchLinkPatchable(const StubEntry& stub_entry) { 2722 void Assembler::BranchLinkPatchable(const StubEntry& stub_entry) {
2746 const ExternalLabel label(stub_entry.EntryPoint()); 2723 BranchLink(&stub_entry.label(), kPatchable);
2747 BranchLinkPatchable(&label);
2748 } 2724 }
2749 2725
2750 2726
2751 void Assembler::BranchLinkOffset(Register base, int32_t offset) { 2727 void Assembler::BranchLinkOffset(Register base, int32_t offset) {
2752 ASSERT(base != PC); 2728 ASSERT(base != PC);
2753 ASSERT(base != IP); 2729 ASSERT(base != IP);
2754 LoadFromOffset(kWord, IP, base, offset); 2730 LoadFromOffset(kWord, IP, base, offset);
2755 blx(IP); // Use blx instruction so that the return branch prediction works. 2731 blx(IP); // Use blx instruction so that the return branch prediction works.
2756 } 2732 }
2757 2733
(...skipping 491 matching lines...) Expand 10 before | Expand all | Expand 10 after
3249 // Reserve space for arguments and align frame before entering 3225 // Reserve space for arguments and align frame before entering
3250 // the C++ world. 3226 // the C++ world.
3251 AddImmediate(SP, -frame_space); 3227 AddImmediate(SP, -frame_space);
3252 if (OS::ActivationFrameAlignment() > 1) { 3228 if (OS::ActivationFrameAlignment() > 1) {
3253 bic(SP, SP, Operand(OS::ActivationFrameAlignment() - 1)); 3229 bic(SP, SP, Operand(OS::ActivationFrameAlignment() - 1));
3254 } 3230 }
3255 } 3231 }
3256 3232
3257 3233
3258 void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) { 3234 void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) {
3259 // Preserve volatile CPU registers. 3235 // Preserve volatile CPU registers and PP.
3260 EnterFrame(kDartVolatileCpuRegs | (1 << FP), 0); 3236 EnterFrame(kDartVolatileCpuRegs | (1 << PP) | (1 << FP), 0);
3237 COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0);
3261 3238
3262 // Preserve all volatile FPU registers. 3239 // Preserve all volatile FPU registers.
3263 if (TargetCPUFeatures::vfp_supported()) { 3240 if (TargetCPUFeatures::vfp_supported()) {
3264 DRegister firstv = EvenDRegisterOf(kDartFirstVolatileFpuReg); 3241 DRegister firstv = EvenDRegisterOf(kDartFirstVolatileFpuReg);
3265 DRegister lastv = OddDRegisterOf(kDartLastVolatileFpuReg); 3242 DRegister lastv = OddDRegisterOf(kDartLastVolatileFpuReg);
3266 if ((lastv - firstv + 1) >= 16) { 3243 if ((lastv - firstv + 1) >= 16) {
3267 DRegister mid = static_cast<DRegister>(firstv + 16); 3244 DRegister mid = static_cast<DRegister>(firstv + 16);
3268 vstmd(DB_W, SP, mid, lastv - mid + 1); 3245 vstmd(DB_W, SP, mid, lastv - mid + 1);
3269 vstmd(DB_W, SP, firstv, 16); 3246 vstmd(DB_W, SP, firstv, 16);
3270 } else { 3247 } else {
3271 vstmd(DB_W, SP, firstv, lastv - firstv + 1); 3248 vstmd(DB_W, SP, firstv, lastv - firstv + 1);
3272 } 3249 }
3273 } 3250 }
3274 3251
3252 LoadPoolPointer();
3253
3275 ReserveAlignedFrameSpace(frame_space); 3254 ReserveAlignedFrameSpace(frame_space);
3276 } 3255 }
3277 3256
3278 3257
3279 void Assembler::LeaveCallRuntimeFrame() { 3258 void Assembler::LeaveCallRuntimeFrame() {
3280 // SP might have been modified to reserve space for arguments 3259 // SP might have been modified to reserve space for arguments
3281 // and ensure proper alignment of the stack frame. 3260 // and ensure proper alignment of the stack frame.
3282 // We need to restore it before restoring registers. 3261 // We need to restore it before restoring registers.
3283 const intptr_t kPushedFpuRegisterSize = 3262 const intptr_t kPushedFpuRegisterSize =
3284 TargetCPUFeatures::vfp_supported() ? 3263 TargetCPUFeatures::vfp_supported() ?
3285 kDartVolatileFpuRegCount * kFpuRegisterSize : 0; 3264 kDartVolatileFpuRegCount * kFpuRegisterSize : 0;
3286 3265
3287 // We subtract one from the volatile cpu register count because, even though 3266 COMPILE_ASSERT(PP < FP);
3288 // LR is volatile, it is pushed ahead of FP. 3267 COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0);
3268 // kVolatileCpuRegCount +1 for PP, -1 because even though LR is volatile,
3269 // it is pushed ahead of FP.
3289 const intptr_t kPushedRegistersSize = 3270 const intptr_t kPushedRegistersSize =
3290 (kDartVolatileCpuRegCount - 1) * kWordSize + kPushedFpuRegisterSize; 3271 kDartVolatileCpuRegCount * kWordSize + kPushedFpuRegisterSize;
3291 AddImmediate(SP, FP, -kPushedRegistersSize); 3272 AddImmediate(SP, FP, -kPushedRegistersSize);
3292 3273
3293 // Restore all volatile FPU registers. 3274 // Restore all volatile FPU registers.
3294 if (TargetCPUFeatures::vfp_supported()) { 3275 if (TargetCPUFeatures::vfp_supported()) {
3295 DRegister firstv = EvenDRegisterOf(kDartFirstVolatileFpuReg); 3276 DRegister firstv = EvenDRegisterOf(kDartFirstVolatileFpuReg);
3296 DRegister lastv = OddDRegisterOf(kDartLastVolatileFpuReg); 3277 DRegister lastv = OddDRegisterOf(kDartLastVolatileFpuReg);
3297 if ((lastv - firstv + 1) >= 16) { 3278 if ((lastv - firstv + 1) >= 16) {
3298 DRegister mid = static_cast<DRegister>(firstv + 16); 3279 DRegister mid = static_cast<DRegister>(firstv + 16);
3299 vldmd(IA_W, SP, firstv, 16); 3280 vldmd(IA_W, SP, firstv, 16);
3300 vldmd(IA_W, SP, mid, lastv - mid + 1); 3281 vldmd(IA_W, SP, mid, lastv - mid + 1);
3301 } else { 3282 } else {
3302 vldmd(IA_W, SP, firstv, lastv - firstv + 1); 3283 vldmd(IA_W, SP, firstv, lastv - firstv + 1);
3303 } 3284 }
3304 } 3285 }
3305 3286
3306 // Restore volatile CPU registers. 3287 // Restore volatile CPU registers.
3307 LeaveFrame(kDartVolatileCpuRegs | (1 << FP)); 3288 LeaveFrame(kDartVolatileCpuRegs | (1 << PP) | (1 << FP));
3308 } 3289 }
3309 3290
3310 3291
3311 void Assembler::CallRuntime(const RuntimeEntry& entry, 3292 void Assembler::CallRuntime(const RuntimeEntry& entry,
3312 intptr_t argument_count) { 3293 intptr_t argument_count) {
3313 entry.Call(this, argument_count); 3294 entry.Call(this, argument_count);
3314 } 3295 }
3315 3296
3316 3297
3317 void Assembler::EnterDartFrame(intptr_t frame_size) { 3298 void Assembler::EnterDartFrame(intptr_t frame_size) {
(...skipping 354 matching lines...) Expand 10 before | Expand all | Expand 10 after
3672 3653
3673 3654
3674 const char* Assembler::FpuRegisterName(FpuRegister reg) { 3655 const char* Assembler::FpuRegisterName(FpuRegister reg) {
3675 ASSERT((0 <= reg) && (reg < kNumberOfFpuRegisters)); 3656 ASSERT((0 <= reg) && (reg < kNumberOfFpuRegisters));
3676 return fpu_reg_names[reg]; 3657 return fpu_reg_names[reg];
3677 } 3658 }
3678 3659
3679 } // namespace dart 3660 } // namespace dart
3680 3661
3681 #endif // defined TARGET_ARCH_ARM 3662 #endif // defined TARGET_ARCH_ARM
OLDNEW
« no previous file with comments | « runtime/vm/assembler_arm.h ('k') | runtime/vm/assembler_arm64.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698