Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(169)

Side by Side Diff: runtime/vm/assembler_arm.cc

Issue 1314883002: VM: Use constant pool also for leaf runtime calls on x64, arm, arm64 and mips. (Closed) Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: fixed non-simulated arm/mips build Created 5 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // NOLINT 5 #include "vm/globals.h" // NOLINT
6 #if defined(TARGET_ARCH_ARM) 6 #if defined(TARGET_ARCH_ARM)
7 7
8 #include "vm/assembler.h" 8 #include "vm/assembler.h"
9 #include "vm/cpu.h" 9 #include "vm/cpu.h"
10 #include "vm/longjump.h" 10 #include "vm/longjump.h"
(...skipping 2660 matching lines...) Expand 10 before | Expand all | Expand 10 after
2671 void Assembler::Vdivqs(QRegister qd, QRegister qn, QRegister qm) { 2671 void Assembler::Vdivqs(QRegister qd, QRegister qn, QRegister qm) {
2672 ASSERT(qd != QTMP); 2672 ASSERT(qd != QTMP);
2673 ASSERT(qn != QTMP); 2673 ASSERT(qn != QTMP);
2674 ASSERT(qm != QTMP); 2674 ASSERT(qm != QTMP);
2675 2675
2676 Vreciprocalqs(qd, qm); 2676 Vreciprocalqs(qd, qm);
2677 vmulqs(qd, qn, qd); 2677 vmulqs(qd, qn, qd);
2678 } 2678 }
2679 2679
2680 2680
2681 void Assembler::Branch(const ExternalLabel* label, Condition cond) {
2682 LoadImmediate(IP, label->address(), cond); // Address is never patched.
2683 bx(IP, cond);
2684 }
2685
2686
2687 void Assembler::Branch(const StubEntry& stub_entry, Condition cond) { 2681 void Assembler::Branch(const StubEntry& stub_entry, Condition cond) {
2688 const ExternalLabel label(stub_entry.EntryPoint()); 2682 const ExternalLabel label(stub_entry.EntryPoint());
2689 Branch(&label, cond); 2683 LoadImmediate(IP, label.address(), cond); // Address is never patched.
rmacnak 2015/08/26 23:55:24 Shouldn't this be LoadExternalLabel? We still need
Florian Schneider 2015/08/27 08:27:19 Yes, using LoadExternalLabel is necessary here and
Florian Schneider 2015/08/27 08:38:09 Correction: Actually, it won't work right away, si
2684 bx(IP, cond);
2690 } 2685 }
2691 2686
2692 2687
2693 void Assembler::BranchPatchable(const ExternalLabel* label) { 2688 void Assembler::BranchPatchable(const StubEntry& stub_entry) {
2694 // Use a fixed size code sequence, since a function prologue may be patched 2689 // Use a fixed size code sequence, since a function prologue may be patched
2695 // with this branch sequence. 2690 // with this branch sequence.
2696 // Contrarily to BranchLinkPatchable, BranchPatchable requires an instruction 2691 // Contrarily to BranchLinkPatchable, BranchPatchable requires an instruction
2697 // cache flush upon patching. 2692 // cache flush upon patching.
2698 LoadPatchableImmediate(IP, label->address()); 2693 const ExternalLabel label(stub_entry.EntryPoint());
2694 LoadPatchableImmediate(IP, label.address());
rmacnak 2015/08/26 23:55:24 Ditto
Florian Schneider 2015/08/27 08:27:19 Yes, but not the purpose of this CL (see description)
2699 bx(IP); 2695 bx(IP);
2700 } 2696 }
2701 2697
2702 2698
2703 void Assembler::BranchPatchable(const StubEntry& stub_entry) {
2704 const ExternalLabel label(stub_entry.EntryPoint());
2705 BranchPatchable(&label);
2706 }
2707
2708
2709 void Assembler::BranchLink(const ExternalLabel* label) { 2699 void Assembler::BranchLink(const ExternalLabel* label) {
2710 LoadImmediate(LR, label->address()); // Target address is never patched. 2700 LoadImmediate(LR, label->address()); // Target address is never patched.
2711 blx(LR); // Use blx instruction so that the return branch prediction works. 2701 blx(LR); // Use blx instruction so that the return branch prediction works.
2712 } 2702 }
2713 2703
2714 2704
2715 void Assembler::BranchLink(const StubEntry& stub_entry) {
2716 const ExternalLabel label(stub_entry.EntryPoint());
2717 BranchLink(&label);
2718 }
2719
2720
2721 void Assembler::BranchLink(const ExternalLabel* label, Patchability patchable) { 2705 void Assembler::BranchLink(const ExternalLabel* label, Patchability patchable) {
2722 // Make sure that class CallPattern is able to patch the label referred 2706 // Make sure that class CallPattern is able to patch the label referred
2723 // to by this code sequence. 2707 // to by this code sequence.
2724 // For added code robustness, use 'blx lr' in a patchable sequence and 2708 // For added code robustness, use 'blx lr' in a patchable sequence and
2725 // use 'blx ip' in a non-patchable sequence (see other BranchLink flavors). 2709 // use 'blx ip' in a non-patchable sequence (see other BranchLink flavors).
2726 const int32_t offset = ObjectPool::element_offset( 2710 const int32_t offset = ObjectPool::element_offset(
2727 object_pool_wrapper_.FindExternalLabel(label, patchable)); 2711 object_pool_wrapper_.FindExternalLabel(label, patchable));
2728 LoadWordFromPoolOffset(LR, offset - kHeapObjectTag); 2712 LoadWordFromPoolOffset(LR, offset - kHeapObjectTag);
2729 blx(LR); // Use blx instruction so that the return branch prediction works. 2713 blx(LR); // Use blx instruction so that the return branch prediction works.
2730 } 2714 }
2731 2715
2732 2716
2733 void Assembler::BranchLink(const StubEntry& stub_entry, 2717 void Assembler::BranchLink(const StubEntry& stub_entry,
2734 Patchability patchable) { 2718 Patchability patchable) {
2735 const ExternalLabel label(stub_entry.EntryPoint()); 2719 const ExternalLabel label(stub_entry.EntryPoint());
2736 BranchLink(&label, patchable); 2720 BranchLink(&label, patchable);
2737 } 2721 }
2738 2722
2739 2723
2740 void Assembler::BranchLinkPatchable(const ExternalLabel* label) {
2741 BranchLink(label, kPatchable);
2742 }
2743
2744
2745 void Assembler::BranchLinkPatchable(const StubEntry& stub_entry) { 2724 void Assembler::BranchLinkPatchable(const StubEntry& stub_entry) {
2746 const ExternalLabel label(stub_entry.EntryPoint()); 2725 const ExternalLabel label(stub_entry.EntryPoint());
2747 BranchLinkPatchable(&label); 2726 BranchLink(&label, kPatchable);
2748 } 2727 }
2749 2728
2750 2729
2751 void Assembler::BranchLinkOffset(Register base, int32_t offset) { 2730 void Assembler::BranchLinkOffset(Register base, int32_t offset) {
2752 ASSERT(base != PC); 2731 ASSERT(base != PC);
2753 ASSERT(base != IP); 2732 ASSERT(base != IP);
2754 LoadFromOffset(kWord, IP, base, offset); 2733 LoadFromOffset(kWord, IP, base, offset);
2755 blx(IP); // Use blx instruction so that the return branch prediction works. 2734 blx(IP); // Use blx instruction so that the return branch prediction works.
2756 } 2735 }
2757 2736
(...skipping 491 matching lines...) Expand 10 before | Expand all | Expand 10 after
3249 // Reserve space for arguments and align frame before entering 3228 // Reserve space for arguments and align frame before entering
3250 // the C++ world. 3229 // the C++ world.
3251 AddImmediate(SP, -frame_space); 3230 AddImmediate(SP, -frame_space);
3252 if (OS::ActivationFrameAlignment() > 1) { 3231 if (OS::ActivationFrameAlignment() > 1) {
3253 bic(SP, SP, Operand(OS::ActivationFrameAlignment() - 1)); 3232 bic(SP, SP, Operand(OS::ActivationFrameAlignment() - 1));
3254 } 3233 }
3255 } 3234 }
3256 3235
3257 3236
3258 void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) { 3237 void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) {
3259 // Preserve volatile CPU registers. 3238 // Preserve volatile CPU registers and PP.
3260 EnterFrame(kDartVolatileCpuRegs | (1 << FP), 0); 3239 EnterFrame(kDartVolatileCpuRegs | (1 << PP) | (1 << FP), 0);
3240 COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0);
3261 3241
3262 // Preserve all volatile FPU registers. 3242 // Preserve all volatile FPU registers.
3263 if (TargetCPUFeatures::vfp_supported()) { 3243 if (TargetCPUFeatures::vfp_supported()) {
3264 DRegister firstv = EvenDRegisterOf(kDartFirstVolatileFpuReg); 3244 DRegister firstv = EvenDRegisterOf(kDartFirstVolatileFpuReg);
3265 DRegister lastv = OddDRegisterOf(kDartLastVolatileFpuReg); 3245 DRegister lastv = OddDRegisterOf(kDartLastVolatileFpuReg);
3266 if ((lastv - firstv + 1) >= 16) { 3246 if ((lastv - firstv + 1) >= 16) {
3267 DRegister mid = static_cast<DRegister>(firstv + 16); 3247 DRegister mid = static_cast<DRegister>(firstv + 16);
3268 vstmd(DB_W, SP, mid, lastv - mid + 1); 3248 vstmd(DB_W, SP, mid, lastv - mid + 1);
3269 vstmd(DB_W, SP, firstv, 16); 3249 vstmd(DB_W, SP, firstv, 16);
3270 } else { 3250 } else {
3271 vstmd(DB_W, SP, firstv, lastv - firstv + 1); 3251 vstmd(DB_W, SP, firstv, lastv - firstv + 1);
3272 } 3252 }
3273 } 3253 }
3274 3254
3255 LoadPoolPointer();
3256
3275 ReserveAlignedFrameSpace(frame_space); 3257 ReserveAlignedFrameSpace(frame_space);
3276 } 3258 }
3277 3259
3278 3260
3279 void Assembler::LeaveCallRuntimeFrame() { 3261 void Assembler::LeaveCallRuntimeFrame() {
3280 // SP might have been modified to reserve space for arguments 3262 // SP might have been modified to reserve space for arguments
3281 // and ensure proper alignment of the stack frame. 3263 // and ensure proper alignment of the stack frame.
3282 // We need to restore it before restoring registers. 3264 // We need to restore it before restoring registers.
3283 const intptr_t kPushedFpuRegisterSize = 3265 const intptr_t kPushedFpuRegisterSize =
3284 TargetCPUFeatures::vfp_supported() ? 3266 TargetCPUFeatures::vfp_supported() ?
3285 kDartVolatileFpuRegCount * kFpuRegisterSize : 0; 3267 kDartVolatileFpuRegCount * kFpuRegisterSize : 0;
3286 3268
3287 // We subtract one from the volatile cpu register count because, even though 3269 COMPILE_ASSERT(PP < FP);
3288 // LR is volatile, it is pushed ahead of FP. 3270 COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0);
3271 // kVolatileCpuRegCount +1 for PP, -1 because even though LR is volatile,
3272 // it is pushed ahead of FP.
3289 const intptr_t kPushedRegistersSize = 3273 const intptr_t kPushedRegistersSize =
3290 (kDartVolatileCpuRegCount - 1) * kWordSize + kPushedFpuRegisterSize; 3274 kDartVolatileCpuRegCount * kWordSize + kPushedFpuRegisterSize;
3291 AddImmediate(SP, FP, -kPushedRegistersSize); 3275 AddImmediate(SP, FP, -kPushedRegistersSize);
3292 3276
3293 // Restore all volatile FPU registers. 3277 // Restore all volatile FPU registers.
3294 if (TargetCPUFeatures::vfp_supported()) { 3278 if (TargetCPUFeatures::vfp_supported()) {
3295 DRegister firstv = EvenDRegisterOf(kDartFirstVolatileFpuReg); 3279 DRegister firstv = EvenDRegisterOf(kDartFirstVolatileFpuReg);
3296 DRegister lastv = OddDRegisterOf(kDartLastVolatileFpuReg); 3280 DRegister lastv = OddDRegisterOf(kDartLastVolatileFpuReg);
3297 if ((lastv - firstv + 1) >= 16) { 3281 if ((lastv - firstv + 1) >= 16) {
3298 DRegister mid = static_cast<DRegister>(firstv + 16); 3282 DRegister mid = static_cast<DRegister>(firstv + 16);
3299 vldmd(IA_W, SP, firstv, 16); 3283 vldmd(IA_W, SP, firstv, 16);
3300 vldmd(IA_W, SP, mid, lastv - mid + 1); 3284 vldmd(IA_W, SP, mid, lastv - mid + 1);
3301 } else { 3285 } else {
3302 vldmd(IA_W, SP, firstv, lastv - firstv + 1); 3286 vldmd(IA_W, SP, firstv, lastv - firstv + 1);
3303 } 3287 }
3304 } 3288 }
3305 3289
3306 // Restore volatile CPU registers. 3290 // Restore volatile CPU registers.
3307 LeaveFrame(kDartVolatileCpuRegs | (1 << FP)); 3291 LeaveFrame(kDartVolatileCpuRegs | (1 << PP) | (1 << FP));
3308 } 3292 }
3309 3293
3310 3294
3311 void Assembler::CallRuntime(const RuntimeEntry& entry, 3295 void Assembler::CallRuntime(const RuntimeEntry& entry,
3312 intptr_t argument_count) { 3296 intptr_t argument_count) {
3313 entry.Call(this, argument_count); 3297 entry.Call(this, argument_count);
3314 } 3298 }
3315 3299
3316 3300
3317 void Assembler::EnterDartFrame(intptr_t frame_size) { 3301 void Assembler::EnterDartFrame(intptr_t frame_size) {
(...skipping 354 matching lines...) Expand 10 before | Expand all | Expand 10 after
3672 3656
3673 3657
3674 const char* Assembler::FpuRegisterName(FpuRegister reg) { 3658 const char* Assembler::FpuRegisterName(FpuRegister reg) {
3675 ASSERT((0 <= reg) && (reg < kNumberOfFpuRegisters)); 3659 ASSERT((0 <= reg) && (reg < kNumberOfFpuRegisters));
3676 return fpu_reg_names[reg]; 3660 return fpu_reg_names[reg];
3677 } 3661 }
3678 3662
3679 } // namespace dart 3663 } // namespace dart
3680 3664
3681 #endif // defined TARGET_ARCH_ARM 3665 #endif // defined TARGET_ARCH_ARM
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698