Chromium Code Reviews

Unified diff: runtime/vm/assembler_x64.cc

Issue 1192103004: VM: New calling convention for generated code. (Closed) Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: fixed comments (created 5 years, 3 months ago)
 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.

 #include "vm/globals.h"  // NOLINT
 #if defined(TARGET_ARCH_X64)

 #include "vm/assembler.h"
 #include "vm/cpu.h"
 #include "vm/heap.h"
(...skipping 75 matching lines...)
     EmitRegisterREX(TMP, REX_W);
     EmitUint8(0xB8 | (TMP & 7));
     EmitInt64(label->address());
   }
   call(TMP);
 }


 void Assembler::CallPatchable(const StubEntry& stub_entry) {
   ASSERT(constant_pool_allowed());
+  const Code& target = Code::Handle(stub_entry.code());
   intptr_t call_start = buffer_.GetPosition();
   const int32_t offset = ObjectPool::element_offset(
-      object_pool_wrapper_.FindExternalLabel(&stub_entry.label(), kPatchable));
-  call(Address::AddressBaseImm32(PP, offset - kHeapObjectTag));
+      object_pool_wrapper_.FindObject(target, kPatchable));
+  LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag);
+  movq(TMP, FieldAddress(CODE_REG, Code::entry_point_offset()));
+  call(TMP);
   ASSERT((buffer_.GetPosition() - call_start) == kCallExternalLabelSize);
 }


 void Assembler::Call(const StubEntry& stub_entry) {
   ASSERT(constant_pool_allowed());
+  const Code& target = Code::Handle(stub_entry.code());
   const int32_t offset = ObjectPool::element_offset(
-      object_pool_wrapper_.FindExternalLabel(&stub_entry.label(),
-                                             kNotPatchable));
-  call(Address::AddressBaseImm32(PP, offset - kHeapObjectTag));
+      object_pool_wrapper_.FindObject(target, kNotPatchable));
+  LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag);
+  movq(TMP, FieldAddress(CODE_REG, Code::entry_point_offset()));
+  call(TMP);
 }


 void Assembler::pushq(Register reg) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitRegisterREX(reg, REX_NONE);
   EmitUint8(0x50 | (reg & 7));
 }

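Both CallPatchable and Call above now share the same shape: resolve the target's Code object in the object pool, load it into CODE_REG, and call through the entry point stored in that Code object (JmpPatchable and Jmp further down do the same with a jmp). A minimal sketch of that shared pattern; the helper name is hypothetical, while the registers, accessors and assembler methods are the ones used in this file, and the VM headers this file already includes are assumed:

    // Illustrative sketch only; not a helper that exists in the CL.
    static void CallThroughCodeObject(Assembler* assembler, int32_t offset) {
      // Load the target Code object from its object-pool slot (PP-relative).
      assembler->LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag);
      // Fetch the entry point cached in the Code object and call through it.
      assembler->movq(TMP, FieldAddress(CODE_REG, Code::entry_point_offset()));
      assembler->call(TMP);
    }
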
(...skipping 2416 matching lines...)
     EmitUint8(0x70 + condition);
     EmitNearLabelLink(label);
   } else {
     EmitUint8(0x0F);
     EmitUint8(0x80 + condition);
     EmitLabelLink(label);
   }
 }


-void Assembler::J(Condition condition, const StubEntry& stub_entry,
+void Assembler::J(Condition condition,
+                  const StubEntry& stub_entry,
                   Register pp) {
   Label no_jump;
   // Negate condition.
-  j(static_cast<Condition>(condition ^ 1), &no_jump, Assembler::kNearJump);
+  j(static_cast<Condition>(condition ^ 1), &no_jump, kNearJump);
   Jmp(stub_entry, pp);
   Bind(&no_jump);
 }


 void Assembler::jmp(Register reg) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   Operand operand(reg);
   EmitOperandREX(4, operand, REX_NONE);
   EmitUint8(0xFF);
(...skipping 40 matching lines...)
   {  // Encode movq(TMP, Immediate(label->address())), but always as imm64.
     AssemblerBuffer::EnsureCapacity ensured(&buffer_);
     EmitRegisterREX(TMP, REX_W);
     EmitUint8(0xB8 | (TMP & 7));
     EmitInt64(label->address());
   }
   jmp(TMP);
 }


-void Assembler::jmp(const StubEntry& stub_entry) {
-  jmp(&stub_entry.label());
-}
-
-
 void Assembler::JmpPatchable(const StubEntry& stub_entry, Register pp) {
   ASSERT((pp != PP) || constant_pool_allowed());
-  intptr_t call_start = buffer_.GetPosition();
+  const Code& target = Code::Handle(stub_entry.code());
   const int32_t offset = ObjectPool::element_offset(
-      object_pool_wrapper_.FindExternalLabel(&stub_entry.label(), kPatchable));
-  // Patchable jumps always use a 32-bit immediate encoding.
-  jmp(Address::AddressBaseImm32(pp, offset - kHeapObjectTag));
-  ASSERT((buffer_.GetPosition() - call_start) == JumpPattern::kLengthInBytes);
+      object_pool_wrapper_.FindObject(target, kPatchable));
+  movq(CODE_REG, Address::AddressBaseImm32(pp, offset - kHeapObjectTag));
+  movq(TMP, FieldAddress(CODE_REG, Code::entry_point_offset()));
+  jmp(TMP);
 }


 void Assembler::Jmp(const StubEntry& stub_entry, Register pp) {
   ASSERT((pp != PP) || constant_pool_allowed());
+  const Code& target = Code::Handle(stub_entry.code());
   const int32_t offset = ObjectPool::element_offset(
-      object_pool_wrapper_.FindExternalLabel(&stub_entry.label(),
-                                             kNotPatchable));
-  jmp(Address(pp, offset - kHeapObjectTag));
+      object_pool_wrapper_.FindObject(target, kNotPatchable));
+  movq(CODE_REG, FieldAddress(pp, offset));
+  movq(TMP, FieldAddress(CODE_REG, Code::entry_point_offset()));
+  jmp(TMP);
 }


 void Assembler::lock() {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0xF0);
 }


 void Assembler::cmpxchgl(const Address& address, Register reg) {
(...skipping 427 matching lines...)
   if (can_value_be_smi) {
     StoreIntoObjectFilter(object, value, &done);
   } else {
     StoreIntoObjectFilterNoSmi(object, value, &done);
   }
   // A store buffer update is required.
   if (value != RDX) pushq(RDX);
   if (object != RDX) {
     movq(RDX, object);
   }
+  pushq(CODE_REG);
+  movq(CODE_REG, Address(THR, Thread::update_store_buffer_code_offset()));
   movq(TMP, Address(THR, Thread::update_store_buffer_entry_point_offset()));
   call(TMP);
+
+  popq(CODE_REG);
   if (value != RDX) popq(RDX);
   Bind(&done);
 }
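The slow path above now also installs the store-buffer stub's Code object in CODE_REG for the duration of the call and preserves the caller's CODE_REG around it; presumably this is needed because, under the new convention, CODE_REG carries the current frame's Code object and the callee clobbers it. A sketch of that save/install/call/restore shape (hypothetical helper; the Thread offsets are the ones used above):

    // Illustrative sketch only.
    static void CallUpdateStoreBufferStub(Assembler* assembler) {
      assembler->pushq(CODE_REG);  // Spill the caller's Code object.
      // Install the stub's Code object and call its cached entry point.
      assembler->movq(CODE_REG,
                      Address(THR, Thread::update_store_buffer_code_offset()));
      assembler->movq(TMP,
                      Address(THR, Thread::update_store_buffer_entry_point_offset()));
      assembler->call(TMP);
      assembler->popq(CODE_REG);   // Restore the caller's Code object.
    }
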


 void Assembler::StoreIntoObjectNoBarrier(Register object,
                                          const Address& dest,
                                          Register value,
                                          FieldContent old_content) {
   VerifiedWrite(dest, value, old_content);
(...skipping 280 matching lines...)
   call(reg);
 }


 void Assembler::CallRuntime(const RuntimeEntry& entry,
                             intptr_t argument_count) {
   entry.Call(this, argument_count);
 }


+void Assembler::RestoreCodePointer() {
+  movq(CODE_REG, Address(RBP, kPcMarkerSlotFromFp * kWordSize));
+}
+
+
 void Assembler::LoadPoolPointer(Register pp) {
   // Load new pool pointer.
-  const intptr_t kRIPRelativeMovqSize = 7;
-  const intptr_t entry_to_rip_offset = CodeSize() + kRIPRelativeMovqSize;
-  const intptr_t object_pool_pc_dist =
-      Instructions::HeaderSize() - Instructions::object_pool_offset();
-  movq(pp, Address::AddressRIPRelative(
-      -entry_to_rip_offset - object_pool_pc_dist));
-  ASSERT(CodeSize() == entry_to_rip_offset);
+  CheckCodePointer();
+  movq(pp, FieldAddress(CODE_REG, Code::object_pool_offset()));
   set_constant_pool_allowed(pp == PP);
 }


-void Assembler::EnterDartFrame(intptr_t frame_size,
-                               Register new_pp,
-                               Register pc_marker_override) {
+void Assembler::EnterDartFrame(intptr_t frame_size, Register new_pp) {
   ASSERT(!constant_pool_allowed());
   EnterFrame(0);
-  pushq(pc_marker_override);
+  pushq(CODE_REG);
   pushq(PP);
-  movq(PP, new_pp);
+  if (new_pp == kNoRegister) {
+    LoadPoolPointer(PP);
+  } else {
+    movq(PP, new_pp);
+  }
   set_constant_pool_allowed(true);
   if (frame_size != 0) {
     subq(RSP, Immediate(frame_size));
   }
 }
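Given the push order in EnterDartFrame above (saved RBP, then CODE_REG, then the caller's PP), a Dart frame keeps the function's Code object and the caller's pool pointer in two fixed slots below the frame pointer. A hypothetical helper, for illustration only, that reads both back with the same slot constants RestoreCodePointer and LeaveDartFrame use:

    // Illustrative sketch only; assumes a frame set up by EnterDartFrame.
    static void RestoreCodeAndPoolPointer(Assembler* assembler) {
      // CODE_REG doubles as the frame's PC marker; reload it from the slot
      // where EnterDartFrame pushed it (what RestoreCodePointer does).
      assembler->movq(CODE_REG, Address(RBP, kPcMarkerSlotFromFp * kWordSize));
      // The caller's pool pointer was pushed right after CODE_REG
      // (what LeaveDartFrame restores).
      assembler->movq(PP, Address(RBP, kSavedCallerPpSlotFromFp * kWordSize));
    }
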


-void Assembler::LeaveDartFrame() {
-  set_constant_pool_allowed(false);
+void Assembler::LeaveDartFrame(RestorePP restore_pp) {
   // Restore caller's PP register that was pushed in EnterDartFrame.
-  movq(PP, Address(RBP, (kSavedCallerPpSlotFromFp * kWordSize)));
+  if (restore_pp == kRestoreCallerPP) {
+    movq(PP, Address(RBP, (kSavedCallerPpSlotFromFp * kWordSize)));
+    set_constant_pool_allowed(false);
+  }
   LeaveFrame();
 }


+void Assembler::CheckCodePointer() {
+#ifdef DEBUG
+  Label cid_ok, instructions_ok;
+  pushq(RAX);
+  LoadClassId(RAX, CODE_REG);
+  cmpq(RAX, Immediate(kCodeCid));
+  j(EQUAL, &cid_ok);
+  int3();
+  Bind(&cid_ok);
+  {
+    const intptr_t kRIPRelativeLeaqSize = 7;
+    const intptr_t header_to_entry_offset =
+        (Instructions::HeaderSize() - kHeapObjectTag);
+    const intptr_t header_to_rip_offset =
+        CodeSize() + kRIPRelativeLeaqSize + header_to_entry_offset;
+    leaq(RAX, Address::AddressRIPRelative(-header_to_rip_offset));
+    ASSERT(CodeSize() == (header_to_rip_offset - header_to_entry_offset));
+  }
+  cmpq(RAX, FieldAddress(CODE_REG, Code::saved_instructions_offset()));
+  j(EQUAL, &instructions_ok);
+  int3();
+  Bind(&instructions_ok);
+  popq(RAX);
+#endif
+}
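A short reading of the RIP-relative check above (inferred from the constants in the block, not stated in the CL):

    // When the leaq executes, RIP points just past the 7-byte instruction:
    //   RIP = entry_point + CodeSize() + kRIPRelativeLeaqSize
    // so the loaded value is
    //   RAX = RIP - header_to_rip_offset
    //       = entry_point - (Instructions::HeaderSize() - kHeapObjectTag),
    // i.e. the tagged pointer to the Instructions object this code is
    // running from, which the cmpq then compares against the Code object's
    // saved instructions field.  The ASSERT pins the leaq encoding at
    // exactly kRIPRelativeLeaqSize bytes.
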
+
+
 // On entry to a function compiled for OSR, the caller's frame pointer, the
 // stack locals, and any copied parameters are already in place. The frame
 // pointer is already set up. The PC marker is not correct for the
 // optimized function and there may be extra space for spill slots to
 // allocate.
-void Assembler::EnterOsrFrame(intptr_t extra_size,
-                              Register new_pp,
-                              Register pc_marker_override) {
+void Assembler::EnterOsrFrame(intptr_t extra_size) {
   ASSERT(!constant_pool_allowed());
   if (prologue_offset_ == -1) {
     Comment("PrologueOffset = %" Pd "", CodeSize());
     prologue_offset_ = CodeSize();
   }
-  movq(Address(RBP, kPcMarkerSlotFromFp * kWordSize), pc_marker_override);
-  movq(PP, new_pp);
-  set_constant_pool_allowed(true);
+  RestoreCodePointer();
+  LoadPoolPointer();
+
   if (extra_size != 0) {
     subq(RSP, Immediate(extra_size));
   }
 }


 void Assembler::EnterStubFrame() {
-  set_constant_pool_allowed(false);
-  EnterFrame(0);
-  pushq(Immediate(0));  // Push 0 in the saved PC area for stub frames.
-  pushq(PP);  // Save caller's pool pointer
-  LoadPoolPointer();
+  EnterDartFrame(0, kNoRegister);
 }


 void Assembler::LeaveStubFrame() {
   LeaveDartFrame();
 }


 void Assembler::MaybeTraceAllocation(intptr_t cid,
                                      Label* trace,
(...skipping 488 matching lines...)


 const char* Assembler::FpuRegisterName(FpuRegister reg) {
   ASSERT((0 <= reg) && (reg < kNumberOfXmmRegisters));
   return xmm_reg_names[reg];
 }

 }  // namespace dart

 #endif  // defined TARGET_ARCH_X64