Chromium Code Reviews
| Index: runtime/vm/assembler_x64.cc |
| =================================================================== |
| --- runtime/vm/assembler_x64.cc (revision 26726) |
| +++ runtime/vm/assembler_x64.cc (working copy) |
| @@ -65,6 +65,37 @@ |
| #undef __ |
| +Assembler::Assembler(bool use_far_branches) |
| + : buffer_(), |
| + object_pool_(GrowableObjectArray::Handle()), |
| + prologue_offset_(-1), |
| + comments_() { |
| + // Far branching mode is only needed and implemented for MIPS and ARM. |
| + ASSERT(!use_far_branches); |
| + if (Isolate::Current() != Dart::vm_isolate()) { |
| + object_pool_ = GrowableObjectArray::New(Heap::kOld); |
| + |
| + // These objects and labels need to be accessible through every pool-pointer |
| + // at the same index. |
| + object_pool_.Add(Object::Handle(Object::null()), Heap::kOld); |
| + object_pool_.Add(Bool::True(), Heap::kOld); |
| + object_pool_.Add(Bool::False(), Heap::kOld); |
| + |
| + if (StubCode::UpdateStoreBuffer_entry() != NULL) { |
| + AddExternalLabel(&StubCode::UpdateStoreBufferLabel(), kNotPatchable); |
| + } else { |
| + object_pool_.Add(Object::Handle(Object::null()), Heap::kOld); |
| + } |
| + |
| + if (StubCode::CallToRuntime_entry() != NULL) { |
| + AddExternalLabel(&StubCode::CallToRuntimeLabel(), kNotPatchable); |
| + } else { |
| + object_pool_.Add(Object::Handle(Object::null()), Heap::kOld); |
| + } |
| + } |
| +} |
| + |
| + |
| void Assembler::InitializeMemoryWithBreakpoints(uword data, int length) { |
| memset(reinterpret_cast<void*>(data), Instr::kBreakPointInstruction, length); |
| } |
| @@ -95,9 +126,17 @@ |
| } |
| +void Assembler::LoadExternalLabel(const ExternalLabel* label, |
| + Patchability patchable, |
| + Register pp) { |
| + const int32_t offset = |
| + Array::element_offset(AddExternalLabel(label, patchable)); |
| + LoadWordFromPoolOffset(TMP, pp, offset - kHeapObjectTag, patchable); |
| +} |
| + |
| + |
| void Assembler::call(const ExternalLabel* label) { |
| AssemblerBuffer::EnsureCapacity ensured(&buffer_); |
| - intptr_t call_start = buffer_.GetPosition(); |
| // Encode movq(TMP, Immediate(label->address())), but always as imm64. |
| EmitRegisterREX(TMP, REX_W); |
| @@ -109,11 +148,41 @@ |
| EmitOperandREX(2, operand, REX_NONE); |
| EmitUint8(0xFF); |
| EmitOperand(2, operand); |
| +} |
| + |
| +void Assembler::CallPatchable(const ExternalLabel* label) { |
| + intptr_t call_start = buffer_.GetPosition(); |
| + LoadExternalLabel(label); |
| + { |
| + AssemblerBuffer::EnsureCapacity ensured(&buffer_); |
| + // Encode call(TMP). |
| + Operand operand(TMP); |
| + EmitOperandREX(2, operand, REX_NONE); |
| + EmitUint8(0xFF); |
| + EmitOperand(2, operand); |
| + } |
| ASSERT((buffer_.GetPosition() - call_start) == kCallExternalLabelSize); |
| } |
| +void Assembler::CallFromPool(const ExternalLabel* label) { |
| + if (Isolate::Current() == Dart::vm_isolate()) { |
| + call(label); |
| + } else { |
| + LoadExternalLabel(label, kNotPatchable); |
|
Florian Schneider
2013/09/04 09:39:47
This assumes LoadExternalLabel stores into TMP. Wh… [comment truncated]
zra
2013/09/04 21:00:41
Done.
|
| + { |
| + AssemblerBuffer::EnsureCapacity ensured(&buffer_); |
| + // Encode call(TMP). |
| + Operand operand(TMP); |
| + EmitOperandREX(2, operand, REX_NONE); |
| + EmitUint8(0xFF); |
| + EmitOperand(2, operand); |
| + } |
| + } |
| +} |
| + |
| + |
| void Assembler::pushq(Register reg) { |
| AssemblerBuffer::EnsureCapacity ensured(&buffer_); |
| EmitRegisterREX(reg, REX_NONE); |
| @@ -1960,6 +2029,15 @@ |
| } |
| +void Assembler::JumpFromPool(Condition condition, const ExternalLabel* label, |
| + Register pp) { |
| + Label no_jump; |
| + j(static_cast<Condition>(condition ^ 1), &no_jump); // Negate condition. |
| + JumpFromPool(label, pp); |
| + Bind(&no_jump); |
| +} |
| + |
| + |
| void Assembler::jmp(Register reg) { |
| AssemblerBuffer::EnsureCapacity ensured(&buffer_); |
| Operand operand(reg); |
| @@ -1995,7 +2073,6 @@ |
| void Assembler::jmp(const ExternalLabel* label) { |
| AssemblerBuffer::EnsureCapacity ensured(&buffer_); |
| - intptr_t call_start = buffer_.GetPosition(); |
| // Encode movq(TMP, Immediate(label->address())), but always as imm64. |
| EmitRegisterREX(TMP, REX_W); |
| @@ -2007,11 +2084,37 @@ |
| EmitOperandREX(4, operand, REX_NONE); |
| EmitUint8(0xFF); |
| EmitOperand(4, operand); |
| +} |
| + |
| +void Assembler::JumpPatchable(const ExternalLabel* label, Register pp) { |
| + intptr_t call_start = buffer_.GetPosition(); |
| + LoadExternalLabel(label, kPatchable, pp); |
| + { |
| + AssemblerBuffer::EnsureCapacity ensured(&buffer_); |
| + // Encode jmp(TMP). |
| + Operand operand(TMP); |
| + EmitOperandREX(4, operand, REX_NONE); |
| + EmitUint8(0xFF); |
| + EmitOperand(4, operand); |
| + } |
| ASSERT((buffer_.GetPosition() - call_start) == kCallExternalLabelSize); |
| } |
| +void Assembler::JumpFromPool(const ExternalLabel* label, Register pp) { |
| + LoadExternalLabel(label, kNotPatchable, pp); |
| + { |
| + AssemblerBuffer::EnsureCapacity ensured(&buffer_); |
| + // Encode jmp(TMP). |
| + Operand operand(TMP); |
| + EmitOperandREX(4, operand, REX_NONE); |
| + EmitUint8(0xFF); |
| + EmitOperand(4, operand); |
| + } |
| +} |
| + |
| + |
| void Assembler::lock() { |
| AssemblerBuffer::EnsureCapacity ensured(&buffer_); |
| EmitUint8(0xF0); |
| @@ -2091,49 +2194,113 @@ |
| } |
| -void Assembler::LoadObject(Register dst, const Object& object) { |
| - if (object.IsSmi() || object.InVMHeap()) { |
| +int32_t Assembler::AddObject(const Object& obj) { |
| + // The object pool cannot be used in the vm isolate. |
| + ASSERT(Isolate::Current() != Dart::vm_isolate()); |
| + ASSERT(obj.IsNotTemporaryScopedHandle()); |
| + ASSERT(obj.IsOld()); |
| + if (object_pool_.IsNull()) { |
| + object_pool_ = GrowableObjectArray::New(Heap::kOld); |
| + } |
| + for (int i = 0; i < object_pool_.Length(); i++) { |
| + if (object_pool_.At(i) == obj.raw()) { |
| + return i; |
| + } |
| + } |
| + object_pool_.Add(obj, Heap::kOld); |
| + return object_pool_.Length() - 1; |
| +} |
| + |
| + |
| +int32_t Assembler::AddExternalLabel(const ExternalLabel* label, |
| + Patchability patchable) { |
| + // The object pool cannot be used in the vm isolate. |
| + ASSERT(Isolate::Current() != Dart::vm_isolate()); |
| + if (object_pool_.IsNull()) { |
| + object_pool_ = GrowableObjectArray::New(Heap::kOld); |
| + } |
| + const word address = label->address(); |
| + ASSERT(Utils::IsAligned(address, 4)); |
| + // The address is stored in the object array as a RawSmi. |
| + const Smi& smi = Smi::Handle(Smi::New(address >> kSmiTagShift)); |
| + if (patchable == kNotPatchable) { |
| + // An external label used in a non-patchable call shouldn't also be used in |
| + // patchable calls. So, we can re-use existing entries for non-patchable |
| + // calls. |
| + for (int i = 0; i < object_pool_.Length(); i++) { |
| + if (object_pool_.At(i) == smi.raw()) { |
| + return i; |
| + } |
| + } |
| + } |
| + // If the call is patchable, do not reuse an existing entry since each |
| + // reference may be patched independently. |
| + object_pool_.Add(smi, Heap::kOld); |
| + return object_pool_.Length() - 1; |
| +} |
| + |
| + |
| +bool Assembler::CanLoadFromObjectPool(const Object& object) { |
| + return !object.IsSmi() && // Not a Smi |
| + // Not in the VMHeap, OR is one of the VMHeap objects we put in every |
| + // object pool. |
| + (!object.InVMHeap() || (object.raw() == Object::null()) || |
| + (object.raw() == Bool::True().raw()) || |
| + (object.raw() == Bool::False().raw())) && |
| + object.IsNotTemporaryScopedHandle() && |
| + object.IsOld(); |
| +} |
| + |
| + |
| +void Assembler::LoadWordFromPoolOffset(Register dst, Register pp, |
| + int32_t offset, Patchability patchable) { |
| + movq(dst, Address(pp, offset)); |
| + // This sequence must be of fixed size. If offset fits in a signed byte we |
| + // have to pad with nops. |
| + if (Utils::IsInt(8, offset) && (patchable == kPatchable)) { |
|
Florian Schneider
2013/09/04 09:39:47
In the case that patching is allowed, why not generate… [comment truncated]
zra
2013/09/04 21:00:41
I'm afraid I don't see a straightforward way of doing… [comment truncated]
srdjan
2013/09/04 22:57:23
I am with Florian here, it would simplify the code
zra
2013/09/05 00:23:11
Added a constructor to Address to generate a fixed-size… [comment truncated]
|
| + nop(3); |
| + } |
| +} |
| + |
| + |
| +void Assembler::LoadObject(Register dst, const Object& object, |
|
Florian Schneider
2013/09/04 09:39:47
I find it too invasive to specify kPatchable/kNotPatchable… [comment truncated]
zra
2013/09/04 21:00:41
I've used a different function. See above comment
|
| + Patchability patchable, Register pp) { |
| + if (CanLoadFromObjectPool(object)) { |
| + const int32_t offset = Array::element_offset(AddObject(object)); |
| + LoadWordFromPoolOffset(dst, pp, offset - kHeapObjectTag, patchable); |
| + } else { |
| movq(dst, Immediate(reinterpret_cast<int64_t>(object.raw()))); |
| - } else { |
| - ASSERT(object.IsNotTemporaryScopedHandle()); |
| - ASSERT(object.IsOld()); |
| - AssemblerBuffer::EnsureCapacity ensured(&buffer_); |
| - EmitRegisterREX(dst, REX_W); |
| - EmitUint8(0xB8 | (dst & 7)); |
| - buffer_.EmitObject(object); |
| } |
| } |
| void Assembler::StoreObject(const Address& dst, const Object& object) { |
| - if (object.IsSmi() || object.InVMHeap()) { |
| + if (CanLoadFromObjectPool(object)) { |
| + LoadObject(TMP, object, kNotPatchable); |
| + movq(dst, TMP); |
| + } else { |
| movq(dst, Immediate(reinterpret_cast<int64_t>(object.raw()))); |
| - } else { |
| - ASSERT(object.IsNotTemporaryScopedHandle()); |
| - ASSERT(object.IsOld()); |
| - LoadObject(TMP, object); |
| - movq(dst, TMP); |
| } |
| } |
| void Assembler::PushObject(const Object& object) { |
| - if (object.IsSmi() || object.InVMHeap()) { |
| + if (CanLoadFromObjectPool(object)) { |
| + LoadObject(TMP, object, kNotPatchable); |
| + pushq(TMP); |
| + } else { |
| pushq(Immediate(reinterpret_cast<int64_t>(object.raw()))); |
| - } else { |
| - LoadObject(TMP, object); |
| - pushq(TMP); |
| } |
| } |
| void Assembler::CompareObject(Register reg, const Object& object) { |
| - if (object.IsSmi() || object.InVMHeap()) { |
| - cmpq(reg, Immediate(reinterpret_cast<int64_t>(object.raw()))); |
| - } else { |
| + if (CanLoadFromObjectPool(object)) { |
| ASSERT(reg != TMP); |
| - LoadObject(TMP, object); |
| + LoadObject(TMP, object, kNotPatchable); |
| cmpq(reg, TMP); |
| + } else { |
| + cmpq(reg, Immediate(reinterpret_cast<int64_t>(object.raw()))); |
| } |
| } |
| @@ -2196,7 +2363,7 @@ |
| if (object != RAX) { |
| movq(RAX, object); |
| } |
| - call(&StubCode::UpdateStoreBufferLabel()); |
| + CallFromPool(&StubCode::UpdateStoreBufferLabel()); |
| if (value != RAX) popq(RAX); |
| Bind(&done); |
| } |
| @@ -2292,7 +2459,10 @@ |
| } |
| -void Assembler::LeaveFrame() { |
| +void Assembler::LeaveFrame(bool restore_pp) { |
| + if (restore_pp) { |
| + movq(PP, Address(RBP, -2 * kWordSize)); |
| + } |
| movq(RSP, RBP); |
| popq(RBP); |
| } |
| @@ -2377,17 +2547,47 @@ |
| } |
| -void Assembler::EnterDartFrame(intptr_t frame_size) { |
| +void Assembler::LoadPoolPointer(Register pp) { |
| + Label next; |
| + call(&next); |
| + Bind(&next); |
| + |
| + // Load new pool pointer. |
| + const intptr_t object_pool_pc_dist = |
| + Instructions::HeaderSize() - Instructions::object_pool_offset() + |
| + CodeSize(); |
| + popq(pp); |
| + movq(pp, Address(pp, -object_pool_pc_dist)); |
| +} |
| + |
| + |
| +void Assembler::EnterDartFrame(intptr_t frame_size, |
| + Register new_pp, Register new_pc) { |
| EnterFrame(0); |
| - Label dart_entry; |
| - call(&dart_entry); |
| - Bind(&dart_entry); |
| - // The runtime system assumes that the code marker address is |
| - // kEntryPointToPcMarkerOffset bytes from the entry. If there is any code |
| - // generated before entering the frame, the address needs to be adjusted. |
| - const intptr_t offset = kEntryPointToPcMarkerOffset - CodeSize(); |
| - if (offset != 0) { |
| - addq(Address(RSP, 0), Immediate(offset)); |
| + if (new_pc == kNoRegister) { |
| + Label dart_entry; |
| + call(&dart_entry); |
| + Bind(&dart_entry); |
| + // The runtime system assumes that the code marker address is |
| + // kEntryPointToPcMarkerOffset bytes from the entry. If there is any code |
| + // generated before entering the frame, the address needs to be adjusted. |
| + const intptr_t object_pool_pc_dist = |
| + Instructions::HeaderSize() - Instructions::object_pool_offset() + |
| + CodeSize(); |
| + const intptr_t offset = kEntryPointToPcMarkerOffset - CodeSize(); |
| + if (offset != 0) { |
| + addq(Address(RSP, 0), Immediate(offset)); |
| + } |
| + // Save caller's pool pointer |
| + pushq(PP); |
| + |
| + // Load callee's pool pointer. |
| + movq(PP, Address(RSP, 1 * kWordSize)); |
| + movq(PP, Address(PP, -object_pool_pc_dist - offset)); |
| + } else { |
| + pushq(new_pc); |
| + pushq(PP); |
| + movq(PP, new_pp); |
| } |
| if (frame_size != 0) { |
| subq(RSP, Immediate(frame_size)); |
| @@ -2400,27 +2600,48 @@ |
| // pointer is already set up. The PC marker is not correct for the |
| // optimized function and there may be extra space for spill slots to |
| // allocate. |
| -void Assembler::EnterOsrFrame(intptr_t extra_size) { |
| - Label dart_entry; |
| - call(&dart_entry); |
| - Bind(&dart_entry); |
| - // The runtime system assumes that the code marker address is |
| - // kEntryPointToPcMarkerOffset bytes from the entry. Since there is no |
| - // code to set up the frame pointer, the address needs to be adjusted. |
| - const intptr_t offset = kEntryPointToPcMarkerOffset - CodeSize(); |
| - if (offset != 0) { |
| - addq(Address(RSP, 0), Immediate(offset)); |
| +void Assembler::EnterOsrFrame(intptr_t extra_size, |
| + Register new_pp, Register new_pc) { |
| + if (new_pc == kNoRegister) { |
| + Label dart_entry; |
| + call(&dart_entry); |
| + Bind(&dart_entry); |
| + // The runtime system assumes that the code marker address is |
| + // kEntryPointToPcMarkerOffset bytes from the entry. Since there is no |
| + // code to set up the frame pointer, the address needs to be adjusted. |
| + const intptr_t object_pool_pc_dist = |
| + Instructions::HeaderSize() - Instructions::object_pool_offset() + |
| + CodeSize(); |
| + const intptr_t offset = kEntryPointToPcMarkerOffset - CodeSize(); |
| + if (offset != 0) { |
| + addq(Address(RSP, 0), Immediate(offset)); |
| + } |
| + |
| + // Load callee's pool pointer. |
| + movq(PP, Address(RSP, 0)); |
| + movq(PP, Address(PP, -object_pool_pc_dist - offset)); |
| + |
| + popq(Address(RBP, kPcMarkerSlotFromFp * kWordSize)); |
| + } else { |
| + movq(Address(RBP, kPcMarkerSlotFromFp * kWordSize), new_pc); |
| + movq(PP, new_pp); |
| } |
| - popq(Address(RBP, kPcMarkerSlotFromFp * kWordSize)); |
| if (extra_size != 0) { |
| subq(RSP, Immediate(extra_size)); |
| } |
| } |
| -void Assembler::EnterStubFrame() { |
| - EnterFrame(0); |
| - pushq(Immediate(0)); // Push 0 in the saved PC area for stub frames. |
| +void Assembler::EnterStubFrame(bool save_pp) { |
| + if (save_pp) { |
| + EnterFrame(0); |
| + pushq(Immediate(0)); // Push 0 in the saved PC area for stub frames. |
| + pushq(PP); // Save caller's pool pointer |
| + LoadPoolPointer(); |
| + } else { |
| + EnterFrame(0); |
| + pushq(Immediate(0)); // Push 0 in the saved PC area for stub frames. |
| + } |
| } |