| Index: runtime/vm/stub_code_x64.cc
|
| diff --git a/runtime/vm/stub_code_x64.cc b/runtime/vm/stub_code_x64.cc
|
| index 19cee0b6f154f23d9449a9919d8d6379bbebab58..f4a290e378a86dd5b17c423cf749afca584f741c 100644
|
| --- a/runtime/vm/stub_code_x64.cc
|
| +++ b/runtime/vm/stub_code_x64.cc
|
| @@ -277,12 +277,12 @@ void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) {
|
| // Setup space on stack for return value.
|
| __ PushObject(Object::null_object(), PP);
|
| __ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
|
| - __ popq(RAX); // Get Code object result.
|
| + __ popq(CODE_REG); // Get Code object result.
|
| __ popq(R10); // Restore arguments descriptor array.
|
| // Remove the stub frame as we are about to jump to the dart function.
|
| __ LeaveStubFrame();
|
|
|
| - __ movq(RBX, FieldAddress(RAX, Code::instructions_offset()));
|
| + __ movq(RBX, FieldAddress(CODE_REG, Code::instructions_offset()));
|
| __ addq(RBX, Immediate(Instructions::HeaderSize() - kHeapObjectTag));
|
| __ jmp(RBX);
|
| }
|
| @@ -297,9 +297,9 @@ void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) {
|
| // Setup space on stack for return value.
|
| __ PushObject(Object::null_object(), PP);
|
| __ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
|
| - __ popq(RAX); // Get Code object.
|
| + __ popq(CODE_REG); // Get Code object.
|
| __ popq(R10); // Restore arguments descriptor array.
|
| - __ movq(RAX, FieldAddress(RAX, Code::instructions_offset()));
|
| + __ movq(RAX, FieldAddress(CODE_REG, Code::instructions_offset()));
|
| __ addq(RAX, Immediate(Instructions::HeaderSize() - kHeapObjectTag));
|
| __ LeaveStubFrame();
|
| __ jmp(RAX);
|
| @@ -314,8 +314,8 @@ void StubCode::GenerateFixAllocationStubTargetStub(Assembler* assembler) {
|
| // Setup space on stack for return value.
|
| __ PushObject(Object::null_object(), PP);
|
| __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
|
| - __ popq(RAX); // Get Code object.
|
| - __ movq(RAX, FieldAddress(RAX, Code::instructions_offset()));
|
| + __ popq(CODE_REG); // Get Code object.
|
| + __ movq(RAX, FieldAddress(CODE_REG, Code::instructions_offset()));
|
| __ addq(RAX, Immediate(Instructions::HeaderSize() - kHeapObjectTag));
|
| __ LeaveStubFrame();
|
| __ jmp(RAX);
|
| @@ -334,10 +334,10 @@ void StubCode::GenerateFixAllocateArrayStubTargetStub(Assembler* assembler) {
|
| // Setup space on stack for return value.
|
| __ PushObject(Object::null_object(), PP);
|
| __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
|
| - __ popq(RAX); // Get Code object.
|
| + __ popq(CODE_REG); // Get Code object.
|
| __ popq(RBX); // Restore element type.
|
| __ popq(R10); // Restore length.
|
| - __ movq(RAX, FieldAddress(RAX, Code::instructions_offset()));
|
| + __ movq(RAX, FieldAddress(CODE_REG, Code::instructions_offset()));
|
| __ addq(RAX, Immediate(Instructions::HeaderSize() - kHeapObjectTag));
|
| __ LeaveStubFrame();
|
| __ jmp(RAX);
|
| @@ -355,8 +355,7 @@ static void PushArgumentsArray(Assembler* assembler) {
|
| // Allocate array to store arguments of caller.
|
| __ movq(RBX, R12); // Null element type for raw Array.
|
| const Code& array_stub = Code::Handle(stub_code->GetAllocateArrayStub());
|
| - const ExternalLabel array_label(array_stub.EntryPoint());
|
| - __ call(&array_label);
|
| + __ Call(array_stub, PP);
|
| __ SmiUntag(R10);
|
| // RAX: newly allocated array.
|
| // R10: length of the array (was preserved by the stub).
|
| @@ -385,12 +384,17 @@ static void PushArgumentsArray(Assembler* assembler) {
|
|
|
|
|
| DECLARE_LEAF_RUNTIME_ENTRY(intptr_t, DeoptimizeCopyFrame,
|
| - intptr_t deopt_reason,
|
| - uword saved_registers_address);
|
| + uword saved_registers_address,
|
| + uword is_lazy_deopt);
|
|
|
| DECLARE_LEAF_RUNTIME_ENTRY(void, DeoptimizeFillFrame, uword last_fp);
|
|
|
|
|
| +enum DeoptStubKind {
|
| + kLazyDeopt,
|
| + kEagerDeopt
|
| +};
|
| +
|
| // Used by eager and lazy deoptimization. Preserve result in RAX if necessary.
|
| // This stub translates optimized frame into unoptimized frame. The optimized
|
| // frame can contain values in registers and on stack, the unoptimized
|
| @@ -416,12 +420,10 @@ DECLARE_LEAF_RUNTIME_ENTRY(void, DeoptimizeFillFrame, uword last_fp);
|
| //
|
| // Parts of the code cannot GC, part of the code can GC.
|
| static void GenerateDeoptimizationSequence(Assembler* assembler,
|
| - bool preserve_result) {
|
| + DeoptStubKind kind) {
|
| // DeoptimizeCopyFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
|
| // is no need to set the correct PC marker or load PP, since they get patched.
|
| - __ EnterFrame(0);
|
| - __ pushq(Immediate(0));
|
| - __ pushq(PP);
|
| + __ EnterDartFrame(0, kNoRegister);
|
|
|
| // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
|
| // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
|
| @@ -444,16 +446,18 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
|
|
|
| // Pass address of saved registers block.
|
| __ movq(CallingConventions::kArg1Reg, RSP);
|
| + __ movq(CallingConventions::kArg2Reg, Immediate(kind == kLazyDeopt ? 1 : 0));
|
| __ ReserveAlignedFrameSpace(0); // Ensure stack is aligned before the call.
|
| - __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 1);
|
| + __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2);
|
| // Result (RAX) is stack-size (FP - SP) in bytes.
|
|
|
| - if (preserve_result) {
|
| + if (kind == kLazyDeopt) {
|
| // Restore result into RBX temporarily.
|
| __ movq(RBX, Address(RBP, saved_result_slot_from_fp * kWordSize));
|
| }
|
|
|
| // There is a Dart Frame on the stack. We must restore PP and leave frame.
|
| + __ RestoreCodePointer();
|
| __ LeaveDartFrame();
|
|
|
| __ popq(RCX); // Preserve return address.
|
| @@ -463,23 +467,22 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
|
|
|
| // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
|
| // is no need to set the correct PC marker or load PP, since they get patched.
|
| - __ EnterFrame(0);
|
| - __ pushq(Immediate(0));
|
| - __ pushq(PP);
|
| + __ EnterDartFrame(0, kNoRegister);
|
|
|
| - if (preserve_result) {
|
| + if (kind == kLazyDeopt) {
|
| __ pushq(RBX); // Preserve result as first local.
|
| }
|
| __ ReserveAlignedFrameSpace(0);
|
| // Pass last FP as a parameter.
|
| __ movq(CallingConventions::kArg1Reg, RBP);
|
| __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1);
|
| - if (preserve_result) {
|
| + if (kind == kLazyDeopt) {
|
| // Restore result into RBX.
|
| __ movq(RBX, Address(RBP, kFirstLocalSlotFromFp * kWordSize));
|
| }
|
| // Code above cannot cause GC.
|
| // There is a Dart Frame on the stack. We must restore PP and leave frame.
|
| + __ RestoreCodePointer();
|
| __ LeaveDartFrame();
|
|
|
| // Frame is fully rewritten at this point and it is safe to perform a GC.
|
| @@ -487,7 +490,7 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
|
| // require allocation.
|
| // Enter stub frame with loading PP. The caller's PP is not materialized yet.
|
| __ EnterStubFrame();
|
| - if (preserve_result) {
|
| + if (kind == kLazyDeopt) {
|
| __ pushq(Immediate(0)); // Workaround for dropped stack slot during GC.
|
| __ pushq(RBX); // Preserve result, it will be GC-d here.
|
| }
|
| @@ -497,7 +500,7 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
|
| // of the bottom-most frame. They were used as materialization arguments.
|
| __ popq(RBX);
|
| __ SmiUntag(RBX);
|
| - if (preserve_result) {
|
| + if (kind == kLazyDeopt) {
|
| __ popq(RAX); // Restore result.
|
| __ Drop(1); // Workaround for dropped stack slot during GC.
|
| }
|
| @@ -518,12 +521,12 @@ void StubCode::GenerateDeoptimizeLazyStub(Assembler* assembler) {
|
| __ popq(RBX);
|
| __ subq(RBX, Immediate(ShortCallPattern::InstructionLength()));
|
| __ pushq(RBX);
|
| - GenerateDeoptimizationSequence(assembler, true); // Preserve RAX.
|
| + GenerateDeoptimizationSequence(assembler, kLazyDeopt);
|
| }
|
|
|
|
|
| void StubCode::GenerateDeoptimizeStub(Assembler* assembler) {
|
| - GenerateDeoptimizationSequence(assembler, false); // Don't preserve RAX.
|
| + GenerateDeoptimizationSequence(assembler, kEagerDeopt);
|
| }
|
|
|
|
|
| @@ -553,9 +556,11 @@ void StubCode::GenerateMegamorphicMissStub(Assembler* assembler) {
|
| __ popq(RAX); // Return value from the runtime call (function).
|
| __ popq(R10); // Restore arguments descriptor.
|
| __ popq(RBX); // Restore IC data.
|
| + __ RestoreCodePointer();
|
| __ LeaveStubFrame();
|
|
|
| - __ movq(RCX, FieldAddress(RAX, Function::instructions_offset()));
|
| + __ movq(CODE_REG, FieldAddress(RAX, Function::code_offset()));
|
| + __ movq(RCX, FieldAddress(CODE_REG, Code::instructions_offset()));
|
| __ addq(RCX, Immediate(Instructions::HeaderSize() - kHeapObjectTag));
|
| __ jmp(RCX);
|
| }
|
| @@ -691,14 +696,15 @@ void StubCode::GeneratePatchableAllocateArrayStub(Assembler* assembler,
|
| __ ret();
|
| *patch_code_pc_offset = assembler->CodeSize();
|
| StubCode* stub_code = Isolate::Current()->stub_code();
|
| - __ JmpPatchable(&stub_code->FixAllocateArrayStubTargetLabel(), new_pp);
|
| + __ JmpPatchable(Code::Handle(
|
| + stub_code->FixAllocateArrayStubTargetCode()), new_pp);
|
| }
|
|
|
|
|
| // Called when invoking Dart code from C++ (VM code).
|
| // Input parameters:
|
| // RSP : points to return address.
|
| -// RDI : entrypoint of the Dart function to call.
|
| -// RSI : arguments descriptor array.
|
| -// RDX : arguments array.
|
| -// RCX : current thread.
|
| +// RDI : stub code (this stub's Code object, as a VM handle).
|
| +// RSI : target code (Code object of the Dart function to call).
|
| +// RDX : arguments descriptor array.
|
| +// RCX : arguments array.
|
| +// R8  : current thread (except on Windows, where it is passed on the stack).
|
| @@ -706,19 +712,29 @@ void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) {
|
| // Save frame pointer coming in.
|
| __ EnterFrame(0);
|
|
|
| - const Register kEntryPointReg = CallingConventions::kArg1Reg;
|
| - const Register kArgDescReg = CallingConventions::kArg2Reg;
|
| - const Register kArgsReg = CallingConventions::kArg3Reg;
|
| - const Register kThreadReg = CallingConventions::kArg4Reg;
|
| + const Register kStubCodeReg = CallingConventions::kArg1Reg;
|
| + const Register kTargetCodeReg = CallingConventions::kArg2Reg;
|
| + const Register kArgDescReg = CallingConventions::kArg3Reg;
|
| + const Register kArgsReg = CallingConventions::kArg4Reg;
|
| +
|
| + // Set up THR, which caches the current thread in Dart code.
|
| +#if defined(_WIN64)
|
| + __ movq(THR, Address(RSP, 5 * kWordSize));
|
| +#else
|
| + const Register kThreadReg = CallingConventions::kArg5Reg;
|
| + if (THR != kThreadReg) {
|
| + __ movq(THR, kThreadReg);
|
| + }
|
| +#endif
|
|
|
| // At this point, the stack looks like:
|
| // | saved RBP | <-- RBP
|
| // | saved PC (return to DartEntry::InvokeFunction) |
|
|
|
| - const intptr_t kInitialOffset = 1;
|
| - // Save arguments descriptor array.
|
| - const intptr_t kArgumentsDescOffset = -(kInitialOffset) * kWordSize;
|
| + const intptr_t kArgumentsDescOffset = -1 * kWordSize;
|
| + const intptr_t kCodePointerOffset = -2 * kWordSize;
|
| __ pushq(kArgDescReg);
|
| + __ pushq(Address(kStubCodeReg, VMHandles::kOffsetOfRawPtrInHandle));
|
|
|
| // Save C++ ABI callee-saved registers.
|
| __ PushRegisters(CallingConventions::kCalleeSaveCpuRegisters,
|
| @@ -727,15 +743,12 @@ void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) {
|
| // We now load the pool pointer(PP) as we are about to invoke dart code and we
|
| // could potentially invoke some intrinsic functions which need the PP to be
|
| // set up.
|
| + __ movq(CODE_REG, Address(kStubCodeReg, VMHandles::kOffsetOfRawPtrInHandle));
|
| __ LoadPoolPointer(PP);
|
|
|
| // If any additional (or fewer) values are pushed, the offsets in
|
| // kExitLinkSlotFromEntryFp will need to be changed.
|
|
|
| - // Set up THR, which caches the current thread in Dart code.
|
| - if (THR != kThreadReg) {
|
| - __ movq(THR, kThreadReg);
|
| - }
|
| // Load Isolate pointer into kIsolateReg.
|
| const Register kIsolateReg = RBX;
|
| __ LoadIsolate(kIsolateReg);
|
| @@ -764,6 +777,7 @@ void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) {
|
| __ leaq(RAX, Address(RBP, kExitLinkSlotFromEntryFp * kWordSize));
|
| __ cmpq(RAX, RSP);
|
| __ j(EQUAL, &ok);
|
| + __ int3();
|
| __ Stop("kExitLinkSlotFromEntryFp mismatch");
|
| __ Bind(&ok);
|
| }
|
| @@ -774,8 +788,8 @@ void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) {
|
| // Load arguments descriptor array into R10, which is passed to Dart code.
|
| __ movq(R10, Address(kArgDescReg, VMHandles::kOffsetOfRawPtrInHandle));
|
|
|
| - // Push arguments. At this point we only need to preserve kEntryPointReg.
|
| - ASSERT(kEntryPointReg != RDX);
|
| + // Push arguments. At this point we only need to preserve kTargetCodeReg.
|
| + // NOTE(review): kTargetCodeReg is CallingConventions::kArg2Reg, which is RDX
|
| + // under the Win64 calling convention -- this assert would fire there; confirm
|
| + // that kTargetCodeReg is preserved (or spilled) on Windows before landing.
|
| + ASSERT(kTargetCodeReg != RDX);
|
|
|
| // Load number of arguments into RBX.
|
| __ movq(RBX, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
|
| @@ -799,7 +813,18 @@ void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) {
|
| __ Bind(&done_push_arguments);
|
|
|
| // Call the Dart code entrypoint.
|
| - __ call(kEntryPointReg); // R10 is the arguments descriptor array.
|
| + __ movq(CODE_REG,
|
| + Address(kTargetCodeReg, VMHandles::kOffsetOfRawPtrInHandle));
|
| + __ movq(kTargetCodeReg, FieldAddress(CODE_REG, Code::instructions_offset()));
|
| + __ addq(kTargetCodeReg,
|
| + Immediate(Instructions::HeaderSize() - kHeapObjectTag));
|
| + __ call(kTargetCodeReg); // R10 is the arguments descriptor array.
|
| +
|
| + // If we arrived here from JumpToExceptionHandler then PP would not be
|
| + // set correctly. Reload it from the code object.
|
| + // TODO(vegorov): set PP in the JumpToExceptionHandler to avoid reloading.
|
| + __ movq(CODE_REG, Address(RBP, kCodePointerOffset));
|
| + __ LoadPoolPointer(PP);
|
|
|
| // Read the saved arguments descriptor array to obtain the number of passed
|
| // arguments.
|
| @@ -835,7 +860,7 @@ void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) {
|
| // Output:
|
| // RAX: new allocated RawContext object.
|
| void StubCode::GenerateAllocateContextStub(Assembler* assembler) {
|
| - __ LoadObject(R12, Object::null_object(), PP);
|
| + __ LoadObject(R9, Object::null_object(), PP);
|
| if (FLAG_inline_alloc) {
|
| Label slow_case;
|
| Isolate* isolate = Isolate::Current();
|
| @@ -913,7 +938,7 @@ void StubCode::GenerateAllocateContextStub(Assembler* assembler) {
|
| // No generational barrier needed, since we are storing null.
|
| __ InitializeFieldNoBarrier(RAX,
|
| FieldAddress(RAX, Context::parent_offset()),
|
| - R12);
|
| + R9);
|
|
|
| // Initialize the context variables.
|
| // RAX: new object.
|
| @@ -932,7 +957,7 @@ void StubCode::GenerateAllocateContextStub(Assembler* assembler) {
|
| // No generational barrier needed, since we are storing null.
|
| __ InitializeFieldNoBarrier(RAX,
|
| Address(R13, R10, TIMES_8, 0),
|
| - R12);
|
| + R9);
|
| __ Bind(&entry);
|
| __ cmpq(R10, Immediate(0));
|
| __ j(NOT_EQUAL, &loop, Assembler::kNearJump);
|
| @@ -946,7 +971,7 @@ void StubCode::GenerateAllocateContextStub(Assembler* assembler) {
|
| }
|
| // Create a stub frame.
|
| __ EnterStubFrame();
|
| - __ pushq(R12); // Setup space on stack for the return value.
|
| + __ pushq(R9); // Setup space on stack for the return value.
|
| __ SmiTag(R10);
|
| __ pushq(R10); // Push number of context variables.
|
| __ CallRuntime(kAllocateContextRuntimeEntry, 1); // Allocate context.
|
| @@ -1047,7 +1072,7 @@ void StubCode::GenerateAllocationStubForClass(
|
| const int kInlineInstanceSize = 12; // In words.
|
| const intptr_t instance_size = cls.instance_size();
|
| ASSERT(instance_size > 0);
|
| - __ LoadObject(R12, Object::null_object(), PP);
|
| + __ LoadObject(R9, Object::null_object(), PP);
|
| if (is_cls_parameterized) {
|
| __ movq(RDX, Address(RSP, kObjectTypeArgumentsOffset));
|
| // RDX: instantiated type arguments.
|
| @@ -1091,7 +1116,7 @@ void StubCode::GenerateAllocationStubForClass(
|
| // RAX: new object (tagged).
|
| // RBX: next object start.
|
| // RDX: new object type arguments (if is_cls_parameterized).
|
| - // R12: raw null.
|
| + // R9: raw null.
|
| // First try inlining the initialization without a loop.
|
| if (instance_size < (kInlineInstanceSize * kWordSize)) {
|
| // Check if the object contains any non-header fields.
|
| @@ -1101,7 +1126,7 @@ void StubCode::GenerateAllocationStubForClass(
|
| current_offset += kWordSize) {
|
| __ InitializeFieldNoBarrier(RAX,
|
| FieldAddress(RAX, current_offset),
|
| - R12);
|
| + R9);
|
| }
|
| } else {
|
| __ leaq(RCX, FieldAddress(RAX, Instance::NextFieldOffset()));
|
| @@ -1120,7 +1145,7 @@ void StubCode::GenerateAllocationStubForClass(
|
| static const bool kJumpLength = Assembler::kNearJump;
|
| #endif // DEBUG
|
| __ j(ABOVE_EQUAL, &done, kJumpLength);
|
| - __ InitializeFieldNoBarrier(RAX, Address(RCX, 0), R12);
|
| + __ InitializeFieldNoBarrier(RAX, Address(RCX, 0), R9);
|
| __ addq(RCX, Immediate(kWordSize));
|
| __ jmp(&init_loop, Assembler::kNearJump);
|
| __ Bind(&done);
|
| @@ -1141,12 +1166,12 @@ void StubCode::GenerateAllocationStubForClass(
|
| // RDX: new object type arguments.
|
| // Create a stub frame.
|
| __ EnterStubFrame(); // Uses PP to access class object.
|
| - __ pushq(R12); // Setup space on stack for return value.
|
| + __ pushq(R9); // Setup space on stack for return value.
|
| __ PushObject(cls, PP); // Push class of object to be allocated.
|
| if (is_cls_parameterized) {
|
| __ pushq(RDX); // Push type arguments of object to be allocated.
|
| } else {
|
| - __ pushq(R12); // Push null type arguments.
|
| + __ pushq(R9); // Push null type arguments.
|
| }
|
| __ CallRuntime(kAllocateObjectRuntimeEntry, 2); // Allocate object.
|
| __ popq(RAX); // Pop argument (type arguments of object).
|
| @@ -1158,7 +1183,8 @@ void StubCode::GenerateAllocationStubForClass(
|
| __ ret();
|
| *patch_code_pc_offset = assembler->CodeSize();
|
| StubCode* stub_code = Isolate::Current()->stub_code();
|
| - __ JmpPatchable(&stub_code->FixAllocationStubTargetLabel(), new_pp);
|
| + __ JmpPatchable(Code::Handle(
|
| + stub_code->FixAllocationStubTargetCode()), new_pp);
|
| }
|
|
|
|
|
| @@ -1244,9 +1270,9 @@ static void EmitFastSmiOp(Assembler* assembler,
|
| ASSERT(num_args == 2);
|
| __ movq(RCX, Address(RSP, + 1 * kWordSize)); // Right
|
| __ movq(RAX, Address(RSP, + 2 * kWordSize)); // Left.
|
| - __ movq(R12, RCX);
|
| - __ orq(R12, RAX);
|
| - __ testq(R12, Immediate(kSmiTagMask));
|
| + __ movq(R13, RCX);
|
| + __ orq(R13, RAX);
|
| + __ testq(R13, Immediate(kSmiTagMask));
|
| __ j(NOT_ZERO, not_smi_or_overflow);
|
| switch (kind) {
|
| case Token::kADD: {
|
| @@ -1282,18 +1308,18 @@ static void EmitFastSmiOp(Assembler* assembler,
|
| }
|
|
|
| // RBX: IC data object (preserved).
|
| - __ movq(R12, FieldAddress(RBX, ICData::ic_data_offset()));
|
| - // R12: ic_data_array with check entries: classes and target functions.
|
| - __ leaq(R12, FieldAddress(R12, Array::data_offset()));
|
| - // R12: points directly to the first ic data array element.
|
| + __ movq(R13, FieldAddress(RBX, ICData::ic_data_offset()));
|
| + // R13: ic_data_array with check entries: classes and target functions.
|
| + __ leaq(R13, FieldAddress(R13, Array::data_offset()));
|
| + // R13: points directly to the first ic data array element.
|
| #if defined(DEBUG)
|
| // Check that first entry is for Smi/Smi.
|
| Label error, ok;
|
| const Immediate& imm_smi_cid =
|
| Immediate(reinterpret_cast<intptr_t>(Smi::New(kSmiCid)));
|
| - __ cmpq(Address(R12, 0 * kWordSize), imm_smi_cid);
|
| + __ cmpq(Address(R13, 0 * kWordSize), imm_smi_cid);
|
| __ j(NOT_EQUAL, &error, Assembler::kNearJump);
|
| - __ cmpq(Address(R12, 1 * kWordSize), imm_smi_cid);
|
| + __ cmpq(Address(R13, 1 * kWordSize), imm_smi_cid);
|
| __ j(EQUAL, &ok, Assembler::kNearJump);
|
| __ Bind(&error);
|
| __ Stop("Incorrect IC data");
|
| @@ -1303,11 +1329,11 @@ static void EmitFastSmiOp(Assembler* assembler,
|
| if (FLAG_optimization_counter_threshold >= 0) {
|
| const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize;
|
| // Update counter.
|
| - __ movq(R8, Address(R12, count_offset));
|
| + __ movq(R8, Address(R13, count_offset));
|
| __ addq(R8, Immediate(Smi::RawValue(1)));
|
| - __ movq(R13, Immediate(Smi::RawValue(Smi::kMaxValue)));
|
| - __ cmovnoq(R13, R8);
|
| - __ StoreIntoSmiField(Address(R12, count_offset), R13);
|
| + __ movq(R9, Immediate(Smi::RawValue(Smi::kMaxValue)));
|
| + __ cmovnoq(R9, R8);
|
| + __ StoreIntoSmiField(Address(R13, count_offset), R9);
|
| }
|
|
|
| __ ret();
|
| @@ -1383,18 +1409,18 @@ void StubCode::GenerateNArgsCheckInlineCacheStub(
|
| // Loop that checks if there is an IC data match.
|
| Label loop, update, test, found;
|
| // RBX: IC data object (preserved).
|
| - __ movq(R12, FieldAddress(RBX, ICData::ic_data_offset()));
|
| - // R12: ic_data_array with check entries: classes and target functions.
|
| - __ leaq(R12, FieldAddress(R12, Array::data_offset()));
|
| - // R12: points directly to the first ic data array element.
|
| + __ movq(R13, FieldAddress(RBX, ICData::ic_data_offset()));
|
| + // R13: ic_data_array with check entries: classes and target functions.
|
| + __ leaq(R13, FieldAddress(R13, Array::data_offset()));
|
| + // R13: points directly to the first ic data array element.
|
|
|
| // Get the receiver's class ID (first read number of arguments from
|
| // arguments descriptor array and then access the receiver from the stack).
|
| __ movq(RAX, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
|
| - __ movq(R13, Address(RSP, RAX, TIMES_4, 0)); // RAX (argument count) is Smi.
|
| - __ LoadTaggedClassIdMayBeSmi(RAX, R13);
|
| + __ movq(R9, Address(RSP, RAX, TIMES_4, 0)); // RAX (argument count) is Smi.
|
| + __ LoadTaggedClassIdMayBeSmi(RAX, R9);
|
| // RAX: receiver's class ID as smi.
|
| - __ movq(R13, Address(R12, 0)); // First class ID (Smi) to check.
|
| + __ movq(R9, Address(R13, 0)); // First class ID (Smi) to check.
|
| __ jmp(&test);
|
|
|
| __ Comment("ICData loop");
|
| @@ -1403,13 +1429,13 @@ void StubCode::GenerateNArgsCheckInlineCacheStub(
|
| if (i > 0) {
|
| // If not the first, load the next argument's class ID.
|
| __ movq(RAX, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
|
| - __ movq(R13, Address(RSP, RAX, TIMES_4, - i * kWordSize));
|
| - __ LoadTaggedClassIdMayBeSmi(RAX, R13);
|
| + __ movq(R9, Address(RSP, RAX, TIMES_4, - i * kWordSize));
|
| + __ LoadTaggedClassIdMayBeSmi(RAX, R9);
|
| // RAX: next argument class ID (smi).
|
| - __ movq(R13, Address(R12, i * kWordSize));
|
| - // R13: next class ID to check (smi).
|
| + __ movq(R9, Address(R13, i * kWordSize));
|
| + // R9: next class ID to check (smi).
|
| }
|
| - __ cmpq(RAX, R13); // Class id match?
|
| + __ cmpq(RAX, R9); // Class id match?
|
| if (i < (num_args - 1)) {
|
| __ j(NOT_EQUAL, &update); // Continue.
|
| } else {
|
| @@ -1421,20 +1447,20 @@ void StubCode::GenerateNArgsCheckInlineCacheStub(
|
| // Reload receiver class ID. It has not been destroyed when num_args == 1.
|
| if (num_args > 1) {
|
| __ movq(RAX, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
|
| - __ movq(R13, Address(RSP, RAX, TIMES_4, 0));
|
| - __ LoadTaggedClassIdMayBeSmi(RAX, R13);
|
| + __ movq(R9, Address(RSP, RAX, TIMES_4, 0));
|
| + __ LoadTaggedClassIdMayBeSmi(RAX, R9);
|
| }
|
|
|
| const intptr_t entry_size = ICData::TestEntryLengthFor(num_args) * kWordSize;
|
| - __ addq(R12, Immediate(entry_size)); // Next entry.
|
| - __ movq(R13, Address(R12, 0)); // Next class ID.
|
| + __ addq(R13, Immediate(entry_size)); // Next entry.
|
| + __ movq(R9, Address(R13, 0)); // Next class ID.
|
|
|
| __ Bind(&test);
|
| - __ cmpq(R13, Immediate(Smi::RawValue(kIllegalCid))); // Done?
|
| + __ cmpq(R9, Immediate(Smi::RawValue(kIllegalCid))); // Done?
|
| __ j(NOT_EQUAL, &loop, Assembler::kNearJump);
|
|
|
| __ Comment("IC miss");
|
| - __ LoadObject(R12, Object::null_object(), PP);
|
| + __ LoadObject(R13, Object::null_object(), PP);
|
| // Compute address of arguments (first read number of arguments from
|
| // arguments descriptor array and then compute address on the stack).
|
| __ movq(RAX, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
|
| @@ -1442,7 +1468,7 @@ void StubCode::GenerateNArgsCheckInlineCacheStub(
|
| __ EnterStubFrame();
|
| __ pushq(R10); // Preserve arguments descriptor array.
|
| __ pushq(RBX); // Preserve IC data object.
|
| - __ pushq(R12); // Setup space on stack for result (target code object).
|
| + __ pushq(R13); // Setup space on stack for result (target code object).
|
| // Push call arguments.
|
| for (intptr_t i = 0; i < num_args; i++) {
|
| __ movq(RCX, Address(RAX, -kWordSize * i));
|
| @@ -1457,43 +1483,48 @@ void StubCode::GenerateNArgsCheckInlineCacheStub(
|
| __ popq(RAX); // Pop returned function object into RAX.
|
| __ popq(RBX); // Restore IC data array.
|
| __ popq(R10); // Restore arguments descriptor array.
|
| + if (range_collection_mode == kCollectRanges) {
|
| + __ RestoreCodePointer();
|
| + }
|
| __ LeaveStubFrame();
|
| Label call_target_function;
|
| __ jmp(&call_target_function);
|
|
|
| __ Bind(&found);
|
| - // R12: Pointer to an IC data check group.
|
| + // R13: Pointer to an IC data check group.
|
| const intptr_t target_offset = ICData::TargetIndexFor(num_args) * kWordSize;
|
| const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize;
|
| - __ movq(RAX, Address(R12, target_offset));
|
| + __ movq(RAX, Address(R13, target_offset));
|
|
|
| if (FLAG_optimization_counter_threshold >= 0) {
|
| // Update counter.
|
| __ Comment("Update caller's counter");
|
| - __ movq(R8, Address(R12, count_offset));
|
| + __ movq(R8, Address(R13, count_offset));
|
| __ addq(R8, Immediate(Smi::RawValue(1)));
|
| - __ movq(R13, Immediate(Smi::RawValue(Smi::kMaxValue)));
|
| - __ cmovnoq(R13, R8);
|
| - __ StoreIntoSmiField(Address(R12, count_offset), R13);
|
| + __ movq(R9, Immediate(Smi::RawValue(Smi::kMaxValue)));
|
| + __ cmovnoq(R9, R8);
|
| + __ StoreIntoSmiField(Address(R13, count_offset), R9);
|
| }
|
|
|
| __ Comment("Call target");
|
| __ Bind(&call_target_function);
|
| // RAX: Target function.
|
| Label is_compiled;
|
| - __ movq(RCX, FieldAddress(RAX, Function::instructions_offset()));
|
| - __ addq(RCX, Immediate(Instructions::HeaderSize() - kHeapObjectTag));
|
| if (range_collection_mode == kCollectRanges) {
|
| + __ movq(R13, FieldAddress(RAX, Function::code_offset()));
|
| + __ movq(RCX, FieldAddress(R13, Code::instructions_offset()));
|
| + __ addq(RCX, Immediate(Instructions::HeaderSize() - kHeapObjectTag));
|
| __ movq(R8, Address(RSP, + 1 * kWordSize));
|
| if (num_args == 2) {
|
| - __ movq(R13, Address(RSP, + 2 * kWordSize));
|
| + __ movq(R9, Address(RSP, + 2 * kWordSize));
|
| }
|
| __ EnterStubFrame();
|
| __ pushq(RBX);
|
| if (num_args == 2) {
|
| - __ pushq(R13);
|
| + __ pushq(R9);
|
| }
|
| __ pushq(R8);
|
| + __ movq(CODE_REG, R13);
|
| __ call(RCX);
|
|
|
| Label done;
|
| @@ -1504,6 +1535,9 @@ void StubCode::GenerateNArgsCheckInlineCacheStub(
|
| __ LeaveStubFrame();
|
| __ ret();
|
| } else {
|
| + __ movq(CODE_REG, FieldAddress(RAX, Function::code_offset()));
|
| + __ movq(RCX, FieldAddress(CODE_REG, Code::instructions_offset()));
|
| + __ addq(RCX, Immediate(Instructions::HeaderSize() - kHeapObjectTag));
|
| __ jmp(RCX);
|
| }
|
|
|
| @@ -1513,6 +1547,7 @@ void StubCode::GenerateNArgsCheckInlineCacheStub(
|
| __ pushq(RBX);
|
| __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
|
| __ popq(RBX);
|
| + __ RestoreCodePointer();
|
| __ LeaveStubFrame();
|
| __ jmp(&done_stepping);
|
| }
|
| @@ -1681,7 +1716,8 @@ void StubCode::GenerateZeroArgsUnoptimizedStaticCallStub(Assembler* assembler) {
|
|
|
| // Get function and call it, if possible.
|
| __ movq(RAX, Address(R12, target_offset));
|
| - __ movq(RCX, FieldAddress(RAX, Function::instructions_offset()));
|
| + __ movq(CODE_REG, FieldAddress(RAX, Function::code_offset()));
|
| + __ movq(RCX, FieldAddress(CODE_REG, Code::instructions_offset()));
|
| // RCX: Target instructions.
|
| __ addq(RCX, Immediate(Instructions::HeaderSize() - kHeapObjectTag));
|
| __ jmp(RCX);
|
| @@ -1734,7 +1770,8 @@ void StubCode::GenerateLazyCompileStub(Assembler* assembler) {
|
| __ popq(R10); // Restore arguments descriptor array.
|
| __ LeaveStubFrame();
|
|
|
| - __ movq(RAX, FieldAddress(RAX, Function::instructions_offset()));
|
| + __ movq(CODE_REG, FieldAddress(RAX, Function::code_offset()));
|
| + __ movq(RAX, FieldAddress(CODE_REG, Code::instructions_offset()));
|
| __ addq(RAX, Immediate(Instructions::HeaderSize() - kHeapObjectTag));
|
| __ jmp(RAX);
|
| }
|
| @@ -1751,9 +1788,12 @@ void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) {
|
| __ LoadObject(R12, Object::null_object(), PP);
|
| __ pushq(R12); // Room for result.
|
| __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
|
| - __ popq(RAX); // Address of original.
|
| + __ popq(CODE_REG); // Address of original.
|
| __ popq(RBX); // Restore IC data.
|
| __ LeaveStubFrame();
|
| +
|
| + __ movq(RAX, FieldAddress(CODE_REG, Code::instructions_offset()));
|
| + __ addq(RAX, Immediate(Instructions::HeaderSize() - kHeapObjectTag));
|
| __ jmp(RAX); // Jump to original stub.
|
| }
|
|
|
| @@ -1766,8 +1806,11 @@ void StubCode::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
|
| __ LoadObject(R12, Object::null_object(), PP);
|
| __ pushq(R12); // Room for result.
|
| __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
|
| - __ popq(RAX); // Address of original.
|
| + __ popq(CODE_REG); // Address of original.
|
| __ LeaveStubFrame();
|
| +
|
| + __ movq(RAX, FieldAddress(CODE_REG, Code::instructions_offset()));
|
| + __ addq(RAX, Immediate(Instructions::HeaderSize() - kHeapObjectTag));
|
| __ jmp(RAX); // Jump to original stub.
|
| }
|
|
|
| @@ -1803,12 +1846,12 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
|
| const intptr_t kInstanceOffsetInBytes = 2 * kWordSize;
|
| const intptr_t kCacheOffsetInBytes = 3 * kWordSize;
|
| __ movq(RAX, Address(RSP, kInstanceOffsetInBytes));
|
| - __ LoadObject(R12, Object::null_object(), PP);
|
| + __ LoadObject(R9, Object::null_object(), PP);
|
| if (n > 1) {
|
| __ LoadClass(R10, RAX, kNoRegister);
|
| // Compute instance type arguments into R13.
|
| Label has_no_type_arguments;
|
| - __ movq(R13, R12);
|
| + __ movq(R13, R9);
|
| __ movl(RDI, FieldAddress(R10,
|
| Class::type_arguments_field_offset_in_words_offset()));
|
| __ cmpl(RDI, Immediate(Class::kNoTypeArguments));
|
| @@ -1830,7 +1873,7 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
|
| __ SmiTag(R10);
|
| __ Bind(&loop);
|
| __ movq(RDI, Address(RDX, kWordSize * SubtypeTestCache::kInstanceClassId));
|
| - __ cmpq(RDI, R12);
|
| + __ cmpq(RDI, R9);
|
| __ j(EQUAL, ¬_found, Assembler::kNearJump);
|
| __ cmpq(RDI, R10);
|
| if (n == 1) {
|
| @@ -1857,7 +1900,7 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
|
| __ jmp(&loop, Assembler::kNearJump);
|
| // Fall through to not found.
|
| __ Bind(¬_found);
|
| - __ movq(RCX, R12);
|
| + __ movq(RCX, R9);
|
| __ ret();
|
|
|
| __ Bind(&found);
|
| @@ -1960,9 +2003,9 @@ void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) {
|
| __ pushq(RDI);
|
| __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
|
| __ popq(RAX); // Disard argument.
|
| - __ popq(RAX); // Get Code object.
|
| + __ popq(CODE_REG); // Get Code object.
|
| __ popq(R10); // Restore argument descriptor.
|
| - __ movq(RAX, FieldAddress(RAX, Code::instructions_offset()));
|
| + __ movq(RAX, FieldAddress(CODE_REG, Code::instructions_offset()));
|
| __ addq(RAX, Immediate(Instructions::HeaderSize() - kHeapObjectTag));
|
| __ LeaveStubFrame();
|
| __ jmp(RAX);
|
| @@ -1998,27 +2041,27 @@ void StubCode::GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
|
| __ CompareClassId(left, kDoubleCid);
|
| __ j(NOT_EQUAL, &check_mint, Assembler::kNearJump);
|
| __ CompareClassId(right, kDoubleCid);
|
| - __ j(NOT_EQUAL, &done, Assembler::kNearJump);
|
| + __ j(NOT_EQUAL, &done, Assembler::kFarJump);
|
|
|
| // Double values bitwise compare.
|
| __ movq(left, FieldAddress(left, Double::value_offset()));
|
| __ cmpq(left, FieldAddress(right, Double::value_offset()));
|
| - __ jmp(&done, Assembler::kNearJump);
|
| + __ jmp(&done, Assembler::kFarJump);
|
|
|
| __ Bind(&check_mint);
|
| __ CompareClassId(left, kMintCid);
|
| __ j(NOT_EQUAL, &check_bigint, Assembler::kNearJump);
|
| __ CompareClassId(right, kMintCid);
|
| - __ j(NOT_EQUAL, &done, Assembler::kNearJump);
|
| + __ j(NOT_EQUAL, &done, Assembler::kFarJump);
|
| __ movq(left, FieldAddress(left, Mint::value_offset()));
|
| __ cmpq(left, FieldAddress(right, Mint::value_offset()));
|
| - __ jmp(&done, Assembler::kNearJump);
|
| + __ jmp(&done, Assembler::kFarJump);
|
|
|
| __ Bind(&check_bigint);
|
| __ CompareClassId(left, kBigintCid);
|
| - __ j(NOT_EQUAL, &reference_compare, Assembler::kNearJump);
|
| + __ j(NOT_EQUAL, &reference_compare, Assembler::kFarJump);
|
| __ CompareClassId(right, kBigintCid);
|
| - __ j(NOT_EQUAL, &done, Assembler::kNearJump);
|
| + __ j(NOT_EQUAL, &done, Assembler::kFarJump);
|
| __ EnterStubFrame();
|
| __ ReserveAlignedFrameSpace(0);
|
| __ movq(CallingConventions::kArg1Reg, left);
|
| @@ -2121,7 +2164,8 @@ void StubCode::EmitMegamorphicLookup(
|
| // illegal class id was found, the target is a cache miss handler that can
|
| // be invoked as a normal Dart function.
|
| __ movq(RAX, FieldAddress(RDI, RCX, TIMES_8, base + kWordSize));
|
| - __ movq(target, FieldAddress(RAX, Function::instructions_offset()));
|
| + __ movq(CODE_REG, FieldAddress(RAX, Function::code_offset()));
|
| + __ movq(target, FieldAddress(CODE_REG, Code::instructions_offset()));
|
| // TODO(srdjan): Evaluate performance impact of moving the instruction below
|
| // to the call site, instead of having it here.
|
| __ AddImmediate(
|
|
|