Chromium Code Reviews

Unified Diff: runtime/vm/flow_graph_compiler_x64.cc

Issue 1192103004: VM: New calling convention for generated code. (Closed)
Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: Created 5 years, 6 months ago
Index: runtime/vm/flow_graph_compiler_x64.cc
diff --git a/runtime/vm/flow_graph_compiler_x64.cc b/runtime/vm/flow_graph_compiler_x64.cc
index 649e9190624fc013184ec546faea37bfca0f9251..9eb07e7fcde459e036e4de7b081c0c7e4b73c1af 100644
--- a/runtime/vm/flow_graph_compiler_x64.cc
+++ b/runtime/vm/flow_graph_compiler_x64.cc
@@ -190,7 +190,7 @@ void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
ASSERT(deopt_env() != NULL);
StubCode* stub_code = compiler->isolate()->stub_code();
- __ Call(&stub_code->DeoptimizeLabel(), PP);
+ __ Call(Code::Handle(stub_code->DeoptimizeCode()), PP);
set_pc_offset(assem->CodeSize());
__ int3();
#undef __
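
Note: the hunk above shows the core of the new convention, which recurs throughout this file: stub calls no longer target a raw ExternalLabel address but a Code object obtained from StubCode and wrapped in a handle. A minimal sketch of that shape, using mock stand-ins rather than the real VM classes (Assembler, Code and ExternalLabel below are simplified toys):

// toy_call_convention.cc -- simplified mock, not the Dart VM headers.
#include <cstdint>
#include <iostream>
#include <string>

struct ExternalLabel {          // old style: a bare entry-point address
  uintptr_t address;
};

struct Code {                   // new style: a (mock) object describing the target code
  std::string name;
  uintptr_t entry_point;
};

struct Assembler {
  // Old convention: the call site records a raw PC.
  void Call(const ExternalLabel* label) {
    std::cout << "call via raw address 0x" << std::hex << label->address << "\n";
  }
  // New convention: the call site records the Code object; the entry point is
  // looked up through it, so the target can be replaced without re-patching
  // raw addresses at every caller.
  void Call(const Code& target) {
    std::cout << "call via Code object '" << target.name << "'\n";
  }
};

int main() {
  Assembler assem;
  const ExternalLabel deopt_label{0x1000};
  const Code deopt_code{"Deoptimize", 0x1000};
  assem.Call(&deopt_label);  // was: __ Call(&stub_code->DeoptimizeLabel(), PP);
  assem.Call(deopt_code);    // now: __ Call(Code::Handle(stub_code->DeoptimizeCode()), PP);
  return 0;
}

Referencing Code objects (via the object pool) instead of raw PCs presumably lets the VM relocate or replace stubs without rewriting every caller, which is the point of the new calling convention.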
@@ -231,14 +231,14 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
if (test_kind == kTestTypeOneArg) {
ASSERT(type_arguments_reg == kNoRegister);
__ PushObject(Object::null_object(), PP);
- __ Call(&stub_code->Subtype1TestCacheLabel(), PP);
+ __ Call(Code::Handle(stub_code->Subtype1TestCacheCode()), PP);
} else if (test_kind == kTestTypeTwoArgs) {
ASSERT(type_arguments_reg == kNoRegister);
__ PushObject(Object::null_object(), PP);
- __ Call(&stub_code->Subtype2TestCacheLabel(), PP);
+ __ Call(Code::Handle(stub_code->Subtype2TestCacheCode()), PP);
} else if (test_kind == kTestTypeThreeArgs) {
__ pushq(type_arguments_reg);
- __ Call(&stub_code->Subtype3TestCacheLabel(), PP);
+ __ Call(Code::Handle(stub_code->Subtype3TestCacheCode()), PP);
} else {
UNREACHABLE();
}
@@ -931,8 +931,9 @@ void FlowGraphCompiler::CopyParameters() {
__ Bind(&wrong_num_arguments);
if (function.IsClosureFunction()) {
- __ LeaveDartFrame(); // The arguments are still on the stack.
- __ jmp(&isolate()->stub_code()->CallClosureNoSuchMethodLabel());
+ __ LeaveDartFrame(kKeepCalleePP); // The arguments are still on the stack.
+ __ Jmp(Code::Handle(
+ isolate()->stub_code()->CallClosureNoSuchMethodCode()), PP);
// The noSuchMethod call may return to the caller, but not here.
} else if (check_correct_named_args) {
__ Stop("Wrong arguments");
@@ -992,27 +993,18 @@ void FlowGraphCompiler::EmitFrameEntry() {
ASSERT(Assembler::EntryPointToPcMarkerOffset() == 0);
const Function& function = parsed_function().function();
- const Register new_pp = R13;
- const Register new_pc = R12;
-
- // Load PC marker.
- const intptr_t kRIPRelativeLeaqSize = 7;
- const intptr_t entry_to_rip_offset = __ CodeSize() + kRIPRelativeLeaqSize;
- __ leaq(new_pc, Address::AddressRIPRelative(-entry_to_rip_offset));
- ASSERT(__ CodeSize() == entry_to_rip_offset);
-
// Load pool pointer.
- const intptr_t object_pool_pc_dist =
- Instructions::HeaderSize() - Instructions::object_pool_offset();
- __ movq(new_pp, Address(new_pc, -object_pool_pc_dist));
if (flow_graph().IsCompiledForOsr()) {
intptr_t extra_slots = StackSize()
- flow_graph().num_stack_locals()
- flow_graph().num_copied_params();
ASSERT(extra_slots >= 0);
- __ EnterOsrFrame(extra_slots * kWordSize, new_pp, new_pc);
+ __ EnterOsrFrame(extra_slots * kWordSize);
} else {
+ const Register new_pp = R13;
+ __ movq(new_pp, FieldAddress(CODE_REG, Code::object_pool_offset()));
+
if (CanOptimizeFunction() &&
function.IsOptimizable() &&
(!is_optimizing() || may_reoptimize())) {
@@ -1033,14 +1025,14 @@ void FlowGraphCompiler::EmitFrameEntry() {
Immediate(GetOptimizationThreshold()));
ASSERT(function_reg == RDI);
__ J(GREATER_EQUAL,
- &isolate()->stub_code()->OptimizeFunctionLabel(),
+ Code::Handle(isolate()->stub_code()->OptimizeFunctionCode()),
new_pp);
} else {
entry_patch_pc_offset_ = assembler()->CodeSize();
}
ASSERT(StackSize() >= 0);
__ Comment("Enter frame");
- __ EnterDartFrameWithInfo(StackSize() * kWordSize, new_pp, new_pc);
+ __ EnterDartFrame(StackSize() * kWordSize, new_pp);
}
}
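
Note on the EmitFrameEntry hunk above: the prologue no longer computes the PC marker with a RIP-relative leaq and derives the pool pointer at a fixed offset before the instructions; instead the caller provides the Code object in CODE_REG and the pool is loaded from a field of it. A rough sketch of that load with a mock Code layout (the heap-object tag adjustment that the real FieldAddress performs is ignored here):

// toy_frame_entry.cc -- mock layout; Code::object_pool_offset() in the VM is a
// field offset inside the real Code object, modeled here with offsetof.
#include <cstddef>
#include <cstdint>
#include <iostream>

struct MockObjectPool {
  uintptr_t entries[4];
};

struct MockCode {
  uintptr_t some_header_word;
  MockObjectPool* object_pool;  // stands in for the field behind Code::object_pool_offset()
};

int main() {
  MockObjectPool pool = {{0x11, 0x22, 0x33, 0x44}};
  MockCode code = {0, &pool};

  // New prologue, roughly:
  //   movq new_pp, FieldAddress(CODE_REG, Code::object_pool_offset())
  uintptr_t code_reg = reinterpret_cast<uintptr_t>(&code);
  MockObjectPool* new_pp = *reinterpret_cast<MockObjectPool**>(
      code_reg + offsetof(MockCode, object_pool));

  std::cout << "pool entry 0: 0x" << std::hex << new_pp->entries[0] << "\n";
  return 0;
}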
@@ -1085,8 +1077,8 @@ void FlowGraphCompiler::CompileGraph() {
__ Bind(&wrong_num_arguments);
if (function.IsClosureFunction()) {
- __ LeaveDartFrame(); // The arguments are still on the stack.
- __ jmp(&stub_code->CallClosureNoSuchMethodLabel());
+ __ LeaveDartFrame(kKeepCalleePP); // Leave arguments on the stack.
+ __ Jmp(Code::Handle(stub_code->CallClosureNoSuchMethodCode()), PP);
// The noSuchMethod call may return to the caller, but not here.
} else {
__ Stop("Wrong number of arguments");
@@ -1152,20 +1144,20 @@ void FlowGraphCompiler::CompileGraph() {
patch_code_pc_offset_ = assembler()->CodeSize();
// This is patched up to a point in FrameEntry where the PP for the
// current function is in R13 instead of PP.
- __ JmpPatchable(&stub_code->FixCallersTargetLabel(), R13);
+ __ JmpPatchable(Code::Handle(stub_code->FixCallersTargetCode()), R13);
if (is_optimizing()) {
lazy_deopt_pc_offset_ = assembler()->CodeSize();
- __ Jmp(&stub_code->DeoptimizeLazyLabel(), PP);
+ __ Jmp(Code::Handle(stub_code->DeoptimizeLazyCode()), PP);
}
}
void FlowGraphCompiler::GenerateCall(intptr_t token_pos,
- const ExternalLabel* label,
+ const Code& target,
RawPcDescriptors::Kind kind,
LocationSummary* locs) {
- __ Call(label, PP);
+ __ Call(target, PP);
AddCurrentDescriptor(kind, Isolate::kNoDeoptId, token_pos);
RecordSafepoint(locs);
}
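
Note: GenerateCall here (and GenerateDartCall in the next hunk) now take the target as const Code& rather than ExternalLabel*, so every converted call site in this file passes a stub's Code handle while the descriptor and safepoint bookkeeping stays as before. A sketch of the new helper shape with mocked compiler state (Code, descriptor kinds and the compiler object are simplified stand-ins):

// toy_generate_call.cc -- mock of the new GenerateDartCall signature;
// descriptors and safepoints are simplified stand-ins.
#include <cstdint>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

struct Code { std::string name; };

enum class DescKind { kOther, kIcCall, kUnoptStaticCall };

struct MockCompiler {
  std::vector<std::pair<DescKind, intptr_t>> descriptors;

  // New shape: the target is a Code object, not an ExternalLabel*.
  void GenerateDartCall(intptr_t deopt_id, intptr_t token_pos,
                        const Code& target, DescKind kind) {
    std::cout << "patchable call -> " << target.name << "\n";
    descriptors.push_back({kind, deopt_id});  // models AddCurrentDescriptor(...)
    (void)token_pos;                          // RecordSafepoint(locs) elided
  }
};

int main() {
  MockCompiler compiler;
  const Code call_static{"CallStaticFunction"};
  const Code unopt_static{"UnoptimizedStaticCall(1 arg tested)"};
  // The converted call sites all funnel the stub's Code object through the
  // same helper:
  compiler.GenerateDartCall(/*deopt_id=*/7, /*token_pos=*/42, call_static,
                            DescKind::kOther);
  compiler.GenerateDartCall(/*deopt_id=*/8, /*token_pos=*/43, unopt_static,
                            DescKind::kUnoptStaticCall);
  std::cout << compiler.descriptors.size() << " descriptors recorded\n";
  return 0;
}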
@@ -1173,10 +1165,10 @@ void FlowGraphCompiler::GenerateCall(intptr_t token_pos,
void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
intptr_t token_pos,
- const ExternalLabel* label,
+ const Code& target,
RawPcDescriptors::Kind kind,
LocationSummary* locs) {
- __ CallPatchable(label);
+ __ CallPatchable(target);
AddCurrentDescriptor(kind, deopt_id, token_pos);
RecordSafepoint(locs);
// Marks either the continuation point in unoptimized code or the
@@ -1222,13 +1214,12 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(
LocationSummary* locs,
const ICData& ic_data) {
StubCode* stub_code = isolate()->stub_code();
- const uword label_address =
- stub_code->UnoptimizedStaticCallEntryPoint(ic_data.NumArgsTested());
- ExternalLabel target_label(label_address);
+ const Code& target = Code::Handle(
+ stub_code->UnoptimizedStaticCallCode(ic_data.NumArgsTested()));
__ LoadObject(RBX, ic_data, PP);
GenerateDartCall(deopt_id,
token_pos,
- &target_label,
+ target,
RawPcDescriptors::kUnoptStaticCall,
locs);
__ Drop(argument_count, RCX);
@@ -1264,7 +1255,7 @@ int32_t FlowGraphCompiler::EdgeCounterIncrementSizeInBytes() {
void FlowGraphCompiler::EmitOptimizedInstanceCall(
- ExternalLabel* target_label,
+ const Code& target,
const ICData& ic_data,
intptr_t argument_count,
intptr_t deopt_id,
@@ -1281,14 +1272,14 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(
__ LoadObject(RBX, ic_data, PP);
GenerateDartCall(deopt_id,
token_pos,
- target_label,
+ target,
RawPcDescriptors::kIcCall,
locs);
__ Drop(argument_count, RCX);
}
-void FlowGraphCompiler::EmitInstanceCall(ExternalLabel* target_label,
+void FlowGraphCompiler::EmitInstanceCall(const Code& target,
const ICData& ic_data,
intptr_t argument_count,
intptr_t deopt_id,
@@ -1298,7 +1289,7 @@ void FlowGraphCompiler::EmitInstanceCall(ExternalLabel* target_label,
__ LoadObject(RBX, ic_data, PP);
GenerateDartCall(deopt_id,
token_pos,
- target_label,
+ target,
RawPcDescriptors::kIcCall,
locs);
__ Drop(argument_count, RCX);
@@ -1361,7 +1352,7 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
// we can record the outgoing edges to other code.
GenerateDartCall(deopt_id,
token_pos,
- &stub_code->CallStaticFunctionLabel(),
+ Code::Handle(stub_code->CallStaticFunctionCode()),
RawPcDescriptors::kOther,
locs);
AddStaticCallTarget(function);
@@ -1388,9 +1379,11 @@ Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
__ pushq(reg);
__ PushObject(obj, PP);
if (is_optimizing()) {
- __ CallPatchable(&stub_code->OptimizedIdenticalWithNumberCheckLabel());
+ __ CallPatchable(Code::Handle(
+ stub_code->OptimizedIdenticalWithNumberCheckCode()));
} else {
- __ CallPatchable(&stub_code->UnoptimizedIdenticalWithNumberCheckLabel());
+ __ CallPatchable(Code::Handle(
+ stub_code->UnoptimizedIdenticalWithNumberCheckCode()));
}
if (token_pos != Scanner::kNoSourcePos) {
AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall,
@@ -1416,9 +1409,11 @@ Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left,
__ pushq(left);
__ pushq(right);
if (is_optimizing()) {
- __ CallPatchable(&stub_code->OptimizedIdenticalWithNumberCheckLabel());
+ __ CallPatchable(Code::Handle(
+ stub_code->OptimizedIdenticalWithNumberCheckCode()));
} else {
- __ CallPatchable(&stub_code->UnoptimizedIdenticalWithNumberCheckLabel());
+ __ CallPatchable(Code::Handle(
+ stub_code->UnoptimizedIdenticalWithNumberCheckCode()));
}
if (token_pos != Scanner::kNoSourcePos) {
AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall,
@@ -1508,7 +1503,7 @@ void FlowGraphCompiler::EmitTestAndCall(const ICData& ic_data,
// that we can record the outgoing edges to other code.
GenerateDartCall(deopt_id,
token_index,
- &stub_code->CallStaticFunctionLabel(),
+ Code::Handle(stub_code->CallStaticFunctionCode()),
RawPcDescriptors::kOther,
locs);
const Function& function = Function::Handle(ic_data.GetTargetAt(0));
@@ -1549,7 +1544,7 @@ void FlowGraphCompiler::EmitTestAndCall(const ICData& ic_data,
// that we can record the outgoing edges to other code.
GenerateDartCall(deopt_id,
token_index,
- &stub_code->CallStaticFunctionLabel(),
+ Code::Handle(stub_code->CallStaticFunctionCode()),
RawPcDescriptors::kOther,
locs);
const Function& function = *sorted[i].target;
