Index: runtime/vm/flow_graph_compiler_x64.cc
diff --git a/runtime/vm/flow_graph_compiler_x64.cc b/runtime/vm/flow_graph_compiler_x64.cc
index fc44e42addfe8a39128cfd3d1c1c68801d0d214d..1a29be51c2e3409778447c736722c4801831f80d 100644
--- a/runtime/vm/flow_graph_compiler_x64.cc
+++ b/runtime/vm/flow_graph_compiler_x64.cc
@@ -1082,7 +1082,8 @@ void FlowGraphCompiler::GenerateCall(TokenPosition token_pos,
                                      RawPcDescriptors::Kind kind,
                                      LocationSummary* locs) {
   __ Call(stub_entry);
-  EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, kind, locs);
+  AddCurrentDescriptor(kind, Thread::kNoDeoptId, token_pos);
+  RecordSafepoint(locs);
 }
@@ -1091,7 +1092,8 @@ void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos,
                                               RawPcDescriptors::Kind kind,
                                               LocationSummary* locs) {
   __ CallPatchable(stub_entry);
-  EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, kind, locs);
+  AddCurrentDescriptor(kind, Thread::kNoDeoptId, token_pos);
+  RecordSafepoint(locs);
 }
@@ -1101,7 +1103,8 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
                                          RawPcDescriptors::Kind kind,
                                          LocationSummary* locs) {
   __ CallPatchable(stub_entry);
-  EmitCallsiteMetaData(token_pos, deopt_id, kind, locs);
+  AddCurrentDescriptor(kind, deopt_id, token_pos);
+  RecordSafepoint(locs);
   // Marks either the continuation point in unoptimized code or the
   // deoptimization point in optimized code, after call.
   const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
@@ -1127,7 +1130,8 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
   ASSERT(is_optimizing());
   __ CallWithEquivalence(stub_entry, target);
-  EmitCallsiteMetaData(token_pos, deopt_id, kind, locs);
+  AddCurrentDescriptor(kind, deopt_id, token_pos);
+  RecordSafepoint(locs);
   // Marks either the continuation point in unoptimized code or the
   // deoptimization point in optimized code, after call.
   const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
@@ -1148,7 +1152,8 @@ void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,
                                             intptr_t argument_count,
                                             LocationSummary* locs) {
   __ CallRuntime(entry, argument_count);
-  EmitCallsiteMetaData(token_pos, deopt_id, RawPcDescriptors::kOther, locs);
+  AddCurrentDescriptor(RawPcDescriptors::kOther, deopt_id, token_pos);
+  RecordSafepoint(locs);
   if (deopt_id != Thread::kNoDeoptId) {
     // Marks either the continuation point in unoptimized code or the
     // deoptimization point in optimized code, after call.
@@ -1287,7 +1292,6 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
     // arguments are removed.
     AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
   }
-  EmitCatchEntryState(pending_deoptimization_env_, try_index);
   __ Drop(argument_count, RCX);
 }
@@ -1308,8 +1312,8 @@ void FlowGraphCompiler::EmitSwitchableInstanceCall(const ICData& ic_data,
   __ LoadUniqueObject(RBX, ic_data);
   __ call(RCX);
-
-  EmitCallsiteMetaData(token_pos, deopt_id, RawPcDescriptors::kOther, locs);
+  AddCurrentDescriptor(RawPcDescriptors::kOther, Thread::kNoDeoptId, token_pos);
+  RecordSafepoint(locs);
   const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
   if (is_optimizing()) {
     AddDeoptIndexAtCall(deopt_id_after);