Index: runtime/vm/flow_graph_compiler_arm64.cc
diff --git a/runtime/vm/flow_graph_compiler_arm64.cc b/runtime/vm/flow_graph_compiler_arm64.cc
index f7ce841bb8f3ea3b09cd7eafafe65a3c6400379a..27b22e1f22943a142f24715e18e045148d7a2d8c 100644
--- a/runtime/vm/flow_graph_compiler_arm64.cc
+++ b/runtime/vm/flow_graph_compiler_arm64.cc
@@ -1191,20 +1191,19 @@ void FlowGraphCompiler::GenerateRuntimeCall(intptr_t token_pos,
 }
-void FlowGraphCompiler::EmitEdgeCounter() {
+void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
   // We do not check for overflow when incrementing the edge counter. The
   // function should normally be optimized long before the counter can
   // overflow; and though we do not reset the counters when we optimize or
   // deoptimize, there is a bound on the number of
   // optimization/deoptimization cycles we will attempt.
+  ASSERT(!edge_counters_array_.IsNull());
   ASSERT(assembler_->constant_pool_allowed());
-  const Array& counter = Array::ZoneHandle(zone(), Array::New(1, Heap::kOld));
-  counter.SetAt(0, Smi::Handle(zone(), Smi::New(0)));
   __ Comment("Edge counter");
-  __ LoadUniqueObject(R0, counter);
-  __ LoadFieldFromOffset(TMP, R0, Array::element_offset(0));
+  __ LoadObject(R0, edge_counters_array_);
+  __ LoadFieldFromOffset(TMP, R0, Array::element_offset(edge_id));
   __ add(TMP, TMP, Operand(Smi::RawValue(1)));
-  __ StoreFieldToOffset(TMP, R0, Array::element_offset(edge_id));
+  __ StoreFieldToOffset(TMP, R0, Array::element_offset(edge_id));
 }