Index: runtime/vm/flow_graph_compiler_arm64.cc
diff --git a/runtime/vm/flow_graph_compiler_arm64.cc b/runtime/vm/flow_graph_compiler_arm64.cc
index 57c9fd643a9d88c5807ad93135ef027c26029bc7..01d9561ac15be0ae7fe56e3cbf83c034e4dd61db 100644
--- a/runtime/vm/flow_graph_compiler_arm64.cc
+++ b/runtime/vm/flow_graph_compiler_arm64.cc
@@ -1178,20 +1178,19 @@ void FlowGraphCompiler::GenerateRuntimeCall(intptr_t token_pos,
 }
 
 
-void FlowGraphCompiler::EmitEdgeCounter() {
+void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
   // We do not check for overflow when incrementing the edge counter.  The
   // function should normally be optimized long before the counter can
   // overflow; and though we do not reset the counters when we optimize or
   // deoptimize, there is a bound on the number of
   // optimization/deoptimization cycles we will attempt.
+  ASSERT(!edge_counters_array_.IsNull());
   ASSERT(assembler_->constant_pool_allowed());
-  const Array& counter = Array::ZoneHandle(zone(), Array::New(1, Heap::kOld));
-  counter.SetAt(0, Smi::Handle(zone(), Smi::New(0)));
   __ Comment("Edge counter");
-  __ LoadUniqueObject(R0, counter);
-  __ LoadFieldFromOffset(TMP, R0, Array::element_offset(0));
+  __ LoadObject(R0, edge_counters_array_);
+  __ LoadFieldFromOffset(TMP, R0, Array::element_offset(edge_id));
   __ add(TMP, TMP, Operand(Smi::RawValue(1)));
-  __ StoreFieldToOffset(TMP, R0, Array::element_offset(0));
+  __ StoreFieldToOffset(TMP, R0, Array::element_offset(edge_id));
 }
 
 
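Note on the generated-code shape: before this patch, each emitted edge counter allocated its own one-element Array in old space and loaded it with LoadUniqueObject, so every instrumented edge cost a separate heap object and constant-pool entry. After the patch, all counters for a function share a single edge_counters_array_, and each counter lives in the slot at edge_id. The increment itself is unchanged and still skips the overflow check; adding Smi::RawValue(1) to a tagged Smi works because Dart Smis keep the payload shifted left with a zero tag bit, so tagged addition is plain integer addition of the raw values.

The following is a minimal, self-contained C++ sketch of that layout change, not VM code; the names EdgeCounters, SmiRawValue, and kSmiTagShift are illustrative assumptions rather than actual Dart VM declarations.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Dart Smis keep the payload shifted left by one bit with a zero tag bit,
    // so adding the raw (tagged) value of 1 increments the payload by 1.
    constexpr intptr_t kSmiTagShift = 1;
    constexpr intptr_t SmiRawValue(intptr_t value) { return value << kSmiTagShift; }

    // Models the post-patch layout: one array shared by the whole function,
    // with one Smi-tagged slot per edge, indexed by edge_id.
    class EdgeCounters {
     public:
      explicit EdgeCounters(intptr_t num_edges)
          : slots_(num_edges, SmiRawValue(0)) {}

      // Mirrors the emitted load/add/store sequence: no overflow check,
      // because the function should be optimized long before a counter
      // can grow large enough to overflow a Smi.
      void Increment(intptr_t edge_id) { slots_[edge_id] += SmiRawValue(1); }

      intptr_t Count(intptr_t edge_id) const {
        return slots_[edge_id] >> kSmiTagShift;
      }

     private:
      std::vector<intptr_t> slots_;
    };

    int main() {
      EdgeCounters counters(/*num_edges=*/3);
      counters.Increment(2);
      counters.Increment(2);
      printf("edge 2 executed %ld times\n",
             static_cast<long>(counters.Count(2)));
      return 0;
    }

A single shared array per function also means the counters can be read or discarded as a unit, and it is presumably what the new ASSERT(!edge_counters_array_.IsNull()) guards: the array must be set up once per compilation before any EmitEdgeCounter(edge_id) call is emitted.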