| Index: runtime/vm/flow_graph_compiler_mips.cc
|
| diff --git a/runtime/vm/flow_graph_compiler_mips.cc b/runtime/vm/flow_graph_compiler_mips.cc
|
| index b8b749b4b26803ce58425fe1e0423e63aa531c4f..481590c5d073e62a3c79bc85bb8fe62935446bae 100644
|
| --- a/runtime/vm/flow_graph_compiler_mips.cc
|
| +++ b/runtime/vm/flow_graph_compiler_mips.cc
|
| @@ -1257,28 +1257,10 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
|
| __ Comment("MegamorphicCall");
|
| // Load receiver into T0,
|
| __ lw(T0, Address(SP, (argument_count - 1) * kWordSize));
|
| - Label done;
|
| - if (ShouldInlineSmiStringHashCode(ic_data)) {
|
| - Label megamorphic_call;
|
| - __ Comment("Inlined get:hashCode for Smi and OneByteString");
|
| - __ andi(CMPRES1, T0, Immediate(kSmiTagMask));
|
| - __ beq(CMPRES1, ZR, &done); // Is Smi.
|
| - __ delay_slot()->mov(V0, T0); // Move Smi hashcode to V0.
|
| -
|
| - __ LoadClassId(CMPRES1, T0); // Class ID check.
|
| - __ BranchNotEqual(CMPRES1, Immediate(kOneByteStringCid), &megamorphic_call);
|
| -
|
| - __ lw(V0, FieldAddress(T0, String::hash_offset()));
|
| - __ bne(V0, ZR, &done);
|
| -
|
| - __ Bind(&megamorphic_call);
|
| - __ Comment("Slow case: megamorphic call");
|
| - }
|
| __ LoadObject(S5, cache);
|
| __ lw(T9, Address(THR, Thread::megamorphic_call_checked_entry_offset()));
|
| __ jalr(T9);
|
|
|
| - __ Bind(&done);
|
| RecordSafepoint(locs, slow_path_argument_count);
|
| const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
|
| if (FLAG_precompiled_mode) {
|
| @@ -1547,7 +1529,8 @@ void FlowGraphCompiler::EmitTestAndCall(const ICData& ic_data,
|
| intptr_t deopt_id,
|
| TokenPosition token_index,
|
| LocationSummary* locs,
|
| - bool complete) {
|
| + bool complete,
|
| + intptr_t total_ic_calls) {
|
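| + // total_ic_calls gives the overall call count observed for this site; it
| + // is used below to decide which classes are too rare to check inline.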
| ASSERT(is_optimizing());
|
| __ Comment("EmitTestAndCall");
|
| const Array& arguments_descriptor = Array::ZoneHandle(
|
| @@ -1593,29 +1576,49 @@ void FlowGraphCompiler::EmitTestAndCall(const ICData& ic_data,
|
|
|
| __ Bind(&after_smi_test);
|
|
|
| - GrowableArray<CidTarget> sorted(num_checks);
|
| + ASSERT(!ic_data.IsNull() && (num_checks > 0));
|
| + GrowableArray<CidRangeTarget> sorted(num_checks);
|
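| + // Each CidRangeTarget covers a contiguous range of class ids that share
| + // the same target function, together with its observed call count.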
| SortICDataByCount(ic_data, &sorted, /* drop_smi = */ true);
|
|
|
| - // Value is not Smi,
|
| - const intptr_t kSortedLen = sorted.length();
|
| - // If kSortedLen is 0 then only a Smi check was needed; the Smi check above
|
| + const intptr_t sorted_len = sorted.length();
|
| + // If sorted_len is 0 then only a Smi check was needed; the Smi check above
|
| // will fail if there was only one check and receiver is not Smi.
|
| - if (kSortedLen == 0) return;
|
| + if (sorted_len == 0) return;
|
|
|
| + // Value is not Smi,
|
| __ LoadClassId(T2, T0);
|
| - for (intptr_t i = 0; i < kSortedLen; i++) {
|
| - const bool kIsLastCheck = (i == (kSortedLen - 1));
|
| - ASSERT(sorted[i].cid != kSmiCid);
|
| +
|
| + bool add_megamorphic_call = false;
|
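| + // T2 holds the receiver's class id minus |bias|.  Range checks below
| + // re-bias it so later comparisons only need small immediates.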
| + int bias = 0;
|
| +
|
| + for (intptr_t i = 0; i < sorted_len; i++) {
|
| + const bool is_last_check = (i == (sorted_len - 1));
|
| + int cid_start = sorted[i].cid_start;
|
| + int cid_end = sorted[i].cid_end;
|
| + int count = sorted[i].count;
|
| + if (!is_last_check && !complete && count < (total_ic_calls >> 5)) {
|
| + // This case is hit too rarely to be worth writing class-id checks inline
|
| + // for.
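| + // (Fewer than total_ic_calls / 32 hits, i.e. under about 3% of the calls.)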
|
| + add_megamorphic_call = true;
|
| + break;
|
| + }
|
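| + // The Smi case was handled before the class-id dispatch, so no remaining
| + // range may contain kSmiCid.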
| + ASSERT(cid_start > kSmiCid || cid_end < kSmiCid);
|
| Label next_test;
|
| - if (!complete) {
|
| - if (kIsLastCheck) {
|
| - __ BranchNotEqual(T2, Immediate(sorted[i].cid), failed);
|
| + if (!complete || !is_last_check) {
|
| + Label* next_label = is_last_check ? failed : &next_test;
|
| + if (cid_start == cid_end) {
|
| + __ BranchNotEqual(T2, Immediate(cid_start - bias), next_label);
|
| } else {
|
| - __ BranchNotEqual(T2, Immediate(sorted[i].cid), &next_test);
|
| - }
|
| - } else {
|
| - if (!kIsLastCheck) {
|
| - __ BranchNotEqual(T2, Immediate(sorted[i].cid), &next_test);
|
| + __ AddImmediate(T2, T2, bias - cid_start);
|
| + bias = cid_start;
|
| + // TODO(erikcorry): We should use sltiu instead of the temporary TMP if
|
| + // the range is small enough.
|
| + __ LoadImmediate(TMP, cid_end - cid_start);
|
| + // Reverse comparison so we get 1 if biased cid > TMP, i.e. cid is out of
|
| + // range.
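| + // (A cid below the range leaves T2 negative after the bias adjustment
| + // above, which the unsigned compare treats as huge, so it is rejected too.)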
|
| + __ sltu(TMP, TMP, T2);
|
| + __ bne(TMP, ZR, next_label);
|
| }
|
| }
|
| // Do not use the code from the function, but let the code be patched so
|
| @@ -1625,11 +1628,16 @@ void FlowGraphCompiler::EmitTestAndCall(const ICData& ic_data,
|
| *StubCode::CallStaticFunction_entry(),
|
| RawPcDescriptors::kOther, locs, function);
|
| __ Drop(argument_count);
|
| - if (!kIsLastCheck) {
|
| + if (!is_last_check) {
|
| __ b(match_found);
|
| }
|
| __ Bind(&next_test);
|
| }
|
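| + // If the inline tests were cut short above, one megamorphic call covers
| + // every receiver that was not checked inline.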
| + if (add_megamorphic_call) {
|
| + int try_index = CatchClauseNode::kInvalidTryIndex;
|
| + EmitMegamorphicInstanceCall(ic_data, argument_count, deopt_id, token_index,
|
| + locs, try_index, argument_count);
|
| + }
|
| }
|
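As a reading aid, the sketch below restates in plain C++ the decision the new EmitTestAndCall sequence makes for a receiver's class id, assuming the ranges are sorted by call count as SortICDataByCount produces them. It is illustrative only, not VM code: the names RangeInfo, PickTarget and Outcome are invented for the example, and the register-level bias/sltu tricks are folded into an ordinary range test.

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative only; these types are not part of the VM.
struct RangeInfo {
  intptr_t cid_start;  // First class id covered by this check.
  intptr_t cid_end;    // Last class id covered by this check.
  intptr_t count;      // Observed calls attributed to this range.
};

enum class Outcome { kDirectCall, kMegamorphicCall, kFailed };

// `sorted` excludes Smi (handled before the class-id dispatch); `complete`
// means the checks are known to cover every possible receiver.
Outcome PickTarget(intptr_t cid, const std::vector<RangeInfo>& sorted,
                   intptr_t total_ic_calls, bool complete) {
  for (std::size_t i = 0; i < sorted.size(); ++i) {
    const bool is_last_check = (i == sorted.size() - 1);
    const RangeInfo& r = sorted[i];
    // Ranges seen in fewer than 1/32 of the calls are not tested inline;
    // a single megamorphic call handles them and everything after them.
    if (!is_last_check && !complete && r.count < (total_ic_calls >> 5)) {
      return Outcome::kMegamorphicCall;
    }
    if (complete && is_last_check) {
      return Outcome::kDirectCall;  // No check is emitted for the final case.
    }
    if (cid >= r.cid_start && cid <= r.cid_end) {
      return Outcome::kDirectCall;  // Patched static call for this range.
    }
    // Otherwise fall through to the next test (or to `failed` below).
  }
  return Outcome::kFailed;  // Reached only when `complete` is false.
}

The emitted MIPS code reaches the same outcomes with at most one compare-and-branch per range: a single-cid range uses BranchNotEqual against the biased class id, while a wider range subtracts cid_start and relies on one unsigned sltu comparison to reject everything outside it.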