Index: runtime/vm/flow_graph_compiler_ia32.cc
diff --git a/runtime/vm/flow_graph_compiler_ia32.cc b/runtime/vm/flow_graph_compiler_ia32.cc
index c6094fe3d7294076be554901359e762e7f115d61..622a9a867d37d4767e30749938fa50356f063d2f 100644
--- a/runtime/vm/flow_graph_compiler_ia32.cc
+++ b/runtime/vm/flow_graph_compiler_ia32.cc
@@ -33,6 +33,18 @@ DECLARE_FLAG(bool, enable_simd_inline);
 DECLARE_FLAG(bool, use_megamorphic_stub);
+void MegamorphicSlowPath::EmitNativeCode(FlowGraphCompiler* compiler) {
+  Assembler* assem = compiler->assembler();
+#define __ assem->
+  __ Bind(entry_label());
+  __ Comment("MegamorphicSlowPath");
+  compiler->EmitMegamorphicInstanceCall(ic_data_, argument_count_, deopt_id_,
+                                        token_pos_, locs_, try_index_);
+  __ jmp(exit_label());
+#undef __
+}
+
+
 FlowGraphCompiler::~FlowGraphCompiler() {
   // BlockInfos are zone-allocated, so their destructors are not called.
   // Verify the labels explicitly here.
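For context, the declaration this method implements is not part of this file's diff. A minimal sketch of what it presumably looks like in flow_graph_compiler.h, assuming the VM's SlowPathCode base class supplies entry_label() and exit_label():

class MegamorphicSlowPath : public SlowPathCode {
 public:
  MegamorphicSlowPath(const ICData& ic_data,
                      intptr_t argument_count,
                      intptr_t deopt_id,
                      intptr_t token_pos,
                      LocationSummary* locs,
                      intptr_t try_index)
      : SlowPathCode(),
        ic_data_(ic_data),
        argument_count_(argument_count),
        deopt_id_(deopt_id),
        token_pos_(token_pos),
        locs_(locs),
        try_index_(try_index) {}
  virtual ~MegamorphicSlowPath() {}

 private:
  virtual void EmitNativeCode(FlowGraphCompiler* compiler);

  // State captured at the call site and forwarded verbatim to
  // EmitMegamorphicInstanceCall in EmitNativeCode above.
  const ICData& ic_data_;
  intptr_t argument_count_;
  intptr_t deopt_id_;
  intptr_t token_pos_;
  LocationSummary* locs_;
  const intptr_t try_index_;  // Maps the slow-path call to its catch clause.
};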
@@ -1278,7 +1290,8 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
     intptr_t argument_count,
     intptr_t deopt_id,
     intptr_t token_pos,
-    LocationSummary* locs) {
+    LocationSummary* locs,
+    intptr_t try_index) {
   const String& name = String::Handle(zone(), ic_data.target_name());
   const Array& arguments_descriptor =
       Array::ZoneHandle(zone(), ic_data.arguments_descriptor());
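With the extra parameter, call sites that are not inside a slow-path stub are expected to pass the invalid sentinel so the fallback in the next hunk applies. A sketch of that pattern (the locals ic_data, argument_count, deopt_id, token_pos, and locs are illustrative):

// Outside a slow path: pass the invalid sentinel so the callee
// falls back to CurrentTryIndex() (see the next hunk).
compiler->EmitMegamorphicInstanceCall(ic_data, argument_count, deopt_id,
                                      token_pos, locs,
                                      CatchClauseNode::kInvalidTryIndex);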
@@ -1300,7 +1313,18 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
                        Thread::kNoDeoptId, token_pos);
   RecordSafepoint(locs);
   const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
-  if (is_optimizing()) {
+  if (Compiler::always_optimize()) {
[Florian Schneider, 2015/12/14 16:07:03] This seems never true on ia32 and won't be tested.
[srdjan, 2015/12/15 17:52:03] Done.
+    // Megamorphic calls may occur in slow path stubs. If a valid
+    // try_index argument was passed in, use it.
+    if (try_index == CatchClauseNode::kInvalidTryIndex) {
+      try_index = CurrentTryIndex();
+    }
+    pc_descriptors_list()->AddDescriptor(RawPcDescriptors::kOther,
+                                         assembler()->CodeSize(),
+                                         Thread::kNoDeoptId,
+                                         token_pos,
+                                         try_index);
+  } else if (is_optimizing()) {
     AddDeoptIndexAtCall(deopt_id_after, token_pos);
   } else {
     // Add deoptimization continuation point after the call and before the
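Putting the two pieces together, an instruction backend that wants a megamorphic fallback would register the slow path roughly as follows. This is a sketch, not part of the CL: SomeInstr is a hypothetical instruction, and it assumes FlowGraphCompiler::AddSlowPathCode() plus the usual __ assembler shorthand from the ia32 backend:

void SomeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  // Capture the enclosing try index now, so the kOther descriptor emitted
  // inside EmitMegamorphicInstanceCall covers this call site's catch range.
  MegamorphicSlowPath* slow_path = new MegamorphicSlowPath(
      ic_data(), ArgumentCount(), deopt_id(), token_pos(), locs(),
      compiler->CurrentTryIndex());
  compiler->AddSlowPathCode(slow_path);

  // Fast path goes here; on failure, jump to the slow path and
  // resume at its exit label afterwards.
  __ jmp(slow_path->entry_label());
  __ Bind(slow_path->exit_label());
}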