Index: runtime/vm/flow_graph_compiler.cc
diff --git a/runtime/vm/flow_graph_compiler.cc b/runtime/vm/flow_graph_compiler.cc
index b983754ae6edd309d28cb7db7e6bf2191e313d72..084579a1f813d44cf386a7d8780eed9c0f27d3c4 100644
--- a/runtime/vm/flow_graph_compiler.cc
+++ b/runtime/vm/flow_graph_compiler.cc
@@ -30,19 +30,33 @@
 
 namespace dart {
 
-DEFINE_FLAG(bool, enable_simd_inline, true,
-    "Enable inlining of SIMD related method calls.");
-DEFINE_FLAG(bool, inline_smi_string_hashcode, true,
+DEFINE_FLAG(bool,
+            enable_simd_inline,
+            true,
+            "Enable inlining of SIMD related method calls.");
+DEFINE_FLAG(
+    bool,
+    inline_smi_string_hashcode,
+    true,
     "Inline hashcode for Smi and one-byte strings in case of megamorphic call");
-DEFINE_FLAG(int, inline_smi_string_hashcode_ratio, 50,
+DEFINE_FLAG(
+    int,
+    inline_smi_string_hashcode_ratio,
+    50,
     "Minimal hotness (0..100) of one-byte-string before inlining its hashcode");
-DEFINE_FLAG(int, min_optimization_counter_threshold, 5000,
-    "The minimum invocation count for a function.");
-DEFINE_FLAG(int, optimization_counter_scale, 2000,
-    "The scale of invocation count, by size of the function.");
+DEFINE_FLAG(int,
+            min_optimization_counter_threshold,
+            5000,
+            "The minimum invocation count for a function.");
+DEFINE_FLAG(int,
+            optimization_counter_scale,
+            2000,
+            "The scale of invocation count, by size of the function.");
 DEFINE_FLAG(bool, source_lines, false, "Emit source line as assembly comment.");
-DEFINE_FLAG(bool, trace_inlining_intervals, false,
-    "Inlining interval diagnostics");
+DEFINE_FLAG(bool,
+            trace_inlining_intervals,
+            false,
+            "Inlining interval diagnostics");
 
 DECLARE_FLAG(bool, code_comments);
 DECLARE_FLAG(charp, deoptimize_filter);
@@ -164,7 +178,7 @@ void CompilerDeoptInfo::EmitMaterializations(Environment* env,
 // TODO(srdjan): Add check and code if Smi class is hot.
 bool FlowGraphCompiler::ShouldInlineSmiStringHashCode(const ICData& ic_data) {
   if (!FLAG_inline_smi_string_hashcode ||
-    (ic_data.target_name() != Symbols::hashCode().raw())) {
+      (ic_data.target_name() != Symbols::hashCode().raw())) {
     return false;
   }
   // Precompiled code has no ICData, optimistically inline it.
@@ -192,57 +206,55 @@ FlowGraphCompiler::FlowGraphCompiler(
     const GrowableArray<const Function*>& inline_id_to_function,
     const GrowableArray<TokenPosition>& inline_id_to_token_pos,
     const GrowableArray<intptr_t>& caller_inline_id)
-  : thread_(Thread::Current()),
-    zone_(Thread::Current()->zone()),
-    assembler_(assembler),
-    parsed_function_(parsed_function),
-    flow_graph_(*flow_graph),
-    block_order_(*flow_graph->CodegenBlockOrder(is_optimizing)),
-    current_block_(NULL),
-    exception_handlers_list_(NULL),
-    pc_descriptors_list_(NULL),
-    stackmap_table_builder_(NULL),
-    code_source_map_builder_(NULL),
-    saved_code_size_(0),
-    block_info_(block_order_.length()),
-    deopt_infos_(),
-    static_calls_target_table_(),
-    is_optimizing_(is_optimizing),
-    may_reoptimize_(false),
-    intrinsic_mode_(false),
-    double_class_(Class::ZoneHandle(
-        isolate()->object_store()->double_class())),
-    mint_class_(Class::ZoneHandle(
-        isolate()->object_store()->mint_class())),
-    float32x4_class_(Class::ZoneHandle(
-        isolate()->object_store()->float32x4_class())),
-    float64x2_class_(Class::ZoneHandle(
-        isolate()->object_store()->float64x2_class())),
-    int32x4_class_(Class::ZoneHandle(
-        isolate()->object_store()->int32x4_class())),
-    list_class_(Class::ZoneHandle(
-        Library::Handle(Library::CoreLibrary()).
-            LookupClass(Symbols::List()))),
-    parallel_move_resolver_(this),
-    pending_deoptimization_env_(NULL),
-    deopt_id_to_ic_data_(NULL),
-    edge_counters_array_(Array::ZoneHandle()),
-    inlined_code_intervals_(Array::ZoneHandle(Object::empty_array().raw())),
-    inline_id_to_function_(inline_id_to_function),
-    inline_id_to_token_pos_(inline_id_to_token_pos),
-    caller_inline_id_(caller_inline_id) {
+    : thread_(Thread::Current()),
+      zone_(Thread::Current()->zone()),
+      assembler_(assembler),
+      parsed_function_(parsed_function),
+      flow_graph_(*flow_graph),
+      block_order_(*flow_graph->CodegenBlockOrder(is_optimizing)),
+      current_block_(NULL),
+      exception_handlers_list_(NULL),
+      pc_descriptors_list_(NULL),
+      stackmap_table_builder_(NULL),
+      code_source_map_builder_(NULL),
+      saved_code_size_(0),
+      block_info_(block_order_.length()),
+      deopt_infos_(),
+      static_calls_target_table_(),
+      is_optimizing_(is_optimizing),
+      may_reoptimize_(false),
+      intrinsic_mode_(false),
+      double_class_(
+          Class::ZoneHandle(isolate()->object_store()->double_class())),
+      mint_class_(Class::ZoneHandle(isolate()->object_store()->mint_class())),
+      float32x4_class_(
+          Class::ZoneHandle(isolate()->object_store()->float32x4_class())),
+      float64x2_class_(
+          Class::ZoneHandle(isolate()->object_store()->float64x2_class())),
+      int32x4_class_(
+          Class::ZoneHandle(isolate()->object_store()->int32x4_class())),
+      list_class_(Class::ZoneHandle(Library::Handle(Library::CoreLibrary())
+                                        .LookupClass(Symbols::List()))),
+      parallel_move_resolver_(this),
+      pending_deoptimization_env_(NULL),
+      deopt_id_to_ic_data_(NULL),
+      edge_counters_array_(Array::ZoneHandle()),
+      inlined_code_intervals_(Array::ZoneHandle(Object::empty_array().raw())),
+      inline_id_to_function_(inline_id_to_function),
+      inline_id_to_token_pos_(inline_id_to_token_pos),
+      caller_inline_id_(caller_inline_id) {
   ASSERT(flow_graph->parsed_function().function().raw() ==
          parsed_function.function().raw());
   if (!is_optimizing) {
     const intptr_t len = thread()->deopt_id();
-    deopt_id_to_ic_data_ = new(zone()) ZoneGrowableArray<const ICData*>(len);
+    deopt_id_to_ic_data_ = new (zone()) ZoneGrowableArray<const ICData*>(len);
     deopt_id_to_ic_data_->SetLength(len);
     for (intptr_t i = 0; i < len; i++) {
       (*deopt_id_to_ic_data_)[i] = NULL;
     }
     // TODO(fschneider): Abstract iteration into ICDataArrayIterator.
-    const Array& old_saved_ic_data = Array::Handle(zone(),
-        flow_graph->function().ic_data_array());
+    const Array& old_saved_ic_data =
+        Array::Handle(zone(), flow_graph->function().ic_data_array());
     const intptr_t saved_len =
         old_saved_ic_data.IsNull() ? 0 : old_saved_ic_data.Length();
     for (intptr_t i = 1; i < saved_len; i++) {
@@ -257,16 +269,12 @@ FlowGraphCompiler::FlowGraphCompiler(
 
 
 bool FlowGraphCompiler::IsUnboxedField(const Field& field) {
-  bool valid_class = (SupportsUnboxedDoubles() &&
-                      (field.guarded_cid() == kDoubleCid)) ||
-                     (SupportsUnboxedSimd128() &&
-                      (field.guarded_cid() == kFloat32x4Cid)) ||
-                     (SupportsUnboxedSimd128() &&
-                      (field.guarded_cid() == kFloat64x2Cid));
-  return field.is_unboxing_candidate()
-      && !field.is_final()
-      && !field.is_nullable()
-      && valid_class;
+  bool valid_class =
+      (SupportsUnboxedDoubles() && (field.guarded_cid() == kDoubleCid)) ||
+      (SupportsUnboxedSimd128() && (field.guarded_cid() == kFloat32x4Cid)) ||
+      (SupportsUnboxedSimd128() && (field.guarded_cid() == kFloat64x2Cid));
+  return field.is_unboxing_candidate() && !field.is_final() &&
+         !field.is_nullable() && valid_class;
 }
 
 
@@ -278,8 +286,8 @@ bool FlowGraphCompiler::IsPotentialUnboxedField(const Field& field) {
 
 
 void FlowGraphCompiler::InitCompiler() {
-  pc_descriptors_list_ = new(zone()) DescriptorList(64);
-  exception_handlers_list_ = new(zone()) ExceptionHandlerList();
+  pc_descriptors_list_ = new (zone()) DescriptorList(64);
+  exception_handlers_list_ = new (zone()) ExceptionHandlerList();
   block_info_.Clear();
   // Conservative detection of leaf routines used to remove the stack check
   // on function entry.
@@ -288,7 +296,7 @@ void FlowGraphCompiler::InitCompiler() {
   // indicating a non-leaf routine and calls without IC data indicating
   // possible reoptimization.
   for (int i = 0; i < block_order_.length(); ++i) {
-    block_info_.Add(new(zone()) BlockInfo());
+    block_info_.Add(new (zone()) BlockInfo());
     if (is_optimizing() && !flow_graph().IsCompiledForOsr()) {
       BlockEntryInstr* entry = block_order_[i];
       for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) {
@@ -304,8 +312,7 @@ void FlowGraphCompiler::InitCompiler() {
         if ((ic_data != NULL) && (ic_data->NumberOfUsedChecks() == 0)) {
           may_reoptimize_ = true;
         }
-        if (is_leaf &&
-            !current->IsCheckStackOverflow() &&
+        if (is_leaf && !current->IsCheckStackOverflow() &&
             !current->IsParallelMove()) {
           // Note that we do not care if the code contains instructions that
           // can deoptimize.
@@ -352,8 +359,7 @@ bool FlowGraphCompiler::CanOSRFunction() const {
 
 
 bool FlowGraphCompiler::ForceSlowPathForStackOverflow() const {
-  if ((FLAG_stacktrace_every > 0) ||
-      (FLAG_deoptimize_every > 0) ||
+  if ((FLAG_stacktrace_every > 0) || (FLAG_deoptimize_every > 0) ||
       (isolate()->reload_every_n_stack_overflow_checks() > 0)) {
     return true;
   }
@@ -362,8 +368,7 @@ bool FlowGraphCompiler::ForceSlowPathForStackOverflow() const {
              FLAG_stacktrace_filter) != NULL) {
     return true;
   }
-  if (is_optimizing() &&
-      FLAG_deoptimize_filter != NULL &&
+  if (is_optimizing() && FLAG_deoptimize_filter != NULL &&
       strstr(parsed_function().function().ToFullyQualifiedCString(),
              FLAG_deoptimize_filter) != NULL) {
     return true;
@@ -373,8 +378,7 @@ bool FlowGraphCompiler::ForceSlowPathForStackOverflow() const {
 
 
 static bool IsEmptyBlock(BlockEntryInstr* block) {
-  return !block->IsCatchBlockEntry() &&
-         !block->HasNonRedundantParallelMove() &&
+  return !block->IsCatchBlockEntry() && !block->HasNonRedundantParallelMove() &&
          block->next()->IsGoto() &&
          !block->next()->AsGoto()->HasNonRedundantParallelMove() &&
          !block->IsIndirectEntry();
@@ -435,12 +439,11 @@ void FlowGraphCompiler::EmitInstructionPrologue(Instruction* instr) {
       // Instructions that can be deoptimization targets need to record kDeopt
       // PcDescriptor corresponding to their deopt id. GotoInstr records its
       // own so that it can control the placement.
-      AddCurrentDescriptor(RawPcDescriptors::kDeopt,
-                           instr->deopt_id(),
+      AddCurrentDescriptor(RawPcDescriptors::kDeopt, instr->deopt_id(),
                            instr->token_pos());
     }
     AllocateRegistersLocally(instr);
-  } else if (instr->MayThrow() &&
+  } else if (instr->MayThrow() &&
              (CurrentTryIndex() != CatchClauseNode::kInvalidTryIndex)) {
     // Optimized try-block: Sync locals to fixed stack locations.
     EmitTrySync(instr, CurrentTryIndex());
@@ -448,7 +451,6 @@ void FlowGraphCompiler::EmitInstructionPrologue(Instruction* instr) {
 }
 
 
-
 void FlowGraphCompiler::EmitSourceLine(Instruction* instr) {
   if (!instr->token_pos().IsReal() || (instr->env() == NULL)) {
     return;
@@ -459,10 +461,9 @@ void FlowGraphCompiler::EmitSourceLine(Instruction* instr) {
   intptr_t column_nr;
   script.GetTokenLocation(instr->token_pos(), &line_nr, &column_nr);
   const String& line = String::Handle(zone(), script.GetLine(line_nr));
-  assembler()->Comment("Line %" Pd " in '%s':\n %s",
-                       line_nr,
-                       instr->env()->function().ToFullyQualifiedCString(),
-                       line.ToCString());
+  assembler()->Comment("Line %" Pd " in '%s':\n %s", line_nr,
+                       instr->env()->function().ToFullyQualifiedCString(),
+                       line.ToCString());
 }
 
 
@@ -473,10 +474,9 @@ static void LoopInfoComment(
   if (Assembler::EmittingComments()) {
     for (intptr_t loop_id = 0; loop_id < loop_headers.length(); ++loop_id) {
       for (BitVector::Iterator loop_it(loop_headers[loop_id]->loop_info());
-           !loop_it.Done();
-           loop_it.Advance()) {
+           !loop_it.Done(); loop_it.Advance()) {
         if (loop_it.Current() == block.preorder_number()) {
-          assembler->Comment(" Loop %" Pd "", loop_id);
+          assembler->Comment("  Loop %" Pd "", loop_id);
         }
       }
     }
@@ -494,8 +494,8 @@ struct IntervalStruct {
   IntervalStruct(intptr_t s, TokenPosition tp, intptr_t id)
       : start(s), pos(tp), inlining_id(id) {}
   void Dump() {
-    THR_Print("start: 0x%" Px " iid: %" Pd " pos: %s",
-              start, inlining_id, pos.ToCString());
+    THR_Print("start: 0x%" Px " iid: %" Pd " pos: %s", start, inlining_id,
+              pos.ToCString());
   }
 };
 
@@ -562,8 +562,8 @@ void FlowGraphCompiler::VisitBlocks() {
           }
         }
       }
-      if (FLAG_code_comments ||
-          FLAG_disassemble || FLAG_disassemble_optimized) {
+      if (FLAG_code_comments || FLAG_disassemble ||
+          FLAG_disassemble_optimized) {
         if (FLAG_source_lines) {
           EmitSourceLine(instr);
         }
@@ -609,8 +609,8 @@ void FlowGraphCompiler::VisitBlocks() {
           *inline_id_to_function_.At(intervals[i].inlining_id);
       intervals[i].Dump();
       THR_Print(" parent iid %" Pd " %s\n",
-          caller_inline_id_[intervals[i].inlining_id],
-          function.ToQualifiedCString());
+                caller_inline_id_[intervals[i].inlining_id],
+                function.ToQualifiedCString());
     }
 
     const intptr_t id = intervals[i].inlining_id;
@@ -628,8 +628,8 @@ void FlowGraphCompiler::VisitBlocks() {
     LogBlock lb;
     THR_Print("Intervals:\n");
     for (intptr_t cc = 0; cc < caller_inline_id_.length(); cc++) {
-      THR_Print("  iid: %" Pd " caller iid: %" Pd "\n",
-                cc, caller_inline_id_[cc]);
+      THR_Print("  iid: %" Pd " caller iid: %" Pd "\n", cc,
+                caller_inline_id_[cc]);
     }
     Smi& temp = Smi::Handle();
     for (intptr_t i = 0; i < inlined_code_intervals_.Length();
@@ -664,7 +664,7 @@ void FlowGraphCompiler::EmitTrySync(Instruction* instr, intptr_t try_index) {
   // Parameters first.
   intptr_t i = 0;
   const intptr_t num_non_copied_params = flow_graph().num_non_copied_params();
-  ParallelMoveInstr* move_instr = new(zone()) ParallelMoveInstr();
+  ParallelMoveInstr* move_instr = new (zone()) ParallelMoveInstr();
   for (; i < num_non_copied_params; ++i) {
     // Don't sync captured parameters. They are not in the environment.
     if (flow_graph().captured_parameters()->Contains(i)) continue;
@@ -717,20 +717,18 @@ intptr_t FlowGraphCompiler::StackSize() const {
     return flow_graph_.graph_entry()->spill_slot_count();
   } else {
     return parsed_function_.num_stack_locals() +
-        parsed_function_.num_copied_params();
+           parsed_function_.num_copied_params();
   }
 }
 
 
-Label* FlowGraphCompiler::GetJumpLabel(
-    BlockEntryInstr* block_entry) const {
+Label* FlowGraphCompiler::GetJumpLabel(BlockEntryInstr* block_entry) const {
   const intptr_t block_index = block_entry->postorder_number();
   return block_info_[block_index]->jump_label();
 }
 
 
-bool FlowGraphCompiler::WasCompacted(
-    BlockEntryInstr* block_entry) const {
+bool FlowGraphCompiler::WasCompacted(BlockEntryInstr* block_entry) const {
   const intptr_t block_index = block_entry->postorder_number();
   return block_info_[block_index]->WasCompacted();
 }
@@ -751,7 +749,7 @@ BranchLabels FlowGraphCompiler::CreateBranchLabels(BranchInstr* branch) const {
   Label* true_label = GetJumpLabel(branch->true_successor());
   Label* false_label = GetJumpLabel(branch->false_successor());
   Label* fall_through = NextNonEmptyLabel();
-  BranchLabels result = { true_label, false_label, fall_through };
+  BranchLabels result = {true_label, false_label, fall_through};
   return result;
 }
 
@@ -780,11 +778,8 @@ void FlowGraphCompiler::AddExceptionHandler(intptr_t try_index,
                                             intptr_t pc_offset,
                                             const Array& handler_types,
                                             bool needs_stacktrace) {
-  exception_handlers_list_->AddHandler(try_index,
-                                       outer_try_index,
-                                       pc_offset,
-                                       handler_types,
-                                       needs_stacktrace);
+  exception_handlers_list_->AddHandler(try_index, outer_try_index, pc_offset,
+                                       handler_types, needs_stacktrace);
 }
 
 
@@ -799,25 +794,22 @@ void FlowGraphCompiler::AddCurrentDescriptor(RawPcDescriptors::Kind kind,
                                              TokenPosition token_pos) {
   // When running with optimizations disabled, don't emit deopt-descriptors.
   if (!CanOptimize() && (kind == RawPcDescriptors::kDeopt)) return;
-  pc_descriptors_list()->AddDescriptor(kind,
-                                       assembler()->CodeSize(),
-                                       deopt_id,
-                                       token_pos,
-                                       CurrentTryIndex());
+  pc_descriptors_list()->AddDescriptor(kind, assembler()->CodeSize(), deopt_id,
+                                       token_pos, CurrentTryIndex());
 }
 
 
 void FlowGraphCompiler::AddStaticCallTarget(const Function& func) {
   ASSERT(func.IsZoneHandle());
   static_calls_target_table_.Add(
-      new(zone()) StaticCallsStruct(assembler()->CodeSize(), &func, NULL));
+      new (zone()) StaticCallsStruct(assembler()->CodeSize(), &func, NULL));
 }
 
 
 void FlowGraphCompiler::AddStubCallTarget(const Code& code) {
   ASSERT(code.IsZoneHandle());
   static_calls_target_table_.Add(
-      new(zone()) StaticCallsStruct(assembler()->CodeSize(), NULL, &code));
+      new (zone()) StaticCallsStruct(assembler()->CodeSize(), NULL, &code));
 }
 
 
@@ -825,10 +817,9 @@ void FlowGraphCompiler::AddDeoptIndexAtCall(intptr_t deopt_id) {
   ASSERT(is_optimizing());
   ASSERT(!intrinsic_mode());
   CompilerDeoptInfo* info =
-      new(zone()) CompilerDeoptInfo(deopt_id,
-                                    ICData::kDeoptAtCall,
-                                    0,  // No flags.
-                                    pending_deoptimization_env_);
+      new (zone()) CompilerDeoptInfo(deopt_id, ICData::kDeoptAtCall,
+                                     0,  // No flags.
+                                     pending_deoptimization_env_);
   info->set_pc_offset(assembler()->CodeSize());
   deopt_infos_.Add(info);
 }
@@ -841,28 +832,28 @@ void FlowGraphCompiler::AddDeoptIndexAtCall(intptr_t deopt_id) {
 
 
 void FlowGraphCompiler::RecordSafepoint(LocationSummary* locs,
                                         intptr_t slow_path_argument_count) {
   if (is_optimizing() || locs->live_registers()->HasUntaggedValues()) {
-    const intptr_t spill_area_size = is_optimizing() ?
-        flow_graph_.graph_entry()->spill_slot_count() : 0;
+    const intptr_t spill_area_size =
+        is_optimizing() ? flow_graph_.graph_entry()->spill_slot_count() : 0;
     RegisterSet* registers = locs->live_registers();
     ASSERT(registers != NULL);
-    const intptr_t kFpuRegisterSpillFactor =
-        kFpuRegisterSize / kWordSize;
+    const intptr_t kFpuRegisterSpillFactor = kFpuRegisterSize / kWordSize;
-    const intptr_t live_registers_size = registers->CpuRegisterCount() +
+    const intptr_t live_registers_size =
+        registers->CpuRegisterCount() +
         (registers->FpuRegisterCount() * kFpuRegisterSpillFactor);
     BitmapBuilder* bitmap = locs->stack_bitmap();
 
-    // An instruction may have two safepoints in deferred code. The
-    // call to RecordSafepoint has the side-effect of appending the live
-    // registers to the bitmap. This is why the second call to RecordSafepoint
-    // with the same instruction (and same location summary) sees a bitmap that
-    // is larger that StackSize(). It will never be larger than StackSize() +
-    // live_registers_size.
-    // The first safepoint will grow the bitmap to be the size of
-    // spill_area_size but the second safepoint will truncate the bitmap and
-    // append the live registers to it again. The bitmap produced by both calls
-    // will be the same.
+// An instruction may have two safepoints in deferred code. The
+// call to RecordSafepoint has the side-effect of appending the live
+// registers to the bitmap. This is why the second call to RecordSafepoint
+// with the same instruction (and same location summary) sees a bitmap that
+// is larger that StackSize(). It will never be larger than StackSize() +
+// live_registers_size.
+// The first safepoint will grow the bitmap to be the size of
+// spill_area_size but the second safepoint will truncate the bitmap and
+// append the live registers to it again. The bitmap produced by both calls
+// will be the same.
 #if !defined(TARGET_ARCH_DBC)
     ASSERT(bitmap->Length() <= (spill_area_size + live_registers_size));
     bitmap->SetLength(spill_area_size);
@@ -913,8 +904,7 @@ void FlowGraphCompiler::RecordSafepoint(LocationSummary* locs,
     // The slow path area Outside the spill area contains are live registers
     // and pushed arguments for calls inside the slow path.
     intptr_t slow_path_bit_count = bitmap->Length() - spill_area_size;
-    stackmap_table_builder()->AddEntry(assembler()->CodeSize(),
-                                       bitmap,
+    stackmap_table_builder()->AddEntry(assembler()->CodeSize(), bitmap,
                                        slow_path_bit_count);
   }
 }
@@ -969,8 +959,8 @@ Environment* FlowGraphCompiler::SlowPathEnvironmentFor(
   for (Environment::DeepIterator it(env); !it.Done(); it.Advance()) {
     Location loc = it.CurrentLocation();
     Value* value = it.CurrentValue();
-    it.SetCurrentLocation(loc.RemapForSlowPath(
-        value->definition(), cpu_reg_slots, fpu_reg_slots));
+    it.SetCurrentLocation(loc.RemapForSlowPath(value->definition(),
+                                               cpu_reg_slots, fpu_reg_slots));
   }
 
   return env;
@@ -997,11 +987,8 @@ Label* FlowGraphCompiler::AddDeoptStub(intptr_t deopt_id,
   }
 
   ASSERT(is_optimizing_);
-  CompilerDeoptInfoWithStub* stub =
-      new(zone()) CompilerDeoptInfoWithStub(deopt_id,
-                                            reason,
-                                            flags,
-                                            pending_deoptimization_env_);
+  CompilerDeoptInfoWithStub* stub = new (zone()) CompilerDeoptInfoWithStub(
+      deopt_id, reason, flags, pending_deoptimization_env_);
   deopt_infos_.Add(stub);
   return stub->entry_label();
 }
@@ -1015,15 +1002,11 @@ void FlowGraphCompiler::EmitDeopt(intptr_t deopt_id,
   ASSERT(!intrinsic_mode());
   // The pending deoptimization environment may be changed after this deopt is
   // emitted, so we need to make a copy.
-  Environment* env_copy =
-      pending_deoptimization_env_->DeepCopy(zone());
+  Environment* env_copy = pending_deoptimization_env_->DeepCopy(zone());
   CompilerDeoptInfo* info =
-      new(zone()) CompilerDeoptInfo(deopt_id,
-                                    reason,
-                                    flags,
-                                    env_copy);
+      new (zone()) CompilerDeoptInfo(deopt_id, reason, flags, env_copy);
   deopt_infos_.Add(info);
-  assembler()->Deopt(0, /*is_eager =*/ 1);
+  assembler()->Deopt(0, /*is_eager =*/1);
   info->set_pc_offset(assembler()->CodeSize());
 }
 #endif  // defined(TARGET_ARCH_DBC)
@@ -1037,7 +1020,7 @@ void FlowGraphCompiler::FinalizeExceptionHandlers(const Code& code) {
   if (FLAG_compiler_stats) {
     Thread* thread = Thread::Current();
     INC_STAT(thread, total_code_size,
-        ExceptionHandlers::InstanceSize(handlers.num_entries()));
+             ExceptionHandlers::InstanceSize(handlers.num_entries()));
     INC_STAT(thread, total_code_size, handlers.num_entries() * sizeof(uword));
   }
 }
@@ -1077,8 +1060,7 @@ RawArray* FlowGraphCompiler::CreateDeoptInfo(Assembler* assembler) {
     offset = Smi::New(deopt_infos_[i]->pc_offset());
     info = deopt_infos_[i]->CreateDeoptInfo(this, &builder, array);
     reason_and_flags = DeoptTable::EncodeReasonAndFlags(
-        deopt_infos_[i]->reason(),
-        deopt_infos_[i]->flags());
+        deopt_infos_[i]->reason(), deopt_infos_[i]->flags());
     DeoptTable::SetEntry(array, i, offset, info, reason_and_flags);
   }
   return array.raw();
@@ -1126,9 +1108,10 @@ void FlowGraphCompiler::FinalizeVarDescriptors(const Code& code) {
 
 void FlowGraphCompiler::FinalizeStaticCallTargetsTable(const Code& code) {
   ASSERT(code.static_calls_target_table() == Array::null());
-  const Array& targets = Array::Handle(zone(), Array::New(
-      (static_calls_target_table_.length() * Code::kSCallTableEntryLength),
-      Heap::kOld));
+  const Array& targets =
+      Array::Handle(zone(), Array::New((static_calls_target_table_.length() *
+                                        Code::kSCallTableEntryLength),
+                                       Heap::kOld));
   Smi& smi_offset = Smi::Handle(zone());
   for (intptr_t i = 0; i < static_calls_target_table_.length(); i++) {
     const intptr_t target_ix = Code::kSCallTableEntryLength * i;
@@ -1136,16 +1119,15 @@ void FlowGraphCompiler::FinalizeStaticCallTargetsTable(const Code& code) {
     targets.SetAt(target_ix + Code::kSCallTableOffsetEntry, smi_offset);
     if (static_calls_target_table_[i]->function != NULL) {
       targets.SetAt(target_ix + Code::kSCallTableFunctionEntry,
-          *static_calls_target_table_[i]->function);
+                    *static_calls_target_table_[i]->function);
     }
     if (static_calls_target_table_[i]->code != NULL) {
       targets.SetAt(target_ix + Code::kSCallTableCodeEntry,
-          *static_calls_target_table_[i]->code);
+                    *static_calls_target_table_[i]->code);
     }
   }
 
   code.set_static_calls_target_table(targets);
-  INC_STAT(Thread::Current(),
-           total_code_size,
+  INC_STAT(Thread::Current(), total_code_size,
            targets.Length() * sizeof(uword));
 }
@@ -1208,17 +1190,16 @@ bool FlowGraphCompiler::TryIntrinsify() {
 // DBC is very different from other architectures in how it performs instance
 // and static calls because it does not use stubs.
 #if !defined(TARGET_ARCH_DBC)
-void FlowGraphCompiler::GenerateInstanceCall(
-    intptr_t deopt_id,
-    TokenPosition token_pos,
-    intptr_t argument_count,
-    LocationSummary* locs,
-    const ICData& ic_data_in) {
+void FlowGraphCompiler::GenerateInstanceCall(intptr_t deopt_id,
+                                             TokenPosition token_pos,
+                                             intptr_t argument_count,
+                                             LocationSummary* locs,
+                                             const ICData& ic_data_in) {
   ICData& ic_data = ICData::ZoneHandle(ic_data_in.Original());
   if (FLAG_precompiled_mode) {
     ic_data = ic_data.AsUnaryClassChecks();
-    EmitSwitchableInstanceCall(ic_data, argument_count,
-                               deopt_id, token_pos, locs);
+    EmitSwitchableInstanceCall(ic_data, argument_count, deopt_id, token_pos,
+                               locs);
     return;
   }
   ASSERT(!ic_data.IsNull());
@@ -1244,22 +1225,19 @@ void FlowGraphCompiler::GenerateInstanceCall(
   }
 
   if (is_optimizing()) {
-    EmitMegamorphicInstanceCall(ic_data_in, argument_count,
-                                deopt_id, token_pos, locs,
-                                CatchClauseNode::kInvalidTryIndex);
+    EmitMegamorphicInstanceCall(ic_data_in, argument_count, deopt_id, token_pos,
+                                locs, CatchClauseNode::kInvalidTryIndex);
     return;
   }
 
   switch (ic_data.NumArgsTested()) {
     case 1:
-      EmitInstanceCall(
-          *StubCode::OneArgCheckInlineCache_entry(), ic_data, argument_count,
-          deopt_id, token_pos, locs);
+      EmitInstanceCall(*StubCode::OneArgCheckInlineCache_entry(), ic_data,
+                       argument_count, deopt_id, token_pos, locs);
       break;
    case 2:
-      EmitInstanceCall(
-          *StubCode::TwoArgsCheckInlineCache_entry(), ic_data, argument_count,
-          deopt_id, token_pos, locs);
+      EmitInstanceCall(*StubCode::TwoArgsCheckInlineCache_entry(), ic_data,
+                       argument_count, deopt_id, token_pos, locs);
      break;
     default:
       UNIMPLEMENTED();
@@ -1275,21 +1253,21 @@ void FlowGraphCompiler::GenerateStaticCall(intptr_t deopt_id,
                                            LocationSummary* locs,
                                            const ICData& ic_data_in) {
   const ICData& ic_data = ICData::ZoneHandle(ic_data_in.Original());
-  const Array& arguments_descriptor = Array::ZoneHandle(
-      ic_data.IsNull() ? ArgumentsDescriptor::New(argument_count,
-                                                  argument_names)
-                       : ic_data.arguments_descriptor());
+  const Array& arguments_descriptor =
+      Array::ZoneHandle(ic_data.IsNull() ? ArgumentsDescriptor::New(
+                                               argument_count, argument_names)
+                                         : ic_data.arguments_descriptor());
   if (is_optimizing()) {
-    EmitOptimizedStaticCall(function, arguments_descriptor,
-                            argument_count, deopt_id, token_pos, locs);
+    EmitOptimizedStaticCall(function, arguments_descriptor, argument_count,
+                            deopt_id, token_pos, locs);
   } else {
     ICData& call_ic_data = ICData::ZoneHandle(ic_data.raw());
     if (call_ic_data.IsNull()) {
      const intptr_t kNumArgsChecked = 0;
-      call_ic_data = GetOrAddStaticCallICData(deopt_id,
-                                              function,
-                                              arguments_descriptor,
-                                              kNumArgsChecked)->raw();
+      call_ic_data =
+          GetOrAddStaticCallICData(deopt_id, function, arguments_descriptor,
+                                   kNumArgsChecked)
+              ->raw();
    }
     EmitUnoptimizedStaticCall(argument_count, deopt_id, token_pos, locs,
                               call_ic_data);
@@ -1361,9 +1339,9 @@ void FlowGraphCompiler::EmitComment(Instruction* instr) {
 bool FlowGraphCompiler::NeedsEdgeCounter(TargetEntryInstr* block) {
   // Only emit an edge counter if there is not goto at the end of the block,
   // except for the entry block.
-  return (FLAG_reorder_basic_blocks
-      && (!block->last_instruction()->IsGoto()
-          || (block == flow_graph().graph_entry()->normal_entry())));
+  return (FLAG_reorder_basic_blocks &&
+          (!block->last_instruction()->IsGoto() ||
+           (block == flow_graph().graph_entry()->normal_entry())));
 }
 
 
@@ -1383,11 +1361,10 @@ static Register AllocateFreeRegister(bool* blocked_registers) {
 
 void FlowGraphCompiler::AllocateRegistersLocally(Instruction* instr) {
   ASSERT(!is_optimizing());
-  instr->InitializeLocationSummary(zone(),
-                                   false);  // Not optimizing.
+  instr->InitializeLocationSummary(zone(), false);  // Not optimizing.
 
-  // No need to allocate registers based on LocationSummary on DBC as in
-  // unoptimized mode it's a stack based bytecode just like IR itself.
+// No need to allocate registers based on LocationSummary on DBC as in
+// unoptimized mode it's a stack based bytecode just like IR itself.
 #if !defined(TARGET_ARCH_DBC)
   LocationSummary* locs = instr->locs();
 
@@ -1453,8 +1430,7 @@ void FlowGraphCompiler::AllocateRegistersLocally(Instruction* instr) {
     Location loc = locs->temp(i);
    if (loc.IsUnallocated()) {
       ASSERT(loc.policy() == Location::kRequiresRegister);
-      loc = Location::RegisterLocation(
-          AllocateFreeRegister(blocked_registers));
+      loc = Location::RegisterLocation(AllocateFreeRegister(blocked_registers));
       locs->set_temp(i, loc);
     }
   }
@@ -1466,8 +1442,8 @@ void FlowGraphCompiler::AllocateRegistersLocally(Instruction* instr) {
     case Location::kPrefersRegister:
     case Location::kRequiresRegister:
    case Location::kWritableRegister:
-      result_location = Location::RegisterLocation(
-          AllocateFreeRegister(blocked_registers));
+      result_location =
+          Location::RegisterLocation(AllocateFreeRegister(blocked_registers));
      break;
     case Location::kSameAsFirstInput:
       result_location = locs->in(0);
@@ -1654,19 +1630,15 @@ intptr_t ParallelMoveResolver::AllocateScratchRegister(
 
 
 ParallelMoveResolver::ScratchFpuRegisterScope::ScratchFpuRegisterScope(
-    ParallelMoveResolver* resolver, FpuRegister blocked)
-    : resolver_(resolver),
-      reg_(kNoFpuRegister),
-      spilled_(false) {
+    ParallelMoveResolver* resolver,
+    FpuRegister blocked)
+    : resolver_(resolver), reg_(kNoFpuRegister), spilled_(false) {
   COMPILE_ASSERT(FpuTMP != kNoFpuRegister);
-  uword blocked_mask = ((blocked != kNoFpuRegister) ? 1 << blocked : 0)
-      | 1 << FpuTMP;
-  reg_ = static_cast<FpuRegister>(
-      resolver_->AllocateScratchRegister(Location::kFpuRegister,
-                                         blocked_mask,
-                                         0,
-                                         kNumberOfFpuRegisters - 1,
-                                         &spilled_));
+  uword blocked_mask =
+      ((blocked != kNoFpuRegister) ? 1 << blocked : 0) | 1 << FpuTMP;
+  reg_ = static_cast<FpuRegister>(resolver_->AllocateScratchRegister(
+      Location::kFpuRegister, blocked_mask, 0, kNumberOfFpuRegisters - 1,
+      &spilled_));
 
   if (spilled_) {
     resolver->SpillFpuScratch(reg_);
@@ -1682,10 +1654,9 @@ ParallelMoveResolver::ScratchFpuRegisterScope::~ScratchFpuRegisterScope() {
 
 
 ParallelMoveResolver::ScratchRegisterScope::ScratchRegisterScope(
-    ParallelMoveResolver* resolver, Register blocked)
-    : resolver_(resolver),
-      reg_(kNoRegister),
-      spilled_(false) {
+    ParallelMoveResolver* resolver,
+    Register blocked)
+    : resolver_(resolver), reg_(kNoRegister), spilled_(false) {
   uword blocked_mask = RegMaskBit(blocked) | kReservedCpuRegisters;
   if (resolver->compiler_->intrinsic_mode()) {
     // Block additional registers that must be preserved for intrinsics.
@@ -1697,11 +1668,8 @@ ParallelMoveResolver::ScratchRegisterScope::ScratchRegisterScope(
 #endif
   }
   reg_ = static_cast<Register>(
-      resolver_->AllocateScratchRegister(Location::kRegister,
-                                         blocked_mask,
-                                         0,
-                                         kNumberOfCpuRegisters - 1,
-                                         &spilled_));
+      resolver_->AllocateScratchRegister(Location::kRegister, blocked_mask, 0,
+                                         kNumberOfCpuRegisters - 1, &spilled_));
 
   if (spilled_) {
     resolver->SpillScratch(reg_);
@@ -1756,9 +1724,10 @@ const ICData* FlowGraphCompiler::GetOrAddInstanceCallICData(
     ASSERT(!res->is_static_call());
     return res;
   }
-  const ICData& ic_data = ICData::ZoneHandle(zone(), ICData::New(
-      parsed_function().function(), target_name,
-      arguments_descriptor, deopt_id, num_args_tested, false));
+  const ICData& ic_data =
+      ICData::ZoneHandle(zone(), ICData::New(parsed_function().function(),
+                                             target_name, arguments_descriptor,
+                                             deopt_id, num_args_tested, false));
 #if defined(TAG_IC_DATA)
   ic_data.set_tag(Instruction::kInstanceCall);
 #endif
@@ -1784,9 +1753,11 @@ const ICData* FlowGraphCompiler::GetOrAddStaticCallICData(
     ASSERT(res->is_static_call());
     return res;
   }
-  const ICData& ic_data = ICData::ZoneHandle(zone(), ICData::New(
-      parsed_function().function(), String::Handle(zone(), target.name()),
-      arguments_descriptor, deopt_id, num_args_tested, true));
+  const ICData& ic_data = ICData::ZoneHandle(
+      zone(),
+      ICData::New(parsed_function().function(),
+                  String::Handle(zone(), target.name()), arguments_descriptor,
+                  deopt_id, num_args_tested, true));
   ic_data.AddTarget(target);
 #if defined(TAG_IC_DATA)
   ic_data.set_tag(Instruction::kStaticCall);
@@ -1808,7 +1779,7 @@ intptr_t FlowGraphCompiler::GetOptimizationThreshold() const {
     const intptr_t basic_blocks = flow_graph().preorder().length();
     ASSERT(basic_blocks > 0);
     threshold = FLAG_optimization_counter_scale * basic_blocks +
-        FLAG_min_optimization_counter_threshold;
+                FLAG_min_optimization_counter_threshold;
     if (threshold > FLAG_optimization_counter_threshold) {
       threshold = FLAG_optimization_counter_threshold;
     }
@@ -1840,8 +1811,8 @@ RawArray* FlowGraphCompiler::InliningIdToFunction() const {
   if (inline_id_to_function_.length() == 0) {
     return Object::empty_array().raw();
   }
-  const Array& res = Array::Handle(
-      Array::New(inline_id_to_function_.length(), Heap::kOld));
+  const Array& res =
+      Array::Handle(Array::New(inline_id_to_function_.length(), Heap::kOld));
   for (intptr_t i = 0; i < inline_id_to_function_.length(); i++) {
     res.SetAt(i, *inline_id_to_function_[i]);
   }
@@ -1853,8 +1824,8 @@ RawArray* FlowGraphCompiler::InliningIdToTokenPos() const {
   if (inline_id_to_token_pos_.length() == 0) {
     return Object::empty_array().raw();
   }
-  const Array& res = Array::Handle(zone(),
-      Array::New(inline_id_to_token_pos_.length(), Heap::kOld));
+  const Array& res = Array::Handle(
+      zone(), Array::New(inline_id_to_token_pos_.length(), Heap::kOld));
   Smi& smi = Smi::Handle(zone());
   for (intptr_t i = 0; i < inline_id_to_token_pos_.length(); i++) {
     smi = Smi::New(inline_id_to_token_pos_[i].value());
@@ -1868,8 +1839,8 @@ RawArray* FlowGraphCompiler::CallerInliningIdMap() const {
   if (caller_inline_id_.length() == 0) {
     return Object::empty_array().raw();
   }
-  const Array& res = Array::Handle(
-      Array::New(caller_inline_id_.length(), Heap::kOld));
+  const Array& res =
+      Array::Handle(Array::New(caller_inline_id_.length(), Heap::kOld));
   Smi& smi = Smi::Handle();
   for (intptr_t i = 0; i < caller_inline_id_.length(); i++) {
     smi = Smi::New(caller_inline_id_[i]);
@@ -1880,16 +1851,16 @@ RawArray* FlowGraphCompiler::CallerInliningIdMap() const {
 
 
 void FlowGraphCompiler::BeginCodeSourceRange() {
-NOT_IN_PRODUCT(
+#if !defined(PRODUCT)
   // Remember how many bytes of code we emitted so far. This function
   // is called before we call into an instruction's EmitNativeCode.
   saved_code_size_ = assembler()->CodeSize();
-);
+#endif  // !defined(PRODUCT)
 }
 
 
 bool FlowGraphCompiler::EndCodeSourceRange(TokenPosition token_pos) {
-NOT_IN_PRODUCT(
+#if !defined(PRODUCT)
   // This function is called after each instructions' EmitNativeCode.
   if (saved_code_size_ < assembler()->CodeSize()) {
     // We emitted more code, now associate the emitted code chunk with
@@ -1898,7 +1869,7 @@ NOT_IN_PRODUCT(
     BeginCodeSourceRange();
     return true;
   }
-);
+#endif  // !defined(PRODUCT)
   return false;
 }
 
@@ -1906,17 +1877,16 @@ NOT_IN_PRODUCT(
 #if !defined(TARGET_ARCH_DBC)
 // DBC emits calls very differently from other architectures due to its
 // interpreted nature.
-void FlowGraphCompiler::EmitPolymorphicInstanceCall(
-    const ICData& ic_data,
-    intptr_t argument_count,
-    const Array& argument_names,
-    intptr_t deopt_id,
-    TokenPosition token_pos,
-    LocationSummary* locs,
-    bool complete) {
+void FlowGraphCompiler::EmitPolymorphicInstanceCall(const ICData& ic_data,
+                                                    intptr_t argument_count,
+                                                    const Array& argument_names,
+                                                    intptr_t deopt_id,
+                                                    TokenPosition token_pos,
+                                                    LocationSummary* locs,
+                                                    bool complete) {
   if (FLAG_polymorphic_with_deopt) {
-    Label* deopt = AddDeoptStub(deopt_id,
-                                ICData::kDeoptPolymorphicInstanceCallTestFail);
+    Label* deopt =
+        AddDeoptStub(deopt_id, ICData::kDeoptPolymorphicInstanceCallTestFail);
     Label ok;
     EmitTestAndCall(ic_data, argument_count, argument_names,
                     deopt,  // No cid match.
@@ -1927,13 +1897,13 @@ void FlowGraphCompiler::EmitPolymorphicInstanceCall(
     if (complete) {
       Label ok;
       EmitTestAndCall(ic_data, argument_count, argument_names,
-                      NULL,  // No cid match.
-                      &ok,  // Found cid.
+                      NULL,  // No cid match.
+                      &ok,   // Found cid.
                       deopt_id, token_pos, locs, true);
       assembler()->Bind(&ok);
     } else {
-      EmitSwitchableInstanceCall(ic_data, argument_count,
-                                 deopt_id, token_pos, locs);
+      EmitSwitchableInstanceCall(ic_data, argument_count, deopt_id, token_pos,
+                                 locs);
     }
   }
 }
@@ -1953,7 +1923,7 @@ void FlowGraphCompiler::FrameStateUpdateWith(Instruction* instr) {
 
     case Instruction::kDropTemps:
       FrameStatePop(instr->locs()->input_count() +
-          instr->AsDropTemps()->num_temps());
+                    instr->AsDropTemps()->num_temps());
       break;
 
     default:
@@ -1973,8 +1943,7 @@ void FlowGraphCompiler::FrameStateUpdateWith(Instruction* instr) {
 
 void FlowGraphCompiler::FrameStatePush(Definition* defn) {
   Representation rep = defn->representation();
-  if ((rep == kUnboxedDouble) ||
-      (rep == kUnboxedFloat64x2) ||
+  if ((rep == kUnboxedDouble) || (rep == kUnboxedFloat64x2) ||
       (rep == kUnboxedFloat32x4)) {
     // LoadField instruction lies about its representation in the unoptimized
     // code because Definition::representation() can't depend on the type of
@@ -1992,8 +1961,8 @@ void FlowGraphCompiler::FrameStatePush(Definition* defn) {
 
 
 void FlowGraphCompiler::FrameStatePop(intptr_t count) {
   ASSERT(!is_optimizing());
-  frame_state_.TruncateTo(Utils::Maximum(static_cast<intptr_t>(0),
-                                         frame_state_.length() - count));
+  frame_state_.TruncateTo(
+      Utils::Maximum(static_cast<intptr_t>(0), frame_state_.length() - count));