Index: runtime/vm/flow_graph_compiler.cc
diff --git a/runtime/vm/flow_graph_compiler.cc b/runtime/vm/flow_graph_compiler.cc
index 74aef8565abac970fac71d35d3f46549421054a2..690c6d4adffcf5028eec60a7377e06679774a51c 100644
--- a/runtime/vm/flow_graph_compiler.cc
+++ b/runtime/vm/flow_graph_compiler.cc
@@ -112,7 +112,6 @@ COMPILE_ASSERT(FLAG_load_deferred_eagerly);

#endif // DART_PRECOMPILED_RUNTIME

-
// Assign locations to incoming arguments, i.e., values pushed above spill slots
// with PushArgument. Recursively allocates from outermost to innermost
// environment.
@@ -129,7 +128,6 @@ void CompilerDeoptInfo::AllocateIncomingParametersRecursive(
}
}

-
void CompilerDeoptInfo::EmitMaterializations(Environment* env,
DeoptInfoBuilder* builder) {
for (Environment::DeepIterator it(env); !it.Done(); it.Advance()) {
@@ -142,7 +140,6 @@ void CompilerDeoptInfo::EmitMaterializations(Environment* env,
}
}

-
FlowGraphCompiler::FlowGraphCompiler(
Assembler* assembler,
FlowGraph* flow_graph,
@@ -213,7 +210,6 @@ FlowGraphCompiler::FlowGraphCompiler(
inline_id_to_token_pos, inline_id_to_function);
}

-
bool FlowGraphCompiler::IsUnboxedField(const Field& field) {
bool valid_class =
(SupportsUnboxedDoubles() && (field.guarded_cid() == kDoubleCid)) ||
@@ -223,14 +219,12 @@ bool FlowGraphCompiler::IsUnboxedField(const Field& field) {
!field.is_nullable() && valid_class;
}

-
bool FlowGraphCompiler::IsPotentialUnboxedField(const Field& field) {
return field.is_unboxing_candidate() &&
(FlowGraphCompiler::IsUnboxedField(field) ||
(!field.is_final() && (field.guarded_cid() == kIllegalCid)));
}

-
void FlowGraphCompiler::InitCompiler() {
pc_descriptors_list_ = new (zone()) DescriptorList(64);
exception_handlers_list_ = new (zone()) ExceptionHandlerList();
@@ -289,22 +283,18 @@ void FlowGraphCompiler::InitCompiler() {
}
}

-
bool FlowGraphCompiler::CanOptimize() {
return FLAG_optimization_counter_threshold >= 0;
}

-
bool FlowGraphCompiler::CanOptimizeFunction() const {
return CanOptimize() && !parsed_function().function().HasBreakpoint();
}

-
bool FlowGraphCompiler::CanOSRFunction() const {
return isolate()->use_osr() && CanOptimizeFunction() && !is_optimizing();
}

-
bool FlowGraphCompiler::ForceSlowPathForStackOverflow() const {
if ((FLAG_stacktrace_every > 0) || (FLAG_deoptimize_every > 0) ||
(isolate()->reload_every_n_stack_overflow_checks() > 0)) {
@@ -323,7 +313,6 @@ bool FlowGraphCompiler::ForceSlowPathForStackOverflow() const {
return false;
}

-
static bool IsEmptyBlock(BlockEntryInstr* block) {
return !block->IsCatchBlockEntry() && !block->HasNonRedundantParallelMove() &&
block->next()->IsGoto() &&
@@ -331,7 +320,6 @@ static bool IsEmptyBlock(BlockEntryInstr* block) {
!block->IsIndirectEntry();
}

-
void FlowGraphCompiler::CompactBlock(BlockEntryInstr* block) {
BlockInfo* block_info = block_info_[block->postorder_number()];

@@ -350,7 +338,6 @@ void FlowGraphCompiler::CompactBlock(BlockEntryInstr* block) {
}
}

-
void FlowGraphCompiler::CompactBlocks() {
// This algorithm does not garbage collect blocks in place, but merely
// records forwarding label information. In this way it avoids having to
@@ -379,7 +366,6 @@ void FlowGraphCompiler::CompactBlocks() {
block_info->set_next_nonempty_label(nonempty_label);
}

-
void FlowGraphCompiler::EmitCatchEntryState(Environment* env,
intptr_t try_index) {
#if defined(DART_PRECOMPILER) || defined(DART_PRECOMPILED_RUNTIME)
@@ -454,7 +440,6 @@ void FlowGraphCompiler::EmitCatchEntryState(Environment* env,
#endif // defined(DART_PRECOMPILER) || defined(DART_PRECOMPILED_RUNTIME)
}

-
void FlowGraphCompiler::EmitCallsiteMetaData(TokenPosition token_pos,
intptr_t deopt_id,
RawPcDescriptors::Kind kind,
@@ -464,7 +449,6 @@ void FlowGraphCompiler::EmitCallsiteMetaData(TokenPosition token_pos,
EmitCatchEntryState();
}

-
void FlowGraphCompiler::EmitInstructionPrologue(Instruction* instr) {
if (!is_optimizing()) {
if (instr->CanBecomeDeoptimizationTarget() && !instr->IsGoto()) {
@@ -478,7 +462,6 @@ void FlowGraphCompiler::EmitInstructionPrologue(Instruction* instr) {
}
}

-
void FlowGraphCompiler::EmitSourceLine(Instruction* instr) {
if (!instr->token_pos().IsReal() || (instr->env() == NULL)) {
return;
@@ -494,7 +477,6 @@ void FlowGraphCompiler::EmitSourceLine(Instruction* instr) {
line.ToCString());
}

-
static void LoopInfoComment(
Assembler* assembler,
const BlockEntryInstr& block,
@@ -511,7 +493,6 @@ static void LoopInfoComment(
}
}

-
void FlowGraphCompiler::VisitBlocks() {
CompactBlocks();
const ZoneGrowableArray<BlockEntryInstr*>* loop_headers = NULL;
@@ -587,12 +568,10 @@ void FlowGraphCompiler::VisitBlocks() {
set_current_block(NULL);
}

-
void FlowGraphCompiler::Bailout(const char* reason) {
parsed_function_.Bailout("FlowGraphCompiler", reason);
}

-
intptr_t FlowGraphCompiler::StackSize() const {
if (is_optimizing_) {
return flow_graph_.graph_entry()->spill_slot_count();
@@ -602,30 +581,25 @@ intptr_t FlowGraphCompiler::StackSize() const {
}
}

-
Label* FlowGraphCompiler::GetJumpLabel(BlockEntryInstr* block_entry) const {
const intptr_t block_index = block_entry->postorder_number();
return block_info_[block_index]->jump_label();
}

-
bool FlowGraphCompiler::WasCompacted(BlockEntryInstr* block_entry) const {
const intptr_t block_index = block_entry->postorder_number();
return block_info_[block_index]->WasCompacted();
}

-
Label* FlowGraphCompiler::NextNonEmptyLabel() const {
const intptr_t current_index = current_block()->postorder_number();
return block_info_[current_index]->next_nonempty_label();
}

-
bool FlowGraphCompiler::CanFallThroughTo(BlockEntryInstr* block_entry) const {
return NextNonEmptyLabel() == GetJumpLabel(block_entry);
}

-
BranchLabels FlowGraphCompiler::CreateBranchLabels(BranchInstr* branch) const {
Label* true_label = GetJumpLabel(branch->true_successor());
Label* false_label = GetJumpLabel(branch->false_successor());
@@ -634,12 +608,10 @@ BranchLabels FlowGraphCompiler::CreateBranchLabels(BranchInstr* branch) const {
return result;
}

-
void FlowGraphCompiler::AddSlowPathCode(SlowPathCode* code) {
slow_path_code_.Add(code);
}

-
void FlowGraphCompiler::GenerateDeferredCode() {
for (intptr_t i = 0; i < slow_path_code_.length(); i++) {
BeginCodeSourceRange();
@@ -653,7 +625,6 @@ void FlowGraphCompiler::GenerateDeferredCode() {
}
}

-
void FlowGraphCompiler::AddExceptionHandler(intptr_t try_index,
intptr_t outer_try_index,
intptr_t pc_offset,
@@ -666,12 +637,10 @@ void FlowGraphCompiler::AddExceptionHandler(intptr_t try_index,
needs_stacktrace);
}

-
void FlowGraphCompiler::SetNeedsStackTrace(intptr_t try_index) {
exception_handlers_list_->SetNeedsStackTrace(try_index);
}

-
void FlowGraphCompiler::AddDescriptor(RawPcDescriptors::Kind kind,
intptr_t pc_offset,
intptr_t deopt_id,
@@ -684,7 +653,6 @@ void FlowGraphCompiler::AddDescriptor(RawPcDescriptors::Kind kind,
try_index);
}

-
// Uses current pc position and try-index.
void FlowGraphCompiler::AddCurrentDescriptor(RawPcDescriptors::Kind kind,
intptr_t deopt_id,
@@ -693,21 +661,18 @@ void FlowGraphCompiler::AddCurrentDescriptor(RawPcDescriptors::Kind kind,
CurrentTryIndex());
}

-
void FlowGraphCompiler::AddStaticCallTarget(const Function& func) {
ASSERT(func.IsZoneHandle());
static_calls_target_table_.Add(
new (zone()) StaticCallsStruct(assembler()->CodeSize(), &func, NULL));
}

-
void FlowGraphCompiler::AddStubCallTarget(const Code& code) {
ASSERT(code.IsZoneHandle());
static_calls_target_table_.Add(
new (zone()) StaticCallsStruct(assembler()->CodeSize(), NULL, &code));
}

-
CompilerDeoptInfo* FlowGraphCompiler::AddDeoptIndexAtCall(intptr_t deopt_id) {
ASSERT(is_optimizing());
ASSERT(!intrinsic_mode());
@@ -720,7 +685,6 @@ CompilerDeoptInfo* FlowGraphCompiler::AddDeoptIndexAtCall(intptr_t deopt_id) {
return info;
}

-
// This function must be in sync with FlowGraphCompiler::SaveLiveRegisters
// and FlowGraphCompiler::SlowPathEnvironmentFor.
// See StackFrame::VisitObjectPointers for the details of how stack map is
@@ -805,7 +769,6 @@ void FlowGraphCompiler::RecordSafepoint(LocationSummary* locs,
}
}

-
// This function must be kept in sync with:
//
// FlowGraphCompiler::RecordSafepoint
@@ -862,7 +825,6 @@ Environment* FlowGraphCompiler::SlowPathEnvironmentFor(
return env;
}

-
Label* FlowGraphCompiler::AddDeoptStub(intptr_t deopt_id,
ICData::DeoptReasonId reason,
uint32_t flags) {
@@ -889,7 +851,6 @@ Label* FlowGraphCompiler::AddDeoptStub(intptr_t deopt_id,
return stub->entry_label();
}

-
#if defined(TARGET_ARCH_DBC)
void FlowGraphCompiler::EmitDeopt(intptr_t deopt_id,
ICData::DeoptReasonId reason,
@@ -907,7 +868,6 @@ void FlowGraphCompiler::EmitDeopt(intptr_t deopt_id,
}
#endif // defined(TARGET_ARCH_DBC)

-
void FlowGraphCompiler::FinalizeExceptionHandlers(const Code& code) {
ASSERT(exception_handlers_list_ != NULL);
const ExceptionHandlers& handlers = ExceptionHandlers::Handle(
@@ -921,7 +881,6 @@ void FlowGraphCompiler::FinalizeExceptionHandlers(const Code& code) {
}
}

-
void FlowGraphCompiler::FinalizePcDescriptors(const Code& code) {
ASSERT(pc_descriptors_list_ != NULL);
const PcDescriptors& descriptors = PcDescriptors::Handle(
@@ -930,7 +889,6 @@ void FlowGraphCompiler::FinalizePcDescriptors(const Code& code) {
code.set_pc_descriptors(descriptors);
}

-
RawArray* FlowGraphCompiler::CreateDeoptInfo(Assembler* assembler) {
// No deopt information if we precompile (no deoptimization allowed).
if (FLAG_precompiled_mode) {
@@ -963,7 +921,6 @@ RawArray* FlowGraphCompiler::CreateDeoptInfo(Assembler* assembler) {
}
}

-
void FlowGraphCompiler::FinalizeStackMaps(const Code& code) {
if (stackmap_table_builder_ == NULL) {
code.set_stackmaps(Object::null_array());
@@ -974,7 +931,6 @@ void FlowGraphCompiler::FinalizeStackMaps(const Code& code) {
}
}

-
void FlowGraphCompiler::FinalizeVarDescriptors(const Code& code) {
if (code.is_optimized()) {
// Optimized code does not need variable descriptors. They are
@@ -1036,7 +992,6 @@ void FlowGraphCompiler::FinalizeStaticCallTargetsTable(const Code& code) {
targets.Length() * sizeof(uword));
}

-
void FlowGraphCompiler::FinalizeCodeSourceMap(const Code& code) {
const Array& inlined_id_array =
Array::Handle(zone(), code_source_map_builder_->InliningIdToFunction());
@@ -1059,7 +1014,6 @@ void FlowGraphCompiler::FinalizeCodeSourceMap(const Code& code) {
#endif
}

-
// Returns 'true' if regular code generation should be skipped.
bool FlowGraphCompiler::TryIntrinsify() {
// Intrinsification skips arguments checks, therefore disable if in checked
@@ -1114,7 +1068,6 @@ bool FlowGraphCompiler::TryIntrinsify() {
return complete;
}

-
// DBC is very different from other architectures in how it performs instance
// and static calls because it does not use stubs.
#if !defined(TARGET_ARCH_DBC)
@@ -1134,7 +1087,6 @@ void FlowGraphCompiler::GenerateCallWithDeopt(TokenPosition token_pos,
}
}

-
void FlowGraphCompiler::GenerateInstanceCall(intptr_t deopt_id,
TokenPosition token_pos,
intptr_t argument_count,
@@ -1193,7 +1145,6 @@ void FlowGraphCompiler::GenerateInstanceCall(intptr_t deopt_id,
}
}

-
void FlowGraphCompiler::GenerateStaticCall(intptr_t deopt_id,
TokenPosition token_pos,
const Function& function,
@@ -1224,7 +1175,6 @@ void FlowGraphCompiler::GenerateStaticCall(intptr_t deopt_id,
}
}

-
void FlowGraphCompiler::GenerateNumberTypeCheck(Register kClassIdReg,
const AbstractType& type,
Label* is_instance_lbl,
@@ -1244,7 +1194,6 @@ void FlowGraphCompiler::GenerateNumberTypeCheck(Register kClassIdReg,
CheckClassIds(kClassIdReg, args, is_instance_lbl, is_not_instance_lbl);
}

-
void FlowGraphCompiler::GenerateStringTypeCheck(Register kClassIdReg,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
@@ -1257,7 +1206,6 @@ void FlowGraphCompiler::GenerateStringTypeCheck(Register kClassIdReg,
CheckClassIds(kClassIdReg, args, is_instance_lbl, is_not_instance_lbl);
}

-
void FlowGraphCompiler::GenerateListTypeCheck(Register kClassIdReg,
Label* is_instance_lbl) {
assembler()->Comment("ListTypeCheck");
@@ -1283,7 +1231,6 @@ void FlowGraphCompiler::EmitComment(Instruction* instr) {
#endif
}

-
#if !defined(TARGET_ARCH_DBC)
// TODO(vegorov) enable edge-counters on DBC if we consider them beneficial.
bool FlowGraphCompiler::NeedsEdgeCounter(TargetEntryInstr* block) {
@@ -1294,7 +1241,6 @@ bool FlowGraphCompiler::NeedsEdgeCounter(TargetEntryInstr* block) {
(block == flow_graph().graph_entry()->normal_entry())));
}

-
// Allocate a register that is not explicitly blocked.
static Register AllocateFreeRegister(bool* blocked_registers) {
for (intptr_t regno = 0; regno < kNumberOfCpuRegisters; regno++) {
@@ -1308,7 +1254,6 @@ static Register AllocateFreeRegister(bool* blocked_registers) {
}
#endif

-
void FlowGraphCompiler::AllocateRegistersLocally(Instruction* instr) {
ASSERT(!is_optimizing());
instr->InitializeLocationSummary(zone(), false); // Not optimizing.
@@ -1407,16 +1352,13 @@ void FlowGraphCompiler::AllocateRegistersLocally(Instruction* instr) {
#endif // !defined(TARGET_ARCH_DBC)
}

-
static uword RegMaskBit(Register reg) {
return ((reg) != kNoRegister) ? (1 << (reg)) : 0;
}

-
ParallelMoveResolver::ParallelMoveResolver(FlowGraphCompiler* compiler)
: compiler_(compiler), moves_(32) {}

-
void ParallelMoveResolver::EmitNativeCode(ParallelMoveInstr* parallel_move) {
ASSERT(moves_.is_empty());
// Build up a worklist of moves.
@@ -1444,7 +1386,6 @@ void ParallelMoveResolver::EmitNativeCode(ParallelMoveInstr* parallel_move) {
moves_.Clear();
}

-
void ParallelMoveResolver::BuildInitialMoveList(
ParallelMoveInstr* parallel_move) {
// Perform a linear sweep of the moves to add them to the initial list of
@@ -1457,7 +1398,6 @@ void ParallelMoveResolver::BuildInitialMoveList(
}
}

-
void ParallelMoveResolver::PerformMove(int index) {
// Each call to this function performs a move and deletes it from the move
// graph. We first recursively perform any move blocking this one. We
@@ -1526,7 +1466,6 @@ void ParallelMoveResolver::PerformMove(int index) {
compiler_->EndCodeSourceRange(TokenPosition::kParallelMove);
}

-
bool ParallelMoveResolver::IsScratchLocation(Location loc) {
for (int i = 0; i < moves_.length(); ++i) {
if (moves_[i]->Blocks(loc)) {
@@ -1543,7 +1482,6 @@ bool ParallelMoveResolver::IsScratchLocation(Location loc) {
return false;
}

-
intptr_t ParallelMoveResolver::AllocateScratchRegister(
Location::Kind kind,
uword blocked_mask,
@@ -1578,7 +1516,6 @@ intptr_t ParallelMoveResolver::AllocateScratchRegister(
return scratch;
}

-
ParallelMoveResolver::ScratchFpuRegisterScope::ScratchFpuRegisterScope(
ParallelMoveResolver* resolver,
FpuRegister blocked)
@@ -1595,14 +1532,12 @@ ParallelMoveResolver::ScratchFpuRegisterScope::ScratchFpuRegisterScope(
}
}

-
ParallelMoveResolver::ScratchFpuRegisterScope::~ScratchFpuRegisterScope() {
if (spilled_) {
resolver_->RestoreFpuScratch(reg_);
}
}

-
ParallelMoveResolver::ScratchRegisterScope::ScratchRegisterScope(
ParallelMoveResolver* resolver,
Register blocked)
@@ -1626,14 +1561,12 @@ ParallelMoveResolver::ScratchRegisterScope::ScratchRegisterScope(
}
}

-
ParallelMoveResolver::ScratchRegisterScope::~ScratchRegisterScope() {
if (spilled_) {
resolver_->RestoreScratch(reg_);
}
}

-
const ICData* FlowGraphCompiler::GetOrAddInstanceCallICData(
intptr_t deopt_id,
const String& target_name,
@@ -1664,7 +1597,6 @@ const ICData* FlowGraphCompiler::GetOrAddInstanceCallICData(
return &ic_data;
}

-
const ICData* FlowGraphCompiler::GetOrAddStaticCallICData(
intptr_t deopt_id,
const Function& target,
@@ -1696,7 +1628,6 @@ const ICData* FlowGraphCompiler::GetOrAddStaticCallICData(
return &ic_data;
}

-
intptr_t FlowGraphCompiler::GetOptimizationThreshold() const {
intptr_t threshold;
if (is_optimizing()) {
@@ -1715,7 +1646,6 @@ intptr_t FlowGraphCompiler::GetOptimizationThreshold() const {
return threshold;
}

-
const Class& FlowGraphCompiler::BoxClassFor(Representation rep) {
switch (rep) {
case kUnboxedDouble:
@@ -1734,18 +1664,15 @@ const Class& FlowGraphCompiler::BoxClassFor(Representation rep) {
}
}

-
void FlowGraphCompiler::BeginCodeSourceRange() {
code_source_map_builder_->BeginCodeSourceRange(assembler()->CodeSize());
}

-
void FlowGraphCompiler::EndCodeSourceRange(TokenPosition token_pos) {
code_source_map_builder_->EndCodeSourceRange(assembler()->CodeSize(),
token_pos);
}

-
const CallTargets* FlowGraphCompiler::ResolveCallTargetsForReceiverCid(
intptr_t cid,
const String& selector,
@@ -1763,7 +1690,6 @@ const CallTargets* FlowGraphCompiler::ResolveCallTargetsForReceiverCid(
return targets;
}

-
bool FlowGraphCompiler::LookupMethodFor(int class_id,
const String& name,
const ArgumentsDescriptor& args_desc,
@@ -1794,7 +1720,6 @@ bool FlowGraphCompiler::LookupMethodFor(int class_id,
return true;
}

-
#if !defined(TARGET_ARCH_DBC)
// DBC emits calls very differently from other architectures due to its
// interpreted nature.
@@ -1833,7 +1758,6 @@ void FlowGraphCompiler::EmitPolymorphicInstanceCall(
}
}

-
#define __ assembler()->
void FlowGraphCompiler::EmitTestAndCall(const CallTargets& targets,
const String& function_name,
@@ -1984,7 +1908,6 @@ void FlowGraphCompiler::FrameStateUpdateWith(Instruction* instr) {
}
}

-
void FlowGraphCompiler::FrameStatePush(Definition* defn) {
Representation rep = defn->representation();
if ((rep == kUnboxedDouble) || (rep == kUnboxedFloat64x2) ||
@@ -2002,14 +1925,12 @@ void FlowGraphCompiler::FrameStatePush(Definition* defn) {
frame_state_.Add(rep);
}

-
void FlowGraphCompiler::FrameStatePop(intptr_t count) {
ASSERT(!is_optimizing());
frame_state_.TruncateTo(
Utils::Maximum(static_cast<intptr_t>(0), frame_state_.length() - count));
}

-
bool FlowGraphCompiler::FrameStateIsSafeToCall() {
ASSERT(!is_optimizing());
for (intptr_t i = 0; i < frame_state_.length(); i++) {
@@ -2020,12 +1941,10 @@ bool FlowGraphCompiler::FrameStateIsSafeToCall() {
}
return true;
}

-
void FlowGraphCompiler::FrameStateClear() {
ASSERT(!is_optimizing());
frame_state_.TruncateTo(0);
}
#endif // defined(DEBUG) && !defined(TARGET_ARCH_DBC)

-
} // namespace dart