Index: runtime/vm/flow_graph_compiler_arm64.cc
diff --git a/runtime/vm/flow_graph_compiler_arm64.cc b/runtime/vm/flow_graph_compiler_arm64.cc
index b5cbad9544a16aa429a7eb3db209241edb87947c..40b461bd2f6d0040266306101c9808baa8cf43c1 100644
--- a/runtime/vm/flow_graph_compiler_arm64.cc
+++ b/runtime/vm/flow_graph_compiler_arm64.cc
@@ -26,7 +26,6 @@ namespace dart {
 DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
 DECLARE_FLAG(bool, enable_simd_inline);
 
-
 FlowGraphCompiler::~FlowGraphCompiler() {
   // BlockInfos are zone-allocated, so their destructors are not called.
   // Verify the labels explicitly here.
@@ -35,47 +34,39 @@ FlowGraphCompiler::~FlowGraphCompiler() {
   }
 }
 
-
 bool FlowGraphCompiler::SupportsUnboxedDoubles() {
   return true;
 }
 
-
 bool FlowGraphCompiler::SupportsUnboxedMints() {
   return false;
 }
 
-
 bool FlowGraphCompiler::SupportsUnboxedSimd128() {
   return FLAG_enable_simd_inline;
 }
 
-
 bool FlowGraphCompiler::CanConvertUnboxedMintToDouble() {
   // ARM does not have a short instruction sequence for converting int64 to
   // double.
   return false;
 }
 
-
 bool FlowGraphCompiler::SupportsHardwareDivision() {
   return true;
 }
 
-
 void FlowGraphCompiler::EnterIntrinsicMode() {
   ASSERT(!intrinsic_mode());
   intrinsic_mode_ = true;
   ASSERT(!assembler()->constant_pool_allowed());
 }
 
-
 void FlowGraphCompiler::ExitIntrinsicMode() {
   ASSERT(intrinsic_mode());
   intrinsic_mode_ = false;
 }
 
-
 RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
                                                  DeoptInfoBuilder* builder,
                                                  const Array& deopt_table) {
@@ -162,7 +153,6 @@ RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
   return builder->CreateDeoptInfo(deopt_table);
 }
 
-
 void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
                                              intptr_t stub_ix) {
   // Calls do not need stubs, they share a deoptimization trampoline.
@@ -182,10 +172,8 @@ void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
 #undef __
 }
 
-
 #define __ assembler()->
 
-
 // Fall through if bool_register contains null.
 void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
                                            Label* is_true,
@@ -199,7 +187,6 @@ void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
   __ Bind(&fall_through);
 }
 
-
 // R0: instance (must be preserved).
 // R1: instantiator type arguments (if used).
 // R2: function type arguments (if used).
@@ -236,7 +223,6 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
   return type_test_cache.raw();
 }
 
-
 // Jumps to labels 'is_instance' or 'is_not_instance' respectively, if
 // type test is conclusive, otherwise fallthrough if a type test could not
 // be completed.
@@ -315,7 +301,6 @@ FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
                                      is_instance_lbl, is_not_instance_lbl);
 }
 
-
 void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
                                       const GrowableArray<intptr_t>& class_ids,
                                       Label* is_equal_lbl,
@@ -327,7 +312,6 @@ void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
   __ b(is_not_equal_lbl);
 }
 
-
 // Testing against an instantiated type with no arguments, without
 // SubtypeTestCache.
 // R0: instance being type checked (preserved).
@@ -395,7 +379,6 @@ bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
   return true;
 }
 
-
 // Uses SubtypeTestCache to store instance class and result.
 // R0: instance to test.
 // Clobbers R1-R5.
@@ -427,7 +410,6 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
                                      is_instance_lbl, is_not_instance_lbl);
 }
 
-
 // Generates inlined check if 'type' is a type parameter or type itself
 // R0: instance (preserved).
RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
@@ -506,7 +488,6 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
   return SubtypeTestCache::null();
 }
 
-
 // Inputs:
 // - R0: instance being type checked (preserved).
 // - R1: optional instantiator type arguments (preserved).
@@ -549,7 +530,6 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateInlineInstanceof(
                                         is_not_instance_lbl);
 }
 
-
 // If instanceof type test cannot be performed successfully at compile time and
 // therefore eliminated, optimize it by adding inlined tests for:
 // - NULL -> return type == Null (type is not Object or dynamic).
@@ -622,7 +602,6 @@ void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos,
   __ Drop(2);
 }
 
-
 // Optimize assignable type check by adding inlined tests for:
 // - NULL -> return NULL.
 // - Smi -> compile time subtype check (only if dst class is not parameterized).
@@ -696,7 +675,6 @@ void FlowGraphCompiler::GenerateAssertAssignable(TokenPosition token_pos,
   __ PopPair(kFunctionTypeArgumentsReg, kInstantiatorTypeArgumentsReg);
 }
 
-
 void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
   if (is_optimizing()) {
     return;
@@ -707,7 +685,6 @@ void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
   }
 }
 
-
 // Input parameters:
 //   R4: arguments descriptor array.
 void FlowGraphCompiler::CopyParameters() {
@@ -801,8 +778,9 @@ void FlowGraphCompiler::CopyParameters() {
   __ add(R7, FP, Operand(R7, LSL, 2));
   __ AddImmediate(R7, kParamEndSlotFromFp * kWordSize);
   // Let R6 point to the entry of the first named argument.
-  __ add(R6, R4, Operand(ArgumentsDescriptor::first_named_entry_offset() -
-                         kHeapObjectTag));
+  __ add(R6, R4,
+         Operand(ArgumentsDescriptor::first_named_entry_offset() -
+                 kHeapObjectTag));
   for (int i = 0; i < num_opt_named_params; i++) {
     Label load_default_value, assign_optional_parameter;
     const int param_pos = opt_param_position[i];
@@ -909,7 +887,6 @@ void FlowGraphCompiler::CopyParameters() {
   __ b(&null_args_loop, PL);
 }
 
-
 void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) {
   // LR: return address.
   // SP: receiver.
@@ -920,7 +897,6 @@ void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) {
   __ ret();
 }
 
-
 void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) {
   // LR: return address.
   // SP+1: receiver.
@@ -934,7 +910,6 @@ void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) {
   __ ret();
 }
 
-
 void FlowGraphCompiler::EmitFrameEntry() {
   const Function& function = parsed_function().function();
   Register new_pp = kNoRegister;
@@ -978,7 +953,6 @@ void FlowGraphCompiler::EmitFrameEntry() {
   }
 }
 
-
 // Input parameters:
 //   LR: return address.
 //   SP: address of last argument.
@@ -1112,7 +1086,6 @@ void FlowGraphCompiler::CompileGraph() {
   GenerateDeferredCode();
 }
 
-
 void FlowGraphCompiler::GenerateCall(TokenPosition token_pos,
                                      const StubEntry& stub_entry,
                                      RawPcDescriptors::Kind kind,
@@ -1121,7 +1094,6 @@ void FlowGraphCompiler::GenerateCall(TokenPosition token_pos,
   EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, kind, locs);
 }
 
-
 void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos,
                                               const StubEntry& stub_entry,
                                               RawPcDescriptors::Kind kind,
@@ -1130,7 +1102,6 @@ void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos,
   EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, kind, locs);
 }
 
-
 void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
                                          TokenPosition token_pos,
                                          const StubEntry& stub_entry,
@@ -1150,7 +1121,6 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
   }
 }
 
-
 void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
                                                TokenPosition token_pos,
                                                const StubEntry& stub_entry,
@@ -1177,7 +1147,6 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
   AddStaticCallTarget(target);
 }
 
-
 void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,
                                             intptr_t deopt_id,
                                             const RuntimeEntry& entry,
@@ -1199,7 +1168,6 @@ void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,
   }
 }
 
-
 void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
   // We do not check for overflow when incrementing the edge counter. The
   // function should normally be optimized long before the counter can
@@ -1215,7 +1183,6 @@ void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
   __ StoreFieldToOffset(TMP, R0, Array::element_offset(edge_id));
 }
 
-
 void FlowGraphCompiler::EmitOptimizedInstanceCall(const StubEntry& stub_entry,
                                                   const ICData& ic_data,
                                                   intptr_t argument_count,
@@ -1237,7 +1204,6 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(const StubEntry& stub_entry,
   __ Drop(argument_count);
 }
 
-
 void FlowGraphCompiler::EmitInstanceCall(const StubEntry& stub_entry,
                                          const ICData& ic_data,
                                          intptr_t argument_count,
@@ -1251,7 +1217,6 @@ void FlowGraphCompiler::EmitInstanceCall(const StubEntry& stub_entry,
   __ Drop(argument_count);
 }
 
-
 void FlowGraphCompiler::EmitMegamorphicInstanceCall(
     const String& name,
     const Array& arguments_descriptor,
@@ -1299,7 +1264,6 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
   __ Drop(argument_count);
 }
 
-
 void FlowGraphCompiler::EmitSwitchableInstanceCall(const ICData& ic_data,
                                                    intptr_t argument_count,
                                                    intptr_t deopt_id,
@@ -1329,7 +1293,6 @@ void FlowGraphCompiler::EmitSwitchableInstanceCall(const ICData& ic_data,
   __ Drop(argument_count);
 }
 
-
 void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t argument_count,
                                                   intptr_t deopt_id,
                                                   TokenPosition token_pos,
@@ -1343,7 +1306,6 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t argument_count,
   __ Drop(argument_count);
 }
 
-
 void FlowGraphCompiler::EmitOptimizedStaticCall(
     const Function& function,
     const Array& arguments_descriptor,
@@ -1366,7 +1328,6 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
   __ Drop(argument_count);
 }
 
-
 Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
     Register reg,
    const Object& obj,
@@ -1394,7 +1355,6 @@ Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
   return EQ;
 }
 
-
 Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left,
                                                        Register right,
                                                        bool needs_number_check,
@@ -1420,7 +1380,6 @@ Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left,
   return EQ;
 }
 
-
 // This function must be in sync with FlowGraphCompiler::RecordSafepoint and
 // FlowGraphCompiler::SlowPathEnvironmentFor.
 void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
@@ -1452,7 +1411,6 @@ void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
   }
 }
 
-
 void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
   for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
     Register reg = static_cast<Register>(i);
@@ -1473,7 +1431,6 @@ void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
   }
 }
 
-
 #if defined(DEBUG)
 void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
   // Clobber temporaries that have not been manually preserved.
@@ -1488,7 +1445,6 @@ void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
 }
 #endif
 
-
 void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
     intptr_t argument_count,
     const Array& arguments_descriptor) {
@@ -1498,19 +1454,16 @@ void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
   __ LoadObject(R4, arguments_descriptor);
 }
 
-
 void FlowGraphCompiler::EmitTestAndCallSmiBranch(Label* label, bool if_smi) {
   __ tsti(R0, Immediate(kSmiTagMask));
   // Jump if receiver is not Smi.
   __ b(label, if_smi ? EQ : NE);
 }
 
-
 void FlowGraphCompiler::EmitTestAndCallLoadCid() {
   __ LoadClassId(R2, R0);
 }
 
-
 int FlowGraphCompiler::EmitTestAndCallCheckCid(Label* next_label,
                                                const CidRange& range,
                                                int bias) {
@@ -1527,11 +1480,9 @@ int FlowGraphCompiler::EmitTestAndCallCheckCid(Label* next_label,
   return bias;
 }
 
-
 #undef __
 #define __ compiler_->assembler()->
 
-
 void ParallelMoveResolver::EmitMove(int index) {
   MoveOperands* move = moves_[index];
   const Location source = move->src();
@@ -1644,7 +1595,6 @@ void ParallelMoveResolver::EmitMove(int index) {
   move->Eliminate();
 }
 
-
 void ParallelMoveResolver::EmitSwap(int index) {
   MoveOperands* move = moves_[index];
   const Location source = move->src();
@@ -1733,32 +1683,27 @@ void ParallelMoveResolver::EmitSwap(int index) {
   }
 }
 
-
 void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst,
                                               const Address& src) {
   UNREACHABLE();
 }
 
-
 void ParallelMoveResolver::StoreObject(const Address& dst, const Object& obj) {
   UNREACHABLE();
 }
 
-
 // Do not call or implement this function. Instead, use the form below that
 // uses an offset from the frame pointer instead of an Address.
 void ParallelMoveResolver::Exchange(Register reg, const Address& mem) {
   UNREACHABLE();
 }
 
-
 // Do not call or implement this function. Instead, use the form below that
 // uses offsets from the frame pointer instead of Addresses.
 void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) {
   UNREACHABLE();
 }
 
-
 void ParallelMoveResolver::Exchange(Register reg,
                                     Register base_reg,
                                     intptr_t stack_offset) {
@@ -1768,7 +1713,6 @@ void ParallelMoveResolver::Exchange(Register reg,
   __ StoreToOffset(tmp.reg(), base_reg, stack_offset);
 }
 
-
 void ParallelMoveResolver::Exchange(Register base_reg1,
                                     intptr_t stack_offset1,
                                     Register base_reg2,
@@ -1781,27 +1725,22 @@ void ParallelMoveResolver::Exchange(Register base_reg1,
   __ StoreToOffset(tmp2.reg(), base_reg1, stack_offset1);
 }
 
-
 void ParallelMoveResolver::SpillScratch(Register reg) {
   __ Push(reg);
 }
 
-
 void ParallelMoveResolver::RestoreScratch(Register reg) {
   __ Pop(reg);
 }
 
-
 void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) {
   __ PushDouble(reg);
 }
 
-
 void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) {
   __ PopDouble(reg);
 }
 
-
 #undef __
 
 }  // namespace dart