Chromium Code Reviews

Unified Diff: runtime/vm/flow_graph_compiler_arm.cc

Issue 2974233002: VM: Re-format to use at most one newline between functions (Closed)
Patch Set: Rebase and merge (created 3 years, 5 months ago)
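For readers skimming the diff below: the patch applies a single whitespace rule, collapsing runs of blank lines so that at most one blank line separates top-level definitions; it also re-wraps a few long call expressions (for example the __ ldm(...) macros) to the formatter's argument-alignment style. A minimal before/after sketch of the blank-line rule follows (the function names here are hypothetical, not taken from this file):

    // Before: two blank lines between top-level functions (hypothetical example).
    bool SupportsFeatureA() {
      return true;
    }


    bool SupportsFeatureB() {
      return false;
    }

    // After re-formatting: at most one blank line separates them.
    bool SupportsFeatureA() {
      return true;
    }

    bool SupportsFeatureB() {
      return false;
    }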
Index: runtime/vm/flow_graph_compiler_arm.cc
diff --git a/runtime/vm/flow_graph_compiler_arm.cc b/runtime/vm/flow_graph_compiler_arm.cc
index 37b2e1f6440c58c7cc9031bae07aa35aead42b2c..bfc04a3d5fcf78b0a15c6299fb52b72455d81009 100644
--- a/runtime/vm/flow_graph_compiler_arm.cc
+++ b/runtime/vm/flow_graph_compiler_arm.cc
@@ -28,7 +28,6 @@ DEFINE_FLAG(bool, unbox_mints, true, "Optimize 64-bit integer arithmetic.");
DEFINE_FLAG(bool, unbox_doubles, true, "Optimize double arithmetic.");
DECLARE_FLAG(bool, enable_simd_inline);
-
FlowGraphCompiler::~FlowGraphCompiler() {
// BlockInfos are zone-allocated, so their destructors are not called.
// Verify the labels explicitly here.
@@ -37,47 +36,39 @@ FlowGraphCompiler::~FlowGraphCompiler() {
}
}
-
bool FlowGraphCompiler::SupportsUnboxedDoubles() {
return TargetCPUFeatures::vfp_supported() && FLAG_unbox_doubles;
}
-
bool FlowGraphCompiler::SupportsUnboxedMints() {
return FLAG_unbox_mints;
}
-
bool FlowGraphCompiler::SupportsUnboxedSimd128() {
return TargetCPUFeatures::neon_supported() && FLAG_enable_simd_inline;
}
-
bool FlowGraphCompiler::SupportsHardwareDivision() {
return TargetCPUFeatures::can_divide();
}
-
bool FlowGraphCompiler::CanConvertUnboxedMintToDouble() {
// ARM does not have a short instruction sequence for converting int64 to
// double.
return false;
}
-
void FlowGraphCompiler::EnterIntrinsicMode() {
ASSERT(!intrinsic_mode());
intrinsic_mode_ = true;
ASSERT(!assembler()->constant_pool_allowed());
}
-
void FlowGraphCompiler::ExitIntrinsicMode() {
ASSERT(intrinsic_mode());
intrinsic_mode_ = false;
}
-
RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
DeoptInfoBuilder* builder,
const Array& deopt_table) {
@@ -164,7 +155,6 @@ RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
return builder->CreateDeoptInfo(deopt_table);
}
-
void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
intptr_t stub_ix) {
// Calls do not need stubs, they share a deoptimization trampoline.
@@ -189,10 +179,8 @@ void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
#undef __
}
-
#define __ assembler()->
-
// Fall through if bool_register contains null.
void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
Label* is_true,
@@ -206,7 +194,6 @@ void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
__ Bind(&fall_through);
}
-
// R0: instance (must be preserved).
// R2: instantiator type arguments (if used).
// R1: function type arguments (if used).
@@ -244,7 +231,6 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
return type_test_cache.raw();
}
-
// Jumps to labels 'is_instance' or 'is_not_instance' respectively, if
// type test is conclusive, otherwise fallthrough if a type test could not
// be completed.
@@ -323,7 +309,6 @@ FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
is_instance_lbl, is_not_instance_lbl);
}
-
void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
const GrowableArray<intptr_t>& class_ids,
Label* is_equal_lbl,
@@ -335,7 +320,6 @@ void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
__ b(is_not_equal_lbl);
}
-
// Testing against an instantiated type with no arguments, without
// SubtypeTestCache.
// R0: instance being type checked (preserved).
@@ -403,7 +387,6 @@ bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
return true;
}
-
// Uses SubtypeTestCache to store instance class and result.
// R0: instance to test.
// Clobbers R1-R4, R8, R9.
@@ -435,7 +418,6 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
is_instance_lbl, is_not_instance_lbl);
}
-
// Generates inlined check if 'type' is a type parameter or type itself
// R0: instance (preserved).
RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
@@ -450,8 +432,9 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
const TypeParameter& type_param = TypeParameter::Cast(type);
const Register kInstantiatorTypeArgumentsReg = R2;
const Register kFunctionTypeArgumentsReg = R1;
- __ ldm(IA, SP, (1 << kFunctionTypeArgumentsReg) |
- (1 << kInstantiatorTypeArgumentsReg));
+ __ ldm(IA, SP,
+ (1 << kFunctionTypeArgumentsReg) |
+ (1 << kInstantiatorTypeArgumentsReg));
// R2: instantiator type arguments.
// R1: function type arguments.
const Register kTypeArgumentsReg =
@@ -501,8 +484,9 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
const Register kFunctionTypeArgumentsReg = R1;
__ tst(kInstanceReg, Operand(kSmiTagMask)); // Is instance Smi?
__ b(is_not_instance_lbl, EQ);
- __ ldm(IA, SP, (1 << kFunctionTypeArgumentsReg) |
- (1 << kInstantiatorTypeArgumentsReg));
+ __ ldm(IA, SP,
+ (1 << kFunctionTypeArgumentsReg) |
+ (1 << kInstantiatorTypeArgumentsReg));
// Uninstantiated type class is known at compile time, but the type
// arguments are determined at runtime by the instantiator(s).
const Register kTempReg = kNoRegister;
@@ -514,7 +498,6 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
return SubtypeTestCache::null();
}
-
// Inputs:
// - R0: instance being type checked (preserved).
// - R2: optional instantiator type arguments (preserved).
@@ -557,7 +540,6 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateInlineInstanceof(
is_not_instance_lbl);
}
-
// If instanceof type test cannot be performed successfully at compile time and
// therefore eliminated, optimize it by adding inlined tests for:
// - NULL -> return type == Null (type is not Object or dynamic).
@@ -603,8 +585,9 @@ void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos,
Label done;
if (!test_cache.IsNull()) {
// Generate runtime call.
- __ ldm(IA, SP, (1 << kFunctionTypeArgumentsReg) |
- (1 << kInstantiatorTypeArgumentsReg));
+ __ ldm(IA, SP,
+ (1 << kFunctionTypeArgumentsReg) |
+ (1 << kInstantiatorTypeArgumentsReg));
__ PushObject(Object::null_object()); // Make room for the result.
__ Push(R0); // Push the instance.
__ PushObject(type); // Push the type.
@@ -630,7 +613,6 @@ void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos,
__ Drop(2);
}
-
// Optimize assignable type check by adding inlined tests for:
// - NULL -> return NULL.
// - Smi -> compile time subtype check (only if dst class is not parameterized).
@@ -687,8 +669,9 @@ void FlowGraphCompiler::GenerateAssertAssignable(TokenPosition token_pos,
&runtime_call);
__ Bind(&runtime_call);
- __ ldm(IA, SP, (1 << kFunctionTypeArgumentsReg) |
- (1 << kInstantiatorTypeArgumentsReg));
+ __ ldm(
+ IA, SP,
+ (1 << kFunctionTypeArgumentsReg) | (1 << kInstantiatorTypeArgumentsReg));
__ PushObject(Object::null_object()); // Make room for the result.
__ Push(R0); // Push the source object.
__ PushObject(dst_type); // Push the type of the destination.
@@ -708,7 +691,6 @@ void FlowGraphCompiler::GenerateAssertAssignable(TokenPosition token_pos,
(1 << kInstantiatorTypeArgumentsReg));
}
-
void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
if (is_optimizing()) {
return;
@@ -719,7 +701,6 @@ void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
}
}
-
// Input parameters:
// R4: arguments descriptor array.
void FlowGraphCompiler::CopyParameters() {
@@ -812,8 +793,9 @@ void FlowGraphCompiler::CopyParameters() {
__ add(NOTFP, FP, Operand(NOTFP, LSL, 1));
__ AddImmediate(NOTFP, NOTFP, kParamEndSlotFromFp * kWordSize);
// Let R8 point to the entry of the first named argument.
- __ add(R8, R4, Operand(ArgumentsDescriptor::first_named_entry_offset() -
- kHeapObjectTag));
+ __ add(R8, R4,
+ Operand(ArgumentsDescriptor::first_named_entry_offset() -
+ kHeapObjectTag));
for (int i = 0; i < num_opt_named_params; i++) {
Label load_default_value, assign_optional_parameter;
const int param_pos = opt_param_position[i];
@@ -921,7 +903,6 @@ void FlowGraphCompiler::CopyParameters() {
__ b(&null_args_loop, PL);
}
-
void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) {
// LR: return address.
// SP: receiver.
@@ -932,7 +913,6 @@ void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) {
__ Ret();
}
-
void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) {
// LR: return address.
// SP+1: receiver.
@@ -946,10 +926,8 @@ void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) {
__ Ret();
}
-
static const Register new_pp = NOTFP;
-
void FlowGraphCompiler::EmitFrameEntry() {
const Function& function = parsed_function().function();
if (CanOptimizeFunction() && function.IsOptimizable() &&
@@ -985,7 +963,6 @@ void FlowGraphCompiler::EmitFrameEntry() {
}
}
-
// Input parameters:
// LR: return address.
// SP: address of last argument.
@@ -1120,7 +1097,6 @@ void FlowGraphCompiler::CompileGraph() {
GenerateDeferredCode();
}
-
void FlowGraphCompiler::GenerateCall(TokenPosition token_pos,
const StubEntry& stub_entry,
RawPcDescriptors::Kind kind,
@@ -1129,7 +1105,6 @@ void FlowGraphCompiler::GenerateCall(TokenPosition token_pos,
EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, kind, locs);
}
-
void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos,
const StubEntry& stub_entry,
RawPcDescriptors::Kind kind,
@@ -1138,7 +1113,6 @@ void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos,
EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, kind, locs);
}
-
void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
TokenPosition token_pos,
const StubEntry& stub_entry,
@@ -1158,7 +1132,6 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
}
}
-
void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
TokenPosition token_pos,
const StubEntry& stub_entry,
@@ -1185,7 +1158,6 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
AddStaticCallTarget(target);
}
-
void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,
intptr_t deopt_id,
const RuntimeEntry& entry,
@@ -1207,7 +1179,6 @@ void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,
}
}
-
void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
// We do not check for overflow when incrementing the edge counter. The
// function should normally be optimized long before the counter can
@@ -1230,7 +1201,6 @@ void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
#endif // DEBUG
}
-
void FlowGraphCompiler::EmitOptimizedInstanceCall(const StubEntry& stub_entry,
const ICData& ic_data,
intptr_t argument_count,
@@ -1252,7 +1222,6 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(const StubEntry& stub_entry,
__ Drop(argument_count);
}
-
void FlowGraphCompiler::EmitInstanceCall(const StubEntry& stub_entry,
const ICData& ic_data,
intptr_t argument_count,
@@ -1266,7 +1235,6 @@ void FlowGraphCompiler::EmitInstanceCall(const StubEntry& stub_entry,
__ Drop(argument_count);
}
-
void FlowGraphCompiler::EmitMegamorphicInstanceCall(
const String& name,
const Array& arguments_descriptor,
@@ -1313,7 +1281,6 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
__ Drop(argument_count);
}
-
void FlowGraphCompiler::EmitSwitchableInstanceCall(const ICData& ic_data,
intptr_t argument_count,
intptr_t deopt_id,
@@ -1344,7 +1311,6 @@ void FlowGraphCompiler::EmitSwitchableInstanceCall(const ICData& ic_data,
__ Drop(argument_count);
}
-
void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t argument_count,
intptr_t deopt_id,
TokenPosition token_pos,
@@ -1358,7 +1324,6 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t argument_count,
__ Drop(argument_count);
}
-
void FlowGraphCompiler::EmitOptimizedStaticCall(
const Function& function,
const Array& arguments_descriptor,
@@ -1381,7 +1346,6 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
__ Drop(argument_count);
}
-
Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
Register reg,
const Object& obj,
@@ -1409,7 +1373,6 @@ Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
return EQ;
}
-
Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left,
Register right,
bool needs_number_check,
@@ -1435,7 +1398,6 @@ Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left,
return EQ;
}
-
// This function must be in sync with FlowGraphCompiler::RecordSafepoint and
// FlowGraphCompiler::SlowPathEnvironmentFor.
void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
@@ -1479,7 +1441,6 @@ void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
}
}
-
void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
RegList reg_list = 0;
for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
@@ -1509,7 +1470,6 @@ void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
}
}
-
#if defined(DEBUG)
void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
// Clobber temporaries that have not been manually preserved.
@@ -1524,7 +1484,6 @@ void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
}
#endif
-
void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
intptr_t argument_count,
const Array& arguments_descriptor) {
@@ -1534,19 +1493,16 @@ void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
__ LoadObject(R4, arguments_descriptor);
}
-
void FlowGraphCompiler::EmitTestAndCallSmiBranch(Label* label, bool if_smi) {
__ tst(R0, Operand(kSmiTagMask));
// Jump if receiver is not Smi.
__ b(label, if_smi ? EQ : NE);
}
-
void FlowGraphCompiler::EmitTestAndCallLoadCid() {
__ LoadClassId(R2, R0);
}
-
int FlowGraphCompiler::EmitTestAndCallCheckCid(Label* next_label,
const CidRange& range,
int bias) {
@@ -1563,11 +1519,9 @@ int FlowGraphCompiler::EmitTestAndCallCheckCid(Label* next_label,
return bias;
}
-
#undef __
#define __ compiler_->assembler()->
-
void ParallelMoveResolver::EmitMove(int index) {
MoveOperands* move = moves_[index];
const Location source = move->src();
@@ -1688,7 +1642,6 @@ void ParallelMoveResolver::EmitMove(int index) {
move->Eliminate();
}
-
void ParallelMoveResolver::EmitSwap(int index) {
MoveOperands* move = moves_[index];
const Location source = move->src();
@@ -1787,32 +1740,27 @@ void ParallelMoveResolver::EmitSwap(int index) {
}
}
-
void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst,
const Address& src) {
UNREACHABLE();
}
-
void ParallelMoveResolver::StoreObject(const Address& dst, const Object& obj) {
UNREACHABLE();
}
-
// Do not call or implement this function. Instead, use the form below that
// uses an offset from the frame pointer instead of an Address.
void ParallelMoveResolver::Exchange(Register reg, const Address& mem) {
UNREACHABLE();
}
-
// Do not call or implement this function. Instead, use the form below that
// uses offsets from the frame pointer instead of Addresses.
void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) {
UNREACHABLE();
}
-
void ParallelMoveResolver::Exchange(Register reg,
Register base_reg,
intptr_t stack_offset) {
@@ -1822,7 +1770,6 @@ void ParallelMoveResolver::Exchange(Register reg,
__ StoreToOffset(kWord, tmp.reg(), base_reg, stack_offset);
}
-
void ParallelMoveResolver::Exchange(Register base_reg1,
intptr_t stack_offset1,
Register base_reg2,
@@ -1835,29 +1782,24 @@ void ParallelMoveResolver::Exchange(Register base_reg1,
__ StoreToOffset(kWord, tmp2.reg(), base_reg1, stack_offset1);
}
-
void ParallelMoveResolver::SpillScratch(Register reg) {
__ Push(reg);
}
-
void ParallelMoveResolver::RestoreScratch(Register reg) {
__ Pop(reg);
}
-
void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) {
DRegister dreg = EvenDRegisterOf(reg);
__ vstrd(dreg, Address(SP, -kDoubleSize, Address::PreIndex));
}
-
void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) {
DRegister dreg = EvenDRegisterOf(reg);
__ vldrd(dreg, Address(SP, kDoubleSize, Address::PostIndex));
}
-
#undef __
} // namespace dart