Chromium Code Reviews

Unified Diff: runtime/vm/flow_graph_compiler_x64.cc

Issue 2974233002: VM: Re-format to use at most one newline between functions (Closed)
Patch Set: Rebase and merge (created 3 years, 5 months ago)
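
The patch is mechanical: it deletes the extra blank line that the old style placed between top-level definitions (each bare "-" line in the diff below) and realigns the trailing comments in a couple of hunks. A minimal sketch of the resulting style follows; the function names are invented for illustration and do not appear in the patch.

// Hypothetical sketch (names invented, not from this patch) of the rule being
// applied: adjacent top-level definitions are separated by a single blank
// line. The second blank line that the previous style inserted between them
// is what each removed "-" line in the diff below represents.
bool SupportsFeatureA() {
  return true;
}

bool SupportsFeatureB() {
  return false;
}
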
Index: runtime/vm/flow_graph_compiler_x64.cc
diff --git a/runtime/vm/flow_graph_compiler_x64.cc b/runtime/vm/flow_graph_compiler_x64.cc
index b75a1ae17cf285f62abe2545dae0721d51dab88b..5c97ebc35cd47c19082c477200441f0eef8cbcd3 100644
--- a/runtime/vm/flow_graph_compiler_x64.cc
+++ b/runtime/vm/flow_graph_compiler_x64.cc
@@ -26,7 +26,6 @@ DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
DEFINE_FLAG(bool, unbox_mints, true, "Optimize 64-bit integer arithmetic.");
DECLARE_FLAG(bool, enable_simd_inline);
-
FlowGraphCompiler::~FlowGraphCompiler() {
// BlockInfos are zone-allocated, so their destructors are not called.
// Verify the labels explicitly here.
@@ -36,45 +35,37 @@ FlowGraphCompiler::~FlowGraphCompiler() {
}
}
-
bool FlowGraphCompiler::SupportsUnboxedDoubles() {
return true;
}
-
bool FlowGraphCompiler::SupportsUnboxedMints() {
return FLAG_unbox_mints;
}
-
bool FlowGraphCompiler::SupportsUnboxedSimd128() {
return FLAG_enable_simd_inline;
}
-
bool FlowGraphCompiler::SupportsHardwareDivision() {
return true;
}
-
bool FlowGraphCompiler::CanConvertUnboxedMintToDouble() {
return false;
}
-
void FlowGraphCompiler::EnterIntrinsicMode() {
ASSERT(!intrinsic_mode());
intrinsic_mode_ = true;
ASSERT(!assembler()->constant_pool_allowed());
}
-
void FlowGraphCompiler::ExitIntrinsicMode() {
ASSERT(intrinsic_mode());
intrinsic_mode_ = false;
}
-
RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
DeoptInfoBuilder* builder,
const Array& deopt_table) {
@@ -161,7 +152,6 @@ RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
return builder->CreateDeoptInfo(deopt_table);
}
-
void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
intptr_t stub_ix) {
// Calls do not need stubs, they share a deoptimization trampoline.
@@ -183,10 +173,8 @@ void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
#undef __
}
-
#define __ assembler()->
-
// Fall through if bool_register contains null.
void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
Label* is_true,
@@ -200,7 +188,6 @@ void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
__ Bind(&fall_through);
}
-
// Clobbers RCX.
RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
TypeTestStubKind test_kind,
@@ -244,7 +231,6 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
return type_test_cache.raw();
}
-
// Jumps to labels 'is_instance' or 'is_not_instance' respectively, if
// type test is conclusive, otherwise fallthrough if a type test could not
// be completed.
@@ -322,7 +308,6 @@ FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
is_instance_lbl, is_not_instance_lbl);
}
-
void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
const GrowableArray<intptr_t>& class_ids,
Label* is_equal_lbl,
@@ -334,7 +319,6 @@ void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
__ jmp(is_not_equal_lbl);
}
-
// Testing against an instantiated type with no arguments, without
// SubtypeTestCache.
// RAX: instance to test against (preserved).
@@ -402,7 +386,6 @@ bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
return true;
}
-
// Uses SubtypeTestCache to store instance class and result.
// RAX: instance to test.
// Clobbers R10, R13.
@@ -434,7 +417,6 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
is_instance_lbl, is_not_instance_lbl);
}
-
// Generates inlined check if 'type' is a type parameter or type itself
// RAX: instance (preserved).
// Clobbers RDI, RDX, R10.
@@ -515,7 +497,6 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
return SubtypeTestCache::null();
}
-
// Inputs:
// - RAX: instance to test against (preserved).
// - RDX: optional instantiator type arguments (preserved).
@@ -558,7 +539,6 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateInlineInstanceof(
is_not_instance_lbl);
}
-
// If instanceof type test cannot be performed successfully at compile time and
// therefore eliminated, optimize it by adding inlined tests for:
// - NULL -> return type == Null (type is not Object or dynamic).
@@ -606,11 +586,11 @@ void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos,
// Generate runtime call.
__ movq(RDX, Address(RSP, 1 * kWordSize)); // Get instantiator type args.
__ movq(RCX, Address(RSP, 0 * kWordSize)); // Get function type args.
- __ PushObject(Object::null_object()); // Make room for the result.
- __ pushq(RAX); // Push the instance.
- __ PushObject(type); // Push the type.
- __ pushq(RDX); // Instantiator type arguments.
- __ pushq(RCX); // Function type arguments.
+ __ PushObject(Object::null_object()); // Make room for the result.
+ __ pushq(RAX); // Push the instance.
+ __ PushObject(type); // Push the type.
+ __ pushq(RDX); // Instantiator type arguments.
+ __ pushq(RCX); // Function type arguments.
__ LoadUniqueObject(RAX, test_cache);
__ pushq(RAX);
GenerateRuntimeCall(token_pos, deopt_id, kInstanceofRuntimeEntry, 5, locs);
@@ -631,7 +611,6 @@ void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos,
__ popq(RDX); // Remove pushed instantiator type arguments.
}
-
// Optimize assignable type check by adding inlined tests for:
// - NULL -> return NULL.
// - Smi -> compile time subtype check (only if dst class is not parameterized).
@@ -688,12 +667,12 @@ void FlowGraphCompiler::GenerateAssertAssignable(TokenPosition token_pos,
__ Bind(&runtime_call);
__ movq(RDX, Address(RSP, 1 * kWordSize)); // Get instantiator type args.
__ movq(RCX, Address(RSP, 0 * kWordSize)); // Get function type args.
- __ PushObject(Object::null_object()); // Make room for the result.
- __ pushq(RAX); // Push the source object.
- __ PushObject(dst_type); // Push the type of the destination.
- __ pushq(RDX); // Instantiator type arguments.
- __ pushq(RCX); // Function type arguments.
- __ PushObject(dst_name); // Push the name of the destination.
+ __ PushObject(Object::null_object()); // Make room for the result.
+ __ pushq(RAX); // Push the source object.
+ __ PushObject(dst_type); // Push the type of the destination.
+ __ pushq(RDX); // Instantiator type arguments.
+ __ pushq(RCX); // Function type arguments.
+ __ PushObject(dst_name); // Push the name of the destination.
__ LoadUniqueObject(RAX, test_cache);
__ pushq(RAX);
GenerateRuntimeCall(token_pos, deopt_id, kTypeCheckRuntimeEntry, 6, locs);
@@ -707,7 +686,6 @@ void FlowGraphCompiler::GenerateAssertAssignable(TokenPosition token_pos,
__ popq(RDX); // Remove pushed instantiator type arguments.
}
-
void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
if (is_optimizing()) {
return;
@@ -726,7 +704,6 @@ void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
}
}
-
void FlowGraphCompiler::CopyParameters() {
__ Comment("Copy parameters");
const Function& function = parsed_function().function();
@@ -929,7 +906,6 @@ void FlowGraphCompiler::CopyParameters() {
__ j(POSITIVE, &null_args_loop, Assembler::kNearJump);
}
-
void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) {
// TOS: return address.
// +1 : receiver.
@@ -940,7 +916,6 @@ void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) {
__ ret();
}
-
void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) {
// TOS: return address.
// +1 : value
@@ -954,7 +929,6 @@ void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) {
__ ret();
}
-
// NOTE: If the entry code shape changes, ReturnAddressLocator in profiler.cc
// needs to be updated to match.
void FlowGraphCompiler::EmitFrameEntry() {
@@ -991,7 +965,6 @@ void FlowGraphCompiler::EmitFrameEntry() {
}
}
-
void FlowGraphCompiler::CompileGraph() {
InitCompiler();
const Function& function = parsed_function().function();
@@ -1127,7 +1100,6 @@ void FlowGraphCompiler::CompileGraph() {
GenerateDeferredCode();
}
-
void FlowGraphCompiler::GenerateCall(TokenPosition token_pos,
const StubEntry& stub_entry,
RawPcDescriptors::Kind kind,
@@ -1136,7 +1108,6 @@ void FlowGraphCompiler::GenerateCall(TokenPosition token_pos,
EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, kind, locs);
}
-
void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos,
const StubEntry& stub_entry,
RawPcDescriptors::Kind kind,
@@ -1145,7 +1116,6 @@ void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos,
EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, kind, locs);
}
-
void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
TokenPosition token_pos,
const StubEntry& stub_entry,
@@ -1165,7 +1135,6 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
}
}
-
void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
TokenPosition token_pos,
const StubEntry& stub_entry,
@@ -1192,7 +1161,6 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
AddStaticCallTarget(target);
}
-
void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,
intptr_t deopt_id,
const RuntimeEntry& entry,
@@ -1214,7 +1182,6 @@ void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,
}
}
-
void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t argument_count,
intptr_t deopt_id,
TokenPosition token_pos,
@@ -1228,7 +1195,6 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t argument_count,
__ Drop(argument_count, RCX);
}
-
void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
// We do not check for overflow when incrementing the edge counter. The
// function should normally be optimized long before the counter can
@@ -1242,7 +1208,6 @@ void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
__ IncrementSmiField(FieldAddress(RAX, Array::element_offset(edge_id)), 1);
}
-
void FlowGraphCompiler::EmitOptimizedInstanceCall(const StubEntry& stub_entry,
const ICData& ic_data,
intptr_t argument_count,
@@ -1263,7 +1228,6 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(const StubEntry& stub_entry,
__ Drop(argument_count, RCX);
}
-
void FlowGraphCompiler::EmitInstanceCall(const StubEntry& stub_entry,
const ICData& ic_data,
intptr_t argument_count,
@@ -1277,7 +1241,6 @@ void FlowGraphCompiler::EmitInstanceCall(const StubEntry& stub_entry,
__ Drop(argument_count, RCX);
}
-
void FlowGraphCompiler::EmitMegamorphicInstanceCall(
const String& name,
const Array& arguments_descriptor,
@@ -1322,7 +1285,6 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
__ Drop(argument_count, RCX);
}
-
void FlowGraphCompiler::EmitSwitchableInstanceCall(const ICData& ic_data,
intptr_t argument_count,
intptr_t deopt_id,
@@ -1339,7 +1301,6 @@ void FlowGraphCompiler::EmitSwitchableInstanceCall(const ICData& ic_data,
__ LoadUniqueObject(RBX, ic_data);
__ call(RCX);
-
EmitCallsiteMetaData(token_pos, deopt_id, RawPcDescriptors::kOther, locs);
const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
if (is_optimizing()) {
@@ -1352,7 +1313,6 @@ void FlowGraphCompiler::EmitSwitchableInstanceCall(const ICData& ic_data,
__ Drop(argument_count, RCX);
}
-
void FlowGraphCompiler::EmitOptimizedStaticCall(
const Function& function,
const Array& arguments_descriptor,
@@ -1375,7 +1335,6 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
__ Drop(argument_count, RCX);
}
-
Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
Register reg,
const Object& obj,
@@ -1409,7 +1368,6 @@ Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
return EQUAL;
}
-
Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left,
Register right,
bool needs_number_check,
@@ -1433,7 +1391,6 @@ Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left,
return EQUAL;
}
-
// This function must be in sync with FlowGraphCompiler::RecordSafepoint and
// FlowGraphCompiler::SlowPathEnvironmentFor.
void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
@@ -1447,13 +1404,11 @@ void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
locs->live_registers()->fpu_registers());
}
-
void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
__ PopRegisters(locs->live_registers()->cpu_registers(),
locs->live_registers()->fpu_registers());
}
-
#if defined(DEBUG)
void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
// Clobber temporaries that have not been manually preserved.
@@ -1468,7 +1423,6 @@ void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
}
#endif
-
void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
intptr_t argument_count,
const Array& arguments_descriptor) {
@@ -1478,19 +1432,16 @@ void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
__ LoadObject(R10, arguments_descriptor);
}
-
void FlowGraphCompiler::EmitTestAndCallSmiBranch(Label* label, bool if_smi) {
__ testq(RAX, Immediate(kSmiTagMask));
// Jump if receiver is (not) Smi.
__ j(if_smi ? ZERO : NOT_ZERO, label);
}
-
void FlowGraphCompiler::EmitTestAndCallLoadCid() {
__ LoadClassId(RDI, RAX);
}
-
int FlowGraphCompiler::EmitTestAndCallCheckCid(Label* next_label,
const CidRange& range,
int bias) {
@@ -1507,11 +1458,9 @@ int FlowGraphCompiler::EmitTestAndCallCheckCid(Label* next_label,
return bias;
}
-
#undef __
#define __ compiler_->assembler()->
-
void ParallelMoveResolver::EmitMove(int index) {
MoveOperands* move = moves_[index];
const Location source = move->src();
@@ -1605,7 +1554,6 @@ void ParallelMoveResolver::EmitMove(int index) {
move->Eliminate();
}
-
void ParallelMoveResolver::EmitSwap(int index) {
MoveOperands* move = moves_[index];
const Location source = move->src();
@@ -1681,35 +1629,29 @@ void ParallelMoveResolver::EmitSwap(int index) {
}
}
-
void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst,
const Address& src) {
__ MoveMemoryToMemory(dst, src);
}
-
void ParallelMoveResolver::StoreObject(const Address& dst, const Object& obj) {
__ StoreObject(dst, obj);
}
-
void ParallelMoveResolver::Exchange(Register reg, const Address& mem) {
__ Exchange(reg, mem);
}
-
void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) {
__ Exchange(mem1, mem2);
}
-
void ParallelMoveResolver::Exchange(Register reg,
Register base_reg,
intptr_t stack_offset) {
UNREACHABLE();
}
-
void ParallelMoveResolver::Exchange(Register base_reg1,
intptr_t stack_offset1,
Register base_reg2,
@@ -1717,29 +1659,24 @@ void ParallelMoveResolver::Exchange(Register base_reg1,
UNREACHABLE();
}
-
void ParallelMoveResolver::SpillScratch(Register reg) {
__ pushq(reg);
}
-
void ParallelMoveResolver::RestoreScratch(Register reg) {
__ popq(reg);
}
-
void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) {
__ AddImmediate(RSP, Immediate(-kFpuRegisterSize));
__ movups(Address(RSP, 0), reg);
}
-
void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) {
__ movups(reg, Address(RSP, 0));
__ AddImmediate(RSP, Immediate(kFpuRegisterSize));
}
-
#undef __
} // namespace dart