Index: runtime/vm/flow_graph_compiler_arm.cc
diff --git a/runtime/vm/flow_graph_compiler_arm.cc b/runtime/vm/flow_graph_compiler_arm.cc
index bc1ebced22ec273007d2e5c3b63e13fdbf75941e..738a2bb797fa719d51cf79d3c72620c15e0c0cc0 100644
--- a/runtime/vm/flow_graph_compiler_arm.cc
+++ b/runtime/vm/flow_graph_compiler_arm.cc
@@ -118,8 +118,7 @@ RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
 
   // For the innermost environment, set outgoing arguments and the locals.
   for (intptr_t i = current->Length() - 1;
-       i >= current->fixed_parameter_count();
-       i--) {
+       i >= current->fixed_parameter_count(); i--) {
     builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
   }
 
@@ -132,26 +131,21 @@ RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
 
     // For any outer environment the deopt id is that of the call instruction
     // which is recorded in the outer environment.
-    builder->AddReturnAddress(
-        current->function(),
-        Thread::ToDeoptAfter(current->deopt_id()),
-        slot_ix++);
+    builder->AddReturnAddress(current->function(),
+                              Thread::ToDeoptAfter(current->deopt_id()),
+                              slot_ix++);
 
     // The values of outgoing arguments can be changed from the inlined call so
     // we must read them from the previous environment.
     for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
-      builder->AddCopy(previous->ValueAt(i),
-                       previous->LocationAt(i),
+      builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
                        slot_ix++);
     }
 
     // Set the locals, note that outgoing arguments are not in the environment.
     for (intptr_t i = current->Length() - 1;
-         i >= current->fixed_parameter_count();
-         i--) {
-      builder->AddCopy(current->ValueAt(i),
-                       current->LocationAt(i),
-                       slot_ix++);
+         i >= current->fixed_parameter_count(); i--) {
+      builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
     }
 
     // Iterate on the outer environment.
@@ -288,7 +282,7 @@ FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
   const TypeArguments& type_arguments =
       TypeArguments::ZoneHandle(zone(), type.arguments());
   const bool is_raw_type = type_arguments.IsNull() ||
-      type_arguments.IsRaw(from_index, num_type_params);
+                           type_arguments.IsRaw(from_index, num_type_params);
   if (is_raw_type) {
     const Register kClassIdReg = R2;
     // dynamic type argument, check only classes.
@@ -304,8 +298,8 @@ FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
   }
   // If one type argument only, check if type argument is Object or dynamic.
   if (type_arguments.Length() == 1) {
-    const AbstractType& tp_argument = AbstractType::ZoneHandle(
-        zone(), type_arguments.TypeAt(0));
+    const AbstractType& tp_argument =
+        AbstractType::ZoneHandle(zone(), type_arguments.TypeAt(0));
     ASSERT(!tp_argument.IsMalformed());
     if (tp_argument.IsType()) {
       ASSERT(tp_argument.HasResolvedTypeClass());
@@ -323,12 +317,9 @@ FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
   const Register kTypeArgumentsReg = kNoRegister;
   const Register kTempReg = kNoRegister;
   // R0: instance (must be preserved).
-  return GenerateCallSubtypeTestStub(kTestTypeTwoArgs,
-                                     kInstanceReg,
-                                     kTypeArgumentsReg,
-                                     kTempReg,
-                                     is_instance_lbl,
-                                     is_not_instance_lbl);
+  return GenerateCallSubtypeTestStub(kTestTypeTwoArgs, kInstanceReg,
+                                     kTypeArgumentsReg, kTempReg,
+                                     is_instance_lbl, is_not_instance_lbl);
 }
 
 
@@ -367,11 +358,8 @@ bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
   __ tst(kInstanceReg, Operand(kSmiTagMask));
   // If instance is Smi, check directly.
   const Class& smi_class = Class::Handle(zone(), Smi::Class());
-  if (smi_class.IsSubtypeOf(TypeArguments::Handle(zone()),
-                            type_class,
-                            TypeArguments::Handle(zone()),
-                            NULL,
-                            NULL,
+  if (smi_class.IsSubtypeOf(TypeArguments::Handle(zone()), type_class,
+                            TypeArguments::Handle(zone()), NULL, NULL,
                             Heap::kOld)) {
     __ b(is_instance_lbl, EQ);
   } else {
@@ -391,8 +379,8 @@ bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
   // Custom checking for numbers (Smi, Mint, Bigint and Double).
   // Note that instance is not Smi (checked above).
   if (type.IsNumberType() || type.IsIntType() || type.IsDoubleType()) {
-    GenerateNumberTypeCheck(
-        kClassIdReg, type, is_instance_lbl, is_not_instance_lbl);
+    GenerateNumberTypeCheck(kClassIdReg, type, is_instance_lbl,
+                            is_not_instance_lbl);
     return false;
   }
   if (type.IsStringType()) {
@@ -439,12 +427,9 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
 
   const Register kTypeArgumentsReg = kNoRegister;
   const Register kTempReg = kNoRegister;
-  return GenerateCallSubtypeTestStub(kTestTypeOneArg,
-                                     kInstanceReg,
-                                     kTypeArgumentsReg,
-                                     kTempReg,
-                                     is_instance_lbl,
-                                     is_not_instance_lbl);
+  return GenerateCallSubtypeTestStub(kTestTypeOneArg, kInstanceReg,
+                                     kTypeArgumentsReg, kTempReg,
+                                     is_instance_lbl, is_not_instance_lbl);
 }
 
 
@@ -467,7 +452,7 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
     __ CompareObject(R1, Object::null_object());
     __ b(is_instance_lbl, EQ);
     __ ldr(R2,
-        FieldAddress(R1, TypeArguments::type_at_offset(type_param.index())));
+           FieldAddress(R1, TypeArguments::type_at_offset(type_param.index())));
     // R2: concrete type of type.
     // Check if type argument is dynamic.
     __ CompareObject(R2, Object::dynamic_type());
@@ -493,14 +478,10 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
     const Register kInstanceReg = R0;
     const Register kTypeArgumentsReg = R1;
     const Register kTempReg = kNoRegister;
-    const SubtypeTestCache& type_test_cache =
-        SubtypeTestCache::ZoneHandle(zone(),
-            GenerateCallSubtypeTestStub(kTestTypeThreeArgs,
-                                        kInstanceReg,
-                                        kTypeArgumentsReg,
-                                        kTempReg,
-                                        is_instance_lbl,
-                                        is_not_instance_lbl));
+    const SubtypeTestCache& type_test_cache = SubtypeTestCache::ZoneHandle(
+        zone(), GenerateCallSubtypeTestStub(
+                    kTestTypeThreeArgs, kInstanceReg, kTypeArgumentsReg,
+                    kTempReg, is_instance_lbl, is_not_instance_lbl));
     __ Bind(&fall_through);
     return type_test_cache.raw();
   }
@@ -513,12 +494,9 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
     // Uninstantiated type class is known at compile time, but the type
     // arguments are determined at runtime by the instantiator.
     const Register kTempReg = kNoRegister;
-    return GenerateCallSubtypeTestStub(kTestTypeThreeArgs,
-                                       kInstanceReg,
-                                       kTypeArgumentsReg,
-                                       kTempReg,
-                                       is_instance_lbl,
-                                       is_not_instance_lbl);
+    return GenerateCallSubtypeTestStub(kTestTypeThreeArgs, kInstanceReg,
+                                       kTypeArgumentsReg, kTempReg,
+                                       is_instance_lbl, is_not_instance_lbl);
   }
   return SubtypeTestCache::null();
 }
@@ -550,17 +528,12 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateInlineInstanceof(
   // function type) of a non-parameterized class or with a raw dst type of
   // a parameterized class.
   if (type.IsFunctionType() || (type_class.NumTypeArguments() > 0)) {
-    return GenerateInstantiatedTypeWithArgumentsTest(token_pos,
-                                                     type,
-                                                     is_instance_lbl,
-                                                     is_not_instance_lbl);
+    return GenerateInstantiatedTypeWithArgumentsTest(
+        token_pos, type, is_instance_lbl, is_not_instance_lbl);
     // Fall through to runtime call.
   }
-  const bool has_fall_through =
-      GenerateInstantiatedTypeNoArgumentsTest(token_pos,
-                                              type,
-                                              is_instance_lbl,
-                                              is_not_instance_lbl);
+  const bool has_fall_through = GenerateInstantiatedTypeNoArgumentsTest(
+      token_pos, type, is_instance_lbl, is_not_instance_lbl);
   if (has_fall_through) {
     // If test non-conclusive so far, try the inlined type-test cache.
     // 'type' is known at compile time.
@@ -570,9 +543,7 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateInlineInstanceof(
       return SubtypeTestCache::null();
     }
   }
-  return GenerateUninstantiatedTypeTest(token_pos,
-                                        type,
-                                        is_instance_lbl,
+  return GenerateUninstantiatedTypeTest(token_pos, type, is_instance_lbl,
                                         is_not_instance_lbl);
 }
 
@@ -614,8 +585,8 @@ void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos,
 
   // Generate inline instanceof test.
   SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone());
-  test_cache = GenerateInlineInstanceof(token_pos, type,
-                                        &is_instance, &is_not_instance);
+  test_cache =
+      GenerateInlineInstanceof(token_pos, type, &is_instance, &is_not_instance);
 
   // test_cache is null if there is no fall-through.
   Label done;
@@ -624,8 +595,8 @@ void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos,
     // Load instantiator type arguments (R1).
     __ ldr(R1, Address(SP, 0 * kWordSize));
     __ PushObject(Object::null_object());  // Make room for the result.
-    __ Push(R0);  // Push the instance.
-    __ PushObject(type);  // Push the type.
+    __ Push(R0);                           // Push the instance.
+    __ PushObject(type);                   // Push the type.
     __ Push(R1);  // Push instantiator type arguments (R1).
     __ LoadUniqueObject(R0, test_cache);
     __ Push(R0);
@@ -688,13 +659,10 @@ void FlowGraphCompiler::GenerateAssertAssignable(TokenPosition token_pos,
   // Generate throw new TypeError() if the type is malformed or malbounded.
   if (dst_type.IsMalformedOrMalbounded()) {
     __ PushObject(Object::null_object());  // Make room for the result.
-    __ Push(R0);  // Push the source object.
-    __ PushObject(dst_name);  // Push the name of the destination.
-    __ PushObject(dst_type);  // Push the type of the destination.
-    GenerateRuntimeCall(token_pos,
-                        deopt_id,
-                        kBadTypeErrorRuntimeEntry,
-                        3,
+    __ Push(R0);              // Push the source object.
+    __ PushObject(dst_name);  // Push the name of the destination.
+    __ PushObject(dst_type);  // Push the type of the destination.
+    GenerateRuntimeCall(token_pos, deopt_id, kBadTypeErrorRuntimeEntry, 3,
                         locs);
     // We should never return here.
     __ bkpt(0);
@@ -707,16 +675,16 @@ void FlowGraphCompiler::GenerateAssertAssignable(TokenPosition token_pos,
 
   // Generate inline type check, linking to runtime call if not assignable.
   SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone());
-  test_cache = GenerateInlineInstanceof(token_pos, dst_type,
-                                        &is_assignable, &runtime_call);
+  test_cache = GenerateInlineInstanceof(token_pos, dst_type, &is_assignable,
+                                        &runtime_call);
 
   __ Bind(&runtime_call);
   // Load instantiator type arguments (R1).
   __ ldr(R1, Address(SP, 0 * kWordSize));
   __ PushObject(Object::null_object());  // Make room for the result.
-  __ Push(R0);  // Push the source object.
-  __ PushObject(dst_type);  // Push the type of the destination.
-  __ Push(R1);  // Push instantiator type arguments (R1).
+  __ Push(R0);              // Push the source object.
+  __ PushObject(dst_type);  // Push the type of the destination.
+  __ Push(R1);              // Push instantiator type arguments (R1).
   __ PushObject(dst_name);  // Push the name of the destination.
   __ LoadUniqueObject(R0, test_cache);
   __ Push(R0);
@@ -805,9 +773,9 @@ void FlowGraphCompiler::CopyParameters() {
   // Copy or initialize optional named arguments.
   Label all_arguments_processed;
 #ifdef DEBUG
-    const bool check_correct_named_args = true;
+  const bool check_correct_named_args = true;
 #else
-    const bool check_correct_named_args = function.IsClosureFunction();
+  const bool check_correct_named_args = function.IsClosureFunction();
 #endif
   if (num_opt_named_params > 0) {
     // Start by alphabetically sorting the names of the optional parameters.
@@ -838,8 +806,8 @@ void FlowGraphCompiler::CopyParameters() {
     __ add(NOTFP, FP, Operand(NOTFP, LSL, 1));
     __ AddImmediate(NOTFP, NOTFP, kParamEndSlotFromFp * kWordSize);
     // Let R8 point to the entry of the first named argument.
-    __ add(R8, R4, Operand(
-        ArgumentsDescriptor::first_named_entry_offset() - kHeapObjectTag));
+    __ add(R8, R4, Operand(ArgumentsDescriptor::first_named_entry_offset() -
+                           kHeapObjectTag));
     for (int i = 0; i < num_opt_named_params; i++) {
       Label load_default_value, assign_optional_parameter;
       const int param_pos = opt_param_position[i];
@@ -978,8 +946,7 @@ static const Register new_pp = NOTFP;
 
 void FlowGraphCompiler::EmitFrameEntry() {
   const Function& function = parsed_function().function();
-  if (CanOptimizeFunction() &&
-      function.IsOptimizable() &&
+  if (CanOptimizeFunction() && function.IsOptimizable() &&
       (!is_optimizing() || may_reoptimize())) {
     __ Comment("Invocation Count Check");
     const Register function_reg = R8;
@@ -989,14 +956,12 @@ void FlowGraphCompiler::EmitFrameEntry() {
     // Load function object from object pool.
     __ LoadFunctionFromCalleePool(function_reg, function, new_pp);
 
-    __ ldr(R3, FieldAddress(function_reg,
-                            Function::usage_counter_offset()));
+    __ ldr(R3, FieldAddress(function_reg, Function::usage_counter_offset()));
     // Reoptimization of an optimized function is triggered by counting in
     // IC stubs, but not at the entry of the function.
     if (!is_optimizing()) {
       __ add(R3, R3, Operand(1));
-      __ str(R3, FieldAddress(function_reg,
-                              Function::usage_counter_offset()));
+      __ str(R3, FieldAddress(function_reg, Function::usage_counter_offset()));
     }
     __ CompareImmediate(R3, GetOptimizationThreshold());
     ASSERT(function_reg == R8);
@@ -1004,9 +969,8 @@ void FlowGraphCompiler::EmitFrameEntry() {
   }
   __ Comment("Enter frame");
   if (flow_graph().IsCompiledForOsr()) {
-    intptr_t extra_slots = StackSize()
-        - flow_graph().num_stack_locals()
-        - flow_graph().num_copied_params();
+    intptr_t extra_slots = StackSize() - flow_graph().num_stack_locals() -
+                           flow_graph().num_copied_params();
     ASSERT(extra_slots >= 0);
     __ EnterOsrFrame(extra_slots * kWordSize);
   } else {
@@ -1059,8 +1023,8 @@ void FlowGraphCompiler::CompileGraph() {
       __ ldr(R0, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
       __ CompareImmediate(R0, Smi::RawValue(num_fixed_params));
       __ b(&wrong_num_arguments, NE);
-      __ ldr(R1, FieldAddress(R4,
-                              ArgumentsDescriptor::positional_count_offset()));
+      __ ldr(R1,
+             FieldAddress(R4, ArgumentsDescriptor::positional_count_offset()));
       __ cmp(R0, Operand(R1));
       __ b(&correct_num_arguments, EQ);
       __ Bind(&wrong_num_arguments);
@@ -1146,8 +1110,7 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
   } else {
     // Add deoptimization continuation point after the call and before the
     // arguments are removed.
-    AddCurrentDescriptor(RawPcDescriptors::kDeopt,
-                         deopt_id_after, token_pos);
+    AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
   }
 }
 
@@ -1174,8 +1137,7 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
   } else {
     // Add deoptimization continuation point after the call and before the
     // arguments are removed.
-    AddCurrentDescriptor(RawPcDescriptors::kDeopt,
-                         deopt_id_after, token_pos);
+    AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
   }
   AddStaticCallTarget(target);
 }
@@ -1198,9 +1160,7 @@ void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,
     } else {
       // Add deoptimization continuation point after the call and before the
       // arguments are removed.
-      AddCurrentDescriptor(RawPcDescriptors::kDeopt,
-                           deopt_id_after,
-                           token_pos);
+      AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
     }
   }
 }
@@ -1229,13 +1189,12 @@ void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
 }
 
 
-void FlowGraphCompiler::EmitOptimizedInstanceCall(
-    const StubEntry& stub_entry,
-    const ICData& ic_data,
-    intptr_t argument_count,
-    intptr_t deopt_id,
-    TokenPosition token_pos,
-    LocationSummary* locs) {
+void FlowGraphCompiler::EmitOptimizedInstanceCall(const StubEntry& stub_entry,
+                                                  const ICData& ic_data,
+                                                  intptr_t argument_count,
+                                                  intptr_t deopt_id,
+                                                  TokenPosition token_pos,
+                                                  LocationSummary* locs) {
   ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
   // Each ICData propagated from unoptimized to optimized code contains the
   // function that corresponds to the Dart function of that IC call. Due
@@ -1246,10 +1205,7 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(
 
   __ LoadObject(R8, parsed_function().function());
   __ LoadUniqueObject(R9, ic_data);
-  GenerateDartCall(deopt_id,
-                   token_pos,
-                   stub_entry,
-                   RawPcDescriptors::kIcCall,
+  GenerateDartCall(deopt_id, token_pos, stub_entry, RawPcDescriptors::kIcCall,
                    locs);
   __ Drop(argument_count);
 }
@@ -1263,10 +1219,7 @@ void FlowGraphCompiler::EmitInstanceCall(const StubEntry& stub_entry,
                                          LocationSummary* locs) {
   ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
   __ LoadUniqueObject(R9, ic_data);
-  GenerateDartCall(deopt_id,
-                   token_pos,
-                   stub_entry,
-                   RawPcDescriptors::kIcCall,
+  GenerateDartCall(deopt_id, token_pos, stub_entry, RawPcDescriptors::kIcCall,
                    locs);
   __ Drop(argument_count);
 }
@@ -1284,7 +1237,8 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
   const Array& arguments_descriptor =
       Array::ZoneHandle(zone(), ic_data.arguments_descriptor());
   ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
-  const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(zone(),
+  const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
+      zone(),
       MegamorphicCacheTable::Lookup(isolate(), name, arguments_descriptor));
 
   __ Comment("MegamorphicCall");
@@ -1306,7 +1260,7 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
 
     ASSERT(Smi::New(0) == 0);
     __ cmp(R0, Operand(0));
-    __ b(&done, NE);  // Return if already computed.
+    __ b(&done, NE);          // Return if already computed.
     __ mov(R0, Operand(R9));  // Restore receiver in R0.
 
     __ Bind(&megamorphic_call);
@@ -1325,36 +1279,32 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
     if (try_index == CatchClauseNode::kInvalidTryIndex) {
       try_index = CurrentTryIndex();
     }
-    pc_descriptors_list()->AddDescriptor(RawPcDescriptors::kOther,
-                                         assembler()->CodeSize(),
-                                         Thread::kNoDeoptId,
-                                         token_pos,
-                                         try_index);
+    pc_descriptors_list()->AddDescriptor(
+        RawPcDescriptors::kOther, assembler()->CodeSize(), Thread::kNoDeoptId,
+        token_pos, try_index);
   } else if (is_optimizing()) {
-    AddCurrentDescriptor(RawPcDescriptors::kOther,
-                         Thread::kNoDeoptId, token_pos);
+    AddCurrentDescriptor(RawPcDescriptors::kOther, Thread::kNoDeoptId,
+                         token_pos);
     AddDeoptIndexAtCall(deopt_id_after);
   } else {
-    AddCurrentDescriptor(RawPcDescriptors::kOther,
-                         Thread::kNoDeoptId, token_pos);
+    AddCurrentDescriptor(RawPcDescriptors::kOther, Thread::kNoDeoptId,
+                         token_pos);
     // Add deoptimization continuation point after the call and before the
     // arguments are removed.
-    AddCurrentDescriptor(RawPcDescriptors::kDeopt,
-                         deopt_id_after, token_pos);
+    AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
   }
   __ Drop(argument_count);
 }
 
 
-void FlowGraphCompiler::EmitSwitchableInstanceCall(
-    const ICData& ic_data,
-    intptr_t argument_count,
-    intptr_t deopt_id,
-    TokenPosition token_pos,
-    LocationSummary* locs) {
+void FlowGraphCompiler::EmitSwitchableInstanceCall(const ICData& ic_data,
+                                                   intptr_t argument_count,
+                                                   intptr_t deopt_id,
+                                                   TokenPosition token_pos,
+                                                   LocationSummary* locs) {
   ASSERT(ic_data.NumArgsTested() == 1);
-  const Code& initial_stub = Code::ZoneHandle(
-      StubCode::ICCallThroughFunction_entry()->code());
+  const Code& initial_stub =
+      Code::ZoneHandle(StubCode::ICCallThroughFunction_entry()->code());
 
   __ Comment("SwitchableCall");
 
@@ -1372,27 +1322,22 @@ void FlowGraphCompiler::EmitSwitchableInstanceCall(
   } else {
     // Add deoptimization continuation point after the call and before the
     // arguments are removed.
-    AddCurrentDescriptor(RawPcDescriptors::kDeopt,
-                         deopt_id_after, token_pos);
+    AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
   }
   __ Drop(argument_count);
 }
 
 
-void FlowGraphCompiler::EmitUnoptimizedStaticCall(
-    intptr_t argument_count,
-    intptr_t deopt_id,
-    TokenPosition token_pos,
-    LocationSummary* locs,
-    const ICData& ic_data) {
+void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t argument_count,
+                                                  intptr_t deopt_id,
+                                                  TokenPosition token_pos,
+                                                  LocationSummary* locs,
+                                                  const ICData& ic_data) {
   const StubEntry* stub_entry =
       StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
   __ LoadObject(R9, ic_data);
-  GenerateDartCall(deopt_id,
-                   token_pos,
-                   *stub_entry,
-                   RawPcDescriptors::kUnoptStaticCall,
-                   locs);
+  GenerateDartCall(deopt_id, token_pos, *stub_entry,
+                   RawPcDescriptors::kUnoptStaticCall, locs);
   __ Drop(argument_count);
 }
 
@@ -1412,12 +1357,9 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
   }
   // Do not use the code from the function, but let the code be patched so that
   // we can record the outgoing edges to other code.
-  GenerateStaticDartCall(deopt_id,
-                         token_pos,
+  GenerateStaticDartCall(deopt_id, token_pos,
                          *StubCode::CallStaticFunction_entry(),
-                         RawPcDescriptors::kOther,
-                         locs,
-                         function);
+                         RawPcDescriptors::kOther, locs, function);
   __ Drop(argument_count);
 }
 
@@ -1439,12 +1381,11 @@ Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
           *StubCode::UnoptimizedIdenticalWithNumberCheck_entry());
     }
     if (token_pos.IsReal()) {
-      AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall,
-                           Thread::kNoDeoptId,
+      AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, Thread::kNoDeoptId,
                            token_pos);
     }
     // Stub returns result in flags (result of a cmp, we need Z computed).
-    __ Drop(1);  // Discard constant.
+    __ Drop(1);   // Discard constant.
     __ Pop(reg);  // Restore 'reg'.
   } else {
     __ CompareObject(reg, obj);
@@ -1469,8 +1410,7 @@ Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
           *StubCode::UnoptimizedIdenticalWithNumberCheck_entry());
     }
     if (token_pos.IsReal()) {
-      AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall,
-                           Thread::kNoDeoptId,
+      AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, Thread::kNoDeoptId,
                            token_pos);
     }
     // Stub returns result in flags (result of a cmp, we need Z computed).
@@ -1587,9 +1527,8 @@ void FlowGraphCompiler::EmitTestAndCall(const ICData& ic_data,
                                         bool complete) {
   ASSERT(is_optimizing());
   __ Comment("EmitTestAndCall");
-  const Array& arguments_descriptor =
-      Array::ZoneHandle(zone(), ArgumentsDescriptor::New(argument_count,
-                                                         argument_names));
+  const Array& arguments_descriptor = Array::ZoneHandle(
+      zone(), ArgumentsDescriptor::New(argument_count, argument_names));
 
   // Load receiver into R0.
   __ LoadFromOffset(kWord, R0, SP, (argument_count - 1) * kWordSize);
@@ -1611,14 +1550,11 @@ void FlowGraphCompiler::EmitTestAndCall(const ICData& ic_data,
     }
     // Do not use the code from the function, but let the code be patched so
     // that we can record the outgoing edges to other code.
-    const Function& function = Function::ZoneHandle(
-        zone(), ic_data.GetTargetAt(0));
-    GenerateStaticDartCall(deopt_id,
-                           token_index,
+    const Function& function =
+        Function::ZoneHandle(zone(), ic_data.GetTargetAt(0));
+    GenerateStaticDartCall(deopt_id, token_index,
                            *StubCode::CallStaticFunction_entry(),
-                           RawPcDescriptors::kOther,
-                           locs,
-                           function);
+                           RawPcDescriptors::kOther, locs, function);
     __ Drop(argument_count);
     if (kNumChecks > 1) {
       __ b(match_found);
@@ -1664,12 +1600,9 @@ void FlowGraphCompiler::EmitTestAndCall(const ICData& ic_data,
     // Do not use the code from the function, but let the code be patched so
     // that we can record the outgoing edges to other code.
    const Function& function = *sorted[i].target;
-    GenerateStaticDartCall(deopt_id,
-                           token_index,
+    GenerateStaticDartCall(deopt_id, token_index,
                            *StubCode::CallStaticFunction_entry(),
-                           RawPcDescriptors::kOther,
-                           locs,
-                           function);
+                           RawPcDescriptors::kOther, locs, function);
     __ Drop(argument_count);
     if (!kIsLastCheck) {
       __ b(match_found);
@@ -1694,14 +1627,14 @@ void ParallelMoveResolver::EmitMove(int index) {
     } else {
       ASSERT(destination.IsStackSlot());
       const intptr_t dest_offset = destination.ToStackSlotOffset();
-      __ StoreToOffset(
-          kWord, source.reg(), destination.base_reg(), dest_offset);
+      __ StoreToOffset(kWord, source.reg(), destination.base_reg(),
+                       dest_offset);
     }
   } else if (source.IsStackSlot()) {
     if (destination.IsRegister()) {
       const intptr_t source_offset = source.ToStackSlotOffset();
-      __ LoadFromOffset(
-          kWord, destination.reg(), source.base_reg(), source_offset);
+      __ LoadFromOffset(kWord, destination.reg(), source.base_reg(),
+                        source_offset);
     } else {
       ASSERT(destination.IsStackSlot());
       const intptr_t source_offset = source.ToStackSlotOffset();
@@ -1728,8 +1661,8 @@ void ParallelMoveResolver::EmitMove(int index) {
         ASSERT(destination.IsQuadStackSlot());
         const intptr_t dest_offset = destination.ToStackSlotOffset();
         const DRegister dsrc0 = EvenDRegisterOf(source.fpu_reg());
-        __ StoreMultipleDToOffset(
-            dsrc0, 2, destination.base_reg(), dest_offset);
+        __ StoreMultipleDToOffset(dsrc0, 2, destination.base_reg(),
+                                  dest_offset);
       }
     }
   } else if (source.IsDoubleStackSlot()) {
@@ -1816,11 +1749,10 @@ void ParallelMoveResolver::EmitSwap(int index) {
     __ mov(source.reg(), Operand(destination.reg()));
     __ mov(destination.reg(), Operand(IP));
   } else if (source.IsRegister() && destination.IsStackSlot()) {
-    Exchange(source.reg(),
-             destination.base_reg(), destination.ToStackSlotOffset());
+    Exchange(source.reg(), destination.base_reg(),
+             destination.ToStackSlotOffset());
   } else if (source.IsStackSlot() && destination.IsRegister()) {
-    Exchange(destination.reg(),
-             source.base_reg(), source.ToStackSlotOffset());
+    Exchange(destination.reg(), source.base_reg(), source.ToStackSlotOffset());
   } else if (source.IsStackSlot() && destination.IsStackSlot()) {
     Exchange(source.base_reg(), source.ToStackSlotOffset(),
              destination.base_reg(), destination.ToStackSlotOffset());
@@ -1839,21 +1771,18 @@ void ParallelMoveResolver::EmitSwap(int index) {
       __ vmovd(dst, DTMP);
     }
   } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
-    ASSERT(destination.IsDoubleStackSlot() ||
-           destination.IsQuadStackSlot() ||
-           source.IsDoubleStackSlot() ||
-           source.IsQuadStackSlot());
-    bool double_width = destination.IsDoubleStackSlot() ||
-                        source.IsDoubleStackSlot();
-    QRegister qreg = source.IsFpuRegister() ? source.fpu_reg()
-                                            : destination.fpu_reg();
+    ASSERT(destination.IsDoubleStackSlot() || destination.IsQuadStackSlot() ||
+           source.IsDoubleStackSlot() || source.IsQuadStackSlot());
+    bool double_width =
+        destination.IsDoubleStackSlot() || source.IsDoubleStackSlot();
+    QRegister qreg =
+        source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg();
     DRegister reg = EvenDRegisterOf(qreg);
-    Register base_reg = source.IsFpuRegister()
-                        ? destination.base_reg()
-                        : source.base_reg();
+    Register base_reg =
+        source.IsFpuRegister() ? destination.base_reg() : source.base_reg();
     const intptr_t slot_offset = source.IsFpuRegister()
-        ? destination.ToStackSlotOffset()
-        : source.ToStackSlotOffset();
+                                     ? destination.ToStackSlotOffset()
+                                     : source.ToStackSlotOffset();
 
     if (double_width) {
       __ LoadDFromOffset(DTMP, base_reg, slot_offset);
@@ -1883,8 +1812,8 @@ void ParallelMoveResolver::EmitSwap(int index) {
     __ LoadMultipleDFromOffset(DTMP, 2, source.base_reg(), source_offset);
     __ LoadMultipleDFromOffset(scratch, 2, destination.base_reg(), dest_offset);
     __ StoreMultipleDToOffset(DTMP, 2, destination.base_reg(), dest_offset);
-    __ StoreMultipleDToOffset(
-        scratch, 2, destination.base_reg(), source_offset);
+    __ StoreMultipleDToOffset(scratch, 2, destination.base_reg(),
+                              source_offset);
   } else {
     UNREACHABLE();
   }