Chromium Code Reviews

Unified Diff: runtime/vm/stub_code_arm.cc

Issue 2974233002: VM: Re-format to use at most one newline between functions (Closed)
Patch Set: Rebase and merge (created 3 years, 5 months ago)
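
For context, this CL collapses runs of blank lines so that top-level definitions are separated by at most one blank line; every "-" hunk below removes one of the two blank lines that previously sat between functions. A minimal sketch of the before/after style, using hypothetical helper functions rather than code from this file (the suggestion that this matches clang-format's MaxEmptyLinesToKeep: 1 behavior is an assumption, not something stated in the CL):

    // Before: two blank lines separate top-level definitions.
    static int Foo() {  // hypothetical helper, for illustration only
      return 1;
    }


    static int Bar() {  // hypothetical helper, for illustration only
      return 2;
    }

    // After: at most one blank line separates top-level definitions.
    static int Foo() {
      return 1;
    }

    static int Bar() {
      return 2;
    }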
Index: runtime/vm/stub_code_arm.cc
diff --git a/runtime/vm/stub_code_arm.cc b/runtime/vm/stub_code_arm.cc
index 14fe95c825941b3af74f1d20de70111b6b6f4d30..c94c9a134b41c0a7cb1245a5735d967383b8ed0f 100644
--- a/runtime/vm/stub_code_arm.cc
+++ b/runtime/vm/stub_code_arm.cc
@@ -103,14 +103,12 @@ void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) {
__ Ret();
}
-
// Print the stop message.
DEFINE_LEAF_RUNTIME_ENTRY(void, PrintStopMessage, 1, const char* message) {
OS::Print("Stop message: %s\n", message);
}
END_LEAF_RUNTIME_ENTRY
-
// Input parameters:
// R0 : stop message (const char*).
// Must preserve all registers.
@@ -122,7 +120,6 @@ void StubCode::GeneratePrintStopMessageStub(Assembler* assembler) {
__ Ret();
}
-
// Input parameters:
// LR : return address.
// SP : address of return value.
@@ -205,21 +202,18 @@ static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
__ Ret();
}
-
void StubCode::GenerateCallNoScopeNativeStub(Assembler* assembler) {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR, Thread::no_scope_native_wrapper_entry_point_offset()));
}
-
void StubCode::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR, Thread::auto_scope_native_wrapper_entry_point_offset()));
}
-
// Input parameters:
// LR : return address.
// SP : address of return value.
@@ -298,7 +292,6 @@ void StubCode::GenerateCallBootstrapNativeStub(Assembler* assembler) {
__ Ret();
}
-
// Input parameters:
// R4: arguments descriptor array.
void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) {
@@ -319,7 +312,6 @@ void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) {
__ bx(R0);
}
-
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
// R4: arguments descriptor array.
@@ -345,7 +337,6 @@ void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) {
__ bx(R0);
}
-
// Called from object allocate instruction when the allocation stub has been
// disabled.
void StubCode::GenerateFixAllocationStubTargetStub(Assembler* assembler) {
@@ -368,7 +359,6 @@ void StubCode::GenerateFixAllocationStubTargetStub(Assembler* assembler) {
__ bx(R0);
}
-
// Input parameters:
// R2: smi-tagged argument count, may be zero.
// FP[kParamEndSlotFromFp + 1]: last argument.
@@ -398,7 +388,6 @@ static void PushArgumentsArray(Assembler* assembler) {
__ b(&loop, PL);
}
-
// Used by eager and lazy deoptimization. Preserve result in R0 if necessary.
// This stub translates optimized frame into unoptimized frame. The optimized
// frame can contain values in registers and on stack, the unoptimized
@@ -555,7 +544,6 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// The caller is responsible for emitting the return instruction.
}
-
// R0: result, must be preserved
void StubCode::GenerateDeoptimizeLazyFromReturnStub(Assembler* assembler) {
// Push zap value instead of CODE_REG for lazy deopt.
@@ -568,7 +556,6 @@ void StubCode::GenerateDeoptimizeLazyFromReturnStub(Assembler* assembler) {
__ Ret();
}
-
// R0: exception, must be preserved
// R1: stacktrace, must be preserved
void StubCode::GenerateDeoptimizeLazyFromThrowStub(Assembler* assembler) {
@@ -582,13 +569,11 @@ void StubCode::GenerateDeoptimizeLazyFromThrowStub(Assembler* assembler) {
__ Ret();
}
-
void StubCode::GenerateDeoptimizeStub(Assembler* assembler) {
GenerateDeoptimizationSequence(assembler, kEagerDeopt);
__ Ret();
}
-
static void GenerateDispatcherCode(Assembler* assembler,
Label* call_target_function) {
__ Comment("NoSuchMethodDispatch");
@@ -622,7 +607,6 @@ static void GenerateDispatcherCode(Assembler* assembler,
__ Ret();
}
-
void StubCode::GenerateMegamorphicMissStub(Assembler* assembler) {
__ EnterStubFrame();
@@ -662,7 +646,6 @@ void StubCode::GenerateMegamorphicMissStub(Assembler* assembler) {
__ bx(R2);
}
-
// Called for inline allocation of arrays.
// Input parameters:
// LR: return address.
@@ -785,7 +768,6 @@ void StubCode::GenerateAllocateArrayStub(Assembler* assembler) {
__ Ret();
}
-
// Called when invoking Dart code from C++ (VM code).
// Input parameters:
// LR : points to return address.
@@ -906,7 +888,6 @@ void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) {
__ Ret();
}
-
// Called for inline allocation of contexts.
// Input:
// R1: number of context variables.
@@ -1028,7 +1009,6 @@ void StubCode::GenerateAllocateContextStub(Assembler* assembler) {
__ Ret();
}
-
// Helper stub to implement Assembler::StoreIntoObject.
// Input parameters:
// R0: address (i.e. object) being stored into.
@@ -1101,7 +1081,6 @@ void StubCode::GenerateUpdateStoreBufferStub(Assembler* assembler) {
__ Ret();
}
-
// Called for inline allocation of objects.
// Input parameters:
// LR : return address.
@@ -1238,7 +1217,6 @@ void StubCode::GenerateAllocationStubForClass(Assembler* assembler,
__ Ret();
}
-
// Called for invoking "dynamic noSuchMethod(Invocation invocation)" function
// from the entry code of a dart function after an error in passed argument
// name or number is detected.
@@ -1274,7 +1252,6 @@ void StubCode::GenerateCallClosureNoSuchMethodStub(Assembler* assembler) {
__ bkpt(0);
}
-
// R8: function object.
// R9: inline cache data object.
// Cannot use function object from ICData as it may be the inlined
@@ -1297,7 +1274,6 @@ void StubCode::GenerateOptimizedUsageCounterIncrement(Assembler* assembler) {
__ str(NOTFP, FieldAddress(func_reg, Function::usage_counter_offset()));
}
-
// Loads function into 'temp_reg'.
void StubCode::GenerateUsageCounterIncrement(Assembler* assembler,
Register temp_reg) {
@@ -1313,7 +1289,6 @@ void StubCode::GenerateUsageCounterIncrement(Assembler* assembler,
}
}
-
// Note: R9 must be preserved.
// Attempt a quick Smi operation for known operations ('kind'). The ICData
// must have been primed with a Smi/Smi check that will be used for counting
@@ -1377,7 +1352,6 @@ static void EmitFastSmiOp(Assembler* assembler,
__ Ret();
}
-
// Generate inline cache check for 'num_args'.
// LR: return address.
// R9: inline cache data object.
@@ -1552,7 +1526,6 @@ void StubCode::GenerateNArgsCheckInlineCacheStub(
}
}
-
// Use inline cache data array to invoke the target or continue in inline
// cache miss handler. Stub for 1-argument check (receiver class).
// LR: return address.
@@ -1569,7 +1542,6 @@ void StubCode::GenerateOneArgCheckInlineCacheStub(Assembler* assembler) {
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL);
}
-
void StubCode::GenerateTwoArgsCheckInlineCacheStub(Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, R8);
GenerateNArgsCheckInlineCacheStub(assembler, 2,
@@ -1577,28 +1549,24 @@ void StubCode::GenerateTwoArgsCheckInlineCacheStub(Assembler* assembler) {
Token::kILLEGAL);
}
-
void StubCode::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, R8);
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD);
}
-
void StubCode::GenerateSmiSubInlineCacheStub(Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, R8);
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kSUB);
}
-
void StubCode::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, R8);
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ);
}
-
void StubCode::GenerateOneArgOptimizedCheckInlineCacheStub(
Assembler* assembler) {
GenerateOptimizedUsageCounterIncrement(assembler);
@@ -1607,7 +1575,6 @@ void StubCode::GenerateOneArgOptimizedCheckInlineCacheStub(
Token::kILLEGAL, true /* optimized */);
}
-
void StubCode::GenerateTwoArgsOptimizedCheckInlineCacheStub(
Assembler* assembler) {
GenerateOptimizedUsageCounterIncrement(assembler);
@@ -1616,7 +1583,6 @@ void StubCode::GenerateTwoArgsOptimizedCheckInlineCacheStub(
Token::kILLEGAL, true /* optimized */);
}
-
// Intermediary stub between a static call and its target. ICData contains
// the target function and the call count.
// R9: ICData
@@ -1683,21 +1649,18 @@ void StubCode::GenerateZeroArgsUnoptimizedStaticCallStub(Assembler* assembler) {
}
}
-
void StubCode::GenerateOneArgUnoptimizedStaticCallStub(Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, R8);
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL);
}
-
void StubCode::GenerateTwoArgsUnoptimizedStaticCallStub(Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, R8);
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL);
}
-
// Stub for compiling a function and jumping to the compiled code.
// R9: IC-Data (for methods).
// R4: Arguments descriptor.
@@ -1717,7 +1680,6 @@ void StubCode::GenerateLazyCompileStub(Assembler* assembler) {
__ bx(R2);
}
-
// R9: Contains an ICData.
void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) {
__ EnterStubFrame();
@@ -1732,7 +1694,6 @@ void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) {
__ bx(R0);
}
-
void StubCode::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
__ EnterStubFrame();
__ LoadImmediate(R0, 0);
@@ -1745,7 +1706,6 @@ void StubCode::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
__ bx(R0);
}
-
// Called only from unoptimized code. All relevant registers have been saved.
void StubCode::GenerateDebugStepCheckStub(Assembler* assembler) {
// Check single stepping.
@@ -1764,7 +1724,6 @@ void StubCode::GenerateDebugStepCheckStub(Assembler* assembler) {
__ b(&done_stepping);
}
-
// Used to check class and type arguments. Arguments passed in registers:
// LR: return address.
// R0: instance (must be preserved).
@@ -1850,7 +1809,6 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
__ Ret();
}
-
// Used to check class and type arguments. Arguments passed in registers:
// LR: return address.
// R0: instance (must be preserved).
@@ -1862,7 +1820,6 @@ void StubCode::GenerateSubtype1TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 1);
}
-
// Used to check class and type arguments. Arguments passed in registers:
// LR: return address.
// R0: instance (must be preserved).
@@ -1874,7 +1831,6 @@ void StubCode::GenerateSubtype2TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 2);
}
-
// Used to check class and type arguments. Arguments passed in registers:
// LR: return address.
// R0: instance (must be preserved).
@@ -1886,14 +1842,12 @@ void StubCode::GenerateSubtype4TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 4);
}
-
// Return the current stack pointer address, used to do stack alignment checks.
void StubCode::GenerateGetCStackPointerStub(Assembler* assembler) {
__ mov(R0, Operand(SP));
__ Ret();
}
-
// Jump to a frame on the call stack.
// LR: return address.
// R0: program_counter.
@@ -1904,11 +1858,11 @@ void StubCode::GenerateGetCStackPointerStub(Assembler* assembler) {
void StubCode::GenerateJumpToFrameStub(Assembler* assembler) {
ASSERT(kExceptionObjectReg == R0);
ASSERT(kStackTraceObjectReg == R1);
- __ mov(IP, Operand(R1)); // Copy Stack pointer into IP.
- __ mov(LR, Operand(R0)); // Program counter.
- __ mov(THR, Operand(R3)); // Thread.
- __ mov(FP, Operand(R2)); // Frame_pointer.
- __ mov(SP, Operand(IP)); // Set Stack pointer.
+ __ mov(IP, Operand(R1)); // Copy Stack pointer into IP.
+ __ mov(LR, Operand(R0)); // Program counter.
+ __ mov(THR, Operand(R3)); // Thread.
+ __ mov(FP, Operand(R2)); // Frame_pointer.
+ __ mov(SP, Operand(IP)); // Set Stack pointer.
// Set the tag.
__ LoadImmediate(R2, VMTag::kDartTagId);
__ StoreToOffset(kWord, R2, THR, Thread::vm_tag_offset());
@@ -1921,7 +1875,6 @@ void StubCode::GenerateJumpToFrameStub(Assembler* assembler) {
__ bx(LR); // Jump to continuation point.
}
-
// Run an exception handler. Execution comes from JumpToFrame
// stub or from the simulator.
//
@@ -1942,7 +1895,6 @@ void StubCode::GenerateRunExceptionHandlerStub(Assembler* assembler) {
__ bx(LR); // Jump to the exception handler code.
}
-
// Deoptimize a frame on the call stack before rewinding.
// The arguments are stored in the Thread object.
// No result.
@@ -1962,7 +1914,6 @@ void StubCode::GenerateDeoptForRewindStub(Assembler* assembler) {
__ bkpt(0);
}
-
// Calls to the runtime to optimize the given function.
// R8: function to be reoptimized.
// R4: argument descriptor (preserved).
@@ -1983,7 +1934,6 @@ void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) {
__ bkpt(0);
}
-
// Does identical check (object references are equal or not equal) with special
// checks for boxed numbers.
// LR: return address.
@@ -2050,7 +2000,6 @@ static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
__ Bind(&done);
}
-
// Called only from unoptimized code. All relevant registers have been saved.
// LR: return address.
// SP + 4: left operand.
@@ -2086,7 +2035,6 @@ void StubCode::GenerateUnoptimizedIdenticalWithNumberCheckStub(
}
}
-
// Called from optimized code only.
// LR: return address.
// SP + 4: left operand.
@@ -2103,7 +2051,6 @@ void StubCode::GenerateOptimizedIdenticalWithNumberCheckStub(
__ Ret();
}
-
// Called from megamorphic calls.
// R0: receiver
// R9: MegamorphicCache (preserved)
@@ -2158,7 +2105,6 @@ void StubCode::GenerateMegamorphicCallStub(Assembler* assembler) {
__ b(&loop);
}
-
// Called from switchable IC calls.
// R0: receiver
// R9: ICData (preserved)
@@ -2199,7 +2145,6 @@ void StubCode::GenerateICCallThroughFunctionStub(Assembler* assembler) {
__ bx(R1);
}
-
void StubCode::GenerateICCallThroughCodeStub(Assembler* assembler) {
Label loop, found, miss;
__ ldr(R4, FieldAddress(R9, ICData::arguments_descriptor_offset()));
@@ -2234,7 +2179,6 @@ void StubCode::GenerateICCallThroughCodeStub(Assembler* assembler) {
__ bx(R1);
}
-
// Called from switchable IC calls.
// R0: receiver
// R9: UnlinkedCall
@@ -2258,7 +2202,6 @@ void StubCode::GenerateUnlinkedCallStub(Assembler* assembler) {
__ bx(R1);
}
-
// Called from switchable IC calls.
// R0: receiver
// R9: SingleTargetCache
@@ -2298,7 +2241,6 @@ void StubCode::GenerateSingleTargetCallStub(Assembler* assembler) {
__ bx(R1);
}
-
// Called from the monomorphic checked entry.
// R0: receiver
void StubCode::GenerateMonomorphicMissStub(Assembler* assembler) {
@@ -2321,12 +2263,10 @@ void StubCode::GenerateMonomorphicMissStub(Assembler* assembler) {
__ bx(R1);
}
-
void StubCode::GenerateFrameAwaitingMaterializationStub(Assembler* assembler) {
__ bkpt(0);
}
-
void StubCode::GenerateAsynchronousGapMarkerStub(Assembler* assembler) {
__ bkpt(0);
}