Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(930)

Unified Diff: runtime/vm/assembler_arm64.cc

Issue 2481873005: clang-format runtime/vm (Closed)
Patch Set: Merge Created 4 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« no previous file with comments | « runtime/vm/assembler_arm64.h ('k') | runtime/vm/assembler_arm64_test.cc » ('j') | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
Index: runtime/vm/assembler_arm64.cc
diff --git a/runtime/vm/assembler_arm64.cc b/runtime/vm/assembler_arm64.cc
index e7cab4041cd359c342ee6f7c067250f7f2b0f75e..8f3dc143ccd937f6e374e5ced888693cd646b9c7 100644
--- a/runtime/vm/assembler_arm64.cc
+++ b/runtime/vm/assembler_arm64.cc
@@ -27,8 +27,7 @@ Assembler::Assembler(bool use_far_branches)
has_single_entry_point_(true),
use_far_branches_(use_far_branches),
comments_(),
- constant_pool_allowed_(false) {
-}
+ constant_pool_allowed_(false) {}
void Assembler::InitializeMemoryWithBreakpoints(uword data, intptr_t length) {
@@ -49,10 +48,9 @@ void Assembler::Emit(int32_t value) {
static const char* cpu_reg_names[kNumberOfCpuRegisters] = {
- "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
- "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
- "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
- "r24", "ip0", "ip1", "pp", "ctx", "fp", "lr", "r31",
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+ "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21",
+ "r22", "r23", "r24", "ip0", "ip1", "pp", "ctx", "fp", "lr", "r31",
};
@@ -63,10 +61,9 @@ const char* Assembler::RegisterName(Register reg) {
static const char* fpu_reg_names[kNumberOfFpuRegisters] = {
- "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
- "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
- "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
- "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
+ "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21",
+ "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
};
@@ -136,14 +133,14 @@ void Assembler::Bind(Label* label) {
int32_t encoded_guard_branch =
EncodeImm19BranchOffset(dest, guard_branch);
const Condition c = DecodeImm19BranchCondition(encoded_guard_branch);
- encoded_guard_branch = EncodeImm19BranchCondition(
- InvertCondition(c), encoded_guard_branch);
+ encoded_guard_branch =
+ EncodeImm19BranchCondition(InvertCondition(c), encoded_guard_branch);
// Write back the re-encoded instructions. The far branch becomes a nop.
- buffer_.Store<int32_t>(
- position + 0 * Instr::kInstrSize, encoded_guard_branch);
- buffer_.Store<int32_t>(
- position + 1 * Instr::kInstrSize, Instr::kNopInstruction);
+ buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize,
+ encoded_guard_branch);
+ buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize,
+ Instr::kNopInstruction);
label->position_ = next;
} else {
const int32_t next = buffer_.Load<int32_t>(position);
@@ -184,7 +181,7 @@ static int CountLeadingZeros(uint64_t value, int width) {
static int CountOneBits(uint64_t value, int width) {
// Mask out unused bits to ensure that they are not counted.
- value &= (0xffffffffffffffffUL >> (64-width));
+ value &= (0xffffffffffffffffUL >> (64 - width));
value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
@@ -382,8 +379,7 @@ bool Assembler::CanLoadFromObjectPool(const Object& object) const {
}
-void Assembler::LoadNativeEntry(Register dst,
- const ExternalLabel* label) {
+void Assembler::LoadNativeEntry(Register dst, const ExternalLabel* label) {
const int32_t offset = ObjectPool::element_offset(
object_pool_wrapper_.FindNativeEntry(label, kNotPatchable));
LoadWordFromPoolOffset(dst, offset);
@@ -741,8 +737,10 @@ void Assembler::CompareImmediate(Register rn, int64_t imm) {
}
-void Assembler::LoadFromOffset(
- Register dest, Register base, int32_t offset, OperandSize sz) {
+void Assembler::LoadFromOffset(Register dest,
+ Register base,
+ int32_t offset,
+ OperandSize sz) {
if (Address::CanHoldOffset(offset, Address::Offset, sz)) {
ldr(dest, Address(base, offset, Address::Offset, sz), sz);
} else {
@@ -775,8 +773,10 @@ void Assembler::LoadQFromOffset(VRegister dest, Register base, int32_t offset) {
}
-void Assembler::StoreToOffset(
- Register src, Register base, int32_t offset, OperandSize sz) {
+void Assembler::StoreToOffset(Register src,
+ Register base,
+ int32_t offset,
+ OperandSize sz) {
ASSERT(base != TMP2);
if (Address::CanHoldOffset(offset, Address::Offset, sz)) {
str(src, Address(base, offset, Address::Offset, sz), sz);
@@ -832,9 +832,9 @@ void Assembler::VRSqrts(VRegister vd, VRegister vn) {
vrsqrtes(vd, vn);
// 2 Newton-Raphson steps. xn+1 = xn * (3 - V1*xn^2) / 2.
// First step.
- vmuls(VTMP, vd, vd); // VTMP <- xn^2
+ vmuls(VTMP, vd, vd); // VTMP <- xn^2
vrsqrtss(VTMP, vn, VTMP); // VTMP <- (3 - V1*VTMP) / 2.
- vmuls(vd, vd, VTMP); // xn+1 <- xn * VTMP
+ vmuls(vd, vd, VTMP); // xn+1 <- xn * VTMP
// Second step.
vmuls(VTMP, vd, vd);
vrsqrtss(VTMP, vn, VTMP);
@@ -842,7 +842,6 @@ void Assembler::VRSqrts(VRegister vd, VRegister vn) {
}
-
// Store into object.
// Preserves object and value registers.
void Assembler::StoreIntoObjectFilterNoSmi(Register object,
@@ -880,8 +879,8 @@ void Assembler::StoreIntoObjectOffset(Register object,
Register value,
bool can_value_be_smi) {
if (Address::CanHoldOffset(offset - kHeapObjectTag)) {
- StoreIntoObject(
- object, FieldAddress(object, offset), value, can_value_be_smi);
+ StoreIntoObject(object, FieldAddress(object, offset), value,
+ can_value_be_smi);
} else {
AddImmediate(TMP, object, offset - kHeapObjectTag);
StoreIntoObject(object, Address(TMP), value, can_value_be_smi);
@@ -976,8 +975,8 @@ void Assembler::StoreIntoObjectOffsetNoBarrier(Register object,
void Assembler::LoadClassId(Register result, Register object) {
ASSERT(RawObject::kClassIdTagPos == kBitsPerInt32);
ASSERT(RawObject::kClassIdTagSize == kBitsPerInt32);
- const intptr_t class_id_offset = Object::tags_offset() +
- RawObject::kClassIdTagPos / kBitsPerByte;
+ const intptr_t class_id_offset =
+ Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
LoadFromOffset(result, object, class_id_offset - kHeapObjectTag,
kUnsignedWord);
}
@@ -1290,8 +1289,7 @@ void Assembler::MaybeTraceAllocation(intptr_t cid,
}
-void Assembler::UpdateAllocationStats(intptr_t cid,
- Heap::Space space) {
+void Assembler::UpdateAllocationStats(intptr_t cid, Heap::Space space) {
ASSERT(cid > 0);
intptr_t counter_offset =
ClassTable::CounterOffsetFor(cid, space == Heap::kNew);
@@ -1311,12 +1309,14 @@ void Assembler::UpdateAllocationStatsWithSize(intptr_t cid,
Heap::Space space) {
ASSERT(cid > 0);
const uword class_offset = ClassTable::ClassOffsetFor(cid);
- const uword count_field_offset = (space == Heap::kNew) ?
- ClassHeapStats::allocated_since_gc_new_space_offset() :
- ClassHeapStats::allocated_since_gc_old_space_offset();
- const uword size_field_offset = (space == Heap::kNew) ?
- ClassHeapStats::allocated_size_since_gc_new_space_offset() :
- ClassHeapStats::allocated_size_since_gc_old_space_offset();
+ const uword count_field_offset =
+ (space == Heap::kNew)
+ ? ClassHeapStats::allocated_since_gc_new_space_offset()
+ : ClassHeapStats::allocated_since_gc_old_space_offset();
+ const uword size_field_offset =
+ (space == Heap::kNew)
+ ? ClassHeapStats::allocated_size_since_gc_new_space_offset()
+ : ClassHeapStats::allocated_size_since_gc_old_space_offset();
LoadIsolate(TMP2);
intptr_t table_offset =
Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
@@ -1360,8 +1360,7 @@ void Assembler::TryAllocate(const Class& cls,
str(instance_reg, Address(temp_reg, Heap::TopOffset(space)));
ASSERT(instance_size >= kHeapObjectTag);
- AddImmediate(
- instance_reg, instance_reg, -instance_size + kHeapObjectTag);
+ AddImmediate(instance_reg, instance_reg, -instance_size + kHeapObjectTag);
NOT_IN_PRODUCT(UpdateAllocationStats(cls.id(), space));
uword tags = 0;
@@ -1427,7 +1426,8 @@ Address Assembler::ElementAddressForIntIndex(bool is_external,
intptr_t index_scale,
Register array,
intptr_t index) const {
- const int64_t offset = index * index_scale +
+ const int64_t offset =
+ index * index_scale +
(is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag));
ASSERT(Utils::IsInt(32, offset));
const OperandSize size = Address::OperandSizeFor(cid);
@@ -1442,7 +1442,8 @@ void Assembler::LoadElementAddressForIntIndex(Register address,
intptr_t index_scale,
Register array,
intptr_t index) {
- const int64_t offset = index * index_scale +
+ const int64_t offset =
+ index * index_scale +
(is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag));
AddImmediate(address, array, offset);
}
@@ -1500,7 +1501,9 @@ void Assembler::LoadElementAddressForRegIndex(Register address,
}
-void Assembler::LoadUnaligned(Register dst, Register addr, Register tmp,
+void Assembler::LoadUnaligned(Register dst,
+ Register addr,
+ Register tmp,
OperandSize sz) {
ASSERT(dst != addr);
ldr(dst, Address(addr, 0), kUnsignedByte);
@@ -1541,7 +1544,9 @@ void Assembler::LoadUnaligned(Register dst, Register addr, Register tmp,
}
-void Assembler::StoreUnaligned(Register src, Register addr, Register tmp,
+void Assembler::StoreUnaligned(Register src,
+ Register addr,
+ Register tmp,
OperandSize sz) {
str(src, Address(addr, 0), kUnsignedByte);
LsrImmediate(tmp, src, 8);
« no previous file with comments | « runtime/vm/assembler_arm64.h ('k') | runtime/vm/assembler_arm64_test.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698