Chromium Code Reviews

Unified diff: runtime/vm/assembler_arm64.cc

Issue 2974233002: VM: Re-format to use at most one newline between functions (Closed)
Patch Set: Rebase and merge. Created 3 years, 5 months ago.
Deleted lines (extra blank lines between functions) are marked with a leading "-".
// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"  // NOLINT
#if defined(TARGET_ARCH_ARM64)

#include "vm/assembler.h"
#include "vm/cpu.h"
#include "vm/longjump.h"
#include "vm/runtime_entry.h"
#include "vm/simulator.h"
#include "vm/stack_frame.h"
#include "vm/stub_code.h"

namespace dart {

DECLARE_FLAG(bool, check_code_pointer);
DECLARE_FLAG(bool, inline_alloc);

DEFINE_FLAG(bool, use_far_branches, false, "Always use far branches");

-
Assembler::Assembler(bool use_far_branches)
    : buffer_(),
      prologue_offset_(-1),
      has_single_entry_point_(true),
      use_far_branches_(use_far_branches),
      comments_(),
      constant_pool_allowed_(false) {}

-
void Assembler::InitializeMemoryWithBreakpoints(uword data, intptr_t length) {
  ASSERT(Utils::IsAligned(data, 4));
  ASSERT(Utils::IsAligned(length, 4));
  const uword end = data + length;
  while (data < end) {
    *reinterpret_cast<int32_t*>(data) = Instr::kBreakPointInstruction;
    data += 4;
  }
}

-
void Assembler::Emit(int32_t value) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  buffer_.Emit<int32_t>(value);
}

-
static const char* cpu_reg_names[kNumberOfCpuRegisters] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
    "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21",
    "r22", "r23", "r24", "ip0", "ip1", "pp", "ctx", "fp", "lr", "r31",
};

-
const char* Assembler::RegisterName(Register reg) {
  ASSERT((0 <= reg) && (reg < kNumberOfCpuRegisters));
  return cpu_reg_names[reg];
}

-
static const char* fpu_reg_names[kNumberOfFpuRegisters] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
    "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21",
    "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
};

-
const char* Assembler::FpuRegisterName(FpuRegister reg) {
  ASSERT((0 <= reg) && (reg < kNumberOfFpuRegisters));
  return fpu_reg_names[reg];
}

-
void Assembler::Bind(Label* label) {
  ASSERT(!label->IsBound());
  const intptr_t bound_pc = buffer_.Size();

  while (label->IsLinked()) {
    const int64_t position = label->Position();
    const int64_t dest = bound_pc - position;
    if (use_far_branches() && !CanEncodeImm19BranchOffset(dest)) {
      // Far branches are enabled, and we can't encode the branch offset in
      // 19 bits.
(...skipping 59 matching lines...)
    } else {
      const int32_t next = buffer_.Load<int32_t>(position);
      const int32_t encoded = EncodeImm19BranchOffset(dest, next);
      buffer_.Store<int32_t>(position, encoded);
      label->position_ = DecodeImm19BranchOffset(next);
    }
  }
  label->BindTo(bound_pc);
}

-
void Assembler::Stop(const char* message) {
  if (FLAG_print_stop_message) {
    UNIMPLEMENTED();
  }
  Label stop;
  b(&stop);
  Emit(Utils::Low32Bits(reinterpret_cast<int64_t>(message)));
  Emit(Utils::High32Bits(reinterpret_cast<int64_t>(message)));
  Bind(&stop);
  brk(Instr::kStopMessageCode);
}

-
static int CountLeadingZeros(uint64_t value, int width) {
  ASSERT((width == 32) || (width == 64));
  if (value == 0) {
    return width;
  }
  int count = 0;
  do {
    count++;
  } while (value >>= 1);
  return width - count;
}

-
static int CountOneBits(uint64_t value, int width) {
  // Mask out unused bits to ensure that they are not counted.
  value &= (0xffffffffffffffffULL >> (64 - width));

  value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
  value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
  value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
  value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
  value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff);
  value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff);

  return value;
}

-
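For reviewers who want to sanity-check the two bit-twiddling helpers above, here is a minimal, hypothetical spot-check (not part of this CL; it assumes only the helpers as defined above and the VM's ASSERT macro):

static void SanityCheckBitHelpers() {
  // CountLeadingZeros strips one bit per loop iteration.
  ASSERT(CountLeadingZeros(0, 64) == 64);  // Zero has no set bits at all.
  ASSERT(CountLeadingZeros(1, 64) == 63);  // Only bit 0 is set.
  ASSERT(CountLeadingZeros(1, 32) == 31);
  // CountOneBits is a classic parallel popcount; the initial mask keeps
  // bits above 'width' from being counted.
  ASSERT(CountOneBits(0xffULL, 64) == 8);
  ASSERT(CountOneBits(0xffffffffffffffffULL, 32) == 32);
}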
// Test if a given value can be encoded in the immediate field of a logical
// instruction.
// If it can be encoded, the function returns true, and values pointed to by n,
// imm_s and imm_r are updated with immediates encoded in the format required
// by the corresponding fields in the logical instruction.
// If it can't be encoded, the function returns false, and the operand is
// undefined.
bool Operand::IsImmLogical(uint64_t value, uint8_t width, Operand* imm_op) {
  ASSERT(imm_op != NULL);
  ASSERT((width == kWRegSizeInBits) || (width == kXRegSizeInBits));
(...skipping 81 matching lines...)
      set_bits >>= 1;
      imm_s_fixed >>= 1;
      continue;
    }

    // 6. Otherwise, the value can't be encoded.
    return false;
  }
}

-
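As a quick reference for what this encoding accepts, a hedged sketch (the full decision procedure is in the elided body above; these follow the usual AArch64 logical-immediate rules, where encodable values are repetitions of a rotated run of contiguous one bits, and all-zeros/all-ones are rejected):

Operand op;
ASSERT(Operand::IsImmLogical(0x5555555555555555ULL, 64, &op));   // "01" repeated.
ASSERT(Operand::IsImmLogical(0x00ff00ff00ff00ffULL, 64, &op));   // 16-bit elements, 8 ones each.
ASSERT(!Operand::IsImmLogical(0x0000000000000000ULL, 64, &op));  // All zeros: rejected.
ASSERT(!Operand::IsImmLogical(0xffffffffffffffffULL, 64, &op));  // All ones: rejected.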
void Assembler::LoadPoolPointer(Register pp) {
  CheckCodePointer();
  ldr(pp, FieldAddress(CODE_REG, Code::object_pool_offset()));

  // When in the PP register, the pool pointer is untagged. When we
  // push it on the stack with TagAndPushPP it is tagged again. PopAndUntagPP
  // then untags when restoring from the stack. This will make loading from the
  // object pool only one instruction for the first 4096 entries. Otherwise,
  // because the offset wouldn't be aligned, it would be only one instruction
  // for the first 64 entries.
  sub(pp, pp, Operand(kHeapObjectTag));
  set_constant_pool_allowed(pp == PP);
}

-
void Assembler::LoadWordFromPoolOffset(Register dst,
                                       uint32_t offset,
                                       Register pp) {
  ASSERT((pp != PP) || constant_pool_allowed());
  ASSERT(dst != pp);
  Operand op;
  const uint32_t upper20 = offset & 0xfffff000;
  if (Address::CanHoldOffset(offset)) {
    ldr(dst, Address(pp, offset));
  } else if (Operand::CanHold(upper20, kXRegSizeInBits, &op) ==
             Operand::Immediate) {
    const uint32_t lower12 = offset & 0x00000fff;
    ASSERT(Address::CanHoldOffset(lower12));
    add(dst, pp, op);
    ldr(dst, Address(dst, lower12));
  } else {
    const uint16_t offset_low = Utils::Low16Bits(offset);
    const uint16_t offset_high = Utils::High16Bits(offset);
    movz(dst, Immediate(offset_low), 0);
    if (offset_high != 0) {
      movk(dst, Immediate(offset_high), 1);
    }
    ldr(dst, Address(pp, dst));
  }
}

-
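The three branches above trade instruction count against how large the pool offset is. A worked sketch with illustrative offsets (pool entries are 8-byte aligned, which keeps lower12 encodable in ldr's scaled field):

// offset = 0xff8:      fits ldr's scaled 12-bit field        -> single ldr.
// offset = 0x123458:   upper20 = 0x123000 encodes as an add
//                      immediate (0x123 << 12), lower12 = 0x458 -> add + ldr.
// offset = 0x1234568:  upper20 = 0x1234000 needs 13 bits, so it is not an
//                      add immediate -> movz (+ movk) builds the offset in
//                      dst, then ldr with a register offset.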
void Assembler::LoadWordFromPoolOffsetFixed(Register dst, uint32_t offset) {
  ASSERT(constant_pool_allowed());
  ASSERT(dst != PP);
  Operand op;
  const uint32_t upper20 = offset & 0xfffff000;
  const uint32_t lower12 = offset & 0x00000fff;
  const Operand::OperandType ot =
      Operand::CanHold(upper20, kXRegSizeInBits, &op);
  ASSERT(ot == Operand::Immediate);
  ASSERT(Address::CanHoldOffset(lower12));
  add(dst, PP, op);
  ldr(dst, Address(dst, lower12));
}

-
intptr_t Assembler::FindImmediate(int64_t imm) {
  return object_pool_wrapper_.FindImmediate(imm);
}

-
bool Assembler::CanLoadFromObjectPool(const Object& object) const {
  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
  ASSERT(!Thread::CanLoadFromThread(object));
  if (!constant_pool_allowed()) {
    return false;
  }

  // TODO(zra, kmillikin): Also load other large immediates from the object
  // pool
  if (object.IsSmi()) {
    // If the raw smi does not fit into a 32-bit signed int, then we'll keep
    // the raw value in the object pool.
    return !Utils::IsInt(32, reinterpret_cast<int64_t>(object.raw()));
  }
  ASSERT(object.IsNotTemporaryScopedHandle());
  ASSERT(object.IsOld());
  return true;
}

-
void Assembler::LoadNativeEntry(Register dst, const ExternalLabel* label) {
  const int32_t offset = ObjectPool::element_offset(
      object_pool_wrapper_.FindNativeEntry(label, kNotPatchable));
  LoadWordFromPoolOffset(dst, offset);
}

-
void Assembler::LoadIsolate(Register dst) {
  ldr(dst, Address(THR, Thread::isolate_offset()));
}

-
void Assembler::LoadObjectHelper(Register dst,
                                 const Object& object,
                                 bool is_unique) {
  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
  if (Thread::CanLoadFromThread(object)) {
    ldr(dst, Address(THR, Thread::OffsetFromThread(object)));
  } else if (CanLoadFromObjectPool(object)) {
    const int32_t offset = ObjectPool::element_offset(
        is_unique ? object_pool_wrapper_.AddObject(object)
                  : object_pool_wrapper_.FindObject(object));
    LoadWordFromPoolOffset(dst, offset);
  } else {
    ASSERT(object.IsSmi());
    LoadDecodableImmediate(dst, reinterpret_cast<int64_t>(object.raw()));
  }
}

-
void Assembler::LoadFunctionFromCalleePool(Register dst,
                                           const Function& function,
                                           Register new_pp) {
  ASSERT(!constant_pool_allowed());
  ASSERT(new_pp != PP);
  const int32_t offset =
      ObjectPool::element_offset(object_pool_wrapper_.FindObject(function));
  ASSERT(Address::CanHoldOffset(offset));
  ldr(dst, Address(new_pp, offset));
}

-
void Assembler::LoadObject(Register dst, const Object& object) {
  LoadObjectHelper(dst, object, false);
}

-
void Assembler::LoadUniqueObject(Register dst, const Object& object) {
  LoadObjectHelper(dst, object, true);
}

-
void Assembler::CompareObject(Register reg, const Object& object) {
  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
  if (Thread::CanLoadFromThread(object)) {
    ldr(TMP, Address(THR, Thread::OffsetFromThread(object)));
    CompareRegisters(reg, TMP);
  } else if (CanLoadFromObjectPool(object)) {
    LoadObject(TMP, object);
    CompareRegisters(reg, TMP);
  } else {
    ASSERT(object.IsSmi());
    CompareImmediate(reg, reinterpret_cast<int64_t>(object.raw()));
  }
}

-
void Assembler::LoadDecodableImmediate(Register reg, int64_t imm) {
  if (constant_pool_allowed()) {
    const int32_t offset = ObjectPool::element_offset(FindImmediate(imm));
    LoadWordFromPoolOffset(reg, offset);
  } else {
    // TODO(zra): Since this sequence only needs to be decodable, it can be
    // of variable length.
    LoadImmediateFixed(reg, imm);
  }
}

-
void Assembler::LoadImmediateFixed(Register reg, int64_t imm) {
  const uint32_t w0 = Utils::Low32Bits(imm);
  const uint32_t w1 = Utils::High32Bits(imm);
  const uint16_t h0 = Utils::Low16Bits(w0);
  const uint16_t h1 = Utils::High16Bits(w0);
  const uint16_t h2 = Utils::Low16Bits(w1);
  const uint16_t h3 = Utils::High16Bits(w1);
  movz(reg, Immediate(h0), 0);
  movk(reg, Immediate(h1), 1);
  movk(reg, Immediate(h2), 2);
  movk(reg, Immediate(h3), 3);
}

-
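A worked example of the fixed four-instruction sequence above (value chosen for illustration): for imm = 0x0123456789abcdef the halfwords are h0 = 0xcdef, h1 = 0x89ab, h2 = 0x4567, h3 = 0x0123, so the emitted sequence is:

movz reg, #0xcdef, lsl #0    // reg = 0x000000000000cdef
movk reg, #0x89ab, lsl #16   // reg = 0x0000000089abcdef
movk reg, #0x4567, lsl #32   // reg = 0x0000456789abcdef
movk reg, #0x0123, lsl #48   // reg = 0x0123456789abcdef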
void Assembler::LoadImmediate(Register reg, int64_t imm) {
  Comment("LoadImmediate");
  // Is it 0?
  if (imm == 0) {
    movz(reg, Immediate(0), 0);
    return;
  }

  // Can we use one orri operation?
  Operand op;
(...skipping 66 matching lines...)
  }
  if (h3 != 0) {
    if (initialized) {
      movk(reg, Immediate(h3), 3);
    } else {
      movz(reg, Immediate(h3), 3);
    }
  }
}

-
void Assembler::LoadDImmediate(VRegister vd, double immd) {
  if (!fmovdi(vd, immd)) {
    int64_t imm = bit_cast<int64_t, double>(immd);
    LoadImmediate(TMP, imm);
    fmovdr(vd, TMP);
  }
}

-
void Assembler::Branch(const StubEntry& stub_entry,
                       Register pp,
                       Patchability patchable) {
  const Code& target = Code::ZoneHandle(stub_entry.code());
  const int32_t offset = ObjectPool::element_offset(
      object_pool_wrapper_.FindObject(target, patchable));
  LoadWordFromPoolOffset(CODE_REG, offset, pp);
  ldr(TMP, FieldAddress(CODE_REG, Code::entry_point_offset()));
  br(TMP);
}

void Assembler::BranchPatchable(const StubEntry& stub_entry) {
  Branch(stub_entry, PP, kPatchable);
}

-
void Assembler::BranchLink(const StubEntry& stub_entry,
                           Patchability patchable) {
  const Code& target = Code::ZoneHandle(stub_entry.code());
  const int32_t offset = ObjectPool::element_offset(
      object_pool_wrapper_.FindObject(target, patchable));
  LoadWordFromPoolOffset(CODE_REG, offset);
  ldr(TMP, FieldAddress(CODE_REG, Code::entry_point_offset()));
  blr(TMP);
}

-
void Assembler::BranchLinkPatchable(const StubEntry& stub_entry) {
  BranchLink(stub_entry, kPatchable);
}

-
void Assembler::BranchLinkToRuntime() {
  ldr(LR, Address(THR, Thread::call_to_runtime_entry_point_offset()));
  ldr(CODE_REG, Address(THR, Thread::call_to_runtime_stub_offset()));
  blr(LR);
}

-
void Assembler::BranchLinkWithEquivalence(const StubEntry& stub_entry,
                                          const Object& equivalence) {
  const Code& target = Code::ZoneHandle(stub_entry.code());
  const int32_t offset = ObjectPool::element_offset(
      object_pool_wrapper_.FindObject(target, equivalence));
  LoadWordFromPoolOffset(CODE_REG, offset);
  ldr(TMP, FieldAddress(CODE_REG, Code::entry_point_offset()));
  blr(TMP);
}

-
void Assembler::AddImmediate(Register dest, Register rn, int64_t imm) {
  Operand op;
  if (imm == 0) {
    if (dest != rn) {
      mov(dest, rn);
    }
    return;
  }
  if (Operand::CanHold(imm, kXRegSizeInBits, &op) == Operand::Immediate) {
    add(dest, rn, op);
  } else if (Operand::CanHold(-imm, kXRegSizeInBits, &op) ==
             Operand::Immediate) {
    sub(dest, rn, op);
  } else {
    // TODO(zra): Try adding top 12 bits, then bottom 12 bits.
    ASSERT(rn != TMP2);
    LoadImmediate(TMP2, imm);
    add(dest, rn, Operand(TMP2));
  }
}

-
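The CanHold checks above rely on the A64 add/sub immediate form: a 12-bit unsigned value, optionally shifted left by 12. A hypothetical predicate capturing just that rule (illustrative only; the real logic lives in Operand::CanHold):

static bool IsAddSubImmediate(int64_t imm) {
  // imm12, or imm12 << 12 with the low 12 bits clear; both non-negative.
  return (imm >= 0) &&
         (((imm & 0xfff) == imm) || ((imm & 0xfff000) == imm));
}

So AddImmediate(dest, rn, 0x1234567) takes the fallback path: the value fits neither form, directly or negated, and is materialized into TMP2 first.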
void Assembler::AddImmediateSetFlags(Register dest, Register rn, int64_t imm) {
  Operand op;
  if (Operand::CanHold(imm, kXRegSizeInBits, &op) == Operand::Immediate) {
    // Handles imm == kMinInt64.
    adds(dest, rn, op);
  } else if (Operand::CanHold(-imm, kXRegSizeInBits, &op) ==
             Operand::Immediate) {
    ASSERT(imm != kMinInt64);  // Would cause erroneous overflow detection.
    subs(dest, rn, op);
  } else {
    // TODO(zra): Try adding top 12 bits, then bottom 12 bits.
    ASSERT(rn != TMP2);
    LoadImmediate(TMP2, imm);
    adds(dest, rn, Operand(TMP2));
  }
}

-
void Assembler::SubImmediateSetFlags(Register dest, Register rn, int64_t imm) {
  Operand op;
  if (Operand::CanHold(imm, kXRegSizeInBits, &op) == Operand::Immediate) {
    // Handles imm == kMinInt64.
    subs(dest, rn, op);
  } else if (Operand::CanHold(-imm, kXRegSizeInBits, &op) ==
             Operand::Immediate) {
    ASSERT(imm != kMinInt64);  // Would cause erroneous overflow detection.
    adds(dest, rn, op);
  } else {
    // TODO(zra): Try subtracting top 12 bits, then bottom 12 bits.
    ASSERT(rn != TMP2);
    LoadImmediate(TMP2, imm);
    subs(dest, rn, Operand(TMP2));
  }
}

-
void Assembler::AndImmediate(Register rd, Register rn, int64_t imm) {
  Operand imm_op;
  if (Operand::IsImmLogical(imm, kXRegSizeInBits, &imm_op)) {
    andi(rd, rn, Immediate(imm));
  } else {
    LoadImmediate(TMP, imm);
    and_(rd, rn, Operand(TMP));
  }
}

-
void Assembler::OrImmediate(Register rd, Register rn, int64_t imm) {
  Operand imm_op;
  if (Operand::IsImmLogical(imm, kXRegSizeInBits, &imm_op)) {
    orri(rd, rn, Immediate(imm));
  } else {
    LoadImmediate(TMP, imm);
    orr(rd, rn, Operand(TMP));
  }
}

-
void Assembler::XorImmediate(Register rd, Register rn, int64_t imm) {
  Operand imm_op;
  if (Operand::IsImmLogical(imm, kXRegSizeInBits, &imm_op)) {
    eori(rd, rn, Immediate(imm));
  } else {
    LoadImmediate(TMP, imm);
    eor(rd, rn, Operand(TMP));
  }
}

-
void Assembler::TestImmediate(Register rn, int64_t imm) {
  Operand imm_op;
  if (Operand::IsImmLogical(imm, kXRegSizeInBits, &imm_op)) {
    tsti(rn, Immediate(imm));
  } else {
    LoadImmediate(TMP, imm);
    tst(rn, Operand(TMP));
  }
}

-
void Assembler::CompareImmediate(Register rn, int64_t imm) {
  Operand op;
  if (Operand::CanHold(imm, kXRegSizeInBits, &op) == Operand::Immediate) {
    cmp(rn, op);
  } else if (Operand::CanHold(-imm, kXRegSizeInBits, &op) ==
             Operand::Immediate) {
    cmn(rn, op);
  } else {
    ASSERT(rn != TMP2);
    LoadImmediate(TMP2, imm);
    cmp(rn, Operand(TMP2));
  }
}

-
void Assembler::LoadFromOffset(Register dest,
                               Register base,
                               int32_t offset,
                               OperandSize sz) {
  if (Address::CanHoldOffset(offset, Address::Offset, sz)) {
    ldr(dest, Address(base, offset, Address::Offset, sz), sz);
  } else {
    ASSERT(base != TMP2);
    AddImmediate(TMP2, base, offset);
    ldr(dest, Address(TMP2), sz);
  }
}

-
void Assembler::LoadDFromOffset(VRegister dest, Register base, int32_t offset) {
  if (Address::CanHoldOffset(offset, Address::Offset, kDWord)) {
    fldrd(dest, Address(base, offset, Address::Offset, kDWord));
  } else {
    ASSERT(base != TMP2);
    AddImmediate(TMP2, base, offset);
    fldrd(dest, Address(TMP2));
  }
}

-
void Assembler::LoadQFromOffset(VRegister dest, Register base, int32_t offset) {
  if (Address::CanHoldOffset(offset, Address::Offset, kQWord)) {
    fldrq(dest, Address(base, offset, Address::Offset, kQWord));
  } else {
    ASSERT(base != TMP2);
    AddImmediate(TMP2, base, offset);
    fldrq(dest, Address(TMP2));
  }
}

-
void Assembler::StoreToOffset(Register src,
                              Register base,
                              int32_t offset,
                              OperandSize sz) {
  ASSERT(base != TMP2);
  if (Address::CanHoldOffset(offset, Address::Offset, sz)) {
    str(src, Address(base, offset, Address::Offset, sz), sz);
  } else {
    ASSERT(src != TMP2);
    AddImmediate(TMP2, base, offset);
    str(src, Address(TMP2), sz);
  }
}

-
void Assembler::StoreDToOffset(VRegister src, Register base, int32_t offset) {
  if (Address::CanHoldOffset(offset, Address::Offset, kDWord)) {
    fstrd(src, Address(base, offset, Address::Offset, kDWord));
  } else {
    ASSERT(base != TMP2);
    AddImmediate(TMP2, base, offset);
    fstrd(src, Address(TMP2));
  }
}

-
void Assembler::StoreQToOffset(VRegister src, Register base, int32_t offset) {
  if (Address::CanHoldOffset(offset, Address::Offset, kQWord)) {
    fstrq(src, Address(base, offset, Address::Offset, kQWord));
  } else {
    ASSERT(base != TMP2);
    AddImmediate(TMP2, base, offset);
    fstrq(src, Address(TMP2));
  }
}

-
void Assembler::VRecps(VRegister vd, VRegister vn) {
  ASSERT(vn != VTMP);
  ASSERT(vd != VTMP);

  // Reciprocal estimate.
  vrecpes(vd, vn);
  // 2 Newton-Raphson steps.
  vrecpss(VTMP, vn, vd);
  vmuls(vd, vd, VTMP);
  vrecpss(VTMP, vn, vd);
  vmuls(vd, vd, VTMP);
}

-
void Assembler::VRSqrts(VRegister vd, VRegister vn) {
  ASSERT(vd != VTMP);
  ASSERT(vn != VTMP);

  // Reciprocal square root estimate.
  vrsqrtes(vd, vn);
  // 2 Newton-Raphson steps. xn+1 = xn * (3 - V1*xn^2) / 2.
  // First step.
  vmuls(VTMP, vd, vd);       // VTMP <- xn^2
  vrsqrtss(VTMP, vn, VTMP);  // VTMP <- (3 - V1*VTMP) / 2.
  vmuls(vd, vd, VTMP);       // xn+1 <- xn * VTMP
  // Second step.
  vmuls(VTMP, vd, vd);
  vrsqrtss(VTMP, vn, VTMP);
  vmuls(vd, vd, VTMP);
}

-
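Both routines above are estimate-plus-refinement. A scalar C sketch of the same two Newton-Raphson steps, assuming the standard ARM semantics vrecpss(a, b) = 2 - a*b and vrsqrtss(a, b) = (3 - a*b) / 2 (the exact 1/v and 1/sqrt(v) below stand in for the hardware estimates):

#include <math.h>

static float RecipTwoSteps(float v) {
  float x = 1.0f / v;      // Stand-in for vrecpes.
  x = x * (2.0f - v * x);  // vrecpss + vmuls, first step.
  x = x * (2.0f - v * x);  // Second step.
  return x;
}

static float RSqrtTwoSteps(float v) {
  float x = 1.0f / sqrtf(v);            // Stand-in for vrsqrtes.
  x = x * ((3.0f - v * x * x) / 2.0f);  // vmuls + vrsqrtss + vmuls, first step.
  x = x * ((3.0f - v * x * x) / 2.0f);  // Second step.
  return x;
}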
// Store into object.
// Preserves object and value registers.
void Assembler::StoreIntoObjectFilterNoSmi(Register object,
                                           Register value,
                                           Label* no_update) {
  COMPILE_ASSERT((kNewObjectAlignmentOffset == kWordSize) &&
                 (kOldObjectAlignmentOffset == 0));

  // Write-barrier triggers if the value is in the new space (has bit set) and
  // the object is in the old space (has bit cleared).
  // To check that, we compute value & ~object and skip the write barrier
  // if the bit is not set. We can't destroy the object.
  bic(TMP, value, Operand(object));
  tsti(TMP, Immediate(kNewObjectAlignmentOffset));
  b(no_update, EQ);
}

-
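A scalar model of the filter's bit logic (hypothetical helper, using the constants asserted above; new-space addresses have the kNewObjectAlignmentOffset bit set, old-space addresses have it clear):

static bool WouldUpdateStoreBuffer(uword object, uword value) {
  // The bit survives only when value is new-space AND object is old-space.
  return ((value & ~object) & kNewObjectAlignmentOffset) != 0;
}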
// Preserves object and value registers.
void Assembler::StoreIntoObjectFilter(Register object,
                                      Register value,
                                      Label* no_update) {
  // For the value we are only interested in the new/old bit and the tag bit.
  // And the new bit with the tag bit. The resulting bit will be 0 for a Smi.
  and_(TMP, value, Operand(value, LSL, kObjectAlignmentLog2 - 1));
  // And the result with the negated space bit of the object.
  bic(TMP, TMP, Operand(object));
  tsti(TMP, Immediate(kNewObjectAlignmentOffset));
  b(no_update, EQ);
}

-
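The extra AND folds the Smi check into the same test: shifting value left by kObjectAlignmentLog2 - 1 moves the tag bit (bit 0) up into the new-space bit position, so ANDing with value itself clears that bit for Smis. A scalar model (hypothetical helper, same assumptions as the sketch above):

static bool WouldUpdateStoreBufferMaybeSmi(uword object, uword value) {
  // For a Smi the tag bit is 0, which zeroes the new-space bit here.
  uword tmp = value & (value << (kObjectAlignmentLog2 - 1));
  return ((tmp & ~object) & kNewObjectAlignmentOffset) != 0;
}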
void Assembler::StoreIntoObjectOffset(Register object,
                                      int32_t offset,
                                      Register value,
                                      bool can_value_be_smi) {
  if (Address::CanHoldOffset(offset - kHeapObjectTag)) {
    StoreIntoObject(object, FieldAddress(object, offset), value,
                    can_value_be_smi);
  } else {
    AddImmediate(TMP, object, offset - kHeapObjectTag);
    StoreIntoObject(object, Address(TMP), value, can_value_be_smi);
  }
}

-
void Assembler::StoreIntoObject(Register object,
                                const Address& dest,
                                Register value,
                                bool can_value_be_smi) {
  ASSERT(object != value);
  str(value, dest);
  Label done;
  if (can_value_be_smi) {
    StoreIntoObjectFilter(object, value, &done);
  } else {
(...skipping 12 matching lines...)
  ldr(CODE_REG, Address(THR, Thread::update_store_buffer_code_offset()));
  blr(TMP);
  Pop(LR);
  if (value != R0) {
    // Restore R0.
    Pop(R0);
  }
  Bind(&done);
}

-
void Assembler::StoreIntoObjectNoBarrier(Register object,
                                         const Address& dest,
                                         Register value) {
  str(value, dest);
#if defined(DEBUG)
  Label done;
  StoreIntoObjectFilter(object, value, &done);
  Stop("Store buffer update is required");
  Bind(&done);
#endif  // defined(DEBUG)
  // No store buffer update.
}

-
void Assembler::StoreIntoObjectOffsetNoBarrier(Register object,
                                               int32_t offset,
                                               Register value) {
  if (Address::CanHoldOffset(offset - kHeapObjectTag)) {
    StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value);
  } else {
    AddImmediate(TMP, object, offset - kHeapObjectTag);
    StoreIntoObjectNoBarrier(object, Address(TMP), value);
  }
}

-
void Assembler::StoreIntoObjectNoBarrier(Register object,
                                         const Address& dest,
                                         const Object& value) {
  ASSERT(!value.IsICData() || ICData::Cast(value).IsOriginal());
  ASSERT(!value.IsField() || Field::Cast(value).IsOriginal());
  ASSERT(value.IsSmi() || value.InVMHeap() ||
         (value.IsOld() && value.IsNotTemporaryScopedHandle()));
  // No store buffer update.
  LoadObject(TMP2, value);
  str(TMP2, dest);
}

-
void Assembler::StoreIntoObjectOffsetNoBarrier(Register object,
                                               int32_t offset,
                                               const Object& value) {
  if (Address::CanHoldOffset(offset - kHeapObjectTag)) {
    StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value);
  } else {
    AddImmediate(TMP, object, offset - kHeapObjectTag);
    StoreIntoObjectNoBarrier(object, Address(TMP), value);
  }
}

-
void Assembler::LoadClassId(Register result, Register object) {
  ASSERT(RawObject::kClassIdTagPos == 16);
  ASSERT(RawObject::kClassIdTagSize == 16);
  const intptr_t class_id_offset =
      Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
  LoadFromOffset(result, object, class_id_offset - kHeapObjectTag,
                 kUnsignedHalfword);
}

-
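Since kClassIdTagPos is 16 and the tag size is 16, the cid occupies exactly the second halfword of the tags word, which is why a single kUnsignedHalfword load at tags_offset + 2 suffices. An equivalent C read, as a sketch (assumes a little-endian target, which holds for the ARM64 configurations here):

static uint16_t ReadClassIdFromTags(const uint8_t* tags_addr) {
  uint16_t cid;
  // Bits 16..31 of the 64-bit tags word are bytes 2..3 when little-endian.
  memcpy(&cid, tags_addr + RawObject::kClassIdTagPos / kBitsPerByte,
         sizeof(cid));
  return cid;
}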
void Assembler::LoadClassById(Register result, Register class_id) {
  ASSERT(result != class_id);
  LoadIsolate(result);
  const intptr_t offset =
      Isolate::class_table_offset() + ClassTable::table_offset();
  LoadFromOffset(result, result, offset);
  ldr(result, Address(result, class_id, UXTX, Address::Scaled));
}

-
void Assembler::LoadClass(Register result, Register object) {
  ASSERT(object != TMP);
  LoadClassId(TMP, object);
  LoadClassById(result, TMP);
}

-
void Assembler::CompareClassId(Register object, intptr_t class_id) {
  LoadClassId(TMP, object);
  CompareImmediate(TMP, class_id);
}

-
void Assembler::LoadClassIdMayBeSmi(Register result, Register object) {
  // Load up a null object. We only need it so we can use LoadClassId on it in
  // the case that object is a Smi.
  LoadObject(TMP, Object::null_object());
  // Check if the object is a Smi.
  tsti(object, Immediate(kSmiTagMask));
  // If the object *is* a Smi, use the null object instead; o/w leave alone.
  csel(TMP, TMP, object, EQ);
  // Loads either the cid of the object if it isn't a Smi, or the cid of null
  // if it is a Smi, which will be ignored.
  LoadClassId(result, TMP);

  LoadImmediate(TMP, kSmiCid);
  // If object is a Smi, move the Smi cid into result; o/w leave alone.
  csel(result, TMP, result, EQ);
}

-
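The two csel instructions make this branch-free: the condition flags set by tsti stay live across LoadClassId, which only emits loads. A branchy C model of the same result (NullObjectCid() and ClassIdOf() are hypothetical stand-ins for the loads above):

static intptr_t ClassIdMayBeSmiModel(uword object) {
  const bool is_smi = (object & kSmiTagMask) == 0;
  // csel TMP, TMP, object, EQ: probe null instead when object is a Smi.
  const intptr_t cid = is_smi ? NullObjectCid() : ClassIdOf(object);
  // csel result, TMP, result, EQ: a Smi reports kSmiCid instead.
  return is_smi ? kSmiCid : cid;
}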
1026 void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) { 963 void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) {
1027 LoadClassIdMayBeSmi(result, object); 964 LoadClassIdMayBeSmi(result, object);
1028 // Finally, tag the result. 965 // Finally, tag the result.
1029 SmiTag(result); 966 SmiTag(result);
1030 } 967 }
1031 968
1032
1033 // Frame entry and exit. 969 // Frame entry and exit.
1034 void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) { 970 void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
1035 // Reserve space for arguments and align frame before entering 971 // Reserve space for arguments and align frame before entering
1036 // the C++ world. 972 // the C++ world.
1037 if (frame_space != 0) { 973 if (frame_space != 0) {
1038 AddImmediate(SP, -frame_space); 974 AddImmediate(SP, -frame_space);
1039 } 975 }
1040 if (OS::ActivationFrameAlignment() > 1) { 976 if (OS::ActivationFrameAlignment() > 1) {
1041 andi(SP, SP, Immediate(~(OS::ActivationFrameAlignment() - 1))); 977 andi(SP, SP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
1042 } 978 }
1043 } 979 }
1044 980
1045
1046 void Assembler::RestoreCodePointer() { 981 void Assembler::RestoreCodePointer() {
1047 ldr(CODE_REG, Address(FP, kPcMarkerSlotFromFp * kWordSize)); 982 ldr(CODE_REG, Address(FP, kPcMarkerSlotFromFp * kWordSize));
1048 CheckCodePointer(); 983 CheckCodePointer();
1049 } 984 }
1050 985
1051
1052 void Assembler::CheckCodePointer() { 986 void Assembler::CheckCodePointer() {
1053 #ifdef DEBUG 987 #ifdef DEBUG
1054 if (!FLAG_check_code_pointer) { 988 if (!FLAG_check_code_pointer) {
1055 return; 989 return;
1056 } 990 }
1057 Comment("CheckCodePointer"); 991 Comment("CheckCodePointer");
1058 Label cid_ok, instructions_ok; 992 Label cid_ok, instructions_ok;
1059 Push(R0); 993 Push(R0);
1060 CompareClassId(CODE_REG, kCodeCid); 994 CompareClassId(CODE_REG, kCodeCid);
1061 b(&cid_ok, EQ); 995 b(&cid_ok, EQ);
1062 brk(0); 996 brk(0);
1063 Bind(&cid_ok); 997 Bind(&cid_ok);
1064 998
1065 const intptr_t entry_offset = 999 const intptr_t entry_offset =
1066 CodeSize() + Instructions::HeaderSize() - kHeapObjectTag; 1000 CodeSize() + Instructions::HeaderSize() - kHeapObjectTag;
1067 adr(R0, Immediate(-entry_offset)); 1001 adr(R0, Immediate(-entry_offset));
1068 ldr(TMP, FieldAddress(CODE_REG, Code::saved_instructions_offset())); 1002 ldr(TMP, FieldAddress(CODE_REG, Code::saved_instructions_offset()));
1069 cmp(R0, Operand(TMP)); 1003 cmp(R0, Operand(TMP));
1070 b(&instructions_ok, EQ); 1004 b(&instructions_ok, EQ);
1071 brk(1); 1005 brk(1);
1072 Bind(&instructions_ok); 1006 Bind(&instructions_ok);
1073 Pop(R0); 1007 Pop(R0);
1074 #endif 1008 #endif
1075 } 1009 }
1076 1010
1077
1078 void Assembler::SetupDartSP() { 1011 void Assembler::SetupDartSP() {
1079 mov(SP, CSP); 1012 mov(SP, CSP);
1080 } 1013 }
1081 1014
1082
1083 void Assembler::RestoreCSP() { 1015 void Assembler::RestoreCSP() {
1084 mov(CSP, SP); 1016 mov(CSP, SP);
1085 } 1017 }
1086 1018
1087
1088 void Assembler::EnterFrame(intptr_t frame_size) { 1019 void Assembler::EnterFrame(intptr_t frame_size) {
1089 // The ARM64 ABI requires at all times 1020 // The ARM64 ABI requires at all times
1090 // - stack limit < CSP <= stack base 1021 // - stack limit < CSP <= stack base
1091 // - CSP mod 16 = 0 1022 // - CSP mod 16 = 0
1092 // - we do not access stack memory below CSP 1023 // - we do not access stack memory below CSP
1093 // Pratically, this means we need to keep the C stack pointer ahead of the 1024 // Pratically, this means we need to keep the C stack pointer ahead of the
1094 // Dart stack pointer and 16-byte aligned for signal handlers. If we knew the 1025 // Dart stack pointer and 16-byte aligned for signal handlers. If we knew the
1095 // real stack limit, we could just set CSP to a value near it during 1026 // real stack limit, we could just set CSP to a value near it during
1096 // SetupDartSP, but we do not know the real stack limit for the initial 1027 // SetupDartSP, but we do not know the real stack limit for the initial
1097 // thread or threads created by the embedder. 1028 // thread or threads created by the embedder.
1098 // TODO(26472): It would be safer to use CSP as the Dart stack pointer, but 1029 // TODO(26472): It would be safer to use CSP as the Dart stack pointer, but
1099 // this requires adjustments to stack handling to maintain the 16-byte 1030 // this requires adjustments to stack handling to maintain the 16-byte
1100 // alignment. 1031 // alignment.
1101 const intptr_t kMaxDartFrameSize = 4096; 1032 const intptr_t kMaxDartFrameSize = 4096;
1102 sub(TMP, SP, Operand(kMaxDartFrameSize)); 1033 sub(TMP, SP, Operand(kMaxDartFrameSize));
1103 andi(CSP, TMP, Immediate(~15)); 1034 andi(CSP, TMP, Immediate(~15));
1104 1035
1105 PushPair(FP, LR); // low: FP, high: LR. 1036 PushPair(FP, LR); // low: FP, high: LR.
1106 mov(FP, SP); 1037 mov(FP, SP);
1107 1038
1108 if (frame_size > 0) { 1039 if (frame_size > 0) {
1109 sub(SP, SP, Operand(frame_size)); 1040 sub(SP, SP, Operand(frame_size));
1110 } 1041 }
1111 } 1042 }
1112 1043
1113
1114 void Assembler::LeaveFrame() { 1044 void Assembler::LeaveFrame() {
1115 mov(SP, FP); 1045 mov(SP, FP);
1116 PopPair(FP, LR); // low: FP, high: LR. 1046 PopPair(FP, LR); // low: FP, high: LR.
1117 } 1047 }
1118 1048
1119
1120 void Assembler::EnterDartFrame(intptr_t frame_size, Register new_pp) { 1049 void Assembler::EnterDartFrame(intptr_t frame_size, Register new_pp) {
1121 ASSERT(!constant_pool_allowed()); 1050 ASSERT(!constant_pool_allowed());
1122 // Set up the frame. 1051 // Set up the frame.
1123 EnterFrame(0); 1052 EnterFrame(0);
1124 TagAndPushPPAndPcMarker(); // Save PP and PC marker. 1053 TagAndPushPPAndPcMarker(); // Save PP and PC marker.
1125 1054
1126 // Load the pool pointer. 1055 // Load the pool pointer.
1127 if (new_pp == kNoRegister) { 1056 if (new_pp == kNoRegister) {
1128 LoadPoolPointer(); 1057 LoadPoolPointer();
1129 } else { 1058 } else {
1130 mov(PP, new_pp); 1059 mov(PP, new_pp);
1131 set_constant_pool_allowed(true); 1060 set_constant_pool_allowed(true);
1132 } 1061 }
1133 1062
1134 // Reserve space. 1063 // Reserve space.
1135 if (frame_size > 0) { 1064 if (frame_size > 0) {
1136 AddImmediate(SP, -frame_size); 1065 AddImmediate(SP, -frame_size);
1137 } 1066 }
1138 } 1067 }
1139 1068
1140
1141 // On entry to a function compiled for OSR, the caller's frame pointer, the 1069 // On entry to a function compiled for OSR, the caller's frame pointer, the
1142 // stack locals, and any copied parameters are already in place. The frame 1070 // stack locals, and any copied parameters are already in place. The frame
1143 // pointer is already set up. The PC marker is not correct for the 1071 // pointer is already set up. The PC marker is not correct for the
1144 // optimized function and there may be extra space for spill slots to 1072 // optimized function and there may be extra space for spill slots to
1145 // allocate. We must also set up the pool pointer for the function. 1073 // allocate. We must also set up the pool pointer for the function.
1146 void Assembler::EnterOsrFrame(intptr_t extra_size, Register new_pp) { 1074 void Assembler::EnterOsrFrame(intptr_t extra_size, Register new_pp) {
1147 ASSERT(!constant_pool_allowed()); 1075 ASSERT(!constant_pool_allowed());
1148 Comment("EnterOsrFrame"); 1076 Comment("EnterOsrFrame");
1149 RestoreCodePointer(); 1077 RestoreCodePointer();
1150 LoadPoolPointer(); 1078 LoadPoolPointer();
1151 1079
1152 if (extra_size > 0) { 1080 if (extra_size > 0) {
1153 AddImmediate(SP, -extra_size); 1081 AddImmediate(SP, -extra_size);
1154 } 1082 }
1155 } 1083 }
1156 1084
1157
1158 void Assembler::LeaveDartFrame(RestorePP restore_pp) { 1085 void Assembler::LeaveDartFrame(RestorePP restore_pp) {
1159 if (restore_pp == kRestoreCallerPP) { 1086 if (restore_pp == kRestoreCallerPP) {
1160 set_constant_pool_allowed(false); 1087 set_constant_pool_allowed(false);
1161 // Restore and untag PP. 1088 // Restore and untag PP.
1162 LoadFromOffset(PP, FP, kSavedCallerPpSlotFromFp * kWordSize); 1089 LoadFromOffset(PP, FP, kSavedCallerPpSlotFromFp * kWordSize);
1163 sub(PP, PP, Operand(kHeapObjectTag)); 1090 sub(PP, PP, Operand(kHeapObjectTag));
1164 } 1091 }
1165 LeaveFrame(); 1092 LeaveFrame();
1166 } 1093 }
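PP is saved in its slot with the heap-object tag added, so restoring it subtracts the tag back out. A tiny round-trip sketch, assuming the 1-bit tag value:

    #include <cassert>
    #include <cstdint>

    const intptr_t kHeapObjectTag = 1;  // assumed tag value for this sketch

    int main() {
      intptr_t pp = 0x5000;                        // hypothetical pool pointer
      intptr_t saved = pp + kHeapObjectTag;        // tagged before being pushed
      intptr_t restored = saved - kHeapObjectTag;  // sub PP, PP, kHeapObjectTag
      assert(restored == pp);
      return 0;
    }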
1167 1094
1168
1169 void Assembler::EnterCallRuntimeFrame(intptr_t frame_size) { 1095 void Assembler::EnterCallRuntimeFrame(intptr_t frame_size) {
1170 Comment("EnterCallRuntimeFrame"); 1096 Comment("EnterCallRuntimeFrame");
1171 EnterStubFrame(); 1097 EnterStubFrame();
1172 1098
1173 // Store fpu registers with the lowest register number at the lowest 1099 // Store fpu registers with the lowest register number at the lowest
1174 // address. 1100 // address.
1175 for (int i = kNumberOfVRegisters - 1; i >= 0; i--) { 1101 for (int i = kNumberOfVRegisters - 1; i >= 0; i--) {
1176 if ((i >= kAbiFirstPreservedFpuReg) && (i <= kAbiLastPreservedFpuReg)) { 1102 if ((i >= kAbiFirstPreservedFpuReg) && (i <= kAbiLastPreservedFpuReg)) {
1177 // TODO(zra): When SIMD is added, we must also preserve the top 1103 // TODO(zra): When SIMD is added, we must also preserve the top
1178 // 64-bits of the callee-saved registers. 1104 // 64-bits of the callee-saved registers.
1179 continue; 1105 continue;
1180 } 1106 }
1181 // TODO(zra): Save the whole V register. 1107 // TODO(zra): Save the whole V register.
1182 VRegister reg = static_cast<VRegister>(i); 1108 VRegister reg = static_cast<VRegister>(i);
1183 PushDouble(reg); 1109 PushDouble(reg);
1184 } 1110 }
1185 1111
1186 for (int i = kDartFirstVolatileCpuReg; i <= kDartLastVolatileCpuReg; i++) { 1112 for (int i = kDartFirstVolatileCpuReg; i <= kDartLastVolatileCpuReg; i++) {
1187 const Register reg = static_cast<Register>(i); 1113 const Register reg = static_cast<Register>(i);
1188 Push(reg); 1114 Push(reg);
1189 } 1115 }
1190 1116
1191 ReserveAlignedFrameSpace(frame_size); 1117 ReserveAlignedFrameSpace(frame_size);
1192 } 1118 }
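Since the stack grows downward, iterating from the highest register index to the lowest leaves the lowest-numbered register at the lowest address, matching the comment above. A small sketch of the ordering (the register count here is made up):

    #include <cstdio>

    int main() {
      const int kNumRegs = 4;  // hypothetical count, just to show the ordering
      int sp = 100;            // fake stack addresses, descending
      for (int i = kNumRegs - 1; i >= 0; i--) {  // same loop direction as above
        sp -= 8;                                 // each Push moves SP down a slot
        printf("v%d stored at address %d\n", i, sp);
      }
      // The output shows v0 ending up at the lowest address.
      return 0;
    }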
1193 1119
1194
1195 void Assembler::LeaveCallRuntimeFrame() { 1120 void Assembler::LeaveCallRuntimeFrame() {
1196 // SP might have been modified to reserve space for arguments 1121 // SP might have been modified to reserve space for arguments
1197 // and ensure proper alignment of the stack frame. 1122 // and ensure proper alignment of the stack frame.
1198 // We need to restore it before restoring registers. 1123 // We need to restore it before restoring registers.
1199 const intptr_t kPushedRegistersSize = 1124 const intptr_t kPushedRegistersSize =
1200 kDartVolatileCpuRegCount * kWordSize + 1125 kDartVolatileCpuRegCount * kWordSize +
1201 kDartVolatileFpuRegCount * kWordSize + 1126 kDartVolatileFpuRegCount * kWordSize +
1202 2 * kWordSize; // PP and pc marker from EnterStubFrame. 1127 2 * kWordSize; // PP and pc marker from EnterStubFrame.
1203 AddImmediate(SP, FP, -kPushedRegistersSize); 1128 AddImmediate(SP, FP, -kPushedRegistersSize);
1204 for (int i = kDartLastVolatileCpuReg; i >= kDartFirstVolatileCpuReg; i--) { 1129 for (int i = kDartLastVolatileCpuReg; i >= kDartFirstVolatileCpuReg; i--) {
1205 const Register reg = static_cast<Register>(i); 1130 const Register reg = static_cast<Register>(i);
1206 Pop(reg); 1131 Pop(reg);
1207 } 1132 }
1208 1133
1209 for (int i = 0; i < kNumberOfVRegisters; i++) { 1134 for (int i = 0; i < kNumberOfVRegisters; i++) {
1210 if ((i >= kAbiFirstPreservedFpuReg) && (i <= kAbiLastPreservedFpuReg)) { 1135 if ((i >= kAbiFirstPreservedFpuReg) && (i <= kAbiLastPreservedFpuReg)) {
1211 // TODO(zra): When SIMD is added, we must also restore the top 1136 // TODO(zra): When SIMD is added, we must also restore the top
1212 // 64-bits of the callee-saved registers. 1137 // 64-bits of the callee-saved registers.
1213 continue; 1138 continue;
1214 } 1139 }
1215 // TODO(zra): Restore the whole V register. 1140 // TODO(zra): Restore the whole V register.
1216 VRegister reg = static_cast<VRegister>(i); 1141 VRegister reg = static_cast<VRegister>(i);
1217 PopDouble(reg); 1142 PopDouble(reg);
1218 } 1143 }
1219 1144
1220 LeaveStubFrame(); 1145 LeaveStubFrame();
1221 } 1146 }
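The SP restore below FP has to account for every slot pushed in EnterCallRuntimeFrame. A worked version of that size computation, with assumed register counts (the real kDartVolatile* constants are defined elsewhere in the VM):

    #include <cstdio>

    int main() {
      const int kWordSize = 8;                  // 64-bit words on ARM64
      const int kDartVolatileCpuRegCount = 15;  // assumed count for this sketch
      const int kDartVolatileFpuRegCount = 24;  // assumed count for this sketch
      const int kPushedRegistersSize =
          kDartVolatileCpuRegCount * kWordSize +
          kDartVolatileFpuRegCount * kWordSize +
          2 * kWordSize;  // PP and pc marker from EnterStubFrame
      // AddImmediate(SP, FP, -kPushedRegistersSize) points SP at the last push.
      printf("SP = FP - %d\n", kPushedRegistersSize);
      return 0;
    }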
1222 1147
1223
1224 void Assembler::CallRuntime(const RuntimeEntry& entry, 1148 void Assembler::CallRuntime(const RuntimeEntry& entry,
1225 intptr_t argument_count) { 1149 intptr_t argument_count) {
1226 entry.Call(this, argument_count); 1150 entry.Call(this, argument_count);
1227 } 1151 }
1228 1152
1229
1230 void Assembler::EnterStubFrame() { 1153 void Assembler::EnterStubFrame() {
1231 EnterDartFrame(0); 1154 EnterDartFrame(0);
1232 } 1155 }
1233 1156
1234
1235 void Assembler::LeaveStubFrame() { 1157 void Assembler::LeaveStubFrame() {
1236 LeaveDartFrame(); 1158 LeaveDartFrame();
1237 } 1159 }
1238 1160
1239
1240 // R0 receiver, R5 guarded cid as Smi 1161 // R0 receiver, R5 guarded cid as Smi
1241 void Assembler::MonomorphicCheckedEntry() { 1162 void Assembler::MonomorphicCheckedEntry() {
1242 ASSERT(has_single_entry_point_); 1163 ASSERT(has_single_entry_point_);
1243 has_single_entry_point_ = false; 1164 has_single_entry_point_ = false;
1244 bool saved_use_far_branches = use_far_branches(); 1165 bool saved_use_far_branches = use_far_branches();
1245 set_use_far_branches(false); 1166 set_use_far_branches(false);
1246 1167
1247 Label immediate, have_cid, miss; 1168 Label immediate, have_cid, miss;
1248 Bind(&miss); 1169 Bind(&miss);
1249 ldr(IP0, Address(THR, Thread::monomorphic_miss_entry_offset())); 1170 ldr(IP0, Address(THR, Thread::monomorphic_miss_entry_offset()));
(...skipping 14 matching lines...)
1264 Bind(&have_cid); 1185 Bind(&have_cid);
1265 cmp(R4, Operand(R5)); 1186 cmp(R4, Operand(R5));
1266 b(&miss, NE); 1187 b(&miss, NE);
1267 1188
1268 // Fall through to unchecked entry. 1189 // Fall through to unchecked entry.
1269 ASSERT(CodeSize() == Instructions::kUncheckedEntryOffset); 1190 ASSERT(CodeSize() == Instructions::kUncheckedEntryOffset);
1270 1191
1271 set_use_far_branches(saved_use_far_branches); 1192 set_use_far_branches(saved_use_far_branches);
1272 } 1193 }
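In effect, the checked entry loads the receiver's class id into R4 (the elided lines handle immediates by substituting the Smi class) and compares it, as a Smi, against the guarded cid in R5. A hedged C++ model of that check; the names and Smi encoding here are illustrative assumptions:

    #include <cstdint>
    #include <cstdio>

    const intptr_t kSmiCid = 1;  // hypothetical class id for Smi

    // Stand-in for reading the class id out of a heap object's header.
    intptr_t LoadClassIdFromHeader(uintptr_t) { return 42; }

    intptr_t ReceiverCid(uintptr_t receiver) {
      if ((receiver & 1) == 0) return kSmiCid;  // tag bit clear: an immediate
      return LoadClassIdFromHeader(receiver);
    }

    bool PassesMonomorphicCheck(uintptr_t receiver, intptr_t guarded_cid_smi) {
      intptr_t receiver_cid_smi = ReceiverCid(receiver) << 1;  // re-tag as Smi
      return receiver_cid_smi == guarded_cid_smi;  // NE branches to the miss
    }

    int main() {
      // A hypothetical heap receiver whose cid matches the guard falls through.
      printf("%d\n", (int)PassesMonomorphicCheck(0x1001, 42 << 1));
      return 0;
    }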
1273 1194
1274
1275 #ifndef PRODUCT 1195 #ifndef PRODUCT
1276 void Assembler::MaybeTraceAllocation(intptr_t cid, 1196 void Assembler::MaybeTraceAllocation(intptr_t cid,
1277 Register temp_reg, 1197 Register temp_reg,
1278 Label* trace) { 1198 Label* trace) {
1279 ASSERT(cid > 0); 1199 ASSERT(cid > 0);
1280 intptr_t state_offset = ClassTable::StateOffsetFor(cid); 1200 intptr_t state_offset = ClassTable::StateOffsetFor(cid);
1281 LoadIsolate(temp_reg); 1201 LoadIsolate(temp_reg);
1282 intptr_t table_offset = 1202 intptr_t table_offset =
1283 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); 1203 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
1284 ldr(temp_reg, Address(temp_reg, table_offset)); 1204 ldr(temp_reg, Address(temp_reg, table_offset));
1285 AddImmediate(temp_reg, state_offset); 1205 AddImmediate(temp_reg, state_offset);
1286 ldr(temp_reg, Address(temp_reg, 0)); 1206 ldr(temp_reg, Address(temp_reg, 0));
1287 tsti(temp_reg, Immediate(ClassHeapStats::TraceAllocationMask())); 1207 tsti(temp_reg, Immediate(ClassHeapStats::TraceAllocationMask()));
1288 b(trace, NE); 1208 b(trace, NE);
1289 } 1209 }
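The lookup chases two levels of indirection, isolate to class table to per-class stats, then tests the trace bit. A minimal model of the same chain with a hypothetical layout:

    #include <cstdint>
    #include <cstdio>

    struct ClassHeapStats {
      uint64_t state;  // holds the trace-allocation bit, among others
    };

    const uint64_t kTraceAllocationMask = 1;  // assumed bit for this sketch

    bool ShouldTraceAllocation(ClassHeapStats* table, intptr_t cid) {
      ClassHeapStats* stats = &table[cid];  // table_offset, then state_offset
      return (stats->state & kTraceAllocationMask) != 0;  // tsti; b(trace, NE)
    }

    int main() {
      ClassHeapStats table[8] = {};
      table[3].state = kTraceAllocationMask;  // pretend cid 3 is being traced
      printf("%d %d\n", (int)ShouldTraceAllocation(table, 3),
             (int)ShouldTraceAllocation(table, 4));
      return 0;
    }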
1290 1210
1291
1292 void Assembler::UpdateAllocationStats(intptr_t cid, Heap::Space space) { 1211 void Assembler::UpdateAllocationStats(intptr_t cid, Heap::Space space) {
1293 ASSERT(cid > 0); 1212 ASSERT(cid > 0);
1294 intptr_t counter_offset = 1213 intptr_t counter_offset =
1295 ClassTable::CounterOffsetFor(cid, space == Heap::kNew); 1214 ClassTable::CounterOffsetFor(cid, space == Heap::kNew);
1296 LoadIsolate(TMP2); 1215 LoadIsolate(TMP2);
1297 intptr_t table_offset = 1216 intptr_t table_offset =
1298 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); 1217 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
1299 ldr(TMP, Address(TMP2, table_offset)); 1218 ldr(TMP, Address(TMP2, table_offset));
1300 AddImmediate(TMP2, TMP, counter_offset); 1219 AddImmediate(TMP2, TMP, counter_offset);
1301 ldr(TMP, Address(TMP2, 0)); 1220 ldr(TMP, Address(TMP2, 0));
1302 AddImmediate(TMP, 1); 1221 AddImmediate(TMP, 1);
1303 str(TMP, Address(TMP2, 0)); 1222 str(TMP, Address(TMP2, 0));
1304 } 1223 }
1305 1224
1306
1307 void Assembler::UpdateAllocationStatsWithSize(intptr_t cid, 1225 void Assembler::UpdateAllocationStatsWithSize(intptr_t cid,
1308 Register size_reg, 1226 Register size_reg,
1309 Heap::Space space) { 1227 Heap::Space space) {
1310 ASSERT(cid > 0); 1228 ASSERT(cid > 0);
1311 const uword class_offset = ClassTable::ClassOffsetFor(cid); 1229 const uword class_offset = ClassTable::ClassOffsetFor(cid);
1312 const uword count_field_offset = 1230 const uword count_field_offset =
1313 (space == Heap::kNew) 1231 (space == Heap::kNew)
1314 ? ClassHeapStats::allocated_since_gc_new_space_offset() 1232 ? ClassHeapStats::allocated_since_gc_new_space_offset()
1315 : ClassHeapStats::allocated_since_gc_old_space_offset(); 1233 : ClassHeapStats::allocated_since_gc_old_space_offset();
1316 const uword size_field_offset = 1234 const uword size_field_offset =
1317 (space == Heap::kNew) 1235 (space == Heap::kNew)
1318 ? ClassHeapStats::allocated_size_since_gc_new_space_offset() 1236 ? ClassHeapStats::allocated_size_since_gc_new_space_offset()
1319 : ClassHeapStats::allocated_size_since_gc_old_space_offset(); 1237 : ClassHeapStats::allocated_size_since_gc_old_space_offset();
1320 LoadIsolate(TMP2); 1238 LoadIsolate(TMP2);
1321 intptr_t table_offset = 1239 intptr_t table_offset =
1322 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); 1240 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
1323 ldr(TMP, Address(TMP2, table_offset)); 1241 ldr(TMP, Address(TMP2, table_offset));
1324 AddImmediate(TMP2, TMP, class_offset); 1242 AddImmediate(TMP2, TMP, class_offset);
1325 ldr(TMP, Address(TMP2, count_field_offset)); 1243 ldr(TMP, Address(TMP2, count_field_offset));
1326 AddImmediate(TMP, 1); 1244 AddImmediate(TMP, 1);
1327 str(TMP, Address(TMP2, count_field_offset)); 1245 str(TMP, Address(TMP2, count_field_offset));
1328 ldr(TMP, Address(TMP2, size_field_offset)); 1246 ldr(TMP, Address(TMP2, size_field_offset));
1329 add(TMP, TMP, Operand(size_reg)); 1247 add(TMP, TMP, Operand(size_reg));
1330 str(TMP, Address(TMP2, size_field_offset)); 1248 str(TMP, Address(TMP2, size_field_offset));
1331 } 1249 }
1332 #endif // !PRODUCT 1250 #endif // !PRODUCT
1333 1251
1334
1335 void Assembler::TryAllocate(const Class& cls, 1252 void Assembler::TryAllocate(const Class& cls,
1336 Label* failure, 1253 Label* failure,
1337 Register instance_reg, 1254 Register instance_reg,
1338 Register temp_reg) { 1255 Register temp_reg) {
1339 ASSERT(failure != NULL); 1256 ASSERT(failure != NULL);
1340 if (FLAG_inline_alloc) { 1257 if (FLAG_inline_alloc) {
1341 // If this allocation is traced, the program will jump to the failure path 1258 // If this allocation is traced, the program will jump to the failure path
1342 // (i.e. the allocation stub) which will allocate the object and trace the 1259 // (i.e. the allocation stub) which will allocate the object and trace the
1343 // allocation call site. 1260 // allocation call site.
1344 NOT_IN_PRODUCT(MaybeTraceAllocation(cls.id(), temp_reg, failure)); 1261 NOT_IN_PRODUCT(MaybeTraceAllocation(cls.id(), temp_reg, failure));
(...skipping 24 matching lines...)
1369 tags = RawObject::ClassIdTag::update(cls.id(), tags); 1286 tags = RawObject::ClassIdTag::update(cls.id(), tags);
1370 // Extends the 32-bit tags with zeros, which is the uninitialized 1287 // Extends the 32-bit tags with zeros, which is the uninitialized
1371 // hash code. 1288 // hash code.
1372 LoadImmediate(TMP, tags); 1289 LoadImmediate(TMP, tags);
1373 StoreFieldToOffset(TMP, instance_reg, Object::tags_offset()); 1290 StoreFieldToOffset(TMP, instance_reg, Object::tags_offset());
1374 } else { 1291 } else {
1375 b(failure); 1292 b(failure);
1376 } 1293 }
1377 } 1294 }
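The tags word packs the instance size and class id into bit fields before it is stored into the new object's header. A sketch of that packing with made-up field positions; the real SizeTag/ClassIdTag shifts live in raw_object.h and may differ:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical field positions, for illustration only.
    const int kSizeTagShift = 8;         // assumed
    const int kClassIdTagShift = 16;     // assumed
    const int kObjectAlignmentLog2 = 4;  // sizes stored scaled down, assumed

    uint32_t MakeTags(uint32_t instance_size, uint32_t cid) {
      uint32_t tags = 0;
      tags |= (instance_size >> kObjectAlignmentLog2) << kSizeTagShift;
      tags |= cid << kClassIdTagShift;
      return tags;  // LoadImmediate zero-extends this to 64 bits; the upper
                    // half doubles as the uninitialized hash code.
    }

    int main() {
      printf("%#x\n", MakeTags(32, 7));
      return 0;
    }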
1378 1295
1379
1380 void Assembler::TryAllocateArray(intptr_t cid, 1296 void Assembler::TryAllocateArray(intptr_t cid,
1381 intptr_t instance_size, 1297 intptr_t instance_size,
1382 Label* failure, 1298 Label* failure,
1383 Register instance, 1299 Register instance,
1384 Register end_address, 1300 Register end_address,
1385 Register temp1, 1301 Register temp1,
1386 Register temp2) { 1302 Register temp2) {
1387 if (FLAG_inline_alloc) { 1303 if (FLAG_inline_alloc) {
1388 // If this allocation is traced, the program will jump to the failure path 1304 // If this allocation is traced, the program will jump to the failure path
1389 // (i.e. the allocation stub) which will allocate the object and trace the 1305 // (i.e. the allocation stub) which will allocate the object and trace the
(...skipping 27 matching lines...)
1417 tags = RawObject::SizeTag::update(instance_size, tags); 1333 tags = RawObject::SizeTag::update(instance_size, tags);
1418 // Extends the 32-bit tags with zeros, which is the uninitialized 1334 // Extends the 32-bit tags with zeros, which is the uninitialized
1419 // hash code. 1335 // hash code.
1420 LoadImmediate(temp2, tags); 1336 LoadImmediate(temp2, tags);
1421 str(temp2, FieldAddress(instance, Array::tags_offset())); // Store tags. 1337 str(temp2, FieldAddress(instance, Array::tags_offset())); // Store tags.
1422 } else { 1338 } else {
1423 b(failure); 1339 b(failure);
1424 } 1340 }
1425 } 1341 }
1426 1342
1427
1428 Address Assembler::ElementAddressForIntIndex(bool is_external, 1343 Address Assembler::ElementAddressForIntIndex(bool is_external,
1429 intptr_t cid, 1344 intptr_t cid,
1430 intptr_t index_scale, 1345 intptr_t index_scale,
1431 Register array, 1346 Register array,
1432 intptr_t index) const { 1347 intptr_t index) const {
1433 const int64_t offset = 1348 const int64_t offset =
1434 index * index_scale + 1349 index * index_scale +
1435 (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); 1350 (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag));
1436 ASSERT(Utils::IsInt(32, offset)); 1351 ASSERT(Utils::IsInt(32, offset));
1437 const OperandSize size = Address::OperandSizeFor(cid); 1352 const OperandSize size = Address::OperandSizeFor(cid);
1438 ASSERT(Address::CanHoldOffset(offset, Address::Offset, size)); 1353 ASSERT(Address::CanHoldOffset(offset, Address::Offset, size));
1439 return Address(array, static_cast<int32_t>(offset), Address::Offset, size); 1354 return Address(array, static_cast<int32_t>(offset), Address::Offset, size);
1440 } 1355 }
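For a constant index the element address reduces to a single immediate offset: index times scale, plus the payload offset minus the heap tag when the data is inside a heap object. A worked example with a hypothetical payload offset:

    #include <cstdint>
    #include <cstdio>

    int64_t ElementOffset(bool is_external, int64_t data_offset_for_cid,
                          intptr_t index_scale, intptr_t index) {
      const int64_t kHeapObjectTag = 1;  // assumed 1-bit tag
      return index * index_scale +
             (is_external ? 0 : (data_offset_for_cid - kHeapObjectTag));
    }

    int main() {
      // Hypothetical: 8-byte elements at index 3, payload starting 16 bytes in.
      printf("internal: %lld\n", (long long)ElementOffset(false, 16, 8, 3));  // 39
      printf("external: %lld\n", (long long)ElementOffset(true, 16, 8, 3));   // 24
      return 0;
    }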
1441 1356
1442
1443 void Assembler::LoadElementAddressForIntIndex(Register address, 1357 void Assembler::LoadElementAddressForIntIndex(Register address,
1444 bool is_external, 1358 bool is_external,
1445 intptr_t cid, 1359 intptr_t cid,
1446 intptr_t index_scale, 1360 intptr_t index_scale,
1447 Register array, 1361 Register array,
1448 intptr_t index) { 1362 intptr_t index) {
1449 const int64_t offset = 1363 const int64_t offset =
1450 index * index_scale + 1364 index * index_scale +
1451 (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); 1365 (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag));
1452 AddImmediate(address, array, offset); 1366 AddImmediate(address, array, offset);
1453 } 1367 }
1454 1368
1455
1456 Address Assembler::ElementAddressForRegIndex(bool is_load, 1369 Address Assembler::ElementAddressForRegIndex(bool is_load,
1457 bool is_external, 1370 bool is_external,
1458 intptr_t cid, 1371 intptr_t cid,
1459 intptr_t index_scale, 1372 intptr_t index_scale,
1460 Register array, 1373 Register array,
1461 Register index) { 1374 Register index) {
1462 // Note that index is expected smi-tagged (i.e., LSL 1) for all arrays. 1375 // Note that index is expected smi-tagged (i.e., LSL 1) for all arrays.
1463 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) - kSmiTagShift; 1376 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) - kSmiTagShift;
1464 const int32_t offset = 1377 const int32_t offset =
1465 is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag); 1378 is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag);
1466 ASSERT(array != TMP); 1379 ASSERT(array != TMP);
1467 ASSERT(index != TMP); 1380 ASSERT(index != TMP);
1468 const Register base = is_load ? TMP : index; 1381 const Register base = is_load ? TMP : index;
1469 if ((offset == 0) && (shift == 0)) { 1382 if ((offset == 0) && (shift == 0)) {
1470 return Address(array, index, UXTX, Address::Unscaled); 1383 return Address(array, index, UXTX, Address::Unscaled);
1471 } else if (shift < 0) { 1384 } else if (shift < 0) {
1472 ASSERT(shift == -1); 1385 ASSERT(shift == -1);
1473 add(base, array, Operand(index, ASR, 1)); 1386 add(base, array, Operand(index, ASR, 1));
1474 } else { 1387 } else {
1475 add(base, array, Operand(index, LSL, shift)); 1388 add(base, array, Operand(index, LSL, shift));
1476 } 1389 }
1477 const OperandSize size = Address::OperandSizeFor(cid); 1390 const OperandSize size = Address::OperandSizeFor(cid);
1478 ASSERT(Address::CanHoldOffset(offset, Address::Offset, size)); 1391 ASSERT(Address::CanHoldOffset(offset, Address::Offset, size));
1479 return Address(base, offset, Address::Offset, size); 1392 return Address(base, offset, Address::Offset, size);
1480 } 1393 }
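Because the index arrives Smi-tagged (already shifted left by one), the effective shift is log2(index_scale) minus the Smi tag shift: a scale of 2 needs no shift, and a scale of 1 needs an arithmetic shift right by one, which is the shift == -1 case above. A small sketch of that arithmetic:

    #include <cstdio>
    #include <initializer_list>

    int ShiftForScale(int index_scale) {
      const int kSmiTagShift = 1;  // index arrives as value << 1
      int log2 = 0;
      while ((1 << log2) != index_scale) log2++;  // Utils::ShiftForPowerOfTwo
      return log2 - kSmiTagShift;
    }

    int main() {
      // scale 1 -> -1 (ASR 1), scale 2 -> 0 (no shift), scale 8 -> 2 (LSL 2).
      for (int scale : {1, 2, 4, 8, 16}) {
        printf("scale %2d -> shift %d\n", scale, ShiftForScale(scale));
      }
      return 0;
    }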
1481 1394
1482
1483 void Assembler::LoadElementAddressForRegIndex(Register address, 1395 void Assembler::LoadElementAddressForRegIndex(Register address,
1484 bool is_load, 1396 bool is_load,
1485 bool is_external, 1397 bool is_external,
1486 intptr_t cid, 1398 intptr_t cid,
1487 intptr_t index_scale, 1399 intptr_t index_scale,
1488 Register array, 1400 Register array,
1489 Register index) { 1401 Register index) {
1490 // Note that index is expected smi-tagged (i.e., LSL 1) for all arrays. 1402 // Note that index is expected smi-tagged (i.e., LSL 1) for all arrays.
1491 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) - kSmiTagShift; 1403 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) - kSmiTagShift;
1492 const int32_t offset = 1404 const int32_t offset =
1493 is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag); 1405 is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag);
1494 if (shift == 0) { 1406 if (shift == 0) {
1495 add(address, array, Operand(index)); 1407 add(address, array, Operand(index));
1496 } else if (shift < 0) { 1408 } else if (shift < 0) {
1497 ASSERT(shift == -1); 1409 ASSERT(shift == -1);
1498 add(address, array, Operand(index, ASR, 1)); 1410 add(address, array, Operand(index, ASR, 1));
1499 } else { 1411 } else {
1500 add(address, array, Operand(index, LSL, shift)); 1412 add(address, array, Operand(index, LSL, shift));
1501 } 1413 }
1502 if (offset != 0) { 1414 if (offset != 0) {
1503 AddImmediate(address, offset); 1415 AddImmediate(address, offset);
1504 } 1416 }
1505 } 1417 }
1506 1418
1507
1508 void Assembler::LoadUnaligned(Register dst, 1419 void Assembler::LoadUnaligned(Register dst,
1509 Register addr, 1420 Register addr,
1510 Register tmp, 1421 Register tmp,
1511 OperandSize sz) { 1422 OperandSize sz) {
1512 ASSERT(dst != addr); 1423 ASSERT(dst != addr);
1513 ldr(dst, Address(addr, 0), kUnsignedByte); 1424 ldr(dst, Address(addr, 0), kUnsignedByte);
1514 if (sz == kHalfword) { 1425 if (sz == kHalfword) {
1515 ldr(tmp, Address(addr, 1), kByte); 1426 ldr(tmp, Address(addr, 1), kByte);
1516 orr(dst, dst, Operand(tmp, LSL, 8)); 1427 orr(dst, dst, Operand(tmp, LSL, 8));
1517 return; 1428 return;
(...skipping 22 matching lines...)
1540 ldr(tmp, Address(addr, 6), kUnsignedByte); 1451 ldr(tmp, Address(addr, 6), kUnsignedByte);
1541 orr(dst, dst, Operand(tmp, LSL, 48)); 1452 orr(dst, dst, Operand(tmp, LSL, 48));
1542 ldr(tmp, Address(addr, 7), kUnsignedByte); 1453 ldr(tmp, Address(addr, 7), kUnsignedByte);
1543 orr(dst, dst, Operand(tmp, LSL, 56)); 1454 orr(dst, dst, Operand(tmp, LSL, 56));
1544 if (sz == kDoubleWord) { 1455 if (sz == kDoubleWord) {
1545 return; 1456 return;
1546 } 1457 }
1547 UNIMPLEMENTED(); 1458 UNIMPLEMENTED();
1548 } 1459 }
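The sequence above assembles the value little-endian, one byte at a time, OR-ing each byte in at an increasing shift. A C++ model of the 64-bit case (not the VM routine itself):

    #include <cstdint>
    #include <cstdio>

    // Little-endian byte-at-a-time load, mirroring the ldr/orr sequence above.
    uint64_t LoadUnaligned64(const uint8_t* addr) {
      uint64_t dst = addr[0];                 // ldr(dst, ..., kUnsignedByte)
      for (int i = 1; i < 8; i++) {
        dst |= (uint64_t)addr[i] << (8 * i);  // orr(dst, dst, tmp << 8*i)
      }
      return dst;
    }

    int main() {
      uint8_t buf[9] = {0, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88};
      // Read from buf + 1: a deliberately misaligned address.
      printf("%#llx\n", (unsigned long long)LoadUnaligned64(buf + 1));
      return 0;
    }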
1549 1460
1550
1551 void Assembler::StoreUnaligned(Register src, 1461 void Assembler::StoreUnaligned(Register src,
1552 Register addr, 1462 Register addr,
1553 Register tmp, 1463 Register tmp,
1554 OperandSize sz) { 1464 OperandSize sz) {
1555 str(src, Address(addr, 0), kUnsignedByte); 1465 str(src, Address(addr, 0), kUnsignedByte);
1556 LsrImmediate(tmp, src, 8); 1466 LsrImmediate(tmp, src, 8);
1557 str(tmp, Address(addr, 1), kUnsignedByte); 1467 str(tmp, Address(addr, 1), kUnsignedByte);
1558 if ((sz == kHalfword) || (sz == kUnsignedHalfword)) { 1468 if ((sz == kHalfword) || (sz == kUnsignedHalfword)) {
1559 return; 1469 return;
1560 } 1470 }
(...skipping 14 matching lines...)
1575 str(tmp, Address(addr, 7), kUnsignedByte); 1485 str(tmp, Address(addr, 7), kUnsignedByte);
1576 if (sz == kDoubleWord) { 1486 if (sz == kDoubleWord) {
1577 return; 1487 return;
1578 } 1488 }
1579 UNIMPLEMENTED(); 1489 UNIMPLEMENTED();
1580 } 1490 }
1581 1491
1582 } // namespace dart 1492 } // namespace dart
1583 1493
1584 #endif // defined TARGET_ARCH_ARM64 1494 #endif // defined TARGET_ARCH_ARM64