OLD | NEW |
---|---|
1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" // NOLINT | 5 #include "vm/globals.h" // NOLINT |
6 #if defined(TARGET_ARCH_ARM64) | 6 #if defined(TARGET_ARCH_ARM64) |
7 | 7 |
8 #include "vm/assembler.h" | 8 #include "vm/assembler.h" |
9 #include "vm/cpu.h" | 9 #include "vm/cpu.h" |
10 #include "vm/longjump.h" | 10 #include "vm/longjump.h" |
(...skipping 12 matching lines...) | |
23 DEFINE_FLAG(bool, use_far_branches, false, "Always use far branches"); | 23 DEFINE_FLAG(bool, use_far_branches, false, "Always use far branches"); |
24 DEFINE_FLAG(bool, print_stop_message, false, "Print stop message."); | 24 DEFINE_FLAG(bool, print_stop_message, false, "Print stop message."); |
25 DECLARE_FLAG(bool, inline_alloc); | 25 DECLARE_FLAG(bool, inline_alloc); |
26 | 26 |
27 | 27 |
28 Assembler::Assembler(bool use_far_branches) | 28 Assembler::Assembler(bool use_far_branches) |
29 : buffer_(), | 29 : buffer_(), |
30 prologue_offset_(-1), | 30 prologue_offset_(-1), |
31 use_far_branches_(use_far_branches), | 31 use_far_branches_(use_far_branches), |
32 comments_(), | 32 comments_(), |
33 constant_pool_allowed_(true) { | 33 constant_pool_allowed_(false) { |
34 } | 34 } |
35 | 35 |
36 | 36 |
37 void Assembler::InitializeMemoryWithBreakpoints(uword data, intptr_t length) { | 37 void Assembler::InitializeMemoryWithBreakpoints(uword data, intptr_t length) { |
38 ASSERT(Utils::IsAligned(data, 4)); | 38 ASSERT(Utils::IsAligned(data, 4)); |
39 ASSERT(Utils::IsAligned(length, 4)); | 39 ASSERT(Utils::IsAligned(length, 4)); |
40 const uword end = data + length; | 40 const uword end = data + length; |
41 while (data < end) { | 41 while (data < end) { |
42 *reinterpret_cast<int32_t*>(data) = Instr::kBreakPointInstruction; | 42 *reinterpret_cast<int32_t*>(data) = Instr::kBreakPointInstruction; |
43 data += 4; | 43 data += 4; |
(...skipping 251 matching lines...) | |
295 imm_s_fixed >>= 1; | 295 imm_s_fixed >>= 1; |
296 continue; | 296 continue; |
297 } | 297 } |
298 | 298 |
299 // 6. Otherwise, the value can't be encoded. | 299 // 6. Otherwise, the value can't be encoded. |
300 return false; | 300 return false; |
301 } | 301 } |
302 } | 302 } |
303 | 303 |
304 | 304 |
305 void Assembler::LoadPoolPointer(Register pp) { | 305 void Assembler::LoadPoolPointer() { |
306 const intptr_t object_pool_pc_dist = | 306 const intptr_t object_pool_pc_dist = |
307 Instructions::HeaderSize() - Instructions::object_pool_offset() + | 307 Instructions::HeaderSize() - Instructions::object_pool_offset() + |
308 CodeSize(); | 308 CodeSize(); |
309 // PP <- Read(PC - object_pool_pc_dist). | 309 // PP <- Read(PC - object_pool_pc_dist). |
310 ldr(pp, Address::PC(-object_pool_pc_dist)); | 310 ldr(PP, Address::PC(-object_pool_pc_dist)); |
311 | 311 |
312 // When in the PP register, the pool pointer is untagged. When we | 312 // When in the PP register, the pool pointer is untagged. When we |
313 // push it on the stack with TagAndPushPP it is tagged again. PopAndUntagPP | 313 // push it on the stack with TagAndPushPP it is tagged again. PopAndUntagPP |
314 // then untags when restoring from the stack. This will make loading from the | 314 // then untags when restoring from the stack. This will make loading from the |
315 // object pool only one instruction for the first 4096 entries. Otherwise, | 315 // object pool only one instruction for the first 4096 entries. Otherwise, |
316 // because the offset wouldn't be aligned, it would be only one instruction | 316 // because the offset wouldn't be aligned, it would be only one instruction |
317 // for the first 64 entries. | 317 // for the first 64 entries. |
318 sub(pp, pp, Operand(kHeapObjectTag)); | 318 sub(PP, PP, Operand(kHeapObjectTag)); |
319 set_constant_pool_allowed(true); | |
319 } | 320 } |
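The tag arithmetic the comment describes is plain pointer math. A minimal sketch, assuming `kHeapObjectTag == 1` as in the Dart VM: keeping PP untagged keeps pool-element offsets 8-byte aligned, which is what lets a single scaled 12-bit LDR offset reach the first 4096 entries.

```cpp
#include <cassert>
#include <cstdint>

constexpr uintptr_t kHeapObjectTag = 1;  // assumed, matching the Dart VM

// Tagged heap pointer <-> untagged pool pointer, as in TagAndPushPP /
// PopAndUntagPP / the `sub(PP, PP, Operand(kHeapObjectTag))` above.
inline uintptr_t Tag(uintptr_t untagged) { return untagged + kHeapObjectTag; }
inline uintptr_t Untag(uintptr_t tagged) { return tagged - kHeapObjectTag; }

int main() {
  const uintptr_t pool = 0x1000;           // hypothetical pool address
  const uintptr_t pp = Untag(Tag(pool));   // round-trips exactly
  assert(pp == pool);
  // With PP untagged, element offsets stay multiples of 8, so one scaled
  // 12-bit LDR offset covers 4096 * 8 bytes of pool entries; a tagged PP
  // would make every offset odd and force the short unscaled form.
}
```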
320 | 321 |
321 | 322 |
322 void Assembler::LoadWordFromPoolOffset(Register dst, Register pp, | 323 void Assembler::LoadWordFromPoolOffset(Register dst, uint32_t offset) { |
323 uint32_t offset) { | 324 ASSERT(constant_pool_allowed()); |
324 ASSERT(dst != pp); | 325 ASSERT(dst != PP); |
325 Operand op; | 326 Operand op; |
326 const uint32_t upper20 = offset & 0xfffff000; | 327 const uint32_t upper20 = offset & 0xfffff000; |
327 if (Address::CanHoldOffset(offset)) { | 328 if (Address::CanHoldOffset(offset)) { |
328 ldr(dst, Address(pp, offset)); | 329 ldr(dst, Address(PP, offset)); |
329 } else if (Operand::CanHold(upper20, kXRegSizeInBits, &op) == | 330 } else if (Operand::CanHold(upper20, kXRegSizeInBits, &op) == |
330 Operand::Immediate) { | 331 Operand::Immediate) { |
331 const uint32_t lower12 = offset & 0x00000fff; | 332 const uint32_t lower12 = offset & 0x00000fff; |
332 ASSERT(Address::CanHoldOffset(lower12)); | 333 ASSERT(Address::CanHoldOffset(lower12)); |
333 add(dst, pp, op); | 334 add(dst, PP, op); |
334 ldr(dst, Address(dst, lower12)); | 335 ldr(dst, Address(dst, lower12)); |
335 } else { | 336 } else { |
336 const uint16_t offset_low = Utils::Low16Bits(offset); | 337 const uint16_t offset_low = Utils::Low16Bits(offset); |
337 const uint16_t offset_high = Utils::High16Bits(offset); | 338 const uint16_t offset_high = Utils::High16Bits(offset); |
338 movz(dst, Immediate(offset_low), 0); | 339 movz(dst, Immediate(offset_low), 0); |
339 if (offset_high != 0) { | 340 if (offset_high != 0) { |
340 movk(dst, Immediate(offset_high), 1); | 341 movk(dst, Immediate(offset_high), 1); |
341 } | 342 } |
342 ldr(dst, Address(pp, dst)); | 343 ldr(dst, Address(PP, dst)); |
343 } | 344 } |
344 } | 345 } |
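The three tiers above handle progressively larger pool offsets. A small sketch of the middle tier's decomposition, with the masks copied from the code and the encodability checks elided:

```cpp
#include <cstdint>
#include <cstdio>

// Middle tier of LoadWordFromPoolOffset: an offset too big for one LDR is
// split into a shifted-immediate ADD plus a 12-bit LDR offset.
struct PoolOffsetSplit {
  uint32_t add_imm;  // upper 20 bits, materialized by `add dst, PP, #imm`
  uint32_t ldr_off;  // lower 12 bits, folded into `ldr dst, [dst, #off]`
};

static PoolOffsetSplit Split(uint32_t offset) {
  return { offset & 0xfffff000u, offset & 0x00000fffu };
}

int main() {
  const uint32_t offset = 0x12345;
  const PoolOffsetSplit s = Split(offset);
  printf("add #%x, ldr #%x\n", (unsigned)s.add_imm, (unsigned)s.ldr_off);
  // The pieces always reassemble the original offset.
  return (s.add_imm | s.ldr_off) == offset ? 0 : 1;
}
```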
345 | 346 |
346 | 347 |
347 void Assembler::LoadWordFromPoolOffsetFixed(Register dst, Register pp, | 348 void Assembler::LoadWordFromPoolOffsetFixed(Register dst, uint32_t offset) { |
348 uint32_t offset) { | 349 ASSERT(constant_pool_allowed()); |
349 ASSERT(dst != pp); | 350 ASSERT(dst != PP); |
350 Operand op; | 351 Operand op; |
351 const uint32_t upper20 = offset & 0xfffff000; | 352 const uint32_t upper20 = offset & 0xfffff000; |
352 const uint32_t lower12 = offset & 0x00000fff; | 353 const uint32_t lower12 = offset & 0x00000fff; |
353 const Operand::OperandType ot = | 354 const Operand::OperandType ot = |
354 Operand::CanHold(upper20, kXRegSizeInBits, &op); | 355 Operand::CanHold(upper20, kXRegSizeInBits, &op); |
355 ASSERT(ot == Operand::Immediate); | 356 ASSERT(ot == Operand::Immediate); |
356 ASSERT(Address::CanHoldOffset(lower12)); | 357 ASSERT(Address::CanHoldOffset(lower12)); |
357 add(dst, pp, op); | 358 add(dst, PP, op); |
358 ldr(dst, Address(dst, lower12)); | 359 ldr(dst, Address(dst, lower12)); |
359 } | 360 } |
360 | 361 |
361 | 362 |
362 intptr_t Assembler::FindImmediate(int64_t imm) { | 363 intptr_t Assembler::FindImmediate(int64_t imm) { |
363 return object_pool_wrapper_.FindImmediate(imm); | 364 return object_pool_wrapper_.FindImmediate(imm); |
364 } | 365 } |
365 | 366 |
366 | 367 |
367 bool Assembler::CanLoadFromObjectPool(const Object& object) const { | 368 bool Assembler::CanLoadFromObjectPool(const Object& object) const { |
368 ASSERT(!Thread::CanLoadFromThread(object)); | 369 ASSERT(!Thread::CanLoadFromThread(object)); |
369 if (!constant_pool_allowed()) { | 370 if (!constant_pool_allowed()) { |
370 return false; | 371 return false; |
371 } | 372 } |
372 | 373 |
373 // TODO(zra, kmillikin): Also load other large immediates from the object | 374 // TODO(zra, kmillikin): Also load other large immediates from the object |
374 // pool | 375 // pool |
375 if (object.IsSmi()) { | 376 if (object.IsSmi()) { |
376 // If the raw smi does not fit into a 32-bit signed int, then we'll keep | 377 // If the raw smi does not fit into a 32-bit signed int, then we'll keep |
377 // the raw value in the object pool. | 378 // the raw value in the object pool. |
378 return !Utils::IsInt(32, reinterpret_cast<int64_t>(object.raw())); | 379 return !Utils::IsInt(32, reinterpret_cast<int64_t>(object.raw())); |
379 } | 380 } |
380 ASSERT(object.IsNotTemporaryScopedHandle()); | 381 ASSERT(object.IsNotTemporaryScopedHandle()); |
381 ASSERT(object.IsOld()); | 382 ASSERT(object.IsOld()); |
382 return true; | 383 return true; |
383 } | 384 } |
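The Smi branch hinges on whether the raw tagged value fits a signed 32-bit immediate. A sketch of that predicate (the VM uses `Utils::IsInt`; this is a standalone equivalent for the 32-bit case):

```cpp
#include <cstdint>

// Does v fit in a signed 32-bit integer? Small raw Smis are materialized
// inline with a short mov sequence; larger ones go to the object pool.
static bool FitsInt32(int64_t v) {
  return v == static_cast<int64_t>(static_cast<int32_t>(v));
}

int main() {
  // INT32_MAX fits; INT32_MAX + 1 does not and would be pool-loaded.
  return (FitsInt32(0x7fffffffLL) && !FitsInt32(0x80000000LL)) ? 0 : 1;
}
```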
384 | 385 |
385 | 386 |
386 bool Assembler::CanLoadImmediateFromPool(int64_t imm, Register pp) { | 387 void Assembler::LoadExternalLabel(Register dst, const ExternalLabel* label) { |
387 if (!constant_pool_allowed()) { | |
388 return false; | |
389 } | |
390 return !Utils::IsInt(32, imm) && (pp != kNoPP); | |
391 } | |
392 | |
393 | |
394 void Assembler::LoadExternalLabel(Register dst, | |
395 const ExternalLabel* label, | |
396 Patchability patchable, | |
397 Register pp) { | |
398 const int64_t target = static_cast<int64_t>(label->address()); | 388 const int64_t target = static_cast<int64_t>(label->address()); |
399 if (CanLoadImmediateFromPool(target, pp)) { | 389 LoadImmediate(dst, target); |
Florian Schneider
2015/07/29 09:56:07
This will have to change again with precompiled code
Florian Schneider
2015/07/29 10:22:44
Alternatively, I would suggest to leave loading of
regis
2015/07/29 15:35:02
Do you mean that the external label will be patcha
| |
400 const int32_t offset = ObjectPool::element_offset( | |
401 object_pool_wrapper_.FindExternalLabel(label, patchable)); | |
402 LoadWordFromPoolOffset(dst, pp, offset); | |
403 } else { | |
404 LoadImmediate(dst, target, kNoPP); | |
405 } | |
406 } | 390 } |
407 | 391 |
408 | 392 |
409 void Assembler::LoadExternalLabelFixed(Register dst, | 393 void Assembler::LoadExternalLabelFixed(Register dst, |
410 const ExternalLabel* label, | 394 const ExternalLabel* label, |
411 Patchability patchable, | 395 Patchability patchable) { |
412 Register pp) { | |
413 const int32_t offset = ObjectPool::element_offset( | 396 const int32_t offset = ObjectPool::element_offset( |
414 object_pool_wrapper_.FindExternalLabel(label, patchable)); | 397 object_pool_wrapper_.FindExternalLabel(label, patchable)); |
415 LoadWordFromPoolOffsetFixed(dst, pp, offset); | 398 LoadWordFromPoolOffsetFixed(dst, offset); |
416 } | 399 } |
417 | 400 |
418 | 401 |
419 void Assembler::LoadIsolate(Register dst) { | 402 void Assembler::LoadIsolate(Register dst) { |
420 ldr(dst, Address(THR, Thread::isolate_offset())); | 403 ldr(dst, Address(THR, Thread::isolate_offset())); |
421 } | 404 } |
422 | 405 |
423 | 406 |
424 void Assembler::LoadObjectHelper(Register dst, | 407 void Assembler::LoadObjectHelper(Register dst, |
425 const Object& object, | 408 const Object& object, |
426 Register pp, | |
427 bool is_unique) { | 409 bool is_unique) { |
428 if (Thread::CanLoadFromThread(object)) { | 410 if (Thread::CanLoadFromThread(object)) { |
429 ldr(dst, Address(THR, Thread::OffsetFromThread(object))); | 411 ldr(dst, Address(THR, Thread::OffsetFromThread(object))); |
430 } else if (CanLoadFromObjectPool(object)) { | 412 } else if (CanLoadFromObjectPool(object)) { |
431 const int32_t offset = ObjectPool::element_offset( | 413 const int32_t offset = ObjectPool::element_offset( |
432 is_unique ? object_pool_wrapper_.AddObject(object) | 414 is_unique ? object_pool_wrapper_.AddObject(object) |
433 : object_pool_wrapper_.FindObject(object)); | 415 : object_pool_wrapper_.FindObject(object)); |
434 LoadWordFromPoolOffset(dst, pp, offset); | 416 LoadWordFromPoolOffset(dst, offset); |
435 } else { | 417 } else { |
436 ASSERT(object.IsSmi() || object.InVMHeap()); | 418 ASSERT(object.IsSmi() || object.InVMHeap()); |
437 LoadDecodableImmediate(dst, reinterpret_cast<int64_t>(object.raw()), pp); | 419 LoadDecodableImmediate(dst, reinterpret_cast<int64_t>(object.raw())); |
438 } | 420 } |
439 } | 421 } |
440 | 422 |
441 | 423 |
442 void Assembler::LoadObject(Register dst, const Object& object, Register pp) { | 424 void Assembler::LoadObject(Register dst, const Object& object) { |
443 LoadObjectHelper(dst, object, pp, false); | 425 LoadObjectHelper(dst, object, false); |
444 } | 426 } |
445 | 427 |
446 | 428 |
447 void Assembler::LoadUniqueObject(Register dst, | 429 void Assembler::LoadUniqueObject(Register dst, const Object& object) { |
448 const Object& object, | 430 LoadObjectHelper(dst, object, true); |
449 Register pp) { | |
450 LoadObjectHelper(dst, object, pp, true); | |
451 } | 431 } |
452 | 432 |
453 | 433 |
454 void Assembler::CompareObject(Register reg, const Object& object, Register pp) { | 434 void Assembler::CompareObject(Register reg, const Object& object) { |
455 if (Thread::CanLoadFromThread(object)) { | 435 if (Thread::CanLoadFromThread(object)) { |
456 ldr(TMP, Address(THR, Thread::OffsetFromThread(object))); | 436 ldr(TMP, Address(THR, Thread::OffsetFromThread(object))); |
457 CompareRegisters(reg, TMP); | 437 CompareRegisters(reg, TMP); |
458 } else if (CanLoadFromObjectPool(object)) { | 438 } else if (CanLoadFromObjectPool(object)) { |
459 LoadObject(TMP, object, pp); | 439 LoadObject(TMP, object); |
460 CompareRegisters(reg, TMP); | 440 CompareRegisters(reg, TMP); |
461 } else { | 441 } else { |
462 CompareImmediate(reg, reinterpret_cast<int64_t>(object.raw()), pp); | 442 CompareImmediate(reg, reinterpret_cast<int64_t>(object.raw())); |
463 } | 443 } |
464 } | 444 } |
465 | 445 |
466 | 446 |
467 void Assembler::LoadDecodableImmediate(Register reg, int64_t imm, Register pp) { | 447 void Assembler::LoadDecodableImmediate(Register reg, int64_t imm) { |
468 if ((pp != kNoPP) && constant_pool_allowed()) { | 448 if (constant_pool_allowed()) { |
469 const int32_t offset = ObjectPool::element_offset(FindImmediate(imm)); | 449 const int32_t offset = ObjectPool::element_offset(FindImmediate(imm)); |
470 LoadWordFromPoolOffset(reg, pp, offset); | 450 LoadWordFromPoolOffset(reg, offset); |
471 } else { | 451 } else { |
472 // TODO(zra): Since this sequence only needs to be decodable, it can be | 452 // TODO(zra): Since this sequence only needs to be decodable, it can be |
473 // of variable length. | 453 // of variable length. |
474 LoadImmediateFixed(reg, imm); | 454 LoadImmediateFixed(reg, imm); |
475 } | 455 } |
476 } | 456 } |
477 | 457 |
478 | 458 |
479 void Assembler::LoadImmediateFixed(Register reg, int64_t imm) { | 459 void Assembler::LoadImmediateFixed(Register reg, int64_t imm) { |
480 const uint32_t w0 = Utils::Low32Bits(imm); | 460 const uint32_t w0 = Utils::Low32Bits(imm); |
481 const uint32_t w1 = Utils::High32Bits(imm); | 461 const uint32_t w1 = Utils::High32Bits(imm); |
482 const uint16_t h0 = Utils::Low16Bits(w0); | 462 const uint16_t h0 = Utils::Low16Bits(w0); |
483 const uint16_t h1 = Utils::High16Bits(w0); | 463 const uint16_t h1 = Utils::High16Bits(w0); |
484 const uint16_t h2 = Utils::Low16Bits(w1); | 464 const uint16_t h2 = Utils::Low16Bits(w1); |
485 const uint16_t h3 = Utils::High16Bits(w1); | 465 const uint16_t h3 = Utils::High16Bits(w1); |
486 movz(reg, Immediate(h0), 0); | 466 movz(reg, Immediate(h0), 0); |
487 movk(reg, Immediate(h1), 1); | 467 movk(reg, Immediate(h1), 1); |
488 movk(reg, Immediate(h2), 2); | 468 movk(reg, Immediate(h2), 2); |
489 movk(reg, Immediate(h3), 3); | 469 movk(reg, Immediate(h3), 3); |
490 } | 470 } |
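LoadImmediateFixed always emits exactly four instructions, so the sequence has a fixed size and can be patched later. A sketch of the halfword split it performs and the movz/movk sequence it stands for (movz writes one 16-bit lane and zeroes the rest; each movk overwrites one lane):

```cpp
#include <cstdint>
#include <cstdio>

// The four 16-bit lanes of a 64-bit immediate, in emission order.
static void SplitHalfwords(uint64_t imm, uint16_t h[4]) {
  for (int q = 0; q < 4; ++q) h[q] = static_cast<uint16_t>(imm >> (16 * q));
}

int main() {
  uint16_t h[4];
  SplitHalfwords(0x0123456789abcdefULL, h);
  // movz x, #h[0], lsl #0 ; movk x, #h[1], lsl #16
  // movk x, #h[2], lsl #32; movk x, #h[3], lsl #48
  printf("%04x %04x %04x %04x\n", h[0], h[1], h[2], h[3]);
  // prints: cdef 89ab 4567 0123
}
```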
491 | 471 |
492 | 472 |
493 void Assembler::LoadImmediate(Register reg, int64_t imm, Register pp) { | 473 void Assembler::LoadImmediate(Register reg, int64_t imm) { |
494 Comment("LoadImmediate"); | 474 Comment("LoadImmediate"); |
495 if (CanLoadImmediateFromPool(imm, pp)) { | 475 // Is it 0? |
476 if (imm == 0) { | |
477 movz(reg, Immediate(0), 0); | |
478 return; | |
479 } | |
480 | |
481 // Can we use one orri operation? | |
482 Operand op; | |
483 Operand::OperandType ot; | |
484 ot = Operand::CanHold(imm, kXRegSizeInBits, &op); | |
485 if (ot == Operand::BitfieldImm) { | |
486 orri(reg, ZR, Immediate(imm)); | |
487 return; | |
488 } | |
489 | |
490 // We may fall back on movz, movk, movn. | |
491 const uint32_t w0 = Utils::Low32Bits(imm); | |
492 const uint32_t w1 = Utils::High32Bits(imm); | |
493 const uint16_t h0 = Utils::Low16Bits(w0); | |
494 const uint16_t h1 = Utils::High16Bits(w0); | |
495 const uint16_t h2 = Utils::Low16Bits(w1); | |
496 const uint16_t h3 = Utils::High16Bits(w1); | |
497 | |
498 // Special case for w1 == 0xffffffff | |
499 if (w1 == 0xffffffff) { | |
500 if (h1 == 0xffff) { | |
501 movn(reg, Immediate(~h0), 0); | |
502 } else { | |
503 movn(reg, Immediate(~h1), 1); | |
504 movk(reg, Immediate(h0), 0); | |
505 } | |
506 return; | |
507 } | |
508 | |
509 // Special case for h3 == 0xffff | |
510 if (h3 == 0xffff) { | |
511 // We know h2 != 0xffff. | |
512 movn(reg, Immediate(~h2), 2); | |
513 if (h1 != 0xffff) { | |
514 movk(reg, Immediate(h1), 1); | |
515 } | |
516 if (h0 != 0xffff) { | |
517 movk(reg, Immediate(h0), 0); | |
518 } | |
519 return; | |
520 } | |
521 | |
522 // Use constant pool if allowed, unless we can load imm with 2 instructions. | |
523 if ((w1 != 0) && constant_pool_allowed()) { | |
496 const int32_t offset = ObjectPool::element_offset(FindImmediate(imm)); | 524 const int32_t offset = ObjectPool::element_offset(FindImmediate(imm)); |
497 LoadWordFromPoolOffset(reg, pp, offset); | 525 LoadWordFromPoolOffset(reg, offset); |
498 } else { | 526 return; |
499 // 0. Is it 0? | 527 } |
500 if (imm == 0) { | |
501 movz(reg, Immediate(0), 0); | |
502 return; | |
503 } | |
504 | 528 |
505 // 1. Can we use one orri operation? | 529 bool initialized = false; |
506 Operand op; | 530 if (h0 != 0) { |
507 Operand::OperandType ot; | 531 movz(reg, Immediate(h0), 0); |
508 ot = Operand::CanHold(imm, kXRegSizeInBits, &op); | 532 initialized = true; |
509 if (ot == Operand::BitfieldImm) { | 533 } |
510 orri(reg, ZR, Immediate(imm)); | 534 if (h1 != 0) { |
511 return; | 535 if (initialized) { |
512 } | 536 movk(reg, Immediate(h1), 1); |
513 | 537 } else { |
514 // 2. Fall back on movz, movk, movn. | 538 movz(reg, Immediate(h1), 1); |
515 const uint32_t w0 = Utils::Low32Bits(imm); | |
516 const uint32_t w1 = Utils::High32Bits(imm); | |
517 const uint16_t h0 = Utils::Low16Bits(w0); | |
518 const uint16_t h1 = Utils::High16Bits(w0); | |
519 const uint16_t h2 = Utils::Low16Bits(w1); | |
520 const uint16_t h3 = Utils::High16Bits(w1); | |
521 | |
522 // Special case for w1 == 0xffffffff | |
523 if (w1 == 0xffffffff) { | |
524 if (h1 == 0xffff) { | |
525 movn(reg, Immediate(~h0), 0); | |
526 } else { | |
527 movn(reg, Immediate(~h1), 1); | |
528 movk(reg, Immediate(h0), 0); | |
529 } | |
530 return; | |
531 } | |
532 | |
533 // Special case for h3 == 0xffff | |
534 if (h3 == 0xffff) { | |
535 // We know h2 != 0xffff. | |
536 movn(reg, Immediate(~h2), 2); | |
537 if (h1 != 0xffff) { | |
538 movk(reg, Immediate(h1), 1); | |
539 } | |
540 if (h0 != 0xffff) { | |
541 movk(reg, Immediate(h0), 0); | |
542 } | |
543 return; | |
544 } | |
545 | |
546 bool initialized = false; | |
547 if (h0 != 0) { | |
548 movz(reg, Immediate(h0), 0); | |
549 initialized = true; | 539 initialized = true; |
550 } | 540 } |
551 if (h1 != 0) { | 541 } |
552 if (initialized) { | 542 if (h2 != 0) { |
553 movk(reg, Immediate(h1), 1); | 543 if (initialized) { |
554 } else { | 544 movk(reg, Immediate(h2), 2); |
555 movz(reg, Immediate(h1), 1); | 545 } else { |
556 initialized = true; | 546 movz(reg, Immediate(h2), 2); |
557 } | 547 initialized = true; |
558 } | 548 } |
559 if (h2 != 0) { | 549 } |
560 if (initialized) { | 550 if (h3 != 0) { |
561 movk(reg, Immediate(h2), 2); | 551 if (initialized) { |
562 } else { | 552 movk(reg, Immediate(h3), 3); |
563 movz(reg, Immediate(h2), 2); | 553 } else { |
564 initialized = true; | 554 movz(reg, Immediate(h3), 3); |
565 } | |
566 } | |
567 if (h3 != 0) { | |
568 if (initialized) { | |
569 movk(reg, Immediate(h3), 3); | |
570 } else { | |
571 movz(reg, Immediate(h3), 3); | |
572 } | |
573 } | 555 } |
574 } | 556 } |
575 } | 557 } |
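The rewritten fall-through path skips zero halfwords, so the emitted sequence length varies with the value. A sketch that counts instructions for the plain movz/movk tail (the movn special cases and the constant-pool path above are deliberately ignored here):

```cpp
#include <cstdint>
#include <cstdio>

// Instructions emitted by the movz/movk tail of LoadImmediate: one movz for
// the first nonzero halfword, one movk per later nonzero halfword.
static int MovSequenceLength(uint64_t imm) {
  if (imm == 0) return 1;  // single `movz reg, #0`
  int n = 0;
  for (int q = 0; q < 4; ++q) {
    if (((imm >> (16 * q)) & 0xffff) != 0) ++n;
  }
  return n;
}

int main() {
  printf("%d\n", MovSequenceLength(0));                      // 1
  printf("%d\n", MovSequenceLength(0x0000000080000000ULL));  // 1
  printf("%d\n", MovSequenceLength(0x0123456789abcdefULL));  // 4
}
```

This is why the pool is consulted only when `w1 != 0`: anything with a zero upper word already loads in at most two instructions.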
576 | 558 |
577 | 559 |
578 void Assembler::LoadDImmediate(VRegister vd, double immd, Register pp) { | 560 void Assembler::LoadDImmediate(VRegister vd, double immd) { |
579 if (!fmovdi(vd, immd)) { | 561 if (!fmovdi(vd, immd)) { |
580 int64_t imm = bit_cast<int64_t, double>(immd); | 562 int64_t imm = bit_cast<int64_t, double>(immd); |
581 LoadImmediate(TMP, imm, pp); | 563 LoadImmediate(TMP, imm); |
582 fmovdr(vd, TMP); | 564 fmovdr(vd, TMP); |
583 } | 565 } |
584 } | 566 } |
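fmovdi only succeeds for the small family of doubles ARM64 can encode in 8 bits. A hedged sketch of that test, derived from the architectural VFPExpandImm layout rather than from the VM's fmovdi:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// A double fits ARM64's 8-bit FP immediate iff its low 48 fraction bits are
// zero, exponent bits 61..54 are all equal, and bit 62 is their complement
// (the sign and the top 4 fraction bits are unconstrained).
static bool IsFmovImmediate(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  if (bits & 0x0000ffffffffffffULL) return false;
  const uint64_t run = (bits >> 54) & 0xff;
  if (run != 0 && run != 0xff) return false;
  return ((bits >> 62) & 1) == ((run & 1) ^ 1);
}

int main() {
  printf("%d %d %d\n",
         IsFmovImmediate(1.0),     // 1: encodable
         IsFmovImmediate(-0.125),  // 1: encodable
         IsFmovImmediate(0.1));    // 0: falls back to LoadImmediate + fmovdr
}
```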
585 | 567 |
586 | 568 |
587 void Assembler::AddImmediate( | 569 void Assembler::AddImmediate(Register dest, Register rn, int64_t imm) { |
588 Register dest, Register rn, int64_t imm, Register pp) { | |
589 Operand op; | 570 Operand op; |
590 if (imm == 0) { | 571 if (imm == 0) { |
591 if (dest != rn) { | 572 if (dest != rn) { |
592 mov(dest, rn); | 573 mov(dest, rn); |
593 } | 574 } |
594 return; | 575 return; |
595 } | 576 } |
596 if (Operand::CanHold(imm, kXRegSizeInBits, &op) == Operand::Immediate) { | 577 if (Operand::CanHold(imm, kXRegSizeInBits, &op) == Operand::Immediate) { |
597 add(dest, rn, op); | 578 add(dest, rn, op); |
598 } else if (Operand::CanHold(-imm, kXRegSizeInBits, &op) == | 579 } else if (Operand::CanHold(-imm, kXRegSizeInBits, &op) == |
599 Operand::Immediate) { | 580 Operand::Immediate) { |
600 sub(dest, rn, op); | 581 sub(dest, rn, op); |
601 } else { | 582 } else { |
602 // TODO(zra): Try adding top 12 bits, then bottom 12 bits. | 583 // TODO(zra): Try adding top 12 bits, then bottom 12 bits. |
603 ASSERT(rn != TMP2); | 584 ASSERT(rn != TMP2); |
604 LoadImmediate(TMP2, imm, pp); | 585 LoadImmediate(TMP2, imm); |
605 add(dest, rn, Operand(TMP2)); | 586 add(dest, rn, Operand(TMP2)); |
606 } | 587 } |
607 } | 588 } |
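The add/sub pairing above exploits ARM64's arithmetic-immediate encoding: a 12-bit value, optionally shifted left by 12. A sketch of that test and the negate fallback, as a simplified stand-in for `Operand::CanHold`'s Immediate case:

```cpp
#include <cstdint>
#include <cstdio>

// Can imm be an ARM64 add/sub immediate? 12 bits, optionally shifted by 12.
static bool IsAddSubImmediate(int64_t imm) {
  const uint64_t u = static_cast<uint64_t>(imm);
  return (u & ~0xfffULL) == 0 || (u & ~0xfff000ULL) == 0;
}

int main() {
  const int64_t imm = -4096;
  if (IsAddSubImmediate(imm)) {
    printf("add dest, rn, #%lld\n", (long long)imm);
  } else if (IsAddSubImmediate(-imm)) {
    printf("sub dest, rn, #%lld\n", (long long)-imm);  // taken: sub #4096
  } else {
    printf("load into TMP2, then add dest, rn, TMP2\n");
  }
}
```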
608 | 589 |
609 | 590 |
610 void Assembler::AddImmediateSetFlags( | 591 void Assembler::AddImmediateSetFlags(Register dest, Register rn, int64_t imm) { |
611 Register dest, Register rn, int64_t imm, Register pp) { | |
612 Operand op; | 592 Operand op; |
613 if (Operand::CanHold(imm, kXRegSizeInBits, &op) == Operand::Immediate) { | 593 if (Operand::CanHold(imm, kXRegSizeInBits, &op) == Operand::Immediate) { |
614 // Handles imm == kMinInt64. | 594 // Handles imm == kMinInt64. |
615 adds(dest, rn, op); | 595 adds(dest, rn, op); |
616 } else if (Operand::CanHold(-imm, kXRegSizeInBits, &op) == | 596 } else if (Operand::CanHold(-imm, kXRegSizeInBits, &op) == |
617 Operand::Immediate) { | 597 Operand::Immediate) { |
618 ASSERT(imm != kMinInt64); // Would cause erroneous overflow detection. | 598 ASSERT(imm != kMinInt64); // Would cause erroneous overflow detection. |
619 subs(dest, rn, op); | 599 subs(dest, rn, op); |
620 } else { | 600 } else { |
621 // TODO(zra): Try adding top 12 bits, then bottom 12 bits. | 601 // TODO(zra): Try adding top 12 bits, then bottom 12 bits. |
622 ASSERT(rn != TMP2); | 602 ASSERT(rn != TMP2); |
623 LoadImmediate(TMP2, imm, pp); | 603 LoadImmediate(TMP2, imm); |
624 adds(dest, rn, Operand(TMP2)); | 604 adds(dest, rn, Operand(TMP2)); |
625 } | 605 } |
626 } | 606 } |
627 | 607 |
628 | 608 |
629 void Assembler::SubImmediateSetFlags( | 609 void Assembler::SubImmediateSetFlags(Register dest, Register rn, int64_t imm) { |
630 Register dest, Register rn, int64_t imm, Register pp) { | |
631 Operand op; | 610 Operand op; |
632 if (Operand::CanHold(imm, kXRegSizeInBits, &op) == Operand::Immediate) { | 611 if (Operand::CanHold(imm, kXRegSizeInBits, &op) == Operand::Immediate) { |
633 // Handles imm == kMinInt64. | 612 // Handles imm == kMinInt64. |
634 subs(dest, rn, op); | 613 subs(dest, rn, op); |
635 } else if (Operand::CanHold(-imm, kXRegSizeInBits, &op) == | 614 } else if (Operand::CanHold(-imm, kXRegSizeInBits, &op) == |
636 Operand::Immediate) { | 615 Operand::Immediate) { |
637 ASSERT(imm != kMinInt64); // Would cause erroneous overflow detection. | 616 ASSERT(imm != kMinInt64); // Would cause erroneous overflow detection. |
638 adds(dest, rn, op); | 617 adds(dest, rn, op); |
639 } else { | 618 } else { |
640 // TODO(zra): Try subtracting top 12 bits, then bottom 12 bits. | 619 // TODO(zra): Try subtracting top 12 bits, then bottom 12 bits. |
641 ASSERT(rn != TMP2); | 620 ASSERT(rn != TMP2); |
642 LoadImmediate(TMP2, imm, pp); | 621 LoadImmediate(TMP2, imm); |
643 subs(dest, rn, Operand(TMP2)); | 622 subs(dest, rn, Operand(TMP2)); |
644 } | 623 } |
645 } | 624 } |
646 | 625 |
647 | 626 |
648 void Assembler::AndImmediate( | 627 void Assembler::AndImmediate(Register rd, Register rn, int64_t imm) { |
649 Register rd, Register rn, int64_t imm, Register pp) { | |
650 Operand imm_op; | 628 Operand imm_op; |
651 if (Operand::IsImmLogical(imm, kXRegSizeInBits, &imm_op)) { | 629 if (Operand::IsImmLogical(imm, kXRegSizeInBits, &imm_op)) { |
652 andi(rd, rn, Immediate(imm)); | 630 andi(rd, rn, Immediate(imm)); |
653 } else { | 631 } else { |
654 LoadImmediate(TMP, imm, pp); | 632 LoadImmediate(TMP, imm); |
655 and_(rd, rn, Operand(TMP)); | 633 and_(rd, rn, Operand(TMP)); |
656 } | 634 } |
657 } | 635 } |
658 | 636 |
659 | 637 |
660 void Assembler::OrImmediate( | 638 void Assembler::OrImmediate(Register rd, Register rn, int64_t imm) { |
661 Register rd, Register rn, int64_t imm, Register pp) { | |
662 Operand imm_op; | 639 Operand imm_op; |
663 if (Operand::IsImmLogical(imm, kXRegSizeInBits, &imm_op)) { | 640 if (Operand::IsImmLogical(imm, kXRegSizeInBits, &imm_op)) { |
664 orri(rd, rn, Immediate(imm)); | 641 orri(rd, rn, Immediate(imm)); |
665 } else { | 642 } else { |
666 LoadImmediate(TMP, imm, pp); | 643 LoadImmediate(TMP, imm); |
667 orr(rd, rn, Operand(TMP)); | 644 orr(rd, rn, Operand(TMP)); |
668 } | 645 } |
669 } | 646 } |
670 | 647 |
671 | 648 |
672 void Assembler::XorImmediate( | 649 void Assembler::XorImmediate(Register rd, Register rn, int64_t imm) { |
673 Register rd, Register rn, int64_t imm, Register pp) { | |
674 Operand imm_op; | 650 Operand imm_op; |
675 if (Operand::IsImmLogical(imm, kXRegSizeInBits, &imm_op)) { | 651 if (Operand::IsImmLogical(imm, kXRegSizeInBits, &imm_op)) { |
676 eori(rd, rn, Immediate(imm)); | 652 eori(rd, rn, Immediate(imm)); |
677 } else { | 653 } else { |
678 LoadImmediate(TMP, imm, pp); | 654 LoadImmediate(TMP, imm); |
679 eor(rd, rn, Operand(TMP)); | 655 eor(rd, rn, Operand(TMP)); |
680 } | 656 } |
681 } | 657 } |
682 | 658 |
683 | 659 |
684 void Assembler::TestImmediate(Register rn, int64_t imm, Register pp) { | 660 void Assembler::TestImmediate(Register rn, int64_t imm) { |
685 Operand imm_op; | 661 Operand imm_op; |
686 if (Operand::IsImmLogical(imm, kXRegSizeInBits, &imm_op)) { | 662 if (Operand::IsImmLogical(imm, kXRegSizeInBits, &imm_op)) { |
687 tsti(rn, Immediate(imm)); | 663 tsti(rn, Immediate(imm)); |
688 } else { | 664 } else { |
689 LoadImmediate(TMP, imm, pp); | 665 LoadImmediate(TMP, imm); |
690 tst(rn, Operand(TMP)); | 666 tst(rn, Operand(TMP)); |
691 } | 667 } |
692 } | 668 } |
693 | 669 |
694 | 670 |
695 void Assembler::CompareImmediate(Register rn, int64_t imm, Register pp) { | 671 void Assembler::CompareImmediate(Register rn, int64_t imm) { |
696 Operand op; | 672 Operand op; |
697 if (Operand::CanHold(imm, kXRegSizeInBits, &op) == Operand::Immediate) { | 673 if (Operand::CanHold(imm, kXRegSizeInBits, &op) == Operand::Immediate) { |
698 cmp(rn, op); | 674 cmp(rn, op); |
699 } else if (Operand::CanHold(-imm, kXRegSizeInBits, &op) == | 675 } else if (Operand::CanHold(-imm, kXRegSizeInBits, &op) == |
700 Operand::Immediate) { | 676 Operand::Immediate) { |
701 cmn(rn, op); | 677 cmn(rn, op); |
702 } else { | 678 } else { |
703 ASSERT(rn != TMP2); | 679 ASSERT(rn != TMP2); |
704 LoadImmediate(TMP2, imm, pp); | 680 LoadImmediate(TMP2, imm); |
705 cmp(rn, Operand(TMP2)); | 681 cmp(rn, Operand(TMP2)); |
706 } | 682 } |
707 } | 683 } |
708 | 684 |
709 | 685 |
710 void Assembler::LoadFromOffset( | 686 void Assembler::LoadFromOffset( |
711 Register dest, Register base, int32_t offset, Register pp, OperandSize sz) { | 687 Register dest, Register base, int32_t offset, OperandSize sz) { |
712 if (Address::CanHoldOffset(offset, Address::Offset, sz)) { | 688 if (Address::CanHoldOffset(offset, Address::Offset, sz)) { |
713 ldr(dest, Address(base, offset, Address::Offset, sz), sz); | 689 ldr(dest, Address(base, offset, Address::Offset, sz), sz); |
714 } else { | 690 } else { |
715 ASSERT(base != TMP2); | 691 ASSERT(base != TMP2); |
716 AddImmediate(TMP2, base, offset, pp); | 692 AddImmediate(TMP2, base, offset); |
717 ldr(dest, Address(TMP2), sz); | 693 ldr(dest, Address(TMP2), sz); |
718 } | 694 } |
719 } | 695 } |
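Whether one ldr suffices depends on the unsigned-offset addressing form: the byte offset must be non-negative, size-aligned, and fit 12 bits after scaling. A sketch of that check under those assumptions (the real rules live in `Address::CanHoldOffset` and also admit a 9-bit signed unscaled form, which this sketch ignores):

```cpp
#include <cstdint>
#include <cstdio>

// True if `offset` fits an ARM64 LDR/STR unsigned scaled offset for an
// access of 2^log2_size bytes (e.g. log2_size == 3 for a 64-bit load).
static bool CanHoldScaledOffset(int64_t offset, int log2_size) {
  if (offset < 0) return false;
  if (offset & ((int64_t{1} << log2_size) - 1)) return false;  // misaligned
  return (offset >> log2_size) < (int64_t{1} << 12);
}

int main() {
  printf("%d\n", CanHoldScaledOffset(32760, 3));  // 1: 4095 * 8
  printf("%d\n", CanHoldScaledOffset(32768, 3));  // 0: needs AddImmediate
  printf("%d\n", CanHoldScaledOffset(12, 3));     // 0: not 8-byte aligned
}
```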
720 | 696 |
721 | 697 |
722 void Assembler::LoadDFromOffset( | 698 void Assembler::LoadDFromOffset(VRegister dest, Register base, int32_t offset) { |
723 VRegister dest, Register base, int32_t offset, Register pp) { | |
724 if (Address::CanHoldOffset(offset, Address::Offset, kDWord)) { | 699 if (Address::CanHoldOffset(offset, Address::Offset, kDWord)) { |
725 fldrd(dest, Address(base, offset, Address::Offset, kDWord)); | 700 fldrd(dest, Address(base, offset, Address::Offset, kDWord)); |
726 } else { | 701 } else { |
727 ASSERT(base != TMP2); | 702 ASSERT(base != TMP2); |
728 AddImmediate(TMP2, base, offset, pp); | 703 AddImmediate(TMP2, base, offset); |
729 fldrd(dest, Address(TMP2)); | 704 fldrd(dest, Address(TMP2)); |
730 } | 705 } |
731 } | 706 } |
732 | 707 |
733 | 708 |
734 void Assembler::LoadQFromOffset( | 709 void Assembler::LoadQFromOffset(VRegister dest, Register base, int32_t offset) { |
735 VRegister dest, Register base, int32_t offset, Register pp) { | |
736 if (Address::CanHoldOffset(offset, Address::Offset, kQWord)) { | 710 if (Address::CanHoldOffset(offset, Address::Offset, kQWord)) { |
737 fldrq(dest, Address(base, offset, Address::Offset, kQWord)); | 711 fldrq(dest, Address(base, offset, Address::Offset, kQWord)); |
738 } else { | 712 } else { |
739 ASSERT(base != TMP2); | 713 ASSERT(base != TMP2); |
740 AddImmediate(TMP2, base, offset, pp); | 714 AddImmediate(TMP2, base, offset); |
741 fldrq(dest, Address(TMP2)); | 715 fldrq(dest, Address(TMP2)); |
742 } | 716 } |
743 } | 717 } |
744 | 718 |
745 | 719 |
746 void Assembler::StoreToOffset( | 720 void Assembler::StoreToOffset( |
747 Register src, Register base, int32_t offset, Register pp, OperandSize sz) { | 721 Register src, Register base, int32_t offset, OperandSize sz) { |
748 ASSERT(base != TMP2); | 722 ASSERT(base != TMP2); |
749 if (Address::CanHoldOffset(offset, Address::Offset, sz)) { | 723 if (Address::CanHoldOffset(offset, Address::Offset, sz)) { |
750 str(src, Address(base, offset, Address::Offset, sz), sz); | 724 str(src, Address(base, offset, Address::Offset, sz), sz); |
751 } else { | 725 } else { |
752 ASSERT(src != TMP2); | 726 ASSERT(src != TMP2); |
753 AddImmediate(TMP2, base, offset, pp); | 727 AddImmediate(TMP2, base, offset); |
754 str(src, Address(TMP2), sz); | 728 str(src, Address(TMP2), sz); |
755 } | 729 } |
756 } | 730 } |
757 | 731 |
758 | 732 |
759 void Assembler::StoreDToOffset( | 733 void Assembler::StoreDToOffset(VRegister src, Register base, int32_t offset) { |
760 VRegister src, Register base, int32_t offset, Register pp) { | |
761 if (Address::CanHoldOffset(offset, Address::Offset, kDWord)) { | 734 if (Address::CanHoldOffset(offset, Address::Offset, kDWord)) { |
762 fstrd(src, Address(base, offset, Address::Offset, kDWord)); | 735 fstrd(src, Address(base, offset, Address::Offset, kDWord)); |
763 } else { | 736 } else { |
764 ASSERT(base != TMP2); | 737 ASSERT(base != TMP2); |
765 AddImmediate(TMP2, base, offset, pp); | 738 AddImmediate(TMP2, base, offset); |
766 fstrd(src, Address(TMP2)); | 739 fstrd(src, Address(TMP2)); |
767 } | 740 } |
768 } | 741 } |
769 | 742 |
770 | 743 |
771 void Assembler::StoreQToOffset( | 744 void Assembler::StoreQToOffset(VRegister src, Register base, int32_t offset) { |
772 VRegister src, Register base, int32_t offset, Register pp) { | |
773 if (Address::CanHoldOffset(offset, Address::Offset, kQWord)) { | 745 if (Address::CanHoldOffset(offset, Address::Offset, kQWord)) { |
774 fstrq(src, Address(base, offset, Address::Offset, kQWord)); | 746 fstrq(src, Address(base, offset, Address::Offset, kQWord)); |
775 } else { | 747 } else { |
776 ASSERT(base != TMP2); | 748 ASSERT(base != TMP2); |
777 AddImmediate(TMP2, base, offset, pp); | 749 AddImmediate(TMP2, base, offset); |
778 fstrq(src, Address(TMP2)); | 750 fstrq(src, Address(TMP2)); |
779 } | 751 } |
780 } | 752 } |
781 | 753 |
782 | 754 |
783 void Assembler::VRecps(VRegister vd, VRegister vn) { | 755 void Assembler::VRecps(VRegister vd, VRegister vn) { |
784 ASSERT(vn != VTMP); | 756 ASSERT(vn != VTMP); |
785 ASSERT(vd != VTMP); | 757 ASSERT(vd != VTMP); |
786 | 758 |
787 // Reciprocal estimate. | 759 // Reciprocal estimate. |
(...skipping 53 matching lines...) | |
841 // And the result with the negated space bit of the object. | 813 // And the result with the negated space bit of the object. |
842 bic(TMP, TMP, Operand(object)); | 814 bic(TMP, TMP, Operand(object)); |
843 tsti(TMP, Immediate(kNewObjectAlignmentOffset)); | 815 tsti(TMP, Immediate(kNewObjectAlignmentOffset)); |
844 b(no_update, EQ); | 816 b(no_update, EQ); |
845 } | 817 } |
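Only the tail of the write-barrier filter is visible here, but the idea is recoverable: assuming new-space objects are distinguishable by an address bit (`kNewObjectAlignmentOffset`, per the VM's heap layout), a barrier is needed only when a new-space value is stored into an old-space object. A hedged reconstruction of that predicate:

```cpp
#include <cstdint>
#include <cstdio>

// Assumed: a set kNewObjectAlignmentOffset bit in a tagged address marks a
// new-space object. bic (and-not) keeps the value's bit only when the
// object's bit is clear; tsti + b(.., EQ) then skips the store-buffer
// update when the result is zero.
static bool NeedsStoreBarrier(uintptr_t object, uintptr_t value,
                              uintptr_t kNewObjectAlignmentOffset) {
  return ((value & ~object) & kNewObjectAlignmentOffset) != 0;
}

int main() {
  const uintptr_t kBit = 8;  // hypothetical alignment bit
  printf("%d\n", NeedsStoreBarrier(0x1000, 0x2008, kBit));  // 1: new into old
  printf("%d\n", NeedsStoreBarrier(0x1008, 0x2008, kBit));  // 0: new into new
  printf("%d\n", NeedsStoreBarrier(0x1000, 0x2000, kBit));  // 0: old value
}
```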
846 | 818 |
847 | 819 |
848 void Assembler::StoreIntoObjectOffset(Register object, | 820 void Assembler::StoreIntoObjectOffset(Register object, |
849 int32_t offset, | 821 int32_t offset, |
850 Register value, | 822 Register value, |
851 Register pp, | |
852 bool can_value_be_smi) { | 823 bool can_value_be_smi) { |
853 if (Address::CanHoldOffset(offset - kHeapObjectTag)) { | 824 if (Address::CanHoldOffset(offset - kHeapObjectTag)) { |
854 StoreIntoObject( | 825 StoreIntoObject( |
855 object, FieldAddress(object, offset), value, can_value_be_smi); | 826 object, FieldAddress(object, offset), value, can_value_be_smi); |
856 } else { | 827 } else { |
857 AddImmediate(TMP, object, offset - kHeapObjectTag, pp); | 828 AddImmediate(TMP, object, offset - kHeapObjectTag); |
858 StoreIntoObject(object, Address(TMP), value, can_value_be_smi); | 829 StoreIntoObject(object, Address(TMP), value, can_value_be_smi); |
859 } | 830 } |
860 } | 831 } |
861 | 832 |
862 | 833 |
863 void Assembler::StoreIntoObject(Register object, | 834 void Assembler::StoreIntoObject(Register object, |
864 const Address& dest, | 835 const Address& dest, |
865 Register value, | 836 Register value, |
866 bool can_value_be_smi) { | 837 bool can_value_be_smi) { |
867 ASSERT(object != value); | 838 ASSERT(object != value); |
(...skipping 33 matching lines...) | |
901 StoreIntoObjectFilter(object, value, &done); | 872 StoreIntoObjectFilter(object, value, &done); |
902 Stop("Store buffer update is required"); | 873 Stop("Store buffer update is required"); |
903 Bind(&done); | 874 Bind(&done); |
904 #endif // defined(DEBUG) | 875 #endif // defined(DEBUG) |
905 // No store buffer update. | 876 // No store buffer update. |
906 } | 877 } |
907 | 878 |
908 | 879 |
909 void Assembler::StoreIntoObjectOffsetNoBarrier(Register object, | 880 void Assembler::StoreIntoObjectOffsetNoBarrier(Register object, |
910 int32_t offset, | 881 int32_t offset, |
911 Register value, | 882 Register value) { |
912 Register pp) { | |
913 if (Address::CanHoldOffset(offset - kHeapObjectTag)) { | 883 if (Address::CanHoldOffset(offset - kHeapObjectTag)) { |
914 StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value); | 884 StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value); |
915 } else { | 885 } else { |
916 AddImmediate(TMP, object, offset - kHeapObjectTag, pp); | 886 AddImmediate(TMP, object, offset - kHeapObjectTag); |
917 StoreIntoObjectNoBarrier(object, Address(TMP), value); | 887 StoreIntoObjectNoBarrier(object, Address(TMP), value); |
918 } | 888 } |
919 } | 889 } |
920 | 890 |
921 | 891 |
922 void Assembler::StoreIntoObjectNoBarrier(Register object, | 892 void Assembler::StoreIntoObjectNoBarrier(Register object, |
923 const Address& dest, | 893 const Address& dest, |
924 const Object& value) { | 894 const Object& value) { |
925 ASSERT(value.IsSmi() || value.InVMHeap() || | 895 ASSERT(value.IsSmi() || value.InVMHeap() || |
926 (value.IsOld() && value.IsNotTemporaryScopedHandle())); | 896 (value.IsOld() && value.IsNotTemporaryScopedHandle())); |
927 // No store buffer update. | 897 // No store buffer update. |
928 LoadObject(TMP2, value, PP); | 898 LoadObject(TMP2, value); |
929 str(TMP2, dest); | 899 str(TMP2, dest); |
930 } | 900 } |
931 | 901 |
932 | 902 |
933 void Assembler::StoreIntoObjectOffsetNoBarrier(Register object, | 903 void Assembler::StoreIntoObjectOffsetNoBarrier(Register object, |
934 int32_t offset, | 904 int32_t offset, |
935 const Object& value, | 905 const Object& value) { |
936 Register pp) { | |
937 if (Address::CanHoldOffset(offset - kHeapObjectTag)) { | 906 if (Address::CanHoldOffset(offset - kHeapObjectTag)) { |
938 StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value); | 907 StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value); |
939 } else { | 908 } else { |
940 AddImmediate(TMP, object, offset - kHeapObjectTag, pp); | 909 AddImmediate(TMP, object, offset - kHeapObjectTag); |
941 StoreIntoObjectNoBarrier(object, Address(TMP), value); | 910 StoreIntoObjectNoBarrier(object, Address(TMP), value); |
942 } | 911 } |
943 } | 912 } |
944 | 913 |
945 | 914 |
946 void Assembler::LoadClassId(Register result, Register object, Register pp) { | 915 void Assembler::LoadClassId(Register result, Register object) { |
947 ASSERT(RawObject::kClassIdTagPos == kBitsPerInt32); | 916 ASSERT(RawObject::kClassIdTagPos == kBitsPerInt32); |
948 ASSERT(RawObject::kClassIdTagSize == kBitsPerInt32); | 917 ASSERT(RawObject::kClassIdTagSize == kBitsPerInt32); |
949 const intptr_t class_id_offset = Object::tags_offset() + | 918 const intptr_t class_id_offset = Object::tags_offset() + |
950 RawObject::kClassIdTagPos / kBitsPerByte; | 919 RawObject::kClassIdTagPos / kBitsPerByte; |
951 LoadFromOffset(result, object, class_id_offset - kHeapObjectTag, pp, | 920 LoadFromOffset(result, object, class_id_offset - kHeapObjectTag, |
952 kUnsignedWord); | 921 kUnsignedWord); |
953 } | 922 } |
954 | 923 |
955 | 924 |
956 void Assembler::LoadClassById(Register result, Register class_id, Register pp) { | 925 void Assembler::LoadClassById(Register result, Register class_id) { |
957 ASSERT(result != class_id); | 926 ASSERT(result != class_id); |
958 LoadIsolate(result); | 927 LoadIsolate(result); |
959 const intptr_t offset = | 928 const intptr_t offset = |
960 Isolate::class_table_offset() + ClassTable::table_offset(); | 929 Isolate::class_table_offset() + ClassTable::table_offset(); |
961 LoadFromOffset(result, result, offset, pp); | 930 LoadFromOffset(result, result, offset); |
962 ldr(result, Address(result, class_id, UXTX, Address::Scaled)); | 931 ldr(result, Address(result, class_id, UXTX, Address::Scaled)); |
963 } | 932 } |
964 | 933 |
965 | 934 |
966 void Assembler::LoadClass(Register result, Register object, Register pp) { | 935 void Assembler::LoadClass(Register result, Register object) { |
967 ASSERT(object != TMP); | 936 ASSERT(object != TMP); |
968 LoadClassId(TMP, object, pp); | 937 LoadClassId(TMP, object); |
969 LoadClassById(result, TMP, pp); | 938 LoadClassById(result, TMP); |
970 } | 939 } |
971 | 940 |
972 | 941 |
973 void Assembler::CompareClassId( | 942 void Assembler::CompareClassId(Register object, intptr_t class_id) { |
974 Register object, intptr_t class_id, Register pp) { | 943 LoadClassId(TMP, object); |
975 LoadClassId(TMP, object, pp); | 944 CompareImmediate(TMP, class_id); |
976 CompareImmediate(TMP, class_id, pp); | |
977 } | 945 } |
978 | 946 |
979 | 947 |
980 void Assembler::LoadClassIdMayBeSmi(Register result, Register object) { | 948 void Assembler::LoadClassIdMayBeSmi(Register result, Register object) { |
981 // Load up a null object. We only need it so we can use LoadClassId on it in | 949 // Load up a null object. We only need it so we can use LoadClassId on it in |
982 // the case that object is a Smi. | 950 // the case that object is a Smi. |
983 LoadObject(TMP, Object::null_object(), PP); | 951 LoadObject(TMP, Object::null_object()); |
984 // Check if the object is a Smi. | 952 // Check if the object is a Smi. |
985 tsti(object, Immediate(kSmiTagMask)); | 953 tsti(object, Immediate(kSmiTagMask)); |
986 // If the object *is* a Smi, use the null object instead. o/w leave alone. | 954 // If the object *is* a Smi, use the null object instead. o/w leave alone. |
987 csel(TMP, TMP, object, EQ); | 955 csel(TMP, TMP, object, EQ); |
988 // Loads either the cid of the object if it isn't a Smi, or the cid of null | 956 // Loads either the cid of the object if it isn't a Smi, or the cid of null |
989 // if it is a Smi, which will be ignored. | 957 // if it is a Smi, which will be ignored. |
990 LoadClassId(result, TMP, PP); | 958 LoadClassId(result, TMP); |
991 | 959 |
992 LoadImmediate(TMP, kSmiCid, PP); | 960 LoadImmediate(TMP, kSmiCid); |
993 // If object is a Smi, move the Smi cid into result. o/w leave alone. | 961 // If object is a Smi, move the Smi cid into result. o/w leave alone. |
994 csel(result, TMP, result, EQ); | 962 csel(result, TMP, result, EQ); |
995 } | 963 } |
996 | 964 |
997 | 965 |
998 void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) { | 966 void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) { |
999 LoadClassIdMayBeSmi(result, object); | 967 LoadClassIdMayBeSmi(result, object); |
1000 // Finally, tag the result. | 968 // Finally, tag the result. |
1001 SmiTag(result); | 969 SmiTag(result); |
1002 } | 970 } |
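Both helpers above are branchless: csel picks between the object and the null object so LoadClassId never dereferences a Smi, and SmiTag is a left shift. A sketch of the combined effect (`kSmiTagMask`, `kSmiTagSize`, and `kSmiCid` are assumed from the VM headers; `ClassIdOf` is a self-contained stand-in for the tag-word load):

```cpp
#include <cstdint>
#include <cstdio>

constexpr uintptr_t kSmiTagMask = 1;  // assumed: low bit clear means Smi
constexpr int kSmiTagSize = 1;        // assumed
constexpr intptr_t kSmiCid = 59;      // hypothetical cid value

// Stand-in for LoadClassId's tag-word load; a fixed answer keeps the
// sketch self-contained.
static intptr_t ClassIdOf(uintptr_t) { return 42; }

// Branchless shape of LoadTaggedClassIdMayBeSmi: select, classify, Smi-tag.
static intptr_t TaggedClassIdMayBeSmi(uintptr_t object) {
  const bool is_smi = (object & kSmiTagMask) == 0;
  const intptr_t cid = is_smi ? kSmiCid : ClassIdOf(object);  // csel + load
  return cid << kSmiTagSize;                                  // SmiTag
}

int main() {
  printf("%ld\n", (long)TaggedClassIdMayBeSmi(0x10));  // Smi: 2 * kSmiCid
  printf("%ld\n", (long)TaggedClassIdMayBeSmi(0x11));  // heap object: 84
}
```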
1003 | 971 |
1004 | 972 |
1005 void Assembler::ComputeRange(Register result, | 973 void Assembler::ComputeRange(Register result, |
1006 Register value, | 974 Register value, |
1007 Register scratch, | 975 Register scratch, |
1008 Label* not_mint) { | 976 Label* not_mint) { |
1009 Label done, not_smi; | 977 Label done, not_smi; |
1010 tsti(value, Immediate(kSmiTagMask)); | 978 tsti(value, Immediate(kSmiTagMask)); |
1011 b(&not_smi, NE); | 979 b(&not_smi, NE); |
1012 | 980 |
1013 AsrImmediate(scratch, value, 32); | 981 AsrImmediate(scratch, value, 32); |
1014 LoadImmediate(result, ICData::kUint32RangeBit, PP); | 982 LoadImmediate(result, ICData::kUint32RangeBit); |
1015 cmp(scratch, Operand(1)); | 983 cmp(scratch, Operand(1)); |
1016 b(&done, EQ); | 984 b(&done, EQ); |
1017 | 985 |
1018 neg(scratch, scratch); | 986 neg(scratch, scratch); |
1019 add(result, scratch, Operand(ICData::kInt32RangeBit)); | 987 add(result, scratch, Operand(ICData::kInt32RangeBit)); |
1020 cmp(scratch, Operand(1)); | 988 cmp(scratch, Operand(1)); |
1021 LoadImmediate(TMP, ICData::kSignedRangeBit, PP); | 989 LoadImmediate(TMP, ICData::kSignedRangeBit); |
1022 csel(result, result, TMP, LS); | 990 csel(result, result, TMP, LS); |
1023 b(&done); | 991 b(&done); |
1024 | 992 |
1025 Bind(&not_smi); | 993 Bind(&not_smi); |
1026 CompareClassId(value, kMintCid, PP); | 994 CompareClassId(value, kMintCid); |
1027 b(not_mint, NE); | 995 b(not_mint, NE); |
1028 | 996 |
1029 LoadImmediate(result, ICData::kInt64RangeBit, PP); | 997 LoadImmediate(result, ICData::kInt64RangeBit); |
1030 Bind(&done); | 998 Bind(&done); |
1031 } | 999 } |
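Condensed into C++, the Smi path of ComputeRange classifies a tagged Smi by its arithmetically shifted high 32 bits. A hedged reconstruction (the range-bit constants below are illustrative placeholders for the ICData symbols; LS is an unsigned comparison, hence the cast):

```cpp
#include <cstdint>
#include <cstdio>

// Placeholder values standing in for the assumed ICData constants.
constexpr int64_t kUint32RangeBit = 8;
constexpr int64_t kInt32RangeBit = 2;
constexpr int64_t kSignedRangeBit = 4;

// Mirrors the Smi branch above: hi == 1 -> uint32 range; hi in {0, -1} ->
// kInt32RangeBit or its neighbor (via `add(result, -hi, kInt32RangeBit)`);
// anything else -> the general signed-range bit.
static int64_t SmiRangeBits(int64_t tagged_smi) {
  const int64_t hi = tagged_smi >> 32;   // AsrImmediate(scratch, value, 32)
  if (hi == 1) return kUint32RangeBit;
  const int64_t neg = -hi;
  if (static_cast<uint64_t>(neg) <= 1)   // cmp + csel(.., LS)
    return neg + kInt32RangeBit;
  return kSignedRangeBit;
}

int main() {
  printf("%lld\n", (long long)SmiRangeBits(100 << 1));        // int32 range
  printf("%lld\n", (long long)SmiRangeBits(0x180000000LL));   // uint32 range
  printf("%lld\n", (long long)SmiRangeBits(0x4000000000LL));  // signed range
}
```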
1032 | 1000 |
1033 | 1001 |
1034 void Assembler::UpdateRangeFeedback(Register value, | 1002 void Assembler::UpdateRangeFeedback(Register value, |
1035 intptr_t index, | 1003 intptr_t index, |
1036 Register ic_data, | 1004 Register ic_data, |
1037 Register scratch1, | 1005 Register scratch1, |
1038 Register scratch2, | 1006 Register scratch2, |
1039 Label* miss) { | 1007 Label* miss) { |
1040 ASSERT(ICData::IsValidRangeFeedbackIndex(index)); | 1008 ASSERT(ICData::IsValidRangeFeedbackIndex(index)); |
1041 ComputeRange(scratch1, value, scratch2, miss); | 1009 ComputeRange(scratch1, value, scratch2, miss); |
1042 ldr(scratch2, FieldAddress(ic_data, ICData::state_bits_offset()), kWord); | 1010 ldr(scratch2, FieldAddress(ic_data, ICData::state_bits_offset()), kWord); |
1043 orrw(scratch2, | 1011 orrw(scratch2, |
1044 scratch2, | 1012 scratch2, |
1045 Operand(scratch1, LSL, ICData::RangeFeedbackShift(index))); | 1013 Operand(scratch1, LSL, ICData::RangeFeedbackShift(index))); |
1046 str(scratch2, FieldAddress(ic_data, ICData::state_bits_offset()), kWord); | 1014 str(scratch2, FieldAddress(ic_data, ICData::state_bits_offset()), kWord); |
1047 } | 1015 } |
1048 | 1016 |
1049 | 1017 |
1050 // Frame entry and exit. | 1018 // Frame entry and exit. |
1051 void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) { | 1019 void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) { |
1052 // Reserve space for arguments and align frame before entering | 1020 // Reserve space for arguments and align frame before entering |
1053 // the C++ world. | 1021 // the C++ world. |
1054 if (frame_space != 0) { | 1022 if (frame_space != 0) { |
1055 AddImmediate(SP, SP, -frame_space, kNoPP); | 1023 AddImmediate(SP, SP, -frame_space); |
1056 } | 1024 } |
1057 if (OS::ActivationFrameAlignment() > 1) { | 1025 if (OS::ActivationFrameAlignment() > 1) { |
1058 andi(SP, SP, Immediate(~(OS::ActivationFrameAlignment() - 1))); | 1026 andi(SP, SP, Immediate(~(OS::ActivationFrameAlignment() - 1))); |
1059 } | 1027 } |
1060 } | 1028 } |
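The closing andi is an align-down: clearing the low bits rounds SP to the ABI's activation-frame alignment, a power of two (16 on ARM64). A sketch of the two steps:

```cpp
#include <cstdint>
#include <cstdio>

// Reserve frame_space, then round sp down to `alignment` (a power of two),
// as AddImmediate + `andi(SP, SP, ~(alignment - 1))` do above.
static uintptr_t AlignFrame(uintptr_t sp, intptr_t frame_space,
                            uintptr_t alignment) {
  sp -= frame_space;             // AddImmediate(SP, SP, -frame_space)
  return sp & ~(alignment - 1);  // andi with the inverted mask
}

int main() {
  printf("%#zx\n", (size_t)AlignFrame(0x7ffc, 24, 16));  // 0x7fe0
}
```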
1061 | 1029 |
1062 | 1030 |
1063 void Assembler::EnterFrame(intptr_t frame_size) { | 1031 void Assembler::EnterFrame(intptr_t frame_size) { |
1064 PushPair(LR, FP); | 1032 PushPair(LR, FP); |
1065 mov(FP, SP); | 1033 mov(FP, SP); |
1066 | 1034 |
1067 if (frame_size > 0) { | 1035 if (frame_size > 0) { |
1068 sub(SP, SP, Operand(frame_size)); | 1036 sub(SP, SP, Operand(frame_size)); |
1069 } | 1037 } |
1070 } | 1038 } |
1071 | 1039 |
1072 | 1040 |
1073 void Assembler::LeaveFrame() { | 1041 void Assembler::LeaveFrame() { |
1074 mov(SP, FP); | 1042 mov(SP, FP); |
1075 PopPair(LR, FP); | 1043 PopPair(LR, FP); |
1076 } | 1044 } |
1077 | 1045 |
1078 | 1046 |
1079 void Assembler::EnterDartFrame(intptr_t frame_size) { | 1047 void Assembler::EnterDartFrame(intptr_t frame_size) { |
1048 ASSERT(!constant_pool_allowed()); | |
1080 // Setup the frame. | 1049 // Setup the frame. |
1081 adr(TMP, Immediate(-CodeSize())); // TMP gets PC marker. | 1050 adr(TMP, Immediate(-CodeSize())); // TMP gets PC marker. |
1082 EnterFrame(0); | 1051 EnterFrame(0); |
1083 TagAndPushPPAndPcMarker(TMP); // Save PP and PC marker. | 1052 TagAndPushPPAndPcMarker(TMP); // Save PP and PC marker. |
1084 | 1053 |
1085 // Load the pool pointer. | 1054 // Load the pool pointer. |
1086 LoadPoolPointer(PP); | 1055 LoadPoolPointer(); |
1087 | 1056 |
1088 // Reserve space. | 1057 // Reserve space. |
1089 if (frame_size > 0) { | 1058 if (frame_size > 0) { |
1090 AddImmediate(SP, SP, -frame_size, PP); | 1059 AddImmediate(SP, SP, -frame_size); |
1091 } | 1060 } |
1092 } | 1061 } |
1093 | 1062 |
1094 | 1063 |
1095 void Assembler::EnterDartFrameWithInfo(intptr_t frame_size, Register new_pp) { | 1064 void Assembler::EnterDartFrameWithInfo(intptr_t frame_size, Register new_pp) { |
1065 ASSERT(!constant_pool_allowed()); | |
1096 // Setup the frame. | 1066 // Setup the frame. |
1097 adr(TMP, Immediate(-CodeSize())); // TMP gets PC marker. | 1067 adr(TMP, Immediate(-CodeSize())); // TMP gets PC marker. |
1098 EnterFrame(0); | 1068 EnterFrame(0); |
1099 TagAndPushPPAndPcMarker(TMP); // Save PP and PC marker. | 1069 TagAndPushPPAndPcMarker(TMP); // Save PP and PC marker. |
1100 | 1070 |
1101 // Load the pool pointer. | 1071 // Load the pool pointer. |
1102 if (new_pp == kNoPP) { | 1072 if (new_pp == kNoRegister) { |
1103 LoadPoolPointer(PP); | 1073 LoadPoolPointer(); |
1104 } else { | 1074 } else { |
1105 mov(PP, new_pp); | 1075 mov(PP, new_pp); |
1076 set_constant_pool_allowed(true); | |
1106 } | 1077 } |
1107 | 1078 |
1108 // Reserve space. | 1079 // Reserve space. |
1109 if (frame_size > 0) { | 1080 if (frame_size > 0) { |
1110 AddImmediate(SP, SP, -frame_size, PP); | 1081 AddImmediate(SP, SP, -frame_size); |
1111 } | 1082 } |
1112 } | 1083 } |
1113 | 1084 |
1114 | 1085 |
1115 // On entry to a function compiled for OSR, the caller's frame pointer, the | 1086 // On entry to a function compiled for OSR, the caller's frame pointer, the |
1116 // stack locals, and any copied parameters are already in place. The frame | 1087 // stack locals, and any copied parameters are already in place. The frame |
1117 // pointer is already set up. The PC marker is not correct for the | 1088 // pointer is already set up. The PC marker is not correct for the |
1118 // optimized function and there may be extra space for spill slots to | 1089 // optimized function and there may be extra space for spill slots to |
1119 // allocate. We must also set up the pool pointer for the function. | 1090 // allocate. We must also set up the pool pointer for the function. |
1120 void Assembler::EnterOsrFrame(intptr_t extra_size, Register new_pp) { | 1091 void Assembler::EnterOsrFrame(intptr_t extra_size, Register new_pp) { |
1092 ASSERT(!constant_pool_allowed()); | |
1121 Comment("EnterOsrFrame"); | 1093 Comment("EnterOsrFrame"); |
1122 adr(TMP, Immediate(-CodeSize())); | 1094 adr(TMP, Immediate(-CodeSize())); |
1123 | 1095 |
1124 StoreToOffset(TMP, FP, kPcMarkerSlotFromFp * kWordSize, kNoPP); | 1096 StoreToOffset(TMP, FP, kPcMarkerSlotFromFp * kWordSize); |
1125 | 1097 |
1126 // Setup pool pointer for this dart function. | 1098 // Setup pool pointer for this dart function. |
1127 if (new_pp == kNoPP) { | 1099 if (new_pp == kNoRegister) { |
1128 LoadPoolPointer(PP); | 1100 LoadPoolPointer(); |
1129 } else { | 1101 } else { |
1130 mov(PP, new_pp); | 1102 mov(PP, new_pp); |
1103 set_constant_pool_allowed(true); | |
1131 } | 1104 } |
1132 | 1105 |
1133 if (extra_size > 0) { | 1106 if (extra_size > 0) { |
1134 AddImmediate(SP, SP, -extra_size, PP); | 1107 AddImmediate(SP, SP, -extra_size); |
1135 } | 1108 } |
1136 } | 1109 } |
1137 | 1110 |
1138 | 1111 |
1139 void Assembler::LeaveDartFrame() { | 1112 void Assembler::LeaveDartFrame() { |
1113 // LeaveDartFrame is called from stubs (pp disallowed) and from Dart code (pp | |
1114 // allowed), so there is no point in checking the current value of | |
1115 // constant_pool_allowed(). | |
1116 set_constant_pool_allowed(false); | |
1140 // Restore and untag PP. | 1117 // Restore and untag PP. |
1141 LoadFromOffset(PP, FP, kSavedCallerPpSlotFromFp * kWordSize, kNoPP); | 1118 LoadFromOffset(PP, FP, kSavedCallerPpSlotFromFp * kWordSize); |
1142 sub(PP, PP, Operand(kHeapObjectTag)); | 1119 sub(PP, PP, Operand(kHeapObjectTag)); |
1143 LeaveFrame(); | 1120 LeaveFrame(); |
1144 } | 1121 } |
1145 | 1122 |
1146 | 1123 |
1147 void Assembler::EnterCallRuntimeFrame(intptr_t frame_size) { | 1124 void Assembler::EnterCallRuntimeFrame(intptr_t frame_size) { |
1148 EnterFrame(0); | 1125 EnterFrame(0); |
1149 | 1126 |
1150 // Store fpu registers with the lowest register number at the lowest | 1127 // Store fpu registers with the lowest register number at the lowest |
1151 // address. | 1128 // address. |
(...skipping 17 matching lines...) | |
1169 } | 1146 } |
1170 | 1147 |
1171 | 1148 |
1172 void Assembler::LeaveCallRuntimeFrame() { | 1149 void Assembler::LeaveCallRuntimeFrame() { |
1173 // SP might have been modified to reserve space for arguments | 1150 // SP might have been modified to reserve space for arguments |
1174 // and ensure proper alignment of the stack frame. | 1151 // and ensure proper alignment of the stack frame. |
1175 // We need to restore it before restoring registers. | 1152 // We need to restore it before restoring registers. |
1176 const intptr_t kPushedRegistersSize = | 1153 const intptr_t kPushedRegistersSize = |
1177 kDartVolatileCpuRegCount * kWordSize + | 1154 kDartVolatileCpuRegCount * kWordSize + |
1178 kDartVolatileFpuRegCount * kWordSize; | 1155 kDartVolatileFpuRegCount * kWordSize; |
1179 AddImmediate(SP, FP, -kPushedRegistersSize, PP); | 1156 AddImmediate(SP, FP, -kPushedRegistersSize); |
1180 for (int i = kDartLastVolatileCpuReg; i >= kDartFirstVolatileCpuReg; i--) { | 1157 for (int i = kDartLastVolatileCpuReg; i >= kDartFirstVolatileCpuReg; i--) { |
1181 const Register reg = static_cast<Register>(i); | 1158 const Register reg = static_cast<Register>(i); |
1182 Pop(reg); | 1159 Pop(reg); |
1183 } | 1160 } |
1184 | 1161 |
1185 for (int i = 0; i < kNumberOfVRegisters; i++) { | 1162 for (int i = 0; i < kNumberOfVRegisters; i++) { |
1186 if ((i >= kAbiFirstPreservedFpuReg) && (i <= kAbiLastPreservedFpuReg)) { | 1163 if ((i >= kAbiFirstPreservedFpuReg) && (i <= kAbiLastPreservedFpuReg)) { |
1187 // TODO(zra): When SIMD is added, we must also restore the top | 1164 // TODO(zra): When SIMD is added, we must also restore the top |
1188 // 64-bits of the callee-saved registers. | 1165 // 64-bits of the callee-saved registers. |
1189 continue; | 1166 continue; |
1190 } | 1167 } |
1191 // TODO(zra): Restore the whole V register. | 1168 // TODO(zra): Restore the whole V register. |
1192 VRegister reg = static_cast<VRegister>(i); | 1169 VRegister reg = static_cast<VRegister>(i); |
1193 PopDouble(reg); | 1170 PopDouble(reg); |
1194 } | 1171 } |
1195 | 1172 |
1196 PopPair(LR, FP); | 1173 PopPair(LR, FP); |
1197 } | 1174 } |
1198 | 1175 |
1199 | 1176 |
1200 void Assembler::CallRuntime(const RuntimeEntry& entry, | 1177 void Assembler::CallRuntime(const RuntimeEntry& entry, |
1201 intptr_t argument_count) { | 1178 intptr_t argument_count) { |
1202 entry.Call(this, argument_count); | 1179 entry.Call(this, argument_count); |
1203 } | 1180 } |
1204 | 1181 |
1205 | 1182 |
1206 void Assembler::EnterStubFrame() { | 1183 void Assembler::EnterStubFrame() { |
1184 set_constant_pool_allowed(false); | |
1207 EnterFrame(0); | 1185 EnterFrame(0); |
1208 // Save caller's pool pointer. Push 0 in the saved PC area for stub frames. | 1186 // Save caller's pool pointer. Push 0 in the saved PC area for stub frames. |
1209 TagAndPushPPAndPcMarker(ZR); | 1187 TagAndPushPPAndPcMarker(ZR); |
1210 LoadPoolPointer(PP); | 1188 LoadPoolPointer(); |
1211 } | 1189 } |
1212 | 1190 |
1213 | 1191 |
1214 void Assembler::LeaveStubFrame() { | 1192 void Assembler::LeaveStubFrame() { |
1193 set_constant_pool_allowed(false); | |
1215 // Restore and untag PP. | 1194 // Restore and untag PP. |
1216 LoadFromOffset(PP, FP, kSavedCallerPpSlotFromFp * kWordSize, kNoPP); | 1195 LoadFromOffset(PP, FP, kSavedCallerPpSlotFromFp * kWordSize); |
1217 sub(PP, PP, Operand(kHeapObjectTag)); | 1196 sub(PP, PP, Operand(kHeapObjectTag)); |
1218 LeaveFrame(); | 1197 LeaveFrame(); |
1219 } | 1198 } |
1220 | 1199 |
1221 | 1200 |
1222 void Assembler::UpdateAllocationStats(intptr_t cid, | 1201 void Assembler::UpdateAllocationStats(intptr_t cid, |
1223 Register pp, | |
1224 Heap::Space space, | 1202 Heap::Space space, |
1225 bool inline_isolate) { | 1203 bool inline_isolate) { |
1226 ASSERT(cid > 0); | 1204 ASSERT(cid > 0); |
1227 intptr_t counter_offset = | 1205 intptr_t counter_offset = |
1228 ClassTable::CounterOffsetFor(cid, space == Heap::kNew); | 1206 ClassTable::CounterOffsetFor(cid, space == Heap::kNew); |
1229 if (inline_isolate) { | 1207 if (inline_isolate) { |
1230 ClassTable* class_table = Isolate::Current()->class_table(); | 1208 ClassTable* class_table = Isolate::Current()->class_table(); |
1231 ClassHeapStats** table_ptr = class_table->TableAddressFor(cid); | 1209 ClassHeapStats** table_ptr = class_table->TableAddressFor(cid); |
1232 if (cid < kNumPredefinedCids) { | 1210 if (cid < kNumPredefinedCids) { |
1233 LoadImmediate( | 1211 LoadImmediate( |
1234 TMP2, reinterpret_cast<uword>(*table_ptr) + counter_offset, pp); | 1212 TMP2, reinterpret_cast<uword>(*table_ptr) + counter_offset); |
1235 } else { | 1213 } else { |
1236 LoadImmediate(TMP2, reinterpret_cast<uword>(table_ptr), pp); | 1214 LoadImmediate(TMP2, reinterpret_cast<uword>(table_ptr)); |
1237 ldr(TMP, Address(TMP2)); | 1215 ldr(TMP, Address(TMP2)); |
1238 AddImmediate(TMP2, TMP, counter_offset, pp); | 1216 AddImmediate(TMP2, TMP, counter_offset); |
1239 } | 1217 } |
1240 } else { | 1218 } else { |
1241 LoadIsolate(TMP2); | 1219 LoadIsolate(TMP2); |
1242 intptr_t table_offset = | 1220 intptr_t table_offset = |
1243 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); | 1221 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); |
1244 ldr(TMP, Address(TMP2, table_offset)); | 1222 ldr(TMP, Address(TMP2, table_offset)); |
1245 AddImmediate(TMP2, TMP, counter_offset, pp); | 1223 AddImmediate(TMP2, TMP, counter_offset); |
1246 } | 1224 } |
1247 ldr(TMP, Address(TMP2, 0)); | 1225 ldr(TMP, Address(TMP2, 0)); |
1248 AddImmediate(TMP, TMP, 1, pp); | 1226 AddImmediate(TMP, TMP, 1); |
1249 str(TMP, Address(TMP2, 0)); | 1227 str(TMP, Address(TMP2, 0)); |
1250 } | 1228 } |
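The tail of UpdateAllocationStats is a plain read-modify-write on the counter address left in TMP2. A hedged C++ model of just that final ldr/AddImmediate/str triple:

    #include <cstdint>

    // counter_addr stands in for the address computed into TMP2 above.
    void BumpAllocationCounter(uint64_t* counter_addr) {
      uint64_t count = *counter_addr;  // ldr TMP, [TMP2, #0]
      count += 1;                      // AddImmediate(TMP, TMP, 1)
      *counter_addr = count;           // str TMP, [TMP2, #0]
    }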
1251 | 1229 |
1252 | 1230 |
1253 void Assembler::UpdateAllocationStatsWithSize(intptr_t cid, | 1231 void Assembler::UpdateAllocationStatsWithSize(intptr_t cid, |
1254 Register size_reg, | 1232 Register size_reg, |
1255 Register pp, | |
1256 Heap::Space space, | 1233 Heap::Space space, |
1257 bool inline_isolate) { | 1234 bool inline_isolate) { |
1258 ASSERT(cid > 0); | 1235 ASSERT(cid > 0); |
1259 const uword class_offset = ClassTable::ClassOffsetFor(cid); | 1236 const uword class_offset = ClassTable::ClassOffsetFor(cid); |
1260 const uword count_field_offset = (space == Heap::kNew) ? | 1237 const uword count_field_offset = (space == Heap::kNew) ? |
1261 ClassHeapStats::allocated_since_gc_new_space_offset() : | 1238 ClassHeapStats::allocated_since_gc_new_space_offset() : |
1262 ClassHeapStats::allocated_since_gc_old_space_offset(); | 1239 ClassHeapStats::allocated_since_gc_old_space_offset(); |
1263 const uword size_field_offset = (space == Heap::kNew) ? | 1240 const uword size_field_offset = (space == Heap::kNew) ? |
1264 ClassHeapStats::allocated_size_since_gc_new_space_offset() : | 1241 ClassHeapStats::allocated_size_since_gc_new_space_offset() : |
1265 ClassHeapStats::allocated_size_since_gc_old_space_offset(); | 1242 ClassHeapStats::allocated_size_since_gc_old_space_offset(); |
1266 if (inline_isolate) { | 1243 if (inline_isolate) { |
1267 ClassTable* class_table = Isolate::Current()->class_table(); | 1244 ClassTable* class_table = Isolate::Current()->class_table(); |
1268 ClassHeapStats** table_ptr = class_table->TableAddressFor(cid); | 1245 ClassHeapStats** table_ptr = class_table->TableAddressFor(cid); |
1269 if (cid < kNumPredefinedCids) { | 1246 if (cid < kNumPredefinedCids) { |
1270 LoadImmediate(TMP2, | 1247 LoadImmediate(TMP2, |
1271 reinterpret_cast<uword>(*table_ptr) + class_offset, pp); | 1248 reinterpret_cast<uword>(*table_ptr) + class_offset); |
1272 } else { | 1249 } else { |
1273 LoadImmediate(TMP2, reinterpret_cast<uword>(table_ptr), pp); | 1250 LoadImmediate(TMP2, reinterpret_cast<uword>(table_ptr)); |
1274 ldr(TMP, Address(TMP2)); | 1251 ldr(TMP, Address(TMP2)); |
1275 AddImmediate(TMP2, TMP, class_offset, pp); | 1252 AddImmediate(TMP2, TMP, class_offset); |
1276 } | 1253 } |
1277 } else { | 1254 } else { |
1278 LoadIsolate(TMP2); | 1255 LoadIsolate(TMP2); |
1279 intptr_t table_offset = | 1256 intptr_t table_offset = |
1280 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); | 1257 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); |
1281 ldr(TMP, Address(TMP2, table_offset)); | 1258 ldr(TMP, Address(TMP2, table_offset)); |
1282 AddImmediate(TMP2, TMP, class_offset, pp); | 1259 AddImmediate(TMP2, TMP, class_offset); |
1283 } | 1260 } |
1284 ldr(TMP, Address(TMP2, count_field_offset)); | 1261 ldr(TMP, Address(TMP2, count_field_offset)); |
1285 AddImmediate(TMP, TMP, 1, pp); | 1262 AddImmediate(TMP, TMP, 1); |
1286 str(TMP, Address(TMP2, count_field_offset)); | 1263 str(TMP, Address(TMP2, count_field_offset)); |
1287 ldr(TMP, Address(TMP2, size_field_offset)); | 1264 ldr(TMP, Address(TMP2, size_field_offset)); |
1288 add(TMP, TMP, Operand(size_reg)); | 1265 add(TMP, TMP, Operand(size_reg)); |
1289 str(TMP, Address(TMP2, size_field_offset)); | 1266 str(TMP, Address(TMP2, size_field_offset)); |
1290 } | 1267 } |
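UpdateAllocationStatsWithSize touches two fields of the stats record, a count and a byte total. A hedged C++ model of the final updates (names are illustrative stand-ins, not VM API):

    #include <cstdint>

    void BumpCountAndSize(uint8_t* stats_base,        // TMP2 above
                          uintptr_t count_field_offset,
                          uintptr_t size_field_offset,
                          uint64_t allocated_size) {  // size_reg above
      auto* count = reinterpret_cast<uint64_t*>(stats_base + count_field_offset);
      auto* size = reinterpret_cast<uint64_t*>(stats_base + size_field_offset);
      *count += 1;               // ldr / AddImmediate(TMP, TMP, 1) / str
      *size += allocated_size;   // ldr / add(TMP, TMP, size_reg) / str
    }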
1291 | 1268 |
1292 | 1269 |
1293 void Assembler::MaybeTraceAllocation(intptr_t cid, | 1270 void Assembler::MaybeTraceAllocation(intptr_t cid, |
1294 Register temp_reg, | 1271 Register temp_reg, |
1295 Register pp, | |
1296 Label* trace, | 1272 Label* trace, |
1297 bool inline_isolate) { | 1273 bool inline_isolate) { |
1298 ASSERT(cid > 0); | 1274 ASSERT(cid > 0); |
1299 intptr_t state_offset = ClassTable::StateOffsetFor(cid); | 1275 intptr_t state_offset = ClassTable::StateOffsetFor(cid); |
1300 if (inline_isolate) { | 1276 if (inline_isolate) { |
1301 ClassTable* class_table = Isolate::Current()->class_table(); | 1277 ClassTable* class_table = Isolate::Current()->class_table(); |
1302 ClassHeapStats** table_ptr = class_table->TableAddressFor(cid); | 1278 ClassHeapStats** table_ptr = class_table->TableAddressFor(cid); |
1303 if (cid < kNumPredefinedCids) { | 1279 if (cid < kNumPredefinedCids) { |
1304 LoadImmediate( | 1280 LoadImmediate( |
1305 temp_reg, reinterpret_cast<uword>(*table_ptr) + state_offset, pp); | 1281 temp_reg, reinterpret_cast<uword>(*table_ptr) + state_offset); |
1306 } else { | 1282 } else { |
1307 LoadImmediate(temp_reg, reinterpret_cast<uword>(table_ptr), pp); | 1283 LoadImmediate(temp_reg, reinterpret_cast<uword>(table_ptr)); |
1308 ldr(temp_reg, Address(temp_reg, 0)); | 1284 ldr(temp_reg, Address(temp_reg, 0)); |
1309 AddImmediate(temp_reg, temp_reg, state_offset, pp); | 1285 AddImmediate(temp_reg, temp_reg, state_offset); |
1310 } | 1286 } |
1311 } else { | 1287 } else { |
1312 LoadIsolate(temp_reg); | 1288 LoadIsolate(temp_reg); |
1313 intptr_t table_offset = | 1289 intptr_t table_offset = |
1314 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); | 1290 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); |
1315 ldr(temp_reg, Address(temp_reg, table_offset)); | 1291 ldr(temp_reg, Address(temp_reg, table_offset)); |
1316 AddImmediate(temp_reg, temp_reg, state_offset, pp); | 1292 AddImmediate(temp_reg, temp_reg, state_offset); |
1317 } | 1293 } |
1318 ldr(temp_reg, Address(temp_reg, 0)); | 1294 ldr(temp_reg, Address(temp_reg, 0)); |
1319 tsti(temp_reg, Immediate(ClassHeapStats::TraceAllocationMask())); | 1295 tsti(temp_reg, Immediate(ClassHeapStats::TraceAllocationMask())); |
1320 b(trace, NE); | 1296 b(trace, NE); |
1321 } | 1297 } |
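The function ends with a masked test on the per-class state word followed by a conditional branch. In C++ terms the condition is simply (the mask is whatever TraceAllocationMask() returns; assumed to be a nonzero bit pattern):

    #include <cstdint>

    bool AllocationIsTraced(uint64_t state_word, uint64_t trace_mask) {
      return (state_word & trace_mask) != 0;  // tsti temp_reg, mask; b(trace, NE)
    }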
1322 | 1298 |
1323 | 1299 |
1324 void Assembler::TryAllocate(const Class& cls, | 1300 void Assembler::TryAllocate(const Class& cls, |
1325 Label* failure, | 1301 Label* failure, |
1326 Register instance_reg, | 1302 Register instance_reg, |
1327 Register temp_reg, | 1303 Register temp_reg) { |
1328 Register pp) { | |
1329 ASSERT(failure != NULL); | 1304 ASSERT(failure != NULL); |
1330 if (FLAG_inline_alloc) { | 1305 if (FLAG_inline_alloc) { |
1331 // If this allocation is traced, the program will jump to the failure | 1306 // If this allocation is traced, the program will jump to the failure |
1332 // path (i.e. the allocation stub), which will allocate the object and | 1307 // path (i.e. the allocation stub), which will allocate the object and |
1333 // trace the allocation call site. | 1308 // trace the allocation call site. |
1334 MaybeTraceAllocation(cls.id(), temp_reg, pp, failure); | 1309 MaybeTraceAllocation(cls.id(), temp_reg, failure); |
1335 const intptr_t instance_size = cls.instance_size(); | 1310 const intptr_t instance_size = cls.instance_size(); |
1336 Heap* heap = Isolate::Current()->heap(); | 1311 Heap* heap = Isolate::Current()->heap(); |
1337 Heap::Space space = heap->SpaceForAllocation(cls.id()); | 1312 Heap::Space space = heap->SpaceForAllocation(cls.id()); |
1338 const uword top_address = heap->TopAddress(space); | 1313 const uword top_address = heap->TopAddress(space); |
1339 LoadImmediate(temp_reg, top_address, pp); | 1314 LoadImmediate(temp_reg, top_address); |
1340 ldr(instance_reg, Address(temp_reg)); | 1315 ldr(instance_reg, Address(temp_reg)); |
1341 // TODO(koda): Protect against unsigned overflow here. | 1316 // TODO(koda): Protect against unsigned overflow here. |
1342 AddImmediateSetFlags(instance_reg, instance_reg, instance_size, pp); | 1317 AddImmediateSetFlags(instance_reg, instance_reg, instance_size); |
1343 | 1318 |
1344 // instance_reg: potential next object start. | 1319 // instance_reg: potential next object start. |
1345 const uword end_address = heap->EndAddress(space); | 1320 const uword end_address = heap->EndAddress(space); |
1346 ASSERT(top_address < end_address); | 1321 ASSERT(top_address < end_address); |
1347 // Could use ldp to load (top, end), but no benefit seen experimentally. | 1322 // Could use ldp to load (top, end), but no benefit seen experimentally. |
1348 ldr(TMP, Address(temp_reg, end_address - top_address)); | 1323 ldr(TMP, Address(temp_reg, end_address - top_address)); |
1349 CompareRegisters(TMP, instance_reg); | 1324 CompareRegisters(TMP, instance_reg); |
1350 // Fail if the heap end is unsigned less than or equal to instance_reg. | 1325 // Fail if the heap end is unsigned less than or equal to instance_reg. |
1351 b(failure, LS); | 1326 b(failure, LS); |
1352 | 1327 |
1353 // Successfully allocated the object, now update top to point to | 1328 // Successfully allocated the object, now update top to point to |
1354 // next object start and store the class in the class field of object. | 1329 // next object start and store the class in the class field of object. |
1355 str(instance_reg, Address(temp_reg)); | 1330 str(instance_reg, Address(temp_reg)); |
1356 | 1331 |
1357 ASSERT(instance_size >= kHeapObjectTag); | 1332 ASSERT(instance_size >= kHeapObjectTag); |
1358 AddImmediate( | 1333 AddImmediate( |
1359 instance_reg, instance_reg, -instance_size + kHeapObjectTag, pp); | 1334 instance_reg, instance_reg, -instance_size + kHeapObjectTag); |
1360 UpdateAllocationStats(cls.id(), pp, space); | 1335 UpdateAllocationStats(cls.id(), space); |
1361 | 1336 |
1362 uword tags = 0; | 1337 uword tags = 0; |
1363 tags = RawObject::SizeTag::update(instance_size, tags); | 1338 tags = RawObject::SizeTag::update(instance_size, tags); |
1364 ASSERT(cls.id() != kIllegalCid); | 1339 ASSERT(cls.id() != kIllegalCid); |
1365 tags = RawObject::ClassIdTag::update(cls.id(), tags); | 1340 tags = RawObject::ClassIdTag::update(cls.id(), tags); |
1366 LoadImmediate(TMP, tags, pp); | 1341 LoadImmediate(TMP, tags); |
1367 StoreFieldToOffset(TMP, instance_reg, Object::tags_offset(), pp); | 1342 StoreFieldToOffset(TMP, instance_reg, Object::tags_offset()); |
1368 } else { | 1343 } else { |
1369 b(failure); | 1344 b(failure); |
1370 } | 1345 } |
1371 } | 1346 } |
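TryAllocate is a standard bump-pointer fast path: read the heap top, advance it by the instance size, bail out to the allocation stub if the result passes the heap end, commit the new top, then tag the pointer and write the header word. A compact C++ sketch under two assumptions, that Object::tags_offset() is 0 and kHeapObjectTag is 1 (returning 0 stands in for the branch to failure):

    #include <cstdint>

    uintptr_t TryAllocateSketch(uintptr_t* top_addr, uintptr_t* end_addr,
                                uintptr_t instance_size, uintptr_t tags) {
      uintptr_t start = *top_addr;             // ldr instance_reg, [temp_reg]
      uintptr_t next = start + instance_size;  // AddImmediateSetFlags; the TODO
                                               // above notes this can wrap.
      if (*end_addr <= next) {                 // CompareRegisters; b(failure, LS)
        return 0;                              // slow path: allocation stub
      }
      *top_addr = next;                        // str instance_reg, [temp_reg]
      *reinterpret_cast<uintptr_t*>(start) = tags;  // header write; assumes
                                                    // tags_offset == 0
      return start + 1;                        // tagged pointer; kHeapObjectTag
    }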
1372 | 1347 |
1373 | 1348 |
1374 void Assembler::TryAllocateArray(intptr_t cid, | 1349 void Assembler::TryAllocateArray(intptr_t cid, |
1375 intptr_t instance_size, | 1350 intptr_t instance_size, |
1376 Label* failure, | 1351 Label* failure, |
1377 Register instance, | 1352 Register instance, |
1378 Register end_address, | 1353 Register end_address, |
1379 Register temp1, | 1354 Register temp1, |
1380 Register temp2) { | 1355 Register temp2) { |
1381 if (FLAG_inline_alloc) { | 1356 if (FLAG_inline_alloc) { |
1382 // If this allocation is traced, the program will jump to the failure | 1357 // If this allocation is traced, the program will jump to the failure |
1383 // path (i.e. the allocation stub), which will allocate the object and | 1358 // path (i.e. the allocation stub), which will allocate the object and |
1384 // trace the allocation call site. | 1359 // trace the allocation call site. |
1385 MaybeTraceAllocation(cid, temp1, PP, failure); | 1360 MaybeTraceAllocation(cid, temp1, failure); |
1386 Isolate* isolate = Isolate::Current(); | 1361 Isolate* isolate = Isolate::Current(); |
1387 Heap* heap = isolate->heap(); | 1362 Heap* heap = isolate->heap(); |
1388 Heap::Space space = heap->SpaceForAllocation(cid); | 1363 Heap::Space space = heap->SpaceForAllocation(cid); |
1389 LoadImmediate(temp1, heap->TopAddress(space), PP); | 1364 LoadImmediate(temp1, heap->TopAddress(space)); |
1390 ldr(instance, Address(temp1, 0)); // Potential new object start. | 1365 ldr(instance, Address(temp1, 0)); // Potential new object start. |
1391 AddImmediateSetFlags(end_address, instance, instance_size, PP); | 1366 AddImmediateSetFlags(end_address, instance, instance_size); |
1392 b(failure, CS); // Fail on unsigned overflow. | 1367 b(failure, CS); // Fail on unsigned overflow. |
1393 | 1368 |
1394 // Check if the allocation fits into the remaining space. | 1369 // Check if the allocation fits into the remaining space. |
1395 // instance: potential new object start. | 1370 // instance: potential new object start. |
1396 // end_address: potential next object start. | 1371 // end_address: potential next object start. |
1397 LoadImmediate(temp2, heap->EndAddress(space), PP); | 1372 LoadImmediate(temp2, heap->EndAddress(space)); |
1398 ldr(temp2, Address(temp2, 0)); | 1373 ldr(temp2, Address(temp2, 0)); |
1399 cmp(end_address, Operand(temp2)); | 1374 cmp(end_address, Operand(temp2)); |
1400 b(failure, CS); | 1375 b(failure, CS); |
1401 | 1376 |
1402 // Successfully allocated the object(s), now update top to point to | 1377 // Successfully allocated the object(s), now update top to point to |
1403 // next object start and initialize the object. | 1378 // next object start and initialize the object. |
1404 str(end_address, Address(temp1, 0)); | 1379 str(end_address, Address(temp1, 0)); |
1405 add(instance, instance, Operand(kHeapObjectTag)); | 1380 add(instance, instance, Operand(kHeapObjectTag)); |
1406 LoadImmediate(temp2, instance_size, PP); | 1381 LoadImmediate(temp2, instance_size); |
1407 UpdateAllocationStatsWithSize(cid, temp2, PP, space); | 1382 UpdateAllocationStatsWithSize(cid, temp2, space); |
1408 | 1383 |
1409 // Initialize the tags. | 1384 // Initialize the tags. |
1410 // instance: new object start as a tagged pointer. | 1385 // instance: new object start as a tagged pointer. |
1411 uword tags = 0; | 1386 uword tags = 0; |
1412 tags = RawObject::ClassIdTag::update(cid, tags); | 1387 tags = RawObject::ClassIdTag::update(cid, tags); |
1413 tags = RawObject::SizeTag::update(instance_size, tags); | 1388 tags = RawObject::SizeTag::update(instance_size, tags); |
1414 LoadImmediate(temp2, tags, PP); | 1389 LoadImmediate(temp2, tags); |
1415 str(temp2, FieldAddress(instance, Array::tags_offset())); // Store tags. | 1390 str(temp2, FieldAddress(instance, Array::tags_offset())); // Store tags. |
1416 } else { | 1391 } else { |
1417 b(failure); | 1392 b(failure); |
1418 } | 1393 } |
1419 } | 1394 } |
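TryAllocateArray adds the overflow protection that TryAllocate's TODO asks for: the first b(failure, CS) catches a carry out of the size addition, the second catches the new top running past the heap end. A sketch of just those two checks:

    #include <cstdint>

    bool ArrayAllocationFits(uintptr_t top, uintptr_t heap_end,
                             uintptr_t instance_size, uintptr_t* next_out) {
      uintptr_t next = top + instance_size;
      if (next < top) return false;        // carry out of the add: b(failure, CS)
      if (next >= heap_end) return false;  // cmp(end_address, temp2); b(failure, CS)
      *next_out = next;
      return true;
    }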
1420 | 1395 |
1421 | 1396 |
1422 Address Assembler::ElementAddressForIntIndex(bool is_external, | 1397 Address Assembler::ElementAddressForIntIndex(bool is_external, |
1423 intptr_t cid, | 1398 intptr_t cid, |
1424 intptr_t index_scale, | 1399 intptr_t index_scale, |
(...skipping 30 matching lines...) | |
1455 add(base, array, Operand(index, LSL, shift)); | 1430 add(base, array, Operand(index, LSL, shift)); |
1456 } | 1431 } |
1457 const OperandSize size = Address::OperandSizeFor(cid); | 1432 const OperandSize size = Address::OperandSizeFor(cid); |
1458 ASSERT(Address::CanHoldOffset(offset, Address::Offset, size)); | 1433 ASSERT(Address::CanHoldOffset(offset, Address::Offset, size)); |
1459 return Address(base, offset, Address::Offset, size); | 1434 return Address(base, offset, Address::Offset, size); |
1460 } | 1435 } |
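The visible tail of the element-address helpers reduces to base = array + (index << shift) plus a fixed byte offset; the derivation of shift and offset sits in the lines the diff elides. As plain C++ (a model only, under that reading):

    #include <cstdint>

    uintptr_t ElementAddressSketch(uintptr_t array, uintptr_t index,
                                   int shift, intptr_t offset) {
      uintptr_t base = array + (index << shift);  // add(base, array, LSL, shift)
      return base + offset;                       // Address(base, offset, ...)
    }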
1461 | 1436 |
1462 } // namespace dart | 1437 } // namespace dart |
1463 | 1438 |
1464 #endif // defined TARGET_ARCH_ARM64 | 1439 #endif // defined TARGET_ARCH_ARM64 |