Chromium Code Reviews

Side by Side Diff: runtime/vm/assembler_arm64.cc

Issue 1264543002: Simplify constant pool usage in arm64 code generator (by removing extra argument) (Closed) Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: address comments | Created 5 years, 4 months ago
OLD | NEW
1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // NOLINT 5 #include "vm/globals.h" // NOLINT
6 #if defined(TARGET_ARCH_ARM64) 6 #if defined(TARGET_ARCH_ARM64)
7 7
8 #include "vm/assembler.h" 8 #include "vm/assembler.h"
9 #include "vm/cpu.h" 9 #include "vm/cpu.h"
10 #include "vm/longjump.h" 10 #include "vm/longjump.h"
(...skipping 12 matching lines...)
23 DEFINE_FLAG(bool, use_far_branches, false, "Always use far branches"); 23 DEFINE_FLAG(bool, use_far_branches, false, "Always use far branches");
24 DEFINE_FLAG(bool, print_stop_message, false, "Print stop message."); 24 DEFINE_FLAG(bool, print_stop_message, false, "Print stop message.");
25 DECLARE_FLAG(bool, inline_alloc); 25 DECLARE_FLAG(bool, inline_alloc);
26 26
27 27
28 Assembler::Assembler(bool use_far_branches) 28 Assembler::Assembler(bool use_far_branches)
29 : buffer_(), 29 : buffer_(),
30 prologue_offset_(-1), 30 prologue_offset_(-1),
31 use_far_branches_(use_far_branches), 31 use_far_branches_(use_far_branches),
32 comments_(), 32 comments_(),
33 constant_pool_allowed_(true) { 33 constant_pool_allowed_(false) {
34 } 34 }
35 35
36 36
37 void Assembler::InitializeMemoryWithBreakpoints(uword data, intptr_t length) { 37 void Assembler::InitializeMemoryWithBreakpoints(uword data, intptr_t length) {
38 ASSERT(Utils::IsAligned(data, 4)); 38 ASSERT(Utils::IsAligned(data, 4));
39 ASSERT(Utils::IsAligned(length, 4)); 39 ASSERT(Utils::IsAligned(length, 4));
40 const uword end = data + length; 40 const uword end = data + length;
41 while (data < end) { 41 while (data < end) {
42 *reinterpret_cast<int32_t*>(data) = Instr::kBreakPointInstruction; 42 *reinterpret_cast<int32_t*>(data) = Instr::kBreakPointInstruction;
43 data += 4; 43 data += 4;
(...skipping 251 matching lines...)
295 imm_s_fixed >>= 1; 295 imm_s_fixed >>= 1;
296 continue; 296 continue;
297 } 297 }
298 298
299 // 6. Otherwise, the value can't be encoded. 299 // 6. Otherwise, the value can't be encoded.
300 return false; 300 return false;
301 } 301 }
302 } 302 }
303 303
304 304
305 void Assembler::LoadPoolPointer(Register pp) { 305 void Assembler::LoadPoolPointer() {
306 const intptr_t object_pool_pc_dist = 306 const intptr_t object_pool_pc_dist =
307 Instructions::HeaderSize() - Instructions::object_pool_offset() + 307 Instructions::HeaderSize() - Instructions::object_pool_offset() +
308 CodeSize(); 308 CodeSize();
309 // PP <- Read(PC - object_pool_pc_dist). 309 // PP <- Read(PC - object_pool_pc_dist).
310 ldr(pp, Address::PC(-object_pool_pc_dist)); 310 ldr(PP, Address::PC(-object_pool_pc_dist));
311 311
312 // When in the PP register, the pool pointer is untagged. When we 312 // When in the PP register, the pool pointer is untagged. When we
313 // push it on the stack with TagAndPushPP it is tagged again. PopAndUntagPP 313 // push it on the stack with TagAndPushPP it is tagged again. PopAndUntagPP
314 // then untags when restoring from the stack. This will make loading from the 314 // then untags when restoring from the stack. This will make loading from the
315 // object pool only one instruction for the first 4096 entries. Otherwise, 315 // object pool only one instruction for the first 4096 entries. Otherwise,
316 // because the offset wouldn't be aligned, it would be only one instruction 316 // because the offset wouldn't be aligned, it would be only one instruction
317 // for the first 64 entries. 317 // for the first 64 entries.
318 sub(pp, pp, Operand(kHeapObjectTag)); 318 sub(PP, PP, Operand(kHeapObjectTag));
319 set_constant_pool_allowed(true);
319 } 320 }
320 321
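The comment above packs in some arithmetic worth spelling out: a 64-bit LDR with an unsigned scaled immediate encodes a 12-bit offset in units of 8 bytes, so an untagged (8-byte aligned) PP reaches 2^12 = 4096 pool slots in a single instruction, while a tagged PP would leave every offset misaligned and unusable by that addressing form. A standalone sanity check of those numbers (a sketch, not VM code):

  #include <cstdint>
  #include <cstdio>

  int main() {
    const int64_t kSlotSize = 8;                     // one 64-bit pool entry
    const int64_t kMaxScaledImm = (1 << 12) - 1;     // 12-bit LDR immediate
    printf("one-instruction slots: %lld\n",
           (long long)(kMaxScaledImm + 1));          // 4096 (offsets 0..4095)
    printf("byte range: 0..%lld\n",
           (long long)(kMaxScaledImm * kSlotSize));  // 0..32760, 8-byte steps
    return 0;
  }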
321 322
322 void Assembler::LoadWordFromPoolOffset(Register dst, Register pp, 323 void Assembler::LoadWordFromPoolOffset(Register dst, uint32_t offset) {
323 uint32_t offset) { 324 ASSERT(constant_pool_allowed());
324 ASSERT(dst != pp); 325 ASSERT(dst != PP);
325 Operand op; 326 Operand op;
326 const uint32_t upper20 = offset & 0xfffff000; 327 const uint32_t upper20 = offset & 0xfffff000;
327 if (Address::CanHoldOffset(offset)) { 328 if (Address::CanHoldOffset(offset)) {
328 ldr(dst, Address(pp, offset)); 329 ldr(dst, Address(PP, offset));
329 } else if (Operand::CanHold(upper20, kXRegSizeInBits, &op) == 330 } else if (Operand::CanHold(upper20, kXRegSizeInBits, &op) ==
330 Operand::Immediate) { 331 Operand::Immediate) {
331 const uint32_t lower12 = offset & 0x00000fff; 332 const uint32_t lower12 = offset & 0x00000fff;
332 ASSERT(Address::CanHoldOffset(lower12)); 333 ASSERT(Address::CanHoldOffset(lower12));
333 add(dst, pp, op); 334 add(dst, PP, op);
334 ldr(dst, Address(dst, lower12)); 335 ldr(dst, Address(dst, lower12));
335 } else { 336 } else {
336 const uint16_t offset_low = Utils::Low16Bits(offset); 337 const uint16_t offset_low = Utils::Low16Bits(offset);
337 const uint16_t offset_high = Utils::High16Bits(offset); 338 const uint16_t offset_high = Utils::High16Bits(offset);
338 movz(dst, Immediate(offset_low), 0); 339 movz(dst, Immediate(offset_low), 0);
339 if (offset_high != 0) { 340 if (offset_high != 0) {
340 movk(dst, Immediate(offset_high), 1); 341 movk(dst, Immediate(offset_high), 1);
341 } 342 }
342 ldr(dst, Address(pp, dst)); 343 ldr(dst, Address(PP, dst));
343 } 344 }
344 } 345 }
345 346
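The middle branch above is the interesting one: when the offset is too large for a single LDR, its upper 20 bits go into an ADD immediate and the remaining low 12 bits into the LDR, so the load still costs only two instructions. A minimal sketch of that split, using a made-up offset:

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint32_t offset = 0x1234abc;             // illustrative pool offset
    const uint32_t upper20 = offset & 0xfffff000;  // becomes the ADD immediate
    const uint32_t lower12 = offset & 0x00000fff;  // becomes the LDR offset
    printf("add dst, PP, #0x%x ; ldr dst, [dst, #0x%x]\n", upper20, lower12);
    return (upper20 | lower12) == offset ? 0 : 1;  // pieces always recombine
  }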
346 347
347 void Assembler::LoadWordFromPoolOffsetFixed(Register dst, Register pp, 348 void Assembler::LoadWordFromPoolOffsetFixed(Register dst, uint32_t offset) {
348 uint32_t offset) { 349 ASSERT(constant_pool_allowed());
349 ASSERT(dst != pp); 350 ASSERT(dst != PP);
350 Operand op; 351 Operand op;
351 const uint32_t upper20 = offset & 0xfffff000; 352 const uint32_t upper20 = offset & 0xfffff000;
352 const uint32_t lower12 = offset & 0x00000fff; 353 const uint32_t lower12 = offset & 0x00000fff;
353 const Operand::OperandType ot = 354 const Operand::OperandType ot =
354 Operand::CanHold(upper20, kXRegSizeInBits, &op); 355 Operand::CanHold(upper20, kXRegSizeInBits, &op);
355 ASSERT(ot == Operand::Immediate); 356 ASSERT(ot == Operand::Immediate);
356 ASSERT(Address::CanHoldOffset(lower12)); 357 ASSERT(Address::CanHoldOffset(lower12));
357 add(dst, pp, op); 358 add(dst, PP, op);
358 ldr(dst, Address(dst, lower12)); 359 ldr(dst, Address(dst, lower12));
359 } 360 }
360 361
361 362
362 intptr_t Assembler::FindImmediate(int64_t imm) { 363 intptr_t Assembler::FindImmediate(int64_t imm) {
363 return object_pool_wrapper_.FindImmediate(imm); 364 return object_pool_wrapper_.FindImmediate(imm);
364 } 365 }
365 366
366 367
367 bool Assembler::CanLoadFromObjectPool(const Object& object) const { 368 bool Assembler::CanLoadFromObjectPool(const Object& object) const {
368 ASSERT(!Thread::CanLoadFromThread(object)); 369 ASSERT(!Thread::CanLoadFromThread(object));
369 if (!constant_pool_allowed()) { 370 if (!constant_pool_allowed()) {
370 return false; 371 return false;
371 } 372 }
372 373
373 // TODO(zra, kmillikin): Also load other large immediates from the object 374 // TODO(zra, kmillikin): Also load other large immediates from the object
374 // pool 375 // pool
375 if (object.IsSmi()) { 376 if (object.IsSmi()) {
376 // If the raw smi does not fit into a 32-bit signed int, then we'll keep 377 // If the raw smi does not fit into a 32-bit signed int, then we'll keep
377 // the raw value in the object pool. 378 // the raw value in the object pool.
378 return !Utils::IsInt(32, reinterpret_cast<int64_t>(object.raw())); 379 return !Utils::IsInt(32, reinterpret_cast<int64_t>(object.raw()));
379 } 380 }
380 ASSERT(object.IsNotTemporaryScopedHandle()); 381 ASSERT(object.IsNotTemporaryScopedHandle());
381 ASSERT(object.IsOld()); 382 ASSERT(object.IsOld());
382 return true; 383 return true;
383 } 384 }
384 385
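The Smi branch above trades one load strategy for another: a Smi's raw pointer bits are just the tagged value, so if they fit in a signed 32-bit immediate the value is cheaper to materialize inline than to fetch from the pool. A sketch of that cutoff check (the shift-by-one tagging is illustrative of Dart's Smi encoding):

  #include <cstdint>
  #include <cstdio>

  // Mirrors Utils::IsInt(32, v): does v fit in a signed 32-bit value?
  static bool IsInt32(int64_t v) { return v >= INT32_MIN && v <= INT32_MAX; }

  int main() {
    const int64_t small_raw = int64_t{42} << 1;      // raw bits of Smi 42
    const int64_t large_raw = int64_t{1} << 40;      // raw bits of Smi 2^39
    printf("42   -> pool? %d\n", !IsInt32(small_raw));  // 0: load inline
    printf("2^39 -> pool? %d\n", !IsInt32(large_raw));  // 1: keep in pool
    return 0;
  }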
385 386
386 bool Assembler::CanLoadImmediateFromPool(int64_t imm, Register pp) { 387 void Assembler::LoadExternalLabel(Register dst, const ExternalLabel* label) {
387 if (!constant_pool_allowed()) { 388 if (constant_pool_allowed()) {
388 return false;
389 }
390 return !Utils::IsInt(32, imm) && (pp != kNoPP);
391 }
392
393
394 void Assembler::LoadExternalLabel(Register dst,
395 const ExternalLabel* label,
396 Patchability patchable,
397 Register pp) {
398 const int64_t target = static_cast<int64_t>(label->address());
399 if (CanLoadImmediateFromPool(target, pp)) {
400 const int32_t offset = ObjectPool::element_offset( 389 const int32_t offset = ObjectPool::element_offset(
401 object_pool_wrapper_.FindExternalLabel(label, patchable)); 390 object_pool_wrapper_.FindExternalLabel(label, kNotPatchable));
402 LoadWordFromPoolOffset(dst, pp, offset); 391 LoadWordFromPoolOffset(dst, offset);
403 } else { 392 } else {
404 LoadImmediate(dst, target, kNoPP); 393 const int64_t target = static_cast<int64_t>(label->address());
394 LoadImmediate(dst, target);
405 } 395 }
406 } 396 }
407 397
408 398
409 void Assembler::LoadExternalLabelFixed(Register dst, 399 void Assembler::LoadExternalLabelFixed(Register dst,
410 const ExternalLabel* label, 400 const ExternalLabel* label,
411 Patchability patchable, 401 Patchability patchable) {
412 Register pp) {
413 const int32_t offset = ObjectPool::element_offset( 402 const int32_t offset = ObjectPool::element_offset(
414 object_pool_wrapper_.FindExternalLabel(label, patchable)); 403 object_pool_wrapper_.FindExternalLabel(label, patchable));
415 LoadWordFromPoolOffsetFixed(dst, pp, offset); 404 LoadWordFromPoolOffsetFixed(dst, offset);
416 } 405 }
417 406
418 407
419 void Assembler::LoadIsolate(Register dst) { 408 void Assembler::LoadIsolate(Register dst) {
420 ldr(dst, Address(THR, Thread::isolate_offset())); 409 ldr(dst, Address(THR, Thread::isolate_offset()));
421 } 410 }
422 411
423 412
424 void Assembler::LoadObjectHelper(Register dst, 413 void Assembler::LoadObjectHelper(Register dst,
425 const Object& object, 414 const Object& object,
426 Register pp,
427 bool is_unique) { 415 bool is_unique) {
428 if (Thread::CanLoadFromThread(object)) { 416 if (Thread::CanLoadFromThread(object)) {
429 ldr(dst, Address(THR, Thread::OffsetFromThread(object))); 417 ldr(dst, Address(THR, Thread::OffsetFromThread(object)));
430 } else if (CanLoadFromObjectPool(object)) { 418 } else if (CanLoadFromObjectPool(object)) {
431 const int32_t offset = ObjectPool::element_offset( 419 const int32_t offset = ObjectPool::element_offset(
432 is_unique ? object_pool_wrapper_.AddObject(object) 420 is_unique ? object_pool_wrapper_.AddObject(object)
433 : object_pool_wrapper_.FindObject(object)); 421 : object_pool_wrapper_.FindObject(object));
434 LoadWordFromPoolOffset(dst, pp, offset); 422 LoadWordFromPoolOffset(dst, offset);
435 } else { 423 } else {
436 ASSERT(object.IsSmi() || object.InVMHeap()); 424 ASSERT(object.IsSmi() || object.InVMHeap());
437 LoadDecodableImmediate(dst, reinterpret_cast<int64_t>(object.raw()), pp); 425 LoadDecodableImmediate(dst, reinterpret_cast<int64_t>(object.raw()));
438 } 426 }
439 } 427 }
440 428
441 429
442 void Assembler::LoadObject(Register dst, const Object& object, Register pp) { 430 void Assembler::LoadObject(Register dst, const Object& object) {
443 LoadObjectHelper(dst, object, pp, false); 431 LoadObjectHelper(dst, object, false);
444 } 432 }
445 433
446 434
447 void Assembler::LoadUniqueObject(Register dst, 435 void Assembler::LoadUniqueObject(Register dst, const Object& object) {
448 const Object& object, 436 LoadObjectHelper(dst, object, true);
449 Register pp) {
450 LoadObjectHelper(dst, object, pp, true);
451 } 437 }
452 438
453 439
454 void Assembler::CompareObject(Register reg, const Object& object, Register pp) { 440 void Assembler::CompareObject(Register reg, const Object& object) {
455 if (Thread::CanLoadFromThread(object)) { 441 if (Thread::CanLoadFromThread(object)) {
456 ldr(TMP, Address(THR, Thread::OffsetFromThread(object))); 442 ldr(TMP, Address(THR, Thread::OffsetFromThread(object)));
457 CompareRegisters(reg, TMP); 443 CompareRegisters(reg, TMP);
458 } else if (CanLoadFromObjectPool(object)) { 444 } else if (CanLoadFromObjectPool(object)) {
459 LoadObject(TMP, object, pp); 445 LoadObject(TMP, object);
460 CompareRegisters(reg, TMP); 446 CompareRegisters(reg, TMP);
461 } else { 447 } else {
462 CompareImmediate(reg, reinterpret_cast<int64_t>(object.raw()), pp); 448 CompareImmediate(reg, reinterpret_cast<int64_t>(object.raw()));
463 } 449 }
464 } 450 }
465 451
466 452
467 void Assembler::LoadDecodableImmediate(Register reg, int64_t imm, Register pp) { 453 void Assembler::LoadDecodableImmediate(Register reg, int64_t imm) {
468 if ((pp != kNoPP) && constant_pool_allowed()) { 454 if (constant_pool_allowed()) {
469 const int32_t offset = ObjectPool::element_offset(FindImmediate(imm)); 455 const int32_t offset = ObjectPool::element_offset(FindImmediate(imm));
470 LoadWordFromPoolOffset(reg, pp, offset); 456 LoadWordFromPoolOffset(reg, offset);
471 } else { 457 } else {
472 // TODO(zra): Since this sequence only needs to be decodable, it can be 458 // TODO(zra): Since this sequence only needs to be decodable, it can be
473 // of variable length. 459 // of variable length.
474 LoadImmediateFixed(reg, imm); 460 LoadImmediateFixed(reg, imm);
475 } 461 }
476 } 462 }
477 463
478 464
479 void Assembler::LoadImmediateFixed(Register reg, int64_t imm) { 465 void Assembler::LoadImmediateFixed(Register reg, int64_t imm) {
480 const uint32_t w0 = Utils::Low32Bits(imm); 466 const uint32_t w0 = Utils::Low32Bits(imm);
481 const uint32_t w1 = Utils::High32Bits(imm); 467 const uint32_t w1 = Utils::High32Bits(imm);
482 const uint16_t h0 = Utils::Low16Bits(w0); 468 const uint16_t h0 = Utils::Low16Bits(w0);
483 const uint16_t h1 = Utils::High16Bits(w0); 469 const uint16_t h1 = Utils::High16Bits(w0);
484 const uint16_t h2 = Utils::Low16Bits(w1); 470 const uint16_t h2 = Utils::Low16Bits(w1);
485 const uint16_t h3 = Utils::High16Bits(w1); 471 const uint16_t h3 = Utils::High16Bits(w1);
486 movz(reg, Immediate(h0), 0); 472 movz(reg, Immediate(h0), 0);
487 movk(reg, Immediate(h1), 1); 473 movk(reg, Immediate(h1), 1);
488 movk(reg, Immediate(h2), 2); 474 movk(reg, Immediate(h2), 2);
489 movk(reg, Immediate(h3), 3); 475 movk(reg, Immediate(h3), 3);
490 } 476 }
491 477
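LoadImmediateFixed always emits exactly four instructions (one movz plus three movk), so callers get a sequence of known length that can later be patched in place. The halfword split it performs, reassembled as a standalone check:

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint64_t imm = 0x123456789abcdef0;   // arbitrary 64-bit immediate
    const uint16_t h0 = imm & 0xffff;          // movz reg, h0, lsl #0
    const uint16_t h1 = (imm >> 16) & 0xffff;  // movk reg, h1, lsl #16
    const uint16_t h2 = (imm >> 32) & 0xffff;  // movk reg, h2, lsl #32
    const uint16_t h3 = (imm >> 48) & 0xffff;  // movk reg, h3, lsl #48
    const uint64_t rebuilt = (uint64_t)h0 | ((uint64_t)h1 << 16) |
                             ((uint64_t)h2 << 32) | ((uint64_t)h3 << 48);
    printf("%d\n", rebuilt == imm);  // 1: four halfwords cover all 64 bits
    return 0;
  }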
492 478
493 void Assembler::LoadImmediate(Register reg, int64_t imm, Register pp) { 479 void Assembler::LoadImmediate(Register reg, int64_t imm) {
494 Comment("LoadImmediate"); 480 Comment("LoadImmediate");
495 if (CanLoadImmediateFromPool(imm, pp)) { 481 // Is it 0?
482 if (imm == 0) {
483 movz(reg, Immediate(0), 0);
484 return;
485 }
486
487 // Can we use one orri operation?
488 Operand op;
489 Operand::OperandType ot;
490 ot = Operand::CanHold(imm, kXRegSizeInBits, &op);
491 if (ot == Operand::BitfieldImm) {
492 orri(reg, ZR, Immediate(imm));
493 return;
494 }
495
496 // We may fall back on movz, movk, movn.
497 const uint32_t w0 = Utils::Low32Bits(imm);
498 const uint32_t w1 = Utils::High32Bits(imm);
499 const uint16_t h0 = Utils::Low16Bits(w0);
500 const uint16_t h1 = Utils::High16Bits(w0);
501 const uint16_t h2 = Utils::Low16Bits(w1);
502 const uint16_t h3 = Utils::High16Bits(w1);
503
504 // Special case for w1 == 0xffffffff
505 if (w1 == 0xffffffff) {
506 if (h1 == 0xffff) {
507 movn(reg, Immediate(~h0), 0);
508 } else {
509 movn(reg, Immediate(~h1), 1);
510 movk(reg, Immediate(h0), 0);
511 }
512 return;
513 }
514
515 // Special case for h3 == 0xffff
516 if (h3 == 0xffff) {
517 // We know h2 != 0xffff.
518 movn(reg, Immediate(~h2), 2);
519 if (h1 != 0xffff) {
520 movk(reg, Immediate(h1), 1);
521 }
522 if (h0 != 0xffff) {
523 movk(reg, Immediate(h0), 0);
524 }
525 return;
526 }
527
528 // Use constant pool if allowed, unless we can load imm with 2 instructions.
529 if ((w1 != 0) && constant_pool_allowed()) {
496 const int32_t offset = ObjectPool::element_offset(FindImmediate(imm)); 530 const int32_t offset = ObjectPool::element_offset(FindImmediate(imm));
497 LoadWordFromPoolOffset(reg, pp, offset); 531 LoadWordFromPoolOffset(reg, offset);
498 } else { 532 return;
499 // 0. Is it 0? 533 }
500 if (imm == 0) {
501 movz(reg, Immediate(0), 0);
502 return;
503 }
504 534
505 // 1. Can we use one orri operation? 535 bool initialized = false;
506 Operand op; 536 if (h0 != 0) {
507 Operand::OperandType ot; 537 movz(reg, Immediate(h0), 0);
508 ot = Operand::CanHold(imm, kXRegSizeInBits, &op); 538 initialized = true;
509 if (ot == Operand::BitfieldImm) { 539 }
510 orri(reg, ZR, Immediate(imm)); 540 if (h1 != 0) {
511 return; 541 if (initialized) {
512 } 542 movk(reg, Immediate(h1), 1);
513 543 } else {
514 // 2. Fall back on movz, movk, movn. 544 movz(reg, Immediate(h1), 1);
515 const uint32_t w0 = Utils::Low32Bits(imm);
516 const uint32_t w1 = Utils::High32Bits(imm);
517 const uint16_t h0 = Utils::Low16Bits(w0);
518 const uint16_t h1 = Utils::High16Bits(w0);
519 const uint16_t h2 = Utils::Low16Bits(w1);
520 const uint16_t h3 = Utils::High16Bits(w1);
521
522 // Special case for w1 == 0xffffffff
523 if (w1 == 0xffffffff) {
524 if (h1 == 0xffff) {
525 movn(reg, Immediate(~h0), 0);
526 } else {
527 movn(reg, Immediate(~h1), 1);
528 movk(reg, Immediate(h0), 0);
529 }
530 return;
531 }
532
533 // Special case for h3 == 0xffff
534 if (h3 == 0xffff) {
535 // We know h2 != 0xffff.
536 movn(reg, Immediate(~h2), 2);
537 if (h1 != 0xffff) {
538 movk(reg, Immediate(h1), 1);
539 }
540 if (h0 != 0xffff) {
541 movk(reg, Immediate(h0), 0);
542 }
543 return;
544 }
545
546 bool initialized = false;
547 if (h0 != 0) {
548 movz(reg, Immediate(h0), 0);
549 initialized = true; 545 initialized = true;
550 } 546 }
551 if (h1 != 0) { 547 }
552 if (initialized) { 548 if (h2 != 0) {
553 movk(reg, Immediate(h1), 1); 549 if (initialized) {
554 } else { 550 movk(reg, Immediate(h2), 2);
555 movz(reg, Immediate(h1), 1); 551 } else {
556 initialized = true; 552 movz(reg, Immediate(h2), 2);
557 } 553 initialized = true;
558 } 554 }
559 if (h2 != 0) { 555 }
560 if (initialized) { 556 if (h3 != 0) {
561 movk(reg, Immediate(h2), 2); 557 if (initialized) {
562 } else { 558 movk(reg, Immediate(h3), 3);
563 movz(reg, Immediate(h2), 2); 559 } else {
564 initialized = true; 560 movz(reg, Immediate(h3), 3);
565 }
566 }
567 if (h3 != 0) {
568 if (initialized) {
569 movk(reg, Immediate(h3), 3);
570 } else {
571 movz(reg, Immediate(h3), 3);
572 }
573 } 561 }
574 } 562 }
575 } 563 }
576 564
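The restructured LoadImmediate now tries all the cheap encodings (zero, logical immediate, the two movn patterns) before consulting the pool, and only falls back to the pool when the upper 32 bits are nonzero, since any immediate with w1 == 0 costs at most two mov instructions. A small sketch of the fallback chain's cost, counting only the movz/movk path (the orr, movn, and pool shortcuts are ignored here):

  #include <cstdint>
  #include <cstdio>

  static int MovChainLength(uint64_t imm) {
    if (imm == 0) return 1;  // a lone movz of zero
    int n = 0;
    for (int shift = 0; shift < 64; shift += 16) {
      if (((imm >> shift) & 0xffff) != 0) n++;  // zero halfwords are skipped
    }
    return n;
  }

  int main() {
    printf("%d\n", MovChainLength(0));                   // 1
    printf("%d\n", MovChainLength(0x00000000dead0000));  // 1: movz ... lsl #16
    printf("%d\n", MovChainLength(0x123456789abcdef0));  // 4: movz + 3 movk
    return 0;
  }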
577 565
578 void Assembler::LoadDImmediate(VRegister vd, double immd, Register pp) { 566 void Assembler::LoadDImmediate(VRegister vd, double immd) {
579 if (!fmovdi(vd, immd)) { 567 if (!fmovdi(vd, immd)) {
580 int64_t imm = bit_cast<int64_t, double>(immd); 568 int64_t imm = bit_cast<int64_t, double>(immd);
581 LoadImmediate(TMP, imm, pp); 569 LoadImmediate(TMP, imm);
582 fmovdr(vd, TMP); 570 fmovdr(vd, TMP);
583 } 571 }
584 } 572 }
585 573
586 574
587 void Assembler::AddImmediate( 575 void Assembler::AddImmediate(Register dest, Register rn, int64_t imm) {
588 Register dest, Register rn, int64_t imm, Register pp) {
589 Operand op; 576 Operand op;
590 if (imm == 0) { 577 if (imm == 0) {
591 if (dest != rn) { 578 if (dest != rn) {
592 mov(dest, rn); 579 mov(dest, rn);
593 } 580 }
594 return; 581 return;
595 } 582 }
596 if (Operand::CanHold(imm, kXRegSizeInBits, &op) == Operand::Immediate) { 583 if (Operand::CanHold(imm, kXRegSizeInBits, &op) == Operand::Immediate) {
597 add(dest, rn, op); 584 add(dest, rn, op);
598 } else if (Operand::CanHold(-imm, kXRegSizeInBits, &op) == 585 } else if (Operand::CanHold(-imm, kXRegSizeInBits, &op) ==
599 Operand::Immediate) { 586 Operand::Immediate) {
600 sub(dest, rn, op); 587 sub(dest, rn, op);
601 } else { 588 } else {
602 // TODO(zra): Try adding top 12 bits, then bottom 12 bits. 589 // TODO(zra): Try adding top 12 bits, then bottom 12 bits.
603 ASSERT(rn != TMP2); 590 ASSERT(rn != TMP2);
604 LoadImmediate(TMP2, imm, pp); 591 LoadImmediate(TMP2, imm);
605 add(dest, rn, Operand(TMP2)); 592 add(dest, rn, Operand(TMP2));
606 } 593 }
607 } 594 }
608 595
609 596
610 void Assembler::AddImmediateSetFlags( 597 void Assembler::AddImmediateSetFlags(Register dest, Register rn, int64_t imm) {
611 Register dest, Register rn, int64_t imm, Register pp) {
612 Operand op; 598 Operand op;
613 if (Operand::CanHold(imm, kXRegSizeInBits, &op) == Operand::Immediate) { 599 if (Operand::CanHold(imm, kXRegSizeInBits, &op) == Operand::Immediate) {
614 // Handles imm == kMinInt64. 600 // Handles imm == kMinInt64.
615 adds(dest, rn, op); 601 adds(dest, rn, op);
616 } else if (Operand::CanHold(-imm, kXRegSizeInBits, &op) == 602 } else if (Operand::CanHold(-imm, kXRegSizeInBits, &op) ==
617 Operand::Immediate) { 603 Operand::Immediate) {
618 ASSERT(imm != kMinInt64); // Would cause erroneous overflow detection. 604 ASSERT(imm != kMinInt64); // Would cause erroneous overflow detection.
619 subs(dest, rn, op); 605 subs(dest, rn, op);
620 } else { 606 } else {
621 // TODO(zra): Try adding top 12 bits, then bottom 12 bits. 607 // TODO(zra): Try adding top 12 bits, then bottom 12 bits.
622 ASSERT(rn != TMP2); 608 ASSERT(rn != TMP2);
623 LoadImmediate(TMP2, imm, pp); 609 LoadImmediate(TMP2, imm);
624 adds(dest, rn, Operand(TMP2)); 610 adds(dest, rn, Operand(TMP2));
625 } 611 }
626 } 612 }
627 613
628 614
629 void Assembler::SubImmediateSetFlags( 615 void Assembler::SubImmediateSetFlags(Register dest, Register rn, int64_t imm) {
630 Register dest, Register rn, int64_t imm, Register pp) {
631 Operand op; 616 Operand op;
632 if (Operand::CanHold(imm, kXRegSizeInBits, &op) == Operand::Immediate) { 617 if (Operand::CanHold(imm, kXRegSizeInBits, &op) == Operand::Immediate) {
633 // Handles imm == kMinInt64. 618 // Handles imm == kMinInt64.
634 subs(dest, rn, op); 619 subs(dest, rn, op);
635 } else if (Operand::CanHold(-imm, kXRegSizeInBits, &op) == 620 } else if (Operand::CanHold(-imm, kXRegSizeInBits, &op) ==
636 Operand::Immediate) { 621 Operand::Immediate) {
637 ASSERT(imm != kMinInt64); // Would cause erroneous overflow detection. 622 ASSERT(imm != kMinInt64); // Would cause erroneous overflow detection.
638 adds(dest, rn, op); 623 adds(dest, rn, op);
639 } else { 624 } else {
640 // TODO(zra): Try subtracting top 12 bits, then bottom 12 bits. 625 // TODO(zra): Try subtracting top 12 bits, then bottom 12 bits.
641 ASSERT(rn != TMP2); 626 ASSERT(rn != TMP2);
642 LoadImmediate(TMP2, imm, pp); 627 LoadImmediate(TMP2, imm);
643 subs(dest, rn, Operand(TMP2)); 628 subs(dest, rn, Operand(TMP2));
644 } 629 }
645 } 630 }
646 631
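The kMinInt64 asserts in the two flag-setting helpers above guard a genuine two's-complement trap: INT64_MIN has no positive counterpart, so negating it yields INT64_MIN again, and the negated-operand fallback would then corrupt overflow detection. A one-line demonstration:

  #include <cstdint>
  #include <cstdio>

  int main() {
    const int64_t min = INT64_MIN;
    // Negate via unsigned arithmetic to sidestep C++ signed-overflow UB.
    const int64_t negated = (int64_t)(0u - (uint64_t)min);
    printf("%d\n", negated == min);  // 1: -INT64_MIN wraps back to INT64_MIN
    return 0;
  }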
647 632
648 void Assembler::AndImmediate( 633 void Assembler::AndImmediate(Register rd, Register rn, int64_t imm) {
649 Register rd, Register rn, int64_t imm, Register pp) {
650 Operand imm_op; 634 Operand imm_op;
651 if (Operand::IsImmLogical(imm, kXRegSizeInBits, &imm_op)) { 635 if (Operand::IsImmLogical(imm, kXRegSizeInBits, &imm_op)) {
652 andi(rd, rn, Immediate(imm)); 636 andi(rd, rn, Immediate(imm));
653 } else { 637 } else {
654 LoadImmediate(TMP, imm, pp); 638 LoadImmediate(TMP, imm);
655 and_(rd, rn, Operand(TMP)); 639 and_(rd, rn, Operand(TMP));
656 } 640 }
657 } 641 }
658 642
659 643
660 void Assembler::OrImmediate( 644 void Assembler::OrImmediate(Register rd, Register rn, int64_t imm) {
661 Register rd, Register rn, int64_t imm, Register pp) {
662 Operand imm_op; 645 Operand imm_op;
663 if (Operand::IsImmLogical(imm, kXRegSizeInBits, &imm_op)) { 646 if (Operand::IsImmLogical(imm, kXRegSizeInBits, &imm_op)) {
664 orri(rd, rn, Immediate(imm)); 647 orri(rd, rn, Immediate(imm));
665 } else { 648 } else {
666 LoadImmediate(TMP, imm, pp); 649 LoadImmediate(TMP, imm);
667 orr(rd, rn, Operand(TMP)); 650 orr(rd, rn, Operand(TMP));
668 } 651 }
669 } 652 }
670 653
671 654
672 void Assembler::XorImmediate( 655 void Assembler::XorImmediate(Register rd, Register rn, int64_t imm) {
673 Register rd, Register rn, int64_t imm, Register pp) {
674 Operand imm_op; 656 Operand imm_op;
675 if (Operand::IsImmLogical(imm, kXRegSizeInBits, &imm_op)) { 657 if (Operand::IsImmLogical(imm, kXRegSizeInBits, &imm_op)) {
676 eori(rd, rn, Immediate(imm)); 658 eori(rd, rn, Immediate(imm));
677 } else { 659 } else {
678 LoadImmediate(TMP, imm, pp); 660 LoadImmediate(TMP, imm);
679 eor(rd, rn, Operand(TMP)); 661 eor(rd, rn, Operand(TMP));
680 } 662 }
681 } 663 }
682 664
683 665
684 void Assembler::TestImmediate(Register rn, int64_t imm, Register pp) { 666 void Assembler::TestImmediate(Register rn, int64_t imm) {
685 Operand imm_op; 667 Operand imm_op;
686 if (Operand::IsImmLogical(imm, kXRegSizeInBits, &imm_op)) { 668 if (Operand::IsImmLogical(imm, kXRegSizeInBits, &imm_op)) {
687 tsti(rn, Immediate(imm)); 669 tsti(rn, Immediate(imm));
688 } else { 670 } else {
689 LoadImmediate(TMP, imm, pp); 671 LoadImmediate(TMP, imm);
690 tst(rn, Operand(TMP)); 672 tst(rn, Operand(TMP));
691 } 673 }
692 } 674 }
693 675
694 676
695 void Assembler::CompareImmediate(Register rn, int64_t imm, Register pp) { 677 void Assembler::CompareImmediate(Register rn, int64_t imm) {
696 Operand op; 678 Operand op;
697 if (Operand::CanHold(imm, kXRegSizeInBits, &op) == Operand::Immediate) { 679 if (Operand::CanHold(imm, kXRegSizeInBits, &op) == Operand::Immediate) {
698 cmp(rn, op); 680 cmp(rn, op);
699 } else if (Operand::CanHold(-imm, kXRegSizeInBits, &op) == 681 } else if (Operand::CanHold(-imm, kXRegSizeInBits, &op) ==
700 Operand::Immediate) { 682 Operand::Immediate) {
701 cmn(rn, op); 683 cmn(rn, op);
702 } else { 684 } else {
703 ASSERT(rn != TMP2); 685 ASSERT(rn != TMP2);
704 LoadImmediate(TMP2, imm, pp); 686 LoadImmediate(TMP2, imm);
705 cmp(rn, Operand(TMP2)); 687 cmp(rn, Operand(TMP2));
706 } 688 }
707 } 689 }
708 690
709 691
710 void Assembler::LoadFromOffset( 692 void Assembler::LoadFromOffset(
711 Register dest, Register base, int32_t offset, Register pp, OperandSize sz) { 693 Register dest, Register base, int32_t offset, OperandSize sz) {
712 if (Address::CanHoldOffset(offset, Address::Offset, sz)) { 694 if (Address::CanHoldOffset(offset, Address::Offset, sz)) {
713 ldr(dest, Address(base, offset, Address::Offset, sz), sz); 695 ldr(dest, Address(base, offset, Address::Offset, sz), sz);
714 } else { 696 } else {
715 ASSERT(base != TMP2); 697 ASSERT(base != TMP2);
716 AddImmediate(TMP2, base, offset, pp); 698 AddImmediate(TMP2, base, offset);
717 ldr(dest, Address(TMP2), sz); 699 ldr(dest, Address(TMP2), sz);
718 } 700 }
719 } 701 }
720 702
721 703
722 void Assembler::LoadDFromOffset( 704 void Assembler::LoadDFromOffset(VRegister dest, Register base, int32_t offset) {
723 VRegister dest, Register base, int32_t offset, Register pp) {
724 if (Address::CanHoldOffset(offset, Address::Offset, kDWord)) { 705 if (Address::CanHoldOffset(offset, Address::Offset, kDWord)) {
725 fldrd(dest, Address(base, offset, Address::Offset, kDWord)); 706 fldrd(dest, Address(base, offset, Address::Offset, kDWord));
726 } else { 707 } else {
727 ASSERT(base != TMP2); 708 ASSERT(base != TMP2);
728 AddImmediate(TMP2, base, offset, pp); 709 AddImmediate(TMP2, base, offset);
729 fldrd(dest, Address(TMP2)); 710 fldrd(dest, Address(TMP2));
730 } 711 }
731 } 712 }
732 713
733 714
734 void Assembler::LoadQFromOffset( 715 void Assembler::LoadQFromOffset(VRegister dest, Register base, int32_t offset) {
735 VRegister dest, Register base, int32_t offset, Register pp) {
736 if (Address::CanHoldOffset(offset, Address::Offset, kQWord)) { 716 if (Address::CanHoldOffset(offset, Address::Offset, kQWord)) {
737 fldrq(dest, Address(base, offset, Address::Offset, kQWord)); 717 fldrq(dest, Address(base, offset, Address::Offset, kQWord));
738 } else { 718 } else {
739 ASSERT(base != TMP2); 719 ASSERT(base != TMP2);
740 AddImmediate(TMP2, base, offset, pp); 720 AddImmediate(TMP2, base, offset);
741 fldrq(dest, Address(TMP2)); 721 fldrq(dest, Address(TMP2));
742 } 722 }
743 } 723 }
744 724
745 725
746 void Assembler::StoreToOffset( 726 void Assembler::StoreToOffset(
747 Register src, Register base, int32_t offset, Register pp, OperandSize sz) { 727 Register src, Register base, int32_t offset, OperandSize sz) {
748 ASSERT(base != TMP2); 728 ASSERT(base != TMP2);
749 if (Address::CanHoldOffset(offset, Address::Offset, sz)) { 729 if (Address::CanHoldOffset(offset, Address::Offset, sz)) {
750 str(src, Address(base, offset, Address::Offset, sz), sz); 730 str(src, Address(base, offset, Address::Offset, sz), sz);
751 } else { 731 } else {
752 ASSERT(src != TMP2); 732 ASSERT(src != TMP2);
753 AddImmediate(TMP2, base, offset, pp); 733 AddImmediate(TMP2, base, offset);
754 str(src, Address(TMP2), sz); 734 str(src, Address(TMP2), sz);
755 } 735 }
756 } 736 }
757 737
758 738
759 void Assembler::StoreDToOffset( 739 void Assembler::StoreDToOffset(VRegister src, Register base, int32_t offset) {
760 VRegister src, Register base, int32_t offset, Register pp) {
761 if (Address::CanHoldOffset(offset, Address::Offset, kDWord)) { 740 if (Address::CanHoldOffset(offset, Address::Offset, kDWord)) {
762 fstrd(src, Address(base, offset, Address::Offset, kDWord)); 741 fstrd(src, Address(base, offset, Address::Offset, kDWord));
763 } else { 742 } else {
764 ASSERT(base != TMP2); 743 ASSERT(base != TMP2);
765 AddImmediate(TMP2, base, offset, pp); 744 AddImmediate(TMP2, base, offset);
766 fstrd(src, Address(TMP2)); 745 fstrd(src, Address(TMP2));
767 } 746 }
768 } 747 }
769 748
770 749
771 void Assembler::StoreQToOffset( 750 void Assembler::StoreQToOffset(VRegister src, Register base, int32_t offset) {
772 VRegister src, Register base, int32_t offset, Register pp) {
773 if (Address::CanHoldOffset(offset, Address::Offset, kQWord)) { 751 if (Address::CanHoldOffset(offset, Address::Offset, kQWord)) {
774 fstrq(src, Address(base, offset, Address::Offset, kQWord)); 752 fstrq(src, Address(base, offset, Address::Offset, kQWord));
775 } else { 753 } else {
776 ASSERT(base != TMP2); 754 ASSERT(base != TMP2);
777 AddImmediate(TMP2, base, offset, pp); 755 AddImmediate(TMP2, base, offset);
778 fstrq(src, Address(TMP2)); 756 fstrq(src, Address(TMP2));
779 } 757 }
780 } 758 }
781 759
782 760
783 void Assembler::VRecps(VRegister vd, VRegister vn) { 761 void Assembler::VRecps(VRegister vd, VRegister vn) {
784 ASSERT(vn != VTMP); 762 ASSERT(vn != VTMP);
785 ASSERT(vd != VTMP); 763 ASSERT(vd != VTMP);
786 764
787 // Reciprocal estimate. 765 // Reciprocal estimate.
(...skipping 53 matching lines...)
841 // And the result with the negated space bit of the object. 819 // And the result with the negated space bit of the object.
842 bic(TMP, TMP, Operand(object)); 820 bic(TMP, TMP, Operand(object));
843 tsti(TMP, Immediate(kNewObjectAlignmentOffset)); 821 tsti(TMP, Immediate(kNewObjectAlignmentOffset));
844 b(no_update, EQ); 822 b(no_update, EQ);
845 } 823 }
846 824
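The bic/tsti pair above is the generational write-barrier filter. Assuming, as elsewhere in the VM, that the kNewObjectAlignmentOffset address bit is set exactly for new-space objects, value & ~object has that bit set only when the value is new and the destination object is old, which is the one case needing a store-buffer entry. A sketch under that assumption (the preceding lines that put the value's bits into TMP are elided above):

  #include <cstdint>
  #include <cstdio>

  static bool NeedsBarrier(uintptr_t object, uintptr_t value) {
    const uintptr_t kNewBit = 8;  // illustrative stand-in for the space bit
    return ((value & ~object) & kNewBit) != 0;  // bic + tsti
  }

  int main() {
    const uintptr_t old_obj = 0x10000, new_obj = 0x20008;  // made-up addresses
    printf("%d\n", NeedsBarrier(old_obj, new_obj));  // 1: new value, old object
    printf("%d\n", NeedsBarrier(new_obj, new_obj));  // 0: both new, skip
    printf("%d\n", NeedsBarrier(old_obj, old_obj));  // 0: both old, skip
    return 0;
  }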
847 825
848 void Assembler::StoreIntoObjectOffset(Register object, 826 void Assembler::StoreIntoObjectOffset(Register object,
849 int32_t offset, 827 int32_t offset,
850 Register value, 828 Register value,
851 Register pp,
852 bool can_value_be_smi) { 829 bool can_value_be_smi) {
853 if (Address::CanHoldOffset(offset - kHeapObjectTag)) { 830 if (Address::CanHoldOffset(offset - kHeapObjectTag)) {
854 StoreIntoObject( 831 StoreIntoObject(
855 object, FieldAddress(object, offset), value, can_value_be_smi); 832 object, FieldAddress(object, offset), value, can_value_be_smi);
856 } else { 833 } else {
857 AddImmediate(TMP, object, offset - kHeapObjectTag, pp); 834 AddImmediate(TMP, object, offset - kHeapObjectTag);
858 StoreIntoObject(object, Address(TMP), value, can_value_be_smi); 835 StoreIntoObject(object, Address(TMP), value, can_value_be_smi);
859 } 836 }
860 } 837 }
861 838
862 839
863 void Assembler::StoreIntoObject(Register object, 840 void Assembler::StoreIntoObject(Register object,
864 const Address& dest, 841 const Address& dest,
865 Register value, 842 Register value,
866 bool can_value_be_smi) { 843 bool can_value_be_smi) {
867 ASSERT(object != value); 844 ASSERT(object != value);
(...skipping 33 matching lines...)
901 StoreIntoObjectFilter(object, value, &done); 878 StoreIntoObjectFilter(object, value, &done);
902 Stop("Store buffer update is required"); 879 Stop("Store buffer update is required");
903 Bind(&done); 880 Bind(&done);
904 #endif // defined(DEBUG) 881 #endif // defined(DEBUG)
905 // No store buffer update. 882 // No store buffer update.
906 } 883 }
907 884
908 885
909 void Assembler::StoreIntoObjectOffsetNoBarrier(Register object, 886 void Assembler::StoreIntoObjectOffsetNoBarrier(Register object,
910 int32_t offset, 887 int32_t offset,
911 Register value, 888 Register value) {
912 Register pp) {
913 if (Address::CanHoldOffset(offset - kHeapObjectTag)) { 889 if (Address::CanHoldOffset(offset - kHeapObjectTag)) {
914 StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value); 890 StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value);
915 } else { 891 } else {
916 AddImmediate(TMP, object, offset - kHeapObjectTag, pp); 892 AddImmediate(TMP, object, offset - kHeapObjectTag);
917 StoreIntoObjectNoBarrier(object, Address(TMP), value); 893 StoreIntoObjectNoBarrier(object, Address(TMP), value);
918 } 894 }
919 } 895 }
920 896
921 897
922 void Assembler::StoreIntoObjectNoBarrier(Register object, 898 void Assembler::StoreIntoObjectNoBarrier(Register object,
923 const Address& dest, 899 const Address& dest,
924 const Object& value) { 900 const Object& value) {
925 ASSERT(value.IsSmi() || value.InVMHeap() || 901 ASSERT(value.IsSmi() || value.InVMHeap() ||
926 (value.IsOld() && value.IsNotTemporaryScopedHandle())); 902 (value.IsOld() && value.IsNotTemporaryScopedHandle()));
927 // No store buffer update. 903 // No store buffer update.
928 LoadObject(TMP2, value, PP); 904 LoadObject(TMP2, value);
929 str(TMP2, dest); 905 str(TMP2, dest);
930 } 906 }
931 907
932 908
933 void Assembler::StoreIntoObjectOffsetNoBarrier(Register object, 909 void Assembler::StoreIntoObjectOffsetNoBarrier(Register object,
934 int32_t offset, 910 int32_t offset,
935 const Object& value, 911 const Object& value) {
936 Register pp) {
937 if (Address::CanHoldOffset(offset - kHeapObjectTag)) { 912 if (Address::CanHoldOffset(offset - kHeapObjectTag)) {
938 StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value); 913 StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value);
939 } else { 914 } else {
940 AddImmediate(TMP, object, offset - kHeapObjectTag, pp); 915 AddImmediate(TMP, object, offset - kHeapObjectTag);
941 StoreIntoObjectNoBarrier(object, Address(TMP), value); 916 StoreIntoObjectNoBarrier(object, Address(TMP), value);
942 } 917 }
943 } 918 }
944 919
945 920
946 void Assembler::LoadClassId(Register result, Register object, Register pp) { 921 void Assembler::LoadClassId(Register result, Register object) {
947 ASSERT(RawObject::kClassIdTagPos == kBitsPerInt32); 922 ASSERT(RawObject::kClassIdTagPos == kBitsPerInt32);
948 ASSERT(RawObject::kClassIdTagSize == kBitsPerInt32); 923 ASSERT(RawObject::kClassIdTagSize == kBitsPerInt32);
949 const intptr_t class_id_offset = Object::tags_offset() + 924 const intptr_t class_id_offset = Object::tags_offset() +
950 RawObject::kClassIdTagPos / kBitsPerByte; 925 RawObject::kClassIdTagPos / kBitsPerByte;
951 LoadFromOffset(result, object, class_id_offset - kHeapObjectTag, pp, 926 LoadFromOffset(result, object, class_id_offset - kHeapObjectTag,
952 kUnsignedWord); 927 kUnsignedWord);
953 } 928 }
954 929
955 930
956 void Assembler::LoadClassById(Register result, Register class_id, Register pp) { 931 void Assembler::LoadClassById(Register result, Register class_id) {
957 ASSERT(result != class_id); 932 ASSERT(result != class_id);
958 LoadIsolate(result); 933 LoadIsolate(result);
959 const intptr_t offset = 934 const intptr_t offset =
960 Isolate::class_table_offset() + ClassTable::table_offset(); 935 Isolate::class_table_offset() + ClassTable::table_offset();
961 LoadFromOffset(result, result, offset, pp); 936 LoadFromOffset(result, result, offset);
962 ldr(result, Address(result, class_id, UXTX, Address::Scaled)); 937 ldr(result, Address(result, class_id, UXTX, Address::Scaled));
963 } 938 }
964 939
965 940
966 void Assembler::LoadClass(Register result, Register object, Register pp) { 941 void Assembler::LoadClass(Register result, Register object) {
967 ASSERT(object != TMP); 942 ASSERT(object != TMP);
968 LoadClassId(TMP, object, pp); 943 LoadClassId(TMP, object);
969 LoadClassById(result, TMP, pp); 944 LoadClassById(result, TMP);
970 } 945 }
971 946
972 947
973 void Assembler::CompareClassId( 948 void Assembler::CompareClassId(Register object, intptr_t class_id) {
974 Register object, intptr_t class_id, Register pp) { 949 LoadClassId(TMP, object);
975 LoadClassId(TMP, object, pp); 950 CompareImmediate(TMP, class_id);
976 CompareImmediate(TMP, class_id, pp);
977 } 951 }
978 952
979 953
980 void Assembler::LoadClassIdMayBeSmi(Register result, Register object) { 954 void Assembler::LoadClassIdMayBeSmi(Register result, Register object) {
981 // Load up a null object. We only need it so we can use LoadClassId on it in 955 // Load up a null object. We only need it so we can use LoadClassId on it in
982 // the case that object is a Smi. 956 // the case that object is a Smi.
983 LoadObject(TMP, Object::null_object(), PP); 957 LoadObject(TMP, Object::null_object());
984 // Check if the object is a Smi. 958 // Check if the object is a Smi.
985 tsti(object, Immediate(kSmiTagMask)); 959 tsti(object, Immediate(kSmiTagMask));
986 // If the object *is* a Smi, use the null object instead. o/w leave alone. 960 // If the object *is* a Smi, use the null object instead. o/w leave alone.
987 csel(TMP, TMP, object, EQ); 961 csel(TMP, TMP, object, EQ);
988 // Loads either the cid of the object if it isn't a Smi, or the cid of null 962 // Loads either the cid of the object if it isn't a Smi, or the cid of null
989 // if it is a Smi, which will be ignored. 963 // if it is a Smi, which will be ignored.
990 LoadClassId(result, TMP, PP); 964 LoadClassId(result, TMP);
991 965
992 LoadImmediate(TMP, kSmiCid, PP); 966 LoadImmediate(TMP, kSmiCid);
993 // If object is a Smi, move the Smi cid into result. o/w leave alone. 967 // If object is a Smi, move the Smi cid into result. o/w leave alone.
994 csel(result, TMP, result, EQ); 968 csel(result, TMP, result, EQ);
995 } 969 }
996 970
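LoadClassIdMayBeSmi avoids a branch entirely: it prepares both candidate answers (null's cid as a stand-in, then kSmiCid) and lets csel pick based on the Smi-tag test. A C++ mirror of the selection logic, with placeholder cids (Dart Smis carry a clear low tag bit, which is what the tsti/EQ pair checks):

  #include <cstdint>
  #include <cstdio>

  static const int kSmiCid = 1;                   // placeholder cid
  static int ClassIdOf(uintptr_t) { return 42; }  // stand-in for LoadClassId

  static int LoadClassIdMayBeSmi(uintptr_t object) {
    const bool is_smi = (object & 1) == 0;  // tsti object, kSmiTagMask; EQ
    return is_smi ? kSmiCid : ClassIdOf(object);  // csel picks one candidate
  }

  int main() {
    printf("%d\n", LoadClassIdMayBeSmi(84));  // even -> Smi tag -> kSmiCid
    printf("%d\n", LoadClassIdMayBeSmi(85));  // odd  -> heap object -> 42
    return 0;
  }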
997 971
998 void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) { 972 void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) {
999 LoadClassIdMayBeSmi(result, object); 973 LoadClassIdMayBeSmi(result, object);
1000 // Finally, tag the result. 974 // Finally, tag the result.
1001 SmiTag(result); 975 SmiTag(result);
1002 } 976 }
1003 977
1004 978
1005 void Assembler::ComputeRange(Register result, 979 void Assembler::ComputeRange(Register result,
1006 Register value, 980 Register value,
1007 Register scratch, 981 Register scratch,
1008 Label* not_mint) { 982 Label* not_mint) {
1009 Label done, not_smi; 983 Label done, not_smi;
1010 tsti(value, Immediate(kSmiTagMask)); 984 tsti(value, Immediate(kSmiTagMask));
1011 b(&not_smi, NE); 985 b(&not_smi, NE);
1012 986
1013 AsrImmediate(scratch, value, 32); 987 AsrImmediate(scratch, value, 32);
1014 LoadImmediate(result, ICData::kUint32RangeBit, PP); 988 LoadImmediate(result, ICData::kUint32RangeBit);
1015 cmp(scratch, Operand(1)); 989 cmp(scratch, Operand(1));
1016 b(&done, EQ); 990 b(&done, EQ);
1017 991
1018 neg(scratch, scratch); 992 neg(scratch, scratch);
1019 add(result, scratch, Operand(ICData::kInt32RangeBit)); 993 add(result, scratch, Operand(ICData::kInt32RangeBit));
1020 cmp(scratch, Operand(1)); 994 cmp(scratch, Operand(1));
1021 LoadImmediate(TMP, ICData::kSignedRangeBit, PP); 995 LoadImmediate(TMP, ICData::kSignedRangeBit);
1022 csel(result, result, TMP, LS); 996 csel(result, result, TMP, LS);
1023 b(&done); 997 b(&done);
1024 998
1025 Bind(&not_smi); 999 Bind(&not_smi);
1026 CompareClassId(value, kMintCid, PP); 1000 CompareClassId(value, kMintCid);
1027 b(not_mint, NE); 1001 b(not_mint, NE);
1028 1002
1029 LoadImmediate(result, ICData::kInt64RangeBit, PP); 1003 LoadImmediate(result, ICData::kInt64RangeBit);
1030 Bind(&done); 1004 Bind(&done);
1031 } 1005 }
1032 1006
1033 1007
1034 void Assembler::UpdateRangeFeedback(Register value, 1008 void Assembler::UpdateRangeFeedback(Register value,
1035 intptr_t index, 1009 intptr_t index,
1036 Register ic_data, 1010 Register ic_data,
1037 Register scratch1, 1011 Register scratch1,
1038 Register scratch2, 1012 Register scratch2,
1039 Label* miss) { 1013 Label* miss) {
1040 ASSERT(ICData::IsValidRangeFeedbackIndex(index)); 1014 ASSERT(ICData::IsValidRangeFeedbackIndex(index));
1041 ComputeRange(scratch1, value, scratch2, miss); 1015 ComputeRange(scratch1, value, scratch2, miss);
1042 ldr(scratch2, FieldAddress(ic_data, ICData::state_bits_offset()), kWord); 1016 ldr(scratch2, FieldAddress(ic_data, ICData::state_bits_offset()), kWord);
1043 orrw(scratch2, 1017 orrw(scratch2,
1044 scratch2, 1018 scratch2,
1045 Operand(scratch1, LSL, ICData::RangeFeedbackShift(index))); 1019 Operand(scratch1, LSL, ICData::RangeFeedbackShift(index)));
1046 str(scratch2, FieldAddress(ic_data, ICData::state_bits_offset()), kWord); 1020 str(scratch2, FieldAddress(ic_data, ICData::state_bits_offset()), kWord);
1047 } 1021 }
1048 1022
1049 1023
1050 // Frame entry and exit. 1024 // Frame entry and exit.
1051 void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) { 1025 void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
1052 // Reserve space for arguments and align frame before entering 1026 // Reserve space for arguments and align frame before entering
1053 // the C++ world. 1027 // the C++ world.
1054 if (frame_space != 0) { 1028 if (frame_space != 0) {
1055 AddImmediate(SP, SP, -frame_space, kNoPP); 1029 AddImmediate(SP, SP, -frame_space);
1056 } 1030 }
1057 if (OS::ActivationFrameAlignment() > 1) { 1031 if (OS::ActivationFrameAlignment() > 1) {
1058 andi(SP, SP, Immediate(~(OS::ActivationFrameAlignment() - 1))); 1032 andi(SP, SP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
1059 } 1033 }
1060 } 1034 }
1061 1035
1062 1036
1063 void Assembler::EnterFrame(intptr_t frame_size) { 1037 void Assembler::EnterFrame(intptr_t frame_size) {
1064 PushPair(LR, FP); 1038 PushPair(LR, FP);
1065 mov(FP, SP); 1039 mov(FP, SP);
1066 1040
1067 if (frame_size > 0) { 1041 if (frame_size > 0) {
1068 sub(SP, SP, Operand(frame_size)); 1042 sub(SP, SP, Operand(frame_size));
1069 } 1043 }
1070 } 1044 }
1071 1045
1072 1046
1073 void Assembler::LeaveFrame() { 1047 void Assembler::LeaveFrame() {
1074 mov(SP, FP); 1048 mov(SP, FP);
1075 PopPair(LR, FP); 1049 PopPair(LR, FP);
1076 } 1050 }
1077 1051
1078 1052
1079 void Assembler::EnterDartFrame(intptr_t frame_size) { 1053 void Assembler::EnterDartFrame(intptr_t frame_size) {
1054 ASSERT(!constant_pool_allowed());
1080 // Setup the frame. 1055 // Setup the frame.
1081 adr(TMP, Immediate(-CodeSize())); // TMP gets PC marker. 1056 adr(TMP, Immediate(-CodeSize())); // TMP gets PC marker.
1082 EnterFrame(0); 1057 EnterFrame(0);
1083 TagAndPushPPAndPcMarker(TMP); // Save PP and PC marker. 1058 TagAndPushPPAndPcMarker(TMP); // Save PP and PC marker.
1084 1059
1085 // Load the pool pointer. 1060 // Load the pool pointer.
1086 LoadPoolPointer(PP); 1061 LoadPoolPointer();
1087 1062
1088 // Reserve space. 1063 // Reserve space.
1089 if (frame_size > 0) { 1064 if (frame_size > 0) {
1090 AddImmediate(SP, SP, -frame_size, PP); 1065 AddImmediate(SP, SP, -frame_size);
1091 } 1066 }
1092 } 1067 }
1093 1068
1094 1069
1095 void Assembler::EnterDartFrameWithInfo(intptr_t frame_size, Register new_pp) { 1070 void Assembler::EnterDartFrameWithInfo(intptr_t frame_size, Register new_pp) {
1071 ASSERT(!constant_pool_allowed());
1096 // Setup the frame. 1072 // Setup the frame.
1097 adr(TMP, Immediate(-CodeSize())); // TMP gets PC marker. 1073 adr(TMP, Immediate(-CodeSize())); // TMP gets PC marker.
1098 EnterFrame(0); 1074 EnterFrame(0);
1099 TagAndPushPPAndPcMarker(TMP); // Save PP and PC marker. 1075 TagAndPushPPAndPcMarker(TMP); // Save PP and PC marker.
1100 1076
1101 // Load the pool pointer. 1077 // Load the pool pointer.
1102 if (new_pp == kNoPP) { 1078 if (new_pp == kNoRegister) {
1103 LoadPoolPointer(PP); 1079 LoadPoolPointer();
1104 } else { 1080 } else {
1105 mov(PP, new_pp); 1081 mov(PP, new_pp);
1082 set_constant_pool_allowed(true);
1106 } 1083 }
1107 1084
1108 // Reserve space. 1085 // Reserve space.
1109 if (frame_size > 0) { 1086 if (frame_size > 0) {
1110 AddImmediate(SP, SP, -frame_size, PP); 1087 AddImmediate(SP, SP, -frame_size);
1111 } 1088 }
1112 } 1089 }
1113 1090
1114 1091
1115 // On entry to a function compiled for OSR, the caller's frame pointer, the 1092 // On entry to a function compiled for OSR, the caller's frame pointer, the
1116 // stack locals, and any copied parameters are already in place. The frame 1093 // stack locals, and any copied parameters are already in place. The frame
1117 // pointer is already set up. The PC marker is not correct for the 1094 // pointer is already set up. The PC marker is not correct for the
1118 // optimized function and there may be extra space for spill slots to 1095 // optimized function and there may be extra space for spill slots to
1119 // allocate. We must also set up the pool pointer for the function. 1096 // allocate. We must also set up the pool pointer for the function.
1120 void Assembler::EnterOsrFrame(intptr_t extra_size, Register new_pp) { 1097 void Assembler::EnterOsrFrame(intptr_t extra_size, Register new_pp) {
1098 ASSERT(!constant_pool_allowed());
1121 Comment("EnterOsrFrame"); 1099 Comment("EnterOsrFrame");
1122 adr(TMP, Immediate(-CodeSize())); 1100 adr(TMP, Immediate(-CodeSize()));
1123 1101
1124 StoreToOffset(TMP, FP, kPcMarkerSlotFromFp * kWordSize, kNoPP); 1102 StoreToOffset(TMP, FP, kPcMarkerSlotFromFp * kWordSize);
1125 1103
1126 // Setup pool pointer for this dart function. 1104 // Setup pool pointer for this dart function.
1127 if (new_pp == kNoPP) { 1105 if (new_pp == kNoRegister) {
1128 LoadPoolPointer(PP); 1106 LoadPoolPointer();
1129 } else { 1107 } else {
1130 mov(PP, new_pp); 1108 mov(PP, new_pp);
1109 set_constant_pool_allowed(true);
1131 } 1110 }
1132 1111
1133 if (extra_size > 0) { 1112 if (extra_size > 0) {
1134 AddImmediate(SP, SP, -extra_size, PP); 1113 AddImmediate(SP, SP, -extra_size);
1135 } 1114 }
1136 } 1115 }
1137 1116
1138 1117
1139 void Assembler::LeaveDartFrame() { 1118 void Assembler::LeaveDartFrame() {
1119 // LeaveDartFrame is called from stubs (pp disallowed) and from Dart code (pp
1120 // allowed), so there is no point in checking the current value of
1121 // constant_pool_allowed().
1122 set_constant_pool_allowed(false);
1140 // Restore and untag PP. 1123 // Restore and untag PP.
1141 LoadFromOffset(PP, FP, kSavedCallerPpSlotFromFp * kWordSize, kNoPP); 1124 LoadFromOffset(PP, FP, kSavedCallerPpSlotFromFp * kWordSize);
1142 sub(PP, PP, Operand(kHeapObjectTag)); 1125 sub(PP, PP, Operand(kHeapObjectTag));
1143 LeaveFrame(); 1126 LeaveFrame();
1144 } 1127 }
1145 1128
1146 1129
1147 void Assembler::EnterCallRuntimeFrame(intptr_t frame_size) { 1130 void Assembler::EnterCallRuntimeFrame(intptr_t frame_size) {
1148 EnterFrame(0); 1131 EnterFrame(0);
1149 1132
1150 // Store fpu registers with the lowest register number at the lowest 1133 // Store fpu registers with the lowest register number at the lowest
1151 // address. 1134 // address.
(...skipping 17 matching lines...)
1169 } 1152 }
1170 1153
1171 1154
1172 void Assembler::LeaveCallRuntimeFrame() { 1155 void Assembler::LeaveCallRuntimeFrame() {
1173 // SP might have been modified to reserve space for arguments 1156 // SP might have been modified to reserve space for arguments
1174 // and ensure proper alignment of the stack frame. 1157 // and ensure proper alignment of the stack frame.
1175 // We need to restore it before restoring registers. 1158 // We need to restore it before restoring registers.
1176 const intptr_t kPushedRegistersSize = 1159 const intptr_t kPushedRegistersSize =
1177 kDartVolatileCpuRegCount * kWordSize + 1160 kDartVolatileCpuRegCount * kWordSize +
1178 kDartVolatileFpuRegCount * kWordSize; 1161 kDartVolatileFpuRegCount * kWordSize;
1179 AddImmediate(SP, FP, -kPushedRegistersSize, PP); 1162 AddImmediate(SP, FP, -kPushedRegistersSize);
1180 for (int i = kDartLastVolatileCpuReg; i >= kDartFirstVolatileCpuReg; i--) { 1163 for (int i = kDartLastVolatileCpuReg; i >= kDartFirstVolatileCpuReg; i--) {
1181 const Register reg = static_cast<Register>(i); 1164 const Register reg = static_cast<Register>(i);
1182 Pop(reg); 1165 Pop(reg);
1183 } 1166 }
1184 1167
1185 for (int i = 0; i < kNumberOfVRegisters; i++) { 1168 for (int i = 0; i < kNumberOfVRegisters; i++) {
1186 if ((i >= kAbiFirstPreservedFpuReg) && (i <= kAbiLastPreservedFpuReg)) { 1169 if ((i >= kAbiFirstPreservedFpuReg) && (i <= kAbiLastPreservedFpuReg)) {
1187 // TODO(zra): When SIMD is added, we must also restore the top 1170 // TODO(zra): When SIMD is added, we must also restore the top
1188 // 64-bits of the callee-saved registers. 1171 // 64-bits of the callee-saved registers.
1189 continue; 1172 continue;
1190 } 1173 }
1191 // TODO(zra): Restore the whole V register. 1174 // TODO(zra): Restore the whole V register.
1192 VRegister reg = static_cast<VRegister>(i); 1175 VRegister reg = static_cast<VRegister>(i);
1193 PopDouble(reg); 1176 PopDouble(reg);
1194 } 1177 }
1195 1178
1196 PopPair(LR, FP); 1179 PopPair(LR, FP);
1197 } 1180 }
1198 1181
1199 1182
1200 void Assembler::CallRuntime(const RuntimeEntry& entry, 1183 void Assembler::CallRuntime(const RuntimeEntry& entry,
1201 intptr_t argument_count) { 1184 intptr_t argument_count) {
1202 entry.Call(this, argument_count); 1185 entry.Call(this, argument_count);
1203 } 1186 }
1204 1187
1205 1188
1206 void Assembler::EnterStubFrame() { 1189 void Assembler::EnterStubFrame() {
1190 set_constant_pool_allowed(false);
1207 EnterFrame(0); 1191 EnterFrame(0);
1208 // Save caller's pool pointer. Push 0 in the saved PC area for stub frames. 1192 // Save caller's pool pointer. Push 0 in the saved PC area for stub frames.
1209 TagAndPushPPAndPcMarker(ZR); 1193 TagAndPushPPAndPcMarker(ZR);
1210 LoadPoolPointer(PP); 1194 LoadPoolPointer();
1211 } 1195 }
1212 1196
1213 1197
1214 void Assembler::LeaveStubFrame() { 1198 void Assembler::LeaveStubFrame() {
1199 set_constant_pool_allowed(false);
1215 // Restore and untag PP. 1200 // Restore and untag PP.
1216 LoadFromOffset(PP, FP, kSavedCallerPpSlotFromFp * kWordSize, kNoPP); 1201 LoadFromOffset(PP, FP, kSavedCallerPpSlotFromFp * kWordSize);
1217 sub(PP, PP, Operand(kHeapObjectTag)); 1202 sub(PP, PP, Operand(kHeapObjectTag));
1218 LeaveFrame(); 1203 LeaveFrame();
1219 } 1204 }
1220 1205
1221 1206
1222 void Assembler::UpdateAllocationStats(intptr_t cid, 1207 void Assembler::UpdateAllocationStats(intptr_t cid,
1223 Register pp,
1224 Heap::Space space, 1208 Heap::Space space,
1225 bool inline_isolate) { 1209 bool inline_isolate) {
1226 ASSERT(cid > 0); 1210 ASSERT(cid > 0);
1227 intptr_t counter_offset = 1211 intptr_t counter_offset =
1228 ClassTable::CounterOffsetFor(cid, space == Heap::kNew); 1212 ClassTable::CounterOffsetFor(cid, space == Heap::kNew);
1229 if (inline_isolate) { 1213 if (inline_isolate) {
1230 ClassTable* class_table = Isolate::Current()->class_table(); 1214 ClassTable* class_table = Isolate::Current()->class_table();
1231 ClassHeapStats** table_ptr = class_table->TableAddressFor(cid); 1215 ClassHeapStats** table_ptr = class_table->TableAddressFor(cid);
1232 if (cid < kNumPredefinedCids) { 1216 if (cid < kNumPredefinedCids) {
1233 LoadImmediate( 1217 LoadImmediate(
1234 TMP2, reinterpret_cast<uword>(*table_ptr) + counter_offset, pp); 1218 TMP2, reinterpret_cast<uword>(*table_ptr) + counter_offset);
1235 } else { 1219 } else {
1236 LoadImmediate(TMP2, reinterpret_cast<uword>(table_ptr), pp); 1220 LoadImmediate(TMP2, reinterpret_cast<uword>(table_ptr));
1237 ldr(TMP, Address(TMP2)); 1221 ldr(TMP, Address(TMP2));
1238 AddImmediate(TMP2, TMP, counter_offset, pp); 1222 AddImmediate(TMP2, TMP, counter_offset);
1239 } 1223 }
1240 } else { 1224 } else {
1241 LoadIsolate(TMP2); 1225 LoadIsolate(TMP2);
1242 intptr_t table_offset = 1226 intptr_t table_offset =
1243 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); 1227 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
1244 ldr(TMP, Address(TMP2, table_offset)); 1228 ldr(TMP, Address(TMP2, table_offset));
1245 AddImmediate(TMP2, TMP, counter_offset, pp); 1229 AddImmediate(TMP2, TMP, counter_offset);
1246 } 1230 }
1247 ldr(TMP, Address(TMP2, 0)); 1231 ldr(TMP, Address(TMP2, 0));
1248 AddImmediate(TMP, TMP, 1, pp); 1232 AddImmediate(TMP, TMP, 1);
1249 str(TMP, Address(TMP2, 0)); 1233 str(TMP, Address(TMP2, 0));
1250 } 1234 }
1251 1235
1252 1236
1253 void Assembler::UpdateAllocationStatsWithSize(intptr_t cid, 1237 void Assembler::UpdateAllocationStatsWithSize(intptr_t cid,
1254 Register size_reg, 1238 Register size_reg,
1255 Register pp,
1256 Heap::Space space, 1239 Heap::Space space,
1257 bool inline_isolate) { 1240 bool inline_isolate) {
1258 ASSERT(cid > 0); 1241 ASSERT(cid > 0);
1259 const uword class_offset = ClassTable::ClassOffsetFor(cid); 1242 const uword class_offset = ClassTable::ClassOffsetFor(cid);
1260 const uword count_field_offset = (space == Heap::kNew) ? 1243 const uword count_field_offset = (space == Heap::kNew) ?
1261 ClassHeapStats::allocated_since_gc_new_space_offset() : 1244 ClassHeapStats::allocated_since_gc_new_space_offset() :
1262 ClassHeapStats::allocated_since_gc_old_space_offset(); 1245 ClassHeapStats::allocated_since_gc_old_space_offset();
1263 const uword size_field_offset = (space == Heap::kNew) ? 1246 const uword size_field_offset = (space == Heap::kNew) ?
1264 ClassHeapStats::allocated_size_since_gc_new_space_offset() : 1247 ClassHeapStats::allocated_size_since_gc_new_space_offset() :
1265 ClassHeapStats::allocated_size_since_gc_old_space_offset(); 1248 ClassHeapStats::allocated_size_since_gc_old_space_offset();
1266 if (inline_isolate) { 1249 if (inline_isolate) {
1267 ClassTable* class_table = Isolate::Current()->class_table(); 1250 ClassTable* class_table = Isolate::Current()->class_table();
1268 ClassHeapStats** table_ptr = class_table->TableAddressFor(cid); 1251 ClassHeapStats** table_ptr = class_table->TableAddressFor(cid);
1269 if (cid < kNumPredefinedCids) { 1252 if (cid < kNumPredefinedCids) {
1270 LoadImmediate(TMP2, 1253 LoadImmediate(TMP2,
1271 reinterpret_cast<uword>(*table_ptr) + class_offset, pp); 1254 reinterpret_cast<uword>(*table_ptr) + class_offset);
1272 } else { 1255 } else {
1273 LoadImmediate(TMP2, reinterpret_cast<uword>(table_ptr), pp); 1256 LoadImmediate(TMP2, reinterpret_cast<uword>(table_ptr));
1274 ldr(TMP, Address(TMP2)); 1257 ldr(TMP, Address(TMP2));
1275 AddImmediate(TMP2, TMP, class_offset, pp); 1258 AddImmediate(TMP2, TMP, class_offset);
1276 } 1259 }
1277 } else { 1260 } else {
1278 LoadIsolate(TMP2); 1261 LoadIsolate(TMP2);
1279 intptr_t table_offset = 1262 intptr_t table_offset =
1280 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); 1263 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
1281 ldr(TMP, Address(TMP2, table_offset)); 1264 ldr(TMP, Address(TMP2, table_offset));
1282 AddImmediate(TMP2, TMP, class_offset, pp); 1265 AddImmediate(TMP2, TMP, class_offset);
1283 } 1266 }
1284 ldr(TMP, Address(TMP2, count_field_offset)); 1267 ldr(TMP, Address(TMP2, count_field_offset));
1285 AddImmediate(TMP, TMP, 1, pp); 1268 AddImmediate(TMP, TMP, 1);
1286 str(TMP, Address(TMP2, count_field_offset)); 1269 str(TMP, Address(TMP2, count_field_offset));
1287 ldr(TMP, Address(TMP2, size_field_offset)); 1270 ldr(TMP, Address(TMP2, size_field_offset));
1288 add(TMP, TMP, Operand(size_reg)); 1271 add(TMP, TMP, Operand(size_reg));
1289 str(TMP, Address(TMP2, size_field_offset)); 1272 str(TMP, Address(TMP2, size_field_offset));
1290 } 1273 }
1291 1274
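UpdateAllocationStatsWithSize performs the same counter bump and additionally accumulates the allocated size. A schematic C++ equivalent, with field names assumed for illustration:

  struct HypotheticalHeapStats {
    intptr_t allocated_since_gc;       // count_field_offset
    intptr_t allocated_size_since_gc;  // size_field_offset
  };

  void BumpStatsWithSize(HypotheticalHeapStats* s, intptr_t size) {
    s->allocated_since_gc += 1;          // ldr / AddImmediate(TMP, TMP, 1) / str
    s->allocated_size_since_gc += size;  // ldr / add(TMP, TMP, size_reg) / str
  }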
1292 1275
1293 void Assembler::MaybeTraceAllocation(intptr_t cid, 1276 void Assembler::MaybeTraceAllocation(intptr_t cid,
1294 Register temp_reg, 1277 Register temp_reg,
1295 Register pp,
1296 Label* trace, 1278 Label* trace,
1297 bool inline_isolate) { 1279 bool inline_isolate) {
1298 ASSERT(cid > 0); 1280 ASSERT(cid > 0);
1299 intptr_t state_offset = ClassTable::StateOffsetFor(cid); 1281 intptr_t state_offset = ClassTable::StateOffsetFor(cid);
1300 if (inline_isolate) { 1282 if (inline_isolate) {
1301 ClassTable* class_table = Isolate::Current()->class_table(); 1283 ClassTable* class_table = Isolate::Current()->class_table();
1302 ClassHeapStats** table_ptr = class_table->TableAddressFor(cid); 1284 ClassHeapStats** table_ptr = class_table->TableAddressFor(cid);
1303 if (cid < kNumPredefinedCids) { 1285 if (cid < kNumPredefinedCids) {
1304 LoadImmediate( 1286 LoadImmediate(
1305 temp_reg, reinterpret_cast<uword>(*table_ptr) + state_offset, pp); 1287 temp_reg, reinterpret_cast<uword>(*table_ptr) + state_offset);
1306 } else { 1288 } else {
1307 LoadImmediate(temp_reg, reinterpret_cast<uword>(table_ptr), pp); 1289 LoadImmediate(temp_reg, reinterpret_cast<uword>(table_ptr));
1308 ldr(temp_reg, Address(temp_reg, 0)); 1290 ldr(temp_reg, Address(temp_reg, 0));
1309 AddImmediate(temp_reg, temp_reg, state_offset, pp); 1291 AddImmediate(temp_reg, temp_reg, state_offset);
1310 } 1292 }
1311 } else { 1293 } else {
1312 LoadIsolate(temp_reg); 1294 LoadIsolate(temp_reg);
1313 intptr_t table_offset = 1295 intptr_t table_offset =
1314 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); 1296 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
1315 ldr(temp_reg, Address(temp_reg, table_offset)); 1297 ldr(temp_reg, Address(temp_reg, table_offset));
1316 AddImmediate(temp_reg, temp_reg, state_offset, pp); 1298 AddImmediate(temp_reg, temp_reg, state_offset);
1317 } 1299 }
1318 ldr(temp_reg, Address(temp_reg, 0)); 1300 ldr(temp_reg, Address(temp_reg, 0));
1319 tsti(temp_reg, Immediate(ClassHeapStats::TraceAllocationMask())); 1301 tsti(temp_reg, Immediate(ClassHeapStats::TraceAllocationMask()));
1320 b(trace, NE); 1302 b(trace, NE);
1321 } 1303 }
1322 1304
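The tsti/b(NE) pair at the end of MaybeTraceAllocation is a masked test-and-branch: the jump to trace is taken exactly when the trace bit is set in the per-class state word. In C++ terms (a sketch, not VM code):

  bool ShouldTraceAllocation(uintptr_t state_word, uintptr_t trace_mask) {
    // tsti sets the condition flags from (state_word & trace_mask);
    // b(trace, NE) branches when that result is non-zero.
    return (state_word & trace_mask) != 0;
  }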
1323 1305
1324 void Assembler::TryAllocate(const Class& cls, 1306 void Assembler::TryAllocate(const Class& cls,
1325 Label* failure, 1307 Label* failure,
1326 Register instance_reg, 1308 Register instance_reg,
1327 Register temp_reg, 1309 Register temp_reg) {
1328 Register pp) {
1329 ASSERT(failure != NULL); 1310 ASSERT(failure != NULL);
1330 if (FLAG_inline_alloc) { 1311 if (FLAG_inline_alloc) {
1331 // If this allocation is traced, the program will jump to the failure 1312 // If this allocation is traced, the program will jump to the failure
1332 // path (i.e. the allocation stub), which will allocate the object and 1313 // path (i.e. the allocation stub), which will allocate the object and
1333 // trace the allocation call site. 1314 // trace the allocation call site.
1334 MaybeTraceAllocation(cls.id(), temp_reg, pp, failure); 1315 MaybeTraceAllocation(cls.id(), temp_reg, failure);
1335 const intptr_t instance_size = cls.instance_size(); 1316 const intptr_t instance_size = cls.instance_size();
1336 Heap* heap = Isolate::Current()->heap(); 1317 Heap* heap = Isolate::Current()->heap();
1337 Heap::Space space = heap->SpaceForAllocation(cls.id()); 1318 Heap::Space space = heap->SpaceForAllocation(cls.id());
1338 const uword top_address = heap->TopAddress(space); 1319 const uword top_address = heap->TopAddress(space);
1339 LoadImmediate(temp_reg, top_address, pp); 1320 LoadImmediate(temp_reg, top_address);
1340 ldr(instance_reg, Address(temp_reg)); 1321 ldr(instance_reg, Address(temp_reg));
1341 // TODO(koda): Protect against unsigned overflow here. 1322 // TODO(koda): Protect against unsigned overflow here.
1342 AddImmediateSetFlags(instance_reg, instance_reg, instance_size, pp); 1323 AddImmediateSetFlags(instance_reg, instance_reg, instance_size);
1343 1324
1344 // instance_reg: potential next object start. 1325 // instance_reg: potential next object start.
1345 const uword end_address = heap->EndAddress(space); 1326 const uword end_address = heap->EndAddress(space);
1346 ASSERT(top_address < end_address); 1327 ASSERT(top_address < end_address);
1347 // Could use ldp to load (top, end) as a pair; no benefit seen experimentally. 1328 // Could use ldp to load (top, end) as a pair; no benefit seen experimentally.
1348 ldr(TMP, Address(temp_reg, end_address - top_address)); 1329 ldr(TMP, Address(temp_reg, end_address - top_address));
1349 CompareRegisters(TMP, instance_reg); 1330 CompareRegisters(TMP, instance_reg);
1350 // Fail if the heap end is unsigned less-than-or-equal to instance_reg. 1331 // Fail if the heap end is unsigned less-than-or-equal to instance_reg.
1351 b(failure, LS); 1332 b(failure, LS);
1352 1333
1353 // Successfully allocated the object; now update top to point to the 1334 // Successfully allocated the object; now update top to point to the
1354 // next object start and store the class id in the object's tags field. 1335 // next object start and store the class id in the object's tags field.
1355 str(instance_reg, Address(temp_reg)); 1336 str(instance_reg, Address(temp_reg));
1356 1337
1357 ASSERT(instance_size >= kHeapObjectTag); 1338 ASSERT(instance_size >= kHeapObjectTag);
1358 AddImmediate( 1339 AddImmediate(
1359 instance_reg, instance_reg, -instance_size + kHeapObjectTag, pp); 1340 instance_reg, instance_reg, -instance_size + kHeapObjectTag);
1360 UpdateAllocationStats(cls.id(), pp, space); 1341 UpdateAllocationStats(cls.id(), space);
1361 1342
1362 uword tags = 0; 1343 uword tags = 0;
1363 tags = RawObject::SizeTag::update(instance_size, tags); 1344 tags = RawObject::SizeTag::update(instance_size, tags);
1364 ASSERT(cls.id() != kIllegalCid); 1345 ASSERT(cls.id() != kIllegalCid);
1365 tags = RawObject::ClassIdTag::update(cls.id(), tags); 1346 tags = RawObject::ClassIdTag::update(cls.id(), tags);
1366 LoadImmediate(TMP, tags, pp); 1347 LoadImmediate(TMP, tags);
1367 StoreFieldToOffset(TMP, instance_reg, Object::tags_offset(), pp); 1348 StoreFieldToOffset(TMP, instance_reg, Object::tags_offset());
1368 } else { 1349 } else {
1369 b(failure); 1350 b(failure);
1370 } 1351 }
1371 } 1352 }
1372 1353
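TryAllocate emits the classic bump-pointer fast path: load the heap's top word, advance it by the instance size, bail out to the stub if the result passes the heap end, otherwise commit the new top and tag the result. A compact C++ model, assuming kHeapObjectTag is the usual low-bit pointer tag (note the TODO above: unlike TryAllocateArray below, this path does not yet guard the add against unsigned overflow):

  uword TryBumpAllocate(uword* top_addr, uword* end_addr, intptr_t size) {
    uword new_top = *top_addr + size;        // ldr + AddImmediateSetFlags
    if (*end_addr <= new_top) return 0;      // CompareRegisters + b(failure, LS)
    *top_addr = new_top;                     // str: commit the new top
    return new_top - size + kHeapObjectTag;  // tagged start of the new object
  }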
1373 1354
1374 void Assembler::TryAllocateArray(intptr_t cid, 1355 void Assembler::TryAllocateArray(intptr_t cid,
1375 intptr_t instance_size, 1356 intptr_t instance_size,
1376 Label* failure, 1357 Label* failure,
1377 Register instance, 1358 Register instance,
1378 Register end_address, 1359 Register end_address,
1379 Register temp1, 1360 Register temp1,
1380 Register temp2) { 1361 Register temp2) {
1381 if (FLAG_inline_alloc) { 1362 if (FLAG_inline_alloc) {
1382 // If this allocation is traced, the program will jump to the failure 1363 // If this allocation is traced, the program will jump to the failure
1383 // path (i.e. the allocation stub), which will allocate the object and 1364 // path (i.e. the allocation stub), which will allocate the object and
1384 // trace the allocation call site. 1365 // trace the allocation call site.
1385 MaybeTraceAllocation(cid, temp1, PP, failure); 1366 MaybeTraceAllocation(cid, temp1, failure);
1386 Isolate* isolate = Isolate::Current(); 1367 Isolate* isolate = Isolate::Current();
1387 Heap* heap = isolate->heap(); 1368 Heap* heap = isolate->heap();
1388 Heap::Space space = heap->SpaceForAllocation(cid); 1369 Heap::Space space = heap->SpaceForAllocation(cid);
1389 LoadImmediate(temp1, heap->TopAddress(space), PP); 1370 LoadImmediate(temp1, heap->TopAddress(space));
1390 ldr(instance, Address(temp1, 0)); // Potential new object start. 1371 ldr(instance, Address(temp1, 0)); // Potential new object start.
1391 AddImmediateSetFlags(end_address, instance, instance_size, PP); 1372 AddImmediateSetFlags(end_address, instance, instance_size);
1392 b(failure, CS); // Fail on unsigned overflow. 1373 b(failure, CS); // Fail on unsigned overflow.
1393 1374
1394 // Check if the allocation fits into the remaining space. 1375 // Check if the allocation fits into the remaining space.
1395 // instance: potential new object start. 1376 // instance: potential new object start.
1396 // end_address: potential next object start. 1377 // end_address: potential next object start.
1397 LoadImmediate(temp2, heap->EndAddress(space), PP); 1378 LoadImmediate(temp2, heap->EndAddress(space));
1398 ldr(temp2, Address(temp2, 0)); 1379 ldr(temp2, Address(temp2, 0));
1399 cmp(end_address, Operand(temp2)); 1380 cmp(end_address, Operand(temp2));
1400 b(failure, CS); 1381 b(failure, CS);
1401 1382
1402 // Successfully allocated the object(s); now update top to point to 1383 // Successfully allocated the object(s); now update top to point to
1403 // the next object start and initialize the object. 1384 // the next object start and initialize the object.
1404 str(end_address, Address(temp1, 0)); 1385 str(end_address, Address(temp1, 0));
1405 add(instance, instance, Operand(kHeapObjectTag)); 1386 add(instance, instance, Operand(kHeapObjectTag));
1406 LoadImmediate(temp2, instance_size, PP); 1387 LoadImmediate(temp2, instance_size);
1407 UpdateAllocationStatsWithSize(cid, temp2, PP, space); 1388 UpdateAllocationStatsWithSize(cid, temp2, space);
1408 1389
1409 // Initialize the tags. 1390 // Initialize the tags.
1410 // instance: new object start as a tagged pointer. 1391 // instance: new object start as a tagged pointer.
1411 uword tags = 0; 1392 uword tags = 0;
1412 tags = RawObject::ClassIdTag::update(cid, tags); 1393 tags = RawObject::ClassIdTag::update(cid, tags);
1413 tags = RawObject::SizeTag::update(instance_size, tags); 1394 tags = RawObject::SizeTag::update(instance_size, tags);
1414 LoadImmediate(temp2, tags, PP); 1395 LoadImmediate(temp2, tags);
1415 str(temp2, FieldAddress(instance, Array::tags_offset())); // Store tags. 1396 str(temp2, FieldAddress(instance, Array::tags_offset())); // Store tags.
1416 } else { 1397 } else {
1417 b(failure); 1398 b(failure);
1418 } 1399 }
1419 } 1400 }
1420 1401
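TryAllocateArray follows the same fast path but, unlike TryAllocate, it checks the carry flag after AddImmediateSetFlags, so an unsigned wrap of top + size is caught explicitly. Sketched in C++ (illustrative only; kHeapObjectTag as in the diff):

  bool TryBumpAllocateArray(uword* top, uword* end, uword size, uword* out) {
    uword start = *top;
    uword next = start + size;
    if (next < start) return false;  // b(failure, CS): unsigned overflow
    if (next >= *end) return false;  // cmp + b(failure, CS): does not fit
    *top = next;                     // commit the new top
    *out = start + kHeapObjectTag;   // tagged pointer to the new object(s)
    return true;
  }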
1421 1402
1422 Address Assembler::ElementAddressForIntIndex(bool is_external, 1403 Address Assembler::ElementAddressForIntIndex(bool is_external,
1423 intptr_t cid, 1404 intptr_t cid,
1424 intptr_t index_scale, 1405 intptr_t index_scale,
(...skipping 30 matching lines...)
1455 add(base, array, Operand(index, LSL, shift)); 1436 add(base, array, Operand(index, LSL, shift));
1456 } 1437 }
1457 const OperandSize size = Address::OperandSizeFor(cid); 1438 const OperandSize size = Address::OperandSizeFor(cid);
1458 ASSERT(Address::CanHoldOffset(offset, Address::Offset, size)); 1439 ASSERT(Address::CanHoldOffset(offset, Address::Offset, size));
1459 return Address(base, offset, Address::Offset, size); 1440 return Address(base, offset, Address::Offset, size);
1460 } 1441 }
1461 1442
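The visible tail of the element-address helpers reduces to base-plus-scaled-index arithmetic: the array base is advanced by index << shift, and any remaining displacement is folded into the addressing mode. As plain arithmetic (a sketch; the shift/offset derivation sits in the 30 collapsed lines above, so the parameters here are assumptions):

  uword ElementAddress(uword array, intptr_t index, int shift, int32_t offset) {
    uword base = array + (static_cast<uword>(index) << shift);  // add ..., LSL shift
    return base + offset;  // becomes Address(base, offset, Address::Offset, size)
  }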
1462 } // namespace dart 1443 } // namespace dart
1463 1444
1464 #endif // defined TARGET_ARCH_ARM64 1445 #endif // defined TARGET_ARCH_ARM64