Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(188)

Side by Side Diff: runtime/vm/assembler_mips.cc

Issue 59613005: Merge (x & y) == 0 pattern to emit a single test instruction. (Closed) Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 7 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" 5 #include "vm/globals.h"
6 #if defined(TARGET_ARCH_MIPS) 6 #if defined(TARGET_ARCH_MIPS)
7 7
8 #include "vm/assembler.h" 8 #include "vm/assembler.h"
9 #include "vm/longjump.h" 9 #include "vm/longjump.h"
10 #include "vm/runtime_entry.h" 10 #include "vm/runtime_entry.h"
(...skipping 349 matching lines...) Expand 10 before | Expand all | Expand 10 after
360 } else { 360 } else {
361 lw(rd, Address(PP, offset_low)); 361 lw(rd, Address(PP, offset_low));
362 } 362 }
363 } 363 }
364 } 364 }
365 365
366 366
// Emits rd = rs + rt and leaves a signed-overflow indicator in ro:
// after this sequence ro is negative iff the addition overflowed.
// Uses the identity: (a + b) overflows iff ((result ^ a) & (result ^ b))
// has its sign bit set (both operands share a sign that the result lacks).
// Clobbers TMP. |scratch| is only required when rs == rt == rd.
// ro must be distinct from rd, rs, rt and TMP.
void Assembler::AdduDetectOverflow(Register rd, Register rs, Register rt,
                                   Register ro, Register scratch) {
  ASSERT(rd != ro);
  ASSERT(rd != TMP);
  ASSERT(ro != TMP);
  ASSERT(ro != rs);
  ASSERT(ro != rt);

  // If all three registers alias, the original rt value would be lost by
  // the addu below; stash it in |scratch| first.
  if ((rs == rt) && (rd == rs)) {
    ASSERT(scratch != kNoRegister);
    ASSERT(scratch != TMP);
    ASSERT(rd != scratch);
    ASSERT(ro != scratch);
    ASSERT(rs != scratch);
    ASSERT(rt != scratch);
    mov(scratch, rt);
    rt = scratch;
  }

  if (rd == rs) {
    mov(TMP, rs);  // Preserve rs.
    addu(rd, rs, rt);  // rs is overwritten.
    xor_(TMP, rd, TMP);  // TMP = result ^ original rs.
    xor_(ro, rd, rt);  // ro = result ^ rt.
    and_(ro, ro, TMP);
  } else if (rd == rt) {
    mov(TMP, rt);  // Preserve rt.
    addu(rd, rs, rt);  // rt is overwritten.
    xor_(TMP, rd, TMP);  // TMP = result ^ original rt.
    xor_(ro, rd, rs);  // ro = result ^ rs.
    and_(ro, ro, TMP);
  } else {
    // No aliasing: both operands survive the addu.
    addu(rd, rs, rt);
    xor_(ro, rd, rs);
    xor_(TMP, rd, rt);
    and_(ro, TMP, ro);
  }
}
405 405
406 406
407 void Assembler::SubuDetectOverflow(Register rd, Register rs, Register rt, 407 void Assembler::SubuDetectOverflow(Register rd, Register rs, Register rt,
408 Register ro) { 408 Register ro) {
409 ASSERT(rd != ro); 409 ASSERT(rd != ro);
410 ASSERT(rd != TMP1); 410 ASSERT(rd != TMP);
411 ASSERT(ro != TMP1); 411 ASSERT(ro != TMP);
412 ASSERT(ro != rs); 412 ASSERT(ro != rs);
413 ASSERT(ro != rt); 413 ASSERT(ro != rt);
414 ASSERT(rs != TMP1); 414 ASSERT(rs != TMP);
415 ASSERT(rt != TMP1); 415 ASSERT(rt != TMP);
416 416
417 // This happens with some crankshaft code. Since Subu works fine if 417 // This happens with some crankshaft code. Since Subu works fine if
418 // left == right, let's not make that restriction here. 418 // left == right, let's not make that restriction here.
419 if (rs == rt) { 419 if (rs == rt) {
420 mov(rd, ZR); 420 mov(rd, ZR);
421 mov(ro, ZR); 421 mov(ro, ZR);
422 return; 422 return;
423 } 423 }
424 424
425 if (rd == rs) { 425 if (rd == rs) {
426 mov(TMP1, rs); // Preserve left. 426 mov(TMP, rs); // Preserve left.
427 subu(rd, rs, rt); // Left is overwritten. 427 subu(rd, rs, rt); // Left is overwritten.
428 xor_(ro, rd, TMP1); // scratch is original left. 428 xor_(ro, rd, TMP); // scratch is original left.
429 xor_(TMP1, TMP1, rs); // scratch is original left. 429 xor_(TMP, TMP, rs); // scratch is original left.
430 and_(ro, TMP1, ro); 430 and_(ro, TMP, ro);
431 } else if (rd == rt) { 431 } else if (rd == rt) {
432 mov(TMP1, rt); // Preserve right. 432 mov(TMP, rt); // Preserve right.
433 subu(rd, rs, rt); // Right is overwritten. 433 subu(rd, rs, rt); // Right is overwritten.
434 xor_(ro, rd, rs); 434 xor_(ro, rd, rs);
435 xor_(TMP1, rs, TMP1); // Original right. 435 xor_(TMP, rs, TMP); // Original right.
436 and_(ro, TMP1, ro); 436 and_(ro, TMP, ro);
437 } else { 437 } else {
438 subu(rd, rs, rt); 438 subu(rd, rs, rt);
439 xor_(ro, rd, rs); 439 xor_(ro, rd, rs);
440 xor_(TMP1, rs, rt); 440 xor_(TMP, rs, rt);
441 and_(ro, TMP1, ro); 441 and_(ro, TMP, ro);
442 } 442 }
443 } 443 }
444 444
445 445
446 void Assembler::LoadObject(Register rd, const Object& object) { 446 void Assembler::LoadObject(Register rd, const Object& object) {
447 // Smis and VM heap objects are never relocated; do not use object pool. 447 // Smis and VM heap objects are never relocated; do not use object pool.
448 if (object.IsSmi()) { 448 if (object.IsSmi()) {
449 LoadImmediate(rd, reinterpret_cast<int32_t>(object.raw())); 449 LoadImmediate(rd, reinterpret_cast<int32_t>(object.raw()));
450 } else if (object.InVMHeap()) { 450 } else if (object.InVMHeap()) {
451 // Make sure that class CallPattern is able to decode this load immediate. 451 // Make sure that class CallPattern is able to decode this load immediate.
(...skipping 24 matching lines...) Expand all
476 if (object_pool_.At(i) == obj.raw()) { 476 if (object_pool_.At(i) == obj.raw()) {
477 return i; 477 return i;
478 } 478 }
479 } 479 }
480 object_pool_.Add(obj, Heap::kOld); 480 object_pool_.Add(obj, Heap::kOld);
481 return object_pool_.Length() - 1; 481 return object_pool_.Length() - 1;
482 } 482 }
483 483
484 484
// Pushes |object| onto the stack by first materializing it in TMP.
// Clobbers TMP.
void Assembler::PushObject(const Object& object) {
  LoadObject(TMP, object);
  Push(TMP);
}
489 489
490 490
// Signed three-way comparison of |rn| against |object|:
// rd1 = (rn < object), rd2 = (object < rn); both zero means equal.
// Clobbers TMP (holds the loaded object).
void Assembler::CompareObject(Register rd1, Register rd2,
                              Register rn, const Object& object) {
  ASSERT(rn != TMP);
  ASSERT(rd1 != TMP);
  ASSERT(rd1 != rd2);
  LoadObject(TMP, object);
  slt(rd1, rn, TMP);
  slt(rd2, TMP, rn);
}
500 500
501 501
// Preserves object and value registers.
// Branches to |no_update| when storing |value| into |object| cannot need a
// store-buffer entry. Caller guarantees value is not a Smi (hence "NoSmi").
// Clobbers TMP and CMPRES1.
void Assembler::StoreIntoObjectFilterNoSmi(Register object,
                                           Register value,
                                           Label* no_update) {
  // The new/old space bit is the word-size bit of an object pointer.
  COMPILE_ASSERT((kNewObjectAlignmentOffset == kWordSize) &&
                 (kOldObjectAlignmentOffset == 0), young_alignment);

  // Write-barrier triggers if the value is in the new space (has bit set) and
  // the object is in the old space (has bit cleared).
  // To check that, we compute value & ~object and skip the write barrier
  // if the bit is not set. We can't destroy the object.
  nor(TMP, ZR, object);  // TMP = ~object (nor with the zero register).
  and_(TMP, value, TMP);
  andi(CMPRES1, TMP, Immediate(kNewObjectAlignmentOffset));
  beq(CMPRES1, ZR, no_update);  // Bit clear: barrier not required.
}
518 518
519 519
// Preserves object and value registers.
// Branches to |no_update| when storing |value| into |object| cannot need a
// store-buffer entry: value is a Smi, value is old, or object is new.
// Clobbers TMP and CMPRES1.
void Assembler::StoreIntoObjectFilter(Register object,
                                      Register value,
                                      Label* no_update) {
  // For the value we are only interested in the new/old bit and the tag bit.
  // And the new bit with the tag bit. The resulting bit will be 0 for a Smi.
  sll(TMP, value, kObjectAlignmentLog2 - 1);  // Move tag bit to the space-bit position.
  and_(TMP, value, TMP);
  // And the result with the negated space bit of the object.
  nor(CMPRES1, ZR, object);  // CMPRES1 = ~object.
  and_(TMP, TMP, CMPRES1);
  andi(CMPRES1, TMP, Immediate(kNewObjectAlignmentOffset));
  beq(CMPRES1, ZR, no_update);
}
534 534
535 535
536 void Assembler::StoreIntoObject(Register object, 536 void Assembler::StoreIntoObject(Register object,
537 const Address& dest, 537 const Address& dest,
538 Register value, 538 Register value,
539 bool can_value_be_smi) { 539 bool can_value_be_smi) {
540 ASSERT(object != value); 540 ASSERT(object != value);
541 sw(value, dest); 541 sw(value, dest);
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
583 // No store buffer update. 583 // No store buffer update.
584 } 584 }
585 585
586 586
// Stores |value| into |dest| inside |object| without a write barrier.
// Only safe for values that can never trigger the barrier: Smis, VM-heap
// objects, or values the caller guarantees are old. Clobbers TMP.
void Assembler::StoreIntoObjectNoBarrier(Register object,
                                         const Address& dest,
                                         const Object& value) {
  ASSERT(value.IsSmi() || value.InVMHeap() ||
         (value.IsOld() && value.IsNotTemporaryScopedHandle()));
  // No store buffer update.
  LoadObject(TMP, value);
  sw(TMP, dest);
}
596 596
597 597
598 void Assembler::LoadClassId(Register result, Register object) { 598 void Assembler::LoadClassId(Register result, Register object) {
599 ASSERT(RawObject::kClassIdTagBit == 16); 599 ASSERT(RawObject::kClassIdTagBit == 16);
600 ASSERT(RawObject::kClassIdTagSize == 16); 600 ASSERT(RawObject::kClassIdTagSize == 16);
601 const intptr_t class_id_offset = Object::tags_offset() + 601 const intptr_t class_id_offset = Object::tags_offset() +
602 RawObject::kClassIdTagBit / kBitsPerByte; 602 RawObject::kClassIdTagBit / kBitsPerByte;
603 lhu(result, FieldAddress(object, class_id_offset)); 603 lhu(result, FieldAddress(object, class_id_offset));
604 } 604 }
605 605
606 606
607 void Assembler::LoadClassById(Register result, Register class_id) { 607 void Assembler::LoadClassById(Register result, Register class_id) {
608 ASSERT(result != class_id); 608 ASSERT(result != class_id);
609 lw(result, FieldAddress(CTX, Context::isolate_offset())); 609 lw(result, FieldAddress(CTX, Context::isolate_offset()));
610 const intptr_t table_offset_in_isolate = 610 const intptr_t table_offset_in_isolate =
611 Isolate::class_table_offset() + ClassTable::table_offset(); 611 Isolate::class_table_offset() + ClassTable::table_offset();
612 lw(result, Address(result, table_offset_in_isolate)); 612 lw(result, Address(result, table_offset_in_isolate));
613 sll(TMP1, class_id, 2); 613 sll(TMP, class_id, 2);
614 addu(result, result, TMP1); 614 addu(result, result, TMP);
615 lw(result, Address(result)); 615 lw(result, Address(result));
616 } 616 }
617 617
618 618
// Loads the class of |object| into |result| by reading its class id and
// indexing the current isolate's class table. Clobbers TMP.
void Assembler::LoadClass(Register result, Register object) {
  ASSERT(TMP != result);
  LoadClassId(TMP, object);

  lw(result, FieldAddress(CTX, Context::isolate_offset()));
  const intptr_t table_offset_in_isolate =
      Isolate::class_table_offset() + ClassTable::table_offset();
  lw(result, Address(result, table_offset_in_isolate));
  sll(TMP, TMP, 2);  // Scale class id by 4: one word per table entry.
  addu(result, result, TMP);
  lw(result, Address(result));
}
631 631
632 632
// Sets up a stub frame. Layout (high to low addresses):
//   uses_pp:   [PC marker = 0] [RA] [saved FP] [saved PP]
//   !uses_pp:  [PC marker = 0] [RA] [saved FP]
// FP is left pointing at the saved-FP slot. When uses_pp, PP is reloaded
// with this stub's object pool via a PC-relative load.
void Assembler::EnterStubFrame(bool uses_pp) {
  SetPrologueOffset();
  if (uses_pp) {
    addiu(SP, SP, Immediate(-4 * kWordSize));
    sw(ZR, Address(SP, 3 * kWordSize));  // PC marker is 0 in stubs.
    sw(RA, Address(SP, 2 * kWordSize));
    sw(FP, Address(SP, 1 * kWordSize));
    sw(PP, Address(SP, 0 * kWordSize));
    addiu(FP, SP, Immediate(1 * kWordSize));
    // Setup pool pointer for this stub.

    GetNextPC(TMP);  // TMP gets the address of the next instruction.

    // Distance from the instruction after GetNextPC back to the object pool
    // field of this code's Instructions header; CodeSize() must be read
    // here, after GetNextPC has been emitted.
    const intptr_t object_pool_pc_dist =
        Instructions::HeaderSize() - Instructions::object_pool_offset() +
        CodeSize();

    lw(PP, Address(TMP, -object_pool_pc_dist));
  } else {
    addiu(SP, SP, Immediate(-3 * kWordSize));
    sw(ZR, Address(SP, 2 * kWordSize));  // PC marker is 0 in stubs.
    sw(RA, Address(SP, 1 * kWordSize));
    sw(FP, Address(SP, 0 * kWordSize));
    mov(FP, SP);
  }
}
659 659
660 660
(...skipping 77 matching lines...) Expand 10 before | Expand all | Expand 10 after
// Builds a Dart frame. Layout (high to low addresses):
//   [PC marker] [RA] [saved FP] [saved PP] [frame_size bytes of locals]
// The PC marker is this code's entry PC, stored so the corresponding Code
// object can be identified quickly; PP is reloaded from this code's own
// object pool via a PC-relative load. FP ends up at the saved-FP slot.
void Assembler::EnterDartFrame(intptr_t frame_size) {
  // Offset of this prologue within the instruction stream; used below to
  // turn the current PC into the code's entry PC.
  const intptr_t offset = CodeSize();

  SetPrologueOffset();

  addiu(SP, SP, Immediate(-4 * kWordSize));
  sw(RA, Address(SP, 2 * kWordSize));
  sw(FP, Address(SP, 1 * kWordSize));
  sw(PP, Address(SP, 0 * kWordSize));

  GetNextPC(TMP);  // TMP gets the address of the next instruction.

  // Calculate the offset of the pool pointer from the PC.
  // CodeSize() must be read here, after GetNextPC has been emitted.
  const intptr_t object_pool_pc_dist =
      Instructions::HeaderSize() - Instructions::object_pool_offset() +
      CodeSize();

  // Save PC in frame for fast identification of corresponding code.
  AddImmediate(TMP, -offset);
  sw(TMP, Address(SP, 3 * kWordSize));

  // Set FP to the saved previous FP.
  addiu(FP, SP, Immediate(kWordSize));

  // Load the pool pointer. offset has already been subtracted from TMP.
  lw(PP, Address(TMP, -object_pool_pc_dist + offset));

  // Reserve space for locals.
  AddImmediate(SP, -frame_size);
}
768 768
769 769
770 // On entry to a function compiled for OSR, the caller's frame pointer, the 770 // On entry to a function compiled for OSR, the caller's frame pointer, the
771 // stack locals, and any copied parameters are already in place. The frame 771 // stack locals, and any copied parameters are already in place. The frame
772 // pointer is already set up. The PC marker is not correct for the 772 // pointer is already set up. The PC marker is not correct for the
773 // optimized function and there may be extra space for spill slots to 773 // optimized function and there may be extra space for spill slots to
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after
824 Ret(); 824 Ret();
825 delay_slot()->addiu(SP, SP, Immediate(4 * kWordSize)); 825 delay_slot()->addiu(SP, SP, Immediate(4 * kWordSize));
826 } 826 }
827 827
828 828
// Reserves |frame_space| bytes of stack and rounds SP down to the C++ ABI
// activation-frame alignment (no-op when the alignment is 1).
// Clobbers TMP.
void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
  // Reserve space for arguments and align frame before entering
  // the C++ world.
  AddImmediate(SP, -frame_space);
  if (OS::ActivationFrameAlignment() > 1) {
    // Mask off the low bits; the mask expression assumes the alignment is a
    // power of two.
    LoadImmediate(TMP, ~(OS::ActivationFrameAlignment() - 1));
    and_(SP, SP, TMP);
  }
}
838 838
839 839
840 void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) { 840 void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) {
841 const intptr_t kPushedRegistersSize = 841 const intptr_t kPushedRegistersSize =
842 kDartVolatileCpuRegCount * kWordSize + 842 kDartVolatileCpuRegCount * kWordSize +
843 2 * kWordSize + // FP and RA. 843 2 * kWordSize + // FP and RA.
844 kDartVolatileFpuRegCount * kWordSize; 844 kDartVolatileFpuRegCount * kWordSize;
845 845
(...skipping 132 matching lines...) Expand 10 before | Expand all | Expand 10 after
978 Bind(&msg); 978 Bind(&msg);
979 break_(Instr::kMsgMessageCode); 979 break_(Instr::kMsgMessageCode);
980 } 980 }
981 #endif 981 #endif
982 } 982 }
983 983
984 } // namespace dart 984 } // namespace dart
985 985
986 #endif // defined TARGET_ARCH_MIPS 986 #endif // defined TARGET_ARCH_MIPS
987 987
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698