Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(50)

Side by Side Diff: src/mips64/macro-assembler-mips64.cc

Issue 426863006: MIPS64: Add support for architecture revision 6. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include <limits.h> // For LONG_MIN, LONG_MAX. 5 #include <limits.h> // For LONG_MIN, LONG_MAX.
6 6
7 #include "src/v8.h" 7 #include "src/v8.h"
8 8
9 #if V8_TARGET_ARCH_MIPS64 9 #if V8_TARGET_ARCH_MIPS64
10 10
(...skipping 677 matching lines...) Expand 10 before | Expand all | Expand 10 after
688 if (kArchVariant == kLoongson) { 688 if (kArchVariant == kLoongson) {
689 mult(rs, at); 689 mult(rs, at);
690 mflo(rd); 690 mflo(rd);
691 } else { 691 } else {
692 mul(rd, rs, at); 692 mul(rd, rs, at);
693 } 693 }
694 } 694 }
695 } 695 }
696 696
697 697
698 void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
699 if (rt.is_reg()) {
700 if (kArchVariant != kMips64r6) {
701 mult(rs, rt.rm());
702 mfhi(rd);
703 } else {
704 muh(rd, rs, rt.rm());
705 }
706 } else {
707 // li handles the relocation.
708 ASSERT(!rs.is(at));
709 li(at, rt);
710 if (kArchVariant != kMips64r6) {
711 mult(rs, at);
712 mfhi(rd);
713 } else {
714 muh(rd, rs, at);
715 }
716 }
717 }
718
719
698 void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) { 720 void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
699 if (rt.is_reg()) { 721 if (rt.is_reg()) {
700 if (kArchVariant == kLoongson) { 722 if (kArchVariant == kLoongson) {
701 dmult(rs, rt.rm()); 723 dmult(rs, rt.rm());
702 mflo(rd); 724 mflo(rd);
725 } else if (kArchVariant == kMips64r6) {
726 dmul(rd, rs, rt.rm());
703 } else { 727 } else {
704 // TODO(yuyin): 728 // TODO(yuyin):
705 // dmul(rd, rs, rt.rm()); 729 // dmul(rd, rs, rt.rm());
706 dmult(rs, rt.rm()); 730 dmult(rs, rt.rm());
707 mflo(rd); 731 mflo(rd);
708 } 732 }
709 } else { 733 } else {
710 // li handles the relocation. 734 // li handles the relocation.
711 ASSERT(!rs.is(at)); 735 ASSERT(!rs.is(at));
712 li(at, rt); 736 li(at, rt);
713 if (kArchVariant == kLoongson) { 737 if (kArchVariant == kLoongson) {
714 dmult(rs, at); 738 dmult(rs, at);
715 mflo(rd); 739 mflo(rd);
740 } else if (kArchVariant == kMips64r6) {
741 dmul(rd, rs, at);
716 } else { 742 } else {
717 // TODO(yuyin): 743 // TODO(yuyin):
718 // dmul(rd, rs, at); 744 // dmul(rd, rs, at);
719 dmult(rs, at); 745 dmult(rs, at);
720 mflo(rd); 746 mflo(rd);
721 } 747 }
722 } 748 }
723 } 749 }
724 750
725 751
752 void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
753 if (rt.is_reg()) {
754 if (kArchVariant == kLoongson) {
755 dmult(rs, rt.rm());
756 mfhi(rd);
757 } else if (kArchVariant == kMips64r6) {
758 dmuh(rd, rs, rt.rm());
759 } else {
760 // TODO(yuyin):
761 // dmul(rd, rs, rt.rm());
paul.l... 2014/07/29 14:58:04 I think we can now remove this TODO, and the one j
dusmil.imgtec 2014/07/29 17:39:12 Done.
762 dmult(rs, rt.rm());
763 mfhi(rd);
764 }
765 } else {
766 // li handles the relocation.
767 ASSERT(!rs.is(at));
768 li(at, rt);
769 if (kArchVariant == kLoongson) {
770 dmult(rs, at);
771 mfhi(rd);
772 } else if (kArchVariant == kMips64r6) {
773 dmuh(rd, rs, at);
774 } else {
775 // TODO(yuyin):
776 // dmul(rd, rs, at);
777 dmult(rs, at);
778 mfhi(rd);
779 }
780 }
781 }
782
783
726 void MacroAssembler::Mult(Register rs, const Operand& rt) { 784 void MacroAssembler::Mult(Register rs, const Operand& rt) {
727 if (rt.is_reg()) { 785 if (rt.is_reg()) {
728 mult(rs, rt.rm()); 786 mult(rs, rt.rm());
729 } else { 787 } else {
730 // li handles the relocation. 788 // li handles the relocation.
731 ASSERT(!rs.is(at)); 789 ASSERT(!rs.is(at));
732 li(at, rt); 790 li(at, rt);
733 mult(rs, at); 791 mult(rs, at);
734 } 792 }
735 } 793 }
(...skipping 52 matching lines...) Expand 10 before | Expand all | Expand 10 after
788 ddiv(rs, rt.rm()); 846 ddiv(rs, rt.rm());
789 } else { 847 } else {
790 // li handles the relocation. 848 // li handles the relocation.
791 ASSERT(!rs.is(at)); 849 ASSERT(!rs.is(at));
792 li(at, rt); 850 li(at, rt);
793 ddiv(rs, at); 851 ddiv(rs, at);
794 } 852 }
795 } 853 }
796 854
797 855
856 void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
857 if (kArchVariant != kMips64r6) {
858 if (rt.is_reg()) {
859 ddiv(rs, rt.rm());
860 mflo(rd);
861 } else {
862 // li handles the relocation.
863 ASSERT(!rs.is(at));
864 li(at, rt);
865 ddiv(rs, at);
866 mflo(rd);
867 }
868 } else {
869 if (rt.is_reg()) {
870 ddiv(rd, rs, rt.rm());
871 } else {
872 // li handles the relocation.
873 ASSERT(!rs.is(at));
874 li(at, rt);
875 ddiv(rd, rs, at);
876 }
877 }
878 }
879
880
798 void MacroAssembler::Divu(Register rs, const Operand& rt) { 881 void MacroAssembler::Divu(Register rs, const Operand& rt) {
799 if (rt.is_reg()) { 882 if (rt.is_reg()) {
800 divu(rs, rt.rm()); 883 divu(rs, rt.rm());
801 } else { 884 } else {
802 // li handles the relocation. 885 // li handles the relocation.
803 ASSERT(!rs.is(at)); 886 ASSERT(!rs.is(at));
804 li(at, rt); 887 li(at, rt);
805 divu(rs, at); 888 divu(rs, at);
806 } 889 }
807 } 890 }
808 891
809 892
810 void MacroAssembler::Ddivu(Register rs, const Operand& rt) { 893 void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
811 if (rt.is_reg()) { 894 if (rt.is_reg()) {
812 ddivu(rs, rt.rm()); 895 ddivu(rs, rt.rm());
813 } else { 896 } else {
814 // li handles the relocation. 897 // li handles the relocation.
815 ASSERT(!rs.is(at)); 898 ASSERT(!rs.is(at));
816 li(at, rt); 899 li(at, rt);
817 ddivu(rs, at); 900 ddivu(rs, at);
818 } 901 }
819 } 902 }
820 903
821 904
905 void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
906 if (kArchVariant != kMips64r6) {
907 if (rt.is_reg()) {
908 ddiv(rs, rt.rm());
909 mfhi(rd);
910 } else {
911 // li handles the relocation.
912 ASSERT(!rs.is(at));
913 li(at, rt);
914 ddiv(rs, at);
915 mfhi(rd);
916 }
917 } else {
918 if (rt.is_reg()) {
919 dmod(rd, rs, rt.rm());
920 } else {
921 // li handles the relocation.
922 ASSERT(!rs.is(at));
923 li(at, rt);
924 dmod(rd, rs, at);
925 }
926 }
927 }
928
929
822 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) { 930 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
823 if (rt.is_reg()) { 931 if (rt.is_reg()) {
824 and_(rd, rs, rt.rm()); 932 and_(rd, rs, rt.rm());
825 } else { 933 } else {
826 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) { 934 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
827 andi(rd, rs, rt.imm64_); 935 andi(rd, rs, rt.imm64_);
828 } else { 936 } else {
829 // li handles the relocation. 937 // li handles the relocation.
830 ASSERT(!rs.is(at)); 938 ASSERT(!rs.is(at));
831 li(at, rt); 939 li(at, rt);
(...skipping 545 matching lines...) Expand 10 before | Expand all | Expand 10 after
1377 BranchDelaySlot bd) { 1485 BranchDelaySlot bd) {
1378 BlockTrampolinePoolScope block_trampoline_pool(this); 1486 BlockTrampolinePoolScope block_trampoline_pool(this);
1379 if (cc == al) { 1487 if (cc == al) {
1380 Branch(bd, target); 1488 Branch(bd, target);
1381 return; 1489 return;
1382 } 1490 }
1383 1491
1384 ASSERT(nan || target); 1492 ASSERT(nan || target);
1385 // Check for unordered (NaN) cases. 1493 // Check for unordered (NaN) cases.
1386 if (nan) { 1494 if (nan) {
1387 c(UN, D, cmp1, cmp2); 1495 if (kArchVariant != kMips64r6) {
1388 bc1t(nan); 1496 c(UN, D, cmp1, cmp2);
1389 } 1497 bc1t(nan);
1390 1498 } else {
1391 if (target) { 1499 // Use f31 for comparison result. It has to be unavailable to lithium
1392 // Here NaN cases were either handled by this function or are assumed to 1500 // register allocator.
1393 // have been handled by the caller. 1501 ASSERT(!cmp1.is(f31) && !cmp2.is(f31));
1394 // Unsigned conditions are treated as their signed counterpart. 1502 cmp(UN, L, f31, cmp1, cmp2);
1395 switch (cc) { 1503 bc1nez(nan, f31);
1396 case lt:
1397 c(OLT, D, cmp1, cmp2);
1398 bc1t(target);
1399 break;
1400 case gt:
1401 c(ULE, D, cmp1, cmp2);
1402 bc1f(target);
1403 break;
1404 case ge:
1405 c(ULT, D, cmp1, cmp2);
1406 bc1f(target);
1407 break;
1408 case le:
1409 c(OLE, D, cmp1, cmp2);
1410 bc1t(target);
1411 break;
1412 case eq:
1413 c(EQ, D, cmp1, cmp2);
1414 bc1t(target);
1415 break;
1416 case ueq:
1417 c(UEQ, D, cmp1, cmp2);
1418 bc1t(target);
1419 break;
1420 case ne:
1421 c(EQ, D, cmp1, cmp2);
1422 bc1f(target);
1423 break;
1424 case nue:
1425 c(UEQ, D, cmp1, cmp2);
1426 bc1f(target);
1427 break;
1428 default:
1429 CHECK(0);
1430 } 1504 }
1431 } 1505 }
1432 1506
1507 if (kArchVariant != kMips64r6) {
1508 if (target) {
1509 // Here NaN cases were either handled by this function or are assumed to
1510 // have been handled by the caller.
1511 switch (cc) {
1512 case lt:
1513 c(OLT, D, cmp1, cmp2);
1514 bc1t(target);
1515 break;
1516 case gt:
1517 c(ULE, D, cmp1, cmp2);
1518 bc1f(target);
1519 break;
1520 case ge:
1521 c(ULT, D, cmp1, cmp2);
1522 bc1f(target);
1523 break;
1524 case le:
1525 c(OLE, D, cmp1, cmp2);
1526 bc1t(target);
1527 break;
1528 case eq:
1529 c(EQ, D, cmp1, cmp2);
1530 bc1t(target);
1531 break;
1532 case ueq:
1533 c(UEQ, D, cmp1, cmp2);
1534 bc1t(target);
1535 break;
1536 case ne:
1537 c(EQ, D, cmp1, cmp2);
1538 bc1f(target);
1539 break;
1540 case nue:
1541 c(UEQ, D, cmp1, cmp2);
1542 bc1f(target);
1543 break;
1544 default:
1545 CHECK(0);
1546 }
1547 }
1548 } else {
1549 if (target) {
1550 // Here NaN cases were either handled by this function or are assumed to
1551 // have been handled by the caller.
1552 // Unsigned conditions are treated as their signed counterpart.
1553 // Use f31 for comparison result, it is valid in fp64 (FR = 1) mode.
1554 ASSERT(!cmp1.is(f31) && !cmp2.is(f31));
1555 switch (cc) {
1556 case lt:
1557 cmp(OLT, L, f31, cmp1, cmp2);
1558 bc1nez(target, f31);
1559 break;
1560 case gt:
1561 cmp(ULE, L, f31, cmp1, cmp2);
1562 bc1eqz(target, f31);
1563 break;
1564 case ge:
1565 cmp(ULT, L, f31, cmp1, cmp2);
1566 bc1eqz(target, f31);
1567 break;
1568 case le:
1569 cmp(OLE, L, f31, cmp1, cmp2);
1570 bc1nez(target, f31);
1571 break;
1572 case eq:
1573 cmp(EQ, L, f31, cmp1, cmp2);
1574 bc1nez(target, f31);
1575 break;
1576 case ueq:
1577 cmp(UEQ, L, f31, cmp1, cmp2);
1578 bc1nez(target, f31);
1579 break;
1580 case ne:
1581 cmp(EQ, L, f31, cmp1, cmp2);
1582 bc1eqz(target, f31);
1583 break;
1584 case nue:
1585 cmp(UEQ, L, f31, cmp1, cmp2);
1586 bc1eqz(target, f31);
1587 break;
1588 default:
1589 CHECK(0);
1590 }
1591 }
1592 }
1593
1433 if (bd == PROTECT) { 1594 if (bd == PROTECT) {
1434 nop(); 1595 nop();
1435 } 1596 }
1436 } 1597 }
1437 1598
1438 1599
1439 void MacroAssembler::Move(FPURegister dst, double imm) { 1600 void MacroAssembler::Move(FPURegister dst, double imm) {
1440 static const DoubleRepresentation minus_zero(-0.0); 1601 static const DoubleRepresentation minus_zero(-0.0);
1441 static const DoubleRepresentation zero(0.0); 1602 static const DoubleRepresentation zero(0.0);
1442 DoubleRepresentation value_rep(imm); 1603 DoubleRepresentation value_rep(imm);
(...skipping 20 matching lines...) Expand all
1463 li(at, Operand(hi)); 1624 li(at, Operand(hi));
1464 mthc1(at, dst); 1625 mthc1(at, dst);
1465 } else { 1626 } else {
1466 mthc1(zero_reg, dst); 1627 mthc1(zero_reg, dst);
1467 } 1628 }
1468 } 1629 }
1469 } 1630 }
1470 1631
1471 1632
1472 void MacroAssembler::Movz(Register rd, Register rs, Register rt) { 1633 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
1473 if (kArchVariant == kLoongson) { 1634 if (kArchVariant == kLoongson || kArchVariant == kMips64r6) {
1474 Label done; 1635 Label done;
1475 Branch(&done, ne, rt, Operand(zero_reg)); 1636 Branch(&done, ne, rt, Operand(zero_reg));
1476 mov(rd, rs); 1637 mov(rd, rs);
1477 bind(&done); 1638 bind(&done);
1478 } else { 1639 } else {
1479 movz(rd, rs, rt); 1640 movz(rd, rs, rt);
1480 } 1641 }
1481 } 1642 }
1482 1643
1483 1644
1484 void MacroAssembler::Movn(Register rd, Register rs, Register rt) { 1645 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
1485 if (kArchVariant == kLoongson) { 1646 if (kArchVariant == kLoongson || kArchVariant == kMips64r6) {
1486 Label done; 1647 Label done;
1487 Branch(&done, eq, rt, Operand(zero_reg)); 1648 Branch(&done, eq, rt, Operand(zero_reg));
1488 mov(rd, rs); 1649 mov(rd, rs);
1489 bind(&done); 1650 bind(&done);
1490 } else { 1651 } else {
1491 movn(rd, rs, rt); 1652 movn(rd, rs, rt);
1492 } 1653 }
1493 } 1654 }
1494 1655
1495 1656
(...skipping 868 matching lines...) Expand 10 before | Expand all | Expand 10 after
2364 bal(offset); 2525 bal(offset);
2365 break; 2526 break;
2366 case ne: 2527 case ne:
2367 beq(rs, r2, 2); 2528 beq(rs, r2, 2);
2368 nop(); 2529 nop();
2369 bal(offset); 2530 bal(offset);
2370 break; 2531 break;
2371 2532
2372 // Signed comparison. 2533 // Signed comparison.
2373 case greater: 2534 case greater:
2535 // rs > rt
2374 slt(scratch, r2, rs); 2536 slt(scratch, r2, rs);
2375 daddiu(scratch, scratch, -1); 2537 beq(scratch, zero_reg, 2);
2376 bgezal(scratch, offset); 2538 nop();
2539 bal(offset);
2377 break; 2540 break;
2378 case greater_equal: 2541 case greater_equal:
2542 // rs >= rt
2379 slt(scratch, rs, r2); 2543 slt(scratch, rs, r2);
2380 daddiu(scratch, scratch, -1); 2544 bne(scratch, zero_reg, 2);
2381 bltzal(scratch, offset); 2545 nop();
2546 bal(offset);
2382 break; 2547 break;
2383 case less: 2548 case less:
2549 // rs < r2
2384 slt(scratch, rs, r2); 2550 slt(scratch, rs, r2);
2385 daddiu(scratch, scratch, -1); 2551 bne(scratch, zero_reg, 2);
2386 bgezal(scratch, offset); 2552 nop();
2553 bal(offset);
2387 break; 2554 break;
2388 case less_equal: 2555 case less_equal:
2556 // rs <= r2
2389 slt(scratch, r2, rs); 2557 slt(scratch, r2, rs);
2390 daddiu(scratch, scratch, -1); 2558 bne(scratch, zero_reg, 2);
2391 bltzal(scratch, offset); 2559 nop();
2560 bal(offset);
2392 break; 2561 break;
2393 2562
2563
2394 // Unsigned comparison. 2564 // Unsigned comparison.
2395 case Ugreater: 2565 case Ugreater:
2566 // rs > rt
2396 sltu(scratch, r2, rs); 2567 sltu(scratch, r2, rs);
2397 daddiu(scratch, scratch, -1); 2568 beq(scratch, zero_reg, 2);
2398 bgezal(scratch, offset); 2569 nop();
2570 bal(offset);
2399 break; 2571 break;
2400 case Ugreater_equal: 2572 case Ugreater_equal:
2573 // rs >= rt
2401 sltu(scratch, rs, r2); 2574 sltu(scratch, rs, r2);
2402 daddiu(scratch, scratch, -1); 2575 bne(scratch, zero_reg, 2);
2403 bltzal(scratch, offset); 2576 nop();
2577 bal(offset);
2404 break; 2578 break;
2405 case Uless: 2579 case Uless:
2580 // rs < r2
2406 sltu(scratch, rs, r2); 2581 sltu(scratch, rs, r2);
2407 daddiu(scratch, scratch, -1); 2582 bne(scratch, zero_reg, 2);
2408 bgezal(scratch, offset); 2583 nop();
2584 bal(offset);
2409 break; 2585 break;
2410 case Uless_equal: 2586 case Uless_equal:
2587 // rs <= r2
2411 sltu(scratch, r2, rs); 2588 sltu(scratch, r2, rs);
2412 daddiu(scratch, scratch, -1); 2589 bne(scratch, zero_reg, 2);
2413 bltzal(scratch, offset); 2590 nop();
2591 bal(offset);
2414 break; 2592 break;
2415
2416 default: 2593 default:
2417 UNREACHABLE(); 2594 UNREACHABLE();
2418 } 2595 }
2419 } 2596 }
2420 // Emit a nop in the branch delay slot if required. 2597 // Emit a nop in the branch delay slot if required.
2421 if (bdslot == PROTECT) 2598 if (bdslot == PROTECT)
2422 nop(); 2599 nop();
2423 } 2600 }
2424 2601
2425 2602
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
2462 break; 2639 break;
2463 case ne: 2640 case ne:
2464 beq(rs, r2, 2); 2641 beq(rs, r2, 2);
2465 nop(); 2642 nop();
2466 offset = shifted_branch_offset(L, false); 2643 offset = shifted_branch_offset(L, false);
2467 bal(offset); 2644 bal(offset);
2468 break; 2645 break;
2469 2646
2470 // Signed comparison. 2647 // Signed comparison.
2471 case greater: 2648 case greater:
2649 // rs > rt
2472 slt(scratch, r2, rs); 2650 slt(scratch, r2, rs);
2473 daddiu(scratch, scratch, -1); 2651 beq(scratch, zero_reg, 2);
2652 nop();
2474 offset = shifted_branch_offset(L, false); 2653 offset = shifted_branch_offset(L, false);
2475 bgezal(scratch, offset); 2654 bal(offset);
2476 break; 2655 break;
2477 case greater_equal: 2656 case greater_equal:
2657 // rs >= rt
2478 slt(scratch, rs, r2); 2658 slt(scratch, rs, r2);
2479 daddiu(scratch, scratch, -1); 2659 bne(scratch, zero_reg, 2);
2660 nop();
2480 offset = shifted_branch_offset(L, false); 2661 offset = shifted_branch_offset(L, false);
2481 bltzal(scratch, offset); 2662 bal(offset);
2482 break; 2663 break;
2483 case less: 2664 case less:
2665 // rs < r2
2484 slt(scratch, rs, r2); 2666 slt(scratch, rs, r2);
2485 daddiu(scratch, scratch, -1); 2667 bne(scratch, zero_reg, 2);
2668 nop();
2486 offset = shifted_branch_offset(L, false); 2669 offset = shifted_branch_offset(L, false);
2487 bgezal(scratch, offset); 2670 bal(offset);
2488 break; 2671 break;
2489 case less_equal: 2672 case less_equal:
2673 // rs <= r2
2490 slt(scratch, r2, rs); 2674 slt(scratch, r2, rs);
2491 daddiu(scratch, scratch, -1); 2675 bne(scratch, zero_reg, 2);
2676 nop();
2492 offset = shifted_branch_offset(L, false); 2677 offset = shifted_branch_offset(L, false);
2493 bltzal(scratch, offset); 2678 bal(offset);
2494 break; 2679 break;
2495 2680
2681
2496 // Unsigned comparison. 2682 // Unsigned comparison.
2497 case Ugreater: 2683 case Ugreater:
2684 // rs > rt
2498 sltu(scratch, r2, rs); 2685 sltu(scratch, r2, rs);
2499 daddiu(scratch, scratch, -1); 2686 beq(scratch, zero_reg, 2);
2687 nop();
2500 offset = shifted_branch_offset(L, false); 2688 offset = shifted_branch_offset(L, false);
2501 bgezal(scratch, offset); 2689 bal(offset);
2502 break; 2690 break;
2503 case Ugreater_equal: 2691 case Ugreater_equal:
2692 // rs >= rt
2504 sltu(scratch, rs, r2); 2693 sltu(scratch, rs, r2);
2505 daddiu(scratch, scratch, -1); 2694 bne(scratch, zero_reg, 2);
2695 nop();
2506 offset = shifted_branch_offset(L, false); 2696 offset = shifted_branch_offset(L, false);
2507 bltzal(scratch, offset); 2697 bal(offset);
2508 break; 2698 break;
2509 case Uless: 2699 case Uless:
2700 // rs < r2
2510 sltu(scratch, rs, r2); 2701 sltu(scratch, rs, r2);
2511 daddiu(scratch, scratch, -1); 2702 bne(scratch, zero_reg, 2);
2703 nop();
2512 offset = shifted_branch_offset(L, false); 2704 offset = shifted_branch_offset(L, false);
2513 bgezal(scratch, offset); 2705 bal(offset);
2514 break; 2706 break;
2515 case Uless_equal: 2707 case Uless_equal:
2708 // rs <= r2
2516 sltu(scratch, r2, rs); 2709 sltu(scratch, r2, rs);
2517 daddiu(scratch, scratch, -1); 2710 bne(scratch, zero_reg, 2);
2711 nop();
2518 offset = shifted_branch_offset(L, false); 2712 offset = shifted_branch_offset(L, false);
2519 bltzal(scratch, offset); 2713 bal(offset);
2520 break; 2714 break;
2521 2715
2522 default: 2716 default:
2523 UNREACHABLE(); 2717 UNREACHABLE();
2524 } 2718 }
2525 } 2719 }
2526 // Check that offset could actually hold on an int16_t. 2720 // Check that offset could actually hold on an int16_t.
2527 ASSERT(is_int16(offset)); 2721 ASSERT(is_int16(offset));
2528 2722
2529 // Emit a nop in the branch delay slot if required. 2723 // Emit a nop in the branch delay slot if required.
(...skipping 2918 matching lines...) Expand 10 before | Expand all | Expand 10 after
5448 dsra(value, value, kImm16Bits); 5642 dsra(value, value, kImm16Bits);
5449 } 5643 }
5450 5644
5451 5645
// Tests the flags word of the page containing 'object' against 'mask'
// and branches to 'condition_met' when the masked flags compare 'cc'
// against zero. Clobbers 'scratch'.
void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met) {
  // Round 'object' down to the start of its page.
  And(scratch, object, Operand(~Page::kPageAlignmentMask));
  // Load the page's flags word and isolate the bits of interest.
  ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  And(scratch, scratch, Operand(mask));
  Branch(condition_met, cc, scratch, Operand(zero_reg));
}
5466 5657
5467 5658
5468 void MacroAssembler::CheckMapDeprecated(Handle<Map> map, 5659 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
5469 Register scratch, 5660 Register scratch,
5470 Label* if_deprecated) { 5661 Label* if_deprecated) {
5471 if (map->CanBeDeprecated()) { 5662 if (map->CanBeDeprecated()) {
(...skipping 453 matching lines...) Expand 10 before | Expand all | Expand 10 after
5925 6116
5926 6117
// Divides 'dividend' by the compile-time constant 'divisor' without a
// divide instruction, using the magic multiplier/shift pair computed by
// MultiplierAndShift. 32-bit operation. Clobbers 'at'.
void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  ASSERT(!dividend.is(result));
  ASSERT(!dividend.is(at));
  ASSERT(!result.is(at));
  MultiplierAndShift ms(divisor);
  li(at, Operand(ms.multiplier()));
  // High 32 bits of dividend * multiplier (Mulh goes through mult/mfhi
  // or muh depending on the architecture variant).
  Mulh(result, dividend, Operand(at));
  // Correction terms for when the multiplier's sign disagrees with the
  // divisor's sign.
  if (divisor > 0 && ms.multiplier() < 0) {
    Addu(result, result, Operand(dividend));
  }
  if (divisor < 0 && ms.multiplier() > 0) {
    Subu(result, result, Operand(dividend));
  }
  if (ms.shift() > 0) sra(result, result, ms.shift());
  // Add the dividend's sign bit so negative quotients round toward zero.
  srl(at, dividend, 31);
  Addu(result, result, Operand(at));
}
5947 6137
5948 6138
5949 } } // namespace v8::internal 6139 } } // namespace v8::internal
5950 6140
5951 #endif // V8_TARGET_ARCH_MIPS64 6141 #endif // V8_TARGET_ARCH_MIPS64
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698