| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include <limits.h> // For LONG_MIN, LONG_MAX. | 5 #include <limits.h> // For LONG_MIN, LONG_MAX. |
| 6 | 6 |
| 7 #include "src/v8.h" | 7 #include "src/v8.h" |
| 8 | 8 |
| 9 #if V8_TARGET_ARCH_MIPS64 | 9 #if V8_TARGET_ARCH_MIPS64 |
| 10 | 10 |
| (...skipping 657 matching lines...) |
| 668 ASSERT(!rs.is(at)); | 668 ASSERT(!rs.is(at)); |
| 669 li(at, rt); | 669 li(at, rt); |
| 670 dsubu(rd, rs, at); | 670 dsubu(rd, rs, at); |
| 671 } | 671 } |
| 672 } | 672 } |
| 673 } | 673 } |
| 674 | 674 |
| 675 | 675 |
| 676 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) { | 676 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) { |
| 677 if (rt.is_reg()) { | 677 if (rt.is_reg()) { |
| 678 if (kArchVariant == kLoongson) { | 678 mul(rd, rs, rt.rm()); |
| 679 } else { |
| 680 // li handles the relocation. |
| 681 ASSERT(!rs.is(at)); |
| 682 li(at, rt); |
| 683 mul(rd, rs, at); |
| 684 } |
| 685 } |
| 686 |
| 687 |
| 688 void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) { |
| 689 if (rt.is_reg()) { |
| 690 if (kArchVariant != kMips64r6) { |
| 679 mult(rs, rt.rm()); | 691 mult(rs, rt.rm()); |
| 680 mflo(rd); | 692 mfhi(rd); |
| 681 } else { | 693 } else { |
| 682 mul(rd, rs, rt.rm()); | 694 muh(rd, rs, rt.rm()); |
| 683 } | 695 } |
| 684 } else { | 696 } else { |
| 685 // li handles the relocation. | 697 // li handles the relocation. |
| 686 ASSERT(!rs.is(at)); | 698 ASSERT(!rs.is(at)); |
| 687 li(at, rt); | 699 li(at, rt); |
| 688 if (kArchVariant == kLoongson) { | 700 if (kArchVariant != kMips64r6) { |
| 689 mult(rs, at); | 701 mult(rs, at); |
| 690 mflo(rd); | 702 mfhi(rd); |
| 691 } else { | 703 } else { |
| 692 mul(rd, rs, at); | 704 muh(rd, rs, at); |
| 693 } | 705 } |
| 694 } | 706 } |
| 695 } | 707 } |
| 696 | 708 |
| 697 | 709 |
| 698 void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) { | 710 void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) { |
| 699 if (rt.is_reg()) { | 711 if (rt.is_reg()) { |
| 700 if (kArchVariant == kLoongson) { | 712 if (kArchVariant == kMips64r6) { |
| 701 dmult(rs, rt.rm()); | 713 dmul(rd, rs, rt.rm()); |
| 702 mflo(rd); | |
| 703 } else { | 714 } else { |
| 704 // TODO(yuyin): | |
| 705 // dmul(rd, rs, rt.rm()); | |
| 706 dmult(rs, rt.rm()); | 715 dmult(rs, rt.rm()); |
| 707 mflo(rd); | 716 mflo(rd); |
| 708 } | 717 } |
| 709 } else { | 718 } else { |
| 710 // li handles the relocation. | 719 // li handles the relocation. |
| 711 ASSERT(!rs.is(at)); | 720 ASSERT(!rs.is(at)); |
| 712 li(at, rt); | 721 li(at, rt); |
| 713 if (kArchVariant == kLoongson) { | 722 if (kArchVariant == kMips64r6) { |
| 714 dmult(rs, at); | 723 dmul(rd, rs, at); |
| 715 mflo(rd); | |
| 716 } else { | 724 } else { |
| 717 // TODO(yuyin): | |
| 718 // dmul(rd, rs, at); | |
| 719 dmult(rs, at); | 725 dmult(rs, at); |
| 720 mflo(rd); | 726 mflo(rd); |
| 721 } | 727 } |
| 722 } | 728 } |
| 723 } | 729 } |
| 730 |
| 731 |
| 732 void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) { |
| 733 if (rt.is_reg()) { |
| 734 if (kArchVariant == kMips64r6) { |
| 735 dmuh(rd, rs, rt.rm()); |
| 736 } else { |
| 737 dmult(rs, rt.rm()); |
| 738 mfhi(rd); |
| 739 } |
| 740 } else { |
| 741 // li handles the relocation. |
| 742 ASSERT(!rs.is(at)); |
| 743 li(at, rt); |
| 744 if (kArchVariant == kMips64r6) { |
| 745 dmuh(rd, rs, at); |
| 746 } else { |
| 747 dmult(rs, at); |
| 748 mfhi(rd); |
| 749 } |
| 750 } |
| 751 } |
| 724 | 752 |
| 725 | 753 |
| 726 void MacroAssembler::Mult(Register rs, const Operand& rt) { | 754 void MacroAssembler::Mult(Register rs, const Operand& rt) { |
| 727 if (rt.is_reg()) { | 755 if (rt.is_reg()) { |
| 728 mult(rs, rt.rm()); | 756 mult(rs, rt.rm()); |
| 729 } else { | 757 } else { |
| 730 // li handles the relocation. | 758 // li handles the relocation. |
| 731 ASSERT(!rs.is(at)); | 759 ASSERT(!rs.is(at)); |
| 732 li(at, rt); | 760 li(at, rt); |
| 733 mult(rs, at); | 761 mult(rs, at); |
| (...skipping 54 matching lines...) |
| 788 ddiv(rs, rt.rm()); | 816 ddiv(rs, rt.rm()); |
| 789 } else { | 817 } else { |
| 790 // li handles the relocation. | 818 // li handles the relocation. |
| 791 ASSERT(!rs.is(at)); | 819 ASSERT(!rs.is(at)); |
| 792 li(at, rt); | 820 li(at, rt); |
| 793 ddiv(rs, at); | 821 ddiv(rs, at); |
| 794 } | 822 } |
| 795 } | 823 } |
| 796 | 824 |
| 797 | 825 |
| 826 void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) { |
| 827 if (kArchVariant != kMips64r6) { |
| 828 if (rt.is_reg()) { |
| 829 ddiv(rs, rt.rm()); |
| 830 mflo(rd); |
| 831 } else { |
| 832 // li handles the relocation. |
| 833 ASSERT(!rs.is(at)); |
| 834 li(at, rt); |
| 835 ddiv(rs, at); |
| 836 mflo(rd); |
| 837 } |
| 838 } else { |
| 839 if (rt.is_reg()) { |
| 840 ddiv(rd, rs, rt.rm()); |
| 841 } else { |
| 842 // li handles the relocation. |
| 843 ASSERT(!rs.is(at)); |
| 844 li(at, rt); |
| 845 ddiv(rd, rs, at); |
| 846 } |
| 847 } |
| 848 } |
| 849 |
| 850 |
| 798 void MacroAssembler::Divu(Register rs, const Operand& rt) { | 851 void MacroAssembler::Divu(Register rs, const Operand& rt) { |
| 799 if (rt.is_reg()) { | 852 if (rt.is_reg()) { |
| 800 divu(rs, rt.rm()); | 853 divu(rs, rt.rm()); |
| 801 } else { | 854 } else { |
| 802 // li handles the relocation. | 855 // li handles the relocation. |
| 803 ASSERT(!rs.is(at)); | 856 ASSERT(!rs.is(at)); |
| 804 li(at, rt); | 857 li(at, rt); |
| 805 divu(rs, at); | 858 divu(rs, at); |
| 806 } | 859 } |
| 807 } | 860 } |
| 808 | 861 |
| 809 | 862 |
| 810 void MacroAssembler::Ddivu(Register rs, const Operand& rt) { | 863 void MacroAssembler::Ddivu(Register rs, const Operand& rt) { |
| 811 if (rt.is_reg()) { | 864 if (rt.is_reg()) { |
| 812 ddivu(rs, rt.rm()); | 865 ddivu(rs, rt.rm()); |
| 813 } else { | 866 } else { |
| 814 // li handles the relocation. | 867 // li handles the relocation. |
| 815 ASSERT(!rs.is(at)); | 868 ASSERT(!rs.is(at)); |
| 816 li(at, rt); | 869 li(at, rt); |
| 817 ddivu(rs, at); | 870 ddivu(rs, at); |
| 818 } | 871 } |
| 819 } | 872 } |
| 820 | 873 |
| 821 | 874 |
| 875 void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) { |
| 876 if (kArchVariant != kMips64r6) { |
| 877 if (rt.is_reg()) { |
| 878 ddiv(rs, rt.rm()); |
| 879 mfhi(rd); |
| 880 } else { |
| 881 // li handles the relocation. |
| 882 ASSERT(!rs.is(at)); |
| 883 li(at, rt); |
| 884 ddiv(rs, at); |
| 885 mfhi(rd); |
| 886 } |
| 887 } else { |
| 888 if (rt.is_reg()) { |
| 889 dmod(rd, rs, rt.rm()); |
| 890 } else { |
| 891 // li handles the relocation. |
| 892 ASSERT(!rs.is(at)); |
| 893 li(at, rt); |
| 894 dmod(rd, rs, at); |
| 895 } |
| 896 } |
| 897 } |
| 898 |
| 899 |
| 822 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) { | 900 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) { |
| 823 if (rt.is_reg()) { | 901 if (rt.is_reg()) { |
| 824 and_(rd, rs, rt.rm()); | 902 and_(rd, rs, rt.rm()); |
| 825 } else { | 903 } else { |
| 826 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) { | 904 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) { |
| 827 andi(rd, rs, rt.imm64_); | 905 andi(rd, rs, rt.imm64_); |
| 828 } else { | 906 } else { |
| 829 // li handles the relocation. | 907 // li handles the relocation. |
| 830 ASSERT(!rs.is(at)); | 908 ASSERT(!rs.is(at)); |
| 831 li(at, rt); | 909 li(at, rt); |
| (...skipping 117 matching lines...) |
| 949 void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) { | 1027 void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) { |
| 950 if (rt.is_reg()) { | 1028 if (rt.is_reg()) { |
| 951 drotrv(rd, rs, rt.rm()); | 1029 drotrv(rd, rs, rt.rm()); |
| 952 } else { | 1030 } else { |
| 953 drotr(rd, rs, rt.imm64_); | 1031 drotr(rd, rs, rt.imm64_); |
| 954 } | 1032 } |
| 955 } | 1033 } |
| 956 | 1034 |
| 957 | 1035 |
| 958 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) { | 1036 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) { |
| 959 if (kArchVariant == kLoongson) { | |
| 960 lw(zero_reg, rs); | |
| 961 } else { | |
| 962 pref(hint, rs); | 1037 pref(hint, rs); |
| 963 } | |
| 964 } | 1038 } |
| 965 | 1039 |
| 966 | 1040 |
| 967 // ------------Pseudo-instructions------------- | 1041 // ------------Pseudo-instructions------------- |
| 968 | 1042 |
| 969 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) { | 1043 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) { |
| 970 lwr(rd, rs); | 1044 lwr(rd, rs); |
| 971 lwl(rd, MemOperand(rs.rm(), rs.offset() + 3)); | 1045 lwl(rd, MemOperand(rs.rm(), rs.offset() + 3)); |
| 972 } | 1046 } |
| 973 | 1047 |
| (...skipping 403 matching lines...) |
| 1377 BranchDelaySlot bd) { | 1451 BranchDelaySlot bd) { |
| 1378 BlockTrampolinePoolScope block_trampoline_pool(this); | 1452 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 1379 if (cc == al) { | 1453 if (cc == al) { |
| 1380 Branch(bd, target); | 1454 Branch(bd, target); |
| 1381 return; | 1455 return; |
| 1382 } | 1456 } |
| 1383 | 1457 |
| 1384 ASSERT(nan || target); | 1458 ASSERT(nan || target); |
| 1385 // Check for unordered (NaN) cases. | 1459 // Check for unordered (NaN) cases. |
| 1386 if (nan) { | 1460 if (nan) { |
| 1387 c(UN, D, cmp1, cmp2); | 1461 if (kArchVariant != kMips64r6) { |
| 1388 bc1t(nan); | 1462 c(UN, D, cmp1, cmp2); |
| 1389 } | 1463 bc1t(nan); |
| 1390 | 1464 } else { |
| 1391 if (target) { | 1465 // Use f31 for comparison result. It has to be unavailable to lithium |
| 1392 // Here NaN cases were either handled by this function or are assumed to | 1466 // register allocator. |
| 1393 // have been handled by the caller. | 1467 ASSERT(!cmp1.is(f31) && !cmp2.is(f31)); |
| 1394 // Unsigned conditions are treated as their signed counterpart. | 1468 cmp(UN, L, f31, cmp1, cmp2); |
| 1395 switch (cc) { | 1469 bc1nez(nan, f31); |
| 1396 case lt: | |
| 1397 c(OLT, D, cmp1, cmp2); | |
| 1398 bc1t(target); | |
| 1399 break; | |
| 1400 case gt: | |
| 1401 c(ULE, D, cmp1, cmp2); | |
| 1402 bc1f(target); | |
| 1403 break; | |
| 1404 case ge: | |
| 1405 c(ULT, D, cmp1, cmp2); | |
| 1406 bc1f(target); | |
| 1407 break; | |
| 1408 case le: | |
| 1409 c(OLE, D, cmp1, cmp2); | |
| 1410 bc1t(target); | |
| 1411 break; | |
| 1412 case eq: | |
| 1413 c(EQ, D, cmp1, cmp2); | |
| 1414 bc1t(target); | |
| 1415 break; | |
| 1416 case ueq: | |
| 1417 c(UEQ, D, cmp1, cmp2); | |
| 1418 bc1t(target); | |
| 1419 break; | |
| 1420 case ne: | |
| 1421 c(EQ, D, cmp1, cmp2); | |
| 1422 bc1f(target); | |
| 1423 break; | |
| 1424 case nue: | |
| 1425 c(UEQ, D, cmp1, cmp2); | |
| 1426 bc1f(target); | |
| 1427 break; | |
| 1428 default: | |
| 1429 CHECK(0); | |
| 1430 } | 1470 } |
| 1431 } | 1471 } |
| 1432 | 1472 |
| 1473 if (kArchVariant != kMips64r6) { |
| 1474 if (target) { |
| 1475 // Here NaN cases were either handled by this function or are assumed to |
| 1476 // have been handled by the caller. |
| 1477 switch (cc) { |
| 1478 case lt: |
| 1479 c(OLT, D, cmp1, cmp2); |
| 1480 bc1t(target); |
| 1481 break; |
| 1482 case gt: |
| 1483 c(ULE, D, cmp1, cmp2); |
| 1484 bc1f(target); |
| 1485 break; |
| 1486 case ge: |
| 1487 c(ULT, D, cmp1, cmp2); |
| 1488 bc1f(target); |
| 1489 break; |
| 1490 case le: |
| 1491 c(OLE, D, cmp1, cmp2); |
| 1492 bc1t(target); |
| 1493 break; |
| 1494 case eq: |
| 1495 c(EQ, D, cmp1, cmp2); |
| 1496 bc1t(target); |
| 1497 break; |
| 1498 case ueq: |
| 1499 c(UEQ, D, cmp1, cmp2); |
| 1500 bc1t(target); |
| 1501 break; |
| 1502 case ne: |
| 1503 c(EQ, D, cmp1, cmp2); |
| 1504 bc1f(target); |
| 1505 break; |
| 1506 case nue: |
| 1507 c(UEQ, D, cmp1, cmp2); |
| 1508 bc1f(target); |
| 1509 break; |
| 1510 default: |
| 1511 CHECK(0); |
| 1512 } |
| 1513 } |
| 1514 } else { |
| 1515 if (target) { |
| 1516 // Here NaN cases were either handled by this function or are assumed to |
| 1517 // have been handled by the caller. |
| 1518 // Unsigned conditions are treated as their signed counterpart. |
| 1519 // Use f31 for comparison result, it is valid in fp64 (FR = 1) mode. |
| 1520 ASSERT(!cmp1.is(f31) && !cmp2.is(f31)); |
| 1521 switch (cc) { |
| 1522 case lt: |
| 1523 cmp(OLT, L, f31, cmp1, cmp2); |
| 1524 bc1nez(target, f31); |
| 1525 break; |
| 1526 case gt: |
| 1527 cmp(ULE, L, f31, cmp1, cmp2); |
| 1528 bc1eqz(target, f31); |
| 1529 break; |
| 1530 case ge: |
| 1531 cmp(ULT, L, f31, cmp1, cmp2); |
| 1532 bc1eqz(target, f31); |
| 1533 break; |
| 1534 case le: |
| 1535 cmp(OLE, L, f31, cmp1, cmp2); |
| 1536 bc1nez(target, f31); |
| 1537 break; |
| 1538 case eq: |
| 1539 cmp(EQ, L, f31, cmp1, cmp2); |
| 1540 bc1nez(target, f31); |
| 1541 break; |
| 1542 case ueq: |
| 1543 cmp(UEQ, L, f31, cmp1, cmp2); |
| 1544 bc1nez(target, f31); |
| 1545 break; |
| 1546 case ne: |
| 1547 cmp(EQ, L, f31, cmp1, cmp2); |
| 1548 bc1eqz(target, f31); |
| 1549 break; |
| 1550 case nue: |
| 1551 cmp(UEQ, L, f31, cmp1, cmp2); |
| 1552 bc1eqz(target, f31); |
| 1553 break; |
| 1554 default: |
| 1555 CHECK(0); |
| 1556 } |
| 1557 } |
| 1558 } |
| 1559 |
| 1433 if (bd == PROTECT) { | 1560 if (bd == PROTECT) { |
| 1434 nop(); | 1561 nop(); |
| 1435 } | 1562 } |
| 1436 } | 1563 } |
| 1437 | 1564 |
| 1438 | 1565 |
| 1439 void MacroAssembler::Move(FPURegister dst, double imm) { | 1566 void MacroAssembler::Move(FPURegister dst, double imm) { |
| 1440 static const DoubleRepresentation minus_zero(-0.0); | 1567 static const DoubleRepresentation minus_zero(-0.0); |
| 1441 static const DoubleRepresentation zero(0.0); | 1568 static const DoubleRepresentation zero(0.0); |
| 1442 DoubleRepresentation value_rep(imm); | 1569 DoubleRepresentation value_rep(imm); |
| (...skipping 20 matching lines...) Expand all Loading... |
| 1463 li(at, Operand(hi)); | 1590 li(at, Operand(hi)); |
| 1464 mthc1(at, dst); | 1591 mthc1(at, dst); |
| 1465 } else { | 1592 } else { |
| 1466 mthc1(zero_reg, dst); | 1593 mthc1(zero_reg, dst); |
| 1467 } | 1594 } |
| 1468 } | 1595 } |
| 1469 } | 1596 } |
| 1470 | 1597 |
| 1471 | 1598 |
| 1472 void MacroAssembler::Movz(Register rd, Register rs, Register rt) { | 1599 void MacroAssembler::Movz(Register rd, Register rs, Register rt) { |
| 1473 if (kArchVariant == kLoongson) { | 1600 if (kArchVariant == kMips64r6) { |
| 1474 Label done; | 1601 Label done; |
| 1475 Branch(&done, ne, rt, Operand(zero_reg)); | 1602 Branch(&done, ne, rt, Operand(zero_reg)); |
| 1476 mov(rd, rs); | 1603 mov(rd, rs); |
| 1477 bind(&done); | 1604 bind(&done); |
| 1478 } else { | 1605 } else { |
| 1479 movz(rd, rs, rt); | 1606 movz(rd, rs, rt); |
| 1480 } | 1607 } |
| 1481 } | 1608 } |
| 1482 | 1609 |
| 1483 | 1610 |
| 1484 void MacroAssembler::Movn(Register rd, Register rs, Register rt) { | 1611 void MacroAssembler::Movn(Register rd, Register rs, Register rt) { |
| 1485 if (kArchVariant == kLoongson) { | 1612 if (kArchVariant == kMips64r6) { |
| 1486 Label done; | 1613 Label done; |
| 1487 Branch(&done, eq, rt, Operand(zero_reg)); | 1614 Branch(&done, eq, rt, Operand(zero_reg)); |
| 1488 mov(rd, rs); | 1615 mov(rd, rs); |
| 1489 bind(&done); | 1616 bind(&done); |
| 1490 } else { | 1617 } else { |
| 1491 movn(rd, rs, rt); | 1618 movn(rd, rs, rt); |
| 1492 } | 1619 } |
| 1493 } | 1620 } |
| 1494 | 1621 |
| 1495 | 1622 |
| (...skipping 868 matching lines...) |
| 2364 bal(offset); | 2491 bal(offset); |
| 2365 break; | 2492 break; |
| 2366 case ne: | 2493 case ne: |
| 2367 beq(rs, r2, 2); | 2494 beq(rs, r2, 2); |
| 2368 nop(); | 2495 nop(); |
| 2369 bal(offset); | 2496 bal(offset); |
| 2370 break; | 2497 break; |
| 2371 | 2498 |
| 2372 // Signed comparison. | 2499 // Signed comparison. |
| 2373 case greater: | 2500 case greater: |
| 2501 // rs > rt |
| 2374 slt(scratch, r2, rs); | 2502 slt(scratch, r2, rs); |
| 2375 daddiu(scratch, scratch, -1); | 2503 beq(scratch, zero_reg, 2); |
| 2376 bgezal(scratch, offset); | 2504 nop(); |
| 2505 bal(offset); |
| 2377 break; | 2506 break; |
| 2378 case greater_equal: | 2507 case greater_equal: |
| 2508 // rs >= rt |
| 2379 slt(scratch, rs, r2); | 2509 slt(scratch, rs, r2); |
| 2380 daddiu(scratch, scratch, -1); | 2510 bne(scratch, zero_reg, 2); |
| 2381 bltzal(scratch, offset); | 2511 nop(); |
| 2512 bal(offset); |
| 2382 break; | 2513 break; |
| 2383 case less: | 2514 case less: |
| 2515 // rs < r2 |
| 2384 slt(scratch, rs, r2); | 2516 slt(scratch, rs, r2); |
| 2385 daddiu(scratch, scratch, -1); | 2517 bne(scratch, zero_reg, 2); |
| 2386 bgezal(scratch, offset); | 2518 nop(); |
| 2519 bal(offset); |
| 2387 break; | 2520 break; |
| 2388 case less_equal: | 2521 case less_equal: |
| 2522 // rs <= r2 |
| 2389 slt(scratch, r2, rs); | 2523 slt(scratch, r2, rs); |
| 2390 daddiu(scratch, scratch, -1); | 2524 bne(scratch, zero_reg, 2); |
| 2391 bltzal(scratch, offset); | 2525 nop(); |
| 2526 bal(offset); |
| 2392 break; | 2527 break; |
| 2393 | 2528 |
| 2529 |
| 2394 // Unsigned comparison. | 2530 // Unsigned comparison. |
| 2395 case Ugreater: | 2531 case Ugreater: |
| 2532 // rs > rt |
| 2396 sltu(scratch, r2, rs); | 2533 sltu(scratch, r2, rs); |
| 2397 daddiu(scratch, scratch, -1); | 2534 beq(scratch, zero_reg, 2); |
| 2398 bgezal(scratch, offset); | 2535 nop(); |
| 2536 bal(offset); |
| 2399 break; | 2537 break; |
| 2400 case Ugreater_equal: | 2538 case Ugreater_equal: |
| 2539 // rs >= rt |
| 2401 sltu(scratch, rs, r2); | 2540 sltu(scratch, rs, r2); |
| 2402 daddiu(scratch, scratch, -1); | 2541 bne(scratch, zero_reg, 2); |
| 2403 bltzal(scratch, offset); | 2542 nop(); |
| 2543 bal(offset); |
| 2404 break; | 2544 break; |
| 2405 case Uless: | 2545 case Uless: |
| 2546 // rs < r2 |
| 2406 sltu(scratch, rs, r2); | 2547 sltu(scratch, rs, r2); |
| 2407 daddiu(scratch, scratch, -1); | 2548 bne(scratch, zero_reg, 2); |
| 2408 bgezal(scratch, offset); | 2549 nop(); |
| 2550 bal(offset); |
| 2409 break; | 2551 break; |
| 2410 case Uless_equal: | 2552 case Uless_equal: |
| 2553 // rs <= r2 |
| 2411 sltu(scratch, r2, rs); | 2554 sltu(scratch, r2, rs); |
| 2412 daddiu(scratch, scratch, -1); | 2555 bne(scratch, zero_reg, 2); |
| 2413 bltzal(scratch, offset); | 2556 nop(); |
| 2557 bal(offset); |
| 2414 break; | 2558 break; |
| 2415 | |
| 2416 default: | 2559 default: |
| 2417 UNREACHABLE(); | 2560 UNREACHABLE(); |
| 2418 } | 2561 } |
| 2419 } | 2562 } |
| 2420 // Emit a nop in the branch delay slot if required. | 2563 // Emit a nop in the branch delay slot if required. |
| 2421 if (bdslot == PROTECT) | 2564 if (bdslot == PROTECT) |
| 2422 nop(); | 2565 nop(); |
| 2423 } | 2566 } |
| 2424 | 2567 |
| 2425 | 2568 |
| (...skipping 36 matching lines...) |
| 2462 break; | 2605 break; |
| 2463 case ne: | 2606 case ne: |
| 2464 beq(rs, r2, 2); | 2607 beq(rs, r2, 2); |
| 2465 nop(); | 2608 nop(); |
| 2466 offset = shifted_branch_offset(L, false); | 2609 offset = shifted_branch_offset(L, false); |
| 2467 bal(offset); | 2610 bal(offset); |
| 2468 break; | 2611 break; |
| 2469 | 2612 |
| 2470 // Signed comparison. | 2613 // Signed comparison. |
| 2471 case greater: | 2614 case greater: |
| 2615 // rs > rt |
| 2472 slt(scratch, r2, rs); | 2616 slt(scratch, r2, rs); |
| 2473 daddiu(scratch, scratch, -1); | 2617 beq(scratch, zero_reg, 2); |
| 2618 nop(); |
| 2474 offset = shifted_branch_offset(L, false); | 2619 offset = shifted_branch_offset(L, false); |
| 2475 bgezal(scratch, offset); | 2620 bal(offset); |
| 2476 break; | 2621 break; |
| 2477 case greater_equal: | 2622 case greater_equal: |
| 2623 // rs >= rt |
| 2478 slt(scratch, rs, r2); | 2624 slt(scratch, rs, r2); |
| 2479 daddiu(scratch, scratch, -1); | 2625 bne(scratch, zero_reg, 2); |
| 2626 nop(); |
| 2480 offset = shifted_branch_offset(L, false); | 2627 offset = shifted_branch_offset(L, false); |
| 2481 bltzal(scratch, offset); | 2628 bal(offset); |
| 2482 break; | 2629 break; |
| 2483 case less: | 2630 case less: |
| 2631 // rs < r2 |
| 2484 slt(scratch, rs, r2); | 2632 slt(scratch, rs, r2); |
| 2485 daddiu(scratch, scratch, -1); | 2633 bne(scratch, zero_reg, 2); |
| 2634 nop(); |
| 2486 offset = shifted_branch_offset(L, false); | 2635 offset = shifted_branch_offset(L, false); |
| 2487 bgezal(scratch, offset); | 2636 bal(offset); |
| 2488 break; | 2637 break; |
| 2489 case less_equal: | 2638 case less_equal: |
| 2639 // rs <= r2 |
| 2490 slt(scratch, r2, rs); | 2640 slt(scratch, r2, rs); |
| 2491 daddiu(scratch, scratch, -1); | 2641 bne(scratch, zero_reg, 2); |
| 2642 nop(); |
| 2492 offset = shifted_branch_offset(L, false); | 2643 offset = shifted_branch_offset(L, false); |
| 2493 bltzal(scratch, offset); | 2644 bal(offset); |
| 2494 break; | 2645 break; |
| 2495 | 2646 |
| 2647 |
| 2496 // Unsigned comparison. | 2648 // Unsigned comparison. |
| 2497 case Ugreater: | 2649 case Ugreater: |
| 2650 // rs > rt |
| 2498 sltu(scratch, r2, rs); | 2651 sltu(scratch, r2, rs); |
| 2499 daddiu(scratch, scratch, -1); | 2652 beq(scratch, zero_reg, 2); |
| 2653 nop(); |
| 2500 offset = shifted_branch_offset(L, false); | 2654 offset = shifted_branch_offset(L, false); |
| 2501 bgezal(scratch, offset); | 2655 bal(offset); |
| 2502 break; | 2656 break; |
| 2503 case Ugreater_equal: | 2657 case Ugreater_equal: |
| 2658 // rs >= rt |
| 2504 sltu(scratch, rs, r2); | 2659 sltu(scratch, rs, r2); |
| 2505 daddiu(scratch, scratch, -1); | 2660 bne(scratch, zero_reg, 2); |
| 2661 nop(); |
| 2506 offset = shifted_branch_offset(L, false); | 2662 offset = shifted_branch_offset(L, false); |
| 2507 bltzal(scratch, offset); | 2663 bal(offset); |
| 2508 break; | 2664 break; |
| 2509 case Uless: | 2665 case Uless: |
| 2666 // rs < r2 |
| 2510 sltu(scratch, rs, r2); | 2667 sltu(scratch, rs, r2); |
| 2511 daddiu(scratch, scratch, -1); | 2668 bne(scratch, zero_reg, 2); |
| 2669 nop(); |
| 2512 offset = shifted_branch_offset(L, false); | 2670 offset = shifted_branch_offset(L, false); |
| 2513 bgezal(scratch, offset); | 2671 bal(offset); |
| 2514 break; | 2672 break; |
| 2515 case Uless_equal: | 2673 case Uless_equal: |
| 2674 // rs <= r2 |
| 2516 sltu(scratch, r2, rs); | 2675 sltu(scratch, r2, rs); |
| 2517 daddiu(scratch, scratch, -1); | 2676 bne(scratch, zero_reg, 2); |
| 2677 nop(); |
| 2518 offset = shifted_branch_offset(L, false); | 2678 offset = shifted_branch_offset(L, false); |
| 2519 bltzal(scratch, offset); | 2679 bal(offset); |
| 2520 break; | 2680 break; |
| 2521 | 2681 |
| 2522 default: | 2682 default: |
| 2523 UNREACHABLE(); | 2683 UNREACHABLE(); |
| 2524 } | 2684 } |
| 2525 } | 2685 } |
| 2526 // Check that offset could actually hold on an int16_t. | 2686 // Check that offset could actually hold on an int16_t. |
| 2527 ASSERT(is_int16(offset)); | 2687 ASSERT(is_int16(offset)); |
| 2528 | 2688 |
| 2529 // Emit a nop in the branch delay slot if required. | 2689 // Emit a nop in the branch delay slot if required. |
| (...skipping 2918 matching lines...) |
| 5448 dsra(value, value, kImm16Bits); | 5608 dsra(value, value, kImm16Bits); |
| 5449 } | 5609 } |
| 5450 | 5610 |
| 5451 | 5611 |
| 5452 void MacroAssembler::CheckPageFlag( | 5612 void MacroAssembler::CheckPageFlag( |
| 5453 Register object, | 5613 Register object, |
| 5454 Register scratch, | 5614 Register scratch, |
| 5455 int mask, | 5615 int mask, |
| 5456 Condition cc, | 5616 Condition cc, |
| 5457 Label* condition_met) { | 5617 Label* condition_met) { |
| 5458 // TODO(plind): Fix li() so we can use constant embedded inside And(). | 5618 And(scratch, object, Operand(~Page::kPageAlignmentMask)); |
| 5459 // And(scratch, object, Operand(~Page::kPageAlignmentMask)); | |
| 5460 li(at, Operand(~Page::kPageAlignmentMask), CONSTANT_SIZE); // plind HACK | |
| 5461 And(scratch, object, at); | |
| 5462 ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); | 5619 ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); |
| 5463 And(scratch, scratch, Operand(mask)); | 5620 And(scratch, scratch, Operand(mask)); |
| 5464 Branch(condition_met, cc, scratch, Operand(zero_reg)); | 5621 Branch(condition_met, cc, scratch, Operand(zero_reg)); |
| 5465 } | 5622 } |
| 5466 | 5623 |
| 5467 | 5624 |
| 5468 void MacroAssembler::CheckMapDeprecated(Handle<Map> map, | 5625 void MacroAssembler::CheckMapDeprecated(Handle<Map> map, |
| 5469 Register scratch, | 5626 Register scratch, |
| 5470 Label* if_deprecated) { | 5627 Label* if_deprecated) { |
| 5471 if (map->CanBeDeprecated()) { | 5628 if (map->CanBeDeprecated()) { |
| (...skipping 453 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5925 | 6082 |
| 5926 | 6083 |
| 5927 void MacroAssembler::TruncatingDiv(Register result, | 6084 void MacroAssembler::TruncatingDiv(Register result, |
| 5928 Register dividend, | 6085 Register dividend, |
| 5929 int32_t divisor) { | 6086 int32_t divisor) { |
| 5930 ASSERT(!dividend.is(result)); | 6087 ASSERT(!dividend.is(result)); |
| 5931 ASSERT(!dividend.is(at)); | 6088 ASSERT(!dividend.is(at)); |
| 5932 ASSERT(!result.is(at)); | 6089 ASSERT(!result.is(at)); |
| 5933 MultiplierAndShift ms(divisor); | 6090 MultiplierAndShift ms(divisor); |
| 5934 li(at, Operand(ms.multiplier())); | 6091 li(at, Operand(ms.multiplier())); |
| 5935 Mult(dividend, Operand(at)); | 6092 Mulh(result, dividend, Operand(at)); |
| 5936 mfhi(result); | |
| 5937 if (divisor > 0 && ms.multiplier() < 0) { | 6093 if (divisor > 0 && ms.multiplier() < 0) { |
| 5938 Addu(result, result, Operand(dividend)); | 6094 Addu(result, result, Operand(dividend)); |
| 5939 } | 6095 } |
| 5940 if (divisor < 0 && ms.multiplier() > 0) { | 6096 if (divisor < 0 && ms.multiplier() > 0) { |
| 5941 Subu(result, result, Operand(dividend)); | 6097 Subu(result, result, Operand(dividend)); |
| 5942 } | 6098 } |
| 5943 if (ms.shift() > 0) sra(result, result, ms.shift()); | 6099 if (ms.shift() > 0) sra(result, result, ms.shift()); |
| 5944 srl(at, dividend, 31); | 6100 srl(at, dividend, 31); |
| 5945 Addu(result, result, Operand(at)); | 6101 Addu(result, result, Operand(at)); |
| 5946 } | 6102 } |
| 5947 | 6103 |
| 5948 | 6104 |
| 5949 } } // namespace v8::internal | 6105 } } // namespace v8::internal |
| 5950 | 6106 |
| 5951 #endif // V8_TARGET_ARCH_MIPS64 | 6107 #endif // V8_TARGET_ARCH_MIPS64 |
| OLD | NEW |