OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <limits.h> // For LONG_MIN, LONG_MAX. | 5 #include <limits.h> // For LONG_MIN, LONG_MAX. |
6 | 6 |
7 #include "src/v8.h" | 7 #include "src/v8.h" |
8 | 8 |
9 #if V8_TARGET_ARCH_MIPS64 | 9 #if V8_TARGET_ARCH_MIPS64 |
10 | 10 |
11 #include "src/base/division-by-constant.h" | 11 #include "src/base/division-by-constant.h" |
12 #include "src/bootstrapper.h" | 12 #include "src/bootstrapper.h" |
13 #include "src/codegen.h" | 13 #include "src/codegen.h" |
14 #include "src/cpu-profiler.h" | 14 #include "src/cpu-profiler.h" |
15 #include "src/debug.h" | 15 #include "src/debug.h" |
16 #include "src/isolate-inl.h" | 16 #include "src/isolate-inl.h" |
17 #include "src/runtime/runtime.h" | 17 #include "src/runtime/runtime.h" |
18 | 18 |
19 namespace v8 { | 19 namespace v8 { |
20 namespace internal { | 20 namespace internal { |
21 | 21 |
22 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) | 22 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) |
23 : Assembler(arg_isolate, buffer, size), | 23 : Assembler(arg_isolate, buffer, size), |
24 generating_stub_(false), | 24 generating_stub_(false), |
25 has_frame_(false) { | 25 has_frame_(false), |
| 26 has_double_zero_reg_set_(false) { |
26 if (isolate() != NULL) { | 27 if (isolate() != NULL) { |
27 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), | 28 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), |
28 isolate()); | 29 isolate()); |
29 } | 30 } |
30 } | 31 } |
31 | 32 |
32 | 33 |
33 void MacroAssembler::Load(Register dst, | 34 void MacroAssembler::Load(Register dst, |
34 const MemOperand& src, | 35 const MemOperand& src, |
35 Representation r) { | 36 Representation r) { |
(...skipping 668 matching lines...) |
704 if (kArchVariant != kMips64r6) { | 705 if (kArchVariant != kMips64r6) { |
705 mult(rs, at); | 706 mult(rs, at); |
706 mfhi(rd); | 707 mfhi(rd); |
707 } else { | 708 } else { |
708 muh(rd, rs, at); | 709 muh(rd, rs, at); |
709 } | 710 } |
710 } | 711 } |
711 } | 712 } |
712 | 713 |
713 | 714 |
| 715 void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) { |
| 716 if (rt.is_reg()) { |
| 717 if (kArchVariant != kMips64r6) { |
| 718 multu(rs, rt.rm()); |
| 719 mfhi(rd); |
| 720 } else { |
| 721 muhu(rd, rs, rt.rm()); |
| 722 } |
| 723 } else { |
| 724 // li handles the relocation. |
| 725 DCHECK(!rs.is(at)); |
| 726 li(at, rt); |
| 727 if (kArchVariant != kMips64r6) { |
| 728 multu(rs, at); |
| 729 mfhi(rd); |
| 730 } else { |
| 731 muhu(rd, rs, at); |
| 732 } |
| 733 } |
| 734 } |
| 735 |
| 736 |
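Note on the multiply-high helpers above: pre-r6 encodings write the full 64-bit product into the HI/LO pair (mult/multu, then mfhi to read the upper half), while r6 retires HI/LO in favour of the three-operand muh/muhu. A minimal model of the value both sequences leave in rd, in plain standard C++ (illustration only, not V8 code):

  #include <cstdint>

  // Signed multiply-high: what `mult rs, rt; mfhi rd` (pre-r6) or
  // `muh rd, rs, rt` (r6) leaves in rd.
  int32_t Mulh(int32_t rs, int32_t rt) {
    return static_cast<int32_t>((static_cast<int64_t>(rs) * rt) >> 32);
  }

  // Unsigned variant, matching `multu` + `mfhi` / `muhu`.
  uint32_t Mulhu(uint32_t rs, uint32_t rt) {
    return static_cast<uint32_t>((static_cast<uint64_t>(rs) * rt) >> 32);
  }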
714 void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) { | 737 void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) { |
715 if (rt.is_reg()) { | 738 if (rt.is_reg()) { |
716 if (kArchVariant == kMips64r6) { | 739 if (kArchVariant == kMips64r6) { |
717 dmul(rd, rs, rt.rm()); | 740 dmul(rd, rs, rt.rm()); |
718 } else { | 741 } else { |
719 dmult(rs, rt.rm()); | 742 dmult(rs, rt.rm()); |
720 mflo(rd); | 743 mflo(rd); |
721 } | 744 } |
722 } else { | 745 } else { |
723 // li handles the relocation. | 746 // li handles the relocation. |
(...skipping 84 matching lines...) |
808 div(rs, rt.rm()); | 831 div(rs, rt.rm()); |
809 } else { | 832 } else { |
810 // li handles the relocation. | 833 // li handles the relocation. |
811 DCHECK(!rs.is(at)); | 834 DCHECK(!rs.is(at)); |
812 li(at, rt); | 835 li(at, rt); |
813 div(rs, at); | 836 div(rs, at); |
814 } | 837 } |
815 } | 838 } |
816 | 839 |
817 | 840 |
| 841 void MacroAssembler::Div(Register res, Register rs, const Operand& rt) { |
| 842 if (rt.is_reg()) { |
| 843 if (kArchVariant != kMips64r6) { |
| 844 div(rs, rt.rm()); |
| 845 mflo(res); |
| 846 } else { |
| 847 div(res, rs, rt.rm()); |
| 848 } |
| 849 } else { |
| 850 // li handles the relocation. |
| 851 DCHECK(!rs.is(at)); |
| 852 li(at, rt); |
| 853 if (kArchVariant != kMips64r6) { |
| 854 div(rs, at); |
| 855 mflo(res); |
| 856 } else { |
| 857 div(res, rs, at); |
| 858 } |
| 859 } |
| 860 } |
| 861 |
| 862 |
| 863 void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) { |
| 864 if (rt.is_reg()) { |
| 865 if (kArchVariant != kMips64r6) { |
| 866 div(rs, rt.rm()); |
| 867 mfhi(rd); |
| 868 } else { |
| 869 mod(rd, rs, rt.rm()); |
| 870 } |
| 871 } else { |
| 872 // li handles the relocation. |
| 873 DCHECK(!rs.is(at)); |
| 874 li(at, rt); |
| 875 if (kArchVariant != kMips64r6) { |
| 876 div(rs, at); |
| 877 mfhi(rd); |
| 878 } else { |
| 879 mod(rd, rs, at); |
| 880 } |
| 881 } |
| 882 } |
| 883 |
| 884 |
| 885 void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) { |
| 886 if (rt.is_reg()) { |
| 887 if (kArchVariant != kMips64r6) { |
| 888 divu(rs, rt.rm()); |
| 889 mfhi(rd); |
| 890 } else { |
| 891 modu(rd, rs, rt.rm()); |
| 892 } |
| 893 } else { |
| 894 // li handles the relocation. |
| 895 DCHECK(!rs.is(at)); |
| 896 li(at, rt); |
| 897 if (kArchVariant != kMips64r6) { |
| 898 divu(rs, at); |
| 899 mfhi(rd); |
| 900 } else { |
| 901 modu(rd, rs, at); |
| 902 } |
| 903 } |
| 904 } |
| 905 |
| 906 |
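The same r6-versus-legacy split drives every new Div/Mod/Modu helper: legacy div/divu leave the quotient in LO and the remainder in HI (read back with mflo/mfhi), whereas r6 provides direct three-operand div/mod/divu/modu. What ends up in the destination register, modelled in plain C++ (MIPS division truncates toward zero and the remainder takes the dividend's sign, matching C++ semantics; a zero divisor is UNPREDICTABLE on hardware, so the model asserts instead):

  #include <cassert>
  #include <cstdint>

  // `div rs, rt; mflo rd` (legacy) or `div rd, rs, rt` (r6).
  int32_t Quot(int32_t rs, int32_t rt) {
    assert(rt != 0);  // UNPREDICTABLE on MIPS; guard the model instead.
    return rs / rt;   // truncates toward zero
  }

  // `div rs, rt; mfhi rd` (legacy) or `mod rd, rs, rt` (r6).
  int32_t Rem(int32_t rs, int32_t rt) {
    assert(rt != 0);
    return rs % rt;   // Rem(-7, 2) == -1: sign follows the dividend
  }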
818 void MacroAssembler::Ddiv(Register rs, const Operand& rt) { | 907 void MacroAssembler::Ddiv(Register rs, const Operand& rt) { |
819 if (rt.is_reg()) { | 908 if (rt.is_reg()) { |
820 ddiv(rs, rt.rm()); | 909 ddiv(rs, rt.rm()); |
821 } else { | 910 } else { |
822 // li handles the relocation. | 911 // li handles the relocation. |
823 DCHECK(!rs.is(at)); | 912 DCHECK(!rs.is(at)); |
824 li(at, rt); | 913 li(at, rt); |
825 ddiv(rs, at); | 914 ddiv(rs, at); |
826 } | 915 } |
827 } | 916 } |
(...skipping 29 matching lines...) |
857 divu(rs, rt.rm()); | 946 divu(rs, rt.rm()); |
858 } else { | 947 } else { |
859 // li handles the relocation. | 948 // li handles the relocation. |
860 DCHECK(!rs.is(at)); | 949 DCHECK(!rs.is(at)); |
861 li(at, rt); | 950 li(at, rt); |
862 divu(rs, at); | 951 divu(rs, at); |
863 } | 952 } |
864 } | 953 } |
865 | 954 |
866 | 955 |
| 956 void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) { |
| 957 if (rt.is_reg()) { |
| 958 if (kArchVariant != kMips64r6) { |
| 959 divu(rs, rt.rm()); |
| 960 mflo(res); |
| 961 } else { |
| 962 divu(res, rs, rt.rm()); |
| 963 } |
| 964 } else { |
| 965 // li handles the relocation. |
| 966 DCHECK(!rs.is(at)); |
| 967 li(at, rt); |
| 968 if (kArchVariant != kMips64r6) { |
| 969 divu(rs, at); |
| 970 mflo(res); |
| 971 } else { |
| 972 divu(res, rs, at); |
| 973 } |
| 974 } |
| 975 } |
| 976 |
| 977 |
867 void MacroAssembler::Ddivu(Register rs, const Operand& rt) { | 978 void MacroAssembler::Ddivu(Register rs, const Operand& rt) { |
868 if (rt.is_reg()) { | 979 if (rt.is_reg()) { |
869 ddivu(rs, rt.rm()); | 980 ddivu(rs, rt.rm()); |
870 } else { | 981 } else { |
871 // li handles the relocation. | 982 // li handles the relocation. |
872 DCHECK(!rs.is(at)); | 983 DCHECK(!rs.is(at)); |
873 li(at, rt); | 984 li(at, rt); |
874 ddivu(rs, at); | 985 ddivu(rs, at); |
875 } | 986 } |
876 } | 987 } |
877 | 988 |
878 | 989 |
| 990 void MacroAssembler::Ddivu(Register res, Register rs, const Operand& rt) { |
| 991 if (rt.is_reg()) { |
| 992 if (kArchVariant != kMips64r6) { |
| 993 ddivu(rs, rt.rm()); |
| 994 mflo(res); |
| 995 } else { |
| 996 ddivu(res, rs, rt.rm()); |
| 997 } |
| 998 } else { |
| 999 // li handles the relocation. |
| 1000 DCHECK(!rs.is(at)); |
| 1001 li(at, rt); |
| 1002 if (kArchVariant != kMips64r6) { |
| 1003 ddivu(rs, at); |
| 1004 mflo(res); |
| 1005 } else { |
| 1006 ddivu(res, rs, at); |
| 1007 } |
| 1008 } |
| 1009 } |
| 1010 |
| 1011 |
879 void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) { | 1012 void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) { |
880 if (kArchVariant != kMips64r6) { | 1013 if (kArchVariant != kMips64r6) { |
881 if (rt.is_reg()) { | 1014 if (rt.is_reg()) { |
882 ddiv(rs, rt.rm()); | 1015 ddiv(rs, rt.rm()); |
883 mfhi(rd); | 1016 mfhi(rd); |
884 } else { | 1017 } else { |
885 // li handles the relocation. | 1018 // li handles the relocation. |
886 DCHECK(!rs.is(at)); | 1019 DCHECK(!rs.is(at)); |
887 li(at, rt); | 1020 li(at, rt); |
888 ddiv(rs, at); | 1021 ddiv(rs, at); |
889 mfhi(rd); | 1022 mfhi(rd); |
890 } | 1023 } |
891 } else { | 1024 } else { |
892 if (rt.is_reg()) { | 1025 if (rt.is_reg()) { |
893 dmod(rd, rs, rt.rm()); | 1026 dmod(rd, rs, rt.rm()); |
894 } else { | 1027 } else { |
895 // li handles the relocation. | 1028 // li handles the relocation. |
896 DCHECK(!rs.is(at)); | 1029 DCHECK(!rs.is(at)); |
897 li(at, rt); | 1030 li(at, rt); |
898 dmod(rd, rs, at); | 1031 dmod(rd, rs, at); |
899 } | 1032 } |
900 } | 1033 } |
901 } | 1034 } |
902 | 1035 |
903 | 1036 |
| 1037 void MacroAssembler::Dmodu(Register rd, Register rs, const Operand& rt) { |
| 1038 if (kArchVariant != kMips64r6) { |
| 1039 if (rt.is_reg()) { |
| 1040 ddivu(rs, rt.rm()); |
| 1041 mfhi(rd); |
| 1042 } else { |
| 1043 // li handles the relocation. |
| 1044 DCHECK(!rs.is(at)); |
| 1045 li(at, rt); |
| 1046 ddivu(rs, at); |
| 1047 mfhi(rd); |
| 1048 } |
| 1049 } else { |
| 1050 if (rt.is_reg()) { |
| 1051 dmodu(rd, rs, rt.rm()); |
| 1052 } else { |
| 1053 // li handles the relocation. |
| 1054 DCHECK(!rs.is(at)); |
| 1055 li(at, rt); |
| 1056 dmodu(rd, rs, at); |
| 1057 } |
| 1058 } |
| 1059 } |
| 1060 |
| 1061 |
904 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) { | 1062 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) { |
905 if (rt.is_reg()) { | 1063 if (rt.is_reg()) { |
906 and_(rd, rs, rt.rm()); | 1064 and_(rd, rs, rt.rm()); |
907 } else { | 1065 } else { |
908 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) { | 1066 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) { |
909 andi(rd, rs, rt.imm64_); | 1067 andi(rd, rs, rt.imm64_); |
910 } else { | 1068 } else { |
911 // li handles the relocation. | 1069 // li handles the relocation. |
912 DCHECK(!rs.is(at)); | 1070 DCHECK(!rs.is(at)); |
913 li(at, rt); | 1071 li(at, rt); |
(...skipping 82 matching lines...) |
996 // li handles the relocation. | 1154 // li handles the relocation. |
997 DCHECK(!rs.is(at)); | 1155 DCHECK(!rs.is(at)); |
998 li(at, rt); | 1156 li(at, rt); |
999 sltu(rd, rs, at); | 1157 sltu(rd, rs, at); |
1000 } | 1158 } |
1001 } | 1159 } |
1002 } | 1160 } |
1003 | 1161 |
1004 | 1162 |
1005 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) { | 1163 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) { |
1006 if (kArchVariant == kMips64r2) { | 1164 if (rt.is_reg()) { |
1007 if (rt.is_reg()) { | 1165 rotrv(rd, rs, rt.rm()); |
1008 rotrv(rd, rs, rt.rm()); | |
1009 } else { | |
1010 rotr(rd, rs, rt.imm64_); | |
1011 } | |
1012 } else { | 1166 } else { |
1013 if (rt.is_reg()) { | 1167 rotr(rd, rs, rt.imm64_); |
1014 subu(at, zero_reg, rt.rm()); | |
1015 sllv(at, rs, at); | |
1016 srlv(rd, rs, rt.rm()); | |
1017 or_(rd, rd, at); | |
1018 } else { | |
1019 if (rt.imm64_ == 0) { | |
1020 srl(rd, rs, 0); | |
1021 } else { | |
1022 srl(at, rs, rt.imm64_); | |
1023 sll(rd, rs, (0x20 - rt.imm64_) & 0x1f); | |
1024 or_(rd, rd, at); | |
1025 } | |
1026 } | |
1027 } | 1168 } |
1028 } | 1169 } |
1029 | 1170 |
1030 | 1171 |
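The Ror rewrite above drops the pre-r2 fallback path entirely: every MIPS64 variant has rotr/rotrv, so the macro can always emit them. For reference, the deleted fallback synthesized the rotate from two shifts and an OR, equivalent to this plain-C++ sketch:

  #include <cstdint>

  // Rotate right by n (0..31) without a rotate instruction -- what the
  // removed srl/sll/or_ sequence computed. n == 0 is special-cased to
  // avoid the out-of-range shift x << 32.
  uint32_t RotateRight32(uint32_t x, uint32_t n) {
    n &= 31;
    if (n == 0) return x;
    return (x >> n) | (x << (32 - n));
  }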
1031 void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) { | 1172 void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) { |
1032 if (rt.is_reg()) { | 1173 if (rt.is_reg()) { |
1033 drotrv(rd, rs, rt.rm()); | 1174 drotrv(rd, rs, rt.rm()); |
1034 } else { | 1175 } else { |
1035 drotr(rd, rs, rt.imm64_); | 1176 drotr(rd, rs, rt.imm64_); |
1036 } | 1177 } |
(...skipping 237 matching lines...) |
1274 void MacroAssembler::Ext(Register rt, | 1415 void MacroAssembler::Ext(Register rt, |
1275 Register rs, | 1416 Register rs, |
1276 uint16_t pos, | 1417 uint16_t pos, |
1277 uint16_t size) { | 1418 uint16_t size) { |
1278 DCHECK(pos < 32); | 1419 DCHECK(pos < 32); |
1279 DCHECK(pos + size < 33); | 1420 DCHECK(pos + size < 33); |
1280 ext_(rt, rs, pos, size); | 1421 ext_(rt, rs, pos, size); |
1281 } | 1422 } |
1282 | 1423 |
1283 | 1424 |
| 1425 void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos, |
| 1426 uint16_t size) { |
| 1427 DCHECK(pos < 32); |
| 1428 DCHECK(pos + size < 33); |
| 1429 dext_(rt, rs, pos, size); |
| 1430 } |
| 1431 |
| 1432 |
1284 void MacroAssembler::Ins(Register rt, | 1433 void MacroAssembler::Ins(Register rt, |
1285 Register rs, | 1434 Register rs, |
1286 uint16_t pos, | 1435 uint16_t pos, |
1287 uint16_t size) { | 1436 uint16_t size) { |
1288 DCHECK(pos < 32); | 1437 DCHECK(pos < 32); |
1289 DCHECK(pos + size <= 32); | 1438 DCHECK(pos + size <= 32); |
1290 DCHECK(size != 0); | 1439 DCHECK(size != 0); |
1291 ins_(rt, rs, pos, size); | 1440 ins_(rt, rs, pos, size); |
1292 } | 1441 } |
1293 | 1442 |
(...skipping 271 matching lines...) |
1565 nop(); | 1714 nop(); |
1566 } | 1715 } |
1567 } | 1716 } |
1568 | 1717 |
1569 | 1718 |
1570 void MacroAssembler::Move(FPURegister dst, double imm) { | 1719 void MacroAssembler::Move(FPURegister dst, double imm) { |
1571 static const DoubleRepresentation minus_zero(-0.0); | 1720 static const DoubleRepresentation minus_zero(-0.0); |
1572 static const DoubleRepresentation zero(0.0); | 1721 static const DoubleRepresentation zero(0.0); |
1573 DoubleRepresentation value_rep(imm); | 1722 DoubleRepresentation value_rep(imm); |
1574 // Handle special values first. | 1723 // Handle special values first. |
1575 bool force_load = dst.is(kDoubleRegZero); | 1724 if (value_rep == zero && has_double_zero_reg_set_) { |
1576 if (value_rep == zero && !force_load) { | |
1577 mov_d(dst, kDoubleRegZero); | 1725 mov_d(dst, kDoubleRegZero); |
1578 } else if (value_rep == minus_zero && !force_load) { | 1726 } else if (value_rep == minus_zero && has_double_zero_reg_set_) { |
1579 neg_d(dst, kDoubleRegZero); | 1727 neg_d(dst, kDoubleRegZero); |
1580 } else { | 1728 } else { |
1581 uint32_t lo, hi; | 1729 uint32_t lo, hi; |
1582 DoubleAsTwoUInt32(imm, &lo, &hi); | 1730 DoubleAsTwoUInt32(imm, &lo, &hi); |
1583 // Move the low part of the double into the lower bits of the corresponding | 1731 // Move the low part of the double into the lower bits of the corresponding |
1584 // FPU register. | 1732 // FPU register. |
1585 if (lo != 0) { | 1733 if (lo != 0) { |
1586 li(at, Operand(lo)); | 1734 li(at, Operand(lo)); |
1587 mtc1(at, dst); | 1735 mtc1(at, dst); |
1588 } else { | 1736 } else { |
1589 mtc1(zero_reg, dst); | 1737 mtc1(zero_reg, dst); |
1590 } | 1738 } |
1591 // Move the high part of the double into the high bits of the corresponding | 1739 // Move the high part of the double into the high bits of the corresponding |
1592 // FPU register. | 1740 // FPU register. |
1593 if (hi != 0) { | 1741 if (hi != 0) { |
1594 li(at, Operand(hi)); | 1742 li(at, Operand(hi)); |
1595 mthc1(at, dst); | 1743 mthc1(at, dst); |
1596 } else { | 1744 } else { |
1597 mthc1(zero_reg, dst); | 1745 mthc1(zero_reg, dst); |
1598 } | 1746 } |
| 1747 if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true; |
1599 } | 1748 } |
1600 } | 1749 } |
1601 | 1750 |
1602 | 1751 |
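The new has_double_zero_reg_set_ flag tracks whether kDoubleRegZero has actually been materialized yet; before this change the fast paths assumed it always held +0.0. Once it is set, loading -0.0 is a single neg_d, because the two zeros differ only in the IEEE-754 sign bit. A bit-level illustration in plain C++:

  #include <cstdint>
  #include <cstring>

  // +0.0 and -0.0 differ only in bit 63 of the encoding, which is why
  // `neg_d(dst, kDoubleRegZero)` is enough once the zero register is set.
  uint64_t Bits(double d) {
    uint64_t b;
    std::memcpy(&b, &d, sizeof b);
    return b;
  }
  // Bits(0.0)  == 0x0000000000000000
  // Bits(-0.0) == 0x8000000000000000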
1603 void MacroAssembler::Movz(Register rd, Register rs, Register rt) { | 1752 void MacroAssembler::Movz(Register rd, Register rs, Register rt) { |
1604 if (kArchVariant == kMips64r6) { | 1753 if (kArchVariant == kMips64r6) { |
1605 Label done; | 1754 Label done; |
1606 Branch(&done, ne, rt, Operand(zero_reg)); | 1755 Branch(&done, ne, rt, Operand(zero_reg)); |
1607 mov(rd, rs); | 1756 mov(rd, rs); |
1608 bind(&done); | 1757 bind(&done); |
(...skipping 384 matching lines...) |
1993 } else { | 2142 } else { |
1994 // Be careful to always use shifted_branch_offset only just before the | 2143 // Be careful to always use shifted_branch_offset only just before the |
1995 // branch instruction, as the location will be remembered for patching the | 2144 // branch instruction, as the location will be remembered for patching the |
1996 // target. | 2145 // target. |
1997 BlockTrampolinePoolScope block_trampoline_pool(this); | 2146 BlockTrampolinePoolScope block_trampoline_pool(this); |
1998 switch (cond) { | 2147 switch (cond) { |
1999 case cc_always: | 2148 case cc_always: |
2000 b(offset); | 2149 b(offset); |
2001 break; | 2150 break; |
2002 case eq: | 2151 case eq: |
2003 // We don't want any other register but scratch clobbered. | 2152 if (rt.imm64_ == 0) { |
2004 DCHECK(!scratch.is(rs)); | 2153 beq(rs, zero_reg, offset); |
2005 r2 = scratch; | 2154 } else { |
2006 li(r2, rt); | 2155 // We don't want any other register but scratch clobbered. |
2007 beq(rs, r2, offset); | 2156 DCHECK(!scratch.is(rs)); |
| 2157 r2 = scratch; |
| 2158 li(r2, rt); |
| 2159 beq(rs, r2, offset); |
| 2160 } |
2008 break; | 2161 break; |
2009 case ne: | 2162 case ne: |
2010 // We don't want any other register but scratch clobbered. | 2163 if (rt.imm64_ == 0) { |
2011 DCHECK(!scratch.is(rs)); | 2164 bne(rs, zero_reg, offset); |
2012 r2 = scratch; | 2165 } else { |
2013 li(r2, rt); | 2166 // We don't want any other register but scratch clobbered. |
2014 bne(rs, r2, offset); | 2167 DCHECK(!scratch.is(rs)); |
| 2168 r2 = scratch; |
| 2169 li(r2, rt); |
| 2170 bne(rs, r2, offset); |
| 2171 } |
2015 break; | 2172 break; |
2016 // Signed comparison. | 2173 // Signed comparison. |
2017 case greater: | 2174 case greater: |
2018 if (rt.imm64_ == 0) { | 2175 if (rt.imm64_ == 0) { |
2019 bgtz(rs, offset); | 2176 bgtz(rs, offset); |
2020 } else { | 2177 } else { |
2021 r2 = scratch; | 2178 r2 = scratch; |
2022 li(r2, rt); | 2179 li(r2, rt); |
2023 slt(scratch, r2, rs); | 2180 slt(scratch, r2, rs); |
2024 bne(scratch, zero_reg, offset); | 2181 bne(scratch, zero_reg, offset); |
(...skipping 221 matching lines...) |
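The eq/ne changes in the switch above (and in the label-taking overload below) are a small strength reduction: when the immediate operand is 0 there is nothing to materialize, since MIPS hard-wires register zero, so the two-instruction li-into-scratch-then-branch sequence shrinks to a single beq/bne against zero_reg. The decision, sketched in plain C++ (hypothetical types, for illustration only):

  #include <cstdint>

  struct EmitPlan {
    bool against_zero_reg;  // true -> `beq rs, zero_reg, offset` (1 insn)
    bool needs_scratch;     // true -> `li at, imm; beq rs, at, offset` (2+ insns)
  };

  EmitPlan PlanBranchEq(int64_t imm) {
    return imm == 0 ? EmitPlan{true, false} : EmitPlan{false, true};
  }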
2246 // Be careful to always use shifted_branch_offset only just before the | 2403 // Be careful to always use shifted_branch_offset only just before the |
2247 // branch instruction, as the location will be remembered for patching the | 2404 // branch instruction, as the location will be remembered for patching the |
2248 // target. | 2405 // target. |
2249 BlockTrampolinePoolScope block_trampoline_pool(this); | 2406 BlockTrampolinePoolScope block_trampoline_pool(this); |
2250 switch (cond) { | 2407 switch (cond) { |
2251 case cc_always: | 2408 case cc_always: |
2252 offset = shifted_branch_offset(L, false); | 2409 offset = shifted_branch_offset(L, false); |
2253 b(offset); | 2410 b(offset); |
2254 break; | 2411 break; |
2255 case eq: | 2412 case eq: |
2256 DCHECK(!scratch.is(rs)); | 2413 if (rt.imm64_ == 0) { |
2257 r2 = scratch; | 2414 offset = shifted_branch_offset(L, false); |
2258 li(r2, rt); | 2415 beq(rs, zero_reg, offset); |
2259 offset = shifted_branch_offset(L, false); | 2416 } else { |
2260 beq(rs, r2, offset); | 2417 DCHECK(!scratch.is(rs)); |
| 2418 r2 = scratch; |
| 2419 li(r2, rt); |
| 2420 offset = shifted_branch_offset(L, false); |
| 2421 beq(rs, r2, offset); |
| 2422 } |
2261 break; | 2423 break; |
2262 case ne: | 2424 case ne: |
2263 DCHECK(!scratch.is(rs)); | 2425 if (rt.imm64_ == 0) { |
2264 r2 = scratch; | 2426 offset = shifted_branch_offset(L, false); |
2265 li(r2, rt); | 2427 bne(rs, zero_reg, offset); |
2266 offset = shifted_branch_offset(L, false); | 2428 } else { |
2267 bne(rs, r2, offset); | 2429 DCHECK(!scratch.is(rs)); |
| 2430 r2 = scratch; |
| 2431 li(r2, rt); |
| 2432 offset = shifted_branch_offset(L, false); |
| 2433 bne(rs, r2, offset); |
| 2434 } |
2268 break; | 2435 break; |
2269 // Signed comparison. | 2436 // Signed comparison. |
2270 case greater: | 2437 case greater: |
2271 if (rt.imm64_ == 0) { | 2438 if (rt.imm64_ == 0) { |
2272 offset = shifted_branch_offset(L, false); | 2439 offset = shifted_branch_offset(L, false); |
2273 bgtz(rs, offset); | 2440 bgtz(rs, offset); |
2274 } else { | 2441 } else { |
2275 DCHECK(!scratch.is(rs)); | 2442 DCHECK(!scratch.is(rs)); |
2276 r2 = scratch; | 2443 r2 = scratch; |
2277 li(r2, rt); | 2444 li(r2, rt); |
(...skipping 2082 matching lines...) |
4360 void MacroAssembler::SmiToDoubleFPURegister(Register smi, | 4527 void MacroAssembler::SmiToDoubleFPURegister(Register smi, |
4361 FPURegister value, | 4528 FPURegister value, |
4362 Register scratch1) { | 4529 Register scratch1) { |
4363 // dsra(scratch1, smi, kSmiTagSize); | 4530 // dsra(scratch1, smi, kSmiTagSize); |
4364 dsra32(scratch1, smi, 0); | 4531 dsra32(scratch1, smi, 0); |
4365 mtc1(scratch1, value); | 4532 mtc1(scratch1, value); |
4366 cvt_d_w(value, value); | 4533 cvt_d_w(value, value); |
4367 } | 4534 } |
4368 | 4535 |
4369 | 4536 |
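SmiToDoubleFPURegister relies on the 64-bit smi layout: the 32-bit payload sits in the upper word (kSmiShift is 32 on 64-bit targets), so dsra32(scratch1, smi, 0), an arithmetic shift right by 32, recovers the signed value in one instruction; the commented-out dsra by kSmiTagSize is the 32-bit-layout equivalent it replaces. In plain C++:

  #include <cstdint>

  // 64-bit smi encoding assumed here: payload in bits 63..32, zeros below.
  int64_t SmiTag(int32_t value) { return static_cast<int64_t>(value) << 32; }

  // What `dsra32(scratch1, smi, 0)` leaves in scratch1: the payload,
  // sign-extended and ready for mtc1 + cvt_d_w.
  int32_t SmiUntag(int64_t smi) { return static_cast<int32_t>(smi >> 32); }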
| 4537 void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left, |
| 4538 const Operand& right, |
| 4539 Register overflow_dst, |
| 4540 Register scratch) { |
| 4541 if (right.is_reg()) { |
| 4542 AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch); |
| 4543 } else { |
| 4544 if (dst.is(left)) { |
| 4545 mov(scratch, left); // Preserve left. |
| 4546 daddiu(dst, left, right.immediate()); // Left is overwritten. |
| 4547 xor_(scratch, dst, scratch); // Original left. |
| 4548 // Load right since xori takes uint16 as immediate. |
| 4549 daddiu(t9, zero_reg, right.immediate()); |
| 4550 xor_(overflow_dst, dst, t9); |
| 4551 and_(overflow_dst, overflow_dst, scratch); |
| 4552 } else { |
| 4553 daddiu(dst, left, right.immediate()); |
| 4554 xor_(overflow_dst, dst, left); |
| 4555 // Load right since xori takes uint16 as immediate. |
| 4556 daddiu(t9, zero_reg, right.immediate()); |
| 4557 xor_(scratch, dst, t9); |
| 4558 and_(overflow_dst, scratch, overflow_dst); |
| 4559 } |
| 4560 } |
| 4561 } |
| 4562 |
| 4563 |
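Both AdduAndCheckForOverflow bodies use the classic sign-bit identity: a two's-complement add overflows exactly when the operands share a sign that the result does not, i.e. when (dst ^ left) & (dst ^ right) has its sign bit set, so overflow_dst comes out negative on overflow and non-negative otherwise. (The detour through t9 exists because xori only accepts an unsigned 16-bit immediate, as the comments note.) Checking the identity in plain C++:

  #include <cstdint>

  // Signed-add overflow iff sign(dst) != sign(left) AND sign(dst) != sign(right).
  bool AddOverflows(int64_t left, int64_t right) {
    int64_t dst = static_cast<int64_t>(
        static_cast<uint64_t>(left) + static_cast<uint64_t>(right));  // wraps, no UB
    return ((dst ^ left) & (dst ^ right)) < 0;  // sign bit of the AND
  }
  // AddOverflows(INT64_MAX, 1) == true; AddOverflows(-1, 1) == false.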
4370 void MacroAssembler::AdduAndCheckForOverflow(Register dst, | 4564 void MacroAssembler::AdduAndCheckForOverflow(Register dst, |
4371 Register left, | 4565 Register left, |
4372 Register right, | 4566 Register right, |
4373 Register overflow_dst, | 4567 Register overflow_dst, |
4374 Register scratch) { | 4568 Register scratch) { |
4375 DCHECK(!dst.is(overflow_dst)); | 4569 DCHECK(!dst.is(overflow_dst)); |
4376 DCHECK(!dst.is(scratch)); | 4570 DCHECK(!dst.is(scratch)); |
4377 DCHECK(!overflow_dst.is(scratch)); | 4571 DCHECK(!overflow_dst.is(scratch)); |
4378 DCHECK(!overflow_dst.is(left)); | 4572 DCHECK(!overflow_dst.is(left)); |
4379 DCHECK(!overflow_dst.is(right)); | 4573 DCHECK(!overflow_dst.is(right)); |
(...skipping 22 matching lines...) Expand all Loading... |
4402 and_(overflow_dst, overflow_dst, scratch); | 4596 and_(overflow_dst, overflow_dst, scratch); |
4403 } else { | 4597 } else { |
4404 daddu(dst, left, right); | 4598 daddu(dst, left, right); |
4405 xor_(overflow_dst, dst, left); | 4599 xor_(overflow_dst, dst, left); |
4406 xor_(scratch, dst, right); | 4600 xor_(scratch, dst, right); |
4407 and_(overflow_dst, scratch, overflow_dst); | 4601 and_(overflow_dst, scratch, overflow_dst); |
4408 } | 4602 } |
4409 } | 4603 } |
4410 | 4604 |
4411 | 4605 |
| 4606 void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left, |
| 4607 const Operand& right, |
| 4608 Register overflow_dst, |
| 4609 Register scratch) { |
| 4610 if (right.is_reg()) { |
| 4611 SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch); |
| 4612 } else { |
| 4613 if (dst.is(left)) { |
| 4614 mov(scratch, left); // Preserve left. |
| 4615 daddiu(dst, left, -(right.immediate())); // Left is overwritten. |
| 4616 xor_(overflow_dst, dst, scratch); // scratch is original left. |
| 4617 // Load right since xori takes uint16 as immediate. |
| 4618 daddiu(t9, zero_reg, right.immediate()); |
| 4619 xor_(scratch, scratch, t9); // scratch is original left. |
| 4620 and_(overflow_dst, scratch, overflow_dst); |
| 4621 } else { |
| 4622 daddiu(dst, left, -(right.immediate())); |
| 4623 xor_(overflow_dst, dst, left); |
| 4624 // Load right since xori takes uint16 as immediate. |
| 4625 daddiu(t9, zero_reg, right.immediate()); |
| 4626 xor_(scratch, left, t9); |
| 4627 and_(overflow_dst, scratch, overflow_dst); |
| 4628 } |
| 4629 } |
| 4630 } |
| 4631 |
| 4632 |
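Subtraction uses the mirrored identity: left - right overflows exactly when the operands have different signs and the result's sign differs from left's, i.e. when (dst ^ left) & (left ^ right) is negative, which is precisely the pair of xors both branches above feed into and_. In plain C++:

  #include <cstdint>

  // Signed-subtract overflow iff sign(left) != sign(right) AND sign(dst) != sign(left).
  bool SubOverflows(int64_t left, int64_t right) {
    int64_t dst = static_cast<int64_t>(
        static_cast<uint64_t>(left) - static_cast<uint64_t>(right));  // wraps, no UB
    return ((dst ^ left) & (left ^ right)) < 0;
  }
  // SubOverflows(INT64_MIN, 1) == true; SubOverflows(0, 1) == false.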
4412 void MacroAssembler::SubuAndCheckForOverflow(Register dst, | 4633 void MacroAssembler::SubuAndCheckForOverflow(Register dst, |
4413 Register left, | 4634 Register left, |
4414 Register right, | 4635 Register right, |
4415 Register overflow_dst, | 4636 Register overflow_dst, |
4416 Register scratch) { | 4637 Register scratch) { |
4417 DCHECK(!dst.is(overflow_dst)); | 4638 DCHECK(!dst.is(overflow_dst)); |
4418 DCHECK(!dst.is(scratch)); | 4639 DCHECK(!dst.is(scratch)); |
4419 DCHECK(!overflow_dst.is(scratch)); | 4640 DCHECK(!overflow_dst.is(scratch)); |
4420 DCHECK(!overflow_dst.is(left)); | 4641 DCHECK(!overflow_dst.is(left)); |
4421 DCHECK(!overflow_dst.is(right)); | 4642 DCHECK(!overflow_dst.is(right)); |
(...skipping 1665 matching lines...) |
6087 } | 6308 } |
6088 if (mag.shift > 0) sra(result, result, mag.shift); | 6309 if (mag.shift > 0) sra(result, result, mag.shift); |
6089 srl(at, dividend, 31); | 6310 srl(at, dividend, 31); |
6090 Addu(result, result, Operand(at)); | 6311 Addu(result, result, Operand(at)); |
6091 } | 6312 } |
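The tail visible above is the standard epilogue of signed division by a constant via a magic multiplier (Hacker's Delight style): after the multiply-high and the optional sra by mag.shift, srl(at, dividend, 31) extracts the dividend's sign bit, and adding it corrects the floor-style result to C-style truncation toward zero. The correction step, modelled in plain C++:

  #include <cstdint>

  // `q` is the value after the multiply-high (plus any `sra` by mag.shift);
  // adding the dividend's sign bit turns floor division into
  // truncate-toward-zero division.
  int32_t CorrectQuotient(int32_t q, int32_t dividend) {
    uint32_t sign = static_cast<uint32_t>(dividend) >> 31;  // srl at, dividend, 31
    return q + static_cast<int32_t>(sign);
  }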
6092 | 6313 |
6093 | 6314 |
6094 } } // namespace v8::internal | 6315 } } // namespace v8::internal |
6095 | 6316 |
6096 #endif // V8_TARGET_ARCH_MIPS64 | 6317 #endif // V8_TARGET_ARCH_MIPS64 |