OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <limits.h> // For LONG_MIN, LONG_MAX. | 5 #include <limits.h> // For LONG_MIN, LONG_MAX. |
6 | 6 |
7 #include "src/v8.h" | 7 #include "src/v8.h" |
8 | 8 |
9 #if V8_TARGET_ARCH_MIPS | 9 #if V8_TARGET_ARCH_MIPS |
10 | 10 |
(...skipping 623 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
634 DCHECK(!rs.is(at)); | 634 DCHECK(!rs.is(at)); |
635 li(at, rt); | 635 li(at, rt); |
636 subu(rd, rs, at); | 636 subu(rd, rs, at); |
637 } | 637 } |
638 } | 638 } |
639 } | 639 } |
640 | 640 |
641 | 641 |
642 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) { | 642 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) { |
643 if (rt.is_reg()) { | 643 if (rt.is_reg()) { |
644 if (kArchVariant == kLoongson) { | 644 if (IsMipsArchVariant(kLoongson)) { |
645 mult(rs, rt.rm()); | 645 mult(rs, rt.rm()); |
646 mflo(rd); | 646 mflo(rd); |
647 } else { | 647 } else { |
648 mul(rd, rs, rt.rm()); | 648 mul(rd, rs, rt.rm()); |
649 } | 649 } |
650 } else { | 650 } else { |
651 // li handles the relocation. | 651 // li handles the relocation. |
652 DCHECK(!rs.is(at)); | 652 DCHECK(!rs.is(at)); |
653 li(at, rt); | 653 li(at, rt); |
654 if (kArchVariant == kLoongson) { | 654 if (IsMipsArchVariant(kLoongson)) { |
655 mult(rs, at); | 655 mult(rs, at); |
656 mflo(rd); | 656 mflo(rd); |
657 } else { | 657 } else { |
658 mul(rd, rs, at); | 658 mul(rd, rs, at); |
659 } | 659 } |
660 } | 660 } |
661 } | 661 } |
662 | 662 |
663 | 663 |
| 664 void MacroAssembler::Mul(Register rd_hi, Register rd_lo, |
| 665 Register rs, const Operand& rt) { |
| 666 if (rt.is_reg()) { |
| 667 if (!IsMipsArchVariant(kMips32r6)) { |
| 668 mult(rs, rt.rm()); |
| 669 mflo(rd_lo); |
| 670 mfhi(rd_hi); |
| 671 } else { |
| 672 if (rd_lo.is(rs)) { |
| 673 DCHECK(!rd_hi.is(rs)); |
| 674 DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm())); |
| 675 muh(rd_hi, rs, rt.rm()); |
| 676 mul(rd_lo, rs, rt.rm()); |
| 677 } else { |
| 678 DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm())); |
| 679 mul(rd_lo, rs, rt.rm()); |
| 680 muh(rd_hi, rs, rt.rm()); |
| 681 } |
| 682 } |
| 683 } else { |
| 684 // li handles the relocation. |
| 685 DCHECK(!rs.is(at)); |
| 686 li(at, rt); |
| 687 if (!IsMipsArchVariant(kMips32r6)) { |
| 688 mult(rs, at); |
| 689 mflo(rd_lo); |
| 690 mfhi(rd_hi); |
| 691 } else { |
| 692 if (rd_lo.is(rs)) { |
| 693 DCHECK(!rd_hi.is(rs)); |
| 694 DCHECK(!rd_hi.is(at) && !rd_lo.is(at)); |
| 695 muh(rd_hi, rs, at); |
| 696 mul(rd_lo, rs, at); |
| 697 } else { |
| 698 DCHECK(!rd_hi.is(at) && !rd_lo.is(at)); |
| 699 mul(rd_lo, rs, at); |
| 700 muh(rd_hi, rs, at); |
| 701 } |
| 702 } |
| 703 } |
| 704 } |
| 705 |
| 706 |
| 707 void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) { |
| 708 if (rt.is_reg()) { |
| 709 if (!IsMipsArchVariant(kMips32r6)) { |
| 710 mult(rs, rt.rm()); |
| 711 mfhi(rd); |
| 712 } else { |
| 713 muh(rd, rs, rt.rm()); |
| 714 } |
| 715 } else { |
| 716 // li handles the relocation. |
| 717 DCHECK(!rs.is(at)); |
| 718 li(at, rt); |
| 719 if (!IsMipsArchVariant(kMips32r6)) { |
| 720 mult(rs, at); |
| 721 mfhi(rd); |
| 722 } else { |
| 723 muh(rd, rs, at); |
| 724 } |
| 725 } |
| 726 } |
| 727 |
| 728 |
664 void MacroAssembler::Mult(Register rs, const Operand& rt) { | 729 void MacroAssembler::Mult(Register rs, const Operand& rt) { |
665 if (rt.is_reg()) { | 730 if (rt.is_reg()) { |
666 mult(rs, rt.rm()); | 731 mult(rs, rt.rm()); |
667 } else { | 732 } else { |
668 // li handles the relocation. | 733 // li handles the relocation. |
669 DCHECK(!rs.is(at)); | 734 DCHECK(!rs.is(at)); |
670 li(at, rt); | 735 li(at, rt); |
671 mult(rs, at); | 736 mult(rs, at); |
672 } | 737 } |
673 } | 738 } |
(...skipping 16 matching lines...) Expand all Loading... |
690 div(rs, rt.rm()); | 755 div(rs, rt.rm()); |
691 } else { | 756 } else { |
692 // li handles the relocation. | 757 // li handles the relocation. |
693 DCHECK(!rs.is(at)); | 758 DCHECK(!rs.is(at)); |
694 li(at, rt); | 759 li(at, rt); |
695 div(rs, at); | 760 div(rs, at); |
696 } | 761 } |
697 } | 762 } |
698 | 763 |
699 | 764 |
| 765 void MacroAssembler::Div(Register rem, Register res, |
| 766 Register rs, const Operand& rt) { |
| 767 if (rt.is_reg()) { |
| 768 if (!IsMipsArchVariant(kMips32r6)) { |
| 769 div(rs, rt.rm()); |
| 770 mflo(res); |
| 771 mfhi(rem); |
| 772 } else { |
| 773 div(res, rs, rt.rm()); |
| 774 mod(rem, rs, rt.rm()); |
| 775 } |
| 776 } else { |
| 777 // li handles the relocation. |
| 778 DCHECK(!rs.is(at)); |
| 779 li(at, rt); |
| 780 if (!IsMipsArchVariant(kMips32r6)) { |
| 781 div(rs, at); |
| 782 mflo(res); |
| 783 mfhi(rem); |
| 784 } else { |
| 785 div(res, rs, at); |
| 786 mod(rem, rs, at); |
| 787 } |
| 788 } |
| 789 } |
| 790 |
| 791 |
| 792 void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) { |
| 793 if (rt.is_reg()) { |
| 794 if (!IsMipsArchVariant(kMips32r6)) { |
| 795 div(rs, rt.rm()); |
| 796 mfhi(rd); |
| 797 } else { |
| 798 mod(rd, rs, rt.rm()); |
| 799 } |
| 800 } else { |
| 801 // li handles the relocation. |
| 802 DCHECK(!rs.is(at)); |
| 803 li(at, rt); |
| 804 if (!IsMipsArchVariant(kMips32r6)) { |
| 805 div(rs, at); |
| 806 mfhi(rd); |
| 807 } else { |
| 808 mod(rd, rs, at); |
| 809 } |
| 810 } |
| 811 } |
| 812 |
| 813 |
700 void MacroAssembler::Divu(Register rs, const Operand& rt) { | 814 void MacroAssembler::Divu(Register rs, const Operand& rt) { |
701 if (rt.is_reg()) { | 815 if (rt.is_reg()) { |
702 divu(rs, rt.rm()); | 816 divu(rs, rt.rm()); |
703 } else { | 817 } else { |
704 // li handles the relocation. | 818 // li handles the relocation. |
705 DCHECK(!rs.is(at)); | 819 DCHECK(!rs.is(at)); |
706 li(at, rt); | 820 li(at, rt); |
707 divu(rs, at); | 821 divu(rs, at); |
708 } | 822 } |
709 } | 823 } |
(...skipping 94 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
804 // li handles the relocation. | 918 // li handles the relocation. |
805 DCHECK(!rs.is(at)); | 919 DCHECK(!rs.is(at)); |
806 li(at, rt); | 920 li(at, rt); |
807 sltu(rd, rs, at); | 921 sltu(rd, rs, at); |
808 } | 922 } |
809 } | 923 } |
810 } | 924 } |
811 | 925 |
812 | 926 |
813 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) { | 927 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) { |
814 if (kArchVariant == kMips32r2) { | 928 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { |
815 if (rt.is_reg()) { | 929 if (rt.is_reg()) { |
816 rotrv(rd, rs, rt.rm()); | 930 rotrv(rd, rs, rt.rm()); |
817 } else { | 931 } else { |
818 rotr(rd, rs, rt.imm32_); | 932 rotr(rd, rs, rt.imm32_); |
819 } | 933 } |
820 } else { | 934 } else { |
821 if (rt.is_reg()) { | 935 if (rt.is_reg()) { |
822 subu(at, zero_reg, rt.rm()); | 936 subu(at, zero_reg, rt.rm()); |
823 sllv(at, rs, at); | 937 sllv(at, rs, at); |
824 srlv(rd, rs, rt.rm()); | 938 srlv(rd, rs, rt.rm()); |
825 or_(rd, rd, at); | 939 or_(rd, rd, at); |
826 } else { | 940 } else { |
827 if (rt.imm32_ == 0) { | 941 if (rt.imm32_ == 0) { |
828 srl(rd, rs, 0); | 942 srl(rd, rs, 0); |
829 } else { | 943 } else { |
830 srl(at, rs, rt.imm32_); | 944 srl(at, rs, rt.imm32_); |
831 sll(rd, rs, (0x20 - rt.imm32_) & 0x1f); | 945 sll(rd, rs, (0x20 - rt.imm32_) & 0x1f); |
832 or_(rd, rd, at); | 946 or_(rd, rd, at); |
833 } | 947 } |
834 } | 948 } |
835 } | 949 } |
836 } | 950 } |
837 | 951 |
838 | 952 |
839 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) { | 953 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) { |
840 if (kArchVariant == kLoongson) { | 954 if (IsMipsArchVariant(kLoongson)) { |
841 lw(zero_reg, rs); | 955 lw(zero_reg, rs); |
842 } else { | 956 } else { |
843 pref(hint, rs); | 957 pref(hint, rs); |
844 } | 958 } |
845 } | 959 } |
846 | 960 |
847 | 961 |
848 // ------------Pseudo-instructions------------- | 962 // ------------Pseudo-instructions------------- |
849 | 963 |
850 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) { | 964 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) { |
(...skipping 175 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1026 } | 1140 } |
1027 | 1141 |
1028 | 1142 |
1029 void MacroAssembler::Ext(Register rt, | 1143 void MacroAssembler::Ext(Register rt, |
1030 Register rs, | 1144 Register rs, |
1031 uint16_t pos, | 1145 uint16_t pos, |
1032 uint16_t size) { | 1146 uint16_t size) { |
1033 DCHECK(pos < 32); | 1147 DCHECK(pos < 32); |
1034 DCHECK(pos + size < 33); | 1148 DCHECK(pos + size < 33); |
1035 | 1149 |
1036 if (kArchVariant == kMips32r2) { | 1150 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { |
1037 ext_(rt, rs, pos, size); | 1151 ext_(rt, rs, pos, size); |
1038 } else { | 1152 } else { |
1039 // Move rs to rt and shift it left then right to get the | 1153 // Move rs to rt and shift it left then right to get the |
1040 // desired bitfield on the right side and zeroes on the left. | 1154 // desired bitfield on the right side and zeroes on the left. |
1041 int shift_left = 32 - (pos + size); | 1155 int shift_left = 32 - (pos + size); |
1042 sll(rt, rs, shift_left); // Acts as a move if shift_left == 0. | 1156 sll(rt, rs, shift_left); // Acts as a move if shift_left == 0. |
1043 | 1157 |
1044 int shift_right = 32 - size; | 1158 int shift_right = 32 - size; |
1045 if (shift_right > 0) { | 1159 if (shift_right > 0) { |
1046 srl(rt, rt, shift_right); | 1160 srl(rt, rt, shift_right); |
1047 } | 1161 } |
1048 } | 1162 } |
1049 } | 1163 } |
1050 | 1164 |
1051 | 1165 |
1052 void MacroAssembler::Ins(Register rt, | 1166 void MacroAssembler::Ins(Register rt, |
1053 Register rs, | 1167 Register rs, |
1054 uint16_t pos, | 1168 uint16_t pos, |
1055 uint16_t size) { | 1169 uint16_t size) { |
1056 DCHECK(pos < 32); | 1170 DCHECK(pos < 32); |
1057 DCHECK(pos + size <= 32); | 1171 DCHECK(pos + size <= 32); |
1058 DCHECK(size != 0); | 1172 DCHECK(size != 0); |
1059 | 1173 |
1060 if (kArchVariant == kMips32r2) { | 1174 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { |
1061 ins_(rt, rs, pos, size); | 1175 ins_(rt, rs, pos, size); |
1062 } else { | 1176 } else { |
1063 DCHECK(!rt.is(t8) && !rs.is(t8)); | 1177 DCHECK(!rt.is(t8) && !rs.is(t8)); |
1064 Subu(at, zero_reg, Operand(1)); | 1178 Subu(at, zero_reg, Operand(1)); |
1065 srl(at, at, 32 - size); | 1179 srl(at, at, 32 - size); |
1066 and_(t8, rs, at); | 1180 and_(t8, rs, at); |
1067 sll(t8, t8, pos); | 1181 sll(t8, t8, pos); |
1068 sll(at, at, pos); | 1182 sll(at, at, pos); |
1069 nor(at, at, zero_reg); | 1183 nor(at, at, zero_reg); |
1070 and_(at, rt, at); | 1184 and_(at, rt, at); |
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1104 cvt_d_w(fd, fd); | 1218 cvt_d_w(fd, fd); |
1105 | 1219 |
1106 Label conversion_done; | 1220 Label conversion_done; |
1107 | 1221 |
1108 // If rs's MSB was 0, it's done. | 1222 // If rs's MSB was 0, it's done. |
1109 // Otherwise we need to add that to the FP register. | 1223 // Otherwise we need to add that to the FP register. |
1110 Branch(&conversion_done, eq, t9, Operand(zero_reg)); | 1224 Branch(&conversion_done, eq, t9, Operand(zero_reg)); |
1111 | 1225 |
1112 // Load 2^31 into f20 as its float representation. | 1226 // Load 2^31 into f20 as its float representation. |
1113 li(at, 0x41E00000); | 1227 li(at, 0x41E00000); |
1114 mtc1(at, FPURegister::from_code(scratch.code() + 1)); | |
1115 mtc1(zero_reg, scratch); | 1228 mtc1(zero_reg, scratch); |
| 1229 Mthc1(at, scratch); |
1116 // Add it to fd. | 1230 // Add it to fd. |
1117 add_d(fd, fd, scratch); | 1231 add_d(fd, fd, scratch); |
1118 | 1232 |
1119 bind(&conversion_done); | 1233 bind(&conversion_done); |
1120 } | 1234 } |
1121 | 1235 |
1122 | 1236 |
1123 void MacroAssembler::Trunc_uw_d(FPURegister fd, | 1237 void MacroAssembler::Trunc_uw_d(FPURegister fd, |
1124 FPURegister fs, | 1238 FPURegister fs, |
1125 FPURegister scratch) { | 1239 FPURegister scratch) { |
1126 Trunc_uw_d(fs, t8, scratch); | 1240 Trunc_uw_d(fs, t8, scratch); |
1127 mtc1(t8, fd); | 1241 mtc1(t8, fd); |
1128 } | 1242 } |
1129 | 1243 |
1130 | 1244 |
1131 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) { | 1245 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) { |
1132 if (kArchVariant == kLoongson && fd.is(fs)) { | 1246 if (IsMipsArchVariant(kLoongson) && fd.is(fs)) { |
1133 mfc1(t8, FPURegister::from_code(fs.code() + 1)); | 1247 Mfhc1(t8, fs); |
1134 trunc_w_d(fd, fs); | 1248 trunc_w_d(fd, fs); |
1135 mtc1(t8, FPURegister::from_code(fs.code() + 1)); | 1249 Mthc1(t8, fs); |
1136 } else { | 1250 } else { |
1137 trunc_w_d(fd, fs); | 1251 trunc_w_d(fd, fs); |
1138 } | 1252 } |
1139 } | 1253 } |
1140 | 1254 |
1141 | 1255 |
1142 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) { | 1256 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) { |
1143 if (kArchVariant == kLoongson && fd.is(fs)) { | 1257 if (IsMipsArchVariant(kLoongson) && fd.is(fs)) { |
1144 mfc1(t8, FPURegister::from_code(fs.code() + 1)); | 1258 Mfhc1(t8, fs); |
1145 round_w_d(fd, fs); | 1259 round_w_d(fd, fs); |
1146 mtc1(t8, FPURegister::from_code(fs.code() + 1)); | 1260 Mthc1(t8, fs); |
1147 } else { | 1261 } else { |
1148 round_w_d(fd, fs); | 1262 round_w_d(fd, fs); |
1149 } | 1263 } |
1150 } | 1264 } |
1151 | 1265 |
1152 | 1266 |
1153 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) { | 1267 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) { |
1154 if (kArchVariant == kLoongson && fd.is(fs)) { | 1268 if (IsMipsArchVariant(kLoongson) && fd.is(fs)) { |
1155 mfc1(t8, FPURegister::from_code(fs.code() + 1)); | 1269 Mfhc1(t8, fs); |
1156 floor_w_d(fd, fs); | 1270 floor_w_d(fd, fs); |
1157 mtc1(t8, FPURegister::from_code(fs.code() + 1)); | 1271 Mthc1(t8, fs); |
1158 } else { | 1272 } else { |
1159 floor_w_d(fd, fs); | 1273 floor_w_d(fd, fs); |
1160 } | 1274 } |
1161 } | 1275 } |
1162 | 1276 |
1163 | 1277 |
1164 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) { | 1278 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) { |
1165 if (kArchVariant == kLoongson && fd.is(fs)) { | 1279 if (IsMipsArchVariant(kLoongson) && fd.is(fs)) { |
1166 mfc1(t8, FPURegister::from_code(fs.code() + 1)); | 1280 Mfhc1(t8, fs); |
1167 ceil_w_d(fd, fs); | 1281 ceil_w_d(fd, fs); |
1168 mtc1(t8, FPURegister::from_code(fs.code() + 1)); | 1282 Mthc1(t8, fs); |
1169 } else { | 1283 } else { |
1170 ceil_w_d(fd, fs); | 1284 ceil_w_d(fd, fs); |
1171 } | 1285 } |
1172 } | 1286 } |
1173 | 1287 |
1174 | 1288 |
1175 void MacroAssembler::Trunc_uw_d(FPURegister fd, | 1289 void MacroAssembler::Trunc_uw_d(FPURegister fd, |
1176 Register rs, | 1290 Register rs, |
1177 FPURegister scratch) { | 1291 FPURegister scratch) { |
1178 DCHECK(!fd.is(scratch)); | 1292 DCHECK(!fd.is(scratch)); |
1179 DCHECK(!rs.is(at)); | 1293 DCHECK(!rs.is(at)); |
1180 | 1294 |
1181 // Load 2^31 into scratch as its float representation. | 1295 // Load 2^31 into scratch as its float representation. |
1182 li(at, 0x41E00000); | 1296 li(at, 0x41E00000); |
1183 mtc1(at, FPURegister::from_code(scratch.code() + 1)); | |
1184 mtc1(zero_reg, scratch); | 1297 mtc1(zero_reg, scratch); |
| 1298 Mthc1(at, scratch); |
1185 // Test if scratch > fd. | 1299 // Test if scratch > fd. |
1186 // If fd < 2^31 we can convert it normally. | 1300 // If fd < 2^31 we can convert it normally. |
1187 Label simple_convert; | 1301 Label simple_convert; |
1188 BranchF(&simple_convert, NULL, lt, fd, scratch); | 1302 BranchF(&simple_convert, NULL, lt, fd, scratch); |
1189 | 1303 |
1190 // First we subtract 2^31 from fd, then trunc it to rs | 1304 // First we subtract 2^31 from fd, then trunc it to rs |
1191 // and add 2^31 to rs. | 1305 // and add 2^31 to rs. |
1192 sub_d(scratch, fd, scratch); | 1306 sub_d(scratch, fd, scratch); |
1193 trunc_w_d(scratch, scratch); | 1307 trunc_w_d(scratch, scratch); |
1194 mfc1(rs, scratch); | 1308 mfc1(rs, scratch); |
1195 Or(rs, rs, 1 << 31); | 1309 Or(rs, rs, 1 << 31); |
1196 | 1310 |
1197 Label done; | 1311 Label done; |
1198 Branch(&done); | 1312 Branch(&done); |
1199 // Simple conversion. | 1313 // Simple conversion. |
1200 bind(&simple_convert); | 1314 bind(&simple_convert); |
1201 trunc_w_d(scratch, fd); | 1315 trunc_w_d(scratch, fd); |
1202 mfc1(rs, scratch); | 1316 mfc1(rs, scratch); |
1203 | 1317 |
1204 bind(&done); | 1318 bind(&done); |
1205 } | 1319 } |
1206 | 1320 |
1207 | 1321 |
| 1322 void MacroAssembler::Mthc1(Register rt, FPURegister fs) { |
| 1323 if (IsFp64Mode()) { |
| 1324 mthc1(rt, fs); |
| 1325 } else { |
| 1326 mtc1(rt, fs.high()); |
| 1327 } |
| 1328 } |
| 1329 |
| 1330 |
| 1331 void MacroAssembler::Mfhc1(Register rt, FPURegister fs) { |
| 1332 if (IsFp64Mode()) { |
| 1333 mfhc1(rt, fs); |
| 1334 } else { |
| 1335 mfc1(rt, fs.high()); |
| 1336 } |
| 1337 } |
| 1338 |
| 1339 |
1208 void MacroAssembler::BranchF(Label* target, | 1340 void MacroAssembler::BranchF(Label* target, |
1209 Label* nan, | 1341 Label* nan, |
1210 Condition cc, | 1342 Condition cc, |
1211 FPURegister cmp1, | 1343 FPURegister cmp1, |
1212 FPURegister cmp2, | 1344 FPURegister cmp2, |
1213 BranchDelaySlot bd) { | 1345 BranchDelaySlot bd) { |
1214 BlockTrampolinePoolScope block_trampoline_pool(this); | 1346 BlockTrampolinePoolScope block_trampoline_pool(this); |
1215 if (cc == al) { | 1347 if (cc == al) { |
1216 Branch(bd, target); | 1348 Branch(bd, target); |
1217 return; | 1349 return; |
1218 } | 1350 } |
1219 | 1351 |
1220 DCHECK(nan || target); | 1352 DCHECK(nan || target); |
1221 // Check for unordered (NaN) cases. | 1353 // Check for unordered (NaN) cases. |
1222 if (nan) { | 1354 if (nan) { |
1223 c(UN, D, cmp1, cmp2); | 1355 if (!IsMipsArchVariant(kMips32r6)) { |
1224 bc1t(nan); | 1356 c(UN, D, cmp1, cmp2); |
1225 } | 1357 bc1t(nan); |
1226 | 1358 } else { |
1227 if (target) { | 1359 // Use kDoubleCompareReg for comparison result. It has to be unavailable |
1228 // Here NaN cases were either handled by this function or are assumed to | 1360 // to lithium register allocator. |
1229 // have been handled by the caller. | 1361 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg)); |
1230 // Unsigned conditions are treated as their signed counterpart. | 1362 cmp(UN, L, kDoubleCompareReg, cmp1, cmp2); |
1231 switch (cc) { | 1363 bc1nez(nan, kDoubleCompareReg); |
1232 case lt: | |
1233 c(OLT, D, cmp1, cmp2); | |
1234 bc1t(target); | |
1235 break; | |
1236 case gt: | |
1237 c(ULE, D, cmp1, cmp2); | |
1238 bc1f(target); | |
1239 break; | |
1240 case ge: | |
1241 c(ULT, D, cmp1, cmp2); | |
1242 bc1f(target); | |
1243 break; | |
1244 case le: | |
1245 c(OLE, D, cmp1, cmp2); | |
1246 bc1t(target); | |
1247 break; | |
1248 case eq: | |
1249 c(EQ, D, cmp1, cmp2); | |
1250 bc1t(target); | |
1251 break; | |
1252 case ueq: | |
1253 c(UEQ, D, cmp1, cmp2); | |
1254 bc1t(target); | |
1255 break; | |
1256 case ne: | |
1257 c(EQ, D, cmp1, cmp2); | |
1258 bc1f(target); | |
1259 break; | |
1260 case nue: | |
1261 c(UEQ, D, cmp1, cmp2); | |
1262 bc1f(target); | |
1263 break; | |
1264 default: | |
1265 CHECK(0); | |
1266 } | 1364 } |
1267 } | 1365 } |
1268 | 1366 |
| 1367 if (!IsMipsArchVariant(kMips32r6)) { |
| 1368 if (target) { |
| 1369 // Here NaN cases were either handled by this function or are assumed to |
| 1370 // have been handled by the caller. |
| 1371 switch (cc) { |
| 1372 case lt: |
| 1373 c(OLT, D, cmp1, cmp2); |
| 1374 bc1t(target); |
| 1375 break; |
| 1376 case gt: |
| 1377 c(ULE, D, cmp1, cmp2); |
| 1378 bc1f(target); |
| 1379 break; |
| 1380 case ge: |
| 1381 c(ULT, D, cmp1, cmp2); |
| 1382 bc1f(target); |
| 1383 break; |
| 1384 case le: |
| 1385 c(OLE, D, cmp1, cmp2); |
| 1386 bc1t(target); |
| 1387 break; |
| 1388 case eq: |
| 1389 c(EQ, D, cmp1, cmp2); |
| 1390 bc1t(target); |
| 1391 break; |
| 1392 case ueq: |
| 1393 c(UEQ, D, cmp1, cmp2); |
| 1394 bc1t(target); |
| 1395 break; |
| 1396 case ne: |
| 1397 c(EQ, D, cmp1, cmp2); |
| 1398 bc1f(target); |
| 1399 break; |
| 1400 case nue: |
| 1401 c(UEQ, D, cmp1, cmp2); |
| 1402 bc1f(target); |
| 1403 break; |
| 1404 default: |
| 1405 CHECK(0); |
| 1406 } |
| 1407 } |
| 1408 } else { |
| 1409 if (target) { |
| 1410 // Here NaN cases were either handled by this function or are assumed to |
| 1411 // have been handled by the caller. |
| 1412 // Unsigned conditions are treated as their signed counterpart. |
| 1413 // Use kDoubleCompareReg for comparison result, it is |
| 1414 // valid in fp64 (FR = 1) mode which is implied for mips32r6. |
| 1415 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg)); |
| 1416 switch (cc) { |
| 1417 case lt: |
| 1418 cmp(OLT, L, kDoubleCompareReg, cmp1, cmp2); |
| 1419 bc1nez(target, kDoubleCompareReg); |
| 1420 break; |
| 1421 case gt: |
| 1422 cmp(ULE, L, kDoubleCompareReg, cmp1, cmp2); |
| 1423 bc1eqz(target, kDoubleCompareReg); |
| 1424 break; |
| 1425 case ge: |
| 1426 cmp(ULT, L, kDoubleCompareReg, cmp1, cmp2); |
| 1427 bc1eqz(target, kDoubleCompareReg); |
| 1428 break; |
| 1429 case le: |
| 1430 cmp(OLE, L, kDoubleCompareReg, cmp1, cmp2); |
| 1431 bc1nez(target, kDoubleCompareReg); |
| 1432 break; |
| 1433 case eq: |
| 1434 cmp(EQ, L, kDoubleCompareReg, cmp1, cmp2); |
| 1435 bc1nez(target, kDoubleCompareReg); |
| 1436 break; |
| 1437 case ueq: |
| 1438 cmp(UEQ, L, kDoubleCompareReg, cmp1, cmp2); |
| 1439 bc1nez(target, kDoubleCompareReg); |
| 1440 break; |
| 1441 case ne: |
| 1442 cmp(EQ, L, kDoubleCompareReg, cmp1, cmp2); |
| 1443 bc1eqz(target, kDoubleCompareReg); |
| 1444 break; |
| 1445 case nue: |
| 1446 cmp(UEQ, L, kDoubleCompareReg, cmp1, cmp2); |
| 1447 bc1eqz(target, kDoubleCompareReg); |
| 1448 break; |
| 1449 default: |
| 1450 CHECK(0); |
| 1451 } |
| 1452 } |
| 1453 } |
| 1454 |
1269 if (bd == PROTECT) { | 1455 if (bd == PROTECT) { |
1270 nop(); | 1456 nop(); |
1271 } | 1457 } |
1272 } | 1458 } |
1273 | 1459 |
1274 | 1460 |
1275 void MacroAssembler::Move(FPURegister dst, double imm) { | 1461 void MacroAssembler::Move(FPURegister dst, double imm) { |
1276 static const DoubleRepresentation minus_zero(-0.0); | 1462 static const DoubleRepresentation minus_zero(-0.0); |
1277 static const DoubleRepresentation zero(0.0); | 1463 static const DoubleRepresentation zero(0.0); |
1278 DoubleRepresentation value_rep(imm); | 1464 DoubleRepresentation value_rep(imm); |
(...skipping 11 matching lines...) Expand all Loading... |
1290 if (lo != 0) { | 1476 if (lo != 0) { |
1291 li(at, Operand(lo)); | 1477 li(at, Operand(lo)); |
1292 mtc1(at, dst); | 1478 mtc1(at, dst); |
1293 } else { | 1479 } else { |
1294 mtc1(zero_reg, dst); | 1480 mtc1(zero_reg, dst); |
1295 } | 1481 } |
1296 // Move the high part of the double into the higher of the corresponding FPU | 1482 // Move the high part of the double into the higher of the corresponding FPU |
1297 // register of FPU register pair. | 1483 // register of FPU register pair. |
1298 if (hi != 0) { | 1484 if (hi != 0) { |
1299 li(at, Operand(hi)); | 1485 li(at, Operand(hi)); |
1300 mtc1(at, dst.high()); | 1486 Mthc1(at, dst); |
1301 } else { | 1487 } else { |
1302 mtc1(zero_reg, dst.high()); | 1488 Mthc1(zero_reg, dst); |
1303 } | 1489 } |
1304 } | 1490 } |
1305 } | 1491 } |
1306 | 1492 |
1307 | 1493 |
1308 void MacroAssembler::Movz(Register rd, Register rs, Register rt) { | 1494 void MacroAssembler::Movz(Register rd, Register rs, Register rt) { |
1309 if (kArchVariant == kLoongson) { | 1495 if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) { |
1310 Label done; | 1496 Label done; |
1311 Branch(&done, ne, rt, Operand(zero_reg)); | 1497 Branch(&done, ne, rt, Operand(zero_reg)); |
1312 mov(rd, rs); | 1498 mov(rd, rs); |
1313 bind(&done); | 1499 bind(&done); |
1314 } else { | 1500 } else { |
1315 movz(rd, rs, rt); | 1501 movz(rd, rs, rt); |
1316 } | 1502 } |
1317 } | 1503 } |
1318 | 1504 |
1319 | 1505 |
1320 void MacroAssembler::Movn(Register rd, Register rs, Register rt) { | 1506 void MacroAssembler::Movn(Register rd, Register rs, Register rt) { |
1321 if (kArchVariant == kLoongson) { | 1507 if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) { |
1322 Label done; | 1508 Label done; |
1323 Branch(&done, eq, rt, Operand(zero_reg)); | 1509 Branch(&done, eq, rt, Operand(zero_reg)); |
1324 mov(rd, rs); | 1510 mov(rd, rs); |
1325 bind(&done); | 1511 bind(&done); |
1326 } else { | 1512 } else { |
1327 movn(rd, rs, rt); | 1513 movn(rd, rs, rt); |
1328 } | 1514 } |
1329 } | 1515 } |
1330 | 1516 |
1331 | 1517 |
1332 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) { | 1518 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) { |
1333 if (kArchVariant == kLoongson) { | 1519 if (IsMipsArchVariant(kLoongson)) { |
1334 // Tests an FP condition code and then conditionally move rs to rd. | 1520 // Tests an FP condition code and then conditionally move rs to rd. |
1335 // We do not currently use any FPU cc bit other than bit 0. | 1521 // We do not currently use any FPU cc bit other than bit 0. |
1336 DCHECK(cc == 0); | 1522 DCHECK(cc == 0); |
1337 DCHECK(!(rs.is(t8) || rd.is(t8))); | 1523 DCHECK(!(rs.is(t8) || rd.is(t8))); |
1338 Label done; | 1524 Label done; |
1339 Register scratch = t8; | 1525 Register scratch = t8; |
1340 // For testing purposes we need to fetch content of the FCSR register and | 1526 // For testing purposes we need to fetch content of the FCSR register and |
1342 // then test its cc (floating point condition code) bit (for cc = 0, it is | 1528 // then test its cc (floating point condition code) bit (for cc = 0, it is |
1342 // 24. bit of the FCSR). | 1528 // 24. bit of the FCSR). |
1343 cfc1(scratch, FCSR); | 1529 cfc1(scratch, FCSR); |
1344 // For the MIPS I, II and III architectures, the contents of scratch is | 1530 // For the MIPS I, II and III architectures, the contents of scratch is |
1345 // UNPREDICTABLE for the instruction immediately following CFC1. | 1531 // UNPREDICTABLE for the instruction immediately following CFC1. |
1346 nop(); | 1532 nop(); |
1347 srl(scratch, scratch, 16); | 1533 srl(scratch, scratch, 16); |
1348 andi(scratch, scratch, 0x0080); | 1534 andi(scratch, scratch, 0x0080); |
1349 Branch(&done, eq, scratch, Operand(zero_reg)); | 1535 Branch(&done, eq, scratch, Operand(zero_reg)); |
1350 mov(rd, rs); | 1536 mov(rd, rs); |
1351 bind(&done); | 1537 bind(&done); |
1352 } else { | 1538 } else { |
1353 movt(rd, rs, cc); | 1539 movt(rd, rs, cc); |
1354 } | 1540 } |
1355 } | 1541 } |
1356 | 1542 |
1357 | 1543 |
1358 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) { | 1544 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) { |
1359 if (kArchVariant == kLoongson) { | 1545 if (IsMipsArchVariant(kLoongson)) { |
1360 // Tests an FP condition code and then conditionally move rs to rd. | 1546 // Tests an FP condition code and then conditionally move rs to rd. |
1361 // We do not currently use any FPU cc bit other than bit 0. | 1547 // We do not currently use any FPU cc bit other than bit 0. |
1362 DCHECK(cc == 0); | 1548 DCHECK(cc == 0); |
1363 DCHECK(!(rs.is(t8) || rd.is(t8))); | 1549 DCHECK(!(rs.is(t8) || rd.is(t8))); |
1364 Label done; | 1550 Label done; |
1365 Register scratch = t8; | 1551 Register scratch = t8; |
1366 // For testing purposes we need to fetch content of the FCSR register and | 1552 // For testing purposes we need to fetch content of the FCSR register and |
1368 // then test its cc (floating point condition code) bit (for cc = 0, it is | 1554 // then test its cc (floating point condition code) bit (for cc = 0, it is |
1368 // 24. bit of the FCSR). | 1554 // 24. bit of the FCSR). |
1369 cfc1(scratch, FCSR); | 1555 cfc1(scratch, FCSR); |
1370 // For the MIPS I, II and III architectures, the contents of scratch is | 1556 // For the MIPS I, II and III architectures, the contents of scratch is |
1371 // UNPREDICTABLE for the instruction immediately following CFC1. | 1557 // UNPREDICTABLE for the instruction immediately following CFC1. |
1372 nop(); | 1558 nop(); |
1373 srl(scratch, scratch, 16); | 1559 srl(scratch, scratch, 16); |
1374 andi(scratch, scratch, 0x0080); | 1560 andi(scratch, scratch, 0x0080); |
1375 Branch(&done, ne, scratch, Operand(zero_reg)); | 1561 Branch(&done, ne, scratch, Operand(zero_reg)); |
1376 mov(rd, rs); | 1562 mov(rd, rs); |
1377 bind(&done); | 1563 bind(&done); |
1378 } else { | 1564 } else { |
1379 movf(rd, rs, cc); | 1565 movf(rd, rs, cc); |
1380 } | 1566 } |
1381 } | 1567 } |
1382 | 1568 |
1383 | 1569 |
1384 void MacroAssembler::Clz(Register rd, Register rs) { | 1570 void MacroAssembler::Clz(Register rd, Register rs) { |
1385 if (kArchVariant == kLoongson) { | 1571 if (IsMipsArchVariant(kLoongson)) { |
1386 DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9))); | 1572 DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9))); |
1387 Register mask = t8; | 1573 Register mask = t8; |
1388 Register scratch = t9; | 1574 Register scratch = t9; |
1389 Label loop, end; | 1575 Label loop, end; |
1390 mov(at, rs); | 1576 mov(at, rs); |
1391 mov(rd, zero_reg); | 1577 mov(rd, zero_reg); |
1392 lui(mask, 0x8000); | 1578 lui(mask, 0x8000); |
1393 bind(&loop); | 1579 bind(&loop); |
1394 and_(scratch, at, mask); | 1580 and_(scratch, at, mask); |
1395 Branch(&end, ne, scratch, Operand(zero_reg)); | 1581 Branch(&end, ne, scratch, Operand(zero_reg)); |
(...skipping 841 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2237 Register r2 = no_reg; | 2423 Register r2 = no_reg; |
2238 Register scratch = at; | 2424 Register scratch = at; |
2239 | 2425 |
2240 if (rt.is_reg()) { | 2426 if (rt.is_reg()) { |
2241 r2 = rt.rm_; | 2427 r2 = rt.rm_; |
2242 } else if (cond != cc_always) { | 2428 } else if (cond != cc_always) { |
2243 r2 = scratch; | 2429 r2 = scratch; |
2244 li(r2, rt); | 2430 li(r2, rt); |
2245 } | 2431 } |
2246 | 2432 |
2247 { | 2433 if (!IsMipsArchVariant(kMips32r6)) { |
2248 BlockTrampolinePoolScope block_trampoline_pool(this); | 2434 BlockTrampolinePoolScope block_trampoline_pool(this); |
2249 switch (cond) { | 2435 switch (cond) { |
2250 case cc_always: | 2436 case cc_always: |
2251 bal(offset); | 2437 bal(offset); |
2252 break; | 2438 break; |
2253 case eq: | 2439 case eq: |
2254 bne(rs, r2, 2); | 2440 bne(rs, r2, 2); |
2255 nop(); | 2441 nop(); |
2256 bal(offset); | 2442 bal(offset); |
2257 break; | 2443 break; |
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2301 break; | 2487 break; |
2302 case Uless_equal: | 2488 case Uless_equal: |
2303 sltu(scratch, r2, rs); | 2489 sltu(scratch, r2, rs); |
2304 addiu(scratch, scratch, -1); | 2490 addiu(scratch, scratch, -1); |
2305 bltzal(scratch, offset); | 2491 bltzal(scratch, offset); |
2306 break; | 2492 break; |
2307 | 2493 |
2308 default: | 2494 default: |
2309 UNREACHABLE(); | 2495 UNREACHABLE(); |
2310 } | 2496 } |
| 2497 } else { |
| 2498 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 2499 switch (cond) { |
| 2500 case cc_always: |
| 2501 bal(offset); |
| 2502 break; |
| 2503 case eq: |
| 2504 bne(rs, r2, 2); |
| 2505 nop(); |
| 2506 bal(offset); |
| 2507 break; |
| 2508 case ne: |
| 2509 beq(rs, r2, 2); |
| 2510 nop(); |
| 2511 bal(offset); |
| 2512 break; |
| 2513 |
| 2514 // Signed comparison. |
| 2515 case greater: |
| 2516 // rs > rt |
| 2517 slt(scratch, r2, rs); |
| 2518 beq(scratch, zero_reg, 2); |
| 2519 nop(); |
| 2520 bal(offset); |
| 2521 break; |
| 2522 case greater_equal: |
| 2523 // rs >= rt |
| 2524 slt(scratch, rs, r2); |
| 2525 bne(scratch, zero_reg, 2); |
| 2526 nop(); |
| 2527 bal(offset); |
| 2528 break; |
| 2529 case less: |
| 2530 // rs < r2 |
| 2531 slt(scratch, rs, r2); |
| 2532 bne(scratch, zero_reg, 2); |
| 2533 nop(); |
| 2534 bal(offset); |
| 2535 break; |
| 2536 case less_equal: |
| 2537 // rs <= r2 |
| 2538 slt(scratch, r2, rs); |
| 2539 bne(scratch, zero_reg, 2); |
| 2540 nop(); |
| 2541 bal(offset); |
| 2542 break; |
| 2543 |
| 2544 |
| 2545 // Unsigned comparison. |
| 2546 case Ugreater: |
| 2547 // rs > r2 |
| 2548 sltu(scratch, r2, rs); |
| 2549 beq(scratch, zero_reg, 2); |
| 2550 nop(); |
| 2551 bal(offset); |
| 2552 break; |
| 2553 case Ugreater_equal: |
| 2554 // rs >= r2 |
| 2555 sltu(scratch, rs, r2); |
| 2556 bne(scratch, zero_reg, 2); |
| 2557 nop(); |
| 2558 bal(offset); |
| 2559 break; |
| 2560 case Uless: |
| 2561 // rs < r2 |
| 2562 sltu(scratch, rs, r2); |
| 2563 bne(scratch, zero_reg, 2); |
| 2564 nop(); |
| 2565 bal(offset); |
| 2566 break; |
| 2567 case Uless_equal: |
| 2568 // rs <= r2 |
| 2569 sltu(scratch, r2, rs); |
| 2570 bne(scratch, zero_reg, 2); |
| 2571 nop(); |
| 2572 bal(offset); |
| 2573 break; |
| 2574 default: |
| 2575 UNREACHABLE(); |
| 2576 } |
2311 } | 2577 } |
| 2578 |
2312 // Emit a nop in the branch delay slot if required. | 2579 // Emit a nop in the branch delay slot if required. |
2313 if (bdslot == PROTECT) | 2580 if (bdslot == PROTECT) |
2314 nop(); | 2581 nop(); |
2315 } | 2582 } |
2316 | 2583 |
2317 | 2584 |
2318 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) { | 2585 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) { |
2319 bal(shifted_branch_offset(L, false)); | 2586 bal(shifted_branch_offset(L, false)); |
2320 | 2587 |
2321 // Emit a nop in the branch delay slot if required. | 2588 // Emit a nop in the branch delay slot if required. |
(...skipping 10 matching lines...) Expand all Loading... |
2332 int32_t offset = 0; | 2599 int32_t offset = 0; |
2333 Register r2 = no_reg; | 2600 Register r2 = no_reg; |
2334 Register scratch = at; | 2601 Register scratch = at; |
2335 if (rt.is_reg()) { | 2602 if (rt.is_reg()) { |
2336 r2 = rt.rm_; | 2603 r2 = rt.rm_; |
2337 } else if (cond != cc_always) { | 2604 } else if (cond != cc_always) { |
2338 r2 = scratch; | 2605 r2 = scratch; |
2339 li(r2, rt); | 2606 li(r2, rt); |
2340 } | 2607 } |
2341 | 2608 |
2342 { | 2609 if (!IsMipsArchVariant(kMips32r6)) { |
2343 BlockTrampolinePoolScope block_trampoline_pool(this); | 2610 BlockTrampolinePoolScope block_trampoline_pool(this); |
2344 switch (cond) { | 2611 switch (cond) { |
2345 case cc_always: | 2612 case cc_always: |
2346 offset = shifted_branch_offset(L, false); | 2613 offset = shifted_branch_offset(L, false); |
2347 bal(offset); | 2614 bal(offset); |
2348 break; | 2615 break; |
2349 case eq: | 2616 case eq: |
2350 bne(rs, r2, 2); | 2617 bne(rs, r2, 2); |
2351 nop(); | 2618 nop(); |
2352 offset = shifted_branch_offset(L, false); | 2619 offset = shifted_branch_offset(L, false); |
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2407 case Uless_equal: | 2674 case Uless_equal: |
2408 sltu(scratch, r2, rs); | 2675 sltu(scratch, r2, rs); |
2409 addiu(scratch, scratch, -1); | 2676 addiu(scratch, scratch, -1); |
2410 offset = shifted_branch_offset(L, false); | 2677 offset = shifted_branch_offset(L, false); |
2411 bltzal(scratch, offset); | 2678 bltzal(scratch, offset); |
2412 break; | 2679 break; |
2413 | 2680 |
2414 default: | 2681 default: |
2415 UNREACHABLE(); | 2682 UNREACHABLE(); |
2416 } | 2683 } |
| 2684 } else { |
| 2685 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 2686 switch (cond) { |
| 2687 case cc_always: |
| 2688 offset = shifted_branch_offset(L, false); |
| 2689 bal(offset); |
| 2690 break; |
| 2691 case eq: |
| 2692 bne(rs, r2, 2); |
| 2693 nop(); |
| 2694 offset = shifted_branch_offset(L, false); |
| 2695 bal(offset); |
| 2696 break; |
| 2697 case ne: |
| 2698 beq(rs, r2, 2); |
| 2699 nop(); |
| 2700 offset = shifted_branch_offset(L, false); |
| 2701 bal(offset); |
| 2702 break; |
| 2703 |
| 2704 // Signed comparison. |
| 2705 case greater: |
| 2706 // rs > r2 |
| 2707 slt(scratch, r2, rs); |
| 2708 beq(scratch, zero_reg, 2); |
| 2709 nop(); |
| 2710 offset = shifted_branch_offset(L, false); |
| 2711 bal(offset); |
| 2712 break; |
| 2713 case greater_equal: |
| 2714 // rs >= r2 |
| 2715 slt(scratch, rs, r2); |
| 2716 bne(scratch, zero_reg, 2); |
| 2717 nop(); |
| 2718 offset = shifted_branch_offset(L, false); |
| 2719 bal(offset); |
| 2720 break; |
| 2721 case less: |
| 2722 // rs < r2 |
| 2723 slt(scratch, rs, r2); |
| 2724 bne(scratch, zero_reg, 2); |
| 2725 nop(); |
| 2726 offset = shifted_branch_offset(L, false); |
| 2727 bal(offset); |
| 2728 break; |
| 2729 case less_equal: |
| 2730 // rs <= r2 |
| 2731 slt(scratch, r2, rs); |
| 2732 bne(scratch, zero_reg, 2); |
| 2733 nop(); |
| 2734 offset = shifted_branch_offset(L, false); |
| 2735 bal(offset); |
| 2736 break; |
| 2737 |
| 2738 |
| 2739 // Unsigned comparison. |
| 2740 case Ugreater: |
| 2741 // rs > r2 |
| 2742 sltu(scratch, r2, rs); |
| 2743 beq(scratch, zero_reg, 2); |
| 2744 nop(); |
| 2745 offset = shifted_branch_offset(L, false); |
| 2746 bal(offset); |
| 2747 break; |
| 2748 case Ugreater_equal: |
| 2749 // rs >= r2 |
| 2750 sltu(scratch, rs, r2); |
| 2751 bne(scratch, zero_reg, 2); |
| 2752 nop(); |
| 2753 offset = shifted_branch_offset(L, false); |
| 2754 bal(offset); |
| 2755 break; |
| 2756 case Uless: |
| 2757 // rs < r2 |
| 2758 sltu(scratch, rs, r2); |
| 2759 bne(scratch, zero_reg, 2); |
| 2760 nop(); |
| 2761 offset = shifted_branch_offset(L, false); |
| 2762 bal(offset); |
| 2763 break; |
| 2764 case Uless_equal: |
| 2765 // rs <= r2 |
| 2766 sltu(scratch, r2, rs); |
| 2767 bne(scratch, zero_reg, 2); |
| 2768 nop(); |
| 2769 offset = shifted_branch_offset(L, false); |
| 2770 bal(offset); |
| 2771 break; |
| 2772 |
| 2773 default: |
| 2774 UNREACHABLE(); |
| 2775 } |
2417 } | 2776 } |
| 2777 |
2418 // Check that offset could actually hold on an int16_t. | 2778 // Check that offset could actually hold on an int16_t. |
2419 DCHECK(is_int16(offset)); | 2779 DCHECK(is_int16(offset)); |
2420 | 2780 |
2421 // Emit a nop in the branch delay slot if required. | 2781 // Emit a nop in the branch delay slot if required. |
2422 if (bdslot == PROTECT) | 2782 if (bdslot == PROTECT) |
2423 nop(); | 2783 nop(); |
2424 } | 2784 } |
2425 | 2785 |
2426 | 2786 |
2427 void MacroAssembler::Jump(Register target, | 2787 void MacroAssembler::Jump(Register target, |
(...skipping 3313 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5741 | 6101 |
5742 | 6102 |
5743 void MacroAssembler::TruncatingDiv(Register result, | 6103 void MacroAssembler::TruncatingDiv(Register result, |
5744 Register dividend, | 6104 Register dividend, |
5745 int32_t divisor) { | 6105 int32_t divisor) { |
5746 DCHECK(!dividend.is(result)); | 6106 DCHECK(!dividend.is(result)); |
5747 DCHECK(!dividend.is(at)); | 6107 DCHECK(!dividend.is(at)); |
5748 DCHECK(!result.is(at)); | 6108 DCHECK(!result.is(at)); |
5749 MultiplierAndShift ms(divisor); | 6109 MultiplierAndShift ms(divisor); |
5750 li(at, Operand(ms.multiplier())); | 6110 li(at, Operand(ms.multiplier())); |
5751 Mult(dividend, Operand(at)); | 6111 Mulh(result, dividend, Operand(at)); |
5752 mfhi(result); | |
5753 if (divisor > 0 && ms.multiplier() < 0) { | 6112 if (divisor > 0 && ms.multiplier() < 0) { |
5754 Addu(result, result, Operand(dividend)); | 6113 Addu(result, result, Operand(dividend)); |
5755 } | 6114 } |
5756 if (divisor < 0 && ms.multiplier() > 0) { | 6115 if (divisor < 0 && ms.multiplier() > 0) { |
5757 Subu(result, result, Operand(dividend)); | 6116 Subu(result, result, Operand(dividend)); |
5758 } | 6117 } |
5759 if (ms.shift() > 0) sra(result, result, ms.shift()); | 6118 if (ms.shift() > 0) sra(result, result, ms.shift()); |
5760 srl(at, dividend, 31); | 6119 srl(at, dividend, 31); |
5761 Addu(result, result, Operand(at)); | 6120 Addu(result, result, Operand(at)); |
5762 } | 6121 } |
5763 | 6122 |
5764 | 6123 |
5765 } } // namespace v8::internal | 6124 } } // namespace v8::internal |
5766 | 6125 |
5767 #endif // V8_TARGET_ARCH_MIPS | 6126 #endif // V8_TARGET_ARCH_MIPS |
OLD | NEW |