OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 556 matching lines...) |
567 ASSERT(!rs.is(at)); | 567 ASSERT(!rs.is(at)); |
568 li(at, rt); | 568 li(at, rt); |
569 subu(rd, rs, at); | 569 subu(rd, rs, at); |
570 } | 570 } |
571 } | 571 } |
572 } | 572 } |
573 | 573 |
574 | 574 |
575 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) { | 575 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) { |
576 if (rt.is_reg()) { | 576 if (rt.is_reg()) { |
577 mul(rd, rs, rt.rm()); | 577 if (kArchVariant == kLoongson) { |
| 578 mult(rs, rt.rm()); |
| 579 mflo(rd); |
| 580 } else { |
| 581 mul(rd, rs, rt.rm()); |
| 582 } |
578 } else { | 583 } else { |
579 // li handles the relocation. | 584 // li handles the relocation. |
580 ASSERT(!rs.is(at)); | 585 ASSERT(!rs.is(at)); |
581 li(at, rt); | 586 li(at, rt); |
582 mul(rd, rs, at); | 587 if (kArchVariant == kLoongson) { |
| 588 mult(rs, at); |
| 589 mflo(rd); |
| 590 } else { |
| 591 mul(rd, rs, at); |
| 592 } |
583 } | 593 } |
584 } | 594 } |
585 | 595 |
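A minimal C++ sketch of what both paths above compute (illustrative only; MulLow32 is a made-up name, not part of the patch): the low 32 bits of the 64-bit signed product, which mflo reads back from HI/LO on Loongson and which the single mul instruction produces on the other variants.

#include <cstdint>

// Low word of the 64-bit signed product: mul rd, rs, rt on MIPS32,
// or mult rs, rt; mflo rd on Loongson.
int32_t MulLow32(int32_t rs, int32_t rt) {
  int64_t product = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
  return static_cast<int32_t>(product);
}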
586 | 596 |
587 void MacroAssembler::Mult(Register rs, const Operand& rt) { | 597 void MacroAssembler::Mult(Register rs, const Operand& rt) { |
588 if (rt.is_reg()) { | 598 if (rt.is_reg()) { |
589 mult(rs, rt.rm()); | 599 mult(rs, rt.rm()); |
590 } else { | 600 } else { |
591 // li handles the relocation. | 601 // li handles the relocation. |
592 ASSERT(!rs.is(at)); | 602 ASSERT(!rs.is(at)); |
(...skipping 134 matching lines...) |
727 // li handles the relocation. | 737 // li handles the relocation. |
728 ASSERT(!rs.is(at)); | 738 ASSERT(!rs.is(at)); |
729 li(at, rt); | 739 li(at, rt); |
730 sltu(rd, rs, at); | 740 sltu(rd, rs, at); |
731 } | 741 } |
732 } | 742 } |
733 } | 743 } |
734 | 744 |
735 | 745 |
736 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) { | 746 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) { |
737 if (mips32r2) { | 747 if (kArchVariant == kMips32r2) { |
738 if (rt.is_reg()) { | 748 if (rt.is_reg()) { |
739 rotrv(rd, rs, rt.rm()); | 749 rotrv(rd, rs, rt.rm()); |
740 } else { | 750 } else { |
741 rotr(rd, rs, rt.imm32_); | 751 rotr(rd, rs, rt.imm32_); |
742 } | 752 } |
743 } else { | 753 } else { |
744 if (rt.is_reg()) { | 754 if (rt.is_reg()) { |
745 subu(at, zero_reg, rt.rm()); | 755 subu(at, zero_reg, rt.rm()); |
746 sllv(at, rs, at); | 756 sllv(at, rs, at); |
747 srlv(rd, rs, rt.rm()); | 757 srlv(rd, rs, rt.rm()); |
(...skipping 167 matching lines...) |
915 } | 925 } |
916 | 926 |
917 | 927 |
918 void MacroAssembler::Ext(Register rt, | 928 void MacroAssembler::Ext(Register rt, |
919 Register rs, | 929 Register rs, |
920 uint16_t pos, | 930 uint16_t pos, |
921 uint16_t size) { | 931 uint16_t size) { |
922 ASSERT(pos < 32); | 932 ASSERT(pos < 32); |
923 ASSERT(pos + size < 33); | 933 ASSERT(pos + size < 33); |
924 | 934 |
925 if (mips32r2) { | 935 if (kArchVariant == kMips32r2) { |
926 ext_(rt, rs, pos, size); | 936 ext_(rt, rs, pos, size); |
927 } else { | 937 } else { |
928 // Move rs to rt and shift it left then right to get the | 938 // Move rs to rt and shift it left then right to get the |
929 // desired bitfield on the right side and zeroes on the left. | 939 // desired bitfield on the right side and zeroes on the left. |
930 int shift_left = 32 - (pos + size); | 940 int shift_left = 32 - (pos + size); |
931 sll(rt, rs, shift_left); // Acts as a move if shift_left == 0. | 941 sll(rt, rs, shift_left); // Acts as a move if shift_left == 0. |
932 | 942 |
933 int shift_right = 32 - size; | 943 int shift_right = 32 - size; |
934 if (shift_right > 0) { | 944 if (shift_right > 0) { |
935 srl(rt, rt, shift_right); | 945 srl(rt, rt, shift_right); |
936 } | 946 } |
937 } | 947 } |
938 } | 948 } |
939 | 949 |
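A hedged C++ sketch of the non-R2 fallback above (ExtractBits is an illustrative name; assumes 1 <= size and pos + size <= 32): shifting left discards the bits above the field, and the logical shift right then fills with zeroes from the left, which equals extracting bits [pos, pos + size) of rs.

#include <cstdint>

uint32_t ExtractBits(uint32_t rs, int pos, int size) {
  uint32_t field = rs << (32 - (pos + size));  // Acts as a move if the shift is 0.
  int shift_right = 32 - size;
  if (shift_right > 0) field >>= shift_right;  // Logical shift: zeroes on the left.
  return field;  // Same result as (rs >> pos) masked to the low `size` bits.
}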
940 | 950 |
941 void MacroAssembler::Ins(Register rt, | 951 void MacroAssembler::Ins(Register rt, |
942 Register rs, | 952 Register rs, |
943 uint16_t pos, | 953 uint16_t pos, |
944 uint16_t size) { | 954 uint16_t size) { |
945 ASSERT(pos < 32); | 955 ASSERT(pos < 32); |
946 ASSERT(pos + size <= 32); | 956 ASSERT(pos + size <= 32); |
947 ASSERT(size != 0); | 957 ASSERT(size != 0); |
948 | 958 |
949 if (mips32r2) { | 959 if (kArchVariant == kMips32r2) { |
950 ins_(rt, rs, pos, size); | 960 ins_(rt, rs, pos, size); |
951 } else { | 961 } else { |
952 ASSERT(!rt.is(t8) && !rs.is(t8)); | 962 ASSERT(!rt.is(t8) && !rs.is(t8)); |
953 Subu(at, zero_reg, Operand(1)); | 963 Subu(at, zero_reg, Operand(1)); |
954 srl(at, at, 32 - size); | 964 srl(at, at, 32 - size); |
955 and_(t8, rs, at); | 965 and_(t8, rs, at); |
956 sll(t8, t8, pos); | 966 sll(t8, t8, pos); |
957 sll(at, at, pos); | 967 sll(at, at, pos); |
958 nor(at, at, zero_reg); | 968 nor(at, at, zero_reg); |
959 and_(at, rt, at); | 969 and_(at, rt, at); |
(...skipping 49 matching lines...) |
1009 } | 1019 } |
1010 | 1020 |
1011 | 1021 |
1012 void MacroAssembler::Trunc_uw_d(FPURegister fd, | 1022 void MacroAssembler::Trunc_uw_d(FPURegister fd, |
1013 FPURegister fs, | 1023 FPURegister fs, |
1014 FPURegister scratch) { | 1024 FPURegister scratch) { |
1015 Trunc_uw_d(fs, t8, scratch); | 1025 Trunc_uw_d(fs, t8, scratch); |
1016 mtc1(t8, fd); | 1026 mtc1(t8, fd); |
1017 } | 1027 } |
1018 | 1028 |
| 1029 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) { |
| 1030 if (kArchVariant == kLoongson && fd.is(fs)) { |
| 1031 mfc1(t8, FPURegister::from_code(fs.code() + 1)); |
| 1032 trunc_w_d(fd, fs); |
| 1033 mtc1(t8, FPURegister::from_code(fs.code() + 1)); |
| 1034 } else { |
| 1035 trunc_w_d(fd, fs); |
| 1036 } |
| 1037 } |
| 1038 |
| 1039 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) { |
| 1040 if (kArchVariant == kLoongson && fd.is(fs)) { |
| 1041 mfc1(t8, FPURegister::from_code(fs.code() + 1)); |
| 1042 round_w_d(fd, fs); |
| 1043 mtc1(t8, FPURegister::from_code(fs.code() + 1)); |
| 1044 } else { |
| 1045 round_w_d(fd, fs); |
| 1046 } |
| 1047 } |
| 1048 |
| 1049 |
| 1050 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) { |
| 1051 if (kArchVariant == kLoongson && fd.is(fs)) { |
| 1052 mfc1(t8, FPURegister::from_code(fs.code() + 1)); |
| 1053 floor_w_d(fd, fs); |
| 1054 mtc1(t8, FPURegister::from_code(fs.code() + 1)); |
| 1055 } else { |
| 1056 floor_w_d(fd, fs); |
| 1057 } |
| 1058 } |
| 1059 |
| 1060 |
| 1061 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) { |
| 1062 if (kArchVariant == kLoongson && fd.is(fs)) { |
| 1063 mfc1(t8, FPURegister::from_code(fs.code() + 1)); |
| 1064 ceil_w_d(fd, fs); |
| 1065 mtc1(t8, FPURegister::from_code(fs.code() + 1)); |
| 1066 } else { |
| 1067 ceil_w_d(fd, fs); |
| 1068 } |
| 1069 } |
| 1070 |
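In the four wrappers above, FPURegister::from_code(fs.code() + 1) names the odd single-precision register of the even/odd pair backing the double fs (assuming MIPS32 FR=0 pairing); the Loongson conversion can corrupt that upper half when fd and fs alias, so it is parked in t8 across the instruction. A minimal sketch of the save/restore shape, with the conversion reduced to a stand-in:

#include <cstdint>

// Illustrative only: the operation writes word 0 of the pair and may
// corrupt word 1, so word 1 is saved first and restored afterwards.
void ConvertInPlace(uint32_t pair[2]) {
  uint32_t saved_high = pair[1];  // mfc1 t8, <odd half of the pair>.
  pair[0] = pair[0] >> 1;         // Stand-in for trunc_w_d fd, fs.
  pair[1] = saved_high;           // mtc1 t8, <odd half of the pair>.
}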
1019 | 1071 |
1020 void MacroAssembler::Trunc_uw_d(FPURegister fd, | 1072 void MacroAssembler::Trunc_uw_d(FPURegister fd, |
1021 Register rs, | 1073 Register rs, |
1022 FPURegister scratch) { | 1074 FPURegister scratch) { |
1023 ASSERT(!fd.is(scratch)); | 1075 ASSERT(!fd.is(scratch)); |
1024 ASSERT(!rs.is(at)); | 1076 ASSERT(!rs.is(at)); |
1025 | 1077 |
1026 // Load 2^31 into scratch as its floating-point representation. | 1078 // Load 2^31 into scratch as its floating-point representation. |
1027 li(at, 0x41E00000); | 1079 li(at, 0x41E00000); |
1028 mtc1(at, FPURegister::from_code(scratch.code() + 1)); | 1080 mtc1(at, FPURegister::from_code(scratch.code() + 1)); |
(...skipping 110 matching lines...) |
1139 if (hi != 0) { | 1191 if (hi != 0) { |
1140 li(at, Operand(hi)); | 1192 li(at, Operand(hi)); |
1141 mtc1(at, dst.high()); | 1193 mtc1(at, dst.high()); |
1142 } else { | 1194 } else { |
1143 mtc1(zero_reg, dst.high()); | 1195 mtc1(zero_reg, dst.high()); |
1144 } | 1196 } |
1145 } | 1197 } |
1146 } | 1198 } |
1147 | 1199 |
1148 | 1200 |
| 1201 void MacroAssembler::Movz(Register rd, Register rs, Register rt) { |
| 1202 if (kArchVariant == kLoongson) { |
| 1203 Label done; |
| 1204 Branch(&done, ne, rt, Operand(zero_reg)); |
| 1205 mov(rd, rs); |
| 1206 bind(&done); |
| 1207 } else { |
| 1208 movz(rd, rs, rt); |
| 1209 } |
| 1210 } |
| 1211 |
| 1212 |
| 1213 void MacroAssembler::Movn(Register rd, Register rs, Register rt) { |
| 1214 if (kArchVariant == kLoongson) { |
| 1215 Label done; |
| 1216 Branch(&done, eq, rt, Operand(zero_reg)); |
| 1217 mov(rd, rs); |
| 1218 bind(&done); |
| 1219 } else { |
| 1220 movn(rd, rs, rt); |
| 1221 } |
| 1222 } |
| 1223 |
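What the two branch sequences above emulate, as a hedged C++ sketch (function signatures are illustrative): conditional moves gated on rt, with the Branch jumping over the mov when the condition does not hold.

#include <cstdint>

void Movz(int32_t* rd, int32_t rs, int32_t rt) {
  if (rt == 0) *rd = rs;  // Loongson path: branch to done when rt != 0.
}

void Movn(int32_t* rd, int32_t rs, int32_t rt) {
  if (rt != 0) *rd = rs;  // Loongson path: branch to done when rt == 0.
}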
| 1224 |
| 1225 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) { |
| 1226 if (kArchVariant == kLoongson) { |
| 1227 // Tests an FP condition code and then conditionally moves rs to rd. |
| 1228 // We do not currently use any FPU cc bit other than bit 0. |
| 1229 ASSERT(cc == 0); |
| 1230 ASSERT(!(rs.is(t8) || rd.is(t8))); |
| 1231 Label done; |
| 1232 Register scratch = t8; |
| 1233 // We need to fetch the contents of the FCSR register and then test |
| 1234 // its cc (floating-point condition code) bit (for cc = 0, this is |
| 1235 // the 24th bit of the FCSR). |
| 1236 cfc1(scratch, FCSR); |
| 1237 // For the MIPS I, II and III architectures, the contents of scratch |
| 1238 // are UNPREDICTABLE for the instruction immediately following CFC1. |
| 1239 nop(); |
| 1240 srl(scratch, scratch, 16); |
| 1241 andi(scratch, scratch, 0x0080); |
| 1242 Branch(&done, eq, scratch, Operand(zero_reg)); |
| 1243 mov(rd, rs); |
| 1244 bind(&done); |
| 1245 } else { |
| 1246 movt(rd, rs, cc); |
| 1247 } |
| 1248 } |
| 1249 |
| 1250 |
| 1251 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) { |
| 1252 if (kArchVariant == kLoongson) { |
| 1253 // Tests an FP condition code and then conditionally moves rs to rd. |
| 1254 // We do not currently use any FPU cc bit other than bit 0. |
| 1255 ASSERT(cc == 0); |
| 1256 ASSERT(!(rs.is(t8) || rd.is(t8))); |
| 1257 Label done; |
| 1258 Register scratch = t8; |
| 1259 // We need to fetch the contents of the FCSR register and then test |
| 1260 // its cc (floating-point condition code) bit (for cc = 0, this is |
| 1261 // the 24th bit of the FCSR). |
| 1262 cfc1(scratch, FCSR); |
| 1263 // For the MIPS I, II and III architectures, the contents of scratch |
| 1264 // are UNPREDICTABLE for the instruction immediately following CFC1. |
| 1265 nop(); |
| 1266 srl(scratch, scratch, 16); |
| 1267 andi(scratch, scratch, 0x0080); |
| 1268 Branch(&done, ne, scratch, Operand(zero_reg)); |
| 1269 mov(rd, rs); |
| 1270 bind(&done); |
| 1271 } else { |
| 1272 movf(rd, rs, cc); |
| 1273 } |
| 1274 } |
| 1275 |
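The bit test shared by both emulations above, as a C++ sketch (FpuCondition0 is an illustrative name): shifting the FCSR right by 16 and masking with 0x0080 isolates bit 23, the condition flag for cc = 0.

#include <cstdint>

bool FpuCondition0(uint32_t fcsr) {
  return ((fcsr >> 16) & 0x0080) != 0;  // srl scratch, scratch, 16; andi 0x0080.
}
// Movt performs the move when this is true; Movf when it is false.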
| 1276 |
| 1277 void MacroAssembler::Clz(Register rd, Register rs) { |
| 1278 if (kArchVariant == kLoongson) { |
| 1279 ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9))); |
| 1280 Register mask = t8; |
| 1281 Register scratch = t9; |
| 1282 Label loop, end; |
| 1283 mov(at, rs); |
| 1284 mov(rd, zero_reg); |
| 1285 lui(mask, 0x8000); |
| 1286 bind(&loop); |
| 1287 and_(scratch, at, mask); |
| 1288 Branch(&end, ne, scratch, Operand(zero_reg)); |
| 1289 addiu(rd, rd, 1); |
| 1290 Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT); |
| 1291 srl(mask, mask, 1); |
| 1292 bind(&end); |
| 1293 } else { |
| 1294 clz(rd, rs); |
| 1295 } |
| 1296 } |
| 1297 |
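A hedged C++ sketch of the loop above (CountLeadingZeros is an illustrative name): a single-bit mask walks down from 0x80000000, counting cleared bits until a set bit is found. The result is 0 exactly when the most significant bit of rs is set, which is the property ConvertToInt32 relies on below.

#include <cstdint>

int CountLeadingZeros(uint32_t rs) {
  int count = 0;
  for (uint32_t mask = 0x80000000u; mask != 0 && (rs & mask) == 0; mask >>= 1) {
    ++count;  // addiu rd, rd, 1 for each leading zero bit.
  }
  return count;
}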
| 1298 |
1149 // Tries to get a signed int32 out of a double precision floating point heap | 1299 // Tries to get a signed int32 out of a double precision floating point heap |
1150 // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the | 1300 // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the |
1151 // 32-bit signed integer range. | 1301 // 32-bit signed integer range. |
1152 // This method implementation differs from the ARM version for performance | 1302 // This method implementation differs from the ARM version for performance |
1153 // reasons. | 1303 // reasons. |
1154 void MacroAssembler::ConvertToInt32(Register source, | 1304 void MacroAssembler::ConvertToInt32(Register source, |
1155 Register dest, | 1305 Register dest, |
1156 Register scratch, | 1306 Register scratch, |
1157 Register scratch2, | 1307 Register scratch2, |
1158 FPURegister double_scratch, | 1308 FPURegister double_scratch, |
(...skipping 70 matching lines...) |
1229 // The width of the field here is the same as the shift amount above. | 1379 // The width of the field here is the same as the shift amount above. |
1230 const int field_width = shift_distance; | 1380 const int field_width = shift_distance; |
1231 Ext(scratch2, scratch2, 32-shift_distance, field_width); | 1381 Ext(scratch2, scratch2, 32-shift_distance, field_width); |
1232 Ins(scratch, scratch2, 0, field_width); | 1382 Ins(scratch, scratch2, 0, field_width); |
1233 // Move down according to the exponent. | 1383 // Move down according to the exponent. |
1234 srlv(scratch, scratch, dest); | 1384 srlv(scratch, scratch, dest); |
1235 // Prepare the negative version of our integer. | 1385 // Prepare the negative version of our integer. |
1236 subu(scratch2, zero_reg, scratch); | 1386 subu(scratch2, zero_reg, scratch); |
1237 // Trick to check the sign bit (msb) held in dest: count leading zeros. | 1387 // Trick to check the sign bit (msb) held in dest: count leading zeros. |
1238 // 0 indicates negative; save the negative version with a conditional move. | 1388 // 0 indicates negative; save the negative version with a conditional move. |
1239 clz(dest, dest); | 1389 Clz(dest, dest); |
1240 movz(scratch, scratch2, dest); | 1390 Movz(scratch, scratch2, dest); |
1241 mov(dest, scratch); | 1391 mov(dest, scratch); |
1242 } | 1392 } |
1243 bind(&done); | 1393 bind(&done); |
1244 } | 1394 } |
1245 | 1395 |
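The sign-selection idiom that closes ConvertToInt32, as a hedged C++ sketch (ApplySign is an illustrative name): Clz of a value is 0 exactly when its msb is set, so Movz picks the pre-computed negated magnitude for negative inputs.

#include <cstdint>

int32_t ApplySign(uint32_t magnitude, uint32_t value_with_sign_in_msb) {
  uint32_t negated = 0u - magnitude;                    // subu(scratch2, zero_reg, scratch).
  bool negative = (value_with_sign_in_msb >> 31) != 0;  // Clz(dest, dest) == 0.
  return static_cast<int32_t>(negative ? negated : magnitude);  // Movz + mov.
}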
1246 | 1396 |
1247 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode, | 1397 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode, |
1248 FPURegister result, | 1398 FPURegister result, |
1249 DoubleRegister double_input, | 1399 DoubleRegister double_input, |
1250 Register scratch1, | 1400 Register scratch1, |
(...skipping 10 matching lines...) |
1261 } | 1411 } |
1262 | 1412 |
1263 // Save FCSR. | 1413 // Save FCSR. |
1264 cfc1(scratch1, FCSR); | 1414 cfc1(scratch1, FCSR); |
1265 // Disable FPU exceptions. | 1415 // Disable FPU exceptions. |
1266 ctc1(zero_reg, FCSR); | 1416 ctc1(zero_reg, FCSR); |
1267 | 1417 |
1268 // Do operation based on rounding mode. | 1418 // Do operation based on rounding mode. |
1269 switch (rounding_mode) { | 1419 switch (rounding_mode) { |
1270 case kRoundToNearest: | 1420 case kRoundToNearest: |
1271 round_w_d(result, double_input); | 1421 Round_w_d(result, double_input); |
1272 break; | 1422 break; |
1273 case kRoundToZero: | 1423 case kRoundToZero: |
1274 trunc_w_d(result, double_input); | 1424 Trunc_w_d(result, double_input); |
1275 break; | 1425 break; |
1276 case kRoundToPlusInf: | 1426 case kRoundToPlusInf: |
1277 ceil_w_d(result, double_input); | 1427 Ceil_w_d(result, double_input); |
1278 break; | 1428 break; |
1279 case kRoundToMinusInf: | 1429 case kRoundToMinusInf: |
1280 floor_w_d(result, double_input); | 1430 Floor_w_d(result, double_input); |
1281 break; | 1431 break; |
1282 } // End of switch-statement. | 1432 } // End of switch-statement. |
1283 | 1433 |
1284 // Retrieve FCSR. | 1434 // Retrieve FCSR. |
1285 cfc1(except_flag, FCSR); | 1435 cfc1(except_flag, FCSR); |
1286 // Restore FCSR. | 1436 // Restore FCSR. |
1287 ctc1(scratch1, FCSR); | 1437 ctc1(scratch1, FCSR); |
1288 | 1438 |
1289 // Check for fpu exceptions. | 1439 // Check for fpu exceptions. |
1290 And(except_flag, except_flag, Operand(except_mask)); | 1440 And(except_flag, except_flag, Operand(except_mask)); |
1291 } | 1441 } |
1292 | 1442 |
1293 | 1443 |
1294 void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result, | 1444 void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result, |
1295 Register input_high, | 1445 Register input_high, |
1296 Register input_low, | 1446 Register input_low, |
1297 Register scratch) { | 1447 Register scratch) { |
1298 Label done, normal_exponent, restore_sign; | 1448 Label done, normal_exponent, restore_sign; |
1299 // Extract the biased exponent in result. | 1449 // Extract the biased exponent in result. |
1300 Ext(result, | 1450 Ext(result, |
1301 input_high, | 1451 input_high, |
1302 HeapNumber::kExponentShift, | 1452 HeapNumber::kExponentShift, |
1303 HeapNumber::kExponentBits); | 1453 HeapNumber::kExponentBits); |
1304 | 1454 |
1305 // Check for Infinity and NaNs, which should return 0. | 1455 // Check for Infinity and NaNs, which should return 0. |
1306 Subu(scratch, result, HeapNumber::kExponentMask); | 1456 Subu(scratch, result, HeapNumber::kExponentMask); |
1307 movz(result, zero_reg, scratch); | 1457 Movz(result, zero_reg, scratch); |
1308 Branch(&done, eq, scratch, Operand(zero_reg)); | 1458 Branch(&done, eq, scratch, Operand(zero_reg)); |
1309 | 1459 |
1310 // Express exponent as delta to (number of mantissa bits + 31). | 1460 // Express exponent as delta to (number of mantissa bits + 31). |
1311 Subu(result, | 1461 Subu(result, |
1312 result, | 1462 result, |
1313 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31)); | 1463 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31)); |
1314 | 1464 |
1315 // If the delta is strictly positive, all bits would be shifted away, | 1465 // If the delta is strictly positive, all bits would be shifted away, |
1316 // which means that we can return 0. | 1466 // which means that we can return 0. |
1317 Branch(&normal_exponent, le, result, Operand(zero_reg)); | 1467 Branch(&normal_exponent, le, result, Operand(zero_reg)); |
(...skipping 43 matching lines...) Loading... |
1361 bind(&pos_shift); | 1511 bind(&pos_shift); |
1362 srlv(input_low, input_low, scratch); | 1512 srlv(input_low, input_low, scratch); |
1363 | 1513 |
1364 bind(&shift_done); | 1514 bind(&shift_done); |
1365 Or(input_high, input_high, Operand(input_low)); | 1515 Or(input_high, input_high, Operand(input_low)); |
1366 // Restore sign if necessary. | 1516 // Restore sign if necessary. |
1367 mov(scratch, sign); | 1517 mov(scratch, sign); |
1368 result = sign; | 1518 result = sign; |
1369 sign = no_reg; | 1519 sign = no_reg; |
1370 Subu(result, zero_reg, input_high); | 1520 Subu(result, zero_reg, input_high); |
1371 movz(result, input_high, scratch); | 1521 Movz(result, input_high, scratch); |
1372 bind(&done); | 1522 bind(&done); |
1373 } | 1523 } |
1374 | 1524 |
1375 | 1525 |
1376 void MacroAssembler::EmitECMATruncate(Register result, | 1526 void MacroAssembler::EmitECMATruncate(Register result, |
1377 FPURegister double_input, | 1527 FPURegister double_input, |
1378 FPURegister single_scratch, | 1528 FPURegister single_scratch, |
1379 Register scratch, | 1529 Register scratch, |
1380 Register scratch2, | 1530 Register scratch2, |
1381 Register scratch3) { | 1531 Register scratch3) { |
(...skipping 3857 matching lines...) |
5239 opcode == BGTZL); | 5389 opcode == BGTZL); |
5240 opcode = (cond == eq) ? BEQ : BNE; | 5390 opcode = (cond == eq) ? BEQ : BNE; |
5241 instr = (instr & ~kOpcodeMask) | opcode; | 5391 instr = (instr & ~kOpcodeMask) | opcode; |
5242 masm_.emit(instr); | 5392 masm_.emit(instr); |
5243 } | 5393 } |
5244 | 5394 |
5245 | 5395 |
5246 } } // namespace v8::internal | 5396 } } // namespace v8::internal |
5247 | 5397 |
5248 #endif // V8_TARGET_ARCH_MIPS | 5398 #endif // V8_TARGET_ARCH_MIPS |