Chromium Code Reviews

Side by Side Diff: src/mips/assembler-mips.cc

Issue 453043002: MIPS: Add support for arch. revision 6 to mips32 port. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Addressed comments. Created 6 years, 4 months ago
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. 1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved. 2 // All Rights Reserved.
3 // 3 //
4 // Redistribution and use in source and binary forms, with or without 4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are 5 // modification, are permitted provided that the following conditions are
6 // met: 6 // met:
7 // 7 //
8 // - Redistributions of source code must retain the above copyright notice, 8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer. 9 // this list of conditions and the following disclaimer.
10 // 10 //
(...skipping 80 matching lines...)
91 supported_ |= CpuFeaturesImpliedByCompiler(); 91 supported_ |= CpuFeaturesImpliedByCompiler();
92 92
93 // Only use statically determined features for cross compile (snapshot). 93 // Only use statically determined features for cross compile (snapshot).
94 if (cross_compile) return; 94 if (cross_compile) return;
95 95
96 // If the compiler is allowed to use fpu then we can use fpu too in our 96 // If the compiler is allowed to use fpu then we can use fpu too in our
97 // code generation. 97 // code generation.
98 #ifndef __mips__ 98 #ifndef __mips__
99 // For the simulator build, use FPU. 99 // For the simulator build, use FPU.
100 supported_ |= 1u << FPU; 100 supported_ |= 1u << FPU;
101 #if defined(_MIPS_ARCH_MIPS32R6)
102 // FP64 mode is implied on r6.
103 supported_ |= 1u << FP64;
104 #endif
105 #if defined(FPU_MODE_FP64)
106 supported_ |= 1u << FP64;
107 #endif
101 #else 108 #else
102 // Probe for additional features at runtime. 109 // Probe for additional features at runtime.
103 base::CPU cpu; 110 base::CPU cpu;
104 if (cpu.has_fpu()) supported_ |= 1u << FPU; 111 if (cpu.has_fpu()) supported_ |= 1u << FPU;
112 #if defined(FPU_MODE_FPXX)
113 if (cpu.is_fp64_mode()) supported_ |= 1u << FP64;
114 #elif defined(FPU_MODE_FP64)
115 supported_ |= 1u << FP64;
116 #endif
117 #if defined(_MIPS_ARCH_MIPS32RX)
118 if (cpu.architecture() == 6) {
119 supported_ |= 1u << MIPSr6;
120 } else if (cpu.architecture() == 2) {
121 supported_ |= 1u << MIPSr1;
122 supported_ |= 1u << MIPSr2;
123 } else {
124 supported_ |= 1u << MIPSr1;
125 }
126 #endif
105 #endif 127 #endif
106 } 128 }
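
The new FP64 and MIPSr6 bits follow the same one-bit-per-feature convention already used for FPU. A minimal standalone sketch of that idiom, using illustrative names rather than the actual V8 declarations:

#include <cstdint>
#include <cstdio>

enum CpuFeature { FPU = 0, FP64 = 1, MIPSr1 = 2, MIPSr2 = 3, MIPSr6 = 4 };

static uint32_t supported_ = 0;

static bool IsSupported(CpuFeature f) { return (supported_ & (1u << f)) != 0; }

int main() {
  // Mirror the r6 simulator path above: FPU always on, FP64 mode implied.
  supported_ |= 1u << FPU;
  supported_ |= 1u << FP64;
  supported_ |= 1u << MIPSr6;
  std::printf("FP64: %d, MIPSr6: %d\n", IsSupported(FP64), IsSupported(MIPSr6));
  return 0;
}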
107 129
108 130
109 void CpuFeatures::PrintTarget() { } 131 void CpuFeatures::PrintTarget() { }
110 void CpuFeatures::PrintFeatures() { } 132 void CpuFeatures::PrintFeatures() { }
111 133
112 134
113 int ToNumber(Register reg) { 135 int ToNumber(Register reg) {
114 DCHECK(reg.is_valid()); 136 DCHECK(reg.is_valid());
(...skipping 362 matching lines...)
477 return opcode == BEQ || 499 return opcode == BEQ ||
478 opcode == BNE || 500 opcode == BNE ||
479 opcode == BLEZ || 501 opcode == BLEZ ||
480 opcode == BGTZ || 502 opcode == BGTZ ||
481 opcode == BEQL || 503 opcode == BEQL ||
482 opcode == BNEL || 504 opcode == BNEL ||
483 opcode == BLEZL || 505 opcode == BLEZL ||
484 opcode == BGTZL || 506 opcode == BGTZL ||
485 (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ || 507 (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
486 rt_field == BLTZAL || rt_field == BGEZAL)) || 508 rt_field == BLTZAL || rt_field == BGEZAL)) ||
487 (opcode == COP1 && rs_field == BC1); // Coprocessor branch. 509 (opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
510 (opcode == COP1 && rs_field == BC1EQZ) ||
511 (opcode == COP1 && rs_field == BC1NEZ);
488 } 512 }
489 513
490 514
491 bool Assembler::IsEmittedConstant(Instr instr) { 515 bool Assembler::IsEmittedConstant(Instr instr) {
492 uint32_t label_constant = GetLabelConst(instr); 516 uint32_t label_constant = GetLabelConst(instr);
493 return label_constant == 0; // Emitted label const in reg-exp engine. 517 return label_constant == 0; // Emitted label const in reg-exp engine.
494 } 518 }
495 519
496 520
497 bool Assembler::IsBeq(Instr instr) { 521 bool Assembler::IsBeq(Instr instr) {
(...skipping 24 matching lines...)
522 return opcode == J; 546 return opcode == J;
523 } 547 }
524 548
525 549
526 bool Assembler::IsJal(Instr instr) { 550 bool Assembler::IsJal(Instr instr) {
527 return GetOpcodeField(instr) == JAL; 551 return GetOpcodeField(instr) == JAL;
528 } 552 }
529 553
530 554
531 bool Assembler::IsJr(Instr instr) { 555 bool Assembler::IsJr(Instr instr) {
532 return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR; 556 if (!IsMipsArchVariant(kMips32r6)) {
557 return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
558 } else {
559 return GetOpcodeField(instr) == SPECIAL &&
560 GetRdField(instr) == 0 && GetFunctionField(instr) == JALR;
561 }
533 } 562 }
534 563
535 564
536 bool Assembler::IsJalr(Instr instr) { 565 bool Assembler::IsJalr(Instr instr) {
537 return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR; 566 return GetOpcodeField(instr) == SPECIAL &&
567 GetRdField(instr) != 0 && GetFunctionField(instr) == JALR;
538 } 568 }
539 569
540 570
541 bool Assembler::IsLui(Instr instr) { 571 bool Assembler::IsLui(Instr instr) {
542 uint32_t opcode = GetOpcodeField(instr); 572 uint32_t opcode = GetOpcodeField(instr);
543 // Checks if the instruction is a load upper immediate. 573 // Checks if the instruction is a load upper immediate.
544 return opcode == LUI; 574 return opcode == LUI;
545 } 575 }
546 576
547 577
(...skipping 464 matching lines...)
1012 } 1042 }
1013 1043
1014 int32_t offset = target_pos - (pc_offset() + kBranchPCOffset); 1044 int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
1015 DCHECK((offset & 3) == 0); 1045 DCHECK((offset & 3) == 0);
1016 DCHECK(is_int16(offset >> 2)); 1046 DCHECK(is_int16(offset >> 2));
1017 1047
1018 return offset; 1048 return offset;
1019 } 1049 }
1020 1050
1021 1051
1052 int32_t Assembler::branch_offset_compact(Label* L,
1053 bool jump_elimination_allowed) {
1054 int32_t target_pos;
1055 if (L->is_bound()) {
1056 target_pos = L->pos();
1057 } else {
1058 if (L->is_linked()) {
1059 target_pos = L->pos();
1060 L->link_to(pc_offset());
1061 } else {
1062 L->link_to(pc_offset());
1063 if (!trampoline_emitted_) {
1064 unbound_labels_count_++;
1065 next_buffer_check_ -= kTrampolineSlotsSize;
1066 }
1067 return kEndOfChain;
1068 }
1069 }
1070
1071 int32_t offset = target_pos - pc_offset();
1072 DCHECK((offset & 3) == 0);
1073 DCHECK(is_int16(offset >> 2));
1074
1075 return offset;
1076 }
1077
1078
1079 int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) {
1080 int32_t target_pos;
1081
1082 if (L->is_bound()) {
1083 target_pos = L->pos();
1084 } else {
1085 if (L->is_linked()) {
1086 target_pos = L->pos();
1087 L->link_to(pc_offset());
1088 } else {
1089 L->link_to(pc_offset());
1090 if (!trampoline_emitted_) {
1091 unbound_labels_count_++;
1092 next_buffer_check_ -= kTrampolineSlotsSize;
1093 }
1094 return kEndOfChain;
1095 }
1096 }
1097
1098 int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
1099 DCHECK((offset & 3) == 0);
1100 DCHECK(((offset >> 2) & 0xFFE00000) == 0); // Offset is 21bit width.
1101
1102 return offset;
1103 }
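
The new 21-bit variants serve the r6 compact branches (beqzc/bnezc), whose immediate field holds a word offset. A hedged standalone restatement of the range check performed by the DCHECK above, with an illustrative name:

#include <cstdint>

// True when a word-aligned byte offset fits the 21-bit word-offset field.
bool FitsBranchOffset21(int32_t byte_offset) {
  if ((byte_offset & 3) != 0) return false;        // must be word aligned
  return ((byte_offset >> 2) & 0xFFE00000) == 0;   // no bits above bit 20
}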
1104
1105
1106 int32_t Assembler::branch_offset21_compact(Label* L,
1107 bool jump_elimination_allowed) {
1108 int32_t target_pos;
1109
1110 if (L->is_bound()) {
1111 target_pos = L->pos();
1112 } else {
1113 if (L->is_linked()) {
1114 target_pos = L->pos();
1115 L->link_to(pc_offset());
1116 } else {
1117 L->link_to(pc_offset());
1118 if (!trampoline_emitted_) {
1119 unbound_labels_count_++;
1120 next_buffer_check_ -= kTrampolineSlotsSize;
1121 }
1122 return kEndOfChain;
1123 }
1124 }
1125
1126 int32_t offset = target_pos - pc_offset();
1127 DCHECK((offset & 3) == 0);
1128 DCHECK(((offset >> 2) & 0xFFe00000) == 0); // Offset is 21bit width.
1129
1130 return offset;
1131 }
1132
1133
1022 void Assembler::label_at_put(Label* L, int at_offset) { 1134 void Assembler::label_at_put(Label* L, int at_offset) {
1023 int target_pos; 1135 int target_pos;
1024 if (L->is_bound()) { 1136 if (L->is_bound()) {
1025 target_pos = L->pos(); 1137 target_pos = L->pos();
1026 instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag)); 1138 instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
1027 } else { 1139 } else {
1028 if (L->is_linked()) { 1140 if (L->is_linked()) {
1029 target_pos = L->pos(); // L's link. 1141 target_pos = L->pos(); // L's link.
1030 int32_t imm18 = target_pos - at_offset; 1142 int32_t imm18 = target_pos - at_offset;
1031 DCHECK((imm18 & 3) == 0); 1143 DCHECK((imm18 & 3) == 0);
(...skipping 33 matching lines...)
1065 } 1177 }
1066 1178
1067 1179
1068 void Assembler::bgez(Register rs, int16_t offset) { 1180 void Assembler::bgez(Register rs, int16_t offset) {
1069 BlockTrampolinePoolScope block_trampoline_pool(this); 1181 BlockTrampolinePoolScope block_trampoline_pool(this);
1070 GenInstrImmediate(REGIMM, rs, BGEZ, offset); 1182 GenInstrImmediate(REGIMM, rs, BGEZ, offset);
1071 BlockTrampolinePoolFor(1); // For associated delay slot. 1183 BlockTrampolinePoolFor(1); // For associated delay slot.
1072 } 1184 }
1073 1185
1074 1186
1187 void Assembler::bgezc(Register rt, int16_t offset) {
1188 DCHECK(IsMipsArchVariant(kMips32r6));
1189 DCHECK(!(rt.is(zero_reg)));
1190 GenInstrImmediate(BLEZL, rt, rt, offset);
1191 }
1192
1193
1194 void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
1195 DCHECK(IsMipsArchVariant(kMips32r6));
1196 DCHECK(!(rs.is(zero_reg)));
1197 DCHECK(!(rt.is(zero_reg)));
1198 DCHECK(rs.code() != rt.code());
1199 GenInstrImmediate(BLEZ, rs, rt, offset);
1200 }
1201
1202
1203 void Assembler::bgec(Register rs, Register rt, int16_t offset) {
1204 DCHECK(IsMipsArchVariant(kMips32r6));
1205 DCHECK(!(rs.is(zero_reg)));
1206 DCHECK(!(rt.is(zero_reg)));
1207 DCHECK(rs.code() != rt.code());
1208 GenInstrImmediate(BLEZL, rs, rt, offset);
1209 }
1210
1211
1075 void Assembler::bgezal(Register rs, int16_t offset) { 1212 void Assembler::bgezal(Register rs, int16_t offset) {
1213 DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
1076 BlockTrampolinePoolScope block_trampoline_pool(this); 1214 BlockTrampolinePoolScope block_trampoline_pool(this);
1077 positions_recorder()->WriteRecordedPositions(); 1215 positions_recorder()->WriteRecordedPositions();
1078 GenInstrImmediate(REGIMM, rs, BGEZAL, offset); 1216 GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
1079 BlockTrampolinePoolFor(1); // For associated delay slot. 1217 BlockTrampolinePoolFor(1); // For associated delay slot.
1080 } 1218 }
1081 1219
1082 1220
1083 void Assembler::bgtz(Register rs, int16_t offset) { 1221 void Assembler::bgtz(Register rs, int16_t offset) {
1084 BlockTrampolinePoolScope block_trampoline_pool(this); 1222 BlockTrampolinePoolScope block_trampoline_pool(this);
1085 GenInstrImmediate(BGTZ, rs, zero_reg, offset); 1223 GenInstrImmediate(BGTZ, rs, zero_reg, offset);
1086 BlockTrampolinePoolFor(1); // For associated delay slot. 1224 BlockTrampolinePoolFor(1); // For associated delay slot.
1087 } 1225 }
1088 1226
1089 1227
1228 void Assembler::bgtzc(Register rt, int16_t offset) {
1229 DCHECK(IsMipsArchVariant(kMips32r6));
1230 DCHECK(!(rt.is(zero_reg)));
1231 GenInstrImmediate(BGTZL, zero_reg, rt, offset);
1232 }
1233
1234
1090 void Assembler::blez(Register rs, int16_t offset) { 1235 void Assembler::blez(Register rs, int16_t offset) {
1091 BlockTrampolinePoolScope block_trampoline_pool(this); 1236 BlockTrampolinePoolScope block_trampoline_pool(this);
1092 GenInstrImmediate(BLEZ, rs, zero_reg, offset); 1237 GenInstrImmediate(BLEZ, rs, zero_reg, offset);
1093 BlockTrampolinePoolFor(1); // For associated delay slot. 1238 BlockTrampolinePoolFor(1); // For associated delay slot.
1094 } 1239 }
1095 1240
1096 1241
1242 void Assembler::blezc(Register rt, int16_t offset) {
1243 DCHECK(IsMipsArchVariant(kMips32r6));
1244 DCHECK(!(rt.is(zero_reg)));
1245 GenInstrImmediate(BLEZL, zero_reg, rt, offset);
1246 }
1247
1248
1249 void Assembler::bltzc(Register rt, int16_t offset) {
1250 DCHECK(IsMipsArchVariant(kMips32r6));
1251 DCHECK(!(rt.is(zero_reg)));
1252 GenInstrImmediate(BGTZL, rt, rt, offset);
1253 }
1254
1255
1256 void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
1257 DCHECK(IsMipsArchVariant(kMips32r6));
1258 DCHECK(!(rs.is(zero_reg)));
1259 DCHECK(!(rt.is(zero_reg)));
1260 DCHECK(rs.code() != rt.code());
1261 GenInstrImmediate(BGTZ, rs, rt, offset);
1262 }
1263
1264
1265 void Assembler::bltc(Register rs, Register rt, int16_t offset) {
1266 DCHECK(IsMipsArchVariant(kMips32r6));
1267 DCHECK(!(rs.is(zero_reg)));
1268 DCHECK(!(rt.is(zero_reg)));
1269 DCHECK(rs.code() != rt.code());
1270 GenInstrImmediate(BGTZL, rs, rt, offset);
1271 }
1272
1273
1097 void Assembler::bltz(Register rs, int16_t offset) { 1274 void Assembler::bltz(Register rs, int16_t offset) {
1098 BlockTrampolinePoolScope block_trampoline_pool(this); 1275 BlockTrampolinePoolScope block_trampoline_pool(this);
1099 GenInstrImmediate(REGIMM, rs, BLTZ, offset); 1276 GenInstrImmediate(REGIMM, rs, BLTZ, offset);
1100 BlockTrampolinePoolFor(1); // For associated delay slot. 1277 BlockTrampolinePoolFor(1); // For associated delay slot.
1101 } 1278 }
1102 1279
1103 1280
1104 void Assembler::bltzal(Register rs, int16_t offset) { 1281 void Assembler::bltzal(Register rs, int16_t offset) {
1282 DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
1105 BlockTrampolinePoolScope block_trampoline_pool(this); 1283 BlockTrampolinePoolScope block_trampoline_pool(this);
1106 positions_recorder()->WriteRecordedPositions(); 1284 positions_recorder()->WriteRecordedPositions();
1107 GenInstrImmediate(REGIMM, rs, BLTZAL, offset); 1285 GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
1108 BlockTrampolinePoolFor(1); // For associated delay slot. 1286 BlockTrampolinePoolFor(1); // For associated delay slot.
1109 } 1287 }
1110 1288
1111 1289
1112 void Assembler::bne(Register rs, Register rt, int16_t offset) { 1290 void Assembler::bne(Register rs, Register rt, int16_t offset) {
1113 BlockTrampolinePoolScope block_trampoline_pool(this); 1291 BlockTrampolinePoolScope block_trampoline_pool(this);
1114 GenInstrImmediate(BNE, rs, rt, offset); 1292 GenInstrImmediate(BNE, rs, rt, offset);
1115 BlockTrampolinePoolFor(1); // For associated delay slot. 1293 BlockTrampolinePoolFor(1); // For associated delay slot.
1116 } 1294 }
1117 1295
1118 1296
1297 void Assembler::bovc(Register rs, Register rt, int16_t offset) {
1298 DCHECK(IsMipsArchVariant(kMips32r6));
1299 DCHECK(!(rs.is(zero_reg)));
1300 DCHECK(rs.code() >= rt.code());
1301 GenInstrImmediate(ADDI, rs, rt, offset);
1302 }
1303
1304
1305 void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
1306 DCHECK(IsMipsArchVariant(kMips32r6));
1307 DCHECK(!(rs.is(zero_reg)));
1308 DCHECK(rs.code() >= rt.code());
1309 GenInstrImmediate(DADDI, rs, rt, offset);
1310 }
1311
1312
1313 void Assembler::blezalc(Register rt, int16_t offset) {
1314 DCHECK(IsMipsArchVariant(kMips32r6));
1315 DCHECK(!(rt.is(zero_reg)));
1316 GenInstrImmediate(BLEZ, zero_reg, rt, offset);
1317 }
1318
1319
1320 void Assembler::bgezalc(Register rt, int16_t offset) {
1321 DCHECK(IsMipsArchVariant(kMips32r6));
1322 DCHECK(!(rt.is(zero_reg)));
1323 GenInstrImmediate(BLEZ, rt, rt, offset);
1324 }
1325
1326
1327 void Assembler::bgezall(Register rs, int16_t offset) {
1328 DCHECK(IsMipsArchVariant(kMips32r6));
1329 DCHECK(!(rs.is(zero_reg)));
1330 GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
1331 }
1332
1333
1334 void Assembler::bltzalc(Register rt, int16_t offset) {
1335 DCHECK(IsMipsArchVariant(kMips32r6));
1336 DCHECK(!(rt.is(zero_reg)));
1337 GenInstrImmediate(BGTZ, rt, rt, offset);
1338 }
1339
1340
1341 void Assembler::bgtzalc(Register rt, int16_t offset) {
1342 DCHECK(IsMipsArchVariant(kMips32r6));
1343 DCHECK(!(rt.is(zero_reg)));
1344 GenInstrImmediate(BGTZ, zero_reg, rt, offset);
1345 }
1346
1347
1348 void Assembler::beqzalc(Register rt, int16_t offset) {
1349 DCHECK(IsMipsArchVariant(kMips32r6));
1350 DCHECK(!(rt.is(zero_reg)));
1351 GenInstrImmediate(ADDI, zero_reg, rt, offset);
1352 }
1353
1354
1355 void Assembler::bnezalc(Register rt, int16_t offset) {
1356 DCHECK(IsMipsArchVariant(kMips32r6));
1357 DCHECK(!(rt.is(zero_reg)));
1358 GenInstrImmediate(DADDI, zero_reg, rt, offset);
1359 }
1360
1361
1362 void Assembler::beqc(Register rs, Register rt, int16_t offset) {
1363 DCHECK(IsMipsArchVariant(kMips32r6));
1364 DCHECK(rs.code() < rt.code());
1365 GenInstrImmediate(ADDI, rs, rt, offset);
1366 }
1367
1368
1369 void Assembler::beqzc(Register rs, int32_t offset) {
1370 DCHECK(IsMipsArchVariant(kMips32r6));
1371 DCHECK(!(rs.is(zero_reg)));
1372 Instr instr = BEQZC | (rs.code() << kRsShift) | offset;
1373 emit(instr);
1374 }
1375
1376
1377 void Assembler::bnec(Register rs, Register rt, int16_t offset) {
1378 DCHECK(IsMipsArchVariant(kMips32r6));
1379 DCHECK(rs.code() < rt.code());
1380 GenInstrImmediate(DADDI, rs, rt, offset);
1381 }
1382
1383
1384 void Assembler::bnezc(Register rs, int32_t offset) {
1385 DCHECK(IsMipsArchVariant(kMips32r6));
1386 DCHECK(!(rs.is(zero_reg)));
1387 Instr instr = BNEZC | (rs.code() << kRsShift) | offset;
1388 emit(instr);
1389 }
1390
1391
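
A pattern worth noting in the emitters above: r6 overlays its compact branches onto the legacy BLEZ/BGTZ/BLEZL/BGTZL/ADDI/DADDI opcodes, and the rs/rt register fields select the variant, which is why the DCHECKs constrain which registers may be used. A hedged decode sketch for the family that reuses the BLEZL opcode (matching blezc, bgezc and bgec above); the real decoder lives elsewhere:

#include <cstdint>
#include <string>

// Which compact branch a BLEZL-opcode word encodes on r6, judged purely from
// its rs/rt fields (illustrative only).
std::string DecodeBlezlFamily(uint32_t rs, uint32_t rt) {
  if (rt == 0) return "not emitted here";   // legacy branch-likely form
  if (rs == 0) return "blezc";              // rs == 0, rt != 0
  if (rs == rt) return "bgezc";             // rs == rt, both != 0
  return "bgec";                            // rs != rt, both != 0
}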
1119 void Assembler::j(int32_t target) { 1392 void Assembler::j(int32_t target) {
1120 #if DEBUG 1393 #if DEBUG
1121 // Get pc of delay slot. 1394 // Get pc of delay slot.
1122 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize); 1395 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1123 bool in_range = (ipc ^ static_cast<uint32_t>(target) >> 1396 bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
1124 (kImm26Bits + kImmFieldShift)) == 0; 1397 (kImm26Bits + kImmFieldShift)) == 0;
1125 DCHECK(in_range && ((target & 3) == 0)); 1398 DCHECK(in_range && ((target & 3) == 0));
1126 #endif 1399 #endif
1127 GenInstrJump(J, target >> 2); 1400 GenInstrJump(J, target >> 2);
1128 } 1401 }
1129 1402
1130 1403
1131 void Assembler::jr(Register rs) { 1404 void Assembler::jr(Register rs) {
1132 BlockTrampolinePoolScope block_trampoline_pool(this); 1405 if (!IsMipsArchVariant(kMips32r6)) {
1133 if (rs.is(ra)) { 1406 BlockTrampolinePoolScope block_trampoline_pool(this);
1134 positions_recorder()->WriteRecordedPositions(); 1407 if (rs.is(ra)) {
1408 positions_recorder()->WriteRecordedPositions();
1409 }
1410 GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
1411 BlockTrampolinePoolFor(1); // For associated delay slot.
1412 } else {
1413 jalr(rs, zero_reg);
1135 } 1414 }
1136 GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
1137 BlockTrampolinePoolFor(1); // For associated delay slot.
1138 } 1415 }
1139 1416
1140 1417
1141 void Assembler::jal(int32_t target) { 1418 void Assembler::jal(int32_t target) {
1142 #ifdef DEBUG 1419 #ifdef DEBUG
1143 // Get pc of delay slot. 1420 // Get pc of delay slot.
1144 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize); 1421 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1145 bool in_range = (ipc ^ static_cast<uint32_t>(target) >> 1422 bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
1146 (kImm26Bits + kImmFieldShift)) == 0; 1423 (kImm26Bits + kImmFieldShift)) == 0;
1147 DCHECK(in_range && ((target & 3) == 0)); 1424 DCHECK(in_range && ((target & 3) == 0));
(...skipping 50 matching lines...)
1198 GenInstrImmediate(ADDIU, rs, rd, j); 1475 GenInstrImmediate(ADDIU, rs, rd, j);
1199 } 1476 }
1200 1477
1201 1478
1202 void Assembler::subu(Register rd, Register rs, Register rt) { 1479 void Assembler::subu(Register rd, Register rs, Register rt) {
1203 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU); 1480 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
1204 } 1481 }
1205 1482
1206 1483
1207 void Assembler::mul(Register rd, Register rs, Register rt) { 1484 void Assembler::mul(Register rd, Register rs, Register rt) {
1208 GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL); 1485 if (!IsMipsArchVariant(kMips32r6)) {
1486 GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
1487 } else {
1488 GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
1489 }
1209 } 1490 }
1210 1491
1211 1492
1493 void Assembler::mulu(Register rd, Register rs, Register rt) {
1494 DCHECK(IsMipsArchVariant(kMips32r6));
1495 GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
1496 }
1497
1498
1499 void Assembler::muh(Register rd, Register rs, Register rt) {
1500 DCHECK(IsMipsArchVariant(kMips32r6));
1501 GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
1502 }
1503
1504
1505 void Assembler::muhu(Register rd, Register rs, Register rt) {
1506 DCHECK(IsMipsArchVariant(kMips32r6));
1507 GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
1508 }
1509
1510
1511 void Assembler::mod(Register rd, Register rs, Register rt) {
1512 DCHECK(IsMipsArchVariant(kMips32r6));
1513 GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
1514 }
1515
1516
1517 void Assembler::modu(Register rd, Register rs, Register rt) {
1518 DCHECK(IsMipsArchVariant(kMips32r6));
1519 GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
1520 }
1521
1522
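
Unlike the legacy mult/multu pair, which writes the HI/LO registers, the r6 forms above are three-operand and split the result: mul/mulu return the low 32 bits of the product and muh/muhu the high 32 bits. An illustrative C++ restatement of what the signed pair computes:

#include <cstdint>

// mul rd, rs, rt  -> low word of the 64-bit signed product.
int32_t MulLow(int32_t rs, int32_t rt) {
  return static_cast<int32_t>(static_cast<int64_t>(rs) * rt);
}

// muh rd, rs, rt  -> high word of the 64-bit signed product.
int32_t MulHigh(int32_t rs, int32_t rt) {
  return static_cast<int32_t>((static_cast<int64_t>(rs) * rt) >> 32);
}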
1212 void Assembler::mult(Register rs, Register rt) { 1523 void Assembler::mult(Register rs, Register rt) {
1213 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT); 1524 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
1214 } 1525 }
1215 1526
1216 1527
1217 void Assembler::multu(Register rs, Register rt) { 1528 void Assembler::multu(Register rs, Register rt) {
1218 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU); 1529 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
1219 } 1530 }
1220 1531
1221 1532
1222 void Assembler::div(Register rs, Register rt) { 1533 void Assembler::div(Register rs, Register rt) {
1223 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV); 1534 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
1224 } 1535 }
1225 1536
1226 1537
1538 void Assembler::div(Register rd, Register rs, Register rt) {
1539 DCHECK(IsMipsArchVariant(kMips32r6));
1540 GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
1541 }
1542
1543
1227 void Assembler::divu(Register rs, Register rt) { 1544 void Assembler::divu(Register rs, Register rt) {
1228 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU); 1545 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
1229 } 1546 }
1230 1547
1231 1548
1549 void Assembler::divu(Register rd, Register rs, Register rt) {
1550 DCHECK(IsMipsArchVariant(kMips32r6));
1551 GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
1552 }
1553
1554
1232 // Logical. 1555 // Logical.
1233 1556
1234 void Assembler::and_(Register rd, Register rs, Register rt) { 1557 void Assembler::and_(Register rd, Register rs, Register rt) {
1235 GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND); 1558 GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
1236 } 1559 }
1237 1560
1238 1561
1239 void Assembler::andi(Register rt, Register rs, int32_t j) { 1562 void Assembler::andi(Register rt, Register rs, int32_t j) {
1240 DCHECK(is_uint16(j)); 1563 DCHECK(is_uint16(j));
1241 GenInstrImmediate(ANDI, rs, rt, j); 1564 GenInstrImmediate(ANDI, rs, rt, j);
(...skipping 62 matching lines...)
1304 1627
1305 1628
1306 void Assembler::srav(Register rd, Register rt, Register rs) { 1629 void Assembler::srav(Register rd, Register rt, Register rs) {
1307 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV); 1630 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
1308 } 1631 }
1309 1632
1310 1633
1311 void Assembler::rotr(Register rd, Register rt, uint16_t sa) { 1634 void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
1312 // Should be called via MacroAssembler::Ror. 1635 // Should be called via MacroAssembler::Ror.
1313 DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa)); 1636 DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
1314 DCHECK(kArchVariant == kMips32r2); 1637 DCHECK(IsMipsArchVariant(kMips32r2));
1315 Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) 1638 Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
1316 | (rd.code() << kRdShift) | (sa << kSaShift) | SRL; 1639 | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
1317 emit(instr); 1640 emit(instr);
1318 } 1641 }
1319 1642
1320 1643
1321 void Assembler::rotrv(Register rd, Register rt, Register rs) { 1644 void Assembler::rotrv(Register rd, Register rt, Register rs) {
1322 // Should be called via MacroAssembler::Ror. 1645 // Should be called via MacroAssembler::Ror.
1323 DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() ); 1646 DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
1324 DCHECK(kArchVariant == kMips32r2); 1647 DCHECK(IsMipsArchVariant(kMips32r2));
1325 Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) 1648 Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
1326 | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV; 1649 | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
1327 emit(instr); 1650 emit(instr);
1328 } 1651 }
1329 1652
1330 1653
1331 // ------------Memory-instructions------------- 1654 // ------------Memory-instructions-------------
1332 1655
1333 // Helper for base-reg + offset, when offset is larger than int16. 1656 // Helper for base-reg + offset, when offset is larger than int16.
1334 void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) { 1657 void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
(...skipping 103 matching lines...)
1438 GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_); 1761 GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
1439 } 1762 }
1440 1763
1441 1764
1442 void Assembler::lui(Register rd, int32_t j) { 1765 void Assembler::lui(Register rd, int32_t j) {
1443 DCHECK(is_uint16(j)); 1766 DCHECK(is_uint16(j));
1444 GenInstrImmediate(LUI, zero_reg, rd, j); 1767 GenInstrImmediate(LUI, zero_reg, rd, j);
1445 } 1768 }
1446 1769
1447 1770
1771 void Assembler::aui(Register rs, Register rt, int32_t j) {
1772 // This instruction uses same opcode as 'lui'. The difference in encoding is
1773 // 'lui' has zero reg. for rs field.
1774 DCHECK(is_uint16(j));
1775 GenInstrImmediate(LUI, rs, rt, j);
1776 }
1777
1778
1448 // -------------Misc-instructions-------------- 1779 // -------------Misc-instructions--------------
1449 1780
1450 // Break / Trap instructions. 1781 // Break / Trap instructions.
1451 void Assembler::break_(uint32_t code, bool break_as_stop) { 1782 void Assembler::break_(uint32_t code, bool break_as_stop) {
1452 DCHECK((code & ~0xfffff) == 0); 1783 DCHECK((code & ~0xfffff) == 0);
1453 // We need to invalidate breaks that could be stops as well because the 1784 // We need to invalidate breaks that could be stops as well because the
1454 // simulator expects a char pointer after the stop instruction. 1785 // simulator expects a char pointer after the stop instruction.
1455 // See constants-mips.h for explanation. 1786 // See constants-mips.h for explanation.
1456 DCHECK((break_as_stop && 1787 DCHECK((break_as_stop &&
1457 code <= kMaxStopCode && 1788 code <= kMaxStopCode &&
(...skipping 123 matching lines...)
1581 1912
1582 void Assembler::movf(Register rd, Register rs, uint16_t cc) { 1913 void Assembler::movf(Register rd, Register rs, uint16_t cc) {
1583 Register rt; 1914 Register rt;
1584 rt.code_ = (cc & 0x0007) << 2 | 0; 1915 rt.code_ = (cc & 0x0007) << 2 | 0;
1585 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI); 1916 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
1586 } 1917 }
1587 1918
1588 1919
1589 // Bit twiddling. 1920 // Bit twiddling.
1590 void Assembler::clz(Register rd, Register rs) { 1921 void Assembler::clz(Register rd, Register rs) {
1591 // Clz instr requires same GPR number in 'rd' and 'rt' fields. 1922 if (!IsMipsArchVariant(kMips32r6)) {
1592 GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ); 1923 // Clz instr requires same GPR number in 'rd' and 'rt' fields.
1924 GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
1925 } else {
1926 GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
1927 }
1593 } 1928 }
1594 1929
1595 1930
1596 void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) { 1931 void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
1597 // Should be called via MacroAssembler::Ins. 1932 // Should be called via MacroAssembler::Ins.
1598 // Ins instr has 'rt' field as dest, and two uint5: msb, lsb. 1933 // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
1599 DCHECK(kArchVariant == kMips32r2); 1934 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
1600 GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS); 1935 GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
1601 } 1936 }
1602 1937
1603 1938
1604 void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) { 1939 void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
1605 // Should be called via MacroAssembler::Ext. 1940 // Should be called via MacroAssembler::Ext.
1606 // Ext instr has 'rt' field as dest, and two uint5: msb, lsb. 1941 // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
1607 DCHECK(kArchVariant == kMips32r2); 1942 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
1608 GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT); 1943 GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
1609 } 1944 }
1610 1945
1611 1946
1612 void Assembler::pref(int32_t hint, const MemOperand& rs) { 1947 void Assembler::pref(int32_t hint, const MemOperand& rs) {
1613 DCHECK(kArchVariant != kLoongson); 1948 DCHECK(!IsMipsArchVariant(kLoongson));
1614 DCHECK(is_uint5(hint) && is_uint16(rs.offset_)); 1949 DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
1615 Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift) 1950 Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
1616 | (rs.offset_); 1951 | (rs.offset_);
1617 emit(instr); 1952 emit(instr);
1618 } 1953 }
1619 1954
1620 1955
1621 // --------Coprocessor-instructions---------------- 1956 // --------Coprocessor-instructions----------------
1622 1957
1623 // Load, store, move. 1958 // Load, store, move.
1624 void Assembler::lwc1(FPURegister fd, const MemOperand& src) { 1959 void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
1625 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_); 1960 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
1626 } 1961 }
1627 1962
1628 1963
1629 void Assembler::ldc1(FPURegister fd, const MemOperand& src) { 1964 void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
1630 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit 1965 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
1631 // load to two 32-bit loads. 1966 // load to two 32-bit loads.
1632 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ + 1967 if (IsFp64Mode()) {
1633 Register::kMantissaOffset); 1968 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
1634 FPURegister nextfpreg; 1969 Register::kMantissaOffset);
1635 nextfpreg.setcode(fd.code() + 1); 1970 GenInstrImmediate(LW, src.rm(), at, src.offset_ +
1636 GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 1971 Register::kExponentOffset);
1637 Register::kExponentOffset); 1972 mthc1(at, fd);
1973 } else {
1974 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
1975 Register::kMantissaOffset);
1976 FPURegister nextfpreg;
1977 nextfpreg.setcode(fd.code() + 1);
1978 GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
1979 Register::kExponentOffset);
1980 }
1638 } 1981 }
1639 1982
1640 1983
1641 void Assembler::swc1(FPURegister fd, const MemOperand& src) { 1984 void Assembler::swc1(FPURegister fd, const MemOperand& src) {
1642 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_); 1985 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
1643 } 1986 }
1644 1987
1645 1988
1646 void Assembler::sdc1(FPURegister fd, const MemOperand& src) { 1989 void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
1647 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit 1990 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
1648 // store to two 32-bit stores. 1991 // store to two 32-bit stores.
1649 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ + 1992 if (IsFp64Mode()) {
1650 Register::kMantissaOffset); 1993 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
1651 FPURegister nextfpreg; 1994 Register::kMantissaOffset);
1652 nextfpreg.setcode(fd.code() + 1); 1995 mfhc1(at, fd);
1653 GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 1996 GenInstrImmediate(SW, src.rm(), at, src.offset_ +
1654 Register::kExponentOffset); 1997 Register::kExponentOffset);
1998 } else {
1999 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
2000 Register::kMantissaOffset);
2001 FPURegister nextfpreg;
2002 nextfpreg.setcode(fd.code() + 1);
2003 GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
2004 Register::kExponentOffset);
2005 }
1655 } 2006 }
1656 2007
1657 2008
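
In FP64 mode a double no longer spans an even/odd FPR pair, so ldc1/sdc1 above move the high word through the scratch register at with mthc1/mfhc1 instead of touching fd + 1. Either way the 64-bit value travels as two 32-bit words; a hedged illustration of that split on a little-endian target, where kMantissaOffset is the low word:

#include <cstdint>
#include <cstring>

// Split a double into the two 32-bit words that a pair of 32-bit accesses
// would move; little-endian layout assumed for the low/high naming.
void SplitDouble(double d, uint32_t* low_word, uint32_t* high_word) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  *low_word = static_cast<uint32_t>(bits);         // mantissa word
  *high_word = static_cast<uint32_t>(bits >> 32);  // sign/exponent word
}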
1658 void Assembler::mtc1(Register rt, FPURegister fs) { 2009 void Assembler::mtc1(Register rt, FPURegister fs) {
1659 GenInstrRegister(COP1, MTC1, rt, fs, f0); 2010 GenInstrRegister(COP1, MTC1, rt, fs, f0);
1660 } 2011 }
1661 2012
1662 2013
2014 void Assembler::mthc1(Register rt, FPURegister fs) {
2015 GenInstrRegister(COP1, MTHC1, rt, fs, f0);
2016 }
2017
2018
1663 void Assembler::mfc1(Register rt, FPURegister fs) { 2019 void Assembler::mfc1(Register rt, FPURegister fs) {
1664 GenInstrRegister(COP1, MFC1, rt, fs, f0); 2020 GenInstrRegister(COP1, MFC1, rt, fs, f0);
1665 } 2021 }
1666 2022
1667 2023
2024 void Assembler::mfhc1(Register rt, FPURegister fs) {
2025 GenInstrRegister(COP1, MFHC1, rt, fs, f0);
2026 }
2027
2028
1668 void Assembler::ctc1(Register rt, FPUControlRegister fs) { 2029 void Assembler::ctc1(Register rt, FPUControlRegister fs) {
1669 GenInstrRegister(COP1, CTC1, rt, fs); 2030 GenInstrRegister(COP1, CTC1, rt, fs);
1670 } 2031 }
1671 2032
1672 2033
1673 void Assembler::cfc1(Register rt, FPUControlRegister fs) { 2034 void Assembler::cfc1(Register rt, FPUControlRegister fs) {
1674 GenInstrRegister(COP1, CFC1, rt, fs); 2035 GenInstrRegister(COP1, CFC1, rt, fs);
1675 } 2036 }
1676 2037
1677 2038
(...skipping 100 matching lines...)
1778 GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S); 2139 GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
1779 } 2140 }
1780 2141
1781 2142
1782 void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) { 2143 void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
1783 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D); 2144 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
1784 } 2145 }
1785 2146
1786 2147
1787 void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) { 2148 void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
1788 DCHECK(kArchVariant == kMips32r2); 2149 DCHECK(IsMipsArchVariant(kMips32r2));
1789 GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S); 2150 GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
1790 } 2151 }
1791 2152
1792 2153
1793 void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) { 2154 void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
1794 DCHECK(kArchVariant == kMips32r2); 2155 DCHECK(IsMipsArchVariant(kMips32r2));
1795 GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D); 2156 GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
1796 } 2157 }
1797 2158
1798 2159
1799 void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) { 2160 void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
1800 DCHECK(kArchVariant == kMips32r2); 2161 DCHECK(IsMipsArchVariant(kMips32r2));
1801 GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S); 2162 GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
1802 } 2163 }
1803 2164
1804 2165
1805 void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) { 2166 void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
1806 DCHECK(kArchVariant == kMips32r2); 2167 DCHECK(IsMipsArchVariant(kMips32r2));
1807 GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D); 2168 GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
1808 } 2169 }
1809 2170
1810 2171
1811 void Assembler::round_l_s(FPURegister fd, FPURegister fs) { 2172 void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
1812 GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S); 2173 GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
1813 } 2174 }
1814 2175
1815 2176
1816 void Assembler::round_l_d(FPURegister fd, FPURegister fs) { 2177 void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
(...skipping 14 matching lines...)
1831 void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) { 2192 void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
1832 GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S); 2193 GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
1833 } 2194 }
1834 2195
1835 2196
1836 void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) { 2197 void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
1837 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D); 2198 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
1838 } 2199 }
1839 2200
1840 2201
2202 void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister ft,
2203 FPURegister fs) {
2204 DCHECK(IsMipsArchVariant(kMips32r6));
2205 DCHECK((fmt == D) || (fmt == S));
2206 GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
2207 }
2208
2209
2210 void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister ft,
2211 FPURegister fs) {
2212 DCHECK(IsMipsArchVariant(kMips32r6));
2213 DCHECK((fmt == D) || (fmt == S));
2214 GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
2215 }
2216
2217
2218 void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister ft,
2219 FPURegister fs) {
2220 DCHECK(IsMipsArchVariant(kMips32r6));
2221 DCHECK((fmt == D) || (fmt == S));
2222 GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
2223 }
2224
2225
2226 void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister ft,
2227 FPURegister fs) {
2228 DCHECK(IsMipsArchVariant(kMips32r6));
2229 DCHECK((fmt == D) || (fmt == S));
2230 GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
2231 }
2232
2233
1841 void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) { 2234 void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
1842 GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W); 2235 GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
1843 } 2236 }
1844 2237
1845 2238
1846 void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) { 2239 void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
1847 DCHECK(kArchVariant == kMips32r2); 2240 DCHECK(IsMipsArchVariant(kMips32r2));
1848 GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L); 2241 GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
1849 } 2242 }
1850 2243
1851 2244
1852 void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) { 2245 void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
1853 GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D); 2246 GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
1854 } 2247 }
1855 2248
1856 2249
1857 void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) { 2250 void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
1858 GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W); 2251 GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
1859 } 2252 }
1860 2253
1861 2254
1862 void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) { 2255 void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
1863 DCHECK(kArchVariant == kMips32r2); 2256 DCHECK(IsMipsArchVariant(kMips32r2));
1864 GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L); 2257 GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
1865 } 2258 }
1866 2259
1867 2260
1868 void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) { 2261 void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
1869 GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S); 2262 GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
1870 } 2263 }
1871 2264
1872 2265
1873 // Conditions. 2266 // Conditions for >= MIPSr6.
2267 void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
2268 FPURegister fd, FPURegister fs, FPURegister ft) {
2269 DCHECK(IsMipsArchVariant(kMips32r6));
2270 DCHECK((fmt & ~(31 << kRsShift)) == 0);
2271 Instr instr = COP1 | fmt | ft.code() << kFtShift |
2272 fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
2273 emit(instr);
2274 }
2275
2276
2277 void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
2278 DCHECK(IsMipsArchVariant(kMips32r6));
2279 Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
2280 emit(instr);
2281 }
2282
2283
2284 void Assembler::bc1nez(int16_t offset, FPURegister ft) {
2285 DCHECK(IsMipsArchVariant(kMips32r6));
2286 Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
2287 emit(instr);
2288 }
2289
2290
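
On r6 the FCSR condition codes used by c.cond.fmt with bc1t/bc1f are gone: cmp writes an all-ones or all-zeros mask into the destination FPR, and bc1eqz/bc1nez branch on bit 0 of that register. A hedged sketch of the branch decision, operating on the raw FPR bits:

#include <cstdint>

// Branch-taken tests for the r6 FP compare-and-branch pattern above.
bool Bc1eqzTaken(uint64_t ft_bits) { return (ft_bits & 1) == 0; }
bool Bc1nezTaken(uint64_t ft_bits) { return (ft_bits & 1) != 0; }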
2291 // Conditions for < MIPSr6.
1874 void Assembler::c(FPUCondition cond, SecondaryField fmt, 2292 void Assembler::c(FPUCondition cond, SecondaryField fmt,
1875 FPURegister fs, FPURegister ft, uint16_t cc) { 2293 FPURegister fs, FPURegister ft, uint16_t cc) {
1876 DCHECK(is_uint3(cc)); 2294 DCHECK(is_uint3(cc));
1877 DCHECK((fmt & ~(31 << kRsShift)) == 0); 2295 DCHECK((fmt & ~(31 << kRsShift)) == 0);
1878 Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift 2296 Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
1879 | cc << 8 | 3 << 4 | cond; 2297 | cc << 8 | 3 << 4 | cond;
1880 emit(instr); 2298 emit(instr);
1881 } 2299 }
1882 2300
1883 2301
(...skipping 293 matching lines...)
2177 #ifdef DEBUG 2595 #ifdef DEBUG
2178 // Check we have the result from a li macro-instruction, using instr pair. 2596 // Check we have the result from a li macro-instruction, using instr pair.
2179 Instr instr1 = instr_at(pc); 2597 Instr instr1 = instr_at(pc);
2180 CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI)); 2598 CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
2181 #endif 2599 #endif
2182 2600
2183 // Must use 2 instructions to insure patchable code => just use lui and ori. 2601 // Must use 2 instructions to insure patchable code => just use lui and ori.
2184 // lui rt, upper-16. 2602 // lui rt, upper-16.
2185 // ori rt rt, lower-16. 2603 // ori rt rt, lower-16.
2186 *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift); 2604 *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
2187 *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask); 2605 *(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
2188 2606
2189 // The following code is an optimization for the common case of Call() 2607 // The following code is an optimization for the common case of Call()
2190 // or Jump() which is load to register, and jump through register: 2608 // or Jump() which is load to register, and jump through register:
2191 // li(t9, address); jalr(t9) (or jr(t9)). 2609 // li(t9, address); jalr(t9) (or jr(t9)).
2192 // If the destination address is in the same 256 MB page as the call, it 2610 // If the destination address is in the same 256 MB page as the call, it
2193 // is faster to do a direct jal, or j, rather than jump thru register, since 2611 // is faster to do a direct jal, or j, rather than jump thru register, since
2194 // that lets the cpu pipeline prefetch the target address. However each 2612 // that lets the cpu pipeline prefetch the target address. However each
2195 // time the address above is patched, we have to patch the direct jal/j 2613 // time the address above is patched, we have to patch the direct jal/j
2196 // instruction, as well as possibly revert to jalr/jr if we now cross a 2614 // instruction, as well as possibly revert to jalr/jr if we now cross a
2197 // 256 MB page. Note that with the jal/j instructions, we do not need to 2615 // 256 MB page. Note that with the jal/j instructions, we do not need to
(...skipping 22 matching lines...)
2220 uint32_t segment_mask = ((256 * MB) - 1) ^ ((32 * KB) - 1); 2638 uint32_t segment_mask = ((256 * MB) - 1) ^ ((32 * KB) - 1);
2221 uint32_t ipc_segment_addr = ipc & segment_mask; 2639 uint32_t ipc_segment_addr = ipc & segment_mask;
2222 if (ipc_segment_addr == 0 || ipc_segment_addr == segment_mask) 2640 if (ipc_segment_addr == 0 || ipc_segment_addr == segment_mask)
2223 in_range = false; 2641 in_range = false;
2224 } 2642 }
2225 #endif 2643 #endif
2226 2644
2227 if (IsJalr(instr3)) { 2645 if (IsJalr(instr3)) {
2228 // Try to convert JALR to JAL. 2646 // Try to convert JALR to JAL.
2229 if (in_range && GetRt(instr2) == GetRs(instr3)) { 2647 if (in_range && GetRt(instr2) == GetRs(instr3)) {
2230 *(p+2) = JAL | target_field; 2648 *(p + 2) = JAL | target_field;
2231 patched_jump = true; 2649 patched_jump = true;
2232 } 2650 }
2233 } else if (IsJr(instr3)) { 2651 } else if (IsJr(instr3)) {
2234 // Try to convert JR to J, skip returns (jr ra). 2652 // Try to convert JR to J, skip returns (jr ra).
2235 bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code(); 2653 bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
2236 if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) { 2654 if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
2237 *(p+2) = J | target_field; 2655 *(p + 2) = J | target_field;
2238 patched_jump = true; 2656 patched_jump = true;
2239 } 2657 }
2240 } else if (IsJal(instr3)) { 2658 } else if (IsJal(instr3)) {
2241 if (in_range) { 2659 if (in_range) {
2242 // We are patching an already converted JAL. 2660 // We are patching an already converted JAL.
2243 *(p+2) = JAL | target_field; 2661 *(p + 2) = JAL | target_field;
2244 } else { 2662 } else {
2245 // Patch JAL, but out of range, revert to JALR. 2663 // Patch JAL, but out of range, revert to JALR.
2246 // JALR rs reg is the rt reg specified in the ORI instruction. 2664 // JALR rs reg is the rt reg specified in the ORI instruction.
2247 uint32_t rs_field = GetRt(instr2) << kRsShift; 2665 uint32_t rs_field = GetRt(instr2) << kRsShift;
2248 uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg. 2666 uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
2249 *(p+2) = SPECIAL | rs_field | rd_field | JALR; 2667 *(p+2) = SPECIAL | rs_field | rd_field | JALR;
2250 } 2668 }
2251 patched_jump = true; 2669 patched_jump = true;
2252 } else if (IsJ(instr3)) { 2670 } else if (IsJ(instr3)) {
2253 if (in_range) { 2671 if (in_range) {
2254 // We are patching an already converted J (jump). 2672 // We are patching an already converted J (jump).
2255 *(p+2) = J | target_field; 2673 *(p + 2) = J | target_field;
2256 } else { 2674 } else {
2257 // Trying patch J, but out of range, just go back to JR. 2675 // Trying patch J, but out of range, just go back to JR.
2258 // JR 'rs' reg is the 'rt' reg specified in the ORI instruction (instr2). 2676 // JR 'rs' reg is the 'rt' reg specified in the ORI instruction (instr2).
2259 uint32_t rs_field = GetRt(instr2) << kRsShift; 2677 uint32_t rs_field = GetRt(instr2) << kRsShift;
2260 *(p+2) = SPECIAL | rs_field | JR; 2678 if (IsMipsArchVariant(kMips32r6)) {
2679 *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR;
2680 } else {
2681 *(p + 2) = SPECIAL | rs_field | JR;
2682 }
2261 } 2683 }
2262 patched_jump = true; 2684 patched_jump = true;
2263 } 2685 }
2264 2686
2265 if (icache_flush_mode != SKIP_ICACHE_FLUSH) { 2687 if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
2266 CpuFeatures::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t)); 2688 CpuFeatures::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
2267 } 2689 }
2268 } 2690 }
2269 2691
2270 2692
2271 void Assembler::JumpLabelToJumpRegister(Address pc) { 2693 void Assembler::JumpLabelToJumpRegister(Address pc) {
2272 // Address pc points to lui/ori instructions. 2694 // Address pc points to lui/ori instructions.
2273 // Jump to label may follow at pc + 2 * kInstrSize. 2695 // Jump to label may follow at pc + 2 * kInstrSize.
2274 uint32_t* p = reinterpret_cast<uint32_t*>(pc); 2696 uint32_t* p = reinterpret_cast<uint32_t*>(pc);
2275 #ifdef DEBUG 2697 #ifdef DEBUG
2276 Instr instr1 = instr_at(pc); 2698 Instr instr1 = instr_at(pc);
2277 #endif 2699 #endif
2278 Instr instr2 = instr_at(pc + 1 * kInstrSize); 2700 Instr instr2 = instr_at(pc + 1 * kInstrSize);
2279 Instr instr3 = instr_at(pc + 2 * kInstrSize); 2701 Instr instr3 = instr_at(pc + 2 * kInstrSize);
2280 bool patched = false; 2702 bool patched = false;
2281 2703
2282 if (IsJal(instr3)) { 2704 if (IsJal(instr3)) {
2283 DCHECK(GetOpcodeField(instr1) == LUI); 2705 DCHECK(GetOpcodeField(instr1) == LUI);
2284 DCHECK(GetOpcodeField(instr2) == ORI); 2706 DCHECK(GetOpcodeField(instr2) == ORI);
2285 2707
2286 uint32_t rs_field = GetRt(instr2) << kRsShift; 2708 uint32_t rs_field = GetRt(instr2) << kRsShift;
2287 uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg. 2709 uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
2288 *(p+2) = SPECIAL | rs_field | rd_field | JALR; 2710 *(p + 2) = SPECIAL | rs_field | rd_field | JALR;
2289 patched = true; 2711 patched = true;
2290 } else if (IsJ(instr3)) { 2712 } else if (IsJ(instr3)) {
2291 DCHECK(GetOpcodeField(instr1) == LUI); 2713 DCHECK(GetOpcodeField(instr1) == LUI);
2292 DCHECK(GetOpcodeField(instr2) == ORI); 2714 DCHECK(GetOpcodeField(instr2) == ORI);
2293 2715
2294 uint32_t rs_field = GetRt(instr2) << kRsShift; 2716 uint32_t rs_field = GetRt(instr2) << kRsShift;
2295 *(p+2) = SPECIAL | rs_field | JR; 2717 if (IsMipsArchVariant(kMips32r6)) {
2718 *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR;
2719 } else {
2720 *(p + 2) = SPECIAL | rs_field | JR;
2721 }
2296 patched = true; 2722 patched = true;
2297 } 2723 }
2298 2724
2299 if (patched) { 2725 if (patched) {
2300 CpuFeatures::FlushICache(pc+2, sizeof(Address)); 2726 CpuFeatures::FlushICache(pc + 2, sizeof(Address));
2301 } 2727 }
2302 } 2728 }
2303 2729
2304 2730
2305 Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) { 2731 Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
2306 // No out-of-line constant pool support. 2732 // No out-of-line constant pool support.
2307 DCHECK(!FLAG_enable_ool_constant_pool); 2733 DCHECK(!FLAG_enable_ool_constant_pool);
2308 return isolate->factory()->empty_constant_pool_array(); 2734 return isolate->factory()->empty_constant_pool_array();
2309 } 2735 }
2310 2736
2311 2737
2312 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) { 2738 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
2313 // No out-of-line constant pool support. 2739 // No out-of-line constant pool support.
2314 DCHECK(!FLAG_enable_ool_constant_pool); 2740 DCHECK(!FLAG_enable_ool_constant_pool);
2315 return; 2741 return;
2316 } 2742 }
2317 2743
2318 2744
2319 } } // namespace v8::internal 2745 } } // namespace v8::internal
2320 2746
2321 #endif // V8_TARGET_ARCH_MIPS 2747 #endif // V8_TARGET_ARCH_MIPS