Chromium Code Reviews

Unified Diff: src/mips/assembler-mips.cc

Issue 458193002: Revert 23028 - "MIPS: Add support for arch. revision 6 to mips32 port." (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 4 months ago
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
(...skipping 80 matching lines...)
  supported_ |= CpuFeaturesImpliedByCompiler();

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // If the compiler is allowed to use fpu then we can use fpu too in our
  // code generation.
#ifndef __mips__
  // For the simulator build, use FPU.
  supported_ |= 1u << FPU;
-#if defined(_MIPS_ARCH_MIPS32R6)
-  // FP64 mode is implied on r6.
-  supported_ |= 1u << FP64;
-#endif
-#if defined(FPU_MODE_FP64)
-  supported_ |= 1u << FP64;
-#endif
#else
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.has_fpu()) supported_ |= 1u << FPU;
-#if defined(FPU_MODE_FPXX)
-  if (cpu.is_fp64_mode()) supported_ |= 1u << FP64;
-#elif defined(FPU_MODE_FP64)
-  supported_ |= 1u << FP64;
-#endif
-#if defined(_MIPS_ARCH_MIPS32RX)
-  if (cpu.architecture() == 6) {
-    supported_ |= 1u << MIPSr6;
-  } else if (cpu.architecture() == 2) {
-    supported_ |= 1u << MIPSr1;
-    supported_ |= 1u << MIPSr2;
-  } else {
-    supported_ |= 1u << MIPSr1;
-  }
-#endif
#endif
}


void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintFeatures() { }


int ToNumber(Register reg) {
  DCHECK(reg.is_valid());
(...skipping 362 matching lines...)
  return opcode == BEQ ||
         opcode == BNE ||
         opcode == BLEZ ||
         opcode == BGTZ ||
         opcode == BEQL ||
         opcode == BNEL ||
         opcode == BLEZL ||
         opcode == BGTZL ||
         (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                               rt_field == BLTZAL || rt_field == BGEZAL)) ||
-        (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
-        (opcode == COP1 && rs_field == BC1EQZ) ||
-        (opcode == COP1 && rs_field == BC1NEZ);
+        (opcode == COP1 && rs_field == BC1);  // Coprocessor branch.
}


bool Assembler::IsEmittedConstant(Instr instr) {
  uint32_t label_constant = GetLabelConst(instr);
  return label_constant == 0;  // Emitted label const in reg-exp engine.
}


bool Assembler::IsBeq(Instr instr) {
(...skipping 24 matching lines...)
  return opcode == J;
}


bool Assembler::IsJal(Instr instr) {
  return GetOpcodeField(instr) == JAL;
}


bool Assembler::IsJr(Instr instr) {
-  if (!IsMipsArchVariant(kMips32r6)) {
-    return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
-  } else {
-    return GetOpcodeField(instr) == SPECIAL &&
-        GetRdField(instr) == 0 && GetFunctionField(instr) == JALR;
-  }
+  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
}


bool Assembler::IsJalr(Instr instr) {
-  return GetOpcodeField(instr) == SPECIAL &&
-      GetRdField(instr) != 0 && GetFunctionField(instr) == JALR;
+  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
}


bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}


(...skipping 464 matching lines...)
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
  DCHECK((offset & 3) == 0);
  DCHECK(is_int16(offset >> 2));

  return offset;
}


-int32_t Assembler::branch_offset_compact(Label* L,
-                                         bool jump_elimination_allowed) {
-  int32_t target_pos;
-  if (L->is_bound()) {
-    target_pos = L->pos();
-  } else {
-    if (L->is_linked()) {
-      target_pos = L->pos();
-      L->link_to(pc_offset());
-    } else {
-      L->link_to(pc_offset());
-      if (!trampoline_emitted_) {
-        unbound_labels_count_++;
-        next_buffer_check_ -= kTrampolineSlotsSize;
-      }
-      return kEndOfChain;
-    }
-  }
-
-  int32_t offset = target_pos - pc_offset();
-  DCHECK((offset & 3) == 0);
-  DCHECK(is_int16(offset >> 2));
-
-  return offset;
-}
-
-
-int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) {
-  int32_t target_pos;
-
-  if (L->is_bound()) {
-    target_pos = L->pos();
-  } else {
-    if (L->is_linked()) {
-      target_pos = L->pos();
-      L->link_to(pc_offset());
-    } else {
-      L->link_to(pc_offset());
-      if (!trampoline_emitted_) {
-        unbound_labels_count_++;
-        next_buffer_check_ -= kTrampolineSlotsSize;
-      }
-      return kEndOfChain;
-    }
-  }
-
-  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
-  DCHECK((offset & 3) == 0);
-  DCHECK(((offset >> 2) & 0xFFE00000) == 0);  // Offset is 21bit width.
-
-  return offset;
-}
-
-
-int32_t Assembler::branch_offset21_compact(Label* L,
-                                           bool jump_elimination_allowed) {
-  int32_t target_pos;
-
-  if (L->is_bound()) {
-    target_pos = L->pos();
-  } else {
-    if (L->is_linked()) {
-      target_pos = L->pos();
-      L->link_to(pc_offset());
-    } else {
-      L->link_to(pc_offset());
-      if (!trampoline_emitted_) {
-        unbound_labels_count_++;
-        next_buffer_check_ -= kTrampolineSlotsSize;
-      }
-      return kEndOfChain;
-    }
-  }
-
-  int32_t offset = target_pos - pc_offset();
-  DCHECK((offset & 3) == 0);
-  DCHECK(((offset >> 2) & 0xFFe00000) == 0);  // Offset is 21bit width.
-
-  return offset;
-}
-
-
void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      int32_t imm18 = target_pos - at_offset;
      DCHECK((imm18 & 3) == 0);
(...skipping 33 matching lines...)
  }


void Assembler::bgez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


-void Assembler::bgezc(Register rt, int16_t offset) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(!(rt.is(zero_reg)));
-  GenInstrImmediate(BLEZL, rt, rt, offset);
-}
-
-
-void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(!(rs.is(zero_reg)));
-  DCHECK(!(rt.is(zero_reg)));
-  DCHECK(rs.code() != rt.code());
-  GenInstrImmediate(BLEZ, rs, rt, offset);
-}
-
-
-void Assembler::bgec(Register rs, Register rt, int16_t offset) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(!(rs.is(zero_reg)));
-  DCHECK(!(rt.is(zero_reg)));
-  DCHECK(rs.code() != rt.code());
-  GenInstrImmediate(BLEZL, rs, rt, offset);
-}
-
-
void Assembler::bgezal(Register rs, int16_t offset) {
-  DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgtz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


-void Assembler::bgtzc(Register rt, int16_t offset) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(!(rt.is(zero_reg)));
-  GenInstrImmediate(BGTZL, zero_reg, rt, offset);
-}
-
-
void Assembler::blez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


-void Assembler::blezc(Register rt, int16_t offset) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(!(rt.is(zero_reg)));
-  GenInstrImmediate(BLEZL, zero_reg, rt, offset);
-}
-
-
-void Assembler::bltzc(Register rt, int16_t offset) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(!(rt.is(zero_reg)));
-  GenInstrImmediate(BGTZL, rt, rt, offset);
-}
-
-
-void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(!(rs.is(zero_reg)));
-  DCHECK(!(rt.is(zero_reg)));
-  DCHECK(rs.code() != rt.code());
-  GenInstrImmediate(BGTZ, rs, rt, offset);
-}
-
-
-void Assembler::bltc(Register rs, Register rt, int16_t offset) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(!(rs.is(zero_reg)));
-  DCHECK(!(rt.is(zero_reg)));
-  DCHECK(rs.code() != rt.code());
-  GenInstrImmediate(BGTZL, rs, rt, offset);
-}
-
-
void Assembler::bltz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bltzal(Register rs, int16_t offset) {
-  DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bne(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BNE, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


-void Assembler::bovc(Register rs, Register rt, int16_t offset) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(!(rs.is(zero_reg)));
-  DCHECK(rs.code() >= rt.code());
-  GenInstrImmediate(ADDI, rs, rt, offset);
-}
-
-
-void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(!(rs.is(zero_reg)));
-  DCHECK(rs.code() >= rt.code());
-  GenInstrImmediate(DADDI, rs, rt, offset);
-}
-
-
-void Assembler::blezalc(Register rt, int16_t offset) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(!(rt.is(zero_reg)));
-  GenInstrImmediate(BLEZ, zero_reg, rt, offset);
-}
-
-
-void Assembler::bgezalc(Register rt, int16_t offset) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(!(rt.is(zero_reg)));
-  GenInstrImmediate(BLEZ, rt, rt, offset);
-}
-
-
-void Assembler::bgezall(Register rs, int16_t offset) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(!(rs.is(zero_reg)));
-  GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
-}
-
-
-void Assembler::bltzalc(Register rt, int16_t offset) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(!(rt.is(zero_reg)));
-  GenInstrImmediate(BGTZ, rt, rt, offset);
-}
-
-
-void Assembler::bgtzalc(Register rt, int16_t offset) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(!(rt.is(zero_reg)));
-  GenInstrImmediate(BGTZ, zero_reg, rt, offset);
-}
-
-
-void Assembler::beqzalc(Register rt, int16_t offset) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(!(rt.is(zero_reg)));
-  GenInstrImmediate(ADDI, zero_reg, rt, offset);
-}
-
-
-void Assembler::bnezalc(Register rt, int16_t offset) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(!(rt.is(zero_reg)));
-  GenInstrImmediate(DADDI, zero_reg, rt, offset);
-}
-
-
-void Assembler::beqc(Register rs, Register rt, int16_t offset) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(rs.code() < rt.code());
-  GenInstrImmediate(ADDI, rs, rt, offset);
-}
-
-
-void Assembler::beqzc(Register rs, int32_t offset) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(!(rs.is(zero_reg)));
-  Instr instr = BEQZC | (rs.code() << kRsShift) | offset;
-  emit(instr);
-}
-
-
-void Assembler::bnec(Register rs, Register rt, int16_t offset) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(rs.code() < rt.code());
-  GenInstrImmediate(DADDI, rs, rt, offset);
-}
-
-
-void Assembler::bnezc(Register rs, int32_t offset) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(!(rs.is(zero_reg)));
-  Instr instr = BNEZC | (rs.code() << kRsShift) | offset;
-  emit(instr);
-}
-
-
void Assembler::j(int32_t target) {
#if DEBUG
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  DCHECK(in_range && ((target & 3) == 0));
#endif
  GenInstrJump(J, target >> 2);
}


void Assembler::jr(Register rs) {
-  if (!IsMipsArchVariant(kMips32r6)) {
-    BlockTrampolinePoolScope block_trampoline_pool(this);
-    if (rs.is(ra)) {
-      positions_recorder()->WriteRecordedPositions();
-    }
-    GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
-    BlockTrampolinePoolFor(1);  // For associated delay slot.
-  } else {
-    jalr(rs, zero_reg);
-  }
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  if (rs.is(ra)) {
+    positions_recorder()->WriteRecordedPositions();
+  }
+  GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
+  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::jal(int32_t target) {
#ifdef DEBUG
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  DCHECK(in_range && ((target & 3) == 0));
(...skipping 50 matching lines...)
  GenInstrImmediate(ADDIU, rs, rd, j);
}


void Assembler::subu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}


void Assembler::mul(Register rd, Register rs, Register rt) {
-  if (!IsMipsArchVariant(kMips32r6)) {
-    GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
-  } else {
-    GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
-  }
+  GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
}


-void Assembler::mulu(Register rd, Register rs, Register rt) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
-}
-
-
-void Assembler::muh(Register rd, Register rs, Register rt) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
-}
-
-
-void Assembler::muhu(Register rd, Register rs, Register rt) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
-}
-
-
-void Assembler::mod(Register rd, Register rs, Register rt) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
-}
-
-
-void Assembler::modu(Register rd, Register rs, Register rt) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
-}
-
-
void Assembler::mult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}


void Assembler::multu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}


void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}


-void Assembler::div(Register rd, Register rs, Register rt) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
-}
-
-
void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}


-void Assembler::divu(Register rd, Register rs, Register rt) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
-}
-
-
// Logical.

void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}


void Assembler::andi(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ANDI, rs, rt, j);
(...skipping 62 matching lines...)


void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}


void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
-  DCHECK(IsMipsArchVariant(kMips32r2));
+  DCHECK(kArchVariant == kMips32r2);
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
  emit(instr);
}


void Assembler::rotrv(Register rd, Register rt, Register rs) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
-  DCHECK(IsMipsArchVariant(kMips32r2));
+  DCHECK(kArchVariant == kMips32r2);
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
  emit(instr);
}


// ------------Memory-instructions-------------

// Helper for base-reg + offset, when offset is larger than int16.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
(...skipping 103 matching lines...)
  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}


void Assembler::lui(Register rd, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, zero_reg, rd, j);
}


-void Assembler::aui(Register rs, Register rt, int32_t j) {
-  // This instruction uses same opcode as 'lui'. The difference in encoding is
-  // 'lui' has zero reg. for rs field.
-  DCHECK(is_uint16(j));
-  GenInstrImmediate(LUI, rs, rt, j);
-}
-
-
// -------------Misc-instructions--------------

// Break / Trap instructions.
void Assembler::break_(uint32_t code, bool break_as_stop) {
  DCHECK((code & ~0xfffff) == 0);
  // We need to invalidate breaks that could be stops as well because the
  // simulator expects a char pointer after the stop instruction.
  // See constants-mips.h for explanation.
  DCHECK((break_as_stop &&
          code <= kMaxStopCode &&
(...skipping 123 matching lines...)

void Assembler::movf(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.code_ = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}


// Bit twiddling.
void Assembler::clz(Register rd, Register rs) {
-  if (!IsMipsArchVariant(kMips32r6)) {
-    // Clz instr requires same GPR number in 'rd' and 'rt' fields.
-    GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
-  } else {
-    GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
-  }
+  // Clz instr requires same GPR number in 'rd' and 'rt' fields.
+  GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
}


void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ins.
  // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
-  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+  DCHECK(kArchVariant == kMips32r2);
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}


void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ext.
  // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
-  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+  DCHECK(kArchVariant == kMips32r2);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}


void Assembler::pref(int32_t hint, const MemOperand& rs) {
-  DCHECK(!IsMipsArchVariant(kLoongson));
+  DCHECK(kArchVariant != kLoongson);
  DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
  Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
      | (rs.offset_);
  emit(instr);
}


// --------Coprocessor-instructions----------------

// Load, store, move.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
}


void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
  // load to two 32-bit loads.
-  if (IsFp64Mode()) {
-    GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
-        Register::kMantissaOffset);
-    GenInstrImmediate(LW, src.rm(), at, src.offset_ +
-        Register::kExponentOffset);
-    mthc1(at, fd);
-  } else {
-    GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
-        Register::kMantissaOffset);
-    FPURegister nextfpreg;
-    nextfpreg.setcode(fd.code() + 1);
-    GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
-        Register::kExponentOffset);
-  }
+  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
+      Register::kMantissaOffset);
+  FPURegister nextfpreg;
+  nextfpreg.setcode(fd.code() + 1);
+  GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
+      Register::kExponentOffset);
}


void Assembler::swc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
}


void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
  // store to two 32-bit stores.
-  if (IsFp64Mode()) {
-    GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
-        Register::kMantissaOffset);
-    mfhc1(at, fd);
-    GenInstrImmediate(SW, src.rm(), at, src.offset_ +
-        Register::kExponentOffset);
-  } else {
-    GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
-        Register::kMantissaOffset);
-    FPURegister nextfpreg;
-    nextfpreg.setcode(fd.code() + 1);
-    GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
-        Register::kExponentOffset);
-  }
+  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
+      Register::kMantissaOffset);
+  FPURegister nextfpreg;
+  nextfpreg.setcode(fd.code() + 1);
+  GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
+      Register::kExponentOffset);
}


void Assembler::mtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTC1, rt, fs, f0);
}


-void Assembler::mthc1(Register rt, FPURegister fs) {
-  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
-}
-
-
void Assembler::mfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFC1, rt, fs, f0);
}


-void Assembler::mfhc1(Register rt, FPURegister fs) {
-  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
-}
-
-
void Assembler::ctc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CTC1, rt, fs);
}


void Assembler::cfc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CFC1, rt, fs);
}


(...skipping 100 matching lines...)
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
}


void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
}


void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
-  DCHECK(IsMipsArchVariant(kMips32r2));
+  DCHECK(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}


void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
-  DCHECK(IsMipsArchVariant(kMips32r2));
+  DCHECK(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}


void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
-  DCHECK(IsMipsArchVariant(kMips32r2));
+  DCHECK(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}


void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
-  DCHECK(IsMipsArchVariant(kMips32r2));
+  DCHECK(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}


void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
}


void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
(...skipping 14 matching lines...)
void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
}


void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
}


-void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister ft,
-                    FPURegister fs) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK((fmt == D) || (fmt == S));
-  GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
-}
-
-
-void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister ft,
-                     FPURegister fs) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK((fmt == D) || (fmt == S));
-  GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
-}
-
-
-void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister ft,
-                    FPURegister fs) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK((fmt == D) || (fmt == S));
-  GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
-}
-
-
-void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister ft,
-                     FPURegister fs) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK((fmt == D) || (fmt == S));
-  GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
-}
-
-
void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}


void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
-  DCHECK(IsMipsArchVariant(kMips32r2));
+  DCHECK(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}


void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_S_D);
}


void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}


void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
-  DCHECK(IsMipsArchVariant(kMips32r2));
+  DCHECK(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}


void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}


-// Conditions for >= MIPSr6.
-void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
-                    FPURegister fd, FPURegister fs, FPURegister ft) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK((fmt & ~(31 << kRsShift)) == 0);
-  Instr instr = COP1 | fmt | ft.code() << kFtShift |
-      fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
-  emit(instr);
-}
-
-
-void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
-  emit(instr);
-}
-
-
-void Assembler::bc1nez(int16_t offset, FPURegister ft) {
-  DCHECK(IsMipsArchVariant(kMips32r6));
-  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
-  emit(instr);
-}
-
-
-// Conditions for < MIPSr6.
+// Conditions.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
                  FPURegister fs, FPURegister ft, uint16_t cc) {
  DCHECK(is_uint3(cc));
  DCHECK((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
      | cc << 8 | 3 << 4 | cond;
  emit(instr);
}


(...skipping 293 matching lines...)
#ifdef DEBUG
  // Check we have the result from a li macro-instruction, using instr pair.
  Instr instr1 = instr_at(pc);
  CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
#endif

  // Must use 2 instructions to insure patchable code => just use lui and ori.
  // lui rt, upper-16.
  // ori rt rt, lower-16.
  *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
-  *(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
+  *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);

  // The following code is an optimization for the common case of Call()
  // or Jump() which is load to register, and jump through register:
  //   li(t9, address); jalr(t9) (or jr(t9)).
  // If the destination address is in the same 256 MB page as the call, it
  // is faster to do a direct jal, or j, rather than jump thru register, since
  // that lets the cpu pipeline prefetch the target address. However each
  // time the address above is patched, we have to patch the direct jal/j
  // instruction, as well as possibly revert to jalr/jr if we now cross a
  // 256 MB page. Note that with the jal/j instructions, we do not need to
(...skipping 22 matching lines...)
    uint32_t segment_mask = ((256 * MB) - 1) ^ ((32 * KB) - 1);
    uint32_t ipc_segment_addr = ipc & segment_mask;
    if (ipc_segment_addr == 0 || ipc_segment_addr == segment_mask)
      in_range = false;
  }
#endif

  if (IsJalr(instr3)) {
    // Try to convert JALR to JAL.
    if (in_range && GetRt(instr2) == GetRs(instr3)) {
-      *(p + 2) = JAL | target_field;
+      *(p+2) = JAL | target_field;
      patched_jump = true;
    }
  } else if (IsJr(instr3)) {
    // Try to convert JR to J, skip returns (jr ra).
    bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
    if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
-      *(p + 2) = J | target_field;
+      *(p+2) = J | target_field;
      patched_jump = true;
    }
  } else if (IsJal(instr3)) {
    if (in_range) {
      // We are patching an already converted JAL.
-      *(p + 2) = JAL | target_field;
+      *(p+2) = JAL | target_field;
    } else {
      // Patch JAL, but out of range, revert to JALR.
      // JALR rs reg is the rt reg specified in the ORI instruction.
      uint32_t rs_field = GetRt(instr2) << kRsShift;
      uint32_t rd_field = ra.code() << kRdShift;  // Return-address (ra) reg.
      *(p+2) = SPECIAL | rs_field | rd_field | JALR;
    }
    patched_jump = true;
  } else if (IsJ(instr3)) {
    if (in_range) {
      // We are patching an already converted J (jump).
-      *(p + 2) = J | target_field;
+      *(p+2) = J | target_field;
    } else {
      // Trying patch J, but out of range, just go back to JR.
      // JR 'rs' reg is the 'rt' reg specified in the ORI instruction (instr2).
      uint32_t rs_field = GetRt(instr2) << kRsShift;
-      if (IsMipsArchVariant(kMips32r6)) {
-        *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR;
-      } else {
-        *(p + 2) = SPECIAL | rs_field | JR;
-      }
+      *(p+2) = SPECIAL | rs_field | JR;
    }
    patched_jump = true;
  }

  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
    CpuFeatures::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
  }
}


void Assembler::JumpLabelToJumpRegister(Address pc) {
  // Address pc points to lui/ori instructions.
  // Jump to label may follow at pc + 2 * kInstrSize.
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
#ifdef DEBUG
  Instr instr1 = instr_at(pc);
#endif
  Instr instr2 = instr_at(pc + 1 * kInstrSize);
  Instr instr3 = instr_at(pc + 2 * kInstrSize);
  bool patched = false;

  if (IsJal(instr3)) {
    DCHECK(GetOpcodeField(instr1) == LUI);
    DCHECK(GetOpcodeField(instr2) == ORI);

    uint32_t rs_field = GetRt(instr2) << kRsShift;
    uint32_t rd_field = ra.code() << kRdShift;  // Return-address (ra) reg.
-    *(p + 2) = SPECIAL | rs_field | rd_field | JALR;
+    *(p+2) = SPECIAL | rs_field | rd_field | JALR;
    patched = true;
  } else if (IsJ(instr3)) {
    DCHECK(GetOpcodeField(instr1) == LUI);
    DCHECK(GetOpcodeField(instr2) == ORI);

    uint32_t rs_field = GetRt(instr2) << kRsShift;
-    if (IsMipsArchVariant(kMips32r6)) {
-      *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR;
-    } else {
-      *(p + 2) = SPECIAL | rs_field | JR;
-    }
+    *(p+2) = SPECIAL | rs_field | JR;
    patched = true;
  }

  if (patched) {
-    CpuFeatures::FlushICache(pc + 2, sizeof(Address));
+    CpuFeatures::FlushICache(pc+2, sizeof(Address));
  }
}


Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
  // No out-of-line constant pool support.
  DCHECK(!FLAG_enable_ool_constant_pool);
  return isolate->factory()->empty_constant_pool_array();
}


void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
  // No out-of-line constant pool support.
  DCHECK(!FLAG_enable_ool_constant_pool);
  return;
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS