OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <limits.h> // For LONG_MIN, LONG_MAX. | 5 #include <limits.h> // For LONG_MIN, LONG_MAX. |
6 | 6 |
7 #if V8_TARGET_ARCH_MIPS | 7 #if V8_TARGET_ARCH_MIPS |
8 | 8 |
9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
10 #include "src/base/division-by-constant.h" | 10 #include "src/base/division-by-constant.h" |
(...skipping 1174 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1185 DCHECK(!tmp.is(rt)); | 1185 DCHECK(!tmp.is(rt)); |
1186 sll(tmp, rs, sa); | 1186 sll(tmp, rs, sa); |
1187 Addu(rd, rt, tmp); | 1187 Addu(rd, rt, tmp); |
1188 } | 1188 } |
1189 } | 1189 } |
1190 | 1190 |
1191 | 1191 |
1192 // ------------Pseudo-instructions------------- | 1192 // ------------Pseudo-instructions------------- |
1193 | 1193 |
// Load an (potentially) unaligned 32-bit word from memory into rd.
// On MIPS32r6 unaligned word access is supported in hardware, so a plain
// lw suffices. On pre-r6 variants the load is synthesized from the
// lwr/lwl pair. Clobbers the at register (enforced by the DCHECKs and
// used as scratch below), so neither rd nor the base register may be at.
void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
  DCHECK(!rd.is(at));
  DCHECK(!rs.rm().is(at));
  if (IsMipsArchVariant(kMips32r6)) {
    // r6 handles unaligned addresses directly.
    lw(rd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    // Both partial-word accesses must have an in-range 16-bit offset;
    // otherwise fall through to the materialized-address path.
    if (is_int16(rs.offset() + kMipsLwrOffset) &&
        is_int16(rs.offset() + kMipsLwlOffset)) {
      if (!rd.is(rs.rm())) {
        lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
        lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
      } else {
        // rd doubles as the base register: assemble the word in at first
        // so the base survives both partial loads, then move it over.
        lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
        lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
        mov(rd, at);
      }
    } else {  // Offset > 16 bits, use multiple instructions to load.
      // Compute base+offset into at, then load relative to at.
      LoadRegPlusOffsetToAt(rs);
      lwr(rd, MemOperand(at, kMipsLwrOffset));
      lwl(rd, MemOperand(at, kMipsLwlOffset));
    }
  }
}
1198 | 1219 |
1199 | 1220 |
// Store rd to a (potentially) unaligned 32-bit memory location.
// MIPS32r6 supports unaligned sw in hardware; pre-r6 variants synthesize
// the store from the swr/swl pair. Clobbers the at register when the
// offset does not fit in 16 bits, so neither rd nor the base may be at.
void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
  DCHECK(!rd.is(at));
  DCHECK(!rs.rm().is(at));
  if (IsMipsArchVariant(kMips32r6)) {
    // r6 handles unaligned addresses directly.
    sw(rd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    // Both partial-word accesses must have an in-range 16-bit offset.
    if (is_int16(rs.offset() + kMipsSwrOffset) &&
        is_int16(rs.offset() + kMipsSwlOffset)) {
      swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
      swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
    } else {
      // Offset > 16 bits: materialize base+offset into at, then store
      // relative to at. No aliasing concern: stores do not modify rd.
      LoadRegPlusOffsetToAt(rs);
      swr(rd, MemOperand(at, kMipsSwrOffset));
      swl(rd, MemOperand(at, kMipsSwlOffset));
    }
  }
}
| 1240 |
// Load a (potentially) unaligned signed 16-bit halfword into rd.
// MIPS32r6 supports unaligned lh in hardware. Pre-r6, the halfword is
// assembled byte-by-byte: the high byte is loaded sign-extended (lb)
// into rd, the low byte zero-extended (lbu) into at, then combined with
// sll/or_. Which address holds the high byte depends on target
// endianness, selected by the #if blocks. Clobbers at.
void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
  DCHECK(!rd.is(at));
  DCHECK(!rs.rm().is(at));
  if (IsMipsArchVariant(kMips32r6)) {
    lh(rd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    // Both byte accesses (offset and offset+1) must fit in 16 bits.
    if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
      lbu(at, rs);                                // low byte (zero-extended)
      lb(rd, MemOperand(rs.rm(), rs.offset() + 1));  // high byte (sign-extended)
#elif defined(V8_TARGET_BIG_ENDIAN)
      lbu(at, MemOperand(rs.rm(), rs.offset() + 1));  // low byte
      lb(rd, rs);                                     // high byte
#endif
    } else {  // Offset > 16 bits, use multiple instructions to load.
      LoadRegPlusOffsetToAt(rs);
      // Load into rd first: the second load reuses at as both base and
      // destination, which would destroy the address if done first.
#if defined(V8_TARGET_LITTLE_ENDIAN)
      lb(rd, MemOperand(at, 1));
      lbu(at, MemOperand(at, 0));
#elif defined(V8_TARGET_BIG_ENDIAN)
      lb(rd, MemOperand(at, 0));
      lbu(at, MemOperand(at, 1));
#endif
    }
    // rd = (high byte << 8) | low byte; sign extension comes from lb.
    sll(rd, rd, 8);
    or_(rd, rd, at);
  }
}
| 1271 |
// Load a (potentially) unaligned unsigned 16-bit halfword into rd.
// Identical structure to Ulh, except both bytes are loaded with lbu so
// the result is zero-extended rather than sign-extended. MIPS32r6 uses
// hardware-supported unaligned lhu directly. Clobbers at.
void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
  DCHECK(!rd.is(at));
  DCHECK(!rs.rm().is(at));
  if (IsMipsArchVariant(kMips32r6)) {
    lhu(rd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    // Both byte accesses (offset and offset+1) must fit in 16 bits.
    if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
      lbu(at, rs);                                    // low byte
      lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));  // high byte
#elif defined(V8_TARGET_BIG_ENDIAN)
      lbu(at, MemOperand(rs.rm(), rs.offset() + 1));  // low byte
      lbu(rd, rs);                                    // high byte
#endif
    } else {  // Offset > 16 bits, use multiple instructions to load.
      LoadRegPlusOffsetToAt(rs);
      // Load into rd first; the second load clobbers at, which is still
      // needed as the base register for both accesses.
#if defined(V8_TARGET_LITTLE_ENDIAN)
      lbu(rd, MemOperand(at, 1));
      lbu(at, MemOperand(at, 0));
#elif defined(V8_TARGET_BIG_ENDIAN)
      lbu(rd, MemOperand(at, 0));
      lbu(at, MemOperand(at, 1));
#endif
    }
    // rd = (high byte << 8) | low byte, zero-extended.
    sll(rd, rd, 8);
    or_(rd, rd, at);
  }
}
| 1302 |
// Store the low 16 bits of rd to a (potentially) unaligned memory
// location, one byte at a time on pre-r6 variants (r6 supports unaligned
// sh in hardware). scratch is clobbered (it is shifted in place between
// the two byte stores); rd itself is preserved only when scratch is a
// different register than rd. Clobbers at when the offset does not fit
// in 16 bits. scratch must not alias the base register or at.
void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
  DCHECK(!rd.is(at));
  DCHECK(!rs.rm().is(at));
  DCHECK(!rs.rm().is(scratch));
  DCHECK(!scratch.is(at));
  if (IsMipsArchVariant(kMips32r6)) {
    sh(rd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    MemOperand source = rs;
    // If offset > 16 bits, load address to at with offset 0.
    if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
      LoadRegPlusOffsetToAt(rs);
      source = MemOperand(at, 0);
    }

    // Work on a copy so rd survives the in-place srl (unless the caller
    // passed scratch == rd, in which case rd is clobbered).
    if (!scratch.is(rd)) {
      mov(scratch, rd);
    }

    // Store low byte at the low address; shift and store the high byte.
    // The address of the low byte depends on target endianness.
#if defined(V8_TARGET_LITTLE_ENDIAN)
    sb(scratch, source);
    srl(scratch, scratch, 8);
    sb(scratch, MemOperand(source.rm(), source.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
    sb(scratch, MemOperand(source.rm(), source.offset() + 1));
    srl(scratch, scratch, 8);
    sb(scratch, source);
#endif
  }
}
| 1335 |
| 1336 void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs, |
| 1337 Register scratch) { |
| 1338 if (IsMipsArchVariant(kMips32r6)) { |
| 1339 lwc1(fd, rs); |
| 1340 } else { |
| 1341 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || |
| 1342 IsMipsArchVariant(kLoongson)); |
| 1343 Ulw(scratch, rs); |
| 1344 mtc1(scratch, fd); |
| 1345 } |
| 1346 } |
| 1347 |
| 1348 void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs, |
| 1349 Register scratch) { |
| 1350 if (IsMipsArchVariant(kMips32r6)) { |
| 1351 swc1(fd, rs); |
| 1352 } else { |
| 1353 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || |
| 1354 IsMipsArchVariant(kLoongson)); |
| 1355 mfc1(scratch, fd); |
| 1356 Usw(scratch, rs); |
| 1357 } |
| 1358 } |
| 1359 |
| 1360 void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs, |
| 1361 Register scratch) { |
| 1362 DCHECK(!scratch.is(at)); |
| 1363 if (IsMipsArchVariant(kMips32r6)) { |
| 1364 ldc1(fd, rs); |
| 1365 } else { |
| 1366 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || |
| 1367 IsMipsArchVariant(kLoongson)); |
| 1368 Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset)); |
| 1369 mtc1(scratch, fd); |
| 1370 Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset)); |
| 1371 Mthc1(scratch, fd); |
| 1372 } |
| 1373 } |
| 1374 |
| 1375 void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs, |
| 1376 Register scratch) { |
| 1377 DCHECK(!scratch.is(at)); |
| 1378 if (IsMipsArchVariant(kMips32r6)) { |
| 1379 sdc1(fd, rs); |
| 1380 } else { |
| 1381 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || |
| 1382 IsMipsArchVariant(kLoongson)); |
| 1383 mfc1(scratch, fd); |
| 1384 Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset)); |
| 1385 Mfhc1(scratch, fd); |
| 1386 Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset)); |
| 1387 } |
1203 } | 1388 } |
1204 | 1389 |
1205 | 1390 |
1206 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) { | 1391 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) { |
1207 AllowDeferredHandleDereference smi_check; | 1392 AllowDeferredHandleDereference smi_check; |
1208 if (value->IsSmi()) { | 1393 if (value->IsSmi()) { |
1209 li(dst, Operand(value), mode); | 1394 li(dst, Operand(value), mode); |
1210 } else { | 1395 } else { |
1211 DCHECK(value->IsHeapObject()); | 1396 DCHECK(value->IsHeapObject()); |
1212 if (isolate()->heap()->InNewSpace(*value)) { | 1397 if (isolate()->heap()->InNewSpace(*value)) { |
(...skipping 5316 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
6529 if (mag.shift > 0) sra(result, result, mag.shift); | 6714 if (mag.shift > 0) sra(result, result, mag.shift); |
6530 srl(at, dividend, 31); | 6715 srl(at, dividend, 31); |
6531 Addu(result, result, Operand(at)); | 6716 Addu(result, result, Operand(at)); |
6532 } | 6717 } |
6533 | 6718 |
6534 | 6719 |
6535 } // namespace internal | 6720 } // namespace internal |
6536 } // namespace v8 | 6721 } // namespace v8 |
6537 | 6722 |
6538 #endif // V8_TARGET_ARCH_MIPS | 6723 #endif // V8_TARGET_ARCH_MIPS |
OLD | NEW |