Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include <limits.h> // For LONG_MIN, LONG_MAX. | 5 #include <limits.h> // For LONG_MIN, LONG_MAX. |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_MIPS | 7 #if V8_TARGET_ARCH_MIPS |
| 8 | 8 |
| 9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
| 10 #include "src/base/division-by-constant.h" | 10 #include "src/base/division-by-constant.h" |
| (...skipping 1174 matching lines...) | |
| 1185 DCHECK(!tmp.is(rt)); | 1185 DCHECK(!tmp.is(rt)); |
| 1186 sll(tmp, rs, sa); | 1186 sll(tmp, rs, sa); |
| 1187 Addu(rd, rt, tmp); | 1187 Addu(rd, rt, tmp); |
| 1188 } | 1188 } |
| 1189 } | 1189 } |
| 1190 | 1190 |
| 1191 | 1191 |
| 1192 // ------------Pseudo-instructions------------- | 1192 // ------------Pseudo-instructions------------- |
| 1193 | 1193 |
| 1194 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) { | 1194 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) { |
| 1195 lwr(rd, rs); | 1195 DCHECK(!rd.is(at)); |
| 1196 lwl(rd, MemOperand(rs.rm(), rs.offset() + 3)); | 1196 DCHECK(!rs.rm().is(at)); |
| 1197 if (IsMipsArchVariant(kMips32r6)) { | |
| 1198 lw(rd, rs); | |
|
akos.palfi.imgtec, 2016/04/26 13:07:24:
From the mips32r6 doc: "Pre-Release 6: The effect [...]"
| 1199 } else { | |
| 1200 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || | |
| 1201 IsMipsArchVariant(kLoongson)); | |
| 1202 if (is_int16(rs.offset()) && is_int16(rs.offset() + 3)) { | |
|
akos.palfi.imgtec, 2016/04/26 13:07:24:
I think you could use the new constants here: if [...]
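A minimal sketch of what this suggestion presumably amounts to for Ulw (assuming kMipsLwrOffset and kMipsLwlOffset are the endian-dependent 0/3 byte offsets this CL introduces), so the range check uses the same constants as the emitted accesses:

    if (is_int16(rs.offset() + kMipsLwrOffset) &&
        is_int16(rs.offset() + kMipsLwlOffset)) {
      // In range: emit the lwr/lwl pair against rs.rm() directly, as below.
    }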
| 1203 if (!rd.is(rs.rm())) { | |
| 1204 lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset)); | |
| 1205 lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset)); | |
| 1206 } else { | |
| 1207 lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset)); | |
| 1208 lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset)); | |
| 1209 mov(rd, at); | |
| 1210 } | |
| 1211 } else { // Offset > 16 bits, use multiple instructions to load. | |
| 1212 LoadRegPlusOffsetToAt(rs); | |
| 1213 lwr(rd, MemOperand(at, kMipsLwrOffset)); | |
| 1214 lwl(rd, MemOperand(at, kMipsLwlOffset)); | |
| 1215 } | |
| 1216 } | |
| 1197 } | 1217 } |
| 1198 | 1218 |
| 1199 | 1219 |
| 1200 void MacroAssembler::Usw(Register rd, const MemOperand& rs) { | 1220 void MacroAssembler::Usw(Register rd, const MemOperand& rs) { |
| 1201 swr(rd, rs); | 1221 DCHECK(!rd.is(at)); |
| 1202 swl(rd, MemOperand(rs.rm(), rs.offset() + 3)); | 1222 DCHECK(!rs.rm().is(at)); |
| 1223 if (IsMipsArchVariant(kMips32r6)) { | |
| 1224 sw(rd, rs); | |
| 1225 } else { | |
| 1226 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || | |
| 1227 IsMipsArchVariant(kLoongson)); | |
| 1228 if (is_int16(rs.offset()) && is_int16(rs.offset() + 3)) { | |
|
akos.palfi.imgtec, 2016/04/26 13:07:24:
I think you could use the new constants here: if [...]
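For reference, the four offset constants used in Ulw and Usw are not defined in this hunk; presumably they are declared once, endian-dependent, roughly as follows (an assumption based on how they are used above), and the analogous check for Usw would pair kMipsSwrOffset with kMipsSwlOffset:

    #if defined(V8_TARGET_LITTLE_ENDIAN)
    const int kMipsLwrOffset = 0;  // lwr at the base address,
    const int kMipsLwlOffset = 3;  // lwl at base + 3
    const int kMipsSwrOffset = 0;
    const int kMipsSwlOffset = 3;
    #elif defined(V8_TARGET_BIG_ENDIAN)
    const int kMipsLwrOffset = 3;  // mirrored on big-endian
    const int kMipsLwlOffset = 0;
    const int kMipsSwrOffset = 3;
    const int kMipsSwlOffset = 0;
    #endif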
| 1229 swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset)); | |
| 1230 swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset)); | |
| 1231 } else { | |
| 1232 LoadRegPlusOffsetToAt(rs); | |
| 1233 swr(rd, MemOperand(at, kMipsSwrOffset)); | |
| 1234 swl(rd, MemOperand(at, kMipsSwlOffset)); | |
| 1235 } | |
| 1236 } | |
| 1237 } | |
| 1238 | |
| 1239 void MacroAssembler::Ulh(Register rd, const MemOperand& rs) { | |
| 1240 DCHECK(!rd.is(at)); | |
| 1241 DCHECK(!rs.rm().is(at)); | |
| 1242 if (IsMipsArchVariant(kMips32r6)) { | |
| 1243 lh(rd, rs); | |
| 1244 } else { | |
| 1245 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || | |
| 1246 IsMipsArchVariant(kLoongson)); | |
| 1247 if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) { | |
| 1248 #if defined(V8_TARGET_LITTLE_ENDIAN) | |
| 1249 lbu(at, rs); | |
| 1250 lb(rd, MemOperand(rs.rm(), rs.offset() + 1)); | |
| 1251 #elif defined(V8_TARGET_BIG_ENDIAN) | |
| 1252 lbu(at, MemOperand(rs.rm(), rs.offset() + 1)); | |
| 1253 lb(rd, rs); | |
| 1254 #endif | |
| 1255 } else { // Offset > 16 bits, use multiple instructions to load. | |
| 1256 LoadRegPlusOffsetToAt(rs); | |
| 1257 #if defined(V8_TARGET_LITTLE_ENDIAN) | |
| 1258 lb(rd, MemOperand(at, 1)); | |
| 1259 lbu(at, MemOperand(at, 0)); | |
| 1260 #elif defined(V8_TARGET_BIG_ENDIAN) | |
| 1261 lb(rd, MemOperand(at, 0)); | |
| 1262 lbu(at, MemOperand(at, 1)); | |
| 1263 #endif | |
| 1264 } | |
| 1265 sll(rd, rd, 8); | |
| 1266 or_(rd, rd, at); | |
| 1267 } | |
| 1268 } | |
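To make the byte assembly in Ulh concrete, a worked example (assuming a little-endian target and the halfword 0xBEEF stored at an unaligned, in-range offset, so the memory bytes are 0xEF then 0xBE):

    lbu(at, rs);                                   // at = 0x000000EF (low byte, zero-extended)
    lb(rd, MemOperand(rs.rm(), rs.offset() + 1));  // rd = 0xFFFFFFBE (high byte, sign-extended)
    sll(rd, rd, 8);                                // rd = 0xFFFFBE00
    or_(rd, rd, at);                               // rd = 0xFFFFBEEF, i.e. int16 0xBEEF sign-extended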
| 1269 | |
| 1270 void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) { | |
| 1271 DCHECK(!rd.is(at)); | |
| 1272 DCHECK(!rs.rm().is(at)); | |
| 1273 if (IsMipsArchVariant(kMips32r6)) { | |
| 1274 lhu(rd, rs); | |
| 1275 } else { | |
| 1276 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || | |
| 1277 IsMipsArchVariant(kLoongson)); | |
| 1278 if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) { | |
| 1279 #if defined(V8_TARGET_LITTLE_ENDIAN) | |
| 1280 lbu(at, rs); | |
| 1281 lbu(rd, MemOperand(rs.rm(), rs.offset() + 1)); | |
| 1282 #elif defined(V8_TARGET_BIG_ENDIAN) | |
| 1283 lbu(at, MemOperand(rs.rm(), rs.offset() + 1)); | |
| 1284 lbu(rd, rs); | |
| 1285 #endif | |
| 1286 } else { // Offset > 16 bits, use multiple instructions to load. | |
| 1287 LoadRegPlusOffsetToAt(rs); | |
| 1288 #if defined(V8_TARGET_LITTLE_ENDIAN) | |
| 1289 lbu(rd, MemOperand(at, 1)); | |
| 1290 lbu(at, MemOperand(at, 0)); | |
| 1291 #elif defined(V8_TARGET_BIG_ENDIAN) | |
| 1292 lbu(rd, MemOperand(at, 0)); | |
| 1293 lbu(at, MemOperand(at, 1)); | |
| 1294 #endif | |
| 1295 } | |
| 1296 sll(rd, rd, 8); | |
| 1297 or_(rd, rd, at); | |
| 1298 } | |
| 1299 } | |
| 1300 | |
| 1301 void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) { | |
| 1302 DCHECK(!rd.is(at)); | |
| 1303 DCHECK(!rs.rm().is(at)); | |
| 1304 DCHECK(!rs.rm().is(scratch)); | |
| 1305 DCHECK(!scratch.is(at)); | |
| 1306 if (IsMipsArchVariant(kMips32r6)) { | |
| 1307 sh(rd, rs); | |
| 1308 } else { | |
| 1309 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || | |
| 1310 IsMipsArchVariant(kLoongson)); | |
| 1311 MemOperand source = rs; | |
| 1312 // If offset > 16 bits, load address to at with offset 0. | |
| 1313 if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) { | |
| 1314 LoadRegPlusOffsetToAt(rs); | |
| 1315 source = MemOperand(at, 0); | |
| 1316 } | |
| 1317 | |
| 1318 if (!scratch.is(rd)) { | |
| 1319 mov(scratch, rd); | |
| 1320 } | |
| 1321 | |
| 1322 #if defined(V8_TARGET_LITTLE_ENDIAN) | |
| 1323 sb(scratch, source); | |
| 1324 srl(scratch, scratch, 8); | |
| 1325 sb(scratch, MemOperand(source.rm(), source.offset() + 1)); | |
| 1326 #elif defined(V8_TARGET_BIG_ENDIAN) | |
| 1327 sb(scratch, MemOperand(source.rm(), source.offset() + 1)); | |
| 1328 srl(scratch, scratch, 8); | |
| 1329 sb(scratch, source); | |
| 1330 #endif | |
| 1331 } | |
| 1332 } | |
| 1333 | |
| 1334 void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs, | |
| 1335 Register scratch) { | |
| 1336 if (IsMipsArchVariant(kMips32r6)) { | |
| 1337 lwc1(fd, rs); | |
| 1338 } else { | |
| 1339 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || | |
| 1340 IsMipsArchVariant(kLoongson)); | |
| 1341 Ulw(scratch, rs); | |
| 1342 mtc1(scratch, fd); | |
| 1343 } | |
| 1344 } | |
| 1345 | |
| 1346 void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs, | |
| 1347 Register scratch) { | |
| 1348 if (IsMipsArchVariant(kMips32r6)) { | |
| 1349 swc1(fd, rs); | |
| 1350 } else { | |
| 1351 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || | |
| 1352 IsMipsArchVariant(kLoongson)); | |
| 1353 mfc1(scratch, fd); | |
| 1354 Usw(scratch, rs); | |
| 1355 } | |
| 1356 } | |
| 1357 | |
| 1358 void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs, | |
| 1359 Register scratch) { | |
| 1360 DCHECK(!scratch.is(at)); | |
| 1361 if (IsMipsArchVariant(kMips32r6)) { | |
| 1362 ldc1(fd, rs); | |
| 1363 } else { | |
| 1364 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || | |
| 1365 IsMipsArchVariant(kLoongson)); | |
| 1366 Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset)); | |
| 1367 mtc1(scratch, fd); | |
| 1368 Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset)); | |
| 1369 Mthc1(scratch, fd); | |
| 1370 } | |
| 1371 } | |
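Register::kMantissaOffset and Register::kExponentOffset are not part of this hunk; presumably they are the endian-dependent word offsets of a double's low and high 32-bit halves, roughly along these lines (an assumption, not shown in this CL):

    #if defined(V8_TARGET_LITTLE_ENDIAN)
    const int Register::kMantissaOffset = 0;  // low word of the double
    const int Register::kExponentOffset = 4;  // high word (sign, exponent, top of mantissa)
    #elif defined(V8_TARGET_BIG_ENDIAN)
    const int Register::kMantissaOffset = 4;
    const int Register::kExponentOffset = 0;
    #endif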
| 1372 | |
| 1373 void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs, | |
| 1374 Register scratch) { | |
| 1375 DCHECK(!scratch.is(at)); | |
| 1376 if (IsMipsArchVariant(kMips32r6)) { | |
| 1377 sdc1(fd, rs); | |
| 1378 } else { | |
| 1379 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || | |
| 1380 IsMipsArchVariant(kLoongson)); | |
| 1381 mfc1(scratch, fd); | |
| 1382 Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset)); | |
| 1383 Mfhc1(scratch, fd); | |
| 1384 Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset)); | |
| 1385 } | |
| 1203 } | 1386 } |
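For context, illustrative call sites for the new helpers (hypothetical registers and offsets, written with the usual __ ACCESS_MASM shorthand): at is reserved for internal use, and the Ush and U*c1 variants take an explicit scratch register, which should not be at and, for Ush, must differ from the base register:

    __ Ulw(t0, MemOperand(a0, 1));        // unaligned 32-bit load
    __ Ush(t1, MemOperand(a0, 5), t2);    // unaligned 16-bit store, t2 as scratch
    __ Uldc1(f4, MemOperand(a1, 3), t3);  // unaligned 64-bit FP load, t3 as scratch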
| 1204 | 1387 |
| 1205 | 1388 |
| 1206 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) { | 1389 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) { |
| 1207 AllowDeferredHandleDereference smi_check; | 1390 AllowDeferredHandleDereference smi_check; |
| 1208 if (value->IsSmi()) { | 1391 if (value->IsSmi()) { |
| 1209 li(dst, Operand(value), mode); | 1392 li(dst, Operand(value), mode); |
| 1210 } else { | 1393 } else { |
| 1211 DCHECK(value->IsHeapObject()); | 1394 DCHECK(value->IsHeapObject()); |
| 1212 if (isolate()->heap()->InNewSpace(*value)) { | 1395 if (isolate()->heap()->InNewSpace(*value)) { |
| (...skipping 5316 matching lines...) | |
| 6529 if (mag.shift > 0) sra(result, result, mag.shift); | 6712 if (mag.shift > 0) sra(result, result, mag.shift); |
| 6530 srl(at, dividend, 31); | 6713 srl(at, dividend, 31); |
| 6531 Addu(result, result, Operand(at)); | 6714 Addu(result, result, Operand(at)); |
| 6532 } | 6715 } |
| 6533 | 6716 |
| 6534 | 6717 |
| 6535 } // namespace internal | 6718 } // namespace internal |
| 6536 } // namespace v8 | 6719 } // namespace v8 |
| 6537 | 6720 |
| 6538 #endif // V8_TARGET_ARCH_MIPS | 6721 #endif // V8_TARGET_ARCH_MIPS |