Chromium Code Reviews

Diff: src/mips64/macro-assembler-mips64.cc

Issue 1779713009: Implement optional turbofan UnalignedLoad and UnalignedStore operators (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Unaligned access simulated using load/shift/or and store/shift/and. Created 4 years, 8 months ago
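Reviewer context: the patch-set note above is terse, so here is a minimal, standalone C++ sketch (not part of this CL; the helper names are made up for illustration) of the load/shift/or and store/shift idea that the new Ulh/Ush pseudo-instructions expand to on MIPS64r2. An unaligned halfword access is split into byte accesses that are merged or scattered with shifts; the sketch assumes a little-endian layout, mirroring the V8_TARGET_LITTLE_ENDIAN branches in the diff.

#include <cstdint>
#include <cstdio>

// Hypothetical helpers, for illustration only (not V8 APIs).
// Read a 16-bit value from a possibly unaligned address, little-endian:
// load each byte, shift the high byte into place, OR the parts together.
uint16_t UnalignedLoad16LE(const uint8_t* p) {
  return static_cast<uint16_t>(p[0] | (p[1] << 8));
}

// Write a 16-bit value to a possibly unaligned address, little-endian:
// store the low byte, shift right by 8, store the high byte.
void UnalignedStore16LE(uint8_t* p, uint16_t v) {
  p[0] = static_cast<uint8_t>(v);
  p[1] = static_cast<uint8_t>(v >> 8);
}

int main() {
  uint8_t buf[4] = {0, 0, 0, 0};
  UnalignedStore16LE(buf + 1, 0xBEEF);  // deliberately misaligned
  std::printf("%04x\n", static_cast<unsigned>(UnalignedLoad16LE(buf + 1)));  // prints "beef"
  return 0;
}

On MIPS64r6 the diff simply emits the plain lw/sw/ld/sd forms, while on MIPS64r2 it pairs lwr/lwl, sdr/sdl, ldr/ldl, or byte accesses as shown above.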
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include <limits.h>  // For LONG_MIN, LONG_MAX.

 #if V8_TARGET_ARCH_MIPS64

 #include "src/base/division-by-constant.h"
 #include "src/bootstrapper.h"
(...skipping 1307 matching lines...)
     DCHECK(!tmp.is(rt));
     dsll(tmp, rs, sa);
     Daddu(rd, rt, tmp);
   }
 }


 // ------------Pseudo-instructions-------------

 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
-  lwr(rd, rs);
-  lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (kArchVariant == kMips64r6) {
+    lw(rd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    if (is_int16(rs.offset()) && is_int16(rs.offset() + 3)) {
+      if (!rd.is(rs.rm())) {
+        lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
+        lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
+      } else {
+        lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
+        lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
+        mov(rd, at);
+      }
+    } else {  // Offset > 16 bits, use multiple instructions to load.
+      LoadRegPlusOffsetToAt(rs);
+      lwr(rd, MemOperand(at, kMipsLwrOffset));
+      lwl(rd, MemOperand(at, kMipsLwlOffset));
+    }
+  }
+}
+
+void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) {
+  if (kArchVariant == kMips64r6) {
+    lwu(rd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    Ulw(rd, rs);
+    Dext(rd, rd, 0, 32);
+  }
 }


 void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
-  swr(rd, rs);
-  swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (kArchVariant == kMips64r6) {
+    sw(rd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    if (is_int16(rs.offset()) && is_int16(rs.offset() + 3)) {
+      swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
+      swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
+    } else {
+      LoadRegPlusOffsetToAt(rs);
+      swr(rd, MemOperand(at, kMipsSwrOffset));
+      swl(rd, MemOperand(at, kMipsSwlOffset));
+    }
+  }
+}
+
+void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (kArchVariant == kMips64r6) {
+    lh(rd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+      lbu(at, rs);
+      lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+      lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
+      lb(rd, rs);
+#endif
+    } else {  // Offset > 16 bits, use multiple instructions to load.
+      LoadRegPlusOffsetToAt(rs);
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+      lb(rd, MemOperand(at, 1));
+      lbu(at, MemOperand(at, 0));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+      lb(rd, MemOperand(at, 0));
+      lbu(at, MemOperand(at, 1));
+#endif
+    }
+    dsll(rd, rd, 8);
+    or_(rd, rd, at);
+  }
+}
+
+void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (kArchVariant == kMips64r6) {
+    lhu(rd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+      lbu(at, rs);
+      lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+      lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
+      lbu(rd, rs);
+#endif
+    } else {  // Offset > 16 bits, use multiple instructions to load.
+      LoadRegPlusOffsetToAt(rs);
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+      lbu(rd, MemOperand(at, 1));
+      lbu(at, MemOperand(at, 0));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+      lbu(rd, MemOperand(at, 0));
+      lbu(at, MemOperand(at, 1));
+#endif
+    }
+    dsll(rd, rd, 8);
+    or_(rd, rd, at);
+  }
+}
+
+void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  DCHECK(!rs.rm().is(scratch));
+  DCHECK(!scratch.is(at));
+  if (kArchVariant == kMips64r6) {
+    sh(rd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    MemOperand source = rs;
+    // If offset > 16 bits, load address to at with offset 0.
+    if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
+      LoadRegPlusOffsetToAt(rs);
+      source = MemOperand(at, 0);
+    }
+
+    if (!scratch.is(rd)) {
+      mov(scratch, rd);
+    }
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+    sb(scratch, source);
+    srl(scratch, scratch, 8);
+    sb(scratch, MemOperand(source.rm(), source.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+    sb(scratch, MemOperand(source.rm(), source.offset() + 1));
+    srl(scratch, scratch, 8);
+    sb(scratch, source);
+#endif
+  }
+}
+
+void MacroAssembler::Uld(Register rd, const MemOperand& rs) {
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (kArchVariant == kMips64r6) {
+    ld(rd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    if (is_int16(rs.offset()) && is_int16(rs.offset() + 7)) {
+      if (!rd.is(rs.rm())) {
+        ldr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
+        ldl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
+      } else {
+        ldr(at, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
+        ldl(at, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
+        mov(rd, at);
+      }
+    } else {  // Offset > 16 bits, use multiple instructions to load.
+      LoadRegPlusOffsetToAt(rs);
+      ldr(rd, MemOperand(at, kMipsLdrOffset));
+      ldl(rd, MemOperand(at, kMipsLdlOffset));
+    }
+  }
 }


-// Do 64-bit load from unaligned address. Note this only handles
-// the specific case of 32-bit aligned, but not 64-bit aligned.
-void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
-  // Assert fail if the offset from start of object IS actually aligned.
-  // ONLY use with known misalignment, since there is performance cost.
-  DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
-  if (kArchEndian == kLittle) {
-    lwu(rd, rs);
-    lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
-    dsll32(scratch, scratch, 0);
-  } else {
-    lw(rd, rs);
-    lwu(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
-    dsll32(rd, rd, 0);
-  }
-  Daddu(rd, rd, scratch);
-}
-
-
 // Load consecutive 32-bit word pair in 64-bit reg. and put first word in low
 // bits, second word in high bits.
 void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
                                   Register scratch) {
   lwu(rd, rs);
   lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
   dsll32(scratch, scratch, 0);
   Daddu(rd, rd, scratch);
 }

-
-// Do 64-bit store to unaligned address. Note this only handles
-// the specific case of 32-bit aligned, but not 64-bit aligned.
-void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
-  // Assert fail if the offset from start of object IS actually aligned.
-  // ONLY use with known misalignment, since there is performance cost.
-  DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
-  if (kArchEndian == kLittle) {
-    sw(rd, rs);
-    dsrl32(scratch, rd, 0);
-    sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
-  } else {
-    sw(rd, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
-    dsrl32(scratch, rd, 0);
-    sw(scratch, rs);
-  }
-}
+void MacroAssembler::Usd(Register rd, const MemOperand& rs) {
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (kArchVariant == kMips64r6) {
+    sd(rd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    if (is_int16(rs.offset()) && is_int16(rs.offset() + 7)) {
+      sdr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdrOffset));
+      sdl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdlOffset));
+    } else {
+      LoadRegPlusOffsetToAt(rs);
+      sdr(rd, MemOperand(at, kMipsSdrOffset));
+      sdl(rd, MemOperand(at, kMipsSdlOffset));
+    }
+  }
+}


 // Do 64-bit store as two consecutive 32-bit stores to unaligned address.
 void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
                                    Register scratch) {
   sw(rd, rs);
   dsrl32(scratch, rd, 0);
   sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
 }

+void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
+                           Register scratch) {
+  if (kArchVariant == kMips64r6) {
+    lwc1(fd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    Ulw(scratch, rs);
+    mtc1(scratch, fd);
+  }
+}
+
+void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
+                           Register scratch) {
+  if (kArchVariant == kMips64r6) {
+    swc1(fd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    mfc1(scratch, fd);
+    Usw(scratch, rs);
+  }
+}
+
+void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
+                           Register scratch) {
+  DCHECK(!scratch.is(at));
+  if (kArchVariant == kMips64r6) {
+    ldc1(fd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    Uld(scratch, rs);
+    dmtc1(scratch, fd);
+  }
+}
+
+void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
+                           Register scratch) {
+  DCHECK(!scratch.is(at));
+  if (kArchVariant == kMips64r6) {
+    sdc1(fd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    dmfc1(scratch, fd);
+    Usd(scratch, rs);
+  }
+}

 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
   AllowDeferredHandleDereference smi_check;
   if (value->IsSmi()) {
     li(dst, Operand(value), mode);
   } else {
     DCHECK(value->IsHeapObject());
     if (isolate()->heap()->InNewSpace(*value)) {
       Handle<Cell> cell = isolate()->factory()->NewCell(value);
       li(dst, Operand(cell));
(...skipping 5483 matching lines...)
   if (mag.shift > 0) sra(result, result, mag.shift);
   srl(at, dividend, 31);
   Addu(result, result, Operand(at));
 }


 }  // namespace internal
 }  // namespace v8

 #endif  // V8_TARGET_ARCH_MIPS64
