Chromium Code Reviews

Unified diff: src/mips64/macro-assembler-mips64.cc

Issue 1902743002: MIPS: Implement unaligned access instruction. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Address code review remarks Created 4 years, 7 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include <limits.h>  // For LONG_MIN, LONG_MAX.
 
 #if V8_TARGET_ARCH_MIPS64
 
 #include "src/base/division-by-constant.h"
 #include "src/bootstrapper.h"
(...skipping 1307 matching lines...)
     DCHECK(!tmp.is(rt));
     dsll(tmp, rs, sa);
     Daddu(rd, rt, tmp);
   }
 }
 
 
 // ------------Pseudo-instructions-------------
 
 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
-  lwr(rd, rs);
-  lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (kArchVariant == kMips64r6) {
+    lw(rd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    if (is_int16(rs.offset() + kMipsLwrOffset) &&
+        is_int16(rs.offset() + kMipsLwlOffset)) {
+      if (!rd.is(rs.rm())) {
+        lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
+        lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
+      } else {
+        lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
+        lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
+        mov(rd, at);
+      }
+    } else {  // Offset > 16 bits, use multiple instructions to load.
+      LoadRegPlusOffsetToAt(rs);
+      lwr(rd, MemOperand(at, kMipsLwrOffset));
+      lwl(rd, MemOperand(at, kMipsLwlOffset));
+    }
+  }
+}
+
+void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) {
+  if (kArchVariant == kMips64r6) {
+    lwu(rd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    Ulw(rd, rs);
+    Dext(rd, rd, 0, 32);
+  }
 }
 
 
 void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
-  swr(rd, rs);
-  swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (kArchVariant == kMips64r6) {
+    sw(rd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    if (is_int16(rs.offset() + kMipsSwrOffset) &&
+        is_int16(rs.offset() + kMipsSwlOffset)) {
+      swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
+      swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
+    } else {
+      LoadRegPlusOffsetToAt(rs);
+      swr(rd, MemOperand(at, kMipsSwrOffset));
+      swl(rd, MemOperand(at, kMipsSwlOffset));
+    }
+  }
+}
+
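Note: for readers unfamiliar with the r2 fallback above, the lwr/lwl (and swr/swl) pair each fill part of the destination from an unaligned word, with kMipsLwrOffset/kMipsLwlOffset selecting the two endian-dependent anchor bytes. A minimal host-side C++ sketch of the net effect, assuming nothing beyond the standard library (the helper names below are illustrative, not V8 API):

#include <cstdint>
#include <cstring>

// Host-side model of Ulw/Usw on kMips64r2: the paired instructions
// together touch the four bytes starting at an arbitrary address, which
// is equivalent to a byte-wise copy; memcpy never requires alignment.
int32_t UnalignedLoad32(const void* p) {
  int32_t value;
  std::memcpy(&value, p, sizeof(value));
  return value;
}

void UnalignedStore32(void* p, int32_t value) {
  std::memcpy(p, &value, sizeof(value));
}

On r6 none of this machinery is needed, because MIPS64r6 removed lwl/lwr and instead requires plain lw/sw to support unaligned addresses in hardware, which is exactly what the kMips64r6 branch relies on.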
+void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (kArchVariant == kMips64r6) {
+    lh(rd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+      lbu(at, rs);
+      lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+      lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
+      lb(rd, rs);
+#endif
+    } else {  // Offset > 16 bits, use multiple instructions to load.
+      LoadRegPlusOffsetToAt(rs);
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+      lb(rd, MemOperand(at, 1));
+      lbu(at, MemOperand(at, 0));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+      lb(rd, MemOperand(at, 0));
+      lbu(at, MemOperand(at, 1));
+#endif
+    }
+    dsll(rd, rd, 8);
+    or_(rd, rd, at);
+  }
+}
+
+void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (kArchVariant == kMips64r6) {
+    lhu(rd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+      lbu(at, rs);
+      lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+      lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
+      lbu(rd, rs);
+#endif
+    } else {  // Offset > 16 bits, use multiple instructions to load.
+      LoadRegPlusOffsetToAt(rs);
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+      lbu(rd, MemOperand(at, 1));
+      lbu(at, MemOperand(at, 0));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+      lbu(rd, MemOperand(at, 0));
+      lbu(at, MemOperand(at, 1));
+#endif
+    }
+    dsll(rd, rd, 8);
+    or_(rd, rd, at);
+  }
+}
+
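Note: the r2 halfword path composes the result from two byte loads: lb sign-extends the byte at the higher address into rd, lbu zero-extends the other byte into at, and dsll/or_ merge them. A little-endian host-side sketch (the function names are illustrative, not V8 API):

#include <cstdint>

// Model of the little-endian r2 Ulh sequence.
int32_t UnalignedLoadSigned16LE(const uint8_t* p) {
  const int32_t high = static_cast<int8_t>(p[1]);  // lb: sign-extended
  const uint32_t low = p[0];                       // lbu: zero-extended
  // dsll rd, rd, 8; or_ rd, rd, at:
  return static_cast<int32_t>((static_cast<uint32_t>(high) << 8) | low);
}

// Ulhu differs only in loading both bytes with lbu, so the result is
// zero-extended rather than sign-extended.
uint32_t UnalignedLoadUnsigned16LE(const uint8_t* p) {
  return (static_cast<uint32_t>(p[1]) << 8) | p[0];
}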
+void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  DCHECK(!rs.rm().is(scratch));
+  DCHECK(!scratch.is(at));
+  if (kArchVariant == kMips64r6) {
+    sh(rd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    MemOperand source = rs;
+    // If offset > 16 bits, load address to at with offset 0.
+    if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
+      LoadRegPlusOffsetToAt(rs);
+      source = MemOperand(at, 0);
+    }
+
+    if (!scratch.is(rd)) {
+      mov(scratch, rd);
+    }
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+    sb(scratch, source);
+    srl(scratch, scratch, 8);
+    sb(scratch, MemOperand(source.rm(), source.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+    sb(scratch, MemOperand(source.rm(), source.offset() + 1));
+    srl(scratch, scratch, 8);
+    sb(scratch, source);
+#endif
+  }
+}
+
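Note: the store direction splits the halfword with srl and two sb instructions, with scratch preserving rd when the two differ. A little-endian model (illustrative name, assumes a uint16_t source):

#include <cstdint>

// Model of the little-endian r2 Ush sequence: sb stores the low byte,
// srl shifts the scratch copy right by 8, and a second sb stores the
// high byte at the next address.
void UnalignedStore16LE(uint8_t* p, uint16_t value) {
  p[0] = static_cast<uint8_t>(value);       // sb scratch, 0(source)
  p[1] = static_cast<uint8_t>(value >> 8);  // srl 8; sb scratch, 1(source)
}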
+void MacroAssembler::Uld(Register rd, const MemOperand& rs) {
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (kArchVariant == kMips64r6) {
+    ld(rd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    if (is_int16(rs.offset() + kMipsLdrOffset) &&
+        is_int16(rs.offset() + kMipsLdlOffset)) {
+      if (!rd.is(rs.rm())) {
+        ldr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
+        ldl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
+      } else {
+        ldr(at, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
+        ldl(at, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
+        mov(rd, at);
+      }
+    } else {  // Offset > 16 bits, use multiple instructions to load.
+      LoadRegPlusOffsetToAt(rs);
+      ldr(rd, MemOperand(at, kMipsLdrOffset));
+      ldl(rd, MemOperand(at, kMipsLdlOffset));
+    }
+  }
 }
 
 
-// Do 64-bit load from unaligned address. Note this only handles
-// the specific case of 32-bit aligned, but not 64-bit aligned.
-void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
-  // Assert fail if the offset from start of object IS actually aligned.
-  // ONLY use with known misalignment, since there is performance cost.
-  DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
-  if (kArchEndian == kLittle) {
-    lwu(rd, rs);
-    lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
-    dsll32(scratch, scratch, 0);
-  } else {
-    lw(rd, rs);
-    lwu(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
-    dsll32(rd, rd, 0);
-  }
-  Daddu(rd, rd, scratch);
-}
-
-
 // Load a consecutive 32-bit word pair into a 64-bit register, putting the
 // first word in the low bits and the second word in the high bits.
 void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
                                   Register scratch) {
   lwu(rd, rs);
   lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
   dsll32(scratch, scratch, 0);
   Daddu(rd, rd, scratch);
 }
 
-// Do 64-bit store to unaligned address. Note this only handles
-// the specific case of 32-bit aligned, but not 64-bit aligned.
-void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
-  // Assert fail if the offset from start of object IS actually aligned.
-  // ONLY use with known misalignment, since there is performance cost.
-  DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
-  if (kArchEndian == kLittle) {
-    sw(rd, rs);
-    dsrl32(scratch, rd, 0);
-    sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
-  } else {
-    sw(rd, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
-    dsrl32(scratch, rd, 0);
-    sw(scratch, rs);
-  }
-}
+void MacroAssembler::Usd(Register rd, const MemOperand& rs) {
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (kArchVariant == kMips64r6) {
+    sd(rd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    if (is_int16(rs.offset() + kMipsSdrOffset) &&
+        is_int16(rs.offset() + kMipsSdlOffset)) {
+      sdr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdrOffset));
+      sdl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdlOffset));
+    } else {
+      LoadRegPlusOffsetToAt(rs);
+      sdr(rd, MemOperand(at, kMipsSdrOffset));
+      sdl(rd, MemOperand(at, kMipsSdlOffset));
+    }
+  }
+}
 
 
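Note: the new Uld/Usd mirror Ulw/Usw at doubleword width via ldr/ldl and sdr/sdl and work for any alignment, whereas the deleted helpers above only handled an address that was 4-byte but not 8-byte aligned. A host-side sketch of the difference (illustrative names, not V8 API):

#include <cstdint>
#include <cstring>

// Model of the new r2 Uld/Usd: ldr/ldl (sdr/sdl) cover the low and high
// portions of a doubleword that may straddle an 8-byte boundary, which is
// equivalent to a byte-wise 64-bit copy at any alignment.
uint64_t UnalignedLoad64(const void* p) {
  uint64_t value;
  std::memcpy(&value, p, sizeof(value));
  return value;
}

// Model of the removed little-endian helper: two 32-bit halves joined
// with a 32-bit shift. This is only correct when the address is 4-byte
// aligned, which is why the old code was restricted to that case.
uint64_t WordPairLoad64LE(const uint32_t* p) {
  return static_cast<uint64_t>(p[0]) |
         (static_cast<uint64_t>(p[1]) << 32);
}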
 // Do a 64-bit store as two consecutive 32-bit stores to an unaligned address.
 void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
                                    Register scratch) {
   sw(rd, rs);
   dsrl32(scratch, rd, 0);
   sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
 }
 
+void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
+                           Register scratch) {
+  if (kArchVariant == kMips64r6) {
+    lwc1(fd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    Ulw(scratch, rs);
+    mtc1(scratch, fd);
+  }
+}
+
+void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
+                           Register scratch) {
+  if (kArchVariant == kMips64r6) {
+    swc1(fd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    mfc1(scratch, fd);
+    Usw(scratch, rs);
+  }
+}
+
+void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
+                           Register scratch) {
+  DCHECK(!scratch.is(at));
+  if (kArchVariant == kMips64r6) {
+    ldc1(fd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    Uld(scratch, rs);
+    dmtc1(scratch, fd);
+  }
+}
+
+void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
+                           Register scratch) {
+  DCHECK(!scratch.is(at));
+  if (kArchVariant == kMips64r6) {
+    sdc1(fd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    dmfc1(scratch, fd);
+    Usd(scratch, rs);
+  }
+}
 
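Note: the FPU has no unaligned load/store forms on r2, so these four helpers route the raw bits through an integer scratch register (mtc1/mfc1 for singles, dmtc1/dmfc1 for doubles) around the integer Ulw/Usw/Uld/Usd. A host-side model of the double case (illustrative names, not V8 API):

#include <cstdint>
#include <cstring>

// Model of Uldc1 on kMips64r2: integer Uld into a scratch register, then
// dmtc1 moves the raw bits into the FPU register. Usdc1 is the inverse.
double UnalignedLoadDouble(const void* p) {
  uint64_t bits;
  std::memcpy(&bits, p, sizeof(bits));          // Uld(scratch, rs)
  double result;
  std::memcpy(&result, &bits, sizeof(result));  // dmtc1 scratch -> fd
  return result;
}

void UnalignedStoreDouble(void* p, double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));     // dmfc1 fd -> scratch
  std::memcpy(p, &bits, sizeof(bits));          // Usd(scratch, rs)
}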
 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
   AllowDeferredHandleDereference smi_check;
   if (value->IsSmi()) {
     li(dst, Operand(value), mode);
   } else {
     DCHECK(value->IsHeapObject());
     if (isolate()->heap()->InNewSpace(*value)) {
       Handle<Cell> cell = isolate()->factory()->NewCell(value);
       li(dst, Operand(cell));
(...skipping 5483 matching lines...)
   if (mag.shift > 0) sra(result, result, mag.shift);
   srl(at, dividend, 31);
   Addu(result, result, Operand(at));
 }
 
 
 }  // namespace internal
 }  // namespace v8
 
 #endif  // V8_TARGET_ARCH_MIPS64
