OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 549 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
560 static_cast<int64_t>(0xffffffffffffffff)}; | 560 static_cast<int64_t>(0xffffffffffffffff)}; |
561 return std::vector<int64_t>(&kValues[0], &kValues[arraysize(kValues)]); | 561 return std::vector<int64_t>(&kValues[0], &kValues[arraysize(kValues)]); |
562 } | 562 } |
563 | 563 |
// Helper macros that can be used in FOR_INT32_INPUTS(i) { ... *i ... }
// FOR_INPUTS declares a vector named <var>_vec filled from |test_vector| and
// iterates |var| (an iterator) over it; dereference with *var in the body.
#define FOR_INPUTS(ctype, itype, var, test_vector)           \
  std::vector<ctype> var##_vec = test_vector();              \
  for (std::vector<ctype>::iterator var = var##_vec.begin(); \
       var != var##_vec.end(); ++var)

// Like FOR_INPUTS, but walks the same vector from both ends in lock-step:
// |var| iterates forward while |var2| iterates in reverse. Useful for pairing
// each element with its mirror element (e.g. distinct in/out offsets).
#define FOR_INPUTS2(ctype, itype, var, var2, test_vector)  \
  std::vector<ctype> var##_vec = test_vector();            \
  std::vector<ctype>::iterator var;                        \
  std::vector<ctype>::reverse_iterator var2;               \
  for (var = var##_vec.begin(), var2 = var##_vec.rbegin(); \
       var != var##_vec.end(); ++var, ++var2)

// Typed convenience wrappers over FOR_INPUTS / FOR_INPUTS2.
#define FOR_ENUM_INPUTS(var, type, test_vector) \
  FOR_INPUTS(enum type, type, var, test_vector)
#define FOR_STRUCT_INPUTS(var, type, test_vector) \
  FOR_INPUTS(struct type, type, var, test_vector)
#define FOR_INT32_INPUTS(var, test_vector) \
  FOR_INPUTS(int32_t, int32, var, test_vector)
#define FOR_INT32_INPUTS2(var, var2, test_vector) \
  FOR_INPUTS2(int32_t, int32, var, var2, test_vector)
#define FOR_INT64_INPUTS(var, test_vector) \
  FOR_INPUTS(int64_t, int64, var, test_vector)
#define FOR_UINT32_INPUTS(var, test_vector) \
  FOR_INPUTS(uint32_t, uint32, var, test_vector)
#define FOR_UINT64_INPUTS(var, test_vector) \
  FOR_INPUTS(uint64_t, uint64, var, test_vector)
582 | 591 |
583 template <typename RET_TYPE, typename IN_TYPE, typename Func> | 592 template <typename RET_TYPE, typename IN_TYPE, typename Func> |
584 RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) { | 593 RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) { |
585 typedef RET_TYPE (*F_CVT)(IN_TYPE x0, int x1, int x2, int x3, int x4); | 594 typedef RET_TYPE (*F_CVT)(IN_TYPE x0, int x1, int x2, int x3, int x4); |
(...skipping 803 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1389 | 1398 |
1390 CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0); | 1399 CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0); |
1391 | 1400 |
1392 CHECK_EQ(0, memcmp(&test.c, &outputsdmin[i], sizeof(test.c))); | 1401 CHECK_EQ(0, memcmp(&test.c, &outputsdmin[i], sizeof(test.c))); |
1393 CHECK_EQ(0, memcmp(&test.d, &outputsdmax[i], sizeof(test.d))); | 1402 CHECK_EQ(0, memcmp(&test.d, &outputsdmax[i], sizeof(test.d))); |
1394 CHECK_EQ(0, memcmp(&test.g, &outputsfmin[i], sizeof(test.g))); | 1403 CHECK_EQ(0, memcmp(&test.g, &outputsfmin[i], sizeof(test.g))); |
1395 CHECK_EQ(0, memcmp(&test.h, &outputsfmax[i], sizeof(test.h))); | 1404 CHECK_EQ(0, memcmp(&test.h, &outputsfmax[i], sizeof(test.h))); |
1396 } | 1405 } |
1397 } | 1406 } |
1398 | 1407 |
// Assembles the code emitted by |GenerateUnalignedInstructionFunc| — which is
// expected to load an IN_TYPE value from memory_buffer + in_offset and store
// it back at memory_buffer + out_offset (both possibly unaligned) — runs it
// with |value| planted at the input location, and returns true iff the bytes
// read back from the output location equal |value|.
template <typename IN_TYPE, typename Func>
bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
                   IN_TYPE value, Func GenerateUnalignedInstructionFunc) {
  // The generated code receives the buffer base pointer in argument 0 (a0).
  typedef int32_t (*F_CVT)(char* x0, int x1, int x2, int x3, int x4);

  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);
  // NOTE: the __ assembler shorthand below expands through |masm|, so the
  // local must keep exactly this name.
  MacroAssembler assm(isolate, nullptr, 0,
                      v8::internal::CodeObjectRequired::kYes);
  MacroAssembler* masm = &assm;
  IN_TYPE res;

  // Emit the test-specific load/store sequence, then return to the caller.
  GenerateUnalignedInstructionFunc(masm, in_offset, out_offset);
  __ jr(ra);
  __ nop();  // Branch delay slot.

  CodeDesc desc;
  assm.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());

  F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());

  // Plant the input bytes, run the generated code, read back the output.
  MemCopy(memory_buffer + in_offset, &value, sizeof(IN_TYPE));
  CALL_GENERATED_CODE(isolate, f, memory_buffer, 0, 0, 0, 0);
  MemCopy(&res, memory_buffer + out_offset, sizeof(IN_TYPE));

  return res == value;
}
| 1437 |
// 64-bit payload patterns fed to the unaligned load/store tests: a few
// arbitrary bit patterns plus sign-bit and all-ones/one edge cases.
static const std::vector<uint64_t> unsigned_test_values() {
  static const uint64_t kTestPatterns[] = {
      0x2180f18a06384414, 0x000a714532102277, 0xbc1acccf180649f0,
      0x8000000080008000, 0x0000000000000001, 0xffffffffffffffff,
  };
  const size_t count = sizeof(kTestPatterns) / sizeof(kTestPatterns[0]);
  return std::vector<uint64_t>(kTestPatterns, kTestPatterns + count);
}
| 1445 |
| 1446 static const std::vector<int32_t> unsigned_test_offset() { |
| 1447 static const int32_t kValues[] = {// value, offset |
| 1448 -132 * KB, -21 * KB, 0, 19 * KB, 135 * KB}; |
| 1449 return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]); |
| 1450 } |
| 1451 |
// Small per-byte deltas combined with the base offsets so that every
// alignment in a window around each base offset gets exercised.
static const std::vector<int32_t> unsigned_test_offset_increment() {
  static const int32_t kDeltas[] = {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5};
  const size_t count = sizeof(kDeltas) / sizeof(kDeltas[0]);
  return std::vector<int32_t>(kDeltas, kDeltas + count);
}
| 1456 |
| 1457 TEST(Ulh) { |
| 1458 CcTest::InitializeVM(); |
| 1459 |
| 1460 static const int kBufferSize = 300 * KB; |
| 1461 char memory_buffer[kBufferSize]; |
| 1462 char* buffer_middle = memory_buffer + (kBufferSize / 2); |
| 1463 |
| 1464 FOR_UINT64_INPUTS(i, unsigned_test_values) { |
| 1465 FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { |
| 1466 FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { |
| 1467 uint16_t value = static_cast<uint64_t>(*i & 0xFFFF); |
| 1468 int32_t in_offset = *j1 + *k1; |
| 1469 int32_t out_offset = *j2 + *k2; |
| 1470 |
| 1471 CHECK_EQ(true, run_Unaligned<uint16_t>( |
| 1472 buffer_middle, in_offset, out_offset, value, |
| 1473 [](MacroAssembler* masm, int32_t in_offset, |
| 1474 int32_t out_offset) { |
| 1475 __ Ulh(v0, MemOperand(a0, in_offset)); |
| 1476 __ Ush(v0, MemOperand(a0, out_offset), v0); |
| 1477 })); |
| 1478 CHECK_EQ(true, run_Unaligned<uint16_t>( |
| 1479 buffer_middle, in_offset, out_offset, value, |
| 1480 [](MacroAssembler* masm, int32_t in_offset, |
| 1481 int32_t out_offset) { |
| 1482 __ mov(t0, a0); |
| 1483 __ Ulh(a0, MemOperand(a0, in_offset)); |
| 1484 __ Ush(a0, MemOperand(t0, out_offset), v0); |
| 1485 })); |
| 1486 CHECK_EQ(true, run_Unaligned<uint16_t>( |
| 1487 buffer_middle, in_offset, out_offset, value, |
| 1488 [](MacroAssembler* masm, int32_t in_offset, |
| 1489 int32_t out_offset) { |
| 1490 __ mov(t0, a0); |
| 1491 __ Ulhu(a0, MemOperand(a0, in_offset)); |
| 1492 __ Ush(a0, MemOperand(t0, out_offset), t1); |
| 1493 })); |
| 1494 CHECK_EQ(true, run_Unaligned<uint16_t>( |
| 1495 buffer_middle, in_offset, out_offset, value, |
| 1496 [](MacroAssembler* masm, int32_t in_offset, |
| 1497 int32_t out_offset) { |
| 1498 __ Ulhu(v0, MemOperand(a0, in_offset)); |
| 1499 __ Ush(v0, MemOperand(a0, out_offset), t1); |
| 1500 })); |
| 1501 } |
| 1502 } |
| 1503 } |
| 1504 } |
| 1505 |
| 1506 TEST(Ulh_bitextension) { |
| 1507 CcTest::InitializeVM(); |
| 1508 |
| 1509 static const int kBufferSize = 300 * KB; |
| 1510 char memory_buffer[kBufferSize]; |
| 1511 char* buffer_middle = memory_buffer + (kBufferSize / 2); |
| 1512 |
| 1513 FOR_UINT64_INPUTS(i, unsigned_test_values) { |
| 1514 FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { |
| 1515 FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { |
| 1516 uint16_t value = static_cast<uint64_t>(*i & 0xFFFF); |
| 1517 int32_t in_offset = *j1 + *k1; |
| 1518 int32_t out_offset = *j2 + *k2; |
| 1519 |
| 1520 CHECK_EQ(true, run_Unaligned<uint16_t>( |
| 1521 buffer_middle, in_offset, out_offset, value, |
| 1522 [](MacroAssembler* masm, int32_t in_offset, |
| 1523 int32_t out_offset) { |
| 1524 Label success, fail, end, different; |
| 1525 __ Ulh(t0, MemOperand(a0, in_offset)); |
| 1526 __ Ulhu(t1, MemOperand(a0, in_offset)); |
| 1527 __ Branch(&different, ne, t0, Operand(t1)); |
| 1528 |
| 1529 // If signed and unsigned values are same, check |
| 1530 // the upper bits to see if they are zero |
| 1531 __ sra(t0, t0, 15); |
| 1532 __ Branch(&success, eq, t0, Operand(zero_reg)); |
| 1533 __ Branch(&fail); |
| 1534 |
| 1535 // If signed and unsigned values are different, |
| 1536 // check that the upper bits are complementary |
| 1537 __ bind(&different); |
| 1538 __ sra(t1, t1, 15); |
| 1539 __ Branch(&fail, ne, t1, Operand(1)); |
| 1540 __ sra(t0, t0, 15); |
| 1541 __ addiu(t0, t0, 1); |
| 1542 __ Branch(&fail, ne, t0, Operand(zero_reg)); |
| 1543 // Fall through to success |
| 1544 |
| 1545 __ bind(&success); |
| 1546 __ Ulh(t0, MemOperand(a0, in_offset)); |
| 1547 __ Ush(t0, MemOperand(a0, out_offset), v0); |
| 1548 __ Branch(&end); |
| 1549 __ bind(&fail); |
| 1550 __ Ush(zero_reg, MemOperand(a0, out_offset), v0); |
| 1551 __ bind(&end); |
| 1552 })); |
| 1553 } |
| 1554 } |
| 1555 } |
| 1556 } |
| 1557 |
| 1558 TEST(Ulw) { |
| 1559 CcTest::InitializeVM(); |
| 1560 |
| 1561 static const int kBufferSize = 300 * KB; |
| 1562 char memory_buffer[kBufferSize]; |
| 1563 char* buffer_middle = memory_buffer + (kBufferSize / 2); |
| 1564 |
| 1565 FOR_UINT64_INPUTS(i, unsigned_test_values) { |
| 1566 FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { |
| 1567 FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { |
| 1568 uint32_t value = static_cast<uint32_t>(*i & 0xFFFFFFFF); |
| 1569 int32_t in_offset = *j1 + *k1; |
| 1570 int32_t out_offset = *j2 + *k2; |
| 1571 |
| 1572 CHECK_EQ(true, run_Unaligned<uint32_t>( |
| 1573 buffer_middle, in_offset, out_offset, value, |
| 1574 [](MacroAssembler* masm, int32_t in_offset, |
| 1575 int32_t out_offset) { |
| 1576 __ Ulw(v0, MemOperand(a0, in_offset)); |
| 1577 __ Usw(v0, MemOperand(a0, out_offset)); |
| 1578 })); |
| 1579 CHECK_EQ(true, |
| 1580 run_Unaligned<uint32_t>( |
| 1581 buffer_middle, in_offset, out_offset, (uint32_t)value, |
| 1582 [](MacroAssembler* masm, int32_t in_offset, |
| 1583 int32_t out_offset) { |
| 1584 __ mov(t0, a0); |
| 1585 __ Ulw(a0, MemOperand(a0, in_offset)); |
| 1586 __ Usw(a0, MemOperand(t0, out_offset)); |
| 1587 })); |
| 1588 CHECK_EQ(true, run_Unaligned<uint32_t>( |
| 1589 buffer_middle, in_offset, out_offset, value, |
| 1590 [](MacroAssembler* masm, int32_t in_offset, |
| 1591 int32_t out_offset) { |
| 1592 __ Ulwu(v0, MemOperand(a0, in_offset)); |
| 1593 __ Usw(v0, MemOperand(a0, out_offset)); |
| 1594 })); |
| 1595 CHECK_EQ(true, |
| 1596 run_Unaligned<uint32_t>( |
| 1597 buffer_middle, in_offset, out_offset, (uint32_t)value, |
| 1598 [](MacroAssembler* masm, int32_t in_offset, |
| 1599 int32_t out_offset) { |
| 1600 __ mov(t0, a0); |
| 1601 __ Ulwu(a0, MemOperand(a0, in_offset)); |
| 1602 __ Usw(a0, MemOperand(t0, out_offset)); |
| 1603 })); |
| 1604 } |
| 1605 } |
| 1606 } |
| 1607 } |
| 1608 |
// Verifies the sign-/zero-extension contract of Ulw vs Ulwu on a 64-bit core:
// the generated code loads the same word with both instructions, checks that
// bits 63..31 are consistent (zero after Ulwu, sign-replicated after Ulw),
// and stores the loaded word on success or zero on failure; run_Unaligned
// then compares the stored word against the input value.
TEST(Ulw_extension) {
  CcTest::InitializeVM();

  static const int kBufferSize = 300 * KB;
  char memory_buffer[kBufferSize];
  char* buffer_middle = memory_buffer + (kBufferSize / 2);

  FOR_UINT64_INPUTS(i, unsigned_test_values) {
    FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
      FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
        uint32_t value = static_cast<uint32_t>(*i & 0xFFFFFFFF);
        int32_t in_offset = *j1 + *k1;
        int32_t out_offset = *j2 + *k2;

        CHECK_EQ(true, run_Unaligned<uint32_t>(
                           buffer_middle, in_offset, out_offset, value,
                           [](MacroAssembler* masm, int32_t in_offset,
                              int32_t out_offset) {
                             Label success, fail, end, different;
                             __ Ulw(t0, MemOperand(a0, in_offset));
                             __ Ulwu(t1, MemOperand(a0, in_offset));
                             __ Branch(&different, ne, t0, Operand(t1));

                             // If signed and unsigned values are same, check
                             // the upper bits to see if they are zero
                             __ dsra(t0, t0, 31);
                             __ Branch(&success, eq, t0, Operand(zero_reg));
                             __ Branch(&fail);

                             // If signed and unsigned values are different,
                             // check that the upper bits are complementary
                             __ bind(&different);
                             __ dsra(t1, t1, 31);
                             __ Branch(&fail, ne, t1, Operand(1));
                             __ dsra(t0, t0, 31);
                             __ daddiu(t0, t0, 1);
                             __ Branch(&fail, ne, t0, Operand(zero_reg));
                             // Fall through to success

                             __ bind(&success);
                             __ Ulw(t0, MemOperand(a0, in_offset));
                             __ Usw(t0, MemOperand(a0, out_offset));
                             __ Branch(&end);
                             __ bind(&fail);
                             __ Usw(zero_reg, MemOperand(a0, out_offset));
                             __ bind(&end);
                           }));
      }
    }
  }
}
| 1660 |
| 1661 TEST(Uld) { |
| 1662 CcTest::InitializeVM(); |
| 1663 |
| 1664 static const int kBufferSize = 300 * KB; |
| 1665 char memory_buffer[kBufferSize]; |
| 1666 char* buffer_middle = memory_buffer + (kBufferSize / 2); |
| 1667 |
| 1668 FOR_UINT64_INPUTS(i, unsigned_test_values) { |
| 1669 FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { |
| 1670 FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { |
| 1671 uint64_t value = *i; |
| 1672 int32_t in_offset = *j1 + *k1; |
| 1673 int32_t out_offset = *j2 + *k2; |
| 1674 |
| 1675 CHECK_EQ(true, run_Unaligned<uint64_t>( |
| 1676 buffer_middle, in_offset, out_offset, value, |
| 1677 [](MacroAssembler* masm, int32_t in_offset, |
| 1678 int32_t out_offset) { |
| 1679 __ Uld(v0, MemOperand(a0, in_offset)); |
| 1680 __ Usd(v0, MemOperand(a0, out_offset)); |
| 1681 })); |
| 1682 CHECK_EQ(true, |
| 1683 run_Unaligned<uint64_t>( |
| 1684 buffer_middle, in_offset, out_offset, (uint32_t)value, |
| 1685 [](MacroAssembler* masm, int32_t in_offset, |
| 1686 int32_t out_offset) { |
| 1687 __ mov(t0, a0); |
| 1688 __ Uld(a0, MemOperand(a0, in_offset)); |
| 1689 __ Usd(a0, MemOperand(t0, out_offset)); |
| 1690 })); |
| 1691 } |
| 1692 } |
| 1693 } |
| 1694 } |
| 1695 |
// Round-trips single-precision FP values through Ulwc1/Uswc1 (unaligned
// load/store word to/from an FPU register, with t0 as the integer scratch
// register) at a grid of unaligned offsets.
TEST(Ulwc1) {
  CcTest::InitializeVM();

  static const int kBufferSize = 300 * KB;
  char memory_buffer[kBufferSize];
  char* buffer_middle = memory_buffer + (kBufferSize / 2);

  FOR_UINT64_INPUTS(i, unsigned_test_values) {
    FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
      FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
        // Derive a float payload from the low 32 bits of the test pattern.
        float value = static_cast<float>(*i & 0xFFFFFFFF);
        int32_t in_offset = *j1 + *k1;
        int32_t out_offset = *j2 + *k2;

        CHECK_EQ(true, run_Unaligned<float>(
                           buffer_middle, in_offset, out_offset, value,
                           [](MacroAssembler* masm, int32_t in_offset,
                              int32_t out_offset) {
                             __ Ulwc1(f0, MemOperand(a0, in_offset), t0);
                             __ Uswc1(f0, MemOperand(a0, out_offset), t0);
                           }));
      }
    }
  }
}
| 1721 |
// Round-trips double-precision FP values through Uldc1/Usdc1 (unaligned
// load/store doubleword to/from an FPU register, with t0 as the integer
// scratch register) at a grid of unaligned offsets.
TEST(Uldc1) {
  CcTest::InitializeVM();

  static const int kBufferSize = 300 * KB;
  char memory_buffer[kBufferSize];
  char* buffer_middle = memory_buffer + (kBufferSize / 2);

  FOR_UINT64_INPUTS(i, unsigned_test_values) {
    FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
      FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
        // Derive a double payload from the full 64-bit test pattern.
        double value = static_cast<double>(*i);
        int32_t in_offset = *j1 + *k1;
        int32_t out_offset = *j2 + *k2;

        CHECK_EQ(true, run_Unaligned<double>(
                           buffer_middle, in_offset, out_offset, value,
                           [](MacroAssembler* masm, int32_t in_offset,
                              int32_t out_offset) {
                             __ Uldc1(f0, MemOperand(a0, in_offset), t0);
                             __ Usdc1(f0, MemOperand(a0, out_offset), t0);
                           }));
      }
    }
  }
}
| 1747 |
1399 #undef __ | 1748 #undef __ |
OLD | NEW |