OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1283 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1294 float vabsf[4], vnegf[4]; | 1294 float vabsf[4], vnegf[4]; |
1295 uint32_t vabs_s8[4], vabs_s16[4], vabs_s32[4]; | 1295 uint32_t vabs_s8[4], vabs_s16[4], vabs_s32[4]; |
1296 uint32_t vneg_s8[4], vneg_s16[4], vneg_s32[4]; | 1296 uint32_t vneg_s8[4], vneg_s16[4], vneg_s32[4]; |
1297 uint32_t veor[4], vand[4], vorr[4]; | 1297 uint32_t veor[4], vand[4], vorr[4]; |
1298 float vdupf[4], vaddf[4], vsubf[4], vmulf[4]; | 1298 float vdupf[4], vaddf[4], vsubf[4], vmulf[4]; |
1299 uint32_t vmin_s8[4], vmin_u16[4], vmin_s32[4]; | 1299 uint32_t vmin_s8[4], vmin_u16[4], vmin_s32[4]; |
1300 uint32_t vmax_s8[4], vmax_u16[4], vmax_s32[4]; | 1300 uint32_t vmax_s8[4], vmax_u16[4], vmax_s32[4]; |
1301 uint32_t vadd8[4], vadd16[4], vadd32[4]; | 1301 uint32_t vadd8[4], vadd16[4], vadd32[4]; |
1302 uint32_t vsub8[4], vsub16[4], vsub32[4]; | 1302 uint32_t vsub8[4], vsub16[4], vsub32[4]; |
1303 uint32_t vmul8[4], vmul16[4], vmul32[4]; | 1303 uint32_t vmul8[4], vmul16[4], vmul32[4]; |
 | 1304 uint32_t vshl8[4], vshl16[4], vshl32[4]; |
 | 1305 uint32_t vshr_s8[4], vshr_u16[4], vshr_s32[4]; |
1304 uint32_t vceq[4], vceqf[4], vcgef[4], vcgtf[4]; | 1306 uint32_t vceq[4], vceqf[4], vcgef[4], vcgtf[4]; |
1305 uint32_t vcge_s8[4], vcge_u16[4], vcge_s32[4]; | 1307 uint32_t vcge_s8[4], vcge_u16[4], vcge_s32[4]; |
1306 uint32_t vcgt_s8[4], vcgt_u16[4], vcgt_s32[4]; | 1308 uint32_t vcgt_s8[4], vcgt_u16[4], vcgt_s32[4]; |
1307 float vrecpe[4], vrecps[4], vrsqrte[4], vrsqrts[4]; | 1309 float vrecpe[4], vrecps[4], vrsqrte[4], vrsqrts[4]; |
1308 float vminf[4], vmaxf[4]; | 1310 float vminf[4], vmaxf[4]; |
1309 uint32_t vtst[4], vbsl[4]; | 1311 uint32_t vtst[4], vbsl[4]; |
1310 uint32_t vext[4]; | 1312 uint32_t vext[4]; |
1311 uint32_t vzip8a[4], vzip8b[4], vzip16a[4], vzip16b[4], vzip32a[4], | 1313 uint32_t vzip8a[4], vzip8b[4], vzip16a[4], vzip16b[4], vzip32a[4], |
1312 vzip32b[4]; | 1314 vzip32b[4]; |
1313 uint32_t vrev64_32[4], vrev64_16[4], vrev64_8[4]; | 1315 uint32_t vrev64_32[4], vrev64_16[4], vrev64_8[4]; |
(...skipping 350 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1664 __ vdup(Neon16, q0, r4); | 1666 __ vdup(Neon16, q0, r4); |
1665 __ vmul(Neon16, q1, q0, q0); | 1667 __ vmul(Neon16, q1, q0, q0); |
1666 __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmul16)))); | 1668 __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmul16)))); |
1667 __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4)); | 1669 __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4)); |
1668 __ mov(r4, Operand(0x00000002)); | 1670 __ mov(r4, Operand(0x00000002)); |
1669 __ vdup(Neon32, q0, r4); | 1671 __ vdup(Neon32, q0, r4); |
1670 __ vmul(Neon32, q1, q0, q0); | 1672 __ vmul(Neon32, q1, q0, q0); |
1671 __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmul32)))); | 1673 __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmul32)))); |
1672 __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4)); | 1674 __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4)); |
1673 | 1675 |
| 1676 // vshl. |
| 1677 __ mov(r4, Operand(0x55)); |
| 1678 __ vdup(Neon8, q0, r4); |
| 1679 __ vshl(NeonS8, q1, q0, 1); |
| 1680 __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vshl8)))); |
| 1681 __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4)); |
| 1682 __ vshl(NeonU16, q1, q0, 9); |
| 1683 __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vshl16)))); |
| 1684 __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4)); |
| 1685 __ vshl(NeonS32, q1, q0, 17); |
| 1686 __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vshl32)))); |
| 1687 __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4)); |
| 1688 |
| 1689 // vshr.s, vshr.u. |
| 1690 __ mov(r4, Operand(0x80)); |
| 1691 __ vdup(Neon8, q0, r4); |
| 1692 __ vshr(NeonS8, q1, q0, 1); |
| 1693 __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vshr_s8)))); |
| 1694 __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4)); |
| 1695 __ vshr(NeonU16, q1, q0, 9); |
| 1696 __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vshr_u16)))); |
| 1697 __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4)); |
| 1698 __ vshr(NeonS32, q1, q0, 17); |
| 1699 __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vshr_s32)))); |
| 1700 __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4)); |
| 1701 |
1674 // vceq. | 1702 // vceq. |
1675 __ mov(r4, Operand(0x03)); | 1703 __ mov(r4, Operand(0x03)); |
1676 __ vdup(Neon8, q0, r4); | 1704 __ vdup(Neon8, q0, r4); |
1677 __ vdup(Neon16, q1, r4); | 1705 __ vdup(Neon16, q1, r4); |
1678 __ vceq(Neon8, q1, q0, q1); | 1706 __ vceq(Neon8, q1, q0, q1); |
1679 __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vceq)))); | 1707 __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vceq)))); |
1680 __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4)); | 1708 __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4)); |
1681 | 1709 |
1682 // vcge/vcgt (integer). | 1710 // vcge/vcgt (integer). |
1683 __ mov(r4, Operand(0x03)); | 1711 __ mov(r4, Operand(0x03)); |
(...skipping 235 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1919 CHECK_EQ_SPLAT(vmax_s32, 0xffu); | 1947 CHECK_EQ_SPLAT(vmax_s32, 0xffu); |
1920 CHECK_EQ_SPLAT(vadd8, 0x03030303u); | 1948 CHECK_EQ_SPLAT(vadd8, 0x03030303u); |
1921 CHECK_EQ_SPLAT(vadd16, 0x00030003u); | 1949 CHECK_EQ_SPLAT(vadd16, 0x00030003u); |
1922 CHECK_EQ_SPLAT(vadd32, 0x00000003u); | 1950 CHECK_EQ_SPLAT(vadd32, 0x00000003u); |
1923 CHECK_EQ_SPLAT(vsub8, 0xfefefefeu); | 1951 CHECK_EQ_SPLAT(vsub8, 0xfefefefeu); |
1924 CHECK_EQ_SPLAT(vsub16, 0xfffefffeu); | 1952 CHECK_EQ_SPLAT(vsub16, 0xfffefffeu); |
1925 CHECK_EQ_SPLAT(vsub32, 0xfffffffeu); | 1953 CHECK_EQ_SPLAT(vsub32, 0xfffffffeu); |
1926 CHECK_EQ_SPLAT(vmul8, 0x04040404u); | 1954 CHECK_EQ_SPLAT(vmul8, 0x04040404u); |
1927 CHECK_EQ_SPLAT(vmul16, 0x00040004u); | 1955 CHECK_EQ_SPLAT(vmul16, 0x00040004u); |
1928 CHECK_EQ_SPLAT(vmul32, 0x00000004u); | 1956 CHECK_EQ_SPLAT(vmul32, 0x00000004u); |
| 1957 CHECK_EQ_SPLAT(vshl8, 0xaaaaaaaau); |
| 1958 CHECK_EQ_SPLAT(vshl16, 0xaa00aa00u); |
| 1959 CHECK_EQ_SPLAT(vshl32, 0xaaaa0000u); |
| 1960 CHECK_EQ_SPLAT(vshr_s8, 0xc0c0c0c0u); |
| 1961 CHECK_EQ_SPLAT(vshr_u16, 0x00400040u); |
| 1962 CHECK_EQ_SPLAT(vshr_s32, 0xffffc040u); |
1929 CHECK_EQ_SPLAT(vceq, 0x00ff00ffu); | 1963 CHECK_EQ_SPLAT(vceq, 0x00ff00ffu); |
1930 // [0, 3, 0, 3, ...] >= [3, 3, 3, 3, ...] | 1964 // [0, 3, 0, 3, ...] >= [3, 3, 3, 3, ...] |
1931 CHECK_EQ_SPLAT(vcge_s8, 0x00ff00ffu); | 1965 CHECK_EQ_SPLAT(vcge_s8, 0x00ff00ffu); |
1932 CHECK_EQ_SPLAT(vcgt_s8, 0u); | 1966 CHECK_EQ_SPLAT(vcgt_s8, 0u); |
1933 // [0x00ff, 0x00ff, ...] >= [0xffff, 0xffff, ...] | 1967 // [0x00ff, 0x00ff, ...] >= [0xffff, 0xffff, ...] |
1934 CHECK_EQ_SPLAT(vcge_u16, 0u); | 1968 CHECK_EQ_SPLAT(vcge_u16, 0u); |
1935 CHECK_EQ_SPLAT(vcgt_u16, 0u); | 1969 CHECK_EQ_SPLAT(vcgt_u16, 0u); |
1936 // [0x000000ff, 0x000000ff, ...] >= [0xffffffff, 0xffffffff, ...] | 1970 // [0x000000ff, 0x000000ff, ...] >= [0xffffffff, 0xffffffff, ...] |
1937 CHECK_EQ_SPLAT(vcge_s32, 0xffffffffu); | 1971 CHECK_EQ_SPLAT(vcge_s32, 0xffffffffu); |
1938 CHECK_EQ_SPLAT(vcgt_s32, 0xffffffffu); | 1972 CHECK_EQ_SPLAT(vcgt_s32, 0xffffffffu); |
(...skipping 1699 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3638 HandleScope scope(isolate); | 3672 HandleScope scope(isolate); |
3639 | 3673 |
3640 Assembler assm(isolate, NULL, 0); | 3674 Assembler assm(isolate, NULL, 0); |
3641 __ mov(r0, Operand(isolate->factory()->infinity_value())); | 3675 __ mov(r0, Operand(isolate->factory()->infinity_value())); |
3642 __ BlockConstPoolFor(1019); | 3676 __ BlockConstPoolFor(1019); |
3643 for (int i = 0; i < 1019; ++i) __ nop(); | 3677 for (int i = 0; i < 1019; ++i) __ nop(); |
3644 __ vldr(d0, MemOperand(r0, 0)); | 3678 __ vldr(d0, MemOperand(r0, 0)); |
3645 } | 3679 } |
3646 | 3680 |
3647 #undef __ | 3681 #undef __ |
OLD | NEW |