Chromium Code Reviews

Diff: test/cctest/test-assembler-arm.cc

Issue 2801183002: [WASM SIMD] Implement primitive shuffles. (Closed)
Patch Set: Fix ARM release build. Created 3 years, 8 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 1300 matching lines...)
     uint32_t vshr_s8[4], vshr_u16[4], vshr_s32[5];
     uint32_t vceq[4], vceqf[4], vcgef[4], vcgtf[4];
     uint32_t vcge_s8[4], vcge_u16[4], vcge_s32[4];
     uint32_t vcgt_s8[4], vcgt_u16[4], vcgt_s32[4];
     float vrecpe[4], vrecps[4], vrsqrte[4], vrsqrts[4];
     float vminf[4], vmaxf[4];
     uint32_t vtst[4], vbsl[4];
     uint32_t vext[4];
     uint32_t vzip8a[4], vzip8b[4], vzip16a[4], vzip16b[4], vzip32a[4],
         vzip32b[4];
+    uint32_t vzipd8a[2], vzipd8b[2], vzipd16a[2], vzipd16b[2], vzipd32a[2],
+        vzipd32b[2];
     uint32_t vuzp8a[4], vuzp8b[4], vuzp16a[4], vuzp16b[4], vuzp32a[4],
         vuzp32b[4];
+    uint32_t vuzpd8a[2], vuzpd8b[2], vuzpd16a[2], vuzpd16b[2], vuzpd32a[2],
+        vuzpd32b[2];
     uint32_t vrev64_32[4], vrev64_16[4], vrev64_8[4];
     uint32_t vrev32_16[4], vrev32_8[4], vrev16_8[4];
     uint32_t vtrn8a[4], vtrn8b[4], vtrn16a[4], vtrn16b[4], vtrn32a[4],
         vtrn32b[4];
+    uint32_t vtrnd8a[2], vtrnd8b[2], vtrnd16a[2], vtrnd16b[2], vtrnd32a[2],
+        vtrnd32b[2];
     uint32_t vtbl[2], vtbx[2];
   } T;
   T t;

   // Create a function that accepts &t, and loads, manipulates, and stores
   // the doubles, floats, and SIMD values.
   Assembler assm(isolate, NULL, 0);

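Note: the code that turns the assembled buffer into a callable function lies outside this hunk. As a rough sketch of the pattern the cctests in this file follow (the F3 typedef, the NewCode argument list, and the CALL_GENERATED_CODE macro are recalled from other tests and should be treated as assumptions, not as part of this patch):

  // Hedged sketch only; not part of this patch, signatures are assumptions.
  CodeDesc desc;
  assm.GetCode(&desc);  // finalize the assembled buffer
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
  F3 f = FUNCTION_CAST<F3>(code->entry());
  // Run the generated code on &t; it loads inputs from and stores results to t.
  Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
  USE(dummy);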
   if (CpuFeatures::IsSupported(NEON)) {
     CpuFeatureScope scope(&assm, NEON);
(...skipping 512 matching lines...)
     __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));

     // vext.
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, lane_test))));
     __ vld1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
     __ vmov(q1, q0);
     __ vext(q2, q0, q1, 3);
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vext))));
     __ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));

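For reference, vext builds its result from a sliding byte window over the concatenation of the two source registers, starting at the immediate index. A minimal scalar model, given as an editorial sketch rather than code from this patch (vext8_q is a made-up name):

#include <cstdint>
#include <cstring>

// Scalar model of "vext.8 qd, qn, qm, #imm": out[i] = concat(qn, qm)[i + imm].
void vext8_q(const uint8_t qn[16], const uint8_t qm[16], int imm,
             uint8_t out[16]) {
  uint8_t concat[32];
  std::memcpy(concat, qn, 16);
  std::memcpy(concat + 16, qm, 16);
  for (int i = 0; i < 16; i++) out[i] = concat[i + imm];
}
// Here q0 == q1 == bytes {00 .. 0f} and imm == 3, so the result is
// {03 04 .. 0f 00 01 02}, i.e. the words 0x06050403u .. 0x0201000fu checked later.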
-    // vzip.
+    // vzip (q-register).
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, lane_test))));
     __ vld1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
     __ vmov(q1, q0);
     __ vzip(Neon8, q0, q1);
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vzip8a))));
     __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vzip8b))));
     __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, lane_test))));
     __ vld1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
     __ vmov(q1, q0);
     __ vzip(Neon16, q0, q1);
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vzip16a))));
     __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vzip16b))));
     __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, lane_test))));
     __ vld1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
     __ vmov(q1, q0);
     __ vzip(Neon32, q0, q1);
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vzip32a))));
     __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vzip32b))));
     __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));

-    // vuzp.
+    // vzip (d-register).
+    __ vldr(d2, r0, offsetof(T, lane_test));
+    __ vmov(d0, d2);
+    __ vmov(d1, d2);
+    __ vzip(Neon8, d0, d1);
+    __ vstr(d0, r0, offsetof(T, vzipd8a));
+    __ vstr(d1, r0, offsetof(T, vzipd8b));
+    __ vmov(d0, d2);
+    __ vmov(d1, d2);
+    __ vzip(Neon16, d0, d1);
+    __ vstr(d0, r0, offsetof(T, vzipd16a));
+    __ vstr(d1, r0, offsetof(T, vzipd16b));
+    __ vmov(d0, d2);
+    __ vmov(d1, d2);
+    __ vzip(Neon32, d0, d1);
+    __ vstr(d0, r0, offsetof(T, vzipd32a));
+    __ vstr(d1, r0, offsetof(T, vzipd32b));
+
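vzip interleaves the lanes of its two operands in place: the first register receives the low half of the zipped sequence and the second the high half. A scalar model of the new 64-bit (d-register) byte case, offered only as an illustrative sketch (vzip8_d is a made-up name):

#include <cstdint>

// Scalar model of "vzip.8 d0, d1" with eight one-byte lanes per register.
void vzip8_d(uint8_t d0[8], uint8_t d1[8]) {
  uint8_t zipped[16];
  for (int i = 0; i < 8; i++) {
    zipped[2 * i] = d0[i];      // even positions come from the first operand
    zipped[2 * i + 1] = d1[i];  // odd positions come from the second operand
  }
  for (int i = 0; i < 8; i++) {
    d0[i] = zipped[i];      // low half of the interleaved sequence
    d1[i] = zipped[8 + i];  // high half
  }
}
// With d0 == d1 == {00 .. 07}: d0 becomes {00 00 01 01 02 02 03 03} and
// d1 becomes {04 04 05 05 06 06 07 07}, matching the vzipd8a/vzipd8b checks.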
+    // vuzp (q-register).
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, lane_test))));
     __ vld1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
     __ vmov(q1, q0);
     __ vuzp(Neon8, q0, q1);
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vuzp8a))));
     __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vuzp8b))));
     __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, lane_test))));
     __ vld1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
     __ vmov(q1, q0);
     __ vuzp(Neon16, q0, q1);
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vuzp16a))));
     __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vuzp16b))));
     __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, lane_test))));
     __ vld1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
     __ vmov(q1, q0);
     __ vuzp(Neon32, q0, q1);
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vuzp32a))));
     __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vuzp32b))));
     __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));

-    // vtrn.
+    // vuzp (d-register).
+    __ vldr(d2, r0, offsetof(T, lane_test));
+    __ vmov(d0, d2);
+    __ vmov(d1, d2);
+    __ vuzp(Neon8, d0, d1);
+    __ vstr(d0, r0, offsetof(T, vuzpd8a));
+    __ vstr(d1, r0, offsetof(T, vuzpd8b));
+    __ vmov(d0, d2);
+    __ vmov(d1, d2);
+    __ vuzp(Neon16, d0, d1);
+    __ vstr(d0, r0, offsetof(T, vuzpd16a));
+    __ vstr(d1, r0, offsetof(T, vuzpd16b));
+    __ vmov(d0, d2);
+    __ vmov(d1, d2);
+    __ vuzp(Neon32, d0, d1);
+    __ vstr(d0, r0, offsetof(T, vuzpd32a));
+    __ vstr(d1, r0, offsetof(T, vuzpd32b));
+
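vuzp is the inverse operation: it de-interleaves, gathering the even-indexed lanes of the concatenated pair into the first register and the odd-indexed lanes into the second. A scalar sketch of the d-register byte case (vuzp8_d is a made-up name):

#include <cstdint>

// Scalar model of "vuzp.8 d0, d1": even lanes of (d0, d1) to d0, odd lanes to d1.
void vuzp8_d(uint8_t d0[8], uint8_t d1[8]) {
  uint8_t concat[16];
  for (int i = 0; i < 8; i++) {
    concat[i] = d0[i];
    concat[8 + i] = d1[i];
  }
  for (int i = 0; i < 8; i++) {
    d0[i] = concat[2 * i];      // even-indexed lanes
    d1[i] = concat[2 * i + 1];  // odd-indexed lanes
  }
}
// With d0 == d1 == {00 .. 07} both results repeat after four bytes:
// d0 = {00 02 04 06 00 02 04 06} and d1 = {01 03 05 07 01 03 05 07},
// matching the vuzpd8a/vuzpd8b checks.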
+    // vtrn (q-register).
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, lane_test))));
     __ vld1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
     __ vmov(q1, q0);
     __ vtrn(Neon8, q0, q1);
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vtrn8a))));
     __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vtrn8b))));
     __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, lane_test))));
     __ vld1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
     __ vmov(q1, q0);
     __ vtrn(Neon16, q0, q1);
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vtrn16a))));
     __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vtrn16b))));
     __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, lane_test))));
     __ vld1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
     __ vmov(q1, q0);
     __ vtrn(Neon32, q0, q1);
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vtrn32a))));
     __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vtrn32b))));
     __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));

+    // vtrn (d-register).
+    __ vldr(d2, r0, offsetof(T, lane_test));
+    __ vmov(d0, d2);
+    __ vmov(d1, d2);
+    __ vtrn(Neon8, d0, d1);
+    __ vstr(d0, r0, offsetof(T, vtrnd8a));
+    __ vstr(d1, r0, offsetof(T, vtrnd8b));
+    __ vmov(d0, d2);
+    __ vmov(d1, d2);
+    __ vtrn(Neon16, d0, d1);
+    __ vstr(d0, r0, offsetof(T, vtrnd16a));
+    __ vstr(d1, r0, offsetof(T, vtrnd16b));
+    __ vmov(d0, d2);
+    __ vmov(d1, d2);
+    __ vtrn(Neon32, d0, d1);
+    __ vstr(d0, r0, offsetof(T, vtrnd32a));
+    __ vstr(d1, r0, offsetof(T, vtrnd32b));
+
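vtrn treats each pair of lanes in the two registers as a 2x2 matrix and transposes it, which amounts to swapping the odd lanes of the first operand with the even lanes of the second. A scalar sketch of the d-register byte case (vtrn8_d is a made-up name):

#include <cstdint>
#include <utility>

// Scalar model of "vtrn.8 d0, d1": swap d0's odd lanes with d1's even lanes.
void vtrn8_d(uint8_t d0[8], uint8_t d1[8]) {
  for (int i = 0; i < 8; i += 2) {
    std::swap(d0[i + 1], d1[i]);  // transpose the 2x2 block at lanes i, i+1
  }
}
// With d0 == d1 == {00 .. 07}: d0 = {00 00 02 02 04 04 06 06} and
// d1 = {01 01 03 03 05 05 07 07}, matching the vtrnd8a/vtrnd8b checks.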
     // vrev64/32/16
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, lane_test))));
     __ vld1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
     __ vrev64(Neon32, q1, q0);
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vrev64_32))));
     __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
     __ vrev64(Neon16, q1, q0);
     __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vrev64_16))));
     __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
     __ vrev64(Neon8, q1, q0);
(...skipping 186 matching lines...)

     CHECK_EQ_32X4(vext, 0x06050403u, 0x0a090807u, 0x0e0d0c0bu, 0x0201000fu);

     CHECK_EQ_32X4(vzip8a, 0x01010000u, 0x03030202u, 0x05050404u, 0x07070606u);
     CHECK_EQ_32X4(vzip8b, 0x09090808u, 0x0b0b0a0au, 0x0d0d0c0cu, 0x0f0f0e0eu);
     CHECK_EQ_32X4(vzip16a, 0x01000100u, 0x03020302u, 0x05040504u, 0x07060706u);
     CHECK_EQ_32X4(vzip16b, 0x09080908u, 0x0b0a0b0au, 0x0d0c0d0cu, 0x0f0e0f0eu);
     CHECK_EQ_32X4(vzip32a, 0x03020100u, 0x03020100u, 0x07060504u, 0x07060504u);
     CHECK_EQ_32X4(vzip32b, 0x0b0a0908u, 0x0b0a0908u, 0x0f0e0d0cu, 0x0f0e0d0cu);

+    CHECK_EQ_32X2(vzipd8a, 0x01010000u, 0x03030202u);
+    CHECK_EQ_32X2(vzipd8b, 0x05050404u, 0x07070606u);
+    CHECK_EQ_32X2(vzipd16a, 0x01000100u, 0x03020302u);
+    CHECK_EQ_32X2(vzipd16b, 0x05040504u, 0x07060706u);
+    CHECK_EQ_32X2(vzipd32a, 0x03020100u, 0x03020100u);
+    CHECK_EQ_32X2(vzipd32b, 0x07060504u, 0x07060504u);
+
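The CHECK_EQ_32X2/CHECK_EQ_32X4 macros compare 32-bit words, so each group of four result bytes is read as a little-endian word. A tiny helper makes the correspondence with the byte-level sketches explicit (editorial sketch, pack_le is a made-up name):

#include <cstdint>
#include <cstring>

// Read four bytes as one 32-bit word, assuming a little-endian host as in this test.
uint32_t pack_le(const uint8_t b[4]) {
  uint32_t w;
  std::memcpy(&w, b, 4);
  return w;
}
// pack_le on {00, 00, 01, 01} gives 0x01010000u, the first vzipd8a word above.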
     CHECK_EQ_32X4(vuzp8a, 0x06040200u, 0x0e0c0a08u, 0x06040200u, 0x0e0c0a08u);
     CHECK_EQ_32X4(vuzp8b, 0x07050301u, 0x0f0d0b09u, 0x07050301u, 0x0f0d0b09u);
     CHECK_EQ_32X4(vuzp16a, 0x05040100u, 0x0d0c0908u, 0x05040100u, 0x0d0c0908u);
     CHECK_EQ_32X4(vuzp16b, 0x07060302u, 0x0f0e0b0au, 0x07060302u, 0x0f0e0b0au);
     CHECK_EQ_32X4(vuzp32a, 0x03020100u, 0x0b0a0908u, 0x03020100u, 0x0b0a0908u);
     CHECK_EQ_32X4(vuzp32b, 0x07060504u, 0x0f0e0d0cu, 0x07060504u, 0x0f0e0d0cu);

+    CHECK_EQ_32X2(vuzpd8a, 0x06040200u, 0x06040200u);
+    CHECK_EQ_32X2(vuzpd8b, 0x07050301u, 0x07050301u);
+    CHECK_EQ_32X2(vuzpd16a, 0x05040100u, 0x05040100u);
+    CHECK_EQ_32X2(vuzpd16b, 0x07060302u, 0x07060302u);
+    CHECK_EQ_32X2(vuzpd32a, 0x03020100u, 0x03020100u);
+    CHECK_EQ_32X2(vuzpd32b, 0x07060504u, 0x07060504u);
+
     CHECK_EQ_32X4(vtrn8a, 0x02020000u, 0x06060404u, 0x0a0a0808u, 0x0e0e0c0cu);
     CHECK_EQ_32X4(vtrn8b, 0x03030101u, 0x07070505u, 0x0b0b0909u, 0x0f0f0d0du);
     CHECK_EQ_32X4(vtrn16a, 0x01000100u, 0x05040504u, 0x09080908u, 0x0d0c0d0cu);
     CHECK_EQ_32X4(vtrn16b, 0x03020302u, 0x07060706u, 0x0b0a0b0au, 0x0f0e0f0eu);
     CHECK_EQ_32X4(vtrn32a, 0x03020100u, 0x03020100u, 0x0b0a0908u, 0x0b0a0908u);
     CHECK_EQ_32X4(vtrn32b, 0x07060504u, 0x07060504u, 0x0f0e0d0cu, 0x0f0e0d0cu);

+    CHECK_EQ_32X2(vtrnd8a, 0x02020000u, 0x06060404u);
+    CHECK_EQ_32X2(vtrnd8b, 0x03030101u, 0x07070505u);
+    CHECK_EQ_32X2(vtrnd16a, 0x01000100u, 0x05040504u);
+    CHECK_EQ_32X2(vtrnd16b, 0x03020302u, 0x07060706u);
+    CHECK_EQ_32X2(vtrnd32a, 0x03020100u, 0x03020100u);
+    CHECK_EQ_32X2(vtrnd32b, 0x07060504u, 0x07060504u);
+
     // src: 0 1 2 3 4 5 6 7 8 9 a b c d e f (little endian)
     CHECK_EQ_32X4(vrev64_32, 0x07060504u, 0x03020100u, 0x0f0e0d0cu,
                   0x0b0a0908u);
     CHECK_EQ_32X4(vrev64_16, 0x05040706u, 0x01000302u, 0x0d0c0f0eu,
                   0x09080b0au);
     CHECK_EQ_32X4(vrev64_8, 0x04050607u, 0x00010203u, 0x0c0d0e0fu, 0x08090a0bu);
     CHECK_EQ_32X4(vrev32_16, 0x01000302u, 0x05040706u, 0x09080b0au,
                   0x0d0c0f0eu);
     CHECK_EQ_32X4(vrev32_8, 0x00010203u, 0x04050607u, 0x08090a0bu, 0x0c0d0e0fu);
     CHECK_EQ_32X4(vrev16_8, 0x02030001u, 0x06070405u, 0x0a0b0809u, 0x0e0f0c0du);
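The vrev expectations follow from reversing element order within fixed-size groups: vrev64.32 reverses the 32-bit lanes inside every 64-bit chunk, vrev32.16 the 16-bit lanes inside every 32-bit chunk, and vrev16.8 the bytes inside every 16-bit chunk. A generic scalar model, again as an illustrative sketch (vrev_model is a made-up name):

#include <algorithm>
#include <cstdint>

// Scalar model of vrev<group>.<lane>: reverse lane_bytes-sized elements inside
// each group_bytes-sized chunk, e.g. vrev64.32 -> group_bytes = 8, lane_bytes = 4.
void vrev_model(uint8_t* v, int total_bytes, int group_bytes, int lane_bytes) {
  for (int g = 0; g < total_bytes; g += group_bytes) {
    std::reverse(v + g, v + g + group_bytes);    // reverse the whole group
    for (int l = g; l < g + group_bytes; l += lane_bytes) {
      std::reverse(v + l, v + l + lane_bytes);   // restore byte order inside each lane
    }
  }
}
// For src = {00 .. 0f}, vrev_model(v, 16, 8, 4) gives {04 05 06 07 00 01 02 03 ...},
// i.e. the vrev64_32 words 0x07060504u, 0x03020100u, ... checked above.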
(...skipping 1677 matching lines...)
   HandleScope scope(isolate);

   Assembler assm(isolate, NULL, 0);
   __ mov(r0, Operand(isolate->factory()->infinity_value()));
   __ BlockConstPoolFor(1019);
   for (int i = 0; i < 1019; ++i) __ nop();
   __ vldr(d0, MemOperand(r0, 0));
 }

 #undef __
