Chromium Code Reviews

Unified Diff: src/arm64/assembler-arm64.cc

Issue 2622643005: ARM64: Add NEON support (Closed)
Patch Set: Fix Math.abs properly (created 3 years, 8 months ago)
// Copyright 2013 the V8 project authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
(...skipping 71 matching lines...)
  index = kRegListSizeInBits - 1 - index;
  DCHECK((1 << index) & list_);
  Remove(index);
  return CPURegister::Create(index, size_, type_);
}

void CPURegList::RemoveCalleeSaved() {
  if (type() == CPURegister::kRegister) {
    Remove(GetCalleeSaved(RegisterSizeInBits()));
-  } else if (type() == CPURegister::kFPRegister) {
-    Remove(GetCalleeSavedFP(RegisterSizeInBits()));
+  } else if (type() == CPURegister::kVRegister) {
+    Remove(GetCalleeSavedV(RegisterSizeInBits()));
  } else {
    DCHECK(type() == CPURegister::kNoRegister);
    DCHECK(IsEmpty());
    // The list must already be empty, so do nothing.
  }
}

CPURegList CPURegList::GetCalleeSaved(int size) {
  return CPURegList(CPURegister::kRegister, size, 19, 29);
}

-
-CPURegList CPURegList::GetCalleeSavedFP(int size) {
-  return CPURegList(CPURegister::kFPRegister, size, 8, 15);
+CPURegList CPURegList::GetCalleeSavedV(int size) {
+  return CPURegList(CPURegister::kVRegister, size, 8, 15);
 }

CPURegList CPURegList::GetCallerSaved(int size) {
  // Registers x0-x18 and lr (x30) are caller-saved.
  CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
  list.Combine(lr);
  return list;
}

-
-CPURegList CPURegList::GetCallerSavedFP(int size) {
+CPURegList CPURegList::GetCallerSavedV(int size) {
   // Registers d0-d7 and d16-d31 are caller-saved.
-  CPURegList list = CPURegList(CPURegister::kFPRegister, size, 0, 7);
-  list.Combine(CPURegList(CPURegister::kFPRegister, size, 16, 31));
+  CPURegList list = CPURegList(CPURegister::kVRegister, size, 0, 7);
+  list.Combine(CPURegList(CPURegister::kVRegister, size, 16, 31));
   return list;
 }

// This function defines the list of registers which are associated with a
// safepoint slot. Safepoint register slots are saved contiguously on the stack.
// MacroAssembler::SafepointRegisterStackIndex handles mapping from register
// code to index in the safepoint register slots. Any change here can affect
// this mapping.
CPURegList CPURegList::GetSafepointSavedRegisters() {
(...skipping 99 matching lines...)

  RegList unique_regs = 0;
  RegList unique_fpregs = 0;

  const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};

  for (unsigned i = 0; i < arraysize(regs); i++) {
    if (regs[i].IsRegister()) {
      number_of_valid_regs++;
      unique_regs |= regs[i].Bit();
-    } else if (regs[i].IsFPRegister()) {
+    } else if (regs[i].IsVRegister()) {
      number_of_valid_fpregs++;
      unique_fpregs |= regs[i].Bit();
    } else {
      DCHECK(!regs[i].IsValid());
    }
  }

  int number_of_unique_regs =
      CountSetBits(unique_regs, sizeof(unique_regs) * kBitsPerByte);
  int number_of_unique_fpregs =
(...skipping 16 matching lines...)
  match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
  match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
  match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
  match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
  match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
  match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
  match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
  return match;
}

+bool AreSameFormat(const VRegister& reg1, const VRegister& reg2,
+                   const VRegister& reg3, const VRegister& reg4) {
+  DCHECK(reg1.IsValid());
+  return (!reg2.IsValid() || reg2.IsSameFormat(reg1)) &&
+         (!reg3.IsValid() || reg3.IsSameFormat(reg1)) &&
+         (!reg4.IsValid() || reg4.IsSameFormat(reg1));
+}
+
+bool AreConsecutive(const VRegister& reg1, const VRegister& reg2,
+                    const VRegister& reg3, const VRegister& reg4) {
+  DCHECK(reg1.IsValid());
+  if (!reg2.IsValid()) {
+    DCHECK(!reg3.IsValid() && !reg4.IsValid());
+    return true;
+  } else if (reg2.code() != ((reg1.code() + 1) % kNumberOfVRegisters)) {
+    return false;
+  }
+
+  if (!reg3.IsValid()) {
+    DCHECK(!reg4.IsValid());
+    return true;
+  } else if (reg3.code() != ((reg2.code() + 1) % kNumberOfVRegisters)) {
+    return false;
+  }
+
+  if (!reg4.IsValid()) {
+    return true;
+  } else if (reg4.code() != ((reg3.code() + 1) % kNumberOfVRegisters)) {
+    return false;
+  }
+
+  return true;
+}

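Note: AreConsecutive requires each register code to be exactly one above the previous, wrapping at the top of the register file. A standalone sketch of the same wrap-around check (illustration only, not part of this patch; the value 32 matches the 32 ARM64 vector registers):

#include <cassert>

// Standalone sketch of the wrap-around consecutiveness check above.
constexpr int kNumVRegs = 32;

bool CodesAreConsecutive(int code1, int code2) {
  // v31 wraps around to v0, matching the modulo in AreConsecutive.
  return code2 == ((code1 + 1) % kNumVRegs);
}

int main() {
  assert(CodesAreConsecutive(30, 31));
  assert(CodesAreConsecutive(31, 0));  // wrap-around case
  assert(!CodesAreConsecutive(3, 5));
  return 0;
}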
void Immediate::InitializeHandle(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;

  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    value_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
(...skipping 1476 matching lines...)
}

void Assembler::stlxrh(const Register& rs, const Register& rt,
                       const Register& rn) {
  DCHECK(rs.Is32Bits());
  DCHECK(rt.Is32Bits());
  DCHECK(rn.Is64Bits());
  Emit(STLXR_h | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
}

+void Assembler::NEON3DifferentL(const VRegister& vd, const VRegister& vn,
+                                const VRegister& vm, NEON3DifferentOp vop) {
+  DCHECK(AreSameFormat(vn, vm));
+  DCHECK((vn.Is1H() && vd.Is1S()) || (vn.Is1S() && vd.Is1D()) ||
+         (vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) ||
+         (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) ||
+         (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
+  Instr format, op = vop;
+  if (vd.IsScalar()) {
+    op |= NEON_Q | NEONScalar;
+    format = SFormat(vn);
+  } else {
+    format = VFormat(vn);
+  }
+  Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+void Assembler::NEON3DifferentW(const VRegister& vd, const VRegister& vn,
+                                const VRegister& vm, NEON3DifferentOp vop) {
+  DCHECK(AreSameFormat(vd, vn));
+  DCHECK((vm.Is8B() && vd.Is8H()) || (vm.Is4H() && vd.Is4S()) ||
+         (vm.Is2S() && vd.Is2D()) || (vm.Is16B() && vd.Is8H()) ||
+         (vm.Is8H() && vd.Is4S()) || (vm.Is4S() && vd.Is2D()));
+  Emit(VFormat(vm) | vop | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+void Assembler::NEON3DifferentHN(const VRegister& vd, const VRegister& vn,
+                                 const VRegister& vm, NEON3DifferentOp vop) {
+  DCHECK(AreSameFormat(vm, vn));
+  DCHECK((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
+         (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
+         (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
+  Emit(VFormat(vd) | vop | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+#define NEON_3DIFF_LONG_LIST(V)                                               \
+  V(pmull, NEON_PMULL, vn.IsVector() && vn.Is8B())                            \
+  V(pmull2, NEON_PMULL2, vn.IsVector() && vn.Is16B())                         \
+  V(saddl, NEON_SADDL, vn.IsVector() && vn.IsD())                             \
+  V(saddl2, NEON_SADDL2, vn.IsVector() && vn.IsQ())                           \
+  V(sabal, NEON_SABAL, vn.IsVector() && vn.IsD())                             \
+  V(sabal2, NEON_SABAL2, vn.IsVector() && vn.IsQ())                           \
+  V(uabal, NEON_UABAL, vn.IsVector() && vn.IsD())                             \
+  V(uabal2, NEON_UABAL2, vn.IsVector() && vn.IsQ())                           \
+  V(sabdl, NEON_SABDL, vn.IsVector() && vn.IsD())                             \
+  V(sabdl2, NEON_SABDL2, vn.IsVector() && vn.IsQ())                           \
+  V(uabdl, NEON_UABDL, vn.IsVector() && vn.IsD())                             \
+  V(uabdl2, NEON_UABDL2, vn.IsVector() && vn.IsQ())                           \
+  V(smlal, NEON_SMLAL, vn.IsVector() && vn.IsD())                             \
+  V(smlal2, NEON_SMLAL2, vn.IsVector() && vn.IsQ())                           \
+  V(umlal, NEON_UMLAL, vn.IsVector() && vn.IsD())                             \
+  V(umlal2, NEON_UMLAL2, vn.IsVector() && vn.IsQ())                           \
+  V(smlsl, NEON_SMLSL, vn.IsVector() && vn.IsD())                             \
+  V(smlsl2, NEON_SMLSL2, vn.IsVector() && vn.IsQ())                           \
+  V(umlsl, NEON_UMLSL, vn.IsVector() && vn.IsD())                             \
+  V(umlsl2, NEON_UMLSL2, vn.IsVector() && vn.IsQ())                           \
+  V(smull, NEON_SMULL, vn.IsVector() && vn.IsD())                             \
+  V(smull2, NEON_SMULL2, vn.IsVector() && vn.IsQ())                           \
+  V(umull, NEON_UMULL, vn.IsVector() && vn.IsD())                             \
+  V(umull2, NEON_UMULL2, vn.IsVector() && vn.IsQ())                           \
+  V(ssubl, NEON_SSUBL, vn.IsVector() && vn.IsD())                             \
+  V(ssubl2, NEON_SSUBL2, vn.IsVector() && vn.IsQ())                           \
+  V(uaddl, NEON_UADDL, vn.IsVector() && vn.IsD())                             \
+  V(uaddl2, NEON_UADDL2, vn.IsVector() && vn.IsQ())                           \
+  V(usubl, NEON_USUBL, vn.IsVector() && vn.IsD())                             \
+  V(usubl2, NEON_USUBL2, vn.IsVector() && vn.IsQ())                           \
+  V(sqdmlal, NEON_SQDMLAL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S())  \
+  V(sqdmlal2, NEON_SQDMLAL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \
+  V(sqdmlsl, NEON_SQDMLSL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S())  \
+  V(sqdmlsl2, NEON_SQDMLSL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \
+  V(sqdmull, NEON_SQDMULL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S())  \
+  V(sqdmull2, NEON_SQDMULL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S())
+
+#define DEFINE_ASM_FUNC(FN, OP, AS)                            \
+  void Assembler::FN(const VRegister& vd, const VRegister& vn, \
+                     const VRegister& vm) {                    \
+    DCHECK(AS);                                                \
+    NEON3DifferentL(vd, vn, vm, OP);                           \
+  }
+NEON_3DIFF_LONG_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
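The NEON_3DIFF_LONG_LIST X-macro stamps out one emitter per mnemonic. Hand-expanding the saddl entry for illustration (the macro above generates exactly this shape):

void Assembler::saddl(const VRegister& vd, const VRegister& vn,
                      const VRegister& vm) {
  DCHECK(vn.IsVector() && vn.IsD());
  NEON3DifferentL(vd, vn, vm, NEON_SADDL);
}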
1888
1889 #define NEON_3DIFF_HN_LIST(V) \
1890 V(addhn, NEON_ADDHN, vd.IsD()) \
1891 V(addhn2, NEON_ADDHN2, vd.IsQ()) \
1892 V(raddhn, NEON_RADDHN, vd.IsD()) \
1893 V(raddhn2, NEON_RADDHN2, vd.IsQ()) \
1894 V(subhn, NEON_SUBHN, vd.IsD()) \
1895 V(subhn2, NEON_SUBHN2, vd.IsQ()) \
1896 V(rsubhn, NEON_RSUBHN, vd.IsD()) \
1897 V(rsubhn2, NEON_RSUBHN2, vd.IsQ())
1898
1899 #define DEFINE_ASM_FUNC(FN, OP, AS) \
1900 void Assembler::FN(const VRegister& vd, const VRegister& vn, \
1901 const VRegister& vm) { \
1902 DCHECK(AS); \
1903 NEON3DifferentHN(vd, vn, vm, OP); \
1904 }
1905 NEON_3DIFF_HN_LIST(DEFINE_ASM_FUNC)
1906 #undef DEFINE_ASM_FUNC
1907
1908 void Assembler::NEONPerm(const VRegister& vd, const VRegister& vn,
1909 const VRegister& vm, NEONPermOp op) {
1910 DCHECK(AreSameFormat(vd, vn, vm));
1911 DCHECK(!vd.Is1D());
1912 Emit(VFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
1913 }
1914
1915 void Assembler::trn1(const VRegister& vd, const VRegister& vn,
1916 const VRegister& vm) {
1917 NEONPerm(vd, vn, vm, NEON_TRN1);
1918 }
1919
1920 void Assembler::trn2(const VRegister& vd, const VRegister& vn,
1921 const VRegister& vm) {
1922 NEONPerm(vd, vn, vm, NEON_TRN2);
1923 }
1924
1925 void Assembler::uzp1(const VRegister& vd, const VRegister& vn,
1926 const VRegister& vm) {
1927 NEONPerm(vd, vn, vm, NEON_UZP1);
1928 }
1929
1930 void Assembler::uzp2(const VRegister& vd, const VRegister& vn,
1931 const VRegister& vm) {
1932 NEONPerm(vd, vn, vm, NEON_UZP2);
1933 }
1934
1935 void Assembler::zip1(const VRegister& vd, const VRegister& vn,
1936 const VRegister& vm) {
1937 NEONPerm(vd, vn, vm, NEON_ZIP1);
1938 }
1939
1940 void Assembler::zip2(const VRegister& vd, const VRegister& vn,
1941 const VRegister& vm) {
1942 NEONPerm(vd, vn, vm, NEON_ZIP2);
1943 }
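For reference, the permute operations interleave or de-interleave lanes. A standalone sketch of zip1/uzp1 reference semantics on 4-lane arrays (illustration only, independent of the assembler):

#include <array>
#include <cassert>

// zip1 interleaves the low halves of two vectors; uzp1 keeps the
// even-indexed lanes of the concatenation of the two vectors.
std::array<int, 4> Zip1(std::array<int, 4> n, std::array<int, 4> m) {
  return {n[0], m[0], n[1], m[1]};
}

std::array<int, 4> Uzp1(std::array<int, 4> n, std::array<int, 4> m) {
  return {n[0], n[2], m[0], m[2]};
}

int main() {
  std::array<int, 4> a{0, 1, 2, 3}, b{4, 5, 6, 7};
  assert((Zip1(a, b) == std::array<int, 4>{0, 4, 1, 5}));
  assert((Uzp1(a, b) == std::array<int, 4>{0, 2, 4, 6}));
  return 0;
}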
+
+void Assembler::NEONShiftImmediate(const VRegister& vd, const VRegister& vn,
+                                   NEONShiftImmediateOp op, int immh_immb) {
+  DCHECK(AreSameFormat(vd, vn));
+  Instr q, scalar;
+  if (vn.IsScalar()) {
+    q = NEON_Q;
+    scalar = NEONScalar;
+  } else {
+    q = vd.IsD() ? 0 : NEON_Q;
+    scalar = 0;
+  }
+  Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
+}
+
+void Assembler::NEONShiftLeftImmediate(const VRegister& vd, const VRegister& vn,
+                                       int shift, NEONShiftImmediateOp op) {
+  int laneSizeInBits = vn.LaneSizeInBits();
+  DCHECK((shift >= 0) && (shift < laneSizeInBits));
+  NEONShiftImmediate(vd, vn, op, (laneSizeInBits + shift) << 16);
+}
+
+void Assembler::NEONShiftRightImmediate(const VRegister& vd,
+                                        const VRegister& vn, int shift,
+                                        NEONShiftImmediateOp op) {
+  int laneSizeInBits = vn.LaneSizeInBits();
+  DCHECK((shift >= 1) && (shift <= laneSizeInBits));
+  NEONShiftImmediate(vd, vn, op, ((2 * laneSizeInBits) - shift) << 16);
+}
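Left shifts encode the shift amount as laneSize + shift in the immh:immb field (shifted to bit 16); right shifts encode it as 2 * laneSize - shift, as above. A standalone sketch of the two encodings (illustration only):

#include <cassert>
#include <cstdint>

// Standalone sketch of the immh:immb shift encodings used above; the
// combined field sits at bit 16 of the instruction word.
uint32_t EncodeLeftShift(int lane_size_in_bits, int shift) {
  assert(shift >= 0 && shift < lane_size_in_bits);
  return static_cast<uint32_t>(lane_size_in_bits + shift) << 16;
}

uint32_t EncodeRightShift(int lane_size_in_bits, int shift) {
  assert(shift >= 1 && shift <= lane_size_in_bits);
  return static_cast<uint32_t>(2 * lane_size_in_bits - shift) << 16;
}

int main() {
  // shl v0.4s, v1.4s, #3: lane size 32, immh:immb = 32 + 3 = 35.
  assert(EncodeLeftShift(32, 3) == 35u << 16);
  // sshr v0.4s, v1.4s, #3: immh:immb = 64 - 3 = 61.
  assert(EncodeRightShift(32, 3) == 61u << 16);
  return 0;
}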
+
+void Assembler::NEONShiftImmediateL(const VRegister& vd, const VRegister& vn,
+                                    int shift, NEONShiftImmediateOp op) {
+  int laneSizeInBits = vn.LaneSizeInBits();
+  DCHECK((shift >= 0) && (shift < laneSizeInBits));
+  int immh_immb = (laneSizeInBits + shift) << 16;
+
+  DCHECK((vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) ||
+         (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) ||
+         (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
+  Instr q;
+  q = vn.IsD() ? 0 : NEON_Q;
+  Emit(q | op | immh_immb | Rn(vn) | Rd(vd));
+}
+
+void Assembler::NEONShiftImmediateN(const VRegister& vd, const VRegister& vn,
+                                    int shift, NEONShiftImmediateOp op) {
+  Instr q, scalar;
+  int laneSizeInBits = vd.LaneSizeInBits();
+  DCHECK((shift >= 1) && (shift <= laneSizeInBits));
+  int immh_immb = (2 * laneSizeInBits - shift) << 16;
+
+  if (vn.IsScalar()) {
+    DCHECK((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) ||
+           (vd.Is1S() && vn.Is1D()));
+    q = NEON_Q;
+    scalar = NEONScalar;
+  } else {
+    DCHECK((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
+           (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
+           (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
+    scalar = 0;
+    q = vd.IsD() ? 0 : NEON_Q;
+  }
+  Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
+}
+
+void Assembler::shl(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vd.IsVector() || vd.Is1D());
+  NEONShiftLeftImmediate(vd, vn, shift, NEON_SHL);
+}
+
+void Assembler::sli(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vd.IsVector() || vd.Is1D());
+  NEONShiftLeftImmediate(vd, vn, shift, NEON_SLI);
+}
+
+void Assembler::sqshl(const VRegister& vd, const VRegister& vn, int shift) {
+  NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHL_imm);
+}
+
+void Assembler::sqshlu(const VRegister& vd, const VRegister& vn, int shift) {
+  NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHLU);
+}
+
+void Assembler::uqshl(const VRegister& vd, const VRegister& vn, int shift) {
+  NEONShiftLeftImmediate(vd, vn, shift, NEON_UQSHL_imm);
+}
+
+void Assembler::sshll(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vn.IsD());
+  NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
+}
+
+void Assembler::sshll2(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vn.IsQ());
+  NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
+}
+
+void Assembler::sxtl(const VRegister& vd, const VRegister& vn) {
+  sshll(vd, vn, 0);
+}
+
+void Assembler::sxtl2(const VRegister& vd, const VRegister& vn) {
+  sshll2(vd, vn, 0);
+}
+
+void Assembler::ushll(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vn.IsD());
+  NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
+}
+
+void Assembler::ushll2(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vn.IsQ());
+  NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
+}
+
+void Assembler::uxtl(const VRegister& vd, const VRegister& vn) {
+  ushll(vd, vn, 0);
+}
+
+void Assembler::uxtl2(const VRegister& vd, const VRegister& vn) {
+  ushll2(vd, vn, 0);
+}
+
+void Assembler::sri(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vd.IsVector() || vd.Is1D());
+  NEONShiftRightImmediate(vd, vn, shift, NEON_SRI);
+}
+
+void Assembler::sshr(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vd.IsVector() || vd.Is1D());
+  NEONShiftRightImmediate(vd, vn, shift, NEON_SSHR);
+}
+
+void Assembler::ushr(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vd.IsVector() || vd.Is1D());
+  NEONShiftRightImmediate(vd, vn, shift, NEON_USHR);
+}
+
+void Assembler::srshr(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vd.IsVector() || vd.Is1D());
+  NEONShiftRightImmediate(vd, vn, shift, NEON_SRSHR);
+}
+
+void Assembler::urshr(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vd.IsVector() || vd.Is1D());
+  NEONShiftRightImmediate(vd, vn, shift, NEON_URSHR);
+}
+
+void Assembler::ssra(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vd.IsVector() || vd.Is1D());
+  NEONShiftRightImmediate(vd, vn, shift, NEON_SSRA);
+}
+
+void Assembler::usra(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vd.IsVector() || vd.Is1D());
+  NEONShiftRightImmediate(vd, vn, shift, NEON_USRA);
+}
+
+void Assembler::srsra(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vd.IsVector() || vd.Is1D());
+  NEONShiftRightImmediate(vd, vn, shift, NEON_SRSRA);
+}
+
+void Assembler::ursra(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vd.IsVector() || vd.Is1D());
+  NEONShiftRightImmediate(vd, vn, shift, NEON_URSRA);
+}
+
+void Assembler::shrn(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vn.IsVector() && vd.IsD());
+  NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
+}
+
+void Assembler::shrn2(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vn.IsVector() && vd.IsQ());
+  NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
+}
+
+void Assembler::rshrn(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vn.IsVector() && vd.IsD());
+  NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
+}
+
+void Assembler::rshrn2(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vn.IsVector() && vd.IsQ());
+  NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
+}
+
+void Assembler::sqshrn(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+  NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
+}
+
+void Assembler::sqshrn2(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vn.IsVector() && vd.IsQ());
+  NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
+}
+
+void Assembler::sqrshrn(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+  NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
+}
+
+void Assembler::sqrshrn2(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vn.IsVector() && vd.IsQ());
+  NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
+}
+
+void Assembler::sqshrun(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+  NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
+}
+
+void Assembler::sqshrun2(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vn.IsVector() && vd.IsQ());
+  NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
+}
+
+void Assembler::sqrshrun(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+  NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
+}
+
+void Assembler::sqrshrun2(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vn.IsVector() && vd.IsQ());
+  NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
+}
+
+void Assembler::uqshrn(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+  NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
+}
+
+void Assembler::uqshrn2(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vn.IsVector() && vd.IsQ());
+  NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
+}
+
+void Assembler::uqrshrn(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+  NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
+}
+
+void Assembler::uqrshrn2(const VRegister& vd, const VRegister& vn, int shift) {
+  DCHECK(vn.IsVector() && vd.IsQ());
+  NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
+}
+
+void Assembler::uaddw(const VRegister& vd, const VRegister& vn,
+                      const VRegister& vm) {
+  DCHECK(vm.IsD());
+  NEON3DifferentW(vd, vn, vm, NEON_UADDW);
+}
+
+void Assembler::uaddw2(const VRegister& vd, const VRegister& vn,
+                       const VRegister& vm) {
+  DCHECK(vm.IsQ());
+  NEON3DifferentW(vd, vn, vm, NEON_UADDW2);
+}
+
+void Assembler::saddw(const VRegister& vd, const VRegister& vn,
+                      const VRegister& vm) {
+  DCHECK(vm.IsD());
+  NEON3DifferentW(vd, vn, vm, NEON_SADDW);
+}
+
+void Assembler::saddw2(const VRegister& vd, const VRegister& vn,
+                       const VRegister& vm) {
+  DCHECK(vm.IsQ());
+  NEON3DifferentW(vd, vn, vm, NEON_SADDW2);
+}
+
+void Assembler::usubw(const VRegister& vd, const VRegister& vn,
+                      const VRegister& vm) {
+  DCHECK(vm.IsD());
+  NEON3DifferentW(vd, vn, vm, NEON_USUBW);
+}
+
+void Assembler::usubw2(const VRegister& vd, const VRegister& vn,
+                       const VRegister& vm) {
+  DCHECK(vm.IsQ());
+  NEON3DifferentW(vd, vn, vm, NEON_USUBW2);
+}
+
+void Assembler::ssubw(const VRegister& vd, const VRegister& vn,
+                      const VRegister& vm) {
+  DCHECK(vm.IsD());
+  NEON3DifferentW(vd, vn, vm, NEON_SSUBW);
+}
+
+void Assembler::ssubw2(const VRegister& vd, const VRegister& vn,
+                       const VRegister& vm) {
+  DCHECK(vm.IsQ());
+  NEON3DifferentW(vd, vn, vm, NEON_SSUBW2);
+}
+
void Assembler::mov(const Register& rd, const Register& rm) {
  // Moves involving the stack pointer are encoded as add immediate with
  // second operand of zero. Otherwise, orr with first operand zr is
  // used.
  if (rd.IsSP() || rm.IsSP()) {
    add(rd, rm, 0);
  } else {
    orr(rd, AppropriateZeroRegFor(rd), rm);
  }
}
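The split exists because logical instructions such as orr read register 31 as zr, while add-immediate reads it as sp, so a move touching the stack pointer must use the add form. A standalone sketch of the selection (illustration only):

#include <cassert>
#include <string>

// Mirrors the encoding choice in Assembler::mov above.
std::string SelectMovEncoding(bool rd_is_sp, bool rm_is_sp) {
  return (rd_is_sp || rm_is_sp) ? "add rd, rm, #0" : "orr rd, zr, rm";
}

int main() {
  assert(SelectMovEncoding(false, false) == "orr rd, zr, rm");
  assert(SelectMovEncoding(true, false) == "add rd, rm, #0");
  return 0;
}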

+void Assembler::ins(const VRegister& vd, int vd_index, const Register& rn) {
+  // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
+  // number of lanes, and T is b, h, s or d.
+  int lane_size = vd.LaneSizeInBytes();
+  NEONFormatField format;
+  switch (lane_size) {
+    case 1:
+      format = NEON_16B;
+      DCHECK(rn.IsW());
+      break;
+    case 2:
+      format = NEON_8H;
+      DCHECK(rn.IsW());
+      break;
+    case 4:
+      format = NEON_4S;
+      DCHECK(rn.IsW());
+      break;
+    default:
+      DCHECK_EQ(lane_size, 8);
+      DCHECK(rn.IsX());
+      format = NEON_2D;
+      break;
+  }
+
+  DCHECK((0 <= vd_index) &&
+         (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+  Emit(NEON_INS_GENERAL | ImmNEON5(format, vd_index) | Rn(rn) | Rd(vd));
+}
+
+void Assembler::mov(const Register& rd, const VRegister& vn, int vn_index) {
+  DCHECK_GE(vn.SizeInBytes(), 4);
+  umov(rd, vn, vn_index);
+}
+
+void Assembler::smov(const Register& rd, const VRegister& vn, int vn_index) {
+  // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
+  // number of lanes, and T is b, h, s.
+  int lane_size = vn.LaneSizeInBytes();
+  NEONFormatField format;
+  Instr q = 0;
+  switch (lane_size) {
+    case 1:
+      format = NEON_16B;
+      break;
+    case 2:
+      format = NEON_8H;
+      break;
+    default:
+      DCHECK_EQ(lane_size, 4);
+      DCHECK(rd.IsX());
+      format = NEON_4S;
+      break;
+  }
+  q = rd.IsW() ? 0 : NEON_Q;
+  DCHECK((0 <= vn_index) &&
+         (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+  Emit(q | NEON_SMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
+}
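smov sign-extends the selected lane into the destination, where umov (defined further down) zero-extends it. A standalone sketch of the two semantics for a byte lane (illustration only):

#include <cassert>
#include <cstdint>

// Reference semantics for moving a byte lane into a 32-bit register.
int32_t SmovB(uint8_t lane) { return static_cast<int8_t>(lane); }  // sign-extend
uint32_t UmovB(uint8_t lane) { return lane; }                      // zero-extend

int main() {
  assert(SmovB(0xff) == -1);
  assert(UmovB(0xff) == 0xffu);
  return 0;
}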
+
+void Assembler::cls(const VRegister& vd, const VRegister& vn) {
+  DCHECK(AreSameFormat(vd, vn));
+  DCHECK(!vd.Is1D() && !vd.Is2D());
+  Emit(VFormat(vn) | NEON_CLS | Rn(vn) | Rd(vd));
+}
+
+void Assembler::clz(const VRegister& vd, const VRegister& vn) {
+  DCHECK(AreSameFormat(vd, vn));
+  DCHECK(!vd.Is1D() && !vd.Is2D());
+  Emit(VFormat(vn) | NEON_CLZ | Rn(vn) | Rd(vd));
+}
+
+void Assembler::cnt(const VRegister& vd, const VRegister& vn) {
+  DCHECK(AreSameFormat(vd, vn));
+  DCHECK(vd.Is8B() || vd.Is16B());
+  Emit(VFormat(vn) | NEON_CNT | Rn(vn) | Rd(vd));
+}
+
+void Assembler::rev16(const VRegister& vd, const VRegister& vn) {
+  DCHECK(AreSameFormat(vd, vn));
+  DCHECK(vd.Is8B() || vd.Is16B());
+  Emit(VFormat(vn) | NEON_REV16 | Rn(vn) | Rd(vd));
+}
+
+void Assembler::rev32(const VRegister& vd, const VRegister& vn) {
+  DCHECK(AreSameFormat(vd, vn));
+  DCHECK(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H());
+  Emit(VFormat(vn) | NEON_REV32 | Rn(vn) | Rd(vd));
+}
+
+void Assembler::rev64(const VRegister& vd, const VRegister& vn) {
+  DCHECK(AreSameFormat(vd, vn));
+  DCHECK(!vd.Is1D() && !vd.Is2D());
+  Emit(VFormat(vn) | NEON_REV64 | Rn(vn) | Rd(vd));
+}
+
+void Assembler::ursqrte(const VRegister& vd, const VRegister& vn) {
+  DCHECK(AreSameFormat(vd, vn));
+  DCHECK(vd.Is2S() || vd.Is4S());
+  Emit(VFormat(vn) | NEON_URSQRTE | Rn(vn) | Rd(vd));
+}
+
+void Assembler::urecpe(const VRegister& vd, const VRegister& vn) {
+  DCHECK(AreSameFormat(vd, vn));
+  DCHECK(vd.Is2S() || vd.Is4S());
+  Emit(VFormat(vn) | NEON_URECPE | Rn(vn) | Rd(vd));
+}
+
+void Assembler::NEONAddlp(const VRegister& vd, const VRegister& vn,
+                          NEON2RegMiscOp op) {
+  DCHECK((op == NEON_SADDLP) || (op == NEON_UADDLP) || (op == NEON_SADALP) ||
+         (op == NEON_UADALP));
+
+  DCHECK((vn.Is8B() && vd.Is4H()) || (vn.Is4H() && vd.Is2S()) ||
+         (vn.Is2S() && vd.Is1D()) || (vn.Is16B() && vd.Is8H()) ||
+         (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
+  Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
+}
+
+void Assembler::saddlp(const VRegister& vd, const VRegister& vn) {
+  NEONAddlp(vd, vn, NEON_SADDLP);
+}
+
+void Assembler::uaddlp(const VRegister& vd, const VRegister& vn) {
+  NEONAddlp(vd, vn, NEON_UADDLP);
+}
+
+void Assembler::sadalp(const VRegister& vd, const VRegister& vn) {
+  NEONAddlp(vd, vn, NEON_SADALP);
+}
+
+void Assembler::uadalp(const VRegister& vd, const VRegister& vn) {
+  NEONAddlp(vd, vn, NEON_UADALP);
+}
+
+void Assembler::NEONAcrossLanesL(const VRegister& vd, const VRegister& vn,
+                                 NEONAcrossLanesOp op) {
+  DCHECK((vn.Is8B() && vd.Is1H()) || (vn.Is16B() && vd.Is1H()) ||
+         (vn.Is4H() && vd.Is1S()) || (vn.Is8H() && vd.Is1S()) ||
+         (vn.Is4S() && vd.Is1D()));
+  Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
+}
+
+void Assembler::saddlv(const VRegister& vd, const VRegister& vn) {
+  NEONAcrossLanesL(vd, vn, NEON_SADDLV);
+}
+
+void Assembler::uaddlv(const VRegister& vd, const VRegister& vn) {
+  NEONAcrossLanesL(vd, vn, NEON_UADDLV);
+}
+
+void Assembler::NEONAcrossLanes(const VRegister& vd, const VRegister& vn,
+                                NEONAcrossLanesOp op) {
+  DCHECK((vn.Is8B() && vd.Is1B()) || (vn.Is16B() && vd.Is1B()) ||
+         (vn.Is4H() && vd.Is1H()) || (vn.Is8H() && vd.Is1H()) ||
+         (vn.Is4S() && vd.Is1S()));
+  if ((op & NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
+    Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
+  } else {
+    Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
+  }
+}
+
+#define NEON_ACROSSLANES_LIST(V)      \
+  V(fmaxv, NEON_FMAXV, vd.Is1S())     \
+  V(fminv, NEON_FMINV, vd.Is1S())     \
+  V(fmaxnmv, NEON_FMAXNMV, vd.Is1S()) \
+  V(fminnmv, NEON_FMINNMV, vd.Is1S()) \
+  V(addv, NEON_ADDV, true)            \
+  V(smaxv, NEON_SMAXV, true)          \
+  V(sminv, NEON_SMINV, true)          \
+  V(umaxv, NEON_UMAXV, true)          \
+  V(uminv, NEON_UMINV, true)
+
+#define DEFINE_ASM_FUNC(FN, OP, AS)                              \
+  void Assembler::FN(const VRegister& vd, const VRegister& vn) { \
+    DCHECK(AS);                                                  \
+    NEONAcrossLanes(vd, vn, OP);                                 \
+  }
+NEON_ACROSSLANES_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
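As with the earlier lists, each entry expands to a small forwarding function. The addv entry, hand-expanded for illustration (the "true" predicate makes the DCHECK vacuous):

void Assembler::addv(const VRegister& vd, const VRegister& vn) {
  DCHECK(true);
  NEONAcrossLanes(vd, vn, NEON_ADDV);
}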
+
+void Assembler::mov(const VRegister& vd, int vd_index, const Register& rn) {
+  ins(vd, vd_index, rn);
+}
+
+void Assembler::umov(const Register& rd, const VRegister& vn, int vn_index) {
+  // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
+  // number of lanes, and T is b, h, s or d.
+  int lane_size = vn.LaneSizeInBytes();
+  NEONFormatField format;
+  Instr q = 0;
+  switch (lane_size) {
+    case 1:
+      format = NEON_16B;
+      DCHECK(rd.IsW());
+      break;
+    case 2:
+      format = NEON_8H;
+      DCHECK(rd.IsW());
+      break;
+    case 4:
+      format = NEON_4S;
+      DCHECK(rd.IsW());
+      break;
+    default:
+      DCHECK_EQ(lane_size, 8);
+      DCHECK(rd.IsX());
+      format = NEON_2D;
+      q = NEON_Q;
+      break;
+  }
+
+  DCHECK((0 <= vn_index) &&
+         (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+  Emit(q | NEON_UMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
+}
+
+void Assembler::mov(const VRegister& vd, const VRegister& vn, int vn_index) {
+  DCHECK(vd.IsScalar());
+  dup(vd, vn, vn_index);
+}
+
+void Assembler::dup(const VRegister& vd, const Register& rn) {
+  DCHECK(!vd.Is1D());
+  DCHECK_EQ(vd.Is2D(), rn.IsX());
+  Instr q = vd.IsD() ? 0 : NEON_Q;
+  Emit(q | NEON_DUP_GENERAL | ImmNEON5(VFormat(vd), 0) | Rn(rn) | Rd(vd));
+}
+
+void Assembler::ins(const VRegister& vd, int vd_index, const VRegister& vn,
+                    int vn_index) {
+  DCHECK(AreSameFormat(vd, vn));
+  // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
+  // number of lanes, and T is b, h, s or d.
+  int lane_size = vd.LaneSizeInBytes();
+  NEONFormatField format;
+  switch (lane_size) {
+    case 1:
+      format = NEON_16B;
+      break;
+    case 2:
+      format = NEON_8H;
+      break;
+    case 4:
+      format = NEON_4S;
+      break;
+    default:
+      DCHECK_EQ(lane_size, 8);
+      format = NEON_2D;
+      break;
+  }
+
+  DCHECK((0 <= vd_index) &&
+         (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+  DCHECK((0 <= vn_index) &&
+         (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+  Emit(NEON_INS_ELEMENT | ImmNEON5(format, vd_index) |
+       ImmNEON4(format, vn_index) | Rn(vn) | Rd(vd));
+}
+
+void Assembler::NEONTable(const VRegister& vd, const VRegister& vn,
+                          const VRegister& vm, NEONTableOp op) {
+  DCHECK(vd.Is16B() || vd.Is8B());
+  DCHECK(vn.Is16B());
+  DCHECK(AreSameFormat(vd, vm));
+  Emit(op | (vd.IsQ() ? NEON_Q : 0) | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+void Assembler::tbl(const VRegister& vd, const VRegister& vn,
+                    const VRegister& vm) {
+  NEONTable(vd, vn, vm, NEON_TBL_1v);
+}
+
+void Assembler::tbl(const VRegister& vd, const VRegister& vn,
+                    const VRegister& vn2, const VRegister& vm) {
+  USE(vn2);
+  DCHECK(AreSameFormat(vn, vn2));
+  DCHECK(AreConsecutive(vn, vn2));
+  NEONTable(vd, vn, vm, NEON_TBL_2v);
+}
+
+void Assembler::tbl(const VRegister& vd, const VRegister& vn,
+                    const VRegister& vn2, const VRegister& vn3,
+                    const VRegister& vm) {
+  USE(vn2);
+  USE(vn3);
+  DCHECK(AreSameFormat(vn, vn2, vn3));
+  DCHECK(AreConsecutive(vn, vn2, vn3));
+  NEONTable(vd, vn, vm, NEON_TBL_3v);
+}
+
+void Assembler::tbl(const VRegister& vd, const VRegister& vn,
+                    const VRegister& vn2, const VRegister& vn3,
+                    const VRegister& vn4, const VRegister& vm) {
+  USE(vn2);
+  USE(vn3);
+  USE(vn4);
+  DCHECK(AreSameFormat(vn, vn2, vn3, vn4));
+  DCHECK(AreConsecutive(vn, vn2, vn3, vn4));
+  NEONTable(vd, vn, vm, NEON_TBL_4v);
+}
+
+void Assembler::tbx(const VRegister& vd, const VRegister& vn,
+                    const VRegister& vm) {
+  NEONTable(vd, vn, vm, NEON_TBX_1v);
+}
+
+void Assembler::tbx(const VRegister& vd, const VRegister& vn,
+                    const VRegister& vn2, const VRegister& vm) {
+  USE(vn2);
+  DCHECK(AreSameFormat(vn, vn2));
+  DCHECK(AreConsecutive(vn, vn2));
+  NEONTable(vd, vn, vm, NEON_TBX_2v);
+}
+
+void Assembler::tbx(const VRegister& vd, const VRegister& vn,
+                    const VRegister& vn2, const VRegister& vn3,
+                    const VRegister& vm) {
+  USE(vn2);
+  USE(vn3);
+  DCHECK(AreSameFormat(vn, vn2, vn3));
+  DCHECK(AreConsecutive(vn, vn2, vn3));
+  NEONTable(vd, vn, vm, NEON_TBX_3v);
+}
+
+void Assembler::tbx(const VRegister& vd, const VRegister& vn,
+                    const VRegister& vn2, const VRegister& vn3,
+                    const VRegister& vn4, const VRegister& vm) {
+  USE(vn2);
+  USE(vn3);
+  USE(vn4);
+  DCHECK(AreSameFormat(vn, vn2, vn3, vn4));
+  DCHECK(AreConsecutive(vn, vn2, vn3, vn4));
+  NEONTable(vd, vn, vm, NEON_TBX_4v);
+}
+
+void Assembler::mov(const VRegister& vd, int vd_index, const VRegister& vn,
+                    int vn_index) {
+  ins(vd, vd_index, vn, vn_index);
+}

void Assembler::mvn(const Register& rd, const Operand& operand) {
  orn(rd, AppropriateZeroRegFor(rd), operand);
}

void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
  DCHECK(rt.Is64Bits());
  Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
}

void Assembler::msr(SystemRegister sysreg, const Register& rt) {
  DCHECK(rt.Is64Bits());
  Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
}

-void Assembler::hint(SystemHint code) {
-  Emit(HINT | ImmHint(code) | Rt(xzr));
-}
+void Assembler::hint(SystemHint code) { Emit(HINT | ImmHint(code) | Rt(xzr)); }
+
+// NEON structure loads and stores.
+Instr Assembler::LoadStoreStructAddrModeField(const MemOperand& addr) {
+  Instr addr_field = RnSP(addr.base());
+
+  if (addr.IsPostIndex()) {
+    static_assert(NEONLoadStoreMultiStructPostIndex ==
+                      static_cast<NEONLoadStoreMultiStructPostIndexOp>(
+                          NEONLoadStoreSingleStructPostIndex),
+                  "Opcodes must match for NEON post index memop.");
+
+    addr_field |= NEONLoadStoreMultiStructPostIndex;
+    if (addr.offset() == 0) {
+      addr_field |= RmNot31(addr.regoffset());
+    } else {
+      // The immediate post index addressing mode is indicated by rm = 31.
+      // The immediate is implied by the number of vector registers used.
+      addr_field |= (0x1f << Rm_offset);
+    }
+  } else {
+    DCHECK(addr.IsImmediateOffset() && (addr.offset() == 0));
+  }
+  return addr_field;
+}
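Register post-index and immediate post-index share an encoding; rm = 31 selects the immediate form, whose value is implied by the register count. A standalone sketch of the field selection (illustration only; the field position 16 for Rm is an assumption here, standing in for Rm_offset):

#include <cstdint>
#include <iostream>

constexpr int kRmOffset = 16;  // assumed stand-in for Rm_offset

uint32_t PostIndexRmField(bool has_register_offset, int rm_code) {
  // rm = 31 signals the immediate post-index form; the immediate is
  // implied by the number of vector registers in the list.
  int code = has_register_offset ? rm_code : 0x1f;
  return static_cast<uint32_t>(code) << kRmOffset;
}

int main() {
  std::cout << std::hex << PostIndexRmField(false, 0) << "\n";  // 1f0000
  std::cout << std::hex << PostIndexRmField(true, 2) << "\n";   // 20000
  return 0;
}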
+
+void Assembler::LoadStoreStructVerify(const VRegister& vt,
+                                      const MemOperand& addr, Instr op) {
+#ifdef DEBUG
+  // Assert that addressing mode is either offset (with immediate 0), post
+  // index by immediate of the size of the register list, or post index by a
+  // value in a core register.
+  if (addr.IsImmediateOffset()) {
+    DCHECK_EQ(addr.offset(), 0);
+  } else {
+    int offset = vt.SizeInBytes();
+    switch (op) {
+      case NEON_LD1_1v:
+      case NEON_ST1_1v:
+        offset *= 1;
+        break;
+      case NEONLoadStoreSingleStructLoad1:
+      case NEONLoadStoreSingleStructStore1:
+      case NEON_LD1R:
+        offset = (offset / vt.LaneCount()) * 1;
+        break;
+
+      case NEON_LD1_2v:
+      case NEON_ST1_2v:
+      case NEON_LD2:
+      case NEON_ST2:
+        offset *= 2;
+        break;
+      case NEONLoadStoreSingleStructLoad2:
+      case NEONLoadStoreSingleStructStore2:
+      case NEON_LD2R:
+        offset = (offset / vt.LaneCount()) * 2;
+        break;
+
+      case NEON_LD1_3v:
+      case NEON_ST1_3v:
+      case NEON_LD3:
+      case NEON_ST3:
+        offset *= 3;
+        break;
+      case NEONLoadStoreSingleStructLoad3:
+      case NEONLoadStoreSingleStructStore3:
+      case NEON_LD3R:
+        offset = (offset / vt.LaneCount()) * 3;
+        break;
+
+      case NEON_LD1_4v:
+      case NEON_ST1_4v:
+      case NEON_LD4:
+      case NEON_ST4:
+        offset *= 4;
+        break;
+      case NEONLoadStoreSingleStructLoad4:
+      case NEONLoadStoreSingleStructStore4:
+      case NEON_LD4R:
+        offset = (offset / vt.LaneCount()) * 4;
+        break;
+      default:
+        UNREACHABLE();
+    }
+    DCHECK(!addr.regoffset().Is(NoReg) || addr.offset() == offset);
+  }
+#else
+  USE(vt);
+  USE(addr);
+  USE(op);
+#endif
+}
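The verifier requires the immediate post-index to equal the bytes transferred: whole registers times register count for multi-struct ops, lane size times register count for single-struct ops. A standalone check of two cases, assuming 16-byte Q registers in 4S format (illustration only):

#include <cassert>

int main() {
  const int reg_size = 16;   // vt.SizeInBytes() for a Q register
  const int lane_count = 4;  // 4S format

  // ld2 {v0.4s, v1.4s}, [x0], #imm : imm must cover 2 whole registers.
  assert(reg_size * 2 == 32);
  // ld2 {v0.s, v1.s}[1], [x0], #imm : imm must cover 2 lanes.
  assert((reg_size / lane_count) * 2 == 8);
  return 0;
}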
+
+void Assembler::LoadStoreStruct(const VRegister& vt, const MemOperand& addr,
+                                NEONLoadStoreMultiStructOp op) {
+  LoadStoreStructVerify(vt, addr, op);
+  DCHECK(vt.IsVector() || vt.Is1D());
+  Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
+}
+
+void Assembler::LoadStoreStructSingleAllLanes(const VRegister& vt,
+                                              const MemOperand& addr,
+                                              NEONLoadStoreSingleStructOp op) {
+  LoadStoreStructVerify(vt, addr, op);
+  Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
+}
+
+void Assembler::ld1(const VRegister& vt, const MemOperand& src) {
+  LoadStoreStruct(vt, src, NEON_LD1_1v);
+}
+
+void Assembler::ld1(const VRegister& vt, const VRegister& vt2,
+                    const MemOperand& src) {
+  USE(vt2);
+  DCHECK(AreSameFormat(vt, vt2));
+  DCHECK(AreConsecutive(vt, vt2));
+  LoadStoreStruct(vt, src, NEON_LD1_2v);
+}
+
+void Assembler::ld1(const VRegister& vt, const VRegister& vt2,
+                    const VRegister& vt3, const MemOperand& src) {
+  USE(vt2);
+  USE(vt3);
+  DCHECK(AreSameFormat(vt, vt2, vt3));
+  DCHECK(AreConsecutive(vt, vt2, vt3));
+  LoadStoreStruct(vt, src, NEON_LD1_3v);
+}
+
+void Assembler::ld1(const VRegister& vt, const VRegister& vt2,
+                    const VRegister& vt3, const VRegister& vt4,
+                    const MemOperand& src) {
+  USE(vt2);
+  USE(vt3);
+  USE(vt4);
+  DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
+  DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
+  LoadStoreStruct(vt, src, NEON_LD1_4v);
+}
+
+void Assembler::ld2(const VRegister& vt, const VRegister& vt2,
+                    const MemOperand& src) {
+  USE(vt2);
+  DCHECK(AreSameFormat(vt, vt2));
+  DCHECK(AreConsecutive(vt, vt2));
+  LoadStoreStruct(vt, src, NEON_LD2);
+}
+
+void Assembler::ld2(const VRegister& vt, const VRegister& vt2, int lane,
+                    const MemOperand& src) {
+  USE(vt2);
+  DCHECK(AreSameFormat(vt, vt2));
+  DCHECK(AreConsecutive(vt, vt2));
+  LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad2);
+}
+
+void Assembler::ld2r(const VRegister& vt, const VRegister& vt2,
+                     const MemOperand& src) {
+  USE(vt2);
+  DCHECK(AreSameFormat(vt, vt2));
+  DCHECK(AreConsecutive(vt, vt2));
+  LoadStoreStructSingleAllLanes(vt, src, NEON_LD2R);
+}
+
+void Assembler::ld3(const VRegister& vt, const VRegister& vt2,
+                    const VRegister& vt3, const MemOperand& src) {
+  USE(vt2);
+  USE(vt3);
+  DCHECK(AreSameFormat(vt, vt2, vt3));
+  DCHECK(AreConsecutive(vt, vt2, vt3));
+  LoadStoreStruct(vt, src, NEON_LD3);
+}
+
+void Assembler::ld3(const VRegister& vt, const VRegister& vt2,
+                    const VRegister& vt3, int lane, const MemOperand& src) {
+  USE(vt2);
+  USE(vt3);
+  DCHECK(AreSameFormat(vt, vt2, vt3));
+  DCHECK(AreConsecutive(vt, vt2, vt3));
+  LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad3);
+}
+
+void Assembler::ld3r(const VRegister& vt, const VRegister& vt2,
+                     const VRegister& vt3, const MemOperand& src) {
+  USE(vt2);
+  USE(vt3);
+  DCHECK(AreSameFormat(vt, vt2, vt3));
+  DCHECK(AreConsecutive(vt, vt2, vt3));
+  LoadStoreStructSingleAllLanes(vt, src, NEON_LD3R);
+}
+
+void Assembler::ld4(const VRegister& vt, const VRegister& vt2,
+                    const VRegister& vt3, const VRegister& vt4,
+                    const MemOperand& src) {
+  USE(vt2);
+  USE(vt3);
+  USE(vt4);
+  DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
+  DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
+  LoadStoreStruct(vt, src, NEON_LD4);
+}
+
+void Assembler::ld4(const VRegister& vt, const VRegister& vt2,
+                    const VRegister& vt3, const VRegister& vt4, int lane,
+                    const MemOperand& src) {
+  USE(vt2);
+  USE(vt3);
+  USE(vt4);
+  DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
+  DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
+  LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad4);
+}
+
+void Assembler::ld4r(const VRegister& vt, const VRegister& vt2,
+                     const VRegister& vt3, const VRegister& vt4,
+                     const MemOperand& src) {
+  USE(vt2);
+  USE(vt3);
+  USE(vt4);
+  DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
+  DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
+  LoadStoreStructSingleAllLanes(vt, src, NEON_LD4R);
+}
+
+void Assembler::st1(const VRegister& vt, const MemOperand& src) {
+  LoadStoreStruct(vt, src, NEON_ST1_1v);
+}
+
+void Assembler::st1(const VRegister& vt, const VRegister& vt2,
+                    const MemOperand& src) {
+  USE(vt2);
+  DCHECK(AreSameFormat(vt, vt2));
+  DCHECK(AreConsecutive(vt, vt2));
+  LoadStoreStruct(vt, src, NEON_ST1_2v);
+}
+
+void Assembler::st1(const VRegister& vt, const VRegister& vt2,
+                    const VRegister& vt3, const MemOperand& src) {
+  USE(vt2);
+  USE(vt3);
+  DCHECK(AreSameFormat(vt, vt2, vt3));
+  DCHECK(AreConsecutive(vt, vt2, vt3));
+  LoadStoreStruct(vt, src, NEON_ST1_3v);
+}
+
+void Assembler::st1(const VRegister& vt, const VRegister& vt2,
+                    const VRegister& vt3, const VRegister& vt4,
+                    const MemOperand& src) {
+  USE(vt2);
+  USE(vt3);
+  USE(vt4);
+  DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
+  DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
+  LoadStoreStruct(vt, src, NEON_ST1_4v);
+}
+
+void Assembler::st2(const VRegister& vt, const VRegister& vt2,
+                    const MemOperand& dst) {
+  USE(vt2);
+  DCHECK(AreSameFormat(vt, vt2));
+  DCHECK(AreConsecutive(vt, vt2));
+  LoadStoreStruct(vt, dst, NEON_ST2);
+}
+
+void Assembler::st2(const VRegister& vt, const VRegister& vt2, int lane,
+                    const MemOperand& dst) {
+  USE(vt2);
+  DCHECK(AreSameFormat(vt, vt2));
+  DCHECK(AreConsecutive(vt, vt2));
+  LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore2);
+}
+
+void Assembler::st3(const VRegister& vt, const VRegister& vt2,
+                    const VRegister& vt3, const MemOperand& dst) {
+  USE(vt2);
+  USE(vt3);
+  DCHECK(AreSameFormat(vt, vt2, vt3));
+  DCHECK(AreConsecutive(vt, vt2, vt3));
+  LoadStoreStruct(vt, dst, NEON_ST3);
+}
+
+void Assembler::st3(const VRegister& vt, const VRegister& vt2,
+                    const VRegister& vt3, int lane, const MemOperand& dst) {
+  USE(vt2);
+  USE(vt3);
+  DCHECK(AreSameFormat(vt, vt2, vt3));
+  DCHECK(AreConsecutive(vt, vt2, vt3));
+  LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore3);
+}
+
+void Assembler::st4(const VRegister& vt, const VRegister& vt2,
+                    const VRegister& vt3, const VRegister& vt4,
+                    const MemOperand& dst) {
+  USE(vt2);
+  USE(vt3);
+  USE(vt4);
+  DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
+  DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
+  LoadStoreStruct(vt, dst, NEON_ST4);
+}
+
+void Assembler::st4(const VRegister& vt, const VRegister& vt2,
+                    const VRegister& vt3, const VRegister& vt4, int lane,
+                    const MemOperand& dst) {
+  USE(vt2);
+  USE(vt3);
+  USE(vt4);
+  DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
+  DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
+  LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore4);
+}
+
+void Assembler::LoadStoreStructSingle(const VRegister& vt, uint32_t lane,
+                                      const MemOperand& addr,
+                                      NEONLoadStoreSingleStructOp op) {
+  LoadStoreStructVerify(vt, addr, op);
+
+  // We support vt arguments of the form vt.VxT() or vt.T(), where x is the
+  // number of lanes, and T is b, h, s or d.
+  unsigned lane_size = vt.LaneSizeInBytes();
+  DCHECK_LT(lane, kQRegSize / lane_size);
+
+  // Lane size is encoded in the opcode field. Lane index is encoded in the Q,
+  // S and size fields.
+  lane *= lane_size;
+
+  // Encodings for S[0]/D[0] and S[2]/D[1] are distinguished using the least-
+  // significant bit of the size field, so we increment lane here to account
+  // for that.
+  if (lane_size == 8) lane++;
+
+  Instr size = (lane << NEONLSSize_offset) & NEONLSSize_mask;
+  Instr s = (lane << (NEONS_offset - 2)) & NEONS_mask;
+  Instr q = (lane << (NEONQ_offset - 3)) & NEONQ_mask;
+
+  Instr instr = op;
+  switch (lane_size) {
+    case 1:
+      instr |= NEONLoadStoreSingle_b;
+      break;
+    case 2:
+      instr |= NEONLoadStoreSingle_h;
+      break;
+    case 4:
+      instr |= NEONLoadStoreSingle_s;
+      break;
+    default:
+      DCHECK_EQ(lane_size, 8U);
+      instr |= NEONLoadStoreSingle_d;
+  }
+
+  Emit(instr | LoadStoreStructAddrModeField(addr) | q | size | s | Rt(vt));
+}
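After scaling by the lane size (and the D-lane adjustment), the shifts above scatter the lane's byte offset across the fields: bits 1:0 land in the size field, bit 2 in S and bit 3 in Q (the absolute field positions come from the NEONLSSize/NEONS/NEONQ constants and are not reproduced here). A standalone sketch of the bit split (illustration only):

#include <cassert>

int main() {
  // ld1 {v0.s}[3], [x0]: lane index 3, lane size 4 bytes.
  int lane = 3 * 4;                // byte offset 12 = 0b1100
  assert((lane & 0x3) == 0);       // bits 1:0 -> size field
  assert(((lane >> 2) & 1) == 1);  // bit 2 -> S bit
  assert(((lane >> 3) & 1) == 1);  // bit 3 -> Q bit
  return 0;
}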
+
+void Assembler::ld1(const VRegister& vt, int lane, const MemOperand& src) {
+  LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad1);
+}
+
+void Assembler::ld1r(const VRegister& vt, const MemOperand& src) {
+  LoadStoreStructSingleAllLanes(vt, src, NEON_LD1R);
+}
+
+void Assembler::st1(const VRegister& vt, int lane, const MemOperand& dst) {
+  LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore1);
+}

void Assembler::dmb(BarrierDomain domain, BarrierType type) {
  Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
}

void Assembler::dsb(BarrierDomain domain, BarrierType type) {
  Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
}

void Assembler::isb() {
  Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
}

-void Assembler::fmov(FPRegister fd, double imm) {
-  DCHECK(fd.Is64Bits());
-  DCHECK(IsImmFP64(imm));
-  Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm));
-}
-
-void Assembler::fmov(FPRegister fd, float imm) {
-  DCHECK(fd.Is32Bits());
-  DCHECK(IsImmFP32(imm));
-  Emit(FMOV_s_imm | Rd(fd) | ImmFP32(imm));
-}
+void Assembler::fmov(const VRegister& vd, double imm) {
+  if (vd.IsScalar()) {
+    DCHECK(vd.Is1D());
+    Emit(FMOV_d_imm | Rd(vd) | ImmFP(imm));
+  } else {
+    DCHECK(vd.Is2D());
+    Instr op = NEONModifiedImmediate_MOVI | NEONModifiedImmediateOpBit;
+    Emit(NEON_Q | op | ImmNEONFP(imm) | NEONCmode(0xf) | Rd(vd));
+  }
+}
+
+void Assembler::fmov(const VRegister& vd, float imm) {
+  if (vd.IsScalar()) {
+    DCHECK(vd.Is1S());
+    Emit(FMOV_s_imm | Rd(vd) | ImmFP(imm));
+  } else {
+    DCHECK(vd.Is2S() | vd.Is4S());
+    Instr op = NEONModifiedImmediate_MOVI;
+    Instr q = vd.Is4S() ? NEON_Q : 0;
+    Emit(q | op | ImmNEONFP(imm) | NEONCmode(0xf) | Rd(vd));
+  }
+}

-void Assembler::fmov(Register rd, FPRegister fn) {
-  DCHECK(rd.SizeInBits() == fn.SizeInBits());
+void Assembler::fmov(const Register& rd, const VRegister& fn) {
+  DCHECK_EQ(rd.SizeInBits(), fn.SizeInBits());
   FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
   Emit(op | Rd(rd) | Rn(fn));
 }

-void Assembler::fmov(FPRegister fd, Register rn) {
-  DCHECK(fd.SizeInBits() == rn.SizeInBits());
-  FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx;
-  Emit(op | Rd(fd) | Rn(rn));
-}
+void Assembler::fmov(const VRegister& vd, const Register& rn) {
+  DCHECK_EQ(vd.SizeInBits(), rn.SizeInBits());
+  FPIntegerConvertOp op = vd.Is32Bits() ? FMOV_sw : FMOV_dx;
+  Emit(op | Rd(vd) | Rn(rn));
+}

-void Assembler::fmov(FPRegister fd, FPRegister fn) {
-  DCHECK(fd.SizeInBits() == fn.SizeInBits());
-  Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn));
-}
+void Assembler::fmov(const VRegister& vd, const VRegister& vn) {
+  DCHECK_EQ(vd.SizeInBits(), vn.SizeInBits());
+  Emit(FPType(vd) | FMOV | Rd(vd) | Rn(vn));
+}
+
+void Assembler::fmov(const VRegister& vd, int index, const Register& rn) {
+  DCHECK((index == 1) && vd.Is1D() && rn.IsX());
+  USE(index);
+  Emit(FMOV_d1_x | Rd(vd) | Rn(rn));
+}
+
+void Assembler::fmov(const Register& rd, const VRegister& vn, int index) {
+  DCHECK((index == 1) && vn.Is1D() && rd.IsX());
+  USE(index);
+  Emit(FMOV_x_d1 | Rd(rd) | Rn(vn));
+}

-void Assembler::fadd(const FPRegister& fd,
-                     const FPRegister& fn,
-                     const FPRegister& fm) {
-  FPDataProcessing2Source(fd, fn, fm, FADD);
-}
-
-void Assembler::fsub(const FPRegister& fd,
-                     const FPRegister& fn,
-                     const FPRegister& fm) {
-  FPDataProcessing2Source(fd, fn, fm, FSUB);
-}
-
-void Assembler::fmul(const FPRegister& fd,
-                     const FPRegister& fn,
-                     const FPRegister& fm) {
-  FPDataProcessing2Source(fd, fn, fm, FMUL);
-}
-
-void Assembler::fmadd(const FPRegister& fd,
-                      const FPRegister& fn,
-                      const FPRegister& fm,
-                      const FPRegister& fa) {
+void Assembler::fmadd(const VRegister& fd, const VRegister& fn,
+                      const VRegister& fm, const VRegister& fa) {
   FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d);
 }

-void Assembler::fmsub(const FPRegister& fd,
-                      const FPRegister& fn,
-                      const FPRegister& fm,
-                      const FPRegister& fa) {
+void Assembler::fmsub(const VRegister& fd, const VRegister& fn,
+                      const VRegister& fm, const VRegister& fa) {
   FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d);
 }

-void Assembler::fnmadd(const FPRegister& fd,
-                       const FPRegister& fn,
-                       const FPRegister& fm,
-                       const FPRegister& fa) {
+void Assembler::fnmadd(const VRegister& fd, const VRegister& fn,
+                       const VRegister& fm, const VRegister& fa) {
   FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d);
 }

-void Assembler::fnmsub(const FPRegister& fd,
-                       const FPRegister& fn,
-                       const FPRegister& fm,
-                       const FPRegister& fa) {
+void Assembler::fnmsub(const VRegister& fd, const VRegister& fn,
+                       const VRegister& fm, const VRegister& fa) {
   FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d);
 }

+void Assembler::fnmul(const VRegister& vd, const VRegister& vn,
+                      const VRegister& vm) {
+  DCHECK(AreSameSizeAndType(vd, vn, vm));
+  Instr op = vd.Is1S() ? FNMUL_s : FNMUL_d;
+  Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
-void Assembler::fdiv(const FPRegister& fd,
-                     const FPRegister& fn,
-                     const FPRegister& fm) {
-  FPDataProcessing2Source(fd, fn, fm, FDIV);
-}
-
-void Assembler::fmax(const FPRegister& fd,
-                     const FPRegister& fn,
-                     const FPRegister& fm) {
-  FPDataProcessing2Source(fd, fn, fm, FMAX);
-}
-
-void Assembler::fmaxnm(const FPRegister& fd,
-                       const FPRegister& fn,
-                       const FPRegister& fm) {
-  FPDataProcessing2Source(fd, fn, fm, FMAXNM);
-}
-
-void Assembler::fmin(const FPRegister& fd,
-                     const FPRegister& fn,
-                     const FPRegister& fm) {
-  FPDataProcessing2Source(fd, fn, fm, FMIN);
-}
-
-void Assembler::fminnm(const FPRegister& fd,
-                       const FPRegister& fn,
-                       const FPRegister& fm) {
-  FPDataProcessing2Source(fd, fn, fm, FMINNM);
-}
-
-void Assembler::fabs(const FPRegister& fd,
-                     const FPRegister& fn) {
-  DCHECK(fd.SizeInBits() == fn.SizeInBits());
-  FPDataProcessing1Source(fd, fn, FABS);
-}
-
-void Assembler::fneg(const FPRegister& fd,
-                     const FPRegister& fn) {
-  DCHECK(fd.SizeInBits() == fn.SizeInBits());
-  FPDataProcessing1Source(fd, fn, FNEG);
-}
-
-void Assembler::fsqrt(const FPRegister& fd,
-                      const FPRegister& fn) {
-  DCHECK(fd.SizeInBits() == fn.SizeInBits());
-  FPDataProcessing1Source(fd, fn, FSQRT);
-}
-
-void Assembler::frinta(const FPRegister& fd,
-                       const FPRegister& fn) {
-  DCHECK(fd.SizeInBits() == fn.SizeInBits());
-  FPDataProcessing1Source(fd, fn, FRINTA);
-}
-
-void Assembler::frintm(const FPRegister& fd,
-                       const FPRegister& fn) {
-  DCHECK(fd.SizeInBits() == fn.SizeInBits());
-  FPDataProcessing1Source(fd, fn, FRINTM);
-}
-
-void Assembler::frintn(const FPRegister& fd,
-                       const FPRegister& fn) {
-  DCHECK(fd.SizeInBits() == fn.SizeInBits());
-  FPDataProcessing1Source(fd, fn, FRINTN);
-}
-
-void Assembler::frintp(const FPRegister& fd, const FPRegister& fn) {
-  DCHECK(fd.SizeInBits() == fn.SizeInBits());
-  FPDataProcessing1Source(fd, fn, FRINTP);
-}
-
-void Assembler::frintz(const FPRegister& fd,
-                       const FPRegister& fn) {
-  DCHECK(fd.SizeInBits() == fn.SizeInBits());
-  FPDataProcessing1Source(fd, fn, FRINTZ);
-}
-
-void Assembler::fcmp(const FPRegister& fn,
-                     const FPRegister& fm) {
-  DCHECK(fn.SizeInBits() == fm.SizeInBits());
+void Assembler::fcmp(const VRegister& fn, const VRegister& fm) {
+  DCHECK_EQ(fn.SizeInBits(), fm.SizeInBits());
   Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
 }

-void Assembler::fcmp(const FPRegister& fn,
-                     double value) {
+void Assembler::fcmp(const VRegister& fn, double value) {
   USE(value);
   // Although the fcmp instruction can strictly only take an immediate value of
   // +0.0, we don't need to check for -0.0 because the sign of 0.0 doesn't
   // affect the result of the comparison.
-  DCHECK(value == 0.0);
+  DCHECK_EQ(value, 0.0);
   Emit(FPType(fn) | FCMP_zero | Rn(fn));
 }
2018 3078
2019 3079 void Assembler::fccmp(const VRegister& fn, const VRegister& fm,
2020 void Assembler::fccmp(const FPRegister& fn, 3080 StatusFlags nzcv, Condition cond) {
2021 const FPRegister& fm, 3081 DCHECK_EQ(fn.SizeInBits(), fm.SizeInBits());
2022 StatusFlags nzcv,
2023 Condition cond) {
2024 DCHECK(fn.SizeInBits() == fm.SizeInBits());
2025 Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv)); 3082 Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv));
2026 } 3083 }
2027 3084
2028 3085 void Assembler::fcsel(const VRegister& fd, const VRegister& fn,
2029 void Assembler::fcsel(const FPRegister& fd, 3086 const VRegister& fm, Condition cond) {
2030 const FPRegister& fn, 3087 DCHECK_EQ(fd.SizeInBits(), fn.SizeInBits());
2031 const FPRegister& fm, 3088 DCHECK_EQ(fd.SizeInBits(), fm.SizeInBits());
2032 Condition cond) {
2033 DCHECK(fd.SizeInBits() == fn.SizeInBits());
2034 DCHECK(fd.SizeInBits() == fm.SizeInBits());
2035 Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd)); 3089 Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd));
2036 } 3090 }
2037 3091
2038 3092 void Assembler::NEONFPConvertToInt(const Register& rd, const VRegister& vn,
2039 void Assembler::FPConvertToInt(const Register& rd, 3093 Instr op) {
2040 const FPRegister& fn, 3094 Emit(SF(rd) | FPType(vn) | op | Rn(vn) | Rd(rd));
2041 FPIntegerConvertOp op) { 3095 }
2042 Emit(SF(rd) | FPType(fn) | op | Rn(fn) | Rd(rd)); 3096
2043 } 3097 void Assembler::NEONFPConvertToInt(const VRegister& vd, const VRegister& vn,
2044 3098 Instr op) {
2045 3099 if (vn.IsScalar()) {
2046 void Assembler::fcvt(const FPRegister& fd, 3100 DCHECK((vd.Is1S() && vn.Is1S()) || (vd.Is1D() && vn.Is1D()));
2047 const FPRegister& fn) { 3101 op |= NEON_Q | NEONScalar;
2048 if (fd.Is64Bits()) { 3102 }
2049 // Convert float to double. 3103 Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
2050 DCHECK(fn.Is32Bits()); 3104 }
2051 FPDataProcessing1Source(fd, fn, FCVT_ds); 3105
2052 } else { 3106 void Assembler::fcvt(const VRegister& vd, const VRegister& vn) {
2053 // Convert double to float. 3107 FPDataProcessing1SourceOp op;
2054 DCHECK(fn.Is64Bits()); 3108 if (vd.Is1D()) {
2055 FPDataProcessing1Source(fd, fn, FCVT_sd); 3109 DCHECK(vn.Is1S() || vn.Is1H());
2056 } 3110 op = vn.Is1S() ? FCVT_ds : FCVT_dh;
2057 } 3111 } else if (vd.Is1S()) {
2058 3112 DCHECK(vn.Is1D() || vn.Is1H());
2059 3113 op = vn.Is1D() ? FCVT_sd : FCVT_sh;
2060 void Assembler::fcvtau(const Register& rd, const FPRegister& fn) { 3114 } else {
2061 FPConvertToInt(rd, fn, FCVTAU); 3115 DCHECK(vd.Is1H());
2062 } 3116 DCHECK(vn.Is1D() || vn.Is1S());
2063 3117 op = vn.Is1D() ? FCVT_hd : FCVT_hs;
2064 3118 }
2065 void Assembler::fcvtas(const Register& rd, const FPRegister& fn) { 3119 FPDataProcessing1Source(vd, vn, op);
2066 FPConvertToInt(rd, fn, FCVTAS); 3120 }
2067 } 3121
2068 3122 void Assembler::fcvtl(const VRegister& vd, const VRegister& vn) {
2069 3123 DCHECK((vd.Is4S() && vn.Is4H()) || (vd.Is2D() && vn.Is2S()));
2070 void Assembler::fcvtmu(const Register& rd, const FPRegister& fn) { 3124 Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
2071 FPConvertToInt(rd, fn, FCVTMU); 3125 Emit(format | NEON_FCVTL | Rn(vn) | Rd(vd));
2072 } 3126 }
2073 3127
2074 3128 void Assembler::fcvtl2(const VRegister& vd, const VRegister& vn) {
2075 void Assembler::fcvtms(const Register& rd, const FPRegister& fn) { 3129 DCHECK((vd.Is4S() && vn.Is8H()) || (vd.Is2D() && vn.Is4S()));
2076 FPConvertToInt(rd, fn, FCVTMS); 3130 Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
2077 } 3131 Emit(NEON_Q | format | NEON_FCVTL | Rn(vn) | Rd(vd));
2078 3132 }
2079 3133
2080 void Assembler::fcvtnu(const Register& rd, const FPRegister& fn) { 3134 void Assembler::fcvtn(const VRegister& vd, const VRegister& vn) {
2081 FPConvertToInt(rd, fn, FCVTNU); 3135 DCHECK((vn.Is4S() && vd.Is4H()) || (vn.Is2D() && vd.Is2S()));
2082 } 3136 Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0;
2083 3137 Emit(format | NEON_FCVTN | Rn(vn) | Rd(vd));
2084 3138 }
2085 void Assembler::fcvtns(const Register& rd, const FPRegister& fn) { 3139
2086 FPConvertToInt(rd, fn, FCVTNS); 3140 void Assembler::fcvtn2(const VRegister& vd, const VRegister& vn) {
2087 } 3141 DCHECK((vn.Is4S() && vd.Is8H()) || (vn.Is2D() && vd.Is4S()));
2088 3142 Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0;
2089 3143 Emit(NEON_Q | format | NEON_FCVTN | Rn(vn) | Rd(vd));
2090 void Assembler::fcvtzu(const Register& rd, const FPRegister& fn) { 3144 }
2091 FPConvertToInt(rd, fn, FCVTZU); 3145
2092 } 3146 void Assembler::fcvtxn(const VRegister& vd, const VRegister& vn) {
2093 3147 Instr format = 1 << NEONSize_offset;
2094 3148 if (vd.IsScalar()) {
2095 void Assembler::fcvtzs(const Register& rd, const FPRegister& fn) { 3149 DCHECK(vd.Is1S() && vn.Is1D());
2096 FPConvertToInt(rd, fn, FCVTZS); 3150 Emit(format | NEON_FCVTXN_scalar | Rn(vn) | Rd(vd));
2097 } 3151 } else {
2098 3152 DCHECK(vd.Is2S() && vn.Is2D());
2099 3153 Emit(format | NEON_FCVTXN | Rn(vn) | Rd(vd));
2100 void Assembler::scvtf(const FPRegister& fd, 3154 }
2101 const Register& rn, 3155 }
2102 unsigned fbits) { 3156
3157 void Assembler::fcvtxn2(const VRegister& vd, const VRegister& vn) {
3158 DCHECK(vd.Is4S() && vn.Is2D());
3159 Instr format = 1 << NEONSize_offset;
3160 Emit(NEON_Q | format | NEON_FCVTXN | Rn(vn) | Rd(vd));
3161 }
3162
3163 #define NEON_FP2REGMISC_FCVT_LIST(V) \
3164 V(fcvtnu, NEON_FCVTNU, FCVTNU) \
3165 V(fcvtns, NEON_FCVTNS, FCVTNS) \
3166 V(fcvtpu, NEON_FCVTPU, FCVTPU) \
3167 V(fcvtps, NEON_FCVTPS, FCVTPS) \
3168 V(fcvtmu, NEON_FCVTMU, FCVTMU) \
3169 V(fcvtms, NEON_FCVTMS, FCVTMS) \
3170 V(fcvtau, NEON_FCVTAU, FCVTAU) \
3171 V(fcvtas, NEON_FCVTAS, FCVTAS)
3172
3173 #define DEFINE_ASM_FUNCS(FN, VEC_OP, SCA_OP) \
3174 void Assembler::FN(const Register& rd, const VRegister& vn) { \
3175 NEONFPConvertToInt(rd, vn, SCA_OP); \
3176 } \
3177 void Assembler::FN(const VRegister& vd, const VRegister& vn) { \
3178 NEONFPConvertToInt(vd, vn, VEC_OP); \
3179 }
3180 NEON_FP2REGMISC_FCVT_LIST(DEFINE_ASM_FUNCS)
3181 #undef DEFINE_ASM_FUNCS
3182
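For reference (not part of the patch): each entry in the list above generates two overloads, one writing a general register via the scalar opcode and one writing a vector via the NEON opcode. A minimal usage sketch, assuming a hypothetical Assembler instance named assm:

assm.fcvtns(w0, s1);              // Scalar form, encoded with FCVTNS.
assm.fcvtns(v0.V4S(), v1.V4S());  // Vector form, encoded with NEON_FCVTNS.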
3183 void Assembler::scvtf(const VRegister& vd, const VRegister& vn, int fbits) {
3184 DCHECK_GE(fbits, 0);
2103 if (fbits == 0) { 3185 if (fbits == 0) {
2104 Emit(SF(rn) | FPType(fd) | SCVTF | Rn(rn) | Rd(fd)); 3186 NEONFP2RegMisc(vd, vn, NEON_SCVTF);
2105 } else { 3187 } else {
2106 Emit(SF(rn) | FPType(fd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) | 3188 DCHECK(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
2107 Rd(fd)); 3189 NEONShiftRightImmediate(vd, vn, fbits, NEON_SCVTF_imm);
2108 } 3190 }
2109 } 3191 }
2110 3192
2111 3193 void Assembler::ucvtf(const VRegister& vd, const VRegister& vn, int fbits) {
2112 void Assembler::ucvtf(const FPRegister& fd, 3194 DCHECK_GE(fbits, 0);
2113 const Register& rn, 3195 if (fbits == 0) {
2114 unsigned fbits) { 3196 NEONFP2RegMisc(vd, vn, NEON_UCVTF);
3197 } else {
3198 DCHECK(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
3199 NEONShiftRightImmediate(vd, vn, fbits, NEON_UCVTF_imm);
3200 }
3201 }
3202
3203 void Assembler::scvtf(const VRegister& vd, const Register& rn, int fbits) {
3204 DCHECK_GE(fbits, 0);
3205 if (fbits == 0) {
3206 Emit(SF(rn) | FPType(vd) | SCVTF | Rn(rn) | Rd(vd));
3207 } else {
3208 Emit(SF(rn) | FPType(vd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
3209 Rd(vd));
3210 }
3211 }
3212
3213 void Assembler::ucvtf(const VRegister& fd, const Register& rn, int fbits) {
3214 DCHECK_GE(fbits, 0);
2115 if (fbits == 0) { 3215 if (fbits == 0) {
2116 Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd)); 3216 Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd));
2117 } else { 3217 } else {
2118 Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) | 3218 Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
2119 Rd(fd)); 3219 Rd(fd));
2120 } 3220 }
2121 } 3221 }
2122 3222
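In the fixed-point branches above, FPScale(64 - fbits) encodes the fraction width, so the integer source is interpreted with fbits fractional bits. A minimal sketch, assuming a hypothetical Assembler instance assm:

assm.scvtf(d0, x1, 0);   // d0 = (double)(int64_t)x1.
assm.scvtf(d0, x1, 16);  // d0 = (double)(int64_t)x1 / 65536.0.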
3223 void Assembler::NEON3Same(const VRegister& vd, const VRegister& vn,
3224 const VRegister& vm, NEON3SameOp vop) {
3225 DCHECK(AreSameFormat(vd, vn, vm));
3226 DCHECK(vd.IsVector() || !vd.IsQ());
3227
3228 Instr format, op = vop;
3229 if (vd.IsScalar()) {
3230 op |= NEON_Q | NEONScalar;
3231 format = SFormat(vd);
3232 } else {
3233 format = VFormat(vd);
3234 }
3235
3236 Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
3237 }
3238
3239 void Assembler::NEONFP3Same(const VRegister& vd, const VRegister& vn,
3240 const VRegister& vm, Instr op) {
3241 DCHECK(AreSameFormat(vd, vn, vm));
3242 Emit(FPFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
3243 }
3244
3245 #define NEON_FP2REGMISC_LIST(V) \
3246 V(fabs, NEON_FABS, FABS) \
3247 V(fneg, NEON_FNEG, FNEG) \
3248 V(fsqrt, NEON_FSQRT, FSQRT) \
3249 V(frintn, NEON_FRINTN, FRINTN) \
3250 V(frinta, NEON_FRINTA, FRINTA) \
3251 V(frintp, NEON_FRINTP, FRINTP) \
3252 V(frintm, NEON_FRINTM, FRINTM) \
3253 V(frintx, NEON_FRINTX, FRINTX) \
3254 V(frintz, NEON_FRINTZ, FRINTZ) \
3255 V(frinti, NEON_FRINTI, FRINTI) \
3256 V(frsqrte, NEON_FRSQRTE, NEON_FRSQRTE_scalar) \
3257 V(frecpe, NEON_FRECPE, NEON_FRECPE_scalar)
3258
3259 #define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP) \
3260 void Assembler::FN(const VRegister& vd, const VRegister& vn) { \
3261 Instr op; \
3262 if (vd.IsScalar()) { \
3263 DCHECK(vd.Is1S() || vd.Is1D()); \
3264 op = SCA_OP; \
3265 } else { \
3266 DCHECK(vd.Is2S() || vd.Is2D() || vd.Is4S()); \
3267 op = VEC_OP; \
3268 } \
3269 NEONFP2RegMisc(vd, vn, op); \
3270 }
3271 NEON_FP2REGMISC_LIST(DEFINE_ASM_FUNC)
3272 #undef DEFINE_ASM_FUNC
3273
3274 void Assembler::shll(const VRegister& vd, const VRegister& vn, int shift) {
3275 DCHECK((vd.Is8H() && vn.Is8B() && shift == 8) ||
3276 (vd.Is4S() && vn.Is4H() && shift == 16) ||
3277 (vd.Is2D() && vn.Is2S() && shift == 32));
3278 USE(shift);
3279 Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
3280 }
3281
3282 void Assembler::shll2(const VRegister& vd, const VRegister& vn, int shift) {
3283 USE(shift);
3284 DCHECK((vd.Is8H() && vn.Is16B() && shift == 8) ||
3285 (vd.Is4S() && vn.Is8H() && shift == 16) ||
3286 (vd.Is2D() && vn.Is4S() && shift == 32));
3287 Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
3288 }
3289
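The DCHECKs above pin the shift to the source lane width, so each instruction assembles in exactly three forms. A sketch, assuming a hypothetical Assembler instance assm:

assm.shll(v0.V8H(), v1.V8B(), 8);    // Widen the low eight bytes, shift left by 8.
assm.shll2(v0.V4S(), v1.V8H(), 16);  // Widen the high four halfwords, shift by 16.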
3290 void Assembler::NEONFP2RegMisc(const VRegister& vd, const VRegister& vn,
3291 NEON2RegMiscOp vop, double value) {
3292 DCHECK(AreSameFormat(vd, vn));
3293 DCHECK_EQ(value, 0.0);
3294 USE(value);
3295
3296 Instr op = vop;
3297 if (vd.IsScalar()) {
3298 DCHECK(vd.Is1S() || vd.Is1D());
3299 op |= NEON_Q | NEONScalar;
3300 } else {
3301 DCHECK(vd.Is2S() || vd.Is2D() || vd.Is4S());
3302 }
3303
3304 Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
3305 }
3306
3307 void Assembler::fcmeq(const VRegister& vd, const VRegister& vn, double value) {
3308 NEONFP2RegMisc(vd, vn, NEON_FCMEQ_zero, value);
3309 }
3310
3311 void Assembler::fcmge(const VRegister& vd, const VRegister& vn, double value) {
3312 NEONFP2RegMisc(vd, vn, NEON_FCMGE_zero, value);
3313 }
3314
3315 void Assembler::fcmgt(const VRegister& vd, const VRegister& vn, double value) {
3316 NEONFP2RegMisc(vd, vn, NEON_FCMGT_zero, value);
3317 }
3318
3319 void Assembler::fcmle(const VRegister& vd, const VRegister& vn, double value) {
3320 NEONFP2RegMisc(vd, vn, NEON_FCMLE_zero, value);
3321 }
3322
3323 void Assembler::fcmlt(const VRegister& vd, const VRegister& vn, double value) {
3324 NEONFP2RegMisc(vd, vn, NEON_FCMLT_zero, value);
3325 }
3326
3327 void Assembler::frecpx(const VRegister& vd, const VRegister& vn) {
3328 DCHECK(vd.IsScalar());
3329 DCHECK(AreSameFormat(vd, vn));
3330 DCHECK(vd.Is1S() || vd.Is1D());
3331 Emit(FPFormat(vd) | NEON_FRECPX_scalar | Rn(vn) | Rd(vd));
3332 }
3333
3334 void Assembler::fcvtzs(const Register& rd, const VRegister& vn, int fbits) {
3335 DCHECK(vn.Is1S() || vn.Is1D());
3336 DCHECK((fbits >= 0) && (fbits <= rd.SizeInBits()));
3337 if (fbits == 0) {
3338 Emit(SF(rd) | FPType(vn) | FCVTZS | Rn(vn) | Rd(rd));
3339 } else {
3340 Emit(SF(rd) | FPType(vn) | FCVTZS_fixed | FPScale(64 - fbits) | Rn(vn) |
3341 Rd(rd));
3342 }
3343 }
3344
3345 void Assembler::fcvtzs(const VRegister& vd, const VRegister& vn, int fbits) {
3346 DCHECK_GE(fbits, 0);
3347 if (fbits == 0) {
3348 NEONFP2RegMisc(vd, vn, NEON_FCVTZS);
3349 } else {
3350 DCHECK(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
3351 NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZS_imm);
3352 }
3353 }
3354
3355 void Assembler::fcvtzu(const Register& rd, const VRegister& vn, int fbits) {
3356 DCHECK(vn.Is1S() || vn.Is1D());
3357 DCHECK((fbits >= 0) && (fbits <= rd.SizeInBits()));
3358 if (fbits == 0) {
3359 Emit(SF(rd) | FPType(vn) | FCVTZU | Rn(vn) | Rd(rd));
3360 } else {
3361 Emit(SF(rd) | FPType(vn) | FCVTZU_fixed | FPScale(64 - fbits) | Rn(vn) |
3362 Rd(rd));
3363 }
3364 }
3365
3366 void Assembler::fcvtzu(const VRegister& vd, const VRegister& vn, int fbits) {
3367 DCHECK_GE(fbits, 0);
3368 if (fbits == 0) {
3369 NEONFP2RegMisc(vd, vn, NEON_FCVTZU);
3370 } else {
3371 DCHECK(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
3372 NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZU_imm);
3373 }
3374 }
3375
3376 void Assembler::NEONFP2RegMisc(const VRegister& vd, const VRegister& vn,
3377 Instr op) {
3378 DCHECK(AreSameFormat(vd, vn));
3379 Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
3380 }
3381
3382 void Assembler::NEON2RegMisc(const VRegister& vd, const VRegister& vn,
3383 NEON2RegMiscOp vop, int value) {
3384 DCHECK(AreSameFormat(vd, vn));
3385 DCHECK_EQ(value, 0);
3386 USE(value);
3387
3388 Instr format, op = vop;
3389 if (vd.IsScalar()) {
3390 op |= NEON_Q | NEONScalar;
3391 format = SFormat(vd);
3392 } else {
3393 format = VFormat(vd);
3394 }
3395
3396 Emit(format | op | Rn(vn) | Rd(vd));
3397 }
3398
3399 void Assembler::cmeq(const VRegister& vd, const VRegister& vn, int value) {
3400 DCHECK(vd.IsVector() || vd.Is1D());
3401 NEON2RegMisc(vd, vn, NEON_CMEQ_zero, value);
3402 }
3403
3404 void Assembler::cmge(const VRegister& vd, const VRegister& vn, int value) {
3405 DCHECK(vd.IsVector() || vd.Is1D());
3406 NEON2RegMisc(vd, vn, NEON_CMGE_zero, value);
3407 }
3408
3409 void Assembler::cmgt(const VRegister& vd, const VRegister& vn, int value) {
3410 DCHECK(vd.IsVector() || vd.Is1D());
3411 NEON2RegMisc(vd, vn, NEON_CMGT_zero, value);
3412 }
3413
3414 void Assembler::cmle(const VRegister& vd, const VRegister& vn, int value) {
3415 DCHECK(vd.IsVector() || vd.Is1D());
3416 NEON2RegMisc(vd, vn, NEON_CMLE_zero, value);
3417 }
3418
3419 void Assembler::cmlt(const VRegister& vd, const VRegister& vn, int value) {
3420 DCHECK(vd.IsVector() || vd.Is1D());
3421 NEON2RegMisc(vd, vn, NEON_CMLT_zero, value);
3422 }
3423
3424 #define NEON_3SAME_LIST(V) \
3425 V(add, NEON_ADD, vd.IsVector() || vd.Is1D()) \
3426 V(addp, NEON_ADDP, vd.IsVector() || vd.Is1D()) \
3427 V(sub, NEON_SUB, vd.IsVector() || vd.Is1D()) \
3428 V(cmeq, NEON_CMEQ, vd.IsVector() || vd.Is1D()) \
3429 V(cmge, NEON_CMGE, vd.IsVector() || vd.Is1D()) \
3430 V(cmgt, NEON_CMGT, vd.IsVector() || vd.Is1D()) \
3431 V(cmhi, NEON_CMHI, vd.IsVector() || vd.Is1D()) \
3432 V(cmhs, NEON_CMHS, vd.IsVector() || vd.Is1D()) \
3433 V(cmtst, NEON_CMTST, vd.IsVector() || vd.Is1D()) \
3434 V(sshl, NEON_SSHL, vd.IsVector() || vd.Is1D()) \
3435 V(ushl, NEON_USHL, vd.IsVector() || vd.Is1D()) \
3436 V(srshl, NEON_SRSHL, vd.IsVector() || vd.Is1D()) \
3437 V(urshl, NEON_URSHL, vd.IsVector() || vd.Is1D()) \
3438 V(sqdmulh, NEON_SQDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS()) \
3439 V(sqrdmulh, NEON_SQRDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS()) \
3440 V(shadd, NEON_SHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
3441 V(uhadd, NEON_UHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
3442 V(srhadd, NEON_SRHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
3443 V(urhadd, NEON_URHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
3444 V(shsub, NEON_SHSUB, vd.IsVector() && !vd.IsLaneSizeD()) \
3445 V(uhsub, NEON_UHSUB, vd.IsVector() && !vd.IsLaneSizeD()) \
3446 V(smax, NEON_SMAX, vd.IsVector() && !vd.IsLaneSizeD()) \
3447 V(smaxp, NEON_SMAXP, vd.IsVector() && !vd.IsLaneSizeD()) \
3448 V(smin, NEON_SMIN, vd.IsVector() && !vd.IsLaneSizeD()) \
3449 V(sminp, NEON_SMINP, vd.IsVector() && !vd.IsLaneSizeD()) \
3450 V(umax, NEON_UMAX, vd.IsVector() && !vd.IsLaneSizeD()) \
3451 V(umaxp, NEON_UMAXP, vd.IsVector() && !vd.IsLaneSizeD()) \
3452 V(umin, NEON_UMIN, vd.IsVector() && !vd.IsLaneSizeD()) \
3453 V(uminp, NEON_UMINP, vd.IsVector() && !vd.IsLaneSizeD()) \
3454 V(saba, NEON_SABA, vd.IsVector() && !vd.IsLaneSizeD()) \
3455 V(sabd, NEON_SABD, vd.IsVector() && !vd.IsLaneSizeD()) \
3456 V(uaba, NEON_UABA, vd.IsVector() && !vd.IsLaneSizeD()) \
3457 V(uabd, NEON_UABD, vd.IsVector() && !vd.IsLaneSizeD()) \
3458 V(mla, NEON_MLA, vd.IsVector() && !vd.IsLaneSizeD()) \
3459 V(mls, NEON_MLS, vd.IsVector() && !vd.IsLaneSizeD()) \
3460 V(mul, NEON_MUL, vd.IsVector() && !vd.IsLaneSizeD()) \
3461 V(and_, NEON_AND, vd.Is8B() || vd.Is16B()) \
3462 V(orr, NEON_ORR, vd.Is8B() || vd.Is16B()) \
3463 V(orn, NEON_ORN, vd.Is8B() || vd.Is16B()) \
3464 V(eor, NEON_EOR, vd.Is8B() || vd.Is16B()) \
3465 V(bic, NEON_BIC, vd.Is8B() || vd.Is16B()) \
3466 V(bit, NEON_BIT, vd.Is8B() || vd.Is16B()) \
3467 V(bif, NEON_BIF, vd.Is8B() || vd.Is16B()) \
3468 V(bsl, NEON_BSL, vd.Is8B() || vd.Is16B()) \
3469 V(pmul, NEON_PMUL, vd.Is8B() || vd.Is16B()) \
3470 V(uqadd, NEON_UQADD, true) \
3471 V(sqadd, NEON_SQADD, true) \
3472 V(uqsub, NEON_UQSUB, true) \
3473 V(sqsub, NEON_SQSUB, true) \
3474 V(sqshl, NEON_SQSHL, true) \
3475 V(uqshl, NEON_UQSHL, true) \
3476 V(sqrshl, NEON_SQRSHL, true) \
3477 V(uqrshl, NEON_UQRSHL, true)
3478
3479 #define DEFINE_ASM_FUNC(FN, OP, AS) \
3480 void Assembler::FN(const VRegister& vd, const VRegister& vn, \
3481 const VRegister& vm) { \
3482 DCHECK(AS); \
3483 NEON3Same(vd, vn, vm, OP); \
3484 }
3485 NEON_3SAME_LIST(DEFINE_ASM_FUNC)
3486 #undef DEFINE_ASM_FUNC
3487
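For reference, expanding DEFINE_ASM_FUNC for the first list entry, V(add, NEON_ADD, vd.IsVector() || vd.Is1D()), yields:

void Assembler::add(const VRegister& vd, const VRegister& vn,
                    const VRegister& vm) {
  DCHECK(vd.IsVector() || vd.Is1D());
  NEON3Same(vd, vn, vm, NEON_ADD);
}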
3488 #define NEON_FP3SAME_LIST(V) \
3489 V(fadd, NEON_FADD, FADD) \
3490 V(fsub, NEON_FSUB, FSUB) \
3491 V(fmul, NEON_FMUL, FMUL) \
3492 V(fdiv, NEON_FDIV, FDIV) \
3493 V(fmax, NEON_FMAX, FMAX) \
3494 V(fmaxnm, NEON_FMAXNM, FMAXNM) \
3495 V(fmin, NEON_FMIN, FMIN) \
3496 V(fminnm, NEON_FMINNM, FMINNM) \
3497 V(fmulx, NEON_FMULX, NEON_FMULX_scalar) \
3498 V(frecps, NEON_FRECPS, NEON_FRECPS_scalar) \
3499 V(frsqrts, NEON_FRSQRTS, NEON_FRSQRTS_scalar) \
3500 V(fabd, NEON_FABD, NEON_FABD_scalar) \
3501 V(fmla, NEON_FMLA, 0) \
3502 V(fmls, NEON_FMLS, 0) \
3503 V(facge, NEON_FACGE, NEON_FACGE_scalar) \
3504 V(facgt, NEON_FACGT, NEON_FACGT_scalar) \
3505 V(fcmeq, NEON_FCMEQ, NEON_FCMEQ_scalar) \
3506 V(fcmge, NEON_FCMGE, NEON_FCMGE_scalar) \
3507 V(fcmgt, NEON_FCMGT, NEON_FCMGT_scalar) \
3508 V(faddp, NEON_FADDP, 0) \
3509 V(fmaxp, NEON_FMAXP, 0) \
3510 V(fminp, NEON_FMINP, 0) \
3511 V(fmaxnmp, NEON_FMAXNMP, 0) \
3512 V(fminnmp, NEON_FMINNMP, 0)
3513
3514 #define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP) \
3515 void Assembler::FN(const VRegister& vd, const VRegister& vn, \
3516 const VRegister& vm) { \
3517 Instr op; \
3518 if ((SCA_OP != 0) && vd.IsScalar()) { \
3519 DCHECK(vd.Is1S() || vd.Is1D()); \
3520 op = SCA_OP; \
3521 } else { \
3522 DCHECK(vd.IsVector()); \
3523 DCHECK(vd.Is2S() || vd.Is2D() || vd.Is4S()); \
3524 op = VEC_OP; \
3525 } \
3526 NEONFP3Same(vd, vn, vm, op); \
3527 }
3528 NEON_FP3SAME_LIST(DEFINE_ASM_FUNC)
3529 #undef DEFINE_ASM_FUNC
3530
3531 void Assembler::addp(const VRegister& vd, const VRegister& vn) {
3532 DCHECK((vd.Is1D() && vn.Is2D()));
3533 Emit(SFormat(vd) | NEON_ADDP_scalar | Rn(vn) | Rd(vd));
3534 }
3535
3536 void Assembler::faddp(const VRegister& vd, const VRegister& vn) {
3537 DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
3538 Emit(FPFormat(vd) | NEON_FADDP_scalar | Rn(vn) | Rd(vd));
3539 }
3540
3541 void Assembler::fmaxp(const VRegister& vd, const VRegister& vn) {
3542 DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
3543 Emit(FPFormat(vd) | NEON_FMAXP_scalar | Rn(vn) | Rd(vd));
3544 }
3545
3546 void Assembler::fminp(const VRegister& vd, const VRegister& vn) {
3547 DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
3548 Emit(FPFormat(vd) | NEON_FMINP_scalar | Rn(vn) | Rd(vd));
3549 }
3550
3551 void Assembler::fmaxnmp(const VRegister& vd, const VRegister& vn) {
3552 DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
3553 Emit(FPFormat(vd) | NEON_FMAXNMP_scalar | Rn(vn) | Rd(vd));
3554 }
3555
3556 void Assembler::fminnmp(const VRegister& vd, const VRegister& vn) {
3557 DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
3558 Emit(FPFormat(vd) | NEON_FMINNMP_scalar | Rn(vn) | Rd(vd));
3559 }
3560
3561 void Assembler::orr(const VRegister& vd, const int imm8, const int left_shift) {
3562 NEONModifiedImmShiftLsl(vd, imm8, left_shift, NEONModifiedImmediate_ORR);
3563 }
3564
3565 void Assembler::mov(const VRegister& vd, const VRegister& vn) {
3566 DCHECK(AreSameFormat(vd, vn));
3567 if (vd.IsD()) {
3568 orr(vd.V8B(), vn.V8B(), vn.V8B());
3569 } else {
3570 DCHECK(vd.IsQ());
3571 orr(vd.V16B(), vn.V16B(), vn.V16B());
3572 }
3573 }
3574
3575 void Assembler::bic(const VRegister& vd, const int imm8, const int left_shift) {
3576 NEONModifiedImmShiftLsl(vd, imm8, left_shift, NEONModifiedImmediate_BIC);
3577 }
3578
3579 void Assembler::movi(const VRegister& vd, const uint64_t imm, Shift shift,
3580 const int shift_amount) {
3581 DCHECK((shift == LSL) || (shift == MSL));
3582 if (vd.Is2D() || vd.Is1D()) {
3583 DCHECK_EQ(shift_amount, 0);
3584 int imm8 = 0;
3585 for (int i = 0; i < 8; ++i) {
3586 int byte = (imm >> (i * 8)) & 0xff;
3587 DCHECK((byte == 0) || (byte == 0xff));
3588 if (byte == 0xff) {
3589 imm8 |= (1 << i);
3590 }
3591 }
3592 Instr q = vd.Is2D() ? NEON_Q : 0;
3593 Emit(q | NEONModImmOp(1) | NEONModifiedImmediate_MOVI |
3594 ImmNEONabcdefgh(imm8) | NEONCmode(0xe) | Rd(vd));
3595 } else if (shift == LSL) {
3596 NEONModifiedImmShiftLsl(vd, static_cast<int>(imm), shift_amount,
3597 NEONModifiedImmediate_MOVI);
3598 } else {
3599 NEONModifiedImmShiftMsl(vd, static_cast<int>(imm), shift_amount,
3600 NEONModifiedImmediate_MOVI);
3601 }
3602 }
3603
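In the 2D/1D branch above, every byte of imm must be 0x00 or 0xff, and bit i of imm8 records whether byte i is 0xff. A worked example, assuming a hypothetical Assembler instance assm and the default LSL #0 arguments:

// Bytes 0, 2, 4 and 6 are 0xff, so imm8 = 0b01010101 = 0x55 (cmode 0xe).
assm.movi(v0.V2D(), 0x00ff00ff00ff00ffULL);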
3604 void Assembler::mvn(const VRegister& vd, const VRegister& vn) {
3605 DCHECK(AreSameFormat(vd, vn));
3606 if (vd.IsD()) {
3607 not_(vd.V8B(), vn.V8B());
3608 } else {
3609 DCHECK(vd.IsQ());
3610 not_(vd.V16B(), vn.V16B());
3611 }
3612 }
3613
3614 void Assembler::mvni(const VRegister& vd, const int imm8, Shift shift,
3615 const int shift_amount) {
3616 DCHECK((shift == LSL) || (shift == MSL));
3617 if (shift == LSL) {
3618 NEONModifiedImmShiftLsl(vd, imm8, shift_amount, NEONModifiedImmediate_MVNI);
3619 } else {
3620 NEONModifiedImmShiftMsl(vd, imm8, shift_amount, NEONModifiedImmediate_MVNI);
3621 }
3622 }
3623
3624 void Assembler::NEONFPByElement(const VRegister& vd, const VRegister& vn,
3625 const VRegister& vm, int vm_index,
3626 NEONByIndexedElementOp vop) {
3627 DCHECK(AreSameFormat(vd, vn));
3628 DCHECK((vd.Is2S() && vm.Is1S()) || (vd.Is4S() && vm.Is1S()) ||
3629 (vd.Is1S() && vm.Is1S()) || (vd.Is2D() && vm.Is1D()) ||
3630 (vd.Is1D() && vm.Is1D()));
3631 DCHECK((vm.Is1S() && (vm_index < 4)) || (vm.Is1D() && (vm_index < 2)));
3632
3633 Instr op = vop;
3634 int index_num_bits = vm.Is1S() ? 2 : 1;
3635 if (vd.IsScalar()) {
3636 op |= NEON_Q | NEONScalar;
3637 }
3638
3639 Emit(FPFormat(vd) | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) |
3640 Rn(vn) | Rd(vd));
3641 }
3642
3643 void Assembler::NEONByElement(const VRegister& vd, const VRegister& vn,
3644 const VRegister& vm, int vm_index,
3645 NEONByIndexedElementOp vop) {
3646 DCHECK(AreSameFormat(vd, vn));
3647 DCHECK((vd.Is4H() && vm.Is1H()) || (vd.Is8H() && vm.Is1H()) ||
3648 (vd.Is1H() && vm.Is1H()) || (vd.Is2S() && vm.Is1S()) ||
3649 (vd.Is4S() && vm.Is1S()) || (vd.Is1S() && vm.Is1S()));
3650 DCHECK((vm.Is1H() && (vm.code() < 16) && (vm_index < 8)) ||
3651 (vm.Is1S() && (vm_index < 4)));
3652
3653 Instr format, op = vop;
3654 int index_num_bits = vm.Is1H() ? 3 : 2;
3655 if (vd.IsScalar()) {
3656 op |= NEONScalar | NEON_Q;
3657 format = SFormat(vn);
3658 } else {
3659 format = VFormat(vn);
3660 }
3661 Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) |
3662 Rd(vd));
3663 }
3664
3665 void Assembler::NEONByElementL(const VRegister& vd, const VRegister& vn,
3666 const VRegister& vm, int vm_index,
3667 NEONByIndexedElementOp vop) {
3668 DCHECK((vd.Is4S() && vn.Is4H() && vm.Is1H()) ||
3669 (vd.Is4S() && vn.Is8H() && vm.Is1H()) ||
3670 (vd.Is1S() && vn.Is1H() && vm.Is1H()) ||
3671 (vd.Is2D() && vn.Is2S() && vm.Is1S()) ||
3672 (vd.Is2D() && vn.Is4S() && vm.Is1S()) ||
3673 (vd.Is1D() && vn.Is1S() && vm.Is1S()));
3674
3675 DCHECK((vm.Is1H() && (vm.code() < 16) && (vm_index < 8)) ||
3676 (vm.Is1S() && (vm_index < 4)));
3677
3678 Instr format, op = vop;
3679 int index_num_bits = vm.Is1H() ? 3 : 2;
3680 if (vd.IsScalar()) {
3681 op |= NEONScalar | NEON_Q;
3682 format = SFormat(vn);
3683 } else {
3684 format = VFormat(vn);
3685 }
3686 Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) |
3687 Rd(vd));
3688 }
3689
3690 #define NEON_BYELEMENT_LIST(V) \
3691 V(mul, NEON_MUL_byelement, vn.IsVector()) \
3692 V(mla, NEON_MLA_byelement, vn.IsVector()) \
3693 V(mls, NEON_MLS_byelement, vn.IsVector()) \
3694 V(sqdmulh, NEON_SQDMULH_byelement, true) \
3695 V(sqrdmulh, NEON_SQRDMULH_byelement, true)
3696
3697 #define DEFINE_ASM_FUNC(FN, OP, AS) \
3698 void Assembler::FN(const VRegister& vd, const VRegister& vn, \
3699 const VRegister& vm, int vm_index) { \
3700 DCHECK(AS); \
3701 NEONByElement(vd, vn, vm, vm_index, OP); \
3702 }
3703 NEON_BYELEMENT_LIST(DEFINE_ASM_FUNC)
3704 #undef DEFINE_ASM_FUNC
3705
3706 #define NEON_FPBYELEMENT_LIST(V) \
3707 V(fmul, NEON_FMUL_byelement) \
3708 V(fmla, NEON_FMLA_byelement) \
3709 V(fmls, NEON_FMLS_byelement) \
3710 V(fmulx, NEON_FMULX_byelement)
3711
3712 #define DEFINE_ASM_FUNC(FN, OP) \
3713 void Assembler::FN(const VRegister& vd, const VRegister& vn, \
3714 const VRegister& vm, int vm_index) { \
3715 NEONFPByElement(vd, vn, vm, vm_index, OP); \
3716 }
3717 NEON_FPBYELEMENT_LIST(DEFINE_ASM_FUNC)
3718 #undef DEFINE_ASM_FUNC
3719
3720 #define NEON_BYELEMENT_LONG_LIST(V) \
3721 V(sqdmull, NEON_SQDMULL_byelement, vn.IsScalar() || vn.IsD()) \
3722 V(sqdmull2, NEON_SQDMULL_byelement, vn.IsVector() && vn.IsQ()) \
3723 V(sqdmlal, NEON_SQDMLAL_byelement, vn.IsScalar() || vn.IsD()) \
3724 V(sqdmlal2, NEON_SQDMLAL_byelement, vn.IsVector() && vn.IsQ()) \
3725 V(sqdmlsl, NEON_SQDMLSL_byelement, vn.IsScalar() || vn.IsD()) \
3726 V(sqdmlsl2, NEON_SQDMLSL_byelement, vn.IsVector() && vn.IsQ()) \
3727 V(smull, NEON_SMULL_byelement, vn.IsVector() && vn.IsD()) \
3728 V(smull2, NEON_SMULL_byelement, vn.IsVector() && vn.IsQ()) \
3729 V(umull, NEON_UMULL_byelement, vn.IsVector() && vn.IsD()) \
3730 V(umull2, NEON_UMULL_byelement, vn.IsVector() && vn.IsQ()) \
3731 V(smlal, NEON_SMLAL_byelement, vn.IsVector() && vn.IsD()) \
3732 V(smlal2, NEON_SMLAL_byelement, vn.IsVector() && vn.IsQ()) \
3733 V(umlal, NEON_UMLAL_byelement, vn.IsVector() && vn.IsD()) \
3734 V(umlal2, NEON_UMLAL_byelement, vn.IsVector() && vn.IsQ()) \
3735 V(smlsl, NEON_SMLSL_byelement, vn.IsVector() && vn.IsD()) \
3736 V(smlsl2, NEON_SMLSL_byelement, vn.IsVector() && vn.IsQ()) \
3737 V(umlsl, NEON_UMLSL_byelement, vn.IsVector() && vn.IsD()) \
3738 V(umlsl2, NEON_UMLSL_byelement, vn.IsVector() && vn.IsQ())
3739
3740 #define DEFINE_ASM_FUNC(FN, OP, AS) \
3741 void Assembler::FN(const VRegister& vd, const VRegister& vn, \
3742 const VRegister& vm, int vm_index) { \
3743 DCHECK(AS); \
3744 NEONByElementL(vd, vn, vm, vm_index, OP); \
3745 }
3746 NEON_BYELEMENT_LONG_LIST(DEFINE_ASM_FUNC)
3747 #undef DEFINE_ASM_FUNC
3748
3749 void Assembler::suqadd(const VRegister& vd, const VRegister& vn) {
3750 NEON2RegMisc(vd, vn, NEON_SUQADD);
3751 }
3752
3753 void Assembler::usqadd(const VRegister& vd, const VRegister& vn) {
3754 NEON2RegMisc(vd, vn, NEON_USQADD);
3755 }
3756
3757 void Assembler::abs(const VRegister& vd, const VRegister& vn) {
3758 DCHECK(vd.IsVector() || vd.Is1D());
3759 NEON2RegMisc(vd, vn, NEON_ABS);
3760 }
3761
3762 void Assembler::sqabs(const VRegister& vd, const VRegister& vn) {
3763 NEON2RegMisc(vd, vn, NEON_SQABS);
3764 }
3765
3766 void Assembler::neg(const VRegister& vd, const VRegister& vn) {
3767 DCHECK(vd.IsVector() || vd.Is1D());
3768 NEON2RegMisc(vd, vn, NEON_NEG);
3769 }
3770
3771 void Assembler::sqneg(const VRegister& vd, const VRegister& vn) {
3772 NEON2RegMisc(vd, vn, NEON_SQNEG);
3773 }
3774
3775 void Assembler::NEONXtn(const VRegister& vd, const VRegister& vn,
3776 NEON2RegMiscOp vop) {
3777 Instr format, op = vop;
3778 if (vd.IsScalar()) {
3779 DCHECK((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) ||
3780 (vd.Is1S() && vn.Is1D()));
3781 op |= NEON_Q | NEONScalar;
3782 format = SFormat(vd);
3783 } else {
3784 DCHECK((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
3785 (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
3786 (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
3787 format = VFormat(vd);
3788 }
3789 Emit(format | op | Rn(vn) | Rd(vd));
3790 }
3791
3792 void Assembler::xtn(const VRegister& vd, const VRegister& vn) {
3793 DCHECK(vd.IsVector() && vd.IsD());
3794 NEONXtn(vd, vn, NEON_XTN);
3795 }
3796
3797 void Assembler::xtn2(const VRegister& vd, const VRegister& vn) {
3798 DCHECK(vd.IsVector() && vd.IsQ());
3799 NEONXtn(vd, vn, NEON_XTN);
3800 }
3801
3802 void Assembler::sqxtn(const VRegister& vd, const VRegister& vn) {
3803 DCHECK(vd.IsScalar() || vd.IsD());
3804 NEONXtn(vd, vn, NEON_SQXTN);
3805 }
3806
3807 void Assembler::sqxtn2(const VRegister& vd, const VRegister& vn) {
3808 DCHECK(vd.IsVector() && vd.IsQ());
3809 NEONXtn(vd, vn, NEON_SQXTN);
3810 }
3811
3812 void Assembler::sqxtun(const VRegister& vd, const VRegister& vn) {
3813 DCHECK(vd.IsScalar() || vd.IsD());
3814 NEONXtn(vd, vn, NEON_SQXTUN);
3815 }
3816
3817 void Assembler::sqxtun2(const VRegister& vd, const VRegister& vn) {
3818 DCHECK(vd.IsVector() && vd.IsQ());
3819 NEONXtn(vd, vn, NEON_SQXTUN);
3820 }
3821
3822 void Assembler::uqxtn(const VRegister& vd, const VRegister& vn) {
3823 DCHECK(vd.IsScalar() || vd.IsD());
3824 NEONXtn(vd, vn, NEON_UQXTN);
3825 }
3826
3827 void Assembler::uqxtn2(const VRegister& vd, const VRegister& vn) {
3828 DCHECK(vd.IsVector() && vd.IsQ());
3829 NEONXtn(vd, vn, NEON_UQXTN);
3830 }
3831
3832 // NEON NOT and RBIT are distinguished by bit 22, the bottom bit of "size".
3833 void Assembler::not_(const VRegister& vd, const VRegister& vn) {
3834 DCHECK(AreSameFormat(vd, vn));
3835 DCHECK(vd.Is8B() || vd.Is16B());
3836 Emit(VFormat(vd) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
3837 }
3838
3839 void Assembler::rbit(const VRegister& vd, const VRegister& vn) {
3840 DCHECK(AreSameFormat(vd, vn));
3841 DCHECK(vd.Is8B() || vd.Is16B());
3842 Emit(VFormat(vn) | (1 << NEONSize_offset) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
3843 }
3844
3845 void Assembler::ext(const VRegister& vd, const VRegister& vn,
3846 const VRegister& vm, int index) {
3847 DCHECK(AreSameFormat(vd, vn, vm));
3848 DCHECK(vd.Is8B() || vd.Is16B());
3849 DCHECK((0 <= index) && (index < vd.LaneCount()));
3850 Emit(VFormat(vd) | NEON_EXT | Rm(vm) | ImmNEONExt(index) | Rn(vn) | Rd(vd));
3851 }
3852
3853 void Assembler::dup(const VRegister& vd, const VRegister& vn, int vn_index) {
3854 Instr q, scalar;
3855
3856 // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
3857 // number of lanes, and T is b, h, s or d.
3858 int lane_size = vn.LaneSizeInBytes();
3859 NEONFormatField format;
3860 switch (lane_size) {
3861 case 1:
3862 format = NEON_16B;
3863 break;
3864 case 2:
3865 format = NEON_8H;
3866 break;
3867 case 4:
3868 format = NEON_4S;
3869 break;
3870 default:
3871 DCHECK_EQ(lane_size, 8);
3872 format = NEON_2D;
3873 break;
3874 }
3875
3876 if (vd.IsScalar()) {
3877 q = NEON_Q;
3878 scalar = NEONScalar;
3879 } else {
3880 DCHECK(!vd.Is1D());
3881 q = vd.IsD() ? 0 : NEON_Q;
3882 scalar = 0;
3883 }
3884 Emit(q | scalar | NEON_DUP_ELEMENT | ImmNEON5(format, vn_index) | Rn(vn) |
3885 Rd(vd));
3886 }
2123 3887
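A usage sketch for the two forms handled above, assuming a hypothetical Assembler instance assm:

assm.dup(v0.V4S(), v1.V4S(), 2);  // Vector form: broadcast lane 2 of v1.
assm.dup(s0, v1.V4S(), 1);        // Scalar form: copy lane 1 of v1 into s0.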
2124 void Assembler::dcptr(Label* label) { 3888 void Assembler::dcptr(Label* label) {
2125 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); 3889 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2126 if (label->is_bound()) { 3890 if (label->is_bound()) {
2127 // The label is bound, so it does not need to be updated and the internal 3891 // The label is bound, so it does not need to be updated and the internal
2128 // reference should be emitted. 3892 // reference should be emitted.
2129 // 3893 //
2130 // In this case, label->pos() returns the offset of the label from the 3894 // In this case, label->pos() returns the offset of the label from the
2131 // start of the buffer. 3895 // start of the buffer.
2132 internal_reference_positions_.push_back(pc_offset()); 3896 internal_reference_positions_.push_back(pc_offset());
(...skipping 24 matching lines...)
2157 offset >>= kInstructionSizeLog2; 3921 offset >>= kInstructionSizeLog2;
2158 DCHECK(is_int32(offset)); 3922 DCHECK(is_int32(offset));
2159 uint32_t high16 = unsigned_bitextract_32(31, 16, offset); 3923 uint32_t high16 = unsigned_bitextract_32(31, 16, offset);
2160 uint32_t low16 = unsigned_bitextract_32(15, 0, offset); 3924 uint32_t low16 = unsigned_bitextract_32(15, 0, offset);
2161 3925
2162 brk(high16); 3926 brk(high16);
2163 brk(low16); 3927 brk(low16);
2164 } 3928 }
2165 } 3929 }
2166 3930
2167
2168 // Note:
2169 // Below, a difference in case for the same letter indicates a 3931 // Below, a difference in case for the same letter indicates a
2170 // negated bit. 3932 // negated bit. If b is 1, then B is 0.
2171 // If b is 1, then B is 0. 3933 uint32_t Assembler::FPToImm8(double imm) {
2172 Instr Assembler::ImmFP32(float imm) {
2173 DCHECK(IsImmFP32(imm));
2174 // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
2175 uint32_t bits = float_to_rawbits(imm);
2176 // bit7: a000.0000
2177 uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
2178 // bit6: 0b00.0000
2179 uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
2180 // bit5_to_0: 00cd.efgh
2181 uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
2182
2183 return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
2184 }
2185
2186
2187 Instr Assembler::ImmFP64(double imm) {
2188 DCHECK(IsImmFP64(imm)); 3934 DCHECK(IsImmFP64(imm));
2189 // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000 3935 // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
2190 // 0000.0000.0000.0000.0000.0000.0000.0000 3936 // 0000.0000.0000.0000.0000.0000.0000.0000
2191 uint64_t bits = double_to_rawbits(imm); 3937 uint64_t bits = bit_cast<uint64_t>(imm);
2192 // bit7: a000.0000 3938 // bit7: a000.0000
2193 uint64_t bit7 = ((bits >> 63) & 0x1) << 7; 3939 uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
2194 // bit6: 0b00.0000 3940 // bit6: 0b00.0000
2195 uint64_t bit6 = ((bits >> 61) & 0x1) << 6; 3941 uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
2196 // bit5_to_0: 00cd.efgh 3942 // bit5_to_0: 00cd.efgh
2197 uint64_t bit5_to_0 = (bits >> 48) & 0x3f; 3943 uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
2198 3944
2199 return static_cast<Instr>((bit7 | bit6 | bit5_to_0) << ImmFP_offset); 3945 return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
2200 } 3946 }
2201 3947
3948 Instr Assembler::ImmFP(double imm) { return FPToImm8(imm) << ImmFP_offset; }
3949 Instr Assembler::ImmNEONFP(double imm) {
3950 return ImmNEONabcdefgh(FPToImm8(imm));
3951 }
2202 3952
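A worked example of the encoding above, not from the patch: 1.0 is 0x3ff0000000000000, so a = 0, B = 0 (hence bit6, which is b, is 1) and cd.efgh = 11.0000, giving imm8 = 0x70, the familiar FMOV (immediate) encoding of #1.0.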
2203 // Code generation helpers. 3953 // Code generation helpers.
2204 void Assembler::MoveWide(const Register& rd, 3954 void Assembler::MoveWide(const Register& rd, uint64_t imm, int shift,
2205 uint64_t imm,
2206 int shift,
2207 MoveWideImmediateOp mov_op) { 3955 MoveWideImmediateOp mov_op) {
2208 // Ignore the top 32 bits of an immediate if we're moving to a W register. 3956 // Ignore the top 32 bits of an immediate if we're moving to a W register.
2209 if (rd.Is32Bits()) { 3957 if (rd.Is32Bits()) {
2210 // Check that the top 32 bits are zero (a positive 32-bit number) or top 3958 // Check that the top 32 bits are zero (a positive 32-bit number) or top
2211 // 33 bits are one (a negative 32-bit number, sign extended to 64 bits). 3959 // 33 bits are one (a negative 32-bit number, sign extended to 64 bits).
2212 DCHECK(((imm >> kWRegSizeInBits) == 0) || 3960 DCHECK(((imm >> kWRegSizeInBits) == 0) ||
2213 ((imm >> (kWRegSizeInBits - 1)) == 0x1ffffffff)); 3961 ((imm >> (kWRegSizeInBits - 1)) == 0x1ffffffff));
2214 imm &= kWRegMask; 3962 imm &= kWRegMask;
2215 } 3963 }
2216 3964
(...skipping 21 matching lines...)
2238 shift = 3; 3986 shift = 3;
2239 } 3987 }
2240 } 3988 }
2241 3989
2242 DCHECK(is_uint16(imm)); 3990 DCHECK(is_uint16(imm));
2243 3991
2244 Emit(SF(rd) | MoveWideImmediateFixed | mov_op | Rd(rd) | 3992 Emit(SF(rd) | MoveWideImmediateFixed | mov_op | Rd(rd) |
2245 ImmMoveWide(static_cast<int>(imm)) | ShiftMoveWide(shift)); 3993 ImmMoveWide(static_cast<int>(imm)) | ShiftMoveWide(shift));
2246 } 3994 }
2247 3995
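For example, a constant with two non-zero half-words is materialized as a movz/movk pair. A sketch, assuming a hypothetical Assembler instance assm:

assm.movz(x0, 0x5678);      // x0 = 0x0000000000005678.
assm.movk(x0, 0x1234, 16);  // x0 = 0x0000000012345678.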
2248 3996 void Assembler::AddSub(const Register& rd, const Register& rn,
2249 void Assembler::AddSub(const Register& rd, 3997 const Operand& operand, FlagsUpdate S, AddSubOp op) {
2250 const Register& rn, 3998 DCHECK_EQ(rd.SizeInBits(), rn.SizeInBits());
2251 const Operand& operand,
2252 FlagsUpdate S,
2253 AddSubOp op) {
2254 DCHECK(rd.SizeInBits() == rn.SizeInBits());
2255 DCHECK(!operand.NeedsRelocation(this)); 3999 DCHECK(!operand.NeedsRelocation(this));
2256 if (operand.IsImmediate()) { 4000 if (operand.IsImmediate()) {
2257 int64_t immediate = operand.ImmediateValue(); 4001 int64_t immediate = operand.ImmediateValue();
2258 DCHECK(IsImmAddSub(immediate)); 4002 DCHECK(IsImmAddSub(immediate));
2259 Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd); 4003 Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
2260 Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) | 4004 Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
2261 ImmAddSub(static_cast<int>(immediate)) | dest_reg | RnSP(rn)); 4005 ImmAddSub(static_cast<int>(immediate)) | dest_reg | RnSP(rn));
2262 } else if (operand.IsShiftedRegister()) { 4006 } else if (operand.IsShiftedRegister()) {
2263 DCHECK(operand.reg().SizeInBits() == rd.SizeInBits()); 4007 DCHECK_EQ(operand.reg().SizeInBits(), rd.SizeInBits());
2264 DCHECK(operand.shift() != ROR); 4008 DCHECK_NE(operand.shift(), ROR);
2265 4009
2266 // For instructions of the form: 4010 // For instructions of the form:
2267 // add/sub wsp, <Wn>, <Wm> [, LSL #0-3 ] 4011 // add/sub wsp, <Wn>, <Wm> [, LSL #0-3 ]
2268 // add/sub <Wd>, wsp, <Wm> [, LSL #0-3 ] 4012 // add/sub <Wd>, wsp, <Wm> [, LSL #0-3 ]
2269 // add/sub wsp, wsp, <Wm> [, LSL #0-3 ] 4013 // add/sub wsp, wsp, <Wm> [, LSL #0-3 ]
2270 // adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ] 4014 // adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
2271 // or their 64-bit register equivalents, convert the operand from shifted to 4015 // or their 64-bit register equivalents, convert the operand from shifted to
2272 // extended register mode, and emit an add/sub extended instruction. 4016 // extended register mode, and emit an add/sub extended instruction.
2273 if (rn.IsSP() || rd.IsSP()) { 4017 if (rn.IsSP() || rd.IsSP()) {
2274 DCHECK(!(rd.IsSP() && (S == SetFlags))); 4018 DCHECK(!(rd.IsSP() && (S == SetFlags)));
2275 DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S, 4019 DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
2276 AddSubExtendedFixed | op); 4020 AddSubExtendedFixed | op);
2277 } else { 4021 } else {
2278 DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op); 4022 DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
2279 } 4023 }
2280 } else { 4024 } else {
2281 DCHECK(operand.IsExtendedRegister()); 4025 DCHECK(operand.IsExtendedRegister());
2282 DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op); 4026 DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
2283 } 4027 }
2284 } 4028 }
2285 4029
2286 4030 void Assembler::AddSubWithCarry(const Register& rd, const Register& rn,
2287 void Assembler::AddSubWithCarry(const Register& rd, 4031 const Operand& operand, FlagsUpdate S,
2288 const Register& rn,
2289 const Operand& operand,
2290 FlagsUpdate S,
2291 AddSubWithCarryOp op) { 4032 AddSubWithCarryOp op) {
2292 DCHECK(rd.SizeInBits() == rn.SizeInBits()); 4033 DCHECK_EQ(rd.SizeInBits(), rn.SizeInBits());
2293 DCHECK(rd.SizeInBits() == operand.reg().SizeInBits()); 4034 DCHECK_EQ(rd.SizeInBits(), operand.reg().SizeInBits());
2294 DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0)); 4035 DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
2295 DCHECK(!operand.NeedsRelocation(this)); 4036 DCHECK(!operand.NeedsRelocation(this));
2296 Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd)); 4037 Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
2297 } 4038 }
2298 4039
2299
2300 void Assembler::hlt(int code) { 4040 void Assembler::hlt(int code) {
2301 DCHECK(is_uint16(code)); 4041 DCHECK(is_uint16(code));
2302 Emit(HLT | ImmException(code)); 4042 Emit(HLT | ImmException(code));
2303 } 4043 }
2304 4044
2305
2306 void Assembler::brk(int code) { 4045 void Assembler::brk(int code) {
2307 DCHECK(is_uint16(code)); 4046 DCHECK(is_uint16(code));
2308 Emit(BRK | ImmException(code)); 4047 Emit(BRK | ImmException(code));
2309 } 4048 }
2310 4049
2311
2312 void Assembler::EmitStringData(const char* string) { 4050 void Assembler::EmitStringData(const char* string) {
2313 size_t len = strlen(string) + 1; 4051 size_t len = strlen(string) + 1;
2314 DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap)); 4052 DCHECK_LE(RoundUp(len, kInstructionSize), static_cast<size_t>(kGap));
2315 EmitData(string, static_cast<int>(len)); 4053 EmitData(string, static_cast<int>(len));
2316 // Pad with NULL characters until pc_ is aligned. 4054 // Pad with NULL characters until pc_ is aligned.
2317 const char pad[] = {'\0', '\0', '\0', '\0'}; 4055 const char pad[] = {'\0', '\0', '\0', '\0'};
2318 STATIC_ASSERT(sizeof(pad) == kInstructionSize); 4056 static_assert(sizeof(pad) == kInstructionSize,
4057 "Size of padding must match instruction size.");
2319 EmitData(pad, RoundUp(pc_offset(), kInstructionSize) - pc_offset()); 4058 EmitData(pad, RoundUp(pc_offset(), kInstructionSize) - pc_offset());
2320 } 4059 }
2321 4060
2322 4061
2323 void Assembler::debug(const char* message, uint32_t code, Instr params) { 4062 void Assembler::debug(const char* message, uint32_t code, Instr params) {
2324 #ifdef USE_SIMULATOR 4063 #ifdef USE_SIMULATOR
2325 // Don't generate simulator specific code if we are building a snapshot, which 4064 // Don't generate simulator specific code if we are building a snapshot, which
2326 // might be run on real hardware. 4065 // might be run on real hardware.
2327 if (!serializer_enabled()) { 4066 if (!serializer_enabled()) {
2328 // The arguments to the debug marker need to be contiguous in memory, so 4067 // The arguments to the debug marker need to be contiguous in memory, so
(...skipping 96 matching lines...)
2425 } 4164 }
2426 4165
2427 4166
2428 void Assembler::DataProcessing1Source(const Register& rd, 4167 void Assembler::DataProcessing1Source(const Register& rd,
2429 const Register& rn, 4168 const Register& rn,
2430 DataProcessing1SourceOp op) { 4169 DataProcessing1SourceOp op) {
2431 DCHECK(rd.SizeInBits() == rn.SizeInBits()); 4170 DCHECK(rd.SizeInBits() == rn.SizeInBits());
2432 Emit(SF(rn) | op | Rn(rn) | Rd(rd)); 4171 Emit(SF(rn) | op | Rn(rn) | Rd(rd));
2433 } 4172 }
2434 4173
2435 4174 void Assembler::FPDataProcessing1Source(const VRegister& vd,
2436 void Assembler::FPDataProcessing1Source(const FPRegister& fd, 4175 const VRegister& vn,
2437 const FPRegister& fn,
2438 FPDataProcessing1SourceOp op) { 4176 FPDataProcessing1SourceOp op) {
2439 Emit(FPType(fn) | op | Rn(fn) | Rd(fd)); 4177 Emit(FPType(vn) | op | Rn(vn) | Rd(vd));
2440 } 4178 }
2441 4179
2442 4180 void Assembler::FPDataProcessing2Source(const VRegister& fd,
2443 void Assembler::FPDataProcessing2Source(const FPRegister& fd, 4181 const VRegister& fn,
2444 const FPRegister& fn, 4182 const VRegister& fm,
2445 const FPRegister& fm,
2446 FPDataProcessing2SourceOp op) { 4183 FPDataProcessing2SourceOp op) {
2447 DCHECK(fd.SizeInBits() == fn.SizeInBits()); 4184 DCHECK(fd.SizeInBits() == fn.SizeInBits());
2448 DCHECK(fd.SizeInBits() == fm.SizeInBits()); 4185 DCHECK(fd.SizeInBits() == fm.SizeInBits());
2449 Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd)); 4186 Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd));
2450 } 4187 }
2451 4188
2452 4189 void Assembler::FPDataProcessing3Source(const VRegister& fd,
2453 void Assembler::FPDataProcessing3Source(const FPRegister& fd, 4190 const VRegister& fn,
2454 const FPRegister& fn, 4191 const VRegister& fm,
2455 const FPRegister& fm, 4192 const VRegister& fa,
2456 const FPRegister& fa,
2457 FPDataProcessing3SourceOp op) { 4193 FPDataProcessing3SourceOp op) {
2458 DCHECK(AreSameSizeAndType(fd, fn, fm, fa)); 4194 DCHECK(AreSameSizeAndType(fd, fn, fm, fa));
2459 Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa)); 4195 Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa));
2460 } 4196 }
2461 4197
4198 void Assembler::NEONModifiedImmShiftLsl(const VRegister& vd, const int imm8,
4199 const int left_shift,
4200 NEONModifiedImmediateOp op) {
4201 DCHECK(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H() || vd.Is2S() ||
4202 vd.Is4S());
4203 DCHECK((left_shift == 0) || (left_shift == 8) || (left_shift == 16) ||
4204 (left_shift == 24));
4205 DCHECK(is_uint8(imm8));
4206
4207 int cmode_1, cmode_2, cmode_3;
4208 if (vd.Is8B() || vd.Is16B()) {
4209 DCHECK_EQ(op, NEONModifiedImmediate_MOVI);
4210 cmode_1 = 1;
4211 cmode_2 = 1;
4212 cmode_3 = 1;
4213 } else {
4214 cmode_1 = (left_shift >> 3) & 1;
4215 cmode_2 = left_shift >> 4;
4216 cmode_3 = 0;
4217 if (vd.Is4H() || vd.Is8H()) {
4218 DCHECK((left_shift == 0) || (left_shift == 8));
4219 cmode_3 = 1;
4220 }
4221 }
4222 int cmode = (cmode_3 << 3) | (cmode_2 << 2) | (cmode_1 << 1);
4223
4224 Instr q = vd.IsQ() ? NEON_Q : 0;
4225
4226 Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
4227 }
4228
4229 void Assembler::NEONModifiedImmShiftMsl(const VRegister& vd, const int imm8,
4230 const int shift_amount,
4231 NEONModifiedImmediateOp op) {
4232 DCHECK(vd.Is2S() || vd.Is4S());
4233 DCHECK((shift_amount == 8) || (shift_amount == 16));
4234 DCHECK(is_uint8(imm8));
4235
4236 int cmode_0 = (shift_amount >> 4) & 1;
4237 int cmode = 0xc | cmode_0;
4238
4239 Instr q = vd.IsQ() ? NEON_Q : 0;
4240
4241 Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
4242 }
2462 4243
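A worked example for the LSL helper above: a call such as movi(v0.V4S(), 0xab, LSL, 16) arrives with left_shift == 16, so cmode_1 = (16 >> 3) & 1 = 0, cmode_2 = 16 >> 4 = 1, cmode_3 = 0, and cmode = 0b0100, the 32-bit 'LSL #16' encoding.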
2463 void Assembler::EmitShift(const Register& rd, 4244 void Assembler::EmitShift(const Register& rd,
2464 const Register& rn, 4245 const Register& rn,
2465 Shift shift, 4246 Shift shift,
2466 unsigned shift_amount) { 4247 unsigned shift_amount) {
2467 switch (shift) { 4248 switch (shift) {
2468 case LSL: 4249 case LSL:
2469 lsl(rd, rn, shift_amount); 4250 lsl(rd, rn, shift_amount);
2470 break; 4251 break;
2471 case LSR: 4252 case LSR:
(...skipping 79 matching lines...)
2551 return is_uint12(immediate) || 4332 return is_uint12(immediate) ||
2552 (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0)); 4333 (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0));
2553 } 4334 }
2554 4335
2555 void Assembler::LoadStore(const CPURegister& rt, 4336 void Assembler::LoadStore(const CPURegister& rt,
2556 const MemOperand& addr, 4337 const MemOperand& addr,
2557 LoadStoreOp op) { 4338 LoadStoreOp op) {
2558 Instr memop = op | Rt(rt) | RnSP(addr.base()); 4339 Instr memop = op | Rt(rt) | RnSP(addr.base());
2559 4340
2560 if (addr.IsImmediateOffset()) { 4341 if (addr.IsImmediateOffset()) {
2561 LSDataSize size = CalcLSDataSize(op); 4342 unsigned size = CalcLSDataSize(op);
2562 if (IsImmLSScaled(addr.offset(), size)) { 4343 if (IsImmLSScaled(addr.offset(), size)) {
2563 int offset = static_cast<int>(addr.offset()); 4344 int offset = static_cast<int>(addr.offset());
2564 // Use the scaled addressing mode. 4345 // Use the scaled addressing mode.
2565 Emit(LoadStoreUnsignedOffsetFixed | memop | 4346 Emit(LoadStoreUnsignedOffsetFixed | memop |
2566 ImmLSUnsigned(offset >> size)); 4347 ImmLSUnsigned(offset >> size));
2567 } else if (IsImmLSUnscaled(addr.offset())) { 4348 } else if (IsImmLSUnscaled(addr.offset())) {
2568 int offset = static_cast<int>(addr.offset()); 4349 int offset = static_cast<int>(addr.offset());
2569 // Use the unscaled addressing mode. 4350 // Use the unscaled addressing mode.
2570 Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset)); 4351 Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
2571 } else { 4352 } else {
(...skipping 32 matching lines...)
2604 UNREACHABLE(); 4385 UNREACHABLE();
2605 } 4386 }
2606 } 4387 }
2607 } 4388 }
2608 4389
2609 4390
2610 bool Assembler::IsImmLSUnscaled(int64_t offset) { 4391 bool Assembler::IsImmLSUnscaled(int64_t offset) {
2611 return is_int9(offset); 4392 return is_int9(offset);
2612 } 4393 }
2613 4394
2614 4395 bool Assembler::IsImmLSScaled(int64_t offset, unsigned size) {
2615 bool Assembler::IsImmLSScaled(int64_t offset, LSDataSize size) {
2616 bool offset_is_size_multiple = (((offset >> size) << size) == offset); 4396 bool offset_is_size_multiple = (((offset >> size) << size) == offset);
2617 return offset_is_size_multiple && is_uint12(offset >> size); 4397 return offset_is_size_multiple && is_uint12(offset >> size);
2618 } 4398 }
2619 4399
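A worked example for the predicate above, with size == 3 (a 64-bit access):

// IsImmLSScaled(32760, 3) -> true:  32760 == 4095 * 8 and is_uint12(4095).
// IsImmLSScaled(32761, 3) -> false: not a multiple of 8.
// IsImmLSScaled(-8, 3)    -> false: the scaled immediate is unsigned, so a
//                            negative offset must use the unscaled int9 form.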
2620 4400 bool Assembler::IsImmLSPair(int64_t offset, unsigned size) {
2621 bool Assembler::IsImmLSPair(int64_t offset, LSDataSize size) {
2622 bool offset_is_size_multiple = (((offset >> size) << size) == offset); 4401 bool offset_is_size_multiple = (((offset >> size) << size) == offset);
2623 return offset_is_size_multiple && is_int7(offset >> size); 4402 return offset_is_size_multiple && is_int7(offset >> size);
2624 } 4403 }
2625 4404
2626 4405
2627 bool Assembler::IsImmLLiteral(int64_t offset) { 4406 bool Assembler::IsImmLLiteral(int64_t offset) {
2628 int inst_size = static_cast<int>(kInstructionSizeLog2); 4407 int inst_size = static_cast<int>(kInstructionSizeLog2);
2629 bool offset_is_inst_multiple = 4408 bool offset_is_inst_multiple =
2630 (((offset >> inst_size) << inst_size) == offset); 4409 (((offset >> inst_size) << inst_size) == offset);
2631 return offset_is_inst_multiple && is_intn(offset, ImmLLiteral_width); 4410 return offset_is_inst_multiple && is_intn(offset, ImmLLiteral_width);
(...skipping 210 matching lines...)
2842 4621
2843 4622
2844 bool Assembler::IsImmConditionalCompare(int64_t immediate) { 4623 bool Assembler::IsImmConditionalCompare(int64_t immediate) {
2845 return is_uint5(immediate); 4624 return is_uint5(immediate);
2846 } 4625 }
2847 4626
2848 4627
2849 bool Assembler::IsImmFP32(float imm) { 4628 bool Assembler::IsImmFP32(float imm) {
2850 // Valid values will have the form: 4629 // Valid values will have the form:
2851 // aBbb.bbbc.defg.h000.0000.0000.0000.0000 4630 // aBbb.bbbc.defg.h000.0000.0000.0000.0000
2852 uint32_t bits = float_to_rawbits(imm); 4631 uint32_t bits = bit_cast<uint32_t>(imm);
2853 // bits[19..0] are cleared. 4632 // bits[19..0] are cleared.
2854 if ((bits & 0x7ffff) != 0) { 4633 if ((bits & 0x7ffff) != 0) {
2855 return false; 4634 return false;
2856 } 4635 }
2857 4636
2858 // bits[29..25] are all set or all cleared. 4637 // bits[29..25] are all set or all cleared.
2859 uint32_t b_pattern = (bits >> 16) & 0x3e00; 4638 uint32_t b_pattern = (bits >> 16) & 0x3e00;
2860 if (b_pattern != 0 && b_pattern != 0x3e00) { 4639 if (b_pattern != 0 && b_pattern != 0x3e00) {
2861 return false; 4640 return false;
2862 } 4641 }
2863 4642
2864 // bit[30] and bit[29] are opposite. 4643 // bit[30] and bit[29] are opposite.
2865 if (((bits ^ (bits << 1)) & 0x40000000) == 0) { 4644 if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
2866 return false; 4645 return false;
2867 } 4646 }
2868 4647
2869 return true; 4648 return true;
2870 } 4649 }
2871 4650
2872 4651
2873 bool Assembler::IsImmFP64(double imm) { 4652 bool Assembler::IsImmFP64(double imm) {
2874 // Valid values will have the form: 4653 // Valid values will have the form:
2875 // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000 4654 // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
2876 // 0000.0000.0000.0000.0000.0000.0000.0000 4655 // 0000.0000.0000.0000.0000.0000.0000.0000
2877 uint64_t bits = double_to_rawbits(imm); 4656 uint64_t bits = bit_cast<uint64_t>(imm);
2878 // bits[47..0] are cleared. 4657 // bits[47..0] are cleared.
2879 if ((bits & 0xffffffffffffL) != 0) { 4658 if ((bits & 0xffffffffffffL) != 0) {
2880 return false; 4659 return false;
2881 } 4660 }
2882 4661
2883 // bits[61..54] are all set or all cleared. 4662 // bits[61..54] are all set or all cleared.
2884 uint32_t b_pattern = (bits >> 48) & 0x3fc0; 4663 uint32_t b_pattern = (bits >> 48) & 0x3fc0;
2885 if (b_pattern != 0 && b_pattern != 0x3fc0) { 4664 if (b_pattern != 0 && b_pattern != 0x3fc0) {
2886 return false; 4665 return false;
2887 } 4666 }
(...skipping 330 matching lines...)
3218 movk(scratch, (target_offset >> 32) & 0xFFFF, 32); 4997 movk(scratch, (target_offset >> 32) & 0xFFFF, 32);
3219 DCHECK((target_offset >> 48) == 0); 4998 DCHECK((target_offset >> 48) == 0);
3220 add(rd, rd, scratch); 4999 add(rd, rd, scratch);
3221 } 5000 }
3222 5001
3223 5002
3224 } // namespace internal 5003 } // namespace internal
3225 } // namespace v8 5004 } // namespace v8
3226 5005
3227 #endif // V8_TARGET_ARCH_ARM64 5006 #endif // V8_TARGET_ARCH_ARM64