OLD | NEW |
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
2 // All Rights Reserved. | 2 // All Rights Reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
10 // | 10 // |
(...skipping 558 matching lines...)
569 // One byte opcode for nop. | 569 // One byte opcode for nop. |
570 static const byte kNopByte = 0x90; | 570 static const byte kNopByte = 0x90; |
571 | 571 |
572 // One byte prefix for a short conditional jump. | 572 // One byte prefix for a short conditional jump. |
573 static const byte kJccShortPrefix = 0x70; | 573 static const byte kJccShortPrefix = 0x70; |
574 static const byte kJncShortOpcode = kJccShortPrefix | not_carry; | 574 static const byte kJncShortOpcode = kJccShortPrefix | not_carry; |
575 static const byte kJcShortOpcode = kJccShortPrefix | carry; | 575 static const byte kJcShortOpcode = kJccShortPrefix | carry; |
576 static const byte kJnzShortOpcode = kJccShortPrefix | not_zero; | 576 static const byte kJnzShortOpcode = kJccShortPrefix | not_zero; |
577 static const byte kJzShortOpcode = kJccShortPrefix | zero; | 577 static const byte kJzShortOpcode = kJccShortPrefix | zero; |
578 | 578 |
| 579 // VEX prefix encodings. |
| 580 enum SIMDPrefix { kNone = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 }; |
| 581 enum VectorLength { kL128 = 0x0, kL256 = 0x4, kLIG = kL128, kLZ = kL128 }; |
| 582 enum VexW { kW0 = 0x0, kW1 = 0x80, kWIG = kW0 }; |
| 583 enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 }; |
579 | 584 |
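The enum values introduced above map directly onto the bit fields of the two- and three-byte VEX prefixes. As a minimal, self-contained sketch of that packing (assuming the standard Intel SDM field layout; the helper names below are illustrative and are not the Assembler's own emit_vex* members):

  #include <cstdint>

  enum SIMDPrefix { kNone = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
  enum VectorLength { kL128 = 0x0, kL256 = 0x4, kLIG = kL128, kLZ = kL128 };
  enum VexW { kW0 = 0x0, kW1 = 0x80, kWIG = kW0 };
  enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 };

  // Two-byte form: 0xC5, then [~R | ~vvvv | L | pp].
  uint8_t vex2_byte1(int r, int vvvv, VectorLength l, SIMDPrefix pp) {
    return ((~r & 1) << 7) | ((~vvvv & 0xf) << 3) | l | pp;
  }

  // Three-byte form: 0xC4, then [~R | ~X | ~B | m-mmmm], then [W | ~vvvv | L | pp].
  // The enum values are pre-positioned: kW1 is bit 7, kL256 is bit 2, pp is bits 1:0.
  uint8_t vex3_byte1(int r, int x, int b, LeadingOpcode m) {
    return ((~r & 1) << 7) | ((~x & 1) << 6) | ((~b & 1) << 5) | m;
  }
  uint8_t vex3_byte2(VexW w, int vvvv, VectorLength l, SIMDPrefix pp) {
    return w | ((~vvvv & 0xf) << 3) | l | pp;
  }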
580 // --------------------------------------------------------------------------- | 585 // --------------------------------------------------------------------------- |
581 // Code generation | 586 // Code generation |
582 // | 587 // |
583 // Function names correspond one-to-one to x64 instruction mnemonics. | 588 // Function names correspond one-to-one to x64 instruction mnemonics. |
584 // Unless specified otherwise, instructions operate on 64-bit operands. | 589 // Unless specified otherwise, instructions operate on 64-bit operands. |
585 // | 590 // |
586 // If we need versions of an assembly instruction that operate on different | 591 // If we need versions of an assembly instruction that operate on different |
587 // width arguments, we add a single-letter suffix specifying the width. | 592 // width arguments, we add a single-letter suffix specifying the width. |
588 // This is done for the following instructions: mov, cmp, inc, dec, | 593 // This is done for the following instructions: mov, cmp, inc, dec, |
(...skipping 726 matching lines...)
1315 vsd(0x5f, dst, src1, src2); | 1320 vsd(0x5f, dst, src1, src2); |
1316 } | 1321 } |
1317 void vminsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { | 1322 void vminsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { |
1318 vsd(0x5d, dst, src1, src2); | 1323 vsd(0x5d, dst, src1, src2); |
1319 } | 1324 } |
1320 void vminsd(XMMRegister dst, XMMRegister src1, const Operand& src2) { | 1325 void vminsd(XMMRegister dst, XMMRegister src1, const Operand& src2) { |
1321 vsd(0x5d, dst, src1, src2); | 1326 vsd(0x5d, dst, src1, src2); |
1322 } | 1327 } |
1323 void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, Register src2) { | 1328 void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, Register src2) { |
1324 XMMRegister isrc2 = {src2.code()}; | 1329 XMMRegister isrc2 = {src2.code()}; |
1325 vsd(0x2a, dst, src1, isrc2); | 1330 vsd(0x2a, dst, src1, isrc2, kF2, k0F, kW0); |
1326 } | 1331 } |
1327 void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) { | 1332 void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) { |
1328 vsd(0x2a, dst, src1, src2); | 1333 vsd(0x2a, dst, src1, src2, kF2, k0F, kW0); |
1329 } | 1334 } |
1330 void vucomisd(XMMRegister dst, XMMRegister src); | 1335 void vcvttsd2si(Register dst, XMMRegister src) { |
1331 void vucomisd(XMMRegister dst, const Operand& src); | 1336 XMMRegister idst = {dst.code()}; |
1332 void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2); | 1337 vsd(0x2c, idst, xmm0, src, kF2, k0F, kW0); |
1333 void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2); | 1338 } |
| 1339 void vcvttsd2si(Register dst, const Operand& src) { |
| 1340 XMMRegister idst = {dst.code()}; |
| 1341 vsd(0x2c, idst, xmm0, src, kF2, k0F, kW0); |
| 1342 } |
| 1343 void vcvttsd2siq(Register dst, XMMRegister src) { |
| 1344 XMMRegister idst = {dst.code()}; |
| 1345 vsd(0x2c, idst, xmm0, src, kF2, k0F, kW1); |
| 1346 } |
| 1347 void vcvttsd2siq(Register dst, const Operand& src) { |
| 1348 XMMRegister idst = {dst.code()}; |
| 1349 vsd(0x2c, idst, xmm0, src, kF2, k0F, kW1); |
| 1350 } |
| 1351 void vucomisd(XMMRegister dst, XMMRegister src) { |
| 1352 vsd(0x2e, dst, xmm0, src, k66, k0F, kWIG); |
| 1353 } |
| 1354 void vucomisd(XMMRegister dst, const Operand& src) { |
| 1355 vsd(0x2e, dst, xmm0, src, k66, k0F, kWIG); |
| 1356 } |
| 1357 |
| 1358 void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2) { |
| 1359 vsd(op, dst, src1, src2, kF2, k0F, kWIG); |
| 1360 } |
| 1361 void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2) { |
| 1362 vsd(op, dst, src1, src2, kF2, k0F, kWIG); |
| 1363 } |
| 1364 void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2, |
| 1365 SIMDPrefix pp, LeadingOpcode m, VexW w); |
| 1366 void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2, |
| 1367 SIMDPrefix pp, LeadingOpcode m, VexW w); |
1334 | 1368 |
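In the new wrappers above, xmm0 is passed as src1 for vcvttsd2si/vcvttsd2siq and vucomisd, presumably because those instructions leave the VEX.vvvv field unused (it encodes as all ones, i.e. the inverted code of register 0). A hedged usage sketch of the 64-bit truncating conversion; the feature-check pattern (CpuFeatures::IsSupported, CpuFeatureScope, and the SSE2 cvttsd2siq fallback) is an assumption based on how other AVX instructions are guarded in V8 at this time:

  // Emit a truncating double->int64 conversion, preferring the AVX form
  // when available. Sketch only; not part of this CL.
  void ConvertDoubleToInt64(Assembler* assm, Register dst, XMMRegister src) {
    if (CpuFeatures::IsSupported(AVX)) {
      CpuFeatureScope avx_scope(assm, AVX);
      assm->vcvttsd2siq(dst, src);  // VEX.LIG.F2.0F.W1 2C /r
    } else {
      assm->cvttsd2siq(dst, src);   // F2 REX.W 0F 2C /r
    }
  }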
1335 void vaddss(XMMRegister dst, XMMRegister src1, XMMRegister src2) { | 1369 void vaddss(XMMRegister dst, XMMRegister src1, XMMRegister src2) { |
1336 vss(0x58, dst, src1, src2); | 1370 vss(0x58, dst, src1, src2); |
1337 } | 1371 } |
1338 void vaddss(XMMRegister dst, XMMRegister src1, const Operand& src2) { | 1372 void vaddss(XMMRegister dst, XMMRegister src1, const Operand& src2) { |
1339 vss(0x58, dst, src1, src2); | 1373 vss(0x58, dst, src1, src2); |
1340 } | 1374 } |
1341 void vsubss(XMMRegister dst, XMMRegister src1, XMMRegister src2) { | 1375 void vsubss(XMMRegister dst, XMMRegister src1, XMMRegister src2) { |
1342 vss(0x5c, dst, src1, src2); | 1376 vss(0x5c, dst, src1, src2); |
1343 } | 1377 } |
(...skipping 422 matching lines...)
1766 void emit_rex(P1 p1, P2 p2, int size) { | 1800 void emit_rex(P1 p1, P2 p2, int size) { |
1767 if (size == kInt64Size) { | 1801 if (size == kInt64Size) { |
1768 emit_rex_64(p1, p2); | 1802 emit_rex_64(p1, p2); |
1769 } else { | 1803 } else { |
1770 DCHECK(size == kInt32Size); | 1804 DCHECK(size == kInt32Size); |
1771 emit_optional_rex_32(p1, p2); | 1805 emit_optional_rex_32(p1, p2); |
1772 } | 1806 } |
1773 } | 1807 } |
1774 | 1808 |
1775 // Emit vex prefix | 1809 // Emit vex prefix |
1776 enum SIMDPrefix { kNone = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 }; | |
1777 enum VectorLength { kL128 = 0x0, kL256 = 0x4, kLIG = kL128, kLZ = kL128 }; | |
1778 enum VexW { kW0 = 0x0, kW1 = 0x80, kWIG = kW0 }; | |
1779 enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 }; | |
1780 | |
1781 void emit_vex2_byte0() { emit(0xc5); } | 1810 void emit_vex2_byte0() { emit(0xc5); } |
1782 inline void emit_vex2_byte1(XMMRegister reg, XMMRegister v, VectorLength l, | 1811 inline void emit_vex2_byte1(XMMRegister reg, XMMRegister v, VectorLength l, |
1783 SIMDPrefix pp); | 1812 SIMDPrefix pp); |
1784 void emit_vex3_byte0() { emit(0xc4); } | 1813 void emit_vex3_byte0() { emit(0xc4); } |
1785 inline void emit_vex3_byte1(XMMRegister reg, XMMRegister rm, LeadingOpcode m); | 1814 inline void emit_vex3_byte1(XMMRegister reg, XMMRegister rm, LeadingOpcode m); |
1786 inline void emit_vex3_byte1(XMMRegister reg, const Operand& rm, | 1815 inline void emit_vex3_byte1(XMMRegister reg, const Operand& rm, |
1787 LeadingOpcode m); | 1816 LeadingOpcode m); |
1788 inline void emit_vex3_byte2(VexW w, XMMRegister v, VectorLength l, | 1817 inline void emit_vex3_byte2(VexW w, XMMRegister v, VectorLength l, |
1789 SIMDPrefix pp); | 1818 SIMDPrefix pp); |
1790 inline void emit_vex_prefix(XMMRegister reg, XMMRegister v, XMMRegister rm, | 1819 inline void emit_vex_prefix(XMMRegister reg, XMMRegister v, XMMRegister rm, |
(...skipping 337 matching lines...)
2128 Assembler* assembler_; | 2157 Assembler* assembler_; |
2129 #ifdef DEBUG | 2158 #ifdef DEBUG |
2130 int space_before_; | 2159 int space_before_; |
2131 #endif | 2160 #endif |
2132 }; | 2161 }; |
2133 | 2162 |
2134 } // namespace internal | 2163 } // namespace internal |
2135 } // namespace v8 | 2164 } // namespace v8 |
2136 | 2165 |
2137 #endif // V8_X64_ASSEMBLER_X64_H_ | 2166 #endif // V8_X64_ASSEMBLER_X64_H_ |