| OLD | NEW |
| 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
| 2 // All Rights Reserved. | 2 // All Rights Reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
| 9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
| 10 // | 10 // |
| (...skipping 1263 matching lines...) |
| 1274 void vfmass(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2); | 1274 void vfmass(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2); |
| 1275 void vfmass(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2); | 1275 void vfmass(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2); |
| 1276 | 1276 |
| 1277 void vmovd(XMMRegister dst, Register src); | 1277 void vmovd(XMMRegister dst, Register src); |
| 1278 void vmovd(XMMRegister dst, const Operand& src); | 1278 void vmovd(XMMRegister dst, const Operand& src); |
| 1279 void vmovd(Register dst, XMMRegister src); | 1279 void vmovd(Register dst, XMMRegister src); |
| 1280 void vmovq(XMMRegister dst, Register src); | 1280 void vmovq(XMMRegister dst, Register src); |
| 1281 void vmovq(XMMRegister dst, const Operand& src); | 1281 void vmovq(XMMRegister dst, const Operand& src); |
| 1282 void vmovq(Register dst, XMMRegister src); | 1282 void vmovq(Register dst, XMMRegister src); |
| 1283 | 1283 |
| 1284 void vmovapd(XMMRegister dst, XMMRegister src); | |
| 1285 void vmovsd(XMMRegister dst, const Operand& src) { | 1284 void vmovsd(XMMRegister dst, const Operand& src) { |
| 1286 vsd(0x10, dst, xmm0, src); | 1285 vsd(0x10, dst, xmm0, src); |
| 1287 } | 1286 } |
| 1288 void vmovsd(XMMRegister dst, XMMRegister src) { vsd(0x10, dst, xmm0, src); } | 1287 void vmovsd(XMMRegister dst, XMMRegister src) { vsd(0x10, dst, xmm0, src); } |
| 1289 void vmovsd(const Operand& dst, XMMRegister src) { | 1288 void vmovsd(const Operand& dst, XMMRegister src) { |
| 1290 vsd(0x11, src, xmm0, dst); | 1289 vsd(0x11, src, xmm0, dst); |
| 1291 } | 1290 } |
| 1292 void vaddsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { | 1291 void vaddsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { |
| 1293 vsd(0x58, dst, src1, src2); | 1292 vsd(0x58, dst, src1, src2); |
| 1294 } | 1293 } |
| (...skipping 282 matching lines...) |
| 1577 bmi2l(kF2, 0xf7, dst, src2, src1); | 1576 bmi2l(kF2, 0xf7, dst, src2, src1); |
| 1578 } | 1577 } |
| 1579 void shrxl(Register dst, const Operand& src1, Register src2) { | 1578 void shrxl(Register dst, const Operand& src1, Register src2) { |
| 1580 bmi2l(kF2, 0xf7, dst, src2, src1); | 1579 bmi2l(kF2, 0xf7, dst, src2, src1); |
| 1581 } | 1580 } |
| 1582 void rorxq(Register dst, Register src, byte imm8); | 1581 void rorxq(Register dst, Register src, byte imm8); |
| 1583 void rorxq(Register dst, const Operand& src, byte imm8); | 1582 void rorxq(Register dst, const Operand& src, byte imm8); |
| 1584 void rorxl(Register dst, Register src, byte imm8); | 1583 void rorxl(Register dst, Register src, byte imm8); |
| 1585 void rorxl(Register dst, const Operand& src, byte imm8); | 1584 void rorxl(Register dst, const Operand& src, byte imm8); |
| 1586 | 1585 |
| 1586 void vmovapd(XMMRegister dst, XMMRegister src) { vpd(0x28, dst, xmm0, src); } |
| 1587 void vmovmskpd(Register dst, XMMRegister src) { |
| 1588 XMMRegister idst = {dst.code()}; |
| 1589 vpd(0x50, idst, xmm0, src); |
| 1590 } |
| 1591 |
| 1587 #define PACKED_OP_LIST(V) \ | 1592 #define PACKED_OP_LIST(V) \ |
| 1588 V(and, 0x54) \ | 1593 V(and, 0x54) \ |
| 1589 V(xor, 0x57) | 1594 V(xor, 0x57) |
| 1590 | 1595 |
| 1591 #define AVX_PACKED_OP_DECLARE(name, opcode) \ | 1596 #define AVX_PACKED_OP_DECLARE(name, opcode) \ |
| 1592 void v##name##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \ | 1597 void v##name##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \ |
| 1593 vps(opcode, dst, src1, src2); \ | 1598 vps(opcode, dst, src1, src2); \ |
| 1594 } \ | 1599 } \ |
| 1595 void v##name##ps(XMMRegister dst, XMMRegister src1, const Operand& src2) { \ | 1600 void v##name##ps(XMMRegister dst, XMMRegister src1, const Operand& src2) { \ |
| 1596 vps(opcode, dst, src1, src2); \ | 1601 vps(opcode, dst, src1, src2); \ |
| (...skipping 560 matching lines...) |
| 2157 Assembler* assembler_; | 2162 Assembler* assembler_; |
| 2158 #ifdef DEBUG | 2163 #ifdef DEBUG |
| 2159 int space_before_; | 2164 int space_before_; |
| 2160 #endif | 2165 #endif |
| 2161 }; | 2166 }; |
| 2162 | 2167 |
| 2163 } // namespace internal | 2168 } // namespace internal |
| 2164 } // namespace v8 | 2169 } // namespace v8 |
| 2165 | 2170 |
| 2166 #endif // V8_X64_ASSEMBLER_X64_H_ | 2171 #endif // V8_X64_ASSEMBLER_X64_H_ |
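
The only functional change in this section is the hunk at new lines 1586–1590: the bare `vmovapd` declaration (old line 1284) becomes an inline definition routed through the generic `vpd` emitter, and a `vmovmskpd` wrapper is added next to it. Below is a minimal sketch of how a code generator might exercise the two new wrappers; the function name, register choices, include path, and `CpuFeatureScope` guard are illustrative assumptions layered on top of the patch, not part of it.

```cpp
// Hypothetical usage sketch (not part of this patch): emitting the new AVX
// instructions through the assembler interface declared above. Assumes the
// V8-internal header below is on the include path and that AVX support has
// already been detected at runtime.
#include "src/x64/assembler-x64.h"

namespace v8 {
namespace internal {

// Copies a packed-double value into a scratch XMM register and extracts the
// sign bits of its two doubles into a general-purpose register.
void EmitExtractSignBits(Assembler* masm) {
  CpuFeatureScope avx_scope(masm, AVX);  // only emit VEX encodings when AVX is on
  masm->vmovapd(xmm1, xmm0);             // 66 0F 28: aligned packed-double register move
  masm->vmovmskpd(rax, xmm1);            // 66 0F 50: sign bit of each double -> low bits of rax
}

}  // namespace internal
}  // namespace v8
```

Note how the `vmovmskpd` body in the diff constructs `XMMRegister idst = {dst.code()};` so that the general-purpose destination can be passed through `vpd`, whose operands are typed as `XMMRegister`; only the register code matters for the VEX-encoded ModR/M reg field.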