| OLD | NEW |
| 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
| 2 // All Rights Reserved. | 2 // All Rights Reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
| 9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
| 10 // | 10 // |
| (...skipping 1363 matching lines...) |
| 1374 void vcvttsd2siq(Register dst, const Operand& src) { | 1374 void vcvttsd2siq(Register dst, const Operand& src) { |
| 1375 XMMRegister idst = {dst.code()}; | 1375 XMMRegister idst = {dst.code()}; |
| 1376 vsd(0x2c, idst, xmm0, src, kF2, k0F, kW1); | 1376 vsd(0x2c, idst, xmm0, src, kF2, k0F, kW1); |
| 1377 } | 1377 } |
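A note on the `idst` wrapper in the context lines above: `vcvttsd2si(q)`'s destination is a general-purpose register, but the shared `vsd()` emitter types its reg operand as `XMMRegister`, so the GP register's numeric code is smuggled in through a brace-initialized `XMMRegister`; the encoder only ever reads `.code()`. A minimal sketch of that trick, with hypothetical stand-in types (not V8's real definitions):

```cpp
// Sketch: the VEX emitter only consumes numeric register codes, so a
// general-purpose register can ride in an XMMRegister-shaped slot.
struct XMMRegister { int code_; int code() const { return code_; } };
struct Register   { int code_; int code() const { return code_; } };

// Hypothetical emitter core: all it needs from "reg" is its 4-bit code.
int ModRMRegField(const XMMRegister& reg) { return reg.code() & 0xF; }

void Demo(Register dst) {
  XMMRegister idst = {dst.code()};  // same trick as vcvttsd2siq above
  (void)ModRMRegField(idst);        // encodes the GP register number
}
```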
| 1378 void vucomisd(XMMRegister dst, XMMRegister src) { | 1378 void vucomisd(XMMRegister dst, XMMRegister src) { |
| 1379 vsd(0x2e, dst, xmm0, src, k66, k0F, kWIG); | 1379 vsd(0x2e, dst, xmm0, src, k66, k0F, kWIG); |
| 1380 } | 1380 } |
| 1381 void vucomisd(XMMRegister dst, const Operand& src) { | 1381 void vucomisd(XMMRegister dst, const Operand& src) { |
| 1382 vsd(0x2e, dst, xmm0, src, k66, k0F, kWIG); | 1382 vsd(0x2e, dst, xmm0, src, k66, k0F, kWIG); |
| 1383 } | 1383 } |
| | 1384 void vroundsd(XMMRegister dst, XMMRegister src1, XMMRegister src2, |
| | 1385 RoundingMode mode) { |
| | 1386 vsd(0x0b, dst, src1, src2, k66, k0F3A, kWIG); |
| | 1387 emit(static_cast<byte>(mode) | 0x8); // Mask precision exception. |
| | 1388 } |
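The immediate byte here follows the SSE4.1/AVX ROUNDSD imm8 layout: bits 1:0 select the rounding mode, bit 2 (left clear, since RoundingMode values are 0-3) makes the immediate rather than MXCSR.RC the mode source, and bit 3, set by the `| 0x8`, suppresses the precision (inexact) exception. A minimal sketch of that layout, with illustrative names that are not part of this patch:

```cpp
// Sketch of the ROUNDSD imm8 computed above; names are illustrative only.
#include <cstdint>

enum RoundingMode : uint8_t {
  kRoundToNearest = 0x0,  // imm8 bits 1:0 = 00 (round to nearest even)
  kRoundDown      = 0x1,  // 01: toward -infinity
  kRoundUp        = 0x2,  // 10: toward +infinity
  kRoundToZero    = 0x3,  // 11: truncate
};

constexpr uint8_t kImmIsModeSource = 0x0;  // bit 2 clear: mode from imm8, not MXCSR.RC
constexpr uint8_t kMaskPrecision   = 0x8;  // bit 3 set: suppress precision (inexact) exception

constexpr uint8_t RoundsdImm(RoundingMode mode) {
  return static_cast<uint8_t>(mode) | kImmIsModeSource | kMaskPrecision;
}

static_assert(RoundsdImm(kRoundToZero) == 0x0b, "truncate, precision exception masked");
```

Masking the precision exception presumably mirrors the existing non-AVX roundsd path, so callers see no spurious FP exceptions from inexact results.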
| 1384 | 1389 |
| 1385 void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2) { | 1390 void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2) { |
| 1386 vsd(op, dst, src1, src2, kF2, k0F, kWIG); | 1391 vsd(op, dst, src1, src2, kF2, k0F, kWIG); |
| 1387 } | 1392 } |
| 1388 void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2) { | 1393 void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2) { |
| 1389 vsd(op, dst, src1, src2, kF2, k0F, kWIG); | 1394 vsd(op, dst, src1, src2, kF2, k0F, kWIG); |
| 1390 } | 1395 } |
| 1391 void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2, | 1396 void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2, |
| 1392 SIMDPrefix pp, LeadingOpcode m, VexW w); | 1397 SIMDPrefix pp, LeadingOpcode m, VexW w); |
| 1393 void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2, | 1398 void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2, |
| (...skipping 751 matching lines...) |
| 2145 Assembler* assembler_; | 2150 Assembler* assembler_; |
| 2146 #ifdef DEBUG | 2151 #ifdef DEBUG |
| 2147 int space_before_; | 2152 int space_before_; |
| 2148 #endif | 2153 #endif |
| 2149 }; | 2154 }; |
| 2150 | 2155 |
| 2151 } // namespace internal | 2156 } // namespace internal |
| 2152 } // namespace v8 | 2157 } // namespace v8 |
| 2153 | 2158 |
| 2154 #endif // V8_X64_ASSEMBLER_X64_H_ | 2159 #endif // V8_X64_ASSEMBLER_X64_H_ |
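For readers tracing the `vsd(..., pp, m, w)` plumbing: in a VEX-encoded instruction, `pp` stands in for the legacy mandatory prefix (66/F3/F2), `m` selects the opcode map (0F, 0F38, 0F3A), and `w` is the VEX.W bit (WIG meaning the bit is ignored). The new `vroundsd` therefore assembles as VEX.66.0F3A.WIG 0B /r ib. Below is a hedged sketch of how those fields pack into a three-byte VEX prefix per the Intel SDM; the enum values and helper are illustrative, not V8's actual encoder:

```cpp
// Illustrative packing of the three-byte VEX prefix (0xC4 ...); field values
// follow the Intel SDM, not necessarily V8's internal enum encodings.
#include <cstdint>

enum SIMDPrefix : uint8_t { kNone = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };  // VEX.pp
enum LeadingOpcode : uint8_t { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 };        // VEX.mmmmm
enum VexW : uint8_t { kW0 = 0, kWIG = 0, kW1 = 1 };                          // VEX.W

// r, x, b extend ModRM/SIB register numbers (stored inverted, REX-style);
// vvvv names the second source register (also stored inverted); l256 picks
// 256-bit vectors. Scalar double ops like vroundsd leave l256 false.
inline void EmitVex3(uint8_t out[3], LeadingOpcode m, VexW w, uint8_t vvvv,
                     bool l256, SIMDPrefix pp, bool r, bool x, bool b) {
  out[0] = 0xC4;
  out[1] = static_cast<uint8_t>((!r << 7) | (!x << 6) | (!b << 5) | m);
  out[2] = static_cast<uint8_t>((w << 7) | ((~vvvv & 0xF) << 3) | (l256 << 2) | pp);
}
```

With r/x/b all false, vvvv holding src1's register number, and (m, w, pp) = (k0F3A, kWIG, k66), this yields the prefix bytes for the vroundsd added above.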