OLD | NEW |
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
2 // All Rights Reserved. | 2 // All Rights Reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
10 // | 10 // |
(...skipping 1380 matching lines...)
1391 shrx(dst, Operand(src1), src2); | 1391 shrx(dst, Operand(src1), src2); |
1392 } | 1392 } |
1393 void shrx(Register dst, const Operand& src1, Register src2) { | 1393 void shrx(Register dst, const Operand& src1, Register src2) { |
1394 bmi2(kF2, 0xf7, dst, src2, src1); | 1394 bmi2(kF2, 0xf7, dst, src2, src1); |
1395 } | 1395 } |
1396 void rorx(Register dst, Register src, byte imm8) { | 1396 void rorx(Register dst, Register src, byte imm8) { |
1397 rorx(dst, Operand(src), imm8); | 1397 rorx(dst, Operand(src), imm8); |
1398 } | 1398 } |
1399 void rorx(Register dst, const Operand& src, byte imm8); | 1399 void rorx(Register dst, const Operand& src, byte imm8); |
1400 | 1400 |
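The shrx/rorx register overloads above simply wrap their source in Operand() and forward to the memory-operand form, which encodes the instruction through the shared bmi2() emitter. A minimal usage sketch, not part of this CL, assuming an Assembler* masm and that BMI2 support has already been checked (e.g. with a CpuFeatureScope):

    CpuFeatureScope bmi2_scope(masm, BMI2);
    masm->shrx(eax, ebx, ecx);  // eax = ebx >> (ecx & 31), without touching flags
    masm->rorx(edx, eax, 8);    // edx = eax rotated right by 8, without touching flags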
| 1401 #define PACKED_OP_LIST(V) \ |
| 1402 V(and, 0x54) \ |
| 1403 V(xor, 0x57) |
| 1404 |
| 1405 #define AVX_PACKED_OP_DECLARE(name, opcode) \ |
| 1406 void v##name##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \ |
| 1407 vps(opcode, dst, src1, Operand(src2)); \ |
| 1408 } \ |
| 1409 void v##name##ps(XMMRegister dst, XMMRegister src1, const Operand& src2) { \ |
| 1410 vps(opcode, dst, src1, src2); \ |
| 1411 } \ |
| 1412 void v##name##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \ |
| 1413 vpd(opcode, dst, src1, Operand(src2)); \ |
| 1414 } \ |
| 1415 void v##name##pd(XMMRegister dst, XMMRegister src1, const Operand& src2) { \ |
| 1416 vpd(opcode, dst, src1, src2); \ |
| 1417 } |
| 1418 |
| 1419 PACKED_OP_LIST(AVX_PACKED_OP_DECLARE); |
| 1420 void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2); |
| 1421 void vps(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2); |
| 1422 void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2); |
| 1423 void vpd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2); |
| 1424 |
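Expanded, PACKED_OP_LIST(AVX_PACKED_OP_DECLARE) declares vandps, vandpd, vxorps and vxorpd, each with a register and a memory-operand source overload, all funnelling into the generic vps/vpd emitters declared just above. A rough usage sketch, not part of this CL, assuming an Assembler* masm and that AVX support has been verified:

    CpuFeatureScope avx_scope(masm, AVX);
    masm->vxorps(xmm0, xmm0, xmm0);             // zero xmm0 (packed-single XOR, opcode 0x57)
    masm->vandpd(xmm1, xmm2, Operand(esp, 8));  // packed-double AND with a memory source (opcode 0x54)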
1401 // Prefetch src position into cache level. | 1425 // Prefetch src position into cache level. |
1402 // Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a | 1426 // Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a |
1403 // non-temporal prefetch (prefetchnta). | 1427 // non-temporal prefetch (prefetchnta). |
1404 void prefetch(const Operand& src, int level); | 1428 void prefetch(const Operand& src, int level); |
1405 // TODO(lrn): Need SFENCE for movnt? | 1429 // TODO(lrn): Need SFENCE for movnt? |
1406 | 1430 |
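An illustrative prefetch call, assuming an Assembler* masm; per the comment above, levels 1-3 select a CPU cache level and level 0 requests the non-temporal hint:

    masm->prefetch(Operand(eax, 0), 1);  // prefetch the line at [eax] into cache level 1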
1407 // Check the code size generated from label to here. | 1431 // Check the code size generated from label to here. |
1408 int SizeOfCodeGeneratedSince(Label* label) { | 1432 int SizeOfCodeGeneratedSince(Label* label) { |
1409 return pc_offset() - label->pos(); | 1433 return pc_offset() - label->pos(); |
1410 } | 1434 } |
(...skipping 163 matching lines...)
1574 private: | 1598 private: |
1575 Assembler* assembler_; | 1599 Assembler* assembler_; |
1576 #ifdef DEBUG | 1600 #ifdef DEBUG |
1577 int space_before_; | 1601 int space_before_; |
1578 #endif | 1602 #endif |
1579 }; | 1603 }; |
1580 | 1604 |
1581 } } // namespace v8::internal | 1605 } } // namespace v8::internal |
1582 | 1606 |
1583 #endif // V8_IA32_ASSEMBLER_IA32_H_ | 1607 #endif // V8_IA32_ASSEMBLER_IA32_H_ |