OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ | 5 #ifndef V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ |
6 #define V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ | 6 #define V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ |
7 | 7 |
8 namespace v8 { | 8 namespace v8 { |
9 namespace internal { | 9 namespace internal { |
10 namespace compiler { | 10 namespace compiler { |
(...skipping 33 matching lines...) |
44 V(X64Sar32) \ | 44 V(X64Sar32) \ |
45 V(X64Ror) \ | 45 V(X64Ror) \ |
46 V(X64Ror32) \ | 46 V(X64Ror32) \ |
47 V(SSEFloat64Cmp) \ | 47 V(SSEFloat64Cmp) \ |
48 V(SSEFloat64Add) \ | 48 V(SSEFloat64Add) \ |
49 V(SSEFloat64Sub) \ | 49 V(SSEFloat64Sub) \ |
50 V(SSEFloat64Mul) \ | 50 V(SSEFloat64Mul) \ |
51 V(SSEFloat64Div) \ | 51 V(SSEFloat64Div) \ |
52 V(SSEFloat64Mod) \ | 52 V(SSEFloat64Mod) \ |
53 V(SSEFloat64Sqrt) \ | 53 V(SSEFloat64Sqrt) \ |
| 54 V(SSECvtss2sd) \ |
| 55 V(SSECvtsd2ss) \ |
54 V(SSEFloat64ToInt32) \ | 56 V(SSEFloat64ToInt32) \ |
55 V(SSEFloat64ToUint32) \ | 57 V(SSEFloat64ToUint32) \ |
56 V(SSEInt32ToFloat64) \ | 58 V(SSEInt32ToFloat64) \ |
57 V(SSEUint32ToFloat64) \ | 59 V(SSEUint32ToFloat64) \ |
58 V(X64Movsxbl) \ | 60 V(X64Movsxbl) \ |
59 V(X64Movzxbl) \ | 61 V(X64Movzxbl) \ |
60 V(X64Movb) \ | 62 V(X64Movb) \ |
61 V(X64Movsxwl) \ | 63 V(X64Movsxwl) \ |
62 V(X64Movzxwl) \ | 64 V(X64Movzxwl) \ |
63 V(X64Movw) \ | 65 V(X64Movw) \ |
(...skipping 28 matching lines...) |
92 V(MR1I) /* [%r1 + %r2 + K] */ \ | 94 V(MR1I) /* [%r1 + %r2 + K] */ \ |
93 V(MR2I) /* [%r1 + %r2*2 + K] */ \ | 95 V(MR2I) /* [%r1 + %r2*2 + K] */ \ |
94 V(MR4I) /* [%r1 + %r2*4 + K] */ \ | 96 V(MR4I) /* [%r1 + %r2*4 + K] */ \ |
95 V(MR8I) /* [%r1 + %r2*8 + K] */ | 97 V(MR8I) /* [%r1 + %r2*8 + K] */ |
96 | 98 |
97 } // namespace compiler | 99 } // namespace compiler |
98 } // namespace internal | 100 } // namespace internal |
99 } // namespace v8 | 101 } // namespace v8 |
100 | 102 |
101 #endif // V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ | 103 #endif // V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ |
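
The two lines added in the NEW column (54-55) introduce opcodes for the SSE2 scalar conversion instructions: cvtss2sd widens a float32 to a float64, and cvtsd2ss narrows a float64 back to a float32. Each V(...) entry in this list macro expands to an ArchOpcode enum value (here kSSECvtss2sd and kSSECvtsd2ss), which the instruction selector and code generator then reference. As a minimal sketch, the matching cases one would expect in the x64 code generator's opcode switch might look like the following, assuming the usual `__` assembler macro and the X64OperandConverter register accessors (the exact helper names here are assumptions, not part of this CL):

    case kSSECvtss2sd:
      // Widen scalar single-precision to double-precision.
      __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kSSECvtsd2ss:
      // Narrow scalar double-precision to single-precision.
      __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;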