OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ | 5 #ifndef V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ |
6 #define V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ | 6 #define V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ |
7 | 7 |
8 namespace v8 { | 8 namespace v8 { |
9 namespace internal { | 9 namespace internal { |
10 namespace compiler { | 10 namespace compiler { |
(...skipping 36 matching lines...)
47 V(X64Ror) \ | 47 V(X64Ror) \ |
48 V(X64Ror32) \ | 48 V(X64Ror32) \ |
49 V(SSEFloat64Cmp) \ | 49 V(SSEFloat64Cmp) \ |
50 V(SSEFloat64Add) \ | 50 V(SSEFloat64Add) \ |
51 V(SSEFloat64Sub) \ | 51 V(SSEFloat64Sub) \ |
52 V(SSEFloat64Mul) \ | 52 V(SSEFloat64Mul) \ |
53 V(SSEFloat64Div) \ | 53 V(SSEFloat64Div) \ |
54 V(SSEFloat64Mod) \ | 54 V(SSEFloat64Mod) \ |
55 V(SSEFloat64Sqrt) \ | 55 V(SSEFloat64Sqrt) \ |
56 V(SSEFloat64Round) \ | 56 V(SSEFloat64Round) \ |
| 57 V(SSEFloat64Max) \ |
| 58 V(SSEFloat64Min) \ |
57 V(SSECvtss2sd) \ | 59 V(SSECvtss2sd) \ |
58 V(SSECvtsd2ss) \ | 60 V(SSECvtsd2ss) \ |
59 V(SSEFloat64ToInt32) \ | 61 V(SSEFloat64ToInt32) \ |
60 V(SSEFloat64ToUint32) \ | 62 V(SSEFloat64ToUint32) \ |
61 V(SSEInt32ToFloat64) \ | 63 V(SSEInt32ToFloat64) \ |
62 V(SSEUint32ToFloat64) \ | 64 V(SSEUint32ToFloat64) \ |
63 V(SSEFloat64ExtractLowWord32) \ | 65 V(SSEFloat64ExtractLowWord32) \ |
64 V(SSEFloat64ExtractHighWord32) \ | 66 V(SSEFloat64ExtractHighWord32) \ |
65 V(SSEFloat64InsertLowWord32) \ | 67 V(SSEFloat64InsertLowWord32) \ |
66 V(SSEFloat64InsertHighWord32) \ | 68 V(SSEFloat64InsertHighWord32) \ |
67 V(SSEFloat64LoadLowWord32) \ | 69 V(SSEFloat64LoadLowWord32) \ |
68 V(AVXFloat64Add) \ | 70 V(AVXFloat64Add) \ |
69 V(AVXFloat64Sub) \ | 71 V(AVXFloat64Sub) \ |
70 V(AVXFloat64Mul) \ | 72 V(AVXFloat64Mul) \ |
71 V(AVXFloat64Div) \ | 73 V(AVXFloat64Div) \ |
| 74 V(AVXFloat64Max) \ |
| 75 V(AVXFloat64Min) \ |
72 V(X64Movsxbl) \ | 76 V(X64Movsxbl) \ |
73 V(X64Movzxbl) \ | 77 V(X64Movzxbl) \ |
74 V(X64Movb) \ | 78 V(X64Movb) \ |
75 V(X64Movsxwl) \ | 79 V(X64Movsxwl) \ |
76 V(X64Movzxwl) \ | 80 V(X64Movzxwl) \ |
77 V(X64Movw) \ | 81 V(X64Movw) \ |
78 V(X64Movl) \ | 82 V(X64Movl) \ |
79 V(X64Movsxlq) \ | 83 V(X64Movsxlq) \ |
80 V(X64Movq) \ | 84 V(X64Movq) \ |
81 V(X64Movsd) \ | 85 V(X64Movsd) \ |
(...skipping 37 matching lines...)
119 V(M1I) /* [ %r2*1 + K] */ \ | 123 V(M1I) /* [ %r2*1 + K] */ \ |
120 V(M2I) /* [ %r2*2 + K] */ \ | 124 V(M2I) /* [ %r2*2 + K] */ \ |
121 V(M4I) /* [ %r2*4 + K] */ \ | 125 V(M4I) /* [ %r2*4 + K] */ \ |
122 V(M8I) /* [ %r2*8 + K] */ | 126 V(M8I) /* [ %r2*8 + K] */ |
123 | 127 |
124 } // namespace compiler | 128 } // namespace compiler |
125 } // namespace internal | 129 } // namespace internal |
126 } // namespace v8 | 130 } // namespace v8 |
127 | 131 |
128 #endif // V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ | 132 #endif // V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ |
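
Note on the new kSSEFloat64Max/Min and kAVXFloat64Max/Min opcodes: the names suggest they lower to maxsd/minsd (vmaxsd/vminsd for the AVX variants), although the code-generator side is not part of this hunk. If so, operand order matters, because maxsd is not a commutative mathematical max: when either operand is NaN, or both operands are zero (of either sign), the result is the second source operand. A minimal standalone sketch of that semantic using the SSE2 intrinsic rather than V8 internals (the _mm_max_sd mapping is an assumption about what the opcode ultimately emits):

  #include <immintrin.h>  // _mm_max_sd and friends (SSE2)
  #include <cmath>
  #include <cstdio>

  int main() {
    __m128d nan = _mm_set_sd(std::nan(""));
    __m128d one = _mm_set_sd(1.0);
    // maxsd dst, src returns src whenever either input is NaN,
    // so the result depends on operand order:
    double r1 = _mm_cvtsd_f64(_mm_max_sd(one, nan));  // -> NaN (second operand)
    double r2 = _mm_cvtsd_f64(_mm_max_sd(nan, one));  // -> 1.0 (second operand)
    std::printf("%f %f\n", r1, r2);
    return 0;
  }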