OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ | 5 #ifndef V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ |
6 #define V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ | 6 #define V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ |
7 | 7 |
8 namespace v8 { | 8 namespace v8 { |
9 namespace internal { | 9 namespace internal { |
10 namespace compiler { | 10 namespace compiler { |
(...skipping 28 matching lines...) |
39 V(X64Shl) \ | 39 V(X64Shl) \ |
40 V(X64Shl32) \ | 40 V(X64Shl32) \ |
41 V(X64Shr) \ | 41 V(X64Shr) \ |
42 V(X64Shr32) \ | 42 V(X64Shr32) \ |
43 V(X64Sar) \ | 43 V(X64Sar) \ |
44 V(X64Sar32) \ | 44 V(X64Sar32) \ |
45 V(X64Ror) \ | 45 V(X64Ror) \ |
46 V(X64Ror32) \ | 46 V(X64Ror32) \ |
47 V(X64Push) \ | 47 V(X64Push) \ |
48 V(X64PushI) \ | 48 V(X64PushI) \ |
49 V(X64Movsxlq) \ | |
50 V(X64Movl) \ | |
51 V(X64CallCodeObject) \ | 49 V(X64CallCodeObject) \ |
52 V(X64CallAddress) \ | 50 V(X64CallAddress) \ |
53 V(PopStack) \ | 51 V(PopStack) \ |
54 V(X64CallJSFunction) \ | 52 V(X64CallJSFunction) \ |
55 V(SSEFloat64Cmp) \ | 53 V(SSEFloat64Cmp) \ |
56 V(SSEFloat64Add) \ | 54 V(SSEFloat64Add) \ |
57 V(SSEFloat64Sub) \ | 55 V(SSEFloat64Sub) \ |
58 V(SSEFloat64Mul) \ | 56 V(SSEFloat64Mul) \ |
59 V(SSEFloat64Div) \ | 57 V(SSEFloat64Div) \ |
60 V(SSEFloat64Mod) \ | 58 V(SSEFloat64Mod) \ |
61 V(SSEFloat64ToInt32) \ | 59 V(SSEFloat64ToInt32) \ |
62 V(SSEFloat64ToUint32) \ | 60 V(SSEFloat64ToUint32) \ |
63 V(SSEInt32ToFloat64) \ | 61 V(SSEInt32ToFloat64) \ |
64 V(SSEUint32ToFloat64) \ | 62 V(SSEUint32ToFloat64) \ |
65 V(SSELoad) \ | 63 V(X64Movsd) \ |
66 V(SSEStore) \ | 64 V(X64Movsxbl) \ |
67 V(X64LoadWord8) \ | 65 V(X64Movzxbl) \ |
68 V(X64StoreWord8) \ | 66 V(X64Movb) \ |
69 V(X64StoreWord8I) \ | 67 V(X64Movsxwl) \ |
70 V(X64LoadWord16) \ | 68 V(X64Movzxwl) \ |
71 V(X64StoreWord16) \ | 69 V(X64Movw) \ |
72 V(X64StoreWord16I) \ | 70 V(X64Movl) \ |
73 V(X64LoadWord32) \ | 71 V(X64Movsxlq) \ |
74 V(X64StoreWord32) \ | 72 V(X64Movq) \ |
75 V(X64StoreWord32I) \ | |
76 V(X64LoadWord64) \ | |
77 V(X64StoreWord64) \ | |
78 V(X64StoreWord64I) \ | |
79 V(X64StoreWriteBarrier) | 73 V(X64StoreWriteBarrier) |
80 | 74 |
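[Reviewer aside, not part of the CL: TARGET_ARCH_OPCODE_LIST above is an X-macro list; the shared compiler code expands it with a one-argument macro to stamp out the architecture opcode enum and its printing support. A minimal sketch of the expansion pattern, assuming a consumer roughly like the one in src/compiler/instruction-codes.h (the real macro and enum names may differ):

  // Hypothetical consumer: generate one enum value per opcode, so
  // V(X64Add) becomes kX64Add, V(X64Shl) becomes kX64Shl, and so on.
  #define DECLARE_ARCH_OPCODE(Name) k##Name,
  enum ArchOpcode {
    TARGET_ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE)
    kNumberOfArchOpcodes  // sentinel; this name is illustrative only
  };
  #undef DECLARE_ARCH_OPCODE

Renaming the load/store opcodes to mnemonic-style names (X64Movsd, X64Movzxbl, X64Movq, ...) in this CL keeps that generated enum aligned with the assembler methods the code generator will ultimately call.]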
81 | 75 |
82 // Addressing modes represent the "shape" of inputs to an instruction. | 76 // Addressing modes represent the "shape" of inputs to an instruction. |
83 // Many instructions support multiple addressing modes. Addressing modes | 77 // Many instructions support multiple addressing modes. Addressing modes |
84 // are encoded into the InstructionCode of the instruction and tell the | 78 // are encoded into the InstructionCode of the instruction and tell the |
85 // code generator after register allocation which assembler method to call. | 79 // code generator after register allocation which assembler method to call. |
86 // | 80 // |
87 // We use the following local notation for addressing modes: | 81 // We use the following local notation for addressing modes: |
88 // | 82 // |
(...skipping 12 matching lines...) |
101 V(MR1I) /* [%r1 + %r2 + K] */ \ | 95 V(MR1I) /* [%r1 + %r2 + K] */ \ |
102 V(MR2I) /* [%r1 + %r2*2 + K] */ \ | 96 V(MR2I) /* [%r1 + %r2*2 + K] */ \ |
103 V(MR4I) /* [%r1 + %r2*4 + K] */ \ | 97 V(MR4I) /* [%r1 + %r2*4 + K] */ \ |
104 V(MR8I) /* [%r1 + %r2*8 + K] */ | 98 V(MR8I) /* [%r1 + %r2*8 + K] */ |
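[Reviewer aside, not part of the CL: after register allocation, the code generator decodes the addressing mode back out of the InstructionCode and selects the matching assembler operand form. A minimal sketch of that dispatch, assuming a bitfield accessor like AddressingModeField and hypothetical input helpers (InputRegister, InputInt32); the scale factors times_1/times_2/times_4/times_8 are the x64 assembler's ScaleFactor values:

  // Hypothetical decode for MR1I = [%r1 + %r2 + K]: base register,
  // index register scaled by 1, plus a constant displacement.
  switch (AddressingModeField::decode(instr->opcode())) {
    case kMode_MR1I:
      return Operand(InputRegister(0), InputRegister(1), times_1,
                     InputInt32(2));
    // ... MR2I/MR4I/MR8I would pass times_2/times_4/times_8 instead.
  }

This is why the mode list encodes the operand "shape" rather than a full instruction: one opcode such as X64Movl can pair with any of these modes.]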
105 | 99 |
106 } // namespace compiler | 100 } // namespace compiler |
107 } // namespace internal | 101 } // namespace internal |
108 } // namespace v8 | 102 } // namespace v8 |
109 | 103 |
110 #endif // V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ | 104 #endif // V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ |