OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ | 5 #ifndef V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ |
6 #define V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ | 6 #define V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ |
7 | 7 |
8 namespace v8 { | 8 namespace v8 { |
9 namespace internal { | 9 namespace internal { |
10 namespace compiler { | 10 namespace compiler { |
11 | 11 |
12 // X64-specific opcodes that specify which assembly sequence to emit. | 12 // X64-specific opcodes that specify which assembly sequence to emit. |
13 // Most opcodes specify a single instruction. | 13 // Most opcodes specify a single instruction. |
14 #define TARGET_ARCH_OPCODE_LIST(V) \ | 14 #define TARGET_ARCH_OPCODE_LIST(V) \ |
15 V(X64Add) \ | 15 V(X64Add) \ |
16 V(X64Add32) \ | 16 V(X64Add32) \ |
17 V(X64And) \ | 17 V(X64And) \ |
18 V(X64And32) \ | 18 V(X64And32) \ |
19 V(X64Cmp) \ | 19 V(X64Cmp) \ |
20 V(X64Cmp32) \ | 20 V(X64Cmp32) \ |
| 21 V(X64Cmp16) \ |
| 22 V(X64Cmp8) \ |
21 V(X64Test) \ | 23 V(X64Test) \ |
22 V(X64Test32) \ | 24 V(X64Test32) \ |
| 25 V(X64Test16) \ |
| 26 V(X64Test8) \ |
23 V(X64Or) \ | 27 V(X64Or) \ |
24 V(X64Or32) \ | 28 V(X64Or32) \ |
25 V(X64Xor) \ | 29 V(X64Xor) \ |
26 V(X64Xor32) \ | 30 V(X64Xor32) \ |
27 V(X64Sub) \ | 31 V(X64Sub) \ |
28 V(X64Sub32) \ | 32 V(X64Sub32) \ |
29 V(X64Imul) \ | 33 V(X64Imul) \ |
30 V(X64Imul32) \ | 34 V(X64Imul32) \ |
31 V(X64ImulHigh32) \ | 35 V(X64ImulHigh32) \ |
32 V(X64UmulHigh32) \ | 36 V(X64UmulHigh32) \ |
(...skipping 99 matching lines...)
132 V(X64BitcastIF) \ | 136 V(X64BitcastIF) \ |
133 V(X64BitcastLD) \ | 137 V(X64BitcastLD) \ |
134 V(X64Lea32) \ | 138 V(X64Lea32) \ |
135 V(X64Lea) \ | 139 V(X64Lea) \ |
136 V(X64Dec32) \ | 140 V(X64Dec32) \ |
137 V(X64Inc32) \ | 141 V(X64Inc32) \ |
138 V(X64Push) \ | 142 V(X64Push) \ |
139 V(X64Poke) \ | 143 V(X64Poke) \ |
140 V(X64StackCheck) | 144 V(X64StackCheck) |
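
Note: TARGET_ARCH_OPCODE_LIST(V) is an X-macro list; a minimal standalone sketch of how such a list is commonly expanded into an opcode enum is below. The SKETCH_* names and the tiny three-entry list are illustrative stand-ins, not the actual V8 definitions.

    #include <cstdio>

    // Hypothetical three-entry stand-in for TARGET_ARCH_OPCODE_LIST(V).
    #define SKETCH_ARCH_OPCODE_LIST(V) \
      V(X64Add)                        \
      V(X64Cmp)                        \
      V(X64Lea)

    // Expand each list entry into an enumerator named kX64Add, kX64Cmp, ...
    enum SketchArchOpcode {
    #define DECLARE_ARCH_OPCODE(Name) k##Name,
      SKETCH_ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE)
    #undef DECLARE_ARCH_OPCODE
      kSketchArchOpcodeCount
    };

    int main() {
      // The three list entries yield enumerators 0, 1, 2.
      std::printf("kX64Lea = %d, count = %d\n",
                  static_cast<int>(kX64Lea),
                  static_cast<int>(kSketchArchOpcodeCount));
      return 0;
    }
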
141 | 145 |
142 | |
143 // Addressing modes represent the "shape" of inputs to an instruction. | 146 // Addressing modes represent the "shape" of inputs to an instruction. |
144 // Many instructions support multiple addressing modes. Addressing modes | 147 // Many instructions support multiple addressing modes. Addressing modes |
145 // are encoded into the InstructionCode of the instruction and tell the | 148 // are encoded into the InstructionCode of the instruction and tell the |
146 // code generator after register allocation which assembler method to call. | 149 // code generator after register allocation which assembler method to call. |
147 // | 150 // |
148 // We use the following local notation for addressing modes: | 151 // We use the following local notation for addressing modes: |
149 // | 152 // |
150 // M = memory operand | 153 // M = memory operand |
151 // R = base register | 154 // R = base register |
152 // N = index register * N for N in {1, 2, 4, 8} | 155 // N = index register * N for N in {1, 2, 4, 8} |
(...skipping 17 matching lines...)
170 V(M1I) /* [ %r2*1 + K] */ \ | 173 V(M1I) /* [ %r2*1 + K] */ \ |
171 V(M2I) /* [ %r2*2 + K] */ \ | 174 V(M2I) /* [ %r2*2 + K] */ \ |
172 V(M4I) /* [ %r2*4 + K] */ \ | 175 V(M4I) /* [ %r2*4 + K] */ \ |
173 V(M8I) /* [ %r2*8 + K] */ | 176 V(M8I) /* [ %r2*8 + K] */ |
174 | 177 |
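
Note: the M1I/M2I/M4I/M8I modes differ only in the scale applied to the index register. A hedged sketch of how a code generator might map such a mode tag to its scale factor follows; the enum and function names here are illustrative and do not mirror V8's actual InstructionCode bitfield encoding.

    #include <cstdio>

    // Illustrative stand-in for the scaled-index addressing modes above.
    enum class SketchAddressingMode { kM1I, kM2I, kM4I, kM8I };

    // Scale factor applied to the index register for a scaled-index mode,
    // i.e. the N in [ %r2*N + K ].
    int IndexScale(SketchAddressingMode mode) {
      switch (mode) {
        case SketchAddressingMode::kM1I: return 1;
        case SketchAddressingMode::kM2I: return 2;
        case SketchAddressingMode::kM4I: return 4;
        case SketchAddressingMode::kM8I: return 8;
      }
      return 0;  // Unreachable for the modes listed here.
    }

    int main() {
      // M4I addresses memory as [ %r2*4 + K ], so its scale is 4.
      std::printf("M4I scale = %d\n", IndexScale(SketchAddressingMode::kM4I));
      return 0;
    }
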
175 } // namespace compiler | 178 } // namespace compiler |
176 } // namespace internal | 179 } // namespace internal |
177 } // namespace v8 | 180 } // namespace v8 |
178 | 181 |
179 #endif // V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ | 182 #endif // V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ |