OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_ | 5 #ifndef V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_ |
6 #define V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_ | 6 #define V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_ |
7 | 7 |
8 namespace v8 { | 8 namespace v8 { |
9 namespace internal { | 9 namespace internal { |
10 namespace compiler { | 10 namespace compiler { |
11 | 11 |
12 // ARM64-specific opcodes that specify which assembly sequence to emit. | 12 // ARM64-specific opcodes that specify which assembly sequence to emit. |
13 // Most opcodes specify a single instruction. | 13 // Most opcodes specify a single instruction. |
14 #define TARGET_ARCH_OPCODE_LIST(V) \ | 14 #define TARGET_ARCH_OPCODE_LIST(V) \ |
15 V(Arm64Add) \ | 15 V(Arm64Add) \ |
16 V(Arm64Add32) \ | 16 V(Arm64Add32) \ |
17 V(Arm64And) \ | 17 V(Arm64And) \ |
18 V(Arm64And32) \ | 18 V(Arm64And32) \ |
19 V(Arm64Cmp) \ | 19 V(Arm64Cmp) \ |
20 V(Arm64Cmp32) \ | 20 V(Arm64Cmp32) \ |
| 21 V(Arm64Cmn) \ |
| 22 V(Arm64Cmn32) \ |
21 V(Arm64Tst) \ | 23 V(Arm64Tst) \ |
22 V(Arm64Tst32) \ | 24 V(Arm64Tst32) \ |
23 V(Arm64Or) \ | 25 V(Arm64Or) \ |
24 V(Arm64Or32) \ | 26 V(Arm64Or32) \ |
25 V(Arm64Xor) \ | 27 V(Arm64Xor) \ |
26 V(Arm64Xor32) \ | 28 V(Arm64Xor32) \ |
27 V(Arm64Sub) \ | 29 V(Arm64Sub) \ |
28 V(Arm64Sub32) \ | 30 V(Arm64Sub32) \ |
29 V(Arm64Mul) \ | 31 V(Arm64Mul) \ |
30 V(Arm64Mul32) \ | 32 V(Arm64Mul32) \ |
(...skipping 65 matching lines...) |
96 // MRR = [register + register] | 98 // MRR = [register + register] |
97 #define TARGET_ADDRESSING_MODE_LIST(V) \ | 99 #define TARGET_ADDRESSING_MODE_LIST(V) \ |
98 V(MRI) /* [%r0 + K] */ \ | 100 V(MRI) /* [%r0 + K] */ \ |
99 V(MRR) /* [%r0 + %r1] */ | 101 V(MRR) /* [%r0 + %r1] */ |
100 | 102 |
101 } // namespace internal | 103 } // namespace internal |
102 } // namespace compiler | 104 } // namespace compiler |
103 } // namespace v8 | 105 } // namespace v8 |
104 | 106 |
105 #endif // V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_ | 107 #endif // V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_ |
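
Note: a minimal sketch of how an X-macro list like TARGET_ARCH_OPCODE_LIST(V) is typically consumed to generate the opcode enum. The DECLARE_ARCH_OPCODE macro and the trailing sentinel are illustrative assumptions, not necessarily the exact code in V8's shared instruction-codes.h:

    // Illustrative expansion of the X-macro list into an enum.
    #define DECLARE_ARCH_OPCODE(Name) k##Name,
    enum ArchOpcode {
      TARGET_ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE)  // expands to kArm64Add, kArm64Add32, ...
      kLastArchOpcode  // illustrative sentinel, one past the last generated value
    };
    #undef DECLARE_ARCH_OPCODE

Under this scheme, each V(Arm64Cmn) entry added in this change becomes a kArm64Cmn enumerator that the instruction selector can emit and the code generator can switch over.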
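Note: the two addressing modes at the end of the header, MRI ([%r0 + K]) and MRR ([%r0 + %r1]), describe how a load/store instruction's memory operand is formed. A hedged sketch of how a backend might decode them; the helper names here (AddressingModeField, InputRegister, InputInt32, MemoryOperand) are assumptions for illustration, not verbatim V8 API:

    // Sketch only: map an instruction's addressing mode to a memory operand.
    MemOperand MemoryOperand(Instruction* instr) {
      switch (AddressingModeField::decode(instr->opcode())) {
        case kMode_MRI:  // [%r0 + K]: base register plus constant offset
          return MemOperand(InputRegister(instr, 0), InputInt32(instr, 1));
        case kMode_MRR:  // [%r0 + %r1]: base register plus index register
          return MemOperand(InputRegister(instr, 0), InputRegister(instr, 1));
      }
      UNREACHABLE();
    }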