OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_ | 5 #ifndef V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_ |
6 #define V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_ | 6 #define V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_ |
7 | 7 |
8 namespace v8 { | 8 namespace v8 { |
9 namespace internal { | 9 namespace internal { |
10 namespace compiler { | 10 namespace compiler { |
(...skipping 36 matching lines...)
47 V(Arm64Udiv) \ | 47 V(Arm64Udiv) \ |
48 V(Arm64Udiv32) \ | 48 V(Arm64Udiv32) \ |
49 V(Arm64Imod) \ | 49 V(Arm64Imod) \ |
50 V(Arm64Imod32) \ | 50 V(Arm64Imod32) \ |
51 V(Arm64Umod) \ | 51 V(Arm64Umod) \ |
52 V(Arm64Umod32) \ | 52 V(Arm64Umod32) \ |
53 V(Arm64Not) \ | 53 V(Arm64Not) \ |
54 V(Arm64Not32) \ | 54 V(Arm64Not32) \ |
55 V(Arm64Neg) \ | 55 V(Arm64Neg) \ |
56 V(Arm64Neg32) \ | 56 V(Arm64Neg32) \ |
57 V(Arm64Shl) \ | 57 V(Arm64Lsl) \ |
58 V(Arm64Shl32) \ | 58 V(Arm64Lsl32) \ |
59 V(Arm64Shr) \ | 59 V(Arm64Lsr) \ |
60 V(Arm64Shr32) \ | 60 V(Arm64Lsr32) \ |
61 V(Arm64Sar) \ | 61 V(Arm64Asr) \ |
62 V(Arm64Sar32) \ | 62 V(Arm64Asr32) \ |
63 V(Arm64Ror) \ | 63 V(Arm64Ror) \ |
64 V(Arm64Ror32) \ | 64 V(Arm64Ror32) \ |
65 V(Arm64Mov32) \ | 65 V(Arm64Mov32) \ |
66 V(Arm64Sxtw) \ | 66 V(Arm64Sxtw) \ |
| 67 V(Arm64Ubfx) \ |
| 68 V(Arm64Ubfx32) \ |
67 V(Arm64Claim) \ | 69 V(Arm64Claim) \ |
68 V(Arm64Poke) \ | 70 V(Arm64Poke) \ |
69 V(Arm64PokePairZero) \ | 71 V(Arm64PokePairZero) \ |
70 V(Arm64PokePair) \ | 72 V(Arm64PokePair) \ |
71 V(Arm64Float64Cmp) \ | 73 V(Arm64Float64Cmp) \ |
72 V(Arm64Float64Add) \ | 74 V(Arm64Float64Add) \ |
73 V(Arm64Float64Sub) \ | 75 V(Arm64Float64Sub) \ |
74 V(Arm64Float64Mul) \ | 76 V(Arm64Float64Mul) \ |
75 V(Arm64Float64Div) \ | 77 V(Arm64Float64Div) \ |
76 V(Arm64Float64Mod) \ | 78 V(Arm64Float64Mod) \ |
(...skipping 36 matching lines...)
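Note on the hunk above: the NEW column renames the shift opcodes to the ARM64 assembler mnemonics (Lsl, Lsr, Asr: logical shift left, logical shift right, arithmetic shift right) and adds Arm64Ubfx/Arm64Ubfx32 for unsigned bitfield extract. A minimal C++ sketch of the semantics a UBFX instruction computes, assuming standard ARMv8 behavior; the helper names and bounds comment are illustrative, not part of this patch:

#include <cstdint>

// UBFX extracts `width` bits starting at bit `lsb` of `value` and
// zero-extends the result. Architecturally, 1 <= width and
// lsb + width <= register size.
uint64_t ubfx64(uint64_t value, unsigned lsb, unsigned width) {
  uint64_t mask = (width < 64) ? ((uint64_t{1} << width) - 1) : ~uint64_t{0};
  return (value >> lsb) & mask;
}

// 32-bit variant, matching Arm64Ubfx32 operating on W registers.
uint32_t ubfx32(uint32_t value, unsigned lsb, unsigned width) {
  uint32_t mask = (width < 32) ? ((uint32_t{1} << width) - 1) : ~uint32_t{0};
  return (value >> lsb) & mask;
}

For example, ubfx64(0xABCD, 4, 8) returns 0xBC: bits 4 through 11, zero-extended.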
113 // MRR = [register + register] | 115 // MRR = [register + register] |
114 #define TARGET_ADDRESSING_MODE_LIST(V) \ | 116 #define TARGET_ADDRESSING_MODE_LIST(V) \ |
115 V(MRI) /* [%r0 + K] */ \ | 117 V(MRI) /* [%r0 + K] */ \ |
116 V(MRR) /* [%r0 + %r1] */ | 118 V(MRR) /* [%r0 + %r1] */ |
117 | 119 |
118 } // namespace compiler | 120 } // namespace compiler |
119 } // namespace internal | 121 } // namespace internal |
120 } // namespace v8 | 122 } // namespace v8 |
121 | 123 |
122 #endif // V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_ | 124 #endif // V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_ |
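For context, the V(...) lists in this header are X-macros: shared compiler code expands each architecture's opcode and addressing-mode names into enums and printable strings. A minimal sketch of the expansion pattern, assuming the kMode_ naming convention used by the shared instruction codes; the DECLARE_ADDRESSING_MODE helper macro is illustrative:

#define TARGET_ADDRESSING_MODE_LIST(V) \
  V(MRI) /* [%r0 + K] */               \
  V(MRR) /* [%r0 + %r1] */

// Expands to: kMode_MRI, kMode_MRR.
enum AddressingMode {
#define DECLARE_ADDRESSING_MODE(Name) kMode_##Name,
  TARGET_ADDRESSING_MODE_LIST(DECLARE_ADDRESSING_MODE)
#undef DECLARE_ADDRESSING_MODE
};

The opcode list above is presumably expanded the same way, producing the kArm64* opcode enumerators (kArm64Lsl, kArm64Ubfx, and so on).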