| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_ | 5 #ifndef V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_ |
| 6 #define V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_ | 6 #define V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_ |
| 7 | 7 |
| 8 namespace v8 { | 8 namespace v8 { |
| 9 namespace internal { | 9 namespace internal { |
| 10 namespace compiler { | 10 namespace compiler { |
| (...skipping 30 matching lines...) |
| 41 V(Arm64Neg) \ | 41 V(Arm64Neg) \ |
| 42 V(Arm64Neg32) \ | 42 V(Arm64Neg32) \ |
| 43 V(Arm64Shl) \ | 43 V(Arm64Shl) \ |
| 44 V(Arm64Shl32) \ | 44 V(Arm64Shl32) \ |
| 45 V(Arm64Shr) \ | 45 V(Arm64Shr) \ |
| 46 V(Arm64Shr32) \ | 46 V(Arm64Shr32) \ |
| 47 V(Arm64Sar) \ | 47 V(Arm64Sar) \ |
| 48 V(Arm64Sar32) \ | 48 V(Arm64Sar32) \ |
| 49 V(Arm64Ror) \ | 49 V(Arm64Ror) \ |
| 50 V(Arm64Ror32) \ | 50 V(Arm64Ror32) \ |
| | 51 V(Arm64Mov32) \ |
| | 52 V(Arm64Sxtw) \ |
| 51 V(Arm64CallCodeObject) \ | 53 V(Arm64CallCodeObject) \ |
| 52 V(Arm64CallJSFunction) \ | 54 V(Arm64CallJSFunction) \ |
| 53 V(Arm64CallAddress) \ | 55 V(Arm64CallAddress) \ |
| 54 V(Arm64Claim) \ | 56 V(Arm64Claim) \ |
| 55 V(Arm64Poke) \ | 57 V(Arm64Poke) \ |
| 56 V(Arm64PokePairZero) \ | 58 V(Arm64PokePairZero) \ |
| 57 V(Arm64PokePair) \ | 59 V(Arm64PokePair) \ |
| 58 V(Arm64Drop) \ | 60 V(Arm64Drop) \ |
| 59 V(Arm64Float64Cmp) \ | 61 V(Arm64Float64Cmp) \ |
| 60 V(Arm64Float64Add) \ | 62 V(Arm64Float64Add) \ |
| 61 V(Arm64Float64Sub) \ | 63 V(Arm64Float64Sub) \ |
| 62 V(Arm64Float64Mul) \ | 64 V(Arm64Float64Mul) \ |
| 63 V(Arm64Float64Div) \ | 65 V(Arm64Float64Div) \ |
| 64 V(Arm64Float64Mod) \ | 66 V(Arm64Float64Mod) \ |
| 65 V(Arm64Int32ToInt64) \ | |
| 66 V(Arm64Int64ToInt32) \ | |
| 67 V(Arm64Float64ToInt32) \ | 67 V(Arm64Float64ToInt32) \ |
| 68 V(Arm64Float64ToUint32) \ | 68 V(Arm64Float64ToUint32) \ |
| 69 V(Arm64Int32ToFloat64) \ | 69 V(Arm64Int32ToFloat64) \ |
| 70 V(Arm64Uint32ToFloat64) \ | 70 V(Arm64Uint32ToFloat64) \ |
| 71 V(Arm64Float64Load) \ | 71 V(Arm64Float64Load) \ |
| 72 V(Arm64Float64Store) \ | 72 V(Arm64Float64Store) \ |
| 73 V(Arm64LoadWord8) \ | 73 V(Arm64LoadWord8) \ |
| 74 V(Arm64StoreWord8) \ | 74 V(Arm64StoreWord8) \ |
| 75 V(Arm64LoadWord16) \ | 75 V(Arm64LoadWord16) \ |
| 76 V(Arm64StoreWord16) \ | 76 V(Arm64StoreWord16) \ |
| (...skipping 19 matching lines...) |
| 96 // MRR = [register + register] | 96 // MRR = [register + register] |
| 97 #define TARGET_ADDRESSING_MODE_LIST(V) \ | 97 #define TARGET_ADDRESSING_MODE_LIST(V) \ |
| 98 V(MRI) /* [%r0 + K] */ \ | 98 V(MRI) /* [%r0 + K] */ \ |
| 99 V(MRR) /* [%r0 + %r1] */ | 99 V(MRR) /* [%r0 + %r1] */ |
| 100 | 100 |
| 101 } // namespace internal | 101 } // namespace internal |
| 102 } // namespace compiler | 102 } // namespace compiler |
| 103 } // namespace v8 | 103 } // namespace v8 |
| 104 | 104 |
| 105 #endif // V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_ | 105 #endif // V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_ |
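Note for readers of this diff: the V(...) tables in this header are X-macro lists; the backend expands them to build the architecture-specific opcode and addressing-mode enums, so adding V(Arm64Mov32) and V(Arm64Sxtw) above is what introduces the new opcode enumerators. A minimal sketch of the expansion pattern, shown here on a trimmed copy of the addressing-mode list (the DECLARE_MODE helper name is illustrative, not necessarily the exact macro V8 uses):

    // Trimmed copy of the list from this header.
    #define TARGET_ADDRESSING_MODE_LIST(V) \
      V(MRI) /* [%r0 + K] */               \
      V(MRR) /* [%r0 + %r1] */

    // Each V(name) entry becomes one enumerator: kMode_MRI, kMode_MRR.
    enum AddressingMode {
    #define DECLARE_MODE(Name) kMode_##Name,
      TARGET_ADDRESSING_MODE_LIST(DECLARE_MODE)
    #undef DECLARE_MODE
    };

The opcode list is consumed the same way (each V(Arm64Foo) becoming a kArm64Foo enumerator), which is why this change only needs to touch the list itself; the instruction selector and code generator then switch on the new enumerators.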