| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ | 5 #ifndef V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ |
| 6 #define V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ | 6 #define V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ |
| 7 | 7 |
| 8 namespace v8 { | 8 namespace v8 { |
| 9 namespace internal { | 9 namespace internal { |
| 10 namespace compiler { | 10 namespace compiler { |
| (...skipping 26 matching lines...) |
| 37 V(X64Neg) \ | 37 V(X64Neg) \ |
| 38 V(X64Neg32) \ | 38 V(X64Neg32) \ |
| 39 V(X64Shl) \ | 39 V(X64Shl) \ |
| 40 V(X64Shl32) \ | 40 V(X64Shl32) \ |
| 41 V(X64Shr) \ | 41 V(X64Shr) \ |
| 42 V(X64Shr32) \ | 42 V(X64Shr32) \ |
| 43 V(X64Sar) \ | 43 V(X64Sar) \ |
| 44 V(X64Sar32) \ | 44 V(X64Sar32) \ |
| 45 V(X64Ror) \ | 45 V(X64Ror) \ |
| 46 V(X64Ror32) \ | 46 V(X64Ror32) \ |
| 47 V(X64Push) \ | |
| 48 V(X64PushI) \ | |
| 49 V(X64CallCodeObject) \ | |
| 50 V(X64CallAddress) \ | |
| 51 V(PopStack) \ | |
| 52 V(X64CallJSFunction) \ | |
| 53 V(SSEFloat64Cmp) \ | 47 V(SSEFloat64Cmp) \ |
| 54 V(SSEFloat64Add) \ | 48 V(SSEFloat64Add) \ |
| 55 V(SSEFloat64Sub) \ | 49 V(SSEFloat64Sub) \ |
| 56 V(SSEFloat64Mul) \ | 50 V(SSEFloat64Mul) \ |
| 57 V(SSEFloat64Div) \ | 51 V(SSEFloat64Div) \ |
| 58 V(SSEFloat64Mod) \ | 52 V(SSEFloat64Mod) \ |
| 59 V(SSEFloat64ToInt32) \ | 53 V(SSEFloat64ToInt32) \ |
| 60 V(SSEFloat64ToUint32) \ | 54 V(SSEFloat64ToUint32) \ |
| 61 V(SSEInt32ToFloat64) \ | 55 V(SSEInt32ToFloat64) \ |
| 62 V(SSEUint32ToFloat64) \ | 56 V(SSEUint32ToFloat64) \ |
| 63 V(X64Movsxbl) \ | 57 V(X64Movsxbl) \ |
| 64 V(X64Movzxbl) \ | 58 V(X64Movzxbl) \ |
| 65 V(X64Movb) \ | 59 V(X64Movb) \ |
| 66 V(X64Movsxwl) \ | 60 V(X64Movsxwl) \ |
| 67 V(X64Movzxwl) \ | 61 V(X64Movzxwl) \ |
| 68 V(X64Movw) \ | 62 V(X64Movw) \ |
| 69 V(X64Movl) \ | 63 V(X64Movl) \ |
| 70 V(X64Movsxlq) \ | 64 V(X64Movsxlq) \ |
| 71 V(X64Movq) \ | 65 V(X64Movq) \ |
| 72 V(X64Movsd) \ | 66 V(X64Movsd) \ |
| 73 V(X64Movss) \ | 67 V(X64Movss) \ |
| | 68 V(X64Push) \ |
| 74 V(X64StoreWriteBarrier) | 69 V(X64StoreWriteBarrier) |
| 75 | 70 |
| 76 | 71 |
| 77 // Addressing modes represent the "shape" of inputs to an instruction. | 72 // Addressing modes represent the "shape" of inputs to an instruction. |
| 78 // Many instructions support multiple addressing modes. Addressing modes | 73 // Many instructions support multiple addressing modes. Addressing modes |
| 79 // are encoded into the InstructionCode of the instruction and tell the | 74 // are encoded into the InstructionCode of the instruction and tell the |
| 80 // code generator after register allocation which assembler method to call. | 75 // code generator after register allocation which assembler method to call. |
| 81 // | 76 // |
| 82 // We use the following local notation for addressing modes: | 77 // We use the following local notation for addressing modes: |
| 83 // | 78 // |
| (...skipping 12 matching lines...) |
| 96 V(MR1I) /* [%r1 + %r2 + K] */ \ | 91 V(MR1I) /* [%r1 + %r2 + K] */ \ |
| 97 V(MR2I) /* [%r1 + %r2*2 + K] */ \ | 92 V(MR2I) /* [%r1 + %r2*2 + K] */ \ |
| 98 V(MR4I) /* [%r1 + %r2*4 + K] */ \ | 93 V(MR4I) /* [%r1 + %r2*4 + K] */ \ |
| 99 V(MR8I) /* [%r1 + %r2*8 + K] */ | 94 V(MR8I) /* [%r1 + %r2*8 + K] */ |
| 100 | 95 |
| 101 } // namespace compiler | 96 } // namespace compiler |
| 102 } // namespace internal | 97 } // namespace internal |
| 103 } // namespace v8 | 98 } // namespace v8 |
| 104 | 99 |
| 105 #endif // V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ | 100 #endif // V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ |
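For context on the header being diffed: the V(...) lists above are X-macros, and the comment block explains that addressing modes are encoded into the InstructionCode alongside the opcode. Below is a minimal, self-contained sketch (not the actual V8 header) of how such lists are typically consumed. The SKETCH_* list names, the 8-bit field width, and the Encode helper are illustrative assumptions; V8's real definitions live in src/compiler/instruction-codes.h.

#include <cstdint>
#include <iostream>

// Stand-in for the (truncated) x64 opcode list in the diff.
#define SKETCH_OPCODE_LIST(V) \
  V(X64Add)                   \
  V(X64Push)                  \
  V(SSEFloat64Add)

// Stand-in for the addressing-mode list shown above (MR1I .. MR8I).
#define SKETCH_ADDRESSING_MODE_LIST(V) \
  V(MR1I)                              \
  V(MR2I)                              \
  V(MR4I)                              \
  V(MR8I)

// Expanding each list with a "k##Name," body generates the enums.
enum ArchOpcode {
#define DECLARE_OPCODE(Name) k##Name,
  SKETCH_OPCODE_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};

enum AddressingMode {
  kMode_None,
#define DECLARE_MODE(Name) kMode_##Name,
  SKETCH_ADDRESSING_MODE_LIST(DECLARE_MODE)
#undef DECLARE_MODE
};

// Both values are packed into one InstructionCode word; "addressing modes
// are encoded into the InstructionCode" in the header comment refers to
// this kind of bit-field packing. The field layout here is an assumption.
using InstructionCode = uint32_t;
constexpr int kOpcodeBits = 8;

constexpr InstructionCode Encode(ArchOpcode opcode, AddressingMode mode) {
  return static_cast<InstructionCode>(opcode) |
         (static_cast<InstructionCode>(mode) << kOpcodeBits);
}

int main() {
  // After register allocation, the code generator decodes both fields to
  // pick the assembler method and operand shape, e.g. [%r1 + %r2*4 + K].
  InstructionCode code = Encode(kX64Push, kMode_MR4I);
  std::cout << "opcode=" << (code & ((1u << kOpcodeBits) - 1))
            << " mode=" << (code >> kOpcodeBits) << "\n";
}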