OLD | NEW |
1 // Copyright 2015 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_COMPILER_CODE_ASSEMBLER_H_ | 5 #ifndef V8_COMPILER_CODE_ASSEMBLER_H_ |
6 #define V8_COMPILER_CODE_ASSEMBLER_H_ | 6 #define V8_COMPILER_CODE_ASSEMBLER_H_ |
7 | 7 |
8 #include <map> | 8 #include <map> |
9 | 9 |
10 // Clients of this interface shouldn't depend on lots of compiler internals. | 10 // Clients of this interface shouldn't depend on lots of compiler internals. |
(...skipping 54 matching lines...)
65 V(Word64Equal) \ | 65 V(Word64Equal) \ |
66 V(Word64NotEqual) | 66 V(Word64NotEqual) |
67 | 67 |
68 #define CODE_ASSEMBLER_BINARY_OP_LIST(V) \ | 68 #define CODE_ASSEMBLER_BINARY_OP_LIST(V) \ |
69 CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \ | 69 CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \ |
70 V(Float64Add) \ | 70 V(Float64Add) \ |
71 V(Float64Sub) \ | 71 V(Float64Sub) \ |
72 V(Float64Mul) \ | 72 V(Float64Mul) \ |
73 V(Float64Div) \ | 73 V(Float64Div) \ |
74 V(Float64Mod) \ | 74 V(Float64Mod) \ |
| 75 V(Float64Atan2) \ |
75 V(Float64InsertLowWord32) \ | 76 V(Float64InsertLowWord32) \ |
76 V(Float64InsertHighWord32) \ | 77 V(Float64InsertHighWord32) \ |
77 V(IntPtrAdd) \ | 78 V(IntPtrAdd) \ |
78 V(IntPtrAddWithOverflow) \ | 79 V(IntPtrAddWithOverflow) \ |
79 V(IntPtrSub) \ | 80 V(IntPtrSub) \ |
80 V(IntPtrSubWithOverflow) \ | 81 V(IntPtrSubWithOverflow) \ |
81 V(IntPtrMul) \ | 82 V(IntPtrMul) \ |
82 V(Int32Add) \ | 83 V(Int32Add) \ |
83 V(Int32AddWithOverflow) \ | 84 V(Int32AddWithOverflow) \ |
84 V(Int32Sub) \ | 85 V(Int32Sub) \ |
(...skipping 14 matching lines...)
99 V(Word32Sar) \ | 100 V(Word32Sar) \ |
100 V(Word32Ror) \ | 101 V(Word32Ror) \ |
101 V(Word64Or) \ | 102 V(Word64Or) \ |
102 V(Word64And) \ | 103 V(Word64And) \ |
103 V(Word64Xor) \ | 104 V(Word64Xor) \ |
104 V(Word64Shr) \ | 105 V(Word64Shr) \ |
105 V(Word64Sar) \ | 106 V(Word64Sar) \ |
106 V(Word64Ror) | 107 V(Word64Ror) |
107 | 108 |
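The op lists above follow the X-macro pattern: CODE_ASSEMBLER_BINARY_OP_LIST(V) invokes the macro V once per operation name, so adding V(Float64Atan2) to the list is enough to generate a corresponding two-argument method wherever the list is expanded. A minimal, self-contained sketch of that expansion (the DEMO_* names, DemoAssembler, and the opaque Node type are illustrative assumptions, not V8 code):

// Stand-in for compiler::Node; opaque in this sketch.
struct Node;

// Trimmed-down list in the style of CODE_ASSEMBLER_BINARY_OP_LIST.
#define DEMO_BINARY_OP_LIST(V) \
  V(Float64Add)                \
  V(Float64Atan2) /* the entry added by this change */

struct DemoAssembler {
  // Each list entry expands into one two-argument method declaration.
#define DECLARE_BINARY_OP(name) Node* name(Node* a, Node* b);
  DEMO_BINARY_OP_LIST(DECLARE_BINARY_OP)
#undef DECLARE_BINARY_OP
};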
108 #define CODE_ASSEMBLER_UNARY_OP_LIST(V) \ | 109 #define CODE_ASSEMBLER_UNARY_OP_LIST(V) \ |
| 110 V(Float64Atan) \ |
109 V(Float64Log) \ | 111 V(Float64Log) \ |
110 V(Float64Log1p) \ | 112 V(Float64Log1p) \ |
111 V(Float64Neg) \ | 113 V(Float64Neg) \ |
112 V(Float64Sqrt) \ | 114 V(Float64Sqrt) \ |
113 V(Float64ExtractLowWord32) \ | 115 V(Float64ExtractLowWord32) \ |
114 V(Float64ExtractHighWord32) \ | 116 V(Float64ExtractHighWord32) \ |
115 V(BitcastWordToTagged) \ | 117 V(BitcastWordToTagged) \ |
116 V(TruncateFloat64ToWord32) \ | 118 V(TruncateFloat64ToWord32) \ |
117 V(TruncateInt64ToInt32) \ | 119 V(TruncateInt64ToInt32) \ |
118 V(ChangeFloat64ToUint32) \ | 120 V(ChangeFloat64ToUint32) \ |
(...skipping 290 matching lines...)
409 // Map of variables to the list of value nodes that have been added from each | 411 // Map of variables to the list of value nodes that have been added from each |
410 // merge path in their order of merging. | 412 // merge path in their order of merging. |
411 std::map<Variable::Impl*, std::vector<Node*>> variable_merges_; | 413 std::map<Variable::Impl*, std::vector<Node*>> variable_merges_; |
412 }; | 414 }; |
413 | 415 |
414 } // namespace compiler | 416 } // namespace compiler |
415 } // namespace internal | 417 } // namespace internal |
416 } // namespace v8 | 418 } // namespace v8 |
417 | 419 |
418 #endif // V8_COMPILER_CODE_ASSEMBLER_H_ | 420 #endif // V8_COMPILER_CODE_ASSEMBLER_H_ |
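For context on how the newly listed Float64Atan and Float64Atan2 become callable, the definition side typically expands the same lists into forwarding bodies. The sketch below is a self-contained illustration under assumed names (DEMO_*, DemoUnaryAssembler, RawBackend); it is not V8's actual implementation:

// Stand-in for compiler::Node; opaque in this sketch.
struct Node;

// Trimmed-down list in the style of CODE_ASSEMBLER_UNARY_OP_LIST.
#define DEMO_UNARY_OP_LIST(V) \
  V(Float64Sqrt)              \
  V(Float64Atan) /* the entry added by this change */

// Stand-in for the lower-level assembler the generated methods forward to.
struct RawBackend {
  Node* Float64Sqrt(Node* a) { return a; }  // placeholder behavior
  Node* Float64Atan(Node* a) { return a; }  // placeholder behavior
};

struct DemoUnaryAssembler {
  explicit DemoUnaryAssembler(RawBackend* raw) : raw_(raw) {}
  // Each list entry expands into a one-argument method that forwards to the
  // backend method of the same name.
#define DEFINE_UNARY_OP(name) \
  Node* name(Node* a) { return raw_->name(a); }
  DEMO_UNARY_OP_LIST(DEFINE_UNARY_OP)
#undef DEFINE_UNARY_OP
  RawBackend* raw_;
};

Once the header change and the matching operator support land, code built on CodeAssembler can emit atan/atan2 nodes the same way as the existing Float64 ops, e.g. Float64Atan(x) or Float64Atan2(y, x).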