OLD | NEW |
---|---|
1 // Copyright 2015 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_COMPILER_CODE_STUB_ASSEMBLER_H_ | 5 #ifndef V8_COMPILER_CODE_STUB_ASSEMBLER_H_ |
6 #define V8_COMPILER_CODE_STUB_ASSEMBLER_H_ | 6 #define V8_COMPILER_CODE_STUB_ASSEMBLER_H_ |
7 | 7 |
8 #include <map> | 8 #include <map> |
9 | 9 |
10 // Clients of this interface shouldn't depend on lots of compiler internals. | 10 // Clients of this interface shouldn't depend on lots of compiler internals. |
(...skipping 47 matching lines...) | |
58 V(WordNotEqual) \ | 58 V(WordNotEqual) \ |
59 V(Word32Equal) \ | 59 V(Word32Equal) \ |
60 V(Word32NotEqual) \ | 60 V(Word32NotEqual) \ |
61 V(Word64Equal) \ | 61 V(Word64Equal) \ |
62 V(Word64NotEqual) | 62 V(Word64NotEqual) |
63 | 63 |
64 #define CODE_STUB_ASSEMBLER_BINARY_OP_LIST(V) \ | 64 #define CODE_STUB_ASSEMBLER_BINARY_OP_LIST(V) \ |
65 CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \ | 65 CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \ |
66 V(Float64Add) \ | 66 V(Float64Add) \ |
67 V(Float64Sub) \ | 67 V(Float64Sub) \ |
| 68 V(Float64InsertLowWord32) \ |
| 69 V(Float64InsertHighWord32) \ |
68 V(IntPtrAdd) \ | 70 V(IntPtrAdd) \ |
69 V(IntPtrAddWithOverflow) \ | 71 V(IntPtrAddWithOverflow) \ |
70 V(IntPtrSub) \ | 72 V(IntPtrSub) \ |
71 V(IntPtrSubWithOverflow) \ | 73 V(IntPtrSubWithOverflow) \ |
72 V(Int32Add) \ | 74 V(Int32Add) \ |
73 V(Int32AddWithOverflow) \ | 75 V(Int32AddWithOverflow) \ |
74 V(Int32Sub) \ | 76 V(Int32Sub) \ |
75 V(Int32Mul) \ | 77 V(Int32Mul) \ |
76 V(WordOr) \ | 78 V(WordOr) \ |
77 V(WordAnd) \ | 79 V(WordAnd) \ |
(...skipping 10 matching lines...) | |
88 V(Word32Sar) \ | 90 V(Word32Sar) \ |
89 V(Word32Ror) \ | 91 V(Word32Ror) \ |
90 V(Word64Or) \ | 92 V(Word64Or) \ |
91 V(Word64And) \ | 93 V(Word64And) \ |
92 V(Word64Xor) \ | 94 V(Word64Xor) \ |
93 V(Word64Shr) \ | 95 V(Word64Shr) \ |
94 V(Word64Sar) \ | 96 V(Word64Sar) \ |
95 V(Word64Ror) | 97 V(Word64Ror) |
96 | 98 |
97 #define CODE_STUB_ASSEMBLER_UNARY_OP_LIST(V) \ | 99 #define CODE_STUB_ASSEMBLER_UNARY_OP_LIST(V) \ |
| 100 V(Float64ExtractLowWord32) \ |
epertoso 2016/03/28 09:55:08: Do we really need to expose these two?
Benedikt Meurer 2016/03/28 17:00:58: You are right, we don't need to.
| 101 V(Float64ExtractHighWord32) \ |
98 V(Float64Sqrt) \ | 102 V(Float64Sqrt) \ |
99 V(ChangeFloat64ToUint32) \ | 103 V(ChangeFloat64ToUint32) \ |
100 V(ChangeInt32ToFloat64) \ | 104 V(ChangeInt32ToFloat64) \ |
101 V(ChangeInt32ToInt64) \ | 105 V(ChangeInt32ToInt64) \ |
102 V(ChangeUint32ToFloat64) \ | 106 V(ChangeUint32ToFloat64) \ |
103 V(ChangeUint32ToUint64) | 107 V(ChangeUint32ToUint64) |
104 | 108 |
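For readers new to the V-macro pattern above: each list is expanded inside the class body with a short DECLARE macro that stamps out one declaration per operation. A minimal sketch of that expansion (the DECLARE_* macro names are illustrative, not necessarily the ones used in the real file):

```cpp
// Sketch of how the op lists become member declarations; DECLARE_* names
// are illustrative.
#define DECLARE_BINARY_OP(name) Node* name(Node* a, Node* b);
  CODE_STUB_ASSEMBLER_BINARY_OP_LIST(DECLARE_BINARY_OP)
#undef DECLARE_BINARY_OP

#define DECLARE_UNARY_OP(name) Node* name(Node* a);
  CODE_STUB_ASSEMBLER_UNARY_OP_LIST(DECLARE_UNARY_OP)
#undef DECLARE_UNARY_OP

// After preprocessing this yields, for example:
//   Node* Float64Add(Node* a, Node* b);
//   Node* Float64Sqrt(Node* a);
```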
105 class CodeStubAssembler { | 109 class CodeStubAssembler { |
106 public: | 110 public: |
107 // Create with CallStub linkage. | 111 // Create with CallStub linkage. |
(...skipping 142 matching lines...) | |
250 Node* context, Node* arg1, Node* arg2, | 254 Node* context, Node* arg1, Node* arg2, |
251 size_t result_size = 1); | 255 size_t result_size = 1); |
252 | 256 |
253 Node* TailCall(const CallInterfaceDescriptor& descriptor, Node* target, | 257 Node* TailCall(const CallInterfaceDescriptor& descriptor, Node* target, |
254 Node** args, size_t result_size = 1); | 258 Node** args, size_t result_size = 1); |
255 | 259 |
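A usage sketch for the tail-call entry point above, under the assumption that the stub's arguments line up with the given CallInterfaceDescriptor (the generator function and parameter layout are invented for illustration):

```cpp
// Sketch: forward this stub's parameters to |target| in tail position.
// The generator function and parameter indices are illustrative only.
void GenerateForwardingStub(CodeStubAssembler* assembler,
                            const CallInterfaceDescriptor& descriptor,
                            Node* target) {
  Node* args[] = {
      assembler->Parameter(0),  // receiver
      assembler->Parameter(1),  // argument
      assembler->Parameter(2),  // context (descriptor-dependent)
  };
  assembler->TailCall(descriptor, target, args, 1 /* result_size */);
}
```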
256 // =========================================================================== | 260 // =========================================================================== |
257 // Macros | 261 // Macros |
258 // =========================================================================== | 262 // =========================================================================== |
259 | 263 |
| 264 // Float64 operations. |
| 265 Node* Float64Floor(Node* x); |
| 266 |
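Float64Floor is one of the additions under review here. One plausible shape for it, assuming a private raw-assembler-style backend and that Float64GreaterThan is among the comparison ops elided above (a sketch, not the verbatim .cc implementation; sign, NaN and large-magnitude handling are elided):

```cpp
// Sketch: use the hardware rounding instruction when available, otherwise
// fall back to the 2^52 trick plus a compare-and-correct step.
Node* CodeStubAssembler::Float64Floor(Node* x) {
  if (raw_assembler_->machine()->Float64RoundDown().IsSupported()) {
    return raw_assembler_->Float64RoundDown(x);
  }
  // For 0 <= x < 2^52, (x + 2^52) - 2^52 rounds x to the nearest integer.
  Node* two_52 = Float64Constant(4503599627370496.0);
  Node* rounded = Float64Sub(Float64Add(x, two_52), two_52);
  Variable var_result(this, MachineRepresentation::kFloat64);
  Label correct(this), done(this);
  var_result.Bind(rounded);
  // If rounding went up past x, step back down to get floor semantics.
  BranchIfFloat64GreaterThan(rounded, x, &correct, &done);
  Bind(&correct);
  var_result.Bind(Float64Sub(rounded, Float64Constant(1.0)));
  Goto(&done);
  Bind(&done);
  return var_result.value();
}
```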
260 // Tag a Word as a Smi value. | 267 // Tag a Word as a Smi value. |
261 Node* SmiTag(Node* value); | 268 Node* SmiTag(Node* value); |
262 // Untag a Smi value as a Word. | 269 // Untag a Smi value as a Word. |
263 Node* SmiUntag(Node* value); | 270 Node* SmiUntag(Node* value); |
264 | 271 |
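Smi tagging is just a word shift: the payload lives in the high bits and the low tag bit(s) are zero. A sketch of how the pair above typically reduces to shifts, assuming WordShl/WordSar are among the word ops elided earlier and that SmiShiftBitsConstant is a private helper yielding kSmiShiftSize + kSmiTagSize as a constant node:

```cpp
// Sketch, mirroring the usual V8 Smi scheme; SmiShiftBitsConstant is an
// assumed helper.
Node* CodeStubAssembler::SmiTag(Node* value) {
  return WordShl(value, SmiShiftBitsConstant());
}

Node* CodeStubAssembler::SmiUntag(Node* value) {
  // Arithmetic shift right preserves the sign of the untagged value.
  return WordSar(value, SmiShiftBitsConstant());
}
```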
265 // Smi conversions. | 272 // Smi conversions. |
266 Node* SmiToFloat64(Node* value); | 273 Node* SmiToFloat64(Node* value); |
267 Node* SmiToWord32(Node* value); | 274 Node* SmiToWord32(Node* value); |
268 | 275 |
269 // Smi operations. | 276 // Smi operations. |
(...skipping 56 matching lines...) | |
326 | 333 |
327 // Returns a node containing the value of the bit field |T| in |word32|. | 334 // Returns a node containing the value of the bit field |T| in |word32|. |
328 template <typename T> | 335 template <typename T> |
329 Node* BitFieldDecode(Node* word32) { | 336 Node* BitFieldDecode(Node* word32) { |
330 return BitFieldDecode(word32, T::kShift, T::kMask); | 337 return BitFieldDecode(word32, T::kShift, T::kMask); |
331 } | 338 } |
332 | 339 |
333 Node* BitFieldDecode(Node* word32, uint32_t shift, uint32_t mask); | 340 Node* BitFieldDecode(Node* word32, uint32_t shift, uint32_t mask); |
334 | 341 |
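The non-template overload makes the decoding explicit: mask the field out, then shift it down to bit zero. A sketch consistent with that signature, assuming Word32And/Word32Shr and Int32Constant come from the elided portions of this header:

```cpp
// Sketch: extract (word32 & mask) >> shift as a new node.
Node* CodeStubAssembler::BitFieldDecode(Node* word32, uint32_t shift,
                                        uint32_t mask) {
  return Word32Shr(Word32And(word32, Int32Constant(mask)),
                   Int32Constant(shift));
}

// Typical call site, via the template overload above:
//   Node* kind = assembler.BitFieldDecode<Map::ElementsKindBits>(bit_field2);
```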
335 // Conversions. | 342 // Conversions. |
| 343 Node* ChangeFloat64ToTagged(Node* value); |
336 Node* ChangeInt32ToTagged(Node* value); | 344 Node* ChangeInt32ToTagged(Node* value); |
337 Node* TruncateTaggedToFloat64(Node* context, Node* value); | 345 Node* TruncateTaggedToFloat64(Node* context, Node* value); |
338 Node* TruncateTaggedToWord32(Node* context, Node* value); | 346 Node* TruncateTaggedToWord32(Node* context, Node* value); |
339 | 347 |
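ChangeFloat64ToTagged, the other addition in this patch, has to produce a Smi when the double fits and a boxed HeapNumber otherwise. A control-flow sketch under stated assumptions: the float64-to-int32 conversion ChangeFloat64ToInt32 and the AllocateHeapNumberWithValue helper are invented for illustration, and the -0 and overflow corner cases are elided:

```cpp
// Sketch: tag as Smi when the double round-trips through int32, else box it.
// ChangeFloat64ToInt32 and AllocateHeapNumberWithValue are assumed helpers.
Node* CodeStubAssembler::ChangeFloat64ToTagged(Node* value) {
  Node* value32 = ChangeFloat64ToInt32(value);
  Label if_smi(this), if_heapnumber(this), done(this);
  Variable var_result(this, MachineRepresentation::kTagged);
  // int32-representable iff converting back yields the same double
  // (-0 handling elided).
  BranchIfFloat64Equal(value, ChangeInt32ToFloat64(value32), &if_smi,
                       &if_heapnumber);
  Bind(&if_smi);
  var_result.Bind(ChangeInt32ToTagged(value32));  // declared above
  Goto(&done);
  Bind(&if_heapnumber);
  var_result.Bind(AllocateHeapNumberWithValue(value));
  Goto(&done);
  Bind(&done);
  return var_result.value();
}
```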
340 // Branching helpers. | 348 // Branching helpers. |
341 // TODO(danno): Can we be more cleverish wrt. edge-split? | 349 // TODO(danno): Can we be more cleverish wrt. edge-split? |
342 void BranchIf(Node* condition, Label* if_true, Label* if_false); | 350 void BranchIf(Node* condition, Label* if_true, Label* if_false); |
343 | 351 |
344 #define BRANCH_HELPER(name) \ | 352 #define BRANCH_HELPER(name) \ |
345 void BranchIf##name(Node* a, Node* b, Label* if_true, Label* if_false) { \ | 353 void BranchIf##name(Node* a, Node* b, Label* if_true, Label* if_false) { \ |
(...skipping 91 matching lines...) | |
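The map below is what makes multi-path Variable bindings work: every Bind on a merge path is recorded so a Phi can be emitted at the join. A usage sketch showing the pattern this bookkeeping supports (the stub body is invented; it assumes IntPtrLessThan is among the elided comparison ops):

```cpp
// Sketch: absolute value of a Smi argument; invented example.
void GenerateSmiAbs(CodeStubAssembler* assembler) {
  typedef CodeStubAssembler::Label Label;
  typedef CodeStubAssembler::Variable Variable;

  Node* value = assembler->SmiUntag(assembler->Parameter(0));
  Variable var_result(assembler, MachineType::PointerRepresentation());
  Label if_negative(assembler), done(assembler);

  var_result.Bind(value);  // binding on the fall-through path
  assembler->BranchIfIntPtrLessThan(value, assembler->IntPtrConstant(0),
                                    &if_negative, &done);

  assembler->Bind(&if_negative);
  var_result.Bind(  // second binding; merged into a Phi at |done|
      assembler->IntPtrSub(assembler->IntPtrConstant(0), value));
  assembler->Goto(&done);

  assembler->Bind(&done);
  assembler->Return(assembler->SmiTag(var_result.value()));
}
```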
437 // Map of variables to the list of value nodes that have been added from each | 445 // Map of variables to the list of value nodes that have been added from each |
438 // merge path in their order of merging. | 446 // merge path in their order of merging. |
439 std::map<Variable::Impl*, std::vector<Node*>> variable_merges_; | 447 std::map<Variable::Impl*, std::vector<Node*>> variable_merges_; |
440 }; | 448 }; |
441 | 449 |
442 } // namespace compiler | 450 } // namespace compiler |
443 } // namespace internal | 451 } // namespace internal |
444 } // namespace v8 | 452 } // namespace v8 |
445 | 453 |
446 #endif // V8_COMPILER_CODE_STUB_ASSEMBLER_H_ | 454 #endif // V8_COMPILER_CODE_STUB_ASSEMBLER_H_ |