OLD | NEW |
1 // Copyright 2015 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_COMPILER_CODE_STUB_ASSEMBLER_H_ | 5 #ifndef V8_COMPILER_CODE_STUB_ASSEMBLER_H_ |
6 #define V8_COMPILER_CODE_STUB_ASSEMBLER_H_ | 6 #define V8_COMPILER_CODE_STUB_ASSEMBLER_H_ |
7 | 7 |
8 #include <map> | 8 #include <map> |
9 | 9 |
10 // Clients of this interface shouldn't depend on lots of compiler internals. | 10 // Clients of this interface shouldn't depend on lots of compiler internals. |
(...skipping 47 matching lines...)
58 V(WordNotEqual) \ | 58 V(WordNotEqual) \ |
59 V(Word32Equal) \ | 59 V(Word32Equal) \ |
60 V(Word32NotEqual) \ | 60 V(Word32NotEqual) \ |
61 V(Word64Equal) \ | 61 V(Word64Equal) \ |
62 V(Word64NotEqual) | 62 V(Word64NotEqual) |
63 | 63 |
64 #define CODE_STUB_ASSEMBLER_BINARY_OP_LIST(V) \ | 64 #define CODE_STUB_ASSEMBLER_BINARY_OP_LIST(V) \ |
65 CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \ | 65 CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \ |
66 V(Float64Add) \ | 66 V(Float64Add) \ |
67 V(Float64Sub) \ | 67 V(Float64Sub) \ |
| 68 V(Float64InsertLowWord32) \ |
| 69 V(Float64InsertHighWord32) \ |
68 V(IntPtrAdd) \ | 70 V(IntPtrAdd) \ |
69 V(IntPtrAddWithOverflow) \ | 71 V(IntPtrAddWithOverflow) \ |
70 V(IntPtrSub) \ | 72 V(IntPtrSub) \ |
71 V(IntPtrSubWithOverflow) \ | 73 V(IntPtrSubWithOverflow) \ |
72 V(Int32Add) \ | 74 V(Int32Add) \ |
73 V(Int32AddWithOverflow) \ | 75 V(Int32AddWithOverflow) \ |
74 V(Int32Sub) \ | 76 V(Int32Sub) \ |
75 V(Int32Mul) \ | 77 V(Int32Mul) \ |
76 V(WordOr) \ | 78 V(WordOr) \ |
77 V(WordAnd) \ | 79 V(WordAnd) \ |
(...skipping 172 matching lines...)
250 Node* context, Node* arg1, Node* arg2, | 252 Node* context, Node* arg1, Node* arg2, |
251 size_t result_size = 1); | 253 size_t result_size = 1); |
252 | 254 |
253 Node* TailCall(const CallInterfaceDescriptor& descriptor, Node* target, | 255 Node* TailCall(const CallInterfaceDescriptor& descriptor, Node* target, |
254 Node** args, size_t result_size = 1); | 256 Node** args, size_t result_size = 1); |
255 | 257 |
256 // =========================================================================== | 258 // =========================================================================== |
257 // Macros | 259 // Macros |
258 // =========================================================================== | 260 // =========================================================================== |
259 | 261 |
| 262 // Float64 operations. |
| 263 Node* Float64Floor(Node* x); |
| 264 |
260 // Tag a Word as a Smi value. | 265 // Tag a Word as a Smi value. |
261 Node* SmiTag(Node* value); | 266 Node* SmiTag(Node* value); |
262 // Untag a Smi value as a Word. | 267 // Untag a Smi value as a Word. |
263 Node* SmiUntag(Node* value); | 268 Node* SmiUntag(Node* value); |
264 | 269 |
265 // Smi conversions. | 270 // Smi conversions. |
266 Node* SmiToFloat64(Node* value); | 271 Node* SmiToFloat64(Node* value); |
267 Node* SmiToWord32(Node* value); | 272 Node* SmiToWord32(Node* value); |
268 | 273 |
269 // Smi operations. | 274 // Smi operations. |
(...skipping 56 matching lines...)
326 | 331 |
327 // Returns a node that is true if the given bit is set in |word32|. | 332 // Returns a node that is true if the given bit is set in |word32|. |
328 template <typename T> | 333 template <typename T> |
329 Node* BitFieldDecode(Node* word32) { | 334 Node* BitFieldDecode(Node* word32) { |
330 return BitFieldDecode(word32, T::kShift, T::kMask); | 335 return BitFieldDecode(word32, T::kShift, T::kMask); |
331 } | 336 } |
332 | 337 |
333 Node* BitFieldDecode(Node* word32, uint32_t shift, uint32_t mask); | 338 Node* BitFieldDecode(Node* word32, uint32_t shift, uint32_t mask); |
334 | 339 |
335 // Conversions. | 340 // Conversions. |
| 341 Node* ChangeFloat64ToTagged(Node* value); |
336 Node* ChangeInt32ToTagged(Node* value); | 342 Node* ChangeInt32ToTagged(Node* value); |
337 Node* TruncateTaggedToFloat64(Node* context, Node* value); | 343 Node* TruncateTaggedToFloat64(Node* context, Node* value); |
338 Node* TruncateTaggedToWord32(Node* context, Node* value); | 344 Node* TruncateTaggedToWord32(Node* context, Node* value); |
339 | 345 |
340 // Branching helpers. | 346 // Branching helpers. |
341 // TODO(danno): Can we be more cleverish wrt. edge-split? | 347 // TODO(danno): Can we be more cleverish wrt. edge-split? |
342 void BranchIf(Node* condition, Label* if_true, Label* if_false); | 348 void BranchIf(Node* condition, Label* if_true, Label* if_false); |
343 | 349 |
344 #define BRANCH_HELPER(name) \ | 350 #define BRANCH_HELPER(name) \ |
345 void BranchIf##name(Node* a, Node* b, Label* if_true, Label* if_false) { \ | 351 void BranchIf##name(Node* a, Node* b, Label* if_true, Label* if_false) { \ |
(...skipping 91 matching lines...)
437 // Map of variables to the list of value nodes that have been added from each | 443 // Map of variables to the list of value nodes that have been added from each |
438 // merge path in their order of merging. | 444 // merge path in their order of merging. |
439 std::map<Variable::Impl*, std::vector<Node*>> variable_merges_; | 445 std::map<Variable::Impl*, std::vector<Node*>> variable_merges_; |
440 }; | 446 }; |
441 | 447 |
442 } // namespace compiler | 448 } // namespace compiler |
443 } // namespace internal | 449 } // namespace internal |
444 } // namespace v8 | 450 } // namespace v8 |
445 | 451 |
446 #endif // V8_COMPILER_CODE_STUB_ASSEMBLER_H_ | 452 #endif // V8_COMPILER_CODE_STUB_ASSEMBLER_H_ |
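
The macros this patch introduces (Float64Floor, the Float64InsertLowWord32/Float64InsertHighWord32 raw operations, and ChangeFloat64ToTagged) give stubs a way to compute on an unboxed float64 and re-box the result. Below is a minimal usage sketch, not code from this CL: the generator name and parameter slots are assumptions for illustration, and it relies on the Parameter/Return helpers that CodeStubAssembler already provides in the elided portion of this header.

// Hypothetical generator (name and parameter layout are assumed, not part
// of this change): floor a tagged number and return the tagged result.
void GenerateFloorStub(compiler::CodeStubAssembler* assembler) {
  typedef compiler::Node Node;

  Node* value = assembler->Parameter(0);    // tagged input (assumed slot)
  Node* context = assembler->Parameter(1);  // context (assumed slot)

  // Convert the tagged input to an untagged float64, apply the new
  // Float64Floor macro, then re-box the float64 result as a tagged number.
  Node* value_f64 = assembler->TruncateTaggedToFloat64(context, value);
  Node* floored = assembler->Float64Floor(value_f64);
  assembler->Return(assembler->ChangeFloat64ToTagged(floored));
}

The Float64InsertLowWord32/Float64InsertHighWord32 entries added to the binary-op list are presumably needed by the fallback paths of these float64 macros on targets without native float64 rounding support; the actual implementations live in the corresponding .cc file, which is not shown in this section.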