1 // Copyright 2015 the V8 project authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #ifndef V8_COMPILER_CODE_STUB_ASSEMBLER_H_ | |
6 #define V8_COMPILER_CODE_STUB_ASSEMBLER_H_ | |
7 | |
8 #include <map> | |
9 | |
10 // Clients of this interface shouldn't depend on lots of compiler internals. | |
11 // Do not include anything from src/compiler here! | |
12 #include "src/allocation.h" | |
13 #include "src/builtins.h" | |
14 #include "src/heap/heap.h" | |
15 #include "src/machine-type.h" | |
16 #include "src/runtime/runtime.h" | |
17 #include "src/zone-containers.h" | |
18 | |
19 namespace v8 { | |
20 namespace internal { | |
21 | |
22 class Callable; | |
23 class CallInterfaceDescriptor; | |
24 class Isolate; | |
25 class Factory; | |
26 class Zone; | |
27 | |
28 namespace compiler { | |
29 | |
30 class CallDescriptor; | |
31 class Graph; | |
32 class Node; | |
33 class Operator; | |
34 class RawMachineAssembler; | |
35 class RawMachineLabel; | |
36 class Schedule; | |
37 | |
// Binary comparison operations: each V(Name) expands to a two-operand op
// declaration (see DECLARE_CODE_STUB_ASSEMBLER_BINARY_OP below) and to a
// BranchIf##Name helper (see BRANCH_HELPER). Kept separate from the full
// binary-op list so that only comparisons get branch helpers.
#define CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
  V(Float32Equal)                                     \
  V(Float32LessThan)                                  \
  V(Float32LessThanOrEqual)                           \
  V(Float32GreaterThan)                               \
  V(Float32GreaterThanOrEqual)                        \
  V(Float64Equal)                                     \
  V(Float64LessThan)                                  \
  V(Float64LessThanOrEqual)                           \
  V(Float64GreaterThan)                               \
  V(Float64GreaterThanOrEqual)                        \
  V(Int32GreaterThan)                                 \
  V(Int32GreaterThanOrEqual)                          \
  V(Int32LessThan)                                    \
  V(Int32LessThanOrEqual)                             \
  V(IntPtrLessThan)                                   \
  V(IntPtrLessThanOrEqual)                            \
  V(Uint32LessThan)                                   \
  V(UintPtrGreaterThanOrEqual)                        \
  V(WordEqual)                                        \
  V(WordNotEqual)                                     \
  V(Word32Equal)                                      \
  V(Word32NotEqual)                                   \
  V(Word64Equal)                                      \
  V(Word64NotEqual)
63 | |
// All binary operations exposed by the assembler: the comparison list above
// plus arithmetic, bitwise and shift ops. Each V(Name) expands to a
// Node* Name(Node* a, Node* b) declaration inside CodeStubAssembler.
#define CODE_STUB_ASSEMBLER_BINARY_OP_LIST(V)   \
  CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
  V(Float64Add)                                 \
  V(Float64Sub)                                 \
  V(Float64Mul)                                 \
  V(Float64Div)                                 \
  V(Float64Mod)                                 \
  V(Float64InsertLowWord32)                     \
  V(Float64InsertHighWord32)                    \
  V(IntPtrAdd)                                  \
  V(IntPtrAddWithOverflow)                      \
  V(IntPtrSub)                                  \
  V(IntPtrSubWithOverflow)                      \
  V(IntPtrMul)                                  \
  V(Int32Add)                                   \
  V(Int32AddWithOverflow)                       \
  V(Int32Sub)                                   \
  V(Int32Mul)                                   \
  V(Int32Div)                                   \
  V(WordOr)                                     \
  V(WordAnd)                                    \
  V(WordXor)                                    \
  V(WordShl)                                    \
  V(WordShr)                                    \
  V(WordSar)                                    \
  V(WordRor)                                    \
  V(Word32Or)                                   \
  V(Word32And)                                  \
  V(Word32Xor)                                  \
  V(Word32Shl)                                  \
  V(Word32Shr)                                  \
  V(Word32Sar)                                  \
  V(Word32Ror)                                  \
  V(Word64Or)                                   \
  V(Word64And)                                  \
  V(Word64Xor)                                  \
  V(Word64Shr)                                  \
  V(Word64Sar)                                  \
  V(Word64Ror)
103 | |
// Unary operations: each V(Name) expands to a Node* Name(Node* a)
// declaration inside CodeStubAssembler.
#define CODE_STUB_ASSEMBLER_UNARY_OP_LIST(V) \
  V(Float64Neg)                              \
  V(Float64Sqrt)                             \
  V(ChangeFloat64ToUint32)                   \
  V(ChangeInt32ToFloat64)                    \
  V(ChangeInt32ToInt64)                      \
  V(ChangeUint32ToFloat64)                   \
  V(ChangeUint32ToUint64)                    \
  V(Word32Clz)
113 | |
114 class CodeStubAssembler { | |
115 public: | |
116 // Create with CallStub linkage. | |
117 // |result_size| specifies the number of results returned by the stub. | |
118 // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor. | |
119 CodeStubAssembler(Isolate* isolate, Zone* zone, | |
120 const CallInterfaceDescriptor& descriptor, | |
121 Code::Flags flags, const char* name, | |
122 size_t result_size = 1); | |
123 | |
124 // Create with JSCall linkage. | |
125 CodeStubAssembler(Isolate* isolate, Zone* zone, int parameter_count, | |
126 Code::Flags flags, const char* name); | |
127 | |
128 virtual ~CodeStubAssembler(); | |
129 | |
130 Handle<Code> GenerateCode(); | |
131 | |
132 class Label; | |
133 class Variable { | |
134 public: | |
135 explicit Variable(CodeStubAssembler* assembler, MachineRepresentation rep); | |
136 void Bind(Node* value); | |
137 Node* value() const; | |
138 MachineRepresentation rep() const; | |
139 bool IsBound() const; | |
140 | |
141 private: | |
142 friend class CodeStubAssembler; | |
143 class Impl; | |
144 Impl* impl_; | |
145 }; | |
146 | |
147 enum AllocationFlag : uint8_t { | |
148 kNone = 0, | |
149 kDoubleAlignment = 1, | |
150 kPretenured = 1 << 1 | |
151 }; | |
152 | |
153 typedef base::Flags<AllocationFlag> AllocationFlags; | |
154 | |
155 // =========================================================================== | |
156 // Base Assembler | |
157 // =========================================================================== | |
158 | |
159 // Constants. | |
160 Node* Int32Constant(int value); | |
161 Node* IntPtrConstant(intptr_t value); | |
162 Node* NumberConstant(double value); | |
163 Node* SmiConstant(Smi* value); | |
164 Node* HeapConstant(Handle<HeapObject> object); | |
165 Node* BooleanConstant(bool value); | |
166 Node* ExternalConstant(ExternalReference address); | |
167 Node* Float64Constant(double value); | |
168 Node* BooleanMapConstant(); | |
169 Node* EmptyStringConstant(); | |
170 Node* HeapNumberMapConstant(); | |
171 Node* NaNConstant(); | |
172 Node* NoContextConstant(); | |
173 Node* NullConstant(); | |
174 Node* UndefinedConstant(); | |
175 | |
176 Node* Parameter(int value); | |
177 void Return(Node* value); | |
178 | |
179 void Bind(Label* label); | |
180 void Goto(Label* label); | |
181 void GotoIf(Node* condition, Label* true_label); | |
182 void GotoUnless(Node* condition, Label* false_label); | |
183 void Branch(Node* condition, Label* true_label, Label* false_label); | |
184 | |
185 void Switch(Node* index, Label* default_label, int32_t* case_values, | |
186 Label** case_labels, size_t case_count); | |
187 | |
188 // Access to the frame pointer | |
189 Node* LoadFramePointer(); | |
190 Node* LoadParentFramePointer(); | |
191 | |
192 // Access to the stack pointer | |
193 Node* LoadStackPointer(); | |
194 | |
195 // Load raw memory location. | |
196 Node* Load(MachineType rep, Node* base); | |
197 Node* Load(MachineType rep, Node* base, Node* index); | |
198 | |
199 // Store value to raw memory location. | |
200 Node* Store(MachineRepresentation rep, Node* base, Node* value); | |
201 Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value); | |
202 Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value); | |
203 Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* index, | |
204 Node* value); | |
205 | |
206 // Basic arithmetic operations. | |
207 #define DECLARE_CODE_STUB_ASSEMBER_BINARY_OP(name) Node* name(Node* a, Node* b); | |
208 CODE_STUB_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_STUB_ASSEMBER_BINARY_OP) | |
209 #undef DECLARE_CODE_STUB_ASSEMBER_BINARY_OP | |
210 | |
211 Node* WordShl(Node* value, int shift); | |
212 | |
213 // Unary | |
214 #define DECLARE_CODE_STUB_ASSEMBER_UNARY_OP(name) Node* name(Node* a); | |
215 CODE_STUB_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_STUB_ASSEMBER_UNARY_OP) | |
216 #undef DECLARE_CODE_STUB_ASSEMBER_UNARY_OP | |
217 | |
218 // Projections | |
219 Node* Projection(int index, Node* value); | |
220 | |
221 // Calls | |
222 Node* CallRuntime(Runtime::FunctionId function_id, Node* context); | |
223 Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1); | |
224 Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1, | |
225 Node* arg2); | |
226 Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1, | |
227 Node* arg2, Node* arg3); | |
228 Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1, | |
229 Node* arg2, Node* arg3, Node* arg4); | |
230 Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1, | |
231 Node* arg2, Node* arg3, Node* arg4, Node* arg5); | |
232 | |
233 Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context); | |
234 Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context, | |
235 Node* arg1); | |
236 Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context, | |
237 Node* arg1, Node* arg2); | |
238 Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context, | |
239 Node* arg1, Node* arg2, Node* arg3); | |
240 Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context, | |
241 Node* arg1, Node* arg2, Node* arg3, Node* arg4); | |
242 | |
243 Node* CallStub(Callable const& callable, Node* context, Node* arg1, | |
244 size_t result_size = 1); | |
245 Node* CallStub(Callable const& callable, Node* context, Node* arg1, | |
246 Node* arg2, size_t result_size = 1); | |
247 Node* CallStub(Callable const& callable, Node* context, Node* arg1, | |
248 Node* arg2, Node* arg3, size_t result_size = 1); | |
249 | |
250 Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target, | |
251 Node* context, Node* arg1, size_t result_size = 1); | |
252 Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target, | |
253 Node* context, Node* arg1, Node* arg2, size_t result_size = 1); | |
254 Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target, | |
255 Node* context, Node* arg1, Node* arg2, Node* arg3, | |
256 size_t result_size = 1); | |
257 Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target, | |
258 Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4, | |
259 size_t result_size = 1); | |
260 Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target, | |
261 Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4, | |
262 Node* arg5, size_t result_size = 1); | |
263 | |
264 Node* TailCallStub(Callable const& callable, Node* context, Node* arg1, | |
265 Node* arg2, size_t result_size = 1); | |
266 Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target, | |
267 Node* context, Node* arg1, Node* arg2, | |
268 size_t result_size = 1); | |
269 | |
270 Node* TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor, | |
271 Node* code_target_address, Node** args); | |
272 | |
273 // =========================================================================== | |
274 // Macros | |
275 // =========================================================================== | |
276 | |
277 // Float64 operations. | |
278 Node* Float64Ceil(Node* x); | |
279 Node* Float64Floor(Node* x); | |
280 Node* Float64Round(Node* x); | |
281 Node* Float64Trunc(Node* x); | |
282 | |
283 // Tag a Word as a Smi value. | |
284 Node* SmiTag(Node* value); | |
285 // Untag a Smi value as a Word. | |
286 Node* SmiUntag(Node* value); | |
287 Node* SmiToWord(Node* value) { return SmiUntag(value); } | |
288 | |
289 // Smi conversions. | |
290 Node* SmiToFloat64(Node* value); | |
291 Node* SmiFromWord32(Node* value); | |
292 Node* SmiToWord32(Node* value); | |
293 | |
294 // Smi operations. | |
295 Node* SmiAdd(Node* a, Node* b); | |
296 Node* SmiAddWithOverflow(Node* a, Node* b); | |
297 Node* SmiSub(Node* a, Node* b); | |
298 Node* SmiSubWithOverflow(Node* a, Node* b); | |
299 Node* SmiEqual(Node* a, Node* b); | |
300 Node* SmiAboveOrEqual(Node* a, Node* b); | |
301 Node* SmiLessThan(Node* a, Node* b); | |
302 Node* SmiLessThanOrEqual(Node* a, Node* b); | |
303 Node* SmiMin(Node* a, Node* b); | |
304 | |
305 // Load a value from the root array. | |
306 Node* LoadRoot(Heap::RootListIndex root_index); | |
307 | |
308 // Check a value for smi-ness | |
309 Node* WordIsSmi(Node* a); | |
310 | |
311 // Check that the value is a positive smi. | |
312 Node* WordIsPositiveSmi(Node* a); | |
313 | |
314 // Load an object pointer from a buffer that isn't in the heap. | |
315 Node* LoadBufferObject(Node* buffer, int offset, | |
316 MachineType rep = MachineType::AnyTagged()); | |
317 // Load a field from an object on the heap. | |
318 Node* LoadObjectField(Node* object, int offset, | |
319 MachineType rep = MachineType::AnyTagged()); | |
320 // Store a field to an object on the heap. | |
321 Node* StoreObjectFieldNoWriteBarrier( | |
322 Node* object, int offset, Node* value, | |
323 MachineRepresentation rep = MachineRepresentation::kTagged); | |
324 // Load the floating point value of a HeapNumber. | |
325 Node* LoadHeapNumberValue(Node* object); | |
326 // Store the floating point value of a HeapNumber. | |
327 Node* StoreHeapNumberValue(Node* object, Node* value); | |
328 // Truncate the floating point value of a HeapNumber to an Int32. | |
329 Node* TruncateHeapNumberValueToWord32(Node* object); | |
330 // Load the bit field of a Map. | |
331 Node* LoadMapBitField(Node* map); | |
332 // Load bit field 2 of a map. | |
333 Node* LoadMapBitField2(Node* map); | |
334 // Load bit field 3 of a map. | |
335 Node* LoadMapBitField3(Node* map); | |
336 // Load the instance type of a map. | |
337 Node* LoadMapInstanceType(Node* map); | |
338 // Load the instance descriptors of a map. | |
339 Node* LoadMapDescriptors(Node* map); | |
340 | |
341 // Load the hash field of a name. | |
342 Node* LoadNameHash(Node* name); | |
343 // Load the instance size of a Map. | |
344 Node* LoadMapInstanceSize(Node* map); | |
345 | |
346 // Load an array element from a FixedArray. | |
347 Node* LoadFixedArrayElementInt32Index(Node* object, Node* int32_index, | |
348 int additional_offset = 0); | |
349 Node* LoadFixedArrayElementSmiIndex(Node* object, Node* smi_index, | |
350 int additional_offset = 0); | |
351 Node* LoadFixedArrayElementConstantIndex(Node* object, int index); | |
352 | |
353 // Allocate an object of the given size. | |
354 Node* Allocate(int size, AllocationFlags flags = kNone); | |
355 Node* InnerAllocate(Node* previous, int offset); | |
356 // Allocate a HeapNumber without initializing its value. | |
357 Node* AllocateHeapNumber(); | |
358 // Allocate a HeapNumber with a specific value. | |
359 Node* AllocateHeapNumberWithValue(Node* value); | |
360 // Allocate a SeqOneByteString with the given length. | |
361 Node* AllocateSeqOneByteString(int length); | |
362 // Allocate a SeqTwoByteString with the given length. | |
363 Node* AllocateSeqTwoByteString(int length); | |
364 | |
365 // Store an array element to a FixedArray. | |
366 Node* StoreFixedArrayElementInt32Index(Node* object, Node* index, | |
367 Node* value); | |
368 Node* StoreFixedArrayElementNoWriteBarrier(Node* object, Node* index, | |
369 Node* value); | |
370 // Load the Map of an HeapObject. | |
371 Node* LoadMap(Node* object); | |
372 // Store the Map of an HeapObject. | |
373 Node* StoreMapNoWriteBarrier(Node* object, Node* map); | |
374 // Load the instance type of an HeapObject. | |
375 Node* LoadInstanceType(Node* object); | |
376 | |
377 // Load the elements backing store of a JSObject. | |
378 Node* LoadElements(Node* object); | |
379 // Load the length of a fixed array base instance. | |
380 Node* LoadFixedArrayBaseLength(Node* array); | |
381 | |
382 // Returns a node that is true if the given bit is set in |word32|. | |
383 template <typename T> | |
384 Node* BitFieldDecode(Node* word32) { | |
385 return BitFieldDecode(word32, T::kShift, T::kMask); | |
386 } | |
387 | |
388 Node* BitFieldDecode(Node* word32, uint32_t shift, uint32_t mask); | |
389 | |
390 // Conversions. | |
391 Node* ChangeFloat64ToTagged(Node* value); | |
392 Node* ChangeInt32ToTagged(Node* value); | |
393 Node* ChangeUint32ToTagged(Node* value); | |
394 Node* TruncateTaggedToFloat64(Node* context, Node* value); | |
395 Node* TruncateTaggedToWord32(Node* context, Node* value); | |
396 // Truncate to int32 using JavaScript truncation mode. | |
397 Node* TruncateFloat64ToInt32(Node* value); | |
398 | |
399 // Type conversions. | |
400 // Throws a TypeError for {method_name} if {value} is not coercible to Object, | |
401 // or returns the {value} converted to a String otherwise. | |
402 Node* ToThisString(Node* context, Node* value, char const* method_name); | |
403 | |
404 // String helpers. | |
405 // Load a character from a String (might flatten a ConsString). | |
406 Node* StringCharCodeAt(Node* string, Node* smi_index); | |
407 // Return the single character string with only {code}. | |
408 Node* StringFromCharCode(Node* code); | |
409 | |
410 // Branching helpers. | |
411 // TODO(danno): Can we be more cleverish wrt. edge-split? | |
412 void BranchIf(Node* condition, Label* if_true, Label* if_false); | |
413 | |
414 #define BRANCH_HELPER(name) \ | |
415 void BranchIf##name(Node* a, Node* b, Label* if_true, Label* if_false) { \ | |
416 BranchIf(name(a, b), if_true, if_false); \ | |
417 } | |
418 CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(BRANCH_HELPER) | |
419 #undef BRANCH_HELPER | |
420 | |
421 void BranchIfSmiLessThan(Node* a, Node* b, Label* if_true, Label* if_false) { | |
422 BranchIf(SmiLessThan(a, b), if_true, if_false); | |
423 } | |
424 | |
425 void BranchIfSmiLessThanOrEqual(Node* a, Node* b, Label* if_true, | |
426 Label* if_false) { | |
427 BranchIf(SmiLessThanOrEqual(a, b), if_true, if_false); | |
428 } | |
429 | |
430 void BranchIfFloat64IsNaN(Node* value, Label* if_true, Label* if_false) { | |
431 BranchIfFloat64Equal(value, value, if_false, if_true); | |
432 } | |
433 | |
434 // Helpers which delegate to RawMachineAssembler. | |
435 Factory* factory() const; | |
436 Isolate* isolate() const; | |
437 Zone* zone() const; | |
438 | |
439 protected: | |
440 // Protected helpers which delegate to RawMachineAssembler. | |
441 Graph* graph() const; | |
442 | |
443 // Enables subclasses to perform operations before and after a call. | |
444 virtual void CallPrologue(); | |
445 virtual void CallEpilogue(); | |
446 | |
447 private: | |
448 friend class CodeStubAssemblerTester; | |
449 | |
450 CodeStubAssembler(Isolate* isolate, Zone* zone, | |
451 CallDescriptor* call_descriptor, Code::Flags flags, | |
452 const char* name); | |
453 | |
454 Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args); | |
455 Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args); | |
456 | |
457 Node* SmiShiftBitsConstant(); | |
458 | |
459 Node* AllocateRawAligned(Node* size_in_bytes, AllocationFlags flags, | |
460 Node* top_address, Node* limit_address); | |
461 Node* AllocateRawUnaligned(Node* size_in_bytes, AllocationFlags flags, | |
462 Node* top_adddress, Node* limit_address); | |
463 | |
464 base::SmartPointer<RawMachineAssembler> raw_assembler_; | |
465 Code::Flags flags_; | |
466 const char* name_; | |
467 bool code_generated_; | |
468 ZoneVector<Variable::Impl*> variables_; | |
469 | |
470 DISALLOW_COPY_AND_ASSIGN(CodeStubAssembler); | |
471 }; | |
472 | |
473 DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags); | |
474 | |
// A branch/merge target inside a CodeStubAssembler. Variables passed at
// construction are merged at the label: each incoming edge's value is
// recorded (variable_merges_) and combined into a phi (variable_phis_)
// when the label is bound.
class CodeStubAssembler::Label {
 public:
  enum Type { kDeferred, kNonDeferred };

  // Label with no merged variables.
  explicit Label(CodeStubAssembler* assembler,
                 CodeStubAssembler::Label::Type type =
                     CodeStubAssembler::Label::kNonDeferred)
      : CodeStubAssembler::Label(assembler, 0, nullptr, type) {}
  // Label merging a single variable; delegates to the array form below.
  Label(CodeStubAssembler* assembler,
        CodeStubAssembler::Variable* merged_variable,
        CodeStubAssembler::Label::Type type =
            CodeStubAssembler::Label::kNonDeferred)
      : CodeStubAssembler::Label(assembler, 1, &merged_variable, type) {}
  // Label merging |merged_variable_count| variables.
  Label(CodeStubAssembler* assembler, int merged_variable_count,
        CodeStubAssembler::Variable** merged_variables,
        CodeStubAssembler::Label::Type type =
            CodeStubAssembler::Label::kNonDeferred);
  ~Label() {}

 private:
  friend class CodeStubAssembler;

  void Bind();
  void MergeVariables();

  bool bound_;
  size_t merge_count_;
  CodeStubAssembler* assembler_;
  RawMachineLabel* label_;
  // Map of variables that need to be merged to their phi nodes (or placeholders
  // for those phis).
  std::map<Variable::Impl*, Node*> variable_phis_;
  // Map of variables to the list of value nodes that have been added from each
  // merge path in their order of merging.
  std::map<Variable::Impl*, std::vector<Node*>> variable_merges_;
};
511 | |
512 } // namespace compiler | |
513 } // namespace internal | |
514 } // namespace v8 | |
515 | |
516 #endif // V8_COMPILER_CODE_STUB_ASSEMBLER_H_ | |