1 // Copyright 2015 the V8 project authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #ifndef V8_COMPILER_CODE_STUB_ASSEMBLER_H_ | |
6 #define V8_COMPILER_CODE_STUB_ASSEMBLER_H_ | |
7 | |
8 #include <map> | |
9 | |
10 // Clients of this interface shouldn't depend on lots of compiler internals. | |
11 // Do not include anything from src/compiler here! | |
12 #include "src/allocation.h" | |
13 #include "src/builtins.h" | |
14 #include "src/heap/heap.h" | |
15 #include "src/machine-type.h" | |
16 #include "src/runtime/runtime.h" | |
17 #include "src/zone-containers.h" | |
18 | |
19 namespace v8 { | |
20 namespace internal { | |
21 | |
22 class Callable; | |
23 class CallInterfaceDescriptor; | |
24 class Isolate; | |
25 class Factory; | |
26 class Zone; | |
27 | |
28 namespace compiler { | |
29 | |
30 class CallDescriptor; | |
31 class Graph; | |
32 class Node; | |
33 class Operator; | |
34 class RawMachineAssembler; | |
35 class RawMachineLabel; | |
36 class Schedule; | |
37 | |
// X-macro list of the machine-level comparison operations exposed by the
// CodeStubAssembler. Each entry is a binary operation producing a boolean
// result; the list is expanded once to declare the Node* name(Node*, Node*)
// methods and again to generate the BranchIf##name helpers below.
#define CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
  V(Float32Equal)                                     \
  V(Float32LessThan)                                  \
  V(Float32LessThanOrEqual)                           \
  V(Float32GreaterThan)                               \
  V(Float32GreaterThanOrEqual)                        \
  V(Float64Equal)                                     \
  V(Float64LessThan)                                  \
  V(Float64LessThanOrEqual)                           \
  V(Float64GreaterThan)                               \
  V(Float64GreaterThanOrEqual)                        \
  V(Int32GreaterThan)                                 \
  V(Int32GreaterThanOrEqual)                          \
  V(Int32LessThan)                                    \
  V(Int32LessThanOrEqual)                             \
  V(IntPtrLessThan)                                   \
  V(IntPtrLessThanOrEqual)                            \
  V(Uint32LessThan)                                   \
  V(UintPtrGreaterThanOrEqual)                        \
  V(WordEqual)                                        \
  V(WordNotEqual)                                     \
  V(Word32Equal)                                      \
  V(Word32NotEqual)                                   \
  V(Word64Equal)                                      \
  V(Word64NotEqual)
63 | |
// X-macro list of all binary machine operations exposed by the
// CodeStubAssembler: every comparison from
// CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST plus the arithmetic,
// bitwise and shift operations. Expanded below to declare one
// Node* name(Node* a, Node* b) method per entry.
#define CODE_STUB_ASSEMBLER_BINARY_OP_LIST(V)   \
  CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
  V(Float64Add)                                 \
  V(Float64Sub)                                 \
  V(Float64Mul)                                 \
  V(Float64Div)                                 \
  V(Float64InsertLowWord32)                     \
  V(Float64InsertHighWord32)                    \
  V(IntPtrAdd)                                  \
  V(IntPtrAddWithOverflow)                      \
  V(IntPtrSub)                                  \
  V(IntPtrSubWithOverflow)                      \
  V(IntPtrMul)                                  \
  V(Int32Add)                                   \
  V(Int32AddWithOverflow)                       \
  V(Int32Sub)                                   \
  V(Int32Mul)                                   \
  V(Int32Div)                                   \
  V(WordOr)                                     \
  V(WordAnd)                                    \
  V(WordXor)                                    \
  V(WordShl)                                    \
  V(WordShr)                                    \
  V(WordSar)                                    \
  V(WordRor)                                    \
  V(Word32Or)                                   \
  V(Word32And)                                  \
  V(Word32Xor)                                  \
  V(Word32Shl)                                  \
  V(Word32Shr)                                  \
  V(Word32Sar)                                  \
  V(Word32Ror)                                  \
  V(Word64Or)                                   \
  V(Word64And)                                  \
  V(Word64Xor)                                  \
  V(Word64Shr)                                  \
  V(Word64Sar)                                  \
  V(Word64Ror)
102 | |
// X-macro list of the unary machine operations exposed by the
// CodeStubAssembler. Expanded below to declare one Node* name(Node* a)
// method per entry.
#define CODE_STUB_ASSEMBLER_UNARY_OP_LIST(V) \
  V(Float64Neg)                              \
  V(Float64Sqrt)                             \
  V(ChangeFloat64ToUint32)                   \
  V(ChangeInt32ToFloat64)                    \
  V(ChangeInt32ToInt64)                      \
  V(ChangeUint32ToFloat64)                   \
  V(ChangeUint32ToUint64)                    \
  V(Word32Clz)
112 | |
113 class CodeStubAssembler { | |
114 public: | |
115 // Create with CallStub linkage. | |
116 // |result_size| specifies the number of results returned by the stub. | |
117 // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor. | |
118 CodeStubAssembler(Isolate* isolate, Zone* zone, | |
119 const CallInterfaceDescriptor& descriptor, | |
120 Code::Flags flags, const char* name, | |
121 size_t result_size = 1); | |
122 | |
123 // Create with JSCall linkage. | |
124 CodeStubAssembler(Isolate* isolate, Zone* zone, int parameter_count, | |
125 Code::Flags flags, const char* name); | |
126 | |
127 virtual ~CodeStubAssembler(); | |
128 | |
129 Handle<Code> GenerateCode(); | |
130 | |
131 class Label; | |
132 class Variable { | |
133 public: | |
134 explicit Variable(CodeStubAssembler* assembler, MachineRepresentation rep); | |
135 void Bind(Node* value); | |
136 Node* value() const; | |
137 MachineRepresentation rep() const; | |
138 bool IsBound() const; | |
139 | |
140 private: | |
141 friend class CodeStubAssembler; | |
142 class Impl; | |
143 Impl* impl_; | |
144 }; | |
145 | |
146 enum AllocationFlag : uint8_t { | |
147 kNone = 0, | |
148 kDoubleAlignment = 1, | |
149 kPretenured = 1 << 1 | |
150 }; | |
151 | |
152 typedef base::Flags<AllocationFlag> AllocationFlags; | |
153 | |
154 // =========================================================================== | |
155 // Base Assembler | |
156 // =========================================================================== | |
157 | |
158 // Constants. | |
159 Node* Int32Constant(int value); | |
160 Node* IntPtrConstant(intptr_t value); | |
161 Node* NumberConstant(double value); | |
162 Node* SmiConstant(Smi* value); | |
163 Node* HeapConstant(Handle<HeapObject> object); | |
164 Node* BooleanConstant(bool value); | |
165 Node* ExternalConstant(ExternalReference address); | |
166 Node* Float64Constant(double value); | |
167 Node* BooleanMapConstant(); | |
168 Node* HeapNumberMapConstant(); | |
169 Node* NullConstant(); | |
170 Node* UndefinedConstant(); | |
171 | |
172 Node* Parameter(int value); | |
173 void Return(Node* value); | |
174 | |
175 void Bind(Label* label); | |
176 void Goto(Label* label); | |
177 void GotoIf(Node* condition, Label* true_label); | |
178 void GotoUnless(Node* condition, Label* false_label); | |
179 void Branch(Node* condition, Label* true_label, Label* false_label); | |
180 | |
181 void Switch(Node* index, Label* default_label, int32_t* case_values, | |
182 Label** case_labels, size_t case_count); | |
183 | |
184 // Access to the frame pointer | |
185 Node* LoadFramePointer(); | |
186 Node* LoadParentFramePointer(); | |
187 | |
188 // Access to the stack pointer | |
189 Node* LoadStackPointer(); | |
190 | |
191 // Load raw memory location. | |
192 Node* Load(MachineType rep, Node* base); | |
193 Node* Load(MachineType rep, Node* base, Node* index); | |
194 | |
195 // Store value to raw memory location. | |
196 Node* Store(MachineRepresentation rep, Node* base, Node* value); | |
197 Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value); | |
198 Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value); | |
199 Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* index, | |
200 Node* value); | |
201 | |
202 // Basic arithmetic operations. | |
203 #define DECLARE_CODE_STUB_ASSEMBER_BINARY_OP(name) Node* name(Node* a, Node* b); | |
204 CODE_STUB_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_STUB_ASSEMBER_BINARY_OP) | |
205 #undef DECLARE_CODE_STUB_ASSEMBER_BINARY_OP | |
206 | |
207 Node* WordShl(Node* value, int shift); | |
208 | |
209 // Unary | |
210 #define DECLARE_CODE_STUB_ASSEMBER_UNARY_OP(name) Node* name(Node* a); | |
211 CODE_STUB_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_STUB_ASSEMBER_UNARY_OP) | |
212 #undef DECLARE_CODE_STUB_ASSEMBER_UNARY_OP | |
213 | |
214 // Projections | |
215 Node* Projection(int index, Node* value); | |
216 | |
217 // Calls | |
218 Node* CallRuntime(Runtime::FunctionId function_id, Node* context); | |
219 Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1); | |
220 Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1, | |
221 Node* arg2); | |
222 Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1, | |
223 Node* arg2, Node* arg3); | |
224 Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1, | |
225 Node* arg2, Node* arg3, Node* arg4); | |
226 Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1, | |
227 Node* arg2, Node* arg3, Node* arg4, Node* arg5); | |
228 | |
229 Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context); | |
230 Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context, | |
231 Node* arg1); | |
232 Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context, | |
233 Node* arg1, Node* arg2); | |
234 Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context, | |
235 Node* arg1, Node* arg2, Node* arg3); | |
236 Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context, | |
237 Node* arg1, Node* arg2, Node* arg3, Node* arg4); | |
238 | |
239 Node* CallStub(Callable const& callable, Node* context, Node* arg1, | |
240 size_t result_size = 1); | |
241 Node* CallStub(Callable const& callable, Node* context, Node* arg1, | |
242 Node* arg2, size_t result_size = 1); | |
243 Node* CallStub(Callable const& callable, Node* context, Node* arg1, | |
244 Node* arg2, Node* arg3, size_t result_size = 1); | |
245 | |
246 Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target, | |
247 Node* context, Node* arg1, size_t result_size = 1); | |
248 Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target, | |
249 Node* context, Node* arg1, Node* arg2, size_t result_size = 1); | |
250 Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target, | |
251 Node* context, Node* arg1, Node* arg2, Node* arg3, | |
252 size_t result_size = 1); | |
253 Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target, | |
254 Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4, | |
255 size_t result_size = 1); | |
256 Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target, | |
257 Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4, | |
258 Node* arg5, size_t result_size = 1); | |
259 | |
260 Node* TailCallStub(Callable const& callable, Node* context, Node* arg1, | |
261 Node* arg2, size_t result_size = 1); | |
262 | |
263 Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target, | |
264 Node* context, Node* arg1, Node* arg2, | |
265 size_t result_size = 1); | |
266 | |
267 Node* TailCall(const CallInterfaceDescriptor& descriptor, Node* target, | |
268 Node** args, size_t result_size = 1); | |
269 | |
270 // =========================================================================== | |
271 // Macros | |
272 // =========================================================================== | |
273 | |
274 // Float64 operations. | |
275 Node* Float64Ceil(Node* x); | |
276 Node* Float64Floor(Node* x); | |
277 Node* Float64Round(Node* x); | |
278 Node* Float64Trunc(Node* x); | |
279 | |
280 // Tag a Word as a Smi value. | |
281 Node* SmiTag(Node* value); | |
282 // Untag a Smi value as a Word. | |
283 Node* SmiUntag(Node* value); | |
284 | |
285 // Smi conversions. | |
286 Node* SmiToFloat64(Node* value); | |
287 Node* SmiToWord32(Node* value); | |
288 | |
289 // Smi operations. | |
290 Node* SmiAdd(Node* a, Node* b); | |
291 Node* SmiAddWithOverflow(Node* a, Node* b); | |
292 Node* SmiSub(Node* a, Node* b); | |
293 Node* SmiSubWithOverflow(Node* a, Node* b); | |
294 Node* SmiEqual(Node* a, Node* b); | |
295 Node* SmiLessThan(Node* a, Node* b); | |
296 Node* SmiLessThanOrEqual(Node* a, Node* b); | |
297 Node* SmiMin(Node* a, Node* b); | |
298 | |
299 // Load a value from the root array. | |
300 Node* LoadRoot(Heap::RootListIndex root_index); | |
301 | |
302 // Check a value for smi-ness | |
303 Node* WordIsSmi(Node* a); | |
304 | |
305 // Check that the value is a positive smi. | |
306 Node* WordIsPositiveSmi(Node* a); | |
307 | |
308 // Load an object pointer from a buffer that isn't in the heap. | |
309 Node* LoadBufferObject(Node* buffer, int offset, | |
310 MachineType rep = MachineType::AnyTagged()); | |
311 // Load a field from an object on the heap. | |
312 Node* LoadObjectField(Node* object, int offset, | |
313 MachineType rep = MachineType::AnyTagged()); | |
314 // Load the floating point value of a HeapNumber. | |
315 Node* LoadHeapNumberValue(Node* object); | |
316 // Store the floating point value of a HeapNumber. | |
317 Node* StoreHeapNumberValue(Node* object, Node* value); | |
318 // Truncate the floating point value of a HeapNumber to an Int32. | |
319 Node* TruncateHeapNumberValueToWord32(Node* object); | |
320 // Load the bit field of a Map. | |
321 Node* LoadMapBitField(Node* map); | |
322 // Load bit field 2 of a map. | |
323 Node* LoadMapBitField2(Node* map); | |
324 // Load bit field 3 of a map. | |
325 Node* LoadMapBitField3(Node* map); | |
326 // Load the instance type of a map. | |
327 Node* LoadMapInstanceType(Node* map); | |
328 // Load the instance descriptors of a map. | |
329 Node* LoadMapDescriptors(Node* map); | |
330 | |
331 // Load the hash field of a name. | |
332 Node* LoadNameHash(Node* name); | |
333 | |
334 // Load an array element from a FixedArray. | |
335 Node* LoadFixedArrayElementInt32Index(Node* object, Node* int32_index, | |
336 int additional_offset = 0); | |
337 Node* LoadFixedArrayElementSmiIndex(Node* object, Node* smi_index, | |
338 int additional_offset = 0); | |
339 Node* LoadFixedArrayElementConstantIndex(Node* object, int index); | |
340 | |
341 // Allocate an object of the given size. | |
342 Node* Allocate(int size, AllocationFlags flags = kNone); | |
343 // Allocate a HeapNumber without initializing its value. | |
344 Node* AllocateHeapNumber(); | |
345 // Allocate a HeapNumber with a specific value. | |
346 Node* AllocateHeapNumberWithValue(Node* value); | |
347 | |
348 // Store an array element to a FixedArray. | |
349 Node* StoreFixedArrayElementNoWriteBarrier(Node* object, Node* index, | |
350 Node* value); | |
351 // Load the Map of an HeapObject. | |
352 Node* LoadMap(Node* object); | |
353 // Store the Map of an HeapObject. | |
354 Node* StoreMapNoWriteBarrier(Node* object, Node* map); | |
355 // Load the instance type of an HeapObject. | |
356 Node* LoadInstanceType(Node* object); | |
357 | |
358 // Load the elements backing store of a JSObject. | |
359 Node* LoadElements(Node* object); | |
360 // Load the length of a fixed array base instance. | |
361 Node* LoadFixedArrayBaseLength(Node* array); | |
362 | |
363 // Returns a node that is true if the given bit is set in |word32|. | |
364 template <typename T> | |
365 Node* BitFieldDecode(Node* word32) { | |
366 return BitFieldDecode(word32, T::kShift, T::kMask); | |
367 } | |
368 | |
369 Node* BitFieldDecode(Node* word32, uint32_t shift, uint32_t mask); | |
370 | |
371 // Conversions. | |
372 Node* ChangeFloat64ToTagged(Node* value); | |
373 Node* ChangeInt32ToTagged(Node* value); | |
374 Node* TruncateTaggedToFloat64(Node* context, Node* value); | |
375 Node* TruncateTaggedToWord32(Node* context, Node* value); | |
376 | |
377 // Branching helpers. | |
378 // TODO(danno): Can we be more cleverish wrt. edge-split? | |
379 void BranchIf(Node* condition, Label* if_true, Label* if_false); | |
380 | |
381 #define BRANCH_HELPER(name) \ | |
382 void BranchIf##name(Node* a, Node* b, Label* if_true, Label* if_false) { \ | |
383 BranchIf(name(a, b), if_true, if_false); \ | |
384 } | |
385 CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(BRANCH_HELPER) | |
386 #undef BRANCH_HELPER | |
387 | |
388 void BranchIfSmiLessThan(Node* a, Node* b, Label* if_true, Label* if_false) { | |
389 BranchIf(SmiLessThan(a, b), if_true, if_false); | |
390 } | |
391 | |
392 void BranchIfSmiLessThanOrEqual(Node* a, Node* b, Label* if_true, | |
393 Label* if_false) { | |
394 BranchIf(SmiLessThanOrEqual(a, b), if_true, if_false); | |
395 } | |
396 | |
397 void BranchIfFloat64IsNaN(Node* value, Label* if_true, Label* if_false) { | |
398 BranchIfFloat64Equal(value, value, if_false, if_true); | |
399 } | |
400 | |
401 // Helpers which delegate to RawMachineAssembler. | |
402 Factory* factory() const; | |
403 Isolate* isolate() const; | |
404 Zone* zone() const; | |
405 | |
406 protected: | |
407 // Protected helpers which delegate to RawMachineAssembler. | |
408 Graph* graph() const; | |
409 | |
410 // Enables subclasses to perform operations before and after a call. | |
411 virtual void CallPrologue(); | |
412 virtual void CallEpilogue(); | |
413 | |
414 private: | |
415 friend class CodeStubAssemblerTester; | |
416 | |
417 CodeStubAssembler(Isolate* isolate, Zone* zone, | |
418 CallDescriptor* call_descriptor, Code::Flags flags, | |
419 const char* name); | |
420 | |
421 Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args); | |
422 Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args); | |
423 | |
424 Node* SmiShiftBitsConstant(); | |
425 | |
426 Node* AllocateRawAligned(Node* size_in_bytes, AllocationFlags flags, | |
427 Node* top_address, Node* limit_address); | |
428 Node* AllocateRawUnaligned(Node* size_in_bytes, AllocationFlags flags, | |
429 Node* top_adddress, Node* limit_address); | |
430 | |
431 base::SmartPointer<RawMachineAssembler> raw_assembler_; | |
432 Code::Flags flags_; | |
433 const char* name_; | |
434 bool code_generated_; | |
435 ZoneVector<Variable::Impl*> variables_; | |
436 | |
437 DISALLOW_COPY_AND_ASSIGN(CodeStubAssembler); | |
438 }; | |
439 | |
440 DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags); | |
441 | |
442 class CodeStubAssembler::Label { | |
443 public: | |
444 enum Type { kDeferred, kNonDeferred }; | |
445 | |
446 explicit Label(CodeStubAssembler* assembler, | |
447 CodeStubAssembler::Label::Type type = | |
448 CodeStubAssembler::Label::kNonDeferred) | |
449 : CodeStubAssembler::Label(assembler, 0, nullptr, type) {} | |
450 Label(CodeStubAssembler* assembler, | |
451 CodeStubAssembler::Variable* merged_variable, | |
452 CodeStubAssembler::Label::Type type = | |
453 CodeStubAssembler::Label::kNonDeferred) | |
454 : CodeStubAssembler::Label(assembler, 1, &merged_variable, type) {} | |
455 Label(CodeStubAssembler* assembler, int merged_variable_count, | |
456 CodeStubAssembler::Variable** merged_variables, | |
457 CodeStubAssembler::Label::Type type = | |
458 CodeStubAssembler::Label::kNonDeferred); | |
459 ~Label() {} | |
460 | |
461 private: | |
462 friend class CodeStubAssembler; | |
463 | |
464 void Bind(); | |
465 void MergeVariables(); | |
466 | |
467 bool bound_; | |
468 size_t merge_count_; | |
469 CodeStubAssembler* assembler_; | |
470 RawMachineLabel* label_; | |
471 // Map of variables that need to be merged to their phi nodes (or placeholders | |
472 // for those phis). | |
473 std::map<Variable::Impl*, Node*> variable_phis_; | |
474 // Map of variables to the list of value nodes that have been added from each | |
475 // merge path in their order of merging. | |
476 std::map<Variable::Impl*, std::vector<Node*>> variable_merges_; | |
477 }; | |
478 | |
479 } // namespace compiler | |
480 } // namespace internal | |
481 } // namespace v8 | |
482 | |
483 #endif // V8_COMPILER_CODE_STUB_ASSEMBLER_H_ | |