Chromium Code Reviews

Side by Side Diff: src/compiler/code-assembler.h

Issue 1875583003: Separate CodeAssembler and CodeStubAssembler (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Fix gn build Created 4 years, 8 months ago
1 // Copyright 2015 the V8 project authors. All rights reserved. 1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #ifndef V8_COMPILER_CODE_STUB_ASSEMBLER_H_ 5 #ifndef V8_COMPILER_CODE_ASSEMBLER_H_
6 #define V8_COMPILER_CODE_STUB_ASSEMBLER_H_ 6 #define V8_COMPILER_CODE_ASSEMBLER_H_
7 7
8 #include <map> 8 #include <map>
9 9
10 // Clients of this interface shouldn't depend on lots of compiler internals. 10 // Clients of this interface shouldn't depend on lots of compiler internals.
11 // Do not include anything from src/compiler here! 11 // Do not include anything from src/compiler here!
12 #include "src/allocation.h" 12 #include "src/allocation.h"
13 #include "src/builtins.h" 13 #include "src/builtins.h"
14 #include "src/heap/heap.h" 14 #include "src/heap/heap.h"
15 #include "src/machine-type.h" 15 #include "src/machine-type.h"
16 #include "src/runtime/runtime.h" 16 #include "src/runtime/runtime.h"
(...skipping 79 matching lines...)
96 V(Word64Or) \ 96 V(Word64Or) \
97 V(Word64And) \ 97 V(Word64And) \
98 V(Word64Xor) \ 98 V(Word64Xor) \
99 V(Word64Shr) \ 99 V(Word64Shr) \
100 V(Word64Sar) \ 100 V(Word64Sar) \
101 V(Word64Ror) 101 V(Word64Ror)
102 102
103 #define CODE_STUB_ASSEMBLER_UNARY_OP_LIST(V) \ 103 #define CODE_STUB_ASSEMBLER_UNARY_OP_LIST(V) \
104 V(Float64Neg) \ 104 V(Float64Neg) \
105 V(Float64Sqrt) \ 105 V(Float64Sqrt) \
106 V(Float64ExtractLowWord32) \
107 V(Float64ExtractHighWord32) \
108 V(TruncateInt64ToInt32) \
106 V(ChangeFloat64ToUint32) \ 109 V(ChangeFloat64ToUint32) \
107 V(ChangeInt32ToFloat64) \ 110 V(ChangeInt32ToFloat64) \
108 V(ChangeInt32ToInt64) \ 111 V(ChangeInt32ToInt64) \
109 V(ChangeUint32ToFloat64) \ 112 V(ChangeUint32ToFloat64) \
110 V(ChangeUint32ToUint64) \ 113 V(ChangeUint32ToUint64) \
111 V(Word32Clz) 114 V(Word32Clz)
112 115
113 class CodeStubAssembler { 116 // A "public" interface used by components outside of the compiler directory to
117 // create code objects with TurboFan's backend. This class is mostly a thin shim
118 // around the RawMachineAssembler, and its primary job is to ensure that the
119 // innards of the RawMachineAssembler and other compiler implementation details
120 // don't leak outside of the compiler directory.
121 //
122 // V8 components that need to generate low-level code using this interface
123 // should include this header--and this header only--from the compiler directory
124 // (this is actually enforced). Since all interesting data structures are
125 // forward declared, it's not possible for clients to peek inside the compiler
126 // internals.
127 //
128 // In addition to providing isolation between TurboFan and code generation
129 // clients, CodeAssembler also provides an abstraction for creating variables
130 // and enhanced Label functionality to merge variable values along paths where
131 // they have differing values, including loops.
132 class CodeAssembler {
114 public: 133 public:
115 // Create with CallStub linkage. 134 // Create with CallStub linkage.
116 // |result_size| specifies the number of results returned by the stub. 135 // |result_size| specifies the number of results returned by the stub.
117 // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor. 136 // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
118 CodeStubAssembler(Isolate* isolate, Zone* zone, 137 CodeAssembler(Isolate* isolate, Zone* zone,
119 const CallInterfaceDescriptor& descriptor, 138 const CallInterfaceDescriptor& descriptor, Code::Flags flags,
120 Code::Flags flags, const char* name, 139 const char* name, size_t result_size = 1);
121 size_t result_size = 1);
122 140
123 // Create with JSCall linkage. 141 // Create with JSCall linkage.
124 CodeStubAssembler(Isolate* isolate, Zone* zone, int parameter_count, 142 CodeAssembler(Isolate* isolate, Zone* zone, int parameter_count,
125 Code::Flags flags, const char* name); 143 Code::Flags flags, const char* name);
126 144
127 virtual ~CodeStubAssembler(); 145 virtual ~CodeAssembler();
128 146
129 Handle<Code> GenerateCode(); 147 Handle<Code> GenerateCode();
130 148
149 bool Is64();
Benedikt Meurer 2016/04/11 18:12:09 Nit: const
danno 2016/04/18 11:00:04 Done.
150
131 class Label; 151 class Label;
132 class Variable { 152 class Variable {
133 public: 153 public:
134 explicit Variable(CodeStubAssembler* assembler, MachineRepresentation rep); 154 explicit Variable(CodeAssembler* assembler, MachineRepresentation rep);
135 void Bind(Node* value); 155 void Bind(Node* value);
136 Node* value() const; 156 Node* value() const;
137 MachineRepresentation rep() const; 157 MachineRepresentation rep() const;
138 bool IsBound() const; 158 bool IsBound() const;
139 159
140 private: 160 private:
141 friend class CodeStubAssembler; 161 friend class CodeAssembler;
142 class Impl; 162 class Impl;
143 Impl* impl_; 163 Impl* impl_;
144 }; 164 };
145 165
146 enum AllocationFlag : uint8_t { 166 enum AllocationFlag : uint8_t {
147 kNone = 0, 167 kNone = 0,
148 kDoubleAlignment = 1, 168 kDoubleAlignment = 1,
149 kPretenured = 1 << 1 169 kPretenured = 1 << 1
150 }; 170 };
151 171
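
For orientation only, here is a sketch (not part of this patch) of how a client outside src/compiler might drive the public surface declared above: construct a CodeAssembler, use a Variable plus a merging Label to carry a value around a loop, and finally call GenerateCode(). The Parameter(), constant, Goto() and Bind() helpers are assumed from the hunks elided in this diff; BranchIfWord32Equal comes from the BRANCH_HELPER expansion further down.

// Sketch only -- not from this CL. Assumes the usual v8::internal::compiler
// namespace and the Parameter()/constant/Goto()/Bind()/Return() helpers that
// live in the hunks elided from this diff.
Handle<Code> GenerateCountDownStub(Isolate* isolate, Zone* zone,
                                   const CallInterfaceDescriptor& descriptor,
                                   Code::Flags flags) {
  CodeAssembler assembler(isolate, zone, descriptor, flags, "CountDown");

  // A Variable names a value that differs along control-flow paths; passing
  // it to a Label makes that label merge the incoming values into a phi.
  CodeAssembler::Variable counter(&assembler, MachineRepresentation::kWord32);
  counter.Bind(assembler.Parameter(0));

  CodeAssembler::Label loop(&assembler, &counter);  // loop header merges |counter|
  CodeAssembler::Label done(&assembler);
  assembler.Goto(&loop);

  assembler.Bind(&loop);
  {
    // Each pass through the back edge contributes a new value of |counter|.
    Node* decremented =
        assembler.Int32Sub(counter.value(), assembler.Int32Constant(1));
    counter.Bind(decremented);
    assembler.BranchIfWord32Equal(decremented, assembler.Int32Constant(0),
                                  &done, &loop);
  }

  assembler.Bind(&done);
  // A real stub would tag or otherwise convert this raw word32 before returning.
  assembler.Return(counter.value());
  return assembler.GenerateCode();
}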
(...skipping 52 matching lines...)
204 CODE_STUB_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_STUB_ASSEMBER_BINARY_OP) 224 CODE_STUB_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_STUB_ASSEMBER_BINARY_OP)
205 #undef DECLARE_CODE_STUB_ASSEMBER_BINARY_OP 225 #undef DECLARE_CODE_STUB_ASSEMBER_BINARY_OP
206 226
207 Node* WordShl(Node* value, int shift); 227 Node* WordShl(Node* value, int shift);
208 228
209 // Unary 229 // Unary
210 #define DECLARE_CODE_STUB_ASSEMBER_UNARY_OP(name) Node* name(Node* a); 230 #define DECLARE_CODE_STUB_ASSEMBER_UNARY_OP(name) Node* name(Node* a);
211 CODE_STUB_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_STUB_ASSEMBER_UNARY_OP) 231 CODE_STUB_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_STUB_ASSEMBER_UNARY_OP)
212 #undef DECLARE_CODE_STUB_ASSEMBER_UNARY_OP 232 #undef DECLARE_CODE_STUB_ASSEMBER_UNARY_OP
213 233
234 Node* TruncateFloat64ToInt32RoundToZero(Node* a);
235 Node* TruncateFloat64ToInt32JavaScript(Node* a);
236
214 // Projections 237 // Projections
215 Node* Projection(int index, Node* value); 238 Node* Projection(int index, Node* value);
216 239
217 // Calls 240 // Calls
218 Node* CallRuntime(Runtime::FunctionId function_id, Node* context); 241 Node* CallRuntime(Runtime::FunctionId function_id, Node* context);
219 Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1); 242 Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1);
220 Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1, 243 Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
221 Node* arg2); 244 Node* arg2);
222 Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1, 245 Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
223 Node* arg2, Node* arg3); 246 Node* arg2, Node* arg3);
(...skipping 40 matching lines...)
264 Node* context, Node* arg1, Node* arg2, 287 Node* context, Node* arg1, Node* arg2,
265 size_t result_size = 1); 288 size_t result_size = 1);
266 289
267 Node* TailCall(const CallInterfaceDescriptor& descriptor, Node* target, 290 Node* TailCall(const CallInterfaceDescriptor& descriptor, Node* target,
268 Node** args, size_t result_size = 1); 291 Node** args, size_t result_size = 1);
269 292
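
As a hedged illustration of the CallRuntime overloads above (not something this diff establishes): the runtime id, the parameter indices, and the convention that the context arrives as a trailing parameter are assumptions of this example.

// Sketch only. Runtime::kToNumber and the parameter layout are assumptions
// for illustration; Parameter() and Return() come from hunks elided above.
void GenerateToNumberViaRuntime(CodeAssembler* assembler, int context_index) {
  Node* value = assembler->Parameter(0);
  Node* context = assembler->Parameter(context_index);
  // Each additional argument selects the next wider CallRuntime overload.
  Node* result = assembler->CallRuntime(Runtime::kToNumber, context, value);
  assembler->Return(result);
}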
270 // =========================================================================== 293 // ===========================================================================
271 // Macros 294 // Macros
272 // =========================================================================== 295 // ===========================================================================
273 296
297 // Tag a Word as a Smi value.
298 Node* SmiTag(Node* value);
299 // Untag a Smi value as a Word.
300 Node* SmiUntag(Node* value);
301
274 // Float64 operations. 302 // Float64 operations.
275 Node* Float64Ceil(Node* x); 303 Node* Float64Ceil(Node* x);
276 Node* Float64Floor(Node* x); 304 Node* Float64Floor(Node* x);
277 Node* Float64Round(Node* x); 305 Node* Float64Round(Node* x);
278 Node* Float64Trunc(Node* x); 306 Node* Float64Trunc(Node* x);
279 307
280 // Tag a Word as a Smi value.
Benedikt Meurer 2016/04/11 18:12:09 I think those Smi helpers should be in the CodeAss
danno 2016/04/18 11:00:03 As discussed, I see them there too, but I can't mo
281 Node* SmiTag(Node* value);
282 // Untag a Smi value as a Word.
283 Node* SmiUntag(Node* value);
284
285 // Smi conversions.
286 Node* SmiToFloat64(Node* value);
287 Node* SmiToWord32(Node* value);
288
289 // Smi operations.
290 Node* SmiAdd(Node* a, Node* b);
291 Node* SmiAddWithOverflow(Node* a, Node* b);
292 Node* SmiSub(Node* a, Node* b);
293 Node* SmiSubWithOverflow(Node* a, Node* b);
294 Node* SmiEqual(Node* a, Node* b);
295 Node* SmiLessThan(Node* a, Node* b);
296 Node* SmiLessThanOrEqual(Node* a, Node* b);
297 Node* SmiMin(Node* a, Node* b);
298
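
The SmiTag/SmiUntag declarations kept above (together with SmiShiftBitsConstant further down) implement V8's small-integer tagging by shifting. A standalone worked example of that arithmetic, assuming the usual 64-bit layout where the total shift is kSmiShiftSize + kSmiTagSize = 32 (it is 1 on 32-bit targets):

// Standalone illustration of the Smi encoding, not V8 code. Assumes the
// default 64-bit layout with the 32-bit payload in the upper half of the word.
#include <cassert>
#include <cstdint>

int64_t SmiTag64(int32_t value) {
  return static_cast<int64_t>(value) << 32;  // the low (tag) bits become zero
}

int32_t SmiUntag64(int64_t smi) {
  return static_cast<int32_t>(smi >> 32);    // arithmetic shift keeps the sign
}

int main() {
  assert(SmiUntag64(SmiTag64(-7)) == -7);
  assert(SmiTag64(1) == (int64_t{1} << 32));
  return 0;
}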
299 // Load a value from the root array. 308 // Load a value from the root array.
300 Node* LoadRoot(Heap::RootListIndex root_index); 309 Node* LoadRoot(Heap::RootListIndex root_index);
301 310
302 // Check a value for smi-ness
303 Node* WordIsSmi(Node* a);
304
305 // Check that the value is a positive smi.
306 Node* WordIsPositiveSmi(Node* a);
307
308 // Load an object pointer from a buffer that isn't in the heap.
309 Node* LoadBufferObject(Node* buffer, int offset,
310 MachineType rep = MachineType::AnyTagged());
311 // Load a field from an object on the heap.
312 Node* LoadObjectField(Node* object, int offset,
313 MachineType rep = MachineType::AnyTagged());
314 // Load the floating point value of a HeapNumber.
315 Node* LoadHeapNumberValue(Node* object);
316 // Store the floating point value of a HeapNumber.
317 Node* StoreHeapNumberValue(Node* object, Node* value);
318 // Truncate the floating point value of a HeapNumber to an Int32.
319 Node* TruncateHeapNumberValueToWord32(Node* object);
320 // Load the bit field of a Map.
321 Node* LoadMapBitField(Node* map);
322 // Load bit field 2 of a map.
323 Node* LoadMapBitField2(Node* map);
324 // Load bit field 3 of a map.
325 Node* LoadMapBitField3(Node* map);
326 // Load the instance type of a map.
327 Node* LoadMapInstanceType(Node* map);
328 // Load the instance descriptors of a map.
329 Node* LoadMapDescriptors(Node* map);
330
331 // Load the hash field of a name.
332 Node* LoadNameHash(Node* name);
333
334 // Load an array element from a FixedArray.
335 Node* LoadFixedArrayElementInt32Index(Node* object, Node* int32_index,
336 int additional_offset = 0);
337 Node* LoadFixedArrayElementSmiIndex(Node* object, Node* smi_index,
338 int additional_offset = 0);
339 Node* LoadFixedArrayElementConstantIndex(Node* object, int index);
340
341 // Allocate an object of the given size. 311 // Allocate an object of the given size.
342 Node* Allocate(int size, AllocationFlags flags = kNone); 312 Node* Allocate(int size, AllocationFlags flags = kNone);
343 // Allocate a HeapNumber without initializing its value.
344 Node* AllocateHeapNumber();
345 // Allocate a HeapNumber with a specific value.
346 Node* AllocateHeapNumberWithValue(Node* value);
347
348 // Store an array element to a FixedArray.
349 Node* StoreFixedArrayElementNoWriteBarrier(Node* object, Node* index,
350 Node* value);
351 // Load the Map of an HeapObject.
352 Node* LoadMap(Node* object);
353 // Store the Map of an HeapObject.
354 Node* StoreMapNoWriteBarrier(Node* object, Node* map);
355 // Load the instance type of an HeapObject.
356 Node* LoadInstanceType(Node* object);
357
358 // Load the elements backing store of a JSObject.
359 Node* LoadElements(Node* object);
360 // Load the length of a fixed array base instance.
361 Node* LoadFixedArrayBaseLength(Node* array);
362
363 // Returns a node containing the bit field of |word32| selected by T's kShift and kMask.
364 template <typename T>
365 Node* BitFieldDecode(Node* word32) {
366 return BitFieldDecode(word32, T::kShift, T::kMask);
367 }
368
369 Node* BitFieldDecode(Node* word32, uint32_t shift, uint32_t mask);
370
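
To make the BitFieldDecode template above concrete (this CL removes it from this header, presumably into CodeStubAssembler given the issue title): any class exposing a kShift and an in-place kMask can be decoded. The field layout below is invented for illustration.

// Sketch only; |ExampleKindBits| is a made-up bit field occupying bits 3..7.
// Real callers pass the BitField classes defined on Map and friends.
struct ExampleKindBits {
  static const uint32_t kShift = 3;
  static const uint32_t kMask = 0x1F << 3;  // mask is in place (already shifted)
};

Node* DecodeExampleKind(CodeStubAssembler* assembler, Node* bit_field2) {
  // Equivalent to BitFieldDecode(bit_field2, ExampleKindBits::kShift,
  //                              ExampleKindBits::kMask).
  return assembler->BitFieldDecode<ExampleKindBits>(bit_field2);
}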
371 // Conversions.
372 Node* ChangeFloat64ToTagged(Node* value);
373 Node* ChangeInt32ToTagged(Node* value);
374 Node* TruncateTaggedToFloat64(Node* context, Node* value);
375 Node* TruncateTaggedToWord32(Node* context, Node* value);
376 313
377 // Branching helpers. 314 // Branching helpers.
378 // TODO(danno): Can we be more cleverish wrt. edge-split?
379 void BranchIf(Node* condition, Label* if_true, Label* if_false); 315 void BranchIf(Node* condition, Label* if_true, Label* if_false);
380 316
381 #define BRANCH_HELPER(name) \ 317 #define BRANCH_HELPER(name) \
382 void BranchIf##name(Node* a, Node* b, Label* if_true, Label* if_false) { \ 318 void BranchIf##name(Node* a, Node* b, Label* if_true, Label* if_false) { \
383 BranchIf(name(a, b), if_true, if_false); \ 319 BranchIf(name(a, b), if_true, if_false); \
384 } 320 }
385 CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(BRANCH_HELPER) 321 CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(BRANCH_HELPER)
386 #undef BRANCH_HELPER 322 #undef BRANCH_HELPER
387 323
388 void BranchIfSmiLessThan(Node* a, Node* b, Label* if_true, Label* if_false) {
389 BranchIf(SmiLessThan(a, b), if_true, if_false);
390 }
391
392 void BranchIfSmiLessThanOrEqual(Node* a, Node* b, Label* if_true,
393 Label* if_false) {
394 BranchIf(SmiLessThanOrEqual(a, b), if_true, if_false);
395 }
396
397 void BranchIfFloat64IsNaN(Node* value, Label* if_true, Label* if_false) { 324 void BranchIfFloat64IsNaN(Node* value, Label* if_true, Label* if_false) {
398 BranchIfFloat64Equal(value, value, if_false, if_true); 325 BranchIfFloat64Equal(value, value, if_false, if_true);
399 } 326 }
400 327
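
The swapped label order in BranchIfFloat64IsNaN above is the standard self-comparison trick: under IEEE 754, NaN is the only value that compares unequal to itself, so the if_false edge of value == value is exactly the NaN case. A plain C++ restatement:

// Standalone restatement of the NaN check used above, not V8 code.
#include <cassert>
#include <cmath>

bool IsNaNViaSelfCompare(double x) { return !(x == x); }

int main() {
  assert(IsNaNViaSelfCompare(std::nan("")));
  assert(!IsNaNViaSelfCompare(1.5));
  return 0;
}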
401 // Helpers which delegate to RawMachineAssembler. 328 // Helpers which delegate to RawMachineAssembler.
402 Factory* factory() const; 329 Factory* factory() const;
403 Isolate* isolate() const; 330 Isolate* isolate() const;
404 Zone* zone() const; 331 Zone* zone() const;
405 332
406 protected: 333 protected:
407 // Protected helpers which delegate to RawMachineAssembler. 334 // Protected helpers which delegate to RawMachineAssembler.
408 Graph* graph() const; 335 Graph* graph() const;
409 336
410 // Enables subclasses to perform operations before and after a call. 337 // Enables subclasses to perform operations before and after a call.
411 virtual void CallPrologue(); 338 virtual void CallPrologue();
412 virtual void CallEpilogue(); 339 virtual void CallEpilogue();
413 340
341 Node* SmiShiftBitsConstant();
342
414 private: 343 private:
415 friend class CodeStubAssemblerTester; 344 friend class CodeAssemblerTester;
416 345
417 CodeStubAssembler(Isolate* isolate, Zone* zone, 346 CodeAssembler(Isolate* isolate, Zone* zone, CallDescriptor* call_descriptor,
418 CallDescriptor* call_descriptor, Code::Flags flags, 347 Code::Flags flags, const char* name);
419 const char* name);
420 348
421 Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args); 349 Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
422 Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args); 350 Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args);
423 351
424 Node* SmiShiftBitsConstant();
425
426 Node* AllocateRawAligned(Node* size_in_bytes, AllocationFlags flags, 352 Node* AllocateRawAligned(Node* size_in_bytes, AllocationFlags flags,
427 Node* top_address, Node* limit_address); 353 Node* top_address, Node* limit_address);
428 Node* AllocateRawUnaligned(Node* size_in_bytes, AllocationFlags flags, 354 Node* AllocateRawUnaligned(Node* size_in_bytes, AllocationFlags flags,
429 Node* top_address, Node* limit_address); 355 Node* top_address, Node* limit_address);
430 356
431 base::SmartPointer<RawMachineAssembler> raw_assembler_; 357 base::SmartPointer<RawMachineAssembler> raw_assembler_;
432 Code::Flags flags_; 358 Code::Flags flags_;
433 const char* name_; 359 const char* name_;
434 bool code_generated_; 360 bool code_generated_;
435 ZoneVector<Variable::Impl*> variables_; 361 ZoneVector<Variable::Impl*> variables_;
436 362
437 DISALLOW_COPY_AND_ASSIGN(CodeStubAssembler); 363 DISALLOW_COPY_AND_ASSIGN(CodeAssembler);
438 }; 364 };
439 365
440 DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags); 366 DEFINE_OPERATORS_FOR_FLAGS(CodeAssembler::AllocationFlags);
441 367
442 class CodeStubAssembler::Label { 368 class CodeAssembler::Label {
443 public: 369 public:
444 enum Type { kDeferred, kNonDeferred }; 370 enum Type { kDeferred, kNonDeferred };
445 371
446 explicit Label(CodeStubAssembler* assembler, 372 explicit Label(
447 CodeStubAssembler::Label::Type type = 373 CodeAssembler* assembler,
448 CodeStubAssembler::Label::kNonDeferred) 374 CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
449 : CodeStubAssembler::Label(assembler, 0, nullptr, type) {} 375 : CodeAssembler::Label(assembler, 0, nullptr, type) {}
450 Label(CodeStubAssembler* assembler, 376 Label(CodeAssembler* assembler, CodeAssembler::Variable* merged_variable,
451 CodeStubAssembler::Variable* merged_variable, 377 CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
452 CodeStubAssembler::Label::Type type = 378 : CodeAssembler::Label(assembler, 1, &merged_variable, type) {}
453 CodeStubAssembler::Label::kNonDeferred) 379 Label(CodeAssembler* assembler, int merged_variable_count,
454 : CodeStubAssembler::Label(assembler, 1, &merged_variable, type) {} 380 CodeAssembler::Variable** merged_variables,
455 Label(CodeStubAssembler* assembler, int merged_variable_count, 381 CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred);
456 CodeStubAssembler::Variable** merged_variables,
457 CodeStubAssembler::Label::Type type =
458 CodeStubAssembler::Label::kNonDeferred);
459 ~Label() {} 382 ~Label() {}
460 383
461 private: 384 private:
462 friend class CodeStubAssembler; 385 friend class CodeAssembler;
463 386
464 void Bind(); 387 void Bind();
465 void MergeVariables(); 388 void MergeVariables();
466 389
467 bool bound_; 390 bool bound_;
468 size_t merge_count_; 391 size_t merge_count_;
469 CodeStubAssembler* assembler_; 392 CodeAssembler* assembler_;
470 RawMachineLabel* label_; 393 RawMachineLabel* label_;
471 // Map of variables that need to be merged to their phi nodes (or placeholders 394 // Map of variables that need to be merged to their phi nodes (or placeholders
472 // for those phis). 395 // for those phis).
473 std::map<Variable::Impl*, Node*> variable_phis_; 396 std::map<Variable::Impl*, Node*> variable_phis_;
474 // Map of variables to the list of value nodes that have been added from each 397 // Map of variables to the list of value nodes that have been added from each
475 // merge path in their order of merging. 398 // merge path in their order of merging.
476 std::map<Variable::Impl*, std::vector<Node*>> variable_merges_; 399 std::map<Variable::Impl*, std::vector<Node*>> variable_merges_;
477 }; 400 };
478 401
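
One last hedged sketch (again, not part of the patch) showing the Label constructors above on a simple diamond: a deferred label marks the cold arm, and a merged Variable becomes a single phi at the join. BranchIfInt32LessThan comes from the BRANCH_HELPER expansion earlier; Parameter(), constants, Goto(), Bind() and Return() are assumed from elided hunks.

// Sketch only. Clamps parameter 0 to be non-negative; |result| gets a
// different binding on each arm and is merged into one phi at |merge|.
void GenerateClampToZero(CodeAssembler* assembler) {
  CodeAssembler::Variable result(assembler, MachineRepresentation::kWord32);
  CodeAssembler::Label if_negative(assembler, CodeAssembler::Label::kDeferred);
  CodeAssembler::Label if_non_negative(assembler);
  CodeAssembler::Label merge(assembler, &result);

  Node* input = assembler->Parameter(0);
  assembler->BranchIfInt32LessThan(input, assembler->Int32Constant(0),
                                   &if_negative, &if_non_negative);

  assembler->Bind(&if_negative);
  result.Bind(assembler->Int32Constant(0));
  assembler->Goto(&merge);

  assembler->Bind(&if_non_negative);
  result.Bind(input);
  assembler->Goto(&merge);

  assembler->Bind(&merge);  // result.value() is now the phi of both bindings
  assembler->Return(result.value());
}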
479 } // namespace compiler 402 } // namespace compiler
480 } // namespace internal 403 } // namespace internal
481 } // namespace v8 404 } // namespace v8
482 405
483 #endif // V8_COMPILER_CODE_STUB_ASSEMBLER_H_ 406 #endif // V8_COMPILER_CODE_ASSEMBLER_H_