// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
#define V8_ARM64_LITHIUM_CODEGEN_ARM64_H_

#include "src/arm64/lithium-arm64.h"

#include "src/arm64/lithium-gap-resolver-arm64.h"
#include "src/deoptimizer.h"
#include "src/lithium-codegen.h"
#include "src/safepoint-table.h"
#include "src/scopes.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

// Forward declarations.
class LDeferredCode;
class SafepointGenerator;
class BranchGenerator;

class LCodeGen: public LCodeGenBase {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : LCodeGenBase(chunk, assembler, info),
        deoptimizations_(4, info->zone()),
        jump_table_(4, info->zone()),
        inlined_function_count_(0),
        scope_(info->scope()),
        translations_(info->zone()),
        deferred_(8, info->zone()),
        osr_pc_offset_(-1),
        frame_is_built_(false),
        safepoints_(info->zone()),
        resolver_(this),
        expected_safepoint_kind_(Safepoint::kSimple),
        pushed_arguments_(0) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }

  // Simple accessors.
  Scope* scope() const { return scope_; }

  int LookupDestination(int block_id) const {
    return chunk()->LookupDestination(block_id);
  }

  bool IsNextEmittedBlock(int block_id) const {
    return LookupDestination(block_id) == GetNextEmittedBlock();
  }

  bool NeedsEagerFrame() const {
    return GetStackSlotCount() > 0 ||
        info()->is_non_deferred_calling() ||
        !info()->IsStub() ||
        info()->requires_frame();
  }
  bool NeedsDeferredFrame() const {
    return !NeedsEagerFrame() && info()->is_deferred_calling();
  }

  LinkRegisterStatus GetLinkRegisterState() const {
    return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
  }

  // Attempts to generate code for the entire chunk; this may fail if the
  // chunk contains constructs we cannot handle. Returns true if code
  // generation succeeded.
  bool GenerateCode();

  // Finish the code by setting stack height, safepoint, and bailout
  // information on it.
  void FinishCode(Handle<Code> code);

  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
  // Support for converting LOperands to assembler types.
  Register ToRegister(LOperand* op) const;
  Register ToRegister32(LOperand* op) const;
  Operand ToOperand(LOperand* op);
  Operand ToOperand32(LOperand* op);
  enum StackMode { kMustUseFramePointer, kCanUseStackPointer };
  MemOperand ToMemOperand(LOperand* op,
                          StackMode stack_mode = kCanUseStackPointer) const;
  Handle<Object> ToHandle(LConstantOperand* op) const;

  template <class LI>
  Operand ToShiftedRightOperand32(LOperand* right, LI* shift_info);

  int JSShiftAmountFromLConstant(LOperand* constant) {
    return ToInteger32(LConstantOperand::cast(constant)) & 0x1f;
  }
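  // Shift counts follow ECMAScript semantics: only the low five bits of the
  // amount are used, so, for example, a constant count of 33 is masked to 1
  // and (x << 33) behaves like (x << 1).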

  // TODO(jbramley): Examine these helpers and check that they make sense.
  // IsInteger32Constant returns true for smi constants, for example.
  bool IsInteger32Constant(LConstantOperand* op) const;
  bool IsSmi(LConstantOperand* op) const;

  int32_t ToInteger32(LConstantOperand* op) const;
  Smi* ToSmi(LConstantOperand* op) const;
  double ToDouble(LConstantOperand* op) const;
  DoubleRegister ToDoubleRegister(LOperand* op) const;

  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
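  // For each concrete instruction named by LITHIUM_CONCRETE_INSTRUCTION_LIST
  // (see lithium-arm64.h) this expands to a declaration of the form, for
  // example:
  //
  //   void DoAddI(LAddI* node);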

 private:
  // Return a double scratch register which can be used locally
  // when generating code for a lithium instruction.
  DoubleRegister double_scratch() { return crankshaft_fp_scratch; }

  // Deferred code support.
  void DoDeferredNumberTagD(LNumberTagD* instr);
  void DoDeferredStackCheck(LStackCheck* instr);
  void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
  void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
                               Label* exit,
                               Label* allocation_entry);

  void DoDeferredNumberTagU(LInstruction* instr,
                            LOperand* value,
                            LOperand* temp1,
                            LOperand* temp2);
  void DoDeferredTaggedToI(LTaggedToI* instr,
                           LOperand* value,
                           LOperand* temp1,
                           LOperand* temp2);
  void DoDeferredAllocate(LAllocate* instr);
  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                   Register result,
                                   Register object,
                                   Register index);

  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
  void EmitGoto(int block);
  void DoGap(LGap* instr);

  // Generic version of EmitBranch. It avoids emitting a branch to the next
  // emitted basic block when we could simply fall through. Don't use it
  // directly; prefer one of the helpers such as LCodeGen::EmitBranch or
  // LCodeGen::EmitCompareAndBranch.
  template<class InstrType>
  void EmitBranchGeneric(InstrType instr,
                         const BranchGenerator& branch);

  template<class InstrType>
  void EmitBranch(InstrType instr, Condition condition);

  template<class InstrType>
  void EmitCompareAndBranch(InstrType instr,
                            Condition condition,
                            const Register& lhs,
                            const Operand& rhs);

  template<class InstrType>
  void EmitTestAndBranch(InstrType instr,
                         Condition condition,
                         const Register& value,
                         uint64_t mask);
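  // For example, a smi check can branch directly on the tag bit (a sketch
  // following the patterns in lithium-codegen-arm64.cc):
  //
  //   EmitTestAndBranch(instr, eq, value, kSmiTagMask);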

  template<class InstrType>
  void EmitBranchIfNonZeroNumber(InstrType instr,
                                 const FPRegister& value,
                                 const FPRegister& scratch);

  template<class InstrType>
  void EmitBranchIfHeapNumber(InstrType instr,
                              const Register& value);

  template<class InstrType>
  void EmitBranchIfRoot(InstrType instr,
                        const Register& value,
                        Heap::RootListIndex index);

  // Emits optimized code to deep-copy the contents of statically known object
  // graphs (e.g. object literal boilerplate). Expects a pointer to the
  // allocated destination object in the result register, and a pointer to the
  // source object in the source register.
  void EmitDeepCopy(Handle<JSObject> object,
                    Register result,
                    Register source,
                    Register scratch,
                    int* offset,
                    AllocationSiteMode mode);

  template <class T>
  void EmitVectorLoadICRegisters(T* instr);
  template <class T>
  void EmitVectorStoreICRegisters(T* instr);

  // Emits optimized code for %_IsString(x). Preserves the input register.
  // Returns the condition on which a final split to the true and false labels
  // should be made, to optimize fallthrough.
  Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
                         SmiCheck check_needed);
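  // A typical use, sketched after DoIsStringAndBranch in
  // lithium-codegen-arm64.cc: the caller emits the final split itself.
  //
  //   Condition is_string = EmitIsString(value, temp1,
  //                                      instr->FalseLabel(chunk_),
  //                                      INLINE_SMI_CHECK);
  //   EmitBranch(instr, is_string);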

  void PopulateDeoptimizationData(Handle<Code> code);
  void PopulateDeoptimizationLiteralsWithInlinedFunctions();

  MemOperand BuildSeqStringOperand(Register string,
                                   Register temp,
                                   LOperand* index,
                                   String::Encoding encoding);
  void DeoptimizeBranch(LInstruction* instr,
                        Deoptimizer::DeoptReason deopt_reason,
                        BranchType branch_type, Register reg = NoReg,
                        int bit = -1,
                        Deoptimizer::BailoutType* override_bailout_type = NULL);
  void Deoptimize(LInstruction* instr, Deoptimizer::DeoptReason deopt_reason,
                  Deoptimizer::BailoutType* override_bailout_type = NULL);
  void DeoptimizeIf(Condition cond, LInstruction* instr,
                    Deoptimizer::DeoptReason deopt_reason);
  void DeoptimizeIfZero(Register rt, LInstruction* instr,
                        Deoptimizer::DeoptReason deopt_reason);
  void DeoptimizeIfNotZero(Register rt, LInstruction* instr,
                           Deoptimizer::DeoptReason deopt_reason);
  void DeoptimizeIfNegative(Register rt, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason);
  void DeoptimizeIfSmi(Register rt, LInstruction* instr,
                       Deoptimizer::DeoptReason deopt_reason);
  void DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
                          Deoptimizer::DeoptReason deopt_reason);
  void DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
                        LInstruction* instr,
                        Deoptimizer::DeoptReason deopt_reason);
  void DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
                           LInstruction* instr,
                           Deoptimizer::DeoptReason deopt_reason);
  void DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr);
  void DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
                             Deoptimizer::DeoptReason deopt_reason);
  void DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
                          Deoptimizer::DeoptReason deopt_reason);
  void DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason);
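  // These helpers all funnel into DeoptimizeBranch. A typical call, sketched
  // (the available DeoptReason values are defined in deoptimizer.h):
  //
  //   DeoptimizeIfSmi(input, instr, Deoptimizer::kSmi);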

  MemOperand PrepareKeyedExternalArrayOperand(Register key,
                                              Register base,
                                              Register scratch,
                                              bool key_is_smi,
                                              bool key_is_constant,
                                              int constant_key,
                                              ElementsKind elements_kind,
                                              int base_offset);
  MemOperand PrepareKeyedArrayOperand(Register base,
                                      Register elements,
                                      Register key,
                                      bool key_is_tagged,
                                      ElementsKind elements_kind,
                                      Representation representation,
                                      int base_offset);

  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                            Safepoint::DeoptMode mode);

  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }

  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

  // Emit frame translation commands for an environment.
  void WriteTranslation(LEnvironment* environment, Translation* translation);

  void AddToTranslation(LEnvironment* environment,
                        Translation* translation,
                        LOperand* op,
                        bool is_tagged,
                        bool is_uint32,
                        int* object_index_pointer,
                        int* dematerialized_index_pointer);

  void SaveCallerDoubles();
  void RestoreCallerDoubles();

  // Code generation steps. Returns true if code generation should continue.
  void GenerateBodyInstructionPre(LInstruction* instr) override;
  bool GeneratePrologue();
  bool GenerateDeferredCode();
  bool GenerateJumpTable();
  bool GenerateSafepointTable();

  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
  void GenerateOsrPrologue();

  enum SafepointMode {
    RECORD_SIMPLE_SAFEPOINT,
    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
  };

  void CallCode(Handle<Code> code,
                RelocInfo::Mode mode,
                LInstruction* instr);

  void CallCodeGeneric(Handle<Code> code,
                       RelocInfo::Mode mode,
                       LInstruction* instr,
                       SafepointMode safepoint_mode);

  void CallRuntime(const Runtime::Function* function,
                   int num_arguments,
                   LInstruction* instr,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, num_arguments, instr);
  }
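  // For example, a zero-argument runtime call can be emitted as (a sketch):
  //
  //   CallRuntime(Runtime::kStackGuard, 0, instr);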

  void LoadContextFromDeferred(LOperand* context);
  void CallRuntimeFromDeferred(Runtime::FunctionId id,
                               int argc,
                               LInstruction* instr,
                               LOperand* context);

  // Generate a direct call to a known function. Expects the function
  // to be in x1.
  void CallKnownFunction(Handle<JSFunction> function,
                         int formal_parameter_count, int arity,
                         LInstruction* instr);

  // Support for recording safepoint and position information.
  void RecordAndWritePosition(int position) override;
  void RecordSafepoint(LPointerMap* pointers,
                       Safepoint::Kind kind,
                       int arguments,
                       Safepoint::DeoptMode mode);
  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
  void RecordSafepoint(Safepoint::DeoptMode mode);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    Safepoint::DeoptMode mode);
  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode);

  void EnsureSpaceForLazyDeopt(int space_needed) override;

  ZoneList<LEnvironment*> deoptimizations_;
  ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
  int inlined_function_count_;
  Scope* const scope_;
  TranslationBuffer translations_;
  ZoneList<LDeferredCode*> deferred_;
  int osr_pc_offset_;
  bool frame_is_built_;

  // Builder that keeps track of safepoints in the code. The table itself is
  // emitted at the end of the generated code.
  SafepointTableBuilder safepoints_;

  // Compiles a set of parallel moves into a sequential list of moves.
  LGapResolver resolver_;

  Safepoint::Kind expected_safepoint_kind_;

  // The number of arguments pushed onto the stack, either by this block or by
  // a predecessor.
  int pushed_arguments_;

  void RecordPushedArgumentsDelta(int delta) {
    pushed_arguments_ += delta;
    DCHECK(pushed_arguments_ >= 0);
  }
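  // For example, an instruction that pushes argc arguments records +argc and
  // the call that consumes them records -argc, so the running count stays
  // non-negative, as the DCHECK above asserts.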

  int old_position_;

  class PushSafepointRegistersScope BASE_EMBEDDED {
   public:
    explicit PushSafepointRegistersScope(LCodeGen* codegen)
        : codegen_(codegen) {
      DCHECK(codegen_->info()->is_calling());
      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;

      UseScratchRegisterScope temps(codegen_->masm_);
      // Preserve the value of lr which must be saved on the stack (the call to
      // the stub will clobber it).
      Register to_be_pushed_lr =
          temps.UnsafeAcquire(StoreRegistersStateStub::to_be_pushed_lr());
      codegen_->masm_->Mov(to_be_pushed_lr, lr);
      StoreRegistersStateStub stub(codegen_->isolate());
      codegen_->masm_->CallStub(&stub);
    }

    ~PushSafepointRegistersScope() {
      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
      RestoreRegistersStateStub stub(codegen_->isolate());
      codegen_->masm_->CallStub(&stub);
      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
    }

   private:
    LCodeGen* codegen_;
  };
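
  // A typical use, sketched after the deferred-code paths in
  // lithium-codegen-arm64.cc: bracket a runtime call made while registers are
  // live, so the safepoint records them.
  //
  //   {
  //     PushSafepointRegistersScope scope(this);
  //     CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr,
  //                             instr->context());
  //   }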

  friend class LDeferredCode;
  friend class SafepointGenerator;
  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};


class LDeferredCode: public ZoneObject {
 public:
  explicit LDeferredCode(LCodeGen* codegen)
      : codegen_(codegen),
        external_exit_(NULL),
        instruction_index_(codegen->current_instruction_) {
    codegen->AddDeferredCode(this);
  }

  virtual ~LDeferredCode() { }
  virtual void Generate() = 0;
  virtual LInstruction* instr() = 0;

  void SetExit(Label* exit) { external_exit_ = exit; }
  Label* entry() { return &entry_; }
  Label* exit() { return (external_exit_ != NULL) ? external_exit_ : &exit_; }
  int instruction_index() const { return instruction_index_; }

 protected:
  LCodeGen* codegen() const { return codegen_; }
  MacroAssembler* masm() const { return codegen_->masm(); }

 private:
  LCodeGen* codegen_;
  Label entry_;
  Label exit_;
  Label* external_exit_;
  int instruction_index_;
};
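
// Concrete deferred stubs are declared as local subclasses in
// lithium-codegen-arm64.cc, along these lines (a sketch; exact names and
// override style vary):
//
//   class DeferredNumberTagD : public LDeferredCode {
//    public:
//     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
//         : LDeferredCode(codegen), instr_(instr) { }
//     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
//     virtual LInstruction* instr() { return instr_; }
//    private:
//     LNumberTagD* instr_;
//   };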


// This is the abstract class used by EmitBranchGeneric. It is used to emit
// code for conditional branching: Emit() emits the branch taken when the
// condition holds, and EmitInverted() emits the branch taken when the
// inverted condition holds.
//
// For concrete examples see the implementations in lithium-codegen-arm64.cc
// (e.g. BranchOnCondition, CompareAndBranch).
class BranchGenerator BASE_EMBEDDED {
 public:
  explicit BranchGenerator(LCodeGen* codegen)
    : codegen_(codegen) { }

  virtual ~BranchGenerator() { }

  virtual void Emit(Label* label) const = 0;
  virtual void EmitInverted(Label* label) const = 0;

 protected:
  MacroAssembler* masm() const { return codegen_->masm(); }

  LCodeGen* codegen_;
};
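
// For instance, BranchOnCondition in lithium-codegen-arm64.cc emits a plain
// conditional branch roughly as follows (a sketch; `__` abbreviates masm()->
// as in the .cc file):
//
//   class BranchOnCondition : public BranchGenerator {
//    public:
//     BranchOnCondition(LCodeGen* codegen, Condition cond)
//         : BranchGenerator(codegen), cond_(cond) { }
//     virtual void Emit(Label* label) const { __ B(cond_, label); }
//     virtual void EmitInverted(Label* label) const {
//       if (cond_ != al) __ B(NegateCondition(cond_), label);
//     }
//    private:
//     Condition cond_;
//   };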

}  // namespace internal
}  // namespace v8

#endif  // V8_ARM64_LITHIUM_CODEGEN_ARM64_H_