OLD | NEW |
| (Empty) |
1 // Copyright 2012 the V8 project authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #ifndef V8_MIPS_LITHIUM_CODEGEN_MIPS_H_ | |
6 #define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_ | |
7 | |
8 #include "src/deoptimizer.h" | |
9 #include "src/lithium-codegen.h" | |
10 #include "src/mips/lithium-gap-resolver-mips.h" | |
11 #include "src/mips/lithium-mips.h" | |
12 #include "src/safepoint-table.h" | |
13 #include "src/scopes.h" | |
14 #include "src/utils.h" | |
15 | |
16 namespace v8 { | |
17 namespace internal { | |
18 | |
19 // Forward declarations. | |
20 class LDeferredCode; | |
21 class SafepointGenerator; | |
22 | |
// MIPS backend of the Crankshaft (Lithium) code generator: walks a LChunk
// and emits machine code together with deoptimization data and safepoint
// tables.
class LCodeGen: public LCodeGenBase {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : LCodeGenBase(chunk, assembler, info),
        deoptimizations_(4, info->zone()),
        jump_table_(4, info->zone()),
        inlined_function_count_(0),
        scope_(info->scope()),
        translations_(info->zone()),
        deferred_(8, info->zone()),
        osr_pc_offset_(-1),
        frame_is_built_(false),
        safepoints_(info->zone()),
        resolver_(this),
        expected_safepoint_kind_(Safepoint::kSimple) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }


  // Maps a block id to the id of the block that will actually be emitted
  // for it (after chunk-level block merging).
  int LookupDestination(int block_id) const {
    return chunk()->LookupDestination(block_id);
  }

  bool IsNextEmittedBlock(int block_id) const {
    return LookupDestination(block_id) == GetNextEmittedBlock();
  }

  // True when a stack frame must be built eagerly in the prologue: there
  // are spill slots, non-deferred calls are made, the code is not a stub,
  // or a frame is explicitly required.
  bool NeedsEagerFrame() const {
    return GetStackSlotCount() > 0 ||
        info()->is_non_deferred_calling() ||
        !info()->IsStub() ||
        info()->requires_frame();
  }
  // True when a frame is only constructed lazily inside deferred code.
  bool NeedsDeferredFrame() const {
    return !NeedsEagerFrame() && info()->is_deferred_calling();
  }

  // ra has been saved on the stack exactly when a frame has been built.
  RAStatus GetRAState() const {
    return frame_is_built_ ? kRAHasBeenSaved : kRAHasNotBeenSaved;
  }

  // Support for converting LOperands to assembler types.
  // LOperand must be a register.
  Register ToRegister(LOperand* op) const;

  // LOperand is loaded into scratch, unless already a register.
  Register EmitLoadRegister(LOperand* op, Register scratch);

  // LOperand must be a double register.
  DoubleRegister ToDoubleRegister(LOperand* op) const;

  // LOperand is loaded into dbl_scratch, unless already a double register.
  DoubleRegister EmitLoadDoubleRegister(LOperand* op,
                                        FloatRegister flt_scratch,
                                        DoubleRegister dbl_scratch);
  int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
  int32_t ToInteger32(LConstantOperand* op) const;
  Smi* ToSmi(LConstantOperand* op) const;
  double ToDouble(LConstantOperand* op) const;
  Operand ToOperand(LOperand* op);
  MemOperand ToMemOperand(LOperand* op) const;
  // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
  MemOperand ToHighMemOperand(LOperand* op) const;

  bool IsInteger32(LConstantOperand* op) const;
  bool IsSmi(LConstantOperand* op) const;
  Handle<Object> ToHandle(LConstantOperand* op) const;

  // Try to generate code for the entire chunk, but it may fail if the
  // chunk contains constructs we cannot handle. Returns true if the
  // code generation attempt succeeded.
  bool GenerateCode();

  // Finish the code by setting stack height, safepoint, and bailout
  // information on it.
  void FinishCode(Handle<Code> code);

  // Deferred (out-of-line) code paths reached from the corresponding
  // Lithium instructions.
  void DoDeferredNumberTagD(LNumberTagD* instr);

  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
  void DoDeferredNumberTagIU(LInstruction* instr,
                             LOperand* value,
                             LOperand* temp1,
                             LOperand* temp2,
                             IntegerSignedness signedness);

  void DoDeferredTaggedToI(LTaggedToI* instr);
  void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
  void DoDeferredStackCheck(LStackCheck* instr);
  void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
  void DoDeferredAllocate(LAllocate* instr);
  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                   Register result,
                                   Register object,
                                   Register index);

  // Parallel move support.
  void DoParallelMove(LParallelMove* move);
  void DoGap(LGap* instr);

  MemOperand PrepareKeyedOperand(Register key,
                                 Register base,
                                 bool key_is_constant,
                                 int constant_key,
                                 int element_size,
                                 int shift_size,
                                 int base_offset);

  // Emit frame translation commands for an environment.
  void WriteTranslation(LEnvironment* environment, Translation* translation);

  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO

 private:
  LanguageMode language_mode() const { return info()->language_mode(); }

  Scope* scope() const { return scope_; }

  // Scratch registers reserved for Lithium code generation.
  Register scratch0() { return kLithiumScratchReg; }
  Register scratch1() { return kLithiumScratchReg2; }
  DoubleRegister double_scratch0() { return kLithiumScratchDouble; }

  LInstruction* GetNextInstruction();

  void EmitClassOfTest(Label* if_true,
                       Label* if_false,
                       Handle<String> class_name,
                       Register input,
                       Register temporary,
                       Register temporary2);

  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }

  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

  void SaveCallerDoubles();
  void RestoreCallerDoubles();

  // Code generation passes.  Returns true if code generation should
  // continue.
  void GenerateBodyInstructionPre(LInstruction* instr) override;
  bool GeneratePrologue();
  bool GenerateDeferredCode();
  bool GenerateJumpTable();
  bool GenerateSafepointTable();

  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
  void GenerateOsrPrologue();

  // How the safepoint for a call site should be recorded.
  enum SafepointMode {
    RECORD_SIMPLE_SAFEPOINT,
    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
  };

  void CallCode(Handle<Code> code,
                RelocInfo::Mode mode,
                LInstruction* instr);

  void CallCodeGeneric(Handle<Code> code,
                       RelocInfo::Mode mode,
                       LInstruction* instr,
                       SafepointMode safepoint_mode);

  void CallRuntime(const Runtime::Function* function,
                   int num_arguments,
                   LInstruction* instr,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  // Convenience overload that resolves the runtime function from its id.
  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, num_arguments, instr);
  }

  void LoadContextFromDeferred(LOperand* context);
  void CallRuntimeFromDeferred(Runtime::FunctionId id,
                               int argc,
                               LInstruction* instr,
                               LOperand* context);

  // Generate a direct call to a known function.  Expects the function
  // to be in a1.
  void CallKnownFunction(Handle<JSFunction> function,
                         int formal_parameter_count, int arity,
                         LInstruction* instr);

  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode);

  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                            Safepoint::DeoptMode mode);
  // Emit a deoptimization bailout when `condition` holds for src1/src2
  // (defaults compare zero_reg against zero_reg, i.e. unconditional for
  // Condition al-style usage).
  void DeoptimizeIf(Condition condition, LInstruction* instr,
                    Deoptimizer::DeoptReason deopt_reason,
                    Deoptimizer::BailoutType bailout_type,
                    Register src1 = zero_reg,
                    const Operand& src2 = Operand(zero_reg));
  void DeoptimizeIf(
      Condition condition, LInstruction* instr,
      Deoptimizer::DeoptReason deopt_reason = Deoptimizer::kNoReason,
      Register src1 = zero_reg, const Operand& src2 = Operand(zero_reg));

  void AddToTranslation(LEnvironment* environment,
                        Translation* translation,
                        LOperand* op,
                        bool is_tagged,
                        bool is_uint32,
                        int* object_index_pointer,
                        int* dematerialized_index_pointer);
  void PopulateDeoptimizationData(Handle<Code> code);

  void PopulateDeoptimizationLiteralsWithInlinedFunctions();

  Register ToRegister(int index) const;
  DoubleRegister ToDoubleRegister(int index) const;

  MemOperand BuildSeqStringOperand(Register string,
                                   LOperand* index,
                                   String::Encoding encoding);

  void EmitIntegerMathAbs(LMathAbs* instr);

  // Support for recording safepoint and position information.
  void RecordSafepoint(LPointerMap* pointers,
                       Safepoint::Kind kind,
                       int arguments,
                       Safepoint::DeoptMode mode);
  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
  void RecordSafepoint(Safepoint::DeoptMode mode);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    Safepoint::DeoptMode mode);

  void RecordAndWritePosition(int position) override;

  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
  void EmitGoto(int block);

  // EmitBranch expects to be the last instruction of a block.
  template<class InstrType>
  void EmitBranch(InstrType instr,
                  Condition condition,
                  Register src1,
                  const Operand& src2);
  template<class InstrType>
  void EmitBranchF(InstrType instr,
                   Condition condition,
                   FPURegister src1,
                   FPURegister src2);
  template <class InstrType>
  void EmitTrueBranch(InstrType instr, Condition condition, Register src1,
                      const Operand& src2);
  template <class InstrType>
  void EmitFalseBranch(InstrType instr, Condition condition, Register src1,
                       const Operand& src2);
  template<class InstrType>
  void EmitFalseBranchF(InstrType instr,
                        Condition condition,
                        FPURegister src1,
                        FPURegister src2);
  void EmitCmpI(LOperand* left, LOperand* right);
  void EmitNumberUntagD(LNumberUntagD* instr, Register input,
                        DoubleRegister result, NumberUntagDMode mode);

  // Emits optimized code for typeof x == "y".  Modifies input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  // Returns two registers in cmp1 and cmp2 that can be used in the
  // Branch instruction after EmitTypeofIs.
  Condition EmitTypeofIs(Label* true_label,
                         Label* false_label,
                         Register input,
                         Handle<String> type_name,
                         Register* cmp1,
                         Operand* cmp2);

  // Emits optimized code for %_IsString(x).  Preserves input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitIsString(Register input,
                         Register temp1,
                         Label* is_not_string,
                         SmiCheck check_needed);

  // Emits optimized code for %_IsConstructCall().
  // Caller should branch on equal condition.
  void EmitIsConstructCall(Register temp1, Register temp2);

  // Emits optimized code to deep-copy the contents of statically known
  // object graphs (e.g. object literal boilerplate).
  void EmitDeepCopy(Handle<JSObject> object,
                    Register result,
                    Register source,
                    int* offset,
                    AllocationSiteMode mode);
  // Emit optimized code for integer division.
  // Inputs are signed.
  // All registers are clobbered.
  // If 'remainder' is no_reg, it is not computed.
  void EmitSignedIntegerDivisionByConstant(Register result,
                                           Register dividend,
                                           int32_t divisor,
                                           Register remainder,
                                           Register scratch,
                                           LEnvironment* environment);


  void EnsureSpaceForLazyDeopt(int space_needed) override;
  void DoLoadKeyedExternalArray(LLoadKeyed* instr);
  void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
  void DoLoadKeyedFixedArray(LLoadKeyed* instr);
  void DoStoreKeyedExternalArray(LStoreKeyed* instr);
  void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
  void DoStoreKeyedFixedArray(LStoreKeyed* instr);

  template <class T>
  void EmitVectorLoadICRegisters(T* instr);
  template <class T>
  void EmitVectorStoreICRegisters(T* instr);

  ZoneList<LEnvironment*> deoptimizations_;
  ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
  int inlined_function_count_;
  Scope* const scope_;
  TranslationBuffer translations_;
  ZoneList<LDeferredCode*> deferred_;
  int osr_pc_offset_;
  bool frame_is_built_;

  // Builder that keeps track of safepoints in the code. The table
  // itself is emitted at the end of the generated code.
  SafepointTableBuilder safepoints_;

  // Compiler from a set of parallel moves to a sequential list of moves.
  LGapResolver resolver_;

  Safepoint::Kind expected_safepoint_kind_;

  // RAII scope that pushes ra and saves the register state via
  // StoreRegistersStateStub on entry, restoring it on exit, and switches
  // expected_safepoint_kind_ to kWithRegisters for its duration.
  class PushSafepointRegistersScope final BASE_EMBEDDED {
   public:
    explicit PushSafepointRegistersScope(LCodeGen* codegen)
        : codegen_(codegen) {
      DCHECK(codegen_->info()->is_calling());
      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;

      StoreRegistersStateStub stub(codegen_->isolate());
      codegen_->masm_->push(ra);
      codegen_->masm_->CallStub(&stub);
    }

    ~PushSafepointRegistersScope() {
      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
      RestoreRegistersStateStub stub(codegen_->isolate());
      codegen_->masm_->push(ra);
      codegen_->masm_->CallStub(&stub);
      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
    }

   private:
    LCodeGen* codegen_;
  };

  friend class LDeferredCode;
  friend class LEnvironment;
  friend class SafepointGenerator;
  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};
397 | |
398 | |
399 class LDeferredCode : public ZoneObject { | |
400 public: | |
401 explicit LDeferredCode(LCodeGen* codegen) | |
402 : codegen_(codegen), | |
403 external_exit_(NULL), | |
404 instruction_index_(codegen->current_instruction_) { | |
405 codegen->AddDeferredCode(this); | |
406 } | |
407 | |
408 virtual ~LDeferredCode() {} | |
409 virtual void Generate() = 0; | |
410 virtual LInstruction* instr() = 0; | |
411 | |
412 void SetExit(Label* exit) { external_exit_ = exit; } | |
413 Label* entry() { return &entry_; } | |
414 Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; } | |
415 int instruction_index() const { return instruction_index_; } | |
416 | |
417 protected: | |
418 LCodeGen* codegen() const { return codegen_; } | |
419 MacroAssembler* masm() const { return codegen_->masm(); } | |
420 | |
421 private: | |
422 LCodeGen* codegen_; | |
423 Label entry_; | |
424 Label exit_; | |
425 Label* external_exit_; | |
426 int instruction_index_; | |
427 }; | |
428 | |
429 } // namespace internal | |
430 } // namespace v8 | |
431 | |
432 #endif // V8_MIPS_LITHIUM_CODEGEN_MIPS_H_ | |
OLD | NEW |