// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_IA32_LITHIUM_CODEGEN_IA32_H_
#define V8_IA32_LITHIUM_CODEGEN_IA32_H_

#include "src/ia32/lithium-ia32.h"

#include "src/base/logging.h"
#include "src/deoptimizer.h"
#include "src/ia32/lithium-gap-resolver-ia32.h"
#include "src/lithium-codegen.h"
#include "src/safepoint-table.h"
#include "src/scopes.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

// Forward declarations.
class LDeferredCode;
class LGapNode;
class SafepointGenerator;

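// Generates native ia32 code for a compiled Lithium chunk, one instruction
// at a time, recording the safepoint and deoptimization information the
// runtime needs alongside the generated code.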
class LCodeGen: public LCodeGenBase {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : LCodeGenBase(chunk, assembler, info),
        deoptimizations_(4, info->zone()),
        jump_table_(4, info->zone()),
        inlined_function_count_(0),
        scope_(info->scope()),
        translations_(info->zone()),
        deferred_(8, info->zone()),
        dynamic_frame_alignment_(false),
        support_aligned_spilled_doubles_(false),
        osr_pc_offset_(-1),
        frame_is_built_(false),
        safepoints_(info->zone()),
        resolver_(this),
        expected_safepoint_kind_(Safepoint::kSimple) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }

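  // Returns the id of the block that a branch to block_id should actually
  // target, skipping over any blocks that were folded away.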
  int LookupDestination(int block_id) const {
    return chunk()->LookupDestination(block_id);
  }

  bool IsNextEmittedBlock(int block_id) const {
    return LookupDestination(block_id) == GetNextEmittedBlock();
  }

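  // Whether a stack frame must be set up eagerly in the prologue, or only
  // lazily on the deferred-code paths that make calls.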
  bool NeedsEagerFrame() const {
    return GetStackSlotCount() > 0 ||
           info()->is_non_deferred_calling() ||
           !info()->IsStub() ||
           info()->requires_frame();
  }
  bool NeedsDeferredFrame() const {
    return !NeedsEagerFrame() && info()->is_deferred_calling();
  }

  // Support for converting LOperands to assembler types.
  Operand ToOperand(LOperand* op) const;
  Register ToRegister(LOperand* op) const;
  XMMRegister ToDoubleRegister(LOperand* op) const;

  bool IsInteger32(LConstantOperand* op) const;
  bool IsSmi(LConstantOperand* op) const;
  Immediate ToImmediate(LOperand* op, const Representation& r) const {
    return Immediate(ToRepresentation(LConstantOperand::cast(op), r));
  }
  double ToDouble(LConstantOperand* op) const;

  Handle<Object> ToHandle(LConstantOperand* op) const;

  // The operand denoting the second word (the one with a higher address) of
  // a double stack slot.
  Operand HighOperand(LOperand* op);

  // Tries to generate code for the entire chunk; this may fail if the
  // chunk contains constructs we cannot handle. Returns true if the code
  // generation attempt succeeded.
  bool GenerateCode();

  // Finish the code by setting stack height, safepoint, and bailout
  // information on it.
  void FinishCode(Handle<Code> code);

  // Deferred code support.
  void DoDeferredNumberTagD(LNumberTagD* instr);

  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
  void DoDeferredNumberTagIU(LInstruction* instr,
                             LOperand* value,
                             LOperand* temp,
                             IntegerSignedness signedness);

  void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
  void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
  void DoDeferredStackCheck(LStackCheck* instr);
  void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
  void DoDeferredAllocate(LAllocate* instr);
  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                   Register object,
                                   Register index);

  // Parallel move support.
  void DoParallelMove(LParallelMove* move);
  void DoGap(LGap* instr);

  // Emit frame translation commands for an environment.
  void WriteTranslation(LEnvironment* environment, Translation* translation);

  void EnsureRelocSpaceForDeoptimization();

  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO

 private:
  LanguageMode language_mode() const { return info()->language_mode(); }

  Scope* scope() const { return scope_; }

  XMMRegister double_scratch0() const { return xmm0; }

  void EmitClassOfTest(Label* if_true,
                       Label* if_false,
                       Handle<String> class_name,
                       Register input,
                       Register temporary,
                       Register temporary2);

  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }

  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

  void SaveCallerDoubles();
  void RestoreCallerDoubles();

  // Code generation passes. The Generate* passes return true if code
  // generation should continue.
  void GenerateBodyInstructionPre(LInstruction* instr) override;
  void GenerateBodyInstructionPost(LInstruction* instr) override;
  bool GeneratePrologue();
  bool GenerateDeferredCode();
  bool GenerateJumpTable();
  bool GenerateSafepointTable();

  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
  void GenerateOsrPrologue();

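  // How the safepoint for a call emitted by an instruction should be
  // recorded.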
  enum SafepointMode {
    RECORD_SIMPLE_SAFEPOINT,
    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
  };

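  // Support for calling code objects and the runtime. Each call site also
  // records the safepoint and lazy-deoptimization information the calling
  // instruction requires.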
  void CallCode(Handle<Code> code,
                RelocInfo::Mode mode,
                LInstruction* instr);

  void CallCodeGeneric(Handle<Code> code,
                       RelocInfo::Mode mode,
                       LInstruction* instr,
                       SafepointMode safepoint_mode);

  void CallRuntime(const Runtime::Function* fun,
                   int argc,
                   LInstruction* instr,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  void CallRuntime(Runtime::FunctionId id,
                   int argc,
                   LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, argc, instr);
  }

  void CallRuntimeFromDeferred(Runtime::FunctionId id,
                               int argc,
                               LInstruction* instr,
                               LOperand* context);

  void LoadContextFromDeferred(LOperand* context);

  // Generate a direct call to a known function. Expects the function
  // to be in edi.
  void CallKnownFunction(Handle<JSFunction> function,
                         int formal_parameter_count, int arity,
                         LInstruction* instr);

  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode);

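  // Deoptimization support. DeoptimizeIf emits a conditional jump to a
  // deoptimization bailout when cc is satisfied, using the environment
  // registered for the instruction.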
  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                            Safepoint::DeoptMode mode);
  void DeoptimizeIf(Condition cc, LInstruction* instr,
                    Deoptimizer::DeoptReason deopt_reason,
                    Deoptimizer::BailoutType bailout_type);
  void DeoptimizeIf(Condition cc, LInstruction* instr,
                    Deoptimizer::DeoptReason deopt_reason);

  bool DeoptEveryNTimes() {
    return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
  }

  void AddToTranslation(LEnvironment* environment,
                        Translation* translation,
                        LOperand* op,
                        bool is_tagged,
                        bool is_uint32,
                        int* object_index_pointer,
                        int* dematerialized_index_pointer);
  void PopulateDeoptimizationData(Handle<Code> code);

  void PopulateDeoptimizationLiteralsWithInlinedFunctions();

  Register ToRegister(int index) const;
  XMMRegister ToDoubleRegister(int index) const;
  int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
  int32_t ToInteger32(LConstantOperand* op) const;
  ExternalReference ToExternalReference(LConstantOperand* op) const;

  Operand BuildFastArrayOperand(LOperand* elements_pointer,
                                LOperand* key,
                                Representation key_representation,
                                ElementsKind elements_kind,
                                uint32_t base_offset);

  Operand BuildSeqStringOperand(Register string,
                                LOperand* index,
                                String::Encoding encoding);

  void EmitIntegerMathAbs(LMathAbs* instr);

  // Support for recording safepoint and position information.
  void RecordSafepoint(LPointerMap* pointers,
                       Safepoint::Kind kind,
                       int arguments,
                       Safepoint::DeoptMode mode);
  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
  void RecordSafepoint(Safepoint::DeoptMode mode);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    Safepoint::DeoptMode mode);

  void RecordAndWritePosition(int position) override;

  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
  void EmitGoto(int block);

  // EmitBranch expects to be the last instruction of a block.
  template<class InstrType>
  void EmitBranch(InstrType instr, Condition cc);
  template <class InstrType>
  void EmitTrueBranch(InstrType instr, Condition cc);
  template <class InstrType>
  void EmitFalseBranch(InstrType instr, Condition cc);
  void EmitNumberUntagD(LNumberUntagD* instr, Register input, Register temp,
                        XMMRegister result, NumberUntagDMode mode);

  // Emits optimized code for typeof x == "y". Modifies input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);

  // Emits optimized code for %_IsString(x). Preserves input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitIsString(Register input,
                         Register temp1,
                         Label* is_not_string,
                         SmiCheck check_needed);

  // Emits optimized code for %_IsConstructCall().
  // Caller should branch on equal condition.
  void EmitIsConstructCall(Register temp);

  // Emits optimized code to deep-copy the contents of statically known
  // object graphs (e.g. object literal boilerplate).
  void EmitDeepCopy(Handle<JSObject> object,
                    Register result,
                    Register source,
                    int* offset,
                    AllocationSiteMode mode);

  void EnsureSpaceForLazyDeopt(int space_needed) override;
  void DoLoadKeyedExternalArray(LLoadKeyed* instr);
  void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
  void DoLoadKeyedFixedArray(LLoadKeyed* instr);
  void DoStoreKeyedExternalArray(LStoreKeyed* instr);
  void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
  void DoStoreKeyedFixedArray(LStoreKeyed* instr);

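  // Moves the type feedback vector and slot index of a vector-based IC into
  // the registers its calling convention expects.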
  template <class T>
  void EmitVectorLoadICRegisters(T* instr);
  template <class T>
  void EmitVectorStoreICRegisters(T* instr);

  void EmitReturn(LReturn* instr, bool dynamic_frame_alignment);

  // Emits code for pushing either a tagged constant, a (non-double)
  // register, or a stack slot operand.
  void EmitPushTaggedOperand(LOperand* operand);

  friend class LGapResolver;

#ifdef _MSC_VER
  // On Windows, you may not access the stack more than one page below
  // the most recently mapped page. To make the allocated area accessible
  // at arbitrary offsets, we write an arbitrary value to each page in the
  // range esp + offset - page_size .. esp in turn.
  void MakeSureStackPagesMapped(int offset);
#endif

  ZoneList<LEnvironment*> deoptimizations_;
  ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
  int inlined_function_count_;
  Scope* const scope_;
  TranslationBuffer translations_;
  ZoneList<LDeferredCode*> deferred_;
  bool dynamic_frame_alignment_;
  bool support_aligned_spilled_doubles_;
  int osr_pc_offset_;
  bool frame_is_built_;

  // Builder that keeps track of safepoints in the code. The table
  // itself is emitted at the end of the generated code.
  SafepointTableBuilder safepoints_;

  // Compiles a set of parallel moves into a sequential list of moves.
  LGapResolver resolver_;

  Safepoint::Kind expected_safepoint_kind_;

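  // RAII helper: pushes the safepoint registers on construction and pops
  // them on destruction, tracking the expected safepoint kind so that
  // mismatched recordings trip the DCHECKs below. A hypothetical usage
  // sketch in generated-code context:
  //
  //   {
  //     PushSafepointRegistersScope scope(this);
  //     // ... emit a call and record a safepoint with registers ...
  //   }  // Registers are popped here.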
  class PushSafepointRegistersScope final BASE_EMBEDDED {
   public:
    explicit PushSafepointRegistersScope(LCodeGen* codegen)
        : codegen_(codegen) {
      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
      codegen_->masm_->PushSafepointRegisters();
      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
      DCHECK(codegen_->info()->is_calling());
    }

    ~PushSafepointRegistersScope() {
      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
      codegen_->masm_->PopSafepointRegisters();
      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
    }

   private:
    LCodeGen* codegen_;
  };

  friend class LDeferredCode;
  friend class LEnvironment;
  friend class SafepointGenerator;
  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};


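// A piece of code emitted out of line, after the main instruction stream.
// Subclasses implement Generate(); the main code path jumps to entry() and
// the deferred code jumps back via exit() (or an external exit label, if one
// was set with SetExit()).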
class LDeferredCode : public ZoneObject {
 public:
  explicit LDeferredCode(LCodeGen* codegen)
      : codegen_(codegen),
        external_exit_(NULL),
        instruction_index_(codegen->current_instruction_) {
    codegen->AddDeferredCode(this);
  }

  virtual ~LDeferredCode() {}
  virtual void Generate() = 0;
  virtual LInstruction* instr() = 0;

  void SetExit(Label* exit) { external_exit_ = exit; }
  Label* entry() { return &entry_; }
  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
  Label* done() { return codegen_->NeedsDeferredFrame() ? &done_ : exit(); }
  int instruction_index() const { return instruction_index_; }

 protected:
  LCodeGen* codegen() const { return codegen_; }
  MacroAssembler* masm() const { return codegen_->masm(); }

 private:
  LCodeGen* codegen_;
  Label entry_;
  Label exit_;
  Label* external_exit_;
  Label done_;
  int instruction_index_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_IA32_LITHIUM_CODEGEN_IA32_H_