Chromium Code Reviews

Unified Diff: runtime/vm/flow_graph_compiler.h

Issue 11956004: Fix vm code base so that it can be built for --arch=simarm (no snapshot yet). (Closed)
Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 7 years, 11 months ago
 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.

 #ifndef VM_FLOW_GRAPH_COMPILER_H_
 #define VM_FLOW_GRAPH_COMPILER_H_

 #include "vm/allocation.h"
 #include "vm/assembler.h"
 #include "vm/assembler_macros.h"
 #include "vm/code_descriptors.h"
 #include "vm/code_generator.h"
 #include "vm/intermediate_language.h"

 namespace dart {

 // Forward declarations.
+class Code;
+class DeoptInfoBuilder;
+class FlowGraph;
 class FlowGraphCompiler;
-class DeoptInfoBuilder;
+class Function;
+template <typename T> class GrowableArray;
+class ParsedFunction;
+

 class ParallelMoveResolver : public ValueObject {
  public:
   explicit ParallelMoveResolver(FlowGraphCompiler* compiler);

   // Resolve a set of parallel moves, emitting assembler instructions.
   void EmitNativeCode(ParallelMoveInstr* parallel_move);

  private:
   // Build the initial list of moves.

(... 106 unchanged lines skipped ...)

   virtual void EmitNativeCode(FlowGraphCompiler* compiler) = 0;

  private:
   Label entry_label_;
   Label exit_label_;

   DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
 };


+struct CidTarget {
+  intptr_t cid;
+  Function* target;
+  intptr_t count;
+  CidTarget(intptr_t cid_arg,
+            Function* target_arg,
+            intptr_t count_arg)
+      : cid(cid_arg), target(target_arg), count(count_arg) {}
+};
+
+
+class FlowGraphCompiler : public ValueObject {
+ private:
+  struct BlockInfo : public ZoneAllocated {
+   public:
+    BlockInfo() : label() { }
+    Label label;
+  };
+
+ public:
+  FlowGraphCompiler(Assembler* assembler,
+                    const FlowGraph& flow_graph,
+                    bool is_optimizing);
+
+  ~FlowGraphCompiler();
+
+  static bool SupportsUnboxedMints();
+
+  // Accessors.
+  Assembler* assembler() const { return assembler_; }
+  const ParsedFunction& parsed_function() const { return parsed_function_; }
+  const GrowableArray<BlockEntryInstr*>& block_order() const {
+    return block_order_;
+  }
+  DescriptorList* pc_descriptors_list() const {
+    return pc_descriptors_list_;
+  }
+  BlockEntryInstr* current_block() const { return current_block_; }
+  void set_current_block(BlockEntryInstr* value) {
+    current_block_ = value;
+  }
+  static bool CanOptimize();
+  bool CanOptimizeFunction() const;
+  bool is_optimizing() const { return is_optimizing_; }
+
+  const GrowableArray<BlockInfo*>& block_info() const { return block_info_; }
+  ParallelMoveResolver* parallel_move_resolver() {
+    return &parallel_move_resolver_;
+  }
+
+  // The constructor is lightweight; major initialization work should occur
+  // here. This makes it easier to measure time spent in the compiler.
+  void InitCompiler();
+
+  void CompileGraph();
+
+  void VisitBlocks();
+
+  // Bail out of the flow graph compiler. Does not return to the caller.
+  void Bailout(const char* reason);
+
+  void LoadDoubleOrSmiToFpu(FpuRegister result,
+                            Register reg,
+                            Register temp,
+                            Label* not_double_or_smi);
+
+  // Returns 'true' if code generation for this function is complete, i.e.,
+  // no fall-through to regular code is needed.
+  bool TryIntrinsify();
+
+  void GenerateCallRuntime(intptr_t token_pos,
+                           const RuntimeEntry& entry,
+                           LocationSummary* locs);
+
+  void GenerateCall(intptr_t token_pos,
+                    const ExternalLabel* label,
+                    PcDescriptors::Kind kind,
+                    LocationSummary* locs);
+
+  void GenerateDartCall(intptr_t deopt_id,
+                        intptr_t token_pos,
+                        const ExternalLabel* label,
+                        PcDescriptors::Kind kind,
+                        LocationSummary* locs);
+
+  void GenerateAssertAssignable(intptr_t token_pos,
+                                const AbstractType& dst_type,
+                                const String& dst_name,
+                                LocationSummary* locs);
+
+  void GenerateInstanceOf(intptr_t token_pos,
+                          const AbstractType& type,
+                          bool negate_result,
+                          LocationSummary* locs);
+
+  void GenerateInstanceCall(intptr_t deopt_id,
+                            intptr_t token_pos,
+                            intptr_t argument_count,
+                            const Array& argument_names,
+                            LocationSummary* locs,
+                            const ICData& ic_data);
+
+  void GenerateStaticCall(intptr_t deopt_id,
+                          intptr_t token_pos,
+                          const Function& function,
+                          intptr_t argument_count,
+                          const Array& argument_names,
+                          LocationSummary* locs);
+
+  void GenerateNumberTypeCheck(Register kClassIdReg,
+                               const AbstractType& type,
+                               Label* is_instance_lbl,
+                               Label* is_not_instance_lbl);
+  void GenerateStringTypeCheck(Register kClassIdReg,
+                               Label* is_instance_lbl,
+                               Label* is_not_instance_lbl);
+  void GenerateListTypeCheck(Register kClassIdReg,
+                             Label* is_instance_lbl);
+
+  void EmitComment(Instruction* instr);
+
+  void EmitOptimizedInstanceCall(ExternalLabel* target_label,
+                                 const ICData& ic_data,
+                                 const Array& arguments_descriptor,
+                                 intptr_t argument_count,
+                                 intptr_t deopt_id,
+                                 intptr_t token_pos,
+                                 LocationSummary* locs);
+
+  void EmitInstanceCall(ExternalLabel* target_label,
+                        const ICData& ic_data,
+                        const Array& arguments_descriptor,
+                        intptr_t argument_count,
+                        intptr_t deopt_id,
+                        intptr_t token_pos,
+                        LocationSummary* locs);
+
+  void EmitMegamorphicInstanceCall(const ICData& ic_data,
+                                   const Array& arguments_descriptor,
+                                   intptr_t argument_count,
+                                   intptr_t deopt_id,
+                                   intptr_t token_pos,
+                                   LocationSummary* locs);
+
+  void EmitTestAndCall(const ICData& ic_data,
+                       Register class_id_reg,
+                       intptr_t arg_count,
+                       const Array& arg_names,
+                       Label* deopt,
+                       intptr_t deopt_id,
+                       intptr_t token_index,
+                       LocationSummary* locs);
+
+  void EmitDoubleCompareBranch(Condition true_condition,
+                               FpuRegister left,
+                               FpuRegister right,
+                               BranchInstr* branch);
+  void EmitDoubleCompareBool(Condition true_condition,
+                             FpuRegister left,
+                             FpuRegister right,
+                             Register result);
+
+  void EmitEqualityRegConstCompare(Register reg,
+                                   const Object& obj,
+                                   bool needs_number_check);
+  void EmitEqualityRegRegCompare(Register left,
+                                 Register right,
+                                 bool needs_number_check);
+  // Implements equality: if either argument is null, do an identity check.
+  // The fall-through calls super equality.
+  void EmitSuperEqualityCallPrologue(Register result, Label* skip_call);
+
+  intptr_t StackSize() const;
+
+  // Returns the assembler label associated with the given block entry.
+  Label* GetBlockLabel(BlockEntryInstr* block_entry) const;
+
+  // Returns true if there is a next block after the current one in
+  // the block order and if it is the given block.
+  bool IsNextBlock(BlockEntryInstr* block_entry) const;
+
+  void AddExceptionHandler(intptr_t try_index,
+                           intptr_t outer_try_index,
+                           intptr_t pc_offset,
+                           const Array& handler_types);
+  void AddCurrentDescriptor(PcDescriptors::Kind kind,
+                            intptr_t deopt_id,
+                            intptr_t token_pos);
+
+  void RecordSafepoint(LocationSummary* locs);
+
+  Label* AddDeoptStub(intptr_t deopt_id, DeoptReasonId reason);
+
+  void AddDeoptIndexAtCall(intptr_t deopt_id, intptr_t token_pos);
+
+  void AddSlowPathCode(SlowPathCode* slow_path);
+
+  void FinalizeExceptionHandlers(const Code& code);
+  void FinalizePcDescriptors(const Code& code);
+  void FinalizeDeoptInfo(const Code& code);
+  void FinalizeStackmaps(const Code& code);
+  void FinalizeVarDescriptors(const Code& code);
+  void FinalizeComments(const Code& code);
+  void FinalizeStaticCallTargetsTable(const Code& code);
+
+  const Class& double_class() const { return double_class_; }
+
+  void SaveLiveRegisters(LocationSummary* locs);
+  void RestoreLiveRegisters(LocationSummary* locs);
+
+  // Returns true if the compiled function has a finally clause.
+  bool HasFinally() const;
+
+  intptr_t CurrentTryIndex() const {
+    if (current_block_ == NULL) {
+      return CatchClauseNode::kInvalidTryIndex;
+    }
+    return current_block_->try_index();
+  }
+
+  bool may_reoptimize() const { return may_reoptimize_; }
+
+  static const int kLocalsOffsetFromFP = (-1 * kWordSize);
+
+  static Condition FlipCondition(Condition condition);
+
+  static bool EvaluateCondition(Condition condition, intptr_t l, intptr_t r);
+
+  // Array/list element address computations.
+  static intptr_t DataOffsetFor(intptr_t cid);
+  static intptr_t ElementSizeFor(intptr_t cid);
+  static FieldAddress ElementAddressForIntIndex(intptr_t cid,
+                                                Register array,
+                                                intptr_t offset);
+  static FieldAddress ElementAddressForRegIndex(intptr_t cid,
+                                                Register array,
+                                                Register index);
+  static Address ExternalElementAddressForIntIndex(intptr_t cid,
+                                                   Register array,
+                                                   intptr_t offset);
+  static Address ExternalElementAddressForRegIndex(intptr_t cid,
+                                                   Register array,
+                                                   Register index);
+
+ private:
+  void EmitFrameEntry();
+
+  void AddStaticCallTarget(const Function& function);
+
+  void GenerateDeferredCode();
+
+  void EmitInstructionPrologue(Instruction* instr);
+  void EmitInstructionEpilogue(Instruction* instr);
+
+  // Emit code to load a Value into register 'dst'.
+  void LoadValue(Register dst, Value* value);
+
+  void EmitStaticCall(const Function& function,
+                      const Array& arguments_descriptor,
+                      intptr_t argument_count,
+                      intptr_t deopt_id,
+                      intptr_t token_pos,
+                      LocationSummary* locs);
+
+  // Type checking helper methods.
+  void CheckClassIds(Register class_id_reg,
+                     const GrowableArray<intptr_t>& class_ids,
+                     Label* is_instance_lbl,
+                     Label* is_not_instance_lbl);
+
+  RawSubtypeTestCache* GenerateInlineInstanceof(intptr_t token_pos,
+                                                const AbstractType& type,
+                                                Label* is_instance_lbl,
+                                                Label* is_not_instance_lbl);
+
+  RawSubtypeTestCache* GenerateInstantiatedTypeWithArgumentsTest(
+      intptr_t token_pos,
+      const AbstractType& dst_type,
+      Label* is_instance_lbl,
+      Label* is_not_instance_lbl);
+
+  bool GenerateInstantiatedTypeNoArgumentsTest(intptr_t token_pos,
+                                               const AbstractType& dst_type,
+                                               Label* is_instance_lbl,
+                                               Label* is_not_instance_lbl);
+
+  RawSubtypeTestCache* GenerateUninstantiatedTypeTest(
+      intptr_t token_pos,
+      const AbstractType& dst_type,
+      Label* is_instance_lbl,
+      Label* is_not_instance_label);
+
+  RawSubtypeTestCache* GenerateSubtype1TestCacheLookup(
+      intptr_t token_pos,
+      const Class& type_class,
+      Label* is_instance_lbl,
+      Label* is_not_instance_lbl);
+
+  enum TypeTestStubKind {
+    kTestTypeOneArg,
+    kTestTypeTwoArgs,
+    kTestTypeThreeArgs,
+  };
+
+  RawSubtypeTestCache* GenerateCallSubtypeTestStub(TypeTestStubKind test_kind,
+                                                   Register instance_reg,
+                                                   Register type_arguments_reg,
+                                                   Register temp_reg,
+                                                   Label* is_instance_lbl,
+                                                   Label* is_not_instance_lbl);
+
+  // Returns true if checking against this type is a direct class id
+  // comparison.
+  bool TypeCheckAsClassEquality(const AbstractType& type);
+
+  void GenerateBoolToJump(Register bool_reg, Label* is_true, Label* is_false);
+
+  void CopyParameters();
+
+  void GenerateInlinedGetter(intptr_t offset);
+  void GenerateInlinedSetter(intptr_t offset);
+
+  // Perform a greedy local register allocation. Consider all registers free.
+  void AllocateRegistersLocally(Instruction* instr);
+
+  // Map a block number in a forward iteration into the block number in the
+  // corresponding reverse iteration. Used to obtain an index into
+  // block_order for reverse iterations.
+  intptr_t reverse_index(intptr_t index) const {
+    return block_order_.length() - index - 1;
+  }
+
+  // Returns 'sorted' array in decreasing count order.
+  // The expected number of elements to sort is less than 10.
+  static void SortICDataByCount(const ICData& ic_data,
+                                GrowableArray<CidTarget>* sorted);
+
+  class Assembler* assembler_;
+  const ParsedFunction& parsed_function_;
+  const GrowableArray<BlockEntryInstr*>& block_order_;
+
+  // Compiler specific per-block state. Indexed by postorder block number
+  // for convenience. This is not the block's index in the block order,
+  // which is reverse postorder.
+  BlockEntryInstr* current_block_;
+  ExceptionHandlerList* exception_handlers_list_;
+  DescriptorList* pc_descriptors_list_;
+  StackmapTableBuilder* stackmap_table_builder_;
+  GrowableArray<BlockInfo*> block_info_;
+  GrowableArray<CompilerDeoptInfo*> deopt_infos_;
+  GrowableArray<SlowPathCode*> slow_path_code_;
+  // Stores: [code offset, function, null(code)].
+  const GrowableObjectArray& static_calls_target_table_;
+  const bool is_optimizing_;
+  // Set to true if optimized code has IC calls.
+  bool may_reoptimize_;
+
+  const Class& double_class_;
+
+  ParallelMoveResolver parallel_move_resolver_;
+
+  // Currently, instructions generate deopt stubs internally by calling
+  // AddDeoptStub. To communicate the deoptimization environment that should
+  // be used when deoptimizing, we store it in this variable. In the future,
+  // AddDeoptStub should be moved out of the instruction template.
+  Environment* pending_deoptimization_env_;
+
+  DISALLOW_COPY_AND_ASSIGN(FlowGraphCompiler);
+};
+
 } // namespace dart

-#if defined(TARGET_ARCH_IA32)
-#include "vm/flow_graph_compiler_ia32.h"
-#elif defined(TARGET_ARCH_X64)
-#include "vm/flow_graph_compiler_x64.h"
-#elif defined(TARGET_ARCH_ARM)
-#include "vm/flow_graph_compiler_arm.h"
-#else
-#error Unknown architecture.
-#endif
-
 #endif // VM_FLOW_GRAPH_COMPILER_H_
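
A note on the ParallelMoveResolver declared in this patch: it turns a set of logically simultaneous moves (as produced by register allocation at block boundaries) into a sequence of ordinary moves. The subtlety is that moves may form cycles, such as a plain swap of two registers. Below is a minimal, self-contained sketch of the standard resolution strategy over integer "locations"; the Move struct, ResolveParallelMoves, and the scratch-slot convention are illustrative stand-ins, not the VM's actual API.

#include <cstddef>
#include <cstdio>
#include <vector>

// One pending move: copy the value at location 'src' into location 'dst'.
struct Move { int src; int dst; };

// Resolve parallel moves over an array of integer locations. 'scratch' is a
// spare location index used to break cycles (a real compiler would use a
// scratch register or an exchange instruction).
void ResolveParallelMoves(std::vector<Move> moves, std::vector<int>* loc,
                          int scratch) {
  while (!moves.empty()) {
    bool progress = false;
    for (size_t i = 0; i < moves.size(); ++i) {
      // A move is safe if no other pending move still reads its destination.
      bool blocked = false;
      for (size_t j = 0; j < moves.size(); ++j) {
        if (j != i && moves[j].src == moves[i].dst) blocked = true;
      }
      if (!blocked) {
        (*loc)[moves[i].dst] = (*loc)[moves[i].src];  // "emit" the move
        moves.erase(moves.begin() + static_cast<std::ptrdiff_t>(i));
        progress = true;
        break;
      }
    }
    if (!progress) {
      // Every remaining move is blocked, so they form cycles. Spill one
      // source to the scratch location and retarget that move; this
      // unblocks the writer of that source on the next pass.
      (*loc)[scratch] = (*loc)[moves[0].src];
      moves[0].src = scratch;
    }
  }
}

int main() {
  // Locations 0 and 1 hold 10 and 20; the parallel move swaps them.
  std::vector<int> loc = {10, 20, 0};          // index 2 is the scratch slot
  std::vector<Move> moves = {{0, 1}, {1, 0}};  // 1<-0 and 0<-1, in parallel
  ResolveParallelMoves(moves, &loc, 2);
  std::printf("%d %d\n", loc[0], loc[1]);      // prints "20 10"
}

A real resolver emits register moves or exchanges instead of array assignments, but the blocking test and the cycle-breaking spill are the same idea.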
OLDNEW
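kLocalsOffsetFromFP is defined in the class as (-1 * kWordSize): the first local variable slot sits one word below the frame pointer. A small sketch of the resulting offset arithmetic, assuming a 4-byte word and local slots numbered upward as they grow away from FP (both are assumptions for illustration, not the VM's actual frame layout):

#include <cstdio>

const int kWordSize = 4;  // assumed 32-bit target word size
const int kLocalsOffsetFromFP = -1 * kWordSize;

// Hypothetical helper: byte offset of local slot 'slot_index' from FP,
// assuming slot 0 is the first local and the stack grows downward.
int LocalSlotOffset(int slot_index) {
  // Slot 0 is at FP - 4, slot 1 at FP - 8, and so on.
  return kLocalsOffsetFromFP - slot_index * kWordSize;
}

int main() {
  std::printf("%d %d\n", LocalSlotOffset(0), LocalSlotOffset(2));  // -4 -12
}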
« no previous file with comments | « runtime/vm/flow_graph_allocator.cc ('k') | runtime/vm/flow_graph_compiler.cc » ('j') | no next file with comments »
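FlipCondition and EvaluateCondition are declared without bodies here. The sketch below shows the behavior their names and signatures suggest: flipping lets the compiler swap comparison operands, and evaluating folds a comparison on two known integers. The Condition enumerators are stand-ins (an assumption), not the VM's actual encoding.

#include <cstdint>
#include <cstdio>

enum Condition { EQ, NE, LT, LE, GT, GE };

// Return the condition that holds for (r, l) whenever 'c' holds for (l, r).
Condition FlipCondition(Condition c) {
  switch (c) {
    case LT: return GT;  // l < r  is the same as  r > l
    case LE: return GE;
    case GT: return LT;
    case GE: return LE;
    default: return c;   // EQ and NE are symmetric
  }
}

// Evaluate an integer comparison on two compile-time-known values.
bool EvaluateCondition(Condition c, intptr_t l, intptr_t r) {
  switch (c) {
    case EQ: return l == r;
    case NE: return l != r;
    case LT: return l < r;
    case LE: return l <= r;
    case GT: return l > r;
    case GE: return l >= r;
  }
  return false;
}

int main() {
  for (int c = EQ; c <= GE; ++c) {
    Condition cond = static_cast<Condition>(c);
    // Flipping the condition and swapping the operands preserves the result.
    std::printf("%d", EvaluateCondition(cond, 3, 5) ==
                      EvaluateCondition(FlipCondition(cond), 5, 3));
  }
  std::printf("\n");  // prints "111111"
}

That invariant, EvaluateCondition(FlipCondition(c), r, l) == EvaluateCondition(c, l, r), is the point of providing the pair.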

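The element-address helpers (DataOffsetFor, ElementSizeFor, ElementAddressForIntIndex, and friends) compute where element i of an array object lives relative to a tagged pointer: past the object header, minus the heap-object tag that FieldAddress accounts for. A sketch of that arithmetic, with illustrative constants; the header size and element size below are assumptions, not the VM's actual layout:

#include <cstdint>
#include <cstdio>

const intptr_t kHeapObjectTag = 1;  // low-bit tag on object pointers
const intptr_t kDataOffset = 12;    // assumed offset of element 0, in bytes
const intptr_t kElementSize = 4;    // assumed element size for some cid

// Byte offset of element 'index' relative to the tagged array pointer,
// as a FieldAddress-style addressing mode would encode it.
intptr_t ElementOffsetForIntIndex(intptr_t index) {
  return kDataOffset - kHeapObjectTag + index * kElementSize;
}

int main() {
  // 12 - 1 + 3 * 4 = 23 bytes past the tagged array pointer.
  std::printf("%ld\n", static_cast<long>(ElementOffsetForIntIndex(3)));
}

The External* variants return a plain Address rather than a FieldAddress, consistent with external (untagged, off-heap) backing stores that need no tag adjustment.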
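Finally, SortICDataByCount's comment promises a decreasing-count order over fewer than ten entries; at that size a simple insertion sort is the natural choice. The sketch below uses a stand-in Entry type rather than CidTarget (the Function* target field is omitted to keep it self-contained):

#include <cstdint>
#include <cstdio>
#include <vector>

struct Entry {
  intptr_t cid;    // receiver class id observed at the call site
  intptr_t count;  // how often this class id was seen
};

// Insertion sort by decreasing count; stable and ideal for tiny inputs.
void SortByCountDescending(std::vector<Entry>* entries) {
  for (size_t i = 1; i < entries->size(); ++i) {
    Entry key = (*entries)[i];
    size_t j = i;
    while (j > 0 && (*entries)[j - 1].count < key.count) {
      (*entries)[j] = (*entries)[j - 1];  // shift smaller counts right
      --j;
    }
    (*entries)[j] = key;
  }
}

int main() {
  std::vector<Entry> e = {{10, 3}, {42, 7}, {7, 5}};
  SortByCountDescending(&e);
  for (const Entry& it : e) {
    std::printf("cid %d count %d\n", static_cast<int>(it.cid),
                static_cast<int>(it.count));
  }
  // prints cid 42, then cid 7, then cid 10
}

Presumably EmitTestAndCall consumes this order so the receiver's class id is compared against the hottest cid first, giving the most common receivers the shortest path through the dispatch sequence.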