OLD | NEW |
---|---|
1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/compiler.h" | 5 #include "vm/compiler.h" |
6 | 6 |
7 #include "vm/assembler.h" | 7 #include "vm/assembler.h" |
8 | 8 |
9 #include "vm/ast_printer.h" | 9 #include "vm/ast_printer.h" |
10 #include "vm/code_generator.h" | 10 #include "vm/code_generator.h" |
(...skipping 234 matching lines...)
245 bool optimized, | 245 bool optimized, |
246 intptr_t osr_id) { | 246 intptr_t osr_id) { |
247 const Function& function = parsed_function->function(); | 247 const Function& function = parsed_function->function(); |
248 if (optimized && !function.is_optimizable()) { | 248 if (optimized && !function.is_optimizable()) { |
249 return false; | 249 return false; |
250 } | 250 } |
251 TimerScope timer(FLAG_compiler_stats, &CompilerStats::codegen_timer); | 251 TimerScope timer(FLAG_compiler_stats, &CompilerStats::codegen_timer); |
252 bool is_compiled = false; | 252 bool is_compiled = false; |
253 Isolate* isolate = Isolate::Current(); | 253 Isolate* isolate = Isolate::Current(); |
254 HANDLESCOPE(isolate); | 254 HANDLESCOPE(isolate); |
255 const intptr_t prev_deopt_id = isolate->deopt_id(); | 255 bool done = false; |
256 isolate->set_deopt_id(0); | 256 // static to evade gcc's longjmp variable smashing checks. |
srdjan 2013/08/01 19:59:13: Maybe add a comment explaining that use_far_branch
zra 2013/08/01 20:16:31: Done.
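For readers following the new control flow: the patch wraps code generation in a retry loop that compiles with near branches first and, if the assembler bails out with a "Branch offset overflow" sticky error, jumps back and recompiles the same function with far branches (via the new `Assembler(use_far_branches)` constructor argument). The standalone sketch below shows only that setjmp/longjmp retry pattern under simplified assumptions; `GenerateCode` and `bailout_message` are hypothetical stand-ins, not the VM's `Isolate`/`LongJump`/sticky-error machinery.

```cpp
// Minimal standalone sketch of the retry-with-far-branches pattern
// (hypothetical names; the real code uses Isolate, LongJump, the sticky
// error object, and Assembler(use_far_branches)).
#include <csetjmp>
#include <cstdio>
#include <cstring>

static std::jmp_buf bailout_jump;
static const char* bailout_message = "";

// Stand-in for code generation: bails out when near branches don't reach.
static void GenerateCode(bool use_far_branches) {
  if (!use_far_branches) {
    bailout_message = "Branch offset overflow";
    std::longjmp(bailout_jump, 1);
  }
  std::printf("compiled with far branches\n");
}

int main() {
  // volatile keeps the flag well-defined across longjmp; the patch instead
  // makes its flag static, as the "gcc's longjmp variable smashing" comment
  // above notes.
  volatile bool use_far_branches = false;
  bool done = false;
  while (!done) {
    if (setjmp(bailout_jump) == 0) {
      GenerateCode(use_far_branches);
      done = true;                                   // compiled successfully
    } else if (!use_far_branches &&
               std::strcmp(bailout_message, "Branch offset overflow") == 0) {
      use_far_branches = true;                       // retry with far branches
    } else {
      done = true;                                   // genuine bailout: give up
    }
  }
  return 0;
}
```

On the first pass the generator bails out, the loop flips the flag, and the second pass succeeds; any other bailout leaves the loop, mirroring the `done = true; ASSERT(optimized);` path in the patch.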
257 LongJump* old_base = isolate->long_jump_base(); | 257 static bool use_far_branches = false; |
258 LongJump bailout_jump; | 258 while (!done) { |
259 isolate->set_long_jump_base(&bailout_jump); | 259 const intptr_t prev_deopt_id = isolate->deopt_id(); |
260 if (setjmp(*bailout_jump.Set()) == 0) { | 260 isolate->set_deopt_id(0); |
261 FlowGraph* flow_graph = NULL; | 261 LongJump* old_base = isolate->long_jump_base(); |
262 // TimerScope needs an isolate to be properly terminated in case of a | 262 LongJump bailout_jump; |
263 // LongJump. | 263 isolate->set_long_jump_base(&bailout_jump); |
264 { | 264 if (setjmp(*bailout_jump.Set()) == 0) { |
265 TimerScope timer(FLAG_compiler_stats, | 265 FlowGraph* flow_graph = NULL; |
266 &CompilerStats::graphbuilder_timer, | 266 // TimerScope needs an isolate to be properly terminated in case of a |
267 isolate); | 267 // LongJump. |
268 Array& ic_data_array = Array::Handle(); | 268 { |
269 TimerScope timer(FLAG_compiler_stats, | |
270 &CompilerStats::graphbuilder_timer, | |
271 isolate); | |
272 Array& ic_data_array = Array::Handle(); | |
273 if (optimized) { | |
274 ASSERT(function.HasCode()); | |
275 // Extract type feedback before the graph is built, as the graph | |
276 // builder uses it to attach it to nodes. | |
277 ASSERT(function.deoptimization_counter() < | |
278 FLAG_deoptimization_counter_threshold); | |
279 const Code& unoptimized_code = | |
280 Code::Handle(function.unoptimized_code()); | |
281 ic_data_array = unoptimized_code.ExtractTypeFeedbackArray(); | |
282 } | |
283 | |
284 // Build the flow graph. | |
285 FlowGraphBuilder builder(parsed_function, | |
286 ic_data_array, | |
287 NULL, // NULL = not inlining. | |
288 osr_id); | |
289 flow_graph = builder.BuildGraph(); | |
290 } | |
291 | |
292 if (FLAG_print_flow_graph || | |
293 (optimized && FLAG_print_flow_graph_optimized)) { | |
294 if (osr_id == Isolate::kNoDeoptId) { | |
295 FlowGraphPrinter::PrintGraph("Before Optimizations", flow_graph); | |
296 } else { | |
297 FlowGraphPrinter::PrintGraph("For OSR", flow_graph); | |
298 } | |
299 } | |
300 | |
269 if (optimized) { | 301 if (optimized) { |
270 ASSERT(function.HasCode()); | 302 TimerScope timer(FLAG_compiler_stats, |
271 // Extract type feedback before the graph is built, as the graph | 303 &CompilerStats::ssa_timer, |
272 // builder uses it to attach it to nodes. | 304 isolate); |
273 ASSERT(function.deoptimization_counter() < | 305 // Transform to SSA (virtual register 0 and no inlining arguments). |
274 FLAG_deoptimization_counter_threshold); | 306 flow_graph->ComputeSSA(0, NULL); |
275 const Code& unoptimized_code = | 307 DEBUG_ASSERT(flow_graph->VerifyUseLists()); |
276 Code::Handle(function.unoptimized_code()); | 308 if (FLAG_print_flow_graph || FLAG_print_flow_graph_optimized) { |
277 ic_data_array = unoptimized_code.ExtractTypeFeedbackArray(); | 309 FlowGraphPrinter::PrintGraph("After SSA", flow_graph); |
278 } | 310 } |
279 | 311 } |
280 // Build the flow graph. | 312 |
281 FlowGraphBuilder builder(parsed_function, | 313 |
282 ic_data_array, | 314 // Collect all instance fields that are loaded in the graph and |
283 NULL, // NULL = not inlining. | 315 // have non-generic type feedback attached to them that can |
284 osr_id); | 316 // potentially affect optimizations. |
285 flow_graph = builder.BuildGraph(); | 317 GrowableArray<const Field*> guarded_fields(10); |
286 } | 318 if (optimized) { |
287 | 319 TimerScope timer(FLAG_compiler_stats, |
288 if (FLAG_print_flow_graph || | 320 &CompilerStats::graphoptimizer_timer, |
289 (optimized && FLAG_print_flow_graph_optimized)) { | 321 isolate); |
290 if (osr_id == Isolate::kNoDeoptId) { | 322 |
291 FlowGraphPrinter::PrintGraph("Before Optimizations", flow_graph); | 323 FlowGraphOptimizer optimizer(flow_graph, &guarded_fields); |
292 } else { | 324 optimizer.ApplyICData(); |
293 FlowGraphPrinter::PrintGraph("For OSR", flow_graph); | 325 DEBUG_ASSERT(flow_graph->VerifyUseLists()); |
294 } | 326 |
295 } | 327 // Optimize (a << b) & c patterns. Must occur before |
296 | 328 // 'SelectRepresentations' which inserts conversion nodes. |
297 if (optimized) { | 329 // TODO(srdjan): Moved before inlining until environment use list can |
298 TimerScope timer(FLAG_compiler_stats, | 330 // be used to detect when shift-left is outside the scope of bit-and. |
299 &CompilerStats::ssa_timer, | 331 optimizer.TryOptimizeLeftShiftWithBitAndPattern(); |
300 isolate); | 332 DEBUG_ASSERT(flow_graph->VerifyUseLists()); |
301 // Transform to SSA (virtual register 0 and no inlining arguments). | 333 |
302 flow_graph->ComputeSSA(0, NULL); | 334 // Inlining (mutates the flow graph) |
303 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | 335 if (FLAG_use_inlining) { |
304 if (FLAG_print_flow_graph || FLAG_print_flow_graph_optimized) { | 336 TimerScope timer(FLAG_compiler_stats, |
305 FlowGraphPrinter::PrintGraph("After SSA", flow_graph); | 337 &CompilerStats::graphinliner_timer); |
306 } | 338 FlowGraphInliner inliner(flow_graph, &guarded_fields); |
307 } | 339 inliner.Inline(); |
308 | 340 // Use lists are maintained and validated by the inliner. |
309 | 341 DEBUG_ASSERT(flow_graph->VerifyUseLists()); |
310 // Collect all instance fields that are loaded in the graph and | 342 } |
311 // have non-generic type feedback attached to them that can | 343 |
312 // potentially affect optimizations. | 344 // Propagate types and eliminate more type tests. |
313 GrowableArray<const Field*> guarded_fields(10); | |
314 if (optimized) { | |
315 TimerScope timer(FLAG_compiler_stats, | |
316 &CompilerStats::graphoptimizer_timer, | |
317 isolate); | |
318 | |
319 FlowGraphOptimizer optimizer(flow_graph, &guarded_fields); | |
320 optimizer.ApplyICData(); | |
321 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
322 | |
323 // Optimize (a << b) & c patterns. Must occur before | |
324 // 'SelectRepresentations' which inserts conversion nodes. | |
325 // TODO(srdjan): Moved before inlining until environment use list can | |
326 // be used to detect when shift-left is outside the scope of bit-and. | |
327 optimizer.TryOptimizeLeftShiftWithBitAndPattern(); | |
328 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
329 | |
330 // Inlining (mutates the flow graph) | |
331 if (FLAG_use_inlining) { | |
332 TimerScope timer(FLAG_compiler_stats, | |
333 &CompilerStats::graphinliner_timer); | |
334 FlowGraphInliner inliner(flow_graph, &guarded_fields); | |
335 inliner.Inline(); | |
336 // Use lists are maintained and validated by the inliner. | |
337 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
338 } | |
339 | |
340 // Propagate types and eliminate more type tests. | |
341 if (FLAG_propagate_types) { | |
342 FlowGraphTypePropagator propagator(flow_graph); | |
343 propagator.Propagate(); | |
344 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
345 } | |
346 | |
347 // Use propagated class-ids to optimize further. | |
348 optimizer.ApplyClassIds(); | |
349 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
350 | |
351 // Do optimizations that depend on the propagated type information. | |
352 optimizer.Canonicalize(); | |
353 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
354 | |
355 BranchSimplifier::Simplify(flow_graph); | |
356 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
357 | |
358 IfConverter::Simplify(flow_graph); | |
359 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
360 | |
361 if (FLAG_constant_propagation) { | |
362 ConstantPropagator::Optimize(flow_graph); | |
363 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
364 // A canonicalization pass to remove e.g. smi checks on smi constants. | |
365 optimizer.Canonicalize(); | |
366 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
367 // Canonicalization introduced more opportunities for constant | |
368 // propagation. | |
369 ConstantPropagator::Optimize(flow_graph); | |
370 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
371 } | |
372 | |
373 // Propagate types and eliminate even more type tests. | |
374 if (FLAG_propagate_types) { | |
375 // Recompute types after constant propagation to infer more precise | |
376 // types for uses that were previously reached by now eliminated phis. | |
377 FlowGraphTypePropagator propagator(flow_graph); | |
378 propagator.Propagate(); | |
379 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
380 } | |
381 | |
382 // Unbox doubles. Performed after constant propagation to minimize | |
383 // interference from phis merging double values and tagged | |
384 // values comming from dead paths. | |
385 optimizer.SelectRepresentations(); | |
386 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
387 | |
388 if (FLAG_common_subexpression_elimination || | |
389 FLAG_loop_invariant_code_motion) { | |
390 flow_graph->ComputeBlockEffects(); | |
391 } | |
392 | |
393 if (FLAG_common_subexpression_elimination) { | |
394 if (DominatorBasedCSE::Optimize(flow_graph)) { | |
395 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
396 // Do another round of CSE to take secondary effects into account: | |
397 // e.g. when eliminating dependent loads (a.x[0] + a.x[0]) | |
398 // TODO(fschneider): Change to a one-pass optimization pass. | |
399 DominatorBasedCSE::Optimize(flow_graph); | |
400 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
401 } | |
402 } | |
403 if (FLAG_loop_invariant_code_motion && | |
404 (function.deoptimization_counter() < | |
405 FLAG_deoptimization_counter_licm_threshold)) { | |
406 LICM licm(flow_graph); | |
407 licm.Optimize(); | |
408 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
409 } | |
410 flow_graph->RemoveRedefinitions(); | |
411 | |
412 if (FLAG_range_analysis) { | |
413 if (FLAG_propagate_types) { | 345 if (FLAG_propagate_types) { |
414 // Propagate types after store-load-forwarding. Some phis may have | |
415 // become smi phis that can be processed by range analysis. | |
416 FlowGraphTypePropagator propagator(flow_graph); | 346 FlowGraphTypePropagator propagator(flow_graph); |
417 propagator.Propagate(); | 347 propagator.Propagate(); |
418 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | 348 DEBUG_ASSERT(flow_graph->VerifyUseLists()); |
419 } | 349 } |
420 // We have to perform range analysis after LICM because it | 350 |
421 // optimistically moves CheckSmi through phis into loop preheaders | 351 // Use propagated class-ids to optimize further. |
422 // making some phis smi. | 352 optimizer.ApplyClassIds(); |
423 optimizer.InferSmiRanges(); | 353 DEBUG_ASSERT(flow_graph->VerifyUseLists()); |
424 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | 354 |
425 } | 355 // Do optimizations that depend on the propagated type information. |
426 | |
427 if (FLAG_constant_propagation) { | |
428 // Constant propagation can use information from range analysis to | |
429 // find unreachable branch targets. | |
430 ConstantPropagator::OptimizeBranches(flow_graph); | |
431 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
432 } | |
433 | |
434 if (FLAG_propagate_types) { | |
435 // Recompute types after code movement was done to ensure correct | |
436 // reaching types for hoisted values. | |
437 FlowGraphTypePropagator propagator(flow_graph); | |
438 propagator.Propagate(); | |
439 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
440 } | |
441 | |
442 // Optimize try-blocks. | |
443 TryCatchAnalyzer::Optimize(flow_graph); | |
444 | |
445 // Detach environments from the instructions that can't deoptimize. | |
446 // Do it before we attempt to perform allocation sinking to minimize | |
447 // amount of materializations it has to perform. | |
448 optimizer.EliminateEnvironments(); | |
449 | |
450 // Attempt to sink allocations of temporary non-escaping objects to | |
451 // the deoptimization path. | |
452 AllocationSinking* sinking = NULL; | |
453 if (FLAG_allocation_sinking && | |
454 (flow_graph->graph_entry()->SuccessorCount() == 1)) { | |
455 // TODO(fschneider): Support allocation sinking with try-catch. | |
456 sinking = new AllocationSinking(flow_graph); | |
457 sinking->Optimize(); | |
458 } | |
459 | |
460 // Ensure that all phis inserted by optimization passes have consistent | |
461 // representations. | |
462 optimizer.SelectRepresentations(); | |
463 | |
464 if (optimizer.Canonicalize()) { | |
465 // To fully remove redundant boxing (e.g. BoxDouble used only in | |
466 // environments and UnboxDouble instructions) instruction we | |
467 // first need to replace all their uses and then fold them away. | |
468 // For now we just repeat Canonicalize twice to do that. | |
469 // TODO(vegorov): implement a separate representation folding pass. | |
470 optimizer.Canonicalize(); | 356 optimizer.Canonicalize(); |
471 } | 357 DEBUG_ASSERT(flow_graph->VerifyUseLists()); |
472 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | 358 |
473 | 359 BranchSimplifier::Simplify(flow_graph); |
474 if (sinking != NULL) { | 360 DEBUG_ASSERT(flow_graph->VerifyUseLists()); |
475 // Remove all MaterializeObject instructions inserted by allocation | 361 |
476 // sinking from the flow graph and let them float on the side referenced | 362 IfConverter::Simplify(flow_graph); |
477 // only from environments. Register allocator will consider them | 363 DEBUG_ASSERT(flow_graph->VerifyUseLists()); |
478 // as part of a deoptimization environment. | 364 |
479 sinking->DetachMaterializations(); | 365 if (FLAG_constant_propagation) { |
480 } | 366 ConstantPropagator::Optimize(flow_graph); |
481 | 367 DEBUG_ASSERT(flow_graph->VerifyUseLists()); |
482 // Perform register allocation on the SSA graph. | 368 // A canonicalization pass to remove e.g. smi checks on smi constants. |
483 FlowGraphAllocator allocator(*flow_graph); | 369 optimizer.Canonicalize(); |
484 allocator.AllocateRegisters(); | 370 DEBUG_ASSERT(flow_graph->VerifyUseLists()); |
485 | 371 // Canonicalization introduced more opportunities for constant |
486 if (FLAG_print_flow_graph || FLAG_print_flow_graph_optimized) { | 372 // propagation. |
487 FlowGraphPrinter::PrintGraph("After Optimizations", flow_graph); | 373 ConstantPropagator::Optimize(flow_graph); |
488 } | 374 DEBUG_ASSERT(flow_graph->VerifyUseLists()); |
375 } | |
376 | |
377 // Propagate types and eliminate even more type tests. | |
378 if (FLAG_propagate_types) { | |
379 // Recompute types after constant propagation to infer more precise | |
380 // types for uses that were previously reached by now eliminated phis. | |
381 FlowGraphTypePropagator propagator(flow_graph); | |
382 propagator.Propagate(); | |
383 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
384 } | |
385 | |
386 // Unbox doubles. Performed after constant propagation to minimize | |
387 // interference from phis merging double values and tagged | |
388 // values comming from dead paths. | |
389 optimizer.SelectRepresentations(); | |
390 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
391 | |
392 if (FLAG_common_subexpression_elimination || | |
393 FLAG_loop_invariant_code_motion) { | |
394 flow_graph->ComputeBlockEffects(); | |
395 } | |
396 | |
397 if (FLAG_common_subexpression_elimination) { | |
398 if (DominatorBasedCSE::Optimize(flow_graph)) { | |
399 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
400 // Do another round of CSE to take secondary effects into account: | |
401 // e.g. when eliminating dependent loads (a.x[0] + a.x[0]) | |
402 // TODO(fschneider): Change to a one-pass optimization pass. | |
403 DominatorBasedCSE::Optimize(flow_graph); | |
404 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
405 } | |
406 } | |
407 if (FLAG_loop_invariant_code_motion && | |
408 (function.deoptimization_counter() < | |
409 FLAG_deoptimization_counter_licm_threshold)) { | |
410 LICM licm(flow_graph); | |
411 licm.Optimize(); | |
412 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
413 } | |
414 flow_graph->RemoveRedefinitions(); | |
415 | |
416 if (FLAG_range_analysis) { | |
417 if (FLAG_propagate_types) { | |
418 // Propagate types after store-load-forwarding. Some phis may have | |
419 // become smi phis that can be processed by range analysis. | |
420 FlowGraphTypePropagator propagator(flow_graph); | |
421 propagator.Propagate(); | |
422 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
423 } | |
424 // We have to perform range analysis after LICM because it | |
425 // optimistically moves CheckSmi through phis into loop preheaders | |
426 // making some phis smi. | |
427 optimizer.InferSmiRanges(); | |
428 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
429 } | |
430 | |
431 if (FLAG_constant_propagation) { | |
432 // Constant propagation can use information from range analysis to | |
433 // find unreachable branch targets. | |
434 ConstantPropagator::OptimizeBranches(flow_graph); | |
435 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
436 } | |
437 | |
438 if (FLAG_propagate_types) { | |
439 // Recompute types after code movement was done to ensure correct | |
440 // reaching types for hoisted values. | |
441 FlowGraphTypePropagator propagator(flow_graph); | |
442 propagator.Propagate(); | |
443 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
444 } | |
445 | |
446 // Optimize try-blocks. | |
447 TryCatchAnalyzer::Optimize(flow_graph); | |
448 | |
449 // Detach environments from the instructions that can't deoptimize. | |
450 // Do it before we attempt to perform allocation sinking to minimize | |
451 // amount of materializations it has to perform. | |
452 optimizer.EliminateEnvironments(); | |
453 | |
454 // Attempt to sink allocations of temporary non-escaping objects to | |
455 // the deoptimization path. | |
456 AllocationSinking* sinking = NULL; | |
457 if (FLAG_allocation_sinking && | |
458 (flow_graph->graph_entry()->SuccessorCount() == 1)) { | |
459 // TODO(fschneider): Support allocation sinking with try-catch. | |
460 sinking = new AllocationSinking(flow_graph); | |
461 sinking->Optimize(); | |
462 } | |
463 | |
464 // Ensure that all phis inserted by optimization passes have consistent | |
465 // representations. | |
466 optimizer.SelectRepresentations(); | |
467 | |
468 if (optimizer.Canonicalize()) { | |
469 // To fully remove redundant boxing (e.g. BoxDouble used only in | |
470 // environments and UnboxDouble instructions) instruction we | |
471 // first need to replace all their uses and then fold them away. | |
472 // For now we just repeat Canonicalize twice to do that. | |
473 // TODO(vegorov): implement a separate representation folding pass. | |
474 optimizer.Canonicalize(); | |
475 } | |
476 DEBUG_ASSERT(flow_graph->VerifyUseLists()); | |
477 | |
478 if (sinking != NULL) { | |
479 // Remove all MaterializeObject instructions inserted by allocation | |
480 // sinking from the flow graph and let them float on the side | |
481 // referenced only from environments. Register allocator will consider | |
482 // them as part of a deoptimization environment. | |
483 sinking->DetachMaterializations(); | |
484 } | |
485 | |
486 // Perform register allocation on the SSA graph. | |
487 FlowGraphAllocator allocator(*flow_graph); | |
488 allocator.AllocateRegisters(); | |
489 | |
490 if (FLAG_print_flow_graph || FLAG_print_flow_graph_optimized) { | |
491 FlowGraphPrinter::PrintGraph("After Optimizations", flow_graph); | |
492 } | |
493 } | |
494 | |
495 Assembler assembler(use_far_branches); | |
496 FlowGraphCompiler graph_compiler(&assembler, | |
497 *flow_graph, | |
498 optimized); | |
499 { | |
500 TimerScope timer(FLAG_compiler_stats, | |
501 &CompilerStats::graphcompiler_timer, | |
502 isolate); | |
503 graph_compiler.CompileGraph(); | |
504 } | |
505 { | |
506 TimerScope timer(FLAG_compiler_stats, | |
507 &CompilerStats::codefinalizer_timer, | |
508 isolate); | |
509 const Code& code = Code::Handle( | |
510 Code::FinalizeCode(function, &assembler, optimized)); | |
511 code.set_is_optimized(optimized); | |
512 graph_compiler.FinalizePcDescriptors(code); | |
513 graph_compiler.FinalizeDeoptInfo(code); | |
514 graph_compiler.FinalizeStackmaps(code); | |
515 graph_compiler.FinalizeVarDescriptors(code); | |
516 graph_compiler.FinalizeExceptionHandlers(code); | |
517 graph_compiler.FinalizeComments(code); | |
518 graph_compiler.FinalizeStaticCallTargetsTable(code); | |
519 | |
520 if (optimized) { | |
521 if (osr_id == Isolate::kNoDeoptId) { | |
522 CodePatcher::PatchEntry(Code::Handle(function.CurrentCode())); | |
523 if (FLAG_trace_compiler) { | |
524 OS::Print("--> patching entry %#"Px"\n", | |
525 Code::Handle(function.unoptimized_code()).EntryPoint()); | |
526 } | |
527 } | |
528 function.SetCode(code); | |
529 | |
530 for (intptr_t i = 0; i < guarded_fields.length(); i++) { | |
531 const Field& field = *guarded_fields[i]; | |
532 field.RegisterDependentCode(code); | |
533 } | |
534 } else { | |
535 function.set_unoptimized_code(code); | |
536 function.SetCode(code); | |
537 ASSERT(CodePatcher::CodeIsPatchable(code)); | |
538 } | |
539 } | |
540 is_compiled = true; | |
541 done = true; | |
542 } else { | |
543 // We bailed out. | |
544 const Error& bailout_error = Error::Handle( | |
545 isolate->object_store()->sticky_error()); | |
546 | |
547 ASSERT(bailout_error.IsLanguageError()); | |
548 const LanguageError& le = LanguageError::CheckedHandle( | |
549 isolate->object_store()->sticky_error()); | |
550 const String& msg = String::Handle(le.message()); | |
551 if (msg.Equals("Branch offset overflow")) { | |
552 done = false; | |
553 ASSERT(!use_far_branches); | |
554 use_far_branches = true; | |
555 } else { | |
556 // If not for a branch offset overflow, we only bail out from | |
557 // generating ssa code. | |
558 if (FLAG_trace_bailout) { | |
559 OS::Print("%s\n", bailout_error.ToErrorCString()); | |
560 } | |
561 done = true; | |
562 ASSERT(optimized); | |
563 } | |
564 | |
565 isolate->object_store()->clear_sticky_error(); | |
566 is_compiled = false; | |
489 } | 567 } |
490 | 568 // Reset global isolate state. |
491 Assembler assembler; | 569 isolate->set_long_jump_base(old_base); |
492 FlowGraphCompiler graph_compiler(&assembler, | 570 isolate->set_deopt_id(prev_deopt_id); |
493 *flow_graph, | |
494 optimized); | |
495 { | |
496 TimerScope timer(FLAG_compiler_stats, | |
497 &CompilerStats::graphcompiler_timer, | |
498 isolate); | |
499 graph_compiler.CompileGraph(); | |
500 } | |
501 { | |
502 TimerScope timer(FLAG_compiler_stats, | |
503 &CompilerStats::codefinalizer_timer, | |
504 isolate); | |
505 const Code& code = Code::Handle( | |
506 Code::FinalizeCode(function, &assembler, optimized)); | |
507 code.set_is_optimized(optimized); | |
508 graph_compiler.FinalizePcDescriptors(code); | |
509 graph_compiler.FinalizeDeoptInfo(code); | |
510 graph_compiler.FinalizeStackmaps(code); | |
511 graph_compiler.FinalizeVarDescriptors(code); | |
512 graph_compiler.FinalizeExceptionHandlers(code); | |
513 graph_compiler.FinalizeComments(code); | |
514 graph_compiler.FinalizeStaticCallTargetsTable(code); | |
515 | |
516 if (optimized) { | |
517 if (osr_id == Isolate::kNoDeoptId) { | |
518 CodePatcher::PatchEntry(Code::Handle(function.CurrentCode())); | |
519 if (FLAG_trace_compiler) { | |
520 OS::Print("--> patching entry %#"Px"\n", | |
521 Code::Handle(function.unoptimized_code()).EntryPoint()); | |
522 } | |
523 } | |
524 function.SetCode(code); | |
525 | |
526 for (intptr_t i = 0; i < guarded_fields.length(); i++) { | |
527 const Field& field = *guarded_fields[i]; | |
528 field.RegisterDependentCode(code); | |
529 } | |
530 } else { | |
531 function.set_unoptimized_code(code); | |
532 function.SetCode(code); | |
533 ASSERT(CodePatcher::CodeIsPatchable(code)); | |
534 } | |
535 } | |
536 is_compiled = true; | |
537 } else { | |
538 // We bailed out. | |
539 Error& bailout_error = Error::Handle( | |
540 isolate->object_store()->sticky_error()); | |
541 isolate->object_store()->clear_sticky_error(); | |
542 if (FLAG_trace_bailout) { | |
543 OS::Print("%s\n", bailout_error.ToErrorCString()); | |
544 } | |
545 // We only bail out from generating ssa code. | |
546 ASSERT(optimized); | |
547 is_compiled = false; | |
548 } | 571 } |
549 // Reset global isolate state. | 572 use_far_branches = false; |
550 isolate->set_long_jump_base(old_base); | |
551 isolate->set_deopt_id(prev_deopt_id); | |
552 return is_compiled; | 573 return is_compiled; |
553 } | 574 } |
554 | 575 |
555 | 576 |
556 static void DisassembleCode(const Function& function, bool optimized) { | 577 static void DisassembleCode(const Function& function, bool optimized) { |
557 const char* function_fullname = function.ToFullyQualifiedCString(); | 578 const char* function_fullname = function.ToFullyQualifiedCString(); |
558 OS::Print("Code for %sfunction '%s' {\n", | 579 OS::Print("Code for %sfunction '%s' {\n", |
559 optimized ? "optimized " : "", | 580 optimized ? "optimized " : "", |
560 function_fullname); | 581 function_fullname); |
561 const Code& code = Code::Handle(function.CurrentCode()); | 582 const Code& code = Code::Handle(function.CurrentCode()); |
(...skipping 319 matching lines...)
881 Object::Handle(isolate->object_store()->sticky_error()); | 902 Object::Handle(isolate->object_store()->sticky_error()); |
882 isolate->object_store()->clear_sticky_error(); | 903 isolate->object_store()->clear_sticky_error(); |
883 isolate->set_long_jump_base(base); | 904 isolate->set_long_jump_base(base); |
884 return result.raw(); | 905 return result.raw(); |
885 } | 906 } |
886 UNREACHABLE(); | 907 UNREACHABLE(); |
887 return Object::null(); | 908 return Object::null(); |
888 } | 909 } |
889 | 910 |
890 } // namespace dart | 911 } // namespace dart |