Chromium Code Reviews

Diff: runtime/vm/compiler.cc

Issue 21363003: Enables per-function far-branches for ARM and MIPS. (Closed) Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 7 years, 4 months ago
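Background for the change: on ARM and MIPS a plain PC-relative branch instruction encodes only a limited signed offset, so a sufficiently large function cannot be assembled with near branches alone and needs longer far-branch sequences; this patch makes the compiler detect that case and retry that one function with far branches enabled. A rough illustration of the constraint, using the architectural range of the 32-bit ARM B/BL encoding rather than any constant taken from the Dart sources:

#include <cstdint>

// A 32-bit ARM B/BL instruction carries a signed 24-bit word offset, about
// +/-32MB of PC-relative range; targets outside it need a far-branch sequence.
constexpr int64_t kArmNearBranchRange = (int64_t{1} << 23) * 4;

inline bool FitsInNearBranch(int64_t pc_relative_offset) {
  return pc_relative_offset >= -kArmNearBranchRange &&
         pc_relative_offset < kArmNearBranchRange;
}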
// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/compiler.h"

#include "vm/assembler.h"

#include "vm/ast_printer.h"
#include "vm/code_generator.h"
(...skipping 234 matching lines...)
                                   bool optimized,
                                   intptr_t osr_id) {
  const Function& function = parsed_function->function();
  if (optimized && !function.is_optimizable()) {
    return false;
  }
  TimerScope timer(FLAG_compiler_stats, &CompilerStats::codegen_timer);
  bool is_compiled = false;
  Isolate* isolate = Isolate::Current();
  HANDLESCOPE(isolate);
-  const intptr_t prev_deopt_id = isolate->deopt_id();
-  isolate->set_deopt_id(0);
-  LongJump* old_base = isolate->long_jump_base();
-  LongJump bailout_jump;
-  isolate->set_long_jump_base(&bailout_jump);
-  if (setjmp(*bailout_jump.Set()) == 0) {
-    FlowGraph* flow_graph = NULL;
-    // TimerScope needs an isolate to be properly terminated in case of a
-    // LongJump.
-    {
+
+  // We may reattempt compilation if the function needs to be assembled using
+  // far branches on ARM and MIPS. In the else branch of the setjmp call,
+  // done is set to false, and use_far_branches is set to true if there is a
+  // longjmp from the ARM or MIPS assemblers. In all other paths through this
+  // while loop, done is set to true. use_far_branches is always false on ia32
+  // and x64.
+  bool done = false;
+  // static to evade gcc's longjmp variable smashing checks.
+  static bool use_far_branches = false;
Inline comments on the new line "static bool use_far_branches = false;" (the collapsed view shows only the first line of each comment):

Ivan Posva 2013/08/06 00:17:51: How is this supposed to work with multiple threads ...
Florian Schneider 2013/08/07 09:21:26: It should rather be volatile bool use_far_branches ...
zra 2013/08/09 15:19:43: Sorry, forgot about threads. You're right this won't ...
zra 2013/08/09 15:19:43: Yes. Thanks for the fix. I have sent you and Ivan ...
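Florian Schneider's suggestion above amounts to making the retry flag a function-local volatile instead of a function-level static, so concurrent compilations in different isolates or threads cannot see each other's value; the volatile qualifier keeps the value well-defined across the longjmp back into setjmp and also silences the gcc clobbered-variable warning that the static was added to evade. A minimal, self-contained sketch of that shape (CompileWithRetry and Assemble are invented stand-ins for this illustration, not Dart VM code, and this is not the code in the patch set under review):

#include <csetjmp>
#include <cstdio>

static std::jmp_buf bailout_jump;

// Stand-in for an assembler that bails out on branch offset overflow by
// longjmp-ing back into the compile loop unless far branches are enabled.
static void Assemble(bool use_far_branches) {
  if (!use_far_branches) {
    std::longjmp(bailout_jump, 1);
  }
  std::printf("assembled with far branches\n");
}

static bool CompileWithRetry() {
  bool done = false;
  bool is_compiled = false;
  // Local, so nothing is shared across threads or isolates; volatile, so the
  // value is preserved across longjmp and gcc does not warn about clobbering.
  volatile bool use_far_branches = false;
  while (!done) {
    if (setjmp(bailout_jump) == 0) {
      Assemble(use_far_branches);
      is_compiled = true;
      done = true;
    } else if (!use_far_branches) {
      use_far_branches = true;  // Retry exactly once with far branches.
    } else {
      done = true;              // Any other bailout: give up.
    }
  }
  return is_compiled;
}

int main() {
  return CompileWithRetry() ? 0 : 1;
}

The patch set under review keeps the static flag shown above; the diff of the retry loop itself continues below.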
+  while (!done) {
+    const intptr_t prev_deopt_id = isolate->deopt_id();
+    isolate->set_deopt_id(0);
+    LongJump* old_base = isolate->long_jump_base();
+    LongJump bailout_jump;
+    isolate->set_long_jump_base(&bailout_jump);
+    if (setjmp(*bailout_jump.Set()) == 0) {
+      FlowGraph* flow_graph = NULL;
+      // TimerScope needs an isolate to be properly terminated in case of a
+      // LongJump.
+      {

(The rest of the setjmp arm, that is, type-feedback extraction, flow-graph
construction, the optimization passes, register allocation, and code
finalization, is unchanged by this patch apart from being re-indented one
level into the new while loop; it is shown once below, at the new indentation.)

        TimerScope timer(FLAG_compiler_stats,
                         &CompilerStats::graphbuilder_timer,
                         isolate);
        Array& ic_data_array = Array::Handle();
        if (optimized) {
          ASSERT(function.HasCode());
          // Extract type feedback before the graph is built, as the graph
          // builder uses it to attach it to nodes.
          ASSERT(function.deoptimization_counter() <
                 FLAG_deoptimization_counter_threshold);
          const Code& unoptimized_code =
              Code::Handle(function.unoptimized_code());
          ic_data_array = unoptimized_code.ExtractTypeFeedbackArray();
        }

        // Build the flow graph.
        FlowGraphBuilder builder(parsed_function,
                                 ic_data_array,
                                 NULL,  // NULL = not inlining.
                                 osr_id);
        flow_graph = builder.BuildGraph();
      }

      if (FLAG_print_flow_graph ||
          (optimized && FLAG_print_flow_graph_optimized)) {
        if (osr_id == Isolate::kNoDeoptId) {
          FlowGraphPrinter::PrintGraph("Before Optimizations", flow_graph);
        } else {
          FlowGraphPrinter::PrintGraph("For OSR", flow_graph);
        }
      }

      if (optimized) {
        TimerScope timer(FLAG_compiler_stats,
                         &CompilerStats::ssa_timer,
                         isolate);
        // Transform to SSA (virtual register 0 and no inlining arguments).
        flow_graph->ComputeSSA(0, NULL);
        DEBUG_ASSERT(flow_graph->VerifyUseLists());
        if (FLAG_print_flow_graph || FLAG_print_flow_graph_optimized) {
          FlowGraphPrinter::PrintGraph("After SSA", flow_graph);
        }
      }

      // Collect all instance fields that are loaded in the graph and
      // have non-generic type feedback attached to them that can
      // potentially affect optimizations.
      GrowableArray<const Field*> guarded_fields(10);
      if (optimized) {
        TimerScope timer(FLAG_compiler_stats,
                         &CompilerStats::graphoptimizer_timer,
                         isolate);

        FlowGraphOptimizer optimizer(flow_graph, &guarded_fields);
        optimizer.ApplyICData();
        DEBUG_ASSERT(flow_graph->VerifyUseLists());

        // Optimize (a << b) & c patterns. Must occur before
        // 'SelectRepresentations' which inserts conversion nodes.
        // TODO(srdjan): Moved before inlining until environment use list can
        // be used to detect when shift-left is outside the scope of bit-and.
        optimizer.TryOptimizeLeftShiftWithBitAndPattern();
        DEBUG_ASSERT(flow_graph->VerifyUseLists());

        // Inlining (mutates the flow graph)
        if (FLAG_use_inlining) {
          TimerScope timer(FLAG_compiler_stats,
                           &CompilerStats::graphinliner_timer);
          FlowGraphInliner inliner(flow_graph, &guarded_fields);
          inliner.Inline();
          // Use lists are maintained and validated by the inliner.
          DEBUG_ASSERT(flow_graph->VerifyUseLists());
        }

        // Propagate types and eliminate more type tests.
        if (FLAG_propagate_types) {
          FlowGraphTypePropagator propagator(flow_graph);
          propagator.Propagate();
          DEBUG_ASSERT(flow_graph->VerifyUseLists());
        }

        // Use propagated class-ids to optimize further.
        optimizer.ApplyClassIds();
        DEBUG_ASSERT(flow_graph->VerifyUseLists());

        // Do optimizations that depend on the propagated type information.
        optimizer.Canonicalize();
        DEBUG_ASSERT(flow_graph->VerifyUseLists());

        BranchSimplifier::Simplify(flow_graph);
        DEBUG_ASSERT(flow_graph->VerifyUseLists());

        IfConverter::Simplify(flow_graph);
        DEBUG_ASSERT(flow_graph->VerifyUseLists());

        if (FLAG_constant_propagation) {
          ConstantPropagator::Optimize(flow_graph);
          DEBUG_ASSERT(flow_graph->VerifyUseLists());
          // A canonicalization pass to remove e.g. smi checks on smi constants.
          optimizer.Canonicalize();
          DEBUG_ASSERT(flow_graph->VerifyUseLists());
          // Canonicalization introduced more opportunities for constant
          // propagation.
          ConstantPropagator::Optimize(flow_graph);
          DEBUG_ASSERT(flow_graph->VerifyUseLists());
        }

        // Propagate types and eliminate even more type tests.
        if (FLAG_propagate_types) {
          // Recompute types after constant propagation to infer more precise
          // types for uses that were previously reached by now eliminated phis.
          FlowGraphTypePropagator propagator(flow_graph);
          propagator.Propagate();
          DEBUG_ASSERT(flow_graph->VerifyUseLists());
        }

        // Unbox doubles. Performed after constant propagation to minimize
        // interference from phis merging double values and tagged
        // values comming from dead paths.
        optimizer.SelectRepresentations();
        DEBUG_ASSERT(flow_graph->VerifyUseLists());

        if (FLAG_common_subexpression_elimination ||
            FLAG_loop_invariant_code_motion) {
          flow_graph->ComputeBlockEffects();
        }

        if (FLAG_common_subexpression_elimination) {
          if (DominatorBasedCSE::Optimize(flow_graph)) {
            DEBUG_ASSERT(flow_graph->VerifyUseLists());
            // Do another round of CSE to take secondary effects into account:
            // e.g. when eliminating dependent loads (a.x[0] + a.x[0])
            // TODO(fschneider): Change to a one-pass optimization pass.
            DominatorBasedCSE::Optimize(flow_graph);
            DEBUG_ASSERT(flow_graph->VerifyUseLists());
          }
        }
        if (FLAG_loop_invariant_code_motion &&
            (function.deoptimization_counter() <
             FLAG_deoptimization_counter_licm_threshold)) {
          LICM licm(flow_graph);
          licm.Optimize();
          DEBUG_ASSERT(flow_graph->VerifyUseLists());
        }
        flow_graph->RemoveRedefinitions();

        if (FLAG_range_analysis) {
          if (FLAG_propagate_types) {
            // Propagate types after store-load-forwarding. Some phis may have
            // become smi phis that can be processed by range analysis.
            FlowGraphTypePropagator propagator(flow_graph);
            propagator.Propagate();
            DEBUG_ASSERT(flow_graph->VerifyUseLists());
          }
          // We have to perform range analysis after LICM because it
          // optimistically moves CheckSmi through phis into loop preheaders
          // making some phis smi.
          optimizer.InferSmiRanges();
          DEBUG_ASSERT(flow_graph->VerifyUseLists());
        }

        if (FLAG_constant_propagation) {
          // Constant propagation can use information from range analysis to
          // find unreachable branch targets.
          ConstantPropagator::OptimizeBranches(flow_graph);
          DEBUG_ASSERT(flow_graph->VerifyUseLists());
        }

        if (FLAG_propagate_types) {
          // Recompute types after code movement was done to ensure correct
          // reaching types for hoisted values.
          FlowGraphTypePropagator propagator(flow_graph);
          propagator.Propagate();
          DEBUG_ASSERT(flow_graph->VerifyUseLists());
        }

        // Optimize try-blocks.
        TryCatchAnalyzer::Optimize(flow_graph);

        // Detach environments from the instructions that can't deoptimize.
        // Do it before we attempt to perform allocation sinking to minimize
        // amount of materializations it has to perform.
        optimizer.EliminateEnvironments();

        // Attempt to sink allocations of temporary non-escaping objects to
        // the deoptimization path.
        AllocationSinking* sinking = NULL;
        if (FLAG_allocation_sinking &&
            (flow_graph->graph_entry()->SuccessorCount() == 1)) {
          // TODO(fschneider): Support allocation sinking with try-catch.
          sinking = new AllocationSinking(flow_graph);
          sinking->Optimize();
        }

        // Ensure that all phis inserted by optimization passes have consistent
        // representations.
        optimizer.SelectRepresentations();

        if (optimizer.Canonicalize()) {
          // To fully remove redundant boxing (e.g. BoxDouble used only in
          // environments and UnboxDouble instructions) instruction we
          // first need to replace all their uses and then fold them away.
          // For now we just repeat Canonicalize twice to do that.
          // TODO(vegorov): implement a separate representation folding pass.
          optimizer.Canonicalize();
        }
        DEBUG_ASSERT(flow_graph->VerifyUseLists());

        if (sinking != NULL) {
          // Remove all MaterializeObject instructions inserted by allocation
          // sinking from the flow graph and let them float on the side
          // referenced only from environments. Register allocator will consider
          // them as part of a deoptimization environment.
          sinking->DetachMaterializations();
        }

        // Perform register allocation on the SSA graph.
        FlowGraphAllocator allocator(*flow_graph);
        allocator.AllocateRegisters();

        if (FLAG_print_flow_graph || FLAG_print_flow_graph_optimized) {
          FlowGraphPrinter::PrintGraph("After Optimizations", flow_graph);
        }
      }

-    Assembler assembler;
+      Assembler assembler(use_far_branches);
      FlowGraphCompiler graph_compiler(&assembler,
                                       *flow_graph,
                                       optimized);
      {
        TimerScope timer(FLAG_compiler_stats,
                         &CompilerStats::graphcompiler_timer,
                         isolate);
        graph_compiler.CompileGraph();
      }
      {
        TimerScope timer(FLAG_compiler_stats,
                         &CompilerStats::codefinalizer_timer,
                         isolate);
        const Code& code = Code::Handle(
            Code::FinalizeCode(function, &assembler, optimized));
        code.set_is_optimized(optimized);
        graph_compiler.FinalizePcDescriptors(code);
        graph_compiler.FinalizeDeoptInfo(code);
        graph_compiler.FinalizeStackmaps(code);
        graph_compiler.FinalizeVarDescriptors(code);
        graph_compiler.FinalizeExceptionHandlers(code);
        graph_compiler.FinalizeComments(code);
        graph_compiler.FinalizeStaticCallTargetsTable(code);

        if (optimized) {
          if (osr_id == Isolate::kNoDeoptId) {
            CodePatcher::PatchEntry(Code::Handle(function.CurrentCode()));
            if (FLAG_trace_compiler) {
              OS::Print("--> patching entry %#"Px"\n",
                        Code::Handle(function.unoptimized_code()).EntryPoint());
            }
          }
          function.SetCode(code);

          for (intptr_t i = 0; i < guarded_fields.length(); i++) {
            const Field& field = *guarded_fields[i];
            field.RegisterDependentCode(code);
          }
        } else {
          function.set_unoptimized_code(code);
          function.SetCode(code);
          ASSERT(CodePatcher::CodeIsPatchable(code));
        }
      }
      is_compiled = true;
+      done = true;
    } else {
      // We bailed out.
-    Error& bailout_error = Error::Handle(
-        isolate->object_store()->sticky_error());
-    isolate->object_store()->clear_sticky_error();
-    if (FLAG_trace_bailout) {
-      OS::Print("%s\n", bailout_error.ToErrorCString());
-    }
-    // We only bail out from generating ssa code.
-    ASSERT(optimized);
-    is_compiled = false;
+      const Error& bailout_error = Error::Handle(
+          isolate->object_store()->sticky_error());
+
+      ASSERT(bailout_error.IsLanguageError());
+      const LanguageError& le = LanguageError::CheckedHandle(
+          isolate->object_store()->sticky_error());
+      const String& msg = String::Handle(le.message());
+      if (msg.Equals("Branch offset overflow")) {
+        done = false;
+        ASSERT(!use_far_branches);
+        use_far_branches = true;
+      } else {
+        // If not for a branch offset overflow, we only bail out from
+        // generating ssa code.
+        if (FLAG_trace_bailout) {
+          OS::Print("%s\n", bailout_error.ToErrorCString());
+        }
+        done = true;
+        ASSERT(optimized);
+      }
+
+      isolate->object_store()->clear_sticky_error();
+      is_compiled = false;
    }
    // Reset global isolate state.
    isolate->set_long_jump_base(old_base);
    isolate->set_deopt_id(prev_deopt_id);
+  }
+  use_far_branches = false;
  return is_compiled;
}
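To restate the new bailout handling just above: after a longjmp, the loop inspects the sticky error's message, and only the exact string "Branch offset overflow" triggers another pass with far branches enabled (asserting they were not already on); any other bailout ends the attempt and, as before, can only come from optimized compilation. A condensed sketch of that decision, with RetryDecision and ClassifyBailout invented here for illustration rather than taken from the VM:

#include <cstring>

struct RetryDecision {
  bool done;              // Stop the while (!done) loop?
  bool use_far_branches;  // Assemble with far branches on the next pass?
};

RetryDecision ClassifyBailout(const char* message, bool use_far_branches) {
  if (std::strcmp(message, "Branch offset overflow") == 0) {
    // The ARM/MIPS assembler ran out of branch range: loop again, this time
    // assembling with far branches.
    return RetryDecision{false, true};
  }
  // Any other bailout comes from optimized (SSA) compilation only: give up.
  return RetryDecision{true, use_far_branches};
}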


static void DisassembleCode(const Function& function, bool optimized) {
  const char* function_fullname = function.ToFullyQualifiedCString();
  OS::Print("Code for %sfunction '%s' {\n",
            optimized ? "optimized " : "",
            function_fullname);
  const Code& code = Code::Handle(function.CurrentCode());
(...skipping 319 matching lines...)
        Object::Handle(isolate->object_store()->sticky_error());
    isolate->object_store()->clear_sticky_error();
    isolate->set_long_jump_base(base);
    return result.raw();
  }
  UNREACHABLE();
  return Object::null();
}

}  // namespace dart