Chromium Code Reviews

Side by Side Diff: runtime/vm/flow_graph_compiler.cc

Issue 2974233002: VM: Re-format to use at most one newline between functions (Closed)
Patch Set: Rebase and merge (created 3 years, 5 months ago)
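The CL applies a single whitespace rule across the VM sources: at most one blank line between top-level definitions. A minimal before/after sketch of the rule (hypothetical functions, not code from this CL):

    // Before: two blank lines separate top-level functions (illustrative only).
    void Foo() {}


    void Bar() {}

    // After: at most one blank line separates them.
    void Foo() {}

    void Bar() {}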
OLD | NEW
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_XXX. 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_XXX.
6 6
7 #include "vm/flow_graph_compiler.h" 7 #include "vm/flow_graph_compiler.h"
8 8
9 #include "vm/bit_vector.h" 9 #include "vm/bit_vector.h"
10 #include "vm/cha.h" 10 #include "vm/cha.h"
(...skipping 94 matching lines...)
105 COMPILE_ASSERT(!FLAG_deoptimize_alot); // Used in some tests. 105 COMPILE_ASSERT(!FLAG_deoptimize_alot); // Used in some tests.
106 COMPILE_ASSERT(!FLAG_enable_mirrors); 106 COMPILE_ASSERT(!FLAG_enable_mirrors);
107 COMPILE_ASSERT(FLAG_precompiled_runtime); 107 COMPILE_ASSERT(FLAG_precompiled_runtime);
108 COMPILE_ASSERT(!FLAG_print_stop_message); 108 COMPILE_ASSERT(!FLAG_print_stop_message);
109 COMPILE_ASSERT(!FLAG_use_osr); 109 COMPILE_ASSERT(!FLAG_use_osr);
110 COMPILE_ASSERT(FLAG_deoptimize_every == 0); // Used in some tests. 110 COMPILE_ASSERT(FLAG_deoptimize_every == 0); // Used in some tests.
111 COMPILE_ASSERT(FLAG_load_deferred_eagerly); 111 COMPILE_ASSERT(FLAG_load_deferred_eagerly);
112 112
113 #endif // DART_PRECOMPILED_RUNTIME 113 #endif // DART_PRECOMPILED_RUNTIME
114 114
115
116 // Assign locations to incoming arguments, i.e., values pushed above spill slots 115 // Assign locations to incoming arguments, i.e., values pushed above spill slots
117 // with PushArgument. Recursively allocates from outermost to innermost 116 // with PushArgument. Recursively allocates from outermost to innermost
118 // environment. 117 // environment.
119 void CompilerDeoptInfo::AllocateIncomingParametersRecursive( 118 void CompilerDeoptInfo::AllocateIncomingParametersRecursive(
120 Environment* env, 119 Environment* env,
121 intptr_t* stack_height) { 120 intptr_t* stack_height) {
122 if (env == NULL) return; 121 if (env == NULL) return;
123 AllocateIncomingParametersRecursive(env->outer(), stack_height); 122 AllocateIncomingParametersRecursive(env->outer(), stack_height);
124 for (Environment::ShallowIterator it(env); !it.Done(); it.Advance()) { 123 for (Environment::ShallowIterator it(env); !it.Done(); it.Advance()) {
125 if (it.CurrentLocation().IsInvalid() && 124 if (it.CurrentLocation().IsInvalid() &&
126 it.CurrentValue()->definition()->IsPushArgument()) { 125 it.CurrentValue()->definition()->IsPushArgument()) {
127 it.SetCurrentLocation(Location::StackSlot((*stack_height)++)); 126 it.SetCurrentLocation(Location::StackSlot((*stack_height)++));
128 } 127 }
129 } 128 }
130 } 129 }
131 130
132
133 void CompilerDeoptInfo::EmitMaterializations(Environment* env, 131 void CompilerDeoptInfo::EmitMaterializations(Environment* env,
134 DeoptInfoBuilder* builder) { 132 DeoptInfoBuilder* builder) {
135 for (Environment::DeepIterator it(env); !it.Done(); it.Advance()) { 133 for (Environment::DeepIterator it(env); !it.Done(); it.Advance()) {
136 if (it.CurrentLocation().IsInvalid()) { 134 if (it.CurrentLocation().IsInvalid()) {
137 MaterializeObjectInstr* mat = 135 MaterializeObjectInstr* mat =
138 it.CurrentValue()->definition()->AsMaterializeObject(); 136 it.CurrentValue()->definition()->AsMaterializeObject();
139 ASSERT(mat != NULL); 137 ASSERT(mat != NULL);
140 builder->AddMaterialization(mat); 138 builder->AddMaterialization(mat);
141 } 139 }
142 } 140 }
143 } 141 }
144 142
145
146 FlowGraphCompiler::FlowGraphCompiler( 143 FlowGraphCompiler::FlowGraphCompiler(
147 Assembler* assembler, 144 Assembler* assembler,
148 FlowGraph* flow_graph, 145 FlowGraph* flow_graph,
149 const ParsedFunction& parsed_function, 146 const ParsedFunction& parsed_function,
150 bool is_optimizing, 147 bool is_optimizing,
151 const GrowableArray<const Function*>& inline_id_to_function, 148 const GrowableArray<const Function*>& inline_id_to_function,
152 const GrowableArray<TokenPosition>& inline_id_to_token_pos, 149 const GrowableArray<TokenPosition>& inline_id_to_token_pos,
153 const GrowableArray<intptr_t>& caller_inline_id) 150 const GrowableArray<intptr_t>& caller_inline_id)
154 : thread_(Thread::Current()), 151 : thread_(Thread::Current()),
155 zone_(Thread::Current()->zone()), 152 zone_(Thread::Current()->zone()),
(...skipping 50 matching lines...)
206 } 203 }
207 ASSERT(assembler != NULL); 204 ASSERT(assembler != NULL);
208 ASSERT(!list_class_.IsNull()); 205 ASSERT(!list_class_.IsNull());
209 206
210 bool stack_traces_only = !FLAG_profiler; 207 bool stack_traces_only = !FLAG_profiler;
211 code_source_map_builder_ = new (zone_) 208 code_source_map_builder_ = new (zone_)
212 CodeSourceMapBuilder(stack_traces_only, caller_inline_id, 209 CodeSourceMapBuilder(stack_traces_only, caller_inline_id,
213 inline_id_to_token_pos, inline_id_to_function); 210 inline_id_to_token_pos, inline_id_to_function);
214 } 211 }
215 212
216
217 bool FlowGraphCompiler::IsUnboxedField(const Field& field) { 213 bool FlowGraphCompiler::IsUnboxedField(const Field& field) {
218 bool valid_class = 214 bool valid_class =
219 (SupportsUnboxedDoubles() && (field.guarded_cid() == kDoubleCid)) || 215 (SupportsUnboxedDoubles() && (field.guarded_cid() == kDoubleCid)) ||
220 (SupportsUnboxedSimd128() && (field.guarded_cid() == kFloat32x4Cid)) || 216 (SupportsUnboxedSimd128() && (field.guarded_cid() == kFloat32x4Cid)) ||
221 (SupportsUnboxedSimd128() && (field.guarded_cid() == kFloat64x2Cid)); 217 (SupportsUnboxedSimd128() && (field.guarded_cid() == kFloat64x2Cid));
222 return field.is_unboxing_candidate() && !field.is_final() && 218 return field.is_unboxing_candidate() && !field.is_final() &&
223 !field.is_nullable() && valid_class; 219 !field.is_nullable() && valid_class;
224 } 220 }
225 221
226
227 bool FlowGraphCompiler::IsPotentialUnboxedField(const Field& field) { 222 bool FlowGraphCompiler::IsPotentialUnboxedField(const Field& field) {
228 return field.is_unboxing_candidate() && 223 return field.is_unboxing_candidate() &&
229 (FlowGraphCompiler::IsUnboxedField(field) || 224 (FlowGraphCompiler::IsUnboxedField(field) ||
230 (!field.is_final() && (field.guarded_cid() == kIllegalCid))); 225 (!field.is_final() && (field.guarded_cid() == kIllegalCid)));
231 } 226 }
232 227
233
234 void FlowGraphCompiler::InitCompiler() { 228 void FlowGraphCompiler::InitCompiler() {
235 pc_descriptors_list_ = new (zone()) DescriptorList(64); 229 pc_descriptors_list_ = new (zone()) DescriptorList(64);
236 exception_handlers_list_ = new (zone()) ExceptionHandlerList(); 230 exception_handlers_list_ = new (zone()) ExceptionHandlerList();
237 catch_entry_state_maps_builder_ = new (zone()) CatchEntryStateMapBuilder(); 231 catch_entry_state_maps_builder_ = new (zone()) CatchEntryStateMapBuilder();
238 block_info_.Clear(); 232 block_info_.Clear();
239 // Conservative detection of leaf routines used to remove the stack check 233 // Conservative detection of leaf routines used to remove the stack check
240 // on function entry. 234 // on function entry.
241 bool is_leaf = is_optimizing() && !flow_graph().IsCompiledForOsr(); 235 bool is_leaf = is_optimizing() && !flow_graph().IsCompiledForOsr();
242 // Initialize block info and search optimized (non-OSR) code for calls 236 // Initialize block info and search optimized (non-OSR) code for calls
243 // indicating a non-leaf routine and calls without IC data indicating 237 // indicating a non-leaf routine and calls without IC data indicating
(...skipping 38 matching lines...)
282 const Array& edge_counters = 276 const Array& edge_counters =
283 Array::Handle(Array::New(num_counters, Heap::kOld)); 277 Array::Handle(Array::New(num_counters, Heap::kOld));
284 const Smi& zero_smi = Smi::Handle(Smi::New(0)); 278 const Smi& zero_smi = Smi::Handle(Smi::New(0));
285 for (intptr_t i = 0; i < num_counters; ++i) { 279 for (intptr_t i = 0; i < num_counters; ++i) {
286 edge_counters.SetAt(i, zero_smi); 280 edge_counters.SetAt(i, zero_smi);
287 } 281 }
288 edge_counters_array_ = edge_counters.raw(); 282 edge_counters_array_ = edge_counters.raw();
289 } 283 }
290 } 284 }
291 285
292
293 bool FlowGraphCompiler::CanOptimize() { 286 bool FlowGraphCompiler::CanOptimize() {
294 return FLAG_optimization_counter_threshold >= 0; 287 return FLAG_optimization_counter_threshold >= 0;
295 } 288 }
296 289
297
298 bool FlowGraphCompiler::CanOptimizeFunction() const { 290 bool FlowGraphCompiler::CanOptimizeFunction() const {
299 return CanOptimize() && !parsed_function().function().HasBreakpoint(); 291 return CanOptimize() && !parsed_function().function().HasBreakpoint();
300 } 292 }
301 293
302
303 bool FlowGraphCompiler::CanOSRFunction() const { 294 bool FlowGraphCompiler::CanOSRFunction() const {
304 return isolate()->use_osr() && CanOptimizeFunction() && !is_optimizing(); 295 return isolate()->use_osr() && CanOptimizeFunction() && !is_optimizing();
305 } 296 }
306 297
307
308 bool FlowGraphCompiler::ForceSlowPathForStackOverflow() const { 298 bool FlowGraphCompiler::ForceSlowPathForStackOverflow() const {
309 if ((FLAG_stacktrace_every > 0) || (FLAG_deoptimize_every > 0) || 299 if ((FLAG_stacktrace_every > 0) || (FLAG_deoptimize_every > 0) ||
310 (isolate()->reload_every_n_stack_overflow_checks() > 0)) { 300 (isolate()->reload_every_n_stack_overflow_checks() > 0)) {
311 return true; 301 return true;
312 } 302 }
313 if (FLAG_stacktrace_filter != NULL && 303 if (FLAG_stacktrace_filter != NULL &&
314 strstr(parsed_function().function().ToFullyQualifiedCString(), 304 strstr(parsed_function().function().ToFullyQualifiedCString(),
315 FLAG_stacktrace_filter) != NULL) { 305 FLAG_stacktrace_filter) != NULL) {
316 return true; 306 return true;
317 } 307 }
318 if (is_optimizing() && FLAG_deoptimize_filter != NULL && 308 if (is_optimizing() && FLAG_deoptimize_filter != NULL &&
319 strstr(parsed_function().function().ToFullyQualifiedCString(), 309 strstr(parsed_function().function().ToFullyQualifiedCString(),
320 FLAG_deoptimize_filter) != NULL) { 310 FLAG_deoptimize_filter) != NULL) {
321 return true; 311 return true;
322 } 312 }
323 return false; 313 return false;
324 } 314 }
325 315
326
327 static bool IsEmptyBlock(BlockEntryInstr* block) { 316 static bool IsEmptyBlock(BlockEntryInstr* block) {
328 return !block->IsCatchBlockEntry() && !block->HasNonRedundantParallelMove() && 317 return !block->IsCatchBlockEntry() && !block->HasNonRedundantParallelMove() &&
329 block->next()->IsGoto() && 318 block->next()->IsGoto() &&
330 !block->next()->AsGoto()->HasNonRedundantParallelMove() && 319 !block->next()->AsGoto()->HasNonRedundantParallelMove() &&
331 !block->IsIndirectEntry(); 320 !block->IsIndirectEntry();
332 } 321 }
333 322
334
335 void FlowGraphCompiler::CompactBlock(BlockEntryInstr* block) { 323 void FlowGraphCompiler::CompactBlock(BlockEntryInstr* block) {
336 BlockInfo* block_info = block_info_[block->postorder_number()]; 324 BlockInfo* block_info = block_info_[block->postorder_number()];
337 325
338 // Break out of cycles in the control flow graph. 326 // Break out of cycles in the control flow graph.
339 if (block_info->is_marked()) { 327 if (block_info->is_marked()) {
340 return; 328 return;
341 } 329 }
342 block_info->mark(); 330 block_info->mark();
343 331
344 if (IsEmptyBlock(block)) { 332 if (IsEmptyBlock(block)) {
345 // For empty blocks, record a corresponding nonempty target as their 333 // For empty blocks, record a corresponding nonempty target as their
346 // jump label. 334 // jump label.
347 BlockEntryInstr* target = block->next()->AsGoto()->successor(); 335 BlockEntryInstr* target = block->next()->AsGoto()->successor();
348 CompactBlock(target); 336 CompactBlock(target);
349 block_info->set_jump_label(GetJumpLabel(target)); 337 block_info->set_jump_label(GetJumpLabel(target));
350 } 338 }
351 } 339 }
352 340
353
354 void FlowGraphCompiler::CompactBlocks() { 341 void FlowGraphCompiler::CompactBlocks() {
355 // This algorithm does not garbage collect blocks in place, but merely 342 // This algorithm does not garbage collect blocks in place, but merely
356 // records forwarding label information. In this way it avoids having to 343 // records forwarding label information. In this way it avoids having to
357 // change join and target entries. 344 // change join and target entries.
358 Label* nonempty_label = NULL; 345 Label* nonempty_label = NULL;
359 for (intptr_t i = block_order().length() - 1; i >= 1; --i) { 346 for (intptr_t i = block_order().length() - 1; i >= 1; --i) {
360 BlockEntryInstr* block = block_order()[i]; 347 BlockEntryInstr* block = block_order()[i];
361 348
362 // Unoptimized code must emit all possible deoptimization points. 349 // Unoptimized code must emit all possible deoptimization points.
363 if (is_optimizing()) { 350 if (is_optimizing()) {
364 CompactBlock(block); 351 CompactBlock(block);
365 } 352 }
366 353
367 // For nonempty blocks, record the next nonempty block in the block 354 // For nonempty blocks, record the next nonempty block in the block
368 // order. Since no code is emitted for empty blocks, control flow is 355 // order. Since no code is emitted for empty blocks, control flow is
369 // eligible to fall through to the next nonempty one. 356 // eligible to fall through to the next nonempty one.
370 if (!WasCompacted(block)) { 357 if (!WasCompacted(block)) {
371 BlockInfo* block_info = block_info_[block->postorder_number()]; 358 BlockInfo* block_info = block_info_[block->postorder_number()];
372 block_info->set_next_nonempty_label(nonempty_label); 359 block_info->set_next_nonempty_label(nonempty_label);
373 nonempty_label = GetJumpLabel(block); 360 nonempty_label = GetJumpLabel(block);
374 } 361 }
375 } 362 }
376 363
377 ASSERT(block_order()[0]->IsGraphEntry()); 364 ASSERT(block_order()[0]->IsGraphEntry());
378 BlockInfo* block_info = block_info_[block_order()[0]->postorder_number()]; 365 BlockInfo* block_info = block_info_[block_order()[0]->postorder_number()];
379 block_info->set_next_nonempty_label(nonempty_label); 366 block_info->set_next_nonempty_label(nonempty_label);
380 } 367 }
381 368
382
383 void FlowGraphCompiler::EmitCatchEntryState(Environment* env, 369 void FlowGraphCompiler::EmitCatchEntryState(Environment* env,
384 intptr_t try_index) { 370 intptr_t try_index) {
385 #if defined(DART_PRECOMPILER) || defined(DART_PRECOMPILED_RUNTIME) 371 #if defined(DART_PRECOMPILER) || defined(DART_PRECOMPILED_RUNTIME)
386 env = env ? env : pending_deoptimization_env_; 372 env = env ? env : pending_deoptimization_env_;
387 try_index = try_index != CatchClauseNode::kInvalidTryIndex 373 try_index = try_index != CatchClauseNode::kInvalidTryIndex
388 ? try_index 374 ? try_index
389 : CurrentTryIndex(); 375 : CurrentTryIndex();
390 if (is_optimizing() && env != NULL && 376 if (is_optimizing() && env != NULL &&
391 (try_index != CatchClauseNode::kInvalidTryIndex)) { 377 (try_index != CatchClauseNode::kInvalidTryIndex)) {
392 env = env->Outermost(); 378 env = env->Outermost();
(...skipping 54 matching lines...)
447 if (src.stack_index() != dest_index) { 433 if (src.stack_index() != dest_index) {
448 catch_entry_state_maps_builder_->AppendMove(src.stack_index(), 434 catch_entry_state_maps_builder_->AppendMove(src.stack_index(),
449 dest_index); 435 dest_index);
450 } 436 }
451 } 437 }
452 catch_entry_state_maps_builder_->EndMapping(); 438 catch_entry_state_maps_builder_->EndMapping();
453 } 439 }
454 #endif // defined(DART_PRECOMPILER) || defined(DART_PRECOMPILED_RUNTIME) 440 #endif // defined(DART_PRECOMPILER) || defined(DART_PRECOMPILED_RUNTIME)
455 } 441 }
456 442
457
458 void FlowGraphCompiler::EmitCallsiteMetaData(TokenPosition token_pos, 443 void FlowGraphCompiler::EmitCallsiteMetaData(TokenPosition token_pos,
459 intptr_t deopt_id, 444 intptr_t deopt_id,
460 RawPcDescriptors::Kind kind, 445 RawPcDescriptors::Kind kind,
461 LocationSummary* locs) { 446 LocationSummary* locs) {
462 AddCurrentDescriptor(kind, deopt_id, token_pos); 447 AddCurrentDescriptor(kind, deopt_id, token_pos);
463 RecordSafepoint(locs); 448 RecordSafepoint(locs);
464 EmitCatchEntryState(); 449 EmitCatchEntryState();
465 } 450 }
466 451
467
468 void FlowGraphCompiler::EmitInstructionPrologue(Instruction* instr) { 452 void FlowGraphCompiler::EmitInstructionPrologue(Instruction* instr) {
469 if (!is_optimizing()) { 453 if (!is_optimizing()) {
470 if (instr->CanBecomeDeoptimizationTarget() && !instr->IsGoto()) { 454 if (instr->CanBecomeDeoptimizationTarget() && !instr->IsGoto()) {
471 // Instructions that can be deoptimization targets need to record kDeopt 455 // Instructions that can be deoptimization targets need to record kDeopt
472 // PcDescriptor corresponding to their deopt id. GotoInstr records its 456 // PcDescriptor corresponding to their deopt id. GotoInstr records its
473 // own so that it can control the placement. 457 // own so that it can control the placement.
474 AddCurrentDescriptor(RawPcDescriptors::kDeopt, instr->deopt_id(), 458 AddCurrentDescriptor(RawPcDescriptors::kDeopt, instr->deopt_id(),
475 instr->token_pos()); 459 instr->token_pos());
476 } 460 }
477 AllocateRegistersLocally(instr); 461 AllocateRegistersLocally(instr);
478 } 462 }
479 } 463 }
480 464
481
482 void FlowGraphCompiler::EmitSourceLine(Instruction* instr) { 465 void FlowGraphCompiler::EmitSourceLine(Instruction* instr) {
483 if (!instr->token_pos().IsReal() || (instr->env() == NULL)) { 466 if (!instr->token_pos().IsReal() || (instr->env() == NULL)) {
484 return; 467 return;
485 } 468 }
486 const Script& script = 469 const Script& script =
487 Script::Handle(zone(), instr->env()->function().script()); 470 Script::Handle(zone(), instr->env()->function().script());
488 intptr_t line_nr; 471 intptr_t line_nr;
489 intptr_t column_nr; 472 intptr_t column_nr;
490 script.GetTokenLocation(instr->token_pos(), &line_nr, &column_nr); 473 script.GetTokenLocation(instr->token_pos(), &line_nr, &column_nr);
491 const String& line = String::Handle(zone(), script.GetLine(line_nr)); 474 const String& line = String::Handle(zone(), script.GetLine(line_nr));
492 assembler()->Comment("Line %" Pd " in '%s':\n %s", line_nr, 475 assembler()->Comment("Line %" Pd " in '%s':\n %s", line_nr,
493 instr->env()->function().ToFullyQualifiedCString(), 476 instr->env()->function().ToFullyQualifiedCString(),
494 line.ToCString()); 477 line.ToCString());
495 } 478 }
496 479
497
498 static void LoopInfoComment( 480 static void LoopInfoComment(
499 Assembler* assembler, 481 Assembler* assembler,
500 const BlockEntryInstr& block, 482 const BlockEntryInstr& block,
501 const ZoneGrowableArray<BlockEntryInstr*>& loop_headers) { 483 const ZoneGrowableArray<BlockEntryInstr*>& loop_headers) {
502 if (Assembler::EmittingComments()) { 484 if (Assembler::EmittingComments()) {
503 for (intptr_t loop_id = 0; loop_id < loop_headers.length(); ++loop_id) { 485 for (intptr_t loop_id = 0; loop_id < loop_headers.length(); ++loop_id) {
504 for (BitVector::Iterator loop_it(loop_headers[loop_id]->loop_info()); 486 for (BitVector::Iterator loop_it(loop_headers[loop_id]->loop_info());
505 !loop_it.Done(); loop_it.Advance()) { 487 !loop_it.Done(); loop_it.Advance()) {
506 if (loop_it.Current() == block.preorder_number()) { 488 if (loop_it.Current() == block.preorder_number()) {
507 assembler->Comment(" Loop %" Pd "", loop_id); 489 assembler->Comment(" Loop %" Pd "", loop_id);
508 } 490 }
509 } 491 }
510 } 492 }
511 } 493 }
512 } 494 }
513 495
514
515 void FlowGraphCompiler::VisitBlocks() { 496 void FlowGraphCompiler::VisitBlocks() {
516 CompactBlocks(); 497 CompactBlocks();
517 const ZoneGrowableArray<BlockEntryInstr*>* loop_headers = NULL; 498 const ZoneGrowableArray<BlockEntryInstr*>* loop_headers = NULL;
518 if (Assembler::EmittingComments()) { 499 if (Assembler::EmittingComments()) {
519 // 'loop_headers' were cleared, recompute. 500 // 'loop_headers' were cleared, recompute.
520 loop_headers = flow_graph().ComputeLoops(); 501 loop_headers = flow_graph().ComputeLoops();
521 ASSERT(loop_headers != NULL); 502 ASSERT(loop_headers != NULL);
522 } 503 }
523 504
524 for (intptr_t i = 0; i < block_order().length(); ++i) { 505 for (intptr_t i = 0; i < block_order().length(); ++i) {
(...skipping 55 matching lines...)
580 } 561 }
581 562
582 #if defined(DEBUG) && !defined(TARGET_ARCH_DBC) 563 #if defined(DEBUG) && !defined(TARGET_ARCH_DBC)
583 ASSERT(is_optimizing() || FrameStateIsSafeToCall()); 564 ASSERT(is_optimizing() || FrameStateIsSafeToCall());
584 #endif 565 #endif
585 } 566 }
586 567
587 set_current_block(NULL); 568 set_current_block(NULL);
588 } 569 }
589 570
590
591 void FlowGraphCompiler::Bailout(const char* reason) { 571 void FlowGraphCompiler::Bailout(const char* reason) {
592 parsed_function_.Bailout("FlowGraphCompiler", reason); 572 parsed_function_.Bailout("FlowGraphCompiler", reason);
593 } 573 }
594 574
595
596 intptr_t FlowGraphCompiler::StackSize() const { 575 intptr_t FlowGraphCompiler::StackSize() const {
597 if (is_optimizing_) { 576 if (is_optimizing_) {
598 return flow_graph_.graph_entry()->spill_slot_count(); 577 return flow_graph_.graph_entry()->spill_slot_count();
599 } else { 578 } else {
600 return parsed_function_.num_stack_locals() + 579 return parsed_function_.num_stack_locals() +
601 parsed_function_.num_copied_params(); 580 parsed_function_.num_copied_params();
602 } 581 }
603 } 582 }
604 583
605
606 Label* FlowGraphCompiler::GetJumpLabel(BlockEntryInstr* block_entry) const { 584 Label* FlowGraphCompiler::GetJumpLabel(BlockEntryInstr* block_entry) const {
607 const intptr_t block_index = block_entry->postorder_number(); 585 const intptr_t block_index = block_entry->postorder_number();
608 return block_info_[block_index]->jump_label(); 586 return block_info_[block_index]->jump_label();
609 } 587 }
610 588
611
612 bool FlowGraphCompiler::WasCompacted(BlockEntryInstr* block_entry) const { 589 bool FlowGraphCompiler::WasCompacted(BlockEntryInstr* block_entry) const {
613 const intptr_t block_index = block_entry->postorder_number(); 590 const intptr_t block_index = block_entry->postorder_number();
614 return block_info_[block_index]->WasCompacted(); 591 return block_info_[block_index]->WasCompacted();
615 } 592 }
616 593
617
618 Label* FlowGraphCompiler::NextNonEmptyLabel() const { 594 Label* FlowGraphCompiler::NextNonEmptyLabel() const {
619 const intptr_t current_index = current_block()->postorder_number(); 595 const intptr_t current_index = current_block()->postorder_number();
620 return block_info_[current_index]->next_nonempty_label(); 596 return block_info_[current_index]->next_nonempty_label();
621 } 597 }
622 598
623
624 bool FlowGraphCompiler::CanFallThroughTo(BlockEntryInstr* block_entry) const { 599 bool FlowGraphCompiler::CanFallThroughTo(BlockEntryInstr* block_entry) const {
625 return NextNonEmptyLabel() == GetJumpLabel(block_entry); 600 return NextNonEmptyLabel() == GetJumpLabel(block_entry);
626 } 601 }
627 602
628
629 BranchLabels FlowGraphCompiler::CreateBranchLabels(BranchInstr* branch) const { 603 BranchLabels FlowGraphCompiler::CreateBranchLabels(BranchInstr* branch) const {
630 Label* true_label = GetJumpLabel(branch->true_successor()); 604 Label* true_label = GetJumpLabel(branch->true_successor());
631 Label* false_label = GetJumpLabel(branch->false_successor()); 605 Label* false_label = GetJumpLabel(branch->false_successor());
632 Label* fall_through = NextNonEmptyLabel(); 606 Label* fall_through = NextNonEmptyLabel();
633 BranchLabels result = {true_label, false_label, fall_through}; 607 BranchLabels result = {true_label, false_label, fall_through};
634 return result; 608 return result;
635 } 609 }
636 610
637
638 void FlowGraphCompiler::AddSlowPathCode(SlowPathCode* code) { 611 void FlowGraphCompiler::AddSlowPathCode(SlowPathCode* code) {
639 slow_path_code_.Add(code); 612 slow_path_code_.Add(code);
640 } 613 }
641 614
642
643 void FlowGraphCompiler::GenerateDeferredCode() { 615 void FlowGraphCompiler::GenerateDeferredCode() {
644 for (intptr_t i = 0; i < slow_path_code_.length(); i++) { 616 for (intptr_t i = 0; i < slow_path_code_.length(); i++) {
645 BeginCodeSourceRange(); 617 BeginCodeSourceRange();
646 slow_path_code_[i]->GenerateCode(this); 618 slow_path_code_[i]->GenerateCode(this);
647 EndCodeSourceRange(TokenPosition::kDeferredSlowPath); 619 EndCodeSourceRange(TokenPosition::kDeferredSlowPath);
648 } 620 }
649 for (intptr_t i = 0; i < deopt_infos_.length(); i++) { 621 for (intptr_t i = 0; i < deopt_infos_.length(); i++) {
650 BeginCodeSourceRange(); 622 BeginCodeSourceRange();
651 deopt_infos_[i]->GenerateCode(this, i); 623 deopt_infos_[i]->GenerateCode(this, i);
652 EndCodeSourceRange(TokenPosition::kDeferredDeoptInfo); 624 EndCodeSourceRange(TokenPosition::kDeferredDeoptInfo);
653 } 625 }
654 } 626 }
655 627
656
657 void FlowGraphCompiler::AddExceptionHandler(intptr_t try_index, 628 void FlowGraphCompiler::AddExceptionHandler(intptr_t try_index,
658 intptr_t outer_try_index, 629 intptr_t outer_try_index,
659 intptr_t pc_offset, 630 intptr_t pc_offset,
660 TokenPosition token_pos, 631 TokenPosition token_pos,
661 bool is_generated, 632 bool is_generated,
662 const Array& handler_types, 633 const Array& handler_types,
663 bool needs_stacktrace) { 634 bool needs_stacktrace) {
664 exception_handlers_list_->AddHandler(try_index, outer_try_index, pc_offset, 635 exception_handlers_list_->AddHandler(try_index, outer_try_index, pc_offset,
665 token_pos, is_generated, handler_types, 636 token_pos, is_generated, handler_types,
666 needs_stacktrace); 637 needs_stacktrace);
667 } 638 }
668 639
669
670 void FlowGraphCompiler::SetNeedsStackTrace(intptr_t try_index) { 640 void FlowGraphCompiler::SetNeedsStackTrace(intptr_t try_index) {
671 exception_handlers_list_->SetNeedsStackTrace(try_index); 641 exception_handlers_list_->SetNeedsStackTrace(try_index);
672 } 642 }
673 643
674
675 void FlowGraphCompiler::AddDescriptor(RawPcDescriptors::Kind kind, 644 void FlowGraphCompiler::AddDescriptor(RawPcDescriptors::Kind kind,
676 intptr_t pc_offset, 645 intptr_t pc_offset,
677 intptr_t deopt_id, 646 intptr_t deopt_id,
678 TokenPosition token_pos, 647 TokenPosition token_pos,
679 intptr_t try_index) { 648 intptr_t try_index) {
680 code_source_map_builder_->NoteDescriptor(kind, pc_offset, token_pos); 649 code_source_map_builder_->NoteDescriptor(kind, pc_offset, token_pos);
681 // When running with optimizations disabled, don't emit deopt-descriptors. 650 // When running with optimizations disabled, don't emit deopt-descriptors.
682 if (!CanOptimize() && (kind == RawPcDescriptors::kDeopt)) return; 651 if (!CanOptimize() && (kind == RawPcDescriptors::kDeopt)) return;
683 pc_descriptors_list_->AddDescriptor(kind, pc_offset, deopt_id, token_pos, 652 pc_descriptors_list_->AddDescriptor(kind, pc_offset, deopt_id, token_pos,
684 try_index); 653 try_index);
685 } 654 }
686 655
687
688 // Uses current pc position and try-index. 656 // Uses current pc position and try-index.
689 void FlowGraphCompiler::AddCurrentDescriptor(RawPcDescriptors::Kind kind, 657 void FlowGraphCompiler::AddCurrentDescriptor(RawPcDescriptors::Kind kind,
690 intptr_t deopt_id, 658 intptr_t deopt_id,
691 TokenPosition token_pos) { 659 TokenPosition token_pos) {
692 AddDescriptor(kind, assembler()->CodeSize(), deopt_id, token_pos, 660 AddDescriptor(kind, assembler()->CodeSize(), deopt_id, token_pos,
693 CurrentTryIndex()); 661 CurrentTryIndex());
694 } 662 }
695 663
696
697 void FlowGraphCompiler::AddStaticCallTarget(const Function& func) { 664 void FlowGraphCompiler::AddStaticCallTarget(const Function& func) {
698 ASSERT(func.IsZoneHandle()); 665 ASSERT(func.IsZoneHandle());
699 static_calls_target_table_.Add( 666 static_calls_target_table_.Add(
700 new (zone()) StaticCallsStruct(assembler()->CodeSize(), &func, NULL)); 667 new (zone()) StaticCallsStruct(assembler()->CodeSize(), &func, NULL));
701 } 668 }
702 669
703
704 void FlowGraphCompiler::AddStubCallTarget(const Code& code) { 670 void FlowGraphCompiler::AddStubCallTarget(const Code& code) {
705 ASSERT(code.IsZoneHandle()); 671 ASSERT(code.IsZoneHandle());
706 static_calls_target_table_.Add( 672 static_calls_target_table_.Add(
707 new (zone()) StaticCallsStruct(assembler()->CodeSize(), NULL, &code)); 673 new (zone()) StaticCallsStruct(assembler()->CodeSize(), NULL, &code));
708 } 674 }
709 675
710
711 CompilerDeoptInfo* FlowGraphCompiler::AddDeoptIndexAtCall(intptr_t deopt_id) { 676 CompilerDeoptInfo* FlowGraphCompiler::AddDeoptIndexAtCall(intptr_t deopt_id) {
712 ASSERT(is_optimizing()); 677 ASSERT(is_optimizing());
713 ASSERT(!intrinsic_mode()); 678 ASSERT(!intrinsic_mode());
714 CompilerDeoptInfo* info = 679 CompilerDeoptInfo* info =
715 new (zone()) CompilerDeoptInfo(deopt_id, ICData::kDeoptAtCall, 680 new (zone()) CompilerDeoptInfo(deopt_id, ICData::kDeoptAtCall,
716 0, // No flags. 681 0, // No flags.
717 pending_deoptimization_env_); 682 pending_deoptimization_env_);
718 info->set_pc_offset(assembler()->CodeSize()); 683 info->set_pc_offset(assembler()->CodeSize());
719 deopt_infos_.Add(info); 684 deopt_infos_.Add(info);
720 return info; 685 return info;
721 } 686 }
722 687
723
724 // This function must be in sync with FlowGraphCompiler::SaveLiveRegisters 688 // This function must be in sync with FlowGraphCompiler::SaveLiveRegisters
725 // and FlowGraphCompiler::SlowPathEnvironmentFor. 689 // and FlowGraphCompiler::SlowPathEnvironmentFor.
726 // See StackFrame::VisitObjectPointers for the details of how stack map is 690 // See StackFrame::VisitObjectPointers for the details of how stack map is
727 // interpreted. 691 // interpreted.
728 void FlowGraphCompiler::RecordSafepoint(LocationSummary* locs, 692 void FlowGraphCompiler::RecordSafepoint(LocationSummary* locs,
729 intptr_t slow_path_argument_count) { 693 intptr_t slow_path_argument_count) {
730 if (is_optimizing() || locs->live_registers()->HasUntaggedValues()) { 694 if (is_optimizing() || locs->live_registers()->HasUntaggedValues()) {
731 const intptr_t spill_area_size = 695 const intptr_t spill_area_size =
732 is_optimizing() ? flow_graph_.graph_entry()->spill_slot_count() : 0; 696 is_optimizing() ? flow_graph_.graph_entry()->spill_slot_count() : 0;
733 697
(...skipping 64 matching lines...)
798 } 762 }
799 763
800 // The slow path area Outside the spill area contains are live registers 764 // The slow path area Outside the spill area contains are live registers
801 // and pushed arguments for calls inside the slow path. 765 // and pushed arguments for calls inside the slow path.
802 intptr_t slow_path_bit_count = bitmap->Length() - spill_area_size; 766 intptr_t slow_path_bit_count = bitmap->Length() - spill_area_size;
803 stackmap_table_builder()->AddEntry(assembler()->CodeSize(), bitmap, 767 stackmap_table_builder()->AddEntry(assembler()->CodeSize(), bitmap,
804 slow_path_bit_count); 768 slow_path_bit_count);
805 } 769 }
806 } 770 }
807 771
808
809 // This function must be kept in sync with: 772 // This function must be kept in sync with:
810 // 773 //
811 // FlowGraphCompiler::RecordSafepoint 774 // FlowGraphCompiler::RecordSafepoint
812 // FlowGraphCompiler::SaveLiveRegisters 775 // FlowGraphCompiler::SaveLiveRegisters
813 // MaterializeObjectInstr::RemapRegisters 776 // MaterializeObjectInstr::RemapRegisters
814 // 777 //
815 Environment* FlowGraphCompiler::SlowPathEnvironmentFor( 778 Environment* FlowGraphCompiler::SlowPathEnvironmentFor(
816 Instruction* instruction) { 779 Instruction* instruction) {
817 if (instruction->env() == NULL) { 780 if (instruction->env() == NULL) {
818 ASSERT(!is_optimizing()); 781 ASSERT(!is_optimizing());
(...skipping 36 matching lines...)
855 for (Environment::DeepIterator it(env); !it.Done(); it.Advance()) { 818 for (Environment::DeepIterator it(env); !it.Done(); it.Advance()) {
856 Location loc = it.CurrentLocation(); 819 Location loc = it.CurrentLocation();
857 Value* value = it.CurrentValue(); 820 Value* value = it.CurrentValue();
858 it.SetCurrentLocation(loc.RemapForSlowPath(value->definition(), 821 it.SetCurrentLocation(loc.RemapForSlowPath(value->definition(),
859 cpu_reg_slots, fpu_reg_slots)); 822 cpu_reg_slots, fpu_reg_slots));
860 } 823 }
861 824
862 return env; 825 return env;
863 } 826 }
864 827
865
866 Label* FlowGraphCompiler::AddDeoptStub(intptr_t deopt_id, 828 Label* FlowGraphCompiler::AddDeoptStub(intptr_t deopt_id,
867 ICData::DeoptReasonId reason, 829 ICData::DeoptReasonId reason,
868 uint32_t flags) { 830 uint32_t flags) {
869 if (intrinsic_mode()) { 831 if (intrinsic_mode()) {
870 return &intrinsic_slow_path_label_; 832 return &intrinsic_slow_path_label_;
871 } 833 }
872 834
873 // No deoptimization allowed when 'FLAG_precompiled_mode' is set. 835 // No deoptimization allowed when 'FLAG_precompiled_mode' is set.
874 if (FLAG_precompiled_mode) { 836 if (FLAG_precompiled_mode) {
875 if (FLAG_trace_compiler) { 837 if (FLAG_trace_compiler) {
876 THR_Print( 838 THR_Print(
877 "Retrying compilation %s, suppressing inlining of deopt_id:%" Pd "\n", 839 "Retrying compilation %s, suppressing inlining of deopt_id:%" Pd "\n",
878 parsed_function_.function().ToFullyQualifiedCString(), deopt_id); 840 parsed_function_.function().ToFullyQualifiedCString(), deopt_id);
879 } 841 }
880 ASSERT(deopt_id != 0); // longjmp must return non-zero value. 842 ASSERT(deopt_id != 0); // longjmp must return non-zero value.
881 Thread::Current()->long_jump_base()->Jump( 843 Thread::Current()->long_jump_base()->Jump(
882 deopt_id, Object::speculative_inlining_error()); 844 deopt_id, Object::speculative_inlining_error());
883 } 845 }
884 846
885 ASSERT(is_optimizing_); 847 ASSERT(is_optimizing_);
886 CompilerDeoptInfoWithStub* stub = new (zone()) CompilerDeoptInfoWithStub( 848 CompilerDeoptInfoWithStub* stub = new (zone()) CompilerDeoptInfoWithStub(
887 deopt_id, reason, flags, pending_deoptimization_env_); 849 deopt_id, reason, flags, pending_deoptimization_env_);
888 deopt_infos_.Add(stub); 850 deopt_infos_.Add(stub);
889 return stub->entry_label(); 851 return stub->entry_label();
890 } 852 }
891 853
892
893 #if defined(TARGET_ARCH_DBC) 854 #if defined(TARGET_ARCH_DBC)
894 void FlowGraphCompiler::EmitDeopt(intptr_t deopt_id, 855 void FlowGraphCompiler::EmitDeopt(intptr_t deopt_id,
895 ICData::DeoptReasonId reason, 856 ICData::DeoptReasonId reason,
896 uint32_t flags) { 857 uint32_t flags) {
897 ASSERT(is_optimizing()); 858 ASSERT(is_optimizing());
898 ASSERT(!intrinsic_mode()); 859 ASSERT(!intrinsic_mode());
899 // The pending deoptimization environment may be changed after this deopt is 860 // The pending deoptimization environment may be changed after this deopt is
900 // emitted, so we need to make a copy. 861 // emitted, so we need to make a copy.
901 Environment* env_copy = pending_deoptimization_env_->DeepCopy(zone()); 862 Environment* env_copy = pending_deoptimization_env_->DeepCopy(zone());
902 CompilerDeoptInfo* info = 863 CompilerDeoptInfo* info =
903 new (zone()) CompilerDeoptInfo(deopt_id, reason, flags, env_copy); 864 new (zone()) CompilerDeoptInfo(deopt_id, reason, flags, env_copy);
904 deopt_infos_.Add(info); 865 deopt_infos_.Add(info);
905 assembler()->Deopt(0, /*is_eager =*/1); 866 assembler()->Deopt(0, /*is_eager =*/1);
906 info->set_pc_offset(assembler()->CodeSize()); 867 info->set_pc_offset(assembler()->CodeSize());
907 } 868 }
908 #endif // defined(TARGET_ARCH_DBC) 869 #endif // defined(TARGET_ARCH_DBC)
909 870
910
911 void FlowGraphCompiler::FinalizeExceptionHandlers(const Code& code) { 871 void FlowGraphCompiler::FinalizeExceptionHandlers(const Code& code) {
912 ASSERT(exception_handlers_list_ != NULL); 872 ASSERT(exception_handlers_list_ != NULL);
913 const ExceptionHandlers& handlers = ExceptionHandlers::Handle( 873 const ExceptionHandlers& handlers = ExceptionHandlers::Handle(
914 exception_handlers_list_->FinalizeExceptionHandlers(code.PayloadStart())); 874 exception_handlers_list_->FinalizeExceptionHandlers(code.PayloadStart()));
915 code.set_exception_handlers(handlers); 875 code.set_exception_handlers(handlers);
916 if (FLAG_compiler_stats) { 876 if (FLAG_compiler_stats) {
917 Thread* thread = Thread::Current(); 877 Thread* thread = Thread::Current();
918 INC_STAT(thread, total_code_size, 878 INC_STAT(thread, total_code_size,
919 ExceptionHandlers::InstanceSize(handlers.num_entries())); 879 ExceptionHandlers::InstanceSize(handlers.num_entries()));
920 INC_STAT(thread, total_code_size, handlers.num_entries() * sizeof(uword)); 880 INC_STAT(thread, total_code_size, handlers.num_entries() * sizeof(uword));
921 } 881 }
922 } 882 }
923 883
924
925 void FlowGraphCompiler::FinalizePcDescriptors(const Code& code) { 884 void FlowGraphCompiler::FinalizePcDescriptors(const Code& code) {
926 ASSERT(pc_descriptors_list_ != NULL); 885 ASSERT(pc_descriptors_list_ != NULL);
927 const PcDescriptors& descriptors = PcDescriptors::Handle( 886 const PcDescriptors& descriptors = PcDescriptors::Handle(
928 pc_descriptors_list_->FinalizePcDescriptors(code.PayloadStart())); 887 pc_descriptors_list_->FinalizePcDescriptors(code.PayloadStart()));
929 if (!is_optimizing_) descriptors.Verify(parsed_function_.function()); 888 if (!is_optimizing_) descriptors.Verify(parsed_function_.function());
930 code.set_pc_descriptors(descriptors); 889 code.set_pc_descriptors(descriptors);
931 } 890 }
932 891
933
934 RawArray* FlowGraphCompiler::CreateDeoptInfo(Assembler* assembler) { 892 RawArray* FlowGraphCompiler::CreateDeoptInfo(Assembler* assembler) {
935 // No deopt information if we precompile (no deoptimization allowed). 893 // No deopt information if we precompile (no deoptimization allowed).
936 if (FLAG_precompiled_mode) { 894 if (FLAG_precompiled_mode) {
937 return Array::empty_array().raw(); 895 return Array::empty_array().raw();
938 } 896 }
939 // For functions with optional arguments, all incoming arguments are copied 897 // For functions with optional arguments, all incoming arguments are copied
940 // to spill slots. The deoptimization environment does not track them. 898 // to spill slots. The deoptimization environment does not track them.
941 const Function& function = parsed_function().function(); 899 const Function& function = parsed_function().function();
942 const intptr_t incoming_arg_count = 900 const intptr_t incoming_arg_count =
943 function.HasOptionalParameters() ? 0 : function.num_fixed_parameters(); 901 function.HasOptionalParameters() ? 0 : function.num_fixed_parameters();
(...skipping 12 matching lines...)
956 offset = Smi::New(deopt_infos_[i]->pc_offset()); 914 offset = Smi::New(deopt_infos_[i]->pc_offset());
957 info = deopt_infos_[i]->CreateDeoptInfo(this, &builder, array); 915 info = deopt_infos_[i]->CreateDeoptInfo(this, &builder, array);
958 reason_and_flags = DeoptTable::EncodeReasonAndFlags( 916 reason_and_flags = DeoptTable::EncodeReasonAndFlags(
959 deopt_infos_[i]->reason(), deopt_infos_[i]->flags()); 917 deopt_infos_[i]->reason(), deopt_infos_[i]->flags());
960 DeoptTable::SetEntry(array, i, offset, info, reason_and_flags); 918 DeoptTable::SetEntry(array, i, offset, info, reason_and_flags);
961 } 919 }
962 return array.raw(); 920 return array.raw();
963 } 921 }
964 } 922 }
965 923
966
967 void FlowGraphCompiler::FinalizeStackMaps(const Code& code) { 924 void FlowGraphCompiler::FinalizeStackMaps(const Code& code) {
968 if (stackmap_table_builder_ == NULL) { 925 if (stackmap_table_builder_ == NULL) {
969 code.set_stackmaps(Object::null_array()); 926 code.set_stackmaps(Object::null_array());
970 } else { 927 } else {
971 // Finalize the stack map array and add it to the code object. 928 // Finalize the stack map array and add it to the code object.
972 code.set_stackmaps( 929 code.set_stackmaps(
973 Array::Handle(stackmap_table_builder_->FinalizeStackMaps(code))); 930 Array::Handle(stackmap_table_builder_->FinalizeStackMaps(code)));
974 } 931 }
975 } 932 }
976 933
977
978 void FlowGraphCompiler::FinalizeVarDescriptors(const Code& code) { 934 void FlowGraphCompiler::FinalizeVarDescriptors(const Code& code) {
979 if (code.is_optimized()) { 935 if (code.is_optimized()) {
980 // Optimized code does not need variable descriptors. They are 936 // Optimized code does not need variable descriptors. They are
981 // only stored in the unoptimized version. 937 // only stored in the unoptimized version.
982 code.set_var_descriptors(Object::empty_var_descriptors()); 938 code.set_var_descriptors(Object::empty_var_descriptors());
983 return; 939 return;
984 } 940 }
985 LocalVarDescriptors& var_descs = LocalVarDescriptors::Handle(); 941 LocalVarDescriptors& var_descs = LocalVarDescriptors::Handle();
986 if (parsed_function().node_sequence() == NULL) { 942 if (parsed_function().node_sequence() == NULL) {
987 // Eager local var descriptors computation for Irregexp function as it is 943 // Eager local var descriptors computation for Irregexp function as it is
(...skipping 41 matching lines...)
1029 if (static_calls_target_table_[i]->code != NULL) { 985 if (static_calls_target_table_[i]->code != NULL) {
1030 targets.SetAt(target_ix + Code::kSCallTableCodeEntry, 986 targets.SetAt(target_ix + Code::kSCallTableCodeEntry,
1031 *static_calls_target_table_[i]->code); 987 *static_calls_target_table_[i]->code);
1032 } 988 }
1033 } 989 }
1034 code.set_static_calls_target_table(targets); 990 code.set_static_calls_target_table(targets);
1035 INC_STAT(Thread::Current(), total_code_size, 991 INC_STAT(Thread::Current(), total_code_size,
1036 targets.Length() * sizeof(uword)); 992 targets.Length() * sizeof(uword));
1037 } 993 }
1038 994
1039
1040 void FlowGraphCompiler::FinalizeCodeSourceMap(const Code& code) { 995 void FlowGraphCompiler::FinalizeCodeSourceMap(const Code& code) {
1041 const Array& inlined_id_array = 996 const Array& inlined_id_array =
1042 Array::Handle(zone(), code_source_map_builder_->InliningIdToFunction()); 997 Array::Handle(zone(), code_source_map_builder_->InliningIdToFunction());
1043 INC_STAT(Thread::Current(), total_code_size, 998 INC_STAT(Thread::Current(), total_code_size,
1044 inlined_id_array.Length() * sizeof(uword)); 999 inlined_id_array.Length() * sizeof(uword));
1045 code.set_inlined_id_to_function(inlined_id_array); 1000 code.set_inlined_id_to_function(inlined_id_array);
1046 1001
1047 const CodeSourceMap& map = 1002 const CodeSourceMap& map =
1048 CodeSourceMap::Handle(code_source_map_builder_->Finalize()); 1003 CodeSourceMap::Handle(code_source_map_builder_->Finalize());
1049 INC_STAT(Thread::Current(), total_code_size, map.Length() * sizeof(uint8_t)); 1004 INC_STAT(Thread::Current(), total_code_size, map.Length() * sizeof(uint8_t));
1050 code.set_code_source_map(map); 1005 code.set_code_source_map(map);
1051 1006
1052 #if defined(DEBUG) 1007 #if defined(DEBUG)
1053 // Force simulation through the last pc offset. This checks we can decode 1008 // Force simulation through the last pc offset. This checks we can decode
1054 // the whole CodeSourceMap without hitting an unknown opcode, stack underflow, 1009 // the whole CodeSourceMap without hitting an unknown opcode, stack underflow,
1055 // etc. 1010 // etc.
1056 GrowableArray<const Function*> fs; 1011 GrowableArray<const Function*> fs;
1057 GrowableArray<TokenPosition> tokens; 1012 GrowableArray<TokenPosition> tokens;
1058 code.GetInlinedFunctionsAtInstruction(code.Size() - 1, &fs, &tokens); 1013 code.GetInlinedFunctionsAtInstruction(code.Size() - 1, &fs, &tokens);
1059 #endif 1014 #endif
1060 } 1015 }
1061 1016
1062
1063 // Returns 'true' if regular code generation should be skipped. 1017 // Returns 'true' if regular code generation should be skipped.
1064 bool FlowGraphCompiler::TryIntrinsify() { 1018 bool FlowGraphCompiler::TryIntrinsify() {
1065 // Intrinsification skips arguments checks, therefore disable if in checked 1019 // Intrinsification skips arguments checks, therefore disable if in checked
1066 // mode. 1020 // mode.
1067 if (FLAG_intrinsify && !isolate()->type_checks()) { 1021 if (FLAG_intrinsify && !isolate()->type_checks()) {
1068 const Class& owner = Class::Handle(parsed_function().function().Owner()); 1022 const Class& owner = Class::Handle(parsed_function().function().Owner());
1069 String& name = String::Handle(parsed_function().function().name()); 1023 String& name = String::Handle(parsed_function().function().name());
1070 1024
1071 if (parsed_function().function().kind() == RawFunction::kImplicitGetter) { 1025 if (parsed_function().function().kind() == RawFunction::kImplicitGetter) {
1072 // TODO(27590) Store Field object inside RawFunction::data_ if possible. 1026 // TODO(27590) Store Field object inside RawFunction::data_ if possible.
(...skipping 34 matching lines...)
1107 // "Deoptimization" from intrinsic continues here. All deoptimization 1061 // "Deoptimization" from intrinsic continues here. All deoptimization
1108 // branches from intrinsic code redirect to here where the slow-path 1062 // branches from intrinsic code redirect to here where the slow-path
1109 // (normal function body) starts. 1063 // (normal function body) starts.
1110 // This means that there must not be any side-effects in intrinsic code 1064 // This means that there must not be any side-effects in intrinsic code
1111 // before any deoptimization point. 1065 // before any deoptimization point.
1112 ASSERT(!intrinsic_slow_path_label_.IsBound()); 1066 ASSERT(!intrinsic_slow_path_label_.IsBound());
1113 assembler()->Bind(&intrinsic_slow_path_label_); 1067 assembler()->Bind(&intrinsic_slow_path_label_);
1114 return complete; 1068 return complete;
1115 } 1069 }
1116 1070
1117
1118 // DBC is very different from other architectures in how it performs instance 1071 // DBC is very different from other architectures in how it performs instance
1119 // and static calls because it does not use stubs. 1072 // and static calls because it does not use stubs.
1120 #if !defined(TARGET_ARCH_DBC) 1073 #if !defined(TARGET_ARCH_DBC)
1121 void FlowGraphCompiler::GenerateCallWithDeopt(TokenPosition token_pos, 1074 void FlowGraphCompiler::GenerateCallWithDeopt(TokenPosition token_pos,
1122 intptr_t deopt_id, 1075 intptr_t deopt_id,
1123 const StubEntry& stub_entry, 1076 const StubEntry& stub_entry,
1124 RawPcDescriptors::Kind kind, 1077 RawPcDescriptors::Kind kind,
1125 LocationSummary* locs) { 1078 LocationSummary* locs) {
1126 GenerateCall(token_pos, stub_entry, kind, locs); 1079 GenerateCall(token_pos, stub_entry, kind, locs);
1127 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); 1080 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
1128 if (is_optimizing()) { 1081 if (is_optimizing()) {
1129 AddDeoptIndexAtCall(deopt_id_after); 1082 AddDeoptIndexAtCall(deopt_id_after);
1130 } else { 1083 } else {
1131 // Add deoptimization continuation point after the call and before the 1084 // Add deoptimization continuation point after the call and before the
1132 // arguments are removed. 1085 // arguments are removed.
1133 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); 1086 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
1134 } 1087 }
1135 } 1088 }
1136 1089
1137
1138 void FlowGraphCompiler::GenerateInstanceCall(intptr_t deopt_id, 1090 void FlowGraphCompiler::GenerateInstanceCall(intptr_t deopt_id,
1139 TokenPosition token_pos, 1091 TokenPosition token_pos,
1140 intptr_t argument_count, 1092 intptr_t argument_count,
1141 LocationSummary* locs, 1093 LocationSummary* locs,
1142 const ICData& ic_data_in) { 1094 const ICData& ic_data_in) {
1143 ICData& ic_data = ICData::ZoneHandle(ic_data_in.Original()); 1095 ICData& ic_data = ICData::ZoneHandle(ic_data_in.Original());
1144 if (FLAG_precompiled_mode) { 1096 if (FLAG_precompiled_mode) {
1145 ic_data = ic_data.AsUnaryClassChecks(); 1097 ic_data = ic_data.AsUnaryClassChecks();
1146 EmitSwitchableInstanceCall(ic_data, argument_count, deopt_id, token_pos, 1098 EmitSwitchableInstanceCall(ic_data, argument_count, deopt_id, token_pos,
1147 locs); 1099 locs);
(...skipping 38 matching lines...)
1186 break; 1138 break;
1187 case 2: 1139 case 2:
1188 EmitInstanceCall(*StubCode::TwoArgsCheckInlineCache_entry(), ic_data, 1140 EmitInstanceCall(*StubCode::TwoArgsCheckInlineCache_entry(), ic_data,
1189 argument_count, deopt_id, token_pos, locs); 1141 argument_count, deopt_id, token_pos, locs);
1190 break; 1142 break;
1191 default: 1143 default:
1192 UNIMPLEMENTED(); 1144 UNIMPLEMENTED();
1193 } 1145 }
1194 } 1146 }
1195 1147
1196
1197 void FlowGraphCompiler::GenerateStaticCall(intptr_t deopt_id, 1148 void FlowGraphCompiler::GenerateStaticCall(intptr_t deopt_id,
1198 TokenPosition token_pos, 1149 TokenPosition token_pos,
1199 const Function& function, 1150 const Function& function,
1200 ArgumentsInfo args_info, 1151 ArgumentsInfo args_info,
1201 LocationSummary* locs, 1152 LocationSummary* locs,
1202 const ICData& ic_data_in) { 1153 const ICData& ic_data_in) {
1203 const ICData& ic_data = ICData::ZoneHandle(ic_data_in.Original()); 1154 const ICData& ic_data = ICData::ZoneHandle(ic_data_in.Original());
1204 const Array& arguments_descriptor = Array::ZoneHandle( 1155 const Array& arguments_descriptor = Array::ZoneHandle(
1205 zone(), ic_data.IsNull() ? args_info.ToArgumentsDescriptor() 1156 zone(), ic_data.IsNull() ? args_info.ToArgumentsDescriptor()
1206 : ic_data.arguments_descriptor()); 1157 : ic_data.arguments_descriptor());
(...skipping 10 matching lines...)
1217 GetOrAddStaticCallICData(deopt_id, function, arguments_descriptor, 1168 GetOrAddStaticCallICData(deopt_id, function, arguments_descriptor,
1218 kNumArgsChecked) 1169 kNumArgsChecked)
1219 ->raw(); 1170 ->raw();
1220 } 1171 }
1221 AddCurrentDescriptor(RawPcDescriptors::kRewind, deopt_id, token_pos); 1172 AddCurrentDescriptor(RawPcDescriptors::kRewind, deopt_id, token_pos);
1222 EmitUnoptimizedStaticCall(args_info.pushed_argc, deopt_id, token_pos, locs, 1173 EmitUnoptimizedStaticCall(args_info.pushed_argc, deopt_id, token_pos, locs,
1223 call_ic_data); 1174 call_ic_data);
1224 } 1175 }
1225 } 1176 }
1226 1177
1227
1228 void FlowGraphCompiler::GenerateNumberTypeCheck(Register kClassIdReg, 1178 void FlowGraphCompiler::GenerateNumberTypeCheck(Register kClassIdReg,
1229 const AbstractType& type, 1179 const AbstractType& type,
1230 Label* is_instance_lbl, 1180 Label* is_instance_lbl,
1231 Label* is_not_instance_lbl) { 1181 Label* is_not_instance_lbl) {
1232 assembler()->Comment("NumberTypeCheck"); 1182 assembler()->Comment("NumberTypeCheck");
1233 GrowableArray<intptr_t> args; 1183 GrowableArray<intptr_t> args;
1234 if (type.IsNumberType()) { 1184 if (type.IsNumberType()) {
1235 args.Add(kDoubleCid); 1185 args.Add(kDoubleCid);
1236 args.Add(kMintCid); 1186 args.Add(kMintCid);
1237 args.Add(kBigintCid); 1187 args.Add(kBigintCid);
1238 } else if (type.IsIntType()) { 1188 } else if (type.IsIntType()) {
1239 args.Add(kMintCid); 1189 args.Add(kMintCid);
1240 args.Add(kBigintCid); 1190 args.Add(kBigintCid);
1241 } else if (type.IsDoubleType()) { 1191 } else if (type.IsDoubleType()) {
1242 args.Add(kDoubleCid); 1192 args.Add(kDoubleCid);
1243 } 1193 }
1244 CheckClassIds(kClassIdReg, args, is_instance_lbl, is_not_instance_lbl); 1194 CheckClassIds(kClassIdReg, args, is_instance_lbl, is_not_instance_lbl);
1245 } 1195 }
1246 1196
1247
1248 void FlowGraphCompiler::GenerateStringTypeCheck(Register kClassIdReg, 1197 void FlowGraphCompiler::GenerateStringTypeCheck(Register kClassIdReg,
1249 Label* is_instance_lbl, 1198 Label* is_instance_lbl,
1250 Label* is_not_instance_lbl) { 1199 Label* is_not_instance_lbl) {
1251 assembler()->Comment("StringTypeCheck"); 1200 assembler()->Comment("StringTypeCheck");
1252 GrowableArray<intptr_t> args; 1201 GrowableArray<intptr_t> args;
1253 args.Add(kOneByteStringCid); 1202 args.Add(kOneByteStringCid);
1254 args.Add(kTwoByteStringCid); 1203 args.Add(kTwoByteStringCid);
1255 args.Add(kExternalOneByteStringCid); 1204 args.Add(kExternalOneByteStringCid);
1256 args.Add(kExternalTwoByteStringCid); 1205 args.Add(kExternalTwoByteStringCid);
1257 CheckClassIds(kClassIdReg, args, is_instance_lbl, is_not_instance_lbl); 1206 CheckClassIds(kClassIdReg, args, is_instance_lbl, is_not_instance_lbl);
1258 } 1207 }
1259 1208
1260
1261 void FlowGraphCompiler::GenerateListTypeCheck(Register kClassIdReg, 1209 void FlowGraphCompiler::GenerateListTypeCheck(Register kClassIdReg,
1262 Label* is_instance_lbl) { 1210 Label* is_instance_lbl) {
1263 assembler()->Comment("ListTypeCheck"); 1211 assembler()->Comment("ListTypeCheck");
1264 Label unknown; 1212 Label unknown;
1265 GrowableArray<intptr_t> args; 1213 GrowableArray<intptr_t> args;
1266 args.Add(kArrayCid); 1214 args.Add(kArrayCid);
1267 args.Add(kGrowableObjectArrayCid); 1215 args.Add(kGrowableObjectArrayCid);
1268 args.Add(kImmutableArrayCid); 1216 args.Add(kImmutableArrayCid);
1269 CheckClassIds(kClassIdReg, args, is_instance_lbl, &unknown); 1217 CheckClassIds(kClassIdReg, args, is_instance_lbl, &unknown);
1270 assembler()->Bind(&unknown); 1218 assembler()->Bind(&unknown);
1271 } 1219 }
1272 #endif // !defined(TARGET_ARCH_DBC) 1220 #endif // !defined(TARGET_ARCH_DBC)
1273 1221
1274 void FlowGraphCompiler::EmitComment(Instruction* instr) { 1222 void FlowGraphCompiler::EmitComment(Instruction* instr) {
1275 if (!FLAG_support_il_printer || !FLAG_support_disassembler) { 1223 if (!FLAG_support_il_printer || !FLAG_support_disassembler) {
1276 return; 1224 return;
1277 } 1225 }
1278 #ifndef PRODUCT 1226 #ifndef PRODUCT
1279 char buffer[256]; 1227 char buffer[256];
1280 BufferFormatter f(buffer, sizeof(buffer)); 1228 BufferFormatter f(buffer, sizeof(buffer));
1281 instr->PrintTo(&f); 1229 instr->PrintTo(&f);
1282 assembler()->Comment("%s", buffer); 1230 assembler()->Comment("%s", buffer);
1283 #endif 1231 #endif
1284 } 1232 }
1285 1233
1286
1287 #if !defined(TARGET_ARCH_DBC) 1234 #if !defined(TARGET_ARCH_DBC)
1288 // TODO(vegorov) enable edge-counters on DBC if we consider them beneficial. 1235 // TODO(vegorov) enable edge-counters on DBC if we consider them beneficial.
1289 bool FlowGraphCompiler::NeedsEdgeCounter(TargetEntryInstr* block) { 1236 bool FlowGraphCompiler::NeedsEdgeCounter(TargetEntryInstr* block) {
1290 // Only emit an edge counter if there is not goto at the end of the block, 1237 // Only emit an edge counter if there is not goto at the end of the block,
1291 // except for the entry block. 1238 // except for the entry block.
1292 return (FLAG_reorder_basic_blocks && 1239 return (FLAG_reorder_basic_blocks &&
1293 (!block->last_instruction()->IsGoto() || 1240 (!block->last_instruction()->IsGoto() ||
1294 (block == flow_graph().graph_entry()->normal_entry()))); 1241 (block == flow_graph().graph_entry()->normal_entry())));
1295 } 1242 }
1296 1243
1297
1298 // Allocate a register that is not explicitly blocked. 1244 // Allocate a register that is not explicitly blocked.
1299 static Register AllocateFreeRegister(bool* blocked_registers) { 1245 static Register AllocateFreeRegister(bool* blocked_registers) {
1300 for (intptr_t regno = 0; regno < kNumberOfCpuRegisters; regno++) { 1246 for (intptr_t regno = 0; regno < kNumberOfCpuRegisters; regno++) {
1301 if (!blocked_registers[regno]) { 1247 if (!blocked_registers[regno]) {
1302 blocked_registers[regno] = true; 1248 blocked_registers[regno] = true;
1303 return static_cast<Register>(regno); 1249 return static_cast<Register>(regno);
1304 } 1250 }
1305 } 1251 }
1306 UNREACHABLE(); 1252 UNREACHABLE();
1307 return kNoRegister; 1253 return kNoRegister;
1308 } 1254 }
1309 #endif 1255 #endif
1310 1256
1311
1312 void FlowGraphCompiler::AllocateRegistersLocally(Instruction* instr) { 1257 void FlowGraphCompiler::AllocateRegistersLocally(Instruction* instr) {
1313 ASSERT(!is_optimizing()); 1258 ASSERT(!is_optimizing());
1314 instr->InitializeLocationSummary(zone(), false); // Not optimizing. 1259 instr->InitializeLocationSummary(zone(), false); // Not optimizing.
1315 1260
1316 // No need to allocate registers based on LocationSummary on DBC as in 1261 // No need to allocate registers based on LocationSummary on DBC as in
1317 // unoptimized mode it's a stack based bytecode just like IR itself. 1262 // unoptimized mode it's a stack based bytecode just like IR itself.
1318 #if !defined(TARGET_ARCH_DBC) 1263 #if !defined(TARGET_ARCH_DBC)
1319 LocationSummary* locs = instr->locs(); 1264 LocationSummary* locs = instr->locs();
1320 1265
1321 bool blocked_registers[kNumberOfCpuRegisters]; 1266 bool blocked_registers[kNumberOfCpuRegisters];
(...skipping 78 matching lines...)
1400 break; 1345 break;
1401 case Location::kRequiresFpuRegister: 1346 case Location::kRequiresFpuRegister:
1402 UNREACHABLE(); 1347 UNREACHABLE();
1403 break; 1348 break;
1404 } 1349 }
1405 locs->set_out(0, result_location); 1350 locs->set_out(0, result_location);
1406 } 1351 }
1407 #endif // !defined(TARGET_ARCH_DBC) 1352 #endif // !defined(TARGET_ARCH_DBC)
1408 } 1353 }
1409 1354
1410
1411 static uword RegMaskBit(Register reg) { 1355 static uword RegMaskBit(Register reg) {
1412 return ((reg) != kNoRegister) ? (1 << (reg)) : 0; 1356 return ((reg) != kNoRegister) ? (1 << (reg)) : 0;
1413 } 1357 }
1414 1358
1415
1416 ParallelMoveResolver::ParallelMoveResolver(FlowGraphCompiler* compiler) 1359 ParallelMoveResolver::ParallelMoveResolver(FlowGraphCompiler* compiler)
1417 : compiler_(compiler), moves_(32) {} 1360 : compiler_(compiler), moves_(32) {}
1418 1361
1419
1420 void ParallelMoveResolver::EmitNativeCode(ParallelMoveInstr* parallel_move) { 1362 void ParallelMoveResolver::EmitNativeCode(ParallelMoveInstr* parallel_move) {
1421 ASSERT(moves_.is_empty()); 1363 ASSERT(moves_.is_empty());
1422 // Build up a worklist of moves. 1364 // Build up a worklist of moves.
1423 BuildInitialMoveList(parallel_move); 1365 BuildInitialMoveList(parallel_move);
1424 1366
1425 for (int i = 0; i < moves_.length(); ++i) { 1367 for (int i = 0; i < moves_.length(); ++i) {
1426 const MoveOperands& move = *moves_[i]; 1368 const MoveOperands& move = *moves_[i];
1427 // Skip constants to perform them last. They don't block other moves 1369 // Skip constants to perform them last. They don't block other moves
1428 // and skipping such moves with register destinations keeps those 1370 // and skipping such moves with register destinations keeps those
1429 // registers free for the whole algorithm. 1371 // registers free for the whole algorithm.
1430 if (!move.IsEliminated() && !move.src().IsConstant()) PerformMove(i); 1372 if (!move.IsEliminated() && !move.src().IsConstant()) PerformMove(i);
1431 } 1373 }
1432 1374
1433 // Perform the moves with constant sources. 1375 // Perform the moves with constant sources.
1434 for (int i = 0; i < moves_.length(); ++i) { 1376 for (int i = 0; i < moves_.length(); ++i) {
1435 const MoveOperands& move = *moves_[i]; 1377 const MoveOperands& move = *moves_[i];
1436 if (!move.IsEliminated()) { 1378 if (!move.IsEliminated()) {
1437 ASSERT(move.src().IsConstant()); 1379 ASSERT(move.src().IsConstant());
1438 compiler_->BeginCodeSourceRange(); 1380 compiler_->BeginCodeSourceRange();
1439 EmitMove(i); 1381 EmitMove(i);
1440 compiler_->EndCodeSourceRange(TokenPosition::kParallelMove); 1382 compiler_->EndCodeSourceRange(TokenPosition::kParallelMove);
1441 } 1383 }
1442 } 1384 }
1443 1385
1444 moves_.Clear(); 1386 moves_.Clear();
1445 } 1387 }
1446 1388
1447
1448 void ParallelMoveResolver::BuildInitialMoveList( 1389 void ParallelMoveResolver::BuildInitialMoveList(
1449 ParallelMoveInstr* parallel_move) { 1390 ParallelMoveInstr* parallel_move) {
1450 // Perform a linear sweep of the moves to add them to the initial list of 1391 // Perform a linear sweep of the moves to add them to the initial list of
1451 // moves to perform, ignoring any move that is redundant (the source is 1392 // moves to perform, ignoring any move that is redundant (the source is
1452 // the same as the destination, the destination is ignored and 1393 // the same as the destination, the destination is ignored and
1453 // unallocated, or the move was already eliminated). 1394 // unallocated, or the move was already eliminated).
1454 for (int i = 0; i < parallel_move->NumMoves(); i++) { 1395 for (int i = 0; i < parallel_move->NumMoves(); i++) {
1455 MoveOperands* move = parallel_move->MoveOperandsAt(i); 1396 MoveOperands* move = parallel_move->MoveOperandsAt(i);
1456 if (!move->IsRedundant()) moves_.Add(move); 1397 if (!move->IsRedundant()) moves_.Add(move);
1457 } 1398 }
1458 } 1399 }
1459 1400
1460
1461 void ParallelMoveResolver::PerformMove(int index) { 1401 void ParallelMoveResolver::PerformMove(int index) {
1462 // Each call to this function performs a move and deletes it from the move 1402 // Each call to this function performs a move and deletes it from the move
1463 // graph. We first recursively perform any move blocking this one. We 1403 // graph. We first recursively perform any move blocking this one. We
1464 // mark a move as "pending" on entry to PerformMove in order to detect 1404 // mark a move as "pending" on entry to PerformMove in order to detect
1465 // cycles in the move graph. We use operand swaps to resolve cycles, 1405 // cycles in the move graph. We use operand swaps to resolve cycles,
1466 // which means that a call to PerformMove could change any source operand 1406 // which means that a call to PerformMove could change any source operand
1467 // in the move graph. 1407 // in the move graph.
1468 1408
1469 ASSERT(!moves_[index]->IsPending()); 1409 ASSERT(!moves_[index]->IsPending());
1470 ASSERT(!moves_[index]->IsRedundant()); 1410 ASSERT(!moves_[index]->IsRedundant());
(...skipping 48 matching lines...)
1519 return; 1459 return;
1520 } 1460 }
1521 } 1461 }
1522 1462
1523 // This move is not blocked. 1463 // This move is not blocked.
1524 compiler_->BeginCodeSourceRange(); 1464 compiler_->BeginCodeSourceRange();
1525 EmitMove(index); 1465 EmitMove(index);
1526 compiler_->EndCodeSourceRange(TokenPosition::kParallelMove); 1466 compiler_->EndCodeSourceRange(TokenPosition::kParallelMove);
1527 } 1467 }
1528 1468
1529
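The comments in EmitNativeCode and PerformMove above describe the standard parallel-move resolution scheme: constant-source moves are saved for last, the rest are resolved by a depth-first walk that marks moves as pending, and a closed cycle is broken with a swap. Below is a minimal stand-alone sketch of that scheme, assuming simplified integer locations and an in-memory slot array instead of the VM's MoveOperands/Location types and real EmitMove/EmitSwap code generation (the constants-last pass from EmitNativeCode is left out):

    #include <cstddef>
    #include <utility>
    #include <vector>

    struct Move {
      int src, dst;
      bool pending = false, done = false;
    };

    struct SketchResolver {
      std::vector<Move> moves;
      std::vector<int> slots = std::vector<int>(8, 0);  // pretend register/stack slots

      bool Blocks(const Move& m, int loc) const { return !m.done && m.src == loc; }

      void Resolve() {
        for (size_t i = 0; i < moves.size(); ++i)
          if (!moves[i].done) Perform(i);
      }

      void Perform(size_t index) {
        moves[index].pending = true;  // mark so a recursive visit can detect a cycle
        const int dst = moves[index].dst;
        // Depth-first: first perform every move that still reads our destination.
        for (size_t i = 0; i < moves.size(); ++i)
          if (i != index && Blocks(moves[i], dst) && !moves[i].pending) Perform(i);
        moves[index].pending = false;
        // A swap below may have redirected our source; the move may now be a no-op.
        if (moves[index].src == dst) { moves[index].done = true; return; }
        // Still blocked by a pending move => we closed a cycle: break it with a swap.
        for (size_t i = 0; i < moves.size(); ++i) {
          if (i != index && Blocks(moves[i], dst)) {
            std::swap(slots[moves[index].src], slots[dst]);
            // Every remaining move that read one of the swapped locations now
            // finds its value in the other location.
            for (size_t j = 0; j < moves.size(); ++j) {
              if (j == index || moves[j].done) continue;
              if (moves[j].src == moves[index].src) moves[j].src = dst;
              else if (moves[j].src == dst) moves[j].src = moves[index].src;
            }
            moves[index].done = true;
            return;
          }
        }
        slots[dst] = slots[moves[index].src];  // not blocked: a plain move
        moves[index].done = true;
      }
    };

With moves {0->1, 1->2, 2->0} this resolves the three-element cycle with two swaps and then eliminates the last move once its source equals its destination, mirroring the flow of PerformMove above.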
1530 bool ParallelMoveResolver::IsScratchLocation(Location loc) { 1469 bool ParallelMoveResolver::IsScratchLocation(Location loc) {
1531 for (int i = 0; i < moves_.length(); ++i) { 1470 for (int i = 0; i < moves_.length(); ++i) {
1532 if (moves_[i]->Blocks(loc)) { 1471 if (moves_[i]->Blocks(loc)) {
1533 return false; 1472 return false;
1534 } 1473 }
1535 } 1474 }
1536 1475
1537 for (int i = 0; i < moves_.length(); ++i) { 1476 for (int i = 0; i < moves_.length(); ++i) {
1538 if (moves_[i]->dest().Equals(loc)) { 1477 if (moves_[i]->dest().Equals(loc)) {
1539 return true; 1478 return true;
1540 } 1479 }
1541 } 1480 }
1542 1481
1543 return false; 1482 return false;
1544 } 1483 }
1545 1484
1546
1547 intptr_t ParallelMoveResolver::AllocateScratchRegister( 1485 intptr_t ParallelMoveResolver::AllocateScratchRegister(
1548 Location::Kind kind, 1486 Location::Kind kind,
1549 uword blocked_mask, 1487 uword blocked_mask,
1550 intptr_t first_free_register, 1488 intptr_t first_free_register,
1551 intptr_t last_free_register, 1489 intptr_t last_free_register,
1552 bool* spilled) { 1490 bool* spilled) {
1553 COMPILE_ASSERT(static_cast<intptr_t>(sizeof(blocked_mask)) * kBitsPerByte >= 1491 COMPILE_ASSERT(static_cast<intptr_t>(sizeof(blocked_mask)) * kBitsPerByte >=
1554 kNumberOfFpuRegisters); 1492 kNumberOfFpuRegisters);
1555 COMPILE_ASSERT(static_cast<intptr_t>(sizeof(blocked_mask)) * kBitsPerByte >= 1493 COMPILE_ASSERT(static_cast<intptr_t>(sizeof(blocked_mask)) * kBitsPerByte >=
1556 kNumberOfCpuRegisters); 1494 kNumberOfCpuRegisters);
(...skipping 14 matching lines...)
1571 break; 1509 break;
1572 } 1510 }
1573 } 1511 }
1574 } else { 1512 } else {
1575 *spilled = false; 1513 *spilled = false;
1576 } 1514 }
1577 1515
1578 return scratch; 1516 return scratch;
1579 } 1517 }
1580 1518
1581
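A small self-contained sketch of the blocked-mask idea used by RegMaskBit and AllocateScratchRegister above: single-bit register masks are OR-ed into a blocked set, and the allocator scans the requested register range for a clear bit, reporting whether the caller must spill. The spill fallback here is a simplification, not a copy of the elided code:

    #include <cstdint>

    // One bit per register; a kNoRegister-style sentinel maps to an empty mask.
    static uint32_t RegMaskBitSketch(int reg) { return (reg >= 0) ? (1u << reg) : 0u; }

    // Returns a register in [first, last] whose bit is clear in blocked_mask.
    // If every register in the range is blocked, *spilled is set and the caller
    // is expected to spill/restore the returned register around its use.
    static int AllocateScratchSketch(uint32_t blocked_mask, int first, int last,
                                     bool* spilled) {
      for (int reg = first; reg <= last; ++reg) {
        if ((blocked_mask & (1u << reg)) == 0u) {
          *spilled = false;
          return reg;
        }
      }
      *spilled = true;  // all blocked: pick one anyway and let the caller spill it
      return first;     // (the real code is more careful about which one it picks)
    }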
1582 ParallelMoveResolver::ScratchFpuRegisterScope::ScratchFpuRegisterScope( 1519 ParallelMoveResolver::ScratchFpuRegisterScope::ScratchFpuRegisterScope(
1583 ParallelMoveResolver* resolver, 1520 ParallelMoveResolver* resolver,
1584 FpuRegister blocked) 1521 FpuRegister blocked)
1585 : resolver_(resolver), reg_(kNoFpuRegister), spilled_(false) { 1522 : resolver_(resolver), reg_(kNoFpuRegister), spilled_(false) {
1586 COMPILE_ASSERT(FpuTMP != kNoFpuRegister); 1523 COMPILE_ASSERT(FpuTMP != kNoFpuRegister);
1587 uword blocked_mask = 1524 uword blocked_mask =
1588 ((blocked != kNoFpuRegister) ? 1 << blocked : 0) | 1 << FpuTMP; 1525 ((blocked != kNoFpuRegister) ? 1 << blocked : 0) | 1 << FpuTMP;
1589 reg_ = static_cast<FpuRegister>(resolver_->AllocateScratchRegister( 1526 reg_ = static_cast<FpuRegister>(resolver_->AllocateScratchRegister(
1590 Location::kFpuRegister, blocked_mask, 0, kNumberOfFpuRegisters - 1, 1527 Location::kFpuRegister, blocked_mask, 0, kNumberOfFpuRegisters - 1,
1591 &spilled_)); 1528 &spilled_));
1592 1529
1593 if (spilled_) { 1530 if (spilled_) {
1594 resolver->SpillFpuScratch(reg_); 1531 resolver->SpillFpuScratch(reg_);
1595 } 1532 }
1596 } 1533 }
1597 1534
1598
1599 ParallelMoveResolver::ScratchFpuRegisterScope::~ScratchFpuRegisterScope() { 1535 ParallelMoveResolver::ScratchFpuRegisterScope::~ScratchFpuRegisterScope() {
1600 if (spilled_) { 1536 if (spilled_) {
1601 resolver_->RestoreFpuScratch(reg_); 1537 resolver_->RestoreFpuScratch(reg_);
1602 } 1538 }
1603 } 1539 }
1604 1540
1605
1606 ParallelMoveResolver::ScratchRegisterScope::ScratchRegisterScope( 1541 ParallelMoveResolver::ScratchRegisterScope::ScratchRegisterScope(
1607 ParallelMoveResolver* resolver, 1542 ParallelMoveResolver* resolver,
1608 Register blocked) 1543 Register blocked)
1609 : resolver_(resolver), reg_(kNoRegister), spilled_(false) { 1544 : resolver_(resolver), reg_(kNoRegister), spilled_(false) {
1610 uword blocked_mask = RegMaskBit(blocked) | kReservedCpuRegisters; 1545 uword blocked_mask = RegMaskBit(blocked) | kReservedCpuRegisters;
1611 if (resolver->compiler_->intrinsic_mode()) { 1546 if (resolver->compiler_->intrinsic_mode()) {
1612 // Block additional registers that must be preserved for intrinsics. 1547 // Block additional registers that must be preserved for intrinsics.
1613 blocked_mask |= RegMaskBit(ARGS_DESC_REG); 1548 blocked_mask |= RegMaskBit(ARGS_DESC_REG);
1614 #if !defined(TARGET_ARCH_IA32) 1549 #if !defined(TARGET_ARCH_IA32)
1615 // Need to preserve CODE_REG to be able to store the PC marker 1550 // Need to preserve CODE_REG to be able to store the PC marker
1616 // and load the pool pointer. 1551 // and load the pool pointer.
1617 blocked_mask |= RegMaskBit(CODE_REG); 1552 blocked_mask |= RegMaskBit(CODE_REG);
1618 #endif 1553 #endif
1619 } 1554 }
1620 reg_ = static_cast<Register>( 1555 reg_ = static_cast<Register>(
1621 resolver_->AllocateScratchRegister(Location::kRegister, blocked_mask, 0, 1556 resolver_->AllocateScratchRegister(Location::kRegister, blocked_mask, 0,
1622 kNumberOfCpuRegisters - 1, &spilled_)); 1557 kNumberOfCpuRegisters - 1, &spilled_));
1623 1558
1624 if (spilled_) { 1559 if (spilled_) {
1625 resolver->SpillScratch(reg_); 1560 resolver->SpillScratch(reg_);
1626 } 1561 }
1627 } 1562 }
1628 1563
1629
1630 ParallelMoveResolver::ScratchRegisterScope::~ScratchRegisterScope() { 1564 ParallelMoveResolver::ScratchRegisterScope::~ScratchRegisterScope() {
1631 if (spilled_) { 1565 if (spilled_) {
1632 resolver_->RestoreScratch(reg_); 1566 resolver_->RestoreScratch(reg_);
1633 } 1567 }
1634 } 1568 }
1635 1569
1636
1637 const ICData* FlowGraphCompiler::GetOrAddInstanceCallICData( 1570 const ICData* FlowGraphCompiler::GetOrAddInstanceCallICData(
1638 intptr_t deopt_id, 1571 intptr_t deopt_id,
1639 const String& target_name, 1572 const String& target_name,
1640 const Array& arguments_descriptor, 1573 const Array& arguments_descriptor,
1641 intptr_t num_args_tested) { 1574 intptr_t num_args_tested) {
1642 if ((deopt_id_to_ic_data_ != NULL) && 1575 if ((deopt_id_to_ic_data_ != NULL) &&
1643 ((*deopt_id_to_ic_data_)[deopt_id] != NULL)) { 1576 ((*deopt_id_to_ic_data_)[deopt_id] != NULL)) {
1644 const ICData* res = (*deopt_id_to_ic_data_)[deopt_id]; 1577 const ICData* res = (*deopt_id_to_ic_data_)[deopt_id];
1645 ASSERT(res->deopt_id() == deopt_id); 1578 ASSERT(res->deopt_id() == deopt_id);
1646 ASSERT(res->target_name() == target_name.raw()); 1579 ASSERT(res->target_name() == target_name.raw());
(...skipping 10 matching lines...)
1657 #if defined(TAG_IC_DATA) 1590 #if defined(TAG_IC_DATA)
1658 ic_data.set_tag(Instruction::kInstanceCall); 1591 ic_data.set_tag(Instruction::kInstanceCall);
1659 #endif 1592 #endif
1660 if (deopt_id_to_ic_data_ != NULL) { 1593 if (deopt_id_to_ic_data_ != NULL) {
1661 (*deopt_id_to_ic_data_)[deopt_id] = &ic_data; 1594 (*deopt_id_to_ic_data_)[deopt_id] = &ic_data;
1662 } 1595 }
1663 ASSERT(!ic_data.is_static_call()); 1596 ASSERT(!ic_data.is_static_call());
1664 return &ic_data; 1597 return &ic_data;
1665 } 1598 }
1666 1599
1667
1668 const ICData* FlowGraphCompiler::GetOrAddStaticCallICData( 1600 const ICData* FlowGraphCompiler::GetOrAddStaticCallICData(
1669 intptr_t deopt_id, 1601 intptr_t deopt_id,
1670 const Function& target, 1602 const Function& target,
1671 const Array& arguments_descriptor, 1603 const Array& arguments_descriptor,
1672 intptr_t num_args_tested) { 1604 intptr_t num_args_tested) {
1673 if ((deopt_id_to_ic_data_ != NULL) && 1605 if ((deopt_id_to_ic_data_ != NULL) &&
1674 ((*deopt_id_to_ic_data_)[deopt_id] != NULL)) { 1606 ((*deopt_id_to_ic_data_)[deopt_id] != NULL)) {
1675 const ICData* res = (*deopt_id_to_ic_data_)[deopt_id]; 1607 const ICData* res = (*deopt_id_to_ic_data_)[deopt_id];
1676 ASSERT(res->deopt_id() == deopt_id); 1608 ASSERT(res->deopt_id() == deopt_id);
1677 ASSERT(res->target_name() == target.name()); 1609 ASSERT(res->target_name() == target.name());
(...skipping 11 matching lines...)
1689 ic_data.AddTarget(target); 1621 ic_data.AddTarget(target);
1690 #if defined(TAG_IC_DATA) 1622 #if defined(TAG_IC_DATA)
1691 ic_data.set_tag(Instruction::kStaticCall); 1623 ic_data.set_tag(Instruction::kStaticCall);
1692 #endif 1624 #endif
1693 if (deopt_id_to_ic_data_ != NULL) { 1625 if (deopt_id_to_ic_data_ != NULL) {
1694 (*deopt_id_to_ic_data_)[deopt_id] = &ic_data; 1626 (*deopt_id_to_ic_data_)[deopt_id] = &ic_data;
1695 } 1627 }
1696 return &ic_data; 1628 return &ic_data;
1697 } 1629 }
1698 1630
1699
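Both GetOrAdd...ICData helpers follow the same lazily-filled, deopt-id-indexed cache pattern: return the existing entry after sanity-checking it against the request, otherwise create one and record it. A stand-alone sketch of that pattern with hypothetical types (the real code verifies more fields and allocates the ICData in the VM heap):

    #include <cassert>
    #include <string>
    #include <vector>

    struct ICDataSketch {
      int deopt_id;
      std::string target_name;
      int num_args_tested;
    };

    // Indexed by deopt id; entries are created on first use and reused afterwards.
    static std::vector<ICDataSketch*> deopt_id_to_ic_data;

    const ICDataSketch* GetOrAddICDataSketch(int deopt_id,
                                             const std::string& target_name,
                                             int num_args_tested) {
      if (deopt_id < static_cast<int>(deopt_id_to_ic_data.size()) &&
          deopt_id_to_ic_data[deopt_id] != nullptr) {
        const ICDataSketch* res = deopt_id_to_ic_data[deopt_id];
        assert(res->deopt_id == deopt_id);  // a cache hit must match the request
        assert(res->target_name == target_name);
        assert(res->num_args_tested == num_args_tested);
        return res;
      }
      if (deopt_id >= static_cast<int>(deopt_id_to_ic_data.size())) {
        deopt_id_to_ic_data.resize(deopt_id + 1, nullptr);
      }
      ICDataSketch* ic_data =
          new ICDataSketch{deopt_id, target_name, num_args_tested};
      deopt_id_to_ic_data[deopt_id] = ic_data;
      return ic_data;
    }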
1700 intptr_t FlowGraphCompiler::GetOptimizationThreshold() const { 1631 intptr_t FlowGraphCompiler::GetOptimizationThreshold() const {
1701 intptr_t threshold; 1632 intptr_t threshold;
1702 if (is_optimizing()) { 1633 if (is_optimizing()) {
1703 threshold = FLAG_reoptimization_counter_threshold; 1634 threshold = FLAG_reoptimization_counter_threshold;
1704 } else if (parsed_function_.function().IsIrregexpFunction()) { 1635 } else if (parsed_function_.function().IsIrregexpFunction()) {
1705 threshold = FLAG_regexp_optimization_counter_threshold; 1636 threshold = FLAG_regexp_optimization_counter_threshold;
1706 } else { 1637 } else {
1707 const intptr_t basic_blocks = flow_graph().preorder().length(); 1638 const intptr_t basic_blocks = flow_graph().preorder().length();
1708 ASSERT(basic_blocks > 0); 1639 ASSERT(basic_blocks > 0);
1709 threshold = FLAG_optimization_counter_scale * basic_blocks + 1640 threshold = FLAG_optimization_counter_scale * basic_blocks +
1710 FLAG_min_optimization_counter_threshold; 1641 FLAG_min_optimization_counter_threshold;
1711 if (threshold > FLAG_optimization_counter_threshold) { 1642 if (threshold > FLAG_optimization_counter_threshold) {
1712 threshold = FLAG_optimization_counter_threshold; 1643 threshold = FLAG_optimization_counter_threshold;
1713 } 1644 }
1714 } 1645 }
1715 return threshold; 1646 return threshold;
1716 } 1647 }
1717 1648
1718
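For the common unoptimized, non-irregexp case the threshold grows with the number of basic blocks and is clamped to the hard cap. A worked example with hypothetical flag values (not necessarily the VM's actual defaults):

    #include <algorithm>
    #include <cstdio>

    int main() {
      const int optimization_counter_scale = 10;        // per-basic-block scale (assumed)
      const int min_optimization_counter_threshold = 100;
      const int optimization_counter_threshold = 1000;  // hard cap (assumed)
      const int basic_blocks = 25;                       // flow_graph().preorder().length()

      int threshold = optimization_counter_scale * basic_blocks +
                      min_optimization_counter_threshold;       // 10 * 25 + 100 = 350
      threshold = std::min(threshold, optimization_counter_threshold);
      std::printf("threshold = %d\n", threshold);                // prints 350
      return 0;
    }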
1719 const Class& FlowGraphCompiler::BoxClassFor(Representation rep) { 1649 const Class& FlowGraphCompiler::BoxClassFor(Representation rep) {
1720 switch (rep) { 1650 switch (rep) {
1721 case kUnboxedDouble: 1651 case kUnboxedDouble:
1722 return double_class(); 1652 return double_class();
1723 case kUnboxedFloat32x4: 1653 case kUnboxedFloat32x4:
1724 return float32x4_class(); 1654 return float32x4_class();
1725 case kUnboxedFloat64x2: 1655 case kUnboxedFloat64x2:
1726 return float64x2_class(); 1656 return float64x2_class();
1727 case kUnboxedInt32x4: 1657 case kUnboxedInt32x4:
1728 return int32x4_class(); 1658 return int32x4_class();
1729 case kUnboxedMint: 1659 case kUnboxedMint:
1730 return mint_class(); 1660 return mint_class();
1731 default: 1661 default:
1732 UNREACHABLE(); 1662 UNREACHABLE();
1733 return Class::ZoneHandle(); 1663 return Class::ZoneHandle();
1734 } 1664 }
1735 } 1665 }
1736 1666
1737
1738 void FlowGraphCompiler::BeginCodeSourceRange() { 1667 void FlowGraphCompiler::BeginCodeSourceRange() {
1739 code_source_map_builder_->BeginCodeSourceRange(assembler()->CodeSize()); 1668 code_source_map_builder_->BeginCodeSourceRange(assembler()->CodeSize());
1740 } 1669 }
1741 1670
1742
1743 void FlowGraphCompiler::EndCodeSourceRange(TokenPosition token_pos) { 1671 void FlowGraphCompiler::EndCodeSourceRange(TokenPosition token_pos) {
1744 code_source_map_builder_->EndCodeSourceRange(assembler()->CodeSize(), 1672 code_source_map_builder_->EndCodeSourceRange(assembler()->CodeSize(),
1745 token_pos); 1673 token_pos);
1746 } 1674 }
1747 1675
1748
1749 const CallTargets* FlowGraphCompiler::ResolveCallTargetsForReceiverCid( 1676 const CallTargets* FlowGraphCompiler::ResolveCallTargetsForReceiverCid(
1750 intptr_t cid, 1677 intptr_t cid,
1751 const String& selector, 1678 const String& selector,
1752 const Array& args_desc_array) { 1679 const Array& args_desc_array) {
1753 Zone* zone = Thread::Current()->zone(); 1680 Zone* zone = Thread::Current()->zone();
1754 1681
1755 ArgumentsDescriptor args_desc(args_desc_array); 1682 ArgumentsDescriptor args_desc(args_desc_array);
1756 1683
1757 Function& fn = Function::ZoneHandle(zone); 1684 Function& fn = Function::ZoneHandle(zone);
1758 if (!LookupMethodFor(cid, selector, args_desc, &fn)) return NULL; 1685 if (!LookupMethodFor(cid, selector, args_desc, &fn)) return NULL;
1759 1686
1760 CallTargets* targets = new (zone) CallTargets(zone); 1687 CallTargets* targets = new (zone) CallTargets(zone);
1761 targets->Add(new (zone) TargetInfo(cid, cid, &fn, /* count = */ 1)); 1688 targets->Add(new (zone) TargetInfo(cid, cid, &fn, /* count = */ 1));
1762 1689
1763 return targets; 1690 return targets;
1764 } 1691 }
1765 1692
1766
1767 bool FlowGraphCompiler::LookupMethodFor(int class_id, 1693 bool FlowGraphCompiler::LookupMethodFor(int class_id,
1768 const String& name, 1694 const String& name,
1769 const ArgumentsDescriptor& args_desc, 1695 const ArgumentsDescriptor& args_desc,
1770 Function* fn_return, 1696 Function* fn_return,
1771 bool* class_is_abstract_return) { 1697 bool* class_is_abstract_return) {
1772 Thread* thread = Thread::Current(); 1698 Thread* thread = Thread::Current();
1773 Isolate* isolate = thread->isolate(); 1699 Isolate* isolate = thread->isolate();
1774 Zone* zone = thread->zone(); 1700 Zone* zone = thread->zone();
1775 if (class_id < 0) return false; 1701 if (class_id < 0) return false;
1776 if (class_id >= isolate->class_table()->NumCids()) return false; 1702 if (class_id >= isolate->class_table()->NumCids()) return false;
(...skipping 10 matching lines...)
1787 } 1713 }
1788 const bool allow_add = false; 1714 const bool allow_add = false;
1789 Function& target_function = 1715 Function& target_function =
1790 Function::Handle(zone, Resolver::ResolveDynamicForReceiverClass( 1716 Function::Handle(zone, Resolver::ResolveDynamicForReceiverClass(
1791 cls, name, args_desc, allow_add)); 1717 cls, name, args_desc, allow_add));
1792 if (target_function.IsNull()) return false; 1718 if (target_function.IsNull()) return false;
1793 *fn_return ^= target_function.raw(); 1719 *fn_return ^= target_function.raw();
1794 return true; 1720 return true;
1795 } 1721 }
1796 1722
1797
1798 #if !defined(TARGET_ARCH_DBC) 1723 #if !defined(TARGET_ARCH_DBC)
1799 // DBC emits calls very differently from other architectures due to its 1724 // DBC emits calls very differently from other architectures due to its
1800 // interpreted nature. 1725 // interpreted nature.
1801 void FlowGraphCompiler::EmitPolymorphicInstanceCall( 1726 void FlowGraphCompiler::EmitPolymorphicInstanceCall(
1802 const CallTargets& targets, 1727 const CallTargets& targets,
1803 const InstanceCallInstr& original_call, 1728 const InstanceCallInstr& original_call,
1804 ArgumentsInfo args_info, 1729 ArgumentsInfo args_info,
1805 intptr_t deopt_id, 1730 intptr_t deopt_id,
1806 TokenPosition token_pos, 1731 TokenPosition token_pos,
1807 LocationSummary* locs, 1732 LocationSummary* locs,
(...skipping 18 matching lines...)
1826 assembler()->Bind(&ok); 1751 assembler()->Bind(&ok);
1827 } else { 1752 } else {
1828 const ICData& unary_checks = ICData::ZoneHandle( 1753 const ICData& unary_checks = ICData::ZoneHandle(
1829 zone(), original_call.ic_data()->AsUnaryClassChecks()); 1754 zone(), original_call.ic_data()->AsUnaryClassChecks());
1830 EmitSwitchableInstanceCall(unary_checks, args_info.pushed_argc, deopt_id, 1755 EmitSwitchableInstanceCall(unary_checks, args_info.pushed_argc, deopt_id,
1831 token_pos, locs); 1756 token_pos, locs);
1832 } 1757 }
1833 } 1758 }
1834 } 1759 }
1835 1760
1836
1837 #define __ assembler()-> 1761 #define __ assembler()->
1838 void FlowGraphCompiler::EmitTestAndCall(const CallTargets& targets, 1762 void FlowGraphCompiler::EmitTestAndCall(const CallTargets& targets,
1839 const String& function_name, 1763 const String& function_name,
1840 ArgumentsInfo args_info, 1764 ArgumentsInfo args_info,
1841 Label* failed, 1765 Label* failed,
1842 Label* match_found, 1766 Label* match_found,
1843 intptr_t deopt_id, 1767 intptr_t deopt_id,
1844 TokenPosition token_index, 1768 TokenPosition token_index,
1845 LocationSummary* locs, 1769 LocationSummary* locs,
1846 bool complete, 1770 bool complete,
(...skipping 130 matching lines...)
1977 1901
1978 ASSERT(!instr->locs()->can_call() || FrameStateIsSafeToCall()); 1902 ASSERT(!instr->locs()->can_call() || FrameStateIsSafeToCall());
1979 1903
1980 FrameStatePop(instr->ArgumentCount()); 1904 FrameStatePop(instr->ArgumentCount());
1981 Definition* defn = instr->AsDefinition(); 1905 Definition* defn = instr->AsDefinition();
1982 if ((defn != NULL) && defn->HasTemp()) { 1906 if ((defn != NULL) && defn->HasTemp()) {
1983 FrameStatePush(defn); 1907 FrameStatePush(defn);
1984 } 1908 }
1985 } 1909 }
1986 1910
1987
1988 void FlowGraphCompiler::FrameStatePush(Definition* defn) { 1911 void FlowGraphCompiler::FrameStatePush(Definition* defn) {
1989 Representation rep = defn->representation(); 1912 Representation rep = defn->representation();
1990 if ((rep == kUnboxedDouble) || (rep == kUnboxedFloat64x2) || 1913 if ((rep == kUnboxedDouble) || (rep == kUnboxedFloat64x2) ||
1991 (rep == kUnboxedFloat32x4)) { 1914 (rep == kUnboxedFloat32x4)) {
1992 // LoadField instruction lies about its representation in the unoptimized 1915 // LoadField instruction lies about its representation in the unoptimized
1993 // code because Definition::representation() can't depend on the type of 1916 // code because Definition::representation() can't depend on the type of
1994 // compilation but MakeLocationSummary and EmitNativeCode can. 1917 // compilation but MakeLocationSummary and EmitNativeCode can.
1995 ASSERT(defn->IsLoadField() && defn->AsLoadField()->IsUnboxedLoad()); 1918 ASSERT(defn->IsLoadField() && defn->AsLoadField()->IsUnboxedLoad());
1996 ASSERT(defn->locs()->out(0).IsRegister()); 1919 ASSERT(defn->locs()->out(0).IsRegister());
1997 rep = kTagged; 1920 rep = kTagged;
1998 } 1921 }
1999 ASSERT(!is_optimizing()); 1922 ASSERT(!is_optimizing());
2000 ASSERT((rep == kTagged) || (rep == kUntagged)); 1923 ASSERT((rep == kTagged) || (rep == kUntagged));
2001 ASSERT(rep != kUntagged || flow_graph_.IsIrregexpFunction()); 1924 ASSERT(rep != kUntagged || flow_graph_.IsIrregexpFunction());
2002 frame_state_.Add(rep); 1925 frame_state_.Add(rep);
2003 } 1926 }
2004 1927
2005
2006 void FlowGraphCompiler::FrameStatePop(intptr_t count) { 1928 void FlowGraphCompiler::FrameStatePop(intptr_t count) {
2007 ASSERT(!is_optimizing()); 1929 ASSERT(!is_optimizing());
2008 frame_state_.TruncateTo( 1930 frame_state_.TruncateTo(
2009 Utils::Maximum(static_cast<intptr_t>(0), frame_state_.length() - count)); 1931 Utils::Maximum(static_cast<intptr_t>(0), frame_state_.length() - count));
2010 } 1932 }
2011 1933
2012
2013 bool FlowGraphCompiler::FrameStateIsSafeToCall() { 1934 bool FlowGraphCompiler::FrameStateIsSafeToCall() {
2014 ASSERT(!is_optimizing()); 1935 ASSERT(!is_optimizing());
2015 for (intptr_t i = 0; i < frame_state_.length(); i++) { 1936 for (intptr_t i = 0; i < frame_state_.length(); i++) {
2016 if (frame_state_[i] != kTagged) { 1937 if (frame_state_[i] != kTagged) {
2017 return false; 1938 return false;
2018 } 1939 }
2019 } 1940 }
2020 return true; 1941 return true;
2021 } 1942 }
2022 1943
2023
2024 void FlowGraphCompiler::FrameStateClear() { 1944 void FlowGraphCompiler::FrameStateClear() {
2025 ASSERT(!is_optimizing()); 1945 ASSERT(!is_optimizing());
2026 frame_state_.TruncateTo(0); 1946 frame_state_.TruncateTo(0);
2027 } 1947 }
2028 #endif // defined(DEBUG) && !defined(TARGET_ARCH_DBC) 1948 #endif // defined(DEBUG) && !defined(TARGET_ARCH_DBC)
2029 1949
2030
2031 } // namespace dart 1950 } // namespace dart