OLD | NEW |
---|---|
1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/flow_graph_allocator.h" | 5 #include "vm/flow_graph_allocator.h" |
6 | 6 |
7 #include "vm/bit_vector.h" | 7 #include "vm/bit_vector.h" |
8 #include "vm/intermediate_language.h" | 8 #include "vm/intermediate_language.h" |
9 #include "vm/il_printer.h" | 9 #include "vm/il_printer.h" |
10 #include "vm/flow_graph_builder.h" | 10 #include "vm/flow_graph_builder.h" |
(...skipping 10 matching lines...)
21 #define TRACE_ALLOC(m) do { \ | 21 #define TRACE_ALLOC(m) do { \ |
22 if (FLAG_trace_ssa_allocator) OS::Print m ; \ | 22 if (FLAG_trace_ssa_allocator) OS::Print m ; \ |
23 } while (0) | 23 } while (0) |
24 #else | 24 #else |
25 #define TRACE_ALLOC(m) | 25 #define TRACE_ALLOC(m) |
26 #endif | 26 #endif |
27 | 27 |
28 | 28 |
29 static const intptr_t kNoVirtualRegister = -1; | 29 static const intptr_t kNoVirtualRegister = -1; |
30 static const intptr_t kTempVirtualRegister = -2; | 30 static const intptr_t kTempVirtualRegister = -2; |
31 static UseInterval* const kPermanentlyBlocked = | |
32 reinterpret_cast<UseInterval*>(-1); | |
33 static const intptr_t kIllegalPosition = -1; | 31 static const intptr_t kIllegalPosition = -1; |
34 static const intptr_t kMaxPosition = 0x7FFFFFFF; | 32 static const intptr_t kMaxPosition = 0x7FFFFFFF; |
35 | 33 |
36 | 34 |
35 static intptr_t MinPosition(intptr_t a, intptr_t b) { | |
36 return (a < b) ? a : b; | |
37 } | |
38 | |
39 | |
37 FlowGraphAllocator::FlowGraphAllocator( | 40 FlowGraphAllocator::FlowGraphAllocator( |
38 const GrowableArray<BlockEntryInstr*>& block_order, | 41 const GrowableArray<BlockEntryInstr*>& block_order, |
39 FlowGraphBuilder* builder) | 42 FlowGraphBuilder* builder) |
40 : builder_(builder), | 43 : builder_(builder), |
41 block_order_(block_order), | 44 block_order_(block_order), |
42 postorder_(builder->postorder_block_entries()), | 45 postorder_(builder->postorder_block_entries()), |
43 live_out_(block_order.length()), | 46 live_out_(block_order.length()), |
44 kill_(block_order.length()), | 47 kill_(block_order.length()), |
45 live_in_(block_order.length()), | 48 live_in_(block_order.length()), |
46 vreg_count_(builder->current_ssa_temp_index()), | 49 vreg_count_(builder->current_ssa_temp_index()), |
47 live_ranges_(builder->current_ssa_temp_index()) { | 50 live_ranges_(builder->current_ssa_temp_index()) { |
48 for (intptr_t i = 0; i < vreg_count_; i++) live_ranges_.Add(NULL); | 51 for (intptr_t i = 0; i < vreg_count_; i++) live_ranges_.Add(NULL); |
49 | 52 |
50 for (intptr_t reg = 0; reg < kNumberOfCpuRegisters; reg++) { | 53 for (intptr_t reg = 0; reg < kNumberOfCpuRegisters; reg++) { |
51 cpu_regs_[reg] = NULL; | 54 blocked_cpu_regs_[reg] = false; |
52 } | 55 } |
53 | 56 |
54 cpu_regs_[CTX] = kPermanentlyBlocked; | 57 blocked_cpu_regs_[CTX] = true; |
55 if (TMP != kNoRegister) { | 58 if (TMP != kNoRegister) { |
56 cpu_regs_[TMP] = kPermanentlyBlocked; | 59 blocked_cpu_regs_[TMP] = true; |
57 } | 60 } |
58 cpu_regs_[SPREG] = kPermanentlyBlocked; | 61 blocked_cpu_regs_[SPREG] = true; |
59 cpu_regs_[FPREG] = kPermanentlyBlocked; | 62 blocked_cpu_regs_[FPREG] = true; |
60 } | 63 } |
61 | 64 |
62 | 65 |
63 void FlowGraphAllocator::ComputeInitialSets() { | 66 void FlowGraphAllocator::ComputeInitialSets() { |
64 const intptr_t block_count = postorder_.length(); | 67 const intptr_t block_count = postorder_.length(); |
65 for (intptr_t i = 0; i < block_count; i++) { | 68 for (intptr_t i = 0; i < block_count; i++) { |
66 BlockEntryInstr* block = postorder_[i]; | 69 BlockEntryInstr* block = postorder_[i]; |
67 | 70 |
68 BitVector* kill = kill_[i]; | 71 BitVector* kill = kill_[i]; |
69 BitVector* live_in = live_in_[i]; | 72 BitVector* live_in = live_in_[i]; |
(...skipping 136 matching lines...)
206 } | 209 } |
207 OS::Print("\n"); | 210 OS::Print("\n"); |
208 | 211 |
209 PrintBitVector(" live out", live_out_[i]); | 212 PrintBitVector(" live out", live_out_[i]); |
210 PrintBitVector(" kill", kill_[i]); | 213 PrintBitVector(" kill", kill_[i]); |
211 PrintBitVector(" live in", live_in_[i]); | 214 PrintBitVector(" live in", live_in_[i]); |
212 } | 215 } |
213 } | 216 } |
214 | 217 |
215 | 218 |
216 void UseInterval::Print() { | 219 void LiveRange::AddUse(intptr_t pos, |
217 OS::Print(" [%d, %d) uses {", start_, end_); | 220 Location* location_slot) { |
218 for (UsePosition* use_pos = uses_; | 221 ASSERT((first_use_interval_->start_ <= pos) && |
219 use_pos != NULL && use_pos->pos() <= end(); | 222 (pos <= first_use_interval_->end_)); |
220 use_pos = use_pos->next()) { | |
221 if (use_pos != uses_) OS::Print(", "); | |
222 OS::Print("%d", use_pos->pos()); | |
223 } | |
224 OS::Print("}\n"); | |
225 } | |
226 | |
227 | |
228 void UseInterval::AddUse(Instruction* instr, | |
229 intptr_t pos, | |
230 Location* location_slot) { | |
231 ASSERT((start_ <= pos) && (pos <= end_)); | |
232 ASSERT((instr == NULL) || (instr->lifetime_position() == pos)); | |
233 if ((uses_ != NULL) && (uses_->pos() == pos)) { | 223 if ((uses_ != NULL) && (uses_->pos() == pos)) { |
234 if ((location_slot == NULL) || (uses_->location_slot() == location_slot)) { | 224 if ((location_slot == NULL) || (uses_->location_slot() == location_slot)) { |
235 return; | 225 return; |
236 } else if ((uses_->location_slot() == NULL) && (instr == NULL)) { | 226 } else if (uses_->location_slot() == NULL) { |
237 uses_->set_location_slot(location_slot); | 227 uses_->set_location_slot(location_slot); |
238 return; | 228 return; |
239 } | 229 } |
240 } | 230 } |
241 uses_ = new UsePosition(instr, pos, uses_, location_slot); | 231 uses_ = new UsePosition(pos, uses_, location_slot); |
242 } | |
243 | |
244 | |
245 void LiveRange::Print() { | |
246 OS::Print("vreg %d live intervals:\n", vreg_); | |
247 for (UseInterval* interval = head_; | |
248 interval != NULL; | |
249 interval = interval->next_) { | |
250 interval->Print(); | |
251 } | |
252 } | 232 } |
253 | 233 |
254 | 234 |
255 void LiveRange::AddUseInterval(intptr_t start, intptr_t end) { | 235 void LiveRange::AddUseInterval(intptr_t start, intptr_t end) { |
256 if ((head_ != NULL) && (head_->start_ == end)) { | 236 if ((first_use_interval_ != NULL) && (first_use_interval_->start_ == end)) { |
257 head_->start_ = start; | 237 first_use_interval_->start_ = start; |
258 return; | 238 return; |
259 } | 239 } |
260 | 240 |
261 head_ = new UseInterval(vreg_, start, end, head_); | 241 first_use_interval_ = new UseInterval(start, end, first_use_interval_); |
242 if (last_use_interval_ == NULL) last_use_interval_ = first_use_interval_; | |
262 } | 243 } |
263 | 244 |
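Note: AddUseInterval is called while blocks are walked in reverse order, so intervals are prepended to the list; when a new interval ends exactly where the current head starts, the two are coalesced by extending the head instead of allocating a node. A minimal sketch of that behavior, assuming simplified stand-in types rather than the VM's classes:

    // Sketch of the prepend-and-coalesce behavior (simplified types).
    #include <cassert>

    struct Interval {
      int start;
      int end;
      Interval* next;
    };

    struct Range {
      Interval* head = nullptr;

      void AddUseInterval(int start, int end) {
        // Blocks are processed in reverse, so the head is always the
        // earliest interval seen so far; merge when the new interval
        // ends exactly where the head begins.
        if ((head != nullptr) && (head->start == end)) {
          head->start = start;
          return;
        }
        head = new Interval{start, end, head};
      }
    };

    int main() {
      Range r;
      r.AddUseInterval(12, 16);  // later block, processed first
      r.AddUseInterval(4, 8);    // lifetime hole between 8 and 12
      r.AddUseInterval(0, 4);    // touches [4, 8), so it is coalesced
      assert((r.head->start == 0) && (r.head->end == 8));
      assert((r.head->next->start == 12) && (r.head->next->end == 16));
      return 0;
    }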
264 | 245 |
265 void LiveRange::DefineAt(Instruction* instr, intptr_t pos, Location* loc) { | 246 void LiveRange::DefineAt(intptr_t pos, Location* loc) { |
266 if (head_ != NULL) { | 247 if (first_use_interval_ != NULL) { |
267 ASSERT(head_->start_ <= pos); | 248 ASSERT(first_use_interval_->start_ <= pos); |
268 head_->start_ = pos; | 249 first_use_interval_->start_ = pos; |
269 } else { | 250 } else { |
270 // Definition without a use. | 251 // Definition without a use. |
271 head_ = new UseInterval(vreg_, pos, pos + 1, NULL); | 252 first_use_interval_ = new UseInterval(pos, pos + 1, NULL); |
272 } | 253 } |
273 head_->AddUse(instr, pos, loc); | 254 |
255 AddUse(pos, loc); | |
274 } | 256 } |
275 | 257 |
276 | 258 |
277 // TODO(vegorov): encode use_at_start vs. use_at_end in the location itself? | 259 // TODO(vegorov): encode use_at_start vs. use_at_end in the location itself? |
278 void LiveRange::UseAt(Instruction* instr, | 260 void LiveRange::UseAt(intptr_t def, intptr_t use, |
279 intptr_t def, intptr_t use, | |
280 bool use_at_end, | 261 bool use_at_end, |
281 Location* loc) { | 262 Location* loc) { |
282 if (head_ == NULL || head_->start_ != def) { | 263 if (first_use_interval_ == NULL || first_use_interval_->start_ != def) { |
srdjan 2012/07/19 22:54:39: Add parenthesis.
283 AddUseInterval(def, use + (use_at_end ? 1 : 0)); | 264 AddUseInterval(def, use + (use_at_end ? 1 : 0)); |
284 } | 265 } |
285 head_->AddUse(instr, use, loc); | 266 AddUse(use, loc); |
286 } | 267 } |
287 | 268 |
288 | 269 |
289 LiveRange* FlowGraphAllocator::GetLiveRange(intptr_t vreg) { | 270 LiveRange* FlowGraphAllocator::GetLiveRange(intptr_t vreg) { |
290 if (live_ranges_[vreg] == NULL) { | 271 if (live_ranges_[vreg] == NULL) { |
291 live_ranges_[vreg] = new LiveRange(vreg); | 272 live_ranges_[vreg] = new LiveRange(vreg); |
292 } | 273 } |
293 return live_ranges_[vreg]; | 274 return live_ranges_[vreg]; |
294 } | 275 } |
295 | 276 |
296 | 277 |
297 void FlowGraphAllocator::BlockLocation(Location loc, intptr_t pos) { | 278 void FlowGraphAllocator::BlockLocation(Location loc, intptr_t pos) { |
298 ASSERT(loc.IsRegister()); | 279 ASSERT(loc.IsRegister()); |
299 const Register reg = loc.reg(); | 280 const Register reg = loc.reg(); |
300 UseInterval* last = cpu_regs_[reg]; | 281 if (blocked_cpu_regs_[reg]) return; |
301 if (last == kPermanentlyBlocked) return; | 282 if (cpu_regs_[reg].length() == 0) { |
302 if ((last != NULL) && (last->start() == pos)) return; | 283 cpu_regs_[reg].Add(new LiveRange(kNoVirtualRegister)); |
303 cpu_regs_[reg] = new UseInterval(kNoVirtualRegister, pos, pos + 1, last); | 284 } |
285 cpu_regs_[reg][0]->AddUseInterval(pos, pos + 1); | |
304 } | 286 } |
305 | 287 |
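Note: in the NEW version a fixed-register constraint is no longer a special UseInterval chain; BlockLocation parks an ordinary LiveRange with kNoVirtualRegister in cpu_regs_[reg] and extends it with one-position intervals, while registers the allocator must never touch (CTX, TMP, SP, FP) are flagged in blocked_cpu_regs_ and ignored. A rough sketch of that bookkeeping, using hypothetical simplified types:

    // Sketch of the blocking bookkeeping: a fixed-register use at pos
    // reserves [pos, pos + 1) on that register, while permanently
    // blocked registers ignore such requests entirely.
    #include <cstdio>
    #include <vector>

    struct Interval { int start; int end; };  // half-open [start, end)

    int main() {
      const int kNumRegs = 4;
      std::vector<Interval> blocking[kNumRegs];
      const bool permanently_blocked[kNumRegs] = {false, false, false, true};

      auto BlockLocation = [&](int reg, int pos) {
        if (permanently_blocked[reg]) return;  // e.g. CTX, TMP, SP, FP
        blocking[reg].push_back(Interval{pos, pos + 1});
      };

      BlockLocation(0, 6);  // fixed use of register 0 at position 6
      BlockLocation(3, 6);  // ignored: register 3 is permanently blocked

      for (int r = 0; r < kNumRegs; r++) {
        for (const Interval& i : blocking[r]) {
          std::printf("r%d blocked over [%d, %d)\n", r, i.start, i.end);
        }
      }
      return 0;
    }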
306 | 288 |
307 void FlowGraphAllocator::Define(Instruction* instr, | 289 void FlowGraphAllocator::Define(intptr_t pos, |
308 intptr_t pos, | |
309 intptr_t vreg, | 290 intptr_t vreg, |
310 Location* loc) { | 291 Location* loc) { |
311 LiveRange* range = GetLiveRange(vreg); | 292 LiveRange* range = GetLiveRange(vreg); |
312 ASSERT(loc != NULL); | 293 ASSERT(loc != NULL); |
313 if (loc->IsRegister()) { | 294 if (loc->IsRegister()) { |
314 BlockLocation(*loc, pos); | 295 BlockLocation(*loc, pos); |
315 range->DefineAt(instr, pos + 1, loc); | 296 range->DefineAt(pos + 1, loc); |
316 } else if (loc->IsUnallocated()) { | 297 } else if (loc->IsUnallocated()) { |
317 range->DefineAt(instr, pos, loc); | 298 range->DefineAt(pos, loc); |
318 } else { | 299 } else { |
319 UNREACHABLE(); | 300 UNREACHABLE(); |
320 } | 301 } |
321 | 302 AddToUnallocated(range); |
322 AddToUnallocated(range->head()); | |
323 } | 303 } |
324 | 304 |
325 | 305 |
326 void FlowGraphAllocator::UseValue(Instruction* instr, | 306 void FlowGraphAllocator::UseValue(intptr_t def_pos, |
327 intptr_t def_pos, | |
328 intptr_t use_pos, | 307 intptr_t use_pos, |
329 intptr_t vreg, | 308 intptr_t vreg, |
330 Location* loc, | 309 Location* loc, |
331 bool use_at_end) { | 310 bool use_at_end) { |
332 LiveRange* range = GetLiveRange(vreg); | 311 LiveRange* range = GetLiveRange(vreg); |
333 if (loc == NULL) { | 312 if (loc == NULL) { |
334 range->UseAt(NULL, def_pos, use_pos, true, loc); | 313 range->UseAt(def_pos, use_pos, true, loc); |
335 } else if (loc->IsRegister()) { | 314 } else if (loc->IsRegister()) { |
336 // We have a fixed use. | 315 // We have a fixed use. |
337 BlockLocation(*loc, use_pos); | 316 BlockLocation(*loc, use_pos); |
338 range->UseAt(instr, def_pos, use_pos, false, loc); | 317 range->UseAt(def_pos, use_pos, false, loc); |
339 } else if (loc->IsUnallocated()) { | 318 } else if (loc->IsUnallocated()) { |
340 ASSERT(loc->policy() == Location::kRequiresRegister); | 319 range->UseAt(def_pos, use_pos, use_at_end, loc); |
341 range->UseAt(use_at_end ? NULL : instr, def_pos, use_pos, use_at_end, loc); | |
342 } | 320 } |
343 } | 321 } |
344 | 322 |
345 | 323 |
346 static void PrintChain(UseInterval* chain) { | 324 void LiveRange::Print() { |
347 if (chain == kPermanentlyBlocked) { | 325 OS::Print(" live range v%d [%d, %d)\n", vreg(), Start(), End()); |
348 OS::Print(" not for allocation\n"); | 326 UsePosition* use_pos = uses_; |
349 return; | 327 for (UseInterval* interval = first_use_interval_; |
328 interval != NULL; | |
329 interval = interval->next()) { | |
330 OS::Print(" use interval [%d, %d)\n", | |
331 interval->start(), | |
332 interval->end()); | |
333 while ((use_pos != NULL) && (use_pos->pos() <= interval->end())) { | |
334 OS::Print(" use at %d as %s\n", | |
335 use_pos->pos(), | |
336 (use_pos->location_slot() == NULL) | |
337 ? "-" : use_pos->location_slot()->Name()); | |
338 use_pos = use_pos->next(); | |
339 } | |
350 } | 340 } |
351 | 341 |
352 while (chain != NULL) { | 342 if (next_sibling() != NULL) { |
353 chain->Print(); | 343 next_sibling()->Print(); |
354 chain = chain->next(); | |
355 } | 344 } |
356 } | 345 } |
357 | 346 |
358 | 347 |
359 void FlowGraphAllocator::PrintLiveRanges() { | 348 void FlowGraphAllocator::PrintLiveRanges() { |
360 for (intptr_t i = 0; i < unallocated_.length(); i++) { | 349 for (intptr_t i = 0; i < unallocated_.length(); i++) { |
361 OS::Print("unallocated chain for vr%d\n", unallocated_[i]->vreg()); | 350 unallocated_[i]->Print(); |
362 PrintChain(unallocated_[i]); | |
363 } | 351 } |
364 | 352 |
365 for (intptr_t reg = 0; reg < kNumberOfCpuRegisters; reg++) { | 353 for (intptr_t reg = 0; reg < kNumberOfCpuRegisters; reg++) { |
366 OS::Print("blocking chain for %s\n", | 354 if (blocked_cpu_regs_[reg]) continue; |
355 if (cpu_regs_[reg].length() == 0) continue; | |
356 | |
357 ASSERT(cpu_regs_[reg].length() == 1); | |
358 OS::Print("blocking live range for %s\n", | |
367 Location::RegisterLocation(static_cast<Register>(reg)).Name()); | 359 Location::RegisterLocation(static_cast<Register>(reg)).Name()); |
368 PrintChain(cpu_regs_[reg]); | 360 cpu_regs_[reg][0]->Print(); |
369 } | 361 } |
370 } | 362 } |
371 | 363 |
372 | 364 |
373 void FlowGraphAllocator::BuildLiveRanges() { | 365 void FlowGraphAllocator::BuildLiveRanges() { |
374 NumberInstructions(); | 366 NumberInstructions(); |
375 | 367 |
376 const intptr_t block_count = postorder_.length(); | 368 const intptr_t block_count = postorder_.length(); |
377 for (intptr_t i = 0; i < block_count; i++) { | 369 for (intptr_t i = 0; i < block_count; i++) { |
378 BlockEntryInstr* block = postorder_[i]; | 370 BlockEntryInstr* block = postorder_[i]; |
(...skipping 33 matching lines...)
412 // that register allocator will populate source slot with location of | 404 // that register allocator will populate source slot with location of |
413 // the appropriate SSA value. | 405 // the appropriate SSA value. |
414 ZoneGrowableArray<PhiInstr*>* phis = join->phis(); | 406 ZoneGrowableArray<PhiInstr*>* phis = join->phis(); |
415 intptr_t move_idx = 0; | 407 intptr_t move_idx = 0; |
416 for (intptr_t j = 0; j < phis->length(); j++) { | 408 for (intptr_t j = 0; j < phis->length(); j++) { |
417 PhiInstr* phi = (*phis)[j]; | 409 PhiInstr* phi = (*phis)[j]; |
418 if (phi == NULL) continue; | 410 if (phi == NULL) continue; |
419 | 411 |
420 Value* val = phi->InputAt(pred_idx); | 412 Value* val = phi->InputAt(pred_idx); |
421 | 413 |
422 MoveOperands move = parallel_move->moves()[move_idx]; | 414 MoveOperands* move = &(parallel_move->moves()[move_idx]); |
srdjan 2012/07/19 22:54:39: Why make it a pointer here?
423 if (val->IsUse()) { | 415 if (val->IsUse()) { |
424 const intptr_t use = val->AsUse()->definition()->ssa_temp_index(); | 416 const intptr_t use = val->AsUse()->definition()->ssa_temp_index(); |
425 Location* slot = move.src_slot(); | 417 Location* slot = move->src_slot(); |
426 *slot = Location::RequiresRegister(); | 418 *slot = Location::PrefersRegister(); |
427 GetLiveRange(use)->head()->AddUse(NULL, pos, slot); | 419 UseValue(block->start_pos(), pos, use, slot, false); |
428 } else { | 420 } else { |
429 ASSERT(val->IsConstant()); | 421 ASSERT(val->IsConstant()); |
430 move.set_src(Location::Constant(val->AsConstant()->value())); | 422 move->set_src(Location::Constant(val->AsConstant()->value())); |
431 } | 423 } |
432 | 424 |
433 move_idx++; | 425 move_idx++; |
434 } | 426 } |
435 | 427 |
436 current = current->previous(); | 428 current = current->previous(); |
437 } | 429 } |
438 | 430 |
439 // Now process all instructions in reverse order. | 431 // Now process all instructions in reverse order. |
440 // Advance position to the start of the last instruction in the block. | 432 // Advance position to the start of the last instruction in the block. |
441 pos -= 1; | 433 pos -= 1; |
442 while (current != block) { | 434 while (current != block) { |
443 LocationSummary* locs = current->locs(); | 435 LocationSummary* locs = current->locs(); |
444 | 436 |
445 const bool output_same_as_first_input = | 437 const bool output_same_as_first_input = |
446 locs->out().IsUnallocated() && | 438 locs->out().IsUnallocated() && |
447 locs->out().policy() == Location::kSameAsFirstInput; | 439 locs->out().policy() == Location::kSameAsFirstInput; |
448 | 440 |
449 // TODO(vegorov): number of inputs should match number of input locations. | 441 // TODO(vegorov): number of inputs should match number of input locations. |
450 // TODO(vegorov): generic support for writable registers? | 442 // TODO(vegorov): generic support for writable registers? |
451 for (intptr_t j = 0; j < current->InputCount(); j++) { | 443 for (intptr_t j = 0; j < current->InputCount(); j++) { |
452 Value* input = current->InputAt(j); | 444 Value* input = current->InputAt(j); |
453 if (input->IsUse()) { | 445 if (input->IsUse()) { |
454 const intptr_t use = input->AsUse()->definition()->ssa_temp_index(); | 446 const intptr_t use = input->AsUse()->definition()->ssa_temp_index(); |
455 | 447 |
456 Location* in_ref = (j < locs->input_count()) ? | 448 Location* in_ref = (j < locs->input_count()) ? |
457 locs->in_slot(j) : NULL; | 449 locs->in_slot(j) : NULL; |
458 const bool use_at_end = (j > 0) || (in_ref == NULL) || | 450 const bool use_at_end = (j > 0) || (in_ref == NULL) || |
459 !output_same_as_first_input; | 451 !output_same_as_first_input; |
460 UseValue(current, block->start_pos(), pos, use, in_ref, use_at_end); | 452 UseValue(block->start_pos(), pos, use, in_ref, use_at_end); |
461 } | 453 } |
462 } | 454 } |
463 | 455 |
464 // Add uses from the deoptimization environment. | 456 // Add uses from the deoptimization environment. |
465 // TODO(vegorov): these uses should _not_ require register but for now | |
466 // they do because we don't support spilling at all. | |
467 if (current->env() != NULL) { | 457 if (current->env() != NULL) { |
468 const GrowableArray<Value*>& values = current->env()->values(); | 458 const GrowableArray<Value*>& values = current->env()->values(); |
469 GrowableArray<Location>* locations = current->env()->locations(); | 459 GrowableArray<Location>* locations = current->env()->locations(); |
470 | 460 |
471 for (intptr_t j = 0; j < values.length(); j++) { | 461 for (intptr_t j = 0; j < values.length(); j++) { |
472 Value* val = values[j]; | 462 Value* val = values[j]; |
473 if (val->IsUse()) { | 463 if (val->IsUse()) { |
474 locations->Add(Location::RequiresRegister()); | 464 locations->Add(Location::Any()); |
475 const intptr_t use = val->AsUse()->definition()->ssa_temp_index(); | 465 const intptr_t use = val->AsUse()->definition()->ssa_temp_index(); |
476 UseValue(current, | 466 UseValue(block->start_pos(), |
477 block->start_pos(), | |
478 pos, | 467 pos, |
479 use, | 468 use, |
480 &(*locations)[j], | 469 &(*locations)[j], |
481 true); | 470 true); |
482 } else { | 471 } else { |
483 locations->Add(Location::NoLocation()); | 472 locations->Add(Location::NoLocation()); |
484 } | 473 } |
485 } | 474 } |
486 } | 475 } |
487 | 476 |
488 // Process temps. | 477 // Process temps. |
489 for (intptr_t j = 0; j < locs->temp_count(); j++) { | 478 for (intptr_t j = 0; j < locs->temp_count(); j++) { |
490 Location temp = locs->temp(j); | 479 Location temp = locs->temp(j); |
491 if (temp.IsRegister()) { | 480 if (temp.IsRegister()) { |
492 BlockLocation(temp, pos); | 481 BlockLocation(temp, pos); |
493 } else if (temp.IsUnallocated()) { | 482 } else if (temp.IsUnallocated()) { |
494 UseInterval* temp_interval = new UseInterval( | 483 AddToUnallocated(LiveRange::MakeTemp(pos, locs->temp_slot(j))); |
495 kTempVirtualRegister, pos, pos + 1, NULL); | |
496 temp_interval->AddUse(NULL, pos, locs->temp_slot(j)); | |
497 AddToUnallocated(temp_interval); | |
498 } else { | 484 } else { |
499 UNREACHABLE(); | 485 UNREACHABLE(); |
500 } | 486 } |
501 } | 487 } |
502 | 488 |
503 // Block all allocatable registers for calls. | 489 // Block all allocatable registers for calls. |
504 if (locs->is_call()) { | 490 if (locs->is_call()) { |
505 for (intptr_t reg = 0; reg < kNumberOfCpuRegisters; reg++) { | 491 for (intptr_t reg = 0; reg < kNumberOfCpuRegisters; reg++) { |
506 BlockLocation(Location::RegisterLocation(static_cast<Register>(reg)), | 492 BlockLocation(Location::RegisterLocation(static_cast<Register>(reg)), |
507 pos); | 493 pos); |
508 } | 494 } |
495 for (intptr_t j = 0; j < locs->temp_count(); j++) { | |
496 ASSERT(!locs->temp(j).IsUnallocated()); | |
497 } | |
509 } | 498 } |
510 | 499 |
511 if (locs->out().IsRegister()) { | 500 if (locs->out().IsRegister()) { |
512 builder_->Bailout("ssa allocator: fixed outputs are not supported"); | 501 builder_->Bailout("ssa allocator: fixed outputs are not supported"); |
513 } | 502 } |
514 | 503 |
515 Definition* def = current->AsDefinition(); | 504 Definition* def = current->AsDefinition(); |
516 if ((def != NULL) && (def->ssa_temp_index() >= 0)) { | 505 if ((def != NULL) && (def->ssa_temp_index() >= 0)) { |
517 Define(output_same_as_first_input ? current : NULL, | 506 Define(pos, |
srdjan 2012/07/19 22:54:39: Seems to be able to fit in one line?
518 pos, | |
519 def->ssa_temp_index(), | 507 def->ssa_temp_index(), |
520 locs->out_slot()); | 508 locs->out_slot()); |
521 } | 509 } |
522 | 510 |
523 current = current->previous(); | 511 current = current->previous(); |
524 pos -= 2; | 512 pos -= 2; |
525 } | 513 } |
526 | 514 |
527 // If this block is a join we need to add destinations of phi | 515 // If this block is a join we need to add destinations of phi |
528 // resolution moves to phi's live range so that register allocator will | 516 // resolution moves to phi's live range so that register allocator will |
529 // fill them with moves. | 517 // fill them with moves. |
530 if (block->IsJoinEntry() && block->AsJoinEntry()->phis() != NULL) { | 518 if (block->IsJoinEntry() && block->AsJoinEntry()->phis() != NULL) { |
531 ZoneGrowableArray<PhiInstr*>* phis = block->AsJoinEntry()->phis(); | 519 ZoneGrowableArray<PhiInstr*>* phis = block->AsJoinEntry()->phis(); |
532 | 520 |
533 intptr_t move_idx = 0; | 521 intptr_t move_idx = 0; |
534 for (intptr_t j = 0; j < phis->length(); j++) { | 522 for (intptr_t j = 0; j < phis->length(); j++) { |
535 PhiInstr* phi = (*phis)[j]; | 523 PhiInstr* phi = (*phis)[j]; |
536 if (phi == NULL) continue; | 524 if (phi == NULL) continue; |
537 | 525 |
538 const intptr_t def = phi->ssa_temp_index(); | 526 const intptr_t def = phi->ssa_temp_index(); |
539 ASSERT(def != -1); | 527 ASSERT(def != -1); |
540 | 528 |
541 LiveRange* range = GetLiveRange(def); | 529 LiveRange* range = GetLiveRange(def); |
542 range->DefineAt(NULL, pos, NULL); | 530 range->DefineAt(pos, NULL); |
543 UseInterval* interval = GetLiveRange(def)->head(); | |
544 | 531 |
545 for (intptr_t k = 0; k < phi->InputCount(); k++) { | 532 for (intptr_t k = 0; k < phi->InputCount(); k++) { |
546 BlockEntryInstr* pred = block->PredecessorAt(k); | 533 BlockEntryInstr* pred = block->PredecessorAt(k); |
547 ASSERT(pred->last_instruction()->IsParallelMove()); | 534 ASSERT(pred->last_instruction()->IsParallelMove()); |
548 | 535 |
549 Location* slot = pred->last_instruction()->AsParallelMove()-> | 536 Location* slot = pred->last_instruction()->AsParallelMove()-> |
550 moves()[move_idx].dest_slot(); | 537 moves()[move_idx].dest_slot(); |
551 *slot = Location::RequiresRegister(); | 538 *slot = Location::PrefersRegister(); |
552 interval->AddUse(NULL, pos, slot); | 539 range->AddUse(pos, slot); |
553 } | 540 } |
554 | 541 |
555 // All phi resolution moves are connected. Phi's live range is complete. | 542 // All phi resolution moves are connected. Phi's live range is complete. |
556 AddToUnallocated(interval); | 543 AddToUnallocated(range); |
557 | 544 |
558 move_idx++; | 545 move_idx++; |
559 } | 546 } |
560 } | 547 } |
561 } | 548 } |
562 } | 549 } |
563 | 550 |
564 | 551 |
552 static ParallelMoveInstr* GetLastParallelMove(BlockEntryInstr* block) { | |
553 // TODO(vegorov): fix this for explicit Goto. | |
554 Instruction* last = block->last_instruction(); | |
555 if (!last->IsParallelMove()) { | |
556 ParallelMoveInstr* move = new ParallelMoveInstr(); | |
557 move->set_next(last->next()); | |
558 move->set_previous(last); | |
559 last->set_next(move); | |
560 block->set_last_instruction(move); | |
561 return move; | |
562 } | |
563 return last->AsParallelMove(); | |
564 } | |
565 | |
566 | |
567 Instruction* FlowGraphAllocator::InstructionAt(intptr_t pos) { | |
568 return instructions_[pos >> 1]; | |
569 } | |
570 | |
571 | |
572 bool FlowGraphAllocator::IsBlockEntry(intptr_t pos) { | |
573 return ((pos & 1) == 0) && (InstructionAt(pos)->IsBlockEntry()); | |
574 } | |
575 | |
565 void FlowGraphAllocator::NumberInstructions() { | 576 void FlowGraphAllocator::NumberInstructions() { |
566 intptr_t pos = 0; | 577 intptr_t pos = 0; |
567 | 578 |
568 const intptr_t block_count = postorder_.length(); | 579 const intptr_t block_count = postorder_.length(); |
569 for (intptr_t i = block_count - 1; i >= 0; i--) { | 580 for (intptr_t i = block_count - 1; i >= 0; i--) { |
570 BlockEntryInstr* block = postorder_[i]; | 581 BlockEntryInstr* block = postorder_[i]; |
571 | 582 |
583 instructions_.Add(block); | |
572 block->set_start_pos(pos); | 584 block->set_start_pos(pos); |
573 pos += 2; | 585 pos += 2; |
574 Instruction* current = block->next(); | 586 Instruction* current = block->next(); |
575 | 587 |
576 Instruction* last = block->last_instruction(); | 588 Instruction* last = block->last_instruction(); |
577 if (!last->IsParallelMove()) last = last->next(); | 589 if (!last->IsParallelMove()) last = last->next(); |
578 | 590 |
579 while (current != last) { | 591 while (current != last) { |
592 instructions_.Add(current); | |
580 current->set_lifetime_position(pos); | 593 current->set_lifetime_position(pos); |
581 current = current->next(); | 594 current = current->next(); |
582 pos += 2; | 595 pos += 2; |
583 } | 596 } |
584 block->set_end_pos(pos); | 597 block->set_end_pos(pos); |
585 | 598 |
586 // For join entry predecessors create phi resolution moves if | 599 // For join entry predecessors create phi resolution moves if |
587 // necessary. They will be populated by the register allocator. | 600 // necessary. They will be populated by the register allocator. |
588 if (block->IsJoinEntry() && (block->AsJoinEntry()->phi_count() > 0)) { | 601 if (block->IsJoinEntry() && (block->AsJoinEntry()->phi_count() > 0)) { |
589 const intptr_t phi_count = block->AsJoinEntry()->phi_count(); | 602 const intptr_t phi_count = block->AsJoinEntry()->phi_count(); |
590 for (intptr_t i = 0; i < block->PredecessorCount(); i++) { | 603 for (intptr_t i = 0; i < block->PredecessorCount(); i++) { |
591 BlockEntryInstr* pred = block->PredecessorAt(i); | 604 BlockEntryInstr* pred = block->PredecessorAt(i); |
592 ASSERT(!pred->last_instruction()->IsParallelMove()); | 605 ASSERT(!pred->last_instruction()->IsParallelMove()); |
593 | 606 |
594 ParallelMoveInstr* move = new ParallelMoveInstr(); | 607 ParallelMoveInstr* move = GetLastParallelMove(pred); |
595 move->set_next(block); | |
596 move->set_previous(pred->last_instruction()); | |
597 pred->last_instruction()->set_next(move); | |
598 pred->set_last_instruction(move); | |
599 | 608 |
600 // Populate ParallelMove with empty moves. | 609 // Populate ParallelMove with empty moves. |
601 for (intptr_t j = 0; j < phi_count; j++) { | 610 for (intptr_t j = 0; j < phi_count; j++) { |
602 move->AddMove(Location::NoLocation(), Location::NoLocation()); | 611 move->AddMove(Location::NoLocation(), Location::NoLocation()); |
603 } | 612 } |
604 } | 613 } |
605 } | 614 } |
606 } | 615 } |
607 } | 616 } |
608 | 617 |
609 | 618 |
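Note: NumberInstructions assigns each instruction an even lifetime position and appends it to instructions_, so InstructionAt can recover the instruction from any position via pos >> 1; the odd position between two instructions acts as a gap where parallel moves can later be inserted. A small sketch of this invariant, assuming simplified types:

    // Sketch of the numbering invariant: instructions sit at even
    // positions, and both an even position and the odd gap after it
    // map back to the same instruction via pos >> 1.
    #include <cassert>
    #include <vector>

    int main() {
      const char* block[] = {"entry", "add", "mul", "return"};
      std::vector<const char*> instructions;  // stands in for instructions_

      int pos = 0;
      for (const char* instr : block) {
        instructions.push_back(instr);  // instructions_.Add(current)
        // The real allocator also calls current->set_lifetime_position(pos).
        pos += 2;
      }

      assert(instructions[4 >> 1] == block[2]);  // position 4 -> "mul"
      assert(instructions[5 >> 1] == block[2]);  // the gap at 5 -> "mul" too
      return 0;
    }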
619 static UsePosition* FirstUseAfter(UsePosition* use, intptr_t after) { | |
620 while ((use != NULL) && (use->pos() < after)) { | |
621 use = use->next(); | |
622 } | |
623 return use; | |
624 } | |
625 | |
626 | |
627 Location AllocationFinger::FirstHint() { | |
628 UsePosition* use = first_hinted_use_; | |
629 | |
630 while (use != NULL) { | |
631 if (use->HasHint()) return use->hint(); | |
632 use = use->next(); | |
633 } | |
634 | |
635 return Location::NoLocation(); | |
636 } | |
637 | |
638 | |
639 UsePosition* AllocationFinger::FirstRegisterUse(intptr_t after) { | |
640 for (UsePosition* use = FirstUseAfter(first_register_use_, after); | |
641 use != NULL; | |
642 use = use->next()) { | |
643 Location* loc = use->location_slot(); | |
644 if ((loc != NULL) && | |
645 loc->IsUnallocated() && | |
646 (loc->policy() == Location::kRequiresRegister)) { | |
647 first_register_use_ = use; | |
648 return use; | |
649 } | |
650 } | |
651 return NULL; | |
652 } | |
653 | |
654 | |
655 UsePosition* AllocationFinger::FirstRegisterBeneficialUse(intptr_t after) { | |
656 for (UsePosition* use = FirstUseAfter(first_register_beneficial_use_, after); | |
657 use != NULL; | |
658 use = use->next()) { | |
659 Location* loc = use->location_slot(); | |
660 if ((loc != NULL) && | |
661 (loc->IsRegister() || | |
662 (loc->IsUnallocated() && loc->IsRegisterBeneficial()))) { | |
663 first_register_beneficial_use_ = use; | |
664 return use; | |
665 } | |
666 } | |
667 return NULL; | |
668 } | |
669 | |
670 | |
610 intptr_t UseInterval::Intersect(UseInterval* other) { | 671 intptr_t UseInterval::Intersect(UseInterval* other) { |
611 if (this->start() <= other->start()) { | 672 if (this->start() <= other->start()) { |
612 if (other->start() < this->end()) return other->start(); | 673 if (other->start() < this->end()) return other->start(); |
613 } else if (this->start() < other->end()) { | 674 } else if (this->start() < other->end()) { |
614 return this->start(); | 675 return this->start(); |
615 } | 676 } |
616 return kIllegalPosition; | 677 return kIllegalPosition; |
617 } | 678 } |
618 | 679 |
619 | 680 |
620 static intptr_t FirstIntersection(UseInterval* a, UseInterval* u) { | 681 static intptr_t FirstIntersection(UseInterval* a, UseInterval* u) { |
621 while (a != NULL && u != NULL) { | 682 while (a != NULL && u != NULL) { |
622 const intptr_t pos = a->Intersect(u); | 683 const intptr_t pos = a->Intersect(u); |
623 if (pos != kIllegalPosition) return pos; | 684 if (pos != kIllegalPosition) return pos; |
624 | 685 |
625 if (a->start() < u->start()) { | 686 if (a->start() < u->start()) { |
626 a = a->next_allocated(); | 687 a = a->next(); |
627 } else { | 688 } else { |
628 u = u->next(); | 689 u = u->next(); |
629 } | 690 } |
630 } | 691 } |
631 | 692 |
632 return kMaxPosition; | 693 return kMaxPosition; |
633 } | 694 } |
634 | 695 |
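Note: Intersect treats use intervals as half-open [start, end), so intervals that merely touch do not intersect, and FirstIntersection walks two sorted chains in lockstep, always advancing the chain that starts earlier. A standalone sketch of the same logic, with -1 and 0x7FFFFFFF standing in for kIllegalPosition and kMaxPosition:

    // Sketch of half-open interval intersection over two sorted chains.
    #include <cstdio>

    struct Interval {
      int start;
      int end;
      const Interval* next;
    };

    int Intersect(const Interval* a, const Interval* b) {
      if (a->start <= b->start) {
        if (b->start < a->end) return b->start;
      } else if (a->start < b->end) {
        return a->start;
      }
      return -1;
    }

    int FirstIntersection(const Interval* a, const Interval* u) {
      while ((a != nullptr) && (u != nullptr)) {
        const int pos = Intersect(a, u);
        if (pos != -1) return pos;
        if (a->start < u->start) {
          a = a->next;
        } else {
          u = u->next;
        }
      }
      return 0x7FFFFFFF;
    }

    int main() {
      Interval a2 = {14, 18, nullptr};
      Interval a1 = {2, 6, &a2};    // chain A: [2, 6) -> [14, 18)
      Interval u2 = {16, 20, nullptr};
      Interval u1 = {6, 8, &u2};    // chain U: [6, 8) -> [16, 20)
      // [2, 6) and [6, 8) touch but do not overlap (half-open), so the
      // first intersection is at 16, where [14, 18) meets [16, 20).
      std::printf("first intersection at %d\n", FirstIntersection(&a1, &u1));
      return 0;
    }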
635 | 696 |
636 static Location LookAheadForHint(UseInterval* interval) { | 697 LiveRange* LiveRange::MakeTemp(intptr_t pos, Location* location_slot) { |
637 UsePosition* use = interval->first_use(); | 698 LiveRange* range = new LiveRange(kTempVirtualRegister); |
638 | 699 range->AddUseInterval(pos, pos + 1); |
639 while (use != NULL) { | 700 range->AddUse(pos, location_slot); |
640 if (use->HasHint()) return use->hint(); | 701 return range; |
641 use = use->next(); | |
642 } | |
643 | |
644 return Location::NoLocation(); | |
645 } | 702 } |
646 | 703 |
647 | 704 |
648 bool FlowGraphAllocator::AllocateFreeRegister(UseInterval* unallocated) { | 705 LiveRange* LiveRange::SplitAt(intptr_t split_pos) { |
706 if (split_pos == Start()) return this; | |
707 | |
708 UseInterval* interval = finger_.first_pending_use_interval(); | |
709 | |
710 ASSERT(interval->start() <= split_pos); | |
711 | |
712 // Corner case. We need to start over to find previous interval. | |
713 if (interval->start() == split_pos) interval = first_use_interval_; | |
714 | |
715 UseInterval* last_before_split = NULL; | |
716 while (interval->end() <= split_pos) { |
717 last_before_split = interval; | |
718 interval = interval->next(); | |
719 } | |
720 | |
721 const bool split_at_start = (interval->start() == split_pos); | |
722 | |
723 UseInterval* first_after_split = interval; | |
724 if (!split_at_start && interval->Contains(split_pos)) { | |
725 first_after_split = new UseInterval(split_pos, | |
726 interval->end(), | |
727 interval->next()); | |
728 interval->end_ = split_pos; | |
729 interval->next_ = first_after_split; | |
730 last_before_split = interval; | |
731 } | |
732 | |
733 ASSERT(last_before_split->next() == first_after_split); | |
734 ASSERT(last_before_split->end() <= split_pos); | |
735 ASSERT(split_pos <= first_after_split->start()); | |
736 | |
737 UsePosition* last_use_before_split = NULL; | |
738 UsePosition* use = uses_; | |
739 if (split_at_start) { | |
740 while ((use != NULL) && (use->pos() < split_pos)) { | |
741 last_use_before_split = use; | |
742 use = use->next(); | |
743 } | |
744 } else { | |
745 while ((use != NULL) && (use->pos() <= split_pos)) { | |
746 last_use_before_split = use; | |
747 use = use->next(); | |
748 } | |
749 } | |
750 UsePosition* first_use_after_split = use; | |
751 | |
752 if (last_use_before_split == NULL) { | |
753 uses_ = NULL; | |
754 } else { | |
755 last_use_before_split->set_next(NULL); | |
756 } | |
757 | |
758 next_sibling_ = new LiveRange(vreg(), | |
759 first_use_after_split, | |
760 first_after_split, | |
761 last_use_interval_, | |
762 next_sibling_); | |
763 last_use_interval_ = last_before_split; | |
764 last_use_interval_->next_ = NULL; | |
765 return next_sibling_; | |
766 } | |
767 | |
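Note: SplitAt cuts a live range in two and chains the tail as next_sibling; the use interval containing split_pos is itself split, and uses at or after the split migrate to the tail. A much-reduced sketch of that contract for a single-interval range, with hypothetical types rather than the VM's classes:

    // Sketch of the split contract: splitting [start, end) at pos yields
    // this = [start, pos) plus a sibling covering [pos, end).
    #include <cassert>

    struct Range {
      int start;
      int end;
      Range* sibling;

      Range* SplitAt(int pos) {
        if (pos == start) return this;  // nothing to cut off
        assert((start < pos) && (pos < end));
        sibling = new Range{pos, end, nullptr};  // tail: [pos, end)
        end = pos;                               // head shrinks to [start, pos)
        return sibling;
      }
    };

    int main() {
      Range r = {4, 20, nullptr};
      Range* tail = r.SplitAt(10);
      assert((r.end == 10) && (tail->start == 10) && (tail->end == 20));
      assert(r.sibling == tail);
      return 0;
    }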
768 | |
769 LiveRange* FlowGraphAllocator::SplitBetween(LiveRange* range, | |
770 intptr_t from, | |
771 intptr_t to) { | |
772 // TODO(vegorov): select optimal split position based on loop structure. | |
773 if (to == range->End()) to--; | |
774 TRACE_ALLOC(("split %d [%d, %d) between [%d, %d)\n", | |
775 range->vreg(), range->Start(), range->End(), from, to)); | |
776 return range->SplitAt(to); | |
777 } | |
778 | |
779 | |
780 void FlowGraphAllocator::SpillBetween(LiveRange* range, | |
781 intptr_t from, | |
782 intptr_t to) { | |
783 ASSERT(from < to); | |
784 TRACE_ALLOC(("spill %d [%d, %d) between [%d, %d)\n", | |
785 range->vreg(), range->Start(), range->End(), from, to)); | |
786 LiveRange* tail = range->SplitAt(from); | |
787 | |
788 if (tail->Start() < to) { | |
789 // There is an intersection of tail and [from, to). | |
790 LiveRange* tail_tail = SplitBetween(tail, tail->Start(), to); | |
791 Spill(tail); | |
792 AddToUnallocated(tail_tail); | |
793 } else { | |
794 // No intersection between tail and [from, to). | |
795 AddToUnallocated(tail); | |
796 } | |
797 } | |
798 | |
799 | |
800 void FlowGraphAllocator::SpillAfter(LiveRange* range, intptr_t from) { | |
801 TRACE_ALLOC(("spill %d [%d, %d) after %d\n", | |
802 range->vreg(), range->Start(), range->End(), from)); | |
803 LiveRange* tail = range->SplitAt(from); | |
804 Spill(tail); | |
805 } | |
806 | |
807 | |
808 intptr_t FlowGraphAllocator::AllocateSpillSlotFor(LiveRange* range) { | |
809 for (intptr_t i = 0; i < spill_slots_.length(); i++) { | |
810 if (spill_slots_[i] <= range->Start()) { | |
811 return i; | |
812 } | |
813 } | |
814 spill_slots_.Add(0); | |
815 return spill_slots_.length() - 1; | |
816 } | |
817 | |
818 | |
819 void FlowGraphAllocator::Spill(LiveRange* range) { | |
820 const intptr_t spill_index = AllocateSpillSlotFor(range); | |
821 ASSERT(spill_slots_[spill_index] <= range->Start()); |
822 spill_slots_[spill_index] = range->End(); | |
823 range->set_assigned_location(Location::SpillSlot(spill_index)); | |
824 ConvertAllUses(range); | |
825 } | |
826 | |
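Note: AllocateSpillSlotFor recycles stack slots: spill_slots_[i] holds the end position of the last range parked in slot i, so a slot whose occupant ends at or before the new range's start can be reused, and Spill then assigns the corresponding Location::SpillSlot. A compact sketch of the recycling rule, folding the two steps together under simplified types:

    // Sketch of spill-slot recycling: each slot remembers the end
    // position of its last occupant; a slot whose occupant has already
    // ended can be reused, otherwise the frame grows by one slot.
    #include <cstdio>
    #include <vector>

    struct Range { int start; int end; };

    int AllocateSpillSlotFor(std::vector<int>* slots, const Range& range) {
      for (size_t i = 0; i < slots->size(); i++) {
        if ((*slots)[i] <= range.start) {  // previous occupant already ended
          (*slots)[i] = range.end;
          return static_cast<int>(i);
        }
      }
      slots->push_back(range.end);         // no reusable slot: grow the frame
      return static_cast<int>(slots->size()) - 1;
    }

    int main() {
      std::vector<int> slots;
      const Range a = {0, 10};
      const Range b = {4, 12};
      const Range c = {10, 20};
      std::printf("a -> slot %d\n", AllocateSpillSlotFor(&slots, a));  // slot 0
      std::printf("b -> slot %d\n", AllocateSpillSlotFor(&slots, b));  // slot 1
      std::printf("c -> slot %d\n", AllocateSpillSlotFor(&slots, c));  // slot 0 again
      return 0;
    }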
827 | |
828 intptr_t FlowGraphAllocator::FirstIntersectionWithAllocated( | |
829 Register reg, LiveRange* unallocated) { | |
830 intptr_t intersection = kMaxPosition; | |
831 for (intptr_t i = 0; i < cpu_regs_[reg].length(); i++) { | |
832 LiveRange* allocated = cpu_regs_[reg][i]; | |
833 if (allocated == NULL) continue; | |
834 | |
835 UseInterval* allocated_head = | |
836 allocated->finger()->first_pending_use_interval(); | |
837 if (allocated_head->start() >= intersection) continue; | |
838 | |
839 const intptr_t pos = FirstIntersection( | |
840 unallocated->finger()->first_pending_use_interval(), | |
841 allocated_head); | |
842 if (pos < intersection) intersection = pos; | |
843 } | |
844 return intersection; | |
845 } | |
846 | |
847 | |
848 | |
849 bool FlowGraphAllocator::AllocateFreeRegister(LiveRange* unallocated) { | |
649 Register candidate = kNoRegister; | 850 Register candidate = kNoRegister; |
650 intptr_t free_until = 0; | 851 intptr_t free_until = 0; |
651 | 852 |
652 // If hint is available try hint first. | 853 // If hint is available try hint first. |
653 // TODO(vegorov): ensure that phis are hinted on the backedge. | 854 // TODO(vegorov): ensure that phis are hinted on the back edge. |
654 Location hint = LookAheadForHint(unallocated); | 855 Location hint = unallocated->finger()->FirstHint(); |
655 if (!hint.IsInvalid()) { | 856 if (!hint.IsInvalid()) { |
656 ASSERT(hint.IsRegister()); | 857 ASSERT(hint.IsRegister()); |
657 | 858 |
658 if (cpu_regs_[hint.reg()] != kPermanentlyBlocked) { | 859 if (!blocked_cpu_regs_[hint.reg()]) { |
659 free_until = FirstIntersection(cpu_regs_[hint.reg()], unallocated); | 860 free_until = FirstIntersectionWithAllocated(hint.reg(), unallocated); |
660 candidate = hint.reg(); | 861 candidate = hint.reg(); |
661 } | 862 } |
662 | 863 |
663 TRACE_ALLOC(("found hint %s for %d: free until %d\n", | 864 TRACE_ALLOC(("found hint %s for %d: free until %d\n", |
664 hint.Name(), unallocated->vreg(), free_until)); | 865 hint.Name(), unallocated->vreg(), free_until)); |
665 } | 866 } |
666 | 867 |
667 if (free_until != kMaxPosition) { | 868 if (free_until != kMaxPosition) { |
668 for (int reg = 0; reg < kNumberOfCpuRegisters; ++reg) { | 869 for (intptr_t reg = 0; reg < kNumberOfCpuRegisters; ++reg) { |
669 if (cpu_regs_[reg] == NULL) { | 870 if (!blocked_cpu_regs_[reg] && cpu_regs_[reg].length() == 0) { |
670 candidate = static_cast<Register>(reg); | 871 candidate = static_cast<Register>(reg); |
671 free_until = kMaxPosition; | 872 free_until = kMaxPosition; |
672 break; | 873 break; |
673 } | 874 } |
674 } | 875 } |
675 } | 876 } |
676 | 877 |
677 ASSERT(0 <= kMaxPosition); | 878 ASSERT(0 <= kMaxPosition); |
678 if (free_until != kMaxPosition) { | 879 if (free_until != kMaxPosition) { |
679 for (int reg = 0; reg < kNumberOfCpuRegisters; ++reg) { | 880 for (intptr_t reg = 0; reg < kNumberOfCpuRegisters; ++reg) { |
680 if (cpu_regs_[reg] == kPermanentlyBlocked) continue; | 881 if (blocked_cpu_regs_[reg] || (reg == candidate)) continue; |
681 if (reg == candidate) continue; | |
682 | 882 |
683 const intptr_t pos = FirstIntersection(cpu_regs_[reg], unallocated); | 883 const intptr_t intersection = |
884 FirstIntersectionWithAllocated(static_cast<Register>(reg), unallocated); | |
684 | 885 |
685 if (pos > free_until) { | 886 if (intersection > free_until) { |
686 candidate = static_cast<Register>(reg); | 887 candidate = static_cast<Register>(reg); |
687 free_until = pos; | 888 free_until = intersection; |
688 if (free_until == kMaxPosition) break; | 889 if (free_until == kMaxPosition) break; |
689 } | 890 } |
690 } | 891 } |
691 } | 892 } |
692 | 893 |
693 // All registers are blocked by active ranges. | 894 // All registers are blocked by active ranges. |
694 if (free_until <= unallocated->start()) return false; | 895 if (free_until <= unallocated->Start()) return false; |
695 | 896 |
696 AssignFreeRegister(unallocated, candidate); | 897 TRACE_ALLOC(("assigning free register %s to %d\n", |
898 Location::RegisterLocation(candidate).Name(), | |
899 unallocated->vreg())); | |
900 | |
901 if (free_until != kMaxPosition) { | |
902 // There was an intersection. Split unallocated. | |
903 TRACE_ALLOC((" splitting at %d\n", free_until)); | |
904 LiveRange* tail = unallocated->SplitAt(free_until); | |
905 AddToUnallocated(tail); | |
906 } | |
907 | |
908 cpu_regs_[candidate].Add(unallocated); | |
909 unallocated->set_assigned_location(Location::RegisterLocation(candidate)); | |
910 | |
697 return true; | 911 return true; |
698 } | 912 } |
699 | 913 |
700 | 914 |
701 UseInterval* UseInterval::Split(intptr_t pos) { | 915 void FlowGraphAllocator::AllocateAnyRegister(LiveRange* unallocated) { |
702 if (pos == start()) return this; | 916 UsePosition* register_use = |
703 ASSERT(Contains(pos)); | 917 unallocated->finger()->FirstRegisterUse(unallocated->Start()); |
704 UseInterval* tail = new UseInterval(vreg(), pos, end(), next()); | 918 if (register_use == NULL) { |
705 | 919 Spill(unallocated); |
706 UsePosition* use = uses_; | 920 return; |
707 while (use != NULL && use->pos() <= pos) { | |
708 use = use->next(); | |
709 } | 921 } |
710 | 922 |
711 tail->uses_ = use; | 923 Register candidate = kNoRegister; |
924 intptr_t free_until = 0; | |
925 intptr_t blocked_at = kMaxPosition; | |
712 | 926 |
713 end_ = pos; | 927 for (int reg = 0; reg < kNumberOfCpuRegisters; ++reg) { |
928 if (blocked_cpu_regs_[reg]) continue; | |
929 if (UpdateFreeUntil(static_cast<Register>(reg), | |
930 unallocated, | |
931 &free_until, | |
932 &blocked_at)) { | |
933 candidate = static_cast<Register>(reg); | |
934 } | |
935 } | |
714 | 936 |
715 return tail; | 937 if (free_until < register_use->pos()) { |
938 // Can't acquire free register. Spill until we really need one. | |
939 ASSERT(unallocated->Start() < register_use->pos()); | |
940 SpillBetween(unallocated, unallocated->Start(), register_use->pos()); | |
941 return; | |
942 } | |
943 | |
944 if (blocked_at < unallocated->End()) { | |
945 LiveRange* tail = SplitBetween(unallocated, | |
946 unallocated->Start(), | |
947 blocked_at); | |
948 AddToUnallocated(tail); | |
949 } | |
950 | |
951 AssignBlockedRegister(unallocated, candidate); | |
716 } | 952 } |
717 | 953 |
718 | 954 |
719 void FlowGraphAllocator::AssignFreeRegister(UseInterval* unallocated, | 955 bool FlowGraphAllocator::UpdateFreeUntil(Register reg, |
720 Register reg) { | 956 LiveRange* unallocated, |
721 TRACE_ALLOC(("assigning free register %s to %d\n", | 957 intptr_t* cur_free_until, |
958 intptr_t* cur_blocked_at) { | |
959 intptr_t free_until = kMaxPosition; | |
960 intptr_t blocked_at = kMaxPosition; | |
961 const intptr_t start = unallocated->Start(); | |
962 | |
963 for (intptr_t i = 0; i < cpu_regs_[reg].length(); i++) { | |
964 LiveRange* allocated = cpu_regs_[reg][i]; | |
965 | |
966 UseInterval* first_pending_use_interval = | |
967 allocated->finger()->first_pending_use_interval(); | |
968 if (first_pending_use_interval->Contains(start)) { | |
969 // This is an active interval. | |
970 if (allocated->vreg() <= 0) { | |
971 // This register blocked by an interval that | |
972 // can't be spilled. | |
973 return false; | |
974 } | |
975 | |
976 const UsePosition* use = | |
977 allocated->finger()->FirstRegisterBeneficialUse(unallocated->Start()); | |
978 | |
979 if ((use != NULL) && ((use->pos() - start) <= 1)) { | |
980 // This register is blocked by interval that is used | |
981 // as register in the current instruction and can't | |
982 // be spilled. | |
983 return false; | |
984 } | |
985 | |
986 const intptr_t use_pos = (use != NULL) ? use->pos() | |
987 : allocated->End(); | |
988 | |
989 if (use_pos < free_until) free_until = use_pos; | |
990 } else { | |
991 // This is inactive interval. | |
992 const intptr_t intersection = FirstIntersection( | |
993 first_pending_use_interval, unallocated->first_use_interval()); | |
994 if (intersection != kMaxPosition) { | |
995 if (intersection < free_until) free_until = intersection; | |
996 if (allocated->vreg() == kNoVirtualRegister) blocked_at = intersection; | |
997 } | |
998 } | |
999 | |
1000 if (free_until <= *cur_free_until) { | |
1001 return false; | |
1002 } | |
1003 } | |
1004 | |
1005 ASSERT(free_until > *cur_free_until); | |
1006 *cur_free_until = free_until; | |
1007 *cur_blocked_at = blocked_at; | |
1008 return true; | |
1009 } | |
1010 | |
1011 | |
1012 void FlowGraphAllocator::RemoveEvicted(Register reg, intptr_t first_evicted) { | |
1013 intptr_t to = first_evicted; | |
1014 intptr_t from = first_evicted + 1; | |
1015 while (from < cpu_regs_[reg].length()) { | |
1016 LiveRange* allocated = cpu_regs_[reg][from++]; | |
1017 if (allocated != NULL) cpu_regs_[reg][to++] = allocated; | |
1018 } | |
1019 cpu_regs_[reg].TruncateTo(to); | |
1020 } | |
1021 | |
1022 | |
1023 void FlowGraphAllocator::AssignBlockedRegister(LiveRange* unallocated, | |
1024 Register reg) { | |
1025 TRACE_ALLOC(("assigning blocked register %s to live range %d\n", | |
722 Location::RegisterLocation(reg).Name(), | 1026 Location::RegisterLocation(reg).Name(), |
723 unallocated->vreg())); | 1027 unallocated->vreg())); |
724 | 1028 |
725 UseInterval* a = cpu_regs_[reg]; | 1029 intptr_t first_evicted = -1; |
726 if (a == NULL) { | 1030 for (intptr_t i = cpu_regs_[reg].length() - 1; i >= 0; i--) { |
727 // Register is completely free. | 1031 LiveRange* allocated = cpu_regs_[reg][i]; |
728 cpu_regs_[reg] = unallocated; | 1032 if (allocated->vreg() < 0) continue; // Can't be evicted. |
729 return; | 1033 if (EvictIntersection(allocated, |
1034 unallocated)) { | |
1035 cpu_regs_[reg][i] = NULL; | |
1036 first_evicted = i; | |
1037 } | |
730 } | 1038 } |
731 | 1039 |
732 UseInterval* u = unallocated; | 1040 // Remove evicted ranges from the array. |
733 ASSERT(u->start() < a->start()); // Register is free. | 1041 if (first_evicted != -1) RemoveEvicted(reg, first_evicted); |
734 cpu_regs_[reg] = u; | |
735 if (u->next() == NULL || u->next()->start() >= a->start()) { | |
736 u->set_next_allocated(a); | |
737 } | |
738 | 1042 |
739 while (a != NULL && u != NULL) { | 1043 cpu_regs_[reg].Add(unallocated); |
740 const intptr_t pos = a->Intersect(u); | 1044 unallocated->set_assigned_location(Location::RegisterLocation(reg)); |
741 if (pos != kIllegalPosition) { | |
742 // TODO(vegorov): split live ranges might require control flow resolution | |
743 // which is not implemented yet. | |
744 builder_->Bailout("ssa allocator: control flow resolution required"); | |
745 | |
746 TRACE_ALLOC((" splitting at %d\n", pos)); | |
747 // Reached intersection | |
748 UseInterval* tail = u->Split(pos); | |
749 AddToUnallocated(tail); | |
750 ASSERT(tail == u || u->next_allocated() == a); | |
751 return; | |
752 } | |
753 | |
754 if (a->start() < u->start()) { | |
755 if (a->next_allocated() == NULL) { | |
756 a->set_next_allocated(u); | |
757 break; | |
758 } | |
759 | |
760 UseInterval* next = a->next_allocated(); | |
761 if (next->start() > u->start()) { | |
762 a->set_next_allocated(u); | |
763 u->set_next_allocated(next); | |
764 } | |
765 | |
766 a = next; | |
767 } else { | |
768 UseInterval* next = u->next(); | |
769 | |
770 if (next == NULL || next->start() >= a->start()) { | |
771 u->set_next_allocated(a); | |
772 } | |
773 u = next; | |
774 } | |
775 } | |
776 } | 1045 } |
777 | 1046 |
778 | 1047 |
1048 bool FlowGraphAllocator::EvictIntersection(LiveRange* allocated, | |
1049 LiveRange* unallocated) { | |
1050 UseInterval* first_unallocated = | |
1051 unallocated->finger()->first_pending_use_interval(); | |
1052 const intptr_t intersection = FirstIntersection( | |
1053 allocated->finger()->first_pending_use_interval(), | |
1054 first_unallocated); | |
srdjan 2012/07/19 22:54:39: Indent 4 spaces above.
Vyacheslav Egorov (Google) 2012/07/24 12:26:41: Done.
1055 if (intersection == kMaxPosition) return false; | |
1056 | |
1057 const intptr_t spill_position = first_unallocated->start(); | |
1058 UsePosition* use = allocated->finger()->FirstRegisterUse(spill_position); | |
1059 if (use == NULL) { | |
1060 // No register uses after this point. | |
1061 SpillAfter(allocated, spill_position); | |
1062 } else { | |
1063 const intptr_t restore_position = | |
1064 (spill_position < intersection) ? MinPosition(intersection, use->pos()) | |
1065 : use->pos(); | |
1066 | |
1067 SpillBetween(allocated, spill_position, restore_position); | |
1068 } | |
1069 | |
1070 return true; | |
1071 } | |
1072 | |
1073 | |
779 static void InsertMoveBefore(Instruction* instr, Location to, Location from) { | 1074 static void InsertMoveBefore(Instruction* instr, Location to, Location from) { |
780 Instruction* prev = instr->previous(); | 1075 Instruction* prev = instr->previous(); |
781 ParallelMoveInstr* move = prev->AsParallelMove(); | 1076 ParallelMoveInstr* move = prev->AsParallelMove(); |
782 if (move == NULL) { | 1077 if (move == NULL) { |
783 move = new ParallelMoveInstr(); | 1078 move = new ParallelMoveInstr(); |
784 move->set_next(prev->next()); | 1079 move->set_next(prev->next()); |
785 prev->set_next(move); | 1080 prev->set_next(move); |
786 move->next()->set_previous(move); | 1081 move->next()->set_previous(move); |
787 move->set_previous(prev); | 1082 move->set_previous(prev); |
788 } | 1083 } |
789 move->AddMove(to, from); | 1084 move->AddMove(to, from); |
790 } | 1085 } |
791 | 1086 |
792 | 1087 |
793 void UsePosition::AssignLocation(Location loc) { | 1088 void FlowGraphAllocator::ConvertUseTo(UsePosition* use, Location loc) { |
794 if (location_slot_ == NULL) return; | 1089 if (use->location_slot() == NULL) return; |
795 | 1090 |
796 if (location_slot_->IsUnallocated()) { | 1091 Location* slot = use->location_slot(); |
797 if (location_slot_->policy() == Location::kSameAsFirstInput) { | 1092 if (slot->IsUnallocated()) { |
798 Instruction* instr = this->instr(); | 1093 if (slot->policy() == Location::kSameAsFirstInput) { |
1094 Instruction* instr = InstructionAt(use->pos()); | |
799 LocationSummary* locs = instr->locs(); | 1095 LocationSummary* locs = instr->locs(); |
800 if (!locs->in(0).IsUnallocated()) { | 1096 if (!locs->in(0).IsUnallocated()) { |
801 InsertMoveBefore(instr, loc, locs->in(0)); | 1097 InsertMoveBefore(instr, loc, locs->in(0)); |
802 } | 1098 } |
803 locs->set_in(0, loc); | 1099 locs->set_in(0, loc); |
804 } | 1100 } |
805 TRACE_ALLOC((" use at %d converted to %s\n", pos(), loc.Name())); | 1101 TRACE_ALLOC((" use at %d converted to %s\n", use->pos(), loc.Name())); |
806 *location_slot_ = loc; | 1102 *slot = loc; |
807 } else if (location_slot_->IsRegister()) { | 1103 } else if (slot->IsRegister()) { |
808 InsertMoveBefore(this->instr(), *location_slot_, loc); | 1104 InsertMoveBefore(InstructionAt(use->pos()), *slot, loc); |
1105 } else { | |
1106 UNREACHABLE(); | |
809 } | 1107 } |
810 } | 1108 } |
811 | 1109 |
812 | 1110 |
813 void FlowGraphAllocator::FinalizeInterval(UseInterval* interval, Location loc) { | 1111 void FlowGraphAllocator::ConvertAllUses(LiveRange* range) { |
814 if (interval->vreg() == kNoVirtualRegister) return; | 1112 if (range->vreg() == kNoVirtualRegister) return; |
815 | 1113 TRACE_ALLOC(("range [%d, %d) for v%d has been allocated to %s:\n", |
816 TRACE_ALLOC(("assigning location %s to interval [%d, %d)\n", loc.Name(), | 1114 range->Start(), |
817 interval->start(), interval->end())); | 1115 range->End(), |
818 | 1116 range->vreg(), |
819 for (UsePosition* use = interval->first_use(); | 1117 range->assigned_location().Name())); |
820 use != NULL && use->pos() <= interval->end(); | 1118 ASSERT(!range->assigned_location().IsInvalid()); |
821 use = use->next()) { | 1119 const Location loc = range->assigned_location(); |
822 use->AssignLocation(loc); | 1120 for (UsePosition* use = range->first_use(); use != NULL; use = use->next()) { |
1121 ConvertUseTo(use, loc); | |
823 } | 1122 } |
824 } | 1123 } |
825 | 1124 |
826 | 1125 |
1126 bool AllocationFinger::Advance(const intptr_t start) { | |
1127 UseInterval* a = first_pending_use_interval_; | |
1128 while (a != NULL && a->end() <= start) a = a->next(); | |
1129 first_pending_use_interval_ = a; | |
1130 if (first_pending_use_interval_ == NULL) { | |
1131 return true; | |
1132 } | |
1133 return false; | |
1134 } | |
1135 | |
1136 | |
827 void FlowGraphAllocator::AdvanceActiveIntervals(const intptr_t start) { | 1137 void FlowGraphAllocator::AdvanceActiveIntervals(const intptr_t start) { |
828 for (int reg = 0; reg < kNumberOfCpuRegisters; reg++) { | 1138 for (intptr_t reg = 0; reg < kNumberOfCpuRegisters; reg++) { |
829 if (cpu_regs_[reg] == NULL) continue; | 1139 if (cpu_regs_[reg].length() == 0) continue; |
srdjan 2012/07/19 22:54:39: Use is_empty() instead of length() == 0.
Vyacheslav Egorov (Google) 2012/07/24 12:26:41: Done.
830 if (cpu_regs_[reg] == kPermanentlyBlocked) continue; | |
831 | 1140 |
832 UseInterval* a = cpu_regs_[reg]; | 1141 intptr_t first_evicted = -1; |
833 while (a != NULL && a->end() <= start) { | 1142 for (intptr_t i = cpu_regs_[reg].length() - 1; i >= 0; i--) { |
834 FinalizeInterval(a, | 1143 LiveRange* range = cpu_regs_[reg][i]; |
835 Location::RegisterLocation(static_cast<Register>(reg))); | 1144 if (range->finger()->Advance(start)) { |
836 a = a->next_allocated(); | 1145 ConvertAllUses(range); |
1146 cpu_regs_[reg][i] = NULL; | |
1147 first_evicted = i; | |
1148 } | |
837 } | 1149 } |
838 | 1150 |
839 cpu_regs_[reg] = a; | 1151 if (first_evicted != -1) { |
1152 RemoveEvicted(static_cast<Register>(reg), first_evicted); | |
1153 } | |
840 } | 1154 } |
841 } | 1155 } |
842 | 1156 |
843 | 1157 |
844 static inline bool ShouldBeAllocatedBefore(UseInterval* a, UseInterval* b) { | 1158 void AllocationFinger::Initialize(LiveRange* range) { |
845 return a->start() <= b->start(); | 1159 first_pending_use_interval_ = range->first_use_interval(); |
1160 first_register_use_ = range->first_use(); | |
1161 first_register_beneficial_use_ = range->first_use(); | |
1162 first_hinted_use_ = range->first_use(); | |
846 } | 1163 } |
847 | 1164 |
848 | 1165 |
849 void FlowGraphAllocator::AddToUnallocated(UseInterval* chain) { | 1166 static inline bool ShouldBeAllocatedBefore(LiveRange* a, LiveRange* b) { |
1167 return a->Start() <= b->Start(); | |
1168 } | |
1169 | |
1170 | |
1171 void FlowGraphAllocator::AddToUnallocated(LiveRange* range) { | |
1172 range->finger()->Initialize(range); | |
1173 | |
850 if (unallocated_.is_empty()) { | 1174 if (unallocated_.is_empty()) { |
851 unallocated_.Add(chain); | 1175 unallocated_.Add(range); |
852 return; | 1176 return; |
853 } | 1177 } |
854 | 1178 |
855 for (intptr_t i = unallocated_.length() - 1; i >= 0; i--) { | 1179 for (intptr_t i = unallocated_.length() - 1; i >= 0; i--) { |
856 if (ShouldBeAllocatedBefore(chain, unallocated_[i])) { | 1180 if (ShouldBeAllocatedBefore(range, unallocated_[i])) { |
857 unallocated_.InsertAt(i + 1, chain); | 1181 unallocated_.InsertAt(i + 1, range); |
858 return; | 1182 return; |
859 } | 1183 } |
860 } | 1184 } |
861 unallocated_.InsertAt(0, chain); | 1185 unallocated_.InsertAt(0, range); |
862 } | 1186 } |
863 | 1187 |
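Note: AddToUnallocated keeps the worklist sorted by descending start position (ShouldBeAllocatedBefore is a->Start() <= b->Start()), so AllocateCPURegisters can always pop the range with the smallest start via RemoveLast; UnallocatedIsSorted below checks exactly this invariant. A small sketch of the resulting order, assuming simplified types:

    // Sketch of the worklist ordering: the range with the smallest start
    // always ends up at the back, ready to be popped first.
    #include <cassert>
    #include <vector>

    struct Range { int start; };

    void AddToUnallocated(std::vector<Range*>* worklist, Range* range) {
      for (int i = static_cast<int>(worklist->size()) - 1; i >= 0; i--) {
        if (range->start <= (*worklist)[i]->start) {  // ShouldBeAllocatedBefore
          worklist->insert(worklist->begin() + i + 1, range);
          return;
        }
      }
      worklist->insert(worklist->begin(), range);
    }

    int main() {
      std::vector<Range*> worklist;
      Range a = {8};
      Range b = {2};
      Range c = {5};
      AddToUnallocated(&worklist, &a);
      AddToUnallocated(&worklist, &b);
      AddToUnallocated(&worklist, &c);
      assert(worklist.back()->start == 2);   // RemoveLast() gets this one first
      assert(worklist.front()->start == 8);  // largest start is handled last
      return 0;
    }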
864 | 1188 |
865 bool FlowGraphAllocator::UnallocatedIsSorted() { | 1189 bool FlowGraphAllocator::UnallocatedIsSorted() { |
866 for (intptr_t i = unallocated_.length() - 1; i >= 1; i--) { | 1190 for (intptr_t i = unallocated_.length() - 1; i >= 1; i--) { |
867 UseInterval* a = unallocated_[i]; | 1191 LiveRange* a = unallocated_[i]; |
868 UseInterval* b = unallocated_[i - 1]; | 1192 LiveRange* b = unallocated_[i - 1]; |
869 if (!ShouldBeAllocatedBefore(a, b)) return false; | 1193 if (!ShouldBeAllocatedBefore(a, b)) return false; |
870 } | 1194 } |
871 return true; | 1195 return true; |
872 } | 1196 } |
873 | 1197 |
874 | 1198 |
875 void FlowGraphAllocator::AllocateCPURegisters() { | 1199 void FlowGraphAllocator::AllocateCPURegisters() { |
876 ASSERT(UnallocatedIsSorted()); | 1200 ASSERT(UnallocatedIsSorted()); |
877 | 1201 |
1202 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) { | |
1203 if (cpu_regs_[i].length() == 1) { | |
1204 LiveRange* range = cpu_regs_[i][0]; | |
1205 range->finger()->Initialize(range); | |
1206 } | |
1207 } | |
1208 | |
878 while (!unallocated_.is_empty()) { | 1209 while (!unallocated_.is_empty()) { |
879 UseInterval* range = unallocated_.Last(); | 1210 LiveRange* range = unallocated_.Last(); |
880 unallocated_.RemoveLast(); | 1211 unallocated_.RemoveLast(); |
881 const intptr_t start = range->start(); | 1212 const intptr_t start = range->Start(); |
882 TRACE_ALLOC(("Processing interval chain for vreg %d starting at %d\n", | 1213 TRACE_ALLOC(("Processing live range for vreg %d starting at %d\n", |
883 range->vreg(), | 1214 range->vreg(), |
884 start)); | 1215 start)); |
885 | 1216 |
886 // TODO(vegorov): eagerly spill live ranges without register uses. | 1217 // TODO(vegorov): eagerly spill live ranges without register uses. | 
887 AdvanceActiveIntervals(start); | 1218 AdvanceActiveIntervals(start); |
888 | 1219 |
889 if (!AllocateFreeRegister(range)) { | 1220 if (!AllocateFreeRegister(range)) { |
890 builder_->Bailout("ssa allocator: spilling required"); | 1221 AllocateAnyRegister(range); |
891 return; | |
892 } | 1222 } |
893 } | 1223 } |
894 | 1224 |
895 // All allocation decisions were done. | 1225 // All allocation decisions were done. |
896 ASSERT(unallocated_.is_empty()); | 1226 ASSERT(unallocated_.is_empty()); |
897 | 1227 |
898 // Finish allocation. | 1228 // Finish allocation. |
899 AdvanceActiveIntervals(kMaxPosition); | 1229 AdvanceActiveIntervals(kMaxPosition); |
900 TRACE_ALLOC(("Allocation completed\n")); | 1230 TRACE_ALLOC(("Allocation completed\n")); |
901 } | 1231 } |
902 | 1232 |
903 | 1233 |
1234 void FlowGraphAllocator::ConnectSplitSiblings(LiveRange* range, | |
1235 BlockEntryInstr* source_block, | |
1236 BlockEntryInstr* target_block) { | |
1237 if (range->next_sibling() == NULL) { | |
1238 // Nothing to connect: everything is allocated to the same location. | 
1239 return; | |
1240 } | |
1241 | |
1242 const intptr_t source_pos = source_block->end_pos() - 1; | |
1243 const intptr_t target_pos = target_block->start_pos(); | |
1244 | |
1245 Location target; | |
1246 Location source; | |
1247 | |
1248 while ((range != NULL) && (source.IsInvalid() || target.IsInvalid())) { | |
1249 if (range->CanCover(source_pos)) { | |
1250 ASSERT(source.IsInvalid()); | |
1251 source = range->assigned_location(); | |
1252 } | |
1253 if (range->CanCover(target_pos)) { | |
1254 ASSERT(target.IsInvalid()); | |
1255 target = range->assigned_location(); | |
1256 } | |
1257 range = range->next_sibling(); | 
1258 } | 
1258 | |
1259 // Siblings were allocated to the same register. | |
1260 if (source.Equals(target)) return; | |
1261 | |
1262 GetLastParallelMove(source_block)->AddMove(source, target); | |
1263 } | |
1264 | |
1265 | |
1266 void FlowGraphAllocator::ResolveControlFlow() { | |
1267 // Resolve linear control flow between touching split siblings | |
1268 // inside basic blocks. | |
1269 for (intptr_t vreg = 0; vreg < live_ranges_.length(); vreg++) { | |
1270 LiveRange* range = live_ranges_[vreg]; | |
1271 if (range == NULL) continue; | |
1272 | |
1273 while (range->next_sibling() != NULL) { | |
1274 LiveRange* sibling = range->next_sibling(); | |
1275 if ((range->End() == sibling->Start()) && | |
1276 !range->assigned_location().Equals(sibling->assigned_location()) && | |
1277 !IsBlockEntry(range->End())) { | |
1278 ASSERT((sibling->Start() & 1) == 0); | |
1279 InsertMoveBefore(InstructionAt(sibling->Start()), | |
1280 sibling->assigned_location(), | |
1281 range->assigned_location()); | |
1282 } | |
1283 range = sibling; | |
1284 } | |
1285 } | |
1286 | |
1287 // Resolve non-linear control flow across branches. | |
1288 for (intptr_t i = 1; i < block_order_.length(); i++) { | |
1289 BlockEntryInstr* block = block_order_[i]; | 
1290 BitVector* live = live_in_[block->postorder_number()]; | |
1291 for (BitVector::Iterator it(live); !it.Done(); it.Advance()) { | |
1292 LiveRange* range = GetLiveRange(it.Current()); | |
1293 for (intptr_t j = 0; j < block->PredecessorCount(); j++) { | |
1294 ConnectSplitSiblings(range, block->PredecessorAt(j), block); | |
1295 } | |
1296 } | |
1297 } | |
1298 } | |
1299 | |
1300 | |
904 void FlowGraphAllocator::AllocateRegisters() { | 1301 void FlowGraphAllocator::AllocateRegisters() { |
905 GraphEntryInstr* entry = block_order_[0]->AsGraphEntry(); | 1302 GraphEntryInstr* entry = block_order_[0]->AsGraphEntry(); |
906 ASSERT(entry != NULL); | 1303 ASSERT(entry != NULL); |
907 | 1304 |
908 for (intptr_t i = 0; i < entry->start_env()->values().length(); i++) { | 1305 for (intptr_t i = 0; i < entry->start_env()->values().length(); i++) { |
909 if (entry->start_env()->values()[i]->IsUse()) { | 1306 if (entry->start_env()->values()[i]->IsUse()) { |
910 builder_->Bailout("ssa allocator: unsupported start environment"); | 1307 builder_->Bailout("ssa allocator: unsupported start environment"); |
911 } | 1308 } |
912 } | 1309 } |
913 | 1310 |
914 AnalyzeLiveness(); | 1311 AnalyzeLiveness(); |
915 | 1312 |
916 BuildLiveRanges(); | 1313 BuildLiveRanges(); |
917 | 1314 |
918 if (FLAG_print_ssa_liveness) { | 1315 if (FLAG_print_ssa_liveness) { |
919 DumpLiveness(); | 1316 DumpLiveness(); |
920 } | 1317 } |
921 | 1318 |
922 if (FLAG_trace_ssa_allocator) { | 1319 if (FLAG_trace_ssa_allocator) { |
923 PrintLiveRanges(); | 1320 PrintLiveRanges(); |
924 } | 1321 } |
925 | 1322 |
926 AllocateCPURegisters(); | 1323 AllocateCPURegisters(); |
927 | 1324 |
1325 ResolveControlFlow(); | |
1326 | |
928 if (FLAG_trace_ssa_allocator) { | 1327 if (FLAG_trace_ssa_allocator) { |
929 OS::Print("-- ir after allocation -------------------------\n"); | 1328 OS::Print("-- ir after allocation -------------------------\n"); |
930 FlowGraphPrinter printer(Function::Handle(), block_order_, true); | 1329 FlowGraphPrinter printer(Function::Handle(), block_order_, true); |
931 printer.PrintBlocks(); | 1330 printer.PrintBlocks(); |
932 } | 1331 } |
933 } | 1332 } |
934 | 1333 |
935 | 1334 |
936 } // namespace dart | 1335 } // namespace dart |