OLD | NEW |
1 //===- subzero/src/IceRegAlloc.cpp - Linear-scan implementation -----------===// | 1 //===- subzero/src/IceRegAlloc.cpp - Linear-scan implementation -----------===// |
2 // | 2 // |
3 // The Subzero Code Generator | 3 // The Subzero Code Generator |
4 // | 4 // |
5 // This file is distributed under the University of Illinois Open Source | 5 // This file is distributed under the University of Illinois Open Source |
6 // License. See LICENSE.TXT for details. | 6 // License. See LICENSE.TXT for details. |
7 // | 7 // |
8 //===----------------------------------------------------------------------===// | 8 //===----------------------------------------------------------------------===// |
9 /// | 9 /// |
10 /// \file | 10 /// \file |
(...skipping 31 matching lines...)
42 if (Item->getLiveRange().overlapsInst(Def->getNumber(), UseTrimmed)) | 42 if (Item->getLiveRange().overlapsInst(Def->getNumber(), UseTrimmed)) |
43 return true; | 43 return true; |
44 } | 44 } |
45 return false; | 45 return false; |
46 } | 46 } |
47 | 47 |
48 void dumpDisableOverlap(const Cfg *Func, const Variable *Var, | 48 void dumpDisableOverlap(const Cfg *Func, const Variable *Var, |
49 const char *Reason) { | 49 const char *Reason) { |
50 if (!BuildDefs::dump()) | 50 if (!BuildDefs::dump()) |
51 return; | 51 return; |
52 if (Func->isVerbose(IceV_LinearScan)) { | 52 if (!Func->isVerbose(IceV_LinearScan)) |
53 VariablesMetadata *VMetadata = Func->getVMetadata(); | 53 return; |
54 Ostream &Str = Func->getContext()->getStrDump(); | 54 |
55 Str << "Disabling Overlap due to " << Reason << " " << *Var | 55 VariablesMetadata *VMetadata = Func->getVMetadata(); |
56 << " LIVE=" << Var->getLiveRange() << " Defs="; | 56 Ostream &Str = Func->getContext()->getStrDump(); |
57 if (const Inst *FirstDef = VMetadata->getFirstDefinition(Var)) | 57 Str << "Disabling Overlap due to " << Reason << " " << *Var |
58 Str << FirstDef->getNumber(); | 58 << " LIVE=" << Var->getLiveRange() << " Defs="; |
59 const InstDefList &Defs = VMetadata->getLatterDefinitions(Var); | 59 if (const Inst *FirstDef = VMetadata->getFirstDefinition(Var)) |
60 for (size_t i = 0; i < Defs.size(); ++i) { | 60 Str << FirstDef->getNumber(); |
61 Str << "," << Defs[i]->getNumber(); | 61 const InstDefList &Defs = VMetadata->getLatterDefinitions(Var); |
62 } | 62 for (size_t i = 0; i < Defs.size(); ++i) { |
63 Str << "\n"; | 63 Str << "," << Defs[i]->getNumber(); |
64 } | 64 } |
| 65 Str << "\n"; |
65 } | 66 } |
66 | 67 |
67 void dumpLiveRange(const Variable *Var, const Cfg *Func) { | 68 void dumpLiveRange(const Variable *Var, const Cfg *Func) { |
68 if (!BuildDefs::dump()) | 69 if (!BuildDefs::dump()) |
69 return; | 70 return; |
70 Ostream &Str = Func->getContext()->getStrDump(); | 71 Ostream &Str = Func->getContext()->getStrDump(); |
71 char buf[30]; | 72 char buf[30]; |
72 snprintf(buf, llvm::array_lengthof(buf), "%2d", Var->getRegNumTmp()); | 73 snprintf(buf, llvm::array_lengthof(buf), "%2d", Var->getRegNumTmp()); |
73 Str << "R=" << buf << " V="; | 74 Str << "R=" << buf << " V="; |
74 Var->dump(Func); | 75 Var->dump(Func); |
(...skipping 78 matching lines...)
153 << ", variable " << Var->getName(Func) << "\n"; | 154 << ", variable " << Var->getName(Func) << "\n"; |
154 } | 155 } |
155 for (SizeT VarNum : UsesBeforeDefs) { | 156 for (SizeT VarNum : UsesBeforeDefs) { |
156 Variable *Var = Vars[VarNum]; | 157 Variable *Var = Vars[VarNum]; |
157 Str << "LR use before def, instruction " << LREnd[VarNum] << ", variable " | 158 Str << "LR use before def, instruction " << LREnd[VarNum] << ", variable " |
158 << Var->getName(Func) << "\n"; | 159 << Var->getName(Func) << "\n"; |
159 } | 160 } |
160 return false; | 161 return false; |
161 } | 162 } |
162 | 163 |
163 // Prepare for very simple register allocation of only infinite-weight | 164 // Prepare for very simple register allocation of only infinite-weight Variables |
164 // Variables while respecting pre-colored Variables. Some properties we take | 165 // while respecting pre-colored Variables. Some properties we take advantage of: |
165 // advantage of: | |
166 // | 166 // |
167 // * Live ranges of interest consist of a single segment. | 167 // * Live ranges of interest consist of a single segment. |
168 // | 168 // |
169 // * Live ranges of interest never span a call instruction. | 169 // * Live ranges of interest never span a call instruction. |
170 // | 170 // |
171 // * Phi instructions are not considered because either phis have already been | 171 // * Phi instructions are not considered because either phis have already been |
172 // lowered, or they don't contain any pre-colored or infinite-weight | 172 // lowered, or they don't contain any pre-colored or infinite-weight |
173 // Variables. | 173 // Variables. |
174 // | 174 // |
175 // * We don't need to renumber instructions before computing live ranges | 175 // * We don't need to renumber instructions before computing live ranges because |
176 // because all the high-level ICE instructions are deleted prior to lowering, | 176 // all the high-level ICE instructions are deleted prior to lowering, and the |
177 // and the low-level instructions are added in monotonically increasing order. | 177 // low-level instructions are added in monotonically increasing order. |
178 // | 178 // |
179 // * There are no opportunities for register preference or allowing overlap. | 179 // * There are no opportunities for register preference or allowing overlap. |
180 // | 180 // |
181 // Some properties we aren't (yet) taking advantage of: | 181 // Some properties we aren't (yet) taking advantage of: |
182 // | 182 // |
183 // * Because live ranges are a single segment, the Inactive set will always be | 183 // * Because live ranges are a single segment, the Inactive set will always be |
184 // empty, and the live range trimming operation is unnecessary. | 184 // empty, and the live range trimming operation is unnecessary. |
185 // | 185 // |
186 // * Calculating overlap of single-segment live ranges could be optimized a | 186 // * Calculating overlap of single-segment live ranges could be optimized a bit. |
187 // bit. | |
188 void LinearScan::initForInfOnly() { | 187 void LinearScan::initForInfOnly() { |
189 TimerMarker T(TimerStack::TT_initUnhandled, Func); | 188 TimerMarker T(TimerStack::TT_initUnhandled, Func); |
190 FindPreference = false; | 189 FindPreference = false; |
191 FindOverlap = false; | 190 FindOverlap = false; |
192 SizeT NumVars = 0; | 191 SizeT NumVars = 0; |
193 const VarList &Vars = Func->getVariables(); | 192 const VarList &Vars = Func->getVariables(); |
194 | 193 |
195 // Iterate across all instructions and record the begin and end of the live | 194 // Iterate across all instructions and record the begin and end of the live |
196 // range for each variable that is pre-colored or infinite weight. | 195 // range for each variable that is pre-colored or infinite weight. |
197 CfgVector<InstNumberT> LRBegin(Vars.size(), Inst::NumberSentinel); | 196 CfgVector<InstNumberT> LRBegin(Vars.size(), Inst::NumberSentinel); |
(...skipping 145 matching lines...)
343 std::sort(UnhandledPrecolored.rbegin(), UnhandledPrecolored.rend(), | 342 std::sort(UnhandledPrecolored.rbegin(), UnhandledPrecolored.rend(), |
344 CompareRanges); | 343 CompareRanges); |
345 | 344 |
346 Handled.reserve(Unhandled.size()); | 345 Handled.reserve(Unhandled.size()); |
347 Inactive.reserve(Unhandled.size()); | 346 Inactive.reserve(Unhandled.size()); |
348 Active.reserve(Unhandled.size()); | 347 Active.reserve(Unhandled.size()); |
349 Evicted.reserve(Unhandled.size()); | 348 Evicted.reserve(Unhandled.size()); |
350 } | 349 } |
351 | 350 |
352 // This is called when Cur must be allocated a register but no registers are | 351 // This is called when Cur must be allocated a register but no registers are |
353 // available across Cur's live range. To handle this, we find a register that | 352 // available across Cur's live range. To handle this, we find a register that is |
354 // is not explicitly used during Cur's live range, spill that register to a | 353 // not explicitly used during Cur's live range, spill that register to a stack |
355 // stack location right before Cur's live range begins, and fill (reload) the | 354 // location right before Cur's live range begins, and fill (reload) the register |
356 // register from the stack location right after Cur's live range ends. | 355 // from the stack location right after Cur's live range ends. |
357 void LinearScan::addSpillFill(IterationState &Iter) { | 356 void LinearScan::addSpillFill(IterationState &Iter) { |
358 // Identify the actual instructions that begin and end Iter.Cur's live range. | 357 // Identify the actual instructions that begin and end Iter.Cur's live range. |
359 // Iterate through Iter.Cur's node's instruction list until we find the actual | 358 // Iterate through Iter.Cur's node's instruction list until we find the actual |
360 // instructions with instruction numbers corresponding to Iter.Cur's recorded | 359 // instructions with instruction numbers corresponding to Iter.Cur's recorded |
361 // live range endpoints. This sounds inefficient but shouldn't be a problem | 360 // live range endpoints. This sounds inefficient but shouldn't be a problem |
362 // in practice because: | 361 // in practice because: |
363 // (1) This function is almost never called in practice. | 362 // (1) This function is almost never called in practice. |
364 // (2) Since this register over-subscription problem happens only for | 363 // (2) Since this register over-subscription problem happens only for |
365 // phi-lowered instructions, the number of instructions in the node is | 364 // phi-lowered instructions, the number of instructions in the node is |
366 // proportional to the number of phi instructions in the original node, | 365 // proportional to the number of phi instructions in the original node, |
(...skipping 11 matching lines...)
378 InstList::iterator SpillPoint = Insts.end(); | 377 InstList::iterator SpillPoint = Insts.end(); |
379 InstList::iterator FillPoint = Insts.end(); | 378 InstList::iterator FillPoint = Insts.end(); |
380 // Stop searching after we have found both the SpillPoint and the FillPoint. | 379 // Stop searching after we have found both the SpillPoint and the FillPoint. |
381 for (auto I = Insts.begin(), E = Insts.end(); | 380 for (auto I = Insts.begin(), E = Insts.end(); |
382 I != E && (SpillPoint == E || FillPoint == E); ++I) { | 381 I != E && (SpillPoint == E || FillPoint == E); ++I) { |
383 if (I->getNumber() == Start) | 382 if (I->getNumber() == Start) |
384 SpillPoint = I; | 383 SpillPoint = I; |
385 if (I->getNumber() == End) | 384 if (I->getNumber() == End) |
386 FillPoint = I; | 385 FillPoint = I; |
387 if (SpillPoint != E) { | 386 if (SpillPoint != E) { |
388 // Remove from RegMask any physical registers referenced during Cur's | 387 // Remove from RegMask any physical registers referenced during Cur's live |
389 // live range. Start looking after SpillPoint gets set, i.e. once Cur's | 388 // range. Start looking after SpillPoint gets set, i.e. once Cur's live |
390 // live range begins. | 389 // range begins. |
391 FOREACH_VAR_IN_INST(Var, *I) { | 390 FOREACH_VAR_IN_INST(Var, *I) { |
392 if (!Var->hasRegTmp()) | 391 if (!Var->hasRegTmp()) |
393 continue; | 392 continue; |
394 const llvm::SmallBitVector &Aliases = *RegAliases[Var->getRegNumTmp()]; | 393 const llvm::SmallBitVector &Aliases = *RegAliases[Var->getRegNumTmp()]; |
395 for (int32_t RegAlias = Aliases.find_first(); RegAlias >= 0; | 394 for (int32_t RegAlias = Aliases.find_first(); RegAlias >= 0; |
396 RegAlias = Aliases.find_next(RegAlias)) { | 395 RegAlias = Aliases.find_next(RegAlias)) { |
397 Iter.RegMask[RegAlias] = false; | 396 Iter.RegMask[RegAlias] = false; |
398 } | 397 } |
399 } | 398 } |
400 } | 399 } |
401 } | 400 } |
402 assert(SpillPoint != Insts.end()); | 401 assert(SpillPoint != Insts.end()); |
403 assert(FillPoint != Insts.end()); | 402 assert(FillPoint != Insts.end()); |
404 ++FillPoint; | 403 ++FillPoint; |
405 // TODO(stichnot): Randomize instead of find_first(). | 404 // TODO(stichnot): Randomize instead of find_first(). |
406 int32_t RegNum = Iter.RegMask.find_first(); | 405 int32_t RegNum = Iter.RegMask.find_first(); |
407 assert(RegNum != -1); | 406 assert(RegNum != -1); |
408 Iter.Cur->setRegNumTmp(RegNum); | 407 Iter.Cur->setRegNumTmp(RegNum); |
409 Variable *Preg = Target->getPhysicalRegister(RegNum, Iter.Cur->getType()); | 408 Variable *Preg = Target->getPhysicalRegister(RegNum, Iter.Cur->getType()); |
410 // TODO(stichnot): Add SpillLoc to VariablesMetadata tracking so that | 409 // TODO(stichnot): Add SpillLoc to VariablesMetadata tracking so that SpillLoc |
411 // SpillLoc is correctly identified as !isMultiBlock(), reducing stack frame | 410 // is correctly identified as !isMultiBlock(), reducing stack frame size. |
412 // size. | |
413 Variable *SpillLoc = Func->makeVariable(Iter.Cur->getType()); | 411 Variable *SpillLoc = Func->makeVariable(Iter.Cur->getType()); |
414 // Add "reg=FakeDef;spill=reg" before SpillPoint | 412 // Add "reg=FakeDef;spill=reg" before SpillPoint |
415 Target->lowerInst(Node, SpillPoint, InstFakeDef::create(Func, Preg)); | 413 Target->lowerInst(Node, SpillPoint, InstFakeDef::create(Func, Preg)); |
416 Target->lowerInst(Node, SpillPoint, InstAssign::create(Func, SpillLoc, Preg)); | 414 Target->lowerInst(Node, SpillPoint, InstAssign::create(Func, SpillLoc, Preg)); |
417 // add "reg=spill;FakeUse(reg)" before FillPoint | 415 // add "reg=spill;FakeUse(reg)" before FillPoint |
418 Target->lowerInst(Node, FillPoint, InstAssign::create(Func, Preg, SpillLoc)); | 416 Target->lowerInst(Node, FillPoint, InstAssign::create(Func, Preg, SpillLoc)); |
419 Target->lowerInst(Node, FillPoint, InstFakeUse::create(Func, Preg)); | 417 Target->lowerInst(Node, FillPoint, InstFakeUse::create(Func, Preg)); |
420 } | 418 } |
421 | 419 |
422 void LinearScan::handleActiveRangeExpiredOrInactive(const Variable *Cur) { | 420 void LinearScan::handleActiveRangeExpiredOrInactive(const Variable *Cur) { |
(...skipping 45 matching lines...)
468 for (int32_t RegAlias = Aliases.find_first(); RegAlias >= 0; | 466 for (int32_t RegAlias = Aliases.find_first(); RegAlias >= 0; |
469 RegAlias = Aliases.find_next(RegAlias)) { | 467 RegAlias = Aliases.find_next(RegAlias)) { |
470 assert(RegUses[RegAlias] >= 0); | 468 assert(RegUses[RegAlias] >= 0); |
471 ++RegUses[RegAlias]; | 469 ++RegUses[RegAlias]; |
472 } | 470 } |
473 } | 471 } |
474 } | 472 } |
475 } | 473 } |
476 | 474 |
477 // Infer register preference and allowable overlap. Only form a preference when | 475 // Infer register preference and allowable overlap. Only form a preference when |
478 // the current Variable has an unambiguous "first" definition. The preference | 476 // the current Variable has an unambiguous "first" definition. The preference is |
479 // is some source Variable of the defining instruction that either is assigned | 477 // some source Variable of the defining instruction that either is assigned a |
480 // a register that is currently free, or that is assigned a register that is | 478 // register that is currently free, or that is assigned a register that is not |
481 // not free but overlap is allowed. Overlap is allowed when the Variable under | 479 // free but overlap is allowed. Overlap is allowed when the Variable under |
482 // consideration is single-definition, and its definition is a simple | 480 // consideration is single-definition, and its definition is a simple assignment |
483 // assignment - i.e., the register gets copied/aliased but is never modified. | 481 // - i.e., the register gets copied/aliased but is never modified. Furthermore, |
484 // Furthermore, overlap is only allowed when preferred Variable definition | 482 // overlap is only allowed when preferred Variable definition instructions do |
485 // instructions do not appear within the current Variable's live range. | 483 // not appear within the current Variable's live range. |
486 void LinearScan::findRegisterPreference(IterationState &Iter) { | 484 void LinearScan::findRegisterPreference(IterationState &Iter) { |
487 Iter.Prefer = nullptr; | 485 Iter.Prefer = nullptr; |
488 Iter.PreferReg = Variable::NoRegister; | 486 Iter.PreferReg = Variable::NoRegister; |
489 Iter.AllowOverlap = false; | 487 Iter.AllowOverlap = false; |
490 | 488 |
491 if (FindPreference) { | 489 if (!FindPreference) |
492 VariablesMetadata *VMetadata = Func->getVMetadata(); | 490 return; |
493 if (const Inst *DefInst = | 491 |
494 VMetadata->getFirstDefinitionSingleBlock(Iter.Cur)) { | 492 VariablesMetadata *VMetadata = Func->getVMetadata(); |
495 assert(DefInst->getDest() == Iter.Cur); | 493 const Inst *DefInst = VMetadata->getFirstDefinitionSingleBlock(Iter.Cur); |
496 bool IsAssign = DefInst->isVarAssign(); | 494 if (DefInst == nullptr) |
497 bool IsSingleDef = !VMetadata->isMultiDef(Iter.Cur); | 495 return; |
498 FOREACH_VAR_IN_INST(SrcVar, *DefInst) { | 496 |
499 // Only consider source variables that have (so far) been assigned a | 497 assert(DefInst->getDest() == Iter.Cur); |
500 // register. That register must be one in the RegMask set, e.g. don't | 498 const bool IsSingleDefAssign = |
501 // try to prefer the stack pointer as a result of the stacksave | 499 DefInst->isVarAssign() && !VMetadata->isMultiDef(Iter.Cur); |
502 // intrinsic. | 500 FOREACH_VAR_IN_INST(SrcVar, *DefInst) { |
503 if (SrcVar->hasRegTmp()) { | 501 // Only consider source variables that have (so far) been assigned a |
504 const llvm::SmallBitVector &Aliases = | 502 // register. |
505 *RegAliases[SrcVar->getRegNumTmp()]; | 503 if (!SrcVar->hasRegTmp()) |
506 const int32_t SrcReg = (Iter.RegMask & Aliases).find_first(); | 504 continue; |
507 const bool IsAliasAvailable = (SrcReg != -1); | 505 |
508 if (IsAliasAvailable) { | 506 // That register must be one in the RegMask set, e.g. don't try to prefer |
509 if (FindOverlap && !Iter.Free[SrcReg]) { | 507 // the stack pointer as a result of the stacksave intrinsic. |
510 // Don't bother trying to enable AllowOverlap if the register is | 508 const llvm::SmallBitVector &Aliases = *RegAliases[SrcVar->getRegNumTmp()]; |
511 // already free. | 509 const int32_t SrcReg = (Iter.RegMask & Aliases).find_first(); |
512 Iter.AllowOverlap = IsSingleDef && IsAssign && | 510 if (SrcReg == -1) |
513 !overlapsDefs(Func, Iter.Cur, SrcVar); | 511 continue; |
514 } | 512 |
515 if (Iter.AllowOverlap || Iter.Free[SrcReg]) { | 513 if (FindOverlap && IsSingleDefAssign && !Iter.Free[SrcReg]) { |
516 Iter.Prefer = SrcVar; | 514 // Don't bother trying to enable AllowOverlap if the register is already |
517 Iter.PreferReg = SrcReg; | 515 // free (hence the test on Iter.Free[SrcReg]). |
518 // Stop looking for a preference after the first valid preference | 516 Iter.AllowOverlap = !overlapsDefs(Func, Iter.Cur, SrcVar); |
519 // is found. One might think that we should look at all | |
520 // instruction variables to find the best <Prefer,AllowOverlap> | |
521 // combination, but note that AllowOverlap can only be true for a | |
522 // simple assignment statement which can have only one source | |
523 // operand, so it's not possible for AllowOverlap to be true | |
524 // beyond the first source operand. | |
525 FOREACH_VAR_IN_INST_BREAK; | |
526 } | |
527 } | |
528 } | |
529 } | |
530 if (Verbose && Iter.Prefer) { | |
531 Ostream &Str = Ctx->getStrDump(); | |
532 Str << "Initial Iter.Prefer="; | |
533 Iter.Prefer->dump(Func); | |
534 Str << " R=" << Iter.PreferReg | |
535 << " LIVE=" << Iter.Prefer->getLiveRange() | |
536 << " Overlap=" << Iter.AllowOverlap << "\n"; | |
537 } | |
538 } | 517 } |
| 518 if (Iter.AllowOverlap || Iter.Free[SrcReg]) { |
| 519 Iter.Prefer = SrcVar; |
| 520 Iter.PreferReg = SrcReg; |
| 521 // Stop looking for a preference after the first valid preference is |
| 522 // found. One might think that we should look at all instruction |
| 523 // variables to find the best <Prefer,AllowOverlap> combination, but note |
| 524 // that AllowOverlap can only be true for a simple assignment statement |
| 525 // which can have only one source operand, so it's not possible for |
| 526 // AllowOverlap to be true beyond the first source operand. |
| 527 FOREACH_VAR_IN_INST_BREAK; |
| 528 } |
| 529 } |
| 530 if (BuildDefs::dump() && Verbose && Iter.Prefer) { |
| 531 Ostream &Str = Ctx->getStrDump(); |
| 532 Str << "Initial Iter.Prefer="; |
| 533 Iter.Prefer->dump(Func); |
| 534 Str << " R=" << Iter.PreferReg << " LIVE=" << Iter.Prefer->getLiveRange() |
| 535 << " Overlap=" << Iter.AllowOverlap << "\n"; |
539 } | 536 } |
540 } | 537 } |
541 | 538 |
542 // Remove registers from the Free[] list where an Inactive range overlaps with | 539 // Remove registers from the Free[] list where an Inactive range overlaps with |
543 // the current range. | 540 // the current range. |
544 void LinearScan::filterFreeWithInactiveRanges(IterationState &Iter) { | 541 void LinearScan::filterFreeWithInactiveRanges(IterationState &Iter) { |
545 for (const Variable *Item : Inactive) { | 542 for (const Variable *Item : Inactive) { |
546 if (!Item->rangeOverlaps(Iter.Cur)) | 543 if (!Item->rangeOverlaps(Iter.Cur)) |
547 continue; | 544 continue; |
548 const llvm::SmallBitVector &Aliases = *RegAliases[Item->getRegNumTmp()]; | 545 const llvm::SmallBitVector &Aliases = *RegAliases[Item->getRegNumTmp()]; |
549 // TODO(stichnot): Do this with bitvector ops, not a loop, for efficiency. | 546 // TODO(stichnot): Do this with bitvector ops, not a loop, for efficiency. |
550 for (int32_t RegAlias = Aliases.find_first(); RegAlias >= 0; | 547 for (int32_t RegAlias = Aliases.find_first(); RegAlias >= 0; |
551 RegAlias = Aliases.find_next(RegAlias)) { | 548 RegAlias = Aliases.find_next(RegAlias)) { |
552 // Don't assert(Free[RegNum]) because in theory (though probably never in | 549 // Don't assert(Free[RegNum]) because in theory (though probably never in |
553 // practice) there could be two inactive variables that were marked with | 550 // practice) there could be two inactive variables that were marked with |
554 // AllowOverlap. | 551 // AllowOverlap. |
555 Iter.Free[RegAlias] = false; | 552 Iter.Free[RegAlias] = false; |
556 // Disable AllowOverlap if an Inactive variable, which is not Prefer, | 553 // Disable AllowOverlap if an Inactive variable, which is not Prefer, |
557 // shares Prefer's register, and has a definition within Cur's live | 554 // shares Prefer's register, and has a definition within Cur's live range. |
558 // range. | |
559 if (Iter.AllowOverlap && Item != Iter.Prefer && | 555 if (Iter.AllowOverlap && Item != Iter.Prefer && |
560 RegAlias == Iter.PreferReg && overlapsDefs(Func, Iter.Cur, Item)) { | 556 RegAlias == Iter.PreferReg && overlapsDefs(Func, Iter.Cur, Item)) { |
561 Iter.AllowOverlap = false; | 557 Iter.AllowOverlap = false; |
562 dumpDisableOverlap(Func, Item, "Inactive"); | 558 dumpDisableOverlap(Func, Item, "Inactive"); |
563 } | 559 } |
564 } | 560 } |
565 } | 561 } |
566 } | 562 } |
567 | 563 |
568 // Remove registers from the Free[] list where an Unhandled pre-colored range | 564 // Remove registers from the Free[] list where an Unhandled pre-colored range |
569 // overlaps with the current range, and set those registers to infinite weight | 565 // overlaps with the current range, and set those registers to infinite weight |
570 // so that they aren't candidates for eviction. Cur->rangeEndsBefore(Item) is | 566 // so that they aren't candidates for eviction. Cur->rangeEndsBefore(Item) is an |
571 // an early exit check that turns a guaranteed O(N^2) algorithm into expected | 567 // early exit check that turns a guaranteed O(N^2) algorithm into expected |
572 // linear complexity. | 568 // linear complexity. |
573 void LinearScan::filterFreeWithPrecoloredRanges(IterationState &Iter) { | 569 void LinearScan::filterFreeWithPrecoloredRanges(IterationState &Iter) { |
574 for (Variable *Item : reverse_range(UnhandledPrecolored)) { | 570 for (Variable *Item : reverse_range(UnhandledPrecolored)) { |
575 assert(Item->hasReg()); | 571 assert(Item->hasReg()); |
576 if (Iter.Cur->rangeEndsBefore(Item)) | 572 if (Iter.Cur->rangeEndsBefore(Item)) |
577 break; | 573 break; |
578 if (Item->rangeOverlaps(Iter.Cur)) { | 574 if (!Item->rangeOverlaps(Iter.Cur)) |
579 const llvm::SmallBitVector &Aliases = | 575 continue; |
580 *RegAliases[Item->getRegNum()]; // Note: not getRegNumTmp() | 576 const llvm::SmallBitVector &Aliases = |
581 for (int32_t RegAlias = Aliases.find_first(); RegAlias >= 0; | 577 *RegAliases[Item->getRegNum()]; // Note: not getRegNumTmp() |
582 RegAlias = Aliases.find_next(RegAlias)) { | 578 for (int32_t RegAlias = Aliases.find_first(); RegAlias >= 0; |
583 Iter.Weights[RegAlias].setWeight(RegWeight::Inf); | 579 RegAlias = Aliases.find_next(RegAlias)) { |
584 Iter.Free[RegAlias] = false; | 580 Iter.Weights[RegAlias].setWeight(RegWeight::Inf); |
585 Iter.PrecoloredUnhandledMask[RegAlias] = true; | 581 Iter.Free[RegAlias] = false; |
586 // Disable Iter.AllowOverlap if the preferred register is one of these | 582 Iter.PrecoloredUnhandledMask[RegAlias] = true; |
587 // pre-colored unhandled overlapping ranges. | 583 // Disable Iter.AllowOverlap if the preferred register is one of these |
588 if (Iter.AllowOverlap && RegAlias == Iter.PreferReg) { | 584 // pre-colored unhandled overlapping ranges. |
589 Iter.AllowOverlap = false; | 585 if (Iter.AllowOverlap && RegAlias == Iter.PreferReg) { |
590 dumpDisableOverlap(Func, Item, "PrecoloredUnhandled"); | 586 Iter.AllowOverlap = false; |
591 } | 587 dumpDisableOverlap(Func, Item, "PrecoloredUnhandled"); |
592 } | 588 } |
593 } | 589 } |
594 } | 590 } |
595 } | 591 } |
596 | 592 |
597 void LinearScan::allocatePrecoloredRegister(Variable *Cur) { | 593 void LinearScan::allocatePrecoloredRegister(Variable *Cur) { |
598 int32_t RegNum = Cur->getRegNum(); | 594 int32_t RegNum = Cur->getRegNum(); |
599 // RegNumTmp should have already been set above. | 595 // RegNumTmp should have already been set above. |
600 assert(Cur->getRegNumTmp() == RegNum); | 596 assert(Cur->getRegNumTmp() == RegNum); |
601 dumpLiveRangeTrace("Precoloring ", Cur); | 597 dumpLiveRangeTrace("Precoloring ", Cur); |
(...skipping 55 matching lines...)
657 continue; | 653 continue; |
658 assert(Item->hasRegTmp()); | 654 assert(Item->hasRegTmp()); |
659 const llvm::SmallBitVector &Aliases = *RegAliases[Item->getRegNumTmp()]; | 655 const llvm::SmallBitVector &Aliases = *RegAliases[Item->getRegNumTmp()]; |
660 RegWeight W = Item->getWeight(Func); | 656 RegWeight W = Item->getWeight(Func); |
661 for (int32_t RegAlias = Aliases.find_first(); RegAlias >= 0; | 657 for (int32_t RegAlias = Aliases.find_first(); RegAlias >= 0; |
662 RegAlias = Aliases.find_next(RegAlias)) { | 658 RegAlias = Aliases.find_next(RegAlias)) { |
663 Iter.Weights[RegAlias].addWeight(W); | 659 Iter.Weights[RegAlias].addWeight(W); |
664 } | 660 } |
665 } | 661 } |
666 | 662 |
667 // All the weights are now calculated. Find the register with smallest | 663 // All the weights are now calculated. Find the register with smallest weight. |
668 // weight. | |
669 int32_t MinWeightIndex = Iter.RegMask.find_first(); | 664 int32_t MinWeightIndex = Iter.RegMask.find_first(); |
670 // MinWeightIndex must be valid because of the initial RegMask.any() test. | 665 // MinWeightIndex must be valid because of the initial RegMask.any() test. |
671 assert(MinWeightIndex >= 0); | 666 assert(MinWeightIndex >= 0); |
672 for (SizeT i = MinWeightIndex + 1; i < Iter.Weights.size(); ++i) { | 667 for (SizeT i = MinWeightIndex + 1; i < Iter.Weights.size(); ++i) { |
673 if (Iter.RegMask[i] && Iter.Weights[i] < Iter.Weights[MinWeightIndex]) | 668 if (Iter.RegMask[i] && Iter.Weights[i] < Iter.Weights[MinWeightIndex]) |
674 MinWeightIndex = i; | 669 MinWeightIndex = i; |
675 } | 670 } |
676 | 671 |
677 if (Iter.Cur->getWeight(Func) <= Iter.Weights[MinWeightIndex]) { | 672 if (Iter.Cur->getWeight(Func) <= Iter.Weights[MinWeightIndex]) { |
678 // Cur doesn't have priority over any other live ranges, so don't allocate | 673 // Cur doesn't have priority over any other live ranges, so don't allocate |
(...skipping 27 matching lines...)
706 Evicted.push_back(Item); | 701 Evicted.push_back(Item); |
707 } | 702 } |
708 } | 703 } |
709 // Do the same for Inactive. | 704 // Do the same for Inactive. |
710 for (SizeT I = Inactive.size(); I > 0; --I) { | 705 for (SizeT I = Inactive.size(); I > 0; --I) { |
711 const SizeT Index = I - 1; | 706 const SizeT Index = I - 1; |
712 Variable *Item = Inactive[Index]; | 707 Variable *Item = Inactive[Index]; |
713 // Note: The Item->rangeOverlaps(Cur) clause is not part of the | 708 // Note: The Item->rangeOverlaps(Cur) clause is not part of the |
714 // description of AssignMemLoc() in the original paper. But there doesn't | 709 // description of AssignMemLoc() in the original paper. But there doesn't |
715 // seem to be any need to evict an inactive live range that doesn't | 710 // seem to be any need to evict an inactive live range that doesn't |
716 // overlap with the live range currently being considered. It's | 711 // overlap with the live range currently being considered. It's especially |
717 // especially bad if we would end up evicting an infinite-weight but | 712 // bad if we would end up evicting an infinite-weight but |
718 // currently-inactive live range. The most common situation for this | 713 // currently-inactive live range. The most common situation for this would |
719 // would be a scratch register kill set for call instructions. | 714 // be a scratch register kill set for call instructions. |
720 if (Aliases[Item->getRegNumTmp()] && Item->rangeOverlaps(Iter.Cur)) { | 715 if (Aliases[Item->getRegNumTmp()] && Item->rangeOverlaps(Iter.Cur)) { |
721 dumpLiveRangeTrace("Evicting I ", Item); | 716 dumpLiveRangeTrace("Evicting I ", Item); |
722 Item->setRegNumTmp(Variable::NoRegister); | 717 Item->setRegNumTmp(Variable::NoRegister); |
723 moveItem(Inactive, Index, Handled); | 718 moveItem(Inactive, Index, Handled); |
724 Evicted.push_back(Item); | 719 Evicted.push_back(Item); |
725 } | 720 } |
726 } | 721 } |
727 // Assign the register to Cur. | 722 // Assign the register to Cur. |
728 Iter.Cur->setRegNumTmp(MinWeightIndex); | 723 Iter.Cur->setRegNumTmp(MinWeightIndex); |
729 for (int32_t RegAlias = Aliases.find_first(); RegAlias >= 0; | 724 for (int32_t RegAlias = Aliases.find_first(); RegAlias >= 0; |
(...skipping 25 matching lines...)
755 | 750 |
756 // Finish up by setting RegNum = RegNumTmp (or a random permutation thereof) | 751 // Finish up by setting RegNum = RegNumTmp (or a random permutation thereof) |
757 // for each Variable. | 752 // for each Variable. |
758 for (Variable *Item : Handled) { | 753 for (Variable *Item : Handled) { |
759 int32_t RegNum = Item->getRegNumTmp(); | 754 int32_t RegNum = Item->getRegNumTmp(); |
760 int32_t AssignedRegNum = RegNum; | 755 int32_t AssignedRegNum = RegNum; |
761 | 756 |
762 if (Randomized && Item->hasRegTmp() && !Item->hasReg()) { | 757 if (Randomized && Item->hasRegTmp() && !Item->hasReg()) { |
763 AssignedRegNum = Permutation[RegNum]; | 758 AssignedRegNum = Permutation[RegNum]; |
764 } | 759 } |
765 if (Verbose) { | 760 if (BuildDefs::dump() && Verbose) { |
766 Ostream &Str = Ctx->getStrDump(); | 761 Ostream &Str = Ctx->getStrDump(); |
767 if (!Item->hasRegTmp()) { | 762 if (!Item->hasRegTmp()) { |
768 Str << "Not assigning "; | 763 Str << "Not assigning "; |
769 Item->dump(Func); | 764 Item->dump(Func); |
770 Str << "\n"; | 765 Str << "\n"; |
771 } else { | 766 } else { |
772 Str << (AssignedRegNum == Item->getRegNum() ? "Reassigning " | 767 Str << (AssignedRegNum == Item->getRegNum() ? "Reassigning " |
773 : "Assigning ") | 768 : "Assigning ") |
774 << Target->getRegName(AssignedRegNum, IceType_i32) << "(r" | 769 << Target->getRegName(AssignedRegNum, IceType_i32) << "(r" |
775 << AssignedRegNum << ") to "; | 770 << AssignedRegNum << ") to "; |
776 Item->dump(Func); | 771 Item->dump(Func); |
777 Str << "\n"; | 772 Str << "\n"; |
778 } | 773 } |
779 } | 774 } |
780 Item->setRegNum(AssignedRegNum); | 775 Item->setRegNum(AssignedRegNum); |
781 } | 776 } |
782 } | 777 } |
783 | 778 |
784 // Implements the linear-scan algorithm. Based on "Linear Scan Register | 779 // Implements the linear-scan algorithm. Based on "Linear Scan Register |
785 // Allocation in the Context of SSA Form and Register Constraints" by Hanspeter | 780 // Allocation in the Context of SSA Form and Register Constraints" by Hanspeter |
786 // Mössenböck and Michael Pfeiffer, | 781 // Mössenböck and Michael Pfeiffer, |
787 // ftp://ftp.ssw.uni-linz.ac.at/pub/Papers/Moe02.PDF. This implementation is | 782 // ftp://ftp.ssw.uni-linz.ac.at/pub/Papers/Moe02.PDF. This implementation is |
788 // modified to take affinity into account and allow two interfering variables | 783 // modified to take affinity into account and allow two interfering variables to |
789 // to share the same register in certain cases. | 784 // share the same register in certain cases. |
790 // | 785 // |
791 // Requires running Cfg::liveness(Liveness_Intervals) in preparation. Results | 786 // Requires running Cfg::liveness(Liveness_Intervals) in preparation. Results |
792 // are assigned to Variable::RegNum for each Variable. | 787 // are assigned to Variable::RegNum for each Variable. |
793 void LinearScan::scan(const llvm::SmallBitVector &RegMaskFull, | 788 void LinearScan::scan(const llvm::SmallBitVector &RegMaskFull, |
794 bool Randomized) { | 789 bool Randomized) { |
795 TimerMarker T(TimerStack::TT_linearScan, Func); | 790 TimerMarker T(TimerStack::TT_linearScan, Func); |
796 assert(RegMaskFull.any()); // Sanity check | 791 assert(RegMaskFull.any()); // Sanity check |
797 if (Verbose) | 792 if (Verbose) |
798 Ctx->lockStr(); | 793 Ctx->lockStr(); |
799 Func->resetCurrentNode(); | 794 Func->resetCurrentNode(); |
800 const size_t NumRegisters = RegMaskFull.size(); | 795 const size_t NumRegisters = RegMaskFull.size(); |
801 llvm::SmallBitVector PreDefinedRegisters(NumRegisters); | 796 llvm::SmallBitVector PreDefinedRegisters(NumRegisters); |
802 if (Randomized) { | 797 if (Randomized) { |
803 for (Variable *Var : UnhandledPrecolored) { | 798 for (Variable *Var : UnhandledPrecolored) { |
804 PreDefinedRegisters[Var->getRegNum()] = true; | 799 PreDefinedRegisters[Var->getRegNum()] = true; |
805 } | 800 } |
806 } | 801 } |
807 | 802 |
808 // Build a LiveRange representing the Kills list. | 803 // Build a LiveRange representing the Kills list. |
809 LiveRange KillsRange(Kills); | 804 LiveRange KillsRange(Kills); |
810 KillsRange.untrim(); | 805 KillsRange.untrim(); |
811 | 806 |
812 // Reset the register use count | 807 // Reset the register use count. |
813 RegUses.resize(NumRegisters); | 808 RegUses.resize(NumRegisters); |
814 std::fill(RegUses.begin(), RegUses.end(), 0); | 809 std::fill(RegUses.begin(), RegUses.end(), 0); |
815 | 810 |
816 // Unhandled is already set to all ranges in increasing order of start | 811 // Unhandled is already set to all ranges in increasing order of start points. |
817 // points. | |
818 assert(Active.empty()); | 812 assert(Active.empty()); |
819 assert(Inactive.empty()); | 813 assert(Inactive.empty()); |
820 assert(Handled.empty()); | 814 assert(Handled.empty()); |
821 const TargetLowering::RegSetMask RegsInclude = | 815 const TargetLowering::RegSetMask RegsInclude = |
822 TargetLowering::RegSet_CallerSave; | 816 TargetLowering::RegSet_CallerSave; |
823 const TargetLowering::RegSetMask RegsExclude = TargetLowering::RegSet_None; | 817 const TargetLowering::RegSetMask RegsExclude = TargetLowering::RegSet_None; |
824 const llvm::SmallBitVector KillsMask = | 818 const llvm::SmallBitVector KillsMask = |
825 Target->getRegisterSet(RegsInclude, RegsExclude); | 819 Target->getRegisterSet(RegsInclude, RegsExclude); |
826 | 820 |
827 // Allocate memory once outside the loop | 821 // Allocate memory once outside the loop. |
828 IterationState Iter; | 822 IterationState Iter; |
829 Iter.Weights.reserve(NumRegisters); | 823 Iter.Weights.reserve(NumRegisters); |
830 Iter.PrecoloredUnhandledMask.reserve(NumRegisters); | 824 Iter.PrecoloredUnhandledMask.reserve(NumRegisters); |
831 | 825 |
832 while (!Unhandled.empty()) { | 826 while (!Unhandled.empty()) { |
833 Iter.Cur = Unhandled.back(); | 827 Iter.Cur = Unhandled.back(); |
834 Unhandled.pop_back(); | 828 Unhandled.pop_back(); |
835 dumpLiveRangeTrace("\nConsidering ", Iter.Cur); | 829 dumpLiveRangeTrace("\nConsidering ", Iter.Cur); |
836 Iter.RegMask = RegMaskFull & Target->getRegistersForVariable(Iter.Cur); | 830 Iter.RegMask = RegMaskFull & Target->getRegistersForVariable(Iter.Cur); |
837 KillsRange.trim(Iter.Cur->getLiveRange().getStart()); | 831 KillsRange.trim(Iter.Cur->getLiveRange().getStart()); |
(...skipping 49 matching lines...)
887 Iter.Free.reset(KillsMask); | 881 Iter.Free.reset(KillsMask); |
888 for (int i = KillsMask.find_first(); i != -1; | 882 for (int i = KillsMask.find_first(); i != -1; |
889 i = KillsMask.find_next(i)) { | 883 i = KillsMask.find_next(i)) { |
890 Iter.Weights[i].setWeight(RegWeight::Inf); | 884 Iter.Weights[i].setWeight(RegWeight::Inf); |
891 if (Iter.PreferReg == i) | 885 if (Iter.PreferReg == i) |
892 Iter.AllowOverlap = false; | 886 Iter.AllowOverlap = false; |
893 } | 887 } |
894 } | 888 } |
895 | 889 |
896 // Print info about physical register availability. | 890 // Print info about physical register availability. |
897 if (Verbose) { | 891 if (BuildDefs::dump() && Verbose) { |
898 Ostream &Str = Ctx->getStrDump(); | 892 Ostream &Str = Ctx->getStrDump(); |
899 for (SizeT i = 0; i < Iter.RegMask.size(); ++i) { | 893 for (SizeT i = 0; i < Iter.RegMask.size(); ++i) { |
900 if (Iter.RegMask[i]) { | 894 if (Iter.RegMask[i]) { |
901 Str << Target->getRegName(i, IceType_i32) << "(U=" << RegUses[i] | 895 Str << Target->getRegName(i, IceType_i32) << "(U=" << RegUses[i] |
902 << ",F=" << Iter.Free[i] | 896 << ",F=" << Iter.Free[i] |
903 << ",P=" << Iter.PrecoloredUnhandledMask[i] << ") "; | 897 << ",P=" << Iter.PrecoloredUnhandledMask[i] << ") "; |
904 } | 898 } |
905 } | 899 } |
906 Str << "\n"; | 900 Str << "\n"; |
907 } | 901 } |
908 | 902 |
909 if (Iter.Prefer && (Iter.AllowOverlap || Iter.Free[Iter.PreferReg])) { | 903 if (Iter.Prefer && (Iter.AllowOverlap || Iter.Free[Iter.PreferReg])) { |
910 // First choice: a preferred register that is either free or is allowed | 904 // First choice: a preferred register that is either free or is allowed to |
911 // to overlap with its linked variable. | 905 // overlap with its linked variable. |
912 allocatePreferredRegister(Iter); | 906 allocatePreferredRegister(Iter); |
913 } else if (Iter.Free.any()) { | 907 } else if (Iter.Free.any()) { |
914 // Second choice: any free register. | 908 // Second choice: any free register. |
915 allocateFreeRegister(Iter); | 909 allocateFreeRegister(Iter); |
916 } else { | 910 } else { |
917 // Fallback: there are no free registers, so we look for the | 911 // Fallback: there are no free registers, so we look for the lowest-weight |
918 // lowest-weight register and see if Cur has higher weight. | 912 // register and see if Cur has higher weight. |
919 handleNoFreeRegisters(Iter); | 913 handleNoFreeRegisters(Iter); |
920 } | 914 } |
921 dump(Func); | 915 dump(Func); |
922 } | 916 } |
923 | 917 |
924 // Move anything Active or Inactive to Handled for easier handling. | 918 // Move anything Active or Inactive to Handled for easier handling. |
925 Handled.insert(Handled.end(), Active.begin(), Active.end()); | 919 Handled.insert(Handled.end(), Active.begin(), Active.end()); |
926 Active.clear(); | 920 Active.clear(); |
927 Handled.insert(Handled.end(), Inactive.begin(), Inactive.end()); | 921 Handled.insert(Handled.end(), Inactive.begin(), Inactive.end()); |
928 Inactive.clear(); | 922 Inactive.clear(); |
929 dump(Func); | 923 dump(Func); |
930 | 924 |
931 assignFinalRegisters(RegMaskFull, PreDefinedRegisters, Randomized); | 925 assignFinalRegisters(RegMaskFull, PreDefinedRegisters, Randomized); |
932 | 926 |
933 // TODO: Consider running register allocation one more time, with infinite | |
934 // registers, for two reasons. First, evicted live ranges get a second chance | |
935 // for a register. Second, it allows coalescing of stack slots. If there is | |
936 // no time budget for the second register allocation run, each unallocated | |
937 // variable just gets its own slot. | |
938 // | |
939 // Another idea for coalescing stack slots is to initialize the Unhandled | |
940 // list with just the unallocated variables, saving time but not offering | |
941 // second-chance opportunities. | |
942 | |
943 if (Verbose) | 927 if (Verbose) |
944 Ctx->unlockStr(); | 928 Ctx->unlockStr(); |
945 } | 929 } |
946 | 930 |
947 // ======================== Dump routines ======================== // | 931 // ======================== Dump routines ======================== // |
948 | 932 |
949 void LinearScan::dumpLiveRangeTrace(const char *Label, const Variable *Item) { | 933 void LinearScan::dumpLiveRangeTrace(const char *Label, const Variable *Item) { |
950 if (!BuildDefs::dump()) | 934 if (!BuildDefs::dump()) |
951 return; | 935 return; |
952 | 936 |
953 if (Verbose) { | 937 if (Verbose) { |
954 Ostream &Str = Ctx->getStrDump(); | 938 Ostream &Str = Ctx->getStrDump(); |
955 Str << Label; | 939 Str << Label; |
956 dumpLiveRange(Item, Func); | 940 dumpLiveRange(Item, Func); |
957 Str << "\n"; | 941 Str << "\n"; |
958 } | 942 } |
959 } | 943 } |
960 | 944 |
961 void LinearScan::dump(Cfg *Func) const { | 945 void LinearScan::dump(Cfg *Func) const { |
962 if (!BuildDefs::dump()) | 946 if (!BuildDefs::dump()) |
963 return; | 947 return; |
964 if (!Func->isVerbose(IceV_LinearScan)) | 948 if (!Verbose) |
965 return; | 949 return; |
966 Ostream &Str = Func->getContext()->getStrDump(); | 950 Ostream &Str = Func->getContext()->getStrDump(); |
967 Func->resetCurrentNode(); | 951 Func->resetCurrentNode(); |
968 Str << "**** Current regalloc state:\n"; | 952 Str << "**** Current regalloc state:\n"; |
969 Str << "++++++ Handled:\n"; | 953 Str << "++++++ Handled:\n"; |
970 for (const Variable *Item : Handled) { | 954 for (const Variable *Item : Handled) { |
971 dumpLiveRange(Item, Func); | 955 dumpLiveRange(Item, Func); |
972 Str << "\n"; | 956 Str << "\n"; |
973 } | 957 } |
974 Str << "++++++ Unhandled:\n"; | 958 Str << "++++++ Unhandled:\n"; |
975 for (const Variable *Item : reverse_range(Unhandled)) { | 959 for (const Variable *Item : reverse_range(Unhandled)) { |
976 dumpLiveRange(Item, Func); | 960 dumpLiveRange(Item, Func); |
977 Str << "\n"; | 961 Str << "\n"; |
978 } | 962 } |
979 Str << "++++++ Active:\n"; | 963 Str << "++++++ Active:\n"; |
980 for (const Variable *Item : Active) { | 964 for (const Variable *Item : Active) { |
981 dumpLiveRange(Item, Func); | 965 dumpLiveRange(Item, Func); |
982 Str << "\n"; | 966 Str << "\n"; |
983 } | 967 } |
984 Str << "++++++ Inactive:\n"; | 968 Str << "++++++ Inactive:\n"; |
985 for (const Variable *Item : Inactive) { | 969 for (const Variable *Item : Inactive) { |
986 dumpLiveRange(Item, Func); | 970 dumpLiveRange(Item, Func); |
987 Str << "\n"; | 971 Str << "\n"; |
988 } | 972 } |
989 } | 973 } |
990 | 974 |
991 } // end of namespace Ice | 975 } // end of namespace Ice |
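For reference, below is a minimal, self-contained sketch of the linear-scan flow that the comments in this file describe ("Implements the linear-scan algorithm..."). It uses simplified stand-in types (Range, NumRegs) rather than Subzero's Variable, LiveRange, and IterationState classes, and it omits register preference, overlap, aliasing, precoloring, and eviction, keeping only the expire-then-assign-first-free-register core.

// Minimal linear-scan sketch with simplified stand-in types; not Subzero's
// actual classes or heuristics.
#include <algorithm>
#include <cstdio>
#include <vector>

namespace {

struct Range {
  int Start, End; // Single-segment live range, half-open [Start, End).
  int Reg = -1;   // Assigned register, or -1 if left on the stack.
};

constexpr int NumRegs = 2;

bool overlaps(const Range &A, const Range &B) {
  return A.Start < B.End && B.Start < A.End;
}

void linearScan(std::vector<Range> &Ranges) {
  // Process ranges in increasing order of start point.
  std::sort(Ranges.begin(), Ranges.end(),
            [](const Range &A, const Range &B) { return A.Start < B.Start; });
  std::vector<Range *> Active;
  for (Range &Cur : Ranges) {
    // Expire active ranges whose interval ends at or before Cur's start.
    Active.erase(std::remove_if(Active.begin(), Active.end(),
                                [&](Range *R) { return R->End <= Cur.Start; }),
                 Active.end());
    // Compute the set of registers free across Cur's live range.
    bool Free[NumRegs];
    std::fill(Free, Free + NumRegs, true);
    for (Range *R : Active)
      if (overlaps(*R, Cur))
        Free[R->Reg] = false;
    // First free register wins; otherwise Cur stays unallocated (this sketch
    // has no eviction or spill/fill fallback).
    for (int R = 0; R < NumRegs; ++R) {
      if (Free[R]) {
        Cur.Reg = R;
        Active.push_back(&Cur);
        break;
      }
    }
  }
}

} // end of anonymous namespace

int main() {
  std::vector<Range> Ranges = {{0, 10}, {2, 6}, {7, 12}, {3, 9}};
  linearScan(Ranges);
  for (const Range &R : Ranges)
    std::printf("[%d,%d) -> r%d\n", R.Start, R.End, R.Reg);
  return 0;
}

In the real allocator, the "first free register" step above is replaced by the three-way choice visible in scan(): a preferred register (allocatePreferredRegister), any free register (allocateFreeRegister), or eviction of lower-weight conflicting ranges (handleNoFreeRegisters), with addSpillFill as the last resort when no register is even temporarily available.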