Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(302)

Side by Side Diff: src/IceRegAlloc.cpp

Issue 1253833002: Subzero: Cleanly implement register allocation after phi lowering. (Closed) Base URL: https://chromium.googlesource.com/native_client/pnacl-subzero.git@master
Patch Set: Cleanup Created 5 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 //===- subzero/src/IceRegAlloc.cpp - Linear-scan implementation -----------===// 1 //===- subzero/src/IceRegAlloc.cpp - Linear-scan implementation -----------===//
2 // 2 //
3 // The Subzero Code Generator 3 // The Subzero Code Generator
4 // 4 //
5 // This file is distributed under the University of Illinois Open Source 5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details. 6 // License. See LICENSE.TXT for details.
7 // 7 //
8 //===----------------------------------------------------------------------===// 8 //===----------------------------------------------------------------------===//
9 /// 9 ///
10 /// \file 10 /// \file
(...skipping 19 matching lines...) Expand all
30 // being compiled. 30 // being compiled.
31 constexpr size_t REGS_SIZE = 32; 31 constexpr size_t REGS_SIZE = 32;
32 32
33 // Returns true if Var has any definitions within Item's live range. 33 // Returns true if Var has any definitions within Item's live range.
34 // TODO(stichnot): Consider trimming the Definitions list similar to 34 // TODO(stichnot): Consider trimming the Definitions list similar to
35 // how the live ranges are trimmed, since all the overlapsDefs() tests 35 // how the live ranges are trimmed, since all the overlapsDefs() tests
36 // are whether some variable's definitions overlap Cur, and trimming 36 // are whether some variable's definitions overlap Cur, and trimming
37 // is with respect to Cur.start. Initial tests show no measurable 37 // is with respect to Cur.start. Initial tests show no measurable
38 // performance difference, so we'll keep the code simple for now. 38 // performance difference, so we'll keep the code simple for now.
39 bool overlapsDefs(const Cfg *Func, const Variable *Item, const Variable *Var) { 39 bool overlapsDefs(const Cfg *Func, const Variable *Item, const Variable *Var) {
40 const bool UseTrimmed = true; 40 constexpr bool UseTrimmed = true;
41 VariablesMetadata *VMetadata = Func->getVMetadata(); 41 VariablesMetadata *VMetadata = Func->getVMetadata();
42 if (const Inst *FirstDef = VMetadata->getFirstDefinition(Var)) 42 if (const Inst *FirstDef = VMetadata->getFirstDefinition(Var))
43 if (Item->getLiveRange().overlapsInst(FirstDef->getNumber(), UseTrimmed)) 43 if (Item->getLiveRange().overlapsInst(FirstDef->getNumber(), UseTrimmed))
44 return true; 44 return true;
45 const InstDefList &Defs = VMetadata->getLatterDefinitions(Var); 45 const InstDefList &Defs = VMetadata->getLatterDefinitions(Var);
46 for (size_t i = 0; i < Defs.size(); ++i) { 46 for (size_t i = 0; i < Defs.size(); ++i) {
47 if (Item->getLiveRange().overlapsInst(Defs[i]->getNumber(), UseTrimmed)) 47 if (Item->getLiveRange().overlapsInst(Defs[i]->getNumber(), UseTrimmed))
48 return true; 48 return true;
49 } 49 }
50 return false; 50 return false;
(...skipping 15 matching lines...) Expand all
66 Str << "," << Defs[i]->getNumber(); 66 Str << "," << Defs[i]->getNumber();
67 } 67 }
68 Str << "\n"; 68 Str << "\n";
69 } 69 }
70 } 70 }
71 71
72 void dumpLiveRange(const Variable *Var, const Cfg *Func) { 72 void dumpLiveRange(const Variable *Var, const Cfg *Func) {
73 if (!BuildDefs::dump()) 73 if (!BuildDefs::dump())
74 return; 74 return;
75 Ostream &Str = Func->getContext()->getStrDump(); 75 Ostream &Str = Func->getContext()->getStrDump();
76 const static size_t BufLen = 30; 76 char buf[30];
77 char buf[BufLen]; 77 snprintf(buf, llvm::array_lengthof(buf), "%2d", Var->getRegNumTmp());
78 snprintf(buf, BufLen, "%2d", Var->getRegNumTmp());
79 Str << "R=" << buf << " V="; 78 Str << "R=" << buf << " V=";
jvoung (off chromium) 2015/07/24 19:43:08 Could you just use std::to_string(Var->getRegNumTm
Jim Stichnoth 2015/07/26 04:47:49 That has the "%d" printf equivalent, as opposed to
80 Var->dump(Func); 79 Var->dump(Func);
81 Str << " Range=" << Var->getLiveRange(); 80 Str << " Range=" << Var->getLiveRange();
82 } 81 }
83 82
84 } // end of anonymous namespace 83 } // end of anonymous namespace
85 84
86 // Prepare for full register allocation of all variables. We depend 85 // Prepare for full register allocation of all variables. We depend
87 // on liveness analysis to have calculated live ranges. 86 // on liveness analysis to have calculated live ranges.
88 void LinearScan::initForGlobal() { 87 void LinearScan::initForGlobal() {
89 TimerMarker T(TimerStack::TT_initUnhandled, Func); 88 TimerMarker T(TimerStack::TT_initUnhandled, Func);
90 FindPreference = true; 89 FindPreference = true;
91 FindOverlap = true; 90 FindOverlap = (Kind != RAK_Phi);
jvoung (off chromium) 2015/07/24 19:43:08 Could you leave a comment about what FindOverlap i
Jim Stichnoth 2015/07/26 04:47:49 Done.
92 const VarList &Vars = Func->getVariables(); 91 const VarList &Vars = Func->getVariables();
93 Unhandled.reserve(Vars.size()); 92 Unhandled.reserve(Vars.size());
94 UnhandledPrecolored.reserve(Vars.size()); 93 UnhandledPrecolored.reserve(Vars.size());
95 // Gather the live ranges of all variables and add them to the 94 // Gather the live ranges of all variables and add them to the
96 // Unhandled set. 95 // Unhandled set.
97 for (Variable *Var : Vars) { 96 for (Variable *Var : Vars) {
98 // Explicitly don't consider zero-weight variables, which are 97 // Explicitly don't consider zero-weight variables, which are
99 // meant to be spill slots. 98 // meant to be spill slots.
100 if (Var->getWeight().isZero()) 99 if (Var->getWeight().isZero())
101 continue; 100 continue;
102 // Don't bother if the variable has a null live range, which means 101 // Don't bother if the variable has a null live range, which means
103 // it was never referenced. 102 // it was never referenced.
104 if (Var->getLiveRange().isEmpty()) 103 if (Var->getLiveRange().isEmpty())
105 continue; 104 continue;
105 // Post phi lowering register allocation is only concerned with
106 // infinite-weight and pre-colored variables.
jvoung (off chromium) 2015/07/24 19:43:08 I'm not sure I understand the pre-colored part (th
Jim Stichnoth 2015/07/26 04:47:50 This is terminology that I think is used consisten
jvoung (off chromium) 2015/07/27 22:28:15 I think my confusion is with the term "and" in the
Jim Stichnoth 2015/07/30 07:22:18 Ah, I see. Done.
107 if (Kind == RAK_Phi && !Var->getWeight().isInf() && !Var->hasReg())
108 continue;
106 Var->untrimLiveRange(); 109 Var->untrimLiveRange();
107 Unhandled.push_back(Var); 110 Unhandled.push_back(Var);
108 if (Var->hasReg()) { 111 if (Var->hasReg()) {
109 Var->setRegNumTmp(Var->getRegNum()); 112 Var->setRegNumTmp(Var->getRegNum());
110 Var->setLiveRangeInfiniteWeight(); 113 Var->setLiveRangeInfiniteWeight();
111 UnhandledPrecolored.push_back(Var); 114 UnhandledPrecolored.push_back(Var);
112 } 115 }
113 } 116 }
114 117
115 // Build the (ordered) list of FakeKill instruction numbers. 118 // Build the (ordered) list of FakeKill instruction numbers.
116 Kills.clear(); 119 Kills.clear();
120 // Phi lowering should not be creating new call instructions, so there should
121 // be no infinite-weight not-yet-colored live ranges that span a call
122 // instruction, hence no need to construct the Kills list.
123 if (Kind == RAK_Phi)
124 return;
117 for (CfgNode *Node : Func->getNodes()) { 125 for (CfgNode *Node : Func->getNodes()) {
118 for (Inst &I : Node->getInsts()) { 126 for (Inst &I : Node->getInsts()) {
119 if (auto Kill = llvm::dyn_cast<InstFakeKill>(&I)) { 127 if (auto Kill = llvm::dyn_cast<InstFakeKill>(&I)) {
120 if (!Kill->isDeleted() && !Kill->getLinked()->isDeleted()) 128 if (!Kill->isDeleted() && !Kill->getLinked()->isDeleted())
121 Kills.push_back(I.getNumber()); 129 Kills.push_back(I.getNumber());
122 } 130 }
123 } 131 }
124 } 132 }
125 } 133 }
126 134
(...skipping 62 matching lines...) Expand 10 before | Expand all | Expand 10 after
189 } 197 }
190 198
191 Unhandled.reserve(NumVars); 199 Unhandled.reserve(NumVars);
192 UnhandledPrecolored.reserve(NumVars); 200 UnhandledPrecolored.reserve(NumVars);
193 for (SizeT i = 0; i < Vars.size(); ++i) { 201 for (SizeT i = 0; i < Vars.size(); ++i) {
194 Variable *Var = Vars[i]; 202 Variable *Var = Vars[i];
195 if (LRBegin[i] != Inst::NumberSentinel) { 203 if (LRBegin[i] != Inst::NumberSentinel) {
196 assert(LREnd[i] != Inst::NumberSentinel); 204 assert(LREnd[i] != Inst::NumberSentinel);
197 Unhandled.push_back(Var); 205 Unhandled.push_back(Var);
198 Var->resetLiveRange(); 206 Var->resetLiveRange();
199 const uint32_t WeightDelta = 1; 207 constexpr uint32_t WeightDelta = 1;
200 Var->addLiveRange(LRBegin[i], LREnd[i], WeightDelta); 208 Var->addLiveRange(LRBegin[i], LREnd[i], WeightDelta);
201 Var->untrimLiveRange(); 209 Var->untrimLiveRange();
202 if (Var->hasReg()) { 210 if (Var->hasReg()) {
203 Var->setRegNumTmp(Var->getRegNum()); 211 Var->setRegNumTmp(Var->getRegNum());
204 Var->setLiveRangeInfiniteWeight(); 212 Var->setLiveRangeInfiniteWeight();
205 UnhandledPrecolored.push_back(Var); 213 UnhandledPrecolored.push_back(Var);
206 } 214 }
207 --NumVars; 215 --NumVars;
208 } 216 }
209 } 217 }
210 // This isn't actually a fatal condition, but it would be nice to 218 // This isn't actually a fatal condition, but it would be nice to
211 // know if we somehow pre-calculated Unhandled's size wrong. 219 // know if we somehow pre-calculated Unhandled's size wrong.
212 assert(NumVars == 0); 220 assert(NumVars == 0);
213 221
214 // Don't build up the list of Kills because we know that no 222 // Don't build up the list of Kills because we know that no
215 // infinite-weight Variable has a live range spanning a call. 223 // infinite-weight Variable has a live range spanning a call.
216 Kills.clear(); 224 Kills.clear();
217 } 225 }
218 226
219 void LinearScan::init(RegAllocKind Kind) { 227 void LinearScan::init(RegAllocKind Kind) {
228 this->Kind = Kind;
220 Unhandled.clear(); 229 Unhandled.clear();
221 UnhandledPrecolored.clear(); 230 UnhandledPrecolored.clear();
222 Handled.clear(); 231 Handled.clear();
223 Inactive.clear(); 232 Inactive.clear();
224 Active.clear(); 233 Active.clear();
225 234
226 switch (Kind) { 235 switch (Kind) {
236 case RAK_Unknown:
237 llvm::report_fatal_error("Invalid RAK_Unknown");
238 break;
227 case RAK_Global: 239 case RAK_Global:
240 case RAK_Phi:
228 initForGlobal(); 241 initForGlobal();
229 break; 242 break;
230 case RAK_InfOnly: 243 case RAK_InfOnly:
231 initForInfOnly(); 244 initForInfOnly();
232 break; 245 break;
233 } 246 }
234 247
235 struct CompareRanges { 248 struct CompareRanges {
236 bool operator()(const Variable *L, const Variable *R) { 249 bool operator()(const Variable *L, const Variable *R) {
237 InstNumberT Lstart = L->getLiveRange().getStart(); 250 InstNumberT Lstart = L->getLiveRange().getStart();
238 InstNumberT Rstart = R->getLiveRange().getStart(); 251 InstNumberT Rstart = R->getLiveRange().getStart();
239 if (Lstart == Rstart) 252 if (Lstart == Rstart)
240 return L->getIndex() < R->getIndex(); 253 return L->getIndex() < R->getIndex();
241 return Lstart < Rstart; 254 return Lstart < Rstart;
242 } 255 }
243 }; 256 };
244 // Do a reverse sort so that erasing elements (from the end) is fast. 257 // Do a reverse sort so that erasing elements (from the end) is fast.
245 std::sort(Unhandled.rbegin(), Unhandled.rend(), CompareRanges()); 258 std::sort(Unhandled.rbegin(), Unhandled.rend(), CompareRanges());
246 std::sort(UnhandledPrecolored.rbegin(), UnhandledPrecolored.rend(), 259 std::sort(UnhandledPrecolored.rbegin(), UnhandledPrecolored.rend(),
247 CompareRanges()); 260 CompareRanges());
248 261
249 Handled.reserve(Unhandled.size()); 262 Handled.reserve(Unhandled.size());
250 Inactive.reserve(Unhandled.size()); 263 Inactive.reserve(Unhandled.size());
251 Active.reserve(Unhandled.size()); 264 Active.reserve(Unhandled.size());
252 } 265 }
253 266
267 // This is called when Cur must be allocated a register but no registers are
268 // available across Cur's live range. To handle this, we find a register that
269 // is not explicitly used during Cur's live range, spill that register to a
270 // stack location right before Cur's live range begins, and fill (reload) the
271 // register from the stack location right after Cur's live range ends.
272 void LinearScan::addSpillFill(Variable *Cur, llvm::SmallBitVector RegMask) {
273 // Identify the actual instructions that begin and end Cur's live range.
274 // Iterate through Cur's node's instruction list until we find the actual
275 // instructions with instruction numbers corresponding to Cur's recorded live
276 // range endpoints. This sounds inefficient but shouldn't be a problem in
277 // practice because:
278 // (1) This function is almost never called in practice.
279 // (2) Since this register over-subscription problem happens only for
280 // phi-lowered instructions, the number of instructions in the node is
281 // proportional to the number of phi instructions in the original node,
282 // which is never very large in practice.
283 // (3) We still have to iterate through all instructions of Cur's live range
284 // to find all explicitly used registers (though the live range is usually
285 // only 2-3 instructions), so the main cost that could be avoided would be
286 // finding the instruction that begins Cur's live range.
287 assert(!Cur->getLiveRange().isEmpty());
288 InstNumberT Start = Cur->getLiveRange().getStart();
289 InstNumberT End = Cur->getLiveRange().getEnd();
290 CfgNode *Node = Func->getVMetadata()->getLocalUseNode(Cur);
291 assert(Node);
292 InstList &Insts = Node->getInsts();
293 InstList::iterator SpillPoint = Insts.end();
294 InstList::iterator FillPoint = Insts.end();
295 // Stop searching after we have found both the SpillPoint and the FillPoint.
296 for (auto I = Insts.begin(), E = Insts.end();
297 I != E && (SpillPoint == E || FillPoint == E); ++I) {
298 if (I->getNumber() == Start)
299 SpillPoint = I;
300 if (I->getNumber() == End)
301 FillPoint = I;
302 if (SpillPoint != E) {
303 // Remove from RegMask any physical registers referenced during Cur's live
304 // range. Start looking after SpillPoint gets set, i.e. once Cur's live
305 // range begins.
306 for (SizeT i = 0; i < I->getSrcSize(); ++i) {
307 Operand *Src = I->getSrc(i);
308 SizeT NumVars = Src->getNumVars();
309 for (SizeT j = 0; j < NumVars; ++j) {
310 const Variable *Var = Src->getVar(j);
311 if (Var->hasRegTmp())
jvoung (off chromium) 2015/07/24 19:43:08 This doesn't have to be concerned about Var->hasRe
Jim Stichnoth 2015/07/26 04:47:49 That's right. In the register allocator design, a
312 RegMask[Var->getRegNumTmp()] = false;
313 }
314 }
315 }
316 }
317 assert(SpillPoint != Insts.end());
318 assert(FillPoint != Insts.end());
319 ++FillPoint;
320 // TODO(stichnot): Randomize instead of find_first().
321 int32_t RegNum = RegMask.find_first();
322 assert(RegNum != -1);
323 Cur->setRegNumTmp(RegNum);
324 TargetLowering *Target = Func->getTarget();
325 Variable *Preg = Target->getPhysicalRegister(RegNum, Cur->getType());
jvoung (off chromium) 2015/07/24 19:43:08 Should this SpillLoc type depend on Cur->getType()
Jim Stichnoth 2015/07/26 04:47:50 I think Cur->getType() is right. For example, on
326 // TODO(stichnot): Add SpillLoc to VariablesMetadata tracking so that SpillLoc
327 // is correctly identified as !isMultiBlock(), reducing stack frame size.
328 Variable *SpillLoc = Func->template makeVariable(Cur->getType());
jvoung (off chromium) 2015/07/24 19:43:08 does this need the Func->template ... ?
Jim Stichnoth 2015/07/26 04:47:50 Done. (guess I copied this from an older version
329 // Add "reg=FakeDef;spill=reg" before SpillPoint
330 Target->lowerInst(Node, SpillPoint, InstFakeDef::create(Func, Preg));
jvoung (off chromium) 2015/07/24 19:43:08 Probably not too much, but Target->lowerInst() doe
Jim Stichnoth 2015/07/26 04:47:50 Yeah, my feeling was that addSpillFill() is invoke
331 Target->lowerInst(Node, SpillPoint, InstAssign::create(Func, SpillLoc, Preg));
332 // Add "reg=spill;FakeUse(reg)" before FillPoint
333 Target->lowerInst(Node, FillPoint, InstAssign::create(Func, Preg, SpillLoc));
334 Target->lowerInst(Node, FillPoint, InstFakeUse::create(Func, Preg));
335 }
336
254 // Implements the linear-scan algorithm. Based on "Linear Scan 337 // Implements the linear-scan algorithm. Based on "Linear Scan
255 // Register Allocation in the Context of SSA Form and Register 338 // Register Allocation in the Context of SSA Form and Register
256 // Constraints" by Hanspeter Mössenböck and Michael Pfeiffer, 339 // Constraints" by Hanspeter Mössenböck and Michael Pfeiffer,
257 // ftp://ftp.ssw.uni-linz.ac.at/pub/Papers/Moe02.PDF . This 340 // ftp://ftp.ssw.uni-linz.ac.at/pub/Papers/Moe02.PDF . This
258 // implementation is modified to take affinity into account and allow 341 // implementation is modified to take affinity into account and allow
259 // two interfering variables to share the same register in certain 342 // two interfering variables to share the same register in certain
260 // cases. 343 // cases.
261 // 344 //
262 // Requires running Cfg::liveness(Liveness_Intervals) in 345 // Requires running Cfg::liveness(Liveness_Intervals) in
263 // preparation. Results are assigned to Variable::RegNum for each 346 // preparation. Results are assigned to Variable::RegNum for each
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
305 if (Verbose) { 388 if (Verbose) {
306 Ostream &Str = Ctx->getStrDump(); 389 Ostream &Str = Ctx->getStrDump();
307 Str << "\nConsidering "; 390 Str << "\nConsidering ";
308 dumpLiveRange(Cur, Func); 391 dumpLiveRange(Cur, Func);
309 Str << "\n"; 392 Str << "\n";
310 } 393 }
311 const llvm::SmallBitVector RegMask = 394 const llvm::SmallBitVector RegMask =
312 RegMaskFull & Func->getTarget()->getRegisterSetForType(Cur->getType()); 395 RegMaskFull & Func->getTarget()->getRegisterSetForType(Cur->getType());
313 KillsRange.trim(Cur->getLiveRange().getStart()); 396 KillsRange.trim(Cur->getLiveRange().getStart());
314 397
315 // Check for precolored ranges. If Cur is precolored, it 398 // Check for pre-colored ranges. If Cur is pre-colored, it
316 // definitely gets that register. Previously processed live 399 // definitely gets that register. Previously processed live
317 // ranges would have avoided that register due to it being 400 // ranges would have avoided that register due to it being
318 // precolored. Future processed live ranges won't evict that 401 // pre-colored. Future processed live ranges won't evict that
319 // register because the live range has infinite weight. 402 // register because the live range has infinite weight.
320 if (Cur->hasReg()) { 403 if (Cur->hasReg()) {
321 int32_t RegNum = Cur->getRegNum(); 404 int32_t RegNum = Cur->getRegNum();
322 // RegNumTmp should have already been set above. 405 // RegNumTmp should have already been set above.
323 assert(Cur->getRegNumTmp() == RegNum); 406 assert(Cur->getRegNumTmp() == RegNum);
324 if (Verbose) { 407 if (Verbose) {
325 Ostream &Str = Ctx->getStrDump(); 408 Ostream &Str = Ctx->getStrDump();
326 Str << "Precoloring "; 409 Str << "Precoloring ";
327 dumpLiveRange(Cur, Func); 410 dumpLiveRange(Cur, Func);
328 Str << "\n"; 411 Str << "\n";
(...skipping 165 matching lines...) Expand 10 before | Expand all | Expand 10 after
494 overlapsDefs(Func, Cur, Item)) { 577 overlapsDefs(Func, Cur, Item)) {
495 AllowOverlap = false; 578 AllowOverlap = false;
496 dumpDisableOverlap(Func, Item, "Active"); 579 dumpDisableOverlap(Func, Item, "Active");
497 } 580 }
498 } 581 }
499 } 582 }
500 583
501 llvm::SmallVector<RegWeight, REGS_SIZE> Weights(RegMask.size()); 584 llvm::SmallVector<RegWeight, REGS_SIZE> Weights(RegMask.size());
502 585
503 // Remove registers from the Free[] list where an Unhandled 586 // Remove registers from the Free[] list where an Unhandled
504 // precolored range overlaps with the current range, and set those 587 // pre-colored range overlaps with the current range, and set those
505 // registers to infinite weight so that they aren't candidates for 588 // registers to infinite weight so that they aren't candidates for
506 // eviction. Cur->rangeEndsBefore(Item) is an early exit check 589 // eviction. Cur->rangeEndsBefore(Item) is an early exit check
507 // that turns a guaranteed O(N^2) algorithm into expected linear 590 // that turns a guaranteed O(N^2) algorithm into expected linear
508 // complexity. 591 // complexity.
509 llvm::SmallBitVector PrecoloredUnhandledMask(RegMask.size()); 592 llvm::SmallBitVector PrecoloredUnhandledMask(RegMask.size());
510 // Note: PrecoloredUnhandledMask is only used for dumping. 593 // Note: PrecoloredUnhandledMask is only used for dumping.
511 for (Variable *Item : reverse_range(UnhandledPrecolored)) { 594 for (Variable *Item : reverse_range(UnhandledPrecolored)) {
512 assert(Item->hasReg()); 595 assert(Item->hasReg());
513 if (Cur->rangeEndsBefore(Item)) 596 if (Cur->rangeEndsBefore(Item))
514 break; 597 break;
515 if (Item->rangeOverlaps(Cur)) { 598 if (Item->rangeOverlaps(Cur)) {
516 int32_t ItemReg = Item->getRegNum(); // Note: not getRegNumTmp() 599 int32_t ItemReg = Item->getRegNum(); // Note: not getRegNumTmp()
517 Weights[ItemReg].setWeight(RegWeight::Inf); 600 Weights[ItemReg].setWeight(RegWeight::Inf);
518 Free[ItemReg] = false; 601 Free[ItemReg] = false;
519 PrecoloredUnhandledMask[ItemReg] = true; 602 PrecoloredUnhandledMask[ItemReg] = true;
520 // Disable AllowOverlap if the preferred register is one of 603 // Disable AllowOverlap if the preferred register is one of
521 // these precolored unhandled overlapping ranges. 604 // these pre-colored unhandled overlapping ranges.
522 if (AllowOverlap && ItemReg == PreferReg) { 605 if (AllowOverlap && ItemReg == PreferReg) {
523 AllowOverlap = false; 606 AllowOverlap = false;
524 dumpDisableOverlap(Func, Item, "PrecoloredUnhandled"); 607 dumpDisableOverlap(Func, Item, "PrecoloredUnhandled");
525 } 608 }
526 } 609 }
527 } 610 }
528 611
529 // Remove scratch registers from the Free[] list, and mark their 612 // Remove scratch registers from the Free[] list, and mark their
530 // Weights[] as infinite, if KillsRange overlaps Cur's live range. 613 // Weights[] as infinite, if KillsRange overlaps Cur's live range.
531 const bool UseTrimmed = true; 614 constexpr bool UseTrimmed = true;
532 if (Cur->getLiveRange().overlaps(KillsRange, UseTrimmed)) { 615 if (Cur->getLiveRange().overlaps(KillsRange, UseTrimmed)) {
533 Free.reset(KillsMask); 616 Free.reset(KillsMask);
534 for (int i = KillsMask.find_first(); i != -1; 617 for (int i = KillsMask.find_first(); i != -1;
535 i = KillsMask.find_next(i)) { 618 i = KillsMask.find_next(i)) {
536 Weights[i].setWeight(RegWeight::Inf); 619 Weights[i].setWeight(RegWeight::Inf);
537 if (PreferReg == i) 620 if (PreferReg == i)
538 AllowOverlap = false; 621 AllowOverlap = false;
539 } 622 }
540 } 623 }
541 624
(...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after
608 if (RegMask[i] && Weights[i] < Weights[MinWeightIndex]) 691 if (RegMask[i] && Weights[i] < Weights[MinWeightIndex])
609 MinWeightIndex = i; 692 MinWeightIndex = i;
610 } 693 }
611 694
612 if (Cur->getLiveRange().getWeight() <= Weights[MinWeightIndex]) { 695 if (Cur->getLiveRange().getWeight() <= Weights[MinWeightIndex]) {
613 // Cur doesn't have priority over any other live ranges, so 696 // Cur doesn't have priority over any other live ranges, so
614 // don't allocate any register to it, and move it to the 697 // don't allocate any register to it, and move it to the
615 // Handled state. 698 // Handled state.
616 Handled.push_back(Cur); 699 Handled.push_back(Cur);
617 if (Cur->getLiveRange().getWeight().isInf()) { 700 if (Cur->getLiveRange().getWeight().isInf()) {
618 Func->setError("Unable to find a physical register for an " 701 if (Kind == RAK_Phi)
619 "infinite-weight live range"); 702 addSpillFill(Cur, RegMask);
703 else
704 Func->setError("Unable to find a physical register for an "
705 "infinite-weight live range");
620 } 706 }
621 } else { 707 } else {
622 // Evict all live ranges in Active that register number 708 // Evict all live ranges in Active that register number
623 // MinWeightIndex is assigned to. 709 // MinWeightIndex is assigned to.
624 for (SizeT I = Active.size(); I > 0; --I) { 710 for (SizeT I = Active.size(); I > 0; --I) {
625 const SizeT Index = I - 1; 711 const SizeT Index = I - 1;
626 Variable *Item = Active[Index]; 712 Variable *Item = Active[Index];
627 if (Item->getRegNumTmp() == MinWeightIndex) { 713 if (Item->getRegNumTmp() == MinWeightIndex) {
628 if (Verbose) { 714 if (Verbose) {
629 Ostream &Str = Ctx->getStrDump(); 715 Ostream &Str = Ctx->getStrDump();
(...skipping 130 matching lines...) Expand 10 before | Expand all | Expand 10 after
760 Str << "\n"; 846 Str << "\n";
761 } 847 }
762 Str << "++++++ Inactive:\n"; 848 Str << "++++++ Inactive:\n";
763 for (const Variable *Item : Inactive) { 849 for (const Variable *Item : Inactive) {
764 dumpLiveRange(Item, Func); 850 dumpLiveRange(Item, Func);
765 Str << "\n"; 851 Str << "\n";
766 } 852 }
767 } 853 }
768 854
769 } // end of namespace Ice 855 } // end of namespace Ice
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698