1 //===- subzero/src/IceVariableSplitting.cpp - Local variable splitting ----===// | |
2 // | |
3 // The Subzero Code Generator | |
4 // | |
5 // This file is distributed under the University of Illinois Open Source | |
6 // License. See LICENSE.TXT for details. | |
7 // | |
8 //===----------------------------------------------------------------------===// | |
9 /// | |
10 /// \file | |
11 /// \brief Aggressive block-local variable splitting to improve linear-scan | |
12 /// register allocation. | |
13 /// | |
14 //===----------------------------------------------------------------------===// | |
15 | |
16 #include "IceVariableSplitting.h" | |
17 | |
18 #include "IceCfg.h" | |
19 #include "IceCfgNode.h" | |
20 #include "IceClFlags.h" | |
21 #include "IceInst.h" | |
22 #include "IceOperand.h" | |
23 #include "IceTargetLowering.h" | |
24 | |
25 namespace Ice { | |
26 | |
27 namespace { | |
28 | |
29 /// A Variable is "allocable" if it is a register allocation candidate but | |
30 /// doesn't already have a register. | |
31 bool isAllocable(const Variable *Var) { | |
32 if (Var == nullptr) | |
33 return false; | |
34 return !Var->hasReg() && Var->mayHaveReg(); | |
35 } | |
36 | |
37 /// A Variable is "inf" if it already has a register or is infinite-weight. | |
38 bool isInf(const Variable *Var) { | |
39 if (Var == nullptr) | |
40 return false; | |
41 return Var->hasReg() || Var->mustHaveReg(); | |
42 } | |
43 | |
44 /// VariableMap is a simple helper class that keeps track of the latest split | |
45 /// version of the original Variables, as well as the instruction containing the | |
46 /// last use of the Variable within the current block. For each entry, the | |
47 /// Variable is tagged with the CfgNode that it is valid in, so that we don't | |
48 /// need to clear the entire Map[] vector for each block. | |
49 class VariableMap { | |
50 private: | |
51 VariableMap() = delete; | |
52 VariableMap(const VariableMap &) = delete; | |
53 VariableMap &operator=(const VariableMap &) = delete; | |
54 | |
55 struct VarInfo { | |
56 /// MappedVar is the latest mapped/split version of the Variable. | |
57 Variable *MappedVar = nullptr; | |
58 /// MappedVarNode is the block in which MappedVar is valid. | |
59 const CfgNode *MappedVarNode = nullptr; | |
60 /// LastUseInst is the last instruction in the block that uses the Variable | |
61 /// as a source operand. | |
62 const Inst *LastUseInst = nullptr; | |
63 /// LastUseNode is the block in which LastUseInst is valid. | |
64 const CfgNode *LastUseNode = nullptr; | |
65 VarInfo() = default; | |
66 | |
67 private: | |
68 VarInfo(const VarInfo &) = delete; | |
69 VarInfo &operator=(const VarInfo &) = delete; | |
70 }; | |
71 | |
72 public: | |
73 explicit VariableMap(Cfg *Func) | |
74 : Func(Func), NumVars(Func->getNumVariables()), Map(NumVars) {} | |
75 /// Reset the mappings at the start of a block. | |
76 void reset(const CfgNode *CurNode) { | |
77 Node = CurNode; | |
78 // Do a prepass through all the instructions, marking which instruction is | |
79 // the last use of each Variable within the block. | |
80 for (const Inst &Instr : Node->getInsts()) { | |
81 if (Instr.isDeleted()) | |
82 continue; | |
83 for (SizeT i = 0; i < Instr.getSrcSize(); ++i) { | |
84 if (auto *SrcVar = llvm::dyn_cast<Variable>(Instr.getSrc(i))) { | |
85 const SizeT VarNum = getVarNum(SrcVar); | |
86 Map[VarNum].LastUseInst = &Instr; | |
87 Map[VarNum].LastUseNode = Node; | |
88 } | |
89 } | |
90 } | |
91 } | |
92 /// Get Var's current mapping (or Var itself if it has no mapping yet). | |
93 Variable *get(Variable *Var) const { | |
94 const SizeT VarNum = getVarNum(Var); | |
95 Variable *MappedVar = Map[VarNum].MappedVar; | |
96 if (MappedVar == nullptr) | |
97 return Var; | |
98 if (Map[VarNum].MappedVarNode != Node) | |
99 return Var; | |
100 return MappedVar; | |
101 } | |
102 /// Create a new linked Variable in the LinkedTo chain, and set it as Var's | |
103 /// latest mapping. | |
104 Variable *makeLinked(Variable *Var) { | |
105 Variable *NewVar = Func->makeVariable(Var->getType()); | |
106 NewVar->setRegClass(Var->getRegClass()); | |
107 NewVar->setLinkedTo(get(Var)); | |
108 const SizeT VarNum = getVarNum(Var); | |
109 Map[VarNum].MappedVar = NewVar; | |
110 Map[VarNum].MappedVarNode = Node; | |
111 return NewVar; | |
112 } | |
113 /// Given Var that is LinkedTo some other variable, re-splice it into the | |
114 /// LinkedTo chain so that the chain is ordered by Variable::getIndex(). | |
115 void spliceBlockLocalLinkedToChain(Variable *Var) { | |
116 Variable *LinkedTo = Var->getLinkedTo(); | |
117 assert(LinkedTo != nullptr); | |
118 assert(Var->getIndex() > LinkedTo->getIndex()); | |
119 const SizeT VarNum = getVarNum(LinkedTo); | |
120 Variable *Link = Map[VarNum].MappedVar; | |
121 if (Link == nullptr || Map[VarNum].MappedVarNode != Node) | |
122 return; | |
123 Variable *LinkParent = Link->getLinkedTo(); | |
124 while (LinkParent != nullptr && LinkParent->getIndex() >= Var->getIndex()) { | |
125 Link = LinkParent; | |
126 LinkParent = Link->getLinkedTo(); | |
127 } | |
128 Var->setLinkedTo(LinkParent); | |
129 Link->setLinkedTo(Var); | |
130 } | |
131 /// Return whether the given Variable has any uses as a source operand within | |
132 /// the current block. If it has no source operand uses, but is assigned as a | |
133 /// dest variable in some instruction in the block, then we needn't bother | |
134 /// splitting it. | |
135 bool isDestUsedInBlock(const Variable *Dest) const { | |
136 return Map[getVarNum(Dest)].LastUseNode == Node; | |
137 } | |
138 /// Return whether the given instruction is the last use of the given Variable | |
139 /// within the current block. If it is, then we needn't bother splitting the | |
140 /// Variable at this instruction. | |
141 bool isInstLastUseOfVar(const Variable *Var, const Inst *Instr) { | |
142 return Map[getVarNum(Var)].LastUseInst == Instr; | |
143 } | |
144 | |
145 private: | |
146 Cfg *const Func; | |
147 // NumVars is the size of the Map array. It can be const because any new | |
148 // Variables created during the splitting pass don't need to be mapped. | |
149 const SizeT NumVars; | |
150 CfgVector<VarInfo> Map; | |
151 const CfgNode *Node = nullptr; | |
152 /// Get Var's VarNum, and do some validation. | |
153 SizeT getVarNum(const Variable *Var) const { | |
154 const SizeT VarNum = Var->getIndex(); | |
155 assert(VarNum < NumVars); | |
156 return VarNum; | |
157 } | |
158 }; | |
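// Illustrative aside: a minimal, self-contained sketch (hypothetical names,
// not used by this pass) of the same node/epoch-tagging idea. An entry counts
// as present only if its tag matches the current epoch, so invalidating every
// entry is a constant-time operation. This is the same reason VariableMap
// stamps each VarInfo with the current CfgNode instead of clearing the whole
// Map[] vector at every block boundary.
template <typename T, int N> class EpochTaggedMap {
public:
  /// Invalidate all entries at once by advancing the current epoch.
  void resetAll() { ++CurrentEpoch; }
  void set(int Key, const T &Value) {
    Slots[Key].Value = Value;
    Slots[Key].Epoch = CurrentEpoch;
  }
  /// Returns nullptr for entries written before the most recent resetAll().
  const T *get(int Key) const {
    return Slots[Key].Epoch == CurrentEpoch ? &Slots[Key].Value : nullptr;
  }

private:
  struct Slot {
    T Value = T();
    unsigned Epoch = 0;
  };
  Slot Slots[N];
  unsigned CurrentEpoch = 1; // Start at 1 so zero-initialized slots are stale.
};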
159 | |
160 /// LocalSplittingState tracks the necessary splitting state across | |
161 /// instructions. | |
162 class LocalSplittingState { | |
163 LocalSplittingState() = delete; | |
164 LocalSplittingState(const LocalSplittingState &) = delete; | |
165 LocalSplittingState &operator=(const LocalSplittingState &) = delete; | |
166 | |
167 public: | |
168 explicit LocalSplittingState(Cfg *Func) | |
169 : Target(Func->getTarget()), VarMap(Func) {} | |
170 /// setNode() is called before processing the instructions of a block. | |
171 void setNode(CfgNode *CurNode) { | |
172 Node = CurNode; | |
173 VarMap.reset(Node); | |
174 LinkedToFixups.clear(); | |
175 } | |
176 /// finalizeNode() is called after all instructions in the block are | |
177 /// processed. | |
178 void finalizeNode() { | |
179 // Splice any preexisting LinkedTo links into the single chain. These | |
180 // are the ones that were recorded during setInst(). | |
181 for (Variable *Var : LinkedToFixups) { | |
182 VarMap.spliceBlockLocalLinkedToChain(Var); | |
183 } | |
184 } | |
185 /// setInst() is called before processing the next instruction. The iterators | |
186 /// are the insertion points for a new instruction, depending on whether the | |
187 /// new instruction should be inserted before or after the current | |
188 /// instruction. | |
189 void setInst(Inst *CurInst, InstList::iterator Cur, InstList::iterator Next) { | |
190 Instr = CurInst; | |
191 Dest = Instr->getDest(); | |
192 IterCur = Cur; | |
193 IterNext = Next; | |
194 ShouldSkipAllInstructions = false; | |
195 // Note any preexisting LinkedTo relationships that were created during | |
196 // target lowering. Record them in LinkedToFixups which is then processed | |
197 // in finalizeNode(). | |
198 if (Dest != nullptr && Dest->getLinkedTo() != nullptr) { | |
199 LinkedToFixups.emplace_back(Dest); | |
200 } | |
201 } | |
202 bool shouldSkipAllInstructions() const { return ShouldSkipAllInstructions; } | |
203 bool isUnconditionallyExecuted() const { return WaitingForLabel == nullptr; } | |
204 | |
205 /// Note: the handle*() functions return true to indicate that the instruction | |
206 /// has now been handled and that the instruction loop should continue to the | |
207 /// next instruction in the block (and return false otherwise). In addition, | |
208 /// they may set the ShouldSkipAllInstructions flag to indicate that no more | |
209 /// instructions in the block should be processed. | |
210 | |
211 /// Handle an "unwanted" instruction by returning true. | |
212 bool handleUnwantedInstruction() { | |
213 // We can limit the splitting to an arbitrary subset of the instructions, | |
214 // and still expect correct code. As such, we can do instruction-subset | |
215 // bisection to help debug any problems in this pass. | |
216 static constexpr char AnInstructionHasNoName[] = ""; | |
217 if (!BuildDefs::minimal() && | |
218 !getFlags().matchSplitInsts(AnInstructionHasNoName, | |
219 Instr->getNumber())) { | |
220 return true; | |
221 } | |
222 if (!llvm::isa<InstTarget>(Instr)) { | |
223 // Ignore non-lowered instructions like FakeDef/FakeUse. | |
224 return true; | |
225 } | |
226 return false; | |
227 } | |
228 | |
229 /// Process a potential label instruction. | |
230 bool handleLabel() { | |
231 if (!Instr->isLabel()) | |
232 return false; | |
233 // A Label instruction shouldn't have any operands, so it can be handled | |
235 // right here, and then we can move on. | |
235 assert(Dest == nullptr); | |
236 assert(Instr->getSrcSize() == 0); | |
237 if (Instr == WaitingForLabel) { | |
238 // If we found the forward-branch-target Label instruction we're waiting | |
239 // for, then clear the WaitingForLabel state. | |
240 WaitingForLabel = nullptr; | |
241 } else if (WaitingForLabel == nullptr && WaitingForBranchTo == nullptr) { | |
242 // If we found a new Label instruction while the WaitingFor* state is | |
243 // clear, then set things up for this being a backward branch target. | |
244 WaitingForBranchTo = Instr; | |
245 } else { | |
246 // We see something we don't understand, so skip to the next block. | |
247 ShouldSkipAllInstructions = true; | |
248 } | |
249 return true; | |
250 } | |
251 | |
252 /// Process a potential intra-block branch instruction. | |
253 bool handleIntraBlockBranch() { | |
254 const Inst *Label = Instr->getIntraBlockBranchTarget(); | |
255 if (Label == nullptr) | |
256 return false; | |
257 // An intra-block branch instruction shouldn't have any operands, so it can | |
258 // be handled right here, and then we can move on. | |
259 assert(Dest == nullptr); | |
260 assert(Instr->getSrcSize() == 0); | |
261 if (WaitingForBranchTo == Label && WaitingForLabel == nullptr) { | |
262 WaitingForBranchTo = nullptr; | |
263 } else if (WaitingForBranchTo == nullptr && | |
264 (WaitingForLabel == nullptr || WaitingForLabel == Label)) { | |
265 WaitingForLabel = Label; | |
266 } else { | |
267 // We see something we don't understand, so skip to the next block. | |
268 ShouldSkipAllInstructions = true; | |
269 } | |
270 return true; | |
271 } | |
272 | |
273 /// Specially process a potential "Variable=Variable" assignment instruction, | |
274 /// when it conforms to certain patterns. | |
275 bool handleVarAssign() { | |
276 if (!Instr->isVarAssign()) | |
277 return false; | |
278 const bool DestIsInf = isInf(Dest); | |
279 const bool DestIsAllocable = isAllocable(Dest); | |
280 auto *SrcVar = llvm::cast<Variable>(Instr->getSrc(0)); | |
281 const bool SrcIsInf = isInf(SrcVar); | |
282 const bool SrcIsAllocable = isAllocable(SrcVar); | |
283 if (DestIsInf && SrcIsInf) { | |
284 // The instruction: | |
285 // t:inf = u:inf | |
286 // No transformation is needed. | |
287 return true; | |
288 } | |
289 if (DestIsInf && SrcIsAllocable && Dest->getType() == SrcVar->getType()) { | |
290 // The instruction: | |
291 // t:inf = v | |
292 // gets transformed to: | |
293 // t:inf = v1 | |
294 // v2 = t:inf | |
295 // where: | |
296 // v1 := map[v] | |
297 // v2 := linkTo(v) | |
298 // map[v] := v2 | |
299 // | |
300 // If both v2 and its linkedToStackRoot get a stack slot, then "v2=t:inf" | |
301 // is recognized as a redundant assignment and elided. | |
302 // | |
303 // Note that if the dest and src types are different, then this is | |
304 // actually a truncation operation, which would make "v2=t:inf" an invalid | |
305 // instruction. In this case, the type test will make it fall through to | |
306 // the general case below. | |
307 Variable *OldMapped = VarMap.get(SrcVar); | |
308 Instr->replaceSource(0, OldMapped); | |
309 if (isUnconditionallyExecuted()) { | |
310 // Only create new mapping state if the instruction is unconditionally | |
311 // executed. | |
312 if (!VarMap.isInstLastUseOfVar(SrcVar, Instr)) { | |
313 Variable *NewMapped = VarMap.makeLinked(SrcVar); | |
314 Inst *Mov = Target->createLoweredMove(NewMapped, Dest); | |
315 Node->getInsts().insert(IterNext, Mov); | |
316 } | |
317 } | |
318 return true; | |
319 } | |
320 if (DestIsAllocable && SrcIsInf) { | |
321 if (!VarMap.isDestUsedInBlock(Dest)) { | |
322 return true; | |
323 } | |
324 // The instruction: | |
325 // v = t:inf | |
326 // gets transformed to: | |
327 // v = t:inf | |
328 // v2 = t:inf | |
329 // where: | |
330 // v2 := linkTo(v) | |
331 // map[v] := v2 | |
332 // | |
333 // If both v2 and v get a stack slot, then "v2=t:inf" is recognized as a | |
334 // redundant assignment and elided. | |
335 if (isUnconditionallyExecuted()) { | |
336 // Only create new mapping state if the instruction is unconditionally | |
337 // executed. | |
338 Variable *NewMapped = VarMap.makeLinked(Dest); | |
339 Inst *Mov = Target->createLoweredMove(NewMapped, SrcVar); | |
340 Node->getInsts().insert(IterNext, Mov); | |
341 } else { | |
342 // For a conditionally executed instruction, add a redefinition of the | |
343 // original Dest mapping, without creating a new linked variable. | |
344 Variable *OldMapped = VarMap.get(Dest); | |
345 Inst *Mov = Target->createLoweredMove(OldMapped, SrcVar); | |
346 Mov->setDestRedefined(); | |
347 Node->getInsts().insert(IterNext, Mov); | |
348 } | |
349 return true; | |
350 } | |
351 assert(!ShouldSkipAllInstructions); | |
352 return false; | |
353 } | |
354 | |
355 /// Process the dest Variable of a Phi instruction. | |
356 bool handlePhi() { | |
357 assert(llvm::isa<InstPhi>(Instr)); | |
358 const bool DestIsAllocable = isAllocable(Dest); | |
359 if (!DestIsAllocable) | |
360 return true; | |
361 if (!VarMap.isDestUsedInBlock(Dest)) | |
362 return true; | |
363 Variable *NewMapped = VarMap.makeLinked(Dest); | |
364 Inst *Mov = Target->createLoweredMove(NewMapped, Dest); | |
365 Node->getInsts().insert(IterCur, Mov); | |
366 return true; | |
367 } | |
368 | |
369 /// Process an arbitrary instruction. | |
370 bool handleGeneralInst() { | |
371 const bool DestIsAllocable = isAllocable(Dest); | |
372 // The (non-variable-assignment) instruction: | |
373 // ... = F(v) | |
374 // where v is not infinite-weight, gets transformed to: | |
375 // v2 = v1 | |
376 // ... = F(v1) | |
377 // where: | |
378 // v1 := map[v] | |
379 // v2 := linkTo(v) | |
380 // map[v] := v2 | |
381 // After that, if the "..." dest=u is not infinite-weight, append: | |
382 // u2 = u | |
383 // where: | |
384 // u2 := linkTo(u) | |
385 // map[u] := u2 | |
386 for (SizeT i = 0; i < Instr->getSrcSize(); ++i) { | |
387 // Iterate over the top-level src vars. Don't bother to dig into | |
388 // e.g. MemOperands because their vars should all be infinite-weight. | |
389 // (This assumption would need to change if the pass were done | |
390 // pre-lowering.) | |
391 if (auto *SrcVar = llvm::dyn_cast<Variable>(Instr->getSrc(i))) { | |
392 const bool SrcIsAllocable = isAllocable(SrcVar); | |
393 if (SrcIsAllocable) { | |
394 Variable *OldMapped = VarMap.get(SrcVar); | |
395 if (isUnconditionallyExecuted()) { | |
396 if (!VarMap.isInstLastUseOfVar(SrcVar, Instr)) { | |
397 Variable *NewMapped = VarMap.makeLinked(SrcVar); | |
398 Inst *Mov = Target->createLoweredMove(NewMapped, OldMapped); | |
399 Node->getInsts().insert(IterCur, Mov); | |
400 } | |
401 } | |
402 Instr->replaceSource(i, OldMapped); | |
403 } | |
404 } | |
405 } | |
406 // Transformation of Dest is the same as the "v=t:inf" case above. | |
407 if (DestIsAllocable && VarMap.isDestUsedInBlock(Dest)) { | |
408 if (isUnconditionallyExecuted()) { | |
409 Variable *NewMapped = VarMap.makeLinked(Dest); | |
410 Inst *Mov = Target->createLoweredMove(NewMapped, Dest); | |
411 Node->getInsts().insert(IterNext, Mov); | |
412 } else { | |
413 Variable *OldMapped = VarMap.get(Dest); | |
414 Inst *Mov = Target->createLoweredMove(OldMapped, Dest); | |
415 Mov->setDestRedefined(); | |
416 Node->getInsts().insert(IterNext, Mov); | |
417 } | |
418 } | |
419 return true; | |
420 } | |
421 | |
422 private: | |
423 TargetLowering *Target; | |
424 CfgNode *Node = nullptr; | |
425 Inst *Instr = nullptr; | |
426 Variable *Dest = nullptr; | |
427 InstList::iterator IterCur; | |
428 InstList::iterator IterNext; | |
429 bool ShouldSkipAllInstructions = false; | |
430 VariableMap VarMap; | |
431 CfgVector<Variable *> LinkedToFixups; | |
432 /// WaitingForLabel and WaitingForBranchTo are for tracking intra-block | |
433 /// control flow. | |
434 const Inst *WaitingForLabel = nullptr; | |
435 const Inst *WaitingForBranchTo = nullptr; | |
436 }; | |
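// Illustrative aside: a generic sketch (hypothetical names, not part of the
// pass) of the splice performed by VariableMap::spliceBlockLocalLinkedToChain()
// above. It is ordinary sorted insertion into a parent-linked chain: the keys
// (standing in for Variable::getIndex() values) stay in decreasing order from
// child to parent, so a later pass that visits entries in increasing index
// order always sees a chain parent before its children. That is the ordering
// requirement discussed in the TODO inside splitBlockLocalVariables() below.
struct ChainNodeSketch {
  int Key = 0;
  ChainNodeSketch *Parent = nullptr; // Plays the role of Variable::getLinkedTo().
};
/// Splice NewNode into Head's chain: walk toward the root while the parent's
/// key is still >= NewNode's key, then relink so the keys keep descending.
inline void spliceSortedIntoChain(ChainNodeSketch *Head,
                                  ChainNodeSketch *NewNode) {
  ChainNodeSketch *Link = Head;
  ChainNodeSketch *LinkParent = Link->Parent;
  while (LinkParent != nullptr && LinkParent->Key >= NewNode->Key) {
    Link = LinkParent;
    LinkParent = Link->Parent;
  }
  NewNode->Parent = LinkParent;
  Link->Parent = NewNode;
}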
437 | |
438 } // end of anonymous namespace | |
439 | |
440 /// Within each basic block, rewrite Variable references in terms of chained | |
441 /// copies of the original Variable. For example: | |
442 /// A = B + C | |
443 /// might be rewritten as: | |
444 /// B1 = B | |
445 /// C1 = C | |
446 /// A = B + C | |
447 /// A1 = A | |
448 /// and then: | |
449 /// D = A + B | |
450 /// might be rewritten as: | |
451 /// A2 = A1 | |
452 /// B2 = B1 | |
453 /// D = A1 + B1 | |
454 /// D1 = D | |
455 /// | |
456 /// The purpose is to present the linear-scan register allocator with smaller | |
457 /// live ranges, to help mitigate its "all or nothing" allocation strategy, | |
458 /// while counting on its preference mechanism to keep the split versions in the | |
459 /// same register when possible. | |
460 /// | |
461 /// When creating new Variables, A2 is linked to A1 which is linked to A, and | |
462 /// similar for the other Variable linked-to chains. Rewrites apply only to | |
463 /// Variables where mayHaveReg() is true. | |
464 /// | |
465 /// At code emission time, redundant linked-to stack assignments will be | |
466 /// recognized and elided. To illustrate using the above example, if A1 gets a | |
467 /// register but A and A2 are on the stack, the "A2=A1" store instruction is | |
468 /// redundant since A and A2 share the same stack slot and A1 originated from A. | |
469 /// | |
470 /// Simple assignment instructions are rewritten slightly differently, to take | |
471 /// maximal advantage of Variables known to have registers. | |
472 /// | |
473 /// In general, there may be several valid ways to rewrite an instruction: add | |
474 /// the new assignment instruction either before or after the original | |
475 /// instruction, and rewrite the original instruction with either the old or the | |
476 /// new variable mapping. We try to pick a strategy most likely to avoid | |
477 /// potential performance problems. For example, try to avoid storing to the | |
478 /// stack and then immediately reloading from the same location. One | |
479 /// consequence is that code might be generated that loads a register from a | |
480 /// stack location, followed almost immediately by another use of the same stack | |
481 /// location, despite its value already being available in a register as a | |
482 /// result of the first instruction. However, the performance impact here is | |
483 /// likely to be negligible, and a simple availability peephole optimization | |
484 /// could clean it up. | |
485 /// | |
486 /// This pass potentially adds a lot of new instructions and variables, and as | |
487 /// such there are compile-time performance concerns, particularly with liveness | |
488 /// analysis and register allocation. Note that for liveness analysis, the new | |
489 /// variables have single-block liveness, so they don't increase the size of the | |
490 /// liveness bit vectors that need to be merged across blocks. As a result, the | |
491 /// performance impact is likely to be linearly related to the number of new | |
492 /// instructions, rather than number of new variables times number of blocks | |
493 /// which would be the case if they were multi-block variables. | |
494 void splitBlockLocalVariables(Cfg *Func) { | |
495 if (!getFlags().getSplitLocalVars()) | |
496 return; | |
497 TimerMarker _(TimerStack::TT_splitLocalVars, Func); | |
498 LocalSplittingState State(Func); | |
499 // TODO(stichnot): Fix this mechanism for LinkedTo variables and stack slot | |
500 // assignment. | |
501 // | |
502 // To work around shortcomings with stack frame mapping, we want to arrange | |
503 // LinkedTo structure such that within one block, the LinkedTo structure | |
504 // leading to a root forms a list, not a tree. A LinkedTo root can have | |
505 // multiple children linking to it, but only one per block. Furthermore, | |
506 // because stack slot mapping processes variables in numerical order, the | |
507 // LinkedTo chain needs to be ordered such that when A->getLinkedTo()==B, then | |
508 // A->getIndex()>B->getIndex(). | |
509 // | |
510 // To effect this, while processing a block we keep track of preexisting | |
511 // LinkedTo relationships via the LinkedToFixups vector, and at the end of the | |
512 // block we splice them in such that the block has a single chain for each | |
513 // root, ordered by getIndex() value. | |
514 CfgVector<Variable *> LinkedToFixups; | |
515 for (CfgNode *Node : Func->getNodes()) { | |
516 // Clear the VarMap and LinkedToFixups at the start of every block. | |
517 LinkedToFixups.clear(); | |
518 State.setNode(Node); | |
519 auto &Insts = Node->getInsts(); | |
520 auto Iter = Insts.begin(); | |
521 auto IterEnd = Insts.end(); | |
522 // TODO(stichnot): Figure out why Phi processing usually degrades | |
523 // performance. Disable for now.q | |
John (2016/08/01 14:17:32): now.q?
Jim Stichnoth (2016/08/01 15:13:51): heh. Done. maybe should have been now.:wq or
524 static constexpr bool ProcessPhis = false; | |
525 if (ProcessPhis) { | |
526 for (Inst &Instr : Node->getPhis()) { | |
527 if (Instr.isDeleted()) | |
528 continue; | |
529 State.setInst(&Instr, Iter, Iter); | |
530 State.handlePhi(); | |
531 } | |
532 } | |
533 InstList::iterator NextIter; | |
534 for (; Iter != IterEnd && !State.shouldSkipAllInstructions(); | |
535 Iter = NextIter) { | |
536 NextIter = Iter; | |
537 ++NextIter; | |
538 Inst *Instr = iteratorToInst(Iter); | |
539 if (Instr->isDeleted()) | |
540 continue; | |
541 State.setInst(Instr, Iter, NextIter); | |
542 | |
543 // Before doing any transformations, take care of the bookkeeping for | |
544 // intra-block branching. | |
545 // | |
546 // This is tricky because the transformation for one instruction may | |
547 // depend on a transformation for a previous instruction, but if that | |
548 // previous instruction is not dynamically executed due to intra-block | |
549 // control flow, it may lead to an inconsistent state and incorrect code. | |
550 // | |
551 // We want to handle some simple cases, and reject some others: | |
552 // | |
553 // 1. For something like a select instruction, we could have: | |
554 // test cond | |
555 // dest = src_false | |
556 // branch conditionally to label | |
557 // dest = src_true | |
558 // label: | |
559 // | |
560 // Between the conditional branch and the label, we need to treat dest and | |
561 // src variables specially, specifically not creating any new state. | |
562 // | |
563 // 2. Some 64-bit atomic instructions may be lowered to a loop: | |
564 // label: | |
565 // ... | |
566 // branch conditionally to label | |
567 // | |
568 // No special treatment is needed, but it's worth tracking so that case #1 | |
569 // above can also be handled. | |
570 // | |
571 // 3. Advanced switch lowering can create really complex intra-block | |
572 // control flow, so when we recognize this, we should just stop splitting | |
573 // for the remainder of the block (which isn't much since a switch | |
574 // instruction is a terminator). | |
575 // | |
576 // 4. Other complex lowering, e.g. an i64 icmp on a 32-bit architecture, | |
577 // can result in an if/then/else like structure with two labels. One | |
578 // possibility would be to suspend splitting for the remainder of the | |
579 // lowered instruction, and then resume for the remainder of the block, | |
580 // but since we don't have high-level instruction markers, we might as | |
581 // well just stop splitting for the remainder of the block. | |
582 if (State.handleLabel()) | |
583 continue; | |
584 if (State.handleIntraBlockBranch()) | |
585 continue; | |
586 if (State.handleUnwantedInstruction()) | |
587 continue; | |
588 | |
589 // Intra-block bookkeeping is complete; now do the transformations. | |
590 | |
591 // Determine the transformation based on the kind of instruction, and | |
592 // whether its Variables are infinite-weight. New instructions can be | |
593 // inserted before the current instruction via Iter, or after the current | |
594 // instruction via NextIter. | |
595 if (State.handleVarAssign()) | |
596 continue; | |
597 if (State.handleGeneralInst()) | |
John (2016/08/01 14:17:32): no need for this if here, right?
Jim Stichnoth (2016/08/01 15:13:51): True, but I kinda like the consistency, and it mak
598 continue; | |
599 } | |
600 State.finalizeNode(); | |
601 } | |
602 | |
603 Func->dump("After splitting local variables"); | |
604 } | |
605 | |
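/// Hypothetical driver helper (illustrative only; not part of the pass): it
/// shows the intended usage. splitBlockLocalVariables() checks
/// getFlags().getSplitLocalVars() itself and is a no-op when splitting is
/// disabled; it assumes target-lowered instructions (anything that is not an
/// InstTarget is ignored) and is intended to run before register allocation so
/// that the allocator sees the shorter live ranges.
inline void maybeSplitLocalVariables(Cfg *Func) {
  splitBlockLocalVariables(Func);
}
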
606 } // end of namespace Ice | |