Index: src/IceCfgNode.cpp |
diff --git a/src/IceCfgNode.cpp b/src/IceCfgNode.cpp |
index d269ee2e1423cdfe4b642558d427bd1e9e4a8207..3f352b3394f2ba5ca9bde461ca65dcc4cf62f5d6 100644 |
--- a/src/IceCfgNode.cpp |
+++ b/src/IceCfgNode.cpp |
@@ -834,6 +834,8 @@ void emitLiveRangesEnded(Ostream &Str, const Cfg *Func, const Inst *Instr, |
} |
void updateStats(Cfg *Func, const Inst *I) { |
+ if (!ALLOW_DUMP) |
+ return; |
// Update emitted instruction count, plus fill/spill count for |
// Variable operands without a physical register. |
if (uint32_t Count = I->getEmitInstCount()) { |
@@ -901,14 +903,119 @@ void CfgNode::emitIAS(Cfg *Func) const { |
// Emitting a Phi instruction should cause an error. |
I.emitIAS(Func); |
} |
- for (const Inst &I : Insts) { |
- if (I.isDeleted()) |
+ |
+ // Do the simple emission if not sandboxed. |
+ if (!Func->getContext()->getFlags().getUseSandboxing()) { |
+ for (const Inst &I : Insts) { |
+ if (!I.isDeleted() && !I.isRedundantAssign()) { |
+ I.emitIAS(Func); |
+ updateStats(Func, &I); |
+ } |
+ } |
+ return; |
+ } |
+ // The remainder of the function handles emission with sandboxing. |
+ |
+ // BundleMask is used for testing whether emission has crossed a |
+ // bundle boundary. |
+ const intptr_t BundleSize = 1 << Asm->getBundleAlignLog2Bytes(); |
+ const intptr_t BundleMask = ~(BundleSize - 1); |
+ InstList::const_iterator End = Insts.end(); |
+ // BundleLockStart points to the BundleLock instruction for the |
+ // current BundleLock region, or the sentinel value End if we're not |
+ // within a BundleLock region. |
+ InstList::const_iterator BundleLockStart = End; |
+ // Retrying indicates that we had to roll back to BundleLockStart in |
+ // order to apply padding before the BundleLock sequence. |
+ bool Retrying = false; |
+ intptr_t SizeSnaphotPre = 0; |
+ for (InstList::const_iterator I = Insts.begin(); I != End; ++I) { |
+ if (I->isDeleted() || I->isRedundantAssign()) |
continue; |
- if (I.isRedundantAssign()) |
+ if (llvm::isa<InstBundleLock>(I)) { |
+ // Set up the initial BundleLock state. This should not happen |
+ // while retrying, because the retry rolls back to the |
+ // instruction following the BundleLock instruction. |
+ assert(!Retrying); |
+ assert(BundleLockStart == End); |
+ BundleLockStart = I; |
+ SizeSnaphotPre = Asm->getBufferSize(); |
continue; |
- I.emitIAS(Func); |
- updateStats(Func, &I); |
+ } |
+ if (llvm::isa<InstBundleUnlock>(I)) { |
+ assert(BundleLockStart != End); |
+ intptr_t SizeSnaphotPost = Asm->getBufferSize(); |
+ switch (llvm::cast<InstBundleLock>(BundleLockStart)->getOption()) { |
+ case InstBundleLock::Opt_None: |
+ // If this is the first pass, check whether a bundle boundary |
+ // was crossed, and if so, roll back, add padding, and retry. |
+ // If this is the retry pass, just validate that the bundle |
+ // boundary was not crossed. |
+ if (Retrying) { |
+ assert((SizeSnaphotPre & BundleMask) == |
+ (SizeSnaphotPost & BundleMask)); |
+ // The fallthrough will reset the BundleLock status and |
+ // continue. |
+ } else { |
+ if ((SizeSnaphotPre & BundleMask) != (SizeSnaphotPost & BundleMask)) { |
+ Asm->setBufferSize(SizeSnaphotPre); |
+ Asm->padWithNop(BundleSize - (SizeSnaphotPre & (BundleSize - 1))); |
+ assert((Asm->getBufferSize() & ~BundleMask) == 0); |
+ SizeSnaphotPre = Asm->getBufferSize(); |
+ Retrying = true; |
+ I = BundleLockStart; |
+ continue; |
+ } |
+ } |
+ break; |
+ case InstBundleLock::Opt_AlignToEnd: { |
+ // If we are already aligned at a bundle boundary, then just |
+ // continue. Otherwise, make sure we are not already |
+ // retrying, then roll back, pad to the next bundle boundary |
+ // if the instructions won't fit, then pad for alignment, then |
+ // retry. |
+ if (SizeSnaphotPost & (BundleSize - 1)) { |
+ assert(!Retrying); |
+ Asm->setBufferSize(SizeSnaphotPre); |
+ Asm->padWithNop(BundleSize - (SizeSnaphotPre & (BundleSize - 1))); |
+ assert((Asm->getBufferSize() & ~BundleMask) == 0); |
+ Asm->padWithNop(BundleSize - (SizeSnaphotPost - SizeSnaphotPre)); |
+ Retrying = true; |
+ I = BundleLockStart; |
+ continue; |
+ } |
+ } break; |
+ } |
+ // Not retrying, so reset the bundling state. |
+ BundleLockStart = End; |
+ Retrying = false; |
+ continue; |
+ } |
+ // At this point, I points to a non-bundling instruction. |
+ intptr_t BufSizePre = Asm->getBufferSize(); |
+ I->emitIAS(Func); |
+ if (!Retrying) |
+ updateStats(Func, I); |
+ intptr_t BufSizePost = Asm->getBufferSize(); |
+ if (BundleLockStart == End) { |
+ // Not within a BundleLock region, so do a mini-retry if the |
+ // instruction crosses a bundle boundary. Note that if we are |
+ // within a BundleLock region, we explicitly don't want to add |
+ // inter-instruction padding because it would mess up the |
+ // align_to_end calculation. |
+ if ((BufSizePre & BundleMask) != (BufSizePost & BundleMask)) { |
jvoung (off chromium)
2015/02/17 22:57:45
Maybe put this into an inlined helper function.
Jim Stichnoth
2015/02/18 07:06:14
Yeah, this started getting out of control and so I [comment truncated in this capture]
|
+ Asm->setBufferSize(BufSizePre); |
+ Asm->padWithNop((BufSizePost & BundleMask) - BufSizePre); |
+ assert((Asm->getBufferSize() & ~BundleMask) == 0); |
+ I->emitIAS(Func); |
jvoung (off chromium)
2015/02/17 22:57:45
Besides labels being bound twice, I think the othe[r… — comment truncated in this capture]
Jim Stichnoth
2015/02/18 07:06:14
IIUC, binding labels twice isn't fundamentally muc[h… — comment truncated in this capture]
|
+ } |
+ } |
} |
+ |
+ // Don't allow bundle locking across basic blocks, to keep the |
+ // backtracking mechanism simple. |
+ assert(BundleLockStart == End); |
+ assert(!Retrying); |
} |
void CfgNode::dump(Cfg *Func) const { |