Index: src/gpu/GrTargetCommands.cpp
diff --git a/src/gpu/GrTargetCommands.cpp b/src/gpu/GrTargetCommands.cpp
index a2b2d6e7beb28d81bb2311b61d7691f43fc120e6..1e119fe59c2ae8dd9a51fdb71462dcca67f76d4d 100644
--- a/src/gpu/GrTargetCommands.cpp
+++ b/src/gpu/GrTargetCommands.cpp
@@ -191,30 +191,38 @@ GrTargetCommands::Cmd* GrTargetCommands::recordDrawPaths(
                                      transformValues, transformType,
                                      count, &savedIndices, &savedTransforms);
 
-    if (Cmd::kDrawPaths_CmdType == fCmdBuffer.back().type()) {
-        // The previous command was also DrawPaths. Try to collapse this call into the one
-        // before. Note that stenciling all the paths at once, then covering, may not be
-        // equivalent to two separate draw calls if there is overlap. Blending won't work,
-        // and the combined calls may also cancel each other's winding numbers in some
-        // places. For now the winding numbers are only an issue if the fill is even/odd,
-        // because DrawPaths is currently only used for glyphs, and glyphs in the same
-        // font tend to all wind in the same direction.
-        DrawPaths* previous = static_cast<DrawPaths*>(&fCmdBuffer.back());
+    CmdBuffer::ReverseIter reverseIter(fCmdBuffer);
+    if (Cmd::kXferBarrier_CmdType == reverseIter->type()) {
+        // We can combine instanced path commands through Xfer barriers because they are implemented
+        // by a single (non-overlapping) cover operation and they use fullscreen barriers.
+        reverseIter.previous();
+    }
+
+    if (Cmd::kDrawPaths_CmdType == reverseIter->type()) {
+        // The previous command was also DrawPaths. Try to combine this call with the one before.
+        // Note that when there is overlap, this is not equivalent to two separate draw calls. We
+        // don't worry about that case because instanced path commands are only used for text.
+        DrawPaths* previous = static_cast<DrawPaths*>(reverseIter.get());
         if (pathRange == previous->pathRange() &&
             indexType == previous->fIndexType &&
             transformType == previous->fTransformType &&
             stencilSettings == previous->fStencilSettings &&
-            path_fill_type_is_winding(stencilSettings) &&
-            !pipelineInfo.willBlendWithDst(pathProc)) {
-            const int indexBytes = GrPathRange::PathIndexSizeInBytes(indexType);
-            const int xformSize = GrPathRendering::PathTransformSize(transformType);
-            if (&previous->fIndices[previous->fCount*indexBytes] == savedIndices &&
-                (0 == xformSize ||
-                 &previous->fTransforms[previous->fCount*xformSize] == savedTransforms)) {
-                // Fold this DrawPaths call into the one previous.
-                previous->fCount += count;
-                return NULL;
+            path_fill_type_is_winding(stencilSettings)) {
+            const int indexBytes = GrPathRange::PathIndexSizeInBytes(indexType);
+            const int xformSize = GrPathRendering::PathTransformSize(transformType);
+            if (&previous->fIndices[previous->fCount*indexBytes] == savedIndices &&
+                (0 == xformSize ||
+                 &previous->fTransforms[previous->fCount*xformSize] == savedTransforms)) {
+                // Fold this DrawPaths call into the one previous.
+                previous->fCount += count;
+                if (Cmd::kXferBarrier_CmdType == fCmdBuffer.back().type()) {
+                    // Drop the barrier we fell through.
+                    fCmdBuffer.pop_back();
+                    SkASSERT(!fXferBarrier); // The Xfer barrier should have been fullscreen.
                 }
+                SkASSERT(previous == &fCmdBuffer.back());
+                return NULL;
+            }
             }
         }
     }
 
@@ -279,6 +287,7 @@ void GrTargetCommands::reset() {
     fCmdBuffer.reset();
     fPrevState = NULL;
     fDrawBatch = NULL;
+    fXferBarrier = NULL;
 }
 
 void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) {
@@ -404,12 +413,15 @@ void GrTargetCommands::CopySurface::execute(GrGpu* gpu, const SetState*) {
 
 void GrTargetCommands::XferBarrier::execute(GrGpu* gpu, const SetState* state) {
     SkASSERT(state);
+    SkASSERT(fRenderTarget == state->getRenderTarget());
     if (kDstCopy_GrXferBarrierType == fType) {
         GrDeviceCoordTexture dstCopy;
-        gpu->setXferBarrier(fType, state->getRenderTarget(), fBounds, &dstCopy);
-        state->getXferProcessor()->updateDstCopy(dstCopy);
+        gpu->setXferBarrier(fType, fRenderTarget, fReadBounds, &dstCopy);
+        for (int i = 0; i < fXferProcessors.count(); i++) {
+            fXferProcessors[i]->updateDstCopy(dstCopy);
+        }
     } else {
-        SkAssertResult(gpu->setXferBarrier(fType, state->getRenderTarget(), fBounds));
+        SkAssertResult(gpu->setXferBarrier(fType, fRenderTarget, fReadBounds));
     }
 }
 
@@ -487,25 +499,50 @@ bool GrTargetCommands::recordXferBarrierIfNecessary(GrInOrderDrawBuffer* iodb,
                                                     const GrDrawTarget::PipelineInfo& info) {
     SkASSERT(fPrevState);
     const GrXferProcessor& xp = *fPrevState->getXferProcessor();
+    GrRenderTarget* rt = fPrevState->getRenderTarget();
 
     GrXferBarrierType xbType = xp.setupDstReadIfNecessary(*iodb->caps(), info.getCustomDstCopy());
     if (kNone_GrXferBarrierType == xbType) {
-        return true;
-    }
+        // The XP doesn't need a barrier, but the draw may still affect one.
+        if (!fXferBarrier || rt != fXferBarrier->fRenderTarget) {
+            return true;
+        }
+        SkIRect drawBounds;
+        info.getQuickDrawBounds(&drawBounds);
+        fXferBarrier->fInvalidBounds.join(drawBounds);
+    } else {
+        SkIRect xferBounds;
+        if (!info.getConservativeDrawBounds(&xferBounds)) {
+            // Early reject was detected.
+            return false;
+        }
 
-    SkIRect xferBounds;
-    if (!info.getConservativeDrawBounds(&xferBounds)) {
-        // Early reject was detected.
-        return false;
-    }
+        if (fXferBarrier && xbType == fXferBarrier->fType && rt == fXferBarrier->fRenderTarget &&
+            !SkIRect::IntersectsNoEmptyCheck(xferBounds, fXferBarrier->fInvalidBounds)) {
+            // We can join with the previous barrier since they match and there is no overlap.
+            fXferBarrier->fReadBounds.join(xferBounds);
+            fXferBarrier->fInvalidBounds.join(xferBounds);
+            if (fXferBarrier->fXferProcessors.back() != &xp) {
+                fXferBarrier->fXferProcessors.push_back(&xp);
+            }
+        } else {
+            // We need to set a new barrier for this draw.
+            this->closeBatch();
 
-    this->closeBatch();
+            fXferBarrier = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, XferBarrier, (xbType, rt));
+            fXferBarrier->fReadBounds = xferBounds;
+            fXferBarrier->fInvalidBounds = xferBounds;
+            fXferBarrier->fXferProcessors.push_back(&xp);
 
-    XferBarrier* xb = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, XferBarrier, (xbType));
-    xb->fBounds = xferBounds;
+            iodb->recordTraceMarkersIfNecessary(fXferBarrier);
+        }
+    }
 
-    iodb->recordTraceMarkersIfNecessary(xb);
+    if (fXferBarrier->fInvalidBounds.contains(0, 0, rt->width(), rt->height())) {
+        // Close out the barrier since there is no valid area left.
+        fXferBarrier = NULL;
+    }
 
     return true;
 }
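
Note: the barrier bookkeeping in the last hunk keeps two rectangles per pending XferBarrier command: fReadBounds (the area the dst copy must cover) and fInvalidBounds (the area later draws have dirtied since that copy). A draw can share the pending barrier only while its bounds stay clear of fInvalidBounds, and the barrier is closed out once fInvalidBounds covers the whole render target. Below is a minimal, self-contained sketch of that join test; it is illustrative only and not Skia code (Rect, Barrier, and tryJoinBarrier are made-up stand-ins for SkIRect and the XferBarrier command).

// Illustrative sketch only: plain structs standing in for SkIRect / XferBarrier.
#include <algorithm>
#include <cstdio>

struct Rect {
    int l, t, r, b;  // half-open [l, r) x [t, b), like SkIRect
    bool intersects(const Rect& o) const {
        return l < o.r && o.l < r && t < o.b && o.t < b;
    }
    void join(const Rect& o) {
        l = std::min(l, o.l); t = std::min(t, o.t);
        r = std::max(r, o.r); b = std::max(b, o.b);
    }
};

struct Barrier {
    Rect readBounds;     // area the barrier's dst copy must cover
    Rect invalidBounds;  // area dirtied since the copy; may not be read through this barrier again
};

// Returns true if a draw with 'drawBounds' can reuse 'pending' instead of recording a new barrier.
bool tryJoinBarrier(Barrier* pending, const Rect& drawBounds) {
    if (pending->invalidBounds.intersects(drawBounds)) {
        return false;  // the draw would read pixels written after the dst copy
    }
    pending->readBounds.join(drawBounds);     // the copy must now also cover this draw
    pending->invalidBounds.join(drawBounds);  // and this draw dirties the area it touches
    return true;
}

int main() {
    Barrier barrier{{0, 0, 10, 10}, {0, 0, 10, 10}};
    Rect disjoint{20, 0, 30, 10};
    Rect overlapping{5, 5, 15, 15};
    std::printf("join disjoint draw: %d\n", tryJoinBarrier(&barrier, disjoint));      // prints 1
    std::printf("join overlapping draw: %d\n", tryJoinBarrier(&barrier, overlapping)); // prints 0
    return 0;
}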