Index: src/gpu/GrTargetCommands.cpp
diff --git a/src/gpu/GrTargetCommands.cpp b/src/gpu/GrTargetCommands.cpp
index 9b968dc9e73650c0501fd9786a1e0901bcef2e18..1827e8898aa1d701dc01011d92d3af91d82f7c87 100644
--- a/src/gpu/GrTargetCommands.cpp
+++ b/src/gpu/GrTargetCommands.cpp
@@ -47,11 +47,20 @@ int GrTargetCommands::concatInstancedDraw(GrInOrderDrawBuffer* iodb,
     // Check if there is a draw info that is compatible that uses the same VB from the pool and
     // the same IB
-    if (Cmd::kDraw_CmdType != fCmdBuffer.back().type()) {
+    CmdBuffer::ReverseIter reverseIter(fCmdBuffer);
+    if (Cmd::kBlendBarrier_CmdType == reverseIter->type()) {
+        // Instanced draw commands can collapse through a blend barrier if they don't overlap.
+        // TODO: We could also collapse through a dst texture setup if they had a dedicated command.
+        SkAssertResult(reverseIter.previous());
+        SkASSERT(Cmd::kBlendBarrier_CmdType != reverseIter->type());
+    }
+
+    if (Cmd::kDraw_CmdType != reverseIter->type()) {
         return 0;
     }
-    Draw* draw = static_cast<Draw*>(&fCmdBuffer.back());
+    Draw* draw = static_cast<Draw*>(reverseIter.get());
+    bool allowOverlap = (draw == &fCmdBuffer.back());
     if (!draw->fInfo.isInstanced() ||
         draw->fInfo.primitiveType() != info.primitiveType() ||
@@ -64,6 +73,19 @@ int GrTargetCommands::concatInstancedDraw(GrInOrderDrawBuffer* iodb,
     if (draw->fInfo.startVertex() + draw->fInfo.vertexCount() != info.startVertex()) {
         return 0;
     }
+    if (!allowOverlap) {
+        if (draw->fInfo.getDevBounds()->isEmpty() || info.getDevBounds()->isEmpty()) {
+            // Draw bounds are unknown.
+            return 0;
+        }
+        SkIRect existingBounds, incomingBounds;
+        draw->fInfo.getDevBounds()->roundOut(&existingBounds);
+        info.getDevBounds()->roundOut(&incomingBounds);
+        if (SkIRect::Intersects(existingBounds, incomingBounds)) {
+            // Draw bounds overlap.
+            return 0;
+        }
+    }
     // how many instances can be concat'ed onto draw given the size of the index buffer
     int instancesToConcat = iodb->indexCountInCurrentSource() / info.indicesPerInstance();
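
The non-overlap test added above is conservative: empty device bounds mean the bounds are unknown, and fractional bounds are rounded out to whole pixels before testing for intersection. A minimal standalone sketch of the same check, using the SkRect/SkIRect calls from the hunk; the helper name is hypothetical and not part of the patch:

    #include "SkRect.h"

    // Sketch, not in the patch: returns true if two draws might touch the same
    // pixels. Empty bounds mean "unknown", so the only safe answer is then true.
    static bool draws_may_overlap(const SkRect& existing, const SkRect& incoming) {
        if (existing.isEmpty() || incoming.isEmpty()) {
            return true;
        }
        SkIRect existingBounds, incomingBounds;
        existing.roundOut(&existingBounds);   // round out so partially covered pixels count
        incoming.roundOut(&incomingBounds);
        return SkIRect::Intersects(existingBounds, incomingBounds);
    }
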
@@ -72,6 +94,22 @@ int GrTargetCommands::concatInstancedDraw(GrInOrderDrawBuffer* iodb,
     draw->fInfo.adjustInstanceCount(instancesToConcat);
+    // Join the draw bounds.
+    if (!draw->fInfo.getDevBounds()->isEmpty()) {
+        if (info.getDevBounds()->isEmpty()) {
+            draw->fInfo.setDevBounds(SkRect::MakeEmpty());
+        } else {
+            SkRect newBounds(*draw->fInfo.getDevBounds());
+            newBounds.join(*info.getDevBounds());
+            draw->fInfo.setDevBounds(newBounds);
+        }
+    }
+
+    // Remove the blend barrier, if any.
+    if (Cmd::kBlendBarrier_CmdType == fCmdBuffer.back().type()) {
+        fCmdBuffer.pop_back();

Mark Kilgard, 2015/04/02 23:02:38:
    does something avoid all this checking to eliminate ...

+    }
+
     // update last fGpuCmdMarkers to include any additional trace markers that have been added
     iodb->recordTraceMarkersIfNecessary(draw);
     return instancesToConcat;
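
The joined bounds recorded above follow the same "empty means unknown" convention: once either draw has unknown bounds, the concatenated draw's bounds become unknown too, so a later draw can no longer prove it is non-overlapping. A small sketch of that rule with a hypothetical helper name; the patch performs the equivalent update in place on draw->fInfo:

    #include "SkRect.h"

    // Sketch, not in the patch: combine the device bounds of two concatenated
    // draws. Unknown (empty) bounds are contagious: if either input is unknown,
    // the result is unknown.
    static SkRect join_dev_bounds(const SkRect& existing, const SkRect& incoming) {
        if (existing.isEmpty() || incoming.isEmpty()) {
            return SkRect::MakeEmpty();
        }
        SkRect joined = existing;
        joined.join(incoming);
        return joined;
    }
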
@@ -115,6 +153,7 @@ GrTargetCommands::Cmd* GrTargetCommands::recordDrawBatch(
     // Check if there is a Batch Draw we can batch with
     if (Cmd::kDrawBatch_CmdType != fCmdBuffer.back().type() || !fDrawBatch) {
+        // TODO: Batch through blend barriers and dst texture setups when there is no overlap.
+        fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget));
         return fDrawBatch;
     }
@@ -153,6 +192,9 @@ GrTargetCommands::Cmd* GrTargetCommands::recordDrawPath(
                                                         const GrPath* path,
                                                         const GrStencilSettings& stencilSettings,
                                                         const GrDrawTarget::PipelineInfo& pipelineInfo) {
+    // Path commands don't require a preceding blend barrier (per the spec).
+    SkASSERT(fCmdBuffer.empty() || Cmd::kBlendBarrier_CmdType != fCmdBuffer.back().type());
+
     this->closeBatch();
     // TODO: Only compare the subset of GrPipelineBuilder relevant to path covering?
@@ -178,6 +220,10 @@ GrTargetCommands::Cmd* GrTargetCommands::recordDrawPaths(
     SkASSERT(pathRange);
     SkASSERT(indexValues);
     SkASSERT(transformValues);
+
+    // Path commands don't require a preceding blend barrier (per the spec).
+    SkASSERT(fCmdBuffer.empty() || Cmd::kBlendBarrier_CmdType != fCmdBuffer.back().type());
+
     this->closeBatch();
     if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) {
@@ -193,28 +239,25 @@ GrTargetCommands::Cmd* GrTargetCommands::recordDrawPaths(
     if (Cmd::kDrawPaths_CmdType == fCmdBuffer.back().type()) {
         // The previous command was also DrawPaths. Try to collapse this call into the one
-        // before. Note that stenciling all the paths at once, then covering, may not be
-        // equivalent to two separate draw calls if there is overlap. Blending won't work,
-        // and the combined calls may also cancel each other's winding numbers in some
-        // places. For now the winding numbers are only an issue if the fill is even/odd,
-        // because DrawPaths is currently only used for glyphs, and glyphs in the same
-        // font tend to all wind in the same direction.
+        // before. Note that when there is overlap, this is not equivalent to two separate
+        // draw calls. We don't worry about this case because instanced path commands are
+        // only used for text. (It also means we get to combine calls when the blend mode
+        // doesn't allow overlap; a single cover pass is non-overlapping by definition.)
         DrawPaths* previous = static_cast<DrawPaths*>(&fCmdBuffer.back());
         if (pathRange == previous->pathRange() &&
             indexType == previous->fIndexType &&
             transformType == previous->fTransformType &&
             stencilSettings == previous->fStencilSettings &&
-            path_fill_type_is_winding(stencilSettings) &&
-            !pipelineInfo.willBlendWithDst(pathProc)) {
-            const int indexBytes = GrPathRange::PathIndexSizeInBytes(indexType);
-            const int xformSize = GrPathRendering::PathTransformSize(transformType);
-            if (&previous->fIndices[previous->fCount*indexBytes] == savedIndices &&
-                (0 == xformSize ||
-                 &previous->fTransforms[previous->fCount*xformSize] == savedTransforms)) {
-                // Fold this DrawPaths call into the one previous.
-                previous->fCount += count;
-                return NULL;
-            }
+            path_fill_type_is_winding(stencilSettings)) { // Even/odd could cancel on overlap.
+            const int indexBytes = GrPathRange::PathIndexSizeInBytes(indexType);
+            const int xformSize = GrPathRendering::PathTransformSize(transformType);
+            if (&previous->fIndices[previous->fCount*indexBytes] == savedIndices &&
+                (0 == xformSize ||
+                 &previous->fTransforms[previous->fCount*xformSize] == savedTransforms)) {
+                // Fold this DrawPaths call into the one previous.
+                previous->fCount += count;
+                return NULL;
+            }
         }
     }
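
The winding-fill restriction kept above matters because folding two DrawPaths calls stencils all of their contours before a single cover pass. A brief illustration of why that is safe for nonzero (winding) fills but not for even/odd, assuming two overlapping contours that wind in the same direction; this is an illustrative snippet, not code from the patch:

    // Sketch, not in the patch: each contour adds +1 to the winding count where it
    // covers a pixel, so the overlap of two same-direction contours ends with a count of 2.
    static bool filled_nonzero(int winding) { return 0 != winding; }
    static bool filled_evenodd(int winding) { return 0 != (winding & 1); }

    // filled_nonzero(2) == true:  the overlap stays filled, matching two separate draws.
    // filled_evenodd(2) == false: the contributions cancel and the overlap would become
    //                             a hole, which is why even/odd fills are not folded.
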
@@ -402,6 +445,10 @@ void GrTargetCommands::CopySurface::execute(GrGpu* gpu, const SetState*) {
     gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint);
 }
+void GrTargetCommands::BlendBarrier::execute(GrGpu* gpu, const SetState*) {
+    gpu->blendBarrier();
+}
+
 GrTargetCommands::Cmd* GrTargetCommands::recordCopySurface(GrInOrderDrawBuffer* iodb,
                                                            GrSurface* dst,
                                                            GrSurface* src,
@@ -417,6 +464,14 @@ GrTargetCommands::Cmd* GrTargetCommands::recordCopySurface(GrInOrderDrawBuffer*
     return NULL;
 }
+GrTargetCommands::Cmd* GrTargetCommands::recordBlendBarrier(GrInOrderDrawBuffer* iodb) {
+    if (!fCmdBuffer.empty() && Cmd::kBlendBarrier_CmdType == fCmdBuffer.back().type()) {
+        return NULL;
+    }
+    this->closeBatch();
+    return GrNEW_APPEND_TO_RECORDER(fCmdBuffer, BlendBarrier, ());
+}
+
 bool GrTargetCommands::setupPipelineAndShouldDraw(GrInOrderDrawBuffer* iodb,
                                                   const GrPrimitiveProcessor* primProc,
                                                   const GrDrawTarget::PipelineInfo& pipelineInfo) {
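
Taken together, the last two hunks give the recorder a minimal barrier story: recordBlendBarrier() de-duplicates back-to-back requests by returning NULL when a BlendBarrier is already the most recent command, and BlendBarrier::execute() simply forwards to gpu->blendBarrier() at playback time. A rough sketch of the recording-side call pattern; the helper, its readsDst flag, and the way the caller reaches GrTargetCommands are assumptions for illustration, not part of the patch:

    #include "GrTargetCommands.h"
    #include "GrInOrderDrawBuffer.h"

    // Hypothetical recording-side helper (not in the patch): request a barrier only
    // before draws whose blend reads the destination. Consecutive requests collapse
    // into one command because recordBlendBarrier() returns NULL when a barrier is
    // already the last recorded command.
    static void request_barrier_if_needed(GrTargetCommands* commands,
                                          GrInOrderDrawBuffer* iodb,
                                          bool readsDst) {
        if (readsDst) {
            commands->recordBlendBarrier(iodb);
        }
    }
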