Index: src/gpu/GrInOrderDrawBuffer.h
diff --git a/src/gpu/GrInOrderDrawBuffer.h b/src/gpu/GrInOrderDrawBuffer.h
index 5a525fdfa27bfd9bd8d5546b68a1577e3db9a48e..1100d95a9c04fbb0fcf00c602e30e6804dca10c2 100644
--- a/src/gpu/GrInOrderDrawBuffer.h
+++ b/src/gpu/GrInOrderDrawBuffer.h
@@ -17,88 +17,21 @@
 #include "GrPath.h"
 #include "GrTRecorder.h"
-/**
- * GrInOrderDrawBuffer is an implementation of GrDrawTarget that queues up draws for eventual
- * playback into a GrGpu. In theory one draw buffer could playback into another. When index or
- * vertex buffers are used as geometry sources it is the callers the draw buffer only holds
- * references to the buffers. It is the callers responsibility to ensure that the data is still
- * valid when the draw buffer is played back into a GrGpu. Similarly, it is the caller's
- * responsibility to ensure that all referenced textures, buffers, and render-targets are associated
- * in the GrGpu object that the buffer is played back into. The buffer requires VB and IB pools to
- * store geometry.
- */
-class GrInOrderDrawBuffer : public GrFlushToGpuDrawTarget {
-public:
-
-    /**
-     * Creates a GrInOrderDrawBuffer
-     *
-     * @param gpu        the gpu object that this draw buffer flushes to.
-     * @param vertexPool pool where vertices for queued draws will be saved when
-     *                   the vertex source is either reserved or array.
-     * @param indexPool  pool where indices for queued draws will be saved when
-     *                   the index source is either reserved or array.
-     */
-    GrInOrderDrawBuffer(GrGpu* gpu,
-                        GrVertexBufferAllocPool* vertexPool,
-                        GrIndexBufferAllocPool* indexPool);
-
-    ~GrInOrderDrawBuffer() SK_OVERRIDE;
-
-    // tracking for draws
-    DrawToken getCurrentDrawToken() SK_OVERRIDE { return DrawToken(this, fDrawID); }
-
-    void clearStencilClip(const SkIRect& rect,
-                          bool insideClip,
-                          GrRenderTarget* renderTarget) SK_OVERRIDE;
+class GrInOrderDrawBuffer;
-    void discard(GrRenderTarget*) SK_OVERRIDE;
-
-protected:
-    void willReserveVertexAndIndexSpace(int vertexCount,
-                                        size_t vertexStride,
-                                        int indexCount);
-
-    void appendIndicesAndTransforms(const void* indexValues, PathIndexType indexType,
-                                    const float* transformValues, PathTransformType transformType,
-                                    int count, char** indicesLocation, float** xformsLocation) {
-        int indexBytes = GrPathRange::PathIndexSizeInBytes(indexType);
-        *indicesLocation = (char*) fPathIndexBuffer.alloc(count * indexBytes,
-                                                          SkChunkAlloc::kThrow_AllocFailType);
-        SkASSERT(SkIsAlign4((uintptr_t)*indicesLocation));
-        memcpy(*indicesLocation, reinterpret_cast<const char*>(indexValues), count * indexBytes);
-
-        const int xformBytes = GrPathRendering::PathTransformSize(transformType) * sizeof(float);
-        *xformsLocation = NULL;
-
-        if (0 != xformBytes) {
-            *xformsLocation = (float*) fPathTransformBuffer.alloc(count * xformBytes,
-                                                                  SkChunkAlloc::kThrow_AllocFailType);
-            SkASSERT(SkIsAlign4((uintptr_t)*xformsLocation));
-            memcpy(*xformsLocation, transformValues, count * xformBytes);
-        }
-    }
-
-    bool canConcatToIndexBuffer(const GrIndexBuffer** ib) {
-        const GrDrawTarget::GeometrySrcState& geomSrc = this->getGeomSrc();
-
-        // we only attempt to concat when reserved verts are used with a client-specified
-        // index buffer. To make this work with client-specified VBs we'd need to know if the VB
-        // was updated between draws.
-        if (kReserved_GeometrySrcType != geomSrc.fVertexSrc ||
-            kBuffer_GeometrySrcType != geomSrc.fIndexSrc) {
-            return false;
-        }
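+// GrTargetCommands records the commands issued to a GrInOrderDrawBuffer (draws, path
+// rendering ops, clears, copies, and pipeline state changes) so that they can be played
+// back into the GrGpu when the draw buffer is flushed.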
+class GrTargetCommands : ::SkNoncopyable {
+    struct SetState;
-        *ib = geomSrc.fIndexBuffer;
-        return true;
+public:
+    GrTargetCommands(GrGpu* gpu,
+                     GrVertexBufferAllocPool* vertexPool,
+                     GrIndexBufferAllocPool* indexPool)
+        : fCmdBuffer(kCmdBufferInitialSizeInBytes)
+        , fPrevState(NULL)
+        , fBatchTarget(gpu, vertexPool, indexPool)
+        , fDrawBatch(NULL) {
     }
-private:
-    typedef GrGpu::DrawArgs DrawArgs;
-
-    struct SetState;
-
     struct Cmd : ::SkNoncopyable {
         enum {
             kDraw_Cmd = 1,
@@ -128,12 +61,89 @@ private:
         uint8_t fType;
     };
+    void reset();
+    void flush(GrInOrderDrawBuffer*);
+
+    Cmd* recordClearStencilClip(GrInOrderDrawBuffer*,
+                                const SkIRect& rect,
+                                bool insideClip,
+                                GrRenderTarget* renderTarget);
+
+    Cmd* recordDiscard(GrInOrderDrawBuffer*, GrRenderTarget*);
+
+    Cmd* recordDraw(GrInOrderDrawBuffer*,
+                    const GrGeometryProcessor*,
+                    const GrDrawTarget::DrawInfo&,
+                    const GrDrawTarget::PipelineInfo&);
+    Cmd* recordDrawBatch(GrInOrderDrawBuffer*,
+                         GrBatch*,
+                         const GrDrawTarget::PipelineInfo&);
+    void recordDrawRect(GrInOrderDrawBuffer*,
+                        GrPipelineBuilder*,
+                        GrColor,
+                        const SkMatrix& viewMatrix,
+                        const SkRect& rect,
+                        const SkRect* localRect,
+                        const SkMatrix* localMatrix);
+    Cmd* recordStencilPath(GrInOrderDrawBuffer*,
+                           const GrPipelineBuilder&,
+                           const GrPathProcessor*,
+                           const GrPath*,
+                           const GrScissorState&,
+                           const GrStencilSettings&);
+    Cmd* recordDrawPath(GrInOrderDrawBuffer*,
+                        const GrPathProcessor*,
+                        const GrPath*,
+                        const GrStencilSettings&,
+                        const GrDrawTarget::PipelineInfo&);
+    Cmd* recordDrawPaths(GrInOrderDrawBuffer*,
+                         const GrPathProcessor*,
+                         const GrPathRange*,
+                         const void*,
+                         GrDrawTarget::PathIndexType,
+                         const float transformValues[],
+                         GrDrawTarget::PathTransformType,
+                         int,
+                         const GrStencilSettings&,
+                         const GrDrawTarget::PipelineInfo&);
+    Cmd* recordClear(GrInOrderDrawBuffer*,
+                     const SkIRect* rect,
+                     GrColor,
+                     bool canIgnoreRect,
+                     GrRenderTarget*);
+    Cmd* recordCopySurface(GrInOrderDrawBuffer*,
+                           GrSurface* dst,
+                           GrSurface* src,
+                           const SkIRect& srcRect,
+                           const SkIPoint& dstPoint);
+
+protected:
+    void willReserveVertexAndIndexSpace(int vertexCount,
+                                        size_t vertexStride,
+                                        int indexCount);
+
+private:
+    friend class GrInOrderDrawBuffer;
+
+    typedef GrGpu::DrawArgs DrawArgs;
+
+    // Attempts to concat instances from info onto the previous draw. info must represent an
+    // instanced draw. The caller must have already recorded a new draw state and clip if necessary.
+    int concatInstancedDraw(GrInOrderDrawBuffer*, const GrDrawTarget::DrawInfo&);
+
+    bool SK_WARN_UNUSED_RESULT setupPipelineAndShouldDraw(GrInOrderDrawBuffer*,
+                                                          const GrPrimitiveProcessor*,
+                                                          const GrDrawTarget::PipelineInfo&);
+    bool SK_WARN_UNUSED_RESULT setupPipelineAndShouldDraw(GrInOrderDrawBuffer*,
+                                                          GrBatch*,
+                                                          const GrDrawTarget::PipelineInfo&);
+
     struct Draw : public Cmd {
-        Draw(const DrawInfo& info) : Cmd(kDraw_Cmd), fInfo(info) {}
+        Draw(const GrDrawTarget::DrawInfo& info) : Cmd(kDraw_Cmd), fInfo(info) {}
         void execute(GrGpu*, const SetState*) SK_OVERRIDE;
-        DrawInfo fInfo;
+        GrDrawTarget::DrawInfo fInfo;
     };
     struct StencilPath : public Cmd {
@@ -175,12 +185,12 @@ private:
         void execute(GrGpu*, const SetState*) SK_OVERRIDE;
-        char*             fIndices;
-        PathIndexType     fIndexType;
-        float*            fTransforms;
-        PathTransformType fTransformType;
-        int               fCount;
-        GrStencilSettings fStencilSettings;
+        char*                           fIndices;
+        GrDrawTarget::PathIndexType     fIndexType;
+        float*                          fTransforms;
+        GrDrawTarget::PathTransformType fTransformType;
+        int                             fCount;
+        GrStencilSettings               fStencilSettings;
     private:
         GrPendingIOResource<const GrPathRange, kRead_GrIOType> fPathRange;
@@ -276,8 +286,103 @@ private:
         GrBatchTarget* fBatchTarget;
     };
-    typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double.
-    typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer;
+    static const int kCmdBufferInitialSizeInBytes = 8 * 1024;
+
+    typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double.
+    typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer;
+
+    CmdBuffer                    fCmdBuffer;
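+    // The most recently recorded SetState; lets setupPipelineAndShouldDraw() detect and
+    // skip redundant pipeline state changes.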
+    SetState*                    fPrevState;
+    GrBatchTarget                fBatchTarget;
+    // TODO hack until batch is everywhere
+    GrTargetCommands::DrawBatch* fDrawBatch;
+
+    // This will go away when everything uses batch. However, in the short term anything which
+    // might be put into the GrInOrderDrawBuffer needs to make sure it closes the last batch
+    void closeBatch();
+};
+
+/**
+ * GrInOrderDrawBuffer is an implementation of GrDrawTarget that queues up draws for eventual
+ * playback into a GrGpu. In theory one draw buffer could play back into another. When index or
+ * vertex buffers are used as geometry sources, the draw buffer only holds references to the
+ * buffers. It is the caller's responsibility to ensure that the data is still valid when the
+ * draw buffer is played back into a GrGpu. Similarly, it is the caller's responsibility to
+ * ensure that all referenced textures, buffers, and render targets are associated with the
+ * GrGpu object that the buffer is played back into. The buffer requires VB and IB pools to
+ * store geometry.
+ */
+class GrInOrderDrawBuffer : public GrFlushToGpuDrawTarget {
+public:
+
+    /**
+     * Creates a GrInOrderDrawBuffer
+     *
+     * @param gpu        the gpu object that this draw buffer flushes to.
+     * @param vertexPool pool where vertices for queued draws will be saved when
+     *                   the vertex source is either reserved or array.
+     * @param indexPool  pool where indices for queued draws will be saved when
+     *                   the index source is either reserved or array.
+     */
+    GrInOrderDrawBuffer(GrGpu* gpu,
+                        GrVertexBufferAllocPool* vertexPool,
+                        GrIndexBufferAllocPool* indexPool);
+
+    ~GrInOrderDrawBuffer() SK_OVERRIDE;
+
+    // tracking for draws
+    DrawToken getCurrentDrawToken() SK_OVERRIDE { return DrawToken(this, fDrawID); }
+
+    void clearStencilClip(const SkIRect& rect,
+                          bool insideClip,
+                          GrRenderTarget* renderTarget) SK_OVERRIDE;
+
+    void discard(GrRenderTarget*) SK_OVERRIDE;
+
+protected:
+    void willReserveVertexAndIndexSpace(int vertexCount,
+                                        size_t vertexStride,
+                                        int indexCount) SK_OVERRIDE;
+
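+    // Copies the given path index and transform data into this buffer's own storage
+    // (fPathIndexBuffer/fPathTransformBuffer) so the data stays valid until playback, and
+    // returns pointers to the copies. *xformsLocation is left NULL when transformType
+    // carries no per-path data.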
+    void appendIndicesAndTransforms(const void* indexValues, PathIndexType indexType,
+                                    const float* transformValues, PathTransformType transformType,
+                                    int count, char** indicesLocation, float** xformsLocation) {
+        int indexBytes = GrPathRange::PathIndexSizeInBytes(indexType);
+        *indicesLocation = (char*) fPathIndexBuffer.alloc(count * indexBytes,
+                                                          SkChunkAlloc::kThrow_AllocFailType);
+        SkASSERT(SkIsAlign4((uintptr_t)*indicesLocation));
+        memcpy(*indicesLocation, reinterpret_cast<const char*>(indexValues), count * indexBytes);
+
+        const int xformBytes = GrPathRendering::PathTransformSize(transformType) * sizeof(float);
+        *xformsLocation = NULL;
+
+        if (0 != xformBytes) {
+            *xformsLocation = (float*) fPathTransformBuffer.alloc(count * xformBytes,
+                                                                  SkChunkAlloc::kThrow_AllocFailType);
+            SkASSERT(SkIsAlign4((uintptr_t)*xformsLocation));
+            memcpy(*xformsLocation, transformValues, count * xformBytes);
+        }
+    }
+
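+    // Returns true, and the client-specified index buffer via 'ib', if the current geometry
+    // sources allow a new draw to be concatenated onto the previously recorded indexed draw.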
+    bool canConcatToIndexBuffer(const GrIndexBuffer** ib) {
+        const GrDrawTarget::GeometrySrcState& geomSrc = this->getGeomSrc();
+
+        // we only attempt to concat when reserved verts are used with a client-specified
+        // index buffer. To make this work with client-specified VBs we'd need to know if the VB
+        // was updated between draws.
+        if (kReserved_GeometrySrcType != geomSrc.fVertexSrc ||
+            kBuffer_GeometrySrcType != geomSrc.fIndexSrc) {
+            return false;
+        }
+
+        *ib = geomSrc.fIndexBuffer;
+        return true;
+    }
+
+private:
+    friend class GrTargetCommands;
+
+    typedef GrGpu::DrawArgs DrawArgs;
     void onReset() SK_OVERRIDE;
     void onFlush() SK_OVERRIDE;
@@ -323,46 +428,27 @@ private:
     // instanced draw. The caller must have already recorded a new draw state and clip if necessary.
     int concatInstancedDraw(const DrawInfo&);
-    // Determines whether the current draw operation requires a new GrPipeline and if so
-    // records it. If the draw can be skipped false is returned and no new GrPipeline is
-    // recorded.
-    // TODO delete the primproc variant when we have batches everywhere
-    bool SK_WARN_UNUSED_RESULT setupPipelineAndShouldDraw(const GrPrimitiveProcessor*,
-                                                          const PipelineInfo&);
-    bool SK_WARN_UNUSED_RESULT setupPipelineAndShouldDraw(GrBatch*, const PipelineInfo&);
-
     // We lazily record clip changes in order to skip clips that have no effect.
     void recordClipIfNecessary();
     // Records any trace markers for a command
-    void recordTraceMarkersIfNecessary(Cmd*);
+    void recordTraceMarkersIfNecessary(GrTargetCommands::Cmd*);
     SkString getCmdString(int index) const {
         SkASSERT(index < fGpuCmdMarkers.count());
         return fGpuCmdMarkers[index].toString();
     }
     bool isIssued(uint32_t drawID) SK_OVERRIDE { return drawID != fDrawID; }
-    GrBatchTarget* getBatchTarget() { return &fBatchTarget; }
-
     // TODO: Use a single allocator for commands and records
     enum {
-        kCmdBufferInitialSizeInBytes = 8 * 1024,
        kPathIdxBufferMinReserve   = 2 * 64,  // 64 uint16_t's
        kPathXformBufferMinReserve = 2 * 64,  // 64 two-float transforms
     };
-    CmdBuffer                         fCmdBuffer;
-    SetState*                         fPrevState;
+    GrTargetCommands                  fCommands;
     SkTArray<GrTraceMarkerSet, false> fGpuCmdMarkers;
     SkChunkAlloc                      fPathIndexBuffer;
     SkChunkAlloc                      fPathTransformBuffer;
     uint32_t                          fDrawID;
-    GrBatchTarget                     fBatchTarget;
-    // TODO hack until batch is everywhere
-    DrawBatch*                        fDrawBatch;
-
-    // This will go away when everything uses batch. However, in the short term anything which
-    // might be put into the GrInOrderDrawBuffer needs to make sure it closes the last batch
-    inline void closeBatch();
     typedef GrFlushToGpuDrawTarget INHERITED;
 };
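
For readers new to this corner of the code: the pattern being factored out of GrInOrderDrawBuffer into GrTargetCommands above is a record-then-playback command buffer. The standalone C++ sketch below is illustrative only; it substitutes std::vector and hypothetical names (FakeGpu, TargetCommands, DrawCmd) for Skia's GrGpu, GrTRecorder, and command structs, but it shows the same shape: commands are recorded into a buffer and then executed in order at flush.

    #include <cstdint>
    #include <memory>
    #include <utility>
    #include <vector>

    struct FakeGpu {};  // hypothetical stand-in for GrGpu

    // Mirrors the Cmd base in the patch: a type tag plus a virtual execute().
    struct Cmd {
        explicit Cmd(uint8_t type) : fType(type) {}
        virtual ~Cmd() {}
        virtual void execute(FakeGpu*) = 0;
        uint8_t fType;
    };

    struct DrawCmd : public Cmd {
        explicit DrawCmd(int vertexCount) : Cmd(1), fVertexCount(vertexCount) {}
        void execute(FakeGpu*) override { /* issue the draw to the GPU here */ }
        int fVertexCount;
    };

    // Simplified analogue of GrTargetCommands: record now, play back at flush.
    class TargetCommands {
    public:
        template <typename T, typename... Args>
        T* record(Args&&... args) {
            fCmds.emplace_back(new T(std::forward<Args>(args)...));
            return static_cast<T*>(fCmds.back().get());
        }
        void flush(FakeGpu* gpu) {
            for (const auto& cmd : fCmds) {
                cmd->execute(gpu);  // replay in recording order
            }
            this->reset();
        }
        void reset() { fCmds.clear(); }
    private:
        std::vector<std::unique_ptr<Cmd>> fCmds;
    };

The real code records commands by value into a GrTRecorder (and pools vertex/index data separately) to avoid a heap allocation per command; the vector of unique_ptrs above trades that efficiency for brevity.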