| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2011 Google Inc. | 2 * Copyright 2011 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #ifndef GrInOrderDrawBuffer_DEFINED | 8 #ifndef GrInOrderDrawBuffer_DEFINED |
| 9 #define GrInOrderDrawBuffer_DEFINED | 9 #define GrInOrderDrawBuffer_DEFINED |
| 10 | 10 |
| (...skipping 19 matching lines...) |
| 30 /** | 30 /** |
| 31 * GrInOrderDrawBuffer is an implementation of GrDrawTarget that queues up draws for eventual | 31 * GrInOrderDrawBuffer is an implementation of GrDrawTarget that queues up draws for eventual |
| 32 * playback into a GrGpu. In theory one draw buffer could play back into another. When index or | 32 * playback into a GrGpu. In theory one draw buffer could play back into another. When index or |
| 33 * vertex buffers are used as geometry sources, note that the draw buffer only holds | 33 * vertex buffers are used as geometry sources, note that the draw buffer only holds |
| 34 * references to the buffers. It is the caller's responsibility to ensure that the data is still | 34 * references to the buffers. It is the caller's responsibility to ensure that the data is still |
| 35 * valid when the draw buffer is played back into a GrGpu. Similarly, it is the caller's | 35 * valid when the draw buffer is played back into a GrGpu. Similarly, it is the caller's |
| 36 * responsibility to ensure that all referenced textures, buffers, and render-targets are associated | 36 * responsibility to ensure that all referenced textures, buffers, and render-targets are associated |
| 37 * with the GrGpu object that the buffer is played back into. The buffer requires VB and IB pools to | 37 * with the GrGpu object that the buffer is played back into. The buffer requires VB and IB pools to |
| 38 * store geometry. | 38 * store geometry. |
| 39 */ | 39 */ |
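
To illustrate the deferred-playback contract described in the comment above (draws are only recorded, and the caller's geometry must remain valid until flush() replays them into the gpu), here is a minimal self-contained sketch. The names DeferredTarget and QueuedDraw are hypothetical; this is not the Skia API, only the general idea.

// Minimal sketch (hypothetical names, not Skia API) of the deferred-playback idea:
// the buffer records only a pointer to the caller's vertex data, so that data must
// stay valid until flush() has replayed the queued draws.
#include <cstdio>
#include <vector>

struct QueuedDraw {
    const float* vertices;   // borrowed from the caller, not copied
    int          vertexCount;
};

class DeferredTarget {
public:
    void drawVertices(const float* vertices, int vertexCount) {
        fQueue.push_back(QueuedDraw{vertices, vertexCount});   // record only, no gpu work yet
    }
    void flush() {
        for (const QueuedDraw& d : fQueue) {
            // Only here is the caller's data actually read.
            std::printf("draw %d vertices, first x = %f\n", d.vertexCount, d.vertices[0]);
        }
        fQueue.clear();
    }
private:
    std::vector<QueuedDraw> fQueue;
};

int main() {
    std::vector<float> verts = {0.f, 0.f, 1.f, 0.f, 0.f, 1.f};  // must outlive flush()
    DeferredTarget target;
    target.drawVertices(verts.data(), 3);
    target.flush();   // safe: verts is still alive here
    return 0;
}
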
| 40 class GrInOrderDrawBuffer : public GrDrawTarget { | 40 class GrInOrderDrawBuffer : public GrClipTarget { |
| 41 public: | 41 public: |
| 42 | 42 |
| 43 /** | 43 /** |
| 44 * Creates a GrInOrderDrawBuffer | 44 * Creates a GrInOrderDrawBuffer |
| 45 * | 45 * |
| 46 * @param gpu the gpu object that this draw buffer flushes to. | 46 * @param gpu the gpu object that this draw buffer flushes to. |
| 47 * @param vertexPool pool where vertices for queued draws will be saved when | 47 * @param vertexPool pool where vertices for queued draws will be saved when |
| 48 * the vertex source is either reserved or array. | 48 * the vertex source is either reserved or array. |
| 49 * @param indexPool pool where indices for queued draws will be saved when | 49 * @param indexPool pool where indices for queued draws will be saved when |
| 50 * the index source is either reserved or array. | 50 * the index source is either reserved or array. |
| (...skipping 17 matching lines...) |
| 68 * before flushing and popped afterwards. | 68 * before flushing and popped afterwards. |
| 69 */ | 69 */ |
| 70 void flush(); | 70 void flush(); |
| 71 | 71 |
| 72 // tracking for draws | 72 // tracking for draws |
| 73 virtual DrawToken getCurrentDrawToken() { return DrawToken(this, fDrawID); } | 73 virtual DrawToken getCurrentDrawToken() { return DrawToken(this, fDrawID); } |
| 74 | 74 |
| 75 // overrides from GrDrawTarget | 75 // overrides from GrDrawTarget |
| 76 virtual bool geometryHints(int* vertexCount, | 76 virtual bool geometryHints(int* vertexCount, |
| 77 int* indexCount) const SK_OVERRIDE; | 77 int* indexCount) const SK_OVERRIDE; |
| 78 |
| 78 virtual void clear(const SkIRect* rect, | 79 virtual void clear(const SkIRect* rect, |
| 79 GrColor color, | 80 GrColor color, |
| 80 bool canIgnoreRect, | 81 bool canIgnoreRect, |
| 81 GrRenderTarget* renderTarget) SK_OVERRIDE; | 82 GrRenderTarget* renderTarget) SK_OVERRIDE; |
| 82 | 83 |
| 84 virtual void clearStencilClip(const SkIRect& rect, |
| 85 bool insideClip, |
| 86 GrRenderTarget* renderTarget) SK_OVERRIDE; |
| 87 |
| 83 virtual void discard(GrRenderTarget*) SK_OVERRIDE; | 88 virtual void discard(GrRenderTarget*) SK_OVERRIDE; |
| 84 | 89 |
| 85 virtual void initCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) SK_OVERRIDE; | 90 virtual void initCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) SK_OVERRIDE; |
| 86 | 91 |
| 87 protected: | 92 protected: |
| 88 virtual void clipWillBeSet(const GrClipData* newClip) SK_OVERRIDE; | 93 virtual void clipWillBeSet(const GrClipData* newClip) SK_OVERRIDE; |
| 89 | 94 |
| 90 private: | 95 private: |
| 91 enum { | 96 enum { |
| 92 kDraw_Cmd = 1, | 97 kDraw_Cmd = 1, |
| 93 kStencilPath_Cmd = 2, | 98 kStencilPath_Cmd = 2, |
| 94 kSetState_Cmd = 3, | 99 kSetState_Cmd = 3, |
| 95 kSetClip_Cmd = 4, | 100 kSetClip_Cmd = 4, |
| 96 kClear_Cmd = 5, | 101 kClear_Cmd = 5, |
| 97 kCopySurface_Cmd = 6, | 102 kCopySurface_Cmd = 6, |
| 98 kDrawPath_Cmd = 7, | 103 kDrawPath_Cmd = 7, |
| 99 kDrawPaths_Cmd = 8, | 104 kDrawPaths_Cmd = 8, |
| 100 }; | 105 }; |
| 101 | 106 |
| 102 struct Cmd : ::SkNoncopyable { | 107 struct Cmd : ::SkNoncopyable { |
| 103 Cmd(uint8_t type) : fType(type) {} | 108 Cmd(uint8_t type) : fType(type) {} |
| 104 virtual ~Cmd() {} | 109 virtual ~Cmd() {} |
| 105 | 110 |
| 106 virtual void execute(GrDrawTarget*) = 0; | 111 virtual void execute(GrClipTarget*) = 0; |
| 107 | 112 |
| 108 uint8_t fType; | 113 uint8_t fType; |
| 109 }; | 114 }; |
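
The Cmd struct above is the core of the buffer: every queued operation is a small command object with a virtual execute() that receives the playback target, which this change retypes from GrDrawTarget to GrClipTarget. A compact sketch of that pattern, using hypothetical names (PlaybackTarget, ClearCmd, DrawCmd) rather than the real Skia classes:

// Sketch of the command-object pattern (hypothetical names, not Skia classes):
// each recorded operation is a struct with a virtual execute() that takes the
// playback target, so the buffer itself is just a flat, in-order list of commands.
#include <cstdio>
#include <memory>
#include <vector>

struct PlaybackTarget {                  // stands in for the GrClipTarget role
    void clear(unsigned color) { std::printf("clear 0x%08x\n", color); }
    void draw(int count)       { std::printf("draw %d\n", count); }
};

struct Cmd {
    virtual ~Cmd() {}
    virtual void execute(PlaybackTarget*) = 0;   // target is supplied at flush time
};

struct ClearCmd : Cmd {
    explicit ClearCmd(unsigned color) : fColor(color) {}
    void execute(PlaybackTarget* t) override { t->clear(fColor); }
    unsigned fColor;
};

struct DrawCmd : Cmd {
    explicit DrawCmd(int count) : fCount(count) {}
    void execute(PlaybackTarget* t) override { t->draw(fCount); }
    int fCount;
};

int main() {
    std::vector<std::unique_ptr<Cmd>> buffer;
    buffer.push_back(std::unique_ptr<Cmd>(new ClearCmd(0xff0000ffu)));
    buffer.push_back(std::unique_ptr<Cmd>(new DrawCmd(4)));

    PlaybackTarget target;
    for (std::unique_ptr<Cmd>& cmd : buffer) {
        cmd->execute(&target);           // replayed strictly in recording order
    }
    return 0;
}

Passing the target into execute() rather than storing it in each command keeps the commands target-agnostic, so a recorded buffer can be flushed into whatever compatible target is handed to it.
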
| 110 | 115 |
| 111 struct Draw : public Cmd { | 116 struct Draw : public Cmd { |
| 112 Draw(const DrawInfo& info, const GrVertexBuffer* vb, const GrIndexBuffer* ib) | 117 Draw(const DrawInfo& info, const GrVertexBuffer* vb, const GrIndexBuffer* ib) |
| 113 : Cmd(kDraw_Cmd) | 118 : Cmd(kDraw_Cmd) |
| 114 , fInfo(info) | 119 , fInfo(info) |
| 115 , fVertexBuffer(vb) | 120 , fVertexBuffer(vb) |
| 116 , fIndexBuffer(ib) {} | 121 , fIndexBuffer(ib) {} |
| 117 | 122 |
| 118 const GrVertexBuffer* vertexBuffer() const { return fVertexBuffer.get(); } | 123 const GrVertexBuffer* vertexBuffer() const { return fVertexBuffer.get(); } |
| 119 const GrIndexBuffer* indexBuffer() const { return fIndexBuffer.get(); } | 124 const GrIndexBuffer* indexBuffer() const { return fIndexBuffer.get(); } |
| 120 | 125 |
| 121 virtual void execute(GrDrawTarget*); | 126 virtual void execute(GrClipTarget*); |
| 122 | 127 |
| 123 DrawInfo fInfo; | 128 DrawInfo fInfo; |
| 124 | 129 |
| 125 private: | 130 private: |
| 126 GrPendingIOResource<const GrVertexBuffer, kRead_GrIOType> fVertexBuffer; | 131 GrPendingIOResource<const GrVertexBuffer, kRead_GrIOType> fVertexBuffer; |
| 127 GrPendingIOResource<const GrIndexBuffer, kRead_GrIOType> fIndexBuffer; | 132 GrPendingIOResource<const GrIndexBuffer, kRead_GrIOType> fIndexBuffer; |
| 128 }; | 133 }; |
| 129 | 134 |
| 130 struct StencilPath : public Cmd { | 135 struct StencilPath : public Cmd { |
| 131 StencilPath(const GrPath* path) : Cmd(kStencilPath_Cmd), fPath(path) {} | 136 StencilPath(const GrPath* path) : Cmd(kStencilPath_Cmd), fPath(path) {} |
| 132 | 137 |
| 133 const GrPath* path() const { return fPath.get(); } | 138 const GrPath* path() const { return fPath.get(); } |
| 134 | 139 |
| 135 virtual void execute(GrDrawTarget*); | 140 virtual void execute(GrClipTarget*); |
| 136 | 141 |
| 137 SkPath::FillType fFill; | 142 SkPath::FillType fFill; |
| 138 | 143 |
| 139 private: | 144 private: |
| 140 GrPendingIOResource<const GrPath, kRead_GrIOType> fPath; | 145 GrPendingIOResource<const GrPath, kRead_GrIOType> fPath; |
| 141 }; | 146 }; |
| 142 | 147 |
| 143 struct DrawPath : public Cmd { | 148 struct DrawPath : public Cmd { |
| 144 DrawPath(const GrPath* path) : Cmd(kDrawPath_Cmd), fPath(path) {} | 149 DrawPath(const GrPath* path) : Cmd(kDrawPath_Cmd), fPath(path) {} |
| 145 | 150 |
| 146 const GrPath* path() const { return fPath.get(); } | 151 const GrPath* path() const { return fPath.get(); } |
| 147 | 152 |
| 148 virtual void execute(GrDrawTarget*); | 153 virtual void execute(GrClipTarget*); |
| 149 | 154 |
| 150 SkPath::FillType fFill; | 155 SkPath::FillType fFill; |
| 151 GrDeviceCoordTexture fDstCopy; | 156 GrDeviceCoordTexture fDstCopy; |
| 152 | 157 |
| 153 private: | 158 private: |
| 154 GrPendingIOResource<const GrPath, kRead_GrIOType> fPath; | 159 GrPendingIOResource<const GrPath, kRead_GrIOType> fPath; |
| 155 }; | 160 }; |
| 156 | 161 |
| 157 struct DrawPaths : public Cmd { | 162 struct DrawPaths : public Cmd { |
| 158 DrawPaths(const GrPathRange* pathRange) : Cmd(kDrawPaths_Cmd), fPathRange(pathRange) {} | 163 DrawPaths(const GrPathRange* pathRange) : Cmd(kDrawPaths_Cmd), fPathRange(pathRange) {} |
| 159 | 164 |
| 160 const GrPathRange* pathRange() const { return fPathRange.get(); } | 165 const GrPathRange* pathRange() const { return fPathRange.get(); } |
| 161 uint32_t* indices() { return reinterpret_cast<uint32_t*>(CmdBuffer::GetDataForItem(this)); } | 166 uint32_t* indices() { return reinterpret_cast<uint32_t*>(CmdBuffer::GetDataForItem(this)); } |
| 162 float* transforms() { return reinterpret_cast<float*>(&this->indices()[fCount]); } | 167 float* transforms() { return reinterpret_cast<float*>(&this->indices()[fCount]); } |
| 163 | 168 |
| 164 virtual void execute(GrDrawTarget*); | 169 virtual void execute(GrClipTarget*); |
| 165 | 170 |
| 166 size_t fCount; | 171 size_t fCount; |
| 167 PathTransformType fTransformsType; | 172 PathTransformType fTransformsType; |
| 168 SkPath::FillType fFill; | 173 SkPath::FillType fFill; |
| 169 GrDeviceCoordTexture fDstCopy; | 174 GrDeviceCoordTexture fDstCopy; |
| 170 | 175 |
| 171 private: | 176 private: |
| 172 GrPendingIOResource<const GrPathRange, kRead_GrIOType> fPathRange; | 177 GrPendingIOResource<const GrPathRange, kRead_GrIOType> fPathRange; |
| 173 }; | 178 }; |
| 174 | 179 |
| 175 // This is also used to record a discard by setting the color to GrColor_ILLEGAL | 180 // This is also used to record a discard by setting the color to GrColor_ILLEGAL |
| 176 struct Clear : public Cmd { | 181 struct Clear : public Cmd { |
| 177 Clear(GrRenderTarget* rt) : Cmd(kClear_Cmd), fRenderTarget(rt) {} | 182 Clear(GrRenderTarget* rt) : Cmd(kClear_Cmd), fRenderTarget(rt) {} |
| 178 | 183 |
| 179 GrRenderTarget* renderTarget() const { return fRenderTarget.get(); } | 184 GrRenderTarget* renderTarget() const { return fRenderTarget.get(); } |
| 180 | 185 |
| 181 virtual void execute(GrDrawTarget*); | 186 virtual void execute(GrClipTarget*); |
| 182 | 187 |
| 183 SkIRect fRect; | 188 SkIRect fRect; |
| 184 GrColor fColor; | 189 GrColor fColor; |
| 185 bool fCanIgnoreRect; | 190 bool fCanIgnoreRect; |
| 186 | 191 |
| 187 private: | 192 private: |
| 188 GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget; | 193 GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget; |
| 189 }; | 194 }; |
| 190 | 195 |
| 196 // This command is ONLY used by the clip mask manager to clear the stencil clip bits |
| 197 struct ClearStencilClip : public Cmd { |
| 198 ClearStencilClip(GrRenderTarget* rt) : Cmd(kClear_Cmd), fRenderTarget(rt) {} |
| 199 |
| 200 GrRenderTarget* renderTarget() const { return fRenderTarget.get(); } |
| 201 |
| 202 virtual void execute(GrClipTarget*); |
| 203 |
| 204 SkIRect fRect; |
| 205 bool fInsideClip; |
| 206 |
| 207 private: |
| 208 GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget; |
| 209 }; |
| 210 |
| 191 struct CopySurface : public Cmd { | 211 struct CopySurface : public Cmd { |
| 192 CopySurface(GrSurface* dst, GrSurface* src) : Cmd(kCopySurface_Cmd), fDst(dst), fSrc(src) {} | 212 CopySurface(GrSurface* dst, GrSurface* src) : Cmd(kCopySurface_Cmd), fDst(dst), fSrc(src) {} |
| 193 | 213 |
| 194 GrSurface* dst() const { return fDst.get(); } | 214 GrSurface* dst() const { return fDst.get(); } |
| 195 GrSurface* src() const { return fSrc.get(); } | 215 GrSurface* src() const { return fSrc.get(); } |
| 196 | 216 |
| 197 virtual void execute(GrDrawTarget*); | 217 virtual void execute(GrClipTarget*); |
| 198 | 218 |
| 199 SkIPoint fDstPoint; | 219 SkIPoint fDstPoint; |
| 200 SkIRect fSrcRect; | 220 SkIRect fSrcRect; |
| 201 | 221 |
| 202 private: | 222 private: |
| 203 GrPendingIOResource<GrSurface, kWrite_GrIOType> fDst; | 223 GrPendingIOResource<GrSurface, kWrite_GrIOType> fDst; |
| 204 GrPendingIOResource<GrSurface, kRead_GrIOType> fSrc; | 224 GrPendingIOResource<GrSurface, kRead_GrIOType> fSrc; |
| 205 }; | 225 }; |
| 206 | 226 |
| 207 struct SetState : public Cmd { | 227 struct SetState : public Cmd { |
| 208 SetState(const GrDrawState& state) : Cmd(kSetState_Cmd), fState(state) {} | 228 SetState(const GrDrawState& state) : Cmd(kSetState_Cmd), fState(state) {} |
| 209 | 229 |
| 210 virtual void execute(GrDrawTarget*); | 230 virtual void execute(GrClipTarget*); |
| 211 | 231 |
| 212 GrDrawState fState; | 232 GrDrawState fState; |
| 213 }; | 233 }; |
| 214 | 234 |
| 215 struct SetClip : public Cmd { | 235 struct SetClip : public Cmd { |
| 216 SetClip(const GrClipData* clipData) | 236 SetClip(const GrClipData* clipData) |
| 217 : Cmd(kSetClip_Cmd), | 237 : Cmd(kSetClip_Cmd), |
| 218 fStackStorage(*clipData->fClipStack) { | 238 fStackStorage(*clipData->fClipStack) { |
| 219 fClipData.fClipStack = &fStackStorage; | 239 fClipData.fClipStack = &fStackStorage; |
| 220 fClipData.fOrigin = clipData->fOrigin; | 240 fClipData.fOrigin = clipData->fOrigin; |
| 221 } | 241 } |
| 222 | 242 |
| 223 virtual void execute(GrDrawTarget*); | 243 virtual void execute(GrClipTarget*); |
| 224 | 244 |
| 225 GrClipData fClipData; | 245 GrClipData fClipData; |
| 226 | 246 |
| 227 private: | 247 private: |
| 228 SkClipStack fStackStorage; | 248 SkClipStack fStackStorage; |
| 229 }; | 249 }; |
| 230 | 250 |
| 231 typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double. | 251 typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double. |
| 232 typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer; | 252 typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer; |
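
The TCmdAlign comment above is about alignment: using void* as the recorder's alignment type gives commands pointer alignment, which on common 64-bit targets is weaker than what a long double member would need. A tiny standalone check of that claim (exact values vary by platform and compiler):

// Quick illustration of the alignment caveat: void* alignment (what TCmdAlign provides)
// is typically smaller than the alignment long double requires, so a command holding a
// long double could end up under-aligned in the recorder.
#include <cstdio>

int main() {
    std::printf("alignof(void*)       = %zu\n", alignof(void*));        // commonly 8
    std::printf("alignof(long double) = %zu\n", alignof(long double));  // commonly 16 on x86-64 Linux
    return 0;
}
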
| 233 | 253 |
| (...skipping 59 matching lines...) |
| 293 enum { | 313 enum { |
| 294 kCmdBufferInitialSizeInBytes = 64 * 1024, | 314 kCmdBufferInitialSizeInBytes = 64 * 1024, |
| 295 kGeoPoolStatePreAllocCnt = 4, | 315 kGeoPoolStatePreAllocCnt = 4, |
| 296 }; | 316 }; |
| 297 | 317 |
| 298 CmdBuffer fCmdBuffer; | 318 CmdBuffer fCmdBuffer; |
| 299 GrDrawState* fLastState; | 319 GrDrawState* fLastState; |
| 300 GrClipData* fLastClip; | 320 GrClipData* fLastClip; |
| 301 | 321 |
| 302 SkTArray<GrTraceMarkerSet, false> fGpuCmdMarkers; | 322 SkTArray<GrTraceMarkerSet, false> fGpuCmdMarkers; |
| 303 GrDrawTarget* fDstGpu; | 323 GrClipTarget* fDstGpu; |
| 304 bool fClipSet; | 324 bool fClipSet; |
| 305 | 325 |
| 306 enum ClipProxyState { | 326 enum ClipProxyState { |
| 307 kUnknown_ClipProxyState, | 327 kUnknown_ClipProxyState, |
| 308 kValid_ClipProxyState, | 328 kValid_ClipProxyState, |
| 309 kInvalid_ClipProxyState | 329 kInvalid_ClipProxyState |
| 310 }; | 330 }; |
| 311 | 331 |
| 312 ClipProxyState fClipProxyState; | 332 ClipProxyState fClipProxyState; |
| 313 SkRect fClipProxy; | 333 SkRect fClipProxy; |
| (...skipping 11 matching lines...) |
| 325 size_t fUsedPoolVertexBytes; | 345 size_t fUsedPoolVertexBytes; |
| 326 size_t fUsedPoolIndexBytes; | 346 size_t fUsedPoolIndexBytes; |
| 327 }; | 347 }; |
| 328 | 348 |
| 329 typedef SkSTArray<kGeoPoolStatePreAllocCnt, GeometryPoolState> GeoPoolStateStack; | 349 typedef SkSTArray<kGeoPoolStatePreAllocCnt, GeometryPoolState> GeoPoolStateStack; |
| 330 | 350 |
| 331 GeoPoolStateStack fGeoPoolStateStack; | 351 GeoPoolStateStack fGeoPoolStateStack; |
| 332 bool fFlushing; | 352 bool fFlushing; |
| 333 uint32_t fDrawID; | 353 uint32_t fDrawID; |
| 334 | 354 |
| 335 typedef GrDrawTarget INHERITED; | 355 typedef GrClipTarget INHERITED; |
| 336 }; | 356 }; |
| 337 | 357 |
| 338 #endif | 358 #endif |