OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright 2011 Google Inc. | 2 * Copyright 2011 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef GrInOrderDrawBuffer_DEFINED | 8 #ifndef GrInOrderDrawBuffer_DEFINED |
9 #define GrInOrderDrawBuffer_DEFINED | 9 #define GrInOrderDrawBuffer_DEFINED |
10 | 10 |
11 #include "GrFlushToGpuDrawTarget.h" | 11 #include "GrFlushToGpuDrawTarget.h" |
12 | |
13 #include "GrBatch.h" | |
12 #include "GrOptDrawState.h" | 14 #include "GrOptDrawState.h" |
13 #include "GrPath.h" | 15 #include "GrPath.h" |
14 #include "GrTRecorder.h" | 16 #include "GrTRecorder.h" |
15 | 17 |
16 /** | 18 /** |
17 * GrInOrderDrawBuffer is an implementation of GrDrawTarget that queues up draws for eventual | 19 * GrInOrderDrawBuffer is an implementation of GrDrawTarget that queues up draws for eventual |
18 * playback into a GrGpu. In theory one draw buffer could playback into another. When index or | 20 * playback into a GrGpu. In theory one draw buffer could playback into another. When index or |
19 * vertex buffers are used as geometry sources, the draw buffer only holds | 21 * vertex buffers are used as geometry sources, the draw buffer only holds |
20 * references to the buffers. It is the caller's responsibility to ensure that the data is still | 22 * references to the buffers. It is the caller's responsibility to ensure that the data is still |
21 * valid when the draw buffer is played back into a GrGpu. Similarly, it is the caller's | 23 * valid when the draw buffer is played back into a GrGpu. Similarly, it is the caller's |
(...skipping 23 matching lines...) | |
45 DrawToken getCurrentDrawToken() SK_OVERRIDE { return DrawToken(this, fDrawID); } | 47 DrawToken getCurrentDrawToken() SK_OVERRIDE { return DrawToken(this, fDrawID); } |
46 | 48 |
47 void clearStencilClip(const SkIRect& rect, | 49 void clearStencilClip(const SkIRect& rect, |
48 bool insideClip, | 50 bool insideClip, |
49 GrRenderTarget* renderTarget) SK_OVERRIDE; | 51 GrRenderTarget* renderTarget) SK_OVERRIDE; |
50 | 52 |
51 void discard(GrRenderTarget*) SK_OVERRIDE; | 53 void discard(GrRenderTarget*) SK_OVERRIDE; |
52 | 54 |
53 private: | 55 private: |
54 enum { | 56 enum { |
55 kDraw_Cmd = 1, | 57 kDraw_Cmd = 1, |
56 kStencilPath_Cmd = 2, | 58 kStencilPath_Cmd = 2, |
57 kSetState_Cmd = 3, | 59 kSetState_Cmd = 3, |
58 kClear_Cmd = 4, | 60 kClear_Cmd = 4, |
59 kCopySurface_Cmd = 5, | 61 kCopySurface_Cmd = 5, |
60 kDrawPath_Cmd = 6, | 62 kDrawPath_Cmd = 6, |
61 kDrawPaths_Cmd = 7, | 63 kDrawPaths_Cmd = 7, |
64 kBatchDraw = 8, | |
62 }; | 65 }; |
63 | 66 |
64 struct Cmd : ::SkNoncopyable { | 67 struct Cmd : ::SkNoncopyable { |
65 Cmd(uint8_t type) : fType(type) {} | 68 Cmd(uint8_t type) : fType(type) {} |
66 virtual ~Cmd() {} | 69 virtual ~Cmd() {} |
67 | 70 |
68 virtual void execute(GrInOrderDrawBuffer*, const GrOptDrawState*) = 0; | 71 virtual void execute(GrInOrderDrawBuffer*, const GrOptDrawState*) = 0; |
69 | 72 |
70 uint8_t fType; | 73 uint8_t fType; |
71 }; | 74 }; |
(...skipping 97 matching lines...) | |
169 | 172 |
170 SkIPoint fDstPoint; | 173 SkIPoint fDstPoint; |
171 SkIRect fSrcRect; | 174 SkIRect fSrcRect; |
172 | 175 |
173 private: | 176 private: |
174 GrPendingIOResource<GrSurface, kWrite_GrIOType> fDst; | 177 GrPendingIOResource<GrSurface, kWrite_GrIOType> fDst; |
175 GrPendingIOResource<GrSurface, kRead_GrIOType> fSrc; | 178 GrPendingIOResource<GrSurface, kRead_GrIOType> fSrc; |
176 }; | 179 }; |
177 | 180 |
178 struct SetState : public Cmd { | 181 struct SetState : public Cmd { |
182 // TODO get rid of the prim proc version of this when we use batch everywhere | |
179 SetState(const GrDrawState& drawState, const GrPrimitiveProcessor* primProc, | 183 SetState(const GrDrawState& drawState, const GrPrimitiveProcessor* primProc, |
180 const GrDrawTargetCaps& caps, | 184 const GrDrawTargetCaps& caps, |
181 const GrScissorState& scissor, const GrDeviceCoordTexture* dstCopy, | 185 const GrScissorState& scissor, const GrDeviceCoordTexture* dstCopy, |
182 GrGpu::DrawType drawType) | 186 GrGpu::DrawType drawType) |
183 : Cmd(kSetState_Cmd) | 187 : Cmd(kSetState_Cmd) |
184 , fState(drawState, primProc, caps, scissor, dstCopy, drawType) {} | 188 , fState(drawState, primProc, caps, scissor, dstCopy, drawType) {} |
185 | 189 |
190 SetState(GrBatch* batch, | |
191 const GrDrawState& drawState, | |
192 const GrDrawTargetCaps& caps, | |
193 const GrScissorState& scissor, const GrDeviceCoordTexture* dstCopy, | |
194 GrGpu::DrawType drawType) | |
195 : Cmd(kSetState_Cmd) | |
196 , fState(batch, drawState, caps, scissor, dstCopy, drawType) {} | |
197 | |
186 void execute(GrInOrderDrawBuffer*, const GrOptDrawState*) SK_OVERRIDE; | 198 void execute(GrInOrderDrawBuffer*, const GrOptDrawState*) SK_OVERRIDE; |
187 | 199 |
188 GrOptDrawState fState; | 200 GrOptDrawState fState; |
189 }; | 201 }; |
190 | 202 |
203 struct BatchDraw : public Cmd { | |
204 BatchDraw(GrBatch* batch) : Cmd(kBatchDraw), fBatch(batch) {} | |
205 | |
206 void execute(GrInOrderDrawBuffer*, const GrOptDrawState*) SK_OVERRIDE; | |
207 | |
208 // TODO it wouldn't be too hard to let batches allocate in the cmd buffer | |
bsalomon
2015/01/20 16:14:03
as opposed to in the mem pool? should grbatch be r
joshualitt
2015/01/20 17:03:08
Right, as opposed to the mem pool. I don't think
| |
209 SkAutoTDelete<GrBatch> fBatch; | |
210 | |
bsalomon
2015/01/20 16:14:03
extra \n
| |
211 }; | |
212 | |
191 typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double. | 213 typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double. |
192 typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer; | 214 typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer; |
193 | 215 |
194 void onReset() SK_OVERRIDE; | 216 void onReset() SK_OVERRIDE; |
195 void onFlush() SK_OVERRIDE; | 217 void onFlush() SK_OVERRIDE; |
196 | 218 |
197 // overrides from GrDrawTarget | 219 // overrides from GrDrawTarget |
198 void onDraw(const GrDrawState&, | 220 void onDraw(const GrDrawState&, |
199 const GrGeometryProcessor*, | 221 const GrGeometryProcessor*, |
200 const DrawInfo&, | 222 const DrawInfo&, |
201 const GrScissorState&, | 223 const GrScissorState&, |
202 const GrDeviceCoordTexture* dstCopy) SK_OVERRIDE; | 224 const GrDeviceCoordTexture* dstCopy) SK_OVERRIDE; |
225 void onBatchDraw(GrBatch*, | |
226 const GrDrawState&, | |
227 GrPrimitiveType type, | |
228 const GrScissorState&, | |
229 const GrDeviceCoordTexture* dstCopy) SK_OVERRIDE; | |
203 void onDrawRect(GrDrawState*, | 230 void onDrawRect(GrDrawState*, |
204 GrColor, | 231 GrColor, |
205 const SkMatrix& viewMatrix, | 232 const SkMatrix& viewMatrix, |
206 const SkRect& rect, | 233 const SkRect& rect, |
207 const SkRect* localRect, | 234 const SkRect* localRect, |
208 const SkMatrix* localMatrix) SK_OVERRIDE; | 235 const SkMatrix* localMatrix) SK_OVERRIDE; |
209 | 236 |
210 void onStencilPath(const GrDrawState&, | 237 void onStencilPath(const GrDrawState&, |
211 const GrPathProcessor*, | 238 const GrPathProcessor*, |
212 const GrPath*, | 239 const GrPath*, |
(...skipping 25 matching lines...) | |
238 const SkIRect& srcRect, | 265 const SkIRect& srcRect, |
239 const SkIPoint& dstPoint) SK_OVERRIDE; | 266 const SkIPoint& dstPoint) SK_OVERRIDE; |
240 | 267 |
241 // Attempts to concat instances from info onto the previous draw. info must represent an | 268 // Attempts to concat instances from info onto the previous draw. info must represent an |
242 // instanced draw. The caller must have already recorded a new draw state and clip if necessary. | 269 // instanced draw. The caller must have already recorded a new draw state and clip if necessary. |
243 int concatInstancedDraw(const GrDrawState&, const DrawInfo&); | 270 int concatInstancedDraw(const GrDrawState&, const DrawInfo&); |
244 | 271 |
245 // Determines whether the current draw operation requires a new GrOptDrawState and if so | 272 // Determines whether the current draw operation requires a new GrOptDrawState and if so |
246 // records it. If the draw can be skipped false is returned and no new GrOptDrawState is | 273 // records it. If the draw can be skipped false is returned and no new GrOptDrawState is |
247 // recorded. | 274 // recorded. |
275 // TODO delete the primproc variant when we have batches everywhere | |
248 bool SK_WARN_UNUSED_RESULT recordStateAndShouldDraw(const GrDrawState&, | 276 bool SK_WARN_UNUSED_RESULT recordStateAndShouldDraw(const GrDrawState&, |
249 const GrPrimitiveProcessor*, | 277 const GrPrimitiveProcessor*, |
250 GrGpu::DrawType, | 278 GrGpu::DrawType, |
251 const GrScissorState&, | 279 const GrScissorState&, |
252 const GrDeviceCoordTexture*); | 280 const GrDeviceCoordTexture*); |
281 bool SK_WARN_UNUSED_RESULT recordStateAndShouldDraw(GrBatch*, | |
282 const GrDrawState&, | |
283 GrGpu::DrawType, | |
284 const GrScissorState&, | |
285 const GrDeviceCoordTexture*); | |
286 bool SK_WARN_UNUSED_RESULT recordStateAndShouldDraw(SetState*); | |
287 | |
288 | |
253 // We lazily record clip changes in order to skip clips that have no effect. | 289 // We lazily record clip changes in order to skip clips that have no effect. |
254 void recordClipIfNecessary(); | 290 void recordClipIfNecessary(); |
255 // Records any trace markers for a command after adding it to the buffer. | 291 // Records any trace markers for a command after adding it to the buffer. |
256 void recordTraceMarkersIfNecessary(); | 292 void recordTraceMarkersIfNecessary(); |
257 | 293 |
258 bool isIssued(uint32_t drawID) SK_OVERRIDE { return drawID != fDrawID; } | 294 bool isIssued(uint32_t drawID) SK_OVERRIDE { return drawID != fDrawID; } |
259 | 295 |
260 // TODO: Use a single allocator for commands and records | 296 // TODO: Use a single allocator for commands and records |
261 enum { | 297 enum { |
262 kCmdBufferInitialSizeInBytes = 8 * 1024, | 298 kCmdBufferInitialSizeInBytes = 8 * 1024, |
263 kPathIdxBufferMinReserve = 2 * 64, // 64 uint16_t's | 299 kPathIdxBufferMinReserve = 2 * 64, // 64 uint16_t's |
264 kPathXformBufferMinReserve = 2 * 64, // 64 two-float transforms | 300 kPathXformBufferMinReserve = 2 * 64, // 64 two-float transforms |
265 }; | 301 }; |
266 | 302 |
267 CmdBuffer fCmdBuffer; | 303 CmdBuffer fCmdBuffer; |
268 GrOptDrawState* fPrevState; | 304 GrOptDrawState* fPrevState; |
269 SkTArray<GrTraceMarkerSet, false> fGpuCmdMarkers; | 305 SkTArray<GrTraceMarkerSet, false> fGpuCmdMarkers; |
270 SkTDArray<char> fPathIndexBuffer; | 306 SkTDArray<char> fPathIndexBuffer; |
271 SkTDArray<float> fPathTransformBuffer; | 307 SkTDArray<float> fPathTransformBuffer; |
272 uint32_t fDrawID; | 308 uint32_t fDrawID; |
273 | 309 |
274 typedef GrFlushToGpuDrawTarget INHERITED; | 310 typedef GrFlushToGpuDrawTarget INHERITED; |
275 }; | 311 }; |
276 | 312 |
277 #endif | 313 #endif |
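For readers skimming the header, the structure this patch extends is a record-then-playback command buffer: every draw, clear, copy, state change, and (with this change) batch draw is recorded as a small Cmd subclass into the CmdBuffer (a GrTRecorder) and executed in order against a GrGpu when the target is flushed. The sketch below is a minimal illustration of that pattern only; the names (Gpu, RecordingTarget, ClearCmd, DrawCmd) are placeholders invented for this example, not Skia's API, and the real code allocates commands in the GrTRecorder arena rather than a std::vector.

```cpp
// Minimal sketch of the record-then-playback pattern used by GrInOrderDrawBuffer.
// All names here are illustrative stand-ins, not Skia types.
#include <cstdio>
#include <memory>
#include <vector>

struct Gpu {  // stand-in for GrGpu
    void clear(unsigned argb)  { std::printf("clear 0x%08x\n", argb); }
    void draw(int vertexCount) { std::printf("draw %d vertices\n", vertexCount); }
};

struct Cmd {  // mirrors the Cmd base struct above: one virtual execute() per recorded command
    virtual ~Cmd() {}
    virtual void execute(Gpu*) = 0;
};

struct ClearCmd : Cmd {
    explicit ClearCmd(unsigned argb) : fColor(argb) {}
    void execute(Gpu* gpu) override { gpu->clear(fColor); }
    unsigned fColor;
};

struct DrawCmd : Cmd {
    explicit DrawCmd(int count) : fVertexCount(count) {}
    void execute(Gpu* gpu) override { gpu->draw(fVertexCount); }
    int fVertexCount;
};

class RecordingTarget {  // stand-in for GrInOrderDrawBuffer
public:
    void clear(unsigned argb) { fCmds.emplace_back(new ClearCmd(argb)); }
    void draw(int count)      { fCmds.emplace_back(new DrawCmd(count)); }

    // Playback in recorded order, then reset -- analogous to onFlush()/onReset().
    void flush(Gpu* gpu) {
        for (auto& cmd : fCmds) {
            cmd->execute(gpu);
        }
        fCmds.clear();
    }

private:
    std::vector<std::unique_ptr<Cmd>> fCmds;  // the real code records into a GrTRecorder arena
};

int main() {
    Gpu gpu;
    RecordingTarget target;
    target.clear(0xff000000);
    target.draw(4);
    target.draw(6);
    target.flush(&gpu);  // commands reach the GPU only now, in the order they were recorded
    return 0;
}
```

Recording commands into a contiguous arena (as GrTRecorder does) avoids a heap allocation per command and keeps playback cache-friendly, which appears to be the point of the reviewer exchange above about letting batches allocate in the cmd buffer instead of the memory pool.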