OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright 2011 Google Inc. | 2 * Copyright 2011 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef GrInOrderDrawBuffer_DEFINED | 8 #ifndef GrInOrderDrawBuffer_DEFINED |
9 #define GrInOrderDrawBuffer_DEFINED | 9 #define GrInOrderDrawBuffer_DEFINED |
10 | 10 |
(...skipping 37 matching lines...) | |
48 * the vertex source is either reserved or array. | 48 * the vertex source is either reserved or array. |
49 * @param indexPool pool where indices for queued draws will be saved when | 49 * @param indexPool pool where indices for queued draws will be saved when |
50 * the index source is either reserved or array. | 50 * the index source is either reserved or array. |
51 */ | 51 */ |
52 GrInOrderDrawBuffer(GrGpu* gpu, | 52 GrInOrderDrawBuffer(GrGpu* gpu, |
53 GrVertexBufferAllocPool* vertexPool, | 53 GrVertexBufferAllocPool* vertexPool, |
54 GrIndexBufferAllocPool* indexPool); | 54 GrIndexBufferAllocPool* indexPool); |
55 | 55 |
56 virtual ~GrInOrderDrawBuffer(); | 56 virtual ~GrInOrderDrawBuffer(); |
57 | 57 |
58 GrGpu* dstGpu() const { return fDstGpu; } | |
bsalomon
2014/11/13 17:36:05
Does this need to be public? Otherwise looks good.
Chris Dalton
2014/11/13 19:05:43
Done.
| |
59 const uint32_t* pathIndexBuffer() const { return fPathIndexBuffer.begin(); } | |
60 const float* pathTransformBuffer() const { return fPathTransformBuffer.begin(); } | |
61 | |
58 /** | 62 /** |
59 * Empties the draw buffer of any queued up draws. This must not be called while inside an | 63 * Empties the draw buffer of any queued up draws. This must not be called while inside an |
60 * unbalanced pushGeometrySource(). The current draw state and clip are preserved. | 64 * unbalanced pushGeometrySource(). The current draw state and clip are preserved. |
61 */ | 65 */ |
62 void reset(); | 66 void reset(); |
63 | 67 |
64 /** | 68 /** |
65 * This plays the queued up draws to its GrGpu target. It also resets this object (i.e. flushing | 69 * This plays the queued up draws to its GrGpu target. It also resets this object (i.e. flushing |
66 * is destructive). This buffer must not have an active reserved vertex or index source. Any | 70 * is destructive). This buffer must not have an active reserved vertex or index source. Any |
67 * reserved geometry on the target will be finalized because its geometry source will be pushed | 71 * reserved geometry on the target will be finalized because its geometry source will be pushed |
(...skipping 35 matching lines...) | |
103 kClear_Cmd = 4, | 107 kClear_Cmd = 4, |
104 kCopySurface_Cmd = 5, | 108 kCopySurface_Cmd = 5, |
105 kDrawPath_Cmd = 6, | 109 kDrawPath_Cmd = 6, |
106 kDrawPaths_Cmd = 7, | 110 kDrawPaths_Cmd = 7, |
107 }; | 111 }; |
108 | 112 |
109 struct Cmd : ::SkNoncopyable { | 113 struct Cmd : ::SkNoncopyable { |
110 Cmd(uint8_t type) : fType(type) {} | 114 Cmd(uint8_t type) : fType(type) {} |
111 virtual ~Cmd() {} | 115 virtual ~Cmd() {} |
112 | 116 |
113 virtual void execute(GrGpu*, const GrOptDrawState*) = 0; | 117 virtual void execute(GrInOrderDrawBuffer*, const GrOptDrawState*) = 0; |
114 | 118 |
115 uint8_t fType; | 119 uint8_t fType; |
116 }; | 120 }; |
117 | 121 |
118 struct Draw : public Cmd { | 122 struct Draw : public Cmd { |
119 Draw(const DrawInfo& info, | 123 Draw(const DrawInfo& info, |
120 const ScissorState& scissorState, | 124 const ScissorState& scissorState, |
121 const GrVertexBuffer* vb, | 125 const GrVertexBuffer* vb, |
122 const GrIndexBuffer* ib) | 126 const GrIndexBuffer* ib) |
123 : Cmd(kDraw_Cmd) | 127 : Cmd(kDraw_Cmd) |
124 , fInfo(info) | 128 , fInfo(info) |
125 , fScissorState(scissorState) | 129 , fScissorState(scissorState) |
126 , fVertexBuffer(vb) | 130 , fVertexBuffer(vb) |
127 , fIndexBuffer(ib) {} | 131 , fIndexBuffer(ib) {} |
128 | 132 |
129 const GrVertexBuffer* vertexBuffer() const { return fVertexBuffer.get(); } | 133 const GrVertexBuffer* vertexBuffer() const { return fVertexBuffer.get(); } |
130 const GrIndexBuffer* indexBuffer() const { return fIndexBuffer.get(); } | 134 const GrIndexBuffer* indexBuffer() const { return fIndexBuffer.get(); } |
131 | 135 |
132 virtual void execute(GrGpu*, const GrOptDrawState*); | 136 virtual void execute(GrInOrderDrawBuffer*, const GrOptDrawState*); |
133 | 137 |
134 DrawInfo fInfo; | 138 DrawInfo fInfo; |
135 ScissorState fScissorState; | 139 ScissorState fScissorState; |
136 | 140 |
137 private: | 141 private: |
138 GrPendingIOResource<const GrVertexBuffer, kRead_GrIOType> fVertexBuffer; | 142 GrPendingIOResource<const GrVertexBuffer, kRead_GrIOType> fVertexBuffer; |
139 GrPendingIOResource<const GrIndexBuffer, kRead_GrIOType> fIndexBuffer; | 143 GrPendingIOResource<const GrIndexBuffer, kRead_GrIOType> fIndexBuffer; |
140 }; | 144 }; |
141 | 145 |
142 struct StencilPath : public Cmd { | 146 struct StencilPath : public Cmd { |
143 StencilPath(const GrPath* path) : Cmd(kStencilPath_Cmd), fPath(path) {} | 147 StencilPath(const GrPath* path) : Cmd(kStencilPath_Cmd), fPath(path) {} |
144 | 148 |
145 const GrPath* path() const { return fPath.get(); } | 149 const GrPath* path() const { return fPath.get(); } |
146 | 150 |
147 virtual void execute(GrGpu*, const GrOptDrawState*); | 151 virtual void execute(GrInOrderDrawBuffer*, const GrOptDrawState*); |
148 | 152 |
149 ScissorState fScissorState; | 153 ScissorState fScissorState; |
150 GrStencilSettings fStencilSettings; | 154 GrStencilSettings fStencilSettings; |
151 | 155 |
152 private: | 156 private: |
153 GrPendingIOResource<const GrPath, kRead_GrIOType> fPath; | 157 GrPendingIOResource<const GrPath, kRead_GrIOType> fPath; |
154 }; | 158 }; |
155 | 159 |
156 struct DrawPath : public Cmd { | 160 struct DrawPath : public Cmd { |
157 DrawPath(const GrPath* path) : Cmd(kDrawPath_Cmd), fPath(path) {} | 161 DrawPath(const GrPath* path) : Cmd(kDrawPath_Cmd), fPath(path) {} |
158 | 162 |
159 const GrPath* path() const { return fPath.get(); } | 163 const GrPath* path() const { return fPath.get(); } |
160 | 164 |
161 virtual void execute(GrGpu*, const GrOptDrawState*); | 165 virtual void execute(GrInOrderDrawBuffer*, const GrOptDrawState*); |
162 | 166 |
163 GrDeviceCoordTexture fDstCopy; | 167 GrDeviceCoordTexture fDstCopy; |
164 ScissorState fScissorState; | 168 ScissorState fScissorState; |
165 GrStencilSettings fStencilSettings; | 169 GrStencilSettings fStencilSettings; |
166 | 170 |
167 private: | 171 private: |
168 GrPendingIOResource<const GrPath, kRead_GrIOType> fPath; | 172 GrPendingIOResource<const GrPath, kRead_GrIOType> fPath; |
169 }; | 173 }; |
170 | 174 |
171 struct DrawPaths : public Cmd { | 175 struct DrawPaths : public Cmd { |
172 DrawPaths(const GrPathRange* pathRange) : Cmd(kDrawPaths_Cmd), fPathRange(pathRange) {} | 176 DrawPaths(const GrPathRange* pathRange) : Cmd(kDrawPaths_Cmd), fPathRange(pathRange) {} |
173 | 177 |
174 const GrPathRange* pathRange() const { return fPathRange.get(); } | 178 const GrPathRange* pathRange() const { return fPathRange.get(); } |
175 uint32_t* indices() { return reinterpret_cast<uint32_t*>(CmdBuffer::GetDataForItem(this)); } | |
176 float* transforms() { return reinterpret_cast<float*>(&this->indices()[fCount]); } | |
177 | 179 |
178 virtual void execute(GrGpu*, const GrOptDrawState*); | 180 virtual void execute(GrInOrderDrawBuffer*, const GrOptDrawState*); |
179 | 181 |
182 int fIndicesLocation; | |
180 size_t fCount; | 183 size_t fCount; |
184 int fTransformsLocation; | |
181 PathTransformType fTransformsType; | 185 PathTransformType fTransformsType; |
182 GrDeviceCoordTexture fDstCopy; | 186 GrDeviceCoordTexture fDstCopy; |
183 ScissorState fScissorState; | 187 ScissorState fScissorState; |
184 GrStencilSettings fStencilSettings; | 188 GrStencilSettings fStencilSettings; |
185 | 189 |
186 private: | 190 private: |
187 GrPendingIOResource<const GrPathRange, kRead_GrIOType> fPathRange; | 191 GrPendingIOResource<const GrPathRange, kRead_GrIOType> fPathRange; |
188 }; | 192 }; |
189 | 193 |
190 // This is also used to record a discard by setting the color to GrColor_ILLEGAL | 194 // This is also used to record a discard by setting the color to GrColor_ILLEGAL |
191 struct Clear : public Cmd { | 195 struct Clear : public Cmd { |
192 Clear(GrRenderTarget* rt) : Cmd(kClear_Cmd), fRenderTarget(rt) {} | 196 Clear(GrRenderTarget* rt) : Cmd(kClear_Cmd), fRenderTarget(rt) {} |
193 | 197 |
194 GrRenderTarget* renderTarget() const { return fRenderTarget.get(); } | 198 GrRenderTarget* renderTarget() const { return fRenderTarget.get(); } |
195 | 199 |
196 virtual void execute(GrGpu*, const GrOptDrawState*); | 200 virtual void execute(GrInOrderDrawBuffer*, const GrOptDrawState*); |
197 | 201 |
198 SkIRect fRect; | 202 SkIRect fRect; |
199 GrColor fColor; | 203 GrColor fColor; |
200 bool fCanIgnoreRect; | 204 bool fCanIgnoreRect; |
201 | 205 |
202 private: | 206 private: |
203 GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget; | 207 GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget; |
204 }; | 208 }; |
205 | 209 |
206 // This command is ONLY used by the clip mask manager to clear the stencil clip bits | 210 // This command is ONLY used by the clip mask manager to clear the stencil clip bits |
207 struct ClearStencilClip : public Cmd { | 211 struct ClearStencilClip : public Cmd { |
208 ClearStencilClip(GrRenderTarget* rt) : Cmd(kClear_Cmd), fRenderTarget(rt) {} | 212 ClearStencilClip(GrRenderTarget* rt) : Cmd(kClear_Cmd), fRenderTarget(rt) {} |
209 | 213 |
210 GrRenderTarget* renderTarget() const { return fRenderTarget.get(); } | 214 GrRenderTarget* renderTarget() const { return fRenderTarget.get(); } |
211 | 215 |
212 virtual void execute(GrGpu*, const GrOptDrawState*); | 216 virtual void execute(GrInOrderDrawBuffer*, const GrOptDrawState*); |
213 | 217 |
214 SkIRect fRect; | 218 SkIRect fRect; |
215 bool fInsideClip; | 219 bool fInsideClip; |
216 | 220 |
217 private: | 221 private: |
218 GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget; | 222 GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget; |
219 }; | 223 }; |
220 | 224 |
221 struct CopySurface : public Cmd { | 225 struct CopySurface : public Cmd { |
222 CopySurface(GrSurface* dst, GrSurface* src) : Cmd(kCopySurface_Cmd), fDst(dst), fSrc(src) {} | 226 CopySurface(GrSurface* dst, GrSurface* src) : Cmd(kCopySurface_Cmd), fDst(dst), fSrc(src) {} |
223 | 227 |
224 GrSurface* dst() const { return fDst.get(); } | 228 GrSurface* dst() const { return fDst.get(); } |
225 GrSurface* src() const { return fSrc.get(); } | 229 GrSurface* src() const { return fSrc.get(); } |
226 | 230 |
227 virtual void execute(GrGpu*, const GrOptDrawState*); | 231 virtual void execute(GrInOrderDrawBuffer*, const GrOptDrawState*); |
228 | 232 |
229 SkIPoint fDstPoint; | 233 SkIPoint fDstPoint; |
230 SkIRect fSrcRect; | 234 SkIRect fSrcRect; |
231 | 235 |
232 private: | 236 private: |
233 GrPendingIOResource<GrSurface, kWrite_GrIOType> fDst; | 237 GrPendingIOResource<GrSurface, kWrite_GrIOType> fDst; |
234 GrPendingIOResource<GrSurface, kRead_GrIOType> fSrc; | 238 GrPendingIOResource<GrSurface, kRead_GrIOType> fSrc; |
235 }; | 239 }; |
236 | 240 |
237 struct SetState : public Cmd { | 241 struct SetState : public Cmd { |
238 SetState(const GrDrawState& state) : Cmd(kSetState_Cmd), fState(state) {} | 242 SetState(const GrDrawState& state) : Cmd(kSetState_Cmd), fState(state) {} |
239 | 243 |
240 virtual void execute(GrGpu*, const GrOptDrawState*); | 244 virtual void execute(GrInOrderDrawBuffer*, const GrOptDrawState*); |
241 | 245 |
242 GrDrawState fState; | 246 GrDrawState fState; |
243 GrGpu::DrawType fDrawType; | 247 GrGpu::DrawType fDrawType; |
244 GrDeviceCoordTexture fDstCopy; | 248 GrDeviceCoordTexture fDstCopy; |
245 }; | 249 }; |
246 | 250 |
247 typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double. | 251 typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double. |
248 typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer; | 252 typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer; |
249 | 253 |
250 // overrides from GrDrawTarget | 254 // overrides from GrDrawTarget |
(...skipping 43 matching lines...) | |
294 // We lazily record clip changes in order to skip clips that have no effect. | 298 // We lazily record clip changes in order to skip clips that have no effect. |
295 void recordClipIfNecessary(); | 299 void recordClipIfNecessary(); |
296 // Records any trace markers for a command after adding it to the buffer. | 300 // Records any trace markers for a command after adding it to the buffer. |
297 void recordTraceMarkersIfNecessary(); | 301 void recordTraceMarkersIfNecessary(); |
298 | 302 |
299 virtual bool isIssued(uint32_t drawID) { return drawID != fDrawID; } | 303 virtual bool isIssued(uint32_t drawID) { return drawID != fDrawID; } |
300 | 304 |
301 // TODO: Use a single allocator for commands and records | 305 // TODO: Use a single allocator for commands and records |
302 enum { | 306 enum { |
303 kCmdBufferInitialSizeInBytes = 8 * 1024, | 307 kCmdBufferInitialSizeInBytes = 8 * 1024, |
308 kPathIdxBufferMinReserve = 64, | |
309 kPathXformBufferMinReserve = 2 * kPathIdxBufferMinReserve, | |
304 kGeoPoolStatePreAllocCnt = 4, | 310 kGeoPoolStatePreAllocCnt = 4, |
305 }; | 311 }; |
306 | 312 |
307 CmdBuffer fCmdBuffer; | 313 CmdBuffer fCmdBuffer; |
308 GrDrawState* fLastState; | 314 GrDrawState* fLastState; |
309 SkTArray<GrTraceMarkerSet, false> fGpuCmdMarkers; | 315 SkTArray<GrTraceMarkerSet, false> fGpuCmdMarkers; |
310 GrGpu* fDstGpu; | 316 GrGpu* fDstGpu; |
311 GrVertexBufferAllocPool& fVertexPool; | 317 GrVertexBufferAllocPool& fVertexPool; |
312 GrIndexBufferAllocPool& fIndexPool; | 318 GrIndexBufferAllocPool& fIndexPool; |
319 SkTDArray<uint32_t> fPathIndexBuffer; | |
320 SkTDArray<float> fPathTransformBuffer; | |
313 | 321 |
314 struct GeometryPoolState { | 322 struct GeometryPoolState { |
315 const GrVertexBuffer* fPoolVertexBuffer; | 323 const GrVertexBuffer* fPoolVertexBuffer; |
316 int fPoolStartVertex; | 324 int fPoolStartVertex; |
317 const GrIndexBuffer* fPoolIndexBuffer; | 325 const GrIndexBuffer* fPoolIndexBuffer; |
318 int fPoolStartIndex; | 326 int fPoolStartIndex; |
319 // caller may conservatively over reserve vertices / indices. | 327 // caller may conservatively over reserve vertices / indices. |
320 // we release unused space back to allocator if possible | 328 // we release unused space back to allocator if possible |
321 // can only do this if there isn't an intervening pushGeometrySource() | 329 // can only do this if there isn't an intervening pushGeometrySource() |
322 size_t fUsedPoolVertexBytes; | 330 size_t fUsedPoolVertexBytes; |
323 size_t fUsedPoolIndexBytes; | 331 size_t fUsedPoolIndexBytes; |
324 }; | 332 }; |
325 | 333 |
326 typedef SkSTArray<kGeoPoolStatePreAllocCnt, GeometryPoolState> GeoPoolStateStack; | 334 typedef SkSTArray<kGeoPoolStatePreAllocCnt, GeometryPoolState> GeoPoolStateStack; |
327 | 335 |
328 GeoPoolStateStack fGeoPoolStateStack; | 336 GeoPoolStateStack fGeoPoolStateStack; |
329 bool fFlushing; | 337 bool fFlushing; |
330 uint32_t fDrawID; | 338 uint32_t fDrawID; |
331 | 339 |
332 typedef GrClipTarget INHERITED; | 340 typedef GrClipTarget INHERITED; |
333 }; | 341 }; |
334 | 342 |
335 #endif | 343 #endif |
OLD | NEW |
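
For context on the two recurring changes in this patch (Cmd::execute() now receives the GrInOrderDrawBuffer rather than the GrGpu, and DrawPaths records fIndicesLocation/fTransformsLocation into the buffer's shared fPathIndexBuffer/fPathTransformBuffer arrays instead of appending its data behind the command via GetDataForItem), here is a minimal, self-contained sketch of that pattern. It is not Skia code: every name other than the ones quoted from the patch is hypothetical, the transform count is simplified to one float per path, and the GPU call is replaced by a printf, so this only illustrates how commands resolve offsets through the buffer that replays them.

#include <cstdint>
#include <cstdio>
#include <memory>
#include <vector>

// Hypothetical stand-in for GrInOrderDrawBuffer and one of its commands.
// Commands record offsets into arrays owned by the buffer, and execute()
// takes the buffer so those offsets can be resolved at playback time.
class DrawBufferSketch {
public:
    struct Cmd {
        virtual ~Cmd() {}
        // Mirrors the new signature: execute(GrInOrderDrawBuffer*, ...).
        virtual void execute(DrawBufferSketch* buf) = 0;
    };

    struct DrawPathsSketch : public Cmd {
        int    fIndicesLocation;    // offset into the shared index array
        size_t fCount;              // number of paths to draw
        int    fTransformsLocation; // offset into the shared transform array

        void execute(DrawBufferSketch* buf) override {
            const uint32_t* indices    = buf->pathIndexBuffer() + fIndicesLocation;
            const float*    transforms = buf->pathTransformBuffer() + fTransformsLocation;
            for (size_t i = 0; i < fCount; ++i) {
                std::printf("draw path index %u with tx=%f\n",
                            (unsigned)indices[i], transforms[i]);
            }
        }
    };

    // Recording: append the data to the shared arrays, remember only the offsets.
    void recordDrawPaths(const uint32_t* indices, const float* transforms, size_t count) {
        auto cmd = std::make_unique<DrawPathsSketch>();
        cmd->fIndicesLocation    = static_cast<int>(fPathIndexBuffer.size());
        cmd->fTransformsLocation = static_cast<int>(fPathTransformBuffer.size());
        cmd->fCount              = count;
        fPathIndexBuffer.insert(fPathIndexBuffer.end(), indices, indices + count);
        fPathTransformBuffer.insert(fPathTransformBuffer.end(), transforms, transforms + count);
        fCmds.push_back(std::move(cmd));
    }

    // Playback: hand each command the buffer so it can look its data back up,
    // then reset, analogous to the destructive flush described in the header.
    void flush() {
        for (auto& cmd : fCmds) {
            cmd->execute(this);
        }
        fCmds.clear();
        fPathIndexBuffer.clear();
        fPathTransformBuffer.clear();
    }

    const uint32_t* pathIndexBuffer() const { return fPathIndexBuffer.data(); }
    const float* pathTransformBuffer() const { return fPathTransformBuffer.data(); }

private:
    std::vector<std::unique_ptr<Cmd>> fCmds;
    std::vector<uint32_t> fPathIndexBuffer;
    std::vector<float>    fPathTransformBuffer;
};

int main() {
    DrawBufferSketch buffer;
    const uint32_t indices[]    = {3, 7};
    const float    transforms[] = {1.0f, 2.0f};
    buffer.recordDrawPaths(indices, transforms, 2);
    buffer.flush();  // commands resolve their offsets against the buffer's arrays
    return 0;
}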