OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef GrTargetCommands_DEFINED | 8 #ifndef GrTargetCommands_DEFINED |
9 #define GrTargetCommands_DEFINED | 9 #define GrTargetCommands_DEFINED |
10 | 10 |
11 #include "GrBatch.h" | 11 #include "GrBatch.h" |
12 #include "GrBatchTarget.h" | 12 #include "GrBatchTarget.h" |
13 #include "GrDrawTarget.h" | 13 #include "GrDrawTarget.h" |
14 #include "GrGpu.h" | 14 #include "GrGpu.h" |
15 #include "GrPath.h" | 15 #include "GrPath.h" |
16 #include "GrPendingProgramElement.h" | 16 #include "GrPendingProgramElement.h" |
17 #include "GrRenderTarget.h" | 17 #include "GrRenderTarget.h" |
18 #include "GrTRecorder.h" | 18 #include "GrTRecorder.h" |
19 #include "SkRect.h" | 19 #include "SkRect.h" |
20 #include "SkTypes.h" | 20 #include "SkTypes.h" |
21 | 21 |
22 class GrInOrderDrawBuffer; | 22 class GrInOrderDrawBuffer; |
23 class GrVertexBufferAllocPool; | 23 class GrVertexBufferAllocPool; |
24 class GrIndexBufferAllocPool; | 24 class GrIndexBufferAllocPool; |
25 | 25 |
26 class GrTargetCommands : ::SkNoncopyable { | 26 class GrTargetCommands : ::SkNoncopyable { |
| 27 struct State; |
27 struct SetState; | 28 struct SetState; |
28 | 29 |
29 public: | 30 public: |
30 GrTargetCommands(GrGpu* gpu, | 31 GrTargetCommands(GrGpu* gpu, |
31 GrVertexBufferAllocPool* vertexPool, | 32 GrVertexBufferAllocPool* vertexPool, |
32 GrIndexBufferAllocPool* indexPool) | 33 GrIndexBufferAllocPool* indexPool) |
33 : fCmdBuffer(kCmdBufferInitialSizeInBytes) | 34 : fCmdBuffer(kCmdBufferInitialSizeInBytes) |
34 , fPrevState(NULL) | |
35 , fBatchTarget(gpu, vertexPool, indexPool) { | 35 , fBatchTarget(gpu, vertexPool, indexPool) { |
36 } | 36 } |
37 | 37 |
38 class Cmd : ::SkNoncopyable { | 38 class Cmd : ::SkNoncopyable { |
39 public: | 39 public: |
40 enum CmdType { | 40 enum CmdType { |
41 kStencilPath_CmdType = 1, | 41 kStencilPath_CmdType = 1, |
42 kSetState_CmdType = 2, | 42 kSetState_CmdType = 2, |
43 kClear_CmdType = 3, | 43 kClear_CmdType = 3, |
44 kCopySurface_CmdType = 4, | 44 kCopySurface_CmdType = 4, |
45 kDrawPath_CmdType = 5, | 45 kDrawPath_CmdType = 5, |
46 kDrawPaths_CmdType = 6, | 46 kDrawPaths_CmdType = 6, |
47 kDrawBatch_CmdType = 7, | 47 kDrawBatch_CmdType = 7, |
48 kXferBarrier_CmdType = 8, | 48 kXferBarrier_CmdType = 8, |
49 }; | 49 }; |
50 | 50 |
51 Cmd(CmdType type) : fMarkerID(-1), fType(type) {} | 51 Cmd(CmdType type) : fMarkerID(-1), fType(type) {} |
52 virtual ~Cmd() {} | 52 virtual ~Cmd() {} |
53 | 53 |
54 virtual void execute(GrGpu*, const SetState*) = 0; | 54 virtual void execute(GrGpu*) = 0; |
55 | 55 |
56 CmdType type() const { return fType; } | 56 CmdType type() const { return fType; } |
57 | 57 |
58 // trace markers | 58 // trace markers |
59 bool isTraced() const { return -1 != fMarkerID; } | 59 bool isTraced() const { return -1 != fMarkerID; } |
60 void setMarkerID(int markerID) { SkASSERT(-1 == fMarkerID); fMarkerID = markerID; } | 60 void setMarkerID(int markerID) { SkASSERT(-1 == fMarkerID); fMarkerID = markerID; } |
61 int markerID() const { return fMarkerID; } | 61 int markerID() const { return fMarkerID; } |
62 | 62 |
63 private: | 63 private: |
64 int fMarkerID; | 64 int fMarkerID; |
65 CmdType fType; | 65 CmdType fType; |
66 }; | 66 }; |
67 | 67 |
68 void reset(); | 68 void reset(); |
69 void flush(GrInOrderDrawBuffer*); | 69 void flush(GrInOrderDrawBuffer*); |
70 | 70 |
71 Cmd* recordClearStencilClip(GrInOrderDrawBuffer*, | 71 Cmd* recordClearStencilClip(const SkIRect& rect, |
72 const SkIRect& rect, | |
73 bool insideClip, | 72 bool insideClip, |
74 GrRenderTarget* renderTarget); | 73 GrRenderTarget* renderTarget); |
75 | 74 |
76 Cmd* recordDiscard(GrInOrderDrawBuffer*, GrRenderTarget*); | 75 Cmd* recordDiscard(GrRenderTarget*); |
77 | 76 Cmd* recordDrawBatch(State*, GrBatch*); |
78 Cmd* recordDraw(GrInOrderDrawBuffer*, | 77 Cmd* recordStencilPath(const GrPipelineBuilder&, |
79 const GrGeometryProcessor*, | |
80 const GrDrawTarget::DrawInfo&, | |
81 const GrDrawTarget::PipelineInfo&); | |
82 Cmd* recordDrawBatch(GrInOrderDrawBuffer*, | |
83 GrBatch*, | |
84 const GrDrawTarget::PipelineInfo&); | |
85 Cmd* recordStencilPath(GrInOrderDrawBuffer*, | |
86 const GrPipelineBuilder&, | |
87 const GrPathProcessor*, | 78 const GrPathProcessor*, |
88 const GrPath*, | 79 const GrPath*, |
89 const GrScissorState&, | 80 const GrScissorState&, |
90 const GrStencilSettings&); | 81 const GrStencilSettings&); |
91 Cmd* recordDrawPath(GrInOrderDrawBuffer*, | 82 Cmd* recordDrawPath(State*, |
92 const GrPathProcessor*, | 83 const GrPathProcessor*, |
93 const GrPath*, | 84 const GrPath*, |
94 const GrStencilSettings&, | 85 const GrStencilSettings&); |
95 const GrDrawTarget::PipelineInfo&); | 86 Cmd* recordDrawPaths(State*, |
96 Cmd* recordDrawPaths(GrInOrderDrawBuffer*, | 87 GrInOrderDrawBuffer*, |
97 const GrPathProcessor*, | 88 const GrPathProcessor*, |
98 const GrPathRange*, | 89 const GrPathRange*, |
99 const void*, | 90 const void*, |
100 GrDrawTarget::PathIndexType, | 91 GrDrawTarget::PathIndexType, |
101 const float transformValues[], | 92 const float transformValues[], |
102 GrDrawTarget::PathTransformType , | 93 GrDrawTarget::PathTransformType , |
103 int, | 94 int, |
104 const GrStencilSettings&, | 95 const GrStencilSettings&, |
105 const GrDrawTarget::PipelineInfo&); | 96 const GrDrawTarget::PipelineInfo&); |
106 Cmd* recordClear(GrInOrderDrawBuffer*, | 97 Cmd* recordClear(const SkIRect* rect, |
107 const SkIRect* rect, | |
108 GrColor, | 98 GrColor, |
109 bool canIgnoreRect, | 99 bool canIgnoreRect, |
110 GrRenderTarget*); | 100 GrRenderTarget*); |
111 Cmd* recordCopySurface(GrSurface* dst, | 101 Cmd* recordCopySurface(GrSurface* dst, |
112 GrSurface* src, | 102 GrSurface* src, |
113 const SkIRect& srcRect, | 103 const SkIRect& srcRect, |
114 const SkIPoint& dstPoint); | 104 const SkIPoint& dstPoint); |
115 | 105 |
116 private: | 106 private: |
117 friend class GrInOrderDrawBuffer; | 107 friend class GrInOrderDrawBuffer; |
118 | 108 |
119 typedef GrGpu::DrawArgs DrawArgs; | 109 typedef GrGpu::DrawArgs DrawArgs; |
120 | 110 |
121 // Attempts to concat instances from info onto the previous draw. info must represent an | 111 void recordXferBarrierIfNecessary(const GrPipeline&, GrInOrderDrawBuffer*); |
122 // instanced draw. The caller must have already recorded a new draw state and clip if necessary. | |
123 int concatInstancedDraw(GrInOrderDrawBuffer*, const GrDrawTarget::DrawInfo&); | |
124 | 112 |
125 bool SK_WARN_UNUSED_RESULT setupPipelineAndShouldDraw(GrInOrderDrawBuffer*, | 113 // TODO: This can be just a pipeline once paths are in batch, and it should live elsewhere |
126 const GrPrimitiveProcessor*, | 114 struct State : public SkNVRefCnt<State> { |
127 const GrDrawTarget::PipelineInfo&); | 115 // TODO get rid of the prim proc parameter when we use batch everywhere |
128 bool SK_WARN_UNUSED_RESULT setupPipelineAndShouldDraw(GrInOrderDrawBuffer*, | 116 State(const GrPrimitiveProcessor* primProc = NULL) |
129 GrBatch*, | 117 : fPrimitiveProcessor(primProc) |
130 const GrDrawTarget::PipelineInfo&); | 118 , fCompiled(false) {} |
131 | 119 |
132 void recordXferBarrierIfNecessary(GrInOrderDrawBuffer*, const GrDrawTarget::PipelineInfo&); | 120 ~State() { reinterpret_cast<GrPipeline*>(fPipeline.get())->~GrPipeline(); } |
| 121 |
| 122 // This function is only for getting the location in memory where we will create our |
| 123 // pipeline object. |
| 124 GrPipeline* pipelineLocation() { return reinterpret_cast<GrPipeline*>(fPipeline.get()); } |
| 125 |
| 126 const GrPipeline* getPipeline() const { |
| 127 return reinterpret_cast<const GrPipeline*>(fPipeline.get()); |
| 128 } |
| 129 GrRenderTarget* getRenderTarget() const { |
| 130 return this->getPipeline()->getRenderTarget(); |
| 131 } |
| 132 const GrXferProcessor* getXferProcessor() const { |
| 133 return this->getPipeline()->getXferProcessor(); |
| 134 } |
| 135 |
| 136 void operator delete(void* p) { |
| 137 //SkDebugf("destruction\n"); |
| 138 } |
| 139 void* operator new(size_t) { |
| 140 SkFAIL("All States are created by placement new."); |
| 141 return sk_malloc_throw(0); |
| 142 } |
| 143 |
| 144 void* operator new(size_t, void* p) { return p; } |
| 145 void operator delete(void* target, void* placement) { |
| 146 ::operator delete(target, placement); |
| 147 } |
| 148 |
| 149 |
| 150 typedef GrPendingProgramElement<const GrPrimitiveProcessor> ProgramPrimitiveProcessor; |
| 151 ProgramPrimitiveProcessor fPrimitiveProcessor; |
| 152 SkAlignedSStorage<sizeof(GrPipeline)> fPipeline; |
| 153 GrProgramDesc fDesc; |
| 154 GrBatchTracker fBatchTracker; |
| 155 bool fCompiled; |
| 156 }; |
133 | 157 |
134 struct StencilPath : public Cmd { | 158 struct StencilPath : public Cmd { |
135 StencilPath(const GrPath* path, GrRenderTarget* rt) | 159 StencilPath(const GrPath* path, GrRenderTarget* rt) |
136 : Cmd(kStencilPath_CmdType) | 160 : Cmd(kStencilPath_CmdType) |
137 , fRenderTarget(rt) | 161 , fRenderTarget(rt) |
138 , fPath(path) {} | 162 , fPath(path) {} |
139 | 163 |
140 const GrPath* path() const { return fPath.get(); } | 164 const GrPath* path() const { return fPath.get(); } |
141 | 165 |
142 void execute(GrGpu*, const SetState*) override; | 166 void execute(GrGpu*) override; |
143 | 167 |
144 SkMatrix fViewMatrix; | 168 SkMatrix fViewMatrix; |
145 bool fUseHWAA; | 169 bool fUseHWAA; |
146 GrStencilSettings fStencil; | 170 GrStencilSettings fStencil; |
147 GrScissorState fScissor; | 171 GrScissorState fScissor; |
148 private: | 172 private: |
149 GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget; | 173 GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget; |
150 GrPendingIOResource<const GrPath, kRead_GrIOType> fPath; | 174 GrPendingIOResource<const GrPath, kRead_GrIOType> fPath; |
151 }; | 175 }; |
152 | 176 |
153 struct DrawPath : public Cmd { | 177 struct DrawPath : public Cmd { |
154 DrawPath(const GrPath* path) : Cmd(kDrawPath_CmdType), fPath(path) {} | 178 DrawPath(State* state, const GrPath* path) |
| 179 : Cmd(kDrawPath_CmdType) |
| 180 , fState(SkRef(state)) |
| 181 , fPath(path) {} |
155 | 182 |
156 const GrPath* path() const { return fPath.get(); } | 183 const GrPath* path() const { return fPath.get(); } |
157 | 184 |
158 void execute(GrGpu*, const SetState*) override; | 185 void execute(GrGpu*) override; |
159 | 186 |
| 187 SkAutoTUnref<State> fState; |
160 GrStencilSettings fStencilSettings; | 188 GrStencilSettings fStencilSettings; |
161 | |
162 private: | 189 private: |
163 GrPendingIOResource<const GrPath, kRead_GrIOType> fPath; | 190 GrPendingIOResource<const GrPath, kRead_GrIOType> fPath; |
164 }; | 191 }; |
165 | 192 |
166 struct DrawPaths : public Cmd { | 193 struct DrawPaths : public Cmd { |
167 DrawPaths(const GrPathRange* pathRange) : Cmd(kDrawPaths_CmdType), fPathRange(pathRange) {} | 194 DrawPaths(State* state, const GrPathRange* pathRange) |
| 195 : Cmd(kDrawPaths_CmdType) |
| 196 , fState(SkRef(state)) |
| 197 , fPathRange(pathRange) {} |
168 | 198 |
169 const GrPathRange* pathRange() const { return fPathRange.get(); } | 199 const GrPathRange* pathRange() const { return fPathRange.get(); } |
170 | 200 |
171 void execute(GrGpu*, const SetState*) override; | 201 void execute(GrGpu*) override; |
172 | 202 |
| 203 SkAutoTUnref<State> fState; |
173 char* fIndices; | 204 char* fIndices; |
174 GrDrawTarget::PathIndexType fIndexType; | 205 GrDrawTarget::PathIndexType fIndexType; |
175 float* fTransforms; | 206 float* fTransforms; |
176 GrDrawTarget::PathTransformType fTransformType; | 207 GrDrawTarget::PathTransformType fTransformType; |
177 int fCount; | 208 int fCount; |
178 GrStencilSettings fStencilSettings; | 209 GrStencilSettings fStencilSettings; |
179 | 210 |
180 private: | 211 private: |
181 GrPendingIOResource<const GrPathRange, kRead_GrIOType> fPathRange; | 212 GrPendingIOResource<const GrPathRange, kRead_GrIOType> fPathRange; |
182 }; | 213 }; |
183 | 214 |
184 // This is also used to record a discard by setting the color to GrColor_ILLEGAL | 215 // This is also used to record a discard by setting the color to GrColor_ILLEGAL |
185 struct Clear : public Cmd { | 216 struct Clear : public Cmd { |
186 Clear(GrRenderTarget* rt) : Cmd(kClear_CmdType), fRenderTarget(rt) {} | 217 Clear(GrRenderTarget* rt) : Cmd(kClear_CmdType), fRenderTarget(rt) {} |
187 | 218 |
188 GrRenderTarget* renderTarget() const { return fRenderTarget.get(); } | 219 GrRenderTarget* renderTarget() const { return fRenderTarget.get(); } |
189 | 220 |
190 void execute(GrGpu*, const SetState*) override; | 221 void execute(GrGpu*) override; |
191 | 222 |
192 SkIRect fRect; | 223 SkIRect fRect; |
193 GrColor fColor; | 224 GrColor fColor; |
194 bool fCanIgnoreRect; | 225 bool fCanIgnoreRect; |
195 | 226 |
196 private: | 227 private: |
197 GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget; | 228 GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget; |
198 }; | 229 }; |
199 | 230 |
200 // This command is ONLY used by the clip mask manager to clear the stencil clip bits | 231 // This command is ONLY used by the clip mask manager to clear the stencil clip bits |
201 struct ClearStencilClip : public Cmd { | 232 struct ClearStencilClip : public Cmd { |
202 ClearStencilClip(GrRenderTarget* rt) : Cmd(kClear_CmdType), fRenderTarget(rt) {} | 233 ClearStencilClip(GrRenderTarget* rt) : Cmd(kClear_CmdType), fRenderTarget(rt) {} |
203 | 234 |
204 GrRenderTarget* renderTarget() const { return fRenderTarget.get(); } | 235 GrRenderTarget* renderTarget() const { return fRenderTarget.get(); } |
205 | 236 |
206 void execute(GrGpu*, const SetState*) override; | 237 void execute(GrGpu*) override; |
207 | 238 |
208 SkIRect fRect; | 239 SkIRect fRect; |
209 bool fInsideClip; | 240 bool fInsideClip; |
210 | 241 |
211 private: | 242 private: |
212 GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget; | 243 GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget; |
213 }; | 244 }; |
214 | 245 |
215 struct CopySurface : public Cmd { | 246 struct CopySurface : public Cmd { |
216 CopySurface(GrSurface* dst, GrSurface* src) | 247 CopySurface(GrSurface* dst, GrSurface* src) |
217 : Cmd(kCopySurface_CmdType) | 248 : Cmd(kCopySurface_CmdType) |
218 , fDst(dst) | 249 , fDst(dst) |
219 , fSrc(src) { | 250 , fSrc(src) { |
220 } | 251 } |
221 | 252 |
222 GrSurface* dst() const { return fDst.get(); } | 253 GrSurface* dst() const { return fDst.get(); } |
223 GrSurface* src() const { return fSrc.get(); } | 254 GrSurface* src() const { return fSrc.get(); } |
224 | 255 |
225 void execute(GrGpu*, const SetState*) override; | 256 void execute(GrGpu*) override; |
226 | 257 |
227 SkIPoint fDstPoint; | 258 SkIPoint fDstPoint; |
228 SkIRect fSrcRect; | 259 SkIRect fSrcRect; |
229 | 260 |
230 private: | 261 private: |
231 GrPendingIOResource<GrSurface, kWrite_GrIOType> fDst; | 262 GrPendingIOResource<GrSurface, kWrite_GrIOType> fDst; |
232 GrPendingIOResource<GrSurface, kRead_GrIOType> fSrc; | 263 GrPendingIOResource<GrSurface, kRead_GrIOType> fSrc; |
233 }; | 264 }; |
234 | 265 |
235 // TODO: rename to SetPipeline once pp, batch tracker, and desc are removed | |
236 struct SetState : public Cmd { | |
237 // TODO get rid of the prim proc parameter when we use batch everywhere | |
238 SetState(const GrPrimitiveProcessor* primProc = NULL) | |
239 : Cmd(kSetState_CmdType) | |
240 , fPrimitiveProcessor(primProc) {} | |
241 | |
242 ~SetState() { reinterpret_cast<GrPipeline*>(fPipeline.get())->~GrPipeline(); } | |
243 | |
244 // This function is only for getting the location in memory where we will create our | |
245 // pipeline object. | |
246 GrPipeline* pipelineLocation() { return reinterpret_cast<GrPipeline*>(fPipeline.get()); } | |
247 | |
248 const GrPipeline* getPipeline() const { | |
249 return reinterpret_cast<const GrPipeline*>(fPipeline.get()); | |
250 } | |
251 GrRenderTarget* getRenderTarget() const { | |
252 return this->getPipeline()->getRenderTarget(); | |
253 } | |
254 const GrXferProcessor* getXferProcessor() const { | |
255 return this->getPipeline()->getXferProcessor(); | |
256 } | |
257 | |
258 void execute(GrGpu*, const SetState*) override; | |
259 | |
260 typedef GrPendingProgramElement<const GrPrimitiveProcessor> ProgramPrimitiveProcessor; | |
261 ProgramPrimitiveProcessor fPrimitiveProcessor; | |
262 SkAlignedSStorage<sizeof(GrPipeline)> fPipeline; | |
263 GrProgramDesc fDesc; | |
264 GrBatchTracker fBatchTracker; | |
265 }; | |
266 | |
267 struct DrawBatch : public Cmd { | 266 struct DrawBatch : public Cmd { |
268 DrawBatch(GrBatch* batch, GrBatchTarget* batchTarget) | 267 DrawBatch(State* state, GrBatch* batch, GrBatchTarget* batchTarget) |
269 : Cmd(kDrawBatch_CmdType) | 268 : Cmd(kDrawBatch_CmdType) |
| 269 , fState(SkRef(state)) |
270 , fBatch(SkRef(batch)) | 270 , fBatch(SkRef(batch)) |
271 , fBatchTarget(batchTarget) { | 271 , fBatchTarget(batchTarget) { |
272 SkASSERT(!batch->isUsed()); | 272 SkASSERT(!batch->isUsed()); |
273 } | 273 } |
274 | 274 |
275 void execute(GrGpu*, const SetState*) override; | 275 void execute(GrGpu*) override; |
276 | 276 |
277 // TODO it wouldn't be too hard to let batches allocate in the cmd buffer | 277 SkAutoTUnref<State> fState; |
278 SkAutoTUnref<GrBatch> fBatch; | 278 SkAutoTUnref<GrBatch> fBatch; |
279 | 279 |
280 private: | 280 private: |
281 GrBatchTarget* fBatchTarget; | 281 GrBatchTarget* fBatchTarget; |
282 }; | 282 }; |
283 | 283 |
284 struct XferBarrier : public Cmd { | 284 struct XferBarrier : public Cmd { |
285 XferBarrier() : Cmd(kXferBarrier_CmdType) {} | 285 XferBarrier() : Cmd(kXferBarrier_CmdType) {} |
286 | 286 |
287 void execute(GrGpu*, const SetState*) override; | 287 void execute(GrGpu*) override; |
288 | 288 |
289 GrXferBarrierType fBarrierType; | 289 GrXferBarrierType fBarrierType; |
290 }; | 290 }; |
291 | 291 |
292 static const int kCmdBufferInitialSizeInBytes = 8 * 1024; | 292 static const int kCmdBufferInitialSizeInBytes = 8 * 1024; |
293 | 293 |
294 typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double. | 294 typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double. |
295 typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer; | 295 typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer; |
296 | 296 |
297 CmdBuffer fCmdBuffer; | 297 CmdBuffer fCmdBuffer; |
298 SetState* fPrevState; | 298 GrBatchTarget fBatchTarget; |
299 GrBatchTarget fBatchTarget; | |
300 }; | 299 }; |
301 | 300 |
302 #endif | 301 #endif |
303 | 302 |
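The core change in this diff moves the pipeline and processor state out of the recorded SetState command and into a ref-counted State object (SkNVRefCnt) that each draw command holds through SkAutoTUnref<State>; the GrPipeline is still constructed in place inside State::fPipeline (an SkAlignedSStorage slot) via pipelineLocation() and placement new, and heap allocation of State is forbidden (SkFAIL in operator new). Below is a minimal, self-contained sketch of that in-place construction pattern; FakePipeline and FakeState are hypothetical stand-ins for GrPipeline and GrTargetCommands::State, not the actual Skia classes.

// Sketch only: simplified stand-in types, not Skia code.
#include <cstdio>
#include <new>

struct FakePipeline {                 // stand-in for GrPipeline
    explicit FakePipeline(int rtID) : fRenderTargetID(rtID) {}
    int fRenderTargetID;
};

struct FakeState {                    // stand-in for GrTargetCommands::State
    // Raw, suitably aligned storage; the pipeline is built here in place
    // (the real code uses SkAlignedSStorage<sizeof(GrPipeline)>).
    alignas(FakePipeline) unsigned char fPipeline[sizeof(FakePipeline)];

    FakePipeline* pipelineLocation() { return reinterpret_cast<FakePipeline*>(fPipeline); }
    const FakePipeline* getPipeline() const {
        return reinterpret_cast<const FakePipeline*>(fPipeline);
    }
    // Mirrors ~State(): the in-place pipeline must be destroyed explicitly.
    ~FakeState() { this->pipelineLocation()->~FakePipeline(); }
};

int main() {
    // A slot of pre-owned memory stands in for wherever the draw buffer
    // actually places its States; here it is just a local aligned buffer.
    alignas(FakeState) unsigned char slot[sizeof(FakeState)];

    FakeState* state = new (slot) FakeState;            // placement new only
    new (state->pipelineLocation()) FakePipeline(42);   // build the pipeline in place

    std::printf("render target id: %d\n", state->getPipeline()->fRenderTargetID);

    state->~FakeState();   // explicit destruction; the slot itself is never freed
    return 0;
}

The private operator new overloads in the real State serve the same purpose as the explicit placement new above: ordinary heap allocation fails loudly, so a State can only be constructed into memory something else already owns (presumably a buffer managed by the draw buffer, which the diff does not show).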