| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2011 Google Inc. | 2 * Copyright 2011 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #ifndef GrInOrderDrawBuffer_DEFINED | 8 #ifndef GrInOrderDrawBuffer_DEFINED |
| 9 #define GrInOrderDrawBuffer_DEFINED | 9 #define GrInOrderDrawBuffer_DEFINED |
| 10 | 10 |
| 11 #include "GrFlushToGpuDrawTarget.h" | 11 #include "GrFlushToGpuDrawTarget.h" |
| 12 | 12 |
| 13 #include "GrBatch.h" | 13 #include "GrBatch.h" |
| 14 #include "GrBatchTarget.h" | 14 #include "GrBatchTarget.h" |
| 15 #include "SkChunkAlloc.h" |
| 15 #include "GrPipeline.h" | 16 #include "GrPipeline.h" |
| 16 #include "GrPath.h" | 17 #include "GrPath.h" |
| 17 #include "GrTRecorder.h" | 18 #include "GrTRecorder.h" |
| 18 | 19 |
| 19 /** | 20 /** |
| 20 * GrInOrderDrawBuffer is an implementation of GrDrawTarget that queues up draws for eventual | 21 * GrInOrderDrawBuffer is an implementation of GrDrawTarget that queues up draws for eventual |
| 21 * playback into a GrGpu. In theory one draw buffer could playback into another. When index or | 22 * playback into a GrGpu. In theory one draw buffer could playback into another. When index or |
| 22 * vertex buffers are used as geometry sources it is the callers the draw buffer only holds | 23 * vertex buffers are used as geometry sources it is the callers the draw buffer only holds |
| 23 * references to the buffers. It is the callers responsibility to ensure that the data is still | 24 * references to the buffers. It is the callers responsibility to ensure that the data is still |
| 24 * valid when the draw buffer is played back into a GrGpu. Similarly, it is the caller's | 25 * valid when the draw buffer is played back into a GrGpu. Similarly, it is the caller's |
| (...skipping 46 matching lines...) |
| 71 kClear_Cmd = 4, | 72 kClear_Cmd = 4, |
| 72 kCopySurface_Cmd = 5, | 73 kCopySurface_Cmd = 5, |
| 73 kDrawPath_Cmd = 6, | 74 kDrawPath_Cmd = 6, |
| 74 kDrawPaths_Cmd = 7, | 75 kDrawPaths_Cmd = 7, |
| 75 kDrawBatch_Cmd = 8, | 76 kDrawBatch_Cmd = 8, |
| 76 }; | 77 }; |
| 77 | 78 |
| 78 Cmd(uint8_t type) : fType(type) {} | 79 Cmd(uint8_t type) : fType(type) {} |
| 79 virtual ~Cmd() {} | 80 virtual ~Cmd() {} |
| 80 | 81 |
| 81 virtual void execute(GrInOrderDrawBuffer*, const SetState*) = 0; | 82 virtual void execute(GrGpu*, const SetState*) = 0; |
| 82 | 83 |
| 83 uint8_t type() const { return fType & kCmdMask; } | 84 uint8_t type() const { return fType & kCmdMask; } |
| 84 | 85 |
| 85 bool isTraced() const { return SkToBool(fType & kTraceCmdBit); } | 86 bool isTraced() const { return SkToBool(fType & kTraceCmdBit); } |
| 86 void makeTraced() { fType |= kTraceCmdBit; } | 87 void makeTraced() { fType |= kTraceCmdBit; } |
| 87 | 88 |
| 88 private: | 89 private: |
| 89 static const int kCmdMask = 0x7F; | 90 static const int kCmdMask = 0x7F; |
| 90 static const int kTraceCmdBit = 0x80; | 91 static const int kTraceCmdBit = 0x80; |
| 91 | 92 |
| 92 uint8_t fType; | 93 uint8_t fType; |
| 93 }; | 94 }; |
| 94 | 95 |
| 95 struct Draw : public Cmd { | 96 struct Draw : public Cmd { |
| 96 Draw(const DrawInfo& info) : Cmd(kDraw_Cmd), fInfo(info) {} | 97 Draw(const DrawInfo& info) : Cmd(kDraw_Cmd), fInfo(info) {} |
| 97 | 98 |
| 98 void execute(GrInOrderDrawBuffer*, const SetState*) SK_OVERRIDE; | 99 void execute(GrGpu*, const SetState*) SK_OVERRIDE; |
| 99 | 100 |
| 100 DrawInfo fInfo; | 101 DrawInfo fInfo; |
| 101 }; | 102 }; |
| 102 | 103 |
| 103 struct StencilPath : public Cmd { | 104 struct StencilPath : public Cmd { |
| 104 StencilPath(const GrPath* path, GrRenderTarget* rt) | 105 StencilPath(const GrPath* path, GrRenderTarget* rt) |
| 105 : Cmd(kStencilPath_Cmd) | 106 : Cmd(kStencilPath_Cmd) |
| 106 , fRenderTarget(rt) | 107 , fRenderTarget(rt) |
| 107 , fPath(path) {} | 108 , fPath(path) {} |
| 108 | 109 |
| 109 const GrPath* path() const { return fPath.get(); } | 110 const GrPath* path() const { return fPath.get(); } |
| 110 | 111 |
| 111 void execute(GrInOrderDrawBuffer*, const SetState*) SK_OVERRIDE; | 112 void execute(GrGpu*, const SetState*) SK_OVERRIDE; |
| 112 | 113 |
| 113 SkMatrix fViewMatrix; | 114 SkMatrix fViewMatrix; |
| 114 bool fUseHWAA; | 115 bool fUseHWAA; |
| 115 GrStencilSettings fStencil; | 116 GrStencilSettings fStencil; |
| 116 GrScissorState fScissor; | 117 GrScissorState fScissor; |
| 117 private: | 118 private: |
| 118 GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget; | 119 GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget; |
| 119 GrPendingIOResource<const GrPath, kRead_GrIOType> fPath; | 120 GrPendingIOResource<const GrPath, kRead_GrIOType> fPath; |
| 120 }; | 121 }; |
| 121 | 122 |
| 122 struct DrawPath : public Cmd { | 123 struct DrawPath : public Cmd { |
| 123 DrawPath(const GrPath* path) : Cmd(kDrawPath_Cmd), fPath(path) {} | 124 DrawPath(const GrPath* path) : Cmd(kDrawPath_Cmd), fPath(path) {} |
| 124 | 125 |
| 125 const GrPath* path() const { return fPath.get(); } | 126 const GrPath* path() const { return fPath.get(); } |
| 126 | 127 |
| 127 void execute(GrInOrderDrawBuffer*, const SetState*) SK_OVERRIDE; | 128 void execute(GrGpu*, const SetState*) SK_OVERRIDE; |
| 128 | 129 |
| 129 GrStencilSettings fStencilSettings; | 130 GrStencilSettings fStencilSettings; |
| 130 | 131 |
| 131 private: | 132 private: |
| 132 GrPendingIOResource<const GrPath, kRead_GrIOType> fPath; | 133 GrPendingIOResource<const GrPath, kRead_GrIOType> fPath; |
| 133 }; | 134 }; |
| 134 | 135 |
| 135 struct DrawPaths : public Cmd { | 136 struct DrawPaths : public Cmd { |
| 136 DrawPaths(const GrPathRange* pathRange) : Cmd(kDrawPaths_Cmd), fPathRange(pathRange) {} | 137 DrawPaths(const GrPathRange* pathRange) : Cmd(kDrawPaths_Cmd), fPathRange(pathRange) {} |
| 137 | 138 |
| 138 const GrPathRange* pathRange() const { return fPathRange.get(); } | 139 const GrPathRange* pathRange() const { return fPathRange.get(); } |
| 139 | 140 |
| 140 void execute(GrInOrderDrawBuffer*, const SetState*) SK_OVERRIDE; | 141 void execute(GrGpu*, const SetState*) SK_OVERRIDE; |
| 141 | 142 |
| 142 int fIndicesLocation; | 143 char* fIndices; |
| 143 PathIndexType fIndexType; | 144 PathIndexType fIndexType; |
| 144 int fTransformsLocation; | 145 float* fTransforms; |
| 145 PathTransformType fTransformType; | 146 PathTransformType fTransformType; |
| 146 int fCount; | 147 int fCount; |
| 147 GrStencilSettings fStencilSettings; | 148 GrStencilSettings fStencilSettings; |
| 148 | 149 |
| 149 private: | 150 private: |
| 150 GrPendingIOResource<const GrPathRange, kRead_GrIOType> fPathRange; | 151 GrPendingIOResource<const GrPathRange, kRead_GrIOType> fPathRange; |
| 151 }; | 152 }; |
| 152 | 153 |
| 153 // This is also used to record a discard by setting the color to GrColor_ILLEGAL | 154 // This is also used to record a discard by setting the color to GrColor_ILLEGAL |
| 154 struct Clear : public Cmd { | 155 struct Clear : public Cmd { |
| 155 Clear(GrRenderTarget* rt) : Cmd(kClear_Cmd), fRenderTarget(rt) {} | 156 Clear(GrRenderTarget* rt) : Cmd(kClear_Cmd), fRenderTarget(rt) {} |
| 156 | 157 |
| 157 GrRenderTarget* renderTarget() const { return fRenderTarget.get(); } | 158 GrRenderTarget* renderTarget() const { return fRenderTarget.get(); } |
| 158 | 159 |
| 159 void execute(GrInOrderDrawBuffer*, const SetState*) SK_OVERRIDE; | 160 void execute(GrGpu*, const SetState*) SK_OVERRIDE; |
| 160 | 161 |
| 161 SkIRect fRect; | 162 SkIRect fRect; |
| 162 GrColor fColor; | 163 GrColor fColor; |
| 163 bool fCanIgnoreRect; | 164 bool fCanIgnoreRect; |
| 164 | 165 |
| 165 private: | 166 private: |
| 166 GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget; | 167 GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget; |
| 167 }; | 168 }; |
| 168 | 169 |
| 169 // This command is ONLY used by the clip mask manager to clear the stencil clip bits | 170 // This command is ONLY used by the clip mask manager to clear the stencil clip bits |
| 170 struct ClearStencilClip : public Cmd { | 171 struct ClearStencilClip : public Cmd { |
| 171 ClearStencilClip(GrRenderTarget* rt) : Cmd(kClear_Cmd), fRenderTarget(rt) {} | 172 ClearStencilClip(GrRenderTarget* rt) : Cmd(kClear_Cmd), fRenderTarget(rt) {} |
| 172 | 173 |
| 173 GrRenderTarget* renderTarget() const { return fRenderTarget.get(); } | 174 GrRenderTarget* renderTarget() const { return fRenderTarget.get(); } |
| 174 | 175 |
| 175 void execute(GrInOrderDrawBuffer*, const SetState*) SK_OVERRIDE; | 176 void execute(GrGpu*, const SetState*) SK_OVERRIDE; |
| 176 | 177 |
| 177 SkIRect fRect; | 178 SkIRect fRect; |
| 178 bool fInsideClip; | 179 bool fInsideClip; |
| 179 | 180 |
| 180 private: | 181 private: |
| 181 GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget; | 182 GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget; |
| 182 }; | 183 }; |
| 183 | 184 |
| 184 struct CopySurface : public Cmd { | 185 struct CopySurface : public Cmd { |
| 185 CopySurface(GrSurface* dst, GrSurface* src) : Cmd(kCopySurface_Cmd), fDst(dst), fSrc(src) {} | 186 CopySurface(GrSurface* dst, GrSurface* src) : Cmd(kCopySurface_Cmd), fDst(dst), fSrc(src) {} |
| 186 | 187 |
| 187 GrSurface* dst() const { return fDst.get(); } | 188 GrSurface* dst() const { return fDst.get(); } |
| 188 GrSurface* src() const { return fSrc.get(); } | 189 GrSurface* src() const { return fSrc.get(); } |
| 189 | 190 |
| 190 void execute(GrInOrderDrawBuffer*, const SetState*) SK_OVERRIDE; | 191 void execute(GrGpu*, const SetState*) SK_OVERRIDE; |
| 191 | 192 |
| 192 SkIPoint fDstPoint; | 193 SkIPoint fDstPoint; |
| 193 SkIRect fSrcRect; | 194 SkIRect fSrcRect; |
| 194 | 195 |
| 195 private: | 196 private: |
| 196 GrPendingIOResource<GrSurface, kWrite_GrIOType> fDst; | 197 GrPendingIOResource<GrSurface, kWrite_GrIOType> fDst; |
| 197 GrPendingIOResource<GrSurface, kRead_GrIOType> fSrc; | 198 GrPendingIOResource<GrSurface, kRead_GrIOType> fSrc; |
| 198 }; | 199 }; |
| 199 | 200 |
| 200 // TODO: rename to SetPipeline once pp, batch tracker, and desc are removed | 201 // TODO: rename to SetPipeline once pp, batch tracker, and desc are removed |
| 201 struct SetState : public Cmd { | 202 struct SetState : public Cmd { |
| 202 // TODO get rid of the prim proc parameter when we use batch everywhere | 203 // TODO get rid of the prim proc parameter when we use batch everywhere |
| 203 SetState(const GrPrimitiveProcessor* primProc = NULL) | 204 SetState(const GrPrimitiveProcessor* primProc = NULL) |
| 204 : Cmd(kSetState_Cmd) | 205 : Cmd(kSetState_Cmd) |
| 205 , fPrimitiveProcessor(primProc) {} | 206 , fPrimitiveProcessor(primProc) {} |
| 206 | 207 |
| 207 ~SetState() { reinterpret_cast<GrPipeline*>(fPipeline.get())->~GrPipeline(); } | 208 ~SetState() { reinterpret_cast<GrPipeline*>(fPipeline.get())->~GrPipeline(); } |
| 208 | 209 |
| 209 // This function is only for getting the location in memory where we will create our | 210 // This function is only for getting the location in memory where we will create our |
| 210 // pipeline object. | 211 // pipeline object. |
| 211 GrPipeline* pipelineLocation() { return reinterpret_cast<GrPipeline*>(fPipeline.get()); } | 212 GrPipeline* pipelineLocation() { return reinterpret_cast<GrPipeline*>(fPipeline.get()); } |
| 212 | 213 |
| 213 const GrPipeline* getPipeline() const { | 214 const GrPipeline* getPipeline() const { |
| 214 return reinterpret_cast<const GrPipeline*>(fPipeline.get()); | 215 return reinterpret_cast<const GrPipeline*>(fPipeline.get()); |
| 215 } | 216 } |
| 216 | 217 |
| 217 void execute(GrInOrderDrawBuffer*, const SetState*) SK_OVERRIDE; | 218 void execute(GrGpu*, const SetState*) SK_OVERRIDE; |
| 218 | 219 |
| 219 typedef GrPendingProgramElement<const GrPrimitiveProcessor> ProgramPrimitiveProcessor; | 220 typedef GrPendingProgramElement<const GrPrimitiveProcessor> ProgramPrimitiveProcessor; |
| 220 ProgramPrimitiveProcessor fPrimitiveProcessor; | 221 ProgramPrimitiveProcessor fPrimitiveProcessor; |
| 221 SkAlignedSStorage<sizeof(GrPipeline)> fPipeline; | 222 SkAlignedSStorage<sizeof(GrPipeline)> fPipeline; |
| 222 GrProgramDesc fDesc; | 223 GrProgramDesc fDesc; |
| 223 GrBatchTracker fBatchTracker; | 224 GrBatchTracker fBatchTracker; |
| 224 }; | 225 }; |
| 225 | 226 |
| 226 struct DrawBatch : public Cmd { | 227 struct DrawBatch : public Cmd { |
| 227 DrawBatch(GrBatch* batch) : Cmd(kDrawBatch_Cmd), fBatch(SkRef(batch)) { | 228 DrawBatch(GrBatch* batch, GrBatchTarget* batchTarget) |
| 229 : Cmd(kDrawBatch_Cmd) |
| 230 , fBatch(SkRef(batch)) |
| 231 , fBatchTarget(batchTarget) { |
| 228 SkASSERT(!batch->isUsed()); | 232 SkASSERT(!batch->isUsed()); |
| 229 } | 233 } |
| 230 | 234 |
| 231 void execute(GrInOrderDrawBuffer*, const SetState*) SK_OVERRIDE; | 235 void execute(GrGpu*, const SetState*) SK_OVERRIDE; |
| 232 | 236 |
| 233 // TODO it wouldn't be too hard to let batches allocate in the cmd buffer | 237 // TODO it wouldn't be too hard to let batches allocate in the cmd buffer |
| 234 SkAutoTUnref<GrBatch> fBatch; | 238 SkAutoTUnref<GrBatch> fBatch; |
| 239 |
| 240 private: |
| 241 GrBatchTarget* fBatchTarget; |
| 235 }; | 242 }; |
| 236 | 243 |
| 237 typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double. | 244 typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double. |
| 238 typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer; | 245 typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer; |
| 239 | 246 |
| 240 void onReset() SK_OVERRIDE; | 247 void onReset() SK_OVERRIDE; |
| 241 void onFlush() SK_OVERRIDE; | 248 void onFlush() SK_OVERRIDE; |
| 242 | 249 |
| 243 // overrides from GrDrawTarget | 250 // overrides from GrDrawTarget |
| 244 void onDraw(const GrGeometryProcessor*, const DrawInfo&, const PipelineInfo&) SK_OVERRIDE; | 251 void onDraw(const GrGeometryProcessor*, const DrawInfo&, const PipelineInfo&) SK_OVERRIDE; |
| (...skipping 56 matching lines...) |
| 301 // TODO: Use a single allocator for commands and records | 308 // TODO: Use a single allocator for commands and records |
| 302 enum { | 309 enum { |
| 303 kCmdBufferInitialSizeInBytes = 8 * 1024, | 310 kCmdBufferInitialSizeInBytes = 8 * 1024, |
| 304 kPathIdxBufferMinReserve = 2 * 64, // 64 uint16_t's | 311 kPathIdxBufferMinReserve = 2 * 64, // 64 uint16_t's |
| 305 kPathXformBufferMinReserve = 2 * 64, // 64 two-float transforms | 312 kPathXformBufferMinReserve = 2 * 64, // 64 two-float transforms |
| 306 }; | 313 }; |
| 307 | 314 |
| 308 CmdBuffer fCmdBuffer; | 315 CmdBuffer fCmdBuffer; |
| 309 SetState* fPrevState; | 316 SetState* fPrevState; |
| 310 SkTArray<GrTraceMarkerSet, false> fGpuCmdMarkers; | 317 SkTArray<GrTraceMarkerSet, false> fGpuCmdMarkers; |
| 311 SkTDArray<char> fPathIndexBuffer; | 318 SkChunkAlloc fPathIndexBuffer; |
| 312 SkTDArray<float> fPathTransformBuffer; | 319 SkChunkAlloc fPathTransformBuffer; |
| 313 uint32_t fDrawID; | 320 uint32_t fDrawID; |
| 314 GrBatchTarget fBatchTarget; | 321 GrBatchTarget fBatchTarget; |
| 315 // TODO hack until batch is everywhere | 322 // TODO hack until batch is everywhere |
| 316 DrawBatch* fDrawBatch; | 323 DrawBatch* fDrawBatch; |
| 317 | 324 |
| 318 // This will go away when everything uses batch. However, in the short term anything which | 325 // This will go away when everything uses batch. However, in the short term anything which |
| 319 // might be put into the GrInOrderDrawBuffer needs to make sure it closes the last batch | 326 // might be put into the GrInOrderDrawBuffer needs to make sure it closes the last batch |
| 320 void closeBatch() { | 327 void closeBatch() { |
| 321 if (fDrawBatch) { | 328 if (fDrawBatch) { |
| 322 fBatchTarget.resetNumberOfDraws(); | 329 fBatchTarget.resetNumberOfDraws(); |
| 323 fDrawBatch->execute(this, fPrevState); | 330 fDrawBatch->execute(this->getGpu(), fPrevState); |
| 324 fDrawBatch->fBatch->setNumberOfDraws(fBatchTarget.numberOfDraws()); | 331 fDrawBatch->fBatch->setNumberOfDraws(fBatchTarget.numberOfDraws()); |
| 325 fDrawBatch = NULL; | 332 fDrawBatch = NULL; |
| 326 } | 333 } |
| 327 } | 334 } |
| 328 | 335 |
| 329 typedef GrFlushToGpuDrawTarget INHERITED; | 336 typedef GrFlushToGpuDrawTarget INHERITED; |
| 330 }; | 337 }; |
| 331 | 338 |
| 332 #endif | 339 #endif |
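The core of this change is that recorded commands now play back directly against the GrGpu, via the new execute(GrGpu*, const SetState*) signature, instead of calling back into the GrInOrderDrawBuffer. The sketch below shows what a flush-time playback loop looks like under that signature. It is illustrative only: the helper name playbackCommands is made up (it would also need a declaration in the class), and the GrTRecorder iterator usage follows its usual next()/get() pattern rather than the patch's actual onFlush() body.

// Hypothetical helper, not part of this patch; written as a member so the nested
// CmdBuffer/Cmd/SetState types are visible.
void GrInOrderDrawBuffer::playbackCommands() {
    CmdBuffer::Iter iter(fCmdBuffer);
    const SetState* currentState = NULL;

    while (iter.next()) {
        if (Cmd::kSetState_Cmd == iter->type()) {
            // A SetState is never executed on its own; it becomes the state that the
            // commands following it are executed with.
            currentState = static_cast<const SetState*>(iter.get());
        } else {
            // Every other command now talks to the GrGpu directly instead of routing
            // back through the draw buffer.
            iter->execute(this->getGpu(), currentState);
        }
    }
}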
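A related detail worth calling out: DrawPaths now stores raw fIndices/fTransforms pointers where it previously stored integer locations, and the backing fPathIndexBuffer/fPathTransformBuffer members switch from SkTDArray to SkChunkAlloc. That only works because SkChunkAlloc never relocates a block once it has been handed out, whereas SkTDArray may move its storage as it grows. A minimal stand-alone sketch of the idea, assuming SkChunkAlloc::allocThrow() as the allocation call (copyIndices itself is illustrative, not code from the patch):

#include "SkChunkAlloc.h"
#include <string.h>

// Copies 16-bit path indices into chunk-allocated storage and returns a pointer that
// stays valid until the pool is reset -- which is why the recorded command can now
// hold a pointer instead of an offset.
static const uint16_t* copyIndices(SkChunkAlloc* pool, const uint16_t* indices, int count) {
    size_t bytes = count * sizeof(uint16_t);
    void* stable = pool->allocThrow(bytes);   // chunk allocations are never moved
    memcpy(stable, indices, bytes);
    return static_cast<const uint16_t*>(stable);
}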