| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #ifndef GrTargetCommands_DEFINED | 8 #ifndef GrTargetCommands_DEFINED |
| 9 #define GrTargetCommands_DEFINED | 9 #define GrTargetCommands_DEFINED |
| 10 | 10 |
| (...skipping 29 matching lines...) |
| 40 public: | 40 public: |
| 41 enum CmdType { | 41 enum CmdType { |
| 42 kDraw_CmdType = 1, | 42 kDraw_CmdType = 1, |
| 43 kStencilPath_CmdType = 2, | 43 kStencilPath_CmdType = 2, |
| 44 kSetState_CmdType = 3, | 44 kSetState_CmdType = 3, |
| 45 kClear_CmdType = 4, | 45 kClear_CmdType = 4, |
| 46 kCopySurface_CmdType = 5, | 46 kCopySurface_CmdType = 5, |
| 47 kDrawPath_CmdType = 6, | 47 kDrawPath_CmdType = 6, |
| 48 kDrawPaths_CmdType = 7, | 48 kDrawPaths_CmdType = 7, |
| 49 kDrawBatch_CmdType = 8, | 49 kDrawBatch_CmdType = 8, |
| 50 kXferBarrier_CmdType = 9, |
| 50 }; | 51 }; |
| 51 | 52 |
| 52 Cmd(CmdType type) : fMarkerID(-1), fType(type) {} | 53 Cmd(CmdType type) : fMarkerID(-1), fType(type) {} |
| 53 virtual ~Cmd() {} | 54 virtual ~Cmd() {} |
| 54 | 55 |
| 55 virtual void execute(GrGpu*, const SetState*) = 0; | 56 virtual void execute(GrGpu*, const SetState*) = 0; |
| 56 | 57 |
| 57 CmdType type() const { return fType; } | 58 CmdType type() const { return fType; } |
| 58 | 59 |
| 59 // trace markers | 60 // trace markers |
| (...skipping 76 matching lines...) |
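Editor's note on the hunk above: GrTargetCommands buffers typed Cmd records and replays them against the GPU later, via the pure virtual `execute(GrGpu*, const SetState*)`. A minimal self-contained sketch of that record-then-execute pattern (all names below are illustrative stand-ins, not Skia API):

```cpp
#include <memory>
#include <vector>

struct FakeGpu { int clears = 0; };      // stand-in for GrGpu

// Stand-in for GrTargetCommands::Cmd: a replayable record.
struct CmdSketch {
    virtual ~CmdSketch() {}
    virtual void execute(FakeGpu*) = 0;  // called once at replay time
};

struct ClearSketch : CmdSketch {
    void execute(FakeGpu* gpu) override { gpu->clears++; }
};

int main() {
    // Record now...
    std::vector<std::unique_ptr<CmdSketch>> buffer;
    buffer.push_back(std::unique_ptr<CmdSketch>(new ClearSketch));
    // ...replay later, in recording order.
    FakeGpu gpu;
    for (auto& c : buffer) c->execute(&gpu);
    return gpu.clears == 1 ? 0 : 1;
}
```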
| 136 // instanced draw. The caller must have already recorded a new draw state and clip if necessary. | 137 // instanced draw. The caller must have already recorded a new draw state and clip if necessary. |
| 137 int concatInstancedDraw(GrInOrderDrawBuffer*, const GrDrawTarget::DrawInfo&); | 138 int concatInstancedDraw(GrInOrderDrawBuffer*, const GrDrawTarget::DrawInfo&); |
| 138 | 139 |
| 139 bool SK_WARN_UNUSED_RESULT setupPipelineAndShouldDraw(GrInOrderDrawBuffer*, | 140 bool SK_WARN_UNUSED_RESULT setupPipelineAndShouldDraw(GrInOrderDrawBuffer*, |
| 140 const GrPrimitiveProcessor*, | 141 const GrPrimitiveProcessor*, |
| 141 const GrDrawTarget::PipelineInfo&); | 142 const GrDrawTarget::PipelineInfo&); |
| 142 bool SK_WARN_UNUSED_RESULT setupPipelineAndShouldDraw(GrInOrderDrawBuffer*, | 143 bool SK_WARN_UNUSED_RESULT setupPipelineAndShouldDraw(GrInOrderDrawBuffer*, |
| 143 GrBatch*, | 144 GrBatch*, |
| 144 const GrDrawTarget::PipelineInfo&); | 145 const GrDrawTarget::PipelineInfo&); |
| 145 | 146 |
| 147 void recordXferBarrierIfNecessary(GrInOrderDrawBuffer*, const GrDrawTarget::PipelineInfo&); |
| 148 |
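The body of the newly added recordXferBarrierIfNecessary is not part of this hunk. A hedged sketch of the shape it plausibly takes, assuming the xfer processor can report whether a barrier is needed and that GrTRecorder's append macro is used (both assumptions, not shown in this diff; the getters it uses on fPrevState are the ones added to SetState below):

```cpp
// Hypothetical sketch only -- the real body is outside this hunk.
void GrTargetCommands::recordXferBarrierIfNecessary(
        GrInOrderDrawBuffer* iodb, const GrDrawTarget::PipelineInfo& info) {
    SkASSERT(fPrevState);
    GrXferBarrierType barrierType;
    // Assumption: the xfer processor knows whether the pending draw reads
    // the destination in a way that requires a barrier first.
    if (!fPrevState->getXferProcessor()->willNeedXferBarrier(
            fPrevState->getRenderTarget(), *iodb->caps(), &barrierType)) {
        return;
    }
    // Assumption: GrNEW_APPEND_TO_RECORDER is GrTRecorder's append macro.
    XferBarrier* xb = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, XferBarrier, ());
    xb->fBarrierType = barrierType;
}
```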
| 146 struct Draw : public Cmd { | 149 struct Draw : public Cmd { |
| 147 Draw(const GrDrawTarget::DrawInfo& info) : Cmd(kDraw_CmdType), fInfo(info) {} | 150 Draw(const GrDrawTarget::DrawInfo& info) : Cmd(kDraw_CmdType), fInfo(info) {} |
| 148 | 151 |
| 149 void execute(GrGpu*, const SetState*) override; | 152 void execute(GrGpu*, const SetState*) override; |
| 150 | 153 |
| 151 GrDrawTarget::DrawInfo fInfo; | 154 GrDrawTarget::DrawInfo fInfo; |
| 152 }; | 155 }; |
| 153 | 156 |
| 154 struct StencilPath : public Cmd { | 157 struct StencilPath : public Cmd { |
| 155 StencilPath(const GrPath* path, GrRenderTarget* rt) | 158 StencilPath(const GrPath* path, GrRenderTarget* rt) |
| (...skipping 105 matching lines...) |
| 261 | 264 |
| 262 ~SetState() { reinterpret_cast<GrPipeline*>(fPipeline.get())->~GrPipeline(); } | 265 ~SetState() { reinterpret_cast<GrPipeline*>(fPipeline.get())->~GrPipeline(); } |
| 263 | 266 |
| 264 // This function is only for getting the location in memory where we will create our | 267 // This function is only for getting the location in memory where we will create our |
| 265 // pipeline object. | 268 // pipeline object. |
| 266 GrPipeline* pipelineLocation() { return reinterpret_cast<GrPipeline*>(fPipeline.get()); } | 269 GrPipeline* pipelineLocation() { return reinterpret_cast<GrPipeline*>(fPipeline.get()); } |
| 267 | 270 |
| 268 const GrPipeline* getPipeline() const { | 271 const GrPipeline* getPipeline() const { |
| 269 return reinterpret_cast<const GrPipeline*>(fPipeline.get()); | 272 return reinterpret_cast<const GrPipeline*>(fPipeline.get()); |
| 270 } | 273 } |
| 274 GrRenderTarget* getRenderTarget() const { |
| 275 return this->getPipeline()->getRenderTarget(); |
| 276 } |
| 277 const GrXferProcessor* getXferProcessor() const { |
| 278 return this->getPipeline()->getXferProcessor(); |
| 279 } |
| 271 | 280 |
| 272 void execute(GrGpu*, const SetState*) override; | 281 void execute(GrGpu*, const SetState*) override; |
| 273 | 282 |
| 274 typedef GrPendingProgramElement<const GrPrimitiveProcessor> ProgramPrimitiveProcessor; | 283 typedef GrPendingProgramElement<const GrPrimitiveProcessor> ProgramPrimitiveProcessor; |
| 275 ProgramPrimitiveProcessor fPrimitiveProcessor; | 284 ProgramPrimitiveProcessor fPrimitiveProcessor; |
| 276 SkAlignedSStorage<sizeof(GrPipeline)> fPipeline; | 285 SkAlignedSStorage<sizeof(GrPipeline)> fPipeline; |
| 277 GrProgramDesc fDesc; | 286 GrProgramDesc fDesc; |
| 278 GrBatchTracker fBatchTracker; | 287 GrBatchTracker fBatchTracker; |
| 279 }; | 288 }; |
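Note the idiom in SetState above: fPipeline is raw, suitably aligned storage (SkAlignedSStorage), the GrPipeline is placement-constructed at pipelineLocation(), and ~SetState() invokes the destructor by hand. A standalone illustration of that aligned-storage pattern in plain C++ (FakePipeline and HolderSketch are made-up stand-ins):

```cpp
#include <new>
#include <type_traits>

struct FakePipeline {                 // stand-in for GrPipeline
    explicit FakePipeline(int id) : fId(id) {}
    int fId;
};

struct HolderSketch {                 // stand-in for SetState's fPipeline field
    // Raw bytes with the right size/alignment, like SkAlignedSStorage.
    std::aligned_storage<sizeof(FakePipeline),
                         alignof(FakePipeline)>::type fStorage;

    FakePipeline* location() { return reinterpret_cast<FakePipeline*>(&fStorage); }

    // The holder never default-constructs the object; it only destroys it.
    ~HolderSketch() { this->location()->~FakePipeline(); }
};

int main() {
    HolderSketch h;
    new (h.location()) FakePipeline(7);   // placement-new into the raw bytes
    return h.location()->fId == 7 ? 0 : 1;
}
```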
| 280 | 289 |
| 281 struct DrawBatch : public Cmd { | 290 struct DrawBatch : public Cmd { |
| 282 DrawBatch(GrBatch* batch, GrBatchTarget* batchTarget) | 291 DrawBatch(GrBatch* batch, GrBatchTarget* batchTarget) |
| 283 : Cmd(kDrawBatch_CmdType) | 292 : Cmd(kDrawBatch_CmdType) |
| 284 , fBatch(SkRef(batch)) | 293 , fBatch(SkRef(batch)) |
| 285 , fBatchTarget(batchTarget) { | 294 , fBatchTarget(batchTarget) { |
| 286 SkASSERT(!batch->isUsed()); | 295 SkASSERT(!batch->isUsed()); |
| 287 } | 296 } |
| 288 | 297 |
| 289 void execute(GrGpu*, const SetState*) override; | 298 void execute(GrGpu*, const SetState*) override; |
| 290 | 299 |
| 291 // TODO it wouldn't be too hard to let batches allocate in the cmd buffer | 300 // TODO it wouldn't be too hard to let batches allocate in the cmd buffer |
| 292 SkAutoTUnref<GrBatch> fBatch; | 301 SkAutoTUnref<GrBatch> fBatch; |
| 293 | 302 |
| 294 private: | 303 private: |
| 295 GrBatchTarget* fBatchTarget; | 304 GrBatchTarget* fBatchTarget; |
| 296 }; | 305 }; |
| 297 | 306 |
| 307 struct XferBarrier : public Cmd { |
| 308 XferBarrier() : Cmd(kXferBarrier_CmdType) {} |
| 309 |
| 310 void execute(GrGpu*, const SetState*) override; |
| 311 |
| 312 GrXferBarrierType fBarrierType; |
| 313 }; |
| 314 |
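How the new XferBarrier command is replayed is likewise outside this hunk; a hedged guess at the execute body, assuming GrGpu grows a matching barrier entry point (the method name below is a guess, not confirmed by this diff):

```cpp
// Hypothetical sketch -- GrGpu::xferBarrier is assumed, not shown here.
void GrTargetCommands::XferBarrier::execute(GrGpu* gpu, const SetState*) {
    gpu->xferBarrier(fBarrierType);  // issue the barrier before the next draw
}
```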
| 298 static const int kCmdBufferInitialSizeInBytes = 8 * 1024; | 315 static const int kCmdBufferInitialSizeInBytes = 8 * 1024; |
| 299 | 316 |
| 300 typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double. | 317 typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double. |
| 301 typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer; | 318 typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer; |
| 302 | 319 |
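The TCmdAlign comment above is a claim about alignment that can be checked mechanically; this tiny standalone program (plain standard C++, nothing Skia-specific) prints the two alignments, which on common x86-64 ABIs come out as 8 and 16:

```cpp
#include <cstdio>

int main() {
    // If a Cmd subclass ever held a long double, void*-sized alignment
    // (typically 8) could fall short of its requirement (typically 16).
    std::printf("alignof(void*) = %zu, alignof(long double) = %zu\n",
                alignof(void*), alignof(long double));
    return 0;
}
```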
| 303 CmdBuffer fCmdBuffer; | 320 CmdBuffer fCmdBuffer; |
| 304 SetState* fPrevState; | 321 SetState* fPrevState; |
| 305 GrBatchTarget fBatchTarget; | 322 GrBatchTarget fBatchTarget; |
| 306 // TODO hack until batch is everywhere | 323 // TODO hack until batch is everywhere |
| 307 GrTargetCommands::DrawBatch* fDrawBatch; | 324 GrTargetCommands::DrawBatch* fDrawBatch; |
| 308 | 325 |
| 309 // This will go away when everything uses batch. However, in the short term anything which | 326 // This will go away when everything uses batch. However, in the short term anything which |
| 310 // might be put into the GrInOrderDrawBuffer needs to make sure it closes the last batch | 327 // might be put into the GrInOrderDrawBuffer needs to make sure it closes the last batch |
| 311 void closeBatch(); | 328 void closeBatch(); |
| 312 }; | 329 }; |
| 313 | 330 |
| 314 #endif | 331 #endif |
| 315 | 332 |