| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2011 Google Inc. | 2 * Copyright 2011 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "GrInOrderDrawBuffer.h" | 8 #include "GrInOrderDrawBuffer.h" |
| 9 | 9 |
| 10 #include "GrBufferAllocPool.h" | 10 #include "GrBufferAllocPool.h" |
| (...skipping 88 matching lines...) |
| 99 // allocated space, and leave enough for 50% growth over last time. | 99 // allocated space, and leave enough for 50% growth over last time. |
| 100 if (3 * buffer->count() < buffer->reserved() && buffer->reserved() > minReserve) { | 100 if (3 * buffer->count() < buffer->reserved() && buffer->reserved() > minReserve) { |
| 101 int reserve = SkTMax(minReserve, buffer->count() * 3 / 2); | 101 int reserve = SkTMax(minReserve, buffer->count() * 3 / 2); |
| 102 buffer->reset(); | 102 buffer->reset(); |
| 103 buffer->setReserve(reserve); | 103 buffer->setReserve(reserve); |
| 104 } else { | 104 } else { |
| 105 buffer->rewind(); | 105 buffer->rewind(); |
| 106 } | 106 } |
| 107 } | 107 } |
| 108 | 108 |
| 109 enum { | |
| 110 kTraceCmdBit = 0x80, | |
| 111 kCmdMask = 0x7f, | |
| 112 }; | |
| 113 | |
| 114 static inline uint8_t add_trace_bit(uint8_t cmd) { return cmd | kTraceCmdBit; } | |
| 115 | |
| 116 static inline uint8_t strip_trace_bit(uint8_t cmd) { return cmd & kCmdMask; } | |
| 117 | |
| 118 static inline bool cmd_has_trace_marker(uint8_t cmd) { return SkToBool(cmd & kTraceCmdBit); } | |
| 119 | |
| 120 void GrInOrderDrawBuffer::onDrawRect(GrPipelineBuilder* pipelineBuilder, | 109 void GrInOrderDrawBuffer::onDrawRect(GrPipelineBuilder* pipelineBuilder, |
| 121 GrColor color, | 110 GrColor color, |
| 122 const SkMatrix& viewMatrix, | 111 const SkMatrix& viewMatrix, |
| 123 const SkRect& rect, | 112 const SkRect& rect, |
| 124 const SkRect* localRect, | 113 const SkRect* localRect, |
| 125 const SkMatrix* localMatrix) { | 114 const SkMatrix* localMatrix) { |
| 126 GrPipelineBuilder::AutoRestoreEffects are(pipelineBuilder); | 115 GrPipelineBuilder::AutoRestoreEffects are(pipelineBuilder); |
| 127 | 116 |
| 128 // Go to device coords to allow batching across matrix changes | 117 // Go to device coords to allow batching across matrix changes |
| 129 SkMatrix invert = SkMatrix::I(); | 118 SkMatrix invert = SkMatrix::I(); |
| (...skipping 68 matching lines...) |
| 198 | 187 |
| 199 // we only attempt to concat the case when reserved verts are used with a client-specified index | 188 // we only attempt to concat the case when reserved verts are used with a client-specified index |
| 200 // buffer. To make this work with client-specified VBs we'd need to know if the VB was updated | 189 // buffer. To make this work with client-specified VBs we'd need to know if the VB was updated |
| 201 // between draws. | 190 // between draws. |
| 202 if (kReserved_GeometrySrcType != geomSrc.fVertexSrc || | 191 if (kReserved_GeometrySrcType != geomSrc.fVertexSrc || |
| 203 kBuffer_GeometrySrcType != geomSrc.fIndexSrc) { | 192 kBuffer_GeometrySrcType != geomSrc.fIndexSrc) { |
| 204 return 0; | 193 return 0; |
| 205 } | 194 } |
| 206 // Check if there is a draw info that is compatible that uses the same VB from the pool and | 195 // Check if there is a draw info that is compatible that uses the same VB from the pool and |
| 207 // the same IB | 196 // the same IB |
| 208 if (kDraw_Cmd != strip_trace_bit(fCmdBuffer.back().fType)) { | 197 if (Cmd::kDraw_Cmd != fCmdBuffer.back().type()) { |
| 209 return 0; | 198 return 0; |
| 210 } | 199 } |
| 211 | 200 |
| 212 Draw* draw = static_cast<Draw*>(&fCmdBuffer.back()); | 201 Draw* draw = static_cast<Draw*>(&fCmdBuffer.back()); |
| 213 | 202 |
| 214 if (!draw->fInfo.isInstanced() || | 203 if (!draw->fInfo.isInstanced() || |
| 215 draw->fInfo.primitiveType() != info.primitiveType() || | 204 draw->fInfo.primitiveType() != info.primitiveType() || |
| 216 draw->fInfo.verticesPerInstance() != info.verticesPerInstance() || | 205 draw->fInfo.verticesPerInstance() != info.verticesPerInstance() || |
| 217 draw->fInfo.indicesPerInstance() != info.indicesPerInstance() || | 206 draw->fInfo.indicesPerInstance() != info.indicesPerInstance() || |
| 218 draw->fInfo.vertexBuffer() != info.vertexBuffer() || | 207 draw->fInfo.vertexBuffer() != info.vertexBuffer() || |
| 219 draw->fInfo.indexBuffer() != geomSrc.fIndexBuffer) { | 208 draw->fInfo.indexBuffer() != geomSrc.fIndexBuffer) { |
| 220 return 0; | 209 return 0; |
| 221 } | 210 } |
| 222 if (draw->fInfo.startVertex() + draw->fInfo.vertexCount() != info.startVertex()) { | 211 if (draw->fInfo.startVertex() + draw->fInfo.vertexCount() != info.startVertex()) { |
| 223 return 0; | 212 return 0; |
| 224 } | 213 } |
| 225 | 214 |
| 226 // how many instances can be concat'ed onto draw given the size of the index buffer | 215 // how many instances can be concat'ed onto draw given the size of the index buffer |
| 227 int instancesToConcat = this->indexCountInCurrentSource() / info.indicesPerInstance(); | 216 int instancesToConcat = this->indexCountInCurrentSource() / info.indicesPerInstance(); |
| 228 instancesToConcat -= draw->fInfo.instanceCount(); | 217 instancesToConcat -= draw->fInfo.instanceCount(); |
| 229 instancesToConcat = SkTMin(instancesToConcat, info.instanceCount()); | 218 instancesToConcat = SkTMin(instancesToConcat, info.instanceCount()); |
| 230 | 219 |
| 231 draw->fInfo.adjustInstanceCount(instancesToConcat); | 220 draw->fInfo.adjustInstanceCount(instancesToConcat); |
| 232 | 221 |
| 233 // update last fGpuCmdMarkers to include any additional trace markers that have been added | 222 // update last fGpuCmdMarkers to include any additional trace markers that have been added |
| 234 if (this->getActiveTraceMarkers().count() > 0) { | 223 if (this->getActiveTraceMarkers().count() > 0) { |
| 235 if (cmd_has_trace_marker(draw->fType)) { | 224 if (draw->isTraced()) { |
| 236 fGpuCmdMarkers.back().addSet(this->getActiveTraceMarkers()); | 225 fGpuCmdMarkers.back().addSet(this->getActiveTraceMarkers()); |
| 237 } else { | 226 } else { |
| 238 fGpuCmdMarkers.push_back(this->getActiveTraceMarkers()); | 227 fGpuCmdMarkers.push_back(this->getActiveTraceMarkers()); |
| 239 draw->fType = add_trace_bit(draw->fType); | 228 draw->makeTraced(); |
| 240 } | 229 } |
| 241 } | 230 } |
| 242 | 231 |
| 243 return instancesToConcat; | 232 return instancesToConcat; |
| 244 } | 233 } |
| 245 | 234 |
| 246 void GrInOrderDrawBuffer::onDraw(const GrGeometryProcessor* gp, | 235 void GrInOrderDrawBuffer::onDraw(const GrGeometryProcessor* gp, |
| 247 const DrawInfo& info, | 236 const DrawInfo& info, |
| 248 const PipelineInfo& pipelineInfo) { | 237 const PipelineInfo& pipelineInfo) { |
| 249 SkASSERT(info.vertexBuffer() && (!info.isIndexed() || info.indexBuffer())); | 238 SkASSERT(info.vertexBuffer() && (!info.isIndexed() || info.indexBuffer())); |
| (...skipping 18 matching lines...) |
| 268 this->recordTraceMarkersIfNecessary(); | 257 this->recordTraceMarkersIfNecessary(); |
| 269 } | 258 } |
| 270 | 259 |
| 271 void GrInOrderDrawBuffer::onDrawBatch(GrBatch* batch, | 260 void GrInOrderDrawBuffer::onDrawBatch(GrBatch* batch, |
| 272 const PipelineInfo& pipelineInfo) { | 261 const PipelineInfo& pipelineInfo) { |
| 273 if (!this->setupPipelineAndShouldDraw(batch, pipelineInfo)) { | 262 if (!this->setupPipelineAndShouldDraw(batch, pipelineInfo)) { |
| 274 return; | 263 return; |
| 275 } | 264 } |
| 276 | 265 |
| 277 // Check if there is a Batch Draw we can batch with | 266 // Check if there is a Batch Draw we can batch with |
| 278 if (kDrawBatch_Cmd != strip_trace_bit(fCmdBuffer.back().fType)) { | 267 if (Cmd::kDrawBatch_Cmd != fCmdBuffer.back().type()) { |
| 279 fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch)); | 268 fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch)); |
| 280 return; | 269 return; |
| 281 } | 270 } |
| 282 | 271 |
| 283 DrawBatch* draw = static_cast<DrawBatch*>(&fCmdBuffer.back()); | 272 DrawBatch* draw = static_cast<DrawBatch*>(&fCmdBuffer.back()); |
| 284 if (draw->fBatch->combineIfPossible(batch)) { | 273 if (draw->fBatch->combineIfPossible(batch)) { |
| 285 return; | 274 return; |
| 286 } else { | 275 } else { |
| 287 this->closeBatch(); | 276 this->closeBatch(); |
| 288 fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch)); | 277 fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch)); |
| (...skipping 55 matching lines...) |
| 344 // Add padding to the index buffer so the indices are aligned properly. | 333 // Add padding to the index buffer so the indices are aligned properly. |
| 345 fPathIndexBuffer.append(indexBytes - misalign); | 334 fPathIndexBuffer.append(indexBytes - misalign); |
| 346 } | 335 } |
| 347 | 336 |
| 348 char* savedIndices = fPathIndexBuffer.append(count * indexBytes, | 337 char* savedIndices = fPathIndexBuffer.append(count * indexBytes, |
| 349 reinterpret_cast<const char*>(indices)); | 338 reinterpret_cast<const char*>(indices)); |
| 350 float* savedTransforms = fPathTransformBuffer.append( | 339 float* savedTransforms = fPathTransformBuffer.append( |
| 351 count * GrPathRendering::PathTransformSize(transformType), | 340 count * GrPathRendering::PathTransformSize(transformType), |
| 352 transformValues); | 341 transformValues); |
| 353 | 342 |
| 354 if (kDrawPaths_Cmd == strip_trace_bit(fCmdBuffer.back().fType)) { | 343 if (Cmd::kDrawPaths_Cmd == fCmdBuffer.back().type()) { |
| 355 // The previous command was also DrawPaths. Try to collapse this call into the one | 344 // The previous command was also DrawPaths. Try to collapse this call into the one |
| 356 // before. Note that stenciling all the paths at once, then covering, may not be | 345 // before. Note that stenciling all the paths at once, then covering, may not be |
| 357 // equivalent to two separate draw calls if there is overlap. Blending won't work, | 346 // equivalent to two separate draw calls if there is overlap. Blending won't work, |
| 358 // and the combined calls may also cancel each other's winding numbers in some | 347 // and the combined calls may also cancel each other's winding numbers in some |
| 359 // places. For now the winding numbers are only an issue if the fill is even/odd, | 348 // places. For now the winding numbers are only an issue if the fill is even/odd, |
| 360 // because DrawPaths is currently only used for glyphs, and glyphs in the same | 349 // because DrawPaths is currently only used for glyphs, and glyphs in the same |
| 361 // font tend to all wind in the same direction. | 350 // font tend to all wind in the same direction. |
| 362 DrawPaths* previous = static_cast<DrawPaths*>(&fCmdBuffer.back()); | 351 DrawPaths* previous = static_cast<DrawPaths*>(&fCmdBuffer.back()); |
| 363 if (pathRange == previous->pathRange() && | 352 if (pathRange == previous->pathRange() && |
| 364 indexType == previous->fIndexType && | 353 indexType == previous->fIndexType && |
| (...skipping 90 matching lines...) |
| 455 currentState = NULL; | 444 currentState = NULL; |
| 456 CmdBuffer::Iter iter(fCmdBuffer); | 445 CmdBuffer::Iter iter(fCmdBuffer); |
| 457 | 446 |
| 458 int currCmdMarker = 0; | 447 int currCmdMarker = 0; |
| 459 | 448 |
| 460 int i = 0; | 449 int i = 0; |
| 461 while (iter.next()) { | 450 while (iter.next()) { |
| 462 i++; | 451 i++; |
| 463 GrGpuTraceMarker newMarker("", -1); | 452 GrGpuTraceMarker newMarker("", -1); |
| 464 SkString traceString; | 453 SkString traceString; |
| 465 if (cmd_has_trace_marker(iter->fType)) { | 454 if (iter->isTraced()) { |
| 466 traceString = fGpuCmdMarkers[currCmdMarker].toString(); | 455 traceString = fGpuCmdMarkers[currCmdMarker].toString(); |
| 467 newMarker.fMarker = traceString.c_str(); | 456 newMarker.fMarker = traceString.c_str(); |
| 468 this->getGpu()->addGpuTraceMarker(&newMarker); | 457 this->getGpu()->addGpuTraceMarker(&newMarker); |
| 469 ++currCmdMarker; | 458 ++currCmdMarker; |
| 470 } | 459 } |
| 471 | 460 |
| 472 // TODO temporary hack | 461 // TODO temporary hack |
| 473 if (kDrawBatch_Cmd == strip_trace_bit(iter->fType)) { | 462 if (Cmd::kDrawBatch_Cmd == iter->type()) { |
| 474 DrawBatch* db = reinterpret_cast<DrawBatch*>(iter.get()); | 463 DrawBatch* db = reinterpret_cast<DrawBatch*>(iter.get()); |
| 475 fBatchTarget.flushNext(db->fBatch->numberOfDraws()); | 464 fBatchTarget.flushNext(db->fBatch->numberOfDraws()); |
| 476 continue; | 465 continue; |
| 477 } | 466 } |
| 478 | 467 |
| 479 bool isSetState = kSetState_Cmd == strip_trace_bit(iter->fType); | 468 if (Cmd::kSetState_Cmd == iter->type()) { |
| 480 if (isSetState) { | |
| 481 SetState* ss = reinterpret_cast<SetState*>(iter.get()); | 469 SetState* ss = reinterpret_cast<SetState*>(iter.get()); |
| 482 | 470 |
| 483 // TODO sometimes we have a prim proc, other times we have a GrBatch. Eventually we will | 471 // TODO sometimes we have a prim proc, other times we have a GrBatch. Eventually we will |
| 484 // only have GrBatch and we can delete this | 472 // only have GrBatch and we can delete this |
| 485 if (ss->fPrimitiveProcessor) { | 473 if (ss->fPrimitiveProcessor) { |
| 486 this->getGpu()->buildProgramDesc(&ss->fDesc, *ss->fPrimitiveProcessor, | 474 this->getGpu()->buildProgramDesc(&ss->fDesc, *ss->fPrimitiveProcessor, |
| 487 *ss->getPipeline(), | 475 *ss->getPipeline(), |
| 488 ss->fBatchTracker); | 476 ss->fBatchTracker); |
| 489 } | 477 } |
| 490 currentState = ss; | 478 currentState = ss; |
| 491 } else { | 479 } else { |
| 492 iter->execute(this, currentState); | 480 iter->execute(this, currentState); |
| 493 } | 481 } |
| 494 | 482 |
| 495 if (cmd_has_trace_marker(iter->fType)) { | 483 if (iter->isTraced()) { |
| 496 this->getGpu()->removeGpuTraceMarker(&newMarker); | 484 this->getGpu()->removeGpuTraceMarker(&newMarker); |
| 497 } | 485 } |
| 498 } | 486 } |
| 499 | 487 |
| 500 // TODO see copious notes about hack | 488 // TODO see copious notes about hack |
| 501 fBatchTarget.postFlush(); | 489 fBatchTarget.postFlush(); |
| 502 | 490 |
| 503 SkASSERT(fGpuCmdMarkers.count() == currCmdMarker); | 491 SkASSERT(fGpuCmdMarkers.count() == currCmdMarker); |
| 504 ++fDrawID; | 492 ++fDrawID; |
| 505 } | 493 } |
| (...skipping 115 matching lines...) |
| 621 } else { | 609 } else { |
| 622 this->closeBatch(); | 610 this->closeBatch(); |
| 623 fPrevState = ss; | 611 fPrevState = ss; |
| 624 this->recordTraceMarkersIfNecessary(); | 612 this->recordTraceMarkersIfNecessary(); |
| 625 } | 613 } |
| 626 return true; | 614 return true; |
| 627 } | 615 } |
| 628 | 616 |
| 629 void GrInOrderDrawBuffer::recordTraceMarkersIfNecessary() { | 617 void GrInOrderDrawBuffer::recordTraceMarkersIfNecessary() { |
| 630 SkASSERT(!fCmdBuffer.empty()); | 618 SkASSERT(!fCmdBuffer.empty()); |
| 631 SkASSERT(!cmd_has_trace_marker(fCmdBuffer.back().fType)); | 619 SkASSERT(!fCmdBuffer.back().isTraced()); |
| 632 const GrTraceMarkerSet& activeTraceMarkers = this->getActiveTraceMarkers(); | 620 const GrTraceMarkerSet& activeTraceMarkers = this->getActiveTraceMarkers(); |
| 633 if (activeTraceMarkers.count() > 0) { | 621 if (activeTraceMarkers.count() > 0) { |
| 634 fCmdBuffer.back().fType = add_trace_bit(fCmdBuffer.back().fType); | 622 fCmdBuffer.back().makeTraced(); |
| 635 fGpuCmdMarkers.push_back(activeTraceMarkers); | 623 fGpuCmdMarkers.push_back(activeTraceMarkers); |
| 636 } | 624 } |
| 637 } | 625 } |
| 638 | 626 |
| 639 void GrInOrderDrawBuffer::willReserveVertexAndIndexSpace(int vertexCount, | 627 void GrInOrderDrawBuffer::willReserveVertexAndIndexSpace(int vertexCount, |
| 640 size_t vertexStride, | 628 size_t vertexStride, |
| 641 int indexCount) { | 629 int indexCount) { |
| 642 this->closeBatch(); | 630 this->closeBatch(); |
| 643 | 631 |
| 644 this->INHERITED::willReserveVertexAndIndexSpace(vertexCount, vertexStride, indexCount); | 632 this->INHERITED::willReserveVertexAndIndexSpace(vertexCount, vertexStride, indexCount); |
| 645 } | 633 } |
| OLD | NEW |
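
The main refactor in this file replaces the deleted free functions (add_trace_bit, strip_trace_bit, cmd_has_trace_marker) and their kTraceCmdBit/kCmdMask enum with member functions on the Cmd base class: type(), isTraced(), and makeTraced(), plus scoped constants like Cmd::kDraw_Cmd. The header that declares Cmd is not part of this file's diff, so the following is a minimal sketch of what that class presumably looks like, assuming it keeps the bit packing of the deleted enum; only the member names and the Cmd::k*_Cmd constants are confirmed by the new call sites above, everything else is an assumption.

    #include <cstdint>

    class Cmd {
    public:
        // Illustrative values only; the real command list is declared in a
        // header not shown in this diff.
        enum CmdType {
            kDraw_Cmd = 1,
            kSetState_Cmd,
            kDrawPaths_Cmd,
            kDrawBatch_Cmd,
        };

        explicit Cmd(uint8_t type) : fType(type) {}

        // Low 7 bits hold the command type; the high bit flags that this
        // command has an associated entry in fGpuCmdMarkers.
        uint8_t type() const { return fType & kCmdMask; }
        bool isTraced() const { return (fType & kTraceCmdBit) != 0; }
        void makeTraced() { fType |= kTraceCmdBit; }

    private:
        enum {
            kTraceCmdBit = 0x80,
            kCmdMask     = 0x7f,
        };
        uint8_t fType;
    };

Whatever the real packing is, it is now an implementation detail: call sites can no longer compare fType against a command constant without the trace flag stripped, which is exactly the mistake the old strip_trace_bit call sites had to guard against by hand.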
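
Separately, the instance-concat arithmetic around old line 227 caps how many instances can be folded into the previous Draw by what the current index source can address. A worked example, with all four inputs invented purely for illustration (the real code reads them from the DrawInfo and geometry source, and uses SkTMin rather than std::min):

    #include <algorithm>
    #include <cstdio>

    int main() {
        // Hypothetical numbers, not taken from the code above.
        const int indexCountInCurrentSource = 600; // indices available in the source
        const int indicesPerInstance       = 6;    // e.g. a quad drawn as two triangles
        const int prevInstanceCount        = 90;   // instances already in the buffered Draw
        const int incomingInstanceCount    = 25;   // instances in the draw being appended

        // The same three steps as the concat path above:
        int instancesToConcat = indexCountInCurrentSource / indicesPerInstance; // 100 fit in total
        instancesToConcat -= prevInstanceCount;                                 // 10 slots remain
        instancesToConcat = std::min(instancesToConcat, incomingInstanceCount); // take at most 10

        printf("concatenated onto previous draw: %d\n", instancesToConcat);     // 10
        printf("left over for a new draw:        %d\n",
               incomingInstanceCount - instancesToConcat);                      // 15
        return 0;
    }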