| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2011 Google Inc. | 2 * Copyright 2011 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "GrInOrderDrawBuffer.h" | 8 #include "GrInOrderDrawBuffer.h" |
| 9 | 9 |
| 10 #include "GrBufferAllocPool.h" | |
| 11 #include "GrDefaultGeoProcFactory.h" | 10 #include "GrDefaultGeoProcFactory.h" |
| 12 #include "GrDrawTargetCaps.h" | 11 #include "GrDrawTargetCaps.h" |
| 13 #include "GrGpu.h" | 12 #include "GrGpu.h" |
| 14 #include "GrTemplates.h" | 13 #include "GrTemplates.h" |
| 15 #include "GrTextStrike.h" | 14 #include "GrTextStrike.h" |
| 16 #include "GrTexture.h" | 15 #include "GrTexture.h" |
| 17 | 16 |
| 18 GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu, | 17 GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu, |
| 19 GrVertexBufferAllocPool* vertexPool, | 18 GrVertexBufferAllocPool* vertexPool, |
| 20 GrIndexBufferAllocPool* indexPool) | 19 GrIndexBufferAllocPool* indexPool) |
| 21 : INHERITED(gpu->getContext()) | 20 : INHERITED(gpu, vertexPool, indexPool) |
| 22 , fCmdBuffer(kCmdBufferInitialSizeInBytes) | 21 , fCmdBuffer(kCmdBufferInitialSizeInBytes) |
| 23 , fPrevState(NULL) | 22 , fPrevState(NULL) |
| 24 , fDstGpu(gpu) | |
| 25 , fVertexPool(*vertexPool) | |
| 26 , fIndexPool(*indexPool) | |
| 27 , fFlushing(false) | |
| 28 , fDrawID(0) { | 23 , fDrawID(0) { |
| 29 | 24 |
| 30 fDstGpu->ref(); | |
| 31 fCaps.reset(SkRef(fDstGpu->caps())); | |
| 32 | |
| 33 SkASSERT(vertexPool); | 25 SkASSERT(vertexPool); |
| 34 SkASSERT(indexPool); | 26 SkASSERT(indexPool); |
| 35 | 27 |
| 36 fPathIndexBuffer.setReserve(kPathIdxBufferMinReserve); | 28 fPathIndexBuffer.setReserve(kPathIdxBufferMinReserve); |
| 37 fPathTransformBuffer.setReserve(kPathXformBufferMinReserve); | 29 fPathTransformBuffer.setReserve(kPathXformBufferMinReserve); |
| 38 | |
| 39 GeometryPoolState& poolState = fGeoPoolStateStack.push_back(); | |
| 40 poolState.fUsedPoolVertexBytes = 0; | |
| 41 poolState.fUsedPoolIndexBytes = 0; | |
| 42 #ifdef SK_DEBUG | |
| 43 poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0; | |
| 44 poolState.fPoolStartVertex = ~0; | |
| 45 poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0; | |
| 46 poolState.fPoolStartIndex = ~0; | |
| 47 #endif | |
| 48 this->reset(); | |
| 49 } | 30 } |
| 50 | 31 |
| 51 GrInOrderDrawBuffer::~GrInOrderDrawBuffer() { | 32 GrInOrderDrawBuffer::~GrInOrderDrawBuffer() { |
| 52 this->reset(); | 33 this->reset(); |
| 53 // This must be called before the GrDrawTarget destructor | |
| 54 this->releaseGeometry(); | |
| 55 fDstGpu->unref(); | |
| 56 } | 34 } |
| 57 | 35 |
| 58 //////////////////////////////////////////////////////////////////////////////// | 36 //////////////////////////////////////////////////////////////////////////////// |
| 59 | 37 |
| 60 namespace { | 38 namespace { |
| 61 void get_vertex_bounds(const void* vertices, | 39 void get_vertex_bounds(const void* vertices, |
| 62 size_t vertexSize, | 40 size_t vertexSize, |
| 63 int vertexCount, | 41 int vertexCount, |
| 64 SkRect* bounds) { | 42 SkRect* bounds) { |
| 65 SkASSERT(vertexSize >= sizeof(SkPoint)); | 43 SkASSERT(vertexSize >= sizeof(SkPoint)); |
| (...skipping 137 matching lines...) | |
| 203 kBuffer_GeometrySrcType != geomSrc.fIndexSrc) { | 181 kBuffer_GeometrySrcType != geomSrc.fIndexSrc) { |
| 204 return 0; | 182 return 0; |
| 205 } | 183 } |
| 206 // Check if there is a draw info that is compatible that uses the same VB from the pool and | 184 // Check if there is a draw info that is compatible that uses the same VB from the pool and |
| 207 // the same IB | 185 // the same IB |
| 208 if (kDraw_Cmd != strip_trace_bit(fCmdBuffer.back().fType)) { | 186 if (kDraw_Cmd != strip_trace_bit(fCmdBuffer.back().fType)) { |
| 209 return 0; | 187 return 0; |
| 210 } | 188 } |
| 211 | 189 |
| 212 Draw* draw = static_cast<Draw*>(&fCmdBuffer.back()); | 190 Draw* draw = static_cast<Draw*>(&fCmdBuffer.back()); |
| 213 GeometryPoolState& poolState = fGeoPoolStateStack.back(); | |
| 214 const GrVertexBuffer* vertexBuffer = poolState.fPoolVertexBuffer; | |
| 215 | 191 |
| 216 if (!draw->fInfo.isInstanced() || | 192 if (!draw->fInfo.isInstanced() || |
| 217 draw->fInfo.verticesPerInstance() != info.verticesPerInstance() || | 193 draw->fInfo.verticesPerInstance() != info.verticesPerInstance() || |
| 218 draw->fInfo.indicesPerInstance() != info.indicesPerInstance() || | 194 draw->fInfo.indicesPerInstance() != info.indicesPerInstance() || |
| 219 draw->fInfo.vertexBuffer() != vertexBuffer || | 195 draw->fInfo.vertexBuffer() != info.vertexBuffer() || |
| 220 draw->fInfo.indexBuffer() != geomSrc.fIndexBuffer) { | 196 draw->fInfo.indexBuffer() != geomSrc.fIndexBuffer) { |
| 221 return 0; | 197 return 0; |
| 222 } | 198 } |
| 223 // info does not yet account for the offset from the start of the pool's VB while the previous | 199 if (draw->fInfo.startVertex() + draw->fInfo.vertexCount() != info.startVertex()) { |
| 224 // draw record does. | |
| 225 int adjustedStartVertex = poolState.fPoolStartVertex + info.startVertex(); | |
| 226 if (draw->fInfo.startVertex() + draw->fInfo.vertexCount() != adjustedStartVertex) { | |
| 227 return 0; | 200 return 0; |
| 228 } | 201 } |
| 229 | 202 |
| 230 SkASSERT(poolState.fPoolStartVertex == draw->fInfo.startVertex() + draw->fInfo.vertexCount()); | |
| 231 | |
| 232 // how many instances can be concat'ed onto draw given the size of the index buffer | 203 // how many instances can be concat'ed onto draw given the size of the index buffer |
| 233 int instancesToConcat = this->indexCountInCurrentSource() / info.indicesPerInstance(); | 204 int instancesToConcat = this->indexCountInCurrentSource() / info.indicesPerInstance(); |
| 234 instancesToConcat -= draw->fInfo.instanceCount(); | 205 instancesToConcat -= draw->fInfo.instanceCount(); |
| 235 instancesToConcat = SkTMin(instancesToConcat, info.instanceCount()); | 206 instancesToConcat = SkTMin(instancesToConcat, info.instanceCount()); |
| 236 | 207 |
| 237 // update the amount of reserved vertex data actually referenced in draws | |
| 238 size_t vertexBytes = instancesToConcat * info.verticesPerInstance() * ds.getVertexStride(); | |
| 239 poolState.fUsedPoolVertexBytes = SkTMax(poolState.fUsedPoolVertexBytes, vertexBytes); | |
| 240 | |
| 241 draw->fInfo.adjustInstanceCount(instancesToConcat); | 208 draw->fInfo.adjustInstanceCount(instancesToConcat); |
| 242 | 209 |
| 243 // update last fGpuCmdMarkers to include any additional trace markers that have been added | 210 // update last fGpuCmdMarkers to include any additional trace markers that have been added |
| 244 if (this->getActiveTraceMarkers().count() > 0) { | 211 if (this->getActiveTraceMarkers().count() > 0) { |
| 245 if (cmd_has_trace_marker(draw->fType)) { | 212 if (cmd_has_trace_marker(draw->fType)) { |
| 246 fGpuCmdMarkers.back().addSet(this->getActiveTraceMarkers()); | 213 fGpuCmdMarkers.back().addSet(this->getActiveTraceMarkers()); |
| 247 } else { | 214 } else { |
| 248 fGpuCmdMarkers.push_back(this->getActiveTraceMarkers()); | 215 fGpuCmdMarkers.push_back(this->getActiveTraceMarkers()); |
| 249 draw->fType = add_trace_bit(draw->fType); | 216 draw->fType = add_trace_bit(draw->fType); |
| 250 } | 217 } |
| 251 } | 218 } |
| 252 | 219 |
| 253 return instancesToConcat; | 220 return instancesToConcat; |
| 254 } | 221 } |
| 255 | 222 |
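Note on the arithmetic in concatInstancedDraw() above: the concatenation bound reduces to the standalone sketch below. This is a restatement for review, not Skia API; the parameters mirror the DrawInfo accessors visible in this hunk and instances_to_concat is a hypothetical helper.

    // How many whole instances of the incoming draw fit onto the previous
    // draw record, given the capacity of the shared index source.
    static int instances_to_concat(int indexCountInSource,   // indexCountInCurrentSource()
                                   int indicesPerInstance,   // info.indicesPerInstance()
                                   int recordedInstances,    // draw->fInfo.instanceCount()
                                   int incomingInstances) {  // info.instanceCount()
        // Whole instances the index source can address in total.
        int maxInstances = indexCountInSource / indicesPerInstance;
        // Room left after what the previous draw record already uses.
        int room = maxInstances - recordedInstances;
        // Never concat more than the incoming draw actually brings.
        return room < incomingInstances ? room : incomingInstances;
    }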
| 256 void GrInOrderDrawBuffer::onDraw(const GrDrawState& ds, | 223 void GrInOrderDrawBuffer::onDraw(const GrDrawState& ds, |
| 257 const DrawInfo& info, | 224 const DrawInfo& info, |
| 258 const ScissorState& scissorState, | 225 const ScissorState& scissorState, |
| 259 const GrDeviceCoordTexture* dstCopy) { | 226 const GrDeviceCoordTexture* dstCopy) { |
| 260 SkASSERT(info.vertexBuffer() && (!info.isIndexed() || info.indexBuffer())); | 227 SkASSERT(info.vertexBuffer() && (!info.isIndexed() || info.indexBuffer())); |
| 261 | 228 |
| 262 GeometryPoolState& poolState = fGeoPoolStateStack.back(); | |
| 263 | |
| 264 if (!this->recordStateAndShouldDraw(ds, GrGpu::PrimTypeToDrawType(info.primitiveType()), | 229 if (!this->recordStateAndShouldDraw(ds, GrGpu::PrimTypeToDrawType(info.primitiveType()), |
| 265 scissorState, dstCopy)) { | 230 scissorState, dstCopy)) { |
| 266 return; | 231 return; |
| 267 } | 232 } |
| 268 | 233 |
| 269 Draw* draw; | 234 Draw* draw; |
| 270 if (info.isInstanced()) { | 235 if (info.isInstanced()) { |
| 271 int instancesConcated = this->concatInstancedDraw(ds, info); | 236 int instancesConcated = this->concatInstancedDraw(ds, info); |
| 272 if (info.instanceCount() > instancesConcated) { | 237 if (info.instanceCount() > instancesConcated) { |
| 273 draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info)); | 238 draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info)); |
| 274 draw->fInfo.adjustInstanceCount(-instancesConcated); | 239 draw->fInfo.adjustInstanceCount(-instancesConcated); |
| 275 } else { | 240 } else { |
| 276 return; | 241 return; |
| 277 } | 242 } |
| 278 } else { | 243 } else { |
| 279 draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info)); | 244 draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info)); |
| 280 } | 245 } |
| 281 this->recordTraceMarkersIfNecessary(); | 246 this->recordTraceMarkersIfNecessary(); |
| 282 | |
| 283 // Adjust the starting vertex and index when we are using reserved or array sources to | |
| 284 // compensate for the fact that the data was inserted into a larger vb/ib owned by the pool. | |
| 285 if (kBuffer_GeometrySrcType != this->getGeomSrc().fVertexSrc) { | |
| 286 size_t bytes = (info.vertexCount() + info.startVertex()) * ds.getVertexStride(); | |
| 287 poolState.fUsedPoolVertexBytes = SkTMax(poolState.fUsedPoolVertexBytes, bytes); | |
| 288 draw->fInfo.adjustStartVertex(poolState.fPoolStartVertex); | |
| 289 } | |
| 290 | |
| 291 if (info.isIndexed() && kBuffer_GeometrySrcType != this->getGeomSrc().fIndexSrc) { | |
| 292 size_t bytes = (info.indexCount() + info.startIndex()) * sizeof(uint16_t); | |
| 293 poolState.fUsedPoolIndexBytes = SkTMax(poolState.fUsedPoolIndexBytes, bytes); | |
| 294 draw->fInfo.adjustStartIndex(poolState.fPoolStartIndex); | |
| 295 } | |
| 296 } | 247 } |
| 297 | 248 |
| 298 void GrInOrderDrawBuffer::onStencilPath(const GrDrawState& ds, | 249 void GrInOrderDrawBuffer::onStencilPath(const GrDrawState& ds, |
| 299 const GrPath* path, | 250 const GrPath* path, |
| 300 const GrClipMaskManager::ScissorState& scissorState, | 251 const GrClipMaskManager::ScissorState& scissorState, |
| 301 const GrStencilSettings& stencilSettings) { | 252 const GrStencilSettings& stencilSettings) { |
| 302 // Only compare the subset of GrDrawState relevant to path stenciling? | 253 // Only compare the subset of GrDrawState relevant to path stenciling? |
| 303 if (!this->recordStateAndShouldDraw(ds, GrGpu::kStencilPath_DrawType, scissorState, NULL)) { | 254 if (!this->recordStateAndShouldDraw(ds, GrGpu::kStencilPath_DrawType, scissorState, NULL)) { |
| 304 return; | 255 return; |
| 305 } | 256 } |
| (...skipping 41 matching lines...) | |
| 347 } | 298 } |
| 348 | 299 |
| 349 char* savedIndices = fPathIndexBuffer.append(count * indexBytes, | 300 char* savedIndices = fPathIndexBuffer.append(count * indexBytes, |
| 350 reinterpret_cast<const char*>(indices)); | 301 reinterpret_cast<const char*>(indices)); |
| 351 float* savedTransforms = fPathTransformBuffer.append( | 302 float* savedTransforms = fPathTransformBuffer.append( |
| 352 count * GrPathRendering::PathTransformSize(transformType), | 303 count * GrPathRendering::PathTransformSize(transformType), |
| 353 transformValues); | 304 transformValues); |
| 354 | 305 |
| 355 if (kDrawPaths_Cmd == strip_trace_bit(fCmdBuffer.back().fType)) { | 306 if (kDrawPaths_Cmd == strip_trace_bit(fCmdBuffer.back().fType)) { |
| 356 // The previous command was also DrawPaths. Try to collapse this call into the one | 307 // The previous command was also DrawPaths. Try to collapse this call into the one |
| 357 // before. Note that stencilling all the paths at once, then covering, may not be | 308 // before. Note that stenciling all the paths at once, then covering, may not be |
| 358 // equivalent to two separate draw calls if there is overlap. Blending won't work, | 309 // equivalent to two separate draw calls if there is overlap. Blending won't work, |
| 359 // and the combined calls may also cancel each other's winding numbers in some | 310 // and the combined calls may also cancel each other's winding numbers in some |
| 360 // places. For now the winding numbers are only an issue if the fill is even/odd, | 311 // places. For now the winding numbers are only an issue if the fill is even/odd, |
| 361 // because DrawPaths is currently only used for glyphs, and glyphs in the same | 312 // because DrawPaths is currently only used for glyphs, and glyphs in the same |
| 362 // font tend to all wind in the same direction. | 313 // font tend to all wind in the same direction. |
| 363 DrawPaths* previous = static_cast<DrawPaths*>(&fCmdBuffer.back()); | 314 DrawPaths* previous = static_cast<DrawPaths*>(&fCmdBuffer.back()); |
| 364 if (pathRange == previous->pathRange() && | 315 if (pathRange == previous->pathRange() && |
| 365 indexType == previous->fIndexType && | 316 indexType == previous->fIndexType && |
| 366 transformType == previous->fTransformType && | 317 transformType == previous->fTransformType && |
| 367 stencilSettings == previous->fStencilSettings && | 318 stencilSettings == previous->fStencilSettings && |
| (...skipping 48 matching lines...) | |
| 416 void GrInOrderDrawBuffer::discard(GrRenderTarget* renderTarget) { | 367 void GrInOrderDrawBuffer::discard(GrRenderTarget* renderTarget) { |
| 417 SkASSERT(renderTarget); | 368 SkASSERT(renderTarget); |
| 418 if (!this->caps()->discardRenderTargetSupport()) { | 369 if (!this->caps()->discardRenderTargetSupport()) { |
| 419 return; | 370 return; |
| 420 } | 371 } |
| 421 Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget)); | 372 Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget)); |
| 422 clr->fColor = GrColor_ILLEGAL; | 373 clr->fColor = GrColor_ILLEGAL; |
| 423 this->recordTraceMarkersIfNecessary(); | 374 this->recordTraceMarkersIfNecessary(); |
| 424 } | 375 } |
| 425 | 376 |
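For context on discard() above: it reuses the Clear command with GrColor_ILLEGAL as a sentinel color, and playback branches on that value (see Clear::execute further down). A minimal sketch of the dispatch, with a simplified free-function signature in place of the command object:

    static void play_clear(GrGpu* gpu, GrRenderTarget* rt, GrColor color,
                           const SkIRect& rect, bool canIgnoreRect) {
        if (GrColor_ILLEGAL == color) {
            // Sentinel recorded by discard(): no color was ever meant to be written.
            gpu->discard(rt);
        } else {
            gpu->clear(&rect, color, canIgnoreRect, rt);
        }
    }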
| 426 void GrInOrderDrawBuffer::setDrawBuffers(DrawInfo* info) { | 377 void GrInOrderDrawBuffer::onReset() { |
| 427 GeometryPoolState& poolState = fGeoPoolStateStack.back(); | |
| 428 if (kBuffer_GeometrySrcType == this->getGeomSrc().fVertexSrc) { | |
| 429 info->setVertexBuffer(this->getGeomSrc().fVertexBuffer); | |
| 430 } else { | |
| 431 info->setVertexBuffer(poolState.fPoolVertexBuffer); | |
| 432 } | |
| 433 | |
| 434 if (info->isIndexed()) { | |
| 435 if (kBuffer_GeometrySrcType == this->getGeomSrc().fIndexSrc) { | |
| 436 info->setIndexBuffer(this->getGeomSrc().fIndexBuffer); | |
| 437 } else { | |
| 438 info->setIndexBuffer(poolState.fPoolIndexBuffer); | |
| 439 } | |
| 440 } | |
| 441 } | |
| 442 | |
| 443 void GrInOrderDrawBuffer::reset() { | |
| 444 SkASSERT(1 == fGeoPoolStateStack.count()); | |
| 445 this->resetVertexSource(); | |
| 446 this->resetIndexSource(); | |
| 447 | |
| 448 fCmdBuffer.reset(); | 378 fCmdBuffer.reset(); |
| 449 fPrevState = NULL; | 379 fPrevState = NULL; |
| 450 fVertexPool.reset(); | |
| 451 fIndexPool.reset(); | |
| 452 reset_data_buffer(&fPathIndexBuffer, kPathIdxBufferMinReserve); | 380 reset_data_buffer(&fPathIndexBuffer, kPathIdxBufferMinReserve); |
| 453 reset_data_buffer(&fPathTransformBuffer, kPathXformBufferMinReserve); | 381 reset_data_buffer(&fPathTransformBuffer, kPathXformBufferMinReserve); |
| 454 fGpuCmdMarkers.reset(); | 382 fGpuCmdMarkers.reset(); |
| 455 } | 383 } |
| 456 | 384 |
| 457 void GrInOrderDrawBuffer::flush() { | 385 void GrInOrderDrawBuffer::onFlush() { |
| 458 if (fFlushing) { | |
| 459 return; | |
| 460 } | |
| 461 | |
| 462 this->getContext()->getFontCache()->updateTextures(); | |
| 463 | |
| 464 SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fVertexSrc); | |
| 465 SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fIndexSrc); | |
| 466 | |
| 467 if (fCmdBuffer.empty()) { | 386 if (fCmdBuffer.empty()) { |
| 468 return; | 387 return; |
| 469 } | 388 } |
| 470 | 389 |
| 471 GrAutoTRestore<bool> flushRestore(&fFlushing); | |
| 472 fFlushing = true; | |
| 473 | |
| 474 fVertexPool.unmap(); | |
| 475 fIndexPool.unmap(); | |
| 476 | 390 |
| 477 CmdBuffer::Iter iter(fCmdBuffer); | 391 CmdBuffer::Iter iter(fCmdBuffer); |
| 478 | 392 |
| 479 int currCmdMarker = 0; | 393 int currCmdMarker = 0; |
| 480 fDstGpu->saveActiveTraceMarkers(); | |
| 481 | 394 |
| 482 // Updated every time we find a set state cmd to reflect the current state in the playback | 395 // Updated every time we find a set state cmd to reflect the current state in the playback |
| 483 // stream. | 396 // stream. |
| 484 const GrOptDrawState* currentOptState = NULL; | 397 const GrOptDrawState* currentOptState = NULL; |
| 485 | 398 |
| 486 while (iter.next()) { | 399 while (iter.next()) { |
| 487 GrGpuTraceMarker newMarker("", -1); | 400 GrGpuTraceMarker newMarker("", -1); |
| 488 SkString traceString; | 401 SkString traceString; |
| 489 if (cmd_has_trace_marker(iter->fType)) { | 402 if (cmd_has_trace_marker(iter->fType)) { |
| 490 traceString = fGpuCmdMarkers[currCmdMarker].toString(); | 403 traceString = fGpuCmdMarkers[currCmdMarker].toString(); |
| 491 newMarker.fMarker = traceString.c_str(); | 404 newMarker.fMarker = traceString.c_str(); |
| 492 fDstGpu->addGpuTraceMarker(&newMarker); | 405 this->getGpu()->addGpuTraceMarker(&newMarker); |
| 493 ++currCmdMarker; | 406 ++currCmdMarker; |
| 494 } | 407 } |
| 495 | 408 |
| 496 if (kSetState_Cmd == strip_trace_bit(iter->fType)) { | 409 if (kSetState_Cmd == strip_trace_bit(iter->fType)) { |
| 497 SetState* ss = reinterpret_cast<SetState*>(iter.get()); | 410 SetState* ss = reinterpret_cast<SetState*>(iter.get()); |
| 498 currentOptState = &ss->fState; | 411 currentOptState = &ss->fState; |
| 499 } else { | 412 } else { |
| 500 iter->execute(this, currentOptState); | 413 iter->execute(this, currentOptState); |
| 501 } | 414 } |
| 502 | 415 |
| 503 if (cmd_has_trace_marker(iter->fType)) { | 416 if (cmd_has_trace_marker(iter->fType)) { |
| 504 fDstGpu->removeGpuTraceMarker(&newMarker); | 417 this->getGpu()->removeGpuTraceMarker(&newMarker); |
| 505 } | 418 } |
| 506 } | 419 } |
| 507 | 420 |
| 508 fDstGpu->restoreActiveTraceMarkers(); | |
| 509 SkASSERT(fGpuCmdMarkers.count() == currCmdMarker); | 421 SkASSERT(fGpuCmdMarkers.count() == currCmdMarker); |
| 510 | |
| 511 this->reset(); | |
| 512 ++fDrawID; | 422 ++fDrawID; |
| 513 } | 423 } |
| 514 | 424 |
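The loop in onFlush() above follows a record-then-replay contract: kSetState_Cmd entries only advance the "current state" pointer, and every other command executes against the most recently seen state. A simplified sketch of that contract (hypothetical Cmd interface, not the GrTRecorder types used here):

    struct Cmd {
        virtual ~Cmd() {}
        // Non-NULL only for state-setting commands.
        virtual const GrOptDrawState* asSetState() const { return NULL; }
        virtual void execute(GrInOrderDrawBuffer*, const GrOptDrawState*) {}
    };

    static void playback(GrInOrderDrawBuffer* buf, Cmd* const cmds[], int count) {
        const GrOptDrawState* current = NULL;
        for (int i = 0; i < count; ++i) {
            if (const GrOptDrawState* ss = cmds[i]->asSetState()) {
                current = ss;                    // a state change does no GPU work itself
            } else {
                cmds[i]->execute(buf, current);  // draws/clears replay against it
            }
        }
    }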
| 515 void GrInOrderDrawBuffer::Draw::execute(GrInOrderDrawBuffer* buf, const GrOptDrawState* optState) { | 425 void GrInOrderDrawBuffer::Draw::execute(GrInOrderDrawBuffer* buf, const GrOptDrawState* optState) { |
| 516 SkASSERT(optState); | 426 SkASSERT(optState); |
| 517 buf->fDstGpu->draw(*optState, fInfo); | 427 buf->getGpu()->draw(*optState, fInfo); |
| 518 } | 428 } |
| 519 | 429 |
| 520 void GrInOrderDrawBuffer::StencilPath::execute(GrInOrderDrawBuffer* buf, | 430 void GrInOrderDrawBuffer::StencilPath::execute(GrInOrderDrawBuffer* buf, |
| 521 const GrOptDrawState* optState) { | 431 const GrOptDrawState* optState) { |
| 522 SkASSERT(optState); | 432 SkASSERT(optState); |
| 523 buf->fDstGpu->stencilPath(*optState, this->path(), fStencilSettings); | 433 buf->getGpu()->stencilPath(*optState, this->path(), fStencilSettings); |
| 524 } | 434 } |
| 525 | 435 |
| 526 void GrInOrderDrawBuffer::DrawPath::execute(GrInOrderDrawBuffer* buf, | 436 void GrInOrderDrawBuffer::DrawPath::execute(GrInOrderDrawBuffer* buf, |
| 527 const GrOptDrawState* optState) { | 437 const GrOptDrawState* optState) { |
| 528 SkASSERT(optState); | 438 SkASSERT(optState); |
| 529 buf->fDstGpu->drawPath(*optState, this->path(), fStencilSettings); | 439 buf->getGpu()->drawPath(*optState, this->path(), fStencilSettings); |
| 530 } | 440 } |
| 531 | 441 |
| 532 void GrInOrderDrawBuffer::DrawPaths::execute(GrInOrderDrawBuffer* buf, | 442 void GrInOrderDrawBuffer::DrawPaths::execute(GrInOrderDrawBuffer* buf, |
| 533 const GrOptDrawState* optState) { | 443 const GrOptDrawState* optState) { |
| 534 SkASSERT(optState); | 444 SkASSERT(optState); |
| 535 buf->fDstGpu->drawPaths(*optState, this->pathRange(), | 445 buf->getGpu()->drawPaths(*optState, this->pathRange(), |
| 536 &buf->fPathIndexBuffer[fIndicesLocation], fIndexType, | 446 &buf->fPathIndexBuffer[fIndicesLocation], fIndexType, |
| 537 &buf->fPathTransformBuffer[fTransformsLocation], fTransformType, | 447 &buf->fPathTransformBuffer[fTransformsLocation], fTransformType, |
| 538 fCount, fStencilSettings); | 448 fCount, fStencilSettings); |
| 539 } | 449 } |
| 540 | 450 |
| 541 void GrInOrderDrawBuffer::SetState::execute(GrInOrderDrawBuffer*, const GrOptDrawState*) {} | 451 void GrInOrderDrawBuffer::SetState::execute(GrInOrderDrawBuffer*, const GrOptDrawState*) {} |
| 542 | 452 |
| 543 void GrInOrderDrawBuffer::Clear::execute(GrInOrderDrawBuffer* buf, const GrOptDrawState*) { | 453 void GrInOrderDrawBuffer::Clear::execute(GrInOrderDrawBuffer* buf, const GrOptDrawState*) { |
| 544 if (GrColor_ILLEGAL == fColor) { | 454 if (GrColor_ILLEGAL == fColor) { |
| 545 buf->fDstGpu->discard(this->renderTarget()); | 455 buf->getGpu()->discard(this->renderTarget()); |
| 546 } else { | 456 } else { |
| 547 buf->fDstGpu->clear(&fRect, fColor, fCanIgnoreRect, this->renderTarget()); | 457 buf->getGpu()->clear(&fRect, fColor, fCanIgnoreRect, this->renderTarget()); |
| 548 } | 458 } |
| 549 } | 459 } |
| 550 | 460 |
| 551 void GrInOrderDrawBuffer::ClearStencilClip::execute(GrInOrderDrawBuffer* buf, | 461 void GrInOrderDrawBuffer::ClearStencilClip::execute(GrInOrderDrawBuffer* buf, |
| 552 const GrOptDrawState*) { | 462 const GrOptDrawState*) { |
| 553 buf->fDstGpu->clearStencilClip(fRect, fInsideClip, this->renderTarget()); | 463 buf->getGpu()->clearStencilClip(fRect, fInsideClip, this->renderTarget()); |
| 554 } | 464 } |
| 555 | 465 |
| 556 void GrInOrderDrawBuffer::CopySurface::execute(GrInOrderDrawBuffer* buf, const GrOptDrawState*) { | 466 void GrInOrderDrawBuffer::CopySurface::execute(GrInOrderDrawBuffer* buf, const GrOptDrawState*) { |
| 557 buf->fDstGpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint); | 467 buf->getGpu()->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint); |
| 558 } | 468 } |
| 559 | 469 |
| 560 bool GrInOrderDrawBuffer::onCopySurface(GrSurface* dst, | 470 bool GrInOrderDrawBuffer::onCopySurface(GrSurface* dst, |
| 561 GrSurface* src, | 471 GrSurface* src, |
| 562 const SkIRect& srcRect, | 472 const SkIRect& srcRect, |
| 563 const SkIPoint& dstPoint) { | 473 const SkIPoint& dstPoint) { |
| 564 if (fDstGpu->canCopySurface(dst, src, srcRect, dstPoint)) { | 474 if (getGpu()->canCopySurface(dst, src, srcRect, dstPoint)) { |
| 565 CopySurface* cs = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, CopySurface, (dst, src)); | 475 CopySurface* cs = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, CopySurface, (dst, src)); |
| 566 cs->fSrcRect = srcRect; | 476 cs->fSrcRect = srcRect; |
| 567 cs->fDstPoint = dstPoint; | 477 cs->fDstPoint = dstPoint; |
| 568 this->recordTraceMarkersIfNecessary(); | 478 this->recordTraceMarkersIfNecessary(); |
| 569 return true; | 479 return true; |
| 570 } | 480 } |
| 571 return false; | 481 return false; |
| 572 } | 482 } |
| 573 | 483 |
| 574 bool GrInOrderDrawBuffer::onCanCopySurface(const GrSurface* dst, | 484 bool GrInOrderDrawBuffer::onCanCopySurface(const GrSurface* dst, |
| 575 const GrSurface* src, | 485 const GrSurface* src, |
| 576 const SkIRect& srcRect, | 486 const SkIRect& srcRect, |
| 577 const SkIPoint& dstPoint) { | 487 const SkIPoint& dstPoint) { |
| 578 return fDstGpu->canCopySurface(dst, src, srcRect, dstPoint); | 488 return getGpu()->canCopySurface(dst, src, srcRect, dstPoint); |
| 579 } | 489 } |
| 580 | 490 |
| 581 bool GrInOrderDrawBuffer::onInitCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) { | 491 bool GrInOrderDrawBuffer::onInitCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) { |
| 582 return fDstGpu->initCopySurfaceDstDesc(src, desc); | 492 return getGpu()->initCopySurfaceDstDesc(src, desc); |
| 583 } | |
| 584 | |
| 585 void GrInOrderDrawBuffer::willReserveVertexAndIndexSpace(int vertexCount, | |
| 586 size_t vertexStride, | |
| 587 int indexCount) { | |
| 588 // We use geometryHints() to know whether to flush the draw buffer. We | |
| 589 // can't flush if we are inside an unbalanced pushGeometrySource. | |
| 590 // Moreover, flushing blows away vertex and index data that was | |
| 591 // previously reserved. So if the vertex or index data is pulled from | |
| 592 // reserved space and won't be released by this request then we can't | |
| 593 // flush. | |
| 594 bool insideGeoPush = fGeoPoolStateStack.count() > 1; | |
| 595 | |
| 596 bool unreleasedVertexSpace = | |
| 597 !vertexCount && | |
| 598 kReserved_GeometrySrcType == this->getGeomSrc().fVertexSrc; | |
| 599 | |
| 600 bool unreleasedIndexSpace = | |
| 601 !indexCount && | |
| 602 kReserved_GeometrySrcType == this->getGeomSrc().fIndexSrc; | |
| 603 | |
| 604 int vcount = vertexCount; | |
| 605 int icount = indexCount; | |
| 606 | |
| 607 if (!insideGeoPush && | |
| 608 !unreleasedVertexSpace && | |
| 609 !unreleasedIndexSpace && | |
| 610 this->geometryHints(vertexStride, &vcount, &icount)) { | |
| 611 this->flush(); | |
| 612 } | |
| 613 } | |
| 614 | |
| 615 bool GrInOrderDrawBuffer::geometryHints(size_t vertexStride, | |
| 616 int* vertexCount, | |
| 617 int* indexCount) const { | |
| 618 // we will recommend a flush if the data could fit in a single | |
| 619 // preallocated buffer but none are left and it can't fit | |
| 620 // in the current buffer (which may not be prealloced). | |
| 621 bool flush = false; | |
| 622 if (indexCount) { | |
| 623 int32_t currIndices = fIndexPool.currentBufferIndices(); | |
| 624 if (*indexCount > currIndices && | |
| 625 (!fIndexPool.preallocatedBuffersRemaining() && | |
| 626 *indexCount <= fIndexPool.preallocatedBufferIndices())) { | |
| 627 | |
| 628 flush = true; | |
| 629 } | |
| 630 *indexCount = currIndices; | |
| 631 } | |
| 632 if (vertexCount) { | |
| 633 int32_t currVertices = fVertexPool.currentBufferVertices(vertexStride); | |
| 634 if (*vertexCount > currVertices && | |
| 635 (!fVertexPool.preallocatedBuffersRemaining() && | |
| 636 *vertexCount <= fVertexPool.preallocatedBufferVertices(vertexStride))) { | |
| 637 | |
| 638 flush = true; | |
| 639 } | |
| 640 *vertexCount = currVertices; | |
| 641 } | |
| 642 return flush; | |
| 643 } | |
| 644 | |
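On the deleted geometryHints() above: it recommends a flush only when a request misses the current pool buffer but would fit in a fresh preallocated one, i.e. when flushing would actually free usable space. A worked restatement with assumed numbers (hypothetical helper, not the pool API):

    static bool should_recommend_flush(int requested, int leftInCurrentBuffer,
                                       int preallocatedBuffersRemaining,
                                       int preallocatedBufferCapacity) {
        return requested > leftInCurrentBuffer &&       // does not fit where we are
               0 == preallocatedBuffersRemaining &&     // no fresh buffer to fall back on
               requested <= preallocatedBufferCapacity; // but a post-flush buffer fits it
    }
    // e.g. should_recommend_flush(150, 100, 0, 200) -> true  (flush frees a usable buffer)
    //      should_recommend_flush(300, 100, 0, 200) -> false (flushing would not help)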
| 645 bool GrInOrderDrawBuffer::onReserveVertexSpace(size_t vertexSize, | |
| 646 int vertexCount, | |
| 647 void** vertices) { | |
| 648 GeometryPoolState& poolState = fGeoPoolStateStack.back(); | |
| 649 SkASSERT(vertexCount > 0); | |
| 650 SkASSERT(vertices); | |
| 651 SkASSERT(0 == poolState.fUsedPoolVertexBytes); | |
| 652 | |
| 653 *vertices = fVertexPool.makeSpace(vertexSize, | |
| 654 vertexCount, | |
| 655 &poolState.fPoolVertexBuffer, | |
| 656 &poolState.fPoolStartVertex); | |
| 657 return SkToBool(*vertices); | |
| 658 } | |
| 659 | |
| 660 bool GrInOrderDrawBuffer::onReserveIndexSpace(int indexCount, void** indices) { | |
| 661 GeometryPoolState& poolState = fGeoPoolStateStack.back(); | |
| 662 SkASSERT(indexCount > 0); | |
| 663 SkASSERT(indices); | |
| 664 SkASSERT(0 == poolState.fUsedPoolIndexBytes); | |
| 665 | |
| 666 *indices = fIndexPool.makeSpace(indexCount, | |
| 667 &poolState.fPoolIndexBuffer, | |
| 668 &poolState.fPoolStartIndex); | |
| 669 return SkToBool(*indices); | |
| 670 } | |
| 671 | |
| 672 void GrInOrderDrawBuffer::releaseReservedVertexSpace() { | |
| 673 GeometryPoolState& poolState = fGeoPoolStateStack.back(); | |
| 674 const GeometrySrcState& geoSrc = this->getGeomSrc(); | |
| 675 | |
| 676 // If we get a release vertex space call then our current source should either be reserved | |
| 677 // or array (which we copied into reserved space). | |
| 678 SkASSERT(kReserved_GeometrySrcType == geoSrc.fVertexSrc); | |
| 679 | |
| 680 // When the caller reserved vertex buffer space we gave it back a pointer | |
| 681 // provided by the vertex buffer pool. At each draw we tracked the largest | |
| 682 // offset into the pool's pointer that was referenced. Now we return to the | |
| 683 // pool any portion at the tail of the allocation that no draw referenced. | |
| 684 size_t reservedVertexBytes = geoSrc.fVertexSize * geoSrc.fVertexCount; | |
| 685 fVertexPool.putBack(reservedVertexBytes - poolState.fUsedPoolVertexBytes); | |
| 686 poolState.fUsedPoolVertexBytes = 0; | |
| 687 poolState.fPoolVertexBuffer = NULL; | |
| 688 poolState.fPoolStartVertex = 0; | |
| 689 } | |
| 690 | |
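On the put-back accounting in releaseReservedVertexSpace() above (and its index twin just below): the pool hands out a whole reserved block, draws record how much of it they actually referenced, and the unreferenced tail is returned. A minimal sketch of that arithmetic, assuming used never exceeds reserved:

    static size_t unused_tail_bytes(size_t reservedBytes, size_t usedBytes) {
        SkASSERT(usedBytes <= reservedBytes); // draws can only reference what was reserved
        return reservedBytes - usedBytes;     // handed back to the pool for reuse
    }
    // Usage mirroring the code above (names from this file):
    //   fVertexPool.putBack(unused_tail_bytes(geoSrc.fVertexSize * geoSrc.fVertexCount,
    //                                         poolState.fUsedPoolVertexBytes));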
| 691 void GrInOrderDrawBuffer::releaseReservedIndexSpace() { | |
| 692 GeometryPoolState& poolState = fGeoPoolStateStack.back(); | |
| 693 const GeometrySrcState& geoSrc = this->getGeomSrc(); | |
| 694 | |
| 695 // If we get a release index space call then our current source should either be reserved | |
| 696 // or array (which we copied into reserved space). | |
| 697 SkASSERT(kReserved_GeometrySrcType == geoSrc.fIndexSrc); | |
| 698 | |
| 699 // Similar to releaseReservedVertexSpace we return any unused portion at | |
| 700 // the tail | |
| 701 size_t reservedIndexBytes = sizeof(uint16_t) * geoSrc.fIndexCount; | |
| 702 fIndexPool.putBack(reservedIndexBytes - poolState.fUsedPoolIndexBytes); | |
| 703 poolState.fUsedPoolIndexBytes = 0; | |
| 704 poolState.fPoolIndexBuffer = NULL; | |
| 705 poolState.fPoolStartIndex = 0; | |
| 706 } | |
| 707 | |
| 708 void GrInOrderDrawBuffer::geometrySourceWillPush() { | |
| 709 GeometryPoolState& poolState = fGeoPoolStateStack.push_back(); | |
| 710 poolState.fUsedPoolVertexBytes = 0; | |
| 711 poolState.fUsedPoolIndexBytes = 0; | |
| 712 #ifdef SK_DEBUG | |
| 713 poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0; | |
| 714 poolState.fPoolStartVertex = ~0; | |
| 715 poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0; | |
| 716 poolState.fPoolStartIndex = ~0; | |
| 717 #endif | |
| 718 } | |
| 719 | |
| 720 void GrInOrderDrawBuffer::geometrySourceWillPop(const GeometrySrcState& restoredState) { | |
| 721 SkASSERT(fGeoPoolStateStack.count() > 1); | |
| 722 fGeoPoolStateStack.pop_back(); | |
| 723 GeometryPoolState& poolState = fGeoPoolStateStack.back(); | |
| 724 // we have to assume that any slack we had in our vertex/index data | |
| 725 // is now unreleasable because data may have been appended later in the | |
| 726 // pool. | |
| 727 if (kReserved_GeometrySrcType == restoredState.fVertexSrc) { | |
| 728 poolState.fUsedPoolVertexBytes = restoredState.fVertexSize * restoredState.fVertexCount; | |
| 729 } | |
| 730 if (kReserved_GeometrySrcType == restoredState.fIndexSrc) { | |
| 731 poolState.fUsedPoolIndexBytes = sizeof(uint16_t) * restoredState.fIndexC
ount; | |
| 732 } | |
| 733 } | 493 } |
| 734 | 494 |
| 735 bool GrInOrderDrawBuffer::recordStateAndShouldDraw(const GrDrawState& ds, | 495 bool GrInOrderDrawBuffer::recordStateAndShouldDraw(const GrDrawState& ds, |
| 736 GrGpu::DrawType drawType, | 496 GrGpu::DrawType drawType, |
| 737 const GrClipMaskManager::ScissorState& scissor, | 497 const GrClipMaskManager::ScissorState& scissor, |
| 738 const GrDeviceCoordTexture* dstCopy) { | 498 const GrDeviceCoordTexture* dstCopy) { |
| 739 SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, | 499 SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, |
| 740 (ds, fDstGpu, scissor, dstCopy, drawType)); | 500 (ds, this->getGpu(), scissor, dstCopy, drawType)); |
| 741 if (ss->fState.mustSkip()) { | 501 if (ss->fState.mustSkip()) { |
| 742 fCmdBuffer.pop_back(); | 502 fCmdBuffer.pop_back(); |
| 743 return false; | 503 return false; |
| 744 } | 504 } |
| 745 if (fPrevState && *fPrevState == ss->fState) { | 505 if (fPrevState && *fPrevState == ss->fState) { |
| 746 fCmdBuffer.pop_back(); | 506 fCmdBuffer.pop_back(); |
| 747 } else { | 507 } else { |
| 748 fPrevState = &ss->fState; | 508 fPrevState = &ss->fState; |
| 749 this->recordTraceMarkersIfNecessary(); | 509 this->recordTraceMarkersIfNecessary(); |
| 750 } | 510 } |
| 751 return true; | 511 return true; |
| 752 } | 512 } |
| 753 | 513 |
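recordStateAndShouldDraw() above uses an optimistic record-then-dedupe pattern: append the SetState, then pop it back off if the draw must be skipped or the state equals the previously recorded one. A simplified sketch (hypothetical State with mustSkip() and operator==; std::deque stands in for GrTRecorder because both keep element addresses stable on append):

    #include <deque>

    template <typename State>
    bool record_state(std::deque<State>* cmds, const State& candidate,
                      const State** prev) {
        cmds->push_back(candidate);
        if (cmds->back().mustSkip()) {
            cmds->pop_back();  // the draw would be a no-op; record nothing
            return false;
        }
        if (*prev && **prev == cmds->back()) {
            cmds->pop_back();  // identical to the last recorded state; reuse it
        } else {
            *prev = &cmds->back();  // this becomes the new "previous" state
        }
        return true;
    }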
| 754 void GrInOrderDrawBuffer::recordTraceMarkersIfNecessary() { | 514 void GrInOrderDrawBuffer::recordTraceMarkersIfNecessary() { |
| 755 SkASSERT(!fCmdBuffer.empty()); | 515 SkASSERT(!fCmdBuffer.empty()); |
| 756 SkASSERT(!cmd_has_trace_marker(fCmdBuffer.back().fType)); | 516 SkASSERT(!cmd_has_trace_marker(fCmdBuffer.back().fType)); |
| 757 const GrTraceMarkerSet& activeTraceMarkers = this->getActiveTraceMarkers(); | 517 const GrTraceMarkerSet& activeTraceMarkers = this->getActiveTraceMarkers(); |
| 758 if (activeTraceMarkers.count() > 0) { | 518 if (activeTraceMarkers.count() > 0) { |
| 759 fCmdBuffer.back().fType = add_trace_bit(fCmdBuffer.back().fType); | 519 fCmdBuffer.back().fType = add_trace_bit(fCmdBuffer.back().fType); |
| 760 fGpuCmdMarkers.push_back(activeTraceMarkers); | 520 fGpuCmdMarkers.push_back(activeTraceMarkers); |
| 761 } | 521 } |
| 762 } | 522 } |