| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2011 Google Inc. | 2 * Copyright 2011 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "GrInOrderDrawBuffer.h" | 8 #include "GrInOrderDrawBuffer.h" |
| 9 | 9 |
| 10 #include "GrBufferAllocPool.h" | 10 #include "GrBufferAllocPool.h" |
| (...skipping 115 matching lines...) |
| 126 kTraceCmdBit = 0x80, | 126 kTraceCmdBit = 0x80, |
| 127 kCmdMask = 0x7f, | 127 kCmdMask = 0x7f, |
| 128 }; | 128 }; |
| 129 | 129 |
| 130 static inline uint8_t add_trace_bit(uint8_t cmd) { return cmd | kTraceCmdBit; } | 130 static inline uint8_t add_trace_bit(uint8_t cmd) { return cmd | kTraceCmdBit; } |
| 131 | 131 |
| 132 static inline uint8_t strip_trace_bit(uint8_t cmd) { return cmd & kCmdMask; } | 132 static inline uint8_t strip_trace_bit(uint8_t cmd) { return cmd & kCmdMask; } |
| 133 | 133 |
| 134 static inline bool cmd_has_trace_marker(uint8_t cmd) { return SkToBool(cmd & kTraceCmdBit); } | 134 static inline bool cmd_has_trace_marker(uint8_t cmd) { return SkToBool(cmd & kTraceCmdBit); } |
| 135 | 135 |
| 136 void GrInOrderDrawBuffer::onDrawRect(const SkRect& rect, | 136 void GrInOrderDrawBuffer::onDrawRect(GrDrawState* ds, |
| | 137 const SkRect& rect, |
| 137 const SkRect* localRect, | 138 const SkRect* localRect, |
| 138 const SkMatrix* localMatrix) { | 139 const SkMatrix* localMatrix) { |
| 139 GrDrawState* drawState = this->drawState(); | 140 GrDrawState::AutoRestoreEffects are(ds); |
| 140 GrDrawState::AutoRestoreEffects are(drawState); | |
| 141 | 141 |
| 142 GrColor color = drawState->getColor(); | 142 GrColor color = ds->getColor(); |
| | 143 set_vertex_attributes(ds, SkToBool(localRect), color); |
| 143 | 144 |
| 144 set_vertex_attributes(drawState, SkToBool(localRect), color); | 145 AutoReleaseGeometry geo(this, 4, ds->getVertexStride(), 0); |
| 145 | |
| 146 AutoReleaseGeometry geo(this, 4, 0); | |
| 147 if (!geo.succeeded()) { | 146 if (!geo.succeeded()) { |
| 148 SkDebugf("Failed to get space for vertices!\n"); | 147 SkDebugf("Failed to get space for vertices!\n"); |
| 149 return; | 148 return; |
| 150 } | 149 } |
| 151 | 150 |
| 152 // Go to device coords to allow batching across matrix changes | 151 // Go to device coords to allow batching across matrix changes |
| 153 SkMatrix matrix = drawState->getViewMatrix(); | 152 SkMatrix matrix = ds->getViewMatrix(); |
| 154 | 153 |
| 155 // When the caller has provided an explicit source rect for a stage then we don't want to | 154 // When the caller has provided an explicit source rect for a stage then we don't want to |
| 156 // modify that stage's matrix. Otherwise if the effect is generating its source rect from | 155 // modify that stage's matrix. Otherwise if the effect is generating its source rect from |
| 157 // the vertex positions then we have to account for the view matrix change. | 156 // the vertex positions then we have to account for the view matrix change. |
| 158 GrDrawState::AutoViewMatrixRestore avmr; | 157 GrDrawState::AutoViewMatrixRestore avmr; |
| 159 if (!avmr.setIdentity(drawState)) { | 158 if (!avmr.setIdentity(ds)) { |
| 160 return; | 159 return; |
| 161 } | 160 } |
| 162 | 161 |
| 163 size_t vstride = drawState->getVertexStride(); | 162 size_t vstride = ds->getVertexStride(); |
| 164 | 163 |
| 165 geo.positions()->setRectFan(rect.fLeft, rect.fTop, rect.fRight, rect.fBottom, vstride); | 164 geo.positions()->setRectFan(rect.fLeft, rect.fTop, rect.fRight, rect.fBottom, vstride); |
| 166 matrix.mapPointsWithStride(geo.positions(), vstride, 4); | 165 matrix.mapPointsWithStride(geo.positions(), vstride, 4); |
| 167 | 166 |
| 168 SkRect devBounds; | 167 SkRect devBounds; |
| 169 // since we already computed the dev verts, set the bounds hint. This will help us avoid | 168 // since we already computed the dev verts, set the bounds hint. This will help us avoid |
| 170 // unnecessary clipping in our onDraw(). | 169 // unnecessary clipping in our onDraw(). |
| 171 get_vertex_bounds(geo.vertices(), vstride, 4, &devBounds); | 170 get_vertex_bounds(geo.vertices(), vstride, 4, &devBounds); |
| 172 | 171 |
| 173 if (localRect) { | 172 if (localRect) { |
| 174 static const int kLocalOffset = sizeof(SkPoint) + sizeof(GrColor); | 173 static const int kLocalOffset = sizeof(SkPoint) + sizeof(GrColor); |
| 175 SkPoint* coords = GrTCast<SkPoint*>(GrTCast<intptr_t>(geo.vertices()) + kLocalOffset); | 174 SkPoint* coords = GrTCast<SkPoint*>(GrTCast<intptr_t>(geo.vertices()) + kLocalOffset); |
| 176 coords->setRectFan(localRect->fLeft, localRect->fTop, | 175 coords->setRectFan(localRect->fLeft, localRect->fTop, |
| 177 localRect->fRight, localRect->fBottom, | 176 localRect->fRight, localRect->fBottom, |
| 178 vstride); | 177 vstride); |
| 179 if (localMatrix) { | 178 if (localMatrix) { |
| 180 localMatrix->mapPointsWithStride(coords, vstride, 4); | 179 localMatrix->mapPointsWithStride(coords, vstride, 4); |
| 181 } | 180 } |
| 182 } | 181 } |
| 183 | 182 |
| 184 static const int kColorOffset = sizeof(SkPoint); | 183 static const int kColorOffset = sizeof(SkPoint); |
| 185 GrColor* vertColor = GrTCast<GrColor*>(GrTCast<intptr_t>(geo.vertices()) + kColorOffset); | 184 GrColor* vertColor = GrTCast<GrColor*>(GrTCast<intptr_t>(geo.vertices()) + kColorOffset); |
| 186 for (int i = 0; i < 4; ++i) { | 185 for (int i = 0; i < 4; ++i) { |
| 187 *vertColor = color; | 186 *vertColor = color; |
| 188 vertColor = (GrColor*) ((intptr_t) vertColor + vstride); | 187 vertColor = (GrColor*) ((intptr_t) vertColor + vstride); |
| 189 } | 188 } |
| 190 | 189 |
| 191 this->setIndexSourceToBuffer(this->getContext()->getQuadIndexBuffer()); | 190 this->setIndexSourceToBuffer(this->getContext()->getQuadIndexBuffer()); |
| 192 this->drawIndexedInstances(kTriangles_GrPrimitiveType, 1, 4, 6, &devBounds); | 191 this->drawIndexedInstances(ds, kTriangles_GrPrimitiveType, 1, 4, 6, &devBounds); |
| 193 | |
| 194 // to ensure that stashing the drawState ptr is valid | |
| 195 SkASSERT(this->drawState() == drawState); | |
| 196 } | 192 } |
| 197 | 193 |
| 198 int GrInOrderDrawBuffer::concatInstancedDraw(const DrawInfo& info, | 194 int GrInOrderDrawBuffer::concatInstancedDraw(const GrDrawState& ds, |
| | 195 const DrawInfo& info, |
| 199 const GrClipMaskManager::ScissorState& scissorState) { | 196 const GrClipMaskManager::ScissorState& scissorState) { |
| 200 SkASSERT(!fCmdBuffer.empty()); | 197 SkASSERT(!fCmdBuffer.empty()); |
| 201 SkASSERT(info.isInstanced()); | 198 SkASSERT(info.isInstanced()); |
| 202 | 199 |
| 203 const GeometrySrcState& geomSrc = this->getGeomSrc(); | 200 const GeometrySrcState& geomSrc = this->getGeomSrc(); |
| 204 const GrDrawState& drawState = this->getDrawState(); | |
| 205 | 201 |
| 206 // we only attempt to concat the case when reserved verts are used with a client-specified index | 202 // we only attempt to concat the case when reserved verts are used with a client-specified index |
| 207 // buffer. To make this work with client-specified VBs we'd need to know if the VB was updated | 203 // buffer. To make this work with client-specified VBs we'd need to know if the VB was updated |
| 208 // between draws. | 204 // between draws. |
| 209 if (kReserved_GeometrySrcType != geomSrc.fVertexSrc || | 205 if (kReserved_GeometrySrcType != geomSrc.fVertexSrc || |
| 210 kBuffer_GeometrySrcType != geomSrc.fIndexSrc) { | 206 kBuffer_GeometrySrcType != geomSrc.fIndexSrc) { |
| 211 return 0; | 207 return 0; |
| 212 } | 208 } |
| 213 // Check if there is a draw info that is compatible that uses the same VB from the pool and | 209 // Check if there is a draw info that is compatible that uses the same VB from the pool and |
| 214 // the same IB | 210 // the same IB |
| (...skipping 21 matching lines...) |
| 236 } | 232 } |
| 237 | 233 |
| 238 SkASSERT(poolState.fPoolStartVertex == draw->fInfo.startVertex() + draw->fInfo.vertexCount()); | 234 SkASSERT(poolState.fPoolStartVertex == draw->fInfo.startVertex() + draw->fInfo.vertexCount()); |
| 239 | 235 |
| 240 // how many instances can be concat'ed onto draw given the size of the index buffer | 236 // how many instances can be concat'ed onto draw given the size of the index buffer |
| 241 int instancesToConcat = this->indexCountInCurrentSource() / info.indicesPerInstance(); | 237 int instancesToConcat = this->indexCountInCurrentSource() / info.indicesPerInstance(); |
| 242 instancesToConcat -= draw->fInfo.instanceCount(); | 238 instancesToConcat -= draw->fInfo.instanceCount(); |
| 243 instancesToConcat = SkTMin(instancesToConcat, info.instanceCount()); | 239 instancesToConcat = SkTMin(instancesToConcat, info.instanceCount()); |
| 244 | 240 |
| 245 // update the amount of reserved vertex data actually referenced in draws | 241 // update the amount of reserved vertex data actually referenced in draws |
| 246 size_t vertexBytes = instancesToConcat * info.verticesPerInstance() * | 242 size_t vertexBytes = instancesToConcat * info.verticesPerInstance() * ds.getVertexStride(); |
| 247 drawState.getVertexStride(); | |
| 248 poolState.fUsedPoolVertexBytes = SkTMax(poolState.fUsedPoolVertexBytes, vertexBytes); | 243 poolState.fUsedPoolVertexBytes = SkTMax(poolState.fUsedPoolVertexBytes, vertexBytes); |
| 249 | 244 |
| 250 draw->fInfo.adjustInstanceCount(instancesToConcat); | 245 draw->fInfo.adjustInstanceCount(instancesToConcat); |
| 251 | 246 |
| 252 // update last fGpuCmdMarkers to include any additional trace markers that have been added | 247 // update last fGpuCmdMarkers to include any additional trace markers that have been added |
| 253 if (this->getActiveTraceMarkers().count() > 0) { | 248 if (this->getActiveTraceMarkers().count() > 0) { |
| 254 if (cmd_has_trace_marker(draw->fType)) { | 249 if (cmd_has_trace_marker(draw->fType)) { |
| 255 fGpuCmdMarkers.back().addSet(this->getActiveTraceMarkers()); | 250 fGpuCmdMarkers.back().addSet(this->getActiveTraceMarkers()); |
| 256 } else { | 251 } else { |
| 257 fGpuCmdMarkers.push_back(this->getActiveTraceMarkers()); | 252 fGpuCmdMarkers.push_back(this->getActiveTraceMarkers()); |
| 258 draw->fType = add_trace_bit(draw->fType); | 253 draw->fType = add_trace_bit(draw->fType); |
| 259 } | 254 } |
| 260 } | 255 } |
| 261 | 256 |
| 262 return instancesToConcat; | 257 return instancesToConcat; |
| 263 } | 258 } |
| 264 | 259 |
| 265 void GrInOrderDrawBuffer::onDraw(const DrawInfo& info, | 260 void GrInOrderDrawBuffer::onDraw(const GrDrawState& ds, |
| | 261 const DrawInfo& info, |
| 266 const GrClipMaskManager::ScissorState& scissorState) { | 262 const GrClipMaskManager::ScissorState& scissorState) { |
| 267 GeometryPoolState& poolState = fGeoPoolStateStack.back(); | 263 GeometryPoolState& poolState = fGeoPoolStateStack.back(); |
| 268 const GrDrawState& drawState = this->getDrawState(); | |
| 269 | 264 |
| 270 this->recordStateIfNecessary(GrGpu::PrimTypeToDrawType(info.primitiveType()), | 265 this->recordStateIfNecessary(ds, |
| | 266 GrGpu::PrimTypeToDrawType(info.primitiveType()), |
| 271 info.getDstCopy()); | 267 info.getDstCopy()); |
| 272 | 268 |
| 273 const GrVertexBuffer* vb; | 269 const GrVertexBuffer* vb; |
| 274 if (kBuffer_GeometrySrcType == this->getGeomSrc().fVertexSrc) { | 270 if (kBuffer_GeometrySrcType == this->getGeomSrc().fVertexSrc) { |
| 275 vb = this->getGeomSrc().fVertexBuffer; | 271 vb = this->getGeomSrc().fVertexBuffer; |
| 276 } else { | 272 } else { |
| 277 vb = poolState.fPoolVertexBuffer; | 273 vb = poolState.fPoolVertexBuffer; |
| 278 } | 274 } |
| 279 | 275 |
| 280 const GrIndexBuffer* ib = NULL; | 276 const GrIndexBuffer* ib = NULL; |
| 281 if (info.isIndexed()) { | 277 if (info.isIndexed()) { |
| 282 if (kBuffer_GeometrySrcType == this->getGeomSrc().fIndexSrc) { | 278 if (kBuffer_GeometrySrcType == this->getGeomSrc().fIndexSrc) { |
| 283 ib = this->getGeomSrc().fIndexBuffer; | 279 ib = this->getGeomSrc().fIndexBuffer; |
| 284 } else { | 280 } else { |
| 285 ib = poolState.fPoolIndexBuffer; | 281 ib = poolState.fPoolIndexBuffer; |
| 286 } | 282 } |
| 287 } | 283 } |
| 288 | 284 |
| 289 Draw* draw; | 285 Draw* draw; |
| 290 if (info.isInstanced()) { | 286 if (info.isInstanced()) { |
| 291 int instancesConcated = this->concatInstancedDraw(info, scissorState); | 287 int instancesConcated = this->concatInstancedDraw(ds, info, scissorState); |
| 292 if (info.instanceCount() > instancesConcated) { | 288 if (info.instanceCount() > instancesConcated) { |
| 293 draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info, scissorState, vb, ib)); | 289 draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info, scissorState, vb, ib)); |
| 294 draw->fInfo.adjustInstanceCount(-instancesConcated); | 290 draw->fInfo.adjustInstanceCount(-instancesConcated); |
| 295 } else { | 291 } else { |
| 296 return; | 292 return; |
| 297 } | 293 } |
| 298 } else { | 294 } else { |
| 299 draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info, scissorState, vb, ib)); | 295 draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info, scissorState, vb, ib)); |
| 300 } | 296 } |
| 301 this->recordTraceMarkersIfNecessary(); | 297 this->recordTraceMarkersIfNecessary(); |
| 302 | 298 |
| 303 // Adjust the starting vertex and index when we are using reserved or array sources to | 299 // Adjust the starting vertex and index when we are using reserved or array sources to |
| 304 // compensate for the fact that the data was inserted into a larger vb/ib owned by the pool. | 300 // compensate for the fact that the data was inserted into a larger vb/ib owned by the pool. |
| 305 if (kBuffer_GeometrySrcType != this->getGeomSrc().fVertexSrc) { | 301 if (kBuffer_GeometrySrcType != this->getGeomSrc().fVertexSrc) { |
| 306 size_t bytes = (info.vertexCount() + info.startVertex()) * drawState.getVertexStride(); | 302 size_t bytes = (info.vertexCount() + info.startVertex()) * ds.getVertexStride(); |
| 307 poolState.fUsedPoolVertexBytes = SkTMax(poolState.fUsedPoolVertexBytes, bytes); | 303 poolState.fUsedPoolVertexBytes = SkTMax(poolState.fUsedPoolVertexBytes, bytes); |
| 308 draw->fInfo.adjustStartVertex(poolState.fPoolStartVertex); | 304 draw->fInfo.adjustStartVertex(poolState.fPoolStartVertex); |
| 309 } | 305 } |
| 310 | 306 |
| 311 if (info.isIndexed() && kBuffer_GeometrySrcType != this->getGeomSrc().fIndexSrc) { | 307 if (info.isIndexed() && kBuffer_GeometrySrcType != this->getGeomSrc().fIndexSrc) { |
| 312 size_t bytes = (info.indexCount() + info.startIndex()) * sizeof(uint16_t); | 308 size_t bytes = (info.indexCount() + info.startIndex()) * sizeof(uint16_t); |
| 313 poolState.fUsedPoolIndexBytes = SkTMax(poolState.fUsedPoolIndexBytes, bytes); | 309 poolState.fUsedPoolIndexBytes = SkTMax(poolState.fUsedPoolIndexBytes, bytes); |
| 314 draw->fInfo.adjustStartIndex(poolState.fPoolStartIndex); | 310 draw->fInfo.adjustStartIndex(poolState.fPoolStartIndex); |
| 315 } | 311 } |
| 316 } | 312 } |
| 317 | 313 |
| 318 void GrInOrderDrawBuffer::onStencilPath(const GrPath* path, | 314 void GrInOrderDrawBuffer::onStencilPath(const GrDrawState& ds, |
| | 315 const GrPath* path, |
| 319 const GrClipMaskManager::ScissorState& scissorState, | 316 const GrClipMaskManager::ScissorState& scissorState, |
| 320 const GrStencilSettings& stencilSettings) { | 317 const GrStencilSettings& stencilSettings) { |
| 321 // Only compare the subset of GrDrawState relevant to path stenciling? | 318 // Only compare the subset of GrDrawState relevant to path stenciling? |
| 322 this->recordStateIfNecessary(GrGpu::kStencilPath_DrawType, NULL); | 319 this->recordStateIfNecessary(ds, GrGpu::kStencilPath_DrawType, NULL); |
| 323 StencilPath* sp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, StencilPath, (path)); | 320 StencilPath* sp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, StencilPath, (path)); |
| 324 sp->fScissorState = scissorState; | 321 sp->fScissorState = scissorState; |
| 325 sp->fStencilSettings = stencilSettings; | 322 sp->fStencilSettings = stencilSettings; |
| 326 this->recordTraceMarkersIfNecessary(); | 323 this->recordTraceMarkersIfNecessary(); |
| 327 } | 324 } |
| 328 | 325 |
| 329 void GrInOrderDrawBuffer::onDrawPath(const GrPath* path, | 326 void GrInOrderDrawBuffer::onDrawPath(const GrDrawState& ds, |
| | 327 const GrPath* path, |
| 330 const GrClipMaskManager::ScissorState& scissorState, | 328 const GrClipMaskManager::ScissorState& scissorState, |
| 331 const GrStencilSettings& stencilSettings, | 329 const GrStencilSettings& stencilSettings, |
| 332 const GrDeviceCoordTexture* dstCopy) { | 330 const GrDeviceCoordTexture* dstCopy) { |
| 333 // TODO: Only compare the subset of GrDrawState relevant to path covering? | 331 // TODO: Only compare the subset of GrDrawState relevant to path covering? |
| 334 this->recordStateIfNecessary(GrGpu::kDrawPath_DrawType, dstCopy); | 332 this->recordStateIfNecessary(ds, GrGpu::kDrawPath_DrawType, dstCopy); |
| 335 DrawPath* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPath, (path)); | 333 DrawPath* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPath, (path)); |
| 336 if (dstCopy) { | 334 if (dstCopy) { |
| 337 dp->fDstCopy = *dstCopy; | 335 dp->fDstCopy = *dstCopy; |
| 338 } | 336 } |
| 339 dp->fScissorState = scissorState; | 337 dp->fScissorState = scissorState; |
| 340 dp->fStencilSettings = stencilSettings; | 338 dp->fStencilSettings = stencilSettings; |
| 341 this->recordTraceMarkersIfNecessary(); | 339 this->recordTraceMarkersIfNecessary(); |
| 342 } | 340 } |
| 343 | 341 |
| 344 void GrInOrderDrawBuffer::onDrawPaths(const GrPathRange* pathRange, | 342 void GrInOrderDrawBuffer::onDrawPaths(const GrDrawState& ds, |
| | 343 const GrPathRange* pathRange, |
| 345 const uint32_t indices[], | 344 const uint32_t indices[], |
| 346 int count, | 345 int count, |
| 347 const float transforms[], | 346 const float transforms[], |
| 348 PathTransformType transformsType, | 347 PathTransformType transformsType, |
| 349 const GrClipMaskManager::ScissorState& scissorState, | 348 const GrClipMaskManager::ScissorState& scissorState, |
| 350 const GrStencilSettings& stencilSettings, | 349 const GrStencilSettings& stencilSettings, |
| 351 const GrDeviceCoordTexture* dstCopy) { | 350 const GrDeviceCoordTexture* dstCopy) { |
| 352 SkASSERT(pathRange); | 351 SkASSERT(pathRange); |
| 353 SkASSERT(indices); | 352 SkASSERT(indices); |
| 354 SkASSERT(transforms); | 353 SkASSERT(transforms); |
| 355 | 354 |
| 356 this->recordStateIfNecessary(GrGpu::kDrawPaths_DrawType, dstCopy); | 355 this->recordStateIfNecessary(ds, GrGpu::kDrawPaths_DrawType, dstCopy); |
| 357 | 356 |
| 358 uint32_t* savedIndices = fPathIndexBuffer.append(count, indices); | 357 uint32_t* savedIndices = fPathIndexBuffer.append(count, indices); |
| 359 float* savedTransforms = fPathTransformBuffer.append(count * | 358 float* savedTransforms = fPathTransformBuffer.append(count * |
| 360 GrPathRendering::PathTransformSize(transformsType), transforms); | 359 GrPathRendering::PathTransformSize(transformsType), transforms); |
| 361 | 360 |
| 362 if (kDrawPaths_Cmd == strip_trace_bit(fCmdBuffer.back().fType)) { | 361 if (kDrawPaths_Cmd == strip_trace_bit(fCmdBuffer.back().fType)) { |
| 363 // The previous command was also DrawPaths. Try to collapse this call into the one | 362 // The previous command was also DrawPaths. Try to collapse this call into the one |
| 364 // before. Note that stencilling all the paths at once, then covering, may not be | 363 // before. Note that stencilling all the paths at once, then covering, may not be |
| 365 // equivalent to two separate draw calls if there is overlap. Blending won't work, | 364 // equivalent to two separate draw calls if there is overlap. Blending won't work, |
| 366 // and the combined calls may also cancel each other's winding numbers in some | 365 // and the combined calls may also cancel each other's winding numbers in some |
| 367 // places. For now the winding numbers are only an issue if the fill is even/odd, | 366 // places. For now the winding numbers are only an issue if the fill is even/odd, |
| 368 // because DrawPaths is currently only used for glyphs, and glyphs in the same | 367 // because DrawPaths is currently only used for glyphs, and glyphs in the same |
| 369 // font tend to all wind in the same direction. | 368 // font tend to all wind in the same direction. |
| 370 DrawPaths* previous = static_cast<DrawPaths*>(&fCmdBuffer.back()); | 369 DrawPaths* previous = static_cast<DrawPaths*>(&fCmdBuffer.back()); |
| 371 if (pathRange == previous->pathRange() && | 370 if (pathRange == previous->pathRange() && |
| 372 transformsType == previous->fTransformsType && | 371 transformsType == previous->fTransformsType && |
| 373 scissorState == previous->fScissorState && | 372 scissorState == previous->fScissorState && |
| 374 stencilSettings == previous->fStencilSettings && | 373 stencilSettings == previous->fStencilSettings && |
| 375 path_fill_type_is_winding(stencilSettings) && | 374 path_fill_type_is_winding(stencilSettings) && |
| 376 !this->getDrawState().willBlendWithDst()) { | 375 !ds.willBlendWithDst()) { |
| 377 // Fold this DrawPaths call into the one previous. | 376 // Fold this DrawPaths call into the one previous. |
| 378 previous->fCount += count; | 377 previous->fCount += count; |
| 379 return; | 378 return; |
| 380 } | 379 } |
| 381 } | 380 } |
| 382 | 381 |
| 383 DrawPaths* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPaths, (pathRange)); | 382 DrawPaths* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPaths, (pathRange)); |
| 384 dp->fIndicesLocation = savedIndices - fPathIndexBuffer.begin(); | 383 dp->fIndicesLocation = savedIndices - fPathIndexBuffer.begin(); |
| 385 dp->fCount = count; | 384 dp->fCount = count; |
| 386 dp->fTransformsLocation = savedTransforms - fPathTransformBuffer.begin(); | 385 dp->fTransformsLocation = savedTransforms - fPathTransformBuffer.begin(); |
| 387 dp->fTransformsType = transformsType; | 386 dp->fTransformsType = transformsType; |
| 388 dp->fScissorState = scissorState; | 387 dp->fScissorState = scissorState; |
| 389 dp->fStencilSettings = stencilSettings; | 388 dp->fStencilSettings = stencilSettings; |
| 390 if (dstCopy) { | 389 if (dstCopy) { |
| 391 dp->fDstCopy = *dstCopy; | 390 dp->fDstCopy = *dstCopy; |
| 392 } | 391 } |
| 393 | 392 |
| 394 this->recordTraceMarkersIfNecessary(); | 393 this->recordTraceMarkersIfNecessary(); |
| 395 } | 394 } |
| 396 | 395 |
| 397 void GrInOrderDrawBuffer::onClear(const SkIRect* rect, GrColor color, | 396 void GrInOrderDrawBuffer::onClear(const SkIRect* rect, GrColor color, |
| 398 bool canIgnoreRect, GrRenderTarget* renderTarget) { | 397 bool canIgnoreRect, GrRenderTarget* renderTarget) { |
| | 398 SkASSERT(renderTarget); |
| 399 SkIRect r; | 399 SkIRect r; |
| 400 if (NULL == renderTarget) { | |
| 401 renderTarget = this->drawState()->getRenderTarget(); | |
| 402 SkASSERT(renderTarget); | |
| 403 } | |
| 404 if (NULL == rect) { | 400 if (NULL == rect) { |
| 405 // We could do something smart and remove previous draws and clears to | 401 // We could do something smart and remove previous draws and clears to |
| 406 // the current render target. If we get that smart we have to make sure | 402 // the current render target. If we get that smart we have to make sure |
| 407 // those draws aren't read before this clear (render-to-texture). | 403 // those draws aren't read before this clear (render-to-texture). |
| 408 r.setLTRB(0, 0, renderTarget->width(), renderTarget->height()); | 404 r.setLTRB(0, 0, renderTarget->width(), renderTarget->height()); |
| 409 rect = &r; | 405 rect = &r; |
| 410 } | 406 } |
| 411 Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget)); | 407 Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget)); |
| 412 GrColorIsPMAssert(color); | 408 GrColorIsPMAssert(color); |
| 413 clr->fColor = color; | 409 clr->fColor = color; |
| 414 clr->fRect = *rect; | 410 clr->fRect = *rect; |
| 415 clr->fCanIgnoreRect = canIgnoreRect; | 411 clr->fCanIgnoreRect = canIgnoreRect; |
| 416 this->recordTraceMarkersIfNecessary(); | 412 this->recordTraceMarkersIfNecessary(); |
| 417 } | 413 } |
| 418 | 414 |
| 419 void GrInOrderDrawBuffer::clearStencilClip(const SkIRect& rect, | 415 void GrInOrderDrawBuffer::clearStencilClip(const SkIRect& rect, |
| 420 bool insideClip, | 416 bool insideClip, |
| 421 GrRenderTarget* renderTarget) { | 417 GrRenderTarget* renderTarget) { |
| 422 if (NULL == renderTarget) { | 418 SkASSERT(renderTarget); |
| 423 renderTarget = this->drawState()->getRenderTarget(); | |
| 424 SkASSERT(renderTarget); | |
| 425 } | |
| 426 ClearStencilClip* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, ClearStencilClip, (renderTarget)); | 419 ClearStencilClip* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, ClearStencilClip, (renderTarget)); |
| 427 clr->fRect = rect; | 420 clr->fRect = rect; |
| 428 clr->fInsideClip = insideClip; | 421 clr->fInsideClip = insideClip; |
| 429 this->recordTraceMarkersIfNecessary(); | 422 this->recordTraceMarkersIfNecessary(); |
| 430 } | 423 } |
| 431 | 424 |
| 432 void GrInOrderDrawBuffer::discard(GrRenderTarget* renderTarget) { | 425 void GrInOrderDrawBuffer::discard(GrRenderTarget* renderTarget) { |
| 433 SkASSERT(renderTarget); | 426 SkASSERT(renderTarget); |
| 434 if (!this->caps()->discardRenderTargetSupport()) { | 427 if (!this->caps()->discardRenderTargetSupport()) { |
| 435 return; | 428 return; |
| (...skipping 50 matching lines...) |
| 486 GrGpuTraceMarker newMarker("", -1); | 479 GrGpuTraceMarker newMarker("", -1); |
| 487 SkString traceString; | 480 SkString traceString; |
| 488 if (cmd_has_trace_marker(iter->fType)) { | 481 if (cmd_has_trace_marker(iter->fType)) { |
| 489 traceString = fGpuCmdMarkers[currCmdMarker].toString(); | 482 traceString = fGpuCmdMarkers[currCmdMarker].toString(); |
| 490 newMarker.fMarker = traceString.c_str(); | 483 newMarker.fMarker = traceString.c_str(); |
| 491 fDstGpu->addGpuTraceMarker(&newMarker); | 484 fDstGpu->addGpuTraceMarker(&newMarker); |
| 492 ++currCmdMarker; | 485 ++currCmdMarker; |
| 493 } | 486 } |
| 494 | 487 |
| 495 if (kSetState_Cmd == strip_trace_bit(iter->fType)) { | 488 if (kSetState_Cmd == strip_trace_bit(iter->fType)) { |
| 496 const SetState* ss = reinterpret_cast<const SetState*>(iter.get()); | 489 SetState* ss = reinterpret_cast<SetState*>(iter.get()); |
| 497 currentOptState.reset(GrOptDrawState::Create(ss->fState, | 490 currentOptState.reset(GrOptDrawState::Create(ss->fState, |
| 498 fDstGpu, | 491 fDstGpu, |
| 499 &ss->fDstCopy, | 492 &ss->fDstCopy, |
| 500 ss->fDrawType)); | 493 ss->fDrawType)); |
| 501 } else { | 494 } else { |
| 502 iter->execute(this, currentOptState.get()); | 495 iter->execute(this, currentOptState.get()); |
| 503 } | 496 } |
| 504 | 497 |
| 505 if (cmd_has_trace_marker(iter->fType)) { | 498 if (cmd_has_trace_marker(iter->fType)) { |
| 506 fDstGpu->removeGpuTraceMarker(&newMarker); | 499 fDstGpu->removeGpuTraceMarker(&newMarker); |
| (...skipping 78 matching lines...) |
| 585 this->recordTraceMarkersIfNecessary(); | 578 this->recordTraceMarkersIfNecessary(); |
| 586 return true; | 579 return true; |
| 587 } else if (GrDrawTarget::canCopySurface(dst, src, srcRect, dstPoint)) { | 580 } else if (GrDrawTarget::canCopySurface(dst, src, srcRect, dstPoint)) { |
| 588 GrDrawTarget::copySurface(dst, src, srcRect, dstPoint); | 581 GrDrawTarget::copySurface(dst, src, srcRect, dstPoint); |
| 589 return true; | 582 return true; |
| 590 } else { | 583 } else { |
| 591 return false; | 584 return false; |
| 592 } | 585 } |
| 593 } | 586 } |
| 594 | 587 |
| 595 bool GrInOrderDrawBuffer::canCopySurface(GrSurface* dst, | 588 bool GrInOrderDrawBuffer::canCopySurface(const GrSurface* dst, |
| 596 GrSurface* src, | 589 const GrSurface* src, |
| 597 const SkIRect& srcRect, | 590 const SkIRect& srcRect, |
| 598 const SkIPoint& dstPoint) { | 591 const SkIPoint& dstPoint) { |
| 599 return fDstGpu->canCopySurface(dst, src, srcRect, dstPoint) || | 592 return fDstGpu->canCopySurface(dst, src, srcRect, dstPoint) || |
| 600 GrDrawTarget::canCopySurface(dst, src, srcRect, dstPoint); | 593 GrDrawTarget::canCopySurface(dst, src, srcRect, dstPoint); |
| 601 } | 594 } |
| 602 | 595 |
| 603 void GrInOrderDrawBuffer::initCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) { | 596 void GrInOrderDrawBuffer::initCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) { |
| 604 fDstGpu->initCopySurfaceDstDesc(src, desc); | 597 fDstGpu->initCopySurfaceDstDesc(src, desc); |
| 605 } | 598 } |
| 606 | 599 |
| 607 void GrInOrderDrawBuffer::willReserveVertexAndIndexSpace(int vertexCount, | 600 void GrInOrderDrawBuffer::willReserveVertexAndIndexSpace(int vertexCount, |
| | 601 size_t vertexStride, |
| 608 int indexCount) { | 602 int indexCount) { |
| 609 // We use geometryHints() to know whether to flush the draw buffer. We | 603 // We use geometryHints() to know whether to flush the draw buffer. We |
| 610 // can't flush if we are inside an unbalanced pushGeometrySource. | 604 // can't flush if we are inside an unbalanced pushGeometrySource. |
| 611 // Moreover, flushing blows away vertex and index data that was | 605 // Moreover, flushing blows away vertex and index data that was |
| 612 // previously reserved. So if the vertex or index data is pulled from | 606 // previously reserved. So if the vertex or index data is pulled from |
| 613 // reserved space and won't be released by this request then we can't | 607 // reserved space and won't be released by this request then we can't |
| 614 // flush. | 608 // flush. |
| 615 bool insideGeoPush = fGeoPoolStateStack.count() > 1; | 609 bool insideGeoPush = fGeoPoolStateStack.count() > 1; |
| 616 | 610 |
| 617 bool unreleasedVertexSpace = | 611 bool unreleasedVertexSpace = |
| 618 !vertexCount && | 612 !vertexCount && |
| 619 kReserved_GeometrySrcType == this->getGeomSrc().fVertexSrc; | 613 kReserved_GeometrySrcType == this->getGeomSrc().fVertexSrc; |
| 620 | 614 |
| 621 bool unreleasedIndexSpace = | 615 bool unreleasedIndexSpace = |
| 622 !indexCount && | 616 !indexCount && |
| 623 kReserved_GeometrySrcType == this->getGeomSrc().fIndexSrc; | 617 kReserved_GeometrySrcType == this->getGeomSrc().fIndexSrc; |
| 624 | 618 |
| 625 int vcount = vertexCount; | 619 int vcount = vertexCount; |
| 626 int icount = indexCount; | 620 int icount = indexCount; |
| 627 | 621 |
| 628 if (!insideGeoPush && | 622 if (!insideGeoPush && |
| 629 !unreleasedVertexSpace && | 623 !unreleasedVertexSpace && |
| 630 !unreleasedIndexSpace && | 624 !unreleasedIndexSpace && |
| 631 this->geometryHints(&vcount, &icount)) { | 625 this->geometryHints(vertexStride, &vcount, &icount)) { |
| 632 this->flush(); | 626 this->flush(); |
| 633 } | 627 } |
| 634 } | 628 } |
| 635 | 629 |
| 636 bool GrInOrderDrawBuffer::geometryHints(int* vertexCount, | 630 bool GrInOrderDrawBuffer::geometryHints(size_t vertexStride, |
| | 631 int* vertexCount, |
| 637 int* indexCount) const { | 632 int* indexCount) const { |
| 638 // we will recommend a flush if the data could fit in a single | 633 // we will recommend a flush if the data could fit in a single |
| 639 // preallocated buffer but none are left and it can't fit | 634 // preallocated buffer but none are left and it can't fit |
| 640 // in the current buffer (which may not be prealloced). | 635 // in the current buffer (which may not be prealloced). |
| 641 bool flush = false; | 636 bool flush = false; |
| 642 if (indexCount) { | 637 if (indexCount) { |
| 643 int32_t currIndices = fIndexPool.currentBufferIndices(); | 638 int32_t currIndices = fIndexPool.currentBufferIndices(); |
| 644 if (*indexCount > currIndices && | 639 if (*indexCount > currIndices && |
| 645 (!fIndexPool.preallocatedBuffersRemaining() && | 640 (!fIndexPool.preallocatedBuffersRemaining() && |
| 646 *indexCount <= fIndexPool.preallocatedBufferIndices())) { | 641 *indexCount <= fIndexPool.preallocatedBufferIndices())) { |
| 647 | 642 |
| 648 flush = true; | 643 flush = true; |
| 649 } | 644 } |
| 650 *indexCount = currIndices; | 645 *indexCount = currIndices; |
| 651 } | 646 } |
| 652 if (vertexCount) { | 647 if (vertexCount) { |
| 653 size_t vertexStride = this->getDrawState().getVertexStride(); | |
| 654 int32_t currVertices = fVertexPool.currentBufferVertices(vertexStride); | 648 int32_t currVertices = fVertexPool.currentBufferVertices(vertexStride); |
| 655 if (*vertexCount > currVertices && | 649 if (*vertexCount > currVertices && |
| 656 (!fVertexPool.preallocatedBuffersRemaining() && | 650 (!fVertexPool.preallocatedBuffersRemaining() && |
| 657 *vertexCount <= fVertexPool.preallocatedBufferVertices(vertexStride))) { | 651 *vertexCount <= fVertexPool.preallocatedBufferVertices(vertexStride))) { |
| 658 | 652 |
| 659 flush = true; | 653 flush = true; |
| 660 } | 654 } |
| 661 *vertexCount = currVertices; | 655 *vertexCount = currVertices; |
| 662 } | 656 } |
| 663 return flush; | 657 return flush; |
| (...skipping 82 matching lines...) |
| 746 // is now unreleasable because data may have been appended later in the | 740 // is now unreleasable because data may have been appended later in the |
| 747 // pool. | 741 // pool. |
| 748 if (kReserved_GeometrySrcType == restoredState.fVertexSrc) { | 742 if (kReserved_GeometrySrcType == restoredState.fVertexSrc) { |
| 749 poolState.fUsedPoolVertexBytes = restoredState.fVertexSize * restoredState.fVertexCount; | 743 poolState.fUsedPoolVertexBytes = restoredState.fVertexSize * restoredState.fVertexCount; |
| 750 } | 744 } |
| 751 if (kReserved_GeometrySrcType == restoredState.fIndexSrc) { | 745 if (kReserved_GeometrySrcType == restoredState.fIndexSrc) { |
| 752 poolState.fUsedPoolIndexBytes = sizeof(uint16_t) * restoredState.fIndexCount; | 746 poolState.fUsedPoolIndexBytes = sizeof(uint16_t) * restoredState.fIndexCount; |
| 753 } | 747 } |
| 754 } | 748 } |
| 755 | 749 |
| 756 void GrInOrderDrawBuffer::recordStateIfNecessary(GrGpu::DrawType drawType, | 750 void GrInOrderDrawBuffer::recordStateIfNecessary(const GrDrawState& ds, |
| | 751 GrGpu::DrawType drawType, |
| 757 const GrDeviceCoordTexture* dstCopy) { | 752 const GrDeviceCoordTexture* dstCopy) { |
| 758 if (!fLastState) { | 753 if (!fLastState) { |
| 759 SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (this->getDrawState())); | 754 SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (ds)); |
| 760 fLastState = &ss->fState; | 755 fLastState = &ss->fState; |
| 761 if (dstCopy) { | 756 if (dstCopy) { |
| 762 ss->fDstCopy = *dstCopy; | 757 ss->fDstCopy = *dstCopy; |
| 763 } | 758 } |
| 764 ss->fDrawType = drawType; | 759 ss->fDrawType = drawType; |
| 765 this->convertDrawStateToPendingExec(fLastState); | 760 this->convertDrawStateToPendingExec(fLastState); |
| 766 this->recordTraceMarkersIfNecessary(); | 761 this->recordTraceMarkersIfNecessary(); |
| 767 return; | 762 return; |
| 768 } | 763 } |
| 769 const GrDrawState& curr = this->getDrawState(); | 764 switch (GrDrawState::CombineIfPossible(*fLastState, ds, *this->caps())) { |
| 770 switch (GrDrawState::CombineIfPossible(*fLastState, curr, *this->caps())) { | |
| 771 case GrDrawState::kIncompatible_CombinedState: { | 765 case GrDrawState::kIncompatible_CombinedState: { |
| 772 SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (curr)); | 766 SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (ds)); |
| 773 fLastState = &ss->fState; | 767 fLastState = &ss->fState; |
| 774 if (dstCopy) { | 768 if (dstCopy) { |
| 775 ss->fDstCopy = *dstCopy; | 769 ss->fDstCopy = *dstCopy; |
| 776 } | 770 } |
| 777 ss->fDrawType = drawType; | 771 ss->fDrawType = drawType; |
| 778 this->convertDrawStateToPendingExec(fLastState); | 772 this->convertDrawStateToPendingExec(fLastState); |
| 779 this->recordTraceMarkersIfNecessary(); | 773 this->recordTraceMarkersIfNecessary(); |
| 780 break; | 774 break; |
| 781 } | 775 } |
| 782 case GrDrawState::kA_CombinedState: | 776 case GrDrawState::kA_CombinedState: |
| 783 case GrDrawState::kAOrB_CombinedState: // Treat the same as kA. | 777 case GrDrawState::kAOrB_CombinedState: // Treat the same as kA. |
| 784 break; | 778 break; |
| 785 case GrDrawState::kB_CombinedState: | 779 case GrDrawState::kB_CombinedState: |
| 786 // prev has already been converted to pending execution. That is a one-way ticket. | 780 // prev has already been converted to pending execution. That is a one-way ticket. |
| 787 // So here we just destruct the previous state and reinit with a new copy of curr. | 781 // So here we just destruct the previous state and reinit with a new copy of curr. |
| 788 // Note that this goes away when we move GrIODB over to taking optimized snapshots | 782 // Note that this goes away when we move GrIODB over to taking optimized snapshots |
| 789 // of draw states. | 783 // of draw states. |
| 790 fLastState->~GrDrawState(); | 784 fLastState->~GrDrawState(); |
| 791 SkNEW_PLACEMENT_ARGS(fLastState, GrDrawState, (curr)); | 785 SkNEW_PLACEMENT_ARGS(fLastState, GrDrawState, (ds)); |
| 792 this->convertDrawStateToPendingExec(fLastState); | 786 this->convertDrawStateToPendingExec(fLastState); |
| 793 break; | 787 break; |
| 794 } | 788 } |
| 795 } | 789 } |
| 796 | 790 |
| 797 void GrInOrderDrawBuffer::recordTraceMarkersIfNecessary() { | 791 void GrInOrderDrawBuffer::recordTraceMarkersIfNecessary() { |
| 798 SkASSERT(!fCmdBuffer.empty()); | 792 SkASSERT(!fCmdBuffer.empty()); |
| 799 SkASSERT(!cmd_has_trace_marker(fCmdBuffer.back().fType)); | 793 SkASSERT(!cmd_has_trace_marker(fCmdBuffer.back().fType)); |
| 800 const GrTraceMarkerSet& activeTraceMarkers = this->getActiveTraceMarkers(); | 794 const GrTraceMarkerSet& activeTraceMarkers = this->getActiveTraceMarkers(); |
| 801 if (activeTraceMarkers.count() > 0) { | 795 if (activeTraceMarkers.count() > 0) { |
| 802 fCmdBuffer.back().fType = add_trace_bit(fCmdBuffer.back().fType); | 796 fCmdBuffer.back().fType = add_trace_bit(fCmdBuffer.back().fType); |
| 803 fGpuCmdMarkers.push_back(activeTraceMarkers); | 797 fGpuCmdMarkers.push_back(activeTraceMarkers); |
| 804 } | 798 } |
| 805 } | 799 } |