OLD | NEW |
(Empty) | |
| 1 /* |
| 2 * Copyright 2016 Google Inc. |
| 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. |
| 6 */ |
| 7 |
| 8 #include "InstancedRendering.h" |
| 9 |
| 10 #include "GrBatchFlushState.h" |
| 11 #include "GrPipeline.h" |
| 12 #include "GrResourceProvider.h" |
| 13 #include "instanced/InstanceProcessor.h" |
| 14 |
| 15 namespace gr_instanced { |
| 16 |
| 17 InstancedRendering::InstancedRendering(GrGpu* gpu, AntialiasMode lastSupportedAAMode, |
| 18 bool canRenderToFloat) |
| 19 : fGpu(SkRef(gpu)), |
| 20 fLastSupportedAAMode(lastSupportedAAMode), |
| 21 fCanRenderToFloat(canRenderToFloat), |
| 22 fState(State::kRecordingDraws), |
| 23 fDrawPool(1024 * sizeof(Batch::Draw), 1024 * sizeof(Batch::Draw)) { |
| 24 } |
| 25 |
| 26 GrDrawBatch* InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix, |
| 27 GrColor color, bool antialias, |
| 28 const GrInstancedPipelineInfo& info, bool* useHWAA) { |
| 29 return this->recordShape(ShapeType::kRect, rect, viewMatrix, color, rect, antialias, info, |
| 30 useHWAA); |
| 31 } |
| 32 |
| 33 GrDrawBatch* InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix, |
| 34 GrColor color, const SkRect& localRect, bool antialias, |
| 35 const GrInstancedPipelineInfo& info, bool* useHWAA) { |
| 36 return this->recordShape(ShapeType::kRect, rect, viewMatrix, color, localRect, antialias, info, |
| 37 useHWAA); |
| 38 } |
| 39 |
| 40 GrDrawBatch* InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix, |
| 41 GrColor color, const SkMatrix& localMatrix, |
| 42 bool antialias, const GrInstancedPipelineInfo& info, |
| 43 bool* useHWAA) { |
| 44 if (localMatrix.hasPerspective()) { |
| 45 return nullptr; // Perspective is not yet supported in the local matrix. |
| 46 } |
| 47 if (Batch* batch = this->recordShape(ShapeType::kRect, rect, viewMatrix, color, rect, antialias, |
| 48 info, useHWAA)) { |
| 49 batch->getSingleInstance().fInfo |= kLocalMatrix_InfoFlag; |
| 50 batch->appendParamsTexel(localMatrix.getScaleX(), localMatrix.getSkewX(), |
| 51 localMatrix.getTranslateX()); |
| 52 batch->appendParamsTexel(localMatrix.getSkewY(), localMatrix.getScaleY(), |
| 53 localMatrix.getTranslateY()); |
| 54 batch->fInfo.fHasLocalMatrix = true; |
| 55 return batch; |
| 56 } |
| 57 return nullptr; |
| 58 } |
| 59 |
| 60 GrDrawBatch* InstancedRendering::recordOval(const SkRect& oval, const SkMatrix& viewMatrix, |
| 61 GrColor color, bool antialias, |
| 62 const GrInstancedPipelineInfo& info, bool* useHWAA) { |
| 63 return this->recordShape(ShapeType::kOval, oval, viewMatrix, color, oval, antialias, info, |
| 64 useHWAA); |
| 65 } |
| 66 |
| 67 GrDrawBatch* InstancedRendering::recordRRect(const SkRRect& rrect, const SkMatrix& viewMatrix, |
| 68 GrColor color, bool antialias, |
| 69 const GrInstancedPipelineInfo& info, bool* useHWAA) { |
| 70 if (Batch* batch = this->recordShape(GetRRectShapeType(rrect), rrect.rect(), viewMatrix, color, |
| 71 rrect.rect(), antialias, info, useHWAA)) { |
| 72 batch->appendRRectParams(rrect); |
| 73 return batch; |
| 74 } |
| 75 return nullptr; |
| 76 } |
| 77 |
| 78 GrDrawBatch* InstancedRendering::recordDRRect(const SkRRect& outer, const SkRRect& inner, |
| 79 const SkMatrix& viewMatrix, GrColor color, |
| 80 bool antialias, const GrInstancedPipelineInfo& info, |
| 81 bool* useHWAA) { |
| 82 if (inner.getType() > SkRRect::kSimple_Type) { |
| 83 return nullptr; // Complex inner round rects are not yet supported. |
| 84 } |
| 85 if (SkRRect::kEmpty_Type == inner.getType()) { |
| 86 return this->recordRRect(outer, viewMatrix, color, antialias, info, useHWAA); |
| 87 } |
| 88 if (Batch* batch = this->recordShape(GetRRectShapeType(outer), outer.rect(), viewMatrix, color, |
| 89 outer.rect(), antialias, info, useHWAA)) { |
| 90 batch->appendRRectParams(outer); |
| 91 ShapeType innerShapeType = GetRRectShapeType(inner); |
| 92 batch->fInfo.fInnerShapeTypes |= GetShapeFlag(innerShapeType); |
| 93 batch->getSingleInstance().fInfo |= ((int)innerShapeType << kInnerShapeType_InfoBit); |
| 94 batch->appendParamsTexel(inner.rect().asScalars(), 4); |
| 95 batch->appendRRectParams(inner); |
| 96 return batch; |
| 97 } |
| 98 return nullptr; |
| 99 } |
| 100 |
| 101 InstancedRendering::Batch* InstancedRendering::recordShape(ShapeType type, const SkRect& bounds, |
| 102 const SkMatrix& viewMatrix, |
| 103 GrColor color, const SkRect& localRect, |
| 104 bool antialias, |
| 105 const GrInstancedPipelineInfo& info, |
| 106 bool* useHWAA) { |
| 107 SkASSERT(State::kRecordingDraws == fState); |
| 108 |
| 109 if (info.fIsRenderingToFloat && !fCanRenderToFloat) { |
| 110 return nullptr; |
| 111 } |
| 112 |
| 113 AntialiasMode antialiasMode; |
| 114 if (!this->selectAntialiasMode(viewMatrix, antialias, info, useHWAA, &antialiasMode)) { |
| 115 return nullptr; |
| 116 } |
| 117 |
| 118 Batch* batch = this->createBatch(); |
| 119 batch->fInfo.fAntialiasMode = antialiasMode; |
| 120 batch->fInfo.fShapeTypes = GetShapeFlag(type); |
| 121 batch->fInfo.fCannotDiscard = !info.fCanDiscard; |
| 122 |
| 123 Instance& instance = batch->getSingleInstance(); |
| 124 instance.fInfo = (int)type << kShapeType_InfoBit; |
| 125 |
| 126 // The instanced shape renderer draws rectangles of [-1, -1, +1, +1], so we find the matrix that |
| 127 // will map this rectangle to the same device coordinates as "viewMatrix * bounds". |
| 128 float sx = 0.5f * bounds.width(); |
| 129 float sy = 0.5f * bounds.height(); |
| 130 float tx = sx + bounds.fLeft; |
| 131 float ty = sy + bounds.fTop; |
| 132 if (!viewMatrix.hasPerspective()) { |
| 133 float* m = instance.fShapeMatrix2x3; |
| 134 m[0] = viewMatrix.getScaleX() * sx; |
| 135 m[1] = viewMatrix.getSkewX() * sy; |
| 136 m[2] = viewMatrix.getTranslateX() + |
| 137 viewMatrix.getScaleX() * tx + viewMatrix.getSkewX() * ty; |
| 138 |
| 139 m[3] = viewMatrix.getSkewY() * sx; |
| 140 m[4] = viewMatrix.getScaleY() * sy; |
| 141 m[5] = viewMatrix.getTranslateY() + |
| 142 viewMatrix.getSkewY() * tx + viewMatrix.getScaleY() * ty; |
| 143 |
| 144 // Since 'm' is a 2x3 matrix that maps the rect [-1, +1] into the shape's device-space quad, |
| 145 // it's quite simple to find the bounding rectangle: |
| 146 float devBoundsHalfWidth = fabsf(m[0]) + fabsf(m[1]); |
| 147 float devBoundsHalfHeight = fabsf(m[3]) + fabsf(m[4]); |
| 148 batch->fBounds.fLeft = m[2] - devBoundsHalfWidth; |
| 149 batch->fBounds.fRight = m[2] + devBoundsHalfWidth; |
| 150 batch->fBounds.fTop = m[5] - devBoundsHalfHeight; |
| 151 batch->fBounds.fBottom = m[5] + devBoundsHalfHeight; |
| 152 |
| 153 // TODO: Is this worth the CPU overhead? |
| 154 batch->fInfo.fNonSquare = |
| 155 fabsf(devBoundsHalfHeight - devBoundsHalfWidth) > 0.5f || // Early out. |
| 156 fabs(m[0] * m[3] + m[1] * m[4]) > 1e-3f || // Skew? |
| 157 fabs(m[0] * m[0] + m[1] * m[1] - m[3] * m[3] - m[4] * m[4]) > 1e-2f; // Diff. lengths? |
| 158 } else { |
| 159 SkMatrix shapeMatrix(viewMatrix); |
| 160 shapeMatrix.preTranslate(tx, ty); |
| 161 shapeMatrix.preScale(sx, sy); |
| 162 instance.fInfo |= kPerspective_InfoFlag; |
| 163 |
| 164 float* m = instance.fShapeMatrix2x3; |
| 165 m[0] = SkScalarToFloat(shapeMatrix.getScaleX()); |
| 166 m[1] = SkScalarToFloat(shapeMatrix.getSkewX()); |
| 167 m[2] = SkScalarToFloat(shapeMatrix.getTranslateX()); |
| 168 m[3] = SkScalarToFloat(shapeMatrix.getSkewY()); |
| 169 m[4] = SkScalarToFloat(shapeMatrix.getScaleY()); |
| 170 m[5] = SkScalarToFloat(shapeMatrix.getTranslateY()); |
| 171 |
| 172 // Send the perspective column as a param. |
| 173 batch->appendParamsTexel(shapeMatrix[SkMatrix::kMPersp0], shapeMatrix[SkMatrix::kMPersp1], |
| 174 shapeMatrix[SkMatrix::kMPersp2]); |
| 175 batch->fInfo.fHasPerspective = true; |
| 176 |
| 177 viewMatrix.mapRect(&batch->fBounds, bounds); |
| 178 |
| 179 batch->fInfo.fNonSquare = true; |
| 180 } |
| 181 |
| 182 instance.fColor = color; |
| 183 |
| 184 const float* rectAsFloats = localRect.asScalars(); // Ensure SkScalar == float. |
| 185 memcpy(&instance.fLocalRect, rectAsFloats, 4 * sizeof(float)); |
| 186 |
| 187 batch->fPixelLoad = batch->fBounds.height() * batch->fBounds.width(); |
| 188 return batch; |
| 189 } |
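
The 2x3 shape-matrix construction above can be sanity-checked in isolation. The following minimal standalone sketch (plain C++ with no Skia types; the Affine struct and the sample view matrix and bounds are illustrative assumptions, not part of this file) follows the non-perspective path: build the matrix that maps the unit square [-1,+1]^2 onto "viewMatrix * bounds", then recover the device bounds from |m[0]|+|m[1]| and |m[3]|+|m[4]| the same way recordShape does.

    #include <cmath>
    #include <cstdio>

    // Row-major 2x3 affine matrix: | scaleX skewX transX |
    //                              | skewY  scaleY transY |
    struct Affine { float scaleX, skewX, transX, skewY, scaleY, transY; };

    int main() {
        Affine view = {2.0f, 0.0f, 10.0f, 0.0f, 2.0f, 20.0f};  // hypothetical viewMatrix
        float left = 5, top = 5, width = 30, height = 10;      // hypothetical bounds

        float sx = 0.5f * width, sy = 0.5f * height;
        float tx = sx + left,    ty = sy + top;

        // m maps [-1,+1]^2 to the same device-space quad as view * bounds.
        float m[6];
        m[0] = view.scaleX * sx;
        m[1] = view.skewX  * sy;
        m[2] = view.transX + view.scaleX * tx + view.skewX * ty;
        m[3] = view.skewY  * sx;
        m[4] = view.scaleY * sy;
        m[5] = view.transY + view.skewY  * tx + view.scaleY * ty;

        // Each corner of the unit square lands at (+/-m[0] +/-m[1] + m[2], +/-m[3] +/-m[4] + m[5]),
        // so the device bounds are centered at (m[2], m[5]) with these half extents.
        float halfW = std::fabs(m[0]) + std::fabs(m[1]);
        float halfH = std::fabs(m[3]) + std::fabs(m[4]);
        std::printf("device bounds: L=%g T=%g R=%g B=%g\n",
                    m[2] - halfW, m[5] - halfH, m[2] + halfW, m[5] + halfH);
        return 0;   // prints L=20 T=30 R=80 B=50 for these inputs
    }

For the affine case the half extents fall straight out of the absolute values of the matrix entries, which is why no general mapRect is needed; the perspective branch above falls back to viewMatrix.mapRect instead.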
| 190 |
| 191 inline bool InstancedRendering::selectAntialiasMode(const SkMatrix& viewMatrix, bool antialias, |
| 192 const GrInstancedPipelineInfo& info, |
| 193 bool* useHWAA, AntialiasMode* antialiasMode) { |
| 194 SkASSERT(!info.fColorDisabled || info.fDrawingShapeToStencil); |
| 195 SkASSERT(!info.fIsMixedSampled || info.fIsMultisampled); |
| 196 |
| 197 if (!info.fIsMultisampled || fGpu->caps()->multisampleDisableSupport()) { |
| 198 SkASSERT(fLastSupportedAAMode >= AntialiasMode::kCoverage); |
| 199 if (!antialias) { |
| 200 if (info.fDrawingShapeToStencil && !info.fCanDiscard) { |
| 201 // We can't draw to the stencil buffer without discard (or sample mask if MSAA). |
| 202 return false; |
| 203 } |
| 204 *antialiasMode = AntialiasMode::kNone; |
| 205 *useHWAA = false; |
| 206 return true; |
| 207 } |
| 208 |
| 209 if (info.canUseCoverageAA() && viewMatrix.preservesRightAngles()) { |
| 210 *antialiasMode = AntialiasMode::kCoverage; |
| 211 *useHWAA = false; |
| 212 return true; |
| 213 } |
| 214 } |
| 215 |
| 216 if (info.fIsMultisampled && fLastSupportedAAMode >= AntialiasMode::kMSAA) { |
| 217 if (!info.fIsMixedSampled || info.fColorDisabled) { |
| 218 *antialiasMode = AntialiasMode::kMSAA; |
| 219 *useHWAA = true; |
| 220 return true; |
| 221 } |
| 222 if (fLastSupportedAAMode >= AntialiasMode::kMixedSamples) { |
| 223 *antialiasMode = AntialiasMode::kMixedSamples; |
| 224 *useHWAA = true; |
| 225 return true; |
| 226 } |
| 227 } |
| 228 |
| 229 return false; |
| 230 } |
| 231 |
| 232 InstancedRendering::Batch::Batch(uint32_t classID, InstancedRendering* ir) |
| 233 : INHERITED(classID), |
| 234 fInstancedRendering(ir), |
| 235 fIsTracked(false), |
| 236 fNumDraws(1), |
| 237 fNumChangesInGeometry(0) { |
| 238 fHeadDraw = fTailDraw = (Draw*)fInstancedRendering->fDrawPool.allocate(sizeof(Draw)); |
| 239 #ifdef SK_DEBUG |
| 240 fHeadDraw->fGeometry = {-1, 0}; |
| 241 #endif |
| 242 fHeadDraw->fNext = nullptr; |
| 243 } |
| 244 |
| 245 InstancedRendering::Batch::~Batch() { |
| 246 if (fIsTracked) { |
| 247 fInstancedRendering->fTrackedBatches.remove(this); |
| 248 } |
| 249 |
| 250 Draw* draw = fHeadDraw; |
| 251 while (draw) { |
| 252 Draw* next = draw->fNext; |
| 253 fInstancedRendering->fDrawPool.release(draw); |
| 254 draw = next; |
| 255 } |
| 256 } |
| 257 |
| 258 void InstancedRendering::Batch::appendRRectParams(const SkRRect& rrect) { |
| 259 SkASSERT(!fIsTracked); |
| 260 switch (rrect.getType()) { |
| 261 case SkRRect::kSimple_Type: { |
| 262 const SkVector& radii = rrect.getSimpleRadii(); |
| 263 this->appendParamsTexel(radii.x(), radii.y(), rrect.width(), rrect.height()); |
| 264 return; |
| 265 } |
| 266 case SkRRect::kNinePatch_Type: { |
| 267 float twoOverW = 2 / rrect.width(); |
| 268 float twoOverH = 2 / rrect.height(); |
| 269 const SkVector& radiiTL = rrect.radii(SkRRect::kUpperLeft_Corner); |
| 270 const SkVector& radiiBR = rrect.radii(SkRRect::kLowerRight_Corner); |
| 271 this->appendParamsTexel(radiiTL.x() * twoOverW, radiiBR.x() * twoOverW, |
| 272 radiiTL.y() * twoOverH, radiiBR.y() * twoOverH); |
| 273 return; |
| 274 } |
| 275 case SkRRect::kComplex_Type: { |
| 276 /** |
| 277 * The x and y radii of each arc are stored in separate vectors, |
| 278 * in the following order: |
| 279 * |
| 280 * __x1 _ _ _ x3__ |
| 281 * y1 | | y2 |
| 282 * |
| 283 * | | |
| 284 * |
| 285 * y3 |__ _ _ _ __| y4 |
| 286 * x2 x4 |
| 287 * |
| 288 */ |
| 289 float twoOverW = 2 / rrect.width(); |
| 290 float twoOverH = 2 / rrect.height(); |
| 291 const SkVector& radiiTL = rrect.radii(SkRRect::kUpperLeft_Corner); |
| 292 const SkVector& radiiTR = rrect.radii(SkRRect::kUpperRight_Corner); |
| 293 const SkVector& radiiBR = rrect.radii(SkRRect::kLowerRight_Corner); |
| 294 const SkVector& radiiBL = rrect.radii(SkRRect::kLowerLeft_Corner); |
| 295 this->appendParamsTexel(radiiTL.x() * twoOverW, radiiBL.x() * twoOverW, |
| 296 radiiTR.x() * twoOverW, radiiBR.x() * twoOverW); |
| 297 this->appendParamsTexel(radiiTL.y() * twoOverH, radiiTR.y() * twoOverH, |
| 298 radiiBL.y() * twoOverH, radiiBR.y() * twoOverH); |
| 299 return; |
| 300 } |
| 301 default: return; |
| 302 } |
| 303 } |
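
The 2/width and 2/height factors in the nine-patch and complex cases rescale corner radii from the round rect's own coordinate space into the [-1,+1] instance space the shape geometry is defined in. A minimal sketch of that normalization (plain C++; the width, height, and radius values are made-up examples):

    #include <cstdio>

    int main() {
        float width = 100.0f, height = 40.0f;    // hypothetical rrect dimensions
        float radiusX = 25.0f, radiusY = 10.0f;  // hypothetical corner radii

        // The instance geometry spans [-1,+1] (length 2) on each axis, while the
        // rrect spans 'width' and 'height', so radii scale by 2/width and 2/height.
        float twoOverW = 2 / width;
        float twoOverH = 2 / height;
        std::printf("normalized radii: %g x %g\n",
                    radiusX * twoOverW, radiusY * twoOverH);
        return 0;   // prints 0.5 x 0.5 for these inputs
    }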
| 304 |
| 305 void InstancedRendering::Batch::appendParamsTexel(const SkScalar* vals, int count) { |
| 306 SkASSERT(!fIsTracked); |
| 307 SkASSERT(count <= 4 && count >= 0); |
| 308 const float* valsAsFloats = vals; // Ensure SkScalar == float. |
| 309 memcpy(&fParams.push_back(), valsAsFloats, count * sizeof(float)); |
| 310 fInfo.fHasParams = true; |
| 311 } |
| 312 |
| 313 void InstancedRendering::Batch::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w) { |
| 314 SkASSERT(!fIsTracked); |
| 315 ParamsTexel& texel = fParams.push_back(); |
| 316 texel.fX = SkScalarToFloat(x); |
| 317 texel.fY = SkScalarToFloat(y); |
| 318 texel.fZ = SkScalarToFloat(z); |
| 319 texel.fW = SkScalarToFloat(w); |
| 320 fInfo.fHasParams = true; |
| 321 } |
| 322 |
| 323 void InstancedRendering::Batch::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z) { |
| 324 SkASSERT(!fIsTracked); |
| 325 ParamsTexel& texel = fParams.push_back(); |
| 326 texel.fX = SkScalarToFloat(x); |
| 327 texel.fY = SkScalarToFloat(y); |
| 328 texel.fZ = SkScalarToFloat(z); |
| 329 fInfo.fHasParams = true; |
| 330 } |
| 331 |
| 332 void InstancedRendering::Batch::computePipelineOptimizations(GrInitInvariantOutput* color, |
| 333 GrInitInvariantOutput* coverage, |
| 334 GrBatchToXPOverrides* overrides) const { |
| 335 color->setKnownFourComponents(this->getSingleInstance().fColor); |
| 336 |
| 337 if (AntialiasMode::kCoverage == fInfo.fAntialiasMode || |
| 338 (AntialiasMode::kNone == fInfo.fAntialiasMode && |
| 339 !fInfo.isSimpleRects() && fInfo.fCannotDiscard)) { |
| 340 coverage->setUnknownSingleComponent(); |
| 341 } else { |
| 342 coverage->setKnownSingleComponent(255); |
| 343 } |
| 344 } |
| 345 |
| 346 void InstancedRendering::Batch::initBatchTracker(const GrXPOverridesForBatch& overrides) { |
| 347 Draw& draw = this->getSingleDraw(); // This will assert if we have > 1 command. |
| 348 SkASSERT(draw.fGeometry.isEmpty()); |
| 349 SkASSERT(SkIsPow2(fInfo.fShapeTypes)); |
| 350 SkASSERT(!fIsTracked); |
| 351 |
| 352 if (kRect_ShapeFlag == fInfo.fShapeTypes) { |
| 353 draw.fGeometry = InstanceProcessor::GetIndexRangeForRect(fInfo.fAntialiasMode); |
| 354 } else if (kOval_ShapeFlag == fInfo.fShapeTypes) { |
| 355 draw.fGeometry = InstanceProcessor::GetIndexRangeForOval(fInfo.fAntialiasMode, fBounds); |
| 356 } else { |
| 357 draw.fGeometry = InstanceProcessor::GetIndexRangeForRRect(fInfo.fAntialiasMode); |
| 358 } |
| 359 |
| 360 if (!fParams.empty()) { |
| 361 SkASSERT(fInstancedRendering->fParams.count() < (int)kParamsIdx_InfoMask); // TODO: cleaner. |
| 362 this->getSingleInstance().fInfo |= fInstancedRendering->fParams.count(); |
| 363 fInstancedRendering->fParams.push_back_n(fParams.count(), fParams.begin()); |
| 364 } |
| 365 |
| 366 GrColor overrideColor; |
| 367 if (overrides.getOverrideColorIfSet(&overrideColor)) { |
| 368 SkASSERT(State::kRecordingDraws == fInstancedRendering->fState); |
| 369 this->getSingleInstance().fColor = overrideColor; |
| 370 } |
| 371 fInfo.fUsesLocalCoords = overrides.readsLocalCoords(); |
| 372 fInfo.fCannotTweakAlphaForCoverage = !overrides.canTweakAlphaForCoverage(); |
| 373 |
| 374 fInstancedRendering->fTrackedBatches.addToTail(this); |
| 375 fIsTracked = true; |
| 376 } |
| 377 |
| 378 bool InstancedRendering::Batch::onCombineIfPossible(GrBatch* other, const GrCaps& caps) { |
| 379 Batch* that = static_cast<Batch*>(other); |
| 380 SkASSERT(fInstancedRendering == that->fInstancedRendering); |
| 381 SkASSERT(fTailDraw); |
| 382 SkASSERT(that->fTailDraw); |
| 383 |
| 384 if (!BatchInfo::CanCombine(fInfo, that->fInfo) || |
| 385 !GrPipeline::CanCombine(*this->pipeline(), this->bounds(), |
| 386 *that->pipeline(), that->bounds(), caps)) { |
| 387 return false; |
| 388 } |
| 389 |
| 390 BatchInfo combinedInfo = fInfo | that->fInfo; |
| 391 if (!combinedInfo.isSimpleRects()) { |
| 392 // This threshold was chosen with the "shapes_mixed" bench on a MacBook with Intel graphics. |
| 393 // There seems to be a wide range where it doesn't matter if we combine or not. What matters |
| 394 // is that the itty bitty rects combine with other shapes and the giant ones don't. |
| 395 constexpr SkScalar kMaxPixelsToGeneralizeRects = 256 * 256; |
| 396 if (fInfo.isSimpleRects() && fPixelLoad > kMaxPixelsToGeneralizeRects) { |
| 397 return false; |
| 398 } |
| 399 if (that->fInfo.isSimpleRects() && that->fPixelLoad > kMaxPixelsToGeneralizeRects) { |
| 400 return false; |
| 401 } |
| 402 } |
| 403 |
| 404 fBounds.join(that->fBounds); |
| 405 fInfo = combinedInfo; |
| 406 fPixelLoad += that->fPixelLoad; |
| 407 |
| 408 // Adopt the other batch's draws. |
| 409 fNumDraws += that->fNumDraws; |
| 410 fNumChangesInGeometry += that->fNumChangesInGeometry; |
| 411 if (fTailDraw->fGeometry != that->fHeadDraw->fGeometry) { |
| 412 ++fNumChangesInGeometry; |
| 413 } |
| 414 fTailDraw->fNext = that->fHeadDraw; |
| 415 fTailDraw = that->fTailDraw; |
| 416 |
| 417 that->fHeadDraw = that->fTailDraw = nullptr; |
| 418 |
| 419 return true; |
| 420 } |
| 421 |
| 422 void InstancedRendering::beginFlush(GrResourceProvider* rp) { |
| 423 SkASSERT(State::kRecordingDraws == fState); |
| 424 fState = State::kFlushing; |
| 425 |
| 426 if (fTrackedBatches.isEmpty()) { |
| 427 return; |
| 428 } |
| 429 |
| 430 if (!fVertexBuffer) { |
| 431 fVertexBuffer.reset(InstanceProcessor::FindOrCreateVertexBuffer(fGpu)); |
| 432 if (!fVertexBuffer) { |
| 433 return; |
| 434 } |
| 435 } |
| 436 |
| 437 if (!fIndexBuffer) { |
| 438 fIndexBuffer.reset(InstanceProcessor::FindOrCreateIndex8Buffer(fGpu)); |
| 439 if (!fIndexBuffer) { |
| 440 return; |
| 441 } |
| 442 } |
| 443 |
| 444 if (!fParams.empty()) { |
| 445 fParamsBuffer.reset(rp->createBuffer(fParams.count() * sizeof(ParamsTexel), |
| 446 kTexel_GrBufferType, kDynamic_GrAccessPattern, |
| 447 GrResourceProvider::kNoPendingIO_Flag, |
| 448 fParams.begin())); |
| 449 if (!fParamsBuffer) { |
| 450 return; |
| 451 } |
| 452 } |
| 453 |
| 454 this->onBeginFlush(rp); |
| 455 } |
| 456 |
| 457 void InstancedRendering::Batch::onDraw(GrBatchFlushState* state) { |
| 458 SkASSERT(State::kFlushing == fInstancedRendering->fState); |
| 459 SkASSERT(state->gpu() == fInstancedRendering->gpu()); |
| 460 |
| 461 state->gpu()->handleDirtyContext(); |
| 462 if (GrXferBarrierType barrierType = this->pipeline()->xferBarrierType(*state->gpu()->caps())) { |
| 463 state->gpu()->xferBarrier(this->pipeline()->getRenderTarget(), barrierType); |
| 464 } |
| 465 |
| 466 InstanceProcessor instProc(fInfo, fInstancedRendering->fParamsBuffer); |
| 467 fInstancedRendering->onDraw(*this->pipeline(), instProc, this); |
| 468 } |
| 469 |
| 470 void InstancedRendering::endFlush() { |
| 471 // The caller is expected to delete all tracked batches (i.e. batches whose initBatchTracker |
| 472 // method has been called) before ending the flush. |
| 473 SkASSERT(fTrackedBatches.isEmpty()); |
| 474 fParams.reset(); |
| 475 fParamsBuffer.reset(); |
| 476 this->onEndFlush(); |
| 477 fState = State::kRecordingDraws; |
| 478 // Hold on to the shape coords and index buffers. |
| 479 } |
| 480 |
| 481 void InstancedRendering::resetGpuResources(ResetType resetType) { |
| 482 fVertexBuffer.reset(); |
| 483 fIndexBuffer.reset(); |
| 484 fParamsBuffer.reset(); |
| 485 this->onResetGpuResources(resetType); |
| 486 } |
| 487 |
| 488 } |