OLD | NEW |
(Empty) | |
| 1 /* |
| 2 * Copyright 2016 Google Inc. |
| 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. |
| 6 */ |
| 7 |
| 8 #include "GrInstancedRendering.h" |
| 9 |
| 10 #include "GrBatchFlushState.h" |
| 11 #include "GrPipeline.h" |
| 12 #include "GrResourceProvider.h" |
| 13 #include "effects/GrInstanceProcessor.h" |
| 14 |
// Returns the nine SkScalars of 'matrix' as a flat array by reinterpreting the matrix's
// own storage. This relies on SkMatrix laying out its values contiguously in the order
// scaleX, skewX, transX, skewY, scaleY, transY, perspX, perspY, persp2 — the debug-only
// asserts below verify that layout assumption element by element.
// NOTE(review): reinterpret_cast here assumes SkMatrix has no leading members before its
// value array — TODO confirm against the SkMatrix definition in use.
static const SkScalar* matrix_as_scalars(const SkMatrix& matrix) {
    const SkScalar* scalars = reinterpret_cast<const SkScalar*>(&matrix);
    SkASSERT(scalars[0] == matrix.getScaleX());
    SkASSERT(scalars[1] == matrix.getSkewX());
    SkASSERT(scalars[2] == matrix.getTranslateX());
    SkASSERT(scalars[3] == matrix.getSkewY());
    SkASSERT(scalars[4] == matrix.getScaleY());
    SkASSERT(scalars[5] == matrix.getTranslateY());
    SkASSERT(scalars[6] == matrix.getPerspX());
    SkASSERT(scalars[7] == matrix.getPerspY());
    SkASSERT(scalars[8] == matrix[8]);
    return scalars;
}
| 28 |
// Takes a ref on 'gpu'. 'sizeofBatchClass' sizes the slots of fBatchAllocator so the
// subclass's concrete Batch type can be constructed in place (see recordShape /
// constructBatch). Recording starts in the kRecordingShapes state.
GrInstancedRendering::GrInstancedRendering(GrGpu* gpu, uint32_t supportedAAModes,
                                           size_t sizeofBatchClass)
    : fGpu(SkRef(gpu)),
      fSupportedAAModes(supportedAAModes),
      fState(State::kRecordingShapes),
      fBatchAllocator(sizeofBatchClass) {
    // Debug-only counter; balanced by Batch::onDelete and checked in restart().
    SkDEBUGCODE(fInUseBatchCount = 0;)
}
| 37 |
// Records a rect whose local coordinates are the rect itself (localRect == rect).
// Returns nullptr if the shape cannot be handled (see recordShape); may set
// *requireHWAA when a mixed-samples fallback is selected.
GrDrawBatch* GrInstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix,
                                              GrColor color, bool antialias, uint32_t flags,
                                              bool* requireHWAA) {
    return this->recordShape(kRect_ShapeType, rect, viewMatrix, color, rect, antialias, flags,
                             requireHWAA);
}
| 44 |
// Records a rect with an explicit local-coordinate rect ('localRect') instead of the
// device-space rect itself.
GrDrawBatch* GrInstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix,
                                              GrColor color, const SkRect& localRect,
                                              bool antialias, uint32_t flags, bool* requireHWAA) {
    return this->recordShape(kRect_ShapeType, rect, viewMatrix, color, localRect, antialias, flags,
                             requireHWAA);
}
| 51 |
// Records a rect with a local matrix. The matrix is delivered to the shader via two
// params texels (first two rows, 3 scalars each); perspective local matrices are
// rejected up front.
GrDrawBatch* GrInstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix,
                                              GrColor color, const SkMatrix& localMatrix,
                                              bool antialias, uint32_t flags, bool* requireHWAA) {
    if (localMatrix.hasPerspective()) {
        return nullptr; // Perspective local matrix not supported yet.
    }
    if (Batch* batch = this->recordShape(kRect_ShapeType, rect, viewMatrix, color, rect, antialias,
                                         flags, requireHWAA)) {
        // recordShape just pushed this instance; tag it as carrying a local matrix.
        fInstances.back().fInfo |= kLocalMatrix_InfoFlag;
        // Rows of the 3x3 matrix: [scaleX skewX transX] then [skewY scaleY transY].
        this->appendParamsTexel(matrix_as_scalars(localMatrix), 3);
        this->appendParamsTexel(matrix_as_scalars(localMatrix) + 3, 3);
        batch->fTracker.fHasLocalMatrix = true;
        batch->fTracker.fHasParams = true;
        return batch;
    }
    return nullptr;
}
| 69 |
// Records an oval inscribed in 'oval'; local coordinates are the oval's bounding rect.
GrDrawBatch* GrInstancedRendering::recordOval(const SkRect& oval, const SkMatrix& viewMatrix,
                                              GrColor color, bool antialias, uint32_t flags,
                                              bool* requireHWAA) {
    return this->recordShape(kOval_ShapeType, oval, viewMatrix, color, oval, antialias, flags,
                             requireHWAA);
}
| 76 |
// Records a round rect. The shape type is derived from the rrect's complexity
// (RRectShapeType) and the corner radii are appended as params texels by
// appendRRectParams.
GrDrawBatch* GrInstancedRendering::recordRRect(const SkRRect& rrect, const SkMatrix& viewMatrix,
                                               GrColor color, bool antialias, uint32_t flags,
                                               bool* requireHWAA) {
    if (Batch* batch = this->recordShape(RRectShapeType(rrect), rrect.rect(), viewMatrix, color,
                                         rrect.rect(), antialias, flags, requireHWAA)) {
        this->appendRRectParams(rrect, &batch->fTracker);
        return batch;
    }
    return nullptr;
}
| 87 |
// Records a double round rect (outer rrect with an inner rrect knocked out).
// Inner rrects more complex than kSimple are rejected; an empty inner rrect degenerates
// to a plain rrect draw. The inner shape is encoded in the instance's info bits and its
// rect + radii are appended as params texels — the append order (outer params, inner
// rect, inner params) is the layout the shader-side consumer expects; do not reorder.
GrDrawBatch* GrInstancedRendering::recordDRRect(const SkRRect& outer, const SkRRect& inner,
                                                const SkMatrix& viewMatrix, GrColor color,
                                                bool antialias, uint32_t flags, bool* requireHWAA) {
    if (inner.getType() > SkRRect::kSimple_Type) {
        return nullptr; // Complex inner rrects are not supported.
    }
    if (SkRRect::kEmpty_Type == inner.getType()) {
        return this->recordRRect(outer, viewMatrix, color, antialias, flags, requireHWAA);
    }
    if (Batch* batch = this->recordShape(RRectShapeType(outer), outer.rect(), viewMatrix, color,
                                         outer.rect(), antialias, flags, requireHWAA)) {
        this->appendRRectParams(outer, &batch->fTracker);
        ShapeType innerShapeType = RRectShapeType(inner);
        batch->fTracker.fInnerShapeTypes |= (1 << innerShapeType);
        fInstances.back().fInfo |= (innerShapeType << kInnerShapeType_InfoBit);
        this->appendParamsTexel(inner.rect().asScalars(), 4);
        this->appendRRectParams(inner, &batch->fTracker);
        batch->fTracker.fHasParams = true;
        return batch;
    }
    return nullptr;
}
| 110 |
| 111 GrInstancedRendering::Batch* GrInstancedRendering::recordShape(ShapeType type, c
onst SkRect& bounds, |
| 112 const SkMatrix& v
iewMatrix, |
| 113 GrColor color, |
| 114 const SkRect& loc
alRect, |
| 115 bool antialias, u
int32_t flags, |
| 116 bool* requireHWAA
) { |
| 117 SkASSERT(State::kRecordingShapes == fState); |
| 118 |
| 119 uint32_t paramsIdx = fParams.count(); |
| 120 if (paramsIdx > kParamsIdx_InfoMask) { |
| 121 return nullptr; // Params index is too large for its allotted space. |
| 122 } |
| 123 |
| 124 AntialiasMode aa; |
| 125 if (!this->selectAntialiasMode(viewMatrix, antialias, flags, &aa, requireHWA
A)) { |
| 126 return nullptr; |
| 127 } |
| 128 // We can't return null after this point since requireHWAA might be set. |
| 129 |
| 130 Batch* batch = this->constructBatch(fBatchAllocator.push_back(), aa, flags,
fInstances.count()); |
| 131 SkASSERT(batch == fBatchAllocator.back()); // We rely on batch ptr equality
with the allocator. |
| 132 SkDEBUGCODE(++fInUseBatchCount;) |
| 133 batch->fTracker.fShapeTypes |= (1 << type); |
| 134 |
| 135 Instance& instance = fInstances.push_back(); |
| 136 instance.fInfo = (type << kShapeType_InfoBit) | paramsIdx; |
| 137 |
| 138 // The instanced shape renderer draws rectangles of [-1, -1, +1, +1], so we
find the matrix that |
| 139 // will map this rectangle to the same device coordinates as "viewMatrix * b
ounds". |
| 140 float sx = 0.5f * bounds.width(); |
| 141 float sy = 0.5f * bounds.height(); |
| 142 float tx = sx + bounds.fLeft; |
| 143 float ty = sy + bounds.fTop; |
| 144 if (!viewMatrix.hasPerspective()) { |
| 145 float* m = instance.fShapeMatrix2x3; |
| 146 m[0] = viewMatrix[SkMatrix::kMScaleX] * sx; |
| 147 m[1] = viewMatrix[SkMatrix::kMSkewX] * sy; |
| 148 m[2] = viewMatrix[SkMatrix::kMTransX] + |
| 149 viewMatrix[SkMatrix::kMScaleX] * tx + viewMatrix[SkMatrix::kMSkew
X] * ty; |
| 150 |
| 151 m[3] = viewMatrix[SkMatrix::kMSkewY] * sx; |
| 152 m[4] = viewMatrix[SkMatrix::kMScaleY] * sy; |
| 153 m[5] = viewMatrix[SkMatrix::kMTransY] + |
| 154 viewMatrix[SkMatrix::kMSkewY] * tx + viewMatrix[SkMatrix::kMScale
Y] * ty; |
| 155 |
| 156 // Since 'm' is a 2x3 matrix that maps the rect [-1, -1, +1, +1] into th
e shape's device- |
| 157 // space quad, it's quite simple to find the bounding rectangle: |
| 158 float devBoundsHalfWidth = fabsf(m[0]) + fabsf(m[1]); |
| 159 float devBoundsHalfHeight = fabsf(m[3]) + fabsf(m[4]); |
| 160 batch->fBounds.fLeft = m[2] - devBoundsHalfWidth; |
| 161 batch->fBounds.fRight = m[2] + devBoundsHalfWidth; |
| 162 batch->fBounds.fTop = m[5] - devBoundsHalfHeight; |
| 163 batch->fBounds.fBottom = m[5] + devBoundsHalfHeight; |
| 164 |
| 165 // TODO: Is this worth the CPU overhead? |
| 166 batch->fTracker.fNonSquare = |
| 167 fabsf(devBoundsHalfHeight - devBoundsHalfWidth) > 0.5f || // Early o
ut. |
| 168 fabs(m[0] * m[3] + m[1] * m[4]) > 1e-3f || // Skew? |
| 169 fabs(m[0] * m[0] + m[1] * m[1] - m[3] * m[3] - m[4] * m[4]) > 1e-2f;
// Diff. lengths? |
| 170 } else { |
| 171 SkMatrix shapeMatrix(viewMatrix); |
| 172 shapeMatrix.preTranslate(tx, ty); |
| 173 shapeMatrix.preScale(sx, sy); |
| 174 instance.fInfo |= kPerspective_InfoFlag; |
| 175 #if SK_SCALAR_IS_FLOAT |
| 176 const float* matrixAsFloats = matrix_as_scalars(shapeMatrix); |
| 177 memcpy(instance.fShapeMatrix2x3, matrixAsFloats, 6 * sizeof(float)); |
| 178 #else |
| 179 float* m = instance.fShapeMatrix2x3; |
| 180 m[0] == static_cast<float>(matrix.getScaleX()); |
| 181 m[1] == static_cast<float>(matrix.getSkewX()); |
| 182 m[2] == static_cast<float>(matrix.getTranslateX()); |
| 183 m[3] == static_cast<float>(matrix.getSkewY()); |
| 184 m[4] == static_cast<float>(matrix.getScaleY()); |
| 185 m[5] == static_cast<float>(matrix.getTranslateY()); |
| 186 m[6] == static_cast<float>(matrix.getPerspX()); |
| 187 #endif |
| 188 // Send the perspective column as a param. |
| 189 this->appendParamsTexel(matrix_as_scalars(shapeMatrix) + 6, 3); |
| 190 batch->fTracker.fHasPerspective = true; |
| 191 batch->fTracker.fHasParams = true; |
| 192 |
| 193 viewMatrix.mapRect(&batch->fBounds, bounds); |
| 194 |
| 195 batch->fTracker.fNonSquare = true; |
| 196 } |
| 197 |
| 198 instance.fColor = color; |
| 199 #if SK_SCALAR_IS_FLOAT |
| 200 const float* rectAsFloats = localRect.asScalars(); |
| 201 memcpy(&instance.fLocalRect, rectAsFloats, 4 * sizeof(float)); |
| 202 #else |
| 203 instance.fLocalRect[0] = static_cast<float>(localRect.left()); |
| 204 instance.fLocalRect[1] = static_cast<float>(localRect.top()); |
| 205 instance.fLocalRect[2] = static_cast<float>(localRect.right()); |
| 206 instance.fLocalRect[3] = static_cast<float>(localRect.bottom()); |
| 207 #endif |
| 208 |
| 209 return batch; |
| 210 } |
| 211 |
// Chooses the antialias mode for a recorded shape, or returns false if no supported mode
// fits the request. Precedence when 'antialias' is true: analytic coverage AA (needs a
// rect-angle-preserving view matrix and neither MSAA color nor stencil writes), then
// MSAA, then mixed samples (which additionally sets *requireHWAA). 'requireHWAA' is only
// dereferenced on the mixed-samples path — presumably callers that can reach that path
// always pass a non-null pointer (the SkASSERT below checks it).
inline bool GrInstancedRendering::selectAntialiasMode(const SkMatrix& viewMatrix, bool antialias,
                                                      uint32_t flags, AntialiasMode* antialiasMode,
                                                      bool* requireHWAA) {
    SkASSERT(flags & (kColorWrite_Flag | kStencilWrite_Flag));
    // Color-buffer MSAA without stencil-buffer MSAA is an invalid combination.
    SkASSERT((flags & (kColorBufferMSAA_Flag | kStencilBufferMSAA_Flag)) != kColorBufferMSAA_Flag);

    if (!antialias) {
        if (!(fSupportedAAModes & kNone_AntialiasFlag)) {
            return false;
        }
        if ((flags & (kStencilWrite_Flag | kCanDiscardFragments_Flag)) == kStencilWrite_Flag) {
            // We can only subtract coverage from the stencil test via discard when no MSAA.
            return false;
        }
        *antialiasMode = kNone_AntialiasMode;
        return true;
    }

    if (!(flags & (kColorBufferMSAA_Flag | kStencilWrite_Flag)) &&
        viewMatrix.preservesRightAngles()) {
        SkASSERT(fSupportedAAModes & kCoverage_AntialiasFlag);
        *antialiasMode = kCoverage_AntialiasMode;
        return true;
    }

    if ((fSupportedAAModes & kMSAA_AntialiasFlag) && (flags & kStencilBufferMSAA_Flag)) {
        if ((flags ^ kColorWrite_Flag) & (kColorWrite_Flag | kColorBufferMSAA_Flag)) {
            // We either do not write color, or the color buffer is multisampled.
            *antialiasMode = kMSAA_AntialiasMode;
            return true;
        }
        if (fSupportedAAModes & kMixedSamples_AntialiasFlag) {
            SkASSERT(requireHWAA);
            *antialiasMode = kMixedSamples_AntialiasMode;
            *requireHWAA = true;
            return true;
        }
    }

    return false;
}
| 253 |
// Appends the rrect's corner-radius data as params texels and marks the tracker as
// having params. The number and layout of texels depends on the rrect type (one texel
// for simple and nine-patch, two for complex); radii are normalized to the rrect's
// size via 2/width and 2/height so they are expressed in the shape's [-1,+1] space.
// Rect/oval types (the default case) append nothing.
void GrInstancedRendering::appendRRectParams(const SkRRect& rrect, BatchTracker* tracker) {
    switch (rrect.getType()) {
        case SkRRect::kSimple_Type: {
            // All four corners share one radius pair; also pack the rrect dimensions.
            const SkVector& radii = rrect.getSimpleRadii();
            this->appendParamsTexel(radii.x(), radii.y(), rrect.width(), rrect.height());
            tracker->fHasParams = true;
            return;
        }
        case SkRRect::kNinePatch_Type: {
            // Left/right columns and top/bottom rows each share radii; the top-left and
            // bottom-right corners together carry all four distinct values.
            float twoOverW = 2 / rrect.width();
            float twoOverH = 2 / rrect.height();
            const SkVector& radiiTL = rrect.radii(SkRRect::kUpperLeft_Corner);
            const SkVector& radiiBR = rrect.radii(SkRRect::kLowerRight_Corner);
            this->appendParamsTexel(radiiTL.x() * twoOverW, radiiBR.x() * twoOverW,
                                    radiiTL.y() * twoOverH, radiiBR.y() * twoOverH);
            tracker->fHasParams = true;
            return;
        }
        case SkRRect::kComplex_Type: {
            /**
             * The x and y radii of each arc are stored in separate vectors,
             * in the following order:
             *
             *        __x1 _ _ _ x3__
             *     y1 |               | y2
             *
             *        |               |
             *
             *     y3 |__ _ _ _ __| y4
             *          x2       x4
             *
             */
            float twoOverW = 2 / rrect.width();
            float twoOverH = 2 / rrect.height();
            const SkVector& radiiTL = rrect.radii(SkRRect::kUpperLeft_Corner);
            const SkVector& radiiTR = rrect.radii(SkRRect::kUpperRight_Corner);
            const SkVector& radiiBR = rrect.radii(SkRRect::kLowerRight_Corner);
            const SkVector& radiiBL = rrect.radii(SkRRect::kLowerLeft_Corner);
            this->appendParamsTexel(radiiTL.x() * twoOverW, radiiBL.x() * twoOverW,
                                    radiiTR.x() * twoOverW, radiiBR.x() * twoOverW);
            this->appendParamsTexel(radiiTL.y() * twoOverH, radiiTR.y() * twoOverH,
                                    radiiBL.y() * twoOverH, radiiBR.y() * twoOverH);
            tracker->fHasParams = true;
            return;
        }
        default: return;
    }
}
| 302 |
// Appends one params texel initialized from up to four scalars. When SkScalar is float
// the values are memcpy'd directly; otherwise each is converted. NOTE(review): when
// count < 4 the remaining components of the new texel are left unwritten — presumably
// the shader never reads past 'count' for these entries; confirm with the consumer.
void GrInstancedRendering::appendParamsTexel(const SkScalar* vals, int count) {
    SkASSERT(count <= 4 && count >= 0);
#if SK_SCALAR_IS_FLOAT
    const float* valsAsFloats = vals;
    memcpy(&fParams.push_back(), valsAsFloats, count * sizeof(float));
#else
    float* params = fParams.push_back().fValues;
    for (int i = 0; i < count; i++) {
        params[i] = static_cast<float>(vals[i]);
    }
#endif
}
| 315 |
| 316 void GrInstancedRendering::appendParamsTexel(float x, float y, float z, float w)
{ |
| 317 ParamsTexel& texel = fParams.push_back(); |
| 318 texel.fX = x; |
| 319 texel.fY = y; |
| 320 texel.fZ = z; |
| 321 texel.fW = w; |
| 322 } |
| 323 |
| 324 void GrInstancedRendering::appendParamsTexel(float x, float y, float z) { |
| 325 ParamsTexel& texel = fParams.push_back(); |
| 326 texel.fX = x; |
| 327 texel.fY = y; |
| 328 texel.fZ = z; |
| 329 } |
| 330 |
| 331 void GrInstancedRendering::Batch::computePipelineOptimizations(GrInitInvariantOu
tput* color, |
| 332 GrInitInvariantOutpu
t* coverage, |
| 333 GrBatchToXPOverrides
* overrides) const { |
| 334 // We can't rely on fTracker here because it might change later as batches a
re combined. |
| 335 color->setUnknownFourComponents(); |
| 336 if (fAntialiasMode >= kMSAA_AntialiasMode) { |
| 337 coverage->setKnownSingleComponent(255); |
| 338 } else if (kNone_AntialiasMode == fAntialiasMode && (fFlags & kCanDiscardFra
gments_Flag)) { |
| 339 coverage->setKnownSingleComponent(255); |
| 340 } else { |
| 341 coverage->setUnknownSingleComponent(); |
| 342 } |
| 343 } |
| 344 |
// Records which pipeline inputs this batch's shaders must produce, based on the XP's
// overrides, and applies a color override (if any) directly to the recorded instance.
void GrInstancedRendering::Batch::initBatchTracker(const GrXPOverridesForBatch& overrides) {
    fTracker.fUsesColor = overrides.readsColor();
    fTracker.fUsesCoverage = overrides.readsCoverage();
    fTracker.fUsesLocalCoords = overrides.readsLocalCoords();
    fTracker.fCannotTweakAlphaForCoverage = !overrides.canTweakAlphaForCoverage();

    GrColor overrideColor;
    if (overrides.getOverrideColorIfSet(&overrideColor)) {
        // Overriding is only valid while still recording and before any combining, since
        // a combined batch would cover more than the single instance patched here.
        SkASSERT(State::kRecordingShapes == fInstancedRendering->fState);
        SkASSERT(!fIsCombined);
        fInstancedRendering->fInstances[fFirstInstanceIdx].fColor = overrideColor;
    }
}
| 358 |
// Transitions from recording to drawing and uploads recorded data to GPU buffers.
// The shared vertex/index buffers are created lazily and retained across restarts;
// the instance and params buffers are rebuilt from this cycle's recorded arrays.
// Any buffer-creation failure returns early, leaving the remaining buffers unset —
// presumably onDraw tolerates the missing buffers by drawing nothing; TODO confirm.
void GrInstancedRendering::commitToGpu(GrResourceProvider* rp) {
    SkASSERT(State::kRecordingShapes == fState);
    fState = State::kDrawingBatches;

    if (fInstances.empty()) {
        return; // Nothing recorded this cycle.
    }

    if (!fVertexBuffer) {
        fVertexBuffer.reset(GrInstanceProcessor::FindOrCreateVertexBuffer(fGpu));
        if (!fVertexBuffer) {
            return;
        }
    }

    if (!fIndexBuffer) {
        fIndexBuffer.reset(GrInstanceProcessor::FindOrCreateIndex8Buffer(fGpu));
        if (!fIndexBuffer) {
            return;
        }
    }

    // Per-instance data, uploaded in one shot from the CPU-side array.
    fInstanceBuffer.reset(rp->createBuffer(fInstances.count() * sizeof(Instance),
                                           kVertex_GrBufferType, kDynamic_GrAccessPattern,
                                           GrResourceProvider::kNoPendingIO_Flag,
                                           fInstances.begin()));
    if (!fInstanceBuffer) {
        return;
    }

    if (!fParams.empty()) {
        fParamsBuffer.reset(rp->createBuffer(fParams.count() * sizeof(ParamsTexel),
                                             kTexel_GrBufferType, kDynamic_GrAccessPattern,
                                             GrResourceProvider::kNoPendingIO_Flag,
                                             fParams.begin()));
        if (!fParamsBuffer) {
            return;
        }
    }

    // Subclass hook for any backend-specific commit work.
    this->onCommitToGpu(rp);
}
| 401 |
// Issues this batch's draw by building a GrInstanceProcessor from the tracked state and
// delegating to the owning GrInstancedRendering's backend-specific onDraw.
void GrInstancedRendering::Batch::onDraw(GrBatchFlushState* state) {
    SkASSERT(State::kDrawingBatches == fInstancedRendering->fState);
    SkASSERT(state->gpu() == fInstancedRendering->gpu());

    GrInstanceProcessor instProc(fTracker, fInstancedRendering->fParamsBuffer, fAntialiasMode,
                                 (fFlags & kCanDiscardFragments_Flag));
    fInstancedRendering->onDraw(*this->pipeline(), instProc, this);
}
| 410 |
| 411 void GrInstancedRendering::Batch::onDelete() const { |
| 412 this->~Batch(); |
| 413 SkDEBUGCODE(--fInstancedRendering->fInUseBatchCount); |
| 414 if (this == fInstancedRendering->fBatchAllocator.back()) { |
| 415 fInstancedRendering->fBatchAllocator.pop_back(); |
| 416 } |
| 417 } |
| 418 |
// Resets per-cycle state so a new round of shapes can be recorded. All batches must
// already be deleted (fInUseBatchCount == 0). The shared vertex/index buffers are
// deliberately retained (see comment below); only the per-cycle instance/params data
// and their GPU buffers are dropped. The state flips back to recording last, after the
// subclass's onRestart hook.
void GrInstancedRendering::restart() {
    SkASSERT(0 == fInUseBatchCount);
    fBatchAllocator.reset();
    // Hold on to the shape coords and index buffers.
    fInstances.reset();
    fParams.reset();
    fInstanceBuffer.reset();
    fParamsBuffer.reset();
    this->onRestart();
    fState = State::kRecordingShapes;
}
| 430 |
// Releases all GPU buffers, including the shared vertex/index buffers that restart()
// keeps, then lets the subclass drop its own resources via onClearGpuResources.
void GrInstancedRendering::clearGpuResources(ClearType clearType) {
    fVertexBuffer.reset();
    fIndexBuffer.reset();
    fInstanceBuffer.reset();
    fParamsBuffer.reset();
    this->onClearGpuResources(clearType);
}
OLD | NEW |