| OLD | NEW |
| (Empty) |
| 1 | |
| 2 /* | |
| 3 * Copyright 2012 Google Inc. | |
| 4 * | |
| 5 * Use of this source code is governed by a BSD-style license that can be | |
| 6 * found in the LICENSE file. | |
| 7 */ | |
| 8 | |
| 9 #include "GrAAConvexPathRenderer.h" | |
| 10 | |
| 11 #include "GrAAConvexTessellator.h" | |
| 12 #include "GrBatchFlushState.h" | |
| 13 #include "GrBatchTest.h" | |
| 14 #include "GrCaps.h" | |
| 15 #include "GrContext.h" | |
| 16 #include "GrDefaultGeoProcFactory.h" | |
| 17 #include "GrGeometryProcessor.h" | |
| 18 #include "GrInvariantOutput.h" | |
| 19 #include "GrPathUtils.h" | |
| 20 #include "GrProcessor.h" | |
| 21 #include "GrPipelineBuilder.h" | |
| 22 #include "GrStrokeInfo.h" | |
| 23 #include "SkGeometry.h" | |
| 24 #include "SkPathPriv.h" | |
| 25 #include "SkString.h" | |
| 26 #include "SkTraceEvent.h" | |
| 27 #include "batches/GrVertexBatch.h" | |
| 28 #include "gl/GrGLProcessor.h" | |
| 29 #include "gl/GrGLGeometryProcessor.h" | |
| 30 #include "gl/builders/GrGLProgramBuilder.h" | |
| 31 | |
// No per-instance state to initialize; all work happens in the virtual
// onCanDrawPath/onDrawPath overrides.
GrAAConvexPathRenderer::GrAAConvexPathRenderer() {
}
| 34 | |
| 35 struct Segment { | |
| 36 enum { | |
| 37 // These enum values are assumed in member functions below. | |
| 38 kLine = 0, | |
| 39 kQuad = 1, | |
| 40 } fType; | |
| 41 | |
| 42 // line uses one pt, quad uses 2 pts | |
| 43 SkPoint fPts[2]; | |
| 44 // normal to edge ending at each pt | |
| 45 SkVector fNorms[2]; | |
| 46 // is the corner where the previous segment meets this segment | |
| 47 // sharp. If so, fMid is a normalized bisector facing outward. | |
| 48 SkVector fMid; | |
| 49 | |
| 50 int countPoints() { | |
| 51 GR_STATIC_ASSERT(0 == kLine && 1 == kQuad); | |
| 52 return fType + 1; | |
| 53 } | |
| 54 const SkPoint& endPt() const { | |
| 55 GR_STATIC_ASSERT(0 == kLine && 1 == kQuad); | |
| 56 return fPts[fType]; | |
| 57 }; | |
| 58 const SkPoint& endNorm() const { | |
| 59 GR_STATIC_ASSERT(0 == kLine && 1 == kQuad); | |
| 60 return fNorms[fType]; | |
| 61 }; | |
| 62 }; | |
| 63 | |
| 64 typedef SkTArray<Segment, true> SegmentArray; | |
| 65 | |
| 66 static void center_of_mass(const SegmentArray& segments, SkPoint* c) { | |
| 67 SkScalar area = 0; | |
| 68 SkPoint center = {0, 0}; | |
| 69 int count = segments.count(); | |
| 70 SkPoint p0 = {0, 0}; | |
| 71 if (count > 2) { | |
| 72 // We translate the polygon so that the first point is at the origin. | |
| 73 // This avoids some precision issues with small area polygons far away | |
| 74 // from the origin. | |
| 75 p0 = segments[0].endPt(); | |
| 76 SkPoint pi; | |
| 77 SkPoint pj; | |
| 78 // the first and last iteration of the below loop would compute | |
| 79 // zeros since the starting / ending point is (0,0). So instead we start | |
| 80 // at i=1 and make the last iteration i=count-2. | |
| 81 pj = segments[1].endPt() - p0; | |
| 82 for (int i = 1; i < count - 1; ++i) { | |
| 83 pi = pj; | |
| 84 const SkPoint pj = segments[i + 1].endPt() - p0; | |
| 85 | |
| 86 SkScalar t = SkScalarMul(pi.fX, pj.fY) - SkScalarMul(pj.fX, pi.fY); | |
| 87 area += t; | |
| 88 center.fX += (pi.fX + pj.fX) * t; | |
| 89 center.fY += (pi.fY + pj.fY) * t; | |
| 90 | |
| 91 } | |
| 92 } | |
| 93 // If the poly has no area then we instead return the average of | |
| 94 // its points. | |
| 95 if (SkScalarNearlyZero(area)) { | |
| 96 SkPoint avg; | |
| 97 avg.set(0, 0); | |
| 98 for (int i = 0; i < count; ++i) { | |
| 99 const SkPoint& pt = segments[i].endPt(); | |
| 100 avg.fX += pt.fX; | |
| 101 avg.fY += pt.fY; | |
| 102 } | |
| 103 SkScalar denom = SK_Scalar1 / count; | |
| 104 avg.scale(denom); | |
| 105 *c = avg; | |
| 106 } else { | |
| 107 area *= 3; | |
| 108 area = SkScalarInvert(area); | |
| 109 center.fX = SkScalarMul(center.fX, area); | |
| 110 center.fY = SkScalarMul(center.fY, area); | |
| 111 // undo the translate of p0 to the origin. | |
| 112 *c = center + p0; | |
| 113 } | |
| 114 SkASSERT(!SkScalarIsNaN(c->fX) && !SkScalarIsNaN(c->fY)); | |
| 115 } | |
| 116 | |
| 117 static void compute_vectors(SegmentArray* segments, | |
| 118 SkPoint* fanPt, | |
| 119 SkPathPriv::FirstDirection dir, | |
| 120 int* vCount, | |
| 121 int* iCount) { | |
| 122 center_of_mass(*segments, fanPt); | |
| 123 int count = segments->count(); | |
| 124 | |
| 125 // Make the normals point towards the outside | |
| 126 SkPoint::Side normSide; | |
| 127 if (dir == SkPathPriv::kCCW_FirstDirection) { | |
| 128 normSide = SkPoint::kRight_Side; | |
| 129 } else { | |
| 130 normSide = SkPoint::kLeft_Side; | |
| 131 } | |
| 132 | |
| 133 *vCount = 0; | |
| 134 *iCount = 0; | |
| 135 // compute normals at all points | |
| 136 for (int a = 0; a < count; ++a) { | |
| 137 Segment& sega = (*segments)[a]; | |
| 138 int b = (a + 1) % count; | |
| 139 Segment& segb = (*segments)[b]; | |
| 140 | |
| 141 const SkPoint* prevPt = &sega.endPt(); | |
| 142 int n = segb.countPoints(); | |
| 143 for (int p = 0; p < n; ++p) { | |
| 144 segb.fNorms[p] = segb.fPts[p] - *prevPt; | |
| 145 segb.fNorms[p].normalize(); | |
| 146 segb.fNorms[p].setOrthog(segb.fNorms[p], normSide); | |
| 147 prevPt = &segb.fPts[p]; | |
| 148 } | |
| 149 if (Segment::kLine == segb.fType) { | |
| 150 *vCount += 5; | |
| 151 *iCount += 9; | |
| 152 } else { | |
| 153 *vCount += 6; | |
| 154 *iCount += 12; | |
| 155 } | |
| 156 } | |
| 157 | |
| 158 // compute mid-vectors where segments meet. TODO: Detect shallow corners | |
| 159 // and leave out the wedges and close gaps by stitching segments together. | |
| 160 for (int a = 0; a < count; ++a) { | |
| 161 const Segment& sega = (*segments)[a]; | |
| 162 int b = (a + 1) % count; | |
| 163 Segment& segb = (*segments)[b]; | |
| 164 segb.fMid = segb.fNorms[0] + sega.endNorm(); | |
| 165 segb.fMid.normalize(); | |
| 166 // corner wedges | |
| 167 *vCount += 4; | |
| 168 *iCount += 6; | |
| 169 } | |
| 170 } | |
| 171 | |
// Incremental detector for (near) zero-area paths: tracks whether all points
// seen so far collapse to a single point or lie (within kClose) on one line.
struct DegenerateTestData {
    DegenerateTestData() { fStage = kInitial; }
    bool isDegenerate() const { return kNonDegenerate != fStage; }
    enum {
        kInitial,        // no points seen yet
        kPoint,          // all points coincide with fFirstPoint
        kLine,           // all points lie on the line (fLineNormal, fLineC)
        kNonDegenerate   // a point left the line; path has real area
    } fStage;
    SkPoint fFirstPoint;    // first point fed to update_degenerate_test()
    SkVector fLineNormal;   // unit normal of the candidate line (kLine stage)
    SkScalar fLineC;        // line offset: fLineNormal.dot(p) + fLineC == 0 on the line
};
| 185 | |
// Distance tolerance (and its square) below which points are treated as
// coincident/collinear by the degeneracy and quad-flattening tests.
static const SkScalar kClose = (SK_Scalar1 / 16);
static const SkScalar kCloseSqd = SkScalarMul(kClose, kClose);
| 188 | |
// Feeds one path point into the degeneracy state machine: kInitial -> kPoint
// (all points coincident) -> kLine (all points collinear) -> kNonDegenerate.
static void update_degenerate_test(DegenerateTestData* data, const SkPoint& pt) {
    switch (data->fStage) {
        case DegenerateTestData::kInitial:
            data->fFirstPoint = pt;
            data->fStage = DegenerateTestData::kPoint;
            break;
        case DegenerateTestData::kPoint:
            if (pt.distanceToSqd(data->fFirstPoint) > kCloseSqd) {
                // First point that separates from fFirstPoint defines the
                // candidate line through both points.
                data->fLineNormal = pt - data->fFirstPoint;
                data->fLineNormal.normalize();
                data->fLineNormal.setOrthog(data->fLineNormal);
                data->fLineC = -data->fLineNormal.dot(data->fFirstPoint);
                data->fStage = DegenerateTestData::kLine;
            }
            break;
        case DegenerateTestData::kLine:
            if (SkScalarAbs(data->fLineNormal.dot(pt) + data->fLineC) > kClose) {
                data->fStage = DegenerateTestData::kNonDegenerate;
            }
            // deliberate fall through: kLine and kNonDegenerate both end here
        case DegenerateTestData::kNonDegenerate:
            break;
        default:
            SkFAIL("Unexpected degenerate test stage.");
    }
}
| 214 | |
| 215 static inline bool get_direction(const SkPath& path, const SkMatrix& m, | |
| 216 SkPathPriv::FirstDirection* dir) { | |
| 217 if (!SkPathPriv::CheapComputeFirstDirection(path, dir)) { | |
| 218 return false; | |
| 219 } | |
| 220 // check whether m reverses the orientation | |
| 221 SkASSERT(!m.hasPerspective()); | |
| 222 SkScalar det2x2 = SkScalarMul(m.get(SkMatrix::kMScaleX), m.get(SkMatrix::kMS
caleY)) - | |
| 223 SkScalarMul(m.get(SkMatrix::kMSkewX), m.get(SkMatrix::kMSk
ewY)); | |
| 224 if (det2x2 < 0) { | |
| 225 *dir = SkPathPriv::OppositeFirstDirection(*dir); | |
| 226 } | |
| 227 return true; | |
| 228 } | |
| 229 | |
| 230 static inline void add_line_to_segment(const SkPoint& pt, | |
| 231 SegmentArray* segments) { | |
| 232 segments->push_back(); | |
| 233 segments->back().fType = Segment::kLine; | |
| 234 segments->back().fPts[0] = pt; | |
| 235 } | |
| 236 | |
| 237 static inline void add_quad_segment(const SkPoint pts[3], | |
| 238 SegmentArray* segments) { | |
| 239 if (pts[0].distanceToSqd(pts[1]) < kCloseSqd || pts[1].distanceToSqd(pts[2])
< kCloseSqd) { | |
| 240 if (pts[0] != pts[2]) { | |
| 241 add_line_to_segment(pts[2], segments); | |
| 242 } | |
| 243 } else { | |
| 244 segments->push_back(); | |
| 245 segments->back().fType = Segment::kQuad; | |
| 246 segments->back().fPts[0] = pts[1]; | |
| 247 segments->back().fPts[1] = pts[2]; | |
| 248 } | |
| 249 } | |
| 250 | |
| 251 static inline void add_cubic_segments(const SkPoint pts[4], | |
| 252 SkPathPriv::FirstDirection dir, | |
| 253 SegmentArray* segments) { | |
| 254 SkSTArray<15, SkPoint, true> quads; | |
| 255 GrPathUtils::convertCubicToQuads(pts, SK_Scalar1, true, dir, &quads); | |
| 256 int count = quads.count(); | |
| 257 for (int q = 0; q < count; q += 3) { | |
| 258 add_quad_segment(&quads[q], segments); | |
| 259 } | |
| 260 } | |
| 261 | |
// Walks 'path' (transformed by 'm'), building the Segment list, the fan
// point, and the vertex/index counts. Returns false for paths whose
// direction can't be determined or that are degenerate (near zero area).
static bool get_segments(const SkPath& path,
                         const SkMatrix& m,
                         SegmentArray* segments,
                         SkPoint* fanPt,
                         int* vCount,
                         int* iCount) {
    SkPath::Iter iter(path, true);
    // This renderer over-emphasizes very thin path regions. We use the distance
    // to the path from the sample to compute coverage. Every pixel intersected
    // by the path will be hit and the maximum distance is sqrt(2)/2. We don't
    // notice that the sample may be close to a very thin area of the path and
    // thus should be very light. This is particularly egregious for degenerate
    // line paths. We detect paths that are very close to a line (zero area) and
    // draw nothing.
    DegenerateTestData degenerateData;
    SkPathPriv::FirstDirection dir;
    // get_direction can fail for some degenerate paths.
    if (!get_direction(path, m, &dir)) {
        return false;
    }

    for (;;) {
        SkPoint pts[4];
        SkPath::Verb verb = iter.next(pts);
        switch (verb) {
            case SkPath::kMove_Verb:
                // Map only the move-to point; it seeds the degeneracy test.
                m.mapPoints(pts, 1);
                update_degenerate_test(&degenerateData, pts[0]);
                break;
            case SkPath::kLine_Verb: {
                // pts[0] was already mapped by the preceding verb.
                m.mapPoints(&pts[1], 1);
                update_degenerate_test(&degenerateData, pts[1]);
                add_line_to_segment(pts[1], segments);
                break;
            }
            case SkPath::kQuad_Verb:
                m.mapPoints(pts, 3);
                update_degenerate_test(&degenerateData, pts[1]);
                update_degenerate_test(&degenerateData, pts[2]);
                add_quad_segment(pts, segments);
                break;
            case SkPath::kConic_Verb: {
                // Conics are approximated by quads before segment conversion.
                m.mapPoints(pts, 3);
                SkScalar weight = iter.conicWeight();
                SkAutoConicToQuads converter;
                const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.5f);
                for (int i = 0; i < converter.countQuads(); ++i) {
                    update_degenerate_test(&degenerateData, quadPts[2*i + 1]);
                    update_degenerate_test(&degenerateData, quadPts[2*i + 2]);
                    add_quad_segment(quadPts + 2*i, segments);
                }
                break;
            }
            case SkPath::kCubic_Verb: {
                m.mapPoints(pts, 4);
                update_degenerate_test(&degenerateData, pts[1]);
                update_degenerate_test(&degenerateData, pts[2]);
                update_degenerate_test(&degenerateData, pts[3]);
                add_cubic_segments(pts, dir, segments);
                break;
            };
            case SkPath::kDone_Verb:
                // Degenerate (collapsed) paths draw nothing; otherwise finish
                // by computing normals, bisectors, and counts.
                if (degenerateData.isDegenerate()) {
                    return false;
                } else {
                    compute_vectors(segments, fanPt, dir, vCount, iCount);
                    return true;
                }
            default:
                break;
        }
    }
}
| 335 | |
// Vertex layout for the quad-edge effect: position, canonical quad UV
// (the curve is u^2 - v = 0), and two signed distances used to trim the
// infinite quad to this segment's neighborhood.
struct QuadVertex {
    SkPoint fPos;   // device-space position
    SkPoint fUV;    // canonical quad coordinates (u, v)
    SkScalar fD0;   // signed distance to the first trimming edge
    SkScalar fD1;   // signed distance to the second trimming edge
};
| 342 | |
// Vertex/index counts for one draw call. create_vertices() starts a new Draw
// whenever a single draw would exceed the 16-bit index range.
struct Draw {
    Draw() : fVertexCnt(0), fIndexCnt(0) {}
    int fVertexCnt;
    int fIndexCnt;
};

typedef SkTArray<Draw, true> DrawArray;
| 350 | |
// Fills 'verts'/'idxs' for every segment: a 4-vertex corner wedge per
// junction plus either a 5-vertex strip (line) or 6-vertex patch (quad),
// each optionally fanned to the interior fan point. Splits into multiple
// Draws when 16-bit indices would overflow.
static void create_vertices(const SegmentArray& segments,
                            const SkPoint& fanPt,
                            DrawArray* draws,
                            QuadVertex* verts,
                            uint16_t* idxs) {
    Draw* draw = &draws->push_back();
    // alias just to make vert/index assignments easier to read.
    int* v = &draw->fVertexCnt;
    int* i = &draw->fIndexCnt;

    int count = segments.count();
    for (int a = 0; a < count; ++a) {
        const Segment& sega = segments[a];
        int b = (a + 1) % count;
        const Segment& segb = segments[b];

        // Check whether adding the verts for this segment to the current draw would cause index
        // values to overflow.
        int vCount = 4;
        if (Segment::kLine == segb.fType) {
            vCount += 5;
        } else {
            vCount += 6;
        }
        if (draw->fVertexCnt + vCount > (1 << 16)) {
            // Start a new draw; advance the write pointers past what the
            // previous draw consumed.
            verts += *v;
            idxs += *i;
            draw = &draws->push_back();
            v = &draw->fVertexCnt;
            i = &draw->fIndexCnt;
        }

        // Corner wedge at the junction of sega and segb: the shared endpoint
        // plus offsets along the incoming normal, the bisector, and the
        // outgoing normal.
        // FIXME: These tris are inset in the 1 unit arc around the corner
        verts[*v + 0].fPos = sega.endPt();
        verts[*v + 1].fPos = verts[*v + 0].fPos + sega.endNorm();
        verts[*v + 2].fPos = verts[*v + 0].fPos + segb.fMid;
        verts[*v + 3].fPos = verts[*v + 0].fPos + segb.fNorms[0];
        verts[*v + 0].fUV.set(0,0);
        verts[*v + 1].fUV.set(0,-SK_Scalar1);
        verts[*v + 2].fUV.set(0,-SK_Scalar1);
        verts[*v + 3].fUV.set(0,-SK_Scalar1);
        verts[*v + 0].fD0 = verts[*v + 0].fD1 = -SK_Scalar1;
        verts[*v + 1].fD0 = verts[*v + 1].fD1 = -SK_Scalar1;
        verts[*v + 2].fD0 = verts[*v + 2].fD1 = -SK_Scalar1;
        verts[*v + 3].fD0 = verts[*v + 3].fD1 = -SK_Scalar1;

        idxs[*i + 0] = *v + 0;
        idxs[*i + 1] = *v + 2;
        idxs[*i + 2] = *v + 1;
        idxs[*i + 3] = *v + 0;
        idxs[*i + 4] = *v + 3;
        idxs[*i + 5] = *v + 2;

        *v += 4;
        *i += 6;

        if (Segment::kLine == segb.fType) {
            // Line segment: fan point, the two edge endpoints, and the two
            // endpoints pushed out along the edge normal.
            verts[*v + 0].fPos = fanPt;
            verts[*v + 1].fPos = sega.endPt();
            verts[*v + 2].fPos = segb.fPts[0];

            verts[*v + 3].fPos = verts[*v + 1].fPos + segb.fNorms[0];
            verts[*v + 4].fPos = verts[*v + 2].fPos + segb.fNorms[0];

            // we draw the line edge as a degenerate quad (u is 0, v is the
            // signed distance to the edge)
            SkScalar dist = fanPt.distanceToLineBetween(verts[*v + 1].fPos,
                                                        verts[*v + 2].fPos);
            verts[*v + 0].fUV.set(0, dist);
            verts[*v + 1].fUV.set(0, 0);
            verts[*v + 2].fUV.set(0, 0);
            verts[*v + 3].fUV.set(0, -SK_Scalar1);
            verts[*v + 4].fUV.set(0, -SK_Scalar1);

            verts[*v + 0].fD0 = verts[*v + 0].fD1 = -SK_Scalar1;
            verts[*v + 1].fD0 = verts[*v + 1].fD1 = -SK_Scalar1;
            verts[*v + 2].fD0 = verts[*v + 2].fD1 = -SK_Scalar1;
            verts[*v + 3].fD0 = verts[*v + 3].fD1 = -SK_Scalar1;
            verts[*v + 4].fD0 = verts[*v + 4].fD1 = -SK_Scalar1;

            // Two triangles covering the anti-aliased band outside the edge.
            idxs[*i + 0] = *v + 3;
            idxs[*i + 1] = *v + 1;
            idxs[*i + 2] = *v + 2;

            idxs[*i + 3] = *v + 4;
            idxs[*i + 4] = *v + 3;
            idxs[*i + 5] = *v + 2;

            *i += 6;

            // Draw the interior fan if it exists.
            // TODO: Detect and combine colinear segments. This will ensure we catch every case
            // with no interior, and that the resulting shared edge uses the same endpoints.
            if (count >= 3) {
                idxs[*i + 0] = *v + 0;
                idxs[*i + 1] = *v + 2;
                idxs[*i + 2] = *v + 1;

                *i += 3;
            }

            *v += 5;
        } else {
            // Quad segment: fan point, the quad's endpoints, the endpoints
            // pushed out along their normals, and the control point pushed
            // out along the averaged normal.
            SkPoint qpts[] = {sega.endPt(), segb.fPts[0], segb.fPts[1]};

            SkVector midVec = segb.fNorms[0] + segb.fNorms[1];
            midVec.normalize();

            verts[*v + 0].fPos = fanPt;
            verts[*v + 1].fPos = qpts[0];
            verts[*v + 2].fPos = qpts[2];
            verts[*v + 3].fPos = qpts[0] + segb.fNorms[0];
            verts[*v + 4].fPos = qpts[2] + segb.fNorms[1];
            verts[*v + 5].fPos = qpts[1] + midVec;

            // fD0/fD1: signed distances to the lines through the quad's two
            // endpoint normals; large negative values disable trimming for
            // the pushed-out verts.
            SkScalar c = segb.fNorms[0].dot(qpts[0]);
            verts[*v + 0].fD0 = -segb.fNorms[0].dot(fanPt) + c;
            verts[*v + 1].fD0 = 0.f;
            verts[*v + 2].fD0 = -segb.fNorms[0].dot(qpts[2]) + c;
            verts[*v + 3].fD0 = -SK_ScalarMax/100;
            verts[*v + 4].fD0 = -SK_ScalarMax/100;
            verts[*v + 5].fD0 = -SK_ScalarMax/100;

            c = segb.fNorms[1].dot(qpts[2]);
            verts[*v + 0].fD1 = -segb.fNorms[1].dot(fanPt) + c;
            verts[*v + 1].fD1 = -segb.fNorms[1].dot(qpts[0]) + c;
            verts[*v + 2].fD1 = 0.f;
            verts[*v + 3].fD1 = -SK_ScalarMax/100;
            verts[*v + 4].fD1 = -SK_ScalarMax/100;
            verts[*v + 5].fD1 = -SK_ScalarMax/100;

            // Map positions into the quad's canonical (u, v) space.
            GrPathUtils::QuadUVMatrix toUV(qpts);
            toUV.apply<6, sizeof(QuadVertex), sizeof(SkPoint)>(verts + *v);

            idxs[*i + 0] = *v + 3;
            idxs[*i + 1] = *v + 1;
            idxs[*i + 2] = *v + 2;
            idxs[*i + 3] = *v + 4;
            idxs[*i + 4] = *v + 3;
            idxs[*i + 5] = *v + 2;

            idxs[*i + 6] = *v + 5;
            idxs[*i + 7] = *v + 3;
            idxs[*i + 8] = *v + 4;

            *i += 9;

            // Draw the interior fan if it exists.
            // TODO: Detect and combine colinear segments. This will ensure we catch every case
            // with no interior, and that the resulting shared edge uses the same endpoints.
            if (count >= 3) {
                idxs[*i + 0] = *v + 0;
                idxs[*i + 1] = *v + 2;
                idxs[*i + 2] = *v + 1;

                *i += 3;
            }

            *v += 6;
        }
    }
}
| 513 | |
| 514 /////////////////////////////////////////////////////////////////////////////// | |
| 515 | |
/*
 * Quadratic specified by 0=u^2-v canonical coords. u and v are the first
 * two components of the vertex attribute. Coverage is based on signed
 * distance with negative being inside, positive outside. The edge is
 * specified in window space (y-down). If either the third or fourth
 * component of the interpolated vertex coord is > 0 then the pixel is
 * considered outside the edge. This is used to attempt to trim to a portion
 * of the infinite quad.
 * Requires shader derivative instruction support.
 */
| 525 | |
// Geometry processor implementing the quad-edge coverage computation
// described above: per-vertex position plus a vec4 (u, v, d0, d1) attribute.
class QuadEdgeEffect : public GrGeometryProcessor {
public:

    static GrGeometryProcessor* Create(GrColor color, const SkMatrix& localMatrix,
                                       bool usesLocalCoords) {
        return new QuadEdgeEffect(color, localMatrix, usesLocalCoords);
    }

    virtual ~QuadEdgeEffect() {}

    const char* name() const override { return "QuadEdge"; }

    const Attribute* inPosition() const { return fInPosition; }
    const Attribute* inQuadEdge() const { return fInQuadEdge; }
    GrColor color() const { return fColor; }
    bool colorIgnored() const { return GrColor_ILLEGAL == fColor; }
    const SkMatrix& localMatrix() const { return fLocalMatrix; }
    bool usesLocalCoords() const { return fUsesLocalCoords; }

    // GL implementation: generates the vertex/fragment shader code and
    // uploads the (uniform) color.
    class GLProcessor : public GrGLGeometryProcessor {
    public:
        GLProcessor(const GrGeometryProcessor&,
                    const GrBatchTracker&)
            : fColor(GrColor_ILLEGAL) {}

        void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
            const QuadEdgeEffect& qe = args.fGP.cast<QuadEdgeEffect>();
            GrGLGPBuilder* pb = args.fPB;
            GrGLVertexBuilder* vsBuilder = pb->getVertexShaderBuilder();

            // emit attributes
            vsBuilder->emitAttributes(qe);

            // Pass the (u, v, d0, d1) attribute through to the fragment shader.
            GrGLVertToFrag v(kVec4f_GrSLType);
            args.fPB->addVarying("QuadEdge", &v);
            vsBuilder->codeAppendf("%s = %s;", v.vsOut(), qe.inQuadEdge()->fName);

            // Setup pass through color
            if (!qe.colorIgnored()) {
                this->setupUniformColor(pb, args.fOutputColor, &fColorUniform);
            }

            // Setup position
            this->setupPosition(pb, gpArgs, qe.inPosition()->fName);

            // emit transforms
            this->emitTransforms(args.fPB, gpArgs->fPositionVar, qe.inPosition()->fName,
                                 qe.localMatrix(), args.fTransformsIn, args.fTransformsOut);

            GrGLFragmentBuilder* fsBuilder = args.fPB->getFragmentShaderBuilder();

            // The coverage computation needs dFdx/dFdy.
            SkAssertResult(fsBuilder->enableFeature(
                    GrGLFragmentShaderBuilder::kStandardDerivatives_GLSLFeature));
            fsBuilder->codeAppendf("float edgeAlpha;");

            // keep the derivative instructions outside the conditional
            fsBuilder->codeAppendf("vec2 duvdx = dFdx(%s.xy);", v.fsIn());
            fsBuilder->codeAppendf("vec2 duvdy = dFdy(%s.xy);", v.fsIn());
            // z/w > 0 means the fragment is beyond the trimming edges; fall
            // back to distance-to-edge coverage.
            fsBuilder->codeAppendf("if (%s.z > 0.0 && %s.w > 0.0) {", v.fsIn(), v.fsIn());
            // today we know z and w are in device space. We could use derivatives
            fsBuilder->codeAppendf("edgeAlpha = min(min(%s.z, %s.w) + 0.5, 1.0);", v.fsIn(),
                                   v.fsIn());
            fsBuilder->codeAppendf ("} else {");
            // Gradient of the implicit function u^2 - v, used to convert the
            // implicit value into an approximate screen-space distance.
            fsBuilder->codeAppendf("vec2 gF = vec2(2.0*%s.x*duvdx.x - duvdx.y,"
                                   "               2.0*%s.x*duvdy.x - duvdy.y);",
                                   v.fsIn(), v.fsIn());
            fsBuilder->codeAppendf("edgeAlpha = (%s.x*%s.x - %s.y);", v.fsIn(), v.fsIn(),
                                   v.fsIn());
            fsBuilder->codeAppendf("edgeAlpha = "
                                   "clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);}");

            fsBuilder->codeAppendf("%s = vec4(edgeAlpha);", args.fOutputCoverage);
        }

        // Key reflects the two codegen-affecting properties: perspective
        // local coords and whether color is ignored.
        static inline void GenKey(const GrGeometryProcessor& gp,
                                  const GrBatchTracker& bt,
                                  const GrGLSLCaps&,
                                  GrProcessorKeyBuilder* b) {
            const QuadEdgeEffect& qee = gp.cast<QuadEdgeEffect>();
            uint32_t key = 0;
            key |= qee.usesLocalCoords() && qee.localMatrix().hasPerspective() ? 0x1 : 0x0;
            key |= qee.colorIgnored() ? 0x2 : 0x0;
            b->add32(key);
        }

        // Re-uploads the color uniform only when it changed since last draw.
        virtual void setData(const GrGLProgramDataManager& pdman,
                             const GrPrimitiveProcessor& gp,
                             const GrBatchTracker& bt) override {
            const QuadEdgeEffect& qe = gp.cast<QuadEdgeEffect>();
            if (qe.color() != fColor) {
                GrGLfloat c[4];
                GrColorToRGBAFloat(qe.color(), c);
                pdman.set4fv(fColorUniform, 1, c);
                fColor = qe.color();
            }
        }

        void setTransformData(const GrPrimitiveProcessor& primProc,
                              const GrGLProgramDataManager& pdman,
                              int index,
                              const SkTArray<const GrCoordTransform*, true>& transforms) override {
            this->setTransformDataHelper<QuadEdgeEffect>(primProc, pdman, index, transforms);
        }

    private:
        GrColor fColor;          // last color uploaded to the uniform
        UniformHandle fColorUniform;

        typedef GrGLGeometryProcessor INHERITED;
    };

    virtual void getGLProcessorKey(const GrBatchTracker& bt,
                                   const GrGLSLCaps& caps,
                                   GrProcessorKeyBuilder* b) const override {
        GLProcessor::GenKey(*this, bt, caps, b);
    }

    virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
                                                     const GrGLSLCaps&) const override {
        return new GLProcessor(*this, bt);
    }

private:
    QuadEdgeEffect(GrColor color, const SkMatrix& localMatrix, bool usesLocalCoords)
        : fColor(color)
        , fLocalMatrix(localMatrix)
        , fUsesLocalCoords(usesLocalCoords) {
        this->initClassID<QuadEdgeEffect>();
        fInPosition = &this->addVertexAttrib(Attribute("inPosition", kVec2f_GrVertexAttribType));
        fInQuadEdge = &this->addVertexAttrib(Attribute("inQuadEdge", kVec4f_GrVertexAttribType));
    }

    const Attribute* fInPosition;
    const Attribute* fInQuadEdge;
    GrColor fColor;
    SkMatrix fLocalMatrix;
    bool fUsesLocalCoords;

    GR_DECLARE_GEOMETRY_PROCESSOR_TEST;

    typedef GrGeometryProcessor INHERITED;
};
| 668 | |
GR_DEFINE_GEOMETRY_PROCESSOR_TEST(QuadEdgeEffect);

// Randomized-testing factory; returns nullptr when the backend lacks shader
// derivative support, which the effect requires.
const GrGeometryProcessor* QuadEdgeEffect::TestCreate(GrProcessorTestData* d) {
    // Doesn't work without derivative instructions.
    return d->fCaps->shaderCaps()->shaderDerivativeSupport() ?
           QuadEdgeEffect::Create(GrRandomColor(d->fRandom),
                                  GrTest::TestMatrix(d->fRandom),
                                  d->fRandom->nextBool()) : nullptr;
}
| 678 | |
| 679 /////////////////////////////////////////////////////////////////////////////// | |
| 680 | |
| 681 bool GrAAConvexPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const { | |
| 682 return (args.fShaderCaps->shaderDerivativeSupport() && args.fAntiAlias && | |
| 683 args.fStroke->isFillStyle() && !args.fPath->isInverseFillType() && | |
| 684 args.fPath->isConvex()); | |
| 685 } | |
| 686 | |
// extract the result vertices and indices from the GrAAConvexTessellator.
// The interleaved layout is position, then color, then (when coverage is not
// folded into alpha) a float coverage; 'vertexStride' is the full stride.
static void extract_verts(const GrAAConvexTessellator& tess,
                          void* vertices,
                          size_t vertexStride,
                          GrColor color,
                          uint16_t* idxs,
                          bool tweakAlphaForCoverage) {
    intptr_t verts = reinterpret_cast<intptr_t>(vertices);

    // Positions first.
    for (int i = 0; i < tess.numPts(); ++i) {
        *((SkPoint*)((intptr_t)verts + i * vertexStride)) = tess.point(i);
    }

    // Make 'verts' point to the colors
    verts += sizeof(SkPoint);
    for (int i = 0; i < tess.numPts(); ++i) {
        if (tweakAlphaForCoverage) {
            // Coverage is premultiplied into the color's alpha.
            SkASSERT(SkScalarRoundToInt(255.0f * tess.coverage(i)) <= 255);
            unsigned scale = SkScalarRoundToInt(255.0f * tess.coverage(i));
            GrColor scaledColor = (0xff == scale) ? color : SkAlphaMulQ(color, scale);
            *reinterpret_cast<GrColor*>(verts + i * vertexStride) = scaledColor;
        } else {
            // Color and a separate float coverage attribute.
            *reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
            *reinterpret_cast<float*>(verts + i * vertexStride + sizeof(GrColor)) =
                    tess.coverage(i);
        }
    }

    for (int i = 0; i < tess.numIndices(); ++i) {
        idxs[i] = tess.index(i);
    }
}
| 719 | |
// Builds the default geometry processor used by the lines-only path: color is
// a vertex attribute; coverage is either folded into alpha (solid) or its own
// attribute; local coords (when used) are derived from position.
static const GrGeometryProcessor* create_fill_gp(bool tweakAlphaForCoverage,
                                                 const SkMatrix& viewMatrix,
                                                 bool usesLocalCoords,
                                                 bool coverageIgnored) {
    using namespace GrDefaultGeoProcFactory;

    Color color(Color::kAttribute_Type);
    Coverage::Type coverageType;
    // TODO remove coverage if coverage is ignored
    /*if (coverageIgnored) {
        coverageType = Coverage::kNone_Type;
    } else*/ if (tweakAlphaForCoverage) {
        coverageType = Coverage::kSolid_Type;
    } else {
        coverageType = Coverage::kAttribute_Type;
    }
    Coverage coverage(coverageType);
    LocalCoords localCoords(usesLocalCoords ? LocalCoords::kUsePosition_Type :
                                              LocalCoords::kUnused_Type);
    return CreateForDeviceSpace(color, coverage, localCoords, viewMatrix);
}
| 741 | |
| 742 class AAConvexPathBatch : public GrVertexBatch { | |
| 743 public: | |
    // Per-path draw record: color, view matrix, and the (convex) path itself.
    struct Geometry {
        GrColor fColor;
        SkMatrix fViewMatrix;
        SkPath fPath;
    };
| 749 | |
    // Factory: allocates a new batch holding a single path geometry.
    static GrDrawBatch* Create(const Geometry& geometry) { return new AAConvexPathBatch(geometry); }
| 751 | |
| 752 const char* name() const override { return "AAConvexBatch"; } | |
| 753 | |
    // Reports the batch's output color for pipeline optimization.
    void getInvariantOutputColor(GrInitInvariantOutput* out) const override {
        // When this is called on a batch, there is only one geometry bundle
        out->setKnownFourComponents(fGeoData[0].fColor);
    }
    // Coverage varies per pixel (AA), so it is reported as unknown.
    void getInvariantOutputCoverage(GrInitInvariantOutput* out) const override {
        out->setUnknownSingleComponent();
    }
| 761 | |
| 762 private: | |
| 763 | |
    // Applies pipeline color overrides and caches the pipeline-derived flags
    // (local coords, coverage, lines-only fast path, alpha-tweak eligibility).
    void initBatchTracker(const GrPipelineOptimizations& opt) override {
        // Handle any color overrides
        if (!opt.readsColor()) {
            fGeoData[0].fColor = GrColor_ILLEGAL;
        }
        opt.getOverrideColorIfSet(&fGeoData[0].fColor);

        // setup batch properties
        fBatch.fColorIgnored = !opt.readsColor();
        fBatch.fColor = fGeoData[0].fColor;
        fBatch.fUsesLocalCoords = opt.readsLocalCoords();
        fBatch.fCoverageIgnored = !opt.readsCoverage();
        // Lines-only paths can take the tessellator fast path.
        fBatch.fLinesOnly = SkPath::kLine_SegmentMask == fGeoData[0].fPath.getSegmentMasks();
        fBatch.fCanTweakAlphaForCoverage = opt.canTweakAlphaForCoverage();
    }
| 779 | |
    // Fast path for paths made only of line segments: tessellates each path
    // on the CPU (GrAAConvexTessellator) and emits one indexed draw per path.
    // Allocation or tessellation failures skip/abort drawing rather than crash.
    void prepareLinesOnlyDraws(Target* target) {
        bool canTweakAlphaForCoverage = this->canTweakAlphaForCoverage();

        // Setup GrGeometryProcessor
        SkAutoTUnref<const GrGeometryProcessor> gp(create_fill_gp(canTweakAlphaForCoverage,
                                                                  this->viewMatrix(),
                                                                  this->usesLocalCoords(),
                                                                  this->coverageIgnored()));
        if (!gp) {
            SkDebugf("Could not create GrGeometryProcessor\n");
            return;
        }

        target->initDraw(gp, this->pipeline());

        size_t vertexStride = gp->getVertexStride();

        // Stride must match the attribute layout chosen in create_fill_gp.
        SkASSERT(canTweakAlphaForCoverage ?
                 vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorAttr) :
                 vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorCoverageAttr));

        GrAAConvexTessellator tess;

        int instanceCount = fGeoData.count();

        for (int i = 0; i < instanceCount; i++) {
            // The tessellator is reused across paths.
            tess.rewind();

            Geometry& args = fGeoData[i];

            if (!tess.tessellate(args.fViewMatrix, args.fPath)) {
                continue;
            }

            const GrVertexBuffer* vertexBuffer;
            int firstVertex;

            void* verts = target->makeVertexSpace(vertexStride, tess.numPts(), &vertexBuffer,
                                                  &firstVertex);
            if (!verts) {
                SkDebugf("Could not allocate vertices\n");
                return;
            }

            const GrIndexBuffer* indexBuffer;
            int firstIndex;

            uint16_t* idxs = target->makeIndexSpace(tess.numIndices(), &indexBuffer, &firstIndex);
            if (!idxs) {
                SkDebugf("Could not allocate indices\n");
                return;
            }

            extract_verts(tess, verts, vertexStride, args.fColor, idxs, canTweakAlphaForCoverage);

            GrVertices info;
            info.initIndexed(kTriangles_GrPrimitiveType,
                             vertexBuffer, indexBuffer,
                             firstVertex, firstIndex,
                             tess.numPts(), tess.numIndices());
            target->draw(info);
        }
    }
| 843 | |
| 844 void onPrepareDraws(Target* target) override { | |
| 845 #ifndef SK_IGNORE_LINEONLY_AA_CONVEX_PATH_OPTS | |
| 846 if (this->linesOnly()) { | |
| 847 this->prepareLinesOnlyDraws(target); | |
| 848 return; | |
| 849 } | |
| 850 #endif | |
| 851 | |
| 852 int instanceCount = fGeoData.count(); | |
| 853 | |
| 854 SkMatrix invert; | |
| 855 if (this->usesLocalCoords() && !this->viewMatrix().invert(&invert)) { | |
| 856 SkDebugf("Could not invert viewmatrix\n"); | |
| 857 return; | |
| 858 } | |
| 859 | |
| 860 // Setup GrGeometryProcessor | |
| 861 SkAutoTUnref<GrGeometryProcessor> quadProcessor( | |
| 862 QuadEdgeEffect::Create(this->color(), invert, this->usesLocalCoo
rds())); | |
| 863 | |
| 864 target->initDraw(quadProcessor, this->pipeline()); | |
| 865 | |
| 866 // TODO generate all segments for all paths and use one vertex buffer | |
| 867 for (int i = 0; i < instanceCount; i++) { | |
| 868 Geometry& args = fGeoData[i]; | |
| 869 | |
| 870 // We use the fact that SkPath::transform path does subdivision base
d on | |
| 871 // perspective. Otherwise, we apply the view matrix when copying to
the | |
| 872 // segment representation. | |
| 873 const SkMatrix* viewMatrix = &args.fViewMatrix; | |
| 874 if (viewMatrix->hasPerspective()) { | |
| 875 args.fPath.transform(*viewMatrix); | |
| 876 viewMatrix = &SkMatrix::I(); | |
| 877 } | |
| 878 | |
| 879 int vertexCount; | |
| 880 int indexCount; | |
| 881 enum { | |
| 882 kPreallocSegmentCnt = 512 / sizeof(Segment), | |
| 883 kPreallocDrawCnt = 4, | |
| 884 }; | |
| 885 SkSTArray<kPreallocSegmentCnt, Segment, true> segments; | |
| 886 SkPoint fanPt; | |
| 887 | |
| 888 if (!get_segments(args.fPath, *viewMatrix, &segments, &fanPt, &verte
xCount, | |
| 889 &indexCount)) { | |
| 890 continue; | |
| 891 } | |
| 892 | |
| 893 const GrVertexBuffer* vertexBuffer; | |
| 894 int firstVertex; | |
| 895 | |
| 896 size_t vertexStride = quadProcessor->getVertexStride(); | |
| 897 QuadVertex* verts = reinterpret_cast<QuadVertex*>(target->makeVertex
Space( | |
| 898 vertexStride, vertexCount, &vertexBuffer, &firstVertex)); | |
| 899 | |
| 900 if (!verts) { | |
| 901 SkDebugf("Could not allocate vertices\n"); | |
| 902 return; | |
| 903 } | |
| 904 | |
| 905 const GrIndexBuffer* indexBuffer; | |
| 906 int firstIndex; | |
| 907 | |
| 908 uint16_t *idxs = target->makeIndexSpace(indexCount, &indexBuffer, &f
irstIndex); | |
| 909 if (!idxs) { | |
| 910 SkDebugf("Could not allocate indices\n"); | |
| 911 return; | |
| 912 } | |
| 913 | |
| 914 SkSTArray<kPreallocDrawCnt, Draw, true> draws; | |
| 915 create_vertices(segments, fanPt, &draws, verts, idxs); | |
| 916 | |
| 917 GrVertices vertices; | |
| 918 | |
| 919 for (int i = 0; i < draws.count(); ++i) { | |
| 920 const Draw& draw = draws[i]; | |
| 921 vertices.initIndexed(kTriangles_GrPrimitiveType, vertexBuffer, i
ndexBuffer, | |
| 922 firstVertex, firstIndex, draw.fVertexCnt, d
raw.fIndexCnt); | |
| 923 target->draw(vertices); | |
| 924 firstVertex += draw.fVertexCnt; | |
| 925 firstIndex += draw.fIndexCnt; | |
| 926 } | |
| 927 } | |
| 928 } | |
| 929 | |
| 930 SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; } | |
| 931 | |
| 932 AAConvexPathBatch(const Geometry& geometry) { | |
| 933 this->initClassID<AAConvexPathBatch>(); | |
| 934 fGeoData.push_back(geometry); | |
| 935 | |
| 936 // compute bounds | |
| 937 fBounds = geometry.fPath.getBounds(); | |
| 938 geometry.fViewMatrix.mapRect(&fBounds); | |
| 939 } | |
| 940 | |
    // Attempts to merge `t` (another AAConvexPathBatch) into this batch so both
    // are drawn together. Returns false when any draw-state difference prevents
    // sharing a single pipeline/processor; on success appends that batch's
    // geometry and grows the bounds.
    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
        AAConvexPathBatch* that = t->cast<AAConvexPathBatch>();
        // Pipelines must be compatible before anything else is worth checking.
        if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
                                    that->bounds(), caps)) {
            return false;
        }

        // The (non-lines-only) geometry processor is created with the batch color
        // (see onPrepareDraws), so differing colors cannot share one draw.
        if (this->color() != that->color()) {
            return false;
        }

        SkASSERT(this->usesLocalCoords() == that->usesLocalCoords());
        // Local coords derive from the inverted view matrix, so matrices must match.
        if (this->usesLocalCoords() && !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
            return false;
        }

        // Lines-only and general batches build vertices differently; don't mix.
        if (this->linesOnly() != that->linesOnly()) {
            return false;
        }

        // In the event of two batches, one who can tweak, one who cannot, we just fall back to
        // not tweaking
        if (this->canTweakAlphaForCoverage() != that->canTweakAlphaForCoverage()) {
            fBatch.fCanTweakAlphaForCoverage = false;
        }

        fGeoData.push_back_n(that->geoData()->count(), that->geoData()->begin());
        this->joinBounds(that->bounds());
        return true;
    }
| 971 | |
    // Accessors for batch-wide state cached in fBatch (presumably resolved during
    // batch-tracker initialization — the setter is above this view; confirm there).
    GrColor color() const { return fBatch.fColor; }
    bool linesOnly() const { return fBatch.fLinesOnly; }
    bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
    bool canTweakAlphaForCoverage() const { return fBatch.fCanTweakAlphaForCoverage; }
    // The batch view matrix is the first geometry's matrix; combining requires
    // matching matrices when local coords are used (see onCombineIfPossible).
    const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; }
    bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
| 978 | |
    // Batch-wide state shared by every geometry in the batch. Per-path values
    // (color, view matrix, path) live in the Geometry entries of fGeoData.
    struct BatchTracker {
        GrColor fColor;                    // single color used for the whole batch
        bool fUsesLocalCoords;             // whether the pipeline reads local coords
        bool fColorIgnored;
        bool fCoverageIgnored;
        bool fLinesOnly;                   // selects the tessellator-based draw path
        bool fCanTweakAlphaForCoverage;    // coverage can be folded into alpha
    };

    BatchTracker fBatch;
    // Inline storage for the common single-path case; grows when batches combine.
    SkSTArray<1, Geometry, true> fGeoData;
};
| 991 | |
| 992 bool GrAAConvexPathRenderer::onDrawPath(const DrawPathArgs& args) { | |
| 993 if (args.fPath->isEmpty()) { | |
| 994 return true; | |
| 995 } | |
| 996 | |
| 997 AAConvexPathBatch::Geometry geometry; | |
| 998 geometry.fColor = args.fColor; | |
| 999 geometry.fViewMatrix = *args.fViewMatrix; | |
| 1000 geometry.fPath = *args.fPath; | |
| 1001 | |
| 1002 SkAutoTUnref<GrDrawBatch> batch(AAConvexPathBatch::Create(geometry)); | |
| 1003 args.fTarget->drawBatch(*args.fPipelineBuilder, batch); | |
| 1004 | |
| 1005 return true; | |
| 1006 | |
| 1007 } | |
| 1008 | |
| 1009 ////////////////////////////////////////////////////////////////////////////////
/////////////////// | |
| 1010 | |
#ifdef GR_TEST_UTILS

// Test-only factory: builds an AAConvexPathBatch with a random color, a random
// invertible view matrix, and a random convex path for randomized batch testing.
DRAW_BATCH_TEST_DEFINE(AAConvexPathBatch) {
    AAConvexPathBatch::Geometry geometry;
    geometry.fColor = GrRandomColor(random);
    geometry.fViewMatrix = GrTest::TestMatrixInvertible(random);
    geometry.fPath = GrTest::TestPathConvex(random);

    return AAConvexPathBatch::Create(geometry);
}

#endif
| OLD | NEW |