OLD | NEW |
---|---|
1 | 1 |
2 /* | 2 /* |
3 * Copyright 2012 Google Inc. | 3 * Copyright 2012 Google Inc. |
4 * | 4 * |
5 * Use of this source code is governed by a BSD-style license that can be | 5 * Use of this source code is governed by a BSD-style license that can be |
6 * found in the LICENSE file. | 6 * found in the LICENSE file. |
7 */ | 7 */ |
8 | 8 |
9 #include "GrAAConvexPathRenderer.h" | 9 #include "GrAAConvexPathRenderer.h" |
10 | 10 |
11 #include "GrBatch.h" | |
12 #include "GrBatchTarget.h" | |
13 #include "GrBufferAllocPool.h" | |
11 #include "GrContext.h" | 14 #include "GrContext.h" |
12 #include "GrDrawTargetCaps.h" | 15 #include "GrDrawTargetCaps.h" |
13 #include "GrGeometryProcessor.h" | 16 #include "GrGeometryProcessor.h" |
14 #include "GrInvariantOutput.h" | 17 #include "GrInvariantOutput.h" |
15 #include "GrPathUtils.h" | 18 #include "GrPathUtils.h" |
16 #include "GrProcessor.h" | 19 #include "GrProcessor.h" |
17 #include "GrPipelineBuilder.h" | 20 #include "GrPipelineBuilder.h" |
18 #include "SkGeometry.h" | 21 #include "SkGeometry.h" |
19 #include "SkString.h" | 22 #include "SkString.h" |
20 #include "SkStrokeRec.h" | 23 #include "SkStrokeRec.h" |
(...skipping 194 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
215 SkASSERT(!m.hasPerspective()); | 218 SkASSERT(!m.hasPerspective()); |
216 SkScalar det2x2 = SkScalarMul(m.get(SkMatrix::kMScaleX), m.get(SkMatrix::kMScaleY)) - | 219 SkScalar det2x2 = SkScalarMul(m.get(SkMatrix::kMScaleX), m.get(SkMatrix::kMScaleY)) - |
217 SkScalarMul(m.get(SkMatrix::kMSkewX), m.get(SkMatrix::kMSkewY)); | 220 SkScalarMul(m.get(SkMatrix::kMSkewX), m.get(SkMatrix::kMSkewY)); |
218 if (det2x2 < 0) { | 221 if (det2x2 < 0) { |
219 *dir = SkPath::OppositeDirection(*dir); | 222 *dir = SkPath::OppositeDirection(*dir); |
220 } | 223 } |
221 return true; | 224 return true; |
222 } | 225 } |
223 | 226 |
224 static inline void add_line_to_segment(const SkPoint& pt, | 227 static inline void add_line_to_segment(const SkPoint& pt, |
225 SegmentArray* segments, | 228 SegmentArray* segments) { |
226 SkRect* devBounds) { | |
227 segments->push_back(); | 229 segments->push_back(); |
228 segments->back().fType = Segment::kLine; | 230 segments->back().fType = Segment::kLine; |
229 segments->back().fPts[0] = pt; | 231 segments->back().fPts[0] = pt; |
230 devBounds->growToInclude(pt.fX, pt.fY); | |
231 } | 232 } |
232 | 233 |
233 #ifdef SK_DEBUG | 234 #ifdef SK_DEBUG |
234 static inline bool contains_inclusive(const SkRect& rect, const SkPoint& p) { | 235 static inline bool contains_inclusive(const SkRect& rect, const SkPoint& p) { |
235 return p.fX >= rect.fLeft && p.fX <= rect.fRight && p.fY >= rect.fTop && p.fY <= rect.fBottom; | 236 return p.fX >= rect.fLeft && p.fX <= rect.fRight && p.fY >= rect.fTop && p.fY <= rect.fBottom; |
236 } | 237 } |
237 #endif | 238 #endif |
238 | 239 |
239 static inline void add_quad_segment(const SkPoint pts[3], | 240 static inline void add_quad_segment(const SkPoint pts[3], |
240 SegmentArray* segments, | 241 SegmentArray* segments) { |
241 SkRect* devBounds) { | |
242 if (pts[0].distanceToSqd(pts[1]) < kCloseSqd || pts[1].distanceToSqd(pts[2]) < kCloseSqd) { | 242 if (pts[0].distanceToSqd(pts[1]) < kCloseSqd || pts[1].distanceToSqd(pts[2]) < kCloseSqd) { |
243 if (pts[0] != pts[2]) { | 243 if (pts[0] != pts[2]) { |
244 add_line_to_segment(pts[2], segments, devBounds); | 244 add_line_to_segment(pts[2], segments); |
245 } | 245 } |
246 } else { | 246 } else { |
247 segments->push_back(); | 247 segments->push_back(); |
248 segments->back().fType = Segment::kQuad; | 248 segments->back().fType = Segment::kQuad; |
249 segments->back().fPts[0] = pts[1]; | 249 segments->back().fPts[0] = pts[1]; |
250 segments->back().fPts[1] = pts[2]; | 250 segments->back().fPts[1] = pts[2]; |
251 SkASSERT(contains_inclusive(*devBounds, pts[0])); | |
252 devBounds->growToInclude(pts + 1, 2); | |
253 } | 251 } |
254 } | 252 } |
255 | 253 |
256 static inline void add_cubic_segments(const SkPoint pts[4], | 254 static inline void add_cubic_segments(const SkPoint pts[4], |
257 SkPath::Direction dir, | 255 SkPath::Direction dir, |
258 SegmentArray* segments, | 256 SegmentArray* segments) { |
259 SkRect* devBounds) { | |
260 SkSTArray<15, SkPoint, true> quads; | 257 SkSTArray<15, SkPoint, true> quads; |
261 GrPathUtils::convertCubicToQuads(pts, SK_Scalar1, true, dir, &quads); | 258 GrPathUtils::convertCubicToQuads(pts, SK_Scalar1, true, dir, &quads); |
262 int count = quads.count(); | 259 int count = quads.count(); |
263 for (int q = 0; q < count; q += 3) { | 260 for (int q = 0; q < count; q += 3) { |
264 add_quad_segment(&quads[q], segments, devBounds); | 261 add_quad_segment(&quads[q], segments); |
265 } | 262 } |
266 } | 263 } |
267 | 264 |
268 static bool get_segments(const SkPath& path, | 265 static bool get_segments(const SkPath& path, |
269 const SkMatrix& m, | 266 const SkMatrix& m, |
270 SegmentArray* segments, | 267 SegmentArray* segments, |
271 SkPoint* fanPt, | 268 SkPoint* fanPt, |
272 int* vCount, | 269 int* vCount, |
273 int* iCount, | 270 int* iCount) { |
274 SkRect* devBounds) { | |
275 SkPath::Iter iter(path, true); | 271 SkPath::Iter iter(path, true); |
276 // This renderer over-emphasizes very thin path regions. We use the distance | 272 // This renderer over-emphasizes very thin path regions. We use the distance |
277 // to the path from the sample to compute coverage. Every pixel intersected | 273 // to the path from the sample to compute coverage. Every pixel intersected |
278 // by the path will be hit and the maximum distance is sqrt(2)/2. We don't | 274 // by the path will be hit and the maximum distance is sqrt(2)/2. We don't |
279 // notice that the sample may be close to a very thin area of the path and | 275 // notice that the sample may be close to a very thin area of the path and |
280 // thus should be very light. This is particularly egregious for degenerate | 276 // thus should be very light. This is particularly egregious for degenerate |
281 // line paths. We detect paths that are very close to a line (zero area) and | 277 // line paths. We detect paths that are very close to a line (zero area) and |
282 // draw nothing. | 278 // draw nothing. |
283 DegenerateTestData degenerateData; | 279 DegenerateTestData degenerateData; |
284 SkPath::Direction dir; | 280 SkPath::Direction dir; |
285 // get_direction can fail for some degenerate paths. | 281 // get_direction can fail for some degenerate paths. |
286 if (!get_direction(path, m, &dir)) { | 282 if (!get_direction(path, m, &dir)) { |
287 return false; | 283 return false; |
288 } | 284 } |
289 | 285 |
290 for (;;) { | 286 for (;;) { |
291 SkPoint pts[4]; | 287 SkPoint pts[4]; |
292 SkPath::Verb verb = iter.next(pts); | 288 SkPath::Verb verb = iter.next(pts); |
293 switch (verb) { | 289 switch (verb) { |
294 case SkPath::kMove_Verb: | 290 case SkPath::kMove_Verb: |
295 m.mapPoints(pts, 1); | 291 m.mapPoints(pts, 1); |
296 update_degenerate_test(°enerateData, pts[0]); | 292 update_degenerate_test(°enerateData, pts[0]); |
297 devBounds->set(pts->fX, pts->fY, pts->fX, pts->fY); | |
298 break; | 293 break; |
299 case SkPath::kLine_Verb: { | 294 case SkPath::kLine_Verb: { |
300 m.mapPoints(&pts[1], 1); | 295 m.mapPoints(&pts[1], 1); |
301 update_degenerate_test(°enerateData, pts[1]); | 296 update_degenerate_test(°enerateData, pts[1]); |
302 add_line_to_segment(pts[1], segments, devBounds); | 297 add_line_to_segment(pts[1], segments); |
303 break; | 298 break; |
304 } | 299 } |
305 case SkPath::kQuad_Verb: | 300 case SkPath::kQuad_Verb: |
306 m.mapPoints(pts, 3); | 301 m.mapPoints(pts, 3); |
307 update_degenerate_test(°enerateData, pts[1]); | 302 update_degenerate_test(°enerateData, pts[1]); |
308 update_degenerate_test(°enerateData, pts[2]); | 303 update_degenerate_test(°enerateData, pts[2]); |
309 add_quad_segment(pts, segments, devBounds); | 304 add_quad_segment(pts, segments); |
310 break; | 305 break; |
311 case SkPath::kConic_Verb: { | 306 case SkPath::kConic_Verb: { |
312 m.mapPoints(pts, 3); | 307 m.mapPoints(pts, 3); |
313 SkScalar weight = iter.conicWeight(); | 308 SkScalar weight = iter.conicWeight(); |
314 SkAutoConicToQuads converter; | 309 SkAutoConicToQuads converter; |
315 const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.5f); | 310 const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.5f); |
316 for (int i = 0; i < converter.countQuads(); ++i) { | 311 for (int i = 0; i < converter.countQuads(); ++i) { |
317 update_degenerate_test(°enerateData, quadPts[2*i + 1]); | 312 update_degenerate_test(°enerateData, quadPts[2*i + 1]); |
318 update_degenerate_test(°enerateData, quadPts[2*i + 2]); | 313 update_degenerate_test(°enerateData, quadPts[2*i + 2]); |
319 add_quad_segment(quadPts + 2*i, segments, devBounds); | 314 add_quad_segment(quadPts + 2*i, segments); |
320 } | 315 } |
321 break; | 316 break; |
322 } | 317 } |
323 case SkPath::kCubic_Verb: { | 318 case SkPath::kCubic_Verb: { |
324 m.mapPoints(pts, 4); | 319 m.mapPoints(pts, 4); |
325 update_degenerate_test(°enerateData, pts[1]); | 320 update_degenerate_test(°enerateData, pts[1]); |
326 update_degenerate_test(°enerateData, pts[2]); | 321 update_degenerate_test(°enerateData, pts[2]); |
327 update_degenerate_test(°enerateData, pts[3]); | 322 update_degenerate_test(°enerateData, pts[3]); |
328 add_cubic_segments(pts, dir, segments, devBounds); | 323 add_cubic_segments(pts, dir, segments); |
329 break; | 324 break; |
330 }; | 325 }; |
331 case SkPath::kDone_Verb: | 326 case SkPath::kDone_Verb: |
332 if (degenerateData.isDegenerate()) { | 327 if (degenerateData.isDegenerate()) { |
333 return false; | 328 return false; |
334 } else { | 329 } else { |
335 compute_vectors(segments, fanPt, dir, vCount, iCount); | 330 compute_vectors(segments, fanPt, dir, vCount, iCount); |
336 return true; | 331 return true; |
337 } | 332 } |
338 default: | 333 default: |
(...skipping 357 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
696 bool GrAAConvexPathRenderer::canDrawPath(const GrDrawTarget* target, | 691 bool GrAAConvexPathRenderer::canDrawPath(const GrDrawTarget* target, |
697 const GrPipelineBuilder*, | 692 const GrPipelineBuilder*, |
698 const SkMatrix& viewMatrix, | 693 const SkMatrix& viewMatrix, |
699 const SkPath& path, | 694 const SkPath& path, |
700 const SkStrokeRec& stroke, | 695 const SkStrokeRec& stroke, |
701 bool antiAlias) const { | 696 bool antiAlias) const { |
702 return (target->caps()->shaderDerivativeSupport() && antiAlias && | 697 return (target->caps()->shaderDerivativeSupport() && antiAlias && |
703 stroke.isFillStyle() && !path.isInverseFillType() && path.isConvex() ); | 698 stroke.isFillStyle() && !path.isInverseFillType() && path.isConvex() ); |
704 } | 699 } |
705 | 700 |
701 class AAConvexPathBatch : public GrBatch { | |
702 public: | |
703 struct Geometry { | |
704 GrColor fColor; | |
705 SkMatrix fViewMatrix; | |
706 SkPath fPath; | |
707 SkDEBUGCODE(SkRect fDevBounds;) | |
708 }; | |
709 | |
710 static GrBatch* Create(const Geometry& geometry) { | |
711 return SkNEW_ARGS(AAConvexPathBatch, (geometry)); | |
712 } | |
713 | |
714 const char* name() const SK_OVERRIDE { return "AAConvexBatch"; } | |
715 | |
716 void getInvariantOutputColor(GrInitInvariantOutput* out) const SK_OVERRIDE { | |
717 // When this is called on a batch, there is only one geometry bundle | |
718 out->setKnownFourComponents(fGeoData[0].fColor); | |
719 } | |
720 void getInvariantOutputCoverage(GrInitInvariantOutput* out) const SK_OVERRIDE { | |
721 out->setUnknownSingleComponent(); | |
722 } | |
723 | |
724 void initBatchOpt(const GrBatchOpt& batchOpt) { | |
725 fBatchOpt = batchOpt; | |
726 } | |
727 | |
728 void initBatchTracker(const GrPipelineInfo& init) SK_OVERRIDE { | |
729 // Handle any color overrides | |
730 if (init.fColorIgnored) { | |
731 fGeoData[0].fColor = GrColor_ILLEGAL; | |
732 } else if (GrColor_ILLEGAL != init.fOverrideColor) { | |
733 fGeoData[0].fColor = init.fOverrideColor; | |
734 } | |
735 | |
736 // setup batch properties | |
737 fBatch.fColorIgnored = init.fColorIgnored; | |
738 fBatch.fColor = fGeoData[0].fColor; | |
739 fBatch.fUsesLocalCoords = init.fUsesLocalCoords; | |
740 fBatch.fCoverageIgnored = init.fCoverageIgnored; | |
741 } | |
742 | |
743 void generateGeometry(GrBatchTarget* batchTarget, const GrPipeline* pipeline) SK_OVERRIDE { | |
744 int instanceCount = fGeoData.count(); | |
745 | |
746 SkMatrix invert; | |
747 if (!this->viewMatrix().invert(&invert)) { | |
748 SkDebugf("Could not invert viewmatrix\n"); | |
749 return; | |
750 } | |
751 | |
752 // Setup GrGeometryProcessor | |
753 SkAutoTUnref<GrGeometryProcessor> quadProcessor(QuadEdgeEffect::Create(this->color(), | |
754 invert)); | |
755 | |
756 batchTarget->initDraw(quadProcessor, pipeline); | |
757 | |
758 // TODO remove this when batch is everywhere | |
759 GrPipelineInfo init; | |
760 init.fColorIgnored = fBatch.fColorIgnored; | |
761 init.fOverrideColor = GrColor_ILLEGAL; | |
762 init.fCoverageIgnored = fBatch.fCoverageIgnored; | |
763 init.fUsesLocalCoords = this->usesLocalCoords(); | |
764 quadProcessor->initBatchTracker(batchTarget->currentBatchTracker(), init); | |
765 | |
766 // TODO generate all segments for all paths and use one vertex buffer | |
767 for (int i = 0; i < instanceCount; i++) { | |
768 Geometry& args = fGeoData[i]; | |
769 | |
770 // We use the fact that SkPath::transform path does subdivision based on | |
771 // perspective. Otherwise, we apply the view matrix when copying to the | |
772 // segment representation. | |
773 const SkMatrix* viewMatrix = &args.fViewMatrix; | |
774 if (viewMatrix->hasPerspective()) { | |
775 args.fPath.transform(*viewMatrix); | |
776 viewMatrix = &SkMatrix::I(); | |
777 } | |
778 | |
779 int vertexCount; | |
780 int indexCount; | |
781 enum { | |
782 kPreallocSegmentCnt = 512 / sizeof(Segment), | |
783 kPreallocDrawCnt = 4, | |
784 }; | |
785 SkSTArray<kPreallocSegmentCnt, Segment, true> segments; | |
786 SkPoint fanPt; | |
787 | |
788 if (!get_segments(args.fPath, *viewMatrix, &segments, &fanPt, &vertexCount, | |
789 &indexCount)) { | |
790 continue; | |
791 } | |
792 | |
793 const GrVertexBuffer* vertexBuffer; | |
794 int firstVertex; | |
795 | |
796 size_t vertexStride = quadProcessor->getVertexStride(); | |
797 void *vertices = batchTarget->vertexPool()->makeSpace(vertexStride, | |
798 vertexCount, | |
799 &vertexBuffer, | |
800 &firstVertex); | |
801 | |
802 const GrIndexBuffer* indexBuffer; | |
803 int firstIndex; | |
804 | |
805 void *indices = batchTarget->indexPool()->makeSpace(indexCount, | |
806 &indexBuffer, | |
807 &firstIndex); | |
808 | |
809 QuadVertex* verts = reinterpret_cast<QuadVertex*>(vertices); | |
810 uint16_t* idxs = reinterpret_cast<uint16_t*>(indices); | |
811 | |
812 SkSTArray<kPreallocDrawCnt, Draw, true> draws; | |
813 create_vertices(segments, fanPt, &draws, verts, idxs); | |
814 | |
815 #ifdef SK_DEBUG | |
816 // Check devBounds | |
bsalomon
2015/02/05 19:51:33
I didn't mean to eliminate the check... just the e
| |
817 SkRect tolDevBounds = args.fDevBounds; | |
818 tolDevBounds.outset(SK_Scalar1 / 10000, SK_Scalar1 / 10000); | |
819 SkRect actualBounds; | |
820 actualBounds.set(verts[0].fPos, verts[1].fPos); | |
821 for (int i = 2; i < vertexCount; ++i) { | |
822 actualBounds.growToInclude(verts[i].fPos.fX, verts[i].fPos.fY); | |
823 } | |
824 SkASSERT(tolDevBounds.contains(actualBounds)); | |
825 #endif | |
826 | |
827 GrDrawTarget::DrawInfo info; | |
828 info.setVertexBuffer(vertexBuffer); | |
829 info.setIndexBuffer(indexBuffer); | |
830 info.setPrimitiveType(kTriangles_GrPrimitiveType); | |
831 info.setStartIndex(firstIndex); | |
832 | |
833 int vOffset = 0; | |
834 for (int i = 0; i < draws.count(); ++i) { | |
835 const Draw& draw = draws[i]; | |
836 info.setStartVertex(vOffset + firstVertex); | |
837 info.setVertexCount(draw.fVertexCnt); | |
838 info.setIndexCount(draw.fIndexCnt); | |
839 batchTarget->draw(info); | |
840 vOffset += draw.fVertexCnt; | |
841 } | |
842 } | |
843 } | |
844 | |
845 SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; } | |
846 | |
847 private: | |
848 AAConvexPathBatch(const Geometry& geometry) { | |
849 this->initClassID<AAConvexPathBatch>(); | |
850 fGeoData.push_back(geometry); | |
851 } | |
852 | |
853 bool onCombineIfPossible(GrBatch* t) SK_OVERRIDE { | |
854 AAConvexPathBatch* that = t->cast<AAConvexPathBatch>(); | |
855 | |
856 if (this->color() != that->color()) { | |
857 return false; | |
858 } | |
859 | |
860 SkASSERT(this->usesLocalCoords() == that->usesLocalCoords()); | |
861 if (this->usesLocalCoords() && !this->viewMatrix().cheapEqualTo(that->viewMatrix())) { | |
862 return false; | |
863 } | |
864 | |
865 fGeoData.push_back_n(that->geoData()->count(), that->geoData()->begin()); | |
866 return true; | |
867 } | |
868 | |
869 GrColor color() const { return fBatch.fColor; } | |
870 bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; } | |
871 const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; } | |
872 | |
873 struct BatchTracker { | |
874 GrColor fColor; | |
875 bool fUsesLocalCoords; | |
876 bool fColorIgnored; | |
877 bool fCoverageIgnored; | |
878 }; | |
879 | |
880 GrBatchOpt fBatchOpt; | |
881 BatchTracker fBatch; | |
882 SkSTArray<1, Geometry, true> fGeoData; | |
883 }; | |
884 | |
706 bool GrAAConvexPathRenderer::onDrawPath(GrDrawTarget* target, | 885 bool GrAAConvexPathRenderer::onDrawPath(GrDrawTarget* target, |
707 GrPipelineBuilder* pipelineBuilder, | 886 GrPipelineBuilder* pipelineBuilder, |
708 GrColor color, | 887 GrColor color, |
709 const SkMatrix& vm, | 888 const SkMatrix& vm, |
710 const SkPath& origPath, | 889 const SkPath& path, |
711 const SkStrokeRec&, | 890 const SkStrokeRec&, |
712 bool antiAlias) { | 891 bool antiAlias) { |
713 | 892 if (path.isEmpty()) { |
714 const SkPath* path = &origPath; | |
715 if (path->isEmpty()) { | |
716 return true; | 893 return true; |
717 } | 894 } |
718 | 895 |
719 SkMatrix viewMatrix = vm; | 896 // This outset was determined experimentally by running skps and gms. It probably could be a |
bsalomon
2015/02/05 18:36:12
Ok, can we change this comment to just say that we
| |
720 SkMatrix invert; | 897 // bit tighter |
721 if (!viewMatrix.invert(&invert)) { | 898 SkRect devRect = path.getBounds(); |
722 return false; | 899 vm.mapRect(&devRect); |
723 } | 900 devRect.outset(2, 2); |
724 | 901 |
725 // We use the fact that SkPath::transform path does subdivision based on | 902 AAConvexPathBatch::Geometry geometry; |
726 // perspective. Otherwise, we apply the view matrix when copying to the | 903 geometry.fColor = color; |
727 // segment representation. | 904 geometry.fViewMatrix = vm; |
728 SkPath tmpPath; | 905 geometry.fPath = path; |
729 if (viewMatrix.hasPerspective()) { | 906 SkDEBUGCODE(geometry.fDevBounds = devRect;) |
730 origPath.transform(viewMatrix, &tmpPath); | 907 |
731 path = &tmpPath; | 908 SkAutoTUnref<GrBatch> batch(AAConvexPathBatch::Create(geometry)); |
732 viewMatrix = SkMatrix::I(); | 909 target->drawBatch(pipelineBuilder, batch, &devRect); |
733 } | |
734 | |
735 QuadVertex *verts; | |
736 uint16_t* idxs; | |
737 | |
738 int vCount; | |
739 int iCount; | |
740 enum { | |
741 kPreallocSegmentCnt = 512 / sizeof(Segment), | |
742 kPreallocDrawCnt = 4, | |
743 }; | |
744 SkSTArray<kPreallocSegmentCnt, Segment, true> segments; | |
745 SkPoint fanPt; | |
746 | |
747 // We can't simply use the path bounds because we may degenerate cubics to quads which produces | |
748 // new control points outside the original convex hull. | |
749 SkRect devBounds; | |
750 if (!get_segments(*path, viewMatrix, &segments, &fanPt, &vCount, &iCount, &devBounds)) { | |
751 return false; | |
752 } | |
753 | |
754 // Our computed verts should all be within one pixel of the segment control points. | |
755 devBounds.outset(SK_Scalar1, SK_Scalar1); | |
756 | |
757 SkAutoTUnref<GrGeometryProcessor> quadProcessor(QuadEdgeEffect::Create(color , invert)); | |
758 | |
759 GrDrawTarget::AutoReleaseGeometry arg(target, vCount, quadProcessor->getVert exStride(), iCount); | |
760 SkASSERT(quadProcessor->getVertexStride() == sizeof(QuadVertex)); | |
761 if (!arg.succeeded()) { | |
762 return false; | |
763 } | |
764 verts = reinterpret_cast<QuadVertex*>(arg.vertices()); | |
765 idxs = reinterpret_cast<uint16_t*>(arg.indices()); | |
766 | |
767 SkSTArray<kPreallocDrawCnt, Draw, true> draws; | |
768 create_vertices(segments, fanPt, &draws, verts, idxs); | |
769 | |
770 // Check devBounds | |
771 #ifdef SK_DEBUG | |
772 SkRect tolDevBounds = devBounds; | |
773 tolDevBounds.outset(SK_Scalar1 / 10000, SK_Scalar1 / 10000); | |
774 SkRect actualBounds; | |
775 actualBounds.set(verts[0].fPos, verts[1].fPos); | |
776 for (int i = 2; i < vCount; ++i) { | |
777 actualBounds.growToInclude(verts[i].fPos.fX, verts[i].fPos.fY); | |
778 } | |
779 SkASSERT(tolDevBounds.contains(actualBounds)); | |
780 #endif | |
781 | |
782 int vOffset = 0; | |
783 for (int i = 0; i < draws.count(); ++i) { | |
784 const Draw& draw = draws[i]; | |
785 target->drawIndexed(pipelineBuilder, | |
786 quadProcessor, | |
787 kTriangles_GrPrimitiveType, | |
788 vOffset, // start vertex | |
789 0, // start index | |
790 draw.fVertexCnt, | |
791 draw.fIndexCnt, | |
792 &devBounds); | |
793 vOffset += draw.fVertexCnt; | |
794 } | |
795 | 910 |
796 return true; | 911 return true; |
912 | |
797 } | 913 } |
OLD | NEW |