Chromium Code Reviews

Side by Side Diff: src/gpu/instanced/InstancedRendering.h

Issue 2066993003: Begin instanced rendering for simple shapes (Closed)
Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: get mixed samples and base instance paths working again
Created 4 years, 6 months ago
/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef gr_instanced_InstancedRendering_DEFINED
#define gr_instanced_InstancedRendering_DEFINED

#include "GrAllocator.h"
#include "SkTInternalLList.h"
#include "batches/GrDrawBatch.h"
#include "instanced/InstancedRenderingTypes.h"
#include "../private/GrInstancedPipelineInfo.h"

class GrResourceProvider;

namespace gr_instanced {

class InstanceProcessor;

/**
 * This class serves as a centralized clearinghouse for instanced rendering. It accumulates data
 * for instanced draws into one location, and creates special batches that pull from this data. The
 * nature of instanced rendering allows these batches to combine well and render efficiently.
 *
 * During a flush, this class assembles the accumulated draw data into a single vertex and texel
 * buffer, and its subclass draws the batches using backend-specific instanced rendering APIs.
 *
 * This class is responsible for the CPU side of instanced rendering. Shaders are implemented by
 * InstanceProcessor.
 */
class InstancedRendering : public SkNoncopyable {
public:
    virtual ~InstancedRendering() { SkASSERT(State::kRecordingDraws == fState); }

    GrGpu* gpu() const { return fGpu; }

    /**
     * These methods make a new record internally for an instanced draw, and return a batch that is
     * effectively just an index to that record. The returned batch is not self-contained, but
     * rather relies on this class to handle the rendering. The client must call beginFlush() on
     * this class before attempting to flush batches returned by it. It is invalid to record new
     * draws between beginFlush() and endFlush().
     */
    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
                                                  bool antialias, const GrInstancedPipelineInfo&,
                                                  bool* useHWAA);

    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
                                                  const SkRect& localRect, bool antialias,
                                                  const GrInstancedPipelineInfo&, bool* useHWAA);

    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
                                                  const SkMatrix& localMatrix, bool antialias,
                                                  const GrInstancedPipelineInfo&, bool* useHWAA);

    GrDrawBatch* SK_WARN_UNUSED_RESULT recordOval(const SkRect&, const SkMatrix&, GrColor,
                                                  bool antialias, const GrInstancedPipelineInfo&,
                                                  bool* useHWAA);

    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRRect(const SkRRect&, const SkMatrix&, GrColor,
                                                   bool antialias, const GrInstancedPipelineInfo&,
                                                   bool* useHWAA);

    GrDrawBatch* SK_WARN_UNUSED_RESULT recordDRRect(const SkRRect& outer, const SkRRect& inner,
                                                    const SkMatrix&, GrColor, bool antialias,
                                                    const GrInstancedPipelineInfo&, bool* useHWAA);

    /**
     * Compiles all recorded draws into GPU buffers and allows the client to begin flushing the
     * batches created by this class.
     */
    void beginFlush(GrResourceProvider*);

    /**
     * Called once the batches created previously by this class have all been released. Allows the
     * client to begin recording draws again.
     */
    void endFlush();
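    /**
     * A sketch of the record/flush contract described above. This is illustrative only:
     * `ir`, `target`, `pipelineBuilder`, and `resourceProvider` are assumed stand-ins for
     * the caller's InstancedRendering, draw target, pipeline builder, and resource
     * provider, not names defined by this file.
     *
     *     bool useHWAA;
     *     GrDrawBatch* batch = ir->recordRect(rect, viewMatrix, color, antialias,
     *                                         pipelineInfo, &useHWAA);
     *     if (batch) {
     *         target->drawBatch(pipelineBuilder, batch);  // hand off to the draw target
     *     }
     *
     *     // Later, at flush time (no new draws may be recorded in between):
     *     ir->beginFlush(resourceProvider);
     *     // ... the draw target executes the accumulated batches here ...
     *     ir->endFlush();  // recording may resume
     */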

    enum class ResetType : bool {
        kDestroy,
        kAbandon
    };

    /**
     * Resets all GPU resources, including those that are held long term. They will be lazily
     * reinitialized if the class begins to be used again.
     */
    void resetGpuResources(ResetType);
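    /**
     * For example (a sketch; the caller shown is an assumption, not part of this file),
     * the owning context might call
     *
     *     ir->resetGpuResources(ResetType::kAbandon);
     *
     * when the GPU context is abandoned, so resources are dropped without touching the
     * now-invalid backend API, and ResetType::kDestroy on an orderly shutdown so the GPU
     * objects are actually deleted.
     */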

protected:
    class Batch : public GrDrawBatch {
    public:
        SK_DECLARE_INTERNAL_LLIST_INTERFACE(Batch);

        const char* name() const override { return "Instanced Batch"; }
        ~Batch() override { fInstancedRendering->fBatchList.remove(this); }

    protected:
        Batch(uint32_t classID, InstancedRendering* ir, int instanceIdx);

        void initBatchTracker(const GrXPOverridesForBatch&) override;
        bool onCombineIfPossible(GrBatch* other, const GrCaps& caps) override;

        void computePipelineOptimizations(GrInitInvariantOutput* color,
                                          GrInitInvariantOutput* coverage,
                                          GrBatchToXPOverrides*) const override;

        void onPrepare(GrBatchFlushState*) override {}
        void onDraw(GrBatchFlushState*) override;

        struct DrawCmd {
#ifdef SK_DEBUG
            DrawCmd() : fGeometry{-1, 0}, fInstanceRange{-1, 0} {}

            bool isValid() const {
                return fGeometry.fStart >= 0 && fGeometry.fCount > 0 &&
                       fInstanceRange.fStart >= 0 && fInstanceRange.fCount > 0;
            }
#endif

            int getSingleInstanceIdx() const {
                SkASSERT(1 == fInstanceRange.fCount);
                return fInstanceRange.fStart;
            }

            IndexRange    fGeometry;
            InstanceRange fInstanceRange;
        };
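        /**
         * Illustrative values (an assumption about typical contents, not asserted by
         * this file): after two rect draws combine into one batch, a single command
         * might read fGeometry = {0, 6} (six indices of the shared rect geometry) and
         * fInstanceRange = {17, 2} (two contiguous instances), yielding one instanced
         * draw call.
         */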

        DrawCmd& getSingleDrawCmd() {
            SkASSERT(1 == fDrawCmds.count());
            return fDrawCmds.front();
        }

        InstancedRendering* const    fInstancedRendering;
        SkSTArray<4, DrawCmd, false> fDrawCmds;
        BatchInfo                    fInfo;

        typedef GrDrawBatch INHERITED;

        friend class InstancedRendering;
    };

    typedef SkTInternalLList<Batch> BatchList;

    InstancedRendering(GrGpu* gpu, AntialiasMode lastSupportedAAMode);

    const Instance& instance(int idx) const { return fInstances[idx]; }
    const BatchList& batchList() const { return fBatchList; }
    const GrBuffer* vertexBuffer() const { SkASSERT(fVertexBuffer); return fVertexBuffer; }
    const GrBuffer* indexBuffer() const { SkASSERT(fIndexBuffer); return fIndexBuffer; }

    virtual void onBeginFlush(GrResourceProvider*) = 0;
    virtual void onDraw(const GrPipeline&, const InstanceProcessor&, const Batch*) = 0;
    virtual void onEndFlush() = 0;
    virtual void onResetGpuResources(ResetType) = 0;

private:
    enum class State : bool {
        kRecordingDraws,
        kFlushing
    };

    Batch* SK_WARN_UNUSED_RESULT recordShape(ShapeType, const SkRect& bounds,
                                             const SkMatrix& viewMatrix, GrColor,
                                             const SkRect& localRect, bool antialias,
                                             const GrInstancedPipelineInfo&, bool* requireHWAA);

    bool selectAntialiasMode(const SkMatrix& viewMatrix, bool antialias,
                             const GrInstancedPipelineInfo&, bool* useHWAA, AntialiasMode*);

    void appendRRectParams(const SkRRect&, BatchInfo*);
    void appendParamsTexel(const SkScalar* vals, int count);
    void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w);
    void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z);

    virtual Batch* createBatch(int instanceIdx) = 0;

    const SkAutoTUnref<GrGpu>          fGpu;
    const AntialiasMode                fLastSupportedAAMode;
    State                              fState;
    SkSTArray<1024, Instance, true>    fInstances;
    SkSTArray<1024, ParamsTexel, true> fParams;
    BatchList                          fBatchList;
    SkAutoTUnref<const GrBuffer>       fVertexBuffer;
    SkAutoTUnref<const GrBuffer>       fIndexBuffer;
    SkAutoTUnref<GrBuffer>             fParamsBuffer;
};
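/**
 * A backend implements the virtual hooks above roughly as follows. This is a hedged
 * sketch: the class name GLInstancedRendering and the comments describe a typical
 * subclass as an assumption, not declarations made by this file.
 *
 *     class GLInstancedRendering : public InstancedRendering {
 *     private:
 *         void onBeginFlush(GrResourceProvider*) override;  // upload instance/params data
 *         void onDraw(const GrPipeline&, const InstanceProcessor&,
 *                     const Batch*) override;               // e.g. glDrawElementsInstanced
 *         void onEndFlush() override;                       // release per-flush buffers
 *         void onResetGpuResources(ResetType) override;     // free or abandon GPU objects
 *         Batch* createBatch(int instanceIdx) override;
 *     };
 */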

}

#endif
