Chromium Code Reviews

Side by Side Diff: src/gpu/GrInstancedRendering.h

Issue 1897203002: Implement instanced rendering for simple shapes (Closed) Base URL: https://skia.googlesource.com/skia.git@upload2_requireHWAA
Patch Set: rebase Created 4 years, 8 months ago
1 /*
2 * Copyright 2016 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #ifndef GrInstancedRendering_DEFINED
9 #define GrInstancedRendering_DEFINED
10
11 #include "GrAllocator.h"
12 #include "GrInstancedRenderingTypes.h"
13 #include "batches/GrDrawBatch.h"
14
15 class GrInstanceProcessor;
16 class GrResourceProvider;
17
18 /**
19 * This class serves as a centralized clearinghouse for instanced rendering. It accumulates data for
bsalomon 2016/04/25 13:22:38 If we had a completely different instanced primiti
Chris Dalton 2016/04/25 17:01:19 My vision for this class is yes! The basic geometr
bsalomon 2016/04/27 14:10:32 Ok, let's keep the name then
20 * instanced draws into one location, and creates special batches that pull from this data. The
21 * nature of instanced rendering allows these batches to combine well and render efficiently.
22 *
23 * During a flush, this class assembles the accumulated draw data into a single vertex and texel
24 * buffer, and its subclass draws the batches using backend-specific instanced rendering APIs.
25 *
26 * This class is responsible for the CPU side of instanced rendering. Shaders are implemented by
27 * GrInstanceProcessor.
28 */
29 class GrInstancedRendering : public SkNoncopyable, protected GrInstancedRenderingTypes {
30 public:
31 virtual ~GrInstancedRendering() { SkASSERT(State::kRecordingShapes == fState); }
32
33 GrGpu* gpu() const { return fGpu; }
34
bsalomon 2016/04/25 13:22:38 I think these could use a comment
Chris Dalton 2016/04/26 19:18:07 Done.
35 enum Flags {
36 kStencilWrite_Flag = (1 << 0),
37 kStencilBufferMSAA_Flag = (1 << 1),
38 kColorWrite_Flag = (1 << 2),
39 kColorBufferMSAA_Flag = (1 << 3),
40 /**
41 * This should not be set if the fragment shader uses derivatives, automatic mipmap LOD, or
42 * other features that depend on neighboring pixels.
43 */
44 kUseDiscard_Flag = (1 << 4)
45 };
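As an illustration only (not part of this patch), a caller might combine these flags along the following lines; the variable names below are hypothetical:

    // Color draw into an MSAA render target vs. a stencil-only pass.
    // kUseDiscard_Flag is left out whenever the fragment shader relies on
    // derivatives or automatic mipmap LOD, per the comment above.
    uint32_t colorFlags   = GrInstancedRendering::kColorWrite_Flag |
                            GrInstancedRendering::kColorBufferMSAA_Flag;
    uint32_t stencilFlags = GrInstancedRendering::kStencilWrite_Flag |
                            GrInstancedRendering::kStencilBufferMSAA_Flag;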
46
47 /**
48 * These methods record a new instanced draw and return a batch that can render it. The client
49 * must call commitToGpu() before attempting to draw batches returned by this class. After
bsalomon 2016/04/25 13:22:38 Maybe before attempting to flush batches returned
Chris Dalton 2016/04/26 19:18:07 Done.
50 * commitToGpu(), it becomes invalid to record new draws until a subsequent call to restart().
51 */
52 GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
bsalomon 2016/04/25 13:22:38 It seems to me like the "recording" aspect of thes
Chris Dalton 2016/04/25 17:01:19 When naming these methods I did think this through
Chris Dalton 2016/04/26 19:18:07 Updated the comment.
bsalomon 2016/04/27 14:10:32 Ok, that makes sense to me.
53 bool antialias, uint32_t flags, bool* useHWAA);
54
55 GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
56 const SkRect& localRect, bool antialias,
57 uint32_t flags, bool* useHWAA);
58
59 GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
60 const SkMatrix& localMatrix, bool antialias,
61 uint32_t flags, bool* useHWAA);
62
63 GrDrawBatch* SK_WARN_UNUSED_RESULT recordOval(const SkRect&, const SkMatrix&, GrColor,
64 bool antialias, uint32_t flags, bool* useHWAA);
65
66 GrDrawBatch* SK_WARN_UNUSED_RESULT recordRRect(const SkRRect&, const SkMatrix&, GrColor,
67 bool antialias, uint32_t flags, bool* useHWAA);
68
69 GrDrawBatch* SK_WARN_UNUSED_RESULT recordDRRect(const SkRRect& outer, const SkRRect& inner,
70 const SkMatrix&, GrColor, bool antialias,
71 uint32_t flags, bool* useHWAA);
72
73 /**
74 * Commits all recorded draws to GPU memory and allows the client to begin drawing the batches
75 * created by this class.
76 */
77 void commitToGpu(GrResourceProvider*);
bsalomon 2016/04/25 13:22:38 Wonder if something like prepareToFlush or willFlu
Chris Dalton 2016/04/26 19:18:08 Done.
78
79 /**
80 * Called once the batches created previously by this class have all been released. Allows the
81 * client to begin recording draws again.
82 */
83 void restart();
bsalomon 2016/04/25 13:22:38 Minor but I think we more commonly use reset()
Chris Dalton 2016/04/25 17:01:19 I also thought this one through. I decided to go w
Chris Dalton 2016/04/26 19:18:07 How's endFlush()?
bsalomon 2016/04/27 14:10:32 sgtm
84
85 enum class ClearType {
86 kDestroy,
87 kAbandon
88 };
89
90 /**
91 * Clears all GPU resources, including those that are held long term. They will be lazily
92 * reinitialized if the class begins to be used again.
93 */
94 void clearGpuResources(ClearType);
bsalomon 2016/04/25 13:22:38 freeGpuResources? again just trying to mentally ma
Chris Dalton 2016/04/25 17:01:19 clearGpuResources(ClearType::kDestroy) would be th
Chris Dalton 2016/04/26 19:18:07 Done.
95
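Taken together, the public methods above describe a record / commit / draw / restart cycle. A minimal sketch of that sequence, assuming a hypothetical caller that already has a GrInstancedRendering* ir, a resource provider, and its own batch-flushing path; the SkAutoTUnref wrapping assumes the returned batch comes back ref'd, as is typical for GrDrawBatch factories:

    bool useHWAA;
    SkAutoTUnref<GrDrawBatch> batch(ir->recordRect(rect, viewMatrix, color,
                                                   /*antialias=*/true,
                                                   GrInstancedRendering::kColorWrite_Flag,
                                                   &useHWAA));
    // ... record any number of additional rects, ovals, and rrects ...
    ir->commitToGpu(resourceProvider);  // uploads the accumulated instance data
    // ... draw the returned batches through the normal flush path,
    //     honoring useHWAA when building each pipeline ...
    ir->restart();                      // only once every returned batch is released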
96 protected:
97 class Batch : public GrDrawBatch {
98 public:
99 virtual ~Batch() { fInUse = false; } // fInUse will continue to be accessed.
100
101 const char* name() const override { return "Instanced Batch"; }
102
103 void computePipelineOptimizations(GrInitInvariantOutput* color,
104 GrInitInvariantOutput* coverage,
105 GrBatchToXPOverrides*) const override;
106
107 protected:
108 Batch(uint32_t classID, GrInstancedRendering* ir, AntialiasMode aa, int instanceIdx)
109 : INHERITED(classID),
110 fInstancedRendering(ir),
111 fAntialiasMode(aa),
112 fFirstInstanceIdx(instanceIdx),
113 fInUse(true) {
114 #ifdef SK_DEBUG
115 fIsCombined = false;
116 #endif
117 }
118
119 void initBatchTracker(const GrXPOverridesForBatch&) override;
120
121 void onPrepare(GrBatchFlushState*) override {}
122 void onDraw(GrBatchFlushState*) override;
123 void onDelete() const override;
124
125 GrInstancedRendering* const fInstancedRendering;
126 const AntialiasMode fAntialiasMode;
127 const int fFirstInstanceIdx;
128 BatchTracker fTracker;
129 bool fInUse;
130 #ifdef SK_DEBUG
131 bool fIsCombined;
132 #endif
133
134 typedef GrDrawBatch INHERITED;
135
136 friend class GrInstancedRendering;
137 };
138
139 class BatchAllocator : public GrAllocator {
140 public:
141 BatchAllocator(size_t sizeofBatchClass)
142 : INHERITED(sizeofBatchClass, kBatchesPerBlock, nullptr) {
143 fFirstBlock = sk_malloc_throw(kBatchesPerBlock * sizeofBatchClass);
144 this->setInitialBlock(fFirstBlock);
145 }
146
147 ~BatchAllocator() {
148 sk_free(fFirstBlock);
149 }
150
151 private:
152 enum { kBatchesPerBlock = 128 };
153
154 void* fFirstBlock;
155
156 typedef GrAllocator INHERITED;
157 };
158
159 GrInstancedRendering(GrGpu* gpu, uint32_t supportedAAModes, size_t sizeofBatchClass);
160
161 const GrBuffer* vertexBuffer() const { SkASSERT(fVertexBuffer); return fVertexBuffer; }
162 const GrBuffer* indexBuffer() const { SkASSERT(fIndexBuffer); return fIndexBuffer; }
163 const GrBuffer* instanceBuffer() const { SkASSERT(fInstanceBuffer); return fInstanceBuffer; }
164 const BatchAllocator* batchAllocator() const { return &fBatchAllocator; }
165
166 virtual void onCommitToGpu(GrResourceProvider*) = 0;
167 virtual void onDraw(const GrPipeline&, const GrInstanceProcessor&, const Batch*) = 0;
168 virtual void onRestart() = 0;
169 virtual void onClearGpuResources(ClearType) = 0;
170
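For orientation, a rough sketch of how a backend might fill in the hooks above. GrGLInstancedRendering, GLBatch, and the DEFINE_BATCH_CLASS_ID usage are assumptions for illustration rather than part of this patch, and any further pure virtuals inherited from GrDrawBatch are omitted:

    class GrGLInstancedRendering final : public GrInstancedRendering {
    public:
        // Would pass sizeof(GLBatch) as sizeofBatchClass to the base constructor.
        GrGLInstancedRendering(GrGpu* gpu, uint32_t supportedAAModes);

    private:
        class GLBatch : public Batch {
        public:
            DEFINE_BATCH_CLASS_ID
            GLBatch(GrInstancedRendering* ir, AntialiasMode aa, int instanceIdx)
                : Batch(ClassID(), ir, aa, instanceIdx) {}
        };

        void onCommitToGpu(GrResourceProvider*) override {
            // Backend-specific setup (e.g. vertex array state) now that the shared
            // vertex/index/instance buffers exist.
        }
        void onDraw(const GrPipeline&, const GrInstanceProcessor&, const Batch*) override {
            // Issue glDrawElementsInstanced (or equivalent) for the batch's instances.
        }
        void onRestart() override {}
        void onClearGpuResources(ClearType) override {}

        Batch* constructBatch(void* storage, AntialiasMode aa, int instanceIdx) override {
            return new (storage) GLBatch(this, aa, instanceIdx);  // placement new into allocator storage
        }
    };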
171 #ifdef SK_DEBUG
172 int fInUseBatchCount;
173 #endif
174
175 private:
176 enum class State {
177 kRecordingShapes,
178 kDrawingBatches
179 };
180
181 Batch* SK_WARN_UNUSED_RESULT recordShape(ShapeType, const SkRect& bounds,
182 const SkMatrix& viewMatrix, GrColor,
183 const SkRect& localRect, bool antialias,
184 uint32_t flags, bool* requireHWAA);
185
186 bool selectAntialiasMode(const SkMatrix& viewMatrix, bool antialias, uint32_t flags,
187 AntialiasMode*, bool* requireHWAA);
188
189 void appendRRectParams(const SkRRect&, BatchTracker*);
190 void appendParamsTexel(const SkScalar* vals, int count);
191 void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w);
192 void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z);
193
194 virtual Batch* constructBatch(void* storage, AntialiasMode, int instanceIdx) = 0;
195
196 const SkAutoTUnref<GrGpu> fGpu;
197 const uint32_t fSupportedAAModes;
198 State fState;
199 SkSTArray<1024, Instance, true> fInstances;
200 SkSTArray<1024, ParamsTexel, true> fParams;
201 BatchAllocator fBatchAllocator;
202 SkAutoTUnref<const GrBuffer> fVertexBuffer;
203 SkAutoTUnref<const GrBuffer> fIndexBuffer;
204 SkAutoTUnref<const GrBuffer> fInstanceBuffer;
205 SkAutoTUnref<GrBuffer> fParamsBuffer;
206 };
207
208 #endif
