Chromium Code Reviews

Side by Side Diff: src/gpu/GrInstancedRendering.h

Issue 1897203002: Implement instanced rendering for simple shapes (Closed) Base URL: https://skia.googlesource.com/skia.git@upload2_requireHWAA
Patch Set: comments Created 4 years, 8 months ago
/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrInstancedRendering_DEFINED
#define GrInstancedRendering_DEFINED

#include "GrAllocator.h"
#include "GrInstancedRenderingTypes.h"
#include "batches/GrDrawBatch.h"

class GrInstanceProcessor;
class GrResourceProvider;

/**
 * This class serves as a centralized clearinghouse for instanced rendering. It accumulates data for
 * instanced draws into one location, and creates special batches that pull from this data. The
 * nature of instanced rendering allows these batches to combine well and render efficiently.
 *
 * During a flush, this class assembles the accumulated draw data into a single vertex and texel
 * buffer, and its subclass draws the batches using backend-specific instanced rendering APIs.
 *
 * This class is responsible for the CPU side of instanced rendering. Shaders are implemented by
 * GrInstanceProcessor.
 */
class GrInstancedRendering : public SkNoncopyable, protected GrInstancedRenderingTypes {
public:
    virtual ~GrInstancedRendering() { SkASSERT(State::kRecordingDraws == fState); }

    GrGpu* gpu() const { return fGpu; }

    /**
     * Flags that describe relevant external pipeline conditions. These are used to select
     * appropriate antialias modes, shader strategies, etc.
     */
    enum Flags {
        kStencilWrite_Flag      = (1 << 0),
        kStencilBufferMSAA_Flag = (1 << 1),
        kColorWrite_Flag        = (1 << 2),
        kColorBufferMSAA_Flag   = (1 << 3),
        /**
         * This should not be set if the fragment shader uses derivatives, automatic mipmap LOD, or
         * other features that depend on neighboring pixels.
         */
        kUseDiscard_Flag        = (1 << 4)
    };
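
    // Illustrative only (not part of the declared API): a caller recording into a non-MSAA color
    // target whose fragment shader can tolerate discard might pass flags such as
    //
    //     uint32_t flags = kColorWrite_Flag | kUseDiscard_Flag;
    //
    // The exact combination simply mirrors the pipeline state at record time.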

    /**
     * These methods make a new record internally for an instanced draw, and return a batch that is
     * effectively just an index to that record. The returned batch is not self-contained, but
     * rather relies on this class to handle the rendering. The client must call beginFlush() on
     * this class before attempting to flush batches returned by it. It is invalid to record new
     * draws between beginFlush() and endFlush().
     */
    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
                                                  bool antialias, uint32_t flags, bool* useHWAA);

    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
                                                  const SkRect& localRect, bool antialias,
                                                  uint32_t flags, bool* useHWAA);

    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
                                                  const SkMatrix& localMatrix, bool antialias,
                                                  uint32_t flags, bool* useHWAA);

    GrDrawBatch* SK_WARN_UNUSED_RESULT recordOval(const SkRect&, const SkMatrix&, GrColor,
                                                  bool antialias, uint32_t flags, bool* useHWAA);

    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRRect(const SkRRect&, const SkMatrix&, GrColor,
                                                   bool antialias, uint32_t flags, bool* useHWAA);

    GrDrawBatch* SK_WARN_UNUSED_RESULT recordDRRect(const SkRRect& outer, const SkRRect& inner,
                                                    const SkMatrix&, GrColor, bool antialias,
                                                    uint32_t flags, bool* useHWAA);

    /**
     * Compiles all recorded draws into GPU buffers and allows the client to begin flushing the
     * batches created by this class.
     */
    void beginFlush(GrResourceProvider*);

    /**
     * Called once the batches created previously by this class have all been released. Allows the
     * client to begin recording draws again.
     */
    void endFlush();
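
    // Illustrative usage sketch (not part of the declared API): assuming "ir" points at a concrete
    // GrInstancedRendering and "resourceProvider" comes from the caller's flush machinery, the
    // intended record/flush cycle looks like
    //
    //     bool useHWAA;
    //     GrDrawBatch* batch = ir->recordRect(rect, viewMatrix, color, antialias, flags, &useHWAA);
    //     // ...record any further draws, hand the returned batches to the draw target...
    //     ir->beginFlush(resourceProvider);  // compiles all recorded draws into GPU buffers
    //     // ...the draw target flushes the batches...
    //     ir->endFlush();                    // legal only once every returned batch has been released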

    enum class ResetType : bool {
        kDestroy,
        kAbandon
    };

    /**
     * Resets all GPU resources, including those that are held long term. They will be lazily
     * reinitialized if the class begins to be used again.
     */
    void resetGpuResources(ResetType);

protected:
    class Batch : public GrDrawBatch {
    public:
        virtual ~Batch() { fInUse = false; } // fInUse will continue to be accessed.

        const char* name() const override { return "Instanced Batch"; }

        void computePipelineOptimizations(GrInitInvariantOutput* color,
                                          GrInitInvariantOutput* coverage,
                                          GrBatchToXPOverrides*) const override;

    protected:
        Batch(uint32_t classID, GrInstancedRendering* ir, int instanceIdx)
            : INHERITED(classID),
              fInstancedRendering(ir),
              fFirstInstanceIdx(instanceIdx),
              fInUse(true) {
#ifdef SK_DEBUG
            fIsCombined = false;
#endif
        }

        void initBatchTracker(const GrXPOverridesForBatch&) override;

        void onPrepare(GrBatchFlushState*) override {}
        void onDraw(GrBatchFlushState*) override;
        void onDelete() const override;

        GrInstancedRendering* const fInstancedRendering;
        const int                   fFirstInstanceIdx;
        BatchInfo                   fInfo;
        bool                        fInUse;
#ifdef SK_DEBUG
        bool                        fIsCombined;
#endif

        typedef GrDrawBatch INHERITED;

        friend class GrInstancedRendering;
    };

    /**
     * We allocate our own batches. This allows us to iterate through them immediately before a
     * flush in order to compile draw buffers.
     */
    class BatchAllocator : public GrAllocator {
    public:
        BatchAllocator(size_t sizeofBatchClass)
            : INHERITED(sizeofBatchClass, kBatchesPerBlock, nullptr) {
            fFirstBlock = sk_malloc_throw(kBatchesPerBlock * sizeofBatchClass);
            this->setInitialBlock(fFirstBlock);
        }

        ~BatchAllocator() {
            sk_free(fFirstBlock);
        }

    private:
        enum { kBatchesPerBlock = 128 };

        void* fFirstBlock;

        typedef GrAllocator INHERITED;
    };
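
    // Illustrative only (not part of this header): allocating batches here lets a subclass walk
    // every recorded batch just before the flush, e.g. from its onBeginFlush() override. Assuming
    // GrAllocator exposes its Iter helper and "MyBatch" is the subclass's concrete batch type:
    //
    //     GrAllocator::Iter iter(this->batchAllocator());
    //     while (iter.next()) {
    //         const MyBatch* batch = static_cast<const MyBatch*>(iter.get());
    //         // ...append batch->fInfo and its instances to the backend draw buffers...
    //     }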

    GrInstancedRendering(GrGpu* gpu, uint32_t supportedAAModes, size_t sizeofBatchClass);

    const GrBuffer* vertexBuffer() const { SkASSERT(fVertexBuffer); return fVertexBuffer; }
    const GrBuffer* indexBuffer() const { SkASSERT(fIndexBuffer); return fIndexBuffer; }
    const GrBuffer* instanceBuffer() const { SkASSERT(fInstanceBuffer); return fInstanceBuffer; }
    const BatchAllocator* batchAllocator() const { return &fBatchAllocator; }

    virtual void onBeginFlush(GrResourceProvider*) = 0;
    virtual void onDraw(const GrPipeline&, const GrInstanceProcessor&, const Batch*) = 0;
    virtual void onEndFlush() = 0;
    virtual void onResetGpuResources(ResetType) = 0;

#ifdef SK_DEBUG
    int fInUseBatchCount;
#endif

private:
    enum class State : bool {
        kRecordingDraws,
        kFlushing
    };

    Batch* SK_WARN_UNUSED_RESULT recordShape(ShapeType, const SkRect& bounds,
                                             const SkMatrix& viewMatrix, GrColor,
                                             const SkRect& localRect, bool antialias,
                                             uint32_t flags, bool* requireHWAA);

    bool selectAntialiasMode(const SkMatrix& viewMatrix, bool antialias, uint32_t flags,
                             AntialiasMode*, bool* requireHWAA);

    void appendRRectParams(const SkRRect&, BatchInfo*);
    void appendParamsTexel(const SkScalar* vals, int count);
    void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w);
    void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z);

    virtual Batch* constructBatch(void* storage, int instanceIdx) = 0;

    const SkAutoTUnref<GrGpu>          fGpu;
    const uint32_t                     fSupportedAAModes;
    State                              fState;
    SkSTArray<1024, Instance, true>    fInstances;
    SkSTArray<1024, ParamsTexel, true> fParams;
    BatchAllocator                     fBatchAllocator;
    SkAutoTUnref<const GrBuffer>       fVertexBuffer;
    SkAutoTUnref<const GrBuffer>       fIndexBuffer;
    SkAutoTUnref<const GrBuffer>       fInstanceBuffer;
    SkAutoTUnref<GrBuffer>             fParamsBuffer;
};

#endif
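
To make the subclassing contract concrete, here is a minimal, hypothetical sketch of how a backend might plug into the hooks declared above. The names GrNullInstancedRendering and NullBatch are invented for illustration, the batch's remaining GrDrawBatch overrides are elided, and the empty bodies only note what a real backend would do, so treat this as a schematic rather than compilable code from this change:

// Hypothetical sketch: shows which pure virtuals a backend implements.
class GrNullInstancedRendering final : public GrInstancedRendering {
public:
    GrNullInstancedRendering(GrGpu* gpu, uint32_t supportedAAModes)
        : GrInstancedRendering(gpu, supportedAAModes, sizeof(NullBatch)) {}

private:
    class NullBatch : public Batch {
    public:
        NullBatch(GrInstancedRendering* ir, int instanceIdx)
            : Batch(0 /* a real batch supplies a proper class ID */, ir, instanceIdx) {}
        // A concrete batch also provides the GrDrawBatch overrides not declared in this header.
    };

    void onBeginFlush(GrResourceProvider*) override {}   // build/upload backend draw buffers
    void onDraw(const GrPipeline&, const GrInstanceProcessor&, const Batch*) override {}
    void onEndFlush() override {}                        // release per-flush resources
    void onResetGpuResources(ResetType) override {}      // drop long-lived GPU objects

    Batch* constructBatch(void* storage, int instanceIdx) override {
        return new (storage) NullBatch(this, instanceIdx);  // placement-new into the batch allocator
    }
};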