Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(411)

Side by Side Diff: src/gpu/GrBatchAtlas.cpp

Issue 1835283002: Simplify GrDrawBatch uploads and token usage. (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: rebase Created 4 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/gpu/GrBatchAtlas.h ('k') | src/gpu/GrBatchFlushState.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 /* 1 /*
2 * Copyright 2015 Google Inc. 2 * Copyright 2015 Google Inc.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license that can be 4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file. 5 * found in the LICENSE file.
6 */ 6 */
7 7
8 #include "GrBatchAtlas.h" 8 #include "GrBatchAtlas.h"
9 #include "GrBatchFlushState.h" 9 #include "GrBatchFlushState.h"
10 #include "GrRectanizer.h" 10 #include "GrRectanizer.h"
11 #include "GrTracing.h" 11 #include "GrTracing.h"
12 12
13 //////////////////////////////////////////////////////////////////////////////// 13 ////////////////////////////////////////////////////////////////////////////////
14 14
15 GrBatchAtlas::BatchPlot::BatchPlot(int index, uint64_t genID, int offX, int offY , int width, 15 GrBatchAtlas::BatchPlot::BatchPlot(int index, uint64_t genID, int offX, int offY , int width,
16 int height, GrPixelConfig config) 16 int height, GrPixelConfig config)
17 : fLastUpload(0) 17 : fLastUpload(GrBatchDrawToken::AlreadyFlushedToken())
18 , fLastUse(0) 18 , fLastUse(GrBatchDrawToken::AlreadyFlushedToken())
19 , fIndex(index) 19 , fIndex(index)
20 , fGenID(genID) 20 , fGenID(genID)
21 , fID(CreateId(fIndex, fGenID)) 21 , fID(CreateId(fIndex, fGenID))
22 , fData(nullptr) 22 , fData(nullptr)
23 , fWidth(width) 23 , fWidth(width)
24 , fHeight(height) 24 , fHeight(height)
25 , fX(offX) 25 , fX(offX)
26 , fY(offY) 26 , fY(offY)
27 , fRects(nullptr) 27 , fRects(nullptr)
28 , fOffset(SkIPoint16::Make(fX * fWidth, fY * fHeight)) 28 , fOffset(SkIPoint16::Make(fX * fWidth, fY * fHeight))
(...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after
71 71
72 fDirtyRect.join(loc->fX, loc->fY, loc->fX + width, loc->fY + height); 72 fDirtyRect.join(loc->fX, loc->fY, loc->fX + width, loc->fY + height);
73 73
74 loc->fX += fOffset.fX; 74 loc->fX += fOffset.fX;
75 loc->fY += fOffset.fY; 75 loc->fY += fOffset.fY;
76 SkDEBUGCODE(fDirty = true;) 76 SkDEBUGCODE(fDirty = true;)
77 77
78 return true; 78 return true;
79 } 79 }
80 80
81 void GrBatchAtlas::BatchPlot::uploadToTexture(GrBatchUploader::TextureUploader* uploader, 81 void GrBatchAtlas::BatchPlot::uploadToTexture(GrDrawBatch::WritePixelsFn& writeP ixels,
82 GrTexture* texture) { 82 GrTexture* texture) {
83 // We should only be issuing uploads if we are in fact dirty 83 // We should only be issuing uploads if we are in fact dirty
84 SkASSERT(fDirty && fData && texture); 84 SkASSERT(fDirty && fData && texture);
85 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrBatchPlot::uploadToTe xture"); 85 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrBatchPlot::uploadToTe xture");
86 size_t rowBytes = fBytesPerPixel * fWidth; 86 size_t rowBytes = fBytesPerPixel * fWidth;
87 const unsigned char* dataPtr = fData; 87 const unsigned char* dataPtr = fData;
88 dataPtr += rowBytes * fDirtyRect.fTop; 88 dataPtr += rowBytes * fDirtyRect.fTop;
89 dataPtr += fBytesPerPixel * fDirtyRect.fLeft; 89 dataPtr += fBytesPerPixel * fDirtyRect.fLeft;
90 uploader->writeTexturePixels(texture, 90 writePixels(texture, fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect. fTop,
91 fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDi rtyRect.fTop, 91 fDirtyRect.width(), fDirtyRect.height(), fConfig, dataPtr, rowBy tes);
92 fDirtyRect.width(), fDirtyRect.height(),
93 fConfig, dataPtr, rowBytes);
94 fDirtyRect.setEmpty(); 92 fDirtyRect.setEmpty();
95 SkDEBUGCODE(fDirty = false;) 93 SkDEBUGCODE(fDirty = false;)
96 } 94 }
97 95
98 void GrBatchAtlas::BatchPlot::resetRects() { 96 void GrBatchAtlas::BatchPlot::resetRects() {
99 if (fRects) { 97 if (fRects) {
100 fRects->reset(); 98 fRects->reset();
101 } 99 }
102 100
103 fGenID++; 101 fGenID++;
104 fID = CreateId(fIndex, fGenID); 102 fID = CreateId(fIndex, fGenID);
105 103
106 // zero out the plot 104 // zero out the plot
107 if (fData) { 105 if (fData) {
108 sk_bzero(fData, fBytesPerPixel * fWidth * fHeight); 106 sk_bzero(fData, fBytesPerPixel * fWidth * fHeight);
109 } 107 }
110 108
111 fDirtyRect.setEmpty(); 109 fDirtyRect.setEmpty();
112 SkDEBUGCODE(fDirty = false;) 110 SkDEBUGCODE(fDirty = false;)
113 } 111 }
114 112
115 ////////////////////////////////////////////////////////////////////////////////
116
117 class GrPlotUploader : public GrBatchUploader {
118 public:
119 GrPlotUploader(GrBatchAtlas::BatchPlot* plot, GrTexture* texture)
120 : INHERITED(plot->lastUploadToken())
121 , fPlot(SkRef(plot))
122 , fTexture(texture) {
123 SkASSERT(plot);
124 }
125
126 void upload(TextureUploader* uploader) override {
127 fPlot->uploadToTexture(uploader, fTexture);
128 }
129
130 private:
131 SkAutoTUnref<GrBatchAtlas::BatchPlot> fPlot;
132 GrTexture* fTexture;
133
134 typedef GrBatchUploader INHERITED;
135 };
136
137 /////////////////////////////////////////////////////////////////////////////// 113 ///////////////////////////////////////////////////////////////////////////////
138 114
139 GrBatchAtlas::GrBatchAtlas(GrTexture* texture, int numPlotsX, int numPlotsY) 115 GrBatchAtlas::GrBatchAtlas(GrTexture* texture, int numPlotsX, int numPlotsY)
140 : fTexture(texture) 116 : fTexture(texture)
141 , fAtlasGeneration(kInvalidAtlasGeneration + 1) { 117 , fAtlasGeneration(kInvalidAtlasGeneration + 1) {
142 118
143 int plotWidth = texture->width() / numPlotsX; 119 int plotWidth = texture->width() / numPlotsX;
144 int plotHeight = texture->height() / numPlotsY; 120 int plotHeight = texture->height() / numPlotsY;
145 SkASSERT(numPlotsX * numPlotsY <= BulkUseTokenUpdater::kMaxPlots); 121 SkASSERT(numPlotsX * numPlotsY <= BulkUseTokenUpdater::kMaxPlots);
146 SkASSERT(plotWidth * numPlotsX == texture->width()); 122 SkASSERT(plotWidth * numPlotsX == texture->width());
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after
178 (*fEvictionCallbacks[i].fFunc)(id, fEvictionCallbacks[i].fData); 154 (*fEvictionCallbacks[i].fFunc)(id, fEvictionCallbacks[i].fData);
179 } 155 }
180 } 156 }
181 157
182 inline void GrBatchAtlas::updatePlot(GrDrawBatch::Target* target, AtlasID* id, B atchPlot* plot) { 158 inline void GrBatchAtlas::updatePlot(GrDrawBatch::Target* target, AtlasID* id, B atchPlot* plot) {
183 this->makeMRU(plot); 159 this->makeMRU(plot);
184 160
185 // If our most recent upload has already occurred then we have to insert a n ew 161 // If our most recent upload has already occurred then we have to insert a n ew
186 // upload. Otherwise, we already have a scheduled upload that hasn't yet ocu rred. 162 // upload. Otherwise, we already have a scheduled upload that hasn't yet ocu rred.
187 // This new update will piggy back on that previously scheduled update. 163 // This new update will piggy back on that previously scheduled update.
188 if (target->hasTokenBeenFlushed(plot->lastUploadToken())) { 164 if (target->hasDrawBeenFlushed(plot->lastUploadToken())) {
189 plot->setLastUploadToken(target->asapToken()); 165 // With C++14 we could move sk_sp into lambda to only ref once.
190 SkAutoTUnref<GrPlotUploader> uploader(new GrPlotUploader(plot, fTexture) ); 166 sk_sp<BatchPlot> plotsp(SkRef(plot));
191 target->upload(uploader); 167 GrTexture* texture = fTexture;
168 GrBatchDrawToken lastUploadToken = target->addAsapUpload(
169 [plotsp, texture] (GrDrawBatch::WritePixelsFn& writePixels) {
170 plotsp->uploadToTexture(writePixels, texture);
171 }
172 );
173 plot->setLastUploadToken(lastUploadToken);
192 } 174 }
193 *id = plot->id(); 175 *id = plot->id();
194 } 176 }
195 177
196 bool GrBatchAtlas::addToAtlas(AtlasID* id, GrDrawBatch::Target* batchTarget, 178 bool GrBatchAtlas::addToAtlas(AtlasID* id, GrDrawBatch::Target* target,
197 int width, int height, const void* image, SkIPoint 16* loc) { 179 int width, int height, const void* image, SkIPoint 16* loc) {
198 // We should already have a texture, TODO clean this up 180 // We should already have a texture, TODO clean this up
199 SkASSERT(fTexture); 181 SkASSERT(fTexture);
200 182
201 // now look through all allocated plots for one we can share, in Most Recent ly Refed order 183 // now look through all allocated plots for one we can share, in Most Recent ly Refed order
202 GrBatchPlotList::Iter plotIter; 184 GrBatchPlotList::Iter plotIter;
203 plotIter.init(fPlotList, GrBatchPlotList::Iter::kHead_IterStart); 185 plotIter.init(fPlotList, GrBatchPlotList::Iter::kHead_IterStart);
204 BatchPlot* plot; 186 BatchPlot* plot;
205 while ((plot = plotIter.get())) { 187 while ((plot = plotIter.get())) {
206 SkASSERT(GrBytesPerPixel(fTexture->desc().fConfig) == plot->bpp()); 188 SkASSERT(GrBytesPerPixel(fTexture->desc().fConfig) == plot->bpp());
207 if (plot->addSubImage(width, height, image, loc)) { 189 if (plot->addSubImage(width, height, image, loc)) {
208 this->updatePlot(batchTarget, id, plot); 190 this->updatePlot(target, id, plot);
209 return true; 191 return true;
210 } 192 }
211 plotIter.next(); 193 plotIter.next();
212 } 194 }
213 195
214 // If the above fails, then see if the least recently refed plot has already been flushed to the 196 // If the above fails, then see if the least recently refed plot has already been flushed to the
215 // gpu 197 // gpu
216 plot = fPlotList.tail(); 198 plot = fPlotList.tail();
217 SkASSERT(plot); 199 SkASSERT(plot);
218 if (batchTarget->hasTokenBeenFlushed(plot->lastUseToken())) { 200 if (target->hasDrawBeenFlushed(plot->lastUseToken())) {
219 this->processEviction(plot->id()); 201 this->processEviction(plot->id());
220 plot->resetRects(); 202 plot->resetRects();
221 SkASSERT(GrBytesPerPixel(fTexture->desc().fConfig) == plot->bpp()); 203 SkASSERT(GrBytesPerPixel(fTexture->desc().fConfig) == plot->bpp());
222 SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image, loc); 204 SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image, loc);
223 SkASSERT(verify); 205 SkASSERT(verify);
224 this->updatePlot(batchTarget, id, plot); 206 this->updatePlot(target, id, plot);
225 fAtlasGeneration++; 207 fAtlasGeneration++;
226 return true; 208 return true;
227 } 209 }
228 210
229 // The least recently used plot hasn't been flushed to the gpu yet, however, if we have flushed 211 // If this plot has been used in a draw that is currently being prepared by a batch, then we
230 // it to the batch target than we can reuse it. Our last use token is guara nteed to be less 212 // have to fail. This gives the batch a chance to enqueue the draw, and call back into this
231 // than or equal to the current token. If its 'less than' the current token , than we can spin 213 // function. When that draw is enqueued, the draw token advances, and the su bsequent call will
232 // off the plot (ie let the batch target manage it) and create a new plot in its place in our 214 // continue past this branch and prepare an inline upload that will occur af ter the enqueued
233 // array. If it is equal to the currentToken, then the caller has to flush draws to the batch 215 // draw which references the plot's pre-upload content.
234 // target so we can spin off the plot 216 if (plot->lastUseToken() == target->nextDrawToken()) {
235 if (plot->lastUseToken() == batchTarget->currentToken()) {
236 return false; 217 return false;
237 } 218 }
238 219
239 SkASSERT(plot->lastUseToken() < batchTarget->currentToken());
240 SkASSERT(!batchTarget->hasTokenBeenFlushed(batchTarget->currentToken()));
241
242 SkASSERT(!plot->unique()); // The GrPlotUpdater should have a ref too 220 SkASSERT(!plot->unique()); // The GrPlotUpdater should have a ref too
243 221
244 this->processEviction(plot->id()); 222 this->processEviction(plot->id());
245 fPlotList.remove(plot); 223 fPlotList.remove(plot);
246 SkAutoTUnref<BatchPlot>& newPlot = fPlotArray[plot->index()]; 224 SkAutoTUnref<BatchPlot>& newPlot = fPlotArray[plot->index()];
247 newPlot.reset(plot->clone()); 225 newPlot.reset(plot->clone());
248 226
249 fPlotList.addToHead(newPlot.get()); 227 fPlotList.addToHead(newPlot.get());
250 SkASSERT(GrBytesPerPixel(fTexture->desc().fConfig) == newPlot->bpp()); 228 SkASSERT(GrBytesPerPixel(fTexture->desc().fConfig) == newPlot->bpp());
251 SkDEBUGCODE(bool verify = )newPlot->addSubImage(width, height, image, loc); 229 SkDEBUGCODE(bool verify = )newPlot->addSubImage(width, height, image, loc);
252 SkASSERT(verify); 230 SkASSERT(verify);
253 231
254 // Note that this plot will be uploaded inline with the draws whereas the 232 // Note that this plot will be uploaded inline with the draws whereas the
255 // one it displaced most likely was uploaded asap. 233 // one it displaced most likely was uploaded asap.
256 newPlot->setLastUploadToken(batchTarget->currentToken()); 234 // With C++14 we could move sk_sp into lambda to only ref once.
257 SkAutoTUnref<GrPlotUploader> uploader(new GrPlotUploader(newPlot, fTexture)) ; 235 sk_sp<BatchPlot> plotsp(SkRef(newPlot.get()));
258 batchTarget->upload(uploader); 236 GrTexture* texture = fTexture;
237 GrBatchDrawToken lastUploadToken = target->addInlineUpload(
238 [plotsp, texture] (GrDrawBatch::WritePixelsFn& writePixels) {
239 plotsp->uploadToTexture(writePixels, texture);
240 }
241 );
242 newPlot->setLastUploadToken(lastUploadToken);
243
259 *id = newPlot->id(); 244 *id = newPlot->id();
260 245
261 fAtlasGeneration++; 246 fAtlasGeneration++;
262 return true; 247 return true;
263 } 248 }
OLDNEW
« no previous file with comments | « src/gpu/GrBatchAtlas.h ('k') | src/gpu/GrBatchFlushState.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698