Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1045)

Side by Side Diff: src/gpu/GrBatchAtlas.cpp

Issue 1286043004: Make GrVertexBatch objects hold their own draws during GrDrawTarget flush (Closed) Base URL: https://skia.googlesource.com/skia.git@m
Patch Set: forward decl Created 5 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/gpu/GrBatchAtlas.h ('k') | src/gpu/GrBatchFlushState.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 /* 1 /*
2 * Copyright 2015 Google Inc. 2 * Copyright 2015 Google Inc.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license that can be 4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file. 5 * found in the LICENSE file.
6 */ 6 */
7 7
8 #include "GrBatchAtlas.h" 8 #include "GrBatchAtlas.h"
9 #include "GrBatchTarget.h" 9 #include "GrBatchFlushState.h"
10 #include "GrGpu.h"
11 #include "GrRectanizer.h" 10 #include "GrRectanizer.h"
12 #include "GrTracing.h" 11 #include "GrTracing.h"
13 #include "GrVertexBuffer.h" 12 #include "GrVertexBuffer.h"
14 13
15 static inline void adjust_for_offset(SkIPoint16* loc, const SkIPoint16& offset) { 14 static inline void adjust_for_offset(SkIPoint16* loc, const SkIPoint16& offset) {
16 loc->fX += offset.fX; 15 loc->fX += offset.fX;
17 loc->fY += offset.fY; 16 loc->fY += offset.fY;
18 } 17 }
19 18
20 static GrBatchAtlas::AtlasID create_id(uint32_t index, uint64_t generation) { 19 static GrBatchAtlas::AtlasID create_id(uint32_t index, uint64_t generation) {
21 SkASSERT(index < (1 << 16)); 20 SkASSERT(index < (1 << 16));
22 SkASSERT(generation < ((uint64_t)1 << 48)); 21 SkASSERT(generation < ((uint64_t)1 << 48));
23 return generation << 16 | index; 22 return generation << 16 | index;
24 } 23 }
25 24
26 // The backing GrTexture for a GrBatchAtlas is broken into a spatial grid of GrBatchPlots. 25 // The backing GrTexture for a GrBatchAtlas is broken into a spatial grid of GrBatchPlots.
27 // The GrBatchPlots keep track of subimage placement via their GrRectanizer. In turn, a GrBatchPlot 26 // The GrBatchPlots keep track of subimage placement via their GrRectanizer. In turn, a GrBatchPlot
28 // manages the lifetime of its data using two tokens, a last ref token and a last upload token. 27 // manages the lifetime of its data using two tokens, a last ref token and a last upload token.
29 // Once a GrBatchPlot is "full" (i.e. there is no room for the new subimage according to the 28 // Once a GrBatchPlot is "full" (i.e. there is no room for the new subimage according to the
30 // GrRectanizer), it can no longer be used unless the last ref on the GrPlot has already been 29 // GrRectanizer), it can no longer be used unless the last ref on the GrPlot has already been
31 // flushed through to the gpu. 30 // flushed through to the gpu.
32 31
33 class BatchPlot : public SkRefCnt { 32 class BatchPlot : public SkRefCnt {
34 public: 33 public:
35 typedef GrBatchAtlas::BatchToken BatchToken;
36
37 SK_DECLARE_INTERNAL_LLIST_INTERFACE(BatchPlot); 34 SK_DECLARE_INTERNAL_LLIST_INTERFACE(BatchPlot);
38 35
39 // index() refers to the index of the plot in the owning GrAtlas's plot array. genID() is a 36 // index() refers to the index of the plot in the owning GrAtlas's plot array. genID() is a
40 // monotonically incrementing number which is bumped every time the cpu backing store is 37 // monotonically incrementing number which is bumped every time the cpu backing store is
41 // wiped, or when the plot itself is evicted from the atlas(ie, there is continuity in genID() 38 // wiped, or when the plot itself is evicted from the atlas(ie, there is continuity in genID()
42 // across atlas spills) 39 // across atlas spills)
43 uint32_t index() const { return fIndex; } 40 uint32_t index() const { return fIndex; }
44 uint64_t genID() const { return fGenID; } 41 uint64_t genID() const { return fGenID; }
45 GrBatchAtlas::AtlasID id() { 42 GrBatchAtlas::AtlasID id() {
46 SkASSERT(GrBatchAtlas::kInvalidAtlasID != fID); 43 SkASSERT(GrBatchAtlas::kInvalidAtlasID != fID);
(...skipping 28 matching lines...) Expand all
75 SkDEBUGCODE(fDirty = true;) 72 SkDEBUGCODE(fDirty = true;)
76 73
77 return true; 74 return true;
78 } 75 }
79 76
80 // to manage the lifetime of a plot, we use two tokens. We use last upload token to know when 77 // to manage the lifetime of a plot, we use two tokens. We use last upload token to know when
81 // we can 'piggy back' uploads, ie if the last upload hasn't been flushed to gpu, we don't need 78 // we can 'piggy back' uploads, ie if the last upload hasn't been flushed to gpu, we don't need
82 // to issue a new upload even if we update the cpu backing store. We use lastref to determine 79 // to issue a new upload even if we update the cpu backing store. We use lastref to determine
83 // when we can evict a plot from the cache, ie if the last ref has already flushed through 80 // when we can evict a plot from the cache, ie if the last ref has already flushed through
84 // the gpu then we can reuse the plot 81 // the gpu then we can reuse the plot
85 BatchToken lastUploadToken() const { return fLastUpload; } 82 GrBatchToken lastUploadToken() const { return fLastUpload; }
86 BatchToken lastUseToken() const { return fLastUse; } 83 GrBatchToken lastUseToken() const { return fLastUse; }
87 void setLastUploadToken(BatchToken batchToken) { 84 void setLastUploadToken(GrBatchToken batchToken) {
88 SkASSERT(batchToken >= fLastUpload); 85 SkASSERT(batchToken >= fLastUpload);
89 fLastUpload = batchToken; 86 fLastUpload = batchToken;
90 } 87 }
91 void setLastUseToken(BatchToken batchToken) { 88 void setLastUseToken(GrBatchToken batchToken) {
92 SkASSERT(batchToken >= fLastUse); 89 SkASSERT(batchToken >= fLastUse);
93 fLastUse = batchToken; 90 fLastUse = batchToken;
94 } 91 }
95 92
96 void uploadToTexture(GrBatchTarget::TextureUploader uploader) { 93 void uploadToTexture(GrBatchUploader::TextureUploader* uploader) {
97 // We should only be issuing uploads if we are in fact dirty 94 // We should only be issuing uploads if we are in fact dirty
98 SkASSERT(fDirty && fData && fTexture); 95 SkASSERT(fDirty && fData && fTexture);
99 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrBatchPlot::upload ToTexture"); 96 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrBatchPlot::upload ToTexture");
100 size_t rowBytes = fBytesPerPixel * fRects->width(); 97 size_t rowBytes = fBytesPerPixel * fRects->width();
101 const unsigned char* dataPtr = fData; 98 const unsigned char* dataPtr = fData;
102 dataPtr += rowBytes * fDirtyRect.fTop; 99 dataPtr += rowBytes * fDirtyRect.fTop;
103 dataPtr += fBytesPerPixel * fDirtyRect.fLeft; 100 dataPtr += fBytesPerPixel * fDirtyRect.fLeft;
104 uploader.writeTexturePixels(fTexture, 101 uploader->writeTexturePixels(fTexture,
105 fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop, 102 fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop,
106 fDirtyRect.width(), fDirtyRect.height(), 103 fDirtyRect.width(), fDirtyRect.height(),
107 fTexture->config(), dataPtr, rowBytes); 104 fTexture->config(), dataPtr, rowBytes);
108 fDirtyRect.setEmpty(); 105 fDirtyRect.setEmpty();
109 SkDEBUGCODE(fDirty = false;) 106 SkDEBUGCODE(fDirty = false;)
110 } 107 }
111 108
112 void resetRects() { 109 void resetRects() {
113 SkASSERT(fRects); 110 SkASSERT(fRects);
114 fRects->reset(); 111 fRects->reset();
115 fGenID++; 112 fGenID++;
116 fID = create_id(fIndex, fGenID); 113 fID = create_id(fIndex, fGenID);
117 114
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after
168 fRects = GrRectanizer::Factory(width, height); 165 fRects = GrRectanizer::Factory(width, height);
169 fAtlas = atlas; 166 fAtlas = atlas;
170 fOffset.set(offX * width, offY * height); 167 fOffset.set(offX * width, offY * height);
171 fBytesPerPixel = bpp; 168 fBytesPerPixel = bpp;
172 fData = NULL; 169 fData = NULL;
173 fDirtyRect.setEmpty(); 170 fDirtyRect.setEmpty();
174 SkDEBUGCODE(fDirty = false;) 171 SkDEBUGCODE(fDirty = false;)
175 fTexture = texture; 172 fTexture = texture;
176 } 173 }
177 174
178 BatchToken fLastUpload; 175 GrBatchToken fLastUpload;
179 BatchToken fLastUse; 176 GrBatchToken fLastUse;
180 177
181 uint32_t fIndex; 178 uint32_t fIndex;
182 uint64_t fGenID; 179 uint64_t fGenID;
183 GrBatchAtlas::AtlasID fID; 180 GrBatchAtlas::AtlasID fID;
184 unsigned char* fData; 181 unsigned char* fData;
185 uint32_t fWidth; 182 uint32_t fWidth;
186 uint32_t fHeight; 183 uint32_t fHeight;
187 uint32_t fX; 184 uint32_t fX;
188 uint32_t fY; 185 uint32_t fY;
189 GrTexture* fTexture; 186 GrTexture* fTexture;
190 GrRectanizer* fRects; 187 GrRectanizer* fRects;
191 GrBatchAtlas* fAtlas; 188 GrBatchAtlas* fAtlas;
192 SkIPoint16 fOffset; // the offset of the plot in the backing texture 189 SkIPoint16 fOffset; // the offset of the plot in the backing texture
193 size_t fBytesPerPixel; 190 size_t fBytesPerPixel;
194 SkIRect fDirtyRect; 191 SkIRect fDirtyRect;
195 SkDEBUGCODE(bool fDirty;) 192 SkDEBUGCODE(bool fDirty;)
196 193
197 friend class GrBatchAtlas; 194 friend class GrBatchAtlas;
198 195
199 typedef SkRefCnt INHERITED; 196 typedef SkRefCnt INHERITED;
200 }; 197 };
201 198
202 //////////////////////////////////////////////////////////////////////////////// 199 ////////////////////////////////////////////////////////////////////////////////
203 200
204 class GrPlotUploader : public GrBatchTarget::Uploader { 201 class GrPlotUploader : public GrBatchUploader {
205 public: 202 public:
206 GrPlotUploader(BatchPlot* plot) 203 GrPlotUploader(BatchPlot* plot)
207 : INHERITED(plot->lastUploadToken()) 204 : INHERITED(plot->lastUploadToken())
208 , fPlot(SkRef(plot)) { 205 , fPlot(SkRef(plot)) {
209 SkASSERT(plot); 206 SkASSERT(plot);
210 } 207 }
211 208
212 void upload(GrBatchTarget::TextureUploader uploader) override { 209 void upload(TextureUploader* uploader) override {
213 fPlot->uploadToTexture(uploader); 210 fPlot->uploadToTexture(uploader);
214 } 211 }
215 212
216 private: 213 private:
217 SkAutoTUnref<BatchPlot> fPlot; 214 SkAutoTUnref<BatchPlot> fPlot;
218 215
219 typedef GrBatchTarget::Uploader INHERITED; 216 typedef GrBatchUploader INHERITED;
220 }; 217 };
221 218
222 /////////////////////////////////////////////////////////////////////////////// 219 ///////////////////////////////////////////////////////////////////////////////
223 220
224 GrBatchAtlas::GrBatchAtlas(GrTexture* texture, int numPlotsX, int numPlotsY) 221 GrBatchAtlas::GrBatchAtlas(GrTexture* texture, int numPlotsX, int numPlotsY)
225 : fTexture(texture) 222 : fTexture(texture)
226 , fNumPlotsX(numPlotsX) 223 , fNumPlotsX(numPlotsX)
227 , fNumPlotsY(numPlotsY) 224 , fNumPlotsY(numPlotsY)
228 , fPlotWidth(texture->width() / numPlotsX) 225 , fPlotWidth(texture->width() / numPlotsX)
229 , fPlotHeight(texture->height() / numPlotsY) 226 , fPlotHeight(texture->height() / numPlotsY)
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
266 263
267 void GrBatchAtlas::makeMRU(BatchPlot* plot) { 264 void GrBatchAtlas::makeMRU(BatchPlot* plot) {
268 if (fPlotList.head() == plot) { 265 if (fPlotList.head() == plot) {
269 return; 266 return;
270 } 267 }
271 268
272 fPlotList.remove(plot); 269 fPlotList.remove(plot);
273 fPlotList.addToHead(plot); 270 fPlotList.addToHead(plot);
274 } 271 }
275 272
276 inline void GrBatchAtlas::updatePlot(GrBatchTarget* batchTarget, AtlasID* id, Ba tchPlot* plot) { 273 inline void GrBatchAtlas::updatePlot(GrDrawBatch::Target* target, AtlasID* id, B atchPlot* plot) {
277 this->makeMRU(plot); 274 this->makeMRU(plot);
278 275
279 // If our most recent upload has already occurred then we have to insert a new 276 // If our most recent upload has already occurred then we have to insert a new
280 // upload. Otherwise, we already have a scheduled upload that hasn't yet occurred. 277 // upload. Otherwise, we already have a scheduled upload that hasn't yet occurred.
281 // This new update will piggy back on that previously scheduled update. 278 // This new update will piggy back on that previously scheduled update.
282 if (batchTarget->isIssued(plot->lastUploadToken())) { 279 if (target->hasTokenBeenFlushed(plot->lastUploadToken())) {
283 plot->setLastUploadToken(batchTarget->asapToken()); 280 plot->setLastUploadToken(target->asapToken());
284 SkAutoTUnref<GrPlotUploader> uploader(SkNEW_ARGS(GrPlotUploader, (plot)) ); 281 SkAutoTUnref<GrPlotUploader> uploader(SkNEW_ARGS(GrPlotUploader, (plot)) );
285 batchTarget->upload(uploader); 282 target->upload(uploader);
286 } 283 }
287 *id = plot->id(); 284 *id = plot->id();
288 } 285 }
289 286
290 bool GrBatchAtlas::addToAtlas(AtlasID* id, GrBatchTarget* batchTarget, 287 bool GrBatchAtlas::addToAtlas(AtlasID* id, GrDrawBatch::Target* batchTarget,
291 int width, int height, const void* image, SkIPoint 16* loc) { 288 int width, int height, const void* image, SkIPoint 16* loc) {
292 // We should already have a texture, TODO clean this up 289 // We should already have a texture, TODO clean this up
293 SkASSERT(fTexture && 290 SkASSERT(fTexture &&
294 static_cast<uint32_t>(width) <= fPlotWidth && 291 static_cast<uint32_t>(width) <= fPlotWidth &&
295 static_cast<uint32_t>(height) <= fPlotHeight); 292 static_cast<uint32_t>(height) <= fPlotHeight);
296 293
297 // now look through all allocated plots for one we can share, in Most Recent ly Refed order 294 // now look through all allocated plots for one we can share, in Most Recent ly Refed order
298 GrBatchPlotList::Iter plotIter; 295 GrBatchPlotList::Iter plotIter;
299 plotIter.init(fPlotList, GrBatchPlotList::Iter::kHead_IterStart); 296 plotIter.init(fPlotList, GrBatchPlotList::Iter::kHead_IterStart);
300 BatchPlot* plot; 297 BatchPlot* plot;
301 while ((plot = plotIter.get())) { 298 while ((plot = plotIter.get())) {
302 if (plot->addSubImage(width, height, image, loc, fBPP * width)) { 299 if (plot->addSubImage(width, height, image, loc, fBPP * width)) {
303 this->updatePlot(batchTarget, id, plot); 300 this->updatePlot(batchTarget, id, plot);
304 return true; 301 return true;
305 } 302 }
306 plotIter.next(); 303 plotIter.next();
307 } 304 }
308 305
309 // If the above fails, then see if the least recently refed plot has already been flushed to the 306 // If the above fails, then see if the least recently refed plot has already been flushed to the
310 // gpu 307 // gpu
311 plotIter.init(fPlotList, GrBatchPlotList::Iter::kTail_IterStart); 308 plotIter.init(fPlotList, GrBatchPlotList::Iter::kTail_IterStart);
312 plot = plotIter.get(); 309 plot = plotIter.get();
313 SkASSERT(plot); 310 SkASSERT(plot);
314 if (batchTarget->isIssued(plot->lastUseToken())) { 311 if (batchTarget->hasTokenBeenFlushed(plot->lastUseToken())) {
315 this->processEviction(plot->id()); 312 this->processEviction(plot->id());
316 plot->resetRects(); 313 plot->resetRects();
317 SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image, loc, fBPP * width); 314 SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image, loc, fBPP * width);
318 SkASSERT(verify); 315 SkASSERT(verify);
319 this->updatePlot(batchTarget, id, plot); 316 this->updatePlot(batchTarget, id, plot);
320 fAtlasGeneration++; 317 fAtlasGeneration++;
321 return true; 318 return true;
322 } 319 }
323 320
324 // The least recently refed plot hasn't been flushed to the gpu yet, however, if we have flushed 321 // The least recently refed plot hasn't been flushed to the gpu yet, however, if we have flushed
(...skipping 30 matching lines...) Expand all
355 fAtlasGeneration++; 352 fAtlasGeneration++;
356 return true; 353 return true;
357 } 354 }
358 355
359 bool GrBatchAtlas::hasID(AtlasID id) { 356 bool GrBatchAtlas::hasID(AtlasID id) {
360 uint32_t index = GetIndexFromID(id); 357 uint32_t index = GetIndexFromID(id);
361 SkASSERT(index < fNumPlotsX * fNumPlotsY); 358 SkASSERT(index < fNumPlotsX * fNumPlotsY);
362 return fPlotArray[index]->genID() == GetGenerationFromID(id); 359 return fPlotArray[index]->genID() == GetGenerationFromID(id);
363 } 360 }
364 361
365 void GrBatchAtlas::setLastUseToken(AtlasID id, BatchToken batchToken) { 362 void GrBatchAtlas::setLastUseToken(AtlasID id, GrBatchToken batchToken) {
366 SkASSERT(this->hasID(id)); 363 SkASSERT(this->hasID(id));
367 uint32_t index = GetIndexFromID(id); 364 uint32_t index = GetIndexFromID(id);
368 SkASSERT(index < fNumPlotsX * fNumPlotsY); 365 SkASSERT(index < fNumPlotsX * fNumPlotsY);
369 this->makeMRU(fPlotArray[index]); 366 this->makeMRU(fPlotArray[index]);
370 fPlotArray[index]->setLastUseToken(batchToken); 367 fPlotArray[index]->setLastUseToken(batchToken);
371 } 368 }
372 369
373 void GrBatchAtlas::setLastUseTokenBulk(const BulkUseTokenUpdater& updater, Batch Token batchToken) { 370 void GrBatchAtlas::setLastUseTokenBulk(const BulkUseTokenUpdater& updater,
371 GrBatchToken batchToken) {
374 int count = updater.fPlotsToUpdate.count(); 372 int count = updater.fPlotsToUpdate.count();
375 for (int i = 0; i < count; i++) { 373 for (int i = 0; i < count; i++) {
376 BatchPlot* plot = fPlotArray[updater.fPlotsToUpdate[i]]; 374 BatchPlot* plot = fPlotArray[updater.fPlotsToUpdate[i]];
377 this->makeMRU(plot); 375 this->makeMRU(plot);
378 plot->setLastUseToken(batchToken); 376 plot->setLastUseToken(batchToken);
379 } 377 }
380 } 378 }
OLDNEW
« no previous file with comments | « src/gpu/GrBatchAtlas.h ('k') | src/gpu/GrBatchFlushState.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698