Chromium Code Reviews

Side by Side Diff: src/gpu/GrBatchAtlas.cpp

Issue 1413403009: Clean up GrBatchAtlas a bit (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: Fix bpp size Created 5 years, 1 month ago
1 /* 1 /*
2 * Copyright 2015 Google Inc. 2 * Copyright 2015 Google Inc.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license that can be 4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file. 5 * found in the LICENSE file.
6 */ 6 */
7 7
8 #include "GrBatchAtlas.h" 8 #include "GrBatchAtlas.h"
9 #include "GrBatchFlushState.h" 9 #include "GrBatchFlushState.h"
10 #include "GrRectanizer.h" 10 #include "GrRectanizer.h"
11 #include "GrTracing.h" 11 #include "GrTracing.h"
12 #include "GrVertexBuffer.h" 12 #include "GrVertexBuffer.h"
13 13
14 static inline void adjust_for_offset(SkIPoint16* loc, const SkIPoint16& offset) { 14 // The backing GrTexture for a GrBatchAtlas is broken into a spatial grid of BatchPlots.
15 loc->fX += offset.fX; 15 // The BatchPlots keep track of subimage placement via their GrRectanizer. A BatchPlot
16 loc->fY += offset.fY; 16 // manages the lifetime of its data using two tokens, a last use token and a last upload token.
17 } 17 // Once a BatchPlot is "full" (i.e. there is no room for the new subimage according to the
18 18 // GrRectanizer), it can no longer be used unless the last use of the GrPlot has already been
19 static GrBatchAtlas::AtlasID create_id(uint32_t index, uint64_t generation) {
20 SkASSERT(index < (1 << 16));
21 SkASSERT(generation < ((uint64_t)1 << 48));
22 return generation << 16 | index;
23 }
24
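
For reference, create_id() here (and CreateId() in the new patch) packs a 16-bit plot index into the low bits of the 64-bit AtlasID and a 48-bit generation above it. Because resetRects() bumps the generation, an AtlasID handed out before an eviction stops matching in hasID(). A minimal decode sketch, assuming the GetIndexFromID()/GetGenerationFromID() helpers declared in GrBatchAtlas.h mirror this layout:

    // Sketch only: unpack an AtlasID built as (generation << 16 | index).
    static uint32_t get_index_from_id(GrBatchAtlas::AtlasID id) {
        return id & 0xffff;                              // low 16 bits: plot index
    }
    static uint64_t get_generation_from_id(GrBatchAtlas::AtlasID id) {
        return (id >> 16) & (((uint64_t)1 << 48) - 1);   // next 48 bits: generation
    }
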
25 // The backing GrTexture for a GrBatchAtlas is broken into a spatial grid of GrBatchPlots.
26 // The GrBatchPlots keep track of subimage placement via their GrRectanizer. In turn, a GrBatchPlot
27 // manages the lifetime of its data using two tokens, a last ref toke and a last upload token.
28 // Once a GrBatchPlot is "full" (i.e. there is no room for the new subimage according to the
29 // GrRectanizer), it can no longer be used unless the last ref on the GrPlot has already been
30 // flushed through to the gpu. 19 // flushed through to the gpu.
31 20
32 class BatchPlot : public SkRefCnt { 21 class BatchPlot : public SkRefCnt {
33 public:
34 SK_DECLARE_INTERNAL_LLIST_INTERFACE(BatchPlot); 22 SK_DECLARE_INTERNAL_LLIST_INTERFACE(BatchPlot);
35 23
36 // index() refers to the index of the plot in the owning GrAtlas's plot array. genID() is a 24 public:
37 // monotonically incrementing number which is bumped every time the cpu backing store is 25 // index() is a unique id for the plot relative to the owning GrAtlas. genID() is a
38 // wiped, or when the plot itself is evicted from the atlas(ie, there is continuity in genID() 26 // monotonically incremented number which is bumped every time this plot is
39 // across atlas spills) 27 // evicted from the cache (i.e., there is continuity in genID() across atlas spills).
40 uint32_t index() const { return fIndex; } 28 uint32_t index() const { return fIndex; }
41 uint64_t genID() const { return fGenID; } 29 uint64_t genID() const { return fGenID; }
42 GrBatchAtlas::AtlasID id() { 30 GrBatchAtlas::AtlasID id() const {
43 SkASSERT(GrBatchAtlas::kInvalidAtlasID != fID); 31 SkASSERT(GrBatchAtlas::kInvalidAtlasID != fID);
44 return fID; 32 return fID;
45 } 33 }
34 SkDEBUGCODE(size_t bpp() const { return fBytesPerPixel; })
46 35
47 GrTexture* texture() const { return fTexture; } 36 bool addSubImage(int width, int height, const void* image, SkIPoint16* loc) {
37 SkASSERT(width <= fWidth && height <= fHeight);
48 38
49 bool addSubImage(int width, int height, const void* image, SkIPoint16* loc, size_t rowBytes) { 39 if (!fRects) {
40 fRects = GrRectanizer::Factory(fWidth, fHeight);
41 }
42
50 if (!fRects->addRect(width, height, loc)) { 43 if (!fRects->addRect(width, height, loc)) {
51 return false; 44 return false;
52 } 45 }
53 46
54 if (!fData) { 47 if (!fData) {
55 fData = reinterpret_cast<unsigned char*>(sk_calloc_throw(fBytesPerPixel * fWidth * 48 fData = reinterpret_cast<unsigned char*>(sk_calloc_throw(fBytesPerPixel * fWidth *
56 fHeight)); 49 fHeight));
57 } 50 }
51 size_t rowBytes = width * fBytesPerPixel;
58 const unsigned char* imagePtr = (const unsigned char*)image; 52 const unsigned char* imagePtr = (const unsigned char*)image;
59 // point ourselves at the right starting spot 53 // point ourselves at the right starting spot
60 unsigned char* dataPtr = fData; 54 unsigned char* dataPtr = fData;
61 dataPtr += fBytesPerPixel * fWidth * loc->fY; 55 dataPtr += fBytesPerPixel * fWidth * loc->fY;
62 dataPtr += fBytesPerPixel * loc->fX; 56 dataPtr += fBytesPerPixel * loc->fX;
63 // copy into the data buffer 57 // copy into the data buffer
64 for (int i = 0; i < height; ++i) { 58 for (int i = 0; i < height; ++i) {
65 memcpy(dataPtr, imagePtr, rowBytes); 59 memcpy(dataPtr, imagePtr, rowBytes);
66 dataPtr += fBytesPerPixel * fWidth; 60 dataPtr += fBytesPerPixel * fWidth;
67 imagePtr += rowBytes; 61 imagePtr += rowBytes;
68 } 62 }
69 63
70 fDirtyRect.join(loc->fX, loc->fY, loc->fX + width, loc->fY + height); 64 fDirtyRect.join(loc->fX, loc->fY, loc->fX + width, loc->fY + height);
71 adjust_for_offset(loc, fOffset); 65
66 loc->fX += fOffset.fX;
67 loc->fY += fOffset.fY;
72 SkDEBUGCODE(fDirty = true;) 68 SkDEBUGCODE(fDirty = true;)
73 69
74 return true; 70 return true;
75 } 71 }
76 72
77 // to manage the lifetime of a plot, we use two tokens. We use last upload token to know 73 // To manage the lifetime of a plot, we use two tokens. We use the last upload token to know
78 // we can 'piggy back' uploads, ie if the last upload hasn't been flushed to gpu, we don't need 74 // when we can 'piggy back' uploads, ie if the last upload hasn't been flushed to gpu, we don't
79 // to issue a new upload even if we update the cpu backing store. We use lastref to determine 75 // need to issue a new upload even if we update the cpu backing store. We use lastUse to
80 // when we can evict a plot from the cache, ie if the last ref has already flushed through 76 // determine when we can evict a plot from the cache, ie if the last use has already flushed
81 // the gpu then we can reuse the plot 77 // through the gpu then we can reuse the plot.
82 GrBatchToken lastUploadToken() const { return fLastUpload; } 78 GrBatchToken lastUploadToken() const { return fLastUpload; }
83 GrBatchToken lastUseToken() const { return fLastUse; } 79 GrBatchToken lastUseToken() const { return fLastUse; }
84 void setLastUploadToken(GrBatchToken batchToken) { 80 void setLastUploadToken(GrBatchToken batchToken) {
85 SkASSERT(batchToken >= fLastUpload); 81 SkASSERT(batchToken >= fLastUpload);
86 fLastUpload = batchToken; 82 fLastUpload = batchToken;
87 } 83 }
88 void setLastUseToken(GrBatchToken batchToken) { 84 void setLastUseToken(GrBatchToken batchToken) {
89 SkASSERT(batchToken >= fLastUse); 85 SkASSERT(batchToken >= fLastUse);
90 fLastUse = batchToken; 86 fLastUse = batchToken;
91 } 87 }
92 88
93 void uploadToTexture(GrBatchUploader::TextureUploader* uploader) { 89 void uploadToTexture(GrBatchUploader::TextureUploader* uploader, GrTexture* texture) {
94 // We should only be issuing uploads if we are in fact dirty 90 // We should only be issuing uploads if we are in fact dirty
95 SkASSERT(fDirty && fData && fTexture); 91 SkASSERT(fDirty && fData && texture);
96 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrBatchPlot::uploadToTexture"); 92 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrBatchPlot::uploadToTexture");
97 size_t rowBytes = fBytesPerPixel * fRects->width(); 93 size_t rowBytes = fBytesPerPixel * fWidth;
98 const unsigned char* dataPtr = fData; 94 const unsigned char* dataPtr = fData;
99 dataPtr += rowBytes * fDirtyRect.fTop; 95 dataPtr += rowBytes * fDirtyRect.fTop;
100 dataPtr += fBytesPerPixel * fDirtyRect.fLeft; 96 dataPtr += fBytesPerPixel * fDirtyRect.fLeft;
101 uploader->writeTexturePixels(fTexture, 97 uploader->writeTexturePixels(texture,
102 fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop, 98 fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop,
103 fDirtyRect.width(), fDirtyRect.height(), 99 fDirtyRect.width(), fDirtyRect.height(),
104 fTexture->config(), dataPtr, rowBytes); 100 fConfig, dataPtr, rowBytes);
105 fDirtyRect.setEmpty(); 101 fDirtyRect.setEmpty();
106 SkDEBUGCODE(fDirty = false;) 102 SkDEBUGCODE(fDirty = false;)
107 } 103 }
108 104
109 void resetRects() { 105 void resetRects() {
110 SkASSERT(fRects); 106 if (fRects) {
111 fRects->reset(); 107 fRects->reset();
108 }
109
112 fGenID++; 110 fGenID++;
113 fID = create_id(fIndex, fGenID); 111 fID = CreateId(fIndex, fGenID);
114 112
115 // zero out the plot 113 // zero out the plot
116 if (fData) { 114 if (fData) {
117 sk_bzero(fData, fBytesPerPixel * fWidth * fHeight); 115 sk_bzero(fData, fBytesPerPixel * fWidth * fHeight);
118 } 116 }
119 117
120 fDirtyRect.setEmpty(); 118 fDirtyRect.setEmpty();
121 SkDEBUGCODE(fDirty = false;) 119 SkDEBUGCODE(fDirty = false;)
122 } 120 }
123 121
124 uint32_t x() const { return fX; }
125 uint32_t y() const { return fY; }
126
127 private: 122 private:
128 BatchPlot() 123 BatchPlot(int index, uint64_t genID, int offX, int offY, int width, int height,
124 GrPixelConfig config)
129 : fLastUpload(0) 125 : fLastUpload(0)
130 , fLastUse(0) 126 , fLastUse(0)
131 , fIndex(-1) 127 , fIndex(index)
132 , fGenID(-1) 128 , fGenID(genID)
133 , fID(0) 129 , fID(CreateId(fIndex, fGenID))
134 , fData(nullptr) 130 , fData(nullptr)
135 , fWidth(0) 131 , fWidth(width)
136 , fHeight(0) 132 , fHeight(height)
137 , fX(0) 133 , fX(offX)
138 , fY(0) 134 , fY(offY)
139 , fTexture(nullptr)
140 , fRects(nullptr) 135 , fRects(nullptr)
141 , fAtlas(nullptr) 136 , fOffset(SkIPoint16::Make(fX * fWidth, fY * fHeight))
142 , fBytesPerPixel(1) 137 , fConfig(config)
138 , fBytesPerPixel(GrBytesPerPixel(config))
143 #ifdef SK_DEBUG 139 #ifdef SK_DEBUG
144 , fDirty(false) 140 , fDirty(false)
145 #endif 141 #endif
146 { 142 {
147 fOffset.set(0, 0); 143 fDirtyRect.setEmpty();
148 } 144 }
149 145
150 ~BatchPlot() { 146 ~BatchPlot() override {
151 sk_free(fData); 147 sk_free(fData);
152 fData = nullptr;
153 delete fRects; 148 delete fRects;
154 } 149 }
155 150
156 void init(GrBatchAtlas* atlas, GrTexture* texture, int index, uint64_t generation, 151 // Create a clone of this plot. The cloned plot will take the place of the
157 int offX, int offY, int width, int height, size_t bpp) { 152 // current plot in the atlas.
158 fIndex = index; 153 BatchPlot* clone() const {
159 fGenID = generation; 154 return new BatchPlot(fIndex, fGenID+1, fX, fY, fWidth, fHeight, fConfig);
160 fID = create_id(index, generation);
161 fWidth = width;
162 fHeight = height;
163 fX = offX;
164 fY = offY;
165 fRects = GrRectanizer::Factory(width, height);
166 fAtlas = atlas;
167 fOffset.set(offX * width, offY * height);
168 fBytesPerPixel = bpp;
169 fData = nullptr;
170 fDirtyRect.setEmpty();
171 SkDEBUGCODE(fDirty = false;)
172 fTexture = texture;
173 } 155 }
174 156
175 GrBatchToken fLastUpload; 157 static GrBatchAtlas::AtlasID CreateId(uint32_t index, uint64_t generation) {
176 GrBatchToken fLastUse; 158 SkASSERT(index < (1 << 16));
159 SkASSERT(generation < ((uint64_t)1 << 48));
160 return generation << 16 | index;
161 }
177 162
178 uint32_t fIndex; 163 GrBatchToken fLastUpload;
179 uint64_t fGenID; 164 GrBatchToken fLastUse;
165
166 const uint32_t fIndex;
167 uint64_t fGenID;
180 GrBatchAtlas::AtlasID fID; 168 GrBatchAtlas::AtlasID fID;
181 unsigned char* fData; 169 unsigned char* fData;
182 uint32_t fWidth; 170 const int fWidth;
183 uint32_t fHeight; 171 const int fHeight;
184 uint32_t fX; 172 const int fX;
185 uint32_t fY; 173 const int fY;
186 GrTexture* fTexture; 174 GrRectanizer* fRects;
187 GrRectanizer* fRects; 175 const SkIPoint16 fOffset; // the offset of the plot in the backing texture
188 GrBatchAtlas* fAtlas; 176 const GrPixelConfig fConfig;
189 SkIPoint16 fOffset; // the offset of the plot in the backing texture 177 const size_t fBytesPerPixel;
190 size_t fBytesPerPixel; 178 SkIRect fDirtyRect;
191 SkIRect fDirtyRect; 179 SkDEBUGCODE(bool fDirty;)
192 SkDEBUGCODE(bool fDirty;)
193 180
194 friend class GrBatchAtlas; 181 friend class GrBatchAtlas;
195 182
196 typedef SkRefCnt INHERITED; 183 typedef SkRefCnt INHERITED;
197 }; 184 };
198 185
199 //////////////////////////////////////////////////////////////////////////////// 186 ////////////////////////////////////////////////////////////////////////////////
200 187
201 class GrPlotUploader : public GrBatchUploader { 188 class GrPlotUploader : public GrBatchUploader {
202 public: 189 public:
203 GrPlotUploader(BatchPlot* plot) 190 GrPlotUploader(BatchPlot* plot, GrTexture* texture)
204 : INHERITED(plot->lastUploadToken()) 191 : INHERITED(plot->lastUploadToken())
205 , fPlot(SkRef(plot)) { 192 , fPlot(SkRef(plot))
193 , fTexture(texture) {
206 SkASSERT(plot); 194 SkASSERT(plot);
207 } 195 }
208 196
209 void upload(TextureUploader* uploader) override { 197 void upload(TextureUploader* uploader) override {
210 fPlot->uploadToTexture(uploader); 198 fPlot->uploadToTexture(uploader, fTexture);
211 } 199 }
212 200
213 private: 201 private:
214 SkAutoTUnref<BatchPlot> fPlot; 202 SkAutoTUnref<BatchPlot> fPlot;
203 GrTexture* fTexture;
215 204
216 typedef GrBatchUploader INHERITED; 205 typedef GrBatchUploader INHERITED;
217 }; 206 };
218 207
219 /////////////////////////////////////////////////////////////////////////////// 208 ///////////////////////////////////////////////////////////////////////////////
220 209
221 GrBatchAtlas::GrBatchAtlas(GrTexture* texture, int numPlotsX, int numPlotsY) 210 GrBatchAtlas::GrBatchAtlas(GrTexture* texture, int numPlotsX, int numPlotsY)
222 : fTexture(texture) 211 : fTexture(texture)
223 , fNumPlotsX(numPlotsX)
224 , fNumPlotsY(numPlotsY)
225 , fPlotWidth(texture->width() / numPlotsX)
226 , fPlotHeight(texture->height() / numPlotsY)
227 , fAtlasGeneration(kInvalidAtlasGeneration + 1) { 212 , fAtlasGeneration(kInvalidAtlasGeneration + 1) {
228 SkASSERT(fNumPlotsX * fNumPlotsY <= BulkUseTokenUpdater::kMaxPlots); 213
229 SkASSERT(fPlotWidth * fNumPlotsX == static_cast<uint32_t>(texture->width())); 214 int plotWidth = texture->width() / numPlotsX;
230 SkASSERT(fPlotHeight * fNumPlotsY == static_cast<uint32_t>(texture->height())); 215 int plotHeight = texture->height() / numPlotsY;
216 SkASSERT(numPlotsX * numPlotsY <= BulkUseTokenUpdater::kMaxPlots);
217 SkASSERT(plotWidth * numPlotsX == texture->width());
218 SkASSERT(plotHeight * numPlotsY == texture->height());
219
220 SkDEBUGCODE(fNumPlots = numPlotsX * numPlotsY;)
231 221
232 // We currently do not support compressed atlases... 222 // We currently do not support compressed atlases...
233 SkASSERT(!GrPixelConfigIsCompressed(texture->desc().fConfig)); 223 SkASSERT(!GrPixelConfigIsCompressed(texture->desc().fConfig));
234 224
235 // set up allocated plots 225 // set up allocated plots
236 fBPP = GrBytesPerPixel(texture->desc().fConfig); 226 fPlotArray = new SkAutoTUnref<BatchPlot>[numPlotsX * numPlotsY];
237 fPlotArray = new SkAutoTUnref<BatchPlot>[(fNumPlotsX * fNumPlotsY)];
238 227
239 SkAutoTUnref<BatchPlot>* currPlot = fPlotArray; 228 SkAutoTUnref<BatchPlot>* currPlot = fPlotArray;
240 for (int y = fNumPlotsY - 1, r = 0; y >= 0; --y, ++r) { 229 for (int y = numPlotsY - 1, r = 0; y >= 0; --y, ++r) {
241 for (int x = fNumPlotsX - 1, c = 0; x >= 0; --x, ++c) { 230 for (int x = numPlotsX - 1, c = 0; x >= 0; --x, ++c) {
242 uint32_t id = r * fNumPlotsX + c; 231 uint32_t index = r * numPlotsX + c;
243 currPlot->reset(new BatchPlot); 232 currPlot->reset(new BatchPlot(index, 1, x, y, plotWidth, plotHeight,
244 (*currPlot)->init(this, texture, id, 1, x, y, fPlotWidth, fPlotHeight, fBPP); 233 texture->desc().fConfig));
245 234
246 // build LRU list 235 // build LRU list
247 fPlotList.addToHead(currPlot->get()); 236 fPlotList.addToHead(currPlot->get());
248 ++currPlot; 237 ++currPlot;
249 } 238 }
250 } 239 }
251 } 240 }
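
As a worked illustration of the grid math asserted in this constructor (the sizes here are hypothetical, not taken from this CL):

    // Hypothetical example: a 512x512 atlas texture split into a 4x4 plot grid.
    const int texW = 512, texH = 512;
    const int numPlotsX = 4, numPlotsY = 4;
    const int plotWidth  = texW / numPlotsX;    // 128
    const int plotHeight = texH / numPlotsY;    // 128
    SkASSERT(plotWidth * numPlotsX == texW);    // same divisibility checks as the ctor
    SkASSERT(plotHeight * numPlotsY == texH);
    // A plot at grid cell (offX, offY) then covers the region starting at
    // (offX * plotWidth, offY * plotHeight), which is the fOffset each BatchPlot stores.
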
252 241
253 GrBatchAtlas::~GrBatchAtlas() { 242 GrBatchAtlas::~GrBatchAtlas() {
254 SkSafeUnref(fTexture); 243 SkSafeUnref(fTexture);
(...skipping 16 matching lines...)
271 } 260 }
272 261
273 inline void GrBatchAtlas::updatePlot(GrDrawBatch::Target* target, AtlasID* id, B atchPlot* plot) { 262 inline void GrBatchAtlas::updatePlot(GrDrawBatch::Target* target, AtlasID* id, B atchPlot* plot) {
274 this->makeMRU(plot); 263 this->makeMRU(plot);
275 264
276 // If our most recent upload has already occurred then we have to insert a new 265 // If our most recent upload has already occurred then we have to insert a new
277 // upload. Otherwise, we already have a scheduled upload that hasn't yet occurred. 266 // upload. Otherwise, we already have a scheduled upload that hasn't yet occurred.
278 // This new update will piggy back on that previously scheduled update. 267 // This new update will piggy back on that previously scheduled update.
279 if (target->hasTokenBeenFlushed(plot->lastUploadToken())) { 268 if (target->hasTokenBeenFlushed(plot->lastUploadToken())) {
280 plot->setLastUploadToken(target->asapToken()); 269 plot->setLastUploadToken(target->asapToken());
281 SkAutoTUnref<GrPlotUploader> uploader(new GrPlotUploader(plot)); 270 SkAutoTUnref<GrPlotUploader> uploader(new GrPlotUploader(plot, fTexture));
282 target->upload(uploader); 271 target->upload(uploader);
283 } 272 }
284 *id = plot->id(); 273 *id = plot->id();
285 } 274 }
286 275
287 bool GrBatchAtlas::addToAtlas(AtlasID* id, GrDrawBatch::Target* batchTarget, 276 bool GrBatchAtlas::addToAtlas(AtlasID* id, GrDrawBatch::Target* batchTarget,
288 int width, int height, const void* image, SkIPoint16* loc) { 277 int width, int height, const void* image, SkIPoint16* loc) {
289 // We should already have a texture, TODO clean this up 278 // We should already have a texture, TODO clean this up
290 SkASSERT(fTexture && 279 SkASSERT(fTexture);
291 static_cast<uint32_t>(width) <= fPlotWidth &&
292 static_cast<uint32_t>(height) <= fPlotHeight);
293 280
294 // now look through all allocated plots for one we can share, in Most Recently Refed order 281 // now look through all allocated plots for one we can share, in Most Recently Refed order
295 GrBatchPlotList::Iter plotIter; 282 GrBatchPlotList::Iter plotIter;
296 plotIter.init(fPlotList, GrBatchPlotList::Iter::kHead_IterStart); 283 plotIter.init(fPlotList, GrBatchPlotList::Iter::kHead_IterStart);
297 BatchPlot* plot; 284 BatchPlot* plot;
298 while ((plot = plotIter.get())) { 285 while ((plot = plotIter.get())) {
299 if (plot->addSubImage(width, height, image, loc, fBPP * width)) { 286 SkASSERT(GrBytesPerPixel(fTexture->desc().fConfig) == plot->bpp());
287 if (plot->addSubImage(width, height, image, loc)) {
300 this->updatePlot(batchTarget, id, plot); 288 this->updatePlot(batchTarget, id, plot);
301 return true; 289 return true;
302 } 290 }
303 plotIter.next(); 291 plotIter.next();
304 } 292 }
305 293
306 // If the above fails, then see if the least recently refed plot has already been flushed to the 294 // If the above fails, then see if the least recently refed plot has already been flushed to the
307 // gpu 295 // gpu
308 plotIter.init(fPlotList, GrBatchPlotList::Iter::kTail_IterStart); 296 plot = fPlotList.tail();
309 plot = plotIter.get();
310 SkASSERT(plot); 297 SkASSERT(plot);
311 if (batchTarget->hasTokenBeenFlushed(plot->lastUseToken())) { 298 if (batchTarget->hasTokenBeenFlushed(plot->lastUseToken())) {
312 this->processEviction(plot->id()); 299 this->processEviction(plot->id());
313 plot->resetRects(); 300 plot->resetRects();
314 SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image, loc, fBPP * width); 301 SkASSERT(GrBytesPerPixel(fTexture->desc().fConfig) == plot->bpp());
302 SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image, loc);
315 SkASSERT(verify); 303 SkASSERT(verify);
316 this->updatePlot(batchTarget, id, plot); 304 this->updatePlot(batchTarget, id, plot);
317 fAtlasGeneration++; 305 fAtlasGeneration++;
318 return true; 306 return true;
319 } 307 }
320 308
321 // The least recently refed plot hasn't been flushed to the gpu yet, however, if we have flushed 309 // The least recently used plot hasn't been flushed to the gpu yet, however, if we have flushed
322 // it to the batch target than we can reuse it. Our last ref token is guaranteed to be less 310 // it to the batch target than we can reuse it. Our last use token is guaranteed to be less
323 // than or equal to the current token. If its 'less than' the current token, than we can spin 311 // than or equal to the current token. If its 'less than' the current token, than we can spin
324 // off the plot(ie let the batch target manage it) and create a new plot in its place in our 312 // off the plot (ie let the batch target manage it) and create a new plot in its place in our
325 // array. If it is equal to the currentToken, then the caller has to flush draws to the batch 313 // array. If it is equal to the currentToken, then the caller has to flush draws to the batch
326 // target so we can spin off the plot 314 // target so we can spin off the plot
327 if (plot->lastUseToken() == batchTarget->currentToken()) { 315 if (plot->lastUseToken() == batchTarget->currentToken()) {
328 return false; 316 return false;
329 } 317 }
330 318
331 // We take an extra ref here so our plot isn't deleted when we reset its ind ex in the array. 319 SkASSERT(plot->lastUseToken() < batchTarget->currentToken());
332 plot->ref(); 320 SkASSERT(!batchTarget->hasTokenBeenFlushed(batchTarget->currentToken()));
333 int index = plot->index(); 321
334 int x = plot->x(); 322 SkASSERT(!plot->unique()); // The GrPlotUpdater should have a ref too
335 int y = plot->y();
336 uint64_t generation = plot->genID();
337 323
338 this->processEviction(plot->id()); 324 this->processEviction(plot->id());
339 fPlotList.remove(plot); 325 fPlotList.remove(plot);
340 SkAutoTUnref<BatchPlot>& newPlot = fPlotArray[plot->index()]; 326 SkAutoTUnref<BatchPlot>& newPlot = fPlotArray[plot->index()];
341 newPlot.reset(new BatchPlot); 327 newPlot.reset(plot->clone());
342 newPlot->init(this, fTexture, index, ++generation, x, y, fPlotWidth, fPlotHeight, fBPP);
343 328
344 fPlotList.addToHead(newPlot.get()); 329 fPlotList.addToHead(newPlot.get());
345 SkDEBUGCODE(bool verify = )newPlot->addSubImage(width, height, image, loc, fBPP * width); 330 SkASSERT(GrBytesPerPixel(fTexture->desc().fConfig) == newPlot->bpp());
331 SkDEBUGCODE(bool verify = )newPlot->addSubImage(width, height, image, loc);
346 SkASSERT(verify); 332 SkASSERT(verify);
333
347 newPlot->setLastUploadToken(batchTarget->currentToken()); 334 newPlot->setLastUploadToken(batchTarget->currentToken());
348 SkAutoTUnref<GrPlotUploader> uploader(new GrPlotUploader(newPlot)); 335 SkAutoTUnref<GrPlotUploader> uploader(new GrPlotUploader(newPlot, fTexture));
349 batchTarget->upload(uploader); 336 batchTarget->upload(uploader);
350 *id = newPlot->id(); 337 *id = newPlot->id();
351 plot->unref(); 338
352 fAtlasGeneration++; 339 fAtlasGeneration++;
353 return true; 340 return true;
354 } 341 }
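
A caller-side sketch of the contract described above (names such as atlas, target, maskWidth, maskHeight, and maskData are placeholders, not part of this CL; how the pending draws get flushed depends on the caller):

    GrBatchAtlas::AtlasID id;
    SkIPoint16 loc;
    if (!atlas->addToAtlas(&id, target, maskWidth, maskHeight, maskData, &loc)) {
        // Every plot is still in use at the current token: flush the draws already
        // recorded against 'target', then retry addToAtlas().
    }
    // Mark the plot as used by this draw so it is not evicted before the draw flushes.
    atlas->setLastUseToken(id, target->currentToken());
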
355 342
356 bool GrBatchAtlas::hasID(AtlasID id) { 343 bool GrBatchAtlas::hasID(AtlasID id) {
357 uint32_t index = GetIndexFromID(id); 344 uint32_t index = GetIndexFromID(id);
358 SkASSERT(index < fNumPlotsX * fNumPlotsY); 345 SkASSERT(index < fNumPlots);
359 return fPlotArray[index]->genID() == GetGenerationFromID(id); 346 return fPlotArray[index]->genID() == GetGenerationFromID(id);
360 } 347 }
361 348
362 void GrBatchAtlas::setLastUseToken(AtlasID id, GrBatchToken batchToken) { 349 void GrBatchAtlas::setLastUseToken(AtlasID id, GrBatchToken batchToken) {
363 SkASSERT(this->hasID(id)); 350 SkASSERT(this->hasID(id));
364 uint32_t index = GetIndexFromID(id); 351 uint32_t index = GetIndexFromID(id);
365 SkASSERT(index < fNumPlotsX * fNumPlotsY); 352 SkASSERT(index < fNumPlots);
366 this->makeMRU(fPlotArray[index]); 353 this->makeMRU(fPlotArray[index]);
367 fPlotArray[index]->setLastUseToken(batchToken); 354 fPlotArray[index]->setLastUseToken(batchToken);
368 } 355 }
369 356
370 void GrBatchAtlas::setLastUseTokenBulk(const BulkUseTokenUpdater& updater, 357 void GrBatchAtlas::setLastUseTokenBulk(const BulkUseTokenUpdater& updater,
371 GrBatchToken batchToken) { 358 GrBatchToken batchToken) {
372 int count = updater.fPlotsToUpdate.count(); 359 int count = updater.fPlotsToUpdate.count();
373 for (int i = 0; i < count; i++) { 360 for (int i = 0; i < count; i++) {
374 BatchPlot* plot = fPlotArray[updater.fPlotsToUpdate[i]]; 361 BatchPlot* plot = fPlotArray[updater.fPlotsToUpdate[i]];
375 this->makeMRU(plot); 362 this->makeMRU(plot);
376 plot->setLastUseToken(batchToken); 363 plot->setLastUseToken(batchToken);
377 } 364 }
378 } 365 }