OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "GrBatchAtlas.h" | 8 #include "GrBatchAtlas.h" |
9 #include "GrBatchFlushState.h" | 9 #include "GrBatchFlushState.h" |
10 #include "GrRectanizer.h" | 10 #include "GrRectanizer.h" |
11 #include "GrTracing.h" | 11 #include "GrTracing.h" |
12 #include "GrVertexBuffer.h" | 12 #include "GrVertexBuffer.h" |
13 | 13 |
14 // The backing GrTexture for a GrBatchAtlas is broken into a spatial grid of BatchPlots. | 14 //////////////////////////////////////////////////////////////////////////////// |
15 // The BatchPlots keep track of subimage placement via their GrRectanizer. A BatchPlot | |
16 // manages the lifetime of its data using two tokens, a last use token and a last upload token. | |
17 // Once a BatchPlot is "full" (i.e. there is no room for the new subimage according to the | |
18 // GrRectanizer), it can no longer be used unless the last use of the GrPlot has already been | |
19 // flushed through to the gpu. | |
20 | 15 |
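
The comment above is the whole lifetime model in two sentences, so a minimal standalone sketch may help. It assumes only that tokens are monotonically increasing integers; the helper name and the lastFlushedToken parameter are illustrative and not part of this file.

    #include <cstdint>

    using GrBatchToken = uint64_t;  // monotonically increasing draw/upload counter

    // A "full" BatchPlot may be recycled only once every draw that referenced it
    // has been flushed through to the GPU.
    static bool plotCanBeRecycled(GrBatchToken plotLastUse, GrBatchToken lastFlushedToken) {
        return plotLastUse <= lastFlushedToken;
    }
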
21 class BatchPlot : public SkRefCnt { | 16 GrBatchAtlas::BatchPlot::BatchPlot(int index, uint64_t genID, int offX, int offY, int width, |
22 SK_DECLARE_INTERNAL_LLIST_INTERFACE(BatchPlot); | 17 int height, GrPixelConfig config) |
| 18 : fLastUpload(0) |
| 19 , fLastUse(0) |
| 20 , fIndex(index) |
| 21 , fGenID(genID) |
| 22 , fID(CreateId(fIndex, fGenID)) |
| 23 , fData(nullptr) |
| 24 , fWidth(width) |
| 25 , fHeight(height) |
| 26 , fX(offX) |
| 27 , fY(offY) |
| 28 , fRects(nullptr) |
| 29 , fOffset(SkIPoint16::Make(fX * fWidth, fY * fHeight)) |
| 30 , fConfig(config) |
| 31 , fBytesPerPixel(GrBytesPerPixel(config)) |
| 32 #ifdef SK_DEBUG |
| 33 , fDirty(false) |
| 34 #endif |
| 35 { |
| 36 fDirtyRect.setEmpty(); |
| 37 } |
23 | 38 |
24 public: | 39 GrBatchAtlas::BatchPlot::~BatchPlot() { |
25 // index() is a unique id for the plot relative to the owning GrAtlas. genID() is a | 40 sk_free(fData); |
26 // monotonically incremented number which is bumped every time this plot is | 41 delete fRects; |
27 // evicted from the cache (i.e., there is continuity in genID() across atlas spills). | 42 } |
28 uint32_t index() const { return fIndex; } | |
29 uint64_t genID() const { return fGenID; } | |
30 GrBatchAtlas::AtlasID id() const { | |
31 SkASSERT(GrBatchAtlas::kInvalidAtlasID != fID); | |
32 return fID; | |
33 } | |
34 SkDEBUGCODE(size_t bpp() const { return fBytesPerPixel; }) | |
35 | 43 |
36 bool addSubImage(int width, int height, const void* image, SkIPoint16* loc) { | 44 bool GrBatchAtlas::BatchPlot::addSubImage(int width, int height, const void* image, |
37 SkASSERT(width <= fWidth && height <= fHeight); | 45 SkIPoint16* loc) { |
| 46 SkASSERT(width <= fWidth && height <= fHeight); |
38 | 47 |
39 if (!fRects) { | 48 if (!fRects) { |
40 fRects = GrRectanizer::Factory(fWidth, fHeight); | 49 fRects = GrRectanizer::Factory(fWidth, fHeight); |
41 } | |
42 | |
43 if (!fRects->addRect(width, height, loc)) { | |
44 return false; | |
45 } | |
46 | |
47 if (!fData) { | |
48 fData = reinterpret_cast<unsigned char*>(sk_calloc_throw(fBytesPerPixel * fWidth * | |
49 fHeight)); | |
50 } | |
51 size_t rowBytes = width * fBytesPerPixel; | |
52 const unsigned char* imagePtr = (const unsigned char*)image; | |
53 // point ourselves at the right starting spot | |
54 unsigned char* dataPtr = fData; | |
55 dataPtr += fBytesPerPixel * fWidth * loc->fY; | |
56 dataPtr += fBytesPerPixel * loc->fX; | |
57 // copy into the data buffer | |
58 for (int i = 0; i < height; ++i) { | |
59 memcpy(dataPtr, imagePtr, rowBytes); | |
60 dataPtr += fBytesPerPixel * fWidth; | |
61 imagePtr += rowBytes; | |
62 } | |
63 | |
64 fDirtyRect.join(loc->fX, loc->fY, loc->fX + width, loc->fY + height); | |
65 | |
66 loc->fX += fOffset.fX; | |
67 loc->fY += fOffset.fY; | |
68 SkDEBUGCODE(fDirty = true;) | |
69 | |
70 return true; | |
71 } | 50 } |
72 | 51 |
73 // To manage the lifetime of a plot, we use two tokens. We use the last upload token to know | 52 if (!fRects->addRect(width, height, loc)) { |
74 // when we can 'piggy back' uploads, ie if the last upload hasn't been flushed to gpu, we don't | 53 return false; |
75 // need to issue a new upload even if we update the cpu backing store. We use lastUse to | |
76 // determine when we can evict a plot from the cache, ie if the last use has already flushed | |
77 // through the gpu then we can reuse the plot. | |
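
The 'piggy back' rule above, as a standalone sketch under the same assumptions (tokens as plain integers, hypothetical helper and parameter names): an upload that has not yet been flushed will pick up new CPU-side writes for free, so a new upload is only needed once the previous one has gone through.

    #include <cstdint>

    using GrBatchToken = uint64_t;

    // Mirrors the check in GrBatchAtlas::updatePlot() further down: if the last
    // scheduled upload has already been flushed, fresh writes need a new upload;
    // otherwise they ride along with the pending one.
    static bool needsNewUpload(GrBatchToken lastUploadToken, GrBatchToken lastFlushedToken) {
        return lastUploadToken <= lastFlushedToken;
    }
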
78 GrBatchToken lastUploadToken() const { return fLastUpload; } | |
79 GrBatchToken lastUseToken() const { return fLastUse; } | |
80 void setLastUploadToken(GrBatchToken batchToken) { | |
81 SkASSERT(batchToken >= fLastUpload); | |
82 fLastUpload = batchToken; | |
83 } | |
84 void setLastUseToken(GrBatchToken batchToken) { | |
85 SkASSERT(batchToken >= fLastUse); | |
86 fLastUse = batchToken; | |
87 } | 54 } |
88 | 55 |
89 void uploadToTexture(GrBatchUploader::TextureUploader* uploader, GrTexture* texture) { | 56 if (!fData) { |
90 // We should only be issuing uploads if we are in fact dirty | 57 fData = reinterpret_cast<unsigned char*>(sk_calloc_throw(fBytesPerPixel * fWidth * |
91 SkASSERT(fDirty && fData && texture); | 58 fHeight)); |
92 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrBatchPlot::uploadToTexture"); | 59 } |
93 size_t rowBytes = fBytesPerPixel * fWidth; | 60 size_t rowBytes = width * fBytesPerPixel; |
94 const unsigned char* dataPtr = fData; | 61 const unsigned char* imagePtr = (const unsigned char*)image; |
95 dataPtr += rowBytes * fDirtyRect.fTop; | 62 // point ourselves at the right starting spot |
96 dataPtr += fBytesPerPixel * fDirtyRect.fLeft; | 63 unsigned char* dataPtr = fData; |
97 uploader->writeTexturePixels(texture, | 64 dataPtr += fBytesPerPixel * fWidth * loc->fY; |
98 fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop, | 65 dataPtr += fBytesPerPixel * loc->fX; |
99 fDirtyRect.width(), fDirtyRect.height(), | 66 // copy into the data buffer |
100 fConfig, dataPtr, rowBytes); | 67 for (int i = 0; i < height; ++i) { |
101 fDirtyRect.setEmpty(); | 68 memcpy(dataPtr, imagePtr, rowBytes); |
102 SkDEBUGCODE(fDirty = false;) | 69 dataPtr += fBytesPerPixel * fWidth; |
| 70 imagePtr += rowBytes; |
103 } | 71 } |
104 | 72 |
105 void resetRects() { | 73 fDirtyRect.join(loc->fX, loc->fY, loc->fX + width, loc->fY + height); |
106 if (fRects) { | |
107 fRects->reset(); | |
108 } | |
109 | 74 |
110 fGenID++; | 75 loc->fX += fOffset.fX; |
111 fID = CreateId(fIndex, fGenID); | 76 loc->fY += fOffset.fY; |
| 77 SkDEBUGCODE(fDirty = true;) |
112 | 78 |
113 // zero out the plot | 79 return true; |
114 if (fData) { | 80 } |
115 sk_bzero(fData, fBytesPerPixel * fWidth * fHeight); | |
116 } | |
117 | 81 |
118 fDirtyRect.setEmpty(); | 82 void GrBatchAtlas::BatchPlot::uploadToTexture(GrBatchUploader::TextureUploader* uploader, |
119 SkDEBUGCODE(fDirty = false;) | 83 GrTexture* texture) { |
| 84 // We should only be issuing uploads if we are in fact dirty |
| 85 SkASSERT(fDirty && fData && texture); |
| 86 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrBatchPlot::uploadToTexture"); |
| 87 size_t rowBytes = fBytesPerPixel * fWidth; |
| 88 const unsigned char* dataPtr = fData; |
| 89 dataPtr += rowBytes * fDirtyRect.fTop; |
| 90 dataPtr += fBytesPerPixel * fDirtyRect.fLeft; |
| 91 uploader->writeTexturePixels(texture, |
| 92 fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop, |
| 93 fDirtyRect.width(), fDirtyRect.height(), |
| 94 fConfig, dataPtr, rowBytes); |
| 95 fDirtyRect.setEmpty(); |
| 96 SkDEBUGCODE(fDirty = false;) |
| 97 } |
| 98 |
| 99 void GrBatchAtlas::BatchPlot::resetRects() { |
| 100 if (fRects) { |
| 101 fRects->reset(); |
120 } | 102 } |
121 | 103 |
122 private: | 104 fGenID++; |
123 BatchPlot(int index, uint64_t genID, int offX, int offY, int width, int height, | 105 fID = CreateId(fIndex, fGenID); |
124 GrPixelConfig config) | 106 |
125 : fLastUpload(0) | 107 // zero out the plot |
126 , fLastUse(0) | 108 if (fData) { |
127 , fIndex(index) | 109 sk_bzero(fData, fBytesPerPixel * fWidth * fHeight); |
128 , fGenID(genID) | |
129 , fID(CreateId(fIndex, fGenID)) | |
130 , fData(nullptr) | |
131 , fWidth(width) | |
132 , fHeight(height) | |
133 , fX(offX) | |
134 , fY(offY) | |
135 , fRects(nullptr) | |
136 , fOffset(SkIPoint16::Make(fX * fWidth, fY * fHeight)) | |
137 , fConfig(config) | |
138 , fBytesPerPixel(GrBytesPerPixel(config)) | |
139 #ifdef SK_DEBUG | |
140 , fDirty(false) | |
141 #endif | |
142 { | |
143 fDirtyRect.setEmpty(); | |
144 } | 110 } |
145 | 111 |
146 ~BatchPlot() override { | 112 fDirtyRect.setEmpty(); |
147 sk_free(fData); | 113 SkDEBUGCODE(fDirty = false;) |
148 delete fRects; | 114 } |
149 } | |
150 | |
151 // Create a clone of this plot. The cloned plot will take the place of the | |
152 // current plot in the atlas. | |
153 BatchPlot* clone() const { | |
154 return new BatchPlot(fIndex, fGenID+1, fX, fY, fWidth, fHeight, fConfig); | |
155 } | |
156 | |
157 static GrBatchAtlas::AtlasID CreateId(uint32_t index, uint64_t generation) { | |
158 SkASSERT(index < (1 << 16)); | |
159 SkASSERT(generation < ((uint64_t)1 << 48)); | |
160 return generation << 16 | index; | |
161 } | |
162 | |
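
CreateId packs two values into one 64-bit AtlasID: the plot index in the low 16 bits and the generation in the upper 48, which is what gives genID() its continuity across atlas spills. The self-contained sketch below round-trips an id; the Get* helpers are rewritten here from that packing to show how hasID() and setLastUseToken() (at the bottom of this file) recover the parts, and may differ in detail from the real ones in GrBatchAtlas.h.

    #include <cassert>
    #include <cstdint>

    using AtlasID = uint64_t;

    static AtlasID CreateId(uint32_t index, uint64_t generation) {
        assert(index < (1u << 16));
        assert(generation < ((uint64_t)1 << 48));
        return generation << 16 | index;
    }

    static uint32_t GetIndexFromID(AtlasID id)      { return uint32_t(id & 0xffff); }
    static uint64_t GetGenerationFromID(AtlasID id) { return id >> 16; }

    int main() {
        AtlasID id = CreateId(/*index=*/5, /*generation=*/42);
        assert(GetIndexFromID(id) == 5);
        assert(GetGenerationFromID(id) == 42);
        return 0;
    }
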
163 GrBatchToken fLastUpload; | |
164 GrBatchToken fLastUse; | |
165 | |
166 const uint32_t fIndex; | |
167 uint64_t fGenID; | |
168 GrBatchAtlas::AtlasID fID; | |
169 unsigned char* fData; | |
170 const int fWidth; | |
171 const int fHeight; | |
172 const int fX; | |
173 const int fY; | |
174 GrRectanizer* fRects; | |
175 const SkIPoint16 fOffset; // the offset of the plot in the backing texture | |
176 const GrPixelConfig fConfig; | |
177 const size_t fBytesPerPixel; | |
178 SkIRect fDirtyRect; | |
179 SkDEBUGCODE(bool fDirty;) | |
180 | |
181 friend class GrBatchAtlas; | |
182 | |
183 typedef SkRefCnt INHERITED; | |
184 }; | |
185 | 115 |
186 //////////////////////////////////////////////////////////////////////////////// | 116 //////////////////////////////////////////////////////////////////////////////// |
187 | 117 |
188 class GrPlotUploader : public GrBatchUploader { | 118 class GrPlotUploader : public GrBatchUploader { |
189 public: | 119 public: |
190 GrPlotUploader(BatchPlot* plot, GrTexture* texture) | 120 GrPlotUploader(GrBatchAtlas::BatchPlot* plot, GrTexture* texture) |
191 : INHERITED(plot->lastUploadToken()) | 121 : INHERITED(plot->lastUploadToken()) |
192 , fPlot(SkRef(plot)) | 122 , fPlot(SkRef(plot)) |
193 , fTexture(texture) { | 123 , fTexture(texture) { |
194 SkASSERT(plot); | 124 SkASSERT(plot); |
195 } | 125 } |
196 | 126 |
197 void upload(TextureUploader* uploader) override { | 127 void upload(TextureUploader* uploader) override { |
198 fPlot->uploadToTexture(uploader, fTexture); | 128 fPlot->uploadToTexture(uploader, fTexture); |
199 } | 129 } |
200 | 130 |
201 private: | 131 private: |
202 SkAutoTUnref<BatchPlot> fPlot; | 132 SkAutoTUnref<GrBatchAtlas::BatchPlot> fPlot; |
203 GrTexture* fTexture; | 133 GrTexture* fTexture; |
204 | 134 |
205 typedef GrBatchUploader INHERITED; | 135 typedef GrBatchUploader INHERITED; |
206 }; | 136 }; |
207 | 137 |
208 /////////////////////////////////////////////////////////////////////////////// | 138 /////////////////////////////////////////////////////////////////////////////// |
209 | 139 |
210 GrBatchAtlas::GrBatchAtlas(GrTexture* texture, int numPlotsX, int numPlotsY) | 140 GrBatchAtlas::GrBatchAtlas(GrTexture* texture, int numPlotsX, int numPlotsY) |
211 : fTexture(texture) | 141 : fTexture(texture) |
212 , fAtlasGeneration(kInvalidAtlasGeneration + 1) { | 142 , fAtlasGeneration(kInvalidAtlasGeneration + 1) { |
213 | 143 |
(...skipping 29 matching lines...) |
243 SkSafeUnref(fTexture); | 173 SkSafeUnref(fTexture); |
244 delete[] fPlotArray; | 174 delete[] fPlotArray; |
245 } | 175 } |
246 | 176 |
247 void GrBatchAtlas::processEviction(AtlasID id) { | 177 void GrBatchAtlas::processEviction(AtlasID id) { |
248 for (int i = 0; i < fEvictionCallbacks.count(); i++) { | 178 for (int i = 0; i < fEvictionCallbacks.count(); i++) { |
249 (*fEvictionCallbacks[i].fFunc)(id, fEvictionCallbacks[i].fData); | 179 (*fEvictionCallbacks[i].fFunc)(id, fEvictionCallbacks[i].fData); |
250 } | 180 } |
251 } | 181 } |
252 | 182 |
253 void GrBatchAtlas::makeMRU(BatchPlot* plot) { | |
254 if (fPlotList.head() == plot) { | |
255 return; | |
256 } | |
257 | |
258 fPlotList.remove(plot); | |
259 fPlotList.addToHead(plot); | |
260 } | |
261 | |
262 inline void GrBatchAtlas::updatePlot(GrDrawBatch::Target* target, AtlasID* id, BatchPlot* plot) { | 183 inline void GrBatchAtlas::updatePlot(GrDrawBatch::Target* target, AtlasID* id, BatchPlot* plot) { |
263 this->makeMRU(plot); | 184 this->makeMRU(plot); |
264 | 185 |
265 // If our most recent upload has already occurred then we have to insert a new | 186 // If our most recent upload has already occurred then we have to insert a new |
266 // upload. Otherwise, we already have a scheduled upload that hasn't yet occurred. | 187 // upload. Otherwise, we already have a scheduled upload that hasn't yet occurred. |
267 // This new update will piggy back on that previously scheduled update. | 188 // This new update will piggy back on that previously scheduled update. |
268 if (target->hasTokenBeenFlushed(plot->lastUploadToken())) { | 189 if (target->hasTokenBeenFlushed(plot->lastUploadToken())) { |
269 plot->setLastUploadToken(target->asapToken()); | 190 plot->setLastUploadToken(target->asapToken()); |
270 SkAutoTUnref<GrPlotUploader> uploader(new GrPlotUploader(plot, fTexture)); | 191 SkAutoTUnref<GrPlotUploader> uploader(new GrPlotUploader(plot, fTexture)); |
271 target->upload(uploader); | 192 target->upload(uploader); |
(...skipping 62 matching lines...) |
334 // Note that this plot will be uploaded inline with the draws whereas the | 255 // Note that this plot will be uploaded inline with the draws whereas the |
335 // one it displaced most likely was uploaded asap. | 256 // one it displaced most likely was uploaded asap. |
336 newPlot->setLastUploadToken(batchTarget->currentToken()); | 257 newPlot->setLastUploadToken(batchTarget->currentToken()); |
337 SkAutoTUnref<GrPlotUploader> uploader(new GrPlotUploader(newPlot, fTexture)); | 258 SkAutoTUnref<GrPlotUploader> uploader(new GrPlotUploader(newPlot, fTexture)); |
338 batchTarget->upload(uploader); | 259 batchTarget->upload(uploader); |
339 *id = newPlot->id(); | 260 *id = newPlot->id(); |
340 | 261 |
341 fAtlasGeneration++; | 262 fAtlasGeneration++; |
342 return true; | 263 return true; |
343 } | 264 } |
344 | |
345 bool GrBatchAtlas::hasID(AtlasID id) { | |
346 uint32_t index = GetIndexFromID(id); | |
347 SkASSERT(index < fNumPlots); | |
348 return fPlotArray[index]->genID() == GetGenerationFromID(id); | |
349 } | |
350 | |
351 void GrBatchAtlas::setLastUseToken(AtlasID id, GrBatchToken batchToken) { | |
352 SkASSERT(this->hasID(id)); | |
353 uint32_t index = GetIndexFromID(id); | |
354 SkASSERT(index < fNumPlots); | |
355 this->makeMRU(fPlotArray[index]); | |
356 fPlotArray[index]->setLastUseToken(batchToken); | |
357 } | |
358 | |
359 void GrBatchAtlas::setLastUseTokenBulk(const BulkUseTokenUpdater& updater, | |
360 GrBatchToken batchToken) { | |
361 int count = updater.fPlotsToUpdate.count(); | |
362 for (int i = 0; i < count; i++) { | |
363 BatchPlot* plot = fPlotArray[updater.fPlotsToUpdate[i]]; | |
364 this->makeMRU(plot); | |
365 plot->setLastUseToken(batchToken); | |
366 } | |
367 } | |
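
A hedged caller-side sketch of how hasID() and setLastUseToken() above are typically used together; the helper name is hypothetical, but the GrBatchAtlas and GrDrawBatch::Target calls are the ones visible in this file.

    static bool touchCachedEntry(GrBatchAtlas* atlas, GrBatchAtlas::AtlasID id,
                                 GrDrawBatch::Target* target) {
        if (!atlas->hasID(id)) {
            // The plot behind this id was evicted and regenerated; the caller
            // must re-add its data (and get a fresh id) before drawing.
            return false;
        }
        // Keep the plot alive at least until the draws recorded so far flush.
        atlas->setLastUseToken(id, target->currentToken());
        return true;
    }
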