OLD | NEW |
(Empty) | |
| 1 /* |
| 2 * Copyright 2015 Google Inc. |
| 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. |
| 6 */ |
| 7 |
| 8 #include "GrBatchAtlas.h" |
| 9 #include "GrBatchTarget.h" |
| 10 #include "GrGpu.h" |
| 11 #include "GrRectanizer.h" |
| 12 #include "GrTracing.h" |
| 13 |
| 14 // for testing |
| 15 #define ATLAS_STATS 0 |
| 16 #if ATLAS_STATS |
| 17 static int g_UploadCount = 0; |
| 18 #endif |
| 19 |
| 20 static inline void adjust_for_offset(SkIPoint16* loc, const SkIPoint16& offset) { |
| 21 loc->fX += offset.fX; |
| 22 loc->fY += offset.fY; |
| 23 } |
| 24 |
| 25 static GrBatchAtlas::AtlasID create_id(int index, int generation) { |
| 26 // Generation ID can roll over because we only check for equality |
| 27 SkASSERT(index < (1 << 16)); |
| 28 return generation << 16 | index; |
| 29 } |
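The matching unpack accessors (getIndexFromID() and getGenerationFromID(), used near the bottom of this file) are declared in GrBatchAtlas.h and are not part of this diff. A minimal sketch of what they imply, assuming AtlasID is 32 bits wide with the index in the low 16 bits:

    // Hypothetical inverses of create_id(); the real accessors live in
    // GrBatchAtlas.h and may differ. Assumes a 16/16 bit split of AtlasID.
    static inline int get_index_from_id(GrBatchAtlas::AtlasID id) {
        return id & 0xffff;          // low 16 bits hold the plot index
    }
    static inline int get_generation_from_id(GrBatchAtlas::AtlasID id) {
        return (id >> 16) & 0xffff;  // upper bits hold the rolled-over generation
    }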
| 30 |
| 31 // The backing GrTexture for a GrBatchAtlas is broken into a spatial grid of GrBatchPlots. |
| 32 // The GrBatchPlots keep track of subimage placement via their GrRectanizer. In turn, a GrBatchPlot |
| 33 // manages the lifetime of its data using two tokens, a last ref token and a last upload token. |
| 34 // Once a GrBatchPlot is "full" (i.e. there is no room for the new subimage according to the |
| 35 // GrRectanizer), it can no longer be used unless the last ref on the GrBatchPlot has already been |
| 36 // flushed through to the gpu. |
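As a concrete instance of the layout described above, here is a small sizing sketch; the 1024x1024 A8 texture and the 4x4 grid are illustrative assumptions, not Skia defaults:

    // Hypothetical sizing math for a plot grid; all numbers illustrative.
    static void print_plot_sizing() {
        const int texW = 1024, texH = 1024;      // backing GrTexture
        const int numPlotsX = 4, numPlotsY = 4;  // spatial grid of plots
        const size_t bpp = 1;                    // A8: one byte per pixel
        const int plotW = texW / numPlotsX;      // 256
        const int plotH = texH / numPlotsY;      // 256
        SkDebugf("plot %dx%d, cpu backing %d bytes\n",
                 plotW, plotH, (int)(bpp * plotW * plotH));
    }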
| 37 |
| 38 class BatchPlot : public SkRefCnt { |
| 39 public: |
| 40 typedef GrBatchAtlas::BatchToken BatchToken; |
| 41 SK_DECLARE_INST_COUNT(BatchPlot); |
| 42 SK_DECLARE_INTERNAL_LLIST_INTERFACE(BatchPlot); |
| 43 |
| 44 // index() refers to the index of the plot in the owning GrBatchAtlas's plot array. genID() is a |
| 45 // monotonically incrementing number which is bumped every time the cpu backing store is |
| 46 // wiped, or when the plot itself is evicted from the atlas (i.e., there is continuity in genID() |
| 47 // across atlas spills). |
| 48 int index() const { return fIndex; } |
| 49 int genID() const { return fGenID; } |
| 50 GrBatchAtlas::AtlasID id() { return fID; } |
| 51 |
| 52 GrTexture* texture() const { return fTexture; } |
| 53 |
| 54 bool addSubImage(int width, int height, const void* image, SkIPoint16* loc, size_t rowBytes) { |
| 55 if (!fRects->addRect(width, height, loc)) { |
| 56 return false; |
| 57 } |
| 58 |
| 59 SkASSERT(fData); |
| 60 const unsigned char* imagePtr = (const unsigned char*)image; |
| 61 // point ourselves at the right starting spot |
| 62 unsigned char* dataPtr = fData; |
| 63 dataPtr += fBytesPerPixel * fWidth * loc->fY; |
| 64 dataPtr += fBytesPerPixel * loc->fX; |
| 65 // copy into the data buffer |
| 66 for (int i = 0; i < height; ++i) { |
| 67 memcpy(dataPtr, imagePtr, rowBytes); |
| 68 dataPtr += fBytesPerPixel * fWidth; |
| 69 imagePtr += rowBytes; |
| 70 } |
| 71 |
| 72 fDirtyRect.join(loc->fX, loc->fY, loc->fX + width, loc->fY + height); |
| 73 adjust_for_offset(loc, fOffset); |
| 74 SkDEBUGCODE(fDirty = true;) |
| 75 |
| 76 #if ATLAS_STATS |
| 77 ++g_UploadCount; |
| 78 #endif |
| 79 |
| 80 return true; |
| 81 } |
| 82 |
| 83 // to manage the lifetime of a plot, we use two tokens. We use the last upload token to know when |
| 84 // we can 'piggy back' uploads, ie if the last upload hasn't been flushed to the gpu, we don't need |
| 85 // to issue a new upload even if we update the cpu backing store. We use the last ref token to |
| 86 // determine when we can evict a plot from the cache, ie if the last ref has already flushed through |
| 87 // the gpu then we can reuse the plot. |
| 88 BatchToken lastUploadToken() const { return fLastUpload; } |
| 89 BatchToken lastRefToken() const { return fLastRef; } |
| 90 void setLastUploadToken(BatchToken batchToken) { fLastUpload = batchToken; } |
| 91 void setLastRefToken(BatchToken batchToken) { fLastRef = batchToken; } |
| 92 |
| 93 void uploadToTexture(GrBatchTarget::TextureUploader uploader) { |
| 94 // We should only be issuing uploads if we are in fact dirty |
| 95 SkASSERT(fDirty); |
| 96 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrBatchPlot::uploadToTexture"); |
| 97 SkASSERT(fTexture); |
| 98 size_t rowBytes = fBytesPerPixel * fRects->width(); |
| 99 const unsigned char* dataPtr = fData; |
| 100 dataPtr += rowBytes * fDirtyRect.fTop; |
| 101 dataPtr += fBytesPerPixel * fDirtyRect.fLeft; |
| 102 uploader.writeTexturePixels(fTexture, |
| 103 fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop, |
| 104 fDirtyRect.width(), fDirtyRect.height(), |
| 105 fTexture->config(), dataPtr, rowBytes); |
| 106 fDirtyRect.setEmpty(); |
| 107 SkDEBUGCODE(fDirty = false;) |
| 108 } |
| 109 |
| 110 void resetRects() { |
| 111 SkASSERT(fRects); |
| 112 fRects->reset(); |
| 113 fGenID++; |
| 114 fID = create_id(fIndex, fGenID); |
| 115 |
| 116 // zero out the plot |
| 117 SkASSERT(fData); |
| 118 memset(fData, 0, fBytesPerPixel * fWidth * fHeight); |
| 119 |
| 120 fDirtyRect.setEmpty(); |
| 121 SkDEBUGCODE(fDirty = false;) |
| 122 } |
| 123 |
| 124 int x() const { return fX; } |
| 125 int y() const { return fY; } |
| 126 |
| 127 private: |
| 128 BatchPlot() |
| 129 : fLastUpload(0) |
| 130 , fLastRef(0) |
| 131 , fIndex(-1) |
| 132 , fGenID(-1) |
| 133 , fID(0) |
| 134 , fData(NULL) |
| 135 , fWidth(0) |
| 136 , fHeight(0) |
| 137 , fX(0) |
| 138 , fY(0) |
| 139 , fTexture(NULL) |
| 140 , fRects(NULL) |
| 141 , fAtlas(NULL) |
| 142 , fBytesPerPixel(1) |
| 143 #ifdef SK_DEBUG |
| 144 , fDirty(false) |
| 145 #endif |
| 146 { |
| 147 fOffset.set(0, 0); |
| 148 } |
| 149 |
| 150 ~BatchPlot() { |
| 151 SkDELETE_ARRAY(fData); |
| 152 fData = NULL; |
| 153 delete fRects; |
| 154 } |
| 155 |
| 156 void init(GrBatchAtlas* atlas, GrTexture* texture, int index, uint32_t generation, |
| 157 int offX, int offY, int width, int height, size_t bpp) { |
| 158 fIndex = index; |
| 159 fGenID = generation; |
| 160 fID = create_id(index, generation); |
| 161 fWidth = width; |
| 162 fHeight = height; |
| 163 fX = offX; |
| 164 fY = offY; |
| 165 fRects = GrRectanizer::Factory(width, height); |
| 166 fAtlas = atlas; |
| 167 fOffset.set(offX * width, offY * height); |
| 168 fBytesPerPixel = bpp; |
| 169 fData = NULL; |
| 170 fDirtyRect.setEmpty(); |
| 171 SkDEBUGCODE(fDirty = false;) |
| 172 fTexture = texture; |
| 173 |
| 174 // allocate backing store |
| 175 fData = SkNEW_ARRAY(unsigned char, fBytesPerPixel * width * height); |
| 176 memset(fData, 0, fBytesPerPixel * width * height); |
| 177 } |
| 178 |
| 179 BatchToken fLastUpload; |
| 180 BatchToken fLastRef; |
| 181 |
| 182 uint32_t fIndex; |
| 183 uint32_t fGenID; |
| 184 GrBatchAtlas::AtlasID fID; |
| 185 unsigned char* fData; |
| 186 int fWidth; |
| 187 int fHeight; |
| 188 int fX; |
| 189 int fY; |
| 190 GrTexture* fTexture; |
| 191 GrRectanizer* fRects; |
| 192 GrBatchAtlas* fAtlas; |
| 193 SkIPoint16 fOffset; // the offset of the plot in the backing texture |
| 194 size_t fBytesPerPixel; |
| 195 SkIRect fDirtyRect; |
| 196 SkDEBUGCODE(bool fDirty;) |
| 197 |
| 198 friend class GrBatchAtlas; |
| 199 |
| 200 typedef SkRefCnt INHERITED; |
| 201 }; |
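On a successful addSubImage(), loc already includes the plot's offset into the backing texture (via adjust_for_offset), so a client can map it straight to texture coordinates. A minimal sketch of that mapping; the helper itself is hypothetical and not part of this change:

    // Hypothetical helper: convert an atlas location plus subimage size into
    // normalized UVs against the atlas backing texture.
    static SkRect get_uv_rect(const GrTexture& texture, const SkIPoint16& loc,
                              int width, int height) {
        const SkScalar sx = SK_Scalar1 / texture.width();
        const SkScalar sy = SK_Scalar1 / texture.height();
        return SkRect::MakeXYWH(loc.fX * sx, loc.fY * sy, width * sx, height * sy);
    }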
| 202 |
| 203 //////////////////////////////////////////////////////////////////////////////// |
| 204 |
| 205 class GrPlotUploader : public GrBatchTarget::Uploader { |
| 206 public: |
| 207 GrPlotUploader(BatchPlot* plot) |
| 208 : INHERITED(plot->lastUploadToken()) |
| 209 , fPlot(SkRef(plot)) { |
| 210 SkASSERT(plot); |
| 211 } |
| 212 |
| 213 void upload(GrBatchTarget::TextureUploader uploader) SK_OVERRIDE { |
| 214 fPlot->uploadToTexture(uploader); |
| 215 } |
| 216 |
| 217 private: |
| 218 SkAutoTUnref<BatchPlot> fPlot; |
| 219 |
| 220 typedef GrBatchTarget::Uploader INHERITED; |
| 221 }; |
| 222 |
| 223 /////////////////////////////////////////////////////////////////////////////// |
| 224 |
| 225 GrBatchAtlas::GrBatchAtlas(GrTexture* texture, int numPlotsX, int numPlotsY) |
| 226 : fTexture(texture) |
| 227 , fNumPlotsX(numPlotsX) |
| 228 , fNumPlotsY(numPlotsY) |
| 229 , fPlotWidth(texture->width() / numPlotsX) |
| 230 , fPlotHeight(texture->height() / numPlotsY) { |
| 231 SkASSERT(fPlotWidth * fNumPlotsX == texture->width()); |
| 232 SkASSERT(fPlotHeight * fNumPlotsY == texture->height()); |
| 233 |
| 234 // We currently do not support compressed atlases... |
| 235 SkASSERT(!GrPixelConfigIsCompressed(texture->desc().fConfig)); |
| 236 |
| 237 // set up allocated plots |
| 238 fBPP = GrBytesPerPixel(texture->desc().fConfig); |
| 239 fPlotArray = SkNEW_ARRAY(SkAutoTUnref<BatchPlot>, (fNumPlotsX * fNumPlotsY)); |
| 240 |
| 241 SkAutoTUnref<BatchPlot>* currPlot = fPlotArray; |
| 242 for (int y = fNumPlotsY - 1, r = 0; y >= 0; --y, ++r) { |
| 243 for (int x = fNumPlotsX - 1, c = 0; x >= 0; --x, ++c) { |
| 244 int id = r * fNumPlotsX + c; |
| 245 currPlot->reset(SkNEW(BatchPlot)); |
| 246 (*currPlot)->init(this, texture, id, 0, x, y, fPlotWidth, fPlotHeight, fBPP); |
| 247 |
| 248 // build LRU list |
| 249 fPlotList.addToHead(currPlot->get()); |
| 250 ++currPlot; |
| 251 } |
| 252 } |
| 253 } |
| 254 |
| 255 GrBatchAtlas::~GrBatchAtlas() { |
| 256 SkSafeUnref(fTexture); |
| 257 SkDELETE_ARRAY(fPlotArray); |
| 258 |
| 259 #if ATLAS_STATS |
| 260 SkDebugf("Num uploads: %d\n", g_UploadCount); |
| 261 #endif |
| 262 } |
| 263 |
| 264 void GrBatchAtlas::processEviction(AtlasID id) { |
| 265 for (int i = 0; i < fEvictionCallbacks.count(); i++) { |
| 266 (*fEvictionCallbacks[i].fFunc)(id, fEvictionCallbacks[i].fData); |
| 267 } |
| 268 } |
| 269 |
| 270 void GrBatchAtlas::makeMRU(BatchPlot* plot) { |
| 271 if (fPlotList.head() == plot) { |
| 272 return; |
| 273 } |
| 274 |
| 275 fPlotList.remove(plot); |
| 276 fPlotList.addToHead(plot); |
| 277 } |
| 278 |
| 279 inline void GrBatchAtlas::updatePlot(GrBatchTarget* batchTarget, AtlasID* id, BatchPlot* plot) { |
| 280 this->makeMRU(plot); |
| 281 |
| 282 // If our most recent upload has already occurred then we have to insert a new |
| 283 // upload. Otherwise, we already have a scheduled upload that hasn't yet occurred. |
| 284 // This new update will piggy back on that previously scheduled update. |
| 285 if (batchTarget->isIssued(plot->lastUploadToken())) { |
| 286 plot->setLastUploadToken(batchTarget->asapToken()); |
| 287 SkAutoTUnref<GrPlotUploader> uploader(SkNEW_ARGS(GrPlotUploader, (plot))); |
| 288 batchTarget->upload(uploader); |
| 289 } |
| 290 *id = plot->id(); |
| 291 } |
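The isIssued()/asapToken()/currentToken() calls above belong to GrBatchTarget. A minimal model of the bookkeeping they imply, assuming tokens are monotonically increasing sequence numbers; the struct and member names here are assumptions, not GrBatchTarget's real fields:

    // Hypothetical model of the batch target's token stream: a token is
    // 'issued' once the draws recorded under it have been flushed to the gpu.
    struct TokenModel {
        GrBatchAtlas::BatchToken fCurrentToken;      // draws being recorded now
        GrBatchAtlas::BatchToken fLastFlushedToken;  // highest token already flushed
        TokenModel() : fCurrentToken(1), fLastFlushedToken(0) {}
        bool isIssued(GrBatchAtlas::BatchToken token) const {
            return token <= fLastFlushedToken;
        }
    };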
| 292 |
| 293 bool GrBatchAtlas::addToAtlas(AtlasID* id, GrBatchTarget* batchTarget, |
| 294 int width, int height, const void* image, SkIPoint16* loc) { |
| 295 // We should already have a texture, TODO clean this up |
| 296 SkASSERT(fTexture && width < fPlotWidth && height < fPlotHeight); |
| 297 |
| 298 // now look through all allocated plots for one we can share, in Most Recently Refed order |
| 299 GrBatchPlotList::Iter plotIter; |
| 300 plotIter.init(fPlotList, GrBatchPlotList::Iter::kHead_IterStart); |
| 301 BatchPlot* plot; |
| 302 while ((plot = plotIter.get())) { |
| 303 if (plot->addSubImage(width, height, image, loc, fBPP * width)) { |
| 304 this->updatePlot(batchTarget, id, plot); |
| 305 return true; |
| 306 } |
| 307 plotIter.next(); |
| 308 } |
| 309 |
| 310 // If the above fails, then see if the least recently refed plot has already been flushed to the |
| 311 // gpu |
| 312 plotIter.init(fPlotList, GrBatchPlotList::Iter::kTail_IterStart); |
| 313 plot = plotIter.get(); |
| 314 SkASSERT(plot); |
| 315 if (batchTarget->isIssued(plot->lastRefToken())) { |
| 316 this->processEviction(plot->id()); |
| 317 plot->resetRects(); |
| 318 SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image, loc, fBPP * width); |
| 319 SkASSERT(verify); |
| 320 this->updatePlot(batchTarget, id, plot); |
| 321 return true; |
| 322 } |
| 323 |
| 324 // The least recently refed plot hasn't been flushed to the gpu yet; however, if we have flushed |
| 325 // it to the batch target then we can reuse it. Our last ref token is guaranteed to be less |
| 326 // than or equal to the current token. If it's 'less than' the current token, then we can spin |
| 327 // off the plot (i.e., let the batch target manage it) and create a new plot in its place in our |
| 328 // array. If it is equal to the currentToken, then the caller has to flush draws to the batch |
| 329 // target so we can spin off the plot. |
| 330 if (plot->lastRefToken() == batchTarget->currentToken()) { |
| 331 return false; |
| 332 } |
| 333 |
| 334 // We take an extra ref here so our plot isn't deleted when we reset its index in the array. |
| 335 plot->ref(); |
| 336 int index = plot->index(); |
| 337 int x = plot->x(); |
| 338 int y = plot->y(); |
| 339 int generation = plot->genID(); |
| 340 |
| 341 this->processEviction(plot->id()); |
| 342 fPlotList.remove(plot); |
| 343 SkAutoTUnref<BatchPlot>& newPlot = fPlotArray[plot->index()]; |
| 344 newPlot.reset(SkNEW(BatchPlot)); |
| 345 newPlot->init(this, fTexture, index, ++generation, x, y, fPlotWidth, fPlotHeight, fBPP); |
| 346 |
| 347 fPlotList.addToHead(newPlot.get()); |
| 348 SkDEBUGCODE(bool verify = )newPlot->addSubImage(width, height, image, loc, fBPP * width); |
| 349 SkASSERT(verify); |
| 350 newPlot->setLastUploadToken(batchTarget->currentToken()); |
| 351 SkAutoTUnref<GrPlotUploader> uploader(SkNEW_ARGS(GrPlotUploader, (newPlot))); |
| 352 batchTarget->upload(uploader); |
| 353 *id = newPlot->id(); |
| 354 plot->unref(); |
| 355 return true; |
| 356 } |
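addToAtlas() only returns false in the token-equality case above, so a caller is expected to flush the batch target and try again. A hedged sketch of that loop; the flush() entry point is illustrative, everything else is this file's API:

    // Hypothetical caller-side retry: flushing unpins the least recently
    // refed plot so the next attempt can recycle it.
    static void add_with_retry(GrBatchAtlas* atlas, GrBatchTarget* batchTarget,
                               int width, int height, const void* image,
                               GrBatchAtlas::AtlasID* id, SkIPoint16* loc) {
        while (!atlas->addToAtlas(id, batchTarget, width, height, image, loc)) {
            batchTarget->flush();  // assumed flush entry point
        }
        atlas->setLastRefToken(*id, batchTarget->currentToken());
    }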
| 357 |
| 358 bool GrBatchAtlas::hasID(AtlasID id) { |
| 359 int index = this->getIndexFromID(id); |
| 360 SkASSERT(index < fNumPlotsX * fNumPlotsY); |
| 361 return fPlotArray[index]->genID() == this->getGenerationFromID(id); |
| 362 } |
| 363 |
| 364 void GrBatchAtlas::setLastRefToken(AtlasID id, BatchToken batchToken) { |
| 365 SkASSERT(this->hasID(id)); |
| 366 int index = this->getIndexFromID(id); |
| 367 this->makeMRU(fPlotArray[index]); |
| 368 fPlotArray[index]->setLastRefToken(batchToken); |
| 369 } |
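A small self-contained sanity sketch of the ID packing round-trip against create_id() at the top of this file (not part of Skia's test suite):

    // Hypothetical debug check: an AtlasID must round-trip its packed fields.
    static void check_id_roundtrip() {
        GrBatchAtlas::AtlasID id = create_id(3, 7);
        SkASSERT((id & 0xffff) == 3);  // index lives in the low 16 bits
        SkASSERT((id >> 16) == 7);     // generation lives above it
    }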