Chromium Code Reviews
| 1 /* | |
| 2 * Copyright 2015 Google Inc. | |
| 3 * | |
| 4 * Use of this source code is governed by a BSD-style license that can be | |
| 5 * found in the LICENSE file. | |
| 6 */ | |
| 7 | |
| 8 #include "GrBatchAtlas.h" | |
| 9 #include "GrBatchTarget.h" | |
| 10 #include "GrGpu.h" | |
| 11 #include "GrRectanizer.h" | |
| 12 #include "GrTracing.h" | |
| 13 | |
| 14 // for testing | |
| 15 #define ATLAS_STATS 0 | |
| 16 #if ATLAS_STATS | |
| 17 static int g_UploadCount = 0; | |
| 18 #endif | |
| 19 | |
| 20 static inline void adjust_for_offset(SkIPoint16* loc, const SkIPoint16& offset) { | |
| 21 loc->fX += offset.fX; | |
| 22 loc->fY += offset.fY; | |
| 23 } | |
| 24 | |
| 25 // The backing GrTexture for a GrBatchAtlas is broken into a spatial grid of GrBatchPlots. | |
| 26 // The GrBatchPlots keep track of subimage placement via their GrRectanizer. In turn, a GrBatchPlot | |
| 27 // manages the lifetime of its data using two tokens, a last ref token and a last upload token. | |
| 28 // Once a GrBatchPlot is "full" (i.e. there is no room for the new subimage according to the | |
| 29 // GrRectanizer), it can no longer be used unless the last ref on the GrPlot has already been | |
| 30 // flushed through to the gpu. | |
| 31 | |
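A concrete illustration of the layout described above (the numbers are illustrative only, not taken from this CL):

    // e.g. a 512x512 A8 (1 byte-per-pixel) backing texture with numPlotsX = numPlotsY = 4
    // is carved into 4x4 plots of 128x128 texels; the plot at grid position (x=2, y=1)
    // starts at texel offset (2*128, 1*128) = (256, 128), matching
    // fOffset.set(offX * width, offY * height) in GrBatchPlot::init() below.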
| 32 class GrBatchPlot : public SkRefCnt { | |
|
bsalomon
2015/03/11 13:34:12
Doesn't need the Gr prefix when in a cpp
joshualitt
2015/03/11 16:03:27
Acknowledged.
| |
| 33 public: | |
| 34 typedef GrDrawTarget::BatchToken BatchToken; | |
| 35 SK_DECLARE_INST_COUNT(GrBatchPlot); | |
| 36 SK_DECLARE_INTERNAL_LLIST_INTERFACE(GrBatchPlot); | |
| 37 | |
| 38 // index() refers to the index of the plot in the owning GrAtlas's plot array. genID() is a | |
| 39 // monotonically incrementing number which is bumped every time the cpu backing store is | |
| 40 // wiped, or when the plot itself is evicted from the atlas (ie, there is continuity in genID() | |
| 41 // across atlas spills) | |
| 42 int index() const { return fIndex; } | |
| 43 int genID() const { return fGenID; } | |
| 44 | |
| 45 GrTexture* texture() const { return fTexture; } | |
| 46 | |
| 47 bool addSubImage(int width, int height, const void* image, SkIPoint16* loc) { | |
|
bsalomon
2015/03/11 13:34:12
should this take a rowbytes? It seems like we're a
joshualitt
2015/03/11 16:03:27
might want to check I did this one right
| |
| 48 if (!fRects->addRect(width, height, loc)) { | |
| 49 return false; | |
| 50 } | |
| 51 | |
| 52 SkASSERT(fData); | |
| 53 const unsigned char* imagePtr = (const unsigned char*)image; | |
| 54 // point ourselves at the right starting spot | |
| 55 unsigned char* dataPtr = fData; | |
| 56 dataPtr += fBytesPerPixel * fWidth * loc->fY; | |
| 57 dataPtr += fBytesPerPixel * loc->fX; | |
| 58 // copy into the data buffer | |
| 59 for (int i = 0; i < height; ++i) { | |
| 60 memcpy(dataPtr, imagePtr, fBytesPerPixel * width); | |
| 61 dataPtr += fBytesPerPixel * fWidth; | |
| 62 imagePtr += fBytesPerPixel * width; | |
| 63 } | |
| 64 | |
| 65 fDirtyRect.join(loc->fX, loc->fY, loc->fX + width, loc->fY + height); | |
| 66 adjust_for_offset(loc, fOffset); | |
| 67 SkDEBUGCODE(fDirty = true;) | |
| 68 | |
| 69 #if ATLAS_STATS | |
| 70 ++g_UploadCount; | |
| 71 #endif | |
| 72 | |
| 73 return true; | |
| 74 } | |
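On the rowbytes question in the review thread above: a minimal sketch of what a rowBytes-aware variant of this copy could look like (hypothetical signature; srcRowBytes is an assumed parameter, and this is not what the CL currently does). Only the source stride changes; the destination stride is still a full plot row:

    bool addSubImage(int width, int height, const void* image, size_t srcRowBytes,
                     SkIPoint16* loc) {
        if (!fRects->addRect(width, height, loc)) {
            return false;
        }
        const unsigned char* imagePtr = (const unsigned char*)image;
        unsigned char* dataPtr = fData + fBytesPerPixel * (fWidth * loc->fY + loc->fX);
        for (int i = 0; i < height; ++i) {
            memcpy(dataPtr, imagePtr, fBytesPerPixel * width);  // copy one row of the subimage
            dataPtr += fBytesPerPixel * fWidth;                 // dst stride: full plot row
            imagePtr += srcRowBytes;                            // src stride supplied by the caller
        }
        fDirtyRect.join(loc->fX, loc->fY, loc->fX + width, loc->fY + height);
        adjust_for_offset(loc, fOffset);
        SkDEBUGCODE(fDirty = true;)
        return true;
    }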
| 75 | |
| 76 // to manage the lifetime of a plot, we use two tokens. We use last upload token to know when | |
| 77 // we can 'piggy back' uploads, ie if the last upload hasn't been flushed to gpu, we don't need | |
| 78 // to issue a new upload even if we update the cpu backing store. We use last ref to determine | |
| 79 // when we can evict a plot from the cache, ie if the last ref has already flushed through | |
| 80 // the gpu then we can reuse the plot | |
| 81 BatchToken lastUploadToken() const { return fLastUpload; } | |
| 82 BatchToken lastRefToken() const { return fLastRef; } | |
| 83 void setLastUploadToken(BatchToken batchToken) { fLastUpload = batchToken; } | |
| 84 void setLastRefToken(BatchToken batchToken) { fLastRef = batchToken; } | |
| 85 | |
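A small worked example of the two-token protocol described above (token values are made up): suppose the batch target reports isIssued() as true for tokens up through 5 and its current token is 7.

    // lastUploadToken == 6: the scheduled upload hasn't been issued yet, so a new CPU-side
    //                       write can piggy back on it (see updatePlot() below).
    // lastUploadToken == 4: already issued, so a fresh GrPlotUploader must be queued.
    // lastRefToken    == 3: already flushed through, so the plot can be reset and reused.
    // lastRefToken    == 7: equals the current token, so addToAtlas() returns false and the
    //                       caller must flush before the plot can be spun off or reused.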
| 86 void uploadToTexture(GrTextureUploader uploader) { | |
| 87 // We should only be issuing uploads if we are in fact dirty | |
| 88 SkASSERT(fDirty); | |
| 89 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrBatchPlot::uploadToTexture"); | |
| 90 SkASSERT(fTexture); | |
| 91 size_t rowBytes = fBytesPerPixel * fRects->width(); | |
| 92 const unsigned char* dataPtr = fData; | |
| 93 dataPtr += rowBytes * fDirtyRect.fTop; | |
| 94 dataPtr += fBytesPerPixel * fDirtyRect.fLeft; | |
| 95 uploader.writeTexturePixels(fTexture, | |
| 96 fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop, | |
| 97 fDirtyRect.width(), fDirtyRect.height(), | |
| 98 fTexture->config(), dataPtr, rowBytes); | |
| 99 fDirtyRect.setEmpty(); | |
| 100 SkDEBUGCODE(fDirty = false;) | |
| 101 } | |
| 102 | |
| 103 void resetRects() { | |
| 104 SkASSERT(fRects); | |
| 105 fRects->reset(); | |
| 106 fGenID++; | |
| 107 | |
| 108 // zero out the plot | |
| 109 SkASSERT(fData); | |
| 110 memset(fData, 0, fBytesPerPixel * fWidth * fHeight); | |
| 111 | |
| 112 fDirtyRect.setEmpty(); | |
| 113 SkDEBUGCODE(fDirty = false;) | |
| 114 } | |
| 115 | |
| 116 int x() const { return fX; } | |
| 117 int y() const { return fY; } | |
| 118 | |
| 119 private: | |
| 120 GrBatchPlot() | |
| 121 : fLastUpload(0) | |
| 122 , fLastRef(0) | |
| 123 , fIndex(-1) | |
| 124 , fGenID(-1) | |
| 125 , fData(NULL) | |
| 126 , fWidth(0) | |
| 127 , fHeight(0) | |
| 128 , fX(0) | |
| 129 , fY(0) | |
| 130 , fTexture(NULL) | |
| 131 , fRects(NULL) | |
| 132 , fAtlas(NULL) | |
| 133 , fBytesPerPixel(1) | |
| 134 #ifdef SK_DEBUG | |
| 135 , fDirty(false) | |
| 136 #endif | |
| 137 { | |
| 138 fOffset.set(0, 0); | |
| 139 } | |
| 140 | |
| 141 ~GrBatchPlot() { | |
| 142 SkDELETE_ARRAY(fData); | |
| 143 fData = NULL; | |
| 144 delete fRects; | |
| 145 } | |
| 146 | |
| 147 void init(GrBatchAtlas* atlas, GrTexture* texture, int id, uint32_t generation, | |
| 148 int offX, int offY, int width, int height, size_t bpp) { | |
| 149 fIndex = id; | |
| 150 fGenID = generation; | |
| 151 fWidth = width; | |
| 152 fHeight = height; | |
| 153 fX = offX; | |
| 154 fY = offY; | |
| 155 fRects = GrRectanizer::Factory(width, height); | |
| 156 fAtlas = atlas; | |
| 157 fOffset.set(offX * width, offY * height); | |
| 158 fBytesPerPixel = bpp; | |
| 159 fData = NULL; | |
| 160 fDirtyRect.setEmpty(); | |
| 161 SkDEBUGCODE(fDirty = false;) | |
| 162 fTexture = texture; | |
| 163 | |
| 164 // allocate backing store | |
| 165 fData = SkNEW_ARRAY(unsigned char, fBytesPerPixel * width * height); | |
| 166 memset(fData, 0, fBytesPerPixel * width * height); | |
| 167 } | |
| 168 | |
| 169 BatchToken fLastUpload; | |
| 170 BatchToken fLastRef; | |
| 171 | |
| 172 uint32_t fIndex; | |
| 173 uint32_t fGenID; | |
| 174 unsigned char* fData; | |
| 175 int fWidth; | |
| 176 int fHeight; | |
| 177 int fX; | |
| 178 int fY; | |
| 179 GrTexture* fTexture; | |
| 180 GrRectanizer* fRects; | |
| 181 GrBatchAtlas* fAtlas; | |
| 182 SkIPoint16 fOffset; // the offset of the plot in the backing texture | |
| 183 size_t fBytesPerPixel; | |
| 184 SkIRect fDirtyRect; | |
| 185 SkDEBUGCODE(bool fDirty;) | |
| 186 | |
| 187 friend class GrBatchAtlas; | |
| 188 | |
| 189 typedef SkRefCnt INHERITED; | |
| 190 }; | |
| 191 | |
| 192 //////////////////////////////////////////////////////////////////////////////// | |
| 193 | |
| 194 class GrPlotUploader : public GrUploader { | |
| 195 public: | |
| 196 GrPlotUploader(GrBatchPlot* plot) | |
| 197 : INHERITED(plot->lastUploadToken()) | |
| 198 , fPlot(SkRef(plot)) { | |
| 199 SkASSERT(plot); | |
| 200 } | |
| 201 | |
| 202 void upload(GrTextureUploader uploader) SK_OVERRIDE { fPlot->uploadToTexture(uploader); } | |
| 203 | |
| 204 private: | |
| 205 SkAutoTUnref<GrBatchPlot> fPlot; | |
| 206 | |
| 207 typedef GrUploader INHERITED; | |
| 208 }; | |
| 209 | |
| 210 /////////////////////////////////////////////////////////////////////////////// | |
| 211 | |
| 212 static GrBatchAtlas::AtlasID create_id(int index, int generation) { | |
| 213 // Generation ID can roll over because we only check for equality | |
| 214 SkASSERT(index < (1 << 16)); | |
| 215 return generation << 16 | index; | |
| 216 } | |
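Given the 16/16-bit packing in create_id(), the decoding helpers used later in this file (getIndexFromID() and getGenerationFromID(), presumably defined in GrBatchAtlas.h) would be the inverse. A sketch under that assumption; the names and bodies here are illustrative, not the actual header code:

    static int get_index_from_id(GrBatchAtlas::AtlasID id) { return id & 0xffff; }
    static int get_generation_from_id(GrBatchAtlas::AtlasID id) { return id >> 16; }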
| 217 | |
| 218 GrBatchAtlas::GrBatchAtlas(GrTexture* texture, int numPlotsX, int numPlotsY) | |
| 219 : fTexture(texture) | |
| 220 , fNumPlotsX(numPlotsX) | |
| 221 , fNumPlotsY(numPlotsY) | |
| 222 , fPlotWidth(texture->width() / numPlotsX) | |
| 223 , fPlotHeight(texture->height() / numPlotsY) { | |
| 224 SkASSERT(fPlotWidth * fNumPlotsX == texture->width()); | |
| 225 SkASSERT(fPlotHeight * fNumPlotsY == texture->height()); | |
| 226 | |
| 227 // We currently do not support compressed atlases... | |
| 228 SkASSERT(!GrPixelConfigIsCompressed(texture->desc().fConfig)); | |
| 229 | |
| 230 // set up allocated plots | |
| 231 fBPP = GrBytesPerPixel(texture->desc().fConfig); | |
| 232 fPlotArray = SkNEW_ARRAY(SkAutoTUnref<GrBatchPlot>, (fNumPlotsX * fNumPlotsY)); | |
| 233 | |
| 234 SkAutoTUnref<GrBatchPlot>* currPlot = fPlotArray; | |
| 235 for (int y = fNumPlotsY - 1, r = 0; y >= 0; --y, ++r) { | |
| 236 for (int x = fNumPlotsX - 1, c = 0; x >= 0; --x, ++c) { | |
| 237 int id = r * fNumPlotsX + c; | |
| 238 currPlot->reset(SkNEW(GrBatchPlot)); | |
| 239 (*currPlot)->init(this, texture, id, 0, x, y, fPlotWidth, fPlotHeight, fBPP); | |
| 240 | |
| 241 // build LRU list | |
| 242 fPlotList.addToHead(currPlot->get()); | |
| 243 ++currPlot; | |
| 244 } | |
| 245 } | |
| 246 } | |
| 247 | |
| 248 GrBatchAtlas::~GrBatchAtlas() { | |
| 249 SkSafeUnref(fTexture); | |
| 250 SkDELETE_ARRAY(fPlotArray); | |
| 251 | |
| 252 #if ATLAS_STATS | |
| 253 SkDebugf("Num uploads: %d\n", g_UploadCount); | |
| 254 #endif | |
| 255 } | |
| 256 | |
| 257 void GrBatchAtlas::makeMRU(GrBatchPlot* plot) { | |
| 258 if (fPlotList.head() == plot) { | |
| 259 return; | |
| 260 } | |
| 261 | |
| 262 fPlotList.remove(plot); | |
| 263 fPlotList.addToHead(plot); | |
| 264 } | |
| 265 | |
| 266 | |
| 267 inline void GrBatchAtlas::updatePlot(GrBatchTarget* batchTarget, AtlasID* id, GrBatchPlot* plot) { | |
| 268 this->makeMRU(plot); | |
| 269 | |
| 270 // If our most recent upload has already occurred then we have to insert a new | |
| 271 // upload. Otherwise, we already have a scheduled upload that hasn't yet occurred. | |
| 272 // This new update will piggy back on that previously scheduled update. | |
| 273 if (batchTarget->isIssued(plot->lastUploadToken())) { | |
| 274 plot->setLastUploadToken(batchTarget->asapToken()); | |
| 275 SkAutoTUnref<GrPlotUploader> uploader(SkNEW_ARGS(GrPlotUploader, (plot))); | |
| 276 batchTarget->upload(uploader); | |
| 277 } | |
| 278 *id = create_id(plot->index(), plot->genID()); | |
| 279 } | |
| 280 | |
| 281 bool GrBatchAtlas::addToAtlas(AtlasID* id, GrBatchTarget* batchTarget, | |
| 282 int width, int height, const void* image, SkIPoint16* loc) { | |
| 283 // We should already have a texture, TODO clean this up | |
| 284 SkASSERT(fTexture && width < fPlotWidth && height < fPlotHeight); | |
| 285 | |
| 286 // now look through all allocated plots for one we can share, in Most Recently Refed order | |
| 287 GrBatchPlotList::Iter plotIter; | |
| 288 plotIter.init(fPlotList, GrBatchPlotList::Iter::kHead_IterStart); | |
| 289 GrBatchPlot* plot; | |
| 290 while ((plot = plotIter.get())) { | |
| 291 if (plot->addSubImage(width, height, image, loc)) { | |
| 292 this->updatePlot(batchTarget, id, plot); | |
| 293 return true; | |
| 294 } | |
| 295 plotIter.next(); | |
| 296 } | |
| 297 | |
| 298 // If the above fails, then see if the least recently refed plot has already been flushed to the | |
| 299 // gpu | |
| 300 plotIter.init(fPlotList, GrBatchPlotList::Iter::kTail_IterStart); | |
| 301 plot = plotIter.get(); | |
| 302 SkASSERT(plot); | |
| 303 if (batchTarget->isIssued(plot->lastRefToken())) { | |
| 304 plot->resetRects(); | |
| 305 SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image, loc); | |
| 306 SkASSERT(verify); | |
| 307 this->updatePlot(batchTarget, id, plot); | |
| 308 return true; | |
| 309 } | |
| 310 | |
| 311 // The least recently refed plot hasn't been flushed to the gpu yet, however, if we have flushed | |
| 312 // it to the batch target then we can reuse it. Our last ref token is guaranteed to be less | |
| 313 // than or equal to the current token. If it's 'less than' the current token, then we can spin | |
| 314 // off the plot (ie let the batch target manage it) and create a new plot in its place in our | |
| 315 // array. If it is equal to the currentToken, then the caller has to flush draws to the batch | |
| 316 // target so we can spin off the plot | |
| 317 if (plot->lastRefToken() == batchTarget->currentToken()) { | |
| 318 return false; | |
| 319 } | |
| 320 | |
| 321 // We take an extra ref here so our plot isn't deleted when we reset its index in the array. | |
| 322 plot->ref(); | |
| 323 int index = plot->index(); | |
| 324 int x = plot->x(); | |
| 325 int y = plot->y(); | |
| 326 int generation = plot->genID(); | |
| 327 | |
| 328 fPlotList.remove(plot); | |
| 329 SkAutoTUnref<GrBatchPlot>& newPlot = fPlotArray[plot->index()]; | |
| 330 newPlot.reset(SkNEW(GrBatchPlot)); | |
| 331 newPlot->init(this, fTexture, index, ++generation, x, y, fPlotWidth, fPlotHeight, fBPP); | |
| 332 | |
| 333 fPlotList.addToHead(newPlot.get()); | |
| 334 SkDEBUGCODE(bool verify = )newPlot->addSubImage(width, height, image, loc); | |
| 335 SkASSERT(verify); | |
| 336 newPlot->setLastUploadToken(batchTarget->currentToken()); | |
| 337 SkAutoTUnref<GrPlotUploader> uploader(SkNEW_ARGS(GrPlotUploader, (newPlot))); | |
| 338 batchTarget->upload(uploader); | |
| 339 *id = create_id(newPlot->index(), newPlot->genID()); | |
| 340 plot->unref(); | |
| 341 return true; | |
| 342 } | |
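A caller-side sketch of how the false return is meant to be handled (illustrative only; atlas, batchTarget, width, height, and image are assumed caller variables, and flush_pending_draws() is a placeholder rather than an API from this CL): false means the least recently refed plot is still referenced by the current token, so the caller has to flush its outstanding draws through the batch target before retrying.

    GrBatchAtlas::AtlasID id;
    SkIPoint16 loc;
    while (!atlas->addToAtlas(&id, batchTarget, width, height, image, &loc)) {
        // the LRU plot is still referenced by the current token; flush draws that
        // reference the atlas, then retry (the next attempt can reset that plot)
        flush_pending_draws(batchTarget);  // placeholder for the caller's flush mechanism
    }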
| 343 | |
| 344 bool GrBatchAtlas::hasID(AtlasID id) { | |
| 345 int index = this->getIndexFromID(id); | |
|
bsalomon
2015/03/11 13:34:12
this is much clearer.
| |
| 346 SkASSERT(index < fNumPlotsX * fNumPlotsY); | |
| 347 return fPlotArray[index]->genID() == this->getGenerationFromID(id); | |
| 348 } | |
| 349 | |
| 350 void GrBatchAtlas::setLastRefToken(AtlasID id, BatchToken batchToken) { | |
| 351 SkASSERT(this->hasID(id)); | |
| 352 int index = this->getIndexFromID(id); | |
| 353 this->makeMRU(fPlotArray[index]); | |
| 354 fPlotArray[index]->setLastRefToken(batchToken); | |
| 355 } | |
| 356 | |
| 357 #ifdef SK_DEBUG | |
| 358 void GrBatchAtlas::uploadPlotsToTexture(GrTextureUploader uploader) { | |
| 359 GrBatchPlotList::Iter plotIter; | |
| 360 plotIter.init(fPlotList, GrBatchPlotList::Iter::kHead_IterStart); | |
| 361 GrBatchPlot* plot; | |
| 362 while ((plot = plotIter.get())) { | |
| 363 plot->uploadToTexture(uploader); | |
| 364 plotIter.next(); | |
| 365 } | |
| 366 } | |
| 367 #endif | |