| Index: src/gpu/GrBatchAtlas.cpp
|
| diff --git a/src/gpu/GrBatchAtlas.cpp b/src/gpu/GrBatchAtlas.cpp
|
| index ebd0907841ce3bb1f17694b5315f30aace5ad420..8f866e72eaaae436b546e085bee0f8e1c176c265 100644
|
| --- a/src/gpu/GrBatchAtlas.cpp
|
| +++ b/src/gpu/GrBatchAtlas.cpp
|
| @@ -11,42 +11,35 @@
|
| #include "GrTracing.h"
|
| #include "GrVertexBuffer.h"
|
|
|
| -static inline void adjust_for_offset(SkIPoint16* loc, const SkIPoint16& offset) {
|
| - loc->fX += offset.fX;
|
| - loc->fY += offset.fY;
|
| -}
|
| -
|
| -static GrBatchAtlas::AtlasID create_id(uint32_t index, uint64_t generation) {
|
| - SkASSERT(index < (1 << 16));
|
| - SkASSERT(generation < ((uint64_t)1 << 48));
|
| - return generation << 16 | index;
|
| -}
|
| -
|
| -// The backing GrTexture for a GrBatchAtlas is broken into a spatial grid of GrBatchPlots.
|
| -// The GrBatchPlots keep track of subimage placement via their GrRectanizer. In turn, a GrBatchPlot
|
| -// manages the lifetime of its data using two tokens, a last ref toke and a last upload token.
|
| -// Once a GrBatchPlot is "full" (i.e. there is no room for the new subimage according to the
|
| -// GrRectanizer), it can no longer be used unless the last ref on the GrPlot has already been
|
| +// The backing GrTexture for a GrBatchAtlas is broken into a spatial grid of BatchPlots.
|
| +// The BatchPlots keep track of subimage placement via their GrRectanizer. A BatchPlot
|
| +// manages the lifetime of its data using two tokens, a last use token and a last upload token.
|
| +// Once a BatchPlot is "full" (i.e. there is no room for the new subimage according to the
|
| +// GrRectanizer), it can no longer be used unless the last use of the BatchPlot has already been
|
| // flushed through to the gpu.
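The reuse rule in the comment above is the one addToAtlas() enforces further down in this patch. As a minimal sketch, using only calls that appear later in the diff (hasTokenBeenFlushed() on GrDrawBatch::Target and BatchPlot::lastUseToken()):

    // A full plot may only be wiped and recycled once every draw that referenced
    // its old contents has been flushed through to the gpu.
    static bool can_recycle_plot(GrDrawBatch::Target* target, const BatchPlot* plot) {
        return target->hasTokenBeenFlushed(plot->lastUseToken());
    }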
|
|
|
| class BatchPlot : public SkRefCnt {
|
| -public:
|
| SK_DECLARE_INTERNAL_LLIST_INTERFACE(BatchPlot);
|
|
|
| - // index() refers to the index of the plot in the owning GrAtlas's plot array. genID() is a
|
| - // monotonically incrementing number which is bumped every time the cpu backing store is
|
| - // wiped, or when the plot itself is evicted from the atlas(ie, there is continuity in genID()
|
| - // across atlas spills)
|
| +public:
|
| + // index() is a unique id for the plot relative to the owning GrAtlas. genID() is a
|
| + // monotonically incremented number which is bumped every time this plot is
|
| + // evicted from the cache (i.e., there is continuity in genID() across atlas spills).
|
| uint32_t index() const { return fIndex; }
|
| uint64_t genID() const { return fGenID; }
|
| - GrBatchAtlas::AtlasID id() {
|
| + GrBatchAtlas::AtlasID id() const {
|
| SkASSERT(GrBatchAtlas::kInvalidAtlasID != fID);
|
| return fID;
|
| }
|
| + SkDEBUGCODE(size_t bpp() const { return fBytesPerPixel; })
|
|
|
| - GrTexture* texture() const { return fTexture; }
|
| + bool addSubImage(int width, int height, const void* image, SkIPoint16* loc) {
|
| + SkASSERT(width <= fWidth && height <= fHeight);
|
| +
|
| + if (!fRects) {
|
| + fRects = GrRectanizer::Factory(fWidth, fHeight);
|
| + }
|
|
|
| - bool addSubImage(int width, int height, const void* image, SkIPoint16* loc, size_t rowBytes) {
|
| if (!fRects->addRect(width, height, loc)) {
|
| return false;
|
| }
|
| @@ -55,6 +48,7 @@ public:
|
| fData = reinterpret_cast<unsigned char*>(sk_calloc_throw(fBytesPerPixel * fWidth *
|
| fHeight));
|
| }
|
| + size_t rowBytes = width * fBytesPerPixel;
|
| const unsigned char* imagePtr = (const unsigned char*)image;
|
| // point ourselves at the right starting spot
|
| unsigned char* dataPtr = fData;
|
| @@ -68,17 +62,19 @@ public:
|
| }
|
|
|
| fDirtyRect.join(loc->fX, loc->fY, loc->fX + width, loc->fY + height);
|
| - adjust_for_offset(loc, fOffset);
|
| +
|
| + loc->fX += fOffset.fX;
|
| + loc->fY += fOffset.fY;
|
| SkDEBUGCODE(fDirty = true;)
|
|
|
| return true;
|
| }
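The copy loop itself falls between the two hunks above, so it is not shown here. A hedged sketch of what addSubImage() plausibly does with the tight rowBytes it now computes, written as a free function for clarity (names and shape are illustrative, not the literal elided code):

    #include <string.h>  // memcpy

    // Copy a tightly packed width x height image into a backing store that is
    // plotWidth pixels wide, starting at pixel (x, y).
    static void copy_into_backing_store(unsigned char* data, int plotWidth, size_t bpp,
                                        const void* image, int width, int height,
                                        int x, int y) {
        const size_t rowBytes = width * bpp;       // incoming rows are tight
        const size_t dstPitch = plotWidth * bpp;   // backing-store pitch
        unsigned char* dst = data + y * dstPitch + x * bpp;
        const unsigned char* src = static_cast<const unsigned char*>(image);
        for (int row = 0; row < height; ++row) {
            memcpy(dst, src, rowBytes);
            src += rowBytes;
            dst += dstPitch;
        }
    }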
|
|
|
| - // to manage the lifetime of a plot, we use two tokens. We use last upload token to know when
|
| - // we can 'piggy back' uploads, ie if the last upload hasn't been flushed to gpu, we don't need
|
| - // to issue a new upload even if we update the cpu backing store. We use lastref to determine
|
| - // when we can evict a plot from the cache, ie if the last ref has already flushed through
|
| - // the gpu then we can reuse the plot
|
| + // To manage the lifetime of a plot, we use two tokens. We use the last upload token to know
|
| + // when we can 'piggy back' uploads, ie if the last upload hasn't been flushed to gpu, we don't
|
| + // need to issue a new upload even if we update the cpu backing store. We use lastUse to
|
| + // determine when we can evict a plot from the cache, ie if the last use has already flushed
|
| + // through the gpu then we can reuse the plot.
|
| GrBatchToken lastUploadToken() const { return fLastUpload; }
|
| GrBatchToken lastUseToken() const { return fLastUse; }
|
| void setLastUploadToken(GrBatchToken batchToken) {
|
| @@ -90,27 +86,29 @@ public:
|
| fLastUse = batchToken;
|
| }
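The upload half of this rule works the same way; a compact restatement (updatePlot(), later in this patch, is the real implementation):

    // A cpu-side edit needs a fresh upload only if the previously scheduled upload
    // has already been flushed to the gpu; otherwise the pending upload carries
    // the new data along for free.
    static bool needs_new_upload(GrDrawBatch::Target* target, const BatchPlot* plot) {
        return target->hasTokenBeenFlushed(plot->lastUploadToken());
    }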
|
|
|
| - void uploadToTexture(GrBatchUploader::TextureUploader* uploader) {
|
| + void uploadToTexture(GrBatchUploader::TextureUploader* uploader, GrTexture* texture) {
|
| // We should only be issuing uploads if we are in fact dirty
|
| - SkASSERT(fDirty && fData && fTexture);
|
| + SkASSERT(fDirty && fData && texture);
|
| TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrBatchPlot::uploadToTexture");
|
| - size_t rowBytes = fBytesPerPixel * fRects->width();
|
| + size_t rowBytes = fBytesPerPixel * fWidth;
|
| const unsigned char* dataPtr = fData;
|
| dataPtr += rowBytes * fDirtyRect.fTop;
|
| dataPtr += fBytesPerPixel * fDirtyRect.fLeft;
|
| - uploader->writeTexturePixels(fTexture,
|
| + uploader->writeTexturePixels(texture,
|
| fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop,
|
| fDirtyRect.width(), fDirtyRect.height(),
|
| - fTexture->config(), dataPtr, rowBytes);
|
| + fConfig, dataPtr, rowBytes);
|
| fDirtyRect.setEmpty();
|
| SkDEBUGCODE(fDirty = false;)
|
| }
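The pointer arithmetic above is easy to misread, so here is a small worked example with assumed numbers (a 256-pixel-wide plot in a 4-byte-per-pixel config, dirty rect top-left at (16, 8); none of these values come from the patch):

    const int    plotWidth = 256;
    const size_t bpp       = 4;                   // e.g. an 8888 config
    const int    dirtyLeft = 16, dirtyTop = 8;

    const size_t rowBytes = bpp * plotWidth;      // 1024: full backing-store pitch
    const size_t skip     = rowBytes * dirtyTop   // 8192: skip 8 whole rows
                          + bpp * dirtyLeft;      //  +64: skip 16 pixels into that row

dataPtr starts at fData + skip (8256 bytes in), and rowBytes is passed as the pitch so the upload strides over the full plot width even though only fDirtyRect.width() pixels per row are written.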
|
|
|
| void resetRects() {
|
| - SkASSERT(fRects);
|
| - fRects->reset();
|
| + if (fRects) {
|
| + fRects->reset();
|
| + }
|
| +
|
| fGenID++;
|
| - fID = create_id(fIndex, fGenID);
|
| + fID = CreateId(fIndex, fGenID);
|
|
|
| // zero out the plot
|
| if (fData) {
|
| @@ -121,75 +119,64 @@ public:
|
| SkDEBUGCODE(fDirty = false;)
|
| }
|
|
|
| - uint32_t x() const { return fX; }
|
| - uint32_t y() const { return fY; }
|
| -
|
| private:
|
| - BatchPlot()
|
| + BatchPlot(int index, uint64_t genID, int offX, int offY, int width, int height,
|
| + GrPixelConfig config)
|
| : fLastUpload(0)
|
| , fLastUse(0)
|
| - , fIndex(-1)
|
| - , fGenID(-1)
|
| - , fID(0)
|
| + , fIndex(index)
|
| + , fGenID(genID)
|
| + , fID(CreateId(fIndex, fGenID))
|
| , fData(nullptr)
|
| - , fWidth(0)
|
| - , fHeight(0)
|
| - , fX(0)
|
| - , fY(0)
|
| - , fTexture(nullptr)
|
| + , fWidth(width)
|
| + , fHeight(height)
|
| + , fX(offX)
|
| + , fY(offY)
|
| , fRects(nullptr)
|
| - , fAtlas(nullptr)
|
| - , fBytesPerPixel(1)
|
| + , fOffset(SkIPoint16::Make(fX * fWidth, fY * fHeight))
|
| + , fConfig(config)
|
| + , fBytesPerPixel(GrBytesPerPixel(config))
|
| #ifdef SK_DEBUG
|
| , fDirty(false)
|
| #endif
|
| {
|
| - fOffset.set(0, 0);
|
| + fDirtyRect.setEmpty();
|
| }
|
|
|
| - ~BatchPlot() {
|
| + ~BatchPlot() override {
|
| sk_free(fData);
|
| - fData = nullptr;
|
| delete fRects;
|
| }
|
|
|
| - void init(GrBatchAtlas* atlas, GrTexture* texture, int index, uint64_t generation,
|
| - int offX, int offY, int width, int height, size_t bpp) {
|
| - fIndex = index;
|
| - fGenID = generation;
|
| - fID = create_id(index, generation);
|
| - fWidth = width;
|
| - fHeight = height;
|
| - fX = offX;
|
| - fY = offY;
|
| - fRects = GrRectanizer::Factory(width, height);
|
| - fAtlas = atlas;
|
| - fOffset.set(offX * width, offY * height);
|
| - fBytesPerPixel = bpp;
|
| - fData = nullptr;
|
| - fDirtyRect.setEmpty();
|
| - SkDEBUGCODE(fDirty = false;)
|
| - fTexture = texture;
|
| + // Create a clone of this plot. The cloned plot will take the place of the
|
| + // current plot in the atlas.
|
| + BatchPlot* clone() const {
|
| + return new BatchPlot(fIndex, fGenID+1, fX, fY, fWidth, fHeight, fConfig);
|
| + }
|
| +
|
| + static GrBatchAtlas::AtlasID CreateId(uint32_t index, uint64_t generation) {
|
| + SkASSERT(index < (1 << 16));
|
| + SkASSERT(generation < ((uint64_t)1 << 48));
|
| + return generation << 16 | index;
|
| }
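CreateId() packs the 16-bit plot index into the low bits and the 48-bit generation above it. The inverse helpers that hasID() and setLastUseToken() rely on below (GetIndexFromID / GetGenerationFromID) are not part of this file; a sketch consistent with the packing above:

    // Assumed layout: low 16 bits = plot index, next 48 bits = generation.
    static uint32_t get_index(GrBatchAtlas::AtlasID id) {
        return static_cast<uint32_t>(id & 0xffff);
    }
    static uint64_t get_generation(GrBatchAtlas::AtlasID id) {
        return (id >> 16) & 0xffffffffffffULL;
    }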
|
|
|
| - GrBatchToken fLastUpload;
|
| - GrBatchToken fLastUse;
|
| + GrBatchToken fLastUpload;
|
| + GrBatchToken fLastUse;
|
|
|
| - uint32_t fIndex;
|
| - uint64_t fGenID;
|
| + const uint32_t fIndex;
|
| + uint64_t fGenID;
|
| GrBatchAtlas::AtlasID fID;
|
| - unsigned char* fData;
|
| - uint32_t fWidth;
|
| - uint32_t fHeight;
|
| - uint32_t fX;
|
| - uint32_t fY;
|
| - GrTexture* fTexture;
|
| - GrRectanizer* fRects;
|
| - GrBatchAtlas* fAtlas;
|
| - SkIPoint16 fOffset; // the offset of the plot in the backing texture
|
| - size_t fBytesPerPixel;
|
| - SkIRect fDirtyRect;
|
| - SkDEBUGCODE(bool fDirty;)
|
| + unsigned char* fData;
|
| + const int fWidth;
|
| + const int fHeight;
|
| + const int fX;
|
| + const int fY;
|
| + GrRectanizer* fRects;
|
| + const SkIPoint16 fOffset; // the offset of the plot in the backing texture
|
| + const GrPixelConfig fConfig;
|
| + const size_t fBytesPerPixel;
|
| + SkIRect fDirtyRect;
|
| + SkDEBUGCODE(bool fDirty;)
|
|
|
| friend class GrBatchAtlas;
|
|
|
| @@ -200,18 +187,20 @@ private:
|
|
|
| class GrPlotUploader : public GrBatchUploader {
|
| public:
|
| - GrPlotUploader(BatchPlot* plot)
|
| + GrPlotUploader(BatchPlot* plot, GrTexture* texture)
|
| : INHERITED(plot->lastUploadToken())
|
| - , fPlot(SkRef(plot)) {
|
| + , fPlot(SkRef(plot))
|
| + , fTexture(texture) {
|
| SkASSERT(plot);
|
| }
|
|
|
| void upload(TextureUploader* uploader) override {
|
| - fPlot->uploadToTexture(uploader);
|
| + fPlot->uploadToTexture(uploader, fTexture);
|
| }
|
|
|
| private:
|
| SkAutoTUnref<BatchPlot> fPlot;
|
| + GrTexture* fTexture;
|
|
|
| typedef GrBatchUploader INHERITED;
|
| };
|
| @@ -220,28 +209,28 @@ private:
|
|
|
| GrBatchAtlas::GrBatchAtlas(GrTexture* texture, int numPlotsX, int numPlotsY)
|
| : fTexture(texture)
|
| - , fNumPlotsX(numPlotsX)
|
| - , fNumPlotsY(numPlotsY)
|
| - , fPlotWidth(texture->width() / numPlotsX)
|
| - , fPlotHeight(texture->height() / numPlotsY)
|
| , fAtlasGeneration(kInvalidAtlasGeneration + 1) {
|
| - SkASSERT(fNumPlotsX * fNumPlotsY <= BulkUseTokenUpdater::kMaxPlots);
|
| - SkASSERT(fPlotWidth * fNumPlotsX == static_cast<uint32_t>(texture->width()));
|
| - SkASSERT(fPlotHeight * fNumPlotsY == static_cast<uint32_t>(texture->height()));
|
| +
|
| + int plotWidth = texture->width() / numPlotsX;
|
| + int plotHeight = texture->height() / numPlotsY;
|
| + SkASSERT(numPlotsX * numPlotsY <= BulkUseTokenUpdater::kMaxPlots);
|
| + SkASSERT(plotWidth * numPlotsX == texture->width());
|
| + SkASSERT(plotHeight * numPlotsY == texture->height());
|
| +
|
| + SkDEBUGCODE(fNumPlots = numPlotsX * numPlotsY;)
|
|
|
| // We currently do not support compressed atlases...
|
| SkASSERT(!GrPixelConfigIsCompressed(texture->desc().fConfig));
|
|
|
| // set up allocated plots
|
| - fBPP = GrBytesPerPixel(texture->desc().fConfig);
|
| - fPlotArray = new SkAutoTUnref<BatchPlot>[(fNumPlotsX * fNumPlotsY)];
|
| + fPlotArray = new SkAutoTUnref<BatchPlot>[numPlotsX * numPlotsY];
|
|
|
| SkAutoTUnref<BatchPlot>* currPlot = fPlotArray;
|
| - for (int y = fNumPlotsY - 1, r = 0; y >= 0; --y, ++r) {
|
| - for (int x = fNumPlotsX - 1, c = 0; x >= 0; --x, ++c) {
|
| - uint32_t id = r * fNumPlotsX + c;
|
| - currPlot->reset(new BatchPlot);
|
| - (*currPlot)->init(this, texture, id, 1, x, y, fPlotWidth, fPlotHeight, fBPP);
|
| + for (int y = numPlotsY - 1, r = 0; y >= 0; --y, ++r) {
|
| + for (int x = numPlotsX - 1, c = 0; x >= 0; --x, ++c) {
|
| + uint32_t index = r * numPlotsX + c;
|
| + currPlot->reset(new BatchPlot(index, 1, x, y, plotWidth, plotHeight,
|
| + texture->desc().fConfig));
|
|
|
| // build LRU list
|
| fPlotList.addToHead(currPlot->get());
|
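To make the plot geometry concrete, a worked example with assumed dimensions (the patch does not fix a texture size): a 2048x2048 texture split into a 4x4 grid.

    const int texW = 2048, texH = 2048;           // assumed, not from the patch
    const int numPlotsX = 4, numPlotsY = 4;
    const int plotWidth  = texW / numPlotsX;      // 512
    const int plotHeight = texH / numPlotsY;      // 512
    // Plot (x, y) owns the region starting at fOffset = (x * plotWidth, y * plotHeight),
    // so plot (3, 2) covers (1536, 1024) .. (2047, 1535). addSubImage() adds that
    // offset to the returned location, so callers see texture-space coordinates.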
| @@ -278,7 +267,7 @@ inline void GrBatchAtlas::updatePlot(GrDrawBatch::Target* target, AtlasID* id, B
|
| // This new update will piggy back on that previously scheduled update.
|
| if (target->hasTokenBeenFlushed(plot->lastUploadToken())) {
|
| plot->setLastUploadToken(target->asapToken());
|
| - SkAutoTUnref<GrPlotUploader> uploader(new GrPlotUploader(plot));
|
| + SkAutoTUnref<GrPlotUploader> uploader(new GrPlotUploader(plot, fTexture));
|
| target->upload(uploader);
|
| }
|
| *id = plot->id();
|
| @@ -287,16 +276,15 @@ inline void GrBatchAtlas::updatePlot(GrDrawBatch::Target* target, AtlasID* id, B
|
| bool GrBatchAtlas::addToAtlas(AtlasID* id, GrDrawBatch::Target* batchTarget,
|
| int width, int height, const void* image, SkIPoint16* loc) {
|
| // We should already have a texture, TODO clean this up
|
| - SkASSERT(fTexture &&
|
| - static_cast<uint32_t>(width) <= fPlotWidth &&
|
| - static_cast<uint32_t>(height) <= fPlotHeight);
|
| + SkASSERT(fTexture);
|
|
|
| // now look through all allocated plots for one we can share, in Most Recently Refed order
|
| GrBatchPlotList::Iter plotIter;
|
| plotIter.init(fPlotList, GrBatchPlotList::Iter::kHead_IterStart);
|
| BatchPlot* plot;
|
| while ((plot = plotIter.get())) {
|
| - if (plot->addSubImage(width, height, image, loc, fBPP * width)) {
|
| + SkASSERT(GrBytesPerPixel(fTexture->desc().fConfig) == plot->bpp());
|
| + if (plot->addSubImage(width, height, image, loc)) {
|
| this->updatePlot(batchTarget, id, plot);
|
| return true;
|
| }
|
| @@ -305,64 +293,63 @@ bool GrBatchAtlas::addToAtlas(AtlasID* id, GrDrawBatch::Target* batchTarget,
|
|
|
| // If the above fails, then see if the least recently refed plot has already been flushed to the
|
| // gpu
|
| - plotIter.init(fPlotList, GrBatchPlotList::Iter::kTail_IterStart);
|
| - plot = plotIter.get();
|
| + plot = fPlotList.tail();
|
| SkASSERT(plot);
|
| if (batchTarget->hasTokenBeenFlushed(plot->lastUseToken())) {
|
| this->processEviction(plot->id());
|
| plot->resetRects();
|
| - SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image, loc, fBPP * width);
|
| + SkASSERT(GrBytesPerPixel(fTexture->desc().fConfig) == plot->bpp());
|
| + SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image, loc);
|
| SkASSERT(verify);
|
| this->updatePlot(batchTarget, id, plot);
|
| fAtlasGeneration++;
|
| return true;
|
| }
|
|
|
| - // The least recently refed plot hasn't been flushed to the gpu yet, however, if we have flushed
|
| - // it to the batch target than we can reuse it. Our last ref token is guaranteed to be less
|
| + // The least recently used plot hasn't been flushed to the gpu yet, however, if we have flushed
|
| + // it to the batch target then we can reuse it. Our last use token is guaranteed to be less
|
| // than or equal to the current token. If it's 'less than' the current token, then we can spin
|
| - // off the plot(ie let the batch target manage it) and create a new plot in its place in our
|
| + // off the plot (ie let the batch target manage it) and create a new plot in its place in our
|
| // array. If it is equal to the currentToken, then the caller has to flush draws to the batch
|
| // target so we can spin off the plot
|
| if (plot->lastUseToken() == batchTarget->currentToken()) {
|
| return false;
|
| }
|
|
|
| - // We take an extra ref here so our plot isn't deleted when we reset its index in the array.
|
| - plot->ref();
|
| - int index = plot->index();
|
| - int x = plot->x();
|
| - int y = plot->y();
|
| - uint64_t generation = plot->genID();
|
| + SkASSERT(plot->lastUseToken() < batchTarget->currentToken());
|
| + SkASSERT(!batchTarget->hasTokenBeenFlushed(batchTarget->currentToken()));
|
| +
|
| + SkASSERT(!plot->unique()); // The GrPlotUpdater should have a ref too
|
|
|
| this->processEviction(plot->id());
|
| fPlotList.remove(plot);
|
| SkAutoTUnref<BatchPlot>& newPlot = fPlotArray[plot->index()];
|
| - newPlot.reset(new BatchPlot);
|
| - newPlot->init(this, fTexture, index, ++generation, x, y, fPlotWidth, fPlotHeight, fBPP);
|
| + newPlot.reset(plot->clone());
|
|
|
| fPlotList.addToHead(newPlot.get());
|
| - SkDEBUGCODE(bool verify = )newPlot->addSubImage(width, height, image, loc, fBPP * width);
|
| + SkASSERT(GrBytesPerPixel(fTexture->desc().fConfig) == newPlot->bpp());
|
| + SkDEBUGCODE(bool verify = )newPlot->addSubImage(width, height, image, loc);
|
| SkASSERT(verify);
|
| +
|
| newPlot->setLastUploadToken(batchTarget->currentToken());
|
| - SkAutoTUnref<GrPlotUploader> uploader(new GrPlotUploader(newPlot));
|
| + SkAutoTUnref<GrPlotUploader> uploader(new GrPlotUploader(newPlot, fTexture));
|
| batchTarget->upload(uploader);
|
| *id = newPlot->id();
|
| - plot->unref();
|
| +
|
| fAtlasGeneration++;
|
| return true;
|
| }
|
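A false return from addToAtlas() means the least recently used plot is still referenced by draws that have not been flushed. A hypothetical caller pattern, assuming atlas, target, width, height and pixels are in scope and that the caller has some way to issue the draws it has recorded so far (emitAccumulatedDraws() is an illustrative name, not a real Skia call):

    GrBatchAtlas::AtlasID id;
    SkIPoint16 loc;
    if (!atlas->addToAtlas(&id, target, width, height, pixels, &loc)) {
        emitAccumulatedDraws();  // hypothetical: flush recorded draws so the LRU
                                 // plot's last-use token can make it to the gpu
        // Retry; once that token has flushed, addToAtlas() can recycle the plot.
        atlas->addToAtlas(&id, target, width, height, pixels, &loc);
    }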
|
|
| bool GrBatchAtlas::hasID(AtlasID id) {
|
| uint32_t index = GetIndexFromID(id);
|
| - SkASSERT(index < fNumPlotsX * fNumPlotsY);
|
| + SkASSERT(index < fNumPlots);
|
| return fPlotArray[index]->genID() == GetGenerationFromID(id);
|
| }
|
|
|
| void GrBatchAtlas::setLastUseToken(AtlasID id, GrBatchToken batchToken) {
|
| SkASSERT(this->hasID(id));
|
| uint32_t index = GetIndexFromID(id);
|
| - SkASSERT(index < fNumPlotsX * fNumPlotsY);
|
| + SkASSERT(index < fNumPlots);
|
| this->makeMRU(fPlotArray[index]);
|
| fPlotArray[index]->setLastUseToken(batchToken);
|
| }
|
|
|