| Index: src/gpu/GrBatchAtlas.cpp |
| diff --git a/src/gpu/GrBatchAtlas.cpp b/src/gpu/GrBatchAtlas.cpp |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..2542aebce78f3dc885246ad6236bbefea5f404a4 |
| --- /dev/null |
| +++ b/src/gpu/GrBatchAtlas.cpp |
| @@ -0,0 +1,278 @@ |
| + |
| +/* |
| + * Copyright 2010 Google Inc. |
| + * |
| + * Use of this source code is governed by a BSD-style license that can be |
| + * found in the LICENSE file. |
| + */ |
| + |
| +#include "GrBatchAtlas.h" |
| +#include "GrBatchTarget.h" |
| +#include "GrGpu.h" |
| +#include "GrRectanizer.h" |
| +#include "GrTracing.h" |
| + |
| +/////////////////////////////////////////////////////////////////////////////// |
| + |
| +static GrBatchAtlas::AtlasID create_id(int id, int generation) { |
| + // Generation ID can roll over because we only check for equality |
| + SkASSERT(id < (1 << 16)); |
| + return id << 16 | generation; |
| +} |
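A note on the packed ID: create_id() places the plot index in the upper 16 bits and the generation in the lower 16 bits. The inverse helpers below are hypothetical (they are not part of this patch) and assume the generation stays within 16 bits, which create_id() relies on but does not assert:

    static int get_index_from_id(GrBatchAtlas::AtlasID id) {
        return (id >> 16) & 0xffff;   // upper 16 bits: plot index
    }

    static int get_generation_from_id(GrBatchAtlas::AtlasID id) {
        return id & 0xffff;           // lower 16 bits: generation
    }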
| + |
| +// for testing |
| +#define FONT_CACHE_STATS 0 |
|
bsalomon
2015/03/06 16:34:59
ATLAS_STATS?
joshualitt
2015/03/09 19:45:14
Acknowledged.
|
| +#if FONT_CACHE_STATS |
| +static int g_UploadCount = 0; |
| +#endif |
| + |
| +GrBatchPlot::GrBatchPlot() |
| + : fLastUpload(0) |
| + , fLastRef(0) |
| + , fID(-1) |
| + , fGeneration(-1) |
| + , fPlotData(NULL) |
| + , fPlotWidth(0) |
| + , fPlotHeight(0) |
| + , fX(0) |
| + , fY(0) |
| + , fGpu(NULL) |
| + , fTexture(NULL) |
| + , fRects(NULL) |
| + , fAtlas(NULL) |
| + , fBytesPerPixel(1) |
| +#ifdef SK_DEBUG |
| + , fDirty(false) |
| +#endif |
| +{ |
| + fOffset.set(0, 0); |
| +} |
| + |
| +GrBatchPlot::~GrBatchPlot() { |
| + SkDELETE_ARRAY(fPlotData); |
| + fPlotData = NULL; |
| + delete fRects; |
| +} |
| + |
| +void GrBatchPlot::init(GrGpu* gpu, GrBatchAtlas* atlas, GrTexture* texture, int id, |
| + uint32_t generation, |
| + int offX, int offY, |
| + int width, int height, |
| + size_t bpp) { |
| + fID = id; |
| + fGeneration = generation; |
| + fPlotWidth = width; |
| + fPlotHeight = height; |
| + fX = offX; |
| + fY = offY; |
| + fRects = GrRectanizer::Factory(width, height); |
| + fGpu = gpu; |
| + fAtlas = atlas; |
| + fOffset.set(offX * width, offY * height); |
| + fBytesPerPixel = bpp; |
| + fPlotData = NULL; |
| + fDirtyRect.setEmpty(); |
| + SkDEBUGCODE(fDirty = false;) |
| + fTexture = texture; |
| + |
| + // allocate backing store |
| + fPlotData = SkNEW_ARRAY(unsigned char, fBytesPerPixel * width * height); |
| + memset(fPlotData, 0, fBytesPerPixel * width * height); |
| +} |
| + |
| +static inline void adjust_for_offset(SkIPoint16* loc, const SkIPoint16& offset) { |
| + loc->fX += offset.fX; |
| + loc->fY += offset.fY; |
| +} |
| + |
| +bool GrBatchPlot::addSubImage(int width, int height, const void* image, SkIPoint16* loc) { |
| + if (!fRects->addRect(width, height, loc)) { |
| + return false; |
| + } |
| + |
| + SkASSERT(fPlotData); |
| + const unsigned char* imagePtr = (const unsigned char*)image; |
| + // point ourselves at the right starting spot |
| + unsigned char* dataPtr = fPlotData; |
| + dataPtr += fBytesPerPixel * fPlotWidth * loc->fY; |
| + dataPtr += fBytesPerPixel * loc->fX; |
| + // copy into the data buffer |
| + for (int i = 0; i < height; ++i) { |
| + memcpy(dataPtr, imagePtr, fBytesPerPixel * width); |
| + dataPtr += fBytesPerPixel * fPlotWidth; |
| + imagePtr += fBytesPerPixel * width; |
| + } |
| + |
| + fDirtyRect.join(loc->fX, loc->fY, loc->fX + width, loc->fY + height); |
| + adjust_for_offset(loc, fOffset); |
| + SkDEBUGCODE(fDirty = true;) |
| + |
| +#if FONT_CACHE_STATS |
| + ++g_UploadCount; |
| +#endif |
| + |
| + return true; |
| +} |
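For context, a minimal sketch of this method's contract from a caller's point of view (the caller code and names such as glyphData are hypothetical; real users go through the atlas): image must be tightly packed at fBytesPerPixel, and on success loc comes back in atlas-texture coordinates, i.e. the plot-local position plus fOffset:

    SkIPoint16 loc;
    if (plot->addSubImage(glyphWidth, glyphHeight, glyphData, &loc)) {
        // loc.fX / loc.fY now index directly into the backing GrTexture and
        // can be converted to UVs by dividing by the texture dimensions.
    }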
| + |
| +void GrBatchPlot::uploadToTexture() { |
| + // We should only be issuing uploads if we are in fact dirty |
| + SkASSERT(fDirty); |
| + TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrPlot::uploadToTexture"); |
|
bsalomon
2015/03/06 16:34:59
update string?
joshualitt
2015/03/09 19:45:14
Acknowledged.
|
| + SkASSERT(fTexture); |
| + size_t rowBytes = fBytesPerPixel * fRects->width(); |
| + const unsigned char* dataPtr = fPlotData; |
| + dataPtr += rowBytes * fDirtyRect.fTop; |
| + dataPtr += fBytesPerPixel * fDirtyRect.fLeft; |
| + fGpu->writeTexturePixels(fTexture, |
|
bsalomon
2015/03/06 16:34:59
I think we really need a constrained API for the u
joshualitt
2015/03/09 19:45:14
Acknowledged.
|
| + fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop, |
| + fDirtyRect.width(), fDirtyRect.height(), |
| + fTexture->config(), dataPtr, rowBytes); |
| + fDirtyRect.setEmpty(); |
| + SkDEBUGCODE(fDirty = false;) |
| +} |
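Regarding the review note about a constrained upload API: the struct below is a purely hypothetical sketch (not an existing Skia interface) of the kind of narrow request a plot could hand off instead of calling GrGpu::writeTexturePixels directly:

    struct GrTextureUploadRequest {
        GrTexture*           fTexture;   // target atlas texture
        SkIRect              fDstRect;   // dirty region, in texture space
        GrPixelConfig        fConfig;    // pixel config of the source data
        const unsigned char* fData;      // pointer to the first dirty texel
        size_t               fRowBytes;  // stride of fData
    };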
| + |
| +void GrBatchPlot::resetRects() { |
| + SkASSERT(fRects); |
| + fRects->reset(); |
| + fGeneration++; |
| + |
| + // zero out the plot |
| + SkASSERT(fPlotData); |
| + memset(fPlotData, 0, fBytesPerPixel * fPlotWidth * fPlotHeight); |
| + |
| + fDirtyRect.setEmpty(); |
| + SkDEBUGCODE(fDirty = false;) |
| +} |
| + |
| +/////////////////////////////////////////////////////////////////////////////// |
| + |
| +GrBatchAtlas::GrBatchAtlas(GrGpu* gpu, GrTexture* texture, int numPlotsX, int numPlotsY) |
| + : fGpu(gpu) |
| + , fTexture(texture) |
| + , fNumPlotsX(numPlotsX) |
| + , fNumPlotsY(numPlotsY) |
| + , fPlotWidth(texture->width() / numPlotsX) |
| + , fPlotHeight(texture->height() / numPlotsY) { |
| + SkASSERT(fPlotWidth * fNumPlotsX == texture->width()); |
| + SkASSERT(fPlotHeight * fNumPlotsY == texture->height()); |
| + |
| + // We currently do not support compressed atlases... |
| + SkASSERT(!GrPixelConfigIsCompressed(texture->desc().fConfig)); |
| + |
| + // set up allocated plots |
| + fBPP = GrBytesPerPixel(texture->desc().fConfig); |
| + fPlotArray = SkNEW_ARRAY(SkAutoTUnref<GrBatchPlot>, (fNumPlotsX * fNumPlotsY)); |
| + |
| + SkAutoTUnref<GrBatchPlot>* currPlot = fPlotArray; |
| + for (int y = fNumPlotsY - 1, r = 0; y >= 0; --y, ++r) { |
| + for (int x = fNumPlotsX - 1, c = 0; x >= 0; --x, ++c) { |
| + int id = r * fNumPlotsX + c; |
| + currPlot->reset(SkNEW(GrBatchPlot)); |
| + (*currPlot)->init(fGpu, this, texture, id, 0, x, y, fPlotWidth, fPlotHeight, fBPP); |
| + |
| + // build LRU list |
| + fPlotList.addToHead(currPlot->get()); |
| + ++currPlot; |
| + } |
| + } |
| +} |
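As a worked example of the sizing here (the numbers are illustrative, not taken from this patch), a 512x256 A8 texture split into a 4x4 grid yields 128x64 plots, and the asserts above require the split to be exact:

    const int texWidth = 512, texHeight = 256;      // hypothetical atlas texture
    const int numPlotsX = 4,  numPlotsY = 4;
    const int plotWidth  = texWidth  / numPlotsX;   // 128
    const int plotHeight = texHeight / numPlotsY;   // 64
    SkASSERT(plotWidth  * numPlotsX == texWidth);
    SkASSERT(plotHeight * numPlotsY == texHeight);
    // The plot at grid position (x, y) then covers texels starting at
    // (x * plotWidth, y * plotHeight), which is what init() records in fOffset.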
| + |
| +GrBatchAtlas::~GrBatchAtlas() { |
| + SkSafeUnref(fTexture); |
| + SkDELETE_ARRAY(fPlotArray); |
| + |
| +#if FONT_CACHE_STATS |
| + SkDebugf("Num uploads: %d\n", g_UploadCount); |
| +#endif |
| +} |
| + |
| +void GrBatchAtlas::makeMRU(GrBatchPlot* plot) { |
| + if (fPlotList.head() == plot) { |
| + return; |
| + } |
| + |
| + fPlotList.remove(plot); |
| + fPlotList.addToHead(plot); |
| +} |
| + |
| + |
| +inline void GrBatchAtlas::updatePlot(GrBatchTarget* batchTarget, AtlasID* id, GrBatchPlot* plot) { |
| + this->makeMRU(plot); |
| + |
| + // If the plot's last upload has already been issued to the GPU, the data we just added needs |
| + // a fresh ASAP upload. Otherwise an upload for this plot is still pending and the new data |
| + // can simply piggyback on it. |
|
bsalomon
2015/03/06 16:34:59
I understand this but might not in a few months...
joshualitt
2015/03/09 19:45:15
Acknowledged.
|
| + if (batchTarget->isIssued(plot->lastUploadToken())) { |
| + plot->setLastUploadToken(batchTarget->asapToken()); |
| + batchTarget->update(GrPlotUpdater(plot)); |
| + } |
| + *id = create_id(plot->id(), plot->generation()); |
| +} |
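A small worked example of the piggyback rule above (the token values are illustrative only):

    // Suppose draws with tokens 1..3 have already been issued and token 4 is
    // the draw currently being built.
    //  - plot->lastUploadToken() == 2: that upload has already gone out, so the
    //    sub-image just added needs a fresh ASAP upload (the branch above).
    //  - plot->lastUploadToken() == 4: an upload is still pending ahead of this
    //    draw, so the new data simply rides along with it and nothing more is
    //    scheduled.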
| + |
| +bool GrBatchAtlas::addToAtlas(AtlasID* id, GrBatchTarget* batchTarget, |
| + int width, int height, const void* image, SkIPoint16* loc) { |
| + // We should already have a texture, TODO clean this up |
| + SkASSERT(fTexture && width < fPlotWidth && height < fPlotHeight); |
| + |
| + // now look through all allocated plots for one we can share, in MRU order |
| + GrBatchPlotList::Iter plotIter; |
| + plotIter.init(fPlotList, GrBatchPlotList::Iter::kHead_IterStart); |
| + GrBatchPlot* plot; |
| + while ((plot = plotIter.get())) { |
| + if (plot->addSubImage(width, height, image, loc)) { |
| + this->updatePlot(batchTarget, id, plot); |
| + return true; |
| + } |
| + plotIter.next(); |
| + } |
| + |
| + // If the above fails, then find an unused LRU spot |
| + plotIter.init(fPlotList, GrBatchPlotList::Iter::kTail_IterStart); |
| + while ((plot = plotIter.get())) { |
| + if (batchTarget->isIssued(plot->lastRefToken())) { |
| + plot->resetRects(); |
| + SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image, loc); |
| + SkASSERT(verify); |
| + this->updatePlot(batchTarget, id, plot); |
| + return true; |
| + } |
| + plotIter.prev(); |
| + } |
| + |
| + // Take the LRU plot and queue up an upload for it (the update() call below). However, if that plot was referenced in the current draw then we can't |
|
Jvsquare
2015/03/06 15:45:36
I don't see where we are queuing up an upload here
joshualitt
2015/03/09 19:45:14
Acknowledged.
|
| + // reuse it. We return false to force the caller to start a new draw |
| + plotIter.init(fPlotList, GrBatchPlotList::Iter::kTail_IterStart); |
| + plot = plotIter.get(); |
| + SkASSERT(plot); |
| + if (plot->lastRefToken() == batchTarget->currentToken()) { |
| + return false; |
| + } |
| + |
| + // Now we have to remove the old plot from the plot list and the plot array and add the new plot |
| + int plotID = plot->id(); |
| + int x = plot->x(); |
| + int y = plot->y(); |
| + int generation = plot->generation(); |
| + |
| + fPlotList.remove(plot); |
| + SkAutoTUnref<GrBatchPlot>& newPlot = fPlotArray[plot->id()]; |
| + newPlot.reset(SkNEW(GrBatchPlot)); |
| + newPlot->init(fGpu, this, fTexture, plotID, ++generation, x, y, fPlotWidth, fPlotHeight, fBPP); |
| + |
| + fPlotList.addToHead(newPlot.get()); |
| + SkDEBUGCODE(bool verify = )newPlot->addSubImage(width, height, image, loc); |
| + SkASSERT(verify); |
| + newPlot->setLastUploadToken(batchTarget->currentToken()); |
| + batchTarget->update(GrPlotUpdater(newPlot)); |
|
bsalomon
2015/03/06 16:34:59
@Jvsquare: I think it is here.
|
| + *id = create_id(newPlot->id(), newPlot->generation()); |
| + return true; |
| +} |
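A hypothetical caller sketch (flushCurrentDraw and the surrounding names are illustrative, not part of this patch) showing the retry contract that the false return implies:

    GrBatchAtlas::AtlasID id;
    SkIPoint16 loc;
    if (!atlas->addToAtlas(&id, batchTarget, width, height, image, &loc)) {
        // The LRU plot is referenced by the draw being built, so issue that
        // draw first and then retry; the retry is expected to succeed.
        this->flushCurrentDraw(batchTarget);            // hypothetical helper
        SkDEBUGCODE(bool success = )atlas->addToAtlas(&id, batchTarget,
                                                      width, height, image, &loc);
        SkASSERT(success);
    }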
| + |
| +void GrBatchAtlas::uploadPlotsToTexture() { |
| + GrBatchPlotList::Iter plotIter; |
| + plotIter.init(fPlotList, GrBatchPlotList::Iter::kHead_IterStart); |
| + GrBatchPlot* plot; |
| + while ((plot = plotIter.get())) { |
| + plot->uploadToTexture(); |
| + plotIter.next(); |
| + } |
| +} |