Index: src/gpu/GrBatchAtlas.cpp
diff --git a/src/gpu/GrBatchAtlas.cpp b/src/gpu/GrBatchAtlas.cpp
index 17e82ef1fe3fc48af9a28e98ab214d8d3a481906..40ab0e6c0e09d562ecc483d6130a2a65db509a41 100644
--- a/src/gpu/GrBatchAtlas.cpp
+++ b/src/gpu/GrBatchAtlas.cpp
@@ -7,23 +7,19 @@
 #include "GrBatchAtlas.h"
 #include "GrBatchFlushState.h"
-#include "GrBuffer.h"
 #include "GrRectanizer.h"
-#include "GrResourceProvider.h"
 #include "GrTracing.h"
 ////////////////////////////////////////////////////////////////////////////////
 GrBatchAtlas::BatchPlot::BatchPlot(int index, uint64_t genID, int offX, int offY, int width,
-                                   int height, GrPixelConfig config, GrResourceProvider* rp)
-    : fResourceProvider(rp)
-    , fLastUpload(GrBatchDrawToken::AlreadyFlushedToken())
+                                   int height, GrPixelConfig config)
+    : fLastUpload(GrBatchDrawToken::AlreadyFlushedToken())
     , fLastUse(GrBatchDrawToken::AlreadyFlushedToken())
     , fIndex(index)
     , fGenID(genID)
     , fID(CreateId(fIndex, fGenID))
-    , fDataPtr(nullptr)
-    , fTransferBuffer(nullptr)
+    , fData(nullptr)
     , fWidth(width)
     , fHeight(height)
     , fX(offX)
@@ -40,11 +36,7 @@
 }
 GrBatchAtlas::BatchPlot::~BatchPlot() {
-    if (fTransferBuffer) {
-        fTransferBuffer->unref();
-    } else {
-        sk_free(fDataPtr);
-    }
+    sk_free(fData);
     delete fRects;
 }
@@ -60,26 +52,14 @@
         return false;
     }
-    if (!fDataPtr) {
-        if (!fTransferBuffer) {
-            fTransferBuffer =
-                fResourceProvider->createBuffer(fBytesPerPixel * fWidth * fHeight,
-                                                kXferCpuToGpu_GrBufferType,
-                                                kDynamic_GrAccessPattern,
-                                                GrResourceProvider::kNoPendingIO_Flag);
-        }
-        if (fTransferBuffer) {
-            fDataPtr = (unsigned char*)fTransferBuffer->map();
-        } else {
-            fDataPtr = reinterpret_cast<unsigned char*>(sk_calloc_throw(fBytesPerPixel * fWidth *
-                                                                        fHeight));
-        }
-    }
-
+    if (!fData) {
+        fData = reinterpret_cast<unsigned char*>(sk_calloc_throw(fBytesPerPixel * fWidth *
+                                                                  fHeight));
+    }
     size_t rowBytes = width * fBytesPerPixel;
     const unsigned char* imagePtr = (const unsigned char*)image;
     // point ourselves at the right starting spot
-    unsigned char* dataPtr = fDataPtr;
+    unsigned char* dataPtr = fData;
     dataPtr += fBytesPerPixel * fWidth * loc->fY;
     dataPtr += fBytesPerPixel * loc->fX;
     // copy into the data buffer
@@ -99,25 +79,16 @@
 }
 void GrBatchAtlas::BatchPlot::uploadToTexture(GrDrawBatch::WritePixelsFn& writePixels,
-                                              GrDrawBatch::TransferPixelsFn& xferPixels,
                                               GrTexture* texture) {
     // We should only be issuing uploads if we are in fact dirty
-    SkASSERT(fDirty && fDataPtr && texture);
+    SkASSERT(fDirty && fData && texture);
     TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrBatchPlot::uploadToTexture");
     size_t rowBytes = fBytesPerPixel * fWidth;
-    size_t dataOffset = rowBytes * fDirtyRect.fTop + fBytesPerPixel * fDirtyRect.fLeft;
-    if (fTransferBuffer) {
-        fTransferBuffer->unmap();
-        fDataPtr = nullptr;
-        xferPixels(texture, fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop,
-                   fDirtyRect.width(), fDirtyRect.height(), fConfig, fTransferBuffer, dataOffset,
-                   rowBytes);
-    } else {
-        writePixels(texture, fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop,
-                    fDirtyRect.width(), fDirtyRect.height(), fConfig, fDataPtr + dataOffset,
-                    rowBytes);
-    }
-
+    const unsigned char* dataPtr = fData;
+    dataPtr += rowBytes * fDirtyRect.fTop;
+    dataPtr += fBytesPerPixel * fDirtyRect.fLeft;
+    writePixels(texture, fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop,
+                fDirtyRect.width(), fDirtyRect.height(), fConfig, dataPtr, rowBytes);
     fDirtyRect.setEmpty();
     SkDEBUGCODE(fDirty = false;)
 }
@@ -131,8 +102,8 @@
     fID = CreateId(fIndex, fGenID);
     // zero out the plot
-    if (fDataPtr) {
-        sk_bzero(fDataPtr, fBytesPerPixel * fWidth * fHeight);
+    if (fData) {
+        sk_bzero(fData, fBytesPerPixel * fWidth * fHeight);
     }
     fDirtyRect.setEmpty();
@@ -141,7 +112,7 @@
 ///////////////////////////////////////////////////////////////////////////////
-GrBatchAtlas::GrBatchAtlas(GrResourceProvider* rp, GrTexture* texture, int numPlotsX, int numPlotsY)
+GrBatchAtlas::GrBatchAtlas(GrTexture* texture, int numPlotsX, int numPlotsY)
     : fTexture(texture)
     , fAtlasGeneration(kInvalidAtlasGeneration + 1) {
@@ -164,7 +135,7 @@
         for (int x = numPlotsX - 1, c = 0; x >= 0; --x, ++c) {
             uint32_t index = r * numPlotsX + c;
             currPlot->reset(new BatchPlot(index, 1, x, y, plotWidth, plotHeight,
-                                          texture->desc().fConfig, rp));
+                                          texture->desc().fConfig));
             // build LRU list
             fPlotList.addToHead(currPlot->get());
@@ -191,13 +162,12 @@
     // upload. Otherwise, we already have a scheduled upload that hasn't yet ocurred.
    // This new update will piggy back on that previously scheduled update.
     if (target->hasDrawBeenFlushed(plot->lastUploadToken())) {
-        // With c+14 we could move sk_sp into lambda to only ref once.
+        // With C++14 we could move sk_sp into the lambda to only ref it once.
         sk_sp<BatchPlot> plotsp(SkRef(plot));
         GrTexture* texture = fTexture;
         GrBatchDrawToken lastUploadToken = target->addAsapUpload(
-            [plotsp, texture] (GrDrawBatch::WritePixelsFn& writePixels,
-                               GrDrawBatch::TransferPixelsFn& transferPixels) {
-                plotsp->uploadToTexture(writePixels, transferPixels, texture);
+            [plotsp, texture] (GrDrawBatch::WritePixelsFn& writePixels) {
+                plotsp->uploadToTexture(writePixels, texture);
             }
         );
         plot->setLastUploadToken(lastUploadToken);
@@ -265,9 +235,8 @@
     sk_sp<BatchPlot> plotsp(SkRef(newPlot.get()));
     GrTexture* texture = fTexture;
     GrBatchDrawToken lastUploadToken = target->addInlineUpload(
-        [plotsp, texture] (GrDrawBatch::WritePixelsFn& writePixels,
-                           GrDrawBatch::TransferPixelsFn& transferPixels) {
-            plotsp->uploadToTexture(writePixels, transferPixels, texture);
+        [plotsp, texture] (GrDrawBatch::WritePixelsFn& writePixels) {
+            plotsp->uploadToTexture(writePixels, texture);
         }
     );
     newPlot->setLastUploadToken(lastUploadToken);
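
For reference, the pattern this patch settles on is: lazily allocate a plain CPU-side buffer for the plot, copy each sub-image into it while growing a dirty rect, then flush only that dirty region through the writePixels callback (the transfer-buffer path is removed entirely). The standalone C++ sketch below illustrates that copy-then-flush flow under simplified assumptions; PlotSketch, Rect, and this WritePixelsFn signature are hypothetical stand-ins, not Skia's API.

// Minimal sketch (not Skia code) of the copy-then-flush plot upload pattern above.
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <cstring>
#include <functional>
#include <vector>

struct Rect { int left, top, right, bottom; };   // hypothetical stand-in for SkIRect

class PlotSketch {
public:
    // Callback shape modeled loosely on the writePixels use above; not Skia's actual type.
    using WritePixelsFn = std::function<void(int left, int top, int width, int height,
                                             const unsigned char* data, size_t rowBytes)>;

    PlotSketch(int width, int height, int bytesPerPixel)
        : fWidth(width)
        , fBytesPerPixel(bytesPerPixel)
        , fData(size_t(width) * height * bytesPerPixel, 0)   // analogous to the calloc'ed fData
        , fDirty{0, 0, 0, 0} {}

    // Copy a sub-image into the backing store and grow the dirty rect
    // (the caller must ensure the sub-image fits, as the real addSubImage asserts).
    void addSubImage(int x, int y, int w, int h, const unsigned char* src) {
        size_t srcRowBytes = size_t(w) * fBytesPerPixel;
        size_t dstRowBytes = size_t(fWidth) * fBytesPerPixel;
        unsigned char* dst = fData.data() + dstRowBytes * y + size_t(fBytesPerPixel) * x;
        for (int row = 0; row < h; ++row) {
            std::memcpy(dst, src, srcRowBytes);
            dst += dstRowBytes;
            src += srcRowBytes;
        }
        this->joinDirty(x, y, x + w, y + h);
    }

    // Flush only the dirty region through the callback, then clear it, mirroring the
    // offset-into-fData arithmetic in uploadToTexture above.
    void uploadDirty(const WritePixelsFn& writePixels) {
        if (fDirty.left >= fDirty.right || fDirty.top >= fDirty.bottom) {
            return;   // nothing pending
        }
        size_t rowBytes = size_t(fBytesPerPixel) * fWidth;
        const unsigned char* dataPtr = fData.data()
                                     + rowBytes * fDirty.top
                                     + size_t(fBytesPerPixel) * fDirty.left;
        writePixels(fDirty.left, fDirty.top,
                    fDirty.right - fDirty.left, fDirty.bottom - fDirty.top,
                    dataPtr, rowBytes);
        fDirty = {0, 0, 0, 0};
    }

private:
    void joinDirty(int l, int t, int r, int b) {
        if (fDirty.left >= fDirty.right) {        // dirty rect is empty: take the new rect
            fDirty = {l, t, r, b};
        } else {                                  // otherwise union with the existing rect
            fDirty.left   = std::min(fDirty.left, l);
            fDirty.top    = std::min(fDirty.top, t);
            fDirty.right  = std::max(fDirty.right, r);
            fDirty.bottom = std::max(fDirty.bottom, b);
        }
    }

    int fWidth;
    int fBytesPerPixel;
    std::vector<unsigned char> fData;
    Rect fDirty;
};

int main() {
    PlotSketch plot(/*width=*/256, /*height=*/256, /*bytesPerPixel=*/1);
    std::vector<unsigned char> glyph(16 * 16, 0xff);
    plot.addSubImage(/*x=*/10, /*y=*/20, /*w=*/16, /*h=*/16, glyph.data());
    // A real callback would write into the atlas texture; here we just report the dirty rect.
    plot.uploadDirty([](int l, int t, int w, int h, const unsigned char*, size_t) {
        std::printf("upload dirty rect: (%d, %d) %dx%d\n", l, t, w, h);
    });
    return 0;
}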