Chromium Code Reviews

Unified Diff: src/gpu/GrBatchAtlas.cpp

Issue 1888473002: Use transfer buffer for BatchAtlas texture copies
Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: Rebase to ToT (created 4 years, 2 months ago)
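In outline, this patch lets each BatchPlot stage its pixel data in a CPU-to-GPU transfer buffer created through a new GrResourceProvider parameter: the buffer is mapped and sub-images are copied into the mapping instead of always going through an sk_calloc'd scratch allocation. At flush time the mapped buffer is unmapped and handed to a new TransferPixelsFn, while the old WritePixelsFn path is kept as a fallback for when no transfer buffer could be created. The following self-contained sketch illustrates that two-path staging pattern; all names in it (PlotSketch, StagingBuffer, and so on) are hypothetical stand-ins for illustration, not Skia API.

// Hypothetical, simplified sketch of the staging pattern this patch adds to BatchPlot.
// StagingBuffer stands in for a mapped kXferCpuToGpu GrBuffer; none of these names are Skia API.
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <vector>

struct StagingBuffer {
    std::vector<uint8_t> storage;             // pretend this is GPU-visible memory
    uint8_t* map()   { return storage.data(); }
    void     unmap() { /* a real GrBuffer would release the CPU mapping here */ }
};

class PlotSketch {
public:
    explicit PlotSketch(size_t plotBytes) : fPlotBytes(plotBytes) {}
    ~PlotSketch() {
        // Mirrors ~BatchPlot: the heap allocation is only owned when there is no buffer.
        if (fTransfer) { delete fTransfer; } else { std::free(fDataPtr); }
    }

    // Like BatchPlot::addSubImage: lazily pick a staging backend, then copy a row in.
    void copyRow(const uint8_t* src, size_t rowBytes, size_t dstOffset) {
        if (!fDataPtr) {
            fTransfer = tryCreateTransferBuffer(fPlotBytes);   // may return nullptr
            fDataPtr = fTransfer ? fTransfer->map()
                                 : static_cast<uint8_t*>(std::calloc(1, fPlotBytes));
        }
        std::memcpy(fDataPtr + dstOffset, src, rowBytes);
    }

    // Like BatchPlot::uploadToTexture: unmap and do a buffer-to-texture transfer if we
    // have a transfer buffer, otherwise fall back to a CPU writePixels-style upload.
    void flush() {
        if (fTransfer) {
            fTransfer->unmap();
            fDataPtr = nullptr;                // the mapping is invalid after unmap
            // ...issue the buffer transfer (TransferPixelsFn in the real patch)...
        } else {
            // ...upload directly from fDataPtr (WritePixelsFn in the real patch)...
        }
    }

private:
    static StagingBuffer* tryCreateTransferBuffer(size_t bytes) {
        // The real code asks GrResourceProvider::createBuffer() for a transfer buffer
        // and takes the heap path when that returns nullptr.
        StagingBuffer* buf = new StagingBuffer();
        buf->storage.resize(bytes);
        return buf;
    }

    size_t         fPlotBytes;
    uint8_t*       fDataPtr  = nullptr;
    StagingBuffer* fTransfer = nullptr;
};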
 /*
  * Copyright 2015 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */

 #include "GrBatchAtlas.h"
 #include "GrBatchFlushState.h"
+#include "GrBuffer.h"
 #include "GrRectanizer.h"
+#include "GrResourceProvider.h"
 #include "GrTracing.h"

 ////////////////////////////////////////////////////////////////////////////////

 GrBatchAtlas::BatchPlot::BatchPlot(int index, uint64_t genID, int offX, int offY, int width,
-                                   int height, GrPixelConfig config)
-    : fLastUpload(GrBatchDrawToken::AlreadyFlushedToken())
+                                   int height, GrPixelConfig config, GrResourceProvider* rp)
+    : fResourceProvider(rp)
+    , fLastUpload(GrBatchDrawToken::AlreadyFlushedToken())
     , fLastUse(GrBatchDrawToken::AlreadyFlushedToken())
     , fIndex(index)
     , fGenID(genID)
     , fID(CreateId(fIndex, fGenID))
-    , fData(nullptr)
+    , fDataPtr(nullptr)
+    , fTransferBuffer(nullptr)
+    , fTransferFence(0)
     , fWidth(width)
     , fHeight(height)
     , fX(offX)
     , fY(offY)
     , fRects(nullptr)
     , fOffset(SkIPoint16::Make(fX * fWidth, fY * fHeight))
     , fConfig(config)
     , fBytesPerPixel(GrBytesPerPixel(config))
 #ifdef SK_DEBUG
     , fDirty(false)
 #endif
 {
     fDirtyRect.setEmpty();
 }

 GrBatchAtlas::BatchPlot::~BatchPlot() {
-    sk_free(fData);
+    if (fTransferBuffer) {
+        fTransferBuffer->unref();
+    } else {
+        sk_free(fDataPtr);
+    }
     delete fRects;
 }

 bool GrBatchAtlas::BatchPlot::addSubImage(int width, int height, const void* image,
                                           SkIPoint16* loc) {
     SkASSERT(width <= fWidth && height <= fHeight);

     if (!fRects) {
         fRects = GrRectanizer::Factory(fWidth, fHeight);
     }

     if (!fRects->addRect(width, height, loc)) {
         return false;
     }

-    if (!fData) {
-        fData = reinterpret_cast<unsigned char*>(sk_calloc_throw(fBytesPerPixel * fWidth *
-                                                                  fHeight));
+    if (!fDataPtr) {
+        if (!fTransferBuffer) {
+            fTransferBuffer =
+                fResourceProvider->createBuffer(fBytesPerPixel * fWidth * fHeight,
+                                                kXferCpuToGpu_GrBufferType,
+                                                kDynamic_GrAccessPattern,
+                                                GrResourceProvider::kNoPendingIO_Flag);
+        }
+        if (fTransferBuffer) {
+            fDataPtr = (unsigned char*)fTransferBuffer->map();
+        } else {
+            fDataPtr = reinterpret_cast<unsigned char*>(sk_calloc_throw(fBytesPerPixel * fWidth *
+                                                                        fHeight));
+        }
     }
+
     size_t rowBytes = width * fBytesPerPixel;
     const unsigned char* imagePtr = (const unsigned char*)image;
     // point ourselves at the right starting spot
-    unsigned char* dataPtr = fData;
+    unsigned char* dataPtr = fDataPtr;
     dataPtr += fBytesPerPixel * fWidth * loc->fY;
     dataPtr += fBytesPerPixel * loc->fX;
     // copy into the data buffer
     for (int i = 0; i < height; ++i) {
         memcpy(dataPtr, imagePtr, rowBytes);
         dataPtr += fBytesPerPixel * fWidth;
         imagePtr += rowBytes;
     }

     fDirtyRect.join(loc->fX, loc->fY, loc->fX + width, loc->fY + height);

     loc->fX += fOffset.fX;
     loc->fY += fOffset.fY;
     SkDEBUGCODE(fDirty = true;)

     return true;
 }

 void GrBatchAtlas::BatchPlot::uploadToTexture(GrDrawBatch::WritePixelsFn& writePixels,
+                                              GrDrawBatch::TransferPixelsFn& xferPixels,
                                               GrTexture* texture) {
     // We should only be issuing uploads if we are in fact dirty
-    SkASSERT(fDirty && fData && texture);
+    SkASSERT(fDirty && fDataPtr && texture);
     TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrBatchPlot::uploadToTexture");
     size_t rowBytes = fBytesPerPixel * fWidth;
-    const unsigned char* dataPtr = fData;
-    dataPtr += rowBytes * fDirtyRect.fTop;
-    dataPtr += fBytesPerPixel * fDirtyRect.fLeft;
-    writePixels(texture, fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop,
-                fDirtyRect.width(), fDirtyRect.height(), fConfig, dataPtr, rowBytes);
+    size_t dataOffset = rowBytes * fDirtyRect.fTop + fBytesPerPixel * fDirtyRect.fLeft;
+    if (fTransferBuffer) {
+        fTransferBuffer->unmap();
+        fDataPtr = nullptr;
+        xferPixels(texture, fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop,
+                   fDirtyRect.width(), fDirtyRect.height(), fConfig, fTransferBuffer, dataOffset,
+                   rowBytes, &fTransferFence);
+    } else {
+        writePixels(texture, fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop,
+                    fDirtyRect.width(), fDirtyRect.height(), fConfig, fDataPtr + dataOffset,
+                    rowBytes);
+    }
+
     fDirtyRect.setEmpty();
     SkDEBUGCODE(fDirty = false;)
 }

 void GrBatchAtlas::BatchPlot::resetRects() {
     if (fRects) {
         fRects->reset();
     }

     fGenID++;
     fID = CreateId(fIndex, fGenID);

     // zero out the plot
-    if (fData) {
-        sk_bzero(fData, fBytesPerPixel * fWidth * fHeight);
+    if (fDataPtr) {
+        sk_bzero(fDataPtr, fBytesPerPixel * fWidth * fHeight);
     }

     fDirtyRect.setEmpty();
     SkDEBUGCODE(fDirty = false;)
 }

 ///////////////////////////////////////////////////////////////////////////////

-GrBatchAtlas::GrBatchAtlas(GrTexture* texture, int numPlotsX, int numPlotsY)
+GrBatchAtlas::GrBatchAtlas(GrResourceProvider* rp, GrTexture* texture, int numPlotsX, int numPlotsY)
     : fTexture(texture)
     , fAtlasGeneration(kInvalidAtlasGeneration + 1) {

     fPlotWidth = texture->width() / numPlotsX;
     fPlotHeight = texture->height() / numPlotsY;
     SkASSERT(numPlotsX * numPlotsY <= BulkUseTokenUpdater::kMaxPlots);
     SkASSERT(fPlotWidth * numPlotsX == texture->width());
     SkASSERT(fPlotHeight * numPlotsY == texture->height());

     SkDEBUGCODE(fNumPlots = numPlotsX * numPlotsY;)

     // We currently do not support compressed atlases...
     SkASSERT(!GrPixelConfigIsCompressed(texture->desc().fConfig));

     // set up allocated plots
     fPlotArray = new SkAutoTUnref<BatchPlot>[numPlotsX * numPlotsY];

     SkAutoTUnref<BatchPlot>* currPlot = fPlotArray;
     for (int y = numPlotsY - 1, r = 0; y >= 0; --y, ++r) {
         for (int x = numPlotsX - 1, c = 0; x >= 0; --x, ++c) {
             uint32_t index = r * numPlotsX + c;
             currPlot->reset(new BatchPlot(index, 1, x, y, fPlotWidth, fPlotHeight,
-                                          texture->desc().fConfig));
+                                          texture->desc().fConfig, rp));

             // build LRU list
             fPlotList.addToHead(currPlot->get());
             ++currPlot;
         }
     }
 }

 GrBatchAtlas::~GrBatchAtlas() {
     SkSafeUnref(fTexture);
     delete[] fPlotArray;
 }

 void GrBatchAtlas::processEviction(AtlasID id) {
     for (int i = 0; i < fEvictionCallbacks.count(); i++) {
         (*fEvictionCallbacks[i].fFunc)(id, fEvictionCallbacks[i].fData);
     }
 }

 inline void GrBatchAtlas::updatePlot(GrDrawBatch::Target* target, AtlasID* id, BatchPlot* plot) {
     this->makeMRU(plot);

     // If our most recent upload has already occurred then we have to insert a new
     // upload. Otherwise, we already have a scheduled upload that hasn't yet occurred.
     // This new update will piggy back on that previously scheduled update.
     if (target->hasDrawBeenFlushed(plot->lastUploadToken())) {
-        // With c+14 we could move sk_sp into lamba to only ref once.
+        // With c+14 we could move sk_sp into lambda to only ref once.
         sk_sp<BatchPlot> plotsp(SkRef(plot));
         GrTexture* texture = fTexture;
         GrBatchDrawToken lastUploadToken = target->addAsapUpload(
-            [plotsp, texture] (GrDrawBatch::WritePixelsFn& writePixels) {
-                plotsp->uploadToTexture(writePixels, texture);
+            [plotsp, texture] (GrDrawBatch::WritePixelsFn& writePixels,
+                               GrDrawBatch::TransferPixelsFn& transferPixels) {
+                plotsp->uploadToTexture(writePixels, transferPixels, texture);
             }
         );
         plot->setLastUploadToken(lastUploadToken);
     }
     *id = plot->id();
 }

 bool GrBatchAtlas::addToAtlas(AtlasID* id, GrDrawBatch::Target* target,
                               int width, int height, const void* image, SkIPoint16* loc) {
     // We should already have a texture, TODO clean this up

(...skipping 48 matching lines...)

     SkASSERT(GrBytesPerPixel(fTexture->desc().fConfig) == newPlot->bpp());
     SkDEBUGCODE(bool verify = )newPlot->addSubImage(width, height, image, loc);
     SkASSERT(verify);

     // Note that this plot will be uploaded inline with the draws whereas the
     // one it displaced most likely was uploaded asap.
     // With c+14 we could move sk_sp into lamba to only ref once.
     sk_sp<BatchPlot> plotsp(SkRef(newPlot.get()));
     GrTexture* texture = fTexture;
     GrBatchDrawToken lastUploadToken = target->addInlineUpload(
-        [plotsp, texture] (GrDrawBatch::WritePixelsFn& writePixels) {
-            plotsp->uploadToTexture(writePixels, texture);
+        [plotsp, texture] (GrDrawBatch::WritePixelsFn& writePixels,
+                           GrDrawBatch::TransferPixelsFn& transferPixels) {
+            plotsp->uploadToTexture(writePixels, transferPixels, texture);
         }
     );
     newPlot->setLastUploadToken(lastUploadToken);

     *id = newPlot->id();

     fAtlasGeneration++;
     return true;
 }
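A caller-facing consequence visible in the diff is that GrBatchAtlas (and each of its plots) now takes a GrResourceProvider at construction so that transfer buffers can be created on demand. A minimal illustrative sketch of the new constructor shape follows; the resourceProvider and atlasTexture parameters and the helper name are assumptions, since the real call sites live in GrContext/GrResourceProvider code not shown here.

// Illustrative only: the helper name and its parameters are hypothetical; only the
// GrBatchAtlas constructor signature below comes from this diff.
GrBatchAtlas* createAtlasSketch(GrResourceProvider* resourceProvider,
                                GrTexture* atlasTexture,
                                int numPlotsX, int numPlotsY) {
    // Before this patch: new GrBatchAtlas(atlasTexture, numPlotsX, numPlotsY)
    // After it, the provider is threaded through so plots can create transfer buffers:
    return new GrBatchAtlas(resourceProvider, atlasTexture, numPlotsX, numPlotsY);
}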