| OLD | NEW |
| 1 | 1 |
| 2 /* | 2 /* |
| 3 * Copyright 2010 Google Inc. | 3 * Copyright 2010 Google Inc. |
| 4 * | 4 * |
| 5 * Use of this source code is governed by a BSD-style license that can be | 5 * Use of this source code is governed by a BSD-style license that can be |
| 6 * found in the LICENSE file. | 6 * found in the LICENSE file. |
| 7 */ | 7 */ |
| 8 | 8 |
| 9 #include "GrAtlas.h" | 9 #include "GrAtlas.h" |
| 10 #include "GrContext.h" | 10 #include "GrContext.h" |
| (...skipping 55 matching lines...) |
| 66 // if batching uploads, create backing memory on first use | 66 // if batching uploads, create backing memory on first use |
| 67 // once the plot is nearly full we will revert to uploading each subimage individually | 67 // once the plot is nearly full we will revert to uploading each subimage individually |
| 68 int plotWidth = fRects->width(); | 68 int plotWidth = fRects->width(); |
| 69 int plotHeight = fRects->height(); | 69 int plotHeight = fRects->height(); |
| 70 if (fBatchUploads && NULL == fPlotData && 0.0f == percentFull) { | 70 if (fBatchUploads && NULL == fPlotData && 0.0f == percentFull) { |
| 71 fPlotData = SkNEW_ARRAY(unsigned char, fBytesPerPixel*plotWidth*plotHeight); | 71 fPlotData = SkNEW_ARRAY(unsigned char, fBytesPerPixel*plotWidth*plotHeight); |
| 72 memset(fPlotData, 0, fBytesPerPixel*plotWidth*plotHeight); | 72 memset(fPlotData, 0, fBytesPerPixel*plotWidth*plotHeight); |
| 73 } | 73 } |
| 74 | 74 |
| 75 // if we have backing memory, copy to the memory and set for future upload | 75 // if we have backing memory, copy to the memory and set for future upload |
| 76 if (NULL != fPlotData) { | 76 if (fPlotData) { |
| 77 const unsigned char* imagePtr = (const unsigned char*) image; | 77 const unsigned char* imagePtr = (const unsigned char*) image; |
| 78 // point ourselves at the right starting spot | 78 // point ourselves at the right starting spot |
| 79 unsigned char* dataPtr = fPlotData; | 79 unsigned char* dataPtr = fPlotData; |
| 80 dataPtr += fBytesPerPixel*plotWidth*loc->fY; | 80 dataPtr += fBytesPerPixel*plotWidth*loc->fY; |
| 81 dataPtr += fBytesPerPixel*loc->fX; | 81 dataPtr += fBytesPerPixel*loc->fX; |
| 82 // copy into the data buffer | 82 // copy into the data buffer |
| 83 for (int i = 0; i < height; ++i) { | 83 for (int i = 0; i < height; ++i) { |
| 84 memcpy(dataPtr, imagePtr, fBytesPerPixel*width); | 84 memcpy(dataPtr, imagePtr, fBytesPerPixel*width); |
| 85 dataPtr += fBytesPerPixel*plotWidth; | 85 dataPtr += fBytesPerPixel*plotWidth; |
| 86 imagePtr += fBytesPerPixel*width; | 86 imagePtr += fBytesPerPixel*width; |
| 87 } | 87 } |
| 88 | 88 |
| 89 fDirtyRect.join(loc->fX, loc->fY, loc->fX + width, loc->fY + height); | 89 fDirtyRect.join(loc->fX, loc->fY, loc->fX + width, loc->fY + height); |
| 90 adjust_for_offset(loc, fOffset); | 90 adjust_for_offset(loc, fOffset); |
| 91 fDirty = true; | 91 fDirty = true; |
| 92 // otherwise, just upload the image directly | 92 // otherwise, just upload the image directly |
| 93 } else if (NULL != image) { | 93 } else if (image) { |
| 94 adjust_for_offset(loc, fOffset); | 94 adjust_for_offset(loc, fOffset); |
| 95 GrContext* context = fTexture->getContext(); | 95 GrContext* context = fTexture->getContext(); |
| 96 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrPlot::uploadToTexture"); | 96 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrPlot::uploadToTexture"); |
| 97 context->writeTexturePixels(fTexture, | 97 context->writeTexturePixels(fTexture, |
| 98 loc->fX, loc->fY, width, height, | 98 loc->fX, loc->fY, width, height, |
| 99 fTexture->config(), image, 0, | 99 fTexture->config(), image, 0, |
| 100 GrContext::kDontFlush_PixelOpsFlag); | 100 GrContext::kDontFlush_PixelOpsFlag); |
| 101 } else { | 101 } else { |
| 102 adjust_for_offset(loc, fOffset); | 102 adjust_for_offset(loc, fOffset); |
| 103 } | 103 } |
| 104 | 104 |
| 105 #if FONT_CACHE_STATS | 105 #if FONT_CACHE_STATS |
| 106 ++g_UploadCount; | 106 ++g_UploadCount; |
| 107 #endif | 107 #endif |
| 108 | 108 |
| 109 return true; | 109 return true; |
| 110 } | 110 } |
| 111 | 111 |
| 112 void GrPlot::uploadToTexture() { | 112 void GrPlot::uploadToTexture() { |
| 113 static const float kNearlyFullTolerance = 0.85f; | 113 static const float kNearlyFullTolerance = 0.85f; |
| 114 | 114 |
| 115 // should only do this if batching is enabled | 115 // should only do this if batching is enabled |
| 116 SkASSERT(fBatchUploads); | 116 SkASSERT(fBatchUploads); |
| 117 | 117 |
| 118 if (fDirty) { | 118 if (fDirty) { |
| 119 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrPlot::uploadToTexture"); | 119 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrPlot::uploadToTexture"); |
| 120 SkASSERT(NULL != fTexture); | 120 SkASSERT(fTexture); |
| 121 GrContext* context = fTexture->getContext(); | 121 GrContext* context = fTexture->getContext(); |
| 122 // We pass the flag that does not force a flush. We assume our caller is | 122 // We pass the flag that does not force a flush. We assume our caller is |
| 123 // smart and hasn't referenced the part of the texture we're about to update | 123 // smart and hasn't referenced the part of the texture we're about to update |
| 124 // since the last flush. | 124 // since the last flush. |
| 125 size_t rowBytes = fBytesPerPixel*fRects->width(); | 125 size_t rowBytes = fBytesPerPixel*fRects->width(); |
| 126 const unsigned char* dataPtr = fPlotData; | 126 const unsigned char* dataPtr = fPlotData; |
| 127 dataPtr += rowBytes*fDirtyRect.fTop; | 127 dataPtr += rowBytes*fDirtyRect.fTop; |
| 128 dataPtr += fBytesPerPixel*fDirtyRect.fLeft; | 128 dataPtr += fBytesPerPixel*fDirtyRect.fLeft; |
| 129 context->writeTexturePixels(fTexture, | 129 context->writeTexturePixels(fTexture, |
| 130 fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop, | 130 fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop, |
| 131 fDirtyRect.width(), fDirtyRect.height(), | 131 fDirtyRect.width(), fDirtyRect.height(), |
| 132 fTexture->config(), dataPtr, | 132 fTexture->config(), dataPtr, |
| 133 rowBytes, | 133 rowBytes, |
| 134 GrContext::kDontFlush_PixelOpsFlag); | 134 GrContext::kDontFlush_PixelOpsFlag); |
| 135 fDirtyRect.setEmpty(); | 135 fDirtyRect.setEmpty(); |
| 136 fDirty = false; | 136 fDirty = false; |
| 137 // If the Plot is nearly full, anything else we add will probably be small and one | 137 // If the Plot is nearly full, anything else we add will probably be small and one |
| 138 // at a time, so free up the memory and after this upload any new images directly. | 138 // at a time, so free up the memory and after this upload any new images directly. |
| 139 if (fRects->percentFull() > kNearlyFullTolerance) { | 139 if (fRects->percentFull() > kNearlyFullTolerance) { |
| 140 SkDELETE_ARRAY(fPlotData); | 140 SkDELETE_ARRAY(fPlotData); |
| 141 fPlotData = NULL; | 141 fPlotData = NULL; |
| 142 } | 142 } |
| 143 } | 143 } |
| 144 } | 144 } |
| 145 | 145 |
| 146 void GrPlot::resetRects() { | 146 void GrPlot::resetRects() { |
| 147 SkASSERT(NULL != fRects); | 147 SkASSERT(fRects); |
| 148 fRects->reset(); | 148 fRects->reset(); |
| 149 } | 149 } |
| 150 | 150 |
| 151 /////////////////////////////////////////////////////////////////////////////// | 151 /////////////////////////////////////////////////////////////////////////////// |
| 152 | 152 |
| 153 GrAtlas::GrAtlas(GrGpu* gpu, GrPixelConfig config, GrTextureFlags flags, | 153 GrAtlas::GrAtlas(GrGpu* gpu, GrPixelConfig config, GrTextureFlags flags, |
| 154 const SkISize& backingTextureSize, | 154 const SkISize& backingTextureSize, |
| 155 int numPlotsX, int numPlotsY, bool batchUploads) { | 155 int numPlotsX, int numPlotsY, bool batchUploads) { |
| 156 fGpu = SkRef(gpu); | 156 fGpu = SkRef(gpu); |
| 157 fPixelConfig = config; | 157 fPixelConfig = config; |
| (...skipping 76 matching lines...) |
| 234 fTexture = fGpu->createTexture(desc, NULL, 0); | 234 fTexture = fGpu->createTexture(desc, NULL, 0); |
| 235 if (NULL == fTexture) { | 235 if (NULL == fTexture) { |
| 236 return NULL; | 236 return NULL; |
| 237 } | 237 } |
| 238 } | 238 } |
| 239 | 239 |
| 240 // now look through all allocated plots for one we can share, in MRU order | 240 // now look through all allocated plots for one we can share, in MRU order |
| 241 GrPlotList::Iter plotIter; | 241 GrPlotList::Iter plotIter; |
| 242 plotIter.init(fPlotList, GrPlotList::Iter::kHead_IterStart); | 242 plotIter.init(fPlotList, GrPlotList::Iter::kHead_IterStart); |
| 243 GrPlot* plot; | 243 GrPlot* plot; |
| 244 while (NULL != (plot = plotIter.get())) { | 244 while ((plot = plotIter.get())) { |
| 245 // make sure texture is set for quick lookup | 245 // make sure texture is set for quick lookup |
| 246 plot->fTexture = fTexture; | 246 plot->fTexture = fTexture; |
| 247 if (plot->addSubImage(width, height, image, loc)) { | 247 if (plot->addSubImage(width, height, image, loc)) { |
| 248 this->makeMRU(plot); | 248 this->makeMRU(plot); |
| 249 // new plot for atlas, put at end of array | 249 // new plot for atlas, put at end of array |
| 250 SkASSERT(!usage->fPlots.contains(plot)); | 250 SkASSERT(!usage->fPlots.contains(plot)); |
| 251 *(usage->fPlots.append()) = plot; | 251 *(usage->fPlots.append()) = plot; |
| 252 return plot; | 252 return plot; |
| 253 } | 253 } |
| 254 plotIter.next(); | 254 plotIter.next(); |
| 255 } | 255 } |
| 256 | 256 |
| 257 // If the above fails, then the current plot list has no room | 257 // If the above fails, then the current plot list has no room |
| 258 return NULL; | 258 return NULL; |
| 259 } | 259 } |
| 260 | 260 |
| 261 void GrAtlas::RemovePlot(ClientPlotUsage* usage, const GrPlot* plot) { | 261 void GrAtlas::RemovePlot(ClientPlotUsage* usage, const GrPlot* plot) { |
| 262 int index = usage->fPlots.find(const_cast<GrPlot*>(plot)); | 262 int index = usage->fPlots.find(const_cast<GrPlot*>(plot)); |
| 263 if (index >= 0) { | 263 if (index >= 0) { |
| 264 usage->fPlots.remove(index); | 264 usage->fPlots.remove(index); |
| 265 } | 265 } |
| 266 } | 266 } |
| 267 | 267 |
| 268 // get a plot that's not being used by the current draw | 268 // get a plot that's not being used by the current draw |
| 269 GrPlot* GrAtlas::getUnusedPlot() { | 269 GrPlot* GrAtlas::getUnusedPlot() { |
| 270 GrPlotList::Iter plotIter; | 270 GrPlotList::Iter plotIter; |
| 271 plotIter.init(fPlotList, GrPlotList::Iter::kTail_IterStart); | 271 plotIter.init(fPlotList, GrPlotList::Iter::kTail_IterStart); |
| 272 GrPlot* plot; | 272 GrPlot* plot; |
| 273 while (NULL != (plot = plotIter.get())) { | 273 while ((plot = plotIter.get())) { |
| 274 if (plot->drawToken().isIssued()) { | 274 if (plot->drawToken().isIssued()) { |
| 275 return plot; | 275 return plot; |
| 276 } | 276 } |
| 277 plotIter.prev(); | 277 plotIter.prev(); |
| 278 } | 278 } |
| 279 | 279 |
| 280 return NULL; | 280 return NULL; |
| 281 } | 281 } |
| 282 | 282 |
| 283 void GrAtlas::uploadPlotsToTexture() { | 283 void GrAtlas::uploadPlotsToTexture() { |
| 284 if (fBatchUploads) { | 284 if (fBatchUploads) { |
| 285 GrPlotList::Iter plotIter; | 285 GrPlotList::Iter plotIter; |
| 286 plotIter.init(fPlotList, GrPlotList::Iter::kHead_IterStart); | 286 plotIter.init(fPlotList, GrPlotList::Iter::kHead_IterStart); |
| 287 GrPlot* plot; | 287 GrPlot* plot; |
| 288 while (NULL != (plot = plotIter.get())) { | 288 while ((plot = plotIter.get())) { |
| 289 plot->uploadToTexture(); | 289 plot->uploadToTexture(); |
| 290 plotIter.next(); | 290 plotIter.next(); |
| 291 } | 291 } |
| 292 } | 292 } |
| 293 } | 293 } |
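For readers skimming the diff, below is a minimal, self-contained sketch of the dirty-rect batching pattern that `GrPlot::addSubImage` and `GrPlot::uploadToTexture` implement above: sub-images are copied into a CPU-side backing buffer, a dirty rectangle is grown, and a single texture write later flushes just that region. This is not Skia's actual API; `PlotSketch`, `DirtySketchRect`, and the `writePixels` callback are hypothetical names used only for illustration.

```cpp
#include <algorithm>
#include <cstddef>
#include <cstring>
#include <vector>

// Hypothetical stand-in for the plot's dirty rectangle.
struct DirtySketchRect {
    int left, top, right, bottom;
    bool empty;
};

class PlotSketch {
public:
    PlotSketch(int width, int height, int bytesPerPixel)
        : fWidth(width)
        , fBytesPerPixel(bytesPerPixel)
        , fData(static_cast<size_t>(width) * height * bytesPerPixel, 0)
        , fDirty{0, 0, 0, 0, true} {}

    // Copy a tightly packed sub-image into the backing store and grow the
    // dirty rect, mirroring the batched path of GrPlot::addSubImage.
    void addSubImage(int x, int y, int w, int h, const unsigned char* src) {
        unsigned char* dst = fData.data() +
            (static_cast<size_t>(y) * fWidth + x) * fBytesPerPixel;
        for (int row = 0; row < h; ++row) {
            std::memcpy(dst, src, static_cast<size_t>(w) * fBytesPerPixel);
            dst += static_cast<size_t>(fWidth) * fBytesPerPixel;  // next plot row
            src += static_cast<size_t>(w) * fBytesPerPixel;       // next source row
        }
        this->joinDirty(x, y, x + w, y + h);
    }

    // Flush only the dirty region in one write, as GrPlot::uploadToTexture does.
    // writePixels(x, y, w, h, ptr, rowBytes) is a caller-supplied upload callback.
    template <typename WriteFn>
    void flush(WriteFn writePixels) {
        if (fDirty.empty) {
            return;
        }
        const size_t rowBytes = static_cast<size_t>(fWidth) * fBytesPerPixel;
        const unsigned char* ptr = fData.data() +
            fDirty.top * rowBytes + static_cast<size_t>(fDirty.left) * fBytesPerPixel;
        writePixels(fDirty.left, fDirty.top,
                    fDirty.right - fDirty.left, fDirty.bottom - fDirty.top,
                    ptr, rowBytes);
        fDirty = DirtySketchRect{0, 0, 0, 0, true};
    }

private:
    void joinDirty(int l, int t, int r, int b) {
        if (fDirty.empty) {
            fDirty = DirtySketchRect{l, t, r, b, false};
        } else {
            fDirty.left   = std::min(fDirty.left, l);
            fDirty.top    = std::min(fDirty.top, t);
            fDirty.right  = std::max(fDirty.right, r);
            fDirty.bottom = std::max(fDirty.bottom, b);
        }
    }

    int fWidth;
    int fBytesPerPixel;
    std::vector<unsigned char> fData;
    DirtySketchRect fDirty;
};
```

As the comments on lines 137-141 of the patched file note, once a plot is nearly full the backing buffer is freed and further sub-images are uploaded directly, since additions at that point tend to be small and arrive one at a time.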