Chromium Code Reviews

Side by Side Diff: src/gpu/GrContext.cpp

Issue 858123002: Add specialized content key class for resources. (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: remove default template arg Created 5 years, 11 months ago
/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrContext.h"

(...skipping 205 matching lines...)
    if (fGpu->caps()->pathRenderingSupport() && renderTarget->getStencilBuffer() &&
        renderTarget->isMultisampled()) {
        return GrStencilAndCoverTextContext::Create(this, leakyProperties);
    }

    return GrDistanceFieldTextContext::Create(this, leakyProperties, enableDistanceFieldFonts);
}

////////////////////////////////////////////////////////////////////////////////

-GrTexture* GrContext::findAndRefTexture(const GrSurfaceDesc& desc,
-                                        const GrCacheID& cacheID,
-                                        const GrTextureParams* params) {
-    GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID);
-
-    GrGpuResource* resource = this->findAndRefCachedResource(resourceKey);
-    if (resource) {
-        SkASSERT(static_cast<GrSurface*>(resource)->asTexture());
-        return static_cast<GrSurface*>(resource)->asTexture();
-    }
-    return NULL;
-}
-
-bool GrContext::isTextureInCache(const GrSurfaceDesc& desc,
-                                 const GrCacheID& cacheID,
-                                 const GrTextureParams* params) const {
-    GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID);
-    return fResourceCache2->hasContentKey(resourceKey);
-}
-
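For orientation: the deleted overloads above squeezed everything (texture description, params, and a GrCacheID) into one monolithic GrResourceKey via GrTexturePriv::ComputeKey. The replacement versions later in this patch instead start from a caller-supplied GrContentKey and, only when a power-of-two resize is needed, derive a second key in a dedicated domain. Conceptually such a content key is just a domain id plus a few 32-bit data words, as in the standalone sketch below; the class and its fields are illustrative assumptions, not the actual GrContentKey API.

#include <cstdint>
#include <initializer_list>
#include <vector>

// Toy domain-scoped content key: a domain id plus 32-bit data words.
// Two keys can only compare equal if they were built in the same domain,
// so subsystems that generate keys independently never collide.
class ToyContentKey {
public:
    using Domain = uint32_t;

    ToyContentKey(Domain domain, std::initializer_list<uint32_t> data)
        : fDomain(domain), fData(data) {}

    bool operator==(const ToyContentKey& that) const {
        return fDomain == that.fDomain && fData == that.fData;
    }
    bool operator!=(const ToyContentKey& that) const { return !(*this == that); }

private:
    Domain fDomain;
    std::vector<uint32_t> fData;
};

int main() {
    const ToyContentKey::Domain kTextureDomain = 1;
    const ToyContentKey::Domain kResizeDomain  = 2;
    // Same data words, different domains: still distinct keys.
    ToyContentKey a(kTextureDomain, {0x1234, 0x0});
    ToyContentKey b(kResizeDomain,  {0x1234, 0x0});
    return a != b ? 0 : 1;
}

The GrContentKey::Builder calls later in this patch follow the same shape: the resized-texture key appears to be built from the original key in its own domain (ResizeDomain()) with the resize flags carried as an extra data word.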
static void stretch_image(void* dst,
                          int dstW,
                          int dstH,
                          const void* src,
                          int srcW,
                          int srcH,
                          size_t bpp) {
    SkFixed dx = (srcW << 16) / dstW;
    SkFixed dy = (srcH << 16) / dstH;

    SkFixed y = dy >> 1;

    size_t dstXLimit = dstW*bpp;
    for (int j = 0; j < dstH; ++j) {
        SkFixed x = dx >> 1;
        const uint8_t* srcRow = reinterpret_cast<const uint8_t*>(src) + (y>>16)*srcW*bpp;
        uint8_t* dstRow = reinterpret_cast<uint8_t*>(dst) + j*dstW*bpp;
        for (size_t i = 0; i < dstXLimit; i += bpp) {
            memcpy(dstRow + i, srcRow + (x>>16)*bpp, bpp);
            x += dx;
        }
        y += dy;
    }
}

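stretch_image above is a nearest-neighbor resample in 16.16 fixed point: dx and dy are the source step per destination pixel, and starting at dx >> 1 / dy >> 1 samples each source texel near its center. The standalone sketch below mirrors that logic with plain integers on a tiny single-channel buffer so the arithmetic can be checked outside Skia; it is an illustration, not code from this patch.

#include <cstdint>
#include <cstdio>
#include <vector>

// Nearest-neighbor stretch in 16.16 fixed point, mirroring stretch_image
// above for a 1-byte-per-pixel image.
static void stretch_nearest(uint8_t* dst, int dstW, int dstH,
                            const uint8_t* src, int srcW, int srcH) {
    const int32_t dx = (srcW << 16) / dstW;   // source step per dst column
    const int32_t dy = (srcH << 16) / dstH;   // source step per dst row
    int32_t y = dy >> 1;                      // start half a step in (texel center)
    for (int j = 0; j < dstH; ++j) {
        int32_t x = dx >> 1;
        const uint8_t* srcRow = src + (y >> 16) * srcW;
        uint8_t* dstRow = dst + j * dstW;
        for (int i = 0; i < dstW; ++i) {
            dstRow[i] = srcRow[x >> 16];
            x += dx;
        }
        y += dy;
    }
}

int main() {
    // Stretch a 3x3 ramp up to the next power of two (4x4), as the NPOT path does.
    const uint8_t src[9] = {10, 20, 30, 40, 50, 60, 70, 80, 90};
    std::vector<uint8_t> dst(4 * 4);
    stretch_nearest(dst.data(), 4, 4, src, 3, 3);
    for (int j = 0; j < 4; ++j) {
        for (int i = 0; i < 4; ++i) {
            printf("%3d ", dst[j * 4 + i]);
        }
        printf("\n");
    }
    return 0;
}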
+enum ResizeFlags {
+    /**
+     * The kStretchToPOT bit is set when the texture is NPOT and is being repeated or mipped but the
+     * hardware doesn't support that feature.
+     */
+    kStretchToPOT_ResizeFlag = 0x1,
+    /**
+     * The kBilerp bit can only be set when the kStretchToPOT flag is set and indicates whether the
+     * stretched texture should be bilerped.
+     */
+    kBilerp_ResizeFlag = 0x2,
+};
+
+static uint32_t get_texture_flags(const GrGpu* gpu,
+                                  const GrTextureParams* params,
+                                  const GrSurfaceDesc& desc) {
+    uint32_t flags = 0;
+    bool tiled = params && params->isTiled();
+    if (tiled && !gpu->caps()->npotTextureTileSupport()) {
+        if (!SkIsPow2(desc.fWidth) || !SkIsPow2(desc.fHeight)) {
+            flags |= kStretchToPOT_ResizeFlag;
+            switch (params->filterMode()) {
+                case GrTextureParams::kNone_FilterMode:
+                    break;
+                case GrTextureParams::kBilerp_FilterMode:
+                case GrTextureParams::kMipMap_FilterMode:
+                    flags |= kBilerp_ResizeFlag;
+                    break;
+            }
+        }
+    }
+    return flags;
+}
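get_texture_flags only requests a stretch when the texture is tiled, the hardware lacks NPOT tiling support, and at least one dimension is not a power of two. The bit tricks behind SkIsPow2 and the GrNextPow2 calls used by createResizedTexture below are shown in this standalone sketch; the helper names here are local stand-ins, not the Skia implementations.

#include <cassert>

// A positive value is a power of two when it has exactly one bit set.
static bool is_pow2(int value) {
    return value > 0 && (value & (value - 1)) == 0;
}

// Round up to the next power of two (returns the value itself if it already is one).
static int next_pow2(int value) {
    int n = 1;
    while (n < value) {
        n <<= 1;
    }
    return n;
}

int main() {
    assert(is_pow2(256) && !is_pow2(200));
    // A 200x300 tiled texture on hardware without NPOT support would be
    // stretched to the next power-of-two dimensions, 256x512.
    assert(next_pow2(200) == 256);
    assert(next_pow2(300) == 512);
    return 0;
}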
// The desired texture is NPOT and tiled but that isn't supported by
// the current hardware. Resize the texture to be a POT
GrTexture* GrContext::createResizedTexture(const GrSurfaceDesc& desc,
-                                          const GrCacheID& cacheID,
+                                          const GrContentKey& origKey,
                                           const void* srcData,
                                           size_t rowBytes,
                                           bool filter) {
-    SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, cacheID, NULL));
+    SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, origKey, NULL));
    if (NULL == clampedTexture) {
-        clampedTexture.reset(this->createTexture(NULL, desc, cacheID, srcData, rowBytes));
+        clampedTexture.reset(this->createTexture(NULL, desc, origKey, srcData, rowBytes));

        if (NULL == clampedTexture) {
            return NULL;
        }
+        clampedTexture->cacheAccess().setContentKey(origKey);
    }

    GrSurfaceDesc rtDesc = desc;
    rtDesc.fFlags = rtDesc.fFlags |
                    kRenderTarget_GrSurfaceFlag |
                    kNoStencil_GrSurfaceFlag;
    rtDesc.fWidth = GrNextPow2(desc.fWidth);
    rtDesc.fHeight = GrNextPow2(desc.fHeight);

    GrTexture* texture = fGpu->createTexture(rtDesc, true, NULL, 0);
(...skipping 16 matching lines...)
            GrDefaultGeoProcFactory::Create(flags, GrColor_WHITE));

        GrDrawTarget::AutoReleaseGeometry arg(fDrawBuffer, 4, gp->getVertexStride(), 0);
        SkASSERT(gp->getVertexStride() == 2 * sizeof(SkPoint));

        if (arg.succeeded()) {
            SkPoint* verts = (SkPoint*) arg.vertices();
            verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(SkPoint));
            verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(SkPoint));
            fDrawBuffer->drawNonIndexed(&pipelineBuilder, gp, kTriangleFan_GrPrimitiveType, 0, 4);
+        } else {
+            texture->unref();
+            texture = NULL;
        }
    } else {
        // TODO: Our CPU stretch doesn't filter. But we create separate
        // stretched textures when the texture params is either filtered or
        // not. Either implement filtered stretch blit on CPU or just create
        // one when FBO case fails.

        rtDesc.fFlags = kNone_GrSurfaceFlags;
        // no longer need to clamp at min RT size.
        rtDesc.fWidth = GrNextPow2(desc.fWidth);
        rtDesc.fHeight = GrNextPow2(desc.fHeight);

        // We shouldn't be resizing a compressed texture.
        SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));

        size_t bpp = GrBytesPerPixel(desc.fConfig);
        GrAutoMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight);
        stretch_image(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
                      srcData, desc.fWidth, desc.fHeight, bpp);

        size_t stretchedRowBytes = rtDesc.fWidth * bpp;

        texture = fGpu->createTexture(rtDesc, true, stretchedPixels.get(), stretchedRowBytes);
        SkASSERT(texture);
    }

    return texture;
}

+static GrContentKey::Domain ResizeDomain() {
+    static const GrContentKey::Domain kDomain = GrContentKey::GenerateDomain();
+    return kDomain;
+}
+
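ResizeDomain() caches the generated domain in a function-local static, so the resize path asks GrContentKey::GenerateDomain() for a fresh domain exactly once and reuses it for every derived key afterwards. The standalone sketch below illustrates that idiom with a toy counter; it is an illustration of the pattern, not the Skia implementation (in the sketch, C++11 magic statics make the one-time initialization thread-safe).

#include <atomic>
#include <cassert>
#include <cstdint>

// Toy stand-in for GrContentKey::GenerateDomain(): hands out a new id per call.
static uint32_t generate_domain() {
    static std::atomic<uint32_t> nextDomain{1};
    return nextDomain.fetch_add(1);
}

// Toy stand-in for ResizeDomain(): the function-local static means the domain
// is allocated on first use and every later call returns the same value.
static uint32_t resize_domain() {
    static const uint32_t kDomain = generate_domain();
    return kDomain;
}

int main() {
    const uint32_t d = resize_domain();
    assert(resize_domain() == d);      // stable for the process lifetime
    assert(generate_domain() != d);    // other callers get distinct domains
    return 0;
}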
GrTexture* GrContext::createTexture(const GrTextureParams* params,
                                    const GrSurfaceDesc& desc,
-                                   const GrCacheID& cacheID,
+                                   const GrContentKey& origKey,
                                    const void* srcData,
                                    size_t rowBytes,
-                                   GrResourceKey* cacheKey) {
-    GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID);
-
-    GrTexture* texture;
-    if (GrTexturePriv::NeedsResizing(resourceKey)) {
-        // We do not know how to resize compressed textures.
-        SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));
-
-        texture = this->createResizedTexture(desc, cacheID,
-                                             srcData, rowBytes,
-                                             GrTexturePriv::NeedsBilerp(resourceKey));
+                                   GrContentKey* outKey) {
+    GrTexture* texture;
+    uint32_t flags = get_texture_flags(fGpu, params, desc);
+    SkTCopyOnFirstWrite<GrContentKey> key(origKey);
+    if (flags) {
+        // We don't have a code path to resize compressed textures.
+        if (GrPixelConfigIsCompressed(desc.fConfig)) {
+            return NULL;
+        }
+        texture = this->createResizedTexture(desc, origKey, srcData, rowBytes,
+                                             SkToBool(flags & kBilerp_ResizeFlag));
+
+        GrContentKey::Builder builder(key.writable(), origKey, ResizeDomain(), 1);
+        builder[0] = flags;
+
    } else {
        texture = fGpu->createTexture(desc, true, srcData, rowBytes);
    }

    if (texture) {
-        if (texture->cacheAccess().setContentKey(resourceKey)) {
-            if (cacheKey) {
-                *cacheKey = resourceKey;
+        if (texture->cacheAccess().setContentKey(*key)) {
+            if (outKey) {
+                *outKey = *key;
            }
        } else {
            texture->unref();
            texture = NULL;
        }
    }

    return texture;
}

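createTexture (and the two lookup functions added below) wraps the incoming key in SkTCopyOnFirstWrite<GrContentKey>: as long as no resize flags are needed, *key simply refers to origKey and nothing is copied; the first call to key.writable() makes a private copy for the Builder to overwrite. The minimal standalone sketch below illustrates that idiom under the assumption that this is how the utility behaves; the template here is a toy, not Skia's SkTCopyOnFirstWrite.

#include <cassert>

// Toy copy-on-first-write wrapper: reads alias the original object, and the
// first writable() call makes (and from then on uses) a private copy.
template <typename T>
class CopyOnFirstWrite {
public:
    explicit CopyOnFirstWrite(const T& original) : fOriginal(&original) {}

    const T& operator*() const { return fCopied ? fCopy : *fOriginal; }

    T* writable() {
        if (!fCopied) {
            fCopy = *fOriginal;   // copy happens only on the first write request
            fCopied = true;
        }
        return &fCopy;
    }

private:
    const T* fOriginal;
    T fCopy{};
    bool fCopied = false;
};

int main() {
    int original = 7;
    CopyOnFirstWrite<int> key(original);
    assert(*key == 7);            // read-only path: no copy, aliases 'original'

    *key.writable() = 9;          // first write: private copy is made and modified
    assert(*key == 9 && original == 7);
    return 0;
}

The point of the pattern in this patch appears to be keeping the common, non-resized path copy-free: the cache is consulted with origKey itself unless a derived resize key has to be built.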
+GrTexture* GrContext::findAndRefTexture(const GrSurfaceDesc& desc,
+                                        const GrContentKey& origKey,
+                                        const GrTextureParams* params) {
+    uint32_t flags = get_texture_flags(fGpu, params, desc);
+    SkTCopyOnFirstWrite<GrContentKey> key(origKey);
+    if (flags) {
+        GrContentKey::Builder builder(key.writable(), origKey, ResizeDomain(), 1);
+        builder[0] = flags;
+    }
+
+    GrGpuResource* resource = this->findAndRefCachedResource(*key);
+    if (resource) {
+        SkASSERT(static_cast<GrSurface*>(resource)->asTexture());
+        return static_cast<GrSurface*>(resource)->asTexture();
+    }
+    return NULL;
+}
+
+bool GrContext::isTextureInCache(const GrSurfaceDesc& desc,
+                                 const GrContentKey& origKey,
+                                 const GrTextureParams* params) const {
+    uint32_t flags = get_texture_flags(fGpu, params, desc);
+    SkTCopyOnFirstWrite<GrContentKey> key(origKey);
+    if (flags) {
+        GrContentKey::Builder builder(key.writable(), origKey, ResizeDomain(), 1);
+        builder[0] = flags;
+    }
+
+    return fResourceCache2->hasContentKey(*key);
+}
+
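One observation on the new code: createTexture, findAndRefTexture, and isTextureInCache each repeat the same dance of computing the resize flags, copying the key on first write, and appending the flags in the resize domain. A follow-up could factor that into a small file-local helper along the lines of the sketch below; the helper name is hypothetical, it is not part of this patch, and it only compiles inside this file since it reuses the patch's own get_texture_flags, ResizeDomain, and GrContentKey::Builder.

// Hypothetical helper (not in this CL): derive the key actually used for
// lookup/storage. With no resize the original key is used as-is; otherwise a
// key in the resize domain is built from the original key plus the flags word,
// mirroring the three call sites above. Returns the flags for callers that
// also need them (createTexture passes kBilerp on to createResizedTexture).
static uint32_t resolve_texture_key(const GrGpu* gpu,
                                    const GrTextureParams* params,
                                    const GrSurfaceDesc& desc,
                                    const GrContentKey& origKey,
                                    SkTCopyOnFirstWrite<GrContentKey>* key) {
    uint32_t flags = get_texture_flags(gpu, params, desc);
    if (flags) {
        GrContentKey::Builder builder(key->writable(), origKey, ResizeDomain(), 1);
        builder[0] = flags;
    }
    return flags;
}

With such a helper, findAndRefTexture and isTextureInCache would reduce to building the key and calling findAndRefCachedResource(*key) or fResourceCache2->hasContentKey(*key).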
GrTexture* GrContext::refScratchTexture(const GrSurfaceDesc& inDesc, ScratchTexMatch match,
                                        bool calledDuringFlush) {
    // kNoStencil has no meaning if kRT isn't set.
    SkASSERT((inDesc.fFlags & kRenderTarget_GrSurfaceFlag) ||
             !(inDesc.fFlags & kNoStencil_GrSurfaceFlag));

    // Make sure caller has checked for renderability if kRT is set.
    SkASSERT(!(inDesc.fFlags & kRenderTarget_GrSurfaceFlag) ||
             this->isConfigRenderable(inDesc.fConfig, inDesc.fSampleCnt > 0));

(...skipping 1302 matching lines...)
    }
    GrConfigConversionEffect::PMConversion upmToPM =
        static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
    if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
        return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix);
    } else {
        return NULL;
    }
}

-void GrContext::addResourceToCache(const GrResourceKey& resourceKey, GrGpuResource* resource) {
-    resource->cacheAccess().setContentKey(resourceKey);
+void GrContext::addResourceToCache(const GrContentKey& key, GrGpuResource* resource) {
+    resource->cacheAccess().setContentKey(key);
}

-GrGpuResource* GrContext::findAndRefCachedResource(const GrResourceKey& resourceKey) {
-    return fResourceCache2->findAndRefContentResource(resourceKey);
+GrGpuResource* GrContext::findAndRefCachedResource(const GrContentKey& key) {
+    return fResourceCache2->findAndRefContentResource(key);
}

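addResourceToCache and findAndRefCachedResource are the generic entry points other subsystems use with the new key type: adding a resource assigns it a content key, and a successful lookup returns the resource with an extra reference the caller must balance with unref() (the texture->unref() calls earlier in this file follow the same contract). The standalone sketch below models that contract with a toy refcounted cache; it is a conceptual illustration, not GrResourceCache2, and its ownership rules are assumptions of the toy.

#include <cassert>
#include <map>
#include <string>

// Toy refcounted resource.
struct ToyResource {
    int refCnt = 1;
    void ref()   { ++refCnt; }
    void unref() { if (--refCnt == 0) delete this; }
};

// Toy content-keyed cache mirroring the contract of the calls above:
// add-by-key insertion, hasContentKey, and findAndRef, which returns the
// resource with an extra reference the caller must unref.
class ToyCache {
public:
    void add(const std::string& key, ToyResource* resource) {
        fResources[key] = resource;      // the toy cache holds no extra ref
    }
    bool hasContentKey(const std::string& key) const {
        return fResources.count(key) != 0;
    }
    ToyResource* findAndRef(const std::string& key) {
        auto it = fResources.find(key);
        if (it == fResources.end()) {
            return nullptr;
        }
        it->second->ref();               // caller is responsible for unref()
        return it->second;
    }
private:
    std::map<std::string, ToyResource*> fResources;
};

int main() {
    ToyCache cache;
    ToyResource* res = new ToyResource;  // refCnt == 1
    cache.add("texture:42", res);
    assert(cache.hasContentKey("texture:42"));

    ToyResource* found = cache.findAndRef("texture:42");
    assert(found == res && found->refCnt == 2);
    found->unref();                      // balance the ref taken by findAndRef
    res->unref();                        // drop the creation ref; resource deleted
    return 0;
}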
void GrContext::addGpuTraceMarker(const GrGpuTraceMarker* marker) {
    fGpu->addGpuTraceMarker(marker);
    if (fDrawBuffer) {
        fDrawBuffer->addGpuTraceMarker(marker);
    }
}

void GrContext::removeGpuTraceMarker(const GrGpuTraceMarker* marker) {
    fGpu->removeGpuTraceMarker(marker);
    if (fDrawBuffer) {
        fDrawBuffer->removeGpuTraceMarker(marker);
    }
}

///////////////////////////////////////////////////////////////////////////////
#if GR_CACHE_STATS
void GrContext::printCacheStats() const {
    fResourceCache2->printStats();
}
#endif

#if GR_GPU_STATS
const GrContext::GPUStats* GrContext::gpuStats() const {
    return fGpu->gpuStats();
}
#endif