OLD | NEW |
1 | 1 |
2 /* | 2 /* |
3 * Copyright 2011 Google Inc. | 3 * Copyright 2011 Google Inc. |
4 * | 4 * |
5 * Use of this source code is governed by a BSD-style license that can be | 5 * Use of this source code is governed by a BSD-style license that can be |
6 * found in the LICENSE file. | 6 * found in the LICENSE file. |
7 */ | 7 */ |
8 | 8 |
9 #include "GrContext.h" | 9 #include "GrContext.h" |
10 | 10 |
(...skipping 212 matching lines...) |
223 if (fGpu->caps()->pathRenderingSupport() && renderTarget->getStencilBuffer() && | 223 if (fGpu->caps()->pathRenderingSupport() && renderTarget->getStencilBuffer() && |
224 renderTarget->isMultisampled()) { | 224 renderTarget->isMultisampled()) { |
225 return GrStencilAndCoverTextContext::Create(this, leakyProperties); | 225 return GrStencilAndCoverTextContext::Create(this, leakyProperties); |
226 } | 226 } |
227 | 227 |
228 return GrDistanceFieldTextContext::Create(this, leakyProperties, enableDistanceFieldFonts); | 228 return GrDistanceFieldTextContext::Create(this, leakyProperties, enableDistanceFieldFonts); |
229 } | 229 } |
230 | 230 |
231 //////////////////////////////////////////////////////////////////////////////// | 231 //////////////////////////////////////////////////////////////////////////////// |
232 | 232 |
233 GrTexture* GrContext::findAndRefTexture(const GrSurfaceDesc& desc, | |
234 const GrCacheID& cacheID, | |
235 const GrTextureParams* params) { | |
236 GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID); | |
237 | |
238 GrGpuResource* resource = this->findAndRefCachedResource(resourceKey); | |
239 if (resource) { | |
240 SkASSERT(static_cast<GrSurface*>(resource)->asTexture()); | |
241 return static_cast<GrSurface*>(resource)->asTexture(); | |
242 } | |
243 return NULL; | |
244 } | |
245 | |
246 bool GrContext::isTextureInCache(const GrSurfaceDesc& desc, | |
247 const GrCacheID& cacheID, | |
248 const GrTextureParams* params) const { | |
249 GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID); | |
250 return fResourceCache2->hasContentKey(resourceKey); | |
251 } | |
252 | |
253 static void stretch_image(void* dst, | 233 static void stretch_image(void* dst, |
254 int dstW, | 234 int dstW, |
255 int dstH, | 235 int dstH, |
256 const void* src, | 236 const void* src, |
257 int srcW, | 237 int srcW, |
258 int srcH, | 238 int srcH, |
259 size_t bpp) { | 239 size_t bpp) { |
260 SkFixed dx = (srcW << 16) / dstW; | 240 SkFixed dx = (srcW << 16) / dstW; |
261 SkFixed dy = (srcH << 16) / dstH; | 241 SkFixed dy = (srcH << 16) / dstH; |
262 | 242 |
263 SkFixed y = dy >> 1; | 243 SkFixed y = dy >> 1; |
264 | 244 |
265 size_t dstXLimit = dstW*bpp; | 245 size_t dstXLimit = dstW*bpp; |
266 for (int j = 0; j < dstH; ++j) { | 246 for (int j = 0; j < dstH; ++j) { |
267 SkFixed x = dx >> 1; | 247 SkFixed x = dx >> 1; |
268 const uint8_t* srcRow = reinterpret_cast<const uint8_t *>(src) + (y>>16)*srcW*bpp; | 248 const uint8_t* srcRow = reinterpret_cast<const uint8_t *>(src) + (y>>16)*srcW*bpp; |
269 uint8_t* dstRow = reinterpret_cast<uint8_t *>(dst) + j*dstW*bpp; | 249 uint8_t* dstRow = reinterpret_cast<uint8_t *>(dst) + j*dstW*bpp; |
270 for (size_t i = 0; i < dstXLimit; i += bpp) { | 250 for (size_t i = 0; i < dstXLimit; i += bpp) { |
271 memcpy(dstRow + i, srcRow + (x>>16)*bpp, bpp); | 251 memcpy(dstRow + i, srcRow + (x>>16)*bpp, bpp); |
272 x += dx; | 252 x += dx; |
273 } | 253 } |
274 y += dy; | 254 y += dy; |
275 } | 255 } |
276 } | 256 } |
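Note: stretch_image() above is a nearest-neighbor scale in 16.16 fixed point. The sketch below is an illustration only (not part of this patch), with SkFixed replaced by a plain int32_t; it shows the same stepping on a single 1-byte-per-pixel row: the source step is (srcW << 16) / dstW, sampling starts half a step in, and each destination pixel copies the source pixel at x >> 16 with no filtering.

    // Standalone illustration of stretch_image()'s fixed-point stepping.
    // int32_t stands in for SkFixed (a 16.16 fixed-point value).
    #include <cstdint>
    #include <cstdio>

    int main() {
        const int srcW = 2, dstW = 4;
        const uint8_t src[srcW] = {10, 20};   // one source row, 1 byte per pixel
        uint8_t dst[dstW];

        int32_t dx = (srcW << 16) / dstW;     // source step per destination pixel (16.16)
        int32_t x = dx >> 1;                  // start half a step in, i.e. at the pixel center
        for (int i = 0; i < dstW; ++i) {
            dst[i] = src[x >> 16];            // nearest-neighbor pick, no filtering
            x += dx;
        }
        for (int i = 0; i < dstW; ++i) {
            printf("%u ", dst[i]);            // prints: 10 10 20 20
        }
        printf("\n");
        return 0;
    }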
277 | 257 |
| 258 enum ResizeFlags { |
| 259 /** |
| 260 * The kStretchToPOT bit is set when the texture is NPOT and is being repeated or mipped but the |
| 261 * hardware doesn't support that feature. |
| 262 */ |
| 263 kStretchToPOT_ResizeFlag = 0x1, |
| 264 /** |
| 265 * The kBilerp bit can only be set when the kStretchToPOT flag is set and indicates whether the |
| 266 * stretched texture should be bilerped. |
| 267 */ |
| 268 kBilerp_ResizeFlag = 0x2, |
| 269 }; |
| 270 |
| 271 static uint32_t get_texture_flags(const GrGpu* gpu, |
| 272 const GrTextureParams* params, |
| 273 const GrSurfaceDesc& desc) { |
| 274 uint32_t flags = 0; |
| 275 bool tiled = params && params->isTiled(); |
| 276 if (tiled && !gpu->caps()->npotTextureTileSupport()) { |
| 277 if (!SkIsPow2(desc.fWidth) || !SkIsPow2(desc.fHeight)) { |
| 278 flags |= kStretchToPOT_ResizeFlag; |
| 279 switch(params->filterMode()) { |
| 280 case GrTextureParams::kNone_FilterMode: |
| 281 break; |
| 282 case GrTextureParams::kBilerp_FilterMode: |
| 283 case GrTextureParams::kMipMap_FilterMode: |
| 284 flags |= kBilerp_ResizeFlag; |
| 285 break; |
| 286 } |
| 287 } |
| 288 } |
| 289 return flags; |
| 290 } |
278 // The desired texture is NPOT and tiled but that isn't supported by | 291 // The desired texture is NPOT and tiled but that isn't supported by |
279 // the current hardware. Resize the texture to be a POT | 292 // the current hardware. Resize the texture to be a POT |
280 GrTexture* GrContext::createResizedTexture(const GrSurfaceDesc& desc, | 293 GrTexture* GrContext::createResizedTexture(const GrSurfaceDesc& desc, |
281 const GrCacheID& cacheID, | 294 const GrContentKey& origKey, |
282 const void* srcData, | 295 const void* srcData, |
283 size_t rowBytes, | 296 size_t rowBytes, |
284 bool filter) { | 297 bool filter) { |
285 SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, cacheID, NULL)); | 298 SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, origKey, NULL)); |
286 if (NULL == clampedTexture) { | 299 if (NULL == clampedTexture) { |
287 clampedTexture.reset(this->createTexture(NULL, desc, cacheID, srcData, rowBytes)); | 300 clampedTexture.reset(this->createTexture(NULL, desc, origKey, srcData, rowBytes)); |
288 | 301 |
289 if (NULL == clampedTexture) { | 302 if (NULL == clampedTexture) { |
290 return NULL; | 303 return NULL; |
291 } | 304 } |
| 305 clampedTexture->cacheAccess().setContentKey(origKey); |
292 } | 306 } |
293 | 307 |
294 GrSurfaceDesc rtDesc = desc; | 308 GrSurfaceDesc rtDesc = desc; |
295 rtDesc.fFlags = rtDesc.fFlags | | 309 rtDesc.fFlags = rtDesc.fFlags | |
296 kRenderTarget_GrSurfaceFlag | | 310 kRenderTarget_GrSurfaceFlag | |
297 kNoStencil_GrSurfaceFlag; | 311 kNoStencil_GrSurfaceFlag; |
298 rtDesc.fWidth = GrNextPow2(desc.fWidth); | 312 rtDesc.fWidth = GrNextPow2(desc.fWidth); |
299 rtDesc.fHeight = GrNextPow2(desc.fHeight); | 313 rtDesc.fHeight = GrNextPow2(desc.fHeight); |
300 | 314 |
301 GrTexture* texture = fGpu->createTexture(rtDesc, true, NULL, 0); | 315 GrTexture* texture = fGpu->createTexture(rtDesc, true, NULL, 0); |
(...skipping 16 matching lines...) |
318 GrDefaultGeoProcFactory::Create(flags, GrColor_WHITE)); | 332 GrDefaultGeoProcFactory::Create(flags, GrColor_WHITE)); |
319 | 333 |
320 GrDrawTarget::AutoReleaseGeometry arg(fDrawBuffer, 4, gp->getVertexStride(), 0); | 334 GrDrawTarget::AutoReleaseGeometry arg(fDrawBuffer, 4, gp->getVertexStride(), 0); |
321 SkASSERT(gp->getVertexStride() == 2 * sizeof(SkPoint)); | 335 SkASSERT(gp->getVertexStride() == 2 * sizeof(SkPoint)); |
322 | 336 |
323 if (arg.succeeded()) { | 337 if (arg.succeeded()) { |
324 SkPoint* verts = (SkPoint*) arg.vertices(); | 338 SkPoint* verts = (SkPoint*) arg.vertices(); |
325 verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(SkPoint)); | 339 verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(SkPoint)); |
326 verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(SkPoint)); | 340 verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(SkPoint)); |
327 fDrawBuffer->drawNonIndexed(&drawState, gp, kTriangleFan_GrPrimitiveType, 0, 4); | 341 fDrawBuffer->drawNonIndexed(&drawState, gp, kTriangleFan_GrPrimitiveType, 0, 4); |
| 342 } else { |
| 343 texture->unref(); |
| 344 texture = NULL; |
328 } | 345 } |
329 } else { | 346 } else { |
330 // TODO: Our CPU stretch doesn't filter. But we create separate | 347 // TODO: Our CPU stretch doesn't filter. But we create separate |
331 // stretched textures when the texture params is either filtered or | 348 // stretched textures when the texture params is either filtered or |
332 // not. Either implement filtered stretch blit on CPU or just create | 349 // not. Either implement filtered stretch blit on CPU or just create |
333 // one when FBO case fails. | 350 // one when FBO case fails. |
334 | 351 |
335 rtDesc.fFlags = kNone_GrSurfaceFlags; | 352 rtDesc.fFlags = kNone_GrSurfaceFlags; |
336 // no longer need to clamp at min RT size. | 353 // no longer need to clamp at min RT size. |
337 rtDesc.fWidth = GrNextPow2(desc.fWidth); | 354 rtDesc.fWidth = GrNextPow2(desc.fWidth); |
(...skipping 11 matching lines...) |
349 | 366 |
350 texture = fGpu->createTexture(rtDesc, true, stretchedPixels.get(), stretchedRowBytes); | 367 texture = fGpu->createTexture(rtDesc, true, stretchedPixels.get(), stretchedRowBytes); |
351 SkASSERT(texture); | 368 SkASSERT(texture); |
352 } | 369 } |
353 | 370 |
354 return texture; | 371 return texture; |
355 } | 372 } |
356 | 373 |
357 GrTexture* GrContext::createTexture(const GrTextureParams* params, | 374 GrTexture* GrContext::createTexture(const GrTextureParams* params, |
358 const GrSurfaceDesc& desc, | 375 const GrSurfaceDesc& desc, |
359 const GrCacheID& cacheID, | 376 const GrContentKey& origKey, |
360 const void* srcData, | 377 const void* srcData, |
361 size_t rowBytes, | 378 size_t rowBytes, |
362 GrResourceKey* cacheKey) { | 379 GrContentKey* outKey) { |
363 GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID); | 380 GrTexture* texture; |
| 381 uint32_t flags = get_texture_flags(fGpu, params, desc); |
| 382 SkTCopyOnFirstWrite<GrContentKey> key(origKey); |
| 383 if (flags) { |
| 384 // We don't have a code path to resize compressed textures. |
| 385 if (GrPixelConfigIsCompressed(desc.fConfig)) { |
| 386 return NULL; |
| 387 } |
| 388 texture = this->createResizedTexture(desc, origKey, srcData, rowBytes, |
| 389 SkToBool(flags & kBilerp_ResizeFlag)); |
364 | 390 |
365 GrTexture* texture; | 391 static const GrContentKey::Domain kResizeDomain = GrContentKey::GenerateDomain(); |
366 if (GrTexturePriv::NeedsResizing(resourceKey)) { | 392 GrContentKey::Builder builder(key.writable(), origKey, kResizeDomain, 1); |
367 // We do not know how to resize compressed textures. | 393 builder[0] = flags; |
368 SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig)); | |
369 | 394 |
370 texture = this->createResizedTexture(desc, cacheID, | |
371 srcData, rowBytes, | |
372 GrTexturePriv::NeedsBilerp(resourceKey)); | |
373 } else { | 395 } else { |
374 texture = fGpu->createTexture(desc, true, srcData, rowBytes); | 396 texture = fGpu->createTexture(desc, true, srcData, rowBytes); |
375 } | 397 } |
376 | 398 |
377 if (texture) { | 399 if (texture) { |
378 if (texture->cacheAccess().setContentKey(resourceKey)) { | 400 if (texture->cacheAccess().setContentKey(*key)) { |
379 if (cacheKey) { | 401 if (outKey) { |
380 *cacheKey = resourceKey; | 402 *outKey = *key; |
381 } | 403 } |
382 } else { | 404 } else { |
383 texture->unref(); | 405 texture->unref(); |
384 texture = NULL; | 406 texture = NULL; |
385 } | 407 } |
386 } | 408 } |
387 | 409 |
388 return texture; | 410 return texture; |
389 } | 411 } |
390 | 412 |
| 413 GrTexture* GrContext::findAndRefTexture(const GrSurfaceDesc& desc, |
| 414 const GrContentKey& origKey, |
| 415 const GrTextureParams* params) { |
| 416 uint32_t flags = get_texture_flags(fGpu, params, desc); |
| 417 SkTCopyOnFirstWrite<GrContentKey> key(origKey); |
| 418 if (flags) { |
| 419 static const GrContentKey::Domain kResizeDomain = GrContentKey::GenerateDomain(); |
| 420 GrContentKey::Builder builder(key.writable(), origKey, kResizeDomain, 1); |
| 421 builder[0] = flags; |
| 422 } |
| 423 |
| 424 GrGpuResource* resource = this->findAndRefCachedResource(*key); |
| 425 if (resource) { |
| 426 SkASSERT(static_cast<GrSurface*>(resource)->asTexture()); |
| 427 return static_cast<GrSurface*>(resource)->asTexture(); |
| 428 } |
| 429 return NULL; |
| 430 } |
| 431 |
| 432 bool GrContext::isTextureInCache(const GrSurfaceDesc& desc, |
| 433 const GrContentKey& origKey, |
| 434 const GrTextureParams* params) const { |
| 435 uint32_t flags = get_texture_flags(fGpu, params, desc); |
| 436 SkTCopyOnFirstWrite<GrContentKey> key(origKey); |
| 437 if (flags) { |
| 438 static const GrContentKey::Domain kResizeDomain = GrContentKey::GenerateDomain(); |
| 439 GrContentKey::Builder builder(key.writable(), origKey, kResizeDomain, 1); |
| 440 builder[0] = flags; |
| 441 } |
| 442 |
| 443 return fResourceCache2->hasContentKey(*key); |
| 444 } |
| 445 |
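Note: the three functions above (createTexture(), findAndRefTexture(), isTextureInCache()) repeat the same resize-key construction. A possible follow-up, not part of this patch and assuming only the GrContentKey API already used here (GenerateDomain() and the Builder(newKey, origKey, domain, extraCount) form), would factor it into one helper so every caller shares a single resize domain. As written, each function keeps its own function-local static kResizeDomain; if GenerateDomain() hands out a fresh domain per call, the keys built in createTexture() would not match the ones looked up in findAndRefTexture() or isTextureInCache().

    // Hypothetical helper, sketched against the GrContentKey API in this CL.
    static void resize_content_key(GrContentKey* resizedKey,
                                   const GrContentKey& origKey,
                                   uint32_t resizeFlags) {
        // One domain shared by every caller, so resized keys agree across functions.
        static const GrContentKey::Domain kResizeDomain = GrContentKey::GenerateDomain();
        GrContentKey::Builder builder(resizedKey, origKey, kResizeDomain, 1);
        builder[0] = resizeFlags;
    }

    // Usage at each call site, keeping the copy-on-write pattern from the patch:
    //     SkTCopyOnFirstWrite<GrContentKey> key(origKey);
    //     if (flags) {
    //         resize_content_key(key.writable(), origKey, flags);
    //     }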
391 GrTexture* GrContext::refScratchTexture(const GrSurfaceDesc& inDesc, ScratchTexMatch match, | 446 GrTexture* GrContext::refScratchTexture(const GrSurfaceDesc& inDesc, ScratchTexMatch match, |
392 bool calledDuringFlush) { | 447 bool calledDuringFlush) { |
393 // kNoStencil has no meaning if kRT isn't set. | 448 // kNoStencil has no meaning if kRT isn't set. |
394 SkASSERT((inDesc.fFlags & kRenderTarget_GrSurfaceFlag) || | 449 SkASSERT((inDesc.fFlags & kRenderTarget_GrSurfaceFlag) || |
395 !(inDesc.fFlags & kNoStencil_GrSurfaceFlag)); | 450 !(inDesc.fFlags & kNoStencil_GrSurfaceFlag)); |
396 | 451 |
397 // Make sure caller has checked for renderability if kRT is set. | 452 // Make sure caller has checked for renderability if kRT is set. |
398 SkASSERT(!(inDesc.fFlags & kRenderTarget_GrSurfaceFlag) || | 453 SkASSERT(!(inDesc.fFlags & kRenderTarget_GrSurfaceFlag) || |
399 this->isConfigRenderable(inDesc.fConfig, inDesc.fSampleCnt > 0)); | 454 this->isConfigRenderable(inDesc.fConfig, inDesc.fSampleCnt > 0)); |
400 | 455 |
(...skipping 1308 matching lines...) |
1709 } | 1764 } |
1710 GrConfigConversionEffect::PMConversion upmToPM = | 1765 GrConfigConversionEffect::PMConversion upmToPM = |
1711 static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion); | 1766 static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion); |
1712 if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) { | 1767 if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) { |
1713 return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix); | 1768 return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix); |
1714 } else { | 1769 } else { |
1715 return NULL; | 1770 return NULL; |
1716 } | 1771 } |
1717 } | 1772 } |
1718 | 1773 |
1719 void GrContext::addResourceToCache(const GrResourceKey& resourceKey, GrGpuResource* resource) { | 1774 void GrContext::addResourceToCache(const GrContentKey& key, GrGpuResource* resource) { |
1720 resource->cacheAccess().setContentKey(resourceKey); | 1775 resource->cacheAccess().setContentKey(key); |
1721 } | 1776 } |
1722 | 1777 |
1723 GrGpuResource* GrContext::findAndRefCachedResource(const GrResourceKey& resourceKey) { | 1778 GrGpuResource* GrContext::findAndRefCachedResource(const GrContentKey& key) { |
1724 return fResourceCache2->findAndRefContentResource(resourceKey); | 1779 return fResourceCache2->findAndRefContentResource(key); |
1725 } | 1780 } |
1726 | 1781 |
1727 void GrContext::addGpuTraceMarker(const GrGpuTraceMarker* marker) { | 1782 void GrContext::addGpuTraceMarker(const GrGpuTraceMarker* marker) { |
1728 fGpu->addGpuTraceMarker(marker); | 1783 fGpu->addGpuTraceMarker(marker); |
1729 if (fDrawBuffer) { | 1784 if (fDrawBuffer) { |
1730 fDrawBuffer->addGpuTraceMarker(marker); | 1785 fDrawBuffer->addGpuTraceMarker(marker); |
1731 } | 1786 } |
1732 } | 1787 } |
1733 | 1788 |
1734 void GrContext::removeGpuTraceMarker(const GrGpuTraceMarker* marker) { | 1789 void GrContext::removeGpuTraceMarker(const GrGpuTraceMarker* marker) { |
1735 fGpu->removeGpuTraceMarker(marker); | 1790 fGpu->removeGpuTraceMarker(marker); |
1736 if (fDrawBuffer) { | 1791 if (fDrawBuffer) { |
1737 fDrawBuffer->removeGpuTraceMarker(marker); | 1792 fDrawBuffer->removeGpuTraceMarker(marker); |
1738 } | 1793 } |
1739 } | 1794 } |
1740 | 1795 |
1741 /////////////////////////////////////////////////////////////////////////////// | 1796 /////////////////////////////////////////////////////////////////////////////// |
1742 #if GR_CACHE_STATS | 1797 #if GR_CACHE_STATS |
1743 void GrContext::printCacheStats() const { | 1798 void GrContext::printCacheStats() const { |
1744 fResourceCache2->printStats(); | 1799 fResourceCache2->printStats(); |
1745 } | 1800 } |
1746 #endif | 1801 #endif |
1747 | 1802 |
1748 #if GR_GPU_STATS | 1803 #if GR_GPU_STATS |
1749 const GrContext::GPUStats* GrContext::gpuStats() const { | 1804 const GrContext::GPUStats* GrContext::gpuStats() const { |
1750 return fGpu->gpuStats(); | 1805 return fGpu->gpuStats(); |
1751 } | 1806 } |
1752 #endif | 1807 #endif |
1753 | 1808 |