| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2011 Google Inc. | 2 * Copyright 2011 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "SkGpuDevice.h" | 8 #include "SkGpuDevice.h" |
| 9 | 9 |
| 10 #include "effects/GrBicubicEffect.h" | 10 #include "effects/GrBicubicEffect.h" |
| (...skipping 589 matching lines...) |
| 600 return false; | 600 return false; |
| 601 } | 601 } |
| 602 | 602 |
| 603 // we now have a device-aligned 8bit mask in dstM, ready to be drawn using | 603 // we now have a device-aligned 8bit mask in dstM, ready to be drawn using |
| 604 // the current clip (and identity matrix) and GrPaint settings | 604 // the current clip (and identity matrix) and GrPaint settings |
| 605 GrTextureDesc desc; | 605 GrTextureDesc desc; |
| 606 desc.fWidth = dstM.fBounds.width(); | 606 desc.fWidth = dstM.fBounds.width(); |
| 607 desc.fHeight = dstM.fBounds.height(); | 607 desc.fHeight = dstM.fBounds.height(); |
| 608 desc.fConfig = kAlpha_8_GrPixelConfig; | 608 desc.fConfig = kAlpha_8_GrPixelConfig; |
| 609 | 609 |
| 610 GrAutoScratchTexture ast(context, desc); | 610 SkAutoTUnref<GrTexture> texture( |
| 611 GrTexture* texture = ast.texture(); | 611 context->refScratchTexture(desc, GrContext::kApprox_ScratchTexMatch)); |
| 612 | 612 if (!texture) { |
| 613 if (NULL == texture) { | |
| 614 return false; | 613 return false; |
| 615 } | 614 } |
| 616 texture->writePixels(0, 0, desc.fWidth, desc.fHeight, desc.fConfig, | 615 texture->writePixels(0, 0, desc.fWidth, desc.fHeight, desc.fConfig, |
| 617 dstM.fImage, dstM.fRowBytes); | 616 dstM.fImage, dstM.fRowBytes); |
| 618 | 617 |
| 619 SkRect maskRect = SkRect::Make(dstM.fBounds); | 618 SkRect maskRect = SkRect::Make(dstM.fBounds); |
| 620 | 619 |
| 621 return draw_mask(context, maskRect, grp, texture); | 620 return draw_mask(context, maskRect, grp, texture); |
| 622 } | 621 } |
| 623 | 622 |
| 624 // Create a mask of 'devPath' and place the result in 'mask'. Return true on | 623 // Create a mask of 'devPath' and place the result in 'mask'. |
| 625 // success; false otherwise. | 624 GrTexture* create_mask_GPU(GrContext* context, |
| 626 bool create_mask_GPU(GrContext* context, | 625 const SkRect& maskRect, |
| 627 const SkRect& maskRect, | 626 const SkPath& devPath, |
| 628 const SkPath& devPath, | 627 const GrStrokeInfo& strokeInfo, |
| 629 const GrStrokeInfo& strokeInfo, | 628 bool doAA, |
| 630 bool doAA, | 629 int sampleCnt) { |
| 631 GrAutoScratchTexture* mask, | |
| 632 int SampleCnt) { | |
| 633 GrTextureDesc desc; | 630 GrTextureDesc desc; |
| 634 desc.fFlags = kRenderTarget_GrTextureFlagBit; | 631 desc.fFlags = kRenderTarget_GrTextureFlagBit; |
| 635 desc.fWidth = SkScalarCeilToInt(maskRect.width()); | 632 desc.fWidth = SkScalarCeilToInt(maskRect.width()); |
| 636 desc.fHeight = SkScalarCeilToInt(maskRect.height()); | 633 desc.fHeight = SkScalarCeilToInt(maskRect.height()); |
| 637 desc.fSampleCnt = doAA ? SampleCnt : 0; | 634 desc.fSampleCnt = doAA ? sampleCnt : 0; |
| 638 // We actually only need A8, but it often isn't supported as a | 635 // We actually only need A8, but it often isn't supported as a |
| 639 // render target so default to RGBA_8888 | 636 // render target so default to RGBA_8888 |
| 640 desc.fConfig = kRGBA_8888_GrPixelConfig; | 637 desc.fConfig = kRGBA_8888_GrPixelConfig; |
| 641 | 638 |
| 642 if (context->isConfigRenderable(kAlpha_8_GrPixelConfig, | 639 if (context->isConfigRenderable(kAlpha_8_GrPixelConfig, |
| 643 desc.fSampleCnt > 0)) { | 640 desc.fSampleCnt > 0)) { |
| 644 desc.fConfig = kAlpha_8_GrPixelConfig; | 641 desc.fConfig = kAlpha_8_GrPixelConfig; |
| 645 } | 642 } |
| 646 | 643 |
| 647 mask->set(context, desc); | 644 GrTexture* mask = context->refScratchTexture(desc,GrContext::kApprox_ScratchTexMatch); |
| 648 if (NULL == mask->texture()) { | 645 if (NULL == mask) { |
| 649 return false; | 646 return NULL; |
| 650 } | 647 } |
| 651 | 648 |
| 652 GrTexture* maskTexture = mask->texture(); | |
| 653 SkRect clipRect = SkRect::MakeWH(maskRect.width(), maskRect.height()); | 649 SkRect clipRect = SkRect::MakeWH(maskRect.width(), maskRect.height()); |
| 654 | 650 |
| 655 GrContext::AutoRenderTarget art(context, maskTexture->asRenderTarget()); | 651 GrContext::AutoRenderTarget art(context, mask->asRenderTarget()); |
| 656 GrContext::AutoClip ac(context, clipRect); | 652 GrContext::AutoClip ac(context, clipRect); |
| 657 | 653 |
| 658 context->clear(NULL, 0x0, true); | 654 context->clear(NULL, 0x0, true); |
| 659 | 655 |
| 660 GrPaint tempPaint; | 656 GrPaint tempPaint; |
| 661 if (doAA) { | 657 if (doAA) { |
| 662 tempPaint.setAntiAlias(true); | 658 tempPaint.setAntiAlias(true); |
| 663 // AA uses the "coverage" stages on GrDrawTarget. Coverage with a dst | 659 // AA uses the "coverage" stages on GrDrawTarget. Coverage with a dst |
| 664 // blend coeff of zero requires dual source blending support in order | 660 // blend coeff of zero requires dual source blending support in order |
| 665 // to properly blend partially covered pixels. This means the AA | 661 // to properly blend partially covered pixels. This means the AA |
| 666 // code path may not be taken. So we use a dst blend coeff of ISA. We | 662 // code path may not be taken. So we use a dst blend coeff of ISA. We |
| 667 // could special case AA draws to a dst surface with known alpha=0 to | 663 // could special case AA draws to a dst surface with known alpha=0 to |
| 668 // use a zero dst coeff when dual source blending isn't available. | 664 // use a zero dst coeff when dual source blending isn't available. |
| 669 tempPaint.setBlendFunc(kOne_GrBlendCoeff, kISC_GrBlendCoeff); | 665 tempPaint.setBlendFunc(kOne_GrBlendCoeff, kISC_GrBlendCoeff); |
| 670 } | 666 } |
| 671 | 667 |
| 672 GrContext::AutoMatrix am; | 668 GrContext::AutoMatrix am; |
| 673 | 669 |
| 674 // Draw the mask into maskTexture with the path's top-left at the origin using tempPaint. | 670 // Draw the mask into maskTexture with the path's top-left at the origin using tempPaint. |
| 675 SkMatrix translate; | 671 SkMatrix translate; |
| 676 translate.setTranslate(-maskRect.fLeft, -maskRect.fTop); | 672 translate.setTranslate(-maskRect.fLeft, -maskRect.fTop); |
| 677 am.set(context, translate); | 673 am.set(context, translate); |
| 678 context->drawPath(tempPaint, devPath, strokeInfo); | 674 context->drawPath(tempPaint, devPath, strokeInfo); |
| 679 return true; | 675 return mask; |
| 680 } | 676 } |
| 681 | 677 |
| 682 SkBitmap wrap_texture(GrTexture* texture) { | 678 SkBitmap wrap_texture(GrTexture* texture) { |
| 683 SkBitmap result; | 679 SkBitmap result; |
| 684 result.setInfo(texture->surfacePriv().info()); | 680 result.setInfo(texture->surfacePriv().info()); |
| 685 result.setPixelRef(SkNEW_ARGS(SkGrPixelRef, (result.info(), texture)))->unref(); | 681 result.setPixelRef(SkNEW_ARGS(SkGrPixelRef, (result.info(), texture)))->unref(); |
| 686 return result; | 682 return result; |
| 687 } | 683 } |
| 688 | 684 |
| 689 }; | 685 }; |
| (...skipping 74 matching lines...) |
| 764 return; | 760 return; |
| 765 } | 761 } |
| 766 | 762 |
| 767 if (paint.getMaskFilter()->directFilterMaskGPU(fContext, &grPaint, | 763 if (paint.getMaskFilter()->directFilterMaskGPU(fContext, &grPaint, |
| 768 stroke, *devPathPtr)) { | 764 stroke, *devPathPtr)) { |
| 769 // the mask filter was able to draw itself directly, so there's nothing | 765 // the mask filter was able to draw itself directly, so there's nothing |
| 770 // left to do. | 766 // left to do. |
| 771 return; | 767 return; |
| 772 } | 768 } |
| 773 | 769 |
| 774 GrAutoScratchTexture mask; | |
| 775 | 770 |
| 776 if (create_mask_GPU(fContext, maskRect, *devPathPtr, strokeInfo, | 771 SkAutoTUnref<GrTexture> mask(create_mask_GPU(fContext, maskRect, *devPathPtr, |
| 777 grPaint.isAntiAlias(), &mask, fRenderTarget->numSamples())) { | 772 strokeInfo, grPaint.isAntiAlias(), |
| | 773 fRenderTarget->numSamples())); |
| | 774 if (mask) { |
| 778 GrTexture* filtered; | 775 GrTexture* filtered; |
| 779 | 776 |
| 780 if (paint.getMaskFilter()->filterMaskGPU(mask.texture(), | 777 if (paint.getMaskFilter()->filterMaskGPU(mask, ctm, maskRect, &filtered, true)) { |
| 781 ctm, maskRect, &filtered, true)) { | |
| 782 // filterMaskGPU gives us ownership of a ref to the result | 778 // filterMaskGPU gives us ownership of a ref to the result |
| 783 SkAutoTUnref<GrTexture> atu(filtered); | 779 SkAutoTUnref<GrTexture> atu(filtered); |
| 784 | |
| 785 // If the scratch texture that we used as the filter src also holds the filter | |
| 786 // result then we must detach so that this texture isn't recycled for a later | |
| 787 // draw. | |
| 788 if (filtered == mask.texture()) { | |
| 789 mask.detach(); | |
| 790 filtered->unref(); // detach transfers GrAutoScratchTexture's ref to us. | |
| 791 } | |
| 792 | |
| 793 if (draw_mask(fContext, maskRect, &grPaint, filtered)) { | 780 if (draw_mask(fContext, maskRect, &grPaint, filtered)) { |
| 794 // This path is completely drawn | 781 // This path is completely drawn |
| 795 return; | 782 return; |
| 796 } | 783 } |
| 797 } | 784 } |
| 798 } | 785 } |
| 799 } | 786 } |
| 800 | 787 |
| 801 // draw the mask on the CPU - this is a fallthrough path in case the | 788 // draw the mask on the CPU - this is a fallthrough path in case the |
| 802 // GPU path fails | 789 // GPU path fails |
| (...skipping 961 matching lines...) |
| 1764 SkAutoTUnref<GrTexture> texture; | 1751 SkAutoTUnref<GrTexture> texture; |
| 1765 // Skia's convention is to only clear a device if it is non-opaque. | 1752 // Skia's convention is to only clear a device if it is non-opaque. |
| 1766 unsigned flags = info.isOpaque() ? 0 : kNeedClear_Flag; | 1753 unsigned flags = info.isOpaque() ? 0 : kNeedClear_Flag; |
| 1767 | 1754 |
| 1768 #if CACHE_COMPATIBLE_DEVICE_TEXTURES | 1755 #if CACHE_COMPATIBLE_DEVICE_TEXTURES |
| 1769 // layers are never drawn in repeat modes, so we can request an approx | 1756 // layers are never drawn in repeat modes, so we can request an approx |
| 1770 // match and ignore any padding. | 1757 // match and ignore any padding. |
| 1771 const GrContext::ScratchTexMatch match = (kSaveLayer_Usage == usage) ? | 1758 const GrContext::ScratchTexMatch match = (kSaveLayer_Usage == usage) ? |
| 1772 GrContext::kApprox_ScratchTexMatch : | 1759 GrContext::kApprox_ScratchTexMatch : |
| 1773 GrContext::kExact_ScratchTexMatch; | 1760 GrContext::kExact_ScratchTexMatch; |
| 1774 texture.reset(fContext->lockAndRefScratchTexture(desc, match)); | 1761 texture.reset(fContext->refScratchTexture(desc, match)); |
| 1775 #else | 1762 #else |
| 1776 texture.reset(fContext->createUncachedTexture(desc, NULL, 0)); | 1763 texture.reset(fContext->createUncachedTexture(desc, NULL, 0)); |
| 1777 #endif | 1764 #endif |
| 1778 if (texture.get()) { | 1765 if (texture.get()) { |
| 1779 return SkGpuDevice::Create(texture, SkSurfaceProps(SkSurfaceProps::kLegacyFontHost_InitType), flags); | 1766 return SkGpuDevice::Create(texture, SkSurfaceProps(SkSurfaceProps::kLegacyFontHost_InitType), flags); |
| 1780 } else { | 1767 } else { |
| 1781 GrPrintf("---- failed to create compatible device texture [%d %d]\n", | 1768 GrPrintf("---- failed to create compatible device texture [%d %d]\n", |
| 1782 info.width(), info.height()); | 1769 info.width(), info.height()); |
| 1783 return NULL; | 1770 return NULL; |
| 1784 } | 1771 } |
| (...skipping 53 matching lines...) |
| 1838 GrLayerHoister::UnlockLayers(fContext, atlased, nonAtlased, recycled); | 1825 GrLayerHoister::UnlockLayers(fContext, atlased, nonAtlased, recycled); |
| 1839 | 1826 |
| 1840 return true; | 1827 return true; |
| 1841 } | 1828 } |
| 1842 | 1829 |
| 1843 SkImageFilter::Cache* SkGpuDevice::getImageFilterCache() { | 1830 SkImageFilter::Cache* SkGpuDevice::getImageFilterCache() { |
| 1844 // We always return a transient cache, so it is freed after each | 1831 // We always return a transient cache, so it is freed after each |
| 1845 // filter traversal. | 1832 // filter traversal. |
| 1846 return SkImageFilter::Cache::Create(kDefaultImageFilterCacheSize); | 1833 return SkImageFilter::Cache::Create(kDefaultImageFilterCacheSize); |
| 1847 } | 1834 } |
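For context, a minimal sketch of the ownership pattern the NEW side adopts: create_mask_GPU() now returns a reffed GrTexture* (or NULL on failure) instead of filling in a GrAutoScratchTexture, and the caller adopts that ref with SkAutoTUnref, which removes the old detach()/unref() special case when filterMaskGPU() returns the same texture it was given. The caller below is illustrative only, not part of the patch; names such as maskFilter and grPaint stand in for the locals used in the actual code.

// Illustrative caller, assuming the signatures shown in the NEW column.
SkAutoTUnref<GrTexture> mask(create_mask_GPU(context, maskRect, devPath,
                                             strokeInfo, doAA, sampleCnt));
if (!mask) {
    return;  // GPU mask creation failed; fall back to the CPU path
}
GrTexture* filtered;
if (maskFilter->filterMaskGPU(mask, ctm, maskRect, &filtered, true)) {
    // filterMaskGPU() gives us ownership of a ref, even when 'filtered'
    // aliases 'mask'; SkAutoTUnref releases it when it goes out of scope.
    SkAutoTUnref<GrTexture> autoFiltered(filtered);
    draw_mask(context, maskRect, &grPaint, filtered);
}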