| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2011 Google Inc. | 2 * Copyright 2011 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "SkGpuDevice.h" | 8 #include "SkGpuDevice.h" |
| 9 | 9 |
| 10 #include "effects/GrBicubicEffect.h" | 10 #include "effects/GrBicubicEffect.h" |
| (...skipping 591 matching lines...) |
| 602 return false; | 602 return false; |
| 603 } | 603 } |
| 604 | 604 |
| 605 // we now have a device-aligned 8bit mask in dstM, ready to be drawn using | 605 // we now have a device-aligned 8bit mask in dstM, ready to be drawn using |
| 606 // the current clip (and identity matrix) and GrPaint settings | 606 // the current clip (and identity matrix) and GrPaint settings |
| 607 GrTextureDesc desc; | 607 GrTextureDesc desc; |
| 608 desc.fWidth = dstM.fBounds.width(); | 608 desc.fWidth = dstM.fBounds.width(); |
| 609 desc.fHeight = dstM.fBounds.height(); | 609 desc.fHeight = dstM.fBounds.height(); |
| 610 desc.fConfig = kAlpha_8_GrPixelConfig; | 610 desc.fConfig = kAlpha_8_GrPixelConfig; |
| 611 | 611 |
| 612 GrAutoScratchTexture ast(context, desc); | 612 SkAutoTUnref<GrTexture> texture( |
| 613 GrTexture* texture = ast.texture(); | 613 context->refScratchTexture(desc, GrContext::kApprox_ScratchTexMatch)); |
| 614 | 614 if (!texture) { |
| 615 if (NULL == texture) { | |
| 616 return false; | 615 return false; |
| 617 } | 616 } |
| 618 texture->writePixels(0, 0, desc.fWidth, desc.fHeight, desc.fConfig, | 617 texture->writePixels(0, 0, desc.fWidth, desc.fHeight, desc.fConfig, |
| 619 dstM.fImage, dstM.fRowBytes); | 618 dstM.fImage, dstM.fRowBytes); |
| 620 | 619 |
| 621 SkRect maskRect = SkRect::Make(dstM.fBounds); | 620 SkRect maskRect = SkRect::Make(dstM.fBounds); |
| 622 | 621 |
| 623 return draw_mask(context, maskRect, grp, texture); | 622 return draw_mask(context, maskRect, grp, texture); |
| 624 } | 623 } |
| 625 | 624 |
| 626 // Create a mask of 'devPath' and place the result in 'mask'. Return true on | 625 // Create a mask of 'devPath' and place the result in 'mask'. |
| 627 // success; false otherwise. | 626 GrTexture* create_mask_GPU(GrContext* context, |
| 628 bool create_mask_GPU(GrContext* context, | 627 const SkRect& maskRect, |
| 629 const SkRect& maskRect, | 628 const SkPath& devPath, |
| 630 const SkPath& devPath, | 629 const GrStrokeInfo& strokeInfo, |
| 631 const GrStrokeInfo& strokeInfo, | 630 bool doAA, |
| 632 bool doAA, | 631 int sampleCnt) { |
| 633 GrAutoScratchTexture* mask, | |
| 634 int SampleCnt) { | |
| 635 GrTextureDesc desc; | 632 GrTextureDesc desc; |
| 636 desc.fFlags = kRenderTarget_GrTextureFlagBit; | 633 desc.fFlags = kRenderTarget_GrTextureFlagBit; |
| 637 desc.fWidth = SkScalarCeilToInt(maskRect.width()); | 634 desc.fWidth = SkScalarCeilToInt(maskRect.width()); |
| 638 desc.fHeight = SkScalarCeilToInt(maskRect.height()); | 635 desc.fHeight = SkScalarCeilToInt(maskRect.height()); |
| 639 desc.fSampleCnt = doAA ? SampleCnt : 0; | 636 desc.fSampleCnt = doAA ? sampleCnt : 0; |
| 640 // We actually only need A8, but it often isn't supported as a | 637 // We actually only need A8, but it often isn't supported as a |
| 641 // render target so default to RGBA_8888 | 638 // render target so default to RGBA_8888 |
| 642 desc.fConfig = kRGBA_8888_GrPixelConfig; | 639 desc.fConfig = kRGBA_8888_GrPixelConfig; |
| 643 | 640 |
| 644 if (context->isConfigRenderable(kAlpha_8_GrPixelConfig, | 641 if (context->isConfigRenderable(kAlpha_8_GrPixelConfig, |
| 645 desc.fSampleCnt > 0)) { | 642 desc.fSampleCnt > 0)) { |
| 646 desc.fConfig = kAlpha_8_GrPixelConfig; | 643 desc.fConfig = kAlpha_8_GrPixelConfig; |
| 647 } | 644 } |
| 648 | 645 |
| 649 mask->set(context, desc); | 646 GrTexture* mask = context->refScratchTexture(desc,GrContext::kApprox_ScratchTexMatch); |
| 650 if (NULL == mask->texture()) { | 647 if (NULL == mask) { |
| 651 return false; | 648 return NULL; |
| 652 } | 649 } |
| 653 | 650 |
| 654 GrTexture* maskTexture = mask->texture(); | |
| 655 SkRect clipRect = SkRect::MakeWH(maskRect.width(), maskRect.height()); | 651 SkRect clipRect = SkRect::MakeWH(maskRect.width(), maskRect.height()); |
| 656 | 652 |
| 657 GrContext::AutoRenderTarget art(context, maskTexture->asRenderTarget()); | 653 GrContext::AutoRenderTarget art(context, mask->asRenderTarget()); |
| 658 GrContext::AutoClip ac(context, clipRect); | 654 GrContext::AutoClip ac(context, clipRect); |
| 659 | 655 |
| 660 context->clear(NULL, 0x0, true); | 656 context->clear(NULL, 0x0, true); |
| 661 | 657 |
| 662 GrPaint tempPaint; | 658 GrPaint tempPaint; |
| 663 if (doAA) { | 659 if (doAA) { |
| 664 tempPaint.setAntiAlias(true); | 660 tempPaint.setAntiAlias(true); |
| 665 // AA uses the "coverage" stages on GrDrawTarget. Coverage with a dst | 661 // AA uses the "coverage" stages on GrDrawTarget. Coverage with a dst |
| 666 // blend coeff of zero requires dual source blending support in order | 662 // blend coeff of zero requires dual source blending support in order |
| 667 // to properly blend partially covered pixels. This means the AA | 663 // to properly blend partially covered pixels. This means the AA |
| 668 // code path may not be taken. So we use a dst blend coeff of ISA. We | 664 // code path may not be taken. So we use a dst blend coeff of ISA. We |
| 669 // could special case AA draws to a dst surface with known alpha=0 to | 665 // could special case AA draws to a dst surface with known alpha=0 to |
| 670 // use a zero dst coeff when dual source blending isn't available. | 666 // use a zero dst coeff when dual source blending isn't available. |
| 671 tempPaint.setBlendFunc(kOne_GrBlendCoeff, kISC_GrBlendCoeff); | 667 tempPaint.setBlendFunc(kOne_GrBlendCoeff, kISC_GrBlendCoeff); |
| 672 } | 668 } |
| 673 | 669 |
| 674 GrContext::AutoMatrix am; | 670 GrContext::AutoMatrix am; |
| 675 | 671 |
| 676 // Draw the mask into maskTexture with the path's top-left at the origin using tempPaint. | 672 // Draw the mask into maskTexture with the path's top-left at the origin using tempPaint. |
| 677 SkMatrix translate; | 673 SkMatrix translate; |
| 678 translate.setTranslate(-maskRect.fLeft, -maskRect.fTop); | 674 translate.setTranslate(-maskRect.fLeft, -maskRect.fTop); |
| 679 am.set(context, translate); | 675 am.set(context, translate); |
| 680 context->drawPath(tempPaint, devPath, strokeInfo); | 676 context->drawPath(tempPaint, devPath, strokeInfo); |
| 681 return true; | 677 return mask; |
| 682 } | 678 } |
| 683 | 679 |
| 684 SkBitmap wrap_texture(GrTexture* texture) { | 680 SkBitmap wrap_texture(GrTexture* texture) { |
| 685 SkBitmap result; | 681 SkBitmap result; |
| 686 result.setInfo(texture->surfacePriv().info()); | 682 result.setInfo(texture->surfacePriv().info()); |
| 687 result.setPixelRef(SkNEW_ARGS(SkGrPixelRef, (result.info(), texture)))->unref(); | 683 result.setPixelRef(SkNEW_ARGS(SkGrPixelRef, (result.info(), texture)))->unref(); |
| 688 return result; | 684 return result; |
| 689 } | 685 } |
| 690 | 686 |
| 691 }; | 687 }; |
| (...skipping 74 matching lines...) |
| 766 return; | 762 return; |
| 767 } | 763 } |
| 768 | 764 |
| 769 if (paint.getMaskFilter()->directFilterMaskGPU(fContext, &grPaint, | 765 if (paint.getMaskFilter()->directFilterMaskGPU(fContext, &grPaint, |
| 770 stroke, *devPathPtr)) { | 766 stroke, *devPathPtr)) { |
| 771 // the mask filter was able to draw itself directly, so there's nothing | 767 // the mask filter was able to draw itself directly, so there's nothing |
| 772 // left to do. | 768 // left to do. |
| 773 return; | 769 return; |
| 774 } | 770 } |
| 775 | 771 |
| 776 GrAutoScratchTexture mask; | |
| 777 | 772 |
| 778 if (create_mask_GPU(fContext, maskRect, *devPathPtr, strokeInfo, | 773 SkAutoTUnref<GrTexture> mask(create_mask_GPU(fContext, maskRect, *devPathPtr, |
| 779 grPaint.isAntiAlias(), &mask, fRenderTarget->numSamples())) { | 774 strokeInfo, grPaint.isAntiAlias(), |
| | 775 fRenderTarget->numSamples())); |
| 776 if (mask) { |
| 780 GrTexture* filtered; | 777 GrTexture* filtered; |
| 781 | 778 |
| 782 if (paint.getMaskFilter()->filterMaskGPU(mask.texture(), | 779 if (paint.getMaskFilter()->filterMaskGPU(mask, ctm, maskRect, &filtered, true)) { |
| 783 ctm, maskRect, &filtered, true)) { | |
| 784 // filterMaskGPU gives us ownership of a ref to the result | 780 // filterMaskGPU gives us ownership of a ref to the result |
| 785 SkAutoTUnref<GrTexture> atu(filtered); | 781 SkAutoTUnref<GrTexture> atu(filtered); |
| 786 | |
| 787 // If the scratch texture that we used as the filter src also holds the filter |
| 788 // result then we must detach so that this texture isn't recycled for a later |
| 789 // draw. | |
| 790 if (filtered == mask.texture()) { | |
| 791 mask.detach(); | |
| 792 filtered->unref(); // detach transfers GrAutoScratchTexture's ref to us. |
| 793 } | |
| 794 | |
| 795 if (draw_mask(fContext, maskRect, &grPaint, filtered)) { | 782 if (draw_mask(fContext, maskRect, &grPaint, filtered)) { |
| 796 // This path is completely drawn | 783 // This path is completely drawn |
| 797 return; | 784 return; |
| 798 } | 785 } |
| 799 } | 786 } |
| 800 } | 787 } |
| 801 } | 788 } |
| 802 | 789 |
| 803 // draw the mask on the CPU - this is a fallthrough path in case the | 790 // draw the mask on the CPU - this is a fallthrough path in case the |
| 804 // GPU path fails | 791 // GPU path fails |
| (...skipping 979 matching lines...) |
| 1784 SkAutoTUnref<GrTexture> texture; | 1771 SkAutoTUnref<GrTexture> texture; |
| 1785 // Skia's convention is to only clear a device if it is non-opaque. | 1772 // Skia's convention is to only clear a device if it is non-opaque. |
| 1786 unsigned flags = info.isOpaque() ? 0 : kNeedClear_Flag; | 1773 unsigned flags = info.isOpaque() ? 0 : kNeedClear_Flag; |
| 1787 | 1774 |
| 1788 #if CACHE_COMPATIBLE_DEVICE_TEXTURES | 1775 #if CACHE_COMPATIBLE_DEVICE_TEXTURES |
| 1789 // layers are never draw in repeat modes, so we can request an approx | 1776 // layers are never draw in repeat modes, so we can request an approx |
| 1790 // match and ignore any padding. | 1777 // match and ignore any padding. |
| 1791 const GrContext::ScratchTexMatch match = (kSaveLayer_Usage == usage) ? | 1778 const GrContext::ScratchTexMatch match = (kSaveLayer_Usage == usage) ? |
| 1792 GrContext::kApprox_ScratchTexMatch : | 1779 GrContext::kApprox_ScratchTexMatch : |
| 1793 GrContext::kExact_ScratchTexMatch; | 1780 GrContext::kExact_ScratchTexMatch; |
| 1794 texture.reset(fContext->lockAndRefScratchTexture(desc, match)); | 1781 texture.reset(fContext->refScratchTexture(desc, match)); |
| 1795 #else | 1782 #else |
| 1796 texture.reset(fContext->createUncachedTexture(desc, NULL, 0)); | 1783 texture.reset(fContext->createUncachedTexture(desc, NULL, 0)); |
| 1797 #endif | 1784 #endif |
| 1798 if (texture.get()) { | 1785 if (texture.get()) { |
| 1799 return SkGpuDevice::Create(texture, SkSurfaceProps(SkSurfaceProps::kLegacyFontHost_InitType), flags); | 1786 return SkGpuDevice::Create(texture, SkSurfaceProps(SkSurfaceProps::kLegacyFontHost_InitType), flags); |
| 1800 } else { | 1787 } else { |
| 1801 GrPrintf("---- failed to create compatible device texture [%d %d]\n", | 1788 GrPrintf("---- failed to create compatible device texture [%d %d]\n", |
| 1802 info.width(), info.height()); | 1789 info.width(), info.height()); |
| 1803 return NULL; | 1790 return NULL; |
| 1804 } | 1791 } |
| (...skipping 53 matching lines...) |
| 1858 GrLayerHoister::UnlockLayers(fContext, atlased, nonAtlased, recycled); | 1845 GrLayerHoister::UnlockLayers(fContext, atlased, nonAtlased, recycled); |
| 1859 | 1846 |
| 1860 return true; | 1847 return true; |
| 1861 } | 1848 } |
| 1862 | 1849 |
| 1863 SkImageFilter::Cache* SkGpuDevice::getImageFilterCache() { | 1850 SkImageFilter::Cache* SkGpuDevice::getImageFilterCache() { |
| 1864 // We always return a transient cache, so it is freed after each | 1851 // We always return a transient cache, so it is freed after each |
| 1865 // filter traversal. | 1852 // filter traversal. |
| 1866 return SkImageFilter::Cache::Create(kDefaultImageFilterCacheSize); | 1853 return SkImageFilter::Cache::Create(kDefaultImageFilterCacheSize); |
| 1867 } | 1854 } |
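
A note on the ownership pattern this change converges on (editorial sketch, not part of the diff): callers now take a ref'd GrTexture* straight from GrContext::refScratchTexture() and hold it in SkAutoTUnref, instead of routing it through a GrAutoScratchTexture wrapper. The sketch below assumes the Skia headers of this era ("GrContext.h", "SkRefCnt.h"); the helper names acquire_scratch_mask() and draw_with_scratch() are invented for illustration only.

    #include "GrContext.h"
    #include "SkRefCnt.h"  // SkAutoTUnref (assumed header location)

    // Hypothetical helper: requests an approximately sized A8 scratch texture
    // and hands the ref straight back to the caller (NULL on failure).
    static GrTexture* acquire_scratch_mask(GrContext* context, int width, int height) {
        GrTextureDesc desc;
        desc.fWidth  = width;
        desc.fHeight = height;
        desc.fConfig = kAlpha_8_GrPixelConfig;
        return context->refScratchTexture(desc, GrContext::kApprox_ScratchTexMatch);
    }

    // Hypothetical caller: the SkAutoTUnref owns the ref and releases it when
    // the scope ends, returning the texture to the scratch pool for reuse.
    static void draw_with_scratch(GrContext* context) {
        SkAutoTUnref<GrTexture> mask(acquire_scratch_mask(context, 64, 64));
        if (!mask) {
            return;  // allocation failed; nothing to unref
        }
        // ... write pixels into mask and draw it, as SkGpuDevice does above ...
    }

This mirrors how create_mask_GPU() now returns the texture it rendered into, and how its caller wraps that return in SkAutoTUnref rather than detaching it from a GrAutoScratchTexture.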