OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2011 Google Inc. | 2 * Copyright 2011 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "SkGpuDevice.h" | 8 #include "SkGpuDevice.h" |
9 | 9 |
10 #include "effects/GrBicubicEffect.h" | 10 #include "effects/GrBicubicEffect.h" |
(...skipping 1858 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1869 | 1869 |
1870 fContext->getLayerCache()->trackPicture(picture); | 1870 fContext->getLayerCache()->trackPicture(picture); |
1871 } | 1871 } |
1872 | 1872 |
1873 static void wrap_texture(GrTexture* texture, int width, int height, SkBitmap* re
sult) { | 1873 static void wrap_texture(GrTexture* texture, int width, int height, SkBitmap* re
sult) { |
1874 SkImageInfo info = SkImageInfo::MakeN32Premul(width, height); | 1874 SkImageInfo info = SkImageInfo::MakeN32Premul(width, height); |
1875 result->setInfo(info); | 1875 result->setInfo(info); |
1876 result->setPixelRef(SkNEW_ARGS(SkGrPixelRef, (info, texture)))->unref(); | 1876 result->setPixelRef(SkNEW_ARGS(SkGrPixelRef, (info, texture)))->unref(); |
1877 } | 1877 } |
1878 | 1878 |
// Decide which saveLayers in 'gpuData' are worth pre-rendering ("hoisting").
//
// gpuData     - per-picture acceleration data describing every recorded saveLayer
// ops         - active-op list from the picture's BBH, or NULL if the picture
//               has no bounding-box hierarchy
// query       - device-space clip bounds being drawn (used only in the no-BBH path)
// pullForward - caller-allocated array, one bool per saveLayer; entries for
//               hoistable layers are set to true (others are left untouched,
//               so the caller must pre-initialize them to false)
//
// Return true if any layers are suitable for hoisting
bool SkGpuDevice::FindLayersToHoist(const GrAccelData *gpuData,
                                    const SkPicture::OperationList* ops,
                                    const SkIRect& query,
                                    bool* pullForward) {
    bool anyHoisted = false;

    // Layer hoisting pre-renders the entire layer since it will be cached and potentially
    // reused with different clips (e.g., in different tiles). Because of this the
    // clip will not be limiting the size of the pre-rendered layer. kSaveLayerMaxSize
    // is used to limit which clips are pre-rendered.
    static const int kSaveLayerMaxSize = 256;

    if (NULL != ops) {
        // In this case the picture has been generated with a BBH so we use
        // the BBH to limit the pre-rendering to just the layers needed to cover
        // the region being drawn
        for (int i = 0; i < ops->numOps(); ++i) {
            uint32_t offset = ops->offset(i);

            // For now we're saving all the layers in the GrAccelData so they
            // can be nested. Additionally, the nested layers appear before
            // their parent in the list.
            for (int j = 0; j < gpuData->numSaveLayers(); ++j) {
                const GrAccelData::SaveLayerInfo& info = gpuData->saveLayerInfo(j);

                if (pullForward[j]) {
                    continue;    // already pulling forward
                }

                // A layer is active if any live op falls inside its
                // saveLayer..restore op-ID range.
                if (offset < info.fSaveLayerOpID || offset > info.fRestoreOpID) {
                    continue;    // the op isn't in this range
                }

                // TODO: once this code is more stable unsuitable layers can
                // just be omitted during the optimization stage
                if (!info.fValid ||
                    kSaveLayerMaxSize < info.fSize.fWidth ||
                    kSaveLayerMaxSize < info.fSize.fHeight ||
                    info.fIsNested) {
                    continue;    // this layer is unsuitable
                }

                pullForward[j] = true;
                anyHoisted = true;
            }
        }
    } else {
        // In this case there is no BBH associated with the picture. Pre-render
        // all the layers that intersect the drawn region
        for (int j = 0; j < gpuData->numSaveLayers(); ++j) {
            const GrAccelData::SaveLayerInfo& info = gpuData->saveLayerInfo(j);

            SkIRect layerRect = SkIRect::MakeXYWH(info.fOffset.fX,
                                                  info.fOffset.fY,
                                                  info.fSize.fWidth,
                                                  info.fSize.fHeight);

            // Only hoist layers that actually intersect the area being drawn.
            if (!SkIRect::Intersects(query, layerRect)) {
                continue;
            }

            // TODO: once this code is more stable unsuitable layers can
            // just be omitted during the optimization stage
            if (!info.fValid ||
                kSaveLayerMaxSize < info.fSize.fWidth ||
                kSaveLayerMaxSize < info.fSize.fHeight ||
                info.fIsNested) {
                continue;
            }

            pullForward[j] = true;
            anyHoisted = true;
        }
    }

    return anyHoisted;
}
| 1957 |
| 1958 bool SkGpuDevice::EXPERIMENTAL_drawPicture(SkCanvas* mainCanvas, const SkPicture
* picture, |
| 1959 const SkMatrix* matrix, const SkPaint
* paint) { |
| 1960 // todo: should handle these natively |
| 1961 if (matrix || paint) { |
| 1962 return false; |
| 1963 } |
| 1964 |
| 1965 fContext->getLayerCache()->processDeletedPictures(); |
| 1966 |
| 1967 SkPicture::AccelData::Key key = GrAccelData::ComputeAccelDataKey(); |
| 1968 |
| 1969 const SkPicture::AccelData* data = picture->EXPERIMENTAL_getAccelData(key); |
| 1970 if (NULL == data) { |
| 1971 return false; |
| 1972 } |
| 1973 |
| 1974 const GrAccelData *gpuData = static_cast<const GrAccelData*>(data); |
| 1975 |
| 1976 if (0 == gpuData->numSaveLayers()) { |
| 1977 return false; |
| 1978 } |
| 1979 |
| 1980 SkAutoTArray<bool> pullForward(gpuData->numSaveLayers()); |
| 1981 for (int i = 0; i < gpuData->numSaveLayers(); ++i) { |
| 1982 pullForward[i] = false; |
| 1983 } |
| 1984 |
| 1985 SkRect clipBounds; |
| 1986 if (!mainCanvas->getClipBounds(&clipBounds)) { |
| 1987 return true; |
| 1988 } |
| 1989 SkIRect query; |
| 1990 clipBounds.roundOut(&query); |
| 1991 |
| 1992 SkAutoTDelete<const SkPicture::OperationList> ops(picture->EXPERIMENTAL_getA
ctiveOps(query)); |
| 1993 |
| 1994 if (!FindLayersToHoist(gpuData, ops.get(), query, pullForward.get())) { |
| 1995 return false; |
| 1996 } |
| 1997 |
1982 SkPictureReplacementPlayback::PlaybackReplacements replacements; | 1998 SkPictureReplacementPlayback::PlaybackReplacements replacements; |
1983 | 1999 |
1984 SkTDArray<GrCachedLayer*> atlased, nonAtlased; | 2000 SkTDArray<GrCachedLayer*> atlased, nonAtlased; |
1985 atlased.setReserve(gpuData->numSaveLayers()); | 2001 atlased.setReserve(gpuData->numSaveLayers()); |
1986 | 2002 |
1987 // Generate the layer and/or ensure it is locked | 2003 // Generate the layer and/or ensure it is locked |
1988 for (int i = 0; i < gpuData->numSaveLayers(); ++i) { | 2004 for (int i = 0; i < gpuData->numSaveLayers(); ++i) { |
1989 if (pullForward[i]) { | 2005 if (pullForward[i]) { |
1990 const GrAccelData::SaveLayerInfo& info = gpuData->saveLayerInfo(i); | 2006 const GrAccelData::SaveLayerInfo& info = gpuData->saveLayerInfo(i); |
1991 | 2007 |
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2030 if (needsRendering) { | 2046 if (needsRendering) { |
2031 if (layer->isAtlased()) { | 2047 if (layer->isAtlased()) { |
2032 *atlased.append() = layer; | 2048 *atlased.append() = layer; |
2033 } else { | 2049 } else { |
2034 *nonAtlased.append() = layer; | 2050 *nonAtlased.append() = layer; |
2035 } | 2051 } |
2036 } | 2052 } |
2037 } | 2053 } |
2038 } | 2054 } |
2039 | 2055 |
| 2056 this->drawLayers(picture, atlased, nonAtlased); |
| 2057 |
| 2058 // Render the entire picture using new layers |
| 2059 SkPictureReplacementPlayback playback(picture, &replacements, ops.get()); |
| 2060 |
| 2061 playback.draw(mainCanvas, NULL); |
| 2062 |
| 2063 this->unlockLayers(picture); |
| 2064 |
| 2065 return true; |
| 2066 } |
| 2067 |
| 2068 void SkGpuDevice::drawLayers(const SkPicture* picture, |
| 2069 const SkTDArray<GrCachedLayer*>& atlased, |
| 2070 const SkTDArray<GrCachedLayer*>& nonAtlased) { |
2040 // Render the atlased layers that require it | 2071 // Render the atlased layers that require it |
2041 if (atlased.count() > 0) { | 2072 if (atlased.count() > 0) { |
2042 // All the atlased layers are rendered into the same GrTexture | 2073 // All the atlased layers are rendered into the same GrTexture |
2043 SkAutoTUnref<SkSurface> surface(SkSurface::NewRenderTargetDirect( | 2074 SkAutoTUnref<SkSurface> surface(SkSurface::NewRenderTargetDirect( |
2044 atlased[0]->texture()->asRenderTarget(), | 2075 atlased[0]->texture()->asRenderTarge
t(), |
2045 SkSurface::kStandard_TextRenderMode, | 2076 SkSurface::kStandard_TextRenderMode, |
2046 SkSurface::kDontClear_RenderTargetFlag))
; | 2077 SkSurface::kDontClear_RenderTargetFl
ag)); |
2047 | 2078 |
2048 SkCanvas* atlasCanvas = surface->getCanvas(); | 2079 SkCanvas* atlasCanvas = surface->getCanvas(); |
2049 | 2080 |
2050 SkPaint paint; | 2081 SkPaint paint; |
2051 paint.setColor(SK_ColorTRANSPARENT); | 2082 paint.setColor(SK_ColorTRANSPARENT); |
2052 paint.setXfermode(SkXfermode::Create(SkXfermode::kSrc_Mode))->unref(); | 2083 paint.setXfermode(SkXfermode::Create(SkXfermode::kSrc_Mode))->unref(); |
2053 | 2084 |
2054 for (int i = 0; i < atlased.count(); ++i) { | 2085 for (int i = 0; i < atlased.count(); ++i) { |
2055 GrCachedLayer* layer = atlased[i]; | 2086 GrCachedLayer* layer = atlased[i]; |
2056 | 2087 |
(...skipping 27 matching lines...) Expand all Loading... |
2084 | 2115 |
2085 atlasCanvas->flush(); | 2116 atlasCanvas->flush(); |
2086 } | 2117 } |
2087 | 2118 |
2088 // Render the non-atlased layers that require it | 2119 // Render the non-atlased layers that require it |
2089 for (int i = 0; i < nonAtlased.count(); ++i) { | 2120 for (int i = 0; i < nonAtlased.count(); ++i) { |
2090 GrCachedLayer* layer = nonAtlased[i]; | 2121 GrCachedLayer* layer = nonAtlased[i]; |
2091 | 2122 |
2092 // Each non-atlased layer has its own GrTexture | 2123 // Each non-atlased layer has its own GrTexture |
2093 SkAutoTUnref<SkSurface> surface(SkSurface::NewRenderTargetDirect( | 2124 SkAutoTUnref<SkSurface> surface(SkSurface::NewRenderTargetDirect( |
2094 layer->texture()->asRenderTarget(), | 2125 layer->texture()->asRenderTarget(), |
2095 SkSurface::kStandard_TextRenderMode, | 2126 SkSurface::kStandard_TextRenderMode, |
2096 SkSurface::kDontClear_RenderTargetFlag))
; | 2127 SkSurface::kDontClear_RenderTargetFl
ag)); |
2097 | 2128 |
2098 SkCanvas* layerCanvas = surface->getCanvas(); | 2129 SkCanvas* layerCanvas = surface->getCanvas(); |
2099 | 2130 |
2100 // Add a rect clip to make sure the rendering doesn't | 2131 // Add a rect clip to make sure the rendering doesn't |
2101 // extend beyond the boundaries of the atlased sub-rect | 2132 // extend beyond the boundaries of the atlased sub-rect |
2102 SkRect bound = SkRect::MakeXYWH(SkIntToScalar(layer->rect().fLeft), | 2133 SkRect bound = SkRect::MakeXYWH(SkIntToScalar(layer->rect().fLeft), |
2103 SkIntToScalar(layer->rect().fTop), | 2134 SkIntToScalar(layer->rect().fTop), |
2104 SkIntToScalar(layer->rect().width()), | 2135 SkIntToScalar(layer->rect().width()), |
2105 SkIntToScalar(layer->rect().height())); | 2136 SkIntToScalar(layer->rect().height())); |
2106 | 2137 |
2107 layerCanvas->clipRect(bound); // TODO: still useful? | 2138 layerCanvas->clipRect(bound); // TODO: still useful? |
2108 | 2139 |
2109 layerCanvas->clear(SK_ColorTRANSPARENT); | 2140 layerCanvas->clear(SK_ColorTRANSPARENT); |
2110 | 2141 |
2111 layerCanvas->concat(layer->ctm()); | 2142 layerCanvas->concat(layer->ctm()); |
2112 | 2143 |
2113 SkPictureRangePlayback rangePlayback(picture, | 2144 SkPictureRangePlayback rangePlayback(picture, |
2114 layer->start(), | 2145 layer->start(), |
2115 layer->stop()); | 2146 layer->stop()); |
2116 rangePlayback.draw(layerCanvas, NULL); | 2147 rangePlayback.draw(layerCanvas, NULL); |
2117 | 2148 |
2118 layerCanvas->flush(); | 2149 layerCanvas->flush(); |
2119 } | 2150 } |
| 2151 } |
2120 | 2152 |
2121 // Render the entire picture using new layers | 2153 void SkGpuDevice::unlockLayers(const SkPicture* picture) { |
2122 SkPictureReplacementPlayback playback(picture, &replacements, ops.get()); | 2154 SkPicture::AccelData::Key key = GrAccelData::ComputeAccelDataKey(); |
2123 | 2155 |
2124 playback.draw(mainCanvas, NULL); | 2156 const SkPicture::AccelData* data = picture->EXPERIMENTAL_getAccelData(key); |
| 2157 SkASSERT(NULL != data); |
| 2158 |
| 2159 const GrAccelData *gpuData = static_cast<const GrAccelData*>(data); |
| 2160 SkASSERT(0 != gpuData->numSaveLayers()); |
2125 | 2161 |
2126 // unlock the layers | 2162 // unlock the layers |
2127 for (int i = 0; i < gpuData->numSaveLayers(); ++i) { | 2163 for (int i = 0; i < gpuData->numSaveLayers(); ++i) { |
2128 const GrAccelData::SaveLayerInfo& info = gpuData->saveLayerInfo(i); | 2164 const GrAccelData::SaveLayerInfo& info = gpuData->saveLayerInfo(i); |
2129 | 2165 |
2130 GrCachedLayer* layer = fContext->getLayerCache()->findLayer(picture->uni
queID(), | 2166 GrCachedLayer* layer = fContext->getLayerCache()->findLayer(picture->uni
queID(), |
2131 info.fSaveLa
yerOpID, | 2167 info.fSaveLa
yerOpID, |
2132 info.fRestor
eOpID, | 2168 info.fRestor
eOpID, |
2133 info.fCTM); | 2169 info.fCTM); |
2134 fContext->getLayerCache()->unlock(layer); | 2170 fContext->getLayerCache()->unlock(layer); |
2135 } | 2171 } |
2136 | 2172 |
2137 #if DISABLE_CACHING | 2173 #if DISABLE_CACHING |
2138 // This code completely clears out the atlas. It is required when | 2174 // This code completely clears out the atlas. It is required when |
2139 // caching is disabled so the atlas doesn't fill up and force more | 2175 // caching is disabled so the atlas doesn't fill up and force more |
2140 // free floating layers | 2176 // free floating layers |
2141 fContext->getLayerCache()->purge(picture->uniqueID()); | 2177 fContext->getLayerCache()->purge(picture->uniqueID()); |
2142 | 2178 |
2143 fContext->getLayerCache()->purgeAll(); | 2179 fContext->getLayerCache()->purgeAll(); |
2144 #endif | 2180 #endif |
2145 | |
2146 return true; | |
2147 } | 2181 } |
2148 | 2182 |
2149 SkImageFilter::Cache* SkGpuDevice::getImageFilterCache() { | 2183 SkImageFilter::Cache* SkGpuDevice::getImageFilterCache() { |
2150 // We always return a transient cache, so it is freed after each | 2184 // We always return a transient cache, so it is freed after each |
2151 // filter traversal. | 2185 // filter traversal. |
2152 return SkImageFilter::Cache::Create(kDefaultImageFilterCacheSize); | 2186 return SkImageFilter::Cache::Create(kDefaultImageFilterCacheSize); |
2153 } | 2187 } |
OLD | NEW |