OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2011 Google Inc. | 2 * Copyright 2011 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "SkGpuDevice.h" | 8 #include "SkGpuDevice.h" |
9 | 9 |
10 #include "effects/GrBicubicEffect.h" | 10 #include "effects/GrBicubicEffect.h" |
(...skipping 1869 matching lines...)
1880 bool SkGpuDevice::FindLayersToHoist(const GrAccelData *gpuData, | 1880 bool SkGpuDevice::FindLayersToHoist(const GrAccelData *gpuData, |
1881 const SkPicture::OperationList* ops, | 1881 const SkPicture::OperationList* ops, |
1882 const SkIRect& query, | 1882 const SkIRect& query, |
1883 bool* pullForward) { | 1883 bool* pullForward) { |
1884 bool anyHoisted = false; | 1884 bool anyHoisted = false; |
1885 | 1885 |
1886     // Layer hoisting pre-renders the entire layer since it will be cached and potentially | 1886     // Layer hoisting pre-renders the entire layer since it will be cached and potentially |
1887     // reused with different clips (e.g., in different tiles). Because of this the | 1887     // reused with different clips (e.g., in different tiles). Because of this the |
1888     // clip will not be limiting the size of the pre-rendered layer. kSaveLayerMaxSize | 1888     // clip will not be limiting the size of the pre-rendered layer. kSaveLayerMaxSize |
1889 // is used to limit which clips are pre-rendered. | 1889 // is used to limit which clips are pre-rendered. |
1890 static const int kSaveLayerMaxSize = 256; | 1890 static const int kSaveLayerMaxSize1 = 512; |
1891 | 1891 |
1892 if (NULL != ops) { | 1892 if (NULL != ops) { |
1893 // In this case the picture has been generated with a BBH so we use | 1893 // In this case the picture has been generated with a BBH so we use |
1894 // the BBH to limit the pre-rendering to just the layers needed to cover | 1894 // the BBH to limit the pre-rendering to just the layers needed to cover |
1895 // the region being drawn | 1895 // the region being drawn |
1896 for (int i = 0; i < ops->numOps(); ++i) { | 1896 for (int i = 0; i < ops->numOps(); ++i) { |
1897 uint32_t offset = ops->offset(i); | 1897 uint32_t offset = ops->offset(i); |
1898 | 1898 |
1899 // For now we're saving all the layers in the GrAccelData so they | 1899 // For now we're saving all the layers in the GrAccelData so they |
1900 // can be nested. Additionally, the nested layers appear before | 1900 // can be nested. Additionally, the nested layers appear before |
1901 // their parent in the list. | 1901 // their parent in the list. |
1902 for (int j = 0; j < gpuData->numSaveLayers(); ++j) { | 1902 for (int j = 0; j < gpuData->numSaveLayers(); ++j) { |
1903                 const GrAccelData::SaveLayerInfo& info = gpuData->saveLayerInfo(j); | 1903                 const GrAccelData::SaveLayerInfo& info = gpuData->saveLayerInfo(j); |
1904 | 1904 |
1905 if (pullForward[j]) { | 1905 if (pullForward[j]) { |
1906 continue; // already pulling forward | 1906 continue; // already pulling forward |
1907 } | 1907 } |
1908 | 1908 |
1909                 if (offset < info.fSaveLayerOpID || offset > info.fRestoreOpID) { | 1909                 if (offset < info.fSaveLayerOpID || offset > info.fRestoreOpID) { |
1910 continue; // the op isn't in this range | 1910 continue; // the op isn't in this range |
1911 } | 1911 } |
1912 | 1912 |
1913 // TODO: once this code is more stable unsuitable layers can | 1913 // TODO: once this code is more stable unsuitable layers can |
1914 // just be omitted during the optimization stage | 1914 // just be omitted during the optimization stage |
1915 if (!info.fValid || | 1915 if (!info.fValid || |
1916 kSaveLayerMaxSize < info.fSize.fWidth || | 1916 kSaveLayerMaxSize1 < info.fSize.fWidth || |
1917 kSaveLayerMaxSize < info.fSize.fHeight || | 1917 kSaveLayerMaxSize1 < info.fSize.fHeight || |
| 1918 info.fSize.fWidth * info.fSize.fHeight > 256 * 256 || |
1918 info.fIsNested) { | 1919 info.fIsNested) { |
1919 continue; // this layer is unsuitable | 1920 continue; // this layer is unsuitable |
1920 } | 1921 } |
1921 | 1922 |
1922 pullForward[j] = true; | 1923 pullForward[j] = true; |
1923 anyHoisted = true; | 1924 anyHoisted = true; |
1924 } | 1925 } |
1925 } | 1926 } |
1926 } else { | 1927 } else { |
1927 // In this case there is no BBH associated with the picture. Pre-render | 1928 // In this case there is no BBH associated with the picture. Pre-render |
1928 // all the layers that intersect the drawn region | 1929 // all the layers that intersect the drawn region |
1929 for (int j = 0; j < gpuData->numSaveLayers(); ++j) { | 1930 for (int j = 0; j < gpuData->numSaveLayers(); ++j) { |
1930 const GrAccelData::SaveLayerInfo& info = gpuData->saveLayerInfo(j); | 1931 const GrAccelData::SaveLayerInfo& info = gpuData->saveLayerInfo(j); |
1931 | 1932 |
1932 SkIRect layerRect = SkIRect::MakeXYWH(info.fOffset.fX, | 1933 SkIRect layerRect = SkIRect::MakeXYWH(info.fOffset.fX, |
1933 info.fOffset.fY, | 1934 info.fOffset.fY, |
1934 info.fSize.fWidth, | 1935 info.fSize.fWidth, |
1935 info.fSize.fHeight); | 1936 info.fSize.fHeight); |
1936 | 1937 |
1937 if (!SkIRect::Intersects(query, layerRect)) { | 1938 if (!SkIRect::Intersects(query, layerRect)) { |
1938 continue; | 1939 continue; |
1939 } | 1940 } |
1940 | 1941 |
1941 // TODO: once this code is more stable unsuitable layers can | 1942 // TODO: once this code is more stable unsuitable layers can |
1942 // just be omitted during the optimization stage | 1943 // just be omitted during the optimization stage |
1943 if (!info.fValid || | 1944 if (!info.fValid || |
1944 kSaveLayerMaxSize < info.fSize.fWidth || | 1945 kSaveLayerMaxSize1 < info.fSize.fWidth || |
1945 kSaveLayerMaxSize < info.fSize.fHeight || | 1946 kSaveLayerMaxSize1 < info.fSize.fHeight || |
| 1947 info.fSize.fWidth * info.fSize.fHeight > 256 * 256 || |
1946 info.fIsNested) { | 1948 info.fIsNested) { |
1947 continue; | 1949 continue; |
1948 } | 1950 } |
1949 | 1951 |
1950 pullForward[j] = true; | 1952 pullForward[j] = true; |
1951 anyHoisted = true; | 1953 anyHoisted = true; |
1952 } | 1954 } |
1953 } | 1955 } |
1954 | 1956 |
1955 return anyHoisted; | 1957 return anyHoisted; |
1956 } | 1958 } |
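The new suitability test raises the per-edge limit (kSaveLayerMaxSize1 = 512) but adds an area cap of 256 * 256 pixels, so a long, thin layer can now be hoisted while the total pixel budget stays the same as the old 256x256 limit. Below is a minimal sketch of that predicate as a free-standing helper; the helper name and the standalone form are illustrative only, and it relies just on the SaveLayerInfo fields already visible above (fValid, fSize, fIsNested):

    // Hypothetical helper mirroring the layer-suitability test in FindLayersToHoist.
    static bool layer_is_hoistable(const GrAccelData::SaveLayerInfo& info) {
        static const int kMaxDim  = 512;        // kSaveLayerMaxSize1: no edge longer than 512
        static const int kMaxArea = 256 * 256;  // pixel budget unchanged from the old 256x256 cap

        if (!info.fValid || info.fIsNested) {
            return false;                       // invalid or nested layers are still skipped
        }
        if (info.fSize.fWidth > kMaxDim || info.fSize.fHeight > kMaxDim) {
            return false;                       // e.g., 600x100 fails on the edge limit
        }
        // 512x100 (51200 px) passes; 300x300 (90000 px) fails the area cap.
        return info.fSize.fWidth * info.fSize.fHeight <= kMaxArea;
    }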
1957 | 1959 |
1958 bool SkGpuDevice::EXPERIMENTAL_drawPicture(SkCanvas* mainCanvas, const SkPicture* picture, | 1960 bool SkGpuDevice::EXPERIMENTAL_drawPicture(SkCanvas* mainCanvas, const SkPicture* picture, |
1959                                            const SkMatrix* matrix, const SkPaint* paint) { | 1961                                            const SkMatrix* matrix, const SkPaint* paint) { |
1960 // todo: should handle these natively | 1962 if (NULL != matrix || NULL != paint) { |
1961 if (matrix || paint) { | |
1962 return false; | 1963 return false; |
1963 } | 1964 } |
1964 | 1965 |
1965 fContext->getLayerCache()->processDeletedPictures(); | 1966 fContext->getLayerCache()->processDeletedPictures(); |
1966 | 1967 |
1967 SkPicture::AccelData::Key key = GrAccelData::ComputeAccelDataKey(); | 1968 SkPicture::AccelData::Key key = GrAccelData::ComputeAccelDataKey(); |
1968 | 1969 |
1969 const SkPicture::AccelData* data = picture->EXPERIMENTAL_getAccelData(key); | 1970 const SkPicture::AccelData* data = picture->EXPERIMENTAL_getAccelData(key); |
1970 if (NULL == data) { | 1971 if (NULL == data) { |
1971 return false; | 1972 return false; |
(...skipping 17 matching lines...)
1989 SkIRect query; | 1990 SkIRect query; |
1990 clipBounds.roundOut(&query); | 1991 clipBounds.roundOut(&query); |
1991 | 1992 |
1992     SkAutoTDelete<const SkPicture::OperationList> ops(picture->EXPERIMENTAL_getActiveOps(query)); | 1993     SkAutoTDelete<const SkPicture::OperationList> ops(picture->EXPERIMENTAL_getActiveOps(query)); |
1993 | 1994 |
1994 if (!FindLayersToHoist(gpuData, ops.get(), query, pullForward.get())) { | 1995 if (!FindLayersToHoist(gpuData, ops.get(), query, pullForward.get())) { |
1995 return false; | 1996 return false; |
1996 } | 1997 } |
1997 | 1998 |
1998 SkPictureReplacementPlayback::PlaybackReplacements replacements; | 1999 SkPictureReplacementPlayback::PlaybackReplacements replacements; |
| 2000 } |
1999 | 2001 |
2000 SkTDArray<GrCachedLayer*> atlased, nonAtlased; | 2002 void SkGpuDevice::EXPERIMENTAL_preDrawPicture(const SkPicture* picture, |
2001     atlased.setReserve(gpuData->numSaveLayers()); | 2003                                               const SkMatrix* matrix, const SkPaint* paint, |
| 2004 const SkIRect* target) { |
| 2005 if (NULL != matrix || NULL != paint) { |
| 2006 return; |
| 2007 } |
2002 | 2008 |
2003 // Generate the layer and/or ensure it is locked | 2009 fContext->getLayerCache()->processDeletedPictures(); |
| 2010 |
| 2011 SkPicture::AccelData::Key key = GrAccelData::ComputeAccelDataKey(); |
| 2012 |
| 2013 const SkPicture::AccelData* data = picture->EXPERIMENTAL_getAccelData(key); |
| 2014 if (NULL == data) { |
| 2015 return; |
| 2016 } |
| 2017 |
| 2018 const GrAccelData *gpuData = static_cast<const GrAccelData*>(data); |
| 2019 |
| 2020 if (0 == gpuData->numSaveLayers()) { |
| 2021 return; |
| 2022 } |
| 2023 |
| 2024 SkAutoTArray<bool> pullForward(gpuData->numSaveLayers()); |
2004 for (int i = 0; i < gpuData->numSaveLayers(); ++i) { | 2025 for (int i = 0; i < gpuData->numSaveLayers(); ++i) { |
2005 if (pullForward[i]) { | 2026 pullForward[i] = false; |
2006 const GrAccelData::SaveLayerInfo& info = gpuData->saveLayerInfo(i); | 2027 } |
2007 | 2028 |
2008             GrCachedLayer* layer = fContext->getLayerCache()->findLayerOrCreate(picture->uniqueID(), | 2029     SkAutoTDelete<const SkPicture::OperationList> ops; |
2009                                                                                  info.fSaveLayerOpID, | 2030     SkIRect query; |
2010                                                                                  info.fRestoreOpID, | |
2011                                                                                  info.fCTM); | |
2012 | 2031 |
2013             SkPictureReplacementPlayback::PlaybackReplacements::ReplacementInfo* layerInfo = | 2032     if (NULL != target) { |
2014                                                                          replacements.push(); | 2033         // TODO: this is suboptimal since it will be repeated in EXPERIMENTAL_drawPicture! |
2015             layerInfo->fStart = info.fSaveLayerOpID; | 2034         // TODO: just always use 'target' to determine hoisting (rather than active ops)? |
2016 layerInfo->fStop = info.fRestoreOpID; | 2035 ops.reset(picture->EXPERIMENTAL_getActiveOps(*target)); |
2017 layerInfo->fPos = info.fOffset; | 2036 query = *target; |
| 2037 } else { |
| 2038 query = SkIRect::MakeWH(picture->width(), picture->height()); |
| 2039 } |
2018 | 2040 |
2019 GrTextureDesc desc; | 2041 if (!FindLayersToHoist(gpuData, ops.get(), query, pullForward.get())) { |
2020 desc.fFlags = kRenderTarget_GrTextureFlagBit; | 2042 return; |
2021 desc.fWidth = info.fSize.fWidth; | 2043 } |
2022 desc.fHeight = info.fSize.fHeight; | |
2023 desc.fConfig = kSkia8888_GrPixelConfig; | |
2024 // TODO: need to deal with sample count | |
2025 | 2044 |
2026 bool needsRendering = fContext->getLayerCache()->lock(layer, desc, | 2045 *fPreprocessed.append() = SkRef(picture); |
2027                                                                    info.fHasNestedLayers || info.fIsNested); | 2046 |
2028             if (NULL == layer->texture()) { | 2047     fDeferredAtlasDraws.setReserve(fDeferredAtlasDraws.count() + gpuData->numSaveLayers()); |
2029 continue; | 2048 |
| 2049 // Lock the memory required for all the hoisted layers and add them |
| 2050 // to the deferred drawing lists |
 | 2051     // TODO: this may be a bit more VRAM intensive than necessary |
| 2052 for (int i = 0; i < gpuData->numSaveLayers(); ++i) { |
| 2053 if (!pullForward[i]) { |
| 2054 continue; |
| 2055 } |
| 2056 |
| 2057 const GrAccelData::SaveLayerInfo& info = gpuData->saveLayerInfo(i); |
| 2058 |
 | 2059         GrCachedLayer* layer = fContext->getLayerCache()->findLayerOrCreate(picture->uniqueID(), |
 | 2060                                                                              info.fSaveLayerOpID, |
 | 2061                                                                              info.fRestoreOpID, |
 | 2062                                                                              info.fCTM); |
| 2063 |
| 2064 GrTextureDesc desc; |
| 2065 desc.fFlags = kRenderTarget_GrTextureFlagBit; |
| 2066 desc.fWidth = info.fSize.fWidth; |
| 2067 desc.fHeight = info.fSize.fHeight; |
| 2068 desc.fConfig = kSkia8888_GrPixelConfig; |
| 2069 // TODO: need to deal with sample count |
| 2070 |
| 2071 bool needsRendering = fContext->getLayerCache()->lock(layer, desc, |
 | 2072                                                                info.fHasNestedLayers || info.fIsNested); |
| 2073 if (NULL == layer->texture()) { |
| 2074 continue; |
| 2075 } |
| 2076 |
| 2077 if (needsRendering) { |
| 2078 DeferredDraw* draw; |
| 2079 |
| 2080 if (layer->isAtlased()) { |
| 2081 draw = fDeferredAtlasDraws.append(); |
| 2082 } else { |
| 2083 draw = fDeferredNonAtlasDraws.append(); |
2030 } | 2084 } |
2031 | 2085 |
2032             layerInfo->fBM = SkNEW(SkBitmap);  // fBM is allocated so ReplacementInfo can be POD | 2086             draw->layer = layer; |
2033             wrap_texture(layer->texture(), | 2087             draw->picture1 = picture; |
2034                          !layer->isAtlased() ? desc.fWidth : layer->texture()->width(), | |
2035                          !layer->isAtlased() ? desc.fHeight : layer->texture()->height(), | |
2036 layerInfo->fBM); | |
2037 | |
2038 SkASSERT(info.fPaint); | |
2039 layerInfo->fPaint = info.fPaint; | |
2040 | |
2041 layerInfo->fSrcRect = SkIRect::MakeXYWH(layer->rect().fLeft, | |
2042 layer->rect().fTop, | |
2043 layer->rect().width(), | |
2044 layer->rect().height()); | |
2045 | |
2046 if (needsRendering) { | |
2047 if (layer->isAtlased()) { | |
2048 *atlased.append() = layer; | |
2049 } else { | |
2050 *nonAtlased.append() = layer; | |
2051 } | |
2052 } | |
2053 } | 2088 } |
2054 } | 2089 } |
| 2090 } |
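EXPERIMENTAL_preDrawPicture records each locked layer that still needs rendering in fDeferredAtlasDraws or fDeferredNonAtlasDraws and keeps a ref to the picture in fPreprocessed; none of those declarations appear in this file. A sketch of what the corresponding members in SkGpuDevice.h might look like, inferred from the usage above (draw->layer, draw->picture1, append()/rewind()) rather than copied from the header:

    // Assumed bookkeeping for the deferred layer renders; declarations are inferred, not verbatim.
    struct DeferredDraw {
        GrCachedLayer*   layer;     // locked layer whose contents still need to be drawn
        const SkPicture* picture1;  // picture whose ops will fill the layer
    };

    SkTDArray<DeferredDraw>     fDeferredAtlasDraws;     // layers placed in the atlas
    SkTDArray<DeferredDraw>     fDeferredNonAtlasDraws;  // free-floating render targets
    SkTDArray<const SkPicture*> fPreprocessed;           // pictures ref'ed until EXPERIMENTAL_postDrawPicture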
2055 | 2091 |
2056 this->drawLayers(picture, atlased, nonAtlased); | 2092 this->drawLayers(picture, atlased, nonAtlased); |
2057 | 2093 |
2058 // Render the entire picture using new layers | 2094 // Render the entire picture using new layers |
2059 SkPictureReplacementPlayback playback(picture, &replacements, ops.get()); | 2095 SkPictureReplacementPlayback playback(picture, &replacements, ops.get()); |
2060 | 2096 |
2061 playback.draw(mainCanvas, NULL); | 2097 playback.draw(mainCanvas, NULL); |
2062 | 2098 |
2063 this->unlockLayers(picture); | 2099 this->unlockLayers(picture); |
2064 | 2100 |
(...skipping 78 matching lines...)
2143 | 2179 |
2144 SkPictureRangePlayback rangePlayback(picture, | 2180 SkPictureRangePlayback rangePlayback(picture, |
2145 layer->start(), | 2181 layer->start(), |
2146 layer->stop()); | 2182 layer->stop()); |
2147 rangePlayback.draw(layerCanvas, NULL); | 2183 rangePlayback.draw(layerCanvas, NULL); |
2148 | 2184 |
2149 layerCanvas->flush(); | 2185 layerCanvas->flush(); |
2150 } | 2186 } |
2151 } | 2187 } |
2152 | 2188 |
| 2189 <<<<<<< HEAD |
2153 void SkGpuDevice::unlockLayers(const SkPicture* picture) { | 2190 void SkGpuDevice::unlockLayers(const SkPicture* picture) { |
2154 SkPicture::AccelData::Key key = GrAccelData::ComputeAccelDataKey(); | 2191 SkPicture::AccelData::Key key = GrAccelData::ComputeAccelDataKey(); |
2155 | 2192 |
2156 const SkPicture::AccelData* data = picture->EXPERIMENTAL_getAccelData(key); | 2193 const SkPicture::AccelData* data = picture->EXPERIMENTAL_getAccelData(key); |
2157 SkASSERT(NULL != data); | 2194 SkASSERT(NULL != data); |
2158 | 2195 |
2159 const GrAccelData *gpuData = static_cast<const GrAccelData*>(data); | 2196 const GrAccelData *gpuData = static_cast<const GrAccelData*>(data); |
2160 SkASSERT(0 != gpuData->numSaveLayers()); | 2197 SkASSERT(0 != gpuData->numSaveLayers()); |
| 2198 ======= |
| 2199 fDeferredAtlasDraws.rewind(); |
| 2200 fDeferredNonAtlasDraws.rewind(); |
| 2201 } |
2161 | 2202 |
2162     // unlock the layers | 2203 bool SkGpuDevice::EXPERIMENTAL_drawPicture(SkCanvas* mainCanvas, const SkPicture* picture, |
 | 2204                                            const SkMatrix* matrix, const SkPaint* paint) { |
| 2205 // todo: should handle these natively |
| 2206 if (matrix || paint) { |
| 2207 return false; |
| 2208 } |
| 2209 |
| 2210 fContext->getLayerCache()->processDeletedPictures(); |
| 2211 |
| 2212 SkPicture::AccelData::Key key = GrAccelData::ComputeAccelDataKey(); |
| 2213 |
| 2214 const SkPicture::AccelData* data = picture->EXPERIMENTAL_getAccelData(key); |
| 2215 if (NULL == data) { |
| 2216 return false; |
| 2217 } |
| 2218 |
| 2219 const GrAccelData *gpuData = static_cast<const GrAccelData*>(data); |
| 2220 |
| 2221 if (0 == gpuData->numSaveLayers()) { |
| 2222 return false; |
| 2223 } |
| 2224 |
| 2225 SkAutoTArray<bool> pullForward(gpuData->numSaveLayers()); |
2163 for (int i = 0; i < gpuData->numSaveLayers(); ++i) { | 2226 for (int i = 0; i < gpuData->numSaveLayers(); ++i) { |
| 2227 pullForward[i] = false; |
| 2228 } |
| 2229 |
| 2230 SkRect clipBounds; |
| 2231 if (!mainCanvas->getClipBounds(&clipBounds)) { |
| 2232 return true; |
| 2233 } |
| 2234 SkIRect query; |
| 2235 clipBounds.roundOut(&query); |
| 2236 |
 | 2237     SkAutoTDelete<const SkPicture::OperationList> ops(picture->EXPERIMENTAL_getActiveOps(query)); |
| 2238 >>>>>>> Reapply patch |
| 2239 |
| 2240 if (!FindLayersToHoist(gpuData, ops.get(), query, pullForward.get())) { |
| 2241 return false; |
| 2242 } |
| 2243 |
| 2244 this->flushDeferredDraws(); |
| 2245 |
| 2246 SkPictureReplacementPlayback::PlaybackReplacements replacements; |
| 2247 |
| 2248 // Assemble the information needed to render the original picture with the |
| 2249 // hoisted layers |
| 2250 for (int i = 0; i < gpuData->numSaveLayers(); ++i) { |
| 2251 if (!pullForward[i]) { |
| 2252 continue; |
| 2253 } |
| 2254 |
2164 const GrAccelData::SaveLayerInfo& info = gpuData->saveLayerInfo(i); | 2255 const GrAccelData::SaveLayerInfo& info = gpuData->saveLayerInfo(i); |
2165 | 2256 |
| 2257 <<<<<<< HEAD |
2166         GrCachedLayer* layer = fContext->getLayerCache()->findLayer(picture->uniqueID(), | 2258         GrCachedLayer* layer = fContext->getLayerCache()->findLayer(picture->uniqueID(), |
2167                                                                      info.fSaveLayerOpID, | 2259                                                                      info.fSaveLayerOpID, |
2168                                                                      info.fRestoreOpID, | 2260                                                                      info.fRestoreOpID, |
2169 info.fCTM); | 2261 info.fCTM); |
2170 fContext->getLayerCache()->unlock(layer); | 2262 fContext->getLayerCache()->unlock(layer); |
| 2263 ======= |
 | 2264         GrCachedLayer* layer = fContext->getLayerCache()->findLayerOrCreate(picture->uniqueID(), |
 | 2265                                                                              info.fSaveLayerOpID, |
 | 2266                                                                              info.fRestoreOpID, |
 | 2267                                                                              info.fCTM); |
| 2268 SkASSERT(layer->locked()); |
| 2269 |
 | 2270         SkPictureReplacementPlayback::PlaybackReplacements::ReplacementInfo* layerInfo = |
 | 2271                                                                      replacements.push(); |
| 2272 layerInfo->fStart = info.fSaveLayerOpID; |
| 2273 layerInfo->fStop = info.fRestoreOpID; |
| 2274 layerInfo->fPos = info.fOffset; |
| 2275 |
 | 2276         layerInfo->fBM = SkNEW(SkBitmap);  // fBM is allocated so ReplacementInfo can be POD |
| 2277 wrap_texture(layer->texture(), |
 | 2278                      layer->isAtlased() ? layer->texture()->width() : info.fSize.fWidth, |
 | 2279                      layer->isAtlased() ? layer->texture()->height() : info.fSize.fHeight, |
| 2280 layerInfo->fBM); |
| 2281 |
| 2282 SkASSERT(info.fPaint); |
| 2283 layerInfo->fPaint = info.fPaint; |
| 2284 |
| 2285 layerInfo->fSrcRect = SkIRect::MakeXYWH(layer->rect().fLeft, |
| 2286 layer->rect().fTop, |
| 2287 layer->rect().width(), |
| 2288 layer->rect().height()); |
| 2289 >>>>>>> Reapply patch |
| 2290 } |
| 2291 |
| 2292 // Render the entire picture using new layers |
| 2293 SkPictureReplacementPlayback playback(picture, &replacements, ops.get()); |
| 2294 |
| 2295 playback.draw(mainCanvas, NULL); |
| 2296 |
| 2297 return true; |
| 2298 } |
| 2299 |
| 2300 void SkGpuDevice::EXPERIMENTAL_postDrawPicture() { |
| 2301 SkPicture::AccelData::Key key = GrAccelData::ComputeAccelDataKey(); |
| 2302 |
| 2303 for (int i = 0; i < fPreprocessed.count(); ++i) { |
| 2304 const SkPicture* picture = fPreprocessed[i]; |
| 2305 |
 | 2306         const SkPicture::AccelData* data = picture->EXPERIMENTAL_getAccelData(key); |
| 2307 SkASSERT(NULL != data); |
| 2308 |
| 2309 const GrAccelData *gpuData = static_cast<const GrAccelData*>(data); |
| 2310 SkASSERT(0 != gpuData->numSaveLayers()); |
| 2311 |
| 2312 // unlock the layers associated with the current picture |
| 2313 for (int i = 0; i < gpuData->numSaveLayers(); ++i) { |
| 2314 const GrAccelData::SaveLayerInfo& info = gpuData->saveLayerInfo(i); |
| 2315 |
 | 2316             GrCachedLayer* layer = fContext->getLayerCache()->findLayer(picture->uniqueID(), |
 | 2317                                                                          info.fSaveLayerOpID, |
 | 2318                                                                          info.fRestoreOpID, |
 | 2319                                                                          info.fCTM); |
| 2320 fContext->getLayerCache()->unlock(layer); |
| 2321 } |
| 2322 |
| 2323 #if DISABLE_CACHING |
| 2324 // TODO: for debugging only |
| 2325 fContext->getLayerCache()->purge(picture->uniqueID()); |
| 2326 #endif |
| 2327 |
| 2328 picture->unref(); |
2171 } | 2329 } |
2172 | 2330 |
2173 #if DISABLE_CACHING | 2331 #if DISABLE_CACHING |
2174 // This code completely clears out the atlas. It is required when | 2332 // This code completely clears out the atlas. It is required when |
2175 // caching is disabled so the atlas doesn't fill up and force more | 2333 // caching is disabled so the atlas doesn't fill up and force more |
2176 // free floating layers | 2334 // free floating layers |
2177 fContext->getLayerCache()->purge(picture->uniqueID()); | |
2178 | |
2179 fContext->getLayerCache()->purgeAll(); | 2335 fContext->getLayerCache()->purgeAll(); |
2180 #endif | 2336 #endif |
| 2337 |
| 2338 fPreprocessed.rewind(); |
2181 } | 2339 } |
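With hoisting split into pre/draw/post phases, a client (e.g., a tiled renderer) would presumably drive the three entry points in order; the device and canvas handles below stand in for whatever the caller already has, so this is only an assumed usage sketch:

    // Assumed calling sequence for the split API.
    device->EXPERIMENTAL_preDrawPicture(picture, NULL, NULL, NULL);    // lock layers, queue deferred renders
    // ... per tile:
    device->EXPERIMENTAL_drawPicture(tileCanvas, picture, NULL, NULL); // flush deferred draws, replay with hoisted layers
    // ... once all tiles are done:
    device->EXPERIMENTAL_postDrawPicture();                            // unlock layers, release picture refs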
2182 | 2340 |
2183 SkImageFilter::Cache* SkGpuDevice::getImageFilterCache() { | 2341 SkImageFilter::Cache* SkGpuDevice::getImageFilterCache() { |
2184 // We always return a transient cache, so it is freed after each | 2342 // We always return a transient cache, so it is freed after each |
2185 // filter traversal. | 2343 // filter traversal. |
2186 return SkImageFilter::Cache::Create(kDefaultImageFilterCacheSize); | 2344 return SkImageFilter::Cache::Create(kDefaultImageFilterCacheSize); |
2187 } | 2345 } |