OLD | NEW |
1 | 1 |
2 /* | 2 /* |
3 * Copyright 2011 Google Inc. | 3 * Copyright 2011 Google Inc. |
4 * | 4 * |
5 * Use of this source code is governed by a BSD-style license that can be | 5 * Use of this source code is governed by a BSD-style license that can be |
6 * found in the LICENSE file. | 6 * found in the LICENSE file. |
7 */ | 7 */ |
8 | 8 |
9 #include "GrContext.h" | 9 #include "GrContext.h" |
10 | 10 |
11 #include "GrAARectRenderer.h" | 11 #include "GrAARectRenderer.h" |
12 #include "GrAtlasTextContext.h" | 12 #include "GrAtlasTextContext.h" |
13 #include "GrBatch.h" | 13 #include "GrBatch.h" |
14 #include "GrBatchFontCache.h" | 14 #include "GrBatchFontCache.h" |
15 #include "GrBatchTarget.h" | 15 #include "GrBatchTarget.h" |
16 #include "GrBufferAllocPool.h" | 16 #include "GrBufferAllocPool.h" |
17 #include "GrDefaultGeoProcFactory.h" | 17 #include "GrDefaultGeoProcFactory.h" |
18 #include "GrGpuResource.h" | 18 #include "GrGpuResource.h" |
19 #include "GrGpuResourcePriv.h" | 19 #include "GrGpuResourcePriv.h" |
20 #include "GrDrawTargetCaps.h" | 20 #include "GrDrawTargetCaps.h" |
21 #include "GrGpu.h" | 21 #include "GrGpu.h" |
22 #include "GrIndexBuffer.h" | 22 #include "GrIndexBuffer.h" |
23 #include "GrInOrderDrawBuffer.h" | 23 #include "GrInOrderDrawBuffer.h" |
24 #include "GrLayerCache.h" | 24 #include "GrLayerCache.h" |
25 #include "GrOvalRenderer.h" | 25 #include "GrOvalRenderer.h" |
26 #include "GrPathRenderer.h" | 26 #include "GrPathRenderer.h" |
27 #include "GrPathUtils.h" | 27 #include "GrPathUtils.h" |
28 #include "GrRenderTargetPriv.h" | 28 #include "GrRenderTargetPriv.h" |
29 #include "GrResourceCache.h" | 29 #include "GrResourceCache.h" |
| 30 #include "GrResourceProvider.h" |
30 #include "GrSoftwarePathRenderer.h" | 31 #include "GrSoftwarePathRenderer.h" |
31 #include "GrStencilAndCoverTextContext.h" | 32 #include "GrStencilAndCoverTextContext.h" |
32 #include "GrStrokeInfo.h" | 33 #include "GrStrokeInfo.h" |
33 #include "GrSurfacePriv.h" | 34 #include "GrSurfacePriv.h" |
34 #include "GrTextBlobCache.h" | 35 #include "GrTextBlobCache.h" |
35 #include "GrTexturePriv.h" | 36 #include "GrTexturePriv.h" |
36 #include "GrTraceMarker.h" | 37 #include "GrTraceMarker.h" |
37 #include "GrTracing.h" | 38 #include "GrTracing.h" |
38 #include "SkDashPathPriv.h" | 39 #include "SkDashPathPriv.h" |
39 #include "SkConfig8888.h" | 40 #include "SkConfig8888.h" |
(...skipping 45 matching lines...)
85 if (context->init(backend, backendContext)) { | 86 if (context->init(backend, backendContext)) { |
86 return context; | 87 return context; |
87 } else { | 88 } else { |
88 context->unref(); | 89 context->unref(); |
89 return NULL; | 90 return NULL; |
90 } | 91 } |
91 } | 92 } |
92 | 93 |
93 GrContext::GrContext(const Options& opts) : fOptions(opts) { | 94 GrContext::GrContext(const Options& opts) : fOptions(opts) { |
94 fGpu = NULL; | 95 fGpu = NULL; |
| 96 fResourceCache = NULL; |
| 97 fResourceProvider = NULL; |
95 fPathRendererChain = NULL; | 98 fPathRendererChain = NULL; |
96 fSoftwarePathRenderer = NULL; | 99 fSoftwarePathRenderer = NULL; |
97 fResourceCache = NULL; | |
98 fBatchFontCache = NULL; | 100 fBatchFontCache = NULL; |
99 fDrawBuffer = NULL; | 101 fDrawBuffer = NULL; |
100 fDrawBufferVBAllocPool = NULL; | 102 fDrawBufferVBAllocPool = NULL; |
101 fDrawBufferIBAllocPool = NULL; | 103 fDrawBufferIBAllocPool = NULL; |
102 fFlushToReduceCacheSize = false; | 104 fFlushToReduceCacheSize = false; |
103 fAARectRenderer = NULL; | 105 fAARectRenderer = NULL; |
104 fOvalRenderer = NULL; | 106 fOvalRenderer = NULL; |
105 fMaxTextureSizeOverride = 1 << 20; | 107 fMaxTextureSizeOverride = 1 << 20; |
106 } | 108 } |
107 | 109 |
108 bool GrContext::init(GrBackend backend, GrBackendContext backendContext) { | 110 bool GrContext::init(GrBackend backend, GrBackendContext backendContext) { |
109 SkASSERT(NULL == fGpu); | 111 SkASSERT(NULL == fGpu); |
110 | 112 |
111 fGpu = GrGpu::Create(backend, backendContext, this); | 113 fGpu = GrGpu::Create(backend, backendContext, this); |
112 if (NULL == fGpu) { | 114 if (NULL == fGpu) { |
113 return false; | 115 return false; |
114 } | 116 } |
115 this->initCommon(); | 117 this->initCommon(); |
116 return true; | 118 return true; |
117 } | 119 } |
118 | 120 |
119 void GrContext::initCommon() { | 121 void GrContext::initCommon() { |
120 fResourceCache = SkNEW(GrResourceCache); | 122 fResourceCache = SkNEW(GrResourceCache); |
121 fResourceCache->setOverBudgetCallback(OverBudgetCB, this); | 123 fResourceCache->setOverBudgetCallback(OverBudgetCB, this); |
| 124 fResourceProvider = SkNEW_ARGS(GrResourceProvider, (fGpu, fResourceCache)); |
122 | 125 |
123 fLayerCache.reset(SkNEW_ARGS(GrLayerCache, (this))); | 126 fLayerCache.reset(SkNEW_ARGS(GrLayerCache, (this))); |
124 | 127 |
125 fAARectRenderer = SkNEW_ARGS(GrAARectRenderer, (fGpu)); | 128 fAARectRenderer = SkNEW_ARGS(GrAARectRenderer, (fGpu)); |
126 fOvalRenderer = SkNEW_ARGS(GrOvalRenderer, (fGpu)); | 129 fOvalRenderer = SkNEW_ARGS(GrOvalRenderer, (fGpu)); |
127 | 130 |
128 fDidTestPMConversions = false; | 131 fDidTestPMConversions = false; |
129 | 132 |
130 this->setupDrawBuffer(); | 133 this->setupDrawBuffer(); |
131 | 134 |
132 // GrBatchFontCache will eventually replace GrFontCache | 135 // GrBatchFontCache will eventually replace GrFontCache |
133 fBatchFontCache = SkNEW_ARGS(GrBatchFontCache, (this)); | 136 fBatchFontCache = SkNEW_ARGS(GrBatchFontCache, (this)); |
134 | 137 |
135 fTextBlobCache.reset(SkNEW_ARGS(GrTextBlobCache, (TextBlobCacheOverBudgetCB, this))); | 138 fTextBlobCache.reset(SkNEW_ARGS(GrTextBlobCache, (TextBlobCacheOverBudgetCB, this))); |
136 } | 139 } |
137 | 140 |
138 GrContext::~GrContext() { | 141 GrContext::~GrContext() { |
139 if (NULL == fGpu) { | 142 if (NULL == fGpu) { |
140 return; | 143 return; |
141 } | 144 } |
142 | 145 |
143 this->flush(); | 146 this->flush(); |
144 | 147 |
145 for (int i = 0; i < fCleanUpData.count(); ++i) { | 148 for (int i = 0; i < fCleanUpData.count(); ++i) { |
146 (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo); | 149 (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo); |
147 } | 150 } |
148 | 151 |
| 152 SkDELETE(fResourceProvider); |
149 SkDELETE(fResourceCache); | 153 SkDELETE(fResourceCache); |
150 SkDELETE(fBatchFontCache); | 154 SkDELETE(fBatchFontCache); |
151 SkDELETE(fDrawBuffer); | 155 SkDELETE(fDrawBuffer); |
152 SkDELETE(fDrawBufferVBAllocPool); | 156 SkDELETE(fDrawBufferVBAllocPool); |
153 SkDELETE(fDrawBufferIBAllocPool); | 157 SkDELETE(fDrawBufferIBAllocPool); |
154 | 158 |
155 fAARectRenderer->unref(); | 159 fAARectRenderer->unref(); |
156 fOvalRenderer->unref(); | 160 fOvalRenderer->unref(); |
157 | 161 |
158 fGpu->unref(); | 162 fGpu->unref(); |
159 SkSafeUnref(fPathRendererChain); | 163 SkSafeUnref(fPathRendererChain); |
160 SkSafeUnref(fSoftwarePathRenderer); | 164 SkSafeUnref(fSoftwarePathRenderer); |
161 } | 165 } |
162 | 166 |
163 void GrContext::abandonContext() { | 167 void GrContext::abandonContext() { |
| 168 fResourceProvider->abandon(); |
164 // abandon first to so destructors | 169 // abandon first to so destructors |
165 // don't try to free the resources in the API. | 170 // don't try to free the resources in the API. |
166 fResourceCache->abandonAll(); | 171 fResourceCache->abandonAll(); |
167 | 172 |
168 fGpu->contextAbandoned(); | 173 fGpu->contextAbandoned(); |
169 | 174 |
170 // a path renderer may be holding onto resources that | 175 // a path renderer may be holding onto resources that |
171 // are now unusable | 176 // are now unusable |
172 SkSafeSetNull(fPathRendererChain); | 177 SkSafeSetNull(fPathRendererChain); |
173 SkSafeSetNull(fSoftwarePathRenderer); | 178 SkSafeSetNull(fSoftwarePathRenderer); |
(...skipping 56 matching lines...)
230 GrStencilAttachment* sb = renderTarget->renderTargetPriv().attachStencilAttachment(); | 235 GrStencilAttachment* sb = renderTarget->renderTargetPriv().attachStencilAttachment(); |
231 if (sb) { | 236 if (sb) { |
232 return GrStencilAndCoverTextContext::Create(this, gpuDevice, leakyProperties); | 237 return GrStencilAndCoverTextContext::Create(this, gpuDevice, leakyProperties); |
233 } | 238 } |
234 } | 239 } |
235 | 240 |
236 return GrAtlasTextContext::Create(this, gpuDevice, leakyProperties, enableDistanceFieldFonts); | 241 return GrAtlasTextContext::Create(this, gpuDevice, leakyProperties, enableDistanceFieldFonts); |
237 } | 242 } |
238 | 243 |
239 //////////////////////////////////////////////////////////////////////////////// | 244 //////////////////////////////////////////////////////////////////////////////// |
240 enum ScratchTextureFlags { | |
241 kExact_ScratchTextureFlag = 0x1, | |
242 kNoPendingIO_ScratchTextureFlag = 0x2, | |
243 kNoCreate_ScratchTextureFlag = 0x4, | |
244 }; | |
245 | 245 |
246 bool GrContext::isConfigTexturable(GrPixelConfig config) const { | 246 bool GrContext::isConfigTexturable(GrPixelConfig config) const { |
247 return fGpu->caps()->isConfigTexturable(config); | 247 return fGpu->caps()->isConfigTexturable(config); |
248 } | 248 } |
249 | 249 |
250 bool GrContext::npotTextureTileSupport() const { | 250 bool GrContext::npotTextureTileSupport() const { |
251 return fGpu->caps()->npotTextureTileSupport(); | 251 return fGpu->caps()->npotTextureTileSupport(); |
252 } | 252 } |
253 | 253 |
254 GrTexture* GrContext::createTexture(const GrSurfaceDesc& desc, bool budgeted, const void* srcData, |
255 size_t rowBytes) { | |
256 RETURN_NULL_IF_ABANDONED | |
257 if ((desc.fFlags & kRenderTarget_GrSurfaceFlag) && | |
258 !this->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) { | |
259 return NULL; | |
260 } | |
261 if (!GrPixelConfigIsCompressed(desc.fConfig)) { | |
262 static const uint32_t kFlags = kExact_ScratchTextureFlag | | |
263 kNoCreate_ScratchTextureFlag; | |
264 if (GrTexture* texture = this->internalRefScratchTexture(desc, kFlags)) { |
265 if (!srcData || texture->writePixels(0, 0, desc.fWidth, desc.fHeight, desc.fConfig, |
266 srcData, rowBytes)) { | |
267 if (!budgeted) { | |
268 texture->resourcePriv().makeUnbudgeted(); | |
269 } | |
270 return texture; | |
271 } | |
272 texture->unref(); | |
273 } | |
274 } | |
275 return fGpu->createTexture(desc, budgeted, srcData, rowBytes); | |
276 } | |
277 | |
278 GrTexture* GrContext::refScratchTexture(const GrSurfaceDesc& desc, ScratchTexMatch match, |
279 bool calledDuringFlush) { | |
280 RETURN_NULL_IF_ABANDONED | |
281 // Currently we don't recycle compressed textures as scratch. | |
282 if (GrPixelConfigIsCompressed(desc.fConfig)) { | |
283 return NULL; | |
284 } else { | |
285 uint32_t flags = 0; | |
286 if (kExact_ScratchTexMatch == match) { | |
287 flags |= kExact_ScratchTextureFlag; | |
288 } | |
289 if (calledDuringFlush) { | |
290 flags |= kNoPendingIO_ScratchTextureFlag; | |
291 } | |
292 return this->internalRefScratchTexture(desc, flags); | |
293 } | |
294 } | |
295 | |
296 GrTexture* GrContext::internalRefScratchTexture(const GrSurfaceDesc& inDesc, uint32_t flags) { |
297 SkASSERT(!GrPixelConfigIsCompressed(inDesc.fConfig)); | |
298 | |
299 SkTCopyOnFirstWrite<GrSurfaceDesc> desc(inDesc); | |
300 | |
301 if (fGpu->caps()->reuseScratchTextures() || (desc->fFlags & kRenderTarget_GrSurfaceFlag)) { |
302 if (!(kExact_ScratchTextureFlag & flags)) { | |
303 // bin by pow2 with a reasonable min | |
304 static const int MIN_SIZE = 16; | |
305 GrSurfaceDesc* wdesc = desc.writable(); | |
306 wdesc->fWidth = SkTMax(MIN_SIZE, GrNextPow2(desc->fWidth)); | |
307 wdesc->fHeight = SkTMax(MIN_SIZE, GrNextPow2(desc->fHeight)); | |
308 } | |
309 | |
310 GrScratchKey key; | |
311 GrTexturePriv::ComputeScratchKey(*desc, &key); | |
312 uint32_t scratchFlags = 0; | |
313 if (kNoPendingIO_ScratchTextureFlag & flags) { | |
314 scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag; | |
315 } else if (!(desc->fFlags & kRenderTarget_GrSurfaceFlag)) { | |
316 // If it is not a render target then it will most likely be populated by |
317 // writePixels() which will trigger a flush if the texture has pending IO. |
318 scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag; | |
319 } | |
320 GrGpuResource* resource = fResourceCache->findAndRefScratchResource(key, scratchFlags); |
321 if (resource) { | |
322 GrSurface* surface = static_cast<GrSurface*>(resource); | |
323 GrRenderTarget* rt = surface->asRenderTarget(); | |
324 if (rt && fGpu->caps()->discardRenderTargetSupport()) { | |
325 rt->discard(); | |
326 } | |
327 return surface->asTexture(); | |
328 } | |
329 } | |
330 | |
331 if (!(kNoCreate_ScratchTextureFlag & flags)) { | |
332 return fGpu->createTexture(*desc, true, NULL, 0); | |
333 } | |
334 | |
335 return NULL; | |
336 } | |
337 | |
338 void GrContext::OverBudgetCB(void* data) { | 254 void GrContext::OverBudgetCB(void* data) { |
339 SkASSERT(data); | 255 SkASSERT(data); |
340 | 256 |
341 GrContext* context = reinterpret_cast<GrContext*>(data); | 257 GrContext* context = reinterpret_cast<GrContext*>(data); |
342 | 258 |
343 // Flush the InOrderDrawBuffer to possibly free up some textures | 259 // Flush the InOrderDrawBuffer to possibly free up some textures |
344 context->fFlushToReduceCacheSize = true; | 260 context->fFlushToReduceCacheSize = true; |
345 } | 261 } |
346 | 262 |
347 void GrContext::TextBlobCacheOverBudgetCB(void* data) { | 263 void GrContext::TextBlobCacheOverBudgetCB(void* data) { |
(...skipping 14 matching lines...)
362 int GrContext::getMaxRenderTargetSize() const { | 278 int GrContext::getMaxRenderTargetSize() const { |
363 return fGpu->caps()->maxRenderTargetSize(); | 279 return fGpu->caps()->maxRenderTargetSize(); |
364 } | 280 } |
365 | 281 |
366 int GrContext::getMaxSampleCount() const { | 282 int GrContext::getMaxSampleCount() const { |
367 return fGpu->caps()->maxSampleCount(); | 283 return fGpu->caps()->maxSampleCount(); |
368 } | 284 } |
369 | 285 |
370 /////////////////////////////////////////////////////////////////////////////// | 286 /////////////////////////////////////////////////////////////////////////////// |
371 | 287 |
372 GrTexture* GrContext::wrapBackendTexture(const GrBackendTextureDesc& desc) { | |
373 RETURN_NULL_IF_ABANDONED | |
374 return fGpu->wrapBackendTexture(desc); | |
375 } | |
376 | |
377 GrRenderTarget* GrContext::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) { |
378 RETURN_NULL_IF_ABANDONED | |
379 return fGpu->wrapBackendRenderTarget(desc); | |
380 } | |
381 | |
382 //////////////////////////////////////////////////////////////////////////////// | |
383 | |
384 void GrContext::clear(const SkIRect* rect, | 288 void GrContext::clear(const SkIRect* rect, |
385 const GrColor color, | 289 const GrColor color, |
386 bool canIgnoreRect, | 290 bool canIgnoreRect, |
387 GrRenderTarget* renderTarget) { | 291 GrRenderTarget* renderTarget) { |
388 RETURN_IF_ABANDONED | 292 RETURN_IF_ABANDONED |
389 ASSERT_OWNED_RESOURCE(renderTarget); | 293 ASSERT_OWNED_RESOURCE(renderTarget); |
390 SkASSERT(renderTarget); | 294 SkASSERT(renderTarget); |
391 | 295 |
392 AutoCheckFlush acf(this); | 296 AutoCheckFlush acf(this); |
393 GR_CREATE_TRACE_MARKER_CONTEXT("GrContext::clear", this); | 297 GR_CREATE_TRACE_MARKER_CONTEXT("GrContext::clear", this); |
(...skipping 1135 matching lines...)
1529 if (GrPixelConfigSwapRAndB(srcConfig) == | 1433 if (GrPixelConfigSwapRAndB(srcConfig) == |
1530 fGpu->preferredWritePixelsConfig(srcConfig, renderTarget->config())) { | 1434 fGpu->preferredWritePixelsConfig(srcConfig, renderTarget->config())) { |
1531 writeConfig = GrPixelConfigSwapRAndB(srcConfig); | 1435 writeConfig = GrPixelConfigSwapRAndB(srcConfig); |
1532 swapRAndB = true; | 1436 swapRAndB = true; |
1533 } | 1437 } |
1534 | 1438 |
1535 GrSurfaceDesc desc; | 1439 GrSurfaceDesc desc; |
1536 desc.fWidth = width; | 1440 desc.fWidth = width; |
1537 desc.fHeight = height; | 1441 desc.fHeight = height; |
1538 desc.fConfig = writeConfig; | 1442 desc.fConfig = writeConfig; |
1539 SkAutoTUnref<GrTexture> texture(this->refScratchTexture(desc, kApprox_ScratchTexMatch)); | 1443 SkAutoTUnref<GrTexture> texture(this->textureProvider()->refScratchTexture(desc, |
| 1444 GrTextureProvider::kApprox_ScratchTexMatch)); |
1540 if (!texture) { | 1445 if (!texture) { |
1541 return false; | 1446 return false; |
1542 } | 1447 } |
1543 | 1448 |
1544 SkAutoTUnref<const GrFragmentProcessor> fp; | 1449 SkAutoTUnref<const GrFragmentProcessor> fp; |
1545 SkMatrix textureMatrix; | 1450 SkMatrix textureMatrix; |
1546 textureMatrix.setIDiv(texture->width(), texture->height()); | 1451 textureMatrix.setIDiv(texture->width(), texture->height()); |
1547 | 1452 |
1548 // allocate a tmp buffer and sw convert the pixels to premul | 1453 // allocate a tmp buffer and sw convert the pixels to premul |
1549 SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0); | 1454 SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0); |
(...skipping 124 matching lines...)
1674 desc.fFlags = kRenderTarget_GrSurfaceFlag; | 1579 desc.fFlags = kRenderTarget_GrSurfaceFlag; |
1675 desc.fWidth = width; | 1580 desc.fWidth = width; |
1676 desc.fHeight = height; | 1581 desc.fHeight = height; |
1677 desc.fConfig = readConfig; | 1582 desc.fConfig = readConfig; |
1678 desc.fOrigin = kTopLeft_GrSurfaceOrigin; | 1583 desc.fOrigin = kTopLeft_GrSurfaceOrigin; |
1679 | 1584 |
1680 // When a full read back is faster than a partial we could always make the scratch exactly | 1585 // When a full read back is faster than a partial we could always make the scratch exactly |
1681 // match the passed rect. However, if we see many different size rectangles we will trash | 1586 // match the passed rect. However, if we see many different size rectangles we will trash |
1682 // our texture cache and pay the cost of creating and destroying many textures. So, we only | 1587 // our texture cache and pay the cost of creating and destroying many textures. So, we only |
1683 // request an exact match when the caller is reading an entire RT. | 1588 // request an exact match when the caller is reading an entire RT. |
1684 ScratchTexMatch match = kApprox_ScratchTexMatch; | 1589 GrTextureProvider::ScratchTexMatch match = GrTextureProvider::kApprox_ScratchTexMatch; |
1685 if (0 == left && | 1590 if (0 == left && |
1686 0 == top && | 1591 0 == top && |
1687 target->width() == width && | 1592 target->width() == width && |
1688 target->height() == height && | 1593 target->height() == height && |
1689 fGpu->fullReadPixelsIsFasterThanPartial()) { | 1594 fGpu->fullReadPixelsIsFasterThanPartial()) { |
1690 match = kExact_ScratchTexMatch; | 1595 match = GrTextureProvider::kExact_ScratchTexMatch; |
1691 } | 1596 } |
1692 tempTexture.reset(this->refScratchTexture(desc, match)); | 1597 tempTexture.reset(this->textureProvider()->refScratchTexture(desc, match)); |
1693 if (tempTexture) { | 1598 if (tempTexture) { |
1694 // compute a matrix to perform the draw | 1599 // compute a matrix to perform the draw |
1695 SkMatrix textureMatrix; | 1600 SkMatrix textureMatrix; |
1696 textureMatrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top); | 1601 textureMatrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top); |
1697 textureMatrix.postIDiv(src->width(), src->height()); | 1602 textureMatrix.postIDiv(src->width(), src->height()); |
1698 | 1603 |
1699 SkAutoTUnref<const GrFragmentProcessor> fp; | 1604 SkAutoTUnref<const GrFragmentProcessor> fp; |
1700 if (unpremul) { | 1605 if (unpremul) { |
1701 fp.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix)); | 1606 fp.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix)); |
1702 if (fp) { | 1607 if (fp) { |
(...skipping 273 matching lines...)
1976 } | 1881 } |
1977 if (maxTextureBytes) { | 1882 if (maxTextureBytes) { |
1978 *maxTextureBytes = fResourceCache->getMaxResourceBytes(); | 1883 *maxTextureBytes = fResourceCache->getMaxResourceBytes(); |
1979 } | 1884 } |
1980 } | 1885 } |
1981 | 1886 |
1982 void GrContext::setResourceCacheLimits(int maxTextures, size_t maxTextureBytes) { | 1887 void GrContext::setResourceCacheLimits(int maxTextures, size_t maxTextureBytes) { |
1983 fResourceCache->setLimits(maxTextures, maxTextureBytes); | 1888 fResourceCache->setLimits(maxTextures, maxTextureBytes); |
1984 } | 1889 } |
1985 | 1890 |
1986 void GrContext::addResourceToCache(const GrUniqueKey& key, GrGpuResource* resource) { |
1987 ASSERT_OWNED_RESOURCE(resource); | |
1988 if (!resource) { | |
1989 return; | |
1990 } | |
1991 resource->resourcePriv().setUniqueKey(key); | |
1992 } | |
1993 | |
1994 bool GrContext::isResourceInCache(const GrUniqueKey& key) const { | |
1995 return fResourceCache->hasUniqueKey(key); | |
1996 } | |
1997 | |
1998 GrGpuResource* GrContext::findAndRefCachedResource(const GrUniqueKey& key) { | |
1999 return fResourceCache->findAndRefUniqueResource(key); | |
2000 } | |
2001 | |
2002 ////////////////////////////////////////////////////////////////////////////// | 1891 ////////////////////////////////////////////////////////////////////////////// |
2003 | 1892 |
2004 void GrContext::addGpuTraceMarker(const GrGpuTraceMarker* marker) { | 1893 void GrContext::addGpuTraceMarker(const GrGpuTraceMarker* marker) { |
2005 fGpu->addGpuTraceMarker(marker); | 1894 fGpu->addGpuTraceMarker(marker); |
2006 if (fDrawBuffer) { | 1895 if (fDrawBuffer) { |
2007 fDrawBuffer->addGpuTraceMarker(marker); | 1896 fDrawBuffer->addGpuTraceMarker(marker); |
2008 } | 1897 } |
2009 } | 1898 } |
2010 | 1899 |
2011 void GrContext::removeGpuTraceMarker(const GrGpuTraceMarker* marker) { | 1900 void GrContext::removeGpuTraceMarker(const GrGpuTraceMarker* marker) { |
2012 fGpu->removeGpuTraceMarker(marker); | 1901 fGpu->removeGpuTraceMarker(marker); |
2013 if (fDrawBuffer) { | 1902 if (fDrawBuffer) { |
2014 fDrawBuffer->removeGpuTraceMarker(marker); | 1903 fDrawBuffer->removeGpuTraceMarker(marker); |
2015 } | 1904 } |
2016 } | 1905 } |
2017 | |