OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright 2012 Google Inc. | 2 * Copyright 2012 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "SkImage_Base.h" | 8 #include "SkImage_Base.h" |
9 #include "SkBitmap.h" | 9 #include "SkBitmap.h" |
10 #include "SkBitmapProcShader.h" | 10 #include "SkBitmapProcShader.h" |
(...skipping 97 matching lines...) | |
108 // like a lazy decode or imagegenerator. PreLocked means it is flat pixels already. | 108 // like a lazy decode or imagegenerator. PreLocked means it is flat pixels already. |
109 fBitmap.lockPixels(); | 109 fBitmap.lockPixels(); |
110 } | 110 } |
111 SkASSERT(bitmapMayBeMutable || fBitmap.isImmutable()); | 111 SkASSERT(bitmapMayBeMutable || fBitmap.isImmutable()); |
112 } | 112 } |
113 | 113 |
114 bool onIsLazyGenerated() const override { | 114 bool onIsLazyGenerated() const override { |
115 return fBitmap.pixelRef() && fBitmap.pixelRef()->isLazyGenerated(); | 115 return fBitmap.pixelRef() && fBitmap.pixelRef()->isLazyGenerated(); |
116 } | 116 } |
117 | 117 |
118 #if SK_SUPPORT_GPU | |
119 sk_sp<GrTexture> refPinnedTexture(uint32_t* uniqueID) const override; | |
120 void onPinAsTexture(GrContext*) const override; | |
121 void onUnpinAsTexture(GrContext*) const override; | |
122 #endif | |
123 | |
118 private: | 124 private: |
119 SkBitmap fBitmap; | 125 SkBitmap fBitmap; |
120 | 126 |
127 #if SK_SUPPORT_GPU | |
128 mutable sk_sp<GrTexture> fPinnedTexture; | |
129 mutable int32_t fPinnedCount = 0; | |
130 mutable uint32_t fPinnedUniqueID = 0; | |
131 #endif | |
132 | |
121 typedef SkImage_Base INHERITED; | 133 typedef SkImage_Base INHERITED; |
122 }; | 134 }; |
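The three mutable fields above, guarded by gRasterImageMutex (added later in this diff), implement a simple reference-counted pin. A minimal sketch of the intended call pattern, assuming a hypothetical caller with access to these methods (the CL does not show its call sites):

    // Hypothetical caller. Pins must be balanced: onPinAsTexture() always
    // increments the count, even when the texture upload fails, and
    // onUnpinAsTexture() always decrements it.
    void drawRepeatedly(SkImage_Raster* image, GrContext* ctx) {
        image->onPinAsTexture(ctx);    // first pin uploads and caches the texture
        for (int i = 0; i < 3; ++i) {
            // ... draws here can reuse it via refPinnedTexture()
        }
        image->onUnpinAsTexture(ctx);  // count reaches 0: texture is released
    }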
123 | 135 |
124 /////////////////////////////////////////////////////////////////////////////// | 136 /////////////////////////////////////////////////////////////////////////////// |
125 | 137 |
126 static void release_data(void* addr, void* context) { | 138 static void release_data(void* addr, void* context) { |
127 SkData* data = static_cast<SkData*>(context); | 139 SkData* data = static_cast<SkData*>(context); |
128 data->unref(); | 140 data->unref(); |
129 } | 141 } |
130 | 142 |
(...skipping 40 matching lines...) | |
171 return pr->refEncodedData(); | 183 return pr->refEncodedData(); |
172 } | 184 } |
173 return nullptr; | 185 return nullptr; |
174 } | 186 } |
175 | 187 |
176 bool SkImage_Raster::getROPixels(SkBitmap* dst, CachingHint) const { | 188 bool SkImage_Raster::getROPixels(SkBitmap* dst, CachingHint) const { |
177 *dst = fBitmap; | 189 *dst = fBitmap; |
178 return true; | 190 return true; |
179 } | 191 } |
180 | 192 |
193 static SkMutex gRasterImageMutex; | |
194 | |
195 #include "GrImageIDTextureAdjuster.h" | |
196 | |
181 GrTexture* SkImage_Raster::asTextureRef(GrContext* ctx, const GrTextureParams& params, | 197 GrTexture* SkImage_Raster::asTextureRef(GrContext* ctx, const GrTextureParams& params, |
182 SkSourceGammaTreatment gammaTreatment) const { | 198 SkSourceGammaTreatment gammaTreatment) const { |
183 #if SK_SUPPORT_GPU | 199 #if SK_SUPPORT_GPU |
184 if (!ctx) { | 200 if (!ctx) { |
185 return nullptr; | 201 return nullptr; |
186 } | 202 } |
187 | 203 |
204 uint32_t uniqueID; | |
205 sk_sp<GrTexture> tex = this->refPinnedTexture(&uniqueID); | |
bsalomon 2016/08/16 16:36:47: Doesn't #1 need to be checked here? i.e. that tex-
reed1 2016/08/16 16:41:33: Does this sort of check not already exist for all
(A sketch of such a context check follows this function.)
206 if (tex) { | |
207 GrTextureAdjuster adjuster(fPinnedTexture.get(), fBitmap.bounds(), | |
208 fPinnedUniqueID, fBitmap.colorSpace()); | |
209 return adjuster.refTextureSafeForParams(params, gammaTreatment, nullptr); | |
210 } | |
211 | |
188 return GrRefCachedBitmapTexture(ctx, fBitmap, params, gammaTreatment); | 212 return GrRefCachedBitmapTexture(ctx, fBitmap, params, gammaTreatment); |
189 #endif | 213 #endif |
190 | 214 |
191 return nullptr; | 215 return nullptr; |
192 } | 216 } |
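One way the check bsalomon asks about in the thread above could look; this is a sketch under the assumption that comparing the pinned texture's getContext() against ctx is the right test (the unpin path below already asserts exactly that), not code from the CL:

    // Hypothetical guard for the pinned fast path: only reuse the pinned
    // texture when it was created on the GrContext we were handed.
    if (tex && tex->getContext() == ctx) {
        GrTextureAdjuster adjuster(tex.get(), fBitmap.bounds(),
                                   uniqueID, fBitmap.colorSpace());
        return adjuster.refTextureSafeForParams(params, gammaTreatment, nullptr);
    }
    // otherwise fall through to GrRefCachedBitmapTexture() as before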
193 | 217 |
218 #if SK_SUPPORT_GPU | |
219 | |
220 sk_sp<GrTexture> SkImage_Raster::refPinnedTexture(uint32_t* uniqueID) const { | |
221 SkAutoMutexAcquire ama(gRasterImageMutex); | |
222 if (fPinnedTexture) { | |
223 SkASSERT(fPinnedCount > 0); | |
224 SkASSERT(fPinnedUniqueID != 0); | |
225 *uniqueID = fPinnedUniqueID; | |
226 return fPinnedTexture; | |
227 } | |
228 return nullptr; | |
229 } | |
230 | |
231 void SkImage_Raster::onPinAsTexture(GrContext* ctx) const { | |
232 SkAutoMutexAcquire ama(gRasterImageMutex); | |
233 | |
234 if (fPinnedTexture) { | |
bsalomon 2016/08/16 16:36:47: What if fPinnedTexture's context is not ctx?
reed1 2016/08/16 16:41:33: Oops, missed it here. I was only looking at unpin.
(A sketch of such a guard follows the #endif below.)
235 SkASSERT(fPinnedCount > 0); | |
236 SkASSERT(fPinnedUniqueID != 0); | |
237 } else { | |
238 SkASSERT(fPinnedCount == 0); | |
239 SkASSERT(fPinnedUniqueID == 0); | |
240 fPinnedTexture.reset(GrRefCachedBitmapTexture(ctx, fBitmap, | |
241 GrTextureParams::ClampNoFilter(), | |
242 SkSourceGammaTreatment::kRespect)); | |
243 fPinnedUniqueID = fBitmap.getGenerationID(); | |
244 } | |
245 // Note: we always increment, even if we failed to create the pinned texture | |
246 ++fPinnedCount; | |
247 } | |
248 | |
249 void SkImage_Raster::onUnpinAsTexture(GrContext* ctx) const { | |
250 SkAutoMutexAcquire ama(gRasterImageMutex); | |
251 | |
252 // Note: we always decrement, even if fPinnedTexture is null | |
253 SkASSERT(fPinnedCount > 0); | |
254 SkASSERT(fPinnedUniqueID != 0); | |
255 if (fPinnedTexture) { | |
256 SkASSERT(fPinnedTexture->getContext() == ctx); | |
257 } | |
258 | |
259 if (0 == --fPinnedCount) { | |
260 fPinnedTexture.reset(nullptr); | |
261 fPinnedUniqueID = 0; | |
262 } | |
263 } | |
264 #endif | |
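Picking up reed1's "missed it here" from the pin thread above: a hedged sketch of the same context guard applied inside onPinAsTexture(), mirroring the SkASSERT style the unpin path already uses (not what this CL lands):

    // Inside onPinAsTexture(), before reusing an existing pinned texture:
    if (fPinnedTexture) {
        SkASSERT(fPinnedCount > 0);
        SkASSERT(fPinnedUniqueID != 0);
        SkASSERT(fPinnedTexture->getContext() == ctx);  // proposed guard
    }

A debug-only assert matches how onUnpinAsTexture() handles the mismatch; a release-safe alternative would have to either re-upload for the new context or fail the pin.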
265 | |
194 sk_sp<SkImage> SkImage_Raster::onMakeSubset(const SkIRect& subset) const { | 266 sk_sp<SkImage> SkImage_Raster::onMakeSubset(const SkIRect& subset) const { |
195 // TODO: could consider heuristic of sharing pixels, if subset is pretty close to complete | 267 // TODO: could consider heuristic of sharing pixels, if subset is pretty close to complete |
196 | 268 |
197 SkImageInfo info = SkImageInfo::MakeN32(subset.width(), subset.height(), fBitmap.alphaType()); | 269 SkImageInfo info = SkImageInfo::MakeN32(subset.width(), subset.height(), fBitmap.alphaType()); |
198 auto surface(SkSurface::MakeRaster(info)); | 270 auto surface(SkSurface::MakeRaster(info)); |
199 if (!surface) { | 271 if (!surface) { |
200 return nullptr; | 272 return nullptr; |
201 } | 273 } |
202 surface->getCanvas()->clear(0); | 274 surface->getCanvas()->clear(0); |
203 surface->getCanvas()->drawImage(this, SkIntToScalar(-subset.x()), SkIntToScalar(-subset.y()), | 275 surface->getCanvas()->drawImage(this, SkIntToScalar(-subset.x()), SkIntToScalar(-subset.y()), |
(...skipping 97 matching lines...) | |
301 // pixelref since the caller might call setImmutable() themselves | 373 // pixelref since the caller might call setImmutable() themselves |
302 // (thus changing our state). | 374 // (thus changing our state). |
303 if (fBitmap.isImmutable()) { | 375 if (fBitmap.isImmutable()) { |
304 bitmap->setInfo(fBitmap.info(), fBitmap.rowBytes()); | 376 bitmap->setInfo(fBitmap.info(), fBitmap.rowBytes()); |
305 bitmap->setPixelRef(fBitmap.pixelRef(), fBitmap.pixelRefOrigin()); | 377 bitmap->setPixelRef(fBitmap.pixelRef(), fBitmap.pixelRefOrigin()); |
306 return true; | 378 return true; |
307 } | 379 } |
308 } | 380 } |
309 return this->INHERITED::onAsLegacyBitmap(bitmap, mode); | 381 return this->INHERITED::onAsLegacyBitmap(bitmap, mode); |
310 } | 382 } |