| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2012 Google Inc. | 2 * Copyright 2012 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "SkImage_Base.h" | 8 #include "SkImage_Base.h" |
| 9 #include "SkBitmap.h" | 9 #include "SkBitmap.h" |
| 10 #include "SkBitmapProcShader.h" | 10 #include "SkBitmapProcShader.h" |
| (...skipping 97 matching lines...) |
| 108 // like a lazy decode or imagegenerator. PreLocked means it is flat pixels already. | 108 // like a lazy decode or imagegenerator. PreLocked means it is flat pixels already. |
| 109 fBitmap.lockPixels(); | 109 fBitmap.lockPixels(); |
| 110 } | 110 } |
| 111 SkASSERT(bitmapMayBeMutable || fBitmap.isImmutable()); | 111 SkASSERT(bitmapMayBeMutable || fBitmap.isImmutable()); |
| 112 } | 112 } |
| 113 | 113 |
| 114 bool onIsLazyGenerated() const override { | 114 bool onIsLazyGenerated() const override { |
| 115 return fBitmap.pixelRef() && fBitmap.pixelRef()->isLazyGenerated(); | 115 return fBitmap.pixelRef() && fBitmap.pixelRef()->isLazyGenerated(); |
| 116 } | 116 } |
| 117 | 117 |
| 118 #if SK_SUPPORT_GPU |
| 119 sk_sp<GrTexture> refPinnedTexture(uint32_t* uniqueID) const override; |
| 120 void onPinAsTexture(GrContext*) const override; |
| 121 void onUnpinAsTexture(GrContext*) const override; |
| 122 #endif |
| 123 |
| 118 private: | 124 private: |
| 119 SkBitmap fBitmap; | 125 SkBitmap fBitmap; |
| 120 | 126 |
| 127 #if SK_SUPPORT_GPU |
| 128 mutable sk_sp<GrTexture> fPinnedTexture; |
| 129 mutable int32_t fPinnedCount = 0; |
| 130 mutable uint32_t fPinnedUniqueID = 0; |
| 131 #endif |
| 132 |
| 121 typedef SkImage_Base INHERITED; | 133 typedef SkImage_Base INHERITED; |
| 122 }; | 134 }; |
| 123 | 135 |
| 124 /////////////////////////////////////////////////////////////////////////////// | 136 /////////////////////////////////////////////////////////////////////////////// |
| 125 | 137 |
| 126 static void release_data(void* addr, void* context) { | 138 static void release_data(void* addr, void* context) { |
| 127 SkData* data = static_cast<SkData*>(context); | 139 SkData* data = static_cast<SkData*>(context); |
| 128 data->unref(); | 140 data->unref(); |
| 129 } | 141 } |
| 130 | 142 |
| (...skipping 11 matching lines...) |
| 142 SkImage_Raster::SkImage_Raster(const Info& info, SkPixelRef* pr, const SkIPoint& pixelRefOrigin, | 154 SkImage_Raster::SkImage_Raster(const Info& info, SkPixelRef* pr, const SkIPoint& pixelRefOrigin, |
| 143 size_t rowBytes) | 155 size_t rowBytes) |
| 144 : INHERITED(info.width(), info.height(), pr->getGenerationID()) | 156 : INHERITED(info.width(), info.height(), pr->getGenerationID()) |
| 145 { | 157 { |
| 146 fBitmap.setInfo(info, rowBytes); | 158 fBitmap.setInfo(info, rowBytes); |
| 147 fBitmap.setPixelRef(pr, pixelRefOrigin); | 159 fBitmap.setPixelRef(pr, pixelRefOrigin); |
| 148 fBitmap.lockPixels(); | 160 fBitmap.lockPixels(); |
| 149 SkASSERT(fBitmap.isImmutable()); | 161 SkASSERT(fBitmap.isImmutable()); |
| 150 } | 162 } |
| 151 | 163 |
| 152 SkImage_Raster::~SkImage_Raster() {} | 164 SkImage_Raster::~SkImage_Raster() { |
| 165 #if SK_SUPPORT_GPU |
| 166 SkASSERT(nullptr == fPinnedTexture.get());  // want the caller to have manually unpinned |
| 167 #endif |
| 168 } |
| 153 | 169 |
| 154 bool SkImage_Raster::onReadPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes, | 170 bool SkImage_Raster::onReadPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes, |
| 155 int srcX, int srcY, CachingHint) const { | 171 int srcX, int srcY, CachingHint) const { |
| 156 SkBitmap shallowCopy(fBitmap); | 172 SkBitmap shallowCopy(fBitmap); |
| 157 return shallowCopy.readPixels(dstInfo, dstPixels, dstRowBytes, srcX, srcY); | 173 return shallowCopy.readPixels(dstInfo, dstPixels, dstRowBytes, srcX, srcY); |
| 158 } | 174 } |
| 159 | 175 |
| 160 bool SkImage_Raster::onPeekPixels(SkPixmap* pm) const { | 176 bool SkImage_Raster::onPeekPixels(SkPixmap* pm) const { |
| 161 return fBitmap.peekPixels(pm); | 177 return fBitmap.peekPixels(pm); |
| 162 } | 178 } |
| 163 | 179 |
| 164 SkData* SkImage_Raster::onRefEncoded(GrContext*) const { | 180 SkData* SkImage_Raster::onRefEncoded(GrContext*) const { |
| 165 SkPixelRef* pr = fBitmap.pixelRef(); | 181 SkPixelRef* pr = fBitmap.pixelRef(); |
| 166 const SkImageInfo prInfo = pr->info(); | 182 const SkImageInfo prInfo = pr->info(); |
| 167 const SkImageInfo bmInfo = fBitmap.info(); | 183 const SkImageInfo bmInfo = fBitmap.info(); |
| 168 | 184 |
| 169 // we only try if we (the image) cover the entire area of the pixelRef | 185 // we only try if we (the image) cover the entire area of the pixelRef |
| 170 if (prInfo.width() == bmInfo.width() && prInfo.height() == bmInfo.height()) { | 186 if (prInfo.width() == bmInfo.width() && prInfo.height() == bmInfo.height()) { |
| 171 return pr->refEncodedData(); | 187 return pr->refEncodedData(); |
| 172 } | 188 } |
| 173 return nullptr; | 189 return nullptr; |
| 174 } | 190 } |
| 175 | 191 |
| 176 bool SkImage_Raster::getROPixels(SkBitmap* dst, CachingHint) const { | 192 bool SkImage_Raster::getROPixels(SkBitmap* dst, CachingHint) const { |
| 177 *dst = fBitmap; | 193 *dst = fBitmap; |
| 178 return true; | 194 return true; |
| 179 } | 195 } |
| 180 | 196 |
| 197 #include "GrImageIDTextureAdjuster.h" |
| 198 |
| 181 GrTexture* SkImage_Raster::asTextureRef(GrContext* ctx, const GrTextureParams& params, | 199 GrTexture* SkImage_Raster::asTextureRef(GrContext* ctx, const GrTextureParams& params, |
| 182 SkSourceGammaTreatment gammaTreatment) const { | 200 SkSourceGammaTreatment gammaTreatment) const { |
| 183 #if SK_SUPPORT_GPU | 201 #if SK_SUPPORT_GPU |
| 184 if (!ctx) { | 202 if (!ctx) { |
| 185 return nullptr; | 203 return nullptr; |
| 186 } | 204 } |
| 187 | 205 |
| 206 uint32_t uniqueID; |
| 207 sk_sp<GrTexture> tex = this->refPinnedTexture(&uniqueID); |
| 208 if (tex) { |
| 209 GrTextureAdjuster adjuster(fPinnedTexture.get(), fBitmap.bounds(), |
| 210 fPinnedUniqueID, fBitmap.colorSpace()); |
| 211 return adjuster.refTextureSafeForParams(params, gammaTreatment, nullptr); |
| 212 } |
| 213 |
| 188 return GrRefCachedBitmapTexture(ctx, fBitmap, params, gammaTreatment); | 214 return GrRefCachedBitmapTexture(ctx, fBitmap, params, gammaTreatment); |
| 189 #endif | 215 #endif |
| 190 | 216 |
| 191 return nullptr; | 217 return nullptr; |
| 192 } | 218 } |
| 193 | 219 |
| 220 #if SK_SUPPORT_GPU |
| 221 |
| 222 sk_sp<GrTexture> SkImage_Raster::refPinnedTexture(uint32_t* uniqueID) const { |
| 223 if (fPinnedTexture) { |
| 224 SkASSERT(fPinnedCount > 0); |
| 225 SkASSERT(fPinnedUniqueID != 0); |
| 226 *uniqueID = fPinnedUniqueID; |
| 227 return fPinnedTexture; |
| 228 } |
| 229 return nullptr; |
| 230 } |
| 231 |
| 232 void SkImage_Raster::onPinAsTexture(GrContext* ctx) const { |
| 233 if (fPinnedTexture) { |
| 234 SkASSERT(fPinnedCount > 0); |
| 235 SkASSERT(fPinnedUniqueID != 0); |
| 236 SkASSERT(fPinnedTexture->getContext() == ctx); |
| 237 } else { |
| 238 SkASSERT(fPinnedCount == 0); |
| 239 SkASSERT(fPinnedUniqueID == 0); |
| 240 fPinnedTexture.reset(GrRefCachedBitmapTexture(ctx, fBitmap, |
| 241 GrTextureParams::ClampNoFilter(), |
| 242 SkSourceGammaTreatment::kRespect)); |
| 243 fPinnedUniqueID = fBitmap.getGenerationID(); |
| 244 } |
| 245 // Note: we always increment, even if we failed to create the pinned texture |
| 246 ++fPinnedCount; |
| 247 } |
| 248 |
| 249 void SkImage_Raster::onUnpinAsTexture(GrContext* ctx) const { |
| 250 // Note: we always decrement, even if fPinnedTexture is null |
| 251 SkASSERT(fPinnedCount > 0); |
| 252 SkASSERT(fPinnedUniqueID != 0); |
| 253 if (fPinnedTexture) { |
| 254 SkASSERT(fPinnedTexture->getContext() == ctx); |
| 255 } |
| 256 |
| 257 if (0 == --fPinnedCount) { |
| 258 fPinnedTexture.reset(nullptr); |
| 259 fPinnedUniqueID = 0; |
| 260 } |
| 261 } |
| 262 #endif |
| 263 |
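
For context on the pin/unpin contract implemented above: pins and unpins must balance, the first pin caches the texture in fPinnedTexture, the last unpin releases it, and the destructor asserts the image was fully unpinned. A minimal caller-side sketch follows; the `pinAsTexture()`/`unpinAsTexture()` wrapper names are assumptions about the public entry points that forward to `onPinAsTexture()`/`onUnpinAsTexture()`, not part of this CL.

```cpp
// Hypothetical usage sketch; wrapper names are assumptions, not part of this CL.
void draw_with_pinned_image(GrContext* ctx, SkCanvas* gpuCanvas,
                            const sk_sp<SkImage>& img) {
    img->pinAsTexture(ctx);                 // first pin: creates/refs fPinnedTexture
    gpuCanvas->drawImage(img.get(), 0, 0);  // repeated draws reuse the pinned texture
    img->unpinAsTexture(ctx);               // last unpin: drops fPinnedTexture
}
```
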
| 194 sk_sp<SkImage> SkImage_Raster::onMakeSubset(const SkIRect& subset) const { | 264 sk_sp<SkImage> SkImage_Raster::onMakeSubset(const SkIRect& subset) const { |
| 195 // TODO: could consider heuristic of sharing pixels, if subset is pretty close to complete | 265 // TODO: could consider heuristic of sharing pixels, if subset is pretty close to complete |
| 196 | 266 |
| 197 SkImageInfo info = SkImageInfo::MakeN32(subset.width(), subset.height(), fBitmap.alphaType()); | 267 SkImageInfo info = SkImageInfo::MakeN32(subset.width(), subset.height(), fBitmap.alphaType()); |
| 198 auto surface(SkSurface::MakeRaster(info)); | 268 auto surface(SkSurface::MakeRaster(info)); |
| 199 if (!surface) { | 269 if (!surface) { |
| 200 return nullptr; | 270 return nullptr; |
| 201 } | 271 } |
| 202 surface->getCanvas()->clear(0); | 272 surface->getCanvas()->clear(0); |
| 203 surface->getCanvas()->drawImage(this, SkIntToScalar(-subset.x()), SkIntToScalar(-subset.y()), | 273 surface->getCanvas()->drawImage(this, SkIntToScalar(-subset.x()), SkIntToScalar(-subset.y()), |
| (...skipping 97 matching lines...) |
| 301 // pixelref since the caller might call setImmutable() themselves | 371 // pixelref since the caller might call setImmutable() themselves |
| 302 // (thus changing our state). | 372 // (thus changing our state). |
| 303 if (fBitmap.isImmutable()) { | 373 if (fBitmap.isImmutable()) { |
| 304 bitmap->setInfo(fBitmap.info(), fBitmap.rowBytes()); | 374 bitmap->setInfo(fBitmap.info(), fBitmap.rowBytes()); |
| 305 bitmap->setPixelRef(fBitmap.pixelRef(), fBitmap.pixelRefOrigin()); | 375 bitmap->setPixelRef(fBitmap.pixelRef(), fBitmap.pixelRefOrigin()); |
| 306 return true; | 376 return true; |
| 307 } | 377 } |
| 308 } | 378 } |
| 309 return this->INHERITED::onAsLegacyBitmap(bitmap, mode); | 379 return this->INHERITED::onAsLegacyBitmap(bitmap, mode); |
| 310 } | 380 } |
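
The immutable branch above is the zero-copy path: the returned bitmap shares this image's pixelRef rather than duplicating pixels, while mutable or lazy images fall through to the base class, which copies. A short sketch of what a caller observes; the `asLegacyBitmap()`/`kRO_LegacyBitmapMode` names are assumptions about the era's public wrapper that reaches `onAsLegacyBitmap()`.

```cpp
// Hypothetical illustration; API names are assumptions about the public wrapper.
SkBitmap bm;
if (image->asLegacyBitmap(&bm, SkImage::kRO_LegacyBitmapMode)) {
    // Immutable raster image: 'bm' shares the pixelRef (no copy) and is
    // itself immutable; other cases get a read-only copy from the base class.
    SkASSERT(bm.isImmutable());
}
```
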