| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2010 Google Inc. | 2 * Copyright 2010 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "SkGr.h" | 8 #include "SkGr.h" |
| 9 | 9 |
| 10 #include "GrDrawTargetCaps.h" | 10 #include "GrDrawTargetCaps.h" |
| 11 #include "GrGpu.h" | 11 #include "GrGpu.h" |
| 12 #include "GrGpuResourceCacheAccess.h" |
| 12 #include "GrXferProcessor.h" | 13 #include "GrXferProcessor.h" |
| 13 #include "SkColorFilter.h" | 14 #include "SkColorFilter.h" |
| 14 #include "SkConfig8888.h" | 15 #include "SkConfig8888.h" |
| 15 #include "SkData.h" | 16 #include "SkData.h" |
| 16 #include "SkMessageBus.h" | 17 #include "SkMessageBus.h" |
| 17 #include "SkPixelRef.h" | 18 #include "SkPixelRef.h" |
| 18 #include "SkResourceCache.h" | 19 #include "SkResourceCache.h" |
| 19 #include "SkTextureCompressor.h" | 20 #include "SkTextureCompressor.h" |
| 20 #include "SkYUVPlanesCache.h" | 21 #include "SkYUVPlanesCache.h" |
| 21 #include "effects/GrDitherEffect.h" | 22 #include "effects/GrDitherEffect.h" |
| (...skipping 57 matching lines...) |
| 79 for (int y = 0; y < bitmap.height(); y++) { | 80 for (int y = 0; y < bitmap.height(); y++) { |
| 80 memcpy(dst, src, width); | 81 memcpy(dst, src, width); |
| 81 src += rowBytes; | 82 src += rowBytes; |
| 82 dst += width; | 83 dst += width; |
| 83 } | 84 } |
| 84 } | 85 } |
| 85 } | 86 } |
| 86 | 87 |
| 87 //////////////////////////////////////////////////////////////////////////////// | 88 //////////////////////////////////////////////////////////////////////////////// |
| 88 | 89 |
| 89 static void generate_bitmap_key(const SkBitmap& bitmap, GrContentKey* key) { | 90 enum Stretch { |
| 91 kNo_Stretch, |
| 92 kBilerp_Stretch, |
| 93 kNearest_Stretch |
| 94 }; |
| 95 |
| 96 static Stretch get_stretch_type(const GrContext* ctx, int width, int height, |
| 97 const GrTextureParams* params) { |
| 98 if (params && params->isTiled()) { |
| 99 const GrDrawTargetCaps* caps = ctx->getGpu()->caps(); |
| 100 if (!caps->npotTextureTileSupport() && (!SkIsPow2(width) || !SkIsPow2(height))) { |
| 101 switch(params->filterMode()) { |
| 102 case GrTextureParams::kNone_FilterMode: |
| 103 return kNearest_Stretch; |
| 104 case GrTextureParams::kBilerp_FilterMode: |
| 105 case GrTextureParams::kMipMap_FilterMode: |
| 106 return kBilerp_Stretch; |
| 107 } |
| 108 } |
| 109 } |
| 110 return kNo_Stretch; |
| 111 } |
| 112 |
| 113 static bool make_resize_key(const GrContentKey& origKey, Stretch stretch, GrContentKey* resizeKey) { |
| 114 if (origKey.isValid() && kNo_Stretch != stretch) { |
| 115 static const GrContentKey::Domain kDomain = GrContentKey::GenerateDomain(); |
| 116 GrContentKey::Builder builder(resizeKey, origKey, kDomain, 1); |
| 117 builder[0] = stretch; |
| 118 builder.finish(); |
| 119 return true; |
| 120 } |
| 121 SkASSERT(!resizeKey->isValid()); |
| 122 return false; |
| 123 } |
| 124 |
| 125 static void generate_bitmap_keys(const SkBitmap& bitmap, |
| 126 Stretch stretch, |
| 127 GrContentKey* key, |
| 128 GrContentKey* resizedKey) { |
| 90 // Our id includes the offset, width, and height so that bitmaps created by extractSubset() | 129 // Our id includes the offset, width, and height so that bitmaps created by extractSubset() |
| 91 // are unique. | 130 // are unique. |
| 92 uint32_t genID = bitmap.getGenerationID(); | 131 uint32_t genID = bitmap.getGenerationID(); |
| 93 SkIPoint origin = bitmap.pixelRefOrigin(); | 132 SkIPoint origin = bitmap.pixelRefOrigin(); |
| 94 uint32_t width = SkToU16(bitmap.width()); | 133 uint32_t width = SkToU16(bitmap.width()); |
| 95 uint32_t height = SkToU16(bitmap.height()); | 134 uint32_t height = SkToU16(bitmap.height()); |
| 96 | 135 |
| 97 static const GrContentKey::Domain kDomain = GrContentKey::GenerateDomain(); | 136 static const GrContentKey::Domain kDomain = GrContentKey::GenerateDomain(); |
| 98 GrContentKey::Builder builder(key, kDomain, 4); | 137 GrContentKey::Builder builder(key, kDomain, 4); |
| 99 builder[0] = genID; | 138 builder[0] = genID; |
| 100 builder[1] = origin.fX; | 139 builder[1] = origin.fX; |
| 101 builder[2] = origin.fY; | 140 builder[2] = origin.fY; |
| 102 builder[3] = width | (height << 16); | 141 builder[3] = width | (height << 16); |
| 142 builder.finish(); |
| 143 |
| 144 if (kNo_Stretch != stretch) { |
| 145 make_resize_key(*key, stretch, resizedKey); |
| 146 } |
| 103 } | 147 } |
| 104 | 148 |
| 105 static void generate_bitmap_texture_desc(const SkBitmap& bitmap, GrSurfaceDesc* desc) { | 149 static void generate_bitmap_texture_desc(const SkBitmap& bitmap, GrSurfaceDesc* desc) { |
| 106 desc->fFlags = kNone_GrSurfaceFlags; | 150 desc->fFlags = kNone_GrSurfaceFlags; |
| 107 desc->fWidth = bitmap.width(); | 151 desc->fWidth = bitmap.width(); |
| 108 desc->fHeight = bitmap.height(); | 152 desc->fHeight = bitmap.height(); |
| 109 desc->fConfig = SkImageInfo2GrPixelConfig(bitmap.info()); | 153 desc->fConfig = SkImageInfo2GrPixelConfig(bitmap.info()); |
| 110 desc->fSampleCnt = 0; | 154 desc->fSampleCnt = 0; |
| 111 } | 155 } |
| 112 | 156 |
| 113 namespace { | 157 namespace { |
| 114 | 158 |
| 115 // When the SkPixelRef genID changes, invalidate a corresponding GrResource described by key. | 159 // When the SkPixelRef genID changes, invalidate a corresponding GrResource described by key. |
| 116 class GrResourceInvalidator : public SkPixelRef::GenIDChangeListener { | 160 class GrResourceInvalidator : public SkPixelRef::GenIDChangeListener { |
| 117 public: | 161 public: |
| 118 explicit GrResourceInvalidator(const GrContentKey& key) : fKey(key) {} | 162 explicit GrResourceInvalidator(const GrContentKey& key) : fKey(key) {} |
| 119 private: | 163 private: |
| 120 GrContentKey fKey; | 164 GrContentKey fKey; |
| 121 | 165 |
| 122 void onChange() SK_OVERRIDE { | 166 void onChange() SK_OVERRIDE { |
| 123 const GrResourceInvalidatedMessage message = { fKey }; | 167 const GrResourceInvalidatedMessage message = { fKey }; |
| 124 SkMessageBus<GrResourceInvalidatedMessage>::Post(message); | 168 SkMessageBus<GrResourceInvalidatedMessage>::Post(message); |
| 125 } | 169 } |
| 126 }; | 170 }; |
| 127 | 171 |
| 128 } // namespace | 172 } // namespace |
| 129 | 173 |
| 174 #if 0 // TODO: plug this back up |
| 130 static void add_genID_listener(const GrContentKey& key, SkPixelRef* pixelRef) { | 175 static void add_genID_listener(const GrContentKey& key, SkPixelRef* pixelRef) { |
| 131 SkASSERT(pixelRef); | 176 SkASSERT(pixelRef); |
| 132 pixelRef->addGenIDChangeListener(SkNEW_ARGS(GrResourceInvalidator, (key))); | 177 pixelRef->addGenIDChangeListener(SkNEW_ARGS(GrResourceInvalidator, (key))); |
| 133 } | 178 } |
| 179 #endif |
| 134 | 180 |
| 135 static GrTexture* sk_gr_allocate_texture(GrContext* ctx, | 181 static GrTexture* create_texture_for_bmp(GrContext* ctx, |
| 136 bool cache, | 182 const GrContentKey& optionalKey, |
| 137 const GrTextureParams* params, | |
| 138 const SkBitmap& bm, | |
| 139 GrSurfaceDesc desc, | 183 GrSurfaceDesc desc, |
| 140 const void* pixels, | 184 const void* pixels, |
| 141 size_t rowBytes) { | 185 size_t rowBytes) { |
| 142 GrTexture* result; | 186 GrTexture* result; |
| 143 if (cache) { | 187 if (optionalKey.isValid()) { |
| 144 // This texture is likely to be used again so leave it in the cache | 188 result = ctx->createTexture(desc, pixels, rowBytes); |
| 145 GrContentKey key; | |
| 146 generate_bitmap_key(bm, &key); | |
| 147 | |
| 148 result = ctx->createTexture(params, desc, key, pixels, rowBytes, &key); | |
| 149 if (result) { | 189 if (result) { |
| 150 add_genID_listener(key, bm.pixelRef()); | 190 SkAssertResult(ctx->addResourceToCache(optionalKey, result)); |
| 151 } | 191 } |
| 152 } else { | 192 } else { |
| 153 // This texture is unlikely to be used again (in its present form) so | |
| 154 // just use a scratch texture. This will remove the texture from the | |
| 155 // cache so no one else can find it. Additionally, once unlocked, the | |
| 156 // scratch texture will go to the end of the list for purging so will | |
| 157 // likely be available for this volatile bitmap the next time around. | |
| 158 result = ctx->refScratchTexture(desc, GrContext::kExact_ScratchTexMatch); | 193 result = ctx->refScratchTexture(desc, GrContext::kExact_ScratchTexMatch); |
| 159 if (pixels) { | 194 if (pixels && result) { |
| 160 result->writePixels(0, 0, bm.width(), bm.height(), desc.fConfig, pixels, rowBytes); | 195 result->writePixels(0, 0, desc.fWidth, desc.fHeight, desc.fConfig, pixels, rowBytes); |
| 161 } | 196 } |
| 162 } | 197 } |
| 163 return result; | 198 return result; |
| 164 } | 199 } |
| 165 | 200 |
| 201 // creates a new texture that is the input texture scaled up to the next power of two in |
| 202 // width or height. If optionalKey is valid it will be set on the new texture. stretch |
| 203 // controls whether the scaling is done using nearest or bilerp filtering. |
| 204 GrTexture* resize_texture(GrTexture* inputTexture, Stretch stretch, |
| 205 const GrContentKey& optionalKey) { |
| 206 SkASSERT(kNo_Stretch != stretch); |
| 207 |
| 208 GrContext* context = inputTexture->getContext(); |
| 209 SkASSERT(context); |
| 210 |
| 211 // Either it's a cache miss or the original wasn't cached to begin with. |
| 212 GrSurfaceDesc rtDesc = inputTexture->desc(); |
| 213 rtDesc.fFlags = rtDesc.fFlags | |
| 214 kRenderTarget_GrSurfaceFlag | |
| 215 kNoStencil_GrSurfaceFlag; |
| 216 rtDesc.fWidth = GrNextPow2(rtDesc.fWidth); |
| 217 rtDesc.fHeight = GrNextPow2(rtDesc.fHeight); |
| 218 rtDesc.fConfig = GrMakePixelConfigUncompressed(rtDesc.fConfig); |
| 219 |
| 220 // If the config isn't renderable try converting to either A8 or an 32 bit config. Otherwise, |
| 221 // fail. |
| 222 if (!context->isConfigRenderable(rtDesc.fConfig, false)) { |
| 223 if (GrPixelConfigIsAlphaOnly(rtDesc.fConfig)) { |
| 224 if (context->isConfigRenderable(kAlpha_8_GrPixelConfig, false)) { |
| 225 rtDesc.fConfig = kAlpha_8_GrPixelConfig; |
| 226 } else if (context->isConfigRenderable(kSkia8888_GrPixelConfig, false)) { |
| 227 rtDesc.fConfig = kSkia8888_GrPixelConfig; |
| 228 } else { |
| 229 return NULL; |
| 230 } |
| 231 } else if (kRGB_GrColorComponentFlags == |
| 232 (kRGB_GrColorComponentFlags & GrPixelConfigComponentMask(rtDesc.fConfig))) { |
| 233 if (context->isConfigRenderable(kSkia8888_GrPixelConfig, false)) { |
| 234 rtDesc.fConfig = kSkia8888_GrPixelConfig; |
| 235 } else { |
| 236 return NULL; |
| 237 } |
| 238 } else { |
| 239 return NULL; |
| 240 } |
| 241 } |
| 242 |
| 243 GrTexture* resized = create_texture_for_bmp(context, optionalKey, rtDesc, NULL, 0); |
| 244 |
| 245 if (!resized) { |
| 246 return NULL; |
| 247 } |
| 248 GrPaint paint; |
| 249 |
| 250 // If filtering is not desired then we want to ensure all texels in the resampled image are |
| 251 // copies of texels from the original. |
| 252 GrTextureParams params(SkShader::kClamp_TileMode, |
| 253 kBilerp_Stretch == stretch ? GrTextureParams::kBilerp_FilterMode : |
| 254 GrTextureParams::kNone_FilterMode); |
| 255 paint.addColorTextureProcessor(inputTexture, SkMatrix::I(), params); |
| 256 |
| 257 SkRect rect = SkRect::MakeWH(SkIntToScalar(rtDesc.fWidth), SkIntToScalar(rtDesc.fHeight)); |
| 258 SkRect localRect = SkRect::MakeWH(1.f, 1.f); |
| 259 |
| 260 GrContext::AutoRenderTarget autoRT(context, resized->asRenderTarget()); |
| 261 GrContext::AutoClip ac(context, GrContext::AutoClip::kWideOpen_InitialClip); |
| 262 context->drawNonAARectToRect(paint, SkMatrix::I(), rect, localRect); |
| 263 |
| 264 return resized; |
| 265 } |
| 266 |
| 166 #ifndef SK_IGNORE_ETC1_SUPPORT | 267 #ifndef SK_IGNORE_ETC1_SUPPORT |
| 167 static GrTexture *load_etc1_texture(GrContext* ctx, bool cache, | 268 static GrTexture *load_etc1_texture(GrContext* ctx, const GrContentKey& optionalKey, |
| 168 const GrTextureParams* params, | |
| 169 const SkBitmap &bm, GrSurfaceDesc desc) { | 269 const SkBitmap &bm, GrSurfaceDesc desc) { |
| 170 SkAutoTUnref<SkData> data(bm.pixelRef()->refEncodedData()); | 270 SkAutoTUnref<SkData> data(bm.pixelRef()->refEncodedData()); |
| 171 | 271 |
| 172 // Is this even encoded data? | 272 // Is this even encoded data? |
| 173 if (NULL == data) { | 273 if (NULL == data) { |
| 174 return NULL; | 274 return NULL; |
| 175 } | 275 } |
| 176 | 276 |
| 177 // Is this a valid PKM encoded data? | 277 // Is this a valid PKM encoded data? |
| 178 const uint8_t *bytes = data->bytes(); | 278 const uint8_t *bytes = data->bytes(); |
| (...skipping 24 matching lines...) |
| 203 if (ktx.width() != bm.width() || ktx.height() != bm.height()) { | 303 if (ktx.width() != bm.width() || ktx.height() != bm.height()) { |
| 204 return NULL; | 304 return NULL; |
| 205 } | 305 } |
| 206 | 306 |
| 207 bytes = ktx.pixelData(); | 307 bytes = ktx.pixelData(); |
| 208 desc.fConfig = kETC1_GrPixelConfig; | 308 desc.fConfig = kETC1_GrPixelConfig; |
| 209 } else { | 309 } else { |
| 210 return NULL; | 310 return NULL; |
| 211 } | 311 } |
| 212 | 312 |
| 213 return sk_gr_allocate_texture(ctx, cache, params, bm, desc, bytes, 0); | 313 return create_texture_for_bmp(ctx, optionalKey, desc, bytes, 0); |
| 214 } | 314 } |
| 215 #endif // SK_IGNORE_ETC1_SUPPORT | 315 #endif // SK_IGNORE_ETC1_SUPPORT |
| 216 | 316 |
| 217 static GrTexture *load_yuv_texture(GrContext* ctx, bool cache, const GrTextureParams* params, | 317 static GrTexture* load_yuv_texture(GrContext* ctx, const GrContentKey& optionalKey, |
| 218 const SkBitmap& bm, const GrSurfaceDesc& desc) { | 318 const SkBitmap& bm, const GrSurfaceDesc& desc) { |
| 219 // Subsets are not supported, the whole pixelRef is loaded when using YUV decoding | 319 // Subsets are not supported, the whole pixelRef is loaded when using YUV decoding |
| 220 SkPixelRef* pixelRef = bm.pixelRef(); | 320 SkPixelRef* pixelRef = bm.pixelRef(); |
| 221 if ((NULL == pixelRef) || | 321 if ((NULL == pixelRef) || |
| 222 (pixelRef->info().width() != bm.info().width()) || | 322 (pixelRef->info().width() != bm.info().width()) || |
| 223 (pixelRef->info().height() != bm.info().height())) { | 323 (pixelRef->info().height() != bm.info().height())) { |
| 224 return NULL; | 324 return NULL; |
| 225 } | 325 } |
| 226 | 326 |
| 227 SkYUVPlanesCache::Info yuvInfo; | 327 SkYUVPlanesCache::Info yuvInfo; |
| (...skipping 47 matching lines...) |
| 275 yuvDesc.fConfig, planes[i], yuvInfo.fRowBytes[i])) { | 375 yuvDesc.fConfig, planes[i], yuvInfo.fRowBytes[i])) { |
| 276 return NULL; | 376 return NULL; |
| 277 } | 377 } |
| 278 } | 378 } |
| 279 | 379 |
| 280 GrSurfaceDesc rtDesc = desc; | 380 GrSurfaceDesc rtDesc = desc; |
| 281 rtDesc.fFlags = rtDesc.fFlags | | 381 rtDesc.fFlags = rtDesc.fFlags | |
| 282 kRenderTarget_GrSurfaceFlag | | 382 kRenderTarget_GrSurfaceFlag | |
| 283 kNoStencil_GrSurfaceFlag; | 383 kNoStencil_GrSurfaceFlag; |
| 284 | 384 |
| 285 GrTexture* result = sk_gr_allocate_texture(ctx, cache, params, bm, rtDesc, NULL, 0); | 385 GrTexture* result = create_texture_for_bmp(ctx, optionalKey, rtDesc, NULL, 0); |
| 386 if (!result) { |
| 387 return NULL; |
| 388 } |
| 286 | 389 |
| 287 GrRenderTarget* renderTarget = result ? result->asRenderTarget() : NULL; | 390 GrRenderTarget* renderTarget = result->asRenderTarget(); |
| 288 if (renderTarget) { | 391 SkASSERT(renderTarget); |
| 289 SkAutoTUnref<GrFragmentProcessor> yuvToRgbProcessor(GrYUVtoRGBEffect::Create( | 392 |
| 290 yuvTextures[0], yuvTextures[1], yuvTextures[2], yuvInfo.fColorSpace)); | 393 SkAutoTUnref<GrFragmentProcessor> |
| 291 GrPaint paint; | 394 yuvToRgbProcessor(GrYUVtoRGBEffect::Create(yuvTextures[0], yuvTextures[1], yuvTextures[2], |
| 292 paint.addColorProcessor(yuvToRgbProcessor); | 395 yuvInfo.fColorSpace)); |
| 293 SkRect r = SkRect::MakeWH(SkIntToScalar(yuvInfo.fSize[0].fWidth), | 396 GrPaint paint; |
| 294 SkIntToScalar(yuvInfo.fSize[0].fHeight)); | 397 paint.addColorProcessor(yuvToRgbProcessor); |
| 295 GrContext::AutoRenderTarget autoRT(ctx, renderTarget); | 398 SkRect r = SkRect::MakeWH(SkIntToScalar(yuvInfo.fSize[0].fWidth), |
| 296 GrContext::AutoClip ac(ctx, GrContext::AutoClip::kWideOpen_InitialClip); | 399 SkIntToScalar(yuvInfo.fSize[0].fHeight)); |
| 297 ctx->drawRect(paint, SkMatrix::I(), r); | 400 GrContext::AutoRenderTarget autoRT(ctx, renderTarget); |
| 298 } else { | 401 GrContext::AutoClip ac(ctx, GrContext::AutoClip::kWideOpen_InitialClip); |
| 299 SkSafeSetNull(result); | 402 ctx->drawRect(paint, SkMatrix::I(), r); |
| 300 } | |
| 301 | 403 |
| 302 return result; | 404 return result; |
| 303 } | 405 } |
| 304 | 406 |
| 305 static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx, | 407 static GrTexture* create_unstretched_bitmap_texture(GrContext* ctx, |
| 306 bool cache, | 408 const SkBitmap& origBitmap, |
| 307 const GrTextureParams* params, | 409 const GrContentKey& optionalKey) { |
| 308 const SkBitmap& origBitmap) { | |
| 309 SkBitmap tmpBitmap; | 410 SkBitmap tmpBitmap; |
| 310 | 411 |
| 311 const SkBitmap* bitmap = &origBitmap; | 412 const SkBitmap* bitmap = &origBitmap; |
| 312 | 413 |
| 313 GrSurfaceDesc desc; | 414 GrSurfaceDesc desc; |
| 314 generate_bitmap_texture_desc(*bitmap, &desc); | 415 generate_bitmap_texture_desc(*bitmap, &desc); |
| 315 | 416 |
| 316 if (kIndex_8_SkColorType == bitmap->colorType()) { | 417 if (kIndex_8_SkColorType == bitmap->colorType()) { |
| 317 // build_compressed_data doesn't do npot->pot expansion | 418 if (ctx->supportsIndex8PixelConfig()) { |
| 318 // and paletted textures can't be sub-updated | |
| 319 if (cache && ctx->supportsIndex8PixelConfig(params, bitmap->width(), bit
map->height())) { | |
| 320 size_t imageSize = GrCompressedFormatDataSize(kIndex_8_GrPixelConfig
, | 419 size_t imageSize = GrCompressedFormatDataSize(kIndex_8_GrPixelConfig
, |
| 321 bitmap->width(), bitma
p->height()); | 420 bitmap->width(), bitma
p->height()); |
| 322 SkAutoMalloc storage(imageSize); | 421 SkAutoMalloc storage(imageSize); |
| 323 build_index8_data(storage.get(), origBitmap); | 422 build_index8_data(storage.get(), origBitmap); |
| 324 | 423 |
| 325 // our compressed data will be trimmed, so pass width() for its | 424 // our compressed data will be trimmed, so pass width() for its |
| 326 // "rowBytes", since they are the same now. | 425 // "rowBytes", since they are the same now. |
| 327 return sk_gr_allocate_texture(ctx, cache, params, origBitmap, | 426 return create_texture_for_bmp(ctx, optionalKey, desc, storage.get(),
bitmap->width()); |
| 328 desc, storage.get(), bitmap->width()); | |
| 329 } else { | 427 } else { |
| 330 origBitmap.copyTo(&tmpBitmap, kN32_SkColorType); | 428 origBitmap.copyTo(&tmpBitmap, kN32_SkColorType); |
| 331 // now bitmap points to our temp, which has been promoted to 32bits | 429 // now bitmap points to our temp, which has been promoted to 32bits |
| 332 bitmap = &tmpBitmap; | 430 bitmap = &tmpBitmap; |
| 333 desc.fConfig = SkImageInfo2GrPixelConfig(bitmap->info()); | 431 desc.fConfig = SkImageInfo2GrPixelConfig(bitmap->info()); |
| 334 } | 432 } |
| 335 } | 433 } |
| 336 | 434 |
| 337 // Is this an ETC1 encoded texture? | 435 // Is this an ETC1 encoded texture? |
| 338 #ifndef SK_IGNORE_ETC1_SUPPORT | 436 #ifndef SK_IGNORE_ETC1_SUPPORT |
| 339 else if ( | 437 else if ( |
| 340 // We do not support scratch ETC1 textures, hence they should all be at
least | 438 // We do not support scratch ETC1 textures, hence they should all be at
least |
| 341 // trying to go to the cache. | 439 // trying to go to the cache. |
| 342 cache | 440 optionalKey.isValid() |
| 343 // Make sure that the underlying device supports ETC1 textures before we
go ahead | 441 // Make sure that the underlying device supports ETC1 textures before we
go ahead |
| 344 // and check the data. | 442 // and check the data. |
| 345 && ctx->getGpu()->caps()->isConfigTexturable(kETC1_GrPixelConfig) | 443 && ctx->getGpu()->caps()->isConfigTexturable(kETC1_GrPixelConfig) |
| 346 // If the bitmap had compressed data and was then uncompressed, it'll st
ill return | 444 // If the bitmap had compressed data and was then uncompressed, it'll st
ill return |
| 347 // compressed data on 'refEncodedData' and upload it. Probably not good,
since if | 445 // compressed data on 'refEncodedData' and upload it. Probably not good,
since if |
| 348 // the bitmap has available pixels, then they might not be what the deco
mpressed | 446 // the bitmap has available pixels, then they might not be what the deco
mpressed |
| 349 // data is. | 447 // data is. |
| 350 && !(bitmap->readyToDraw())) { | 448 && !(bitmap->readyToDraw())) { |
| 351 GrTexture *texture = load_etc1_texture(ctx, cache, params, *bitmap, desc
); | 449 GrTexture *texture = load_etc1_texture(ctx, optionalKey, *bitmap, desc); |
| 352 if (texture) { | 450 if (texture) { |
| 353 return texture; | 451 return texture; |
| 354 } | 452 } |
| 355 } | 453 } |
| 356 #endif // SK_IGNORE_ETC1_SUPPORT | 454 #endif // SK_IGNORE_ETC1_SUPPORT |
| 357 | 455 |
| 358 else { | 456 else { |
| 359 GrTexture *texture = load_yuv_texture(ctx, cache, params, *bitmap, desc)
; | 457 GrTexture *texture = load_yuv_texture(ctx, optionalKey, *bitmap, desc); |
| 360 if (texture) { | 458 if (texture) { |
| 361 return texture; | 459 return texture; |
| 362 } | 460 } |
| 363 } | 461 } |
| 364 SkAutoLockPixels alp(*bitmap); | 462 SkAutoLockPixels alp(*bitmap); |
| 365 if (!bitmap->readyToDraw()) { | 463 if (!bitmap->readyToDraw()) { |
| 366 return NULL; | 464 return NULL; |
| 367 } | 465 } |
| 368 | 466 |
| 369 return sk_gr_allocate_texture(ctx, cache, params, origBitmap, desc, | 467 return create_texture_for_bmp(ctx, optionalKey, desc, bitmap->getPixels(), b
itmap->rowBytes()); |
| 370 bitmap->getPixels(), bitmap->rowBytes()); | 468 } |
| 469 |
| 470 static GrTexture* create_bitmap_texture(GrContext* ctx, |
| 471 const SkBitmap& bmp, |
| 472 Stretch stretch, |
| 473 const GrContentKey& unstretchedKey, |
| 474 const GrContentKey& stretchedKey) { |
| 475 if (kNo_Stretch != stretch) { |
| 476 SkAutoTUnref<GrTexture> unstretched; |
| 477 // Check if we have the unstretched version in the cache, if not create it. |
| 478 if (unstretchedKey.isValid()) { |
| 479 unstretched.reset(ctx->findAndRefCachedTexture(unstretchedKey)); |
| 480 } |
| 481 if (!unstretched) { |
| 482 unstretched.reset(create_unstretched_bitmap_texture(ctx, bmp, unstretchedKey)); |
| 483 if (!unstretched) { |
| 484 return NULL; |
| 485 } |
| 486 } |
| 487 GrTexture* resized = resize_texture(unstretched, stretch, stretchedKey); |
| 488 return resized; |
| 489 } |
| 490 |
| 491 return create_unstretched_bitmap_texture(ctx, bmp, unstretchedKey); |
| 492 |
| 371 } | 493 } |
| 372 | 494 |
| 373 static GrTexture* get_texture_backing_bmp(const SkBitmap& bitmap, const GrContext* context, | 495 static GrTexture* get_texture_backing_bmp(const SkBitmap& bitmap, const GrContext* context, |
| 374 const GrTextureParams* params) { | 496 const GrTextureParams* params) { |
| 375 if (GrTexture* texture = bitmap.getTexture()) { | 497 if (GrTexture* texture = bitmap.getTexture()) { |
| 376 // Our texture-resizing-for-tiling used to upscale NPOT textures for tiling only works with | 498 // Our texture-resizing-for-tiling used to upscale NPOT textures for tiling only works with |
| 377 // content-key cached resources. Rather than invest in that legacy code path, we'll just | 499 // content-key cached resources. Rather than invest in that legacy code path, we'll just |
| 378 // take the horribly slow route of causing a cache miss which will cause the pixels to be | 500 // take the horribly slow route of causing a cache miss which will cause the pixels to be |
| 379 // read and reuploaded to a texture with a content key. | 501 // read and reuploaded to a texture with a content key. |
| 380 if (params && !context->getGpu()->caps()->npotTextureTileSupport() && | 502 if (params && !context->getGpu()->caps()->npotTextureTileSupport() && |
| 381 (params->isTiled() || GrTextureParams::kMipMap_FilterMode == params->filterMode())) { | 503 (params->isTiled() || GrTextureParams::kMipMap_FilterMode == params->filterMode())) { |
| 382 return NULL; | 504 return NULL; |
| 383 } | 505 } |
| 384 return texture; | 506 return texture; |
| 385 } | 507 } |
| 386 return NULL; | 508 return NULL; |
| 387 } | 509 } |
| 388 | 510 |
| 389 bool GrIsBitmapInCache(const GrContext* ctx, | 511 bool GrIsBitmapInCache(const GrContext* ctx, |
| 390 const SkBitmap& bitmap, | 512 const SkBitmap& bitmap, |
| 391 const GrTextureParams* params) { | 513 const GrTextureParams* params) { |
| 392 if (get_texture_backing_bmp(bitmap, ctx, params)) { | 514 if (get_texture_backing_bmp(bitmap, ctx, params)) { |
| 393 return true; | 515 return true; |
| 394 } | 516 } |
| 395 | 517 |
| 396 GrContentKey key; | 518 // We don't cache volatile bitmaps |
| 397 generate_bitmap_key(bitmap, &key); | 519 if (bitmap.isVolatile()) { |
| 520 return false; |
| 521 } |
| 522 |
| 523 // If it is inherently texture backed, consider it in the cache |
| 524 if (bitmap.getTexture()) { |
| 525 return true; |
| 526 } |
| 527 |
| 528 Stretch stretch = get_stretch_type(ctx, bitmap.width(), bitmap.height(), params); |
| 529 GrContentKey key, resizedKey; |
| 530 generate_bitmap_keys(bitmap, stretch, &key, &resizedKey); |
| 398 | 531 |
| 399 GrSurfaceDesc desc; | 532 GrSurfaceDesc desc; |
| 400 generate_bitmap_texture_desc(bitmap, &desc); | 533 generate_bitmap_texture_desc(bitmap, &desc); |
| 401 return ctx->isTextureInCache(desc, key, params); | 534 return ctx->isResourceInCache((kNo_Stretch == stretch) ? key : resizedKey); |
| 402 } | 535 } |
| 403 | 536 |
| 404 GrTexture* GrRefCachedBitmapTexture(GrContext* ctx, | 537 GrTexture* GrRefCachedBitmapTexture(GrContext* ctx, |
| 405 const SkBitmap& bitmap, | 538 const SkBitmap& bitmap, |
| 406 const GrTextureParams* params) { | 539 const GrTextureParams* params) { |
| 407 GrTexture* result = get_texture_backing_bmp(bitmap, ctx, params); | 540 GrTexture* result = get_texture_backing_bmp(bitmap, ctx, params); |
| 408 if (result) { | 541 if (result) { |
| 409 return SkRef(result); | 542 return SkRef(result); |
| 410 } | 543 } |
| 411 | 544 |
| 412 bool cache = !bitmap.isVolatile(); | 545 Stretch stretch = get_stretch_type(ctx, bitmap.width(), bitmap.height(), params); |
| 546 GrContentKey key, resizedKey; |
| 413 | 547 |
| 414 if (cache) { | 548 if (!bitmap.isVolatile()) { |
| 415 // If the bitmap isn't changing try to find a cached copy first. | 549 // If the bitmap isn't changing try to find a cached copy first. |
| 550 generate_bitmap_keys(bitmap, stretch, &key, &resizedKey); |
| 416 | 551 |
| 417 GrContentKey key; | 552 result = ctx->findAndRefCachedTexture(resizedKey.isValid() ? resizedKey : key); |
| 418 generate_bitmap_key(bitmap, &key); | 553 if (result) { |
| 554 return result; |
| 555 } |
| 556 } |
| 419 | 557 |
| 420 GrSurfaceDesc desc; | 558 result = create_bitmap_texture(ctx, bitmap, stretch, key, resizedKey); |
| 421 generate_bitmap_texture_desc(bitmap, &desc); | 559 if (result) { |
| 560 return result; |
| 561 } |
| 422 | 562 |
| 423 result = ctx->findAndRefTexture(desc, key, params); | 563 SkDebugf("---- failed to create texture for cache [%d %d]\n", |
| 424 } | 564 bitmap.width(), bitmap.height()); |
| 425 if (NULL == result) { | 565 |
| 426 result = sk_gr_create_bitmap_texture(ctx, cache, params, bitmap); | 566 return NULL; |
| 427 } | |
| 428 if (NULL == result) { | |
| 429 SkDebugf("---- failed to create texture for cache [%d %d]\n", | |
| 430 bitmap.width(), bitmap.height()); | |
| 431 } | |
| 432 return result; | |
| 433 } | 567 } |
| 434 | |
| 435 /////////////////////////////////////////////////////////////////////////////// | 568 /////////////////////////////////////////////////////////////////////////////// |
| 436 | 569 |
| 437 // alphatype is ignore for now, but if GrPixelConfig is expanded to encompass | 570 // alphatype is ignore for now, but if GrPixelConfig is expanded to encompass |
| 438 // alpha info, that will be considered. | 571 // alpha info, that will be considered. |
| 439 GrPixelConfig SkImageInfo2GrPixelConfig(SkColorType ct, SkAlphaType, SkColorProfileType pt) { | 572 GrPixelConfig SkImageInfo2GrPixelConfig(SkColorType ct, SkAlphaType, SkColorProfileType pt) { |
| 440 switch (ct) { | 573 switch (ct) { |
| 441 case kUnknown_SkColorType: | 574 case kUnknown_SkColorType: |
| 442 return kUnknown_GrPixelConfig; | 575 return kUnknown_GrPixelConfig; |
| 443 case kAlpha_8_SkColorType: | 576 case kAlpha_8_SkColorType: |
| 444 return kAlpha_8_GrPixelConfig; | 577 return kAlpha_8_GrPixelConfig; |
| (...skipping 139 matching lines...) |
| 584 if (shader->asFragmentProcessor(context, skPaint, viewM, NULL, &paintColor, &fp) && fp) { | 717 if (shader->asFragmentProcessor(context, skPaint, viewM, NULL, &paintColor, &fp) && fp) { |
| 585 grPaint->addColorProcessor(fp)->unref(); | 718 grPaint->addColorProcessor(fp)->unref(); |
| 586 constantColor = false; | 719 constantColor = false; |
| 587 } | 720 } |
| 588 } | 721 } |
| 589 | 722 |
| 590 // The grcolor is automatically set when calling asFragmentProcessor. | 723 // The grcolor is automatically set when calling asFragmentProcessor. |
| 591 // If the shader can be seen as an effect it returns true and adds its effect to the grpaint. | 724 // If the shader can be seen as an effect it returns true and adds its effect to the grpaint. |
| 592 SkPaint2GrPaintNoShader(context, skPaint, paintColor, constantColor, grPaint); | 725 SkPaint2GrPaintNoShader(context, skPaint, paintColor, constantColor, grPaint); |
| 593 } | 726 } |
| OLD | NEW |
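
A note on the key scheme introduced in this patch: generate_bitmap_keys() hashes the bitmap's generation ID, pixel-ref origin, and dimensions into a base GrContentKey, and make_resize_key() layers a stretch tag on top of that key in a separate domain, so the NPOT-resized copy caches independently of the original. The following is a minimal, self-contained sketch of that layering in plain C++. The Key struct, the domain constants, and the helper names are stand-ins invented for illustration; they are not Skia's actual GrContentKey API.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Mirrors the Stretch enum added by the patch.
enum Stretch { kNo_Stretch, kBilerp_Stretch, kNearest_Stretch };

// A content key here is just a domain tag plus a list of 32-bit words,
// loosely mirroring how GrContentKey::Builder is used in generate_bitmap_keys().
struct Key {
    uint32_t domain = 0;
    std::vector<uint32_t> data;
    bool isValid() const { return !data.empty(); }
};

// Base key: generation ID, pixel-ref origin, and dimensions packed into one word,
// so bitmaps made by extractSubset() hash differently from the full bitmap.
Key make_bitmap_key(uint32_t genID, int32_t originX, int32_t originY,
                    uint16_t width, uint16_t height) {
    Key k;
    k.domain = 1;  // stands in for a GenerateDomain() result for bitmaps
    k.data = { genID,
               static_cast<uint32_t>(originX),
               static_cast<uint32_t>(originY),
               static_cast<uint32_t>(width) | (static_cast<uint32_t>(height) << 16) };
    return k;
}

// Resized key: the original key's words plus one extra word recording the stretch,
// in a separate domain -- the same layering make_resize_key() performs.
Key make_resize_key(const Key& orig, Stretch stretch) {
    Key k;
    if (!orig.isValid() || stretch == kNo_Stretch) {
        return k;  // stays invalid, matching the patch's early-out
    }
    k.domain = 2;  // stands in for the resize domain
    k.data = orig.data;
    k.data.push_back(static_cast<uint32_t>(stretch));
    return k;
}

int main() {
    Key base = make_bitmap_key(/*genID=*/42, 0, 0, 300, 200);  // 300x200 is NPOT
    Key stretched = make_resize_key(base, kBilerp_Stretch);
    std::printf("base words: %zu, stretched words: %zu\n",
                base.data.size(), stretched.data.size());      // prints 4 vs 5
    return 0;
}
```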
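Similarly, the rewritten GrRefCachedBitmapTexture()/create_bitmap_texture() pair in the NEW column boils down to: give volatile bitmaps no content keys, probe the cache with the stretched key when NPOT tiling forces a resize (otherwise the base key), build the unstretched texture on a miss, then resize it up to a power of two and cache the result under the stretched key. The sketch below restates that flow outside Skia; the Cache, Texture, and helper names are hypothetical stand-ins, not Skia types, and it is illustrative only.

```cpp
#include <cstdio>
#include <memory>
#include <string>
#include <unordered_map>

// Invented stand-ins for GrTexture and a resource cache keyed by content keys.
struct Texture { int w, h; };
using Cache = std::unordered_map<std::string, std::shared_ptr<Texture>>;

enum Stretch { kNo_Stretch, kBilerp_Stretch, kNearest_Stretch };

std::shared_ptr<Texture> create_unstretched(Cache& cache, const std::string& key,
                                            int w, int h) {
    auto tex = std::make_shared<Texture>(Texture{w, h});
    if (!key.empty()) cache[key] = tex;  // only keyed (non-volatile) bitmaps are cached
    return tex;
}

std::shared_ptr<Texture> resize_to_pot(Cache& cache, const std::string& stretchedKey,
                                       const Texture& src) {
    // Stand-in for resize_texture(): round dimensions up to the next power of two.
    auto next_pow2 = [](int v) { int p = 1; while (p < v) p <<= 1; return p; };
    auto tex = std::make_shared<Texture>(Texture{next_pow2(src.w), next_pow2(src.h)});
    if (!stretchedKey.empty()) cache[stretchedKey] = tex;
    return tex;
}

std::shared_ptr<Texture> ref_cached_bitmap_texture(Cache& cache, int w, int h,
                                                   bool volatileBmp, Stretch stretch) {
    std::string key, stretchedKey;
    if (!volatileBmp) {  // volatile bitmaps get no content keys at all
        key = "bmp:" + std::to_string(w) + "x" + std::to_string(h);
        if (stretch != kNo_Stretch) stretchedKey = key + ":stretch";
        auto it = cache.find(stretchedKey.empty() ? key : stretchedKey);
        if (it != cache.end()) return it->second;  // cache hit
    }
    if (stretch == kNo_Stretch) return create_unstretched(cache, key, w, h);
    // Reuse a cached unstretched copy if one exists, otherwise build it first.
    auto unstretched = (!key.empty() && cache.count(key))
                           ? cache[key]
                           : create_unstretched(cache, key, w, h);
    return resize_to_pot(cache, stretchedKey, *unstretched);
}

int main() {
    Cache cache;
    auto t = ref_cached_bitmap_texture(cache, 300, 200, /*volatileBmp=*/false, kBilerp_Stretch);
    std::printf("resized to %dx%d, cache entries: %zu\n", t->w, t->h, cache.size());
    return 0;
}
```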