OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "SkBitmap.h" | 8 #include "SkBitmap.h" |
9 #include "SkBitmapCache.h" | 9 #include "SkBitmapCache.h" |
10 #include "SkImage_Base.h" | 10 #include "SkImage_Base.h" |
11 #include "SkImageCacherator.h" | 11 #include "SkImageCacherator.h" |
12 #include "SkMallocPixelRef.h" | 12 #include "SkMallocPixelRef.h" |
| 13 #include "SkMutex.h" |
13 #include "SkNextID.h" | 14 #include "SkNextID.h" |
14 #include "SkPixelRef.h" | 15 #include "SkPixelRef.h" |
15 #include "SkResourceCache.h" | 16 #include "SkResourceCache.h" |
16 | 17 |
17 #if SK_SUPPORT_GPU | 18 #if SK_SUPPORT_GPU |
18 #include "GrContext.h" | 19 #include "GrContext.h" |
19 #include "GrGpuResourcePriv.h" | 20 #include "GrGpuResourcePriv.h" |
20 #include "GrImageIDTextureAdjuster.h" | 21 #include "GrImageIDTextureAdjuster.h" |
21 #include "GrResourceKey.h" | 22 #include "GrResourceKey.h" |
22 #include "GrTextureParams.h" | 23 #include "GrTextureParams.h" |
23 #include "GrYUVProvider.h" | 24 #include "GrYUVProvider.h" |
24 #include "SkGr.h" | 25 #include "SkGr.h" |
25 #include "SkGrPriv.h" | 26 #include "SkGrPriv.h" |
26 #endif | 27 #endif |
27 | 28 |
28 // Until we actually have codecs/etc. that can contain/support a GPU texture format | 29 // Until we actually have codecs/etc. that can contain/support a GPU texture format |
29 // skip this step, since for some generators, returning their encoded data as a SkData | 30 // skip this step, since for some generators, returning their encoded data as a SkData |
30 // can be somewhat expensive, and this call doesn't indicate to the generator that we're | 31 // can be somewhat expensive, and this call doesn't indicate to the generator that we're |
31 // only interested in GPU datas... | 32 // only interested in GPU datas... |
32 // see skbug.com/ 4971, 5128, ... | 33 // see skbug.com/ 4971, 5128, ... |
33 //#define SK_SUPPORT_COMPRESSED_TEXTURES_IN_CACHERATOR | 34 //#define SK_SUPPORT_COMPRESSED_TEXTURES_IN_CACHERATOR |
34 | 35 |
| 36 // Ref-counted tuple(SkImageGenerator, SkMutex) which allows sharing of one generator |
| 37 // among several cacherators. |
| 38 class SkImageCacherator::SharedGenerator final : public SkNVRefCnt<SharedGenerator> { |
| 39 public: |
| 40 static sk_sp<SharedGenerator> Make(SkImageGenerator* gen) { |
| 41 return gen ? sk_sp<SharedGenerator>(new SharedGenerator(gen)) : nullptr; |
| 42 } |
| 43 |
| 44 private: |
| 45     explicit SharedGenerator(SkImageGenerator* gen) : fGenerator(gen) { SkASSERT(gen); } |
| 46 |
| 47 friend class ScopedGenerator; |
| 48 |
| 49 std::unique_ptr<SkImageGenerator> fGenerator; |
| 50 SkMutex fMutex; |
| 51 }; |
| 52 |
| 53 |
| 54 // Helper for exclusive access to a shared generator. |
| 55 class SkImageCacherator::ScopedGenerator { |
| 56 public: |
| 57 ScopedGenerator(const sk_sp<SharedGenerator>& gen) |
| 58 : fSharedGenerator(gen) |
| 59 , fAutoAquire(gen->fMutex) {} |
| 60 |
| 61 SkImageGenerator* operator->() const { |
| 62 fSharedGenerator->fMutex.assertHeld(); |
| 63 return fSharedGenerator->fGenerator.get(); |
| 64 } |
| 65 |
| 66 operator SkImageGenerator*() const { |
| 67 fSharedGenerator->fMutex.assertHeld(); |
| 68 return fSharedGenerator->fGenerator.get(); |
| 69 } |
| 70 |
| 71 private: |
| 72 const sk_sp<SharedGenerator>& fSharedGenerator; |
| 73 SkAutoExclusive fAutoAquire; |
| 74 }; |
| 75 |
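The SharedGenerator/ScopedGenerator pair introduced above replaces the cacherator's privately owned generator with a ref-counted generator guarded by a mutex, so several cacherators can share one decoder while each call still gets exclusive access. For illustration only, a minimal standalone sketch of the same pattern, using std::shared_ptr and std::mutex as stand-ins for sk_sp and SkMutex (Generator, SharedGen, and ScopedGen are hypothetical names, not Skia API):

// Sketch only: illustrates the ref-counted generator + RAII lock pattern.
#include <cassert>
#include <memory>
#include <mutex>

struct Generator {                      // stand-in for SkImageGenerator
    int decodeSomething() { return 42; }
};

class SharedGen {                       // ref-counted (generator, mutex) tuple
public:
    static std::shared_ptr<SharedGen> Make(Generator* gen) {
        return gen ? std::shared_ptr<SharedGen>(new SharedGen(gen)) : nullptr;
    }
private:
    explicit SharedGen(Generator* gen) : fGen(gen) { assert(gen); }
    friend class ScopedGen;
    std::unique_ptr<Generator> fGen;
    std::mutex                 fMutex;
};

class ScopedGen {                       // RAII exclusive access, like ScopedGenerator
public:
    explicit ScopedGen(const std::shared_ptr<SharedGen>& shared)
        : fShared(shared), fLock(shared->fMutex) {}
    Generator* operator->() const { return fShared->fGen.get(); }
private:
    const std::shared_ptr<SharedGen>& fShared;
    std::lock_guard<std::mutex>       fLock;
};

int main() {
    auto shared = SharedGen::Make(new Generator);   // shared by many owners
    int v = ScopedGen(shared)->decodeSomething();   // lock held only for this call
    return v == 42 ? 0 : 1;
}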
35 SkImageCacherator::Validator::Validator(SkImageGenerator* gen, const SkIRect* subset) | 76 SkImageCacherator::Validator::Validator(SkImageGenerator* gen, const SkIRect* subset) |
36     // We are required to take ownership of gen, regardless of whether we instantiate a cacherator | 77     // We are required to take ownership of gen, regardless of whether we instantiate a cacherator |
37     // or not. On instantiation, the client is responsible for transferring ownership. | 78     // or not. On instantiation, the client is responsible for transferring ownership. |
38 : fGenerator(gen) { | 79 : fSharedGenerator(SkImageCacherator::SharedGenerator::Make(gen)) { |
39 | 80 |
40 if (!gen) { | 81 if (!fSharedGenerator) { |
41 return; | 82 return; |
42 } | 83 } |
43 | 84 |
44 const SkImageInfo& info = gen->getInfo(); | 85 const SkImageInfo& info = gen->getInfo(); |
45 if (info.isEmpty()) { | 86 if (info.isEmpty()) { |
46 fGenerator.reset(); | 87 fSharedGenerator.reset(); |
47 return; | 88 return; |
48 } | 89 } |
49 | 90 |
50 fUniqueID = gen->uniqueID(); | 91 fUniqueID = gen->uniqueID(); |
51 const SkIRect bounds = SkIRect::MakeWH(info.width(), info.height()); | 92 const SkIRect bounds = SkIRect::MakeWH(info.width(), info.height()); |
52 if (subset) { | 93 if (subset) { |
53 if (!bounds.contains(*subset)) { | 94 if (!bounds.contains(*subset)) { |
54 fGenerator.reset(); | 95 fSharedGenerator.reset(); |
55 return; | 96 return; |
56 } | 97 } |
57 if (*subset != bounds) { | 98 if (*subset != bounds) { |
58             // we need a different uniqueID since we really are a subset of the raw generator | 99             // we need a different uniqueID since we really are a subset of the raw generator |
59 fUniqueID = SkNextID::ImageID(); | 100 fUniqueID = SkNextID::ImageID(); |
60 } | 101 } |
61 } else { | 102 } else { |
62 subset = &bounds; | 103 subset = &bounds; |
63 } | 104 } |
64 | 105 |
65 fInfo = info.makeWH(subset->width(), subset->height()); | 106 fInfo = info.makeWH(subset->width(), subset->height()); |
66 fOrigin = SkIPoint::Make(subset->x(), subset->y()); | 107 fOrigin = SkIPoint::Make(subset->x(), subset->y()); |
67 } | 108 } |
68 | 109 |
| 110 SkImageCacherator::Validator::~Validator() {} |
| 111 |
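As the Validator above shows, a requested subset must fit inside the generator's bounds; the cacherator keeps the generator's own uniqueID when the subset equals the full bounds and mints a fresh ID only for a proper subset, whose pixels differ from the full image. A small standalone sketch of that rule, with plain structs standing in for SkIRect and SkNextID (Rect and validate_subset_id are hypothetical names):

// Sketch only: the subset-validation and ID rule, outside of Skia.
#include <cstdint>
#include <optional>

struct Rect { int x, y, w, h; };

static bool contains(const Rect& outer, const Rect& inner) {
    return inner.x >= outer.x && inner.y >= outer.y &&
           inner.x + inner.w <= outer.x + outer.w &&
           inner.y + inner.h <= outer.y + outer.h;
}

// Returns the ID the cacherator should report, or nullopt if the subset is invalid.
static std::optional<uint32_t> validate_subset_id(const Rect& bounds, const Rect& subset,
                                                  uint32_t generatorID, uint32_t freshID) {
    if (!contains(bounds, subset)) {
        return std::nullopt;                       // reject out-of-bounds subsets
    }
    const bool proper = subset.x != bounds.x || subset.y != bounds.y ||
                        subset.w != bounds.w || subset.h != bounds.h;
    return proper ? freshID : generatorID;         // proper subset => new identity
}

int main() {
    Rect bounds{0, 0, 100, 80}, subset{10, 10, 50, 40};
    auto id = validate_subset_id(bounds, subset, /*generatorID=*/7, /*freshID=*/8);
    return (id && *id == 8) ? 0 : 1;               // proper subset gets the fresh ID
}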
69 SkImageCacherator* SkImageCacherator::NewFromGenerator(SkImageGenerator* gen, | 112 SkImageCacherator* SkImageCacherator::NewFromGenerator(SkImageGenerator* gen, |
70 const SkIRect* subset) { | 113 const SkIRect* subset) { |
71 Validator validator(gen, subset); | 114 Validator validator(gen, subset); |
72 | 115 |
73 return validator ? new SkImageCacherator(&validator) : nullptr; | 116 return validator ? new SkImageCacherator(&validator) : nullptr; |
74 } | 117 } |
75 | 118 |
76 SkImageCacherator::SkImageCacherator(Validator* validator) | 119 SkImageCacherator::SkImageCacherator(Validator* validator) |
77     : fNotThreadSafeGenerator(validator->fGenerator.release()) // we take ownership | 120     : fSharedGenerator(std::move(validator->fSharedGenerator)) // we take ownership |
78 , fInfo(validator->fInfo) | 121 , fInfo(validator->fInfo) |
79 , fOrigin(validator->fOrigin) | 122 , fOrigin(validator->fOrigin) |
80 , fUniqueID(validator->fUniqueID) | 123 , fUniqueID(validator->fUniqueID) |
81 { | 124 { |
82 SkASSERT(fNotThreadSafeGenerator); | 125 SkASSERT(fSharedGenerator); |
83 } | 126 } |
84 | 127 |
| 128 SkImageCacherator::~SkImageCacherator() {} |
| 129 |
85 SkData* SkImageCacherator::refEncoded(GrContext* ctx) { | 130 SkData* SkImageCacherator::refEncoded(GrContext* ctx) { |
86 ScopedGenerator generator(this); | 131 ScopedGenerator generator(fSharedGenerator); |
87 return generator->refEncodedData(ctx); | 132 return generator->refEncodedData(ctx); |
88 } | 133 } |
89 | 134 |
90 static bool check_output_bitmap(const SkBitmap& bitmap, uint32_t expectedID) { | 135 static bool check_output_bitmap(const SkBitmap& bitmap, uint32_t expectedID) { |
91 SkASSERT(bitmap.getGenerationID() == expectedID); | 136 SkASSERT(bitmap.getGenerationID() == expectedID); |
92 SkASSERT(bitmap.isImmutable()); | 137 SkASSERT(bitmap.isImmutable()); |
93 SkASSERT(bitmap.getPixels()); | 138 SkASSERT(bitmap.getPixels()); |
94 return true; | 139 return true; |
95 } | 140 } |
96 | 141 |
97 // Note, this returns a new, mutable, bitmap, with a new genID. | 142 // Note, this returns a new, mutable, bitmap, with a new genID. |
98 // If you want the immutable bitmap with the same ID as our cacherator, call tryLockAsBitmap() | 143 // If you want the immutable bitmap with the same ID as our cacherator, call tryLockAsBitmap() |
99 // | 144 // |
100 bool SkImageCacherator::generateBitmap(SkBitmap* bitmap) { | 145 bool SkImageCacherator::generateBitmap(SkBitmap* bitmap) { |
101 SkBitmap::Allocator* allocator = SkResourceCache::GetAllocator(); | 146 SkBitmap::Allocator* allocator = SkResourceCache::GetAllocator(); |
102 | 147 |
103 ScopedGenerator generator(this); | 148 ScopedGenerator generator(fSharedGenerator); |
104 const SkImageInfo& genInfo = generator->getInfo(); | 149 const SkImageInfo& genInfo = generator->getInfo(); |
105 if (fInfo.dimensions() == genInfo.dimensions()) { | 150 if (fInfo.dimensions() == genInfo.dimensions()) { |
106 SkASSERT(fOrigin.x() == 0 && fOrigin.y() == 0); | 151 SkASSERT(fOrigin.x() == 0 && fOrigin.y() == 0); |
107 // fast-case, no copy needed | 152 // fast-case, no copy needed |
108 return generator->tryGenerateBitmap(bitmap, fInfo, allocator); | 153 return generator->tryGenerateBitmap(bitmap, fInfo, allocator); |
109 } else { | 154 } else { |
110         // need to handle subsetting, so we first generate the full size version, and then | 155         // need to handle subsetting, so we first generate the full size version, and then |
111 // "read" from it to get our subset. See https://bug.skia.org/4213 | 156 // "read" from it to get our subset. See https://bug.skia.org/4213 |
112 | 157 |
113 SkBitmap full; | 158 SkBitmap full; |
114 if (!generator->tryGenerateBitmap(&full, genInfo, allocator)) { | 159 if (!generator->tryGenerateBitmap(&full, genInfo, allocator)) { |
115 return false; | 160 return false; |
116 } | 161 } |
117 if (!bitmap->tryAllocPixels(fInfo, nullptr, full.getColorTable())) { | 162 if (!bitmap->tryAllocPixels(fInfo, nullptr, full.getColorTable())) { |
118 return false; | 163 return false; |
119 } | 164 } |
120         return full.readPixels(bitmap->info(), bitmap->getPixels(), bitmap->rowBytes(), | 165         return full.readPixels(bitmap->info(), bitmap->getPixels(), bitmap->rowBytes(), |
121 fOrigin.x(), fOrigin.y()); | 166 fOrigin.x(), fOrigin.y()); |
122 } | 167 } |
123 } | 168 } |
124 | 169 |
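The subset branch of generateBitmap() above decodes the full image once and then copies the requested window out of it. A rough standalone sketch of that fallback, using raw byte buffers in place of SkBitmap and assuming a tightly packed 4-byte-per-pixel layout (decode_full and extract_subset are hypothetical names):

// Sketch only: decode everything, then memcpy the (x, y, w, h) window row by row.
#include <cstdint>
#include <cstring>
#include <vector>

struct Image { int w, h; std::vector<uint8_t> px; };    // tightly packed RGBA

static Image decode_full(int w, int h) {                 // stand-in for the generator
    return Image{w, h, std::vector<uint8_t>(size_t(w) * h * 4, 0xFF)};
}

static Image extract_subset(int fullW, int fullH, int x, int y, int w, int h) {
    Image full = decode_full(fullW, fullH);
    Image out{w, h, std::vector<uint8_t>(size_t(w) * h * 4)};
    const size_t srcRowBytes = size_t(fullW) * 4;
    const size_t dstRowBytes = size_t(w) * 4;
    for (int row = 0; row < h; ++row) {
        const uint8_t* src = full.px.data() + (size_t(y) + row) * srcRowBytes + size_t(x) * 4;
        std::memcpy(out.px.data() + size_t(row) * dstRowBytes, src, dstRowBytes);
    }
    return out;
}

int main() {
    Image sub = extract_subset(640, 480, 100, 50, 64, 32);
    return sub.px.size() == 64u * 32u * 4u ? 0 : 1;
}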
125 bool SkImageCacherator::directGeneratePixels(const SkImageInfo& info, void* pixels, size_t rb, | 170 bool SkImageCacherator::directGeneratePixels(const SkImageInfo& info, void* pixels, size_t rb, |
126 int srcX, int srcY) { | 171 int srcX, int srcY) { |
127 ScopedGenerator generator(this); | 172 ScopedGenerator generator(fSharedGenerator); |
128 const SkImageInfo& genInfo = generator->getInfo(); | 173 const SkImageInfo& genInfo = generator->getInfo(); |
129 // Currently generators do not natively handle subsets, so check that first. | 174 // Currently generators do not natively handle subsets, so check that first. |
130     if (srcX || srcY || genInfo.width() != info.width() || genInfo.height() != info.height()) { | 175     if (srcX || srcY || genInfo.width() != info.width() || genInfo.height() != info.height()) { |
131 return false; | 176 return false; |
132 } | 177 } |
133 return generator->getPixels(info, pixels, rb); | 178 return generator->getPixels(info, pixels, rb); |
134 } | 179 } |
135 | 180 |
136 ////////////////////////////////////////////////////////////////////////////////////////////////// | 181 ////////////////////////////////////////////////////////////////////////////////////////////////// |
137 | 182 |
(...skipping 24 matching lines...) |
162 SkImage::CachingHint chint) { | 207 SkImage::CachingHint chint) { |
163 if (this->tryLockAsBitmap(bitmap, client, chint)) { | 208 if (this->tryLockAsBitmap(bitmap, client, chint)) { |
164 return check_output_bitmap(*bitmap, fUniqueID); | 209 return check_output_bitmap(*bitmap, fUniqueID); |
165 } | 210 } |
166 | 211 |
167 #if SK_SUPPORT_GPU | 212 #if SK_SUPPORT_GPU |
168     // Try to get a texture and read it back to raster (and then cache that with our ID) | 213     // Try to get a texture and read it back to raster (and then cache that with our ID) |
169 SkAutoTUnref<GrTexture> tex; | 214 SkAutoTUnref<GrTexture> tex; |
170 | 215 |
171 { | 216 { |
172 ScopedGenerator generator(this); | 217 ScopedGenerator generator(fSharedGenerator); |
173         SkIRect subset = SkIRect::MakeXYWH(fOrigin.x(), fOrigin.y(), fInfo.width(), fInfo.height()); | 218         SkIRect subset = SkIRect::MakeXYWH(fOrigin.x(), fOrigin.y(), fInfo.width(), fInfo.height()); |
174 tex.reset(generator->generateTexture(nullptr, &subset)); | 219 tex.reset(generator->generateTexture(nullptr, &subset)); |
175 } | 220 } |
176 if (!tex) { | 221 if (!tex) { |
177 bitmap->reset(); | 222 bitmap->reset(); |
178 return false; | 223 return false; |
179 } | 224 } |
180 | 225 |
181 if (!bitmap->tryAllocPixels(fInfo)) { | 226 if (!bitmap->tryAllocPixels(fInfo)) { |
182 bitmap->reset(); | 227 bitmap->reset(); |
(...skipping 91 matching lines...) |
274 if (key.isValid()) { | 319 if (key.isValid()) { |
275         if (GrTexture* tex = ctx->textureProvider()->findAndRefTextureByUniqueKey(key)) { | 320         if (GrTexture* tex = ctx->textureProvider()->findAndRefTextureByUniqueKey(key)) { |
276             SK_HISTOGRAM_ENUMERATION("LockTexturePath", kPreExisting_LockTexturePath, | 321             SK_HISTOGRAM_ENUMERATION("LockTexturePath", kPreExisting_LockTexturePath, |
277 kLockTexturePathCount); | 322 kLockTexturePathCount); |
278 return tex; | 323 return tex; |
279 } | 324 } |
280 } | 325 } |
281 | 326 |
282 // 2. Ask the generator to natively create one | 327 // 2. Ask the generator to natively create one |
283 { | 328 { |
284 ScopedGenerator generator(this); | 329 ScopedGenerator generator(fSharedGenerator); |
285         SkIRect subset = SkIRect::MakeXYWH(fOrigin.x(), fOrigin.y(), fInfo.width(), fInfo.height()); | 330         SkIRect subset = SkIRect::MakeXYWH(fOrigin.x(), fOrigin.y(), fInfo.width(), fInfo.height()); |
286 if (GrTexture* tex = generator->generateTexture(ctx, &subset)) { | 331 if (GrTexture* tex = generator->generateTexture(ctx, &subset)) { |
287 SK_HISTOGRAM_ENUMERATION("LockTexturePath", kNative_LockTexturePath, | 332 SK_HISTOGRAM_ENUMERATION("LockTexturePath", kNative_LockTexturePath, |
288 kLockTexturePathCount); | 333 kLockTexturePathCount); |
289 return set_key_and_return(tex, key); | 334 return set_key_and_return(tex, key); |
290 } | 335 } |
291 } | 336 } |
292 | 337 |
293 const GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(fInfo, *ctx->caps()); | 338 const GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(fInfo, *ctx->caps()); |
294 | 339 |
295 #ifdef SK_SUPPORT_COMPRESSED_TEXTURES_IN_CACHERATOR | 340 #ifdef SK_SUPPORT_COMPRESSED_TEXTURES_IN_CACHERATOR |
296     // 3. Ask the generator to return a compressed form that the GPU might support | 341     // 3. Ask the generator to return a compressed form that the GPU might support |
297 sk_sp<SkData> data(this->refEncoded(ctx)); | 342 sk_sp<SkData> data(this->refEncoded(ctx)); |
298 if (data) { | 343 if (data) { |
299 GrTexture* tex = load_compressed_into_texture(ctx, data, desc); | 344 GrTexture* tex = load_compressed_into_texture(ctx, data, desc); |
300 if (tex) { | 345 if (tex) { |
301             SK_HISTOGRAM_ENUMERATION("LockTexturePath", kCompressed_LockTexturePath, | 346             SK_HISTOGRAM_ENUMERATION("LockTexturePath", kCompressed_LockTexturePath, |
302 kLockTexturePathCount); | 347 kLockTexturePathCount); |
303 return set_key_and_return(tex, key); | 348 return set_key_and_return(tex, key); |
304 } | 349 } |
305 } | 350 } |
306 #endif | 351 #endif |
307 | 352 |
308 // 4. Ask the generator to return YUV planes, which the GPU can convert | 353 // 4. Ask the generator to return YUV planes, which the GPU can convert |
309 { | 354 { |
310 ScopedGenerator generator(this); | 355 ScopedGenerator generator(fSharedGenerator); |
311 Generator_GrYUVProvider provider(generator); | 356 Generator_GrYUVProvider provider(generator); |
312 sk_sp<GrTexture> tex = provider.refAsTexture(ctx, desc, true); | 357 sk_sp<GrTexture> tex = provider.refAsTexture(ctx, desc, true); |
313 if (tex) { | 358 if (tex) { |
314 SK_HISTOGRAM_ENUMERATION("LockTexturePath", kYUV_LockTexturePath, | 359 SK_HISTOGRAM_ENUMERATION("LockTexturePath", kYUV_LockTexturePath, |
315 kLockTexturePathCount); | 360 kLockTexturePathCount); |
316 return set_key_and_return(tex.release(), key); | 361 return set_key_and_return(tex.release(), key); |
317 } | 362 } |
318 } | 363 } |
319 | 364 |
320 // 5. Ask the generator to return RGB(A) data, which the GPU can convert | 365 // 5. Ask the generator to return RGB(A) data, which the GPU can convert |
(...skipping 32 matching lines...) |
353 | 398 |
354 #else | 399 #else |
355 | 400 |
356 GrTexture* SkImageCacherator::lockAsTexture(GrContext* ctx, const GrTextureParams&, | 401 GrTexture* SkImageCacherator::lockAsTexture(GrContext* ctx, const GrTextureParams&, |
357                                             SkSourceGammaTreatment gammaTreatment, | 402                                             SkSourceGammaTreatment gammaTreatment, |
358                                             const SkImage* client, SkImage::CachingHint) { | 403                                             const SkImage* client, SkImage::CachingHint) { |
359 return nullptr; | 404 return nullptr; |
360 } | 405 } |
361 | 406 |
362 #endif | 407 #endif |
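For reference, the GPU path in lockTexture() above tries a fixed sequence of sources and caches whichever one succeeds under the image's unique key: (1) an already-cached texture, (2) a texture the generator creates natively, (3) optionally a compressed encoded form, (4) YUV planes converted on the GPU, and finally (5) raster RGB(A) data uploaded to a texture. A compact standalone sketch of that "first strategy that succeeds wins" shape, with hypothetical Texture and try-callback names (not Skia API):

// Sketch only: ordered fallback strategies, mirroring steps 1-5 of lockTexture().
#include <functional>
#include <vector>

struct Texture { int id; };

using Strategy = std::function<Texture*()>;   // returns nullptr on failure

static Texture* first_success(const std::vector<Strategy>& strategies) {
    for (const Strategy& tryOne : strategies) {
        if (Texture* tex = tryOne()) {
            return tex;                       // real code would also set the unique key here
        }
    }
    return nullptr;                           // every path failed
}

int main() {
    static Texture fromRaster{5};
    Texture* tex = first_success({
        []() -> Texture* { return nullptr; },         // 1. cache lookup misses
        []() -> Texture* { return nullptr; },         // 2. generator has no native texture
        []() -> Texture* { return nullptr; },         // 4. no YUV planes
        []() -> Texture* { return &fromRaster; },     // 5. upload raster pixels
    });
    return (tex && tex->id == 5) ? 0 : 1;
}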