OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2011 Google Inc. | 2 * Copyright 2011 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "SkBitmapCache.h" | 8 #include "SkBitmapCache.h" |
9 #include "SkPixelRef.h" | 9 #include "SkPixelRef.h" |
10 #include "SkThread.h" | 10 #include "SkThread.h" |
(...skipping 142 matching lines...)
153 // Neither ID is unique any more. | 153 // Neither ID is unique any more. |
154 // (These & ~1u are actually redundant. that.getGenerationID() just did it for us.) | 154 // (These & ~1u are actually redundant. that.getGenerationID() just did it for us.) |
155 this->fTaggedGenID.store(genID & ~1u); | 155 this->fTaggedGenID.store(genID & ~1u); |
156 that. fTaggedGenID.store(genID & ~1u); | 156 that. fTaggedGenID.store(genID & ~1u); |
157 | 157 |
158 // This method isn't threadsafe, so these asserts should be fine. | 158 // This method isn't threadsafe, so these asserts should be fine. |
159 SkASSERT(!this->genIDIsUnique()); | 159 SkASSERT(!this->genIDIsUnique()); |
160 SkASSERT(!that. genIDIsUnique()); | 160 SkASSERT(!that. genIDIsUnique()); |
161 } | 161 } |
162 | 162 |
| 163 static void validate_pixels_ctable(const SkImageInfo& info, const void* pixels, |
| 164 const SkColorTable* ctable) { |
| 165 if (info.isEmpty()) { |
| 166 return; // can't require pixels if the dimensions are empty |
| 167 } |
| 168 SkASSERT(pixels); |
| 169 if (kIndex_8_SkColorType == info.colorType()) { |
| 170 SkASSERT(ctable); |
| 171 } else { |
| 172 SkASSERT(NULL == ctable); |
| 173 } |
| 174 } |
| 175 |
163 void SkPixelRef::setPreLocked(void* pixels, size_t rowBytes, SkColorTable* ctable) { | 176 void SkPixelRef::setPreLocked(void* pixels, size_t rowBytes, SkColorTable* ctable) { |
164 #ifndef SK_IGNORE_PIXELREF_SETPRELOCKED | 177 #ifndef SK_IGNORE_PIXELREF_SETPRELOCKED |
| 178 validate_pixels_ctable(fInfo, pixels, ctable); |
165 // only call me in your constructor, otherwise fLockCount tracking can get | 179 // only call me in your constructor, otherwise fLockCount tracking can get |
166 // out of sync. | 180 // out of sync. |
167 fRec.fPixels = pixels; | 181 fRec.fPixels = pixels; |
168 fRec.fColorTable = ctable; | 182 fRec.fColorTable = ctable; |
169 fRec.fRowBytes = rowBytes; | 183 fRec.fRowBytes = rowBytes; |
170 fLockCount = SKPIXELREF_PRELOCKED_LOCKCOUNT; | 184 fLockCount = SKPIXELREF_PRELOCKED_LOCKCOUNT; |
171 fPreLocked = true; | 185 fPreLocked = true; |
172 #endif | 186 #endif |
173 } | 187 } |
174 | 188 |
175 bool SkPixelRef::lockPixelsInsideMutex(LockRec* rec) { | 189 // Increments fLockCount only on success |
| 190 bool SkPixelRef::lockPixelsInsideMutex() { |
176 fMutex->assertHeld(); | 191 fMutex->assertHeld(); |
177 | 192 |
178 // For historical reasons, we always inc fLockCount, even if we return false. | |
179 // It would be nice to change this (it seems), and only inc if we actually succeed... | |
180 if (1 == ++fLockCount) { | 193 if (1 == ++fLockCount) { |
181 SkASSERT(fRec.isZero()); | 194 SkASSERT(fRec.isZero()); |
182 | 195 if (!this->onNewLockPixels(&fRec)) { |
183 LockRec rec; | 196 fRec.zero(); |
184 if (!this->onNewLockPixels(&rec)) { | |
185 fLockCount -= 1; // we return fLockCount unchanged if we fail. | 197 fLockCount -= 1; // we return fLockCount unchanged if we fail. |
186 return false; | 198 return false; |
187 } | 199 } |
188 SkASSERT(!rec.isZero()); // else why did onNewLock return true? | |
189 fRec = rec; | |
190 } | 200 } |
191 *rec = fRec; | 201 validate_pixels_ctable(fInfo, fRec.fPixels, fRec.fColorTable); |
192 return true; | 202 return true; |
193 } | 203 } |
194 | 204 |
195 bool SkPixelRef::lockPixels(LockRec* rec) { | 205 // For historical reasons, we always inc fLockCount, even if we return false. |
 | 206 // It would be nice to change this (it seems), and only inc if we actually succeed... |
| 207 bool SkPixelRef::lockPixels() { |
196 SkASSERT(!fPreLocked || SKPIXELREF_PRELOCKED_LOCKCOUNT == fLockCount); | 208 SkASSERT(!fPreLocked || SKPIXELREF_PRELOCKED_LOCKCOUNT == fLockCount); |
197 | 209 |
198 if (fPreLocked) { | 210 if (!fPreLocked) { |
199 *rec = fRec; | |
200 return true; | |
201 } else { | |
202 TRACE_EVENT_BEGIN0("skia", "SkPixelRef::lockPixelsMutex"); | 211 TRACE_EVENT_BEGIN0("skia", "SkPixelRef::lockPixelsMutex"); |
203 SkAutoMutexAcquire ac(*fMutex); | 212 SkAutoMutexAcquire ac(*fMutex); |
204 TRACE_EVENT_END0("skia", "SkPixelRef::lockPixelsMutex"); | 213 TRACE_EVENT_END0("skia", "SkPixelRef::lockPixelsMutex"); |
205 SkDEBUGCODE(int oldCount = fLockCount;) | 214 SkDEBUGCODE(int oldCount = fLockCount;) |
206 bool success = this->lockPixelsInsideMutex(rec); | 215 bool success = this->lockPixelsInsideMutex(); |
207 // lockPixelsInsideMutex only increments the count if it succeeds. | 216 // lockPixelsInsideMutex only increments the count if it succeeds. |
208 SkASSERT(oldCount + (int)success == fLockCount); | 217 SkASSERT(oldCount + (int)success == fLockCount); |
209 | 218 |
210 if (!success) { | 219 if (!success) { |
211 // For compatibility with SkBitmap calling lockPixels, we still want to increment | 220 // For compatibility with SkBitmap calling lockPixels, we still want to increment |
212 // fLockCount even if we failed. If we updated SkBitmap we could remove this oddity. | 221 // fLockCount even if we failed. If we updated SkBitmap we could remove this oddity. |
213 fLockCount += 1; | 222 fLockCount += 1; |
| 223 return false; |
214 } | 224 } |
215 return success; | |
216 } | 225 } |
| 226 validate_pixels_ctable(fInfo, fRec.fPixels, fRec.fColorTable); |
| 227 return true; |
217 } | 228 } |
218 | 229 |
219 bool SkPixelRef::lockPixels() { | 230 bool SkPixelRef::lockPixels(LockRec* rec) { |
220 LockRec rec; | 231 if (this->lockPixels()) { |
221 return this->lockPixels(&rec); | 232 *rec = fRec; |
| 233 return true; |
| 234 } |
| 235 return false; |
222 } | 236 } |
223 | 237 |
224 void SkPixelRef::unlockPixels() { | 238 void SkPixelRef::unlockPixels() { |
225 SkASSERT(!fPreLocked || SKPIXELREF_PRELOCKED_LOCKCOUNT == fLockCount); | 239 SkASSERT(!fPreLocked || SKPIXELREF_PRELOCKED_LOCKCOUNT == fLockCount); |
226 | 240 |
227 if (!fPreLocked) { | 241 if (!fPreLocked) { |
228 SkAutoMutexAcquire ac(*fMutex); | 242 SkAutoMutexAcquire ac(*fMutex); |
229 | 243 |
230 SkASSERT(fLockCount > 0); | 244 SkASSERT(fLockCount > 0); |
231 if (0 == --fLockCount) { | 245 if (0 == --fLockCount) { |
(...skipping 14 matching lines...)
246 return false; | 260 return false; |
247 } | 261 } |
248 | 262 |
249 if (fPreLocked) { | 263 if (fPreLocked) { |
250 result->fUnlockProc = NULL; | 264 result->fUnlockProc = NULL; |
251 result->fUnlockContext = NULL; | 265 result->fUnlockContext = NULL; |
252 result->fCTable = fRec.fColorTable; | 266 result->fCTable = fRec.fColorTable; |
253 result->fPixels = fRec.fPixels; | 267 result->fPixels = fRec.fPixels; |
254 result->fRowBytes = fRec.fRowBytes; | 268 result->fRowBytes = fRec.fRowBytes; |
255 result->fSize.set(fInfo.width(), fInfo.height()); | 269 result->fSize.set(fInfo.width(), fInfo.height()); |
256 return true; | |
257 } else { | 270 } else { |
258 SkAutoMutexAcquire ac(*fMutex); | 271 SkAutoMutexAcquire ac(*fMutex); |
259 return this->onRequestLock(request, result); | 272 if (!this->onRequestLock(request, result)) { |
| 273 return false; |
| 274 } |
260 } | 275 } |
| 276 validate_pixels_ctable(fInfo, result->fPixels, result->fCTable); |
| 277 return true; |
261 } | 278 } |
262 | 279 |
263 bool SkPixelRef::lockPixelsAreWritable() const { | 280 bool SkPixelRef::lockPixelsAreWritable() const { |
264 return this->onLockPixelsAreWritable(); | 281 return this->onLockPixelsAreWritable(); |
265 } | 282 } |
266 | 283 |
267 bool SkPixelRef::onLockPixelsAreWritable() const { | 284 bool SkPixelRef::onLockPixelsAreWritable() const { |
268 return true; | 285 return true; |
269 } | 286 } |
270 | 287 |
(...skipping 80 matching lines...)
351 return 0; | 368 return 0; |
352 } | 369 } |
353 | 370 |
354 static void unlock_legacy_result(void* ctx) { | 371 static void unlock_legacy_result(void* ctx) { |
355 SkPixelRef* pr = (SkPixelRef*)ctx; | 372 SkPixelRef* pr = (SkPixelRef*)ctx; |
356 pr->unlockPixels(); | 373 pr->unlockPixels(); |
357 pr->unref(); // balancing the Ref in onRequestLock | 374 pr->unref(); // balancing the Ref in onRequestLock |
358 } | 375 } |
359 | 376 |
360 bool SkPixelRef::onRequestLock(const LockRequest& request, LockResult* result) { | 377 bool SkPixelRef::onRequestLock(const LockRequest& request, LockResult* result) { |
361 LockRec rec; | 378 if (!this->lockPixelsInsideMutex()) { |
362 if (!this->lockPixelsInsideMutex(&rec)) { | |
363 return false; | 379 return false; |
364 } | 380 } |
365 | 381 |
366 result->fUnlockProc = unlock_legacy_result; | 382 result->fUnlockProc = unlock_legacy_result; |
367 result->fUnlockContext = SkRef(this); // this is balanced in our fUnlockProc | 383 result->fUnlockContext = SkRef(this); // this is balanced in our fUnlockProc |
368 result->fCTable = rec.fColorTable; | 384 result->fCTable = fRec.fColorTable; |
369 result->fPixels = rec.fPixels; | 385 result->fPixels = fRec.fPixels; |
370 result->fRowBytes = rec.fRowBytes; | 386 result->fRowBytes = fRec.fRowBytes; |
371 result->fSize.set(fInfo.width(), fInfo.height()); | 387 result->fSize.set(fInfo.width(), fInfo.height()); |
372 return true; | 388 return true; |
373 } | 389 } |
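
For context, here is a minimal caller-side sketch (not part of the change) of the lock/unlock contract these functions implement. It assumes the lockPixels(LockRec*) overload and unlockPixels() are callable from this context (their visibility is declared in SkPixelRef.h, which is not shown in this diff) and that pr points at a fully constructed pixel ref; the helper name is hypothetical.

    #include "SkPixelRef.h"

    // Hypothetical helper, for illustration only.
    static void read_pixels_sketch(SkPixelRef* pr) {
        SkPixelRef::LockRec rec;
        if (pr->lockPixels(&rec)) {
            // On success the rec has passed validate_pixels_ctable(): fPixels is
            // non-NULL for non-empty dimensions, and fColorTable is set iff the
            // color type is kIndex_8_SkColorType.
            // ... read rec.fPixels / rec.fRowBytes / rec.fColorTable here ...
        }
        // Paired unconditionally, mirroring SkBitmap: lockPixels() bumps the lock
        // count even when it fails, so the matching unlockPixels() is still needed.
        pr->unlockPixels();
    }

The unconditional unlockPixels() reflects the "historical reasons" comment above: until SkBitmap stops pairing a failed lock with an unlock, the count has to be incremented on both the success and failure paths.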