| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2011 Google Inc. | 2 * Copyright 2011 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "SkBitmapCache.h" | 8 #include "SkBitmapCache.h" |
| 9 #include "SkPixelRef.h" | 9 #include "SkPixelRef.h" |
| 10 #include "SkThread.h" | 10 #include "SkThread.h" |
| (...skipping 142 matching lines...) |
| 153 // Neither ID is unique any more. | 153 // Neither ID is unique any more. |
| 154     // (These & ~1u are actually redundant. that.getGenerationID() just did it for us.) | 154     // (These & ~1u are actually redundant. that.getGenerationID() just did it for us.) |
| 155 this->fTaggedGenID.store(genID & ~1u); | 155 this->fTaggedGenID.store(genID & ~1u); |
| 156 that. fTaggedGenID.store(genID & ~1u); | 156 that. fTaggedGenID.store(genID & ~1u); |
| 157 | 157 |
| 158 // This method isn't threadsafe, so these asserts should be fine. | 158 // This method isn't threadsafe, so these asserts should be fine. |
| 159 SkASSERT(!this->genIDIsUnique()); | 159 SkASSERT(!this->genIDIsUnique()); |
| 160 SkASSERT(!that. genIDIsUnique()); | 160 SkASSERT(!that. genIDIsUnique()); |
| 161 } | 161 } |
| 162 | 162 |
| 163 static void validate_pixels_ctable(const SkImageInfo& info, const void* pixels, | |
| 164 const SkColorTable* ctable) { | |
| 165 if (info.isEmpty()) { | |
| 166 return; // can't require pixels if the dimensions are empty | |
| 167 } | |
| 168 SkASSERT(pixels); | |
| 169 if (kIndex_8_SkColorType == info.colorType()) { | |
| 170 SkASSERT(ctable); | |
| 171 } else { | |
| 172 SkASSERT(NULL == ctable); | |
| 173 } | |
| 174 } | |
| 175 | |
| 176 void SkPixelRef::setPreLocked(void* pixels, size_t rowBytes, SkColorTable* ctable) { | 163 void SkPixelRef::setPreLocked(void* pixels, size_t rowBytes, SkColorTable* ctable) { |
| 177 #ifndef SK_IGNORE_PIXELREF_SETPRELOCKED | 164 #ifndef SK_IGNORE_PIXELREF_SETPRELOCKED |
| 178 validate_pixels_ctable(fInfo, pixels, ctable); | |
| 179 // only call me in your constructor, otherwise fLockCount tracking can get | 165 // only call me in your constructor, otherwise fLockCount tracking can get |
| 180 // out of sync. | 166 // out of sync. |
| 181 fRec.fPixels = pixels; | 167 fRec.fPixels = pixels; |
| 182 fRec.fColorTable = ctable; | 168 fRec.fColorTable = ctable; |
| 183 fRec.fRowBytes = rowBytes; | 169 fRec.fRowBytes = rowBytes; |
| 184 fLockCount = SKPIXELREF_PRELOCKED_LOCKCOUNT; | 170 fLockCount = SKPIXELREF_PRELOCKED_LOCKCOUNT; |
| 185 fPreLocked = true; | 171 fPreLocked = true; |
| 186 #endif | 172 #endif |
| 187 } | 173 } |
| 188 | 174 |
| 189 // Increments fLockCount only on success | 175 bool SkPixelRef::lockPixelsInsideMutex(LockRec* rec) { |
| 190 bool SkPixelRef::lockPixelsInsideMutex() { | |
| 191 fMutex->assertHeld(); | 176 fMutex->assertHeld(); |
| 192 | 177 |
|  | 178     // For historical reasons, we always inc fLockCount, even if we return false. |
|  | 179     // It would be nice to change this (it seems), and only inc if we actually succeed... |
| 193 if (1 == ++fLockCount) { | 180 if (1 == ++fLockCount) { |
| 194 SkASSERT(fRec.isZero()); | 181 SkASSERT(fRec.isZero()); |
| 195 if (!this->onNewLockPixels(&fRec)) { | 182 |
| 196 fRec.zero(); | 183 LockRec rec; |
| 184 if (!this->onNewLockPixels(&rec)) { |
| 197 fLockCount -= 1; // we return fLockCount unchanged if we fail. | 185 fLockCount -= 1; // we return fLockCount unchanged if we fail. |
| 198 return false; | 186 return false; |
| 199 } | 187 } |
|  | 188         SkASSERT(!rec.isZero()); // else why did onNewLock return true? |
|  | 189         fRec = rec; |
| 200 } | 190 } |
| 201 validate_pixels_ctable(fInfo, fRec.fPixels, fRec.fColorTable); | 191 *rec = fRec; |
| 202 return true; | 192 return true; |
| 203 } | 193 } |
| 204 | 194 |
| 205 // For historical reasons, we always inc fLockCount, even if we return false. | 195 bool SkPixelRef::lockPixels(LockRec* rec) { |
| 206 // It would be nice to change this (it seems), and only inc if we actually succeed... | |
| 207 bool SkPixelRef::lockPixels() { | |
| 208 SkASSERT(!fPreLocked || SKPIXELREF_PRELOCKED_LOCKCOUNT == fLockCount); | 196 SkASSERT(!fPreLocked || SKPIXELREF_PRELOCKED_LOCKCOUNT == fLockCount); |
| 209 | 197 |
| 210 if (!fPreLocked) { | 198 if (fPreLocked) { |
|  | 199         *rec = fRec; |
|  | 200         return true; |
|  | 201     } else { |
| 211 TRACE_EVENT_BEGIN0("skia", "SkPixelRef::lockPixelsMutex"); | 202 TRACE_EVENT_BEGIN0("skia", "SkPixelRef::lockPixelsMutex"); |
| 212 SkAutoMutexAcquire ac(*fMutex); | 203 SkAutoMutexAcquire ac(*fMutex); |
| 213 TRACE_EVENT_END0("skia", "SkPixelRef::lockPixelsMutex"); | 204 TRACE_EVENT_END0("skia", "SkPixelRef::lockPixelsMutex"); |
| 214 SkDEBUGCODE(int oldCount = fLockCount;) | 205 SkDEBUGCODE(int oldCount = fLockCount;) |
| 215 bool success = this->lockPixelsInsideMutex(); | 206 bool success = this->lockPixelsInsideMutex(rec); |
| 216 // lockPixelsInsideMutex only increments the count if it succeeds. | 207 // lockPixelsInsideMutex only increments the count if it succeeds. |
| 217 SkASSERT(oldCount + (int)success == fLockCount); | 208 SkASSERT(oldCount + (int)success == fLockCount); |
| 218 | 209 |
| 219 if (!success) { | 210 if (!success) { |
| 220             // For compatibility with SkBitmap calling lockPixels, we still want to increment | 211             // For compatibility with SkBitmap calling lockPixels, we still want to increment |
| 221             // fLockCount even if we failed. If we updated SkBitmap we could remove this oddity. | 212             // fLockCount even if we failed. If we updated SkBitmap we could remove this oddity. |
| 222 fLockCount += 1; | 213 fLockCount += 1; |
| 223 return false; | |
| 224 } | 214 } |
|  | 215         return success; |
| 225 } | 216 } |
| 226 validate_pixels_ctable(fInfo, fRec.fPixels, fRec.fColorTable); | |
| 227 return true; | |
| 228 } | 217 } |
| 229 | 218 |
| 230 bool SkPixelRef::lockPixels(LockRec* rec) { | 219 bool SkPixelRef::lockPixels() { |
| 231 if (this->lockPixels()) { | 220 LockRec rec; |
| 232 *rec = fRec; | 221 return this->lockPixels(&rec); |
| 233 return true; | |
| 234 } | |
| 235 return false; | |
| 236 } | 222 } |
| 237 | 223 |
| 238 void SkPixelRef::unlockPixels() { | 224 void SkPixelRef::unlockPixels() { |
| 239 SkASSERT(!fPreLocked || SKPIXELREF_PRELOCKED_LOCKCOUNT == fLockCount); | 225 SkASSERT(!fPreLocked || SKPIXELREF_PRELOCKED_LOCKCOUNT == fLockCount); |
| 240 | 226 |
| 241 if (!fPreLocked) { | 227 if (!fPreLocked) { |
| 242 SkAutoMutexAcquire ac(*fMutex); | 228 SkAutoMutexAcquire ac(*fMutex); |
| 243 | 229 |
| 244 SkASSERT(fLockCount > 0); | 230 SkASSERT(fLockCount > 0); |
| 245 if (0 == --fLockCount) { | 231 if (0 == --fLockCount) { |
| (...skipping 14 matching lines...) |
| 260 return false; | 246 return false; |
| 261 } | 247 } |
| 262 | 248 |
| 263 if (fPreLocked) { | 249 if (fPreLocked) { |
| 264 result->fUnlockProc = NULL; | 250 result->fUnlockProc = NULL; |
| 265 result->fUnlockContext = NULL; | 251 result->fUnlockContext = NULL; |
| 266 result->fCTable = fRec.fColorTable; | 252 result->fCTable = fRec.fColorTable; |
| 267 result->fPixels = fRec.fPixels; | 253 result->fPixels = fRec.fPixels; |
| 268 result->fRowBytes = fRec.fRowBytes; | 254 result->fRowBytes = fRec.fRowBytes; |
| 269 result->fSize.set(fInfo.width(), fInfo.height()); | 255 result->fSize.set(fInfo.width(), fInfo.height()); |
|  | 256         return true; |
| 270 } else { | 257 } else { |
| 271 SkAutoMutexAcquire ac(*fMutex); | 258 SkAutoMutexAcquire ac(*fMutex); |
| 272 if (!this->onRequestLock(request, result)) { | 259 return this->onRequestLock(request, result); |
| 273 return false; | |
| 274 } | |
| 275 } | 260 } |
| 276 validate_pixels_ctable(fInfo, result->fPixels, result->fCTable); | |
| 277 return true; | |
| 278 } | 261 } |
| 279 | 262 |
| 280 bool SkPixelRef::lockPixelsAreWritable() const { | 263 bool SkPixelRef::lockPixelsAreWritable() const { |
| 281 return this->onLockPixelsAreWritable(); | 264 return this->onLockPixelsAreWritable(); |
| 282 } | 265 } |
| 283 | 266 |
| 284 bool SkPixelRef::onLockPixelsAreWritable() const { | 267 bool SkPixelRef::onLockPixelsAreWritable() const { |
| 285 return true; | 268 return true; |
| 286 } | 269 } |
| 287 | 270 |
| (...skipping 80 matching lines...) |
| 368 return 0; | 351 return 0; |
| 369 } | 352 } |
| 370 | 353 |
| 371 static void unlock_legacy_result(void* ctx) { | 354 static void unlock_legacy_result(void* ctx) { |
| 372 SkPixelRef* pr = (SkPixelRef*)ctx; | 355 SkPixelRef* pr = (SkPixelRef*)ctx; |
| 373 pr->unlockPixels(); | 356 pr->unlockPixels(); |
| 374     pr->unref(); // balancing the Ref in onRequestLock | 357     pr->unref(); // balancing the Ref in onRequestLock |
| 375 } | 358 } |
| 376 | 359 |
| 377 bool SkPixelRef::onRequestLock(const LockRequest& request, LockResult* result) { | 360 bool SkPixelRef::onRequestLock(const LockRequest& request, LockResult* result) { |
| 378 if (!this->lockPixelsInsideMutex()) { | 361 LockRec rec; |
|  | 362     if (!this->lockPixelsInsideMutex(&rec)) { |
| 379 return false; | 363 return false; |
| 380 } | 364 } |
| 381 | 365 |
| 382 result->fUnlockProc = unlock_legacy_result; | 366 result->fUnlockProc = unlock_legacy_result; |
| 383     result->fUnlockContext = SkRef(this);  // this is balanced in our fUnlockProc | 367     result->fUnlockContext = SkRef(this);  // this is balanced in our fUnlockProc |
| 384 result->fCTable = fRec.fColorTable; | 368 result->fCTable = rec.fColorTable; |
| 385 result->fPixels = fRec.fPixels; | 369 result->fPixels = rec.fPixels; |
| 386 result->fRowBytes = fRec.fRowBytes; | 370 result->fRowBytes = rec.fRowBytes; |
| 387 result->fSize.set(fInfo.width(), fInfo.height()); | 371 result->fSize.set(fInfo.width(), fInfo.height()); |
| 388 return true; | 372 return true; |
| 389 } | 373 } |
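For reference, the new lockPixels(LockRec*) overload reports the pixel address, row bytes, and color table through an out-parameter instead of leaving callers to read fRec afterwards. Below is a minimal caller-side sketch (illustration only, not part of this patch), assuming LockRec is the nested SkPixelRef::LockRec type and that lockPixels()/unlockPixels() are callable from the call site; pixel_ref_has_pixels is a hypothetical helper, not Skia API:

    #include "SkPixelRef.h"

    static bool pixel_ref_has_pixels(SkPixelRef* pr) {
        SkPixelRef::LockRec rec;
        if (!pr->lockPixels(&rec)) {
            return false;                      // lock failed; nothing to inspect
        }
        const bool ok = (rec.fPixels != NULL); // address/stride are only valid while locked
        pr->unlockPixels();                    // balance the successful lock
        return ok;
    }

On the subclass side, onNewLockPixels() now fills a caller-provided LockRec, which must be non-zero on success (new line 188 asserts this). A sketch under the same assumptions; MyPixelRef, fStorage, and fRB are hypothetical names:

    bool MyPixelRef::onNewLockPixels(LockRec* rec) {
        rec->fPixels     = fStorage; // hypothetical backing store owned by the subclass
        rec->fRowBytes   = fRB;      // hypothetical row stride
        rec->fColorTable = NULL;     // no color table for a non-indexed config
        return true;                 // rec is non-zero, satisfying the new assert
    }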