| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2011 Google Inc. | 2 * Copyright 2011 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "SkBitmapCache.h" | 8 #include "SkBitmapCache.h" |
| 9 #include "SkBitmapController.h" |
| 9 #include "SkBitmapProcState.h" | 10 #include "SkBitmapProcState.h" |
| 10 #include "SkColorPriv.h" | 11 #include "SkColorPriv.h" |
| 11 #include "SkFilterProc.h" | 12 #include "SkFilterProc.h" |
| 12 #include "SkPaint.h" | 13 #include "SkPaint.h" |
| 13 #include "SkShader.h" // for tilemodes | 14 #include "SkShader.h" // for tilemodes |
| 14 #include "SkUtilsArm.h" | 15 #include "SkUtilsArm.h" |
| 15 #include "SkBitmapScaler.h" | 16 #include "SkBitmapScaler.h" |
| 16 #include "SkMipMap.h" | 17 #include "SkMipMap.h" |
| 17 #include "SkPixelRef.h" | 18 #include "SkPixelRef.h" |
| 18 #include "SkImageEncoder.h" | 19 #include "SkImageEncoder.h" |
| (...skipping 10 matching lines...) | |
| 29 extern void SI8_opaque_D32_filter_DX_shaderproc_neon(const SkBitmapProcState&, int, int, uint32_t*, int); | 30 extern void SI8_opaque_D32_filter_DX_shaderproc_neon(const SkBitmapProcState&, int, int, uint32_t*, int); |
| 30 extern void Clamp_SI8_opaque_D32_filter_DX_shaderproc_neon(const SkBitmapProcState&, int, int, uint32_t*, int); | 31 extern void Clamp_SI8_opaque_D32_filter_DX_shaderproc_neon(const SkBitmapProcState&, int, int, uint32_t*, int); |
| 31 #endif | 32 #endif |
| 32 | 33 |
| 33 extern void Clamp_S32_opaque_D32_nofilter_DX_shaderproc(const SkBitmapProcState&, int, int, uint32_t*, int); | 34 extern void Clamp_S32_opaque_D32_nofilter_DX_shaderproc(const SkBitmapProcState&, int, int, uint32_t*, int); |
| 34 | 35 |
| 35 #define NAME_WRAP(x) x | 36 #define NAME_WRAP(x) x |
| 36 #include "SkBitmapProcState_filter.h" | 37 #include "SkBitmapProcState_filter.h" |
| 37 #include "SkBitmapProcState_procs.h" | 38 #include "SkBitmapProcState_procs.h" |
| 38 | 39 |
| 40 SkBitmapProcState::SkBitmapProcState() : fBMState(NULL) {} |
| 41 |
| 42 SkBitmapProcState::~SkBitmapProcState() { |
| 43 SkInPlaceDeleteCheck(fBMState, fBMStateStorage.get()); |
| 44 } |
| 45 |
| 39 /////////////////////////////////////////////////////////////////////////////// | 46 /////////////////////////////////////////////////////////////////////////////// |
| 40 | 47 |
| 41 // true iff the matrix contains, at most, scale and translate elements | 48 // true iff the matrix contains, at most, scale and translate elements |
| 42 static bool matrix_only_scale_translate(const SkMatrix& m) { | 49 static bool matrix_only_scale_translate(const SkMatrix& m) { |
| 43 return m.getType() <= (SkMatrix::kScale_Mask | SkMatrix::kTranslate_Mask); | 50 return m.getType() <= (SkMatrix::kScale_Mask | SkMatrix::kTranslate_Mask); |
| 44 } | 51 } |
| 45 | 52 |
| 46 /** | 53 /** |
| 47 * For the purposes of drawing bitmaps, if a matrix is "almost" translate | 54 * For the purposes of drawing bitmaps, if a matrix is "almost" translate |
| 48 * go ahead and treat it as if it were, so that subsequent code can go fast. | 55 * go ahead and treat it as if it were, so that subsequent code can go fast. |
| (...skipping 34 matching lines...) | |
| 83 return false; | 90 return false; |
| 84 } | 91 } |
| 85 if (!SkScalarNearlyZero(matrix[SkMatrix::kMScaleY] - SK_Scalar1, tol)) { | 92 if (!SkScalarNearlyZero(matrix[SkMatrix::kMScaleY] - SK_Scalar1, tol)) { |
| 86 return false; | 93 return false; |
| 87 } | 94 } |
| 88 } | 95 } |
| 89 // if we got here, treat us as either kTranslate_Mask or identity | 96 // if we got here, treat us as either kTranslate_Mask or identity |
| 90 return true; | 97 return true; |
| 91 } | 98 } |
| 92 | 99 |
| 93 /////////////////////////////////////////////////////////////////////////////// | |
| 94 | |
| 95 static bool valid_for_filtering(unsigned dimension) { | 100 static bool valid_for_filtering(unsigned dimension) { |
| 96 // for filtering, width and height must fit in 14 bits, since we steal | 101 // for filtering, width and height must fit in 14 bits, since we steal |
| 97 // 2 bits from each to store our 4-bit subpixel data | 102 // 2 bits from each to store our 4-bit subpixel data |
| 98 return (dimension & ~0x3FFF) == 0; | 103 return (dimension & ~0x3FFF) == 0; |
| 99 } | 104 } |
| 100 | 105 |
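The 14-bit limit above follows from how the filter procs pack coordinates: each coordinate travels in a 16-bit field, and the top 2 bits of the x and y fields are repurposed to carry the 4-bit subpixel fraction, leaving 14 bits for the texel index. Below is a minimal, hypothetical sketch of that kind of packing (helper names are illustrative, not code from this CL; the real macros live elsewhere in SkBitmapProcState):

```cpp
// Hypothetical illustration of stealing the top 2 bits of each 16-bit
// coordinate field for subpixel data; not code from this CL.
#include <cstdint>
#include <cassert>

// Layout per 32-bit word: [ subY:2 | y:14 | subX:2 | x:14 ]
static inline uint32_t pack_coord_with_subpixel(uint32_t x, uint32_t y,
                                                uint32_t subX2, uint32_t subY2) {
    assert(x < (1u << 14) && y < (1u << 14));    // why each dimension must fit in 14 bits
    assert(subX2 < 4 && subY2 < 4);
    return (subY2 << 30) | (y << 16) | (subX2 << 14) | x;
}

static inline uint32_t coord_x(uint32_t packed) { return packed & 0x3FFF; }
static inline uint32_t coord_y(uint32_t packed) { return (packed >> 16) & 0x3FFF; }
```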
| 101 // Check to see that the size of the bitmap that would be produced by | |
| 102 // scaling by the given inverted matrix is less than the maximum allowed. | |
| 103 static inline bool cache_size_okay(const SkBitmap& bm, const SkMatrix& invMat) { | |
| 104 size_t maximumAllocation = SkResourceCache::GetEffectiveSingleAllocationByteLimit(); | |
| 105 if (0 == maximumAllocation) { | |
| 106 return true; | |
| 107 } | |
| 108 // float matrixScaleFactor = 1.0 / (invMat.scaleX * invMat.scaleY); | |
| 109 // return ((origBitmapSize * matrixScaleFactor) < maximumAllocationSize); | |
| 110 // Skip the division step: | |
| 111 return bm.info().getSafeSize(bm.info().minRowBytes()) | |
| 112 < (maximumAllocation * invMat.getScaleX() * invMat.getScaleY()); | |
| 113 } | |
| 114 | |
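The "skip the division step" remark in cache_size_okay() above is a simple rearrangement: rather than computing the scaled size as origSize / (invScaleX * invScaleY) and comparing it to the limit, both sides of the inequality are multiplied by the inverse scales. A minimal sketch of the equivalent check, with hypothetical names and assuming positive scale factors:

```cpp
// Hypothetical sketch; assumes invScaleX, invScaleY > 0.
static bool scaled_size_okay(double origSize, double invScaleX, double invScaleY,
                             double maxAllocation) {
    // Conceptually: (origSize / (invScaleX * invScaleY)) < maxAllocation.
    // Multiplying both sides by (invScaleX * invScaleY) avoids the division:
    return origSize < maxAllocation * invScaleX * invScaleY;
}
```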
| 115 /* | |
| 116 * High quality is implemented by performing up-right scale-only filtering and then | |
| 117 * using bilerp for any remaining transformations. | |
| 118 */ | |
| 119 void SkBitmapProcState::processHQRequest() { | |
| 120 SkASSERT(kHigh_SkFilterQuality == fFilterLevel); | |
| 121 | |
| 122 // Our default return state is to downgrade the request to Medium, w/ or w/o setting fBitmap | |
| 123 // to a valid bitmap. If we succeed, we will set this to Low instead. | |
| 124 fFilterLevel = kMedium_SkFilterQuality; | |
| 125 | |
| 126 if (kN32_SkColorType != fOrigBitmap.colorType() || !cache_size_okay(fOrigBitmap, fInvMatrix) || | |
| 127 fInvMatrix.hasPerspective()) | |
| 128 { | |
| 129 return; // can't handle the request | |
| 130 } | |
| 131 | |
| 132 SkScalar invScaleX = fInvMatrix.getScaleX(); | |
| 133 SkScalar invScaleY = fInvMatrix.getScaleY(); | |
| 134 if (fInvMatrix.getType() & SkMatrix::kAffine_Mask) { | |
| 135 SkSize scale; | |
| 136 if (!fInvMatrix.decomposeScale(&scale)) { | |
| 137 return; | |
| 138 } | |
| 139 invScaleX = scale.width(); | |
| 140 invScaleY = scale.height(); | |
| 141 } | |
| 142 if (SkScalarNearlyEqual(invScaleX, 1) && SkScalarNearlyEqual(invScaleY, 1)) { | |
| 143 return; // no need for HQ | |
| 144 } | |
| 145 | |
| 146 SkScalar trueDestWidth = fOrigBitmap.width() / invScaleX; | |
| 147 SkScalar trueDestHeight = fOrigBitmap.height() / invScaleY; | |
| 148 SkScalar roundedDestWidth = SkScalarRoundToScalar(trueDestWidth); | |
| 149 SkScalar roundedDestHeight = SkScalarRoundToScalar(trueDestHeight); | |
| 150 | |
| 151 if (!SkBitmapCache::Find(fOrigBitmap, roundedDestWidth, roundedDestHeight, &fScaledBitmap)) { | |
| 152 if (!SkBitmapScaler::Resize(&fScaledBitmap, | |
| 153 fOrigBitmap, | |
| 154 SkBitmapScaler::RESIZE_BEST, | |
| 155 roundedDestWidth, | |
| 156 roundedDestHeight, | |
| 157 SkResourceCache::GetAllocator())) { | |
| 158 return; // we failed to create fScaledBitmap | |
| 159 } | |
| 160 | |
| 161 SkASSERT(fScaledBitmap.getPixels()); | |
| 162 fScaledBitmap.setImmutable(); | |
| 163 SkBitmapCache::Add(fOrigBitmap, roundedDestWidth, roundedDestHeight, fScaledBitmap); | |
| 164 } | |
| 165 | |
| 166 SkASSERT(fScaledBitmap.getPixels()); | |
| 167 fBitmap = &fScaledBitmap; | |
| 168 | |
| 169 fInvMatrix.postScale(roundedDestWidth / fOrigBitmap.width(), | |
| 170 roundedDestHeight / fOrigBitmap.height()); | |
| 171 fFilterLevel = kLow_SkFilterQuality; | |
| 172 } | |
| 173 | |
| 174 /* | |
| 175 * Modulo internal errors, this should always succeed *if* the matrix is downscaling | |
| 176 * (in this case, we have the inverse, so it succeeds if fInvMatrix is upscaling) | |
| 177 */ | |
| 178 void SkBitmapProcState::processMediumRequest() { | |
| 179 SkASSERT(kMedium_SkFilterQuality == fFilterLevel); | |
| 180 | |
| 181 // Our default return state is to downgrade the request to Low, w/ or w/o setting fBitmap | |
| 182 // to a valid bitmap. | |
| 183 fFilterLevel = kLow_SkFilterQuality; | |
| 184 | |
| 185 SkSize invScaleSize; | |
| 186 if (!fInvMatrix.decomposeScale(&invScaleSize, NULL)) { | |
| 187 return; | |
| 188 } | |
| 189 SkScalar invScale = SkScalarSqrt(invScaleSize.width() * invScaleSize.height()); | |
| 190 | |
| 191 if (invScale > SK_Scalar1) { | |
| 192 fCurrMip.reset(SkMipMapCache::FindAndRef(fOrigBitmap)); | |
| 193 if (NULL == fCurrMip.get()) { | |
| 194 fCurrMip.reset(SkMipMapCache::AddAndRef(fOrigBitmap)); | |
| 195 if (NULL == fCurrMip.get()) { | |
| 196 return; | |
| 197 } | |
| 198 } | |
| 199 // diagnostic for a crasher... | |
| 200 if (NULL == fCurrMip->data()) { | |
| 201 sk_throw(); | |
| 202 } | |
| 203 | |
| 204 SkScalar levelScale = SkScalarInvert(invScale); | |
| 205 SkMipMap::Level level; | |
| 206 if (fCurrMip->extractLevel(levelScale, &level)) { | |
| 207 SkScalar invScaleFixup = level.fScale; | |
| 208 fInvMatrix.postScale(invScaleFixup, invScaleFixup); | |
| 209 | |
| 210 const SkImageInfo info = fOrigBitmap.info().makeWH(level.fWidth, level.fHeight); | |
| 211 // todo: if we could wrap the fCurrMip in a pixelref, then we could just install | |
| 212 // that here, and not need to explicitly track it ourselves. | |
| 213 fScaledBitmap.installPixels(info, level.fPixels, level.fRowBytes); | |
| 214 fBitmap = &fScaledBitmap; | |
| 215 } else { | |
| 216 // failed to extract, so release the mipmap | |
| 217 fCurrMip.reset(NULL); | |
| 218 } | |
| 219 } | |
| 220 } | |
| 221 | |
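A worked example of the mipmap fixup in processMediumRequest() above (numbers are illustrative only, not from this CL): drawing a 1000 px wide bitmap at roughly 280 px gives invScale ≈ 3.57, so levelScale ≈ 0.28, and extractLevel() might hand back the 250 px level with fScale = 0.25. fInvMatrix previously mapped device space into the 1000 px bitmap; post-scaling it by 0.25 makes it map into the 250 px level instead, and the remaining ≈1.12x upscale from that level is handled by the Low-quality bilerp pass.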
| 222 bool SkBitmapProcState::lockBaseBitmap() { | |
| 223 // TODO(reed): use bitmap cache here? | |
| 224 fScaledBitmap = fOrigBitmap; | |
| 225 fScaledBitmap.lockPixels(); | |
| 226 if (NULL == fScaledBitmap.getPixels()) { | |
| 227 return false; | |
| 228 } | |
| 229 fBitmap = &fScaledBitmap; | |
| 230 return true; | |
| 231 } | |
| 232 | |
| 233 static bool valid_for_drawing(const SkBitmap& bm) { | |
| 234 if (0 == bm.width() || 0 == bm.height()) { | |
| 235 return false; // nothing to draw | |
| 236 } | |
| 237 if (NULL == bm.pixelRef()) { | |
| 238 return false; // no pixels to read | |
| 239 } | |
| 240 if (bm.getTexture()) { | |
| 241 // we can handle texture (ugh) since lockPixels will perform a read-back | |
| 242 return true; | |
| 243 } | |
| 244 if (kIndex_8_SkColorType == bm.colorType()) { | |
| 245 SkAutoLockPixels alp(bm); // but we need to call it before getColorTable() is safe. | |
| 246 if (!bm.getColorTable()) { | |
| 247 return false; | |
| 248 } | |
| 249 } | |
| 250 return true; | |
| 251 } | |
| 252 | |
| 253 /* | 106 /* |
| 254 * Analyze filter-quality and matrix, and decide how to implement that. | 107 * Analyze filter-quality and matrix, and decide how to implement that. |
| 255 * | 108 * |
| 256 * In general, we cascade down the request level [ High ... None ] | 109 * In general, we cascade down the request level [ High ... None ] |
| 257 * - for a given level, if we can fulfill it, fine, else | 110 * - for a given level, if we can fulfill it, fine, else |
| 258 * - else we downgrade to the next lower level and try again. | 111 * - else we downgrade to the next lower level and try again. |
| 259 * We can always fulfill requests for Low and None | 112 * We can always fulfill requests for Low and None |
| 260 * - sometimes we will "ignore" Low and give None, but this is likely a legacy perf hack | 113 * - sometimes we will "ignore" Low and give None, but this is likely a legacy perf hack |
| 261 * and may be removed. | 114 * and may be removed. |
| 262 */ | 115 */ |
| 263 bool SkBitmapProcState::chooseProcs(const SkMatrix& inv, const SkPaint& paint) { | 116 bool SkBitmapProcState::chooseProcs(const SkMatrix& inv, const SkPaint& paint) { |
| 264 if (!valid_for_drawing(fOrigBitmap)) { | |
| 265 return false; | |
| 266 } | |
| 267 | |
| 268 fBitmap = NULL; | 117 fBitmap = NULL; |
| 269 fInvMatrix = inv; | 118 fInvMatrix = inv; |
| 270 fFilterLevel = paint.getFilterQuality(); | 119 fFilterLevel = paint.getFilterQuality(); |
| 271 | 120 |
| 272 if (kHigh_SkFilterQuality == fFilterLevel) { | 121 SkDefaultBitmapController controller; |
| 273 this->processHQRequest(); | 122 fBMState = controller.requestBitmap(fOrigBitmap, inv, paint.getFilterQuality(), |
| 123 fBMStateStorage.get(), fBMStateStorage.size()); |
| 124 if (NULL == fBMState) { |
| 125 return false; |
| 274 } | 126 } |
| 275 SkASSERT(fFilterLevel < kHigh_SkFilterQuality); | 127 fBitmap = &fBMState->lockedBitmap(); |
| 276 | 128 fInvMatrix = fBMState->invMatrix(); |
| 277 if (kMedium_SkFilterQuality == fFilterLevel) { | 129 fFilterLevel = fBMState->quality(); |
| 278 this->processMediumRequest(); | 130 SkASSERT(fBitmap->getPixels()); |
| 279 } | 131 |
| 280 SkASSERT(fFilterLevel < kMedium_SkFilterQuality); | |
| 281 | |
| 282 if (NULL == fBitmap) { | |
| 283 if (!this->lockBaseBitmap()) { | |
| 284 return false; | |
| 285 } | |
| 286 } | |
| 287 SkASSERT(fBitmap); | |
| 288 | |
| 289 bool trivialMatrix = (fInvMatrix.getType() & ~SkMatrix::kTranslate_Mask) == 0; | 132 bool trivialMatrix = (fInvMatrix.getType() & ~SkMatrix::kTranslate_Mask) == 0; |
| 290 bool clampClamp = SkShader::kClamp_TileMode == fTileModeX && | 133 bool clampClamp = SkShader::kClamp_TileMode == fTileModeX && |
| 291 SkShader::kClamp_TileMode == fTileModeY; | 134 SkShader::kClamp_TileMode == fTileModeY; |
| 292 | 135 |
| 293 // Most of the scanline procs deal with "unit" texture coordinates, as this | 136 // Most of the scanline procs deal with "unit" texture coordinates, as this |
| 294 // makes it easy to perform tiling modes (repeat = (x & 0xFFFF)). To generate | 137 // makes it easy to perform tiling modes (repeat = (x & 0xFFFF)). To generate |
| 295 // those, we divide the matrix by its dimensions here. | 138 // those, we divide the matrix by its dimensions here. |
| 296 // | 139 // |
| 297 // We don't do this if we're either trivial (can ignore the matrix) or clamping | 140 // We don't do this if we're either trivial (can ignore the matrix) or clamping |
| 298 // in both X and Y since clamping to width,height is just as easy as to 0xFFFF. | 141 // in both X and Y since clamping to width,height is just as easy as to 0xFFFF. |
| (...skipping 697 matching lines...) | |
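The "unit texture coordinates" comment above refers to normalizing coordinates so that one full bitmap width equals 1.0 in 16.16 fixed point, at which point repeat tiling collapses to a mask. A minimal, hypothetical sketch of the idea (not code from this CL):

```cpp
// Hypothetical sketch of "repeat = (x & 0xFFFF)"; not code from this CL.
#include <cstdint>

// fx is a 16.16 fixed-point coordinate where 0x10000 means one full bitmap width.
static inline uint32_t repeat_unit_coord(uint32_t fx) {
    return fx & 0xFFFF;                       // wrap into [0, 1) of the bitmap
}

// Map a wrapped unit coordinate back to a texel column for a bitmap of 'width' pixels.
static inline uint32_t unit_coord_to_texel(uint32_t unitFx, uint32_t width) {
    return (unitFx * width) >> 16;            // always lands in [0, width - 1]
}
```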
| 996 fx += dx; | 839 fx += dx; |
| 997 } | 840 } |
| 998 } else { | 841 } else { |
| 999 for (int i = 0; i < count; ++i) { | 842 for (int i = 0; i < count; ++i) { |
| 1000 dst[i] = src[SkClampMax(SkFractionalIntToInt(fx), maxX)]; | 843 dst[i] = src[SkClampMax(SkFractionalIntToInt(fx), maxX)]; |
| 1001 fx += dx; | 844 fx += dx; |
| 1002 } | 845 } |
| 1003 } | 846 } |
| 1004 } | 847 } |
| 1005 | 848 |