| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2006 The Android Open Source Project | 2 * Copyright 2006 The Android Open Source Project |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "SkMatrix.h" | 8 #include "SkMatrix.h" |
| 9 #include "Sk64.h" | 9 #include "Sk64.h" |
| 10 #include "SkFloatBits.h" | 10 #include "SkFloatBits.h" |
| (...skipping 35 matching lines...) | |
| 46 }; | 46 }; |
| 47 | 47 |
| 48 #ifdef SK_SCALAR_IS_FLOAT | 48 #ifdef SK_SCALAR_IS_FLOAT |
| 49 static const int32_t kScalar1Int = 0x3f800000; | 49 static const int32_t kScalar1Int = 0x3f800000; |
| 50 #else | 50 #else |
| 51 #define scalarAsInt(x) (x) | 51 #define scalarAsInt(x) (x) |
| 52 static const int32_t kScalar1Int = (1 << 16); | 52 static const int32_t kScalar1Int = (1 << 16); |
| 53 static const int32_t kPersp1Int = (1 << 30); | 53 static const int32_t kPersp1Int = (1 << 30); |
| 54 #endif | 54 #endif |
| 55 | 55 |
| 56 #ifdef SK_SCALAR_SLOW_COMPARES | |
| 57 static const int32_t kPersp1Int = 0x3f800000; | |
| 58 #endif | |
| 59 | |
| 60 uint8_t SkMatrix::computePerspectiveTypeMask() const { | 56 uint8_t SkMatrix::computePerspectiveTypeMask() const { |
| 61 #ifdef SK_SCALAR_SLOW_COMPARES | |
| 62 if (SkScalarAs2sCompliment(fMat[kMPersp0]) | | |
| 63 SkScalarAs2sCompliment(fMat[kMPersp1]) | | |
| 64 (SkScalarAs2sCompliment(fMat[kMPersp2]) - kPersp1Int)) { | |
| 65 return SkToU8(kORableMasks); | |
| 66 } | |
| 67 #else | |
| 68 // Benchmarking suggests that replacing this set of SkScalarAs2sCompliment | 57 // Benchmarking suggests that replacing this set of SkScalarAs2sCompliment |
| 69 // calls is a win, but replacing those below is not. We don't yet understand | 58 // calls is a win, but replacing those below is not. We don't yet understand |
| 70 // that result. | 59 // that result. |
| 71 if (fMat[kMPersp0] != 0 || fMat[kMPersp1] != 0 || | 60 if (fMat[kMPersp0] != 0 || fMat[kMPersp1] != 0 || |
| 72 fMat[kMPersp2] != kMatrix22Elem) { | 61 fMat[kMPersp2] != kMatrix22Elem) { |
| 73 // If this is a perspective transform, we return true for all other | 62 // If this is a perspective transform, we return true for all other |
| 74 // transform flags - this does not disable any optimizations, respects | 63 // transform flags - this does not disable any optimizations, respects |
| 75 // the rule that the type mask must be conservative, and speeds up | 64 // the rule that the type mask must be conservative, and speeds up |
| 76 // type mask computation. | 65 // type mask computation. |
| 77 return SkToU8(kORableMasks); | 66 return SkToU8(kORableMasks); |
| 78 } | 67 } |
| 79 #endif | |
| 80 | 68 |
| 81 return SkToU8(kOnlyPerspectiveValid_Mask | kUnknown_Mask); | 69 return SkToU8(kOnlyPerspectiveValid_Mask | kUnknown_Mask); |
| 82 } | 70 } |
| 83 | 71 |
| 84 uint8_t SkMatrix::computeTypeMask() const { | 72 uint8_t SkMatrix::computeTypeMask() const { |
| 85 unsigned mask = 0; | 73 unsigned mask = 0; |
| 86 | 74 |
| 87 #ifdef SK_SCALAR_SLOW_COMPARES | |
| 88 if (SkScalarAs2sCompliment(fMat[kMPersp0]) | | |
| 89 SkScalarAs2sCompliment(fMat[kMPersp1]) | | |
| 90 (SkScalarAs2sCompliment(fMat[kMPersp2]) - kPersp1Int)) { | |
| 91 return SkToU8(kORableMasks); | |
| 92 } | |
| 93 | |
| 94 if (SkScalarAs2sCompliment(fMat[kMTransX]) | | |
| 95 SkScalarAs2sCompliment(fMat[kMTransY])) { | |
| 96 mask |= kTranslate_Mask; | |
| 97 } | |
| 98 #else | |
| 99 if (fMat[kMPersp0] != 0 || fMat[kMPersp1] != 0 || | 75 if (fMat[kMPersp0] != 0 || fMat[kMPersp1] != 0 || |
| 100 fMat[kMPersp2] != kMatrix22Elem) { | 76 fMat[kMPersp2] != kMatrix22Elem) { |
| 101 // Once it is determined that this is a perspective transform, | 77 // Once it is determined that this is a perspective transform, |
| 102 // all other flags are moot as far as optimizations are concerned. | 78 // all other flags are moot as far as optimizations are concerned. |
| 103 return SkToU8(kORableMasks); | 79 return SkToU8(kORableMasks); |
| 104 } | 80 } |
| 105 | 81 |
| 106 if (fMat[kMTransX] != 0 || fMat[kMTransY] != 0) { | 82 if (fMat[kMTransX] != 0 || fMat[kMTransY] != 0) { |
| 107 mask |= kTranslate_Mask; | 83 mask |= kTranslate_Mask; |
| 108 } | 84 } |
| 109 #endif | |
| 110 | 85 |
| 111 int m00 = SkScalarAs2sCompliment(fMat[SkMatrix::kMScaleX]); | 86 int m00 = SkScalarAs2sCompliment(fMat[SkMatrix::kMScaleX]); |
| 112 int m01 = SkScalarAs2sCompliment(fMat[SkMatrix::kMSkewX]); | 87 int m01 = SkScalarAs2sCompliment(fMat[SkMatrix::kMSkewX]); |
| 113 int m10 = SkScalarAs2sCompliment(fMat[SkMatrix::kMSkewY]); | 88 int m10 = SkScalarAs2sCompliment(fMat[SkMatrix::kMSkewY]); |
| 114 int m11 = SkScalarAs2sCompliment(fMat[SkMatrix::kMScaleY]); | 89 int m11 = SkScalarAs2sCompliment(fMat[SkMatrix::kMScaleY]); |
| 115 | 90 |
| 116 if (m01 | m10) { | 91 if (m01 | m10) { |
| 117 // The skew components may be scale-inducing, unless we are dealing | 92 // The skew components may be scale-inducing, unless we are dealing |
| 118 // with a pure rotation. Testing for a pure rotation is expensive, | 93 // with a pure rotation. Testing for a pure rotation is expensive, |
| 119 // so we opt for being conservative by always setting the scale bit. | 94 // so we opt for being conservative by always setting the scale bit. |
| (...skipping 2017 matching lines...) | |
| 2137 rotation1->fX = cos1; | 2112 rotation1->fX = cos1; |
| 2138 rotation1->fY = sin1; | 2113 rotation1->fY = sin1; |
| 2139 } | 2114 } |
| 2140 if (NULL != rotation2) { | 2115 if (NULL != rotation2) { |
| 2141 rotation2->fX = cos2; | 2116 rotation2->fX = cos2; |
| 2142 rotation2->fY = sin2; | 2117 rotation2->fY = sin2; |
| 2143 } | 2118 } |
| 2144 | 2119 |
| 2145 return true; | 2120 return true; |
| 2146 } | 2121 } |
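
For readers following the change: both the deleted SK_SCALAR_SLOW_COMPARES branches and the SkScalarAs2sCompliment calls that survive in computeTypeMask() rest on the same trick, namely mapping a float's IEEE-754 sign-magnitude bit pattern to a 2's-complement integer so that "is this element 0 (or 1)?" can be answered with integer ORs and a subtraction rather than float compares. The sketch below only illustrates that idea under the assumption of 32-bit IEEE-754 scalars; asTwosComplement and hasPerspective are hypothetical names, not the helpers from SkFloatBits.h.

```cpp
#include <cstdint>
#include <cstring>

// Illustrative reconstruction (not the SkFloatBits.h source): map a float's
// sign-magnitude bit pattern to a 2's-complement integer. Both +0.0f and
// -0.0f map to 0, and ordering is preserved, so "f != 0.0f" becomes an
// integer test and several such tests can be folded into one OR.
static int32_t asTwosComplement(float f) {
    int32_t bits;
    std::memcpy(&bits, &f, sizeof(bits));  // reinterpret the float's bits
    if (bits < 0) {                        // sign bit set: negative float
        bits &= 0x7FFFFFFF;                // drop the sign bit...
        bits = -bits;                      // ...negate the magnitude instead
    }
    return bits;
}

// Hypothetical equivalent of the removed SK_SCALAR_SLOW_COMPARES branch:
// 0x3f800000 is the bit pattern of 1.0f, so the whole expression asks
// "does any perspective element differ from the identity values (0, 0, 1)?"
static bool hasPerspective(float persp0, float persp1, float persp2) {
    const int32_t kPersp1Int = 0x3f800000;
    return (asTwosComplement(persp0) |
            asTwosComplement(persp1) |
            (asTwosComplement(persp2) - kPersp1Int)) != 0;
}
```

The benchmarking comment in the diff is about exactly this trade-off: replacing the integer form with plain float compares was a measured win for the perspective row, but not for the scale/skew elements further down, which is why those keep SkScalarAs2sCompliment.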
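The comments in both compute functions lean on the rule that the type mask only has to be conservative: a set bit may be a false positive, but a cleared bit must be exact, because callers use cleared bits to select fast paths. The caller below is hypothetical (not taken from Skia), though getType(), the TypeMask flags, getTranslateX()/getTranslateY(), and mapPoints() are the real public API; it shows why returning kORableMasks for any perspective matrix is safe: it can only push work onto the general path, never onto a wrong one.

```cpp
#include "SkMatrix.h"
#include "SkPoint.h"

// Hypothetical caller (not from Skia): fast paths key off *cleared* mask bits,
// so a conservatively over-set mask such as kORableMasks can only forgo an
// optimization, never select an incorrect code path.
static void transformPoints(const SkMatrix& m, SkPoint pts[], int count) {
    SkMatrix::TypeMask type = m.getType();
    if (type == SkMatrix::kIdentity_Mask) {
        return;  // identity: nothing to do
    }
    if (type == SkMatrix::kTranslate_Mask) {
        // translate-only fast path
        const SkScalar tx = m.getTranslateX();
        const SkScalar ty = m.getTranslateY();
        for (int i = 0; i < count; ++i) {
            pts[i].offset(tx, ty);
        }
        return;
    }
    // general path: handles scale, skew, and perspective
    m.mapPoints(pts, count);
}
```

The same reasoning covers the skew case in computeTypeMask(): proving that a skewed matrix is a pure rotation (and therefore not scaling) would cost more than the optimization a cleared scale bit might unlock, so the scale bit is simply set.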