Index: include/private/SkFixed.h
diff --git a/include/private/SkFixed.h b/include/private/SkFixed.h
index c90d5e9ffd093e98a7335fd210be962a2436ac3e..7023f59cac626bdd9a9d9174afb5eb89c7eaaa23 100644
--- a/include/private/SkFixed.h
+++ b/include/private/SkFixed.h
@@ -31,7 +31,63 @@ typedef int32_t SkFixed;
 #define SK_FixedRoot2Over2 (0xB505)
 #define SkFixedToFloat(x) ((x) * 1.52587890625e-5f)
-#define SkFloatToFixed(x) ((SkFixed)((x) * SK_Fixed1))
+
+///////////////////////////////////////////////////////////////////////////////
+// ASM alternatives for our portable versions.
+
+#if defined(SK_CPU_ARM32)
+    /* This does not handle NaN or other obscurities, but is faster
+       than (int)(x*65536). When built on Android with -Os, it needs to be
+       forced inline or we lose the speed benefit.
+    */
+    SK_ALWAYS_INLINE SkFixed SkFloatToFixed_arm(float x)
+    {
+        int32_t y, z;
+        asm("movs    %1, %3, lsl #1         \n"
+            "mov     %2, #0x8E              \n"
+            "sub     %1, %2, %1, lsr #24    \n"
+            "mov     %2, %3, lsl #8         \n"
+            "orr     %2, %2, #0x80000000    \n"
+            "mov     %1, %2, lsr %1         \n"
+            "it      cs                     \n"
+            "rsbcs   %1, %1, #0             \n"
+            : "=r"(x), "=&r"(y), "=&r"(z)
+            : "r"(x)
+            : "cc"
+            );
+        return y;
+    }
+    inline SkFixed SkFixedMul_arm(SkFixed x, SkFixed y)
+    {
+        int32_t t;
+        asm("smull   %0, %2, %1, %3         \n"
+            "mov     %0, %0, lsr #16        \n"
+            "orr     %0, %0, %2, lsl #16    \n"
+            : "=r"(x), "=&r"(y), "=r"(t)
+            : "r"(x), "1"(y)
+            :
+            );
+        return x;
+    }
+
+    #define SkFixedMul(x, y)         SkFixedMul_arm(x, y)
+    #define SkFloatToFixed_Unsafe(x) SkFloatToFixed_arm(x)
+#else
+    inline SkFixed SkFixedMul_longlong(SkFixed a, SkFixed b) {
+        return (SkFixed)((int64_t)a * b >> 16);
+    }
+
+    #define SkFixedMul(x, y)         SkFixedMul_longlong(x, y)
+    #define SkFloatToFixed_Unsafe(x) ((SkFixed)((x) * SK_Fixed1))
+#endif
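
The asm above decodes the IEEE-754 single directly: the biased exponent is pulled out of bits 30:23, the mantissa (with its implicit leading 1 placed at bit 31) is shifted right by 142 minus the exponent, and the result is negated when the sign bit was set. SkFixedMul_arm computes the same thing as SkFixedMul_longlong, with smull producing the full 64-bit product. A rough portable C sketch of the float path, for reference only (the name FloatToFixedSketch is illustrative, not part of the patch, and like the asm it ignores NaN, infinity, and out-of-range inputs):

    #include <stdint.h>
    #include <string.h>

    static inline int32_t FloatToFixedSketch(float x) {
        uint32_t bits;
        memcpy(&bits, &x, sizeof bits);             // reinterpret the IEEE-754 encoding
        // value = 1.mantissa * 2^(e-127); as 16.16 that is frac >> ((127+31-16) - e).
        uint32_t shift = 0x8E - (bits << 1 >> 24);  // 142 - biased exponent
        uint32_t frac  = (bits << 8) | 0x80000000;  // mantissa, implicit 1 at bit 31
        // ARM register-specified shifts of 32..255 yield 0; C leaves them
        // undefined, so guard explicitly.
        uint32_t mag = shift < 32 ? frac >> shift : 0;
        return (int32_t)bits < 0 ? -(int32_t)mag : (int32_t)mag;
    }
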
+
+///////////////////////////////////////////////////////////////////////////////
+
+static inline SkFixed SkFloatToFixed(float x) {
+    const SkFixed result = SkFloatToFixed_Unsafe(x);
+    SkASSERT(truncf(x * SK_Fixed1) == static_cast<float>(result));
+    return result;
+}
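
With SkFloatToFixed_Unsafe factored out, the public SkFloatToFixed now carries a debug-only overflow check (replacing the separate SkFloatToFixed_Check helper removed further down). Illustrative values, assuming the usual 16.16 interpretation where SK_Fixed1 == 0x10000:

    SkASSERT(SkFloatToFixed(1.5f)  ==  0x00018000);  // 1.5 * 65536
    SkASSERT(SkFloatToFixed(-0.5f) == -0x00008000);  // truncates toward zero
    // 40000.0f is above SK_FixedMax (~32767.99998) and would fire the assert
    // in debug builds; SkFloatPinToFixed below pins instead of asserting.
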
 // Pins over/under flows to SK_FixedMax/SK_FixedMin (slower than just a cast).
 static inline SkFixed SkFloatPinToFixed(float x) {
@@ -44,19 +100,14 @@ static inline SkFixed SkFloatPinToFixed(float x) {
     return result;
 }
-#ifdef SK_DEBUG
-    static inline SkFixed SkFloatToFixed_Check(float x) {
-        int64_t n64 = (int64_t)(x * SK_Fixed1);
-        SkFixed n32 = (SkFixed)n64;
-        SkASSERT(n64 == n32);
-        return n32;
-    }
-#else
-    #define SkFloatToFixed_Check(x) SkFloatToFixed(x)
-#endif
+#define SkFixedToDouble(x) ((x) * 1.52587890625e-5)
+#define SkDoubleToFixed_Unsafe(x) ((SkFixed)((x) * SK_Fixed1))
-#define SkFixedToDouble(x) ((x) * 1.52587890625e-5)
-#define SkDoubleToFixed(x) ((SkFixed)((x) * SK_Fixed1))
+static inline SkFixed SkDoubleToFixed(double x) {
+    const SkFixed result = SkDoubleToFixed_Unsafe(x);
+    SkASSERT(trunc(x * SK_Fixed1) == static_cast<double>(result));
+    return result;
+}
 // Pins over/under flows to SK_FixedMax/SK_FixedMin (slower than just a cast).
 static inline SkFixed SkDoublePinToFixed(double x) {
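
The double path gets the same unsafe/checked split. Since a double's 53-bit mantissa holds any 32-bit SkFixed exactly, converting back out through SkFixedToDouble is lossless; the only rounding happens on the way in. A few illustrative values:

    SkASSERT(SkDoubleToFixed(0.25) == 0x4000);       // 0.25 * 65536
    SkASSERT(SkFixedToDouble(SK_Fixed1) == 1.0);     // 65536 * (1/65536), exact
    // 0.1 is not representable in 16.16: it truncates to 6553/65536.
    SkASSERT(SkFixedToDouble(SkDoubleToFixed(0.1)) != 0.1);
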
@@ -107,56 +158,6 @@ static inline SkFixed SkDoublePinToFixed(double x) {
     SkToS32(SkTPin<int64_t>((SkLeftShift((int64_t)numer, 16) / denom), SK_MinS32, SK_MaxS32))
 #endif
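
For context, the pinned divide whose tail is visible above (SkFixedDiv in this header) widens the numerator to 64 bits, shifts it up 16, divides, and clamps the quotient to the 32-bit range, so for example (hypothetical values, assuming SK_Fixed1 == 0x10000):

    SkASSERT(SkFixedDiv(SK_Fixed1, 2 * SK_Fixed1) == 0x8000);  // 1.0 / 2.0 == 0.5
    SkASSERT(SkFixedDiv(SK_Fixed1, 1) == SK_FixedMax);         // quotient 2^32 is pinned
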
-//////////////////////////////////////////////////////////////////////////////////////////////////////
-// Now look for ASM overrides for our portable versions (should consider putting this in its own file)
-
-inline SkFixed SkFixedMul_longlong(SkFixed a, SkFixed b) {
-    return (SkFixed)((int64_t)a * b >> 16);
-}
-#define SkFixedMul(a,b) SkFixedMul_longlong(a,b)
-
-
-#if defined(SK_CPU_ARM32)
-    /* This guy does not handle NaN or other obscurities, but is faster than
-       than (int)(x*65536). When built on Android with -Os, needs forcing
-       to inline or we lose the speed benefit.
-    */
-    SK_ALWAYS_INLINE SkFixed SkFloatToFixed_arm(float x)
-    {
-        int32_t y, z;
-        asm("movs    %1, %3, lsl #1         \n"
-            "mov     %2, #0x8E              \n"
-            "sub     %1, %2, %1, lsr #24    \n"
-            "mov     %2, %3, lsl #8         \n"
-            "orr     %2, %2, #0x80000000    \n"
-            "mov     %1, %2, lsr %1         \n"
-            "it      cs                     \n"
-            "rsbcs   %1, %1, #0             \n"
-            : "=r"(x), "=&r"(y), "=&r"(z)
-            : "r"(x)
-            : "cc"
-            );
-        return y;
-    }
-    inline SkFixed SkFixedMul_arm(SkFixed x, SkFixed y)
-    {
-        int32_t t;
-        asm("smull   %0, %2, %1, %3         \n"
-            "mov     %0, %0, lsr #16        \n"
-            "orr     %0, %0, %2, lsl #16    \n"
-            : "=r"(x), "=&r"(y), "=r"(t)
-            : "r"(x), "1"(y)
-            :
-            );
-        return x;
-    }
-    #undef SkFixedMul
-    #define SkFixedMul(x, y) SkFixedMul_arm(x, y)
-
-    #undef SkFloatToFixed
-    #define SkFloatToFixed(x) SkFloatToFixed_arm(x)
-#endif
-
 ///////////////////////////////////////////////////////////////////////////////
 #if SK_SCALAR_IS_FLOAT
@@ -177,11 +178,18 @@ inline SkFixed SkFixedMul_longlong(SkFixed a, SkFixed b) {
 typedef int64_t SkFixed3232;   // 32.32
-#define SkIntToFixed3232(x) (SkLeftShift((SkFixed3232)(x), 32))
-#define SkFixed3232ToInt(x) ((int)((x) >> 32))
-#define SkFixedToFixed3232(x) (SkLeftShift((SkFixed3232)(x), 16))
-#define SkFixed3232ToFixed(x) ((SkFixed)((x) >> 16))
-#define SkFloatToFixed3232(x) ((SkFixed3232)((x) * (65536.0f * 65536.0f)))
+#define SK_Fixed3232_1               (static_cast<SkFixed3232>(1) << 32)
+#define SkIntToFixed3232(x)          (SkLeftShift((SkFixed3232)(x), 32))
+#define SkFixed3232ToInt(x)          ((int)((x) >> 32))
+#define SkFixedToFixed3232(x)        (SkLeftShift((SkFixed3232)(x), 16))
+#define SkFixed3232ToFixed(x)        ((SkFixed)((x) >> 16))
+#define SkFloatToFixed3232_Unsafe(x) (static_cast<SkFixed3232>((x) * SK_Fixed3232_1))
+
+static inline SkFixed3232 SkFloatToFixed3232(float x) {
+    const SkFixed3232 result = SkFloatToFixed3232_Unsafe(x);
+    SkASSERT(truncf(x * SK_Fixed3232_1) == static_cast<float>(result));
+    return result;
+}
 #define SkScalarToFixed3232(x) SkFloatToFixed3232(x)
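
SkFloatToFixed3232 follows the same pattern as the 16.16 conversions, with SK_Fixed3232_1 introduced so the 2^32 scale factor is spelled once instead of as 65536.0f * 65536.0f. Illustrative values, assuming the 32.32 layout:

    SkASSERT(SkFloatToFixed3232(1.5f) == 0x180000000LL);                // 1.5 * 2^32
    SkASSERT(SkFixed3232ToFixed(SkFloatToFixed3232(1.5f)) == 0x18000);  // back to 16.16
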