| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "Test.h" | 8 #include "Test.h" |
| 9 #include "SkColor.h" | 9 #include "SkColor.h" |
| 10 #include "SkColorPriv.h" | 10 #include "SkColorPriv.h" |
| 11 #include "SkTaskGroup.h" | 11 #include "SkTaskGroup.h" |
| 12 #include "SkXfermode.h" | 12 #include "SkXfermode.h" |
| 13 #include <functional> |
| 13 | 14 |
| 14 #define ASSERT(x) REPORTER_ASSERT(r, x) | 15 struct Results { int diffs, diffs_0x00, diffs_0xff, diffs_by_1; }; |
| 15 | 16 |
| 16 static uint8_t double_to_u8(double d) { | 17 static bool acceptable(const Results& r) { |
| 17 SkASSERT(d >= 0); | 18 #if 0 |
| 18 SkASSERT(d < 256); | 19 SkDebugf("%d diffs, %d at 0x00, %d at 0xff, %d off by 1, all out of 65536\n"
, |
| 19 return uint8_t(d); | 20 r.diffs, r.diffs_0x00, r.diffs_0xff, r.diffs_by_1); |
| 21 #endif |
| 22 return r.diffs_by_1 == r.diffs // never off by more than 1 |
| 23 && r.diffs_0x00 == 0 // transparent must stay transparent |
| 24 && r.diffs_0xff == 0; // opaque must stay opaque |
| 20 } | 25 } |
| 21 | 26 |
| 22 // All algorithms we're testing have this interface. | 27 template <typename Fn> |
| 23 // We want a single channel blend, src over dst, assuming src is premultiplied b
y srcAlpha. | 28 static Results test(Fn&& multiply) { |
| 24 typedef uint8_t(*Blend)(uint8_t dst, uint8_t src, uint8_t srcAlpha); | 29 Results r = { 0,0,0,0 }; |
| 25 | 30 for (int x = 0; x < 256; x++) { |
| 26 // This is our golden algorithm. | 31 for (int y = 0; y < 256; y++) { |
| 27 static uint8_t blend_double_round(uint8_t dst, uint8_t src, uint8_t srcAlpha) { | 32 int p = multiply(x, y), |
| 28 SkASSERT(src <= srcAlpha); | 33 ideal = (x*y+127)/255; |
| 29 return double_to_u8(0.5 + src + dst * (255.0 - srcAlpha) / 255.0); | 34 if (p != ideal) { |
| 35 r.diffs++; |
| 36 if (x == 0x00 || y == 0x00) { r.diffs_0x00++; } |
| 37 if (x == 0xff || y == 0xff) { r.diffs_0xff++; } |
| 38 if (SkTAbs(ideal - p) == 1) { r.diffs_by_1++; } |
| 39 } |
| 40 }} |
| 41 return r; |
| 30 } | 42 } |
| 31 | 43 |
| 32 static uint8_t abs_diff(uint8_t a, uint8_t b) { | 44 DEF_TEST(Blend_byte_multiply, r) { |
| 33 const int diff = a - b; | 45 // These are all temptingly close but fundamentally broken. |
| 34 return diff > 0 ? diff : -diff; | 46 int (*broken[])(int, int) = { |
| 47 [](int x, int y) { return (x*y)>>8; }, |
| 48 [](int x, int y) { return (x*y+128)>>8; }, |
| 49 [](int x, int y) { y += y>>7; return (x*y)>>8; }, |
| 50 }; |
| 51 for (auto multiply : broken) { REPORTER_ASSERT(r, !acceptable(test(multiply)
)); } |
| 52 |
| 53 // These are fine to use, but not perfect. |
| 54 int (*fine[])(int, int) = { |
| 55 [](int x, int y) { return (x*y+x)>>8; }, |
| 56 [](int x, int y) { return (x*y+y)>>8; }, |
| 57 [](int x, int y) { return (x*y+255)>>8; }, |
| 58 [](int x, int y) { y += y>>7; return (x*y+128)>>8; }, |
| 59 }; |
| 60 for (auto multiply : fine) { REPORTER_ASSERT(r, acceptable(test(multiply)));
} |
| 61 |
| 62 // These are perfect. |
| 63 int (*perfect[])(int, int) = { |
| 64 [](int x, int y) { return (x*y+127)/255; }, // Duh. |
| 65 [](int x, int y) { int p = (x*y+128); return (p+(p>>8))>>8; }, |
| 66 [](int x, int y) { return ((x*y+128)*257)>>16; }, |
| 67 }; |
| 68 for (auto multiply : perfect) { REPORTER_ASSERT(r, test(multiply).diffs == 0
); } |
| 35 } | 69 } |
| 36 | 70 |
| 37 static void test(skiatest::Reporter* r, int maxDiff, Blend algorithm, | |
| 38 uint8_t dst, uint8_t src, uint8_t alpha) { | |
| 39 const uint8_t golden = blend_double_round(dst, src, alpha); | |
| 40 const uint8_t blend = algorithm(dst, src, alpha); | |
| 41 if (abs_diff(blend, golden) > maxDiff) { | |
| 42 SkDebugf("dst %02x, src %02x, alpha %02x, |%02x - %02x| > %d\n", | |
| 43 dst, src, alpha, blend, golden, maxDiff); | |
| 44 ASSERT(abs_diff(blend, golden) <= maxDiff); | |
| 45 } | |
| 46 } | |
| 47 | |
| 48 // Exhaustively compare an algorithm against our golden, for a given alpha. | |
| 49 static void test_alpha(skiatest::Reporter* r, uint8_t alpha, int maxDiff, Blend
algorithm) { | |
| 50 SkASSERT(maxDiff >= 0); | |
| 51 | |
| 52 for (unsigned src = 0; src <= alpha; src++) { | |
| 53 for (unsigned dst = 0; dst < 256; dst++) { | |
| 54 test(r, maxDiff, algorithm, dst, src, alpha); | |
| 55 } | |
| 56 } | |
| 57 } | |
| 58 | |
| 59 // Exhaustively compare an algorithm against our golden, for a given dst. | |
| 60 static void test_dst(skiatest::Reporter* r, uint8_t dst, int maxDiff, Blend algo
rithm) { | |
| 61 SkASSERT(maxDiff >= 0); | |
| 62 | |
| 63 for (unsigned alpha = 0; alpha < 256; alpha++) { | |
| 64 for (unsigned src = 0; src <= alpha; src++) { | |
| 65 test(r, maxDiff, algorithm, dst, src, alpha); | |
| 66 } | |
| 67 } | |
| 68 } | |
| 69 | |
| 70 static uint8_t blend_double_trunc(uint8_t dst, uint8_t src, uint8_t srcAlpha) { | |
| 71 return double_to_u8(src + dst * (255.0 - srcAlpha) / 255.0); | |
| 72 } | |
| 73 | |
| 74 static uint8_t blend_float_trunc(uint8_t dst, uint8_t src, uint8_t srcAlpha) { | |
| 75 return double_to_u8(src + dst * (255.0f - srcAlpha) / 255.0f); | |
| 76 } | |
| 77 | |
| 78 static uint8_t blend_float_round(uint8_t dst, uint8_t src, uint8_t srcAlpha) { | |
| 79 return double_to_u8(0.5f + src + dst * (255.0f - srcAlpha) / 255.0f); | |
| 80 } | |
| 81 | |
| 82 static uint8_t blend_255_trunc(uint8_t dst, uint8_t src, uint8_t srcAlpha) { | |
| 83 const uint16_t invAlpha = 255 - srcAlpha; | |
| 84 const uint16_t product = dst * invAlpha; | |
| 85 return src + (product >> 8); | |
| 86 } | |
| 87 | |
| 88 static uint8_t blend_255_round(uint8_t dst, uint8_t src, uint8_t srcAlpha) { | |
| 89 const uint16_t invAlpha = 255 - srcAlpha; | |
| 90 const uint16_t product = dst * invAlpha + 128; | |
| 91 return src + (product >> 8); | |
| 92 } | |
| 93 | |
| 94 static uint8_t blend_256_trunc(uint8_t dst, uint8_t src, uint8_t srcAlpha) { | |
| 95 const uint16_t invAlpha = 256 - (srcAlpha + (srcAlpha >> 7)); | |
| 96 const uint16_t product = dst * invAlpha; | |
| 97 return src + (product >> 8); | |
| 98 } | |
| 99 | |
| 100 static uint8_t blend_256_round(uint8_t dst, uint8_t src, uint8_t srcAlpha) { | |
| 101 const uint16_t invAlpha = 256 - (srcAlpha + (srcAlpha >> 7)); | |
| 102 const uint16_t product = dst * invAlpha + 128; | |
| 103 return src + (product >> 8); | |
| 104 } | |
| 105 | |
| 106 static uint8_t blend_256_round_alt(uint8_t dst, uint8_t src, uint8_t srcAlpha) { | |
| 107 const uint8_t invAlpha8 = 255 - srcAlpha; | |
| 108 const uint16_t invAlpha = invAlpha8 + (invAlpha8 >> 7); | |
| 109 const uint16_t product = dst * invAlpha + 128; | |
| 110 return src + (product >> 8); | |
| 111 } | |
| 112 | |
| 113 static uint8_t blend_256_plus1_trunc(uint8_t dst, uint8_t src, uint8_t srcAlpha)
{ | |
| 114 const uint16_t invAlpha = 256 - (srcAlpha + 1); | |
| 115 const uint16_t product = dst * invAlpha; | |
| 116 return src + (product >> 8); | |
| 117 } | |
| 118 | |
| 119 static uint8_t blend_256_plus1_round(uint8_t dst, uint8_t src, uint8_t srcAlpha)
{ | |
| 120 const uint16_t invAlpha = 256 - (srcAlpha + 1); | |
| 121 const uint16_t product = dst * invAlpha + 128; | |
| 122 return src + (product >> 8); | |
| 123 } | |
| 124 | |
| 125 static uint8_t blend_perfect(uint8_t dst, uint8_t src, uint8_t srcAlpha) { | |
| 126 const uint8_t invAlpha = 255 - srcAlpha; | |
| 127 const uint16_t product = dst * invAlpha + 128; | |
| 128 return src + ((product + (product >> 8)) >> 8); | |
| 129 } | |
| 130 | |
| 131 | |
| 132 // We want 0 diff whenever src is fully transparent. | |
| 133 DEF_TEST(Blend_alpha_0x00, r) { | |
| 134 const uint8_t alpha = 0x00; | |
| 135 | |
| 136 // GOOD | |
| 137 test_alpha(r, alpha, 0, blend_256_round); | |
| 138 test_alpha(r, alpha, 0, blend_256_round_alt); | |
| 139 test_alpha(r, alpha, 0, blend_256_trunc); | |
| 140 test_alpha(r, alpha, 0, blend_double_trunc); | |
| 141 test_alpha(r, alpha, 0, blend_float_round); | |
| 142 test_alpha(r, alpha, 0, blend_float_trunc); | |
| 143 test_alpha(r, alpha, 0, blend_perfect); | |
| 144 | |
| 145 // BAD | |
| 146 test_alpha(r, alpha, 1, blend_255_round); | |
| 147 test_alpha(r, alpha, 1, blend_255_trunc); | |
| 148 test_alpha(r, alpha, 1, blend_256_plus1_round); | |
| 149 test_alpha(r, alpha, 1, blend_256_plus1_trunc); | |
| 150 } | |
| 151 | |
| 152 // We want 0 diff whenever dst is 0. | |
| 153 DEF_TEST(Blend_dst_0x00, r) { | |
| 154 const uint8_t dst = 0x00; | |
| 155 | |
| 156 // GOOD | |
| 157 test_dst(r, dst, 0, blend_255_round); | |
| 158 test_dst(r, dst, 0, blend_255_trunc); | |
| 159 test_dst(r, dst, 0, blend_256_plus1_round); | |
| 160 test_dst(r, dst, 0, blend_256_plus1_trunc); | |
| 161 test_dst(r, dst, 0, blend_256_round); | |
| 162 test_dst(r, dst, 0, blend_256_round_alt); | |
| 163 test_dst(r, dst, 0, blend_256_trunc); | |
| 164 test_dst(r, dst, 0, blend_double_trunc); | |
| 165 test_dst(r, dst, 0, blend_float_round); | |
| 166 test_dst(r, dst, 0, blend_float_trunc); | |
| 167 test_dst(r, dst, 0, blend_perfect); | |
| 168 | |
| 169 // BAD | |
| 170 } | |
| 171 | |
| 172 // We want 0 diff whenever src is fully opaque. | |
| 173 DEF_TEST(Blend_alpha_0xFF, r) { | |
| 174 const uint8_t alpha = 0xFF; | |
| 175 | |
| 176 // GOOD | |
| 177 test_alpha(r, alpha, 0, blend_255_round); | |
| 178 test_alpha(r, alpha, 0, blend_255_trunc); | |
| 179 test_alpha(r, alpha, 0, blend_256_plus1_round); | |
| 180 test_alpha(r, alpha, 0, blend_256_plus1_trunc); | |
| 181 test_alpha(r, alpha, 0, blend_256_round); | |
| 182 test_alpha(r, alpha, 0, blend_256_round_alt); | |
| 183 test_alpha(r, alpha, 0, blend_256_trunc); | |
| 184 test_alpha(r, alpha, 0, blend_double_trunc); | |
| 185 test_alpha(r, alpha, 0, blend_float_round); | |
| 186 test_alpha(r, alpha, 0, blend_float_trunc); | |
| 187 test_alpha(r, alpha, 0, blend_perfect); | |
| 188 | |
| 189 // BAD | |
| 190 } | |
| 191 | |
| 192 // We want 0 diff whenever dst is 0xFF. | |
| 193 DEF_TEST(Blend_dst_0xFF, r) { | |
| 194 const uint8_t dst = 0xFF; | |
| 195 | |
| 196 // GOOD | |
| 197 test_dst(r, dst, 0, blend_256_round); | |
| 198 test_dst(r, dst, 0, blend_256_round_alt); | |
| 199 test_dst(r, dst, 0, blend_double_trunc); | |
| 200 test_dst(r, dst, 0, blend_float_round); | |
| 201 test_dst(r, dst, 0, blend_float_trunc); | |
| 202 test_dst(r, dst, 0, blend_perfect); | |
| 203 | |
| 204 // BAD | |
| 205 test_dst(r, dst, 1, blend_255_round); | |
| 206 test_dst(r, dst, 1, blend_255_trunc); | |
| 207 test_dst(r, dst, 1, blend_256_plus1_round); | |
| 208 test_dst(r, dst, 1, blend_256_plus1_trunc); | |
| 209 test_dst(r, dst, 1, blend_256_trunc); | |
| 210 } | |
| 211 | |
| 212 // We'd like diff <= 1 everywhere. | |
| 213 DEF_TEST(Blend_alpha_Exhaustive, r) { | |
| 214 for (unsigned alpha = 0; alpha < 256; alpha++) { | |
| 215 // PERFECT | |
| 216 test_alpha(r, alpha, 0, blend_float_round); | |
| 217 test_alpha(r, alpha, 0, blend_perfect); | |
| 218 | |
| 219 // GOOD | |
| 220 test_alpha(r, alpha, 1, blend_255_round); | |
| 221 test_alpha(r, alpha, 1, blend_256_plus1_round); | |
| 222 test_alpha(r, alpha, 1, blend_256_round); | |
| 223 test_alpha(r, alpha, 1, blend_256_round_alt); | |
| 224 test_alpha(r, alpha, 1, blend_256_trunc); | |
| 225 test_alpha(r, alpha, 1, blend_double_trunc); | |
| 226 test_alpha(r, alpha, 1, blend_float_trunc); | |
| 227 | |
| 228 // BAD | |
| 229 test_alpha(r, alpha, 2, blend_255_trunc); | |
| 230 test_alpha(r, alpha, 2, blend_256_plus1_trunc); | |
| 231 } | |
| 232 } | |
| 233 | |
| 234 // We'd like diff <= 1 everywhere. | |
| 235 DEF_TEST(Blend_dst_Exhaustive, r) { | |
| 236 for (unsigned dst = 0; dst < 256; dst++) { | |
| 237 // PERFECT | |
| 238 test_dst(r, dst, 0, blend_float_round); | |
| 239 test_dst(r, dst, 0, blend_perfect); | |
| 240 | |
| 241 // GOOD | |
| 242 test_dst(r, dst, 1, blend_255_round); | |
| 243 test_dst(r, dst, 1, blend_256_plus1_round); | |
| 244 test_dst(r, dst, 1, blend_256_round); | |
| 245 test_dst(r, dst, 1, blend_256_round_alt); | |
| 246 test_dst(r, dst, 1, blend_256_trunc); | |
| 247 test_dst(r, dst, 1, blend_double_trunc); | |
| 248 test_dst(r, dst, 1, blend_float_trunc); | |
| 249 | |
| 250 // BAD | |
| 251 test_dst(r, dst, 2, blend_255_trunc); | |
| 252 test_dst(r, dst, 2, blend_256_plus1_trunc); | |
| 253 } | |
| 254 } | |
| 255 // Overall summary: | |
| 256 // PERFECT | |
| 257 // blend_double_round | |
| 258 // blend_float_round | |
| 259 // blend_perfect | |
| 260 // GOOD ENOUGH | |
| 261 // blend_double_trunc | |
| 262 // blend_float_trunc | |
| 263 // blend_256_round | |
| 264 // blend_256_round_alt | |
| 265 // NOT GOOD ENOUGH | |
| 266 // all others | |
| 267 // | |
| 268 // Algorithms that make sense to use in Skia: blend_256_round, blend_256_round_
alt, blend_perfect | |
| 269 | |
| 270 DEF_TEST(Blend_premul_begets_premul, r) { | 71 DEF_TEST(Blend_premul_begets_premul, r) { |
| 271 // This test is quite slow, even if you have enough cores to run each mode i
n parallel. | 72 // This test is quite slow, even if you have enough cores to run each mode i
n parallel. |
| 272 if (!r->allowExtendedTest()) { | 73 if (!r->allowExtendedTest()) { |
| 273 return; | 74 return; |
| 274 } | 75 } |
| 275 | 76 |
| 276 // No matter what xfermode we use, premul inputs should create premul output
s. | 77 // No matter what xfermode we use, premul inputs should create premul output
s. |
| 277 auto test_mode = [&](int m) { | 78 auto test_mode = [&](int m) { |
| 278 SkXfermode::Mode mode = (SkXfermode::Mode)m; | 79 SkXfermode::Mode mode = (SkXfermode::Mode)m; |
| 279 if (mode == SkXfermode::kSrcOver_Mode) { | 80 if (mode == SkXfermode::kSrcOver_Mode) { |
| (...skipping 12 matching lines...) Expand all Loading... |
| 292 xfermode->xfer32(&dst, &src, 1, nullptr); // To keep it simple, no
AA. | 93 xfermode->xfer32(&dst, &src, 1, nullptr); // To keep it simple, no
AA. |
| 293 if (!SkPMColorValid(dst)) { | 94 if (!SkPMColorValid(dst)) { |
| 294 ERRORF(r, "%08x is not premul using %s", dst, SkXfermode::ModeNa
me(mode)); | 95 ERRORF(r, "%08x is not premul using %s", dst, SkXfermode::ModeNa
me(mode)); |
| 295 } | 96 } |
| 296 }}}} | 97 }}}} |
| 297 }; | 98 }; |
| 298 | 99 |
| 299 // Parallelism helps speed things up on my desktop from ~725s to ~50s. | 100 // Parallelism helps speed things up on my desktop from ~725s to ~50s. |
| 300 sk_parallel_for(SkXfermode::kLastMode, test_mode); | 101 sk_parallel_for(SkXfermode::kLastMode, test_mode); |
| 301 } | 102 } |
| OLD | NEW |