/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkBlitter.h"
#include "SkColor.h"
#include "SkColorFilter.h"
#include "SkPM4f.h"
#include "SkPM4fPriv.h"
#include "SkRasterPipeline.h"
#include "SkShader.h"
#include "SkSRGB.h"
#include "SkXfermode.h"


class SkRasterPipelineBlitter : public SkBlitter {
public:
    static std::unique_ptr<SkBlitter> Create(const SkPixmap&, const SkPaint&);

    void blitH    (int x, int y, int w) override;
    void blitAntiH(int x, int y, const SkAlpha[], const int16_t[]) override;
    void blitMask (const SkMask&, const SkIRect& clip) override;

    // TODO: The default implementations of the other blits look fine,
    // but some of them like blitV could probably benefit from custom
    // blits using something like a SkRasterPipeline::runFew() method.

private:
    SkRasterPipelineBlitter(SkPixmap dst,
                            SkRasterPipeline shader,
                            SkRasterPipeline colorFilter,
                            SkRasterPipeline xfermode,
                            SkPM4f paintColor)
        : fDst(dst)
        , fShader(shader)
        , fColorFilter(colorFilter)
        , fXfermode(xfermode)
        , fPaintColor(paintColor)
    {}

    SkPixmap         fDst;
    SkRasterPipeline fShader, fColorFilter, fXfermode;
    SkPM4f           fPaintColor;

    typedef SkBlitter INHERITED;
};

std::unique_ptr<SkBlitter> SkCreateRasterPipelineBlitter(const SkPixmap& dst,
                                                         const SkPaint& paint) {
    return SkRasterPipelineBlitter::Create(dst, paint);
}
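
// A rough sketch of how the pieces in this file compose (inferred from blitH() below):
// each blit builds a fresh SkRasterPipeline in roughly this order,
//
//     shader -> color filter -> load dst (sRGB) -> xfermode -> [coverage lerp] -> store dst (sRGB),
//
// where the shader, color filter, and xfermode fragments are captured at Create() time
// and the load/lerp/store stages are the helpers defined below.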


// The default shader produces a constant color (from the SkPaint).
static void SK_VECTORCALL constant_color(SkRasterPipeline::Stage* st, size_t x,
                                         Sk4f r, Sk4f g, Sk4f b, Sk4f a,
                                         Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
    auto color = st->ctx<const SkPM4f*>();
    r = color->r();
    g = color->g();
    b = color->b();
    a = color->a();
    st->next(x, r,g,b,a, dr,dg,db,da);
}
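
// Note: fPaintColor is stored premultiplied (see the .premul() call in Create()),
// so this stage can feed the paint color straight through as src.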

// The default transfer mode is srcover, s' = s + d*(1-sa).
static void SK_VECTORCALL srcover(SkRasterPipeline::Stage* st, size_t x,
                                  Sk4f r, Sk4f g, Sk4f b, Sk4f a,
                                  Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
    auto A = 1.0f - a;
    r += dr*A;
    g += dg*A;
    b += db*A;
    a += da*A;
    st->next(x, r,g,b,a, dr,dg,db,da);
}
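
// With premultiplied colors every channel, alpha included, uses the same srcover
// formula, which is why a += da*A mirrors the color channels above.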

static Sk4f lerp(const Sk4f& from, const Sk4f& to, const Sk4f& cov) {
    return from + (to-from)*cov;
}
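
// lerp() is how coverage gets applied below: since src and dst are premultiplied,
// blending from dst toward src by the coverage amount is equivalent to scaling the
// source's contribution by coverage.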

// s' = d(1-c) + sc, for a constant c.
static void SK_VECTORCALL lerp_constant_float(SkRasterPipeline::Stage* st, size_t x,
                                              Sk4f r, Sk4f g, Sk4f b, Sk4f a,
                                              Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
    Sk4f c = *st->ctx<const float*>();

    r = lerp(dr, r, c);
    g = lerp(dg, g, c);
    b = lerp(db, b, c);
    a = lerp(da, a, c);
    st->next(x, r,g,b,a, dr,dg,db,da);
}

// s' = d(1-c) + sc, 4 pixels at a time for 8-bit coverage.
static void SK_VECTORCALL lerp_a8(SkRasterPipeline::Stage* st, size_t x,
                                  Sk4f r, Sk4f g, Sk4f b, Sk4f a,
                                  Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
    auto ptr = st->ctx<const uint8_t*>() + x;
    Sk4f c = SkNx_cast<float>(Sk4b::Load(ptr)) * (1/255.0f);

    r = lerp(dr, r, c);
    g = lerp(dg, g, c);
    b = lerp(db, b, c);
    a = lerp(da, a, c);
    st->next(x, r,g,b,a, dr,dg,db,da);
}

// Tail variant of lerp_a8() handling 1 pixel at a time.
static void SK_VECTORCALL lerp_a8_1(SkRasterPipeline::Stage* st, size_t x,
                                    Sk4f r, Sk4f g, Sk4f b, Sk4f a,
                                    Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
    auto ptr = st->ctx<const uint8_t*>() + x;
    Sk4f c = *ptr * (1/255.0f);

    r = lerp(dr, r, c);
    g = lerp(dg, g, c);
    b = lerp(db, b, c);
    a = lerp(da, a, c);
    st->next(x, r,g,b,a, dr,dg,db,da);
}
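
// The paired stage functions (e.g. lerp_a8 / lerp_a8_1) appear to exist so the
// pipeline can process 4 pixels per step and then fall back to the 1-pixel "tail"
// variant for whatever is left; note the two-function p.append() calls further down.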

static void upscale_lcd16(const Sk4h& lcd16, Sk4f* r, Sk4f* g, Sk4f* b) {
    Sk4i _32_bit = SkNx_cast<int>(lcd16);

    *r = SkNx_cast<float>(_32_bit & SK_R16_MASK_IN_PLACE) * (1.0f / SK_R16_MASK_IN_PLACE);
    *g = SkNx_cast<float>(_32_bit & SK_G16_MASK_IN_PLACE) * (1.0f / SK_G16_MASK_IN_PLACE);
    *b = SkNx_cast<float>(_32_bit & SK_B16_MASK_IN_PLACE) * (1.0f / SK_B16_MASK_IN_PLACE);
}
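
// Masking each 565 channel in place and dividing by that same mask value scales the
// 5- or 6-bit coverage into [0,1] without needing a per-channel shift.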

// s' = d(1-c) + sc, 4 pixels at a time for 565 coverage.
static void SK_VECTORCALL lerp_lcd16(SkRasterPipeline::Stage* st, size_t x,
                                     Sk4f r, Sk4f g, Sk4f b, Sk4f a,
                                     Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
    auto ptr = st->ctx<const uint16_t*>() + x;
    Sk4f cr, cg, cb;
    upscale_lcd16(Sk4h::Load(ptr), &cr, &cg, &cb);

    r = lerp(dr, r, cr);
    g = lerp(dg, g, cg);
    b = lerp(db, b, cb);
    a = 1.0f;
    st->next(x, r,g,b,a, dr,dg,db,da);
}

// Tail variant of lerp_lcd16() handling 1 pixel at a time.
static void SK_VECTORCALL lerp_lcd16_1(SkRasterPipeline::Stage* st, size_t x,
                                       Sk4f r, Sk4f g, Sk4f b, Sk4f a,
                                       Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
    auto ptr = st->ctx<const uint16_t*>() + x;
    Sk4f cr, cg, cb;
    upscale_lcd16({*ptr,0,0,0}, &cr, &cg, &cb);

    r = lerp(dr, r, cr);
    g = lerp(dg, g, cg);
    b = lerp(db, b, cb);
    a = 1.0f;
    st->next(x, r,g,b,a, dr,dg,db,da);
}
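
// Forcing a to 1.0f here assumes LCD coverage is only ever applied over opaque
// destinations, which matches how LCD text is normally drawn.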

// Load 4 8-bit sRGB pixels from SkPMColor order to RGBA.
static void SK_VECTORCALL load_d_srgb(SkRasterPipeline::Stage* st, size_t x,
                                      Sk4f r, Sk4f g, Sk4f b, Sk4f a,
                                      Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
    auto ptr = st->ctx<const uint32_t*>() + x;

    dr = { sk_linear_from_srgb[(ptr[0] >> SK_R32_SHIFT) & 0xff],
           sk_linear_from_srgb[(ptr[1] >> SK_R32_SHIFT) & 0xff],
           sk_linear_from_srgb[(ptr[2] >> SK_R32_SHIFT) & 0xff],
           sk_linear_from_srgb[(ptr[3] >> SK_R32_SHIFT) & 0xff] };

    dg = { sk_linear_from_srgb[(ptr[0] >> SK_G32_SHIFT) & 0xff],
           sk_linear_from_srgb[(ptr[1] >> SK_G32_SHIFT) & 0xff],
           sk_linear_from_srgb[(ptr[2] >> SK_G32_SHIFT) & 0xff],
           sk_linear_from_srgb[(ptr[3] >> SK_G32_SHIFT) & 0xff] };

    db = { sk_linear_from_srgb[(ptr[0] >> SK_B32_SHIFT) & 0xff],
           sk_linear_from_srgb[(ptr[1] >> SK_B32_SHIFT) & 0xff],
           sk_linear_from_srgb[(ptr[2] >> SK_B32_SHIFT) & 0xff],
           sk_linear_from_srgb[(ptr[3] >> SK_B32_SHIFT) & 0xff] };

    // TODO: this >> doesn't really need mask if we make it logical instead of arithmetic.
    da = SkNx_cast<float>((Sk4i::Load(ptr) >> SK_A32_SHIFT) & 0xff) * (1/255.0f);

    st->next(x, r,g,b,a, dr,dg,db,da);
}
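
// sk_linear_from_srgb acts as an sRGB -> linear lookup table here (it is indexed by
// 8-bit channel values); alpha is not gamma-encoded, so it is only rescaled to [0,1].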

// Tail variant of load_d_srgb() handling 1 pixel at a time.
static void SK_VECTORCALL load_d_srgb_1(SkRasterPipeline::Stage* st, size_t x,
                                        Sk4f r, Sk4f g, Sk4f b, Sk4f a,
                                        Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
    auto ptr = st->ctx<const uint32_t*>() + x;

    dr = { sk_linear_from_srgb[(*ptr >> SK_R32_SHIFT) & 0xff], 0,0,0 };
    dg = { sk_linear_from_srgb[(*ptr >> SK_G32_SHIFT) & 0xff], 0,0,0 };
    db = { sk_linear_from_srgb[(*ptr >> SK_B32_SHIFT) & 0xff], 0,0,0 };
    da = { (1/255.0f) * (*ptr >> SK_A32_SHIFT), 0,0,0 };

    st->next(x, r,g,b,a, dr,dg,db,da);
}

// Write out 4 pixels as 8-bit SkPMColor-order sRGB.
static void SK_VECTORCALL store_srgb(SkRasterPipeline::Stage* st, size_t x,
                                     Sk4f r, Sk4f g, Sk4f b, Sk4f a,
                                     Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
    auto dst = st->ctx<uint32_t*>() + x;
    ( sk_linear_to_srgb(r) << SK_R32_SHIFT
    | sk_linear_to_srgb(g) << SK_G32_SHIFT
    | sk_linear_to_srgb(b) << SK_B32_SHIFT
    | Sk4f_round(255.0f*a) << SK_A32_SHIFT).store(dst);
}
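
// Color channels are re-encoded to sRGB on the way out; alpha stays linear and is
// just rounded back to 8 bits before being packed into SkPMColor order.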

// Tail variant of store_srgb() handling 1 pixel at a time.
static void SK_VECTORCALL store_srgb_1(SkRasterPipeline::Stage* st, size_t x,
                                       Sk4f r, Sk4f g, Sk4f b, Sk4f a,
                                       Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
    auto dst = st->ctx<uint32_t*>() + x;
    *dst = Sk4f_toS32(swizzle_rb_if_bgra({ r[0], g[0], b[0], a[0] }));
}


template <typename Effect>
static bool append_effect_stages(const Effect* effect, SkRasterPipeline* pipeline) {
    return !effect || effect->appendStages(pipeline);
}
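
// A null effect counts as success (there is simply nothing to append); a non-null
// effect must be able to express itself as pipeline stages, otherwise Create() below
// bails out and returns nullptr.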


std::unique_ptr<SkBlitter> SkRasterPipelineBlitter::Create(const SkPixmap& dst,
                                                           const SkPaint& paint) {
    if (!dst.info().gammaCloseToSRGB()) {
        return nullptr;  // TODO: f16, etc.
    }
    if (paint.getShader()) {
        return nullptr;  // TODO: need to work out how shaders and their contexts work
    }

    SkRasterPipeline shader, colorFilter, xfermode;
    if (!append_effect_stages(paint.getColorFilter(), &colorFilter) ||
        !append_effect_stages(paint.getXfermode(),    &xfermode   )) {
        return nullptr;
    }

    std::unique_ptr<SkRasterPipelineBlitter> blitter(new SkRasterPipelineBlitter{
        dst,
        shader, colorFilter, xfermode,
        SkColor4f::FromColor(paint.getColor()).premul(),
    });

    if (!paint.getShader()) {
        blitter->fShader.append(constant_color, &blitter->fPaintColor);
    }
    if (!paint.getXfermode()) {
        blitter->fXfermode.append(srcover);
    }

    return std::move(blitter);
}
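
// Returning nullptr above signals that this blitter can't handle the destination or
// paint yet; presumably the caller then falls back to one of the existing blitters.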

void SkRasterPipelineBlitter::blitH(int x, int y, int w) {
    auto dst = fDst.writable_addr(0,y);

    SkRasterPipeline p;
    p.extend(fShader);
    p.extend(fColorFilter);
    p.append(load_d_srgb, load_d_srgb_1, dst);
    p.extend(fXfermode);
    p.append(store_srgb, store_srgb_1, dst);

    p.run(x, w);
}

void SkRasterPipelineBlitter::blitAntiH(int x, int y, const SkAlpha aa[], const int16_t runs[]) {
    auto dst = fDst.writable_addr(0,y);
    float coverage;

    SkRasterPipeline p;
    p.extend(fShader);
    p.extend(fColorFilter);
    p.append(load_d_srgb, load_d_srgb_1, dst);
    p.extend(fXfermode);
    p.append(lerp_constant_float, &coverage);
    p.append(store_srgb, store_srgb_1, dst);

    for (int16_t run = *runs; run > 0; run = *runs) {
        coverage = *aa * (1/255.0f);
        p.run(x, run);

        x    += run;
        runs += run;
        aa   += run;
    }
}
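
// Note on the loop above: runs[] is the usual SkBlitter run-length encoding, where
// runs[i] pixels share coverage aa[i] and a zero-length run terminates the row. The
// lerp_constant_float stage holds a pointer to `coverage`, so updating that local
// before each p.run() is all it takes to change the coverage for the next run.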

void SkRasterPipelineBlitter::blitMask(const SkMask& mask, const SkIRect& clip) {
    if (mask.fFormat == SkMask::kBW_Format) {
        // TODO: native BW masks?
        return INHERITED::blitMask(mask, clip);
    }

    int x = clip.left();
    for (int y = clip.top(); y < clip.bottom(); y++) {
        auto dst = fDst.writable_addr(0,y);

        SkRasterPipeline p;
        p.extend(fShader);
        p.extend(fColorFilter);
        p.append(load_d_srgb, load_d_srgb_1, dst);
        p.extend(fXfermode);
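        // The mask pointers passed below are biased by -x because each lerp_* stage
        // adds the absolute pixel x back onto its context pointer, so (base - x) + x
        // lands on the mask value for this row's starting column.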
        switch (mask.fFormat) {
            case SkMask::kA8_Format:
                p.append(lerp_a8, lerp_a8_1, mask.getAddr8(x,y)-x);
                break;
            case SkMask::kLCD16_Format:
                p.append(lerp_lcd16, lerp_lcd16_1, mask.getAddrLCD16(x,y)-x);
                break;
            default: break;
        }
        p.append(store_srgb, store_srgb_1, dst);

        p.run(x, clip.width());
    }
}