OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2016 Google Inc. | 2 * Copyright 2016 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "Test.h" | 8 #include "Test.h" |
9 #include "SkRasterPipeline.h" | 9 #include "SkRasterPipeline.h" |
10 | 10 |
11 // load needs two variants, one to load 4 values... | 11 // load needs two variants, one to load 4 values... |
12 static void SK_VECTORCALL load(SkRasterPipeline::Stage* st, size_t x, | 12 SK_RASTER_STAGE(load) { |
13 Sk4f v0, Sk4f v1, Sk4f v2, Sk4f v3, | 13 auto ptr = (const float*)ctx + x; |
14 Sk4f v4, Sk4f v5, Sk4f v6, Sk4f v7) { | 14 r = Sk4f{ptr[0]}; |
15 auto ptr = st->ctx<const float*>(); | 15 g = Sk4f{ptr[1]}; |
16 v0 = Sk4f{ptr[x+0]}; | 16 b = Sk4f{ptr[2]}; |
17 v1 = Sk4f{ptr[x+1]}; | 17 a = Sk4f{ptr[3]}; |
18 v2 = Sk4f{ptr[x+2]}; | |
19 v3 = Sk4f{ptr[x+3]}; | |
20 | |
21 st->next(x, v0,v1,v2,v3, v4,v5,v6,v7); | |
22 } | 18 } |
23 | 19 |
24 // ...and one to load a single value. | 20 // ...and one to load a single value. |
25 static void SK_VECTORCALL load_tail(SkRasterPipeline::Stage* st, size_t x, | 21 SK_RASTER_STAGE(load_tail) { |
26 Sk4f v0, Sk4f v1, Sk4f v2, Sk4f v3, | 22 auto ptr = (const float*)ctx + x; |
27 Sk4f v4, Sk4f v5, Sk4f v6, Sk4f v7) { | 23 r = Sk4f{*ptr}; |
28 auto ptr = st->ctx<const float*>(); | |
29 v0 = Sk4f{ptr[x]}; | |
30 | |
31 st->next(x, v0,v1,v2,v3, v4,v5,v6,v7); | |
32 } | 24 } |
33 | 25 |
34 // square doesn't really care how many of its inputs are active, nor does it need a context. | 26 // square doesn't really care how many of its inputs are active, nor does it need a context. |
35 static void SK_VECTORCALL square(SkRasterPipeline::Stage* st, size_t x, | 27 SK_RASTER_STAGE(square) { |
36 Sk4f v0, Sk4f v1, Sk4f v2, Sk4f v3, | 28 r *= r; |
37 Sk4f v4, Sk4f v5, Sk4f v6, Sk4f v7) { | 29 g *= g; |
38 v0 *= v0; | 30 b *= b; |
39 v1 *= v1; | 31 a *= a; |
40 v2 *= v2; | |
41 v3 *= v3; | |
42 st->next(x, v0,v1,v2,v3, v4,v5,v6,v7); | |
43 } | 32 } |
44 | 33 |
45 // Like load, store has a _tail variant. It ends the pipeline by returning. | 34 // Like load, store has a _tail variant. |
46 static void SK_VECTORCALL store(SkRasterPipeline::Stage* st, size_t x, | 35 SK_RASTER_STAGE(store) { |
47 Sk4f v0, Sk4f v1, Sk4f v2, Sk4f v3, | 36 auto ptr = (float*)ctx + x; |
48 Sk4f v4, Sk4f v5, Sk4f v6, Sk4f v7) { | 37 ptr[0] = r[0]; |
49 auto ptr = st->ctx<float*>(); | 38 ptr[1] = g[0]; |
50 ptr[x+0] = v0[0]; | 39 ptr[2] = b[0]; |
51 ptr[x+1] = v1[0]; | 40 ptr[3] = a[0]; |
52 ptr[x+2] = v2[0]; | |
53 ptr[x+3] = v3[0]; | |
54 } | 41 } |
55 | 42 |
56 static void SK_VECTORCALL store_tail(SkRasterPipeline::Stage* st, size_t x, | 43 SK_RASTER_STAGE(store_tail) { |
57 Sk4f v0, Sk4f v1, Sk4f v2, Sk4f v3, | 44 auto ptr = (float*)ctx + x; |
58 Sk4f v4, Sk4f v5, Sk4f v6, Sk4f v7) { | 45 *ptr = r[0]; |
59 auto ptr = st->ctx<float*>(); | |
60 ptr[x+0] = v0[0]; | |
61 } | 46 } |
62 | 47 |
63 DEF_TEST(SkRasterPipeline, r) { | 48 DEF_TEST(SkRasterPipeline, r) { |
64 // We'll build up and run a simple pipeline that exercises the salient | 49 // We'll build up and run a simple pipeline that exercises the salient |
65 // mechanics of SkRasterPipeline: | 50 // mechanics of SkRasterPipeline: |
66 // - context pointers | 51 // - context pointers |
67 // - stages sensitive to the number of pixels | 52 // - stages sensitive to the number of pixels |
68 // - stages insensitive to the number of pixels | 53 // - stages insensitive to the number of pixels |
69 // | 54 // |
70 // This pipeline loads up some values, squares them, then writes them back to memory. | 55 // This pipeline loads up some values, squares them, then writes them back to memory. |
71 | 56 |
72 const float src_vals[] = { 1,2,3,4,5 }; | 57 const float src_vals[] = { 1,2,3,4,5 }; |
73 float dst_vals[] = { 0,0,0,0,0 }; | 58 float dst_vals[] = { 0,0,0,0,0 }; |
74 | 59 |
75 SkRasterPipeline p; | 60 SkRasterPipeline p; |
76 p.append(load, load_tail, src_vals); | 61 p.append<load, load_tail>(src_vals); |
77 p.append(square); | 62 p.append<square>(); |
78 p.append(store, store_tail, dst_vals); | 63 p.append<store, store_tail>(dst_vals); |
79 | 64 |
80 p.run(5); | 65 p.run(5); |
81 | 66 |
82 REPORTER_ASSERT(r, dst_vals[0] == 1); | 67 REPORTER_ASSERT(r, dst_vals[0] == 1); |
83 REPORTER_ASSERT(r, dst_vals[1] == 4); | 68 REPORTER_ASSERT(r, dst_vals[1] == 4); |
84 REPORTER_ASSERT(r, dst_vals[2] == 9); | 69 REPORTER_ASSERT(r, dst_vals[2] == 9); |
85 REPORTER_ASSERT(r, dst_vals[3] == 16); | 70 REPORTER_ASSERT(r, dst_vals[3] == 16); |
86 REPORTER_ASSERT(r, dst_vals[4] == 25); | 71 REPORTER_ASSERT(r, dst_vals[4] == 25); |
87 } | 72 } |
88 | 73 |
89 DEF_TEST(SkRasterPipeline_empty, r) { | 74 DEF_TEST(SkRasterPipeline_empty, r) { |
90 // No asserts... just a test that this is safe to run. | 75 // No asserts... just a test that this is safe to run. |
91 SkRasterPipeline p; | 76 SkRasterPipeline p; |
92 p.run(20); | 77 p.run(20); |
93 } | 78 } |
94 | 79 |
95 DEF_TEST(SkRasterPipeline_nonsense, r) { | 80 DEF_TEST(SkRasterPipeline_nonsense, r) { |
96 // No asserts... just a test that this is safe to run and terminates. | 81 // No asserts... just a test that this is safe to run and terminates. |
97 // square() always calls st->next(); this makes sure we've always got something there to call. | 82 // square() always calls st->next(); this makes sure we've always got something there to call. |
98 SkRasterPipeline p; | 83 SkRasterPipeline p; |
99 p.append(square); | 84 p.append<square>(); |
100 p.run(20); | 85 p.run(20); |
101 } | 86 } |
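
For readers skimming the NEW column: the refactor trades the hand-written SK_VECTORCALL signatures for an SK_RASTER_STAGE macro plus a templated append<>(), so each stage body only mentions its context pointer, the pixel index x, and the color registers. Below is a minimal sketch of that pattern in the abstract, using invented names (TOY_STAGE, ToyPipeline, a single-float Vec instead of Sk4f, and no separate tail stage); it is not the real SkRasterPipeline API, whose actual macro and append<>() live in SkRasterPipeline.h.

// Toy illustration of a macro-based stage pipeline.
// All names here are invented for this sketch and are NOT Skia's.
#include <cstddef>
#include <cstdio>
#include <vector>

using Vec = float;  // stand-in for Sk4f; the real pipeline works on 4-wide SIMD values

// The macro hides the boilerplate stage signature, so a stage body only names
// the pieces it actually uses: the context pointer, the pixel index, and the value.
#define TOY_STAGE(name) \
    static void name(const void* ctx, size_t x, Vec& r)

TOY_STAGE(load)   { r = ((const float*)ctx)[x]; }
TOY_STAGE(square) { r *= r; }
TOY_STAGE(store)  { ((float*)ctx)[x] = r; }

class ToyPipeline {
public:
    // append<fn>(ctx) records one stage; passing the function as a template
    // argument mirrors the p.append<square>() call style in the NEW column.
    template <void (*fn)(const void*, size_t, Vec&)>
    void append(const void* ctx = nullptr) {
        fStages.push_back({fn, ctx});
    }

    // run(n) pushes each of the n values through every stage in order.
    void run(size_t n) {
        for (size_t x = 0; x < n; x++) {
            Vec r = 0;
            for (const Stage& st : fStages) {
                st.fn(st.ctx, x, r);
            }
        }
    }

private:
    struct Stage {
        void (*fn)(const void*, size_t, Vec&);
        const void* ctx;
    };
    std::vector<Stage> fStages;
};

int main() {
    const float src_vals[] = { 1,2,3,4,5 };
    float dst_vals[] = { 0,0,0,0,0 };

    ToyPipeline p;
    p.append<load>(src_vals);
    p.append<square>();
    p.append<store>(dst_vals);
    p.run(5);

    for (float v : dst_vals) { std::printf("%g ", v); }  // prints: 1 4 9 16 25
    std::printf("\n");
    return 0;
}

Passing the stage function as a template argument rather than as a runtime function-pointer parameter is what lets the calls read as p.append<square>() in the NEW column; the real header also takes a second template argument for the _tail variant, as the load/store appends in the test show.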