OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2016 Google Inc. | 2 * Copyright 2016 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "SkRasterPipeline.h" | 8 #include "SkRasterPipeline.h" |
9 | 9 |
10 SkRasterPipeline::SkRasterPipeline() {} | 10 SkRasterPipeline::SkRasterPipeline() {} |
11 | 11 |
12 void SkRasterPipeline::append(SkRasterPipeline::Fn body_fn, const void* body_ctx
, | 12 void SkRasterPipeline::append(SkRasterPipeline::Fn body_fn, const void* body_ctx
, |
13 SkRasterPipeline::Fn tail_fn, const void* tail_ctx
) { | 13 SkRasterPipeline::Fn tail_fn, const void* tail_ctx
) { |
14 // Each stage holds its own context and the next function to call. | 14 // Each stage holds its own context and the next function to call. |
15 // So the pipeline itself has to hold onto the first function that starts th
e pipeline. | 15 // So the pipeline itself has to hold onto the first function that starts th
e pipeline. |
16 (fBody.empty() ? fBodyStart : fBody.back().fNext) = body_fn; | 16 (fBody.empty() ? fBodyStart : fBody.back().fNext) = body_fn; |
17 (fTail.empty() ? fTailStart : fTail.back().fNext) = tail_fn; | 17 (fTail.empty() ? fTailStart : fTail.back().fNext) = tail_fn; |
18 | 18 |
19 // Each last stage starts with its next function set to JustReturn as a safe
ty net. | 19 // Each last stage starts with its next function set to JustReturn as a safe
ty net. |
20 // It'll be overwritten by the next call to append(). | 20 // It'll be overwritten by the next call to append(). |
21 fBody.push_back({ &JustReturn, const_cast<void*>(body_ctx) }); | 21 fBody.push_back({ &JustReturn, const_cast<void*>(body_ctx) }); |
22 fTail.push_back({ &JustReturn, const_cast<void*>(tail_ctx) }); | 22 fTail.push_back({ &JustReturn, const_cast<void*>(tail_ctx) }); |
23 } | 23 } |
24 | 24 |
25 void SkRasterPipeline::run(size_t n) { | 25 void SkRasterPipeline::extend(const SkRasterPipeline& src) { |
| 26 SkASSERT(src.fBody.count() == src.fTail.count()); |
| 27 |
| 28 Fn body_fn = src.fBodyStart, |
| 29 tail_fn = src.fTailStart; |
| 30 for (int i = 0; i < src.fBody.count(); i++) { |
| 31 this->append(body_fn, src.fBody[i].fCtx, |
| 32 tail_fn, src.fTail[i].fCtx); |
| 33 body_fn = src.fBody[i].fNext; |
| 34 tail_fn = src.fTail[i].fNext; |
| 35 } |
| 36 } |
| 37 |
| 38 void SkRasterPipeline::run(size_t x, size_t n) { |
26 // It's fastest to start uninitialized if the compilers all let us. If not,
next fastest is 0. | 39 // It's fastest to start uninitialized if the compilers all let us. If not,
next fastest is 0. |
27 Sk4f v; | 40 Sk4f v; |
28 | 41 |
29 size_t x = 0; | |
30 while (n >= 4) { | 42 while (n >= 4) { |
31 fBodyStart(fBody.begin(), x, v,v,v,v, v,v,v,v); | 43 fBodyStart(fBody.begin(), x, v,v,v,v, v,v,v,v); |
32 x += 4; | 44 x += 4; |
33 n -= 4; | 45 n -= 4; |
34 } | 46 } |
35 while (n > 0) { | 47 while (n > 0) { |
36 fTailStart(fTail.begin(), x, v,v,v,v, v,v,v,v); | 48 fTailStart(fTail.begin(), x, v,v,v,v, v,v,v,v); |
37 x += 1; | 49 x += 1; |
38 n -= 1; | 50 n -= 1; |
39 } | 51 } |
40 } | 52 } |
41 | 53 |
// Sentinel stage function: does nothing, ending the pipeline.  append() installs
// it as every new stage's fNext until a later append() overwrites that pointer.
void SK_VECTORCALL SkRasterPipeline::JustReturn(Stage*, size_t, Sk4f,Sk4f,Sk4f,Sk4f,
                                                                Sk4f,Sk4f,Sk4f,Sk4f) {}
OLD | NEW |