| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2016 Google Inc. | 2 * Copyright 2016 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "SkHalf.h" | 8 #include "SkHalf.h" |
| 9 #include "SkPM4fPriv.h" | 9 #include "SkPM4fPriv.h" |
| 10 #include "SkUtils.h" | 10 #include "SkUtils.h" |
| (...skipping 39 matching lines...) |
| 50 // | 50 // |
| 51 template <DstType D> Sk4f load_from_dst(uint64_t dst) { | 51 template <DstType D> Sk4f load_from_dst(uint64_t dst) { |
| 52 return (D == kU16_Dst) ? load_from_u16(dst) : SkHalfToFloat_01(dst); | 52 return (D == kU16_Dst) ? load_from_u16(dst) : SkHalfToFloat_01(dst); |
| 53 } | 53 } |
| 54 | 54 |
| 55 // Assumes x4 is already in the "natural" bias (either unit-float or 16bit int) | 55 // Assumes x4 is already in the "natural" bias (either unit-float or 16bit int) |
| 56 template <DstType D> uint64_t store_to_dst(const Sk4f& x4) { | 56 template <DstType D> uint64_t store_to_dst(const Sk4f& x4) { |
| 57 return (D == kU16_Dst) ? store_to_u16(x4) : SkFloatToHalf_01(x4); | 57 return (D == kU16_Dst) ? store_to_u16(x4) : SkFloatToHalf_01(x4); |
| 58 } | 58 } |
| 59 | 59 |
| 60 static inline Sk4f pm_to_rgba_order(const Sk4f& x) { |
| 61 if (SkPM4f::R == 0) { |
| 62 return x; // we're already RGBA |
| 63 } else { |
| 64 // we're BGRA, so swap R and B |
| 65 return SkNx_shuffle<2, 1, 0, 3>(x); |
| 66 } |
| 67 } |
| 68 |
| 60 /////////////////////////////////////////////////////////////////////////////////////////////////// | 69 /////////////////////////////////////////////////////////////////////////////////////////////////// |
| 61 | 70 |
| 62 template <DstType D> void src_1(const SkXfermode::U64State& state, uint64_t dst[], | 71 template <DstType D> void src_1(const SkXfermode::U64State& state, uint64_t dst[], |
| 63 const SkPM4f& src, int count, const SkAlpha aa[]) { | 72 const SkPM4f& src, int count, const SkAlpha aa[]) { |
| 64 const Sk4f s4 = unit_to_dst_bias<D>(Sk4f::Load(src.fVec)); | 73 const Sk4f s4 = pm_to_rgba_order(unit_to_dst_bias<D>(Sk4f::Load(src.fVec))); |
| 65 if (aa) { | 74 if (aa) { |
| 66 for (int i = 0; i < count; ++i) { | 75 for (int i = 0; i < count; ++i) { |
| 67 const Sk4f d4 = load_from_dst<D>(dst[i]); | 76 const Sk4f d4 = load_from_dst<D>(dst[i]); |
| 68 dst[i] = store_to_dst<D>(lerp_by_coverage(s4, d4, aa[i])); | 77 dst[i] = store_to_dst<D>(lerp_by_coverage(s4, d4, aa[i])); |
| 69 } | 78 } |
| 70 } else { | 79 } else { |
| 71 sk_memset64(dst, store_to_dst<D>(s4), count); | 80 sk_memset64(dst, store_to_dst<D>(s4), count); |
| 72 } | 81 } |
| 73 } | 82 } |
| 74 | 83 |
| 75 template <DstType D> void src_n(const SkXfermode::U64State& state, uint64_t dst[], | 84 template <DstType D> void src_n(const SkXfermode::U64State& state, uint64_t dst[], |
| 76 const SkPM4f src[], int count, const SkAlpha aa[]) { | 85 const SkPM4f src[], int count, const SkAlpha aa[]) { |
| 77 if (aa) { | 86 if (aa) { |
| 78 for (int i = 0; i < count; ++i) { | 87 for (int i = 0; i < count; ++i) { |
| 79 const Sk4f s4 = unit_to_dst_bias<D>(Sk4f::Load(src[i].fVec)); | 88 const Sk4f s4 = pm_to_rgba_order(unit_to_dst_bias<D>(Sk4f::Load(src[i].fVec))); |
| 80 const Sk4f d4 = load_from_dst<D>(dst[i]); | 89 const Sk4f d4 = load_from_dst<D>(dst[i]); |
| 81 dst[i] = store_to_dst<D>(lerp_by_coverage(s4, d4, aa[i])); | 90 dst[i] = store_to_dst<D>(lerp_by_coverage(s4, d4, aa[i])); |
| 82 } | 91 } |
| 83 } else { | 92 } else { |
| 84 for (int i = 0; i < count; ++i) { | 93 for (int i = 0; i < count; ++i) { |
| 85 const Sk4f s4 = unit_to_dst_bias<D>(Sk4f::Load(src[i].fVec)); | 94 const Sk4f s4 = pm_to_rgba_order(unit_to_dst_bias<D>(Sk4f::Load(src[i].fVec))); |
| 86 dst[i] = store_to_dst<D>(s4); | 95 dst[i] = store_to_dst<D>(s4); |
| 87 } | 96 } |
| 88 } | 97 } |
| 89 } | 98 } |
| 90 | 99 |
| 91 const U64ProcPair gU64Procs_Src[] = { | 100 const U64ProcPair gU64Procs_Src[] = { |
| 92 { src_1<kU16_Dst>, src_n<kU16_Dst> }, // U16 alpha | 101 { src_1<kU16_Dst>, src_n<kU16_Dst> }, // U16 alpha |
| 93 { src_1<kU16_Dst>, src_n<kU16_Dst> }, // U16 opaque | 102 { src_1<kU16_Dst>, src_n<kU16_Dst> }, // U16 opaque |
| 94 { src_1<kF16_Dst>, src_n<kF16_Dst> }, // F16 alpha | 103 { src_1<kF16_Dst>, src_n<kF16_Dst> }, // F16 alpha |
| 95 { src_1<kF16_Dst>, src_n<kF16_Dst> }, // F16 opaque | 104 { src_1<kF16_Dst>, src_n<kF16_Dst> }, // F16 opaque |
| 96 }; | 105 }; |
| 97 | 106 |
| 98 /////////////////////////////////////////////////////////////////////////////////////////////////// | 107 /////////////////////////////////////////////////////////////////////////////////////////////////// |
| 99 | 108 |
| 100 template <DstType D> void srcover_1(const SkXfermode::U64State& state, uint64_t dst[], | 109 template <DstType D> void srcover_1(const SkXfermode::U64State& state, uint64_t dst[], |
| 101 const SkPM4f& src, int count, const SkAlpha aa[]) { | 110 const SkPM4f& src, int count, const SkAlpha aa[]) { |
| 102 const Sk4f s4 = Sk4f::Load(src.fVec); | 111 const Sk4f s4 = pm_to_rgba_order(Sk4f::Load(src.fVec)); |
| 103 const Sk4f dst_scale = Sk4f(1 - get_alpha(s4)); | 112 const Sk4f dst_scale = Sk4f(1 - get_alpha(s4)); |
| 104 const Sk4f s4bias = unit_to_dst_bias<D>(s4); | 113 const Sk4f s4bias = unit_to_dst_bias<D>(s4); |
| 105 for (int i = 0; i < count; ++i) { | 114 for (int i = 0; i < count; ++i) { |
| 106 const Sk4f d4bias = load_from_dst<D>(dst[i]); | 115 const Sk4f d4bias = load_from_dst<D>(dst[i]); |
| 107 const Sk4f r4bias = s4bias + d4bias * dst_scale; | 116 const Sk4f r4bias = s4bias + d4bias * dst_scale; |
| 108 if (aa) { | 117 if (aa) { |
| 109 dst[i] = store_to_dst<D>(lerp_by_coverage(r4bias, d4bias, aa[i])); | 118 dst[i] = store_to_dst<D>(lerp_by_coverage(r4bias, d4bias, aa[i])); |
| 110 } else { | 119 } else { |
| 111 dst[i] = store_to_dst<D>(r4bias); | 120 dst[i] = store_to_dst<D>(r4bias); |
| 112 } | 121 } |
| 113 } | 122 } |
| 114 } | 123 } |
| 115 | 124 |
| 116 template <DstType D> void srcover_n(const SkXfermode::U64State& state, uint64_t dst[], | 125 template <DstType D> void srcover_n(const SkXfermode::U64State& state, uint64_t dst[], |
| 117 const SkPM4f src[], int count, const SkAlpha aa[]) { | 126 const SkPM4f src[], int count, const SkAlpha aa[]) { |
| 118 for (int i = 0; i < count; ++i) { | 127 for (int i = 0; i < count; ++i) { |
| 119 const Sk4f s4 = Sk4f::Load(src[i].fVec); | 128 const Sk4f s4 = pm_to_rgba_order(Sk4f::Load(src[i].fVec)); |
| 120 const Sk4f dst_scale = Sk4f(1 - get_alpha(s4)); | 129 const Sk4f dst_scale = Sk4f(1 - get_alpha(s4)); |
| 121 const Sk4f s4bias = unit_to_dst_bias<D>(s4); | 130 const Sk4f s4bias = unit_to_dst_bias<D>(s4); |
| 122 const Sk4f d4bias = load_from_dst<D>(dst[i]); | 131 const Sk4f d4bias = load_from_dst<D>(dst[i]); |
| 123 const Sk4f r4bias = s4bias + d4bias * dst_scale; | 132 const Sk4f r4bias = s4bias + d4bias * dst_scale; |
| 124 if (aa) { | 133 if (aa) { |
| 125 dst[i] = store_to_dst<D>(lerp_by_coverage(r4bias, d4bias, aa[i])); | 134 dst[i] = store_to_dst<D>(lerp_by_coverage(r4bias, d4bias, aa[i])); |
| 126 } else { | 135 } else { |
| 127 dst[i] = store_to_dst<D>(r4bias); | 136 dst[i] = store_to_dst<D>(r4bias); |
| 128 } | 137 } |
| 129 } | 138 } |
| (...skipping 21 matching lines...) |
| 151 return { nullptr, nullptr }; | 160 return { nullptr, nullptr }; |
| 152 } | 161 } |
| 153 | 162 |
| 154 SkXfermode::U64Proc1 SkXfermode::GetU64Proc1(Mode mode, uint32_t flags) { | 163 SkXfermode::U64Proc1 SkXfermode::GetU64Proc1(Mode mode, uint32_t flags) { |
| 155 return find_procs(mode, flags).fP1; | 164 return find_procs(mode, flags).fP1; |
| 156 } | 165 } |
| 157 | 166 |
| 158 SkXfermode::U64ProcN SkXfermode::GetU64ProcN(Mode mode, uint32_t flags) { | 167 SkXfermode::U64ProcN SkXfermode::GetU64ProcN(Mode mode, uint32_t flags) { |
| 159 return find_procs(mode, flags).fPN; | 168 return find_procs(mode, flags).fPN; |
| 160 } | 169 } |
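For context, the change routes each loaded source color through pm_to_rgba_order before biasing and storing, so a BGRA-ordered SkPM4f is swapped into RGBA lane order for the U16/F16 destinations, while the src-over math (result = src + dst * (1 - src alpha)) is unchanged. Below is a minimal standalone sketch of those two steps using a plain 4-float array instead of Sk4f; the names and the bool parameter are illustrative stand-ins, not Skia API.

#include <array>
#include <cstdio>

using Color4f = std::array<float, 4>;  // lanes in memory order, alpha in lane 3

// Hypothetical stand-in for pm_to_rgba_order: if the premultiplied color is
// stored BGRA, swap lanes 0 and 2 so the result is always RGBA.
static Color4f to_rgba_order(const Color4f& c, bool storedAsBGRA) {
    if (!storedAsBGRA) {
        return c;                       // already RGBA
    }
    return { c[2], c[1], c[0], c[3] };  // swap R and B, keep G and A
}

// Src-over in premultiplied form: result = src + dst * (1 - src.alpha),
// mirroring the r4bias = s4bias + d4bias * dst_scale lines in the diff.
static Color4f srcover(const Color4f& src, const Color4f& dst) {
    const float dstScale = 1.0f - src[3];
    Color4f out;
    for (int i = 0; i < 4; ++i) {
        out[i] = src[i] + dst[i] * dstScale;
    }
    return out;
}

int main() {
    const Color4f bgra = {0.1f, 0.2f, 0.6f, 0.5f};   // premultiplied, stored BGRA
    const Color4f src  = to_rgba_order(bgra, true);  // becomes {0.6, 0.2, 0.1, 0.5}
    const Color4f dst  = {0.25f, 0.25f, 0.25f, 1.0f};
    const Color4f out  = srcover(src, dst);
    std::printf("r=%g g=%g b=%g a=%g\n", out[0], out[1], out[2], out[3]);
    return 0;
}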