Index: src/opts/Sk4px_NEON.h
diff --git a/src/opts/Sk4px_NEON.h b/src/opts/Sk4px_NEON.h
index ede5f2cd8eaf72e9f3eb3b378f6d9f56f949ee00..368551949cbc466b023c76ad57e989c53893c92a 100644
--- a/src/opts/Sk4px_NEON.h
+++ b/src/opts/Sk4px_NEON.h
@@ -48,3 +48,33 @@ inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
     return Sk16b(vcombine_u8(vaddhn_u16(this->fLo.fVec, o.fLo.fVec),
                              vaddhn_u16(this->fHi.fVec, o.fHi.fVec)));
 }
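
For context, the anchor function above, addNarrowHi, uses vaddhn_u16, which adds two vectors of 16-bit lanes and keeps only the high byte of each wrapping sum; vcombine_u8 then glues the low and high halves back into one 16-byte vector. A minimal one-lane scalar model (the helper name is hypothetical, not a Skia or NEON API):

    #include <cstdint>

    // Hypothetical one-lane model of vaddhn_u16: add two 16-bit values and
    // keep the high byte of the wrapping sum, i.e. ((x + y) & 0xFFFF) >> 8.
    static inline uint8_t addhn_u16_ref(uint16_t x, uint16_t y) {
        return static_cast<uint8_t>(static_cast<uint16_t>(x + y) >> 8);
    }

addNarrowHi simply applies this to all sixteen lanes at once.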
|
+
+inline Sk4px Sk4px::alphas() const {
+    static_assert(SK_A32_SHIFT == 24, "This method assumes little-endian.");
+    auto as = vshrq_n_u32((uint32x4_t)this->fVec, 24);  // ___3 ___2 ___1 ___0
+    as = vorrq_u32(as, vshlq_n_u32(as,  8));            // __33 __22 __11 __00
+    as = vorrq_u32(as, vshlq_n_u32(as, 16));            // 3333 2222 1111 0000
+    return Sk16b((uint8x16_t)as);
+}
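
The shift-and-OR pair is a log2 broadcast: the first OR grows each replicated alpha from one byte to two, the second from two to four. The trick is easiest to see on a plain uint32_t; the sketch below (hypothetical name, not Skia API) mirrors what alphas() computes for one little-endian RGBA pixel, where SK_A32_SHIFT == 24 puts alpha in the top byte:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical scalar reference for one lane of Sk4px::alphas().
    static uint32_t broadcast_alpha_ref(uint32_t pixel) {
        uint32_t a = pixel >> 24;  // ___A
        a |= a << 8;               // __AA
        a |= a << 16;              // AAAA
        return a;
    }

    int main() {
        std::printf("%08x\n", broadcast_alpha_ref(0x80ff3310u));  // prints 80808080
    }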
|
+
+inline Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) {
+    uint8x16_t a8 = vdupq_n_u8(0);                      // ____ ____ ____ ____
+    a8 = vld1q_lane_u8(a+0, a8,  0);                    // ____ ____ ____ ___0
+    a8 = vld1q_lane_u8(a+1, a8,  4);                    // ____ ____ ___1 ___0
+    a8 = vld1q_lane_u8(a+2, a8,  8);                    // ____ ___2 ___1 ___0
+    a8 = vld1q_lane_u8(a+3, a8, 12);                    // ___3 ___2 ___1 ___0
+    auto a32 = (uint32x4_t)a8;                          // ___3 ___2 ___1 ___0
+    a32 = vorrq_u32(a32, vshlq_n_u32(a32,  8));         // __33 __22 __11 __00
+    a32 = vorrq_u32(a32, vshlq_n_u32(a32, 16));         // 3333 2222 1111 0000
+    return Sk16b((uint8x16_t)a32);
+}
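
Load4Alphas reuses that broadcast after first scattering the four alphas into byte lanes 0, 4, 8, and 12, so each alpha starts in the low byte of its own 32-bit lane. A scalar equivalent of the whole function (again a hypothetical sketch, not part of Skia):

    #include <cstdint>
    #include <cstring>

    // Hypothetical scalar reference for Sk4px::Load4Alphas(): replicate each
    // of four 8-bit alphas across its pixel's four byte lanes.
    static void load4_alphas_ref(const uint8_t a[4], uint8_t out[16]) {
        for (int i = 0; i < 4; ++i) {
            uint32_t lane = a[i];  // ___0
            lane |= lane << 8;     // __00
            lane |= lane << 16;    // 0000
            std::memcpy(out + 4 * i, &lane, sizeof(lane));  // all four bytes equal
        }
    }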
|
+
+inline Sk4px Sk4px::Load2Alphas(const SkAlpha a[2]) {
+    uint8x16_t a8 = vdupq_n_u8(0);                      // ____ ____ ____ ____
+    a8 = vld1q_lane_u8(a+0, a8, 0);                     // ____ ____ ____ ___0
+    a8 = vld1q_lane_u8(a+1, a8, 4);                     // ____ ____ ___1 ___0
+    auto a32 = (uint32x4_t)a8;                          // ____ ____ ___1 ___0
+    a32 = vorrq_u32(a32, vshlq_n_u32(a32,  8));         // ____ ____ __11 __00
+    a32 = vorrq_u32(a32, vshlq_n_u32(a32, 16));         // ____ ____ 1111 0000
+    return Sk16b((uint8x16_t)a32);
+}
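
Load2Alphas is the two-pixel tail case: a8 starts out as vdupq_n_u8(0) and only byte lanes 0 and 4 are loaded, so pixels 2 and 3 come out fully zero and only two entries of the alpha array are ever read. A scalar model of that behavior, with a quick check of the zeroed upper half (hypothetical names, for illustration):

    #include <cassert>
    #include <cstdint>

    // Hypothetical scalar reference for Sk4px::Load2Alphas(): like the
    // four-alpha version, but pixels 2 and 3 stay zero.
    static void load2_alphas_ref(const uint8_t a[2], uint8_t out[16]) {
        for (int i = 0; i < 16; ++i) out[i] = 0;             // matches vdupq_n_u8(0)
        for (int i = 0; i < 2; ++i)
            for (int j = 0; j < 4; ++j) out[4 * i + j] = a[i];
    }

    int main() {
        const uint8_t a[2] = {0x40, 0xC0};
        uint8_t out[16];
        load2_alphas_ref(a, out);
        for (int i = 8; i < 16; ++i) assert(out[i] == 0);    // upper half untouched
    }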
|