
Diff: src/opts/Sk4px_NEON.h

Issue 1203513002: Use vmulq_n_u32(..., 0x01010101) to distribute alphas. (Closed) Base URL: https://skia.googlesource.com/skia@master
Patch Set: rebase Created 5 years, 6 months ago
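
The whole CL rests on one arithmetic identity: an 8-bit alpha sitting in the low byte of a 32-bit lane, multiplied by 0x01010101, is replicated into all four bytes of that lane, which is exactly what the old shift-and-OR pair computed. A minimal scalar sketch of that identity (illustration only, not code from this CL):

#include <cassert>
#include <cstdint>

int main() {
    uint32_t a = 0xA3;                                                 // alpha in the low byte
    assert(a * 0x01010101u == 0xA3A3A3A3u);                            // one multiply fans it out
    assert(a * 0x01010101u == ((a << 24) | (a << 16) | (a << 8) | a)); // same as shift-and-OR
    return 0;
}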
 /*
  * Copyright 2015 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */

 namespace {  // See Sk4px.h

 inline Sk4px Sk4px::DupPMColor(SkPMColor px) { return Sk16b((uint8x16_t)vdupq_n_u32(px)); }

 (... 34 unchanged lines skipped ...)

                  vmull_u8(vget_high_u8(this->fVec), vget_high_u8(other.fVec)));
 }

 inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
     const Sk4px::Wide o(other);  // Should be no code, but allows us to access fLo, fHi.
     return Sk16b(vcombine_u8(vaddhn_u16(this->fLo.fVec, o.fLo.fVec),
                              vaddhn_u16(this->fHi.fVec, o.fHi.fVec)));
 }

 inline Sk4px Sk4px::alphas() const {
-    static_assert(SK_A32_SHIFT == 24, "This method assumes little-endian.");
-    auto as = vshrq_n_u32((uint32x4_t)this->fVec, 24);      // ___3 ___2 ___1 ___0
-    as = vorrq_u32(as, vshlq_n_u32(as,  8));                // __33 __22 __11 __00
-    as = vorrq_u32(as, vshlq_n_u32(as, 16));                // 3333 2222 1111 0000
-    return Sk16b((uint8x16_t)as);
+    auto as = vshrq_n_u32((uint32x4_t)fVec, SK_A32_SHIFT);  // ___3 ___2 ___1 ___0
+    return Sk16b((uint8x16_t)vmulq_n_u32(as, 0x01010101));  // 3333 2222 1111 0000
 }

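A hedged scalar model of the alphas() change above (plain C++, no NEON): the old path shifts the alpha down and then ORs in two shifted copies, the new path does one multiply per 32-bit lane; vmulq_n_u32 simply applies that multiply to all four pixels at once. This assumes alpha lives in the top byte (SK_A32_SHIFT == 24), as the removed static_assert spelled out.

#include <cassert>
#include <cstdint>

// Scalar stand-ins for a single 32-bit lane; the NEON code handles 4 lanes at once.
static uint32_t alphas_old(uint32_t px) {
    uint32_t as = px >> 24;    // ___a: alpha moved to the low byte
    as |= as << 8;             // __aa
    as |= as << 16;            // aaaa
    return as;
}

static uint32_t alphas_new(uint32_t px) {
    return (px >> 24) * 0x01010101u;   // aaaa in one multiply
}

int main() {
    for (uint32_t a = 0; a <= 0xFF; a++) {
        uint32_t px = (a << 24) | 0x00123456;   // arbitrary RGB, alpha in the top byte
        assert(alphas_old(px) == alphas_new(px));
    }
    return 0;
}
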
 inline Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) {
     uint8x16_t a8 = vdupq_n_u8(0);                           // ____ ____ ____ ____
     a8 = vld1q_lane_u8(a+0, a8,  0);                         // ____ ____ ____ ___0
     a8 = vld1q_lane_u8(a+1, a8,  4);                         // ____ ____ ___1 ___0
     a8 = vld1q_lane_u8(a+2, a8,  8);                         // ____ ___2 ___1 ___0
     a8 = vld1q_lane_u8(a+3, a8, 12);                         // ___3 ___2 ___1 ___0
     auto a32 = (uint32x4_t)a8;                               //
-    a32 = vorrq_u32(a32, vshlq_n_u32(a32,  8));              // __33 __22 __11 __00
-    a32 = vorrq_u32(a32, vshlq_n_u32(a32, 16));              // 3333 2222 1111 0000
-    return Sk16b((uint8x16_t)a32);
+    return Sk16b((uint8x16_t)vmulq_n_u32(a32, 0x01010101));  // 3333 2222 1111 0000
 }

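Load4Alphas above leans on the same trick after the lane loads: each alpha ends up in the low byte of its own 32-bit lane, and one per-lane multiply replicates it. A small scalar model with made-up alpha values, just to show the resulting byte pattern (hypothetical sample data, not from the CL):

#include <cstdint>
#include <cstdio>

int main() {
    const uint8_t a[4] = {0x10, 0x80, 0xCC, 0xFF};   // hypothetical alphas
    uint32_t lanes[4];
    for (int i = 0; i < 4; i++) {
        lanes[i] = (uint32_t)a[i] * 0x01010101u;     // ___i -> iiii within its lane
    }
    // Prints: 10101010 80808080 CCCCCCCC FFFFFFFF
    printf("%08X %08X %08X %08X\n",
           (unsigned)lanes[0], (unsigned)lanes[1], (unsigned)lanes[2], (unsigned)lanes[3]);
    return 0;
}
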
 inline Sk4px Sk4px::Load2Alphas(const SkAlpha a[2]) {
     uint8x16_t a8 = vdupq_n_u8(0);                           // ____ ____ ____ ____
     a8 = vld1q_lane_u8(a+0, a8, 0);                          // ____ ____ ____ ___0
     a8 = vld1q_lane_u8(a+1, a8, 4);                          // ____ ____ ___1 ___0
     auto a32 = (uint32x4_t)a8;                               //
-    a32 = vorrq_u32(a32, vshlq_n_u32(a32,  8));              // ____ ____ __11 __00
-    a32 = vorrq_u32(a32, vshlq_n_u32(a32, 16));              // ____ ____ 1111 0000
-    return Sk16b((uint8x16_t)a32);
+    return Sk16b((uint8x16_t)vmulq_n_u32(a32, 0x01010101));  // ____ ____ 1111 0000
 }

 inline Sk4px Sk4px::zeroColors() const {
     return Sk16b(vandq_u8(this->fVec, (uint8x16_t)vdupq_n_u32(0xFF << SK_A32_SHIFT)));
 }

 inline Sk4px Sk4px::zeroAlphas() const {
     // vbic(a,b) == a & ~b
     return Sk16b(vbicq_u8(this->fVec, (uint8x16_t)vdupq_n_u32(0xFF << SK_A32_SHIFT)));
 }

 }  // namespace