/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkPx_neon_DEFINED
#define SkPx_neon_DEFINED

#include <arm_neon.h>  // NEON intrinsics: vld4_u8, vst4_u8, vmull_u8, etc.
#include "SkTypes.h"   // SkASSERT

// When we have NEON, we like to work 8 pixels at a time.
// This lets us exploit vld4/vst4 and represent SkPx as planar uint8x8x4_t,
// Wide as planar uint16x8x4_t, and Alpha as a single uint8x8_t plane.
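
// For illustration (a sketch, not part of the original header): vld4_u8 reads
// 32 contiguous bytes and deinterleaves them, so plane val[k] gathers byte k
// of each of the 8 pixels:
//
//     uint8x8x4_t px = vld4_u8(bytes);  // `bytes` is a hypothetical const uint8_t*.
//     // px.val[0] = byte 0 of pixels 0..7, ..., px.val[3] = byte 3 (alpha).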

namespace neon {

struct SkPx {
    static const int N = 8;

    uint8x8x4_t fVec;
    SkPx(uint8x8x4_t vec) : fVec(vec) {}

    static SkPx Dup(uint32_t px) { return vld4_dup_u8((const uint8_t*)&px); }
    static SkPx Load(const uint32_t* px) { return vld4_u8((const uint8_t*)px); }
    static SkPx Load(const uint32_t* px, int n) {
        SkASSERT(0 < n && n < 8);
        uint8x8x4_t v = vld4_dup_u8((const uint8_t*)px);  // n>=1, so start all lanes with pixel 0.
        switch (n) {
            case 7: v = vld4_lane_u8((const uint8_t*)(px+6), v, 6);  // fall through
            case 6: v = vld4_lane_u8((const uint8_t*)(px+5), v, 5);  // fall through
            case 5: v = vld4_lane_u8((const uint8_t*)(px+4), v, 4);  // fall through
            case 4: v = vld4_lane_u8((const uint8_t*)(px+3), v, 3);  // fall through
            case 3: v = vld4_lane_u8((const uint8_t*)(px+2), v, 2);  // fall through
            case 2: v = vld4_lane_u8((const uint8_t*)(px+1), v, 1);
        }
        return v;
    }
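
    // Usage sketch (hypothetical `src` buffer): SkPx::Load(src, 3) fills lanes
    // 0-2 from src[0..2]; lanes 3-7 keep the pixel-0 value planted by
    // vld4_dup_u8, so no lane is left uninitialized.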

    void store(uint32_t* px) const { vst4_u8((uint8_t*)px, fVec); }
    void store(uint32_t* px, int n) const {
        SkASSERT(0 < n && n < 8);
        switch (n) {
            case 7: vst4_lane_u8((uint8_t*)(px+6), fVec, 6);  // fall through
            case 6: vst4_lane_u8((uint8_t*)(px+5), fVec, 5);  // fall through
            case 5: vst4_lane_u8((uint8_t*)(px+4), fVec, 4);  // fall through
            case 4: vst4_lane_u8((uint8_t*)(px+3), fVec, 3);  // fall through
            case 3: vst4_lane_u8((uint8_t*)(px+2), fVec, 2);  // fall through
            case 2: vst4_lane_u8((uint8_t*)(px+1), fVec, 1);  // fall through
            case 1: vst4_lane_u8((uint8_t*)(px+0), fVec, 0);
        }
    }
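
    // A typical blit loop sketch (hypothetical; `fn` maps SkPx -> SkPx):
    //
    //     while (n >= 8) { fn(SkPx::Load(px)).store(px); px += 8; n -= 8; }
    //     if (n > 0)     { fn(SkPx::Load(px, n)).store(px, n); }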

    struct Alpha {
        uint8x8_t fA;
        Alpha(uint8x8_t a) : fA(a) {}

        static Alpha Dup(uint8_t a) { return vdup_n_u8(a); }
        static Alpha Load(const uint8_t* a) { return vld1_u8(a); }
        static Alpha Load(const uint8_t* a, int n) {
            SkASSERT(0 < n && n < 8);
            uint8x8_t v = vld1_dup_u8(a);  // n>=1, so start all lanes with alpha 0.
            switch (n) {
                case 7: v = vld1_lane_u8(a+6, v, 6);  // fall through
                case 6: v = vld1_lane_u8(a+5, v, 5);  // fall through
                case 5: v = vld1_lane_u8(a+4, v, 4);  // fall through
                case 4: v = vld1_lane_u8(a+3, v, 3);  // fall through
                case 3: v = vld1_lane_u8(a+2, v, 2);  // fall through
                case 2: v = vld1_lane_u8(a+1, v, 1);
            }
            return v;
        }
        Alpha inv() const { return vsub_u8(vdup_n_u8(255), fA); }  // 255 - a, per lane.
    };

    struct Wide {
        uint16x8x4_t fVec;
        Wide(uint16x8x4_t vec) : fVec(vec) {}

        Wide operator+(const Wide& o) const {
            return (uint16x8x4_t) {{
                vaddq_u16(fVec.val[0], o.fVec.val[0]),
                vaddq_u16(fVec.val[1], o.fVec.val[1]),
                vaddq_u16(fVec.val[2], o.fVec.val[2]),
                vaddq_u16(fVec.val[3], o.fVec.val[3]),
            }};
        }
        Wide operator-(const Wide& o) const {
            return (uint16x8x4_t) {{
                vsubq_u16(fVec.val[0], o.fVec.val[0]),
                vsubq_u16(fVec.val[1], o.fVec.val[1]),
                vsubq_u16(fVec.val[2], o.fVec.val[2]),
                vsubq_u16(fVec.val[3], o.fVec.val[3]),
            }};
        }

        template <int bits> Wide shl() const {
            return (uint16x8x4_t) {{
                vshlq_n_u16(fVec.val[0], bits),
                vshlq_n_u16(fVec.val[1], bits),
                vshlq_n_u16(fVec.val[2], bits),
                vshlq_n_u16(fVec.val[3], bits),
            }};
        }
        template <int bits> Wide shr() const {
            return (uint16x8x4_t) {{
                vshrq_n_u16(fVec.val[0], bits),
                vshrq_n_u16(fVec.val[1], bits),
                vshrq_n_u16(fVec.val[2], bits),
                vshrq_n_u16(fVec.val[3], bits),
            }};
        }

        SkPx addNarrowHi(const SkPx& o) const {
            return (uint8x8x4_t) {{
                vshrn_n_u16(vaddw_u8(fVec.val[0], o.fVec.val[0]), 8),
                vshrn_n_u16(vaddw_u8(fVec.val[1], o.fVec.val[1]), 8),
                vshrn_n_u16(vaddw_u8(fVec.val[2], o.fVec.val[2]), 8),
                vshrn_n_u16(vaddw_u8(fVec.val[3], o.fVec.val[3]), 8),
            }};
        }
    };
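
    // addNarrowHi(o) computes (*this + o widened) >> 8, narrowed back to 8 bits:
    // vaddw_u8 widens o's bytes and adds them in, then vshrn_n_u16 shifts right
    // by 8 and narrows. Per lane, e.g. 0x1234 + 0x56 = 0x128A, >>8 -> 0x12.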

    Alpha alpha() const { return fVec.val[3]; }

    Wide widenLo() const {
        return (uint16x8x4_t) {{
            vmovl_u8(fVec.val[0]),
            vmovl_u8(fVec.val[1]),
            vmovl_u8(fVec.val[2]),
            vmovl_u8(fVec.val[3]),
        }};
    }
    // TODO: these two can probably be done faster.
    Wide widenHi() const { return this->widenLo().shl<8>(); }
    Wide widenLoHi() const { return this->widenLo() + this->widenHi(); }
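
    // widenLoHi() is x*0x101, replicating each byte into both halves of its
    // 16-bit lane (0xFF -> 0xFFFF, 0x80 -> 0x8080), which rescales 8-bit
    // values onto the full 16-bit range.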

    SkPx operator+(const SkPx& o) const {
        return (uint8x8x4_t) {{
            vadd_u8(fVec.val[0], o.fVec.val[0]),
            vadd_u8(fVec.val[1], o.fVec.val[1]),
            vadd_u8(fVec.val[2], o.fVec.val[2]),
            vadd_u8(fVec.val[3], o.fVec.val[3]),
        }};
    }
    SkPx operator-(const SkPx& o) const {
        return (uint8x8x4_t) {{
            vsub_u8(fVec.val[0], o.fVec.val[0]),
            vsub_u8(fVec.val[1], o.fVec.val[1]),
            vsub_u8(fVec.val[2], o.fVec.val[2]),
            vsub_u8(fVec.val[3], o.fVec.val[3]),
        }};
    }
    SkPx saturatedAdd(const SkPx& o) const {
        return (uint8x8x4_t) {{
            vqadd_u8(fVec.val[0], o.fVec.val[0]),
            vqadd_u8(fVec.val[1], o.fVec.val[1]),
            vqadd_u8(fVec.val[2], o.fVec.val[2]),
            vqadd_u8(fVec.val[3], o.fVec.val[3]),
        }};
    }

    Wide operator*(const Alpha& a) const {
        return (uint16x8x4_t) {{
            vmull_u8(fVec.val[0], a.fA),
            vmull_u8(fVec.val[1], a.fA),
            vmull_u8(fVec.val[2], a.fA),
            vmull_u8(fVec.val[3], a.fA),
        }};
    }
    SkPx approxMulDiv255(const Alpha& a) const {
        return (*this * a).addNarrowHi(*this);
    }
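
    // approxMulDiv255(a) is (px*a + px) >> 8 == (px*(a+1)) >> 8, the usual
    // cheap stand-in for px*a/255; e.g. px=0xFF, a=0xFF gives (0xFF*0x100)>>8
    // = 0xFF, and the result stays within one of the exact value.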

    SkPx addAlpha(const Alpha& a) const {
        return (uint8x8x4_t) {{
            fVec.val[0],
            fVec.val[1],
            fVec.val[2],
            vadd_u8(fVec.val[3], a.fA),
        }};
    }
};
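
// How the pieces compose, as a sketch (hypothetical helper, not part of this
// header): premultiplied src-over, dst' = src + dst*(255 - sa)/255.
//
//     SkPx srcover(const SkPx& src, const SkPx& dst) {
//         return src + dst.approxMulDiv255(src.alpha().inv());
//     }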

}  // namespace neon

typedef neon::SkPx SkPx;

#endif  // SkPx_neon_DEFINED