// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
//  Software License Agreement:  http://www.webmproject.org/license/software/
//  Additional IP Rights Grant:  http://www.webmproject.org/license/additional/
// -----------------------------------------------------------------------------
//
// NEON version of YUV to RGB upsampling functions.
//
// Author: mans@mansr.com (Mans Rullgard)
// Based on SSE code by: somnath@google.com (Somnath Banerjee)

| 13 #include "./dsp.h" |
| 14 |
| 15 #if defined(__cplusplus) || defined(c_plusplus) |
| 16 extern "C" { |
| 17 #endif |
| 18 |
| 19 #if defined(WEBP_USE_NEON) |
| 20 |
| 21 #include <assert.h> |
| 22 #include <arm_neon.h> |
| 23 #include <string.h> |
| 24 #include "./yuv.h" |
| 25 |
| 26 #ifdef FANCY_UPSAMPLING |
| 27 |
| 28 // Loads 9 pixels each from rows r1 and r2 and generates 16 pixels. |
| 29 #define UPSAMPLE_16PIXELS(r1, r2, out) { \ |
| 30 uint8x8_t a = vld1_u8(r1); \ |
| 31 uint8x8_t b = vld1_u8(r1 + 1); \ |
| 32 uint8x8_t c = vld1_u8(r2); \ |
| 33 uint8x8_t d = vld1_u8(r2 + 1); \ |
| 34 \ |
| 35 uint16x8_t al = vshll_n_u8(a, 1); \ |
| 36 uint16x8_t bl = vshll_n_u8(b, 1); \ |
| 37 uint16x8_t cl = vshll_n_u8(c, 1); \ |
| 38 uint16x8_t dl = vshll_n_u8(d, 1); \ |
| 39 \ |
| 40 uint8x8_t diag1, diag2; \ |
| 41 uint16x8_t sl; \ |
| 42 \ |
| 43 /* a + b + c + d */ \ |
| 44 sl = vaddl_u8(a, b); \ |
| 45 sl = vaddw_u8(sl, c); \ |
| 46 sl = vaddw_u8(sl, d); \ |
| 47 \ |
| 48 al = vaddq_u16(sl, al); /* 3a + b + c + d */ \ |
| 49 bl = vaddq_u16(sl, bl); /* a + 3b + c + d */ \ |
| 50 \ |
| 51 al = vaddq_u16(al, dl); /* 3a + b + c + 3d */ \ |
| 52 bl = vaddq_u16(bl, cl); /* a + 3b + 3c + d */ \ |
| 53 \ |
| 54 diag2 = vshrn_n_u16(al, 3); \ |
| 55 diag1 = vshrn_n_u16(bl, 3); \ |
| 56 \ |
| 57 a = vrhadd_u8(a, diag1); \ |
| 58 b = vrhadd_u8(b, diag2); \ |
| 59 c = vrhadd_u8(c, diag2); \ |
| 60 d = vrhadd_u8(d, diag1); \ |
| 61 \ |
| 62 { \ |
| 63 const uint8x8x2_t a_b = {{ a, b }}; \ |
| 64 const uint8x8x2_t c_d = {{ c, d }}; \ |
| 65 vst2_u8(out, a_b); \ |
| 66 vst2_u8(out + 32, c_d); \ |
| 67 } \ |
| 68 } |
| 69 |
| 70 // Turn the macro into a function for reducing code-size when non-critical |
| 71 static void Upsample16Pixels(const uint8_t *r1, const uint8_t *r2, |
| 72 uint8_t *out) { |
| 73 UPSAMPLE_16PIXELS(r1, r2, out); |
| 74 } |
| 75 |
// Same as Upsample16Pixels() but safe for a partial block: copies the
// num_pixels valid bytes of each row into local 9-byte buffers and
// replicates the last byte so the full 9-pixel read is in-bounds.
#define UPSAMPLE_LAST_BLOCK(tb, bb, num_pixels, out) {                  \
  uint8_t r1[9], r2[9];                                                 \
  memcpy(r1, (tb), (num_pixels));                                       \
  memcpy(r2, (bb), (num_pixels));                                       \
  /* replicate last byte */                                             \
  memset(r1 + (num_pixels), r1[(num_pixels) - 1], 9 - (num_pixels));    \
  memset(r2 + (num_pixels), r2[(num_pixels) - 1], 9 - (num_pixels));    \
  Upsample16Pixels(r1, r2, out);                                        \
}

// 16.16 fixed-point YUV->RGB conversion constants.
#define CY  76283
#define CVR 89858
#define CUG 22014
#define CVG 45773
#define CUB 113618

// Coefficient table loaded into a NEON lane vector (cf16) by the converter;
// CVR/CUB are pre-divided by 4 and CVG by 2 to fit int16_t lanes (the
// missing factors are restored by the doubling multiplies in CONVERT8).
static const int16_t coef[4] = { CVR / 4, CUG, CVG / 2, CUB / 4 };
| 93 |
// Converts N luma pixels (8 per iteration) starting at src_y[cur_x] to FMT,
// using the upsampled chroma cached at src_uv (u at offset 0, v at offset 16
// — the layout written by UPSAMPLE_16PIXELS).  XSTEP is the output
// bytes-per-pixel.  NOTE: relies on the vectors cf16, cf32, u16 and u128
// being declared in the enclosing scope (see NEON_UPSAMPLE_FUNC).
// The y/u/v registers are reused at the end to hold the clamped r/g/b bytes.
#define CONVERT8(FMT, XSTEP, N, src_y, src_uv, out, cur_x) {            \
  int i;                                                                \
  for (i = 0; i < N; i += 8) {                                          \
    int off = ((cur_x) + i) * XSTEP;                                    \
    uint8x8_t y = vld1_u8(src_y + (cur_x) + i);                         \
    uint8x8_t u = vld1_u8((src_uv) + i);                                \
    uint8x8_t v = vld1_u8((src_uv) + i + 16);                           \
    int16x8_t yy = vreinterpretq_s16_u16(vsubl_u8(y, u16));             \
    int16x8_t uu = vreinterpretq_s16_u16(vsubl_u8(u, u128));            \
    int16x8_t vv = vreinterpretq_s16_u16(vsubl_u8(v, u128));            \
                                                                        \
    int16x8_t ud = vshlq_n_s16(uu, 1);                                  \
    int16x8_t vd = vshlq_n_s16(vv, 1);                                  \
                                                                        \
    int32x4_t vrl = vqdmlal_lane_s16(vshll_n_s16(vget_low_s16(vv), 1),  \
                                     vget_low_s16(vd), cf16, 0);        \
    int32x4_t vrh = vqdmlal_lane_s16(vshll_n_s16(vget_high_s16(vv), 1), \
                                     vget_high_s16(vd), cf16, 0);       \
    int16x8_t vr = vcombine_s16(vrshrn_n_s32(vrl, 16),                  \
                                vrshrn_n_s32(vrh, 16));                 \
                                                                        \
    int32x4_t vl = vmovl_s16(vget_low_s16(vv));                         \
    int32x4_t vh = vmovl_s16(vget_high_s16(vv));                        \
    int32x4_t ugl = vmlal_lane_s16(vl, vget_low_s16(uu), cf16, 1);      \
    int32x4_t ugh = vmlal_lane_s16(vh, vget_high_s16(uu), cf16, 1);     \
    int32x4_t gcl = vqdmlal_lane_s16(ugl, vget_low_s16(vv), cf16, 2);   \
    int32x4_t gch = vqdmlal_lane_s16(ugh, vget_high_s16(vv), cf16, 2);  \
    int16x8_t gc = vcombine_s16(vrshrn_n_s32(gcl, 16),                  \
                                vrshrn_n_s32(gch, 16));                 \
                                                                        \
    int32x4_t ubl = vqdmlal_lane_s16(vshll_n_s16(vget_low_s16(uu), 1),  \
                                     vget_low_s16(ud), cf16, 3);        \
    int32x4_t ubh = vqdmlal_lane_s16(vshll_n_s16(vget_high_s16(uu), 1), \
                                     vget_high_s16(ud), cf16, 3);       \
    int16x8_t ub = vcombine_s16(vrshrn_n_s32(ubl, 16),                  \
                                vrshrn_n_s32(ubh, 16));                 \
                                                                        \
    int32x4_t rl = vaddl_s16(vget_low_s16(yy), vget_low_s16(vr));       \
    int32x4_t rh = vaddl_s16(vget_high_s16(yy), vget_high_s16(vr));     \
    int32x4_t gl = vsubl_s16(vget_low_s16(yy), vget_low_s16(gc));       \
    int32x4_t gh = vsubl_s16(vget_high_s16(yy), vget_high_s16(gc));     \
    int32x4_t bl = vaddl_s16(vget_low_s16(yy), vget_low_s16(ub));       \
    int32x4_t bh = vaddl_s16(vget_high_s16(yy), vget_high_s16(ub));     \
                                                                        \
    rl = vmulq_lane_s32(rl, cf32, 0);                                   \
    rh = vmulq_lane_s32(rh, cf32, 0);                                   \
    gl = vmulq_lane_s32(gl, cf32, 0);                                   \
    gh = vmulq_lane_s32(gh, cf32, 0);                                   \
    bl = vmulq_lane_s32(bl, cf32, 0);                                   \
    bh = vmulq_lane_s32(bh, cf32, 0);                                   \
                                                                        \
    y = vqmovun_s16(vcombine_s16(vrshrn_n_s32(rl, 16),                  \
                                 vrshrn_n_s32(rh, 16)));                \
    u = vqmovun_s16(vcombine_s16(vrshrn_n_s32(gl, 16),                  \
                                 vrshrn_n_s32(gh, 16)));                \
    v = vqmovun_s16(vcombine_s16(vrshrn_n_s32(bl, 16),                  \
                                 vrshrn_n_s32(bh, 16)));                \
    STR_ ## FMT(out + off, y, u, v);                                    \
  }                                                                     \
}
| 154 |
#define v255 vmov_n_u8(255)

// Interleave-store helpers: write 8 pixels in the requested channel order.
// The *a variants append an opaque (255) alpha channel.
#define STR_Rgb(out, r, g, b) do {                                      \
  const uint8x8x3_t r_g_b = {{ r, g, b }};                              \
  vst3_u8(out, r_g_b);                                                  \
} while (0)

#define STR_Bgr(out, r, g, b) do {                                      \
  const uint8x8x3_t b_g_r = {{ b, g, r }};                              \
  vst3_u8(out, b_g_r);                                                  \
} while (0)

#define STR_Rgba(out, r, g, b) do {                                     \
  const uint8x8x4_t r_g_b_v255 = {{ r, g, b, v255 }};                   \
  vst4_u8(out, r_g_b_v255);                                             \
} while (0)

#define STR_Bgra(out, r, g, b) do {                                     \
  const uint8x8x4_t b_g_r_v255 = {{ b, g, r, v255 }};                   \
  vst4_u8(out, b_g_r_v255);                                             \
} while (0)

// Scalar fallback: convert N pixels one at a time via the VP8YuvTo* helpers.
// Used for the leftover pixels that don't fill a full NEON block.
#define CONVERT1(FMT, XSTEP, N, src_y, src_uv, rgb, cur_x) {            \
  int i;                                                                \
  for (i = 0; i < N; i++) {                                             \
    int off = ((cur_x) + i) * XSTEP;                                    \
    int y = src_y[(cur_x) + i];                                         \
    int u = (src_uv)[i];                                                \
    int v = (src_uv)[i + 16];                                           \
    VP8YuvTo ## FMT(y, u, v, rgb + off);                                \
  }                                                                     \
}

// Convert the top and/or bottom rows (either y pointer may be NULL).
// The bottom row's chroma lives 32 bytes after the top row's in the cache.
#define CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, uv,                  \
                      top_dst, bottom_dst, cur_x, len) {                \
  if (top_y) {                                                          \
    CONVERT8(FMT, XSTEP, len, top_y, uv, top_dst, cur_x)                \
  }                                                                     \
  if (bottom_y) {                                                       \
    CONVERT8(FMT, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x)   \
  }                                                                     \
}

#define CONVERT2RGB_1(FMT, XSTEP, top_y, bottom_y, uv,                  \
                      top_dst, bottom_dst, cur_x, len) {                \
  if (top_y) {                                                          \
    CONVERT1(FMT, XSTEP, len, top_y, uv, top_dst, cur_x);               \
  }                                                                     \
  if (bottom_y) {                                                       \
    CONVERT1(FMT, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x);  \
  }                                                                     \
}
| 207 |
| 208 #define NEON_UPSAMPLE_FUNC(FUNC_NAME, FMT, XSTEP) \ |
| 209 static void FUNC_NAME(const uint8_t *top_y, const uint8_t *bottom_y, \ |
| 210 const uint8_t *top_u, const uint8_t *top_v, \ |
| 211 const uint8_t *cur_u, const uint8_t *cur_v, \ |
| 212 uint8_t *top_dst, uint8_t *bottom_dst, int len) { \ |
| 213 int block; \ |
| 214 /* 16 byte aligned array to cache reconstructed u and v */ \ |
| 215 uint8_t uv_buf[2 * 32 + 15]; \ |
| 216 uint8_t *const r_uv = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15); \ |
| 217 const int uv_len = (len + 1) >> 1; \ |
| 218 /* 9 pixels must be read-able for each block */ \ |
| 219 const int num_blocks = (uv_len - 1) >> 3; \ |
| 220 const int leftover = uv_len - num_blocks * 8; \ |
| 221 const int last_pos = 1 + 16 * num_blocks; \ |
| 222 \ |
| 223 const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1; \ |
| 224 const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1; \ |
| 225 \ |
| 226 const int16x4_t cf16 = vld1_s16(coef); \ |
| 227 const int32x2_t cf32 = vmov_n_s32(CY); \ |
| 228 const uint8x8_t u16 = vmov_n_u8(16); \ |
| 229 const uint8x8_t u128 = vmov_n_u8(128); \ |
| 230 \ |
| 231 /* Treat the first pixel in regular way */ \ |
| 232 if (top_y) { \ |
| 233 const int u0 = (top_u[0] + u_diag) >> 1; \ |
| 234 const int v0 = (top_v[0] + v_diag) >> 1; \ |
| 235 VP8YuvTo ## FMT(top_y[0], u0, v0, top_dst); \ |
| 236 } \ |
| 237 if (bottom_y) { \ |
| 238 const int u0 = (cur_u[0] + u_diag) >> 1; \ |
| 239 const int v0 = (cur_v[0] + v_diag) >> 1; \ |
| 240 VP8YuvTo ## FMT(bottom_y[0], u0, v0, bottom_dst); \ |
| 241 } \ |
| 242 \ |
| 243 for (block = 0; block < num_blocks; ++block) { \ |
| 244 UPSAMPLE_16PIXELS(top_u, cur_u, r_uv); \ |
| 245 UPSAMPLE_16PIXELS(top_v, cur_v, r_uv + 16); \ |
| 246 CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, r_uv, \ |
| 247 top_dst, bottom_dst, 16 * block + 1, 16); \ |
| 248 top_u += 8; \ |
| 249 cur_u += 8; \ |
| 250 top_v += 8; \ |
| 251 cur_v += 8; \ |
| 252 } \ |
| 253 \ |
| 254 UPSAMPLE_LAST_BLOCK(top_u, cur_u, leftover, r_uv); \ |
| 255 UPSAMPLE_LAST_BLOCK(top_v, cur_v, leftover, r_uv + 16); \ |
| 256 CONVERT2RGB_1(FMT, XSTEP, top_y, bottom_y, r_uv, \ |
| 257 top_dst, bottom_dst, last_pos, len - last_pos); \ |
| 258 } |
| 259 |
| 260 // NEON variants of the fancy upsampler. |
| 261 NEON_UPSAMPLE_FUNC(UpsampleRgbLinePairNEON, Rgb, 3) |
| 262 NEON_UPSAMPLE_FUNC(UpsampleBgrLinePairNEON, Bgr, 3) |
| 263 NEON_UPSAMPLE_FUNC(UpsampleRgbaLinePairNEON, Rgba, 4) |
| 264 NEON_UPSAMPLE_FUNC(UpsampleBgraLinePairNEON, Bgra, 4) |
| 265 |
| 266 #endif // FANCY_UPSAMPLING |
| 267 |
| 268 #endif // WEBP_USE_NEON |
| 269 |
| 270 //------------------------------------------------------------------------------ |
| 271 |
| 272 extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */]; |
| 273 |
| 274 void WebPInitUpsamplersNEON(void) { |
| 275 #if defined(WEBP_USE_NEON) |
| 276 WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePairNEON; |
| 277 WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePairNEON; |
| 278 WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePairNEON; |
| 279 WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePairNEON; |
| 280 #endif // WEBP_USE_NEON |
| 281 } |
| 282 |
| 283 void WebPInitPremultiplyNEON(void) { |
| 284 #if defined(WEBP_USE_NEON) |
| 285 WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePairNEON; |
| 286 WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePairNEON; |
| 287 #endif // WEBP_USE_NEON |
| 288 } |
| 289 |
| 290 #if defined(__cplusplus) || defined(c_plusplus) |
| 291 } // extern "C" |
| 292 #endif |
OLD | NEW |