OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 | 11 |
12 /* MFQE: Multiframe Quality Enhancement | 12 /* MFQE: Multiframe Quality Enhancement |
13 * In rate-limited situations, keyframes may cause significant visual artifacts | 13 * In rate-limited situations, keyframes may cause significant visual artifacts |
14 * commonly referred to as "popping." This file implements a postprocessing | 14 * commonly referred to as "popping." This file implements a postprocessing |
15 * algorithm which blends data from the preceding frame when there is no | 15 * algorithm which blends data from the preceding frame when there is no |
16 * motion and the q from the previous frame is lower, which indicates that it is | 16 * motion and the q from the previous frame is lower, which indicates that it is |
17 * higher quality. | 17 * higher quality. |
18 */ | 18 */ |
19 | 19 |
20 #include "postproc.h" | 20 #include "./vp8_rtcd.h" |
21 #include "variance.h" | 21 #include "./vpx_dsp_rtcd.h" |
| 22 #include "vp8/common/postproc.h" |
| 23 #include "vp8/common/variance.h" |
22 #include "vpx_mem/vpx_mem.h" | 24 #include "vpx_mem/vpx_mem.h" |
23 #include "vp8_rtcd.h" | |
24 #include "vpx_scale/yv12config.h" | 25 #include "vpx_scale/yv12config.h" |
25 | 26 |
26 #include <limits.h> | 27 #include <limits.h> |
27 #include <stdlib.h> | 28 #include <stdlib.h> |
28 | 29 |
29 static void filter_by_weight(unsigned char *src, int src_stride, | 30 static void filter_by_weight(unsigned char *src, int src_stride, |
30 unsigned char *dst, int dst_stride, | 31 unsigned char *dst, int dst_stride, |
31 int block_size, int src_weight) | 32 int block_size, int src_weight) |
32 { | 33 { |
33 int dst_weight = (1 << MFQE_PRECISION) - src_weight; | 34 int dst_weight = (1 << MFQE_PRECISION) - src_weight; |
(...skipping 112 matching lines...)
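As context for filter_by_weight (whose opening lines appear above): the blend it implements is a fixed-point weighted average in which the source and destination weights sum to (1 << MFQE_PRECISION). The following is a minimal sketch of that per-pixel operation, not the library's code; the helper name is hypothetical and MFQE_PRECISION = 4 is an assumption based on the vp8 postproc header.

/* Sketch only: blend src into dst with fixed-point weights that sum to
 * 1 << MFQE_PRECISION.  MFQE_PRECISION = 4 is assumed here, and the
 * function name is hypothetical. */
static void blend_block_sketch(const unsigned char *src, int src_stride,
                               unsigned char *dst, int dst_stride,
                               int block_size, int src_weight)
{
    const int precision = 4;                          /* assumed MFQE_PRECISION */
    const int dst_weight = (1 << precision) - src_weight;
    const int rounding = 1 << (precision - 1);        /* round to nearest       */
    int r, c;

    for (r = 0; r < block_size; ++r)
    {
        for (c = 0; c < block_size; ++c)
            dst[c] = (unsigned char)((src[c] * src_weight +
                                      dst[c] * dst_weight + rounding) >> precision);
        src += src_stride;
        dst += dst_stride;
    }
}

Keeping the weights as small fixed-point integers lets the blend run with shifts and adds only, avoiding per-pixel division or floating point in the postproc path.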
146 unsigned char *vp; | 147 unsigned char *vp; |
147 unsigned char *vdp; | 148 unsigned char *vdp; |
148 | 149 |
149 unsigned int act, actd, sad, usad, vsad, sse, thr, thrsq, actrisk; | 150 unsigned int act, actd, sad, usad, vsad, sse, thr, thrsq, actrisk; |
150 | 151 |
151 if (blksize == 16) | 152 if (blksize == 16) |
152 { | 153 { |
153 actd = (vp8_variance16x16(yd, yd_stride, VP8_ZEROS, 0, &sse)+128)>>8; | 154 actd = (vp8_variance16x16(yd, yd_stride, VP8_ZEROS, 0, &sse)+128)>>8; |
154 act = (vp8_variance16x16(y, y_stride, VP8_ZEROS, 0, &sse)+128)>>8; | 155 act = (vp8_variance16x16(y, y_stride, VP8_ZEROS, 0, &sse)+128)>>8; |
155 #ifdef USE_SSD | 156 #ifdef USE_SSD |
156 sad = (vp8_variance16x16(y, y_stride, yd, yd_stride, &sse)); | 157 vp8_variance16x16(y, y_stride, yd, yd_stride, &sse); |
157 sad = (sse + 128)>>8; | 158 sad = (sse + 128)>>8; |
158 usad = (vp8_variance8x8(u, uv_stride, ud, uvd_stride, &sse)); | 159 vp8_variance8x8(u, uv_stride, ud, uvd_stride, &sse); |
159 usad = (sse + 32)>>6; | 160 usad = (sse + 32)>>6; |
160 vsad = (vp8_variance8x8(v, uv_stride, vd, uvd_stride, &sse)); | 161 vp8_variance8x8(v, uv_stride, vd, uvd_stride, &sse); |
161 vsad = (sse + 32)>>6; | 162 vsad = (sse + 32)>>6; |
162 #else | 163 #else |
163 sad = (vp8_sad16x16(y, y_stride, yd, yd_stride, UINT_MAX) + 128) >> 8; | 164 sad = (vpx_sad16x16(y, y_stride, yd, yd_stride) + 128) >> 8; |
164 usad = (vp8_sad8x8(u, uv_stride, ud, uvd_stride, UINT_MAX) + 32) >> 6; | 165 usad = (vpx_sad8x8(u, uv_stride, ud, uvd_stride) + 32) >> 6; |
165 vsad = (vp8_sad8x8(v, uv_stride, vd, uvd_stride, UINT_MAX)+ 32) >> 6; | 166 vsad = (vpx_sad8x8(v, uv_stride, vd, uvd_stride)+ 32) >> 6; |
166 #endif | 167 #endif |
167 } | 168 } |
168 else /* if (blksize == 8) */ | 169 else /* if (blksize == 8) */ |
169 { | 170 { |
170 actd = (vp8_variance8x8(yd, yd_stride, VP8_ZEROS, 0, &sse)+32)>>6; | 171 actd = (vp8_variance8x8(yd, yd_stride, VP8_ZEROS, 0, &sse)+32)>>6; |
171 act = (vp8_variance8x8(y, y_stride, VP8_ZEROS, 0, &sse)+32)>>6; | 172 act = (vp8_variance8x8(y, y_stride, VP8_ZEROS, 0, &sse)+32)>>6; |
172 #ifdef USE_SSD | 173 #ifdef USE_SSD |
173 sad = (vp8_variance8x8(y, y_stride, yd, yd_stride, &sse)); | 174 vp8_variance8x8(y, y_stride, yd, yd_stride, &sse); |
174 sad = (sse + 32)>>6; | 175 sad = (sse + 32)>>6; |
175 usad = (vp8_variance4x4(u, uv_stride, ud, uvd_stride, &sse)); | 176 vp8_variance4x4(u, uv_stride, ud, uvd_stride, &sse); |
176 usad = (sse + 8)>>4; | 177 usad = (sse + 8)>>4; |
177 vsad = (vp8_variance4x4(v, uv_stride, vd, uvd_stride, &sse)); | 178 vp8_variance4x4(v, uv_stride, vd, uvd_stride, &sse); |
178 vsad = (sse + 8)>>4; | 179 vsad = (sse + 8)>>4; |
179 #else | 180 #else |
180 sad = (vp8_sad8x8(y, y_stride, yd, yd_stride, UINT_MAX) + 32) >> 6; | 181 sad = (vpx_sad8x8(y, y_stride, yd, yd_stride) + 32) >> 6; |
181 usad = (vp8_sad4x4(u, uv_stride, ud, uvd_stride, UINT_MAX) + 8) >> 4; | 182 usad = (vpx_sad4x4(u, uv_stride, ud, uvd_stride) + 8) >> 4; |
182 vsad = (vp8_sad4x4(v, uv_stride, vd, uvd_stride, UINT_MAX) + 8) >> 4; | 183 vsad = (vpx_sad4x4(v, uv_stride, vd, uvd_stride) + 8) >> 4; |
183 #endif | 184 #endif |
184 } | 185 } |
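In both branches above, the added constant and right shift form a rounded division by the block's pixel count, so act, actd, sad, usad and vsad come out as per-pixel averages regardless of block size (shift 8 for 16x16, 6 for 8x8, 4 for 4x4). A small self-contained sketch of that rounding division, with illustrative values only:

#include <stdio.h>

/* Rounded division by the pixel count of an N x N block (N a power of two),
 * matching the (x + half) >> shift pattern above. */
static unsigned int per_pixel_avg(unsigned int total, int shift)
{
    return (total + (1u << (shift - 1))) >> shift;
}

int main(void)
{
    /* 16x16 block: 256 pixels -> shift 8; 8x8 -> shift 6; 4x4 -> shift 4. */
    printf("%u\n", per_pixel_avg(1000, 8));  /* (1000 + 128) >> 8 = 4  */
    printf("%u\n", per_pixel_avg(1000, 6));  /* (1000 + 32) >> 6 = 16  */
    return 0;
}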
185 | 186 |
186 actrisk = (actd > act * 5); | 187 actrisk = (actd > act * 5); |
187 | 188 |
188 /* thr = qdiff/16 + log2(act) + log4(qprev) */ | 189 /* thr = qdiff/16 + log2(act) + log4(qprev) */ |
189 thr = (qdiff >> 4); | 190 thr = (qdiff >> 4); |
190 while (actd >>= 1) thr++; | 191 while (actd >>= 1) thr++; |
191 while (qprev >>= 2) thr++; | 192 while (qprev >>= 2) thr++; |
192 | 193 |
(...skipping 31 matching lines...)
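The thr computation near the end of the hunk above builds its threshold with integer shifts rather than floating-point logarithms: qdiff >> 4 is qdiff/16, each halving of actd adds one (a floor(log2) approximation), and each quartering of qprev adds one (a floor(log4) approximation). A small self-contained example of the same arithmetic, using illustrative values only:

#include <stdio.h>

int main(void)
{
    unsigned int qdiff = 48, actd = 20, qprev = 63;   /* illustrative values only */
    unsigned int thr = qdiff >> 4;                    /* qdiff/16 -> 3            */

    while (actd >>= 1) thr++;                         /* floor(log2(20)) = 4 -> thr = 7 */
    while (qprev >>= 2) thr++;                        /* floor(log4(63)) = 2 -> thr = 9 */

    printf("thr = %u\n", thr);                        /* prints thr = 9 */
    return 0;
}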
224 if (blksize == 16) | 225 if (blksize == 16) |
225 { | 226 { |
226 vp8_copy_mem16x16(y, y_stride, yd, yd_stride); | 227 vp8_copy_mem16x16(y, y_stride, yd, yd_stride); |
227 vp8_copy_mem8x8(u, uv_stride, ud, uvd_stride); | 228 vp8_copy_mem8x8(u, uv_stride, ud, uvd_stride); |
228 vp8_copy_mem8x8(v, uv_stride, vd, uvd_stride); | 229 vp8_copy_mem8x8(v, uv_stride, vd, uvd_stride); |
229 } | 230 } |
230 else /* if (blksize == 8) */ | 231 else /* if (blksize == 8) */ |
231 { | 232 { |
232 vp8_copy_mem8x8(y, y_stride, yd, yd_stride); | 233 vp8_copy_mem8x8(y, y_stride, yd, yd_stride); |
233 for (up = u, udp = ud, i = 0; i < uvblksize; ++i, up += uv_stride, udp += uvd_stride) | 234 for (up = u, udp = ud, i = 0; i < uvblksize; ++i, up += uv_stride, udp += uvd_stride) |
234 vpx_memcpy(udp, up, uvblksize); | 235 memcpy(udp, up, uvblksize); |
235 for (vp = v, vdp = vd, i = 0; i < uvblksize; ++i, vp += uv_stride, vdp += uvd_stride) | 236 for (vp = v, vdp = vd, i = 0; i < uvblksize; ++i, vp += uv_stride, vdp += uvd_stride) |
236 vpx_memcpy(vdp, vp, uvblksize); | 237 memcpy(vdp, vp, uvblksize); |
237 } | 238 } |
238 } | 239 } |
239 } | 240 } |
240 | 241 |
241 static int qualify_inter_mb(const MODE_INFO *mode_info_context, int *map) | 242 static int qualify_inter_mb(const MODE_INFO *mode_info_context, int *map) |
242 { | 243 { |
243 if (mode_info_context->mbmi.mb_skip_coeff) | 244 if (mode_info_context->mbmi.mb_skip_coeff) |
244 map[0] = map[1] = map[2] = map[3] = 1; | 245 map[0] = map[1] = map[2] = map[3] = 1; |
245 else if (mode_info_context->mbmi.mode==SPLITMV) | 246 else if (mode_info_context->mbmi.mode==SPLITMV) |
246 { | 247 { |
(...skipping 87 matching lines...)
334 int k; | 335 int k; |
335 unsigned char *up = u_ptr + 4*(i*show->uv_stride+j); | 336 unsigned char *up = u_ptr + 4*(i*show->uv_stride+j); |
336 unsigned char *udp = ud_ptr + 4*(i*dest->uv_stride+j); | 337 unsigned char *udp = ud_ptr + 4*(i*dest->uv_stride+j); |
337 unsigned char *vp = v_ptr + 4*(i*show->uv_stride+j); | 338 unsigned char *vp = v_ptr + 4*(i*show->uv_stride+j); |
338 unsigned char *vdp = vd_ptr + 4*(i*dest->uv_stride+j); | 339 unsigned char *vdp = vd_ptr + 4*(i*dest->uv_stride+j); |
339 vp8_copy_mem8x8(y_ptr + 8*(i*show->y_stride+j), show->y_stride, | 340 vp8_copy_mem8x8(y_ptr + 8*(i*show->y_stride+j), show->y_stride, |
340 yd_ptr + 8*(i*dest->y_stride+j), dest->y_stride); | 341 yd_ptr + 8*(i*dest->y_stride+j), dest->y_stride); |
341 for (k = 0; k < 4; ++k, up += show->uv_stride, udp += dest->uv_stride, | 342 for (k = 0; k < 4; ++k, up += show->uv_stride, udp += dest->uv_stride, |
342 vp += show->uv_stride, vdp += dest->uv_stride) | 343 vp += show->uv_stride, vdp += dest->uv_stride) |
343 { | 344 { |
344 vpx_memcpy(udp, up, 4); | 345 memcpy(udp, up, 4); |
345 vpx_memcpy(vdp, vp, 4); | 346 memcpy(vdp, vp, 4); |
346 } | 347 } |
347 } | 348 } |
348 } | 349 } |
349 } | 350 } |
350 else /* totmap = 4 */ | 351 else /* totmap = 4 */ |
351 { | 352 { |
352 multiframe_quality_enhance_block(16, qcurr, qprev, y_ptr, | 353 multiframe_quality_enhance_block(16, qcurr, qprev, y_ptr, |
353 u_ptr, v_ptr, | 354 u_ptr, v_ptr, |
354 show->y_stride, | 355 show->y_stride, |
355 show->uv_stride, | 356 show->uv_stride, |
(...skipping 20 matching lines...)
376 y_ptr += show->y_stride * 16 - 16 * cm->mb_cols; | 377 y_ptr += show->y_stride * 16 - 16 * cm->mb_cols; |
377 u_ptr += show->uv_stride * 8 - 8 * cm->mb_cols; | 378 u_ptr += show->uv_stride * 8 - 8 * cm->mb_cols; |
378 v_ptr += show->uv_stride * 8 - 8 * cm->mb_cols; | 379 v_ptr += show->uv_stride * 8 - 8 * cm->mb_cols; |
379 yd_ptr += dest->y_stride * 16 - 16 * cm->mb_cols; | 380 yd_ptr += dest->y_stride * 16 - 16 * cm->mb_cols; |
380 ud_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols; | 381 ud_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols; |
381 vd_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols; | 382 vd_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols; |
382 | 383 |
383 mode_info_context++; /* Skip border mb */ | 384 mode_info_context++; /* Skip border mb */ |
384 } | 385 } |
385 } | 386 } |
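One note on the six pointer updates near the end of the loop above: after a macroblock row, each pointer has advanced 16 luma (8 chroma) columns per macroblock, so adding stride * 16 - 16 * mb_cols (stride * 8 - 8 * mb_cols for chroma) rewinds it to column 0 and moves it down one macroblock row. A small self-contained check of that arithmetic, using illustrative numbers only:

#include <assert.h>

int main(void)
{
    /* Illustrative numbers only: a 640-pixel-wide luma plane, 40 macroblock columns. */
    int y_stride = 640, mb_cols = 40;
    int advanced = 16 * mb_cols;                      /* columns walked during the row */
    int adjust = y_stride * 16 - 16 * mb_cols;        /* the row-end update above      */

    assert(advanced + adjust == 16 * y_stride);       /* net effect: down 16 luma rows */
    return 0;
}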