| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 16 matching lines...) |
| 27 #include "vpx_ports/vpx_timer.h" | 27 #include "vpx_ports/vpx_timer.h" |
| 28 #include "vpx_scale/vpx_scale.h" | 28 #include "vpx_scale/vpx_scale.h" |
| 29 | 29 |
| 30 static int fixed_divide[512]; | 30 static int fixed_divide[512]; |
| 31 | 31 |
| 32 static void temporal_filter_predictors_mb_c(MACROBLOCKD *xd, | 32 static void temporal_filter_predictors_mb_c(MACROBLOCKD *xd, |
| 33 uint8_t *y_mb_ptr, | 33 uint8_t *y_mb_ptr, |
| 34 uint8_t *u_mb_ptr, | 34 uint8_t *u_mb_ptr, |
| 35 uint8_t *v_mb_ptr, | 35 uint8_t *v_mb_ptr, |
| 36 int stride, | 36 int stride, |
| 37 int uv_block_size, | 37 int uv_block_width, |
| 38 int uv_block_height, |
| 38 int mv_row, | 39 int mv_row, |
| 39 int mv_col, | 40 int mv_col, |
| 40 uint8_t *pred, | 41 uint8_t *pred, |
| 41 struct scale_factors *scale, | 42 struct scale_factors *scale, |
| 42 int x, int y) { | 43 int x, int y) { |
| 43 const int which_mv = 0; | 44 const int which_mv = 0; |
| 44 const MV mv = { mv_row, mv_col }; | 45 const MV mv = { mv_row, mv_col }; |
| 45 const InterpKernel *const kernel = | 46 const InterpKernel *const kernel = |
| 46 vp9_get_interp_kernel(xd->mi[0]->mbmi.interp_filter); | 47 vp9_get_interp_kernel(xd->mi[0]->mbmi.interp_filter); |
| 47 | 48 |
| 48 enum mv_precision mv_precision_uv; | 49 enum mv_precision mv_precision_uv; |
| 49 int uv_stride; | 50 int uv_stride; |
| 50 if (uv_block_size == 8) { | 51 if (uv_block_width == 8) { |
| 51 uv_stride = (stride + 1) >> 1; | 52 uv_stride = (stride + 1) >> 1; |
| 52 mv_precision_uv = MV_PRECISION_Q4; | 53 mv_precision_uv = MV_PRECISION_Q4; |
| 53 } else { | 54 } else { |
| 54 uv_stride = stride; | 55 uv_stride = stride; |
| 55 mv_precision_uv = MV_PRECISION_Q3; | 56 mv_precision_uv = MV_PRECISION_Q3; |
| 56 } | 57 } |
| 57 | 58 |
| 58 vp9_build_inter_predictor(y_mb_ptr, stride, | 59 vp9_build_inter_predictor(y_mb_ptr, stride, |
| 59 &pred[0], 16, | 60 &pred[0], 16, |
| 60 &mv, | 61 &mv, |
| 61 scale, | 62 scale, |
| 62 16, 16, | 63 16, 16, |
| 63 which_mv, | 64 which_mv, |
| 64 kernel, MV_PRECISION_Q3, x, y); | 65 kernel, MV_PRECISION_Q3, x, y); |
| 65 | 66 |
| 66 vp9_build_inter_predictor(u_mb_ptr, uv_stride, | 67 vp9_build_inter_predictor(u_mb_ptr, uv_stride, |
| 67 &pred[256], uv_block_size, | 68 &pred[256], uv_block_width, |
| 68 &mv, | 69 &mv, |
| 69 scale, | 70 scale, |
| 70 uv_block_size, uv_block_size, | 71 uv_block_width, uv_block_height, |
| 71 which_mv, | 72 which_mv, |
| 72 kernel, mv_precision_uv, x, y); | 73 kernel, mv_precision_uv, x, y); |
| 73 | 74 |
| 74 vp9_build_inter_predictor(v_mb_ptr, uv_stride, | 75 vp9_build_inter_predictor(v_mb_ptr, uv_stride, |
| 75 &pred[512], uv_block_size, | 76 &pred[512], uv_block_width, |
| 76 &mv, | 77 &mv, |
| 77 scale, | 78 scale, |
| 78 uv_block_size, uv_block_size, | 79 uv_block_width, uv_block_height, |
| 79 which_mv, | 80 which_mv, |
| 80 kernel, mv_precision_uv, x, y); | 81 kernel, mv_precision_uv, x, y); |
| 81 } | 82 } |
| 82 | 83 |
| 83 void vp9_temporal_filter_init() { | 84 void vp9_temporal_filter_init() { |
| 84 int i; | 85 int i; |
| 85 | 86 |
| 86 fixed_divide[0] = 0; | 87 fixed_divide[0] = 0; |
| 87 for (i = 1; i < 512; ++i) | 88 for (i = 1; i < 512; ++i) |
| 88 fixed_divide[i] = 0x80000 / i; | 89 fixed_divide[i] = 0x80000 / i; |
| 89 } | 90 } |
| 90 | 91 |
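Note on `fixed_divide`: it is a Q19 fixed-point reciprocal table (`0x80000 / i`), which lets the normalization loop further down replace a per-pixel division by `count[k]` with a multiply and a shift. A minimal standalone sketch of that trick, under the value ranges the filter actually produces (the helper name is illustrative, not part of the patch):

```c
#include <assert.h>

/* Sketch: rounded division by a small positive count via a Q19 reciprocal,
 * mirroring fixed_divide[i] = 0x80000 / i and the later
 * "pval *= fixed_divide[count[k]]; pval >>= 19;" normalization. */
static unsigned int div_by_count(unsigned int acc, unsigned int cnt) {
  const unsigned int recip = 0x80000 / cnt;  /* == fixed_divide[cnt] */
  unsigned int pval = acc + (cnt >> 1);      /* round to nearest */
  pval *= recip;                             /* Q19 multiply */
  return pval >> 19;                         /* ~ (acc + cnt/2) / cnt */
}

int main(void) {
  /* Close approximation of rounded division for acc <= 255 * cnt and
   * cnt < 512, which covers the accumulator/count ranges used here. */
  assert(div_by_count(255 * 256, 256) == 255);
  assert(div_by_count(1000, 7) == (1000 + 7 / 2) / 7);
  return 0;
}
```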
| 91 void vp9_temporal_filter_apply_c(uint8_t *frame1, | 92 void vp9_temporal_filter_apply_c(uint8_t *frame1, |
| 92 unsigned int stride, | 93 unsigned int stride, |
| 93 uint8_t *frame2, | 94 uint8_t *frame2, |
| 94 unsigned int block_size, | 95 unsigned int block_width, |
| 96 unsigned int block_height, |
| 95 int strength, | 97 int strength, |
| 96 int filter_weight, | 98 int filter_weight, |
| 97 unsigned int *accumulator, | 99 unsigned int *accumulator, |
| 98 uint16_t *count) { | 100 uint16_t *count) { |
| 99 unsigned int i, j, k; | 101 unsigned int i, j, k; |
| 100 int modifier; | 102 int modifier; |
| 101 int byte = 0; | 103 int byte = 0; |
| 102 const int rounding = strength > 0 ? 1 << (strength - 1) : 0; | 104 const int rounding = strength > 0 ? 1 << (strength - 1) : 0; |
| 103 | 105 |
| 104 for (i = 0, k = 0; i < block_size; i++) { | 106 for (i = 0, k = 0; i < block_height; i++) { |
| 105 for (j = 0; j < block_size; j++, k++) { | 107 for (j = 0; j < block_width; j++, k++) { |
| 106 int src_byte = frame1[byte]; | 108 int src_byte = frame1[byte]; |
| 107 int pixel_value = *frame2++; | 109 int pixel_value = *frame2++; |
| 108 | 110 |
| 109 modifier = src_byte - pixel_value; | 111 modifier = src_byte - pixel_value; |
| 110 // This is an integer approximation of: | 112 // This is an integer approximation of: |
| 111 // float coeff = (3.0 * modifer * modifier) / pow(2, strength); | 113 // float coeff = (3.0 * modifer * modifier) / pow(2, strength); |
| 112 // modifier = (int)roundf(coeff > 16 ? 0 : 16-coeff); | 114 // modifier = (int)roundf(coeff > 16 ? 0 : 16-coeff); |
| 113 modifier *= modifier; | 115 modifier *= modifier; |
| 114 modifier *= 3; | 116 modifier *= 3; |
| 115 modifier += rounding; | 117 modifier += rounding; |
| 116 modifier >>= strength; | 118 modifier >>= strength; |
| 117 | 119 |
| 118 if (modifier > 16) | 120 if (modifier > 16) |
| 119 modifier = 16; | 121 modifier = 16; |
| 120 | 122 |
| 121 modifier = 16 - modifier; | 123 modifier = 16 - modifier; |
| 122 modifier *= filter_weight; | 124 modifier *= filter_weight; |
| 123 | 125 |
| 124 count[k] += modifier; | 126 count[k] += modifier; |
| 125 accumulator[k] += modifier * pixel_value; | 127 accumulator[k] += modifier * pixel_value; |
| 126 | 128 |
| 127 byte++; | 129 byte++; |
| 128 } | 130 } |
| 129 | 131 |
| 130 byte += stride - block_size; | 132 byte += stride - block_width; |
| 131 } | 133 } |
| 132 } | 134 } |
| 133 | 135 |
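The weight computed above maps the squared difference between the source pixel and its temporal predictor to a value in [0, 16] (before scaling by `filter_weight`), so well-predicted pixels contribute strongly to the accumulator and poorly-predicted ones are attenuated. A self-contained comparison of the integer path against the float formula quoted in the comment (illustrative only; the `filter_weight` scaling is omitted):

```c
#include <math.h>
#include <stdio.h>

/* Integer weight, as in vp9_temporal_filter_apply_c() (filter_weight == 1). */
static int weight_int(int diff, int strength) {
  const int rounding = strength > 0 ? 1 << (strength - 1) : 0;
  int m = (diff * diff * 3 + rounding) >> strength;
  if (m > 16) m = 16;
  return 16 - m;
}

/* Float reference quoted in the comment inside the loop above. */
static int weight_float(int diff, int strength) {
  const float coeff = (3.0f * diff * diff) / powf(2.0f, (float)strength);
  return (int)roundf(coeff > 16.0f ? 0.0f : 16.0f - coeff);
}

int main(void) {
  int strength, diff;
  for (strength = 0; strength <= 6; ++strength)
    for (diff = 0; diff <= 8; ++diff)
      printf("strength=%d diff=%d int=%2d float=%2d\n",
             strength, diff, weight_int(diff, strength),
             weight_float(diff, strength));
  return 0;  /* link with -lm */
}
```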
| 134 static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi, | 136 static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi, |
| 135 uint8_t *arf_frame_buf, | 137 uint8_t *arf_frame_buf, |
| 136 uint8_t *frame_ptr_buf, | 138 uint8_t *frame_ptr_buf, |
| 137 int stride) { | 139 int stride) { |
| 138 MACROBLOCK *x = &cpi->mb; | 140 MACROBLOCK *const x = &cpi->mb; |
| 139 MACROBLOCKD* const xd = &x->e_mbd; | 141 MACROBLOCKD *const xd = &x->e_mbd; |
| 142 const MV_SPEED_FEATURES *const mv_sf = &cpi->sf.mv; |
| 140 int step_param; | 143 int step_param; |
| 141 int sadpb = x->sadperbit16; | 144 int sadpb = x->sadperbit16; |
| 142 int bestsme = INT_MAX; | 145 int bestsme = INT_MAX; |
| 143 int distortion; | 146 int distortion; |
| 144 unsigned int sse; | 147 unsigned int sse; |
| 145 | 148 |
| 146 MV best_ref_mv1 = {0, 0}; | 149 MV best_ref_mv1 = {0, 0}; |
| 147 MV best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */ | 150 MV best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */ |
| 148 MV *ref_mv = &x->e_mbd.mi[0]->bmi[0].as_mv[0].as_mv; | 151 MV *ref_mv = &x->e_mbd.mi[0]->bmi[0].as_mv[0].as_mv; |
| 149 | 152 |
| 150 // Save input state | 153 // Save input state |
| 151 struct buf_2d src = x->plane[0].src; | 154 struct buf_2d src = x->plane[0].src; |
| 152 struct buf_2d pre = xd->plane[0].pre[0]; | 155 struct buf_2d pre = xd->plane[0].pre[0]; |
| 153 | 156 |
| 154 best_ref_mv1_full.col = best_ref_mv1.col >> 3; | 157 best_ref_mv1_full.col = best_ref_mv1.col >> 3; |
| 155 best_ref_mv1_full.row = best_ref_mv1.row >> 3; | 158 best_ref_mv1_full.row = best_ref_mv1.row >> 3; |
| 156 | 159 |
| 157 // Setup frame pointers | 160 // Setup frame pointers |
| 158 x->plane[0].src.buf = arf_frame_buf; | 161 x->plane[0].src.buf = arf_frame_buf; |
| 159 x->plane[0].src.stride = stride; | 162 x->plane[0].src.stride = stride; |
| 160 xd->plane[0].pre[0].buf = frame_ptr_buf; | 163 xd->plane[0].pre[0].buf = frame_ptr_buf; |
| 161 xd->plane[0].pre[0].stride = stride; | 164 xd->plane[0].pre[0].stride = stride; |
| 162 | 165 |
| 163 step_param = cpi->sf.reduce_first_step_size + (cpi->oxcf.speed > 5 ? 1 : 0); | 166 step_param = mv_sf->reduce_first_step_size + (cpi->oxcf.speed > 5 ? 1 : 0); |
| 164 step_param = MIN(step_param, cpi->sf.max_step_search_steps - 2); | 167 step_param = MIN(step_param, mv_sf->max_step_search_steps - 2); |
| 165 | 168 |
| 166 // Ignore mv costing by sending NULL pointer instead of cost arrays | 169 // Ignore mv costing by sending NULL pointer instead of cost arrays |
| 167 vp9_hex_search(x, &best_ref_mv1_full, step_param, sadpb, 1, | 170 vp9_hex_search(x, &best_ref_mv1_full, step_param, sadpb, 1, |
| 168 &cpi->fn_ptr[BLOCK_16X16], 0, &best_ref_mv1, ref_mv); | 171 &cpi->fn_ptr[BLOCK_16X16], 0, &best_ref_mv1, ref_mv); |
| 169 | 172 |
| 170 // Ignore mv costing by sending NULL pointer instead of cost array | 173 // Ignore mv costing by sending NULL pointer instead of cost array |
| 171 bestsme = cpi->find_fractional_mv_step(x, ref_mv, | 174 bestsme = cpi->find_fractional_mv_step(x, ref_mv, |
| 172 &best_ref_mv1, | 175 &best_ref_mv1, |
| 173 cpi->common.allow_high_precision_mv, | 176 cpi->common.allow_high_precision_mv, |
| 174 x->errorperbit, | 177 x->errorperbit, |
| 175 &cpi->fn_ptr[BLOCK_16X16], | 178 &cpi->fn_ptr[BLOCK_16X16], |
| 176 0, cpi->sf.subpel_iters_per_step, | 179 0, mv_sf->subpel_iters_per_step, |
| 177 NULL, NULL, | 180 NULL, NULL, |
| 178 &distortion, &sse); | 181 &distortion, &sse); |
| 179 | 182 |
| 180 // Restore input state | 183 // Restore input state |
| 181 x->plane[0].src = src; | 184 x->plane[0].src = src; |
| 182 xd->plane[0].pre[0] = pre; | 185 xd->plane[0].pre[0] = pre; |
| 183 | 186 |
| 184 return bestsme; | 187 return bestsme; |
| 185 } | 188 } |
| 186 | 189 |
| (...skipping 10 matching lines...) |
| 197 int mb_rows = cpi->common.mb_rows; | 200 int mb_rows = cpi->common.mb_rows; |
| 198 int mb_y_offset = 0; | 201 int mb_y_offset = 0; |
| 199 int mb_uv_offset = 0; | 202 int mb_uv_offset = 0; |
| 200 DECLARE_ALIGNED_ARRAY(16, unsigned int, accumulator, 16 * 16 * 3); | 203 DECLARE_ALIGNED_ARRAY(16, unsigned int, accumulator, 16 * 16 * 3); |
| 201 DECLARE_ALIGNED_ARRAY(16, uint16_t, count, 16 * 16 * 3); | 204 DECLARE_ALIGNED_ARRAY(16, uint16_t, count, 16 * 16 * 3); |
| 202 MACROBLOCKD *mbd = &cpi->mb.e_mbd; | 205 MACROBLOCKD *mbd = &cpi->mb.e_mbd; |
| 203 YV12_BUFFER_CONFIG *f = cpi->frames[alt_ref_index]; | 206 YV12_BUFFER_CONFIG *f = cpi->frames[alt_ref_index]; |
| 204 uint8_t *dst1, *dst2; | 207 uint8_t *dst1, *dst2; |
| 205 DECLARE_ALIGNED_ARRAY(16, uint8_t, predictor, 16 * 16 * 3); | 208 DECLARE_ALIGNED_ARRAY(16, uint8_t, predictor, 16 * 16 * 3); |
| 206 const int mb_uv_height = 16 >> mbd->plane[1].subsampling_y; | 209 const int mb_uv_height = 16 >> mbd->plane[1].subsampling_y; |
| 210 const int mb_uv_width = 16 >> mbd->plane[1].subsampling_x; |
| 207 | 211 |
| 208 // Save input state | 212 // Save input state |
| 209 uint8_t* input_buffer[MAX_MB_PLANE]; | 213 uint8_t* input_buffer[MAX_MB_PLANE]; |
| 210 int i; | 214 int i; |
| 211 | 215 |
| 212 // TODO(aconverse): Add 4:2:2 support | |
| 213 assert(mbd->plane[1].subsampling_x == mbd->plane[1].subsampling_y); | |
| 214 | |
| 215 for (i = 0; i < MAX_MB_PLANE; i++) | 216 for (i = 0; i < MAX_MB_PLANE; i++) |
| 216 input_buffer[i] = mbd->plane[i].pre[0].buf; | 217 input_buffer[i] = mbd->plane[i].pre[0].buf; |
| 217 | 218 |
| 218 for (mb_row = 0; mb_row < mb_rows; mb_row++) { | 219 for (mb_row = 0; mb_row < mb_rows; mb_row++) { |
| 219 // Source frames are extended to 16 pixels. This is different than | 220 // Source frames are extended to 16 pixels. This is different than |
| 220 // L/A/G reference frames that have a border of 32 (VP9ENCBORDERINPIXELS) | 221 // L/A/G reference frames that have a border of 32 (VP9ENCBORDERINPIXELS) |
| 221 // A 6/8 tap filter is used for motion search. This requires 2 pixels | 222 // A 6/8 tap filter is used for motion search. This requires 2 pixels |
| 222 // before and 3 pixels after. So the largest Y mv on a border would | 223 // before and 3 pixels after. So the largest Y mv on a border would |
| 223 // then be 16 - VP9_INTERP_EXTEND. The UV blocks are half the size of the | 224 // then be 16 - VP9_INTERP_EXTEND. The UV blocks are half the size of the |
| 224 // Y and therefore only extended by 8. The largest mv that a UV block | 225 // Y and therefore only extended by 8. The largest mv that a UV block |
| (...skipping 43 matching lines...) |
| 268 ? 2 : err < thresh_high ? 1 : 0; | 269 ? 2 : err < thresh_high ? 1 : 0; |
| 269 } | 270 } |
| 270 | 271 |
| 271 if (filter_weight != 0) { | 272 if (filter_weight != 0) { |
| 272 // Construct the predictors | 273 // Construct the predictors |
| 273 temporal_filter_predictors_mb_c(mbd, | 274 temporal_filter_predictors_mb_c(mbd, |
| 274 cpi->frames[frame]->y_buffer + mb_y_offset, | 275 cpi->frames[frame]->y_buffer + mb_y_offset, |
| 275 cpi->frames[frame]->u_buffer + mb_uv_offset, | 276 cpi->frames[frame]->u_buffer + mb_uv_offset, |
| 276 cpi->frames[frame]->v_buffer + mb_uv_offset, | 277 cpi->frames[frame]->v_buffer + mb_uv_offset, |
| 277 cpi->frames[frame]->y_stride, | 278 cpi->frames[frame]->y_stride, |
| 278 mb_uv_height, | 279 mb_uv_width, mb_uv_height, |
| 279 mbd->mi[0]->bmi[0].as_mv[0].as_mv.row, | 280 mbd->mi[0]->bmi[0].as_mv[0].as_mv.row, |
| 280 mbd->mi[0]->bmi[0].as_mv[0].as_mv.col, | 281 mbd->mi[0]->bmi[0].as_mv[0].as_mv.col, |
| 281 predictor, scale, | 282 predictor, scale, |
| 282 mb_col * 16, mb_row * 16); | 283 mb_col * 16, mb_row * 16); |
| 283 | 284 |
| 284 // Apply the filter (YUV) | 285 // Apply the filter (YUV) |
| 285 vp9_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride, | 286 vp9_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride, |
| 286 predictor, 16, strength, filter_weight, | 287 predictor, 16, 16, |
| 288 strength, filter_weight, |
| 287 accumulator, count); | 289 accumulator, count); |
| 288 | |
| 289 vp9_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride, | 290 vp9_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride, |
| 290 predictor + 256, mb_uv_height, strength, | 291 predictor + 256, |
| 292 mb_uv_width, mb_uv_height, strength, |
| 291 filter_weight, accumulator + 256, | 293 filter_weight, accumulator + 256, |
| 292 count + 256); | 294 count + 256); |
| 293 | |
| 294 vp9_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride, | 295 vp9_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride, |
| 295 predictor + 512, mb_uv_height, strength, | 296 predictor + 512, |
| 297 mb_uv_width, mb_uv_height, strength, |
| 296 filter_weight, accumulator + 512, | 298 filter_weight, accumulator + 512, |
| 297 count + 512); | 299 count + 512); |
| 298 } | 300 } |
| 299 } | 301 } |
| 300 | 302 |
| 301 // Normalize filter output to produce AltRef frame | 303 // Normalize filter output to produce AltRef frame |
| 302 dst1 = cpi->alt_ref_buffer.y_buffer; | 304 dst1 = cpi->alt_ref_buffer.y_buffer; |
| 303 stride = cpi->alt_ref_buffer.y_stride; | 305 stride = cpi->alt_ref_buffer.y_stride; |
| 304 byte = mb_y_offset; | 306 byte = mb_y_offset; |
| 305 for (i = 0, k = 0; i < 16; i++) { | 307 for (i = 0, k = 0; i < 16; i++) { |
| 306 for (j = 0; j < 16; j++, k++) { | 308 for (j = 0; j < 16; j++, k++) { |
| 307 unsigned int pval = accumulator[k] + (count[k] >> 1); | 309 unsigned int pval = accumulator[k] + (count[k] >> 1); |
| 308 pval *= fixed_divide[count[k]]; | 310 pval *= fixed_divide[count[k]]; |
| 309 pval >>= 19; | 311 pval >>= 19; |
| 310 | 312 |
| 311 dst1[byte] = (uint8_t)pval; | 313 dst1[byte] = (uint8_t)pval; |
| 312 | 314 |
| 313 // move to next pixel | 315 // move to next pixel |
| 314 byte++; | 316 byte++; |
| 315 } | 317 } |
| 316 byte += stride - 16; | 318 byte += stride - 16; |
| 317 } | 319 } |
| 318 | 320 |
| 319 dst1 = cpi->alt_ref_buffer.u_buffer; | 321 dst1 = cpi->alt_ref_buffer.u_buffer; |
| 320 dst2 = cpi->alt_ref_buffer.v_buffer; | 322 dst2 = cpi->alt_ref_buffer.v_buffer; |
| 321 stride = cpi->alt_ref_buffer.uv_stride; | 323 stride = cpi->alt_ref_buffer.uv_stride; |
| 322 byte = mb_uv_offset; | 324 byte = mb_uv_offset; |
| 323 for (i = 0, k = 256; i < mb_uv_height; i++) { | 325 for (i = 0, k = 256; i < mb_uv_height; i++) { |
| 324 for (j = 0; j < mb_uv_height; j++, k++) { | 326 for (j = 0; j < mb_uv_width; j++, k++) { |
| 325 int m = k + 256; | 327 int m = k + 256; |
| 326 | 328 |
| 327 // U | 329 // U |
| 328 unsigned int pval = accumulator[k] + (count[k] >> 1); | 330 unsigned int pval = accumulator[k] + (count[k] >> 1); |
| 329 pval *= fixed_divide[count[k]]; | 331 pval *= fixed_divide[count[k]]; |
| 330 pval >>= 19; | 332 pval >>= 19; |
| 331 dst1[byte] = (uint8_t)pval; | 333 dst1[byte] = (uint8_t)pval; |
| 332 | 334 |
| 333 // V | 335 // V |
| 334 pval = accumulator[m] + (count[m] >> 1); | 336 pval = accumulator[m] + (count[m] >> 1); |
| 335 pval *= fixed_divide[count[m]]; | 337 pval *= fixed_divide[count[m]]; |
| 336 pval >>= 19; | 338 pval >>= 19; |
| 337 dst2[byte] = (uint8_t)pval; | 339 dst2[byte] = (uint8_t)pval; |
| 338 | 340 |
| 339 // move to next pixel | 341 // move to next pixel |
| 340 byte++; | 342 byte++; |
| 341 } | 343 } |
| 342 byte += stride - mb_uv_height; | 344 byte += stride - mb_uv_width; |
| 343 } | 345 } |
| 344 mb_y_offset += 16; | 346 mb_y_offset += 16; |
| 345 mb_uv_offset += mb_uv_height; | 347 mb_uv_offset += mb_uv_width; |
| 346 } | 348 } |
| 347 mb_y_offset += 16 * (f->y_stride - mb_cols); | 349 mb_y_offset += 16 * (f->y_stride - mb_cols); |
| 348 mb_uv_offset += mb_uv_height * (f->uv_stride - mb_cols); | 350 mb_uv_offset += mb_uv_height * f->uv_stride - mb_uv_width * mb_cols; |
| 349 } | 351 } |
| 350 | 352 |
| 351 // Restore input state | 353 // Restore input state |
| 352 for (i = 0; i < MAX_MB_PLANE; i++) | 354 for (i = 0; i < MAX_MB_PLANE; i++) |
| 353 mbd->plane[i].pre[0].buf = input_buffer[i]; | 355 mbd->plane[i].pre[0].buf = input_buffer[i]; |
| 354 } | 356 } |
| 355 | 357 |
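The substance of this change is splitting the square `uv_block_size` / `mb_uv_height` into separate width and height derived from the chroma subsampling factors, which is what removes the old `subsampling_x == subsampling_y` assert and lets 4:2:2 input filter the full-height UV block. A quick sketch of the UV block dimensions implied by `16 >> subsampling_x` and `16 >> subsampling_y` (format list assumed for illustration):

```c
#include <stdio.h>

/* UV block dimensions for a 16x16 luma macroblock, as computed in
 * vp9_temporal_filter_iterate_c(). */
int main(void) {
  const struct { const char *fmt; int ss_x, ss_y; } chroma[] = {
    { "4:2:0", 1, 1 },  /* 8x8 UV block   */
    { "4:2:2", 1, 0 },  /* 8x16 UV block  */
    { "4:4:4", 0, 0 },  /* 16x16 UV block */
  };
  int i;
  for (i = 0; i < 3; ++i)
    printf("%s: mb_uv_width=%2d  mb_uv_height=%2d\n", chroma[i].fmt,
           16 >> chroma[i].ss_x, 16 >> chroma[i].ss_y);
  return 0;
}
```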
| 356 void vp9_temporal_filter_prepare(VP9_COMP *cpi, int distance) { | 358 void vp9_temporal_filter_prepare(VP9_COMP *cpi, int distance) { |
| 357 VP9_COMMON *const cm = &cpi->common; | 359 VP9_COMMON *const cm = &cpi->common; |
| 358 int frame = 0; | 360 int frame = 0; |
| (...skipping 154 matching lines...) |
| 513 | 515 |
| 514 // Adjust number of frames in filter and strength based on gf boost level. | 516 // Adjust number of frames in filter and strength based on gf boost level. |
| 515 if (cpi->active_arnr_frames > (group_boost / 150)) { | 517 if (cpi->active_arnr_frames > (group_boost / 150)) { |
| 516 cpi->active_arnr_frames = (group_boost / 150); | 518 cpi->active_arnr_frames = (group_boost / 150); |
| 517 cpi->active_arnr_frames += !(cpi->active_arnr_frames & 1); | 519 cpi->active_arnr_frames += !(cpi->active_arnr_frames & 1); |
| 518 } | 520 } |
| 519 if (cpi->active_arnr_strength > (group_boost / 300)) { | 521 if (cpi->active_arnr_strength > (group_boost / 300)) { |
| 520 cpi->active_arnr_strength = (group_boost / 300); | 522 cpi->active_arnr_strength = (group_boost / 300); |
| 521 } | 523 } |
| 522 } | 524 } |