Chromium Code Reviews

Side by Side Diff: source/libvpx/vp8/encoder/pickinter.c

Issue 7671004: Update libvpx snapshot to v0.9.7-p1 (Cayuga). (Closed) Base URL: svn://chrome-svn/chrome/trunk/deps/third_party/libvpx/
Patch Set: '' Created 9 years, 4 months ago
1 /* 1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 24 matching lines...)
35 35
36 extern int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd); 36 extern int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd);
37 37
38 #ifdef SPEEDSTATS 38 #ifdef SPEEDSTATS
39 extern unsigned int cnt_pm; 39 extern unsigned int cnt_pm;
40 #endif 40 #endif
41 41
42 extern const MV_REFERENCE_FRAME vp8_ref_frame_order[MAX_MODES]; 42 extern const MV_REFERENCE_FRAME vp8_ref_frame_order[MAX_MODES];
43 extern const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES]; 43 extern const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES];
44 44
45
46 extern unsigned int (*vp8_get16x16pred_error)(unsigned char *src_ptr, int src_stride, unsigned char *ref_ptr, int ref_stride);
47 extern unsigned int (*vp8_get4x4sse_cs)(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride); 45 extern unsigned int (*vp8_get4x4sse_cs)(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride);
48 extern int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x, MV *best_ref_mv, int best_rd, int *, int *, int *, int, int *mvcost[2], int, int fullpixel);
49 extern int vp8_cost_mv_ref(MB_PREDICTION_MODE m, const int near_mv_ref_ct[4]); 46 extern int vp8_cost_mv_ref(MB_PREDICTION_MODE m, const int near_mv_ref_ct[4]);
50 extern void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, MV *mv);
51 47
52 48
53 int vp8_skip_fractional_mv_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2]) 49 int vp8_skip_fractional_mv_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
50 int_mv *bestmv, int_mv *ref_mv,
51 int error_per_bit,
52 const vp8_variance_fn_ptr_t *vfp,
53 int *mvcost[2], int *distortion,
54 unsigned int *sse)
54 { 55 {
55 (void) b; 56 (void) b;
56 (void) d; 57 (void) d;
57 (void) ref_mv; 58 (void) ref_mv;
58 (void) error_per_bit; 59 (void) error_per_bit;
59 (void) vfp; 60 (void) vfp;
60 (void) mvcost; 61 (void) mvcost;
61 bestmv->row <<= 3; 62 (void) distortion;
62 bestmv->col <<= 3; 63 (void) sse;
64 bestmv->as_mv.row <<= 3;
65 bestmv->as_mv.col <<= 3;
63 return 0; 66 return 0;
64 } 67 }
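The rewritten signature above is part of a wider migration in this snapshot from raw MV structs to the int_mv type, which is why the shifts now go through bestmv->as_mv. As a reader aid, here is a minimal, self-contained sketch of such a union; the as_int/as_mv member names are taken from the calls in this diff, while the exact field widths are an assumption rather than a verbatim copy of vp8/common/mv.h:

#include <stdio.h>

/* Sketch of an int_mv-style union as used by the calls in this diff.
 * The member names mirror the patch (as_int / as_mv); the integer
 * widths are an assumption, not a copy of the libvpx header. */
typedef struct
{
    short row;
    short col;
} MV;

typedef union
{
    unsigned int as_int;   /* whole-vector view: copy or compare in one op */
    MV           as_mv;    /* component view: row/col arithmetic           */
} int_mv;

int main(void)
{
    int_mv best, mode;

    best.as_mv.row = 4;              /* written through the component view */
    best.as_mv.col = -2;

    mode.as_int = best.as_int;       /* copies both components at once, as in
                                        mode_mv[NEWMV].as_int = d->bmi.mv.as_int */

    printf("row=%d col=%d zero=%d\n",
           mode.as_mv.row, mode.as_mv.col,
           mode.as_int == 0);        /* zero test as in the NEARESTMV/NEARMV check */
    return 0;
}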
65 68
66 69
67 static int get_inter_mbpred_error(MACROBLOCK *mb, const vp8_variance_fn_ptr_t *vfp, unsigned int *sse) 70 static int get_inter_mbpred_error(MACROBLOCK *mb,
71 const vp8_variance_fn_ptr_t *vfp,
72 unsigned int *sse,
73 int_mv this_mv)
68 { 74 {
69 75
70 BLOCK *b = &mb->block[0]; 76 BLOCK *b = &mb->block[0];
71 BLOCKD *d = &mb->e_mbd.block[0]; 77 BLOCKD *d = &mb->e_mbd.block[0];
72 unsigned char *what = (*(b->base_src) + b->src); 78 unsigned char *what = (*(b->base_src) + b->src);
73 int what_stride = b->src_stride; 79 int what_stride = b->src_stride;
74 unsigned char *in_what = *(d->base_pre) + d->pre ; 80 unsigned char *in_what = *(d->base_pre) + d->pre ;
75 int in_what_stride = d->pre_stride; 81 int in_what_stride = d->pre_stride;
76 int xoffset = d->bmi.mv.as_mv.col & 7; 82 int xoffset = this_mv.as_mv.col & 7;
77 int yoffset = d->bmi.mv.as_mv.row & 7; 83 int yoffset = this_mv.as_mv.row & 7;
78 84
79 in_what += (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3); 85 in_what += (this_mv.as_mv.row >> 3) * d->pre_stride + (this_mv.as_mv.col >> 3);
80 86
81 if (xoffset | yoffset) 87 if (xoffset | yoffset)
82 { 88 {
83 return vfp->svf(in_what, in_what_stride, xoffset, yoffset, what, what_stride, sse); 89 return vfp->svf(in_what, in_what_stride, xoffset, yoffset, what, what_stride, sse);
84 } 90 }
85 else 91 else
86 { 92 {
87 return vfp->vf(what, what_stride, in_what, in_what_stride, sse); 93 return vfp->vf(what, what_stride, in_what, in_what_stride, sse);
88 } 94 }
89 95
90 } 96 }
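In get_inter_mbpred_error above, each motion-vector component is split into a whole-pixel offset and a sub-pixel phase: the shift by 3 moves the prediction pointer by full pixels, and the low three bits (& 7) select the phase handed to the sub-pixel variance function. A tiny stand-alone illustration of that split, using a hypothetical component value (only the >>3 / &7 arithmetic is taken from the code above):

#include <stdio.h>

/* Stand-alone illustration of the full-pel / sub-pel split used in
 * get_inter_mbpred_error: ">> 3" gives the whole-pixel offset applied to
 * the prediction pointer, "& 7" gives the phase passed to vfp->svf.
 * The component value below is hypothetical. */
int main(void)
{
    int mv_col = 21;                 /* hypothetical column component      */
    int full_pel = mv_col >> 3;      /* whole-pixel part: 21 >> 3 == 2     */
    int sub_pel  = mv_col & 7;       /* sub-pixel phase:  21 &  7 == 5     */

    /* When the phase is zero for both components, the code above skips the
     * filtered path (vfp->svf) and uses the plain variance call (vfp->vf). */
    printf("full=%d sub=%d\n", full_pel, sub_pel);
    return 0;
}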
91 97
92 unsigned int vp8_get16x16pred_error_c
93 (
94 const unsigned char *src_ptr,
95 int src_stride,
96 const unsigned char *ref_ptr,
97 int ref_stride,
98 int max_sad
99 )
100 {
101 unsigned pred_error = 0;
102 int i, j;
103 int sum = 0;
104
105 for (i = 0; i < 16; i++)
106 {
107 int diff;
108
109 for (j = 0; j < 16; j++)
110 {
111 diff = src_ptr[j] - ref_ptr[j];
112 sum += diff;
113 pred_error += diff * diff;
114 }
115
116 src_ptr += src_stride;
117 ref_ptr += ref_stride;
118 }
119
120 pred_error -= sum * sum / 256;
121 return pred_error;
122 }
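The deleted vp8_get16x16pred_error_c above returns the sum of squared differences minus sum*sum/256, i.e. an SSE corrected by the squared mean, which is a variance. That is why later hunks in this patch can swap the get16x16prederror calls for var16x16 and read the raw SSE back through the extra pointer argument; the exact var16x16 behaviour is inferred from how &sse is used there, so treat it as an assumption. A four-sample sketch of the same identity:

#include <stdio.h>

/* variance = SSE - sum*sum/N, with N = 4 here (256 for a 16x16 block). */
int main(void)
{
    int src[4] = {10, 12, 9, 13};
    int ref[4] = {10, 10, 10, 10};
    int i, sum = 0, sse = 0;

    for (i = 0; i < 4; i++)
    {
        int diff = src[i] - ref[i];
        sum += diff;                 /* signed sum of differences   */
        sse += diff * diff;          /* sum of squared differences  */
    }

    printf("sse=%d var=%d\n", sse, sse - sum * sum / 4);
    return 0;
}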
123
124 98
125 unsigned int vp8_get4x4sse_cs_c 99 unsigned int vp8_get4x4sse_cs_c
126 ( 100 (
127 const unsigned char *src_ptr, 101 const unsigned char *src_ptr,
128 int source_stride, 102 int source_stride,
129 const unsigned char *ref_ptr, 103 const unsigned char *ref_ptr,
130 int recon_stride, 104 int recon_stride
131 int max_sad
132 ) 105 )
133 { 106 {
134 int distortion = 0; 107 int distortion = 0;
135 int r, c; 108 int r, c;
136 109
137 for (r = 0; r < 4; r++) 110 for (r = 0; r < 4; r++)
138 { 111 {
139 for (c = 0; c < 4; c++) 112 for (c = 0; c < 4; c++)
140 { 113 {
141 int diff = src_ptr[c] - ref_ptr[c]; 114 int diff = src_ptr[c] - ref_ptr[c];
142 distortion += diff * diff; 115 distortion += diff * diff;
143 } 116 }
144 117
145 src_ptr += source_stride; 118 src_ptr += source_stride;
146 ref_ptr += recon_stride; 119 ref_ptr += recon_stride;
147 } 120 }
148 121
149 return distortion; 122 return distortion;
150 } 123 }
151 124
152 static int get_prediction_error(BLOCK *be, BLOCKD *b, const vp8_variance_rtcd_vtable_t *rtcd) 125 static int get_prediction_error(BLOCK *be, BLOCKD *b, const vp8_variance_rtcd_vtable_t *rtcd)
153 { 126 {
154 unsigned char *sptr; 127 unsigned char *sptr;
155 unsigned char *dptr; 128 unsigned char *dptr;
156 sptr = (*(be->base_src) + be->src); 129 sptr = (*(be->base_src) + be->src);
157 dptr = b->predictor; 130 dptr = b->predictor;
158 131
159 return VARIANCE_INVOKE(rtcd, get4x4sse_cs)(sptr, be->src_stride, dptr, 16, 0x7fffffff); 132 return VARIANCE_INVOKE(rtcd, get4x4sse_cs)(sptr, be->src_stride, dptr, 16);
160 133
161 } 134 }
162 135
163 static int pick_intra4x4block( 136 static int pick_intra4x4block(
164 const VP8_ENCODER_RTCD *rtcd, 137 const VP8_ENCODER_RTCD *rtcd,
165 MACROBLOCK *x, 138 MACROBLOCK *x,
166 BLOCK *be, 139 int ib,
167 BLOCKD *b,
168 B_PREDICTION_MODE *best_mode, 140 B_PREDICTION_MODE *best_mode,
169 B_PREDICTION_MODE above, 141 unsigned int *mode_costs,
170 B_PREDICTION_MODE left,
171 142
172 int *bestrate, 143 int *bestrate,
173 int *bestdistortion) 144 int *bestdistortion)
174 { 145 {
146
147 BLOCKD *b = &x->e_mbd.block[ib];
148 BLOCK *be = &x->block[ib];
175 B_PREDICTION_MODE mode; 149 B_PREDICTION_MODE mode;
176 int best_rd = INT_MAX; // 1<<30 150 int best_rd = INT_MAX; // 1<<30
177 int rate; 151 int rate;
178 int distortion; 152 int distortion;
179 unsigned int *mode_costs;
180
181 if (x->e_mbd.frame_type == KEY_FRAME)
182 {
183 mode_costs = x->bmode_costs[above][left];
184 }
185 else
186 {
187 mode_costs = x->inter_bmode_costs;
188 }
189 153
190 for (mode = B_DC_PRED; mode <= B_HE_PRED /*B_HU_PRED*/; mode++) 154 for (mode = B_DC_PRED; mode <= B_HE_PRED /*B_HU_PRED*/; mode++)
191 { 155 {
192 int this_rd; 156 int this_rd;
193 157
194 rate = mode_costs[mode]; 158 rate = mode_costs[mode];
195 vp8_predict_intra4x4(b, mode, b->predictor); 159 RECON_INVOKE(&rtcd->common->recon, intra4x4_predict)
160 (b, mode, b->predictor);
196 distortion = get_prediction_error(be, b, &rtcd->variance); 161 distortion = get_prediction_error(be, b, &rtcd->variance);
197 this_rd = RD_ESTIMATE(x->rdmult, x->rddiv, rate, distortion); 162 this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
198 163
199 if (this_rd < best_rd) 164 if (this_rd < best_rd)
200 { 165 {
201 *bestrate = rate; 166 *bestrate = rate;
202 *bestdistortion = distortion; 167 *bestdistortion = distortion;
203 best_rd = this_rd; 168 best_rd = this_rd;
204 *best_mode = mode; 169 *best_mode = mode;
205 } 170 }
206 } 171 }
207 172
208 b->bmi.mode = (B_PREDICTION_MODE)(*best_mode); 173 b->bmi.as_mode = (B_PREDICTION_MODE)(*best_mode);
209 vp8_encode_intra4x4block(rtcd, x, be, b, b->bmi.mode); 174 vp8_encode_intra4x4block(rtcd, x, ib);
210
211 return best_rd; 175 return best_rd;
212 } 176 }
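pick_intra4x4block, like the rest of this file, folds a mode's rate and distortion into a single Lagrangian cost; the patch only switches the macro name from RD_ESTIMATE to RDCOST. The sketch below shows the shape of such a cost with made-up scaling; the real macro lives in the encoder's rdopt.h and its exact constants may differ:

#include <stdio.h>

/* Illustrative rate-distortion cost in the shape of the
 * RDCOST(x->rdmult, x->rddiv, rate, distortion) calls above: the rate is
 * weighted by rdmult and rescaled, then added to the weighted distortion.
 * The scaling constants here are assumptions, not the libvpx definition. */
static int rd_cost_sketch(int rdmult, int rddiv, int rate, int distortion)
{
    return ((128 + rate * rdmult) >> 8) + rddiv * distortion;
}

int main(void)
{
    /* Hypothetical numbers: the mode that spends more bits but predicts
     * better (lower distortion) wins the comparison here. */
    int rd_cheap_bits = rd_cost_sketch(300, 1, 40, 900);   /* 47 + 900 = 947  */
    int rd_more_bits  = rd_cost_sketch(300, 1, 120, 700);  /* 141 + 700 = 841 */

    printf("%d vs %d\n", rd_cheap_bits, rd_more_bits);
    return 0;
}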
213 177
214 178
215 int vp8_pick_intra4x4mby_modes(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb, int *Rate, int *best_dist) 179 static int pick_intra4x4mby_modes
180 (
181 const VP8_ENCODER_RTCD *rtcd,
182 MACROBLOCK *mb,
183 int *Rate,
184 int *best_dist
185 )
216 { 186 {
217 MACROBLOCKD *const xd = &mb->e_mbd; 187 MACROBLOCKD *const xd = &mb->e_mbd;
218 int i; 188 int i;
219 int cost = mb->mbmode_cost [xd->frame_type] [B_PRED]; 189 int cost = mb->mbmode_cost [xd->frame_type] [B_PRED];
220 int error; 190 int error;
221 int distortion = 0; 191 int distortion = 0;
192 unsigned int *bmode_costs;
222 193
223 vp8_intra_prediction_down_copy(xd); 194 vp8_intra_prediction_down_copy(xd);
224 195
196 bmode_costs = mb->inter_bmode_costs;
197
225 for (i = 0; i < 16; i++) 198 for (i = 0; i < 16; i++)
226 { 199 {
227 MODE_INFO *const mic = xd->mode_info_context; 200 MODE_INFO *const mic = xd->mode_info_context;
228 const int mis = xd->mode_info_stride; 201 const int mis = xd->mode_info_stride;
229 const B_PREDICTION_MODE A = vp8_above_bmi(mic, i, mis)->mode; 202
230 const B_PREDICTION_MODE L = vp8_left_bmi(mic, i)->mode;
231 B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode); 203 B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
232 int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(d); 204 int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(d);
233 205
234 pick_intra4x4block(rtcd, mb, mb->block + i, xd->block + i, 206 if (mb->e_mbd.frame_type == KEY_FRAME)
235 &best_mode, A, L, &r, &d); 207 {
208 const B_PREDICTION_MODE A = above_block_mode(mic, i, mis);
209 const B_PREDICTION_MODE L = left_block_mode(mic, i);
210
211 bmode_costs = mb->bmode_costs[A][L];
212 }
213
214
215 pick_intra4x4block(rtcd, mb, i, &best_mode, bmode_costs, &r, &d);
236 216
237 cost += r; 217 cost += r;
238 distortion += d; 218 distortion += d;
219 mic->bmi[i].as_mode = best_mode;
239 220
240 mic->bmi[i].mode = xd->block[i].bmi.mode = best_mode; 221 // Break out case where we have already exceeded best so far value
241 222 // that was passed in
242 // Break out case where we have already exceeded best so far value that was bassed in
243 if (distortion > *best_dist) 223 if (distortion > *best_dist)
244 break; 224 break;
245 } 225 }
246 226
247 for (i = 0; i < 16; i++)
248 xd->block[i].bmi.mv.as_int = 0;
249
250 *Rate = cost; 227 *Rate = cost;
251 228
252 if (i == 16) 229 if (i == 16)
253 { 230 {
254 *best_dist = distortion; 231 *best_dist = distortion;
255 error = RD_ESTIMATE(mb->rdmult, mb->rddiv, cost, distortion); 232 error = RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
256 } 233 }
257 else 234 else
258 { 235 {
259 *best_dist = INT_MAX; 236 *best_dist = INT_MAX;
260 error = INT_MAX; 237 error = INT_MAX;
261 } 238 }
262 239
263 return error; 240 return error;
264 } 241 }
265 242
266 int vp8_pick_intra_mbuv_mode(MACROBLOCK *mb) 243 static void pick_intra_mbuv_mode(MACROBLOCK *mb)
267 { 244 {
268 245
269 MACROBLOCKD *x = &mb->e_mbd; 246 MACROBLOCKD *x = &mb->e_mbd;
270 unsigned char *uabove_row = x->dst.u_buffer - x->dst.uv_stride; 247 unsigned char *uabove_row = x->dst.u_buffer - x->dst.uv_stride;
271 unsigned char *vabove_row = x->dst.v_buffer - x->dst.uv_stride; 248 unsigned char *vabove_row = x->dst.v_buffer - x->dst.uv_stride;
272 unsigned char *usrc_ptr = (mb->block[16].src + *mb->block[16].base_src); 249 unsigned char *usrc_ptr = (mb->block[16].src + *mb->block[16].base_src);
273 unsigned char *vsrc_ptr = (mb->block[20].src + *mb->block[20].base_src); 250 unsigned char *vsrc_ptr = (mb->block[20].src + *mb->block[20].base_src);
274 int uvsrc_stride = mb->block[16].src_stride; 251 int uvsrc_stride = mb->block[16].src_stride;
275 unsigned char uleft_col[8]; 252 unsigned char uleft_col[8];
276 unsigned char vleft_col[8]; 253 unsigned char vleft_col[8];
(...skipping 124 matching lines...)
401 { 378 {
402 if (best_error > pred_error[i]) 379 if (best_error > pred_error[i])
403 { 380 {
404 best_error = pred_error[i]; 381 best_error = pred_error[i];
405 best_mode = (MB_PREDICTION_MODE)i; 382 best_mode = (MB_PREDICTION_MODE)i;
406 } 383 }
407 } 384 }
408 385
409 386
410 mb->e_mbd.mode_info_context->mbmi.uv_mode = best_mode; 387 mb->e_mbd.mode_info_context->mbmi.uv_mode = best_mode;
411 return best_error;
412 388
413 } 389 }
414 390
415 int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int recon_uvoffset, int *returnrate, int *returndistortion, int *returnintra) 391 static void update_mvcount(VP8_COMP *cpi, MACROBLOCKD *xd, int_mv *best_ref_mv)
392 {
393 /* Split MV modes currently not supported when RD is nopt enabled,
394 * therefore, only need to modify MVcount in NEWMV mode. */
395 if (xd->mode_info_context->mbmi.mode == NEWMV)
396 {
397 cpi->MVcount[0][mv_max+((xd->mode_info_context->mbmi.mv.as_mv.row -
398 best_ref_mv->as_mv.row) >> 1)]++;
399 cpi->MVcount[1][mv_max+((xd->mode_info_context->mbmi.mv.as_mv.col -
400 best_ref_mv->as_mv.col) >> 1)]++;
401 }
402 }
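The new update_mvcount helper above feeds the encoder's motion-vector statistics: for a NEWMV macroblock, each component's difference from the best reference predictor is halved and offset by mv_max so it indexes a non-negative histogram bin. A small sketch of that indexing, with a placeholder bound instead of vp8's real mv_max and hypothetical values:

#include <stdio.h>

#define MV_MAX_SKETCH 64             /* placeholder, not vp8's mv_max */

int main(void)
{
    int MVcount_row[2 * MV_MAX_SKETCH + 1] = {0};
    int coded_row = -14;             /* hypothetical coded MV row component  */
    int ref_row   = -6;              /* hypothetical predictor row component */
    int bin = MV_MAX_SKETCH + ((coded_row - ref_row) >> 1);

    MVcount_row[bin]++;              /* same shape as cpi->MVcount[0][...]++ */

    printf("bin %d count %d\n", bin, MVcount_row[bin]);
    return 0;
}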
403
404 void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
405 int recon_uvoffset, int *returnrate,
406 int *returndistortion, int *returnintra)
416 { 407 {
417 BLOCK *b = &x->block[0]; 408 BLOCK *b = &x->block[0];
418 BLOCKD *d = &x->e_mbd.block[0]; 409 BLOCKD *d = &x->e_mbd.block[0];
419 MACROBLOCKD *xd = &x->e_mbd; 410 MACROBLOCKD *xd = &x->e_mbd;
420 B_MODE_INFO best_bmodes[16];
421 MB_MODE_INFO best_mbmode; 411 MB_MODE_INFO best_mbmode;
422 PARTITION_INFO best_partition; 412
423 MV best_ref_mv; 413 int_mv best_ref_mv;
424 MV mode_mv[MB_MODE_COUNT]; 414 int_mv mode_mv[MB_MODE_COUNT];
425 MB_PREDICTION_MODE this_mode; 415 MB_PREDICTION_MODE this_mode;
426 int num00; 416 int num00;
427 int i;
428 int mdcounts[4]; 417 int mdcounts[4];
429 int best_rd = INT_MAX; // 1 << 30; 418 int best_rd = INT_MAX; // 1 << 30;
430 int best_intra_rd = INT_MAX; 419 int best_intra_rd = INT_MAX;
431 int mode_index; 420 int mode_index;
432 int ref_frame_cost[MAX_REF_FRAMES];
433 int rate; 421 int rate;
434 int rate2; 422 int rate2;
435 int distortion2; 423 int distortion2;
436 int bestsme; 424 int bestsme;
437 //int all_rds[MAX_MODES]; // Experimental debug code. 425 //int all_rds[MAX_MODES]; // Experimental debug code.
438 int best_mode_index = 0; 426 int best_mode_index = 0;
439 int sse = INT_MAX; 427 unsigned int sse = INT_MAX, best_sse = INT_MAX;
440 428
441 MV mvp; 429 int_mv mvp;
442 int near_sadidx[8] = {0, 1, 2, 3, 4, 5, 6, 7}; 430 int near_sadidx[8] = {0, 1, 2, 3, 4, 5, 6, 7};
443 int saddone=0; 431 int saddone=0;
444 int sr=0; //search range got from mv_pred(). It uses step_param levels. (0-7) 432 int sr=0; //search range got from mv_pred(). It uses step_param levels. (0-7)
445 433
446 MV nearest_mv[4]; 434 int_mv nearest_mv[4];
447 MV near_mv[4]; 435 int_mv near_mv[4];
448 MV frame_best_ref_mv[4]; 436 int_mv frame_best_ref_mv[4];
449 int MDCounts[4][4]; 437 int MDCounts[4][4];
450 unsigned char *y_buffer[4]; 438 unsigned char *y_buffer[4];
451 unsigned char *u_buffer[4]; 439 unsigned char *u_buffer[4];
452 unsigned char *v_buffer[4]; 440 unsigned char *v_buffer[4];
453 441
454 int skip_mode[4] = {0, 0, 0, 0}; 442 int skip_mode[4] = {0, 0, 0, 0};
455 443
444 int have_subp_search = cpi->sf.half_pixel_search; /* In real-time mode, when Speed >= 15, no sub-pixel search. */
445
456 vpx_memset(mode_mv, 0, sizeof(mode_mv)); 446 vpx_memset(mode_mv, 0, sizeof(mode_mv));
457 vpx_memset(nearest_mv, 0, sizeof(nearest_mv)); 447 vpx_memset(nearest_mv, 0, sizeof(nearest_mv));
458 vpx_memset(near_mv, 0, sizeof(near_mv)); 448 vpx_memset(near_mv, 0, sizeof(near_mv));
459 vpx_memset(&best_mbmode, 0, sizeof(best_mbmode)); 449 vpx_memset(&best_mbmode, 0, sizeof(best_mbmode));
460 450
461 451
462 // set up all the refframe dependent pointers. 452 // set up all the refframe dependent pointers.
463 if (cpi->ref_frame_flags & VP8_LAST_FLAG) 453 if (cpi->ref_frame_flags & VP8_LAST_FLAG)
464 { 454 {
465 YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx]; 455 YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx];
(...skipping 31 matching lines...)
497 487
498 y_buffer[ALTREF_FRAME] = alt_yv12->y_buffer + recon_yoffset; 488 y_buffer[ALTREF_FRAME] = alt_yv12->y_buffer + recon_yoffset;
499 u_buffer[ALTREF_FRAME] = alt_yv12->u_buffer + recon_uvoffset; 489 u_buffer[ALTREF_FRAME] = alt_yv12->u_buffer + recon_uvoffset;
500 v_buffer[ALTREF_FRAME] = alt_yv12->v_buffer + recon_uvoffset; 490 v_buffer[ALTREF_FRAME] = alt_yv12->v_buffer + recon_uvoffset;
501 } 491 }
502 else 492 else
503 skip_mode[ALTREF_FRAME] = 1; 493 skip_mode[ALTREF_FRAME] = 1;
504 494
505 cpi->mbs_tested_so_far++; // Count of the number of MBs tested so far this frame 495 cpi->mbs_tested_so_far++; // Count of the number of MBs tested so far this frame
506 496
507 *returnintra = best_intra_rd; 497 *returnintra = INT_MAX;
508 x->skip = 0; 498 x->skip = 0;
509 499
510 ref_frame_cost[INTRA_FRAME] = vp8_cost_zero(cpi->prob_intra_coded);
511
512 // Special case treatment when GF and ARF are not sensible options for reference
513 if (cpi->ref_frame_flags == VP8_LAST_FLAG)
514 {
515 ref_frame_cost[LAST_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
516 + vp8_cost_zero(255);
517 ref_frame_cost[GOLDEN_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
518 + vp8_cost_one(255)
519 + vp8_cost_zero(128);
520 ref_frame_cost[ALTREF_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
521 + vp8_cost_one(255)
522 + vp8_cost_one(128);
523 }
524 else
525 {
526 ref_frame_cost[LAST_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
527 + vp8_cost_zero(cpi->prob_last_coded);
528 ref_frame_cost[GOLDEN_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
529 + vp8_cost_one(cpi->prob_last_coded)
530 + vp8_cost_zero(cpi->prob_gf_coded);
531 ref_frame_cost[ALTREF_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
532 + vp8_cost_one(cpi->prob_last_coded)
533 + vp8_cost_one(cpi->prob_gf_coded);
534 }
535
536 x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME; 500 x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
537 501
538 // if we encode a new mv this is important 502 // if we encode a new mv this is important
539 // find the best new motion vector 503 // find the best new motion vector
540 for (mode_index = 0; mode_index < MAX_MODES; mode_index++) 504 for (mode_index = 0; mode_index < MAX_MODES; mode_index++)
541 { 505 {
542 int frame_cost; 506 int frame_cost;
543 int this_rd = INT_MAX; 507 int this_rd = INT_MAX;
544 508
545 if (best_rd <= cpi->rd_threshes[mode_index]) 509 if (best_rd <= cpi->rd_threshes[mode_index])
(...skipping 31 matching lines...)
577 541
578 this_mode = vp8_mode_order[mode_index]; 542 this_mode = vp8_mode_order[mode_index];
579 543
580 // Experimental debug code. 544 // Experimental debug code.
581 //all_rds[mode_index] = -1; 545 //all_rds[mode_index] = -1;
582 546
583 x->e_mbd.mode_info_context->mbmi.mode = this_mode; 547 x->e_mbd.mode_info_context->mbmi.mode = this_mode;
584 x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED; 548 x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
585 549
586 // Work out the cost assosciated with selecting the reference frame 550 // Work out the cost assosciated with selecting the reference frame
587 frame_cost = ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame]; 551 frame_cost =
552 x->e_mbd.ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame];
588 rate2 += frame_cost; 553 rate2 += frame_cost;
589 554
590 // everything but intra 555 // everything but intra
591 if (x->e_mbd.mode_info_context->mbmi.ref_frame) 556 if (x->e_mbd.mode_info_context->mbmi.ref_frame)
592 { 557 {
593 x->e_mbd.pre.y_buffer = y_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame]; 558 x->e_mbd.pre.y_buffer = y_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame];
594 x->e_mbd.pre.u_buffer = u_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame]; 559 x->e_mbd.pre.u_buffer = u_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame];
595 x->e_mbd.pre.v_buffer = v_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame]; 560 x->e_mbd.pre.v_buffer = v_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame];
596 mode_mv[NEARESTMV] = nearest_mv[x->e_mbd.mode_info_context->mbmi.ref_frame]; 561 mode_mv[NEARESTMV] = nearest_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
597 mode_mv[NEARMV] = near_mv[x->e_mbd.mode_info_context->mbmi.ref_frame]; 562 mode_mv[NEARMV] = near_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
598 best_ref_mv = frame_best_ref_mv[x->e_mbd.mode_info_context->mbmi.ref_frame]; 563 best_ref_mv = frame_best_ref_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
599 memcpy(mdcounts, MDCounts[x->e_mbd.mode_info_context->mbmi.ref_frame], sizeof(mdcounts)); 564 memcpy(mdcounts, MDCounts[x->e_mbd.mode_info_context->mbmi.ref_frame], sizeof(mdcounts));
600 } 565 }
601 566
602 // Only consider ZEROMV/ALTREF_FRAME for alt ref frame, 567 // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
603 // unless ARNR filtering is enabled in which case we want 568 // unless ARNR filtering is enabled in which case we want
604 // an unfiltered alternative 569 // an unfiltered alternative
605 if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) 570 if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0))
606 { 571 {
607 if (this_mode != ZEROMV || x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME) 572 if (this_mode != ZEROMV || x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME)
608 continue; 573 continue;
609 } 574 }
610 575
611 if(cpi->sf.improved_mv_pred && x->e_mbd.mode_info_context->mbmi.mode == NEWMV)
612 {
613 if(!saddone)
614 {
615 vp8_cal_sad(cpi,xd,x, recon_yoffset ,&near_sadidx[0] );
616 saddone = 1;
617 }
618
619 vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp,
620 x->e_mbd.mode_info_context->mbmi.ref_frame, cpi->common.ref_frame_sign_bias, &sr, &near_sadidx[0]);
621
622 /* adjust mvp to make sure it is within MV range */
623 if(mvp.row > best_ref_mv.row + MAX_FULL_PEL_VAL)
624 mvp.row = best_ref_mv.row + MAX_FULL_PEL_VAL;
625 else if(mvp.row < best_ref_mv.row - MAX_FULL_PEL_VAL)
626 mvp.row = best_ref_mv.row - MAX_FULL_PEL_VAL;
627 if(mvp.col > best_ref_mv.col + MAX_FULL_PEL_VAL)
628 mvp.col = best_ref_mv.col + MAX_FULL_PEL_VAL;
629 else if(mvp.col < best_ref_mv.col - MAX_FULL_PEL_VAL)
630 mvp.col = best_ref_mv.col - MAX_FULL_PEL_VAL;
631 }
632
633 switch (this_mode) 576 switch (this_mode)
634 { 577 {
635 case B_PRED: 578 case B_PRED:
636 distortion2 = *returndistortion; // Best so far passed in as breakout value to vp8_pick_intra4x4mby_modes 579 // Pass best so far to pick_intra4x4mby_modes to use as breakout
637 vp8_pick_intra4x4mby_modes(IF_RTCD(&cpi->rtcd), x, &rate, &distortion2); 580 distortion2 = best_sse;
638 rate2 += rate; 581 pick_intra4x4mby_modes(IF_RTCD(&cpi->rtcd), x, &rate, &distortion2);
639 distortion2 = VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16prederror)(x->src.y_buffer, x->src.y_stride, x->e_mbd.predictor, 16, 0x7fffffff);
640 582
641 if (distortion2 == INT_MAX) 583 if (distortion2 == INT_MAX)
642 { 584 {
643 this_rd = INT_MAX; 585 this_rd = INT_MAX;
644 } 586 }
645 else 587 else
646 { 588 {
647 this_rd = RD_ESTIMATE(x->rdmult, x->rddiv, rate2, distortion2); 589 rate2 += rate;
590 distortion2 = VARIANCE_INVOKE
591 (&cpi->rtcd.variance, var16x16)(
592 *(b->base_src), b->src_stride,
593 x->e_mbd.predictor, 16, &sse);
594 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
648 595
649 if (this_rd < best_intra_rd) 596 if (this_rd < best_intra_rd)
650 { 597 {
651 best_intra_rd = this_rd; 598 best_intra_rd = this_rd;
652 *returnintra = best_intra_rd ; 599 *returnintra = distortion2;
653 } 600 }
654 } 601 }
655 602
656 break; 603 break;
657 604
658 case SPLITMV: 605 case SPLITMV:
659 606
660 // Split MV modes currently not supported when RD is nopt enabled. 607 // Split MV modes currently not supported when RD is nopt enabled.
661 break; 608 break;
662 609
663 case DC_PRED: 610 case DC_PRED:
664 case V_PRED: 611 case V_PRED:
665 case H_PRED: 612 case H_PRED:
666 case TM_PRED: 613 case TM_PRED:
667 RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby) 614 RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
668 (&x->e_mbd); 615 (&x->e_mbd);
669 distortion2 = VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16prederror)(x->src.y_buffer, x->src.y_stride, x->e_mbd.predictor, 16, 0x7fffffff); 616 distortion2 = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
617 (*(b->base_src), b->src_stride,
618 x->e_mbd.predictor, 16, &sse);
670 rate2 += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context->mbmi.mode]; 619 rate2 += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context->mbmi.mode];
671 this_rd = RD_ESTIMATE(x->rdmult, x->rddiv, rate2, distortion2); 620 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
672 621
673 if (this_rd < best_intra_rd) 622 if (this_rd < best_intra_rd)
674 { 623 {
675 best_intra_rd = this_rd; 624 best_intra_rd = this_rd;
676 *returnintra = best_intra_rd ; 625 *returnintra = distortion2;
677 } 626 }
678
679 break; 627 break;
680 628
681 case NEWMV: 629 case NEWMV:
682 { 630 {
683 int thissme; 631 int thissme;
684 int step_param; 632 int step_param;
685 int further_steps; 633 int further_steps;
686 int n = 0; 634 int n = 0;
687 int sadpb = x->sadperbit16; 635 int sadpb = x->sadperbit16;
636 int_mv mvp_full;
688 637
689 int col_min; 638 int col_min = (best_ref_mv.as_mv.col>>3) - MAX_FULL_PEL_VAL + ((best_ref_mv.as_mv.col & 7)?1:0);
690 int col_max; 639 int row_min = (best_ref_mv.as_mv.row>>3) - MAX_FULL_PEL_VAL + ((best_ref_mv.as_mv.row & 7)?1:0);
691 int row_min; 640 int col_max = (best_ref_mv.as_mv.col>>3) + MAX_FULL_PEL_VAL;
692 int row_max; 641 int row_max = (best_ref_mv.as_mv.row>>3) + MAX_FULL_PEL_VAL;
693 642
694 int tmp_col_min = x->mv_col_min; 643 int tmp_col_min = x->mv_col_min;
695 int tmp_col_max = x->mv_col_max; 644 int tmp_col_max = x->mv_col_max;
696 int tmp_row_min = x->mv_row_min; 645 int tmp_row_min = x->mv_row_min;
697 int tmp_row_max = x->mv_row_max; 646 int tmp_row_max = x->mv_row_max;
698 647
699 int speed_adjust = (cpi->Speed > 5) ? ((cpi->Speed >= 8)? 3 : 2) : 1 ; 648 int speed_adjust = (cpi->Speed > 5) ? ((cpi->Speed >= 8)? 3 : 2) : 1 ;
700 649
701 // Further step/diamond searches as necessary 650 // Further step/diamond searches as necessary
702 step_param = cpi->sf.first_step + speed_adjust; 651 step_param = cpi->sf.first_step + speed_adjust;
703 652
704 if(cpi->sf.improved_mv_pred) 653 if(cpi->sf.improved_mv_pred)
705 { 654 {
655 if(!saddone)
656 {
657 vp8_cal_sad(cpi,xd,x, recon_yoffset ,&near_sadidx[0] );
658 saddone = 1;
659 }
660
661 vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp,
662 x->e_mbd.mode_info_context->mbmi.ref_frame, cpi->common.ref_frame_sign_bias, &sr, &near_sadidx[0]);
663
706 sr += speed_adjust; 664 sr += speed_adjust;
707 //adjust search range according to sr from mv prediction 665 //adjust search range according to sr from mv prediction
708 if(sr > step_param) 666 if(sr > step_param)
709 step_param = sr; 667 step_param = sr;
710 668
711 col_min = (best_ref_mv.col - MAX_FULL_PEL_VAL) >>3; 669 mvp_full.as_mv.col = mvp.as_mv.col>>3;
712 col_max = (best_ref_mv.col + MAX_FULL_PEL_VAL) >>3; 670 mvp_full.as_mv.row = mvp.as_mv.row>>3;
713 row_min = (best_ref_mv.row - MAX_FULL_PEL_VAL) >>3;
714 row_max = (best_ref_mv.row + MAX_FULL_PEL_VAL) >>3;
715 671
716 // Get intersection of UMV window and valid MV window to reduce # of checks in diamond search.
717 if (x->mv_col_min < col_min )
718 x->mv_col_min = col_min;
719 if (x->mv_col_max > col_max )
720 x->mv_col_max = col_max;
721 if (x->mv_row_min < row_min )
722 x->mv_row_min = row_min;
723 if (x->mv_row_max > row_max )
724 x->mv_row_max = row_max;
725 }else 672 }else
726 { 673 {
727 mvp.row = best_ref_mv.row; 674 mvp.as_int = best_ref_mv.as_int;
728 mvp.col = best_ref_mv.col; 675 mvp_full.as_mv.col = best_ref_mv.as_mv.col>>3;
676 mvp_full.as_mv.row = best_ref_mv.as_mv.row>>3;
729 } 677 }
730 678
679 // Get intersection of UMV window and valid MV window to reduce # of checks in diamond search.
680 if (x->mv_col_min < col_min )
681 x->mv_col_min = col_min;
682 if (x->mv_col_max > col_max )
683 x->mv_col_max = col_max;
684 if (x->mv_row_min < row_min )
685 x->mv_row_min = row_min;
686 if (x->mv_row_max > row_max )
687 x->mv_row_max = row_max;
688
731 further_steps = (cpi->Speed >= 8)? 0: (cpi->sf.max_step_search_steps - 1 - step_param); 689 further_steps = (cpi->Speed >= 8)? 0: (cpi->sf.max_step_search_steps - 1 - step_param);
732 690
733 if (cpi->sf.search_method == HEX) 691 if (cpi->sf.search_method == HEX)
734 { 692 {
735 bestsme = vp8_hex_search(x, b, d, &mvp, &d->bmi.mv.as_mv, step_param, sadpb/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvsadcost, x->mvcost, &best_ref_mv); 693 bestsme = vp8_hex_search(x, b, d, &mvp_full, &d->bmi.mv, step_param,
736 mode_mv[NEWMV].row = d->bmi.mv.as_mv.row; 694 sadpb, &cpi->fn_ptr[BLOCK_16X16],
737 mode_mv[NEWMV].col = d->bmi.mv.as_mv.col; 695 x->mvsadcost, x->mvcost, &best_ref_mv);
696 mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
738 } 697 }
739 else 698 else
740 { 699 {
741 bestsme = cpi->diamond_search_sad(x, b, d, &mvp, &d->bmi.mv.as_mv, step_param, sadpb / 2/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvsadcost, x->mvcost, &best_ref_mv); //sadpb < 9 700 bestsme = cpi->diamond_search_sad(x, b, d, &mvp_full, &d->bmi.mv,
742 mode_mv[NEWMV].row = d->bmi.mv.as_mv.row; 701 step_param, sadpb, &num00,
743 mode_mv[NEWMV].col = d->bmi.mv.as_mv.col; 702 &cpi->fn_ptr[BLOCK_16X16],
703 x->mvcost, &best_ref_mv);
704 mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
744 705
745 // Further step/diamond searches as necessary 706 // Further step/diamond searches as necessary
746 n = 0; 707 n = 0;
747 //further_steps = (cpi->sf.max_step_search_steps - 1) - step_param; 708 //further_steps = (cpi->sf.max_step_search_steps - 1) - step_param;
748 709
749 n = num00; 710 n = num00;
750 num00 = 0; 711 num00 = 0;
751 712
752 while (n < further_steps) 713 while (n < further_steps)
753 { 714 {
754 n++; 715 n++;
755 716
756 if (num00) 717 if (num00)
757 num00--; 718 num00--;
758 else 719 else
759 { 720 {
760 thissme = cpi->diamond_search_sad(x, b, d, &mvp, &d->bmi.mv.as_mv, step_param + n, sadpb / 4/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvsadcost, x->mvcost, &best_ref_mv); //sadpb = 9 721 thissme =
761 722 cpi->diamond_search_sad(x, b, d, &mvp_full,
723 &d->bmi.mv,
724 step_param + n,
725 sadpb, &num00,
726 &cpi->fn_ptr[BLOCK_16X16],
727 x->mvcost, &best_ref_mv);
762 if (thissme < bestsme) 728 if (thissme < bestsme)
763 { 729 {
764 bestsme = thissme; 730 bestsme = thissme;
765 mode_mv[NEWMV].row = d->bmi.mv.as_mv.row; 731 mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
766 mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;
767 } 732 }
768 else 733 else
769 { 734 {
770 d->bmi.mv.as_mv.row = mode_mv[NEWMV].row; 735 d->bmi.mv.as_int = mode_mv[NEWMV].as_int;
771 d->bmi.mv.as_mv.col = mode_mv[NEWMV].col;
772 } 736 }
773 } 737 }
774 } 738 }
775 } 739 }
776 740
777 if(cpi->sf.improved_mv_pred) 741 x->mv_col_min = tmp_col_min;
778 { 742 x->mv_col_max = tmp_col_max;
779 x->mv_col_min = tmp_col_min; 743 x->mv_row_min = tmp_row_min;
780 x->mv_col_max = tmp_col_max; 744 x->mv_row_max = tmp_row_max;
781 x->mv_row_min = tmp_row_min;
782 x->mv_row_max = tmp_row_max;
783 }
784 745
785 if (bestsme < INT_MAX) 746 if (bestsme < INT_MAX)
786 cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv.as_mv, &best_ref_mv, x->errorperbit, &cpi->fn_ptr[BLOCK_16X16], cpi->mb.mvcost); 747 cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv, &best_ref_mv,
748 x->errorperbit,
749 &cpi->fn_ptr[BLOCK_16X16],
750 cpi->mb.mvcost,
751 &distortion2,&sse);
787 752
788 mode_mv[NEWMV].row = d->bmi.mv.as_mv.row; 753 mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
789 mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;
790 754
791 // mv cost; 755 // mv cost;
792 rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv, cpi->mb.mvcost, 128); 756 rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv, cpi->mb.mvcost, 128);
793 } 757 }
794 758
795 case NEARESTMV: 759 case NEARESTMV:
796 case NEARMV: 760 case NEARMV:
797 761
798 if (mode_mv[this_mode].row == 0 && mode_mv[this_mode].col == 0) 762 if (mode_mv[this_mode].as_int == 0)
799 continue; 763 continue;
800 764
801 case ZEROMV: 765 case ZEROMV:
802 766
803 // Trap vectors that reach beyond the UMV borders 767 // Trap vectors that reach beyond the UMV borders
804 // Note that ALL New MV, Nearest MV Near MV and Zero MV code drops through to this point 768 // Note that ALL New MV, Nearest MV Near MV and Zero MV code drops through to this point
805 // because of the lack of break statements in the previous two cases. 769 // because of the lack of break statements in the previous two cases.
806 if (((mode_mv[this_mode].row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].row >> 3) > x->mv_row_max) || 770 if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
807 ((mode_mv[this_mode].col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].col >> 3) > x->mv_col_max)) 771 ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max))
808 continue; 772 continue;
809 773
810 rate2 += vp8_cost_mv_ref(this_mode, mdcounts); 774 rate2 += vp8_cost_mv_ref(this_mode, mdcounts);
811 x->e_mbd.mode_info_context->mbmi.mode = this_mode; 775 x->e_mbd.mode_info_context->mbmi.mv.as_int =
812 x->e_mbd.mode_info_context->mbmi.mv.as_mv = mode_mv[this_mode]; 776 mode_mv[this_mode].as_int;
813 x->e_mbd.block[0].bmi.mode = this_mode;
814 x->e_mbd.block[0].bmi.mv.as_int = x->e_mbd.mode_info_context->mbmi.mv.as_int;
815 777
816 distortion2 = get_inter_mbpred_error(x, &cpi->fn_ptr[BLOCK_16X16], (unsigned int *)(&sse)); 778 if((this_mode != NEWMV) ||
779 !(have_subp_search) || cpi->common.full_pixel==1)
780 distortion2 = get_inter_mbpred_error(x,
781 &cpi->fn_ptr[BLOCK_16X16],
782 &sse, mode_mv[this_mode]);
817 783
818 this_rd = RD_ESTIMATE(x->rdmult, x->rddiv, rate2, distortion2); 784 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
819 785
820 if (cpi->active_map_enabled && x->active_ptr[0] == 0) 786 if (cpi->active_map_enabled && x->active_ptr[0] == 0)
821 { 787 {
822 x->skip = 1; 788 x->skip = 1;
823 } 789 }
824 else if (sse < x->encode_breakout) 790 else if (sse < x->encode_breakout)
825 { 791 {
826 // Check u and v to make sure skip is ok 792 // Check u and v to make sure skip is ok
827 int sse2 = 0; 793 int sse2 = 0;
828 794
(...skipping 13 matching lines...)
842 // Experimental debug code. 808 // Experimental debug code.
843 //all_rds[mode_index] = this_rd; 809 //all_rds[mode_index] = this_rd;
844 810
845 if (this_rd < best_rd || x->skip) 811 if (this_rd < best_rd || x->skip)
846 { 812 {
847 // Note index of best mode 813 // Note index of best mode
848 best_mode_index = mode_index; 814 best_mode_index = mode_index;
849 815
850 *returnrate = rate2; 816 *returnrate = rate2;
851 *returndistortion = distortion2; 817 *returndistortion = distortion2;
818 best_sse = sse;
852 best_rd = this_rd; 819 best_rd = this_rd;
853 vpx_memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi, sizeof(MB_MODE_INFO)); 820 vpx_memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi, sizeof(MB_MODE_INFO));
854 vpx_memcpy(&best_partition, x->partition_info, sizeof(PARTITION_INFO));
855
856 if (this_mode == B_PRED || this_mode == SPLITMV)
857 for (i = 0; i < 16; i++)
858 {
859 vpx_memcpy(&best_bmodes[i], &x->e_mbd.block[i].bmi, sizeof(B_MODE_INFO));
860 }
861 else
862 {
863 best_bmodes[0].mv = x->e_mbd.block[0].bmi.mv;
864 }
865 821
866 // Testing this mode gave rise to an improvement in best error score. Lower threshold a bit for next time 822 // Testing this mode gave rise to an improvement in best error score. Lower threshold a bit for next time
867 cpi->rd_thresh_mult[mode_index] = (cpi->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ? cpi->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT; 823 cpi->rd_thresh_mult[mode_index] = (cpi->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ? cpi->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT;
868 cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index]; 824 cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];
869 } 825 }
870 826
871 // If the mode did not help improve the best error case then raise the threshold for testing that mode next time around. 827 // If the mode did not help improve the best error case then raise the threshold for testing that mode next time around.
872 else 828 else
873 { 829 {
874 cpi->rd_thresh_mult[mode_index] += 4; 830 cpi->rd_thresh_mult[mode_index] += 4;
(...skipping 10 matching lines...)
885 841
886 // Reduce the activation RD thresholds for the best choice mode 842 // Reduce the activation RD thresholds for the best choice mode
887 if ((cpi->rd_baseline_thresh[best_mode_index] > 0) && (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2))) 843 if ((cpi->rd_baseline_thresh[best_mode_index] > 0) && (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2)))
888 { 844 {
889 int best_adjustment = (cpi->rd_thresh_mult[best_mode_index] >> 3); 845 int best_adjustment = (cpi->rd_thresh_mult[best_mode_index] >> 3);
890 846
891 cpi->rd_thresh_mult[best_mode_index] = (cpi->rd_thresh_mult[best_mode_index] >= (MIN_THRESHMULT + best_adjustment)) ? cpi->rd_thresh_mult[best_mode_index] - best_adjustment : MIN_THRESHMULT; 847 cpi->rd_thresh_mult[best_mode_index] = (cpi->rd_thresh_mult[best_mode_index] >= (MIN_THRESHMULT + best_adjustment)) ? cpi->rd_thresh_mult[best_mode_index] - best_adjustment : MIN_THRESHMULT;
892 cpi->rd_threshes[best_mode_index] = (cpi->rd_baseline_thresh[best_mode_index] >> 7) * cpi->rd_thresh_mult[best_mode_index]; 848 cpi->rd_threshes[best_mode_index] = (cpi->rd_baseline_thresh[best_mode_index] >> 7) * cpi->rd_thresh_mult[best_mode_index];
893 } 849 }
894 850
895 // Keep a record of best mode index for use in next loop
896 cpi->last_best_mode_index = best_mode_index;
897
898 if (best_mbmode.mode <= B_PRED)
899 {
900 x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
901 vp8_pick_intra_mbuv_mode(x);
902 best_mbmode.uv_mode = x->e_mbd.mode_info_context->mbmi.uv_mode;
903 }
904
905 851
906 { 852 {
907 int this_rdbin = (*returndistortion >> 7); 853 int this_rdbin = (*returndistortion >> 7);
908 854
909 if (this_rdbin >= 1024) 855 if (this_rdbin >= 1024)
910 { 856 {
911 this_rdbin = 1023; 857 this_rdbin = 1023;
912 } 858 }
913 859
914 cpi->error_bins[this_rdbin] ++; 860 cpi->error_bins[this_rdbin] ++;
915 } 861 }
916 862
863 if (cpi->is_src_frame_alt_ref &&
864 (best_mbmode.mode != ZEROMV || best_mbmode.ref_frame != ALTREF_FRAME))
865 {
866 x->e_mbd.mode_info_context->mbmi.mode = ZEROMV;
867 x->e_mbd.mode_info_context->mbmi.ref_frame = ALTREF_FRAME;
868 x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
869 x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
870 x->e_mbd.mode_info_context->mbmi.mb_skip_coeff =
871 (cpi->common.mb_no_coeff_skip) ? 1 : 0;
872 x->e_mbd.mode_info_context->mbmi.partitioning = 0;
917 873
918 if (cpi->is_src_frame_alt_ref && (best_mbmode.mode != ZEROMV || best_mbmode.ref_frame != ALTREF_FRAME)) 874 return;
919 {
920 best_mbmode.mode = ZEROMV;
921 best_mbmode.ref_frame = ALTREF_FRAME;
922 best_mbmode.mv.as_int = 0;
923 best_mbmode.uv_mode = 0;
924 best_mbmode.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
925 best_mbmode.partitioning = 0;
926 best_mbmode.dc_diff = 0;
927
928 vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
929 vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
930
931 for (i = 0; i < 16; i++)
932 {
933 vpx_memset(&x->e_mbd.block[i].bmi, 0, sizeof(B_MODE_INFO));
934 }
935
936 x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
937
938 return best_rd;
939 } 875 }
940 876
877 /* set to the best mb mode */
878 vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
941 879
942 // macroblock modes 880 if (best_mbmode.mode <= B_PRED)
943 vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
944 vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
945
946 if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED || x->e_mbd.mode_info_context->mbmi.mode == SPLITMV)
947 for (i = 0; i < 16; i++)
948 {
949 vpx_memcpy(&x->e_mbd.block[i].bmi, &best_bmodes[i], sizeof(B_MODE_INFO));
950
951 }
952 else
953 { 881 {
954 vp8_set_mbmode_and_mvs(x, x->e_mbd.mode_info_context->mbmi.mode, &best_bmodes[0].mv.as_mv); 882 /* set mode_info_context->mbmi.uv_mode */
883 pick_intra_mbuv_mode(x);
955 } 884 }
956 885
957 x->e_mbd.mode_info_context->mbmi.mv.as_mv = x->e_mbd.block[15].bmi.mv.as_mv; 886 update_mvcount(cpi, &x->e_mbd, &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame]);
887 }
958 888
959 return best_rd; 889
890 void vp8_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x, int *rate_)
891 {
892 int error4x4, error16x16 = INT_MAX;
893 int rate, best_rate = 0, distortion, best_sse;
894 MB_PREDICTION_MODE mode, best_mode = DC_PRED;
895 int this_rd;
896 unsigned int sse;
897 BLOCK *b = &x->block[0];
898
899 x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
900
901 pick_intra_mbuv_mode(x);
902
903 for (mode = DC_PRED; mode <= TM_PRED; mode ++)
904 {
905 x->e_mbd.mode_info_context->mbmi.mode = mode;
906 RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
907 (&x->e_mbd);
908 distortion = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
909 (*(b->base_src), b->src_stride, x->e_mbd.predictor, 16, &sse);
910 rate = x->mbmode_cost[x->e_mbd.frame_type][mode];
911 this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
912
913 if (error16x16 > this_rd)
914 {
915 error16x16 = this_rd;
916 best_mode = mode;
917 best_sse = sse;
918 best_rate = rate;
919 }
920 }
921 x->e_mbd.mode_info_context->mbmi.mode = best_mode;
922
923 error4x4 = pick_intra4x4mby_modes(IF_RTCD(&cpi->rtcd), x, &rate,
924 &best_sse);
925 if (error4x4 < error16x16)
926 {
927 x->e_mbd.mode_info_context->mbmi.mode = B_PRED;
928 best_rate = rate;
929 }
930
931 *rate_ = best_rate;
960 } 932 }