Chromium Code Reviews

Side by Side Diff: source/libvpx/vp9/encoder/vp9_encodeframe.c

Issue 11555023: libvpx: Add VP9 decoder. (Closed) Base URL: svn://chrome-svn/chrome/trunk/deps/third_party/libvpx/
Patch Set: Created 8 years ago
Property Changes:
Added: svn:eol-style
+ LF
1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11
12 #include "vpx_ports/config.h"
13 #include "vp9/encoder/vp9_encodeframe.h"
14 #include "vp9/encoder/vp9_encodemb.h"
15 #include "vp9/encoder/vp9_encodemv.h"
16 #include "vp9/common/vp9_common.h"
17 #include "vp9/encoder/vp9_onyx_int.h"
18 #include "vp9/common/vp9_extend.h"
19 #include "vp9/common/vp9_entropymode.h"
20 #include "vp9/common/vp9_quant_common.h"
21 #include "vp9/encoder/vp9_segmentation.h"
22 #include "vp9/common/vp9_setupintrarecon.h"
23 #include "vp9/common/vp9_reconintra4x4.h"
24 #include "vp9/encoder/vp9_encodeintra.h"
25 #include "vp9/common/vp9_reconinter.h"
26 #include "vp9/common/vp9_invtrans.h"
27 #include "vp9/encoder/vp9_rdopt.h"
28 #include "vp9/common/vp9_findnearmv.h"
29 #include "vp9/common/vp9_reconintra.h"
30 #include "vp9/common/vp9_seg_common.h"
31 #include "vp9/encoder/vp9_tokenize.h"
32 #include "vp9_rtcd.h"
33 #include <stdio.h>
34 #include <math.h>
35 #include <limits.h>
36 #include "vpx_ports/vpx_timer.h"
37 #include "vp9/common/vp9_pred_common.h"
38 #include "vp9/common/vp9_mvref_common.h"
39
40 #define DBG_PRNT_SEGMAP 0
41
42 // #define ENC_DEBUG
43 #ifdef ENC_DEBUG
44 int enc_debug = 0;
45 #endif
46
47 static void encode_macroblock(VP9_COMP *cpi, MACROBLOCK *x,
48 TOKENEXTRA **t, int recon_yoffset,
49 int recon_uvoffset, int output_enabled,
50 int mb_col, int mb_row);
51
52 static void encode_superblock(VP9_COMP *cpi, MACROBLOCK *x,
53 TOKENEXTRA **t, int recon_yoffset,
54 int recon_uvoffset, int mb_col, int mb_row);
55
56 static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x);
57
58 #ifdef MODE_STATS
59 unsigned int inter_y_modes[MB_MODE_COUNT];
60 unsigned int inter_uv_modes[VP9_UV_MODES];
61 unsigned int inter_b_modes[B_MODE_COUNT];
62 unsigned int y_modes[VP9_YMODES];
63 unsigned int i8x8_modes[VP9_I8X8_MODES];
64 unsigned int uv_modes[VP9_UV_MODES];
65 unsigned int uv_modes_y[VP9_YMODES][VP9_UV_MODES];
66 unsigned int b_modes[B_MODE_COUNT];
67 #endif
68
69
70 /* activity_avg must be positive, or flat regions could get a zero weight
71 * (infinite lambda), which confounds analysis.
72 * This also avoids the need for divide by zero checks in
73 * vp9_activity_masking().
74 */
75 #define VP9_ACTIVITY_AVG_MIN (64)
76
77 /* This is used as a reference when computing the source variance for the
78 * purposes of activity masking.
79 * Eventually this should be replaced by custom no-reference routines,
80 * which will be faster.
81 */
82 static const unsigned char VP9_VAR_OFFS[16] = {
83 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
84 };
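/* Note (illustrative, not from the change itself): with this constant-128
 * row as the "reference" and a zero reference stride, the
 * vp9_variance16x16() call below computes
 *   sum((s - 128)^2) - (sum(s - 128))^2 / 256
 * over the source block; the 128 offset cancels, so the result is exactly
 * the block's own pixel variance. */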
85
86
87 // Original activity measure from Tim T's code.
88 static unsigned int tt_activity_measure(VP9_COMP *cpi, MACROBLOCK *x) {
89 unsigned int act;
90 unsigned int sse;
91 /* TODO: This could also be done over smaller areas (8x8), but that would
92 * require extensive changes elsewhere, as lambda is assumed to be fixed
93 * over an entire MB in most of the code.
94 * Another option is to compute four 8x8 variances, and pick a single
95 * lambda using a non-linear combination (e.g., the smallest, or second
96 * smallest, etc.).
97 */
98 act = vp9_variance16x16(x->src.y_buffer, x->src.y_stride, VP9_VAR_OFFS, 0,
99 &sse);
100 act = act << 4;
101
102 /* If the region is flat, lower the activity some more. */
103 if (act < 8 << 12)
104 act = act < 5 << 12 ? act : 5 << 12;
105
106 return act;
107 }
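/* Worked example of the clamp above (values assumed): a 16x16 source
 * variance of 1500 gives act = 1500 << 4 = 24000; since 24000 is below the
 * flat-region threshold (8 << 12) = 32768, act is capped at
 * (5 << 12) = 20480. Blocks with act >= 32768 pass through unchanged. */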
108
109 // Stub for alternative experimental activity measures.
110 static unsigned int alt_activity_measure(VP9_COMP *cpi,
111 MACROBLOCK *x, int use_dc_pred) {
112 return vp9_encode_intra(cpi, x, use_dc_pred);
113 }
114
115
116 // Measure the activity of the current macroblock
117 // What we measure here is TBD, so it is abstracted into this function
118 #define ALT_ACT_MEASURE 1
119 static unsigned int mb_activity_measure(VP9_COMP *cpi, MACROBLOCK *x,
120 int mb_row, int mb_col) {
121 unsigned int mb_activity;
122
123 if (ALT_ACT_MEASURE) {
124 int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
125
126 // Or use an alternative.
127 mb_activity = alt_activity_measure(cpi, x, use_dc_pred);
128 } else {
129 // Original activity measure from Tim T's code.
130 mb_activity = tt_activity_measure(cpi, x);
131 }
132
133 if (mb_activity < VP9_ACTIVITY_AVG_MIN)
134 mb_activity = VP9_ACTIVITY_AVG_MIN;
135
136 return mb_activity;
137 }
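/* The use_dc_pred expression above is terse; it is true exactly for MBs on
 * the frame's top row or left column, excluding the top-left corner:
 *   (mb_row, mb_col) = (0,0) -> 0,  (0,3) -> 1,  (2,0) -> 1,  (2,3) -> 0
 * i.e. positions where exactly one of the two intra prediction edges is
 * available. */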
138
139 // Calculate an "average" mb activity value for the frame
140 #define ACT_MEDIAN 0
141 static void calc_av_activity(VP9_COMP *cpi, int64_t activity_sum) {
142 #if ACT_MEDIAN
143 // Find median: Simple n^2 algorithm for experimentation
144 {
145 unsigned int median;
146 unsigned int i, j;
147 unsigned int *sortlist;
148 unsigned int tmp;
149
150 // Create a list to sort into
151 CHECK_MEM_ERROR(sortlist,
152 vpx_calloc(sizeof(unsigned int),
153 cpi->common.MBs));
154
155 // Copy map to sort list
156 vpx_memcpy(sortlist, cpi->mb_activity_map,
157 sizeof(unsigned int) * cpi->common.MBs);
158
159
160 // Ripple each value down to its correct position
161 for (i = 1; i < cpi->common.MBs; i ++) {
162 for (j = i; j > 0; j --) {
163 if (sortlist[j] < sortlist[j - 1]) {
164 // Swap values
165 tmp = sortlist[j - 1];
166 sortlist[j - 1] = sortlist[j];
167 sortlist[j] = tmp;
168 } else
169 break;
170 }
171 }
172
173 // Even number of MBs, so estimate the median as the mean of the two either side.
174 median = (1 + sortlist[cpi->common.MBs >> 1] +
175 sortlist[(cpi->common.MBs >> 1) + 1]) >> 1;
176
177 cpi->activity_avg = median;
178
179 vpx_free(sortlist);
180 }
181 #else
182 // Simple mean for now
183 cpi->activity_avg = (unsigned int)(activity_sum / cpi->common.MBs);
184 #endif
185
186 if (cpi->activity_avg < VP9_ACTIVITY_AVG_MIN)
187 cpi->activity_avg = VP9_ACTIVITY_AVG_MIN;
188
189 // Experimental code: return a fixed value normalized across several clips
190 if (ALT_ACT_MEASURE)
191 cpi->activity_avg = 100000;
192 }
193
194 #define USE_ACT_INDEX 0
195 #define OUTPUT_NORM_ACT_STATS 0
196
197 #if USE_ACT_INDEX
198 // Calculate an activity index for each mb
199 static void calc_activity_index(VP9_COMP *cpi, MACROBLOCK *x) {
200 VP9_COMMON *const cm = &cpi->common;
201 int mb_row, mb_col;
202
203 int64_t act;
204 int64_t a;
205 int64_t b;
206
207 #if OUTPUT_NORM_ACT_STATS
208 FILE *f = fopen("norm_act.stt", "a");
209 fprintf(f, "\n%12d\n", cpi->activity_avg);
210 #endif
211
212 // Reset pointers to start of activity map
213 x->mb_activity_ptr = cpi->mb_activity_map;
214
215 // Calculate normalized mb activity number.
216 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
217 // for each macroblock col in image
218 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
219 // Read activity from the map
220 act = *(x->mb_activity_ptr);
221
222 // Calculate a normalized activity number
223 a = act + 4 * cpi->activity_avg;
224 b = 4 * act + cpi->activity_avg;
225
226 if (b >= a)
227 *(x->activity_ptr) = (int)((b + (a >> 1)) / a) - 1;
228 else
229 *(x->activity_ptr) = 1 - (int)((a + (b >> 1)) / b);
230
231 #if OUTPUT_NORM_ACT_STATS
232 fprintf(f, " %6d", *(x->mb_activity_ptr));
233 #endif
234 // Increment activity map pointers
235 x->mb_activity_ptr++;
236 }
237
238 #if OUTPUT_NORM_ACT_STATS
239 fprintf(f, "\n");
240 #endif
241
242 }
243
244 #if OUTPUT_NORM_ACT_STATS
245 fclose(f);
246 #endif
247
248 }
249 #endif
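/* Worked example of the normalization above (values assumed): with
 * act = 400 and activity_avg = 100, a = 800 and b = 1700, so the index is
 * (1700 + 400) / 800 - 1 = +1; with act = 25, a = 425 and b = 200, giving
 * 1 - (425 + 100) / 200 = -1. The index is thus a signed measure of how
 * far an MB's activity sits from the frame average, saturating at
 * roughly +/-3. */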
250
251 // Loop through all MBs: note the activity of each, compute the average
252 // activity, and calculate a normalized activity for each
253 static void build_activity_map(VP9_COMP *cpi) {
254 MACROBLOCK *const x = &cpi->mb;
255 MACROBLOCKD *xd = &x->e_mbd;
256 VP9_COMMON *const cm = &cpi->common;
257
258 #if ALT_ACT_MEASURE
259 YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
260 int recon_yoffset;
261 int recon_y_stride = new_yv12->y_stride;
262 #endif
263
264 int mb_row, mb_col;
265 unsigned int mb_activity;
266 int64_t activity_sum = 0;
267
268 // for each macroblock row in image
269 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
270 #if ALT_ACT_MEASURE
271 // reset above block coeffs
272 xd->up_available = (mb_row != 0);
273 recon_yoffset = (mb_row * recon_y_stride * 16);
274 #endif
275 // for each macroblock col in image
276 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
277 #if ALT_ACT_MEASURE
278 xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset;
279 xd->left_available = (mb_col != 0);
280 recon_yoffset += 16;
281 #endif
282 #if !CONFIG_SUPERBLOCKS
283 // Copy current mb to a buffer
284 vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
285 #endif
286
287 // measure activity
288 mb_activity = mb_activity_measure(cpi, x, mb_row, mb_col);
289
290 // Keep frame sum
291 activity_sum += mb_activity;
292
293 // Store MB level activity details.
294 *x->mb_activity_ptr = mb_activity;
295
296 // Increment activity map pointer
297 x->mb_activity_ptr++;
298
299 // adjust to the next column of source macroblocks
300 x->src.y_buffer += 16;
301 }
302
303
304 // adjust to the next row of mbs
305 x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
306
307 #if ALT_ACT_MEASURE
308 // extend the recon for intra prediction
309 vp9_extend_mb_row(new_yv12, xd->dst.y_buffer + 16,
310 xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
311 #endif
312
313 }
314
315 // Calculate an "average" MB activity
316 calc_av_activity(cpi, activity_sum);
317
318 #if USE_ACT_INDEX
319 // Calculate an activity index number for each mb
320 calc_activity_index(cpi, x);
321 #endif
322
323 }
324
325 // Macroblock activity masking
326 void vp9_activity_masking(VP9_COMP *cpi, MACROBLOCK *x) {
327 #if USE_ACT_INDEX
328 x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
329 x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
330 x->errorperbit += (x->errorperbit == 0);
331 #else
332 int64_t a;
333 int64_t b;
334 int64_t act = *(x->mb_activity_ptr);
335
336 // Apply the masking to the RD multiplier.
337 a = act + (2 * cpi->activity_avg);
338 b = (2 * act) + cpi->activity_avg;
339
340 x->rdmult = (unsigned int)(((int64_t)x->rdmult * b + (a >> 1)) / a);
341 x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
342 x->errorperbit += (x->errorperbit == 0);
343 #endif
344
345 // Activity based Zbin adjustment
346 adjust_act_zbin(cpi, x);
347 }
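/* Worked example (values assumed): the multiplier applied to rdmult above
 * is b/a = (2*act + avg) / (act + 2*avg), which lies in (1/2, 2). With
 * act = 4*avg it is 9/6 = 1.5, raising rdmult so busy MBs spend fewer
 * bits; with act = avg/4 it is 1.5/2.25 = 2/3, lowering rdmult so flat,
 * visually sensitive MBs get more. */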
348
349 #if CONFIG_NEW_MVREF
350 static int vp9_cost_mv_ref_id(vp9_prob * ref_id_probs, int mv_ref_id) {
351 int cost;
352
353 // Encode the index for the MV reference.
354 switch (mv_ref_id) {
355 case 0:
356 cost = vp9_cost_zero(ref_id_probs[0]);
357 break;
358 case 1:
359 cost = vp9_cost_one(ref_id_probs[0]);
360 cost += vp9_cost_zero(ref_id_probs[1]);
361 break;
362 case 2:
363 cost = vp9_cost_one(ref_id_probs[0]);
364 cost += vp9_cost_one(ref_id_probs[1]);
365 cost += vp9_cost_zero(ref_id_probs[2]);
366 break;
367 case 3:
368 cost = vp9_cost_one(ref_id_probs[0]);
369 cost += vp9_cost_one(ref_id_probs[1]);
370 cost += vp9_cost_one(ref_id_probs[2]);
371 break;
372
373 // TRAP.. This should not happen
374 default:
375 assert(0);
376 break;
377 }
378 return cost;
379 }
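#if 0
/* Illustration: the switch above prices a truncated-unary code over three
 * binary probabilities; this loop form is equivalent. */
static int cost_mv_ref_id_loop(vp9_prob *ref_id_probs, int mv_ref_id) {
  int cost = 0, i;
  for (i = 0; i < 3; i++) {
    if (mv_ref_id == i)
      return cost + vp9_cost_zero(ref_id_probs[i]);  /* terminating 0 bit */
    cost += vp9_cost_one(ref_id_probs[i]);           /* continuation 1 bit */
  }
  return cost;  /* mv_ref_id == 3: three 1 bits, no terminator */
}
#endif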
380
381 // Estimate the cost of coding the vector using each reference candidate
382 static unsigned int pick_best_mv_ref(MACROBLOCK *x,
383 MV_REFERENCE_FRAME ref_frame,
384 int_mv target_mv,
385 int_mv * mv_ref_list,
386 int_mv * best_ref) {
387 int i;
388 int best_index = 0;
389 int cost, cost2;
390 int zero_seen = (mv_ref_list[0].as_int) ? FALSE : TRUE;
391 MACROBLOCKD *xd = &x->e_mbd;
392 int max_mv = MV_MAX;
393
394 cost = vp9_cost_mv_ref_id(xd->mb_mv_ref_id_probs[ref_frame], 0) +
395 vp9_mv_bit_cost(&target_mv, &mv_ref_list[0], x->nmvjointcost,
396 x->mvcost, 96, xd->allow_high_precision_mv);
397
398 // Use 4 for now: for (i = 1; i < MAX_MV_REFS; ++i) {
399 for (i = 1; i < 4; ++i) {
400 // If we see a 0,0 reference vector for a second time we have reached
401 // the end of the list of valid candidate vectors.
402 if (!mv_ref_list[i].as_int) {
403 if (zero_seen)
404 break;
405 else
406 zero_seen = TRUE;
407 }
408
409 // Check for cases where the reference choice would give rise to an
410 // uncodable/out of range residual for row or col.
411 if ((abs(target_mv.as_mv.row - mv_ref_list[i].as_mv.row) > max_mv) ||
412 (abs(target_mv.as_mv.col - mv_ref_list[i].as_mv.col) > max_mv)) {
413 continue;
414 }
415
416 cost2 = vp9_cost_mv_ref_id(xd->mb_mv_ref_id_probs[ref_frame], i) +
417 vp9_mv_bit_cost(&target_mv, &mv_ref_list[i], x->nmvjointcost,
418 x->mvcost, 96, xd->allow_high_precision_mv);
419
420 if (cost2 < cost) {
421 cost = cost2;
422 best_index = i;
423 }
424 }
425
426 best_ref->as_int = mv_ref_list[best_index].as_int;
427
428 return best_index;
429 }
430 #endif
431
432 static void update_state(VP9_COMP *cpi, MACROBLOCK *x,
433 PICK_MODE_CONTEXT *ctx) {
434 int i;
435 MACROBLOCKD *xd = &x->e_mbd;
436 MODE_INFO *mi = &ctx->mic;
437 MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi;
438 int mb_mode = mi->mbmi.mode;
439 int mb_mode_index = ctx->best_mode_index;
440
441 #if CONFIG_DEBUG
442 assert(mb_mode < MB_MODE_COUNT);
443 assert(mb_mode_index < MAX_MODES);
444 assert(mi->mbmi.ref_frame < MAX_REF_FRAMES);
445 #endif
446
447 // Restore the coding context of the MB to the one that was in place
448 // when the mode was picked for it
449 vpx_memcpy(xd->mode_info_context, mi, sizeof(MODE_INFO));
450 #if CONFIG_SUPERBLOCKS
451 if (mi->mbmi.encoded_as_sb) {
452 const int mis = cpi->common.mode_info_stride;
453 if (xd->mb_to_right_edge >= 0)
454 vpx_memcpy(xd->mode_info_context + 1, mi, sizeof(MODE_INFO));
455 if (xd->mb_to_bottom_edge >= 0) {
456 vpx_memcpy(xd->mode_info_context + mis, mi, sizeof(MODE_INFO));
457 if (xd->mb_to_right_edge >= 0)
458 vpx_memcpy(xd->mode_info_context + mis + 1, mi, sizeof(MODE_INFO));
459 }
460 }
461 #endif
462
463 if (mb_mode == B_PRED) {
464 for (i = 0; i < 16; i++) {
465 xd->block[i].bmi.as_mode = xd->mode_info_context->bmi[i].as_mode;
466 assert(xd->block[i].bmi.as_mode.first < B_MODE_COUNT);
467 }
468 } else if (mb_mode == I8X8_PRED) {
469 for (i = 0; i < 16; i++) {
470 xd->block[i].bmi = xd->mode_info_context->bmi[i];
471 }
472 } else if (mb_mode == SPLITMV) {
473 vpx_memcpy(x->partition_info, &ctx->partition_info,
474 sizeof(PARTITION_INFO));
475
476 mbmi->mv[0].as_int = x->partition_info->bmi[15].mv.as_int;
477 mbmi->mv[1].as_int = x->partition_info->bmi[15].second_mv.as_int;
478 }
479
480 {
481 int segment_id = mbmi->segment_id;
482 if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_EOB) ||
483 vp9_get_segdata(xd, segment_id, SEG_LVL_EOB)) {
484 for (i = 0; i < NB_TXFM_MODES; i++) {
485 cpi->rd_tx_select_diff[i] += ctx->txfm_rd_diff[i];
486 }
487 }
488 }
489
490 if (cpi->common.frame_type == KEY_FRAME) {
491 // Restore the coding modes to that held in the coding context
492 // if (mb_mode == B_PRED)
493 // for (i = 0; i < 16; i++)
494 // {
495 // xd->block[i].bmi.as_mode =
496 // xd->mode_info_context->bmi[i].as_mode;
497 // assert(xd->mode_info_context->bmi[i].as_mode < MB_MODE_COUNT);
498 // }
499 #if CONFIG_INTERNAL_STATS
500 static const int kf_mode_index[] = {
501 THR_DC /*DC_PRED*/,
502 THR_V_PRED /*V_PRED*/,
503 THR_H_PRED /*H_PRED*/,
504 THR_D45_PRED /*D45_PRED*/,
505 THR_D135_PRED /*D135_PRED*/,
506 THR_D117_PRED /*D117_PRED*/,
507 THR_D153_PRED /*D153_PRED*/,
508 THR_D27_PRED /*D27_PRED*/,
509 THR_D63_PRED /*D63_PRED*/,
510 THR_TM /*TM_PRED*/,
511 THR_I8X8_PRED /*I8X8_PRED*/,
512 THR_B_PRED /*B_PRED*/,
513 };
514 cpi->mode_chosen_counts[kf_mode_index[mb_mode]]++;
515 #endif
516 } else {
517 /*
518 // Reduce the activation RD thresholds for the best choice mode
519 if ((cpi->rd_baseline_thresh[mb_mode_index] > 0) &&
520 (cpi->rd_baseline_thresh[mb_mode_index] < (INT_MAX >> 2)))
521 {
522 int best_adjustment = (cpi->rd_thresh_mult[mb_mode_index] >> 2);
523
524 cpi->rd_thresh_mult[mb_mode_index] =
525 (cpi->rd_thresh_mult[mb_mode_index]
526 >= (MIN_THRESHMULT + best_adjustment)) ?
527 cpi->rd_thresh_mult[mb_mode_index] - best_adjustment :
528 MIN_THRESHMULT;
529 cpi->rd_threshes[mb_mode_index] =
530 (cpi->rd_baseline_thresh[mb_mode_index] >> 7)
531 * cpi->rd_thresh_mult[mb_mode_index];
532
533 }
534 */
535 // Note how often each mode chosen as best
536 cpi->mode_chosen_counts[mb_mode_index]++;
537 if (mbmi->mode == SPLITMV || mbmi->mode == NEWMV) {
538 int_mv best_mv, best_second_mv;
539 MV_REFERENCE_FRAME rf = mbmi->ref_frame;
540 #if CONFIG_NEW_MVREF
541 unsigned int best_index;
542 MV_REFERENCE_FRAME sec_ref_frame = mbmi->second_ref_frame;
543 #endif
544 best_mv.as_int = ctx->best_ref_mv.as_int;
545 best_second_mv.as_int = ctx->second_best_ref_mv.as_int;
546 if (mbmi->mode == NEWMV) {
547 best_mv.as_int = mbmi->ref_mvs[rf][0].as_int;
548 best_second_mv.as_int = mbmi->ref_mvs[mbmi->second_ref_frame][0].as_int;
549 #if CONFIG_NEW_MVREF
550 best_index = pick_best_mv_ref(x, rf, mbmi->mv[0],
551 mbmi->ref_mvs[rf], &best_mv);
552 mbmi->best_index = best_index;
553
554 if (mbmi->second_ref_frame > 0) {
555 unsigned int best_index;
556 best_index =
557 pick_best_mv_ref(x, sec_ref_frame, mbmi->mv[1],
558 mbmi->ref_mvs[sec_ref_frame],
559 &best_second_mv);
560 mbmi->best_second_index = best_index;
561 }
562 #endif
563 }
564 mbmi->best_mv.as_int = best_mv.as_int;
565 mbmi->best_second_mv.as_int = best_second_mv.as_int;
566 vp9_update_nmv_count(cpi, x, &best_mv, &best_second_mv);
567 }
568 #if CONFIG_COMP_INTERINTRA_PRED
569 if (mbmi->mode >= NEARESTMV && mbmi->mode < SPLITMV &&
570 mbmi->second_ref_frame <= INTRA_FRAME) {
571 if (mbmi->second_ref_frame == INTRA_FRAME) {
572 ++cpi->interintra_count[1];
573 ++cpi->ymode_count[mbmi->interintra_mode];
574 #if SEPARATE_INTERINTRA_UV
575 ++cpi->y_uv_mode_count[mbmi->interintra_mode][mbmi->interintra_uv_mode];
576 #endif
577 } else {
578 ++cpi->interintra_count[0];
579 }
580 }
581 if (cpi->common.mcomp_filter_type == SWITCHABLE &&
582 mbmi->mode >= NEARESTMV &&
583 mbmi->mode <= SPLITMV) {
584 ++cpi->switchable_interp_count
585 [vp9_get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)]
586 [vp9_switchable_interp_map[mbmi->interp_filter]];
587 }
588 #endif
589
590 cpi->prediction_error += ctx->distortion;
591 cpi->intra_error += ctx->intra_error;
592
593 cpi->rd_comp_pred_diff[SINGLE_PREDICTION_ONLY] += ctx->single_pred_diff;
594 cpi->rd_comp_pred_diff[COMP_PREDICTION_ONLY] += ctx->comp_pred_diff;
595 cpi->rd_comp_pred_diff[HYBRID_PREDICTION] += ctx->hybrid_pred_diff;
596 }
597 }
598
599 static void pick_mb_modes(VP9_COMP *cpi,
600 VP9_COMMON *cm,
601 int mb_row,
602 int mb_col,
603 MACROBLOCK *x,
604 MACROBLOCKD *xd,
605 TOKENEXTRA **tp,
606 int *totalrate,
607 int *totaldist) {
608 int i;
609 int map_index;
610 int recon_yoffset, recon_uvoffset;
611 int ref_fb_idx = cm->lst_fb_idx;
612 int dst_fb_idx = cm->new_fb_idx;
613 int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
614 int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
615 ENTROPY_CONTEXT_PLANES left_context[2];
616 ENTROPY_CONTEXT_PLANES above_context[2];
617 ENTROPY_CONTEXT_PLANES *initial_above_context_ptr = cm->above_context
618 + mb_col;
619
620 // Offsets to move pointers from MB to MB within a SB in raster order
621 int row_delta[4] = { 0, +1, 0, -1};
622 int col_delta[4] = { +1, -1, +1, +1};
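/* These deltas, applied at the end of each iteration below, walk the four
 * MBs of the SB in raster order relative to the SB origin:
 *   (0,0) -> (0,1) -> (1,0) -> (1,1)
 * and the final step (-1 row, +1 col) leaves mb_row/mb_col at the origin
 * of the next SB to the right. */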
623
624 /* Function should not modify L & A contexts; save and restore on exit */
625 vpx_memcpy(left_context,
626 cm->left_context,
627 sizeof(left_context));
628 vpx_memcpy(above_context,
629 initial_above_context_ptr,
630 sizeof(above_context));
631
632 /* Encode MBs in raster order within the SB */
633 for (i = 0; i < 4; i++) {
634 int dy = row_delta[i];
635 int dx = col_delta[i];
636 int offset_unextended = dy * cm->mb_cols + dx;
637 int offset_extended = dy * xd->mode_info_stride + dx;
638 MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi;
639
640 // TODO Many of the index items here can be computed more efficiently!
641
642 if ((mb_row >= cm->mb_rows) || (mb_col >= cm->mb_cols)) {
643 // MB lies outside frame, move on
644 mb_row += dy;
645 mb_col += dx;
646
647 // Update pointers
648 x->src.y_buffer += 16 * (dx + dy * x->src.y_stride);
649 x->src.u_buffer += 8 * (dx + dy * x->src.uv_stride);
650 x->src.v_buffer += 8 * (dx + dy * x->src.uv_stride);
651
652 x->gf_active_ptr += offset_unextended;
653 x->partition_info += offset_extended;
654 xd->mode_info_context += offset_extended;
655 xd->prev_mode_info_context += offset_extended;
656 #if CONFIG_DEBUG
657 assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
658 (xd->mode_info_context - cpi->common.mip));
659 #endif
660 continue;
661 }
662
663 // Index of the MB in the SB 0..3
664 xd->mb_index = i;
665
666 map_index = (mb_row * cpi->common.mb_cols) + mb_col;
667 x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
668
669 // set above context pointer
670 xd->above_context = cm->above_context + mb_col;
671
672 // Restore the appropriate left context depending on which
673 // row of the SB the MB is situated in
674 xd->left_context = cm->left_context + (i >> 1);
675
676 // Set up distance of MB to edge of frame in 1/8th pel units
677 xd->mb_to_top_edge = -((mb_row * 16) << 3);
678 xd->mb_to_left_edge = -((mb_col * 16) << 3);
679 xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
680 xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
681
682 // Set up limit values for MV components to prevent them from
683 // extending beyond the UMV borders assuming 16x16 block size
684 x->mv_row_min = -((mb_row * 16) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
685 x->mv_col_min = -((mb_col * 16) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
686 x->mv_row_max = ((cm->mb_rows - mb_row) * 16 +
687 (VP9BORDERINPIXELS - 16 - VP9_INTERP_EXTEND));
688 x->mv_col_max = ((cm->mb_cols - mb_col) * 16 +
689 (VP9BORDERINPIXELS - 16 - VP9_INTERP_EXTEND));
690
691 xd->up_available = (mb_row != 0);
692 xd->left_available = (mb_col != 0);
693
694 recon_yoffset = (mb_row * recon_y_stride * 16) + (mb_col * 16);
695 recon_uvoffset = (mb_row * recon_uv_stride * 8) + (mb_col * 8);
696
697 xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
698 xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
699 xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
700
701 #if !CONFIG_SUPERBLOCKS
702 // Copy current MB to a work buffer
703 vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
704 #endif
705
706 x->rddiv = cpi->RDDIV;
707 x->rdmult = cpi->RDMULT;
708
709 if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
710 vp9_activity_masking(cpi, x);
711
712 // Is segmentation enabled
713 if (xd->segmentation_enabled) {
714 // Code to set segment id in xd->mbmi.segment_id
715 if (xd->update_mb_segmentation_map)
716 mbmi->segment_id = cpi->segmentation_map[map_index];
717 else
718 mbmi->segment_id = cm->last_frame_seg_map[map_index];
719 if (mbmi->segment_id > 3)
720 mbmi->segment_id = 0;
721
722 vp9_mb_init_quantizer(cpi, x);
723 } else
724 // Set to Segment 0 by default
725 mbmi->segment_id = 0;
726
727 x->active_ptr = cpi->active_map + map_index;
728
729 #if CONFIG_SUPERBLOCKS
730 xd->mode_info_context->mbmi.encoded_as_sb = 0;
731 #endif
732
733 cpi->update_context = 0; // TODO Do we need this now??
734
735 vp9_intra_prediction_down_copy(xd);
736
737 #ifdef ENC_DEBUG
738 enc_debug = (cpi->common.current_video_frame == 46 &&
739 mb_row == 5 && mb_col == 2);
740 #endif
741 // Find best coding mode & reconstruct the MB so it is available
742 // as a predictor for MBs that follow in the SB
743 if (cm->frame_type == KEY_FRAME) {
744 int r, d;
745 #ifdef ENC_DEBUG
746 if (enc_debug)
747 printf("intra pick_mb_modes %d %d\n", mb_row, mb_col);
748 #endif
749 vp9_rd_pick_intra_mode(cpi, x, &r, &d);
750 *totalrate += r;
751 *totaldist += d;
752
753 // Dummy encode, do not do the tokenization
754 encode_macroblock(cpi, x, tp,
755 recon_yoffset, recon_uvoffset, 0, mb_col, mb_row);
756 // Note the encoder may have changed the segment_id
757
758 // Save the coding context
759 vpx_memcpy(&x->mb_context[i].mic, xd->mode_info_context,
760 sizeof(MODE_INFO));
761 } else {
762 int seg_id, r, d;
763
764 if (xd->segmentation_enabled && cpi->seg0_cnt > 0 &&
765 !vp9_segfeature_active(xd, 0, SEG_LVL_REF_FRAME) &&
766 vp9_segfeature_active(xd, 1, SEG_LVL_REF_FRAME) &&
767 vp9_check_segref(xd, 1, INTRA_FRAME) +
768 vp9_check_segref(xd, 1, LAST_FRAME) +
769 vp9_check_segref(xd, 1, GOLDEN_FRAME) +
770 vp9_check_segref(xd, 1, ALTREF_FRAME) == 1) {
771 cpi->seg0_progress = (cpi->seg0_idx << 16) / cpi->seg0_cnt;
772 } else {
773 cpi->seg0_progress = (((mb_col & ~1) * 2 + (mb_row & ~1) * cm->mb_cols + i) << 16) / cm->MBs;
774 }
775
776 #ifdef ENC_DEBUG
777 if (enc_debug)
778 printf("inter pick_mb_modes %d %d\n", mb_row, mb_col);
779 #endif
780 vp9_pick_mode_inter_macroblock(cpi, x, recon_yoffset,
781 recon_uvoffset, &r, &d);
782 *totalrate += r;
783 *totaldist += d;
784
785 // Dummy encode, do not do the tokenization
786 encode_macroblock(cpi, x, tp,
787 recon_yoffset, recon_uvoffset, 0, mb_col, mb_row);
788
789 seg_id = mbmi->segment_id;
790 if (cpi->mb.e_mbd.segmentation_enabled && seg_id == 0) {
791 cpi->seg0_idx++;
792 }
793 if (!xd->segmentation_enabled ||
794 !vp9_segfeature_active(xd, seg_id, SEG_LVL_REF_FRAME) ||
795 vp9_check_segref(xd, seg_id, INTRA_FRAME) +
796 vp9_check_segref(xd, seg_id, LAST_FRAME) +
797 vp9_check_segref(xd, seg_id, GOLDEN_FRAME) +
798 vp9_check_segref(xd, seg_id, ALTREF_FRAME) > 1) {
799 // Get the prediction context and status
800 int pred_flag = vp9_get_pred_flag(xd, PRED_REF);
801 int pred_context = vp9_get_pred_context(cm, xd, PRED_REF);
802
803 // Count prediction success
804 cpi->ref_pred_count[pred_context][pred_flag]++;
805 }
806 }
807
808 // Next MB
809 mb_row += dy;
810 mb_col += dx;
811
812 x->src.y_buffer += 16 * (dx + dy * x->src.y_stride);
813 x->src.u_buffer += 8 * (dx + dy * x->src.uv_stride);
814 x->src.v_buffer += 8 * (dx + dy * x->src.uv_stride);
815
816 x->gf_active_ptr += offset_unextended;
817 x->partition_info += offset_extended;
818 xd->mode_info_context += offset_extended;
819 xd->prev_mode_info_context += offset_extended;
820
821 #if CONFIG_DEBUG
822 assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
823 (xd->mode_info_context - cpi->common.mip));
824 #endif
825 }
826
827 /* Restore L & A coding context to those in place on entry */
828 vpx_memcpy(cm->left_context,
829 left_context,
830 sizeof(left_context));
831 vpx_memcpy(initial_above_context_ptr,
832 above_context,
833 sizeof(above_context));
834 }
835
836 #if CONFIG_SUPERBLOCKS
837 static void pick_sb_modes(VP9_COMP *cpi,
838 VP9_COMMON *cm,
839 int mb_row,
840 int mb_col,
841 MACROBLOCK *x,
842 MACROBLOCKD *xd,
843 TOKENEXTRA **tp,
844 int *totalrate,
845 int *totaldist)
846 {
847 int map_index;
848 int recon_yoffset, recon_uvoffset;
849 int ref_fb_idx = cm->lst_fb_idx;
850 int dst_fb_idx = cm->new_fb_idx;
851 int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
852 int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
853 ENTROPY_CONTEXT_PLANES left_context[2];
854 ENTROPY_CONTEXT_PLANES above_context[2];
855 ENTROPY_CONTEXT_PLANES *initial_above_context_ptr = cm->above_context
856 + mb_col;
857
858 /* Function should not modify L & A contexts; save and restore on exit */
859 vpx_memcpy(left_context,
860 cm->left_context,
861 sizeof(left_context));
862 vpx_memcpy(above_context,
863 initial_above_context_ptr,
864 sizeof(above_context));
865
866 map_index = (mb_row * cpi->common.mb_cols) + mb_col;
867 x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
868
869 /* set above context pointer */
870 xd->above_context = cm->above_context + mb_col;
871
872 /* Restore the appropriate left context depending on which
873 * row of the SB the MB is situated in */
874 xd->left_context = cm->left_context;
875
876 // Set up distance of MB to edge of frame in 1/8th pel units
877 xd->mb_to_top_edge = -((mb_row * 16) << 3);
878 xd->mb_to_left_edge = -((mb_col * 16) << 3);
879 xd->mb_to_bottom_edge = ((cm->mb_rows - 2 - mb_row) * 16) << 3;
880 xd->mb_to_right_edge = ((cm->mb_cols - 2 - mb_col) * 16) << 3;
881
882 /* Set up limit values for MV components to prevent them from
883 * extending beyond the UMV borders assuming 16x16 block size */
884 x->mv_row_min = -((mb_row * 16) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
885 x->mv_col_min = -((mb_col * 16) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
886 x->mv_row_max = ((cm->mb_rows - mb_row) * 16 +
887 (VP9BORDERINPIXELS - 32 - VP9_INTERP_EXTEND));
888 x->mv_col_max = ((cm->mb_cols - mb_col) * 16 +
889 (VP9BORDERINPIXELS - 32 - VP9_INTERP_EXTEND));
890
891 xd->up_available = (mb_row != 0);
892 xd->left_available = (mb_col != 0);
893
894 recon_yoffset = (mb_row * recon_y_stride * 16) + (mb_col * 16);
895 recon_uvoffset = (mb_row * recon_uv_stride * 8) + (mb_col * 8);
896
897 xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
898 xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
899 xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
900 #if 0 // FIXME
901 /* Copy current MB to a work buffer */
902 vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
903 #endif
904 x->rddiv = cpi->RDDIV;
905 x->rdmult = cpi->RDMULT;
906 if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
907 vp9_activity_masking(cpi, x);
908 /* Is segmentation enabled */
909 if (xd->segmentation_enabled)
910 {
911 /* Code to set segment id in xd->mbmi.segment_id */
912 if (xd->update_mb_segmentation_map)
913 xd->mode_info_context->mbmi.segment_id =
914 cpi->segmentation_map[map_index] &&
915 cpi->segmentation_map[map_index + 1] &&
916 cpi->segmentation_map[map_index + cm->mb_cols] &&
917 cpi->segmentation_map[map_index + cm->mb_cols + 1];
918 else
919 xd->mode_info_context->mbmi.segment_id =
920 cm->last_frame_seg_map[map_index] &&
921 cm->last_frame_seg_map[map_index + 1] &&
922 cm->last_frame_seg_map[map_index + cm->mb_cols] &&
923 cm->last_frame_seg_map[map_index + cm->mb_cols + 1];
924 if (xd->mode_info_context->mbmi.segment_id > 3)
925 xd->mode_info_context->mbmi.segment_id = 0;
926
927 vp9_mb_init_quantizer(cpi, x);
928 }
929 else
930 /* Set to Segment 0 by default */
931 xd->mode_info_context->mbmi.segment_id = 0;
932
933 x->active_ptr = cpi->active_map + map_index;
934
935 cpi->update_context = 0; // TODO Do we need this now??
936
937 /* Find best coding mode & reconstruct the MB so it is available
938 * as a predictor for MBs that follow in the SB */
939 if (cm->frame_type == KEY_FRAME)
940 {
941 vp9_rd_pick_intra_mode_sb(cpi, x,
942 totalrate,
943 totaldist);
944
945 /* Save the coding context */
946 vpx_memcpy(&x->sb_context[0].mic, xd->mode_info_context,
947 sizeof(MODE_INFO));
948 } else {
949 if (xd->segmentation_enabled && cpi->seg0_cnt > 0 &&
950 !vp9_segfeature_active(xd, 0, SEG_LVL_REF_FRAME) &&
951 vp9_segfeature_active(xd, 1, SEG_LVL_REF_FRAME) &&
952 vp9_check_segref(xd, 1, INTRA_FRAME) +
953 vp9_check_segref(xd, 1, LAST_FRAME) +
954 vp9_check_segref(xd, 1, GOLDEN_FRAME) +
955 vp9_check_segref(xd, 1, ALTREF_FRAME) == 1) {
956 cpi->seg0_progress = (cpi->seg0_idx << 16) / cpi->seg0_cnt;
957 } else {
958 cpi->seg0_progress =
959 (((mb_col & ~1) * 2 + (mb_row & ~1) * cm->mb_cols) << 16) / cm->MBs;
960 }
961
962 vp9_rd_pick_inter_mode_sb(cpi, x,
963 recon_yoffset,
964 recon_uvoffset,
965 totalrate,
966 totaldist);
967 }
968
969 /* Restore L & A coding context to those in place on entry */
970 vpx_memcpy(cm->left_context,
971 left_context,
972 sizeof(left_context));
973 vpx_memcpy(initial_above_context_ptr,
974 above_context,
975 sizeof(above_context));
976 }
977 #endif
978
979 static void encode_sb(VP9_COMP *cpi,
980 VP9_COMMON *cm,
981 int mbrow,
982 int mbcol,
983 MACROBLOCK *x,
984 MACROBLOCKD *xd,
985 TOKENEXTRA **tp) {
986 int i;
987 int map_index;
988 int mb_row, mb_col;
989 int recon_yoffset, recon_uvoffset;
990 int ref_fb_idx = cm->lst_fb_idx;
991 int dst_fb_idx = cm->new_fb_idx;
992 int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
993 int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
994 int row_delta[4] = { 0, +1, 0, -1};
995 int col_delta[4] = { +1, -1, +1, +1};
996
997 mb_row = mbrow;
998 mb_col = mbcol;
999
1000 /* Encode MBs in raster order within the SB */
1001 for (i = 0; i < 4; i++) {
1002 int dy = row_delta[i];
1003 int dx = col_delta[i];
1004 int offset_extended = dy * xd->mode_info_stride + dx;
1005 int offset_unextended = dy * cm->mb_cols + dx;
1006 MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi;
1007
1008 if ((mb_row >= cm->mb_rows) || (mb_col >= cm->mb_cols)) {
1009 // MB lies outside frame, move on
1010 mb_row += dy;
1011 mb_col += dx;
1012
1013 x->src.y_buffer += 16 * (dx + dy * x->src.y_stride);
1014 x->src.u_buffer += 8 * (dx + dy * x->src.uv_stride);
1015 x->src.v_buffer += 8 * (dx + dy * x->src.uv_stride);
1016
1017 x->gf_active_ptr += offset_unextended;
1018 x->partition_info += offset_extended;
1019 xd->mode_info_context += offset_extended;
1020 xd->prev_mode_info_context += offset_extended;
1021
1022 #if CONFIG_DEBUG
1023 assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
1024 (xd->mode_info_context - cpi->common.mip));
1025 #endif
1026 continue;
1027 }
1028
1029 xd->mb_index = i;
1030
1031 // Restore MB state to that when it was picked
1032 #if CONFIG_SUPERBLOCKS
1033 if (xd->mode_info_context->mbmi.encoded_as_sb) {
1034 update_state(cpi, x, &x->sb_context[i]);
1035 cpi->sb_count++;
1036 } else
1037 #endif
1038 update_state(cpi, x, &x->mb_context[i]);
1039
1040 map_index = (mb_row * cpi->common.mb_cols) + mb_col;
1041 x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
1042
1043 // reset above block coeffs
1044 xd->above_context = cm->above_context + mb_col;
1045 xd->left_context = cm->left_context + (i >> 1);
1046
1047 // Set up distance of MB to edge of the frame in 1/8th pel units
1048 // Set up limit values for MV components to prevent them from
1049 // extending beyond the UMV borders assuming 32x32 block size
1050 x->mv_row_min = -((mb_row * 16) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
1051 x->mv_col_min = -((mb_col * 16) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
1052
1053 xd->mb_to_top_edge = -((mb_row * 16) << 3);
1054 xd->mb_to_left_edge = -((mb_col * 16) << 3);
1055
1056 #if CONFIG_SUPERBLOCKS
1057 if (xd->mode_info_context->mbmi.encoded_as_sb) {
1058 x->mv_row_max = ((cm->mb_rows - mb_row) * 16 +
1059 (VP9BORDERINPIXELS - 32 - VP9_INTERP_EXTEND));
1060 x->mv_col_max = ((cm->mb_cols - mb_col) * 16 +
1061 (VP9BORDERINPIXELS - 32 - VP9_INTERP_EXTEND));
1062
1063 xd->mb_to_bottom_edge = ((cm->mb_rows - 2 - mb_row) * 16) << 3;
1064 xd->mb_to_right_edge = ((cm->mb_cols - 2 - mb_col) * 16) << 3;
1065 } else {
1066 #endif
1067 x->mv_row_max = ((cm->mb_rows - mb_row) * 16 +
1068 (VP9BORDERINPIXELS - 16 - VP9_INTERP_EXTEND));
1069 x->mv_col_max = ((cm->mb_cols - mb_col) * 16 +
1070 (VP9BORDERINPIXELS - 16 - VP9_INTERP_EXTEND));
1071
1072 xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
1073 xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
1074 #if CONFIG_SUPERBLOCKS
1075 }
1076 #endif
1077
1078 xd->up_available = (mb_row != 0);
1079 xd->left_available = (mb_col != 0);
1080
1081 recon_yoffset = (mb_row * recon_y_stride * 16) + (mb_col * 16);
1082 recon_uvoffset = (mb_row * recon_uv_stride * 8) + (mb_col * 8);
1083
1084 xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
1085 xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
1086 xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
1087
1088 #if !CONFIG_SUPERBLOCKS
1089 // Copy current MB to a work buffer
1090 vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
1091 #endif
1092
1093 if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
1094 vp9_activity_masking(cpi, x);
1095
1096 // Is segmentation enabled
1097 if (xd->segmentation_enabled) {
1098 vp9_mb_init_quantizer(cpi, x);
1099 }
1100
1101 x->active_ptr = cpi->active_map + map_index;
1102
1103 cpi->update_context = 0;
1104
1105 #if CONFIG_SUPERBLOCKS
1106 if (!xd->mode_info_context->mbmi.encoded_as_sb)
1107 #endif
1108 vp9_intra_prediction_down_copy(xd);
1109
1110 if (cm->frame_type == KEY_FRAME) {
1111 #if CONFIG_SUPERBLOCKS
1112 if (xd->mode_info_context->mbmi.encoded_as_sb)
1113 encode_superblock(cpi, x, tp, recon_yoffset, recon_uvoffset,
1114 mb_col, mb_row);
1115 else
1116 #endif
1117 encode_macroblock(cpi, x, tp, recon_yoffset, recon_uvoffset, 1,
1118 mb_col, mb_row);
1119 // Note the encoder may have changed the segment_id
1120
1121 #ifdef MODE_STATS
1122 y_modes[mbmi->mode]++;
1123 #endif
1124 } else {
1125 unsigned char *segment_id;
1126 int seg_ref_active;
1127
1128 if (xd->mode_info_context->mbmi.ref_frame) {
1129 unsigned char pred_context;
1130
1131 pred_context = vp9_get_pred_context(cm, xd, PRED_COMP);
1132
1133 if (xd->mode_info_context->mbmi.second_ref_frame <= INTRA_FRAME)
1134 cpi->single_pred_count[pred_context]++;
1135 else
1136 cpi->comp_pred_count[pred_context]++;
1137 }
1138
1139 #if CONFIG_SUPERBLOCKS
1140 if (xd->mode_info_context->mbmi.encoded_as_sb)
1141 encode_superblock(cpi, x, tp, recon_yoffset, recon_uvoffset,
1142 mb_col, mb_row);
1143 else
1144 #endif
1145 encode_macroblock(cpi, x, tp, recon_yoffset, recon_uvoffset, 1,
1146 mb_col, mb_row);
1147 // Note the encoder may have changed the segment_id
1148
1149 #ifdef MODE_STATS
1150 inter_y_modes[mbmi->mode]++;
1151
1152 if (mbmi->mode == SPLITMV) {
1153 int b;
1154
1155 for (b = 0; b < x->partition_info->count; b++) {
1156 inter_b_modes[x->partition_info->bmi[b].mode]++;
1157 }
1158 }
1159
1160 #endif
1161
1162 // If we have just a single reference frame coded for a segment then
1163 // exclude it from the reference frame counts used to work out
1164 // probabilities. NOTE: At the moment we don't support custom trees
1165 // for the reference frame coding for each segment but this is a
1166 // possible future action.
1167 segment_id = &mbmi->segment_id;
1168 seg_ref_active = vp9_segfeature_active(xd, *segment_id,
1169 SEG_LVL_REF_FRAME);
1170 if (!seg_ref_active ||
1171 ((vp9_check_segref(xd, *segment_id, INTRA_FRAME) +
1172 vp9_check_segref(xd, *segment_id, LAST_FRAME) +
1173 vp9_check_segref(xd, *segment_id, GOLDEN_FRAME) +
1174 vp9_check_segref(xd, *segment_id, ALTREF_FRAME)) > 1)) {
1175 {
1176 cpi->count_mb_ref_frame_usage[mbmi->ref_frame]++;
1177 }
1178 }
1179
1180 // Count of last ref frame 0,0 usage
1181 if ((mbmi->mode == ZEROMV) && (mbmi->ref_frame == LAST_FRAME))
1182 cpi->inter_zz_count++;
1183 }
1184
1185 #if CONFIG_SUPERBLOCKS
1186 if (xd->mode_info_context->mbmi.encoded_as_sb) {
1187 x->src.y_buffer += 32;
1188 x->src.u_buffer += 16;
1189 x->src.v_buffer += 16;
1190
1191 x->gf_active_ptr += 2;
1192 x->partition_info += 2;
1193 xd->mode_info_context += 2;
1194 xd->prev_mode_info_context += 2;
1195
1196 (*tp)->Token = EOSB_TOKEN;
1197 (*tp)++;
1198 if (mb_row < cm->mb_rows) cpi->tplist[mb_row].stop = *tp;
1199 break;
1200 }
1201 #endif
1202
1203 // Next MB
1204 mb_row += dy;
1205 mb_col += dx;
1206
1207 x->src.y_buffer += 16 * (dx + dy * x->src.y_stride);
1208 x->src.u_buffer += 8 * (dx + dy * x->src.uv_stride);
1209 x->src.v_buffer += 8 * (dx + dy * x->src.uv_stride);
1210
1211 x->gf_active_ptr += offset_unextended;
1212 x->partition_info += offset_extended;
1213 xd->mode_info_context += offset_extended;
1214 xd->prev_mode_info_context += offset_extended;
1215
1216 #if CONFIG_DEBUG
1217 assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
1218 (xd->mode_info_context - cpi->common.mip));
1219 #endif
1220 (*tp)->Token = EOSB_TOKEN;
1221 (*tp)++;
1222 if (mb_row < cm->mb_rows) cpi->tplist[mb_row].stop = *tp;
1223 }
1224
1225 // debug output
1226 #if DBG_PRNT_SEGMAP
1227 {
1228 FILE *statsfile;
1229 statsfile = fopen("segmap2.stt", "a");
1230 fprintf(statsfile, "\n");
1231 fclose(statsfile);
1232 }
1233 #endif
1234 }
1235
1236 static
1237 void encode_sb_row(VP9_COMP *cpi,
1238 VP9_COMMON *cm,
1239 int mb_row,
1240 MACROBLOCK *x,
1241 MACROBLOCKD *xd,
1242 TOKENEXTRA **tp,
1243 int *totalrate) {
1244 int mb_col;
1245 int mb_cols = cm->mb_cols;
1246
1247 // Initialize the left context for the new SB row
1248 vpx_memset(cm->left_context, 0, sizeof(cm->left_context));
1249
1250 // Code each SB in the row
1251 for (mb_col = 0; mb_col < mb_cols; mb_col += 2) {
1252 int mb_rate = 0, mb_dist = 0;
1253 #if CONFIG_SUPERBLOCKS
1254 int sb_rate = INT_MAX, sb_dist;
1255 #endif
1256
1257 #if CONFIG_DEBUG
1258 MODE_INFO *mic = xd->mode_info_context;
1259 PARTITION_INFO *pi = x->partition_info;
1260 signed char *gfa = x->gf_active_ptr;
1261 unsigned char *yb = x->src.y_buffer;
1262 unsigned char *ub = x->src.u_buffer;
1263 unsigned char *vb = x->src.v_buffer;
1264 #endif
1265
1266 #if CONFIG_SUPERBLOCKS
1267 // Pick modes assuming the SB is coded as 4 independent MBs
1268 xd->mode_info_context->mbmi.encoded_as_sb = 0;
1269 #endif
1270 pick_mb_modes(cpi, cm, mb_row, mb_col, x, xd, tp, &mb_rate, &mb_dist);
1271 #if CONFIG_SUPERBLOCKS
1272 mb_rate += vp9_cost_bit(cm->sb_coded, 0);
1273 #endif
1274
1275 x->src.y_buffer -= 32;
1276 x->src.u_buffer -= 16;
1277 x->src.v_buffer -= 16;
1278
1279 x->gf_active_ptr -= 2;
1280 x->partition_info -= 2;
1281 xd->mode_info_context -= 2;
1282 xd->prev_mode_info_context -= 2;
1283
1284 #if CONFIG_DEBUG
1285 assert(x->gf_active_ptr == gfa);
1286 assert(x->partition_info == pi);
1287 assert(xd->mode_info_context == mic);
1288 assert(x->src.y_buffer == yb);
1289 assert(x->src.u_buffer == ub);
1290 assert(x->src.v_buffer == vb);
1291 #endif
1292
1293 #if CONFIG_SUPERBLOCKS
1294 if (!((( mb_cols & 1) && mb_col == mb_cols - 1) ||
1295 ((cm->mb_rows & 1) && mb_row == cm->mb_rows - 1))) {
1296 /* Pick a mode assuming that it applies to all 4 of the MBs in the SB */
1297 xd->mode_info_context->mbmi.encoded_as_sb = 1;
1298 pick_sb_modes(cpi, cm, mb_row, mb_col, x, xd, tp, &sb_rate, &sb_dist);
1299 sb_rate += vp9_cost_bit(cm->sb_coded, 1);
1300 }
1301
1302 /* Decide whether to encode as a SB or 4xMBs */
1303 if (sb_rate < INT_MAX &&
1304 RDCOST(x->rdmult, x->rddiv, sb_rate, sb_dist) <
1305 RDCOST(x->rdmult, x->rddiv, mb_rate, mb_dist)) {
1306 xd->mode_info_context->mbmi.encoded_as_sb = 1;
1307 xd->mode_info_context[1].mbmi.encoded_as_sb = 1;
1308 xd->mode_info_context[cm->mode_info_stride].mbmi.encoded_as_sb = 1;
1309 xd->mode_info_context[1 + cm->mode_info_stride].mbmi.encoded_as_sb = 1;
1310 *totalrate += sb_rate;
1311 } else
1312 #endif
1313 {
1314 #if CONFIG_SUPERBLOCKS
1315 xd->mode_info_context->mbmi.encoded_as_sb = 0;
1316 if (cm->mb_cols - 1 > mb_col)
1317 xd->mode_info_context[1].mbmi.encoded_as_sb = 0;
1318 if (cm->mb_rows - 1 > mb_row) {
1319 xd->mode_info_context[cm->mode_info_stride].mbmi.encoded_as_sb = 0;
1320 if (cm->mb_cols - 1 > mb_col)
1321 xd->mode_info_context[1 + cm->mode_info_stride].mbmi.encoded_as_sb = 0;
1322 }
1323 #endif
1324 *totalrate += mb_rate;
1325 }
1326
1327 /* Encode SB using best computed mode(s) */
1328 encode_sb(cpi, cm, mb_row, mb_col, x, xd, tp);
1329
1330 #if CONFIG_DEBUG
1331 assert(x->gf_active_ptr == gfa + 2);
1332 assert(x->partition_info == pi + 2);
1333 assert(xd->mode_info_context == mic + 2);
1334 assert(x->src.y_buffer == yb + 32);
1335 assert(x->src.u_buffer == ub + 16);
1336 assert(x->src.v_buffer == vb + 16);
1337 #endif
1338 }
1339
1340 // This is to account for the border and advance to the next SB row
1341 x->gf_active_ptr += mb_cols - (mb_cols & 0x1);
1342 x->partition_info += xd->mode_info_stride + 1 - (mb_cols & 0x1);
1343 xd->mode_info_context += xd->mode_info_stride + 1 - (mb_cols & 0x1);
1344 xd->prev_mode_info_context += xd->mode_info_stride + 1 - (mb_cols & 0x1);
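/* Assuming the usual one-entry border column (mode_info_stride ==
 * mb_cols + 1): an SB row spans two MB rows, i.e. 2 * stride entries, and
 * the column loop above has already advanced these pointers by
 * mb_cols + (mb_cols & 1), so stride + 1 - (mb_cols & 1) completes the
 * move to the first SB of the next row. */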
1345
1346 #if CONFIG_DEBUG
1347 assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
1348 (xd->mode_info_context - cpi->common.mip));
1349 #endif
1350 }
1351
1352 static void init_encode_frame_mb_context(VP9_COMP *cpi) {
1353 MACROBLOCK *const x = &cpi->mb;
1354 VP9_COMMON *const cm = &cpi->common;
1355 MACROBLOCKD *const xd = &x->e_mbd;
1356
1357 // GF active flags data structure
1358 x->gf_active_ptr = (signed char *)cpi->gf_active_flags;
1359
1360 // Activity map pointer
1361 x->mb_activity_ptr = cpi->mb_activity_map;
1362
1363 x->act_zbin_adj = 0;
1364 cpi->seg0_idx = 0;
1365 vpx_memset(cpi->ref_pred_count, 0, sizeof(cpi->ref_pred_count));
1366
1367 x->partition_info = x->pi;
1368
1369 xd->mode_info_context = cm->mi;
1370 xd->mode_info_stride = cm->mode_info_stride;
1371 xd->prev_mode_info_context = cm->prev_mi;
1372
1373 xd->frame_type = cm->frame_type;
1374
1375 xd->frames_since_golden = cm->frames_since_golden;
1376 xd->frames_till_alt_ref_frame = cm->frames_till_alt_ref_frame;
1377
1378 // reset intra mode contexts
1379 if (cm->frame_type == KEY_FRAME)
1380 vp9_init_mbmode_probs(cm);
1381
1382 // Copy data over into macro block data structures.
1383 x->src = *cpi->Source;
1384 xd->pre = cm->yv12_fb[cm->lst_fb_idx];
1385 xd->dst = cm->yv12_fb[cm->new_fb_idx];
1386
1387 // set up frame for intra coded blocks
1388 vp9_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
1389
1390 vp9_build_block_offsets(x);
1391
1392 vp9_setup_block_dptrs(&x->e_mbd);
1393
1394 vp9_setup_block_ptrs(x);
1395
1396 xd->mode_info_context->mbmi.mode = DC_PRED;
1397 xd->mode_info_context->mbmi.uv_mode = DC_PRED;
1398
1399 vp9_zero(cpi->count_mb_ref_frame_usage)
1400 vp9_zero(cpi->bmode_count)
1401 vp9_zero(cpi->ymode_count)
1402 vp9_zero(cpi->i8x8_mode_count)
1403 vp9_zero(cpi->y_uv_mode_count)
1404 vp9_zero(cpi->sub_mv_ref_count)
1405 vp9_zero(cpi->mbsplit_count)
1406 vp9_zero(cpi->common.fc.mv_ref_ct)
1407 #if CONFIG_SUPERBLOCKS
1408 vp9_zero(cpi->sb_ymode_count)
1409 cpi->sb_count = 0;
1410 #endif
1411 #if CONFIG_COMP_INTERINTRA_PRED
1412 vp9_zero(cpi->interintra_count);
1413 vp9_zero(cpi->interintra_select_count);
1414 #endif
1415
1416 vpx_memset(cm->above_context, 0,
1417 sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
1418
1419 xd->fullpixel_mask = 0xffffffff;
1420 if (cm->full_pixel)
1421 xd->fullpixel_mask = 0xfffffff8;
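/* MV components are in 1/8-pel units; clearing the low three bits snaps
 * them to whole pixels, e.g. (assumed value) 13 (1 5/8 pel) & ~7 -> 8. */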
1422 }
1423
1424 static void encode_frame_internal(VP9_COMP *cpi) {
1425 int mb_row;
1426 MACROBLOCK *const x = &cpi->mb;
1427 VP9_COMMON *const cm = &cpi->common;
1428 MACROBLOCKD *const xd = &x->e_mbd;
1429
1430 TOKENEXTRA *tp = cpi->tok;
1431 int totalrate;
1432
1433 // printf("encode_frame_internal frame %d (%d)\n",
1434 // cpi->common.current_video_frame, cpi->common.show_frame);
1435
1436 // Compute a modified set of reference frame probabilities to use when
1437 // prediction fails. These are based on the current general estimates for
1438 // this frame which may be updated with each iteration of the recode loop.
1439 vp9_compute_mod_refprobs(cm);
1440
1441 #if CONFIG_NEW_MVREF
1442 // temp stats reset
1443 vp9_zero(cpi->best_ref_index_counts);
1444 #endif
1445
1446 // debug output
1447 #if DBG_PRNT_SEGMAP
1448 {
1449 FILE *statsfile;
1450 statsfile = fopen("segmap2.stt", "a");
1451 fprintf(statsfile, "\n");
1452 fclose(statsfile);
1453 }
1454 #endif
1455
1456 totalrate = 0;
1457
1458 // Functions setup for all frame types so we can use MC in AltRef
1459 vp9_setup_interp_filters(xd, cm->mcomp_filter_type, cm);
1460
1461 // Reset frame count of inter 0,0 motion vector usage.
1462 cpi->inter_zz_count = 0;
1463
1464 cpi->prediction_error = 0;
1465 cpi->intra_error = 0;
1466 cpi->skip_true_count[0] = cpi->skip_true_count[1] = cpi->skip_true_count[2] = 0;
1467 cpi->skip_false_count[0] = cpi->skip_false_count[1] = cpi->skip_false_count[2] = 0;
1468
1469 #if CONFIG_PRED_FILTER
1470 if (cm->current_video_frame == 0) {
1471 // Initially assume that we'll signal the prediction filter
1472 // state at the frame level and that it is off.
1473 cpi->common.pred_filter_mode = 0;
1474 cpi->common.prob_pred_filter_off = 128;
1475 }
1476 cpi->pred_filter_on_count = 0;
1477 cpi->pred_filter_off_count = 0;
1478 #endif
1479 vp9_zero(cpi->switchable_interp_count);
1480
1481 xd->mode_info_context = cm->mi;
1482 xd->prev_mode_info_context = cm->prev_mi;
1483
1484 vp9_zero(cpi->NMVcount);
1485 vp9_zero(cpi->coef_counts);
1486 vp9_zero(cpi->hybrid_coef_counts);
1487 vp9_zero(cpi->coef_counts_8x8);
1488 vp9_zero(cpi->hybrid_coef_counts_8x8);
1489 vp9_zero(cpi->coef_counts_16x16);
1490 vp9_zero(cpi->hybrid_coef_counts_16x16);
1491
1492 vp9_frame_init_quantizer(cpi);
1493
1494 vp9_initialize_rd_consts(cpi, cm->base_qindex + cm->y1dc_delta_q);
1495 vp9_initialize_me_consts(cpi, cm->base_qindex);
1496
1497 if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
1498 // Initialize encode frame context.
1499 init_encode_frame_mb_context(cpi);
1500
1501 // Build a frame level activity map
1502 build_activity_map(cpi);
1503 }
1504
1505 // Re-initialize the encode frame context.
1506 init_encode_frame_mb_context(cpi);
1507
1508 vpx_memset(cpi->rd_comp_pred_diff, 0, sizeof(cpi->rd_comp_pred_diff));
1509 vpx_memset(cpi->single_pred_count, 0, sizeof(cpi->single_pred_count));
1510 vpx_memset(cpi->comp_pred_count, 0, sizeof(cpi->comp_pred_count));
1511 vpx_memset(cpi->txfm_count, 0, sizeof(cpi->txfm_count));
1512 vpx_memset(cpi->txfm_count_8x8p, 0, sizeof(cpi->txfm_count_8x8p));
1513 vpx_memset(cpi->rd_tx_select_diff, 0, sizeof(cpi->rd_tx_select_diff));
1514 {
1515 struct vpx_usec_timer emr_timer;
1516 vpx_usec_timer_start(&emr_timer);
1517
1518 {
1519 // For each row of SBs in the frame
1520 for (mb_row = 0; mb_row < cm->mb_rows; mb_row += 2) {
1521 int offset = (cm->mb_cols + 1) & ~0x1;
1522
1523 encode_sb_row(cpi, cm, mb_row, x, xd, &tp, &totalrate);
1524
1525 // adjust to the next row of SBs
1526 x->src.y_buffer += 32 * x->src.y_stride - 16 * offset;
1527 x->src.u_buffer += 16 * x->src.uv_stride - 8 * offset;
1528 x->src.v_buffer += 16 * x->src.uv_stride - 8 * offset;
1529 }
1530
1531 cpi->tok_count = (unsigned int)(tp - cpi->tok);
1532 }
1533
1534 vpx_usec_timer_mark(&emr_timer);
1535 cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
1536
1537 }
1538
1539 // 256 rate units to the bit: shifting totalrate right by 8 converts
1540 // it to the projected frame size
1541 cpi->projected_frame_size = totalrate >> 8;
1542
1543
1544 #if 0
1545 // Keep record of the total distortion this time around for future use
1546 cpi->last_frame_distortion = cpi->frame_distortion;
1547 #endif
1548
1549 }
1550
1551 static int check_dual_ref_flags(VP9_COMP *cpi) {
1552 MACROBLOCKD *xd = &cpi->mb.e_mbd;
1553 int ref_flags = cpi->ref_frame_flags;
1554
1555 if (vp9_segfeature_active(xd, 1, SEG_LVL_REF_FRAME)) {
1556 if ((ref_flags & (VP9_LAST_FLAG | VP9_GOLD_FLAG)) == (VP9_LAST_FLAG | VP9_GOLD_FLAG) &&
1557 vp9_check_segref(xd, 1, LAST_FRAME))
1558 return 1;
1559 if ((ref_flags & (VP9_GOLD_FLAG | VP9_ALT_FLAG)) == (VP9_GOLD_FLAG | VP9_ALT_FLAG) &&
1560 vp9_check_segref(xd, 1, GOLDEN_FRAME))
1561 return 1;
1562 if ((ref_flags & (VP9_ALT_FLAG | VP9_LAST_FLAG)) == (VP9_ALT_FLAG | VP9_LAST_FLAG) &&
1563 vp9_check_segref(xd, 1, ALTREF_FRAME))
1564 return 1;
1565 return 0;
1566 } else {
1567 return (!!(ref_flags & VP9_GOLD_FLAG) +
1568 !!(ref_flags & VP9_LAST_FLAG) +
1569 !!(ref_flags & VP9_ALT_FLAG)) >= 2;
1570 }
1571 }
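/* Example (flags assumed): with ref_flags = VP9_LAST_FLAG | VP9_GOLD_FLAG
 * and no segment-level reference restriction, the fallback branch counts
 * two usable references and returns 1, enabling compound prediction. */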
1572
1573 static void reset_skip_txfm_size(VP9_COMP *cpi, TX_SIZE txfm_max) {
1574 VP9_COMMON *cm = &cpi->common;
1575 int mb_row, mb_col, mis = cm->mode_info_stride, segment_id;
1576 MODE_INFO *mi, *mi_ptr = cm->mi;
1577 #if CONFIG_SUPERBLOCKS
1578 int skip;
1579 MODE_INFO *sb_mi_ptr = cm->mi, *sb_mi;
1580 MB_MODE_INFO *sb_mbmi;
1581 #endif
1582 MB_MODE_INFO *mbmi;
1583 MACROBLOCK *x = &cpi->mb;
1584 MACROBLOCKD *xd = &x->e_mbd;
1585
1586 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++, mi_ptr += mis) {
1587 mi = mi_ptr;
1588 #if CONFIG_SUPERBLOCKS
1589 sb_mi = sb_mi_ptr;
1590 #endif
1591 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++, mi++) {
1592 mbmi = &mi->mbmi;
1593 #if CONFIG_SUPERBLOCKS
1594 sb_mbmi = &sb_mi->mbmi;
1595 #endif
1596 if (mbmi->txfm_size > txfm_max) {
1597 #if CONFIG_SUPERBLOCKS
1598 if (sb_mbmi->encoded_as_sb) {
1599 if (!((mb_col & 1) || (mb_row & 1))) {
1600 segment_id = mbmi->segment_id;
1601 skip = mbmi->mb_skip_coeff;
1602 if (mb_col < cm->mb_cols - 1) {
1603 segment_id = segment_id && mi[1].mbmi.segment_id;
1604 skip = skip && mi[1].mbmi.mb_skip_coeff;
1605 }
1606 if (mb_row < cm->mb_rows - 1) {
1607 segment_id = segment_id &&
1608 mi[cm->mode_info_stride].mbmi.segment_id;
1609 skip = skip && mi[cm->mode_info_stride].mbmi.mb_skip_coeff;
1610 if (mb_col < cm->mb_cols - 1) {
1611 segment_id = segment_id &&
1612 mi[cm->mode_info_stride + 1].mbmi.segment_id;
1613 skip = skip && mi[cm->mode_info_stride + 1].mbmi.mb_skip_coeff;
1614 }
1615 }
1616 xd->mode_info_context = mi;
1617 assert((vp9_segfeature_active(xd, segment_id, SEG_LVL_EOB) &&
1618 vp9_get_segdata(xd, segment_id, SEG_LVL_EOB) == 0) ||
1619 (cm->mb_no_coeff_skip && skip));
1620 mbmi->txfm_size = txfm_max;
1621 } else {
1622 mbmi->txfm_size = sb_mbmi->txfm_size;
1623 }
1624 } else {
1625 #endif
1626 segment_id = mbmi->segment_id;
1627 xd->mode_info_context = mi;
1628 assert((vp9_segfeature_active(xd, segment_id, SEG_LVL_EOB) &&
1629 vp9_get_segdata(xd, segment_id, SEG_LVL_EOB) == 0) ||
1630 (cm->mb_no_coeff_skip && mbmi->mb_skip_coeff));
1631 mbmi->txfm_size = txfm_max;
1632 #if CONFIG_SUPERBLOCKS
1633 }
1634 #endif
1635 }
1636 #if CONFIG_SUPERBLOCKS
1637 if (mb_col & 1)
1638 sb_mi += 2;
1639 #endif
1640 }
1641 #if CONFIG_SUPERBLOCKS
1642 if (mb_row & 1)
1643 sb_mi_ptr += 2 * mis;
1644 #endif
1645 }
1646 }
1647
1648 void vp9_encode_frame(VP9_COMP *cpi) {
1649 if (cpi->sf.RD) {
1650 int i, frame_type, pred_type;
1651 TXFM_MODE txfm_type;
1652
1653 /*
1654 * This code does a single RD pass over the whole frame assuming
1655 * either compound, single or hybrid prediction as per whatever has
1656 * worked best for that type of frame in the past.
1657 * It also predicts whether another coding mode would have worked
1658 * better than this coding mode. If that is the case, it remembers
1659 * that for subsequent frames.
1660 * It does the same analysis for transform size selection also.
1661 */
1662 if (cpi->common.frame_type == KEY_FRAME)
1663 frame_type = 0;
1664 else if (cpi->is_src_frame_alt_ref && cpi->common.refresh_golden_frame)
1665 frame_type = 3;
1666 else if (cpi->common.refresh_golden_frame || cpi->common.refresh_alt_ref_frame)
1667 frame_type = 1;
1668 else
1669 frame_type = 2;
1670
1671 /* prediction (compound, single or hybrid) mode selection */
1672 if (frame_type == 3)
1673 pred_type = SINGLE_PREDICTION_ONLY;
1674 else if (cpi->rd_prediction_type_threshes[frame_type][1] >
1675 cpi->rd_prediction_type_threshes[frame_type][0] &&
1676 cpi->rd_prediction_type_threshes[frame_type][1] >
1677 cpi->rd_prediction_type_threshes[frame_type][2] &&
1678 check_dual_ref_flags(cpi) && cpi->static_mb_pct == 100)
1679 pred_type = COMP_PREDICTION_ONLY;
1680 else if (cpi->rd_prediction_type_threshes[frame_type][0] >
1681 cpi->rd_prediction_type_threshes[frame_type][2])
1682 pred_type = SINGLE_PREDICTION_ONLY;
1683 else
1684 pred_type = HYBRID_PREDICTION;
1685
1686 /* transform size (4x4, 8x8, 16x16 or select-per-mb) selection */
1687 #if CONFIG_LOSSLESS
1688 if (cpi->oxcf.lossless) {
1689 txfm_type = ONLY_4X4;
1690 } else
1691 #endif
1692 /* FIXME (rbultje)
1693 * this is a hack (no really), basically to work around the complete
1694 * nonsense coefficient cost prediction for keyframes. The probabilities
1695 * are reset to defaults, and thus we basically have no idea how expensive
1696 * a 4x4 vs. 8x8 will really be. The result is that any estimate at which
1697 * of the two is better is utterly bogus.
1698 * I'd like to eventually remove this hack, but in order to do that, we
1699 * need to move the frame reset code from the frame encode init to the
1700 * bitstream write code, or alternatively keep a backup of the previous
1701 * keyframe's probabilities as an estimate of what the current keyframe's
1702 * coefficient cost distributions may look like. */
1703 if (frame_type == 0) {
1704 txfm_type = ALLOW_16X16;
1705 } else
1706 #if 0
1707 /* FIXME (rbultje)
1708 * this code is disabled for a similar reason as the code above; the
1709 * problem is that each time we "revert" to 4x4 only (or even 8x8 only),
1710 * the coefficient probabilities for 16x16 (and 8x8) start lagging behind,
1711 * thus leading to them lagging further behind and not being chosen for
1712 * subsequent frames either. This is essentially a local minimum problem
1713 * that we can probably fix by estimating real costs more closely within
1714 * a frame, perhaps by re-calculating costs on-the-fly as frame encoding
1715 * progresses. */
1716 if (cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] >
1717 cpi->rd_tx_select_threshes[frame_type][ONLY_4X4] &&
1718 cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] >
1719 cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16] &&
1720 cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] >
1721 cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8]) {
1722 txfm_type = TX_MODE_SELECT;
1723 } else if (cpi->rd_tx_select_threshes[frame_type][ONLY_4X4] >
1724 cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8]
1725 && cpi->rd_tx_select_threshes[frame_type][ONLY_4X4] >
1726 cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16]
1727 ) {
1728 txfm_type = ONLY_4X4;
1729 } else if (cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16] >=
1730 cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8]) {
1731 txfm_type = ALLOW_16X16;
1732 } else
1733 txfm_type = ALLOW_8X8;
1734 #else
1735 txfm_type = cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16] >=
1736 cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
1737 ALLOW_16X16 : TX_MODE_SELECT;
1738 #endif
1739 cpi->common.txfm_mode = txfm_type;
1740 if (txfm_type != TX_MODE_SELECT) {
1741 cpi->common.prob_tx[0] = 128;
1742 cpi->common.prob_tx[1] = 128;
1743 }
1744 cpi->common.comp_pred_mode = pred_type;
1745 encode_frame_internal(cpi);
1746
1747 for (i = 0; i < NB_PREDICTION_TYPES; ++i) {
1748 const int diff = (int)(cpi->rd_comp_pred_diff[i] / cpi->common.MBs);
1749 cpi->rd_prediction_type_threshes[frame_type][i] += diff;
1750 cpi->rd_prediction_type_threshes[frame_type][i] >>= 1;
1751 }
1752
1753 for (i = 0; i < NB_TXFM_MODES; ++i) {
1754 int64_t pd = cpi->rd_tx_select_diff[i];
1755 int diff;
1756 if (i == TX_MODE_SELECT)
1757 pd -= RDCOST(cpi->mb.rdmult, cpi->mb.rddiv, 2048 * (TX_SIZE_MAX - 1), 0);
1758 diff = (int)(pd / cpi->common.MBs);
1759 cpi->rd_tx_select_threshes[frame_type][i] += diff;
1760 cpi->rd_tx_select_threshes[frame_type][i] /= 2;
1761 }
1762
1763 if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
1764 int single_count_zero = 0;
1765 int comp_count_zero = 0;
1766
1767 for (i = 0; i < COMP_PRED_CONTEXTS; i++) {
1768 single_count_zero += cpi->single_pred_count[i];
1769 comp_count_zero += cpi->comp_pred_count[i];
1770 }
1771
1772 if (comp_count_zero == 0) {
1773 cpi->common.comp_pred_mode = SINGLE_PREDICTION_ONLY;
1774 } else if (single_count_zero == 0) {
1775 cpi->common.comp_pred_mode = COMP_PREDICTION_ONLY;
1776 }
1777 }
1778
1779 if (cpi->common.txfm_mode == TX_MODE_SELECT) {
1780 const int count4x4 = cpi->txfm_count[TX_4X4] + cpi->txfm_count_8x8p[TX_4X4];
1781 const int count8x8 = cpi->txfm_count[TX_8X8];
1782 const int count8x8_8x8p = cpi->txfm_count_8x8p[TX_8X8];
1783 const int count16x16 = cpi->txfm_count[TX_16X16];
1784
1785 if (count4x4 == 0 && count16x16 == 0) {
1786 cpi->common.txfm_mode = ALLOW_8X8;
1787 reset_skip_txfm_size(cpi, TX_8X8);
1788 } else if (count8x8 == 0 && count16x16 == 0 && count8x8_8x8p == 0) {
1789 cpi->common.txfm_mode = ONLY_4X4;
1790 reset_skip_txfm_size(cpi, TX_4X4);
1791 } else if (count8x8 == 0 && count4x4 == 0) {
1792 cpi->common.txfm_mode = ALLOW_16X16;
1793 }
1794 }
1795 } else {
1796 encode_frame_internal(cpi);
1797 }
1798
1799 }
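
/* Editor's sketch (not part of the patch): the two update loops near the
 * end of vp9_encode_frame() above maintain per-frame-type running averages
 * of the per-MB RD cost differences: each frame adds its own per-MB diff
 * and then halves the accumulated value, so older frames decay by powers
 * of two. (The TX_MODE_SELECT entry is first debited the rate cost of
 * signalling a per-MB transform size; note also that the prediction loop
 * halves with >> 1 while the transform loop uses / 2, which differ only in
 * how negative values round.) Simplified standalone form, assuming a
 * 64-bit type for the accumulated diff: */
static int update_rd_threshold(int thresh, long long frame_diff, int num_mbs) {
  thresh += (int)(frame_diff / num_mbs);  /* add this frame's per-MB diff */
  return thresh / 2;                      /* decay the older history */
}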
1800
1801 void vp9_setup_block_ptrs(MACROBLOCK *x) {
1802 int r, c;
1803 int i;
1804
1805 for (r = 0; r < 4; r++) {
1806 for (c = 0; c < 4; c++) {
1807 x->block[r * 4 + c].src_diff = x->src_diff + r * 4 * 16 + c * 4;
1808 }
1809 }
1810
1811 for (r = 0; r < 2; r++) {
1812 for (c = 0; c < 2; c++) {
1813 x->block[16 + r * 2 + c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;
1814 }
1815 }
1816
1817
1818 for (r = 0; r < 2; r++) {
1819 for (c = 0; c < 2; c++) {
1820 x->block[20 + r * 2 + c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;
1821 }
1822 }
1823
1824 x->block[24].src_diff = x->src_diff + 384;
1825
1826
1827 for (i = 0; i < 25; i++) {
1828 x->block[i].coeff = x->coeff + i * 16;
1829 }
1830 }
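
/* Editor's sketch (not part of the patch): the pointer layout built by
 * vp9_setup_block_ptrs() above, written out as plain offset math. All 25
 * blocks share one 400-entry difference buffer: 16 luma blocks at offset 0
 * (a 4x4 grid of 4x4 blocks in a 16-wide area), 4 U blocks at 256 and 4 V
 * blocks at 320 (2x2 grids in 8-wide areas), and the second-order block at
 * 384; coefficients are always 16 per block (x->coeff + i * 16). */
static int src_diff_offset(int i) {
  if (i < 16)  /* Y */
    return (i / 4) * 4 * 16 + (i % 4) * 4;
  if (i < 20)  /* U */
    return 256 + ((i - 16) / 2) * 4 * 8 + ((i - 16) % 2) * 4;
  if (i < 24)  /* V */
    return 320 + ((i - 20) / 2) * 4 * 8 + ((i - 20) % 2) * 4;
  return 384;  /* second-order block */
}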
1831
1832 void vp9_build_block_offsets(MACROBLOCK *x) {
1833 int block = 0;
1834 int br, bc;
1835
1836 vp9_build_block_doffsets(&x->e_mbd);
1837
1838 #if !CONFIG_SUPERBLOCKS
1839 // y blocks
1840 x->thismb_ptr = &x->thismb[0];
1841 for (br = 0; br < 4; br++) {
1842 for (bc = 0; bc < 4; bc++) {
1843 BLOCK *this_block = &x->block[block];
1844 // this_block->base_src = &x->src.y_buffer;
1845 // this_block->src_stride = x->src.y_stride;
1846 // this_block->src = 4 * br * this_block->src_stride + 4 * bc;
1847 this_block->base_src = &x->thismb_ptr;
1848 this_block->src_stride = 16;
1849 this_block->src = 4 * br * 16 + 4 * bc;
1850 ++block;
1851 }
1852 }
1853 #else
1854 for (br = 0; br < 4; br++) {
1855 for (bc = 0; bc < 4; bc++) {
1856 BLOCK *this_block = &x->block[block];
1857 // this_block->base_src = &x->src.y_buffer;
1858 // this_block->src_stride = x->src.y_stride;
1859 // this_block->src = 4 * br * this_block->src_stride + 4 * bc;
1860 this_block->base_src = &x->src.y_buffer;
1861 this_block->src_stride = x->src.y_stride;
1862 this_block->src = 4 * br * this_block->src_stride + 4 * bc;
1863 ++block;
1864 }
1865 }
1866 #endif
1867
1868 // u blocks
1869 for (br = 0; br < 2; br++) {
1870 for (bc = 0; bc < 2; bc++) {
1871 BLOCK *this_block = &x->block[block];
1872 this_block->base_src = &x->src.u_buffer;
1873 this_block->src_stride = x->src.uv_stride;
1874 this_block->src = 4 * br * this_block->src_stride + 4 * bc;
1875 ++block;
1876 }
1877 }
1878
1879 // v blocks
1880 for (br = 0; br < 2; br++) {
1881 for (bc = 0; bc < 2; bc++) {
1882 BLOCK *this_block = &x->block[block];
1883 this_block->base_src = &x->src.v_buffer;
1884 this_block->src_stride = x->src.uv_stride;
1885 this_block->src = 4 * br * this_block->src_stride + 4 * bc;
1886 ++block;
1887 }
1888 }
1889 }
1890
1891 static void sum_intra_stats(VP9_COMP *cpi, MACROBLOCK *x) {
1892 const MACROBLOCKD *xd = &x->e_mbd;
1893 const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
1894 const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
1895
1896 #ifdef MODE_STATS
1897 const int is_key = cpi->common.frame_type == KEY_FRAME;
1898
1899 ++ (is_key ? uv_modes : inter_uv_modes)[uvm];
1900 ++ uv_modes_y[m][uvm];
1901
1902 if (m == B_PRED) {
1903 unsigned int *const bct = is_key ? b_modes : inter_b_modes;
1904
1905 int b = 0;
1906
1907 do {
1908 ++ bct[xd->block[b].bmi.as_mode.first];
1909 } while (++b < 16);
1910 }
1911
1912 if (m == I8X8_PRED) {
1913 i8x8_modes[xd->block[0].bmi.as_mode.first]++;
1914 i8x8_modes[xd->block[2].bmi.as_mode.first]++;
1915 i8x8_modes[xd->block[8].bmi.as_mode.first]++;
1916 i8x8_modes[xd->block[10].bmi.as_mode.first]++;
1917 }
1918 #endif
1919
1920 #if CONFIG_SUPERBLOCKS
1921 if (xd->mode_info_context->mbmi.encoded_as_sb) {
1922 ++cpi->sb_ymode_count[m];
1923 } else
1924 #endif
1925 ++cpi->ymode_count[m];
1926 if (m != I8X8_PRED)
1927 ++cpi->y_uv_mode_count[m][uvm];
1928 else {
1929 cpi->i8x8_mode_count[xd->block[0].bmi.as_mode.first]++;
1930 cpi->i8x8_mode_count[xd->block[2].bmi.as_mode.first]++;
1931 cpi->i8x8_mode_count[xd->block[8].bmi.as_mode.first]++;
1932 cpi->i8x8_mode_count[xd->block[10].bmi.as_mode.first]++;
1933 }
1934 if (m == B_PRED) {
1935 int b = 0;
1936 do {
1937 int m = xd->block[b].bmi.as_mode.first;
1938 #if CONFIG_NEWBINTRAMODES
1939 if (m == B_CONTEXT_PRED) m -= CONTEXT_PRED_REPLACEMENTS;
1940 #endif
1941 ++cpi->bmode_count[m];
1942 } while (++b < 16);
1943 }
1944 }
1945
1946 // Experimental stub function to create a per MB zbin adjustment based on
1947 // some previously calculated measure of MB activity.
1948 static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x) {
1949 #if USE_ACT_INDEX
1950 x->act_zbin_adj = *(x->mb_activity_ptr);
1951 #else
1952 int64_t a;
1953 int64_t b;
1954 int64_t act = *(x->mb_activity_ptr);
1955
1956 // Apply the masking to the RD multiplier.
1957 a = act + 4 * cpi->activity_avg;
1958 b = 4 * act + cpi->activity_avg;
1959
1960 if (act > cpi->activity_avg)
1961 x->act_zbin_adj = (int)(((int64_t)b + (a >> 1)) / a) - 1;
1962 else
1963 x->act_zbin_adj = 1 - (int)(((int64_t)a + (b >> 1)) / b);
1964 #endif
1965 }
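
/* Editor's sketch (not part of the patch): the masking arithmetic of
 * adjust_act_zbin() above in standalone form. With
 *   a = act + 4 * avg   and   b = 4 * act + avg,
 * the ratio b/a lies in (1/4, 4), so the rounded adjustment is confined to
 * roughly [-3, +3]: busier-than-average MBs get a positive zbin offset
 * (coarser quantization), flatter ones a negative offset. */
static int act_zbin_adjustment(int64_t act, int64_t avg) {
  const int64_t a = act + 4 * avg;
  const int64_t b = 4 * act + avg;
  if (act > avg)
    return (int)((b + (a >> 1)) / a) - 1;  /* round(b/a) - 1, in 0..3 */
  else
    return 1 - (int)((a + (b >> 1)) / b);  /* 1 - round(a/b), in -3..0 */
}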
1966
1967 #if CONFIG_SUPERBLOCKS
1968 static void update_sb_skip_coeff_state(VP9_COMP *cpi,
1969 MACROBLOCK *x,
1970 ENTROPY_CONTEXT_PLANES ta[4],
1971 ENTROPY_CONTEXT_PLANES tl[4],
1972 TOKENEXTRA *t[4],
1973 TOKENEXTRA **tp,
1974 int skip[4])
1975 {
1976 TOKENEXTRA tokens[4][16 * 24];
1977 int n_tokens[4], n;
1978
1979 // if there were no skips, we don't need to do anything
1980 if (!skip[0] && !skip[1] && !skip[2] && !skip[3])
1981 return;
1982
1983 // if we don't do coeff skipping for this frame, we don't
1984 // need to do anything here
1985 if (!cpi->common.mb_no_coeff_skip)
1986 return;
1987
1988 // if all 4 MBs skipped coeff coding, nothing to be done
1989 if (skip[0] && skip[1] && skip[2] && skip[3])
1990 return;
1991
1992 // so the situation now is that we want to skip coeffs
1993 // for some MBs, but not all, and we didn't code EOB
1994 // coefficients for them. However, the skip flag for this
1995 // SB will be 0 overall, so we need to insert EOBs in the
1996 // middle of the token tree. Do so here.
1997 n_tokens[0] = t[1] - t[0];
1998 n_tokens[1] = t[2] - t[1];
1999 n_tokens[2] = t[3] - t[2];
2000 n_tokens[3] = *tp - t[3];
2001 if (n_tokens[0])
2002 memcpy(tokens[0], t[0], n_tokens[0] * sizeof(*t[0]));
2003 if (n_tokens[1])
2004 memcpy(tokens[1], t[1], n_tokens[1] * sizeof(*t[0]));
2005 if (n_tokens[2])
2006 memcpy(tokens[2], t[2], n_tokens[2] * sizeof(*t[0]));
2007 if (n_tokens[3])
2008 memcpy(tokens[3], t[3], n_tokens[3] * sizeof(*t[0]));
2009
2010 // reset pointer, stuff EOBs where necessary
2011 *tp = t[0];
2012 for (n = 0; n < 4; n++) {
2013 if (skip[n]) {
2014 x->e_mbd.above_context = &ta[n];
2015 x->e_mbd.left_context = &tl[n];
2016 vp9_stuff_mb(cpi, &x->e_mbd, tp, 0);
2017 } else {
2018 if (n_tokens[n]) {
2019 memcpy(*tp, tokens[n], sizeof(*t[0]) * n_tokens[n]);
2020 }
2021 (*tp) += n_tokens[n];
2022 }
2023 }
2024 }
2025 #endif /* CONFIG_SUPERBLOCKS */
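
/* Editor's sketch (not part of the patch): the token-stream splice that
 * update_sb_skip_coeff_state() above performs, in schematic form. The four
 * MBs' tokens were written back-to-back; when only some MBs skip, the
 * stream is rebuilt in place from the saved copies, stuffing end-of-block
 * tokens for the skipped MBs so the decoder stays in sync. TOKEN stands in
 * for TOKENEXTRA and stuff_mb is a hypothetical stand-in for the
 * vp9_stuff_mb() call; assumes <string.h> for memcpy. */
typedef struct { int value; } TOKEN;

static void splice_sb_tokens(TOKEN **tp, TOKEN *sb_start,
                             TOKEN saved[4][16 * 24], const int n_saved[4],
                             const int skip[4], void (*stuff_mb)(TOKEN **)) {
  int n;
  *tp = sb_start;            /* rewind to the superblock's first token */
  for (n = 0; n < 4; n++) {
    if (skip[n]) {
      stuff_mb(tp);          /* insert EOB-only tokens for a skipped MB */
    } else {
      memcpy(*tp, saved[n], n_saved[n] * sizeof(TOKEN));
      *tp += n_saved[n];     /* replay the MB's saved tokens */
    }
  }
}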
2026
2027 static void encode_macroblock(VP9_COMP *cpi, MACROBLOCK *x,
2028 TOKENEXTRA **t, int recon_yoffset,
2029 int recon_uvoffset, int output_enabled,
2030 int mb_col, int mb_row) {
2031 VP9_COMMON *cm = &cpi->common;
2032 MACROBLOCKD *const xd = &x->e_mbd;
2033 MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi;
2034 unsigned char *segment_id = &mbmi->segment_id;
2035 int seg_ref_active;
2036 unsigned char ref_pred_flag;
2037
2038 x->skip = 0;
2039 #if CONFIG_SUPERBLOCKS
2040 assert(!xd->mode_info_context->mbmi.encoded_as_sb);
2041 #endif
2042
2043 #ifdef ENC_DEBUG
2044 enc_debug = (cpi->common.current_video_frame == 46 &&
2045 mb_row == 5 && mb_col == 2);
2046 if (enc_debug)
2047 printf("Encode MB %d %d output %d\n", mb_row, mb_col, output_enabled);
2048 #endif
2049 if (cm->frame_type == KEY_FRAME) {
2050 if (cpi->oxcf.tuning == VP8_TUNE_SSIM && output_enabled) {
2051 // Adjust the zbin based on this MB rate.
2052 adjust_act_zbin(cpi, x);
2053 vp9_update_zbin_extra(cpi, x);
2054 }
2055 } else {
2056 vp9_setup_interp_filters(xd, mbmi->interp_filter, cm);
2057
2058 if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
2059 // Adjust the zbin based on this MB rate.
2060 adjust_act_zbin(cpi, x);
2061 }
2062
2063 // Experimental code. Special case for gf and arf zeromv modes.
2064 // Increase zbin size to suppress noise
2065 cpi->zbin_mode_boost = 0;
2066 if (cpi->zbin_mode_boost_enabled) {
2067 if (mbmi->ref_frame != INTRA_FRAME) {
2068 if (mbmi->mode == ZEROMV) {
2069 if (mbmi->ref_frame != LAST_FRAME)
2070 cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
2071 else
2072 cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
2073 } else if (mbmi->mode == SPLITMV)
2074 cpi->zbin_mode_boost = 0;
2075 else
2076 cpi->zbin_mode_boost = MV_ZBIN_BOOST;
2077 }
2078 }
2079
2080 vp9_update_zbin_extra(cpi, x);
2081
2082 seg_ref_active = vp9_segfeature_active(xd, *segment_id, SEG_LVL_REF_FRAME);
2083
2084 // SET VARIOUS PREDICTION FLAGS
2085
2086 // Did the chosen reference frame match its predicted value?
2087 ref_pred_flag = ((mbmi->ref_frame == vp9_get_pred_ref(cm, xd)));
2088 vp9_set_pred_flag(xd, PRED_REF, ref_pred_flag);
2089 }
2090
2091 if (mbmi->ref_frame == INTRA_FRAME) {
2092 #ifdef ENC_DEBUG
2093 if (enc_debug) {
2094 printf("Mode %d skip %d tx_size %d\n", mbmi->mode, x->skip,
2095 mbmi->txfm_size);
2096 }
2097 #endif
2098 if (mbmi->mode == B_PRED) {
2099 vp9_encode_intra16x16mbuv(x);
2100 vp9_encode_intra4x4mby(x);
2101 } else if (mbmi->mode == I8X8_PRED) {
2102 vp9_encode_intra8x8mby(x);
2103 vp9_encode_intra8x8mbuv(x);
2104 } else {
2105 vp9_encode_intra16x16mbuv(x);
2106 vp9_encode_intra16x16mby(x);
2107 }
2108
2109 if (output_enabled)
2110 sum_intra_stats(cpi, x);
2111 } else {
2112 int ref_fb_idx;
2113 #ifdef ENC_DEBUG
2114 if (enc_debug)
2115 printf("Mode %d skip %d tx_size %d ref %d ref2 %d mv %d %d interp %d\n",
2116 mbmi->mode, x->skip, mbmi->txfm_size,
2117 mbmi->ref_frame, mbmi->second_ref_frame,
2118 mbmi->mv[0].as_mv.row, mbmi->mv[0].as_mv.col,
2119 mbmi->interp_filter);
2120 #endif
2121
2122 assert(cm->frame_type != KEY_FRAME);
2123
2124 if (mbmi->ref_frame == LAST_FRAME)
2125 ref_fb_idx = cpi->common.lst_fb_idx;
2126 else if (mbmi->ref_frame == GOLDEN_FRAME)
2127 ref_fb_idx = cpi->common.gld_fb_idx;
2128 else
2129 ref_fb_idx = cpi->common.alt_fb_idx;
2130
2131 xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
2132 xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
2133 xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
2134
2135 if (mbmi->second_ref_frame > 0) {
2136 int second_ref_fb_idx;
2137
2138 if (mbmi->second_ref_frame == LAST_FRAME)
2139 second_ref_fb_idx = cpi->common.lst_fb_idx;
2140 else if (mbmi->second_ref_frame == GOLDEN_FRAME)
2141 second_ref_fb_idx = cpi->common.gld_fb_idx;
2142 else
2143 second_ref_fb_idx = cpi->common.alt_fb_idx;
2144
2145 xd->second_pre.y_buffer = cpi->common.yv12_fb[second_ref_fb_idx].y_buffer +
2146 recon_yoffset;
2147 xd->second_pre.u_buffer = cpi->common.yv12_fb[second_ref_fb_idx].u_buffer +
2148 recon_uvoffset;
2149 xd->second_pre.v_buffer = cpi->common.yv12_fb[second_ref_fb_idx].v_buffer +
2150 recon_uvoffset;
2151 }
2152
2153 if (!x->skip) {
2154 vp9_encode_inter16x16(x);
2155
2156 // Clear mb_skip_coeff if mb_no_coeff_skip is not set
2157 if (!cpi->common.mb_no_coeff_skip)
2158 mbmi->mb_skip_coeff = 0;
2159
2160 } else {
2161 vp9_build_1st_inter16x16_predictors_mb(xd,
2162 xd->dst.y_buffer,
2163 xd->dst.u_buffer,
2164 xd->dst.v_buffer,
2165 xd->dst.y_stride,
2166 xd->dst.uv_stride);
2167 if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
2168 vp9_build_2nd_inter16x16_predictors_mb(xd,
2169 xd->dst.y_buffer,
2170 xd->dst.u_buffer,
2171 xd->dst.v_buffer,
2172 xd->dst.y_stride,
2173 xd->dst.uv_stride);
2174 }
2175 #if CONFIG_COMP_INTERINTRA_PRED
2176 else if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
2177 vp9_build_interintra_16x16_predictors_mb(xd,
2178 xd->dst.y_buffer,
2179 xd->dst.u_buffer,
2180 xd->dst.v_buffer,
2181 xd->dst.y_stride,
2182 xd->dst.uv_stride);
2183 }
2184 #endif
2185 }
2186 }
2187
2188 if (!x->skip) {
2189 #ifdef ENC_DEBUG
2190 if (enc_debug) {
2191 int i, j;
2192 printf("\n");
2193 printf("qcoeff\n");
2194 for (i = 0; i < 400; i++) {
2195 printf("%3d ", xd->qcoeff[i]);
2196 if (i % 16 == 15) printf("\n");
2197 }
2198 printf("\n");
2199 printf("predictor\n");
2200 for (i = 0; i < 384; i++) {
2201 printf("%3d ", xd->predictor[i]);
2202 if (i % 16 == 15) printf("\n");
2203 }
2204 printf("\n");
2205 printf("src_diff\n");
2206 for (i = 0; i < 384; i++) {
2207 printf("%3d ", x->src_diff[i]);
2208 if (i % 16 == 15) printf("\n");
2209 }
2210 printf("\n");
2211 printf("diff\n");
2212 for (i = 0; i < 384; i++) {
2213 printf("%3d ", xd->block[0].diff[i]);
2214 if (i % 16 == 15) printf("\n");
2215 }
2216 printf("\n");
2217 printf("final y\n");
2218 for (i = 0; i < 16; i++) {
2219 for (j = 0; j < 16; j++)
2220 printf("%3d ", xd->dst.y_buffer[i * xd->dst.y_stride + j]);
2221 printf("\n");
2222 }
2223 printf("\n");
2224 printf("final u\n");
2225 for (i = 0; i < 8; i++) {
2226 for (j = 0; j < 8; j++)
2227 printf("%3d ", xd->dst.u_buffer[i * xd->dst.uv_stride + j]);
2228 printf("\n");
2229 }
2230 printf("\n");
2231 printf("final v\n");
2232 for (i = 0; i < 8; i++) {
2233 for (j = 0; j < 8; j++)
2234 printf("%3d ", xd->dst.v_buffer[i * xd->dst.uv_stride + j]);
2235 printf("\n");
2236 }
2237 fflush(stdout);
2238 }
2239 #endif
2240
2241 vp9_tokenize_mb(cpi, xd, t, !output_enabled);
2242
2243 } else {
2244 int mb_skip_context =
2245 cpi->common.mb_no_coeff_skip ?
2246 (x->e_mbd.mode_info_context - 1)->mbmi.mb_skip_coeff +
2247 (x->e_mbd.mode_info_context - cpi->common.mode_info_stride)->mbmi.mb_skip_coeff :
2248 0;
2249 if (cpi->common.mb_no_coeff_skip) {
2250 mbmi->mb_skip_coeff = 1;
2251 if (output_enabled)
2252 cpi->skip_true_count[mb_skip_context]++;
2253 vp9_fix_contexts(xd);
2254 } else {
2255 vp9_stuff_mb(cpi, xd, t, !output_enabled);
2256 mbmi->mb_skip_coeff = 0;
2257 if (output_enabled)
2258 cpi->skip_false_count[mb_skip_context]++;
2259 }
2260 }
2261
2262 if (output_enabled) {
2263 int segment_id = mbmi->segment_id;
2264 if (cpi->common.txfm_mode == TX_MODE_SELECT &&
2265 !((cpi->common.mb_no_coeff_skip && mbmi->mb_skip_coeff) ||
2266 (vp9_segfeature_active(&x->e_mbd, segment_id, SEG_LVL_EOB) &&
2267 vp9_get_segdata(&x->e_mbd, segment_id, SEG_LVL_EOB) == 0))) {
2268 if (mbmi->mode != B_PRED && mbmi->mode != I8X8_PRED &&
2269 mbmi->mode != SPLITMV) {
2270 cpi->txfm_count[mbmi->txfm_size]++;
2271 } else if (mbmi->mode == I8X8_PRED ||
2272 (mbmi->mode == SPLITMV &&
2273 mbmi->partitioning != PARTITIONING_4X4)) {
2274 cpi->txfm_count_8x8p[mbmi->txfm_size]++;
2275 }
2276 } else if (mbmi->mode != B_PRED && mbmi->mode != I8X8_PRED &&
2277 mbmi->mode != SPLITMV && cpi->common.txfm_mode >= ALLOW_16X16) {
2278 mbmi->txfm_size = TX_16X16;
2279 } else if (mbmi->mode != B_PRED &&
2280 !(mbmi->mode == SPLITMV &&
2281 mbmi->partitioning == PARTITIONING_4X4) &&
2282 cpi->common.txfm_mode >= ALLOW_8X8) {
2283 mbmi->txfm_size = TX_8X8;
2284 } else {
2285 mbmi->txfm_size = TX_4X4;
2286 }
2287 }
2288 }
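
/* Editor's sketch (not part of the patch): the skip-flag context used by
 * encode_macroblock() above and encode_superblock() below. When frame-level
 * coefficient skipping is on, the context is the number of previously coded
 * neighbours (left and above) whose skip flag is set, i.e. a value in 0..2
 * that indexes cpi->skip_true_count[] / cpi->skip_false_count[]. */
static int get_mb_skip_context(int mb_no_coeff_skip,
                               int left_skip, int above_skip) {
  return mb_no_coeff_skip ? left_skip + above_skip : 0;
}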
2289
2290 #if CONFIG_SUPERBLOCKS
2291 static void encode_superblock(VP9_COMP *cpi, MACROBLOCK *x,
2292 TOKENEXTRA **t, int recon_yoffset,
2293 int recon_uvoffset, int mb_col, int mb_row) {
2294 VP9_COMMON *const cm = &cpi->common;
2295 MACROBLOCKD *const xd = &x->e_mbd;
2296 const uint8_t *src = x->src.y_buffer;
2297 uint8_t *dst = xd->dst.y_buffer;
2298 const uint8_t *usrc = x->src.u_buffer;
2299 uint8_t *udst = xd->dst.u_buffer;
2300 const uint8_t *vsrc = x->src.v_buffer;
2301 uint8_t *vdst = xd->dst.v_buffer;
2302 int src_y_stride = x->src.y_stride, dst_y_stride = xd->dst.y_stride;
2303 int src_uv_stride = x->src.uv_stride, dst_uv_stride = xd->dst.uv_stride;
2304 int seg_ref_active;
2305 unsigned char ref_pred_flag;
2306 int n;
2307 TOKENEXTRA *tp[4];
2308 int skip[4];
2309 MODE_INFO *mi = x->e_mbd.mode_info_context;
2310 unsigned int segment_id = mi->mbmi.segment_id;
2311 ENTROPY_CONTEXT_PLANES ta[4], tl[4];
2312
2313 x->skip = 0;
2314
2315 if (cm->frame_type == KEY_FRAME) {
2316 if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
2317 adjust_act_zbin(cpi, x);
2318 vp9_update_zbin_extra(cpi, x);
2319 }
2320 } else {
2321 vp9_setup_interp_filters(xd, xd->mode_info_context->mbmi.interp_filter, cm);
2322
2323 if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
2324 // Adjust the zbin based on this MB rate.
2325 adjust_act_zbin(cpi, x);
2326 }
2327
2328 // Experimental code. Special case for gf and arf zeromv modes.
2329 // Increase zbin size to suppress noise
2330 cpi->zbin_mode_boost = 0;
2331 if (cpi->zbin_mode_boost_enabled) {
2332 if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME) {
2333 if (xd->mode_info_context->mbmi.mode == ZEROMV) {
2334 if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME)
2335 cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
2336 else
2337 cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
2338 } else if (xd->mode_info_context->mbmi.mode == SPLITMV)
2339 cpi->zbin_mode_boost = 0;
2340 else
2341 cpi->zbin_mode_boost = MV_ZBIN_BOOST;
2342 }
2343 }
2344
2345 vp9_update_zbin_extra(cpi, x);
2346
2347 seg_ref_active = vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME);
2348
2349 // SET VARIOUS PREDICTION FLAGS
2350
2351 // Did the chosen reference frame match its predicted value?
2352 ref_pred_flag = ((xd->mode_info_context->mbmi.ref_frame ==
2353 vp9_get_pred_ref(cm, xd)));
2354 vp9_set_pred_flag(xd, PRED_REF, ref_pred_flag);
2355 }
2356
2357
2358 if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
2359 vp9_build_intra_predictors_sby_s(&x->e_mbd);
2360 vp9_build_intra_predictors_sbuv_s(&x->e_mbd);
2361 sum_intra_stats(cpi, x);
2362 } else {
2363 int ref_fb_idx;
2364
2365 assert(cm->frame_type != KEY_FRAME);
2366
2367 if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
2368 ref_fb_idx = cpi->common.lst_fb_idx;
2369 else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
2370 ref_fb_idx = cpi->common.gld_fb_idx;
2371 else
2372 ref_fb_idx = cpi->common.alt_fb_idx;
2373
2374 xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
2375 xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
2376 xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
2377
2378 if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
2379 int second_ref_fb_idx;
2380
2381 if (xd->mode_info_context->mbmi.second_ref_frame == LAST_FRAME)
2382 second_ref_fb_idx = cpi->common.lst_fb_idx;
2383 else if (xd->mode_info_context->mbmi.second_ref_frame == GOLDEN_FRAME)
2384 second_ref_fb_idx = cpi->common.gld_fb_idx;
2385 else
2386 second_ref_fb_idx = cpi->common.alt_fb_idx;
2387
2388 xd->second_pre.y_buffer = cpi->common.yv12_fb[second_ref_fb_idx].y_buffer +
2389 recon_yoffset;
2390 xd->second_pre.u_buffer = cpi->common.yv12_fb[second_ref_fb_idx].u_buffer +
2391 recon_uvoffset;
2392 xd->second_pre.v_buffer = cpi->common.yv12_fb[second_ref_fb_idx].v_buffer +
2393 recon_uvoffset;
2394 }
2395
2396 vp9_build_inter32x32_predictors_sb(xd, xd->dst.y_buffer,
2397 xd->dst.u_buffer, xd->dst.v_buffer,
2398 xd->dst.y_stride, xd->dst.uv_stride);
2399 }
2400
2401 for (n = 0; n < 4; n++) {
2402 int x_idx = n & 1, y_idx = n >> 1;
2403
2404 xd->left_context = cm->left_context + y_idx;
2405 xd->above_context = cm->above_context + mb_col + x_idx;
2406 memcpy(&ta[n], xd->above_context, sizeof(ta[n]));
2407 memcpy(&tl[n], xd->left_context, sizeof(tl[n]));
2408 tp[n] = *t;
2409 xd->mode_info_context = mi + x_idx + y_idx * cm->mode_info_stride;
2410
2411 vp9_subtract_mby_s_c(x->src_diff,
2412 src + x_idx * 16 + y_idx * 16 * src_y_stride,
2413 src_y_stride,
2414 dst + x_idx * 16 + y_idx * 16 * dst_y_stride,
2415 dst_y_stride);
2416 vp9_subtract_mbuv_s_c(x->src_diff,
2417 usrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
2418 vsrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
2419 src_uv_stride,
2420 udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
2421 vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
2422 dst_uv_stride);
2423 vp9_fidct_mb(x);
2424 vp9_recon_mby_s_c(&x->e_mbd,
2425 dst + x_idx * 16 + y_idx * 16 * dst_y_stride);
2426 vp9_recon_mbuv_s_c(&x->e_mbd,
2427 udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
2428 vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride);
2429
2430 if (!x->skip) {
2431 vp9_tokenize_mb(cpi, &x->e_mbd, t, 0);
2432 skip[n] = xd->mode_info_context->mbmi.mb_skip_coeff;
2433 } else {
2434 int mb_skip_context =
2435 cpi->common.mb_no_coeff_skip ?
2436 (x->e_mbd.mode_info_context - 1)->mbmi.mb_skip_coeff +
2437 (x->e_mbd.mode_info_context - cpi->common.mode_info_stride)->mbmi.mb_skip_coeff :
2438 0;
2439 xd->mode_info_context->mbmi.mb_skip_coeff = skip[n] = 1;
2440 if (cpi->common.mb_no_coeff_skip) {
2441 // TODO(rbultje) this should be done per-sb instead of per-mb?
2442 cpi->skip_true_count[mb_skip_context]++;
2443 vp9_fix_contexts(xd);
2444 } else {
2445 vp9_stuff_mb(cpi, xd, t, 0);
2446 // TODO(rbultje) this should be done per-sb instead of per-mb?
2447 cpi->skip_false_count[mb_skip_context]++;
2448 }
2449 }
2450 }
2451
2452 xd->mode_info_context = mi;
2453 update_sb_skip_coeff_state(cpi, x, ta, tl, tp, t, skip);
2454 if (cm->txfm_mode == TX_MODE_SELECT &&
2455 !((cm->mb_no_coeff_skip && skip[0] && skip[1] && skip[2] && skip[3]) ||
2456 (vp9_segfeature_active(xd, segment_id, SEG_LVL_EOB) &&
2457 vp9_get_segdata(xd, segment_id, SEG_LVL_EOB) == 0))) {
2458 cpi->txfm_count[mi->mbmi.txfm_size]++;
2459 } else {
2460 TX_SIZE sz = (cm->txfm_mode == TX_MODE_SELECT) ? TX_16X16 : cm->txfm_mode;
2461 mi->mbmi.txfm_size = sz;
2462 if (mb_col < cm->mb_cols - 1)
2463 mi[1].mbmi.txfm_size = sz;
2464 if (mb_row < cm->mb_rows - 1) {
2465 mi[cm->mode_info_stride].mbmi.txfm_size = sz;
2466 if (mb_col < cm->mb_cols - 1)
2467 mi[cm->mode_info_stride + 1].mbmi.txfm_size = sz;
2468 }
2469 }
2470 }
2471 #endif
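
/* Editor's sketch (not part of the patch): when encode_superblock() above
 * forces a single transform size rather than counting a per-MB choice, the
 * size is mirrored into all four MODE_INFO entries the 32x32 superblock
 * covers, skipping entries that fall outside the frame: */
static void propagate_sb_txfm_size(MODE_INFO *mi, int mode_info_stride,
                                   TX_SIZE sz, int mb_row, int mb_col,
                                   int mb_rows, int mb_cols) {
  mi[0].mbmi.txfm_size = sz;
  if (mb_col + 1 < mb_cols)                        /* MB to the right */
    mi[1].mbmi.txfm_size = sz;
  if (mb_row + 1 < mb_rows) {                      /* row below */
    mi[mode_info_stride].mbmi.txfm_size = sz;
    if (mb_col + 1 < mb_cols)
      mi[mode_info_stride + 1].mbmi.txfm_size = sz;
  }
}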