OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 11 matching lines...) Expand all Loading... |
22 #include "vp9/common/vp9_entropymode.h" | 22 #include "vp9/common/vp9_entropymode.h" |
23 #include "vp9/common/vp9_idct.h" | 23 #include "vp9/common/vp9_idct.h" |
24 #include "vp9/common/vp9_mvref_common.h" | 24 #include "vp9/common/vp9_mvref_common.h" |
25 #include "vp9/common/vp9_pred_common.h" | 25 #include "vp9/common/vp9_pred_common.h" |
26 #include "vp9/common/vp9_quant_common.h" | 26 #include "vp9/common/vp9_quant_common.h" |
27 #include "vp9/common/vp9_reconintra.h" | 27 #include "vp9/common/vp9_reconintra.h" |
28 #include "vp9/common/vp9_reconinter.h" | 28 #include "vp9/common/vp9_reconinter.h" |
29 #include "vp9/common/vp9_seg_common.h" | 29 #include "vp9/common/vp9_seg_common.h" |
30 #include "vp9/common/vp9_systemdependent.h" | 30 #include "vp9/common/vp9_systemdependent.h" |
31 #include "vp9/common/vp9_tile_common.h" | 31 #include "vp9/common/vp9_tile_common.h" |
| 32 |
| 33 #include "vp9/encoder/vp9_aq_complexity.h" |
| 34 #include "vp9/encoder/vp9_aq_cyclicrefresh.h" |
| 35 #include "vp9/encoder/vp9_aq_variance.h" |
32 #include "vp9/encoder/vp9_encodeframe.h" | 36 #include "vp9/encoder/vp9_encodeframe.h" |
33 #include "vp9/encoder/vp9_encodemb.h" | 37 #include "vp9/encoder/vp9_encodemb.h" |
34 #include "vp9/encoder/vp9_encodemv.h" | 38 #include "vp9/encoder/vp9_encodemv.h" |
35 #include "vp9/encoder/vp9_extend.h" | 39 #include "vp9/encoder/vp9_extend.h" |
36 #include "vp9/encoder/vp9_onyx_int.h" | |
37 #include "vp9/encoder/vp9_pickmode.h" | 40 #include "vp9/encoder/vp9_pickmode.h" |
38 #include "vp9/encoder/vp9_rdopt.h" | 41 #include "vp9/encoder/vp9_rdopt.h" |
39 #include "vp9/encoder/vp9_segmentation.h" | 42 #include "vp9/encoder/vp9_segmentation.h" |
40 #include "vp9/encoder/vp9_tokenize.h" | 43 #include "vp9/encoder/vp9_tokenize.h" |
41 #include "vp9/encoder/vp9_vaq.h" | 44 |
| 45 #define GF_ZEROMV_ZBIN_BOOST 0 |
| 46 #define LF_ZEROMV_ZBIN_BOOST 0 |
| 47 #define MV_ZBIN_BOOST 0 |
| 48 #define SPLIT_MV_ZBIN_BOOST 0 |
| 49 #define INTRA_ZBIN_BOOST 0 |
42 | 50 |
43 static INLINE uint8_t *get_sb_index(MACROBLOCK *x, BLOCK_SIZE subsize) { | 51 static INLINE uint8_t *get_sb_index(MACROBLOCK *x, BLOCK_SIZE subsize) { |
44 switch (subsize) { | 52 switch (subsize) { |
45 case BLOCK_64X64: | 53 case BLOCK_64X64: |
46 case BLOCK_64X32: | 54 case BLOCK_64X32: |
47 case BLOCK_32X64: | 55 case BLOCK_32X64: |
48 case BLOCK_32X32: | 56 case BLOCK_32X32: |
49 return &x->sb_index; | 57 return &x->sb_index; |
50 case BLOCK_32X16: | 58 case BLOCK_32X16: |
51 case BLOCK_16X32: | 59 case BLOCK_16X32: |
(...skipping 15 matching lines...) Expand all Loading... |
67 | 75 |
68 static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled, | 76 static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled, |
69 int mi_row, int mi_col, BLOCK_SIZE bsize); | 77 int mi_row, int mi_col, BLOCK_SIZE bsize); |
70 | 78 |
71 static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x); | 79 static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x); |
72 | 80 |
73 // activity_avg must be positive, or flat regions could get a zero weight | 81 // activity_avg must be positive, or flat regions could get a zero weight |
74 // (infinite lambda), which confounds analysis. | 82 // (infinite lambda), which confounds analysis. |
75 // This also avoids the need for divide by zero checks in | 83 // This also avoids the need for divide by zero checks in |
76 // vp9_activity_masking(). | 84 // vp9_activity_masking(). |
77 #define ACTIVITY_AVG_MIN (64) | 85 #define ACTIVITY_AVG_MIN 64 |
78 | 86 |
79 // Motion vector component magnitude threshold for defining fast motion. | 87 // Motion vector component magnitude threshold for defining fast motion. |
80 #define FAST_MOTION_MV_THRESH (24) | 88 #define FAST_MOTION_MV_THRESH 24 |
81 | 89 |
82 // This is used as a reference when computing the source variance for the | 90 // This is used as a reference when computing the source variance for the |
83 // purposes of activity masking. | 91 // purposes of activity masking. |
84 // Eventually this should be replaced by custom no-reference routines, | 92 // Eventually this should be replaced by custom no-reference routines, |
85 // which will be faster. | 93 // which will be faster. |
86 static const uint8_t VP9_VAR_OFFS[64] = { | 94 static const uint8_t VP9_VAR_OFFS[64] = { |
87 128, 128, 128, 128, 128, 128, 128, 128, | 95 128, 128, 128, 128, 128, 128, 128, 128, |
88 128, 128, 128, 128, 128, 128, 128, 128, | 96 128, 128, 128, 128, 128, 128, 128, 128, |
89 128, 128, 128, 128, 128, 128, 128, 128, | 97 128, 128, 128, 128, 128, 128, 128, 128, |
90 128, 128, 128, 128, 128, 128, 128, 128, | 98 128, 128, 128, 128, 128, 128, 128, 128, |
(...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
142 mi_row, mi_col, | 150 mi_row, mi_col, |
143 BLOCK_64X64); | 151 BLOCK_64X64); |
144 if (var < 4) | 152 if (var < 4) |
145 return BLOCK_64X64; | 153 return BLOCK_64X64; |
146 else if (var < 10) | 154 else if (var < 10) |
147 return BLOCK_32X32; | 155 return BLOCK_32X32; |
148 else | 156 else |
149 return BLOCK_16X16; | 157 return BLOCK_16X16; |
150 } | 158 } |
151 | 159 |
| 160 // Lighter version of set_offsets that only sets the mode info |
| 161 // pointers. |
| 162 static INLINE void set_modeinfo_offsets(VP9_COMMON *const cm, |
| 163 MACROBLOCKD *const xd, |
| 164 int mi_row, |
| 165 int mi_col) { |
| 166 const int idx_str = xd->mi_stride * mi_row + mi_col; |
| 167 xd->mi = cm->mi_grid_visible + idx_str; |
| 168 xd->mi[0] = cm->mi + idx_str; |
| 169 } |
| 170 |
| 171 static int is_block_in_mb_map(const VP9_COMP *cpi, int mi_row, int mi_col, |
| 172 BLOCK_SIZE bsize) { |
| 173 const VP9_COMMON *const cm = &cpi->common; |
| 174 const int mb_rows = cm->mb_rows; |
| 175 const int mb_cols = cm->mb_cols; |
| 176 const int mb_row = mi_row >> 1; |
| 177 const int mb_col = mi_col >> 1; |
| 178 const int mb_width = num_8x8_blocks_wide_lookup[bsize] >> 1; |
| 179 const int mb_height = num_8x8_blocks_high_lookup[bsize] >> 1; |
| 180 int r, c; |
| 181 if (bsize <= BLOCK_16X16) { |
| 182 return cpi->active_map[mb_row * mb_cols + mb_col]; |
| 183 } |
| 184 for (r = 0; r < mb_height; ++r) { |
| 185 for (c = 0; c < mb_width; ++c) { |
| 186 int row = mb_row + r; |
| 187 int col = mb_col + c; |
| 188 if (row >= mb_rows || col >= mb_cols) |
| 189 continue; |
| 190 if (cpi->active_map[row * mb_cols + col]) |
| 191 return 1; |
| 192 } |
| 193 } |
| 194 return 0; |
| 195 } |
| 196 |
| 197 static int check_active_map(const VP9_COMP *cpi, const MACROBLOCK *x, |
| 198 int mi_row, int mi_col, |
| 199 BLOCK_SIZE bsize) { |
| 200 if (cpi->active_map_enabled && !x->e_mbd.lossless) { |
| 201 return is_block_in_mb_map(cpi, mi_row, mi_col, bsize); |
| 202 } else { |
| 203 return 1; |
| 204 } |
| 205 } |
| 206 |
// Prepares all per-block encoder state for coding the block of size
// 'bsize' whose top-left corner is at (mi_row, mi_col): skip context,
// activity/active-map pointers, mode info pointers, destination and
// source plane pointers, MV search limits, RD constants and segment id.
// The statement order matters: later steps read state set by earlier ones
// (e.g. mbmi is taken from the pointers set by set_modeinfo_offsets()).
static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
                        int mi_row, int mi_col, BLOCK_SIZE bsize) {
  MACROBLOCK *const x = &cpi->mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  // Macroblock (16x16) coordinates: two 8x8 mode-info units per MB.
  const int mb_row = mi_row >> 1;
  const int mb_col = mi_col >> 1;
  const int idx_map = mb_row * cm->mb_cols + mb_col;
  const struct segmentation *const seg = &cm->seg;

  set_skip_context(xd, mi_row, mi_col);

  // Activity map pointer
  x->mb_activity_ptr = &cpi->mb_activity_map[idx_map];
  x->in_active_map = check_active_map(cpi, x, mi_row, mi_col, bsize);

  set_modeinfo_offsets(cm, xd, mi_row, mi_col);

  mbmi = &xd->mi[0]->mbmi;

  // Set up destination pointers.
  vp9_setup_dst_planes(xd, get_frame_new_buffer(cm), mi_row, mi_col);

  // Set up limit values for MV components.
  // Mv beyond the range do not produce new/different prediction block.
  x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND;
  x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND;

  // Set up distance of MB to edge of frame in 1/8th pel units.
  // Block position must be aligned to its own size.
  assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
  set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width,
                 cm->mi_rows, cm->mi_cols);

  // Set up source buffers.
  vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);

  // R/D setup.
  x->rddiv = cpi->RDDIV;
  x->rdmult = cpi->RDMULT;

  // Setup segment ID.
  if (seg->enabled) {
    // Under variance AQ the segment id is assigned elsewhere; only read it
    // from the segmentation map for the other AQ modes.
    if (cpi->oxcf.aq_mode != VARIANCE_AQ) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    vp9_init_plane_quantizers(cpi, x);

    x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
  } else {
    // Segmentation disabled: single segment, global encode breakout.
    mbmi->segment_id = 0;
    x->encode_breakout = cpi->encode_breakout;
  }
}
| 267 |
| 268 static void duplicate_mode_info_in_sb(VP9_COMMON * const cm, |
| 269 MACROBLOCKD *const xd, |
| 270 int mi_row, |
| 271 int mi_col, |
| 272 BLOCK_SIZE bsize) { |
| 273 const int block_width = num_8x8_blocks_wide_lookup[bsize]; |
| 274 const int block_height = num_8x8_blocks_high_lookup[bsize]; |
| 275 int i, j; |
| 276 for (j = 0; j < block_height; ++j) |
| 277 for (i = 0; i < block_width; ++i) { |
| 278 if (mi_row + j < cm->mi_rows && mi_col + i < cm->mi_cols) |
| 279 xd->mi[j * xd->mi_stride + i] = xd->mi[0]; |
| 280 } |
| 281 } |
| 282 |
| 283 static void set_block_size(VP9_COMP * const cpi, |
| 284 const TileInfo *const tile, |
| 285 int mi_row, int mi_col, |
| 286 BLOCK_SIZE bsize) { |
| 287 if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) { |
| 288 MACROBLOCKD *const xd = &cpi->mb.e_mbd; |
| 289 set_modeinfo_offsets(&cpi->common, xd, mi_row, mi_col); |
| 290 xd->mi[0]->mbmi.sb_type = bsize; |
| 291 duplicate_mode_info_in_sb(&cpi->common, xd, mi_row, mi_col, bsize); |
| 292 } |
| 293 } |
| 294 |
// Raw variance statistics accumulated over one block of samples.
typedef struct {
  int64_t sum_square_error;  // sum of squared sample differences
  int64_t sum_error;         // sum of sample differences
  int count;                 // number of samples accumulated
  int variance;              // variance scaled by 256; see fill_variance()
} var;

// Variance of a block as a whole and of its two-way splits.
typedef struct {
  var none;     // the full block
  var horz[2];  // top / bottom halves
  var vert[2];  // left / right halves
} partition_variance;

// One 8x8 node of the variance tree. NOTE(review): split[] would hold the
// four 4x4 leaves, but the visible code only ever fills 8x8 leaves via
// v16x16.split -- confirm before relying on these entries.
typedef struct {
  partition_variance part_variances;
  var split[4];
} v8x8;

// 16x16 node: four 8x8 children.
typedef struct {
  partition_variance part_variances;
  v8x8 split[4];
} v16x16;

// 32x32 node: four 16x16 children.
typedef struct {
  partition_variance part_variances;
  v16x16 split[4];
} v32x32;

// 64x64 root: four 32x32 children.
typedef struct {
  partition_variance part_variances;
  v32x32 split[4];
} v64x64;

// Type-erased view of one tree node: its partition variances plus
// pointers to the 'none' variance of each of its four children.
typedef struct {
  partition_variance *part_variances;
  var *split[4];
} variance_node;

// Tree levels (currently unused by the visible code).
typedef enum {
  V16X16,
  V32X32,
  V64X64,
} TREE_LEVEL;
| 338 |
| 339 static void tree_to_node(void *data, BLOCK_SIZE bsize, variance_node *node) { |
| 340 int i; |
| 341 switch (bsize) { |
| 342 case BLOCK_64X64: { |
| 343 v64x64 *vt = (v64x64 *) data; |
| 344 node->part_variances = &vt->part_variances; |
| 345 for (i = 0; i < 4; i++) |
| 346 node->split[i] = &vt->split[i].part_variances.none; |
| 347 break; |
| 348 } |
| 349 case BLOCK_32X32: { |
| 350 v32x32 *vt = (v32x32 *) data; |
| 351 node->part_variances = &vt->part_variances; |
| 352 for (i = 0; i < 4; i++) |
| 353 node->split[i] = &vt->split[i].part_variances.none; |
| 354 break; |
| 355 } |
| 356 case BLOCK_16X16: { |
| 357 v16x16 *vt = (v16x16 *) data; |
| 358 node->part_variances = &vt->part_variances; |
| 359 for (i = 0; i < 4; i++) |
| 360 node->split[i] = &vt->split[i].part_variances.none; |
| 361 break; |
| 362 } |
| 363 case BLOCK_8X8: { |
| 364 v8x8 *vt = (v8x8 *) data; |
| 365 node->part_variances = &vt->part_variances; |
| 366 for (i = 0; i < 4; i++) |
| 367 node->split[i] = &vt->split[i]; |
| 368 break; |
| 369 } |
| 370 default: { |
| 371 assert(0); |
| 372 } |
| 373 } |
| 374 } |
| 375 |
| 376 // Set variance values given sum square error, sum error, count. |
| 377 static void fill_variance(int64_t s2, int64_t s, int c, var *v) { |
| 378 v->sum_square_error = s2; |
| 379 v->sum_error = s; |
| 380 v->count = c; |
| 381 if (c > 0) |
| 382 v->variance = (int)(256 * |
| 383 (v->sum_square_error - v->sum_error * v->sum_error / |
| 384 v->count) / v->count); |
| 385 else |
| 386 v->variance = 0; |
| 387 } |
| 388 |
| 389 void sum_2_variances(const var *a, const var *b, var *r) { |
| 390 fill_variance(a->sum_square_error + b->sum_square_error, |
| 391 a->sum_error + b->sum_error, a->count + b->count, r); |
| 392 } |
| 393 |
| 394 static void fill_variance_tree(void *data, BLOCK_SIZE bsize) { |
| 395 variance_node node; |
| 396 tree_to_node(data, bsize, &node); |
| 397 sum_2_variances(node.split[0], node.split[1], &node.part_variances->horz[0]); |
| 398 sum_2_variances(node.split[2], node.split[3], &node.part_variances->horz[1]); |
| 399 sum_2_variances(node.split[0], node.split[2], &node.part_variances->vert[0]); |
| 400 sum_2_variances(node.split[1], node.split[3], &node.part_variances->vert[1]); |
| 401 sum_2_variances(&node.part_variances->vert[0], &node.part_variances->vert[1], |
| 402 &node.part_variances->none); |
| 403 } |
| 404 |
// Try to settle the partition for the block at (mi_row, mi_col) at size
// 'bsize' using the precomputed variance tree node 'data'. Returns 1 and
// writes the partition if a no-split, vertical-split or horizontal-split
// choice passes the variance threshold (and fits the frame); returns 0 if
// the caller must recurse into the four quadrants instead.
static int set_vt_partitioning(VP9_COMP *cpi,
                               void *data,
                               const TileInfo *const tile,
                               BLOCK_SIZE bsize,
                               int mi_row,
                               int mi_col,
                               int mi_size) {
  VP9_COMMON * const cm = &cpi->common;
  variance_node vt;
  const int block_width = num_8x8_blocks_wide_lookup[bsize];
  const int block_height = num_8x8_blocks_high_lookup[bsize];
  // TODO(debargha): Choose this more intelligently.
  // Threshold scales with the frame quantizer: noisier quantization
  // tolerates more variance before splitting.
  const int64_t threshold_multiplier = 25;
  int64_t threshold = threshold_multiplier * cpi->common.base_qindex;
  assert(block_height == block_width);

  tree_to_node(data, bsize, &vt);

  // Split none is available only if we have more than half a block size
  // in width and height inside the visible image.
  if (mi_col + block_width / 2 < cm->mi_cols &&
      mi_row + block_height / 2 < cm->mi_rows &&
      vt.part_variances->none.variance < threshold) {
    set_block_size(cpi, tile, mi_row, mi_col, bsize);
    return 1;
  }

  // Vertical split is available on all but the bottom border.
  if (mi_row + block_height / 2 < cm->mi_rows &&
      vt.part_variances->vert[0].variance < threshold &&
      vt.part_variances->vert[1].variance < threshold) {
    BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT);
    set_block_size(cpi, tile, mi_row, mi_col, subsize);
    set_block_size(cpi, tile, mi_row, mi_col + block_width / 2, subsize);
    return 1;
  }

  // Horizontal split is available on all but the right border.
  if (mi_col + block_width / 2 < cm->mi_cols &&
      vt.part_variances->horz[0].variance < threshold &&
      vt.part_variances->horz[1].variance < threshold) {
    BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ);
    set_block_size(cpi, tile, mi_row, mi_col, subsize);
    set_block_size(cpi, tile, mi_row + block_height / 2, mi_col, subsize);
    return 1;
  }
  // No choice passed: caller recurses into the quadrants.
  return 0;
}
| 453 |
| 454 // TODO(debargha): Fix this function and make it work as expected. |
| 455 static void choose_partitioning(VP9_COMP *cpi, |
| 456 const TileInfo *const tile, |
| 457 int mi_row, int mi_col) { |
| 458 VP9_COMMON * const cm = &cpi->common; |
| 459 MACROBLOCK *x = &cpi->mb; |
| 460 MACROBLOCKD *xd = &cpi->mb.e_mbd; |
| 461 |
| 462 int i, j, k; |
| 463 v64x64 vt; |
| 464 uint8_t *s; |
| 465 const uint8_t *d; |
| 466 int sp; |
| 467 int dp; |
| 468 int pixels_wide = 64, pixels_high = 64; |
| 469 int_mv nearest_mv, near_mv; |
| 470 const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME); |
| 471 const struct scale_factors *const sf = &cm->frame_refs[LAST_FRAME - 1].sf; |
| 472 |
| 473 vp9_zero(vt); |
| 474 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64); |
| 475 |
| 476 if (xd->mb_to_right_edge < 0) |
| 477 pixels_wide += (xd->mb_to_right_edge >> 3); |
| 478 if (xd->mb_to_bottom_edge < 0) |
| 479 pixels_high += (xd->mb_to_bottom_edge >> 3); |
| 480 |
| 481 s = x->plane[0].src.buf; |
| 482 sp = x->plane[0].src.stride; |
| 483 |
| 484 if (cm->frame_type != KEY_FRAME) { |
| 485 vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col, sf); |
| 486 |
| 487 xd->mi[0]->mbmi.ref_frame[0] = LAST_FRAME; |
| 488 xd->mi[0]->mbmi.sb_type = BLOCK_64X64; |
| 489 vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, |
| 490 xd->mi[0]->mbmi.ref_mvs[LAST_FRAME], |
| 491 &nearest_mv, &near_mv); |
| 492 |
| 493 xd->mi[0]->mbmi.mv[0] = nearest_mv; |
| 494 vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_64X64); |
| 495 |
| 496 d = xd->plane[0].dst.buf; |
| 497 dp = xd->plane[0].dst.stride; |
| 498 } else { |
| 499 d = VP9_VAR_OFFS; |
| 500 dp = 0; |
| 501 } |
| 502 |
| 503 // Fill in the entire tree of 8x8 variances for splits. |
| 504 for (i = 0; i < 4; i++) { |
| 505 const int x32_idx = ((i & 1) << 5); |
| 506 const int y32_idx = ((i >> 1) << 5); |
| 507 for (j = 0; j < 4; j++) { |
| 508 const int x16_idx = x32_idx + ((j & 1) << 4); |
| 509 const int y16_idx = y32_idx + ((j >> 1) << 4); |
| 510 v16x16 *vst = &vt.split[i].split[j]; |
| 511 for (k = 0; k < 4; k++) { |
| 512 int x_idx = x16_idx + ((k & 1) << 3); |
| 513 int y_idx = y16_idx + ((k >> 1) << 3); |
| 514 unsigned int sse = 0; |
| 515 int sum = 0; |
| 516 if (x_idx < pixels_wide && y_idx < pixels_high) |
| 517 vp9_get_sse_sum_8x8(s + y_idx * sp + x_idx, sp, |
| 518 d + y_idx * dp + x_idx, dp, &sse, &sum); |
| 519 fill_variance(sse, sum, 64, &vst->split[k].part_variances.none); |
| 520 } |
| 521 } |
| 522 } |
| 523 // Fill the rest of the variance tree by summing split partition values. |
| 524 for (i = 0; i < 4; i++) { |
| 525 for (j = 0; j < 4; j++) { |
| 526 fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16); |
| 527 } |
| 528 fill_variance_tree(&vt.split[i], BLOCK_32X32); |
| 529 } |
| 530 fill_variance_tree(&vt, BLOCK_64X64); |
| 531 |
| 532 // Now go through the entire structure, splitting every block size until |
| 533 // we get to one that's got a variance lower than our threshold, or we |
| 534 // hit 8x8. |
| 535 if (!set_vt_partitioning(cpi, &vt, tile, BLOCK_64X64, |
| 536 mi_row, mi_col, 8)) { |
| 537 for (i = 0; i < 4; ++i) { |
| 538 const int x32_idx = ((i & 1) << 2); |
| 539 const int y32_idx = ((i >> 1) << 2); |
| 540 if (!set_vt_partitioning(cpi, &vt.split[i], tile, BLOCK_32X32, |
| 541 (mi_row + y32_idx), (mi_col + x32_idx), 4)) { |
| 542 for (j = 0; j < 4; ++j) { |
| 543 const int x16_idx = ((j & 1) << 1); |
| 544 const int y16_idx = ((j >> 1) << 1); |
| 545 // NOTE: This is a temporary hack to disable 8x8 partitions, |
| 546 // since it works really bad - possibly due to a bug |
| 547 #define DISABLE_8X8_VAR_BASED_PARTITION |
| 548 #ifdef DISABLE_8X8_VAR_BASED_PARTITION |
| 549 if (mi_row + y32_idx + y16_idx + 1 < cm->mi_rows && |
| 550 mi_row + x32_idx + x16_idx + 1 < cm->mi_cols) { |
| 551 set_block_size(cpi, tile, |
| 552 (mi_row + y32_idx + y16_idx), |
| 553 (mi_col + x32_idx + x16_idx), |
| 554 BLOCK_16X16); |
| 555 } else { |
| 556 for (k = 0; k < 4; ++k) { |
| 557 const int x8_idx = (k & 1); |
| 558 const int y8_idx = (k >> 1); |
| 559 set_block_size(cpi, tile, |
| 560 (mi_row + y32_idx + y16_idx + y8_idx), |
| 561 (mi_col + x32_idx + x16_idx + x8_idx), |
| 562 BLOCK_8X8); |
| 563 } |
| 564 } |
| 565 #else |
| 566 if (!set_vt_partitioning(cpi, &vt.split[i].split[j], tile, |
| 567 BLOCK_16X16, |
| 568 (mi_row + y32_idx + y16_idx), |
| 569 (mi_col + x32_idx + x16_idx), 2)) { |
| 570 for (k = 0; k < 4; ++k) { |
| 571 const int x8_idx = (k & 1); |
| 572 const int y8_idx = (k >> 1); |
| 573 set_block_size(cpi, tile, |
| 574 (mi_row + y32_idx + y16_idx + y8_idx), |
| 575 (mi_col + x32_idx + x16_idx + x8_idx), |
| 576 BLOCK_8X8); |
| 577 } |
| 578 } |
| 579 #endif |
| 580 } |
| 581 } |
| 582 } |
| 583 } |
| 584 } |
| 585 |
152 // Original activity measure from Tim T's code. | 586 // Original activity measure from Tim T's code. |
153 static unsigned int tt_activity_measure(MACROBLOCK *x) { | 587 static unsigned int tt_activity_measure(MACROBLOCK *x) { |
154 unsigned int sse; | 588 unsigned int sse; |
155 /* TODO: This could also be done over smaller areas (8x8), but that would | 589 // TODO: This could also be done over smaller areas (8x8), but that would |
156 * require extensive changes elsewhere, as lambda is assumed to be fixed | 590 // require extensive changes elsewhere, as lambda is assumed to be fixed |
157 * over an entire MB in most of the code. | 591 // over an entire MB in most of the code. |
158 * Another option is to compute four 8x8 variances, and pick a single | 592 // Another option is to compute four 8x8 variances, and pick a single |
159 * lambda using a non-linear combination (e.g., the smallest, or second | 593 // lambda using a non-linear combination (e.g., the smallest, or second |
160 * smallest, etc.). | 594 // smallest, etc.). |
161 */ | 595 const unsigned int act = vp9_variance16x16(x->plane[0].src.buf, |
162 unsigned int act = vp9_variance16x16(x->plane[0].src.buf, | 596 x->plane[0].src.stride, |
163 x->plane[0].src.stride, | 597 VP9_VAR_OFFS, 0, &sse) << 4; |
164 VP9_VAR_OFFS, 0, &sse) << 4; | |
165 // If the region is flat, lower the activity some more. | 598 // If the region is flat, lower the activity some more. |
166 if (act < (8 << 12)) | 599 return act < (8 << 12) ? MIN(act, 5 << 12) : act; |
167 act = MIN(act, 5 << 12); | |
168 | |
169 return act; | |
170 } | 600 } |
171 | 601 |
// Stub for alternative experimental activity measures.
// NOTE(review): delegates to vp9_encode_intra(); presumably returns an
// activity proxy derived from intra-coding the block (DC-pred only when
// use_dc_pred is set) -- confirm against vp9_encode_intra's definition.
static unsigned int alt_activity_measure(MACROBLOCK *x, int use_dc_pred) {
  return vp9_encode_intra(x, use_dc_pred);
}
176 | 606 |
177 // Measure the activity of the current macroblock | 607 // Measure the activity of the current macroblock |
178 // What we measure here is TBD so abstracted to this function | 608 // What we measure here is TBD so abstracted to this function |
179 #define ALT_ACT_MEASURE 1 | 609 #define ALT_ACT_MEASURE 1 |
(...skipping 200 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
380 | 810 |
381 x->rdmult = (unsigned int) (((int64_t) x->rdmult * b + (a >> 1)) / a); | 811 x->rdmult = (unsigned int) (((int64_t) x->rdmult * b + (a >> 1)) / a); |
382 x->errorperbit = x->rdmult * 100 / (110 * x->rddiv); | 812 x->errorperbit = x->rdmult * 100 / (110 * x->rddiv); |
383 x->errorperbit += (x->errorperbit == 0); | 813 x->errorperbit += (x->errorperbit == 0); |
384 #endif | 814 #endif |
385 | 815 |
386 // Activity based Zbin adjustment | 816 // Activity based Zbin adjustment |
387 adjust_act_zbin(cpi, x); | 817 adjust_act_zbin(cpi, x); |
388 } | 818 } |
389 | 819 |
// Select a segment for the current SB64.
// Compares the projected rate for this SB64 against its pro-rated share
// of the frame's SB64 target rate; cheap blocks (under a quarter of the
// target) go to segment 1, everything else to segment 0. Also records a
// clamped complexity metric (64 == on target) for every mode-info unit
// the SB64 covers.
static void select_in_frame_q_segment(VP9_COMP *cpi,
                                      int mi_row, int mi_col,
                                      int output_enabled, int projected_rate) {
  VP9_COMMON *const cm = &cpi->common;

  const int mi_offset = mi_row * cm->mi_cols + mi_col;
  const int bw = num_8x8_blocks_wide_lookup[BLOCK_64X64];
  const int bh = num_8x8_blocks_high_lookup[BLOCK_64X64];
  // Visible extent of this SB64 (may be clipped at the frame edge).
  const int xmis = MIN(cm->mi_cols - mi_col, bw);
  const int ymis = MIN(cm->mi_rows - mi_row, bh);
  int complexity_metric = 64;
  int x, y;

  unsigned char segment;

  if (!output_enabled) {
    segment = 0;
  } else {
    // Rate depends on fraction of a SB64 in frame (xmis * ymis / bw * bh).
    // It is converted to bits * 256 units
    const int target_rate = (cpi->rc.sb64_target_rate * xmis * ymis * 256) /
                            (bw * bh);

    if (projected_rate < (target_rate / 4)) {
      segment = 1;
    } else {
      segment = 0;
    }

    if (target_rate > 0) {
      complexity_metric =
        clamp((int)((projected_rate * 64) / target_rate), 16, 255);
    }
  }

  // Fill in the entries in the segment map corresponding to this SB64
  for (y = 0; y < ymis; y++) {
    for (x = 0; x < xmis; x++) {
      cpi->segmentation_map[mi_offset + y * cm->mi_cols + x] = segment;
      cpi->complexity_map[mi_offset + y * cm->mi_cols + x] =
          (unsigned char)complexity_metric;
    }
  }
}
435 | |
436 static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx, | 820 static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx, |
437 BLOCK_SIZE bsize, int output_enabled) { | 821 int mi_row, int mi_col, BLOCK_SIZE bsize, |
| 822 int output_enabled) { |
438 int i, x_idx, y; | 823 int i, x_idx, y; |
439 VP9_COMMON *const cm = &cpi->common; | 824 VP9_COMMON *const cm = &cpi->common; |
440 MACROBLOCK *const x = &cpi->mb; | 825 MACROBLOCK *const x = &cpi->mb; |
441 MACROBLOCKD *const xd = &x->e_mbd; | 826 MACROBLOCKD *const xd = &x->e_mbd; |
442 struct macroblock_plane *const p = x->plane; | 827 struct macroblock_plane *const p = x->plane; |
443 struct macroblockd_plane *const pd = xd->plane; | 828 struct macroblockd_plane *const pd = xd->plane; |
444 MODE_INFO *mi = &ctx->mic; | 829 MODE_INFO *mi = &ctx->mic; |
445 MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi; | 830 MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi; |
446 MODE_INFO *mi_addr = xd->mi_8x8[0]; | 831 MODE_INFO *mi_addr = xd->mi[0]; |
| 832 const struct segmentation *const seg = &cm->seg; |
447 | 833 |
448 const int mis = cm->mode_info_stride; | 834 const int mis = cm->mi_stride; |
449 const int mi_width = num_8x8_blocks_wide_lookup[bsize]; | 835 const int mi_width = num_8x8_blocks_wide_lookup[bsize]; |
450 const int mi_height = num_8x8_blocks_high_lookup[bsize]; | 836 const int mi_height = num_8x8_blocks_high_lookup[bsize]; |
451 int max_plane; | 837 int max_plane; |
452 | 838 |
453 assert(mi->mbmi.mode < MB_MODE_COUNT); | |
454 assert(mi->mbmi.ref_frame[0] < MAX_REF_FRAMES); | |
455 assert(mi->mbmi.ref_frame[1] < MAX_REF_FRAMES); | |
456 assert(mi->mbmi.sb_type == bsize); | 839 assert(mi->mbmi.sb_type == bsize); |
457 | 840 |
458 // For in frame adaptive Q copy over the chosen segment id into the | 841 *mi_addr = *mi; |
459 // mode innfo context for the chosen mode / partition. | |
460 if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && output_enabled) | |
461 mi->mbmi.segment_id = xd->mi_8x8[0]->mbmi.segment_id; | |
462 | 842 |
463 *mi_addr = *mi; | 843 // If segmentation in use |
| 844 if (seg->enabled && output_enabled) { |
| 845 // For in frame complexity AQ copy the segment id from the segment map. |
| 846 if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) { |
| 847 const uint8_t *const map = seg->update_map ? cpi->segmentation_map |
| 848 : cm->last_frame_seg_map; |
| 849 mi_addr->mbmi.segment_id = |
| 850 vp9_get_segment_id(cm, map, bsize, mi_row, mi_col); |
| 851 } |
| 852 // Else for cyclic refresh mode update the segment map, set the segment id |
| 853 // and then update the quantizer. |
| 854 else if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) { |
| 855 vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, |
| 856 mi_row, mi_col, bsize, 1); |
| 857 vp9_init_plane_quantizers(cpi, x); |
| 858 } |
| 859 } |
464 | 860 |
465 max_plane = is_inter_block(mbmi) ? MAX_MB_PLANE : 1; | 861 max_plane = is_inter_block(mbmi) ? MAX_MB_PLANE : 1; |
466 for (i = 0; i < max_plane; ++i) { | 862 for (i = 0; i < max_plane; ++i) { |
467 p[i].coeff = ctx->coeff_pbuf[i][1]; | 863 p[i].coeff = ctx->coeff_pbuf[i][1]; |
468 p[i].qcoeff = ctx->qcoeff_pbuf[i][1]; | 864 p[i].qcoeff = ctx->qcoeff_pbuf[i][1]; |
469 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1]; | 865 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1]; |
470 p[i].eobs = ctx->eobs_pbuf[i][1]; | 866 p[i].eobs = ctx->eobs_pbuf[i][1]; |
471 } | 867 } |
472 | 868 |
473 for (i = max_plane; i < MAX_MB_PLANE; ++i) { | 869 for (i = max_plane; i < MAX_MB_PLANE; ++i) { |
474 p[i].coeff = ctx->coeff_pbuf[i][2]; | 870 p[i].coeff = ctx->coeff_pbuf[i][2]; |
475 p[i].qcoeff = ctx->qcoeff_pbuf[i][2]; | 871 p[i].qcoeff = ctx->qcoeff_pbuf[i][2]; |
476 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][2]; | 872 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][2]; |
477 p[i].eobs = ctx->eobs_pbuf[i][2]; | 873 p[i].eobs = ctx->eobs_pbuf[i][2]; |
478 } | 874 } |
479 | 875 |
480 // Restore the coding context of the MB to that that was in place | 876 // Restore the coding context of the MB to that that was in place |
481 // when the mode was picked for it | 877 // when the mode was picked for it |
482 for (y = 0; y < mi_height; y++) | 878 for (y = 0; y < mi_height; y++) |
483 for (x_idx = 0; x_idx < mi_width; x_idx++) | 879 for (x_idx = 0; x_idx < mi_width; x_idx++) |
484 if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx | 880 if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx |
485 && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) { | 881 && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) { |
486 xd->mi_8x8[x_idx + y * mis] = mi_addr; | 882 xd->mi[x_idx + y * mis] = mi_addr; |
487 } | 883 } |
488 | 884 |
489 if ((cpi->oxcf.aq_mode == VARIANCE_AQ) || | 885 if (cpi->oxcf.aq_mode) |
490 (cpi->oxcf.aq_mode == COMPLEXITY_AQ)) { | |
491 vp9_init_plane_quantizers(cpi, x); | 886 vp9_init_plane_quantizers(cpi, x); |
492 } | |
493 | 887 |
494 // FIXME(rbultje) I'm pretty sure this should go to the end of this block | 888 // FIXME(rbultje) I'm pretty sure this should go to the end of this block |
495 // (i.e. after the output_enabled) | 889 // (i.e. after the output_enabled) |
496 if (bsize < BLOCK_32X32) { | 890 if (bsize < BLOCK_32X32) { |
497 if (bsize < BLOCK_16X16) | 891 if (bsize < BLOCK_16X16) |
498 ctx->tx_rd_diff[ALLOW_16X16] = ctx->tx_rd_diff[ALLOW_8X8]; | 892 ctx->tx_rd_diff[ALLOW_16X16] = ctx->tx_rd_diff[ALLOW_8X8]; |
499 ctx->tx_rd_diff[ALLOW_32X32] = ctx->tx_rd_diff[ALLOW_16X16]; | 893 ctx->tx_rd_diff[ALLOW_32X32] = ctx->tx_rd_diff[ALLOW_16X16]; |
500 } | 894 } |
501 | 895 |
502 if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) { | 896 if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) { |
(...skipping 28 matching lines...) Expand all Loading... |
531 THR_TM /*TM_PRED*/, | 925 THR_TM /*TM_PRED*/, |
532 }; | 926 }; |
533 ++cpi->mode_chosen_counts[kf_mode_index[mbmi->mode]]; | 927 ++cpi->mode_chosen_counts[kf_mode_index[mbmi->mode]]; |
534 } else { | 928 } else { |
535 // Note how often each mode chosen as best | 929 // Note how often each mode chosen as best |
536 ++cpi->mode_chosen_counts[ctx->best_mode_index]; | 930 ++cpi->mode_chosen_counts[ctx->best_mode_index]; |
537 } | 931 } |
538 #endif | 932 #endif |
539 if (!frame_is_intra_only(cm)) { | 933 if (!frame_is_intra_only(cm)) { |
540 if (is_inter_block(mbmi)) { | 934 if (is_inter_block(mbmi)) { |
541 if (mbmi->sb_type < BLOCK_8X8 || mbmi->mode == NEWMV) { | 935 vp9_update_mv_count(cm, xd); |
542 MV best_mv[2]; | |
543 for (i = 0; i < 1 + has_second_ref(mbmi); ++i) | |
544 best_mv[i] = mbmi->ref_mvs[mbmi->ref_frame[i]][0].as_mv; | |
545 vp9_update_mv_count(cm, xd, best_mv); | |
546 } | |
547 | 936 |
548 if (cm->interp_filter == SWITCHABLE) { | 937 if (cm->interp_filter == SWITCHABLE) { |
549 const int ctx = vp9_get_pred_context_switchable_interp(xd); | 938 const int ctx = vp9_get_pred_context_switchable_interp(xd); |
550 ++cm->counts.switchable_interp[ctx][mbmi->interp_filter]; | 939 ++cm->counts.switchable_interp[ctx][mbmi->interp_filter]; |
551 } | 940 } |
552 } | 941 } |
553 | 942 |
554 cpi->rd_comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff; | 943 cpi->rd_comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff; |
555 cpi->rd_comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff; | 944 cpi->rd_comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff; |
556 cpi->rd_comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff; | 945 cpi->rd_comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff; |
(...skipping 13 matching lines...) Expand all Loading... |
570 | 959 |
571 // Set current frame pointer. | 960 // Set current frame pointer. |
572 x->e_mbd.cur_buf = src; | 961 x->e_mbd.cur_buf = src; |
573 | 962 |
574 for (i = 0; i < MAX_MB_PLANE; i++) | 963 for (i = 0; i < MAX_MB_PLANE; i++) |
575 setup_pred_plane(&x->plane[i].src, buffers[i], strides[i], mi_row, mi_col, | 964 setup_pred_plane(&x->plane[i].src, buffers[i], strides[i], mi_row, mi_col, |
576 NULL, x->e_mbd.plane[i].subsampling_x, | 965 NULL, x->e_mbd.plane[i].subsampling_x, |
577 x->e_mbd.plane[i].subsampling_y); | 966 x->e_mbd.plane[i].subsampling_y); |
578 } | 967 } |
579 | 968 |
// Prepares the encoder's per-block state (x/xd) for coding the block of size
// `bsize` whose top-left 8x8 unit is at (mi_row, mi_col): mode-info grid
// pointers, destination/source plane pointers, MV search limits, R/D
// multipliers and the block's segment ID.  Must be called before any RD
// search or reconstruction of the block.
static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
                        int mi_row, int mi_col, BLOCK_SIZE bsize) {
  MACROBLOCK *const x = &cpi->mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi;
  // Linear offset of this block's top-left 8x8 unit in the mode-info grid.
  const int idx_str = xd->mode_info_stride * mi_row + mi_col;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  // 16x16 macroblock coordinates (mi units are 8x8, hence >> 1).
  const int mb_row = mi_row >> 1;
  const int mb_col = mi_col >> 1;
  const int idx_map = mb_row * cm->mb_cols + mb_col;
  const struct segmentation *const seg = &cm->seg;

  set_skip_context(xd, cpi->above_context, cpi->left_context, mi_row, mi_col);

  // Activity map pointer
  x->mb_activity_ptr = &cpi->mb_activity_map[idx_map];
  x->active_ptr = cpi->active_map + idx_map;

  // Point the grid views at this block in the current and previous frames.
  xd->mi_8x8 = cm->mi_grid_visible + idx_str;
  xd->prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str;

  xd->last_mi = cm->prev_mi ? xd->prev_mi_8x8[0] : NULL;

  xd->mi_8x8[0] = cm->mi + idx_str;

  mbmi = &xd->mi_8x8[0]->mbmi;

  // Set up destination pointers
  setup_dst_planes(xd, get_frame_new_buffer(cm), mi_row, mi_col);

  // Set up limit values for MV components
  // mv beyond the range do not produce new/different prediction block
  x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND;
  x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND;

  // Set up distance of MB to edge of frame in 1/8th pel units.
  // The block must be aligned to its own size within the mi grid.
  assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
  set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width,
                 cm->mi_rows, cm->mi_cols);

  /* set up source buffers */
  vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);

  /* R/D setup */
  x->rddiv = cpi->RDDIV;
  x->rdmult = cpi->RDMULT;

  /* segment ID */
  if (seg->enabled) {
    // Under VARIANCE_AQ the segment ID is assigned later from block energy,
    // so only read it from the map for the other modes.
    if (cpi->oxcf.aq_mode != VARIANCE_AQ) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    vp9_init_plane_quantizers(cpi, x);

    // NOTE(review): `seg->enabled` is already known true here; the re-check
    // in this condition is redundant.
    if (seg->enabled && cpi->seg0_cnt > 0 &&
        !vp9_segfeature_active(seg, 0, SEG_LVL_REF_FRAME) &&
        vp9_segfeature_active(seg, 1, SEG_LVL_REF_FRAME)) {
      cpi->seg0_progress = (cpi->seg0_idx << 16) / cpi->seg0_cnt;
    } else {
      // Estimate encode progress through the frame in Q16 fixed point.
      // NOTE(review): these locals `y`/`x` shadow the outer MACROBLOCK *x;
      // consider renaming if this code is revived.
      const int y = mb_row & ~3;
      const int x = mb_col & ~3;
      const int p16 = ((mb_row & 1) << 1) + (mb_col & 1);
      const int p32 = ((mb_row & 2) << 2) + ((mb_col & 2) << 1);
      const int tile_progress = tile->mi_col_start * cm->mb_rows >> 1;
      const int mb_cols = (tile->mi_col_end - tile->mi_col_start) >> 1;

      cpi->seg0_progress = ((y * mb_cols + x * 4 + p32 + p16 + tile_progress)
                            << 16) / cm->MBs;
    }

    x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
  } else {
    mbmi->segment_id = 0;
    x->encode_breakout = cpi->encode_breakout;
  }
}
662 | |
// Runs the rate-distortion mode search for the block at (mi_row, mi_col) of
// size `bsize`, writing the chosen rate/distortion into *totalrate and
// *totaldist and the mode decision into `ctx`.  Adaptive-quantization modes
// temporarily scale x->rdmult around the search and restore it afterwards.
static void rd_pick_sb_modes(VP9_COMP *cpi, const TileInfo *const tile,
                             int mi_row, int mi_col,
                             int *totalrate, int64_t *totaldist,
                             BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
                             int64_t best_rd) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi;
  struct macroblock_plane *const p = x->plane;
  struct macroblockd_plane *const pd = xd->plane;
  const AQ_MODE aq_mode = cpi->oxcf.aq_mode;
  int i, orig_rdmult;
  double rdmult_ratio;

  vp9_clear_system_state();
  rdmult_ratio = 1.0;  // avoid uninitialized warnings

  // Use the lower precision, but faster, 32x32 fdct for mode selection.
  x->use_lp32x32fdct = 1;

  if (bsize < BLOCK_8X8) {
    // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
    // there is nothing to be done.
    if (x->ab_index != 0) {
      *totalrate = 0;
      *totaldist = 0;
      return;
    }
  }

  set_offsets(cpi, tile, mi_row, mi_col, bsize);
  mbmi = &xd->mi[0]->mbmi;
  mbmi->sb_type = bsize;

  // Point the coefficient buffers at slot 0 of the pick-mode context.
  for (i = 0; i < MAX_MB_PLANE; ++i) {
    p[i].coeff = ctx->coeff_pbuf[i][0];
    p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
    p[i].eobs = ctx->eobs_pbuf[i][0];
  }
  ctx->is_coded = 0;
  x->skip_recode = 0;

  // Set to zero to make sure we do not use the previous encoded frame stats
  mbmi->skip = 0;

  x->source_variance = get_sby_perpixel_variance(cpi, x, bsize);

  if (aq_mode == VARIANCE_AQ) {
    // Pick the segment (and therefore quantizer) from the block energy on
    // key/ARF/golden updates; otherwise reuse the segmentation map.
    const int energy = bsize <= BLOCK_16X16 ? x->mb_energy
                                            : vp9_block_energy(cpi, x, bsize);
    if (cm->frame_type == KEY_FRAME ||
        cpi->refresh_alt_ref_frame ||
        (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
      mbmi->segment_id = vp9_vaq_segment_id(energy);
    } else {
      const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
                                                    : cm->last_frame_seg_map;
      mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
    }

    rdmult_ratio = vp9_vaq_rdmult_ratio(energy);
    vp9_init_plane_quantizers(cpi, x);
  }

  // Save rdmult before it might be changed, so it can be restored later.
  orig_rdmult = x->rdmult;
  if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
    activity_masking(cpi, x);

  // Per-AQ-mode adjustment of the RD multiplier for this block only.
  if (aq_mode == VARIANCE_AQ) {
    vp9_clear_system_state();
    x->rdmult = (int)round(x->rdmult * rdmult_ratio);
  } else if (aq_mode == COMPLEXITY_AQ) {
    const int mi_offset = mi_row * cm->mi_cols + mi_col;
    unsigned char complexity = cpi->complexity_map[mi_offset];
    const int is_edge = (mi_row <= 1) || (mi_row >= (cm->mi_rows - 2)) ||
                        (mi_col <= 1) || (mi_col >= (cm->mi_cols - 2));
    if (!is_edge && (complexity > 128))
      x->rdmult += ((x->rdmult * (complexity - 128)) / 256);
  } else if (aq_mode == CYCLIC_REFRESH_AQ) {
    const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
                                                  : cm->last_frame_seg_map;
    // If segment 1, use rdmult for that segment.
    if (vp9_get_segment_id(cm, map, bsize, mi_row, mi_col))
      x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
  }

  // Find best coding mode & reconstruct the MB so it is available
  // as a predictor for MBs that follow in the SB
  if (frame_is_intra_only(cm)) {
    vp9_rd_pick_intra_mode_sb(cpi, x, totalrate, totaldist, bsize, ctx,
                              best_rd);
  } else {
    if (bsize >= BLOCK_8X8)
      vp9_rd_pick_inter_mode_sb(cpi, x, tile, mi_row, mi_col,
                                totalrate, totaldist, bsize, ctx, best_rd);
    else
      vp9_rd_pick_inter_mode_sub8x8(cpi, x, tile, mi_row, mi_col, totalrate,
                                    totaldist, bsize, ctx, best_rd);
  }

  // Undo the block-local rdmult change; VARIANCE_AQ also rescales the rate
  // so RD comparisons across blocks stay consistent.
  if (aq_mode == VARIANCE_AQ) {
    x->rdmult = orig_rdmult;
    if (*totalrate != INT_MAX) {
      vp9_clear_system_state();
      *totalrate = (int)round(*totalrate * rdmult_ratio);
    }
  } else if (aq_mode == COMPLEXITY_AQ || aq_mode == CYCLIC_REFRESH_AQ) {
    x->rdmult = orig_rdmult;
  }
}
771 | 1083 |
772 static void update_stats(VP9_COMP *cpi) { | 1084 static void update_stats(VP9_COMP *cpi) { |
773 VP9_COMMON *const cm = &cpi->common; | 1085 VP9_COMMON *const cm = &cpi->common; |
774 const MACROBLOCK *const x = &cpi->mb; | 1086 const MACROBLOCK *const x = &cpi->mb; |
775 const MACROBLOCKD *const xd = &x->e_mbd; | 1087 const MACROBLOCKD *const xd = &x->e_mbd; |
776 const MODE_INFO *const mi = xd->mi_8x8[0]; | 1088 const MODE_INFO *const mi = xd->mi[0]; |
777 const MB_MODE_INFO *const mbmi = &mi->mbmi; | 1089 const MB_MODE_INFO *const mbmi = &mi->mbmi; |
778 | 1090 |
779 if (!frame_is_intra_only(cm)) { | 1091 if (!frame_is_intra_only(cm)) { |
780 const int seg_ref_active = vp9_segfeature_active(&cm->seg, mbmi->segment_id, | 1092 const int seg_ref_active = vp9_segfeature_active(&cm->seg, mbmi->segment_id, |
781 SEG_LVL_REF_FRAME); | 1093 SEG_LVL_REF_FRAME); |
782 if (!seg_ref_active) { | 1094 if (!seg_ref_active) { |
783 FRAME_COUNTS *const counts = &cm->counts; | 1095 FRAME_COUNTS *const counts = &cm->counts; |
784 const int inter_block = is_inter_block(mbmi); | 1096 const int inter_block = is_inter_block(mbmi); |
785 | 1097 |
786 counts->intra_inter[vp9_get_intra_inter_context(xd)][inter_block]++; | 1098 counts->intra_inter[vp9_get_intra_inter_context(xd)][inter_block]++; |
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
833 BLOCK_SIZE bsize) { | 1145 BLOCK_SIZE bsize) { |
834 MACROBLOCK *const x = &cpi->mb; | 1146 MACROBLOCK *const x = &cpi->mb; |
835 MACROBLOCKD *const xd = &x->e_mbd; | 1147 MACROBLOCKD *const xd = &x->e_mbd; |
836 int p; | 1148 int p; |
837 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; | 1149 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; |
838 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; | 1150 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; |
839 int mi_width = num_8x8_blocks_wide_lookup[bsize]; | 1151 int mi_width = num_8x8_blocks_wide_lookup[bsize]; |
840 int mi_height = num_8x8_blocks_high_lookup[bsize]; | 1152 int mi_height = num_8x8_blocks_high_lookup[bsize]; |
841 for (p = 0; p < MAX_MB_PLANE; p++) { | 1153 for (p = 0; p < MAX_MB_PLANE; p++) { |
842 vpx_memcpy( | 1154 vpx_memcpy( |
843 cpi->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x), | 1155 xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x), |
844 a + num_4x4_blocks_wide * p, | 1156 a + num_4x4_blocks_wide * p, |
845 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >> | 1157 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >> |
846 xd->plane[p].subsampling_x); | 1158 xd->plane[p].subsampling_x); |
847 vpx_memcpy( | 1159 vpx_memcpy( |
848 cpi->left_context[p] | 1160 xd->left_context[p] |
849 + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y), | 1161 + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y), |
850 l + num_4x4_blocks_high * p, | 1162 l + num_4x4_blocks_high * p, |
851 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >> | 1163 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >> |
852 xd->plane[p].subsampling_y); | 1164 xd->plane[p].subsampling_y); |
853 } | 1165 } |
854 vpx_memcpy(cpi->above_seg_context + mi_col, sa, | 1166 vpx_memcpy(xd->above_seg_context + mi_col, sa, |
855 sizeof(*cpi->above_seg_context) * mi_width); | 1167 sizeof(*xd->above_seg_context) * mi_width); |
856 vpx_memcpy(cpi->left_seg_context + (mi_row & MI_MASK), sl, | 1168 vpx_memcpy(xd->left_seg_context + (mi_row & MI_MASK), sl, |
857 sizeof(cpi->left_seg_context[0]) * mi_height); | 1169 sizeof(xd->left_seg_context[0]) * mi_height); |
858 } | 1170 } |
859 static void save_context(VP9_COMP *cpi, int mi_row, int mi_col, | 1171 static void save_context(VP9_COMP *cpi, int mi_row, int mi_col, |
860 ENTROPY_CONTEXT a[16 * MAX_MB_PLANE], | 1172 ENTROPY_CONTEXT a[16 * MAX_MB_PLANE], |
861 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], | 1173 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], |
862 PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8], | 1174 PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8], |
863 BLOCK_SIZE bsize) { | 1175 BLOCK_SIZE bsize) { |
864 const MACROBLOCK *const x = &cpi->mb; | 1176 const MACROBLOCK *const x = &cpi->mb; |
865 const MACROBLOCKD *const xd = &x->e_mbd; | 1177 const MACROBLOCKD *const xd = &x->e_mbd; |
866 int p; | 1178 int p; |
867 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; | 1179 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; |
868 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; | 1180 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; |
869 int mi_width = num_8x8_blocks_wide_lookup[bsize]; | 1181 int mi_width = num_8x8_blocks_wide_lookup[bsize]; |
870 int mi_height = num_8x8_blocks_high_lookup[bsize]; | 1182 int mi_height = num_8x8_blocks_high_lookup[bsize]; |
871 | 1183 |
872 // buffer the above/left context information of the block in search. | 1184 // buffer the above/left context information of the block in search. |
873 for (p = 0; p < MAX_MB_PLANE; ++p) { | 1185 for (p = 0; p < MAX_MB_PLANE; ++p) { |
874 vpx_memcpy( | 1186 vpx_memcpy( |
875 a + num_4x4_blocks_wide * p, | 1187 a + num_4x4_blocks_wide * p, |
876 cpi->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x), | 1188 xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x), |
877 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >> | 1189 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >> |
878 xd->plane[p].subsampling_x); | 1190 xd->plane[p].subsampling_x); |
879 vpx_memcpy( | 1191 vpx_memcpy( |
880 l + num_4x4_blocks_high * p, | 1192 l + num_4x4_blocks_high * p, |
881 cpi->left_context[p] | 1193 xd->left_context[p] |
882 + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y), | 1194 + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y), |
883 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >> | 1195 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >> |
884 xd->plane[p].subsampling_y); | 1196 xd->plane[p].subsampling_y); |
885 } | 1197 } |
886 vpx_memcpy(sa, cpi->above_seg_context + mi_col, | 1198 vpx_memcpy(sa, xd->above_seg_context + mi_col, |
887 sizeof(*cpi->above_seg_context) * mi_width); | 1199 sizeof(*xd->above_seg_context) * mi_width); |
888 vpx_memcpy(sl, cpi->left_seg_context + (mi_row & MI_MASK), | 1200 vpx_memcpy(sl, xd->left_seg_context + (mi_row & MI_MASK), |
889 sizeof(cpi->left_seg_context[0]) * mi_height); | 1201 sizeof(xd->left_seg_context[0]) * mi_height); |
890 } | 1202 } |
891 | 1203 |
892 static void encode_b(VP9_COMP *cpi, const TileInfo *const tile, | 1204 static void encode_b(VP9_COMP *cpi, const TileInfo *const tile, |
893 TOKENEXTRA **tp, int mi_row, int mi_col, | 1205 TOKENEXTRA **tp, int mi_row, int mi_col, |
894 int output_enabled, BLOCK_SIZE bsize) { | 1206 int output_enabled, BLOCK_SIZE bsize) { |
895 MACROBLOCK *const x = &cpi->mb; | 1207 MACROBLOCK *const x = &cpi->mb; |
896 | 1208 |
897 if (bsize < BLOCK_8X8) { | 1209 if (bsize < BLOCK_8X8) { |
898 // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0 | 1210 // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0 |
899 // there is nothing to be done. | 1211 // there is nothing to be done. |
900 if (x->ab_index > 0) | 1212 if (x->ab_index > 0) |
901 return; | 1213 return; |
902 } | 1214 } |
903 set_offsets(cpi, tile, mi_row, mi_col, bsize); | 1215 set_offsets(cpi, tile, mi_row, mi_col, bsize); |
904 update_state(cpi, get_block_context(x, bsize), bsize, output_enabled); | 1216 update_state(cpi, get_block_context(x, bsize), mi_row, mi_col, bsize, |
| 1217 output_enabled); |
905 encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize); | 1218 encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize); |
906 | 1219 |
907 if (output_enabled) { | 1220 if (output_enabled) { |
908 update_stats(cpi); | 1221 update_stats(cpi); |
909 | 1222 |
910 (*tp)->token = EOSB_TOKEN; | 1223 (*tp)->token = EOSB_TOKEN; |
911 (*tp)++; | 1224 (*tp)++; |
912 } | 1225 } |
913 } | 1226 } |
914 | 1227 |
915 static void encode_sb(VP9_COMP *cpi, const TileInfo *const tile, | 1228 static void encode_sb(VP9_COMP *cpi, const TileInfo *const tile, |
916 TOKENEXTRA **tp, int mi_row, int mi_col, | 1229 TOKENEXTRA **tp, int mi_row, int mi_col, |
917 int output_enabled, BLOCK_SIZE bsize) { | 1230 int output_enabled, BLOCK_SIZE bsize) { |
918 VP9_COMMON *const cm = &cpi->common; | 1231 VP9_COMMON *const cm = &cpi->common; |
919 MACROBLOCK *const x = &cpi->mb; | 1232 MACROBLOCK *const x = &cpi->mb; |
| 1233 MACROBLOCKD *const xd = &x->e_mbd; |
| 1234 |
920 const int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4; | 1235 const int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4; |
921 int ctx; | 1236 int ctx; |
922 PARTITION_TYPE partition; | 1237 PARTITION_TYPE partition; |
923 BLOCK_SIZE subsize; | 1238 BLOCK_SIZE subsize; |
924 | 1239 |
925 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) | 1240 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) |
926 return; | 1241 return; |
927 | 1242 |
928 if (bsize >= BLOCK_8X8) { | 1243 if (bsize >= BLOCK_8X8) { |
929 ctx = partition_plane_context(cpi->above_seg_context, cpi->left_seg_context, | 1244 ctx = partition_plane_context(xd, mi_row, mi_col, bsize); |
930 mi_row, mi_col, bsize); | |
931 subsize = *get_sb_partitioning(x, bsize); | 1245 subsize = *get_sb_partitioning(x, bsize); |
932 } else { | 1246 } else { |
933 ctx = 0; | 1247 ctx = 0; |
934 subsize = BLOCK_4X4; | 1248 subsize = BLOCK_4X4; |
935 } | 1249 } |
936 | 1250 |
937 partition = partition_lookup[bsl][subsize]; | 1251 partition = partition_lookup[bsl][subsize]; |
938 | 1252 |
939 switch (partition) { | 1253 switch (partition) { |
940 case PARTITION_NONE: | 1254 case PARTITION_NONE: |
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
975 encode_sb(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled, subsize); | 1289 encode_sb(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled, subsize); |
976 *get_sb_index(x, subsize) = 3; | 1290 *get_sb_index(x, subsize) = 3; |
977 encode_sb(cpi, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled, | 1291 encode_sb(cpi, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled, |
978 subsize); | 1292 subsize); |
979 break; | 1293 break; |
980 default: | 1294 default: |
981 assert("Invalid partition type."); | 1295 assert("Invalid partition type."); |
982 } | 1296 } |
983 | 1297 |
984 if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8) | 1298 if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8) |
985 update_partition_context(cpi->above_seg_context, cpi->left_seg_context, | 1299 update_partition_context(xd, mi_row, mi_col, subsize, bsize); |
986 mi_row, mi_col, subsize, bsize); | |
987 } | 1300 } |
988 | 1301 |
989 // Check to see if the given partition size is allowed for a specified number | 1302 // Check to see if the given partition size is allowed for a specified number |
990 // of 8x8 block rows and columns remaining in the image. | 1303 // of 8x8 block rows and columns remaining in the image. |
991 // If not then return the largest allowed partition size | 1304 // If not then return the largest allowed partition size |
992 static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize, | 1305 static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize, |
993 int rows_left, int cols_left, | 1306 int rows_left, int cols_left, |
994 int *bh, int *bw) { | 1307 int *bh, int *bw) { |
995 if (rows_left <= 0 || cols_left <= 0) { | 1308 if (rows_left <= 0 || cols_left <= 0) { |
996 return MIN(bsize, BLOCK_8X8); | 1309 return MIN(bsize, BLOCK_8X8); |
997 } else { | 1310 } else { |
998 for (; bsize > 0; bsize -= 3) { | 1311 for (; bsize > 0; bsize -= 3) { |
999 *bh = num_8x8_blocks_high_lookup[bsize]; | 1312 *bh = num_8x8_blocks_high_lookup[bsize]; |
1000 *bw = num_8x8_blocks_wide_lookup[bsize]; | 1313 *bw = num_8x8_blocks_wide_lookup[bsize]; |
1001 if ((*bh <= rows_left) && (*bw <= cols_left)) { | 1314 if ((*bh <= rows_left) && (*bw <= cols_left)) { |
1002 break; | 1315 break; |
1003 } | 1316 } |
1004 } | 1317 } |
1005 } | 1318 } |
1006 return bsize; | 1319 return bsize; |
1007 } | 1320 } |
1008 | 1321 |
1009 // This function attempts to set all mode info entries in a given SB64 | 1322 // This function attempts to set all mode info entries in a given SB64 |
1010 // to the same block partition size. | 1323 // to the same block partition size. |
1011 // However, at the bottom and right borders of the image the requested size | 1324 // However, at the bottom and right borders of the image the requested size |
1012 // may not be allowed in which case this code attempts to choose the largest | 1325 // may not be allowed in which case this code attempts to choose the largest |
1013 // allowable partition. | 1326 // allowable partition. |
1014 static void set_partitioning(VP9_COMP *cpi, const TileInfo *const tile, | 1327 static void set_fixed_partitioning(VP9_COMP *cpi, const TileInfo *const tile, |
1015 MODE_INFO **mi_8x8, int mi_row, int mi_col, | 1328 MODE_INFO **mi_8x8, int mi_row, int mi_col, |
1016 BLOCK_SIZE bsize) { | 1329 BLOCK_SIZE bsize) { |
1017 VP9_COMMON *const cm = &cpi->common; | 1330 VP9_COMMON *const cm = &cpi->common; |
1018 const int mis = cm->mode_info_stride; | 1331 const int mis = cm->mi_stride; |
1019 int row8x8_remaining = tile->mi_row_end - mi_row; | 1332 int row8x8_remaining = tile->mi_row_end - mi_row; |
1020 int col8x8_remaining = tile->mi_col_end - mi_col; | 1333 int col8x8_remaining = tile->mi_col_end - mi_col; |
1021 int block_row, block_col; | 1334 int block_row, block_col; |
1022 MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col; | 1335 MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col; |
1023 int bh = num_8x8_blocks_high_lookup[bsize]; | 1336 int bh = num_8x8_blocks_high_lookup[bsize]; |
1024 int bw = num_8x8_blocks_wide_lookup[bsize]; | 1337 int bw = num_8x8_blocks_wide_lookup[bsize]; |
1025 | 1338 |
1026 assert((row8x8_remaining > 0) && (col8x8_remaining > 0)); | 1339 assert((row8x8_remaining > 0) && (col8x8_remaining > 0)); |
1027 | 1340 |
1028 // Apply the requested partition size to the SB64 if it is all "in image" | 1341 // Apply the requested partition size to the SB64 if it is all "in image" |
(...skipping 15 matching lines...) Expand all Loading... |
1044 bsize = find_partition_size(bsize, | 1357 bsize = find_partition_size(bsize, |
1045 (row8x8_remaining - block_row), | 1358 (row8x8_remaining - block_row), |
1046 (col8x8_remaining - block_col), &bh, &bw); | 1359 (col8x8_remaining - block_col), &bh, &bw); |
1047 mi_8x8[index] = mi_upper_left + index; | 1360 mi_8x8[index] = mi_upper_left + index; |
1048 mi_8x8[index]->mbmi.sb_type = bsize; | 1361 mi_8x8[index]->mbmi.sb_type = bsize; |
1049 } | 1362 } |
1050 } | 1363 } |
1051 } | 1364 } |
1052 } | 1365 } |
1053 | 1366 |
// Seeds the current SB64's partitioning from the previous frame, but caps the
// partition size at `bsize`: previous blocks no larger than bsize are copied
// through; larger (or missing) ones are replaced by a fixed bsize partition.
// Partial (border) SB64s copy the previous partitioning unmodified.
static void constrain_copy_partitioning(VP9_COMP *const cpi,
                                        const TileInfo *const tile,
                                        MODE_INFO **mi_8x8,
                                        MODE_INFO **prev_mi_8x8,
                                        int mi_row, int mi_col,
                                        BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  const int mis = cm->mi_stride;
  const int row8x8_remaining = tile->mi_row_end - mi_row;
  const int col8x8_remaining = tile->mi_col_end - mi_col;
  MODE_INFO *const mi_upper_left = cm->mi + mi_row * mis + mi_col;
  const int bh = num_8x8_blocks_high_lookup[bsize];
  const int bw = num_8x8_blocks_wide_lookup[bsize];
  int block_row, block_col;

  assert((row8x8_remaining > 0) && (col8x8_remaining > 0));

  // Only when the whole SB64 is inside the image can a fixed partition of
  // size bsize be imposed.
  if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
      (row8x8_remaining >= MI_BLOCK_SIZE)) {
    // Visit the SB64 in bsize-sized steps.
    for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
      for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
        const int index = block_row * mis + block_col;
        MODE_INFO *prev_mi = prev_mi_8x8[index];
        const BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0;
        // Use previous partition if block size is not larger than bsize.
        if (prev_mi && sb_type <= bsize) {
          // Copy every 8x8 entry of this bsize region, re-basing each
          // previous-frame pointer onto the current frame's mi array.
          int block_row2, block_col2;
          for (block_row2 = 0; block_row2 < bh; ++block_row2) {
            for (block_col2 = 0; block_col2 < bw; ++block_col2) {
              const int index2 = (block_row + block_row2) * mis +
                  block_col + block_col2;
              prev_mi = prev_mi_8x8[index2];
              if (prev_mi) {
                const ptrdiff_t offset = prev_mi - cm->prev_mi;
                mi_8x8[index2] = cm->mi + offset;
                mi_8x8[index2]->mbmi.sb_type = prev_mi->mbmi.sb_type;
              }
            }
          }
        } else {
          // Otherwise, use fixed partition of size bsize.
          mi_8x8[index] = mi_upper_left + index;
          mi_8x8[index]->mbmi.sb_type = bsize;
        }
      }
    }
  } else {
    // Else this is a partial SB64, copy previous partition.
    // NOTE(review): iterates the full 8x8 grid like copy_partitioning();
    // presumably out-of-image entries have NULL prev_mi — confirm.
    for (block_row = 0; block_row < 8; ++block_row) {
      for (block_col = 0; block_col < 8; ++block_col) {
        MODE_INFO *const prev_mi = prev_mi_8x8[block_row * mis + block_col];
        const BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0;
        if (prev_mi) {
          const ptrdiff_t offset = prev_mi - cm->prev_mi;
          mi_8x8[block_row * mis + block_col] = cm->mi + offset;
          mi_8x8[block_row * mis + block_col]->mbmi.sb_type = sb_type;
        }
      }
    }
  }
}
| 1429 |
1054 static void copy_partitioning(VP9_COMMON *cm, MODE_INFO **mi_8x8, | 1430 static void copy_partitioning(VP9_COMMON *cm, MODE_INFO **mi_8x8, |
1055 MODE_INFO **prev_mi_8x8) { | 1431 MODE_INFO **prev_mi_8x8) { |
1056 const int mis = cm->mode_info_stride; | 1432 const int mis = cm->mi_stride; |
1057 int block_row, block_col; | 1433 int block_row, block_col; |
1058 | 1434 |
1059 for (block_row = 0; block_row < 8; ++block_row) { | 1435 for (block_row = 0; block_row < 8; ++block_row) { |
1060 for (block_col = 0; block_col < 8; ++block_col) { | 1436 for (block_col = 0; block_col < 8; ++block_col) { |
1061 MODE_INFO *const prev_mi = prev_mi_8x8[block_row * mis + block_col]; | 1437 MODE_INFO *const prev_mi = prev_mi_8x8[block_row * mis + block_col]; |
1062 const BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0; | 1438 const BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0; |
| 1439 |
1063 if (prev_mi) { | 1440 if (prev_mi) { |
1064 const ptrdiff_t offset = prev_mi - cm->prev_mi; | 1441 const ptrdiff_t offset = prev_mi - cm->prev_mi; |
1065 mi_8x8[block_row * mis + block_col] = cm->mi + offset; | 1442 mi_8x8[block_row * mis + block_col] = cm->mi + offset; |
1066 mi_8x8[block_row * mis + block_col]->mbmi.sb_type = sb_type; | 1443 mi_8x8[block_row * mis + block_col]->mbmi.sb_type = sb_type; |
1067 } | 1444 } |
1068 } | 1445 } |
1069 } | 1446 } |
1070 } | 1447 } |
1071 | 1448 |
| 1449 const struct { |
| 1450 int row; |
| 1451 int col; |
| 1452 } coord_lookup[16] = { |
| 1453 // 32x32 index = 0 |
| 1454 {0, 0}, {0, 2}, {2, 0}, {2, 2}, |
| 1455 // 32x32 index = 1 |
| 1456 {0, 4}, {0, 6}, {2, 4}, {2, 6}, |
| 1457 // 32x32 index = 2 |
| 1458 {4, 0}, {4, 2}, {6, 0}, {6, 2}, |
| 1459 // 32x32 index = 3 |
| 1460 {4, 4}, {4, 6}, {6, 4}, {6, 6}, |
| 1461 }; |
| 1462 |
| 1463 static void set_source_var_based_partition(VP9_COMP *cpi, |
| 1464 const TileInfo *const tile, |
| 1465 MODE_INFO **mi_8x8, |
| 1466 int mi_row, int mi_col) { |
| 1467 VP9_COMMON *const cm = &cpi->common; |
| 1468 MACROBLOCK *x = &cpi->mb; |
| 1469 const int mis = cm->mi_stride; |
| 1470 int row8x8_remaining = tile->mi_row_end - mi_row; |
| 1471 int col8x8_remaining = tile->mi_col_end - mi_col; |
| 1472 int r, c; |
| 1473 MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col; |
| 1474 |
| 1475 assert((row8x8_remaining > 0) && (col8x8_remaining > 0)); |
| 1476 |
| 1477 // In-image SB64 |
| 1478 if ((col8x8_remaining >= MI_BLOCK_SIZE) && |
| 1479 (row8x8_remaining >= MI_BLOCK_SIZE)) { |
| 1480 const int src_stride = x->plane[0].src.stride; |
| 1481 const int pre_stride = cpi->Last_Source->y_stride; |
| 1482 const uint8_t *src = x->plane[0].src.buf; |
| 1483 const int pre_offset = (mi_row * MI_SIZE) * pre_stride + |
| 1484 (mi_col * MI_SIZE); |
| 1485 const uint8_t *pre_src = cpi->Last_Source->y_buffer + pre_offset; |
| 1486 const int thr_32x32 = cpi->sf.source_var_thresh; |
| 1487 const int thr_64x64 = thr_32x32 << 1; |
| 1488 int i, j; |
| 1489 int index; |
| 1490 diff d32[4]; |
| 1491 int use16x16 = 0; |
| 1492 |
| 1493 for (i = 0; i < 4; i++) { |
| 1494 diff d16[4]; |
| 1495 |
| 1496 for (j = 0; j < 4; j++) { |
| 1497 int b_mi_row = coord_lookup[i * 4 + j].row; |
| 1498 int b_mi_col = coord_lookup[i * 4 + j].col; |
| 1499 int b_offset = b_mi_row * MI_SIZE * src_stride + |
| 1500 b_mi_col * MI_SIZE; |
| 1501 |
| 1502 vp9_get_sse_sum_16x16(src + b_offset, |
| 1503 src_stride, |
| 1504 pre_src + b_offset, |
| 1505 pre_stride, &d16[j].sse, &d16[j].sum); |
| 1506 |
| 1507 d16[j].var = d16[j].sse - |
| 1508 (((uint32_t)d16[j].sum * d16[j].sum) >> 8); |
| 1509 |
| 1510 index = b_mi_row * mis + b_mi_col; |
| 1511 mi_8x8[index] = mi_upper_left + index; |
| 1512 mi_8x8[index]->mbmi.sb_type = BLOCK_16X16; |
| 1513 |
| 1514 // TODO(yunqingwang): If d16[j].var is very large, use 8x8 partition |
| 1515 // size to further improve quality. |
| 1516 } |
| 1517 |
| 1518 if (d16[0].var < thr_32x32 && d16[1].var < thr_32x32 && |
| 1519 d16[2].var < thr_32x32 && d16[3].var < thr_32x32) { |
| 1520 d32[i].sse = d16[0].sse; |
| 1521 d32[i].sum = d16[0].sum; |
| 1522 |
| 1523 for (j = 1; j < 4; j++) { |
| 1524 d32[i].sse += d16[j].sse; |
| 1525 d32[i].sum += d16[j].sum; |
| 1526 } |
| 1527 |
| 1528 d32[i].var = d32[i].sse - (((int64_t)d32[i].sum * d32[i].sum) >> 10); |
| 1529 |
| 1530 index = coord_lookup[i*4].row * mis + coord_lookup[i*4].col; |
| 1531 mi_8x8[index] = mi_upper_left + index; |
| 1532 mi_8x8[index]->mbmi.sb_type = BLOCK_32X32; |
| 1533 |
| 1534 if (!((cm->current_video_frame - 1) % |
| 1535 cpi->sf.search_type_check_frequency)) |
| 1536 cpi->use_large_partition_rate += 1; |
| 1537 } else { |
| 1538 use16x16 = 1; |
| 1539 } |
| 1540 } |
| 1541 |
| 1542 if (!use16x16) { |
| 1543 if (d32[0].var < thr_64x64 && d32[1].var < thr_64x64 && |
| 1544 d32[2].var < thr_64x64 && d32[3].var < thr_64x64) { |
| 1545 mi_8x8[0] = mi_upper_left; |
| 1546 mi_8x8[0]->mbmi.sb_type = BLOCK_64X64; |
| 1547 } |
| 1548 } |
| 1549 } else { // partial in-image SB64 |
| 1550 BLOCK_SIZE bsize = BLOCK_16X16; |
| 1551 int bh = num_8x8_blocks_high_lookup[bsize]; |
| 1552 int bw = num_8x8_blocks_wide_lookup[bsize]; |
| 1553 |
| 1554 for (r = 0; r < MI_BLOCK_SIZE; r += bh) { |
| 1555 for (c = 0; c < MI_BLOCK_SIZE; c += bw) { |
| 1556 int index = r * mis + c; |
| 1557 // Find a partition size that fits |
| 1558 bsize = find_partition_size(bsize, |
| 1559 (row8x8_remaining - r), |
| 1560 (col8x8_remaining - c), &bh, &bw); |
| 1561 mi_8x8[index] = mi_upper_left + index; |
| 1562 mi_8x8[index]->mbmi.sb_type = bsize; |
| 1563 } |
| 1564 } |
| 1565 } |
| 1566 } |
| 1567 |
1072 static int sb_has_motion(const VP9_COMMON *cm, MODE_INFO **prev_mi_8x8) { | 1568 static int sb_has_motion(const VP9_COMMON *cm, MODE_INFO **prev_mi_8x8) { |
1073 const int mis = cm->mode_info_stride; | 1569 const int mis = cm->mi_stride; |
1074 int block_row, block_col; | 1570 int block_row, block_col; |
1075 | 1571 |
1076 if (cm->prev_mi) { | 1572 if (cm->prev_mi) { |
1077 for (block_row = 0; block_row < 8; ++block_row) { | 1573 for (block_row = 0; block_row < 8; ++block_row) { |
1078 for (block_col = 0; block_col < 8; ++block_col) { | 1574 for (block_col = 0; block_col < 8; ++block_col) { |
1079 const MODE_INFO *prev_mi = prev_mi_8x8[block_row * mis + block_col]; | 1575 const MODE_INFO *prev_mi = prev_mi_8x8[block_row * mis + block_col]; |
1080 if (prev_mi) { | 1576 if (prev_mi) { |
1081 if (abs(prev_mi->mbmi.mv[0].as_mv.row) >= 8 || | 1577 if (abs(prev_mi->mbmi.mv[0].as_mv.row) >= 8 || |
1082 abs(prev_mi->mbmi.mv[0].as_mv.col) >= 8) | 1578 abs(prev_mi->mbmi.mv[0].as_mv.col) >= 8) |
1083 return 1; | 1579 return 1; |
1084 } | 1580 } |
1085 } | 1581 } |
1086 } | 1582 } |
1087 } | 1583 } |
1088 return 0; | 1584 return 0; |
1089 } | 1585 } |
1090 | 1586 |
1091 static void update_state_rt(VP9_COMP *cpi, const PICK_MODE_CONTEXT *ctx) { | 1587 static void update_state_rt(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx, |
1092 int i; | 1588 int mi_row, int mi_col, int bsize) { |
1093 VP9_COMMON *const cm = &cpi->common; | 1589 VP9_COMMON *const cm = &cpi->common; |
1094 MACROBLOCK *const x = &cpi->mb; | 1590 MACROBLOCK *const x = &cpi->mb; |
1095 MACROBLOCKD *const xd = &x->e_mbd; | 1591 MACROBLOCKD *const xd = &x->e_mbd; |
1096 MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi; | 1592 MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi; |
| 1593 const struct segmentation *const seg = &cm->seg; |
| 1594 |
| 1595 *(xd->mi[0]) = ctx->mic; |
| 1596 |
| 1597 // For in frame adaptive Q, check for reseting the segment_id and updating |
| 1598 // the cyclic refresh map. |
| 1599 if ((cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) && seg->enabled) { |
| 1600 vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, |
| 1601 mi_row, mi_col, bsize, 1); |
| 1602 vp9_init_plane_quantizers(cpi, x); |
| 1603 } |
| 1604 |
| 1605 if (is_inter_block(mbmi)) { |
| 1606 vp9_update_mv_count(cm, xd); |
| 1607 |
| 1608 if (cm->interp_filter == SWITCHABLE) { |
| 1609 const int pred_ctx = vp9_get_pred_context_switchable_interp(xd); |
| 1610 ++cm->counts.switchable_interp[pred_ctx][mbmi->interp_filter]; |
| 1611 } |
| 1612 } |
1097 | 1613 |
1098 x->skip = ctx->skip; | 1614 x->skip = ctx->skip; |
1099 | |
1100 #if CONFIG_INTERNAL_STATS | |
1101 if (frame_is_intra_only(cm)) { | |
1102 static const int kf_mode_index[] = { | |
1103 THR_DC /*DC_PRED*/, | |
1104 THR_V_PRED /*V_PRED*/, | |
1105 THR_H_PRED /*H_PRED*/, | |
1106 THR_D45_PRED /*D45_PRED*/, | |
1107 THR_D135_PRED /*D135_PRED*/, | |
1108 THR_D117_PRED /*D117_PRED*/, | |
1109 THR_D153_PRED /*D153_PRED*/, | |
1110 THR_D207_PRED /*D207_PRED*/, | |
1111 THR_D63_PRED /*D63_PRED*/, | |
1112 THR_TM /*TM_PRED*/, | |
1113 }; | |
1114 ++cpi->mode_chosen_counts[kf_mode_index[mbmi->mode]]; | |
1115 } else { | |
1116 // Note how often each mode chosen as best | |
1117 ++cpi->mode_chosen_counts[ctx->best_mode_index]; | |
1118 } | |
1119 #endif | |
1120 if (!frame_is_intra_only(cm)) { | |
1121 if (is_inter_block(mbmi)) { | |
1122 if (mbmi->sb_type < BLOCK_8X8 || mbmi->mode == NEWMV) { | |
1123 MV best_mv[2]; | |
1124 for (i = 0; i < 1 + has_second_ref(mbmi); ++i) | |
1125 best_mv[i] = mbmi->ref_mvs[mbmi->ref_frame[i]][0].as_mv; | |
1126 vp9_update_mv_count(cm, xd, best_mv); | |
1127 } | |
1128 | |
1129 if (cm->interp_filter == SWITCHABLE) { | |
1130 const int pred_ctx = vp9_get_pred_context_switchable_interp(xd); | |
1131 ++cm->counts.switchable_interp[pred_ctx][mbmi->interp_filter]; | |
1132 } | |
1133 } | |
1134 } | |
1135 } | 1615 } |
1136 | 1616 |
1137 static void encode_b_rt(VP9_COMP *cpi, const TileInfo *const tile, | 1617 static void encode_b_rt(VP9_COMP *cpi, const TileInfo *const tile, |
1138 TOKENEXTRA **tp, int mi_row, int mi_col, | 1618 TOKENEXTRA **tp, int mi_row, int mi_col, |
1139 int output_enabled, BLOCK_SIZE bsize) { | 1619 int output_enabled, BLOCK_SIZE bsize) { |
1140 MACROBLOCK *const x = &cpi->mb; | 1620 MACROBLOCK *const x = &cpi->mb; |
1141 | 1621 |
1142 if (bsize < BLOCK_8X8) { | 1622 if (bsize < BLOCK_8X8) { |
1143 // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0 | 1623 // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0 |
1144 // there is nothing to be done. | 1624 // there is nothing to be done. |
1145 if (x->ab_index > 0) | 1625 if (x->ab_index > 0) |
1146 return; | 1626 return; |
1147 } | 1627 } |
| 1628 |
1148 set_offsets(cpi, tile, mi_row, mi_col, bsize); | 1629 set_offsets(cpi, tile, mi_row, mi_col, bsize); |
1149 update_state_rt(cpi, get_block_context(x, bsize)); | 1630 update_state_rt(cpi, get_block_context(x, bsize), mi_row, mi_col, bsize); |
1150 | 1631 |
1151 encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize); | 1632 encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize); |
1152 update_stats(cpi); | 1633 update_stats(cpi); |
1153 | 1634 |
1154 (*tp)->token = EOSB_TOKEN; | 1635 (*tp)->token = EOSB_TOKEN; |
1155 (*tp)++; | 1636 (*tp)++; |
1156 } | 1637 } |
1157 | 1638 |
1158 static void encode_sb_rt(VP9_COMP *cpi, const TileInfo *const tile, | 1639 static void encode_sb_rt(VP9_COMP *cpi, const TileInfo *const tile, |
1159 TOKENEXTRA **tp, int mi_row, int mi_col, | 1640 TOKENEXTRA **tp, int mi_row, int mi_col, |
1160 int output_enabled, BLOCK_SIZE bsize) { | 1641 int output_enabled, BLOCK_SIZE bsize) { |
1161 VP9_COMMON *const cm = &cpi->common; | 1642 VP9_COMMON *const cm = &cpi->common; |
1162 MACROBLOCK *const x = &cpi->mb; | 1643 MACROBLOCK *const x = &cpi->mb; |
| 1644 MACROBLOCKD *const xd = &x->e_mbd; |
| 1645 |
1163 const int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4; | 1646 const int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4; |
1164 int ctx; | 1647 int ctx; |
1165 PARTITION_TYPE partition; | 1648 PARTITION_TYPE partition; |
1166 BLOCK_SIZE subsize; | 1649 BLOCK_SIZE subsize; |
1167 | 1650 |
1168 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) | 1651 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) |
1169 return; | 1652 return; |
1170 | 1653 |
1171 if (bsize >= BLOCK_8X8) { | 1654 if (bsize >= BLOCK_8X8) { |
1172 MACROBLOCKD *const xd = &cpi->mb.e_mbd; | 1655 MACROBLOCKD *const xd = &cpi->mb.e_mbd; |
1173 const int idx_str = xd->mode_info_stride * mi_row + mi_col; | 1656 const int idx_str = xd->mi_stride * mi_row + mi_col; |
1174 MODE_INFO ** mi_8x8 = cm->mi_grid_visible + idx_str; | 1657 MODE_INFO ** mi_8x8 = cm->mi_grid_visible + idx_str; |
1175 ctx = partition_plane_context(cpi->above_seg_context, cpi->left_seg_context, | 1658 ctx = partition_plane_context(xd, mi_row, mi_col, bsize); |
1176 mi_row, mi_col, bsize); | |
1177 subsize = mi_8x8[0]->mbmi.sb_type; | 1659 subsize = mi_8x8[0]->mbmi.sb_type; |
1178 } else { | 1660 } else { |
1179 ctx = 0; | 1661 ctx = 0; |
1180 subsize = BLOCK_4X4; | 1662 subsize = BLOCK_4X4; |
1181 } | 1663 } |
1182 | 1664 |
1183 partition = partition_lookup[bsl][subsize]; | 1665 partition = partition_lookup[bsl][subsize]; |
1184 | 1666 |
1185 switch (partition) { | 1667 switch (partition) { |
1186 case PARTITION_NONE: | 1668 case PARTITION_NONE: |
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1225 subsize); | 1707 subsize); |
1226 *get_sb_index(x, subsize) = 3; | 1708 *get_sb_index(x, subsize) = 3; |
1227 encode_sb_rt(cpi, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled, | 1709 encode_sb_rt(cpi, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled, |
1228 subsize); | 1710 subsize); |
1229 break; | 1711 break; |
1230 default: | 1712 default: |
1231 assert("Invalid partition type."); | 1713 assert("Invalid partition type."); |
1232 } | 1714 } |
1233 | 1715 |
1234 if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8) | 1716 if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8) |
1235 update_partition_context(cpi->above_seg_context, cpi->left_seg_context, | 1717 update_partition_context(xd, mi_row, mi_col, subsize, bsize); |
1236 mi_row, mi_col, subsize, bsize); | |
1237 } | 1718 } |
1238 | 1719 |
1239 static void rd_use_partition(VP9_COMP *cpi, | 1720 static void rd_use_partition(VP9_COMP *cpi, |
1240 const TileInfo *const tile, | 1721 const TileInfo *const tile, |
1241 MODE_INFO **mi_8x8, | 1722 MODE_INFO **mi_8x8, |
1242 TOKENEXTRA **tp, int mi_row, int mi_col, | 1723 TOKENEXTRA **tp, int mi_row, int mi_col, |
1243 BLOCK_SIZE bsize, int *rate, int64_t *dist, | 1724 BLOCK_SIZE bsize, int *rate, int64_t *dist, |
1244 int do_recon) { | 1725 int do_recon) { |
1245 VP9_COMMON *const cm = &cpi->common; | 1726 VP9_COMMON *const cm = &cpi->common; |
1246 MACROBLOCK *const x = &cpi->mb; | 1727 MACROBLOCK *const x = &cpi->mb; |
1247 const int mis = cm->mode_info_stride; | 1728 MACROBLOCKD *const xd = &x->e_mbd; |
| 1729 const int mis = cm->mi_stride; |
1248 const int bsl = b_width_log2(bsize); | 1730 const int bsl = b_width_log2(bsize); |
1249 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; | 1731 const int mi_step = num_4x4_blocks_wide_lookup[bsize] / 2; |
1250 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; | |
1251 const int ms = num_4x4_blocks_wide / 2; | |
1252 const int mh = num_4x4_blocks_high / 2; | |
1253 const int bss = (1 << bsl) / 4; | 1732 const int bss = (1 << bsl) / 4; |
1254 int i, pl; | 1733 int i, pl; |
1255 PARTITION_TYPE partition = PARTITION_NONE; | 1734 PARTITION_TYPE partition = PARTITION_NONE; |
1256 BLOCK_SIZE subsize; | 1735 BLOCK_SIZE subsize; |
1257 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE]; | 1736 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE]; |
1258 PARTITION_CONTEXT sl[8], sa[8]; | 1737 PARTITION_CONTEXT sl[8], sa[8]; |
1259 int last_part_rate = INT_MAX; | 1738 int last_part_rate = INT_MAX; |
1260 int64_t last_part_dist = INT64_MAX; | 1739 int64_t last_part_dist = INT64_MAX; |
1261 int64_t last_part_rd = INT64_MAX; | 1740 int64_t last_part_rd = INT64_MAX; |
1262 int none_rate = INT_MAX; | 1741 int none_rate = INT_MAX; |
1263 int64_t none_dist = INT64_MAX; | 1742 int64_t none_dist = INT64_MAX; |
1264 int64_t none_rd = INT64_MAX; | 1743 int64_t none_rd = INT64_MAX; |
1265 int chosen_rate = INT_MAX; | 1744 int chosen_rate = INT_MAX; |
1266 int64_t chosen_dist = INT64_MAX; | 1745 int64_t chosen_dist = INT64_MAX; |
1267 int64_t chosen_rd = INT64_MAX; | 1746 int64_t chosen_rd = INT64_MAX; |
1268 BLOCK_SIZE sub_subsize = BLOCK_4X4; | 1747 BLOCK_SIZE sub_subsize = BLOCK_4X4; |
1269 int splits_below = 0; | 1748 int splits_below = 0; |
1270 BLOCK_SIZE bs_type = mi_8x8[0]->mbmi.sb_type; | 1749 BLOCK_SIZE bs_type = mi_8x8[0]->mbmi.sb_type; |
| 1750 int do_partition_search = 1; |
1271 | 1751 |
1272 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) | 1752 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) |
1273 return; | 1753 return; |
1274 | 1754 |
| 1755 assert(num_4x4_blocks_wide_lookup[bsize] == |
| 1756 num_4x4_blocks_high_lookup[bsize]); |
| 1757 |
1275 partition = partition_lookup[bsl][bs_type]; | 1758 partition = partition_lookup[bsl][bs_type]; |
1276 subsize = get_subsize(bsize, partition); | 1759 subsize = get_subsize(bsize, partition); |
1277 | 1760 |
1278 if (bsize < BLOCK_8X8) { | 1761 if (bsize < BLOCK_8X8) { |
1279 // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0 | 1762 // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0 |
1280 // there is nothing to be done. | 1763 // there is nothing to be done. |
1281 if (x->ab_index != 0) { | 1764 if (x->ab_index != 0) { |
1282 *rate = 0; | 1765 *rate = 0; |
1283 *dist = 0; | 1766 *dist = 0; |
1284 return; | 1767 return; |
1285 } | 1768 } |
1286 } else { | 1769 } else { |
1287 *(get_sb_partitioning(x, bsize)) = subsize; | 1770 *(get_sb_partitioning(x, bsize)) = subsize; |
1288 } | 1771 } |
1289 save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); | 1772 save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); |
1290 | 1773 |
1291 if (bsize == BLOCK_16X16) { | 1774 if (bsize == BLOCK_16X16) { |
1292 set_offsets(cpi, tile, mi_row, mi_col, bsize); | 1775 set_offsets(cpi, tile, mi_row, mi_col, bsize); |
1293 x->mb_energy = vp9_block_energy(cpi, x, bsize); | 1776 x->mb_energy = vp9_block_energy(cpi, x, bsize); |
| 1777 } else { |
| 1778 x->in_active_map = check_active_map(cpi, x, mi_row, mi_col, bsize); |
1294 } | 1779 } |
1295 | 1780 |
1296 if (cpi->sf.partition_search_type == SEARCH_PARTITION && | 1781 if (!x->in_active_map) { |
| 1782 do_partition_search = 0; |
| 1783 if (mi_row + (mi_step >> 1) < cm->mi_rows && |
| 1784 mi_col + (mi_step >> 1) < cm->mi_cols) { |
| 1785 *(get_sb_partitioning(x, bsize)) = bsize; |
| 1786 bs_type = mi_8x8[0]->mbmi.sb_type = bsize; |
| 1787 subsize = bsize; |
| 1788 partition = PARTITION_NONE; |
| 1789 } |
| 1790 } |
| 1791 if (do_partition_search && |
| 1792 cpi->sf.partition_search_type == SEARCH_PARTITION && |
1297 cpi->sf.adjust_partitioning_from_last_frame) { | 1793 cpi->sf.adjust_partitioning_from_last_frame) { |
1298 // Check if any of the sub blocks are further split. | 1794 // Check if any of the sub blocks are further split. |
1299 if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) { | 1795 if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) { |
1300 sub_subsize = get_subsize(subsize, PARTITION_SPLIT); | 1796 sub_subsize = get_subsize(subsize, PARTITION_SPLIT); |
1301 splits_below = 1; | 1797 splits_below = 1; |
1302 for (i = 0; i < 4; i++) { | 1798 for (i = 0; i < 4; i++) { |
1303 int jj = i >> 1, ii = i & 0x01; | 1799 int jj = i >> 1, ii = i & 0x01; |
1304 MODE_INFO * this_mi = mi_8x8[jj * bss * mis + ii * bss]; | 1800 MODE_INFO * this_mi = mi_8x8[jj * bss * mis + ii * bss]; |
1305 if (this_mi && this_mi->mbmi.sb_type >= sub_subsize) { | 1801 if (this_mi && this_mi->mbmi.sb_type >= sub_subsize) { |
1306 splits_below = 0; | 1802 splits_below = 0; |
1307 } | 1803 } |
1308 } | 1804 } |
1309 } | 1805 } |
1310 | 1806 |
1311 // If partition is not none try none unless each of the 4 splits are split | 1807 // If partition is not none try none unless each of the 4 splits are split |
1312 // even further.. | 1808 // even further.. |
1313 if (partition != PARTITION_NONE && !splits_below && | 1809 if (partition != PARTITION_NONE && !splits_below && |
1314 mi_row + (ms >> 1) < cm->mi_rows && | 1810 mi_row + (mi_step >> 1) < cm->mi_rows && |
1315 mi_col + (ms >> 1) < cm->mi_cols) { | 1811 mi_col + (mi_step >> 1) < cm->mi_cols) { |
1316 *(get_sb_partitioning(x, bsize)) = bsize; | 1812 *(get_sb_partitioning(x, bsize)) = bsize; |
1317 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &none_rate, &none_dist, bsize, | 1813 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &none_rate, &none_dist, bsize, |
1318 get_block_context(x, bsize), INT64_MAX); | 1814 get_block_context(x, bsize), INT64_MAX); |
1319 | 1815 |
1320 pl = partition_plane_context(cpi->above_seg_context, | 1816 pl = partition_plane_context(xd, mi_row, mi_col, bsize); |
1321 cpi->left_seg_context, | |
1322 mi_row, mi_col, bsize); | |
1323 | 1817 |
1324 if (none_rate < INT_MAX) { | 1818 if (none_rate < INT_MAX) { |
1325 none_rate += x->partition_cost[pl][PARTITION_NONE]; | 1819 none_rate += x->partition_cost[pl][PARTITION_NONE]; |
1326 none_rd = RDCOST(x->rdmult, x->rddiv, none_rate, none_dist); | 1820 none_rd = RDCOST(x->rdmult, x->rddiv, none_rate, none_dist); |
1327 } | 1821 } |
1328 | 1822 |
1329 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); | 1823 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); |
1330 mi_8x8[0]->mbmi.sb_type = bs_type; | 1824 mi_8x8[0]->mbmi.sb_type = bs_type; |
1331 *(get_sb_partitioning(x, bsize)) = subsize; | 1825 *(get_sb_partitioning(x, bsize)) = subsize; |
1332 } | 1826 } |
1333 } | 1827 } |
1334 | 1828 |
1335 switch (partition) { | 1829 switch (partition) { |
1336 case PARTITION_NONE: | 1830 case PARTITION_NONE: |
1337 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate, | 1831 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate, |
1338 &last_part_dist, bsize, | 1832 &last_part_dist, bsize, |
1339 get_block_context(x, bsize), INT64_MAX); | 1833 get_block_context(x, bsize), INT64_MAX); |
1340 break; | 1834 break; |
1341 case PARTITION_HORZ: | 1835 case PARTITION_HORZ: |
1342 *get_sb_index(x, subsize) = 0; | 1836 *get_sb_index(x, subsize) = 0; |
1343 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate, | 1837 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate, |
1344 &last_part_dist, subsize, | 1838 &last_part_dist, subsize, |
1345 get_block_context(x, subsize), INT64_MAX); | 1839 get_block_context(x, subsize), INT64_MAX); |
1346 if (last_part_rate != INT_MAX && | 1840 if (last_part_rate != INT_MAX && |
1347 bsize >= BLOCK_8X8 && mi_row + (mh >> 1) < cm->mi_rows) { | 1841 bsize >= BLOCK_8X8 && mi_row + (mi_step >> 1) < cm->mi_rows) { |
1348 int rt = 0; | 1842 int rt = 0; |
1349 int64_t dt = 0; | 1843 int64_t dt = 0; |
1350 update_state(cpi, get_block_context(x, subsize), subsize, 0); | 1844 update_state(cpi, get_block_context(x, subsize), mi_row, mi_col, |
| 1845 subsize, 0); |
1351 encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize); | 1846 encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize); |
1352 *get_sb_index(x, subsize) = 1; | 1847 *get_sb_index(x, subsize) = 1; |
1353 rd_pick_sb_modes(cpi, tile, mi_row + (ms >> 1), mi_col, &rt, &dt, | 1848 rd_pick_sb_modes(cpi, tile, mi_row + (mi_step >> 1), mi_col, &rt, &dt, |
1354 subsize, get_block_context(x, subsize), INT64_MAX); | 1849 subsize, get_block_context(x, subsize), INT64_MAX); |
1355 if (rt == INT_MAX || dt == INT64_MAX) { | 1850 if (rt == INT_MAX || dt == INT64_MAX) { |
1356 last_part_rate = INT_MAX; | 1851 last_part_rate = INT_MAX; |
1357 last_part_dist = INT64_MAX; | 1852 last_part_dist = INT64_MAX; |
1358 break; | 1853 break; |
1359 } | 1854 } |
1360 | 1855 |
1361 last_part_rate += rt; | 1856 last_part_rate += rt; |
1362 last_part_dist += dt; | 1857 last_part_dist += dt; |
1363 } | 1858 } |
1364 break; | 1859 break; |
1365 case PARTITION_VERT: | 1860 case PARTITION_VERT: |
1366 *get_sb_index(x, subsize) = 0; | 1861 *get_sb_index(x, subsize) = 0; |
1367 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate, | 1862 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate, |
1368 &last_part_dist, subsize, | 1863 &last_part_dist, subsize, |
1369 get_block_context(x, subsize), INT64_MAX); | 1864 get_block_context(x, subsize), INT64_MAX); |
1370 if (last_part_rate != INT_MAX && | 1865 if (last_part_rate != INT_MAX && |
1371 bsize >= BLOCK_8X8 && mi_col + (ms >> 1) < cm->mi_cols) { | 1866 bsize >= BLOCK_8X8 && mi_col + (mi_step >> 1) < cm->mi_cols) { |
1372 int rt = 0; | 1867 int rt = 0; |
1373 int64_t dt = 0; | 1868 int64_t dt = 0; |
1374 update_state(cpi, get_block_context(x, subsize), subsize, 0); | 1869 update_state(cpi, get_block_context(x, subsize), mi_row, mi_col, |
| 1870 subsize, 0); |
1375 encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize); | 1871 encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize); |
1376 *get_sb_index(x, subsize) = 1; | 1872 *get_sb_index(x, subsize) = 1; |
1377 rd_pick_sb_modes(cpi, tile, mi_row, mi_col + (ms >> 1), &rt, &dt, | 1873 rd_pick_sb_modes(cpi, tile, mi_row, mi_col + (mi_step >> 1), &rt, &dt, |
1378 subsize, get_block_context(x, subsize), INT64_MAX); | 1874 subsize, get_block_context(x, subsize), INT64_MAX); |
1379 if (rt == INT_MAX || dt == INT64_MAX) { | 1875 if (rt == INT_MAX || dt == INT64_MAX) { |
1380 last_part_rate = INT_MAX; | 1876 last_part_rate = INT_MAX; |
1381 last_part_dist = INT64_MAX; | 1877 last_part_dist = INT64_MAX; |
1382 break; | 1878 break; |
1383 } | 1879 } |
1384 last_part_rate += rt; | 1880 last_part_rate += rt; |
1385 last_part_dist += dt; | 1881 last_part_dist += dt; |
1386 } | 1882 } |
1387 break; | 1883 break; |
1388 case PARTITION_SPLIT: | 1884 case PARTITION_SPLIT: |
1389 // Split partition. | 1885 // Split partition. |
1390 last_part_rate = 0; | 1886 last_part_rate = 0; |
1391 last_part_dist = 0; | 1887 last_part_dist = 0; |
1392 for (i = 0; i < 4; i++) { | 1888 for (i = 0; i < 4; i++) { |
1393 int x_idx = (i & 1) * (ms >> 1); | 1889 int x_idx = (i & 1) * (mi_step >> 1); |
1394 int y_idx = (i >> 1) * (ms >> 1); | 1890 int y_idx = (i >> 1) * (mi_step >> 1); |
1395 int jj = i >> 1, ii = i & 0x01; | 1891 int jj = i >> 1, ii = i & 0x01; |
1396 int rt; | 1892 int rt; |
1397 int64_t dt; | 1893 int64_t dt; |
1398 | 1894 |
1399 if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols)) | 1895 if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols)) |
1400 continue; | 1896 continue; |
1401 | 1897 |
1402 *get_sb_index(x, subsize) = i; | 1898 *get_sb_index(x, subsize) = i; |
1403 | 1899 |
1404 rd_use_partition(cpi, tile, mi_8x8 + jj * bss * mis + ii * bss, tp, | 1900 rd_use_partition(cpi, tile, mi_8x8 + jj * bss * mis + ii * bss, tp, |
1405 mi_row + y_idx, mi_col + x_idx, subsize, &rt, &dt, | 1901 mi_row + y_idx, mi_col + x_idx, subsize, &rt, &dt, |
1406 i != 3); | 1902 i != 3); |
1407 if (rt == INT_MAX || dt == INT64_MAX) { | 1903 if (rt == INT_MAX || dt == INT64_MAX) { |
1408 last_part_rate = INT_MAX; | 1904 last_part_rate = INT_MAX; |
1409 last_part_dist = INT64_MAX; | 1905 last_part_dist = INT64_MAX; |
1410 break; | 1906 break; |
1411 } | 1907 } |
1412 last_part_rate += rt; | 1908 last_part_rate += rt; |
1413 last_part_dist += dt; | 1909 last_part_dist += dt; |
1414 } | 1910 } |
1415 break; | 1911 break; |
1416 default: | 1912 default: |
1417 assert(0); | 1913 assert(0); |
1418 } | 1914 } |
1419 | 1915 |
1420 pl = partition_plane_context(cpi->above_seg_context, cpi->left_seg_context, | 1916 pl = partition_plane_context(xd, mi_row, mi_col, bsize); |
1421 mi_row, mi_col, bsize); | |
1422 if (last_part_rate < INT_MAX) { | 1917 if (last_part_rate < INT_MAX) { |
1423 last_part_rate += x->partition_cost[pl][partition]; | 1918 last_part_rate += x->partition_cost[pl][partition]; |
1424 last_part_rd = RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist); | 1919 last_part_rd = RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist); |
1425 } | 1920 } |
1426 | 1921 |
1427 if (cpi->sf.adjust_partitioning_from_last_frame | 1922 if (do_partition_search |
| 1923 && cpi->sf.adjust_partitioning_from_last_frame |
1428 && cpi->sf.partition_search_type == SEARCH_PARTITION | 1924 && cpi->sf.partition_search_type == SEARCH_PARTITION |
1429 && partition != PARTITION_SPLIT && bsize > BLOCK_8X8 | 1925 && partition != PARTITION_SPLIT && bsize > BLOCK_8X8 |
1430 && (mi_row + ms < cm->mi_rows || mi_row + (ms >> 1) == cm->mi_rows) | 1926 && (mi_row + mi_step < cm->mi_rows || |
1431 && (mi_col + ms < cm->mi_cols || mi_col + (ms >> 1) == cm->mi_cols)) { | 1927 mi_row + (mi_step >> 1) == cm->mi_rows) |
| 1928 && (mi_col + mi_step < cm->mi_cols || |
| 1929 mi_col + (mi_step >> 1) == cm->mi_cols)) { |
1432 BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT); | 1930 BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT); |
1433 chosen_rate = 0; | 1931 chosen_rate = 0; |
1434 chosen_dist = 0; | 1932 chosen_dist = 0; |
1435 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); | 1933 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); |
1436 | 1934 |
1437 // Split partition. | 1935 // Split partition. |
1438 for (i = 0; i < 4; i++) { | 1936 for (i = 0; i < 4; i++) { |
1439 int x_idx = (i & 1) * (num_4x4_blocks_wide >> 2); | 1937 int x_idx = (i & 1) * (mi_step >> 1); |
1440 int y_idx = (i >> 1) * (num_4x4_blocks_wide >> 2); | 1938 int y_idx = (i >> 1) * (mi_step >> 1); |
1441 int rt = 0; | 1939 int rt = 0; |
1442 int64_t dt = 0; | 1940 int64_t dt = 0; |
1443 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE]; | 1941 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE]; |
1444 PARTITION_CONTEXT sl[8], sa[8]; | 1942 PARTITION_CONTEXT sl[8], sa[8]; |
1445 | 1943 |
1446 if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols)) | 1944 if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols)) |
1447 continue; | 1945 continue; |
1448 | 1946 |
1449 *get_sb_index(x, split_subsize) = i; | 1947 *get_sb_index(x, split_subsize) = i; |
1450 *get_sb_partitioning(x, bsize) = split_subsize; | 1948 *get_sb_partitioning(x, bsize) = split_subsize; |
(...skipping 13 matching lines...) Expand all Loading... |
1464 break; | 1962 break; |
1465 } | 1963 } |
1466 | 1964 |
1467 chosen_rate += rt; | 1965 chosen_rate += rt; |
1468 chosen_dist += dt; | 1966 chosen_dist += dt; |
1469 | 1967 |
1470 if (i != 3) | 1968 if (i != 3) |
1471 encode_sb(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx, 0, | 1969 encode_sb(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx, 0, |
1472 split_subsize); | 1970 split_subsize); |
1473 | 1971 |
1474 pl = partition_plane_context(cpi->above_seg_context, | 1972 pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx, |
1475 cpi->left_seg_context, | |
1476 mi_row + y_idx, mi_col + x_idx, | |
1477 split_subsize); | 1973 split_subsize); |
1478 chosen_rate += x->partition_cost[pl][PARTITION_NONE]; | 1974 chosen_rate += x->partition_cost[pl][PARTITION_NONE]; |
1479 } | 1975 } |
1480 pl = partition_plane_context(cpi->above_seg_context, cpi->left_seg_context, | 1976 pl = partition_plane_context(xd, mi_row, mi_col, bsize); |
1481 mi_row, mi_col, bsize); | |
1482 if (chosen_rate < INT_MAX) { | 1977 if (chosen_rate < INT_MAX) { |
1483 chosen_rate += x->partition_cost[pl][PARTITION_SPLIT]; | 1978 chosen_rate += x->partition_cost[pl][PARTITION_SPLIT]; |
1484 chosen_rd = RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist); | 1979 chosen_rd = RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist); |
1485 } | 1980 } |
1486 } | 1981 } |
1487 | 1982 |
1488 // If last_part is better set the partitioning to that... | 1983 // If last_part is better set the partitioning to that... |
1489 if (last_part_rd < chosen_rd) { | 1984 if (last_part_rd < chosen_rd) { |
1490 mi_8x8[0]->mbmi.sb_type = bsize; | 1985 mi_8x8[0]->mbmi.sb_type = bsize; |
1491 if (bsize >= BLOCK_8X8) | 1986 if (bsize >= BLOCK_8X8) |
(...skipping 17 matching lines...) Expand all Loading... |
1509 if ( bsize == BLOCK_64X64) | 2004 if ( bsize == BLOCK_64X64) |
1510 assert(chosen_rate < INT_MAX && chosen_dist < INT64_MAX); | 2005 assert(chosen_rate < INT_MAX && chosen_dist < INT64_MAX); |
1511 | 2006 |
1512 if (do_recon) { | 2007 if (do_recon) { |
1513 int output_enabled = (bsize == BLOCK_64X64); | 2008 int output_enabled = (bsize == BLOCK_64X64); |
1514 | 2009 |
1515 // Check the projected output rate for this SB against it's target | 2010 // Check the projected output rate for this SB against it's target |
1516 // and and if necessary apply a Q delta using segmentation to get | 2011 // and and if necessary apply a Q delta using segmentation to get |
1517 // closer to the target. | 2012 // closer to the target. |
1518 if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) { | 2013 if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) { |
1519 select_in_frame_q_segment(cpi, mi_row, mi_col, | 2014 vp9_select_in_frame_q_segment(cpi, mi_row, mi_col, |
1520 output_enabled, chosen_rate); | 2015 output_enabled, chosen_rate); |
1521 } | 2016 } |
1522 | 2017 |
| 2018 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) |
| 2019 vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh, |
| 2020 chosen_rate, chosen_dist); |
| 2021 |
1523 encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize); | 2022 encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize); |
1524 } | 2023 } |
1525 | 2024 |
1526 *rate = chosen_rate; | 2025 *rate = chosen_rate; |
1527 *dist = chosen_dist; | 2026 *dist = chosen_dist; |
1528 } | 2027 } |
1529 | 2028 |
1530 static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = { | 2029 static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = { |
1531 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, | 2030 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, |
1532 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, | 2031 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, |
(...skipping 27 matching lines...) Expand all Loading... |
1560 int index = 0; | 2059 int index = 0; |
1561 | 2060 |
1562 // Check the sb_type for each block that belongs to this region. | 2061 // Check the sb_type for each block that belongs to this region. |
1563 for (i = 0; i < sb_height_in_blocks; ++i) { | 2062 for (i = 0; i < sb_height_in_blocks; ++i) { |
1564 for (j = 0; j < sb_width_in_blocks; ++j) { | 2063 for (j = 0; j < sb_width_in_blocks; ++j) { |
1565 MODE_INFO * mi = mi_8x8[index+j]; | 2064 MODE_INFO * mi = mi_8x8[index+j]; |
1566 BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0; | 2065 BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0; |
1567 *min_block_size = MIN(*min_block_size, sb_type); | 2066 *min_block_size = MIN(*min_block_size, sb_type); |
1568 *max_block_size = MAX(*max_block_size, sb_type); | 2067 *max_block_size = MAX(*max_block_size, sb_type); |
1569 } | 2068 } |
1570 index += xd->mode_info_stride; | 2069 index += xd->mi_stride; |
1571 } | 2070 } |
1572 } | 2071 } |
1573 | 2072 |
1574 // Next square block size less or equal than current block size. | 2073 // Next square block size less or equal than current block size. |
1575 static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = { | 2074 static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = { |
1576 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, | 2075 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, |
1577 BLOCK_8X8, BLOCK_8X8, BLOCK_8X8, | 2076 BLOCK_8X8, BLOCK_8X8, BLOCK_8X8, |
1578 BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, | 2077 BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, |
1579 BLOCK_32X32, BLOCK_32X32, BLOCK_32X32, | 2078 BLOCK_32X32, BLOCK_32X32, BLOCK_32X32, |
1580 BLOCK_64X64 | 2079 BLOCK_64X64 |
1581 }; | 2080 }; |
1582 | 2081 |
1583 // Look at neighboring blocks and set a min and max partition size based on | 2082 // Look at neighboring blocks and set a min and max partition size based on |
1584 // what they chose. | 2083 // what they chose. |
1585 static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile, | 2084 static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile, |
1586 int row, int col, | 2085 int mi_row, int mi_col, |
1587 BLOCK_SIZE *min_block_size, | 2086 BLOCK_SIZE *min_block_size, |
1588 BLOCK_SIZE *max_block_size) { | 2087 BLOCK_SIZE *max_block_size) { |
1589 VP9_COMMON * const cm = &cpi->common; | 2088 VP9_COMMON *const cm = &cpi->common; |
1590 MACROBLOCKD *const xd = &cpi->mb.e_mbd; | 2089 MACROBLOCKD *const xd = &cpi->mb.e_mbd; |
1591 MODE_INFO ** mi_8x8 = xd->mi_8x8; | 2090 MODE_INFO **mi_8x8 = xd->mi; |
1592 MODE_INFO ** prev_mi_8x8 = xd->prev_mi_8x8; | |
1593 | |
1594 const int left_in_image = xd->left_available && mi_8x8[-1]; | 2091 const int left_in_image = xd->left_available && mi_8x8[-1]; |
1595 const int above_in_image = xd->up_available && | 2092 const int above_in_image = xd->up_available && |
1596 mi_8x8[-xd->mode_info_stride]; | 2093 mi_8x8[-xd->mi_stride]; |
1597 MODE_INFO ** above_sb64_mi_8x8; | 2094 MODE_INFO **above_sb64_mi_8x8; |
1598 MODE_INFO ** left_sb64_mi_8x8; | 2095 MODE_INFO **left_sb64_mi_8x8; |
1599 | 2096 |
1600 int row8x8_remaining = tile->mi_row_end - row; | 2097 int row8x8_remaining = tile->mi_row_end - mi_row; |
1601 int col8x8_remaining = tile->mi_col_end - col; | 2098 int col8x8_remaining = tile->mi_col_end - mi_col; |
1602 int bh, bw; | 2099 int bh, bw; |
1603 | 2100 BLOCK_SIZE min_size = BLOCK_4X4; |
| 2101 BLOCK_SIZE max_size = BLOCK_64X64; |
1604 // Trap case where we do not have a prediction. | 2102 // Trap case where we do not have a prediction. |
1605 if (!left_in_image && !above_in_image && | 2103 if (left_in_image || above_in_image || cm->frame_type != KEY_FRAME) { |
1606 ((cm->frame_type == KEY_FRAME) || !cm->prev_mi)) { | |
1607 *min_block_size = BLOCK_4X4; | |
1608 *max_block_size = BLOCK_64X64; | |
1609 } else { | |
1610 // Default "min to max" and "max to min" | 2104 // Default "min to max" and "max to min" |
1611 *min_block_size = BLOCK_64X64; | 2105 min_size = BLOCK_64X64; |
1612 *max_block_size = BLOCK_4X4; | 2106 max_size = BLOCK_4X4; |
1613 | 2107 |
1614 // NOTE: each call to get_sb_partition_size_range() uses the previous | 2108 // NOTE: each call to get_sb_partition_size_range() uses the previous |
1615 // passed in values for min and max as a starting point. | 2109 // passed in values for min and max as a starting point. |
1616 // | |
1617 // Find the min and max partition used in previous frame at this location | 2110 // Find the min and max partition used in previous frame at this location |
1618 if (cm->prev_mi && (cm->frame_type != KEY_FRAME)) { | 2111 if (cm->frame_type != KEY_FRAME) { |
1619 get_sb_partition_size_range(cpi, prev_mi_8x8, | 2112 MODE_INFO **const prev_mi = |
1620 min_block_size, max_block_size); | 2113 &cm->prev_mi_grid_visible[mi_row * xd->mi_stride + mi_col]; |
| 2114 get_sb_partition_size_range(cpi, prev_mi, &min_size, &max_size); |
1621 } | 2115 } |
1622 | |
1623 // Find the min and max partition sizes used in the left SB64 | 2116 // Find the min and max partition sizes used in the left SB64 |
1624 if (left_in_image) { | 2117 if (left_in_image) { |
1625 left_sb64_mi_8x8 = &mi_8x8[-MI_BLOCK_SIZE]; | 2118 left_sb64_mi_8x8 = &mi_8x8[-MI_BLOCK_SIZE]; |
1626 get_sb_partition_size_range(cpi, left_sb64_mi_8x8, | 2119 get_sb_partition_size_range(cpi, left_sb64_mi_8x8, |
1627 min_block_size, max_block_size); | 2120 &min_size, &max_size); |
1628 } | 2121 } |
1629 | |
1630 // Find the min and max partition sizes used in the above SB64. | 2122 // Find the min and max partition sizes used in the above SB64. |
1631 if (above_in_image) { | 2123 if (above_in_image) { |
1632 above_sb64_mi_8x8 = &mi_8x8[-xd->mode_info_stride * MI_BLOCK_SIZE]; | 2124 above_sb64_mi_8x8 = &mi_8x8[-xd->mi_stride * MI_BLOCK_SIZE]; |
1633 get_sb_partition_size_range(cpi, above_sb64_mi_8x8, | 2125 get_sb_partition_size_range(cpi, above_sb64_mi_8x8, |
1634 min_block_size, max_block_size); | 2126 &min_size, &max_size); |
| 2127 } |
| 2128 // adjust observed min and max |
| 2129 if (cpi->sf.auto_min_max_partition_size == RELAXED_NEIGHBORING_MIN_MAX) { |
| 2130 min_size = min_partition_size[min_size]; |
| 2131 max_size = max_partition_size[max_size]; |
1635 } | 2132 } |
1636 } | 2133 } |
1637 | 2134 |
1638 // adjust observed min and max | 2135 // Check border cases where max and min from neighbors may not be legal. |
1639 if (cpi->sf.auto_min_max_partition_size == RELAXED_NEIGHBORING_MIN_MAX) { | 2136 max_size = find_partition_size(max_size, |
1640 *min_block_size = min_partition_size[*min_block_size]; | 2137 row8x8_remaining, col8x8_remaining, |
1641 *max_block_size = max_partition_size[*max_block_size]; | 2138 &bh, &bw); |
1642 } | 2139 min_size = MIN(min_size, max_size); |
1643 | |
1644 // Check border cases where max and min from neighbours may not be legal. | |
1645 *max_block_size = find_partition_size(*max_block_size, | |
1646 row8x8_remaining, col8x8_remaining, | |
1647 &bh, &bw); | |
1648 *min_block_size = MIN(*min_block_size, *max_block_size); | |
1649 | 2140 |
1650 // When use_square_partition_only is true, make sure at least one square | 2141 // When use_square_partition_only is true, make sure at least one square |
1651 // partition is allowed by selecting the next smaller square size as | 2142 // partition is allowed by selecting the next smaller square size as |
1652 // *min_block_size. | 2143 // *min_block_size. |
1653 if (cpi->sf.use_square_partition_only && | 2144 if (cpi->sf.use_square_partition_only && |
1654 (*max_block_size - *min_block_size) < 2) { | 2145 next_square_size[max_size] < min_size) { |
1655 *min_block_size = next_square_size[*min_block_size]; | 2146 min_size = next_square_size[max_size]; |
1656 } | 2147 } |
| 2148 *min_block_size = min_size; |
| 2149 *max_block_size = max_size; |
1657 } | 2150 } |
1658 | 2151 |
1659 static INLINE void store_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) { | 2152 static INLINE void store_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) { |
1660 vpx_memcpy(ctx->pred_mv, x->pred_mv, sizeof(x->pred_mv)); | 2153 vpx_memcpy(ctx->pred_mv, x->pred_mv, sizeof(x->pred_mv)); |
1661 } | 2154 } |
1662 | 2155 |
1663 static INLINE void load_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) { | 2156 static INLINE void load_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) { |
1664 vpx_memcpy(x->pred_mv, ctx->pred_mv, sizeof(x->pred_mv)); | 2157 vpx_memcpy(x->pred_mv, ctx->pred_mv, sizeof(x->pred_mv)); |
1665 } | 2158 } |
1666 | 2159 |
1667 // TODO(jingning,jimbankoski,rbultje): properly skip partition types that are | 2160 // TODO(jingning,jimbankoski,rbultje): properly skip partition types that are |
1668 // unlikely to be selected depending on previous rate-distortion optimization | 2161 // unlikely to be selected depending on previous rate-distortion optimization |
1669 // results, for encoding speed-up. | 2162 // results, for encoding speed-up. |
1670 static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile, | 2163 static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile, |
1671 TOKENEXTRA **tp, int mi_row, | 2164 TOKENEXTRA **tp, int mi_row, |
1672 int mi_col, BLOCK_SIZE bsize, int *rate, | 2165 int mi_col, BLOCK_SIZE bsize, int *rate, |
1673 int64_t *dist, int do_recon, int64_t best_rd) { | 2166 int64_t *dist, int do_recon, int64_t best_rd) { |
1674 VP9_COMMON *const cm = &cpi->common; | 2167 VP9_COMMON *const cm = &cpi->common; |
1675 MACROBLOCK *const x = &cpi->mb; | 2168 MACROBLOCK *const x = &cpi->mb; |
1676 const int ms = num_8x8_blocks_wide_lookup[bsize] / 2; | 2169 MACROBLOCKD *const xd = &x->e_mbd; |
| 2170 const int mi_step = num_8x8_blocks_wide_lookup[bsize] / 2; |
1677 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE]; | 2171 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE]; |
1678 PARTITION_CONTEXT sl[8], sa[8]; | 2172 PARTITION_CONTEXT sl[8], sa[8]; |
1679 TOKENEXTRA *tp_orig = *tp; | 2173 TOKENEXTRA *tp_orig = *tp; |
| 2174 PICK_MODE_CONTEXT *ctx = get_block_context(x, bsize); |
1680 int i, pl; | 2175 int i, pl; |
1681 BLOCK_SIZE subsize; | 2176 BLOCK_SIZE subsize; |
1682 int this_rate, sum_rate = 0, best_rate = INT_MAX; | 2177 int this_rate, sum_rate = 0, best_rate = INT_MAX; |
1683 int64_t this_dist, sum_dist = 0, best_dist = INT64_MAX; | 2178 int64_t this_dist, sum_dist = 0, best_dist = INT64_MAX; |
1684 int64_t sum_rd = 0; | 2179 int64_t sum_rd = 0; |
1685 int do_split = bsize >= BLOCK_8X8; | 2180 int do_split = bsize >= BLOCK_8X8; |
1686 int do_rect = 1; | 2181 int do_rect = 1; |
1687 // Override skipping rectangular partition operations for edge blocks | 2182 // Override skipping rectangular partition operations for edge blocks |
1688 const int force_horz_split = (mi_row + ms >= cm->mi_rows); | 2183 const int force_horz_split = (mi_row + mi_step >= cm->mi_rows); |
1689 const int force_vert_split = (mi_col + ms >= cm->mi_cols); | 2184 const int force_vert_split = (mi_col + mi_step >= cm->mi_cols); |
1690 const int xss = x->e_mbd.plane[1].subsampling_x; | 2185 const int xss = x->e_mbd.plane[1].subsampling_x; |
1691 const int yss = x->e_mbd.plane[1].subsampling_y; | 2186 const int yss = x->e_mbd.plane[1].subsampling_y; |
1692 | 2187 |
1693 int partition_none_allowed = !force_horz_split && !force_vert_split; | 2188 int partition_none_allowed = !force_horz_split && !force_vert_split; |
1694 int partition_horz_allowed = !force_vert_split && yss <= xss && | 2189 int partition_horz_allowed = !force_vert_split && yss <= xss && |
1695 bsize >= BLOCK_8X8; | 2190 bsize >= BLOCK_8X8; |
1696 int partition_vert_allowed = !force_horz_split && xss <= yss && | 2191 int partition_vert_allowed = !force_horz_split && xss <= yss && |
1697 bsize >= BLOCK_8X8; | 2192 bsize >= BLOCK_8X8; |
1698 (void) *tp_orig; | 2193 (void) *tp_orig; |
1699 | 2194 |
1700 if (bsize < BLOCK_8X8) { | 2195 if (bsize < BLOCK_8X8) { |
1701 // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0 | 2196 // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0 |
1702 // there is nothing to be done. | 2197 // there is nothing to be done. |
1703 if (x->ab_index != 0) { | 2198 if (x->ab_index != 0) { |
1704 *rate = 0; | 2199 *rate = 0; |
1705 *dist = 0; | 2200 *dist = 0; |
1706 return; | 2201 return; |
1707 } | 2202 } |
1708 } | 2203 } |
1709 assert(num_8x8_blocks_wide_lookup[bsize] == | 2204 assert(num_8x8_blocks_wide_lookup[bsize] == |
1710 num_8x8_blocks_high_lookup[bsize]); | 2205 num_8x8_blocks_high_lookup[bsize]); |
1711 | 2206 |
1712 if (bsize == BLOCK_16X16) { | 2207 if (bsize == BLOCK_16X16) { |
1713 set_offsets(cpi, tile, mi_row, mi_col, bsize); | 2208 set_offsets(cpi, tile, mi_row, mi_col, bsize); |
1714 x->mb_energy = vp9_block_energy(cpi, x, bsize); | 2209 x->mb_energy = vp9_block_energy(cpi, x, bsize); |
| 2210 } else { |
| 2211 x->in_active_map = check_active_map(cpi, x, mi_row, mi_col, bsize); |
1715 } | 2212 } |
1716 | 2213 |
1717 // Determine partition types in search according to the speed features. | 2214 // Determine partition types in search according to the speed features. |
1718 // The threshold set here has to be of square block size. | 2215 // The threshold set here has to be of square block size. |
1719 if (cpi->sf.auto_min_max_partition_size) { | 2216 if (cpi->sf.auto_min_max_partition_size) { |
1720 partition_none_allowed &= (bsize <= cpi->sf.max_partition_size && | 2217 partition_none_allowed &= (bsize <= cpi->sf.max_partition_size && |
1721 bsize >= cpi->sf.min_partition_size); | 2218 bsize >= cpi->sf.min_partition_size); |
1722 partition_horz_allowed &= ((bsize <= cpi->sf.max_partition_size && | 2219 partition_horz_allowed &= ((bsize <= cpi->sf.max_partition_size && |
1723 bsize > cpi->sf.min_partition_size) || | 2220 bsize > cpi->sf.min_partition_size) || |
1724 force_horz_split); | 2221 force_horz_split); |
(...skipping 13 matching lines...) Expand all Loading... |
1738 unsigned int source_variancey; | 2235 unsigned int source_variancey; |
1739 vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col); | 2236 vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col); |
1740 source_variancey = get_sby_perpixel_variance(cpi, x, bsize); | 2237 source_variancey = get_sby_perpixel_variance(cpi, x, bsize); |
1741 if (source_variancey < cpi->sf.disable_split_var_thresh) { | 2238 if (source_variancey < cpi->sf.disable_split_var_thresh) { |
1742 do_split = 0; | 2239 do_split = 0; |
1743 if (source_variancey < cpi->sf.disable_split_var_thresh / 2) | 2240 if (source_variancey < cpi->sf.disable_split_var_thresh / 2) |
1744 do_rect = 0; | 2241 do_rect = 0; |
1745 } | 2242 } |
1746 } | 2243 } |
1747 | 2244 |
| 2245 if (!x->in_active_map && (partition_horz_allowed || partition_vert_allowed)) |
| 2246 do_split = 0; |
1748 // PARTITION_NONE | 2247 // PARTITION_NONE |
1749 if (partition_none_allowed) { | 2248 if (partition_none_allowed) { |
1750 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &this_rate, &this_dist, bsize, | 2249 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &this_rate, &this_dist, bsize, |
1751 get_block_context(x, bsize), best_rd); | 2250 ctx, best_rd); |
1752 if (this_rate != INT_MAX) { | 2251 if (this_rate != INT_MAX) { |
1753 if (bsize >= BLOCK_8X8) { | 2252 if (bsize >= BLOCK_8X8) { |
1754 pl = partition_plane_context(cpi->above_seg_context, | 2253 pl = partition_plane_context(xd, mi_row, mi_col, bsize); |
1755 cpi->left_seg_context, | |
1756 mi_row, mi_col, bsize); | |
1757 this_rate += x->partition_cost[pl][PARTITION_NONE]; | 2254 this_rate += x->partition_cost[pl][PARTITION_NONE]; |
1758 } | 2255 } |
1759 sum_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_dist); | 2256 sum_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_dist); |
1760 if (sum_rd < best_rd) { | 2257 if (sum_rd < best_rd) { |
1761 int64_t stop_thresh = 4096; | 2258 int64_t stop_thresh = 4096; |
1762 int64_t stop_thresh_rd; | 2259 int64_t stop_thresh_rd; |
1763 | 2260 |
1764 best_rate = this_rate; | 2261 best_rate = this_rate; |
1765 best_dist = this_dist; | 2262 best_dist = this_dist; |
1766 best_rd = sum_rd; | 2263 best_rd = sum_rd; |
1767 if (bsize >= BLOCK_8X8) | 2264 if (bsize >= BLOCK_8X8) |
1768 *(get_sb_partitioning(x, bsize)) = bsize; | 2265 *(get_sb_partitioning(x, bsize)) = bsize; |
1769 | 2266 |
1770 // Adjust threshold according to partition size. | 2267 // Adjust threshold according to partition size. |
1771 stop_thresh >>= 8 - (b_width_log2_lookup[bsize] + | 2268 stop_thresh >>= 8 - (b_width_log2_lookup[bsize] + |
1772 b_height_log2_lookup[bsize]); | 2269 b_height_log2_lookup[bsize]); |
1773 | 2270 |
1774 stop_thresh_rd = RDCOST(x->rdmult, x->rddiv, 0, stop_thresh); | 2271 stop_thresh_rd = RDCOST(x->rdmult, x->rddiv, 0, stop_thresh); |
1775 // If obtained distortion is very small, choose current partition | 2272 // If obtained distortion is very small, choose current partition |
1776 // and stop splitting. | 2273 // and stop splitting. |
1777 if (!x->e_mbd.lossless && best_rd < stop_thresh_rd) { | 2274 if (!x->e_mbd.lossless && best_rd < stop_thresh_rd) { |
1778 do_split = 0; | 2275 do_split = 0; |
1779 do_rect = 0; | 2276 do_rect = 0; |
1780 } | 2277 } |
1781 } | 2278 } |
1782 } | 2279 } |
| 2280 if (!x->in_active_map) { |
| 2281 do_split = 0; |
| 2282 do_rect = 0; |
| 2283 } |
1783 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); | 2284 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); |
1784 } | 2285 } |
1785 | 2286 |
1786 // store estimated motion vector | 2287 // store estimated motion vector |
1787 if (cpi->sf.adaptive_motion_search) | 2288 if (cpi->sf.adaptive_motion_search) |
1788 store_pred_mv(x, get_block_context(x, bsize)); | 2289 store_pred_mv(x, ctx); |
1789 | 2290 |
1790 // PARTITION_SPLIT | 2291 // PARTITION_SPLIT |
1791 sum_rd = 0; | 2292 sum_rd = 0; |
1792 // TODO(jingning): use the motion vectors given by the above search as | 2293 // TODO(jingning): use the motion vectors given by the above search as |
1793 // the starting point of motion search in the following partition type check. | 2294 // the starting point of motion search in the following partition type check. |
1794 if (do_split) { | 2295 if (do_split) { |
1795 subsize = get_subsize(bsize, PARTITION_SPLIT); | 2296 subsize = get_subsize(bsize, PARTITION_SPLIT); |
1796 for (i = 0; i < 4 && sum_rd < best_rd; ++i) { | 2297 for (i = 0; i < 4 && sum_rd < best_rd; ++i) { |
1797 const int x_idx = (i & 1) * ms; | 2298 const int x_idx = (i & 1) * mi_step; |
1798 const int y_idx = (i >> 1) * ms; | 2299 const int y_idx = (i >> 1) * mi_step; |
1799 | 2300 |
1800 if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols) | 2301 if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols) |
1801 continue; | 2302 continue; |
1802 | 2303 |
1803 *get_sb_index(x, subsize) = i; | 2304 *get_sb_index(x, subsize) = i; |
1804 if (cpi->sf.adaptive_motion_search) | 2305 if (cpi->sf.adaptive_motion_search) |
1805 load_pred_mv(x, get_block_context(x, bsize)); | 2306 load_pred_mv(x, ctx); |
1806 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && | 2307 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && |
1807 partition_none_allowed) | 2308 partition_none_allowed) |
1808 get_block_context(x, subsize)->pred_interp_filter = | 2309 get_block_context(x, subsize)->pred_interp_filter = |
1809 get_block_context(x, bsize)->mic.mbmi.interp_filter; | 2310 ctx->mic.mbmi.interp_filter; |
1810 rd_pick_partition(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx, subsize, | 2311 rd_pick_partition(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx, subsize, |
1811 &this_rate, &this_dist, i != 3, best_rd - sum_rd); | 2312 &this_rate, &this_dist, i != 3, best_rd - sum_rd); |
1812 | 2313 |
1813 if (this_rate == INT_MAX) { | 2314 if (this_rate == INT_MAX) { |
1814 sum_rd = INT64_MAX; | 2315 sum_rd = INT64_MAX; |
1815 } else { | 2316 } else { |
1816 sum_rate += this_rate; | 2317 sum_rate += this_rate; |
1817 sum_dist += this_dist; | 2318 sum_dist += this_dist; |
1818 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); | 2319 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); |
1819 } | 2320 } |
1820 } | 2321 } |
1821 if (sum_rd < best_rd && i == 4) { | 2322 if (sum_rd < best_rd && i == 4) { |
1822 pl = partition_plane_context(cpi->above_seg_context, | 2323 pl = partition_plane_context(xd, mi_row, mi_col, bsize); |
1823 cpi->left_seg_context, | |
1824 mi_row, mi_col, bsize); | |
1825 sum_rate += x->partition_cost[pl][PARTITION_SPLIT]; | 2324 sum_rate += x->partition_cost[pl][PARTITION_SPLIT]; |
1826 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); | 2325 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); |
1827 if (sum_rd < best_rd) { | 2326 if (sum_rd < best_rd) { |
1828 best_rate = sum_rate; | 2327 best_rate = sum_rate; |
1829 best_dist = sum_dist; | 2328 best_dist = sum_dist; |
1830 best_rd = sum_rd; | 2329 best_rd = sum_rd; |
1831 *(get_sb_partitioning(x, bsize)) = subsize; | 2330 *(get_sb_partitioning(x, bsize)) = subsize; |
1832 } | 2331 } |
1833 } else { | 2332 } else { |
1834 // skip rectangular partition test when larger block size | 2333 // skip rectangular partition test when larger block size |
1835 // gives better rd cost | 2334 // gives better rd cost |
1836 if (cpi->sf.less_rectangular_check) | 2335 if (cpi->sf.less_rectangular_check) |
1837 do_rect &= !partition_none_allowed; | 2336 do_rect &= !partition_none_allowed; |
1838 } | 2337 } |
1839 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); | 2338 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); |
1840 } | 2339 } |
1841 | 2340 |
1842 // PARTITION_HORZ | 2341 // PARTITION_HORZ |
1843 if (partition_horz_allowed && do_rect) { | 2342 if (partition_horz_allowed && do_rect) { |
1844 subsize = get_subsize(bsize, PARTITION_HORZ); | 2343 subsize = get_subsize(bsize, PARTITION_HORZ); |
1845 *get_sb_index(x, subsize) = 0; | 2344 *get_sb_index(x, subsize) = 0; |
1846 if (cpi->sf.adaptive_motion_search) | 2345 if (cpi->sf.adaptive_motion_search) |
1847 load_pred_mv(x, get_block_context(x, bsize)); | 2346 load_pred_mv(x, ctx); |
1848 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && | 2347 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && |
1849 partition_none_allowed) | 2348 partition_none_allowed) |
1850 get_block_context(x, subsize)->pred_interp_filter = | 2349 get_block_context(x, subsize)->pred_interp_filter = |
1851 get_block_context(x, bsize)->mic.mbmi.interp_filter; | 2350 ctx->mic.mbmi.interp_filter; |
1852 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize, | 2351 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize, |
1853 get_block_context(x, subsize), best_rd); | 2352 get_block_context(x, subsize), best_rd); |
1854 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); | 2353 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); |
1855 | 2354 |
1856 if (sum_rd < best_rd && mi_row + ms < cm->mi_rows) { | 2355 if (sum_rd < best_rd && mi_row + mi_step < cm->mi_rows) { |
1857 update_state(cpi, get_block_context(x, subsize), subsize, 0); | 2356 update_state(cpi, get_block_context(x, subsize), mi_row, mi_col, |
| 2357 subsize, 0); |
1858 encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize); | 2358 encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize); |
1859 | 2359 |
1860 *get_sb_index(x, subsize) = 1; | 2360 *get_sb_index(x, subsize) = 1; |
1861 if (cpi->sf.adaptive_motion_search) | 2361 if (cpi->sf.adaptive_motion_search) |
1862 load_pred_mv(x, get_block_context(x, bsize)); | 2362 load_pred_mv(x, ctx); |
1863 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && | 2363 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && |
1864 partition_none_allowed) | 2364 partition_none_allowed) |
1865 get_block_context(x, subsize)->pred_interp_filter = | 2365 get_block_context(x, subsize)->pred_interp_filter = |
1866 get_block_context(x, bsize)->mic.mbmi.interp_filter; | 2366 ctx->mic.mbmi.interp_filter; |
1867 rd_pick_sb_modes(cpi, tile, mi_row + ms, mi_col, &this_rate, | 2367 rd_pick_sb_modes(cpi, tile, mi_row + mi_step, mi_col, &this_rate, |
1868 &this_dist, subsize, get_block_context(x, subsize), | 2368 &this_dist, subsize, get_block_context(x, subsize), |
1869 best_rd - sum_rd); | 2369 best_rd - sum_rd); |
1870 if (this_rate == INT_MAX) { | 2370 if (this_rate == INT_MAX) { |
1871 sum_rd = INT64_MAX; | 2371 sum_rd = INT64_MAX; |
1872 } else { | 2372 } else { |
1873 sum_rate += this_rate; | 2373 sum_rate += this_rate; |
1874 sum_dist += this_dist; | 2374 sum_dist += this_dist; |
1875 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); | 2375 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); |
1876 } | 2376 } |
1877 } | 2377 } |
1878 if (sum_rd < best_rd) { | 2378 if (sum_rd < best_rd) { |
1879 pl = partition_plane_context(cpi->above_seg_context, | 2379 pl = partition_plane_context(xd, mi_row, mi_col, bsize); |
1880 cpi->left_seg_context, | |
1881 mi_row, mi_col, bsize); | |
1882 sum_rate += x->partition_cost[pl][PARTITION_HORZ]; | 2380 sum_rate += x->partition_cost[pl][PARTITION_HORZ]; |
1883 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); | 2381 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); |
1884 if (sum_rd < best_rd) { | 2382 if (sum_rd < best_rd) { |
1885 best_rd = sum_rd; | 2383 best_rd = sum_rd; |
1886 best_rate = sum_rate; | 2384 best_rate = sum_rate; |
1887 best_dist = sum_dist; | 2385 best_dist = sum_dist; |
1888 *(get_sb_partitioning(x, bsize)) = subsize; | 2386 *(get_sb_partitioning(x, bsize)) = subsize; |
1889 } | 2387 } |
1890 } | 2388 } |
1891 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); | 2389 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); |
1892 } | 2390 } |
1893 | 2391 |
1894 // PARTITION_VERT | 2392 // PARTITION_VERT |
1895 if (partition_vert_allowed && do_rect) { | 2393 if (partition_vert_allowed && do_rect) { |
1896 subsize = get_subsize(bsize, PARTITION_VERT); | 2394 subsize = get_subsize(bsize, PARTITION_VERT); |
1897 | 2395 |
1898 *get_sb_index(x, subsize) = 0; | 2396 *get_sb_index(x, subsize) = 0; |
1899 if (cpi->sf.adaptive_motion_search) | 2397 if (cpi->sf.adaptive_motion_search) |
1900 load_pred_mv(x, get_block_context(x, bsize)); | 2398 load_pred_mv(x, ctx); |
1901 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && | 2399 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && |
1902 partition_none_allowed) | 2400 partition_none_allowed) |
1903 get_block_context(x, subsize)->pred_interp_filter = | 2401 get_block_context(x, subsize)->pred_interp_filter = |
1904 get_block_context(x, bsize)->mic.mbmi.interp_filter; | 2402 ctx->mic.mbmi.interp_filter; |
1905 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize, | 2403 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize, |
1906 get_block_context(x, subsize), best_rd); | 2404 get_block_context(x, subsize), best_rd); |
1907 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); | 2405 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); |
1908 if (sum_rd < best_rd && mi_col + ms < cm->mi_cols) { | 2406 if (sum_rd < best_rd && mi_col + mi_step < cm->mi_cols) { |
1909 update_state(cpi, get_block_context(x, subsize), subsize, 0); | 2407 update_state(cpi, get_block_context(x, subsize), mi_row, mi_col, |
| 2408 subsize, 0); |
1910 encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize); | 2409 encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize); |
1911 | 2410 |
1912 *get_sb_index(x, subsize) = 1; | 2411 *get_sb_index(x, subsize) = 1; |
1913 if (cpi->sf.adaptive_motion_search) | 2412 if (cpi->sf.adaptive_motion_search) |
1914 load_pred_mv(x, get_block_context(x, bsize)); | 2413 load_pred_mv(x, ctx); |
1915 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && | 2414 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && |
1916 partition_none_allowed) | 2415 partition_none_allowed) |
1917 get_block_context(x, subsize)->pred_interp_filter = | 2416 get_block_context(x, subsize)->pred_interp_filter = |
1918 get_block_context(x, bsize)->mic.mbmi.interp_filter; | 2417 ctx->mic.mbmi.interp_filter; |
1919 rd_pick_sb_modes(cpi, tile, mi_row, mi_col + ms, &this_rate, | 2418 rd_pick_sb_modes(cpi, tile, mi_row, mi_col + mi_step, &this_rate, |
1920 &this_dist, subsize, get_block_context(x, subsize), | 2419 &this_dist, subsize, get_block_context(x, subsize), |
1921 best_rd - sum_rd); | 2420 best_rd - sum_rd); |
1922 if (this_rate == INT_MAX) { | 2421 if (this_rate == INT_MAX) { |
1923 sum_rd = INT64_MAX; | 2422 sum_rd = INT64_MAX; |
1924 } else { | 2423 } else { |
1925 sum_rate += this_rate; | 2424 sum_rate += this_rate; |
1926 sum_dist += this_dist; | 2425 sum_dist += this_dist; |
1927 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); | 2426 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); |
1928 } | 2427 } |
1929 } | 2428 } |
1930 if (sum_rd < best_rd) { | 2429 if (sum_rd < best_rd) { |
1931 pl = partition_plane_context(cpi->above_seg_context, | 2430 pl = partition_plane_context(xd, mi_row, mi_col, bsize); |
1932 cpi->left_seg_context, | |
1933 mi_row, mi_col, bsize); | |
1934 sum_rate += x->partition_cost[pl][PARTITION_VERT]; | 2431 sum_rate += x->partition_cost[pl][PARTITION_VERT]; |
1935 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); | 2432 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); |
1936 if (sum_rd < best_rd) { | 2433 if (sum_rd < best_rd) { |
1937 best_rate = sum_rate; | 2434 best_rate = sum_rate; |
1938 best_dist = sum_dist; | 2435 best_dist = sum_dist; |
1939 best_rd = sum_rd; | 2436 best_rd = sum_rd; |
1940 *(get_sb_partitioning(x, bsize)) = subsize; | 2437 *(get_sb_partitioning(x, bsize)) = subsize; |
1941 } | 2438 } |
1942 } | 2439 } |
1943 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); | 2440 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); |
1944 } | 2441 } |
1945 | 2442 |
1946 // TODO(jbb): This code added so that we avoid static analysis | 2443 // TODO(jbb): This code added so that we avoid static analysis |
1947 // warning related to the fact that best_rd isn't used after this | 2444 // warning related to the fact that best_rd isn't used after this |
1948 // point. This code should be refactored so that the duplicate | 2445 // point. This code should be refactored so that the duplicate |
1949 // checks occur in some sub function and thus are used... | 2446 // checks occur in some sub function and thus are used... |
1950 (void) best_rd; | 2447 (void) best_rd; |
1951 *rate = best_rate; | 2448 *rate = best_rate; |
1952 *dist = best_dist; | 2449 *dist = best_dist; |
1953 | 2450 |
1954 if (best_rate < INT_MAX && best_dist < INT64_MAX && do_recon) { | 2451 if (best_rate < INT_MAX && best_dist < INT64_MAX && do_recon) { |
1955 int output_enabled = (bsize == BLOCK_64X64); | 2452 int output_enabled = (bsize == BLOCK_64X64); |
1956 | 2453 |
1957 // Check the projected output rate for this SB against it's target | 2454 // Check the projected output rate for this SB against it's target |
1958 // and and if necessary apply a Q delta using segmentation to get | 2455 // and and if necessary apply a Q delta using segmentation to get |
1959 // closer to the target. | 2456 // closer to the target. |
1960 if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) { | 2457 if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) { |
1961 select_in_frame_q_segment(cpi, mi_row, mi_col, output_enabled, best_rate); | 2458 vp9_select_in_frame_q_segment(cpi, mi_row, mi_col, output_enabled, |
| 2459 best_rate); |
1962 } | 2460 } |
| 2461 |
| 2462 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) |
| 2463 vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh, |
| 2464 best_rate, best_dist); |
| 2465 |
1963 encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize); | 2466 encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize); |
1964 } | 2467 } |
1965 if (bsize == BLOCK_64X64) { | 2468 if (bsize == BLOCK_64X64) { |
1966 assert(tp_orig < *tp); | 2469 assert(tp_orig < *tp); |
1967 assert(best_rate < INT_MAX); | 2470 assert(best_rate < INT_MAX); |
1968 assert(best_dist < INT64_MAX); | 2471 assert(best_dist < INT64_MAX); |
1969 } else { | 2472 } else { |
1970 assert(tp_orig == *tp); | 2473 assert(tp_orig == *tp); |
1971 } | 2474 } |
1972 } | 2475 } |
1973 | 2476 |
1974 static void encode_rd_sb_row(VP9_COMP *cpi, const TileInfo *const tile, | 2477 static void encode_rd_sb_row(VP9_COMP *cpi, const TileInfo *const tile, |
1975 int mi_row, TOKENEXTRA **tp) { | 2478 int mi_row, TOKENEXTRA **tp) { |
1976 VP9_COMMON *const cm = &cpi->common; | 2479 VP9_COMMON *const cm = &cpi->common; |
| 2480 MACROBLOCKD *const xd = &cpi->mb.e_mbd; |
| 2481 SPEED_FEATURES *const sf = &cpi->sf; |
1977 int mi_col; | 2482 int mi_col; |
1978 | 2483 |
1979 // Initialize the left context for the new SB row | 2484 // Initialize the left context for the new SB row |
1980 vpx_memset(&cpi->left_context, 0, sizeof(cpi->left_context)); | 2485 vpx_memset(&xd->left_context, 0, sizeof(xd->left_context)); |
1981 vpx_memset(cpi->left_seg_context, 0, sizeof(cpi->left_seg_context)); | 2486 vpx_memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context)); |
1982 | 2487 |
1983 // Code each SB in the row | 2488 // Code each SB in the row |
1984 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; | 2489 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; |
1985 mi_col += MI_BLOCK_SIZE) { | 2490 mi_col += MI_BLOCK_SIZE) { |
1986 int dummy_rate; | 2491 int dummy_rate; |
1987 int64_t dummy_dist; | 2492 int64_t dummy_dist; |
1988 | 2493 |
1989 BLOCK_SIZE i; | 2494 BLOCK_SIZE i; |
1990 MACROBLOCK *x = &cpi->mb; | 2495 MACROBLOCK *x = &cpi->mb; |
1991 | 2496 |
1992 if (cpi->sf.adaptive_pred_interp_filter) { | 2497 if (sf->adaptive_pred_interp_filter) { |
1993 for (i = BLOCK_4X4; i < BLOCK_8X8; ++i) { | 2498 for (i = BLOCK_4X4; i < BLOCK_8X8; ++i) { |
1994 const int num_4x4_w = num_4x4_blocks_wide_lookup[i]; | 2499 const int num_4x4_w = num_4x4_blocks_wide_lookup[i]; |
1995 const int num_4x4_h = num_4x4_blocks_high_lookup[i]; | 2500 const int num_4x4_h = num_4x4_blocks_high_lookup[i]; |
1996 const int num_4x4_blk = MAX(4, num_4x4_w * num_4x4_h); | 2501 const int num_4x4_blk = MAX(4, num_4x4_w * num_4x4_h); |
1997 for (x->sb_index = 0; x->sb_index < 4; ++x->sb_index) | 2502 for (x->sb_index = 0; x->sb_index < 4; ++x->sb_index) |
1998 for (x->mb_index = 0; x->mb_index < 4; ++x->mb_index) | 2503 for (x->mb_index = 0; x->mb_index < 4; ++x->mb_index) |
1999 for (x->b_index = 0; x->b_index < 16 / num_4x4_blk; ++x->b_index) | 2504 for (x->b_index = 0; x->b_index < 16 / num_4x4_blk; ++x->b_index) |
2000 get_block_context(x, i)->pred_interp_filter = SWITCHABLE; | 2505 get_block_context(x, i)->pred_interp_filter = SWITCHABLE; |
2001 } | 2506 } |
2002 } | 2507 } |
2003 | 2508 |
2004 vp9_zero(cpi->mb.pred_mv); | 2509 vp9_zero(cpi->mb.pred_mv); |
2005 | 2510 |
2006 if ((cpi->sf.partition_search_type == SEARCH_PARTITION && | 2511 if ((sf->partition_search_type == SEARCH_PARTITION && |
2007 cpi->sf.use_lastframe_partitioning) || | 2512 sf->use_lastframe_partitioning) || |
2008 cpi->sf.partition_search_type == FIXED_PARTITION || | 2513 sf->partition_search_type == FIXED_PARTITION || |
2009 cpi->sf.partition_search_type == VAR_BASED_FIXED_PARTITION) { | 2514 sf->partition_search_type == VAR_BASED_PARTITION || |
2010 const int idx_str = cm->mode_info_stride * mi_row + mi_col; | 2515 sf->partition_search_type == VAR_BASED_FIXED_PARTITION) { |
| 2516 const int idx_str = cm->mi_stride * mi_row + mi_col; |
2011 MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str; | 2517 MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str; |
2012 MODE_INFO **prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str; | 2518 MODE_INFO **prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str; |
2013 | |
2014 cpi->mb.source_variance = UINT_MAX; | 2519 cpi->mb.source_variance = UINT_MAX; |
2015 if (cpi->sf.partition_search_type == FIXED_PARTITION) { | 2520 if (sf->partition_search_type == FIXED_PARTITION) { |
2016 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64); | 2521 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64); |
2017 set_partitioning(cpi, tile, mi_8x8, mi_row, mi_col, | 2522 set_fixed_partitioning(cpi, tile, mi_8x8, mi_row, mi_col, |
2018 cpi->sf.always_this_block_size); | 2523 sf->always_this_block_size); |
2019 rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64, | 2524 rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64, |
2020 &dummy_rate, &dummy_dist, 1); | 2525 &dummy_rate, &dummy_dist, 1); |
2021 } else if (cpi->sf.partition_search_type == VAR_BASED_FIXED_PARTITION || | 2526 } else if (sf->partition_search_type == VAR_BASED_FIXED_PARTITION) { |
2022 cpi->sf.partition_search_type == VAR_BASED_PARTITION) { | |
2023 // TODO(debargha): Implement VAR_BASED_PARTITION as a separate case. | |
2024 // Currently both VAR_BASED_FIXED_PARTITION/VAR_BASED_PARTITION | |
2025 // map to the same thing. | |
2026 BLOCK_SIZE bsize; | 2527 BLOCK_SIZE bsize; |
2027 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64); | 2528 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64); |
2028 bsize = get_rd_var_based_fixed_partition(cpi, mi_row, mi_col); | 2529 bsize = get_rd_var_based_fixed_partition(cpi, mi_row, mi_col); |
2029 set_partitioning(cpi, tile, mi_8x8, mi_row, mi_col, bsize); | 2530 set_fixed_partitioning(cpi, tile, mi_8x8, mi_row, mi_col, bsize); |
| 2531 rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64, |
| 2532 &dummy_rate, &dummy_dist, 1); |
| 2533 } else if (sf->partition_search_type == VAR_BASED_PARTITION) { |
| 2534 choose_partitioning(cpi, tile, mi_row, mi_col); |
2030 rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64, | 2535 rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64, |
2031 &dummy_rate, &dummy_dist, 1); | 2536 &dummy_rate, &dummy_dist, 1); |
2032 } else { | 2537 } else { |
2033 if ((cm->current_video_frame | 2538 if ((cm->current_video_frame |
2034 % cpi->sf.last_partitioning_redo_frequency) == 0 | 2539 % sf->last_partitioning_redo_frequency) == 0 |
2035 || cm->prev_mi == 0 | 2540 || cm->prev_mi == 0 |
2036 || cm->show_frame == 0 | 2541 || cm->show_frame == 0 |
2037 || cm->frame_type == KEY_FRAME | 2542 || cm->frame_type == KEY_FRAME |
2038 || cpi->rc.is_src_frame_alt_ref | 2543 || cpi->rc.is_src_frame_alt_ref |
2039 || ((cpi->sf.use_lastframe_partitioning == | 2544 || ((sf->use_lastframe_partitioning == |
2040 LAST_FRAME_PARTITION_LOW_MOTION) && | 2545 LAST_FRAME_PARTITION_LOW_MOTION) && |
2041 sb_has_motion(cm, prev_mi_8x8))) { | 2546 sb_has_motion(cm, prev_mi_8x8))) { |
2042 // If required set upper and lower partition size limits | 2547 // If required set upper and lower partition size limits |
2043 if (cpi->sf.auto_min_max_partition_size) { | 2548 if (sf->auto_min_max_partition_size) { |
2044 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64); | 2549 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64); |
2045 rd_auto_partition_range(cpi, tile, mi_row, mi_col, | 2550 rd_auto_partition_range(cpi, tile, mi_row, mi_col, |
2046 &cpi->sf.min_partition_size, | 2551 &sf->min_partition_size, |
2047 &cpi->sf.max_partition_size); | 2552 &sf->max_partition_size); |
2048 } | 2553 } |
2049 rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64, | 2554 rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64, |
2050 &dummy_rate, &dummy_dist, 1, INT64_MAX); | 2555 &dummy_rate, &dummy_dist, 1, INT64_MAX); |
2051 } else { | 2556 } else { |
2052 copy_partitioning(cm, mi_8x8, prev_mi_8x8); | 2557 if (sf->constrain_copy_partition && |
| 2558 sb_has_motion(cm, prev_mi_8x8)) |
| 2559 constrain_copy_partitioning(cpi, tile, mi_8x8, prev_mi_8x8, |
| 2560 mi_row, mi_col, BLOCK_16X16); |
| 2561 else |
| 2562 copy_partitioning(cm, mi_8x8, prev_mi_8x8); |
2053 rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64, | 2563 rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64, |
2054 &dummy_rate, &dummy_dist, 1); | 2564 &dummy_rate, &dummy_dist, 1); |
2055 } | 2565 } |
2056 } | 2566 } |
2057 } else { | 2567 } else { |
2058 // If required set upper and lower partition size limits | 2568 // If required set upper and lower partition size limits |
2059 if (cpi->sf.auto_min_max_partition_size) { | 2569 if (sf->auto_min_max_partition_size) { |
2060 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64); | 2570 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64); |
2061 rd_auto_partition_range(cpi, tile, mi_row, mi_col, | 2571 rd_auto_partition_range(cpi, tile, mi_row, mi_col, |
2062 &cpi->sf.min_partition_size, | 2572 &sf->min_partition_size, |
2063 &cpi->sf.max_partition_size); | 2573 &sf->max_partition_size); |
2064 } | 2574 } |
2065 rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64, | 2575 rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64, |
2066 &dummy_rate, &dummy_dist, 1, INT64_MAX); | 2576 &dummy_rate, &dummy_dist, 1, INT64_MAX); |
2067 } | 2577 } |
2068 } | 2578 } |
2069 } | 2579 } |
2070 | 2580 |
2071 static void init_encode_frame_mb_context(VP9_COMP *cpi) { | 2581 static void init_encode_frame_mb_context(VP9_COMP *cpi) { |
2072 MACROBLOCK *const x = &cpi->mb; | 2582 MACROBLOCK *const x = &cpi->mb; |
2073 VP9_COMMON *const cm = &cpi->common; | 2583 VP9_COMMON *const cm = &cpi->common; |
2074 MACROBLOCKD *const xd = &x->e_mbd; | 2584 MACROBLOCKD *const xd = &x->e_mbd; |
2075 const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols); | 2585 const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols); |
2076 | 2586 |
2077 x->act_zbin_adj = 0; | 2587 x->act_zbin_adj = 0; |
2078 cpi->seg0_idx = 0; | |
2079 | |
2080 xd->mode_info_stride = cm->mode_info_stride; | |
2081 | 2588 |
2082 // Copy data over into macro block data structures. | 2589 // Copy data over into macro block data structures. |
2083 vp9_setup_src_planes(x, cpi->Source, 0, 0); | 2590 vp9_setup_src_planes(x, cpi->Source, 0, 0); |
2084 | 2591 |
2085 // TODO(jkoleszar): are these initializations required? | 2592 // TODO(jkoleszar): are these initializations required? |
2086 setup_pre_planes(xd, 0, get_ref_frame_buffer(cpi, LAST_FRAME), 0, 0, NULL); | 2593 vp9_setup_pre_planes(xd, 0, get_ref_frame_buffer(cpi, LAST_FRAME), 0, 0, |
2087 setup_dst_planes(xd, get_frame_new_buffer(cm), 0, 0); | 2594 NULL); |
| 2595 vp9_setup_dst_planes(xd, get_frame_new_buffer(cm), 0, 0); |
2088 | 2596 |
2089 vp9_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y); | 2597 vp9_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y); |
2090 | 2598 |
2091 xd->mi_8x8[0]->mbmi.mode = DC_PRED; | 2599 xd->mi[0]->mbmi.mode = DC_PRED; |
2092 xd->mi_8x8[0]->mbmi.uv_mode = DC_PRED; | 2600 xd->mi[0]->mbmi.uv_mode = DC_PRED; |
2093 | |
2094 vp9_zero(cm->counts.y_mode); | |
2095 vp9_zero(cm->counts.uv_mode); | |
2096 vp9_zero(cm->counts.inter_mode); | |
2097 vp9_zero(cm->counts.partition); | |
2098 vp9_zero(cm->counts.intra_inter); | |
2099 vp9_zero(cm->counts.comp_inter); | |
2100 vp9_zero(cm->counts.single_ref); | |
2101 vp9_zero(cm->counts.comp_ref); | |
2102 vp9_zero(cm->counts.tx); | |
2103 vp9_zero(cm->counts.skip); | |
2104 | 2601 |
2105 // Note: this memset assumes above_context[0], [1] and [2] | 2602 // Note: this memset assumes above_context[0], [1] and [2] |
2106 // are allocated as part of the same buffer. | 2603 // are allocated as part of the same buffer. |
2107 vpx_memset(cpi->above_context[0], 0, | 2604 vpx_memset(xd->above_context[0], 0, |
2108 sizeof(*cpi->above_context[0]) * | 2605 sizeof(*xd->above_context[0]) * |
2109 2 * aligned_mi_cols * MAX_MB_PLANE); | 2606 2 * aligned_mi_cols * MAX_MB_PLANE); |
2110 vpx_memset(cpi->above_seg_context, 0, | 2607 vpx_memset(xd->above_seg_context, 0, |
2111 sizeof(*cpi->above_seg_context) * aligned_mi_cols); | 2608 sizeof(*xd->above_seg_context) * aligned_mi_cols); |
2112 } | 2609 } |
2113 | 2610 |
2114 static void switch_lossless_mode(VP9_COMP *cpi, int lossless) { | 2611 static void switch_lossless_mode(VP9_COMP *cpi, int lossless) { |
2115 if (lossless) { | 2612 if (lossless) { |
2116 // printf("Switching to lossless\n"); | 2613 // printf("Switching to lossless\n"); |
2117 cpi->mb.fwd_txm4x4 = vp9_fwht4x4; | 2614 cpi->mb.fwd_txm4x4 = vp9_fwht4x4; |
2118 cpi->mb.e_mbd.itxm_add = vp9_iwht4x4_add; | 2615 cpi->mb.e_mbd.itxm_add = vp9_iwht4x4_add; |
2119 cpi->mb.optimize = 0; | 2616 cpi->mb.optimize = 0; |
2120 cpi->common.lf.filter_level = 0; | 2617 cpi->common.lf.filter_level = 0; |
2121 cpi->zbin_mode_boost_enabled = 0; | 2618 cpi->zbin_mode_boost_enabled = 0; |
2122 cpi->common.tx_mode = ONLY_4X4; | 2619 cpi->common.tx_mode = ONLY_4X4; |
2123 } else { | 2620 } else { |
2124 // printf("Not lossless\n"); | 2621 // printf("Not lossless\n"); |
2125 cpi->mb.fwd_txm4x4 = vp9_fdct4x4; | 2622 cpi->mb.fwd_txm4x4 = vp9_fdct4x4; |
2126 cpi->mb.e_mbd.itxm_add = vp9_idct4x4_add; | 2623 cpi->mb.e_mbd.itxm_add = vp9_idct4x4_add; |
2127 } | 2624 } |
2128 } | 2625 } |
2129 | 2626 |
2130 static void switch_tx_mode(VP9_COMP *cpi) { | |
2131 if (cpi->sf.tx_size_search_method == USE_LARGESTALL && | |
2132 cpi->common.tx_mode >= ALLOW_32X32) | |
2133 cpi->common.tx_mode = ALLOW_32X32; | |
2134 } | |
2135 | |
2136 | |
2137 static int check_dual_ref_flags(VP9_COMP *cpi) { | 2627 static int check_dual_ref_flags(VP9_COMP *cpi) { |
2138 const int ref_flags = cpi->ref_frame_flags; | 2628 const int ref_flags = cpi->ref_frame_flags; |
2139 | 2629 |
2140 if (vp9_segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) { | 2630 if (vp9_segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) { |
2141 return 0; | 2631 return 0; |
2142 } else { | 2632 } else { |
2143 return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG) | 2633 return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG) |
2144 + !!(ref_flags & VP9_ALT_FLAG)) >= 2; | 2634 + !!(ref_flags & VP9_ALT_FLAG)) >= 2; |
2145 } | 2635 } |
2146 } | 2636 } |
2147 | 2637 |
2148 static int get_skip_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs) { | |
2149 int x, y; | |
2150 | |
2151 for (y = 0; y < ymbs; y++) { | |
2152 for (x = 0; x < xmbs; x++) { | |
2153 if (!mi_8x8[y * mis + x]->mbmi.skip) | |
2154 return 0; | |
2155 } | |
2156 } | |
2157 | |
2158 return 1; | |
2159 } | |
2160 | |
2161 static void set_txfm_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs, | |
2162 TX_SIZE tx_size) { | |
2163 int x, y; | |
2164 | |
2165 for (y = 0; y < ymbs; y++) { | |
2166 for (x = 0; x < xmbs; x++) | |
2167 mi_8x8[y * mis + x]->mbmi.tx_size = tx_size; | |
2168 } | |
2169 } | |
2170 | |
2171 static void reset_skip_txfm_size_b(const VP9_COMMON *cm, int mis, | |
2172 TX_SIZE max_tx_size, int bw, int bh, | |
2173 int mi_row, int mi_col, | |
2174 MODE_INFO **mi_8x8) { | |
2175 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) { | |
2176 return; | |
2177 } else { | |
2178 MB_MODE_INFO * const mbmi = &mi_8x8[0]->mbmi; | |
2179 if (mbmi->tx_size > max_tx_size) { | |
2180 const int ymbs = MIN(bh, cm->mi_rows - mi_row); | |
2181 const int xmbs = MIN(bw, cm->mi_cols - mi_col); | |
2182 | |
2183 assert(vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP) || | |
2184 get_skip_flag(mi_8x8, mis, ymbs, xmbs)); | |
2185 set_txfm_flag(mi_8x8, mis, ymbs, xmbs, max_tx_size); | |
2186 } | |
2187 } | |
2188 } | |
2189 | |
2190 static void reset_skip_txfm_size_sb(VP9_COMMON *cm, MODE_INFO **mi_8x8, | |
2191 TX_SIZE max_tx_size, int mi_row, int mi_col, | |
2192 BLOCK_SIZE bsize) { | |
2193 const int mis = cm->mode_info_stride; | |
2194 int bw, bh; | |
2195 const int bs = num_8x8_blocks_wide_lookup[bsize], hbs = bs / 2; | |
2196 | |
2197 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) | |
2198 return; | |
2199 | |
2200 bw = num_8x8_blocks_wide_lookup[mi_8x8[0]->mbmi.sb_type]; | |
2201 bh = num_8x8_blocks_high_lookup[mi_8x8[0]->mbmi.sb_type]; | |
2202 | |
2203 if (bw == bs && bh == bs) { | |
2204 reset_skip_txfm_size_b(cm, mis, max_tx_size, bs, bs, mi_row, mi_col, | |
2205 mi_8x8); | |
2206 } else if (bw == bs && bh < bs) { | |
2207 reset_skip_txfm_size_b(cm, mis, max_tx_size, bs, hbs, mi_row, mi_col, | |
2208 mi_8x8); | |
2209 reset_skip_txfm_size_b(cm, mis, max_tx_size, bs, hbs, mi_row + hbs, | |
2210 mi_col, mi_8x8 + hbs * mis); | |
2211 } else if (bw < bs && bh == bs) { | |
2212 reset_skip_txfm_size_b(cm, mis, max_tx_size, hbs, bs, mi_row, mi_col, | |
2213 mi_8x8); | |
2214 reset_skip_txfm_size_b(cm, mis, max_tx_size, hbs, bs, mi_row, | |
2215 mi_col + hbs, mi_8x8 + hbs); | |
2216 } else { | |
2217 const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize]; | |
2218 int n; | |
2219 | |
2220 assert(bw < bs && bh < bs); | |
2221 | |
2222 for (n = 0; n < 4; n++) { | |
2223 const int mi_dc = hbs * (n & 1); | |
2224 const int mi_dr = hbs * (n >> 1); | |
2225 | |
2226 reset_skip_txfm_size_sb(cm, &mi_8x8[mi_dr * mis + mi_dc], max_tx_size, | |
2227 mi_row + mi_dr, mi_col + mi_dc, subsize); | |
2228 } | |
2229 } | |
2230 } | |
2231 | |
2232 static void reset_skip_txfm_size(VP9_COMMON *cm, TX_SIZE txfm_max) { | 2638 static void reset_skip_txfm_size(VP9_COMMON *cm, TX_SIZE txfm_max) { |
2233 int mi_row, mi_col; | 2639 int mi_row, mi_col; |
2234 const int mis = cm->mode_info_stride; | 2640 const int mis = cm->mi_stride; |
2235 MODE_INFO **mi_8x8, **mi_ptr = cm->mi_grid_visible; | 2641 MODE_INFO **mi_ptr = cm->mi_grid_visible; |
2236 | 2642 |
2237 for (mi_row = 0; mi_row < cm->mi_rows; mi_row += 8, mi_ptr += 8 * mis) { | 2643 for (mi_row = 0; mi_row < cm->mi_rows; ++mi_row, mi_ptr += mis) { |
2238 mi_8x8 = mi_ptr; | 2644 for (mi_col = 0; mi_col < cm->mi_cols; ++mi_col) { |
2239 for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi_8x8 += 8) { | 2645 if (mi_ptr[mi_col]->mbmi.tx_size > txfm_max) |
2240 reset_skip_txfm_size_sb(cm, mi_8x8, txfm_max, mi_row, mi_col, | 2646 mi_ptr[mi_col]->mbmi.tx_size = txfm_max; |
2241 BLOCK_64X64); | |
2242 } | 2647 } |
2243 } | 2648 } |
2244 } | 2649 } |
2245 | 2650 |
2246 static MV_REFERENCE_FRAME get_frame_type(VP9_COMP *cpi) { | 2651 static MV_REFERENCE_FRAME get_frame_type(const VP9_COMP *cpi) { |
2247 if (frame_is_intra_only(&cpi->common)) | 2652 if (frame_is_intra_only(&cpi->common)) |
2248 return INTRA_FRAME; | 2653 return INTRA_FRAME; |
2249 else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame) | 2654 else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame) |
2250 return ALTREF_FRAME; | 2655 return ALTREF_FRAME; |
2251 else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame) | 2656 else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame) |
2252 return LAST_FRAME; | 2657 return LAST_FRAME; |
2253 else | 2658 else |
2254 return GOLDEN_FRAME; | 2659 return GOLDEN_FRAME; |
2255 } | 2660 } |
2256 | 2661 |
2257 static void select_tx_mode(VP9_COMP *cpi) { | 2662 static TX_MODE select_tx_mode(const VP9_COMP *cpi) { |
2258 if (cpi->oxcf.lossless) { | 2663 if (cpi->oxcf.lossless) { |
2259 cpi->common.tx_mode = ONLY_4X4; | 2664 return ONLY_4X4; |
2260 } else if (cpi->common.current_video_frame == 0) { | 2665 } else if (cpi->common.current_video_frame == 0) { |
2261 cpi->common.tx_mode = TX_MODE_SELECT; | 2666 return TX_MODE_SELECT; |
2262 } else { | 2667 } else { |
2263 if (cpi->sf.tx_size_search_method == USE_LARGESTALL) { | 2668 if (cpi->sf.tx_size_search_method == USE_LARGESTALL) { |
2264 cpi->common.tx_mode = ALLOW_32X32; | 2669 return ALLOW_32X32; |
2265 } else if (cpi->sf.tx_size_search_method == USE_FULL_RD) { | 2670 } else if (cpi->sf.tx_size_search_method == USE_FULL_RD) { |
2266 int frame_type = get_frame_type(cpi); | 2671 const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi); |
2267 cpi->common.tx_mode = | 2672 return cpi->rd_tx_select_threshes[frame_type][ALLOW_32X32] > |
2268 cpi->rd_tx_select_threshes[frame_type][ALLOW_32X32] | 2673 cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ? |
2269 > cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ? | 2674 ALLOW_32X32 : TX_MODE_SELECT; |
2270 ALLOW_32X32 : TX_MODE_SELECT; | |
2271 } else { | 2675 } else { |
2272 unsigned int total = 0; | 2676 unsigned int total = 0; |
2273 int i; | 2677 int i; |
2274 for (i = 0; i < TX_SIZES; ++i) | 2678 for (i = 0; i < TX_SIZES; ++i) |
2275 total += cpi->tx_stepdown_count[i]; | 2679 total += cpi->tx_stepdown_count[i]; |
| 2680 |
2276 if (total) { | 2681 if (total) { |
2277 double fraction = (double)cpi->tx_stepdown_count[0] / total; | 2682 const double fraction = (double)cpi->tx_stepdown_count[0] / total; |
2278 cpi->common.tx_mode = fraction > 0.90 ? ALLOW_32X32 : TX_MODE_SELECT; | 2683 return fraction > 0.90 ? ALLOW_32X32 : TX_MODE_SELECT; |
2279 // printf("fraction = %f\n", fraction); | 2684 } else { |
2280 } // else keep unchanged | 2685 return cpi->common.tx_mode; |
| 2686 } |
2281 } | 2687 } |
2282 } | 2688 } |
2283 } | 2689 } |
2284 | 2690 |
2285 // Start RTC Exploration | 2691 // Start RTC Exploration |
2286 typedef enum { | 2692 typedef enum { |
2287 BOTH_ZERO = 0, | 2693 BOTH_ZERO = 0, |
2288 ZERO_PLUS_PREDICTED = 1, | 2694 ZERO_PLUS_PREDICTED = 1, |
2289 BOTH_PREDICTED = 2, | 2695 BOTH_PREDICTED = 2, |
2290 NEW_PLUS_NON_INTRA = 3, | 2696 NEW_PLUS_NON_INTRA = 3, |
2291 BOTH_NEW = 4, | 2697 BOTH_NEW = 4, |
2292 INTRA_PLUS_NON_INTRA = 5, | 2698 INTRA_PLUS_NON_INTRA = 5, |
2293 BOTH_INTRA = 6, | 2699 BOTH_INTRA = 6, |
2294 INVALID_CASE = 9 | 2700 INVALID_CASE = 9 |
2295 } motion_vector_context; | 2701 } motion_vector_context; |
2296 | 2702 |
2297 static void set_mode_info(MB_MODE_INFO *mbmi, BLOCK_SIZE bsize, | 2703 static void set_mode_info(MB_MODE_INFO *mbmi, BLOCK_SIZE bsize, |
2298 MB_PREDICTION_MODE mode) { | 2704 MB_PREDICTION_MODE mode) { |
2299 mbmi->interp_filter = EIGHTTAP; | |
2300 mbmi->mode = mode; | 2705 mbmi->mode = mode; |
| 2706 mbmi->uv_mode = mode; |
2301 mbmi->mv[0].as_int = 0; | 2707 mbmi->mv[0].as_int = 0; |
2302 mbmi->mv[1].as_int = 0; | 2708 mbmi->mv[1].as_int = 0; |
2303 if (mode < NEARESTMV) { | 2709 mbmi->ref_frame[0] = INTRA_FRAME; |
2304 mbmi->ref_frame[0] = INTRA_FRAME; | 2710 mbmi->ref_frame[1] = NONE; |
2305 } else { | |
2306 mbmi->ref_frame[0] = LAST_FRAME; | |
2307 } | |
2308 | |
2309 mbmi->ref_frame[1] = INTRA_FRAME; | |
2310 mbmi->tx_size = max_txsize_lookup[bsize]; | 2711 mbmi->tx_size = max_txsize_lookup[bsize]; |
2311 mbmi->uv_mode = mode; | |
2312 mbmi->skip = 0; | 2712 mbmi->skip = 0; |
2313 mbmi->sb_type = bsize; | 2713 mbmi->sb_type = bsize; |
2314 mbmi->segment_id = 0; | 2714 mbmi->segment_id = 0; |
2315 } | 2715 } |
2316 | 2716 |
2317 static INLINE int get_block_row(int b32i, int b16i, int b8i) { | 2717 static void nonrd_pick_sb_modes(VP9_COMP *cpi, const TileInfo *const tile, |
2318 return ((b32i >> 1) << 2) + ((b16i >> 1) << 1) + (b8i >> 1); | 2718 int mi_row, int mi_col, |
2319 } | 2719 int *rate, int64_t *dist, |
2320 | 2720 BLOCK_SIZE bsize) { |
2321 static INLINE int get_block_col(int b32i, int b16i, int b8i) { | |
2322 return ((b32i & 1) << 2) + ((b16i & 1) << 1) + (b8i & 1); | |
2323 } | |
2324 | |
2325 static void nonrd_use_partition(VP9_COMP *cpi, const TileInfo *const tile, | |
2326 TOKENEXTRA **tp, int mi_row, int mi_col, | |
2327 BLOCK_SIZE bsize, int *rate, int64_t *dist) { | |
2328 VP9_COMMON *const cm = &cpi->common; | 2721 VP9_COMMON *const cm = &cpi->common; |
2329 MACROBLOCK *const x = &cpi->mb; | 2722 MACROBLOCK *const x = &cpi->mb; |
2330 MACROBLOCKD *const xd = &cpi->mb.e_mbd; | 2723 MACROBLOCKD *const xd = &x->e_mbd; |
2331 int mis = cm->mode_info_stride; | 2724 set_offsets(cpi, tile, mi_row, mi_col, bsize); |
2332 int br, bc; | 2725 xd->mi[0]->mbmi.sb_type = bsize; |
2333 int i, j; | 2726 |
2334 MB_PREDICTION_MODE mode = DC_PRED; | 2727 if (!frame_is_intra_only(cm)) { |
2335 int rows = MIN(MI_BLOCK_SIZE, tile->mi_row_end - mi_row); | 2728 vp9_pick_inter_mode(cpi, x, tile, mi_row, mi_col, |
2336 int cols = MIN(MI_BLOCK_SIZE, tile->mi_col_end - mi_col); | 2729 rate, dist, bsize); |
2337 | 2730 } else { |
2338 int bw = num_8x8_blocks_wide_lookup[bsize]; | 2731 MB_PREDICTION_MODE intramode = DC_PRED; |
2339 int bh = num_8x8_blocks_high_lookup[bsize]; | 2732 set_mode_info(&xd->mi[0]->mbmi, bsize, intramode); |
2340 | 2733 } |
2341 int brate = 0; | 2734 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize); |
2342 int64_t bdist = 0; | 2735 } |
2343 *rate = 0; | 2736 |
2344 *dist = 0; | 2737 static void fill_mode_info_sb(VP9_COMMON *cm, MACROBLOCK *x, |
2345 | 2738 int mi_row, int mi_col, |
2346 // find prediction mode for each 8x8 block | 2739 BLOCK_SIZE bsize, BLOCK_SIZE subsize) { |
2347 for (br = 0; br < rows; br += bh) { | 2740 MACROBLOCKD *xd = &x->e_mbd; |
2348 for (bc = 0; bc < cols; bc += bw) { | 2741 int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4; |
2349 int row = mi_row + br; | 2742 PARTITION_TYPE partition = partition_lookup[bsl][subsize]; |
2350 int col = mi_col + bc; | 2743 |
2351 | 2744 assert(bsize >= BLOCK_8X8); |
2352 BLOCK_SIZE bs = find_partition_size(bsize, rows - br, cols - bc, | 2745 |
2353 &bh, &bw); | 2746 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) |
2354 | 2747 return; |
2355 set_offsets(cpi, tile, row, col, bs); | 2748 |
2356 | 2749 switch (partition) { |
2357 if (cm->frame_type != KEY_FRAME) | 2750 case PARTITION_NONE: |
2358 vp9_pick_inter_mode(cpi, x, tile, row, col, | 2751 set_modeinfo_offsets(cm, xd, mi_row, mi_col); |
2359 &brate, &bdist, bs); | 2752 *(xd->mi[0]) = get_block_context(x, subsize)->mic; |
2360 else | 2753 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize); |
2361 set_mode_info(&xd->mi_8x8[0]->mbmi, bs, mode); | 2754 break; |
2362 | 2755 case PARTITION_VERT: |
2363 *rate += brate; | 2756 *get_sb_index(x, subsize) = 0; |
2364 *dist += bdist; | 2757 set_modeinfo_offsets(cm, xd, mi_row, mi_col); |
2365 | 2758 *(xd->mi[0]) = get_block_context(x, subsize)->mic; |
2366 for (j = 0; j < bh; ++j) | 2759 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize); |
2367 for (i = 0; i < bw; ++i) | 2760 |
2368 xd->mi_8x8[j * mis + i] = xd->mi_8x8[0]; | 2761 if (mi_col + hbs < cm->mi_cols) { |
2369 } | 2762 *get_sb_index(x, subsize) = 1; |
| 2763 set_modeinfo_offsets(cm, xd, mi_row, mi_col + hbs); |
| 2764 *(xd->mi[0]) = get_block_context(x, subsize)->mic; |
| 2765 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col + hbs, bsize); |
| 2766 } |
| 2767 break; |
| 2768 case PARTITION_HORZ: |
| 2769 *get_sb_index(x, subsize) = 0; |
| 2770 set_modeinfo_offsets(cm, xd, mi_row, mi_col); |
| 2771 *(xd->mi[0]) = get_block_context(x, subsize)->mic; |
| 2772 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize); |
| 2773 if (mi_row + hbs < cm->mi_rows) { |
| 2774 *get_sb_index(x, subsize) = 1; |
| 2775 set_modeinfo_offsets(cm, xd, mi_row + hbs, mi_col); |
| 2776 *(xd->mi[0]) = get_block_context(x, subsize)->mic; |
| 2777 duplicate_mode_info_in_sb(cm, xd, mi_row + hbs, mi_col, bsize); |
| 2778 } |
| 2779 break; |
| 2780 case PARTITION_SPLIT: |
| 2781 *get_sb_index(x, subsize) = 0; |
| 2782 fill_mode_info_sb(cm, x, mi_row, mi_col, subsize, |
| 2783 *(get_sb_partitioning(x, subsize))); |
| 2784 *get_sb_index(x, subsize) = 1; |
| 2785 fill_mode_info_sb(cm, x, mi_row, mi_col + hbs, subsize, |
| 2786 *(get_sb_partitioning(x, subsize))); |
| 2787 *get_sb_index(x, subsize) = 2; |
| 2788 fill_mode_info_sb(cm, x, mi_row + hbs, mi_col, subsize, |
| 2789 *(get_sb_partitioning(x, subsize))); |
| 2790 *get_sb_index(x, subsize) = 3; |
| 2791 fill_mode_info_sb(cm, x, mi_row + hbs, mi_col + hbs, subsize, |
| 2792 *(get_sb_partitioning(x, subsize))); |
| 2793 break; |
| 2794 default: |
| 2795 break; |
| 2796 } |
| 2797 } |
| 2798 |
| 2799 static void nonrd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile, |
| 2800 TOKENEXTRA **tp, int mi_row, |
| 2801 int mi_col, BLOCK_SIZE bsize, int *rate, |
| 2802 int64_t *dist, int do_recon, int64_t best_rd) { |
| 2803 VP9_COMMON *const cm = &cpi->common; |
| 2804 MACROBLOCK *const x = &cpi->mb; |
| 2805 MACROBLOCKD *const xd = &x->e_mbd; |
| 2806 const int ms = num_8x8_blocks_wide_lookup[bsize] / 2; |
| 2807 TOKENEXTRA *tp_orig = *tp; |
| 2808 PICK_MODE_CONTEXT *ctx = get_block_context(x, bsize); |
| 2809 int i; |
| 2810 BLOCK_SIZE subsize; |
| 2811 int this_rate, sum_rate = 0, best_rate = INT_MAX; |
| 2812 int64_t this_dist, sum_dist = 0, best_dist = INT64_MAX; |
| 2813 int64_t sum_rd = 0; |
| 2814 int do_split = bsize >= BLOCK_8X8; |
| 2815 int do_rect = 1; |
| 2816 // Override skipping rectangular partition operations for edge blocks |
| 2817 const int force_horz_split = (mi_row + ms >= cm->mi_rows); |
| 2818 const int force_vert_split = (mi_col + ms >= cm->mi_cols); |
| 2819 const int xss = x->e_mbd.plane[1].subsampling_x; |
| 2820 const int yss = x->e_mbd.plane[1].subsampling_y; |
| 2821 |
| 2822 int partition_none_allowed = !force_horz_split && !force_vert_split; |
| 2823 int partition_horz_allowed = !force_vert_split && yss <= xss && |
| 2824 bsize >= BLOCK_8X8; |
| 2825 int partition_vert_allowed = !force_horz_split && xss <= yss && |
| 2826 bsize >= BLOCK_8X8; |
| 2827 (void) *tp_orig; |
| 2828 |
| 2829 if (bsize < BLOCK_8X8) { |
| 2830 // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0 |
| 2831 // there is nothing to be done. |
| 2832 if (x->ab_index != 0) { |
| 2833 *rate = 0; |
| 2834 *dist = 0; |
| 2835 return; |
| 2836 } |
| 2837 } |
| 2838 |
| 2839 assert(num_8x8_blocks_wide_lookup[bsize] == |
| 2840 num_8x8_blocks_high_lookup[bsize]); |
| 2841 |
| 2842 x->in_active_map = check_active_map(cpi, x, mi_row, mi_col, bsize); |
| 2843 |
| 2844 // Determine partition types in search according to the speed features. |
| 2845 // The threshold set here has to be of square block size. |
| 2846 if (cpi->sf.auto_min_max_partition_size) { |
| 2847 partition_none_allowed &= (bsize <= cpi->sf.max_partition_size && |
| 2848 bsize >= cpi->sf.min_partition_size); |
| 2849 partition_horz_allowed &= ((bsize <= cpi->sf.max_partition_size && |
| 2850 bsize > cpi->sf.min_partition_size) || |
| 2851 force_horz_split); |
| 2852 partition_vert_allowed &= ((bsize <= cpi->sf.max_partition_size && |
| 2853 bsize > cpi->sf.min_partition_size) || |
| 2854 force_vert_split); |
| 2855 do_split &= bsize > cpi->sf.min_partition_size; |
| 2856 } |
| 2857 if (cpi->sf.use_square_partition_only) { |
| 2858 partition_horz_allowed &= force_horz_split; |
| 2859 partition_vert_allowed &= force_vert_split; |
| 2860 } |
| 2861 |
| 2862 if (!x->in_active_map && (partition_horz_allowed || partition_vert_allowed)) |
| 2863 do_split = 0; |
| 2864 |
| 2865 // PARTITION_NONE |
| 2866 if (partition_none_allowed) { |
| 2867 nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col, |
| 2868 &this_rate, &this_dist, bsize); |
| 2869 ctx->mic.mbmi = xd->mi[0]->mbmi; |
| 2870 |
| 2871 if (this_rate != INT_MAX) { |
| 2872 int pl = partition_plane_context(xd, mi_row, mi_col, bsize); |
| 2873 this_rate += x->partition_cost[pl][PARTITION_NONE]; |
| 2874 sum_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_dist); |
| 2875 if (sum_rd < best_rd) { |
| 2876 int64_t stop_thresh = 4096; |
| 2877 int64_t stop_thresh_rd; |
| 2878 |
| 2879 best_rate = this_rate; |
| 2880 best_dist = this_dist; |
| 2881 best_rd = sum_rd; |
| 2882 if (bsize >= BLOCK_8X8) |
| 2883 *(get_sb_partitioning(x, bsize)) = bsize; |
| 2884 |
| 2885 // Adjust threshold according to partition size. |
| 2886 stop_thresh >>= 8 - (b_width_log2_lookup[bsize] + |
| 2887 b_height_log2_lookup[bsize]); |
| 2888 |
| 2889 stop_thresh_rd = RDCOST(x->rdmult, x->rddiv, 0, stop_thresh); |
| 2890 // If obtained distortion is very small, choose current partition |
| 2891 // and stop splitting. |
| 2892 if (!x->e_mbd.lossless && best_rd < stop_thresh_rd) { |
| 2893 do_split = 0; |
| 2894 do_rect = 0; |
| 2895 } |
| 2896 } |
| 2897 } |
| 2898 if (!x->in_active_map) { |
| 2899 do_split = 0; |
| 2900 do_rect = 0; |
| 2901 } |
| 2902 } |
| 2903 |
| 2904 // store estimated motion vector |
| 2905 store_pred_mv(x, ctx); |
| 2906 |
| 2907 // PARTITION_SPLIT |
| 2908 sum_rd = 0; |
| 2909 if (do_split) { |
| 2910 int pl = partition_plane_context(xd, mi_row, mi_col, bsize); |
| 2911 sum_rate += x->partition_cost[pl][PARTITION_SPLIT]; |
| 2912 subsize = get_subsize(bsize, PARTITION_SPLIT); |
| 2913 for (i = 0; i < 4 && sum_rd < best_rd; ++i) { |
| 2914 const int x_idx = (i & 1) * ms; |
| 2915 const int y_idx = (i >> 1) * ms; |
| 2916 |
| 2917 if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols) |
| 2918 continue; |
| 2919 |
| 2920 *get_sb_index(x, subsize) = i; |
| 2921 load_pred_mv(x, ctx); |
| 2922 |
| 2923 nonrd_pick_partition(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx, |
| 2924 subsize, &this_rate, &this_dist, 0, |
| 2925 best_rd - sum_rd); |
| 2926 |
| 2927 if (this_rate == INT_MAX) { |
| 2928 sum_rd = INT64_MAX; |
| 2929 } else { |
| 2930 sum_rate += this_rate; |
| 2931 sum_dist += this_dist; |
| 2932 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); |
| 2933 } |
| 2934 } |
| 2935 |
| 2936 if (sum_rd < best_rd) { |
| 2937 best_rate = sum_rate; |
| 2938 best_dist = sum_dist; |
| 2939 best_rd = sum_rd; |
| 2940 *(get_sb_partitioning(x, bsize)) = subsize; |
| 2941 } else { |
| 2942 // skip rectangular partition test when larger block size |
| 2943 // gives better rd cost |
| 2944 if (cpi->sf.less_rectangular_check) |
| 2945 do_rect &= !partition_none_allowed; |
| 2946 } |
| 2947 } |
| 2948 |
| 2949 // PARTITION_HORZ |
| 2950 if (partition_horz_allowed && do_rect) { |
| 2951 subsize = get_subsize(bsize, PARTITION_HORZ); |
| 2952 *get_sb_index(x, subsize) = 0; |
| 2953 if (cpi->sf.adaptive_motion_search) |
| 2954 load_pred_mv(x, ctx); |
| 2955 |
| 2956 nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col, |
| 2957 &this_rate, &this_dist, subsize); |
| 2958 |
| 2959 get_block_context(x, subsize)->mic.mbmi = xd->mi[0]->mbmi; |
| 2960 |
| 2961 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); |
| 2962 |
| 2963 if (sum_rd < best_rd && mi_row + ms < cm->mi_rows) { |
| 2964 *get_sb_index(x, subsize) = 1; |
| 2965 |
| 2966 load_pred_mv(x, ctx); |
| 2967 |
| 2968 nonrd_pick_sb_modes(cpi, tile, mi_row + ms, mi_col, |
| 2969 &this_rate, &this_dist, subsize); |
| 2970 |
| 2971 get_block_context(x, subsize)->mic.mbmi = xd->mi[0]->mbmi; |
| 2972 |
| 2973 if (this_rate == INT_MAX) { |
| 2974 sum_rd = INT64_MAX; |
| 2975 } else { |
| 2976 int pl = partition_plane_context(xd, mi_row, mi_col, bsize); |
| 2977 this_rate += x->partition_cost[pl][PARTITION_HORZ]; |
| 2978 sum_rate += this_rate; |
| 2979 sum_dist += this_dist; |
| 2980 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); |
| 2981 } |
| 2982 } |
| 2983 if (sum_rd < best_rd) { |
| 2984 best_rd = sum_rd; |
| 2985 best_rate = sum_rate; |
| 2986 best_dist = sum_dist; |
| 2987 *(get_sb_partitioning(x, bsize)) = subsize; |
| 2988 } |
| 2989 } |
| 2990 |
| 2991 // PARTITION_VERT |
| 2992 if (partition_vert_allowed && do_rect) { |
| 2993 subsize = get_subsize(bsize, PARTITION_VERT); |
| 2994 |
| 2995 *get_sb_index(x, subsize) = 0; |
| 2996 if (cpi->sf.adaptive_motion_search) |
| 2997 load_pred_mv(x, ctx); |
| 2998 |
| 2999 nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col, |
| 3000 &this_rate, &this_dist, subsize); |
| 3001 get_block_context(x, subsize)->mic.mbmi = xd->mi[0]->mbmi; |
| 3002 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); |
| 3003 if (sum_rd < best_rd && mi_col + ms < cm->mi_cols) { |
| 3004 *get_sb_index(x, subsize) = 1; |
| 3005 |
| 3006 load_pred_mv(x, ctx); |
| 3007 |
| 3008 nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col + ms, |
| 3009 &this_rate, &this_dist, subsize); |
| 3010 |
| 3011 get_block_context(x, subsize)->mic.mbmi = xd->mi[0]->mbmi; |
| 3012 |
| 3013 if (this_rate == INT_MAX) { |
| 3014 sum_rd = INT64_MAX; |
| 3015 } else { |
| 3016 int pl = partition_plane_context(xd, mi_row, mi_col, bsize); |
| 3017 this_rate += x->partition_cost[pl][PARTITION_VERT]; |
| 3018 sum_rate += this_rate; |
| 3019 sum_dist += this_dist; |
| 3020 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); |
| 3021 } |
| 3022 } |
| 3023 if (sum_rd < best_rd) { |
| 3024 best_rate = sum_rate; |
| 3025 best_dist = sum_dist; |
| 3026 best_rd = sum_rd; |
| 3027 *(get_sb_partitioning(x, bsize)) = subsize; |
| 3028 } |
| 3029 } |
| 3030 |
| 3031 *rate = best_rate; |
| 3032 *dist = best_dist; |
| 3033 |
| 3034 if (best_rate == INT_MAX) |
| 3035 return; |
| 3036 |
| 3037 // update mode info array |
| 3038 fill_mode_info_sb(cm, x, mi_row, mi_col, bsize, |
| 3039 *(get_sb_partitioning(x, bsize))); |
| 3040 |
| 3041 if (best_rate < INT_MAX && best_dist < INT64_MAX && do_recon) { |
| 3042 int output_enabled = (bsize == BLOCK_64X64); |
| 3043 |
| 3044 // Check the projected output rate for this SB against it's target |
| 3045 // and and if necessary apply a Q delta using segmentation to get |
| 3046 // closer to the target. |
| 3047 if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) { |
| 3048 vp9_select_in_frame_q_segment(cpi, mi_row, mi_col, output_enabled, |
| 3049 best_rate); |
| 3050 } |
| 3051 |
| 3052 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) |
| 3053 vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh, |
| 3054 best_rate, best_dist); |
| 3055 |
| 3056 encode_sb_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize); |
| 3057 } |
| 3058 |
| 3059 if (bsize == BLOCK_64X64) { |
| 3060 assert(tp_orig < *tp); |
| 3061 assert(best_rate < INT_MAX); |
| 3062 assert(best_dist < INT64_MAX); |
| 3063 } else { |
| 3064 assert(tp_orig == *tp); |
| 3065 } |
| 3066 } |
| 3067 |
| 3068 static void nonrd_use_partition(VP9_COMP *cpi, |
| 3069 const TileInfo *const tile, |
| 3070 MODE_INFO **mi_8x8, |
| 3071 TOKENEXTRA **tp, |
| 3072 int mi_row, int mi_col, |
| 3073 BLOCK_SIZE bsize, int output_enabled, |
| 3074 int *totrate, int64_t *totdist) { |
| 3075 VP9_COMMON *const cm = &cpi->common; |
| 3076 MACROBLOCK *const x = &cpi->mb; |
| 3077 MACROBLOCKD *const xd = &x->e_mbd; |
| 3078 const int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4; |
| 3079 const int mis = cm->mi_stride; |
| 3080 PARTITION_TYPE partition; |
| 3081 BLOCK_SIZE subsize; |
| 3082 int rate = INT_MAX; |
| 3083 int64_t dist = INT64_MAX; |
| 3084 |
| 3085 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) |
| 3086 return; |
| 3087 |
| 3088 subsize = (bsize >= BLOCK_8X8) ? mi_8x8[0]->mbmi.sb_type : BLOCK_4X4; |
| 3089 partition = partition_lookup[bsl][subsize]; |
| 3090 |
| 3091 switch (partition) { |
| 3092 case PARTITION_NONE: |
| 3093 nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col, totrate, totdist, subsize); |
| 3094 get_block_context(x, subsize)->mic.mbmi = xd->mi[0]->mbmi; |
| 3095 break; |
| 3096 case PARTITION_VERT: |
| 3097 *get_sb_index(x, subsize) = 0; |
| 3098 nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col, totrate, totdist, subsize); |
| 3099 get_block_context(x, subsize)->mic.mbmi = xd->mi[0]->mbmi; |
| 3100 if (mi_col + hbs < cm->mi_cols) { |
| 3101 *get_sb_index(x, subsize) = 1; |
| 3102 nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col + hbs, |
| 3103 &rate, &dist, subsize); |
| 3104 get_block_context(x, subsize)->mic.mbmi = xd->mi[0]->mbmi; |
| 3105 if (rate != INT_MAX && dist != INT64_MAX && |
| 3106 *totrate != INT_MAX && *totdist != INT64_MAX) { |
| 3107 *totrate += rate; |
| 3108 *totdist += dist; |
| 3109 } |
| 3110 } |
| 3111 break; |
| 3112 case PARTITION_HORZ: |
| 3113 *get_sb_index(x, subsize) = 0; |
| 3114 nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col, totrate, totdist, subsize); |
| 3115 get_block_context(x, subsize)->mic.mbmi = xd->mi[0]->mbmi; |
| 3116 if (mi_row + hbs < cm->mi_rows) { |
| 3117 *get_sb_index(x, subsize) = 1; |
| 3118 nonrd_pick_sb_modes(cpi, tile, mi_row + hbs, mi_col, |
| 3119 &rate, &dist, subsize); |
| 3120 get_block_context(x, subsize)->mic.mbmi = mi_8x8[0]->mbmi; |
| 3121 if (rate != INT_MAX && dist != INT64_MAX && |
| 3122 *totrate != INT_MAX && *totdist != INT64_MAX) { |
| 3123 *totrate += rate; |
| 3124 *totdist += dist; |
| 3125 } |
| 3126 } |
| 3127 break; |
| 3128 case PARTITION_SPLIT: |
| 3129 subsize = get_subsize(bsize, PARTITION_SPLIT); |
| 3130 *get_sb_index(x, subsize) = 0; |
| 3131 nonrd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, |
| 3132 subsize, output_enabled, totrate, totdist); |
| 3133 *get_sb_index(x, subsize) = 1; |
| 3134 nonrd_use_partition(cpi, tile, mi_8x8 + hbs, tp, |
| 3135 mi_row, mi_col + hbs, subsize, output_enabled, |
| 3136 &rate, &dist); |
| 3137 if (rate != INT_MAX && dist != INT64_MAX && |
| 3138 *totrate != INT_MAX && *totdist != INT64_MAX) { |
| 3139 *totrate += rate; |
| 3140 *totdist += dist; |
| 3141 } |
| 3142 *get_sb_index(x, subsize) = 2; |
| 3143 nonrd_use_partition(cpi, tile, mi_8x8 + hbs * mis, tp, |
| 3144 mi_row + hbs, mi_col, subsize, output_enabled, |
| 3145 &rate, &dist); |
| 3146 if (rate != INT_MAX && dist != INT64_MAX && |
| 3147 *totrate != INT_MAX && *totdist != INT64_MAX) { |
| 3148 *totrate += rate; |
| 3149 *totdist += dist; |
| 3150 } |
| 3151 *get_sb_index(x, subsize) = 3; |
| 3152 nonrd_use_partition(cpi, tile, mi_8x8 + hbs * mis + hbs, tp, |
| 3153 mi_row + hbs, mi_col + hbs, subsize, output_enabled, |
| 3154 &rate, &dist); |
| 3155 if (rate != INT_MAX && dist != INT64_MAX && |
| 3156 *totrate != INT_MAX && *totdist != INT64_MAX) { |
| 3157 *totrate += rate; |
| 3158 *totdist += dist; |
| 3159 } |
| 3160 break; |
| 3161 default: |
| 3162 assert("Invalid partition type."); |
| 3163 } |
| 3164 |
| 3165 if (bsize == BLOCK_64X64 && output_enabled) { |
| 3166 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) |
| 3167 vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh, |
| 3168 *totrate, *totdist); |
| 3169 encode_sb_rt(cpi, tile, tp, mi_row, mi_col, 1, bsize); |
2370 } | 3170 } |
2371 } | 3171 } |
2372 | 3172 |
2373 static void encode_nonrd_sb_row(VP9_COMP *cpi, const TileInfo *const tile, | 3173 static void encode_nonrd_sb_row(VP9_COMP *cpi, const TileInfo *const tile, |
2374 int mi_row, TOKENEXTRA **tp) { | 3174 int mi_row, TOKENEXTRA **tp) { |
| 3175 VP9_COMMON *cm = &cpi->common; |
| 3176 MACROBLOCKD *xd = &cpi->mb.e_mbd; |
2375 int mi_col; | 3177 int mi_col; |
2376 | 3178 |
2377 // Initialize the left context for the new SB row | 3179 // Initialize the left context for the new SB row |
2378 vpx_memset(&cpi->left_context, 0, sizeof(cpi->left_context)); | 3180 vpx_memset(&xd->left_context, 0, sizeof(xd->left_context)); |
2379 vpx_memset(cpi->left_seg_context, 0, sizeof(cpi->left_seg_context)); | 3181 vpx_memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context)); |
2380 | 3182 |
2381 // Code each SB in the row | 3183 // Code each SB in the row |
2382 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; | 3184 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; |
2383 mi_col += MI_BLOCK_SIZE) { | 3185 mi_col += MI_BLOCK_SIZE) { |
2384 int dummy_rate; | 3186 int dummy_rate = 0; |
2385 int64_t dummy_dist; | 3187 int64_t dummy_dist = 0; |
| 3188 const int idx_str = cm->mi_stride * mi_row + mi_col; |
| 3189 MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str; |
| 3190 MODE_INFO **prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str; |
| 3191 BLOCK_SIZE bsize; |
2386 | 3192 |
2387 cpi->mb.source_variance = UINT_MAX; | 3193 cpi->mb.source_variance = UINT_MAX; |
2388 | 3194 vp9_zero(cpi->mb.pred_mv); |
2389 if (cpi->sf.partition_search_type == FIXED_PARTITION) { | 3195 |
2390 nonrd_use_partition(cpi, tile, tp, mi_row, mi_col, | 3196 // Set the partition type of the 64X64 block |
2391 cpi->sf.always_this_block_size, | 3197 switch (cpi->sf.partition_search_type) { |
2392 &dummy_rate, &dummy_dist); | 3198 case VAR_BASED_PARTITION: |
2393 encode_sb_rt(cpi, tile, tp, mi_row, mi_col, 1, BLOCK_64X64); | 3199 choose_partitioning(cpi, tile, mi_row, mi_col); |
2394 } else if (cpi->sf.partition_search_type == VAR_BASED_FIXED_PARTITION || | 3200 nonrd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64, |
2395 cpi->sf.partition_search_type == VAR_BASED_PARTITION) { | 3201 1, &dummy_rate, &dummy_dist); |
2396 // TODO(debargha): Implement VAR_BASED_PARTITION as a separate case. | 3202 break; |
2397 // Currently both VAR_BASED_FIXED_PARTITION/VAR_BASED_PARTITION | 3203 case SOURCE_VAR_BASED_PARTITION: |
2398 // map to the same thing. | 3204 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64); |
2399 BLOCK_SIZE bsize = get_nonrd_var_based_fixed_partition(cpi, | 3205 set_source_var_based_partition(cpi, tile, mi_8x8, mi_row, mi_col); |
2400 mi_row, | 3206 nonrd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64, |
2401 mi_col); | 3207 1, &dummy_rate, &dummy_dist); |
2402 nonrd_use_partition(cpi, tile, tp, mi_row, mi_col, | 3208 break; |
2403 bsize, &dummy_rate, &dummy_dist); | 3209 case VAR_BASED_FIXED_PARTITION: |
2404 encode_sb_rt(cpi, tile, tp, mi_row, mi_col, 1, BLOCK_64X64); | 3210 case FIXED_PARTITION: |
2405 } else { | 3211 bsize = cpi->sf.partition_search_type == FIXED_PARTITION ? |
2406 assert(0); | 3212 cpi->sf.always_this_block_size : |
| 3213 get_nonrd_var_based_fixed_partition(cpi, mi_row, mi_col); |
| 3214 set_fixed_partitioning(cpi, tile, mi_8x8, mi_row, mi_col, bsize); |
| 3215 nonrd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64, |
| 3216 1, &dummy_rate, &dummy_dist); |
| 3217 break; |
| 3218 case REFERENCE_PARTITION: |
| 3219 if (cpi->sf.partition_check || sb_has_motion(cm, prev_mi_8x8)) { |
| 3220 nonrd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64, |
| 3221 &dummy_rate, &dummy_dist, 1, INT64_MAX); |
| 3222 } else { |
| 3223 copy_partitioning(cm, mi_8x8, prev_mi_8x8); |
| 3224 nonrd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, |
| 3225 BLOCK_64X64, 1, &dummy_rate, &dummy_dist); |
| 3226 } |
| 3227 break; |
| 3228 default: |
| 3229 assert(0); |
2407 } | 3230 } |
2408 } | 3231 } |
2409 } | 3232 } |
2410 // end RTC play code | 3233 // end RTC play code |
2411 | 3234 |
2412 static void encode_frame_internal(VP9_COMP *cpi) { | 3235 static void encode_frame_internal(VP9_COMP *cpi) { |
2413 int mi_row; | 3236 SPEED_FEATURES *const sf = &cpi->sf; |
2414 MACROBLOCK *const x = &cpi->mb; | 3237 MACROBLOCK *const x = &cpi->mb; |
2415 VP9_COMMON *const cm = &cpi->common; | 3238 VP9_COMMON *const cm = &cpi->common; |
2416 MACROBLOCKD *const xd = &x->e_mbd; | 3239 MACROBLOCKD *const xd = &x->e_mbd; |
2417 | 3240 |
2418 // fprintf(stderr, "encode_frame_internal frame %d (%d) type %d\n", | 3241 xd->mi = cm->mi_grid_visible; |
2419 // cpi->common.current_video_frame, cpi->common.show_frame, | 3242 xd->mi[0] = cm->mi; |
2420 // cm->frame_type); | 3243 |
2421 | 3244 vp9_zero(cm->counts); |
2422 vp9_zero(cm->counts.switchable_interp); | 3245 vp9_zero(cpi->coef_counts); |
2423 vp9_zero(cpi->tx_stepdown_count); | 3246 vp9_zero(cpi->tx_stepdown_count); |
2424 | |
2425 xd->mi_8x8 = cm->mi_grid_visible; | |
2426 // required for vp9_frame_init_quantizer | |
2427 xd->mi_8x8[0] = cm->mi; | |
2428 | |
2429 xd->last_mi = cm->prev_mi; | |
2430 | |
2431 vp9_zero(cm->counts.mv); | |
2432 vp9_zero(cpi->coef_counts); | |
2433 vp9_zero(cm->counts.eob_branch); | |
2434 | |
2435 cpi->mb.e_mbd.lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0 | |
2436 && cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0; | |
2437 switch_lossless_mode(cpi, cpi->mb.e_mbd.lossless); | |
2438 | |
2439 vp9_frame_init_quantizer(cpi); | |
2440 | |
2441 vp9_initialize_rd_consts(cpi); | |
2442 vp9_initialize_me_consts(cpi, cm->base_qindex); | |
2443 switch_tx_mode(cpi); | |
2444 | |
2445 if (cpi->oxcf.tuning == VP8_TUNE_SSIM) { | |
2446 // Initialize encode frame context. | |
2447 init_encode_frame_mb_context(cpi); | |
2448 | |
2449 // Build a frame level activity map | |
2450 build_activity_map(cpi); | |
2451 } | |
2452 | |
2453 // Re-initialize encode frame context. | |
2454 init_encode_frame_mb_context(cpi); | |
2455 | |
2456 vp9_zero(cpi->rd_comp_pred_diff); | 3247 vp9_zero(cpi->rd_comp_pred_diff); |
2457 vp9_zero(cpi->rd_filter_diff); | 3248 vp9_zero(cpi->rd_filter_diff); |
2458 vp9_zero(cpi->rd_tx_select_diff); | 3249 vp9_zero(cpi->rd_tx_select_diff); |
2459 vp9_zero(cpi->rd_tx_select_threshes); | 3250 vp9_zero(cpi->rd_tx_select_threshes); |
2460 | 3251 |
| 3252 cm->tx_mode = select_tx_mode(cpi); |
| 3253 |
| 3254 cpi->mb.e_mbd.lossless = cm->base_qindex == 0 && |
| 3255 cm->y_dc_delta_q == 0 && |
| 3256 cm->uv_dc_delta_q == 0 && |
| 3257 cm->uv_ac_delta_q == 0; |
| 3258 switch_lossless_mode(cpi, cpi->mb.e_mbd.lossless); |
| 3259 |
| 3260 vp9_frame_init_quantizer(cpi); |
| 3261 |
| 3262 vp9_initialize_rd_consts(cpi); |
| 3263 vp9_initialize_me_consts(cpi, cm->base_qindex); |
| 3264 init_encode_frame_mb_context(cpi); |
| 3265 |
| 3266 if (cpi->oxcf.tuning == VP8_TUNE_SSIM) |
| 3267 build_activity_map(cpi); |
| 3268 |
2461 set_prev_mi(cm); | 3269 set_prev_mi(cm); |
2462 | 3270 |
2463 if (cpi->sf.use_nonrd_pick_mode) { | 3271 if (sf->use_nonrd_pick_mode) { |
2464 // Initialize internal buffer pointers for rtc coding, where non-RD | 3272 // Initialize internal buffer pointers for rtc coding, where non-RD |
2465 // mode decision is used and hence no buffer pointer swap needed. | 3273 // mode decision is used and hence no buffer pointer swap needed. |
2466 int i; | 3274 int i; |
2467 struct macroblock_plane *const p = x->plane; | 3275 struct macroblock_plane *const p = x->plane; |
2468 struct macroblockd_plane *const pd = xd->plane; | 3276 struct macroblockd_plane *const pd = xd->plane; |
2469 PICK_MODE_CONTEXT *ctx = &cpi->mb.sb64_context; | 3277 PICK_MODE_CONTEXT *ctx = &cpi->mb.sb64_context; |
2470 | 3278 |
2471 for (i = 0; i < MAX_MB_PLANE; ++i) { | 3279 for (i = 0; i < MAX_MB_PLANE; ++i) { |
2472 p[i].coeff = ctx->coeff_pbuf[i][0]; | 3280 p[i].coeff = ctx->coeff_pbuf[i][0]; |
2473 p[i].qcoeff = ctx->qcoeff_pbuf[i][0]; | 3281 p[i].qcoeff = ctx->qcoeff_pbuf[i][0]; |
2474 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0]; | 3282 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0]; |
2475 p[i].eobs = ctx->eobs_pbuf[i][0]; | 3283 p[i].eobs = ctx->eobs_pbuf[i][0]; |
2476 } | 3284 } |
| 3285 vp9_zero(x->zcoeff_blk); |
| 3286 |
| 3287 if (sf->partition_search_type == SOURCE_VAR_BASED_PARTITION && |
| 3288 cm->current_video_frame > 0) { |
| 3289 int check_freq = sf->search_type_check_frequency; |
| 3290 |
| 3291 if ((cm->current_video_frame - 1) % check_freq == 0) { |
| 3292 cpi->use_large_partition_rate = 0; |
| 3293 } |
| 3294 |
| 3295 if ((cm->current_video_frame - 1) % check_freq == 1) { |
| 3296 const int mbs_in_b32x32 = 1 << ((b_width_log2_lookup[BLOCK_32X32] - |
| 3297 b_width_log2_lookup[BLOCK_16X16]) + |
| 3298 (b_height_log2_lookup[BLOCK_32X32] - |
| 3299 b_height_log2_lookup[BLOCK_16X16])); |
| 3300 cpi->use_large_partition_rate = cpi->use_large_partition_rate * 100 * |
| 3301 mbs_in_b32x32 / cm->MBs; |
| 3302 } |
| 3303 |
| 3304 if ((cm->current_video_frame - 1) % check_freq >= 1) { |
| 3305 if (cpi->use_large_partition_rate < 15) |
| 3306 sf->partition_search_type = FIXED_PARTITION; |
| 3307 } |
| 3308 } |
2477 } | 3309 } |
2478 | 3310 |
2479 { | 3311 { |
2480 struct vpx_usec_timer emr_timer; | 3312 struct vpx_usec_timer emr_timer; |
2481 vpx_usec_timer_start(&emr_timer); | 3313 vpx_usec_timer_start(&emr_timer); |
2482 | 3314 |
2483 { | 3315 { |
2484 // Take tiles into account and give start/end MB | 3316 // Take tiles into account and give start/end MB |
2485 int tile_col, tile_row; | 3317 int tile_col, tile_row; |
2486 TOKENEXTRA *tp = cpi->tok; | 3318 TOKENEXTRA *tp = cpi->tok; |
2487 const int tile_cols = 1 << cm->log2_tile_cols; | 3319 const int tile_cols = 1 << cm->log2_tile_cols; |
2488 const int tile_rows = 1 << cm->log2_tile_rows; | 3320 const int tile_rows = 1 << cm->log2_tile_rows; |
2489 | 3321 |
2490 for (tile_row = 0; tile_row < tile_rows; tile_row++) { | 3322 for (tile_row = 0; tile_row < tile_rows; tile_row++) { |
2491 for (tile_col = 0; tile_col < tile_cols; tile_col++) { | 3323 for (tile_col = 0; tile_col < tile_cols; tile_col++) { |
2492 TileInfo tile; | 3324 TileInfo tile; |
2493 TOKENEXTRA *tp_old = tp; | 3325 TOKENEXTRA *tp_old = tp; |
| 3326 int mi_row; |
2494 | 3327 |
2495 // For each row of SBs in the frame | 3328 // For each row of SBs in the frame |
2496 vp9_tile_init(&tile, cm, tile_row, tile_col); | 3329 vp9_tile_init(&tile, cm, tile_row, tile_col); |
2497 for (mi_row = tile.mi_row_start; | 3330 for (mi_row = tile.mi_row_start; |
2498 mi_row < tile.mi_row_end; mi_row += MI_BLOCK_SIZE) { | 3331 mi_row < tile.mi_row_end; mi_row += MI_BLOCK_SIZE) { |
2499 if (cpi->sf.use_nonrd_pick_mode) | 3332 if (sf->use_nonrd_pick_mode && cm->frame_type != KEY_FRAME) |
2500 encode_nonrd_sb_row(cpi, &tile, mi_row, &tp); | 3333 encode_nonrd_sb_row(cpi, &tile, mi_row, &tp); |
2501 else | 3334 else |
2502 encode_rd_sb_row(cpi, &tile, mi_row, &tp); | 3335 encode_rd_sb_row(cpi, &tile, mi_row, &tp); |
2503 } | 3336 } |
2504 cpi->tok_count[tile_row][tile_col] = (unsigned int)(tp - tp_old); | 3337 cpi->tok_count[tile_row][tile_col] = (unsigned int)(tp - tp_old); |
2505 assert(tp - cpi->tok <= get_token_alloc(cm->mb_rows, cm->mb_cols)); | 3338 assert(tp - cpi->tok <= get_token_alloc(cm->mb_rows, cm->mb_cols)); |
2506 } | 3339 } |
2507 } | 3340 } |
2508 } | 3341 } |
2509 | 3342 |
2510 vpx_usec_timer_mark(&emr_timer); | 3343 vpx_usec_timer_mark(&emr_timer); |
2511 cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer); | 3344 cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer); |
2512 } | 3345 } |
2513 | 3346 |
2514 if (cpi->sf.skip_encode_sb) { | 3347 if (sf->skip_encode_sb) { |
2515 int j; | 3348 int j; |
2516 unsigned int intra_count = 0, inter_count = 0; | 3349 unsigned int intra_count = 0, inter_count = 0; |
2517 for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) { | 3350 for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) { |
2518 intra_count += cm->counts.intra_inter[j][0]; | 3351 intra_count += cm->counts.intra_inter[j][0]; |
2519 inter_count += cm->counts.intra_inter[j][1]; | 3352 inter_count += cm->counts.intra_inter[j][1]; |
2520 } | 3353 } |
2521 cpi->sf.skip_encode_frame = ((intra_count << 2) < inter_count); | 3354 sf->skip_encode_frame = (intra_count << 2) < inter_count && |
2522 cpi->sf.skip_encode_frame &= (cm->frame_type != KEY_FRAME); | 3355 cm->frame_type != KEY_FRAME && |
2523 cpi->sf.skip_encode_frame &= cm->show_frame; | 3356 cm->show_frame; |
2524 } else { | 3357 } else { |
2525 cpi->sf.skip_encode_frame = 0; | 3358 sf->skip_encode_frame = 0; |
2526 } | 3359 } |
2527 | 3360 |
2528 #if 0 | 3361 #if 0 |
2529 // Keep record of the total distortion this time around for future use | 3362 // Keep record of the total distortion this time around for future use |
2530 cpi->last_frame_distortion = cpi->frame_distortion; | 3363 cpi->last_frame_distortion = cpi->frame_distortion; |
2531 #endif | 3364 #endif |
2532 } | 3365 } |
2533 | 3366 |
2534 void vp9_encode_frame(VP9_COMP *cpi) { | 3367 void vp9_encode_frame(VP9_COMP *cpi) { |
2535 VP9_COMMON *const cm = &cpi->common; | 3368 VP9_COMMON *const cm = &cpi->common; |
(...skipping 13 matching lines...) Expand all Loading... |
2549 } else { | 3382 } else { |
2550 cm->allow_comp_inter_inter = 1; | 3383 cm->allow_comp_inter_inter = 1; |
2551 cm->comp_fixed_ref = ALTREF_FRAME; | 3384 cm->comp_fixed_ref = ALTREF_FRAME; |
2552 cm->comp_var_ref[0] = LAST_FRAME; | 3385 cm->comp_var_ref[0] = LAST_FRAME; |
2553 cm->comp_var_ref[1] = GOLDEN_FRAME; | 3386 cm->comp_var_ref[1] = GOLDEN_FRAME; |
2554 } | 3387 } |
2555 } | 3388 } |
2556 | 3389 |
2557 if (cpi->sf.frame_parameter_update) { | 3390 if (cpi->sf.frame_parameter_update) { |
2558 int i; | 3391 int i; |
2559 REFERENCE_MODE reference_mode; | 3392 |
2560 /* | 3393 // This code does a single RD pass over the whole frame assuming |
2561 * This code does a single RD pass over the whole frame assuming | 3394 // either compound, single or hybrid prediction as per whatever has |
2562 * either compound, single or hybrid prediction as per whatever has | 3395 // worked best for that type of frame in the past. |
2563 * worked best for that type of frame in the past. | 3396 // It also predicts whether another coding mode would have worked |
2564 * It also predicts whether another coding mode would have worked | 3397 // better that this coding mode. If that is the case, it remembers |
2565 * better that this coding mode. If that is the case, it remembers | 3398 // that for subsequent frames. |
2566 * that for subsequent frames. | 3399 // It does the same analysis for transform size selection also. |
2567 * It does the same analysis for transform size selection also. | |
2568 */ | |
2569 const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi); | 3400 const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi); |
2570 const int64_t *mode_thresh = cpi->rd_prediction_type_threshes[frame_type]; | 3401 const int64_t *mode_thresh = cpi->rd_prediction_type_threshes[frame_type]; |
2571 const int64_t *filter_thresh = cpi->rd_filter_threshes[frame_type]; | 3402 const int64_t *filter_thresh = cpi->rd_filter_threshes[frame_type]; |
2572 | 3403 |
2573 /* prediction (compound, single or hybrid) mode selection */ | 3404 /* prediction (compound, single or hybrid) mode selection */ |
2574 if (frame_type == 3 || !cm->allow_comp_inter_inter) | 3405 if (frame_type == ALTREF_FRAME || !cm->allow_comp_inter_inter) |
2575 reference_mode = SINGLE_REFERENCE; | 3406 cm->reference_mode = SINGLE_REFERENCE; |
2576 else if (mode_thresh[COMPOUND_REFERENCE] > mode_thresh[SINGLE_REFERENCE] && | 3407 else if (mode_thresh[COMPOUND_REFERENCE] > mode_thresh[SINGLE_REFERENCE] && |
2577 mode_thresh[COMPOUND_REFERENCE] > | 3408 mode_thresh[COMPOUND_REFERENCE] > |
2578 mode_thresh[REFERENCE_MODE_SELECT] && | 3409 mode_thresh[REFERENCE_MODE_SELECT] && |
2579 check_dual_ref_flags(cpi) && | 3410 check_dual_ref_flags(cpi) && |
2580 cpi->static_mb_pct == 100) | 3411 cpi->static_mb_pct == 100) |
2581 reference_mode = COMPOUND_REFERENCE; | 3412 cm->reference_mode = COMPOUND_REFERENCE; |
2582 else if (mode_thresh[SINGLE_REFERENCE] > mode_thresh[REFERENCE_MODE_SELECT]) | 3413 else if (mode_thresh[SINGLE_REFERENCE] > mode_thresh[REFERENCE_MODE_SELECT]) |
2583 reference_mode = SINGLE_REFERENCE; | 3414 cm->reference_mode = SINGLE_REFERENCE; |
2584 else | 3415 else |
2585 reference_mode = REFERENCE_MODE_SELECT; | 3416 cm->reference_mode = REFERENCE_MODE_SELECT; |
2586 | 3417 |
2587 if (cm->interp_filter == SWITCHABLE) { | 3418 if (cm->interp_filter == SWITCHABLE) { |
2588 if (frame_type != ALTREF_FRAME && | 3419 if (frame_type != ALTREF_FRAME && |
2589 filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[EIGHTTAP] && | 3420 filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[EIGHTTAP] && |
2590 filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[EIGHTTAP_SHARP] && | 3421 filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[EIGHTTAP_SHARP] && |
2591 filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[SWITCHABLE - 1]) { | 3422 filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[SWITCHABLE - 1]) { |
2592 cm->interp_filter = EIGHTTAP_SMOOTH; | 3423 cm->interp_filter = EIGHTTAP_SMOOTH; |
2593 } else if (filter_thresh[EIGHTTAP_SHARP] > filter_thresh[EIGHTTAP] && | 3424 } else if (filter_thresh[EIGHTTAP_SHARP] > filter_thresh[EIGHTTAP] && |
2594 filter_thresh[EIGHTTAP_SHARP] > filter_thresh[SWITCHABLE - 1]) { | 3425 filter_thresh[EIGHTTAP_SHARP] > filter_thresh[SWITCHABLE - 1]) { |
2595 cm->interp_filter = EIGHTTAP_SHARP; | 3426 cm->interp_filter = EIGHTTAP_SHARP; |
2596 } else if (filter_thresh[EIGHTTAP] > filter_thresh[SWITCHABLE - 1]) { | 3427 } else if (filter_thresh[EIGHTTAP] > filter_thresh[SWITCHABLE - 1]) { |
2597 cm->interp_filter = EIGHTTAP; | 3428 cm->interp_filter = EIGHTTAP; |
2598 } | 3429 } |
2599 } | 3430 } |
2600 | 3431 |
2601 cpi->mb.e_mbd.lossless = cpi->oxcf.lossless; | |
2602 | |
2603 /* transform size selection (4x4, 8x8, 16x16 or select-per-mb) */ | |
2604 select_tx_mode(cpi); | |
2605 cm->reference_mode = reference_mode; | |
2606 | |
2607 encode_frame_internal(cpi); | 3432 encode_frame_internal(cpi); |
2608 | 3433 |
2609 for (i = 0; i < REFERENCE_MODES; ++i) { | 3434 for (i = 0; i < REFERENCE_MODES; ++i) { |
2610 const int diff = (int) (cpi->rd_comp_pred_diff[i] / cm->MBs); | 3435 const int diff = (int) (cpi->rd_comp_pred_diff[i] / cm->MBs); |
2611 cpi->rd_prediction_type_threshes[frame_type][i] += diff; | 3436 cpi->rd_prediction_type_threshes[frame_type][i] += diff; |
2612 cpi->rd_prediction_type_threshes[frame_type][i] >>= 1; | 3437 cpi->rd_prediction_type_threshes[frame_type][i] >>= 1; |
2613 } | 3438 } |
2614 | 3439 |
2615 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) { | 3440 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) { |
2616 const int64_t diff = cpi->rd_filter_diff[i] / cm->MBs; | 3441 const int64_t diff = cpi->rd_filter_diff[i] / cm->MBs; |
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2675 cm->tx_mode = ONLY_4X4; | 3500 cm->tx_mode = ONLY_4X4; |
2676 reset_skip_txfm_size(cm, TX_4X4); | 3501 reset_skip_txfm_size(cm, TX_4X4); |
2677 } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) { | 3502 } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) { |
2678 cm->tx_mode = ALLOW_32X32; | 3503 cm->tx_mode = ALLOW_32X32; |
2679 } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) { | 3504 } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) { |
2680 cm->tx_mode = ALLOW_16X16; | 3505 cm->tx_mode = ALLOW_16X16; |
2681 reset_skip_txfm_size(cm, TX_16X16); | 3506 reset_skip_txfm_size(cm, TX_16X16); |
2682 } | 3507 } |
2683 } | 3508 } |
2684 } else { | 3509 } else { |
2685 // Force the usage of the BILINEAR interp_filter. | 3510 cm->reference_mode = SINGLE_REFERENCE; |
2686 cm->interp_filter = BILINEAR; | 3511 cm->interp_filter = SWITCHABLE; |
2687 encode_frame_internal(cpi); | 3512 encode_frame_internal(cpi); |
2688 } | 3513 } |
2689 } | 3514 } |
2690 | 3515 |
2691 static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi) { | 3516 static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi) { |
2692 const MB_PREDICTION_MODE y_mode = mi->mbmi.mode; | 3517 const MB_PREDICTION_MODE y_mode = mi->mbmi.mode; |
2693 const MB_PREDICTION_MODE uv_mode = mi->mbmi.uv_mode; | 3518 const MB_PREDICTION_MODE uv_mode = mi->mbmi.uv_mode; |
2694 const BLOCK_SIZE bsize = mi->mbmi.sb_type; | 3519 const BLOCK_SIZE bsize = mi->mbmi.sb_type; |
2695 | 3520 |
2696 ++counts->uv_mode[y_mode][uv_mode]; | |
2697 | |
2698 if (bsize < BLOCK_8X8) { | 3521 if (bsize < BLOCK_8X8) { |
2699 int idx, idy; | 3522 int idx, idy; |
2700 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; | 3523 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; |
2701 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; | 3524 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; |
2702 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) | 3525 for (idy = 0; idy < 2; idy += num_4x4_h) |
2703 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) | 3526 for (idx = 0; idx < 2; idx += num_4x4_w) |
2704 ++counts->y_mode[0][mi->bmi[idy * 2 + idx].as_mode]; | 3527 ++counts->y_mode[0][mi->bmi[idy * 2 + idx].as_mode]; |
2705 } else { | 3528 } else { |
2706 ++counts->y_mode[size_group_lookup[bsize]][y_mode]; | 3529 ++counts->y_mode[size_group_lookup[bsize]][y_mode]; |
2707 } | 3530 } |
| 3531 |
| 3532 ++counts->uv_mode[y_mode][uv_mode]; |
2708 } | 3533 } |
2709 | 3534 |
2710 // Experimental stub function to create a per MB zbin adjustment based on | 3535 // Experimental stub function to create a per MB zbin adjustment based on |
2711 // some previously calculated measure of MB activity. | 3536 // some previously calculated measure of MB activity. |
2712 static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x) { | 3537 static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x) { |
2713 #if USE_ACT_INDEX | 3538 #if USE_ACT_INDEX |
2714 x->act_zbin_adj = *(x->mb_activity_ptr); | 3539 x->act_zbin_adj = *(x->mb_activity_ptr); |
2715 #else | 3540 #else |
2716 int64_t a; | |
2717 int64_t b; | |
2718 int64_t act = *(x->mb_activity_ptr); | |
2719 | |
2720 // Apply the masking to the RD multiplier. | 3541 // Apply the masking to the RD multiplier. |
2721 a = act + 4 * cpi->activity_avg; | 3542 const int64_t act = *(x->mb_activity_ptr); |
2722 b = 4 * act + cpi->activity_avg; | 3543 const int64_t a = act + 4 * cpi->activity_avg; |
| 3544 const int64_t b = 4 * act + cpi->activity_avg; |
2723 | 3545 |
2724 if (act > cpi->activity_avg) | 3546 if (act > cpi->activity_avg) |
2725 x->act_zbin_adj = (int) (((int64_t) b + (a >> 1)) / a) - 1; | 3547 x->act_zbin_adj = (int) (((int64_t) b + (a >> 1)) / a) - 1; |
2726 else | 3548 else |
2727 x->act_zbin_adj = 1 - (int) (((int64_t) a + (b >> 1)) / b); | 3549 x->act_zbin_adj = 1 - (int) (((int64_t) a + (b >> 1)) / b); |
2728 #endif | 3550 #endif |
2729 } | 3551 } |
2730 | 3552 |
2731 static int get_zbin_mode_boost(const MB_MODE_INFO *mbmi, int enabled) { | 3553 static int get_zbin_mode_boost(const MB_MODE_INFO *mbmi, int enabled) { |
2732 if (enabled) { | 3554 if (enabled) { |
(...skipping 11 matching lines...) Expand all Loading... |
2744 } else { | 3566 } else { |
2745 return 0; | 3567 return 0; |
2746 } | 3568 } |
2747 } | 3569 } |
2748 | 3570 |
2749 static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled, | 3571 static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled, |
2750 int mi_row, int mi_col, BLOCK_SIZE bsize) { | 3572 int mi_row, int mi_col, BLOCK_SIZE bsize) { |
2751 VP9_COMMON *const cm = &cpi->common; | 3573 VP9_COMMON *const cm = &cpi->common; |
2752 MACROBLOCK *const x = &cpi->mb; | 3574 MACROBLOCK *const x = &cpi->mb; |
2753 MACROBLOCKD *const xd = &x->e_mbd; | 3575 MACROBLOCKD *const xd = &x->e_mbd; |
2754 MODE_INFO **mi_8x8 = xd->mi_8x8; | 3576 MODE_INFO **mi_8x8 = xd->mi; |
2755 MODE_INFO *mi = mi_8x8[0]; | 3577 MODE_INFO *mi = mi_8x8[0]; |
2756 MB_MODE_INFO *mbmi = &mi->mbmi; | 3578 MB_MODE_INFO *mbmi = &mi->mbmi; |
2757 PICK_MODE_CONTEXT *ctx = get_block_context(x, bsize); | 3579 PICK_MODE_CONTEXT *ctx = get_block_context(x, bsize); |
2758 unsigned int segment_id = mbmi->segment_id; | 3580 unsigned int segment_id = mbmi->segment_id; |
2759 const int mis = cm->mode_info_stride; | 3581 const int mis = cm->mi_stride; |
2760 const int mi_width = num_8x8_blocks_wide_lookup[bsize]; | 3582 const int mi_width = num_8x8_blocks_wide_lookup[bsize]; |
2761 const int mi_height = num_8x8_blocks_high_lookup[bsize]; | 3583 const int mi_height = num_8x8_blocks_high_lookup[bsize]; |
2762 | 3584 |
2763 x->skip_recode = !x->select_txfm_size && mbmi->sb_type >= BLOCK_8X8 && | 3585 x->skip_recode = !x->select_txfm_size && mbmi->sb_type >= BLOCK_8X8 && |
2764 (cpi->oxcf.aq_mode != COMPLEXITY_AQ) && | 3586 cpi->oxcf.aq_mode != COMPLEXITY_AQ && |
2765 !cpi->sf.use_nonrd_pick_mode; | 3587 cpi->oxcf.aq_mode != CYCLIC_REFRESH_AQ && |
| 3588 cpi->sf.allow_skip_recode; |
| 3589 |
2766 x->skip_optimize = ctx->is_coded; | 3590 x->skip_optimize = ctx->is_coded; |
2767 ctx->is_coded = 1; | 3591 ctx->is_coded = 1; |
2768 x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct; | 3592 x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct; |
2769 x->skip_encode = (!output_enabled && cpi->sf.skip_encode_frame && | 3593 x->skip_encode = (!output_enabled && cpi->sf.skip_encode_frame && |
2770 x->q_index < QIDX_SKIP_THRESH); | 3594 x->q_index < QIDX_SKIP_THRESH); |
| 3595 |
2771 if (x->skip_encode) | 3596 if (x->skip_encode) |
2772 return; | 3597 return; |
2773 | 3598 |
2774 if (cm->frame_type == KEY_FRAME) { | 3599 if (cm->frame_type == KEY_FRAME) { |
2775 if (cpi->oxcf.tuning == VP8_TUNE_SSIM) { | 3600 if (cpi->oxcf.tuning == VP8_TUNE_SSIM) { |
2776 adjust_act_zbin(cpi, x); | 3601 adjust_act_zbin(cpi, x); |
2777 vp9_update_zbin_extra(cpi, x); | 3602 vp9_update_zbin_extra(cpi, x); |
2778 } | 3603 } |
2779 } else { | 3604 } else { |
2780 set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]); | 3605 set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]); |
2781 xd->interp_kernel = vp9_get_interp_kernel(mbmi->interp_filter); | |
2782 | 3606 |
2783 if (cpi->oxcf.tuning == VP8_TUNE_SSIM) { | 3607 if (cpi->oxcf.tuning == VP8_TUNE_SSIM) { |
2784 // Adjust the zbin based on this MB rate. | 3608 // Adjust the zbin based on this MB rate. |
2785 adjust_act_zbin(cpi, x); | 3609 adjust_act_zbin(cpi, x); |
2786 } | 3610 } |
2787 | 3611 |
2788 // Experimental code. Special case for gf and arf zeromv modes. | 3612 // Experimental code. Special case for gf and arf zeromv modes. |
2789 // Increase zbin size to suppress noise | 3613 // Increase zbin size to suppress noise |
2790 cpi->zbin_mode_boost = get_zbin_mode_boost(mbmi, | 3614 cpi->zbin_mode_boost = get_zbin_mode_boost(mbmi, |
2791 cpi->zbin_mode_boost_enabled); | 3615 cpi->zbin_mode_boost_enabled); |
2792 vp9_update_zbin_extra(cpi, x); | 3616 vp9_update_zbin_extra(cpi, x); |
2793 } | 3617 } |
2794 | 3618 |
2795 if (!is_inter_block(mbmi)) { | 3619 if (!is_inter_block(mbmi)) { |
2796 int plane; | 3620 int plane; |
2797 mbmi->skip = 1; | 3621 mbmi->skip = 1; |
2798 for (plane = 0; plane < MAX_MB_PLANE; ++plane) | 3622 for (plane = 0; plane < MAX_MB_PLANE; ++plane) |
2799 vp9_encode_intra_block_plane(x, MAX(bsize, BLOCK_8X8), plane); | 3623 vp9_encode_intra_block_plane(x, MAX(bsize, BLOCK_8X8), plane); |
2800 if (output_enabled) | 3624 if (output_enabled) |
2801 sum_intra_stats(&cm->counts, mi); | 3625 sum_intra_stats(&cm->counts, mi); |
2802 vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8)); | 3626 vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8)); |
2803 } else { | 3627 } else { |
2804 int ref; | 3628 int ref; |
2805 const int is_compound = has_second_ref(mbmi); | 3629 const int is_compound = has_second_ref(mbmi); |
2806 for (ref = 0; ref < 1 + is_compound; ++ref) { | 3630 for (ref = 0; ref < 1 + is_compound; ++ref) { |
2807 YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, | 3631 YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, |
2808 mbmi->ref_frame[ref]); | 3632 mbmi->ref_frame[ref]); |
2809 setup_pre_planes(xd, ref, cfg, mi_row, mi_col, &xd->block_refs[ref]->sf); | 3633 vp9_setup_pre_planes(xd, ref, cfg, mi_row, mi_col, |
| 3634 &xd->block_refs[ref]->sf); |
2810 } | 3635 } |
2811 vp9_build_inter_predictors_sb(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8)); | 3636 vp9_build_inter_predictors_sb(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8)); |
2812 | 3637 |
2813 if (!x->skip) { | 3638 if (!x->skip) { |
2814 mbmi->skip = 1; | 3639 mbmi->skip = 1; |
2815 vp9_encode_sb(x, MAX(bsize, BLOCK_8X8)); | 3640 vp9_encode_sb(x, MAX(bsize, BLOCK_8X8)); |
2816 vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8)); | 3641 vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8)); |
2817 } else { | 3642 } else { |
2818 mbmi->skip = 1; | 3643 mbmi->skip = 1; |
2819 if (output_enabled) | 3644 if (output_enabled) |
(...skipping 21 matching lines...) Expand all Loading... |
2841 tx_size = (bsize >= BLOCK_8X8) ? mbmi->tx_size : TX_4X4; | 3666 tx_size = (bsize >= BLOCK_8X8) ? mbmi->tx_size : TX_4X4; |
2842 } | 3667 } |
2843 | 3668 |
2844 for (y = 0; y < mi_height; y++) | 3669 for (y = 0; y < mi_height; y++) |
2845 for (x = 0; x < mi_width; x++) | 3670 for (x = 0; x < mi_width; x++) |
2846 if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows) | 3671 if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows) |
2847 mi_8x8[mis * y + x]->mbmi.tx_size = tx_size; | 3672 mi_8x8[mis * y + x]->mbmi.tx_size = tx_size; |
2848 } | 3673 } |
2849 } | 3674 } |
2850 } | 3675 } |
OLD | NEW |