OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include <assert.h> | 11 #include <assert.h> |
12 #include <stdlib.h> // qsort() | 12 #include <stdlib.h> // qsort() |
13 | 13 |
14 #include "./vp9_rtcd.h" | 14 #include "./vp9_rtcd.h" |
15 #include "./vpx_scale_rtcd.h" | 15 #include "./vpx_scale_rtcd.h" |
16 | 16 |
17 #include "vpx_mem/vpx_mem.h" | 17 #include "vpx_mem/vpx_mem.h" |
18 #include "vpx_ports/mem_ops.h" | 18 #include "vpx_ports/mem_ops.h" |
19 #include "vpx_scale/vpx_scale.h" | 19 #include "vpx_scale/vpx_scale.h" |
20 | 20 |
21 #include "vp9/common/vp9_alloccommon.h" | 21 #include "vp9/common/vp9_alloccommon.h" |
22 #include "vp9/common/vp9_common.h" | 22 #include "vp9/common/vp9_common.h" |
23 #include "vp9/common/vp9_entropy.h" | 23 #include "vp9/common/vp9_entropy.h" |
24 #include "vp9/common/vp9_entropymode.h" | 24 #include "vp9/common/vp9_entropymode.h" |
25 #include "vp9/common/vp9_idct.h" | 25 #include "vp9/common/vp9_idct.h" |
26 #include "vp9/common/vp9_loopfilter_thread.h" | |
27 #include "vp9/common/vp9_pred_common.h" | 26 #include "vp9/common/vp9_pred_common.h" |
28 #include "vp9/common/vp9_quant_common.h" | 27 #include "vp9/common/vp9_quant_common.h" |
29 #include "vp9/common/vp9_reconintra.h" | 28 #include "vp9/common/vp9_reconintra.h" |
30 #include "vp9/common/vp9_reconinter.h" | 29 #include "vp9/common/vp9_reconinter.h" |
31 #include "vp9/common/vp9_seg_common.h" | 30 #include "vp9/common/vp9_seg_common.h" |
32 #include "vp9/common/vp9_thread.h" | 31 #include "vp9/common/vp9_thread.h" |
33 #include "vp9/common/vp9_tile_common.h" | 32 #include "vp9/common/vp9_tile_common.h" |
34 | 33 |
35 #include "vp9/decoder/vp9_decodeframe.h" | 34 #include "vp9/decoder/vp9_decodeframe.h" |
36 #include "vp9/decoder/vp9_detokenize.h" | 35 #include "vp9/decoder/vp9_detokenize.h" |
(...skipping 255 matching lines...)
292 vpx_memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0])); | 291 vpx_memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0])); |
293 else | 292 else |
294 vpx_memset(dqcoeff, 0, (16 << (tx_size << 1)) * sizeof(dqcoeff[0])); | 293 vpx_memset(dqcoeff, 0, (16 << (tx_size << 1)) * sizeof(dqcoeff[0])); |
295 } | 294 } |
296 } | 295 } |
297 } | 296 } |
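The second memset above clears exactly one transform block's worth of coefficients; expanding 16 << (tx_size << 1) with the VP9 TX_SIZE enum (TX_4X4 = 0 through TX_32X32 = 3):

    16 << (0 << 1) == 16    coefficients in a 4x4 transform
    16 << (1 << 1) == 64    coefficients in an 8x8 transform
    16 << (2 << 1) == 256   coefficients in a 16x16 transform
    16 << (3 << 1) == 1024  coefficients in a 32x32 transform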
298 | 297 |
299 struct intra_args { | 298 struct intra_args { |
300 VP9_COMMON *cm; | 299 VP9_COMMON *cm; |
301 MACROBLOCKD *xd; | 300 MACROBLOCKD *xd; |
302 FRAME_COUNTS *counts; | |
303 vp9_reader *r; | 301 vp9_reader *r; |
304 }; | 302 }; |
305 | 303 |
306 static void predict_and_reconstruct_intra_block(int plane, int block, | 304 static void predict_and_reconstruct_intra_block(int plane, int block, |
307 BLOCK_SIZE plane_bsize, | 305 BLOCK_SIZE plane_bsize, |
308 TX_SIZE tx_size, void *arg) { | 306 TX_SIZE tx_size, void *arg) { |
309 struct intra_args *const args = (struct intra_args *)arg; | 307 struct intra_args *const args = (struct intra_args *)arg; |
310 VP9_COMMON *const cm = args->cm; | 308 VP9_COMMON *const cm = args->cm; |
311 MACROBLOCKD *const xd = args->xd; | 309 MACROBLOCKD *const xd = args->xd; |
312 struct macroblockd_plane *const pd = &xd->plane[plane]; | 310 struct macroblockd_plane *const pd = &xd->plane[plane]; |
313 MODE_INFO *const mi = xd->mi[0].src_mi; | 311 MODE_INFO *const mi = xd->mi[0].src_mi; |
314 const PREDICTION_MODE mode = (plane == 0) ? get_y_mode(mi, block) | 312 const PREDICTION_MODE mode = (plane == 0) ? get_y_mode(mi, block) |
315 : mi->mbmi.uv_mode; | 313 : mi->mbmi.uv_mode; |
316 int x, y; | 314 int x, y; |
317 uint8_t *dst; | 315 uint8_t *dst; |
318 txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y); | 316 txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y); |
319 dst = &pd->dst.buf[4 * y * pd->dst.stride + 4 * x]; | 317 dst = &pd->dst.buf[4 * y * pd->dst.stride + 4 * x]; |
320 | 318 |
321 vp9_predict_intra_block(xd, block >> (tx_size << 1), | 319 vp9_predict_intra_block(xd, block >> (tx_size << 1), |
322 b_width_log2_lookup[plane_bsize], tx_size, mode, | 320 b_width_log2_lookup[plane_bsize], tx_size, mode, |
323 dst, pd->dst.stride, dst, pd->dst.stride, | 321 dst, pd->dst.stride, dst, pd->dst.stride, |
324 x, y, plane); | 322 x, y, plane); |
325 | 323 |
326 if (!mi->mbmi.skip) { | 324 if (!mi->mbmi.skip) { |
327 const int eob = vp9_decode_block_tokens(cm, xd, args->counts, plane, block, | 325 const int eob = vp9_decode_block_tokens(cm, xd, plane, block, |
328 plane_bsize, x, y, tx_size, | 326 plane_bsize, x, y, tx_size, |
329 args->r); | 327 args->r); |
330 inverse_transform_block(xd, plane, block, tx_size, dst, pd->dst.stride, | 328 inverse_transform_block(xd, plane, block, tx_size, dst, pd->dst.stride, |
331 eob); | 329 eob); |
332 } | 330 } |
333 } | 331 } |
334 | 332 |
335 struct inter_args { | 333 struct inter_args { |
336 VP9_COMMON *cm; | 334 VP9_COMMON *cm; |
337 MACROBLOCKD *xd; | 335 MACROBLOCKD *xd; |
338 vp9_reader *r; | 336 vp9_reader *r; |
339 FRAME_COUNTS *counts; | |
340 int *eobtotal; | 337 int *eobtotal; |
341 }; | 338 }; |
342 | 339 |
343 static void reconstruct_inter_block(int plane, int block, | 340 static void reconstruct_inter_block(int plane, int block, |
344 BLOCK_SIZE plane_bsize, | 341 BLOCK_SIZE plane_bsize, |
345 TX_SIZE tx_size, void *arg) { | 342 TX_SIZE tx_size, void *arg) { |
346 struct inter_args *args = (struct inter_args *)arg; | 343 struct inter_args *args = (struct inter_args *)arg; |
347 VP9_COMMON *const cm = args->cm; | 344 VP9_COMMON *const cm = args->cm; |
348 MACROBLOCKD *const xd = args->xd; | 345 MACROBLOCKD *const xd = args->xd; |
349 struct macroblockd_plane *const pd = &xd->plane[plane]; | 346 struct macroblockd_plane *const pd = &xd->plane[plane]; |
350 int x, y, eob; | 347 int x, y, eob; |
351 txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y); | 348 txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y); |
352 eob = vp9_decode_block_tokens(cm, xd, args->counts, plane, block, plane_bsize, | 349 eob = vp9_decode_block_tokens(cm, xd, plane, block, plane_bsize, x, y, |
353 x, y, tx_size, args->r); | 350 tx_size, args->r); |
354 inverse_transform_block(xd, plane, block, tx_size, | 351 inverse_transform_block(xd, plane, block, tx_size, |
355 &pd->dst.buf[4 * y * pd->dst.stride + 4 * x], | 352 &pd->dst.buf[4 * y * pd->dst.stride + 4 * x], |
356 pd->dst.stride, eob); | 353 pd->dst.stride, eob); |
357 *args->eobtotal += eob; | 354 *args->eobtotal += eob; |
358 } | 355 } |
359 | 356 |
360 static MB_MODE_INFO *set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd, | 357 static MB_MODE_INFO *set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd, |
361 const TileInfo *const tile, | 358 const TileInfo *const tile, |
362 BLOCK_SIZE bsize, int mi_row, int mi_col) { | 359 BLOCK_SIZE bsize, int mi_row, int mi_col) { |
363 const int bw = num_8x8_blocks_wide_lookup[bsize]; | 360 const int bw = num_8x8_blocks_wide_lookup[bsize]; |
(...skipping 15 matching lines...)
379 set_skip_context(xd, mi_row, mi_col); | 376 set_skip_context(xd, mi_row, mi_col); |
380 | 377 |
381 // Distance of Mb to the various image edges. These are specified to 8th pel | 378 // Distance of Mb to the various image edges. These are specified to 8th pel |
382 // as they are always compared to values that are in 1/8th pel units | 379 // as they are always compared to values that are in 1/8th pel units |
383 set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols); | 380 set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols); |
384 | 381 |
385 vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col); | 382 vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col); |
386 return &xd->mi[0].mbmi; | 383 return &xd->mi[0].mbmi; |
387 } | 384 } |
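Note on the 1/8th-pel comment above: set_mi_row_col() stores the mb_to_*_edge distances as pixel distances multiplied by 8, which is why later motion-compensation code converts them back to pixels with shifts such as -xd->mb_to_left_edge >> (3 + pd->subsampling_x).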
388 | 385 |
389 static void decode_block(VP9Decoder *const pbi, MACROBLOCKD *const xd, | 386 static void decode_block(VP9_COMMON *const cm, MACROBLOCKD *const xd, |
390 FRAME_COUNTS *counts, | |
391 const TileInfo *const tile, | 387 const TileInfo *const tile, |
392 int mi_row, int mi_col, | 388 int mi_row, int mi_col, |
393 vp9_reader *r, BLOCK_SIZE bsize) { | 389 vp9_reader *r, BLOCK_SIZE bsize) { |
394 VP9_COMMON *const cm = &pbi->common; | |
395 const int less8x8 = bsize < BLOCK_8X8; | 390 const int less8x8 = bsize < BLOCK_8X8; |
396 MB_MODE_INFO *mbmi = set_offsets(cm, xd, tile, bsize, mi_row, mi_col); | 391 MB_MODE_INFO *mbmi = set_offsets(cm, xd, tile, bsize, mi_row, mi_col); |
397 vp9_read_mode_info(pbi, xd, counts, tile, mi_row, mi_col, r); | 392 vp9_read_mode_info(cm, xd, tile, mi_row, mi_col, r); |
398 | 393 |
399 if (less8x8) | 394 if (less8x8) |
400 bsize = BLOCK_8X8; | 395 bsize = BLOCK_8X8; |
401 | 396 |
402 if (mbmi->skip) { | 397 if (mbmi->skip) { |
403 reset_skip_context(xd, bsize); | 398 reset_skip_context(xd, bsize); |
404 } else { | 399 } else { |
405 if (cm->seg.enabled) | 400 if (cm->seg.enabled) |
406 setup_plane_dequants(cm, xd, vp9_get_qindex(&cm->seg, mbmi->segment_id, | 401 setup_plane_dequants(cm, xd, vp9_get_qindex(&cm->seg, mbmi->segment_id, |
407 cm->base_qindex)); | 402 cm->base_qindex)); |
408 } | 403 } |
409 | 404 |
410 if (!is_inter_block(mbmi)) { | 405 if (!is_inter_block(mbmi)) { |
411 struct intra_args arg = { cm, xd, counts, r }; | 406 struct intra_args arg = { cm, xd, r }; |
412 vp9_foreach_transformed_block(xd, bsize, | 407 vp9_foreach_transformed_block(xd, bsize, |
413 predict_and_reconstruct_intra_block, &arg); | 408 predict_and_reconstruct_intra_block, &arg); |
414 } else { | 409 } else { |
415 // Prediction | 410 // Prediction |
416 vp9_dec_build_inter_predictors_sb(pbi, xd, mi_row, mi_col, bsize); | 411 vp9_dec_build_inter_predictors_sb(xd, mi_row, mi_col, bsize); |
417 | 412 |
418 // Reconstruction | 413 // Reconstruction |
419 if (!mbmi->skip) { | 414 if (!mbmi->skip) { |
420 int eobtotal = 0; | 415 int eobtotal = 0; |
421 struct inter_args arg = { cm, xd, r, counts, &eobtotal }; | 416 struct inter_args arg = { cm, xd, r, &eobtotal }; |
422 vp9_foreach_transformed_block(xd, bsize, reconstruct_inter_block, &arg); | 417 vp9_foreach_transformed_block(xd, bsize, reconstruct_inter_block, &arg); |
423 if (!less8x8 && eobtotal == 0) | 418 if (!less8x8 && eobtotal == 0) |
424 mbmi->skip = 1; // skip loopfilter | 419 mbmi->skip = 1; // skip loopfilter |
425 } | 420 } |
426 } | 421 } |
427 | 422 |
428 xd->corrupted |= vp9_reader_has_error(r); | 423 xd->corrupted |= vp9_reader_has_error(r); |
429 } | 424 } |
430 | 425 |
431 static PARTITION_TYPE read_partition(VP9_COMMON *cm, MACROBLOCKD *xd, | 426 static PARTITION_TYPE read_partition(VP9_COMMON *cm, MACROBLOCKD *xd, int hbs, |
432 FRAME_COUNTS *counts, int hbs, | |
433 int mi_row, int mi_col, BLOCK_SIZE bsize, | 427 int mi_row, int mi_col, BLOCK_SIZE bsize, |
434 vp9_reader *r) { | 428 vp9_reader *r) { |
435 const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize); | 429 const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize); |
436 const vp9_prob *const probs = get_partition_probs(cm, ctx); | 430 const vp9_prob *const probs = get_partition_probs(cm, ctx); |
437 const int has_rows = (mi_row + hbs) < cm->mi_rows; | 431 const int has_rows = (mi_row + hbs) < cm->mi_rows; |
438 const int has_cols = (mi_col + hbs) < cm->mi_cols; | 432 const int has_cols = (mi_col + hbs) < cm->mi_cols; |
439 PARTITION_TYPE p; | 433 PARTITION_TYPE p; |
440 | 434 |
441 if (has_rows && has_cols) | 435 if (has_rows && has_cols) |
442 p = (PARTITION_TYPE)vp9_read_tree(r, vp9_partition_tree, probs); | 436 p = (PARTITION_TYPE)vp9_read_tree(r, vp9_partition_tree, probs); |
443 else if (!has_rows && has_cols) | 437 else if (!has_rows && has_cols) |
444 p = vp9_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ; | 438 p = vp9_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ; |
445 else if (has_rows && !has_cols) | 439 else if (has_rows && !has_cols) |
446 p = vp9_read(r, probs[2]) ? PARTITION_SPLIT : PARTITION_VERT; | 440 p = vp9_read(r, probs[2]) ? PARTITION_SPLIT : PARTITION_VERT; |
447 else | 441 else |
448 p = PARTITION_SPLIT; | 442 p = PARTITION_SPLIT; |
449 | 443 |
450 if (!cm->frame_parallel_decoding_mode) | 444 if (!cm->frame_parallel_decoding_mode) |
451 ++counts->partition[ctx][p]; | 445 ++cm->counts.partition[ctx][p]; |
452 | 446 |
453 return p; | 447 return p; |
454 } | 448 } |
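For reference, the four cases read_partition() distinguishes at frame edges (has_rows/has_cols are false when the bottom/right half of the block would fall outside the frame):

    has_rows  has_cols  what is coded
       1         1      full read of vp9_partition_tree
       0         1      one bit (probs[1]): 0 = HORZ, 1 = SPLIT
       1         0      one bit (probs[2]): 0 = VERT, 1 = SPLIT
       0         0      nothing; the partition is forced to SPLIT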
455 | 449 |
456 static void decode_partition(VP9Decoder *const pbi, MACROBLOCKD *const xd, | 450 static void decode_partition(VP9_COMMON *const cm, MACROBLOCKD *const xd, |
457 FRAME_COUNTS *counts, | |
458 const TileInfo *const tile, | 451 const TileInfo *const tile, |
459 int mi_row, int mi_col, | 452 int mi_row, int mi_col, |
460 vp9_reader* r, BLOCK_SIZE bsize) { | 453 vp9_reader* r, BLOCK_SIZE bsize) { |
461 VP9_COMMON *const cm = &pbi->common; | |
462 const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2; | 454 const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2; |
463 PARTITION_TYPE partition; | 455 PARTITION_TYPE partition; |
464 BLOCK_SIZE subsize, uv_subsize; | 456 BLOCK_SIZE subsize, uv_subsize; |
465 | 457 |
466 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) | 458 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) |
467 return; | 459 return; |
468 | 460 |
469 partition = read_partition(cm, xd, counts, hbs, mi_row, mi_col, bsize, r); | 461 partition = read_partition(cm, xd, hbs, mi_row, mi_col, bsize, r); |
470 subsize = get_subsize(bsize, partition); | 462 subsize = get_subsize(bsize, partition); |
471 uv_subsize = ss_size_lookup[subsize][cm->subsampling_x][cm->subsampling_y]; | 463 uv_subsize = ss_size_lookup[subsize][cm->subsampling_x][cm->subsampling_y]; |
472 if (subsize >= BLOCK_8X8 && uv_subsize == BLOCK_INVALID) | 464 if (subsize >= BLOCK_8X8 && uv_subsize == BLOCK_INVALID) |
473 vpx_internal_error(xd->error_info, | 465 vpx_internal_error(xd->error_info, |
474 VPX_CODEC_CORRUPT_FRAME, "Invalid block size."); | 466 VPX_CODEC_CORRUPT_FRAME, "Invalid block size."); |
475 if (subsize < BLOCK_8X8) { | 467 if (subsize < BLOCK_8X8) { |
476 decode_block(pbi, xd, counts, tile, mi_row, mi_col, r, subsize); | 468 decode_block(cm, xd, tile, mi_row, mi_col, r, subsize); |
477 } else { | 469 } else { |
478 switch (partition) { | 470 switch (partition) { |
479 case PARTITION_NONE: | 471 case PARTITION_NONE: |
480 decode_block(pbi, xd, counts, tile, mi_row, mi_col, r, subsize); | 472 decode_block(cm, xd, tile, mi_row, mi_col, r, subsize); |
481 break; | 473 break; |
482 case PARTITION_HORZ: | 474 case PARTITION_HORZ: |
483 decode_block(pbi, xd, counts, tile, mi_row, mi_col, r, subsize); | 475 decode_block(cm, xd, tile, mi_row, mi_col, r, subsize); |
484 if (mi_row + hbs < cm->mi_rows) | 476 if (mi_row + hbs < cm->mi_rows) |
485 decode_block(pbi, xd, counts, tile, mi_row + hbs, mi_col, r, subsize); | 477 decode_block(cm, xd, tile, mi_row + hbs, mi_col, r, subsize); |
486 break; | 478 break; |
487 case PARTITION_VERT: | 479 case PARTITION_VERT: |
488 decode_block(pbi, xd, counts, tile, mi_row, mi_col, r, subsize); | 480 decode_block(cm, xd, tile, mi_row, mi_col, r, subsize); |
489 if (mi_col + hbs < cm->mi_cols) | 481 if (mi_col + hbs < cm->mi_cols) |
490 decode_block(pbi, xd, counts, tile, mi_row, mi_col + hbs, r, subsize); | 482 decode_block(cm, xd, tile, mi_row, mi_col + hbs, r, subsize); |
491 break; | 483 break; |
492 case PARTITION_SPLIT: | 484 case PARTITION_SPLIT: |
493 decode_partition(pbi, xd, counts, tile, mi_row, mi_col, r, subsize); | 485 decode_partition(cm, xd, tile, mi_row, mi_col, r, subsize); |
494 decode_partition(pbi, xd, counts, tile, mi_row, mi_col + hbs, r, | 486 decode_partition(cm, xd, tile, mi_row, mi_col + hbs, r, subsize); |
495 subsize); | 487 decode_partition(cm, xd, tile, mi_row + hbs, mi_col, r, subsize); |
496 decode_partition(pbi, xd, counts, tile, mi_row + hbs, mi_col, r, | 488 decode_partition(cm, xd, tile, mi_row + hbs, mi_col + hbs, r, subsize); |
497 subsize); | |
498 decode_partition(pbi, xd, counts, tile, mi_row + hbs, mi_col + hbs, r, | |
499 subsize); | |
500 break; | 489 break; |
501 default: | 490 default: |
502 assert(0 && "Invalid partition type"); | 491 assert(0 && "Invalid partition type"); |
503 } | 492 } |
504 } | 493 } |
505 | 494 |
506 // update partition context | 495 // update partition context |
507 if (bsize >= BLOCK_8X8 && | 496 if (bsize >= BLOCK_8X8 && |
508 (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT)) | 497 (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT)) |
509 update_partition_context(xd, mi_row, mi_col, subsize, bsize); | 498 update_partition_context(xd, mi_row, mi_col, subsize, bsize); |
(...skipping 201 matching lines...)
711 cm->height = height; | 700 cm->height = height; |
712 } | 701 } |
713 if (cm->cur_frame->mvs == NULL || cm->mi_rows > cm->cur_frame->mi_rows || | 702 if (cm->cur_frame->mvs == NULL || cm->mi_rows > cm->cur_frame->mi_rows || |
714 cm->mi_cols > cm->cur_frame->mi_cols) { | 703 cm->mi_cols > cm->cur_frame->mi_cols) { |
715 resize_mv_buffer(cm); | 704 resize_mv_buffer(cm); |
716 } | 705 } |
717 } | 706 } |
718 | 707 |
719 static void setup_frame_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) { | 708 static void setup_frame_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) { |
720 int width, height; | 709 int width, height; |
721 BufferPool *const pool = cm->buffer_pool; | |
722 vp9_read_frame_size(rb, &width, &height); | 710 vp9_read_frame_size(rb, &width, &height); |
723 resize_context_buffers(cm, width, height); | 711 resize_context_buffers(cm, width, height); |
724 setup_display_size(cm, rb); | 712 setup_display_size(cm, rb); |
725 | 713 |
726 lock_buffer_pool(pool); | |
727 if (vp9_realloc_frame_buffer( | 714 if (vp9_realloc_frame_buffer( |
728 get_frame_new_buffer(cm), cm->width, cm->height, | 715 get_frame_new_buffer(cm), cm->width, cm->height, |
729 cm->subsampling_x, cm->subsampling_y, | 716 cm->subsampling_x, cm->subsampling_y, |
730 #if CONFIG_VP9_HIGHBITDEPTH | 717 #if CONFIG_VP9_HIGHBITDEPTH |
731 cm->use_highbitdepth, | 718 cm->use_highbitdepth, |
732 #endif | 719 #endif |
733 VP9_DEC_BORDER_IN_PIXELS, | 720 VP9_DEC_BORDER_IN_PIXELS, |
734 cm->byte_alignment, | 721 cm->byte_alignment, |
735 &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb, | 722 &cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer, cm->get_fb_cb, |
736 pool->cb_priv)) { | 723 cm->cb_priv)) { |
737 unlock_buffer_pool(pool); | |
738 vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, | 724 vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, |
739 "Failed to allocate frame buffer"); | 725 "Failed to allocate frame buffer"); |
740 } | 726 } |
741 unlock_buffer_pool(pool); | 727 cm->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x; |
742 | 728 cm->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y; |
743 pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x; | 729 cm->frame_bufs[cm->new_fb_idx].buf.color_space = |
744 pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y; | 730 (vpx_color_space_t)cm->color_space; |
745 pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth; | 731 cm->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth; |
746 } | 732 } |
747 | 733 |
748 static INLINE int valid_ref_frame_img_fmt(vpx_bit_depth_t ref_bit_depth, | 734 static INLINE int valid_ref_frame_img_fmt(vpx_bit_depth_t ref_bit_depth, |
749 int ref_xss, int ref_yss, | 735 int ref_xss, int ref_yss, |
750 vpx_bit_depth_t this_bit_depth, | 736 vpx_bit_depth_t this_bit_depth, |
751 int this_xss, int this_yss) { | 737 int this_xss, int this_yss) { |
752 return ref_bit_depth == this_bit_depth && ref_xss == this_xss && | 738 return ref_bit_depth == this_bit_depth && ref_xss == this_xss && |
753 ref_yss == this_yss; | 739 ref_yss == this_yss; |
754 } | 740 } |
755 | 741 |
756 static void setup_frame_size_with_refs(VP9_COMMON *cm, | 742 static void setup_frame_size_with_refs(VP9_COMMON *cm, |
757 struct vp9_read_bit_buffer *rb) { | 743 struct vp9_read_bit_buffer *rb) { |
758 int width, height; | 744 int width, height; |
759 int found = 0, i; | 745 int found = 0, i; |
760 int has_valid_ref_frame = 0; | 746 int has_valid_ref_frame = 0; |
761 BufferPool *const pool = cm->buffer_pool; | |
762 for (i = 0; i < REFS_PER_FRAME; ++i) { | 747 for (i = 0; i < REFS_PER_FRAME; ++i) { |
763 if (vp9_rb_read_bit(rb)) { | 748 if (vp9_rb_read_bit(rb)) { |
764 YV12_BUFFER_CONFIG *const buf = cm->frame_refs[i].buf; | 749 YV12_BUFFER_CONFIG *const buf = cm->frame_refs[i].buf; |
765 width = buf->y_crop_width; | 750 width = buf->y_crop_width; |
766 height = buf->y_crop_height; | 751 height = buf->y_crop_height; |
767 found = 1; | 752 found = 1; |
768 break; | 753 break; |
769 } | 754 } |
770 } | 755 } |
771 | 756 |
(...skipping 24 matching lines...)
796 cm->bit_depth, | 781 cm->bit_depth, |
797 cm->subsampling_x, | 782 cm->subsampling_x, |
798 cm->subsampling_y)) | 783 cm->subsampling_y)) |
799 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, | 784 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, |
800 "Referenced frame has incompatible color format"); | 785 "Referenced frame has incompatible color format"); |
801 } | 786 } |
802 | 787 |
803 resize_context_buffers(cm, width, height); | 788 resize_context_buffers(cm, width, height); |
804 setup_display_size(cm, rb); | 789 setup_display_size(cm, rb); |
805 | 790 |
806 lock_buffer_pool(pool); | |
807 if (vp9_realloc_frame_buffer( | 791 if (vp9_realloc_frame_buffer( |
808 get_frame_new_buffer(cm), cm->width, cm->height, | 792 get_frame_new_buffer(cm), cm->width, cm->height, |
809 cm->subsampling_x, cm->subsampling_y, | 793 cm->subsampling_x, cm->subsampling_y, |
810 #if CONFIG_VP9_HIGHBITDEPTH | 794 #if CONFIG_VP9_HIGHBITDEPTH |
811 cm->use_highbitdepth, | 795 cm->use_highbitdepth, |
812 #endif | 796 #endif |
813 VP9_DEC_BORDER_IN_PIXELS, | 797 VP9_DEC_BORDER_IN_PIXELS, |
814 cm->byte_alignment, | 798 cm->byte_alignment, |
815 &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb, | 799 &cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer, cm->get_fb_cb, |
816 pool->cb_priv)) { | 800 cm->cb_priv)) { |
817 unlock_buffer_pool(pool); | |
818 vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, | 801 vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, |
819 "Failed to allocate frame buffer"); | 802 "Failed to allocate frame buffer"); |
820 } | 803 } |
821 unlock_buffer_pool(pool); | 804 cm->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x; |
822 | 805 cm->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y; |
823 pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x; | 806 cm->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth; |
824 pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y; | |
825 pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth; | |
826 } | 807 } |
827 | 808 |
828 static void setup_tile_info(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) { | 809 static void setup_tile_info(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) { |
829 int min_log2_tile_cols, max_log2_tile_cols, max_ones; | 810 int min_log2_tile_cols, max_log2_tile_cols, max_ones; |
830 vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols); | 811 vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols); |
831 | 812 |
832 // columns | 813 // columns |
833 max_ones = max_log2_tile_cols - min_log2_tile_cols; | 814 max_ones = max_log2_tile_cols - min_log2_tile_cols; |
834 cm->log2_tile_cols = min_log2_tile_cols; | 815 cm->log2_tile_cols = min_log2_tile_cols; |
835 while (max_ones-- && vp9_rb_read_bit(rb)) | 816 while (max_ones-- && vp9_rb_read_bit(rb)) |
(...skipping 148 matching lines...)
984 mi_row += MI_BLOCK_SIZE) { | 965 mi_row += MI_BLOCK_SIZE) { |
985 for (tile_col = 0; tile_col < tile_cols; ++tile_col) { | 966 for (tile_col = 0; tile_col < tile_cols; ++tile_col) { |
986 const int col = pbi->inv_tile_order ? | 967 const int col = pbi->inv_tile_order ? |
987 tile_cols - tile_col - 1 : tile_col; | 968 tile_cols - tile_col - 1 : tile_col; |
988 tile_data = pbi->tile_data + tile_cols * tile_row + col; | 969 tile_data = pbi->tile_data + tile_cols * tile_row + col; |
989 vp9_tile_set_col(&tile, tile_data->cm, col); | 970 vp9_tile_set_col(&tile, tile_data->cm, col); |
990 vp9_zero(tile_data->xd.left_context); | 971 vp9_zero(tile_data->xd.left_context); |
991 vp9_zero(tile_data->xd.left_seg_context); | 972 vp9_zero(tile_data->xd.left_seg_context); |
992 for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end; | 973 for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end; |
993 mi_col += MI_BLOCK_SIZE) { | 974 mi_col += MI_BLOCK_SIZE) { |
994 decode_partition(pbi, &tile_data->xd, &cm->counts, &tile, mi_row, | 975 decode_partition(tile_data->cm, &tile_data->xd, &tile, mi_row, mi_col, |
995 mi_col, &tile_data->bit_reader, BLOCK_64X64); | 976 &tile_data->bit_reader, BLOCK_64X64); |
996 } | 977 } |
997 pbi->mb.corrupted |= tile_data->xd.corrupted; | 978 pbi->mb.corrupted |= tile_data->xd.corrupted; |
998 if (pbi->mb.corrupted) | 979 if (pbi->mb.corrupted) |
999 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, | 980 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, |
1000 "Failed to decode tile data"); | 981 "Failed to decode tile data"); |
1001 } | 982 } |
1002 // Loopfilter one row. | 983 // Loopfilter one row. |
1003 if (cm->lf.filter_level) { | 984 if (cm->lf.filter_level) { |
1004 const int lf_start = mi_row - MI_BLOCK_SIZE; | 985 const int lf_start = mi_row - MI_BLOCK_SIZE; |
1005 LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1; | 986 LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1; |
1006 | 987 |
1007 // delay the loopfilter by 1 macroblock row. | 988 // delay the loopfilter by 1 macroblock row. |
1008 if (lf_start < 0) continue; | 989 if (lf_start < 0) continue; |
1009 | 990 |
1010 // decoding has completed: finish up the loop filter in this thread. | 991 // decoding has completed: finish up the loop filter in this thread. |
1011 if (mi_row + MI_BLOCK_SIZE >= cm->mi_rows) continue; | 992 if (mi_row + MI_BLOCK_SIZE >= cm->mi_rows) continue; |
1012 | 993 |
1013 winterface->sync(&pbi->lf_worker); | 994 winterface->sync(&pbi->lf_worker); |
1014 lf_data->start = lf_start; | 995 lf_data->start = lf_start; |
1015 lf_data->stop = mi_row; | 996 lf_data->stop = mi_row; |
1016 if (pbi->max_threads > 1) { | 997 if (pbi->max_threads > 1) { |
1017 winterface->launch(&pbi->lf_worker); | 998 winterface->launch(&pbi->lf_worker); |
1018 } else { | 999 } else { |
1019 winterface->execute(&pbi->lf_worker); | 1000 winterface->execute(&pbi->lf_worker); |
1020 } | 1001 } |
1021 } | 1002 } |
1022 // After loopfiltering, the last 7 row pixels in each superblock row may | |
1023 // still be changed by the longest loopfilter of the next superblock | |
1024 // row. | |
1025 if (pbi->frame_parallel_decode) | |
1026 vp9_frameworker_broadcast(pbi->cur_buf, | |
1027 mi_row << MI_BLOCK_SIZE_LOG2); | |
1028 } | 1003 } |
1029 } | 1004 } |
1030 | 1005 |
1031 // Loopfilter remaining rows in the frame. | 1006 // Loopfilter remaining rows in the frame. |
1032 if (cm->lf.filter_level) { | 1007 if (cm->lf.filter_level) { |
1033 LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1; | 1008 LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1; |
1034 winterface->sync(&pbi->lf_worker); | 1009 winterface->sync(&pbi->lf_worker); |
1035 lf_data->start = lf_data->stop; | 1010 lf_data->start = lf_data->stop; |
1036 lf_data->stop = cm->mi_rows; | 1011 lf_data->stop = cm->mi_rows; |
1037 winterface->execute(&pbi->lf_worker); | 1012 winterface->execute(&pbi->lf_worker); |
1038 } | 1013 } |
1039 | 1014 |
1040 // Get last tile data. | 1015 // Get last tile data. |
1041 tile_data = pbi->tile_data + tile_cols * tile_rows - 1; | 1016 tile_data = pbi->tile_data + tile_cols * tile_rows - 1; |
1042 | 1017 |
1043 if (pbi->frame_parallel_decode) | |
1044 vp9_frameworker_broadcast(pbi->cur_buf, INT_MAX); | |
1045 return vp9_reader_find_end(&tile_data->bit_reader); | 1018 return vp9_reader_find_end(&tile_data->bit_reader); |
1046 } | 1019 } |
1047 | 1020 |
1048 static int tile_worker_hook(TileWorkerData *const tile_data, | 1021 static int tile_worker_hook(TileWorkerData *const tile_data, |
1049 const TileInfo *const tile) { | 1022 const TileInfo *const tile) { |
1050 int mi_row, mi_col; | 1023 int mi_row, mi_col; |
1051 | 1024 |
1052 if (setjmp(tile_data->error_info.jmp)) { | 1025 if (setjmp(tile_data->error_info.jmp)) { |
1053 tile_data->error_info.setjmp = 0; | 1026 tile_data->error_info.setjmp = 0; |
1054 tile_data->xd.corrupted = 1; | 1027 tile_data->xd.corrupted = 1; |
1055 return 0; | 1028 return 0; |
1056 } | 1029 } |
1057 | 1030 |
1058 tile_data->error_info.setjmp = 1; | 1031 tile_data->error_info.setjmp = 1; |
1059 tile_data->xd.error_info = &tile_data->error_info; | 1032 tile_data->xd.error_info = &tile_data->error_info; |
1060 | 1033 |
1061 for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end; | 1034 for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end; |
1062 mi_row += MI_BLOCK_SIZE) { | 1035 mi_row += MI_BLOCK_SIZE) { |
1063 vp9_zero(tile_data->xd.left_context); | 1036 vp9_zero(tile_data->xd.left_context); |
1064 vp9_zero(tile_data->xd.left_seg_context); | 1037 vp9_zero(tile_data->xd.left_seg_context); |
1065 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; | 1038 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; |
1066 mi_col += MI_BLOCK_SIZE) { | 1039 mi_col += MI_BLOCK_SIZE) { |
1067 decode_partition(tile_data->pbi, &tile_data->xd, | 1040 decode_partition(tile_data->cm, &tile_data->xd, tile, |
1068 &tile_data->pbi->common.counts, | 1041 mi_row, mi_col, &tile_data->bit_reader, BLOCK_64X64); |
1069 tile, mi_row, mi_col, &tile_data->bit_reader, | |
1070 BLOCK_64X64); | |
1071 } | 1042 } |
1072 } | 1043 } |
1073 return !tile_data->xd.corrupted; | 1044 return !tile_data->xd.corrupted; |
1074 } | 1045 } |
1075 | 1046 |
1076 // sorts in descending order | 1047 // sorts in descending order |
1077 static int compare_tile_buffers(const void *a, const void *b) { | 1048 static int compare_tile_buffers(const void *a, const void *b) { |
1078 const TileBuffer *const buf1 = (const TileBuffer*)a; | 1049 const TileBuffer *const buf1 = (const TileBuffer*)a; |
1079 const TileBuffer *const buf2 = (const TileBuffer*)b; | 1050 const TileBuffer *const buf2 = (const TileBuffer*)b; |
1080 if (buf1->size < buf2->size) { | 1051 if (buf1->size < buf2->size) { |
(...skipping 93 matching lines...)
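The comparator above orders TileBuffer entries by size, largest first. Its caller sits in the elided lines, but given the stdlib.h include flagged "// qsort()" at the top of the file, the assumed usage is along these lines (tile_buffers and tile_cols as used below; the exact call site is not shown here):

    /* assumed call -- the real invocation is in the skipped region */
    qsort(tile_buffers[0], tile_cols, sizeof(tile_buffers[0][0]),
          compare_tile_buffers);

Sorting in descending order presumably lets the largest tiles be dispatched to tile workers first.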
1174 | 1145 |
1175 n = 0; | 1146 n = 0; |
1176 while (n < tile_cols) { | 1147 while (n < tile_cols) { |
1177 int i; | 1148 int i; |
1178 for (i = 0; i < num_workers && n < tile_cols; ++i) { | 1149 for (i = 0; i < num_workers && n < tile_cols; ++i) { |
1179 VP9Worker *const worker = &pbi->tile_workers[i]; | 1150 VP9Worker *const worker = &pbi->tile_workers[i]; |
1180 TileWorkerData *const tile_data = (TileWorkerData*)worker->data1; | 1151 TileWorkerData *const tile_data = (TileWorkerData*)worker->data1; |
1181 TileInfo *const tile = (TileInfo*)worker->data2; | 1152 TileInfo *const tile = (TileInfo*)worker->data2; |
1182 TileBuffer *const buf = &tile_buffers[0][n]; | 1153 TileBuffer *const buf = &tile_buffers[0][n]; |
1183 | 1154 |
1184 tile_data->pbi = pbi; | 1155 tile_data->cm = cm; |
1185 tile_data->xd = pbi->mb; | 1156 tile_data->xd = pbi->mb; |
1186 tile_data->xd.corrupted = 0; | 1157 tile_data->xd.corrupted = 0; |
1187 vp9_tile_init(tile, &pbi->common, 0, buf->col); | 1158 vp9_tile_init(tile, tile_data->cm, 0, buf->col); |
1188 setup_token_decoder(buf->data, data_end, buf->size, &cm->error, | 1159 setup_token_decoder(buf->data, data_end, buf->size, &cm->error, |
1189 &tile_data->bit_reader, pbi->decrypt_cb, | 1160 &tile_data->bit_reader, pbi->decrypt_cb, |
1190 pbi->decrypt_state); | 1161 pbi->decrypt_state); |
1191 init_macroblockd(cm, &tile_data->xd); | 1162 init_macroblockd(cm, &tile_data->xd); |
1192 | 1163 |
1193 worker->had_error = 0; | 1164 worker->had_error = 0; |
1194 if (i == num_workers - 1 || n == tile_cols - 1) { | 1165 if (i == num_workers - 1 || n == tile_cols - 1) { |
1195 winterface->execute(worker); | 1166 winterface->execute(worker); |
1196 } else { | 1167 } else { |
1197 winterface->launch(worker); | 1168 winterface->launch(worker); |
(...skipping 83 matching lines...)
1281 } else { | 1252 } else { |
1282 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, | 1253 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, |
1283 "4:4:4 color not supported in profile 0 or 2"); | 1254 "4:4:4 color not supported in profile 0 or 2"); |
1284 } | 1255 } |
1285 } | 1256 } |
1286 } | 1257 } |
1287 | 1258 |
1288 static size_t read_uncompressed_header(VP9Decoder *pbi, | 1259 static size_t read_uncompressed_header(VP9Decoder *pbi, |
1289 struct vp9_read_bit_buffer *rb) { | 1260 struct vp9_read_bit_buffer *rb) { |
1290 VP9_COMMON *const cm = &pbi->common; | 1261 VP9_COMMON *const cm = &pbi->common; |
1291 RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs; | |
1292 BufferPool *const pool = pbi->common.buffer_pool; | |
1293 int i, mask, ref_index = 0; | |
1294 size_t sz; | 1262 size_t sz; |
| 1263 int i; |
1295 | 1264 |
1296 cm->last_frame_type = cm->frame_type; | 1265 cm->last_frame_type = cm->frame_type; |
1297 | 1266 |
1298 if (vp9_rb_read_literal(rb, 2) != VP9_FRAME_MARKER) | 1267 if (vp9_rb_read_literal(rb, 2) != VP9_FRAME_MARKER) |
1299 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, | 1268 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, |
1300 "Invalid frame marker"); | 1269 "Invalid frame marker"); |
1301 | 1270 |
1302 cm->profile = vp9_read_profile(rb); | 1271 cm->profile = vp9_read_profile(rb); |
1303 | 1272 |
1304 if (cm->profile >= MAX_PROFILES) | 1273 if (cm->profile >= MAX_PROFILES) |
1305 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, | 1274 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, |
1306 "Unsupported bitstream profile"); | 1275 "Unsupported bitstream profile"); |
1307 | 1276 |
1308 cm->show_existing_frame = vp9_rb_read_bit(rb); | 1277 cm->show_existing_frame = vp9_rb_read_bit(rb); |
1309 if (cm->show_existing_frame) { | 1278 if (cm->show_existing_frame) { |
1310 // Show an existing frame directly. | 1279 // Show an existing frame directly. |
1311 const int frame_to_show = cm->ref_frame_map[vp9_rb_read_literal(rb, 3)]; | 1280 const int frame_to_show = cm->ref_frame_map[vp9_rb_read_literal(rb, 3)]; |
1312 lock_buffer_pool(pool); | 1281 |
1313 if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1) { | 1282 if (frame_to_show < 0 || cm->frame_bufs[frame_to_show].ref_count < 1) |
1314 unlock_buffer_pool(pool); | |
1315 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, | 1283 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, |
1316 "Buffer %d does not contain a decoded frame", | 1284 "Buffer %d does not contain a decoded frame", |
1317 frame_to_show); | 1285 frame_to_show); |
1318 } | |
1319 | 1286 |
1320 ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show); | 1287 ref_cnt_fb(cm->frame_bufs, &cm->new_fb_idx, frame_to_show); |
1321 unlock_buffer_pool(pool); | |
1322 pbi->refresh_frame_flags = 0; | 1288 pbi->refresh_frame_flags = 0; |
1323 cm->lf.filter_level = 0; | 1289 cm->lf.filter_level = 0; |
1324 cm->show_frame = 1; | 1290 cm->show_frame = 1; |
1325 | |
1326 if (pbi->frame_parallel_decode) { | |
1327 for (i = 0; i < REF_FRAMES; ++i) | |
1328 cm->next_ref_frame_map[i] = cm->ref_frame_map[i]; | |
1329 } | |
1330 return 0; | 1291 return 0; |
1331 } | 1292 } |
1332 | 1293 |
1333 cm->frame_type = (FRAME_TYPE) vp9_rb_read_bit(rb); | 1294 cm->frame_type = (FRAME_TYPE) vp9_rb_read_bit(rb); |
1334 cm->show_frame = vp9_rb_read_bit(rb); | 1295 cm->show_frame = vp9_rb_read_bit(rb); |
1335 cm->error_resilient_mode = vp9_rb_read_bit(rb); | 1296 cm->error_resilient_mode = vp9_rb_read_bit(rb); |
1336 | 1297 |
1337 if (cm->frame_type == KEY_FRAME) { | 1298 if (cm->frame_type == KEY_FRAME) { |
1338 if (!vp9_read_sync_code(rb)) | 1299 if (!vp9_read_sync_code(rb)) |
1339 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, | 1300 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, |
1340 "Invalid frame sync code"); | 1301 "Invalid frame sync code"); |
1341 | 1302 |
1342 read_bitdepth_colorspace_sampling(cm, rb); | 1303 read_bitdepth_colorspace_sampling(cm, rb); |
1343 pbi->refresh_frame_flags = (1 << REF_FRAMES) - 1; | 1304 pbi->refresh_frame_flags = (1 << REF_FRAMES) - 1; |
1344 | 1305 |
1345 for (i = 0; i < REFS_PER_FRAME; ++i) { | 1306 for (i = 0; i < REFS_PER_FRAME; ++i) { |
1346 cm->frame_refs[i].idx = -1; | 1307 cm->frame_refs[i].idx = -1; |
1347 cm->frame_refs[i].buf = NULL; | 1308 cm->frame_refs[i].buf = NULL; |
1348 } | 1309 } |
1349 | 1310 |
1350 setup_frame_size(cm, rb); | 1311 setup_frame_size(cm, rb); |
1351 if (pbi->need_resync) { | 1312 pbi->need_resync = 0; |
1352 vpx_memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map)); | |
1353 pbi->need_resync = 0; | |
1354 } | |
1355 } else { | 1313 } else { |
1356 cm->intra_only = cm->show_frame ? 0 : vp9_rb_read_bit(rb); | 1314 cm->intra_only = cm->show_frame ? 0 : vp9_rb_read_bit(rb); |
1357 | 1315 |
1358 cm->reset_frame_context = cm->error_resilient_mode ? | 1316 cm->reset_frame_context = cm->error_resilient_mode ? |
1359 0 : vp9_rb_read_literal(rb, 2); | 1317 0 : vp9_rb_read_literal(rb, 2); |
1360 | 1318 |
1361 if (cm->intra_only) { | 1319 if (cm->intra_only) { |
1362 if (!vp9_read_sync_code(rb)) | 1320 if (!vp9_read_sync_code(rb)) |
1363 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, | 1321 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, |
1364 "Invalid frame sync code"); | 1322 "Invalid frame sync code"); |
1365 if (cm->profile > PROFILE_0) { | 1323 if (cm->profile > PROFILE_0) { |
1366 read_bitdepth_colorspace_sampling(cm, rb); | 1324 read_bitdepth_colorspace_sampling(cm, rb); |
1367 } else { | 1325 } else { |
1368 // NOTE: The intra-only frame header does not include the specification | 1326 // NOTE: The intra-only frame header does not include the specification |
1369 // of either the color format or color sub-sampling in profile 0. VP9 | 1327 // of either the color format or color sub-sampling in profile 0. VP9 |
1370 // specifies that the default color format should be YUV 4:2:0 in this | 1328 // specifies that the default color format should be YUV 4:2:0 in this |
1371 // case (normative). | 1329 // case (normative). |
1372 cm->color_space = VPX_CS_BT_601; | 1330 cm->color_space = VPX_CS_BT_601; |
1373 cm->subsampling_y = cm->subsampling_x = 1; | 1331 cm->subsampling_y = cm->subsampling_x = 1; |
1374 cm->bit_depth = VPX_BITS_8; | 1332 cm->bit_depth = VPX_BITS_8; |
1375 #if CONFIG_VP9_HIGHBITDEPTH | 1333 #if CONFIG_VP9_HIGHBITDEPTH |
1376 cm->use_highbitdepth = 0; | 1334 cm->use_highbitdepth = 0; |
1377 #endif | 1335 #endif |
1378 } | 1336 } |
1379 | 1337 |
1380 pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES); | 1338 pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES); |
1381 setup_frame_size(cm, rb); | 1339 setup_frame_size(cm, rb); |
1382 if (pbi->need_resync) { | 1340 pbi->need_resync = 0; |
1383 vpx_memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map)); | 1341 } else { |
1384 pbi->need_resync = 0; | |
1385 } | |
1386 } else if (pbi->need_resync != 1) { /* Skip if need resync */ | |
1387 pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES); | 1342 pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES); |
1388 for (i = 0; i < REFS_PER_FRAME; ++i) { | 1343 for (i = 0; i < REFS_PER_FRAME; ++i) { |
1389 const int ref = vp9_rb_read_literal(rb, REF_FRAMES_LOG2); | 1344 const int ref = vp9_rb_read_literal(rb, REF_FRAMES_LOG2); |
1390 const int idx = cm->ref_frame_map[ref]; | 1345 const int idx = cm->ref_frame_map[ref]; |
1391 RefBuffer *const ref_frame = &cm->frame_refs[i]; | 1346 RefBuffer *const ref_frame = &cm->frame_refs[i]; |
1392 ref_frame->idx = idx; | 1347 ref_frame->idx = idx; |
1393 ref_frame->buf = &frame_bufs[idx].buf; | 1348 ref_frame->buf = &cm->frame_bufs[idx].buf; |
1394 cm->ref_frame_sign_bias[LAST_FRAME + i] = vp9_rb_read_bit(rb); | 1349 cm->ref_frame_sign_bias[LAST_FRAME + i] = vp9_rb_read_bit(rb); |
1395 } | 1350 } |
1396 | 1351 |
1397 setup_frame_size_with_refs(cm, rb); | 1352 setup_frame_size_with_refs(cm, rb); |
1398 | 1353 |
1399 cm->allow_high_precision_mv = vp9_rb_read_bit(rb); | 1354 cm->allow_high_precision_mv = vp9_rb_read_bit(rb); |
1400 cm->interp_filter = read_interp_filter(rb); | 1355 cm->interp_filter = read_interp_filter(rb); |
1401 | 1356 |
1402 for (i = 0; i < REFS_PER_FRAME; ++i) { | 1357 for (i = 0; i < REFS_PER_FRAME; ++i) { |
1403 RefBuffer *const ref_buf = &cm->frame_refs[i]; | 1358 RefBuffer *const ref_buf = &cm->frame_refs[i]; |
(...skipping 29 matching lines...) Expand all Loading... |
1433 cm->frame_parallel_decoding_mode = vp9_rb_read_bit(rb); | 1388 cm->frame_parallel_decoding_mode = vp9_rb_read_bit(rb); |
1434 } else { | 1389 } else { |
1435 cm->refresh_frame_context = 0; | 1390 cm->refresh_frame_context = 0; |
1436 cm->frame_parallel_decoding_mode = 1; | 1391 cm->frame_parallel_decoding_mode = 1; |
1437 } | 1392 } |
1438 | 1393 |
1439 // This flag will be overridden by the call to vp9_setup_past_independence | 1394 // This flag will be overridden by the call to vp9_setup_past_independence |
1440 // below, forcing the use of context 0 for those frame types. | 1395 // below, forcing the use of context 0 for those frame types. |
1441 cm->frame_context_idx = vp9_rb_read_literal(rb, FRAME_CONTEXTS_LOG2); | 1396 cm->frame_context_idx = vp9_rb_read_literal(rb, FRAME_CONTEXTS_LOG2); |
1442 | 1397 |
1443 // Generate next_ref_frame_map. | |
1444 lock_buffer_pool(pool); | |
1445 for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) { | |
1446 if (mask & 1) { | |
1447 cm->next_ref_frame_map[ref_index] = cm->new_fb_idx; | |
1448 ++frame_bufs[cm->new_fb_idx].ref_count; | |
1449 } else { | |
1450 cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index]; | |
1451 } | |
1452 // Current thread holds the reference frame. | |
1453 if (cm->ref_frame_map[ref_index] >= 0) | |
1454 ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count; | |
1455 ++ref_index; | |
1456 } | |
1457 | |
1458 for (; ref_index < REF_FRAMES; ++ref_index) { | |
1459 cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index]; | |
1460 // Current thread holds the reference frame. | |
1461 if (cm->ref_frame_map[ref_index] >= 0) | |
1462 ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count; | |
1463 } | |
1464 unlock_buffer_pool(pool); | |
1465 pbi->hold_ref_buf = 1; | |
1466 | |
1467 if (frame_is_intra_only(cm) || cm->error_resilient_mode) | 1398 if (frame_is_intra_only(cm) || cm->error_resilient_mode) |
1468 vp9_setup_past_independence(cm); | 1399 vp9_setup_past_independence(cm); |
1469 | 1400 |
1470 setup_loopfilter(&cm->lf, rb); | 1401 setup_loopfilter(&cm->lf, rb); |
1471 setup_quantization(cm, &pbi->mb, rb); | 1402 setup_quantization(cm, &pbi->mb, rb); |
1472 setup_segmentation(&cm->seg, rb); | 1403 setup_segmentation(&cm->seg, rb); |
1473 | 1404 |
1474 setup_tile_info(cm, rb); | 1405 setup_tile_info(cm, rb); |
1475 sz = vp9_rb_read_literal(rb, 16); | 1406 sz = vp9_rb_read_literal(rb, 16); |
1476 | 1407 |
(...skipping 125 matching lines...)
1602 } | 1533 } |
1603 return rb; | 1534 return rb; |
1604 } | 1535 } |
1605 | 1536 |
1606 void vp9_decode_frame(VP9Decoder *pbi, | 1537 void vp9_decode_frame(VP9Decoder *pbi, |
1607 const uint8_t *data, const uint8_t *data_end, | 1538 const uint8_t *data, const uint8_t *data_end, |
1608 const uint8_t **p_data_end) { | 1539 const uint8_t **p_data_end) { |
1609 VP9_COMMON *const cm = &pbi->common; | 1540 VP9_COMMON *const cm = &pbi->common; |
1610 MACROBLOCKD *const xd = &pbi->mb; | 1541 MACROBLOCKD *const xd = &pbi->mb; |
1611 struct vp9_read_bit_buffer rb = { NULL, NULL, 0, NULL, 0}; | 1542 struct vp9_read_bit_buffer rb = { NULL, NULL, 0, NULL, 0}; |
1612 int context_updated = 0; | 1543 |
1613 uint8_t clear_data[MAX_VP9_HEADER_SIZE]; | 1544 uint8_t clear_data[MAX_VP9_HEADER_SIZE]; |
1614 const size_t first_partition_size = read_uncompressed_header(pbi, | 1545 const size_t first_partition_size = read_uncompressed_header(pbi, |
1615 init_read_bit_buffer(pbi, &rb, data, data_end, clear_data)); | 1546 init_read_bit_buffer(pbi, &rb, data, data_end, clear_data)); |
1616 const int tile_rows = 1 << cm->log2_tile_rows; | 1547 const int tile_rows = 1 << cm->log2_tile_rows; |
1617 const int tile_cols = 1 << cm->log2_tile_cols; | 1548 const int tile_cols = 1 << cm->log2_tile_cols; |
1618 YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm); | 1549 YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm); |
1619 xd->cur_buf = new_fb; | 1550 xd->cur_buf = new_fb; |
1620 | 1551 |
1621 if (!first_partition_size) { | 1552 if (!first_partition_size) { |
1622 // showing a frame directly | 1553 // showing a frame directly |
(...skipping 21 matching lines...)
1644 "Uninitialized entropy context."); | 1575 "Uninitialized entropy context."); |
1645 | 1576 |
1646 vp9_zero(cm->counts); | 1577 vp9_zero(cm->counts); |
1647 | 1578 |
1648 xd->corrupted = 0; | 1579 xd->corrupted = 0; |
1649 new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size); | 1580 new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size); |
1650 if (new_fb->corrupted) | 1581 if (new_fb->corrupted) |
1651 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, | 1582 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, |
1652 "Decode failed. Frame data header is corrupted."); | 1583 "Decode failed. Frame data header is corrupted."); |
1653 | 1584 |
1654 if (cm->lf.filter_level) { | |
1655 vp9_loop_filter_frame_init(cm, cm->lf.filter_level); | |
1656 } | |
1657 | |
1658 // If encoded in frame parallel mode, frame context is ready after decoding | |
1659 // the frame header. | |
1660 if (pbi->frame_parallel_decode && cm->frame_parallel_decoding_mode) { | |
1661 VP9Worker *const worker = pbi->frame_worker_owner; | |
1662 FrameWorkerData *const frame_worker_data = worker->data1; | |
1663 if (cm->refresh_frame_context) { | |
1664 context_updated = 1; | |
1665 cm->frame_contexts[cm->frame_context_idx] = *cm->fc; | |
1666 } | |
1667 vp9_frameworker_lock_stats(worker); | |
1668 pbi->cur_buf->row = -1; | |
1669 pbi->cur_buf->col = -1; | |
1670 frame_worker_data->frame_context_ready = 1; | |
1671 // Signal the main thread that context is ready. | |
1672 vp9_frameworker_signal_stats(worker); | |
1673 vp9_frameworker_unlock_stats(worker); | |
1674 } | |
1675 | |
1676 // TODO(jzern): remove frame_parallel_decoding_mode restriction for | 1585 // TODO(jzern): remove frame_parallel_decoding_mode restriction for |
1677 // single-frame tile decoding. | 1586 // single-frame tile decoding. |
1678 if (pbi->max_threads > 1 && tile_rows == 1 && tile_cols > 1 && | 1587 if (pbi->max_threads > 1 && tile_rows == 1 && tile_cols > 1 && |
1679 cm->frame_parallel_decoding_mode) { | 1588 cm->frame_parallel_decoding_mode) { |
1680 *p_data_end = decode_tiles_mt(pbi, data + first_partition_size, data_end); | 1589 *p_data_end = decode_tiles_mt(pbi, data + first_partition_size, data_end); |
1681 if (!xd->corrupted) { | 1590 if (!xd->corrupted) { |
1682 // If multiple threads are used to decode tiles, then we use those threads | 1591 // If multiple threads are used to decode tiles, then we use those threads |
1683 // to do parallel loopfiltering. | 1592 // to do parallel loopfiltering. |
1684 vp9_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane, cm->lf.filter_level, | 1593 vp9_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane, cm->lf.filter_level, |
1685 0, 0, pbi->tile_workers, pbi->num_tile_workers, | 1594 0, 0, pbi->tile_workers, pbi->num_tile_workers, |
1686 &pbi->lf_row_sync); | 1595 &pbi->lf_row_sync); |
1687 } else { | 1596 } else { |
1688 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, | 1597 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, |
1689 "Decode failed. Frame data is corrupted."); | 1598 "Decode failed. Frame data is corrupted."); |
1690 | 1599 |
1691 } | 1600 } |
1692 } else { | 1601 } else { |
1693 *p_data_end = decode_tiles(pbi, data + first_partition_size, data_end); | 1602 *p_data_end = decode_tiles(pbi, data + first_partition_size, data_end); |
1694 } | 1603 } |
1695 | 1604 |
1696 if (!xd->corrupted) { | 1605 new_fb->corrupted |= xd->corrupted; |
| 1606 |
| 1607 if (!new_fb->corrupted) { |
1697 if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) { | 1608 if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) { |
1698 vp9_adapt_coef_probs(cm); | 1609 vp9_adapt_coef_probs(cm); |
1699 | 1610 |
1700 if (!frame_is_intra_only(cm)) { | 1611 if (!frame_is_intra_only(cm)) { |
1701 vp9_adapt_mode_probs(cm); | 1612 vp9_adapt_mode_probs(cm); |
1702 vp9_adapt_mv_probs(cm, cm->allow_high_precision_mv); | 1613 vp9_adapt_mv_probs(cm, cm->allow_high_precision_mv); |
1703 } | 1614 } |
1704 } else { | 1615 } else { |
1705 debug_check_frame_counts(cm); | 1616 debug_check_frame_counts(cm); |
1706 } | 1617 } |
1707 } else { | 1618 } else { |
1708 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, | 1619 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, |
1709 "Decode failed. Frame data is corrupted."); | 1620 "Decode failed. Frame data is corrupted."); |
1710 } | 1621 } |
1711 | 1622 |
1712 // Non frame parallel update frame context here. | 1623 if (cm->refresh_frame_context) |
1713 if (cm->refresh_frame_context && !context_updated) | |
1714 cm->frame_contexts[cm->frame_context_idx] = *cm->fc; | 1624 cm->frame_contexts[cm->frame_context_idx] = *cm->fc; |
1715 } | 1625 } |
1716 | |
1717 static void build_mc_border(const uint8_t *src, int src_stride, | |
1718 uint8_t *dst, int dst_stride, | |
1719 int x, int y, int b_w, int b_h, int w, int h) { | |
1720 // Get a pointer to the start of the real data for this row. | |
1721 const uint8_t *ref_row = src - x - y * src_stride; | |
1722 | |
1723 if (y >= h) | |
1724 ref_row += (h - 1) * src_stride; | |
1725 else if (y > 0) | |
1726 ref_row += y * src_stride; | |
1727 | |
1728 do { | |
1729 int right = 0, copy; | |
1730 int left = x < 0 ? -x : 0; | |
1731 | |
1732 if (left > b_w) | |
1733 left = b_w; | |
1734 | |
1735 if (x + b_w > w) | |
1736 right = x + b_w - w; | |
1737 | |
1738 if (right > b_w) | |
1739 right = b_w; | |
1740 | |
1741 copy = b_w - left - right; | |
1742 | |
1743 if (left) | |
1744 memset(dst, ref_row[0], left); | |
1745 | |
1746 if (copy) | |
1747 memcpy(dst + left, ref_row + x + left, copy); | |
1748 | |
1749 if (right) | |
1750 memset(dst + left + copy, ref_row[w - 1], right); | |
1751 | |
1752 dst += dst_stride; | |
1753 ++y; | |
1754 | |
1755 if (y > 0 && y < h) | |
1756 ref_row += src_stride; | |
1757 } while (--b_h); | |
1758 } | |
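A worked case of the replication above: for a 16-pixel-wide fetch (b_w = 16) starting 3 pixels left of a 64-pixel-wide frame (x = -3, w = 64), each row gets left = 3, right = 0, copy = 13, i.e. three copies of ref_row[0] followed by ref_row[0..12]. With x = 56 instead, left = 0, right = 8, copy = 8: ref_row[56..63] followed by eight copies of ref_row[63].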
1759 | |
1760 #if CONFIG_VP9_HIGHBITDEPTH | |
1761 static void high_build_mc_border(const uint8_t *src8, int src_stride, | |
1762 uint16_t *dst, int dst_stride, | |
1763 int x, int y, int b_w, int b_h, | |
1764 int w, int h) { | |
1765 // Get a pointer to the start of the real data for this row. | |
1766 const uint16_t *src = CONVERT_TO_SHORTPTR(src8); | |
1767 const uint16_t *ref_row = src - x - y * src_stride; | |
1768 | |
1769 if (y >= h) | |
1770 ref_row += (h - 1) * src_stride; | |
1771 else if (y > 0) | |
1772 ref_row += y * src_stride; | |
1773 | |
1774 do { | |
1775 int right = 0, copy; | |
1776 int left = x < 0 ? -x : 0; | |
1777 | |
1778 if (left > b_w) | |
1779 left = b_w; | |
1780 | |
1781 if (x + b_w > w) | |
1782 right = x + b_w - w; | |
1783 | |
1784 if (right > b_w) | |
1785 right = b_w; | |
1786 | |
1787 copy = b_w - left - right; | |
1788 | |
1789 if (left) | |
1790 vpx_memset16(dst, ref_row[0], left); | |
1791 | |
1792 if (copy) | |
1793 memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t)); | |
1794 | |
1795 if (right) | |
1796 vpx_memset16(dst + left + copy, ref_row[w - 1], right); | |
1797 | |
1798 dst += dst_stride; | |
1799 ++y; | |
1800 | |
1801 if (y > 0 && y < h) | |
1802 ref_row += src_stride; | |
1803 } while (--b_h); | |
1804 } | |
1805 #endif // CONFIG_VP9_HIGHBITDEPTH | |
1806 | |
1807 void dec_build_inter_predictors(VP9Decoder *const pbi, MACROBLOCKD *xd, | |
1808 int plane, int block, int bw, int bh, int x, | |
1809 int y, int w, int h, int mi_x, int mi_y) { | |
1810 struct macroblockd_plane *const pd = &xd->plane[plane]; | |
1811 const MODE_INFO *mi = xd->mi[0].src_mi; | |
1812 const int is_compound = has_second_ref(&mi->mbmi); | |
1813 const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter); | |
1814 int ref; | |
1815 | |
1816 for (ref = 0; ref < 1 + is_compound; ++ref) { | |
1817 const struct scale_factors *const sf = &xd->block_refs[ref]->sf; | |
1818 struct buf_2d *const pre_buf = &pd->pre[ref]; | |
1819 struct buf_2d *const dst_buf = &pd->dst; | |
1820 uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x; | |
1821 const MV mv = mi->mbmi.sb_type < BLOCK_8X8 | |
1822 ? average_split_mvs(pd, mi, ref, block) | |
1823 : mi->mbmi.mv[ref].as_mv; | |
1824 | |
1825 const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh, | |
1826 pd->subsampling_x, | |
1827 pd->subsampling_y); | |
1828 | |
1829 MV32 scaled_mv; | |
1830 int xs, ys, x0, y0, x0_16, y0_16, y1, frame_width, frame_height, | |
1831 buf_stride, subpel_x, subpel_y; | |
1832 uint8_t *ref_frame, *buf_ptr; | |
1833 const int idx = xd->block_refs[ref]->idx; | |
1834 BufferPool *const pool = pbi->common.buffer_pool; | |
1835 RefCntBuffer *const ref_frame_buf = &pool->frame_bufs[idx]; | |
1836 const int is_scaled = vp9_is_scaled(sf); | |
1837 | |
1838 // Get reference frame pointer, width and height. | |
1839 if (plane == 0) { | |
1840 frame_width = ref_frame_buf->buf.y_crop_width; | |
1841 frame_height = ref_frame_buf->buf.y_crop_height; | |
1842 ref_frame = ref_frame_buf->buf.y_buffer; | |
1843 } else { | |
1844 frame_width = ref_frame_buf->buf.uv_crop_width; | |
1845 frame_height = ref_frame_buf->buf.uv_crop_height; | |
1846 ref_frame = plane == 1 ? ref_frame_buf->buf.u_buffer | |
1847 : ref_frame_buf->buf.v_buffer; | |
1848 } | |
1849 | |
1850 if (is_scaled) { | |
1851 // Co-ordinate of containing block to pixel precision. | |
1852 int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)); | |
1853 int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)); | |
1854 | |
1855 // Co-ordinate of the block to 1/16th pixel precision. | |
1856 x0_16 = (x_start + x) << SUBPEL_BITS; | |
1857 y0_16 = (y_start + y) << SUBPEL_BITS; | |
1858 | |
1859 // Co-ordinate of current block in reference frame | |
1860 // to 1/16th pixel precision. | |
1861 x0_16 = sf->scale_value_x(x0_16, sf); | |
1862 y0_16 = sf->scale_value_y(y0_16, sf); | |
1863 | |
1864 // Map the top left corner of the block into the reference frame. | |
1865 x0 = sf->scale_value_x(x_start + x, sf); | |
1866 y0 = sf->scale_value_y(y_start + y, sf); | |
1867 | |
1868 // Scale the MV and incorporate the sub-pixel offset of the block | |
1869 // in the reference frame. | |
1870 scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf); | |
1871 xs = sf->x_step_q4; | |
1872 ys = sf->y_step_q4; | |
1873 } else { | |
1874 // Co-ordinate of containing block to pixel precision. | |
1875 x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x; | |
1876 y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y; | |
1877 | |
1878 // Co-ordinate of the block to 1/16th pixel precision. | |
1879 x0_16 = x0 << SUBPEL_BITS; | |
1880 y0_16 = y0 << SUBPEL_BITS; | |
1881 | |
1882 scaled_mv.row = mv_q4.row; | |
1883 scaled_mv.col = mv_q4.col; | |
1884 xs = ys = 16; | |
1885 } | |
1886 subpel_x = scaled_mv.col & SUBPEL_MASK; | |
1887 subpel_y = scaled_mv.row & SUBPEL_MASK; | |
1888 | |
1889 // Calculate the top left corner of the best matching block in the | |
1890 // reference frame. | |
1891 x0 += scaled_mv.col >> SUBPEL_BITS; | |
1892 y0 += scaled_mv.row >> SUBPEL_BITS; | |
1893 x0_16 += scaled_mv.col; | |
1894 y0_16 += scaled_mv.row; | |
1895 | |
1896 // Get reference block pointer. | |
1897 buf_ptr = ref_frame + y0 * pre_buf->stride + x0; | |
1898 buf_stride = pre_buf->stride; | |
1899 | |
1900 // Get reference block bottom right vertical coordinate. | |
1901 y1 = ((y0_16 + (h - 1) * ys) >> SUBPEL_BITS) + 1; | |
1902 | |
1903 // Do border extension if there is motion or the | |
1904 // width/height is not a multiple of 8 pixels. | |
1905 if (is_scaled || scaled_mv.col || scaled_mv.row || | |
1906 (frame_width & 0x7) || (frame_height & 0x7)) { | |
1907 // Get reference block bottom right horizontal coordinate. | |
1908 int x1 = ((x0_16 + (w - 1) * xs) >> SUBPEL_BITS) + 1; | |
1909 int x_pad = 0, y_pad = 0; | |
1910 | |
1911 if (subpel_x || (sf->x_step_q4 != SUBPEL_SHIFTS)) { | |
1912 x0 -= VP9_INTERP_EXTEND - 1; | |
1913 x1 += VP9_INTERP_EXTEND; | |
1914 x_pad = 1; | |
1915 } | |
1916 | |
1917 if (subpel_y || (sf->y_step_q4 != SUBPEL_SHIFTS)) { | |
1918 y0 -= VP9_INTERP_EXTEND - 1; | |
1919 y1 += VP9_INTERP_EXTEND; | |
1920 y_pad = 1; | |
1921 } | |
1922 | |
1923 // Wait until reference block is ready. Pad 7 more pixels as last 7 | |
1924 // pixels of each superblock row can be changed by next superblock row. | |
1925 if (pbi->frame_parallel_decode) | |
1926 vp9_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf, | |
1927 (y1 + 7) << (plane == 0 ? 0 : 1)); | |
1928 | |
1929 // Skip border extension if block is inside the frame. | |
1930 if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width - 1 || | |
1931 y0 < 0 || y0 > frame_height - 1 || y1 < 0 || y1 > frame_height - 1) { | |
1932 uint8_t *buf_ptr1 = ref_frame + y0 * pre_buf->stride + x0; | |
1933 // Extend the border. | |
1934 #if CONFIG_VP9_HIGHBITDEPTH | |
1935 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { | |
1936 high_build_mc_border(buf_ptr1, | |
1937 pre_buf->stride, | |
1938 xd->mc_buf_high, | |
1939 x1 - x0 + 1, | |
1940 x0, | |
1941 y0, | |
1942 x1 - x0 + 1, | |
1943 y1 - y0 + 1, | |
1944 frame_width, | |
1945 frame_height); | |
1946 buf_stride = x1 - x0 + 1; | |
1947 buf_ptr = CONVERT_TO_BYTEPTR(xd->mc_buf_high) + | |
1948 y_pad * 3 * buf_stride + x_pad * 3; | |
1949 } else { | |
1950 build_mc_border(buf_ptr1, | |
1951 pre_buf->stride, | |
1952 xd->mc_buf, | |
1953 x1 - x0 + 1, | |
1954 x0, | |
1955 y0, | |
1956 x1 - x0 + 1, | |
1957 y1 - y0 + 1, | |
1958 frame_width, | |
1959 frame_height); | |
1960 buf_stride = x1 - x0 + 1; | |
1961 buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3; | |
1962 } | |
1963 #else | |
1964 build_mc_border(buf_ptr1, | |
1965 pre_buf->stride, | |
1966 xd->mc_buf, | |
1967 x1 - x0 + 1, | |
1968 x0, | |
1969 y0, | |
1970 x1 - x0 + 1, | |
1971 y1 - y0 + 1, | |
1972 frame_width, | |
1973 frame_height); | |
1974 buf_stride = x1 - x0 + 1; | |
1975 buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3; | |
1976 #endif // CONFIG_VP9_HIGHBITDEPTH | |
1977 } | |
1978 } else { | |
1979 // Wait until reference block is ready. Pad 7 more pixels as last 7 | |
1980 // pixels of each superblock row can be changed by next superblock row. | |
1981 if (pbi->frame_parallel_decode) | |
1982 vp9_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf, | |
1983 (y1 + 7) << (plane == 0 ? 0 : 1)); | |
1984 } | |
1985 #if CONFIG_VP9_HIGHBITDEPTH | |
1986 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { | |
1987 high_inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x, | |
1988 subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd); | |
1989 } else { | |
1990 inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x, | |
1991 subpel_y, sf, w, h, ref, kernel, xs, ys); | |
1992 } | |
1993 #else | |
1994 inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x, | |
1995 subpel_y, sf, w, h, ref, kernel, xs, ys); | |
1996 #endif // CONFIG_VP9_HIGHBITDEPTH | |
1997 } | |
1998 } | |
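Throughout dec_build_inter_predictors(), scaled_mv and the *_16 coordinates are in 1/16-pel units (SUBPEL_BITS == 4): the low bits (& SUBPEL_MASK) select the sub-pixel phase passed to the interpolation kernel, and the high bits (>> SUBPEL_BITS) give the whole-pixel offset added to x0/y0. For example, scaled_mv.col == 37 means a 2-pixel shift (37 >> 4) with filter phase 5 (37 & 15).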
1999 | |
2000 void vp9_dec_build_inter_predictors_sb(VP9Decoder *const pbi, MACROBLOCKD *xd, | |
2001 int mi_row, int mi_col, | |
2002 BLOCK_SIZE bsize) { | |
2003 int plane; | |
2004 const int mi_x = mi_col * MI_SIZE; | |
2005 const int mi_y = mi_row * MI_SIZE; | |
2006 for (plane = 0; plane < MAX_MB_PLANE; ++plane) { | |
2007 const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, | |
2008 &xd->plane[plane]); | |
2009 const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize]; | |
2010 const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize]; | |
2011 const int bw = 4 * num_4x4_w; | |
2012 const int bh = 4 * num_4x4_h; | |
2013 | |
2014 if (xd->mi[0].src_mi->mbmi.sb_type < BLOCK_8X8) { | |
2015 int i = 0, x, y; | |
2016 assert(bsize == BLOCK_8X8); | |
2017 for (y = 0; y < num_4x4_h; ++y) | |
2018 for (x = 0; x < num_4x4_w; ++x) | |
2019 dec_build_inter_predictors(pbi, xd, plane, i++, bw, bh, | |
2020 4 * x, 4 * y, 4, 4, mi_x, mi_y); | |
2021 } else { | |
2022 dec_build_inter_predictors(pbi, xd, plane, 0, bw, bh, | |
2023 0, 0, bw, bh, mi_x, mi_y); | |
2024 } | |
2025 } | |
2026 } | |
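For sub-8x8 partitions, the loop above builds the prediction one 4x4 sub-block at a time so each sub-block uses its own motion vector; on the subsampled chroma planes, dec_build_inter_predictors() obtains the MV through average_split_mvs(), which averages the motion vectors of the luma sub-blocks covering the chroma samples.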