OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 210 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
221 | 221 |
222 if (block_inside_limit < 1) | 222 if (block_inside_limit < 1) |
223 block_inside_limit = 1; | 223 block_inside_limit = 1; |
224 | 224 |
225 vpx_memset(lfi->lfthr[lvl].lim, block_inside_limit, SIMD_WIDTH); | 225 vpx_memset(lfi->lfthr[lvl].lim, block_inside_limit, SIMD_WIDTH); |
226 vpx_memset(lfi->lfthr[lvl].mblim, (2 * (lvl + 2) + block_inside_limit), | 226 vpx_memset(lfi->lfthr[lvl].mblim, (2 * (lvl + 2) + block_inside_limit), |
227 SIMD_WIDTH); | 227 SIMD_WIDTH); |
228 } | 228 } |
229 } | 229 } |
230 | 230 |
| 231 static uint8_t get_filter_level(const loop_filter_info_n *lfi_n, |
| 232 const MB_MODE_INFO *mbmi) { |
| 233 return lfi_n->lvl[mbmi->segment_id][mbmi->ref_frame[0]] |
| 234 [mode_lf_lut[mbmi->mode]]; |
| 235 } |
| 236 |
231 void vp9_loop_filter_init(VP9_COMMON *cm) { | 237 void vp9_loop_filter_init(VP9_COMMON *cm) { |
232 loop_filter_info_n *lfi = &cm->lf_info; | 238 loop_filter_info_n *lfi = &cm->lf_info; |
233 struct loopfilter *lf = &cm->lf; | 239 struct loopfilter *lf = &cm->lf; |
234 int lvl; | 240 int lvl; |
235 | 241 |
236 // init limits for given sharpness | 242 // init limits for given sharpness |
237 update_sharpness(lfi, lf->sharpness_level); | 243 update_sharpness(lfi, lf->sharpness_level); |
238 lf->last_sharpness_level = lf->sharpness_level; | 244 lf->last_sharpness_level = lf->sharpness_level; |
239 | 245 |
240 // init hev threshold const vectors | 246 // init hev threshold const vectors |
(...skipping 245 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
486 // filters for the specific mi we are looking at. It uses information | 492 // filters for the specific mi we are looking at. It uses information |
487 // including the block_size_type (32x16, 32x32, etc), the transform size, | 493 // including the block_size_type (32x16, 32x32, etc), the transform size, |
488 // whether there were any coefficients encoded, and the loop filter strength | 494 // whether there were any coefficients encoded, and the loop filter strength |
489 // block we are currently looking at. Shift is used to position the | 495 // block we are currently looking at. Shift is used to position the |
490 // 1's we produce. | 496 // 1's we produce. |
491 // TODO(JBB) Need another function for different resolution color.. | 497 // TODO(JBB) Need another function for different resolution color.. |
// Ors into 'lfm' the loop-filter bit masks and per-8x8 filter levels for the
// single mode-info block 'mi'. It uses the block_size_type (32x16, 32x32,
// etc), the transform sizes, whether any coefficients were coded, and the
// loop filter strength of the block. shift_y/shift_uv position the produced
// 1's within the Y (64-bit) and UV (16-bit) mask words.
// TODO(JBB) Need another function for different resolution color..
static void build_masks(const loop_filter_info_n *const lfi_n,
                        const MODE_INFO *mi, const int shift_y,
                        const int shift_uv,
                        LOOP_FILTER_MASK *lfm) {
  const MB_MODE_INFO *mbmi = &mi->mbmi;
  const BLOCK_SIZE block_size = mbmi->sb_type;
  const TX_SIZE tx_size_y = mbmi->tx_size;
  const TX_SIZE tx_size_uv = get_uv_tx_size(mbmi);
  const int filter_level = get_filter_level(lfi_n, mbmi);
  // Destination mask words, selected by this block's transform sizes.
  uint64_t *const left_y = &lfm->left_y[tx_size_y];
  uint64_t *const above_y = &lfm->above_y[tx_size_y];
  uint64_t *const int_4x4_y = &lfm->int_4x4_y;
  uint16_t *const left_uv = &lfm->left_uv[tx_size_uv];
  uint16_t *const above_uv = &lfm->above_uv[tx_size_uv];
  uint16_t *const int_4x4_uv = &lfm->int_4x4_uv;
  int i;

  // If filter level is 0 we don't loop filter.
  if (!filter_level) {
    return;
  } else {
    // Record the filter level for every 8x8 row the block covers; lfl_y is
    // laid out as a grid with stride 8, hence the index += 8 per row.
    const int w = num_8x8_blocks_wide_lookup[block_size];
    const int h = num_8x8_blocks_high_lookup[block_size];
    int index = shift_y;
    for (i = 0; i < h; i++) {
      vpx_memset(&lfm->lfl_y[index], filter_level, w);
      index += 8;
    }
  }

  // These set 1 in the current block size for the block size edges.
  // For instance if the block size is 32x16, we'll set:
  //    above =   1111
  //              0000
  //    and
  //    left  =   1000
  //              1000
  // NOTE: the low bit corresponds to the left-most position, so the pattern
  // 1000 above is stored as the value 1, not 8.
  //
  // The U and V masks use the same scheme on a 16-bit scale.
  *above_y |= above_prediction_mask[block_size] << shift_y;
  *above_uv |= above_prediction_mask_uv[block_size] << shift_uv;
  *left_y |= left_prediction_mask[block_size] << shift_y;
  *left_uv |= left_prediction_mask_uv[block_size] << shift_uv;

  // If the block has no coefficients and is not intra we skip applying
  // the loop filter on block edges.
  if (mbmi->skip && is_inter_block(mbmi))
    return;

  // Here we are adding a mask for the transform size. The transform
  // size mask is set to be correct for a 64x64 prediction block size. We
  // mask to match the size of the block we are working on and then shift it
  // into place.
  *above_y |= (size_mask[block_size] &
               above_64x64_txform_mask[tx_size_y]) << shift_y;
  *above_uv |= (size_mask_uv[block_size] &
                above_64x64_txform_mask_uv[tx_size_uv]) << shift_uv;

  *left_y |= (size_mask[block_size] &
              left_64x64_txform_mask[tx_size_y]) << shift_y;
  *left_uv |= (size_mask_uv[block_size] &
               left_64x64_txform_mask_uv[tx_size_uv]) << shift_uv;

  // Here we are trying to determine what to do with the internal 4x4 block
  // boundaries. These differ from the 4x4 boundaries on the outside edge of
  // an 8x8 in that the internal ones can be skipped and don't depend on
  // the prediction block size.
  if (tx_size_y == TX_4X4)
    *int_4x4_y |= (size_mask[block_size] & 0xffffffffffffffff) << shift_y;

  if (tx_size_uv == TX_4X4)
    *int_4x4_uv |= (size_mask_uv[block_size] & 0xffff) << shift_uv;
}
571 | 574 |
// This function does the same thing as build_masks with the exception that
// it only affects the y masks. It exists because for blocks < 16x16 in size,
// we only update u and v masks on the first block.
static void build_y_mask(const loop_filter_info_n *const lfi_n,
                         const MODE_INFO *mi, const int shift_y,
                         LOOP_FILTER_MASK *lfm) {
  const MB_MODE_INFO *mbmi = &mi->mbmi;
  const BLOCK_SIZE block_size = mbmi->sb_type;
  const TX_SIZE tx_size_y = mbmi->tx_size;
  const int filter_level = get_filter_level(lfi_n, mbmi);
  uint64_t *const left_y = &lfm->left_y[tx_size_y];
  uint64_t *const above_y = &lfm->above_y[tx_size_y];
  uint64_t *const int_4x4_y = &lfm->int_4x4_y;
  int i;

  // A filter level of 0 means this block is not filtered at all.
  if (!filter_level) {
    return;
  } else {
    // Record the filter level for every 8x8 row the block covers; lfl_y is
    // laid out as a grid with stride 8, hence the index += 8 per row.
    const int w = num_8x8_blocks_wide_lookup[block_size];
    const int h = num_8x8_blocks_high_lookup[block_size];
    int index = shift_y;
    for (i = 0; i < h; i++) {
      vpx_memset(&lfm->lfl_y[index], filter_level, w);
      index += 8;
    }
  }

  // Mark the prediction-block edges, shifted into this block's position.
  *above_y |= above_prediction_mask[block_size] << shift_y;
  *left_y |= left_prediction_mask[block_size] << shift_y;

  // If the block has no coefficients and is not intra we skip applying
  // the loop filter on the interior transform edges.
  if (mbmi->skip && is_inter_block(mbmi))
    return;

  // Add the transform-size edges, masked to the block's extent and shifted
  // into place within the 64x64 region.
  *above_y |= (size_mask[block_size] &
               above_64x64_txform_mask[tx_size_y]) << shift_y;

  *left_y |= (size_mask[block_size] &
              left_64x64_txform_mask[tx_size_y]) << shift_y;

  // Internal 4x4 boundaries only exist when the transform is 4x4.
  if (tx_size_y == TX_4X4)
    *int_4x4_y |= (size_mask[block_size] & 0xffffffffffffffff) << shift_y;
}
617 | 617 |
618 // This function sets up the bit masks for the entire 64x64 region represented | 618 // This function sets up the bit masks for the entire 64x64 region represented |
619 // by mi_row, mi_col. | 619 // by mi_row, mi_col. |
620 // TODO(JBB): This function only works for yv12. | 620 // TODO(JBB): This function only works for yv12. |
621 void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col, | 621 void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col, |
622 MODE_INFO **mi_8x8, const int mode_info_stride, | 622 MODE_INFO **mi_8x8, const int mode_info_stride, |
623 LOOP_FILTER_MASK *lfm) { | 623 LOOP_FILTER_MASK *lfm) { |
624 int idx_32, idx_16, idx_8; | 624 int idx_32, idx_16, idx_8; |
625 const loop_filter_info_n *const lfi_n = &cm->lf_info; | 625 const loop_filter_info_n *const lfi_n = &cm->lf_info; |
(...skipping 235 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
861 assert(!(lfm->above_y[TX_16X16] & lfm->above_y[TX_8X8])); | 861 assert(!(lfm->above_y[TX_16X16] & lfm->above_y[TX_8X8])); |
862 assert(!(lfm->above_y[TX_16X16] & lfm->above_y[TX_4X4])); | 862 assert(!(lfm->above_y[TX_16X16] & lfm->above_y[TX_4X4])); |
863 assert(!(lfm->above_y[TX_8X8] & lfm->above_y[TX_4X4])); | 863 assert(!(lfm->above_y[TX_8X8] & lfm->above_y[TX_4X4])); |
864 assert(!(lfm->int_4x4_y & lfm->above_y[TX_16X16])); | 864 assert(!(lfm->int_4x4_y & lfm->above_y[TX_16X16])); |
865 assert(!(lfm->above_uv[TX_16X16] & lfm->above_uv[TX_8X8])); | 865 assert(!(lfm->above_uv[TX_16X16] & lfm->above_uv[TX_8X8])); |
866 assert(!(lfm->above_uv[TX_16X16] & lfm->above_uv[TX_4X4])); | 866 assert(!(lfm->above_uv[TX_16X16] & lfm->above_uv[TX_4X4])); |
867 assert(!(lfm->above_uv[TX_8X8] & lfm->above_uv[TX_4X4])); | 867 assert(!(lfm->above_uv[TX_8X8] & lfm->above_uv[TX_4X4])); |
868 assert(!(lfm->int_4x4_uv & lfm->above_uv[TX_16X16])); | 868 assert(!(lfm->int_4x4_uv & lfm->above_uv[TX_16X16])); |
869 } | 869 } |
870 | 870 |
871 static uint8_t build_lfi(const loop_filter_info_n *lfi_n, | |
872 const MB_MODE_INFO *mbmi) { | |
873 const int seg = mbmi->segment_id; | |
874 const int ref = mbmi->ref_frame[0]; | |
875 return lfi_n->lvl[seg][ref][mode_lf_lut[mbmi->mode]]; | |
876 } | |
877 | |
878 static void filter_selectively_vert(uint8_t *s, int pitch, | 871 static void filter_selectively_vert(uint8_t *s, int pitch, |
879 unsigned int mask_16x16, | 872 unsigned int mask_16x16, |
880 unsigned int mask_8x8, | 873 unsigned int mask_8x8, |
881 unsigned int mask_4x4, | 874 unsigned int mask_4x4, |
882 unsigned int mask_4x4_int, | 875 unsigned int mask_4x4_int, |
883 const loop_filter_info_n *lfi_n, | 876 const loop_filter_info_n *lfi_n, |
884 const uint8_t *lfl) { | 877 const uint8_t *lfl) { |
885 unsigned int mask; | 878 unsigned int mask; |
886 | 879 |
887 for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int; | 880 for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int; |
(...skipping 21 matching lines...) Expand all Loading... |
909 } | 902 } |
910 | 903 |
911 static void filter_block_plane_non420(VP9_COMMON *cm, | 904 static void filter_block_plane_non420(VP9_COMMON *cm, |
912 struct macroblockd_plane *plane, | 905 struct macroblockd_plane *plane, |
913 MODE_INFO **mi_8x8, | 906 MODE_INFO **mi_8x8, |
914 int mi_row, int mi_col) { | 907 int mi_row, int mi_col) { |
915 const int ss_x = plane->subsampling_x; | 908 const int ss_x = plane->subsampling_x; |
916 const int ss_y = plane->subsampling_y; | 909 const int ss_y = plane->subsampling_y; |
917 const int row_step = 1 << ss_x; | 910 const int row_step = 1 << ss_x; |
918 const int col_step = 1 << ss_y; | 911 const int col_step = 1 << ss_y; |
919 const int row_step_stride = cm->mode_info_stride * row_step; | 912 const int row_step_stride = cm->mi_stride * row_step; |
920 struct buf_2d *const dst = &plane->dst; | 913 struct buf_2d *const dst = &plane->dst; |
921 uint8_t* const dst0 = dst->buf; | 914 uint8_t* const dst0 = dst->buf; |
922 unsigned int mask_16x16[MI_BLOCK_SIZE] = {0}; | 915 unsigned int mask_16x16[MI_BLOCK_SIZE] = {0}; |
923 unsigned int mask_8x8[MI_BLOCK_SIZE] = {0}; | 916 unsigned int mask_8x8[MI_BLOCK_SIZE] = {0}; |
924 unsigned int mask_4x4[MI_BLOCK_SIZE] = {0}; | 917 unsigned int mask_4x4[MI_BLOCK_SIZE] = {0}; |
925 unsigned int mask_4x4_int[MI_BLOCK_SIZE] = {0}; | 918 unsigned int mask_4x4_int[MI_BLOCK_SIZE] = {0}; |
926 uint8_t lfl[MI_BLOCK_SIZE * MI_BLOCK_SIZE]; | 919 uint8_t lfl[MI_BLOCK_SIZE * MI_BLOCK_SIZE]; |
927 int r, c; | 920 int r, c; |
928 | 921 |
929 for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += row_step) { | 922 for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += row_step) { |
(...skipping 16 matching lines...) Expand all Loading... |
946 !(r & (num_8x8_blocks_high_lookup[sb_type] - 1)) : 1; | 939 !(r & (num_8x8_blocks_high_lookup[sb_type] - 1)) : 1; |
947 const int skip_this_r = skip_this && !block_edge_above; | 940 const int skip_this_r = skip_this && !block_edge_above; |
948 const TX_SIZE tx_size = (plane->plane_type == PLANE_TYPE_UV) | 941 const TX_SIZE tx_size = (plane->plane_type == PLANE_TYPE_UV) |
949 ? get_uv_tx_size(&mi[0].mbmi) | 942 ? get_uv_tx_size(&mi[0].mbmi) |
950 : mi[0].mbmi.tx_size; | 943 : mi[0].mbmi.tx_size; |
951 const int skip_border_4x4_c = ss_x && mi_col + c == cm->mi_cols - 1; | 944 const int skip_border_4x4_c = ss_x && mi_col + c == cm->mi_cols - 1; |
952 const int skip_border_4x4_r = ss_y && mi_row + r == cm->mi_rows - 1; | 945 const int skip_border_4x4_r = ss_y && mi_row + r == cm->mi_rows - 1; |
953 | 946 |
954 // Filter level can vary per MI | 947 // Filter level can vary per MI |
955 if (!(lfl[(r << 3) + (c >> ss_x)] = | 948 if (!(lfl[(r << 3) + (c >> ss_x)] = |
956 build_lfi(&cm->lf_info, &mi[0].mbmi))) | 949 get_filter_level(&cm->lf_info, &mi[0].mbmi))) |
957 continue; | 950 continue; |
958 | 951 |
959 // Build masks based on the transform size of each block | 952 // Build masks based on the transform size of each block |
960 if (tx_size == TX_32X32) { | 953 if (tx_size == TX_32X32) { |
961 if (!skip_this_c && ((c >> ss_x) & 3) == 0) { | 954 if (!skip_this_c && ((c >> ss_x) & 3) == 0) { |
962 if (!skip_border_4x4_c) | 955 if (!skip_border_4x4_c) |
963 mask_16x16_c |= 1 << (c >> ss_x); | 956 mask_16x16_c |= 1 << (c >> ss_x); |
964 else | 957 else |
965 mask_8x8_c |= 1 << (c >> ss_x); | 958 mask_8x8_c |= 1 << (c >> ss_x); |
966 } | 959 } |
(...skipping 234 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1201 void vp9_loop_filter_rows(const YV12_BUFFER_CONFIG *frame_buffer, | 1194 void vp9_loop_filter_rows(const YV12_BUFFER_CONFIG *frame_buffer, |
1202 VP9_COMMON *cm, MACROBLOCKD *xd, | 1195 VP9_COMMON *cm, MACROBLOCKD *xd, |
1203 int start, int stop, int y_only) { | 1196 int start, int stop, int y_only) { |
1204 const int num_planes = y_only ? 1 : MAX_MB_PLANE; | 1197 const int num_planes = y_only ? 1 : MAX_MB_PLANE; |
1205 int mi_row, mi_col; | 1198 int mi_row, mi_col; |
1206 LOOP_FILTER_MASK lfm; | 1199 LOOP_FILTER_MASK lfm; |
1207 int use_420 = y_only || (xd->plane[1].subsampling_y == 1 && | 1200 int use_420 = y_only || (xd->plane[1].subsampling_y == 1 && |
1208 xd->plane[1].subsampling_x == 1); | 1201 xd->plane[1].subsampling_x == 1); |
1209 | 1202 |
1210 for (mi_row = start; mi_row < stop; mi_row += MI_BLOCK_SIZE) { | 1203 for (mi_row = start; mi_row < stop; mi_row += MI_BLOCK_SIZE) { |
1211 MODE_INFO **mi_8x8 = cm->mi_grid_visible + mi_row * cm->mode_info_stride; | 1204 MODE_INFO **mi_8x8 = cm->mi_grid_visible + mi_row * cm->mi_stride; |
1212 | 1205 |
1213 for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) { | 1206 for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) { |
1214 int plane; | 1207 int plane; |
1215 | 1208 |
1216 setup_dst_planes(xd, frame_buffer, mi_row, mi_col); | 1209 vp9_setup_dst_planes(xd, frame_buffer, mi_row, mi_col); |
1217 | 1210 |
1218 // TODO(JBB): Make setup_mask work for non 420. | 1211 // TODO(JBB): Make setup_mask work for non 420. |
1219 if (use_420) | 1212 if (use_420) |
1220 vp9_setup_mask(cm, mi_row, mi_col, mi_8x8 + mi_col, | 1213 vp9_setup_mask(cm, mi_row, mi_col, mi_8x8 + mi_col, cm->mi_stride, |
1221 cm->mode_info_stride, &lfm); | 1214 &lfm); |
1222 | 1215 |
1223 for (plane = 0; plane < num_planes; ++plane) { | 1216 for (plane = 0; plane < num_planes; ++plane) { |
1224 if (use_420) | 1217 if (use_420) |
1225 vp9_filter_block_plane(cm, &xd->plane[plane], mi_row, &lfm); | 1218 vp9_filter_block_plane(cm, &xd->plane[plane], mi_row, &lfm); |
1226 else | 1219 else |
1227 filter_block_plane_non420(cm, &xd->plane[plane], mi_8x8 + mi_col, | 1220 filter_block_plane_non420(cm, &xd->plane[plane], mi_8x8 + mi_col, |
1228 mi_row, mi_col); | 1221 mi_row, mi_col); |
1229 } | 1222 } |
1230 } | 1223 } |
1231 } | 1224 } |
(...skipping 18 matching lines...) Expand all Loading... |
1250 y_only); | 1243 y_only); |
1251 } | 1244 } |
1252 | 1245 |
1253 int vp9_loop_filter_worker(void *arg1, void *arg2) { | 1246 int vp9_loop_filter_worker(void *arg1, void *arg2) { |
1254 LFWorkerData *const lf_data = (LFWorkerData*)arg1; | 1247 LFWorkerData *const lf_data = (LFWorkerData*)arg1; |
1255 (void)arg2; | 1248 (void)arg2; |
1256 vp9_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, &lf_data->xd, | 1249 vp9_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, &lf_data->xd, |
1257 lf_data->start, lf_data->stop, lf_data->y_only); | 1250 lf_data->start, lf_data->stop, lf_data->y_only); |
1258 return 1; | 1251 return 1; |
1259 } | 1252 } |
OLD | NEW |