OLD | NEW |
1 /* | 1 /* |
2 * H.26L/H.264/AVC/JVT/14496-10/... loop filter | 2 * H.26L/H.264/AVC/JVT/14496-10/... loop filter |
3 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at> | 3 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at> |
4 * | 4 * |
5 * This file is part of FFmpeg. | 5 * This file is part of FFmpeg. |
6 * | 6 * |
7 * FFmpeg is free software; you can redistribute it and/or | 7 * FFmpeg is free software; you can redistribute it and/or |
8 * modify it under the terms of the GNU Lesser General Public | 8 * modify it under the terms of the GNU Lesser General Public |
9 * License as published by the Free Software Foundation; either | 9 * License as published by the Free Software Foundation; either |
10 * version 2.1 of the License, or (at your option) any later version. | 10 * version 2.1 of the License, or (at your option) any later version. |
11 * | 11 * |
12 * FFmpeg is distributed in the hope that it will be useful, | 12 * FFmpeg is distributed in the hope that it will be useful, |
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of | 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
15 * Lesser General Public License for more details. | 15 * Lesser General Public License for more details. |
16 * | 16 * |
17 * You should have received a copy of the GNU Lesser General Public | 17 * You should have received a copy of the GNU Lesser General Public |
18 * License along with FFmpeg; if not, write to the Free Software | 18 * License along with FFmpeg; if not, write to the Free Software |
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | 19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
20 */ | 20 */ |
21 | 21 |
22 /** | 22 /** |
23 * @file libavcodec/h264_loopfilter.c | 23 * @file libavcodec/h264_loopfilter.c |
24 * H.264 / AVC / MPEG4 part10 loop filter. | 24 * H.264 / AVC / MPEG4 part10 loop filter. |
25 * @author Michael Niedermayer <michaelni@gmx.at> | 25 * @author Michael Niedermayer <michaelni@gmx.at> |
26 */ | 26 */ |
27 | 27 |
| 28 #include "libavutil/intreadwrite.h" |
28 #include "internal.h" | 29 #include "internal.h" |
29 #include "dsputil.h" | 30 #include "dsputil.h" |
30 #include "avcodec.h" | 31 #include "avcodec.h" |
31 #include "mpegvideo.h" | 32 #include "mpegvideo.h" |
32 #include "h264.h" | 33 #include "h264.h" |
33 #include "mathops.h" | 34 #include "mathops.h" |
34 #include "rectangle.h" | 35 #include "rectangle.h" |
35 | 36 |
36 //#undef NDEBUG | 37 //#undef NDEBUG |
37 #include <assert.h> | 38 #include <assert.h> |
(...skipping 322 matching lines...)
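Note on the hunk above: the one functional change visible so far is the new libavutil/intreadwrite.h include. It provides the AV_WN64A / AV_RN64A / AV_ZERO64 helpers that the later hunks switch to instead of casting the int16_t bS arrays through uint64_t pointers. A simplified stand-in, shown only to illustrate the idea (placeholder names; the real libavutil macros have per-architecture and unaligned variants):

    #include <stdint.h>

    #define WN64A(p, v) (*(uint64_t *)(p) = (uint64_t)(v))   /* aligned 64-bit store */
    #define RN64A(p)    (*(const uint64_t *)(p))             /* aligned 64-bit load  */
    #define ZERO64(p)   WN64A(p, 0)

    /* Pack one strength value into all four int16_t slots with a single store.
     * Every 16-bit lane holds the same value, so the constant works the same
     * on little- and big-endian hosts. */
    static void pack_bs(int16_t bS[4], uint16_t strength)
    {
        WN64A(bS, UINT64_C(0x0001000100010001) * strength);
    }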
360 filter_mb_edgecv( &img_cr[2*0], uvlinesize, bS4, qpc0, h); | 361 filter_mb_edgecv( &img_cr[2*0], uvlinesize, bS4, qpc0, h); |
361 } | 362 } |
362 filter_mb_edgecv( &img_cb[2*2], uvlinesize, bS3, qpc, h); | 363 filter_mb_edgecv( &img_cb[2*2], uvlinesize, bS3, qpc, h); |
363 filter_mb_edgecv( &img_cr[2*2], uvlinesize, bS3, qpc, h); | 364 filter_mb_edgecv( &img_cr[2*2], uvlinesize, bS3, qpc, h); |
364 filter_mb_edgech( &img_cb[2*0*uvlinesize], uvlinesize, bSH, qpc1, h); | 365 filter_mb_edgech( &img_cb[2*0*uvlinesize], uvlinesize, bSH, qpc1, h); |
365 filter_mb_edgech( &img_cb[2*2*uvlinesize], uvlinesize, bS3, qpc, h); | 366 filter_mb_edgech( &img_cb[2*2*uvlinesize], uvlinesize, bS3, qpc, h); |
366 filter_mb_edgech( &img_cr[2*0*uvlinesize], uvlinesize, bSH, qpc1, h); | 367 filter_mb_edgech( &img_cr[2*0*uvlinesize], uvlinesize, bSH, qpc1, h); |
367 filter_mb_edgech( &img_cr[2*2*uvlinesize], uvlinesize, bS3, qpc, h); | 368 filter_mb_edgech( &img_cr[2*2*uvlinesize], uvlinesize, bS3, qpc, h); |
368 return; | 369 return; |
369 } else { | 370 } else { |
370 DECLARE_ALIGNED_8(int16_t, bS)[2][4][4]; | 371 LOCAL_ALIGNED_8(int16_t, bS, [2], [4][4]); |
371 uint64_t (*bSv)[4] = (uint64_t(*)[4])bS; | |
372 int edges; | 372 int edges; |
373 if( IS_8x8DCT(mb_type) && (h->cbp&7) == 7 ) { | 373 if( IS_8x8DCT(mb_type) && (h->cbp&7) == 7 ) { |
374 edges = 4; | 374 edges = 4; |
375 bSv[0][0] = bSv[0][2] = bSv[1][0] = bSv[1][2] = 0x0002000200020002ULL; | 375 AV_WN64A(bS[0][0], 0x0002000200020002ULL); |
| 376 AV_WN64A(bS[0][2], 0x0002000200020002ULL); |
| 377 AV_WN64A(bS[1][0], 0x0002000200020002ULL); |
| 378 AV_WN64A(bS[1][2], 0x0002000200020002ULL); |
376 } else { | 379 } else { |
377 int mask_edge1 = (3*(((5*mb_type)>>5)&1)) | (mb_type>>4); //(mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : (mb_type & MB_TYPE_16x8) ? 1 : 0; | 380 int mask_edge1 = (3*(((5*mb_type)>>5)&1)) | (mb_type>>4); //(mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : (mb_type & MB_TYPE_16x8) ? 1 : 0; |
378 int mask_edge0 = 3*((mask_edge1>>1) & ((5*left_type)>>5)&1); // (mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) && (h->left_type[0] & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : 0; | 381 int mask_edge0 = 3*((mask_edge1>>1) & ((5*left_type)>>5)&1); // (mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) && (h->left_type[0] & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : 0; |
379 int step = 1+(mb_type>>24); //IS_8x8DCT(mb_type) ? 2 : 1; | 382 int step = 1+(mb_type>>24); //IS_8x8DCT(mb_type) ? 2 : 1; |
380 edges = 4 - 3*((mb_type>>3) & !(h->cbp & 15)); //(mb_type & MB_TYPE_16x16) && !(h->cbp & 15) ? 1 : 4; | 383 edges = 4 - 3*((mb_type>>3) & !(h->cbp & 15)); //(mb_type & MB_TYPE_16x16) && !(h->cbp & 15) ? 1 : 4; |
381 s->dsp.h264_loop_filter_strength( bS, h->non_zero_count_cache, h->ref_cache, h->mv_cache, | 384 s->dsp.h264_loop_filter_strength( bS, h->non_zero_count_cache, h->ref_cache, h->mv_cache, |
382 h->list_count==2, edges, step, mask_edge0, mask_edge1, FIELD_PICTURE); | 385 h->list_count==2, edges, step, mask_edge0, mask_edge1, FIELD_PICTURE); |
383 } | 386 } |
384 if( IS_INTRA(left_type) ) | 387 if( IS_INTRA(left_type) ) |
385 bSv[0][0] = 0x0004000400040004ULL; | 388 AV_WN64A(bS[0][0], 0x0004000400040004ULL); |
386 if( IS_INTRA(h->top_type) ) | 389 if( IS_INTRA(h->top_type) ) |
387 bSv[1][0] = FIELD_PICTURE ? 0x0003000300030003ULL : 0x0004000400040004ULL; | 390 AV_WN64A(bS[1][0], FIELD_PICTURE ? 0x0003000300030003ULL : 0x0004000400040004ULL); |
388 | 391 |
389 #define FILTER(hv,dir,edge)\ | 392 #define FILTER(hv,dir,edge)\ |
390 if(bSv[dir][edge]) {\ | 393 if(AV_RN64A(bS[dir][edge])) { \ |
391 filter_mb_edge##hv( &img_y[4*edge*(dir?linesize:1)], linesize, bS[dir][edge], edge ? qp : qp##dir, h );\ | 394 filter_mb_edge##hv( &img_y[4*edge*(dir?linesize:1)], linesize, bS[dir][edge], edge ? qp : qp##dir, h );\ |
392 if(!(edge&1)) {\ | 395 if(!(edge&1)) {\ |
393 filter_mb_edgec##hv( &img_cb[2*edge*(dir?uvlinesize:1)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, h );\ | 396 filter_mb_edgec##hv( &img_cb[2*edge*(dir?uvlinesize:1)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, h );\ |
394 filter_mb_edgec##hv( &img_cr[2*edge*(dir?uvlinesize:1)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, h );\ | 397 filter_mb_edgec##hv( &img_cr[2*edge*(dir?uvlinesize:1)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, h );\ |
395 }\ | 398 }\ |
396 } | 399 } |
397 if(left_type) | 400 if(left_type) |
398 FILTER(v,0,0); | 401 FILTER(v,0,0); |
399 if( edges == 1 ) { | 402 if( edges == 1 ) { |
400 FILTER(h,1,0); | 403 FILTER(h,1,0); |
(...skipping 66 matching lines...)
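Note on the hunk above: DECLARE_ALIGNED_8 plus a uint64_t alias of bS is replaced by LOCAL_ALIGNED_8 (which, as far as I know, also guarantees 8-byte alignment for locals on ABIs with weaker stack alignment) and explicit AV_WN64A stores, and the FILTER macro now tests a whole edge with a single AV_RN64A load. A minimal sketch of that test, using a placeholder macro rather than the libavutil one:

    #include <stdint.h>

    #define RN64A(p) (*(const uint64_t *)(p))   /* assumes p is 8-byte aligned */

    /* One 64-bit load answers "is any of the four packed 16-bit strengths
     * nonzero?", so an edge that needs no filtering costs a single compare. */
    static int edge_needs_filtering(const int16_t bS[4])
    {
        return RN64A(bS) != 0;
    }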
467 // This is a special case in the norm where the filtering must | 470 // This is a special case in the norm where the filtering must |
468 // be done twice (one each of the field) even if we are in a | 471 // be done twice (one each of the field) even if we are in a |
469 // frame macroblock. | 472 // frame macroblock. |
470 // | 473 // |
471 unsigned int tmp_linesize = 2 * linesize; | 474 unsigned int tmp_linesize = 2 * linesize; |
472 unsigned int tmp_uvlinesize = 2 * uvlinesize; | 475 unsigned int tmp_uvlinesize = 2 * uvlinesize; |
473 int mbn_xy = mb_xy - 2 * s->mb_stride; | 476 int mbn_xy = mb_xy - 2 * s->mb_stride; |
474 int j; | 477 int j; |
475 | 478 |
476 for(j=0; j<2; j++, mbn_xy += s->mb_stride){ | 479 for(j=0; j<2; j++, mbn_xy += s->mb_stride){ |
477 DECLARE_ALIGNED_8(int16_t, bS)[4]; | 480 DECLARE_ALIGNED(8, int16_t, bS)[4]; |
478 int qp; | 481 int qp; |
479 if( IS_INTRA(mb_type|s->current_picture.mb_type[mbn_xy]) ) { | 482 if( IS_INTRA(mb_type|s->current_picture.mb_type[mbn_xy]) ) { |
480 *(uint64_t*)bS= 0x0003000300030003ULL; | 483 AV_WN64A(bS, 0x0003000300030003ULL); |
481 } else { | 484 } else { |
482 const uint8_t *mbn_nnz = h->non_zero_count[mbn_xy] + 4+3*8; //FIXME 8x8dct? | 485 if(!CABAC && IS_8x8DCT(s->current_picture.mb_type[mbn_xy])){ |
| 486 bS[0]= 1+((h->cbp_table[mbn_xy] & 4)||h->non_zero_count_cache[scan8[0]+0]); |
| 487 bS[1]= 1+((h->cbp_table[mbn_xy] & 4)||h->non_zero_count_cache[scan8[0]+1]); |
| 488 bS[2]= 1+((h->cbp_table[mbn_xy] & 8)||h->non_zero_count_cache[scan8[0]+2]); |
| 489 bS[3]= 1+((h->cbp_table[mbn_xy] & 8)||h->non_zero_count_cache[scan8[0]+3]); |
| 490 }else{ |
| 491 const uint8_t *mbn_nnz = h->non_zero_count[mbn_xy] + 4+3*8; |
483 int i; | 492 int i; |
484 for( i = 0; i < 4; i++ ) { | 493 for( i = 0; i < 4; i++ ) { |
485 bS[i] = 1 + !!(h->non_zero_count_cache[scan8[0]+i] | mbn_nnz[i]); | 494 bS[i] = 1 + !!(h->non_zero_count_cache[scan8[0]+i] | mbn_nnz[i]); |
486 } | 495 } |
| 496 } |
487 } | 497 } |
488 // Do not use s->qscale as luma quantizer because it has not the same | 498 // Do not use s->qscale as luma quantizer because it has not the same |
489 // value in IPCM macroblocks. | 499 // value in IPCM macroblocks. |
490 qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbn_xy] + 1 ) >> 1; | 500 qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbn_xy] + 1 ) >> 1; |
491 tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize); | 501 tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize); |
492 { int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); } | 502 { int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); } |
493 filter_mb_edgeh( &img_y[j*linesize], tmp_linesize, bS, qp, h ); | 503 filter_mb_edgeh( &img_y[j*linesize], tmp_linesize, bS, qp, h ); |
494 filter_mb_edgech( &img_cb[j*uvlinesize], tmp_uvlinesize, bS, | 504 filter_mb_edgech( &img_cb[j*uvlinesize], tmp_uvlinesize, bS, |
495 ( h->chroma_qp[0] + get_chroma_qp( h, 0, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1, h); | 505 ( h->chroma_qp[0] + get_chroma_qp( h, 0, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1, h); |
496 filter_mb_edgech( &img_cr[j*uvlinesize], tmp_uvlinesize, bS, | 506 filter_mb_edgech( &img_cr[j*uvlinesize], tmp_uvlinesize, bS, |
497 ( h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1, h); | 507 ( h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1, h); |
498 } | 508 } |
499 }else{ | 509 }else{ |
500 DECLARE_ALIGNED_8(int16_t, bS)[4]; | 510 DECLARE_ALIGNED(8, int16_t, bS)[4]; |
501 int qp; | 511 int qp; |
502 | 512 |
503 if( IS_INTRA(mb_type|mbm_type)) { | 513 if( IS_INTRA(mb_type|mbm_type)) { |
504 *(uint64_t*)bS= 0x0003000300030003ULL; | 514 AV_WN64A(bS, 0x0003000300030003ULL); |
505 if ( (!IS_INTERLACED(mb_type|mbm_type)) | 515 if ( (!IS_INTERLACED(mb_type|mbm_type)) |
506 || ((FRAME_MBAFF || (s->picture_structure != PICT_FRAME)) && (dir == 0)) | 516 || ((FRAME_MBAFF || (s->picture_structure != PICT_FRAME)) && (dir == 0)) |
507 ) | 517 ) |
508 *(uint64_t*)bS= 0x0004000400040004ULL; | 518 AV_WN64A(bS, 0x0004000400040004ULL); |
509 } else { | 519 } else { |
510 int i, l; | 520 int i; |
511 int mv_done; | 521 int mv_done; |
512 | 522 |
513 if( dir && FRAME_MBAFF && IS_INTERLACED(mb_type ^ mbm_type)) { | 523 if( dir && FRAME_MBAFF && IS_INTERLACED(mb_type ^ mbm_type)) { |
514 *(uint64_t*)bS= 0x0001000100010001ULL; | 524 AV_WN64A(bS, 0x0001000100010001ULL); |
515 mv_done = 1; | 525 mv_done = 1; |
516 } | 526 } |
517 else if( mask_par0 && ((mbm_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir)))) ) { | 527 else if( mask_par0 && ((mbm_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir)))) ) { |
518 int b_idx= 8 + 4; | 528 int b_idx= 8 + 4; |
519 int bn_idx= b_idx - (dir ? 8:1); | 529 int bn_idx= b_idx - (dir ? 8:1); |
520 | 530 |
521 bS[0] = bS[1] = bS[2] = bS[3] = check_mv(h, 8 + 4, bn_idx, mvy_limit); | 531 bS[0] = bS[1] = bS[2] = bS[3] = check_mv(h, 8 + 4, bn_idx, mvy_limit); |
522 mv_done = 1; | 532 mv_done = 1; |
523 } | 533 } |
524 else | 534 else |
(...skipping 42 matching lines...)
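Note on the hunk above: the interesting new code is the !CABAC branch for a top neighbour that uses the 8x8 transform. Its per-4x4 nonzero counts are not usable there, so bS is derived from the neighbour's luma coded-block-pattern bits instead (bits 2 and 3 cover its bottom two 8x8 blocks, as I read the diff; this special-case path does not consult motion vectors). A hedged sketch with hypothetical names:

    #include <stdint.h>

    /* bS for the four 4x4 positions along the top edge of the current MB:
     * 2 if either side has coded coefficients, else 1. */
    static void bs_top_edge_from_cbp(int16_t bS[4], int mbn_cbp,
                                     const uint8_t cur_nnz[4])
    {
        for (int i = 0; i < 4; i++) {
            int mbn_coded = mbn_cbp & (i < 2 ? 4 : 8);   /* bottom-left vs bottom-right 8x8 */
            bS[i] = 1 + (mbn_coded || cur_nnz[i]);
        }
    }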
567 qp= ( h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbm_xy] ) + 1 ) >> 1; | 577 qp= ( h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbm_xy] ) + 1 ) >> 1; |
568 filter_mb_edgech( &img_cr[0], uvlinesize, bS, qp, h); | 578 filter_mb_edgech( &img_cr[0], uvlinesize, bS, qp, h); |
569 } | 579 } |
570 } | 580 } |
571 } | 581 } |
572 } | 582 } |
573 } | 583 } |
574 | 584 |
575 /* Calculate bS */ | 585 /* Calculate bS */ |
576 for( edge = 1; edge < edges; edge++ ) { | 586 for( edge = 1; edge < edges; edge++ ) { |
577 DECLARE_ALIGNED_8(int16_t, bS)[4]; | 587 DECLARE_ALIGNED(8, int16_t, bS)[4]; |
578 int qp; | 588 int qp; |
579 | 589 |
580 if( IS_8x8DCT(mb_type & (edge<<24)) ) // (edge&1) && IS_8x8DCT(mb_type) | 590 if( IS_8x8DCT(mb_type & (edge<<24)) ) // (edge&1) && IS_8x8DCT(mb_type) |
581 continue; | 591 continue; |
582 | 592 |
583 if( IS_INTRA(mb_type)) { | 593 if( IS_INTRA(mb_type)) { |
584 *(uint64_t*)bS= 0x0003000300030003ULL; | 594 AV_WN64A(bS, 0x0003000300030003ULL); |
585 } else { | 595 } else { |
586 int i, l; | 596 int i; |
587 int mv_done; | 597 int mv_done; |
588 | 598 |
589 if( edge & mask_edge ) { | 599 if( edge & mask_edge ) { |
590 *(uint64_t*)bS= 0; | 600 AV_ZERO64(bS); |
591 mv_done = 1; | 601 mv_done = 1; |
592 } | 602 } |
593 else if( mask_par0 ) { | 603 else if( mask_par0 ) { |
594 int b_idx= 8 + 4 + edge * (dir ? 8:1); | 604 int b_idx= 8 + 4 + edge * (dir ? 8:1); |
595 int bn_idx= b_idx - (dir ? 8:1); | 605 int bn_idx= b_idx - (dir ? 8:1); |
596 | 606 |
597 bS[0] = bS[1] = bS[2] = bS[3] = check_mv(h, b_idx, bn_idx, mvy_limit); | 607 bS[0] = bS[1] = bS[2] = bS[3] = check_mv(h, b_idx, bn_idx, mvy_limit); |
598 mv_done = 1; | 608 mv_done = 1; |
599 } | 609 } |
600 else | 610 else |
(...skipping 42 matching lines...)
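Note on the hunk above: the check_mv() calls supply the motion-based part of the bS rules. When an edge is not forced higher by coded coefficients, bS is 1 if the two sides use different reference pictures or their motion vectors differ by a luma sample or more (mvy_limit carries the vertical threshold: 4 quarter-pel units for frame macroblocks, 2 for field ones), otherwise 0. A simplified, single-list sketch of that decision:

    #include <stdint.h>
    #include <stdlib.h>

    static int bs_from_motion(int ref_p, int ref_q,
                              const int16_t mv_p[2], const int16_t mv_q[2],
                              int mvy_limit)
    {
        if (ref_p != ref_q)
            return 1;                              /* different reference picture */
        if (abs(mv_p[0] - mv_q[0]) >= 4 ||         /* >= 1 luma sample horizontally */
            abs(mv_p[1] - mv_q[1]) >= mvy_limit)   /* 4 (frame) or 2 (field) vertically */
            return 1;
        return 0;
    }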
643 } | 653 } |
644 } | 654 } |
645 | 655 |
646 void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) { | 656 void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) { |
647 MpegEncContext * const s = &h->s; | 657 MpegEncContext * const s = &h->s; |
648 const int mb_xy= mb_x + mb_y*s->mb_stride; | 658 const int mb_xy= mb_x + mb_y*s->mb_stride; |
649 const int mb_type = s->current_picture.mb_type[mb_xy]; | 659 const int mb_type = s->current_picture.mb_type[mb_xy]; |
650 const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4; | 660 const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4; |
651 int first_vertical_edge_done = 0; | 661 int first_vertical_edge_done = 0; |
652 av_unused int dir; | 662 av_unused int dir; |
653 int list; | |
654 | 663 |
655 if (FRAME_MBAFF | 664 if (FRAME_MBAFF |
656 // and current and left pair do not have the same interlaced type | 665 // and current and left pair do not have the same interlaced type |
657 && IS_INTERLACED(mb_type^h->left_type[0]) | 666 && IS_INTERLACED(mb_type^h->left_type[0]) |
658 // and left mb is in available to us | 667 // and left mb is in available to us |
659 && h->left_type[0]) { | 668 && h->left_type[0]) { |
660 /* First vertical edge is different in MBAFF frames | 669 /* First vertical edge is different in MBAFF frames |
661 * There are 8 different bS to compute and 2 different Qp | 670 * There are 8 different bS to compute and 2 different Qp |
662 */ | 671 */ |
663 DECLARE_ALIGNED_8(int16_t, bS)[8]; | 672 DECLARE_ALIGNED(8, int16_t, bS)[8]; |
664 int qp[2]; | 673 int qp[2]; |
665 int bqp[2]; | 674 int bqp[2]; |
666 int rqp[2]; | 675 int rqp[2]; |
667 int mb_qp, mbn0_qp, mbn1_qp; | 676 int mb_qp, mbn0_qp, mbn1_qp; |
668 int i; | 677 int i; |
669 first_vertical_edge_done = 1; | 678 first_vertical_edge_done = 1; |
670 | 679 |
671 if( IS_INTRA(mb_type) ) | 680 if( IS_INTRA(mb_type) ) { |
672 *(uint64_t*)&bS[0]= | 681 AV_WN64A(&bS[0], 0x0004000400040004ULL); |
673 *(uint64_t*)&bS[4]= 0x0004000400040004ULL; | 682 AV_WN64A(&bS[4], 0x0004000400040004ULL); |
674 else { | 683 } else { |
675 static const uint8_t offset[2][2][8]={ | 684 static const uint8_t offset[2][2][8]={ |
676 { | 685 { |
677 {7+8*0, 7+8*0, 7+8*0, 7+8*0, 7+8*1, 7+8*1, 7+8*1, 7+8*1}, | 686 {7+8*0, 7+8*0, 7+8*0, 7+8*0, 7+8*1, 7+8*1, 7+8*1, 7+8*1}, |
678 {7+8*2, 7+8*2, 7+8*2, 7+8*2, 7+8*3, 7+8*3, 7+8*3, 7+8*3}, | 687 {7+8*2, 7+8*2, 7+8*2, 7+8*2, 7+8*3, 7+8*3, 7+8*3, 7+8*3}, |
679 },{ | 688 },{ |
680 {7+8*0, 7+8*1, 7+8*2, 7+8*3, 7+8*0, 7+8*1, 7+8*2, 7+8*3}, | 689 {7+8*0, 7+8*1, 7+8*2, 7+8*3, 7+8*0, 7+8*1, 7+8*2, 7+8*3}, |
681 {7+8*0, 7+8*1, 7+8*2, 7+8*3, 7+8*0, 7+8*1, 7+8*2, 7+8*3}, | 690 {7+8*0, 7+8*1, 7+8*2, 7+8*3, 7+8*0, 7+8*1, 7+8*2, 7+8*3}, |
682 } | 691 } |
683 }; | 692 }; |
684 const uint8_t *off= offset[MB_FIELD][mb_y&1]; | 693 const uint8_t *off= offset[MB_FIELD][mb_y&1]; |
(...skipping 49 matching lines...)
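Note on the hunk above: this is the MBAFF special case for the first vertical edge, where the current pair and the left pair differ in field/frame coding, so eight strengths and two quantiser sets (one per neighbouring field macroblock, hence mbn0_qp/mbn1_qp and the qp/bqp/rqp pairs) are needed. The quantiser on any macroblock edge is the rounded average of the two sides' QPs, as in the ( ... + 1 ) >> 1 expressions in the earlier hunks; a trivial sketch:

    /* Rounded average of the two macroblocks' QP values on either side of an edge. */
    static inline int edge_qp(int qp_p, int qp_q)
    {
        return (qp_p + qp_q + 1) >> 1;
    }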
734 } | 743 } |
735 | 744 |
736 #if CONFIG_SMALL | 745 #if CONFIG_SMALL |
737 for( dir = 0; dir < 2; dir++ ) | 746 for( dir = 0; dir < 2; dir++ ) |
738 filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, dir ? 0 : first_vertical_edge_done, dir); | 747 filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, dir ? 0 : first_vertical_edge_done, dir); |
739 #else | 748 #else |
740 filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, first_vertical_edge_done, 0); | 749 filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, first_vertical_edge_done, 0); |
741 filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, 0, 1); | 750 filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, 0, 1); |
742 #endif | 751 #endif |
743 } | 752 } |
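Note: the tail of ff_h264_filter_mb() keeps the existing size/speed split: with CONFIG_SMALL the two filtering directions share one loop, otherwise the call is written out twice with a constant dir so the compiler can fold the dir-dependent selections inside filter_mb_dir(). A generic illustration of the pattern (not FFmpeg code; assumes CONFIG_SMALL is defined to 0 or 1 by the build):

    #include <stdio.h>

    static void do_dir(int dir) { printf("filtering direction %d\n", dir); }

    void run_both_directions(void)
    {
    #if CONFIG_SMALL
        for (int dir = 0; dir < 2; dir++)   /* smaller code: one loop body */
            do_dir(dir);
    #else
        do_dir(0);                          /* faster: constant arguments invite inlining */
        do_dir(1);
    #endif
    }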