Index: patched-ffmpeg-mt/libavcodec/h264_loopfilter.c
===================================================================
--- patched-ffmpeg-mt/libavcodec/h264_loopfilter.c (revision 41250)
+++ patched-ffmpeg-mt/libavcodec/h264_loopfilter.c (working copy)
@@ -25,6 +25,7 @@
  * @author Michael Niedermayer <michaelni@gmx.at>
  */
 
+#include "libavutil/intreadwrite.h"
 #include "internal.h"
 #include "dsputil.h"
 #include "avcodec.h"
@@ -367,12 +368,14 @@
         filter_mb_edgech( &img_cr[2*2*uvlinesize], uvlinesize, bS3, qpc, h);
         return;
     } else {
-        DECLARE_ALIGNED_8(int16_t, bS)[2][4][4];
-        uint64_t (*bSv)[4] = (uint64_t(*)[4])bS;
+        LOCAL_ALIGNED_8(int16_t, bS, [2], [4][4]);
         int edges;
         if( IS_8x8DCT(mb_type) && (h->cbp&7) == 7 ) {
             edges = 4;
-            bSv[0][0] = bSv[0][2] = bSv[1][0] = bSv[1][2] = 0x0002000200020002ULL;
+            AV_WN64A(bS[0][0], 0x0002000200020002ULL);
+            AV_WN64A(bS[0][2], 0x0002000200020002ULL);
+            AV_WN64A(bS[1][0], 0x0002000200020002ULL);
+            AV_WN64A(bS[1][2], 0x0002000200020002ULL);
         } else {
             int mask_edge1 = (3*(((5*mb_type)>>5)&1)) | (mb_type>>4); //(mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : (mb_type & MB_TYPE_16x8) ? 1 : 0;
             int mask_edge0 = 3*((mask_edge1>>1) & ((5*left_type)>>5)&1); // (mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) && (h->left_type[0] & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : 0;
@@ -382,12 +385,12 @@
                                               h->list_count==2, edges, step, mask_edge0, mask_edge1, FIELD_PICTURE);
         }
         if( IS_INTRA(left_type) )
-            bSv[0][0] = 0x0004000400040004ULL;
+            AV_WN64A(bS[0][0], 0x0004000400040004ULL);
         if( IS_INTRA(h->top_type) )
-            bSv[1][0] = FIELD_PICTURE ? 0x0003000300030003ULL : 0x0004000400040004ULL;
+            AV_WN64A(bS[1][0], FIELD_PICTURE ? 0x0003000300030003ULL : 0x0004000400040004ULL);
 
 #define FILTER(hv,dir,edge)\
-        if(bSv[dir][edge]) {\
+        if(AV_RN64A(bS[dir][edge])) { \
             filter_mb_edge##hv( &img_y[4*edge*(dir?linesize:1)], linesize, bS[dir][edge], edge ? qp : qp##dir, h );\
             if(!(edge&1)) {\
                 filter_mb_edgec##hv( &img_cb[2*edge*(dir?uvlinesize:1)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, h );\
@@ -474,16 +477,23 @@
             int j;
 
             for(j=0; j<2; j++, mbn_xy += s->mb_stride){
-                DECLARE_ALIGNED_8(int16_t, bS)[4];
+                DECLARE_ALIGNED(8, int16_t, bS)[4];
                 int qp;
                 if( IS_INTRA(mb_type|s->current_picture.mb_type[mbn_xy]) ) {
-                    *(uint64_t*)bS= 0x0003000300030003ULL;
+                    AV_WN64A(bS, 0x0003000300030003ULL);
                 } else {
-                    const uint8_t *mbn_nnz = h->non_zero_count[mbn_xy] + 4+3*8; //FIXME 8x8dct?
+                    if(!CABAC && IS_8x8DCT(s->current_picture.mb_type[mbn_xy])){
+                        bS[0]= 1+((h->cbp_table[mbn_xy] & 4)||h->non_zero_count_cache[scan8[0]+0]);
+                        bS[1]= 1+((h->cbp_table[mbn_xy] & 4)||h->non_zero_count_cache[scan8[0]+1]);
+                        bS[2]= 1+((h->cbp_table[mbn_xy] & 8)||h->non_zero_count_cache[scan8[0]+2]);
+                        bS[3]= 1+((h->cbp_table[mbn_xy] & 8)||h->non_zero_count_cache[scan8[0]+3]);
+                    }else{
+                    const uint8_t *mbn_nnz = h->non_zero_count[mbn_xy] + 4+3*8;
                     int i;
                     for( i = 0; i < 4; i++ ) {
                         bS[i] = 1 + !!(h->non_zero_count_cache[scan8[0]+i] | mbn_nnz[i]);
                     }
+                    }
                 }
                 // Do not use s->qscale as luma quantizer because it has not the same
                 // value in IPCM macroblocks.
@@ -497,21 +507,21 @@
                                         ( h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1, h);
             }
         }else{
-            DECLARE_ALIGNED_8(int16_t, bS)[4];
+            DECLARE_ALIGNED(8, int16_t, bS)[4];
             int qp;
 
             if( IS_INTRA(mb_type|mbm_type)) {
-                *(uint64_t*)bS= 0x0003000300030003ULL;
+                AV_WN64A(bS, 0x0003000300030003ULL);
                 if ( (!IS_INTERLACED(mb_type|mbm_type))
                     || ((FRAME_MBAFF || (s->picture_structure != PICT_FRAME)) && (dir == 0))
                 )
-                    *(uint64_t*)bS= 0x0004000400040004ULL;
+                    AV_WN64A(bS, 0x0004000400040004ULL);
             } else {
-                int i, l;
+                int i;
                 int mv_done;
 
                 if( dir && FRAME_MBAFF && IS_INTERLACED(mb_type ^ mbm_type)) {
-                    *(uint64_t*)bS= 0x0001000100010001ULL;
+                    AV_WN64A(bS, 0x0001000100010001ULL);
                     mv_done = 1;
                 }
                 else if( mask_par0 && ((mbm_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir)))) ) {
@@ -574,20 +584,20 @@
 
         /* Calculate bS */
         for( edge = 1; edge < edges; edge++ ) {
-            DECLARE_ALIGNED_8(int16_t, bS)[4];
+            DECLARE_ALIGNED(8, int16_t, bS)[4];
             int qp;
 
             if( IS_8x8DCT(mb_type & (edge<<24)) ) // (edge&1) && IS_8x8DCT(mb_type)
                 continue;
 
             if( IS_INTRA(mb_type)) {
-                *(uint64_t*)bS= 0x0003000300030003ULL;
+                AV_WN64A(bS, 0x0003000300030003ULL);
             } else {
-                int i, l;
+                int i;
                 int mv_done;
 
                 if( edge & mask_edge ) {
-                    *(uint64_t*)bS= 0;
+                    AV_ZERO64(bS);
                     mv_done = 1;
                 }
                 else if( mask_par0 ) {
@@ -650,7 +660,6 @@
     const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4;
     int first_vertical_edge_done = 0;
     av_unused int dir;
-    int list;
 
     if (FRAME_MBAFF
         // and current and left pair do not have the same interlaced type
@@ -660,7 +669,7 @@
         /* First vertical edge is different in MBAFF frames
          * There are 8 different bS to compute and 2 different Qp
          */
-        DECLARE_ALIGNED_8(int16_t, bS)[8];
+        DECLARE_ALIGNED(8, int16_t, bS)[8];
         int qp[2];
         int bqp[2];
         int rqp[2];
@@ -668,10 +677,10 @@
         int i;
         first_vertical_edge_done = 1;
 
-        if( IS_INTRA(mb_type) )
-            *(uint64_t*)&bS[0]=
-            *(uint64_t*)&bS[4]= 0x0004000400040004ULL;
-        else {
+        if( IS_INTRA(mb_type) ) {
+            AV_WN64A(&bS[0], 0x0004000400040004ULL);
+            AV_WN64A(&bS[4], 0x0004000400040004ULL);
+        } else {
             static const uint8_t offset[2][2][8]={
                 {
                     {7+8*0, 7+8*0, 7+8*0, 7+8*0, 7+8*1, 7+8*1, 7+8*1, 7+8*1},