Chromium Code Reviews

Side by Side Diff: source/patched-ffmpeg-mt/libavcodec/x86/h264dsp_mmx.c

Issue 3384002: ffmpeg source update for sep 09 (Closed) Base URL: svn://chrome-svn/chrome/trunk/deps/third_party/ffmpeg/
Patch Set: Created 10 years, 3 months ago
1 /* 1 /*
2 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt 2 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
3 * 3 *
4 * This file is part of FFmpeg. 4 * This file is part of FFmpeg.
5 * 5 *
6 * FFmpeg is free software; you can redistribute it and/or 6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public 7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either 8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version. 9 * version 2.1 of the License, or (at your option) any later version.
10 * 10 *
11 * FFmpeg is distributed in the hope that it will be useful, 11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details. 14 * Lesser General Public License for more details.
15 * 15 *
16 * You should have received a copy of the GNU Lesser General Public 16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software 17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */ 19 */
20 20
21 #include "libavutil/cpu.h"
22 #include "libavutil/x86_cpu.h"
23 #include "libavcodec/h264dsp.h"
21 #include "dsputil_mmx.h" 24 #include "dsputil_mmx.h"
22 #include "libavcodec/h264pred.h"
23 25
24 DECLARE_ALIGNED(8, static const uint64_t, ff_pb_3_1 ) = 0x0103010301030103ULL; 26 DECLARE_ALIGNED(8, static const uint64_t, ff_pb_3_1 ) = 0x0103010301030103ULL;
25 DECLARE_ALIGNED(8, static const uint64_t, ff_pb_7_3 ) = 0x0307030703070307ULL; 27 DECLARE_ALIGNED(8, static const uint64_t, ff_pb_7_3 ) = 0x0307030703070307ULL;
26 28
27 /***********************************/ 29 /***********************************/
28 /* IDCT */ 30 /* IDCT */
29 31
30 #define SUMSUB_BADC( a, b, c, d ) \ 32 #define SUMSUB_BADC( a, b, c, d ) \
31 "paddw "#b", "#a" \n\t"\ 33 "paddw "#b", "#a" \n\t"\
32 "paddw "#d", "#c" \n\t"\ 34 "paddw "#d", "#c" \n\t"\
(...skipping 172 matching lines...)
205 "movq %%mm1, 48(%0) \n\t" 207 "movq %%mm1, 48(%0) \n\t"
206 "movq %%mm0, 64(%0) \n\t" 208 "movq %%mm0, 64(%0) \n\t"
207 "movq %%mm2, 80(%0) \n\t" 209 "movq %%mm2, 80(%0) \n\t"
208 "movq %%mm4, 96(%0) \n\t" 210 "movq %%mm4, 96(%0) \n\t"
209 "movq %%mm6, 112(%0) \n\t" 211 "movq %%mm6, 112(%0) \n\t"
210 :: "r"(b2+4*i) 212 :: "r"(b2+4*i)
211 : "memory" 213 : "memory"
212 ); 214 );
213 } 215 }
214 216
215 add_pixels_clamped_mmx(b2, dst, stride); 217 ff_add_pixels_clamped_mmx(b2, dst, stride);
216 } 218 }
217 219
218 #define STORE_DIFF_8P( p, d, t, z )\ 220 #define STORE_DIFF_8P( p, d, t, z )\
219 "movq "#d", "#t" \n"\ 221 "movq "#d", "#t" \n"\
220 "psraw $6, "#p" \n"\ 222 "psraw $6, "#p" \n"\
221 "punpcklbw "#z", "#t" \n"\ 223 "punpcklbw "#z", "#t" \n"\
222 "paddsw "#t", "#p" \n"\ 224 "paddsw "#t", "#p" \n"\
223 "packuswb "#p", "#p" \n"\ 225 "packuswb "#p", "#p" \n"\
224 "movq "#p", "#d" \n" 226 "movq "#p", "#d" \n"
225 227
(...skipping 315 matching lines...)
541 ff_x264_add8x4_idct_sse2 (dest[(i&4)>>2] + block_offset[i], block + i*16, stride); 543 ff_x264_add8x4_idct_sse2 (dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
542 else if(block[i*16]|block[i*16+16]) 544 else if(block[i*16]|block[i*16+16])
543 ff_h264_idct_dc_add8_mmx2(dest[(i&4)>>2] + block_offset[i], block + i*16, stride); 545 ff_h264_idct_dc_add8_mmx2(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
544 } 546 }
545 } 547 }
546 #endif 548 #endif
547 549
548 /***********************************/ 550 /***********************************/
549 /* deblocking */ 551 /* deblocking */
550 552
551 // out: o = |x-y|>a
552 // clobbers: t
553 #define DIFF_GT_MMX(x,y,a,o,t)\
554 "movq "#y", "#t" \n\t"\
555 "movq "#x", "#o" \n\t"\
556 "psubusb "#x", "#t" \n\t"\
557 "psubusb "#y", "#o" \n\t"\
558 "por "#t", "#o" \n\t"\
559 "psubusb "#a", "#o" \n\t"
560
561 // out: o = |x-y|>a
562 // clobbers: t
563 #define DIFF_GT2_MMX(x,y,a,o,t)\
564 "movq "#y", "#t" \n\t"\
565 "movq "#x", "#o" \n\t"\
566 "psubusb "#x", "#t" \n\t"\
567 "psubusb "#y", "#o" \n\t"\
568 "psubusb "#a", "#t" \n\t"\
569 "psubusb "#a", "#o" \n\t"\
570 "pcmpeqb "#t", "#o" \n\t"\
571
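/* Reviewer sketch (illustration, not part of the patch): with unsigned
 * saturating byte arithmetic, DIFF_GT_MMX leaves
 *     o = sat( (sat(x-y) | sat(y-x)) - a )  ==  sat(|x-y| - a)
 * so o is nonzero exactly where |x-y| > a.  DIFF_GT2_MMX instead compares
 * the two one-sided saturated differences after subtracting a; they are
 * equal only when both have saturated to zero, so it yields a full 0xFF
 * byte mask where |x-y| <= a. */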
572 // in: mm0=p1 mm1=p0 mm2=q0 mm3=q1
573 // out: mm5=beta-1, mm7=mask
574 // clobbers: mm4,mm6
575 #define H264_DEBLOCK_MASK(alpha1, beta1) \
576 "pshufw $0, "#alpha1", %%mm4 \n\t"\
577 "pshufw $0, "#beta1 ", %%mm5 \n\t"\
578 "packuswb %%mm4, %%mm4 \n\t"\
579 "packuswb %%mm5, %%mm5 \n\t"\
580 DIFF_GT_MMX(%%mm1, %%mm2, %%mm4, %%mm7, %%mm6) /* |p0-q0| > alpha-1 */\
581 DIFF_GT_MMX(%%mm0, %%mm1, %%mm5, %%mm4, %%mm6) /* |p1-p0| > beta-1 */\
582 "por %%mm4, %%mm7 \n\t"\
583 DIFF_GT_MMX(%%mm3, %%mm2, %%mm5, %%mm4, %%mm6) /* |q1-q0| > beta-1 */\
584 "por %%mm4, %%mm7 \n\t"\
585 "pxor %%mm6, %%mm6 \n\t"\
586 "pcmpeqb %%mm6, %%mm7 \n\t"
587
588 // in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask)
589 // out: mm1=p0' mm2=q0'
590 // clobbers: mm0,3-6
591 #define H264_DEBLOCK_P0_Q0(pb_01, pb_3f)\
592 "movq %%mm1 , %%mm5 \n\t"\
593 "pxor %%mm2 , %%mm5 \n\t" /* p0^q0*/\
594 "pand "#pb_01" , %%mm5 \n\t" /* (p0^q0)&1*/\
595 "pcmpeqb %%mm4 , %%mm4 \n\t"\
596 "pxor %%mm4 , %%mm3 \n\t"\
597 "pavgb %%mm0 , %%mm3 \n\t" /* (p1 - q1 + 256)>>1*/\
598 "pavgb "MANGLE(ff_pb_3)" , %%mm3 \n\t" /*(((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2*/\
599 "pxor %%mm1 , %%mm4 \n\t"\
600 "pavgb %%mm2 , %%mm4 \n\t" /* (q0 - p0 + 256)>>1*/\
601 "pavgb %%mm5 , %%mm3 \n\t"\
602 "paddusb %%mm4 , %%mm3 \n\t" /* d+128+33*/\
603 "movq "MANGLE(ff_pb_A1)" , %%mm6 \n\t"\
604 "psubusb %%mm3 , %%mm6 \n\t"\
605 "psubusb "MANGLE(ff_pb_A1)" , %%mm3 \n\t"\
606 "pminub %%mm7 , %%mm6 \n\t"\
607 "pminub %%mm7 , %%mm3 \n\t"\
608 "psubusb %%mm6 , %%mm1 \n\t"\
609 "psubusb %%mm3 , %%mm2 \n\t"\
610 "paddusb %%mm3 , %%mm1 \n\t"\
611 "paddusb %%mm6 , %%mm2 \n\t"
612
613 // in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) %8=ff_bone
614 // out: (q1addr) = av_clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
615 // clobbers: q2, tmp, tc0
616 #define H264_DEBLOCK_Q1(p1, q2, q2addr, q1addr, tc0, tmp)\
617 "movq %%mm1, "#tmp" \n\t"\
618 "pavgb %%mm2, "#tmp" \n\t"\
619 "pavgb "#tmp", "#q2" \n\t" /* avg(p2,avg(p0,q0)) */\
620 "pxor "q2addr", "#tmp" \n\t"\
621 "pand %9, "#tmp" \n\t" /* (p2^avg(p0,q0))&1 */\
622 "psubusb "#tmp", "#q2" \n\t" /* (p2+((p0+q0+1)>>1))>>1 */\
623 "movq "#p1", "#tmp" \n\t"\
624 "psubusb "#tc0", "#tmp" \n\t"\
625 "paddusb "#p1", "#tc0" \n\t"\
626 "pmaxub "#tmp", "#q2" \n\t"\
627 "pminub "#tc0", "#q2" \n\t"\
628 "movq "#q2", "q1addr" \n\t"
629
630 static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
631 {
632 DECLARE_ALIGNED(8, uint64_t, tmp0)[2];
633
634 __asm__ volatile(
635 "movq (%2,%4), %%mm0 \n\t" //p1
636 "movq (%2,%4,2), %%mm1 \n\t" //p0
637 "movq (%3), %%mm2 \n\t" //q0
638 "movq (%3,%4), %%mm3 \n\t" //q1
639 H264_DEBLOCK_MASK(%7, %8)
640
641 "movd %6, %%mm4 \n\t"
642 "punpcklbw %%mm4, %%mm4 \n\t"
643 "punpcklwd %%mm4, %%mm4 \n\t"
644 "pcmpeqb %%mm3, %%mm3 \n\t"
645 "movq %%mm4, %%mm6 \n\t"
646 "pcmpgtb %%mm3, %%mm4 \n\t"
647 "movq %%mm6, %1 \n\t"
648 "pand %%mm4, %%mm7 \n\t"
649 "movq %%mm7, %0 \n\t"
650
651 /* filter p1 */
652 "movq (%2), %%mm3 \n\t" //p2
653 DIFF_GT2_MMX(%%mm1, %%mm3, %%mm5, %%mm6, %%mm4) // |p2-p0|>beta-1
654 "pand %%mm7, %%mm6 \n\t" // mask & |p2-p0|<beta
655 "pand %1, %%mm7 \n\t" // mask & tc0
656 "movq %%mm7, %%mm4 \n\t"
657 "psubb %%mm6, %%mm7 \n\t"
658 "pand %%mm4, %%mm6 \n\t" // mask & |p2-p0|<beta & tc0
659 H264_DEBLOCK_Q1(%%mm0, %%mm3, "(%2)", "(%2,%4)", %%mm6, %%mm4)
660
661 /* filter q1 */
662 "movq (%3,%4,2), %%mm4 \n\t" //q2
663 DIFF_GT2_MMX(%%mm2, %%mm4, %%mm5, %%mm6, %%mm3) // |q2-q0|>beta-1
664 "pand %0, %%mm6 \n\t"
665 "movq %1, %%mm5 \n\t" // can be merged with the and below but is slower then
666 "pand %%mm6, %%mm5 \n\t"
667 "psubb %%mm6, %%mm7 \n\t"
668 "movq (%3,%4), %%mm3 \n\t"
669 H264_DEBLOCK_Q1(%%mm3, %%mm4, "(%3,%4,2)", "(%3,%4)", %%mm5, %%mm6)
670
671 /* filter p0, q0 */
672 H264_DEBLOCK_P0_Q0(%9, unused)
673 "movq %%mm1, (%2,%4,2) \n\t"
674 "movq %%mm2, (%3) \n\t"
675
676 : "=m"(tmp0[0]), "=m"(tmp0[1])
677 : "r"(pix-3*stride), "r"(pix), "r"((x86_reg)stride),
678 "m"(*tmp0/*unused*/), "m"(*(uint32_t*)tc0), "m"(alpha1), "m"(beta1),
679 "m"(ff_bone)
680 );
681 }
682
683 static void h264_v_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
684 {
685 if((tc0[0] & tc0[1]) >= 0)
686 h264_loop_filter_luma_mmx2(pix, stride, alpha-1, beta-1, tc0);
687 if((tc0[2] & tc0[3]) >= 0)
688 h264_loop_filter_luma_mmx2(pix+8, stride, alpha-1, beta-1, tc0+2);
689 }
690 static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
691 {
692 //FIXME: could cut some load/stores by merging transpose with filter
693 // also, it only needs to transpose 6x8
694 DECLARE_ALIGNED(8, uint8_t, trans)[8*8];
695 int i;
696 for(i=0; i<2; i++, pix+=8*stride, tc0+=2) {
697 if((tc0[0] & tc0[1]) < 0)
698 continue;
699 transpose4x4(trans, pix-4, 8, stride);
700 transpose4x4(trans +4*8, pix, 8, stride);
701 transpose4x4(trans+4, pix-4+4*stride, 8, stride);
702 transpose4x4(trans+4+4*8, pix +4*stride, 8, stride);
703 h264_loop_filter_luma_mmx2(trans+4*8, 8, alpha-1, beta-1, tc0);
704 transpose4x4(pix-2, trans +2*8, stride, 8);
705 transpose4x4(pix-2+4*stride, trans+4+2*8, stride, 8);
706 }
707 }
708
709 static inline void h264_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
710 {
711 __asm__ volatile(
712 "movq (%0), %%mm0 \n\t" //p1
713 "movq (%0,%2), %%mm1 \n\t" //p0
714 "movq (%1), %%mm2 \n\t" //q0
715 "movq (%1,%2), %%mm3 \n\t" //q1
716 H264_DEBLOCK_MASK(%4, %5)
717 "movd %3, %%mm6 \n\t"
718 "punpcklbw %%mm6, %%mm6 \n\t"
719 "pand %%mm6, %%mm7 \n\t" // mm7 = tc&mask
720 H264_DEBLOCK_P0_Q0(%6, %7)
721 "movq %%mm1, (%0,%2) \n\t"
722 "movq %%mm2, (%1) \n\t"
723
724 :: "r"(pix-2*stride), "r"(pix), "r"((x86_reg)stride),
725 "r"(*(uint32_t*)tc0),
726 "m"(alpha1), "m"(beta1), "m"(ff_bone), "m"(ff_pb_3F)
727 );
728 }
729
730 static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
731 {
732 h264_loop_filter_chroma_mmx2(pix, stride, alpha-1, beta-1, tc0);
733 }
734
735 static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
736 {
737 //FIXME: could cut some load/stores by merging transpose with filter
738 DECLARE_ALIGNED(8, uint8_t, trans)[8*4];
739 transpose4x4(trans, pix-2, 8, stride);
740 transpose4x4(trans+4, pix-2+4*stride, 8, stride);
741 h264_loop_filter_chroma_mmx2(trans+2*8, 8, alpha-1, beta-1, tc0);
742 transpose4x4(pix-2, trans, stride, 8);
743 transpose4x4(pix-2+4*stride, trans+4, stride, 8);
744 }
745
746 // p0 = (p0 + q1 + 2*p1 + 2) >> 2
747 #define H264_FILTER_CHROMA4(p0, p1, q1, one) \
748 "movq "#p0", %%mm4 \n\t"\
749 "pxor "#q1", %%mm4 \n\t"\
750 "pand "#one", %%mm4 \n\t" /* mm4 = (p0^q1)&1 */\
751 "pavgb "#q1", "#p0" \n\t"\
752 "psubusb %%mm4, "#p0" \n\t"\
753 "pavgb "#p1", "#p0" \n\t" /* dst = avg(p1, avg(p0,q1) - ((p0^q1)&1)) */\
754
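/* Reviewer sketch (illustration, not part of the patch): pavgb(a,b) is
 * (a+b+1)>>1, and subtracting (p0^q1)&1 first cancels the rounding bias of
 * the inner average, so the three packed ops above reproduce the scalar
 * formula in the comment exactly:
 *     avg(p1, (p0+q1)>>1) rounded up  ==  (p0 + q1 + 2*p1 + 2) >> 2   */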
755 static inline void h264_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha1, int beta1)
756 {
757 __asm__ volatile(
758 "movq (%0), %%mm0 \n\t"
759 "movq (%0,%2), %%mm1 \n\t"
760 "movq (%1), %%mm2 \n\t"
761 "movq (%1,%2), %%mm3 \n\t"
762 H264_DEBLOCK_MASK(%3, %4)
763 "movq %%mm1, %%mm5 \n\t"
764 "movq %%mm2, %%mm6 \n\t"
765 H264_FILTER_CHROMA4(%%mm1, %%mm0, %%mm3, %5) //p0'
766 H264_FILTER_CHROMA4(%%mm2, %%mm3, %%mm0, %5) //q0'
767 "psubb %%mm5, %%mm1 \n\t"
768 "psubb %%mm6, %%mm2 \n\t"
769 "pand %%mm7, %%mm1 \n\t"
770 "pand %%mm7, %%mm2 \n\t"
771 "paddb %%mm5, %%mm1 \n\t"
772 "paddb %%mm6, %%mm2 \n\t"
773 "movq %%mm1, (%0,%2) \n\t"
774 "movq %%mm2, (%1) \n\t"
775 :: "r"(pix-2*stride), "r"(pix), "r"((x86_reg)stride),
776 "m"(alpha1), "m"(beta1), "m"(ff_bone)
777 );
778 }
779
780 static void h264_v_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
781 {
782 h264_loop_filter_chroma_intra_mmx2(pix, stride, alpha-1, beta-1);
783 }
784
785 static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
786 {
787 //FIXME: could cut some load/stores by merging transpose with filter
788 DECLARE_ALIGNED(8, uint8_t, trans)[8*4];
789 transpose4x4(trans, pix-2, 8, stride);
790 transpose4x4(trans+4, pix-2+4*stride, 8, stride);
791 h264_loop_filter_chroma_intra_mmx2(trans+2*8, 8, alpha-1, beta-1);
792 transpose4x4(pix-2, trans, stride, 8);
793 transpose4x4(pix-2+4*stride, trans+4, stride, 8);
794 }
795
796 static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2], 553 static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
797 int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field ) { 554 int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field ) {
798 int dir; 555 int dir;
799 __asm__ volatile( 556 __asm__ volatile(
800 "movq %0, %%mm7 \n" 557 "movq %0, %%mm7 \n"
801 "movq %1, %%mm6 \n" 558 "movq %1, %%mm6 \n"
802 ::"m"(ff_pb_1), "m"(ff_pb_3) 559 ::"m"(ff_pb_1), "m"(ff_pb_3)
803 ); 560 );
804 if(field) 561 if(field)
805 __asm__ volatile( 562 __asm__ volatile(
(...skipping 104 matching lines...)
910 TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4) 667 TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4)
911 "movq %%mm0, (%0) \n\t" 668 "movq %%mm0, (%0) \n\t"
912 "movq %%mm3, 8(%0) \n\t" 669 "movq %%mm3, 8(%0) \n\t"
913 "movq %%mm4, 16(%0) \n\t" 670 "movq %%mm4, 16(%0) \n\t"
914 "movq %%mm2, 24(%0) \n\t" 671 "movq %%mm2, 24(%0) \n\t"
915 ::"r"(bS[0]) 672 ::"r"(bS[0])
916 :"memory" 673 :"memory"
917 ); 674 );
918 } 675 }
919 676
920 /***********************************/ 677 #define LF_FUNC(DIR, TYPE, OPT) \
921 /* motion compensation */ 678 void ff_x264_deblock_ ## DIR ## _ ## TYPE ## _ ## OPT (uint8_t *pix, int stride, \
679 int alpha, int beta, int8_t *tc0) ;
680 #define LF_IFUNC(DIR, TYPE, OPT) \
681 void ff_x264_deblock_ ## DIR ## _ ## TYPE ## _ ## OPT (uint8_t *pix, int stride, \
682 int alpha, int beta);
922 683
923 #define QPEL_H264V_MM(A,B,C,D,E,F,OP,T,Z,d,q)\ 684 LF_FUNC (h, chroma, mmxext)
924 "mov"#q" "#C", "#T" \n\t"\ 685 LF_IFUNC(h, chroma_intra, mmxext)
925 "mov"#d" (%0), "#F" \n\t"\ 686 LF_FUNC (v, chroma, mmxext)
926 "paddw "#D", "#T" \n\t"\ 687 LF_IFUNC(v, chroma_intra, mmxext)
927 "psllw $2, "#T" \n\t"\
928 "psubw "#B", "#T" \n\t"\
929 "psubw "#E", "#T" \n\t"\
930 "punpcklbw "#Z", "#F" \n\t"\
931 "pmullw %4, "#T" \n\t"\
932 "paddw %5, "#A" \n\t"\
933 "add %2, %0 \n\t"\
934 "paddw "#F", "#A" \n\t"\
935 "paddw "#A", "#T" \n\t"\
936 "psraw $5, "#T" \n\t"\
937 "packuswb "#T", "#T" \n\t"\
938 OP(T, (%1), A, d)\
939 "add %3, %1 \n\t"
940 688
941 #define QPEL_H264HV_MM(A,B,C,D,E,F,OF,T,Z,d,q)\ 689 LF_FUNC (h, luma, mmxext)
942 "mov"#q" "#C", "#T" \n\t"\ 690 LF_IFUNC(h, luma_intra, mmxext)
943 "mov"#d" (%0), "#F" \n\t"\ 691 #if HAVE_YASM && ARCH_X86_32
944 "paddw "#D", "#T" \n\t"\ 692 LF_FUNC (v8, luma, mmxext)
945 "psllw $2, "#T" \n\t"\ 693 static void ff_x264_deblock_v_luma_mmxext(uint8_t *pix, int stride, int alpha, i nt beta, int8_t *tc0)
946 "paddw %4, "#A" \n\t"\ 694 {
947 "psubw "#B", "#T" \n\t"\ 695 if((tc0[0] & tc0[1]) >= 0)
948 "psubw "#E", "#T" \n\t"\ 696 ff_x264_deblock_v8_luma_mmxext(pix+0, stride, alpha, beta, tc0);
949 "punpcklbw "#Z", "#F" \n\t"\ 697 if((tc0[2] & tc0[3]) >= 0)
950 "pmullw %3, "#T" \n\t"\ 698 ff_x264_deblock_v8_luma_mmxext(pix+8, stride, alpha, beta, tc0+2);
951 "paddw "#F", "#A" \n\t"\
952 "add %2, %0 \n\t"\
953 "paddw "#A", "#T" \n\t"\
954 "mov"#q" "#T", "#OF"(%1) \n\t"
955
956 #define QPEL_H264V(A,B,C,D,E,F,OP) QPEL_H264V_MM(A,B,C,D,E,F,OP,%%mm6,%%mm7,d,q)
957 #define QPEL_H264HV(A,B,C,D,E,F,OF) QPEL_H264HV_MM(A,B,C,D,E,F,OF,%%mm6,%%mm7,d,q)
958 #define QPEL_H264V_XMM(A,B,C,D,E,F,OP) QPEL_H264V_MM(A,B,C,D,E,F,OP,%%xmm6,%%xmm7,q,dqa)
959 #define QPEL_H264HV_XMM(A,B,C,D,E,F,OF) QPEL_H264HV_MM(A,B,C,D,E,F,OF,%%xmm6,%%xmm7,q,dqa)
960
961
962 #define QPEL_H264(OPNAME, OP, MMX)\
963 static av_noinline void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
964 int h=4;\
965 \
966 __asm__ volatile(\
967 "pxor %%mm7, %%mm7 \n\t"\
968 "movq "MANGLE(ff_pw_5) ", %%mm4\n\t"\
969 "movq "MANGLE(ff_pw_16)", %%mm5\n\t"\
970 "1: \n\t"\
971 "movd -1(%0), %%mm1 \n\t"\
972 "movd (%0), %%mm2 \n\t"\
973 "movd 1(%0), %%mm3 \n\t"\
974 "movd 2(%0), %%mm0 \n\t"\
975 "punpcklbw %%mm7, %%mm1 \n\t"\
976 "punpcklbw %%mm7, %%mm2 \n\t"\
977 "punpcklbw %%mm7, %%mm3 \n\t"\
978 "punpcklbw %%mm7, %%mm0 \n\t"\
979 "paddw %%mm0, %%mm1 \n\t"\
980 "paddw %%mm3, %%mm2 \n\t"\
981 "movd -2(%0), %%mm0 \n\t"\
982 "movd 3(%0), %%mm3 \n\t"\
983 "punpcklbw %%mm7, %%mm0 \n\t"\
984 "punpcklbw %%mm7, %%mm3 \n\t"\
985 "paddw %%mm3, %%mm0 \n\t"\
986 "psllw $2, %%mm2 \n\t"\
987 "psubw %%mm1, %%mm2 \n\t"\
988 "pmullw %%mm4, %%mm2 \n\t"\
989 "paddw %%mm5, %%mm0 \n\t"\
990 "paddw %%mm2, %%mm0 \n\t"\
991 "psraw $5, %%mm0 \n\t"\
992 "packuswb %%mm0, %%mm0 \n\t"\
993 OP(%%mm0, (%1),%%mm6, d)\
994 "add %3, %0 \n\t"\
995 "add %4, %1 \n\t"\
996 "decl %2 \n\t"\
997 " jnz 1b \n\t"\
998 : "+a"(src), "+c"(dst), "+g"(h)\
999 : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride)\
1000 : "memory"\
1001 );\
1002 }\
1003 static av_noinline void OPNAME ## h264_qpel4_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
1004 int h=4;\
1005 __asm__ volatile(\
1006 "pxor %%mm7, %%mm7 \n\t"\
1007 "movq %0, %%mm4 \n\t"\
1008 "movq %1, %%mm5 \n\t"\
1009 :: "m"(ff_pw_5), "m"(ff_pw_16)\
1010 );\
1011 do{\
1012 __asm__ volatile(\
1013 "movd -1(%0), %%mm1 \n\t"\
1014 "movd (%0), %%mm2 \n\t"\
1015 "movd 1(%0), %%mm3 \n\t"\
1016 "movd 2(%0), %%mm0 \n\t"\
1017 "punpcklbw %%mm7, %%mm1 \n\t"\
1018 "punpcklbw %%mm7, %%mm2 \n\t"\
1019 "punpcklbw %%mm7, %%mm3 \n\t"\
1020 "punpcklbw %%mm7, %%mm0 \n\t"\
1021 "paddw %%mm0, %%mm1 \n\t"\
1022 "paddw %%mm3, %%mm2 \n\t"\
1023 "movd -2(%0), %%mm0 \n\t"\
1024 "movd 3(%0), %%mm3 \n\t"\
1025 "punpcklbw %%mm7, %%mm0 \n\t"\
1026 "punpcklbw %%mm7, %%mm3 \n\t"\
1027 "paddw %%mm3, %%mm0 \n\t"\
1028 "psllw $2, %%mm2 \n\t"\
1029 "psubw %%mm1, %%mm2 \n\t"\
1030 "pmullw %%mm4, %%mm2 \n\t"\
1031 "paddw %%mm5, %%mm0 \n\t"\
1032 "paddw %%mm2, %%mm0 \n\t"\
1033 "movd (%2), %%mm3 \n\t"\
1034 "psraw $5, %%mm0 \n\t"\
1035 "packuswb %%mm0, %%mm0 \n\t"\
1036 PAVGB" %%mm3, %%mm0 \n\t"\
1037 OP(%%mm0, (%1),%%mm6, d)\
1038 "add %4, %0 \n\t"\
1039 "add %4, %1 \n\t"\
1040 "add %3, %2 \n\t"\
1041 : "+a"(src), "+c"(dst), "+d"(src2)\
1042 : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride)\
1043 : "memory"\
1044 );\
1045 }while(--h);\
1046 }\
1047 static av_noinline void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1048 src -= 2*srcStride;\
1049 __asm__ volatile(\
1050 "pxor %%mm7, %%mm7 \n\t"\
1051 "movd (%0), %%mm0 \n\t"\
1052 "add %2, %0 \n\t"\
1053 "movd (%0), %%mm1 \n\t"\
1054 "add %2, %0 \n\t"\
1055 "movd (%0), %%mm2 \n\t"\
1056 "add %2, %0 \n\t"\
1057 "movd (%0), %%mm3 \n\t"\
1058 "add %2, %0 \n\t"\
1059 "movd (%0), %%mm4 \n\t"\
1060 "add %2, %0 \n\t"\
1061 "punpcklbw %%mm7, %%mm0 \n\t"\
1062 "punpcklbw %%mm7, %%mm1 \n\t"\
1063 "punpcklbw %%mm7, %%mm2 \n\t"\
1064 "punpcklbw %%mm7, %%mm3 \n\t"\
1065 "punpcklbw %%mm7, %%mm4 \n\t"\
1066 QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
1067 QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
1068 QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
1069 QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
1070 \
1071 : "+a"(src), "+c"(dst)\
1072 : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff _pw_16)\
1073 : "memory"\
1074 );\
1075 }\
1076 static av_noinline void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
1077 int h=4;\
1078 int w=3;\
1079 src -= 2*srcStride+2;\
1080 while(w--){\
1081 __asm__ volatile(\
1082 "pxor %%mm7, %%mm7 \n\t"\
1083 "movd (%0), %%mm0 \n\t"\
1084 "add %2, %0 \n\t"\
1085 "movd (%0), %%mm1 \n\t"\
1086 "add %2, %0 \n\t"\
1087 "movd (%0), %%mm2 \n\t"\
1088 "add %2, %0 \n\t"\
1089 "movd (%0), %%mm3 \n\t"\
1090 "add %2, %0 \n\t"\
1091 "movd (%0), %%mm4 \n\t"\
1092 "add %2, %0 \n\t"\
1093 "punpcklbw %%mm7, %%mm0 \n\t"\
1094 "punpcklbw %%mm7, %%mm1 \n\t"\
1095 "punpcklbw %%mm7, %%mm2 \n\t"\
1096 "punpcklbw %%mm7, %%mm3 \n\t"\
1097 "punpcklbw %%mm7, %%mm4 \n\t"\
1098 QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*3)\
1099 QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*3)\
1100 QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*3)\
1101 QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*3)\
1102 \
1103 : "+a"(src)\
1104 : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\
1105 : "memory"\
1106 );\
1107 tmp += 4;\
1108 src += 4 - 9*srcStride;\
1109 }\
1110 tmp -= 3*4;\
1111 __asm__ volatile(\
1112 "1: \n\t"\
1113 "movq (%0), %%mm0 \n\t"\
1114 "paddw 10(%0), %%mm0 \n\t"\
1115 "movq 2(%0), %%mm1 \n\t"\
1116 "paddw 8(%0), %%mm1 \n\t"\
1117 "movq 4(%0), %%mm2 \n\t"\
1118 "paddw 6(%0), %%mm2 \n\t"\
1119 "psubw %%mm1, %%mm0 \n\t"/*a-b (abccba)*/\
1120 "psraw $2, %%mm0 \n\t"/*(a-b)/4 */\
1121 "psubw %%mm1, %%mm0 \n\t"/*(a-b)/4-b */\
1122 "paddsw %%mm2, %%mm0 \n\t"\
1123 "psraw $2, %%mm0 \n\t"/*((a-b)/4-b+c)/4 */\
1124 "paddw %%mm2, %%mm0 \n\t"/*(a-5*b+20*c)/16 */\
1125 "psraw $6, %%mm0 \n\t"\
1126 "packuswb %%mm0, %%mm0 \n\t"\
1127 OP(%%mm0, (%1),%%mm7, d)\
1128 "add $24, %0 \n\t"\
1129 "add %3, %1 \n\t"\
1130 "decl %2 \n\t"\
1131 " jnz 1b \n\t"\
1132 : "+a"(tmp), "+c"(dst), "+g"(h)\
1133 : "S"((x86_reg)dstStride)\
1134 : "memory"\
1135 );\
1136 }\
1137 \
1138 static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1139 int h=8;\
1140 __asm__ volatile(\
1141 "pxor %%mm7, %%mm7 \n\t"\
1142 "movq "MANGLE(ff_pw_5)", %%mm6\n\t"\
1143 "1: \n\t"\
1144 "movq (%0), %%mm0 \n\t"\
1145 "movq 1(%0), %%mm2 \n\t"\
1146 "movq %%mm0, %%mm1 \n\t"\
1147 "movq %%mm2, %%mm3 \n\t"\
1148 "punpcklbw %%mm7, %%mm0 \n\t"\
1149 "punpckhbw %%mm7, %%mm1 \n\t"\
1150 "punpcklbw %%mm7, %%mm2 \n\t"\
1151 "punpckhbw %%mm7, %%mm3 \n\t"\
1152 "paddw %%mm2, %%mm0 \n\t"\
1153 "paddw %%mm3, %%mm1 \n\t"\
1154 "psllw $2, %%mm0 \n\t"\
1155 "psllw $2, %%mm1 \n\t"\
1156 "movq -1(%0), %%mm2 \n\t"\
1157 "movq 2(%0), %%mm4 \n\t"\
1158 "movq %%mm2, %%mm3 \n\t"\
1159 "movq %%mm4, %%mm5 \n\t"\
1160 "punpcklbw %%mm7, %%mm2 \n\t"\
1161 "punpckhbw %%mm7, %%mm3 \n\t"\
1162 "punpcklbw %%mm7, %%mm4 \n\t"\
1163 "punpckhbw %%mm7, %%mm5 \n\t"\
1164 "paddw %%mm4, %%mm2 \n\t"\
1165 "paddw %%mm3, %%mm5 \n\t"\
1166 "psubw %%mm2, %%mm0 \n\t"\
1167 "psubw %%mm5, %%mm1 \n\t"\
1168 "pmullw %%mm6, %%mm0 \n\t"\
1169 "pmullw %%mm6, %%mm1 \n\t"\
1170 "movd -2(%0), %%mm2 \n\t"\
1171 "movd 7(%0), %%mm5 \n\t"\
1172 "punpcklbw %%mm7, %%mm2 \n\t"\
1173 "punpcklbw %%mm7, %%mm5 \n\t"\
1174 "paddw %%mm3, %%mm2 \n\t"\
1175 "paddw %%mm5, %%mm4 \n\t"\
1176 "movq "MANGLE(ff_pw_16)", %%mm5\n\t"\
1177 "paddw %%mm5, %%mm2 \n\t"\
1178 "paddw %%mm5, %%mm4 \n\t"\
1179 "paddw %%mm2, %%mm0 \n\t"\
1180 "paddw %%mm4, %%mm1 \n\t"\
1181 "psraw $5, %%mm0 \n\t"\
1182 "psraw $5, %%mm1 \n\t"\
1183 "packuswb %%mm1, %%mm0 \n\t"\
1184 OP(%%mm0, (%1),%%mm5, q)\
1185 "add %3, %0 \n\t"\
1186 "add %4, %1 \n\t"\
1187 "decl %2 \n\t"\
1188 " jnz 1b \n\t"\
1189 : "+a"(src), "+c"(dst), "+g"(h)\
1190 : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride)\
1191 : "memory"\
1192 );\
1193 }\
1194 \
1195 static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
1196 int h=8;\
1197 __asm__ volatile(\
1198 "pxor %%mm7, %%mm7 \n\t"\
1199 "movq %0, %%mm6 \n\t"\
1200 :: "m"(ff_pw_5)\
1201 );\
1202 do{\
1203 __asm__ volatile(\
1204 "movq (%0), %%mm0 \n\t"\
1205 "movq 1(%0), %%mm2 \n\t"\
1206 "movq %%mm0, %%mm1 \n\t"\
1207 "movq %%mm2, %%mm3 \n\t"\
1208 "punpcklbw %%mm7, %%mm0 \n\t"\
1209 "punpckhbw %%mm7, %%mm1 \n\t"\
1210 "punpcklbw %%mm7, %%mm2 \n\t"\
1211 "punpckhbw %%mm7, %%mm3 \n\t"\
1212 "paddw %%mm2, %%mm0 \n\t"\
1213 "paddw %%mm3, %%mm1 \n\t"\
1214 "psllw $2, %%mm0 \n\t"\
1215 "psllw $2, %%mm1 \n\t"\
1216 "movq -1(%0), %%mm2 \n\t"\
1217 "movq 2(%0), %%mm4 \n\t"\
1218 "movq %%mm2, %%mm3 \n\t"\
1219 "movq %%mm4, %%mm5 \n\t"\
1220 "punpcklbw %%mm7, %%mm2 \n\t"\
1221 "punpckhbw %%mm7, %%mm3 \n\t"\
1222 "punpcklbw %%mm7, %%mm4 \n\t"\
1223 "punpckhbw %%mm7, %%mm5 \n\t"\
1224 "paddw %%mm4, %%mm2 \n\t"\
1225 "paddw %%mm3, %%mm5 \n\t"\
1226 "psubw %%mm2, %%mm0 \n\t"\
1227 "psubw %%mm5, %%mm1 \n\t"\
1228 "pmullw %%mm6, %%mm0 \n\t"\
1229 "pmullw %%mm6, %%mm1 \n\t"\
1230 "movd -2(%0), %%mm2 \n\t"\
1231 "movd 7(%0), %%mm5 \n\t"\
1232 "punpcklbw %%mm7, %%mm2 \n\t"\
1233 "punpcklbw %%mm7, %%mm5 \n\t"\
1234 "paddw %%mm3, %%mm2 \n\t"\
1235 "paddw %%mm5, %%mm4 \n\t"\
1236 "movq %5, %%mm5 \n\t"\
1237 "paddw %%mm5, %%mm2 \n\t"\
1238 "paddw %%mm5, %%mm4 \n\t"\
1239 "paddw %%mm2, %%mm0 \n\t"\
1240 "paddw %%mm4, %%mm1 \n\t"\
1241 "psraw $5, %%mm0 \n\t"\
1242 "psraw $5, %%mm1 \n\t"\
1243 "movq (%2), %%mm4 \n\t"\
1244 "packuswb %%mm1, %%mm0 \n\t"\
1245 PAVGB" %%mm4, %%mm0 \n\t"\
1246 OP(%%mm0, (%1),%%mm5, q)\
1247 "add %4, %0 \n\t"\
1248 "add %4, %1 \n\t"\
1249 "add %3, %2 \n\t"\
1250 : "+a"(src), "+c"(dst), "+d"(src2)\
1251 : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
1252 "m"(ff_pw_16)\
1253 : "memory"\
1254 );\
1255 }while(--h);\
1256 }\
1257 \
1258 static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
1259 int w= 2;\
1260 src -= 2*srcStride;\
1261 \
1262 while(w--){\
1263 __asm__ volatile(\
1264 "pxor %%mm7, %%mm7 \n\t"\
1265 "movd (%0), %%mm0 \n\t"\
1266 "add %2, %0 \n\t"\
1267 "movd (%0), %%mm1 \n\t"\
1268 "add %2, %0 \n\t"\
1269 "movd (%0), %%mm2 \n\t"\
1270 "add %2, %0 \n\t"\
1271 "movd (%0), %%mm3 \n\t"\
1272 "add %2, %0 \n\t"\
1273 "movd (%0), %%mm4 \n\t"\
1274 "add %2, %0 \n\t"\
1275 "punpcklbw %%mm7, %%mm0 \n\t"\
1276 "punpcklbw %%mm7, %%mm1 \n\t"\
1277 "punpcklbw %%mm7, %%mm2 \n\t"\
1278 "punpcklbw %%mm7, %%mm3 \n\t"\
1279 "punpcklbw %%mm7, %%mm4 \n\t"\
1280 QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
1281 QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
1282 QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
1283 QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
1284 QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
1285 QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
1286 QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
1287 QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
1288 \
1289 : "+a"(src), "+c"(dst)\
1290 : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff _pw_16)\
1291 : "memory"\
1292 );\
1293 if(h==16){\
1294 __asm__ volatile(\
1295 QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
1296 QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
1297 QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
1298 QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
1299 QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
1300 QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
1301 QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
1302 QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
1303 \
1304 : "+a"(src), "+c"(dst)\
1305 : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m" (ff_pw_16)\
1306 : "memory"\
1307 );\
1308 }\
1309 src += 4-(h+5)*srcStride;\
1310 dst += 4-h*dstStride;\
1311 }\
1312 }\
1313 static av_always_inline void OPNAME ## h264_qpel8or16_hv1_lowpass_ ## MMX(int16_t *tmp, uint8_t *src, int tmpStride, int srcStride, int size){\
1314 int w = (size+8)>>2;\
1315 src -= 2*srcStride+2;\
1316 while(w--){\
1317 __asm__ volatile(\
1318 "pxor %%mm7, %%mm7 \n\t"\
1319 "movd (%0), %%mm0 \n\t"\
1320 "add %2, %0 \n\t"\
1321 "movd (%0), %%mm1 \n\t"\
1322 "add %2, %0 \n\t"\
1323 "movd (%0), %%mm2 \n\t"\
1324 "add %2, %0 \n\t"\
1325 "movd (%0), %%mm3 \n\t"\
1326 "add %2, %0 \n\t"\
1327 "movd (%0), %%mm4 \n\t"\
1328 "add %2, %0 \n\t"\
1329 "punpcklbw %%mm7, %%mm0 \n\t"\
1330 "punpcklbw %%mm7, %%mm1 \n\t"\
1331 "punpcklbw %%mm7, %%mm2 \n\t"\
1332 "punpcklbw %%mm7, %%mm3 \n\t"\
1333 "punpcklbw %%mm7, %%mm4 \n\t"\
1334 QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*48)\
1335 QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*48)\
1336 QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*48)\
1337 QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*48)\
1338 QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*48)\
1339 QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*48)\
1340 QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*48)\
1341 QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*48)\
1342 : "+a"(src)\
1343 : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\
1344 : "memory"\
1345 );\
1346 if(size==16){\
1347 __asm__ volatile(\
1348 QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 8*48)\
1349 QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 9*48)\
1350 QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 10*48)\
1351 QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 11*48)\
1352 QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 12*48)\
1353 QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 13*48)\
1354 QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 14*48)\
1355 QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 15*48)\
1356 : "+a"(src)\
1357 : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16) \
1358 : "memory"\
1359 );\
1360 }\
1361 tmp += 4;\
1362 src += 4 - (size+5)*srcStride;\
1363 }\
1364 }\
1365 static av_always_inline void OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size){\
1366 int w = size>>4;\
1367 do{\
1368 int h = size;\
1369 __asm__ volatile(\
1370 "1: \n\t"\
1371 "movq (%0), %%mm0 \n\t"\
1372 "movq 8(%0), %%mm3 \n\t"\
1373 "movq 2(%0), %%mm1 \n\t"\
1374 "movq 10(%0), %%mm4 \n\t"\
1375 "paddw %%mm4, %%mm0 \n\t"\
1376 "paddw %%mm3, %%mm1 \n\t"\
1377 "paddw 18(%0), %%mm3 \n\t"\
1378 "paddw 16(%0), %%mm4 \n\t"\
1379 "movq 4(%0), %%mm2 \n\t"\
1380 "movq 12(%0), %%mm5 \n\t"\
1381 "paddw 6(%0), %%mm2 \n\t"\
1382 "paddw 14(%0), %%mm5 \n\t"\
1383 "psubw %%mm1, %%mm0 \n\t"\
1384 "psubw %%mm4, %%mm3 \n\t"\
1385 "psraw $2, %%mm0 \n\t"\
1386 "psraw $2, %%mm3 \n\t"\
1387 "psubw %%mm1, %%mm0 \n\t"\
1388 "psubw %%mm4, %%mm3 \n\t"\
1389 "paddsw %%mm2, %%mm0 \n\t"\
1390 "paddsw %%mm5, %%mm3 \n\t"\
1391 "psraw $2, %%mm0 \n\t"\
1392 "psraw $2, %%mm3 \n\t"\
1393 "paddw %%mm2, %%mm0 \n\t"\
1394 "paddw %%mm5, %%mm3 \n\t"\
1395 "psraw $6, %%mm0 \n\t"\
1396 "psraw $6, %%mm3 \n\t"\
1397 "packuswb %%mm3, %%mm0 \n\t"\
1398 OP(%%mm0, (%1),%%mm7, q)\
1399 "add $48, %0 \n\t"\
1400 "add %3, %1 \n\t"\
1401 "decl %2 \n\t"\
1402 " jnz 1b \n\t"\
1403 : "+a"(tmp), "+c"(dst), "+g"(h)\
1404 : "S"((x86_reg)dstStride)\
1405 : "memory"\
1406 );\
1407 tmp += 8 - size*24;\
1408 dst += 8 - size*dstStride;\
1409 }while(w--);\
1410 }\
1411 \
1412 static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1413 OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 8);\
1414 }\
1415 static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1416 OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 16);\
1417 OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
1418 }\
1419 \
1420 static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1421 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
1422 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
1423 src += 8*srcStride;\
1424 dst += 8*dstStride;\
1425 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
1426 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
1427 }\
1428 \
1429 static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
1430 OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
1431 OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
1432 src += 8*dstStride;\
1433 dst += 8*dstStride;\
1434 src2 += 8*src2Stride;\
1435 OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
1436 OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
1437 }\
1438 \
1439 static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
1440 put_h264_qpel8or16_hv1_lowpass_ ## MMX(tmp, src, tmpStride, srcStride, size);\
1441 OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\
1442 }\
1443 static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
1444 OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 8);\
1445 }\
1446 \
1447 static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
1448 OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 16);\
1449 }\
1450 \
1451 static av_noinline void OPNAME ## pixels4_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
1452 {\
1453 __asm__ volatile(\
1454 "movq (%1), %%mm0 \n\t"\
1455 "movq 24(%1), %%mm1 \n\t"\
1456 "psraw $5, %%mm0 \n\t"\
1457 "psraw $5, %%mm1 \n\t"\
1458 "packuswb %%mm0, %%mm0 \n\t"\
1459 "packuswb %%mm1, %%mm1 \n\t"\
1460 PAVGB" (%0), %%mm0 \n\t"\
1461 PAVGB" (%0,%3), %%mm1 \n\t"\
1462 OP(%%mm0, (%2), %%mm4, d)\
1463 OP(%%mm1, (%2,%4), %%mm5, d)\
1464 "lea (%0,%3,2), %0 \n\t"\
1465 "lea (%2,%4,2), %2 \n\t"\
1466 "movq 48(%1), %%mm0 \n\t"\
1467 "movq 72(%1), %%mm1 \n\t"\
1468 "psraw $5, %%mm0 \n\t"\
1469 "psraw $5, %%mm1 \n\t"\
1470 "packuswb %%mm0, %%mm0 \n\t"\
1471 "packuswb %%mm1, %%mm1 \n\t"\
1472 PAVGB" (%0), %%mm0 \n\t"\
1473 PAVGB" (%0,%3), %%mm1 \n\t"\
1474 OP(%%mm0, (%2), %%mm4, d)\
1475 OP(%%mm1, (%2,%4), %%mm5, d)\
1476 :"+a"(src8), "+c"(src16), "+d"(dst)\
1477 :"S"((x86_reg)src8Stride), "D"((x86_reg)dstStride)\
1478 :"memory");\
1479 }\
1480 static av_noinline void OPNAME ## pixels8_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
1481 {\
1482 do{\
1483 __asm__ volatile(\
1484 "movq (%1), %%mm0 \n\t"\
1485 "movq 8(%1), %%mm1 \n\t"\
1486 "movq 48(%1), %%mm2 \n\t"\
1487 "movq 8+48(%1), %%mm3 \n\t"\
1488 "psraw $5, %%mm0 \n\t"\
1489 "psraw $5, %%mm1 \n\t"\
1490 "psraw $5, %%mm2 \n\t"\
1491 "psraw $5, %%mm3 \n\t"\
1492 "packuswb %%mm1, %%mm0 \n\t"\
1493 "packuswb %%mm3, %%mm2 \n\t"\
1494 PAVGB" (%0), %%mm0 \n\t"\
1495 PAVGB" (%0,%3), %%mm2 \n\t"\
1496 OP(%%mm0, (%2), %%mm5, q)\
1497 OP(%%mm2, (%2,%4), %%mm5, q)\
1498 ::"a"(src8), "c"(src16), "d"(dst),\
1499 "r"((x86_reg)src8Stride), "r"((x86_reg)dstStride)\
1500 :"memory");\
1501 src8 += 2L*src8Stride;\
1502 src16 += 48;\
1503 dst += 2L*dstStride;\
1504 }while(h-=2);\
1505 }\
1506 static void OPNAME ## pixels16_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
1507 {\
1508 OPNAME ## pixels8_l2_shift5_ ## MMX(dst , src16 , src8 , dstStride, src8Stride, h);\
1509 OPNAME ## pixels8_l2_shift5_ ## MMX(dst+8, src16+8, src8+8, dstStride, src8Stride, h);\
1510 }\
1511
1512
1513 #if ARCH_X86_64
1514 #define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
1515 static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
1516 int h=16;\
1517 __asm__ volatile(\
1518 "pxor %%xmm15, %%xmm15 \n\t"\
1519 "movdqa %6, %%xmm14 \n\t"\
1520 "movdqa %7, %%xmm13 \n\t"\
1521 "1: \n\t"\
1522 "lddqu 6(%0), %%xmm1 \n\t"\
1523 "lddqu -2(%0), %%xmm7 \n\t"\
1524 "movdqa %%xmm1, %%xmm0 \n\t"\
1525 "punpckhbw %%xmm15, %%xmm1 \n\t"\
1526 "punpcklbw %%xmm15, %%xmm0 \n\t"\
1527 "punpcklbw %%xmm15, %%xmm7 \n\t"\
1528 "movdqa %%xmm1, %%xmm2 \n\t"\
1529 "movdqa %%xmm0, %%xmm6 \n\t"\
1530 "movdqa %%xmm1, %%xmm3 \n\t"\
1531 "movdqa %%xmm0, %%xmm8 \n\t"\
1532 "movdqa %%xmm1, %%xmm4 \n\t"\
1533 "movdqa %%xmm0, %%xmm9 \n\t"\
1534 "movdqa %%xmm0, %%xmm12 \n\t"\
1535 "movdqa %%xmm1, %%xmm11 \n\t"\
1536 "palignr $10,%%xmm0, %%xmm11\n\t"\
1537 "palignr $10,%%xmm7, %%xmm12\n\t"\
1538 "palignr $2, %%xmm0, %%xmm4 \n\t"\
1539 "palignr $2, %%xmm7, %%xmm9 \n\t"\
1540 "palignr $4, %%xmm0, %%xmm3 \n\t"\
1541 "palignr $4, %%xmm7, %%xmm8 \n\t"\
1542 "palignr $6, %%xmm0, %%xmm2 \n\t"\
1543 "palignr $6, %%xmm7, %%xmm6 \n\t"\
1544 "paddw %%xmm0 ,%%xmm11 \n\t"\
1545 "palignr $8, %%xmm0, %%xmm1 \n\t"\
1546 "palignr $8, %%xmm7, %%xmm0 \n\t"\
1547 "paddw %%xmm12,%%xmm7 \n\t"\
1548 "paddw %%xmm3, %%xmm2 \n\t"\
1549 "paddw %%xmm8, %%xmm6 \n\t"\
1550 "paddw %%xmm4, %%xmm1 \n\t"\
1551 "paddw %%xmm9, %%xmm0 \n\t"\
1552 "psllw $2, %%xmm2 \n\t"\
1553 "psllw $2, %%xmm6 \n\t"\
1554 "psubw %%xmm1, %%xmm2 \n\t"\
1555 "psubw %%xmm0, %%xmm6 \n\t"\
1556 "paddw %%xmm13,%%xmm11 \n\t"\
1557 "paddw %%xmm13,%%xmm7 \n\t"\
1558 "pmullw %%xmm14,%%xmm2 \n\t"\
1559 "pmullw %%xmm14,%%xmm6 \n\t"\
1560 "lddqu (%2), %%xmm3 \n\t"\
1561 "paddw %%xmm11,%%xmm2 \n\t"\
1562 "paddw %%xmm7, %%xmm6 \n\t"\
1563 "psraw $5, %%xmm2 \n\t"\
1564 "psraw $5, %%xmm6 \n\t"\
1565 "packuswb %%xmm2,%%xmm6 \n\t"\
1566 "pavgb %%xmm3, %%xmm6 \n\t"\
1567 OP(%%xmm6, (%1), %%xmm4, dqa)\
1568 "add %5, %0 \n\t"\
1569 "add %5, %1 \n\t"\
1570 "add %4, %2 \n\t"\
1571 "decl %3 \n\t"\
1572 "jg 1b \n\t"\
1573 : "+a"(src), "+c"(dst), "+d"(src2), "+g"(h)\
1574 : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
1575 "m"(ff_pw_5), "m"(ff_pw_16)\
1576 : "memory"\
1577 );\
1578 } 699 }
1579 #else // ARCH_X86_64 700 LF_IFUNC(v8, luma_intra, mmxext)
1580 #define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\ 701 static void ff_x264_deblock_v_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta)
1581 static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ 702 {
1582 OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\ 703 ff_x264_deblock_v8_luma_intra_mmxext(pix+0, stride, alpha, beta);
1583 OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\ 704 ff_x264_deblock_v8_luma_intra_mmxext(pix+8, stride, alpha, beta);
1584 src += 8*dstStride;\
1585 dst += 8*dstStride;\
1586 src2 += 8*src2Stride;\
1587 OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
1588 OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
1589 } 705 }
1590 #endif // ARCH_X86_64
1591
1592 #define QPEL_H264_H_XMM(OPNAME, OP, MMX)\
1593 static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
1594 int h=8;\
1595 __asm__ volatile(\
1596 "pxor %%xmm7, %%xmm7 \n\t"\
1597 "movdqa %0, %%xmm6 \n\t"\
1598 :: "m"(ff_pw_5)\
1599 );\
1600 do{\
1601 __asm__ volatile(\
1602 "lddqu -2(%0), %%xmm1 \n\t"\
1603 "movdqa %%xmm1, %%xmm0 \n\t"\
1604 "punpckhbw %%xmm7, %%xmm1 \n\t"\
1605 "punpcklbw %%xmm7, %%xmm0 \n\t"\
1606 "movdqa %%xmm1, %%xmm2 \n\t"\
1607 "movdqa %%xmm1, %%xmm3 \n\t"\
1608 "movdqa %%xmm1, %%xmm4 \n\t"\
1609 "movdqa %%xmm1, %%xmm5 \n\t"\
1610 "palignr $2, %%xmm0, %%xmm4 \n\t"\
1611 "palignr $4, %%xmm0, %%xmm3 \n\t"\
1612 "palignr $6, %%xmm0, %%xmm2 \n\t"\
1613 "palignr $8, %%xmm0, %%xmm1 \n\t"\
1614 "palignr $10,%%xmm0, %%xmm5 \n\t"\
1615 "paddw %%xmm5, %%xmm0 \n\t"\
1616 "paddw %%xmm3, %%xmm2 \n\t"\
1617 "paddw %%xmm4, %%xmm1 \n\t"\
1618 "psllw $2, %%xmm2 \n\t"\
1619 "movq (%2), %%xmm3 \n\t"\
1620 "psubw %%xmm1, %%xmm2 \n\t"\
1621 "paddw %5, %%xmm0 \n\t"\
1622 "pmullw %%xmm6, %%xmm2 \n\t"\
1623 "paddw %%xmm0, %%xmm2 \n\t"\
1624 "psraw $5, %%xmm2 \n\t"\
1625 "packuswb %%xmm2, %%xmm2 \n\t"\
1626 "pavgb %%xmm3, %%xmm2 \n\t"\
1627 OP(%%xmm2, (%1), %%xmm4, q)\
1628 "add %4, %0 \n\t"\
1629 "add %4, %1 \n\t"\
1630 "add %3, %2 \n\t"\
1631 : "+a"(src), "+c"(dst), "+d"(src2)\
1632 : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
1633 "m"(ff_pw_16)\
1634 : "memory"\
1635 );\
1636 }while(--h);\
1637 }\
1638 QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
1639 \
1640 static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1641 int h=8;\
1642 __asm__ volatile(\
1643 "pxor %%xmm7, %%xmm7 \n\t"\
1644 "movdqa "MANGLE(ff_pw_5)", %%xmm6\n\t"\
1645 "1: \n\t"\
1646 "lddqu -2(%0), %%xmm1 \n\t"\
1647 "movdqa %%xmm1, %%xmm0 \n\t"\
1648 "punpckhbw %%xmm7, %%xmm1 \n\t"\
1649 "punpcklbw %%xmm7, %%xmm0 \n\t"\
1650 "movdqa %%xmm1, %%xmm2 \n\t"\
1651 "movdqa %%xmm1, %%xmm3 \n\t"\
1652 "movdqa %%xmm1, %%xmm4 \n\t"\
1653 "movdqa %%xmm1, %%xmm5 \n\t"\
1654 "palignr $2, %%xmm0, %%xmm4 \n\t"\
1655 "palignr $4, %%xmm0, %%xmm3 \n\t"\
1656 "palignr $6, %%xmm0, %%xmm2 \n\t"\
1657 "palignr $8, %%xmm0, %%xmm1 \n\t"\
1658 "palignr $10,%%xmm0, %%xmm5 \n\t"\
1659 "paddw %%xmm5, %%xmm0 \n\t"\
1660 "paddw %%xmm3, %%xmm2 \n\t"\
1661 "paddw %%xmm4, %%xmm1 \n\t"\
1662 "psllw $2, %%xmm2 \n\t"\
1663 "psubw %%xmm1, %%xmm2 \n\t"\
1664 "paddw "MANGLE(ff_pw_16)", %%xmm0\n\t"\
1665 "pmullw %%xmm6, %%xmm2 \n\t"\
1666 "paddw %%xmm0, %%xmm2 \n\t"\
1667 "psraw $5, %%xmm2 \n\t"\
1668 "packuswb %%xmm2, %%xmm2 \n\t"\
1669 OP(%%xmm2, (%1), %%xmm4, q)\
1670 "add %3, %0 \n\t"\
1671 "add %4, %1 \n\t"\
1672 "decl %2 \n\t"\
1673 " jnz 1b \n\t"\
1674 : "+a"(src), "+c"(dst), "+g"(h)\
1675 : "D"((x86_reg)srcStride), "S"((x86_reg)dstStride)\
1676 : "memory"\
1677 );\
1678 }\
1679 static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1680 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
1681 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
1682 src += 8*srcStride;\
1683 dst += 8*dstStride;\
1684 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
1685 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
1686 }\
1687
1688 #define QPEL_H264_V_XMM(OPNAME, OP, MMX)\
1689 static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
1690 src -= 2*srcStride;\
1691 \
1692 __asm__ volatile(\
1693 "pxor %%xmm7, %%xmm7 \n\t"\
1694 "movq (%0), %%xmm0 \n\t"\
1695 "add %2, %0 \n\t"\
1696 "movq (%0), %%xmm1 \n\t"\
1697 "add %2, %0 \n\t"\
1698 "movq (%0), %%xmm2 \n\t"\
1699 "add %2, %0 \n\t"\
1700 "movq (%0), %%xmm3 \n\t"\
1701 "add %2, %0 \n\t"\
1702 "movq (%0), %%xmm4 \n\t"\
1703 "add %2, %0 \n\t"\
1704 "punpcklbw %%xmm7, %%xmm0 \n\t"\
1705 "punpcklbw %%xmm7, %%xmm1 \n\t"\
1706 "punpcklbw %%xmm7, %%xmm2 \n\t"\
1707 "punpcklbw %%xmm7, %%xmm3 \n\t"\
1708 "punpcklbw %%xmm7, %%xmm4 \n\t"\
1709 QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
1710 QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
1711 QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
1712 QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
1713 QPEL_H264V_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, OP)\
1714 QPEL_H264V_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, OP)\
1715 QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
1716 QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
1717 \
1718 : "+a"(src), "+c"(dst)\
1719 : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff _pw_16)\
1720 : "memory"\
1721 );\
1722 if(h==16){\
1723 __asm__ volatile(\
1724 QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
1725 QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
1726 QPEL_H264V_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, OP)\
1727 QPEL_H264V_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, OP)\
1728 QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
1729 QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
1730 QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
1731 QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
1732 \
1733 : "+a"(src), "+c"(dst)\
1734 : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m "(ff_pw_16)\
1735 : "memory"\
1736 );\
1737 }\
1738 }\
1739 static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1740 OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 8);\
1741 }\
1742 static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1743 OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 16);\
1744 OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
1745 }
1746
1747 static av_always_inline void put_h264_qpel8or16_hv1_lowpass_sse2(int16_t *tmp, uint8_t *src, int tmpStride, int srcStride, int size){
1748 int w = (size+8)>>3;
1749 src -= 2*srcStride+2;
1750 while(w--){
1751 __asm__ volatile(
1752 "pxor %%xmm7, %%xmm7 \n\t"
1753 "movq (%0), %%xmm0 \n\t"
1754 "add %2, %0 \n\t"
1755 "movq (%0), %%xmm1 \n\t"
1756 "add %2, %0 \n\t"
1757 "movq (%0), %%xmm2 \n\t"
1758 "add %2, %0 \n\t"
1759 "movq (%0), %%xmm3 \n\t"
1760 "add %2, %0 \n\t"
1761 "movq (%0), %%xmm4 \n\t"
1762 "add %2, %0 \n\t"
1763 "punpcklbw %%xmm7, %%xmm0 \n\t"
1764 "punpcklbw %%xmm7, %%xmm1 \n\t"
1765 "punpcklbw %%xmm7, %%xmm2 \n\t"
1766 "punpcklbw %%xmm7, %%xmm3 \n\t"
1767 "punpcklbw %%xmm7, %%xmm4 \n\t"
1768 QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 0*48 )
1769 QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 1*48 )
1770 QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 2*48 )
1771 QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 3*48 )
1772 QPEL_H264HV_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, 4*48 )
1773 QPEL_H264HV_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, 5*48 )
1774 QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 6*48 )
1775 QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 7*48 )
1776 : "+a"(src)
1777 : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)
1778 : "memory"
1779 );
1780 if(size==16){
1781 __asm__ volatile(
1782 QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 8*48)
1783 QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 9*48)
1784 QPEL_H264HV_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, 10*48)
1785 QPEL_H264HV_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, 11*48)
1786 QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 12*48)
1787 QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 13*48)
1788 QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 14*48)
1789 QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 15*48)
1790 : "+a"(src)
1791 : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)
1792 : "memory"
1793 );
1794 }
1795 tmp += 8;
1796 src += 8 - (size+5)*srcStride;
1797 }
1798 }
1799
1800 #define QPEL_H264_HV2_XMM(OPNAME, OP, MMX)\
1801 static av_always_inline void OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size){\
1802 int h = size;\
1803 if(size == 16){\
1804 __asm__ volatile(\
1805 "1: \n\t"\
1806 "movdqa 32(%0), %%xmm4 \n\t"\
1807 "movdqa 16(%0), %%xmm5 \n\t"\
1808 "movdqa (%0), %%xmm7 \n\t"\
1809 "movdqa %%xmm4, %%xmm3 \n\t"\
1810 "movdqa %%xmm4, %%xmm2 \n\t"\
1811 "movdqa %%xmm4, %%xmm1 \n\t"\
1812 "movdqa %%xmm4, %%xmm0 \n\t"\
1813 "palignr $10, %%xmm5, %%xmm0 \n\t"\
1814 "palignr $8, %%xmm5, %%xmm1 \n\t"\
1815 "palignr $6, %%xmm5, %%xmm2 \n\t"\
1816 "palignr $4, %%xmm5, %%xmm3 \n\t"\
1817 "palignr $2, %%xmm5, %%xmm4 \n\t"\
1818 "paddw %%xmm5, %%xmm0 \n\t"\
1819 "paddw %%xmm4, %%xmm1 \n\t"\
1820 "paddw %%xmm3, %%xmm2 \n\t"\
1821 "movdqa %%xmm5, %%xmm6 \n\t"\
1822 "movdqa %%xmm5, %%xmm4 \n\t"\
1823 "movdqa %%xmm5, %%xmm3 \n\t"\
1824 "palignr $8, %%xmm7, %%xmm4 \n\t"\
1825 "palignr $2, %%xmm7, %%xmm6 \n\t"\
1826 "palignr $10, %%xmm7, %%xmm3 \n\t"\
1827 "paddw %%xmm6, %%xmm4 \n\t"\
1828 "movdqa %%xmm5, %%xmm6 \n\t"\
1829 "palignr $6, %%xmm7, %%xmm5 \n\t"\
1830 "palignr $4, %%xmm7, %%xmm6 \n\t"\
1831 "paddw %%xmm7, %%xmm3 \n\t"\
1832 "paddw %%xmm6, %%xmm5 \n\t"\
1833 \
1834 "psubw %%xmm1, %%xmm0 \n\t"\
1835 "psubw %%xmm4, %%xmm3 \n\t"\
1836 "psraw $2, %%xmm0 \n\t"\
1837 "psraw $2, %%xmm3 \n\t"\
1838 "psubw %%xmm1, %%xmm0 \n\t"\
1839 "psubw %%xmm4, %%xmm3 \n\t"\
1840 "paddw %%xmm2, %%xmm0 \n\t"\
1841 "paddw %%xmm5, %%xmm3 \n\t"\
1842 "psraw $2, %%xmm0 \n\t"\
1843 "psraw $2, %%xmm3 \n\t"\
1844 "paddw %%xmm2, %%xmm0 \n\t"\
1845 "paddw %%xmm5, %%xmm3 \n\t"\
1846 "psraw $6, %%xmm0 \n\t"\
1847 "psraw $6, %%xmm3 \n\t"\
1848 "packuswb %%xmm0, %%xmm3 \n\t"\
1849 OP(%%xmm3, (%1), %%xmm7, dqa)\
1850 "add $48, %0 \n\t"\
1851 "add %3, %1 \n\t"\
1852 "decl %2 \n\t"\
1853 " jnz 1b \n\t"\
1854 : "+a"(tmp), "+c"(dst), "+g"(h)\
1855 : "S"((x86_reg)dstStride)\
1856 : "memory"\
1857 );\
1858 }else{\
1859 __asm__ volatile(\
1860 "1: \n\t"\
1861 "movdqa 16(%0), %%xmm1 \n\t"\
1862 "movdqa (%0), %%xmm0 \n\t"\
1863 "movdqa %%xmm1, %%xmm2 \n\t"\
1864 "movdqa %%xmm1, %%xmm3 \n\t"\
1865 "movdqa %%xmm1, %%xmm4 \n\t"\
1866 "movdqa %%xmm1, %%xmm5 \n\t"\
1867 "palignr $10, %%xmm0, %%xmm5 \n\t"\
1868 "palignr $8, %%xmm0, %%xmm4 \n\t"\
1869 "palignr $6, %%xmm0, %%xmm3 \n\t"\
1870 "palignr $4, %%xmm0, %%xmm2 \n\t"\
1871 "palignr $2, %%xmm0, %%xmm1 \n\t"\
1872 "paddw %%xmm5, %%xmm0 \n\t"\
1873 "paddw %%xmm4, %%xmm1 \n\t"\
1874 "paddw %%xmm3, %%xmm2 \n\t"\
1875 "psubw %%xmm1, %%xmm0 \n\t"\
1876 "psraw $2, %%xmm0 \n\t"\
1877 "psubw %%xmm1, %%xmm0 \n\t"\
1878 "paddw %%xmm2, %%xmm0 \n\t"\
1879 "psraw $2, %%xmm0 \n\t"\
1880 "paddw %%xmm2, %%xmm0 \n\t"\
1881 "psraw $6, %%xmm0 \n\t"\
1882 "packuswb %%xmm0, %%xmm0 \n\t"\
1883 OP(%%xmm0, (%1), %%xmm7, q)\
1884 "add $48, %0 \n\t"\
1885 "add %3, %1 \n\t"\
1886 "decl %2 \n\t"\
1887 " jnz 1b \n\t"\
1888 : "+a"(tmp), "+c"(dst), "+g"(h)\
1889 : "S"((x86_reg)dstStride)\
1890 : "memory"\
1891 );\
1892 }\
1893 }
1894
1895 #define QPEL_H264_HV_XMM(OPNAME, OP, MMX)\
1896 static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
1897     put_h264_qpel8or16_hv1_lowpass_sse2(tmp, src, tmpStride, srcStride, size);\
1898 OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\
1899 }\
1900 static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
1901 OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 8);\
1902 }\
1903 static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
1904 OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 16);\
1905 }\
1906
1907 #define put_pixels8_l2_sse2 put_pixels8_l2_mmx2
1908 #define avg_pixels8_l2_sse2 avg_pixels8_l2_mmx2
1909 #define put_pixels16_l2_sse2 put_pixels16_l2_mmx2
1910 #define avg_pixels16_l2_sse2 avg_pixels16_l2_mmx2
1911 #define put_pixels8_l2_ssse3 put_pixels8_l2_mmx2
1912 #define avg_pixels8_l2_ssse3 avg_pixels8_l2_mmx2
1913 #define put_pixels16_l2_ssse3 put_pixels16_l2_mmx2
1914 #define avg_pixels16_l2_ssse3 avg_pixels16_l2_mmx2
1915
1916 #define put_pixels8_l2_shift5_sse2 put_pixels8_l2_shift5_mmx2
1917 #define avg_pixels8_l2_shift5_sse2 avg_pixels8_l2_shift5_mmx2
1918 #define put_pixels16_l2_shift5_sse2 put_pixels16_l2_shift5_mmx2
1919 #define avg_pixels16_l2_shift5_sse2 avg_pixels16_l2_shift5_mmx2
1920 #define put_pixels8_l2_shift5_ssse3 put_pixels8_l2_shift5_mmx2
1921 #define avg_pixels8_l2_shift5_ssse3 avg_pixels8_l2_shift5_mmx2
1922 #define put_pixels16_l2_shift5_ssse3 put_pixels16_l2_shift5_mmx2
1923 #define avg_pixels16_l2_shift5_ssse3 avg_pixels16_l2_shift5_mmx2
1924
1925 #define put_h264_qpel8_h_lowpass_l2_sse2 put_h264_qpel8_h_lowpass_l2_mmx2
1926 #define avg_h264_qpel8_h_lowpass_l2_sse2 avg_h264_qpel8_h_lowpass_l2_mmx2
1927 #define put_h264_qpel16_h_lowpass_l2_sse2 put_h264_qpel16_h_lowpass_l2_mmx2
1928 #define avg_h264_qpel16_h_lowpass_l2_sse2 avg_h264_qpel16_h_lowpass_l2_mmx2
1929
1930 #define put_h264_qpel8_v_lowpass_ssse3 put_h264_qpel8_v_lowpass_sse2
1931 #define avg_h264_qpel8_v_lowpass_ssse3 avg_h264_qpel8_v_lowpass_sse2
1932 #define put_h264_qpel16_v_lowpass_ssse3 put_h264_qpel16_v_lowpass_sse2
1933 #define avg_h264_qpel16_v_lowpass_ssse3 avg_h264_qpel16_v_lowpass_sse2
1934
1935 #define put_h264_qpel8or16_hv2_lowpass_sse2 put_h264_qpel8or16_hv2_lowpass_mmx2
1936 #define avg_h264_qpel8or16_hv2_lowpass_sse2 avg_h264_qpel8or16_hv2_lowpass_mmx2
1937
1938 #define H264_MC(OPNAME, SIZE, MMX, ALIGN) \
1939 H264_MC_C(OPNAME, SIZE, MMX, ALIGN)\
1940 H264_MC_V(OPNAME, SIZE, MMX, ALIGN)\
1941 H264_MC_H(OPNAME, SIZE, MMX, ALIGN)\
1942 H264_MC_HV(OPNAME, SIZE, MMX, ALIGN)\
1943
1944 static void put_h264_qpel16_mc00_sse2 (uint8_t *dst, uint8_t *src, int stride){
1945 put_pixels16_sse2(dst, src, stride, 16);
1946 }
1947 static void avg_h264_qpel16_mc00_sse2 (uint8_t *dst, uint8_t *src, int stride){
1948 avg_pixels16_sse2(dst, src, stride, 16);
1949 }
1950 #define put_h264_qpel8_mc00_sse2 put_h264_qpel8_mc00_mmx2
1951 #define avg_h264_qpel8_mc00_sse2 avg_h264_qpel8_mc00_mmx2
1952
1953 #define H264_MC_C(OPNAME, SIZE, MMX, ALIGN) \
1954 static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
1955 OPNAME ## pixels ## SIZE ## _ ## MMX(dst, src, stride, SIZE);\
1956 }\
1957
1958 #define H264_MC_H(OPNAME, SIZE, MMX, ALIGN) \
1959 static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1960 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src, stride, stride);\
1961 }\
1962 \
1963 static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1964 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride); \
1965 }\
1966 \
1967 static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1968 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src+1, stride, stride);\
1969 }\
1970
1971 #define H264_MC_V(OPNAME, SIZE, MMX, ALIGN) \
1972 static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1973 DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
1974 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
1975 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, temp, stride, stride, SIZE);\
1976 }\
1977 \
1978 static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1979 OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride); \
1980 }\
1981 \
1982 static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t * src, int stride){\
1983 DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
1984 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
1985 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, temp, stride, stride, SIZE);\
1986 }\
1987
1988 #define H264_MC_HV(OPNAME, SIZE, MMX, ALIGN) \
1989 static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t * src, int stride){\
1990 DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
1991 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
1992 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
1993 }\
1994 \
1995 static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t * src, int stride){\
1996 DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
1997 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
1998 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
1999 }\
2000 \
2001 static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t * src, int stride){\
2002 DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
2003 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
2004 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
2005 }\
2006 \
2007 static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t * src, int stride){\
2008 DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
2009 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
2010 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
2011 }\
2012 \
2013 static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t * src, int stride){\
2014 DECLARE_ALIGNED(ALIGN, uint16_t, temp)[SIZE*(SIZE<8?12:24)];\
2015 OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, temp, src, stride, SIZE, stride);\
2016 }\
2017 \
2018 static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t * src, int stride){\
2019 DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
2020 uint8_t * const halfHV= temp;\
2021 int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
2022 assert(((int)temp & 7) == 0);\
2023 put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
2024 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfHV, stride, SIZE);\
2025 }\
2026 \
2027 static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t * src, int stride){\
2028 DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
2029 uint8_t * const halfHV= temp;\
2030 int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
2031 assert(((int)temp & 7) == 0);\
2032 put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
2033 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfHV, stride, SIZE);\
2034 }\
2035 \
2036 static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t * src, int stride){\
2037 DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
2038 uint8_t * const halfHV= temp;\
2039 int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
2040 assert(((int)temp & 7) == 0);\
2041 put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
2042 OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+2, halfHV, stride, SIZE, SIZE);\
2043 }\
2044 \
2045 static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t * src, int stride){\
2046 DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
2047 uint8_t * const halfHV= temp;\
2048 int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
2049 assert(((int)temp & 7) == 0);\
2050 put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
2051 OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+3, halfHV, stride, SIZE, SIZE);\
2052 }\
2053
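/* Reviewer-style note, not part of the change (a hedged reading of the macros
 * above): in _mcXY_ the X is the horizontal and Y the vertical quarter-pel
 * phase. The pure half-pel cases (mc20/mc02/mc22) run the 6-tap lowpass
 * directly; the other phases build a half-pel intermediate into temp and then
 * average it against the nearest full- or half-pel samples via the *_l2*
 * helpers. Per pixel, mc10 for example amounts to roughly:
 *
 *     int b = six_tap_h(src, x);     // illustrative shorthand, not a function in this file
 *     dst[x] = (src[x] + av_clip_uint8(b) + 1) >> 1;
 */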
2054 #define H264_MC_4816(MMX)\
2055 H264_MC(put_, 4, MMX, 8)\
2056 H264_MC(put_, 8, MMX, 8)\
2057 H264_MC(put_, 16,MMX, 8)\
2058 H264_MC(avg_, 4, MMX, 8)\
2059 H264_MC(avg_, 8, MMX, 8)\
2060 H264_MC(avg_, 16,MMX, 8)\
2061
2062 #define H264_MC_816(QPEL, XMM)\
2063 QPEL(put_, 8, XMM, 16)\
2064 QPEL(put_, 16,XMM, 16)\
2065 QPEL(avg_, 8, XMM, 16)\
2066 QPEL(avg_, 16,XMM, 16)\
2067
2068
2069 #define AVG_3DNOW_OP(a,b,temp, size) \
2070 "mov" #size " " #b ", " #temp " \n\t"\
2071 "pavgusb " #temp ", " #a " \n\t"\
2072 "mov" #size " " #a ", " #b " \n\t"
2073 #define AVG_MMX2_OP(a,b,temp, size) \
2074 "mov" #size " " #b ", " #temp " \n\t"\
2075 "pavgb " #temp ", " #a " \n\t"\
2076 "mov" #size " " #a ", " #b " \n\t"
2077
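/* Reviewer-style note, not part of the change: PAVGUSB (3DNow!) and PAVGB
 * (MMX2/SSE) both compute a rounded per-byte average, so the AVG_*_OP macros
 * read the destination, average it with the freshly computed prediction and
 * store the result back -- per byte, roughly:
 *
 *     dst = (dst + pred + 1) >> 1;
 */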
2078 #define PAVGB "pavgusb"
2079 QPEL_H264(put_, PUT_OP, 3dnow)
2080 QPEL_H264(avg_, AVG_3DNOW_OP, 3dnow)
2081 #undef PAVGB
2082 #define PAVGB "pavgb"
2083 QPEL_H264(put_, PUT_OP, mmx2)
2084 QPEL_H264(avg_, AVG_MMX2_OP, mmx2)
2085 QPEL_H264_V_XMM(put_, PUT_OP, sse2)
2086 QPEL_H264_V_XMM(avg_, AVG_MMX2_OP, sse2)
2087 QPEL_H264_HV_XMM(put_, PUT_OP, sse2)
2088 QPEL_H264_HV_XMM(avg_, AVG_MMX2_OP, sse2)
2089 #if HAVE_SSSE3
2090 QPEL_H264_H_XMM(put_, PUT_OP, ssse3)
2091 QPEL_H264_H_XMM(avg_, AVG_MMX2_OP, ssse3)
2092 QPEL_H264_HV2_XMM(put_, PUT_OP, ssse3)
2093 QPEL_H264_HV2_XMM(avg_, AVG_MMX2_OP, ssse3)
2094 QPEL_H264_HV_XMM(put_, PUT_OP, ssse3)
2095 QPEL_H264_HV_XMM(avg_, AVG_MMX2_OP, ssse3)
2096 #endif
2097 #undef PAVGB
2098
2099 H264_MC_4816(3dnow)
2100 H264_MC_4816(mmx2)
2101 H264_MC_816(H264_MC_V, sse2)
2102 H264_MC_816(H264_MC_HV, sse2)
2103 #if HAVE_SSSE3
2104 H264_MC_816(H264_MC_H, ssse3)
2105 H264_MC_816(H264_MC_HV, ssse3)
2106 #endif 706 #endif
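/* Reviewer-style note, not part of the change: H264_MC_4816 instantiates the
 * put/avg MC functions for the 4, 8 and 16 pixel block sizes of one MMX-class
 * variant, while H264_MC_816 re-instantiates only the 8/16 sizes (with
 * ALIGN=16 scratch buffers) for the subset passed in, e.g.
 * H264_MC_816(H264_MC_V, sse2) emits put/avg_h264_qpel{8,16}_mc0{1,2,3}_sse2.
 */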
2107 707
2108 /* rnd interleaved with rnd div 8, use p+1 to access rnd div 8 */ 708 LF_FUNC (h, luma, sse2)
2109 DECLARE_ALIGNED(8, static const uint64_t, h264_rnd_reg)[4] = { 709 LF_IFUNC(h, luma_intra, sse2)
2110 0x0020002000200020ULL, 0x0004000400040004ULL, 0x001C001C001C001CULL, 0x0003000300030003ULL 710 LF_FUNC (v, luma, sse2)
2111 }; 711 LF_IFUNC(v, luma_intra, sse2)
2112
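/* Reviewer-style note, not part of the change: H.264 chroma MC is the
 * bilinear filter
 *
 *     dst = (A*s[0] + B*s[1] + C*s[stride] + D*s[stride+1] + 32) >> 6
 *
 * with A=(8-x)*(8-y), B=x*(8-y), C=(8-x)*y, D=x*y, degenerating to a 1-D
 * filter with rounding constant 4 and shift 3 when x or y is zero. The table
 * above therefore interleaves 32 with 4 for the H.264 paths, and the reduced
 * constants 28 and 3 for the VC-1 "no rounding" wrappers below, which index
 * the table at h264_rnd_reg+2.
 */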
2113 #define H264_CHROMA_OP(S,D)
2114 #define H264_CHROMA_OP4(S,D,T)
2115 #define H264_CHROMA_MC8_TMPL put_h264_chroma_generic_mc8_mmx
2116 #define H264_CHROMA_MC4_TMPL put_h264_chroma_generic_mc4_mmx
2117 #define H264_CHROMA_MC2_TMPL put_h264_chroma_mc2_mmx2
2118 #define H264_CHROMA_MC8_MV0 put_pixels8_mmx
2119 #include "dsputil_h264_template_mmx.c"
2120
2121 static void put_h264_chroma_mc8_mmx_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
2122 {
2123 put_h264_chroma_generic_mc8_mmx(dst, src, stride, h, x, y, h264_rnd_reg);
2124 }
2125 static void put_vc1_chroma_mc8_mmx_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
2126 {
2127 put_h264_chroma_generic_mc8_mmx(dst, src, stride, h, x, y, h264_rnd_reg+2);
2128 }
2129 static void put_h264_chroma_mc4_mmx(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
2130 {
2131 put_h264_chroma_generic_mc4_mmx(dst, src, stride, h, x, y, h264_rnd_reg);
2132 }
2133
2134 #undef H264_CHROMA_OP
2135 #undef H264_CHROMA_OP4
2136 #undef H264_CHROMA_MC8_TMPL
2137 #undef H264_CHROMA_MC4_TMPL
2138 #undef H264_CHROMA_MC2_TMPL
2139 #undef H264_CHROMA_MC8_MV0
2140
2141 #define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t"
2142 #define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
2143 "pavgb " #T ", " #D " \n\t"
2144 #define H264_CHROMA_MC8_TMPL avg_h264_chroma_generic_mc8_mmx2
2145 #define H264_CHROMA_MC4_TMPL avg_h264_chroma_generic_mc4_mmx2
2146 #define H264_CHROMA_MC2_TMPL avg_h264_chroma_mc2_mmx2
2147 #define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
2148 #include "dsputil_h264_template_mmx.c"
2149 static void avg_h264_chroma_mc8_mmx2_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
2150 {
2151 avg_h264_chroma_generic_mc8_mmx2(dst, src, stride, h, x, y, h264_rnd_reg);
2152 }
2153 static void avg_vc1_chroma_mc8_mmx2_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
2154 {
2155 avg_h264_chroma_generic_mc8_mmx2(dst, src, stride, h, x, y, h264_rnd_reg+2);
2156 }
2157 static void avg_h264_chroma_mc4_mmx2(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
2158 {
2159 avg_h264_chroma_generic_mc4_mmx2(dst, src, stride, h, x, y, h264_rnd_reg);
2160 }
2161 #undef H264_CHROMA_OP
2162 #undef H264_CHROMA_OP4
2163 #undef H264_CHROMA_MC8_TMPL
2164 #undef H264_CHROMA_MC4_TMPL
2165 #undef H264_CHROMA_MC2_TMPL
2166 #undef H264_CHROMA_MC8_MV0
2167
2168 #define H264_CHROMA_OP(S,D) "pavgusb " #S ", " #D " \n\t"
2169 #define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
2170 "pavgusb " #T ", " #D " \n\t"
2171 #define H264_CHROMA_MC8_TMPL avg_h264_chroma_generic_mc8_3dnow
2172 #define H264_CHROMA_MC4_TMPL avg_h264_chroma_generic_mc4_3dnow
2173 #define H264_CHROMA_MC8_MV0 avg_pixels8_3dnow
2174 #include "dsputil_h264_template_mmx.c"
2175 static void avg_h264_chroma_mc8_3dnow_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
2176 {
2177 avg_h264_chroma_generic_mc8_3dnow(dst, src, stride, h, x, y, h264_rnd_reg);
2178 }
2179 static void avg_h264_chroma_mc4_3dnow(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
2180 {
2181 avg_h264_chroma_generic_mc4_3dnow(dst, src, stride, h, x, y, h264_rnd_reg);
2182 }
2183 #undef H264_CHROMA_OP
2184 #undef H264_CHROMA_OP4
2185 #undef H264_CHROMA_MC8_TMPL
2186 #undef H264_CHROMA_MC4_TMPL
2187 #undef H264_CHROMA_MC8_MV0
2188
2189 #if HAVE_SSSE3
2190 #define AVG_OP(X)
2191 #undef H264_CHROMA_MC8_TMPL
2192 #undef H264_CHROMA_MC4_TMPL
2193 #define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_ssse3
2194 #define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_ssse3
2195 #define H264_CHROMA_MC8_MV0 put_pixels8_mmx
2196 #include "dsputil_h264_template_ssse3.c"
2197 static void put_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
2198 {
2199 put_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1);
2200 }
2201 static void put_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
2202 {
2203 put_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 0);
2204 }
2205
2206 #undef AVG_OP
2207 #undef H264_CHROMA_MC8_TMPL
2208 #undef H264_CHROMA_MC4_TMPL
2209 #undef H264_CHROMA_MC8_MV0
2210 #define AVG_OP(X) X
2211 #define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_ssse3
2212 #define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_ssse3
2213 #define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
2214 #include "dsputil_h264_template_ssse3.c"
2215 static void avg_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
2216 {
2217 avg_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1);
2218 }
2219 static void avg_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
2220 {
2221 avg_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 0);
2222 }
2223 #undef AVG_OP
2224 #undef H264_CHROMA_MC8_TMPL
2225 #undef H264_CHROMA_MC4_TMPL
2226 #undef H264_CHROMA_MC8_MV0
2227 #endif
2228 712
2229 /***********************************/ 713 /***********************************/
2230 /* weighted prediction */ 714 /* weighted prediction */
2231 715
2232 static inline void ff_h264_weight_WxH_mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset, int w, int h) 716 #define H264_WEIGHT(W, H, OPT) \
717 void ff_h264_weight_ ## W ## x ## H ## _ ## OPT(uint8_t *dst, \
718 int stride, int log2_denom, int weight, int offset);
719
720 #define H264_BIWEIGHT(W, H, OPT) \
721 void ff_h264_biweight_ ## W ## x ## H ## _ ## OPT(uint8_t *dst, \
722 uint8_t *src, int stride, int log2_denom, int weightd, \
723 int weights, int offset);
724
725 #define H264_BIWEIGHT_MMX(W,H) \
726 H264_WEIGHT (W, H, mmx2) \
727 H264_BIWEIGHT(W, H, mmx2)
728
729 #define H264_BIWEIGHT_MMX_SSE(W,H) \
730 H264_BIWEIGHT_MMX(W, H) \
731 H264_WEIGHT (W, H, sse2) \
732 H264_BIWEIGHT (W, H, sse2) \
733 H264_BIWEIGHT (W, H, ssse3)
734
735 H264_BIWEIGHT_MMX_SSE(16, 16)
736 H264_BIWEIGHT_MMX_SSE(16, 8)
737 H264_BIWEIGHT_MMX_SSE( 8, 16)
738 H264_BIWEIGHT_MMX_SSE( 8, 8)
739 H264_BIWEIGHT_MMX_SSE( 8, 4)
740 H264_BIWEIGHT_MMX ( 4, 8)
741 H264_BIWEIGHT_MMX ( 4, 4)
742 H264_BIWEIGHT_MMX ( 4, 2)
743
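/* Reviewer-style note, not part of the change: the macros above only declare
 * prototypes for the external assembly implementations, e.g.
 * H264_WEIGHT(16, 16, mmx2) expands to
 *
 *     void ff_h264_weight_16x16_mmx2(uint8_t *dst, int stride,
 *                                    int log2_denom, int weight, int offset);
 */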
744 void ff_h264dsp_init_x86(H264DSPContext *c)
2233 { 745 {
2234 int x, y; 746 int mm_flags = av_get_cpu_flags();
2235 offset <<= log2_denom; 747
2236 offset += (1 << log2_denom) >> 1; 748 if (mm_flags & AV_CPU_FLAG_MMX) {
2237 __asm__ volatile( 749 c->h264_idct_dc_add=
2238 "movd %0, %%mm4 \n\t" 750 c->h264_idct_add= ff_h264_idct_add_mmx;
2239 "movd %1, %%mm5 \n\t" 751 c->h264_idct8_dc_add=
2240 "movd %2, %%mm6 \n\t" 752 c->h264_idct8_add= ff_h264_idct8_add_mmx;
2241 "pshufw $0, %%mm4, %%mm4 \n\t" 753
2242 "pshufw $0, %%mm5, %%mm5 \n\t" 754 c->h264_idct_add16 = ff_h264_idct_add16_mmx;
2243 "pxor %%mm7, %%mm7 \n\t" 755 c->h264_idct8_add4 = ff_h264_idct8_add4_mmx;
2244 :: "g"(weight), "g"(offset), "g"(log2_denom) 756 c->h264_idct_add8 = ff_h264_idct_add8_mmx;
2245 ); 757 c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx;
2246 for(y=0; y<h; y+=2){ 758
2247 for(x=0; x<w; x+=4){ 759 if (mm_flags & AV_CPU_FLAG_MMX2) {
2248 __asm__ volatile( 760 c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
2249 "movd %0, %%mm0 \n\t" 761 c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;
2250 "movd %1, %%mm1 \n\t" 762 c->h264_idct_add16 = ff_h264_idct_add16_mmx2;
2251 "punpcklbw %%mm7, %%mm0 \n\t" 763 c->h264_idct8_add4 = ff_h264_idct8_add4_mmx2;
2252 "punpcklbw %%mm7, %%mm1 \n\t" 764 c->h264_idct_add8 = ff_h264_idct_add8_mmx2;
2253 "pmullw %%mm4, %%mm0 \n\t" 765 c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx2;
2254 "pmullw %%mm4, %%mm1 \n\t" 766
2255 "paddsw %%mm5, %%mm0 \n\t" 767 c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;
2256 "paddsw %%mm5, %%mm1 \n\t"
2257 "psraw %%mm6, %%mm0 \n\t"
2258 "psraw %%mm6, %%mm1 \n\t"
2259 "packuswb %%mm7, %%mm0 \n\t"
2260 "packuswb %%mm7, %%mm1 \n\t"
2261 "movd %%mm0, %0 \n\t"
2262 "movd %%mm1, %1 \n\t"
2263 : "+m"(*(uint32_t*)(dst+x)),
2264 "+m"(*(uint32_t*)(dst+x+stride))
2265 );
2266 } 768 }
2267 dst += 2*stride; 769 if(mm_flags & AV_CPU_FLAG_SSE2){
770 c->h264_idct8_add = ff_h264_idct8_add_sse2;
771 c->h264_idct8_add4= ff_h264_idct8_add4_sse2;
772 }
773
774 #if HAVE_YASM
775 if (mm_flags & AV_CPU_FLAG_MMX2){
776 c->h264_v_loop_filter_chroma= ff_x264_deblock_v_chroma_mmxext;
777 c->h264_h_loop_filter_chroma= ff_x264_deblock_h_chroma_mmxext;
778 c->h264_v_loop_filter_chroma_intra= ff_x264_deblock_v_chroma_intra_mmxext;
779 c->h264_h_loop_filter_chroma_intra= ff_x264_deblock_h_chroma_intra_mmxext;
780 #if ARCH_X86_32
781 c->h264_v_loop_filter_luma= ff_x264_deblock_v_luma_mmxext;
782 c->h264_h_loop_filter_luma= ff_x264_deblock_h_luma_mmxext;
783 c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_mmxext;
784 c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_mmxext;
785 #endif
786 c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
787 c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
788 c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
789 c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
790 c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
791 c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
792 c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
793 c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;
794
795 c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
796 c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
797 c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
798 c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
799 c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
800 c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
801 c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
802 c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;
803
804 if (mm_flags&AV_CPU_FLAG_SSE2) {
805 c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_sse2;
806 c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_sse2;
807 c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_sse2;
808 c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_sse2;
809 c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_sse2;
810
811 c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_sse2;
812 c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_sse2;
813 c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_sse2;
814 c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_sse2;
815 c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_sse2;
816
817 #if ARCH_X86_64 || !defined(__ICC) || __ICC > 1110
818 c->h264_v_loop_filter_luma = ff_x264_deblock_v_luma_sse2;
819 c->h264_h_loop_filter_luma = ff_x264_deblock_h_luma_sse2;
820 c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_sse2;
821 c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_sse2;
822 #endif
823 #if CONFIG_GPL
824 c->h264_idct_add16 = ff_h264_idct_add16_sse2;
825 c->h264_idct_add8 = ff_h264_idct_add8_sse2;
826 c->h264_idct_add16intra = ff_h264_idct_add16intra_sse2;
827 #endif
828 }
829 if (mm_flags&AV_CPU_FLAG_SSSE3) {
830 c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_ssse3;
831 c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_ssse3;
832 c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_ssse3;
833 c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_ssse3;
834 c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_ssse3;
835 }
836 }
837 #endif
2268 } 838 }
2269 } 839 }
2270
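/* Reviewer-style note, not part of the change: a scalar sketch of the weight
 * kernel above. offset is pre-scaled so the per-pixel work is a single
 * multiply, add, shift and clip (av_clip_uint8 used here for illustration):
 *
 *     offset = (offset << log2_denom) + ((1 << log2_denom) >> 1);
 *     // for each pixel:
 *     dst[x] = av_clip_uint8((dst[x] * weight + offset) >> log2_denom);
 */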
2271 static inline void ff_h264_biweight_WxH_mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset, int w, int h)
2272 {
2273 int x, y;
2274 offset = ((offset + 1) | 1) << log2_denom;
2275 __asm__ volatile(
2276 "movd %0, %%mm3 \n\t"
2277 "movd %1, %%mm4 \n\t"
2278 "movd %2, %%mm5 \n\t"
2279 "movd %3, %%mm6 \n\t"
2280 "pshufw $0, %%mm3, %%mm3 \n\t"
2281 "pshufw $0, %%mm4, %%mm4 \n\t"
2282 "pshufw $0, %%mm5, %%mm5 \n\t"
2283 "pxor %%mm7, %%mm7 \n\t"
2284 :: "g"(weightd), "g"(weights), "g"(offset), "g"(log2_denom+1)
2285 );
2286 for(y=0; y<h; y++){
2287 for(x=0; x<w; x+=4){
2288 __asm__ volatile(
2289 "movd %0, %%mm0 \n\t"
2290 "movd %1, %%mm1 \n\t"
2291 "punpcklbw %%mm7, %%mm0 \n\t"
2292 "punpcklbw %%mm7, %%mm1 \n\t"
2293 "pmullw %%mm3, %%mm0 \n\t"
2294 "pmullw %%mm4, %%mm1 \n\t"
2295 "paddsw %%mm1, %%mm0 \n\t"
2296 "paddsw %%mm5, %%mm0 \n\t"
2297 "psraw %%mm6, %%mm0 \n\t"
2298 "packuswb %%mm0, %%mm0 \n\t"
2299 "movd %%mm0, %0 \n\t"
2300 : "+m"(*(uint32_t*)(dst+x))
2301 : "m"(*(uint32_t*)(src+x))
2302 );
2303 }
2304 src += stride;
2305 dst += stride;
2306 }
2307 }
2308
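/* Reviewer-style note, not part of the change: a scalar sketch of the
 * biweight kernel above. dst initially holds the list-0 prediction and src
 * the list-1 prediction; ((offset + 1) | 1) forces the low bit so the
 * pre-shifted offset always contains the 2^log2_denom rounding bias:
 *
 *     int off = ((offset + 1) | 1) << log2_denom;
 *     // for each pixel:
 *     dst[x] = av_clip_uint8((dst[x] * weightd + src[x] * weights + off)
 *                            >> (log2_denom + 1));
 */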
2309 #define H264_WEIGHT(W,H) \
2310 static void ff_h264_biweight_ ## W ## x ## H ## _mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
2311 ff_h264_biweight_WxH_mmx2(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \
2312 } \
2313 static void ff_h264_weight_ ## W ## x ## H ## _mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset){ \
2314 ff_h264_weight_WxH_mmx2(dst, stride, log2_denom, weight, offset, W, H); \
2315 }
2316
2317 H264_WEIGHT(16,16)
2318 H264_WEIGHT(16, 8)
2319 H264_WEIGHT( 8,16)
2320 H264_WEIGHT( 8, 8)
2321 H264_WEIGHT( 8, 4)
2322 H264_WEIGHT( 4, 8)
2323 H264_WEIGHT( 4, 4)
2324 H264_WEIGHT( 4, 2)
2325
2326 void ff_h264_biweight_8x8_sse2(uint8_t *dst, uint8_t *src, int stride,
2327 int log2_denom, int weightd, int weights,
2328 int offset);
2329
2330 void ff_h264_biweight_16x16_sse2(uint8_t *dst, uint8_t *src, int stride,
2331 int log2_denom, int weightd, int weights,
2332 int offset);
2333
2334 void ff_h264_biweight_8x8_ssse3(uint8_t *dst, uint8_t *src, int stride,
2335 int log2_denom, int weightd, int weights,
2336 int offset);
2337
2338 void ff_h264_biweight_16x16_ssse3(uint8_t *dst, uint8_t *src, int stride,
2339 int log2_denom, int weightd, int weights,
2340 int offset);
2341
2342 void ff_pred16x16_vertical_mmx (uint8_t *src, int stride);
2343 void ff_pred16x16_vertical_sse (uint8_t *src, int stride);
2344 void ff_pred16x16_horizontal_mmx (uint8_t *src, int stride);
2345 void ff_pred16x16_horizontal_mmxext(uint8_t *src, int stride);
2346 void ff_pred16x16_horizontal_ssse3 (uint8_t *src, int stride);
2347 void ff_pred16x16_dc_mmxext (uint8_t *src, int stride);
2348 void ff_pred16x16_dc_sse2 (uint8_t *src, int stride);
2349 void ff_pred16x16_dc_ssse3 (uint8_t *src, int stride);
2350 void ff_pred16x16_tm_vp8_mmx (uint8_t *src, int stride);
2351 void ff_pred16x16_tm_vp8_mmxext (uint8_t *src, int stride);
2352 void ff_pred16x16_tm_vp8_sse2 (uint8_t *src, int stride);
2353 void ff_pred8x8_dc_rv40_mmxext (uint8_t *src, int stride);
2354 void ff_pred8x8_vertical_mmx (uint8_t *src, int stride);
2355 void ff_pred8x8_horizontal_mmx (uint8_t *src, int stride);
2356 void ff_pred8x8_horizontal_mmxext (uint8_t *src, int stride);
2357 void ff_pred8x8_horizontal_ssse3 (uint8_t *src, int stride);
2358 void ff_pred8x8_tm_vp8_mmx (uint8_t *src, int stride);
2359 void ff_pred8x8_tm_vp8_mmxext (uint8_t *src, int stride);
2360 void ff_pred8x8_tm_vp8_sse2 (uint8_t *src, int stride);
2361 void ff_pred8x8_tm_vp8_ssse3 (uint8_t *src, int stride);
2362 void ff_pred4x4_dc_mmxext          (uint8_t *src, const uint8_t *topright, int stride);
2363 void ff_pred4x4_tm_vp8_mmx         (uint8_t *src, const uint8_t *topright, int stride);
2364 void ff_pred4x4_tm_vp8_mmxext      (uint8_t *src, const uint8_t *topright, int stride);
2365 void ff_pred4x4_tm_vp8_ssse3       (uint8_t *src, const uint8_t *topright, int stride);
2366 void ff_pred4x4_vertical_vp8_mmxext(uint8_t *src, const uint8_t *topright, int stride);
2367
2368 #if CONFIG_H264PRED
2369 void ff_h264_pred_init_x86(H264PredContext *h, int codec_id)
2370 {
2371 mm_flags = mm_support();
2372
2373 #if HAVE_YASM
2374 if (mm_flags & FF_MM_MMX) {
2375 h->pred16x16[VERT_PRED8x8] = ff_pred16x16_vertical_mmx;
2376 h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_mmx;
2377 h->pred8x8 [VERT_PRED8x8] = ff_pred8x8_vertical_mmx;
2378 h->pred8x8 [HOR_PRED8x8 ] = ff_pred8x8_horizontal_mmx;
2379 if (codec_id == CODEC_ID_VP8) {
2380 h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_tm_vp8_mmx;
2381 h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_tm_vp8_mmx;
2382 h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_mmx;
2383 }
2384 }
2385
2386 if (mm_flags & FF_MM_MMX2) {
2387 h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_mmxext;
2388 h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_mmxext;
2389 h->pred8x8 [HOR_PRED8x8 ] = ff_pred8x8_horizontal_mmxext;
2390 h->pred4x4 [DC_PRED ] = ff_pred4x4_dc_mmxext;
2391 if (codec_id == CODEC_ID_VP8) {
2392 h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_tm_vp8_mmxext;
2393 h->pred8x8 [DC_PRED8x8 ] = ff_pred8x8_dc_rv40_mmxext;
2394 h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_tm_vp8_mmxext;
2395 h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_mmxext;
2396 h->pred4x4 [VERT_PRED ] = ff_pred4x4_vertical_vp8_mmxext;
2397 }
2398 }
2399
2400 if (mm_flags & FF_MM_SSE) {
2401 h->pred16x16[VERT_PRED8x8] = ff_pred16x16_vertical_sse;
2402 }
2403
2404 if (mm_flags & FF_MM_SSE2) {
2405 h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_sse2;
2406 if (codec_id == CODEC_ID_VP8) {
2407 h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_tm_vp8_sse2;
2408 h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_tm_vp8_sse2;
2409 }
2410 }
2411
2412 if (mm_flags & FF_MM_SSSE3) {
2413 h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_ssse3;
2414 h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_ssse3;
2415 h->pred8x8 [HOR_PRED8x8 ] = ff_pred8x8_horizontal_ssse3;
2416 if (codec_id == CODEC_ID_VP8) {
2417 h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_tm_vp8_ssse3;
2418 h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_ssse3;
2419 }
2420 }
2421 #endif
2422 }
2423 #endif