;
;  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
;
;  Use of this source code is governed by a BSD-style license
;  that can be found in the LICENSE file in the root of the source
;  tree. An additional intellectual property rights grant can be found
;  in the file PATENTS.  All contributing project authors may
;  be found in the AUTHORS file in the root of the source tree.
;
%include "third_party/x86inc/x86inc.asm"

; This file provides SSSE3 version of the forward transformation. Part
; of the macro definitions are originally derived from the ffmpeg project.
; The current version applies to x86 64-bit only.

SECTION_RODATA

; 2 * 11585, where 11585/2^14 ~= sqrt(2)/2.  Doubled so that pmulhrsw's
; implicit >>15 yields round(x * 11585 >> 14).
pw_11585x2: times 8 dw 23170
; rounding bias (1 << 13) added before the >>14 in the butterfly stages
pd_8192:    times 4 dd 8192

; TRANSFORM_COEFFS c1, c2 -- emit two interleaved coefficient rows:
;   pw_<c1>_<c2>:  {c1,  c2} x4 -> pmaddwd on (a, b) pairs gives c1*a + c2*b
;   pw_<c2>_m<c1>: {c2, -c1} x4 -> pmaddwd on (a, b) pairs gives c2*a - c1*b
%macro TRANSFORM_COEFFS 2
pw_%1_%2:  dw  %1,  %2,  %1,  %2,  %1,  %2,  %1,  %2
pw_%2_m%1: dw  %2, -%1,  %2, -%1,  %2, -%1,  %2, -%1
%endmacro

; cosine constant pairs (Q14) used by the DCT butterflies below
TRANSFORM_COEFFS 15137,  6270
TRANSFORM_COEFFS 16069,  3196
TRANSFORM_COEFFS  9102, 13623

SECTION .text

%if ARCH_X86_64
; SUM_SUB a, b, tmp -- word-lane butterfly:
;   m<a> <- m<a> + m<b>,  m<b> <- m<a> - m<b>   (m<tmp> is scratch)
%macro SUM_SUB 3
  psubw   m%3, m%1, m%2                 ; tmp = a - b
  paddw   m%1, m%2                      ; a   = a + b
  SWAP    %2, %3                        ; rename: b <- tmp
%endmacro

; butterfly operation
; MUL_ADD_2X dst1, dst2, src, round, coefs1, coefs2
; Dot-product core for the rotation butterfly:
;   m<dst1> = (pmaddwd(m<src>, coefs1) + round) >> 14
;   m<dst2> = (pmaddwd(m<src>, coefs2) + round) >> 14
%macro MUL_ADD_2X 6
  pmaddwd m%1, m%3, %5
  pmaddwd m%2, m%3, %6
  paddd   m%1, %4                       ; add rounding bias (pd_8192)
  paddd   m%2, %4
  psrad   m%1, 14                       ; back to Q0 from Q14 products
  psrad   m%2, 14
%endmacro

; BUTTERFLY_4X a, b, c1, c2, round, tmp1, tmp2
; Rotation by the constant pair (c1, c2) on word lanes (Q14, rounded):
;   m<a> <- (c1*m<a> + c2*m<b> + round) >> 14
;   m<b> <- (c1*m<b> - c2*m<a> + round) >> 14
; High and low halves are widened via punpck, multiplied with pmaddwd,
; then repacked to signed words.
%macro BUTTERFLY_4X 7
  punpckhwd m%6, m%2, m%1               ; high half: (b, a) pairs
  MUL_ADD_2X %7, %6, %6, %5, [pw_%4_%3], [pw_%3_m%4]
  punpcklwd m%2, m%1                    ; low half:  (b, a) pairs
  MUL_ADD_2X %1, %2, %2, %5, [pw_%4_%3], [pw_%3_m%4]
  packssdw  m%1, m%7                    ; merge low|high dwords into words
  packssdw  m%2, m%6
%endmacro

; matrix transpose
; INTERLEAVE_2X gran, a, b, tmp -- one transpose step at granularity
; gran (wd = words, dq = dwords, qdq = qwords):
;   m<a> <- punpckl<gran>(a, b),  m<b> <- punpckh<gran>(a, b)
%macro INTERLEAVE_2X 4
  punpckh%1 m%4, m%2, m%3
  punpckl%1 m%2, m%3
  SWAP %3, %4                           ; rename: b <- high interleave
%endmacro

; TRANSPOSE8X8 r0..r7, tmp -- transpose the 8x8 word matrix held in
; m<r0>..m<r7> (one row per register), using m<tmp> as scratch.
%macro TRANSPOSE8X8 9
  ; stage 1: interleave 16-bit elements of adjacent row pairs
  INTERLEAVE_2X wd, %1, %2, %9
  INTERLEAVE_2X wd, %3, %4, %9
  INTERLEAVE_2X wd, %5, %6, %9
  INTERLEAVE_2X wd, %7, %8, %9

  ; stage 2: interleave 32-bit elements
  INTERLEAVE_2X dq, %1, %3, %9
  INTERLEAVE_2X dq, %2, %4, %9
  INTERLEAVE_2X dq, %5, %7, %9
  INTERLEAVE_2X dq, %6, %8, %9

  ; stage 3: interleave 64-bit halves
  INTERLEAVE_2X qdq, %1, %5, %9
  INTERLEAVE_2X qdq, %3, %7, %9
  INTERLEAVE_2X qdq, %2, %6, %9
  INTERLEAVE_2X qdq, %4, %8, %9

  ; put rows back in natural order
  SWAP %2, %5
  SWAP %4, %7
%endmacro

; 1D forward 8x8 DCT transform
; Operates in place on m0..m7 (one 8-wide vector per coefficient lane).
; Expects m8 = pd_8192 (rounding) and m12 = pw_11585x2; clobbers m9, m10.
%macro FDCT8_1D 0
  ; stage 1: x[i] + x[7-i] into m0..m3, x[i] - x[7-i] into m7..m4
  SUM_SUB 0, 7, 9
  SUM_SUB 1, 6, 9
  SUM_SUB 2, 5, 9
  SUM_SUB 3, 4, 9

  ; even half: two more butterfly levels
  SUM_SUB 0, 3, 9
  SUM_SUB 1, 2, 9
  SUM_SUB 6, 5, 9
  SUM_SUB 0, 1, 9

  ; rotate (m2, m3) by (6270, 15137)
  BUTTERFLY_4X 2, 3, 6270, 15137, m8, 9, 10

  ; x * (2*11585) via pmulhrsw == round(x * 11585 >> 14)
  pmulhrsw m6, m12
  pmulhrsw m5, m12
  pmulhrsw m0, m12
  pmulhrsw m1, m12

  ; odd half: butterflies then the two remaining rotations
  SUM_SUB 4, 5, 9
  SUM_SUB 7, 6, 9
  BUTTERFLY_4X 4, 7, 3196, 16069, m8, 9, 10
  BUTTERFLY_4X 5, 6, 13623, 9102, m8, 9, 10

  ; reorder outputs into natural coefficient order
  SWAP 1, 4
  SWAP 3, 6
%endmacro

; DIVIDE_ROUND_2X a, b, tmp1, tmp2
; Word-wise divide by 2 rounding toward zero on m<a> and m<b>:
; psraw ..,15 gives the sign mask (-1 for negative lanes), and subtracting
; it adds 1 to negative lanes before the arithmetic >>1.
%macro DIVIDE_ROUND_2X 4
  psraw   m%3, m%1, 15                  ; tmp1 = sign mask of a
  psraw   m%4, m%2, 15                  ; tmp2 = sign mask of b
  psubw   m%1, m%3                      ; a += (a < 0)
  psubw   m%2, m%4                      ; b += (b < 0)
  psraw   m%1, 1
  psraw   m%2, 1
%endmacro

INIT_XMM ssse3
;------------------------------------------------------------------------------
; void fdct8x8(const int16_t *input, int16_t *output, int stride)
;
; Forward 8x8 DCT, SSSE3, x86-64 only (uses xmm8-xmm12).
; 3 args, 5 GPRs, 13 XMM registers (per the cglobal declaration).
; NOTE(review): rows are loaded 2*stride bytes apart, so stride appears to
; be counted in int16 elements -- confirm against callers.
;------------------------------------------------------------------------------
cglobal fdct8x8, 3, 5, 13, input, output, stride

  mova    m8,  [pd_8192]                ; rounding term for the >>14 stages
  mova    m12, [pw_11585x2]             ; 2*11585 for pmulhrsw
  pxor    m11, m11                      ; zeroed (not referenced below; kept
                                        ; from original -- TODO confirm unused)

  ; load the eight 8x16-bit input rows into m0..m7
  lea     r3, [2 * strideq]             ; one row, in bytes
  lea     r4, [4 * strideq]             ; two rows, in bytes
  mova    m0, [inputq]
  mova    m1, [inputq + r3]
  lea     inputq, [inputq + r4]
  mova    m2, [inputq]
  mova    m3, [inputq + r3]
  lea     inputq, [inputq + r4]
  mova    m4, [inputq]
  mova    m5, [inputq + r3]
  lea     inputq, [inputq + r4]
  mova    m6, [inputq]
  mova    m7, [inputq + r3]

  ; left shift by 2 to increase forward transformation precision
  psllw   m0, 2
  psllw   m1, 2
  psllw   m2, 2
  psllw   m3, 2
  psllw   m4, 2
  psllw   m5, 2
  psllw   m6, 2
  psllw   m7, 2

  ; column transform, then transpose
  FDCT8_1D
  TRANSPOSE8X8 0, 1, 2, 3, 4, 5, 6, 7, 9

  ; row transform, then transpose back to natural order
  FDCT8_1D
  TRANSPOSE8X8 0, 1, 2, 3, 4, 5, 6, 7, 9

  ; final divide by 2, rounding toward zero
  DIVIDE_ROUND_2X 0, 1, 9, 10
  DIVIDE_ROUND_2X 2, 3, 9, 10
  DIVIDE_ROUND_2X 4, 5, 9, 10
  DIVIDE_ROUND_2X 6, 7, 9, 10

  ; store the 8x8 coefficient block contiguously (8 rows of 16 bytes)
  mova    [outputq +   0], m0
  mova    [outputq +  16], m1
  mova    [outputq +  32], m2
  mova    [outputq +  48], m3
  mova    [outputq +  64], m4
  mova    [outputq +  80], m5
  mova    [outputq +  96], m6
  mova    [outputq + 112], m7

  RET
%endif