| OLD | NEW |
| 1 ; | 1 ; |
| 2 ; Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 ; Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
| 3 ; | 3 ; |
| 4 ; Use of this source code is governed by a BSD-style license | 4 ; Use of this source code is governed by a BSD-style license |
| 5 ; that can be found in the LICENSE file in the root of the source | 5 ; that can be found in the LICENSE file in the root of the source |
| 6 ; tree. An additional intellectual property rights grant can be found | 6 ; tree. An additional intellectual property rights grant can be found |
| 7 ; in the file PATENTS. All contributing project authors may | 7 ; in the file PATENTS. All contributing project authors may |
| 8 ; be found in the AUTHORS file in the root of the source tree. | 8 ; be found in the AUTHORS file in the root of the source tree. |
| 9 ; | 9 ; |
| 10 | 10 |
| | 11 %define program_name vpx |
| | 12 |
| 11 %include "third_party/x86inc/x86inc.asm" | 13 %include "third_party/x86inc/x86inc.asm" |
| 12 | 14 |
| 13 SECTION .text | 15 SECTION .text |
| 14 | 16 |
| 15 %macro SAD_FN 4 | 17 %macro SAD_FN 4 |
| 16 %if %4 == 0 | 18 %if %4 == 0 |
| 17 %if %3 == 5 | 19 %if %3 == 5 |
| 18 cglobal sad%1x%2, 4, %3, 5, src, src_stride, ref, ref_stride, n_rows | 20 cglobal sad%1x%2, 4, %3, 5, src, src_stride, ref, ref_stride, n_rows |
| 19 %else ; %3 == 7 | 21 %else ; %3 == 7 |
| 20 cglobal sad%1x%2, 4, %3, 5, src, src_stride, ref, ref_stride, \ | 22 cglobal sad%1x%2, 4, %3, 5, src, src_stride, ref, ref_stride, \ |
| (...skipping 16 matching lines...) |
| 37 %endif ; %3 == 5/7 | 39 %endif ; %3 == 5/7 |
| 38 %endif ; avg/sad | 40 %endif ; avg/sad |
| 39 movsxdifnidn src_strideq, src_strided | 41 movsxdifnidn src_strideq, src_strided |
| 40 movsxdifnidn ref_strideq, ref_strided | 42 movsxdifnidn ref_strideq, ref_strided |
| 41 %if %3 == 7 | 43 %if %3 == 7 |
| 42 lea src_stride3q, [src_strideq*3] | 44 lea src_stride3q, [src_strideq*3] |
| 43 lea ref_stride3q, [ref_strideq*3] | 45 lea ref_stride3q, [ref_strideq*3] |
| 44 %endif ; %3 == 7 | 46 %endif ; %3 == 7 |
| 45 %endmacro | 47 %endmacro |
| 46 | 48 |
| 47 ; unsigned int vp9_sad64x64_sse2(uint8_t *src, int src_stride, | 49 ; unsigned int vpx_sad64x64_sse2(uint8_t *src, int src_stride, |
| 48 ; uint8_t *ref, int ref_stride); | 50 ; uint8_t *ref, int ref_stride); |
| 49 %macro SAD64XN 1-2 0 | 51 %macro SAD64XN 1-2 0 |
| 50 SAD_FN 64, %1, 5, %2 | 52 SAD_FN 64, %1, 5, %2 |
| 51 mov n_rowsd, %1 | 53 mov n_rowsd, %1 |
| 52 pxor m0, m0 | 54 pxor m0, m0 |
| 53 .loop: | 55 .loop: |
| 54 movu m1, [refq] | 56 movu m1, [refq] |
| 55 movu m2, [refq+16] | 57 movu m2, [refq+16] |
| 56 movu m3, [refq+32] | 58 movu m3, [refq+32] |
| 57 movu m4, [refq+48] | 59 movu m4, [refq+48] |
| (...skipping 22 matching lines...) |
| 80 movd eax, m0 | 82 movd eax, m0 |
| 81 RET | 83 RET |
| 82 %endmacro | 84 %endmacro |
| 83 | 85 |
| 84 INIT_XMM sse2 | 86 INIT_XMM sse2 |
| 85 SAD64XN 64 ; sad64x64_sse2 | 87 SAD64XN 64 ; sad64x64_sse2 |
| 86 SAD64XN 32 ; sad64x32_sse2 | 88 SAD64XN 32 ; sad64x32_sse2 |
| 87 SAD64XN 64, 1 ; sad64x64_avg_sse2 | 89 SAD64XN 64, 1 ; sad64x64_avg_sse2 |
| 88 SAD64XN 32, 1 ; sad64x32_avg_sse2 | 90 SAD64XN 32, 1 ; sad64x32_avg_sse2 |
| 89 | 91 |
| 90 ; unsigned int vp9_sad32x32_sse2(uint8_t *src, int src_stride, | 92 ; unsigned int vpx_sad32x32_sse2(uint8_t *src, int src_stride, |
| 91 ; uint8_t *ref, int ref_stride); | 93 ; uint8_t *ref, int ref_stride); |
| 92 %macro SAD32XN 1-2 0 | 94 %macro SAD32XN 1-2 0 |
| 93 SAD_FN 32, %1, 5, %2 | 95 SAD_FN 32, %1, 5, %2 |
| 94 mov n_rowsd, %1/2 | 96 mov n_rowsd, %1/2 |
| 95 pxor m0, m0 | 97 pxor m0, m0 |
| 96 .loop: | 98 .loop: |
| 97 movu m1, [refq] | 99 movu m1, [refq] |
| 98 movu m2, [refq+16] | 100 movu m2, [refq+16] |
| 99 movu m3, [refq+ref_strideq] | 101 movu m3, [refq+ref_strideq] |
| 100 movu m4, [refq+ref_strideq+16] | 102 movu m4, [refq+ref_strideq+16] |
| (...skipping 24 matching lines...) |
| 125 %endmacro | 127 %endmacro |
| 126 | 128 |
| 127 INIT_XMM sse2 | 129 INIT_XMM sse2 |
| 128 SAD32XN 64 ; sad32x64_sse2 | 130 SAD32XN 64 ; sad32x64_sse2 |
| 129 SAD32XN 32 ; sad32x32_sse2 | 131 SAD32XN 32 ; sad32x32_sse2 |
| 130 SAD32XN 16 ; sad32x16_sse2 | 132 SAD32XN 16 ; sad32x16_sse2 |
| 131 SAD32XN 64, 1 ; sad32x64_avg_sse2 | 133 SAD32XN 64, 1 ; sad32x64_avg_sse2 |
| 132 SAD32XN 32, 1 ; sad32x32_avg_sse2 | 134 SAD32XN 32, 1 ; sad32x32_avg_sse2 |
| 133 SAD32XN 16, 1 ; sad32x16_avg_sse2 | 135 SAD32XN 16, 1 ; sad32x16_avg_sse2 |
| 134 | 136 |
| 135 ; unsigned int vp9_sad16x{8,16}_sse2(uint8_t *src, int src_stride, | 137 ; unsigned int vpx_sad16x{8,16}_sse2(uint8_t *src, int src_stride, |
| 136 ; uint8_t *ref, int ref_stride); | 138 ; uint8_t *ref, int ref_stride); |
| 137 %macro SAD16XN 1-2 0 | 139 %macro SAD16XN 1-2 0 |
| 138 SAD_FN 16, %1, 7, %2 | 140 SAD_FN 16, %1, 7, %2 |
| 139 mov n_rowsd, %1/4 | 141 mov n_rowsd, %1/4 |
| 140 pxor m0, m0 | 142 pxor m0, m0 |
| 141 | 143 |
| 142 .loop: | 144 .loop: |
| 143 movu m1, [refq] | 145 movu m1, [refq] |
| 144 movu m2, [refq+ref_strideq] | 146 movu m2, [refq+ref_strideq] |
| 145 movu m3, [refq+ref_strideq*2] | 147 movu m3, [refq+ref_strideq*2] |
| (...skipping 25 matching lines...) |
| 171 %endmacro | 173 %endmacro |
| 172 | 174 |
| 173 INIT_XMM sse2 | 175 INIT_XMM sse2 |
| 174 SAD16XN 32 ; sad16x32_sse2 | 176 SAD16XN 32 ; sad16x32_sse2 |
| 175 SAD16XN 16 ; sad16x16_sse2 | 177 SAD16XN 16 ; sad16x16_sse2 |
| 176 SAD16XN 8 ; sad16x8_sse2 | 178 SAD16XN 8 ; sad16x8_sse2 |
| 177 SAD16XN 32, 1 ; sad16x32_avg_sse2 | 179 SAD16XN 32, 1 ; sad16x32_avg_sse2 |
| 178 SAD16XN 16, 1 ; sad16x16_avg_sse2 | 180 SAD16XN 16, 1 ; sad16x16_avg_sse2 |
| 179 SAD16XN 8, 1 ; sad16x8_avg_sse2 | 181 SAD16XN 8, 1 ; sad16x8_avg_sse2 |
| 180 | 182 |
| 181 ; unsigned int vp9_sad8x{8,16}_sse2(uint8_t *src, int src_stride, | 183 ; unsigned int vpx_sad8x{8,16}_sse2(uint8_t *src, int src_stride, |
| 182 ; uint8_t *ref, int ref_stride); | 184 ; uint8_t *ref, int ref_stride); |
| 183 %macro SAD8XN 1-2 0 | 185 %macro SAD8XN 1-2 0 |
| 184 SAD_FN 8, %1, 7, %2 | 186 SAD_FN 8, %1, 7, %2 |
| 185 mov n_rowsd, %1/4 | 187 mov n_rowsd, %1/4 |
| 186 pxor m0, m0 | 188 pxor m0, m0 |
| 187 | 189 |
| 188 .loop: | 190 .loop: |
| 189 movh m1, [refq] | 191 movh m1, [refq] |
| 190 movhps m1, [refq+ref_strideq] | 192 movhps m1, [refq+ref_strideq] |
| 191 movh m2, [refq+ref_strideq*2] | 193 movh m2, [refq+ref_strideq*2] |
| (...skipping 23 matching lines...) |
| 215 %endmacro | 217 %endmacro |
| 216 | 218 |
| 217 INIT_XMM sse2 | 219 INIT_XMM sse2 |
| 218 SAD8XN 16 ; sad8x16_sse2 | 220 SAD8XN 16 ; sad8x16_sse2 |
| 219 SAD8XN 8 ; sad8x8_sse2 | 221 SAD8XN 8 ; sad8x8_sse2 |
| 220 SAD8XN 4 ; sad8x4_sse2 | 222 SAD8XN 4 ; sad8x4_sse2 |
| 221 SAD8XN 16, 1 ; sad8x16_avg_sse2 | 223 SAD8XN 16, 1 ; sad8x16_avg_sse2 |
| 222 SAD8XN 8, 1 ; sad8x8_avg_sse2 | 224 SAD8XN 8, 1 ; sad8x8_avg_sse2 |
| 223 SAD8XN 4, 1 ; sad8x4_avg_sse2 | 225 SAD8XN 4, 1 ; sad8x4_avg_sse2 |
| 224 | 226 |
| 225 ; unsigned int vp9_sad4x{4, 8}_sse(uint8_t *src, int src_stride, | 227 ; unsigned int vpx_sad4x{4, 8}_sse(uint8_t *src, int src_stride, |
| 226 ; uint8_t *ref, int ref_stride); | 228 ; uint8_t *ref, int ref_stride); |
| 227 %macro SAD4XN 1-2 0 | 229 %macro SAD4XN 1-2 0 |
| 228 SAD_FN 4, %1, 7, %2 | 230 SAD_FN 4, %1, 7, %2 |
| 229 mov n_rowsd, %1/4 | 231 mov n_rowsd, %1/4 |
| 230 pxor m0, m0 | 232 pxor m0, m0 |
| 231 | 233 |
| 232 .loop: | 234 .loop: |
| 233 movd m1, [refq] | 235 movd m1, [refq] |
| 234 movd m2, [refq+ref_strideq] | 236 movd m2, [refq+ref_strideq] |
| 235 movd m3, [refq+ref_strideq*2] | 237 movd m3, [refq+ref_strideq*2] |
| (...skipping 22 matching lines...) |
| 258 | 260 |
| 259 movd eax, m0 | 261 movd eax, m0 |
| 260 RET | 262 RET |
| 261 %endmacro | 263 %endmacro |
| 262 | 264 |
| 263 INIT_MMX sse | 265 INIT_MMX sse |
| 264 SAD4XN 8 ; sad4x8_sse | 266 SAD4XN 8 ; sad4x8_sse |
| 265 SAD4XN 4 ; sad4x4_sse | 267 SAD4XN 4 ; sad4x4_sse |
| 266 SAD4XN 8, 1 ; sad4x8_avg_sse | 268 SAD4XN 8, 1 ; sad4x8_avg_sse |
| 267 SAD4XN 4, 1 ; sad4x4_avg_sse | 269 SAD4XN 4, 1 ; sad4x4_avg_sse |
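
For reference, every routine in this patch computes a sum of absolute differences (SAD) between a source block and a reference block, per the C prototypes in the comments (e.g. vpx_sad64x64_sse2). The sketch below is not part of the patch: it is a minimal scalar equivalent, assuming the documented uint8_t pointer and stride arguments; the name sad_ref and the explicit width/height parameters are illustrative only.

    /* Scalar sketch of what the SSE2 SAD kernels compute; not the patched code. */
    #include <stdint.h>
    #include <stdlib.h>

    static unsigned int sad_ref(const uint8_t *src, int src_stride,
                                const uint8_t *ref, int ref_stride,
                                int width, int height) {
      unsigned int sad = 0;
      for (int r = 0; r < height; ++r) {
        for (int c = 0; c < width; ++c)
          sad += abs(src[c] - ref[c]);   /* absolute pixel difference */
        src += src_stride;               /* step to the next row of each block */
        ref += ref_stride;
      }
      return sad;
    }

The _avg variants exercised by the SAD_FN macro additionally average the reference block with a second predictor before the comparison; that path is omitted from this sketch.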