| OLD | NEW |
| 1 /* | 1 /* |
| 2 * VP8 DSP functions x86-optimized | 2 * VP8 DSP functions x86-optimized |
| 3 * Copyright (c) 2010 Ronald S. Bultje <rsbultje@gmail.com> | 3 * Copyright (c) 2010 Ronald S. Bultje <rsbultje@gmail.com> |
| 4 * Copyright (c) 2010 Jason Garrett-Glaser <darkshikari@gmail.com> | 4 * Copyright (c) 2010 Jason Garrett-Glaser <darkshikari@gmail.com> |
| 5 * | 5 * |
| 6 * This file is part of FFmpeg. | 6 * This file is part of FFmpeg. |
| 7 * | 7 * |
| 8 * FFmpeg is free software; you can redistribute it and/or | 8 * FFmpeg is free software; you can redistribute it and/or |
| 9 * modify it under the terms of the GNU Lesser General Public | 9 * modify it under the terms of the GNU Lesser General Public |
| 10 * License as published by the Free Software Foundation; either | 10 * License as published by the Free Software Foundation; either |
| 11 * version 2.1 of the License, or (at your option) any later version. | 11 * version 2.1 of the License, or (at your option) any later version. |
| 12 * | 12 * |
| 13 * FFmpeg is distributed in the hope that it will be useful, | 13 * FFmpeg is distributed in the hope that it will be useful, |
| 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of | 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 16 * Lesser General Public License for more details. | 16 * Lesser General Public License for more details. |
| 17 * | 17 * |
| 18 * You should have received a copy of the GNU Lesser General Public | 18 * You should have received a copy of the GNU Lesser General Public |
| 19 * License along with FFmpeg; if not, write to the Free Software | 19 * License along with FFmpeg; if not, write to the Free Software |
| 20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | 20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
| 21 */ | 21 */ |
| 22 | 22 |
|  | 23 #include "libavutil/cpu.h" |
| 23 #include "libavutil/x86_cpu.h" | 24 #include "libavutil/x86_cpu.h" |
| 24 #include "libavcodec/vp8dsp.h" | 25 #include "libavcodec/vp8dsp.h" |
| 25 | 26 |
| 26 #if HAVE_YASM | 27 #if HAVE_YASM |
| 27 | 28 |
| 28 /* | 29 /* |
| 29 * MC functions | 30 * MC functions |
| 30 */ | 31 */ |
| 31 extern void ff_put_vp8_epel4_h4_mmxext(uint8_t *dst, int dststride, | 32 extern void ff_put_vp8_epel4_h4_mmxext(uint8_t *dst, int dststride, |
| 32 uint8_t *src, int srcstride, | 33 uint8_t *src, int srcstride, |
| (...skipping 242 matching lines...) |
| 275 c->put_vp8_bilinear_pixels_tab[IDX][1][0] = ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT; \ | 276 c->put_vp8_bilinear_pixels_tab[IDX][1][0] = ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT; \ |
| 276 c->put_vp8_bilinear_pixels_tab[IDX][1][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \ | 277 c->put_vp8_bilinear_pixels_tab[IDX][1][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \ |
| 277 c->put_vp8_bilinear_pixels_tab[IDX][1][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \ | 278 c->put_vp8_bilinear_pixels_tab[IDX][1][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \ |
| 278 c->put_vp8_bilinear_pixels_tab[IDX][2][0] = ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT; \ | 279 c->put_vp8_bilinear_pixels_tab[IDX][2][0] = ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT; \ |
| 279 c->put_vp8_bilinear_pixels_tab[IDX][2][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \ | 280 c->put_vp8_bilinear_pixels_tab[IDX][2][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \ |
| 280 c->put_vp8_bilinear_pixels_tab[IDX][2][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT | 281 c->put_vp8_bilinear_pixels_tab[IDX][2][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT |
| 281 | 282 |
| 282 | 283 |
| 283 av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c) | 284 av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c) |
| 284 { | 285 { |
| 285 mm_flags = mm_support(); | 286 int mm_flags = av_get_cpu_flags(); |
| 286 | 287 |
| 287 #if HAVE_YASM | 288 #if HAVE_YASM |
| 288 if (mm_flags & FF_MM_MMX) { | 289 if (mm_flags & AV_CPU_FLAG_MMX) { |
| 289 c->vp8_idct_dc_add = ff_vp8_idct_dc_add_mmx; | 290 c->vp8_idct_dc_add = ff_vp8_idct_dc_add_mmx; |
| 290 c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_mmx; | 291 c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_mmx; |
| 291 c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_mmx; | 292 c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_mmx; |
| 292 c->vp8_idct_add = ff_vp8_idct_add_mmx; | 293 c->vp8_idct_add = ff_vp8_idct_add_mmx; |
| 293 c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_mmx; | 294 c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_mmx; |
| 294 c->put_vp8_epel_pixels_tab[0][0][0] = | 295 c->put_vp8_epel_pixels_tab[0][0][0] = |
| 295 c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_mmx; | 296 c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_mmx; |
| 296 c->put_vp8_epel_pixels_tab[1][0][0] = | 297 c->put_vp8_epel_pixels_tab[1][0][0] = |
| 297 c->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_mmx; | 298 c->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_mmx; |
| 298 | 299 |
| 299 c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmx; | 300 c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmx; |
| 300 c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmx; | 301 c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmx; |
| 301 | 302 |
| 302 c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmx; | 303 c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmx; |
| 303 c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmx; | 304 c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmx; |
| 304 c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmx; | 305 c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmx; |
| 305 c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmx; | 306 c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmx; |
| 306 | 307 |
| 307 c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmx; | 308 c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmx; |
| 308 c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmx; | 309 c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmx; |
| 309 c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmx; | 310 c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmx; |
| 310 c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmx; | 311 c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmx; |
| 311 } | 312 } |
| 312 | 313 |
| 313 /* note that 4-tap width=16 functions are missing because w=16 | 314 /* note that 4-tap width=16 functions are missing because w=16 |
| 314 * is only used for luma, and luma is always a copy or sixtap. */ | 315 * is only used for luma, and luma is always a copy or sixtap. */ |
| 315 if (mm_flags & FF_MM_MMX2) { | 316 if (mm_flags & AV_CPU_FLAG_MMX2) { |
| 316 VP8_LUMA_MC_FUNC(0, 16, mmxext); | 317 VP8_LUMA_MC_FUNC(0, 16, mmxext); |
| 317 VP8_MC_FUNC(1, 8, mmxext); | 318 VP8_MC_FUNC(1, 8, mmxext); |
| 318 VP8_MC_FUNC(2, 4, mmxext); | 319 VP8_MC_FUNC(2, 4, mmxext); |
| 319 VP8_BILINEAR_MC_FUNC(0, 16, mmxext); | 320 VP8_BILINEAR_MC_FUNC(0, 16, mmxext); |
| 320 VP8_BILINEAR_MC_FUNC(1, 8, mmxext); | 321 VP8_BILINEAR_MC_FUNC(1, 8, mmxext); |
| 321 VP8_BILINEAR_MC_FUNC(2, 4, mmxext); | 322 VP8_BILINEAR_MC_FUNC(2, 4, mmxext); |
| 322 | 323 |
| 323 c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmxext; | 324 c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmxext; |
| 324 c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmxext; | 325 c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmxext; |
| 325 | 326 |
| 326 c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmxext; | 327 c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmxext; |
| 327 c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmxext; | 328 c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmxext; |
| 328 c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmxext; | 329 c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmxext; |
| 329 c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmxext; | 330 c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmxext; |
| 330 | 331 |
| 331 c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmxext; | 332 c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmxext; |
| 332 c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmxext; | 333 c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmxext; |
| 333 c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmxext; | 334 c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmxext; |
| 334 c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmxext; | 335 c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmxext; |
| 335 } | 336 } |
| 336 | 337 |
| 337 if (mm_flags & FF_MM_SSE) { | 338 if (mm_flags & AV_CPU_FLAG_SSE) { |
| 338 c->vp8_idct_add = ff_vp8_idct_add_sse; | 339 c->vp8_idct_add = ff_vp8_idct_add_sse; |
| 339 c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_sse; | 340 c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_sse; |
| 340 c->put_vp8_epel_pixels_tab[0][0][0] = | 341 c->put_vp8_epel_pixels_tab[0][0][0] = |
| 341 c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_sse; | 342 c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_sse; |
| 342 } | 343 } |
| 343 | 344 |
| 344 if (mm_flags & (FF_MM_SSE2|FF_MM_SSE2SLOW)) { | 345 if (mm_flags & (AV_CPU_FLAG_SSE2|AV_CPU_FLAG_SSE2SLOW)) { |
| 345 VP8_LUMA_MC_FUNC(0, 16, sse2); | 346 VP8_LUMA_MC_FUNC(0, 16, sse2); |
| 346 VP8_MC_FUNC(1, 8, sse2); | 347 VP8_MC_FUNC(1, 8, sse2); |
| 347 VP8_BILINEAR_MC_FUNC(0, 16, sse2); | 348 VP8_BILINEAR_MC_FUNC(0, 16, sse2); |
| 348 VP8_BILINEAR_MC_FUNC(1, 8, sse2); | 349 VP8_BILINEAR_MC_FUNC(1, 8, sse2); |
| 349 | 350 |
| 350 c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_sse2; | 351 c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_sse2; |
| 351 | 352 |
| 352 c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_sse2; | 353 c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_sse2; |
| 353 c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_sse2; | 354 c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_sse2; |
| 354 | 355 |
| 355 c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_sse2; | 356 c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_sse2; |
| 356 c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_sse2; | 357 c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_sse2; |
| 357 } | 358 } |
| 358 | 359 |
| 359 if (mm_flags & FF_MM_SSE2) { | 360 if (mm_flags & AV_CPU_FLAG_SSE2) { |
| 360 c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_sse2; | 361 c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_sse2; |
| 361 | 362 |
| 362 c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse2; | 363 c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse2; |
| 363 | 364 |
| 364 c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_sse2; | 365 c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_sse2; |
| 365 c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_sse2; | 366 c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_sse2; |
| 366 | 367 |
| 367 c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_sse2; | 368 c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_sse2; |
| 368 c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_sse2; | 369 c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_sse2; |
| 369 } | 370 } |
| 370 | 371 |
| 371 if (mm_flags & FF_MM_SSSE3) { | 372 if (mm_flags & AV_CPU_FLAG_SSSE3) { |
| 372 VP8_LUMA_MC_FUNC(0, 16, ssse3); | 373 VP8_LUMA_MC_FUNC(0, 16, ssse3); |
| 373 VP8_MC_FUNC(1, 8, ssse3); | 374 VP8_MC_FUNC(1, 8, ssse3); |
| 374 VP8_MC_FUNC(2, 4, ssse3); | 375 VP8_MC_FUNC(2, 4, ssse3); |
| 375 VP8_BILINEAR_MC_FUNC(0, 16, ssse3); | 376 VP8_BILINEAR_MC_FUNC(0, 16, ssse3); |
| 376 VP8_BILINEAR_MC_FUNC(1, 8, ssse3); | 377 VP8_BILINEAR_MC_FUNC(1, 8, ssse3); |
| 377 VP8_BILINEAR_MC_FUNC(2, 4, ssse3); | 378 VP8_BILINEAR_MC_FUNC(2, 4, ssse3); |
| 378 | 379 |
| 379 c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_ssse3; | 380 c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_ssse3; |
| 380 c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_ssse3; | 381 c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_ssse3; |
| 381 | 382 |
| 382 c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_ssse3; | 383 c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_ssse3; |
| 383 c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_ssse3; | 384 c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_ssse3; |
| 384 c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_ssse3; | 385 c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_ssse3; |
| 385 c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_ssse3; | 386 c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_ssse3; |
| 386 | 387 |
| 387 c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_ssse3; | 388 c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_ssse3; |
| 388 c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_ssse3; | 389 c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_ssse3; |
| 389 c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_ssse3; | 390 c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_ssse3; |
| 390 c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_ssse3; | 391 c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_ssse3; |
| 391 } | 392 } |
| 392 | 393 |
| 393 if (mm_flags & FF_MM_SSE4) { | 394 if (mm_flags & AV_CPU_FLAG_SSE4) { |
| 394 c->vp8_idct_dc_add = ff_vp8_idct_dc_add_sse4; | 395 c->vp8_idct_dc_add = ff_vp8_idct_dc_add_sse4; |
| 395 | 396 |
| 396 c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse4; | 397 c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse4; |
| 397 c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_sse4; | 398 c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_sse4; |
| 398 c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_sse4; | 399 c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_sse4; |
| 399 } | 400 } |
| 400 #endif | 401 #endif |
| 401 } | 402 } |
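
For reference, the libavutil API this patch switches to replaces mm_support()/FF_MM_* with av_get_cpu_flags()/AV_CPU_FLAG_* from libavutil/cpu.h. A minimal sketch of the same dispatch pattern using the new API follows; example_dsp_init and the empty branches are illustrative, not part of the patch:

    #include "libavutil/cpu.h"

    /* Hypothetical init sketch: query the CPU feature flags once, then
     * install progressively better function pointers, letting later
     * (faster) instruction-set blocks override the earlier ones. */
    static void example_dsp_init(void)
    {
        int flags = av_get_cpu_flags();

        if (flags & AV_CPU_FLAG_MMX) {
            /* set MMX baseline function pointers */
        }
        if (flags & AV_CPU_FLAG_MMX2) {
            /* MMX2/mmxext versions override the MMX ones */
        }
        if (flags & AV_CPU_FLAG_SSE2) {
            /* SSE2 versions override again where available */
        }
    }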