OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2011 The LibYuv Project Authors. All rights reserved. | 2 * Copyright 2011 The LibYuv Project Authors. All rights reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 318 matching lines...) |
329 return 0; | 329 return 0; |
330 } | 330 } |
331 | 331 |
332 // Convert I420 with Alpha to preattenuated ARGB. | 332 // Convert I420 with Alpha to preattenuated ARGB. |
333 LIBYUV_API | 333 LIBYUV_API |
334 int I420AlphaToARGB(const uint8* src_y, int src_stride_y, | 334 int I420AlphaToARGB(const uint8* src_y, int src_stride_y, |
335 const uint8* src_u, int src_stride_u, | 335 const uint8* src_u, int src_stride_u, |
336 const uint8* src_v, int src_stride_v, | 336 const uint8* src_v, int src_stride_v, |
337 const uint8* src_a, int src_stride_a, | 337 const uint8* src_a, int src_stride_a, |
338 uint8* dst_argb, int dst_stride_argb, | 338 uint8* dst_argb, int dst_stride_argb, |
339 int width, int height) { | 339 int width, int height, int attenuate) { |
340 int y; | 340 int y; |
341 void (*I422ToARGBRow)(const uint8* y_buf, | 341 void (*I422AlphaToARGBRow)(const uint8* y_buf, |
342 const uint8* u_buf, | 342 const uint8* u_buf, |
343 const uint8* v_buf, | 343 const uint8* v_buf, |
344 uint8* rgb_buf, | 344 const uint8* a_buf, |
345 struct YuvConstants* yuvconstants, | 345 uint8* dst_argb, |
346 int width) = I422ToARGBRow_C; | 346 struct YuvConstants* yuvconstants, |
347 void (*ARGBCopyYToAlphaRow)(const uint8* src_y, uint8* dst_argb, int width) = | 347 int width) = I422AlphaToARGBRow_C; |
348 ARGBCopyYToAlphaRow_C; | |
349 void (*ARGBAttenuateRow)(const uint8* src_argb, uint8* dst_argb, | 348 void (*ARGBAttenuateRow)(const uint8* src_argb, uint8* dst_argb, |
350 int width) = ARGBAttenuateRow_C; | 349 int width) = ARGBAttenuateRow_C; |
351 if (!src_y || !src_u || !src_v || !dst_argb || | 350 if (!src_y || !src_u || !src_v || !dst_argb || |
352 width <= 0 || height == 0) { | 351 width <= 0 || height == 0) { |
353 return -1; | 352 return -1; |
354 } | 353 } |
355 // Negative height means invert the image. | 354 // Negative height means invert the image. |
356 if (height < 0) { | 355 if (height < 0) { |
357 height = -height; | 356 height = -height; |
358 dst_argb = dst_argb + (height - 1) * dst_stride_argb; | 357 dst_argb = dst_argb + (height - 1) * dst_stride_argb; |
359 dst_stride_argb = -dst_stride_argb; | 358 dst_stride_argb = -dst_stride_argb; |
360 } | 359 } |
361 #if defined(HAS_I422TOARGBROW_SSSE3) | 360 #if defined(HAS_I422ALPHATOARGBROW_SSSE3) |
362 if (TestCpuFlag(kCpuHasSSSE3)) { | 361 if (TestCpuFlag(kCpuHasSSSE3)) { |
363 I422ToARGBRow = I422ToARGBRow_Any_SSSE3; | 362 I422AlphaToARGBRow = I422AlphaToARGBRow_Any_SSSE3; |
364 if (IS_ALIGNED(width, 8)) { | 363 if (IS_ALIGNED(width, 8)) { |
365 I422ToARGBRow = I422ToARGBRow_SSSE3; | 364 I422AlphaToARGBRow = I422AlphaToARGBRow_SSSE3; |
366 } | 365 } |
367 } | 366 } |
368 #endif | 367 #endif |
369 #if defined(HAS_I422TOARGBROW_AVX2) | 368 #if defined(HAS_I422ALPHATOARGBROW_AVX2) |
370 if (TestCpuFlag(kCpuHasAVX2)) { | 369 if (TestCpuFlag(kCpuHasAVX2)) { |
371 I422ToARGBRow = I422ToARGBRow_Any_AVX2; | 370 I422AlphaToARGBRow = I422AlphaToARGBRow_Any_AVX2; |
372 if (IS_ALIGNED(width, 16)) { | 371 if (IS_ALIGNED(width, 16)) { |
373 I422ToARGBRow = I422ToARGBRow_AVX2; | 372 I422AlphaToARGBRow = I422AlphaToARGBRow_AVX2; |
374 } | 373 } |
375 } | 374 } |
376 #endif | 375 #endif |
377 #if defined(HAS_I422TOARGBROW_NEON) | 376 #if defined(HAS_I422ALPHATOARGBROW_NEON) |
378 if (TestCpuFlag(kCpuHasNEON)) { | 377 if (TestCpuFlag(kCpuHasNEON)) { |
379 I422ToARGBRow = I422ToARGBRow_Any_NEON; | 378 I422AlphaToARGBRow = I422AlphaToARGBRow_Any_NEON; |
380 if (IS_ALIGNED(width, 8)) { | 379 if (IS_ALIGNED(width, 8)) { |
381 I422ToARGBRow = I422ToARGBRow_NEON; | 380 I422AlphaToARGBRow = I422AlphaToARGBRow_NEON; |
382 } | 381 } |
383 } | 382 } |
384 #endif | 383 #endif |
385 #if defined(HAS_I422TOARGBROW_MIPS_DSPR2) | 384 #if defined(HAS_I422ALPHATOARGBROW_MIPS_DSPR2) |
386 if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) && | 385 if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) && |
387 IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) && | 386 IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) && |
388 IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) && | 387 IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) && |
389 IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) && | 388 IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) && |
390 IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) { | 389 IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) { |
391 I422ToARGBRow = I422ToARGBRow_MIPS_DSPR2; | 390 I422AlphaToARGBRow = I422AlphaToARGBRow_MIPS_DSPR2; |
392 } | |
393 #endif | |
394 #if defined(HAS_ARGBCOPYYTOALPHAROW_SSE2) | |
395 if (TestCpuFlag(kCpuHasSSE2)) { | |
396 ARGBCopyYToAlphaRow = ARGBCopyYToAlphaRow_Any_SSE2; | |
397 if (IS_ALIGNED(width, 8)) { | |
398 ARGBCopyYToAlphaRow = ARGBCopyYToAlphaRow_SSE2; | |
399 } | |
400 } | |
401 #endif | |
402 #if defined(HAS_ARGBCOPYYTOALPHAROW_AVX2) | |
403 if (TestCpuFlag(kCpuHasAVX2)) { | |
404 ARGBCopyYToAlphaRow = ARGBCopyYToAlphaRow_Any_AVX2; | |
405 if (IS_ALIGNED(width, 16)) { | |
406 ARGBCopyYToAlphaRow = ARGBCopyYToAlphaRow_AVX2; | |
407 } | |
408 } | 391 } |
409 #endif | 392 #endif |
410 #if defined(HAS_ARGBATTENUATEROW_SSE2) | 393 #if defined(HAS_ARGBATTENUATEROW_SSE2) |
411 if (TestCpuFlag(kCpuHasSSE2)) { | 394 if (TestCpuFlag(kCpuHasSSE2)) { |
412 ARGBAttenuateRow = ARGBAttenuateRow_Any_SSE2; | 395 ARGBAttenuateRow = ARGBAttenuateRow_Any_SSE2; |
413 if (IS_ALIGNED(width, 4)) { | 396 if (IS_ALIGNED(width, 4)) { |
414 ARGBAttenuateRow = ARGBAttenuateRow_SSE2; | 397 ARGBAttenuateRow = ARGBAttenuateRow_SSE2; |
415 } | 398 } |
416 } | 399 } |
417 #endif | 400 #endif |
(...skipping 16 matching lines...) |
434 #if defined(HAS_ARGBATTENUATEROW_NEON) | 417 #if defined(HAS_ARGBATTENUATEROW_NEON) |
435 if (TestCpuFlag(kCpuHasNEON)) { | 418 if (TestCpuFlag(kCpuHasNEON)) { |
436 ARGBAttenuateRow = ARGBAttenuateRow_Any_NEON; | 419 ARGBAttenuateRow = ARGBAttenuateRow_Any_NEON; |
437 if (IS_ALIGNED(width, 8)) { | 420 if (IS_ALIGNED(width, 8)) { |
438 ARGBAttenuateRow = ARGBAttenuateRow_NEON; | 421 ARGBAttenuateRow = ARGBAttenuateRow_NEON; |
439 } | 422 } |
440 } | 423 } |
441 #endif | 424 #endif |
442 | 425 |
443 for (y = 0; y < height; ++y) { | 426 for (y = 0; y < height; ++y) { |
444 I422ToARGBRow(src_y, src_u, src_v, dst_argb, &kYuvConstants, width); | 427 I422AlphaToARGBRow(src_y, src_u, src_v, src_a, dst_argb, &kYuvConstants, width); |
445 ARGBCopyYToAlphaRow(src_a, dst_argb, width); | 428 if (attenuate) { |
446 ARGBAttenuateRow(dst_argb, dst_argb, width); | 429 ARGBAttenuateRow(dst_argb, dst_argb, width); |
| 430 } |
447 dst_argb += dst_stride_argb; | 431 dst_argb += dst_stride_argb; |
448 src_a += src_stride_a; | 432 src_a += src_stride_a; |
449 src_y += src_stride_y; | 433 src_y += src_stride_y; |
450 if (y & 1) { | 434 if (y & 1) { |
451 src_u += src_stride_u; | 435 src_u += src_stride_u; |
452 src_v += src_stride_v; | 436 src_v += src_stride_v; |
453 } | 437 } |
454 } | 438 } |
455 return 0; | 439 return 0; |
456 } | 440 } |
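A quick usage sketch for the reworked entry point. The signature, including the new trailing attenuate flag, is taken from the diff above; the umbrella "libyuv.h" include, the buffer names, and the 64x48 frame size are illustrative assumptions, not part of this CL. Passing attenuate = 1 requests premultiplied (preattenuated) output; 0 leaves the RGB channels unattenuated.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "libyuv.h"  /* assumption: umbrella header exposing I420AlphaToARGB */

int ConvertI420AlphaFrame(void) {
  const int width = 64, height = 48;        /* arbitrary test size */
  const int half_width = (width + 1) / 2;
  const int half_height = (height + 1) / 2;

  /* I420 + alpha: full-size Y and A planes, quarter-size U and V planes. */
  uint8_t* y = (uint8_t*)malloc((size_t)width * height);
  uint8_t* u = (uint8_t*)malloc((size_t)half_width * half_height);
  uint8_t* v = (uint8_t*)malloc((size_t)half_width * half_height);
  uint8_t* a = (uint8_t*)malloc((size_t)width * height);
  uint8_t* argb = (uint8_t*)malloc((size_t)width * height * 4);

  memset(y, 128, (size_t)width * height);           /* mid gray */
  memset(u, 128, (size_t)half_width * half_height);
  memset(v, 128, (size_t)half_width * half_height);
  memset(a, 192, (size_t)width * height);           /* 75% opaque */

  /* Last argument is the new attenuate flag: 1 = premultiply RGB by A. */
  int ret = I420AlphaToARGB(y, width,
                            u, half_width,
                            v, half_width,
                            a, width,
                            argb, width * 4,
                            width, height, 1);

  free(y); free(u); free(v); free(a); free(argb);
  return ret;  /* 0 on success, -1 on bad arguments */
}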
457 // Convert I420 with Alpha to preattenuated ABGR. | 441 |
| 442 // Convert I420 with Alpha to preattenuated ABGR. |
458 LIBYUV_API | 443 LIBYUV_API |
459 int I420AlphaToABGR(const uint8* src_y, int src_stride_y, | 444 int I420AlphaToABGR(const uint8* src_y, int src_stride_y, |
460 const uint8* src_u, int src_stride_u, | 445 const uint8* src_u, int src_stride_u, |
461 const uint8* src_v, int src_stride_v, | 446 const uint8* src_v, int src_stride_v, |
462 const uint8* src_a, int src_stride_a, | 447 const uint8* src_a, int src_stride_a, |
463 uint8* dst_abgr, int dst_stride_abgr, | 448 uint8* dst_abgr, int dst_stride_abgr, |
464 int width, int height) { | 449 int width, int height, int attenuate) { |
465 int y; | 450 int y; |
466 void (*I422ToABGRRow)(const uint8* y_buf, | 451 void (*I422AlphaToABGRRow)(const uint8* y_buf, |
467 const uint8* u_buf, | 452 const uint8* u_buf, |
468 const uint8* v_buf, | 453 const uint8* v_buf, |
469 uint8* rgb_buf, | 454 const uint8* a_buf, |
470 struct YuvConstants* yuvconstants, | 455 uint8* dst_abgr, |
471 int width) = I422ToABGRRow_C; | 456 struct YuvConstants* yuvconstants, |
472 void (*ARGBCopyYToAlphaRow)(const uint8* src_y, uint8* dst_argb, int width) = | 457 int width) = I422AlphaToABGRRow_C; |
473 ARGBCopyYToAlphaRow_C; | 458 void (*ARGBAttenuateRow)(const uint8* src_abgr, uint8* dst_abgr, |
474 void (*ARGBAttenuateRow)(const uint8* src_argb, uint8* dst_argb, | |
475 int width) = ARGBAttenuateRow_C; | 459 int width) = ARGBAttenuateRow_C; |
476 if (!src_y || !src_u || !src_v || !dst_abgr || | 460 if (!src_y || !src_u || !src_v || !dst_abgr || |
477 width <= 0 || height == 0) { | 461 width <= 0 || height == 0) { |
478 return -1; | 462 return -1; |
479 } | 463 } |
480 // Negative height means invert the image. | 464 // Negative height means invert the image. |
481 if (height < 0) { | 465 if (height < 0) { |
482 height = -height; | 466 height = -height; |
483 dst_abgr = dst_abgr + (height - 1) * dst_stride_abgr; | 467 dst_abgr = dst_abgr + (height - 1) * dst_stride_abgr; |
484 dst_stride_abgr = -dst_stride_abgr; | 468 dst_stride_abgr = -dst_stride_abgr; |
485 } | 469 } |
486 #if defined(HAS_I422TOABGRROW_SSSE3) | 470 #if defined(HAS_I422ALPHATOABGRROW_SSSE3) |
487 if (TestCpuFlag(kCpuHasSSSE3)) { | 471 if (TestCpuFlag(kCpuHasSSSE3)) { |
488 I422ToABGRRow = I422ToABGRRow_Any_SSSE3; | 472 I422AlphaToABGRRow = I422AlphaToABGRRow_Any_SSSE3; |
489 if (IS_ALIGNED(width, 8)) { | 473 if (IS_ALIGNED(width, 8)) { |
490 I422ToABGRRow = I422ToABGRRow_SSSE3; | 474 I422AlphaToABGRRow = I422AlphaToABGRRow_SSSE3; |
491 } | 475 } |
492 } | 476 } |
493 #endif | 477 #endif |
494 #if defined(HAS_I422TOABGRROW_AVX2) | 478 #if defined(HAS_I422ALPHATOABGRROW_AVX2) |
495 if (TestCpuFlag(kCpuHasAVX2)) { | 479 if (TestCpuFlag(kCpuHasAVX2)) { |
496 I422ToABGRRow = I422ToABGRRow_Any_AVX2; | 480 I422AlphaToABGRRow = I422AlphaToABGRRow_Any_AVX2; |
497 if (IS_ALIGNED(width, 16)) { | 481 if (IS_ALIGNED(width, 16)) { |
498 I422ToABGRRow = I422ToABGRRow_AVX2; | 482 I422AlphaToABGRRow = I422AlphaToABGRRow_AVX2; |
499 } | 483 } |
500 } | 484 } |
501 #endif | 485 #endif |
502 #if defined(HAS_I422TOABGRROW_NEON) | 486 #if defined(HAS_I422ALPHATOABGRROW_NEON) |
503 if (TestCpuFlag(kCpuHasNEON)) { | 487 if (TestCpuFlag(kCpuHasNEON)) { |
504 I422ToABGRRow = I422ToABGRRow_Any_NEON; | 488 I422AlphaToABGRRow = I422AlphaToABGRRow_Any_NEON; |
505 if (IS_ALIGNED(width, 8)) { | 489 if (IS_ALIGNED(width, 8)) { |
506 I422ToABGRRow = I422ToABGRRow_NEON; | 490 I422AlphaToABGRRow = I422AlphaToABGRRow_NEON; |
507 } | 491 } |
508 } | 492 } |
509 #endif | 493 #endif |
510 #if defined(HAS_I422TOABGRROW_MIPS_DSPR2) | 494 #if defined(HAS_I422ALPHATOABGRROW_MIPS_DSPR2) |
511 if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) && | 495 if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) && |
512 IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) && | 496 IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) && |
513 IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) && | 497 IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) && |
514 IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) && | 498 IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) && |
515 IS_ALIGNED(dst_abgr, 4) && IS_ALIGNED(dst_stride_abgr, 4)) { | 499 IS_ALIGNED(dst_abgr, 4) && IS_ALIGNED(dst_stride_abgr, 4)) { |
516 I422ToABGRRow = I422ToABGRRow_MIPS_DSPR2; | 500 I422AlphaToABGRRow = I422AlphaToABGRRow_MIPS_DSPR2; |
517 } | |
518 #endif | |
519 #if defined(HAS_ARGBCOPYYTOALPHAROW_SSE2) | |
520 if (TestCpuFlag(kCpuHasSSE2)) { | |
521 ARGBCopyYToAlphaRow = ARGBCopyYToAlphaRow_Any_SSE2; | |
522 if (IS_ALIGNED(width, 8)) { | |
523 ARGBCopyYToAlphaRow = ARGBCopyYToAlphaRow_SSE2; | |
524 } | |
525 } | |
526 #endif | |
527 #if defined(HAS_ARGBCOPYYTOALPHAROW_AVX2) | |
528 if (TestCpuFlag(kCpuHasAVX2)) { | |
529 ARGBCopyYToAlphaRow = ARGBCopyYToAlphaRow_Any_AVX2; | |
530 if (IS_ALIGNED(width, 16)) { | |
531 ARGBCopyYToAlphaRow = ARGBCopyYToAlphaRow_AVX2; | |
532 } | |
533 } | 501 } |
534 #endif | 502 #endif |
535 #if defined(HAS_ARGBATTENUATEROW_SSE2) | 503 #if defined(HAS_ARGBATTENUATEROW_SSE2) |
536 if (TestCpuFlag(kCpuHasSSE2)) { | 504 if (TestCpuFlag(kCpuHasSSE2)) { |
537 ARGBAttenuateRow = ARGBAttenuateRow_Any_SSE2; | 505 ARGBAttenuateRow = ARGBAttenuateRow_Any_SSE2; |
538 if (IS_ALIGNED(width, 4)) { | 506 if (IS_ALIGNED(width, 4)) { |
539 ARGBAttenuateRow = ARGBAttenuateRow_SSE2; | 507 ARGBAttenuateRow = ARGBAttenuateRow_SSE2; |
540 } | 508 } |
541 } | 509 } |
542 #endif | 510 #endif |
(...skipping 16 matching lines...) |
559 #if defined(HAS_ARGBATTENUATEROW_NEON) | 527 #if defined(HAS_ARGBATTENUATEROW_NEON) |
560 if (TestCpuFlag(kCpuHasNEON)) { | 528 if (TestCpuFlag(kCpuHasNEON)) { |
561 ARGBAttenuateRow = ARGBAttenuateRow_Any_NEON; | 529 ARGBAttenuateRow = ARGBAttenuateRow_Any_NEON; |
562 if (IS_ALIGNED(width, 8)) { | 530 if (IS_ALIGNED(width, 8)) { |
563 ARGBAttenuateRow = ARGBAttenuateRow_NEON; | 531 ARGBAttenuateRow = ARGBAttenuateRow_NEON; |
564 } | 532 } |
565 } | 533 } |
566 #endif | 534 #endif |
567 | 535 |
568 for (y = 0; y < height; ++y) { | 536 for (y = 0; y < height; ++y) { |
569 I422ToABGRRow(src_y, src_u, src_v, dst_abgr, &kYuvConstants, width); | 537 I422AlphaToABGRRow(src_y, src_u, src_v, src_a, dst_abgr, &kYuvConstants, width); |
570 ARGBCopyYToAlphaRow(src_a, dst_abgr, width); | 538 if (attenuate) { |
571 ARGBAttenuateRow(dst_abgr, dst_abgr, width); | 539 ARGBAttenuateRow(dst_abgr, dst_abgr, width); |
| 540 } |
572 dst_abgr += dst_stride_abgr; | 541 dst_abgr += dst_stride_abgr; |
573 src_a += src_stride_a; | 542 src_a += src_stride_a; |
574 src_y += src_stride_y; | 543 src_y += src_stride_y; |
575 if (y & 1) { | 544 if (y & 1) { |
576 src_u += src_stride_u; | 545 src_u += src_stride_u; |
577 src_v += src_stride_v; | 546 src_v += src_stride_v; |
578 } | 547 } |
579 } | 548 } |
580 return 0; | 549 return 0; |
581 } | 550 } |
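The ABGR path mirrors the ARGB one: the fused I422AlphaToABGRRow writes the alpha plane straight into the output, and ARGBAttenuateRow now runs only when the caller passes a nonzero attenuate. For readers unfamiliar with attenuation, the sketch below shows the per-pixel effect of that optional pass; it is my own illustration using round-to-nearest division, not the exact fixed-point arithmetic of libyuv's ARGBAttenuateRow_* kernels.

#include <stdint.h>

/* Illustration only: premultiply one 4-byte pixel by its alpha. In both the
 * ARGB (B,G,R,A byte order) and ABGR (R,G,B,A byte order) layouts the alpha
 * byte sits at index 3, so the same loop serves either output format. */
static void AttenuatePixelSketch(uint8_t px[4]) {
  const unsigned a = px[3];  /* alpha itself is left untouched */
  for (int i = 0; i < 3; ++i) {
    px[i] = (uint8_t)((px[i] * a + 127) / 255);
  }
}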
(...skipping 1344 matching lines...) |
1926 src_u += src_stride_u; | 1895 src_u += src_stride_u; |
1927 src_v += src_stride_v; | 1896 src_v += src_stride_v; |
1928 } | 1897 } |
1929 return 0; | 1898 return 0; |
1930 } | 1899 } |
1931 | 1900 |
1932 #ifdef __cplusplus | 1901 #ifdef __cplusplus |
1933 } // extern "C" | 1902 } // extern "C" |
1934 } // namespace libyuv | 1903 } // namespace libyuv |
1935 #endif | 1904 #endif |