| Index: source/row_gcc.cc
|
| diff --git a/source/row_gcc.cc b/source/row_gcc.cc
|
| index 0be1c5ec240255dc11f9b162a1400cab9ceeb20a..dccf0135b1ccbb01e7536d4491f8df8eb1935726 100644
|
| --- a/source/row_gcc.cc
|
| +++ b/source/row_gcc.cc
|
| @@ -16,8 +16,6 @@ namespace libyuv {
|
| extern "C" {
|
| #endif
|
|
|
| -// clang-format off
|
| -
|
| // This module is for GCC x86 and x64.
|
| #if !defined(LIBYUV_DISABLE_X86) && \
|
| (defined(__x86_64__) || (defined(__i386__) && !defined(_MSC_VER)))
|
| @@ -25,165 +23,133 @@ extern "C" {
|
| #if defined(HAS_ARGBTOYROW_SSSE3) || defined(HAS_ARGBGRAYROW_SSSE3)
|
|
|
| // Constants for ARGB
|
| -static vec8 kARGBToY = {
|
| - 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33, 0
|
| -};
|
| +static vec8 kARGBToY = {13, 65, 33, 0, 13, 65, 33, 0,
|
| + 13, 65, 33, 0, 13, 65, 33, 0};
|
|
|
| // JPeg full range.
|
| -static vec8 kARGBToYJ = {
|
| - 15, 75, 38, 0, 15, 75, 38, 0, 15, 75, 38, 0, 15, 75, 38, 0
|
| -};
|
| +static vec8 kARGBToYJ = {15, 75, 38, 0, 15, 75, 38, 0,
|
| + 15, 75, 38, 0, 15, 75, 38, 0};
|
| #endif // defined(HAS_ARGBTOYROW_SSSE3) || defined(HAS_ARGBGRAYROW_SSSE3)
|
|
|
| #if defined(HAS_ARGBTOYROW_SSSE3) || defined(HAS_I422TOARGBROW_SSSE3)
|
|
|
| -static vec8 kARGBToU = {
|
| - 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38, 0
|
| -};
|
| +static vec8 kARGBToU = {112, -74, -38, 0, 112, -74, -38, 0,
|
| + 112, -74, -38, 0, 112, -74, -38, 0};
|
|
|
| -static vec8 kARGBToUJ = {
|
| - 127, -84, -43, 0, 127, -84, -43, 0, 127, -84, -43, 0, 127, -84, -43, 0
|
| -};
|
| +static vec8 kARGBToUJ = {127, -84, -43, 0, 127, -84, -43, 0,
|
| + 127, -84, -43, 0, 127, -84, -43, 0};
|
|
|
| static vec8 kARGBToV = {
|
| - -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0,
|
| + -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0,
|
| };
|
|
|
| -static vec8 kARGBToVJ = {
|
| - -20, -107, 127, 0, -20, -107, 127, 0, -20, -107, 127, 0, -20, -107, 127, 0
|
| -};
|
| +static vec8 kARGBToVJ = {-20, -107, 127, 0, -20, -107, 127, 0,
|
| + -20, -107, 127, 0, -20, -107, 127, 0};
|
|
|
| // Constants for BGRA
|
| -static vec8 kBGRAToY = {
|
| - 0, 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13
|
| -};
|
| +static vec8 kBGRAToY = {0, 33, 65, 13, 0, 33, 65, 13,
|
| + 0, 33, 65, 13, 0, 33, 65, 13};
|
|
|
| -static vec8 kBGRAToU = {
|
| - 0, -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112
|
| -};
|
| +static vec8 kBGRAToU = {0, -38, -74, 112, 0, -38, -74, 112,
|
| + 0, -38, -74, 112, 0, -38, -74, 112};
|
|
|
| -static vec8 kBGRAToV = {
|
| - 0, 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18
|
| -};
|
| +static vec8 kBGRAToV = {0, 112, -94, -18, 0, 112, -94, -18,
|
| + 0, 112, -94, -18, 0, 112, -94, -18};
|
|
|
| // Constants for ABGR
|
| -static vec8 kABGRToY = {
|
| - 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13, 0
|
| -};
|
| +static vec8 kABGRToY = {33, 65, 13, 0, 33, 65, 13, 0,
|
| + 33, 65, 13, 0, 33, 65, 13, 0};
|
|
|
| -static vec8 kABGRToU = {
|
| - -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112, 0
|
| -};
|
| +static vec8 kABGRToU = {-38, -74, 112, 0, -38, -74, 112, 0,
|
| + -38, -74, 112, 0, -38, -74, 112, 0};
|
|
|
| -static vec8 kABGRToV = {
|
| - 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18, 0
|
| -};
|
| +static vec8 kABGRToV = {112, -94, -18, 0, 112, -94, -18, 0,
|
| + 112, -94, -18, 0, 112, -94, -18, 0};
|
|
|
| // Constants for RGBA.
|
| -static vec8 kRGBAToY = {
|
| - 0, 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33
|
| -};
|
| +static vec8 kRGBAToY = {0, 13, 65, 33, 0, 13, 65, 33,
|
| + 0, 13, 65, 33, 0, 13, 65, 33};
|
|
|
| -static vec8 kRGBAToU = {
|
| - 0, 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38
|
| -};
|
| +static vec8 kRGBAToU = {0, 112, -74, -38, 0, 112, -74, -38,
|
| + 0, 112, -74, -38, 0, 112, -74, -38};
|
|
|
| -static vec8 kRGBAToV = {
|
| - 0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112
|
| -};
|
| +static vec8 kRGBAToV = {0, -18, -94, 112, 0, -18, -94, 112,
|
| + 0, -18, -94, 112, 0, -18, -94, 112};
|
|
|
| -static uvec8 kAddY16 = {
|
| - 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u
|
| -};
|
| +static uvec8 kAddY16 = {16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u,
|
| + 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u};
|
|
|
| // 7 bit fixed point 0.5.
|
| -static vec16 kAddYJ64 = {
|
| - 64, 64, 64, 64, 64, 64, 64, 64
|
| -};
|
| +static vec16 kAddYJ64 = {64, 64, 64, 64, 64, 64, 64, 64};
|
|
|
| -static uvec8 kAddUV128 = {
|
| - 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u,
|
| - 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u
|
| -};
|
| +static uvec8 kAddUV128 = {128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u,
|
| + 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u};
|
|
|
| -static uvec16 kAddUVJ128 = {
|
| - 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u
|
| -};
|
| +static uvec16 kAddUVJ128 = {0x8080u, 0x8080u, 0x8080u, 0x8080u,
|
| + 0x8080u, 0x8080u, 0x8080u, 0x8080u};
|
| #endif // defined(HAS_ARGBTOYROW_SSSE3) || defined(HAS_I422TOARGBROW_SSSE3)
|
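The coefficient tables above are the per-channel weights that ARGBToYRow/ARGBToUVRow and their BGRA/ABGR/RGBA variants feed to pmaddubsw, in memory byte order B, G, R, A. A rough scalar sketch of what one pixel goes through (helper names here are illustrative, not libyuv functions; the real UV rows also average 2x2 blocks of pixels first, and the shifts below truncate just as the vector code does):

// Scalar sketch of the fixed-point conversions driven by kARGBToY/kARGBToU/
// kARGBToV plus the kAddY16/kAddUV128 biases. Uses libyuv's uint8 typedef.
static inline uint8 ScalarARGBToY(uint8 b, uint8 g, uint8 r) {
  return (uint8)(((13 * b + 65 * g + 33 * r) >> 7) + 16);
}
static inline uint8 ScalarARGBToU(uint8 b, uint8 g, uint8 r) {
  // Arithmetic (sign-preserving) shift, as the vector psraw does.
  return (uint8)(((112 * b - 74 * g - 38 * r) >> 8) + 128);
}
static inline uint8 ScalarARGBToV(uint8 b, uint8 g, uint8 r) {
  return (uint8)(((-18 * b - 94 * g + 112 * r) >> 8) + 128);
}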
|
|
| #ifdef HAS_RGB24TOARGBROW_SSSE3
|
|
|
| // Shuffle table for converting RGB24 to ARGB.
|
| -static uvec8 kShuffleMaskRGB24ToARGB = {
|
| - 0u, 1u, 2u, 12u, 3u, 4u, 5u, 13u, 6u, 7u, 8u, 14u, 9u, 10u, 11u, 15u
|
| -};
|
| +static uvec8 kShuffleMaskRGB24ToARGB = {0u, 1u, 2u, 12u, 3u, 4u, 5u, 13u,
|
| + 6u, 7u, 8u, 14u, 9u, 10u, 11u, 15u};
|
|
|
| // Shuffle table for converting RAW to ARGB.
|
| -static uvec8 kShuffleMaskRAWToARGB = {
|
| - 2u, 1u, 0u, 12u, 5u, 4u, 3u, 13u, 8u, 7u, 6u, 14u, 11u, 10u, 9u, 15u
|
| -};
|
| +static uvec8 kShuffleMaskRAWToARGB = {2u, 1u, 0u, 12u, 5u, 4u, 3u, 13u,
|
| + 8u, 7u, 6u, 14u, 11u, 10u, 9u, 15u};
|
|
|
| // Shuffle table for converting RAW to RGB24. First 8.
|
| static const uvec8 kShuffleMaskRAWToRGB24_0 = {
|
| - 2u, 1u, 0u, 5u, 4u, 3u, 8u, 7u,
|
| - 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u
|
| -};
|
| + 2u, 1u, 0u, 5u, 4u, 3u, 8u, 7u,
|
| + 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u};
|
|
|
| // Shuffle table for converting RAW to RGB24. Middle 8.
|
| static const uvec8 kShuffleMaskRAWToRGB24_1 = {
|
| - 2u, 7u, 6u, 5u, 10u, 9u, 8u, 13u,
|
| - 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u
|
| -};
|
| + 2u, 7u, 6u, 5u, 10u, 9u, 8u, 13u,
|
| + 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u};
|
|
|
| // Shuffle table for converting RAW to RGB24. Last 8.
|
| static const uvec8 kShuffleMaskRAWToRGB24_2 = {
|
| - 8u, 7u, 12u, 11u, 10u, 15u, 14u, 13u,
|
| - 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u
|
| -};
|
| + 8u, 7u, 12u, 11u, 10u, 15u, 14u, 13u,
|
| + 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u};
|
|
|
| // Shuffle table for converting ARGB to RGB24.
|
| static uvec8 kShuffleMaskARGBToRGB24 = {
|
| - 0u, 1u, 2u, 4u, 5u, 6u, 8u, 9u, 10u, 12u, 13u, 14u, 128u, 128u, 128u, 128u
|
| -};
|
| + 0u, 1u, 2u, 4u, 5u, 6u, 8u, 9u, 10u, 12u, 13u, 14u, 128u, 128u, 128u, 128u};
|
|
|
| // Shuffle table for converting ARGB to RAW.
|
| static uvec8 kShuffleMaskARGBToRAW = {
|
| - 2u, 1u, 0u, 6u, 5u, 4u, 10u, 9u, 8u, 14u, 13u, 12u, 128u, 128u, 128u, 128u
|
| -};
|
| + 2u, 1u, 0u, 6u, 5u, 4u, 10u, 9u, 8u, 14u, 13u, 12u, 128u, 128u, 128u, 128u};
|
|
|
| // Shuffle table for converting ARGBToRGB24 for I422ToRGB24. First 8 + next 4
|
| static uvec8 kShuffleMaskARGBToRGB24_0 = {
|
| - 0u, 1u, 2u, 4u, 5u, 6u, 8u, 9u, 128u, 128u, 128u, 128u, 10u, 12u, 13u, 14u
|
| -};
|
| + 0u, 1u, 2u, 4u, 5u, 6u, 8u, 9u, 128u, 128u, 128u, 128u, 10u, 12u, 13u, 14u};
|
|
|
| // YUY2 shuf 16 Y to 32 Y.
|
| -static const lvec8 kShuffleYUY2Y = {
|
| - 0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14,
|
| - 0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14
|
| -};
|
| +static const lvec8 kShuffleYUY2Y = {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10,
|
| + 10, 12, 12, 14, 14, 0, 0, 2, 2, 4, 4,
|
| + 6, 6, 8, 8, 10, 10, 12, 12, 14, 14};
|
|
|
| // YUY2 shuf 8 UV to 16 UV.
|
| -static const lvec8 kShuffleYUY2UV = {
|
| - 1, 3, 1, 3, 5, 7, 5, 7, 9, 11, 9, 11, 13, 15, 13, 15,
|
| - 1, 3, 1, 3, 5, 7, 5, 7, 9, 11, 9, 11, 13, 15, 13, 15
|
| -};
|
| +static const lvec8 kShuffleYUY2UV = {1, 3, 1, 3, 5, 7, 5, 7, 9, 11, 9,
|
| + 11, 13, 15, 13, 15, 1, 3, 1, 3, 5, 7,
|
| + 5, 7, 9, 11, 9, 11, 13, 15, 13, 15};
|
|
|
| // UYVY shuf 16 Y to 32 Y.
|
| -static const lvec8 kShuffleUYVYY = {
|
| - 1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15,
|
| - 1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15
|
| -};
|
| +static const lvec8 kShuffleUYVYY = {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11,
|
| + 11, 13, 13, 15, 15, 1, 1, 3, 3, 5, 5,
|
| + 7, 7, 9, 9, 11, 11, 13, 13, 15, 15};
|
|
|
| // UYVY shuf 8 UV to 16 UV.
|
| -static const lvec8 kShuffleUYVYUV = {
|
| - 0, 2, 0, 2, 4, 6, 4, 6, 8, 10, 8, 10, 12, 14, 12, 14,
|
| - 0, 2, 0, 2, 4, 6, 4, 6, 8, 10, 8, 10, 12, 14, 12, 14
|
| -};
|
| +static const lvec8 kShuffleUYVYUV = {0, 2, 0, 2, 4, 6, 4, 6, 8, 10, 8,
|
| + 10, 12, 14, 12, 14, 0, 2, 0, 2, 4, 6,
|
| + 4, 6, 8, 10, 8, 10, 12, 14, 12, 14};
|
|
|
| // NV21 shuf 8 VU to 16 UV.
|
| static const lvec8 kShuffleNV21 = {
|
| - 1, 0, 1, 0, 3, 2, 3, 2, 5, 4, 5, 4, 7, 6, 7, 6,
|
| - 1, 0, 1, 0, 3, 2, 3, 2, 5, 4, 5, 4, 7, 6, 7, 6,
|
| + 1, 0, 1, 0, 3, 2, 3, 2, 5, 4, 5, 4, 7, 6, 7, 6,
|
| + 1, 0, 1, 0, 3, 2, 3, 2, 5, 4, 5, 4, 7, 6, 7, 6,
|
| };
|
| #endif // HAS_RGB24TOARGBROW_SSSE3
|
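Every shuffle table in the block above (kShuffleMaskRGB24ToARGB, kShuffleMaskRAWToRGB24_*, kShuffleYUY2Y/UV, kShuffleUYVYY/UV, kShuffleNV21, ...) is a pshufb control mask: each output byte names the input byte it copies, and 128 means "write zero". A scalar model of that operation, useful for checking a table by hand (the helper is illustrative only, not something libyuv provides):

// Models a 16-lane pshufb: an index with the high bit set produces 0,
// otherwise the low 4 bits select a source byte.
static void ScalarShuffle16(const uint8* src, const uint8* mask, uint8* dst) {
  for (int i = 0; i < 16; ++i) {
    dst[i] = (mask[i] & 0x80) ? 0 : src[mask[i] & 15];
  }
}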
|
|
| @@ -571,98 +537,99 @@ void ARGBToRGB565Row_SSE2(const uint8* src, uint8* dst, int width) {
|
| );
|
| }
|
|
|
| -void ARGBToRGB565DitherRow_SSE2(const uint8* src, uint8* dst,
|
| - const uint32 dither4, int width) {
|
| - asm volatile (
|
| - "movd %3,%%xmm6 \n"
|
| - "punpcklbw %%xmm6,%%xmm6 \n"
|
| - "movdqa %%xmm6,%%xmm7 \n"
|
| - "punpcklwd %%xmm6,%%xmm6 \n"
|
| - "punpckhwd %%xmm7,%%xmm7 \n"
|
| - "pcmpeqb %%xmm3,%%xmm3 \n"
|
| - "psrld $0x1b,%%xmm3 \n"
|
| - "pcmpeqb %%xmm4,%%xmm4 \n"
|
| - "psrld $0x1a,%%xmm4 \n"
|
| - "pslld $0x5,%%xmm4 \n"
|
| - "pcmpeqb %%xmm5,%%xmm5 \n"
|
| - "pslld $0xb,%%xmm5 \n"
|
| -
|
| - LABELALIGN
|
| - "1: \n"
|
| - "movdqu (%0),%%xmm0 \n"
|
| - "paddusb %%xmm6,%%xmm0 \n"
|
| - "movdqa %%xmm0,%%xmm1 \n"
|
| - "movdqa %%xmm0,%%xmm2 \n"
|
| - "pslld $0x8,%%xmm0 \n"
|
| - "psrld $0x3,%%xmm1 \n"
|
| - "psrld $0x5,%%xmm2 \n"
|
| - "psrad $0x10,%%xmm0 \n"
|
| - "pand %%xmm3,%%xmm1 \n"
|
| - "pand %%xmm4,%%xmm2 \n"
|
| - "pand %%xmm5,%%xmm0 \n"
|
| - "por %%xmm2,%%xmm1 \n"
|
| - "por %%xmm1,%%xmm0 \n"
|
| - "packssdw %%xmm0,%%xmm0 \n"
|
| - "lea 0x10(%0),%0 \n"
|
| - "movq %%xmm0,(%1) \n"
|
| - "lea 0x8(%1),%1 \n"
|
| - "sub $0x4,%2 \n"
|
| - "jg 1b \n"
|
| - : "+r"(src), // %0
|
| - "+r"(dst), // %1
|
| - "+r"(width) // %2
|
| - : "m"(dither4) // %3
|
| - : "memory", "cc",
|
| - "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
|
| - );
|
| +void ARGBToRGB565DitherRow_SSE2(const uint8* src,
|
| + uint8* dst,
|
| + const uint32 dither4,
|
| + int width) {
|
| + asm volatile(
|
| + "movd %3,%%xmm6 \n"
|
| + "punpcklbw %%xmm6,%%xmm6 \n"
|
| + "movdqa %%xmm6,%%xmm7 \n"
|
| + "punpcklwd %%xmm6,%%xmm6 \n"
|
| + "punpckhwd %%xmm7,%%xmm7 \n"
|
| + "pcmpeqb %%xmm3,%%xmm3 \n"
|
| + "psrld $0x1b,%%xmm3 \n"
|
| + "pcmpeqb %%xmm4,%%xmm4 \n"
|
| + "psrld $0x1a,%%xmm4 \n"
|
| + "pslld $0x5,%%xmm4 \n"
|
| + "pcmpeqb %%xmm5,%%xmm5 \n"
|
| + "pslld $0xb,%%xmm5 \n"
|
| +
|
| + LABELALIGN
|
| + "1: \n"
|
| + "movdqu (%0),%%xmm0 \n"
|
| + "paddusb %%xmm6,%%xmm0 \n"
|
| + "movdqa %%xmm0,%%xmm1 \n"
|
| + "movdqa %%xmm0,%%xmm2 \n"
|
| + "pslld $0x8,%%xmm0 \n"
|
| + "psrld $0x3,%%xmm1 \n"
|
| + "psrld $0x5,%%xmm2 \n"
|
| + "psrad $0x10,%%xmm0 \n"
|
| + "pand %%xmm3,%%xmm1 \n"
|
| + "pand %%xmm4,%%xmm2 \n"
|
| + "pand %%xmm5,%%xmm0 \n"
|
| + "por %%xmm2,%%xmm1 \n"
|
| + "por %%xmm1,%%xmm0 \n"
|
| + "packssdw %%xmm0,%%xmm0 \n"
|
| + "lea 0x10(%0),%0 \n"
|
| + "movq %%xmm0,(%1) \n"
|
| + "lea 0x8(%1),%1 \n"
|
| + "sub $0x4,%2 \n"
|
| + "jg 1b \n"
|
| + : "+r"(src), // %0
|
| + "+r"(dst), // %1
|
| + "+r"(width) // %2
|
| + : "m"(dither4) // %3
|
| + : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6",
|
| + "xmm7");
|
| }
|
|
|
| #ifdef HAS_ARGBTORGB565DITHERROW_AVX2
|
| -void ARGBToRGB565DitherRow_AVX2(const uint8* src, uint8* dst,
|
| - const uint32 dither4, int width) {
|
| - asm volatile (
|
| - "vbroadcastss %3,%%xmm6 \n"
|
| - "vpunpcklbw %%xmm6,%%xmm6,%%xmm6 \n"
|
| - "vpermq $0xd8,%%ymm6,%%ymm6 \n"
|
| - "vpunpcklwd %%ymm6,%%ymm6,%%ymm6 \n"
|
| - "vpcmpeqb %%ymm3,%%ymm3,%%ymm3 \n"
|
| - "vpsrld $0x1b,%%ymm3,%%ymm3 \n"
|
| - "vpcmpeqb %%ymm4,%%ymm4,%%ymm4 \n"
|
| - "vpsrld $0x1a,%%ymm4,%%ymm4 \n"
|
| - "vpslld $0x5,%%ymm4,%%ymm4 \n"
|
| - "vpslld $0xb,%%ymm3,%%ymm5 \n"
|
| -
|
| - LABELALIGN
|
| - "1: \n"
|
| - "vmovdqu (%0),%%ymm0 \n"
|
| - "vpaddusb %%ymm6,%%ymm0,%%ymm0 \n"
|
| - "vpsrld $0x5,%%ymm0,%%ymm2 \n"
|
| - "vpsrld $0x3,%%ymm0,%%ymm1 \n"
|
| - "vpsrld $0x8,%%ymm0,%%ymm0 \n"
|
| - "vpand %%ymm4,%%ymm2,%%ymm2 \n"
|
| - "vpand %%ymm3,%%ymm1,%%ymm1 \n"
|
| - "vpand %%ymm5,%%ymm0,%%ymm0 \n"
|
| - "vpor %%ymm2,%%ymm1,%%ymm1 \n"
|
| - "vpor %%ymm1,%%ymm0,%%ymm0 \n"
|
| - "vpackusdw %%ymm0,%%ymm0,%%ymm0 \n"
|
| - "vpermq $0xd8,%%ymm0,%%ymm0 \n"
|
| - "lea 0x20(%0),%0 \n"
|
| - "vmovdqu %%xmm0,(%1) \n"
|
| - "lea 0x10(%1),%1 \n"
|
| - "sub $0x8,%2 \n"
|
| - "jg 1b \n"
|
| - "vzeroupper \n"
|
| - : "+r"(src), // %0
|
| - "+r"(dst), // %1
|
| - "+r"(width) // %2
|
| - : "m"(dither4) // %3
|
| - : "memory", "cc",
|
| - "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
|
| - );
|
| +void ARGBToRGB565DitherRow_AVX2(const uint8* src,
|
| + uint8* dst,
|
| + const uint32 dither4,
|
| + int width) {
|
| + asm volatile(
|
| + "vbroadcastss %3,%%xmm6 \n"
|
| + "vpunpcklbw %%xmm6,%%xmm6,%%xmm6 \n"
|
| + "vpermq $0xd8,%%ymm6,%%ymm6 \n"
|
| + "vpunpcklwd %%ymm6,%%ymm6,%%ymm6 \n"
|
| + "vpcmpeqb %%ymm3,%%ymm3,%%ymm3 \n"
|
| + "vpsrld $0x1b,%%ymm3,%%ymm3 \n"
|
| + "vpcmpeqb %%ymm4,%%ymm4,%%ymm4 \n"
|
| + "vpsrld $0x1a,%%ymm4,%%ymm4 \n"
|
| + "vpslld $0x5,%%ymm4,%%ymm4 \n"
|
| + "vpslld $0xb,%%ymm3,%%ymm5 \n"
|
| +
|
| + LABELALIGN
|
| + "1: \n"
|
| + "vmovdqu (%0),%%ymm0 \n"
|
| + "vpaddusb %%ymm6,%%ymm0,%%ymm0 \n"
|
| + "vpsrld $0x5,%%ymm0,%%ymm2 \n"
|
| + "vpsrld $0x3,%%ymm0,%%ymm1 \n"
|
| + "vpsrld $0x8,%%ymm0,%%ymm0 \n"
|
| + "vpand %%ymm4,%%ymm2,%%ymm2 \n"
|
| + "vpand %%ymm3,%%ymm1,%%ymm1 \n"
|
| + "vpand %%ymm5,%%ymm0,%%ymm0 \n"
|
| + "vpor %%ymm2,%%ymm1,%%ymm1 \n"
|
| + "vpor %%ymm1,%%ymm0,%%ymm0 \n"
|
| + "vpackusdw %%ymm0,%%ymm0,%%ymm0 \n"
|
| + "vpermq $0xd8,%%ymm0,%%ymm0 \n"
|
| + "lea 0x20(%0),%0 \n"
|
| + "vmovdqu %%xmm0,(%1) \n"
|
| + "lea 0x10(%1),%1 \n"
|
| + "sub $0x8,%2 \n"
|
| + "jg 1b \n"
|
| + "vzeroupper \n"
|
| + : "+r"(src), // %0
|
| + "+r"(dst), // %1
|
| + "+r"(width) // %2
|
| + : "m"(dither4) // %3
|
| + : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6",
|
| + "xmm7");
|
| }
|
| #endif // HAS_ARGBTORGB565DITHERROW_AVX2
|
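Both dither rows above convert ARGB to RGB565 after a saturating add of a per-pixel dither byte; dither4 supplies one byte for each pixel in a group of four, and the setup code splats that byte across the pixel's four channels. One pixel of that pipeline in scalar form, with an illustrative helper name and libyuv's uint8/uint16/uint32 typedefs, following the mask/shift sequence in the SSE2 loop:

// Sketch of one pixel of ARGBToRGB565DitherRow: saturating add, then pack to
// 5:6:5. argb is the little-endian pixel (B in the low byte); dither is the
// byte selected from dither4 for this pixel.
static uint16 ScalarARGBToRGB565Dither(uint32 argb, uint8 dither) {
  uint32 b = (argb & 0xff) + dither;
  uint32 g = ((argb >> 8) & 0xff) + dither;
  uint32 r = ((argb >> 16) & 0xff) + dither;
  if (b > 255) b = 255;
  if (g > 255) g = 255;
  if (r > 255) r = 255;
  return (uint16)((b >> 3) | ((g >> 2) << 5) | ((r >> 3) << 11));
}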
|
|
| -
|
| void ARGBToARGB1555Row_SSE2(const uint8* src, uint8* dst, int width) {
|
| asm volatile (
|
| "pcmpeqb %%xmm4,%%xmm4 \n"
|
| @@ -811,9 +778,7 @@ void ARGBToYJRow_SSSE3(const uint8* src_argb, uint8* dst_y, int width) {
|
|
|
| #ifdef HAS_ARGBTOYROW_AVX2
|
| // vpermd for vphaddw + vpackuswb vpermd.
|
| -static const lvec32 kPermdARGBToY_AVX = {
|
| - 0, 4, 1, 5, 2, 6, 3, 7
|
| -};
|
| +static const lvec32 kPermdARGBToY_AVX = {0, 4, 1, 5, 2, 6, 3, 7};
|
|
|
| // Convert 32 ARGB pixels (128 bytes) to 32 Y values.
|
| void ARGBToYRow_AVX2(const uint8* src_argb, uint8* dst_y, int width) {
|
| @@ -898,8 +863,11 @@ void ARGBToYJRow_AVX2(const uint8* src_argb, uint8* dst_y, int width) {
|
| #endif // HAS_ARGBTOYJROW_AVX2
|
|
|
| #ifdef HAS_ARGBTOUVROW_SSSE3
|
| -void ARGBToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
|
| - uint8* dst_u, uint8* dst_v, int width) {
|
| +void ARGBToUVRow_SSSE3(const uint8* src_argb0,
|
| + int src_stride_argb,
|
| + uint8* dst_u,
|
| + uint8* dst_v,
|
| + int width) {
|
| asm volatile (
|
| "movdqa %5,%%xmm3 \n"
|
| "movdqa %6,%%xmm4 \n"
|
| @@ -963,11 +931,13 @@ void ARGBToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
|
| #ifdef HAS_ARGBTOUVROW_AVX2
|
| // vpshufb for vphaddw + vpackuswb packed to shorts.
|
| static const lvec8 kShufARGBToUV_AVX = {
|
| - 0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15,
|
| - 0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15
|
| -};
|
| -void ARGBToUVRow_AVX2(const uint8* src_argb0, int src_stride_argb,
|
| - uint8* dst_u, uint8* dst_v, int width) {
|
| + 0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15,
|
| + 0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15};
|
| +void ARGBToUVRow_AVX2(const uint8* src_argb0,
|
| + int src_stride_argb,
|
| + uint8* dst_u,
|
| + uint8* dst_v,
|
| + int width) {
|
| asm volatile (
|
| "vbroadcastf128 %5,%%ymm5 \n"
|
| "vbroadcastf128 %6,%%ymm6 \n"
|
| @@ -1026,8 +996,11 @@ void ARGBToUVRow_AVX2(const uint8* src_argb0, int src_stride_argb,
|
| #endif // HAS_ARGBTOUVROW_AVX2
|
|
|
| #ifdef HAS_ARGBTOUVJROW_AVX2
|
| -void ARGBToUVJRow_AVX2(const uint8* src_argb0, int src_stride_argb,
|
| - uint8* dst_u, uint8* dst_v, int width) {
|
| +void ARGBToUVJRow_AVX2(const uint8* src_argb0,
|
| + int src_stride_argb,
|
| + uint8* dst_u,
|
| + uint8* dst_v,
|
| + int width) {
|
| asm volatile (
|
| "vbroadcastf128 %5,%%ymm5 \n"
|
| "vbroadcastf128 %6,%%ymm6 \n"
|
| @@ -1087,8 +1060,11 @@ void ARGBToUVJRow_AVX2(const uint8* src_argb0, int src_stride_argb,
|
| #endif // HAS_ARGBTOUVJROW_AVX2
|
|
|
| #ifdef HAS_ARGBTOUVJROW_SSSE3
|
| -void ARGBToUVJRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
|
| - uint8* dst_u, uint8* dst_v, int width) {
|
| +void ARGBToUVJRow_SSSE3(const uint8* src_argb0,
|
| + int src_stride_argb,
|
| + uint8* dst_u,
|
| + uint8* dst_v,
|
| + int width) {
|
| asm volatile (
|
| "movdqa %5,%%xmm3 \n"
|
| "movdqa %6,%%xmm4 \n"
|
| @@ -1151,7 +1127,9 @@ void ARGBToUVJRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
|
| #endif // HAS_ARGBTOUVJROW_SSSE3
|
|
|
| #ifdef HAS_ARGBTOUV444ROW_SSSE3
|
| -void ARGBToUV444Row_SSSE3(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
|
| +void ARGBToUV444Row_SSSE3(const uint8* src_argb,
|
| + uint8* dst_u,
|
| + uint8* dst_v,
|
| int width) {
|
| asm volatile (
|
| "movdqa %4,%%xmm3 \n"
|
| @@ -1241,8 +1219,11 @@ void BGRAToYRow_SSSE3(const uint8* src_bgra, uint8* dst_y, int width) {
|
| );
|
| }
|
|
|
| -void BGRAToUVRow_SSSE3(const uint8* src_bgra0, int src_stride_bgra,
|
| - uint8* dst_u, uint8* dst_v, int width) {
|
| +void BGRAToUVRow_SSSE3(const uint8* src_bgra0,
|
| + int src_stride_bgra,
|
| + uint8* dst_u,
|
| + uint8* dst_v,
|
| + int width) {
|
| asm volatile (
|
| "movdqa %5,%%xmm3 \n"
|
| "movdqa %6,%%xmm4 \n"
|
| @@ -1370,8 +1351,11 @@ void RGBAToYRow_SSSE3(const uint8* src_rgba, uint8* dst_y, int width) {
|
| );
|
| }
|
|
|
| -void ABGRToUVRow_SSSE3(const uint8* src_abgr0, int src_stride_abgr,
|
| - uint8* dst_u, uint8* dst_v, int width) {
|
| +void ABGRToUVRow_SSSE3(const uint8* src_abgr0,
|
| + int src_stride_abgr,
|
| + uint8* dst_u,
|
| + uint8* dst_v,
|
| + int width) {
|
| asm volatile (
|
| "movdqa %5,%%xmm3 \n"
|
| "movdqa %6,%%xmm4 \n"
|
| @@ -1431,8 +1415,11 @@ void ABGRToUVRow_SSSE3(const uint8* src_abgr0, int src_stride_abgr,
|
| );
|
| }
|
|
|
| -void RGBAToUVRow_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
|
| - uint8* dst_u, uint8* dst_v, int width) {
|
| +void RGBAToUVRow_SSSE3(const uint8* src_rgba0,
|
| + int src_stride_rgba,
|
| + uint8* dst_u,
|
| + uint8* dst_v,
|
| + int width) {
|
| asm volatile (
|
| "movdqa %5,%%xmm3 \n"
|
| "movdqa %6,%%xmm4 \n"
|
| @@ -1495,8 +1482,8 @@ void RGBAToUVRow_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
|
| #if defined(HAS_I422TOARGBROW_SSSE3) || defined(HAS_I422TOARGBROW_AVX2)
|
|
|
| // Read 8 UV from 444
|
| -#define READYUV444 \
|
| - "movq " MEMACCESS([u_buf]) ",%%xmm0 \n" \
|
| +#define READYUV444 \
|
| + "movq " MEMACCESS([u_buf]) ",%%xmm0 \n" \
|
| MEMOPREG(movq, 0x00, [u_buf], [v_buf], 1, xmm1) \
|
| "lea " MEMLEA(0x8, [u_buf]) ",%[u_buf] \n" \
|
| "punpcklbw %%xmm1,%%xmm0 \n" \
|
| @@ -1505,8 +1492,8 @@ void RGBAToUVRow_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
|
| "lea " MEMLEA(0x8, [y_buf]) ",%[y_buf] \n"
|
|
|
| // Read 4 UV from 422, upsample to 8 UV
|
| -#define READYUV422 \
|
| - "movd " MEMACCESS([u_buf]) ",%%xmm0 \n" \
|
| +#define READYUV422 \
|
| + "movd " MEMACCESS([u_buf]) ",%%xmm0 \n" \
|
| MEMOPREG(movd, 0x00, [u_buf], [v_buf], 1, xmm1) \
|
| "lea " MEMLEA(0x4, [u_buf]) ",%[u_buf] \n" \
|
| "punpcklbw %%xmm1,%%xmm0 \n" \
|
| @@ -1516,8 +1503,8 @@ void RGBAToUVRow_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
|
| "lea " MEMLEA(0x8, [y_buf]) ",%[y_buf] \n"
|
|
|
| // Read 4 UV from 422, upsample to 8 UV. With 8 Alpha.
|
| -#define READYUVA422 \
|
| - "movd " MEMACCESS([u_buf]) ",%%xmm0 \n" \
|
| +#define READYUVA422 \
|
| + "movd " MEMACCESS([u_buf]) ",%%xmm0 \n" \
|
| MEMOPREG(movd, 0x00, [u_buf], [v_buf], 1, xmm1) \
|
| "lea " MEMLEA(0x4, [u_buf]) ",%[u_buf] \n" \
|
| "punpcklbw %%xmm1,%%xmm0 \n" \
|
| @@ -1529,8 +1516,8 @@ void RGBAToUVRow_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
|
| "lea " MEMLEA(0x8, [a_buf]) ",%[a_buf] \n"
|
|
|
| // Read 4 UV from NV12, upsample to 8 UV
|
| -#define READNV12 \
|
| - "movq " MEMACCESS([uv_buf]) ",%%xmm0 \n" \
|
| +#define READNV12 \
|
| + "movq " MEMACCESS([uv_buf]) ",%%xmm0 \n" \
|
| "lea " MEMLEA(0x8, [uv_buf]) ",%[uv_buf] \n" \
|
| "punpcklwd %%xmm0,%%xmm0 \n" \
|
| "movq " MEMACCESS([y_buf]) ",%%xmm4 \n" \
|
| @@ -1538,8 +1525,8 @@ void RGBAToUVRow_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
|
| "lea " MEMLEA(0x8, [y_buf]) ",%[y_buf] \n"
|
|
|
| // Read 4 VU from NV21, upsample to 8 UV
|
| -#define READNV21 \
|
| - "movq " MEMACCESS([vu_buf]) ",%%xmm0 \n" \
|
| +#define READNV21 \
|
| + "movq " MEMACCESS([vu_buf]) ",%%xmm0 \n" \
|
| "lea " MEMLEA(0x8, [vu_buf]) ",%[vu_buf] \n" \
|
| "pshufb %[kShuffleNV21], %%xmm0 \n" \
|
| "movq " MEMACCESS([y_buf]) ",%%xmm4 \n" \
|
| @@ -1547,24 +1534,24 @@ void RGBAToUVRow_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
|
| "lea " MEMLEA(0x8, [y_buf]) ",%[y_buf] \n"
|
|
|
| // Read 4 YUY2 with 8 Y and update 4 UV to 8 UV.
|
| -#define READYUY2 \
|
| - "movdqu " MEMACCESS([yuy2_buf]) ",%%xmm4 \n" \
|
| +#define READYUY2 \
|
| + "movdqu " MEMACCESS([yuy2_buf]) ",%%xmm4 \n" \
|
| "pshufb %[kShuffleYUY2Y], %%xmm4 \n" \
|
| "movdqu " MEMACCESS([yuy2_buf]) ",%%xmm0 \n" \
|
| "pshufb %[kShuffleYUY2UV], %%xmm0 \n" \
|
| "lea " MEMLEA(0x10, [yuy2_buf]) ",%[yuy2_buf] \n"
|
|
|
| // Read 4 UYVY with 8 Y and update 4 UV to 8 UV.
|
| -#define READUYVY \
|
| - "movdqu " MEMACCESS([uyvy_buf]) ",%%xmm4 \n" \
|
| +#define READUYVY \
|
| + "movdqu " MEMACCESS([uyvy_buf]) ",%%xmm4 \n" \
|
| "pshufb %[kShuffleUYVYY], %%xmm4 \n" \
|
| "movdqu " MEMACCESS([uyvy_buf]) ",%%xmm0 \n" \
|
| "pshufb %[kShuffleUYVYUV], %%xmm0 \n" \
|
| "lea " MEMLEA(0x10, [uyvy_buf]) ",%[uyvy_buf] \n"
|
|
|
| #if defined(__x86_64__)
|
| -#define YUVTORGB_SETUP(yuvconstants) \
|
| - "movdqa " MEMACCESS([yuvconstants]) ",%%xmm8 \n" \
|
| +#define YUVTORGB_SETUP(yuvconstants) \
|
| + "movdqa " MEMACCESS([yuvconstants]) ",%%xmm8 \n" \
|
| "movdqa " MEMACCESS2(32, [yuvconstants]) ",%%xmm9 \n" \
|
| "movdqa " MEMACCESS2(64, [yuvconstants]) ",%%xmm10 \n" \
|
| "movdqa " MEMACCESS2(96, [yuvconstants]) ",%%xmm11 \n" \
|
| @@ -1572,37 +1559,37 @@ void RGBAToUVRow_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
|
| "movdqa " MEMACCESS2(160, [yuvconstants]) ",%%xmm13 \n" \
|
| "movdqa " MEMACCESS2(192, [yuvconstants]) ",%%xmm14 \n"
|
| // Convert 8 pixels: 8 UV and 8 Y
|
| -#define YUVTORGB(yuvconstants) \
|
| - "movdqa %%xmm0,%%xmm1 \n" \
|
| - "movdqa %%xmm0,%%xmm2 \n" \
|
| - "movdqa %%xmm0,%%xmm3 \n" \
|
| - "movdqa %%xmm11,%%xmm0 \n" \
|
| - "pmaddubsw %%xmm8,%%xmm1 \n" \
|
| - "psubw %%xmm1,%%xmm0 \n" \
|
| - "movdqa %%xmm12,%%xmm1 \n" \
|
| - "pmaddubsw %%xmm9,%%xmm2 \n" \
|
| - "psubw %%xmm2,%%xmm1 \n" \
|
| - "movdqa %%xmm13,%%xmm2 \n" \
|
| - "pmaddubsw %%xmm10,%%xmm3 \n" \
|
| - "psubw %%xmm3,%%xmm2 \n" \
|
| - "pmulhuw %%xmm14,%%xmm4 \n" \
|
| - "paddsw %%xmm4,%%xmm0 \n" \
|
| - "paddsw %%xmm4,%%xmm1 \n" \
|
| - "paddsw %%xmm4,%%xmm2 \n" \
|
| - "psraw $0x6,%%xmm0 \n" \
|
| - "psraw $0x6,%%xmm1 \n" \
|
| - "psraw $0x6,%%xmm2 \n" \
|
| - "packuswb %%xmm0,%%xmm0 \n" \
|
| - "packuswb %%xmm1,%%xmm1 \n" \
|
| - "packuswb %%xmm2,%%xmm2 \n"
|
| +#define YUVTORGB(yuvconstants) \
|
| + "movdqa %%xmm0,%%xmm1 \n" \
|
| + "movdqa %%xmm0,%%xmm2 \n" \
|
| + "movdqa %%xmm0,%%xmm3 \n" \
|
| + "movdqa %%xmm11,%%xmm0 \n" \
|
| + "pmaddubsw %%xmm8,%%xmm1 \n" \
|
| + "psubw %%xmm1,%%xmm0 \n" \
|
| + "movdqa %%xmm12,%%xmm1 \n" \
|
| + "pmaddubsw %%xmm9,%%xmm2 \n" \
|
| + "psubw %%xmm2,%%xmm1 \n" \
|
| + "movdqa %%xmm13,%%xmm2 \n" \
|
| + "pmaddubsw %%xmm10,%%xmm3 \n" \
|
| + "psubw %%xmm3,%%xmm2 \n" \
|
| + "pmulhuw %%xmm14,%%xmm4 \n" \
|
| + "paddsw %%xmm4,%%xmm0 \n" \
|
| + "paddsw %%xmm4,%%xmm1 \n" \
|
| + "paddsw %%xmm4,%%xmm2 \n" \
|
| + "psraw $0x6,%%xmm0 \n" \
|
| + "psraw $0x6,%%xmm1 \n" \
|
| + "psraw $0x6,%%xmm2 \n" \
|
| + "packuswb %%xmm0,%%xmm0 \n" \
|
| + "packuswb %%xmm1,%%xmm1 \n" \
|
| + "packuswb %%xmm2,%%xmm2 \n"
|
| #define YUVTORGB_REGS \
|
| - "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14",
|
| + "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14",
|
|
|
| #else
|
| #define YUVTORGB_SETUP(yuvconstants)
|
| // Convert 8 pixels: 8 UV and 8 Y
|
| -#define YUVTORGB(yuvconstants) \
|
| - "movdqa %%xmm0,%%xmm1 \n" \
|
| +#define YUVTORGB(yuvconstants) \
|
| + "movdqa %%xmm0,%%xmm1 \n" \
|
| "movdqa %%xmm0,%%xmm2 \n" \
|
| "movdqa %%xmm0,%%xmm3 \n" \
|
| "movdqa " MEMACCESS2(96, [yuvconstants]) ",%%xmm0 \n" \
|
| @@ -1628,8 +1615,8 @@ void RGBAToUVRow_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
|
| #endif
|
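The READ* loaders and the YUVTORGB macros above do the whole YUV-to-RGB matrix multiply in 6-bit fixed point, pulling per-channel coefficients and biases from struct YuvConstants and clamping with packuswb. A floating-point reference for the BT.601 conversion they approximate (an approximation only; the fixed-point truncation can differ in the low bit, and these helper names are illustrative, not libyuv functions):

// Float reference for what YUVTORGB computes per pixel with BT.601
// studio-swing constants.
static uint8 ClampToByte(float v) {
  return (uint8)(v < 0.f ? 0.f : (v > 255.f ? 255.f : v));
}
static void ScalarYuvToRgb(uint8 y, uint8 u, uint8 v,
                           uint8* b, uint8* g, uint8* r) {
  float yf = 1.164f * (y - 16);
  *b = ClampToByte(yf + 2.018f * (u - 128));
  *g = ClampToByte(yf - 0.391f * (u - 128) - 0.813f * (v - 128));
  *r = ClampToByte(yf + 1.596f * (v - 128));
}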
|
|
| // Store 8 ARGB values.
|
| -#define STOREARGB \
|
| - "punpcklbw %%xmm1,%%xmm0 \n" \
|
| +#define STOREARGB \
|
| + "punpcklbw %%xmm1,%%xmm0 \n" \
|
| "punpcklbw %%xmm5,%%xmm2 \n" \
|
| "movdqa %%xmm0,%%xmm1 \n" \
|
| "punpcklwd %%xmm2,%%xmm0 \n" \
|
| @@ -1639,8 +1626,8 @@ void RGBAToUVRow_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
|
| "lea " MEMLEA(0x20, [dst_argb]) ", %[dst_argb] \n"
|
|
|
| // Store 8 RGBA values.
|
| -#define STORERGBA \
|
| - "pcmpeqb %%xmm5,%%xmm5 \n" \
|
| +#define STORERGBA \
|
| + "pcmpeqb %%xmm5,%%xmm5 \n" \
|
| "punpcklbw %%xmm2,%%xmm1 \n" \
|
| "punpcklbw %%xmm0,%%xmm5 \n" \
|
| "movdqa %%xmm5,%%xmm0 \n" \
|
| @@ -1759,6 +1746,7 @@ void OMITFP I422AlphaToARGBRow_SSSE3(const uint8* y_buf,
|
| uint8* dst_argb,
|
| const struct YuvConstants* yuvconstants,
|
| int width) {
|
| + // clang-format off
|
| asm volatile (
|
| YUVTORGB_SETUP(yuvconstants)
|
| "sub %[u_buf],%[v_buf] \n"
|
| @@ -1783,6 +1771,7 @@ void OMITFP I422AlphaToARGBRow_SSSE3(const uint8* y_buf,
|
| : "memory", "cc", NACL_R14 YUVTORGB_REGS
|
| "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
|
| );
|
| + // clang-format on
|
| }
|
| #endif // HAS_I422ALPHATOARGBROW_SSSE3
|
|
|
| @@ -1791,6 +1780,7 @@ void OMITFP NV12ToARGBRow_SSSE3(const uint8* y_buf,
|
| uint8* dst_argb,
|
| const struct YuvConstants* yuvconstants,
|
| int width) {
|
| + // clang-format off
|
| asm volatile (
|
| YUVTORGB_SETUP(yuvconstants)
|
| "pcmpeqb %%xmm5,%%xmm5 \n"
|
| @@ -1809,6 +1799,7 @@ void OMITFP NV12ToARGBRow_SSSE3(const uint8* y_buf,
|
| : "memory", "cc", YUVTORGB_REGS // Does not use r14.
|
| "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
|
| );
|
| + // clang-format on
|
| }
|
|
|
| void OMITFP NV21ToARGBRow_SSSE3(const uint8* y_buf,
|
| @@ -1816,6 +1807,7 @@ void OMITFP NV21ToARGBRow_SSSE3(const uint8* y_buf,
|
| uint8* dst_argb,
|
| const struct YuvConstants* yuvconstants,
|
| int width) {
|
| + // clang-format off
|
| asm volatile (
|
| YUVTORGB_SETUP(yuvconstants)
|
| "pcmpeqb %%xmm5,%%xmm5 \n"
|
| @@ -1835,12 +1827,14 @@ void OMITFP NV21ToARGBRow_SSSE3(const uint8* y_buf,
|
| : "memory", "cc", YUVTORGB_REGS // Does not use r14.
|
| "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
|
| );
|
| + // clang-format on
|
| }
|
|
|
| void OMITFP YUY2ToARGBRow_SSSE3(const uint8* yuy2_buf,
|
| uint8* dst_argb,
|
| const struct YuvConstants* yuvconstants,
|
| int width) {
|
| + // clang-format off
|
| asm volatile (
|
| YUVTORGB_SETUP(yuvconstants)
|
| "pcmpeqb %%xmm5,%%xmm5 \n"
|
| @@ -1860,12 +1854,14 @@ void OMITFP YUY2ToARGBRow_SSSE3(const uint8* yuy2_buf,
|
| : "memory", "cc", YUVTORGB_REGS // Does not use r14.
|
| "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
|
| );
|
| + // clang-format on
|
| }
|
|
|
| void OMITFP UYVYToARGBRow_SSSE3(const uint8* uyvy_buf,
|
| uint8* dst_argb,
|
| const struct YuvConstants* yuvconstants,
|
| int width) {
|
| + // clang-format off
|
| asm volatile (
|
| YUVTORGB_SETUP(yuvconstants)
|
| "pcmpeqb %%xmm5,%%xmm5 \n"
|
| @@ -1885,6 +1881,7 @@ void OMITFP UYVYToARGBRow_SSSE3(const uint8* uyvy_buf,
|
| : "memory", "cc", YUVTORGB_REGS // Does not use r14.
|
| "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
|
| );
|
| + // clang-format on
|
| }
|
|
|
| void OMITFP I422ToRGBARow_SSSE3(const uint8* y_buf,
|
| @@ -1918,8 +1915,8 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8* y_buf,
|
| #endif // HAS_I422TOARGBROW_SSSE3
|
|
|
| // Read 16 UV from 444
|
| -#define READYUV444_AVX2 \
|
| - "vmovdqu " MEMACCESS([u_buf]) ",%%xmm0 \n" \
|
| +#define READYUV444_AVX2 \
|
| + "vmovdqu " MEMACCESS([u_buf]) ",%%xmm0 \n" \
|
| MEMOPREG(vmovdqu, 0x00, [u_buf], [v_buf], 1, xmm1) \
|
| "lea " MEMLEA(0x10, [u_buf]) ",%[u_buf] \n" \
|
| "vpermq $0xd8,%%ymm0,%%ymm0 \n" \
|
| @@ -1931,8 +1928,8 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8* y_buf,
|
| "lea " MEMLEA(0x10, [y_buf]) ",%[y_buf] \n"
|
|
|
| // Read 8 UV from 422, upsample to 16 UV.
|
| -#define READYUV422_AVX2 \
|
| - "vmovq " MEMACCESS([u_buf]) ",%%xmm0 \n" \
|
| +#define READYUV422_AVX2 \
|
| + "vmovq " MEMACCESS([u_buf]) ",%%xmm0 \n" \
|
| MEMOPREG(vmovq, 0x00, [u_buf], [v_buf], 1, xmm1) \
|
| "lea " MEMLEA(0x8, [u_buf]) ",%[u_buf] \n" \
|
| "vpunpcklbw %%ymm1,%%ymm0,%%ymm0 \n" \
|
| @@ -1944,8 +1941,8 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8* y_buf,
|
| "lea " MEMLEA(0x10, [y_buf]) ",%[y_buf] \n"
|
|
|
| // Read 8 UV from 422, upsample to 16 UV. With 16 Alpha.
|
| -#define READYUVA422_AVX2 \
|
| - "vmovq " MEMACCESS([u_buf]) ",%%xmm0 \n" \
|
| +#define READYUVA422_AVX2 \
|
| + "vmovq " MEMACCESS([u_buf]) ",%%xmm0 \n" \
|
| MEMOPREG(vmovq, 0x00, [u_buf], [v_buf], 1, xmm1) \
|
| "lea " MEMLEA(0x8, [u_buf]) ",%[u_buf] \n" \
|
| "vpunpcklbw %%ymm1,%%ymm0,%%ymm0 \n" \
|
| @@ -1960,8 +1957,8 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8* y_buf,
|
| "lea " MEMLEA(0x10, [a_buf]) ",%[a_buf] \n"
|
|
|
| // Read 8 UV from NV12, upsample to 16 UV.
|
| -#define READNV12_AVX2 \
|
| - "vmovdqu " MEMACCESS([uv_buf]) ",%%xmm0 \n" \
|
| +#define READNV12_AVX2 \
|
| + "vmovdqu " MEMACCESS([uv_buf]) ",%%xmm0 \n" \
|
| "lea " MEMLEA(0x10, [uv_buf]) ",%[uv_buf] \n" \
|
| "vpermq $0xd8,%%ymm0,%%ymm0 \n" \
|
| "vpunpcklwd %%ymm0,%%ymm0,%%ymm0 \n" \
|
| @@ -1971,8 +1968,8 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8* y_buf,
|
| "lea " MEMLEA(0x10, [y_buf]) ",%[y_buf] \n"
|
|
|
| // Read 8 VU from NV21, upsample to 16 UV.
|
| -#define READNV21_AVX2 \
|
| - "vmovdqu " MEMACCESS([vu_buf]) ",%%xmm0 \n" \
|
| +#define READNV21_AVX2 \
|
| + "vmovdqu " MEMACCESS([vu_buf]) ",%%xmm0 \n" \
|
| "lea " MEMLEA(0x10, [vu_buf]) ",%[vu_buf] \n" \
|
| "vpermq $0xd8,%%ymm0,%%ymm0 \n" \
|
| "vpshufb %[kShuffleNV21], %%ymm0, %%ymm0 \n" \
|
| @@ -1982,53 +1979,53 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8* y_buf,
|
| "lea " MEMLEA(0x10, [y_buf]) ",%[y_buf] \n"
|
|
|
| // Read 8 YUY2 with 16 Y and upsample 8 UV to 16 UV.
|
| -#define READYUY2_AVX2 \
|
| - "vmovdqu " MEMACCESS([yuy2_buf]) ",%%ymm4 \n" \
|
| +#define READYUY2_AVX2 \
|
| + "vmovdqu " MEMACCESS([yuy2_buf]) ",%%ymm4 \n" \
|
| "vpshufb %[kShuffleYUY2Y], %%ymm4, %%ymm4 \n" \
|
| "vmovdqu " MEMACCESS([yuy2_buf]) ",%%ymm0 \n" \
|
| "vpshufb %[kShuffleYUY2UV], %%ymm0, %%ymm0 \n" \
|
| "lea " MEMLEA(0x20, [yuy2_buf]) ",%[yuy2_buf] \n"
|
|
|
| // Read 8 UYVY with 16 Y and upsample 8 UV to 16 UV.
|
| -#define READUYVY_AVX2 \
|
| - "vmovdqu " MEMACCESS([uyvy_buf]) ",%%ymm4 \n" \
|
| +#define READUYVY_AVX2 \
|
| + "vmovdqu " MEMACCESS([uyvy_buf]) ",%%ymm4 \n" \
|
| "vpshufb %[kShuffleUYVYY], %%ymm4, %%ymm4 \n" \
|
| "vmovdqu " MEMACCESS([uyvy_buf]) ",%%ymm0 \n" \
|
| "vpshufb %[kShuffleUYVYUV], %%ymm0, %%ymm0 \n" \
|
| "lea " MEMLEA(0x20, [uyvy_buf]) ",%[uyvy_buf] \n"
|
|
|
| #if defined(__x86_64__)
|
| -#define YUVTORGB_SETUP_AVX2(yuvconstants) \
|
| - "vmovdqa " MEMACCESS([yuvconstants]) ",%%ymm8 \n" \
|
| +#define YUVTORGB_SETUP_AVX2(yuvconstants) \
|
| + "vmovdqa " MEMACCESS([yuvconstants]) ",%%ymm8 \n" \
|
| "vmovdqa " MEMACCESS2(32, [yuvconstants]) ",%%ymm9 \n" \
|
| "vmovdqa " MEMACCESS2(64, [yuvconstants]) ",%%ymm10 \n" \
|
| "vmovdqa " MEMACCESS2(96, [yuvconstants]) ",%%ymm11 \n" \
|
| "vmovdqa " MEMACCESS2(128, [yuvconstants]) ",%%ymm12 \n" \
|
| "vmovdqa " MEMACCESS2(160, [yuvconstants]) ",%%ymm13 \n" \
|
| "vmovdqa " MEMACCESS2(192, [yuvconstants]) ",%%ymm14 \n"
|
| -#define YUVTORGB_AVX2(yuvconstants) \
|
| - "vpmaddubsw %%ymm10,%%ymm0,%%ymm2 \n" \
|
| - "vpmaddubsw %%ymm9,%%ymm0,%%ymm1 \n" \
|
| - "vpmaddubsw %%ymm8,%%ymm0,%%ymm0 \n" \
|
| - "vpsubw %%ymm2,%%ymm13,%%ymm2 \n" \
|
| - "vpsubw %%ymm1,%%ymm12,%%ymm1 \n" \
|
| - "vpsubw %%ymm0,%%ymm11,%%ymm0 \n" \
|
| - "vpmulhuw %%ymm14,%%ymm4,%%ymm4 \n" \
|
| - "vpaddsw %%ymm4,%%ymm0,%%ymm0 \n" \
|
| - "vpaddsw %%ymm4,%%ymm1,%%ymm1 \n" \
|
| - "vpaddsw %%ymm4,%%ymm2,%%ymm2 \n" \
|
| - "vpsraw $0x6,%%ymm0,%%ymm0 \n" \
|
| - "vpsraw $0x6,%%ymm1,%%ymm1 \n" \
|
| - "vpsraw $0x6,%%ymm2,%%ymm2 \n" \
|
| - "vpackuswb %%ymm0,%%ymm0,%%ymm0 \n" \
|
| - "vpackuswb %%ymm1,%%ymm1,%%ymm1 \n" \
|
| - "vpackuswb %%ymm2,%%ymm2,%%ymm2 \n"
|
| +#define YUVTORGB_AVX2(yuvconstants) \
|
| + "vpmaddubsw %%ymm10,%%ymm0,%%ymm2 \n" \
|
| + "vpmaddubsw %%ymm9,%%ymm0,%%ymm1 \n" \
|
| + "vpmaddubsw %%ymm8,%%ymm0,%%ymm0 \n" \
|
| + "vpsubw %%ymm2,%%ymm13,%%ymm2 \n" \
|
| + "vpsubw %%ymm1,%%ymm12,%%ymm1 \n" \
|
| + "vpsubw %%ymm0,%%ymm11,%%ymm0 \n" \
|
| + "vpmulhuw %%ymm14,%%ymm4,%%ymm4 \n" \
|
| + "vpaddsw %%ymm4,%%ymm0,%%ymm0 \n" \
|
| + "vpaddsw %%ymm4,%%ymm1,%%ymm1 \n" \
|
| + "vpaddsw %%ymm4,%%ymm2,%%ymm2 \n" \
|
| + "vpsraw $0x6,%%ymm0,%%ymm0 \n" \
|
| + "vpsraw $0x6,%%ymm1,%%ymm1 \n" \
|
| + "vpsraw $0x6,%%ymm2,%%ymm2 \n" \
|
| + "vpackuswb %%ymm0,%%ymm0,%%ymm0 \n" \
|
| + "vpackuswb %%ymm1,%%ymm1,%%ymm1 \n" \
|
| + "vpackuswb %%ymm2,%%ymm2,%%ymm2 \n"
|
| #define YUVTORGB_REGS_AVX2 \
|
| - "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14",
|
| + "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14",
|
| #else // Convert 16 pixels: 16 UV and 16 Y.
|
| #define YUVTORGB_SETUP_AVX2(yuvconstants)
|
| -#define YUVTORGB_AVX2(yuvconstants) \
|
| - "vpmaddubsw " MEMACCESS2(64, [yuvconstants]) ",%%ymm0,%%ymm2 \n" \
|
| +#define YUVTORGB_AVX2(yuvconstants) \
|
| + "vpmaddubsw " MEMACCESS2(64, [yuvconstants]) ",%%ymm0,%%ymm2 \n" \
|
| "vpmaddubsw " MEMACCESS2(32, [yuvconstants]) ",%%ymm0,%%ymm1 \n" \
|
| "vpmaddubsw " MEMACCESS([yuvconstants]) ",%%ymm0,%%ymm0 \n" \
|
| "vmovdqu " MEMACCESS2(160, [yuvconstants]) ",%%ymm3 \n" \
|
| @@ -2051,8 +2048,8 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8* y_buf,
|
| #endif
|
|
|
| // Store 16 ARGB values.
|
| -#define STOREARGB_AVX2 \
|
| - "vpunpcklbw %%ymm1,%%ymm0,%%ymm0 \n" \
|
| +#define STOREARGB_AVX2 \
|
| + "vpunpcklbw %%ymm1,%%ymm0,%%ymm0 \n" \
|
| "vpermq $0xd8,%%ymm0,%%ymm0 \n" \
|
| "vpunpcklbw %%ymm5,%%ymm2,%%ymm2 \n" \
|
| "vpermq $0xd8,%%ymm2,%%ymm2 \n" \
|
| @@ -2132,12 +2129,13 @@ void OMITFP I422ToARGBRow_AVX2(const uint8* y_buf,
|
| // 16 pixels
|
| // 8 UV values upsampled to 16 UV, mixed with 16 Y and 16 A producing 16 ARGB.
|
| void OMITFP I422AlphaToARGBRow_AVX2(const uint8* y_buf,
|
| - const uint8* u_buf,
|
| - const uint8* v_buf,
|
| - const uint8* a_buf,
|
| - uint8* dst_argb,
|
| - const struct YuvConstants* yuvconstants,
|
| - int width) {
|
| + const uint8* u_buf,
|
| + const uint8* v_buf,
|
| + const uint8* a_buf,
|
| + uint8* dst_argb,
|
| + const struct YuvConstants* yuvconstants,
|
| + int width) {
|
| + // clang-format off
|
| asm volatile (
|
| YUVTORGB_SETUP_AVX2(yuvconstants)
|
| "sub %[u_buf],%[v_buf] \n"
|
| @@ -2163,6 +2161,7 @@ void OMITFP I422AlphaToARGBRow_AVX2(const uint8* y_buf,
|
| : "memory", "cc", NACL_R14 YUVTORGB_REGS_AVX2
|
| "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
|
| );
|
| + // clang-format on
|
| }
|
| #endif // HAS_I422ALPHATOARGBROW_AVX2
|
|
|
| @@ -2217,6 +2216,7 @@ void OMITFP NV12ToARGBRow_AVX2(const uint8* y_buf,
|
| uint8* dst_argb,
|
| const struct YuvConstants* yuvconstants,
|
| int width) {
|
| + // clang-format off
|
| asm volatile (
|
| YUVTORGB_SETUP_AVX2(yuvconstants)
|
| "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
|
| @@ -2236,6 +2236,7 @@ void OMITFP NV12ToARGBRow_AVX2(const uint8* y_buf,
|
| : "memory", "cc", YUVTORGB_REGS_AVX2 // Does not use r14.
|
| "xmm0", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
|
| );
|
| + // clang-format on
|
| }
|
| #endif // HAS_NV12TOARGBROW_AVX2
|
|
|
| @@ -2247,6 +2248,7 @@ void OMITFP NV21ToARGBRow_AVX2(const uint8* y_buf,
|
| uint8* dst_argb,
|
| const struct YuvConstants* yuvconstants,
|
| int width) {
|
| + // clang-format off
|
| asm volatile (
|
| YUVTORGB_SETUP_AVX2(yuvconstants)
|
| "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
|
| @@ -2267,6 +2269,7 @@ void OMITFP NV21ToARGBRow_AVX2(const uint8* y_buf,
|
| : "memory", "cc", YUVTORGB_REGS_AVX2 // Does not use r14.
|
| "xmm0", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
|
| );
|
| + // clang-format on
|
| }
|
| #endif // HAS_NV21TOARGBROW_AVX2
|
|
|
| @@ -2277,6 +2280,7 @@ void OMITFP YUY2ToARGBRow_AVX2(const uint8* yuy2_buf,
|
| uint8* dst_argb,
|
| const struct YuvConstants* yuvconstants,
|
| int width) {
|
| + // clang-format off
|
| asm volatile (
|
| YUVTORGB_SETUP_AVX2(yuvconstants)
|
| "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
|
| @@ -2297,6 +2301,7 @@ void OMITFP YUY2ToARGBRow_AVX2(const uint8* yuy2_buf,
|
| : "memory", "cc", YUVTORGB_REGS_AVX2 // Does not use r14.
|
| "xmm0", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
|
| );
|
| + // clang-format on
|
| }
|
| #endif // HAS_YUY2TOARGBROW_AVX2
|
|
|
| @@ -2307,6 +2312,7 @@ void OMITFP UYVYToARGBRow_AVX2(const uint8* uyvy_buf,
|
| uint8* dst_argb,
|
| const struct YuvConstants* yuvconstants,
|
| int width) {
|
| + // clang-format off
|
| asm volatile (
|
| YUVTORGB_SETUP_AVX2(yuvconstants)
|
| "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
|
| @@ -2327,6 +2333,7 @@ void OMITFP UYVYToARGBRow_AVX2(const uint8* uyvy_buf,
|
| : "memory", "cc", YUVTORGB_REGS_AVX2 // Does not use r14.
|
| "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
|
| );
|
| + // clang-format on
|
| }
|
| #endif // HAS_UYVYTOARGBROW_AVX2
|
|
|
| @@ -2424,9 +2431,8 @@ void I400ToARGBRow_AVX2(const uint8* y_buf, uint8* dst_argb, int width) {
|
|
|
| #ifdef HAS_MIRRORROW_SSSE3
|
| // Shuffle table for reversing the bytes.
|
| -static uvec8 kShuffleMirror = {
|
| - 15u, 14u, 13u, 12u, 11u, 10u, 9u, 8u, 7u, 6u, 5u, 4u, 3u, 2u, 1u, 0u
|
| -};
|
| +static uvec8 kShuffleMirror = {15u, 14u, 13u, 12u, 11u, 10u, 9u, 8u,
|
| + 7u, 6u, 5u, 4u, 3u, 2u, 1u, 0u};
|
|
|
| void MirrorRow_SSSE3(const uint8* src, uint8* dst, int width) {
|
| intptr_t temp_width = (intptr_t)(width);
|
| @@ -2477,10 +2483,11 @@ void MirrorRow_AVX2(const uint8* src, uint8* dst, int width) {
|
|
|
| #ifdef HAS_MIRRORUVROW_SSSE3
|
| // Shuffle table for reversing the bytes of UV channels.
|
| -static uvec8 kShuffleMirrorUV = {
|
| - 14u, 12u, 10u, 8u, 6u, 4u, 2u, 0u, 15u, 13u, 11u, 9u, 7u, 5u, 3u, 1u
|
| -};
|
| -void MirrorUVRow_SSSE3(const uint8* src, uint8* dst_u, uint8* dst_v,
|
| +static uvec8 kShuffleMirrorUV = {14u, 12u, 10u, 8u, 6u, 4u, 2u, 0u,
|
| + 15u, 13u, 11u, 9u, 7u, 5u, 3u, 1u};
|
| +void MirrorUVRow_SSSE3(const uint8* src,
|
| + uint8* dst_u,
|
| + uint8* dst_v,
|
| int width) {
|
| intptr_t temp_width = (intptr_t)(width);
|
| asm volatile (
|
| @@ -2535,9 +2542,7 @@ void ARGBMirrorRow_SSE2(const uint8* src, uint8* dst, int width) {
|
|
|
| #ifdef HAS_ARGBMIRRORROW_AVX2
|
| // Shuffle table for reversing the bytes.
|
| -static const ulvec32 kARGBShuffleMirror_AVX2 = {
|
| - 7u, 6u, 5u, 4u, 3u, 2u, 1u, 0u
|
| -};
|
| +static const ulvec32 kARGBShuffleMirror_AVX2 = {7u, 6u, 5u, 4u, 3u, 2u, 1u, 0u};
|
| void ARGBMirrorRow_AVX2(const uint8* src, uint8* dst, int width) {
|
| intptr_t temp_width = (intptr_t)(width);
|
| asm volatile (
|
| @@ -2561,7 +2566,9 @@ void ARGBMirrorRow_AVX2(const uint8* src, uint8* dst, int width) {
|
| #endif // HAS_ARGBMIRRORROW_AVX2
|
|
|
| #ifdef HAS_SPLITUVROW_AVX2
|
| -void SplitUVRow_AVX2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
|
| +void SplitUVRow_AVX2(const uint8* src_uv,
|
| + uint8* dst_u,
|
| + uint8* dst_v,
|
| int width) {
|
| asm volatile (
|
| "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
|
| @@ -2598,7 +2605,9 @@ void SplitUVRow_AVX2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
|
| #endif // HAS_SPLITUVROW_AVX2
|
|
|
| #ifdef HAS_SPLITUVROW_SSE2
|
| -void SplitUVRow_SSE2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
|
| +void SplitUVRow_SSE2(const uint8* src_uv,
|
| + uint8* dst_u,
|
| + uint8* dst_v,
|
| int width) {
|
| asm volatile (
|
| "pcmpeqb %%xmm5,%%xmm5 \n"
|
| @@ -2634,7 +2643,9 @@ void SplitUVRow_SSE2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
|
| #endif // HAS_SPLITUVROW_SSE2
|
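SplitUVRow and MergeUVRow (prototypes reflowed above and below) are plain de-interleave/interleave kernels; the SSE2/AVX2 bodies do the following scalar work 16 or 32 bytes at a time with punpck/pack instructions (helper names here are illustrative, not libyuv entry points):

// Scalar equivalents: interleaved UV (as in NV12) versus planar U and V.
static void ScalarSplitUV(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
                          int width) {
  for (int i = 0; i < width; ++i) {
    dst_u[i] = src_uv[2 * i + 0];
    dst_v[i] = src_uv[2 * i + 1];
  }
}
static void ScalarMergeUV(const uint8* src_u, const uint8* src_v,
                          uint8* dst_uv, int width) {
  for (int i = 0; i < width; ++i) {
    dst_uv[2 * i + 0] = src_u[i];
    dst_uv[2 * i + 1] = src_v[i];
  }
}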
|
|
| #ifdef HAS_MERGEUVROW_AVX2
|
| -void MergeUVRow_AVX2(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
|
| +void MergeUVRow_AVX2(const uint8* src_u,
|
| + const uint8* src_v,
|
| + uint8* dst_uv,
|
| int width) {
|
| asm volatile (
|
| "sub %0,%1 \n"
|
| @@ -2665,7 +2676,9 @@ void MergeUVRow_AVX2(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
|
| #endif // HAS_MERGEUVROW_AVX2
|
|
|
| #ifdef HAS_MERGEUVROW_SSE2
|
| -void MergeUVRow_SSE2(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
|
| +void MergeUVRow_SSE2(const uint8* src_u,
|
| + const uint8* src_v,
|
| + uint8* dst_uv,
|
| int width) {
|
| asm volatile (
|
| "sub %0,%1 \n"
|
| @@ -2759,14 +2772,12 @@ void CopyRow_AVX(const uint8* src, uint8* dst, int count) {
|
| // Multiple of 1.
|
| void CopyRow_ERMS(const uint8* src, uint8* dst, int width) {
|
| size_t width_tmp = (size_t)(width);
|
| - asm volatile (
|
| - "rep movsb " MEMMOVESTRING(0,1) " \n"
|
| - : "+S"(src), // %0
|
| - "+D"(dst), // %1
|
| - "+c"(width_tmp) // %2
|
| - :
|
| - : "memory", "cc"
|
| - );
|
| + asm volatile("rep movsb " MEMMOVESTRING(0, 1) " \n"
|
| + : "+S"(src), // %0
|
| + "+D"(dst), // %1
|
| + "+c"(width_tmp) // %2
|
| + :
|
| + : "memory", "cc");
|
| }
|
| #endif // HAS_COPYROW_ERMS
|
|
|
| @@ -2838,7 +2849,7 @@ void ARGBCopyAlphaRow_AVX2(const uint8* src, uint8* dst, int width) {
|
| #ifdef HAS_ARGBEXTRACTALPHAROW_SSE2
|
| // width in pixels
|
| void ARGBExtractAlphaRow_SSE2(const uint8* src_argb, uint8* dst_a, int width) {
|
| - asm volatile (
|
| + asm volatile (
|
| LABELALIGN
|
| "1: \n"
|
| "movdqu " MEMACCESS(0) ", %%xmm0 \n"
|
| @@ -2864,12 +2875,11 @@ void ARGBExtractAlphaRow_SSE2(const uint8* src_argb, uint8* dst_a, int width) {
|
|
|
| #ifdef HAS_ARGBEXTRACTALPHAROW_AVX2
|
| static const uvec8 kShuffleAlphaShort_AVX2 = {
|
| - 3u, 128u, 128u, 128u, 7u, 128u, 128u, 128u,
|
| - 11u, 128u, 128u, 128u, 15u, 128u, 128u, 128u
|
| -};
|
| + 3u, 128u, 128u, 128u, 7u, 128u, 128u, 128u,
|
| + 11u, 128u, 128u, 128u, 15u, 128u, 128u, 128u};
|
|
|
| void ARGBExtractAlphaRow_AVX2(const uint8* src_argb, uint8* dst_a, int width) {
|
| - asm volatile (
|
| + asm volatile (
|
| "vmovdqa %3,%%ymm4 \n"
|
| "vbroadcastf128 %4,%%ymm5 \n"
|
| LABELALIGN
|
| @@ -2976,32 +2986,29 @@ void ARGBCopyYToAlphaRow_AVX2(const uint8* src, uint8* dst, int width) {
|
| void SetRow_X86(uint8* dst, uint8 v8, int width) {
|
| size_t width_tmp = (size_t)(width >> 2);
|
| const uint32 v32 = v8 * 0x01010101u; // Duplicate byte to all bytes.
|
| - asm volatile (
|
| - "rep stosl " MEMSTORESTRING(eax,0) " \n"
|
| - : "+D"(dst), // %0
|
| - "+c"(width_tmp) // %1
|
| - : "a"(v32) // %2
|
| - : "memory", "cc");
|
| + asm volatile("rep stosl " MEMSTORESTRING(eax, 0) " \n"
|
| + : "+D"(dst), // %0
|
| + "+c"(width_tmp) // %1
|
| + : "a"(v32) // %2
|
| + : "memory", "cc");
|
| }
|
|
|
| void SetRow_ERMS(uint8* dst, uint8 v8, int width) {
|
| size_t width_tmp = (size_t)(width);
|
| - asm volatile (
|
| - "rep stosb " MEMSTORESTRING(al,0) " \n"
|
| - : "+D"(dst), // %0
|
| - "+c"(width_tmp) // %1
|
| - : "a"(v8) // %2
|
| - : "memory", "cc");
|
| + asm volatile("rep stosb " MEMSTORESTRING(al, 0) " \n"
|
| + : "+D"(dst), // %0
|
| + "+c"(width_tmp) // %1
|
| + : "a"(v8) // %2
|
| + : "memory", "cc");
|
| }
|
|
|
| void ARGBSetRow_X86(uint8* dst_argb, uint32 v32, int width) {
|
| size_t width_tmp = (size_t)(width);
|
| - asm volatile (
|
| - "rep stosl " MEMSTORESTRING(eax,0) " \n"
|
| - : "+D"(dst_argb), // %0
|
| - "+c"(width_tmp) // %1
|
| - : "a"(v32) // %2
|
| - : "memory", "cc");
|
| + asm volatile("rep stosl " MEMSTORESTRING(eax, 0) " \n"
|
| + : "+D"(dst_argb), // %0
|
| + "+c"(width_tmp) // %1
|
| + : "a"(v32) // %2
|
| + : "memory", "cc");
|
| }
|
| #endif // HAS_SETROW_X86
|
|
|
| @@ -3031,8 +3038,11 @@ void YUY2ToYRow_SSE2(const uint8* src_yuy2, uint8* dst_y, int width) {
|
| );
|
| }
|
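The YUY2/UYVY UV rows below sample chroma from the packed layouts (Y0 U0 Y1 V0 for YUY2, U0 Y0 V0 Y1 for UYVY); the full ToUV variants take a stride so they can average two source rows. A scalar sketch of the YUY2 case, assuming the rounding average that pavgb provides (the helper name is illustrative, not a libyuv function):

// Scalar sketch of YUY2ToUVRow: each U/V byte is shared by two pixels
// horizontally and averaged across two rows.
static void ScalarYUY2ToUV(const uint8* yuy2, int stride,
                           uint8* dst_u, uint8* dst_v, int width) {
  for (int i = 0; i < width; i += 2) {
    dst_u[0] = (uint8)((yuy2[1] + yuy2[stride + 1] + 1) >> 1);
    dst_v[0] = (uint8)((yuy2[3] + yuy2[stride + 3] + 1) >> 1);
    yuy2 += 4;
    dst_u += 1;
    dst_v += 1;
  }
}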
|
|
| -void YUY2ToUVRow_SSE2(const uint8* src_yuy2, int stride_yuy2,
|
| - uint8* dst_u, uint8* dst_v, int width) {
|
| +void YUY2ToUVRow_SSE2(const uint8* src_yuy2,
|
| + int stride_yuy2,
|
| + uint8* dst_u,
|
| + uint8* dst_v,
|
| + int width) {
|
| asm volatile (
|
| "pcmpeqb %%xmm5,%%xmm5 \n"
|
| "psrlw $0x8,%%xmm5 \n"
|
| @@ -3070,7 +3080,9 @@ void YUY2ToUVRow_SSE2(const uint8* src_yuy2, int stride_yuy2,
|
| }
|
|
|
| void YUY2ToUV422Row_SSE2(const uint8* src_yuy2,
|
| - uint8* dst_u, uint8* dst_v, int width) {
|
| + uint8* dst_u,
|
| + uint8* dst_v,
|
| + int width) {
|
| asm volatile (
|
| "pcmpeqb %%xmm5,%%xmm5 \n"
|
| "psrlw $0x8,%%xmm5 \n"
|
| @@ -3126,8 +3138,11 @@ void UYVYToYRow_SSE2(const uint8* src_uyvy, uint8* dst_y, int width) {
|
| );
|
| }
|
|
|
| -void UYVYToUVRow_SSE2(const uint8* src_uyvy, int stride_uyvy,
|
| - uint8* dst_u, uint8* dst_v, int width) {
|
| +void UYVYToUVRow_SSE2(const uint8* src_uyvy,
|
| + int stride_uyvy,
|
| + uint8* dst_u,
|
| + uint8* dst_v,
|
| + int width) {
|
| asm volatile (
|
| "pcmpeqb %%xmm5,%%xmm5 \n"
|
| "psrlw $0x8,%%xmm5 \n"
|
| @@ -3165,7 +3180,9 @@ void UYVYToUVRow_SSE2(const uint8* src_uyvy, int stride_uyvy,
|
| }
|
|
|
| void UYVYToUV422Row_SSE2(const uint8* src_uyvy,
|
| - uint8* dst_u, uint8* dst_v, int width) {
|
| + uint8* dst_u,
|
| + uint8* dst_v,
|
| + int width) {
|
| asm volatile (
|
| "pcmpeqb %%xmm5,%%xmm5 \n"
|
| "psrlw $0x8,%%xmm5 \n"
|
| @@ -3227,8 +3244,11 @@ void YUY2ToYRow_AVX2(const uint8* src_yuy2, uint8* dst_y, int width) {
|
| );
|
| }
|
|
|
| -void YUY2ToUVRow_AVX2(const uint8* src_yuy2, int stride_yuy2,
|
| - uint8* dst_u, uint8* dst_v, int width) {
|
| +void YUY2ToUVRow_AVX2(const uint8* src_yuy2,
|
| + int stride_yuy2,
|
| + uint8* dst_u,
|
| + uint8* dst_v,
|
| + int width) {
|
| asm volatile (
|
| "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
|
| "vpsrlw $0x8,%%ymm5,%%ymm5 \n"
|
| @@ -3267,7 +3287,9 @@ void YUY2ToUVRow_AVX2(const uint8* src_yuy2, int stride_yuy2,
|
| }
|
|
|
| void YUY2ToUV422Row_AVX2(const uint8* src_yuy2,
|
| - uint8* dst_u, uint8* dst_v, int width) {
|
| + uint8* dst_u,
|
| + uint8* dst_v,
|
| + int width) {
|
| asm volatile (
|
| "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
|
| "vpsrlw $0x8,%%ymm5,%%ymm5 \n"
|
| @@ -3327,8 +3349,11 @@ void UYVYToYRow_AVX2(const uint8* src_uyvy, uint8* dst_y, int width) {
|
| , "xmm0", "xmm1", "xmm5"
|
| );
|
| }
|
| -void UYVYToUVRow_AVX2(const uint8* src_uyvy, int stride_uyvy,
|
| - uint8* dst_u, uint8* dst_v, int width) {
|
| +void UYVYToUVRow_AVX2(const uint8* src_uyvy,
|
| + int stride_uyvy,
|
| + uint8* dst_u,
|
| + uint8* dst_v,
|
| + int width) {
|
| asm volatile (
|
| "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
|
| "vpsrlw $0x8,%%ymm5,%%ymm5 \n"
|
| @@ -3368,7 +3393,9 @@ void UYVYToUVRow_AVX2(const uint8* src_uyvy, int stride_uyvy,
|
| }
|
|
|
| void UYVYToUV422Row_AVX2(const uint8* src_uyvy,
|
| - uint8* dst_u, uint8* dst_v, int width) {
|
| + uint8* dst_u,
|
| + uint8* dst_v,
|
| + int width) {
|
| asm volatile (
|
| "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
|
| "vpsrlw $0x8,%%ymm5,%%ymm5 \n"
|
| @@ -3407,14 +3434,14 @@ void UYVYToUV422Row_AVX2(const uint8* src_uyvy,
|
|
|
| #ifdef HAS_ARGBBLENDROW_SSSE3
|
| // Shuffle table for isolating alpha.
|
| -static uvec8 kShuffleAlpha = {
|
| - 3u, 0x80, 3u, 0x80, 7u, 0x80, 7u, 0x80,
|
| - 11u, 0x80, 11u, 0x80, 15u, 0x80, 15u, 0x80
|
| -};
|
| +static uvec8 kShuffleAlpha = {3u, 0x80, 3u, 0x80, 7u, 0x80, 7u, 0x80,
|
| + 11u, 0x80, 11u, 0x80, 15u, 0x80, 15u, 0x80};
|
|
|
| // Blend 8 pixels at a time
|
| -void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
|
| - uint8* dst_argb, int width) {
|
| +void ARGBBlendRow_SSSE3(const uint8* src_argb0,
|
| + const uint8* src_argb1,
|
| + uint8* dst_argb,
|
| + int width) {
|
| asm volatile (
|
| "pcmpeqb %%xmm7,%%xmm7 \n"
|
| "psrlw $0xf,%%xmm7 \n"
|
| @@ -3499,46 +3526,49 @@ void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
|
| // =((A2*C2)+(B2*(255-C2))+255)/256
|
| // signed version of math
|
| // =(((A2-128)*C2)+((B2-128)*(255-C2))+32768+127)/256
|
| -void BlendPlaneRow_SSSE3(const uint8* src0, const uint8* src1,
|
| - const uint8* alpha, uint8* dst, int width) {
|
| - asm volatile (
|
| - "pcmpeqb %%xmm5,%%xmm5 \n"
|
| - "psllw $0x8,%%xmm5 \n"
|
| - "mov $0x80808080,%%eax \n"
|
| - "movd %%eax,%%xmm6 \n"
|
| - "pshufd $0x0,%%xmm6,%%xmm6 \n"
|
| - "mov $0x807f807f,%%eax \n"
|
| - "movd %%eax,%%xmm7 \n"
|
| - "pshufd $0x0,%%xmm7,%%xmm7 \n"
|
| - "sub %2,%0 \n"
|
| - "sub %2,%1 \n"
|
| - "sub %2,%3 \n"
|
| -
|
| - // 8 pixel loop.
|
| - LABELALIGN
|
| - "1: \n"
|
| - "movq (%2),%%xmm0 \n"
|
| - "punpcklbw %%xmm0,%%xmm0 \n"
|
| - "pxor %%xmm5,%%xmm0 \n"
|
| - "movq (%0,%2,1),%%xmm1 \n"
|
| - "movq (%1,%2,1),%%xmm2 \n"
|
| - "punpcklbw %%xmm2,%%xmm1 \n"
|
| - "psubb %%xmm6,%%xmm1 \n"
|
| - "pmaddubsw %%xmm1,%%xmm0 \n"
|
| - "paddw %%xmm7,%%xmm0 \n"
|
| - "psrlw $0x8,%%xmm0 \n"
|
| - "packuswb %%xmm0,%%xmm0 \n"
|
| - "movq %%xmm0,(%3,%2,1) \n"
|
| - "lea 0x8(%2),%2 \n"
|
| - "sub $0x8,%4 \n"
|
| - "jg 1b \n"
|
| - : "+r"(src0), // %0
|
| - "+r"(src1), // %1
|
| - "+r"(alpha), // %2
|
| - "+r"(dst), // %3
|
| - "+rm"(width) // %4
|
| - :: "memory", "cc", "eax", "xmm0", "xmm1", "xmm2", "xmm5", "xmm6", "xmm7"
|
| - );
|
| +void BlendPlaneRow_SSSE3(const uint8* src0,
|
| + const uint8* src1,
|
| + const uint8* alpha,
|
| + uint8* dst,
|
| + int width) {
|
| + asm volatile(
|
| + "pcmpeqb %%xmm5,%%xmm5 \n"
|
| + "psllw $0x8,%%xmm5 \n"
|
| + "mov $0x80808080,%%eax \n"
|
| + "movd %%eax,%%xmm6 \n"
|
| + "pshufd $0x0,%%xmm6,%%xmm6 \n"
|
| + "mov $0x807f807f,%%eax \n"
|
| + "movd %%eax,%%xmm7 \n"
|
| + "pshufd $0x0,%%xmm7,%%xmm7 \n"
|
| + "sub %2,%0 \n"
|
| + "sub %2,%1 \n"
|
| + "sub %2,%3 \n"
|
| +
|
| + // 8 pixel loop.
|
| + LABELALIGN
|
| + "1: \n"
|
| + "movq (%2),%%xmm0 \n"
|
| + "punpcklbw %%xmm0,%%xmm0 \n"
|
| + "pxor %%xmm5,%%xmm0 \n"
|
| + "movq (%0,%2,1),%%xmm1 \n"
|
| + "movq (%1,%2,1),%%xmm2 \n"
|
| + "punpcklbw %%xmm2,%%xmm1 \n"
|
| + "psubb %%xmm6,%%xmm1 \n"
|
| + "pmaddubsw %%xmm1,%%xmm0 \n"
|
| + "paddw %%xmm7,%%xmm0 \n"
|
| + "psrlw $0x8,%%xmm0 \n"
|
| + "packuswb %%xmm0,%%xmm0 \n"
|
| + "movq %%xmm0,(%3,%2,1) \n"
|
| + "lea 0x8(%2),%2 \n"
|
| + "sub $0x8,%4 \n"
|
| + "jg 1b \n"
|
| + : "+r"(src0), // %0
|
| + "+r"(src1), // %1
|
| + "+r"(alpha), // %2
|
| + "+r"(dst), // %3
|
| + "+rm"(width) // %4
|
| + ::"memory",
|
| + "cc", "eax", "xmm0", "xmm1", "xmm2", "xmm5", "xmm6", "xmm7");
|
| }
|
| #endif // HAS_BLENDPLANEROW_SSSE3
|
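The formula comments above BlendPlaneRow_SSSE3 give both forms of the blend. In plain C the unsigned form is just the loop below; the SSSE3/AVX2 code uses the signed (x - 128) rewrite so a single pmaddubsw can perform both multiplies per pixel (the helper name is illustrative, not a libyuv function):

// Scalar BlendPlaneRow: dst = (src0*alpha + src1*(255-alpha) + 255) >> 8.
// Adding 255 before the shift makes alpha == 255 reproduce src0 exactly.
static void ScalarBlendPlane(const uint8* src0, const uint8* src1,
                             const uint8* alpha, uint8* dst, int width) {
  for (int i = 0; i < width; ++i) {
    dst[i] =
        (uint8)((src0[i] * alpha[i] + src1[i] * (255 - alpha[i]) + 255) >> 8);
  }
}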
|
|
| @@ -3548,67 +3578,67 @@ void BlendPlaneRow_SSSE3(const uint8* src0, const uint8* src1,
|
| // =((A2*C2)+(B2*(255-C2))+255)/256
|
| // signed version of math
|
| // =(((A2-128)*C2)+((B2-128)*(255-C2))+32768+127)/256
|
| -void BlendPlaneRow_AVX2(const uint8* src0, const uint8* src1,
|
| - const uint8* alpha, uint8* dst, int width) {
|
| - asm volatile (
|
| - "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
|
| - "vpsllw $0x8,%%ymm5,%%ymm5 \n"
|
| - "mov $0x80808080,%%eax \n"
|
| - "vmovd %%eax,%%xmm6 \n"
|
| - "vbroadcastss %%xmm6,%%ymm6 \n"
|
| - "mov $0x807f807f,%%eax \n"
|
| - "vmovd %%eax,%%xmm7 \n"
|
| - "vbroadcastss %%xmm7,%%ymm7 \n"
|
| - "sub %2,%0 \n"
|
| - "sub %2,%1 \n"
|
| - "sub %2,%3 \n"
|
| -
|
| - // 32 pixel loop.
|
| - LABELALIGN
|
| - "1: \n"
|
| - "vmovdqu (%2),%%ymm0 \n"
|
| - "vpunpckhbw %%ymm0,%%ymm0,%%ymm3 \n"
|
| - "vpunpcklbw %%ymm0,%%ymm0,%%ymm0 \n"
|
| - "vpxor %%ymm5,%%ymm3,%%ymm3 \n"
|
| - "vpxor %%ymm5,%%ymm0,%%ymm0 \n"
|
| - "vmovdqu (%0,%2,1),%%ymm1 \n"
|
| - "vmovdqu (%1,%2,1),%%ymm2 \n"
|
| - "vpunpckhbw %%ymm2,%%ymm1,%%ymm4 \n"
|
| - "vpunpcklbw %%ymm2,%%ymm1,%%ymm1 \n"
|
| - "vpsubb %%ymm6,%%ymm4,%%ymm4 \n"
|
| - "vpsubb %%ymm6,%%ymm1,%%ymm1 \n"
|
| - "vpmaddubsw %%ymm4,%%ymm3,%%ymm3 \n"
|
| - "vpmaddubsw %%ymm1,%%ymm0,%%ymm0 \n"
|
| - "vpaddw %%ymm7,%%ymm3,%%ymm3 \n"
|
| - "vpaddw %%ymm7,%%ymm0,%%ymm0 \n"
|
| - "vpsrlw $0x8,%%ymm3,%%ymm3 \n"
|
| - "vpsrlw $0x8,%%ymm0,%%ymm0 \n"
|
| - "vpackuswb %%ymm3,%%ymm0,%%ymm0 \n"
|
| - "vmovdqu %%ymm0,(%3,%2,1) \n"
|
| - "lea 0x20(%2),%2 \n"
|
| - "sub $0x20,%4 \n"
|
| - "jg 1b \n"
|
| - "vzeroupper \n"
|
| - : "+r"(src0), // %0
|
| - "+r"(src1), // %1
|
| - "+r"(alpha), // %2
|
| - "+r"(dst), // %3
|
| - "+rm"(width) // %4
|
| - :: "memory", "cc", "eax",
|
| - "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
|
| - );
|
| +void BlendPlaneRow_AVX2(const uint8* src0,
|
| + const uint8* src1,
|
| + const uint8* alpha,
|
| + uint8* dst,
|
| + int width) {
|
| + asm volatile(
|
| + "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
|
| + "vpsllw $0x8,%%ymm5,%%ymm5 \n"
|
| + "mov $0x80808080,%%eax \n"
|
| + "vmovd %%eax,%%xmm6 \n"
|
| + "vbroadcastss %%xmm6,%%ymm6 \n"
|
| + "mov $0x807f807f,%%eax \n"
|
| + "vmovd %%eax,%%xmm7 \n"
|
| + "vbroadcastss %%xmm7,%%ymm7 \n"
|
| + "sub %2,%0 \n"
|
| + "sub %2,%1 \n"
|
| + "sub %2,%3 \n"
|
| +
|
| + // 32 pixel loop.
|
| + LABELALIGN
|
| + "1: \n"
|
| + "vmovdqu (%2),%%ymm0 \n"
|
| + "vpunpckhbw %%ymm0,%%ymm0,%%ymm3 \n"
|
| + "vpunpcklbw %%ymm0,%%ymm0,%%ymm0 \n"
|
| + "vpxor %%ymm5,%%ymm3,%%ymm3 \n"
|
| + "vpxor %%ymm5,%%ymm0,%%ymm0 \n"
|
| + "vmovdqu (%0,%2,1),%%ymm1 \n"
|
| + "vmovdqu (%1,%2,1),%%ymm2 \n"
|
| + "vpunpckhbw %%ymm2,%%ymm1,%%ymm4 \n"
|
| + "vpunpcklbw %%ymm2,%%ymm1,%%ymm1 \n"
|
| + "vpsubb %%ymm6,%%ymm4,%%ymm4 \n"
|
| + "vpsubb %%ymm6,%%ymm1,%%ymm1 \n"
|
| + "vpmaddubsw %%ymm4,%%ymm3,%%ymm3 \n"
|
| + "vpmaddubsw %%ymm1,%%ymm0,%%ymm0 \n"
|
| + "vpaddw %%ymm7,%%ymm3,%%ymm3 \n"
|
| + "vpaddw %%ymm7,%%ymm0,%%ymm0 \n"
|
| + "vpsrlw $0x8,%%ymm3,%%ymm3 \n"
|
| + "vpsrlw $0x8,%%ymm0,%%ymm0 \n"
|
| + "vpackuswb %%ymm3,%%ymm0,%%ymm0 \n"
|
| + "vmovdqu %%ymm0,(%3,%2,1) \n"
|
| + "lea 0x20(%2),%2 \n"
|
| + "sub $0x20,%4 \n"
|
| + "jg 1b \n"
|
| + "vzeroupper \n"
|
| + : "+r"(src0), // %0
|
| + "+r"(src1), // %1
|
| + "+r"(alpha), // %2
|
| + "+r"(dst), // %3
|
| + "+rm"(width) // %4
|
| + ::"memory",
|
| + "cc", "eax", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6",
|
| + "xmm7");
|
| }
|
| #endif // HAS_BLENDPLANEROW_AVX2
|
|
|
| #ifdef HAS_ARGBATTENUATEROW_SSSE3
|
| // Shuffle table duplicating alpha
|
| -static uvec8 kShuffleAlpha0 = {
|
| - 3u, 3u, 3u, 3u, 3u, 3u, 128u, 128u, 7u, 7u, 7u, 7u, 7u, 7u, 128u, 128u
|
| -};
|
| -static uvec8 kShuffleAlpha1 = {
|
| - 11u, 11u, 11u, 11u, 11u, 11u, 128u, 128u,
|
| - 15u, 15u, 15u, 15u, 15u, 15u, 128u, 128u
|
| -};
|
| +static uvec8 kShuffleAlpha0 = {3u, 3u, 3u, 3u, 3u, 3u, 128u, 128u,
|
| + 7u, 7u, 7u, 7u, 7u, 7u, 128u, 128u};
|
| +static uvec8 kShuffleAlpha1 = {11u, 11u, 11u, 11u, 11u, 11u, 128u, 128u,
|
| + 15u, 15u, 15u, 15u, 15u, 15u, 128u, 128u};
|
| // Attenuate 4 pixels at a time.
|
| void ARGBAttenuateRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width) {
|
| asm volatile (
|
| @@ -3654,9 +3684,9 @@ void ARGBAttenuateRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width) {
|
|
|
| #ifdef HAS_ARGBATTENUATEROW_AVX2
|
| // Shuffle table duplicating alpha.
|
| -static const uvec8 kShuffleAlpha_AVX2 = {
|
| - 6u, 7u, 6u, 7u, 6u, 7u, 128u, 128u, 14u, 15u, 14u, 15u, 14u, 15u, 128u, 128u
|
| -};
|
| +static const uvec8 kShuffleAlpha_AVX2 = {6u, 7u, 6u, 7u, 6u, 7u,
|
| + 128u, 128u, 14u, 15u, 14u, 15u,
|
| + 14u, 15u, 128u, 128u};
|
| // Attenuate 8 pixels at a time.
|
| void ARGBAttenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb, int width) {
|
| asm volatile (
|
| @@ -3697,7 +3727,8 @@ void ARGBAttenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb, int width) {
|
|
|
| #ifdef HAS_ARGBUNATTENUATEROW_SSE2
|
| // Unattenuate 4 pixels at a time.
|
| -void ARGBUnattenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb,
|
| +void ARGBUnattenuateRow_SSE2(const uint8* src_argb,
|
| + uint8* dst_argb,
|
| int width) {
|
| uintptr_t alpha;
|
| asm volatile (
|
| @@ -3744,10 +3775,10 @@ void ARGBUnattenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb,
|
| #ifdef HAS_ARGBUNATTENUATEROW_AVX2
|
| // Shuffle table duplicating alpha.
|
| static const uvec8 kUnattenShuffleAlpha_AVX2 = {
|
| - 0u, 1u, 0u, 1u, 0u, 1u, 6u, 7u, 8u, 9u, 8u, 9u, 8u, 9u, 14u, 15u
|
| -};
|
| + 0u, 1u, 0u, 1u, 0u, 1u, 6u, 7u, 8u, 9u, 8u, 9u, 8u, 9u, 14u, 15u};
|
| // Unattenuate 8 pixels at a time.
|
| -void ARGBUnattenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb,
|
| +void ARGBUnattenuateRow_AVX2(const uint8* src_argb,
|
| + uint8* dst_argb,
|
| int width) {
|
| uintptr_t alpha;
|
| asm volatile (
|
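Unattenuate is the inverse operation: divide each colour channel by alpha and saturate at 255. The SSE2/AVX2 rows avoid a per-pixel divide by using a precomputed reciprocal table; kUnattenShuffleAlpha_AVX2 above only repositions those 16-bit reciprocals. A scalar sketch of the intended result (name and the "alpha 0 passes the channel through" policy are illustrative):

#include <stdint.h>

// Scalar sketch of unattenuation: undo premultiplied alpha.
static void ARGBUnattenuateRow_Sketch(const uint8_t* src_argb,
                                      uint8_t* dst_argb, int width) {
  for (int i = 0; i < width; ++i) {
    int a = src_argb[i * 4 + 3];
    for (int c = 0; c < 3; ++c) {
      int v = src_argb[i * 4 + c];
      if (a != 0) {
        v = v * 255 / a;
        if (v > 255) v = 255;
      }
      dst_argb[i * 4 + c] = (uint8_t)v;
    }
    dst_argb[i * 4 + 3] = (uint8_t)a;
  }
}
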
| @@ -3862,17 +3893,14 @@ void ARGBGrayRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width) {
|
| // g = (r * 45 + g * 88 + b * 22) >> 7
|
| // r = (r * 50 + g * 98 + b * 24) >> 7
|
| // Constant for ARGB color to sepia tone
|
| -static vec8 kARGBToSepiaB = {
|
| - 17, 68, 35, 0, 17, 68, 35, 0, 17, 68, 35, 0, 17, 68, 35, 0
|
| -};
|
| +static vec8 kARGBToSepiaB = {17, 68, 35, 0, 17, 68, 35, 0,
|
| + 17, 68, 35, 0, 17, 68, 35, 0};
|
|
|
| -static vec8 kARGBToSepiaG = {
|
| - 22, 88, 45, 0, 22, 88, 45, 0, 22, 88, 45, 0, 22, 88, 45, 0
|
| -};
|
| +static vec8 kARGBToSepiaG = {22, 88, 45, 0, 22, 88, 45, 0,
|
| + 22, 88, 45, 0, 22, 88, 45, 0};
|
|
|
| -static vec8 kARGBToSepiaR = {
|
| - 24, 98, 50, 0, 24, 98, 50, 0, 24, 98, 50, 0, 24, 98, 50, 0
|
| -};
|
| +static vec8 kARGBToSepiaR = {24, 98, 50, 0, 24, 98, 50, 0,
|
| + 24, 98, 50, 0, 24, 98, 50, 0};
|
|
|
| // Convert 8 ARGB pixels (32 bytes) to 8 Sepia ARGB pixels.
|
| void ARGBSepiaRow_SSSE3(uint8* dst_argb, int width) {
|
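The three kARGBToSepia vectors are the B/G/R rows of the sepia matrix in B,G,R,A byte order; the G and R rows match the formulas in the comment above and the B coefficients follow from kARGBToSepiaB. A scalar sketch of the in-place transform, with the saturation that packuswb provides (helper names illustrative):

#include <stdint.h>

static inline uint8_t Clamp255(int v) { return (uint8_t)(v > 255 ? 255 : v); }

// Scalar sketch of the sepia transform; coefficients are applied to the
// original B,G,R and >> 7 matches the fixed-point scale of the constants.
static void ARGBSepiaRow_Sketch(uint8_t* dst_argb, int width) {
  for (int i = 0; i < width; ++i) {
    int b = dst_argb[i * 4 + 0];
    int g = dst_argb[i * 4 + 1];
    int r = dst_argb[i * 4 + 2];
    dst_argb[i * 4 + 0] = Clamp255((b * 17 + g * 68 + r * 35) >> 7);  // new B
    dst_argb[i * 4 + 1] = Clamp255((b * 22 + g * 88 + r * 45) >> 7);  // new G
    dst_argb[i * 4 + 2] = Clamp255((b * 24 + g * 98 + r * 50) >> 7);  // new R
    // Alpha is left untouched.
  }
}
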
| @@ -3935,8 +3963,10 @@ void ARGBSepiaRow_SSSE3(uint8* dst_argb, int width) {
|
| #ifdef HAS_ARGBCOLORMATRIXROW_SSSE3
|
| // Transform 8 ARGB pixels (32 bytes) with color matrix.
|
| // Same as Sepia except matrix is provided.
|
| -void ARGBColorMatrixRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
|
| - const int8* matrix_argb, int width) {
|
| +void ARGBColorMatrixRow_SSSE3(const uint8* src_argb,
|
| + uint8* dst_argb,
|
| + const int8* matrix_argb,
|
| + int width) {
|
| asm volatile (
|
| "movdqu " MEMACCESS(3) ",%%xmm5 \n"
|
| "pshufd $0x00,%%xmm5,%%xmm2 \n"
|
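ARGBColorMatrixRow is the general form of the sepia path: matrix_argb holds 16 signed bytes, four per output channel, applied to the input B,G,R,A values in fixed point. The sketch below assumes the usual libyuv convention that 64 in the matrix represents 1.0 (hence the >> 6); treat both the name and that shift as illustrative:

#include <stdint.h>

static inline uint8_t Clamp0To255(int v) {
  return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
}

// Scalar sketch: each output channel is a dot product of the input B,G,R,A
// bytes with one row of the signed 8-bit matrix, then shifted back down.
static void ARGBColorMatrixRow_Sketch(const uint8_t* src_argb,
                                      uint8_t* dst_argb,
                                      const int8_t* matrix_argb, int width) {
  for (int i = 0; i < width; ++i) {
    const uint8_t* p = src_argb + i * 4;
    for (int out = 0; out < 4; ++out) {
      const int8_t* m = matrix_argb + out * 4;
      int v = p[0] * m[0] + p[1] * m[1] + p[2] * m[2] + p[3] * m[3];
      dst_argb[i * 4 + out] = Clamp0To255(v >> 6);
    }
  }
}
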
| @@ -3998,8 +4028,11 @@ void ARGBColorMatrixRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
|
|
|
| #ifdef HAS_ARGBQUANTIZEROW_SSE2
|
| // Quantize 4 ARGB pixels (16 bytes).
|
| -void ARGBQuantizeRow_SSE2(uint8* dst_argb, int scale, int interval_size,
|
| - int interval_offset, int width) {
|
| +void ARGBQuantizeRow_SSE2(uint8* dst_argb,
|
| + int scale,
|
| + int interval_size,
|
| + int interval_offset,
|
| + int width) {
|
| asm volatile (
|
| "movd %2,%%xmm2 \n"
|
| "movd %3,%%xmm3 \n"
|
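Quantize posterizes the three colour channels in place: each value is scaled down to an interval index, then rebuilt from interval_size and interval_offset; alpha is untouched. A scalar sketch, assuming `scale` is the usual fixed-point factor consumed with a 16-bit shift (name illustrative):

#include <stdint.h>

// Scalar sketch of quantization: snap each colour channel to its interval.
static void ARGBQuantizeRow_Sketch(uint8_t* dst_argb, int scale,
                                   int interval_size, int interval_offset,
                                   int width) {
  for (int i = 0; i < width; ++i) {
    for (int c = 0; c < 3; ++c) {  // B, G, R; alpha stays as-is.
      int v = dst_argb[i * 4 + c];
      dst_argb[i * 4 + c] =
          (uint8_t)((v * scale >> 16) * interval_size + interval_offset);
    }
  }
}
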
| @@ -4048,7 +4081,9 @@ void ARGBQuantizeRow_SSE2(uint8* dst_argb, int scale, int interval_size,
|
|
|
| #ifdef HAS_ARGBSHADEROW_SSE2
|
| // Shade 4 pixels at a time by specified value.
|
| -void ARGBShadeRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width,
|
| +void ARGBShadeRow_SSE2(const uint8* src_argb,
|
| + uint8* dst_argb,
|
| + int width,
|
| uint32 value) {
|
| asm volatile (
|
| "movd %3,%%xmm2 \n"
|
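Shade multiplies every pixel by a constant packed ARGB `value`: each channel is scaled by the matching byte of the constant, roughly dst = src * value_channel / 255. The rounding of the real SSE2 path differs slightly; this scalar sketch only shows the idea (name illustrative, >> 8 stands in for /255):

#include <stdint.h>

// Scalar sketch of ARGBShadeRow: scale B,G,R,A by the corresponding byte of
// the packed constant value (0xAARRGGBB), treating 255 as roughly 1.0.
static void ARGBShadeRow_Sketch(const uint8_t* src_argb, uint8_t* dst_argb,
                                int width, uint32_t value) {
  const int scale[4] = {
      (int)(value & 0xff),          // B
      (int)((value >> 8) & 0xff),   // G
      (int)((value >> 16) & 0xff),  // R
      (int)((value >> 24) & 0xff),  // A
  };
  for (int i = 0; i < width; ++i) {
    for (int c = 0; c < 4; ++c) {
      dst_argb[i * 4 + c] = (uint8_t)((src_argb[i * 4 + c] * scale[c]) >> 8);
    }
  }
}
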
| @@ -4084,8 +4119,10 @@ void ARGBShadeRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width,
|
|
|
| #ifdef HAS_ARGBMULTIPLYROW_SSE2
|
| // Multiply 2 rows of ARGB pixels together, 4 pixels at a time.
|
| -void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
|
| - uint8* dst_argb, int width) {
|
| +void ARGBMultiplyRow_SSE2(const uint8* src_argb0,
|
| + const uint8* src_argb1,
|
| + uint8* dst_argb,
|
| + int width) {
|
| asm volatile (
|
| "pxor %%xmm5,%%xmm5 \n"
|
|
|
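Multiply blends two ARGB rows channel by channel: the 8-bit product normalized so that 255 acts as 1.0 (255 * 255 -> 255). The SIMD rows approximate the /255 with pmulhuw tricks; the sketch below uses the exact divide (name illustrative):

#include <stdint.h>

// Scalar sketch of ARGBMultiplyRow: per-channel (including alpha) product,
// normalized back to 8 bits.
static void ARGBMultiplyRow_Sketch(const uint8_t* src_argb0,
                                   const uint8_t* src_argb1,
                                   uint8_t* dst_argb, int width) {
  for (int i = 0; i < width * 4; ++i) {
    dst_argb[i] = (uint8_t)((src_argb0[i] * src_argb1[i] + 127) / 255);
  }
}
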
| @@ -4122,8 +4159,10 @@ void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
|
|
|
| #ifdef HAS_ARGBMULTIPLYROW_AVX2
|
| // Multiply 2 rows of ARGB pixels together, 8 pixels at a time.
|
| -void ARGBMultiplyRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
|
| - uint8* dst_argb, int width) {
|
| +void ARGBMultiplyRow_AVX2(const uint8* src_argb0,
|
| + const uint8* src_argb1,
|
| + uint8* dst_argb,
|
| + int width) {
|
| asm volatile (
|
| "vpxor %%ymm5,%%ymm5,%%ymm5 \n"
|
|
|
| @@ -4161,8 +4200,10 @@ void ARGBMultiplyRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
|
|
|
| #ifdef HAS_ARGBADDROW_SSE2
|
| // Add 2 rows of ARGB pixels together, 4 pixels at a time.
|
| -void ARGBAddRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
|
| - uint8* dst_argb, int width) {
|
| +void ARGBAddRow_SSE2(const uint8* src_argb0,
|
| + const uint8* src_argb1,
|
| + uint8* dst_argb,
|
| + int width) {
|
| asm volatile (
|
| // 4 pixel loop.
|
| LABELALIGN
|
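Add is a saturating per-byte sum of the two rows (paddusb in the SSE2 path), so every channel including alpha clamps at 255. Scalar sketch (name illustrative):

#include <stdint.h>

// Scalar sketch of ARGBAddRow: saturating add of every byte in the two rows.
static void ARGBAddRow_Sketch(const uint8_t* src_argb0,
                              const uint8_t* src_argb1, uint8_t* dst_argb,
                              int width) {
  for (int i = 0; i < width * 4; ++i) {
    int v = src_argb0[i] + src_argb1[i];
    dst_argb[i] = (uint8_t)(v > 255 ? 255 : v);
  }
}
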
| @@ -4189,8 +4230,10 @@ void ARGBAddRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
|
|
|
| #ifdef HAS_ARGBADDROW_AVX2
|
| // Add 2 rows of ARGB pixels together, 8 pixels at a time.
|
| -void ARGBAddRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
|
| - uint8* dst_argb, int width) {
|
| +void ARGBAddRow_AVX2(const uint8* src_argb0,
|
| + const uint8* src_argb1,
|
| + uint8* dst_argb,
|
| + int width) {
|
| asm volatile (
|
| // 4 pixel loop.
|
| LABELALIGN
|
| @@ -4217,8 +4260,10 @@ void ARGBAddRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
|
|
|
| #ifdef HAS_ARGBSUBTRACTROW_SSE2
|
| // Subtract 2 rows of ARGB pixels, 4 pixels at a time.
|
| -void ARGBSubtractRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
|
| - uint8* dst_argb, int width) {
|
| +void ARGBSubtractRow_SSE2(const uint8* src_argb0,
|
| + const uint8* src_argb1,
|
| + uint8* dst_argb,
|
| + int width) {
|
| asm volatile (
|
| // 4 pixel loop.
|
| LABELALIGN
|
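Subtract mirrors Add with a saturating per-byte difference, clamping at 0. Scalar sketch (name illustrative):

#include <stdint.h>

// Scalar sketch of ARGBSubtractRow: saturating subtract of every byte.
static void ARGBSubtractRow_Sketch(const uint8_t* src_argb0,
                                   const uint8_t* src_argb1, uint8_t* dst_argb,
                                   int width) {
  for (int i = 0; i < width * 4; ++i) {
    int v = src_argb0[i] - src_argb1[i];
    dst_argb[i] = (uint8_t)(v < 0 ? 0 : v);
  }
}
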
| @@ -4245,8 +4290,10 @@ void ARGBSubtractRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
|
|
|
| #ifdef HAS_ARGBSUBTRACTROW_AVX2
|
| // Subtract 2 rows of ARGB pixels, 8 pixels at a time.
|
| -void ARGBSubtractRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
|
| - uint8* dst_argb, int width) {
|
| +void ARGBSubtractRow_AVX2(const uint8* src_argb0,
|
| + const uint8* src_argb1,
|
| + uint8* dst_argb,
|
| + int width) {
|
| asm volatile (
|
| // 4 pixel loop.
|
| LABELALIGN
|
| @@ -4276,8 +4323,11 @@ void ARGBSubtractRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
|
| // -1 0 1
|
| // -2 0 2
|
| // -1 0 1
|
| -void SobelXRow_SSE2(const uint8* src_y0, const uint8* src_y1,
|
| - const uint8* src_y2, uint8* dst_sobelx, int width) {
|
| +void SobelXRow_SSE2(const uint8* src_y0,
|
| + const uint8* src_y1,
|
| + const uint8* src_y2,
|
| + uint8* dst_sobelx,
|
| + int width) {
|
| asm volatile (
|
| "sub %0,%1 \n"
|
| "sub %0,%2 \n"
|
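SobelX applies the 3x3 kernel listed above down three source rows: for each output position it takes the horizontal difference (column x against column x+2) in each row, weights the middle row by 2, and stores the absolute value clamped to 255. A scalar sketch (name illustrative; edge handling is the caller's problem, as with the real row functions):

#include <stdint.h>
#include <stdlib.h>

// Scalar sketch of SobelXRow: |(-1 0 1; -2 0 2; -1 0 1) * src| per pixel,
// with src_y0/src_y1/src_y2 the three consecutive input rows.
static void SobelXRow_Sketch(const uint8_t* src_y0, const uint8_t* src_y1,
                             const uint8_t* src_y2, uint8_t* dst_sobelx,
                             int width) {
  for (int i = 0; i < width; ++i) {
    int a = src_y0[i] - src_y0[i + 2];
    int b = src_y1[i] - src_y1[i + 2];
    int c = src_y2[i] - src_y2[i + 2];
    int sobel = abs(a + b * 2 + c);
    dst_sobelx[i] = (uint8_t)(sobel > 255 ? 255 : sobel);
  }
}
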
| @@ -4330,8 +4380,10 @@ void SobelXRow_SSE2(const uint8* src_y0, const uint8* src_y1,
|
| // -1 -2 -1
|
| // 0 0 0
|
| // 1 2 1
|
| -void SobelYRow_SSE2(const uint8* src_y0, const uint8* src_y1,
|
| - uint8* dst_sobely, int width) {
|
| +void SobelYRow_SSE2(const uint8* src_y0,
|
| + const uint8* src_y1,
|
| + uint8* dst_sobely,
|
| + int width) {
|
| asm volatile (
|
| "sub %0,%1 \n"
|
| "sub %0,%2 \n"
|
| @@ -4383,8 +4435,10 @@ void SobelYRow_SSE2(const uint8* src_y0, const uint8* src_y1,
|
| // R = Sobel
|
| // G = Sobel
|
| // B = Sobel
|
| -void SobelRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
|
| - uint8* dst_argb, int width) {
|
| +void SobelRow_SSE2(const uint8* src_sobelx,
|
| + const uint8* src_sobely,
|
| + uint8* dst_argb,
|
| + int width) {
|
| asm volatile (
|
| "sub %0,%1 \n"
|
| "pcmpeqb %%xmm5,%%xmm5 \n"
|
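SobelRow then packs the two gradient planes into grey ARGB: the X and Y magnitudes are added with saturation and written to B, G and R, with alpha forced to 0xFF (the all-ones register from the pcmpeqb above ends up supplying that). Scalar sketch (name illustrative):

#include <stdint.h>

// Scalar sketch of SobelRow: B = G = R = saturate(sobelx + sobely), A = 255.
static void SobelRow_Sketch(const uint8_t* src_sobelx,
                            const uint8_t* src_sobely, uint8_t* dst_argb,
                            int width) {
  for (int i = 0; i < width; ++i) {
    int s = src_sobelx[i] + src_sobely[i];
    uint8_t v = (uint8_t)(s > 255 ? 255 : s);
    dst_argb[i * 4 + 0] = v;    // B
    dst_argb[i * 4 + 1] = v;    // G
    dst_argb[i * 4 + 2] = v;    // R
    dst_argb[i * 4 + 3] = 255;  // A
  }
}
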
| @@ -4430,8 +4484,10 @@ void SobelRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
|
|
|
| #ifdef HAS_SOBELTOPLANEROW_SSE2
|
| // Adds Sobel X and Sobel Y and stores Sobel into a plane.
|
| -void SobelToPlaneRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
|
| - uint8* dst_y, int width) {
|
| +void SobelToPlaneRow_SSE2(const uint8* src_sobelx,
|
| + const uint8* src_sobely,
|
| + uint8* dst_y,
|
| + int width) {
|
| asm volatile (
|
| "sub %0,%1 \n"
|
| "pcmpeqb %%xmm5,%%xmm5 \n"
|
| @@ -4465,8 +4521,10 @@ void SobelToPlaneRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
|
| // R = Sobel X
|
| // G = Sobel
|
| // B = Sobel Y
|
| -void SobelXYRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
|
| - uint8* dst_argb, int width) {
|
| +void SobelXYRow_SSE2(const uint8* src_sobelx,
|
| + const uint8* src_sobely,
|
| + uint8* dst_argb,
|
| + int width) {
|
| asm volatile (
|
| "sub %0,%1 \n"
|
| "pcmpeqb %%xmm5,%%xmm5 \n"
|
| @@ -4512,8 +4570,10 @@ void SobelXYRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
|
| #ifdef HAS_COMPUTECUMULATIVESUMROW_SSE2
|
| // Creates a table of cumulative sums where each value is a sum of all values
|
| // above and to the left of the value, inclusive of the value.
|
| -void ComputeCumulativeSumRow_SSE2(const uint8* row, int32* cumsum,
|
| - const int32* previous_cumsum, int width) {
|
| +void ComputeCumulativeSumRow_SSE2(const uint8* row,
|
| + int32* cumsum,
|
| + const int32* previous_cumsum,
|
| + int width) {
|
| asm volatile (
|
| "pxor %%xmm0,%%xmm0 \n"
|
| "pxor %%xmm1,%%xmm1 \n"
|
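The cumulative-sum row builds one row of an integral image over four interleaved channels: each output is the running sum along this row plus the already-accumulated value directly above it, exactly as the comment describes. Scalar sketch (name illustrative):

#include <stdint.h>

// Scalar sketch of ComputeCumulativeSumRow: cumsum[x] becomes the sum of all
// values above and to the left (inclusive), kept separately per channel.
static void ComputeCumulativeSumRow_Sketch(const uint8_t* row, int32_t* cumsum,
                                           const int32_t* previous_cumsum,
                                           int width) {
  int32_t row_sum[4] = {0, 0, 0, 0};
  for (int x = 0; x < width; ++x) {
    for (int c = 0; c < 4; ++c) {
      row_sum[c] += row[x * 4 + c];
      cumsum[x * 4 + c] = row_sum[c] + previous_cumsum[x * 4 + c];
    }
  }
}
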
| @@ -4590,8 +4650,11 @@ void ComputeCumulativeSumRow_SSE2(const uint8* row, int32* cumsum,
|
| #endif // HAS_COMPUTECUMULATIVESUMROW_SSE2
|
|
|
| #ifdef HAS_CUMULATIVESUMTOAVERAGEROW_SSE2
|
| -void CumulativeSumToAverageRow_SSE2(const int32* topleft, const int32* botleft,
|
| - int width, int area, uint8* dst,
|
| +void CumulativeSumToAverageRow_SSE2(const int32* topleft,
|
| + const int32* botleft,
|
| + int width,
|
| + int area,
|
| + uint8* dst,
|
| int count) {
|
| asm volatile (
|
| "movd %5,%%xmm5 \n"
|
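Given two rows of that cumulative-sum table (the rows bounding a box, each pointed at the box's left edge), the box average comes from the usual four-corner integral-image formula divided by area. The indexing below assumes that convention; treat it as a sketch rather than the reference implementation:

#include <stdint.h>

// Scalar sketch of CumulativeSumToAverageRow: box average from an integral
// image.  width is the box width in pixels, area = box width * height,
// count is the number of output pixels.
static void CumulativeSumToAverageRow_Sketch(const int32_t* topleft,
                                             const int32_t* botleft, int width,
                                             int area, uint8_t* dst,
                                             int count) {
  const float ooa = 1.0f / (float)area;
  for (int i = 0; i < count; ++i) {
    for (int c = 0; c < 4; ++c) {
      int32_t sum = botleft[(i + width) * 4 + c] - botleft[i * 4 + c] -
                    topleft[(i + width) * 4 + c] + topleft[i * 4 + c];
      dst[i * 4 + c] = (uint8_t)((float)sum * ooa);
    }
  }
}
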
| @@ -4723,8 +4786,11 @@ void CumulativeSumToAverageRow_SSE2(const int32* topleft, const int32* botleft,
|
| #ifdef HAS_ARGBAFFINEROW_SSE2
|
| // Copy ARGB pixels from source image with slope to a row of destination.
|
| LIBYUV_API
|
| -void ARGBAffineRow_SSE2(const uint8* src_argb, int src_argb_stride,
|
| - uint8* dst_argb, const float* src_dudv, int width) {
|
| +void ARGBAffineRow_SSE2(const uint8* src_argb,
|
| + int src_argb_stride,
|
| + uint8* dst_argb,
|
| + const float* src_dudv,
|
| + int width) {
|
| intptr_t src_argb_stride_temp = src_argb_stride;
|
| intptr_t temp;
|
| asm volatile (
|
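The affine row walks the source image along an arbitrary line: assuming src_dudv holds the starting texel coordinate and its per-pixel step as {u, v, du, dv}, each destination pixel is a nearest-neighbour fetch of the 4-byte ARGB value at the truncated coordinate (the SSE2 version converts and gathers several coordinates per iteration). Scalar sketch under that assumption:

#include <stdint.h>
#include <string.h>

// Scalar sketch of ARGBAffineRow: nearest-neighbour sampling along the line
// (u + i*du, v + i*dv) in the source image.
static void ARGBAffineRow_Sketch(const uint8_t* src_argb, int src_argb_stride,
                                 uint8_t* dst_argb, const float* src_dudv,
                                 int width) {
  float u = src_dudv[0];
  float v = src_dudv[1];
  for (int i = 0; i < width; ++i) {
    int x = (int)u;
    int y = (int)v;
    memcpy(dst_argb + i * 4, src_argb + y * src_argb_stride + x * 4, 4);
    u += src_dudv[2];
    v += src_dudv[3];
  }
}
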
| @@ -4808,8 +4874,10 @@ void ARGBAffineRow_SSE2(const uint8* src_argb, int src_argb_stride,
|
|
|
| #ifdef HAS_INTERPOLATEROW_SSSE3
|
| // Bilinear filter 16x2 -> 16x1
|
| -void InterpolateRow_SSSE3(uint8* dst_ptr, const uint8* src_ptr,
|
| - ptrdiff_t src_stride, int dst_width,
|
| +void InterpolateRow_SSSE3(uint8* dst_ptr,
|
| + const uint8* src_ptr,
|
| + ptrdiff_t src_stride,
|
| + int dst_width,
|
| int source_y_fraction) {
|
| asm volatile (
|
| "sub %1,%0 \n"
|
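InterpolateRow blends a row with the row src_stride below it using an 8-bit fraction: 0 copies the top row, larger values weight in the bottom row, which is the vertical half of bilinear scaling. Scalar sketch (name illustrative; the real rows special-case 0 and 50/50):

#include <stddef.h>
#include <stdint.h>

// Scalar sketch of InterpolateRow: vertical blend of two rows with fraction f,
// dst = (top * (256 - f) + bottom * f + rounding) >> 8.
static void InterpolateRow_Sketch(uint8_t* dst_ptr, const uint8_t* src_ptr,
                                  ptrdiff_t src_stride, int dst_width,
                                  int source_y_fraction) {
  const uint8_t* src_ptr1 = src_ptr + src_stride;
  int f1 = source_y_fraction;
  int f0 = 256 - f1;
  for (int x = 0; x < dst_width; ++x) {
    dst_ptr[x] = (uint8_t)((src_ptr[x] * f0 + src_ptr1[x] * f1 + 128) >> 8);
  }
}
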
| @@ -4889,8 +4957,10 @@ void InterpolateRow_SSSE3(uint8* dst_ptr, const uint8* src_ptr,
|
|
|
| #ifdef HAS_INTERPOLATEROW_AVX2
|
| // Bilinear filter 32x2 -> 32x1
|
| -void InterpolateRow_AVX2(uint8* dst_ptr, const uint8* src_ptr,
|
| - ptrdiff_t src_stride, int dst_width,
|
| +void InterpolateRow_AVX2(uint8* dst_ptr,
|
| + const uint8* src_ptr,
|
| + ptrdiff_t src_stride,
|
| + int dst_width,
|
| int source_y_fraction) {
|
| asm volatile (
|
| "cmp $0x0,%3 \n"
|
| @@ -4965,8 +5035,10 @@ void InterpolateRow_AVX2(uint8* dst_ptr, const uint8* src_ptr,
|
|
|
| #ifdef HAS_ARGBSHUFFLEROW_SSSE3
|
| // For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA.
|
| -void ARGBShuffleRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
|
| - const uint8* shuffler, int width) {
|
| +void ARGBShuffleRow_SSSE3(const uint8* src_argb,
|
| + uint8* dst_argb,
|
| + const uint8* shuffler,
|
| + int width) {
|
| asm volatile (
|
| "movdqu " MEMACCESS(3) ",%%xmm5 \n"
|
| LABELALIGN
|
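All three ARGBShuffleRow variants reorder the four bytes of each pixel according to `shuffler`, which is why one routine serves BGRAToARGB, ABGRToARGB, RGBAToARGB and ARGBToRGBA. Per pixel only the relative 4-byte pattern matters (the 16-byte pshufb table repeats it). Scalar sketch (name illustrative):

#include <stdint.h>

// Scalar sketch of ARGBShuffleRow: byte permutation within each 4-byte pixel,
// e.g. a pattern of {2, 1, 0, 3} swaps B and R.
static void ARGBShuffleRow_Sketch(const uint8_t* src_argb, uint8_t* dst_argb,
                                  const uint8_t* shuffler, int width) {
  for (int i = 0; i < width; ++i) {
    for (int c = 0; c < 4; ++c) {
      dst_argb[i * 4 + c] = src_argb[i * 4 + shuffler[c]];
    }
  }
}
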
| @@ -4993,8 +5065,10 @@ void ARGBShuffleRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
|
|
|
| #ifdef HAS_ARGBSHUFFLEROW_AVX2
|
| // For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA.
|
| -void ARGBShuffleRow_AVX2(const uint8* src_argb, uint8* dst_argb,
|
| - const uint8* shuffler, int width) {
|
| +void ARGBShuffleRow_AVX2(const uint8* src_argb,
|
| + uint8* dst_argb,
|
| + const uint8* shuffler,
|
| + int width) {
|
| asm volatile (
|
| "vbroadcastf128 " MEMACCESS(3) ",%%ymm5 \n"
|
| LABELALIGN
|
| @@ -5022,8 +5096,10 @@ void ARGBShuffleRow_AVX2(const uint8* src_argb, uint8* dst_argb,
|
|
|
| #ifdef HAS_ARGBSHUFFLEROW_SSE2
|
| // For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA.
|
| -void ARGBShuffleRow_SSE2(const uint8* src_argb, uint8* dst_argb,
|
| - const uint8* shuffler, int width) {
|
| +void ARGBShuffleRow_SSE2(const uint8* src_argb,
|
| + uint8* dst_argb,
|
| + const uint8* shuffler,
|
| + int width) {
|
| uintptr_t pixel_temp;
|
| asm volatile (
|
| "pxor %%xmm5,%%xmm5 \n"
|
| @@ -5144,8 +5220,9 @@ void ARGBShuffleRow_SSE2(const uint8* src_argb, uint8* dst_argb,
|
| void I422ToYUY2Row_SSE2(const uint8* src_y,
|
| const uint8* src_u,
|
| const uint8* src_v,
|
| - uint8* dst_frame, int width) {
|
| - asm volatile (
|
| + uint8* dst_frame,
|
| + int width) {
|
| + asm volatile (
|
| "sub %1,%2 \n"
|
| LABELALIGN
|
| "1: \n"
|
| @@ -5179,8 +5256,9 @@ void I422ToYUY2Row_SSE2(const uint8* src_y,
|
| void I422ToUYVYRow_SSE2(const uint8* src_y,
|
| const uint8* src_u,
|
| const uint8* src_v,
|
| - uint8* dst_frame, int width) {
|
| - asm volatile (
|
| + uint8* dst_frame,
|
| + int width) {
|
| + asm volatile (
|
| "sub %1,%2 \n"
|
| LABELALIGN
|
| "1: \n"
|
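These two rows interleave planar I422 into packed 4:2:2: YUY2 stores Y0 U Y1 V per pair of pixels, UYVY stores U Y0 V Y1. Scalar sketch of the YUY2 variant, assuming an even width as the real rows do (UYVY only changes the byte order):

#include <stdint.h>

// Scalar sketch of I422ToYUY2Row: two luma samples share one U and one V.
static void I422ToYUY2Row_Sketch(const uint8_t* src_y, const uint8_t* src_u,
                                 const uint8_t* src_v, uint8_t* dst_frame,
                                 int width) {
  for (int x = 0; x < width; x += 2) {
    dst_frame[0] = src_y[0];
    dst_frame[1] = src_u[0];
    dst_frame[2] = src_y[1];
    dst_frame[3] = src_v[0];
    src_y += 2;
    src_u += 1;
    src_v += 1;
    dst_frame += 4;
  }
}
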
| @@ -5212,7 +5290,8 @@ void I422ToUYVYRow_SSE2(const uint8* src_y,
|
|
|
| #ifdef HAS_ARGBPOLYNOMIALROW_SSE2
|
| void ARGBPolynomialRow_SSE2(const uint8* src_argb,
|
| - uint8* dst_argb, const float* poly,
|
| + uint8* dst_argb,
|
| + const float* poly,
|
| int width) {
|
| asm volatile (
|
| "pxor %%xmm3,%%xmm3 \n"
|
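The polynomial row applies a per-channel cubic in floating point: for each channel value v the output is c0 + c1*v + c2*v^2 + c3*v^3, clamped back to 0..255. The sketch below assumes `poly` is laid out as four groups of four floats (constant, linear, quadratic, cubic terms for B,G,R,A); that layout is an assumption about the coefficient packing, not something shown in this hunk:

#include <stdint.h>

// Scalar sketch of ARGBPolynomialRow under the assumed coefficient layout.
static void ARGBPolynomialRow_Sketch(const uint8_t* src_argb,
                                     uint8_t* dst_argb, const float* poly,
                                     int width) {
  for (int i = 0; i < width; ++i) {
    for (int c = 0; c < 4; ++c) {
      float v = (float)src_argb[i * 4 + c];
      float r = poly[c] + poly[4 + c] * v + poly[8 + c] * v * v +
                poly[12 + c] * v * v * v;
      if (r < 0.f) r = 0.f;
      if (r > 255.f) r = 255.f;
      dst_argb[i * 4 + c] = (uint8_t)r;
    }
  }
}
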
| @@ -5268,7 +5347,8 @@ void ARGBPolynomialRow_SSE2(const uint8* src_argb,
|
|
|
| #ifdef HAS_ARGBPOLYNOMIALROW_AVX2
|
| void ARGBPolynomialRow_AVX2(const uint8* src_argb,
|
| - uint8* dst_argb, const float* poly,
|
| + uint8* dst_argb,
|
| + const float* poly,
|
| int width) {
|
| asm volatile (
|
| "vbroadcastf128 " MEMACCESS(3) ",%%ymm4 \n"
|
| @@ -5444,7 +5524,8 @@ void HalfFloat1Row_F16C(const uint16* src, uint16* dst, float, int width) {
|
|
|
| #ifdef HAS_ARGBCOLORTABLEROW_X86
|
| // Transform ARGB pixels with color table.
|
| -void ARGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb,
|
| +void ARGBColorTableRow_X86(uint8* dst_argb,
|
| + const uint8* table_argb,
|
| int width) {
|
| uintptr_t pixel_temp;
|
| asm volatile (
|
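The colour-table row remaps each channel in place through a 256-entry ARGB lookup table: channel c of a pixel indexes an entry and reads column c of it, so B, G, R and A each get their own 256-byte curve (RGBColorTableRow_X86 below is the same with alpha left alone). Scalar sketch (name illustrative):

#include <stdint.h>

// Scalar sketch of ARGBColorTableRow: per-channel lookup into a table of
// 256 ARGB entries (table_argb is 1024 bytes).
static void ARGBColorTableRow_Sketch(uint8_t* dst_argb,
                                     const uint8_t* table_argb, int width) {
  for (int i = 0; i < width; ++i) {
    for (int c = 0; c < 4; ++c) {
      dst_argb[i * 4 + c] = table_argb[dst_argb[i * 4 + c] * 4 + c];
    }
  }
}
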
| @@ -5504,9 +5585,11 @@ void RGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb, int width) {
|
|
|
| #ifdef HAS_ARGBLUMACOLORTABLEROW_SSSE3
|
| // Transform RGB pixels with luma table.
|
| -void ARGBLumaColorTableRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
|
| +void ARGBLumaColorTableRow_SSSE3(const uint8* src_argb,
|
| + uint8* dst_argb,
|
| int width,
|
| - const uint8* luma, uint32 lumacoeff) {
|
| + const uint8* luma,
|
| + uint32 lumacoeff) {
|
| uintptr_t pixel_temp;
|
| uintptr_t table_temp;
|
| asm volatile (
|
| @@ -5604,8 +5687,6 @@ void ARGBLumaColorTableRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
|
|
|
| #endif // defined(__x86_64__) || defined(__i386__)
|
|
|
| -// clang-format on
|
| -
|
| #ifdef __cplusplus
|
| } // extern "C"
|
| } // namespace libyuv
|
|
|