Index: src/opts/SkBlitRow_opts_SSE4_asm.S
diff --git a/src/opts/SkBlitRow_opts_SSE4_asm.S b/src/opts/SkBlitRow_opts_SSE4_asm.S
new file mode 100644
index 0000000000000000000000000000000000000000..5e034c089a746a5bf5d9e4b0e3996cb6ebfa0b17
--- /dev/null
+++ b/src/opts/SkBlitRow_opts_SSE4_asm.S
@@ -0,0 +1,589 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#if !defined(_MSC_VER)
+
+#define CFI_PUSH(REG) \
+    .cfi_adjust_cfa_offset 4; \
+    .cfi_rel_offset REG, 0
+
+#define CFI_POP(REG) \
+    .cfi_adjust_cfa_offset -4; \
+    .cfi_restore REG
+
+#define PUSH(REG) pushl REG; CFI_PUSH (REG)
+#define POP(REG)  popl REG;  CFI_POP (REG)
+
+/*
+ * void S32A_Opaque_BlitRow32_SSE4(SkPMColor* SK_RESTRICT dst,
+ *                                 const SkPMColor* SK_RESTRICT src,
+ *                                 int count, U8CPU alpha)
+ *
+ * The primary optimization comes from checking the source pixels' alpha value.
+ * If the alpha is zero, the pixel can be skipped entirely.
+ * If the alpha is fully opaque, the pixel can be copied directly to the destination.
+ * According to collected statistics, these two cases are the most common.
+ * The main loops use pre-loading and unrolling in an attempt to reduce the
+ * worst-case memory latency.
+ */
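For reference, the mixed-alpha path below implements the standard premultiplied
src-over blend. A minimal scalar C sketch of the per-pixel math (hypothetical,
not part of this patch):

    #include <stdint.h>

    // dst' = src + dst * (256 - src_alpha) / 256, per channel
    static inline uint32_t src_over(uint32_t src, uint32_t dst) {
        uint32_t scale = 256 - (src >> 24);                         // inverse alpha
        uint32_t rb = ((dst & 0x00FF00FFu) * scale >> 8) & 0x00FF00FFu;
        uint32_t ag = ((dst >> 8 & 0x00FF00FFu) * scale) & 0xFF00FF00u;
        return src + (rb | ag);
    }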
+
+    .section .text.sse4,"ax",@progbits
+    .type S32A_Opaque_BlitRow32_SSE4_asm, @function
+    .globl S32A_Opaque_BlitRow32_SSE4_asm
+
+    .p2align 4
+S32A_Opaque_BlitRow32_SSE4_asm:
+    .cfi_startproc
+    movl 8(%esp), %eax          // Source pointer
+    movl 12(%esp), %ecx         // Pixel count
+    movl 4(%esp), %edx          // Destination pointer
+    prefetcht0 (%eax)
+
+    // Setup SSE constants
+    pcmpeqd %xmm7, %xmm7        // 0xFF000000 mask to check alpha
+    pcmpeqw %xmm6, %xmm6        // 16-bit 256 to calculate inv. alpha
mtklein 2014/05/16 18:06:38:
    Does the interlaced instruction scheduling here re…
henrik.smiding 2014/05/20 15:10:29:
    On a Haswell core, probably not. On a Silvermont/A…
+    pslld $24, %xmm7
+    pcmpeqw %xmm0, %xmm0        // 0x00FF00FF mask (Must be in xmm0 because of pblendvb)
+    psrlw $15, %xmm6
+    psrlw $8, %xmm0
+    subl $4, %ecx               // Check if we have only 0-3 pixels
+    psllw $8, %xmm6
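The three constants built up here, interleaved with the early-out checks, could
be written with intrinsics roughly as follows (a hypothetical sketch; the
comments name the registers each value corresponds to):

    #include <smmintrin.h>  // SSE4.1

    static inline void make_constants(__m128i* alpha_mask, __m128i* scale_256,
                                      __m128i* rb_mask) {
        __m128i ones = _mm_set1_epi32(-1);          // what pcmpeqd/pcmpeqw give us
        *alpha_mask = _mm_slli_epi32(ones, 24);     // xmm7: 0xFF000000 per pixel
        *scale_256  = _mm_slli_epi16(_mm_srli_epi16(ones, 15), 8); // xmm6: 256 per word
        *rb_mask    = _mm_srli_epi16(ones, 8);      // xmm0: 0x00FF00FF mask
    }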
+    js .LReallySmall
+    PUSH(%edi)
+    cmpl $11, %ecx              // Do we have enough pixels to run the main loop?
+    ja .LBigBlit
+
+    // Handle small blits (4-15 pixels)
+    // ********************************
+    xorl %edi, %edi             // Reset offset to zero
+
+.LSmallLoop:
+    lddqu (%eax, %edi), %xmm1   // Load four source pixels
+    ptest %xmm7, %xmm1          // Check if all alphas are zero or opaque
mtklein 2014/05/16 18:06:38:
    Is this the sort of place intrinsics fail us? I g…
henrik.smiding 2014/05/20 15:10:29:
    That's correct. It was all about not making the wo…
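For context: a single ptest sets both ZF ("no alpha bits set") and CF ("all
alpha bits set"), so the asm dispatches all three cases off one test. The
intrinsics expose the two flags only as separate boolean results; a
hypothetical sketch of the same dispatch (handle4 is an illustrative name):

    static void handle4(uint32_t* dst, __m128i px, __m128i alpha_mask) {
        if (_mm_testz_si128(px, alpha_mask)) {       // ZF: all alphas zero
            return;                                  // skip the store entirely
        }
        if (_mm_testc_si128(px, alpha_mask)) {       // CF: all alphas opaque
            _mm_storeu_si128((__m128i*)dst, px);     // plain copy
            return;
        }
        /* mixed alphas: blend (the 'ja' path below) */
    }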
+    ja .LSmallAlphaNotOpaqueOrZero
+    jz .LSmallAlphaZero
+    movdqu %xmm1, (%edx, %edi)  // Store four destination pixels
+.LSmallAlphaZero:
+    addl $16, %edi
+    subl $4, %ecx               // Check if there are four additional pixels, at least
+    jns .LSmallLoop
+    jmp .LSmallRemaining
+
+    // Handle mixed alphas (calculate and scale)
+    .p2align 4
+.LSmallAlphaNotOpaqueOrZero:
+    lddqu (%edx, %edi), %xmm5   // Load four destination pixels
+
+    movdqa %xmm1, %xmm2         // Clone source pixels to extract alpha
+    psrlw $8, %xmm2             // Discard red and blue, leaving alpha and green
+    pshufhw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (high)
+    movdqa %xmm6, %xmm4
+    pshuflw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (low)
+    movdqa %xmm5, %xmm3
+    psubw %xmm2, %xmm4          // Finalize alpha calculations
+
+    psllw $8, %xmm5             // Filter out red and blue components
+    pmulhuw %xmm4, %xmm5        // Scale red and blue
+    psrlw $8, %xmm3             // Filter out alpha and green components
+    pmullw %xmm4, %xmm3         // Scale alpha and green
+
+    addl $16, %edi
+    subl $4, %ecx               // Check if we can store all four pixels
+    pblendvb %xmm0, %xmm5, %xmm3
+    paddb %xmm3, %xmm1          // Add source and destination pixels together
+    movdqu %xmm1, -16(%edx, %edi) // Store four destination pixels
+    jns .LSmallLoop
+
+    // Handle the last 0-3 pixels (also used by the big unaligned loop)
+.LSmallRemaining:
+    cmpl $-4, %ecx              // Check if we are done
+    je .LSmallExit
+    sall $2, %ecx               // Calculate offset for last pixels
+    addl %ecx, %edi
+
+    lddqu (%eax, %edi), %xmm1   // Load last four source pixels (overlapping)
mtklein 2014/05/16 18:06:38:
    I was expecting we'd fall back on non-SIMD or do s…
henrik.smiding 2014/05/20 15:10:29:
    I've improved the comments a bit.
+    ptest %xmm7, %xmm1          // Check if all alphas are zero or opaque
+    jc .LSmallRemainingStoreAll // If all alphas are opaque, just store
+    jz .LSmallExit
+
+    // Handle mixed alphas (calculate and scale)
mtklein 2014/05/16 18:06:38:
    Can we share or macro away this big blend block?
henrik.smiding 2014/05/20 15:10:29:
    Done. I replaced about 200 lines of code with macr…
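A hypothetical intrinsics rendering of the blend block that repeats throughout
this file (and that the follow-up patch folds into a macro); scale_256 and
rb_mask are the xmm6/xmm0 constants from the prologue, blend4 an illustrative
name:

    static inline __m128i blend4(__m128i src, __m128i dst,
                                 __m128i scale_256, __m128i rb_mask) {
        __m128i a = _mm_srli_epi16(src, 8);            // words hold [G, A] per pixel
        a = _mm_shufflehi_epi16(a, 0xF5);              // repeat alpha (high half)
        a = _mm_shufflelo_epi16(a, 0xF5);              // repeat alpha (low half)
        __m128i scale = _mm_sub_epi16(scale_256, a);   // 256 - alpha, per word
        __m128i rb = _mm_mulhi_epu16(_mm_slli_epi16(dst, 8), scale); // (rb*scale)>>8
        __m128i ag = _mm_mullo_epi16(_mm_srli_epi16(dst, 8), scale); // ag*scale
        __m128i scaled = _mm_blendv_epi8(ag, rb, rb_mask); // low bytes rb, high ag
        return _mm_add_epi8(src, scaled);              // src + dst*(256 - a)/256
    }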
+    lddqu (%edx, %edi), %xmm5   // Load last four destination pixels (overlapping)
+    movdqa %xmm1, %xmm2         // Clone source pixels to extract alpha
+    psrlw $8, %xmm2             // Discard red and blue, leaving alpha and green
+    pshufhw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (high)
+    movdqa %xmm6, %xmm4
+    pshuflw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (low)
+    movdqa %xmm5, %xmm3
+    psubw %xmm2, %xmm4          // Finalize alpha calculations
+
+    psllw $8, %xmm3             // Filter out red and blue components
+    pmulhuw %xmm4, %xmm3        // Scale red and blue
+    movdqa %xmm5, %xmm2
+    psrlw $8, %xmm2             // Filter out alpha and green components
+    pmullw %xmm4, %xmm2         // Scale alpha and green
+
+    cmpl $-8, %ecx              // Check how many pixels should be written
+    pblendvb %xmm0, %xmm3, %xmm2 // Combine results
+    paddb %xmm2, %xmm1          // Add source and destination pixels together
+    jb .LSmallPixelsLeft1
+    ja .LSmallPixelsLeft3
+    pblendw $0xF0, %xmm1, %xmm5
+    movdqu %xmm5, (%edx, %edi)  // Store last two destination pixels
+.LSmallExit:
+    POP(%edi)
+    ret
+
+.LSmallPixelsLeft1:
+    pblendw $0xC0, %xmm1, %xmm5
+    movdqu %xmm5, (%edx, %edi)  // Store last destination pixel
+    POP(%edi)
+    ret
+
+.LSmallPixelsLeft3:
+    pblendw $0xFC, %xmm1, %xmm5
+    movdqu %xmm5, (%edx, %edi)  // Store last three destination pixels
+    POP(%edi)
+    ret
+
+.LSmallRemainingStoreAll:
+    movdqu %xmm1, (%edx, %edi)  // Store last destination pixels (overwrite)
+    POP(%edi)
+    ret
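The overlapping tail trick above, sketched with intrinsics (hypothetical): back
up so one full four-pixel vector ends exactly at the row end, blend only the n
freshly computed pixels into the reloaded destination (the pblendw immediates
0xC0/0xF0/0xFC select the top 1/2/3 dwords), and store the whole vector.

    static inline void store_tail(uint32_t* dst_end, __m128i result, int n) {
        __m128i old = _mm_loadu_si128((const __m128i*)(dst_end - 4));
        switch (n) {                   // n = pixels still to write (1-3)
            case 1: old = _mm_blend_epi16(old, result, 0xC0); break; // last pixel
            case 2: old = _mm_blend_epi16(old, result, 0xF0); break; // last two
            case 3: old = _mm_blend_epi16(old, result, 0xFC); break; // last three
        }
        _mm_storeu_si128((__m128i*)(dst_end - 4), old);
    }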
+
+    // Handle really small blits (0-3 pixels)
+    // **************************************
+.LReallySmall:
+    addl $4, %ecx
+    jle .LReallySmallExit
+    pcmpeqd %xmm1, %xmm1        // Set unused lanes to opaque, so they can't fail the test below
+    cmp $2, %ecx                // Check how many pixels should be read
+    pinsrd $0x0, (%eax), %xmm1  // Load one source pixel
+    pinsrd $0x0, (%edx), %xmm5  // Load one destination pixel
+    jb .LReallySmallCalc
+    pinsrd $0x1, 4(%eax), %xmm1 // Load second source pixel
+    pinsrd $0x1, 4(%edx), %xmm5 // Load second destination pixel
+    je .LReallySmallCalc
+    pinsrd $0x2, 8(%eax), %xmm1 // Load third source pixel
+    pinsrd $0x2, 8(%edx), %xmm5 // Load third destination pixel
+
+.LReallySmallCalc:
+    ptest %xmm7, %xmm1          // Check if all alphas are opaque
+    jc .LReallySmallStore       // If all alphas are opaque, just store
+
+    // Handle mixed alphas (calculate and scale)
+    movdqa %xmm1, %xmm2         // Clone source pixels to extract alpha
+    psrlw $8, %xmm2             // Discard red and blue, leaving alpha and green
+    pshufhw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (high)
+    movdqa %xmm6, %xmm4
+    pshuflw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (low)
+    movdqa %xmm5, %xmm3
+    psubw %xmm2, %xmm4          // Finalize alpha calculations
+
+    pand %xmm0, %xmm5           // Filter out red and blue components
+    pmullw %xmm4, %xmm5         // Scale red and blue
+    psrlw $8, %xmm3             // Filter out alpha and green components
+    pmullw %xmm4, %xmm3         // Scale alpha and green
+
+    psrlw $8, %xmm5             // Combine results
+    pblendvb %xmm0, %xmm5, %xmm3
+    paddb %xmm3, %xmm1          // Add source and destination pixels together
+
+.LReallySmallStore:
+    cmp $2, %ecx                // Check how many pixels should be written
+    pextrd $0x0, %xmm1, (%edx)  // Store one destination pixel
+    jb .LReallySmallExit
+    pextrd $0x1, %xmm1, 4(%edx) // Store second destination pixel
+    je .LReallySmallExit
+    pextrd $0x2, %xmm1, 8(%edx) // Store third destination pixel
+.LReallySmallExit:
+    ret
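A hypothetical intrinsics rendering of this 1-3 pixel path (blit_1_to_3 is an
illustrative name). The unused lanes are first set to all-ones so their alpha
reads as opaque and cannot defeat the all-opaque ptest shortcut:

    static void blit_1_to_3(uint32_t* dst, const uint32_t* src, int n,
                            __m128i alpha_mask) {
        __m128i s = _mm_set1_epi32(-1);             // pcmpeqd: unused lanes opaque
        s = _mm_insert_epi32(s, (int)src[0], 0);
        if (n >= 2) s = _mm_insert_epi32(s, (int)src[1], 1);
        if (n >= 3) s = _mm_insert_epi32(s, (int)src[2], 2);
        if (!_mm_testc_si128(s, alpha_mask)) {
            /* mixed alphas: gather dst the same way, then blend as usual */
        }
        dst[0] = (uint32_t)_mm_extract_epi32(s, 0); // pextrd stores
        if (n >= 2) dst[1] = (uint32_t)_mm_extract_epi32(s, 1);
        if (n >= 3) dst[2] = (uint32_t)_mm_extract_epi32(s, 2);
    }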
+
+    // Handle bigger blit operations (16+ pixels)
+    // ******************************************
+    .p2align 4
+.LBigBlit:
+    // Align destination?
+    testl $0xF, %edx
+    lddqu (%eax), %xmm1         // Pre-load four source pixels
+    jz .LAligned
+
+    movl %edx, %edi             // Calculate alignment of destination pointer
+    negl %edi
+    andl $0xF, %edi
+
+    // Handle 1-3 pixels to align destination
+    ptest %xmm7, %xmm1          // Check if all alphas are zero or opaque
mtklein 2014/05/16 18:06:38:
    Do you think we're benefitting by having everythin…
henrik.smiding 2014/05/20 15:10:29:
    That would kill performance of short blits, like 1…
+    jz .LAlignDone              // If all alphas are zero, just skip
+    lddqu (%edx), %xmm5         // Load four destination pixels
+    jc .LAlignStore             // If all alphas are opaque, just store
+
+    // Handle mixed alphas (calculate and scale)
+    movdqa %xmm1, %xmm2         // Clone source pixels to extract alpha
+    psrlw $8, %xmm2             // Discard red and blue
+    pshufhw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (high)
+    movdqa %xmm6, %xmm4
+    pshuflw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (low)
+    movdqa %xmm5, %xmm3
+    psubw %xmm2, %xmm4          // Finalize alpha calculations
+
+    psllw $8, %xmm3             // Filter out red and blue components
+    pmulhuw %xmm4, %xmm3        // Scale red and blue
+    movdqa %xmm5, %xmm2
+    psrlw $8, %xmm2             // Filter out alpha and green components
+    pmullw %xmm4, %xmm2         // Scale alpha and green
+
+    pblendvb %xmm0, %xmm3, %xmm2 // Combine results
+    paddb %xmm2, %xmm1          // Add source and destination pixels together
+
+.LAlignStore:
+    cmp $8, %edi                // Check how many pixels should be written
+    jb .LAlignPixelsLeft1
+    ja .LAlignPixelsLeft3
+    pblendw $0x0F, %xmm1, %xmm5 // Blend two pixels
+    jmp .LAlignStorePixels
+
+.LAlignPixelsLeft1:
+    pblendw $0x03, %xmm1, %xmm5 // Blend one pixel
+    jmp .LAlignStorePixels
+
+.LAlignPixelsLeft3:
+    pblendw $0x3F, %xmm1, %xmm5 // Blend three pixels
+
+.LAlignStorePixels:
+    movdqu %xmm5, (%edx)        // Store destination pixels
+
+.LAlignDone:
+    addl %edi, %eax             // Adjust pointers and pixel count
+    addl %edi, %edx
+    shrl $2, %edi
+    lddqu (%eax), %xmm1         // Pre-load new source pixels (after alignment)
+    subl %edi, %ecx
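The alignment arithmetic above, as a hypothetical C sketch: negl/andl compute
how many bytes remain to the next 16-byte boundary of dst; after the 1-3 head
pixels are handled, both pointers advance and the count shrinks accordingly.

    static size_t align_dst(uint32_t** dst, const uint32_t** src, int* count) {
        uintptr_t head = (-(uintptr_t)*dst) & 0xF; // bytes to boundary: 0, 4, 8 or 12
        /* ...blend the head pixels with an unaligned load + pblendw store... */
        *src = (const uint32_t*)((const char*)*src + head);
        *dst = (uint32_t*)((char*)*dst + head);
        *count -= (int)(head >> 2);                // shrl $2: bytes -> pixels
        return head >> 2;
    }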
+
+.LAligned:                      // Destination is guaranteed to be 16-byte aligned
+    xorl %edi, %edi             // Reset offset to zero
+    subl $8, %ecx               // Decrease counter (reserve four pixels for the cleanup)
+    testl $0xF, %eax            // Check alignment of source pointer
+    jz .LAlignedLoop
+
+    // Source not aligned to destination
+    // *********************************
+    .p2align 4
+.LUnalignedLoop:                // Main loop for unaligned, handles eight pixels per iteration
+    ptest %xmm7, %xmm1          // Check if all alphas are zero or opaque
+    ja .LAlphaNotOpaqueOrZero00
+    lddqu 16(%eax, %edi), %xmm2 // Pre-load four source pixels
+    jz .LAlphaZero00
+    movdqa %xmm1, (%edx, %edi)  // Store four destination pixels
+
+.LAlphaZero00:
+    ptest %xmm7, %xmm2          // Check if all alphas are zero or opaque
+    ja .LAlphaNotOpaqueOrZero01
+    lddqu 32(%eax, %edi), %xmm1 // Pre-load four source pixels
+    jz .LAlphaZero01
+    movdqa %xmm2, 16(%edx, %edi) // Store four destination pixels
+
+.LAlphaZero01:
+    addl $32, %edi              // Adjust offset and pixel count
+    subl $8, %ecx
+    jae .LUnalignedLoop
+    addl $8, %ecx               // Adjust pixel count
+    jmp .LLoopCleanup0
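The software pipelining in this loop, as a hypothetical C sketch using the
handle4() dispatch sketched earlier: each group's load is issued before the
previous group is tested and blended, and the four pixels reserved by the
'subl $8' above keep the look-ahead loads in bounds.

    __m128i cur = _mm_lddqu_si128((const __m128i*)src);   // the pre-load above
    while (count >= 0) {                                  // count was biased by -8
        __m128i next = _mm_lddqu_si128((const __m128i*)(src + 4));
        handle4(dst, cur, alpha_mask);                    // skip/copy/blend
        cur = _mm_lddqu_si128((const __m128i*)(src + 8)); // in bounds: 4 reserved
        handle4(dst + 4, next, alpha_mask);
        src += 8; dst += 8; count -= 8;
    }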
+
+    .p2align 4
+.LAlphaNotOpaqueOrZero00:
+    movdqa (%edx, %edi), %xmm5  // Load four destination pixels
+    movdqa %xmm1, %xmm2         // Clone source pixels to extract alpha
+    psrlw $8, %xmm2             // Discard red and blue
+    pshufhw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (high)
+    movdqa %xmm6, %xmm4
+    pshuflw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (low)
+    movdqa %xmm5, %xmm3
+    psubw %xmm2, %xmm4          // Finalize alpha calculations
+
+    psllw $8, %xmm5             // Filter out red and blue components
+    pmulhuw %xmm4, %xmm5        // Scale red and blue
+    psrlw $8, %xmm3             // Filter out alpha and green components
+    pmullw %xmm4, %xmm3         // Scale alpha and green
+
+    lddqu 16(%eax, %edi), %xmm2 // Pre-load four source pixels
+    pblendvb %xmm0, %xmm5, %xmm3 // Combine results
+    paddb %xmm3, %xmm1          // Add source and destination pixels together
+    movdqa %xmm1, (%edx, %edi)  // Store four destination pixels
+
+    // Handle next four pixels
+    ptest %xmm7, %xmm2          // Check if all alphas are zero or opaque
+    ja .LAlphaNotOpaqueOrZero01
+    lddqu 32(%eax, %edi), %xmm1 // Pre-load four source pixels
+    jz .LAlphaZero02
+    movdqa %xmm2, 16(%edx, %edi) // Store four destination pixels
+.LAlphaZero02:
+    addl $32, %edi              // Adjust offset and pixel count
+    subl $8, %ecx
+    jae .LUnalignedLoop
+    addl $8, %ecx               // Adjust pixel count
+    jmp .LLoopCleanup0
+
+    .p2align 4
+.LAlphaNotOpaqueOrZero01:
+    movdqa 16(%edx, %edi), %xmm5 // Load four destination pixels
+
+    movdqa %xmm2, %xmm1         // Clone source pixels to extract alpha
+    psrlw $8, %xmm1             // Discard red and blue
+    pshufhw $0xF5, %xmm1, %xmm1 // Repeat alpha for scaling (high)
+    movdqa %xmm6, %xmm4
+    pshuflw $0xF5, %xmm1, %xmm1 // Repeat alpha for scaling (low)
+    movdqa %xmm5, %xmm3
+    psubw %xmm1, %xmm4          // Finalize alpha calculations
+
+    psllw $8, %xmm5             // Filter out red and blue components
+    pmulhuw %xmm4, %xmm5        // Scale red and blue
+    psrlw $8, %xmm3             // Filter out alpha and green components
+    pmullw %xmm4, %xmm3         // Scale alpha and green
+
+    lddqu 32(%eax, %edi), %xmm1 // Pre-load four source pixels
+    addl $32, %edi
+    pblendvb %xmm0, %xmm5, %xmm3 // Combine results
+    paddb %xmm3, %xmm2          // Add source and destination pixels together
+    subl $8, %ecx
+    movdqa %xmm2, -16(%edx, %edi) // Store four destination pixels
+    jae .LUnalignedLoop
+    addl $8, %ecx               // Adjust pixel count
+
+    // Cleanup - handle pending pixels from loop
+.LLoopCleanup0:
+    ptest %xmm7, %xmm1          // Check if all alphas are zero or opaque
+    ja .LAlphaNotOpaqueOrZero02
+    jz .LAlphaZero03
+    movdqa %xmm1, (%edx, %edi)  // Store four destination pixels
+.LAlphaZero03:
+    addl $16, %edi
+    subl $4, %ecx
+    js .LSmallRemaining         // Reuse code from small loop
+    lddqu (%eax, %edi), %xmm1   // Pre-load four source pixels
+    jmp .LLoopCleanup0
+
+.LAlphaNotOpaqueOrZero02:
+    movdqa (%edx, %edi), %xmm5  // Load four destination pixels
+    movdqa %xmm1, %xmm2         // Clone source pixels to extract alpha
+    psrlw $8, %xmm2             // Discard red and blue
+    pshufhw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (high)
+    movdqa %xmm6, %xmm4
+    pshuflw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (low)
+    movdqa %xmm5, %xmm3
+    psubw %xmm2, %xmm4          // Finalize alpha calculations
+
+    psllw $8, %xmm5             // Filter out red and blue components
+    pmulhuw %xmm4, %xmm5        // Scale red and blue
+    psrlw $8, %xmm3             // Filter out alpha and green components
+    pmullw %xmm4, %xmm3         // Scale alpha and green
+
+    addl $16, %edi
+    subl $4, %ecx
+    pblendvb %xmm0, %xmm5, %xmm3 // Combine results
+    paddb %xmm3, %xmm1          // Add source and destination pixels together
+    movdqa %xmm1, -16(%edx, %edi) // Store four destination pixels
+    js .LSmallRemaining         // Reuse code from small loop
+    lddqu (%eax, %edi), %xmm1   // Pre-load four source pixels
+    jmp .LLoopCleanup0
+
+    // Source aligned to destination
+    // *****************************
+    .p2align 4
+.LAlignedLoop:                  // Main loop for aligned, handles eight pixels per iteration
+    ptest %xmm7, %xmm1          // Check if all alphas are zero or opaque
+    ja .LAlphaNotOpaqueOrZero10
+    movdqa 16(%eax, %edi), %xmm2 // Pre-load four source pixels
+    jz .LAlphaZero10
+    movdqa %xmm1, (%edx, %edi)  // Store four destination pixels
+
+.LAlphaZero10:
+    ptest %xmm7, %xmm2          // Check if all alphas are zero or opaque
+    ja .LAlphaNotOpaqueOrZero11
+    movdqa 32(%eax, %edi), %xmm1 // Pre-load four source pixels
+    jz .LAlphaZero11
+    movdqa %xmm2, 16(%edx, %edi) // Store four destination pixels
+
+.LAlphaZero11:
+    addl $32, %edi              // Adjust offset and pixel count
+    subl $8, %ecx
+    jae .LAlignedLoop
+    jmp .LLoopCleanup1
+
+    .p2align 4
+.LAlphaNotOpaqueOrZero10:
+    movdqa (%edx, %edi), %xmm5  // Load four destination pixels
+    movdqa %xmm1, %xmm2         // Clone source pixels to extract alpha
+    psrlw $8, %xmm2             // Discard red and blue
+    pshufhw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (high)
+    movdqa %xmm6, %xmm4
+    pshuflw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (low)
+    movdqa %xmm5, %xmm3
+    psubw %xmm2, %xmm4          // Finalize alpha calculations
+
+    psllw $8, %xmm5             // Filter out red and blue components
+    pmulhuw %xmm4, %xmm5        // Scale red and blue
+    psrlw $8, %xmm3             // Filter out alpha and green components
+    pmullw %xmm4, %xmm3         // Scale alpha and green
+
+    movdqa 16(%eax, %edi), %xmm2 // Pre-load four source pixels
+    pblendvb %xmm0, %xmm5, %xmm3 // Combine results
+    paddb %xmm3, %xmm1          // Add source and destination pixels together
+    movdqa %xmm1, (%edx, %edi)  // Store four destination pixels
+
+    // Handle next four pixels
+    ptest %xmm7, %xmm2          // Check if all alphas are zero or opaque
+    ja .LAlphaNotOpaqueOrZero11
+    movdqa 32(%eax, %edi), %xmm1 // Pre-load four source pixels
+    jz .LAlphaZero12
+    movdqa %xmm2, 16(%edx, %edi) // Store four destination pixels
+.LAlphaZero12:
+    addl $32, %edi              // Adjust offset and pixel count
+    subl $8, %ecx
+    jae .LAlignedLoop
+    jmp .LLoopCleanup1
+
+    .p2align 4
+.LAlphaNotOpaqueOrZero11:
+    movdqa 16(%edx, %edi), %xmm5 // Load four destination pixels
+
+    movdqa %xmm2, %xmm1         // Clone source pixels to extract alpha
+    psrlw $8, %xmm1             // Discard red and blue
+    pshufhw $0xF5, %xmm1, %xmm1 // Repeat alpha for scaling (high)
+    movdqa %xmm6, %xmm4
+    pshuflw $0xF5, %xmm1, %xmm1 // Repeat alpha for scaling (low)
+    movdqa %xmm5, %xmm3
+    psubw %xmm1, %xmm4          // Finalize alpha calculations
+
+    psllw $8, %xmm5             // Filter out red and blue components
+    pmulhuw %xmm4, %xmm5        // Scale red and blue
+    psrlw $8, %xmm3             // Filter out alpha and green components
+    pmullw %xmm4, %xmm3         // Scale alpha and green
+    movdqa 32(%eax, %edi), %xmm1 // Pre-load four source pixels
+
+    addl $32, %edi
+    pblendvb %xmm0, %xmm5, %xmm3 // Combine results
+    paddb %xmm3, %xmm2          // Add source and destination pixels together
+    subl $8, %ecx
+    movdqa %xmm2, -16(%edx, %edi) // Store four destination pixels
+    jae .LAlignedLoop
+
+    // Cleanup - handle four pending pixels from loop
+.LLoopCleanup1:
+    ptest %xmm7, %xmm1          // Check if all alphas are zero or opaque
+    ja .LAlphaNotOpaqueOrZero12
+    jz .LAlphaZero13
+    movdqa %xmm1, (%edx, %edi)  // Store four destination pixels
+.LAlphaZero13:
+    addl $8, %ecx               // Adjust offset and pixel count
+    jz .LExit
+    addl $16, %edi
+    jmp .LRemainLoop1
+
+.LAlphaNotOpaqueOrZero12:
+    movdqa (%edx, %edi), %xmm5  // Load four destination pixels
+    movdqa %xmm1, %xmm2         // Clone source pixels to extract alpha
+    psrlw $8, %xmm2             // Discard red and blue
+    pshufhw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (high)
+    movdqa %xmm6, %xmm4
+    pshuflw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (low)
+    movdqa %xmm5, %xmm3
+    psubw %xmm2, %xmm4          // Finalize alpha calculations
+
+    psllw $8, %xmm5             // Filter out red and blue components
+    pmulhuw %xmm4, %xmm5        // Scale red and blue
+    psrlw $8, %xmm3             // Filter out alpha and green components
+    pmullw %xmm4, %xmm3         // Scale alpha and green
+
+    addl $8, %ecx               // Adjust offset and pixel count
+    pblendvb %xmm0, %xmm5, %xmm3 // Combine results
+    paddb %xmm3, %xmm1          // Add source and destination pixels together
+    movdqa %xmm1, (%edx, %edi)  // Store four destination pixels
+    jz .LExit
+    addl $16, %edi
+
+    // Handle last 1-7 pixels
+.LRemainLoop1:
+    movdqa (%eax, %edi), %xmm1  // Load four source pixels
+    ptest %xmm7, %xmm1          // Check if all alphas are zero or opaque
+    ja .LRemainAlphaNotOpaqueOrZero1
+    jz .LRemainAlphaZero1
+
+    // All alphas were opaque (copy)
+    subl $4, %ecx               // Check if we have more than four pixels left
+    jle .LRemainStore
+    movdqa %xmm1, (%edx, %edi)  // Store four destination pixels
+    addl $16, %edi
+    jmp .LRemainLoop1
+
+    // All alphas were zero (skip)
+    .p2align 4
+.LRemainAlphaZero1:
+    subl $4, %ecx               // Check if we have more than four pixels left
+    jle .LExit
+    addl $16, %edi
+    jmp .LRemainLoop1
+
+    // Handle mixed alphas (calculate and scale)
+    .p2align 4
+.LRemainAlphaNotOpaqueOrZero1:
+    movdqa (%edx, %edi), %xmm5  // Load four destination pixels
+
+    movdqa %xmm1, %xmm2         // Clone source pixels to extract alpha
+    psrlw $8, %xmm2             // Discard red and blue
+    pshufhw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (high)
+    movdqa %xmm6, %xmm4
+    pshuflw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (low)
+    movdqa %xmm5, %xmm3
+    psubw %xmm2, %xmm4          // Finalize alpha calculations
+
+    psllw $8, %xmm5             // Filter out red and blue components
+    pmulhuw %xmm4, %xmm5        // Scale red and blue
+    psrlw $8, %xmm3             // Filter out alpha and green components
+    pmullw %xmm4, %xmm3         // Scale alpha and green
+
+    subl $4, %ecx
+    pblendvb %xmm0, %xmm5, %xmm3 // Combine results
+    paddb %xmm3, %xmm1          // Add source and destination pixels together
+    jle .LRemainStore
+    movdqa %xmm1, (%edx, %edi)  // Store four destination pixels
+    addl $16, %edi
+    jmp .LRemainLoop1
+
+    // Store the last 1-4 pixels
+    .p2align 4
+.LRemainStore:
+    jz .LRemainFull
+    movdqa (%edx, %edi), %xmm5  // Load four destination pixels
+    cmp $-2, %ecx               // Check how many pixels should be written
+    jb .LRemainPixelsLeft11
+    ja .LRemainPixelsLeft13
+    pblendw $0x0F, %xmm1, %xmm5
+    movdqa %xmm5, (%edx, %edi)  // Store last two destination pixels
+.LExit:
+    POP(%edi)                   // Exit
+    ret
+
+.LRemainPixelsLeft11:
+    pblendw $0x03, %xmm1, %xmm5
+    movdqa %xmm5, (%edx, %edi)  // Store last destination pixel
+    POP(%edi)                   // Exit
+    ret
+
+.LRemainPixelsLeft13:
+    pblendw $0x3F, %xmm1, %xmm5
+    movdqa %xmm5, (%edx, %edi)  // Store last three destination pixels
+    POP(%edi)                   // Exit
+    ret
+
+.LRemainFull:
+    movdqa %xmm1, (%edx, %edi)  // Store last four destination pixels
+    POP(%edi)                   // Exit
+    ret
+
+    .cfi_endproc
+    .size S32A_Opaque_BlitRow32_SSE4_asm, .-S32A_Opaque_BlitRow32_SSE4_asm
+#endif