Chromium Code Reviews

Unified Diff: src/opts/SkBlitRow_opts_SSE4_x64_asm.S

Issue 289473009: Add SSE4 optimization of S32A_Opaque_Blitrow (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: Created 6 years, 7 months ago
Index: src/opts/SkBlitRow_opts_SSE4_x64_asm.S
diff --git a/src/opts/SkBlitRow_opts_SSE4_x64_asm.S b/src/opts/SkBlitRow_opts_SSE4_x64_asm.S
new file mode 100644
index 0000000000000000000000000000000000000000..f5068016b154c9df90cbaac3cce77979224778ee
--- /dev/null
+++ b/src/opts/SkBlitRow_opts_SSE4_x64_asm.S
@@ -0,0 +1,578 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#if !defined(_MSC_VER)
+
+/*
+ * void S32A_Opaque_BlitRow32_SSE4(SkPMColor* SK_RESTRICT dst,
+ * const SkPMColor* SK_RESTRICT src,
+ * int count, U8CPU alpha)
+ *
+ * The primary optimization comes from checking the source pixels' alpha value.
+ * If the alpha is zero, the pixel can be skipped entirely.
+ * If the alpha is fully opaque, the pixel can be copied directly to the destination.
+ * According to collected statistics, these two cases are the most common.
+ * The main loops use pre-loading and unrolling in an attempt to hide the
+ * worst-case memory latency.
+ */
+
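+/*
+ * For reference, the per-pixel operation implemented below corresponds to the
+ * scalar SrcOver blend used by the portable S32A_Opaque_BlitRow32 code.
+ * A rough sketch, assuming Skia's usual SkAlphaMulQ formulation:
+ *
+ *     unsigned scale = 256 - SkGetPackedA32(src);
+ *     dst = src + ((((dst & 0x00FF00FF) * scale) >> 8) & 0x00FF00FF)
+ *               + ((((dst >> 8) & 0x00FF00FF) * scale) & 0xFF00FF00);
+ *
+ * The SIMD code computes the two masked halves with pmulhuw/pmullw on 16-bit
+ * lanes and merges them with pblendvb.
+ */
+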
+ .section .text.sse4,"ax",@progbits
+ .type S32A_Opaque_BlitRow32_SSE4_asm, @function
+ .globl S32A_Opaque_BlitRow32_SSE4_asm
+
+ .p2align 4
+S32A_Opaque_BlitRow32_SSE4_asm:
+ .cfi_startproc
+ prefetcht0 (%rsi) // Prefetch the first source pixels
+ movl %edx, %ecx // Pixel count
+ movq %rdi, %rdx // Destination pointer
+ movq %rsi, %rax // Source pointer
+
+ // Setup SSE constants
+ movdqa .LAlphaCheckMask(%rip), %xmm7 // 0xFF000000 mask to check alpha
+ movdqa .LInverseAlphaCalc(%rip), %xmm6 // 16-bit 256 to calculate inv. alpha
+ movdqa .LResultMergeMask(%rip), %xmm0 // 0x00FF00FF mask (Must be in xmm0 because of pblendvb)
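+
+ // Throughout this file, "ptest %xmm7, <pixels>" classifies a group of four
+ // source pixels in one step: ZF is set when every masked alpha byte is zero,
+ // CF is set when every masked alpha byte is 0xFF. So "jz" skips a group,
+ // "jc" or the fall-through copies it unmodified, and "ja" (CF=0 and ZF=0)
+ // takes the per-pixel blend path.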
+
+ subl $4, %ecx // Check if we have only 0-3 pixels
+ js .LReallySmall
+ cmpl $11, %ecx // Do we have enough pixels to run the main loop?
+ ja .LBigBlit
+
+ // Handle small blits (4-15 pixels)
+ // ********************************
+ xorq %rdi, %rdi // Reset offset to zero
+
+.LSmallLoop:
+ lddqu (%rax, %rdi), %xmm1 // Load four source pixels
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ ja .LSmallAlphaNotOpaqueOrZero
+ jz .LSmallAlphaZero
+ movdqu %xmm1, (%rdx, %rdi) // Store four destination pixels
+.LSmallAlphaZero:
+ addq $16, %rdi
+ subl $4, %ecx // Check if there are four additional pixels, at least
+ jns .LSmallLoop
+ jmp .LSmallRemaining
+
+ // Handle mixed alphas (calculate and scale)
+ .p2align 4
+.LSmallAlphaNotOpaqueOrZero:
+ lddqu (%rdx, %rdi), %xmm5 // Load four destination pixels
+
+ movdqa %xmm1, %xmm2 // Clone source pixels to extract alpha
+ psrlw $8, %xmm2 // Discard red and blue, leaving alpha and green
+ pshufhw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (high)
+ movdqa %xmm6, %xmm4
+ pshuflw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (low)
+ movdqa %xmm5, %xmm3
+ psubw %xmm2, %xmm4 // Finalize alpha calculations
+
+ psllw $8, %xmm5 // Filter out red and blue components
+ pmulhuw %xmm4, %xmm5 // Scale red and blue
+ psrlw $8, %xmm3 // Filter out alpha and green components
+ pmullw %xmm4, %xmm3 // Scale alpha and green
+
+ addq $16, %rdi
+ subl $4, %ecx // Check if we can store all four pixels
+ pblendvb %xmm0, %xmm5, %xmm3 // Combine results
+ paddb %xmm3, %xmm1 // Add source and destination pixels together
+ movdqu %xmm1, -16(%rdx, %rdi) // Store four destination pixels
+ jns .LSmallLoop
+
+ // Handle the last 0-3 pixels (also used by the big unaligned loop)
+.LSmallRemaining:
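+ // On entry ecx is in [-4, -1]: -4 means nothing is left, otherwise 1-3
+ // pixels remain. The loads below deliberately overlap pixels that were
+ // already written: fully opaque pixels blend to exactly their source value,
+ // and the pblendw stores only touch the remaining lanes, so the overlap is
+ // harmless.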
+ cmpl $-4, %ecx // Check if we are done
+ je .LSmallExit
+ sall $2, %ecx // Calculate offset for last pixels
+ movslq %ecx, %rcx
+ addq %rcx, %rdi
+
+ lddqu (%rax, %rdi), %xmm1 // Load last four source pixels (overlapping)
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ jc .LSmallRemainingStoreAll // If all alphas are opaque, just store
+ jz .LSmallExit
+
+ // Handle mixed alphas (calculate and scale)
+ lddqu (%rdx, %rdi), %xmm5 // Load last four destination pixels (overlapping)
+ movdqa %xmm1, %xmm2 // Clone source pixels to extract alpha
+ psrlw $8, %xmm2 // Discard red and blue, leaving alpha and green
+ pshufhw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (high)
+ movdqa %xmm6, %xmm4
+ pshuflw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (low)
+ movdqa %xmm5, %xmm3
+ psubw %xmm2, %xmm4 // Finalize alpha calculations
+
+ psllw $8, %xmm3 // Filter out red and blue components
+ pmulhuw %xmm4, %xmm3 // Scale red and blue
+ movdqa %xmm5, %xmm2
+ psrlw $8, %xmm2 // Filter out alpha and green components
+ pmullw %xmm4, %xmm2 // Scale alpha and green
+
+ cmpl $-8, %ecx // Check how many pixels should be written
+ pblendvb %xmm0, %xmm3, %xmm2 // Combine results
+ paddb %xmm2, %xmm1 // Add source and destination pixels together
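+ // The pblendw masks below select 16-bit lanes: 0xC0 keeps only the last
+ // pixel, 0xF0 the last two, 0xFC the last three. The other lanes come from
+ // the destination pixels already loaded (and already correct) in xmm5.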
+ jb .LSmallPixelsLeft1
+ ja .LSmallPixelsLeft3
+ pblendw $0xF0, %xmm1, %xmm5
+ movdqu %xmm5, (%rdx, %rdi) // Store last two destination pixels
+.LSmallExit:
+ ret
+
+.LSmallPixelsLeft1:
+ pblendw $0xC0, %xmm1, %xmm5
+ movdqu %xmm5, (%rdx, %rdi) // Store last destination pixel
+ ret
+
+.LSmallPixelsLeft3:
+ pblendw $0xFC, %xmm1, %xmm5
+ movdqu %xmm5, (%rdx, %rdi) // Store last three destination pixels
+ ret
+
+.LSmallRemainingStoreAll:
+ movdqu %xmm1, (%rdx, %rdi) // Store last destination pixels (overwrite)
+ ret
+
+ // Handle really small blits (0-3 pixels)
+ // **************************************
+.LReallySmall:
+ addl $4, %ecx
+ jle .LReallySmallExit
+ pcmpeqd %xmm1, %xmm1 // Fill xmm1 with ones
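+ // Lanes that are not overwritten by a pinsrd below keep alpha 0xFF, so the
+ // "all alphas opaque" test depends only on the pixels actually loaded, and
+ // the unused lanes are never stored.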
+ cmpl $2, %ecx // Check how many pixels should be read
+ pinsrd $0x0, (%rax), %xmm1 // Load one source pixel
+ pinsrd $0x0, (%rdx), %xmm5 // Load one destination pixel
+ jb .LReallySmallCalc
+ pinsrd $0x1, 4(%rax), %xmm1 // Load second source pixel
+ pinsrd $0x1, 4(%rdx), %xmm5 // Load second destination pixel
+ je .LReallySmallCalc
+ pinsrd $0x2, 8(%rax), %xmm1 // Load third source pixel
+ pinsrd $0x2, 8(%rdx), %xmm5 // Load third destination pixel
+
+.LReallySmallCalc:
+ ptest %xmm7, %xmm1 // Check if all alphas are opaque
+ jc .LReallySmallStore // If all alphas are opaque, just store
+
+ // Handle mixed alphas (calculate and scale)
+ movdqa %xmm1, %xmm2 // Clone source pixels to extract alpha
+ psrlw $8, %xmm2 // Discard red and blue, leaving alpha and green
+ pshufhw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (high)
+ movdqa %xmm6, %xmm4
+ pshuflw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (low)
+ movdqa %xmm5, %xmm3
+ psubw %xmm2, %xmm4 // Finalize alpha calculations
+
+ pand %xmm0, %xmm5 // Filter out red and blue components
+ pmullw %xmm4, %xmm5 // Scale red and blue
+ psrlw $8, %xmm3 // Filter out alpha and green components
+ pmullw %xmm4, %xmm3 // Scale alpha and green
+
+ psrlw $8, %xmm5 // Shift the scaled red and blue back into position
+ pblendvb %xmm0, %xmm5, %xmm3
+ paddb %xmm3, %xmm1 // Add source and destination pixels together
+
+.LReallySmallStore:
+ cmpl $2, %ecx // Check how many pixels should be written
+ pextrd $0x0, %xmm1, (%rdx) // Store one destination pixel
+ jb .LReallySmallExit
+ pextrd $0x1, %xmm1, 4(%rdx) // Store second destination pixel
+ je .LReallySmallExit
+ pextrd $0x2, %xmm1, 8(%rdx) // Store third destination pixel
+.LReallySmallExit:
+ ret
+
+ // Handle bigger blit operations (16+ pixels)
+ // ******************************************
+ .p2align 4
+.LBigBlit:
+ // Align destination?
+ testl $0xF, %edx
+ lddqu (%rax), %xmm1 // Pre-load four source pixels
+ jz .LAligned
+
+ movq %rdx, %rdi // Calculate alignment of destination pointer
+ negq %rdi
+ andl $0xF, %edi
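+ // rdi is now the number of bytes (4, 8 or 12, i.e. 1-3 pixels, assuming the
+ // destination is at least pixel aligned) needed to reach the next 16-byte
+ // boundary.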
+
+ // Handle 1-3 pixels to align destination
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ jz .LAlignDone // If all alphas are zero, just skip
+ lddqu (%rdx), %xmm5 // Load four destination pixels
+ jc .LAlignStore // If all alphas are opaque, just store
+
+ // Handle mixed alphas (calculate and scale)
+ movdqa %xmm1, %xmm2 // Clone source pixels to extract alpha
+ psrlw $8, %xmm2 // Discard red and blue
+ pshufhw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (high)
+ movdqa %xmm6, %xmm4
+ pshuflw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (low)
+ movdqa %xmm5, %xmm3
+ psubw %xmm2, %xmm4 // Finalize alpha calculations
+
+ psllw $8, %xmm3 // Filter out red and blue components
+ pmulhuw %xmm4, %xmm3 // Scale red and blue
+ movdqa %xmm5, %xmm2
+ psrlw $8, %xmm2 // Filter out alpha and green components
+ pmullw %xmm4, %xmm2 // Scale alpha and green
+
+ pblendvb %xmm0, %xmm3, %xmm2 // Combine results
+ paddb %xmm2, %xmm1 // Add source and destination pixels together
+
+.LAlignStore:
+ cmpl $8, %edi // Check how many pixels should be written
+ jb .LAlignPixelsLeft1
+ ja .LAlignPixelsLeft3
+ pblendw $0x0F, %xmm1, %xmm5 // Blend two pixels
+ jmp .LAlignStorePixels
+
+.LAlignPixelsLeft1:
+ pblendw $0x03, %xmm1, %xmm5 // Blend one pixel
+ jmp .LAlignStorePixels
+
+.LAlignPixelsLeft3:
+ pblendw $0x3F, %xmm1, %xmm5 // Blend three pixels
+
+.LAlignStorePixels:
+ movdqu %xmm5, (%rdx) // Store destination pixels
+
+.LAlignDone:
+ addq %rdi, %rax // Adjust pointers and pixel count
+ addq %rdi, %rdx
+ shrq $2, %rdi // Convert the byte adjustment to a pixel count
+ lddqu (%rax), %xmm1 // Pre-load new source pixels (after alignment)
+ subl %edi, %ecx
+
+.LAligned: // Destination is guaranteed to be 16-byte aligned
+ xorq %rdi, %rdi // Reset offset to zero
+ subl $8, %ecx // Decrease counter (Reserve four pixels for the cleanup)
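+ // ecx is now (pixels left - 12); the eight-pixel loops below run while it
+ // stays non-negative, which keeps every pre-load inside the row and always
+ // leaves the cleanup code at least four pixels and a pre-loaded group in
+ // xmm1.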
+ testl $0xF, %eax // Check alignment of source pointer
+ jz .LAlignedLoop
+
+ // Source not aligned to destination
+ // *********************************
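+ // This loop is identical to the aligned main loop further down, except that
+ // source pixels are loaded with lddqu instead of movdqa.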
+ .p2align 4
+.LUnalignedLoop: // Main loop for unaligned, handles eight pixels per iteration
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero00
+ lddqu 16(%rax, %rdi), %xmm2 // Pre-load four source pixels
+ jz .LAlphaZero00
+ movdqa %xmm1, (%rdx, %rdi) // Store four destination pixels
+
+.LAlphaZero00:
+ ptest %xmm7, %xmm2 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero01
+ lddqu 32(%rax, %rdi), %xmm1 // Pre-load four source pixels
+ jz .LAlphaZero01
+ movdqa %xmm2, 16(%rdx, %rdi) // Store four destination pixels
+
+.LAlphaZero01:
+ addq $32, %rdi // Adjust offset and pixel count
+ subl $8, %ecx
+ jae .LUnalignedLoop
+ addl $8, %ecx // Adjust pixel count
+ jmp .LLoopCleanup0
+
+ .p2align 4
+.LAlphaNotOpaqueOrZero00:
+ movdqa (%rdx, %rdi), %xmm5 // Load four destination pixels
+ movdqa %xmm1, %xmm2 // Clone source pixels to extract alpha
+ psrlw $8, %xmm2 // Discard red and blue
+ pshufhw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (high)
+ movdqa %xmm6, %xmm4
+ pshuflw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (low)
+ movdqa %xmm5, %xmm3
+ psubw %xmm2, %xmm4 // Finalize alpha calculations
+
+ psllw $8, %xmm5 // Filter out red and blue components
+ pmulhuw %xmm4, %xmm5 // Scale red and blue
+ psrlw $8, %xmm3 // Filter out alpha and green components
+ pmullw %xmm4, %xmm3 // Scale alpha and green
+
+ lddqu 16(%rax, %rdi), %xmm2 // Pre-load four source pixels
+ pblendvb %xmm0, %xmm5, %xmm3 // Combine results
+ paddb %xmm3, %xmm1 // Add source and destination pixels together
+ movdqa %xmm1, (%rdx, %rdi) // Store four destination pixels
+
+ // Handle next four pixels
+ ptest %xmm7, %xmm2 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero01
+ lddqu 32(%rax, %rdi), %xmm1 // Pre-load four source pixels
+ jz .LAlphaZero02
+ movdqa %xmm2, 16(%rdx, %rdi) // Store four destination pixels
+.LAlphaZero02:
+ addq $32, %rdi // Adjust offset and pixel count
+ subl $8, %ecx
+ jae .LUnalignedLoop
+ addl $8, %ecx // Adjust pixel count
+ jmp .LLoopCleanup0
+
+ .p2align 4
+.LAlphaNotOpaqueOrZero01:
+ movdqa 16(%rdx, %rdi), %xmm5 // Load four destination pixels
+
+ movdqa %xmm2, %xmm1 // Clone source pixels to extract alpha
+ psrlw $8, %xmm1 // Discard red and blue
+ pshufhw $0xF5, %xmm1, %xmm1 // Repeat alpha for scaling (high)
+ movdqa %xmm6, %xmm4
+ pshuflw $0xF5, %xmm1, %xmm1 // Repeat alpha for scaling (low)
+ movdqa %xmm5, %xmm3
+ psubw %xmm1, %xmm4 // Finalize alpha calculations
+
+ psllw $8, %xmm5 // Filter out red and blue components
+ pmulhuw %xmm4, %xmm5 // Scale red and blue
+ psrlw $8, %xmm3 // Filter out alpha and green components
+ pmullw %xmm4, %xmm3 // Scale alpha and green
+
+ lddqu 32(%rax, %rdi), %xmm1 // Pre-load four source pixels
+ addq $32, %rdi
+ pblendvb %xmm0, %xmm5, %xmm3 // Combine results
+ paddb %xmm3, %xmm2 // Add source and destination pixels together
+ subl $8, %ecx
+ movdqa %xmm2, -16(%rdx, %rdi) // Store four destination pixels
+ jae .LUnalignedLoop
+ addl $8, %ecx // Adjust pixel count
+
+ // Cleanup - handle pending pixels from loop
+.LLoopCleanup0:
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero02
+ jz .LAlphaZero03
+ movdqa %xmm1, (%rdx, %rdi) // Store four destination pixels
+.LAlphaZero03:
+ addq $16, %rdi
+ subl $4, %ecx
+ js .LSmallRemaining // Reuse code from small loop
+ lddqu (%rax, %rdi), %xmm1 // Pre-load four source pixels
+ jmp .LLoopCleanup0
+
+.LAlphaNotOpaqueOrZero02:
+ movdqa (%rdx, %rdi), %xmm5 // Load four destination pixels
+ movdqa %xmm1, %xmm2 // Clone source pixels to extract alpha
+ psrlw $8, %xmm2 // Discard red and blue
+ pshufhw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (high)
+ movdqa %xmm6, %xmm4
+ pshuflw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (low)
+ movdqa %xmm5, %xmm3
+ psubw %xmm2, %xmm4 // Finalize alpha calculations
+
+ psllw $8, %xmm5 // Filter out red and blue components
+ pmulhuw %xmm4, %xmm5 // Scale red and blue
+ psrlw $8, %xmm3 // Filter out alpha and green components
+ pmullw %xmm4, %xmm3 // Scale alpha and green
+
+ addq $16, %rdi
+ subl $4, %ecx
+ pblendvb %xmm0, %xmm5, %xmm3 // Combine results
+ paddb %xmm3, %xmm1 // Add source and destination pixels together
+ movdqa %xmm1, -16(%rdx, %rdi) // Store four destination pixels
+ js .LSmallRemaining // Reuse code from small loop
+ lddqu (%rax, %rdi), %xmm1 // Pre-load four source pixels
+ jmp .LLoopCleanup0
+
+ // Source aligned to destination
+ // *****************************
+ .p2align 4
+.LAlignedLoop: // Main loop for aligned, handles eight pixels per iteration
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero10
+ movdqa 16(%rax, %rdi), %xmm2 // Pre-load four source pixels
+ jz .LAlphaZero10
+ movdqa %xmm1, (%rdx, %rdi) // Store four destination pixels
+
+.LAlphaZero10:
+ ptest %xmm7, %xmm2 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero11
+ movdqa 32(%rax, %rdi), %xmm1 // Pre-load four source pixels
+ jz .LAlphaZero11
+ movdqa %xmm2, 16(%rdx, %rdi) // Store four destination pixels
+
+.LAlphaZero11:
+ addq $32, %rdi // Adjust offset and pixel count
+ subl $8, %ecx
+ jae .LAlignedLoop
+ jmp .LLoopCleanup1
+
+ .p2align 4
+.LAlphaNotOpaqueOrZero10:
+ movdqa (%rdx, %rdi), %xmm5 // Load four destination pixels
+ movdqa %xmm1, %xmm2 // Clone source pixels to extract alpha
+ psrlw $8, %xmm2 // Discard red and blue
+ pshufhw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (high)
+ movdqa %xmm6, %xmm4
+ pshuflw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (low)
+ movdqa %xmm5, %xmm3
+ psubw %xmm2, %xmm4 // Finalize alpha calculations
+
+ psllw $8, %xmm5 // Filter out red and blue components
+ pmulhuw %xmm4, %xmm5 // Scale red and blue
+ psrlw $8, %xmm3 // Filter out alpha and green components
+ pmullw %xmm4, %xmm3 // Scale alpha and green
+
+ movdqa 16(%rax, %rdi), %xmm2 // Pre-load four source pixels
+ pblendvb %xmm0, %xmm5, %xmm3 // Combine results
+ paddb %xmm3, %xmm1 // Add source and destination pixels together
+ movdqa %xmm1, (%rdx, %rdi) // Store four destination pixels
+
+ // Handle next four pixels
+ ptest %xmm7, %xmm2 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero11
+ movdqa 32(%rax, %rdi), %xmm1 // Pre-load four source pixels
+ jz .LAlphaZero12
+ movdqa %xmm2, 16(%rdx, %rdi) // Store four destination pixels
+.LAlphaZero12:
+ addq $32, %rdi // Adjust offset and pixel count
+ subl $8, %ecx
+ jae .LAlignedLoop
+ jmp .LLoopCleanup1
+
+ .p2align 4
+.LAlphaNotOpaqueOrZero11:
+ movdqa 16(%rdx, %rdi), %xmm5 // Load four destination pixels
+
+ movdqa %xmm2, %xmm1 // Clone source pixels to extract alpha
+ psrlw $8, %xmm1 // Discard red and blue
+ pshufhw $0xF5, %xmm1, %xmm1 // Repeat alpha for scaling (high)
+ movdqa %xmm6, %xmm4
+ pshuflw $0xF5, %xmm1, %xmm1 // Repeat alpha for scaling (low)
+ movdqa %xmm5, %xmm3
+ psubw %xmm1, %xmm4 // Finalize alpha calculations
+
+ psllw $8, %xmm5 // Filter out red and blue components
+ pmulhuw %xmm4, %xmm5 // Scale red and blue
+ psrlw $8, %xmm3 // Filter out alpha and green components
+ pmullw %xmm4, %xmm3 // Scale alpha and green
+ movdqa 32(%rax, %rdi), %xmm1 // Pre-load four source pixels
+
+ addq $32, %rdi
+ pblendvb %xmm0, %xmm5, %xmm3 // Combine results
+ paddb %xmm3, %xmm2 // Add source and destination pixels together
+ subl $8, %ecx
+ movdqa %xmm2, -16(%rdx, %rdi) // Store four destination pixels
+ jae .LAlignedLoop
+
+ // Cleanup - handle four pending pixels from loop
+.LLoopCleanup1:
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero12
+ jz .LAlphaZero13
+ movdqa %xmm1, (%rdx, %rdi) // Store four destination pixels
+.LAlphaZero13:
+ addl $8, %ecx // Adjust offset and pixel count
+ jz .LExit
+ addq $16, %rdi
+ jmp .LRemainLoop1
+
+.LAlphaNotOpaqueOrZero12:
+ movdqa (%rdx, %rdi), %xmm5 // Load four destination pixels
+ movdqa %xmm1, %xmm2 // Clone source pixels to extract alpha
+ psrlw $8, %xmm2 // Discard red and blue
+ pshufhw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (high)
+ movdqa %xmm6, %xmm4
+ pshuflw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (low)
+ movdqa %xmm5, %xmm3
+ psubw %xmm2, %xmm4 // Finalize alpha calculations
+
+ psllw $8, %xmm5 // Filter out red and blue components
+ pmulhuw %xmm4, %xmm5 // Scale red and blue
+ psrlw $8, %xmm3 // Filter out alpha and green components
+ pmullw %xmm4, %xmm3 // Scale alpha and green
+
+ addl $8, %ecx // Adjust offset and pixel count
+ pblendvb %xmm0, %xmm5, %xmm3 // Combine results
+ paddb %xmm3, %xmm1 // Add source and destination pixels together
+ movdqa %xmm1, (%rdx, %rdi) // Store four destination pixels
+ jz .LExit
+ addq $16, %rdi
+
+ // Handle last 1-7 pixels
+.LRemainLoop1:
+ movdqa (%rax, %rdi), %xmm1 // Load four source pixels
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ ja .LRemainAlphaNotOpaqueOrZero1
+ jz .LRemainAlphaZero1
+
+ // All alphas were opaque (copy)
+ subl $4, %ecx // Check if we have more than four pixels left
+ jle .LRemainStore
+ movdqa %xmm1, (%rdx, %rdi) // Store four destination pixels
+ addq $16, %rdi
+ jmp .LRemainLoop1
+
+ // All alphas were zero (skip)
+ .p2align 4
+.LRemainAlphaZero1:
+ subl $4, %ecx // Check if we have more than four pixels left
+ jle .LExit
+ addq $16, %rdi
+ jmp .LRemainLoop1
+
+ // Handle mixed alphas (calculate and scale)
+ .p2align 4
+.LRemainAlphaNotOpaqueOrZero1:
+ movdqa (%rdx, %rdi), %xmm5 // Load four destination pixels
+
+ movdqa %xmm1, %xmm2 // Clone source pixels to extract alpha
+ psrlw $8, %xmm2 // Discard red and blue
+ pshufhw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (high)
+ movdqa %xmm6, %xmm4
+ pshuflw $0xF5, %xmm2, %xmm2 // Repeat alpha for scaling (low)
+ movdqa %xmm5, %xmm3
+ psubw %xmm2, %xmm4 // Finalize alpha calculations
+
+ psllw $8, %xmm5 // Filter out red and blue components
+ pmulhuw %xmm4, %xmm5 // Scale red and blue
+ psrlw $8, %xmm3 // Filter out alpha and green components
+ pmullw %xmm4, %xmm3 // Scale alpha and green
+
+ subl $4, %ecx
+ pblendvb %xmm0, %xmm5, %xmm3 // Combine results
+ paddb %xmm3, %xmm1 // Add source and destination pixels together
+ jle .LRemainStore
+ movdqa %xmm1, (%rdx, %rdi) // Store four destination pixels
+ addq $16, %rdi
+ jmp .LRemainLoop1
+
+ // Store the last 1-4 pixels
+ .p2align 4
+.LRemainStore:
+ jz .LRemainFull // Exactly four pixels remain
+ movdqa (%rdx, %rdi), %xmm5 // Load four destination pixels
+ cmpl $-2, %ecx // Check how many pixels should be written
+ jb .LRemainPixelsLeft11
+ ja .LRemainPixelsLeft13
+ pblendw $0x0F, %xmm1, %xmm5
+ movdqa %xmm5, (%rdx, %rdi) // Store last 2 destination pixels
+.LExit:
+ ret
+
+.LRemainPixelsLeft11:
+ pblendw $0x03, %xmm1, %xmm5
+ movdqa %xmm5, (%rdx, %rdi) // Store last destination pixel
+ ret
+
+.LRemainPixelsLeft13:
+ pblendw $0x3F, %xmm1, %xmm5
+ movdqa %xmm5, (%rdx, %rdi) // Store last 3 destination pixels
+ ret
+
+.LRemainFull:
+ movdqa %xmm1, (%rdx, %rdi) // Store last 4 destination pixels
+ ret
+
+ .cfi_endproc
+ .size S32A_Opaque_BlitRow32_SSE4_asm, .-S32A_Opaque_BlitRow32_SSE4_asm
+
+ // Constants for SSE code
+ .pushsection .rodata.sse4,"a",@progbits
+ .p2align 4
+.LAlphaCheckMask:
mtklein 2014/05/16 18:06:38 Looks like the differences here are: 1) calling
henrik.smiding 2014/05/20 15:10:29 I tested doing a position independent version in 3
+ .long 0xFF000000, 0xFF000000, 0xFF000000, 0xFF000000
+.LInverseAlphaCalc:
+ .word 256, 256, 256, 256, 256, 256, 256, 256
+.LResultMergeMask:
+ .long 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF
+ .popsection
+#endif
