OLD | NEW |
(Empty) | |
| 1 /* |
| 2 * Copyright 2014 The Android Open Source Project |
| 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. |
| 6 */ |
| 7 |
| 8 #if defined(__clang__) || (defined(__GNUC__) && !defined(SK_BUILD_FOR_MAC)) |
| 9 |
| 10 #define CFI_PUSH(REG) \ |
| 11 .cfi_adjust_cfa_offset 4; \ |
| 12 .cfi_rel_offset REG, 0 |
| 13 |
| 14 #define CFI_POP(REG) \ |
| 15 .cfi_adjust_cfa_offset -4; \ |
| 16 .cfi_restore REG |
| 17 |
| 18 #define PUSH(REG) pushl REG; CFI_PUSH (REG) |
| 19 #define POP(REG) popl REG; CFI_POP (REG) |
| 20 #define RETURN POP(%edi); ret |
| 21 |
| 22 #define EXTRACT_ALPHA(var1, var2) \ |
| 23 movdqa %var1, %var2; /* Clone source pixels to extract alpha
*/\ |
| 24 psrlw $8, %var2; /* Discard red and blue, leaving alpha a
nd green */\ |
| 25 pshufhw $0xF5, %var2, %var2; /* Repeat alpha for scaling (high) */\ |
| 26 movdqa %xmm6, %xmm4; \ |
| 27 pshuflw $0xF5, %var2, %var2; /* Repeat alpha for scaling (low) */\ |
| 28 movdqa %xmm5, %xmm3; \ |
| 29 psubw %var2, %xmm4 /* Finalize alpha calculations */ |
| 30 |
| 31 #define SCALE_PIXELS \ |
| 32 psllw $8, %xmm5; /* Filter out red and blue components */
\ |
| 33 pmulhuw %xmm4, %xmm5; /* Scale red and blue */\ |
| 34 psrlw $8, %xmm3; /* Filter out alpha and green components
*/\ |
| 35 pmullw %xmm4, %xmm3 /* Scale alpha and green */ |
| 36 |
| 37 |
| 38 /* |
| 39 * void S32A_Opaque_BlitRow32_SSE4(SkPMColor* SK_RESTRICT dst, |
| 40 * const SkPMColor* SK_RESTRICT src, |
| 41 * int count, U8CPU alpha) |
| 42 * |
| 43 * This function is divided into six blocks: initialization, blit 4-15 pixels, |
| 44 * blit 0-3 pixels, align destination for 16+ pixel blits, |
| 45 * blit 16+ pixels with source unaligned, blit 16+ pixels with source aligned. |
| 46  * There is some code reuse between the blocks. |
| 47 * |
| 48 * The primary optimization comes from checking the source pixels' alpha value. |
| 49 * If the alpha is zero, the pixel can be skipped entirely. |
| 50 * If the alpha is fully opaque, the pixel can be copied directly to the destina
tion. |
| 51 * According to collected statistics, these two cases are the most common. |
| 52  * The main loops use pre-loading and unrolling in an attempt to reduce the |
| 53  * worst-case memory latency. |
| 54 */ |
| 55 |
| 56 #ifdef __clang__ |
| 57 .text |
| 58 #else |
| 59 .section .text.sse4.2,"ax",@progbits |
| 60 .type S32A_Opaque_BlitRow32_SSE4_asm, @function |
| 61 #endif |
| 62 .p2align 4 |
| 63 #if defined(__clang__) && defined(SK_BUILD_FOR_MAC) |
| 64 .global _S32A_Opaque_BlitRow32_SSE4_asm |
| 65 _S32A_Opaque_BlitRow32_SSE4_asm: |
| 66 #else |
| 67 .global S32A_Opaque_BlitRow32_SSE4_asm |
| 68 S32A_Opaque_BlitRow32_SSE4_asm: |
| 69 #endif |
| 70 .cfi_startproc |
| 71 movl 8(%esp), %eax // Source pointer |
| 72 movl 12(%esp), %ecx // Pixel count |
| 73 movl 4(%esp), %edx // Destination pointer |
| 74 prefetcht0 (%eax) |
| 75 |
| 76 // Setup SSE constants |
| 77 pcmpeqd %xmm7, %xmm7 // 0xFF000000 mask to check alpha |
| 78 pslld $24, %xmm7 |
| 79 pcmpeqw %xmm6, %xmm6 // 16-bit 256 to calculate inv. alpha |
| 80 psrlw $15, %xmm6 |
| 81 psllw $8, %xmm6 |
| 82 pcmpeqw %xmm0, %xmm0 // 0x00FF00FF mask (Must be in xmm0 beca
use of pblendvb) |
| 83 psrlw $8, %xmm0 |
| 84 subl $4, %ecx // Check if we have only 0-3 pixels |
| 85 js .LReallySmall |
| 86 PUSH(%edi) |
| 87 cmpl $11, %ecx // Do we have enough pixels to run the m
ain loop? |
| 88 ja .LBigBlit |
| 89 |
| 90 // Handle small blits (4-15 pixels) |
| 91 ////////////////////////////////////////////////////////////////////////////
//// |
| 92 xorl %edi, %edi // Reset offset to zero |
| 93 |
| 94 .LSmallLoop: |
| 95 lddqu (%eax, %edi), %xmm1 // Load four source pixels |
| 96 ptest %xmm7, %xmm1 // Check if all alphas are zero or opaqu
e |
| 97 ja .LSmallAlphaNotOpaqueOrZero |
| 98 jz .LSmallAlphaZero // If all alphas are zero, skip the pixe
ls completely |
| 99 movdqu %xmm1, (%edx, %edi) // Store four destination pixels |
| 100 .LSmallAlphaZero: |
| 101 addl $16, %edi |
| 102 subl $4, %ecx // Check if there are four additional pi
xels, at least |
| 103 jns .LSmallLoop |
| 104 jmp .LSmallRemaining |
| 105 |
| 106 // Handle mixed alphas (calculate and scale) |
| 107 .p2align 4 |
| 108 .LSmallAlphaNotOpaqueOrZero: |
| 109 lddqu (%edx, %edi), %xmm5 // Load four destination pixels |
| 110 EXTRACT_ALPHA(xmm1, xmm2) // Extract and clone alpha value |
| 111 SCALE_PIXELS // Scale pixels using alpha |
| 112 |
| 113 addl $16, %edi |
| 114 subl $4, %ecx // Check if there are four additional pi
xels, at least |
| 115 pblendvb %xmm5, %xmm3 // Mask in %xmm0, implicitly |
| 116 paddb %xmm3, %xmm1 // Add source and destination pixels tog
ether |
| 117 movdqu %xmm1, -16(%edx, %edi) // Store four destination pixels |
| 118 jns .LSmallLoop |
| 119 |
| 120 // Handle the last 0-3 pixels (also used by the main loops) |
| 121 .LSmallRemaining: |
| 122 cmpl $-4, %ecx // Check if we are done |
| 123 je .LSmallExit |
| 124 sall $2, %ecx // Calculate offset for last pixels |
| 125 addl %ecx, %edi |
| 126 |
| 127 lddqu (%eax, %edi), %xmm1 // Load last four source pixels (overlap
ping) |
| 128 ptest %xmm7, %xmm1 // Check if all alphas are zero or opaqu
e |
| 129 jc .LSmallRemainingStoreAll// If all alphas are opaque, just store
(overlapping) |
| 130 jz .LSmallExit // If all alphas are zero, skip the pixe
ls completely |
| 131 |
| 132 // Handle mixed alphas (calculate and scale) |
| 133 lddqu (%edx, %edi), %xmm5 // Load last four destination pixels (ov
erlapping) |
| 134 EXTRACT_ALPHA(xmm1, xmm2) // Extract and clone alpha value |
| 135 |
| 136 psllw $8, %xmm3 // Filter out red and blue components |
| 137 pmulhuw %xmm4, %xmm3 // Scale red and blue |
| 138 movdqa %xmm5, %xmm2 |
| 139 psrlw $8, %xmm2 // Filter out alpha and green components |
| 140 pmullw %xmm4, %xmm2 // Scale alpha and green |
| 141 |
| 142 cmpl $-8, %ecx // Check how many pixels should be writt
en |
| 143 pblendvb %xmm3, %xmm2 // Combine results (mask in %xmm0, impli
citly) |
| 144 paddb %xmm2, %xmm1 // Add source and destination pixels tog
ether |
| 145 jb .LSmallPixelsLeft1 |
| 146 ja .LSmallPixelsLeft3 // To avoid double-blending the overlapp
ing pixels... |
| 147 pblendw $0xF0, %xmm1, %xmm5 // Merge only the final two pixels to th
e destination |
| 148 movdqu %xmm5, (%edx, %edi) // Store last two destination pixels |
| 149 .LSmallExit: |
| 150 RETURN |
| 151 |
| 152 .LSmallPixelsLeft1: |
| 153 pblendw $0xC0, %xmm1, %xmm5 // Merge only the final pixel to the des
tination |
| 154 movdqu %xmm5, (%edx, %edi) // Store last destination pixel |
| 155 RETURN |
| 156 |
| 157 .LSmallPixelsLeft3: |
| 158 pblendw $0xFC, %xmm1, %xmm5 // Merge only the final three pixels to
the destination |
| 159 movdqu %xmm5, (%edx, %edi) // Store last three destination pixels |
| 160 RETURN |
| 161 |
| 162 .LSmallRemainingStoreAll: |
| 163 movdqu %xmm1, (%edx, %edi) // Store last destination pixels (overwr
ite) |
| 164 RETURN |
| 165 |
| 166 // Handle really small blits (0-3 pixels) |
| 167 ////////////////////////////////////////////////////////////////////////////
//// |
| 168 .LReallySmall: |
| 169 addl $4, %ecx |
| 170 jle .LReallySmallExit |
| 171 pcmpeqd %xmm1, %xmm1 |
| 172 cmp $2, %ecx // Check how many pixels should be read |
| 173 pinsrd $0x0, (%eax), %xmm1 // Load one source pixel |
| 174 pinsrd $0x0, (%edx), %xmm5 // Load one destination pixel |
| 175 jb .LReallySmallCalc |
| 176 pinsrd $0x1, 4(%eax), %xmm1 // Load second source pixel |
| 177 pinsrd $0x1, 4(%edx), %xmm5 // Load second destination pixel |
| 178 je .LReallySmallCalc |
| 179 pinsrd $0x2, 8(%eax), %xmm1 // Load third source pixel |
| 180 pinsrd $0x2, 8(%edx), %xmm5 // Load third destination pixel |
| 181 |
| 182 .LReallySmallCalc: |
| 183 ptest %xmm7, %xmm1 // Check if all alphas are opaque |
| 184 jc .LReallySmallStore // If all alphas are opaque, just store |
| 185 |
| 186 // Handle mixed alphas (calculate and scale) |
| 187 EXTRACT_ALPHA(xmm1, xmm2) // Extract and clone alpha value |
| 188 |
| 189 pand %xmm0, %xmm5 // Filter out red and blue components |
| 190 pmullw %xmm4, %xmm5 // Scale red and blue |
| 191 psrlw $8, %xmm3 // Filter out alpha and green components |
| 192 pmullw %xmm4, %xmm3 // Scale alpha and green |
| 193 |
| 194 psrlw $8, %xmm5 // Combine results |
| 195 pblendvb %xmm5, %xmm3 // Mask in %xmm0, implicitly |
| 196 paddb %xmm3, %xmm1 // Add source and destination pixels tog
ether |
| 197 |
| 198 .LReallySmallStore: |
| 199 cmp $2, %ecx // Check how many pixels should be writt
en |
| 200 pextrd $0x0, %xmm1, (%edx) // Store one destination pixel |
| 201 jb .LReallySmallExit |
| 202 pextrd $0x1, %xmm1, 4(%edx) // Store second destination pixel |
| 203 je .LReallySmallExit |
| 204 pextrd $0x2, %xmm1, 8(%edx) // Store third destination pixel |
| 205 .LReallySmallExit: |
| 206 ret |
| 207 |
| 208 // Handle bigger blit operations (16+ pixels) |
| 209 ////////////////////////////////////////////////////////////////////////////
//// |
| 210 .p2align 4 |
| 211 .LBigBlit: |
| 212 // Align destination? |
| 213 testl $0xF, %edx |
| 214 lddqu (%eax), %xmm1 // Pre-load four source pixels |
| 215 jz .LAligned |
| 216 |
| 217 movl %edx, %edi // Calculate alignment of destination po
inter |
| 218 negl %edi |
| 219 andl $0xF, %edi |
| 220 |
| 221 // Handle 1-3 pixels to align destination |
| 222 ptest %xmm7, %xmm1 // Check if all alphas are zero or opaqu
e |
| 223 jz .LAlignDone // If all alphas are zero, just skip |
| 224 lddqu (%edx), %xmm5 // Load four destination pixels |
| 225 jc .LAlignStore // If all alphas are opaque, just store |
| 226 |
| 227 // Handle mixed alphas (calculate and scale) |
| 228 EXTRACT_ALPHA(xmm1, xmm2) // Extract and clone alpha value |
| 229 |
| 230 psllw $8, %xmm3 // Filter out red and blue components |
| 231 pmulhuw %xmm4, %xmm3 // Scale red and blue |
| 232 movdqa %xmm5, %xmm2 |
| 233 psrlw $8, %xmm2 // Filter out alpha and green components |
| 234 pmullw %xmm4, %xmm2 // Scale alpha and green |
| 235 |
| 236 pblendvb %xmm3, %xmm2 // Combine results (mask in %xmm0, impli
citly) |
| 237 paddb %xmm2, %xmm1 // Add source and destination pixels tog
ether |
| 238 |
| 239 .LAlignStore: |
| 240 cmp $8, %edi // Check how many pixels should be writt
en |
| 241 jb .LAlignPixelsLeft1 |
| 242 ja .LAlignPixelsLeft3 |
| 243 pblendw $0x0F, %xmm1, %xmm5 // Blend two pixels |
| 244 jmp .LAlignStorePixels |
| 245 |
| 246 .LAlignPixelsLeft1: |
| 247 pblendw $0x03, %xmm1, %xmm5 // Blend one pixel |
| 248 jmp .LAlignStorePixels |
| 249 |
| 250 .LAlignPixelsLeft3: |
| 251 pblendw $0x3F, %xmm1, %xmm5 // Blend three pixels |
| 252 |
| 253 .LAlignStorePixels: |
| 254 movdqu %xmm5, (%edx) // Store destination pixels |
| 255 |
| 256 .LAlignDone: |
| 257 addl %edi, %eax // Adjust pointers and pixel count |
| 258 addl %edi, %edx |
| 259 shrl $2, %edi |
| 260 lddqu (%eax), %xmm1 // Pre-load new source pixels (after ali
gnment) |
| 261 subl %edi, %ecx |
| 262 |
| 263 .LAligned: // Destination is guaranteed to be 16 by
te aligned |
| 264 xorl %edi, %edi // Reset offset to zero |
| 265 subl $8, %ecx // Decrease counter (Reserve four pixels
for the cleanup) |
| 266 testl $0xF, %eax // Check alignment of source pointer |
| 267 jz .LAlignedLoop |
| 268 |
| 269 // Source not aligned to destination |
| 270 ////////////////////////////////////////////////////////////////////////////
//// |
| 271 .p2align 4 |
| 272 .LUnalignedLoop: // Main loop for unaligned, handles eigh
t pixels per iteration |
| 273 ptest %xmm7, %xmm1 // Check if all alphas are zero or opaqu
e |
| 274 ja .LAlphaNotOpaqueOrZero00 |
| 275 lddqu 16(%eax, %edi), %xmm2 // Pre-load four source pixels |
| 276 jz .LAlphaZero00 |
| 277 movdqa %xmm1, (%edx, %edi) // Store four destination pixels |
| 278 |
| 279 .LAlphaZero00: |
| 280 ptest %xmm7, %xmm2 // Check if all alphas are zero or opaqu
e |
| 281 ja .LAlphaNotOpaqueOrZero01 |
| 282 lddqu 32(%eax, %edi), %xmm1 // Pre-load four source pixels |
| 283 jz .LAlphaZero01 |
| 284 movdqa %xmm2, 16(%edx, %edi) // Store four destination pixels |
| 285 |
| 286 .LAlphaZero01: |
| 287 addl $32, %edi // Adjust offset and pixel count |
| 288 subl $8, %ecx |
| 289 jae .LUnalignedLoop |
| 290 addl $8, %ecx // Adjust pixel count |
| 291 jmp .LLoopCleanup0 |
| 292 |
| 293 .p2align 4 |
| 294 .LAlphaNotOpaqueOrZero00: |
| 295 movdqa (%edx, %edi), %xmm5 // Load four destination pixels |
| 296 EXTRACT_ALPHA(xmm1, xmm2) // Extract and clone alpha value |
| 297 SCALE_PIXELS // Scale pixels using alpha |
| 298 |
| 299 lddqu 16(%eax, %edi), %xmm2 // Pre-load four source pixels |
| 300 pblendvb %xmm5, %xmm3 // Combine results (mask in %xmm0, impli
citly) |
| 301 paddb %xmm3, %xmm1 // Add source and destination pixels tog
ether |
| 302 movdqa %xmm1, (%edx, %edi) // Store four destination pixels |
| 303 |
| 304 // Handle next four pixels |
| 305 ptest %xmm7, %xmm2 // Check if all alphas are zero or opaqu
e |
| 306 ja .LAlphaNotOpaqueOrZero01 |
| 307 lddqu 32(%eax, %edi), %xmm1 // Pre-load four source pixels |
| 308 jz .LAlphaZero02 |
| 309 movdqa %xmm2, 16(%edx, %edi) // Store four destination pixels |
| 310 .LAlphaZero02: |
| 311 addl $32, %edi // Adjust offset and pixel count |
| 312 subl $8, %ecx |
| 313 jae .LUnalignedLoop |
| 314 addl $8, %ecx // Adjust pixel count |
| 315 jmp .LLoopCleanup0 |
| 316 |
| 317 .p2align 4 |
| 318 .LAlphaNotOpaqueOrZero01: |
| 319 movdqa 16(%edx, %edi), %xmm5 // Load four destination pixels |
| 320 EXTRACT_ALPHA(xmm2, xmm1) // Extract and clone alpha value |
| 321 SCALE_PIXELS // Scale pixels using alpha |
| 322 |
| 323 lddqu 32(%eax, %edi), %xmm1 // Pre-load four source pixels |
| 324 addl $32, %edi |
| 325 pblendvb %xmm5, %xmm3 // Combine results (mask in %xmm0, impli
citly) |
| 326 paddb %xmm3, %xmm2 // Add source and destination pixels tog
ether |
| 327 subl $8, %ecx |
| 328 movdqa %xmm2, -16(%edx, %edi) // Store four destination pixels |
| 329 jae .LUnalignedLoop |
| 330 addl $8, %ecx // Adjust pixel count |
| 331 |
| 332 // Cleanup - handle pending pixels from loop |
| 333 .LLoopCleanup0: |
| 334 ptest %xmm7, %xmm1 // Check if all alphas are zero or opaqu
e |
| 335 ja .LAlphaNotOpaqueOrZero02 |
| 336 jz .LAlphaZero03 |
| 337 movdqa %xmm1, (%edx, %edi) // Store four destination pixels |
| 338 .LAlphaZero03: |
| 339 addl $16, %edi |
| 340 subl $4, %ecx |
| 341 js .LSmallRemaining // Reuse code from small loop |
| 342 |
| 343 .LRemain0: |
| 344 lddqu (%eax, %edi), %xmm1 // Load four source pixels |
| 345 ptest %xmm7, %xmm1 // Check if all alphas are zero or opaqu
e |
| 346 ja .LAlphaNotOpaqueOrZero02 |
| 347 jz .LAlphaZero04 |
| 348 movdqa %xmm1, (%edx, %edi) // Store four destination pixels |
| 349 .LAlphaZero04: |
| 350 addl $16, %edi |
| 351 subl $4, %ecx |
| 352 jmp .LSmallRemaining // Reuse code from small loop |
| 353 |
| 354 .LAlphaNotOpaqueOrZero02: |
| 355 movdqa (%edx, %edi), %xmm5 // Load four destination pixels |
| 356 EXTRACT_ALPHA(xmm1, xmm2) // Extract and clone alpha value |
| 357 SCALE_PIXELS // Scale pixels using alpha |
| 358 |
| 359 addl $16, %edi |
| 360 subl $4, %ecx |
| 361 pblendvb %xmm5, %xmm3 // Combine results (mask in %xmm0, impli
citly) |
| 362 paddb %xmm3, %xmm1 // Add source and destination pixels tog
ether |
| 363 movdqa %xmm1, -16(%edx, %edi) // Store four destination pixels |
| 364 js .LSmallRemaining // Reuse code from small loop |
| 365 jmp .LRemain0 |
| 366 |
| 367 // Source aligned to destination |
| 368 ////////////////////////////////////////////////////////////////////////////
//// |
| 369 .p2align 4 |
| 370 .LAlignedLoop: // Main loop for aligned, handles eight
pixels per iteration |
| 371 ptest %xmm7, %xmm1 // Check if all alphas are zero or opaqu
e |
| 372 ja .LAlphaNotOpaqueOrZero10 |
| 373 movdqa 16(%eax, %edi), %xmm2 // Pre-load four source pixels |
| 374 jz .LAlphaZero10 |
| 375 movdqa %xmm1, (%edx, %edi) // Store four destination pixels |
| 376 |
| 377 .LAlphaZero10: |
| 378 ptest %xmm7, %xmm2 // Check if all alphas are zero or opaqu
e |
| 379 ja .LAlphaNotOpaqueOrZero11 |
| 380 movdqa 32(%eax, %edi), %xmm1 // Pre-load four source pixels |
| 381 jz .LAlphaZero11 |
| 382 movdqa %xmm2, 16(%edx, %edi) // Store four destination pixels |
| 383 |
| 384 .LAlphaZero11: |
| 385 addl $32, %edi // Adjust offset and pixel count |
| 386 subl $8, %ecx |
| 387 jae .LAlignedLoop |
| 388 addl $8, %ecx // Adjust pixel count |
| 389 jmp .LLoopCleanup1 |
| 390 |
| 391 .p2align 4 |
| 392 .LAlphaNotOpaqueOrZero10: |
| 393 movdqa (%edx, %edi), %xmm5 // Load four destination pixels |
| 394 EXTRACT_ALPHA(xmm1, xmm2) // Extract and clone alpha value |
| 395 SCALE_PIXELS // Scale pixels using alpha |
| 396 |
| 397 movdqa 16(%eax, %edi), %xmm2 // Pre-load four source pixels |
| 398 pblendvb %xmm5, %xmm3 // Combine results (mask in %xmm0, impli
citly) |
| 399 paddb %xmm3, %xmm1 // Add source and destination pixels tog
ether |
| 400 movdqa %xmm1, (%edx, %edi) // Store four destination pixels |
| 401 |
| 402 // Handle next four pixels |
| 403 ptest %xmm7, %xmm2 // Check if all alphas are zero or opaqu
e |
| 404 ja .LAlphaNotOpaqueOrZero11 |
| 405 movdqa 32(%eax, %edi), %xmm1 // Pre-load four source pixels |
| 406 jz .LAlphaZero12 |
| 407 movdqa %xmm2, 16(%edx, %edi) // Store four destination pixels |
| 408 .LAlphaZero12: |
| 409 addl $32, %edi // Adjust offset and pixel count |
| 410 subl $8, %ecx |
| 411 jae .LAlignedLoop |
| 412 addl $8, %ecx // Adjust pixel count |
| 413 jmp .LLoopCleanup1 |
| 414 |
| 415 .p2align 4 |
| 416 .LAlphaNotOpaqueOrZero11: |
| 417 movdqa 16(%edx, %edi), %xmm5 // Load four destination pixels |
| 418 EXTRACT_ALPHA(xmm2, xmm1) // Extract and clone alpha value |
| 419 SCALE_PIXELS // Scale pixels using alpha |
| 420 movdqa 32(%eax, %edi), %xmm1 // Pre-load four source pixels |
| 421 |
| 422 addl $32, %edi |
| 423 pblendvb %xmm5, %xmm3 // Combine results (mask in %xmm0, impli
citly) |
| 424 paddb %xmm3, %xmm2 // Add source and destination pixels tog
ether |
| 425 subl $8, %ecx |
| 426 movdqa %xmm2, -16(%edx, %edi) // Store four destination pixels |
| 427 jae .LAlignedLoop |
| 428 addl $8, %ecx // Adjust pixel count |
| 429 |
| 430 // Cleanup - handle pending pixels from loop |
| 431 .LLoopCleanup1: |
| 432 ptest %xmm7, %xmm1 // Check if all alphas are zero or opaqu
e |
| 433 ja .LAlphaNotOpaqueOrZero12 |
| 434 jz .LAlphaZero13 |
| 435 movdqa %xmm1, (%edx, %edi) // Store four destination pixels |
| 436 .LAlphaZero13: |
| 437 addl $16, %edi |
| 438 subl $4, %ecx |
| 439 js .LSmallRemaining // Reuse code from small loop |
| 440 |
| 441 .LRemain1: |
| 442 movdqa (%eax, %edi), %xmm1 // Load four source pixels |
| 443 ptest %xmm7, %xmm1 // Check if all alphas are zero or opaqu
e |
| 444 ja .LAlphaNotOpaqueOrZero12 |
| 445 jz .LAlphaZero14 |
| 446 movdqa %xmm1, (%edx, %edi) // Store four destination pixels |
| 447 .LAlphaZero14: |
| 448 addl $16, %edi |
| 449 subl $4, %ecx |
| 450 jmp .LSmallRemaining // Reuse code from small loop |
| 451 |
| 452 .LAlphaNotOpaqueOrZero12: |
| 453 movdqa (%edx, %edi), %xmm5 // Load four destination pixels |
| 454 EXTRACT_ALPHA(xmm1, xmm2) // Extract and clone alpha value |
| 455 SCALE_PIXELS // Scale pixels using alpha |
| 456 |
| 457 addl $16, %edi |
| 458 subl $4, %ecx |
| 459 pblendvb %xmm5, %xmm3 // Combine results (mask in %xmm0, impli
citly) |
| 460 paddb %xmm3, %xmm1 // Add source and destination pixels tog
ether |
| 461 movdqa %xmm1, -16(%edx, %edi) // Store four destination pixels |
| 462 js .LSmallRemaining // Reuse code from small loop |
| 463 jmp .LRemain1 |
| 464 |
| 465 .cfi_endproc |
| 466 #ifndef __clang__ |
| 467 .size S32A_Opaque_BlitRow32_SSE4_asm, .-S32A_Opaque_BlitRow32_SSE4_asm |
| 468 #endif |
| 469 #endif |
OLD | NEW |