Index: source/libvpx/third_party/x86inc/x86inc.asm |
diff --git a/source/libvpx/third_party/x86inc/x86inc.asm b/source/libvpx/third_party/x86inc/x86inc.asm |
index bc8116995dd86fef4b69913065621114c43b7959..77a58f295b33f3ca3d9d9c687c8abff468f423dc 100644 |
--- a/source/libvpx/third_party/x86inc/x86inc.asm |
+++ b/source/libvpx/third_party/x86inc/x86inc.asm |
@@ -1,12 +1,12 @@ |
;***************************************************************************** |
;* x86inc.asm: x264asm abstraction layer |
;***************************************************************************** |
-;* Copyright (C) 2005-2012 x264 project |
+;* Copyright (C) 2005-2015 x264 project |
;* |
;* Authors: Loren Merritt <lorenm@u.washington.edu> |
;* Anton Mitrofanov <BugMaster@narod.ru> |
-;* Jason Garrett-Glaser <darkshikari@gmail.com> |
-;* Henrik Gramner <hengar-6@student.ltu.se> |
+;* Fiona Glaser <fiona@x264.com> |
+;* Henrik Gramner <henrik@gramner.com> |
;* |
;* Permission to use, copy, modify, and/or distribute this software for any |
;* purpose with or without fee is hereby granted, provided that the above |
@@ -36,13 +36,24 @@ |
%include "vpx_config.asm" |
-%ifndef program_name |
-%define program_name vp9 |
+%ifndef private_prefix |
+ %define private_prefix vpx |
%endif |
+%ifndef public_prefix |
+ %define public_prefix private_prefix |
+%endif |
+ |
+%ifndef STACK_ALIGNMENT |
+ %if ARCH_X86_64 |
+ %define STACK_ALIGNMENT 16 |
+ %else |
+ %define STACK_ALIGNMENT 4 |
+ %endif |
+%endif |
-%define UNIX64 0 |
%define WIN64 0 |
+%define UNIX64 0 |
%if ARCH_X86_64 |
%ifidn __OUTPUT_FORMAT__,win32 |
%define WIN64 1 |
@@ -59,8 +70,6 @@ |
%define mangle(x) x |
%elifidn __OUTPUT_FORMAT__,elf64 |
%define mangle(x) x |
-%elifidn __OUTPUT_FORMAT__,elf |
- %define mangle(x) x |
%elifidn __OUTPUT_FORMAT__,x64 |
%define mangle(x) x |
%elifidn __OUTPUT_FORMAT__,win64 |
@@ -69,31 +78,22 @@ |
%define mangle(x) _ %+ x |
%endif |
-; FIXME: All of the 64bit asm functions that take a stride as an argument |
-; via register, assume that the high dword of that register is filled with 0. |
-; This is true in practice (since we never do any 64bit arithmetic on strides, |
-; and x264's strides are all positive), but is not guaranteed by the ABI. |
- |
-; Name of the .rodata section. |
-; Kludge: Something on OS X fails to align .rodata even given an align attribute, |
-; so use a different read-only section. |
+; In some instances macho32 tables get misaligned when using .rodata. |
+; When looking at the disassembly it appears that the offset is either |
+; correct or consistently off by 90. Placing them in the .text section |
+; works around the issue. It appears to be specific to the way libvpx |
+; handles the tables. |
%macro SECTION_RODATA 0-1 16 |
- %ifidn __OUTPUT_FORMAT__,macho64 |
- SECTION .text align=%1 |
- %elifidn __OUTPUT_FORMAT__,macho32 |
- SECTION .text align=%1 |
- fakegot: |
- %elifidn __OUTPUT_FORMAT__,macho |
+ %ifidn __OUTPUT_FORMAT__,macho32 |
SECTION .text align=%1 |
fakegot: |
%elifidn __OUTPUT_FORMAT__,aout |
- section .text |
+ SECTION .text |
%else |
SECTION .rodata align=%1 |
%endif |
%endmacro |
-; aout does not support align= |
%macro SECTION_TEXT 0-1 16 |
%ifidn __OUTPUT_FORMAT__,aout |
SECTION .text |
@@ -117,58 +117,58 @@ |
%endif |
%if ABI_IS_32BIT |
- %if CONFIG_PIC=1 |
- %ifidn __OUTPUT_FORMAT__,elf32 |
- %define GET_GOT_SAVE_ARG 1 |
- %define WRT_PLT wrt ..plt |
- %macro GET_GOT 1 |
- extern _GLOBAL_OFFSET_TABLE_ |
- push %1 |
- call %%get_got |
- %%sub_offset: |
- jmp %%exitGG |
- %%get_got: |
- mov %1, [esp] |
- add %1, _GLOBAL_OFFSET_TABLE_ + $$ - %%sub_offset wrt ..gotpc |
- ret |
- %%exitGG: |
- %undef GLOBAL |
- %define GLOBAL(x) x + %1 wrt ..gotoff |
- %undef RESTORE_GOT |
- %define RESTORE_GOT pop %1 |
- %endmacro |
- %elifidn __OUTPUT_FORMAT__,macho32 |
- %define GET_GOT_SAVE_ARG 1 |
- %macro GET_GOT 1 |
- push %1 |
- call %%get_got |
- %%get_got: |
- pop %1 |
- %undef GLOBAL |
- %define GLOBAL(x) x + %1 - %%get_got |
- %undef RESTORE_GOT |
- %define RESTORE_GOT pop %1 |
- %endmacro |
- %endif |
- %endif |
+ %if CONFIG_PIC=1 |
+ %ifidn __OUTPUT_FORMAT__,elf32 |
+ %define GET_GOT_SAVE_ARG 1 |
+ %define WRT_PLT wrt ..plt |
+ %macro GET_GOT 1 |
+ extern _GLOBAL_OFFSET_TABLE_ |
+ push %1 |
+ call %%get_got |
+ %%sub_offset: |
+ jmp %%exitGG |
+ %%get_got: |
+ mov %1, [esp] |
+ add %1, _GLOBAL_OFFSET_TABLE_ + $$ - %%sub_offset wrt ..gotpc |
+ ret |
+ %%exitGG: |
+ %undef GLOBAL |
+ %define GLOBAL(x) x + %1 wrt ..gotoff |
+ %undef RESTORE_GOT |
+ %define RESTORE_GOT pop %1 |
+ %endmacro |
+ %elifidn __OUTPUT_FORMAT__,macho32 |
+ %define GET_GOT_SAVE_ARG 1 |
+ %macro GET_GOT 1 |
+ push %1 |
+ call %%get_got |
+ %%get_got: |
+ pop %1 |
+ %undef GLOBAL |
+ %define GLOBAL(x) x + %1 - %%get_got |
+ %undef RESTORE_GOT |
+ %define RESTORE_GOT pop %1 |
+ %endmacro |
+ %endif |
+ %endif |
- %if ARCH_X86_64 == 0 |
- %undef PIC |
- %endif |
+ %if ARCH_X86_64 == 0 |
+ %undef PIC |
+ %endif |
%else |
- %macro GET_GOT 1 |
- %endmacro |
- %define GLOBAL(x) rel x |
- %define WRT_PLT wrt ..plt |
- |
- %if WIN64 |
- %define PIC |
- %elifidn __OUTPUT_FORMAT__,macho64 |
- %define PIC |
- %elif CONFIG_PIC |
- %define PIC |
- %endif |
+ %macro GET_GOT 1 |
+ %endmacro |
+ %define GLOBAL(x) rel x |
+ %define WRT_PLT wrt ..plt |
+ |
+ %if WIN64 |
+ %define PIC |
+ %elifidn __OUTPUT_FORMAT__,macho64 |
+ %define PIC |
+ %elif CONFIG_PIC |
+ %define PIC |
+ %endif |
%endif |
%ifnmacro GET_GOT |
@@ -177,10 +177,10 @@ |
%define GLOBAL(x) x |
%endif |
%ifndef RESTORE_GOT |
-%define RESTORE_GOT |
+ %define RESTORE_GOT |
%endif |
%ifndef WRT_PLT |
-%define WRT_PLT |
+ %define WRT_PLT |
%endif |
%ifdef PIC |
@@ -188,14 +188,6 @@ |
%endif |
; Done with PIC macros |
-; Always use long nops (reduces 0x90 spam in disassembly on x86_32) |
-%ifndef __NASM_VER__ |
-CPU amdnop |
-%else |
-%use smartalign |
-ALIGNMODE k7 |
-%endif |
- |
; Macros to eliminate most code duplication between x86_32 and x86_64: |
; Currently this works only for leaf functions which load all their arguments |
; into registers at the start, and make no other use of the stack. Luckily that |
@@ -205,12 +197,20 @@ ALIGNMODE k7 |
; %1 = number of arguments. loads them from stack if needed. |
; %2 = number of registers used. pushes callee-saved regs if needed. |
; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed. |
-; %4 = list of names to define to registers |
+; %4 = (optional) stack size to be allocated. The stack will be aligned before |
+; allocating the specified stack size. If the required stack alignment is |
+; larger than the known stack alignment the stack will be manually aligned |
+; and an extra register will be allocated to hold the original stack |
+; pointer (to not invalidate r0m etc.). To prevent the use of an extra |
+; register as stack pointer, request a negative stack size. |
+; %4+/%5+ = list of names to define to registers |
; PROLOGUE can also be invoked by adding the same options to cglobal |
; e.g. |
-; cglobal foo, 2,3,0, dst, src, tmp |
-; declares a function (foo), taking two args (dst and src) and one local variable (tmp) |
+; cglobal foo, 2,3,7,0x40, dst, src, tmp |
+; declares a function (foo) that automatically loads two arguments (dst and |
+; src) into registers, uses one additional register (tmp) plus 7 vector |
+; registers (m0-m6) and allocates 0x40 bytes of stack space. |
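; For illustration only (hypothetical function, assuming INIT_XMM sse2 is in |
; effect), a complete leaf function written with this interface could be: |
;   cglobal copy16, 2, 2, 1, dst, src |
;       movu  m0, [srcq] |
;       mova  [dstq], m0 |
;       RET |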
; TODO Some functions can use some args directly from the stack. If they're the |
; last args then you can just not declare them, but if they're in the middle |
@@ -220,40 +220,43 @@ ALIGNMODE k7 |
; Pops anything that was pushed by PROLOGUE, and returns. |
; REP_RET: |
-; Same, but if it doesn't pop anything it becomes a 2-byte ret, for athlons |
-; which are slow when a normal ret follows a branch. |
+; Use this instead of RET if it's a branch target. |
; registers: |
; rN and rNq are the native-size register holding function argument N |
; rNd, rNw, rNb are dword, word, and byte size |
+; rNh is the high 8 bits of the word size |
; rNm is the original location of arg N (a register or on the stack), dword |
; rNmp is native size |
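; For instance (hypothetical snippet, in a function declared with at least |
; three GPRs), the same argument can be touched at several widths: |
;   mov   r2d, r0m     ; reload arg 0 (dword) from wherever it was passed |
;   movzx r1d, r0w     ; zero-extend the word-size view of arg 0 |
;   add   r0, 4        ; native-size arithmetic on arg 0 |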
-%macro DECLARE_REG 5-6 |
+%macro DECLARE_REG 2-3 |
%define r%1q %2 |
- %define r%1d %3 |
- %define r%1w %4 |
- %define r%1b %5 |
- %if %0 == 5 |
- %define r%1m %3 |
+ %define r%1d %2d |
+ %define r%1w %2w |
+ %define r%1b %2b |
+ %define r%1h %2h |
+ %if %0 == 2 |
+ %define r%1m %2d |
%define r%1mp %2 |
%elif ARCH_X86_64 ; memory |
- %define r%1m [rsp + stack_offset + %6] |
+ %define r%1m [rstk + stack_offset + %3] |
%define r%1mp qword r %+ %1 %+ m |
%else |
- %define r%1m [esp + stack_offset + %6] |
+ %define r%1m [rstk + stack_offset + %3] |
%define r%1mp dword r %+ %1 %+ m |
%endif |
%define r%1 %2 |
%endmacro |
-%macro DECLARE_REG_SIZE 2 |
+%macro DECLARE_REG_SIZE 3 |
%define r%1q r%1 |
%define e%1q r%1 |
%define r%1d e%1 |
%define e%1d e%1 |
%define r%1w %1 |
%define e%1w %1 |
+ %define r%1h %3 |
+ %define e%1h %3 |
%define r%1b %2 |
%define e%1b %2 |
%if ARCH_X86_64 == 0 |
@@ -261,13 +264,13 @@ ALIGNMODE k7 |
%endif |
%endmacro |
-DECLARE_REG_SIZE ax, al |
-DECLARE_REG_SIZE bx, bl |
-DECLARE_REG_SIZE cx, cl |
-DECLARE_REG_SIZE dx, dl |
-DECLARE_REG_SIZE si, sil |
-DECLARE_REG_SIZE di, dil |
-DECLARE_REG_SIZE bp, bpl |
+DECLARE_REG_SIZE ax, al, ah |
+DECLARE_REG_SIZE bx, bl, bh |
+DECLARE_REG_SIZE cx, cl, ch |
+DECLARE_REG_SIZE dx, dl, dh |
+DECLARE_REG_SIZE si, sil, null |
+DECLARE_REG_SIZE di, dil, null |
+DECLARE_REG_SIZE bp, bpl, null |
; t# defines for when per-arch register allocation is more complex than just function arguments |
@@ -285,6 +288,7 @@ DECLARE_REG_SIZE bp, bpl |
%define t%1q t%1 %+ q |
%define t%1d t%1 %+ d |
%define t%1w t%1 %+ w |
+ %define t%1h t%1 %+ h |
%define t%1b t%1 %+ b |
%rotate 1 |
%endrep |
@@ -300,12 +304,16 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14 |
%macro PUSH 1 |
push %1 |
- %assign stack_offset stack_offset+gprsize |
+ %ifidn rstk, rsp |
+ %assign stack_offset stack_offset+gprsize |
+ %endif |
%endmacro |
%macro POP 1 |
pop %1 |
- %assign stack_offset stack_offset-gprsize |
+ %ifidn rstk, rsp |
+ %assign stack_offset stack_offset-gprsize |
+ %endif |
%endmacro |
%macro PUSH_IF_USED 1-* |
@@ -337,14 +345,14 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14 |
%macro SUB 2 |
sub %1, %2 |
- %ifidn %1, rsp |
+ %ifidn %1, rstk |
%assign stack_offset stack_offset+(%2) |
%endif |
%endmacro |
%macro ADD 2 |
add %1, %2 |
- %ifidn %1, rsp |
+ %ifidn %1, rstk |
%assign stack_offset stack_offset-(%2) |
%endif |
%endmacro |
@@ -374,6 +382,7 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14 |
CAT_UNDEF arg_name %+ %%i, q |
CAT_UNDEF arg_name %+ %%i, d |
CAT_UNDEF arg_name %+ %%i, w |
+ CAT_UNDEF arg_name %+ %%i, h |
CAT_UNDEF arg_name %+ %%i, b |
CAT_UNDEF arg_name %+ %%i, m |
CAT_UNDEF arg_name %+ %%i, mp |
@@ -389,6 +398,7 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14 |
%xdefine %1q r %+ %%i %+ q |
%xdefine %1d r %+ %%i %+ d |
%xdefine %1w r %+ %%i %+ w |
+ %xdefine %1h r %+ %%i %+ h |
%xdefine %1b r %+ %%i %+ b |
%xdefine %1m r %+ %%i %+ m |
%xdefine %1mp r %+ %%i %+ mp |
@@ -400,155 +410,240 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14 |
%assign n_arg_names %0 |
%endmacro |
-%if ARCH_X86_64 |
-%macro ALLOC_STACK 2 ; stack_size, num_regs |
- %assign %%stack_aligment ((mmsize + 15) & ~15) |
- %assign stack_size_padded %1 |
+%define required_stack_alignment ((mmsize + 15) & ~15) |
+ |
+%macro ALLOC_STACK 1-2 0 ; stack_size, n_xmm_regs (for win64 only) |
+ %ifnum %1 |
+ %if %1 != 0 |
+ %assign %%pad 0 |
+ %assign stack_size %1 |
+ %if stack_size < 0 |
+ %assign stack_size -stack_size |
+ %endif |
+ %if WIN64 |
+ %assign %%pad %%pad + 32 ; shadow space |
+ %if mmsize != 8 |
+ %assign xmm_regs_used %2 |
+ %if xmm_regs_used > 8 |
+ %assign %%pad %%pad + (xmm_regs_used-8)*16 ; callee-saved xmm registers |
+ %endif |
+ %endif |
+ %endif |
+ %if required_stack_alignment <= STACK_ALIGNMENT |
+ ; maintain the current stack alignment |
+ %assign stack_size_padded stack_size + %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1)) |
+ SUB rsp, stack_size_padded |
+ %else |
+ %assign %%reg_num (regs_used - 1) |
+ %xdefine rstk r %+ %%reg_num |
+ ; align stack, and save original stack location directly above |
+ ; it, i.e. in [rsp+stack_size_padded], so we can restore the |
+ ; stack in a single instruction (i.e. mov rsp, rstk or mov |
+ ; rsp, [rsp+stack_size_padded]) |
+ %if %1 < 0 ; need to store rsp on stack |
+ %xdefine rstkm [rsp + stack_size + %%pad] |
+ %assign %%pad %%pad + gprsize |
+ %else ; can keep rsp in rstk during whole function |
+ %xdefine rstkm rstk |
+ %endif |
+ %assign stack_size_padded stack_size + ((%%pad + required_stack_alignment-1) & ~(required_stack_alignment-1)) |
+ mov rstk, rsp |
+ and rsp, ~(required_stack_alignment-1) |
+ sub rsp, stack_size_padded |
+ movifnidn rstkm, rstk |
+ %endif |
+ WIN64_PUSH_XMM |
+ %endif |
+ %endif |
+%endmacro |
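; For example (hypothetical function), 32 bytes of suitably aligned scratch |
; space can be requested via the stack_size field of cglobal/PROLOGUE; after |
; the prologue, rsp points at the start of the usable area: |
;   cglobal foo, 2, 3, 4, 32, dst, src |
;       mova  [rsp], m0        ; store into the aligned scratch buffer |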
- %assign %%reg_num (%2 - 1) |
- %xdefine rsp_tmp r %+ %%reg_num |
- mov rsp_tmp, rsp |
- sub rsp, stack_size_padded |
- and rsp, ~(%%stack_aligment - 1) |
+%macro SETUP_STACK_POINTER 1 |
+ %ifnum %1 |
+ %if %1 != 0 && required_stack_alignment > STACK_ALIGNMENT |
+ %if %1 > 0 |
+ %assign regs_used (regs_used + 1) |
+ %elif ARCH_X86_64 && regs_used == num_args && num_args <= 4 + UNIX64 * 2 |
+ %warning "Stack pointer will overwrite register argument" |
+ %endif |
+ %endif |
+ %endif |
%endmacro |
-%macro RESTORE_STACK 0 ; reset rsp register |
- mov rsp, rsp_tmp |
+%macro DEFINE_ARGS_INTERNAL 3+ |
+ %ifnum %2 |
+ DEFINE_ARGS %3 |
+ %elif %1 == 4 |
+ DEFINE_ARGS %2 |
+ %elif %1 > 4 |
+ DEFINE_ARGS %2, %3 |
+ %endif |
%endmacro |
-%endif |
%if WIN64 ; Windows x64 ;================================================= |
-DECLARE_REG 0, rcx, ecx, cx, cl |
-DECLARE_REG 1, rdx, edx, dx, dl |
-DECLARE_REG 2, R8, R8D, R8W, R8B |
-DECLARE_REG 3, R9, R9D, R9W, R9B |
-DECLARE_REG 4, R10, R10D, R10W, R10B, 40 |
-DECLARE_REG 5, R11, R11D, R11W, R11B, 48 |
-DECLARE_REG 6, rax, eax, ax, al, 56 |
-DECLARE_REG 7, rdi, edi, di, dil, 64 |
-DECLARE_REG 8, rsi, esi, si, sil, 72 |
-DECLARE_REG 9, rbx, ebx, bx, bl, 80 |
-DECLARE_REG 10, rbp, ebp, bp, bpl, 88 |
-DECLARE_REG 11, R12, R12D, R12W, R12B, 96 |
-DECLARE_REG 12, R13, R13D, R13W, R13B, 104 |
-DECLARE_REG 13, R14, R14D, R14W, R14B, 112 |
-DECLARE_REG 14, R15, R15D, R15W, R15B, 120 |
- |
-%macro PROLOGUE 2-4+ 0 ; #args, #regs, #xmm_regs, arg_names... |
+DECLARE_REG 0, rcx |
+DECLARE_REG 1, rdx |
+DECLARE_REG 2, R8 |
+DECLARE_REG 3, R9 |
+DECLARE_REG 4, R10, 40 |
+DECLARE_REG 5, R11, 48 |
+DECLARE_REG 6, rax, 56 |
+DECLARE_REG 7, rdi, 64 |
+DECLARE_REG 8, rsi, 72 |
+DECLARE_REG 9, rbx, 80 |
+DECLARE_REG 10, rbp, 88 |
+DECLARE_REG 11, R12, 96 |
+DECLARE_REG 12, R13, 104 |
+DECLARE_REG 13, R14, 112 |
+DECLARE_REG 14, R15, 120 |
+ |
+%macro PROLOGUE 2-5+ 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names... |
%assign num_args %1 |
%assign regs_used %2 |
ASSERT regs_used >= num_args |
+ SETUP_STACK_POINTER %4 |
ASSERT regs_used <= 15 |
PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14 |
- %if mmsize == 8 |
- %assign xmm_regs_used 0 |
- %else |
+ ALLOC_STACK %4, %3 |
+ %if mmsize != 8 && stack_size == 0 |
WIN64_SPILL_XMM %3 |
%endif |
LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 |
- DEFINE_ARGS %4 |
+ DEFINE_ARGS_INTERNAL %0, %4, %5 |
+%endmacro |
+ |
+%macro WIN64_PUSH_XMM 0 |
+ ; Use the shadow space to store XMM6 and XMM7; the rest needs stack space allocated. |
+ %if xmm_regs_used > 6 |
+ movaps [rstk + stack_offset + 8], xmm6 |
+ %endif |
+ %if xmm_regs_used > 7 |
+ movaps [rstk + stack_offset + 24], xmm7 |
+ %endif |
+ %if xmm_regs_used > 8 |
+ %assign %%i 8 |
+ %rep xmm_regs_used-8 |
+ movaps [rsp + (%%i-8)*16 + stack_size + 32], xmm %+ %%i |
+ %assign %%i %%i+1 |
+ %endrep |
+ %endif |
%endmacro |
%macro WIN64_SPILL_XMM 1 |
%assign xmm_regs_used %1 |
ASSERT xmm_regs_used <= 16 |
- %if xmm_regs_used > 6 |
- SUB rsp, (xmm_regs_used-6)*16+16 |
- %assign %%i xmm_regs_used |
- %rep (xmm_regs_used-6) |
- %assign %%i %%i-1 |
- movdqa [rsp + (%%i-6)*16+(~stack_offset&8)], xmm %+ %%i |
- %endrep |
+ %if xmm_regs_used > 8 |
+ ; Allocate stack space for callee-saved xmm registers plus shadow space and align the stack. |
+ %assign %%pad (xmm_regs_used-8)*16 + 32 |
+ %assign stack_size_padded %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1)) |
+ SUB rsp, stack_size_padded |
%endif |
+ WIN64_PUSH_XMM |
%endmacro |
%macro WIN64_RESTORE_XMM_INTERNAL 1 |
- %if xmm_regs_used > 6 |
+ %assign %%pad_size 0 |
+ %if xmm_regs_used > 8 |
%assign %%i xmm_regs_used |
- %rep (xmm_regs_used-6) |
+ %rep xmm_regs_used-8 |
%assign %%i %%i-1 |
- movdqa xmm %+ %%i, [%1 + (%%i-6)*16+(~stack_offset&8)] |
+ movaps xmm %+ %%i, [%1 + (%%i-8)*16 + stack_size + 32] |
%endrep |
- add %1, (xmm_regs_used-6)*16+16 |
+ %endif |
+ %if stack_size_padded > 0 |
+ %if stack_size > 0 && required_stack_alignment > STACK_ALIGNMENT |
+ mov rsp, rstkm |
+ %else |
+ add %1, stack_size_padded |
+ %assign %%pad_size stack_size_padded |
+ %endif |
+ %endif |
+ %if xmm_regs_used > 7 |
+ movaps xmm7, [%1 + stack_offset - %%pad_size + 24] |
+ %endif |
+ %if xmm_regs_used > 6 |
+ movaps xmm6, [%1 + stack_offset - %%pad_size + 8] |
%endif |
%endmacro |
%macro WIN64_RESTORE_XMM 1 |
WIN64_RESTORE_XMM_INTERNAL %1 |
- %assign stack_offset stack_offset-(xmm_regs_used-6)*16+16 |
+ %assign stack_offset (stack_offset-stack_size_padded) |
%assign xmm_regs_used 0 |
%endmacro |
+%define has_epilogue regs_used > 7 || xmm_regs_used > 6 || mmsize == 32 || stack_size > 0 |
+ |
%macro RET 0 |
WIN64_RESTORE_XMM_INTERNAL rsp |
POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7 |
- ret |
-%endmacro |
- |
-%macro REP_RET 0 |
- %if regs_used > 7 || xmm_regs_used > 6 |
- RET |
- %else |
- rep ret |
- %endif |
+%if mmsize == 32 |
+ vzeroupper |
+%endif |
+ AUTO_REP_RET |
%endmacro |
%elif ARCH_X86_64 ; *nix x64 ;============================================= |
-DECLARE_REG 0, rdi, edi, di, dil |
-DECLARE_REG 1, rsi, esi, si, sil |
-DECLARE_REG 2, rdx, edx, dx, dl |
-DECLARE_REG 3, rcx, ecx, cx, cl |
-DECLARE_REG 4, R8, R8D, R8W, R8B |
-DECLARE_REG 5, R9, R9D, R9W, R9B |
-DECLARE_REG 6, rax, eax, ax, al, 8 |
-DECLARE_REG 7, R10, R10D, R10W, R10B, 16 |
-DECLARE_REG 8, R11, R11D, R11W, R11B, 24 |
-DECLARE_REG 9, rbx, ebx, bx, bl, 32 |
-DECLARE_REG 10, rbp, ebp, bp, bpl, 40 |
-DECLARE_REG 11, R12, R12D, R12W, R12B, 48 |
-DECLARE_REG 12, R13, R13D, R13W, R13B, 56 |
-DECLARE_REG 13, R14, R14D, R14W, R14B, 64 |
-DECLARE_REG 14, R15, R15D, R15W, R15B, 72 |
- |
-%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names... |
+DECLARE_REG 0, rdi |
+DECLARE_REG 1, rsi |
+DECLARE_REG 2, rdx |
+DECLARE_REG 3, rcx |
+DECLARE_REG 4, R8 |
+DECLARE_REG 5, R9 |
+DECLARE_REG 6, rax, 8 |
+DECLARE_REG 7, R10, 16 |
+DECLARE_REG 8, R11, 24 |
+DECLARE_REG 9, rbx, 32 |
+DECLARE_REG 10, rbp, 40 |
+DECLARE_REG 11, R12, 48 |
+DECLARE_REG 12, R13, 56 |
+DECLARE_REG 13, R14, 64 |
+DECLARE_REG 14, R15, 72 |
+ |
+%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names... |
%assign num_args %1 |
%assign regs_used %2 |
ASSERT regs_used >= num_args |
+ SETUP_STACK_POINTER %4 |
ASSERT regs_used <= 15 |
PUSH_IF_USED 9, 10, 11, 12, 13, 14 |
+ ALLOC_STACK %4 |
LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14 |
- DEFINE_ARGS %4 |
+ DEFINE_ARGS_INTERNAL %0, %4, %5 |
%endmacro |
+%define has_epilogue regs_used > 9 || mmsize == 32 || stack_size > 0 |
+ |
%macro RET 0 |
+%if stack_size_padded > 0 |
+%if required_stack_alignment > STACK_ALIGNMENT |
+ mov rsp, rstkm |
+%else |
+ add rsp, stack_size_padded |
+%endif |
+%endif |
POP_IF_USED 14, 13, 12, 11, 10, 9 |
- ret |
-%endmacro |
- |
-%macro REP_RET 0 |
- %if regs_used > 9 |
- RET |
- %else |
- rep ret |
- %endif |
+%if mmsize == 32 |
+ vzeroupper |
+%endif |
+ AUTO_REP_RET |
%endmacro |
%else ; X86_32 ;============================================================== |
-DECLARE_REG 0, eax, eax, ax, al, 4 |
-DECLARE_REG 1, ecx, ecx, cx, cl, 8 |
-DECLARE_REG 2, edx, edx, dx, dl, 12 |
-DECLARE_REG 3, ebx, ebx, bx, bl, 16 |
-DECLARE_REG 4, esi, esi, si, null, 20 |
-DECLARE_REG 5, edi, edi, di, null, 24 |
-DECLARE_REG 6, ebp, ebp, bp, null, 28 |
+DECLARE_REG 0, eax, 4 |
+DECLARE_REG 1, ecx, 8 |
+DECLARE_REG 2, edx, 12 |
+DECLARE_REG 3, ebx, 16 |
+DECLARE_REG 4, esi, 20 |
+DECLARE_REG 5, edi, 24 |
+DECLARE_REG 6, ebp, 28 |
%define rsp esp |
%macro DECLARE_ARG 1-* |
%rep %0 |
- %define r%1m [esp + stack_offset + 4*%1 + 4] |
+ %define r%1m [rstk + stack_offset + 4*%1 + 4] |
%define r%1mp dword r%1m |
%rotate 1 |
%endrep |
@@ -556,39 +651,95 @@ DECLARE_REG 6, ebp, ebp, bp, null, 28 |
DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14 |
-%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names... |
+%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names... |
%assign num_args %1 |
%assign regs_used %2 |
+ ASSERT regs_used >= num_args |
+ %if num_args > 7 |
+ %assign num_args 7 |
+ %endif |
%if regs_used > 7 |
%assign regs_used 7 |
%endif |
- ASSERT regs_used >= num_args |
+ SETUP_STACK_POINTER %4 |
+ ASSERT regs_used <= 7 |
PUSH_IF_USED 3, 4, 5, 6 |
+ ALLOC_STACK %4 |
LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6 |
- DEFINE_ARGS %4 |
+ DEFINE_ARGS_INTERNAL %0, %4, %5 |
%endmacro |
+%define has_epilogue regs_used > 3 || mmsize == 32 || stack_size > 0 |
+ |
%macro RET 0 |
+%if stack_size_padded > 0 |
+%if required_stack_alignment > STACK_ALIGNMENT |
+ mov rsp, rstkm |
+%else |
+ add rsp, stack_size_padded |
+%endif |
+%endif |
POP_IF_USED 6, 5, 4, 3 |
- ret |
+%if mmsize == 32 |
+ vzeroupper |
+%endif |
+ AUTO_REP_RET |
%endmacro |
+%endif ;====================================================================== |
+ |
+%if WIN64 == 0 |
+%macro WIN64_SPILL_XMM 1 |
+%endmacro |
+%macro WIN64_RESTORE_XMM 1 |
+%endmacro |
+%macro WIN64_PUSH_XMM 0 |
+%endmacro |
+%endif |
+ |
+; On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either |
+; a branch or a branch target. So switch to a 2-byte form of ret in that case. |
+; We can automatically detect "follows a branch", but not a branch target. |
+; (SSSE3 is a sufficient condition to know that your cpu doesn't have this problem.) |
%macro REP_RET 0 |
- %if regs_used > 3 |
+ %if has_epilogue |
RET |
%else |
rep ret |
%endif |
%endmacro |
-%endif ;====================================================================== |
+%define last_branch_adr $$ |
+%macro AUTO_REP_RET 0 |
+ %ifndef cpuflags |
+ times ((last_branch_adr-$)>>31)+1 rep ; times 1 iff $ == last_branch_adr. |
+ %elif notcpuflag(ssse3) |
+ times ((last_branch_adr-$)>>31)+1 rep |
+ %endif |
+ ret |
+%endmacro |
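; Usage sketch (hypothetical labels): an epilogue that is also a jump target |
; should end with REP_RET, so the two-byte form is used when nothing needs |
; to be popped: |
;       test  r2d, r2d |
;       jz    .skip |
;       ... |
;   .skip: |
;       REP_RET |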
-%if WIN64 == 0 |
-%macro WIN64_SPILL_XMM 1 |
+%macro BRANCH_INSTR 0-* |
+ %rep %0 |
+ %macro %1 1-2 %1 |
+ %2 %1 |
+ %%branch_instr: |
+ %xdefine last_branch_adr %%branch_instr |
+ %endmacro |
+ %rotate 1 |
+ %endrep |
%endmacro |
-%macro WIN64_RESTORE_XMM 1 |
+ |
+BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, jna, jnae, jb, jbe, jnb, jnbe, jc, jnc, js, jns, jo, jno, jp, jnp |
+ |
+%macro TAIL_CALL 2 ; callee, is_nonadjacent |
+ %if has_epilogue |
+ call %1 |
+ RET |
+ %elif %2 |
+ jmp %1 |
+ %endif |
%endmacro |
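; Usage sketch (hypothetical callee name): forward to another function, as a |
; plain jmp when the current function needs no epilogue, or as call+RET when |
; it does: |
;       TAIL_CALL some_helper, 1 |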
-%endif |
;============================================================================= |
; arch-independent part |
@@ -600,56 +751,69 @@ DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14 |
; Applies any symbol mangling needed for C linkage, and sets up a define such that |
; subsequent uses of the function name automatically refer to the mangled version. |
; Appends cpuflags to the function name if cpuflags has been specified. |
-%macro cglobal 1-2+ ; name, [PROLOGUE args] |
-%if %0 == 1 |
- cglobal_internal %1 %+ SUFFIX |
-%else |
- cglobal_internal %1 %+ SUFFIX, %2 |
-%endif |
+; The "" empty default parameter is a workaround for nasm, which fails if SUFFIX |
+; is empty and we call cglobal_internal with just %1 %+ SUFFIX (without %2). |
+%macro cglobal 1-2+ "" ; name, [PROLOGUE args] |
+ cglobal_internal 1, %1 %+ SUFFIX, %2 |
%endmacro |
-%macro cglobal_internal 1-2+ |
- %ifndef cglobaled_%1 |
- %xdefine %1 mangle(program_name %+ _ %+ %1) |
- %xdefine %1.skip_prologue %1 %+ .skip_prologue |
- CAT_XDEFINE cglobaled_, %1, 1 |
+%macro cvisible 1-2+ "" ; name, [PROLOGUE args] |
+ cglobal_internal 0, %1 %+ SUFFIX, %2 |
+%endmacro |
+%macro cglobal_internal 2-3+ |
+ %if %1 |
+ %xdefine %%FUNCTION_PREFIX private_prefix |
+ ; libvpx explicitly sets visibility in shared object builds. Avoid |
+ ; setting visibility to hidden as it may break builds that split |
+ ; sources on e.g., directory boundaries. |
+ %ifdef CHROMIUM |
+ %xdefine %%VISIBILITY hidden |
+ %else |
+ %xdefine %%VISIBILITY |
+ %endif |
+ %else |
+ %xdefine %%FUNCTION_PREFIX public_prefix |
+ %xdefine %%VISIBILITY |
%endif |
- %xdefine current_function %1 |
- %ifdef CHROMIUM |
- %ifidn __OUTPUT_FORMAT__,elf |
- global %1:function hidden |
- %elifidn __OUTPUT_FORMAT__,elf32 |
- global %1:function hidden |
- %elifidn __OUTPUT_FORMAT__,elf64 |
- global %1:function hidden |
- %elifidn __OUTPUT_FORMAT__,macho32 |
- %ifdef __NASM_VER__ |
- global %1 |
- %else |
- global %1:private_extern |
- %endif |
- %elifidn __OUTPUT_FORMAT__,macho64 |
- %ifdef __NASM_VER__ |
- global %1 |
- %else |
- global %1:private_extern |
- %endif |
+ %ifndef cglobaled_%2 |
+ %xdefine %2 mangle(%%FUNCTION_PREFIX %+ _ %+ %2) |
+ %xdefine %2.skip_prologue %2 %+ .skip_prologue |
+ CAT_XDEFINE cglobaled_, %2, 1 |
+ %endif |
+ %xdefine current_function %2 |
+ %ifidn __OUTPUT_FORMAT__,elf32 |
+ global %2:function %%VISIBILITY |
+ %elifidn __OUTPUT_FORMAT__,elf64 |
+ global %2:function %%VISIBILITY |
+ %elifidn __OUTPUT_FORMAT__,macho32 |
+ %ifdef __NASM_VER__ |
+ global %2 |
%else |
- global %1 |
+ global %2:private_extern |
+ %endif |
+ %elifidn __OUTPUT_FORMAT__,macho64 |
+ %ifdef __NASM_VER__ |
+ global %2 |
+ %else |
+ global %2:private_extern |
%endif |
%else |
- global %1 |
+ global %2 |
%endif |
align function_align |
- %1: |
- RESET_MM_PERMUTATION ; not really needed, but makes disassembly somewhat nicer |
- %assign stack_offset 0 |
- %if %0 > 1 |
- PROLOGUE %2 |
+ %2: |
+ RESET_MM_PERMUTATION ; needed for x86-64, also makes disassembly somewhat nicer |
+ %xdefine rstk rsp ; copy of the original stack pointer, used when greater alignment than the known stack alignment is required |
+ %assign stack_offset 0 ; stack pointer offset relative to the return address |
+ %assign stack_size 0 ; amount of stack space that can be freely used inside a function |
+ %assign stack_size_padded 0 ; total amount of allocated stack space, including space for callee-saved xmm registers on WIN64 and alignment padding |
+ %assign xmm_regs_used 0 ; number of XMM registers requested, used for dealing with callee-saved registers on WIN64 |
+ %ifnidn %3, "" |
+ PROLOGUE %3 |
%endif |
%endmacro |
%macro cextern 1 |
- %xdefine %1 mangle(program_name %+ _ %+ %1) |
+ %xdefine %1 mangle(private_prefix %+ _ %+ %1) |
CAT_XDEFINE cglobaled_, %1, 1 |
extern %1 |
%endmacro |
@@ -661,17 +825,21 @@ DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14 |
extern %1 |
%endmacro |
-%macro const 2+ |
- %xdefine %1 mangle(program_name %+ _ %+ %1) |
- global %1 |
+%macro const 1-2+ |
+ %xdefine %1 mangle(private_prefix %+ _ %+ %1) |
+ %ifidn __OUTPUT_FORMAT__,elf32 |
+ global %1:data hidden |
+ %elifidn __OUTPUT_FORMAT__,elf64 |
+ global %1:data hidden |
+ %else |
+ global %1 |
+ %endif |
%1: %2 |
%endmacro |
; This is needed for ELF, otherwise the GNU linker assumes the stack is |
; executable by default. |
-%ifidn __OUTPUT_FORMAT__,elf |
-SECTION .note.GNU-stack noalloc noexec nowrite progbits |
-%elifidn __OUTPUT_FORMAT__,elf32 |
+%ifidn __OUTPUT_FORMAT__,elf32 |
SECTION .note.GNU-stack noalloc noexec nowrite progbits |
%elifidn __OUTPUT_FORMAT__,elf64 |
SECTION .note.GNU-stack noalloc noexec nowrite progbits |
@@ -682,7 +850,7 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits |
%assign cpuflags_mmx (1<<0) |
%assign cpuflags_mmx2 (1<<1) | cpuflags_mmx |
%assign cpuflags_3dnow (1<<2) | cpuflags_mmx |
-%assign cpuflags_3dnow2 (1<<3) | cpuflags_3dnow |
+%assign cpuflags_3dnowext (1<<3) | cpuflags_3dnow |
%assign cpuflags_sse (1<<4) | cpuflags_mmx2 |
%assign cpuflags_sse2 (1<<5) | cpuflags_sse |
%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2 |
@@ -693,51 +861,71 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits |
%assign cpuflags_avx (1<<11)| cpuflags_sse42 |
%assign cpuflags_xop (1<<12)| cpuflags_avx |
%assign cpuflags_fma4 (1<<13)| cpuflags_avx |
+%assign cpuflags_fma3 (1<<14)| cpuflags_avx |
+%assign cpuflags_avx2 (1<<15)| cpuflags_fma3 |
%assign cpuflags_cache32 (1<<16) |
%assign cpuflags_cache64 (1<<17) |
%assign cpuflags_slowctz (1<<18) |
%assign cpuflags_lzcnt (1<<19) |
-%assign cpuflags_misalign (1<<20) |
-%assign cpuflags_aligned (1<<21) ; not a cpu feature, but a function variant |
-%assign cpuflags_atom (1<<22) |
+%assign cpuflags_aligned (1<<20) ; not a cpu feature, but a function variant |
+%assign cpuflags_atom (1<<21) |
+%assign cpuflags_bmi1 (1<<22)|cpuflags_lzcnt |
+%assign cpuflags_bmi2 (1<<23)|cpuflags_bmi1 |
%define cpuflag(x) ((cpuflags & (cpuflags_ %+ x)) == (cpuflags_ %+ x)) |
%define notcpuflag(x) ((cpuflags & (cpuflags_ %+ x)) != (cpuflags_ %+ x)) |
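; For example, a function body shared between several INIT_* targets can |
; branch at assembly time on the currently selected flags (sketch): |
;   %if cpuflag(ssse3) |
;       pshufb  m0, m1 |
;   %else |
;       ; sse2 fallback would go here |
;   %endif |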
-; Takes up to 2 cpuflags from the above list. |
+; Takes an arbitrary number of cpuflags from the above list. |
; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified cpu. |
; You shouldn't need to invoke this macro directly, it's a subroutine for INIT_MMX &co. |
-%macro INIT_CPUFLAGS 0-2 |
+%macro INIT_CPUFLAGS 0-* |
+ %xdefine SUFFIX |
+ %undef cpuname |
+ %assign cpuflags 0 |
+ |
%if %0 >= 1 |
- %xdefine cpuname %1 |
- %assign cpuflags cpuflags_%1 |
- %if %0 >= 2 |
- %xdefine cpuname %1_%2 |
- %assign cpuflags cpuflags | cpuflags_%2 |
- %endif |
+ %rep %0 |
+ %ifdef cpuname |
+ %xdefine cpuname cpuname %+ _%1 |
+ %else |
+ %xdefine cpuname %1 |
+ %endif |
+ %assign cpuflags cpuflags | cpuflags_%1 |
+ %rotate 1 |
+ %endrep |
%xdefine SUFFIX _ %+ cpuname |
+ |
%if cpuflag(avx) |
%assign avx_enabled 1 |
%endif |
- %if mmsize == 16 && notcpuflag(sse2) |
+ %if (mmsize == 16 && notcpuflag(sse2)) || (mmsize == 32 && notcpuflag(avx2)) |
%define mova movaps |
%define movu movups |
%define movnta movntps |
%endif |
%if cpuflag(aligned) |
%define movu mova |
- %elifidn %1, sse3 |
+ %elif cpuflag(sse3) && notcpuflag(ssse3) |
%define movu lddqu |
%endif |
+ %endif |
+ |
+ %ifdef __NASM_VER__ |
+ %use smartalign |
+ ALIGNMODE k7 |
+ %elif ARCH_X86_64 || cpuflag(sse2) |
+ CPU amdnop |
%else |
- %xdefine SUFFIX |
- %undef cpuname |
- %undef cpuflags |
+ CPU basicnop |
%endif |
%endmacro |
-; merge mmx and sse* |
+; Merge mmx and sse* |
+; m# is a simd register of the currently selected size |
+; xm# is the corresponding xmm register if mmsize >= 16, otherwise the same as m# |
+; ym# is the corresponding ymm register if mmsize >= 32, otherwise the same as m# |
+; (All 3 remain in sync through SWAP.) |
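; For example, under INIT_YMM avx2, m0 refers to ymm0 while xm0 refers to |
; xmm0, so 128-bit and 256-bit operations can be mixed freely (sketch): |
;       vextracti128  xm1, m0, 1 |
;       paddd         xm0, xm1 |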
%macro CAT_XDEFINE 3 |
%xdefine %1%2 %3 |
@@ -759,12 +947,12 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits |
%assign %%i 0 |
%rep 8 |
CAT_XDEFINE m, %%i, mm %+ %%i |
- CAT_XDEFINE nmm, %%i, %%i |
+ CAT_XDEFINE nnmm, %%i, %%i |
%assign %%i %%i+1 |
%endrep |
%rep 8 |
CAT_UNDEF m, %%i |
- CAT_UNDEF nmm, %%i |
+ CAT_UNDEF nnmm, %%i |
%assign %%i %%i+1 |
%endrep |
INIT_CPUFLAGS %1 |
@@ -785,20 +973,12 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits |
%assign %%i 0 |
%rep num_mmregs |
CAT_XDEFINE m, %%i, xmm %+ %%i |
- CAT_XDEFINE nxmm, %%i, %%i |
+ CAT_XDEFINE nnxmm, %%i, %%i |
%assign %%i %%i+1 |
%endrep |
INIT_CPUFLAGS %1 |
%endmacro |
-; FIXME: INIT_AVX can be replaced by INIT_XMM avx |
-%macro INIT_AVX 0 |
- INIT_XMM |
- %assign avx_enabled 1 |
- %define PALIGNR PALIGNR_SSSE3 |
- %define RESET_MM_PERMUTATION INIT_AVX |
-%endmacro |
- |
%macro INIT_YMM 0-1+ |
%assign avx_enabled 1 |
%define RESET_MM_PERMUTATION INIT_YMM %1 |
@@ -807,14 +987,14 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits |
%if ARCH_X86_64 |
%define num_mmregs 16 |
%endif |
- %define mova vmovaps |
- %define movu vmovups |
+ %define mova movdqa |
+ %define movu movdqu |
%undef movh |
- %define movnta vmovntps |
+ %define movnta movntdq |
%assign %%i 0 |
%rep num_mmregs |
CAT_XDEFINE m, %%i, ymm %+ %%i |
- CAT_XDEFINE nymm, %%i, %%i |
+ CAT_XDEFINE nnymm, %%i, %%i |
%assign %%i %%i+1 |
%endrep |
INIT_CPUFLAGS %1 |
@@ -822,6 +1002,26 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits |
INIT_XMM |
+%macro DECLARE_MMCAST 1 |
+ %define mmmm%1 mm%1 |
+ %define mmxmm%1 mm%1 |
+ %define mmymm%1 mm%1 |
+ %define xmmmm%1 mm%1 |
+ %define xmmxmm%1 xmm%1 |
+ %define xmmymm%1 xmm%1 |
+ %define ymmmm%1 mm%1 |
+ %define ymmxmm%1 xmm%1 |
+ %define ymmymm%1 ymm%1 |
+ %define xm%1 xmm %+ m%1 |
+ %define ym%1 ymm %+ m%1 |
+%endmacro |
+ |
+%assign i 0 |
+%rep 16 |
+ DECLARE_MMCAST i |
+%assign i i+1 |
+%endrep |
+ |
; I often want to use macros that permute their arguments. e.g. there's no |
; efficient way to implement butterfly or transpose or dct without swapping some |
; arguments. |
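; For example, when a result lands in the "wrong" register, the registers can |
; simply be renamed instead of copied (sketch): |
;       SWAP 0, 2    ; m0 and m2 exchange meanings; no instruction is emitted |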
@@ -838,42 +1038,42 @@ INIT_XMM |
%macro PERMUTE 2-* ; takes a list of pairs to swap |
%rep %0/2 |
- %xdefine tmp%2 m%2 |
- %xdefine ntmp%2 nm%2 |
+ %xdefine %%tmp%2 m%2 |
%rotate 2 |
%endrep |
%rep %0/2 |
- %xdefine m%1 tmp%2 |
- %xdefine nm%1 ntmp%2 |
- %undef tmp%2 |
- %undef ntmp%2 |
+ %xdefine m%1 %%tmp%2 |
+ CAT_XDEFINE nn, m%1, %1 |
%rotate 2 |
%endrep |
%endmacro |
-%macro SWAP 2-* ; swaps a single chain (sometimes more concise than pairs) |
-%rep %0-1 |
-%ifdef m%1 |
- %xdefine tmp m%1 |
- %xdefine m%1 m%2 |
- %xdefine m%2 tmp |
- CAT_XDEFINE n, m%1, %1 |
- CAT_XDEFINE n, m%2, %2 |
-%else |
- ; If we were called as "SWAP m0,m1" rather than "SWAP 0,1" infer the original numbers here. |
- ; Be careful using this mode in nested macros though, as in some cases there may be |
- ; other copies of m# that have already been dereferenced and don't get updated correctly. |
- %xdefine %%n1 n %+ %1 |
- %xdefine %%n2 n %+ %2 |
- %xdefine tmp m %+ %%n1 |
- CAT_XDEFINE m, %%n1, m %+ %%n2 |
- CAT_XDEFINE m, %%n2, tmp |
- CAT_XDEFINE n, m %+ %%n1, %%n1 |
- CAT_XDEFINE n, m %+ %%n2, %%n2 |
+%macro SWAP 2+ ; swaps a single chain (sometimes more concise than pairs) |
+%ifnum %1 ; SWAP 0, 1, ... |
+ SWAP_INTERNAL_NUM %1, %2 |
+%else ; SWAP m0, m1, ... |
+ SWAP_INTERNAL_NAME %1, %2 |
%endif |
- %undef tmp |
+%endmacro |
+ |
+%macro SWAP_INTERNAL_NUM 2-* |
+ %rep %0-1 |
+ %xdefine %%tmp m%1 |
+ %xdefine m%1 m%2 |
+ %xdefine m%2 %%tmp |
+ CAT_XDEFINE nn, m%1, %1 |
+ CAT_XDEFINE nn, m%2, %2 |
%rotate 1 |
-%endrep |
+ %endrep |
+%endmacro |
+ |
+%macro SWAP_INTERNAL_NAME 2-* |
+ %xdefine %%args nn %+ %1 |
+ %rep %0-1 |
+ %xdefine %%args %%args, nn %+ %2 |
+ %rotate 1 |
+ %endrep |
+ SWAP_INTERNAL_NUM %%args |
%endmacro |
; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later |
@@ -897,7 +1097,7 @@ INIT_XMM |
%assign %%i 0 |
%rep num_mmregs |
CAT_XDEFINE m, %%i, %1_m %+ %%i |
- CAT_XDEFINE n, m %+ %%i, %%i |
+ CAT_XDEFINE nn, m %+ %%i, %%i |
%assign %%i %%i+1 |
%endrep |
%endif |
@@ -958,246 +1158,365 @@ INIT_XMM |
%endrep |
%undef i |
+%macro CHECK_AVX_INSTR_EMU 3-* |
+ %xdefine %%opcode %1 |
+ %xdefine %%dst %2 |
+ %rep %0-2 |
+ %ifidn %%dst, %3 |
+ %error non-avx emulation of ``%%opcode'' is not supported |
+ %endif |
+ %rotate 1 |
+ %endrep |
+%endmacro |
+ |
;%1 == instruction |
-;%2 == 1 if float, 0 if int |
-;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 2- or 3-operand (xmm, xmm, xmm) |
-;%4 == number of operands given |
-;%5+: operands |
-%macro RUN_AVX_INSTR 6-7+ |
- %ifid %5 |
- %define %%size sizeof%5 |
+;%2 == minimal instruction set |
+;%3 == 1 if float, 0 if int |
+;%4 == 1 if non-destructive or 4-operand (xmm, xmm, xmm, imm), 0 otherwise |
+;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not |
+;%6+: operands |
+%macro RUN_AVX_INSTR 6-9+ |
+ %ifnum sizeof%7 |
+ %assign __sizeofreg sizeof%7 |
+ %elifnum sizeof%6 |
+ %assign __sizeofreg sizeof%6 |
%else |
- %define %%size mmsize |
+ %assign __sizeofreg mmsize |
%endif |
- %if %%size==32 |
- %if %0 >= 7 |
- v%1 %5, %6, %7 |
- %else |
- v%1 %5, %6 |
- %endif |
+ %assign __emulate_avx 0 |
+ %if avx_enabled && __sizeofreg >= 16 |
+ %xdefine __instr v%1 |
%else |
- %if %%size==8 |
- %define %%regmov movq |
- %elif %2 |
- %define %%regmov movaps |
- %else |
- %define %%regmov movdqa |
+ %xdefine __instr %1 |
+ %if %0 >= 8+%4 |
+ %assign __emulate_avx 1 |
%endif |
- |
- %if %4>=3+%3 |
- %ifnidn %5, %6 |
- %if avx_enabled && sizeof%5==16 |
- v%1 %5, %6, %7 |
- %else |
- %%regmov %5, %6 |
- %1 %5, %7 |
- %endif |
- %else |
- %1 %5, %7 |
+ %endif |
+ %ifnidn %2, fnord |
+ %ifdef cpuname |
+ %if notcpuflag(%2) |
+ %error use of ``%1'' %2 instruction in cpuname function: current_function |
+ %elif cpuflags_%2 < cpuflags_sse && notcpuflag(sse2) && __sizeofreg > 8 |
+ %error use of ``%1'' sse2 instruction in cpuname function: current_function |
%endif |
- %elif %3 |
- %1 %5, %6, %7 |
- %else |
- %1 %5, %6 |
%endif |
%endif |
-%endmacro |
-; 3arg AVX ops with a memory arg can only have it in src2, |
-; whereas SSE emulation of 3arg prefers to have it in src1 (i.e. the mov). |
-; So, if the op is symmetric and the wrong one is memory, swap them. |
-%macro RUN_AVX_INSTR1 8 |
- %assign %%swap 0 |
- %if avx_enabled |
- %ifnid %6 |
- %assign %%swap 1 |
+ %if __emulate_avx |
+ %xdefine __src1 %7 |
+ %xdefine __src2 %8 |
+ %ifnidn %6, %7 |
+ %if %0 >= 9 |
+ CHECK_AVX_INSTR_EMU {%1 %6, %7, %8, %9}, %6, %8, %9 |
+ %else |
+ CHECK_AVX_INSTR_EMU {%1 %6, %7, %8}, %6, %8 |
+ %endif |
+ %if %5 && %4 == 0 |
+ %ifnid %8 |
+ ; 3-operand AVX instructions with a memory arg can only have it in src2, |
+ ; whereas SSE emulation prefers to have it in src1 (i.e. the mov). |
+ ; So, if the instruction is commutative with a memory arg, swap them. |
+ %xdefine __src1 %8 |
+ %xdefine __src2 %7 |
+ %endif |
+ %endif |
+ %if __sizeofreg == 8 |
+ MOVQ %6, __src1 |
+ %elif %3 |
+ MOVAPS %6, __src1 |
+ %else |
+ MOVDQA %6, __src1 |
+ %endif |
%endif |
- %elifnidn %5, %6 |
- %ifnid %7 |
- %assign %%swap 1 |
+ %if %0 >= 9 |
+ %1 %6, __src2, %9 |
+ %else |
+ %1 %6, __src2 |
%endif |
- %endif |
- %if %%swap && %3 == 0 && %8 == 1 |
- RUN_AVX_INSTR %1, %2, %3, %4, %5, %7, %6 |
+ %elif %0 >= 9 |
+ __instr %6, %7, %8, %9 |
+ %elif %0 == 8 |
+ __instr %6, %7, %8 |
+ %elif %0 == 7 |
+ __instr %6, %7 |
%else |
- RUN_AVX_INSTR %1, %2, %3, %4, %5, %6, %7 |
+ __instr %6 |
%endif |
%endmacro |
;%1 == instruction |
-;%2 == 1 if float, 0 if int |
-;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 3-operand (xmm, xmm, xmm) |
-;%4 == 1 if symmetric (i.e. doesn't matter which src arg is which), 0 if not |
-%macro AVX_INSTR 4 |
- %macro %1 2-9 fnord, fnord, fnord, %1, %2, %3, %4 |
- %ifidn %3, fnord |
- RUN_AVX_INSTR %6, %7, %8, 2, %1, %2 |
+;%2 == minimal instruction set |
+;%3 == 1 if float, 0 if int |
+;%4 == 1 if non-destructive or 4-operand (xmm, xmm, xmm, imm), 0 otherwise |
+;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not |
+%macro AVX_INSTR 1-5 fnord, 0, 1, 0 |
+ %macro %1 1-10 fnord, fnord, fnord, fnord, %1, %2, %3, %4, %5 |
+ %ifidn %2, fnord |
+ RUN_AVX_INSTR %6, %7, %8, %9, %10, %1 |
+ %elifidn %3, fnord |
+ RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2 |
%elifidn %4, fnord |
- RUN_AVX_INSTR1 %6, %7, %8, 3, %1, %2, %3, %9 |
+ RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3 |
%elifidn %5, fnord |
- RUN_AVX_INSTR %6, %7, %8, 4, %1, %2, %3, %4 |
+ RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3, %4 |
%else |
- RUN_AVX_INSTR %6, %7, %8, 5, %1, %2, %3, %4, %5 |
+ RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3, %4, %5 |
%endif |
%endmacro |
%endmacro |
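; For example, with these wrappers a single source line such as |
;       paddw  m0, m1, m2 |
; assembles (under INIT_XMM avx) to ``vpaddw xmm0, xmm1, xmm2'', while an |
; sse2 build emulates it as ``movdqa xmm0, xmm1'' followed by ``paddw xmm0, xmm2''. |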
-AVX_INSTR addpd, 1, 0, 1 |
-AVX_INSTR addps, 1, 0, 1 |
-AVX_INSTR addsd, 1, 0, 1 |
-AVX_INSTR addss, 1, 0, 1 |
-AVX_INSTR addsubpd, 1, 0, 0 |
-AVX_INSTR addsubps, 1, 0, 0 |
-AVX_INSTR andpd, 1, 0, 1 |
-AVX_INSTR andps, 1, 0, 1 |
-AVX_INSTR andnpd, 1, 0, 0 |
-AVX_INSTR andnps, 1, 0, 0 |
-AVX_INSTR blendpd, 1, 0, 0 |
-AVX_INSTR blendps, 1, 0, 0 |
-AVX_INSTR blendvpd, 1, 0, 0 |
-AVX_INSTR blendvps, 1, 0, 0 |
-AVX_INSTR cmppd, 1, 0, 0 |
-AVX_INSTR cmpps, 1, 0, 0 |
-AVX_INSTR cmpsd, 1, 0, 0 |
-AVX_INSTR cmpss, 1, 0, 0 |
-AVX_INSTR cvtdq2ps, 1, 0, 0 |
-AVX_INSTR cvtps2dq, 1, 0, 0 |
-AVX_INSTR divpd, 1, 0, 0 |
-AVX_INSTR divps, 1, 0, 0 |
-AVX_INSTR divsd, 1, 0, 0 |
-AVX_INSTR divss, 1, 0, 0 |
-AVX_INSTR dppd, 1, 1, 0 |
-AVX_INSTR dpps, 1, 1, 0 |
-AVX_INSTR haddpd, 1, 0, 0 |
-AVX_INSTR haddps, 1, 0, 0 |
-AVX_INSTR hsubpd, 1, 0, 0 |
-AVX_INSTR hsubps, 1, 0, 0 |
-AVX_INSTR maxpd, 1, 0, 1 |
-AVX_INSTR maxps, 1, 0, 1 |
-AVX_INSTR maxsd, 1, 0, 1 |
-AVX_INSTR maxss, 1, 0, 1 |
-AVX_INSTR minpd, 1, 0, 1 |
-AVX_INSTR minps, 1, 0, 1 |
-AVX_INSTR minsd, 1, 0, 1 |
-AVX_INSTR minss, 1, 0, 1 |
-AVX_INSTR movhlps, 1, 0, 0 |
-AVX_INSTR movlhps, 1, 0, 0 |
-AVX_INSTR movsd, 1, 0, 0 |
-AVX_INSTR movss, 1, 0, 0 |
-AVX_INSTR mpsadbw, 0, 1, 0 |
-AVX_INSTR mulpd, 1, 0, 1 |
-AVX_INSTR mulps, 1, 0, 1 |
-AVX_INSTR mulsd, 1, 0, 1 |
-AVX_INSTR mulss, 1, 0, 1 |
-AVX_INSTR orpd, 1, 0, 1 |
-AVX_INSTR orps, 1, 0, 1 |
-AVX_INSTR packsswb, 0, 0, 0 |
-AVX_INSTR packssdw, 0, 0, 0 |
-AVX_INSTR packuswb, 0, 0, 0 |
-AVX_INSTR packusdw, 0, 0, 0 |
-AVX_INSTR paddb, 0, 0, 1 |
-AVX_INSTR paddw, 0, 0, 1 |
-AVX_INSTR paddd, 0, 0, 1 |
-AVX_INSTR paddq, 0, 0, 1 |
-AVX_INSTR paddsb, 0, 0, 1 |
-AVX_INSTR paddsw, 0, 0, 1 |
-AVX_INSTR paddusb, 0, 0, 1 |
-AVX_INSTR paddusw, 0, 0, 1 |
-AVX_INSTR palignr, 0, 1, 0 |
-AVX_INSTR pand, 0, 0, 1 |
-AVX_INSTR pandn, 0, 0, 0 |
-AVX_INSTR pavgb, 0, 0, 1 |
-AVX_INSTR pavgw, 0, 0, 1 |
-AVX_INSTR pblendvb, 0, 0, 0 |
-AVX_INSTR pblendw, 0, 1, 0 |
-AVX_INSTR pcmpestri, 0, 0, 0 |
-AVX_INSTR pcmpestrm, 0, 0, 0 |
-AVX_INSTR pcmpistri, 0, 0, 0 |
-AVX_INSTR pcmpistrm, 0, 0, 0 |
-AVX_INSTR pcmpeqb, 0, 0, 1 |
-AVX_INSTR pcmpeqw, 0, 0, 1 |
-AVX_INSTR pcmpeqd, 0, 0, 1 |
-AVX_INSTR pcmpeqq, 0, 0, 1 |
-AVX_INSTR pcmpgtb, 0, 0, 0 |
-AVX_INSTR pcmpgtw, 0, 0, 0 |
-AVX_INSTR pcmpgtd, 0, 0, 0 |
-AVX_INSTR pcmpgtq, 0, 0, 0 |
-AVX_INSTR phaddw, 0, 0, 0 |
-AVX_INSTR phaddd, 0, 0, 0 |
-AVX_INSTR phaddsw, 0, 0, 0 |
-AVX_INSTR phsubw, 0, 0, 0 |
-AVX_INSTR phsubd, 0, 0, 0 |
-AVX_INSTR phsubsw, 0, 0, 0 |
-AVX_INSTR pmaddwd, 0, 0, 1 |
-AVX_INSTR pmaddubsw, 0, 0, 0 |
-AVX_INSTR pmaxsb, 0, 0, 1 |
-AVX_INSTR pmaxsw, 0, 0, 1 |
-AVX_INSTR pmaxsd, 0, 0, 1 |
-AVX_INSTR pmaxub, 0, 0, 1 |
-AVX_INSTR pmaxuw, 0, 0, 1 |
-AVX_INSTR pmaxud, 0, 0, 1 |
-AVX_INSTR pminsb, 0, 0, 1 |
-AVX_INSTR pminsw, 0, 0, 1 |
-AVX_INSTR pminsd, 0, 0, 1 |
-AVX_INSTR pminub, 0, 0, 1 |
-AVX_INSTR pminuw, 0, 0, 1 |
-AVX_INSTR pminud, 0, 0, 1 |
-AVX_INSTR pmulhuw, 0, 0, 1 |
-AVX_INSTR pmulhrsw, 0, 0, 1 |
-AVX_INSTR pmulhw, 0, 0, 1 |
-AVX_INSTR pmullw, 0, 0, 1 |
-AVX_INSTR pmulld, 0, 0, 1 |
-AVX_INSTR pmuludq, 0, 0, 1 |
-AVX_INSTR pmuldq, 0, 0, 1 |
-AVX_INSTR por, 0, 0, 1 |
-AVX_INSTR psadbw, 0, 0, 1 |
-AVX_INSTR pshufb, 0, 0, 0 |
-AVX_INSTR psignb, 0, 0, 0 |
-AVX_INSTR psignw, 0, 0, 0 |
-AVX_INSTR psignd, 0, 0, 0 |
-AVX_INSTR psllw, 0, 0, 0 |
-AVX_INSTR pslld, 0, 0, 0 |
-AVX_INSTR psllq, 0, 0, 0 |
-AVX_INSTR pslldq, 0, 0, 0 |
-AVX_INSTR psraw, 0, 0, 0 |
-AVX_INSTR psrad, 0, 0, 0 |
-AVX_INSTR psrlw, 0, 0, 0 |
-AVX_INSTR psrld, 0, 0, 0 |
-AVX_INSTR psrlq, 0, 0, 0 |
-AVX_INSTR psrldq, 0, 0, 0 |
-AVX_INSTR psubb, 0, 0, 0 |
-AVX_INSTR psubw, 0, 0, 0 |
-AVX_INSTR psubd, 0, 0, 0 |
-AVX_INSTR psubq, 0, 0, 0 |
-AVX_INSTR psubsb, 0, 0, 0 |
-AVX_INSTR psubsw, 0, 0, 0 |
-AVX_INSTR psubusb, 0, 0, 0 |
-AVX_INSTR psubusw, 0, 0, 0 |
-AVX_INSTR punpckhbw, 0, 0, 0 |
-AVX_INSTR punpckhwd, 0, 0, 0 |
-AVX_INSTR punpckhdq, 0, 0, 0 |
-AVX_INSTR punpckhqdq, 0, 0, 0 |
-AVX_INSTR punpcklbw, 0, 0, 0 |
-AVX_INSTR punpcklwd, 0, 0, 0 |
-AVX_INSTR punpckldq, 0, 0, 0 |
-AVX_INSTR punpcklqdq, 0, 0, 0 |
-AVX_INSTR pxor, 0, 0, 1 |
-AVX_INSTR shufps, 1, 1, 0 |
-AVX_INSTR subpd, 1, 0, 0 |
-AVX_INSTR subps, 1, 0, 0 |
-AVX_INSTR subsd, 1, 0, 0 |
-AVX_INSTR subss, 1, 0, 0 |
-AVX_INSTR unpckhpd, 1, 0, 0 |
-AVX_INSTR unpckhps, 1, 0, 0 |
-AVX_INSTR unpcklpd, 1, 0, 0 |
-AVX_INSTR unpcklps, 1, 0, 0 |
-AVX_INSTR xorpd, 1, 0, 1 |
-AVX_INSTR xorps, 1, 0, 1 |
+; Instructions with both VEX and non-VEX encodings |
+; Non-destructive instructions are written without parameters |
+AVX_INSTR addpd, sse2, 1, 0, 1 |
+AVX_INSTR addps, sse, 1, 0, 1 |
+AVX_INSTR addsd, sse2, 1, 0, 1 |
+AVX_INSTR addss, sse, 1, 0, 1 |
+AVX_INSTR addsubpd, sse3, 1, 0, 0 |
+AVX_INSTR addsubps, sse3, 1, 0, 0 |
+AVX_INSTR aesdec, fnord, 0, 0, 0 |
+AVX_INSTR aesdeclast, fnord, 0, 0, 0 |
+AVX_INSTR aesenc, fnord, 0, 0, 0 |
+AVX_INSTR aesenclast, fnord, 0, 0, 0 |
+AVX_INSTR aesimc |
+AVX_INSTR aeskeygenassist |
+AVX_INSTR andnpd, sse2, 1, 0, 0 |
+AVX_INSTR andnps, sse, 1, 0, 0 |
+AVX_INSTR andpd, sse2, 1, 0, 1 |
+AVX_INSTR andps, sse, 1, 0, 1 |
+AVX_INSTR blendpd, sse4, 1, 0, 0 |
+AVX_INSTR blendps, sse4, 1, 0, 0 |
+AVX_INSTR blendvpd, sse4, 1, 0, 0 |
+AVX_INSTR blendvps, sse4, 1, 0, 0 |
+AVX_INSTR cmppd, sse2, 1, 1, 0 |
+AVX_INSTR cmpps, sse, 1, 1, 0 |
+AVX_INSTR cmpsd, sse2, 1, 1, 0 |
+AVX_INSTR cmpss, sse, 1, 1, 0 |
+AVX_INSTR comisd, sse2 |
+AVX_INSTR comiss, sse |
+AVX_INSTR cvtdq2pd, sse2 |
+AVX_INSTR cvtdq2ps, sse2 |
+AVX_INSTR cvtpd2dq, sse2 |
+AVX_INSTR cvtpd2ps, sse2 |
+AVX_INSTR cvtps2dq, sse2 |
+AVX_INSTR cvtps2pd, sse2 |
+AVX_INSTR cvtsd2si, sse2 |
+AVX_INSTR cvtsd2ss, sse2 |
+AVX_INSTR cvtsi2sd, sse2 |
+AVX_INSTR cvtsi2ss, sse |
+AVX_INSTR cvtss2sd, sse2 |
+AVX_INSTR cvtss2si, sse |
+AVX_INSTR cvttpd2dq, sse2 |
+AVX_INSTR cvttps2dq, sse2 |
+AVX_INSTR cvttsd2si, sse2 |
+AVX_INSTR cvttss2si, sse |
+AVX_INSTR divpd, sse2, 1, 0, 0 |
+AVX_INSTR divps, sse, 1, 0, 0 |
+AVX_INSTR divsd, sse2, 1, 0, 0 |
+AVX_INSTR divss, sse, 1, 0, 0 |
+AVX_INSTR dppd, sse4, 1, 1, 0 |
+AVX_INSTR dpps, sse4, 1, 1, 0 |
+AVX_INSTR extractps, sse4 |
+AVX_INSTR haddpd, sse3, 1, 0, 0 |
+AVX_INSTR haddps, sse3, 1, 0, 0 |
+AVX_INSTR hsubpd, sse3, 1, 0, 0 |
+AVX_INSTR hsubps, sse3, 1, 0, 0 |
+AVX_INSTR insertps, sse4, 1, 1, 0 |
+AVX_INSTR lddqu, sse3 |
+AVX_INSTR ldmxcsr, sse |
+AVX_INSTR maskmovdqu, sse2 |
+AVX_INSTR maxpd, sse2, 1, 0, 1 |
+AVX_INSTR maxps, sse, 1, 0, 1 |
+AVX_INSTR maxsd, sse2, 1, 0, 1 |
+AVX_INSTR maxss, sse, 1, 0, 1 |
+AVX_INSTR minpd, sse2, 1, 0, 1 |
+AVX_INSTR minps, sse, 1, 0, 1 |
+AVX_INSTR minsd, sse2, 1, 0, 1 |
+AVX_INSTR minss, sse, 1, 0, 1 |
+AVX_INSTR movapd, sse2 |
+AVX_INSTR movaps, sse |
+AVX_INSTR movd, mmx |
+AVX_INSTR movddup, sse3 |
+AVX_INSTR movdqa, sse2 |
+AVX_INSTR movdqu, sse2 |
+AVX_INSTR movhlps, sse, 1, 0, 0 |
+AVX_INSTR movhpd, sse2, 1, 0, 0 |
+AVX_INSTR movhps, sse, 1, 0, 0 |
+AVX_INSTR movlhps, sse, 1, 0, 0 |
+AVX_INSTR movlpd, sse2, 1, 0, 0 |
+AVX_INSTR movlps, sse, 1, 0, 0 |
+AVX_INSTR movmskpd, sse2 |
+AVX_INSTR movmskps, sse |
+AVX_INSTR movntdq, sse2 |
+AVX_INSTR movntdqa, sse4 |
+AVX_INSTR movntpd, sse2 |
+AVX_INSTR movntps, sse |
+AVX_INSTR movq, mmx |
+AVX_INSTR movsd, sse2, 1, 0, 0 |
+AVX_INSTR movshdup, sse3 |
+AVX_INSTR movsldup, sse3 |
+AVX_INSTR movss, sse, 1, 0, 0 |
+AVX_INSTR movupd, sse2 |
+AVX_INSTR movups, sse |
+AVX_INSTR mpsadbw, sse4 |
+AVX_INSTR mulpd, sse2, 1, 0, 1 |
+AVX_INSTR mulps, sse, 1, 0, 1 |
+AVX_INSTR mulsd, sse2, 1, 0, 1 |
+AVX_INSTR mulss, sse, 1, 0, 1 |
+AVX_INSTR orpd, sse2, 1, 0, 1 |
+AVX_INSTR orps, sse, 1, 0, 1 |
+AVX_INSTR pabsb, ssse3 |
+AVX_INSTR pabsd, ssse3 |
+AVX_INSTR pabsw, ssse3 |
+AVX_INSTR packsswb, mmx, 0, 0, 0 |
+AVX_INSTR packssdw, mmx, 0, 0, 0 |
+AVX_INSTR packuswb, mmx, 0, 0, 0 |
+AVX_INSTR packusdw, sse4, 0, 0, 0 |
+AVX_INSTR paddb, mmx, 0, 0, 1 |
+AVX_INSTR paddw, mmx, 0, 0, 1 |
+AVX_INSTR paddd, mmx, 0, 0, 1 |
+AVX_INSTR paddq, sse2, 0, 0, 1 |
+AVX_INSTR paddsb, mmx, 0, 0, 1 |
+AVX_INSTR paddsw, mmx, 0, 0, 1 |
+AVX_INSTR paddusb, mmx, 0, 0, 1 |
+AVX_INSTR paddusw, mmx, 0, 0, 1 |
+AVX_INSTR palignr, ssse3 |
+AVX_INSTR pand, mmx, 0, 0, 1 |
+AVX_INSTR pandn, mmx, 0, 0, 0 |
+AVX_INSTR pavgb, mmx2, 0, 0, 1 |
+AVX_INSTR pavgw, mmx2, 0, 0, 1 |
+AVX_INSTR pblendvb, sse4, 0, 0, 0 |
+AVX_INSTR pblendw, sse4 |
+AVX_INSTR pclmulqdq |
+AVX_INSTR pcmpestri, sse42 |
+AVX_INSTR pcmpestrm, sse42 |
+AVX_INSTR pcmpistri, sse42 |
+AVX_INSTR pcmpistrm, sse42 |
+AVX_INSTR pcmpeqb, mmx, 0, 0, 1 |
+AVX_INSTR pcmpeqw, mmx, 0, 0, 1 |
+AVX_INSTR pcmpeqd, mmx, 0, 0, 1 |
+AVX_INSTR pcmpeqq, sse4, 0, 0, 1 |
+AVX_INSTR pcmpgtb, mmx, 0, 0, 0 |
+AVX_INSTR pcmpgtw, mmx, 0, 0, 0 |
+AVX_INSTR pcmpgtd, mmx, 0, 0, 0 |
+AVX_INSTR pcmpgtq, sse42, 0, 0, 0 |
+AVX_INSTR pextrb, sse4 |
+AVX_INSTR pextrd, sse4 |
+AVX_INSTR pextrq, sse4 |
+AVX_INSTR pextrw, mmx2 |
+AVX_INSTR phaddw, ssse3, 0, 0, 0 |
+AVX_INSTR phaddd, ssse3, 0, 0, 0 |
+AVX_INSTR phaddsw, ssse3, 0, 0, 0 |
+AVX_INSTR phminposuw, sse4 |
+AVX_INSTR phsubw, ssse3, 0, 0, 0 |
+AVX_INSTR phsubd, ssse3, 0, 0, 0 |
+AVX_INSTR phsubsw, ssse3, 0, 0, 0 |
+AVX_INSTR pinsrb, sse4 |
+AVX_INSTR pinsrd, sse4 |
+AVX_INSTR pinsrq, sse4 |
+AVX_INSTR pinsrw, mmx2 |
+AVX_INSTR pmaddwd, mmx, 0, 0, 1 |
+AVX_INSTR pmaddubsw, ssse3, 0, 0, 0 |
+AVX_INSTR pmaxsb, sse4, 0, 0, 1 |
+AVX_INSTR pmaxsw, mmx2, 0, 0, 1 |
+AVX_INSTR pmaxsd, sse4, 0, 0, 1 |
+AVX_INSTR pmaxub, mmx2, 0, 0, 1 |
+AVX_INSTR pmaxuw, sse4, 0, 0, 1 |
+AVX_INSTR pmaxud, sse4, 0, 0, 1 |
+AVX_INSTR pminsb, sse4, 0, 0, 1 |
+AVX_INSTR pminsw, mmx2, 0, 0, 1 |
+AVX_INSTR pminsd, sse4, 0, 0, 1 |
+AVX_INSTR pminub, mmx2, 0, 0, 1 |
+AVX_INSTR pminuw, sse4, 0, 0, 1 |
+AVX_INSTR pminud, sse4, 0, 0, 1 |
+AVX_INSTR pmovmskb, mmx2 |
+AVX_INSTR pmovsxbw, sse4 |
+AVX_INSTR pmovsxbd, sse4 |
+AVX_INSTR pmovsxbq, sse4 |
+AVX_INSTR pmovsxwd, sse4 |
+AVX_INSTR pmovsxwq, sse4 |
+AVX_INSTR pmovsxdq, sse4 |
+AVX_INSTR pmovzxbw, sse4 |
+AVX_INSTR pmovzxbd, sse4 |
+AVX_INSTR pmovzxbq, sse4 |
+AVX_INSTR pmovzxwd, sse4 |
+AVX_INSTR pmovzxwq, sse4 |
+AVX_INSTR pmovzxdq, sse4 |
+AVX_INSTR pmuldq, sse4, 0, 0, 1 |
+AVX_INSTR pmulhrsw, ssse3, 0, 0, 1 |
+AVX_INSTR pmulhuw, mmx2, 0, 0, 1 |
+AVX_INSTR pmulhw, mmx, 0, 0, 1 |
+AVX_INSTR pmullw, mmx, 0, 0, 1 |
+AVX_INSTR pmulld, sse4, 0, 0, 1 |
+AVX_INSTR pmuludq, sse2, 0, 0, 1 |
+AVX_INSTR por, mmx, 0, 0, 1 |
+AVX_INSTR psadbw, mmx2, 0, 0, 1 |
+AVX_INSTR pshufb, ssse3, 0, 0, 0 |
+AVX_INSTR pshufd, sse2 |
+AVX_INSTR pshufhw, sse2 |
+AVX_INSTR pshuflw, sse2 |
+AVX_INSTR psignb, ssse3, 0, 0, 0 |
+AVX_INSTR psignw, ssse3, 0, 0, 0 |
+AVX_INSTR psignd, ssse3, 0, 0, 0 |
+AVX_INSTR psllw, mmx, 0, 0, 0 |
+AVX_INSTR pslld, mmx, 0, 0, 0 |
+AVX_INSTR psllq, mmx, 0, 0, 0 |
+AVX_INSTR pslldq, sse2, 0, 0, 0 |
+AVX_INSTR psraw, mmx, 0, 0, 0 |
+AVX_INSTR psrad, mmx, 0, 0, 0 |
+AVX_INSTR psrlw, mmx, 0, 0, 0 |
+AVX_INSTR psrld, mmx, 0, 0, 0 |
+AVX_INSTR psrlq, mmx, 0, 0, 0 |
+AVX_INSTR psrldq, sse2, 0, 0, 0 |
+AVX_INSTR psubb, mmx, 0, 0, 0 |
+AVX_INSTR psubw, mmx, 0, 0, 0 |
+AVX_INSTR psubd, mmx, 0, 0, 0 |
+AVX_INSTR psubq, sse2, 0, 0, 0 |
+AVX_INSTR psubsb, mmx, 0, 0, 0 |
+AVX_INSTR psubsw, mmx, 0, 0, 0 |
+AVX_INSTR psubusb, mmx, 0, 0, 0 |
+AVX_INSTR psubusw, mmx, 0, 0, 0 |
+AVX_INSTR ptest, sse4 |
+AVX_INSTR punpckhbw, mmx, 0, 0, 0 |
+AVX_INSTR punpckhwd, mmx, 0, 0, 0 |
+AVX_INSTR punpckhdq, mmx, 0, 0, 0 |
+AVX_INSTR punpckhqdq, sse2, 0, 0, 0 |
+AVX_INSTR punpcklbw, mmx, 0, 0, 0 |
+AVX_INSTR punpcklwd, mmx, 0, 0, 0 |
+AVX_INSTR punpckldq, mmx, 0, 0, 0 |
+AVX_INSTR punpcklqdq, sse2, 0, 0, 0 |
+AVX_INSTR pxor, mmx, 0, 0, 1 |
+AVX_INSTR rcpps, sse, 1, 0, 0 |
+AVX_INSTR rcpss, sse, 1, 0, 0 |
+AVX_INSTR roundpd, sse4 |
+AVX_INSTR roundps, sse4 |
+AVX_INSTR roundsd, sse4 |
+AVX_INSTR roundss, sse4 |
+AVX_INSTR rsqrtps, sse, 1, 0, 0 |
+AVX_INSTR rsqrtss, sse, 1, 0, 0 |
+AVX_INSTR shufpd, sse2, 1, 1, 0 |
+AVX_INSTR shufps, sse, 1, 1, 0 |
+AVX_INSTR sqrtpd, sse2, 1, 0, 0 |
+AVX_INSTR sqrtps, sse, 1, 0, 0 |
+AVX_INSTR sqrtsd, sse2, 1, 0, 0 |
+AVX_INSTR sqrtss, sse, 1, 0, 0 |
+AVX_INSTR stmxcsr, sse |
+AVX_INSTR subpd, sse2, 1, 0, 0 |
+AVX_INSTR subps, sse, 1, 0, 0 |
+AVX_INSTR subsd, sse2, 1, 0, 0 |
+AVX_INSTR subss, sse, 1, 0, 0 |
+AVX_INSTR ucomisd, sse2 |
+AVX_INSTR ucomiss, sse |
+AVX_INSTR unpckhpd, sse2, 1, 0, 0 |
+AVX_INSTR unpckhps, sse, 1, 0, 0 |
+AVX_INSTR unpcklpd, sse2, 1, 0, 0 |
+AVX_INSTR unpcklps, sse, 1, 0, 0 |
+AVX_INSTR xorpd, sse2, 1, 0, 1 |
+AVX_INSTR xorps, sse, 1, 0, 1 |
; 3DNow instructions, for sharing code between AVX, SSE and 3DN |
-AVX_INSTR pfadd, 1, 0, 1 |
-AVX_INSTR pfsub, 1, 0, 0 |
-AVX_INSTR pfmul, 1, 0, 1 |
+AVX_INSTR pfadd, 3dnow, 1, 0, 1 |
+AVX_INSTR pfsub, 3dnow, 1, 0, 0 |
+AVX_INSTR pfmul, 3dnow, 1, 0, 1 |
; base-4 constants for shuffles |
%assign i 0 |
@@ -1221,13 +1540,69 @@ AVX_INSTR pfmul, 1, 0, 1 |
%macro %1 4-7 %1, %2, %3 |
%if cpuflag(xop) |
v%5 %1, %2, %3, %4 |
- %else |
+ %elifnidn %1, %4 |
%6 %1, %2, %3 |
%7 %1, %4 |
+ %else |
+ %error non-xop emulation of ``%5 %1, %2, %3, %4'' is not supported |
%endif |
%endmacro |
%endmacro |
-FMA_INSTR pmacsdd, pmulld, paddd |
FMA_INSTR pmacsww, pmullw, paddw |
+FMA_INSTR pmacsdd, pmulld, paddd ; sse4 emulation |
+FMA_INSTR pmacsdql, pmuldq, paddq ; sse4 emulation |
FMA_INSTR pmadcswd, pmaddwd, paddd |
+ |
+; convert FMA4 to FMA3 if possible |
+%macro FMA4_INSTR 4 |
+ %macro %1 4-8 %1, %2, %3, %4 |
+ %if cpuflag(fma4) |
+ v%5 %1, %2, %3, %4 |
+ %elifidn %1, %2 |
+ v%6 %1, %4, %3 ; %1 = %1 * %3 + %4 |
+ %elifidn %1, %3 |
+ v%7 %1, %2, %4 ; %1 = %2 * %1 + %4 |
+ %elifidn %1, %4 |
+ v%8 %1, %2, %3 ; %1 = %2 * %3 + %1 |
+ %else |
+ %error fma3 emulation of ``%5 %1, %2, %3, %4'' is not supported |
+ %endif |
+ %endmacro |
+%endmacro |
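; For example (hypothetical registers), when only FMA3 is available, |
;       fmaddps  m0, m0, m1, m2 |
; expands to ``vfmadd132ps m0, m2, m1'' (m0 = m0*m1 + m2), whereas an FMA4 |
; build emits ``vfmaddps m0, m0, m1, m2'' directly. |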
+ |
+FMA4_INSTR fmaddpd, fmadd132pd, fmadd213pd, fmadd231pd |
+FMA4_INSTR fmaddps, fmadd132ps, fmadd213ps, fmadd231ps |
+FMA4_INSTR fmaddsd, fmadd132sd, fmadd213sd, fmadd231sd |
+FMA4_INSTR fmaddss, fmadd132ss, fmadd213ss, fmadd231ss |
+ |
+FMA4_INSTR fmaddsubpd, fmaddsub132pd, fmaddsub213pd, fmaddsub231pd |
+FMA4_INSTR fmaddsubps, fmaddsub132ps, fmaddsub213ps, fmaddsub231ps |
+FMA4_INSTR fmsubaddpd, fmsubadd132pd, fmsubadd213pd, fmsubadd231pd |
+FMA4_INSTR fmsubaddps, fmsubadd132ps, fmsubadd213ps, fmsubadd231ps |
+ |
+FMA4_INSTR fmsubpd, fmsub132pd, fmsub213pd, fmsub231pd |
+FMA4_INSTR fmsubps, fmsub132ps, fmsub213ps, fmsub231ps |
+FMA4_INSTR fmsubsd, fmsub132sd, fmsub213sd, fmsub231sd |
+FMA4_INSTR fmsubss, fmsub132ss, fmsub213ss, fmsub231ss |
+ |
+FMA4_INSTR fnmaddpd, fnmadd132pd, fnmadd213pd, fnmadd231pd |
+FMA4_INSTR fnmaddps, fnmadd132ps, fnmadd213ps, fnmadd231ps |
+FMA4_INSTR fnmaddsd, fnmadd132sd, fnmadd213sd, fnmadd231sd |
+FMA4_INSTR fnmaddss, fnmadd132ss, fnmadd213ss, fnmadd231ss |
+ |
+FMA4_INSTR fnmsubpd, fnmsub132pd, fnmsub213pd, fnmsub231pd |
+FMA4_INSTR fnmsubps, fnmsub132ps, fnmsub213ps, fnmsub231ps |
+FMA4_INSTR fnmsubsd, fnmsub132sd, fnmsub213sd, fnmsub231sd |
+FMA4_INSTR fnmsubss, fnmsub132ss, fnmsub213ss, fnmsub231ss |
+ |
+; workaround: vpbroadcastq is broken in x86_32 due to a yasm bug |
+%if ARCH_X86_64 == 0 |
+%macro vpbroadcastq 2 |
+%if sizeof%1 == 16 |
+ movddup %1, %2 |
+%else |
+ vbroadcastsd %1, %2 |
+%endif |
+%endmacro |
+%endif |