Index: source/libvpx/vpx_ports/x86.h
===================================================================
--- source/libvpx/vpx_ports/x86.h	(revision 172621)
+++ source/libvpx/vpx_ports/x86.h	(working copy)
@@ -14,78 +14,77 @@
 #include <stdlib.h>
 #include "vpx_config.h"
 
-typedef enum
-{
-    VPX_CPU_UNKNOWN = -1,
-    VPX_CPU_AMD,
-    VPX_CPU_AMD_OLD,
-    VPX_CPU_CENTAUR,
-    VPX_CPU_CYRIX,
-    VPX_CPU_INTEL,
-    VPX_CPU_NEXGEN,
-    VPX_CPU_NSC,
-    VPX_CPU_RISE,
-    VPX_CPU_SIS,
-    VPX_CPU_TRANSMETA,
-    VPX_CPU_TRANSMETA_OLD,
-    VPX_CPU_UMC,
-    VPX_CPU_VIA,
+typedef enum {
+  VPX_CPU_UNKNOWN = -1,
+  VPX_CPU_AMD,
+  VPX_CPU_AMD_OLD,
+  VPX_CPU_CENTAUR,
+  VPX_CPU_CYRIX,
+  VPX_CPU_INTEL,
+  VPX_CPU_NEXGEN,
+  VPX_CPU_NSC,
+  VPX_CPU_RISE,
+  VPX_CPU_SIS,
+  VPX_CPU_TRANSMETA,
+  VPX_CPU_TRANSMETA_OLD,
+  VPX_CPU_UMC,
+  VPX_CPU_VIA,
 
-    VPX_CPU_LAST
+  VPX_CPU_LAST
 } vpx_cpu_t;
 
 #if defined(__GNUC__) && __GNUC__
 #if ARCH_X86_64
 #define cpuid(func,ax,bx,cx,dx)\
-    __asm__ __volatile__ (\
-        "cpuid \n\t" \
-        : "=a" (ax), "=b" (bx), "=c" (cx), "=d" (dx) \
-        : "a" (func));
+  __asm__ __volatile__ (\
+    "cpuid \n\t" \
+    : "=a" (ax), "=b" (bx), "=c" (cx), "=d" (dx) \
+    : "a" (func));
 #else
 #define cpuid(func,ax,bx,cx,dx)\
-    __asm__ __volatile__ (\
-        "mov %%ebx, %%edi \n\t" \
-        "cpuid \n\t" \
-        "xchg %%edi, %%ebx \n\t" \
-        : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
-        : "a" (func));
+  __asm__ __volatile__ (\
+    "mov %%ebx, %%edi \n\t" \
+    "cpuid \n\t" \
+    "xchg %%edi, %%ebx \n\t" \
+    : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
+    : "a" (func));
 #endif
 #elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
 #if ARCH_X86_64
 #define cpuid(func,ax,bx,cx,dx)\
-    asm volatile (\
-        "xchg %rsi, %rbx \n\t" \
-        "cpuid \n\t" \
-        "movl %ebx, %edi \n\t" \
-        "xchg %rsi, %rbx \n\t" \
-        : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
-        : "a" (func));
+  asm volatile (\
+    "xchg %rsi, %rbx \n\t" \
+    "cpuid \n\t" \
+    "movl %ebx, %edi \n\t" \
+    "xchg %rsi, %rbx \n\t" \
+    : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
+    : "a" (func));
 #else
 #define cpuid(func,ax,bx,cx,dx)\
-    asm volatile (\
-        "pushl %ebx \n\t" \
-        "cpuid \n\t" \
-        "movl %ebx, %edi \n\t" \
-        "popl %ebx \n\t" \
-        : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
-        : "a" (func));
+  asm volatile (\
+    "pushl %ebx \n\t" \
+    "cpuid \n\t" \
+    "movl %ebx, %edi \n\t" \
+    "popl %ebx \n\t" \
+    : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
+    : "a" (func));
 #endif
 #else
 #if ARCH_X86_64
 void __cpuid(int CPUInfo[4], int info_type);
 #pragma intrinsic(__cpuid)
 #define cpuid(func,a,b,c,d) do{\
-        int regs[4];\
-        __cpuid(regs,func); a=regs[0]; b=regs[1]; c=regs[2]; d=regs[3];\
-    } while(0)
+    int regs[4];\
+    __cpuid(regs,func); a=regs[0]; b=regs[1]; c=regs[2]; d=regs[3];\
+  } while(0)
 #else
 #define cpuid(func,a,b,c,d)\
-    __asm mov eax, func\
-    __asm cpuid\
-    __asm mov a, eax\
-    __asm mov b, ebx\
-    __asm mov c, ecx\
-    __asm mov d, edx
+  __asm mov eax, func\
+  __asm cpuid\
+  __asm mov a, eax\
+  __asm mov b, ebx\
+  __asm mov c, ecx\
+  __asm mov d, edx
 #endif
 #endif
 
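
Note on the hunk above: it is part of libvpx's conversion to its new two-space house style, so opening braces join the declaration line and bodies are reindented. The 32-bit GCC cpuid variant moves %ebx to %edi around the CPUID instruction because %ebx holds the GOT pointer in PIC builds and may not be declared clobbered. A minimal usage sketch of the macro as defined above, with illustrative variable names:

    unsigned int reg_eax, reg_ebx, reg_ecx, reg_edx;
    cpuid(0, reg_eax, reg_ebx, reg_ecx, reg_edx);    /* leaf 0: eax = highest leaf;  */
                                                     /* ebx:edx:ecx = vendor string  */
    if (reg_eax >= 1)
      cpuid(1, reg_eax, reg_ebx, reg_ecx, reg_edx);  /* leaf 1: features in ecx/edx  */
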
@@ -100,47 +99,46 @@
 #endif
 
 static int
-x86_simd_caps(void)
-{
-    unsigned int flags = 0;
-    unsigned int mask = ~0;
-    unsigned int reg_eax, reg_ebx, reg_ecx, reg_edx;
-    char *env;
-    (void)reg_ebx;
+x86_simd_caps(void) {
+  unsigned int flags = 0;
+  unsigned int mask = ~0;
+  unsigned int reg_eax, reg_ebx, reg_ecx, reg_edx;
+  char *env;
+  (void)reg_ebx;
 
-    /* See if the CPU capabilities are being overridden by the environment */
-    env = getenv("VPX_SIMD_CAPS");
+  /* See if the CPU capabilities are being overridden by the environment */
+  env = getenv("VPX_SIMD_CAPS");
 
-    if (env && *env)
-        return (int)strtol(env, NULL, 0);
+  if (env && *env)
+    return (int)strtol(env, NULL, 0);
 
-    env = getenv("VPX_SIMD_CAPS_MASK");
+  env = getenv("VPX_SIMD_CAPS_MASK");
 
-    if (env && *env)
-        mask = strtol(env, NULL, 0);
+  if (env && *env)
+    mask = strtol(env, NULL, 0);
 
-    /* Ensure that the CPUID instruction supports extended features */
-    cpuid(0, reg_eax, reg_ebx, reg_ecx, reg_edx);
+  /* Ensure that the CPUID instruction supports extended features */
+  cpuid(0, reg_eax, reg_ebx, reg_ecx, reg_edx);
 
-    if (reg_eax < 1)
-        return 0;
+  if (reg_eax < 1)
+    return 0;
 
-    /* Get the standard feature flags */
-    cpuid(1, reg_eax, reg_ebx, reg_ecx, reg_edx);
+  /* Get the standard feature flags */
+  cpuid(1, reg_eax, reg_ebx, reg_ecx, reg_edx);
 
-    if (reg_edx & BIT(23)) flags |= HAS_MMX;
+  if (reg_edx & BIT(23)) flags |= HAS_MMX;
 
-    if (reg_edx & BIT(25)) flags |= HAS_SSE; /* aka xmm */
+  if (reg_edx & BIT(25)) flags |= HAS_SSE; /* aka xmm */
 
-    if (reg_edx & BIT(26)) flags |= HAS_SSE2; /* aka wmt */
+  if (reg_edx & BIT(26)) flags |= HAS_SSE2; /* aka wmt */
 
-    if (reg_ecx & BIT(0)) flags |= HAS_SSE3;
+  if (reg_ecx & BIT(0)) flags |= HAS_SSE3;
 
-    if (reg_ecx & BIT(9)) flags |= HAS_SSSE3;
+  if (reg_ecx & BIT(9)) flags |= HAS_SSSE3;
 
-    if (reg_ecx & BIT(19)) flags |= HAS_SSE4_1;
+  if (reg_ecx & BIT(19)) flags |= HAS_SSE4_1;
 
-    return flags & mask;
+  return flags & mask;
 }
 
 vpx_cpu_t vpx_x86_vendor(void);
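
x86_simd_caps() consults the environment before touching CPUID: VPX_SIMD_CAPS replaces the detected flags outright, while VPX_SIMD_CAPS_MASK is ANDed with them, which makes it easy to force the plain C code paths when debugging SIMD kernels. A sketch of how a caller might dispatch on the result; the HAS_* flags come from this header, the filter functions are hypothetical:

    #include "vpx_ports/x86.h"

    void apply_filter(void) {
      if (x86_simd_caps() & HAS_SSE2)
        apply_filter_sse2();  /* hypothetical SIMD kernel */
      else
        apply_filter_c();     /* hypothetical C fallback  */
    }

Running with VPX_SIMD_CAPS_MASK=0 in the environment exercises apply_filter_c() even on SSE2-capable hardware.
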
@@ -150,21 +148,20 @@
 #pragma intrinsic(__rdtsc)
 #endif
 static unsigned int
-x86_readtsc(void)
-{
+x86_readtsc(void) {
 #if defined(__GNUC__) && __GNUC__
-    unsigned int tsc;
-    __asm__ __volatile__("rdtsc\n\t":"=a"(tsc):);
-    return tsc;
+  unsigned int tsc;
+  __asm__ __volatile__("rdtsc\n\t":"=a"(tsc):);
+  return tsc;
 #elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
-    unsigned int tsc;
-    asm volatile("rdtsc\n\t":"=a"(tsc):);
-    return tsc;
+  unsigned int tsc;
+  asm volatile("rdtsc\n\t":"=a"(tsc):);
+  return tsc;
 #else
 #if ARCH_X86_64
-    return (unsigned int)__rdtsc();
+  return (unsigned int)__rdtsc();
 #else
-    __asm rdtsc;
+  __asm rdtsc;
 #endif
 #endif
 }
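
x86_readtsc() returns only the low 32 bits of the time-stamp counter, so it suits timing short code sequences; unsigned subtraction still yields the correct delta across a single wraparound. A sketch, where do_work() stands in for the code under measurement:

    unsigned int t0 = x86_readtsc();
    do_work();                                /* illustrative workload */
    unsigned int cycles = x86_readtsc() - t0;
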
@@ -172,31 +169,29 @@
 
 #if defined(__GNUC__) && __GNUC__
 #define x86_pause_hint()\
-    __asm__ __volatile__ ("pause \n\t")
+  __asm__ __volatile__ ("pause \n\t")
 #elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
 #define x86_pause_hint()\
-    asm volatile ("pause \n\t")
+  asm volatile ("pause \n\t")
 #else
 #if ARCH_X86_64
 #define x86_pause_hint()\
-    _mm_pause();
+  _mm_pause();
 #else
 #define x86_pause_hint()\
-    __asm pause
+  __asm pause
 #endif
 #endif
 
 #if defined(__GNUC__) && __GNUC__
 static void
-x87_set_control_word(unsigned short mode)
-{
-    __asm__ __volatile__("fldcw %0" : : "m"(*&mode));
+x87_set_control_word(unsigned short mode) {
+  __asm__ __volatile__("fldcw %0" : : "m"( *&mode));
 }
 static unsigned short
-x87_get_control_word(void)
-{
-    unsigned short mode;
-    __asm__ __volatile__("fstcw %0\n\t":"=m"(*&mode):);
+x87_get_control_word(void) {
+  unsigned short mode;
+  __asm__ __volatile__("fstcw %0\n\t":"=m"( *&mode):);
     return mode;
 }
 #elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
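
x86_pause_hint() emits the PAUSE instruction (or _mm_pause()), which tells the processor it is inside a spin-wait loop, cutting power draw and the memory-order mis-speculation penalty on loop exit. Typical use, sketched with an illustrative flag:

    while (!*done_flag)  /* busy-wait for another thread */
      x86_pause_hint();
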
@@ -210,8 +205,19 @@
 {
     unsigned short mode;
     asm volatile("fstcw %0\n\t":"=m"(*&mode):);
-    return mode;
+  return mode;
 }
+#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+static void
+x87_set_control_word(unsigned short mode) {
+  asm volatile("fldcw %0" : : "m"( *&mode));
+}
+static unsigned short
+x87_get_control_word(void) {
+  unsigned short mode;
+  asm volatile("fstcw %0\n\t":"=m"( *&mode):);
+  return mode;
+}
 #elif ARCH_X86_64
 /* No fldcw intrinsics on Windows x64, punt to external asm */
 extern void vpx_winx64_fldcw(unsigned short mode);
@@ -220,25 +226,22 @@
 #define x87_get_control_word vpx_winx64_fstcw
 #else
 static void
-x87_set_control_word(unsigned short mode)
-{
-    __asm { fldcw mode }
+x87_set_control_word(unsigned short mode) {
+  __asm { fldcw mode }
 }
 static unsigned short
-x87_get_control_word(void)
-{
-    unsigned short mode;
-    __asm { fstcw mode }
-    return mode;
+x87_get_control_word(void) {
+  unsigned short mode;
+  __asm { fstcw mode }
+  return mode;
 }
 #endif
 
 static unsigned short
-x87_set_double_precision(void)
-{
-    unsigned short mode = x87_get_control_word();
-    x87_set_control_word((mode&~0x300) | 0x200);
-    return mode;
+x87_set_double_precision(void) {
+  unsigned short mode = x87_get_control_word();
+  x87_set_control_word((mode&~0x300) | 0x200);
+  return mode;
 }
 
 
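Two remarks on the x87 hunks. First, the hunk at -210 adds a second #elif defined(__SUNPRO_C) || defined(__SUNPRO_CC) branch while leaving the first in place; since the preprocessor compiles only the first matching branch, the newly added block is dead code. Second, x87_set_double_precision() sets the control word's precision-control field (bits 8-9) to 10b, pinning the FPU to 53-bit (double) precision, and returns the previous word so callers can restore it. The usual save/compute/restore pattern, sketched:

    unsigned short saved = x87_set_double_precision();
    /* ... x87-sensitive floating-point work ... */
    x87_set_control_word(saved);  /* restore the caller's mode */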