OLD | NEW |
1 /* | 1 /* |
2 * simple math operations | 2 * simple math operations |
3 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al | 3 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al |
4 * | 4 * |
5 * This file is part of FFmpeg. | 5 * This file is part of FFmpeg. |
6 * | 6 * |
7 * FFmpeg is free software; you can redistribute it and/or | 7 * FFmpeg is free software; you can redistribute it and/or |
8 * modify it under the terms of the GNU Lesser General Public | 8 * modify it under the terms of the GNU Lesser General Public |
9 * License as published by the Free Software Foundation; either | 9 * License as published by the Free Software Foundation; either |
10 * version 2.1 of the License, or (at your option) any later version. | 10 * version 2.1 of the License, or (at your option) any later version. |
(...skipping 60 matching lines...)
71 __asm__ volatile(\ | 71 __asm__ volatile(\ |
72 "cmpl %0, %3 \n\t"\ | 72 "cmpl %0, %3 \n\t"\ |
73 "cmovl %3, %0 \n\t"\ | 73 "cmovl %3, %0 \n\t"\ |
74 "cmovl %4, %1 \n\t"\ | 74 "cmovl %4, %1 \n\t"\ |
75 "cmovl %5, %2 \n\t"\ | 75 "cmovl %5, %2 \n\t"\ |
76 : "+&r" (x), "+&r" (a), "+r" (c)\ | 76 : "+&r" (x), "+&r" (a), "+r" (c)\ |
77 : "r" (y), "r" (b), "r" (d)\ | 77 : "r" (y), "r" (b), "r" (d)\ |
78 ); | 78 ); |
79 #endif | 79 #endif |
80 | 80 |
| 81 // avoid +32 for shift optimization (gcc should do that ...) |
| 82 #define NEG_SSR32 NEG_SSR32 |
| 83 static inline int32_t NEG_SSR32( int32_t a, int8_t s){ |
| 84     __asm__ ("sarl %1, %0\n\t" |
| 85          : "+r" (a) |
| 86          : "ic" ((uint8_t)(-s)) |
| 87     ); |
| 88     return a; |
| 89 } |
| 90 |
| 91 #define NEG_USR32 NEG_USR32 |
| 92 static inline uint32_t NEG_USR32(uint32_t a, int8_t s){ |
| 93     __asm__ ("shrl %1, %0\n\t" |
| 94          : "+r" (a) |
| 95          : "ic" ((uint8_t)(-s)) |
| 96     ); |
| 97     return a; |
| 98 } |
| 99 |
81 #endif /* AVCODEC_X86_MATHOPS_H */ | 100 #endif /* AVCODEC_X86_MATHOPS_H */ |
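Note (illustration, not part of the patch): NEG_SSR32(a, s) and NEG_USR32(a, s) implement a >> (32 - s) with an arithmetic and a logical right shift, respectively. Because the x86 sarl/shrl instructions only use the low 5 bits of the shift count for 32-bit operands, shifting by (uint8_t)(-s) is equivalent to shifting by 32 - s; passing -s directly is the "avoid +32" trick the comment refers to, since it saves the explicit subtraction. The sketch below is a portable reference under stated assumptions, not part of mathops.h: the names neg_ssr32_ref and neg_usr32_ref are made up for this note, and a sign-extending >> on negative signed values (the common two's-complement behaviour) is assumed.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical portable reference for NEG_SSR32: arithmetic shift by 32 - s. */
static int32_t neg_ssr32_ref(int32_t a, int8_t s)
{
    return a >> (32 - s);
}

/* Hypothetical portable reference for NEG_USR32: logical shift by 32 - s. */
static uint32_t neg_usr32_ref(uint32_t a, int8_t s)
{
    return a >> (32 - s);
}

int main(void)
{
    uint32_t v = 0xF0000001u;

    /* Top 4 bits of v, zero-extended ... */
    assert(neg_usr32_ref(v, 4) == 0xFu);
    /* ... versus sign-extended: the bit pattern 1111 becomes -1
     * (two's-complement wrap assumed for the cast). */
    assert(neg_ssr32_ref((int32_t)v, 4) == -1);

    /* Why the asm can use -s: for 32-bit operands, x86 shifts mask the
     * count to 5 bits, and (-s) mod 32 == 32 - s for 1 <= s <= 31. */
    for (int8_t s = 1; s <= 31; s++)
        assert(((uint8_t)(-s) & 31) == 32 - s);

    printf("0x%X %d\n", (unsigned)neg_usr32_ref(v, 4),
           (int)neg_ssr32_ref((int32_t)v, 4));
    return 0;
}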