| OLD | NEW |
| 1 /*********************************************************************** | 1 /*********************************************************************** |
| 2 Copyright (C) 2013 Xiph.Org Foundation and contributors. | 2 Copyright (C) 2013 Xiph.Org Foundation and contributors. |
| 3 Redistribution and use in source and binary forms, with or without | 3 Redistribution and use in source and binary forms, with or without |
| 4 modification, are permitted provided that the following conditions | 4 modification, are permitted provided that the following conditions |
| 5 are met: | 5 are met: |
| 6 - Redistributions of source code must retain the above copyright notice, | 6 - Redistributions of source code must retain the above copyright notice, |
| 7 this list of conditions and the following disclaimer. | 7 this list of conditions and the following disclaimer. |
| 8 - Redistributions in binary form must reproduce the above copyright | 8 - Redistributions in binary form must reproduce the above copyright |
| 9 notice, this list of conditions and the following disclaimer in the | 9 notice, this list of conditions and the following disclaimer in the |
| 10 documentation and/or other materials provided with the distribution. | 10 documentation and/or other materials provided with the distribution. |
| (...skipping 10 matching lines...) Expand all Loading... |
| 21 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | 21 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
| 22 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | 22 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
| 23 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | 23 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| 24 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | 24 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
| 25 POSSIBILITY OF SUCH DAMAGE. | 25 POSSIBILITY OF SUCH DAMAGE. |
| 26 ***********************************************************************/ | 26 ***********************************************************************/ |
| 27 | 27 |
| 28 #ifndef SILK_MACROS_ARMv4_H | 28 #ifndef SILK_MACROS_ARMv4_H |
| 29 #define SILK_MACROS_ARMv4_H | 29 #define SILK_MACROS_ARMv4_H |
| 30 | 30 |
/* This macro only avoids the undefined behaviour from a left shift of
   a negative value. It should only be used in macros that can't include
   SigProc_FIX.h. In other cases, use silk_LSHIFT32(). */
/* Why: in C, left-shifting a negative signed value is undefined behaviour
   (C11 6.5.7); shifting in an unsigned type and converting back is the
   portable way to get the intended two's-complement result. */
#define SAFE_SHL(a,b) ((opus_int32)((opus_uint32)(a) << (b)))
/* (a32 * (opus_int32)((opus_int16)(b32))) >> 16 output have to be 32bit int */
#undef silk_SMULWB
static OPUS_INLINE opus_int32 silk_SMULWB_armv4(opus_int32 a, opus_int16 b)
{
  unsigned rd_lo;
  int rd_hi;
  /* SMULL computes the full 64-bit signed product into {rd_hi:rd_lo}.
     By pre-shifting b left 16 (via SAFE_SHL to avoid UB on negative b),
     the high word rd_hi alone equals (a * b) >> 16 — rd_lo is discarded.
     "=&r" marks early-clobber outputs (SMULL forbids Rd == Rm on ARMv4);
     "%r" tells the compiler the two multiplicands may be commuted. */
  __asm__(
      "#silk_SMULWB\n\t"
      "smull %0, %1, %2, %3\n\t"
      : "=&r"(rd_lo), "=&r"(rd_hi)
      : "%r"(a), "r"(SAFE_SHL(b,16))
  );
  return rd_hi;
}
#define silk_SMULWB(a, b) (silk_SMULWB_armv4(a, b))
| 46 | 51 |
/* a32 + (b32 * (opus_int32)((opus_int16)(c32))) >> 16 output have to be 32bit int */
#undef silk_SMLAWB
/* Multiply-accumulate: delegates the (b * c) >> 16 part to silk_SMULWB
   above and adds the accumulator in plain C (no SMLAL needed here). */
#define silk_SMLAWB(a, b, c) ((a) + silk_SMULWB(b, c))
| 50 | 55 |
| 51 /* (a32 * (b32 >> 16)) >> 16 */ | 56 /* (a32 * (b32 >> 16)) >> 16 */ |
| (...skipping 21 matching lines...) Expand all Loading... |
/* (a32 * b32) >> 16, keeping the low 32 bits of the result. */
static OPUS_INLINE opus_int32 silk_SMULWW_armv4(opus_int32 a, opus_int32 b)
{
  unsigned rd_lo;
  int rd_hi;
  /* Full 64-bit product in {rd_hi:rd_lo}; the >> 16 is then recombined
     from both halves: high word shifted up 16 (SAFE_SHL avoids UB when
     rd_hi is negative) plus the top 16 bits of the unsigned low word. */
  __asm__(
      "#silk_SMULWW\n\t"
      "smull %0, %1, %2, %3\n\t"
      : "=&r"(rd_lo), "=&r"(rd_hi)
      : "%r"(a), "r"(b)
  );
  return SAFE_SHL(rd_hi,16)+(rd_lo>>16);
}
#define silk_SMULWW(a, b) (silk_SMULWW_armv4(a, b))
| 86 | 91 |
#undef silk_SMLAWW
/* a32 + ((b32 * c32) >> 16) — same recombination as silk_SMULWW, with the
   accumulator added in C rather than using a multiply-accumulate insn. */
static OPUS_INLINE opus_int32 silk_SMLAWW_armv4(opus_int32 a, opus_int32 b,
 opus_int32 c)
{
  unsigned rd_lo;
  int rd_hi;
  /* 64-bit product of b and c in {rd_hi:rd_lo}; see silk_SMULWW for why
     SAFE_SHL is needed on the signed high word. */
  __asm__(
      "#silk_SMLAWW\n\t"
      "smull %0, %1, %2, %3\n\t"
      : "=&r"(rd_lo), "=&r"(rd_hi)
      : "%r"(b), "r"(c)
  );
  return a+SAFE_SHL(rd_hi,16)+(rd_lo>>16);
}
#define silk_SMLAWW(a, b, c) (silk_SMLAWW_armv4(a, b, c))

/* SAFE_SHL is a local helper only; keep it out of the global namespace. */
#undef SAFE_SHL
| 103 #endif /* SILK_MACROS_ARMv4_H */ | 110 #endif /* SILK_MACROS_ARMv4_H */ |
| OLD | NEW |