/* Copyright (C) 2007-2009 Xiph.Org Foundation
   Copyright (C) 2003-2008 Jean-Marc Valin
   Copyright (C) 2007-2008 CSIRO */
/**
   @file fixed_generic_mipsr1.h
   @brief MIPS-specific fixed-point operations
*/
/*
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   - Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

   - Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
   OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef CELT_FIXED_GENERIC_MIPSR1_H
#define CELT_FIXED_GENERIC_MIPSR1_H

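/* The definitions below override the portable C macros of the same name
   with MIPS32r2 DSP ASE inline assembly.  MULT/MADD/MSUB on the $ac1
   accumulator form 64-bit products and multiply-accumulate sums without
   intermediate truncation; EXTR.W / EXTR_R.W extract the accumulator
   shifted right by a constant, without / with rounding; SHRA_R.W /
   SHRAV_R.W are rounding arithmetic right shifts by an immediate /
   register-held amount.  Each group of `asm volatile` statements relies
   on volatile ordering to keep the MULT and the following MADD/MSUB/EXTR
   together, with $ac1 untouched in between. */

/* MULT16_32_Q15_ADD(a, b, c, d): (a*b + c*d) >> 15, i.e. a fused pair of
   Q15 multiplies accumulated in 64 bits before the final shift. */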
#undef MULT16_32_Q15_ADD
static inline int MULT16_32_Q15_ADD(int a, int b, int c, int d) {
    int m;
    asm volatile("MULT $ac1, %0, %1" : : "r" ((int)a), "r" ((int)b));
    asm volatile("MADD $ac1, %0, %1" : : "r" ((int)c), "r" ((int)d));
    asm volatile("EXTR.W %0, $ac1, %1" : "=r" (m) : "i" (15));
    return m;
}

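/* MULT16_32_Q15_SUB(a, b, c, d): (a*b - c*d) >> 15; MSUB subtracts the
   second product from the accumulator before extraction. */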
#undef MULT16_32_Q15_SUB
static inline int MULT16_32_Q15_SUB(int a, int b, int c, int d) {
    int m;
    asm volatile("MULT $ac1, %0, %1" : : "r" ((int)a), "r" ((int)b));
    asm volatile("MSUB $ac1, %0, %1" : : "r" ((int)c), "r" ((int)d));
    asm volatile("EXTR.W %0, $ac1, %1" : "=r" (m) : "i" (15));
    return m;
}

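/* MULT16_16_Q15_ADD(a, b, c, d): the same (a*b + c*d) >> 15 for
   16-bit-by-16-bit operands; the code is identical to the 16x32 case
   because MULT is a full 32x32 -> 64-bit multiply either way. */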
#undef MULT16_16_Q15_ADD
static inline int MULT16_16_Q15_ADD(int a, int b, int c, int d) {
    int m;
    asm volatile("MULT $ac1, %0, %1" : : "r" ((int)a), "r" ((int)b));
    asm volatile("MADD $ac1, %0, %1" : : "r" ((int)c), "r" ((int)d));
    asm volatile("EXTR.W %0, $ac1, %1" : "=r" (m) : "i" (15));
    return m;
}

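/* MULT16_16_Q15_SUB(a, b, c, d): (a*b - c*d) >> 15 for 16-bit operands. */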
#undef MULT16_16_Q15_SUB
static inline int MULT16_16_Q15_SUB(int a, int b, int c, int d) {
    int m;
    asm volatile("MULT $ac1, %0, %1" : : "r" ((int)a), "r" ((int)b));
    asm volatile("MSUB $ac1, %0, %1" : : "r" ((int)c), "r" ((int)d));
    asm volatile("EXTR.W %0, $ac1, %1" : "=r" (m) : "i" (15));
    return m;
}

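/* MULT16_32_Q16(a, b): (a*b) >> 16, truncating (EXTR.W does not round). */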
#undef MULT16_32_Q16
static inline int MULT16_32_Q16(int a, int b)
{
    int c;
    asm volatile("MULT $ac1, %0, %1" : : "r" (a), "r" (b));
    asm volatile("EXTR.W %0, $ac1, %1" : "=r" (c) : "i" (16));
    return c;
}

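/* MULT16_32_P16(a, b): (a*b) >> 16 with rounding; EXTR_R.W adds the
   rounding bit before the shift. */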
#undef MULT16_32_P16
static inline int MULT16_32_P16(int a, int b)
{
    int c;
    asm volatile("MULT $ac1, %0, %1" : : "r" (a), "r" (b));
    asm volatile("EXTR_R.W %0, $ac1, %1" : "=r" (c) : "i" (16));
    return c;
}

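/* MULT16_32_Q15(a, b): (a*b) >> 15, truncating. */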
#undef MULT16_32_Q15
static inline int MULT16_32_Q15(int a, int b)
{
    int c;
    asm volatile("MULT $ac1, %0, %1" : : "r" (a), "r" (b));
    asm volatile("EXTR.W %0, $ac1, %1" : "=r" (c) : "i" (15));
    return c;
}

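/* MULT32_32_Q31(a, b): full 32x32 -> 64-bit product shifted right by 31. */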
#undef MULT32_32_Q31
static inline int MULT32_32_Q31(int a, int b)
{
    int r;
    asm volatile("MULT $ac1, %0, %1" : : "r" (a), "r" (b));
    asm volatile("EXTR.W %0, $ac1, %1" : "=r" (r) : "i" (31));
    return r;
}

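/* PSHR32(a, shift): arithmetic right shift with rounding by a variable
   amount, roughly (a + (1 << (shift-1))) >> shift, via SHRAV_R.W. */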
#undef PSHR32
static inline int PSHR32(int a, int shift)
{
    int r;
    asm volatile("SHRAV_R.W %0, %1, %2" : "=r" (r) : "r" (a), "r" (shift));
    return r;
}

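/* MULT16_16_P15(a, b): (a*b + 16384) >> 15, a rounded Q15 multiply of two
   16-bit values; a plain 32-bit MUL suffices because the product of two
   16-bit operands fits in 32 bits, and SHRA_R.W does the rounding shift. */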
#undef MULT16_16_P15
static inline int MULT16_16_P15(int a, int b)
{
    int r;
    asm volatile("MUL %0, %1, %2" : "=r" (r) : "r" (a), "r" (b));
    asm volatile("SHRA_R.W %0, %1, %2" : "=r" (r) : "0" (r), "i" (15));
    return r;
}
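
/* Illustrative check (hypothetical values): with a = 16384 (0.5 in Q15)
   and b = 1000, MULT16_32_Q15(a, b) computes (16384 * 1000) >> 15 = 500,
   matching the portable shift-based definitions these routines replace. */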

#endif /* CELT_FIXED_GENERIC_MIPSR1_H */