| OLD | NEW |
| 1 /* Copyright (c) 2014, Cisco Systems, INC | 1 /* Copyright (c) 2014, Cisco Systems, INC |
| 2 Written by XiangMingZhu WeiZhou MinPeng YanWang | 2 Written by XiangMingZhu WeiZhou MinPeng YanWang |
| 3 | 3 |
| 4 Redistribution and use in source and binary forms, with or without | 4 Redistribution and use in source and binary forms, with or without |
| 5 modification, are permitted provided that the following conditions | 5 modification, are permitted provided that the following conditions |
| 6 are met: | 6 are met: |
| 7 | 7 |
| 8 - Redistributions of source code must retain the above copyright | 8 - Redistributions of source code must retain the above copyright |
| 9 notice, this list of conditions and the following disclaimer. | 9 notice, this list of conditions and the following disclaimer. |
| 10 | 10 |
| (...skipping 89 matching lines...) |
| 100 xmm_tempa = _mm_mul_epi32( coef_Q13_3210, xmm_tempa ); | 100 xmm_tempa = _mm_mul_epi32( coef_Q13_3210, xmm_tempa ); |
| 101 xmm_tempb = _mm_mul_epi32( coef_Q13_7654, xmm_tempb ); | 101 xmm_tempb = _mm_mul_epi32( coef_Q13_7654, xmm_tempb ); |
| 102 | 102 |
| 103 xmm_tempa = _mm_srli_epi64( xmm_tempa, 16 ); | 103 xmm_tempa = _mm_srli_epi64( xmm_tempa, 16 ); |
| 104 xmm_tempb = _mm_srli_epi64( xmm_tempb, 16 ); | 104 xmm_tempb = _mm_srli_epi64( xmm_tempb, 16 ); |
| 105 | 105 |
| 106 xmm_tempa = _mm_add_epi32( xmm_tempa, xmm_product1 ); | 106 xmm_tempa = _mm_add_epi32( xmm_tempa, xmm_product1 ); |
| 107 xmm_tempb = _mm_add_epi32( xmm_tempb, xmm_product2 ); | 107 xmm_tempb = _mm_add_epi32( xmm_tempb, xmm_product2 ); |
| 108 xmm_tempa = _mm_add_epi32( xmm_tempa, xmm_tempb ); | 108 xmm_tempa = _mm_add_epi32( xmm_tempa, xmm_tempb ); |
| 109 | 109 |
| 110 sum = (coef_Q13_8 * state_8) >> 16; | 110 sum = (opus_int32)((coef_Q13_8 * state_8) >> 16); |
| 111 sum += (coef_Q13_9 * state_9) >> 16; | 111 sum += (opus_int32)((coef_Q13_9 * state_9) >> 16); |
| 112 | 112 |
| 113 xmm_tempa = _mm_add_epi32( xmm_tempa, _mm_shuffle_epi32( xmm_tempa, _MM_SHUFFLE( 0, 0, 0, 2 ) ) ); | 113 xmm_tempa = _mm_add_epi32( xmm_tempa, _mm_shuffle_epi32( xmm_tempa, _MM_SHUFFLE( 0, 0, 0, 2 ) ) ); |
| 114 sum += _mm_cvtsi128_si32( xmm_tempa); | 114 sum += _mm_cvtsi128_si32( xmm_tempa); |
| 115 res_Q2[ n ] = silk_LSHIFT( (opus_int32)input[ n ], 2 ) - silk_RSHIFT_ROUND( ( 5 + sum ), 9); | 115 res_Q2[ n ] = silk_LSHIFT( (opus_int32)input[ n ], 2 ) - silk_RSHIFT_ROUND( ( 5 + sum ), 9); |
| 116 | 116 |
| 117 /* move right */ | 117 /* move right */ |
| 118 state_a = state_9; | 118 state_a = state_9; |
| 119 state_9 = state_8; | 119 state_9 = state_8; |
| 120 state_8 = _mm_cvtsi128_si32( state_4567 ); | 120 state_8 = _mm_cvtsi128_si32( state_4567 ); |
| 121 state_4567 = _mm_alignr_epi8( state_0123, state_4567, 4 ); | 121 state_4567 = _mm_alignr_epi8( state_0123, state_4567, 4 ); |
| (...skipping 29 matching lines...) |
| 151 /* Output of allpass section */ | 151 /* Output of allpass section */ |
| 152 tmp1 = silk_SMLAWB( state[ i + 1 ], state[ i + 2 ] - tmp2, lambda_Q16 ); | 152 tmp1 = silk_SMLAWB( state[ i + 1 ], state[ i + 2 ] - tmp2, lambda_Q16 ); |
| 153 state[ i + 1 ] = tmp2; | 153 state[ i + 1 ] = tmp2; |
| 154 acc_Q11 = silk_SMLAWB( acc_Q11, tmp2, coef_Q13[ i ] ); | 154 acc_Q11 = silk_SMLAWB( acc_Q11, tmp2, coef_Q13[ i ] ); |
| 155 } | 155 } |
| 156 state[ order ] = tmp1; | 156 state[ order ] = tmp1; |
| 157 acc_Q11 = silk_SMLAWB( acc_Q11, tmp1, coef_Q13[ order - 1 ] ); | 157 acc_Q11 = silk_SMLAWB( acc_Q11, tmp1, coef_Q13[ order - 1 ] ); |
| 158 res_Q2[ n ] = silk_LSHIFT( (opus_int32)input[ n ], 2 ) - silk_RSHIFT_ROUND( acc_Q11, 9 ); | 158 res_Q2[ n ] = silk_LSHIFT( (opus_int32)input[ n ], 2 ) - silk_RSHIFT_ROUND( acc_Q11, 9 ); |
| 159 } | 159 } |
| 160 } | 160 } |
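Context for the hunk above: the only functional change is the explicit (opus_int32) cast added on the NEW side of lines 110-111, which makes the narrowing of the 64-bit Q13 product explicit before it is accumulated into sum. Below is a minimal standalone C sketch of that operation, assuming coef_Q13_8/coef_Q13_9 and state_8/state_9 are 64-bit operands as in the surrounding SSE4.1 code; the helper name, typedefs, and test values are hypothetical, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

typedef int32_t opus_int32;   /* stand-ins for the opus_types.h typedefs */
typedef int64_t opus_int64;

/* Hypothetical helper illustrating lines 110-111: multiply a Q13
 * coefficient by a filter state in 64-bit arithmetic, shift the product
 * down by 16, and narrow the result to 32 bits. The explicit
 * (opus_int32) cast is what the NEW side adds; the arithmetic itself
 * is unchanged. */
static opus_int32 mul_q13_srl16( opus_int64 coef_Q13, opus_int64 state )
{
    return (opus_int32)( ( coef_Q13 * state ) >> 16 );
}

int main(void)
{
    /* Illustrative values only, not taken from the patch. */
    opus_int64 coef_Q13_8 = 5120;      /* about 0.625 in Q13 */
    opus_int64 coef_Q13_9 = -2048;     /* about -0.25 in Q13 */
    opus_int64 state_8    = 1 << 20;
    opus_int64 state_9    = 1 << 19;

    opus_int32 sum;
    sum  = mul_q13_srl16( coef_Q13_8, state_8 );
    sum += mul_q13_srl16( coef_Q13_9, state_9 );
    printf( "sum = %d\n", sum );
    return 0;
}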