Index: silk/x86/NSQ_del_dec_sse.c |
diff --git a/silk/NSQ_del_dec.c b/silk/x86/NSQ_del_dec_sse.c |
similarity index 57% |
copy from silk/NSQ_del_dec.c |
copy to silk/x86/NSQ_del_dec_sse.c |
index 522be406639ef1e3f138332c10b6bb54fc1c6cfe..21d4a8bc1e960af9a95ab202cc951d14c04601b3 100644 |
--- a/silk/NSQ_del_dec.c |
+++ b/silk/x86/NSQ_del_dec_sse.c |
@@ -1,35 +1,40 @@ |
-/*********************************************************************** |
-Copyright (c) 2006-2011, Skype Limited. All rights reserved. |
-Redistribution and use in source and binary forms, with or without |
-modification, are permitted provided that the following conditions |
-are met: |
-- Redistributions of source code must retain the above copyright notice, |
-this list of conditions and the following disclaimer. |
-- Redistributions in binary form must reproduce the above copyright |
-notice, this list of conditions and the following disclaimer in the |
-documentation and/or other materials provided with the distribution. |
-- Neither the name of Internet Society, IETF or IETF Trust, nor the |
-names of specific contributors, may be used to endorse or promote |
-products derived from this software without specific prior written |
-permission. |
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
-POSSIBILITY OF SUCH DAMAGE. |
-***********************************************************************/ |
+/* Copyright (c) 2014, Cisco Systems, INC |
+ Written by XiangMingZhu WeiZhou MinPeng YanWang |
+ |
+ Redistribution and use in source and binary forms, with or without |
+ modification, are permitted provided that the following conditions |
+ are met: |
+ |
+ - Redistributions of source code must retain the above copyright |
+ notice, this list of conditions and the following disclaimer. |
+ |
+ - Redistributions in binary form must reproduce the above copyright |
+ notice, this list of conditions and the following disclaimer in the |
+ documentation and/or other materials provided with the distribution. |
+ |
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
+ ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER |
+ OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF |
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
+*/ |
#ifdef HAVE_CONFIG_H |
#include "config.h" |
#endif |
+#include <xmmintrin.h> |
+#include <emmintrin.h> |
+#include <smmintrin.h> |
#include "main.h" |
+#include "celt/x86/x86cpu.h" |
+ |
#include "stack_alloc.h" |
typedef struct { |
@@ -57,7 +62,7 @@ typedef struct { |
typedef NSQ_sample_struct NSQ_sample_pair[ 2 ]; |
-static OPUS_INLINE void silk_nsq_del_dec_scale_states( |
+static OPUS_INLINE void silk_nsq_del_dec_scale_states_sse4_1( |
const silk_encoder_state *psEncC, /* I Encoder State */ |
silk_nsq_state *NSQ, /* I/O NSQ state */ |
NSQ_del_dec_struct psDelDec[], /* I/O Delayed decision states */ |
@@ -77,7 +82,7 @@ static OPUS_INLINE void silk_nsq_del_dec_scale_states( |
/******************************************/ |
/* Noise shape quantizer for one subframe */ |
/******************************************/ |
-static OPUS_INLINE void silk_noise_shape_quantizer_del_dec( |
+static OPUS_INLINE void silk_noise_shape_quantizer_del_dec_sse4_1( |
silk_nsq_state *NSQ, /* I/O NSQ state */ |
NSQ_del_dec_struct psDelDec[], /* I/O Delayed decision states */ |
opus_int signalType, /* I Signal type */ |
@@ -106,7 +111,7 @@ static OPUS_INLINE void silk_noise_shape_quantizer_del_dec( |
opus_int decisionDelay /* I */ |
); |
-void silk_NSQ_del_dec( |
+void silk_NSQ_del_dec_sse4_1( |
const silk_encoder_state *psEncC, /* I/O Encoder State */ |
silk_nsq_state *NSQ, /* I/O NSQ state */ |
SideInfoIndices *psIndices, /* I/O Quantization Indices */ |
@@ -244,17 +249,17 @@ void silk_NSQ_del_dec( |
silk_assert( start_idx > 0 ); |
silk_LPC_analysis_filter( &sLTP[ start_idx ], &NSQ->xq[ start_idx + k * psEncC->subfr_length ], |
- A_Q12, psEncC->ltp_mem_length - start_idx, psEncC->predictLPCOrder ); |
+ A_Q12, psEncC->ltp_mem_length - start_idx, psEncC->predictLPCOrder, psEncC->arch ); |
NSQ->sLTP_buf_idx = psEncC->ltp_mem_length; |
NSQ->rewhite_flag = 1; |
} |
} |
- silk_nsq_del_dec_scale_states( psEncC, NSQ, psDelDec, x_Q3, x_sc_Q10, sLTP, sLTP_Q15, k, |
+ silk_nsq_del_dec_scale_states_sse4_1( psEncC, NSQ, psDelDec, x_Q3, x_sc_Q10, sLTP, sLTP_Q15, k, |
psEncC->nStatesDelayedDecision, LTP_scale_Q14, Gains_Q16, pitchL, psIndices->signalType, decisionDelay ); |
- silk_noise_shape_quantizer_del_dec( NSQ, psDelDec, psIndices->signalType, x_sc_Q10, pulses, pxq, sLTP_Q15, |
+ silk_noise_shape_quantizer_del_dec_sse4_1( NSQ, psDelDec, psIndices->signalType, x_sc_Q10, pulses, pxq, sLTP_Q15, |
delayedGain_Q10, A_Q12, B_Q14, AR_shp_Q13, lag, HarmShapeFIRPacked_Q14, Tilt_Q14[ k ], LF_shp_Q14[ k ], |
Gains_Q16[ k ], Lambda_Q10, offset_Q10, psEncC->subfr_length, subfr++, psEncC->shapingLPCOrder, |
psEncC->predictLPCOrder, psEncC->warping_Q16, psEncC->nStatesDelayedDecision, &smpl_buf_idx, decisionDelay ); |
@@ -303,7 +308,7 @@ void silk_NSQ_del_dec( |
/******************************************/ |
/* Noise shape quantizer for one subframe */ |
/******************************************/ |
-static OPUS_INLINE void silk_noise_shape_quantizer_del_dec( |
+static OPUS_INLINE void silk_noise_shape_quantizer_del_dec_sse4_1( |
silk_nsq_state *NSQ, /* I/O NSQ state */ |
NSQ_del_dec_struct psDelDec[], /* I/O Delayed decision states */ |
opus_int signalType, /* I Signal type */ |
@@ -342,6 +347,9 @@ static OPUS_INLINE void silk_noise_shape_quantizer_del_dec( |
VARDECL( NSQ_sample_pair, psSampleState ); |
NSQ_del_dec_struct *psDD; |
NSQ_sample_struct *psSS; |
+ |
+ __m128i a_Q12_0123, a_Q12_4567, a_Q12_89AB, a_Q12_CDEF; |
+ __m128i b_Q12_0123, b_sr_Q12_0123; |
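+ /* note: despite the _Q12 suffix these hold the Q14 LTP coefficients loaded from b_Q14[] */ |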
SAVE_STACK; |
silk_assert( nStatesDelayedDecision > 0 ); |
@@ -351,6 +359,18 @@ static OPUS_INLINE void silk_noise_shape_quantizer_del_dec( |
pred_lag_ptr = &sLTP_Q15[ NSQ->sLTP_buf_idx - lag + LTP_ORDER / 2 ]; |
Gain_Q10 = silk_RSHIFT( Gain_Q16, 6 ); |
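+ /* Widen the 16-bit prediction coefficients to 32-bit lanes once, outside the sample loop, so _mm_mul_epi32 can be used below */ |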
+ a_Q12_0123 = OP_CVTEPI16_EPI32_M64( a_Q12 ); |
+ a_Q12_4567 = OP_CVTEPI16_EPI32_M64( a_Q12 + 4 ); |
+ |
+ if( opus_likely( predictLPCOrder == 16 ) ) { |
+ a_Q12_89AB = OP_CVTEPI16_EPI32_M64( a_Q12 + 8 ); |
+ a_Q12_CDEF = OP_CVTEPI16_EPI32_M64( a_Q12 + 12 ); |
+ } |
+ |
+ if( signalType == TYPE_VOICED ) { |
+ b_Q12_0123 = OP_CVTEPI16_EPI32_M64( b_Q14 ); |
+ b_sr_Q12_0123 = _mm_shuffle_epi32( b_Q12_0123, _MM_SHUFFLE( 0, 3, 2, 1 ) ); /* rotate lanes down by one: equivalent to a 4-byte right shift */ |
+ } |
for( i = 0; i < length; i++ ) { |
/* Perform common calculations used in all states */ |
@@ -359,13 +379,26 @@ static OPUS_INLINE void silk_noise_shape_quantizer_del_dec( |
/* Unrolled loop */ |
/* Avoids introducing a bias because silk_SMLAWB() always rounds to -inf */ |
LTP_pred_Q14 = 2; |
- LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ 0 ], b_Q14[ 0 ] ); |
- LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -1 ], b_Q14[ 1 ] ); |
- LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -2 ], b_Q14[ 2 ] ); |
- LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -3 ], b_Q14[ 3 ] ); |
- LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -4 ], b_Q14[ 4 ] ); |
- LTP_pred_Q14 = silk_LSHIFT( LTP_pred_Q14, 1 ); /* Q13 -> Q14 */ |
- pred_lag_ptr++; |
+ { |
+ __m128i tmpa, tmpb, pred_lag_ptr_tmp; |
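+ /* Vectorized 4-tap LTP dot product: load taps [ -3 .. 0 ], reverse them, multiply the even lanes by b, rotate both vectors to cover the odd lanes, realign the 64-bit products by 16 bits as in silk_SMLAWB, then add horizontally; the 5th tap stays scalar */ |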
+ pred_lag_ptr_tmp = _mm_loadu_si128( (__m128i *)(&pred_lag_ptr[ -3 ] ) ); |
+ pred_lag_ptr_tmp = _mm_shuffle_epi32( pred_lag_ptr_tmp, 0x1B ); |
+ tmpa = _mm_mul_epi32( pred_lag_ptr_tmp, b_Q12_0123 ); |
+ tmpa = _mm_srli_si128( tmpa, 2 ); |
+ |
+ pred_lag_ptr_tmp = _mm_shuffle_epi32( pred_lag_ptr_tmp, _MM_SHUFFLE( 0, 3, 2, 1 ) ); /* rotate lanes down by one: equivalent to a 4-byte right shift */ |
+ pred_lag_ptr_tmp = _mm_mul_epi32( pred_lag_ptr_tmp, b_sr_Q12_0123 ); |
+ pred_lag_ptr_tmp = _mm_srli_si128( pred_lag_ptr_tmp, 2 ); |
+ pred_lag_ptr_tmp = _mm_add_epi32( pred_lag_ptr_tmp, tmpa ); |
+ |
+ tmpb = _mm_shuffle_epi32( pred_lag_ptr_tmp, _MM_SHUFFLE( 0, 0, 3, 2 ) ); /* move the upper half down: equivalent to an 8-byte right shift */ |
+ pred_lag_ptr_tmp = _mm_add_epi32( pred_lag_ptr_tmp, tmpb ); |
+ LTP_pred_Q14 += _mm_cvtsi128_si32( pred_lag_ptr_tmp ); |
+ |
+ LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -4 ], b_Q14[ 4 ] ); |
+ LTP_pred_Q14 = silk_LSHIFT( LTP_pred_Q14, 1 ); /* Q13 -> Q14 */ |
+ pred_lag_ptr++; |
+ } |
} else { |
LTP_pred_Q14 = 0; |
} |
@@ -380,172 +413,231 @@ static OPUS_INLINE void silk_noise_shape_quantizer_del_dec( |
} else { |
n_LTP_Q14 = 0; |
} |
+ { |
+ __m128i tmpa, tmpb, psLPC_Q14_tmp, a_Q12_tmp; |
+ |
+ for( k = 0; k < nStatesDelayedDecision; k++ ) { |
+ /* Delayed decision state */ |
+ psDD = &psDelDec[ k ]; |
+ |
+ /* Sample state */ |
+ psSS = psSampleState[ k ]; |
+ |
+ /* Generate dither */ |
+ psDD->Seed = silk_RAND( psDD->Seed ); |
+ |
+ /* Pointer used in short term prediction and shaping */ |
+ psLPC_Q14 = &psDD->sLPC_Q14[ NSQ_LPC_BUF_LENGTH - 1 + i ]; |
+ /* Short-term prediction */ |
+ silk_assert( predictLPCOrder == 10 || predictLPCOrder == 16 ); |
+ /* Avoids introducing a bias because silk_SMLAWB() always rounds to -inf */ |
+ LPC_pred_Q14 = silk_RSHIFT( predictLPCOrder, 1 ); |
+ |
+ tmpb = _mm_setzero_si128(); |
+ |
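+ /* Each step below handles four taps: reverse four state values, _mm_mul_epi32 the even and odd lanes against the widened coefficients, shift every 64-bit product right by 16 as silk_SMLAWB does, and accumulate into tmpb */ |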
+ /* step 1 */ |
+ psLPC_Q14_tmp = _mm_loadu_si128( (__m128i *)(&psLPC_Q14[ -3 ] ) ); /* -3, -2, -1, 0 */ |
+ psLPC_Q14_tmp = _mm_shuffle_epi32( psLPC_Q14_tmp, 0x1B ); /* 0, -1, -2, -3 */ |
+ tmpa = _mm_mul_epi32( psLPC_Q14_tmp, a_Q12_0123 ); /* even lanes: psLPC[ 0 ] * a[ 0 ], psLPC[ -2 ] * a[ 2 ] */ |
+ |
+ tmpa = _mm_srli_epi64( tmpa, 16 ); |
+ tmpb = _mm_add_epi32( tmpb, tmpa ); |
+ |
+ psLPC_Q14_tmp = _mm_shuffle_epi32( psLPC_Q14_tmp, _MM_SHUFFLE( 0, 3, 2, 1 ) ); /* rotate lanes down by one: equivalent to a 4-byte right shift */ |
+ a_Q12_tmp = _mm_shuffle_epi32( a_Q12_0123, _MM_SHUFFLE( 0, 3, 2, 1 ) ); /* rotate lanes down by one: equivalent to a 4-byte right shift */ |
+ psLPC_Q14_tmp = _mm_mul_epi32( psLPC_Q14_tmp, a_Q12_tmp ); /* 1*-1, 3*-3 */ |
+ psLPC_Q14_tmp = _mm_srli_epi64( psLPC_Q14_tmp, 16 ); |
+ tmpb = _mm_add_epi32( tmpb, psLPC_Q14_tmp ); |
+ |
+ /* step 2 */ |
+ psLPC_Q14_tmp = _mm_loadu_si128( (__m128i *)(&psLPC_Q14[ -7 ] ) ); |
+ psLPC_Q14_tmp = _mm_shuffle_epi32( psLPC_Q14_tmp, 0x1B ); |
+ tmpa = _mm_mul_epi32( psLPC_Q14_tmp, a_Q12_4567 ); |
+ tmpa = _mm_srli_epi64( tmpa, 16 ); |
+ tmpb = _mm_add_epi32( tmpb, tmpa ); |
+ |
+ psLPC_Q14_tmp = _mm_shuffle_epi32( psLPC_Q14_tmp, _MM_SHUFFLE( 0, 3, 2, 1 ) ); /* rotate lanes down by one: equivalent to a 4-byte right shift */ |
+ a_Q12_tmp = _mm_shuffle_epi32( a_Q12_4567, _MM_SHUFFLE( 0, 3, 2, 1 ) ); /* rotate lanes down by one: equivalent to a 4-byte right shift */ |
+ psLPC_Q14_tmp = _mm_mul_epi32( psLPC_Q14_tmp, a_Q12_tmp ); |
+ psLPC_Q14_tmp = _mm_srli_epi64( psLPC_Q14_tmp, 16 ); |
+ tmpb = _mm_add_epi32( tmpb, psLPC_Q14_tmp ); |
+ |
+ if( opus_likely( predictLPCOrder == 16 ) ) { |
+ /* step 3 */ |
+ psLPC_Q14_tmp = _mm_loadu_si128( (__m128i *)(&psLPC_Q14[ -11 ] ) ); |
+ psLPC_Q14_tmp = _mm_shuffle_epi32( psLPC_Q14_tmp, 0x1B ); |
+ tmpa = _mm_mul_epi32( psLPC_Q14_tmp, a_Q12_89AB ); |
+ tmpa = _mm_srli_epi64( tmpa, 16 ); |
+ tmpb = _mm_add_epi32( tmpb, tmpa ); |
+ |
+ psLPC_Q14_tmp = _mm_shuffle_epi32( psLPC_Q14_tmp, _MM_SHUFFLE( 0, 3, 2, 1 ) ); /* rotate lanes down by one: equivalent to a 4-byte right shift */ |
+ a_Q12_tmp = _mm_shuffle_epi32( a_Q12_89AB, _MM_SHUFFLE( 0, 3, 2, 1 ) ); /* rotate lanes down by one: equivalent to a 4-byte right shift */ |
+ psLPC_Q14_tmp = _mm_mul_epi32( psLPC_Q14_tmp, a_Q12_tmp ); |
+ psLPC_Q14_tmp = _mm_srli_epi64( psLPC_Q14_tmp, 16 ); |
+ tmpb = _mm_add_epi32( tmpb, psLPC_Q14_tmp ); |
+ |
+ /* step 4 */ |
+ psLPC_Q14_tmp = _mm_loadu_si128( (__m128i *)(&psLPC_Q14[ -15 ] ) ); |
+ psLPC_Q14_tmp = _mm_shuffle_epi32( psLPC_Q14_tmp, 0x1B ); |
+ tmpa = _mm_mul_epi32( psLPC_Q14_tmp, a_Q12_CDEF ); |
+ tmpa = _mm_srli_epi64( tmpa, 16 ); |
+ tmpb = _mm_add_epi32( tmpb, tmpa ); |
+ |
+ psLPC_Q14_tmp = _mm_shuffle_epi32( psLPC_Q14_tmp, _MM_SHUFFLE( 0, 3, 2, 1 ) ); /* rotate lanes down by one: equivalent to a 4-byte right shift */ |
+ a_Q12_tmp = _mm_shuffle_epi32( a_Q12_CDEF, _MM_SHUFFLE( 0, 3, 2, 1 ) ); /* rotate lanes down by one: equivalent to a 4-byte right shift */ |
+ psLPC_Q14_tmp = _mm_mul_epi32( psLPC_Q14_tmp, a_Q12_tmp ); |
+ psLPC_Q14_tmp = _mm_srli_epi64( psLPC_Q14_tmp, 16 ); |
+ tmpb = _mm_add_epi32( tmpb, psLPC_Q14_tmp ); |
+ |
+ /* final horizontal add */ |
+ tmpa = _mm_shuffle_epi32( tmpb, _MM_SHUFFLE( 0, 0, 3, 2 ) ); /* move the upper half down: equivalent to an 8-byte right shift */ |
+ tmpb = _mm_add_epi32( tmpb, tmpa ); |
+ LPC_pred_Q14 += _mm_cvtsi128_si32( tmpb ); |
+ } else { |
+ /* final horizontal add */ |
+ tmpa = _mm_shuffle_epi32( tmpb, _MM_SHUFFLE( 0, 0, 3, 2 ) ); /* move the upper half down: equivalent to an 8-byte right shift */ |
+ tmpb = _mm_add_epi32( tmpb, tmpa ); |
+ LPC_pred_Q14 += _mm_cvtsi128_si32( tmpb ); |
+ |
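+ /* predictLPCOrder == 10: taps 8 and 9 were not vectorized, so finish them with scalar silk_SMLAWB */ |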
+ LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -8 ], a_Q12[ 8 ] ); |
+ LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -9 ], a_Q12[ 9 ] ); |
+ } |
- for( k = 0; k < nStatesDelayedDecision; k++ ) { |
- /* Delayed decision state */ |
- psDD = &psDelDec[ k ]; |
- |
- /* Sample state */ |
- psSS = psSampleState[ k ]; |
- |
- /* Generate dither */ |
- psDD->Seed = silk_RAND( psDD->Seed ); |
+ LPC_pred_Q14 = silk_LSHIFT( LPC_pred_Q14, 4 ); /* Q10 -> Q14 */ |
- /* Pointer used in short term prediction and shaping */ |
- psLPC_Q14 = &psDD->sLPC_Q14[ NSQ_LPC_BUF_LENGTH - 1 + i ]; |
- /* Short-term prediction */ |
- silk_assert( predictLPCOrder == 10 || predictLPCOrder == 16 ); |
- /* Avoids introducing a bias because silk_SMLAWB() always rounds to -inf */ |
- LPC_pred_Q14 = silk_RSHIFT( predictLPCOrder, 1 ); |
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ 0 ], a_Q12[ 0 ] ); |
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -1 ], a_Q12[ 1 ] ); |
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -2 ], a_Q12[ 2 ] ); |
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -3 ], a_Q12[ 3 ] ); |
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -4 ], a_Q12[ 4 ] ); |
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -5 ], a_Q12[ 5 ] ); |
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -6 ], a_Q12[ 6 ] ); |
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -7 ], a_Q12[ 7 ] ); |
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -8 ], a_Q12[ 8 ] ); |
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -9 ], a_Q12[ 9 ] ); |
- if( predictLPCOrder == 16 ) { |
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -10 ], a_Q12[ 10 ] ); |
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -11 ], a_Q12[ 11 ] ); |
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -12 ], a_Q12[ 12 ] ); |
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -13 ], a_Q12[ 13 ] ); |
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -14 ], a_Q12[ 14 ] ); |
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -15 ], a_Q12[ 15 ] ); |
- } |
- LPC_pred_Q14 = silk_LSHIFT( LPC_pred_Q14, 4 ); /* Q10 -> Q14 */ |
- |
- /* Noise shape feedback */ |
- silk_assert( ( shapingLPCOrder & 1 ) == 0 ); /* check that order is even */ |
- /* Output of lowpass section */ |
- tmp2 = silk_SMLAWB( psLPC_Q14[ 0 ], psDD->sAR2_Q14[ 0 ], warping_Q16 ); |
- /* Output of allpass section */ |
- tmp1 = silk_SMLAWB( psDD->sAR2_Q14[ 0 ], psDD->sAR2_Q14[ 1 ] - tmp2, warping_Q16 ); |
- psDD->sAR2_Q14[ 0 ] = tmp2; |
- n_AR_Q14 = silk_RSHIFT( shapingLPCOrder, 1 ); |
- n_AR_Q14 = silk_SMLAWB( n_AR_Q14, tmp2, AR_shp_Q13[ 0 ] ); |
- /* Loop over allpass sections */ |
- for( j = 2; j < shapingLPCOrder; j += 2 ) { |
+ /* Noise shape feedback */ |
+ silk_assert( ( shapingLPCOrder & 1 ) == 0 ); /* check that order is even */ |
+ /* Output of lowpass section */ |
+ tmp2 = silk_SMLAWB( psLPC_Q14[ 0 ], psDD->sAR2_Q14[ 0 ], warping_Q16 ); |
/* Output of allpass section */ |
- tmp2 = silk_SMLAWB( psDD->sAR2_Q14[ j - 1 ], psDD->sAR2_Q14[ j + 0 ] - tmp1, warping_Q16 ); |
- psDD->sAR2_Q14[ j - 1 ] = tmp1; |
- n_AR_Q14 = silk_SMLAWB( n_AR_Q14, tmp1, AR_shp_Q13[ j - 1 ] ); |
- /* Output of allpass section */ |
- tmp1 = silk_SMLAWB( psDD->sAR2_Q14[ j + 0 ], psDD->sAR2_Q14[ j + 1 ] - tmp2, warping_Q16 ); |
- psDD->sAR2_Q14[ j + 0 ] = tmp2; |
- n_AR_Q14 = silk_SMLAWB( n_AR_Q14, tmp2, AR_shp_Q13[ j ] ); |
- } |
- psDD->sAR2_Q14[ shapingLPCOrder - 1 ] = tmp1; |
- n_AR_Q14 = silk_SMLAWB( n_AR_Q14, tmp1, AR_shp_Q13[ shapingLPCOrder - 1 ] ); |
+ tmp1 = silk_SMLAWB( psDD->sAR2_Q14[ 0 ], psDD->sAR2_Q14[ 1 ] - tmp2, warping_Q16 ); |
+ psDD->sAR2_Q14[ 0 ] = tmp2; |
+ n_AR_Q14 = silk_RSHIFT( shapingLPCOrder, 1 ); |
+ n_AR_Q14 = silk_SMLAWB( n_AR_Q14, tmp2, AR_shp_Q13[ 0 ] ); |
+ /* Loop over allpass sections */ |
+ for( j = 2; j < shapingLPCOrder; j += 2 ) { |
+ /* Output of allpass section */ |
+ tmp2 = silk_SMLAWB( psDD->sAR2_Q14[ j - 1 ], psDD->sAR2_Q14[ j + 0 ] - tmp1, warping_Q16 ); |
+ psDD->sAR2_Q14[ j - 1 ] = tmp1; |
+ n_AR_Q14 = silk_SMLAWB( n_AR_Q14, tmp1, AR_shp_Q13[ j - 1 ] ); |
+ /* Output of allpass section */ |
+ tmp1 = silk_SMLAWB( psDD->sAR2_Q14[ j + 0 ], psDD->sAR2_Q14[ j + 1 ] - tmp2, warping_Q16 ); |
+ psDD->sAR2_Q14[ j + 0 ] = tmp2; |
+ n_AR_Q14 = silk_SMLAWB( n_AR_Q14, tmp2, AR_shp_Q13[ j ] ); |
+ } |
+ psDD->sAR2_Q14[ shapingLPCOrder - 1 ] = tmp1; |
+ n_AR_Q14 = silk_SMLAWB( n_AR_Q14, tmp1, AR_shp_Q13[ shapingLPCOrder - 1 ] ); |
- n_AR_Q14 = silk_LSHIFT( n_AR_Q14, 1 ); /* Q11 -> Q12 */ |
- n_AR_Q14 = silk_SMLAWB( n_AR_Q14, psDD->LF_AR_Q14, Tilt_Q14 ); /* Q12 */ |
- n_AR_Q14 = silk_LSHIFT( n_AR_Q14, 2 ); /* Q12 -> Q14 */ |
+ n_AR_Q14 = silk_LSHIFT( n_AR_Q14, 1 ); /* Q11 -> Q12 */ |
+ n_AR_Q14 = silk_SMLAWB( n_AR_Q14, psDD->LF_AR_Q14, Tilt_Q14 ); /* Q12 */ |
+ n_AR_Q14 = silk_LSHIFT( n_AR_Q14, 2 ); /* Q12 -> Q14 */ |
- n_LF_Q14 = silk_SMULWB( psDD->Shape_Q14[ *smpl_buf_idx ], LF_shp_Q14 ); /* Q12 */ |
- n_LF_Q14 = silk_SMLAWT( n_LF_Q14, psDD->LF_AR_Q14, LF_shp_Q14 ); /* Q12 */ |
- n_LF_Q14 = silk_LSHIFT( n_LF_Q14, 2 ); /* Q12 -> Q14 */ |
+ n_LF_Q14 = silk_SMULWB( psDD->Shape_Q14[ *smpl_buf_idx ], LF_shp_Q14 ); /* Q12 */ |
+ n_LF_Q14 = silk_SMLAWT( n_LF_Q14, psDD->LF_AR_Q14, LF_shp_Q14 ); /* Q12 */ |
+ n_LF_Q14 = silk_LSHIFT( n_LF_Q14, 2 ); /* Q12 -> Q14 */ |
- /* Input minus prediction plus noise feedback */ |
- /* r = x[ i ] - LTP_pred - LPC_pred + n_AR + n_Tilt + n_LF + n_LTP */ |
- tmp1 = silk_ADD32( n_AR_Q14, n_LF_Q14 ); /* Q14 */ |
- tmp2 = silk_ADD32( n_LTP_Q14, LPC_pred_Q14 ); /* Q13 */ |
- tmp1 = silk_SUB32( tmp2, tmp1 ); /* Q13 */ |
- tmp1 = silk_RSHIFT_ROUND( tmp1, 4 ); /* Q10 */ |
+ /* Input minus prediction plus noise feedback */ |
+ /* r = x[ i ] - LTP_pred - LPC_pred + n_AR + n_Tilt + n_LF + n_LTP */ |
+ tmp1 = silk_ADD32( n_AR_Q14, n_LF_Q14 ); /* Q14 */ |
+ tmp2 = silk_ADD32( n_LTP_Q14, LPC_pred_Q14 ); /* Q13 */ |
+ tmp1 = silk_SUB32( tmp2, tmp1 ); /* Q13 */ |
+ tmp1 = silk_RSHIFT_ROUND( tmp1, 4 ); /* Q10 */ |
- r_Q10 = silk_SUB32( x_Q10[ i ], tmp1 ); /* residual error Q10 */ |
+ r_Q10 = silk_SUB32( x_Q10[ i ], tmp1 ); /* residual error Q10 */ |
- /* Flip sign depending on dither */ |
- if ( psDD->Seed < 0 ) { |
- r_Q10 = -r_Q10; |
- } |
- r_Q10 = silk_LIMIT_32( r_Q10, -(31 << 10), 30 << 10 ); |
- |
- /* Find two quantization level candidates and measure their rate-distortion */ |
- q1_Q10 = silk_SUB32( r_Q10, offset_Q10 ); |
- q1_Q0 = silk_RSHIFT( q1_Q10, 10 ); |
- if( q1_Q0 > 0 ) { |
- q1_Q10 = silk_SUB32( silk_LSHIFT( q1_Q0, 10 ), QUANT_LEVEL_ADJUST_Q10 ); |
- q1_Q10 = silk_ADD32( q1_Q10, offset_Q10 ); |
- q2_Q10 = silk_ADD32( q1_Q10, 1024 ); |
- rd1_Q10 = silk_SMULBB( q1_Q10, Lambda_Q10 ); |
- rd2_Q10 = silk_SMULBB( q2_Q10, Lambda_Q10 ); |
- } else if( q1_Q0 == 0 ) { |
- q1_Q10 = offset_Q10; |
- q2_Q10 = silk_ADD32( q1_Q10, 1024 - QUANT_LEVEL_ADJUST_Q10 ); |
- rd1_Q10 = silk_SMULBB( q1_Q10, Lambda_Q10 ); |
- rd2_Q10 = silk_SMULBB( q2_Q10, Lambda_Q10 ); |
- } else if( q1_Q0 == -1 ) { |
- q2_Q10 = offset_Q10; |
- q1_Q10 = silk_SUB32( q2_Q10, 1024 - QUANT_LEVEL_ADJUST_Q10 ); |
- rd1_Q10 = silk_SMULBB( -q1_Q10, Lambda_Q10 ); |
- rd2_Q10 = silk_SMULBB( q2_Q10, Lambda_Q10 ); |
- } else { /* q1_Q0 < -1 */ |
- q1_Q10 = silk_ADD32( silk_LSHIFT( q1_Q0, 10 ), QUANT_LEVEL_ADJUST_Q10 ); |
- q1_Q10 = silk_ADD32( q1_Q10, offset_Q10 ); |
- q2_Q10 = silk_ADD32( q1_Q10, 1024 ); |
- rd1_Q10 = silk_SMULBB( -q1_Q10, Lambda_Q10 ); |
- rd2_Q10 = silk_SMULBB( -q2_Q10, Lambda_Q10 ); |
- } |
- rr_Q10 = silk_SUB32( r_Q10, q1_Q10 ); |
- rd1_Q10 = silk_RSHIFT( silk_SMLABB( rd1_Q10, rr_Q10, rr_Q10 ), 10 ); |
- rr_Q10 = silk_SUB32( r_Q10, q2_Q10 ); |
- rd2_Q10 = silk_RSHIFT( silk_SMLABB( rd2_Q10, rr_Q10, rr_Q10 ), 10 ); |
- |
- if( rd1_Q10 < rd2_Q10 ) { |
- psSS[ 0 ].RD_Q10 = silk_ADD32( psDD->RD_Q10, rd1_Q10 ); |
- psSS[ 1 ].RD_Q10 = silk_ADD32( psDD->RD_Q10, rd2_Q10 ); |
- psSS[ 0 ].Q_Q10 = q1_Q10; |
- psSS[ 1 ].Q_Q10 = q2_Q10; |
- } else { |
- psSS[ 0 ].RD_Q10 = silk_ADD32( psDD->RD_Q10, rd2_Q10 ); |
- psSS[ 1 ].RD_Q10 = silk_ADD32( psDD->RD_Q10, rd1_Q10 ); |
- psSS[ 0 ].Q_Q10 = q2_Q10; |
- psSS[ 1 ].Q_Q10 = q1_Q10; |
- } |
+ /* Flip sign depending on dither */ |
+ if ( psDD->Seed < 0 ) { |
+ r_Q10 = -r_Q10; |
+ } |
+ r_Q10 = silk_LIMIT_32( r_Q10, -(31 << 10), 30 << 10 ); |
+ |
+ /* Find two quantization level candidates and measure their rate-distortion */ |
+ q1_Q10 = silk_SUB32( r_Q10, offset_Q10 ); |
+ q1_Q0 = silk_RSHIFT( q1_Q10, 10 ); |
+ if( q1_Q0 > 0 ) { |
+ q1_Q10 = silk_SUB32( silk_LSHIFT( q1_Q0, 10 ), QUANT_LEVEL_ADJUST_Q10 ); |
+ q1_Q10 = silk_ADD32( q1_Q10, offset_Q10 ); |
+ q2_Q10 = silk_ADD32( q1_Q10, 1024 ); |
+ rd1_Q10 = silk_SMULBB( q1_Q10, Lambda_Q10 ); |
+ rd2_Q10 = silk_SMULBB( q2_Q10, Lambda_Q10 ); |
+ } else if( q1_Q0 == 0 ) { |
+ q1_Q10 = offset_Q10; |
+ q2_Q10 = silk_ADD32( q1_Q10, 1024 - QUANT_LEVEL_ADJUST_Q10 ); |
+ rd1_Q10 = silk_SMULBB( q1_Q10, Lambda_Q10 ); |
+ rd2_Q10 = silk_SMULBB( q2_Q10, Lambda_Q10 ); |
+ } else if( q1_Q0 == -1 ) { |
+ q2_Q10 = offset_Q10; |
+ q1_Q10 = silk_SUB32( q2_Q10, 1024 - QUANT_LEVEL_ADJUST_Q10 ); |
+ rd1_Q10 = silk_SMULBB( -q1_Q10, Lambda_Q10 ); |
+ rd2_Q10 = silk_SMULBB( q2_Q10, Lambda_Q10 ); |
+ } else { /* q1_Q0 < -1 */ |
+ q1_Q10 = silk_ADD32( silk_LSHIFT( q1_Q0, 10 ), QUANT_LEVEL_ADJUST_Q10 ); |
+ q1_Q10 = silk_ADD32( q1_Q10, offset_Q10 ); |
+ q2_Q10 = silk_ADD32( q1_Q10, 1024 ); |
+ rd1_Q10 = silk_SMULBB( -q1_Q10, Lambda_Q10 ); |
+ rd2_Q10 = silk_SMULBB( -q2_Q10, Lambda_Q10 ); |
+ } |
+ rr_Q10 = silk_SUB32( r_Q10, q1_Q10 ); |
+ rd1_Q10 = silk_RSHIFT( silk_SMLABB( rd1_Q10, rr_Q10, rr_Q10 ), 10 ); |
+ rr_Q10 = silk_SUB32( r_Q10, q2_Q10 ); |
+ rd2_Q10 = silk_RSHIFT( silk_SMLABB( rd2_Q10, rr_Q10, rr_Q10 ), 10 ); |
+ |
+ if( rd1_Q10 < rd2_Q10 ) { |
+ psSS[ 0 ].RD_Q10 = silk_ADD32( psDD->RD_Q10, rd1_Q10 ); |
+ psSS[ 1 ].RD_Q10 = silk_ADD32( psDD->RD_Q10, rd2_Q10 ); |
+ psSS[ 0 ].Q_Q10 = q1_Q10; |
+ psSS[ 1 ].Q_Q10 = q2_Q10; |
+ } else { |
+ psSS[ 0 ].RD_Q10 = silk_ADD32( psDD->RD_Q10, rd2_Q10 ); |
+ psSS[ 1 ].RD_Q10 = silk_ADD32( psDD->RD_Q10, rd1_Q10 ); |
+ psSS[ 0 ].Q_Q10 = q2_Q10; |
+ psSS[ 1 ].Q_Q10 = q1_Q10; |
+ } |
- /* Update states for best quantization */ |
+ /* Update states for best quantization */ |
- /* Quantized excitation */ |
- exc_Q14 = silk_LSHIFT32( psSS[ 0 ].Q_Q10, 4 ); |
- if ( psDD->Seed < 0 ) { |
- exc_Q14 = -exc_Q14; |
- } |
+ /* Quantized excitation */ |
+ exc_Q14 = silk_LSHIFT32( psSS[ 0 ].Q_Q10, 4 ); |
+ if ( psDD->Seed < 0 ) { |
+ exc_Q14 = -exc_Q14; |
+ } |
- /* Add predictions */ |
- LPC_exc_Q14 = silk_ADD32( exc_Q14, LTP_pred_Q14 ); |
- xq_Q14 = silk_ADD32( LPC_exc_Q14, LPC_pred_Q14 ); |
+ /* Add predictions */ |
+ LPC_exc_Q14 = silk_ADD32( exc_Q14, LTP_pred_Q14 ); |
+ xq_Q14 = silk_ADD32( LPC_exc_Q14, LPC_pred_Q14 ); |
- /* Update states */ |
- sLF_AR_shp_Q14 = silk_SUB32( xq_Q14, n_AR_Q14 ); |
- psSS[ 0 ].sLTP_shp_Q14 = silk_SUB32( sLF_AR_shp_Q14, n_LF_Q14 ); |
- psSS[ 0 ].LF_AR_Q14 = sLF_AR_shp_Q14; |
- psSS[ 0 ].LPC_exc_Q14 = LPC_exc_Q14; |
- psSS[ 0 ].xq_Q14 = xq_Q14; |
+ /* Update states */ |
+ sLF_AR_shp_Q14 = silk_SUB32( xq_Q14, n_AR_Q14 ); |
+ psSS[ 0 ].sLTP_shp_Q14 = silk_SUB32( sLF_AR_shp_Q14, n_LF_Q14 ); |
+ psSS[ 0 ].LF_AR_Q14 = sLF_AR_shp_Q14; |
+ psSS[ 0 ].LPC_exc_Q14 = LPC_exc_Q14; |
+ psSS[ 0 ].xq_Q14 = xq_Q14; |
- /* Update states for second best quantization */ |
+ /* Update states for second best quantization */ |
- /* Quantized excitation */ |
- exc_Q14 = silk_LSHIFT32( psSS[ 1 ].Q_Q10, 4 ); |
- if ( psDD->Seed < 0 ) { |
- exc_Q14 = -exc_Q14; |
- } |
+ /* Quantized excitation */ |
+ exc_Q14 = silk_LSHIFT32( psSS[ 1 ].Q_Q10, 4 ); |
+ if ( psDD->Seed < 0 ) { |
+ exc_Q14 = -exc_Q14; |
+ } |
- /* Add predictions */ |
- LPC_exc_Q14 = silk_ADD32( exc_Q14, LTP_pred_Q14 ); |
- xq_Q14 = silk_ADD32( LPC_exc_Q14, LPC_pred_Q14 ); |
+ /* Add predictions */ |
+ LPC_exc_Q14 = silk_ADD32( exc_Q14, LTP_pred_Q14 ); |
+ xq_Q14 = silk_ADD32( LPC_exc_Q14, LPC_pred_Q14 ); |
- /* Update states */ |
- sLF_AR_shp_Q14 = silk_SUB32( xq_Q14, n_AR_Q14 ); |
- psSS[ 1 ].sLTP_shp_Q14 = silk_SUB32( sLF_AR_shp_Q14, n_LF_Q14 ); |
- psSS[ 1 ].LF_AR_Q14 = sLF_AR_shp_Q14; |
- psSS[ 1 ].LPC_exc_Q14 = LPC_exc_Q14; |
- psSS[ 1 ].xq_Q14 = xq_Q14; |
+ /* Update states */ |
+ sLF_AR_shp_Q14 = silk_SUB32( xq_Q14, n_AR_Q14 ); |
+ psSS[ 1 ].sLTP_shp_Q14 = silk_SUB32( sLF_AR_shp_Q14, n_LF_Q14 ); |
+ psSS[ 1 ].LF_AR_Q14 = sLF_AR_shp_Q14; |
+ psSS[ 1 ].LPC_exc_Q14 = LPC_exc_Q14; |
+ psSS[ 1 ].xq_Q14 = xq_Q14; |
+ } |
} |
- |
*smpl_buf_idx = ( *smpl_buf_idx - 1 ) & DECISION_DELAY_MASK; /* Index to newest samples */ |
last_smple_idx = ( *smpl_buf_idx + decisionDelay ) & DECISION_DELAY_MASK; /* Index to decisionDelay old samples */ |
@@ -630,7 +722,7 @@ static OPUS_INLINE void silk_noise_shape_quantizer_del_dec( |
RESTORE_STACK; |
} |
-static OPUS_INLINE void silk_nsq_del_dec_scale_states( |
+static OPUS_INLINE void silk_nsq_del_dec_scale_states_sse4_1( |
const silk_encoder_state *psEncC, /* I Encoder State */ |
silk_nsq_state *NSQ, /* I/O NSQ state */ |
NSQ_del_dec_struct psDelDec[], /* I/O Delayed decision states */ |
@@ -650,9 +742,11 @@ static OPUS_INLINE void silk_nsq_del_dec_scale_states( |
opus_int i, k, lag; |
opus_int32 gain_adj_Q16, inv_gain_Q31, inv_gain_Q23; |
NSQ_del_dec_struct *psDD; |
+ __m128i xmm_inv_gain_Q23, xmm_x_Q3_x2x0, xmm_x_Q3_x3x1; |
lag = pitchL[ subfr ]; |
inv_gain_Q31 = silk_INVERSE32_varQ( silk_max( Gains_Q16[ subfr ], 1 ), 47 ); |
+ |
silk_assert( inv_gain_Q31 != 0 ); |
/* Calculate gain adjustment factor */ |
@@ -664,7 +758,27 @@ static OPUS_INLINE void silk_nsq_del_dec_scale_states( |
/* Scale input */ |
inv_gain_Q23 = silk_RSHIFT_ROUND( inv_gain_Q31, 8 ); |
- for( i = 0; i < psEncC->subfr_length; i++ ) { |
+ |
+ /* broadcast inv_gain_Q23 to all four 32-bit lanes */ |
+ xmm_inv_gain_Q23 = _mm_set1_epi32( inv_gain_Q23 ); |
+ |
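+ /* Four silk_SMULWW()s at a time: _mm_mul_epi32 forms 64-bit products of lanes 0 and 2, and the rotated copy covers lanes 1 and 3; shifting the even products right and the odd products left by 16 bits lines them up so the 0xCC blend packs four 32-bit ( a * b ) >> 16 results. Any remaining 1-3 samples fall through to the scalar loop below. */ |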
+ for( i = 0; i < psEncC->subfr_length - 3; i += 4 ) { |
+ xmm_x_Q3_x2x0 = _mm_loadu_si128( (__m128i *)(&(x_Q3[ i ] ) ) ); |
+ /* rotate lanes down by one: equivalent to a 4-byte right shift */ |
+ xmm_x_Q3_x3x1 = _mm_shuffle_epi32( xmm_x_Q3_x2x0, _MM_SHUFFLE( 0, 3, 2, 1 ) ); |
+ |
+ xmm_x_Q3_x2x0 = _mm_mul_epi32( xmm_x_Q3_x2x0, xmm_inv_gain_Q23 ); |
+ xmm_x_Q3_x3x1 = _mm_mul_epi32( xmm_x_Q3_x3x1, xmm_inv_gain_Q23 ); |
+ |
+ xmm_x_Q3_x2x0 = _mm_srli_epi64( xmm_x_Q3_x2x0, 16 ); |
+ xmm_x_Q3_x3x1 = _mm_slli_epi64( xmm_x_Q3_x3x1, 16 ); |
+ |
+ xmm_x_Q3_x2x0 = _mm_blend_epi16( xmm_x_Q3_x2x0, xmm_x_Q3_x3x1, 0xCC ); |
+ |
+ _mm_storeu_si128( (__m128i *)(&(x_sc_Q10[ i ])), xmm_x_Q3_x2x0 ); |
+ } |
+ |
+ for( ; i < psEncC->subfr_length; i++ ) { |
x_sc_Q10[ i ] = silk_SMULWW( x_Q3[ i ], inv_gain_Q23 ); |
} |
@@ -686,33 +800,57 @@ static OPUS_INLINE void silk_nsq_del_dec_scale_states( |
/* Adjust for changing gain */ |
if( gain_adj_Q16 != (opus_int32)1 << 16 ) { |
/* Scale long-term shaping state */ |
- for( i = NSQ->sLTP_shp_buf_idx - psEncC->ltp_mem_length; i < NSQ->sLTP_shp_buf_idx; i++ ) { |
- NSQ->sLTP_shp_Q14[ i ] = silk_SMULWW( gain_adj_Q16, NSQ->sLTP_shp_Q14[ i ] ); |
- } |
+ { |
+ __m128i xmm_gain_adj_Q16, xmm_sLTP_shp_Q14_x2x0, xmm_sLTP_shp_Q14_x3x1; |
- /* Scale long-term prediction state */ |
- if( signal_type == TYPE_VOICED && NSQ->rewhite_flag == 0 ) { |
- for( i = NSQ->sLTP_buf_idx - lag - LTP_ORDER / 2; i < NSQ->sLTP_buf_idx - decisionDelay; i++ ) { |
- sLTP_Q15[ i ] = silk_SMULWW( gain_adj_Q16, sLTP_Q15[ i ] ); |
- } |
- } |
+ /* broadcast gain_adj_Q16 to all four 32-bit lanes */ |
+ xmm_gain_adj_Q16 = _mm_set1_epi32( gain_adj_Q16 ); |
- for( k = 0; k < nStatesDelayedDecision; k++ ) { |
- psDD = &psDelDec[ k ]; |
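+ /* same multiply / shift / blend pattern as the input-scaling loop above, applied to the long-term shaping state */ |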
+ for( i = NSQ->sLTP_shp_buf_idx - psEncC->ltp_mem_length; i < NSQ->sLTP_shp_buf_idx - 3; i += 4 ) { |
+ xmm_sLTP_shp_Q14_x2x0 = _mm_loadu_si128( (__m128i *)(&(NSQ->sLTP_shp_Q14[ i ] ) ) ); |
+ /* rotate lanes down by one: equivalent to a 4-byte right shift */ |
+ xmm_sLTP_shp_Q14_x3x1 = _mm_shuffle_epi32( xmm_sLTP_shp_Q14_x2x0, _MM_SHUFFLE( 0, 3, 2, 1 ) ); |
+ |
+ xmm_sLTP_shp_Q14_x2x0 = _mm_mul_epi32( xmm_sLTP_shp_Q14_x2x0, xmm_gain_adj_Q16 ); |
+ xmm_sLTP_shp_Q14_x3x1 = _mm_mul_epi32( xmm_sLTP_shp_Q14_x3x1, xmm_gain_adj_Q16 ); |
- /* Scale scalar states */ |
- psDD->LF_AR_Q14 = silk_SMULWW( gain_adj_Q16, psDD->LF_AR_Q14 ); |
+ xmm_sLTP_shp_Q14_x2x0 = _mm_srli_epi64( xmm_sLTP_shp_Q14_x2x0, 16 ); |
+ xmm_sLTP_shp_Q14_x3x1 = _mm_slli_epi64( xmm_sLTP_shp_Q14_x3x1, 16 ); |
- /* Scale short-term prediction and shaping states */ |
- for( i = 0; i < NSQ_LPC_BUF_LENGTH; i++ ) { |
- psDD->sLPC_Q14[ i ] = silk_SMULWW( gain_adj_Q16, psDD->sLPC_Q14[ i ] ); |
+ xmm_sLTP_shp_Q14_x2x0 = _mm_blend_epi16( xmm_sLTP_shp_Q14_x2x0, xmm_sLTP_shp_Q14_x3x1, 0xCC ); |
+ |
+ _mm_storeu_si128( (__m128i *)(&(NSQ->sLTP_shp_Q14[ i ] ) ), xmm_sLTP_shp_Q14_x2x0 ); |
} |
- for( i = 0; i < MAX_SHAPE_LPC_ORDER; i++ ) { |
- psDD->sAR2_Q14[ i ] = silk_SMULWW( gain_adj_Q16, psDD->sAR2_Q14[ i ] ); |
+ |
+ for( ; i < NSQ->sLTP_shp_buf_idx; i++ ) { |
+ NSQ->sLTP_shp_Q14[ i ] = silk_SMULWW( gain_adj_Q16, NSQ->sLTP_shp_Q14[ i ] ); |
+ } |
+ |
+ /* Scale long-term prediction state */ |
+ if( signal_type == TYPE_VOICED && NSQ->rewhite_flag == 0 ) { |
+ for( i = NSQ->sLTP_buf_idx - lag - LTP_ORDER / 2; i < NSQ->sLTP_buf_idx - decisionDelay; i++ ) { |
+ sLTP_Q15[ i ] = silk_SMULWW( gain_adj_Q16, sLTP_Q15[ i ] ); |
+ } |
} |
- for( i = 0; i < DECISION_DELAY; i++ ) { |
- psDD->Pred_Q15[ i ] = silk_SMULWW( gain_adj_Q16, psDD->Pred_Q15[ i ] ); |
- psDD->Shape_Q14[ i ] = silk_SMULWW( gain_adj_Q16, psDD->Shape_Q14[ i ] ); |
+ |
+ for( k = 0; k < nStatesDelayedDecision; k++ ) { |
+ psDD = &psDelDec[ k ]; |
+ |
+ /* Scale scalar states */ |
+ psDD->LF_AR_Q14 = silk_SMULWW( gain_adj_Q16, psDD->LF_AR_Q14 ); |
+ |
+ /* Scale short-term prediction and shaping states */ |
+ for( i = 0; i < NSQ_LPC_BUF_LENGTH; i++ ) { |
+ psDD->sLPC_Q14[ i ] = silk_SMULWW( gain_adj_Q16, psDD->sLPC_Q14[ i ] ); |
+ } |
+ for( i = 0; i < MAX_SHAPE_LPC_ORDER; i++ ) { |
+ psDD->sAR2_Q14[ i ] = silk_SMULWW( gain_adj_Q16, psDD->sAR2_Q14[ i ] ); |
+ } |
+ for( i = 0; i < DECISION_DELAY; i++ ) { |
+ psDD->Pred_Q15[ i ] = silk_SMULWW( gain_adj_Q16, psDD->Pred_Q15[ i ] ); |
+ psDD->Shape_Q14[ i ] = silk_SMULWW( gain_adj_Q16, psDD->Shape_Q14[ i ] ); |
+ } |
} |
} |
} |