/***********************************************************************
Copyright (C) 2014 Vidyo
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Internet Society, IETF or IETF Trust, nor the
names of specific contributors, may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <arm_neon.h>
#include "main.h"
#include "stack_alloc.h"
#include "NSQ.h"
#include "celt/cpu_support.h"
#include "celt/arm/armcpu.h"

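/* 16th-order short-term prediction for the noise shaping quantizer.
   Takes the dot product of the 16 most recent history samples,
   buf32[-15] ... buf32[0], with the 16 entries of coef32, where coef32[i]
   multiplies buf32[i - 15]. Each vqdmulhq_s32() lane computes the
   saturating doubling multiply-high (2 * coef * sample) >> 32; coef32 is
   assumed to hold the 16-bit prediction coefficients pre-widened and
   rescaled once per frame by the caller (see the TODO in
   silk_NSQ_noise_shape_feedback_loop_neon() below), presumably so that
   each lane matches a scalar silk_SMULWB()-style multiply. The final
   order/2 offset compensates for the downward bias of the truncating
   multiplies. */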
opus_int32 silk_noise_shape_quantizer_short_prediction_neon(const opus_int32 *buf32, const opus_int32 *coef32, opus_int order)
{
    /* Load the 16 coefficients and the 16 most recent history samples. */
    int32x4_t coef0 = vld1q_s32(coef32);
    int32x4_t coef1 = vld1q_s32(coef32 + 4);
    int32x4_t coef2 = vld1q_s32(coef32 + 8);
    int32x4_t coef3 = vld1q_s32(coef32 + 12);

    int32x4_t a0 = vld1q_s32(buf32 - 15);
    int32x4_t a1 = vld1q_s32(buf32 - 11);
    int32x4_t a2 = vld1q_s32(buf32 - 7);
    int32x4_t a3 = vld1q_s32(buf32 - 3);

    /* Saturating doubling multiply, keeping the high 32 bits of each
       product: b[i] = (2 * coef[i] * a[i]) >> 32. */
    int32x4_t b0 = vqdmulhq_s32(coef0, a0);
    int32x4_t b1 = vqdmulhq_s32(coef1, a1);
    int32x4_t b2 = vqdmulhq_s32(coef2, a2);
    int32x4_t b3 = vqdmulhq_s32(coef3, a3);

    /* Reduce the 16 partial products to a single sum. */
    int32x4_t c0 = vaddq_s32(b0, b1);
    int32x4_t c1 = vaddq_s32(b2, b3);

    int32x4_t d = vaddq_s32(c0, c1);

    /* Widen pairwise to 64 bits for the final horizontal add. */
    int64x2_t e = vpaddlq_s32(d);

    int64x1_t f = vadd_s64(vget_low_s64(e), vget_high_s64(e));

    opus_int32 out = vget_lane_s32(vreinterpret_s32_s64(f), 0);

    /* Compensate for the bias of the truncating multiplies. */
    out += silk_RSHIFT( order, 1 );

    return out;
}
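
/* For reference, a scalar sketch of the computation above (an illustrative
   rewrite, not part of the library). Ignoring the saturation that
   vqdmulhq_s32() applies on overflow and any intermediate wrap-around in
   the 32-bit lane adds, the function returns

       out = silk_RSHIFT( order, 1 );
       for( i = 0; i < 16; i++ ) {
           out += (opus_int32)( ( 2 * (opus_int64)coef32[ i ]
                                  * buf32[ i - 15 ] ) >> 32 );
       }
*/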
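/* Noise shaping filter feedback loop. For the vectorized order-8 case this
   shifts data0[0] into the front of the data1[] history (so data1[] becomes
   { data0[0], old data1[0] ... data1[6] }), computes the dot product of the
   updated history with the eight 16-bit coefficients in full 64-bit
   precision, and returns the sum after a rounding right shift by 15. All
   other orders fall back to the scalar C implementation. */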
opus_int32 silk_NSQ_noise_shape_feedback_loop_neon(const opus_int32 *data0, opus_int32 *data1, const opus_int16 *coef, opus_int order)
{
    opus_int32 out;
    if (order == 8)
    {
        int32x4_t a00 = vdupq_n_s32(data0[0]);
        int32x4_t a01 = vld1q_s32(data1);  /* data1[0] ... [3] */

        int32x4_t a0 = vextq_s32 (a00, a01, 3);  /* data0[0] data1[0] ...[2] */
        int32x4_t a1 = vld1q_s32(data1 + 3);  /* data1[3] ... [6] */

        /*TODO: Convert these once in advance instead of once per sample, like
          silk_noise_shape_quantizer_short_prediction_neon() does.*/
        int16x8_t coef16 = vld1q_s16(coef);
        int32x4_t coef0 = vmovl_s16(vget_low_s16(coef16));
        int32x4_t coef1 = vmovl_s16(vget_high_s16(coef16));

        /*This is not bit-exact with the C version, since we do not drop the
          lower 16 bits of each multiply, but wait until the end to truncate
          precision. This is an encoder-specific calculation (and unlike
          silk_noise_shape_quantizer_short_prediction_neon(), is not meant to
          simulate what the decoder will do). We still could use
          vqdmulhq_s32() like silk_noise_shape_quantizer_short_prediction_neon()
          and save half the multiplies, but the speed difference is not large,
          since we then need two extra adds.*/
        int64x2_t b0 = vmull_s32(vget_low_s32(a0), vget_low_s32(coef0));
        int64x2_t b1 = vmlal_s32(b0, vget_high_s32(a0), vget_high_s32(coef0));
        int64x2_t b2 = vmlal_s32(b1, vget_low_s32(a1), vget_low_s32(coef1));
        int64x2_t b3 = vmlal_s32(b2, vget_high_s32(a1), vget_high_s32(coef1));

        /* Horizontal add of the two 64-bit accumulators, then a rounding
           right shift by 15. */
        int64x1_t c = vadd_s64(vget_low_s64(b3), vget_high_s64(b3));
        int64x1_t cS = vrshr_n_s64(c, 15);
        int32x2_t d = vreinterpret_s32_s64(cS);

        out = vget_lane_s32(d, 0);
        /* Write back the shifted history: data1[] = { data0[0], old data1[0..6] }. */
        vst1q_s32(data1, a0);
        vst1q_s32(data1 + 4, a1);
        return out;
    }
    return silk_NSQ_noise_shape_feedback_loop_c(data0, data1, coef, order);
}