OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 // | 4 // |
5 // Input buffer layout, dividing the total buffer into regions (r0_ - r5_): | 5 // Input buffer layout, dividing the total buffer into regions (r0_ - r5_): |
6 // | 6 // |
7 // |----------------|-----------------------------------------|----------------| | 7 // |----------------|-----------------------------------------|----------------| |
8 // | 8 // |
9 // kBlockSize + kKernelSize / 2 | 9 // kBlockSize + kKernelSize / 2 |
10 // <---------------------------------------------------------> | 10 // <---------------------------------------------------------> |
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
48 namespace media { | 48 namespace media { |
49 | 49 |
50 SincResampler::SincResampler(double io_sample_rate_ratio, const ReadCB& read_cb) | 50 SincResampler::SincResampler(double io_sample_rate_ratio, const ReadCB& read_cb) |
51 : io_sample_rate_ratio_(io_sample_rate_ratio), | 51 : io_sample_rate_ratio_(io_sample_rate_ratio), |
52 virtual_source_idx_(0), | 52 virtual_source_idx_(0), |
53 buffer_primed_(false), | 53 buffer_primed_(false), |
54 read_cb_(read_cb), | 54 read_cb_(read_cb), |
55 // Create input buffers with a 16-byte alignment for SSE optimizations. | 55 // Create input buffers with a 16-byte alignment for SSE optimizations. |
56 kernel_storage_(static_cast<float*>( | 56 kernel_storage_(static_cast<float*>( |
57 base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))), | 57 base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))), |
58 kernel_pre_sinc_storage_(static_cast<float*>( | |
59 base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))), | |
60 kernel_window_storage_(static_cast<float*>( | |
61 base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))), | |
58 input_buffer_(static_cast<float*>( | 62 input_buffer_(static_cast<float*>( |
59 base::AlignedAlloc(sizeof(float) * kBufferSize, 16))), | 63 base::AlignedAlloc(sizeof(float) * kBufferSize, 16))), |
60 #if defined(ARCH_CPU_X86_FAMILY) && !defined(__SSE__) | 64 #if defined(ARCH_CPU_X86_FAMILY) && !defined(__SSE__) |
61 convolve_proc_(base::CPU().has_sse() ? Convolve_SSE : Convolve_C), | 65 convolve_proc_(base::CPU().has_sse() ? Convolve_SSE : Convolve_C), |
62 #endif | 66 #endif |
63 // Setup various region pointers in the buffer (see diagram above). | 67 // Setup various region pointers in the buffer (see diagram above). |
64 r0_(input_buffer_.get() + kKernelSize / 2), | 68 r0_(input_buffer_.get() + kKernelSize / 2), |
65 r1_(input_buffer_.get()), | 69 r1_(input_buffer_.get()), |
66 r2_(r0_), | 70 r2_(r0_), |
67 r3_(r0_ + kBlockSize - kKernelSize / 2), | 71 r3_(r0_ + kBlockSize - kKernelSize / 2), |
(...skipping 14 matching lines...) Expand all Loading... | |
82 DCHECK_EQ(r2_ - r1_, r5_ - r2_); | 86 DCHECK_EQ(r2_ - r1_, r5_ - r2_); |
83 // r3_ left of r4_, r5_ left of r0_ and r3_ size correct. | 87 // r3_ left of r4_, r5_ left of r0_ and r3_ size correct. |
84 DCHECK_EQ(r4_ - r3_, r5_ - r0_); | 88 DCHECK_EQ(r4_ - r3_, r5_ - r0_); |
85 // r3_, r4_ size correct and r4_ at the end of the buffer. | 89 // r3_, r4_ size correct and r4_ at the end of the buffer. |
86 DCHECK_EQ(r4_ + (r4_ - r3_), r1_ + kBufferSize); | 90 DCHECK_EQ(r4_ + (r4_ - r3_), r1_ + kBufferSize); |
87 // r5_ size correct and at the end of the buffer. | 91 // r5_ size correct and at the end of the buffer. |
88 DCHECK_EQ(r5_ + kBlockSize, r1_ + kBufferSize); | 92 DCHECK_EQ(r5_ + kBlockSize, r1_ + kBufferSize); |
89 | 93 |
90 memset(kernel_storage_.get(), 0, | 94 memset(kernel_storage_.get(), 0, |
91 sizeof(*kernel_storage_.get()) * kKernelStorageSize); | 95 sizeof(*kernel_storage_.get()) * kKernelStorageSize); |
96 memset(kernel_pre_sinc_storage_.get(), 0, | |
97 sizeof(*kernel_pre_sinc_storage_.get()) * kKernelStorageSize); | |
98 memset(kernel_window_storage_.get(), 0, | |
99 sizeof(*kernel_window_storage_.get()) * kKernelStorageSize); | |
92 memset(input_buffer_.get(), 0, sizeof(*input_buffer_.get()) * kBufferSize); | 100 memset(input_buffer_.get(), 0, sizeof(*input_buffer_.get()) * kBufferSize); |
93 | 101 |
94 InitializeKernel(); | 102 InitializeKernel(true); |
95 } | 103 } |
96 | 104 |
97 SincResampler::~SincResampler() {} | 105 SincResampler::~SincResampler() {} |
98 | 106 |
99 void SincResampler::InitializeKernel() { | 107 void SincResampler::InitializeKernel(bool first_run) { |
100 // Blackman window parameters. | 108 // Blackman window parameters. |
101 static const double kAlpha = 0.16; | 109 static const double kAlpha = 0.16; |
102 static const double kA0 = 0.5 * (1.0 - kAlpha); | 110 static const double kA0 = 0.5 * (1.0 - kAlpha); |
103 static const double kA1 = 0.5; | 111 static const double kA1 = 0.5; |
104 static const double kA2 = 0.5 * kAlpha; | 112 static const double kA2 = 0.5 * kAlpha; |
105 | 113 |
106 // |sinc_scale_factor| is basically the normalized cutoff frequency of the | 114 // |sinc_scale_factor| is basically the normalized cutoff frequency of the |
107 // low-pass filter. | 115 // low-pass filter. |
108 double sinc_scale_factor = | 116 double sinc_scale_factor = |
109 io_sample_rate_ratio_ > 1.0 ? 1.0 / io_sample_rate_ratio_ : 1.0; | 117 io_sample_rate_ratio_ > 1.0 ? 1.0 / io_sample_rate_ratio_ : 1.0; |
110 | 118 |
111 // The sinc function is an idealized brick-wall filter, but since we're | 119 // The sinc function is an idealized brick-wall filter, but since we're |
112 // windowing it the transition from pass to stop does not happen right away. | 120 // windowing it the transition from pass to stop does not happen right away. |
113 // So we should adjust the low pass filter cutoff slightly downward to avoid | 121 // So we should adjust the low pass filter cutoff slightly downward to avoid |
114 // some aliasing at the very high-end. | 122 // some aliasing at the very high-end. |
115 // TODO(crogers): this value is empirical and to be more exact should vary | 123 // TODO(crogers): this value is empirical and to be more exact should vary |
116 // depending on kKernelSize. | 124 // depending on kKernelSize. |
117 sinc_scale_factor *= 0.9; | 125 sinc_scale_factor *= 0.9; |
118 | 126 |
127 if (!first_run) { | |
Chris Rogers (2013/04/08 19:38:11):
    A simple comment explaining the optimization we're [comment truncated in this diff view]
DaleCurtis (2013/04/15 20:27:31):
    Done.
| |
128 for (int offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) { | |
129 for (int i = 0; i < kKernelSize; ++i) { | |
130 const int idx = i + offset_idx * kKernelSize; | |
131 const double s = sinc_scale_factor * kernel_pre_sinc_storage_[idx]; | |
132 const double sinc = (s == 0 ? 1.0 : sin(s) / s) * sinc_scale_factor; | |
133 kernel_storage_[idx] = sinc * kernel_window_storage_[idx]; | |
134 } | |
135 } | |
136 return; | |
137 } | |
138 | |
119 // Generates a set of windowed sinc() kernels. | 139 // Generates a set of windowed sinc() kernels. |
120 // We generate a range of sub-sample offsets from 0.0 to 1.0. | 140 // We generate a range of sub-sample offsets from 0.0 to 1.0. |
121 for (int offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) { | 141 for (int offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) { |
122 double subsample_offset = | 142 double subsample_offset = |
123 static_cast<double>(offset_idx) / kKernelOffsetCount; | 143 static_cast<double>(offset_idx) / kKernelOffsetCount; |
124 | 144 |
125 for (int i = 0; i < kKernelSize; ++i) { | 145 for (int i = 0; i < kKernelSize; ++i) { |
146 double pre_sinc = M_PI * (i - kKernelSize / 2 - subsample_offset); | |
147 | |
126 // Compute the sinc with offset. | 148 // Compute the sinc with offset. |
127 double s = | 149 double s = sinc_scale_factor * pre_sinc; |
128 sinc_scale_factor * M_PI * (i - kKernelSize / 2 - subsample_offset); | 150 double sinc = (s == 0 ? 1.0 : sin(s) / s) * sinc_scale_factor; |
129 double sinc = (!s ? 1.0 : sin(s) / s) * sinc_scale_factor; | |
130 | 151 |
131 // Compute Blackman window, matching the offset of the sinc(). | 152 // Compute Blackman window, matching the offset of the sinc(). |
132 double x = (i - subsample_offset) / kKernelSize; | 153 double x = (i - subsample_offset) / kKernelSize; |
133 double window = kA0 - kA1 * cos(2.0 * M_PI * x) + kA2 | 154 double window = kA0 - kA1 * cos(2.0 * M_PI * x) + kA2 |
134 * cos(4.0 * M_PI * x); | 155 * cos(4.0 * M_PI * x); |
135 | 156 |
157 const int idx = i + offset_idx * kKernelSize; | |
158 | |
136 // Window the sinc() function and store at the correct offset. | 159 // Window the sinc() function and store at the correct offset. |
137 kernel_storage_.get()[i + offset_idx * kKernelSize] = sinc * window; | 160 kernel_pre_sinc_storage_[idx] = pre_sinc; |
161 kernel_window_storage_[idx] = window; | |
162 kernel_storage_[idx] = sinc * window; | |
138 } | 163 } |
139 } | 164 } |
140 } | 165 } |
141 | 166 |
167 void SincResampler::UpdateSampleRateRatio(double io_sample_rate_ratio) { | |
168 io_sample_rate_ratio_ = io_sample_rate_ratio; | |
169 InitializeKernel(false); | |
170 } | |
171 | |
142 // If we know the minimum architecture avoid function hopping for CPU detection. | 172 // If we know the minimum architecture avoid function hopping for CPU detection. |
143 #if defined(ARCH_CPU_X86_FAMILY) | 173 #if defined(ARCH_CPU_X86_FAMILY) |
144 #if defined(__SSE__) | 174 #if defined(__SSE__) |
145 #define CONVOLVE_FUNC Convolve_SSE | 175 #define CONVOLVE_FUNC Convolve_SSE |
146 #else | 176 #else |
147 // X86 CPU detection required. |convolve_proc_| will be set upon construction. | 177 // X86 CPU detection required. |convolve_proc_| will be set upon construction. |
148 // TODO(dalecurtis): Once Chrome moves to a SSE baseline this can be removed. | 178 // TODO(dalecurtis): Once Chrome moves to a SSE baseline this can be removed. |
149 #define CONVOLVE_FUNC convolve_proc_ | 179 #define CONVOLVE_FUNC convolve_proc_ |
150 #endif | 180 #endif |
151 #elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON) | 181 #elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON) |
(...skipping 116 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
268 vmulq_f32(m_sums1, vmovq_n_f32(1.0 - kernel_interpolation_factor)), | 298 vmulq_f32(m_sums1, vmovq_n_f32(1.0 - kernel_interpolation_factor)), |
269 m_sums2, vmovq_n_f32(kernel_interpolation_factor)); | 299 m_sums2, vmovq_n_f32(kernel_interpolation_factor)); |
270 | 300 |
271 // Sum components together. | 301 // Sum components together. |
272 float32x2_t m_half = vadd_f32(vget_high_f32(m_sums1), vget_low_f32(m_sums1)); | 302 float32x2_t m_half = vadd_f32(vget_high_f32(m_sums1), vget_low_f32(m_sums1)); |
273 return vget_lane_f32(vpadd_f32(m_half, m_half), 0); | 303 return vget_lane_f32(vpadd_f32(m_half, m_half), 0); |
274 } | 304 } |
275 #endif | 305 #endif |
276 | 306 |
277 } // namespace media | 307 } // namespace media |
OLD | NEW |