Index: media/base/sinc_resampler.cc
diff --git a/media/base/sinc_resampler.cc b/media/base/sinc_resampler.cc
index 88e62044d27c6a77e416f93c8355bbeb094ffd5d..19f878bb902404c2d09d6578abab3f59b4d420fe 100644
--- a/media/base/sinc_resampler.cc
+++ b/media/base/sinc_resampler.cc
@@ -36,8 +36,12 @@
 #include "media/base/sinc_resampler.h"
 
+#if defined(ARCH_CPU_X86_FAMILY) && defined(__SSE__)
+#include <xmmintrin.h>
+#endif
 #include <cmath>
 
+#include "base/cpu.h"
 #include "base/logging.h"
 
 namespace media {
@@ -68,8 +72,7 @@ SincResampler::SincResampler(double io_sample_rate_ratio, const ReadCB& read_cb)
       virtual_source_idx_(0),
       buffer_primed_(false),
       read_cb_(read_cb),
-      // TODO(dalecurtis): When we switch to AVX/SSE optimization, we'll need to
-      // allocate with 32-byte alignment and ensure they're sized % 32 bytes.
+      // TODO(dalecurtis): Switch these to AlignedMemory<> instances.
       kernel_storage_(new float[kKernelStorageSize]),
       input_buffer_(new float[kBufferSize]),
       // Setup various region pointers in the buffer (see diagram above).
@@ -168,36 +171,18 @@ void SincResampler::Resample(float* destination, int frames) {
       double virtual_offset_idx = subsample_remainder * kKernelOffsetCount;
       int offset_idx = static_cast<int>(virtual_offset_idx);
 
+      // We'll compute "convolutions" for the two kernels which straddle
+      // |virtual_source_idx_|.
       float* k1 = kernel_storage_.get() + offset_idx * kKernelSize;
       float* k2 = k1 + kKernelSize;
 
       // Initialize input pointer based on quantized |virtual_source_idx_|.
       float* input_ptr = r1_ + source_idx;
 
-      // We'll compute "convolutions" for the two kernels which straddle
-      // |virtual_source_idx_|.
-      float sum1 = 0;
-      float sum2 = 0;
-
       // Figure out how much to weight each kernel's "convolution".
       double kernel_interpolation_factor = virtual_offset_idx - offset_idx;
-
-      // Generate a single output sample.
-      int n = kKernelSize;
-      float input;
-      // TODO(dalecurtis): For initial commit, I've ripped out all the SSE
-      // optimizations, these definitely need to go back in before release.
-      while (n--) {
-        input = *input_ptr++;
-        sum1 += input * *k1++;
-        sum2 += input * *k2++;
-      }
-
-      // Linearly interpolate the two "convolutions".
-      double result = (1.0 - kernel_interpolation_factor) * sum1
-          + kernel_interpolation_factor * sum2;
-
-      *destination++ = result;
+      *destination++ = Convolve(
+          input_ptr, k1, k2, kernel_interpolation_factor);
 
       // Advance the virtual index.
       virtual_source_idx_ += io_sample_rate_ratio_;
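
Note: the Resample() loop above now produces each output sample by linearly interpolating the "convolutions" of the two kernels that straddle |virtual_source_idx_|. For example, virtual_offset_idx == 13.6 yields offset_idx == 13 and kernel_interpolation_factor == 0.6, so the output weights the offset-13 kernel's result by 0.4 and the offset-14 kernel's result by 0.6. A minimal sketch of that weighting (helper name is hypothetical, not part of this CL):

    // Weight two kernel "convolutions" by the fractional kernel offset.
    // factor == 0 selects k1's result exactly; factor == 1 selects k2's.
    float InterpolateConvolutions(float sum1, float sum2, double factor) {
      return (1.0 - factor) * sum1 + factor * sum2;
    }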
@@ -224,4 +209,85 @@ int SincResampler::ChunkSize() {
   return kBlockSize / io_sample_rate_ratio_;
 }
 
+float SincResampler::Convolve(const float* input_ptr, const float* k1,
+                              const float* k2,
+                              double kernel_interpolation_factor) {
+  // Rely on function-level static initialization to keep the ConvolveProc
+  // selection thread-safe.
+  typedef float (*ConvolveProc)(const float* src, const float* k1,
+                                const float* k2,
+                                double kernel_interpolation_factor);
+#if defined(ARCH_CPU_X86_FAMILY) && defined(__SSE__)
+  static const ConvolveProc kConvolveProc =
+      base::CPU().has_sse() ? Convolve_SSE : Convolve_C;
+#else
+  static const ConvolveProc kConvolveProc = Convolve_C;
+#endif
+
+  return kConvolveProc(input_ptr, k1, k2, kernel_interpolation_factor);
+}
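
Note: because kConvolveProc is latched the first time Convolve() runs, a test that wants to exercise both paths has to call the Convolve_* functions directly. A hedged sketch of such a parity check (the harness is hypothetical; it assumes the test can reach the Convolve_* entry points, e.g. via a friend declaration, and that |input|, |k1|, |k2| each hold kKernelSize floats with |k1|/|k2| 16-byte aligned):

    // Hypothetical check that the C and SSE paths agree.
    void CheckConvolveParity(const float* input, const float* k1,
                             const float* k2) {
      const double kFactor = 0.25;
      float c_result = SincResampler::Convolve_C(input, k1, k2, kFactor);
    #if defined(ARCH_CPU_X86_FAMILY) && defined(__SSE__)
      if (base::CPU().has_sse()) {
        float sse_result = SincResampler::Convolve_SSE(input, k1, k2, kFactor);
        // The SSE path accumulates in a different order, so allow float slop.
        CHECK_LT(std::fabs(sse_result - c_result), 1e-4);
      }
    #endif
    }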
+
+float SincResampler::Convolve_C(const float* input_ptr, const float* k1,
+                                const float* k2,
+                                double kernel_interpolation_factor) {
+  float sum1 = 0;
+  float sum2 = 0;
+
+  // Generate a single output sample. Unrolling this loop hurt performance in
+  // local testing.
+  int n = kKernelSize;
+  while (n--) {
+    sum1 += *input_ptr * *k1++;
+    sum2 += *input_ptr++ * *k2++;
+  }
+
+  // Linearly interpolate the two "convolutions".
+  return (1.0 - kernel_interpolation_factor) * sum1
+      + kernel_interpolation_factor * sum2;
+}
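
Note: Convolve_C evaluates (1 - f) * Σ input[i] * k1[i] + f * Σ input[i] * k2[i]. A quick sanity sketch with constant taps (hypothetical, not part of this CL; assumes kKernelSize == 32 and that Convolve_C is reachable from the calling code):

    // With input[i] == 1, each "convolution" is just the sum of the taps:
    //   sum1 = 32 * 0.5 = 16, sum2 = 32 * 1.5 = 48
    //   result = (1 - 0.25) * 16 + 0.25 * 48 = 12 + 12 = 24
    std::vector<float> input(32, 1.0f), k1(32, 0.5f), k2(32, 1.5f);
    float r = SincResampler::Convolve_C(&input[0], &k1[0], &k2[0], 0.25);
    DCHECK_LT(std::fabs(r - 24.0f), 1e-4f);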
+
+#if defined(ARCH_CPU_X86_FAMILY) && defined(__SSE__)
+float SincResampler::Convolve_SSE(const float* input_ptr, const float* k1,
+                                  const float* k2,
+                                  double kernel_interpolation_factor) {
+  // Ensure |k1|, |k2| are 16-byte aligned for SSE usage. Should always be true
+  // so long as kKernelSize is a multiple of 16.
+  DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(k1) & 0x0F);
+  DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(k2) & 0x0F);
+
+  __m128 m_input;
+  __m128 m_sums1 = _mm_setzero_ps();
+  __m128 m_sums2 = _mm_setzero_ps();
+
+  // Based on |input_ptr| alignment, we need to use loadu or load. Unrolling
+  // these loops hurt performance in local testing.
+  if (reinterpret_cast<uintptr_t>(input_ptr) & 0x0F) {
+    for (int i = 0; i < kKernelSize; i += 4) {
+      m_input = _mm_loadu_ps(input_ptr + i);
    Ami GONE FROM CHROMIUM, 2012/07/21 17:25:09:
      CL description says "eating at most one unaligned […]
    DaleCurtis, 2012/07/24 00:13:07:
      Done.
+      m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i)));
+      m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i)));
+    }
+  } else {
+    for (int i = 0; i < kKernelSize; i += 4) {
+      m_input = _mm_load_ps(input_ptr + i);
+      m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i)));
+      m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i)));
+    }
+  }
+
+  // Linearly interpolate the two "convolutions".
+  m_sums1 = _mm_mul_ps(m_sums1, _mm_set_ps1(1.0 - kernel_interpolation_factor));
+  m_sums2 = _mm_mul_ps(m_sums2, _mm_set_ps1(kernel_interpolation_factor));
+  m_sums1 = _mm_add_ps(m_sums1, m_sums2);
+
+  // Sum components together.
+  float result;
+  m_sums2 = _mm_add_ps(_mm_movehl_ps(m_sums1, m_sums1), m_sums1);
+  _mm_store_ss(&result, _mm_add_ss(m_sums2, _mm_shuffle_ps(
+      m_sums2, m_sums2, 1)));
+
+  return result;
+}
+#endif
+
 }  // namespace media
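
Note: the tail of Convolve_SSE reduces the four lanes of |m_sums1| to a scalar using only SSE1 intrinsics (no SSE3 haddps). The same reduction in isolation (helper name is hypothetical):

    // Horizontal sum of v = {a, b, c, d}.
    float HorizontalSum(__m128 v) {
      // movehl copies the high pair over the low pair: {c, d, c, d};
      // adding gives {a + c, b + d, 2c, 2d}.
      __m128 t = _mm_add_ps(_mm_movehl_ps(v, v), v);
      // Move lane 1 into lane 0 via shuffle and add: (a + c) + (b + d).
      float result;
      _mm_store_ss(&result, _mm_add_ss(t, _mm_shuffle_ps(t, t, 1)));
      return result;
    }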