Index: media/base/sinc_resampler.cc |
diff --git a/media/base/sinc_resampler.cc b/media/base/sinc_resampler.cc |
index 88e62044d27c6a77e416f93c8355bbeb094ffd5d..7a67cb0b6593fbb6940a056eb6bc41cce8bea327 100644 |
--- a/media/base/sinc_resampler.cc |
+++ b/media/base/sinc_resampler.cc |
@@ -38,8 +38,13 @@ |
#include <cmath> |
+#include "base/cpu.h" |
#include "base/logging.h" |
+#if defined(ARCH_CPU_X86_FAMILY) && defined(__SSE__) |
+#include <xmmintrin.h> |
+#endif |
+ |
namespace media { |
enum { |
@@ -63,13 +68,84 @@ enum { |
kBufferSize = kBlockSize + kKernelSize |
}; |
+ |
+#define CONVOLVE_ONE_SAMPLE \ |
Ami GONE FROM CHROMIUM
2012/07/18 04:40:35
#undef this when done with it.
Except you only use it once.
DaleCurtis
2012/07/21 02:35:14
Good point. Leftovers from the WebKit version of this file.
|
+ sum1 += *input_ptr * *k1++; \ |
+ sum2 += *input_ptr++ * *k2++; |
+ |
+static double Convolve_C(float* input_ptr, float* k1, float* k2, |
+ double kernel_interpolation_factor) { |
+ float sum1 = 0; |
+ float sum2 = 0; |
+ |
+ // Generate a single output sample. Unrolling this loop hurt performance in |
+ // local testing. |
+ int n = kKernelSize; |
+ while (n--) { |
+ CONVOLVE_ONE_SAMPLE |
+ } |
+ |
+ // Linearly interpolate the two "convolutions". |
+ return (1.0 - kernel_interpolation_factor) * sum1 |
+ + kernel_interpolation_factor * sum2; |
+} |
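Since CONVOLVE_ONE_SAMPLE is only ever expanded in the loop above (the point raised in the thread), the macro-free form of that loop is just its literal expansion, shown here for reference; the patch itself keeps the macro:

  // Both kernels consume the same input sample; |input_ptr| advances once per
  // iteration.
  int n = kKernelSize;
  while (n--) {
    sum1 += *input_ptr * *k1++;
    sum2 += *input_ptr++ * *k2++;
  }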
+ |
+#if defined(ARCH_CPU_X86_FAMILY) && defined(__SSE__) |
+#define CONVOLVE_4_SAMPLES(load) \ |
Ami GONE FROM CHROMIUM
2012/07/18 04:40:35
Any reason to use a #define instead of an inline function?
DaleCurtis
2012/07/21 02:35:14
Not sure how that'd work since we're modifying the local sums.
|
+ m_input = _mm_##load##_ps(input_ptr + i); \ |
+ m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i))); \ |
+ m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i))); |
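On the #define-versus-inline-function question above: since the macro's whole job is to update the two running __m128 sums, an inline helper would have to take those accumulators by pointer (or reference), roughly as below. This is a sketch of the idea only, not what this patch does.

  // Aligned variant; an unaligned twin would use _mm_loadu_ps() for the input.
  static inline void Convolve4Samples(const float* input_ptr, const float* k1,
                                      const float* k2, int i,
                                      __m128* m_sums1, __m128* m_sums2) {
    __m128 m_input = _mm_load_ps(input_ptr + i);
    *m_sums1 = _mm_add_ps(*m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i)));
    *m_sums2 = _mm_add_ps(*m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i)));
  }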
+ |
+static const int kFloatsPerPass = sizeof(__m128) / sizeof(float); |
Ami GONE FROM CHROMIUM
2012/07/18 04:40:35
Not sure what this generality buys you given you've hard-coded __m128 elsewhere.
DaleCurtis
2012/07/21 02:35:14
Removed.
|
+static double Convolve_SSE(float* input_ptr, float* k1, float* k2, |
Ami GONE FROM CHROMIUM
2012/07/18 04:40:35
i can haz a test that shows the two Convolve_* functions produce the same results?
DaleCurtis
2012/07/21 02:35:14
Done.
|
+ double kernel_interpolation_factor) { |
+ // Ensure |k1|, |k2| are aligned for SSE usage. Should always be true so long |
+ // as kKernelSize is a power of 2. |
+ DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(k1) & (sizeof(__m128) - 1)); |
+ DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(k2) & (sizeof(__m128) - 1)); |
+ |
+ __m128 m_input; |
+ __m128 m_sums1 = _mm_setzero_ps(); |
+ __m128 m_sums2 = _mm_setzero_ps(); |
+ |
+ // Based on |input_ptr| alignment, we need to use loadu or load. Unrolling |
+ // these loops hurt performance in local testing. |
+ if (reinterpret_cast<uintptr_t>(input_ptr) & (sizeof(__m128) - 1)) { |
+ for (int i = 0; i < kKernelSize; i += kFloatsPerPass) { |
+ CONVOLVE_4_SAMPLES(loadu) |
+ } |
+ } else { |
+ for (int i = 0; i < kKernelSize; i += kFloatsPerPass) { |
+ CONVOLVE_4_SAMPLES(load) |
+ } |
+ } |
+ |
+ // Linearly interpolate the two "convolutions". |
+ m_sums1 = _mm_mul_ps(m_sums1, _mm_set_ps1(1.0 - kernel_interpolation_factor)); |
+ m_sums2 = _mm_mul_ps(m_sums2, _mm_set_ps1(kernel_interpolation_factor)); |
+ m_sums1 = _mm_add_ps(m_sums1, m_sums2); |
+ |
+ // Sum components together. |
+ float result; |
+ m_sums2 = _mm_add_ps(_mm_movehl_ps(m_sums1, m_sums1), m_sums1); |
+ _mm_store_ss(&result, _mm_add_ss(m_sums2, _mm_shuffle_ps( |
+ m_sums2, m_sums2, 1))); |
+ |
+ return result; |
+} |
+#endif |
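Re the test request above, which Dale answers with "Done": a minimal sketch of what such a comparison could look like, assuming the test can see Convolve_C, Convolve_SSE, and kKernelSize (for example by being built into the same target) and using a GCC/Clang alignment attribute. This is illustrative, not the test that actually landed.

  #include "testing/gtest/include/gtest/gtest.h"

  TEST(SincResamplerTest, ConvolveVariantsMatch) {
    // 16-byte aligned buffers so both the aligned SSE loads and the C path
    // accept the same inputs.
    float input[kKernelSize] __attribute__((aligned(16)));
    float k1[kKernelSize] __attribute__((aligned(16)));
    float k2[kKernelSize] __attribute__((aligned(16)));
    for (int i = 0; i < kKernelSize; ++i) {
      input[i] = i * 0.25f;
      k1[i] = 1.0f / (i + 1);
      k2[i] = 1.0f - k1[i];
    }
    double c_result = Convolve_C(input, k1, k2, 0.25);
    double sse_result = Convolve_SSE(input, k1, k2, 0.25);
    // Loose epsilon: the SSE path accumulates in a different order.
    EXPECT_NEAR(c_result, sse_result, 1e-4);
  }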
+ |
+typedef double (*ConvolveProc)(float* src, float* k1, float* k2, |
+ double kernel_interpolation_factor); |
+static ConvolveProc convolve_proc = NULL; |
+ |
SincResampler::SincResampler(double io_sample_rate_ratio, const ReadCB& read_cb) |
: io_sample_rate_ratio_(io_sample_rate_ratio), |
virtual_source_idx_(0), |
buffer_primed_(false), |
read_cb_(read_cb), |
- // TODO(dalecurtis): When we switch to AVX/SSE optimization, we'll need to |
- // allocate with 32-byte alignment and ensure they're sized % 32 bytes. |
+ // TODO(dalecurtis): When we switch to SSE optimization, we'll need to |
+ // allocate with 16-byte alignment (default linux, mac, not win) |
Ami GONE FROM CHROMIUM
2012/07/18 04:40:35
IDK what this means
DaleCurtis
2012/07/21 02:35:14
Changed. Was a reminder to me to land my AlignedMemory patch.
|
kernel_storage_(new float[kKernelStorageSize]), |
input_buffer_(new float[kBufferSize]), |
// Setup various region pointers in the buffer (see diagram above). |
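Regarding the alignment TODO and the AlignedMemory reminder in the thread above: the eventual shape would presumably allocate the kernel and input buffers 16-byte aligned instead of with plain new[]. A hedged sketch assuming base::AlignedAlloc()/base::AlignedFree() from base/memory/aligned_memory.h, not the code in this patch:

  // Hypothetical: 16-byte aligned storage keeps _mm_load_ps() legal on the
  // kernel pointers regardless of the default heap alignment (the Windows
  // caveat the TODO alludes to).
  float* kernel_storage = static_cast<float*>(
      base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16));
  // ... and on teardown, a deleter that calls base::AlignedFree() rather than
  // delete[]:
  base::AlignedFree(kernel_storage);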
@@ -96,6 +172,13 @@ SincResampler::SincResampler(double io_sample_rate_ratio, const ReadCB& read_cb) |
// r5_ size correct and at the end of the buffer. |
DCHECK_EQ(r5_ + kBlockSize, r1_ + kBufferSize); |
+ if (!convolve_proc) { |
Ami GONE FROM CHROMIUM
2012/07/18 04:40:35
This is racy.
DaleCurtis
2012/07/21 02:35:14
Done.
|
+ convolve_proc = Convolve_C; |
+ base::CPU cpu; |
+ if (cpu.has_sse()) |
+ convolve_proc = Convolve_SSE; |
Ami GONE FROM CHROMIUM
2012/07/18 04:40:35
Does this compile on build platforms that don't qualify for the SSE #if above?
DaleCurtis
2012/07/21 02:35:14
Done.
|
+ } |
+ |
memset(kernel_storage_.get(), 0, |
sizeof(*kernel_storage_.get()) * kKernelStorageSize); |
memset(input_buffer_.get(), 0, sizeof(*input_buffer_.get()) * kBufferSize); |
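On the "This is racy" comment above: |convolve_proc| is file-scope state written from the constructor, so two SincResamplers constructed on different threads race on it, and the unguarded reference to Convolve_SSE will not build on non-x86 platforms (the second question above). One race-free shape, illustrative rather than necessarily the fix that landed, keeps the choice per instance and guards the SSE reference:

  // Illustrative: with a ConvolveProc convolve_proc_ member declared in
  // sinc_resampler.h, each instance writes only its own state.
  convolve_proc_ = Convolve_C;
  #if defined(ARCH_CPU_X86_FAMILY) && defined(__SSE__)
  if (base::CPU().has_sse())
    convolve_proc_ = Convolve_SSE;
  #endif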
@@ -168,36 +251,18 @@ void SincResampler::Resample(float* destination, int frames) { |
double virtual_offset_idx = subsample_remainder * kKernelOffsetCount; |
int offset_idx = static_cast<int>(virtual_offset_idx); |
+ // We'll compute "convolutions" for the two kernels which straddle |
+ // |virtual_source_idx_|. |
float* k1 = kernel_storage_.get() + offset_idx * kKernelSize; |
float* k2 = k1 + kKernelSize; |
// Initialize input pointer based on quantized |virtual_source_idx_|. |
float* input_ptr = r1_ + source_idx; |
- // We'll compute "convolutions" for the two kernels which straddle |
- // |virtual_source_idx_|. |
- float sum1 = 0; |
- float sum2 = 0; |
- |
// Figure out how much to weight each kernel's "convolution". |
double kernel_interpolation_factor = virtual_offset_idx - offset_idx; |
- |
- // Generate a single output sample. |
- int n = kKernelSize; |
- float input; |
- // TODO(dalecurtis): For initial commit, I've ripped out all the SSE |
- // optimizations, these definitely need to go back in before release. |
- while (n--) { |
- input = *input_ptr++; |
- sum1 += input * *k1++; |
- sum2 += input * *k2++; |
- } |
- |
- // Linearly interpolate the two "convolutions". |
- double result = (1.0 - kernel_interpolation_factor) * sum1 |
- + kernel_interpolation_factor * sum2; |
- |
- *destination++ = result; |
+ *destination++ = convolve_proc( |
+ input_ptr, k1, k2, kernel_interpolation_factor); |
// Advance the virtual index. |
virtual_source_idx_ += io_sample_rate_ratio_; |
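To make the indexing above concrete, take io_sample_rate_ratio_ = 0.7 and an illustrative kKernelOffsetCount of 32 (the constant's value is not part of this diff). When producing the second output sample, virtual_source_idx_ = 0.7, so the integer read position is 0 and the fractional remainder is 0.7. Then:

  virtual_offset_idx = 0.7 * 32 = 22.4
  offset_idx = 22
  kernel_interpolation_factor = 0.4

so k1 points at the kernel for offset 22, k2 at the kernel for offset 23, and convolve_proc() returns 0.6 * sum1 + 0.4 * sum2.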