Index: Source/platform/audio/VectorMath.cpp
diff --git a/Source/platform/audio/VectorMath.cpp b/Source/platform/audio/VectorMath.cpp
index 0bbe6bae9fdd85f346971db6589ac41a6c2d93f9..cb48cc0634d9322cf15d4ea7617522d3997e8069 100644
--- a/Source/platform/audio/VectorMath.cpp
+++ b/Source/platform/audio/VectorMath.cpp
@@ -35,7 +35,7 @@
 #include <Accelerate/Accelerate.h>
 #endif
 
-#ifdef __SSE2__
+#if CPU(X86) || CPU(X86_64)
 #include <emmintrin.h>
 #endif
 
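This hunk, like every one below, swaps the compiler-defined __SSE2__ test for WTF's CPU() macro. MSVC never defines __SSE2__, so with the old guard these SSE paths were skipped entirely on Windows; the new guard keys off the target architecture instead, which presumably is the point of the change (SSE2 is architecturally guaranteed on x86-64, and Chrome's supported 32-bit x86 targets assume it as well). A condensed sketch of how CPU() works, abridged from the wtf/Platform.h mechanism (the real header detects many more architectures):

    // CPU(FOO) expands to a check of an internal WTF_CPU_FOO define.
    #define CPU(WTF_FEATURE) (defined WTF_CPU_##WTF_FEATURE && WTF_CPU_##WTF_FEATURE)

    // Architecture detection covering both GCC/Clang and MSVC spellings.
    #if defined(__x86_64__) || defined(_M_X64)
    #define WTF_CPU_X86_64 1
    #endif
    #if defined(__i386__) || defined(_M_IX86)
    #define WTF_CPU_X86 1
    #endif

With that in place, #if CPU(X86) || CPU(X86_64) holds for any x86 target regardless of which compiler built it.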
@@ -135,7 +135,7 @@ void vsma(const float* sourceP, int sourceStride, const float* scale, float* destP, int destStride, size_t framesToProcess)
 {
     int n = framesToProcess;
 
-#ifdef __SSE2__
+#if CPU(X86) || CPU(X86_64)
     if ((sourceStride == 1) && (destStride == 1)) {
         float k = *scale;
 
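For context, vsma is a scale-multiply-add (destP[i] += *scale * sourceP[i]); the guard above protects an SSE2 loop that handles four frames per iteration. A hedged sketch of that kind of kernel, with illustrative names rather than the exact Blink code (sourceP and destP assumed 16-byte aligned, leftover frames left to the scalar loop):

    #include <emmintrin.h>

    static void vsmaCore(const float* sourceP, float k, float* destP, int n)
    {
        __m128 mScale = _mm_set_ps1(k);
        int tailFrames = n % 4; // 0-3 frames for the scalar epilogue.
        const float* endP = destP + n - tailFrames;
        while (destP < endP) {
            __m128 source = _mm_load_ps(sourceP); // Aligned 4-float load.
            __m128 dest = _mm_load_ps(destP);
            dest = _mm_add_ps(dest, _mm_mul_ps(source, mScale));
            _mm_store_ps(destP, dest);
            sourceP += 4;
            destP += 4;
        }
    }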
@@ -208,7 +208,7 @@ void vsmul(const float* sourceP, int sourceStride, const float* scale, float* destP, int destStride, size_t framesToProcess)
 {
     int n = framesToProcess;
 
-#ifdef __SSE2__
+#if CPU(X86) || CPU(X86_64)
     if ((sourceStride == 1) && (destStride == 1)) {
         float k = *scale;
 
@@ -279,7 +279,7 @@ void vsmul(const float* sourceP, int sourceStride, const float* scale, float* destP, int destStride, size_t framesToProcess)
         sourceP += sourceStride;
         destP += destStride;
     }
-#ifdef __SSE2__
+#if CPU(X86) || CPU(X86_64)
     }
 #endif
 }
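Note why each of these functions carries the guard twice: the first #if opens an if/else whose else branch shares the scalar loop with non-x86 builds, so the brace closing that else must sit under a second guard. The shape, as a compilable stand-in assuming the CPU() macro from wtf/Platform.h is in scope:

    static void vsmulShape(const float* sourceP, int sourceStride, float k,
                           float* destP, int destStride, int n)
    {
    #if CPU(X86) || CPU(X86_64)
        if ((sourceStride == 1) && (destStride == 1)) {
            // SSE2 fast path would go here.
        } else {
    #endif
            // Scalar path: the only path on other CPUs, the strided
            // fallback on x86.
            while (n--) {
                *destP = k * *sourceP;
                sourceP += sourceStride;
                destP += destStride;
            }
    #if CPU(X86) || CPU(X86_64)
        } // Closes the else opened under the first guard.
    #endif
    }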
@@ -288,7 +288,7 @@ void vadd(const float* source1P, int sourceStride1, const float* source2P, int sourceStride2, float* destP, int destStride, size_t framesToProcess)
 {
     int n = framesToProcess;
 
-#ifdef __SSE2__
+#if CPU(X86) || CPU(X86_64)
     if ((sourceStride1 ==1) && (sourceStride2 == 1) && (destStride == 1)) {
         // If the sourceP address is not 16-byte aligned, the first several frames (at most three) should be processed separately.
         while ((reinterpret_cast<size_t>(source1P) & 0x0F) && n) {
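The comment spells out the standard alignment prologue for aligned SSE loads: _mm_load_ps requires a 16-byte aligned address, and since a float is 4 bytes, at most three leading frames must be peeled off scalar-style before the pointer reaches a boundary. A hedged sketch of the pattern as a helper (illustrative, not the exact Blink code):

    #include <cstdint>

    // Peel scalar frames until source1P is 16-byte aligned or none remain;
    // returns the frame count left for the vector loop.
    static int addUntilAligned(const float*& source1P, const float*& source2P,
                               float*& destP, int n)
    {
        while ((reinterpret_cast<uintptr_t>(source1P) & 0x0F) && n) {
            *destP = *source1P + *source2P;
            source1P++;
            source2P++;
            destP++;
            n--;
        }
        return n;
    }

Only source1P is forced into alignment; source2P and destP may still be unaligned, which is why the vector body has to choose between aligned and unaligned loads and stores for them.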
@@ -391,7 +391,7 @@ void vadd(const float* source1P, int sourceStride1, const float* source2P, int sourceStride2, float* destP, int destStride, size_t framesToProcess)
         source2P += sourceStride2;
         destP += destStride;
     }
-#ifdef __SSE2__
+#if CPU(X86) || CPU(X86_64)
     }
 #endif
 }
@@ -401,7 +401,7 @@ void vmul(const float* source1P, int sourceStride1, const float* source2P, int sourceStride2, float* destP, int destStride, size_t framesToProcess)
 {
     int n = framesToProcess;
 
-#ifdef __SSE2__
+#if CPU(X86) || CPU(X86_64)
     if ((sourceStride1 == 1) && (sourceStride2 == 1) && (destStride == 1)) {
         // If the source1P address is not 16-byte aligned, the first several frames (at most three) should be processed separately.
         while ((reinterpret_cast<uintptr_t>(source1P) & 0x0F) && n) {
@@ -474,7 +474,7 @@ void vmul(const float* source1P, int sourceStride1, const float* source2P, int sourceStride2, float* destP, int destStride, size_t framesToProcess)
 void zvmul(const float* real1P, const float* imag1P, const float* real2P, const float* imag2P, float* realDestP, float* imagDestP, size_t framesToProcess)
 {
     unsigned i = 0;
-#ifdef __SSE2__
+#if CPU(X86) || CPU(X86_64)
     // Only use the SSE optimization in the very common case that all addresses are 16-byte aligned.
     // Otherwise, fall through to the scalar code below.
     if (!(reinterpret_cast<uintptr_t>(real1P) & 0x0F)
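zvmul multiplies two complex vectors stored as split real/imaginary arrays, so the guard checks all six pointers and only takes the SSE path when every one is aligned. One vector step of the operation, (a+bi)(c+di) = (ac-bd) + (ad+bc)i, sketched with illustrative names (not the exact Blink code):

    #include <emmintrin.h>

    static void zvmulStep(const float* real1P, const float* imag1P,
                          const float* real2P, const float* imag2P,
                          float* realDestP, float* imagDestP, unsigned i)
    {
        __m128 real1 = _mm_load_ps(real1P + i);
        __m128 imag1 = _mm_load_ps(imag1P + i);
        __m128 real2 = _mm_load_ps(real2P + i);
        __m128 imag2 = _mm_load_ps(imag2P + i);
        // Compute both results before storing so the step stays correct
        // even if a destination aliases an input.
        __m128 real = _mm_sub_ps(_mm_mul_ps(real1, real2),
                                 _mm_mul_ps(imag1, imag2));
        __m128 imag = _mm_add_ps(_mm_mul_ps(real1, imag2),
                                 _mm_mul_ps(imag1, real2));
        _mm_store_ps(realDestP + i, real);
        _mm_store_ps(imagDestP + i, imag);
    }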
@@ -532,7 +532,7 @@ void vsvesq(const float* sourceP, int sourceStride, float* sumP, size_t framesToProcess)
     int n = framesToProcess;
     float sum = 0;
 
-#ifdef __SSE2__
+#if CPU(X86) || CPU(X86_64)
     if (sourceStride == 1) {
         // If the sourceP address is not 16-byte aligned, the first several frames (at most three) should be processed separately.
         while ((reinterpret_cast<uintptr_t>(sourceP) & 0x0F) && n) {
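vsvesq accumulates a sum of squares. After the alignment prologue above, the vector loop can keep four running partial sums in one register and reduce them once at the end; a hedged sketch of that core (aligned sourceP, n a multiple of four, illustrative names):

    #include <emmintrin.h>

    static float sumOfSquaresCore(const float* sourceP, int n)
    {
        __m128 mSum = _mm_setzero_ps();
        for (int i = 0; i < n; i += 4) {
            __m128 source = _mm_load_ps(sourceP + i);
            mSum = _mm_add_ps(mSum, _mm_mul_ps(source, source));
        }
        // Horizontal reduction of the four lanes.
        float lanes[4];
        _mm_storeu_ps(lanes, mSum);
        return lanes[0] + lanes[1] + lanes[2] + lanes[3];
    }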
@@ -597,7 +597,7 @@ void vmaxmgv(const float* sourceP, int sourceStride, float* maxP, size_t framesToProcess)
     int n = framesToProcess;
     float max = 0;
 
-#ifdef __SSE2__
+#if CPU(X86) || CPU(X86_64)
     if (sourceStride == 1) {
         // If the sourceP address is not 16-byte aligned, the first several frames (at most three) should be processed separately.
         while ((reinterpret_cast<uintptr_t>(sourceP) & 0x0F) && n) {
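vmaxmgv finds the maximum magnitude. SSE2 has no float absolute-value instruction, so the usual trick is to clear the sign bit with an andnot mask and keep a running _mm_max_ps; a hedged sketch of that core (aligned sourceP, n a multiple of four, illustrative names):

    #include <emmintrin.h>

    static float maxMagnitudeCore(const float* sourceP, int n)
    {
        __m128 signMask = _mm_set1_ps(-0.0f); // Only the sign bit set.
        __m128 mMax = _mm_setzero_ps();
        for (int i = 0; i < n; i += 4) {
            __m128 source = _mm_load_ps(sourceP + i);
            source = _mm_andnot_ps(signMask, source); // |x| lane-wise.
            mMax = _mm_max_ps(mMax, source);
        }
        // Reduce the four lanes to a single scalar maximum.
        float lanes[4];
        _mm_storeu_ps(lanes, mMax);
        float max = lanes[0];
        for (int i = 1; i < 4; ++i) {
            if (lanes[i] > max)
                max = lanes[i];
        }
        return max;
    }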