| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2010, Google Inc. All rights reserved. | 2 * Copyright (C) 2010, Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
| 6 * are met: | 6 * are met: |
| 7 * 1. Redistributions of source code must retain the above copyright | 7 * 1. Redistributions of source code must retain the above copyright |
| 8 * notice, this list of conditions and the following disclaimer. | 8 * notice, this list of conditions and the following disclaimer. |
| 9 * 2. Redistributions in binary form must reproduce the above copyright | 9 * 2. Redistributions in binary form must reproduce the above copyright |
| 10 * notice, this list of conditions and the following disclaimer in the | 10 * notice, this list of conditions and the following disclaimer in the |
| 11 * documentation and/or other materials provided with the distribution. | 11 * documentation and/or other materials provided with the distribution. |
| 12 * | 12 * |
| 13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY | 13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY |
| 14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | 14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
| 15 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | 15 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
| 16 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY | 16 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY |
| 17 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | 17 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
| 18 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | 18 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
| 19 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | 19 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON |
| 20 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 20 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | 21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
| 22 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 22 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 23 */ | 23 */ |
| 24 | 24 |
| 25 #include "config.h" | 25 #include "config.h" |
| 26 | 26 |
| 27 #if ENABLE(WEB_AUDIO) | 27 #if ENABLE(WEB_AUDIO) |
| 28 | 28 |
| 29 #include "platform/audio/VectorMath.h" | 29 #include "platform/audio/VectorMath.h" |
| 30 |
| 30 #include "wtf/Assertions.h" | 31 #include "wtf/Assertions.h" |
| 31 #include "wtf/CPU.h" | 32 #include "wtf/CPU.h" |
| 33 #include <algorithm> |
| 34 #include <math.h> |
| 32 #include <stdint.h> | 35 #include <stdint.h> |
| 33 | 36 |
| 34 #if OS(MACOSX) | 37 #if OS(MACOSX) |
| 38 // On the Mac we use the highly optimized versions in Accelerate.framework |
| 39 // In 32-bit mode (__ppc__ or __i386__) <Accelerate/Accelerate.h> includes <vecLib/vDSP_translate.h> which defines macros of the same name as |
| 40 // our namespaced function names, so we must handle this case differently. Other architectures (64bit, ARM, etc.) do not include this header file. |
| 35 #include <Accelerate/Accelerate.h> | 41 #include <Accelerate/Accelerate.h> |
| 36 #endif | 42 #endif |
| 37 | 43 |
| 38 #if CPU(X86) || CPU(X86_64) | 44 #if CPU(X86) || CPU(X86_64) |
| 39 #include <emmintrin.h> | 45 #include <emmintrin.h> |
| 40 #endif | 46 #endif |
| 41 | 47 |
| 42 #if HAVE(ARM_NEON_INTRINSICS) | |
| 43 #include <arm_neon.h> | |
| 44 #endif | |
| 45 | |
| 46 #include <math.h> | |
| 47 #include <algorithm> | |
| 48 | 48 |
| 49 namespace blink { | 49 namespace blink { |
| 50 | 50 |
| 51 namespace VectorMath { | 51 namespace VectorMath { |
| 52 | 52 |
| 53 #if OS(MACOSX) | 53 #if OS(MACOSX) |
| 54 // On the Mac we use the highly optimized versions in Accelerate.framework | |
| 55 // In 32-bit mode (__ppc__ or __i386__) <Accelerate/Accelerate.h> includes <vecLib/vDSP_translate.h> which defines macros of the same name as | |
| 56 // our namespaced function names, so we must handle this case differently. Other architectures (64bit, ARM, etc.) do not include this header file. | |
| 57 | 54 |
| 58 void vsmul(const float* sourceP, int sourceStride, const float* scale, float* destP, int destStride, size_t framesToProcess) | 55 void vsmul(const float* sourceP, int sourceStride, const float* scale, float* destP, int destStride, size_t framesToProcess) |
| 59 { | 56 { |
| 60 #if CPU(X86) | 57 #if OS(MACOSX) && !CPU(X86) |
| 58 vDSP_vsmul(sourceP, sourceStride, scale, destP, destStride, framesToProcess); |
| 59 #elif HAVE(ARM_NEON_INTRINSICS) |
| 60 WTF_CPU_ARM_NEON_WRAP(vsmul)(sourceP, sourceStride, scale, destP, destStride, framesToProcess); |
| 61 #else |
| 61 ::vsmul(sourceP, sourceStride, scale, destP, destStride, framesToProcess); | 62 ::vsmul(sourceP, sourceStride, scale, destP, destStride, framesToProcess); |
| 62 #else | |
| 63 vDSP_vsmul(sourceP, sourceStride, scale, destP, destStride, framesToProcess); | |
| 64 #endif | 63 #endif |
| 65 } | 64 } |
| 66 | 65 |
| 67 void vadd(const float* source1P, int sourceStride1, const float* source2P, int sourceStride2, float* destP, int destStride, size_t framesToProcess) | 66 void vadd(const float* source1P, int sourceStride1, const float* source2P, int sourceStride2, float* destP, int destStride, size_t framesToProcess) |
| 68 { | 67 { |
| 69 #if CPU(X86) | 68 #if OS(MACOSX) && !CPU(X86) |
| 69 vDSP_vadd(source1P, sourceStride1, source2P, sourceStride2, destP, destStride, framesToProcess); |
| 70 #elif HAVE(ARM_NEON_INTRINSICS) |
| 71 WTF_CPU_ARM_NEON_WRAP(vadd)(source1P, sourceStride1, source2P, sourceStride2, destP, destStride, framesToProcess); |
| 72 #else |
| 70 ::vadd(source1P, sourceStride1, source2P, sourceStride2, destP, destStride, framesToProcess); | 73 ::vadd(source1P, sourceStride1, source2P, sourceStride2, destP, destStride, framesToProcess); |
| 71 #else | |
| 72 vDSP_vadd(source1P, sourceStride1, source2P, sourceStride2, destP, destStride, framesToProcess); | |
| 73 #endif | 74 #endif |
| 74 } | 75 } |
| 75 | 76 |
| 76 void vmul(const float* source1P, int sourceStride1, const float* source2P, int sourceStride2, float* destP, int destStride, size_t framesToProcess) | 77 void vmul(const float* source1P, int sourceStride1, const float* source2P, int sourceStride2, float* destP, int destStride, size_t framesToProcess) |
| 77 { | 78 { |
| 78 #if CPU(X86) | 79 #if OS(MACOSX) && !CPU(X86) |
| 80 vDSP_vmul(source1P, sourceStride1, source2P, sourceStride2, destP, destStride, framesToProcess); |
| 81 #elif HAVE(ARM_NEON_INTRINSICS) |
| 82 WTF_CPU_ARM_NEON_WRAP(vmul)(source1P, sourceStride1, source2P, sourceStride2, destP, destStride, framesToProcess); |
| 83 #else |
| 79 ::vmul(source1P, sourceStride1, source2P, sourceStride2, destP, destStride, framesToProcess); | 84 ::vmul(source1P, sourceStride1, source2P, sourceStride2, destP, destStride, framesToProcess); |
| 80 #else | |
| 81 vDSP_vmul(source1P, sourceStride1, source2P, sourceStride2, destP, destStride, framesToProcess); | |
| 82 #endif | 85 #endif |
| 83 } | 86 } |
| 84 | 87 |
| 85 void zvmul(const float* real1P, const float* imag1P, const float* real2P, const float* imag2P, float* realDestP, float* imagDestP, size_t framesToProcess) | 88 void zvmul(const float* real1P, const float* imag1P, const float* real2P, const float* imag2P, float* realDestP, float* imagDestP, size_t framesToProcess) |
| 86 { | 89 { |
| 87 DSPSplitComplex sc1; | 90 DSPSplitComplex sc1; |
| 88 DSPSplitComplex sc2; | 91 DSPSplitComplex sc2; |
| 89 DSPSplitComplex dest; | 92 DSPSplitComplex dest; |
| 90 sc1.realp = const_cast<float*>(real1P); | 93 sc1.realp = const_cast<float*>(real1P); |
| 91 sc1.imagp = const_cast<float*>(imag1P); | 94 sc1.imagp = const_cast<float*>(imag1P); |
| (...skipping 589 matching lines...) |
| 681 } | 684 } |
| 682 } | 685 } |
| 683 | 686 |
| 684 #endif // OS(MACOSX) | 687 #endif // OS(MACOSX) |
| 685 | 688 |
| 686 } // namespace VectorMath | 689 } // namespace VectorMath |
| 687 | 690 |
| 688 } // namespace blink | 691 } // namespace blink |
| 689 | 692 |
| 690 #endif // ENABLE(WEB_AUDIO) | 693 #endif // ENABLE(WEB_AUDIO) |
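
Note on the restructuring shown above: the NEW column gives every routine the same dispatch ladder — vDSP on the Mac except 32-bit x86 (where the vecLib/vDSP_translate.h macros collide with the namespaced names), the wrapped NEON implementation when ARM NEON intrinsics are available, and the plain fallback otherwise. Below is a minimal standalone C++ sketch of that shape for one routine; the sketch namespace, the SKETCH_USE_VDSP macro, and the scalar loop standing in for the non-vDSP branches are illustrative assumptions, not code from this change.

    #include <stddef.h>

    // Hypothetical stand-in for Blink's OS(MACOSX) && !CPU(X86) check: use
    // Accelerate everywhere on the Mac except 32-bit x86, where
    // <vecLib/vDSP_translate.h> defines colliding macro names.
    #if defined(__APPLE__) && !defined(__i386__)
    #define SKETCH_USE_VDSP 1
    #include <Accelerate/Accelerate.h>
    #endif

    namespace sketch {

    // vsmul: destP[i * destStride] = sourceP[i * sourceStride] * *scale.
    // The reviewed change inserts a HAVE(ARM_NEON_INTRINSICS) branch between
    // these two cases; a plain scalar strided loop stands in for it here.
    void vsmul(const float* sourceP, int sourceStride, const float* scale,
               float* destP, int destStride, size_t framesToProcess)
    {
    #if defined(SKETCH_USE_VDSP)
        // Accelerate's vDSP_vsmul handles strided buffers directly.
        vDSP_vsmul(sourceP, sourceStride, scale, destP, destStride, framesToProcess);
    #else
        // Portable fallback: multiply each strided frame by the scale value.
        const float k = *scale;
        for (size_t i = 0; i < framesToProcess; ++i)
            destP[i * destStride] = sourceP[i * sourceStride] * k;
    #endif
    }

    } // namespace sketch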