OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (C) 2010 Google Inc. All rights reserved. | 2 * Copyright (C) 2010 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
6 * are met: | 6 * are met: |
7 * | 7 * |
8 * 1. Redistributions of source code must retain the above copyright | 8 * 1. Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * 2. Redistributions in binary form must reproduce the above copyright | 10 * 2. Redistributions in binary form must reproduce the above copyright |
(...skipping 11 matching lines...) Expand all Loading... | |
22 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | 22 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
23 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | 23 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
24 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 */ | 27 */ |
28 | 28 |
29 #include "platform/audio/AudioDestination.h" | 29 #include "platform/audio/AudioDestination.h" |
30 | 30 |
31 #include "platform/Histogram.h" | 31 #include "platform/Histogram.h" |
32 #include "platform/audio/AudioPullFIFO.h" | |
33 #include "platform/audio/AudioUtilities.h" | 32 #include "platform/audio/AudioUtilities.h" |
33 #include "platform/audio/PushPullFIFO.h" | |
34 #include "platform/weborigin/SecurityOrigin.h" | 34 #include "platform/weborigin/SecurityOrigin.h" |
35 #include "public/platform/Platform.h" | 35 #include "public/platform/Platform.h" |
36 #include "public/platform/WebSecurityOrigin.h" | 36 #include "public/platform/WebSecurityOrigin.h" |
37 #include "wtf/PtrUtil.h" | 37 #include "wtf/PtrUtil.h" |
38 #include <memory> | 38 #include <memory> |
39 | 39 |
40 namespace blink { | 40 namespace blink { |
41 | 41 |
42 // FIFO Size. | 42 // FIFO Size. |
43 // | 43 // |
(...skipping 16 matching lines...) Expand all Loading... | |
60 unsigned numberOfOutputChannels, | 60 unsigned numberOfOutputChannels, |
61 float sampleRate, | 61 float sampleRate, |
62 PassRefPtr<SecurityOrigin> securityOrigin) | 62 PassRefPtr<SecurityOrigin> securityOrigin) |
63 : m_numberOfOutputChannels(numberOfOutputChannels), | 63 : m_numberOfOutputChannels(numberOfOutputChannels), |
64 m_sampleRate(sampleRate), | 64 m_sampleRate(sampleRate), |
65 m_isPlaying(false), | 65 m_isPlaying(false), |
66 m_callback(callback), | 66 m_callback(callback), |
67 m_outputBus(AudioBus::create(numberOfOutputChannels, | 67 m_outputBus(AudioBus::create(numberOfOutputChannels, |
68 AudioUtilities::kRenderQuantumFrames, | 68 AudioUtilities::kRenderQuantumFrames, |
69 false)), | 69 false)), |
70 m_renderBus(AudioBus::create(numberOfOutputChannels, | |
71 AudioUtilities::kRenderQuantumFrames)), | |
70 m_framesElapsed(0) { | 72 m_framesElapsed(0) { |
71 // Calculate the optimum buffer size first. | 73 // Calculate the optimum buffer size first. |
72 if (calculateBufferSize()) { | 74 if (calculateBufferSize()) { |
73 // Create WebAudioDevice. blink::WebAudioDevice is designed to support the | 75 // Create WebAudioDevice. blink::WebAudioDevice is designed to support the |
74 // local input (e.g. loopback from OS audio system), but Chromium's media | 76 // local input (e.g. loopback from OS audio system), but Chromium's media |
75 // renderer does not support it currently. Thus, we use zero for the number | 77 // renderer does not support it currently. Thus, we use zero for the number |
76 // of input channels. | 78 // of input channels. |
77 m_webAudioDevice = WTF::wrapUnique(Platform::current()->createAudioDevice( | 79 m_webAudioDevice = WTF::wrapUnique(Platform::current()->createAudioDevice( |
78 m_callbackBufferSize, 0, numberOfOutputChannels, sampleRate, this, | 80 m_callbackBufferSize, 0, numberOfOutputChannels, sampleRate, this, |
79 String(), std::move(securityOrigin))); | 81 String(), std::move(securityOrigin))); |
80 DCHECK(m_webAudioDevice); | 82 DCHECK(m_webAudioDevice); |
81 | 83 |
82 // Create a FIFO. | 84 // Create a FIFO. |
83 m_fifo = WTF::wrapUnique( | 85 m_fifo = |
84 new AudioPullFIFO(*this, numberOfOutputChannels, kFIFOSize, | 86 WTF::wrapUnique(new PushPullFIFO(numberOfOutputChannels, kFIFOSize)); |
85 AudioUtilities::kRenderQuantumFrames)); | |
86 } else { | 87 } else { |
87 NOTREACHED(); | 88 NOTREACHED(); |
88 } | 89 } |
89 } | 90 } |
90 | 91 |
91 AudioDestination::~AudioDestination() { | 92 AudioDestination::~AudioDestination() { |
92 stop(); | 93 stop(); |
93 } | 94 } |
94 | 95 |
95 void AudioDestination::render(const WebVector<float*>& destinationData, | 96 void AudioDestination::render(const WebVector<float*>& destinationData, |
96 size_t numberOfFrames, | 97 size_t numberOfFrames, |
97 double delay, | 98 double delay, |
98 double delayTimestamp, | 99 double delayTimestamp, |
99 size_t priorFramesSkipped) { | 100 size_t priorFramesSkipped) { |
100 DCHECK_EQ(destinationData.size(), m_numberOfOutputChannels); | 101 CHECK_EQ(destinationData.size(), m_numberOfOutputChannels); |
101 if (destinationData.size() != m_numberOfOutputChannels) | 102 CHECK_EQ(numberOfFrames, m_callbackBufferSize); |
102 return; | |
103 | |
104 DCHECK_EQ(numberOfFrames, m_callbackBufferSize); | |
105 if (numberOfFrames != m_callbackBufferSize) | |
106 return; | |
107 | 103 |
108 m_framesElapsed -= std::min(m_framesElapsed, priorFramesSkipped); | 104 m_framesElapsed -= std::min(m_framesElapsed, priorFramesSkipped); |
109 double outputPosition = | 105 double outputPosition = |
110 m_framesElapsed / static_cast<double>(m_sampleRate) - delay; | 106 m_framesElapsed / static_cast<double>(m_sampleRate) - delay; |
111 m_outputPosition.position = outputPosition; | 107 m_outputPosition.position = outputPosition; |
112 m_outputPosition.timestamp = delayTimestamp; | 108 m_outputPosition.timestamp = delayTimestamp; |
113 m_outputPositionReceivedTimestamp = base::TimeTicks::Now(); | 109 m_outputPositionReceivedTimestamp = base::TimeTicks::Now(); |
114 | 110 |
115 // Associate the destination data array with the output bus then fill the | 111 // Associate the destination data array with the output bus then fill the |
116 // FIFO. | 112 // FIFO. |
117 for (unsigned i = 0; i < m_numberOfOutputChannels; ++i) | 113 for (unsigned i = 0; i < m_numberOfOutputChannels; ++i) |
118 m_outputBus->setChannelMemory(i, destinationData[i], numberOfFrames); | 114 m_outputBus->setChannelMemory(i, destinationData[i], numberOfFrames); |
119 m_fifo->consume(m_outputBus.get(), numberOfFrames); | 115 |
116 // Number of frames to render via the WebAudio graph. |framesToRender > 0| | |
117 // means the FIFO does not contain enough frames to fulfill the request | |
118 // from the audio device. | |
119 size_t framesToRender = numberOfFrames > m_fifo->framesAvailable() | |
120 ? numberOfFrames - m_fifo->framesAvailable() | |
121 : 0; | |
122 | |
123 while (framesToRender != 0) { | |
Raymond Toy
2017/02/01 23:38:55
Since you decrement by render quantum each time, y
hongchan
2017/02/02 19:55:23
Done.
| |
124 // If the platform buffer is more than two render quanta long, we do not | |
125 // want the output position to get stuck, so we promote it using the | |
126 // elapsed time from the moment it was initially obtained. | |
127 if (m_callbackBufferSize > AudioUtilities::kRenderQuantumFrames * 2) { | |
128 double delta = | |
129 (base::TimeTicks::Now() - m_outputPositionReceivedTimestamp) | |
130 .InSecondsF(); | |
131 m_outputPosition.position += delta; | |
132 m_outputPosition.timestamp += delta; | |
133 } | |
134 | |
135 // Some implementations give only a rough estimate of |delay|, so the | |
136 // computed |outputPosition| value might be negative. | |
137 if (m_outputPosition.position < 0.0) | |
138 m_outputPosition.position = 0.0; | |
139 | |
140 // Process WebAudio graph and push the rendered output to FIFO. | |
141 m_callback.render(nullptr, m_renderBus.get(), | |
142 AudioUtilities::kRenderQuantumFrames, m_outputPosition); | |
143 m_fifo->push(m_renderBus.get()); | |
144 | |
145 framesToRender = framesToRender > AudioUtilities::kRenderQuantumFrames | |
146 ? framesToRender - AudioUtilities::kRenderQuantumFrames | |
147 : 0; | |
o1ka
2017/02/02 16:30:24
Is it a contract that framesToRender % kRenderQuan
hongchan
2017/02/02 19:55:23
I took both advices and made it a for loop with co
| |
148 } | |
149 | |
150 m_fifo->pull(m_outputBus.get(), numberOfFrames); | |
120 | 151 |
121 m_framesElapsed += numberOfFrames; | 152 m_framesElapsed += numberOfFrames; |
122 } | 153 } |
123 | 154 |
124 void AudioDestination::provideInput(AudioBus* outputBus, | |
125 size_t framesToProcess) { | |
126 AudioIOPosition outputPosition = m_outputPosition; | |
127 | |
128 // If platform buffer is more than two times longer than |framesToProcess| | |
129 // we do not want output position to get stuck so we promote it | |
130 // using the elapsed time from the moment it was initially obtained. | |
131 if (m_callbackBufferSize > framesToProcess * 2) { | |
132 double delta = (base::TimeTicks::Now() - m_outputPositionReceivedTimestamp) | |
133 .InSecondsF(); | |
134 outputPosition.position += delta; | |
135 outputPosition.timestamp += delta; | |
136 } | |
137 | |
138 // Some implementations give only rough estimation of |delay| so | |
139 // we might have negative estimation |outputPosition| value. | |
140 if (outputPosition.position < 0.0) | |
141 outputPosition.position = 0.0; | |
142 | |
143 // To fill the FIFO, start the render call chain of the destination node. | |
144 m_callback.render(nullptr, outputBus, framesToProcess, outputPosition); | |
145 } | |
146 | |
147 void AudioDestination::start() { | 155 void AudioDestination::start() { |
148 if (m_webAudioDevice && !m_isPlaying) { | 156 if (m_webAudioDevice && !m_isPlaying) { |
149 m_webAudioDevice->start(); | 157 m_webAudioDevice->start(); |
150 m_isPlaying = true; | 158 m_isPlaying = true; |
151 } | 159 } |
152 } | 160 } |
153 | 161 |
154 void AudioDestination::stop() { | 162 void AudioDestination::stop() { |
155 if (m_webAudioDevice && m_isPlaying) { | 163 if (m_webAudioDevice && m_isPlaying) { |
156 m_webAudioDevice->stop(); | 164 m_webAudioDevice->stop(); |
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
211 callbackBufferSizeHistogram.sample(m_callbackBufferSize); | 219 callbackBufferSizeHistogram.sample(m_callbackBufferSize); |
212 | 220 |
213 // Check if the requested buffer size is too large. | 221 // Check if the requested buffer size is too large. |
214 bool isBufferSizeValid = | 222 bool isBufferSizeValid = |
215 m_callbackBufferSize + AudioUtilities::kRenderQuantumFrames <= kFIFOSize; | 223 m_callbackBufferSize + AudioUtilities::kRenderQuantumFrames <= kFIFOSize; |
216 DCHECK(isBufferSizeValid); | 224 DCHECK(isBufferSizeValid); |
217 return isBufferSizeValid; | 225 return isBufferSizeValid; |
218 } | 226 } |
219 | 227 |
220 } // namespace blink | 228 } // namespace blink |
OLD | NEW |