Chromium Code Reviews

Side by Side Diff: Source/modules/webaudio/AbstractAudioContext.cpp

Issue 1214463003: Split "Online" and "Offline" AudioContext processing (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Bring to ToT Created 5 years, 5 months ago
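For orientation, the CL title refers to the two kinds of contexts in the Web Audio API: an online AudioContext renders in real time to the audio hardware, while an OfflineAudioContext renders as fast as possible into an AudioBuffer. Below is a minimal usage sketch of that distinction in standard web-facing TypeScript; it is illustrative only and is not code from this patch.

    // Online (real-time) context: pulls the graph continuously and plays to hardware.
    const online = new AudioContext();
    const osc = online.createOscillator();
    osc.connect(online.destination);
    osc.start();

    // Offline (non-realtime) context: renders 1 second of stereo audio at 44.1 kHz
    // into an AudioBuffer and completes via a promise / completion event.
    const offline = new OfflineAudioContext(2, 44100, 44100);
    const osc2 = offline.createOscillator();
    osc2.connect(offline.destination);
    osc2.start();
    offline.startRendering().then((rendered: AudioBuffer) => {
        // `rendered` holds the non-realtime result.
    });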
1 /* 1 /*
2 * Copyright (C) 2010, Google Inc. All rights reserved. 2 * Copyright (C) 2010, Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions 5 * modification, are permitted provided that the following conditions
6 * are met: 6 * are met:
7 * 1. Redistributions of source code must retain the above copyright 7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer. 8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright 9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the 10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution. 11 * documentation and/or other materials provided with the distribution.
12 * 12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY 13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
15 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 15 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
16 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY 16 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
17 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 17 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
18 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 18 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
19 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
20 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 20 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
22 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23 */ 23 */
24 24
25 #include "config.h" 25 #include "config.h"
26 #if ENABLE(WEB_AUDIO) 26 #if ENABLE(WEB_AUDIO)
27 #include "modules/webaudio/AudioContext.h" 27 #include "modules/webaudio/AbstractAudioContext.h"
28 28
29 #include "bindings/core/v8/ExceptionMessages.h" 29 #include "bindings/core/v8/ExceptionMessages.h"
30 #include "bindings/core/v8/ExceptionState.h" 30 #include "bindings/core/v8/ExceptionState.h"
31 #include "bindings/core/v8/ScriptPromiseResolver.h" 31 #include "bindings/core/v8/ScriptPromiseResolver.h"
32 #include "bindings/core/v8/ScriptState.h" 32 #include "bindings/core/v8/ScriptState.h"
33 #include "core/dom/DOMException.h" 33 #include "core/dom/DOMException.h"
34 #include "core/dom/Document.h" 34 #include "core/dom/Document.h"
35 #include "core/dom/ExceptionCode.h" 35 #include "core/dom/ExceptionCode.h"
36 #include "core/dom/ExecutionContextTask.h" 36 #include "core/dom/ExecutionContextTask.h"
37 #include "core/html/HTMLMediaElement.h" 37 #include "core/html/HTMLMediaElement.h"
38 #include "modules/mediastream/MediaStream.h" 38 #include "modules/mediastream/MediaStream.h"
39 #include "modules/webaudio/AnalyserNode.h" 39 #include "modules/webaudio/AnalyserNode.h"
40 #include "modules/webaudio/AudioBuffer.h" 40 #include "modules/webaudio/AudioBuffer.h"
41 #include "modules/webaudio/AudioBufferCallback.h" 41 #include "modules/webaudio/AudioBufferCallback.h"
42 #include "modules/webaudio/AudioBufferSourceNode.h" 42 #include "modules/webaudio/AudioBufferSourceNode.h"
43 #include "modules/webaudio/AudioContext.h"
43 #include "modules/webaudio/AudioListener.h" 44 #include "modules/webaudio/AudioListener.h"
44 #include "modules/webaudio/AudioNodeInput.h" 45 #include "modules/webaudio/AudioNodeInput.h"
45 #include "modules/webaudio/AudioNodeOutput.h" 46 #include "modules/webaudio/AudioNodeOutput.h"
46 #include "modules/webaudio/BiquadFilterNode.h" 47 #include "modules/webaudio/BiquadFilterNode.h"
47 #include "modules/webaudio/ChannelMergerNode.h" 48 #include "modules/webaudio/ChannelMergerNode.h"
48 #include "modules/webaudio/ChannelSplitterNode.h" 49 #include "modules/webaudio/ChannelSplitterNode.h"
49 #include "modules/webaudio/ConvolverNode.h" 50 #include "modules/webaudio/ConvolverNode.h"
50 #include "modules/webaudio/DefaultAudioDestinationNode.h" 51 #include "modules/webaudio/DefaultAudioDestinationNode.h"
51 #include "modules/webaudio/DelayNode.h" 52 #include "modules/webaudio/DelayNode.h"
52 #include "modules/webaudio/DynamicsCompressorNode.h" 53 #include "modules/webaudio/DynamicsCompressorNode.h"
53 #include "modules/webaudio/GainNode.h" 54 #include "modules/webaudio/GainNode.h"
54 #include "modules/webaudio/MediaElementAudioSourceNode.h" 55 #include "modules/webaudio/MediaElementAudioSourceNode.h"
55 #include "modules/webaudio/MediaStreamAudioDestinationNode.h" 56 #include "modules/webaudio/MediaStreamAudioDestinationNode.h"
56 #include "modules/webaudio/MediaStreamAudioSourceNode.h" 57 #include "modules/webaudio/MediaStreamAudioSourceNode.h"
57 #include "modules/webaudio/OfflineAudioCompletionEvent.h" 58 #include "modules/webaudio/OfflineAudioCompletionEvent.h"
58 #include "modules/webaudio/OfflineAudioContext.h" 59 #include "modules/webaudio/OfflineAudioContext.h"
59 #include "modules/webaudio/OfflineAudioDestinationNode.h" 60 #include "modules/webaudio/OfflineAudioDestinationNode.h"
60 #include "modules/webaudio/OscillatorNode.h" 61 #include "modules/webaudio/OscillatorNode.h"
61 #include "modules/webaudio/PannerNode.h" 62 #include "modules/webaudio/PannerNode.h"
62 #include "modules/webaudio/PeriodicWave.h" 63 #include "modules/webaudio/PeriodicWave.h"
63 #include "modules/webaudio/ScriptProcessorNode.h" 64 #include "modules/webaudio/ScriptProcessorNode.h"
64 #include "modules/webaudio/StereoPannerNode.h" 65 #include "modules/webaudio/StereoPannerNode.h"
65 #include "modules/webaudio/WaveShaperNode.h" 66 #include "modules/webaudio/WaveShaperNode.h"
66 #include "platform/ThreadSafeFunctional.h" 67 #include "platform/ThreadSafeFunctional.h"
67 #include "public/platform/Platform.h" 68 #include "public/platform/Platform.h"
68 #include "wtf/text/WTFString.h" 69 #include "wtf/text/WTFString.h"
69 70
70 #if DEBUG_AUDIONODE_REFERENCES
71 #include <stdio.h>
72 #endif
73
74 namespace blink { 71 namespace blink {
75 72
76 // Don't allow more than this number of simultaneous AudioContexts talking to hardware. 73 AbstractAudioContext* AbstractAudioContext::create(Document& document, ExceptionState& exceptionState)
77 const unsigned MaxHardwareContexts = 6;
78 unsigned AudioContext::s_hardwareContextCount = 0;
79 unsigned AudioContext::s_contextId = 0;
80
81 AudioContext* AudioContext::create(Document& document, ExceptionState& exceptionState)
82 { 74 {
83 ASSERT(isMainThread()); 75 return AudioContext::create(document, exceptionState);
84 if (s_hardwareContextCount >= MaxHardwareContexts) {
85 exceptionState.throwDOMException(
86 NotSupportedError,
87 ExceptionMessages::indexExceedsMaximumBound(
88 "number of hardware contexts",
89 s_hardwareContextCount,
90 MaxHardwareContexts));
91 return nullptr;
92 }
93
94 AudioContext* audioContext = new AudioContext(&document);
95 audioContext->suspendIfNeeded();
96 return audioContext;
97 } 76 }
98 77
78 // FIXME(dominicc): Devolve these constructors to AudioContext
79 // and OfflineAudioContext respectively.
80
99 // Constructor for rendering to the audio hardware. 81 // Constructor for rendering to the audio hardware.
100 AudioContext::AudioContext(Document* document) 82 AbstractAudioContext::AbstractAudioContext(Document* document)
101 : ActiveDOMObject(document) 83 : ActiveDOMObject(document)
102 , m_isStopScheduled(false) 84 , m_isStopScheduled(false)
103 , m_isCleared(false) 85 , m_isCleared(false)
104 , m_isInitialized(false) 86 , m_isInitialized(false)
105 , m_destinationNode(nullptr) 87 , m_destinationNode(nullptr)
106 , m_isResolvingResumePromises(false) 88 , m_isResolvingResumePromises(false)
107 , m_connectionCount(0) 89 , m_connectionCount(0)
108 , m_didInitializeContextGraphMutex(false) 90 , m_didInitializeContextGraphMutex(false)
109 , m_deferredTaskHandler(DeferredTaskHandler::create()) 91 , m_deferredTaskHandler(DeferredTaskHandler::create())
110 , m_isOfflineContext(false)
111 , m_contextState(Suspended) 92 , m_contextState(Suspended)
112 { 93 {
113 m_didInitializeContextGraphMutex = true; 94 m_didInitializeContextGraphMutex = true;
114 m_destinationNode = DefaultAudioDestinationNode::create(this); 95 m_destinationNode = DefaultAudioDestinationNode::create(this);
115 96
116 initialize(); 97 initialize();
117 } 98 }
118 99
119 // Constructor for offline (non-realtime) rendering. 100 // Constructor for offline (non-realtime) rendering.
120 AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate) 101 AbstractAudioContext::AbstractAudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
121 : ActiveDOMObject(document) 102 : ActiveDOMObject(document)
122 , m_isStopScheduled(false) 103 , m_isStopScheduled(false)
123 , m_isCleared(false) 104 , m_isCleared(false)
124 , m_isInitialized(false) 105 , m_isInitialized(false)
125 , m_destinationNode(nullptr) 106 , m_destinationNode(nullptr)
126 , m_isResolvingResumePromises(false) 107 , m_isResolvingResumePromises(false)
127 , m_connectionCount(0) 108 , m_connectionCount(0)
128 , m_didInitializeContextGraphMutex(false) 109 , m_didInitializeContextGraphMutex(false)
129 , m_deferredTaskHandler(DeferredTaskHandler::create()) 110 , m_deferredTaskHandler(DeferredTaskHandler::create())
130 , m_isOfflineContext(true)
131 , m_contextState(Suspended) 111 , m_contextState(Suspended)
132 { 112 {
133 m_didInitializeContextGraphMutex = true; 113 m_didInitializeContextGraphMutex = true;
134 // Create a new destination for offline rendering. 114 // Create a new destination for offline rendering.
135 m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate); 115 m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
136 if (m_renderTarget.get()) 116 if (m_renderTarget.get())
137 m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get()); 117 m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());
138 118
139 initialize(); 119 initialize();
140 } 120 }
141 121
142 AudioContext::~AudioContext() 122 AbstractAudioContext::~AbstractAudioContext()
143 { 123 {
144 #if DEBUG_AUDIONODE_REFERENCES
145 fprintf(stderr, "%p: AudioContext::~AudioContext(): %u\n", this, m_contextId);
146 #endif
147 deferredTaskHandler().contextWillBeDestroyed(); 124 deferredTaskHandler().contextWillBeDestroyed();
148 // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around. 125 // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around.
149 ASSERT(!m_isInitialized); 126 ASSERT(!m_isInitialized);
150 ASSERT(!m_activeSourceNodes.size()); 127 ASSERT(!m_activeSourceNodes.size());
151 ASSERT(!m_finishedSourceHandlers.size()); 128 ASSERT(!m_finishedSourceHandlers.size());
152 ASSERT(!m_isResolvingResumePromises); 129 ASSERT(!m_isResolvingResumePromises);
153 ASSERT(!m_resumeResolvers.size()); 130 ASSERT(!m_resumeResolvers.size());
154 } 131 }
155 132
156 void AudioContext::initialize() 133 void AbstractAudioContext::initialize()
157 { 134 {
158 if (isInitialized()) 135 if (isInitialized())
159 return; 136 return;
160 137
161 FFTFrame::initialize(); 138 FFTFrame::initialize();
162 m_listener = AudioListener::create(); 139 m_listener = AudioListener::create();
163 140
164 if (m_destinationNode.get()) { 141 if (m_destinationNode.get()) {
165 m_destinationNode->handler().initialize(); 142 m_destinationNode->handler().initialize();
166
167 if (!isOfflineContext()) {
167 // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
169 // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
170 // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
171 // We may want to consider requiring it for symmetry with OfflineAudioContext.
172 startRendering();
173 ++s_hardwareContextCount;
174 }
175
176 m_contextId = s_contextId++;
177 m_isInitialized = true; 143 m_isInitialized = true;
178 #if DEBUG_AUDIONODE_REFERENCES
179 fprintf(stderr, "%p: AudioContext::AudioContext(): %u #%u\n",
180 this, m_contextId, AudioContext::s_hardwareContextCount);
181 #endif
182 } 144 }
183 } 145 }
184 146
185 void AudioContext::clear() 147 void AbstractAudioContext::clear()
186 { 148 {
187 m_destinationNode.clear(); 149 m_destinationNode.clear();
188 // The audio rendering thread is dead. Nobody will schedule AudioHandler 150 // The audio rendering thread is dead. Nobody will schedule AudioHandler
189 // deletion. Let's do it ourselves. 151 // deletion. Let's do it ourselves.
190 deferredTaskHandler().clearHandlersToBeDeleted(); 152 deferredTaskHandler().clearHandlersToBeDeleted();
191 m_isCleared = true; 153 m_isCleared = true;
192 } 154 }
193 155
194 void AudioContext::uninitialize() 156 void AbstractAudioContext::uninitialize()
195 { 157 {
196 ASSERT(isMainThread()); 158 ASSERT(isMainThread());
197 159
198 if (!isInitialized()) 160 if (!isInitialized())
199 return; 161 return;
200 162
201 m_isInitialized = false; 163 m_isInitialized = false;
202 164
203 // This stops the audio thread and all audio rendering. 165 // This stops the audio thread and all audio rendering.
204 if (m_destinationNode) 166 if (m_destinationNode)
205 m_destinationNode->handler().uninitialize(); 167 m_destinationNode->handler().uninitialize();
206 168
207 if (!isOfflineContext()) {
208 ASSERT(s_hardwareContextCount);
209 --s_hardwareContextCount;
210 }
211
212 // Get rid of the sources which may still be playing. 169 // Get rid of the sources which may still be playing.
213 releaseActiveSourceNodes(); 170 releaseActiveSourceNodes();
214 171
215 // Reject any pending resolvers before we go away. 172 // Reject any pending resolvers before we go away.
216 rejectPendingResolvers(); 173 rejectPendingResolvers();
217 174 didClose();
218 // For an offline audio context, the completion event will set the state to closed. For an
219 // online context, we need to do it here. We only want to set the closed state once.
220 if (!isOfflineContext())
221 setContextState(Closed);
222
223 // Resolve the promise now, if any
224 if (m_closeResolver)
225 m_closeResolver->resolve();
226 175
227 ASSERT(m_listener); 176 ASSERT(m_listener);
228 m_listener->waitForHRTFDatabaseLoaderThreadCompletion(); 177 m_listener->waitForHRTFDatabaseLoaderThreadCompletion();
229 178
230 clear(); 179 clear();
231 } 180 }
232 181
233 void AudioContext::stop() 182 void AbstractAudioContext::stop()
234 { 183 {
235 // Usually ExecutionContext calls stop twice. 184 // Usually ExecutionContext calls stop twice.
236 if (m_isStopScheduled) 185 if (m_isStopScheduled)
237 return; 186 return;
238 m_isStopScheduled = true; 187 m_isStopScheduled = true;
239 188
240 // Don't call uninitialize() immediately here because the ExecutionContext is in the middle 189 // Don't call uninitialize() immediately here because the ExecutionContext is in the middle
241 // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other 190 // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
242 // ActiveDOMObjects so let's schedule uninitialize() to be called later. 191 // ActiveDOMObjects so let's schedule uninitialize() to be called later.
243 // FIXME: see if there's a more direct way to handle this issue. 192 // FIXME: see if there's a more direct way to handle this issue.
244 Platform::current()->mainThread()->postTask(FROM_HERE, bind(&AudioContext::uninitialize, this)); 193 Platform::current()->mainThread()->postTask(FROM_HERE, bind(&AbstractAudioContext::uninitialize, this));
245 } 194 }
246 195
247 bool AudioContext::hasPendingActivity() const 196 bool AbstractAudioContext::hasPendingActivity() const
248 { 197 {
249 // There's no pending activity if the audio context has been cleared. 198 // There's no pending activity if the audio context has been cleared.
250 return !m_isCleared; 199 return !m_isCleared;
251 } 200 }
252 201
253 void AudioContext::throwExceptionForClosedState(ExceptionState& exceptionState) 202 void AbstractAudioContext::throwExceptionForClosedState(ExceptionState& exceptionState)
254 { 203 {
255 exceptionState.throwDOMException(InvalidStateError, "AudioContext has been closed."); 204 exceptionState.throwDOMException(InvalidStateError, "AudioContext has been closed.");
256 } 205 }
257 206
258 AudioBuffer* AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState) 207 AudioBuffer* AbstractAudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
259 { 208 {
260 // It's ok to call createBuffer, even if the context is closed because the AudioBuffer doesn't 209 // It's ok to call createBuffer, even if the context is closed because the AudioBuffer doesn't
261 // really "belong" to any particular context. 210 // really "belong" to any particular context.
262 211
263 return AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate, exceptionState); 212 return AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate, exceptionState);
264 } 213 }
265 214
266 void AudioContext::decodeAudioData(DOMArrayBuffer* audioData, AudioBufferCallback* successCallback, AudioBufferCallback* errorCallback, ExceptionState& exceptionState) 215 void AbstractAudioContext::decodeAudioData(DOMArrayBuffer* audioData, AudioBufferCallback* successCallback, AudioBufferCallback* errorCallback, ExceptionState& exceptionState)
267 { 216 {
268 if (isContextClosed()) { 217 if (isContextClosed()) {
269 throwExceptionForClosedState(exceptionState); 218 throwExceptionForClosedState(exceptionState);
270 return; 219 return;
271 } 220 }
272 221
273 if (!audioData) { 222 if (!audioData) {
274 exceptionState.throwDOMException( 223 exceptionState.throwDOMException(
275 SyntaxError, 224 SyntaxError,
276 "invalid ArrayBuffer for audioData."); 225 "invalid ArrayBuffer for audioData.");
277 return; 226 return;
278 } 227 }
279 m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback); 228 m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);
280 } 229 }
281 230
282 AudioBufferSourceNode* AudioContext::createBufferSource(ExceptionState& exceptionState) 231 AudioBufferSourceNode* AbstractAudioContext::createBufferSource(ExceptionState& exceptionState)
283 { 232 {
284 ASSERT(isMainThread()); 233 ASSERT(isMainThread());
285 234
286 if (isContextClosed()) { 235 if (isContextClosed()) {
287 throwExceptionForClosedState(exceptionState); 236 throwExceptionForClosedState(exceptionState);
288 return nullptr; 237 return nullptr;
289 } 238 }
290 239
291 AudioBufferSourceNode* node = AudioBufferSourceNode::create(*this, sampleRate()); 240 AudioBufferSourceNode* node = AudioBufferSourceNode::create(*this, sampleRate());
292 241
293 // Do not add a reference to this source node now. The reference will be added when start() is 242 // Do not add a reference to this source node now. The reference will be added when start() is
294 // called. 243 // called.
295 244
296 return node; 245 return node;
297 } 246 }
298 247
299 MediaElementAudioSourceNode* AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState) 248 MediaElementAudioSourceNode* AbstractAudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState)
300 { 249 {
301 ASSERT(isMainThread()); 250 ASSERT(isMainThread());
302 251
303 if (isContextClosed()) { 252 if (isContextClosed()) {
304 throwExceptionForClosedState(exceptionState); 253 throwExceptionForClosedState(exceptionState);
305 return nullptr; 254 return nullptr;
306 } 255 }
307 256
308 if (!mediaElement) { 257 if (!mediaElement) {
309 exceptionState.throwDOMException( 258 exceptionState.throwDOMException(
(...skipping 11 matching lines...)
321 } 270 }
322 271
323 MediaElementAudioSourceNode* node = MediaElementAudioSourceNode::create(*this, *mediaElement); 272 MediaElementAudioSourceNode* node = MediaElementAudioSourceNode::create(*this, *mediaElement);
324 273
325 mediaElement->setAudioSourceNode(node); 274 mediaElement->setAudioSourceNode(node);
326 275
327 notifySourceNodeStartedProcessing(node); // context keeps reference until node is disconnected 276 notifySourceNodeStartedProcessing(node); // context keeps reference until node is disconnected
328 return node; 277 return node;
329 } 278 }
330 279
331 MediaStreamAudioSourceNode* AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState) 280 MediaStreamAudioSourceNode* AbstractAudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState)
332 { 281 {
333 ASSERT(isMainThread()); 282 ASSERT(isMainThread());
334 283
335 if (isContextClosed()) { 284 if (isContextClosed()) {
336 throwExceptionForClosedState(exceptionState); 285 throwExceptionForClosedState(exceptionState);
337 return nullptr; 286 return nullptr;
338 } 287 }
339 288
340 if (!mediaStream) { 289 if (!mediaStream) {
341 exceptionState.throwDOMException( 290 exceptionState.throwDOMException(
(...skipping 15 matching lines...)
357 OwnPtr<AudioSourceProvider> provider = audioTrack->createWebAudioSource(); 306 OwnPtr<AudioSourceProvider> provider = audioTrack->createWebAudioSource();
358 MediaStreamAudioSourceNode* node = MediaStreamAudioSourceNode::create(*this, *mediaStream, audioTrack, provider.release()); 307 MediaStreamAudioSourceNode* node = MediaStreamAudioSourceNode::create(*this, *mediaStream, audioTrack, provider.release());
359 308
360 // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams. 309 // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
361 node->setFormat(2, sampleRate()); 310 node->setFormat(2, sampleRate());
362 311
363 notifySourceNodeStartedProcessing(node); // context keeps reference until node is disconnected 312 notifySourceNodeStartedProcessing(node); // context keeps reference until node is disconnected
364 return node; 313 return node;
365 } 314 }
366 315
367 MediaStreamAudioDestinationNode* AudioContext::createMediaStreamDestination(ExceptionState& exceptionState) 316 MediaStreamAudioDestinationNode* AbstractAudioContext::createMediaStreamDestination(ExceptionState& exceptionState)
368 { 317 {
369 if (isContextClosed()) { 318 if (isContextClosed()) {
370 throwExceptionForClosedState(exceptionState); 319 throwExceptionForClosedState(exceptionState);
371 return nullptr; 320 return nullptr;
372 } 321 }
373 322
374 // Set number of output channels to stereo by default. 323 // Set number of output channels to stereo by default.
375 return MediaStreamAudioDestinationNode::create(*this, 2); 324 return MediaStreamAudioDestinationNode::create(*this, 2);
376 } 325 }
377 326
378 ScriptProcessorNode* AudioContext::createScriptProcessor(ExceptionState& exceptionState) 327 ScriptProcessorNode* AbstractAudioContext::createScriptProcessor(ExceptionState& exceptionState)
379 { 328 {
380 // Set number of input/output channels to stereo by default. 329 // Set number of input/output channels to stereo by default.
381 return createScriptProcessor(0, 2, 2, exceptionState); 330 return createScriptProcessor(0, 2, 2, exceptionState);
382 } 331 }
383 332
384 ScriptProcessorNode* AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState) 333 ScriptProcessorNode* AbstractAudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState)
385 { 334 {
386 // Set number of input/output channels to stereo by default. 335 // Set number of input/output channels to stereo by default.
387 return createScriptProcessor(bufferSize, 2, 2, exceptionState); 336 return createScriptProcessor(bufferSize, 2, 2, exceptionState);
388 } 337 }
389 338
390 ScriptProcessorNode* AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState) 339 ScriptProcessorNode* AbstractAudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState)
391 { 340 {
392 // Set number of output channels to stereo by default. 341 // Set number of output channels to stereo by default.
393 return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exception State); 342 return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exception State);
394 } 343 }
395 344
396 ScriptProcessorNode* AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState) 345 ScriptProcessorNode* AbstractAudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState)
397 { 346 {
398 ASSERT(isMainThread()); 347 ASSERT(isMainThread());
399 348
400 if (isContextClosed()) { 349 if (isContextClosed()) {
401 throwExceptionForClosedState(exceptionState); 350 throwExceptionForClosedState(exceptionState);
402 return nullptr; 351 return nullptr;
403 } 352 }
404 353
405 ScriptProcessorNode* node = ScriptProcessorNode::create(*this, sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels); 354 ScriptProcessorNode* node = ScriptProcessorNode::create(*this, sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
406 355
407 if (!node) { 356 if (!node) {
408 if (!numberOfInputChannels && !numberOfOutputChannels) { 357 if (!numberOfInputChannels && !numberOfOutputChannels) {
409 exceptionState.throwDOMException( 358 exceptionState.throwDOMException(
410 IndexSizeError, 359 IndexSizeError,
411 "number of input channels and output channels cannot both be zer o."); 360 "number of input channels and output channels cannot both be zer o.");
412 } else if (numberOfInputChannels > AudioContext::maxNumberOfChannels()) { 361 } else if (numberOfInputChannels > AbstractAudioContext::maxNumberOfChannels()) {
413 exceptionState.throwDOMException( 362 exceptionState.throwDOMException(
414 IndexSizeError, 363 IndexSizeError,
415 "number of input channels (" + String::number(numberOfInputChann els) 364 "number of input channels (" + String::number(numberOfInputChann els)
416 + ") exceeds maximum (" 365 + ") exceeds maximum ("
417 + String::number(AudioContext::maxNumberOfChannels()) + ")."); 366 + String::number(AbstractAudioContext::maxNumberOfChannels()) + ").");
418 } else if (numberOfOutputChannels > AudioContext::maxNumberOfChannels()) { 367 } else if (numberOfOutputChannels > AbstractAudioContext::maxNumberOfChannels()) {
419 exceptionState.throwDOMException( 368 exceptionState.throwDOMException(
420 IndexSizeError, 369 IndexSizeError,
421 "number of output channels (" + String::number(numberOfInputChan nels) 370 "number of output channels (" + String::number(numberOfInputChan nels)
422 + ") exceeds maximum (" 371 + ") exceeds maximum ("
423 + String::number(AudioContext::maxNumberOfChannels()) + ")."); 372 + String::number(AbstractAudioContext::maxNumberOfChannels()) + ").");
424 } else { 373 } else {
425 exceptionState.throwDOMException( 374 exceptionState.throwDOMException(
426 IndexSizeError, 375 IndexSizeError,
427 "buffer size (" + String::number(bufferSize) 376 "buffer size (" + String::number(bufferSize)
428 + ") must be a power of two between 256 and 16384."); 377 + ") must be a power of two between 256 and 16384.");
429 } 378 }
430 return nullptr; 379 return nullptr;
431 } 380 }
432 381
433 notifySourceNodeStartedProcessing(node); // context keeps reference until we stop making javascript rendering callbacks 382 notifySourceNodeStartedProcessing(node); // context keeps reference until we stop making javascript rendering callbacks
434 return node; 383 return node;
435 } 384 }
436 385
437 StereoPannerNode* AudioContext::createStereoPanner(ExceptionState& exceptionState) 386 StereoPannerNode* AbstractAudioContext::createStereoPanner(ExceptionState& exceptionState)
438 { 387 {
439 ASSERT(isMainThread()); 388 ASSERT(isMainThread());
440 if (isContextClosed()) { 389 if (isContextClosed()) {
441 throwExceptionForClosedState(exceptionState); 390 throwExceptionForClosedState(exceptionState);
442 return nullptr; 391 return nullptr;
443 } 392 }
444 393
445 return StereoPannerNode::create(*this, sampleRate()); 394 return StereoPannerNode::create(*this, sampleRate());
446 } 395 }
447 396
448 BiquadFilterNode* AudioContext::createBiquadFilter(ExceptionState& exceptionState) 397 BiquadFilterNode* AbstractAudioContext::createBiquadFilter(ExceptionState& exceptionState)
449 { 398 {
450 ASSERT(isMainThread()); 399 ASSERT(isMainThread());
451 if (isContextClosed()) { 400 if (isContextClosed()) {
452 throwExceptionForClosedState(exceptionState); 401 throwExceptionForClosedState(exceptionState);
453 return nullptr; 402 return nullptr;
454 } 403 }
455 404
456 return BiquadFilterNode::create(*this, sampleRate()); 405 return BiquadFilterNode::create(*this, sampleRate());
457 } 406 }
458 407
459 WaveShaperNode* AudioContext::createWaveShaper(ExceptionState& exceptionState) 408 WaveShaperNode* AbstractAudioContext::createWaveShaper(ExceptionState& exceptionState)
460 { 409 {
461 ASSERT(isMainThread()); 410 ASSERT(isMainThread());
462 if (isContextClosed()) { 411 if (isContextClosed()) {
463 throwExceptionForClosedState(exceptionState); 412 throwExceptionForClosedState(exceptionState);
464 return nullptr; 413 return nullptr;
465 } 414 }
466 415
467 return WaveShaperNode::create(*this); 416 return WaveShaperNode::create(*this);
468 } 417 }
469 418
470 PannerNode* AudioContext::createPanner(ExceptionState& exceptionState) 419 PannerNode* AbstractAudioContext::createPanner(ExceptionState& exceptionState)
471 { 420 {
472 ASSERT(isMainThread()); 421 ASSERT(isMainThread());
473 if (isContextClosed()) { 422 if (isContextClosed()) {
474 throwExceptionForClosedState(exceptionState); 423 throwExceptionForClosedState(exceptionState);
475 return nullptr; 424 return nullptr;
476 } 425 }
477 426
478 return PannerNode::create(*this, sampleRate()); 427 return PannerNode::create(*this, sampleRate());
479 } 428 }
480 429
481 ConvolverNode* AudioContext::createConvolver(ExceptionState& exceptionState) 430 ConvolverNode* AbstractAudioContext::createConvolver(ExceptionState& exceptionState)
482 { 431 {
483 ASSERT(isMainThread()); 432 ASSERT(isMainThread());
484 if (isContextClosed()) { 433 if (isContextClosed()) {
485 throwExceptionForClosedState(exceptionState); 434 throwExceptionForClosedState(exceptionState);
486 return nullptr; 435 return nullptr;
487 } 436 }
488 437
489 return ConvolverNode::create(*this, sampleRate()); 438 return ConvolverNode::create(*this, sampleRate());
490 } 439 }
491 440
492 DynamicsCompressorNode* AudioContext::createDynamicsCompressor(ExceptionState& exceptionState) 441 DynamicsCompressorNode* AbstractAudioContext::createDynamicsCompressor(ExceptionState& exceptionState)
493 { 442 {
494 ASSERT(isMainThread()); 443 ASSERT(isMainThread());
495 if (isContextClosed()) { 444 if (isContextClosed()) {
496 throwExceptionForClosedState(exceptionState); 445 throwExceptionForClosedState(exceptionState);
497 return nullptr; 446 return nullptr;
498 } 447 }
499 448
500 return DynamicsCompressorNode::create(*this, sampleRate()); 449 return DynamicsCompressorNode::create(*this, sampleRate());
501 } 450 }
502 451
503 AnalyserNode* AudioContext::createAnalyser(ExceptionState& exceptionState) 452 AnalyserNode* AbstractAudioContext::createAnalyser(ExceptionState& exceptionState)
504 { 453 {
505 ASSERT(isMainThread()); 454 ASSERT(isMainThread());
506 if (isContextClosed()) { 455 if (isContextClosed()) {
507 throwExceptionForClosedState(exceptionState); 456 throwExceptionForClosedState(exceptionState);
508 return nullptr; 457 return nullptr;
509 } 458 }
510 459
511 return AnalyserNode::create(*this, sampleRate()); 460 return AnalyserNode::create(*this, sampleRate());
512 } 461 }
513 462
514 GainNode* AudioContext::createGain(ExceptionState& exceptionState) 463 GainNode* AbstractAudioContext::createGain(ExceptionState& exceptionState)
515 { 464 {
516 ASSERT(isMainThread()); 465 ASSERT(isMainThread());
517 if (isContextClosed()) { 466 if (isContextClosed()) {
518 throwExceptionForClosedState(exceptionState); 467 throwExceptionForClosedState(exceptionState);
519 return nullptr; 468 return nullptr;
520 } 469 }
521 470
522 return GainNode::create(*this, sampleRate()); 471 return GainNode::create(*this, sampleRate());
523 } 472 }
524 473
525 DelayNode* AudioContext::createDelay(ExceptionState& exceptionState) 474 DelayNode* AbstractAudioContext::createDelay(ExceptionState& exceptionState)
526 { 475 {
527 const double defaultMaxDelayTime = 1; 476 const double defaultMaxDelayTime = 1;
528 return createDelay(defaultMaxDelayTime, exceptionState); 477 return createDelay(defaultMaxDelayTime, exceptionState);
529 } 478 }
530 479
531 DelayNode* AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState) 480 DelayNode* AbstractAudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState)
532 { 481 {
533 ASSERT(isMainThread()); 482 ASSERT(isMainThread());
534 if (isContextClosed()) { 483 if (isContextClosed()) {
535 throwExceptionForClosedState(exceptionState); 484 throwExceptionForClosedState(exceptionState);
536 return nullptr; 485 return nullptr;
537 } 486 }
538 487
539 return DelayNode::create(*this, sampleRate(), maxDelayTime, exceptionState); 488 return DelayNode::create(*this, sampleRate(), maxDelayTime, exceptionState);
540 } 489 }
541 490
542 ChannelSplitterNode* AudioContext::createChannelSplitter(ExceptionState& exceptionState) 491 ChannelSplitterNode* AbstractAudioContext::createChannelSplitter(ExceptionState& exceptionState)
543 { 492 {
544 const unsigned ChannelSplitterDefaultNumberOfOutputs = 6; 493 const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
545 return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptionState); 494 return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptionState);
546 } 495 }
547 496
548 ChannelSplitterNode* AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState) 497 ChannelSplitterNode* AbstractAudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState)
549 { 498 {
550 ASSERT(isMainThread()); 499 ASSERT(isMainThread());
551 500
552 if (isContextClosed()) { 501 if (isContextClosed()) {
553 throwExceptionForClosedState(exceptionState); 502 throwExceptionForClosedState(exceptionState);
554 return nullptr; 503 return nullptr;
555 } 504 }
556 505
557 ChannelSplitterNode* node = ChannelSplitterNode::create(*this, sampleRate(), numberOfOutputs); 506 ChannelSplitterNode* node = ChannelSplitterNode::create(*this, sampleRate(), numberOfOutputs);
558 507
559 if (!node) { 508 if (!node) {
560 exceptionState.throwDOMException( 509 exceptionState.throwDOMException(
561 IndexSizeError, 510 IndexSizeError,
562 "number of outputs (" + String::number(numberOfOutputs) 511 "number of outputs (" + String::number(numberOfOutputs)
563 + ") must be between 1 and " 512 + ") must be between 1 and "
564 + String::number(AudioContext::maxNumberOfChannels()) + "."); 513 + String::number(AbstractAudioContext::maxNumberOfChannels()) + ".");
565 return nullptr; 514 return nullptr;
566 } 515 }
567 516
568 return node; 517 return node;
569 } 518 }
570 519
571 ChannelMergerNode* AudioContext::createChannelMerger(ExceptionState& exceptionState) 520 ChannelMergerNode* AbstractAudioContext::createChannelMerger(ExceptionState& exceptionState)
572 { 521 {
573 const unsigned ChannelMergerDefaultNumberOfInputs = 6; 522 const unsigned ChannelMergerDefaultNumberOfInputs = 6;
574 return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionState); 523 return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionState);
575 } 524 }
576 525
577 ChannelMergerNode* AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState) 526 ChannelMergerNode* AbstractAudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState)
578 { 527 {
579 ASSERT(isMainThread()); 528 ASSERT(isMainThread());
580 if (isContextClosed()) { 529 if (isContextClosed()) {
581 throwExceptionForClosedState(exceptionState); 530 throwExceptionForClosedState(exceptionState);
582 return nullptr; 531 return nullptr;
583 } 532 }
584 533
585 ChannelMergerNode* node = ChannelMergerNode::create(*this, sampleRate(), numberOfInputs); 534 ChannelMergerNode* node = ChannelMergerNode::create(*this, sampleRate(), numberOfInputs);
586 535
587 if (!node) { 536 if (!node) {
588 exceptionState.throwDOMException( 537 exceptionState.throwDOMException(
589 IndexSizeError, 538 IndexSizeError,
590 ExceptionMessages::indexOutsideRange<size_t>( 539 ExceptionMessages::indexOutsideRange<size_t>(
591 "number of inputs", 540 "number of inputs",
592 numberOfInputs, 541 numberOfInputs,
593 1, 542 1,
594 ExceptionMessages::InclusiveBound, 543 ExceptionMessages::InclusiveBound,
595 AudioContext::maxNumberOfChannels(), 544 AbstractAudioContext::maxNumberOfChannels(),
596 ExceptionMessages::InclusiveBound)); 545 ExceptionMessages::InclusiveBound));
597 return nullptr; 546 return nullptr;
598 } 547 }
599 548
600 return node; 549 return node;
601 } 550 }
602 551
603 OscillatorNode* AudioContext::createOscillator(ExceptionState& exceptionState) 552 OscillatorNode* AbstractAudioContext::createOscillator(ExceptionState& exceptionState)
604 { 553 {
605 ASSERT(isMainThread()); 554 ASSERT(isMainThread());
606 if (isContextClosed()) { 555 if (isContextClosed()) {
607 throwExceptionForClosedState(exceptionState); 556 throwExceptionForClosedState(exceptionState);
608 return nullptr; 557 return nullptr;
609 } 558 }
610 559
611 OscillatorNode* node = OscillatorNode::create(*this, sampleRate()); 560 OscillatorNode* node = OscillatorNode::create(*this, sampleRate());
612 561
613 // Do not add a reference to this source node now. The reference will be added when start() is 562 // Do not add a reference to this source node now. The reference will be added when start() is
614 // called. 563 // called.
615 564
616 return node; 565 return node;
617 } 566 }
618 567
619 PeriodicWave* AudioContext::createPeriodicWave(DOMFloat32Array* real, DOMFloat32Array* imag, ExceptionState& exceptionState) 568 PeriodicWave* AbstractAudioContext::createPeriodicWave(DOMFloat32Array* real, DOMFloat32Array* imag, ExceptionState& exceptionState)
620 { 569 {
621 ASSERT(isMainThread()); 570 ASSERT(isMainThread());
622 571
623 if (isContextClosed()) { 572 if (isContextClosed()) {
624 throwExceptionForClosedState(exceptionState); 573 throwExceptionForClosedState(exceptionState);
625 return nullptr; 574 return nullptr;
626 } 575 }
627 576
628 if (!real) { 577 if (!real) {
629 exceptionState.throwDOMException( 578 exceptionState.throwDOMException(
(...skipping 40 matching lines...)
670 IndexSizeError, 619 IndexSizeError,
671 "length of real array (" + String::number(real->length()) 620 "length of real array (" + String::number(real->length())
672 + ") and length of imaginary array (" + String::number(imag->length ()) 621 + ") and length of imaginary array (" + String::number(imag->length ())
673 + ") must match."); 622 + ") must match.");
674 return nullptr; 623 return nullptr;
675 } 624 }
676 625
677 return PeriodicWave::create(sampleRate(), real, imag); 626 return PeriodicWave::create(sampleRate(), real, imag);
678 } 627 }
679 628
680 String AudioContext::state() const 629 String AbstractAudioContext::state() const
681 { 630 {
682 // These strings had better match the strings for AudioContextState in AudioContext.idl. 631 // These strings had better match the strings for AudioContextState in AudioContext.idl.
683 switch (m_contextState) { 632 switch (m_contextState) {
684 case Suspended: 633 case Suspended:
685 return "suspended"; 634 return "suspended";
686 case Running: 635 case Running:
687 return "running"; 636 return "running";
688 case Closed: 637 case Closed:
689 return "closed"; 638 return "closed";
690 } 639 }
691 ASSERT_NOT_REACHED(); 640 ASSERT_NOT_REACHED();
692 return ""; 641 return "";
693 } 642 }
694 643
695 void AudioContext::setContextState(AudioContextState newState) 644 void AbstractAudioContext::setContextState(AudioContextState newState)
696 { 645 {
697 ASSERT(isMainThread()); 646 ASSERT(isMainThread());
698 647
699 // Validate the transitions. The valid transitions are Suspended->Running, Running->Suspended, 648 // Validate the transitions. The valid transitions are Suspended->Running, Running->Suspended,
700 // and anything->Closed. 649 // and anything->Closed.
701 switch (newState) { 650 switch (newState) {
702 case Suspended: 651 case Suspended:
703 ASSERT(m_contextState == Running); 652 ASSERT(m_contextState == Running);
704 break; 653 break;
705 case Running: 654 case Running:
706 ASSERT(m_contextState == Suspended); 655 ASSERT(m_contextState == Suspended);
707 break; 656 break;
708 case Closed: 657 case Closed:
709 ASSERT(m_contextState != Closed); 658 ASSERT(m_contextState != Closed);
710 break; 659 break;
711 } 660 }
712 661
713 if (newState == m_contextState) { 662 if (newState == m_contextState) {
714 // ASSERTs above failed; just return. 663 // ASSERTs above failed; just return.
715 return; 664 return;
716 } 665 }
717 666
718 m_contextState = newState; 667 m_contextState = newState;
719 668
720 // Notify context that state changed 669 // Notify context that state changed
721 if (executionContext()) 670 if (executionContext())
722 executionContext()->postTask(FROM_HERE, createSameThreadTask(&AudioContext::notifyStateChange, this)); 671 executionContext()->postTask(FROM_HERE, createSameThreadTask(&AbstractAudioContext::notifyStateChange, this));
723 } 672 }
724 673
725 void AudioContext::notifyStateChange() 674 void AbstractAudioContext::notifyStateChange()
726 { 675 {
727 dispatchEvent(Event::create(EventTypeNames::statechange)); 676 dispatchEvent(Event::create(EventTypeNames::statechange));
728 } 677 }
729 678
730 ScriptPromise AudioContext::suspendContext(ScriptState* scriptState) 679 void AbstractAudioContext::notifySourceNodeFinishedProcessing(AudioHandler* handler)
731 {
732 ASSERT(isMainThread());
733 AutoLocker locker(this);
734
735 if (isOfflineContext()) {
736 return ScriptPromise::rejectWithDOMException(
737 scriptState,
738 DOMException::create(
739 InvalidAccessError,
740 "cannot suspend an OfflineAudioContext"));
741 }
742
743 RefPtrWillBeRawPtr<ScriptPromiseResolver> resolver = ScriptPromiseResolver::create(scriptState);
744 ScriptPromise promise = resolver->promise();
745
746 if (m_contextState == Closed) {
747 resolver->reject(
748 DOMException::create(InvalidStateError, "Cannot suspend a context that has been closed"));
749 } else {
750 // Stop rendering now.
751 if (m_destinationNode)
752 stopRendering();
753
754 // Since we don't have any way of knowing when the hardware actually stops, we'll just
755 // resolve the promise now.
756 resolver->resolve();
757 }
758
759 return promise;
760 }
761
762 ScriptPromise AudioContext::resumeContext(ScriptState* scriptState)
763 {
764 ASSERT(isMainThread());
765 AutoLocker locker(this);
766
767 if (isOfflineContext()) {
768 return ScriptPromise::rejectWithDOMException(
769 scriptState,
770 DOMException::create(
771 InvalidAccessError,
772 "cannot resume an OfflineAudioContext"));
773 }
774
775 if (isContextClosed()) {
776 return ScriptPromise::rejectWithDOMException(
777 scriptState,
778 DOMException::create(
779 InvalidAccessError,
780 "cannot resume a closed AudioContext"));
781 }
782
783 RefPtrWillBeRawPtr<ScriptPromiseResolver> resolver = ScriptPromiseResolver::create(scriptState);
784 ScriptPromise promise = resolver->promise();
785
786 // Restart the destination node to pull on the audio graph.
787 if (m_destinationNode)
788 startRendering();
789
790 // Save the resolver which will get resolved when the destination node starts pulling on the
791 // graph again.
792 m_resumeResolvers.append(resolver);
793
794 return promise;
795 }
796
797 void AudioContext::notifySourceNodeFinishedProcessing(AudioHandler* handler)
798 { 680 {
799 ASSERT(isAudioThread()); 681 ASSERT(isAudioThread());
800 m_finishedSourceHandlers.append(handler); 682 m_finishedSourceHandlers.append(handler);
801 } 683 }
802 684
803 void AudioContext::releaseFinishedSourceNodes() 685 void AbstractAudioContext::releaseFinishedSourceNodes()
804 { 686 {
805 ASSERT(isGraphOwner()); 687 ASSERT(isGraphOwner());
806 ASSERT(isAudioThread()); 688 ASSERT(isAudioThread());
807 for (AudioHandler* handler : m_finishedSourceHandlers) { 689 for (AudioHandler* handler : m_finishedSourceHandlers) {
808 for (unsigned i = 0; i < m_activeSourceNodes.size(); ++i) { 690 for (unsigned i = 0; i < m_activeSourceNodes.size(); ++i) {
809 if (handler == &m_activeSourceNodes[i]->handler()) { 691 if (handler == &m_activeSourceNodes[i]->handler()) {
810 handler->breakConnection(); 692 handler->breakConnection();
811 m_activeSourceNodes.remove(i); 693 m_activeSourceNodes.remove(i);
812 break; 694 break;
813 } 695 }
814 } 696 }
815 } 697 }
816 698
817 m_finishedSourceHandlers.clear(); 699 m_finishedSourceHandlers.clear();
818 } 700 }
819 701
820 void AudioContext::notifySourceNodeStartedProcessing(AudioNode* node) 702 void AbstractAudioContext::notifySourceNodeStartedProcessing(AudioNode* node)
821 { 703 {
822 ASSERT(isMainThread()); 704 ASSERT(isMainThread());
823 AutoLocker locker(this); 705 AutoLocker locker(this);
824 706
825 m_activeSourceNodes.append(node); 707 m_activeSourceNodes.append(node);
826 node->handler().makeConnection(); 708 node->handler().makeConnection();
827 } 709 }
828 710
829 void AudioContext::releaseActiveSourceNodes() 711 void AbstractAudioContext::releaseActiveSourceNodes()
830 { 712 {
831 ASSERT(isMainThread()); 713 ASSERT(isMainThread());
832 for (auto& sourceNode : m_activeSourceNodes) 714 for (auto& sourceNode : m_activeSourceNodes)
833 sourceNode->handler().breakConnection(); 715 sourceNode->handler().breakConnection();
834 716
835 m_activeSourceNodes.clear(); 717 m_activeSourceNodes.clear();
836 } 718 }
837 719
838 void AudioContext::handleStoppableSourceNodes() 720 void AbstractAudioContext::handleStoppableSourceNodes()
839 { 721 {
840 ASSERT(isGraphOwner()); 722 ASSERT(isGraphOwner());
841 723
842 // Find AudioBufferSourceNodes to see if we can stop playing them. 724 // Find AudioBufferSourceNodes to see if we can stop playing them.
843 for (AudioNode* node : m_activeSourceNodes) { 725 for (AudioNode* node : m_activeSourceNodes) {
844 if (node->handler().nodeType() == AudioHandler::NodeTypeAudioBufferSource) { 726 if (node->handler().nodeType() == AudioHandler::NodeTypeAudioBufferSource) {
845 AudioBufferSourceNode* sourceNode = static_cast<AudioBufferSourceNode*>(node); 727 AudioBufferSourceNode* sourceNode = static_cast<AudioBufferSourceNode*>(node);
846 sourceNode->audioBufferSourceHandler().handleStoppableSourceNode(); 728 sourceNode->audioBufferSourceHandler().handleStoppableSourceNode();
847 } 729 }
848 } 730 }
849 } 731 }
850 732
851 void AudioContext::handlePreRenderTasks() 733 void AbstractAudioContext::handlePreRenderTasks()
852 { 734 {
853 ASSERT(isAudioThread()); 735 ASSERT(isAudioThread());
854 736
855 // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes). 737 // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
856 // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes. 738 // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
857 if (tryLock()) { 739 if (tryLock()) {
858 deferredTaskHandler().handleDeferredTasks(); 740 deferredTaskHandler().handleDeferredTasks();
859 741
860 resolvePromisesForResume(); 742 resolvePromisesForResume();
861 743
862 // Check to see if source nodes can be stopped because the end time has passed. 744 // Check to see if source nodes can be stopped because the end time has passed.
863 handleStoppableSourceNodes(); 745 handleStoppableSourceNodes();
864 746
865 unlock(); 747 unlock();
866 } 748 }
867 } 749 }
868 750
869 void AudioContext::handlePostRenderTasks() 751 void AbstractAudioContext::handlePostRenderTasks()
870 { 752 {
871 ASSERT(isAudioThread()); 753 ASSERT(isAudioThread());
872 754
873 // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended and this method is called frequently. 755 // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended and this method is called frequently.
874 // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed 756 // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
875 // from the render graph (in which case they'll render silence). 757 // from the render graph (in which case they'll render silence).
876 if (tryLock()) { 758 if (tryLock()) {
877 // Take care of AudioNode tasks where the tryLock() failed previously. 759 // Take care of AudioNode tasks where the tryLock() failed previously.
878 deferredTaskHandler().breakConnections(); 760 deferredTaskHandler().breakConnections();
879 761
880 // Dynamically clean up nodes which are no longer needed. 762 // Dynamically clean up nodes which are no longer needed.
881 releaseFinishedSourceNodes(); 763 releaseFinishedSourceNodes();
882 764
883 deferredTaskHandler().handleDeferredTasks(); 765 deferredTaskHandler().handleDeferredTasks();
884 deferredTaskHandler().requestToDeleteHandlersOnMainThread(); 766 deferredTaskHandler().requestToDeleteHandlersOnMainThread();
885 767
886 unlock(); 768 unlock();
887 } 769 }
888 } 770 }
889 771
890 void AudioContext::resolvePromisesForResumeOnMainThread() 772 void AbstractAudioContext::resolvePromisesForResumeOnMainThread()
891 { 773 {
892 ASSERT(isMainThread()); 774 ASSERT(isMainThread());
893 AutoLocker locker(this); 775 AutoLocker locker(this);
894 776
895 for (auto& resolver : m_resumeResolvers) { 777 for (auto& resolver : m_resumeResolvers) {
896 if (m_contextState == Closed) { 778 if (m_contextState == Closed) {
897 resolver->reject( 779 resolver->reject(
898 DOMException::create(InvalidStateError, "Cannot resume a context that has been closed")); 780 DOMException::create(InvalidStateError, "Cannot resume a context that has been closed"));
899 } else { 781 } else {
900 resolver->resolve(); 782 resolver->resolve();
901 } 783 }
902 } 784 }
903 785
904 m_resumeResolvers.clear(); 786 m_resumeResolvers.clear();
905 m_isResolvingResumePromises = false; 787 m_isResolvingResumePromises = false;
906 } 788 }
907 789
908 void AudioContext::resolvePromisesForResume() 790 void AbstractAudioContext::resolvePromisesForResume()
909 { 791 {
910 // This runs inside the AudioContext's lock when handling pre-render tasks. 792 // This runs inside the AbstractAudioContext's lock when handling pre-render tasks.
911 ASSERT(isAudioThread()); 793 ASSERT(isAudioThread());
912 ASSERT(isGraphOwner()); 794 ASSERT(isGraphOwner());
913 795
914 // Resolve any pending promises created by resume(). Only do this if we have n't already started 796 // Resolve any pending promises created by resume(). Only do this if we have n't already started
915 // resolving these promises. This gets called very often and it takes some t ime to resolve the 797 // resolving these promises. This gets called very often and it takes some t ime to resolve the
916 // promises in the main thread. 798 // promises in the main thread.
917 if (!m_isResolvingResumePromises && m_resumeResolvers.size() > 0) { 799 if (!m_isResolvingResumePromises && m_resumeResolvers.size() > 0) {
918 m_isResolvingResumePromises = true; 800 m_isResolvingResumePromises = true;
919 Platform::current()->mainThread()->postTask(FROM_HERE, threadSafeBind(&AudioContext::resolvePromisesForResumeOnMainThread, this)); 801 Platform::current()->mainThread()->postTask(FROM_HERE, threadSafeBind(&AbstractAudioContext::resolvePromisesForResumeOnMainThread, this));
920 } 802 }
921 } 803 }
922 804
923 void AudioContext::rejectPendingResolvers() 805 void AbstractAudioContext::rejectPendingResolvers()
924 { 806 {
925 ASSERT(isMainThread()); 807 ASSERT(isMainThread());
926 808
927 // Audio context is closing down so reject any resume promises that are still pending. 809 // Audio context is closing down so reject any resume promises that are still pending.
928 810
929 for (auto& resolver : m_resumeResolvers) { 811 for (auto& resolver : m_resumeResolvers) {
930 resolver->reject(DOMException::create(InvalidStateError, "Audio context is going away")); 812 resolver->reject(DOMException::create(InvalidStateError, "Audio context is going away"));
931 } 813 }
932 m_resumeResolvers.clear(); 814 m_resumeResolvers.clear();
933 m_isResolvingResumePromises = false; 815 m_isResolvingResumePromises = false;
934 } 816 }
935 817
936 const AtomicString& AudioContext::interfaceName() const 818 const AtomicString& AbstractAudioContext::interfaceName() const
937 { 819 {
938 return EventTargetNames::AudioContext; 820 return EventTargetNames::AudioContext;
939 } 821 }
940 822
941 ExecutionContext* AudioContext::executionContext() const 823 ExecutionContext* AbstractAudioContext::executionContext() const
942 { 824 {
943 return m_isStopScheduled ? 0 : ActiveDOMObject::executionContext(); 825 return m_isStopScheduled ? 0 : ActiveDOMObject::executionContext();
944 } 826 }
945 827
946 void AudioContext::startRendering() 828 void AbstractAudioContext::startRendering()
947 { 829 {
948 // This is called for both online and offline contexts. 830 // This is called for both online and offline contexts.
949 ASSERT(isMainThread()); 831 ASSERT(isMainThread());
950 ASSERT(m_destinationNode); 832 ASSERT(m_destinationNode);
951 833
952 if (m_contextState == Suspended) { 834 if (m_contextState == Suspended) {
953 destination()->audioDestinationHandler().startRendering(); 835 destination()->audioDestinationHandler().startRendering();
954 setContextState(Running); 836 setContextState(Running);
955 } 837 }
956 } 838 }
957 839
958 void AudioContext::stopRendering() 840 void AbstractAudioContext::fireCompletionEvent()
959 {
960 ASSERT(isMainThread());
961 ASSERT(m_destinationNode);
962 ASSERT(!isOfflineContext());
963
964 if (m_contextState == Running) {
965 destination()->audioDestinationHandler().stopRendering();
966 setContextState(Suspended);
967 deferredTaskHandler().clearHandlersToBeDeleted();
968 }
969 }
970
971 void AudioContext::fireCompletionEvent()
972 { 841 {
973 ASSERT(isMainThread()); 842 ASSERT(isMainThread());
974 if (!isMainThread()) 843 if (!isMainThread())
975 return; 844 return;
976 845
977 AudioBuffer* renderedBuffer = m_renderTarget.get(); 846 AudioBuffer* renderedBuffer = m_renderTarget.get();
978 847
979 // For an offline context, we set the state to closed here so that the oncomplete handler sees 848 // For an offline context, we set the state to closed here so that the oncomplete handler sees
980 // that the context has been closed. 849 // that the context has been closed.
981 setContextState(Closed); 850 setContextState(Closed);
982 851
983 ASSERT(renderedBuffer); 852 ASSERT(renderedBuffer);
984 if (!renderedBuffer) 853 if (!renderedBuffer)
985 return; 854 return;
986 855
987 // Avoid firing the event if the document has already gone away. 856 // Avoid firing the event if the document has already gone away.
988 if (executionContext()) { 857 if (executionContext()) {
989 // Call the offline rendering completion event listener and resolve the promise too. 858 // Call the offline rendering completion event listener and resolve the promise too.
990 dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer)); 859 dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
991 m_offlineResolver->resolve(renderedBuffer); 860 m_offlineResolver->resolve(renderedBuffer);
992 } 861 }
993 } 862 }
994 863
995 DEFINE_TRACE(AudioContext) 864 DEFINE_TRACE(AbstractAudioContext)
996 { 865 {
997 visitor->trace(m_closeResolver);
998 visitor->trace(m_offlineResolver); 866 visitor->trace(m_offlineResolver);
999 visitor->trace(m_renderTarget); 867 visitor->trace(m_renderTarget);
1000 visitor->trace(m_destinationNode); 868 visitor->trace(m_destinationNode);
1001 visitor->trace(m_listener); 869 visitor->trace(m_listener);
1002 // trace() can be called in AudioContext constructor, and 870 // trace() can be called in AbstractAudioContext constructor, and
1003 // m_contextGraphMutex might be unavailable. 871 // m_contextGraphMutex might be unavailable.
1004 if (m_didInitializeContextGraphMutex) { 872 if (m_didInitializeContextGraphMutex) {
1005 AutoLocker lock(this); 873 AutoLocker lock(this);
1006 visitor->trace(m_activeSourceNodes); 874 visitor->trace(m_activeSourceNodes);
1007 } else { 875 } else {
1008 visitor->trace(m_activeSourceNodes); 876 visitor->trace(m_activeSourceNodes);
1009 } 877 }
1010 visitor->trace(m_resumeResolvers); 878 visitor->trace(m_resumeResolvers);
1011 RefCountedGarbageCollectedEventTargetWithInlineData<AudioContext>::trace(visitor); 879 RefCountedGarbageCollectedEventTargetWithInlineData<AbstractAudioContext>::trace(visitor);
1012 ActiveDOMObject::trace(visitor); 880 ActiveDOMObject::trace(visitor);
1013 } 881 }
1014 882
1015 SecurityOrigin* AudioContext::securityOrigin() const 883 SecurityOrigin* AbstractAudioContext::securityOrigin() const
1016 { 884 {
1017 if (executionContext()) 885 if (executionContext())
1018 return executionContext()->securityOrigin(); 886 return executionContext()->securityOrigin();
1019 887
1020 return nullptr; 888 return nullptr;
1021 } 889 }
1022 890
1023 ScriptPromise AudioContext::closeContext(ScriptState* scriptState)
1024 {
1025 if (isOfflineContext()) {
1026 return ScriptPromise::rejectWithDOMException(
1027 scriptState,
1028 DOMException::create(InvalidAccessError, "cannot close an OfflineAudioContext."));
1029 }
1030
1031 if (isContextClosed()) {
1032 // We've already closed the context previously, but it hasn't yet been resolved, so just
1033 // create a new promise and reject it.
1034 return ScriptPromise::rejectWithDOMException(
1035 scriptState,
1036 DOMException::create(InvalidStateError,
1037 "Cannot close a context that is being closed or has already been closed."));
1038 }
1039
1040 m_closeResolver = ScriptPromiseResolver::create(scriptState);
1041 ScriptPromise promise = m_closeResolver->promise();
1042
1043 // Stop the audio context. This will stop the destination node from pulling audio anymore. And
1044 // since we have disconnected the destination from the audio graph, and thus has no references,
1045 // the destination node can GCed if JS has no references. stop() will also resolve the Promise
1046 // created here.
1047 stop();
1048
1049 return promise;
1050 }
1051
1052 } // namespace blink 891 } // namespace blink
1053 892
1054 #endif // ENABLE(WEB_AUDIO) 893 #endif // ENABLE(WEB_AUDIO)