OLD | NEW |
1 /* | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 * Copyright (C) 2010, Google Inc. All rights reserved. | 2 // Use of this source code is governed by a BSD-style license that can be |
3 * | 3 // found in the LICENSE file. |
4 * Redistribution and use in source and binary forms, with or without | |
5 * modification, are permitted provided that the following conditions | |
6 * are met: | |
7 * 1. Redistributions of source code must retain the above copyright | |
8 * notice, this list of conditions and the following disclaimer. | |
9 * 2. Redistributions in binary form must reproduce the above copyright | |
10 * notice, this list of conditions and the following disclaimer in the | |
11 * documentation and/or other materials provided with the distribution. | |
12 * | |
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY | |
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | |
15 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | |
16 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY | |
17 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | |
18 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | |
19 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | |
20 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | |
22 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
23 */ | |
24 | 4 |
25 #include "config.h" | 5 #include "config.h" |
26 #if ENABLE(WEB_AUDIO) | |
27 #include "modules/webaudio/AudioContext.h" | 6 #include "modules/webaudio/AudioContext.h" |
28 | 7 |
29 #include "bindings/core/v8/ExceptionMessages.h" | 8 #include "bindings/core/v8/ExceptionMessages.h" |
30 #include "bindings/core/v8/ExceptionState.h" | 9 #include "bindings/core/v8/ExceptionState.h" |
31 #include "bindings/core/v8/ScriptPromiseResolver.h" | 10 #include "bindings/core/v8/ScriptPromiseResolver.h" |
32 #include "bindings/core/v8/ScriptState.h" | |
33 #include "core/dom/DOMException.h" | 11 #include "core/dom/DOMException.h" |
34 #include "core/dom/Document.h" | |
35 #include "core/dom/ExceptionCode.h" | 12 #include "core/dom/ExceptionCode.h" |
36 #include "core/dom/ExecutionContextTask.h" | |
37 #include "core/html/HTMLMediaElement.h" | |
38 #include "modules/mediastream/MediaStream.h" | |
39 #include "modules/webaudio/AnalyserNode.h" | |
40 #include "modules/webaudio/AudioBuffer.h" | |
41 #include "modules/webaudio/AudioBufferCallback.h" | |
42 #include "modules/webaudio/AudioBufferSourceNode.h" | |
43 #include "modules/webaudio/AudioListener.h" | |
44 #include "modules/webaudio/AudioNodeInput.h" | |
45 #include "modules/webaudio/AudioNodeOutput.h" | |
46 #include "modules/webaudio/BiquadFilterNode.h" | |
47 #include "modules/webaudio/ChannelMergerNode.h" | |
48 #include "modules/webaudio/ChannelSplitterNode.h" | |
49 #include "modules/webaudio/ConvolverNode.h" | |
50 #include "modules/webaudio/DefaultAudioDestinationNode.h" | |
51 #include "modules/webaudio/DelayNode.h" | |
52 #include "modules/webaudio/DynamicsCompressorNode.h" | |
53 #include "modules/webaudio/GainNode.h" | |
54 #include "modules/webaudio/MediaElementAudioSourceNode.h" | |
55 #include "modules/webaudio/MediaStreamAudioDestinationNode.h" | |
56 #include "modules/webaudio/MediaStreamAudioSourceNode.h" | |
57 #include "modules/webaudio/OfflineAudioCompletionEvent.h" | |
58 #include "modules/webaudio/OfflineAudioContext.h" | |
59 #include "modules/webaudio/OfflineAudioDestinationNode.h" | |
60 #include "modules/webaudio/OscillatorNode.h" | |
61 #include "modules/webaudio/PannerNode.h" | |
62 #include "modules/webaudio/PeriodicWave.h" | |
63 #include "modules/webaudio/ScriptProcessorNode.h" | |
64 #include "modules/webaudio/StereoPannerNode.h" | |
65 #include "modules/webaudio/WaveShaperNode.h" | |
66 #include "platform/ThreadSafeFunctional.h" | |
67 #include "public/platform/Platform.h" | |
68 #include "wtf/text/WTFString.h" | |
69 | 13 |
70 #if DEBUG_AUDIONODE_REFERENCES | 14 #if DEBUG_AUDIONODE_REFERENCES |
71 #include <stdio.h> | 15 #include <stdio.h> |
72 #endif | 16 #endif |
73 | 17 |
| 18 #if ENABLE(WEB_AUDIO) |
| 19 |
74 namespace blink { | 20 namespace blink { |
75 | 21 |
76 // Don't allow more than this number of simultaneous AudioContexts talking to hardware. | 22 // Don't allow more than this number of simultaneous AudioContexts |
| 23 // talking to hardware. |
77 const unsigned MaxHardwareContexts = 6; | 24 const unsigned MaxHardwareContexts = 6; |
78 unsigned AudioContext::s_hardwareContextCount = 0; | 25 static unsigned s_hardwareContextCount = 0; |
79 unsigned AudioContext::s_contextId = 0; | 26 static unsigned s_contextId = 0; |
80 | 27 |
81 AudioContext* AudioContext::create(Document& document, ExceptionState& exceptionState) | 28 AbstractAudioContext* AudioContext::create(Document& document, ExceptionState& exceptionState) |
82 { | 29 { |
83 ASSERT(isMainThread()); | 30 ASSERT(isMainThread()); |
84 if (s_hardwareContextCount >= MaxHardwareContexts) { | 31 if (s_hardwareContextCount >= MaxHardwareContexts) { |
85 exceptionState.throwDOMException( | 32 exceptionState.throwDOMException( |
86 NotSupportedError, | 33 NotSupportedError, |
87 ExceptionMessages::indexExceedsMaximumBound( | 34 ExceptionMessages::indexExceedsMaximumBound( |
88 "number of hardware contexts", | 35 "number of hardware contexts", |
89 s_hardwareContextCount, | 36 s_hardwareContextCount, |
90 MaxHardwareContexts)); | 37 MaxHardwareContexts)); |
91 return nullptr; | 38 return nullptr; |
92 } | 39 } |
93 | 40 |
94 AudioContext* audioContext = new AudioContext(&document); | 41 AudioContext* audioContext = new AudioContext(document); |
95 audioContext->suspendIfNeeded(); | 42 audioContext->suspendIfNeeded(); |
| 43 |
| 44 // This starts the audio thread. The destination node's |
| 45 // provideInput() method will now be called repeatedly to render |
| 46 // audio. Each time provideInput() is called, a portion of the |
| 47 // audio stream is rendered. Let's call this time period a "render |
| 48 // quantum". NOTE: for now AudioContext does not need an explicit |
| 49 // startRendering() call from JavaScript. We may want to consider |
| 50 // requiring it for symmetry with OfflineAudioContext. |
| 51 audioContext->startRendering(); |
| 52 ++s_hardwareContextCount; |
| 53 #if DEBUG_AUDIONODE_REFERENCES |
| 54 fprintf(stderr, "%p: AudioContext::AudioContext(): %u #%u\n", |
| 55 audioContext, audioContext->m_contextId, s_hardwareContextCount); |
| 56 #endif |
| 57 |
96 return audioContext; | 58 return audioContext; |
97 } | 59 } |
98 | 60 |
99 // Constructor for rendering to the audio hardware. | 61 AudioContext::AudioContext(Document& document) |
100 AudioContext::AudioContext(Document* document) | 62 : AbstractAudioContext(&document) |
101 : ActiveDOMObject(document) | 63 , m_contextId(s_contextId++) |
102 , m_isStopScheduled(false) | |
103 , m_isCleared(false) | |
104 , m_isInitialized(false) | |
105 , m_destinationNode(nullptr) | |
106 , m_isResolvingResumePromises(false) | |
107 , m_connectionCount(0) | |
108 , m_didInitializeContextGraphMutex(false) | |
109 , m_deferredTaskHandler(DeferredTaskHandler::create()) | |
110 , m_isOfflineContext(false) | |
111 , m_contextState(Suspended) | |
112 { | 64 { |
113 m_didInitializeContextGraphMutex = true; | |
114 m_destinationNode = DefaultAudioDestinationNode::create(this); | |
115 | |
116 initialize(); | |
117 } | |
118 | |
119 // Constructor for offline (non-realtime) rendering. | |
120 AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate) | |
121 : ActiveDOMObject(document) | |
122 , m_isStopScheduled(false) | |
123 , m_isCleared(false) | |
124 , m_isInitialized(false) | |
125 , m_destinationNode(nullptr) | |
126 , m_isResolvingResumePromises(false) | |
127 , m_connectionCount(0) | |
128 , m_didInitializeContextGraphMutex(false) | |
129 , m_deferredTaskHandler(DeferredTaskHandler::create()) | |
130 , m_isOfflineContext(true) | |
131 , m_contextState(Suspended) | |
132 { | |
133 m_didInitializeContextGraphMutex = true; | |
134 // Create a new destination for offline rendering. | |
135 m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate); | |
136 if (m_renderTarget.get()) | |
137 m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get()); | |
138 | |
139 initialize(); | |
140 } | 65 } |
141 | 66 |
142 AudioContext::~AudioContext() | 67 AudioContext::~AudioContext() |
143 { | 68 { |
144 #if DEBUG_AUDIONODE_REFERENCES | 69 #if DEBUG_AUDIONODE_REFERENCES |
145 fprintf(stderr, "%p: AudioContext::~AudioContext(): %u\n", this, m_contextId); | 70 fprintf(stderr, "%p: AudioContext::~AudioContext(): %u\n", this, m_contextId); |
146 #endif | 71 #endif |
147 deferredTaskHandler().contextWillBeDestroyed(); | |
148 // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around. | |
149 ASSERT(!m_isInitialized); | |
150 ASSERT(!m_activeSourceNodes.size()); | |
151 ASSERT(!m_finishedSourceHandlers.size()); | |
152 ASSERT(!m_isResolvingResumePromises); | |
153 ASSERT(!m_resumeResolvers.size()); | |
154 } | 72 } |
155 | 73 |
156 void AudioContext::initialize() | 74 DEFINE_TRACE(AudioContext) |
157 { | 75 { |
158 if (isInitialized()) | 76 visitor->trace(m_closeResolver); |
159 return; | 77 AbstractAudioContext::trace(visitor); |
160 | |
161 FFTFrame::initialize(); | |
162 m_listener = AudioListener::create(); | |
163 | |
164 if (m_destinationNode.get()) { | |
165 m_destinationNode->handler().initialize(); | |
166 | |
167 if (!isOfflineContext()) { | |
168 // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio. | |
169 // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum". | |
170 // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript. | |
171 // We may want to consider requiring it for symmetry with OfflineAudioContext. | |
172 startRendering(); | |
173 ++s_hardwareContextCount; | |
174 } | |
175 | |
176 m_contextId = s_contextId++; | |
177 m_isInitialized = true; | |
178 #if DEBUG_AUDIONODE_REFERENCES | |
179 fprintf(stderr, "%p: AudioContext::AudioContext(): %u #%u\n", | |
180 this, m_contextId, AudioContext::s_hardwareContextCount); | |
181 #endif | |
182 } | |
183 } | |
184 | |
185 void AudioContext::clear() | |
186 { | |
187 m_destinationNode.clear(); | |
188 // The audio rendering thread is dead. Nobody will schedule AudioHandler | |
189 // deletion. Let's do it ourselves. | |
190 deferredTaskHandler().clearHandlersToBeDeleted(); | |
191 m_isCleared = true; | |
192 } | |
193 | |
194 void AudioContext::uninitialize() | |
195 { | |
196 ASSERT(isMainThread()); | |
197 | |
198 if (!isInitialized()) | |
199 return; | |
200 | |
201 m_isInitialized = false; | |
202 | |
203 // This stops the audio thread and all audio rendering. | |
204 if (m_destinationNode) | |
205 m_destinationNode->handler().uninitialize(); | |
206 | |
207 if (!isOfflineContext()) { | |
208 ASSERT(s_hardwareContextCount); | |
209 --s_hardwareContextCount; | |
210 } | |
211 | |
212 // Get rid of the sources which may still be playing. | |
213 releaseActiveSourceNodes(); | |
214 | |
215 // Reject any pending resolvers before we go away. | |
216 rejectPendingResolvers(); | |
217 | |
218 // For an offline audio context, the completion event will set the state to closed. For an | |
219 // online context, we need to do it here. We only want to set the closed state once. | |
220 if (!isOfflineContext()) | |
221 setContextState(Closed); | |
222 | |
223 // Resolve the promise now, if any | |
224 if (m_closeResolver) | |
225 m_closeResolver->resolve(); | |
226 | |
227 ASSERT(m_listener); | |
228 m_listener->waitForHRTFDatabaseLoaderThreadCompletion(); | |
229 | |
230 clear(); | |
231 } | |
232 | |
233 void AudioContext::stop() | |
234 { | |
235 // Usually ExecutionContext calls stop twice. | |
236 if (m_isStopScheduled) | |
237 return; | |
238 m_isStopScheduled = true; | |
239 | |
240 // Don't call uninitialize() immediately here because the ExecutionContext is in the middle | |
241 // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other | |
242 // ActiveDOMObjects so let's schedule uninitialize() to be called later. | |
243 // FIXME: see if there's a more direct way to handle this issue. | |
244 Platform::current()->mainThread()->postTask(FROM_HERE, bind(&AudioContext::uninitialize, this)); | |
245 } | |
246 | |
247 bool AudioContext::hasPendingActivity() const | |
248 { | |
249 // There's no pending activity if the audio context has been cleared. | |
250 return !m_isCleared; | |
251 } | |
252 | |
253 void AudioContext::throwExceptionForClosedState(ExceptionState& exceptionState) | |
254 { | |
255 exceptionState.throwDOMException(InvalidStateError, "AudioContext has been closed."); | |
256 } | |
257 | |
258 AudioBuffer* AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState) | |
259 { | |
260 // It's ok to call createBuffer, even if the context is closed because the AudioBuffer doesn't | |
261 // really "belong" to any particular context. | |
262 | |
263 return AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate, exceptionState); | |
264 } | |
265 | |
266 void AudioContext::decodeAudioData(DOMArrayBuffer* audioData, AudioBufferCallback* successCallback, AudioBufferCallback* errorCallback, ExceptionState& exceptionState) | |
267 { | |
268 if (isContextClosed()) { | |
269 throwExceptionForClosedState(exceptionState); | |
270 return; | |
271 } | |
272 | |
273 if (!audioData) { | |
274 exceptionState.throwDOMException( | |
275 SyntaxError, | |
276 "invalid ArrayBuffer for audioData."); | |
277 return; | |
278 } | |
279 m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback); | |
280 } | |
281 | |
282 AudioBufferSourceNode* AudioContext::createBufferSource(ExceptionState& exceptionState) | |
283 { | |
284 ASSERT(isMainThread()); | |
285 | |
286 if (isContextClosed()) { | |
287 throwExceptionForClosedState(exceptionState); | |
288 return nullptr; | |
289 } | |
290 | |
291 AudioBufferSourceNode* node = AudioBufferSourceNode::create(*this, sampleRate()); | |
292 | |
293 // Do not add a reference to this source node now. The reference will be added when start() is | |
294 // called. | |
295 | |
296 return node; | |
297 } | |
298 | |
299 MediaElementAudioSourceNode* AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState) | |
300 { | |
301 ASSERT(isMainThread()); | |
302 | |
303 if (isContextClosed()) { | |
304 throwExceptionForClosedState(exceptionState); | |
305 return nullptr; | |
306 } | |
307 | |
308 if (!mediaElement) { | |
309 exceptionState.throwDOMException( | |
310 InvalidStateError, | |
311 "invalid HTMLMedialElement."); | |
312 return nullptr; | |
313 } | |
314 | |
315 // First check if this media element already has a source node. | |
316 if (mediaElement->audioSourceNode()) { | |
317 exceptionState.throwDOMException( | |
318 InvalidStateError, | |
319 "HTMLMediaElement already connected previously to a different MediaE
lementSourceNode."); | |
320 return nullptr; | |
321 } | |
322 | |
323 MediaElementAudioSourceNode* node = MediaElementAudioSourceNode::create(*this, *mediaElement); | |
324 | |
325 mediaElement->setAudioSourceNode(node); | |
326 | |
327 notifySourceNodeStartedProcessing(node); // context keeps reference until node is disconnected | |
328 return node; | |
329 } | |
330 | |
331 MediaStreamAudioSourceNode* AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState) | |
332 { | |
333 ASSERT(isMainThread()); | |
334 | |
335 if (isContextClosed()) { | |
336 throwExceptionForClosedState(exceptionState); | |
337 return nullptr; | |
338 } | |
339 | |
340 if (!mediaStream) { | |
341 exceptionState.throwDOMException( | |
342 InvalidStateError, | |
343 "invalid MediaStream source"); | |
344 return nullptr; | |
345 } | |
346 | |
347 MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks(); | |
348 if (audioTracks.isEmpty()) { | |
349 exceptionState.throwDOMException( | |
350 InvalidStateError, | |
351 "MediaStream has no audio track"); | |
352 return nullptr; | |
353 } | |
354 | |
355 // Use the first audio track in the media stream. | |
356 MediaStreamTrack* audioTrack = audioTracks[0]; | |
357 OwnPtr<AudioSourceProvider> provider = audioTrack->createWebAudioSource(); | |
358 MediaStreamAudioSourceNode* node = MediaStreamAudioSourceNode::create(*this, *mediaStream, audioTrack, provider.release()); | |
359 | |
360 // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams. | |
361 node->setFormat(2, sampleRate()); | |
362 | |
363 notifySourceNodeStartedProcessing(node); // context keeps reference until node is disconnected | |
364 return node; | |
365 } | |
366 | |
367 MediaStreamAudioDestinationNode* AudioContext::createMediaStreamDestination(ExceptionState& exceptionState) | |
368 { | |
369 if (isContextClosed()) { | |
370 throwExceptionForClosedState(exceptionState); | |
371 return nullptr; | |
372 } | |
373 | |
374 // Set number of output channels to stereo by default. | |
375 return MediaStreamAudioDestinationNode::create(*this, 2); | |
376 } | |
377 | |
378 ScriptProcessorNode* AudioContext::createScriptProcessor(ExceptionState& exceptionState) | |
379 { | |
380 // Set number of input/output channels to stereo by default. | |
381 return createScriptProcessor(0, 2, 2, exceptionState); | |
382 } | |
383 | |
384 ScriptProcessorNode* AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState) | |
385 { | |
386 // Set number of input/output channels to stereo by default. | |
387 return createScriptProcessor(bufferSize, 2, 2, exceptionState); | |
388 } | |
389 | |
390 ScriptProcessorNode* AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState) | |
391 { | |
392 // Set number of output channels to stereo by default. | |
393 return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exceptionState); | |
394 } | |
395 | |
396 ScriptProcessorNode* AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState) | |
397 { | |
398 ASSERT(isMainThread()); | |
399 | |
400 if (isContextClosed()) { | |
401 throwExceptionForClosedState(exceptionState); | |
402 return nullptr; | |
403 } | |
404 | |
405 ScriptProcessorNode* node = ScriptProcessorNode::create(*this, sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels); | |
406 | |
407 if (!node) { | |
408 if (!numberOfInputChannels && !numberOfOutputChannels) { | |
409 exceptionState.throwDOMException( | |
410 IndexSizeError, | |
411 "number of input channels and output channels cannot both be zer
o."); | |
412 } else if (numberOfInputChannels > AudioContext::maxNumberOfChannels())
{ | |
413 exceptionState.throwDOMException( | |
414 IndexSizeError, | |
415 "number of input channels (" + String::number(numberOfInputChann
els) | |
416 + ") exceeds maximum (" | |
417 + String::number(AudioContext::maxNumberOfChannels()) + ")."); | |
418 } else if (numberOfOutputChannels > AudioContext::maxNumberOfChannels()) { | |
419 exceptionState.throwDOMException( | |
420 IndexSizeError, | |
421 "number of output channels (" + String::number(numberOfInputChan
nels) | |
422 + ") exceeds maximum (" | |
423 + String::number(AudioContext::maxNumberOfChannels()) + ")."); | |
424 } else { | |
425 exceptionState.throwDOMException( | |
426 IndexSizeError, | |
427 "buffer size (" + String::number(bufferSize) | |
428 + ") must be a power of two between 256 and 16384."); | |
429 } | |
430 return nullptr; | |
431 } | |
432 | |
433 notifySourceNodeStartedProcessing(node); // context keeps reference until we stop making javascript rendering callbacks | |
434 return node; | |
435 } | |
436 | |
437 StereoPannerNode* AudioContext::createStereoPanner(ExceptionState& exceptionState) | |
438 { | |
439 ASSERT(isMainThread()); | |
440 if (isContextClosed()) { | |
441 throwExceptionForClosedState(exceptionState); | |
442 return nullptr; | |
443 } | |
444 | |
445 return StereoPannerNode::create(*this, sampleRate()); | |
446 } | |
447 | |
448 BiquadFilterNode* AudioContext::createBiquadFilter(ExceptionState& exceptionState) | |
449 { | |
450 ASSERT(isMainThread()); | |
451 if (isContextClosed()) { | |
452 throwExceptionForClosedState(exceptionState); | |
453 return nullptr; | |
454 } | |
455 | |
456 return BiquadFilterNode::create(*this, sampleRate()); | |
457 } | |
458 | |
459 WaveShaperNode* AudioContext::createWaveShaper(ExceptionState& exceptionState) | |
460 { | |
461 ASSERT(isMainThread()); | |
462 if (isContextClosed()) { | |
463 throwExceptionForClosedState(exceptionState); | |
464 return nullptr; | |
465 } | |
466 | |
467 return WaveShaperNode::create(*this); | |
468 } | |
469 | |
470 PannerNode* AudioContext::createPanner(ExceptionState& exceptionState) | |
471 { | |
472 ASSERT(isMainThread()); | |
473 if (isContextClosed()) { | |
474 throwExceptionForClosedState(exceptionState); | |
475 return nullptr; | |
476 } | |
477 | |
478 return PannerNode::create(*this, sampleRate()); | |
479 } | |
480 | |
481 ConvolverNode* AudioContext::createConvolver(ExceptionState& exceptionState) | |
482 { | |
483 ASSERT(isMainThread()); | |
484 if (isContextClosed()) { | |
485 throwExceptionForClosedState(exceptionState); | |
486 return nullptr; | |
487 } | |
488 | |
489 return ConvolverNode::create(*this, sampleRate()); | |
490 } | |
491 | |
492 DynamicsCompressorNode* AudioContext::createDynamicsCompressor(ExceptionState& exceptionState) | |
493 { | |
494 ASSERT(isMainThread()); | |
495 if (isContextClosed()) { | |
496 throwExceptionForClosedState(exceptionState); | |
497 return nullptr; | |
498 } | |
499 | |
500 return DynamicsCompressorNode::create(*this, sampleRate()); | |
501 } | |
502 | |
503 AnalyserNode* AudioContext::createAnalyser(ExceptionState& exceptionState) | |
504 { | |
505 ASSERT(isMainThread()); | |
506 if (isContextClosed()) { | |
507 throwExceptionForClosedState(exceptionState); | |
508 return nullptr; | |
509 } | |
510 | |
511 return AnalyserNode::create(*this, sampleRate()); | |
512 } | |
513 | |
514 GainNode* AudioContext::createGain(ExceptionState& exceptionState) | |
515 { | |
516 ASSERT(isMainThread()); | |
517 if (isContextClosed()) { | |
518 throwExceptionForClosedState(exceptionState); | |
519 return nullptr; | |
520 } | |
521 | |
522 return GainNode::create(*this, sampleRate()); | |
523 } | |
524 | |
525 DelayNode* AudioContext::createDelay(ExceptionState& exceptionState) | |
526 { | |
527 const double defaultMaxDelayTime = 1; | |
528 return createDelay(defaultMaxDelayTime, exceptionState); | |
529 } | |
530 | |
531 DelayNode* AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState) | |
532 { | |
533 ASSERT(isMainThread()); | |
534 if (isContextClosed()) { | |
535 throwExceptionForClosedState(exceptionState); | |
536 return nullptr; | |
537 } | |
538 | |
539 return DelayNode::create(*this, sampleRate(), maxDelayTime, exceptionState); | |
540 } | |
541 | |
542 ChannelSplitterNode* AudioContext::createChannelSplitter(ExceptionState& exceptionState) | |
543 { | |
544 const unsigned ChannelSplitterDefaultNumberOfOutputs = 6; | |
545 return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptionState); | |
546 } | |
547 | |
548 ChannelSplitterNode* AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState) | |
549 { | |
550 ASSERT(isMainThread()); | |
551 | |
552 if (isContextClosed()) { | |
553 throwExceptionForClosedState(exceptionState); | |
554 return nullptr; | |
555 } | |
556 | |
557 ChannelSplitterNode* node = ChannelSplitterNode::create(*this, sampleRate(), numberOfOutputs); | |
558 | |
559 if (!node) { | |
560 exceptionState.throwDOMException( | |
561 IndexSizeError, | |
562 "number of outputs (" + String::number(numberOfOutputs) | |
563 + ") must be between 1 and " | |
564 + String::number(AudioContext::maxNumberOfChannels()) + "."); | |
565 return nullptr; | |
566 } | |
567 | |
568 return node; | |
569 } | |
570 | |
571 ChannelMergerNode* AudioContext::createChannelMerger(ExceptionState& exceptionState) | |
572 { | |
573 const unsigned ChannelMergerDefaultNumberOfInputs = 6; | |
574 return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionState); | |
575 } | |
576 | |
577 ChannelMergerNode* AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState) | |
578 { | |
579 ASSERT(isMainThread()); | |
580 if (isContextClosed()) { | |
581 throwExceptionForClosedState(exceptionState); | |
582 return nullptr; | |
583 } | |
584 | |
585 ChannelMergerNode* node = ChannelMergerNode::create(*this, sampleRate(), numberOfInputs); | |
586 | |
587 if (!node) { | |
588 exceptionState.throwDOMException( | |
589 IndexSizeError, | |
590 ExceptionMessages::indexOutsideRange<size_t>( | |
591 "number of inputs", | |
592 numberOfInputs, | |
593 1, | |
594 ExceptionMessages::InclusiveBound, | |
595 AudioContext::maxNumberOfChannels(), | |
596 ExceptionMessages::InclusiveBound)); | |
597 return nullptr; | |
598 } | |
599 | |
600 return node; | |
601 } | |
602 | |
603 OscillatorNode* AudioContext::createOscillator(ExceptionState& exceptionState) | |
604 { | |
605 ASSERT(isMainThread()); | |
606 if (isContextClosed()) { | |
607 throwExceptionForClosedState(exceptionState); | |
608 return nullptr; | |
609 } | |
610 | |
611 OscillatorNode* node = OscillatorNode::create(*this, sampleRate()); | |
612 | |
613 // Do not add a reference to this source node now. The reference will be added when start() is | |
614 // called. | |
615 | |
616 return node; | |
617 } | |
618 | |
619 PeriodicWave* AudioContext::createPeriodicWave(DOMFloat32Array* real, DOMFloat32Array* imag, ExceptionState& exceptionState) | |
620 { | |
621 ASSERT(isMainThread()); | |
622 | |
623 if (isContextClosed()) { | |
624 throwExceptionForClosedState(exceptionState); | |
625 return nullptr; | |
626 } | |
627 | |
628 if (!real) { | |
629 exceptionState.throwDOMException( | |
630 SyntaxError, | |
631 "invalid real array"); | |
632 return nullptr; | |
633 } | |
634 | |
635 if (!imag) { | |
636 exceptionState.throwDOMException( | |
637 SyntaxError, | |
638 "invalid imaginary array"); | |
639 return nullptr; | |
640 } | |
641 | |
642 if (real->length() > PeriodicWave::kMaxPeriodicWaveArraySize) { | |
643 exceptionState.throwDOMException( | |
644 IndexSizeError, | |
645 ExceptionMessages::indexOutsideRange( | |
646 "length of the real part array", | |
647 real->length(), | |
648 1u, | |
649 ExceptionMessages::InclusiveBound, | |
650 PeriodicWave::kMaxPeriodicWaveArraySize, | |
651 ExceptionMessages::InclusiveBound)); | |
652 return nullptr; | |
653 } | |
654 | |
655 if (imag->length() > PeriodicWave::kMaxPeriodicWaveArraySize) { | |
656 exceptionState.throwDOMException( | |
657 IndexSizeError, | |
658 ExceptionMessages::indexOutsideRange( | |
659 "length of the imaginary part array", | |
660 imag->length(), | |
661 1u, | |
662 ExceptionMessages::InclusiveBound, | |
663 PeriodicWave::kMaxPeriodicWaveArraySize, | |
664 ExceptionMessages::InclusiveBound)); | |
665 return nullptr; | |
666 } | |
667 | |
668 if (real->length() != imag->length()) { | |
669 exceptionState.throwDOMException( | |
670 IndexSizeError, | |
671 "length of real array (" + String::number(real->length()) | |
672 + ") and length of imaginary array (" + String::number(imag->length
()) | |
673 + ") must match."); | |
674 return nullptr; | |
675 } | |
676 | |
677 return PeriodicWave::create(sampleRate(), real, imag); | |
678 } | |
679 | |
680 String AudioContext::state() const | |
681 { | |
682 // These strings had better match the strings for AudioContextState in AudioContext.idl. | |
683 switch (m_contextState) { | |
684 case Suspended: | |
685 return "suspended"; | |
686 case Running: | |
687 return "running"; | |
688 case Closed: | |
689 return "closed"; | |
690 } | |
691 ASSERT_NOT_REACHED(); | |
692 return ""; | |
693 } | |
694 | |
695 void AudioContext::setContextState(AudioContextState newState) | |
696 { | |
697 ASSERT(isMainThread()); | |
698 | |
699 // Validate the transitions. The valid transitions are Suspended->Running, Running->Suspended, | |
700 // and anything->Closed. | |
701 switch (newState) { | |
702 case Suspended: | |
703 ASSERT(m_contextState == Running); | |
704 break; | |
705 case Running: | |
706 ASSERT(m_contextState == Suspended); | |
707 break; | |
708 case Closed: | |
709 ASSERT(m_contextState != Closed); | |
710 break; | |
711 } | |
712 | |
713 if (newState == m_contextState) { | |
714 // ASSERTs above failed; just return. | |
715 return; | |
716 } | |
717 | |
718 m_contextState = newState; | |
719 | |
720 // Notify context that state changed | |
721 if (executionContext()) | |
722 executionContext()->postTask(FROM_HERE, createSameThreadTask(&AudioContext::notifyStateChange, this)); | |
723 } | |
724 | |
725 void AudioContext::notifyStateChange() | |
726 { | |
727 dispatchEvent(Event::create(EventTypeNames::statechange)); | |
728 } | 78 } |
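setContextState() above validates every transition with ASSERTs before notifying listeners: the only legal moves are Suspended to Running, Running to Suspended, and anything (except Closed itself) to Closed, which is terminal. The same table written as a standalone predicate, sketched with a local enum rather than Blink's AudioContextState:

enum class State { Suspended, Running, Closed };

bool isValidTransition(State from, State to)
{
    switch (to) {
    case State::Suspended: return from == State::Running;
    case State::Running: return from == State::Suspended;
    case State::Closed: return from != State::Closed; // Closed is terminal.
    }
    return false; // Unreachable for valid enum values.
}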
729 | 79 |
730 ScriptPromise AudioContext::suspendContext(ScriptState* scriptState) | 80 ScriptPromise AudioContext::suspendContext(ScriptState* scriptState) |
731 { | 81 { |
732 ASSERT(isMainThread()); | 82 ASSERT(isMainThread()); |
733 AutoLocker locker(this); | 83 AutoLocker locker(this); |
734 | 84 |
735 if (isOfflineContext()) { | |
736 return ScriptPromise::rejectWithDOMException( | |
737 scriptState, | |
738 DOMException::create( | |
739 InvalidAccessError, | |
740 "cannot suspend an OfflineAudioContext")); | |
741 } | |
742 | |
743 RefPtrWillBeRawPtr<ScriptPromiseResolver> resolver = ScriptPromiseResolver::create(scriptState); | 85 RefPtrWillBeRawPtr<ScriptPromiseResolver> resolver = ScriptPromiseResolver::create(scriptState); |
744 ScriptPromise promise = resolver->promise(); | 86 ScriptPromise promise = resolver->promise(); |
745 | 87 |
746 if (m_contextState == Closed) { | 88 if (contextState() == Closed) { |
747 resolver->reject( | 89 resolver->reject( |
748 DOMException::create(InvalidStateError, "Cannot suspend a context th
at has been closed")); | 90 DOMException::create(InvalidStateError, "Cannot suspend a context th
at has been closed")); |
749 } else { | 91 } else { |
750 // Stop rendering now. | 92 // Stop rendering now. |
751 if (m_destinationNode) | 93 if (destination()) |
752 stopRendering(); | 94 stopRendering(); |
753 | 95 |
754 // Since we don't have any way of knowing when the hardware actually stops, we'll just | 96 // Since we don't have any way of knowing when the hardware actually stops, we'll just |
755 // resolve the promise now. | 97 // resolve the promise now. |
756 resolver->resolve(); | 98 resolver->resolve(); |
757 } | 99 } |
758 | 100 |
759 return promise; | 101 return promise; |
760 } | 102 } |
761 | 103 |
762 ScriptPromise AudioContext::resumeContext(ScriptState* scriptState) | 104 ScriptPromise AudioContext::resumeContext(ScriptState* scriptState) |
763 { | 105 { |
764 ASSERT(isMainThread()); | 106 ASSERT(isMainThread()); |
765 AutoLocker locker(this); | |
766 | |
767 if (isOfflineContext()) { | |
768 return ScriptPromise::rejectWithDOMException( | |
769 scriptState, | |
770 DOMException::create( | |
771 InvalidAccessError, | |
772 "cannot resume an OfflineAudioContext")); | |
773 } | |
774 | 107 |
775 if (isContextClosed()) { | 108 if (isContextClosed()) { |
776 return ScriptPromise::rejectWithDOMException( | 109 return ScriptPromise::rejectWithDOMException( |
777 scriptState, | 110 scriptState, |
778 DOMException::create( | 111 DOMException::create( |
779 InvalidAccessError, | 112 InvalidAccessError, |
780 "cannot resume a closed AudioContext")); | 113 "cannot resume a closed AudioContext")); |
781 } | 114 } |
782 | 115 |
783 RefPtrWillBeRawPtr<ScriptPromiseResolver> resolver = ScriptPromiseResolver::create(scriptState); | 116 RefPtrWillBeRawPtr<ScriptPromiseResolver> resolver = ScriptPromiseResolver::create(scriptState); |
784 ScriptPromise promise = resolver->promise(); | 117 ScriptPromise promise = resolver->promise(); |
785 | 118 |
786 // Restart the destination node to pull on the audio graph. | 119 // Restart the destination node to pull on the audio graph. |
787 if (m_destinationNode) | 120 if (destination()) |
788 startRendering(); | 121 startRendering(); |
789 | 122 |
790 // Save the resolver which will get resolved when the destination node starts pulling on the | 123 // Save the resolver which will get resolved when the destination node starts pulling on the |
791 // graph again. | 124 // graph again. |
792 m_resumeResolvers.append(resolver); | 125 { |
| 126 AutoLocker locker(this); |
| 127 m_resumeResolvers.append(resolver); |
| 128 } |
793 | 129 |
794 return promise; | 130 return promise; |
795 } | 131 } |
796 | 132 |
797 void AudioContext::notifySourceNodeFinishedProcessing(AudioHandler* handler) | |
798 { | |
799 ASSERT(isAudioThread()); | |
800 m_finishedSourceHandlers.append(handler); | |
801 } | |
802 | |
803 void AudioContext::releaseFinishedSourceNodes() | |
804 { | |
805 ASSERT(isGraphOwner()); | |
806 ASSERT(isAudioThread()); | |
807 for (AudioHandler* handler : m_finishedSourceHandlers) { | |
808 for (unsigned i = 0; i < m_activeSourceNodes.size(); ++i) { | |
809 if (handler == &m_activeSourceNodes[i]->handler()) { | |
810 handler->breakConnection(); | |
811 m_activeSourceNodes.remove(i); | |
812 break; | |
813 } | |
814 } | |
815 } | |
816 | |
817 m_finishedSourceHandlers.clear(); | |
818 } | |
819 | |
820 void AudioContext::notifySourceNodeStartedProcessing(AudioNode* node) | |
821 { | |
822 ASSERT(isMainThread()); | |
823 AutoLocker locker(this); | |
824 | |
825 m_activeSourceNodes.append(node); | |
826 node->handler().makeConnection(); | |
827 } | |
828 | |
829 void AudioContext::releaseActiveSourceNodes() | |
830 { | |
831 ASSERT(isMainThread()); | |
832 for (auto& sourceNode : m_activeSourceNodes) | |
833 sourceNode->handler().breakConnection(); | |
834 | |
835 m_activeSourceNodes.clear(); | |
836 } | |
837 | |
838 void AudioContext::handleStoppableSourceNodes() | |
839 { | |
840 ASSERT(isGraphOwner()); | |
841 | |
842 // Find AudioBufferSourceNodes to see if we can stop playing them. | |
843 for (AudioNode* node : m_activeSourceNodes) { | |
844 if (node->handler().nodeType() == AudioHandler::NodeTypeAudioBufferSource) { | |
845 AudioBufferSourceNode* sourceNode = static_cast<AudioBufferSourceNode*>(node); | |
846 sourceNode->audioBufferSourceHandler().handleStoppableSourceNode(); | |
847 } | |
848 } | |
849 } | |
850 | |
851 void AudioContext::handlePreRenderTasks() | |
852 { | |
853 ASSERT(isAudioThread()); | |
854 | |
855 // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes). | |
856 // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes. | |
857 if (tryLock()) { | |
858 deferredTaskHandler().handleDeferredTasks(); | |
859 | |
860 resolvePromisesForResume(); | |
861 | |
862 // Check to see if source nodes can be stopped because the end time has passed. | |
863 handleStoppableSourceNodes(); | |
864 | |
865 unlock(); | |
866 } | |
867 } | |
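handlePreRenderTasks() above and handlePostRenderTasks() below both run on the real-time audio thread, which must never block, so they use tryLock() and simply skip the work when the main thread holds the graph lock. A sketch of the same discipline, assuming std::mutex in place of Blink's graph mutex:

#include <mutex>

std::mutex graphMutex; // stand-in for the context's graph lock

void handlePreRenderTasksSketch()
{
    std::unique_lock<std::mutex> lock(graphMutex, std::try_to_lock);
    if (!lock.owns_lock())
        return; // Contended; deferred changes are picked up on a later render quantum.
    // ... handle deferred tasks, resolve resume promises, stop finished sources ...
}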
868 | |
869 void AudioContext::handlePostRenderTasks() | |
870 { | |
871 ASSERT(isAudioThread()); | |
872 | |
873 // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended and this method is called frequently. | |
874 // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed | |
875 // from the render graph (in which case they'll render silence). | |
876 if (tryLock()) { | |
877 // Take care of AudioNode tasks where the tryLock() failed previously. | |
878 deferredTaskHandler().breakConnections(); | |
879 | |
880 // Dynamically clean up nodes which are no longer needed. | |
881 releaseFinishedSourceNodes(); | |
882 | |
883 deferredTaskHandler().handleDeferredTasks(); | |
884 deferredTaskHandler().requestToDeleteHandlersOnMainThread(); | |
885 | |
886 unlock(); | |
887 } | |
888 } | |
889 | |
890 void AudioContext::resolvePromisesForResumeOnMainThread() | |
891 { | |
892 ASSERT(isMainThread()); | |
893 AutoLocker locker(this); | |
894 | |
895 for (auto& resolver : m_resumeResolvers) { | |
896 if (m_contextState == Closed) { | |
897 resolver->reject( | |
898 DOMException::create(InvalidStateError, "Cannot resume a context
that has been closed")); | |
899 } else { | |
900 resolver->resolve(); | |
901 } | |
902 } | |
903 | |
904 m_resumeResolvers.clear(); | |
905 m_isResolvingResumePromises = false; | |
906 } | |
907 | |
908 void AudioContext::resolvePromisesForResume() | |
909 { | |
910 // This runs inside the AudioContext's lock when handling pre-render tasks. | |
911 ASSERT(isAudioThread()); | |
912 ASSERT(isGraphOwner()); | |
913 | |
914 // Resolve any pending promises created by resume(). Only do this if we haven't already started | |
915 // resolving these promises. This gets called very often and it takes some time to resolve the | |
916 // promises in the main thread. | |
917 if (!m_isResolvingResumePromises && m_resumeResolvers.size() > 0) { | |
918 m_isResolvingResumePromises = true; | |
919 Platform::current()->mainThread()->postTask(FROM_HERE, threadSafeBind(&AudioContext::resolvePromisesForResumeOnMainThread, this)); | |
920 } | |
921 } | |
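resolvePromisesForResume() runs on the audio thread once per render quantum, but promises must be resolved on the main thread, so it only posts a task; the m_isResolvingResumePromises flag keeps redundant tasks from piling up while one is already in flight. A sketch of that hand-off; the real code is protected by the graph lock, so the atomic flag and inline task runner here are illustrative stand-ins:

#include <atomic>
#include <cstddef>
#include <functional>

std::atomic<bool> isResolvingResumePromises{false};

// Illustrative stand-in for posting a task to the main thread (runs inline here).
void postTaskToMainThread(std::function<void()> task) { task(); }

void resolvePromisesForResumeSketch(std::size_t pendingResolverCount)
{
    // Audio thread: queue at most one main-thread task at a time.
    if (pendingResolverCount == 0 || isResolvingResumePromises.exchange(true))
        return;
    postTaskToMainThread([] {
        // Main thread: resolve or reject each saved resolver, then reset the flag.
        isResolvingResumePromises.store(false);
    });
}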
922 | |
923 void AudioContext::rejectPendingResolvers() | |
924 { | |
925 ASSERT(isMainThread()); | |
926 | |
927 // Audio context is closing down so reject any resume promises that are still pending. | |
928 | |
929 for (auto& resolver : m_resumeResolvers) { | |
930 resolver->reject(DOMException::create(InvalidStateError, "Audio context is going away")); | |
931 } | |
932 m_resumeResolvers.clear(); | |
933 m_isResolvingResumePromises = false; | |
934 } | |
935 | |
936 const AtomicString& AudioContext::interfaceName() const | |
937 { | |
938 return EventTargetNames::AudioContext; | |
939 } | |
940 | |
941 ExecutionContext* AudioContext::executionContext() const | |
942 { | |
943 return m_isStopScheduled ? 0 : ActiveDOMObject::executionContext(); | |
944 } | |
945 | |
946 void AudioContext::startRendering() | |
947 { | |
948 // This is called for both online and offline contexts. | |
949 ASSERT(isMainThread()); | |
950 ASSERT(m_destinationNode); | |
951 | |
952 if (m_contextState == Suspended) { | |
953 destination()->audioDestinationHandler().startRendering(); | |
954 setContextState(Running); | |
955 } | |
956 } | |
957 | |
958 void AudioContext::stopRendering() | |
959 { | |
960 ASSERT(isMainThread()); | |
961 ASSERT(m_destinationNode); | |
962 ASSERT(!isOfflineContext()); | |
963 | |
964 if (m_contextState == Running) { | |
965 destination()->audioDestinationHandler().stopRendering(); | |
966 setContextState(Suspended); | |
967 deferredTaskHandler().clearHandlersToBeDeleted(); | |
968 } | |
969 } | |
970 | |
971 void AudioContext::fireCompletionEvent() | |
972 { | |
973 ASSERT(isMainThread()); | |
974 if (!isMainThread()) | |
975 return; | |
976 | |
977 AudioBuffer* renderedBuffer = m_renderTarget.get(); | |
978 | |
979 // For an offline context, we set the state to closed here so that the oncomplete handler sees | |
980 // that the context has been closed. | |
981 setContextState(Closed); | |
982 | |
983 ASSERT(renderedBuffer); | |
984 if (!renderedBuffer) | |
985 return; | |
986 | |
987 // Avoid firing the event if the document has already gone away. | |
988 if (executionContext()) { | |
989 // Call the offline rendering completion event listener and resolve the promise too. | |
990 dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer)); | |
991 m_offlineResolver->resolve(renderedBuffer); | |
992 } | |
993 } | |
994 | |
995 DEFINE_TRACE(AudioContext) | |
996 { | |
997 visitor->trace(m_closeResolver); | |
998 visitor->trace(m_offlineResolver); | |
999 visitor->trace(m_renderTarget); | |
1000 visitor->trace(m_destinationNode); | |
1001 visitor->trace(m_listener); | |
1002 // trace() can be called in AudioContext constructor, and | |
1003 // m_contextGraphMutex might be unavailable. | |
1004 if (m_didInitializeContextGraphMutex) { | |
1005 AutoLocker lock(this); | |
1006 visitor->trace(m_activeSourceNodes); | |
1007 } else { | |
1008 visitor->trace(m_activeSourceNodes); | |
1009 } | |
1010 visitor->trace(m_resumeResolvers); | |
1011 RefCountedGarbageCollectedEventTargetWithInlineData<AudioContext>::trace(visitor); | |
1012 ActiveDOMObject::trace(visitor); | |
1013 } | |
1014 | |
1015 SecurityOrigin* AudioContext::securityOrigin() const | |
1016 { | |
1017 if (executionContext()) | |
1018 return executionContext()->securityOrigin(); | |
1019 | |
1020 return nullptr; | |
1021 } | |
1022 | |
1023 ScriptPromise AudioContext::closeContext(ScriptState* scriptState) | 133 ScriptPromise AudioContext::closeContext(ScriptState* scriptState) |
1024 { | 134 { |
1025 if (isOfflineContext()) { | |
1026 return ScriptPromise::rejectWithDOMException( | |
1027 scriptState, | |
1028 DOMException::create(InvalidAccessError, "cannot close an OfflineAudioContext.")); | |
1029 } | |
1030 | |
1031 if (isContextClosed()) { | 135 if (isContextClosed()) { |
1032 // We've already closed the context previously, but it hasn't yet been resolved, so just | 136 // We've already closed the context previously, but it hasn't yet been resolved, so just |
1033 // create a new promise and reject it. | 137 // create a new promise and reject it. |
1034 return ScriptPromise::rejectWithDOMException( | 138 return ScriptPromise::rejectWithDOMException( |
1035 scriptState, | 139 scriptState, |
1036 DOMException::create(InvalidStateError, | 140 DOMException::create(InvalidStateError, |
1037 "Cannot close a context that is being closed or has already been
closed.")); | 141 "Cannot close a context that is being closed or has already been
closed.")); |
1038 } | 142 } |
1039 | 143 |
1040 m_closeResolver = ScriptPromiseResolver::create(scriptState); | 144 m_closeResolver = ScriptPromiseResolver::create(scriptState); |
1041 ScriptPromise promise = m_closeResolver->promise(); | 145 ScriptPromise promise = m_closeResolver->promise(); |
1042 | 146 |
1043 // Stop the audio context. This will stop the destination node from pulling audio anymore. And | 147 // Stop the audio context. This will stop the destination node from pulling audio anymore. And |
1044 // since we have disconnected the destination from the audio graph, and thus it has no references, | 148 // since we have disconnected the destination from the audio graph, and thus it has no references, |
1045 // the destination node can be GCed if JS has no references. stop() will also resolve the Promise | 149 // the destination node can be GCed if JS has no references. stop() will also resolve the Promise |
1046 // created here. | 150 // created here. |
1047 stop(); | 151 stop(); |
1048 | 152 |
1049 return promise; | 153 return promise; |
1050 } | 154 } |
1051 | 155 |
| 156 void AudioContext::didClose() |
| 157 { |
| 158 // This is specific to AudioContexts. OfflineAudioContexts |
| 159 // are closed in their completion event. |
| 160 setContextState(Closed); |
| 161 |
| 162 ASSERT(s_hardwareContextCount); |
| 163 --s_hardwareContextCount; |
| 164 |
| 165 if (m_closeResolver) |
| 166 m_closeResolver->resolve(); |
| 167 } |
| 168 |
| 169 bool AudioContext::isContextClosed() const |
| 170 { |
| 171 return m_closeResolver || AbstractAudioContext::isContextClosed(); |
| 172 } |
| 173 |
| 174 void AudioContext::stopRendering() |
| 175 { |
| 176 ASSERT(isMainThread()); |
| 177 ASSERT(destination()); |
| 178 |
| 179 if (contextState() == Running) { |
| 180 destination()->audioDestinationHandler().stopRendering(); |
| 181 setContextState(Suspended); |
| 182 deferredTaskHandler().clearHandlersToBeDeleted(); |
| 183 } |
| 184 } |
| 185 |
1052 } // namespace blink | 186 } // namespace blink |
1053 | 187 |
1054 #endif // ENABLE(WEB_AUDIO) | 188 #endif // ENABLE(WEB_AUDIO) |