Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(63)

Side by Side Diff: third_party/WebKit/Source/modules/webaudio/AbstractAudioContext.cpp

Issue 2103043007: Rename AbstractAudioContext to BaseAudioContext (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Use ASSERT(isGraphOwner()) instead of DCHECK Created 4 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 /*
2 * Copyright (C) 2010, Google Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
15 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
16 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
17 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
18 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
19 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
20 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
22 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23 */
24
25 #include "modules/webaudio/AbstractAudioContext.h"
26
27 #include "bindings/core/v8/Dictionary.h"
28 #include "bindings/core/v8/ExceptionMessages.h"
29 #include "bindings/core/v8/ExceptionState.h"
30 #include "bindings/core/v8/ScriptPromiseResolver.h"
31 #include "bindings/core/v8/ScriptState.h"
32 #include "core/dom/DOMException.h"
33 #include "core/dom/Document.h"
34 #include "core/dom/ExceptionCode.h"
35 #include "core/dom/ExecutionContextTask.h"
36 #include "core/frame/Settings.h"
37 #include "core/html/HTMLMediaElement.h"
38 #include "modules/mediastream/MediaStream.h"
39 #include "modules/webaudio/AnalyserNode.h"
40 #include "modules/webaudio/AudioBuffer.h"
41 #include "modules/webaudio/AudioBufferCallback.h"
42 #include "modules/webaudio/AudioBufferSourceNode.h"
43 #include "modules/webaudio/AudioContext.h"
44 #include "modules/webaudio/AudioListener.h"
45 #include "modules/webaudio/AudioNodeInput.h"
46 #include "modules/webaudio/AudioNodeOutput.h"
47 #include "modules/webaudio/BiquadFilterNode.h"
48 #include "modules/webaudio/ChannelMergerNode.h"
49 #include "modules/webaudio/ChannelSplitterNode.h"
50 #include "modules/webaudio/ConvolverNode.h"
51 #include "modules/webaudio/DefaultAudioDestinationNode.h"
52 #include "modules/webaudio/DelayNode.h"
53 #include "modules/webaudio/DynamicsCompressorNode.h"
54 #include "modules/webaudio/GainNode.h"
55 #include "modules/webaudio/IIRFilterNode.h"
56 #include "modules/webaudio/MediaElementAudioSourceNode.h"
57 #include "modules/webaudio/MediaStreamAudioDestinationNode.h"
58 #include "modules/webaudio/MediaStreamAudioSourceNode.h"
59 #include "modules/webaudio/OfflineAudioCompletionEvent.h"
60 #include "modules/webaudio/OfflineAudioContext.h"
61 #include "modules/webaudio/OfflineAudioDestinationNode.h"
62 #include "modules/webaudio/OscillatorNode.h"
63 #include "modules/webaudio/PannerNode.h"
64 #include "modules/webaudio/PeriodicWave.h"
65 #include "modules/webaudio/PeriodicWaveConstraints.h"
66 #include "modules/webaudio/ScriptProcessorNode.h"
67 #include "modules/webaudio/StereoPannerNode.h"
68 #include "modules/webaudio/WaveShaperNode.h"
69 #include "platform/CrossThreadFunctional.h"
70 #include "platform/Histogram.h"
71 #include "platform/UserGestureIndicator.h"
72 #include "platform/audio/IIRFilter.h"
73 #include "public/platform/Platform.h"
74 #include "wtf/text/WTFString.h"
75
76 namespace blink {
77
78 namespace {
79
// Buckets for the "WebAudio.UserGesture" UMA histogram (see
// recordUserGestureState()). Recorded values are persisted, so existing
// entries must not be renumbered or reordered; new values go before
// UserGestureRecordMax.
enum UserGestureRecord {
    UserGestureRequiredAndAvailable = 0,
    UserGestureRequiredAndNotAvailable,
    UserGestureNotRequiredAndAvailable,
    UserGestureNotRequiredAndNotAvailable,
    UserGestureRecordMax
};
87
88 } // anonymous namespace
89
// Factory entry point: constructing a context for a document always yields a
// realtime AudioContext; offline contexts are created elsewhere.
AbstractAudioContext* AbstractAudioContext::create(Document& document, ExceptionState& exceptionState)
{
    return AudioContext::create(document, exceptionState);
}
94
95 // FIXME(dominicc): Devolve these constructors to AudioContext
96 // and OfflineAudioContext respectively.
97
// Constructor for rendering to the audio hardware.
AbstractAudioContext::AbstractAudioContext(Document* document)
    : ActiveScriptWrappable(this)
    , ActiveDOMObject(document)
    , m_destinationNode(nullptr)
    , m_isCleared(false)
    , m_isResolvingResumePromises(false)
    , m_userGestureRequired(false)
    , m_connectionCount(0)
    , m_deferredTaskHandler(DeferredTaskHandler::create())
    , m_contextState(Suspended) // All contexts start out suspended; startRendering() moves to Running.
    , m_closedContextSampleRate(-1)
    , m_periodicWaveSine(nullptr)
    , m_periodicWaveSquare(nullptr)
    , m_periodicWaveSawtooth(nullptr)
    , m_periodicWaveTriangle(nullptr)
{
    // TODO(mlamouri): we might want to use other ways of checking for this but
    // in order to record metrics, re-using the HTMLMediaElement setting is
    // probably the simplest solution.
    if (document->settings() && document->settings()->mediaPlaybackRequiresUserGesture())
        m_userGestureRequired = true;

    // A realtime context renders to the default audio device; the offline
    // constructor below does not create a destination node itself.
    m_destinationNode = DefaultAudioDestinationNode::create(this);

    initialize();
}
125
// Constructor for offline (non-realtime) rendering.
// NOTE(review): |numberOfChannels|, |numberOfFrames| and |sampleRate| are not
// used in this body, and neither the destination node nor initialize() is set
// up here — presumably OfflineAudioContext does that itself; confirm in the
// subclass.
AbstractAudioContext::AbstractAudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
    : ActiveScriptWrappable(this)
    , ActiveDOMObject(document)
    , m_destinationNode(nullptr)
    , m_isCleared(false)
    , m_isResolvingResumePromises(false)
    , m_userGestureRequired(false)
    , m_connectionCount(0)
    , m_deferredTaskHandler(DeferredTaskHandler::create())
    , m_contextState(Suspended)
    , m_closedContextSampleRate(-1)
    , m_periodicWaveSine(nullptr)
    , m_periodicWaveSquare(nullptr)
    , m_periodicWaveSawtooth(nullptr)
    , m_periodicWaveTriangle(nullptr)
{
    // TODO(mlamouri): we might want to use other ways of checking for this but
    // in order to record metrics, re-using the HTMLMediaElement setting is
    // probably the simplest solution.
    if (document->settings() && document->settings()->mediaPlaybackRequiresUserGesture())
        m_userGestureRequired = true;
}
149
AbstractAudioContext::~AbstractAudioContext()
{
    deferredTaskHandler().contextWillBeDestroyed();
    // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around.
    // uninitialize() must therefore have already run: no destination, no live
    // source nodes, and no unsettled resume promises.
    ASSERT(!isDestinationInitialized());
    ASSERT(!m_activeSourceNodes.size());
    ASSERT(!m_finishedSourceHandlers.size());
    ASSERT(!m_isResolvingResumePromises);
    ASSERT(!m_resumeResolvers.size());
}
160
// Brings up FFT tables, the destination handler, and the listener. Safe to
// call more than once; subsequent calls are no-ops.
void AbstractAudioContext::initialize()
{
    if (isDestinationInitialized())
        return;

    FFTFrame::initialize();

    if (m_destinationNode) {
        m_destinationNode->handler().initialize();
        // The AudioParams in the listener need access to the destination node, so only create the
        // listener if the destination node exists.
        m_listener = AudioListener::create(*this);
    }
}
175
// Final teardown step (called from uninitialize()): drops the destination
// node and flushes handler deletions, then marks the context cleared so
// hasPendingActivity() returns false.
void AbstractAudioContext::clear()
{
    m_destinationNode.clear();
    // The audio rendering thread is dead. Nobody will schedule AudioHandler
    // deletion. Let's do it ourselves.
    deferredTaskHandler().clearHandlersToBeDeleted();
    m_isCleared = true;
}
184
// Shuts the context down: stops rendering, releases sources, rejects pending
// promises, and finally clear()s. Order matters — rendering must stop before
// sources are released, and the HRTF loader thread must finish before clear().
void AbstractAudioContext::uninitialize()
{
    ASSERT(isMainThread());

    if (!isDestinationInitialized())
        return;

    // This stops the audio thread and all audio rendering.
    if (m_destinationNode)
        m_destinationNode->handler().uninitialize();

    // Get rid of the sources which may still be playing.
    releaseActiveSourceNodes();

    // Reject any pending resolvers before we go away.
    rejectPendingResolvers();
    didClose();

    ASSERT(m_listener);
    m_listener->waitForHRTFDatabaseLoaderThreadCompletion();

    clear();
}
208
// ActiveDOMObject override: the execution context is being destroyed, so tear
// everything down.
void AbstractAudioContext::stop()
{
    uninitialize();
}
213
// ActiveScriptWrappable override: keeps the wrapper alive until clear() runs.
bool AbstractAudioContext::hasPendingActivity() const
{
    // There's no pending activity if the audio context has been cleared.
    return !m_isCleared;
}
219
// Returns the destination node, or null after clear(). Main thread only.
AudioDestinationNode* AbstractAudioContext::destination() const
{
    // Cannot be called from the audio thread because this method touches objects managed by Oilpan,
    // and the audio thread is not managed by Oilpan.
    ASSERT(!isAudioThread());
    return m_destinationNode;
}
227
// Shared helper for factories that must fail once the context is closed.
void AbstractAudioContext::throwExceptionForClosedState(ExceptionState& exceptionState)
{
    exceptionState.throwDOMException(InvalidStateError, "AudioContext has been closed.");
}
232
// Creates an AudioBuffer and records sizing/sample-rate UMA metrics on
// success. Returns null (with |exceptionState| set by AudioBuffer::create)
// on failure.
AudioBuffer* AbstractAudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
{
    // It's ok to call createBuffer, even if the context is closed because the AudioBuffer doesn't
    // really "belong" to any particular context.

    AudioBuffer* buffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate, exceptionState);

    if (buffer) {
        // Only record the data if the creation succeeded.
        DEFINE_STATIC_LOCAL(SparseHistogram, audioBufferChannelsHistogram,
            ("WebAudio.AudioBuffer.NumberOfChannels"));

        // Arbitrarly limit the maximum length to 1 million frames (about 20 sec
        // at 48kHz). The number of buckets is fairly arbitrary.
        DEFINE_STATIC_LOCAL(CustomCountHistogram, audioBufferLengthHistogram,
            ("WebAudio.AudioBuffer.Length", 1, 1000000, 50));
        // The limits are the min and max AudioBuffer sample rates currently
        // supported. We use explicit values here instead of
        // AudioUtilities::minAudioBufferSampleRate() and
        // AudioUtilities::maxAudioBufferSampleRate(). The number of buckets is
        // fairly arbitrary.
        DEFINE_STATIC_LOCAL(CustomCountHistogram, audioBufferSampleRateHistogram,
            ("WebAudio.AudioBuffer.SampleRate", 3000, 192000, 60));

        audioBufferChannelsHistogram.sample(numberOfChannels);
        audioBufferLengthHistogram.count(numberOfFrames);
        audioBufferSampleRateHistogram.count(sampleRate);

        // Compute the ratio of the buffer rate and the context rate so we know
        // how often the buffer needs to be resampled to match the context. For
        // the histogram, we multiply the ratio by 100 and round to the nearest
        // integer. If the context is closed, don't record this because we
        // don't have a sample rate for closed context.
        if (!isContextClosed()) {
            // The limits are choosen from 100*(3000/192000) = 1.5625 and
            // 100*(192000/3000) = 6400, where 3000 and 192000 are the current
            // min and max sample rates possible for an AudioBuffer. The number
            // of buckets is fairly arbitrary.
            DEFINE_STATIC_LOCAL(CustomCountHistogram, audioBufferSampleRateRatioHistogram,
                ("WebAudio.AudioBuffer.SampleRateRatio", 1, 6400, 50));
            float ratio = 100 * sampleRate / this->sampleRate();
            audioBufferSampleRateRatioHistogram.count(static_cast<int>(0.5 + ratio));
        }
    }

    return buffer;
}
280
// Kicks off asynchronous decoding of |audioData| and returns a promise that
// handleDecodeAudioData() settles later. The resolver is retained in
// m_decodeAudioResolvers until then.
ScriptPromise AbstractAudioContext::decodeAudioData(ScriptState* scriptState, DOMArrayBuffer* audioData, AudioBufferCallback* successCallback, AudioBufferCallback* errorCallback, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    ASSERT(audioData);

    ScriptPromiseResolver* resolver = ScriptPromiseResolver::create(scriptState);
    ScriptPromise promise = resolver->promise();

    // A closed context has no live sample rate, so fall back to the rate it
    // had when it was closed.
    float rate = isContextClosed() ? closedContextSampleRate() : sampleRate();

    ASSERT(rate > 0);

    m_decodeAudioResolvers.add(resolver);
    m_audioDecoder.decodeAsync(audioData, rate, successCallback, errorCallback, resolver, this);

    return promise;
}
298
299 void AbstractAudioContext::handleDecodeAudioData(AudioBuffer* audioBuffer, Scrip tPromiseResolver* resolver, AudioBufferCallback* successCallback, AudioBufferCal lback* errorCallback)
300 {
301 ASSERT(isMainThread());
302
303 if (audioBuffer) {
304 // Resolve promise successfully and run the success callback
305 resolver->resolve(audioBuffer);
306 if (successCallback)
307 successCallback->handleEvent(audioBuffer);
308 } else {
309 // Reject the promise and run the error callback
310 DOMException* error = DOMException::create(EncodingError, "Unable to dec ode audio data");
311 resolver->reject(error);
312 if (errorCallback)
313 errorCallback->handleEvent(error);
314 }
315
316 // We've resolved the promise. Remove it now.
317 ASSERT(m_decodeAudioResolvers.contains(resolver));
318 m_decodeAudioResolvers.remove(resolver);
319 }
320
// Creates an AudioBufferSourceNode. Main thread only.
AudioBufferSourceNode* AbstractAudioContext::createBufferSource(ExceptionState& exceptionState)
{
    ASSERT(isMainThread());

    AudioBufferSourceNode* node = AudioBufferSourceNode::create(*this, exceptionState);

    // Do not add a reference to this source node now. The reference will be added when start() is
    // called.

    return node;
}
332
// Creates a source node that captures audio from |mediaElement|. Main thread
// only; errors are reported through |exceptionState|.
MediaElementAudioSourceNode* AbstractAudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());

    return MediaElementAudioSourceNode::create(*this, *mediaElement, exceptionState);
}
339
// Creates a source node that captures audio from |mediaStream|. Main thread
// only; errors are reported through |exceptionState|.
MediaStreamAudioSourceNode* AbstractAudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());

    return MediaStreamAudioSourceNode::create(*this, *mediaStream, exceptionState);
}
346
347 MediaStreamAudioDestinationNode* AbstractAudioContext::createMediaStreamDestinat ion(ExceptionState& exceptionState)
348 {
349 DCHECK(isMainThread());
350
351 // Set number of output channels to stereo by default.
352 return MediaStreamAudioDestinationNode::create(*this, 2, exceptionState);
353 }
354
355 ScriptProcessorNode* AbstractAudioContext::createScriptProcessor(ExceptionState& exceptionState)
356 {
357 DCHECK(isMainThread());
358
359 return ScriptProcessorNode::create(*this, exceptionState);
360 }
361
362 ScriptProcessorNode* AbstractAudioContext::createScriptProcessor(size_t bufferSi ze, ExceptionState& exceptionState)
363 {
364 DCHECK(isMainThread());
365
366 return ScriptProcessorNode::create(*this, bufferSize, exceptionState);
367 }
368
369 ScriptProcessorNode* AbstractAudioContext::createScriptProcessor(size_t bufferSi ze, size_t numberOfInputChannels, ExceptionState& exceptionState)
370 {
371 DCHECK(isMainThread());
372
373 return ScriptProcessorNode::create(*this, bufferSize, numberOfInputChannels, exceptionState);
374 }
375
376 ScriptProcessorNode* AbstractAudioContext::createScriptProcessor(size_t bufferSi ze, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState)
377 {
378 ASSERT(isMainThread());
379
380 return ScriptProcessorNode::create(
381 *this,
382 bufferSize,
383 numberOfInputChannels,
384 numberOfOutputChannels,
385 exceptionState);
386 }
387
// Creates a StereoPannerNode. Main thread only; errors via |exceptionState|.
StereoPannerNode* AbstractAudioContext::createStereoPanner(ExceptionState& exceptionState)
{
    ASSERT(isMainThread());

    return StereoPannerNode::create(*this, exceptionState);
}
394
// Creates a BiquadFilterNode. Main thread only; errors via |exceptionState|.
BiquadFilterNode* AbstractAudioContext::createBiquadFilter(ExceptionState& exceptionState)
{
    ASSERT(isMainThread());

    return BiquadFilterNode::create(*this, exceptionState);
}
401
// Creates a WaveShaperNode. Main thread only; errors via |exceptionState|.
WaveShaperNode* AbstractAudioContext::createWaveShaper(ExceptionState& exceptionState)
{
    ASSERT(isMainThread());

    return WaveShaperNode::create(*this, exceptionState);
}
408
// Creates a PannerNode. Main thread only; errors via |exceptionState|.
PannerNode* AbstractAudioContext::createPanner(ExceptionState& exceptionState)
{
    ASSERT(isMainThread());

    return PannerNode::create(*this, exceptionState);
}
415
// Creates a ConvolverNode. Main thread only; errors via |exceptionState|.
ConvolverNode* AbstractAudioContext::createConvolver(ExceptionState& exceptionState)
{
    ASSERT(isMainThread());

    return ConvolverNode::create(*this, exceptionState);
}
422
// Creates a DynamicsCompressorNode. Main thread only; errors via |exceptionState|.
DynamicsCompressorNode* AbstractAudioContext::createDynamicsCompressor(ExceptionState& exceptionState)
{
    ASSERT(isMainThread());

    return DynamicsCompressorNode::create(*this, exceptionState);
}
429
// Creates an AnalyserNode. Main thread only; errors via |exceptionState|.
AnalyserNode* AbstractAudioContext::createAnalyser(ExceptionState& exceptionState)
{
    ASSERT(isMainThread());

    return AnalyserNode::create(*this, exceptionState);
}
436
// Creates a GainNode. Main thread only; errors via |exceptionState|.
GainNode* AbstractAudioContext::createGain(ExceptionState& exceptionState)
{
    ASSERT(isMainThread());

    return GainNode::create(*this, exceptionState);
}
443
444 DelayNode* AbstractAudioContext::createDelay(ExceptionState& exceptionState)
445 {
446 DCHECK(isMainThread());
447
448 return DelayNode::create(*this, exceptionState);
449 }
450
// Creates a DelayNode with an explicit maximum delay time (seconds).
DelayNode* AbstractAudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());

    return DelayNode::create(*this, maxDelayTime, exceptionState);
}
457
458 ChannelSplitterNode* AbstractAudioContext::createChannelSplitter(ExceptionState& exceptionState)
459 {
460 DCHECK(isMainThread());
461
462 return ChannelSplitterNode::create(*this, exceptionState);
463 }
464
// Creates a ChannelSplitterNode with an explicit number of outputs.
ChannelSplitterNode* AbstractAudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());

    return ChannelSplitterNode::create(*this, numberOfOutputs, exceptionState);
}
471
472 ChannelMergerNode* AbstractAudioContext::createChannelMerger(ExceptionState& exc eptionState)
473 {
474 DCHECK(isMainThread());
475
476 return ChannelMergerNode::create(*this, exceptionState);
477 }
478
// Creates a ChannelMergerNode with an explicit number of inputs.
ChannelMergerNode* AbstractAudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());

    return ChannelMergerNode::create(*this, numberOfInputs, exceptionState);
}
485
// Creates an OscillatorNode. Main thread only; errors via |exceptionState|.
OscillatorNode* AbstractAudioContext::createOscillator(ExceptionState& exceptionState)
{
    ASSERT(isMainThread());

    return OscillatorNode::create(*this, exceptionState);
}
492
493 PeriodicWave* AbstractAudioContext::createPeriodicWave(DOMFloat32Array* real, DO MFloat32Array* imag, ExceptionState& exceptionState)
494 {
495 DCHECK(isMainThread());
496
497 return PeriodicWave::create(*this, real, imag, false, exceptionState);
498 }
499
500 PeriodicWave* AbstractAudioContext::createPeriodicWave(DOMFloat32Array* real, DO MFloat32Array* imag, const PeriodicWaveConstraints& options, ExceptionState& exc eptionState)
501 {
502 ASSERT(isMainThread());
503
504 bool disable = options.hasDisableNormalization() ? options.disableNormalizat ion() : false;
505
506 return PeriodicWave::create(*this, real, imag, disable, exceptionState);
507 }
508
// Creates an IIRFilterNode from feedforward/feedback coefficient lists.
// NOTE(review): the Vectors are taken by value — the signature lives in the
// header, so confirm there before considering const& here.
IIRFilterNode* AbstractAudioContext::createIIRFilter(Vector<double> feedforwardCoef, Vector<double> feedbackCoef, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());

    return IIRFilterNode::create(*this, feedforwardCoef, feedbackCoef, exceptionState);
}
515
// Returns the shared PeriodicWave for a built-in oscillator |type|
// (OscillatorHandler::SINE etc.), creating and caching it on first use so all
// oscillators of the same type share one table.
PeriodicWave* AbstractAudioContext::periodicWave(int type)
{
    switch (type) {
    case OscillatorHandler::SINE:
        // Initialize the table if necessary
        if (!m_periodicWaveSine)
            m_periodicWaveSine = PeriodicWave::createSine(sampleRate());
        return m_periodicWaveSine;
    case OscillatorHandler::SQUARE:
        // Initialize the table if necessary
        if (!m_periodicWaveSquare)
            m_periodicWaveSquare = PeriodicWave::createSquare(sampleRate());
        return m_periodicWaveSquare;
    case OscillatorHandler::SAWTOOTH:
        // Initialize the table if necessary
        if (!m_periodicWaveSawtooth)
            m_periodicWaveSawtooth = PeriodicWave::createSawtooth(sampleRate());
        return m_periodicWaveSawtooth;
    case OscillatorHandler::TRIANGLE:
        // Initialize the table if necessary
        if (!m_periodicWaveTriangle)
            m_periodicWaveTriangle = PeriodicWave::createTriangle(sampleRate());
        return m_periodicWaveTriangle;
    default:
        ASSERT_NOT_REACHED();
        return nullptr;
    }
}
544
545 void AbstractAudioContext::recordUserGestureState()
546 {
547 DEFINE_STATIC_LOCAL(EnumerationHistogram, userGestureHistogram, ("WebAudio.U serGesture", UserGestureRecordMax));
548
549 if (!m_userGestureRequired) {
550 if (UserGestureIndicator::processingUserGesture())
551 userGestureHistogram.count(UserGestureNotRequiredAndAvailable);
552 else
553 userGestureHistogram.count(UserGestureNotRequiredAndNotAvailable);
554 return;
555 }
556 if (!UserGestureIndicator::processingUserGesture()) {
557 userGestureHistogram.count(UserGestureRequiredAndNotAvailable);
558 return;
559 }
560 userGestureHistogram.count(UserGestureRequiredAndAvailable);
561 m_userGestureRequired = false;
562 }
563
564 String AbstractAudioContext::state() const
565 {
566 // These strings had better match the strings for AudioContextState in Audio Context.idl.
567 switch (m_contextState) {
568 case Suspended:
569 return "suspended";
570 case Running:
571 return "running";
572 case Closed:
573 return "closed";
574 }
575 ASSERT_NOT_REACHED();
576 return "";
577 }
578
// Transitions the context state and dispatches a "statechange" event
// asynchronously. Invalid transitions are caught by the ASSERTs below; in
// release builds a same-state "transition" is silently ignored.
void AbstractAudioContext::setContextState(AudioContextState newState)
{
    ASSERT(isMainThread());

    // Validate the transitions. The valid transitions are Suspended->Running, Running->Suspended,
    // and anything->Closed.
    switch (newState) {
    case Suspended:
        ASSERT(m_contextState == Running);
        break;
    case Running:
        ASSERT(m_contextState == Suspended);
        break;
    case Closed:
        ASSERT(m_contextState != Closed);
        break;
    }

    if (newState == m_contextState) {
        // ASSERTs above failed; just return.
        return;
    }

    m_contextState = newState;

    // Notify context that state changed
    if (getExecutionContext())
        getExecutionContext()->postTask(BLINK_FROM_HERE, createSameThreadTask(&AbstractAudioContext::notifyStateChange, wrapPersistent(this)));
}
608
// Posted from setContextState(); fires the "statechange" DOM event.
void AbstractAudioContext::notifyStateChange()
{
    dispatchEvent(Event::create(EventTypeNames::statechange));
}
613
// Audio-thread callback: queues |handler| so releaseFinishedSourceNodes()
// can break its connection during post-render processing.
void AbstractAudioContext::notifySourceNodeFinishedProcessing(AudioHandler* handler)
{
    ASSERT(isAudioThread());
    m_finishedSourceHandlers.append(handler);
}
619
// Main-thread half of the finished-node cleanup: posted by
// releaseFinishedSourceNodes(), removes nodes it flagged from
// m_activeSourceNodes under the graph lock.
void AbstractAudioContext::removeFinishedSourceNodes()
{
    ASSERT(isMainThread());
    AutoLocker locker(this);
    // Quadratic worst case, but sizes of both vectors are considered
    // manageable, especially |m_finishedSourceNodes| is likely to be short.
    for (AudioNode* node : m_finishedSourceNodes) {
        size_t i = m_activeSourceNodes.find(node);
        if (i != kNotFound)
            m_activeSourceNodes.remove(i);
    }
    m_finishedSourceNodes.clear();
}
633
// Audio-thread half of the finished-node cleanup (runs while holding the
// graph lock, from handlePostRenderTasks()): breaks the connection of every
// handler queued by notifySourceNodeFinishedProcessing(), marks its node
// finished, and posts removeFinishedSourceNodes() to the main thread to do
// the Oilpan-side removal.
void AbstractAudioContext::releaseFinishedSourceNodes()
{
    ASSERT(isGraphOwner());
    ASSERT(isAudioThread());
    bool didRemove = false;
    for (AudioHandler* handler : m_finishedSourceHandlers) {
        for (AudioNode* node : m_activeSourceNodes) {
            // Skip nodes already marked in a previous pass.
            if (m_finishedSourceNodes.contains(node))
                continue;
            if (handler == &node->handler()) {
                handler->breakConnection();
                m_finishedSourceNodes.add(node);
                didRemove = true;
                break;
            }
        }
    }
    if (didRemove)
        Platform::current()->mainThread()->getWebTaskRunner()->postTask(BLINK_FROM_HERE, crossThreadBind(&AbstractAudioContext::removeFinishedSourceNodes, wrapCrossThreadPersistent(this)));

    m_finishedSourceHandlers.clear();
}
656
// Called when a source node's start() begins playback: keeps the node alive
// in m_activeSourceNodes and connects its handler. Takes the graph lock.
void AbstractAudioContext::notifySourceNodeStartedProcessing(AudioNode* node)
{
    ASSERT(isMainThread());
    AutoLocker locker(this);

    m_activeSourceNodes.append(node);
    node->handler().makeConnection();
}
665
666 void AbstractAudioContext::releaseActiveSourceNodes()
667 {
668 ASSERT(isMainThread());
669 for (auto& sourceNode : m_activeSourceNodes)
670 sourceNode->handler().breakConnection();
671
672 m_activeSourceNodes.clear();
673 }
674
// Runs under the graph lock (from handlePreRenderTasks()): asks each active
// AudioBufferSourceNode whether it can stop now that its end time has passed.
void AbstractAudioContext::handleStoppableSourceNodes()
{
    ASSERT(isGraphOwner());

    // Find AudioBufferSourceNodes to see if we can stop playing them.
    for (AudioNode* node : m_activeSourceNodes) {
        // If the AudioNode has been marked as finished and released by
        // the audio thread, but not yet removed by the main thread
        // (see releaseActiveSourceNodes() above), |node| must not be
        // touched as its handler may have been released already.
        if (m_finishedSourceNodes.contains(node))
            continue;
        if (node->handler().getNodeType() == AudioHandler::NodeTypeAudioBufferSource) {
            AudioBufferSourceNode* sourceNode = static_cast<AudioBufferSourceNode*>(node);
            sourceNode->audioBufferSourceHandler().handleStoppableSourceNode();
        }
    }
}
693
// Audio-thread hook run before each render quantum. Uses tryLock() so the
// render never blocks on the main thread; skipped work is picked up next
// quantum.
void AbstractAudioContext::handlePreRenderTasks()
{
    ASSERT(isAudioThread());

    // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
    // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
    if (tryLock()) {
        deferredTaskHandler().handleDeferredTasks();

        resolvePromisesForResume();

        // Check to see if source nodes can be stopped because the end time has passed.
        handleStoppableSourceNodes();

        // Update the dirty state of the listener.
        listener()->updateState();

        unlock();
    }
}
714
// Audio-thread hook run after each render quantum; counterpart to
// handlePreRenderTasks(), same non-blocking tryLock() protocol.
void AbstractAudioContext::handlePostRenderTasks()
{
    ASSERT(isAudioThread());

    // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended and this method is called frequently.
    // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
    // from the render graph (in which case they'll render silence).
    if (tryLock()) {
        // Take care of AudioNode tasks where the tryLock() failed previously.
        deferredTaskHandler().breakConnections();

        // Dynamically clean up nodes which are no longer needed.
        releaseFinishedSourceNodes();

        deferredTaskHandler().handleDeferredTasks();
        deferredTaskHandler().requestToDeleteHandlersOnMainThread();

        unlock();
    }
}
735
// Main-thread half of resume-promise resolution, posted by
// resolvePromisesForResume(). Settles every pending resume() promise —
// rejecting if the context closed in the meantime — and resets the
// in-flight flag.
void AbstractAudioContext::resolvePromisesForResumeOnMainThread()
{
    ASSERT(isMainThread());
    AutoLocker locker(this);

    for (auto& resolver : m_resumeResolvers) {
        if (m_contextState == Closed) {
            resolver->reject(
                DOMException::create(InvalidStateError, "Cannot resume a context that has been closed"));
        } else {
            resolver->resolve();
        }
    }

    m_resumeResolvers.clear();
    m_isResolvingResumePromises = false;
}
753
// Audio-thread half of resume-promise resolution: if resume() promises are
// pending and no resolution is already in flight, post the main-thread task.
// m_isResolvingResumePromises prevents posting duplicates every quantum.
void AbstractAudioContext::resolvePromisesForResume()
{
    // This runs inside the AbstractAudioContext's lock when handling pre-render tasks.
    ASSERT(isAudioThread());
    ASSERT(isGraphOwner());

    // Resolve any pending promises created by resume(). Only do this if we haven't already started
    // resolving these promises. This gets called very often and it takes some time to resolve the
    // promises in the main thread.
    if (!m_isResolvingResumePromises && m_resumeResolvers.size() > 0) {
        m_isResolvingResumePromises = true;
        Platform::current()->mainThread()->getWebTaskRunner()->postTask(BLINK_FROM_HERE, crossThreadBind(&AbstractAudioContext::resolvePromisesForResumeOnMainThread, wrapCrossThreadPersistent(this)));
    }
}
768
769 void AbstractAudioContext::rejectPendingDecodeAudioDataResolvers()
770 {
771 // Now reject any pending decodeAudioData resolvers
772 for (auto& resolver : m_decodeAudioResolvers)
773 resolver->reject(DOMException::create(InvalidStateError, "Audio context is going away"));
774 m_decodeAudioResolvers.clear();
775 }
776
777 void AbstractAudioContext::rejectPendingResolvers()
778 {
779 ASSERT(isMainThread());
780
781 // Audio context is closing down so reject any resume promises that are stil l pending.
782
783 for (auto& resolver : m_resumeResolvers) {
784 resolver->reject(DOMException::create(InvalidStateError, "Audio context is going away"));
785 }
786 m_resumeResolvers.clear();
787 m_isResolvingResumePromises = false;
788
789 rejectPendingDecodeAudioDataResolvers();
790 }
791
// EventTarget override; reported interface name is always "AudioContext".
const AtomicString& AbstractAudioContext::interfaceName() const
{
    return EventTargetNames::AudioContext;
}
796
// Disambiguates getExecutionContext() between the EventTarget and
// ActiveDOMObject base classes in favor of the latter.
ExecutionContext* AbstractAudioContext::getExecutionContext() const
{
    return ActiveDOMObject::getExecutionContext();
}
801
// Starts (or resumes) rendering and moves a Suspended context to Running.
// Also records the user-gesture histogram sample for this start attempt.
void AbstractAudioContext::startRendering()
{
    // This is called for both online and offline contexts.
    ASSERT(isMainThread());
    ASSERT(m_destinationNode);

    recordUserGestureState();

    if (m_contextState == Suspended) {
        destination()->audioDestinationHandler().startRendering();
        setContextState(Running);
    }
}
815
// Oilpan tracing: every Member<> field must be visited here, plus the
// traceable base classes.
DEFINE_TRACE(AbstractAudioContext)
{
    visitor->trace(m_destinationNode);
    visitor->trace(m_listener);
    visitor->trace(m_activeSourceNodes);
    visitor->trace(m_resumeResolvers);
    visitor->trace(m_decodeAudioResolvers);

    visitor->trace(m_periodicWaveSine);
    visitor->trace(m_periodicWaveSquare);
    visitor->trace(m_periodicWaveSawtooth);
    visitor->trace(m_periodicWaveTriangle);
    EventTargetWithInlineData::trace(visitor);
    ActiveDOMObject::trace(visitor);
}
831
832 SecurityOrigin* AbstractAudioContext::getSecurityOrigin() const
833 {
834 if (getExecutionContext())
835 return getExecutionContext()->getSecurityOrigin();
836
837 return nullptr;
838 }
839
840 } // namespace blink
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698