OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2010, Google Inc. All rights reserved. | 2 * Copyright (C) 2010, Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
6 * are met: | 6 * are met: |
7 * 1. Redistributions of source code must retain the above copyright | 7 * 1. Redistributions of source code must retain the above copyright |
8 * notice, this list of conditions and the following disclaimer. | 8 * notice, this list of conditions and the following disclaimer. |
9 * 2. Redistributions in binary form must reproduce the above copyright | 9 * 2. Redistributions in binary form must reproduce the above copyright |
10 * notice, this list of conditions and the following disclaimer in the | 10 * notice, this list of conditions and the following disclaimer in the |
(...skipping 12 matching lines...) |
23 */ | 23 */ |
24 | 24 |
25 #include "config.h" | 25 #include "config.h" |
26 | 26 |
27 #if ENABLE(WEB_AUDIO) | 27 #if ENABLE(WEB_AUDIO) |
28 | 28 |
29 #include "modules/webaudio/AudioContext.h" | 29 #include "modules/webaudio/AudioContext.h" |
30 | 30 |
31 #include "bindings/core/v8/ExceptionMessages.h" | 31 #include "bindings/core/v8/ExceptionMessages.h" |
32 #include "bindings/core/v8/ExceptionState.h" | 32 #include "bindings/core/v8/ExceptionState.h" |
33 #include "bindings/core/v8/ScriptState.h" | |
34 #include "core/dom/DOMException.h" | |
35 #include "core/dom/Document.h" | 33 #include "core/dom/Document.h" |
36 #include "core/dom/ExceptionCode.h" | 34 #include "core/dom/ExceptionCode.h" |
37 #include "core/html/HTMLMediaElement.h" | 35 #include "core/html/HTMLMediaElement.h" |
38 #include "core/inspector/ScriptCallStack.h" | 36 #include "core/inspector/ScriptCallStack.h" |
39 #include "platform/audio/FFTFrame.h" | 37 #include "platform/audio/FFTFrame.h" |
40 #include "platform/audio/HRTFPanner.h" | 38 #include "platform/audio/HRTFPanner.h" |
41 #include "modules/mediastream/MediaStream.h" | 39 #include "modules/mediastream/MediaStream.h" |
42 #include "modules/webaudio/AnalyserNode.h" | 40 #include "modules/webaudio/AnalyserNode.h" |
43 #include "modules/webaudio/AudioBuffer.h" | 41 #include "modules/webaudio/AudioBuffer.h" |
44 #include "modules/webaudio/AudioBufferCallback.h" | 42 #include "modules/webaudio/AudioBufferCallback.h" |
(...skipping 51 matching lines...) |
96 return audioContext; | 94 return audioContext; |
97 } | 95 } |
98 | 96 |
99 // Constructor for rendering to the audio hardware. | 97 // Constructor for rendering to the audio hardware. |
100 AudioContext::AudioContext(Document* document) | 98 AudioContext::AudioContext(Document* document) |
101 : ActiveDOMObject(document) | 99 : ActiveDOMObject(document) |
102 , m_isStopScheduled(false) | 100 , m_isStopScheduled(false) |
103 , m_isCleared(false) | 101 , m_isCleared(false) |
104 , m_isInitialized(false) | 102 , m_isInitialized(false) |
105 , m_destinationNode(nullptr) | 103 , m_destinationNode(nullptr) |
106 , m_isResolvingResumePromises(false) | |
107 , m_automaticPullNodesNeedUpdating(false) | 104 , m_automaticPullNodesNeedUpdating(false) |
108 , m_connectionCount(0) | 105 , m_connectionCount(0) |
109 , m_audioThread(0) | 106 , m_audioThread(0) |
110 , m_isOfflineContext(false) | 107 , m_isOfflineContext(false) |
111 , m_contextState(Paused) | |
112 { | 108 { |
113 m_destinationNode = DefaultAudioDestinationNode::create(this); | 109 m_destinationNode = DefaultAudioDestinationNode::create(this); |
114 | 110 |
115 initialize(); | 111 initialize(); |
116 #if DEBUG_AUDIONODE_REFERENCES | 112 #if DEBUG_AUDIONODE_REFERENCES |
117 fprintf(stderr, "%p: AudioContext::AudioContext() #%u\n", this, AudioContext::s_hardwareContextCount); | 113 fprintf(stderr, "%p: AudioContext::AudioContext() #%u\n", this, AudioContext::s_hardwareContextCount); |
118 #endif | 114 #endif |
119 } | 115 } |
120 | 116 |
121 // Constructor for offline (non-realtime) rendering. | 117 // Constructor for offline (non-realtime) rendering. |
122 AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate) | 118 AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate) |
123 : ActiveDOMObject(document) | 119 : ActiveDOMObject(document) |
124 , m_isStopScheduled(false) | 120 , m_isStopScheduled(false) |
125 , m_isCleared(false) | 121 , m_isCleared(false) |
126 , m_isInitialized(false) | 122 , m_isInitialized(false) |
127 , m_destinationNode(nullptr) | 123 , m_destinationNode(nullptr) |
128 , m_isResolvingResumePromises(false) | |
129 , m_automaticPullNodesNeedUpdating(false) | 124 , m_automaticPullNodesNeedUpdating(false) |
130 , m_connectionCount(0) | 125 , m_connectionCount(0) |
131 , m_audioThread(0) | 126 , m_audioThread(0) |
132 , m_isOfflineContext(true) | 127 , m_isOfflineContext(true) |
133 , m_contextState(Paused) | |
134 { | 128 { |
135 // Create a new destination for offline rendering. | 129 // Create a new destination for offline rendering. |
136 m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate); | 130 m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate); |
137 if (m_renderTarget.get()) | 131 if (m_renderTarget.get()) |
138 m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get()); | 132 m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get()); |
139 | 133 |
140 initialize(); | 134 initialize(); |
141 } | 135 } |
142 | 136 |
143 AudioContext::~AudioContext() | 137 AudioContext::~AudioContext() |
144 { | 138 { |
145 #if DEBUG_AUDIONODE_REFERENCES | 139 #if DEBUG_AUDIONODE_REFERENCES |
146 fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this); | 140 fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this); |
147 #endif | 141 #endif |
148 // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around. | 142 // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around. |
149 ASSERT(!m_isInitialized); | 143 ASSERT(!m_isInitialized); |
150 ASSERT(!m_referencedNodes.size()); | 144 ASSERT(!m_referencedNodes.size()); |
151 ASSERT(!m_finishedNodes.size()); | 145 ASSERT(!m_finishedNodes.size()); |
152 ASSERT(!m_automaticPullNodes.size()); | 146 ASSERT(!m_automaticPullNodes.size()); |
153 if (m_automaticPullNodesNeedUpdating) | 147 if (m_automaticPullNodesNeedUpdating) |
154 m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size()); | 148 m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size()); |
155 ASSERT(!m_renderingAutomaticPullNodes.size()); | 149 ASSERT(!m_renderingAutomaticPullNodes.size()); |
156 ASSERT(!m_resumePromises.size()); | |
157 } | 150 } |
158 | 151 |
159 void AudioContext::initialize() | 152 void AudioContext::initialize() |
160 { | 153 { |
161 if (isInitialized()) | 154 if (isInitialized()) |
162 return; | 155 return; |
163 | 156 |
164 FFTFrame::initialize(); | 157 FFTFrame::initialize(); |
165 m_listener = AudioListener::create(); | 158 m_listener = AudioListener::create(); |
166 | 159 |
167 if (m_destinationNode.get()) { | 160 if (m_destinationNode.get()) { |
168 m_destinationNode->initialize(); | 161 m_destinationNode->initialize(); |
169 | 162 |
170 if (!isOfflineContext()) { | 163 if (!isOfflineContext()) { |
171 // This starts the audio thread. The destination node's provideInput
() method will now be called repeatedly to render audio. | 164 // This starts the audio thread. The destination node's provideInput
() method will now be called repeatedly to render audio. |
172 // Each time provideInput() is called, a portion of the audio stream
is rendered. Let's call this time period a "render quantum". | 165 // Each time provideInput() is called, a portion of the audio stream
is rendered. Let's call this time period a "render quantum". |
173 // NOTE: for now default AudioContext does not need an explicit star
tRendering() call from JavaScript. | 166 // NOTE: for now default AudioContext does not need an explicit star
tRendering() call from JavaScript. |
174 // We may want to consider requiring it for symmetry with OfflineAud
ioContext. | 167 // We may want to consider requiring it for symmetry with OfflineAud
ioContext. |
175 startRendering(); | 168 m_destinationNode->startRendering(); |
176 ++s_hardwareContextCount; | 169 ++s_hardwareContextCount; |
177 } | 170 } |
178 | 171 |
179 m_isInitialized = true; | 172 m_isInitialized = true; |
180 } | 173 } |
181 } | 174 } |
182 | 175 |
183 void AudioContext::clear() | 176 void AudioContext::clear() |
184 { | 177 { |
185 // We need to run disposers before destructing m_contextGraphMutex. | 178 // We need to run disposers before destructing m_contextGraphMutex. |
(...skipping 345 matching lines...) |
531 exceptionState.throwDOMException( | 524 exceptionState.throwDOMException( |
532 IndexSizeError, | 525 IndexSizeError, |
533 "length of imaginary array (" + String::number(imag->length()) | 526 "length of imaginary array (" + String::number(imag->length()) |
534 + ") exceeds allowed maximum of 4096"); | 527 + ") exceeds allowed maximum of 4096"); |
535 return 0; | 528 return 0; |
536 } | 529 } |
537 | 530 |
538 return PeriodicWave::create(sampleRate(), real, imag); | 531 return PeriodicWave::create(sampleRate(), real, imag); |
539 } | 532 } |
540 | 533 |
541 String AudioContext::state() const | |
542 { | |
543 switch (m_contextState) { | |
544 case Paused: | |
545 return "paused"; | |
546 case Running: | |
547 return "running"; | |
548 case Released: | |
549 return "released"; | |
550 } | |
551 ASSERT_NOT_REACHED(); | |
552 return ""; | |
553 } | |
554 | |
555 void AudioContext::setContextState(AudioContextState newState) | |
556 { | |
557 // Validate the transitions | |
558 switch (newState) { | |
559 case Paused: | |
560 ASSERT(m_contextState == Running); | |
561 break; | |
562 case Running: | |
563 ASSERT(m_contextState == Paused); | |
564 break; | |
565 case Released: | |
566 ASSERT(m_contextState != Released); | |
567 break; | |
568 } | |
569 | |
570 m_contextState = newState; | |
571 } | |
572 | |
573 void AudioContext::suspendContext(ExceptionState& exceptionState) | |
574 { | |
575 ASSERT(isMainThread()); | |
576 AutoLocker locker(this); | |
577 | |
578 if (m_contextState == Released) { | |
579 exceptionState.throwDOMException( | |
580 InvalidStateError, | |
581 "cannot suspend an AudioContext that has been released"); | |
582 return; | |
583 } | |
584 | |
585 if (m_destinationNode && !isOfflineContext()) { | |
586 stopRendering(); | |
587 } | |
588 } | |
589 | |
590 ScriptPromise AudioContext::resumeContext(ScriptState* scriptState) | |
591 { | |
592 ASSERT(isMainThread()); | |
593 AutoLocker locker(this); | |
594 | |
595 RefPtr<ScriptPromiseResolver> resolver = ScriptPromiseResolver::create(scriptState); | |
596 | |
597 ScriptPromise promise = resolver->promise(); | |
598 | |
599 if (isOfflineContext()) { | |
600 // For offline context, resolve now, but reject if the context has been released. | |
601 if (m_contextState == Released) { | |
602 resolver->reject( | |
603 DOMException::create(InvalidStateError, "Cannot resume a context that has been released")); | |
604 } else { | |
605 resolver->resolve(); | |
606 } | |
607 } else { | |
608 // Restart the destination node to pull on the audio graph. | |
609 if (m_destinationNode) { | |
610 startRendering(); | |
611 } | |
612 | |
613 // Save the promise which will get resolved when the destination node starts pulling on the | |
614 // graph again. | |
615 m_resumePromises.append(resolver); | |
616 } | |
617 | |
618 return promise; | |
619 } | |
620 | |
621 void AudioContext::notifyNodeFinishedProcessing(AudioNode* node) | 534 void AudioContext::notifyNodeFinishedProcessing(AudioNode* node) |
622 { | 535 { |
623 ASSERT(isAudioThread()); | 536 ASSERT(isAudioThread()); |
624 m_finishedNodes.append(node); | 537 m_finishedNodes.append(node); |
625 } | 538 } |
626 | 539 |
627 void AudioContext::derefFinishedSourceNodes() | 540 void AudioContext::derefFinishedSourceNodes() |
628 { | 541 { |
629 ASSERT(isGraphOwner()); | 542 ASSERT(isGraphOwner()); |
630 ASSERT(isAudioThread()); | 543 ASSERT(isAudioThread()); |
(...skipping 86 matching lines...) |
717 // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes. | 630 // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes. |
718 if (tryLock()) { | 631 if (tryLock()) { |
719 // Update the channel count mode. | 632 // Update the channel count mode. |
720 updateChangedChannelCountMode(); | 633 updateChangedChannelCountMode(); |
721 | 634 |
722 // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs. | 635 // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs. |
723 handleDirtyAudioSummingJunctions(); | 636 handleDirtyAudioSummingJunctions(); |
724 handleDirtyAudioNodeOutputs(); | 637 handleDirtyAudioNodeOutputs(); |
725 | 638 |
726 updateAutomaticPullNodes(); | 639 updateAutomaticPullNodes(); |
727 resolvePromisesForResume(); | |
728 | |
729 unlock(); | 640 unlock(); |
730 } | 641 } |
731 } | 642 } |
732 | 643 |
733 void AudioContext::handlePostRenderTasks() | 644 void AudioContext::handlePostRenderTasks() |
734 { | 645 { |
735 ASSERT(isAudioThread()); | 646 ASSERT(isAudioThread()); |
736 | 647 |
737 // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended and this method is called frequently. | 648 // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended and this method is called frequently. |
738 // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed | 649 // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed |
(...skipping 145 matching lines...) |
884 } | 795 } |
885 | 796 |
886 void AudioContext::processAutomaticPullNodes(size_t framesToProcess) | 797 void AudioContext::processAutomaticPullNodes(size_t framesToProcess) |
887 { | 798 { |
888 ASSERT(isAudioThread()); | 799 ASSERT(isAudioThread()); |
889 | 800 |
890 for (unsigned i = 0; i < m_renderingAutomaticPullNodes.size(); ++i) | 801 for (unsigned i = 0; i < m_renderingAutomaticPullNodes.size(); ++i) |
891 m_renderingAutomaticPullNodes[i]->processIfNecessary(framesToProcess); | 802 m_renderingAutomaticPullNodes[i]->processIfNecessary(framesToProcess); |
892 } | 803 } |
893 | 804 |
894 void AudioContext::resolvePromisesForResumeOnMainThread() | |
895 { | |
896 ASSERT(isMainThread()); | |
897 AutoLocker locker(this); | |
898 | |
899 for (unsigned k = 0; k < m_resumePromises.size(); ++k) { | |
900 if (m_contextState == Released) { | |
901 m_resumePromises[k]->reject( | |
902 DOMException::create(InvalidStateError, "Cannot resume a context that has been released")); | |
903 } else { | |
904 m_resumePromises[k]->resolve(); | |
905 } | |
906 } | |
907 | |
908 m_resumePromises.clear(); | |
909 m_isResolvingResumePromises = false; | |
910 } | |
911 | |
912 void AudioContext::resolvePromisesForResume() | |
913 { | |
914 // This runs inside the AudioContext's lock when handling pre-render tasks. | |
915 ASSERT(isAudioThread()); | |
916 ASSERT(isGraphOwner()); | |
917 | |
918 // Resolve any pending promises created by resume(). Only do this if we haven't already started | |
919 // resolving these promises. This gets called very often and it takes some time to resolve the | |
920 // promises in the main thread. | |
921 if (!m_isResolvingResumePromises && m_resumePromises.size() > 0) { | |
922 m_isResolvingResumePromises = true; | |
923 callOnMainThread(bind(&AudioContext::resolvePromisesForResumeOnMainThread, this)); | |
924 } | |
925 } | |
926 | |
927 const AtomicString& AudioContext::interfaceName() const | 805 const AtomicString& AudioContext::interfaceName() const |
928 { | 806 { |
929 return EventTargetNames::AudioContext; | 807 return EventTargetNames::AudioContext; |
930 } | 808 } |
931 | 809 |
932 ExecutionContext* AudioContext::executionContext() const | 810 ExecutionContext* AudioContext::executionContext() const |
933 { | 811 { |
934 return m_isStopScheduled ? 0 : ActiveDOMObject::executionContext(); | 812 return m_isStopScheduled ? 0 : ActiveDOMObject::executionContext(); |
935 } | 813 } |
936 | 814 |
937 void AudioContext::startRendering() | 815 void AudioContext::startRendering() |
938 { | 816 { |
939 // This is called for both online and offline contexts. | 817 destination()->startRendering(); |
940 ASSERT(isMainThread()); | |
941 ASSERT(m_destinationNode); | |
942 | |
943 if (m_contextState == Paused) { | |
944 destination()->startRendering(); | |
945 setContextState(Running); | |
946 } | |
947 } | |
948 | |
949 void AudioContext::stopRendering() | |
950 { | |
951 ASSERT(isMainThread()); | |
952 ASSERT(m_destinationNode); | |
953 ASSERT(!isOfflineContext()); | |
954 | |
955 if (m_contextState == Running) { | |
956 destination()->stopRendering(); | |
957 setContextState(Paused); | |
958 } | |
959 } | 818 } |
960 | 819 |
961 void AudioContext::fireCompletionEvent() | 820 void AudioContext::fireCompletionEvent() |
962 { | 821 { |
963 ASSERT(isMainThread()); | 822 ASSERT(isMainThread()); |
964 if (!isMainThread()) | 823 if (!isMainThread()) |
965 return; | 824 return; |
966 | 825 |
967 AudioBuffer* renderedBuffer = m_renderTarget.get(); | 826 AudioBuffer* renderedBuffer = m_renderTarget.get(); |
968 | 827 |
969 setContextState(Released); | |
970 | |
971 ASSERT(renderedBuffer); | 828 ASSERT(renderedBuffer); |
972 if (!renderedBuffer) | 829 if (!renderedBuffer) |
973 return; | 830 return; |
974 | 831 |
975 // Avoid firing the event if the document has already gone away. | 832 // Avoid firing the event if the document has already gone away. |
976 if (executionContext()) { | 833 if (executionContext()) { |
977 // Call the offline rendering completion event listener. | 834 // Call the offline rendering completion event listener. |
978 dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer)); | 835 dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer)); |
979 } | 836 } |
980 } | 837 } |
(...skipping 29 matching lines...) |
1010 | 867 |
1011 for (HashSet<AudioNode*>::iterator k = m_deferredCountModeChange.begin(); k != m_deferredCountModeChange.end(); ++k) | 868 for (HashSet<AudioNode*>::iterator k = m_deferredCountModeChange.begin(); k != m_deferredCountModeChange.end(); ++k) |
1012 (*k)->updateChannelCountMode(); | 869 (*k)->updateChannelCountMode(); |
1013 | 870 |
1014 m_deferredCountModeChange.clear(); | 871 m_deferredCountModeChange.clear(); |
1015 } | 872 } |
1016 | 873 |
1017 } // namespace blink | 874 } // namespace blink |
1018 | 875 |
1019 #endif // ENABLE(WEB_AUDIO) | 876 #endif // ENABLE(WEB_AUDIO) |
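
Aside (illustration only, not part of the patch): the comment in AudioContext::initialize() above describes the destination node's provideInput() being called repeatedly once the audio thread starts, each call rendering one "render quantum" of the audio stream. Below is a minimal, self-contained C++ sketch of that pull model; every name in it (ToySource, kRenderQuantumFrames, and so on) is invented for the example and is not the Blink API.

    // Toy model of the render-quantum pull loop described in initialize():
    // a destination repeatedly asks an upstream source for fixed-size blocks.
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    constexpr size_t kRenderQuantumFrames = 128; // a typical Web Audio block size

    class ToySource {
    public:
        // Stands in for a node's provideInput()/process(): fills one render
        // quantum with a simple ramp so the output is easy to inspect.
        void provideInput(std::vector<float>& block, size_t framesToProcess)
        {
            block.resize(framesToProcess);
            for (size_t i = 0; i < framesToProcess; ++i)
                block[i] = static_cast<float>(m_frameCount + i) / 44100.0f;
            m_frameCount += framesToProcess;
        }

    private:
        size_t m_frameCount = 0;
    };

    int main()
    {
        ToySource source;
        std::vector<float> block;

        // A real audio thread keeps pulling for as long as the context is
        // rendering; four quanta are enough to show the repeated pull here.
        for (int quantum = 0; quantum < 4; ++quantum) {
            source.provideInput(block, kRenderQuantumFrames);
            std::printf("quantum %d: %zu frames, first sample %f\n",
                        quantum, block.size(), block.front());
        }
        return 0;
    }

Each loop iteration corresponds to one render quantum; in the real graph the destination node drives this pull from the audio thread that initialize() starts.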