Index: Source/modules/webaudio/AudioContext.h
diff --git a/Source/modules/webaudio/AudioContext.h b/Source/modules/webaudio/AudioContext.h
index 1e54668a91992c2f8d2b5ac42efa20c049dd3414..9616915f4f3219a1d77b750f6f8ebd1fbff1c05b 100644
--- a/Source/modules/webaudio/AudioContext.h
+++ b/Source/modules/webaudio/AudioContext.h
@@ -25,6 +25,8 @@
#ifndef AudioContext_h
#define AudioContext_h
+#include "bindings/core/v8/ScriptPromise.h"
+#include "bindings/core/v8/ScriptPromiseResolver.h"
#include "core/dom/ActiveDOMObject.h"
#include "core/dom/DOMTypedArray.h"
#include "core/events/EventListener.h"
@@ -78,6 +80,16 @@ class AudioContext : public RefCountedGarbageCollectedWillBeGarbageCollectedFina
DEFINE_WRAPPERTYPEINFO();
WILL_BE_USING_GARBAGE_COLLECTED_MIXIN(AudioContext);
public:
+ // The state of an audio context. On creation, the state is Suspended. The state is Running if
+ // audio is being processed (the audio graph is being pulled for data), and Closed once the audio
+ // context has been closed. The valid transitions are from Suspended to either Running or Closed,
+ // and from Running to either Suspended or Closed. Once Closed, there are no valid transitions.
+ enum AudioContextState {
+ Suspended,
+ Running,
+ Closed
+ };
+
// Create an AudioContext for rendering to the audio hardware.
static AudioContext* create(Document&, ExceptionState&);
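
The comment on AudioContextState pins down a small state machine. As a standalone illustration (not part of this patch), the documented transition rules could be written as a pure predicate; isValidStateTransition is a hypothetical name:

    #include <cassert>

    enum AudioContextState { Suspended, Running, Closed };

    // Encodes the documented rules: Suspended -> Running | Closed,
    // Running -> Suspended | Closed, and nothing leaves Closed.
    static bool isValidStateTransition(AudioContextState from, AudioContextState to)
    {
        switch (from) {
        case Suspended: return to == Running || to == Closed;
        case Running: return to == Suspended || to == Closed;
        case Closed: return false;
        }
        return false;
    }

    int main()
    {
        assert(isValidStateTransition(Suspended, Running));
        assert(isValidStateTransition(Running, Suspended));
        assert(!isValidStateTransition(Closed, Running)); // Closed is terminal.
    }
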
@@ -96,6 +108,7 @@ public:
size_t currentSampleFrame() const { return m_destinationNode->currentSampleFrame(); }
double currentTime() const { return m_destinationNode->currentTime(); }
float sampleRate() const { return m_destinationNode->sampleRate(); }
+ String state() const;
AudioBuffer* createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState&);
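
The new state() accessor presumably maps m_contextState to the lowercase strings the Web Audio API exposes on AudioContext.state: "suspended", "running", and "closed". A minimal standalone sketch of that mapping, with stateName as a hypothetical helper name:

    #include <cstdio>

    enum AudioContextState { Suspended, Running, Closed };

    // Maps the internal enum to the strings scripts see on AudioContext.state.
    static const char* stateName(AudioContextState state)
    {
        switch (state) {
        case Suspended: return "suspended";
        case Running: return "running";
        case Closed: return "closed";
        }
        return "";
    }

    int main()
    {
        std::printf("%s\n", stateName(Suspended)); // Prints "suspended".
    }
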
@@ -129,6 +142,10 @@ public:
OscillatorNode* createOscillator();
PeriodicWave* createPeriodicWave(DOMFloat32Array* real, DOMFloat32Array* imag, ExceptionState&);
+ // Suspend/resume rendering; each returns a promise that resolves once the transition completes.
+ ScriptPromise suspendContext(ScriptState*);
+ ScriptPromise resumeContext(ScriptState*);
+
// When a source node has no more processing to do (has finished playing), then it tells the context to dereference it.
void notifyNodeFinishedProcessing(AudioNode*);
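
suspendContext() and resumeContext() return promises, and the private members in the next hunk suggest the pattern: create a resolver, park it in a vector, and settle it later once the rendering thread has actually changed state. A standalone model of that pattern, using std::promise in place of ScriptPromiseResolver (this is an illustration, not Blink code):

    #include <future>
    #include <memory>
    #include <vector>

    // Standalone model: suspend() hands back a promise immediately and records
    // the resolver; the resolver is settled once rendering has really stopped.
    struct Context {
        std::vector<std::shared_ptr<std::promise<void>>> m_suspendResolvers;

        std::future<void> suspendContext()
        {
            auto resolver = std::make_shared<std::promise<void>>();
            m_suspendResolvers.push_back(resolver);
            // ... ask the rendering thread to stop pulling the graph ...
            return resolver->get_future();
        }

        // Called once the context has actually reached the Suspended state.
        void resolvePromisesForSuspend()
        {
            for (auto& resolver : m_suspendResolvers)
                resolver->set_value();
            m_suspendResolvers.clear();
        }
    };

    int main()
    {
        Context context;
        auto suspended = context.suspendContext();
        context.resolvePromisesForSuspend();
        suspended.get(); // Returns: the suspend has completed.
    }
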
@@ -275,6 +292,27 @@ private:
// AudioNode::breakConnection() when we remove an AudioNode from this.
HeapVector<Member<AudioNode> > m_referencedNodes;
+ // Stop rendering the audio graph.
+ void stopRendering();
+
+ // Handle promises for resume() and suspend().
+ void resolvePromisesForResume();
+ void resolvePromisesForResumeOnMainThread();
+
+ void resolvePromisesForSuspend();
+ void resolvePromisesForSuspendOnMainThread();
+
+ // Promises created by resume(). Resolving them takes time, so we collect them here until they
+ // can be resolved or rejected.
+ Vector<RefPtr<ScriptPromiseResolver> > m_resumePromises;
+ // Like m_resumePromises, but for suspend().
+ Vector<RefPtr<ScriptPromiseResolver> > m_suspendPromises;
+
+ // True if we're in the process of resolving promises for resume(). Resolving can take some
+ // time, and the audio rendering loop runs very often, so this flag keeps us from posting the
+ // resolve task an excessive number of times.
+ bool m_isResolvingResumePromises;
+
+
class AudioNodeDisposer {
public:
explicit AudioNodeDisposer(AudioNode& node) : m_node(node) { }
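
The paired resolvePromisesForResume() / resolvePromisesForResumeOnMainThread() names, together with m_isResolvingResumePromises, suggest the usual cross-thread shape: the audio thread must not touch script objects, so it posts a task to the main thread, and the flag stops it from posting one task per render quantum while the first is still pending. A single-threaded standalone model of that guard (task posting is simulated with a queue; every name outside the patch is made up):

    #include <functional>
    #include <iostream>
    #include <queue>

    // Stand-in for the main-thread task runner.
    struct MainThread {
        std::queue<std::function<void()>> tasks;
        void post(std::function<void()> task) { tasks.push(std::move(task)); }
        void runPendingTasks() { while (!tasks.empty()) { tasks.front()(); tasks.pop(); } }
    };

    struct Context {
        MainThread* mainThread;
        bool m_isResolvingResumePromises;
        int tasksPosted;

        explicit Context(MainThread* thread)
            : mainThread(thread), m_isResolvingResumePromises(false), tasksPosted(0) { }

        // Called from the (fast, frequent) audio rendering loop.
        void resolvePromisesForResume()
        {
            if (m_isResolvingResumePromises)
                return; // A resolve task is already pending; don't post another.
            m_isResolvingResumePromises = true;
            ++tasksPosted;
            mainThread->post([this] { resolvePromisesForResumeOnMainThread(); });
        }

        void resolvePromisesForResumeOnMainThread()
        {
            // ... resolve everything collected in m_resumePromises here ...
            m_isResolvingResumePromises = false;
        }
    };

    int main()
    {
        MainThread mainThread;
        Context context(&mainThread);
        for (int quantum = 0; quantum < 1000; ++quantum)
            context.resolvePromisesForResume(); // Simulate many render quanta.
        mainThread.runPendingTasks();
        std::cout << "tasks posted: " << context.tasksPosted << "\n"; // 1, not 1000.
    }
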
@@ -339,6 +377,9 @@ private:
bool m_isOfflineContext;
+ AudioContextState m_contextState;
+ void setContextState(AudioContextState);
+
AsyncAudioDecoder m_audioDecoder;
// Collection of nodes where the channel count mode has changed. We want the channel count mode
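
Routing every state change through a private setContextState() gives one place to enforce the transitions documented on the enum and to do any follow-up bookkeeping (such as telling script about the new state). A hypothetical standalone sketch of such a setter, not the patch's actual body:

    #include <cassert>

    enum AudioContextState { Suspended, Running, Closed };

    struct Context {
        AudioContextState m_contextState;

        Context() : m_contextState(Suspended) { } // Contexts start out Suspended.

        void setContextState(AudioContextState newState)
        {
            // Enforce the transition rules documented on the enum.
            switch (m_contextState) {
            case Suspended: assert(newState == Running || newState == Closed); break;
            case Running: assert(newState == Suspended || newState == Closed); break;
            case Closed: assert(false); break; // Nothing leaves Closed.
            }
            m_contextState = newState;
            // Follow-up work (e.g. notifying script of the change) would go here.
        }
    };

    int main()
    {
        Context context;
        context.setContextState(Running);
        context.setContextState(Closed);
    }
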