Index: Source/modules/webaudio/AudioContext.cpp |
diff --git a/Source/modules/webaudio/AudioContext.cpp b/Source/modules/webaudio/AudioContext.cpp |
index 39f6180ad733c492b35a46d1548be2d553d54d1e..be1f19ed13641e0b91237ecb6b965e8027105d3d 100644 |
--- a/Source/modules/webaudio/AudioContext.cpp |
+++ b/Source/modules/webaudio/AudioContext.cpp |
@@ -30,6 +30,8 @@ |
#include "bindings/core/v8/ExceptionMessages.h" |
#include "bindings/core/v8/ExceptionState.h" |
+#include "bindings/core/v8/ScriptState.h" |
+#include "core/dom/DOMException.h" |
#include "core/dom/Document.h" |
#include "core/dom/ExceptionCode.h" |
#include "core/html/HTMLMediaElement.h" |
@@ -99,10 +101,12 @@ AudioContext::AudioContext(Document* document) |
, m_isCleared(false) |
, m_isInitialized(false) |
, m_destinationNode(nullptr) |
+ , m_isResolvingResumePromises(false) |
, m_automaticPullNodesNeedUpdating(false) |
, m_connectionCount(0) |
, m_audioThread(0) |
, m_isOfflineContext(false) |
+ , m_contextState(Suspended) |
{ |
m_destinationNode = DefaultAudioDestinationNode::create(this); |
@@ -119,10 +123,12 @@ AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t |
, m_isCleared(false) |
, m_isInitialized(false) |
, m_destinationNode(nullptr) |
+ , m_isResolvingResumePromises(false) |
, m_automaticPullNodesNeedUpdating(false) |
, m_connectionCount(0) |
, m_audioThread(0) |
, m_isOfflineContext(true) |
+ , m_contextState(Suspended) |
{ |
// Create a new destination for offline rendering. |
m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate); |
@@ -145,6 +151,7 @@ AudioContext::~AudioContext() |
if (m_automaticPullNodesNeedUpdating) |
m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size()); |
ASSERT(!m_renderingAutomaticPullNodes.size()); |
+ ASSERT(!m_resumePromises.size()); |
} |
void AudioContext::initialize() |
@@ -163,7 +170,7 @@ void AudioContext::initialize() |
// Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum". |
// NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript. |
// We may want to consider requiring it for symmetry with OfflineAudioContext. |
- m_destinationNode->startRendering(); |
+ startRendering(); |
++s_hardwareContextCount; |
} |
@@ -529,6 +536,88 @@ PeriodicWave* AudioContext::createPeriodicWave(DOMFloat32Array* real, DOMFloat32 |
return PeriodicWave::create(sampleRate(), real->view(), imag->view()); |
} |
+String AudioContext::state() const |
+{ |
+ // These strings had better match the strings for AudioContextState in AudioContext.idl. |
+ switch (m_contextState) { |
+ case Suspended: |
+ return "suspended"; |
+ case Running: |
+ return "running"; |
+ case Closed: |
+ return "closed"; |
+ } |
+ ASSERT_NOT_REACHED(); |
+ return ""; |
+} |
+ |
+void AudioContext::setContextState(AudioContextState newState) |
+{ |
+ // Validate the transitions |
+ switch (newState) { |
+ case Suspended: |
+ ASSERT(m_contextState == Running); |
+ break; |
+ case Running: |
+ ASSERT(m_contextState == Suspended); |
+ break; |
+ case Closed: |
+ ASSERT(m_contextState != Closed); |
+ break; |
+ } |
+ |
+ m_contextState = newState; |
+} |
+ |
+ScriptPromise AudioContext::suspendContext(ScriptState* scriptState) |
+{ |
+ ASSERT(isMainThread()); |
+ AutoLocker locker(this); |
+ |
+ RefPtr<ScriptPromiseResolver> resolver = ScriptPromiseResolver::create(scriptState); |
+ |
+ ScriptPromise promise = resolver->promise(); |
+ |
+ if (isOfflineContext()) { |
+ resolver->reject( |
+ DOMException::create( |
+ InvalidStateError, |
+ "cannot suspend an OfflineAudioContext")); |
+ } else { |
+ // Save the promise which will get resolved at the end of the rendering quantum. |
+ m_suspendPromises.append(resolver); |
+ } |
+ |
+ return promise; |
+} |
+ |
+ScriptPromise AudioContext::resumeContext(ScriptState* scriptState) |
+{ |
+ ASSERT(isMainThread()); |
+ AutoLocker locker(this); |
+ |
+ RefPtr<ScriptPromiseResolver> resolver = ScriptPromiseResolver::create(scriptState); |
+ |
+ ScriptPromise promise = resolver->promise(); |
+ |
+ if (isOfflineContext()) { |
+ resolver->reject( |
+ DOMException::create( |
+ InvalidStateError, |
+ "cannot resume an OfflineAudioContext")); |
+ } else { |
+ // Restart the destination node to pull on the audio graph. |
+ if (m_destinationNode) |
+ startRendering(); |
+ |
+ // Save the promise which will get resolved when the destination node starts pulling on the |
+ // graph again. |
+ m_resumePromises.append(resolver); |
+ } |
+ |
+ return promise; |
+} |
+ |
void AudioContext::notifyNodeFinishedProcessing(AudioNode* node) |
{ |
ASSERT(isAudioThread()); |
@@ -635,6 +724,8 @@ void AudioContext::handlePreRenderTasks() |
handleDirtyAudioNodeOutputs(); |
updateAutomaticPullNodes(); |
+ resolvePromisesForResume(); |
+ |
unlock(); |
} |
} |
@@ -661,6 +752,8 @@ void AudioContext::handlePostRenderTasks() |
handleDirtyAudioNodeOutputs(); |
updateAutomaticPullNodes(); |
+ resolvePromisesForSuspend(); |
+ |
unlock(); |
} |
} |
@@ -800,6 +893,72 @@ void AudioContext::processAutomaticPullNodes(size_t framesToProcess) |
m_renderingAutomaticPullNodes[i]->processIfNecessary(framesToProcess); |
} |
+void AudioContext::resolvePromisesForResumeOnMainThread() |
+{ |
+ ASSERT(isMainThread()); |
+ AutoLocker locker(this); |
+ |
+ for (unsigned k = 0; k < m_resumePromises.size(); ++k) { |
+ if (m_contextState == Closed) { |
+ m_resumePromises[k]->reject( |
+ DOMException::create(InvalidStateError, "Cannot resume a context that has been released")); |
+ } else { |
+ m_resumePromises[k]->resolve(); |
+ } |
+ } |
+ |
+ m_resumePromises.clear(); |
+ m_isResolvingResumePromises = false; |
+} |
+ |
+void AudioContext::resolvePromisesForResume() |
+{ |
+ // This runs inside the AudioContext's lock when handling pre-render tasks. |
+ ASSERT(isAudioThread()); |
+ ASSERT(isGraphOwner()); |
+ |
+    // Resolve any pending promises created by resume(). Only do this if we haven't already started |
+ // resolving these promises. This gets called very often and it takes some time to resolve the |
+ // promises in the main thread. |
+ if (!m_isResolvingResumePromises && m_resumePromises.size() > 0) { |
+ m_isResolvingResumePromises = true; |
+ callOnMainThread(bind(&AudioContext::resolvePromisesForResumeOnMainThread, this)); |
+ } |
+} |
+ |
+void AudioContext::resolvePromisesForSuspendOnMainThread() |
+{ |
+ ASSERT(isMainThread()); |
+ AutoLocker locker(this); |
+ |
+ // We can stop rendering now. |
+ if (m_destinationNode) |
+ stopRendering(); |
+ |
+ for (unsigned k = 0; k < m_suspendPromises.size(); ++k) { |
+ if (m_contextState == Closed) { |
+ m_suspendPromises[k]->reject( |
+ DOMException::create(InvalidStateError, "Cannot suspend a context that has been released")); |
+ } else { |
+ m_suspendPromises[k]->resolve(); |
+ } |
+ } |
+ |
+ m_suspendPromises.clear(); |
+} |
+ |
+void AudioContext::resolvePromisesForSuspend() |
+{ |
+    // This runs inside the AudioContext's lock when handling post-render tasks. |
+ ASSERT(isAudioThread()); |
+ ASSERT(isGraphOwner()); |
+ |
+ // Resolve any pending promises created by suspend() |
+ if (m_suspendPromises.size() > 0) |
+ callOnMainThread(bind(&AudioContext::resolvePromisesForSuspendOnMainThread, this)); |
+ |
+} |
+ |
const AtomicString& AudioContext::interfaceName() const |
{ |
return EventTargetNames::AudioContext; |
@@ -812,7 +971,26 @@ ExecutionContext* AudioContext::executionContext() const |
void AudioContext::startRendering() |
{ |
- destination()->startRendering(); |
+ // This is called for both online and offline contexts. |
+ ASSERT(isMainThread()); |
+ ASSERT(m_destinationNode); |
+ |
+ if (m_contextState == Suspended) { |
+ destination()->startRendering(); |
+ setContextState(Running); |
+ } |
+} |
+ |
+void AudioContext::stopRendering() |
+{ |
+ ASSERT(isMainThread()); |
+ ASSERT(m_destinationNode); |
+ ASSERT(!isOfflineContext()); |
+ |
+ if (m_contextState == Running) { |
+ destination()->stopRendering(); |
+ setContextState(Suspended); |
+ } |
} |
void AudioContext::fireCompletionEvent() |
@@ -823,6 +1001,8 @@ void AudioContext::fireCompletionEvent() |
AudioBuffer* renderedBuffer = m_renderTarget.get(); |
+ setContextState(Closed); |
+ |
ASSERT(renderedBuffer); |
if (!renderedBuffer) |
return; |