Index: Source/modules/webaudio/AudioContext.cpp
diff --git a/Source/modules/webaudio/AudioContext.cpp b/Source/modules/webaudio/AudioContext.cpp
index b5127c069232e519d28db54c806c8ea59c59fe53..00aee42b41e7df1bd4e798ac7abd5e7302c18d88 100644
--- a/Source/modules/webaudio/AudioContext.cpp
+++ b/Source/modules/webaudio/AudioContext.cpp
@@ -30,6 +30,8 @@
#include "bindings/core/v8/ExceptionMessages.h" |
#include "bindings/core/v8/ExceptionState.h" |
+#include "bindings/core/v8/ScriptState.h" |
+#include "core/dom/DOMException.h" |
#include "core/dom/Document.h" |
#include "core/dom/ExceptionCode.h" |
#include "core/html/HTMLMediaElement.h" |
@@ -101,10 +103,12 @@ AudioContext::AudioContext(Document* document)
, m_isCleared(false) |
, m_isInitialized(false) |
, m_destinationNode(nullptr) |
+ , m_isResolvingResumePromises(false) |
, m_automaticPullNodesNeedUpdating(false) |
, m_connectionCount(0) |
, m_audioThread(0) |
, m_isOfflineContext(false) |
+ , m_contextState(Paused) |
{ |
m_destinationNode = DefaultAudioDestinationNode::create(this); |
@@ -121,10 +125,12 @@ AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t
, m_isCleared(false) |
, m_isInitialized(false) |
, m_destinationNode(nullptr) |
+ , m_isResolvingResumePromises(false) |
, m_automaticPullNodesNeedUpdating(false) |
, m_connectionCount(0) |
, m_audioThread(0) |
, m_isOfflineContext(true) |
+ , m_contextState(Paused) |
{ |
// Create a new destination for offline rendering. |
m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate); |
@@ -165,7 +171,7 @@ void AudioContext::initialize()
// Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum". |
// NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript. |
// We may want to consider requiring it for symmetry with OfflineAudioContext. |
- m_destinationNode->startRendering(); |
+ startRendering(); |
++s_hardwareContextCount; |
} |
@@ -531,6 +537,86 @@ PeriodicWave* AudioContext::createPeriodicWave(Float32Array* real, Float32Array*
return PeriodicWave::create(sampleRate(), real, imag); |
} |
+String AudioContext::state() const |
+{ |
+ switch (m_contextState) { |
+ case Paused: |
+ return "paused"; |
+ case Running: |
+ return "running"; |
+ case Released: |
+ return "released"; |
+ } |
+ ASSERT_NOT_REACHED(); |
+ return ""; |
+} |
+ |
+void AudioContext::setContextState(AudioContextState newState) |
+{ |
+ // Validate the transitions |
+ switch (newState) { |
+ case Paused: |
+ ASSERT(m_contextState == Running); |
+ break; |
+ case Running: |
+ ASSERT(m_contextState == Paused); |
+ break; |
+ case Released: |
+ ASSERT(m_contextState != Released); |
+ break; |
+ } |
+ |
+ m_contextState = newState; |
+} |
+ |
+void AudioContext::suspendContext(ExceptionState& exceptionState) |
+{ |
+ ASSERT(isMainThread()); |
+ AutoLocker locker(this); |
+ |
+ if (m_contextState == Released) { |
+ exceptionState.throwDOMException( |
+ InvalidStateError, |
+ "cannot suspend an AudioContext that has been released"); |
+ return; |
+ } |
+ |
+ if (m_destinationNode && !isOfflineContext()) { |
+ stopRendering(); |
+ } |
+} |
+ |
+ScriptPromise AudioContext::resumeContext(ScriptState* scriptState) |
+{ |
+ ASSERT(isMainThread()); |
+ AutoLocker locker(this); |
+ |
+ RefPtr<ScriptPromiseResolver> resolver = ScriptPromiseResolver::create(scriptState); |
+ |
+ ScriptPromise promise = resolver->promise(); |
+ |
+ if (isOfflineContext()) { |
+ // For offline context, resolve now, but reject if the context has been released. |
+ if (m_contextState == Released) { |
+ resolver->reject( |
+ DOMException::create(InvalidStateError, "Cannot resume a context that has been released")); |
+ } else { |
+ resolver->resolve(); |
+ } |
+ } else { |
+ // Restart the destination node to pull on the audio graph. |
+ if (m_destinationNode) { |
+ startRendering(); |
+ } |
+ |
+ // Save the promise which will get resolved when the destination node starts pulling on the |
+ // graph again. |
+ m_resumePromises.append(resolver); |
+ } |
+ |
+ return promise; |
+} |
+ |
void AudioContext::notifyNodeFinishedProcessing(AudioNode* node) |
{ |
ASSERT(isAudioThread()); |
@@ -637,6 +723,8 @@ void AudioContext::handlePreRenderTasks()
handleDirtyAudioNodeOutputs(); |
updateAutomaticPullNodes(); |
+ resolvePromisesForResume(); |
+ |
unlock(); |
} |
} |
@@ -802,6 +890,39 @@ void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
m_renderingAutomaticPullNodes[i]->processIfNecessary(framesToProcess); |
} |
+void AudioContext::resolvePromisesForResumeOnMainThread() |
+{ |
+ ASSERT(isMainThread()); |
+ AutoLocker locker(this); |
+ |
+ for (unsigned k = 0; k < m_resumePromises.size(); ++k) { |
+ if (m_contextState == Released) { |
+ m_resumePromises[k]->reject( |
+ DOMException::create(InvalidStateError, "Cannot resume a context that has been released")); |
+ } else { |
+ m_resumePromises[k]->resolve(); |
+ } |
+ } |
+ |
+ m_resumePromises.clear(); |
+ m_isResolvingResumePromises = false; |
+} |
+ |
+void AudioContext::resolvePromisesForResume() |
+{ |
+ // This runs inside the AudioContext's lock when handling pre-render tasks. |
+ ASSERT(isAudioThread()); |
+ ASSERT(isGraphOwner()); |
+ |
+    // Resolve any pending promises created by resume(). Only do this if we haven't already started
+ // resolving these promises. This gets called very often and it takes some time to resolve the |
+ // promises in the main thread. |
+ if (!m_isResolvingResumePromises && m_resumePromises.size() > 0) { |
+ m_isResolvingResumePromises = true; |
+ callOnMainThread(bind(&AudioContext::resolvePromisesForResumeOnMainThread, this)); |
+ } |
+} |
+ |
const AtomicString& AudioContext::interfaceName() const |
{ |
return EventTargetNames::AudioContext; |
@@ -814,7 +935,24 @@ ExecutionContext* AudioContext::executionContext() const
void AudioContext::startRendering() |
{ |
-    destination()->startRendering();
+    ASSERT(isMainThread());

    [Inline review thread attached to this line — flattened here by the review-tool extraction;
     order and text preserved, one reply truncated at the source:]
    haraken (2014/10/16 01:13:43): Shall we add ASSERT(!isOfflineContext())?
    haraken (2014/10/16 01:13:43): Shall we add ASSERT(m_destinationNode)?
    Raymond Toy (2014/10/16 17:56:31): Done.
    Raymond Toy (2014/10/16 17:56:31): Actually no. startRendering is called for both on
    [remainder of this reply truncated in the extracted text]
+ |
+ if (m_contextState == Paused) { |
+ destination()->startRendering(); |
+ setContextState(Running); |
+ } |
+} |
+ |
+void AudioContext::stopRendering() |
+{ |
+ ASSERT(isMainThread()); |
+ ASSERT(m_destinationNode); |
+ ASSERT(!isOfflineContext()); |
+ |
+ if (m_contextState == Running) { |
+ destination()->stopRendering(); |
+ setContextState(Paused); |
+ } |
} |
void AudioContext::fireCompletionEvent() |
@@ -825,6 +963,8 @@ void AudioContext::fireCompletionEvent()
AudioBuffer* renderedBuffer = m_renderTarget.get(); |
+ setContextState(Released); |
+ |
ASSERT(renderedBuffer); |
if (!renderedBuffer) |
return; |