Index: Source/modules/webaudio/AbstractAudioContext.cpp
diff --git a/Source/modules/webaudio/AudioContext.cpp b/Source/modules/webaudio/AbstractAudioContext.cpp
similarity index 69%
copy from Source/modules/webaudio/AudioContext.cpp
copy to Source/modules/webaudio/AbstractAudioContext.cpp
index a6e0aebbeeab2c4dc1278d17ec43f392fd25f5df..4db6e62fd95183a7bb201eeef1f09cde0c47e7f3 100644
--- a/Source/modules/webaudio/AudioContext.cpp
+++ b/Source/modules/webaudio/AbstractAudioContext.cpp
@@ -24,7 +24,7 @@
#include "config.h"
#if ENABLE(WEB_AUDIO)
-#include "modules/webaudio/AudioContext.h"
+#include "modules/webaudio/AbstractAudioContext.h"
#include "bindings/core/v8/ExceptionMessages.h"
#include "bindings/core/v8/ExceptionState.h"
@@ -40,6 +40,7 @@
#include "modules/webaudio/AudioBuffer.h"
#include "modules/webaudio/AudioBufferCallback.h"
#include "modules/webaudio/AudioBufferSourceNode.h"
+#include "modules/webaudio/AudioContext.h"
#include "modules/webaudio/AudioListener.h"
#include "modules/webaudio/AudioNodeInput.h"
#include "modules/webaudio/AudioNodeOutput.h"
@@ -67,37 +68,18 @@
#include "public/platform/Platform.h"
#include "wtf/text/WTFString.h"
-#if DEBUG_AUDIONODE_REFERENCES
-#include <stdio.h>
-#endif
-
namespace blink {
-// Don't allow more than this number of simultaneous AudioContexts talking to hardware.
-const unsigned MaxHardwareContexts = 6;
-unsigned AudioContext::s_hardwareContextCount = 0;
-unsigned AudioContext::s_contextId = 0;
-
-AudioContext* AudioContext::create(Document& document, ExceptionState& exceptionState)
+AbstractAudioContext* AbstractAudioContext::create(Document& document, ExceptionState& exceptionState)
{
- ASSERT(isMainThread());
- if (s_hardwareContextCount >= MaxHardwareContexts) {
- exceptionState.throwDOMException(
- NotSupportedError,
- ExceptionMessages::indexExceedsMaximumBound(
- "number of hardware contexts",
- s_hardwareContextCount,
- MaxHardwareContexts));
- return nullptr;
- }
-
- AudioContext* audioContext = new AudioContext(&document);
- audioContext->suspendIfNeeded();
- return audioContext;
+ return AudioContext::create(document, exceptionState);
}
+// FIXME(dominicc): Devolve these constructors to AudioContext
+// and OfflineAudioContext respectively.
+
// Constructor for rendering to the audio hardware.
-AudioContext::AudioContext(Document* document)
+AbstractAudioContext::AbstractAudioContext(Document* document)
: ActiveDOMObject(document)
, m_isStopScheduled(false)
, m_isCleared(false)
@@ -107,7 +89,6 @@ AudioContext::AudioContext(Document* document)
, m_connectionCount(0)
, m_didInitializeContextGraphMutex(false)
, m_deferredTaskHandler(DeferredTaskHandler::create())
- , m_isOfflineContext(false)
, m_contextState(Suspended)
{
m_didInitializeContextGraphMutex = true;
@@ -117,7 +98,7 @@ AudioContext::AudioContext(Document* document)
}
// Constructor for offline (non-realtime) rendering.
-AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
+AbstractAudioContext::AbstractAudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
: ActiveDOMObject(document)
, m_isStopScheduled(false)
, m_isCleared(false)
@@ -127,7 +108,6 @@ AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t
, m_connectionCount(0)
, m_didInitializeContextGraphMutex(false)
, m_deferredTaskHandler(DeferredTaskHandler::create())
- , m_isOfflineContext(true)
, m_contextState(Suspended)
{
m_didInitializeContextGraphMutex = true;
@@ -139,11 +119,8 @@ AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t
initialize();
}
-AudioContext::~AudioContext()
+AbstractAudioContext::~AbstractAudioContext()
{
-#if DEBUG_AUDIONODE_REFERENCES
- fprintf(stderr, "%p: AudioContext::~AudioContext(): %u\n", this, m_contextId);
-#endif
deferredTaskHandler().contextWillBeDestroyed();
// AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around.
ASSERT(!m_isInitialized);
@@ -153,7 +130,7 @@ AudioContext::~AudioContext()
ASSERT(!m_resumeResolvers.size());
}
-void AudioContext::initialize()
+void AbstractAudioContext::initialize()
{
if (isInitialized())
return;
@@ -163,26 +140,11 @@ void AudioContext::initialize()
if (m_destinationNode.get()) {
m_destinationNode->handler().initialize();
-
- if (!isOfflineContext()) {
- // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
- // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
- // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
- // We may want to consider requiring it for symmetry with OfflineAudioContext.
- startRendering();
- ++s_hardwareContextCount;
- }
-
- m_contextId = s_contextId++;
m_isInitialized = true;
-#if DEBUG_AUDIONODE_REFERENCES
- fprintf(stderr, "%p: AudioContext::AudioContext(): %u #%u\n",
- this, m_contextId, AudioContext::s_hardwareContextCount);
-#endif
}
}
-void AudioContext::clear()
+void AbstractAudioContext::clear()
{
m_destinationNode.clear();
// The audio rendering thread is dead. Nobody will schedule AudioHandler
@@ -191,7 +153,7 @@ void AudioContext::clear()
m_isCleared = true;
}
-void AudioContext::uninitialize()
+void AbstractAudioContext::uninitialize()
{
ASSERT(isMainThread());
@@ -204,25 +166,12 @@ void AudioContext::uninitialize()
if (m_destinationNode)
m_destinationNode->handler().uninitialize();
- if (!isOfflineContext()) {
- ASSERT(s_hardwareContextCount);
- --s_hardwareContextCount;
- }
-
// Get rid of the sources which may still be playing.
releaseActiveSourceNodes();
// Reject any pending resolvers before we go away.
rejectPendingResolvers();
-
- // For an offline audio context, the completion event will set the state to closed. For an
- // online context, we need to do it here. We only want to set the closed state once.
- if (!isOfflineContext())
- setContextState(Closed);
-
- // Resolve the promise now, if any
- if (m_closeResolver)
- m_closeResolver->resolve();
+ didClose();
ASSERT(m_listener);
m_listener->waitForHRTFDatabaseLoaderThreadCompletion();
@@ -230,7 +179,7 @@ void AudioContext::uninitialize()
clear();
}
-void AudioContext::stop()
+void AbstractAudioContext::stop()
{
// Usually ExecutionContext calls stop twice.
if (m_isStopScheduled)
@@ -241,21 +190,21 @@ void AudioContext::stop()
// of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
// ActiveDOMObjects so let's schedule uninitialize() to be called later.
// FIXME: see if there's a more direct way to handle this issue.
- Platform::current()->mainThread()->postTask(FROM_HERE, bind(&AudioContext::uninitialize, this));
+ Platform::current()->mainThread()->postTask(FROM_HERE, bind(&AbstractAudioContext::uninitialize, this));
}
-bool AudioContext::hasPendingActivity() const
+bool AbstractAudioContext::hasPendingActivity() const
{
// There's no pending activity if the audio context has been cleared.
return !m_isCleared;
}
-void AudioContext::throwExceptionForClosedState(ExceptionState& exceptionState)
+void AbstractAudioContext::throwExceptionForClosedState(ExceptionState& exceptionState)
{
exceptionState.throwDOMException(InvalidStateError, "AudioContext has been closed.");
}
-AudioBuffer* AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
+AudioBuffer* AbstractAudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
{
// It's ok to call createBuffer, even if the context is closed because the AudioBuffer doesn't
// really "belong" to any particular context.
@@ -263,7 +212,7 @@ AudioBuffer* AudioContext::createBuffer(unsigned numberOfChannels, size_t number
return AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate, exceptionState);
}
-void AudioContext::decodeAudioData(DOMArrayBuffer* audioData, AudioBufferCallback* successCallback, AudioBufferCallback* errorCallback, ExceptionState& exceptionState)
+void AbstractAudioContext::decodeAudioData(DOMArrayBuffer* audioData, AudioBufferCallback* successCallback, AudioBufferCallback* errorCallback, ExceptionState& exceptionState)
{
if (isContextClosed()) {
throwExceptionForClosedState(exceptionState);
@@ -279,7 +228,7 @@ void AudioContext::decodeAudioData(DOMArrayBuffer* audioData, AudioBufferCallbac
m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);
}
-AudioBufferSourceNode* AudioContext::createBufferSource(ExceptionState& exceptionState)
+AudioBufferSourceNode* AbstractAudioContext::createBufferSource(ExceptionState& exceptionState)
{
ASSERT(isMainThread());
@@ -296,7 +245,7 @@ AudioBufferSourceNode* AudioContext::createBufferSource(ExceptionState& exceptio
return node;
}
-MediaElementAudioSourceNode* AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState)
+MediaElementAudioSourceNode* AbstractAudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState)
{
ASSERT(isMainThread());
@@ -328,7 +277,7 @@ MediaElementAudioSourceNode* AudioContext::createMediaElementSource(HTMLMediaEle
return node;
}
-MediaStreamAudioSourceNode* AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState)
+MediaStreamAudioSourceNode* AbstractAudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState)
{
ASSERT(isMainThread());
@@ -364,7 +313,7 @@ MediaStreamAudioSourceNode* AudioContext::createMediaStreamSource(MediaStream* m
return node;
}
-MediaStreamAudioDestinationNode* AudioContext::createMediaStreamDestination(ExceptionState& exceptionState)
+MediaStreamAudioDestinationNode* AbstractAudioContext::createMediaStreamDestination(ExceptionState& exceptionState)
{
if (isContextClosed()) {
throwExceptionForClosedState(exceptionState);
@@ -375,25 +324,25 @@ MediaStreamAudioDestinationNode* AudioContext::createMediaStreamDestination(Exce
return MediaStreamAudioDestinationNode::create(*this, 2);
}
-ScriptProcessorNode* AudioContext::createScriptProcessor(ExceptionState& exceptionState)
+ScriptProcessorNode* AbstractAudioContext::createScriptProcessor(ExceptionState& exceptionState)
{
// Set number of input/output channels to stereo by default.
return createScriptProcessor(0, 2, 2, exceptionState);
}
-ScriptProcessorNode* AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState)
+ScriptProcessorNode* AbstractAudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState)
{
// Set number of input/output channels to stereo by default.
return createScriptProcessor(bufferSize, 2, 2, exceptionState);
}
-ScriptProcessorNode* AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState)
+ScriptProcessorNode* AbstractAudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState)
{
// Set number of output channels to stereo by default.
return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exceptionState);
}
-ScriptProcessorNode* AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState)
+ScriptProcessorNode* AbstractAudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState)
{
ASSERT(isMainThread());
@@ -409,18 +358,18 @@ ScriptProcessorNode* AudioContext::createScriptProcessor(size_t bufferSize, size
exceptionState.throwDOMException(
IndexSizeError,
"number of input channels and output channels cannot both be zero.");
- } else if (numberOfInputChannels > AudioContext::maxNumberOfChannels()) {
+ } else if (numberOfInputChannels > AbstractAudioContext::maxNumberOfChannels()) {
exceptionState.throwDOMException(
IndexSizeError,
"number of input channels (" + String::number(numberOfInputChannels)
+ ") exceeds maximum ("
- + String::number(AudioContext::maxNumberOfChannels()) + ").");
- } else if (numberOfOutputChannels > AudioContext::maxNumberOfChannels()) {
+ + String::number(AbstractAudioContext::maxNumberOfChannels()) + ").");
+ } else if (numberOfOutputChannels > AbstractAudioContext::maxNumberOfChannels()) {
exceptionState.throwDOMException(
IndexSizeError,
"number of output channels (" + String::number(numberOfInputChannels)
+ ") exceeds maximum ("
- + String::number(AudioContext::maxNumberOfChannels()) + ").");
+ + String::number(AbstractAudioContext::maxNumberOfChannels()) + ").");
} else {
exceptionState.throwDOMException(
IndexSizeError,
@@ -434,7 +383,7 @@ ScriptProcessorNode* AudioContext::createScriptProcessor(size_t bufferSize, size
return node;
}
-StereoPannerNode* AudioContext::createStereoPanner(ExceptionState& exceptionState)
+StereoPannerNode* AbstractAudioContext::createStereoPanner(ExceptionState& exceptionState)
{
ASSERT(isMainThread());
if (isContextClosed()) {
@@ -445,7 +394,7 @@ StereoPannerNode* AudioContext::createStereoPanner(ExceptionState& exceptionStat
return StereoPannerNode::create(*this, sampleRate());
}
-BiquadFilterNode* AudioContext::createBiquadFilter(ExceptionState& exceptionState)
+BiquadFilterNode* AbstractAudioContext::createBiquadFilter(ExceptionState& exceptionState)
{
ASSERT(isMainThread());
if (isContextClosed()) {
@@ -456,7 +405,7 @@ BiquadFilterNode* AudioContext::createBiquadFilter(ExceptionState& exceptionStat
return BiquadFilterNode::create(*this, sampleRate());
}
-WaveShaperNode* AudioContext::createWaveShaper(ExceptionState& exceptionState)
+WaveShaperNode* AbstractAudioContext::createWaveShaper(ExceptionState& exceptionState)
{
ASSERT(isMainThread());
if (isContextClosed()) {
@@ -467,7 +416,7 @@ WaveShaperNode* AudioContext::createWaveShaper(ExceptionState& exceptionState)
return WaveShaperNode::create(*this);
}
-PannerNode* AudioContext::createPanner(ExceptionState& exceptionState)
+PannerNode* AbstractAudioContext::createPanner(ExceptionState& exceptionState)
{
ASSERT(isMainThread());
if (isContextClosed()) {
@@ -478,7 +427,7 @@ PannerNode* AudioContext::createPanner(ExceptionState& exceptionState)
return PannerNode::create(*this, sampleRate());
}
-ConvolverNode* AudioContext::createConvolver(ExceptionState& exceptionState)
+ConvolverNode* AbstractAudioContext::createConvolver(ExceptionState& exceptionState)
{
ASSERT(isMainThread());
if (isContextClosed()) {
@@ -489,7 +438,7 @@ ConvolverNode* AudioContext::createConvolver(ExceptionState& exceptionState)
return ConvolverNode::create(*this, sampleRate());
}
-DynamicsCompressorNode* AudioContext::createDynamicsCompressor(ExceptionState& exceptionState)
+DynamicsCompressorNode* AbstractAudioContext::createDynamicsCompressor(ExceptionState& exceptionState)
{
ASSERT(isMainThread());
if (isContextClosed()) {
@@ -500,7 +449,7 @@ DynamicsCompressorNode* AudioContext::createDynamicsCompressor(ExceptionState& e
return DynamicsCompressorNode::create(*this, sampleRate());
}
-AnalyserNode* AudioContext::createAnalyser(ExceptionState& exceptionState)
+AnalyserNode* AbstractAudioContext::createAnalyser(ExceptionState& exceptionState)
{
ASSERT(isMainThread());
if (isContextClosed()) {
@@ -511,7 +460,7 @@ AnalyserNode* AudioContext::createAnalyser(ExceptionState& exceptionState)
return AnalyserNode::create(*this, sampleRate());
}
-GainNode* AudioContext::createGain(ExceptionState& exceptionState)
+GainNode* AbstractAudioContext::createGain(ExceptionState& exceptionState)
{
ASSERT(isMainThread());
if (isContextClosed()) {
@@ -522,13 +471,13 @@ GainNode* AudioContext::createGain(ExceptionState& exceptionState)
return GainNode::create(*this, sampleRate());
}
-DelayNode* AudioContext::createDelay(ExceptionState& exceptionState)
+DelayNode* AbstractAudioContext::createDelay(ExceptionState& exceptionState)
{
const double defaultMaxDelayTime = 1;
return createDelay(defaultMaxDelayTime, exceptionState);
}
-DelayNode* AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState)
+DelayNode* AbstractAudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState)
{
ASSERT(isMainThread());
if (isContextClosed()) {
@@ -539,13 +488,13 @@ DelayNode* AudioContext::createDelay(double maxDelayTime, ExceptionState& except
return DelayNode::create(*this, sampleRate(), maxDelayTime, exceptionState);
}
-ChannelSplitterNode* AudioContext::createChannelSplitter(ExceptionState& exceptionState)
+ChannelSplitterNode* AbstractAudioContext::createChannelSplitter(ExceptionState& exceptionState)
{
const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptionState);
}
-ChannelSplitterNode* AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState)
+ChannelSplitterNode* AbstractAudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState)
{
ASSERT(isMainThread());
@@ -561,20 +510,20 @@ ChannelSplitterNode* AudioContext::createChannelSplitter(size_t numberOfOutputs,
IndexSizeError,
"number of outputs (" + String::number(numberOfOutputs)
+ ") must be between 1 and "
- + String::number(AudioContext::maxNumberOfChannels()) + ".");
+ + String::number(AbstractAudioContext::maxNumberOfChannels()) + ".");
return nullptr;
}
return node;
}
-ChannelMergerNode* AudioContext::createChannelMerger(ExceptionState& exceptionState)
+ChannelMergerNode* AbstractAudioContext::createChannelMerger(ExceptionState& exceptionState)
{
const unsigned ChannelMergerDefaultNumberOfInputs = 6;
return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionState);
}
-ChannelMergerNode* AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState)
+ChannelMergerNode* AbstractAudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState)
{
ASSERT(isMainThread());
if (isContextClosed()) {
@@ -592,7 +541,7 @@ ChannelMergerNode* AudioContext::createChannelMerger(size_t numberOfInputs, Exce
numberOfInputs,
1,
ExceptionMessages::InclusiveBound,
- AudioContext::maxNumberOfChannels(),
+ AbstractAudioContext::maxNumberOfChannels(),
ExceptionMessages::InclusiveBound));
return nullptr;
}
@@ -600,7 +549,7 @@ ChannelMergerNode* AudioContext::createChannelMerger(size_t numberOfInputs, Exce
return node;
}
-OscillatorNode* AudioContext::createOscillator(ExceptionState& exceptionState)
+OscillatorNode* AbstractAudioContext::createOscillator(ExceptionState& exceptionState)
{
ASSERT(isMainThread());
if (isContextClosed()) {
@@ -616,7 +565,7 @@ OscillatorNode* AudioContext::createOscillator(ExceptionState& exceptionState)
return node;
}
-PeriodicWave* AudioContext::createPeriodicWave(DOMFloat32Array* real, DOMFloat32Array* imag, ExceptionState& exceptionState)
+PeriodicWave* AbstractAudioContext::createPeriodicWave(DOMFloat32Array* real, DOMFloat32Array* imag, ExceptionState& exceptionState)
{
ASSERT(isMainThread());
@@ -677,7 +626,7 @@ PeriodicWave* AudioContext::createPeriodicWave(DOMFloat32Array* real, DOMFloat32
return PeriodicWave::create(sampleRate(), real, imag);
}
-String AudioContext::state() const
+String AbstractAudioContext::state() const
{
// These strings had better match the strings for AudioContextState in AudioContext.idl.
switch (m_contextState) {
@@ -692,7 +641,7 @@ String AudioContext::state() const
return "";
}
-void AudioContext::setContextState(AudioContextState newState)
+void AbstractAudioContext::setContextState(AudioContextState newState)
{
ASSERT(isMainThread());
@@ -719,88 +668,21 @@ void AudioContext::setContextState(AudioContextState newState)
// Notify context that state changed
if (executionContext())
- executionContext()->postTask(FROM_HERE, createSameThreadTask(&AudioContext::notifyStateChange, this));
+ executionContext()->postTask(FROM_HERE, createSameThreadTask(&AbstractAudioContext::notifyStateChange, this));
}
-void AudioContext::notifyStateChange()
+void AbstractAudioContext::notifyStateChange()
{
dispatchEvent(Event::create(EventTypeNames::statechange));
}
-ScriptPromise AudioContext::suspendContext(ScriptState* scriptState)
-{
- ASSERT(isMainThread());
- AutoLocker locker(this);
-
- if (isOfflineContext()) {
- return ScriptPromise::rejectWithDOMException(
- scriptState,
- DOMException::create(
- InvalidAccessError,
- "cannot suspend an OfflineAudioContext"));
- }
-
- RefPtrWillBeRawPtr<ScriptPromiseResolver> resolver = ScriptPromiseResolver::create(scriptState);
- ScriptPromise promise = resolver->promise();
-
- if (m_contextState == Closed) {
- resolver->reject(
- DOMException::create(InvalidStateError, "Cannot suspend a context that has been closed"));
- } else {
- // Stop rendering now.
- if (m_destinationNode)
- stopRendering();
-
- // Since we don't have any way of knowing when the hardware actually stops, we'll just
- // resolve the promise now.
- resolver->resolve();
- }
-
- return promise;
-}
-
-ScriptPromise AudioContext::resumeContext(ScriptState* scriptState)
-{
- ASSERT(isMainThread());
- AutoLocker locker(this);
-
- if (isOfflineContext()) {
- return ScriptPromise::rejectWithDOMException(
- scriptState,
- DOMException::create(
- InvalidAccessError,
- "cannot resume an OfflineAudioContext"));
- }
-
- if (isContextClosed()) {
- return ScriptPromise::rejectWithDOMException(
- scriptState,
- DOMException::create(
- InvalidAccessError,
- "cannot resume a closed AudioContext"));
- }
-
- RefPtrWillBeRawPtr<ScriptPromiseResolver> resolver = ScriptPromiseResolver::create(scriptState);
- ScriptPromise promise = resolver->promise();
-
- // Restart the destination node to pull on the audio graph.
- if (m_destinationNode)
- startRendering();
-
- // Save the resolver which will get resolved when the destination node starts pulling on the
- // graph again.
- m_resumeResolvers.append(resolver);
-
- return promise;
-}
-
-void AudioContext::notifySourceNodeFinishedProcessing(AudioHandler* handler)
+void AbstractAudioContext::notifySourceNodeFinishedProcessing(AudioHandler* handler)
{
ASSERT(isAudioThread());
m_finishedSourceHandlers.append(handler);
}
-void AudioContext::releaseFinishedSourceNodes()
+void AbstractAudioContext::releaseFinishedSourceNodes()
{
ASSERT(isGraphOwner());
ASSERT(isAudioThread());
@@ -817,7 +699,7 @@ void AudioContext::releaseFinishedSourceNodes()
m_finishedSourceHandlers.clear();
}
-void AudioContext::notifySourceNodeStartedProcessing(AudioNode* node)
+void AbstractAudioContext::notifySourceNodeStartedProcessing(AudioNode* node)
{
ASSERT(isMainThread());
AutoLocker locker(this);
@@ -826,7 +708,7 @@ void AudioContext::notifySourceNodeStartedProcessing(AudioNode* node)
node->handler().makeConnection();
}
-void AudioContext::releaseActiveSourceNodes()
+void AbstractAudioContext::releaseActiveSourceNodes()
{
ASSERT(isMainThread());
for (auto& sourceNode : m_activeSourceNodes)
@@ -835,7 +717,7 @@ void AudioContext::releaseActiveSourceNodes()
m_activeSourceNodes.clear();
}
-void AudioContext::handleStoppableSourceNodes()
+void AbstractAudioContext::handleStoppableSourceNodes()
{
ASSERT(isGraphOwner());
@@ -848,7 +730,7 @@ void AudioContext::handleStoppableSourceNodes()
}
}
-void AudioContext::handlePreRenderTasks()
+void AbstractAudioContext::handlePreRenderTasks()
{
ASSERT(isAudioThread());
@@ -866,7 +748,7 @@ void AudioContext::handlePreRenderTasks()
}
}
-void AudioContext::handlePostRenderTasks()
+void AbstractAudioContext::handlePostRenderTasks()
{
ASSERT(isAudioThread());
@@ -887,7 +769,7 @@ void AudioContext::handlePostRenderTasks()
}
}
-void AudioContext::resolvePromisesForResumeOnMainThread()
+void AbstractAudioContext::resolvePromisesForResumeOnMainThread()
{
ASSERT(isMainThread());
AutoLocker locker(this);
@@ -905,9 +787,9 @@ void AudioContext::resolvePromisesForResumeOnMainThread()
m_isResolvingResumePromises = false;
}
-void AudioContext::resolvePromisesForResume()
+void AbstractAudioContext::resolvePromisesForResume()
{
- // This runs inside the AudioContext's lock when handling pre-render tasks.
+ // This runs inside the AbstractAudioContext's lock when handling pre-render tasks.
ASSERT(isAudioThread());
ASSERT(isGraphOwner());
@@ -916,11 +798,11 @@ void AudioContext::resolvePromisesForResume()
// promises in the main thread.
if (!m_isResolvingResumePromises && m_resumeResolvers.size() > 0) {
m_isResolvingResumePromises = true;
- Platform::current()->mainThread()->postTask(FROM_HERE, threadSafeBind(&AudioContext::resolvePromisesForResumeOnMainThread, this));
+ Platform::current()->mainThread()->postTask(FROM_HERE, threadSafeBind(&AbstractAudioContext::resolvePromisesForResumeOnMainThread, this));
}
}
-void AudioContext::rejectPendingResolvers()
+void AbstractAudioContext::rejectPendingResolvers()
{
ASSERT(isMainThread());
@@ -933,17 +815,17 @@ void AudioContext::rejectPendingResolvers()
m_isResolvingResumePromises = false;
}
-const AtomicString& AudioContext::interfaceName() const
+const AtomicString& AbstractAudioContext::interfaceName() const
{
return EventTargetNames::AudioContext;
}
-ExecutionContext* AudioContext::executionContext() const
+ExecutionContext* AbstractAudioContext::executionContext() const
{
return m_isStopScheduled ? 0 : ActiveDOMObject::executionContext();
}
-void AudioContext::startRendering()
+void AbstractAudioContext::startRendering()
{
// This is called for both online and offline contexts.
ASSERT(isMainThread());
@@ -955,20 +837,7 @@ void AudioContext::startRendering()
}
}
-void AudioContext::stopRendering()
-{
- ASSERT(isMainThread());
- ASSERT(m_destinationNode);
- ASSERT(!isOfflineContext());
-
- if (m_contextState == Running) {
- destination()->audioDestinationHandler().stopRendering();
- setContextState(Suspended);
- deferredTaskHandler().clearHandlersToBeDeleted();
- }
-}
-
-void AudioContext::fireCompletionEvent()
+void AbstractAudioContext::fireCompletionEvent()
{
ASSERT(isMainThread());
if (!isMainThread())
@@ -992,14 +861,13 @@ void AudioContext::fireCompletionEvent()
}
}
-DEFINE_TRACE(AudioContext)
+DEFINE_TRACE(AbstractAudioContext)
{
- visitor->trace(m_closeResolver);
visitor->trace(m_offlineResolver);
visitor->trace(m_renderTarget);
visitor->trace(m_destinationNode);
visitor->trace(m_listener);
- // trace() can be called in AudioContext constructor, and
+ // trace() can be called in AbstractAudioContext constructor, and
// m_contextGraphMutex might be unavailable.
if (m_didInitializeContextGraphMutex) {
AutoLocker lock(this);
@@ -1008,11 +876,11 @@ DEFINE_TRACE(AudioContext)
visitor->trace(m_activeSourceNodes);
}
visitor->trace(m_resumeResolvers);
- RefCountedGarbageCollectedEventTargetWithInlineData<AudioContext>::trace(visitor);
+ RefCountedGarbageCollectedEventTargetWithInlineData<AbstractAudioContext>::trace(visitor);
ActiveDOMObject::trace(visitor);
}
-SecurityOrigin* AudioContext::securityOrigin() const
+SecurityOrigin* AbstractAudioContext::securityOrigin() const
{
if (executionContext())
return executionContext()->securityOrigin();
@@ -1020,35 +888,6 @@ SecurityOrigin* AudioContext::securityOrigin() const
return nullptr;
}
-ScriptPromise AudioContext::closeContext(ScriptState* scriptState)
-{
- if (isOfflineContext()) {
- return ScriptPromise::rejectWithDOMException(
- scriptState,
- DOMException::create(InvalidAccessError, "cannot close an OfflineAudioContext."));
- }
-
- if (isContextClosed()) {
- // We've already closed the context previously, but it hasn't yet been resolved, so just
- // create a new promise and reject it.
- return ScriptPromise::rejectWithDOMException(
- scriptState,
- DOMException::create(InvalidStateError,
- "Cannot close a context that is being closed or has already been closed."));
- }
-
- m_closeResolver = ScriptPromiseResolver::create(scriptState);
- ScriptPromise promise = m_closeResolver->promise();
-
- // Stop the audio context. This will stop the destination node from pulling audio anymore. And
- // since we have disconnected the destination from the audio graph, and thus has no references,
- // the destination node can GCed if JS has no references. stop() will also resolve the Promise
- // created here.
- stop();
-
- return promise;
-}
-
} // namespace blink
#endif // ENABLE(WEB_AUDIO)
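
The net effect of this patch is that AbstractAudioContext keeps only the machinery shared by online and offline contexts, while realtime-only logic (the hardware-context count, the suspend/resume/close promises, stopRendering()) is left for the AudioContext subclass, with uninitialize() now delegating through didClose(). A minimal sketch of the subclass side follows. It assumes didClose() is declared as a virtual hook on AbstractAudioContext and that m_closeResolver moves into AudioContext.h; neither declaration appears in this file, so treat the sketch as illustrative of the pattern, not as the actual follow-up patch.

    // Illustrative only: the realtime subclass picks up the close logic that
    // uninitialize() previously ran inline (see the "+ didClose();" hunk above).
    class AudioContext final : public AbstractAudioContext {
    public:
        static AudioContext* create(Document&, ExceptionState&);

    protected:
        // Called from AbstractAudioContext::uninitialize(). An offline context
        // reaches the Closed state via its completion event instead, so only
        // the realtime context transitions the state and resolves the close
        // promise here (mirroring the deleted branch on !isOfflineContext()).
        void didClose() override
        {
            setContextState(Closed);
            if (m_closeResolver)
                m_closeResolver->resolve();
        }

    private:
        // Owned by the subclass now that the base DEFINE_TRACE no longer
        // visits m_closeResolver; the member type is assumed from the era's
        // transitional Oilpan conventions.
        RefPtrWillBeMember<ScriptPromiseResolver> m_closeResolver;
    };

This keeps the base class free of any isOfflineContext() checks: each subclass supplies its own shutdown behavior instead of the base branching on a flag.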