Index: third_party/WebKit/Source/modules/webaudio/BaseAudioContext.cpp
diff --git a/third_party/WebKit/Source/modules/webaudio/AbstractAudioContext.cpp b/third_party/WebKit/Source/modules/webaudio/BaseAudioContext.cpp
similarity index 75%
rename from third_party/WebKit/Source/modules/webaudio/AbstractAudioContext.cpp
rename to third_party/WebKit/Source/modules/webaudio/BaseAudioContext.cpp
index efd5cd17cfbee13f2019e419a358fa7e68b1be36..511c25db9ae51caad2f079596df4f4e3c04057df 100644
--- a/third_party/WebKit/Source/modules/webaudio/AbstractAudioContext.cpp
+++ b/third_party/WebKit/Source/modules/webaudio/BaseAudioContext.cpp
@@ -22,7 +22,8 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

-#include "modules/webaudio/AbstractAudioContext.h"
+#include "modules/webaudio/BaseAudioContext.h"
+
 #include "bindings/core/v8/Dictionary.h"
 #include "bindings/core/v8/ExceptionMessages.h"
 #include "bindings/core/v8/ExceptionState.h"
@@ -70,7 +71,7 @@
 namespace blink {

-AbstractAudioContext* AbstractAudioContext::create(Document& document, ExceptionState& exceptionState)
+BaseAudioContext* BaseAudioContext::create(Document& document, ExceptionState& exceptionState)
 {
     return AudioContext::create(document, exceptionState);
 }

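The create() factory above is the one place where the base class touches its concrete subclass: the static entry point on BaseAudioContext delegates construction to AudioContext::create(). A minimal standalone sketch of that pattern, with hypothetical names rather than Blink's:

```cpp
#include <memory>

struct Base {
    virtual ~Base() = default;
    static std::unique_ptr<Base> create(); // the only factory callers see
};

struct Derived : Base {};

// Defined after Derived, just as BaseAudioContext::create() returns an
// AudioContext: the base header exposes the factory, and the implementation
// picks the concrete subclass.
std::unique_ptr<Base> Base::create()
{
    return std::make_unique<Derived>();
}

int main()
{
    std::unique_ptr<Base> context = Base::create();
    return context ? 0 : 1;
}
```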
@@ -79,7 +80,7 @@ AbstractAudioContext* AbstractAudioContext::create(Document& document, Exception
 // and OfflineAudioContext respectively.

 // Constructor for rendering to the audio hardware.
-AbstractAudioContext::AbstractAudioContext(Document* document)
+BaseAudioContext::BaseAudioContext(Document* document)
     : ActiveScriptWrappable(this)
     , ActiveDOMObject(document)
     , m_destinationNode(nullptr)
@@ -102,7 +103,7 @@ AbstractAudioContext::AbstractAudioContext(Document* document)
 }

 // Constructor for offline (non-realtime) rendering.
-AbstractAudioContext::AbstractAudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
+BaseAudioContext::BaseAudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
     : ActiveScriptWrappable(this)
     , ActiveDOMObject(document)
     , m_destinationNode(nullptr)
@@ -121,7 +122,7 @@ AbstractAudioContext::AbstractAudioContext(Document* document, unsigned numberOf
     m_didInitializeContextGraphMutex = true;
 }

-AbstractAudioContext::~AbstractAudioContext()
+BaseAudioContext::~BaseAudioContext()
 {
     deferredTaskHandler().contextWillBeDestroyed();
     // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around.
@@ -132,7 +133,7 @@ AbstractAudioContext::~AbstractAudioContext()
     ASSERT(!m_resumeResolvers.size());
 }

-void AbstractAudioContext::initialize()
+void BaseAudioContext::initialize()
 {
     if (isDestinationInitialized())
         return;
@@ -145,7 +146,7 @@ void AbstractAudioContext::initialize()
     }
 }

-void AbstractAudioContext::clear()
+void BaseAudioContext::clear()
 {
     m_destinationNode.clear();
     // The audio rendering thread is dead. Nobody will schedule AudioHandler
@@ -154,7 +155,7 @@ void AbstractAudioContext::clear()
     m_isCleared = true;
 }

-void AbstractAudioContext::uninitialize()
+void BaseAudioContext::uninitialize()
 {
     ASSERT(isMainThread());

@@ -178,23 +179,23 @@ void AbstractAudioContext::uninitialize()
     clear();
 }

-void AbstractAudioContext::stop()
+void BaseAudioContext::stop()
 {
     uninitialize();
 }

-bool AbstractAudioContext::hasPendingActivity() const
+bool BaseAudioContext::hasPendingActivity() const
 {
     // There's no pending activity if the audio context has been cleared.
     return !m_isCleared;
 }

-void AbstractAudioContext::throwExceptionForClosedState(ExceptionState& exceptionState)
+void BaseAudioContext::throwExceptionForClosedState(ExceptionState& exceptionState)
 {
     exceptionState.throwDOMException(InvalidStateError, "AudioContext has been closed.");
 }

-AudioBuffer* AbstractAudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
+AudioBuffer* BaseAudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
 {
     // It's OK to call createBuffer even if the context is closed, because the AudioBuffer doesn't
     // really "belong" to any particular context.
@@ -202,7 +203,7 @@ AudioBuffer* AbstractAudioContext::createBuffer(unsigned numberOfChannels, size_
     return AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate, exceptionState);
 }

-ScriptPromise AbstractAudioContext::decodeAudioData(ScriptState* scriptState, DOMArrayBuffer* audioData, AudioBufferCallback* successCallback, AudioBufferCallback* errorCallback, ExceptionState& exceptionState)
+ScriptPromise BaseAudioContext::decodeAudioData(ScriptState* scriptState, DOMArrayBuffer* audioData, AudioBufferCallback* successCallback, AudioBufferCallback* errorCallback, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     ASSERT(audioData);
@@ -220,7 +221,7 @@ ScriptPromise AbstractAudioContext::decodeAudioData(ScriptState* scriptState, DO
     return promise;
 }

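decodeAudioData() above returns its promise immediately and parks the resolver in a member set (m_decodeAudioResolvers, released in handleDecodeAudioData() below) so the resolver stays alive while the decode runs. A standalone sketch of that keep-alive pattern, with hypothetical names rather than Blink's API:

```cpp
#include <functional>
#include <iostream>
#include <memory>
#include <set>

struct Resolver {
    std::function<void(int)> onResolved; // fires when the async work completes
};

class DecodeTracker {
public:
    // Start an async decode: the returned resolver is also retained in
    // m_pending so it outlives the caller's stack frame.
    std::shared_ptr<Resolver> startDecode()
    {
        auto resolver = std::make_shared<Resolver>();
        m_pending.insert(resolver);
        return resolver;
    }

    // Completion handler: settle the promise, then drop the keep-alive
    // reference, mirroring what handleDecodeAudioData() does in the diff.
    void finishDecode(const std::shared_ptr<Resolver>& resolver, int result)
    {
        if (resolver->onResolved)
            resolver->onResolved(result);
        m_pending.erase(resolver);
    }

private:
    std::set<std::shared_ptr<Resolver>> m_pending;
};

int main()
{
    DecodeTracker tracker;
    auto resolver = tracker.startDecode();
    resolver->onResolved = [](int r) { std::cout << "decoded " << r << " frames\n"; };
    tracker.finishDecode(resolver, 42);
}
```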
-void AbstractAudioContext::handleDecodeAudioData(AudioBuffer* audioBuffer, ScriptPromiseResolver* resolver, AudioBufferCallback* successCallback, AudioBufferCallback* errorCallback)
+void BaseAudioContext::handleDecodeAudioData(AudioBuffer* audioBuffer, ScriptPromiseResolver* resolver, AudioBufferCallback* successCallback, AudioBufferCallback* errorCallback)
 {
     ASSERT(isMainThread());

@@ -242,7 +243,7 @@ void AbstractAudioContext::handleDecodeAudioData(AudioBuffer* audioBuffer, Scrip
     m_decodeAudioResolvers.remove(resolver);
 }

-AudioBufferSourceNode* AbstractAudioContext::createBufferSource(ExceptionState& exceptionState)
+AudioBufferSourceNode* BaseAudioContext::createBufferSource(ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());

@@ -259,90 +260,25 @@ AudioBufferSourceNode* AbstractAudioContext::createBufferSource(ExceptionState&
     return node;
 }

-MediaElementAudioSourceNode* AbstractAudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState)
-{
-    ASSERT(isMainThread());
-
-    if (isContextClosed()) {
-        throwExceptionForClosedState(exceptionState);
-        return nullptr;
-    }
-
-    // First check if this media element already has a source node.
-    if (mediaElement->audioSourceNode()) {
-        exceptionState.throwDOMException(
-            InvalidStateError,
-            "HTMLMediaElement already connected previously to a different MediaElementSourceNode.");
-        return nullptr;
-    }
-
-    MediaElementAudioSourceNode* node = MediaElementAudioSourceNode::create(*this, *mediaElement);
-
-    mediaElement->setAudioSourceNode(node);
-
-    notifySourceNodeStartedProcessing(node); // context keeps reference until node is disconnected
-    return node;
-}
-
-MediaStreamAudioSourceNode* AbstractAudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState)
-{
-    ASSERT(isMainThread());
-
-    if (isContextClosed()) {
-        throwExceptionForClosedState(exceptionState);
-        return nullptr;
-    }
-
-    MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks();
-    if (audioTracks.isEmpty()) {
-        exceptionState.throwDOMException(
-            InvalidStateError,
-            "MediaStream has no audio track");
-        return nullptr;
-    }
-
-    // Use the first audio track in the media stream.
-    MediaStreamTrack* audioTrack = audioTracks[0];
-    OwnPtr<AudioSourceProvider> provider = audioTrack->createWebAudioSource();
-    MediaStreamAudioSourceNode* node = MediaStreamAudioSourceNode::create(*this, *mediaStream, audioTrack, provider.release());
-
-    // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
-    node->setFormat(2, sampleRate());
-
-    notifySourceNodeStartedProcessing(node); // context keeps reference until node is disconnected
-    return node;
-}
-
-MediaStreamAudioDestinationNode* AbstractAudioContext::createMediaStreamDestination(ExceptionState& exceptionState)
-{
-    if (isContextClosed()) {
-        throwExceptionForClosedState(exceptionState);
-        return nullptr;
-    }
-
-    // Set number of output channels to stereo by default.
-    return MediaStreamAudioDestinationNode::create(*this, 2);
-}
-
-ScriptProcessorNode* AbstractAudioContext::createScriptProcessor(ExceptionState& exceptionState)
+ScriptProcessorNode* BaseAudioContext::createScriptProcessor(ExceptionState& exceptionState)
 {
     // Set number of input/output channels to stereo by default.
     return createScriptProcessor(0, 2, 2, exceptionState);
 }

-ScriptProcessorNode* AbstractAudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState)
+ScriptProcessorNode* BaseAudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState)
 {
     // Set number of input/output channels to stereo by default.
     return createScriptProcessor(bufferSize, 2, 2, exceptionState);
 }

-ScriptProcessorNode* AbstractAudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState)
+ScriptProcessorNode* BaseAudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState)
 {
     // Set number of output channels to stereo by default.
     return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exceptionState);
 }

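The three thin createScriptProcessor() overloads above exist only to fill in defaults (stereo in/out, an auto-selected buffer size of 0) and forward to the four-argument overload that follows, so the real validation lives in exactly one place. The same idiom in a standalone sketch with hypothetical names:

```cpp
#include <cstddef>
#include <iostream>

// Full implementation: the only overload that does real work.
int createProcessor(std::size_t bufferSize, std::size_t inputChannels, std::size_t outputChannels)
{
    std::cout << bufferSize << " / " << inputChannels << " / " << outputChannels << "\n";
    return 0;
}

// Thinner overloads forward with defaults, mirroring the cascade in the diff.
int createProcessor(std::size_t bufferSize, std::size_t inputChannels) { return createProcessor(bufferSize, inputChannels, 2); }
int createProcessor(std::size_t bufferSize) { return createProcessor(bufferSize, 2, 2); }
int createProcessor() { return createProcessor(0, 2, 2); }

int main()
{
    createProcessor(); // prints "0 / 2 / 2"
}
```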
-ScriptProcessorNode* AbstractAudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState)
+ScriptProcessorNode* BaseAudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());

@@ -358,18 +294,18 @@ ScriptProcessorNode* AbstractAudioContext::createScriptProcessor(size_t bufferSi
             exceptionState.throwDOMException(
                 IndexSizeError,
                 "number of input channels and output channels cannot both be zero.");
-        } else if (numberOfInputChannels > AbstractAudioContext::maxNumberOfChannels()) {
+        } else if (numberOfInputChannels > BaseAudioContext::maxNumberOfChannels()) {
             exceptionState.throwDOMException(
                 IndexSizeError,
                 "number of input channels (" + String::number(numberOfInputChannels)
                 + ") exceeds maximum ("
-                + String::number(AbstractAudioContext::maxNumberOfChannels()) + ").");
-        } else if (numberOfOutputChannels > AbstractAudioContext::maxNumberOfChannels()) {
+                + String::number(BaseAudioContext::maxNumberOfChannels()) + ").");
+        } else if (numberOfOutputChannels > BaseAudioContext::maxNumberOfChannels()) {
             exceptionState.throwDOMException(
                 IndexSizeError,
"number of output channels (" + String::number(numberOfInputChannels) |
+ ") exceeds maximum (" |
- + String::number(AbstractAudioContext::maxNumberOfChannels()) + ")."); |
+ + String::number(BaseAudioContext::maxNumberOfChannels()) + ")."); |
} else { |
exceptionState.throwDOMException( |
IndexSizeError, |
@@ -383,7 +319,7 @@ ScriptProcessorNode* AbstractAudioContext::createScriptProcessor(size_t bufferSi |
return node; |
} |
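The validation above reports the offending value alongside the allowed bound, so the resulting IndexSizeError message is self-explanatory. A standalone sketch of equivalent checks (maxChannels stands in for maxNumberOfChannels(); names are illustrative):

```cpp
#include <cstddef>
#include <optional>
#include <string>

// Returns an error message when the channel counts are invalid, or
// std::nullopt when they are acceptable.
std::optional<std::string> validateChannels(std::size_t inputs, std::size_t outputs, std::size_t maxChannels)
{
    if (inputs == 0 && outputs == 0)
        return "number of input channels and output channels cannot both be zero.";
    if (inputs > maxChannels)
        return "number of input channels (" + std::to_string(inputs)
            + ") exceeds maximum (" + std::to_string(maxChannels) + ").";
    if (outputs > maxChannels)
        return "number of output channels (" + std::to_string(outputs)
            + ") exceeds maximum (" + std::to_string(maxChannels) + ").";
    return std::nullopt;
}

int main()
{
    return validateChannels(2, 2, 32).has_value() ? 1 : 0; // stereo in/out: valid
}
```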
-StereoPannerNode* AbstractAudioContext::createStereoPanner(ExceptionState& exceptionState)
+StereoPannerNode* BaseAudioContext::createStereoPanner(ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     if (isContextClosed()) {
@@ -394,7 +330,7 @@ StereoPannerNode* AbstractAudioContext::createStereoPanner(ExceptionState& excep
     return StereoPannerNode::create(*this, sampleRate());
 }

-BiquadFilterNode* AbstractAudioContext::createBiquadFilter(ExceptionState& exceptionState)
+BiquadFilterNode* BaseAudioContext::createBiquadFilter(ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     if (isContextClosed()) {
@@ -405,7 +341,7 @@ BiquadFilterNode* AbstractAudioContext::createBiquadFilter(ExceptionState& excep
     return BiquadFilterNode::create(*this, sampleRate());
 }

-WaveShaperNode* AbstractAudioContext::createWaveShaper(ExceptionState& exceptionState)
+WaveShaperNode* BaseAudioContext::createWaveShaper(ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     if (isContextClosed()) {
@@ -416,7 +352,7 @@ WaveShaperNode* AbstractAudioContext::createWaveShaper(ExceptionState& exception
     return WaveShaperNode::create(*this);
 }

-PannerNode* AbstractAudioContext::createPanner(ExceptionState& exceptionState)
+PannerNode* BaseAudioContext::createPanner(ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     if (isContextClosed()) {
@@ -427,7 +363,7 @@ PannerNode* AbstractAudioContext::createPanner(ExceptionState& exceptionState)
     return PannerNode::create(*this, sampleRate());
 }

-ConvolverNode* AbstractAudioContext::createConvolver(ExceptionState& exceptionState)
+ConvolverNode* BaseAudioContext::createConvolver(ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     if (isContextClosed()) {
@@ -438,7 +374,7 @@ ConvolverNode* AbstractAudioContext::createConvolver(ExceptionState& exceptionSt
     return ConvolverNode::create(*this, sampleRate());
 }

-DynamicsCompressorNode* AbstractAudioContext::createDynamicsCompressor(ExceptionState& exceptionState)
+DynamicsCompressorNode* BaseAudioContext::createDynamicsCompressor(ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     if (isContextClosed()) {
@@ -449,7 +385,7 @@ DynamicsCompressorNode* AbstractAudioContext::createDynamicsCompressor(Exception
     return DynamicsCompressorNode::create(*this, sampleRate());
 }

-AnalyserNode* AbstractAudioContext::createAnalyser(ExceptionState& exceptionState)
+AnalyserNode* BaseAudioContext::createAnalyser(ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     if (isContextClosed()) {
@@ -460,7 +396,7 @@ AnalyserNode* AbstractAudioContext::createAnalyser(ExceptionState& exceptionStat
     return AnalyserNode::create(*this, sampleRate());
 }

-GainNode* AbstractAudioContext::createGain(ExceptionState& exceptionState)
+GainNode* BaseAudioContext::createGain(ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     if (isContextClosed()) {
@@ -471,13 +407,13 @@ GainNode* AbstractAudioContext::createGain(ExceptionState& exceptionState)
     return GainNode::create(*this, sampleRate());
 }

-DelayNode* AbstractAudioContext::createDelay(ExceptionState& exceptionState)
+DelayNode* BaseAudioContext::createDelay(ExceptionState& exceptionState)
 {
     const double defaultMaxDelayTime = 1;
     return createDelay(defaultMaxDelayTime, exceptionState);
 }

-DelayNode* AbstractAudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState)
+DelayNode* BaseAudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     if (isContextClosed()) {
@@ -488,13 +424,13 @@ DelayNode* AbstractAudioContext::createDelay(double maxDelayTime, ExceptionState
     return DelayNode::create(*this, sampleRate(), maxDelayTime, exceptionState);
 }

-ChannelSplitterNode* AbstractAudioContext::createChannelSplitter(ExceptionState& exceptionState)
+ChannelSplitterNode* BaseAudioContext::createChannelSplitter(ExceptionState& exceptionState)
 {
     const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
     return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptionState);
 }

-ChannelSplitterNode* AbstractAudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState)
+ChannelSplitterNode* BaseAudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());

@@ -510,20 +446,20 @@ ChannelSplitterNode* AbstractAudioContext::createChannelSplitter(size_t numberOf
             IndexSizeError,
             "number of outputs (" + String::number(numberOfOutputs)
             + ") must be between 1 and "
-            + String::number(AbstractAudioContext::maxNumberOfChannels()) + ".");
+            + String::number(BaseAudioContext::maxNumberOfChannels()) + ".");
         return nullptr;
     }

     return node;
 }

-ChannelMergerNode* AbstractAudioContext::createChannelMerger(ExceptionState& exceptionState)
+ChannelMergerNode* BaseAudioContext::createChannelMerger(ExceptionState& exceptionState)
 {
     const unsigned ChannelMergerDefaultNumberOfInputs = 6;
     return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionState);
 }

-ChannelMergerNode* AbstractAudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState)
+ChannelMergerNode* BaseAudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     if (isContextClosed()) {
@@ -541,7 +477,7 @@ ChannelMergerNode* AbstractAudioContext::createChannelMerger(size_t numberOfInpu
                 numberOfInputs,
                 1,
                 ExceptionMessages::InclusiveBound,
-                AbstractAudioContext::maxNumberOfChannels(),
+                BaseAudioContext::maxNumberOfChannels(),
                 ExceptionMessages::InclusiveBound));
         return nullptr;
     }
@@ -549,7 +485,7 @@ ChannelMergerNode* AbstractAudioContext::createChannelMerger(size_t numberOfInpu
     return node;
 }

-OscillatorNode* AbstractAudioContext::createOscillator(ExceptionState& exceptionState)
+OscillatorNode* BaseAudioContext::createOscillator(ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     if (isContextClosed()) {
@@ -565,12 +501,12 @@ OscillatorNode* AbstractAudioContext::createOscillator(ExceptionState& exception
     return node;
 }

-PeriodicWave* AbstractAudioContext::createPeriodicWave(DOMFloat32Array* real, DOMFloat32Array* imag, ExceptionState& exceptionState)
+PeriodicWave* BaseAudioContext::createPeriodicWave(DOMFloat32Array* real, DOMFloat32Array* imag, ExceptionState& exceptionState)
 {
     return PeriodicWave::create(sampleRate(), real, imag, false);
 }

-PeriodicWave* AbstractAudioContext::createPeriodicWave(DOMFloat32Array* real, DOMFloat32Array* imag, const Dictionary& options, ExceptionState& exceptionState)
+PeriodicWave* BaseAudioContext::createPeriodicWave(DOMFloat32Array* real, DOMFloat32Array* imag, const Dictionary& options, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());

@@ -594,7 +530,7 @@ PeriodicWave* AbstractAudioContext::createPeriodicWave(DOMFloat32Array* real, DO
     return PeriodicWave::create(sampleRate(), real, imag, isNormalizationDisabled);
 }

-IIRFilterNode* AbstractAudioContext::createIIRFilter(Vector<double> feedforwardCoef, Vector<double> feedbackCoef, ExceptionState& exceptionState)
+IIRFilterNode* BaseAudioContext::createIIRFilter(Vector<double> feedforwardCoef, Vector<double> feedbackCoef, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());

@@ -678,7 +614,7 @@ IIRFilterNode* AbstractAudioContext::createIIRFilter(Vector<double> feedforwardC
     return IIRFilterNode::create(*this, sampleRate(), feedforwardCoef, feedbackCoef);
 }

-PeriodicWave* AbstractAudioContext::periodicWave(int type)
+PeriodicWave* BaseAudioContext::periodicWave(int type)
 {
     switch (type) {
     case OscillatorHandler::SINE:
@@ -707,7 +643,7 @@ PeriodicWave* AbstractAudioContext::periodicWave(int type)
     }
 }

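periodicWave(type) above hands out shared waveforms keyed by oscillator type; the traced members m_periodicWaveSine, m_periodicWaveSquare, m_periodicWaveSawtooth, and m_periodicWaveTriangle seen later in the diff cache one instance each. A standalone sketch of that lazy-cache-per-enum pattern (names are illustrative):

```cpp
#include <memory>

struct Wave { /* precomputed wavetable */ };

class WaveCache {
public:
    enum Type { Sine, Square, Sawtooth, Triangle };

    Wave* get(Type type)
    {
        switch (type) {
        case Sine:
            if (!m_sine)
                m_sine = std::make_unique<Wave>(); // built on first use, then shared
            return m_sine.get();
        case Square:
            if (!m_square)
                m_square = std::make_unique<Wave>();
            return m_square.get();
        default: // Sawtooth and Triangle would follow the same pattern
            return nullptr;
        }
    }

private:
    std::unique_ptr<Wave> m_sine;
    std::unique_ptr<Wave> m_square;
};

int main()
{
    WaveCache cache;
    return cache.get(WaveCache::Sine) == cache.get(WaveCache::Sine) ? 0 : 1; // same instance
}
```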
-String AbstractAudioContext::state() const
+String BaseAudioContext::state() const
 {
     // These strings had better match the strings for AudioContextState in AudioContext.idl.
     switch (m_contextState) {
@@ -722,7 +658,7 @@ String AbstractAudioContext::state() const
     return "";
 }

-void AbstractAudioContext::setContextState(AudioContextState newState)
+void BaseAudioContext::setContextState(AudioContextState newState)
 {
     ASSERT(isMainThread());

@@ -749,21 +685,21 @@ void AbstractAudioContext::setContextState(AudioContextState newState)
     // Notify context that state changed
     if (getExecutionContext())
-        getExecutionContext()->postTask(BLINK_FROM_HERE, createSameThreadTask(&AbstractAudioContext::notifyStateChange, this));
+        getExecutionContext()->postTask(BLINK_FROM_HERE, createSameThreadTask(&BaseAudioContext::notifyStateChange, this));
 }

-void AbstractAudioContext::notifyStateChange()
+void BaseAudioContext::notifyStateChange()
 {
     dispatchEvent(Event::create(EventTypeNames::statechange));
 }

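setContextState() above does not fire the statechange event inline: it posts a task back to the context's own thread so listeners run only after the state write has completed. A standalone sketch of that deferred-notification pattern using a simple task queue (names are hypothetical):

```cpp
#include <functional>
#include <iostream>
#include <queue>

class StateMachine {
public:
    void setState(int newState)
    {
        m_state = newState;
        // Defer the notification instead of calling listeners synchronously.
        m_tasks.push([this] { notifyStateChange(); });
    }

    // Stands in for the event loop draining posted tasks.
    void runPendingTasks()
    {
        while (!m_tasks.empty()) {
            m_tasks.front()();
            m_tasks.pop();
        }
    }

private:
    void notifyStateChange() { std::cout << "statechange -> " << m_state << "\n"; }

    int m_state = 0;
    std::queue<std::function<void()>> m_tasks;
};

int main()
{
    StateMachine machine;
    machine.setState(1); // nothing printed yet
    machine.runPendingTasks(); // listeners observe the new state here
}
```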
-void AbstractAudioContext::notifySourceNodeFinishedProcessing(AudioHandler* handler)
+void BaseAudioContext::notifySourceNodeFinishedProcessing(AudioHandler* handler)
 {
     ASSERT(isAudioThread());
     m_finishedSourceHandlers.append(handler);
 }

-void AbstractAudioContext::releaseFinishedSourceNodes()
+void BaseAudioContext::releaseFinishedSourceNodes()
 {
     ASSERT(isGraphOwner());
     ASSERT(isAudioThread());

@@ -780,7 +716,7 @@ void AbstractAudioContext::releaseFinishedSourceNodes()
     m_finishedSourceHandlers.clear();
 }

-void AbstractAudioContext::notifySourceNodeStartedProcessing(AudioNode* node)
+void BaseAudioContext::notifySourceNodeStartedProcessing(AudioNode* node)
 {
     ASSERT(isMainThread());
     AutoLocker locker(this);
@@ -789,7 +725,7 @@ void AbstractAudioContext::notifySourceNodeStartedProcessing(AudioNode* node)
     node->handler().makeConnection();
 }

-void AbstractAudioContext::releaseActiveSourceNodes()
+void BaseAudioContext::releaseActiveSourceNodes()
 {
     ASSERT(isMainThread());
     for (auto& sourceNode : m_activeSourceNodes)
@@ -798,7 +734,7 @@ void AbstractAudioContext::releaseActiveSourceNodes()
     m_activeSourceNodes.clear();
 }

-void AbstractAudioContext::handleStoppableSourceNodes()
+void BaseAudioContext::handleStoppableSourceNodes()
 {
     ASSERT(isGraphOwner());

@@ -811,7 +747,7 @@ void AbstractAudioContext::handleStoppableSourceNodes()
     }
 }

-void AbstractAudioContext::handlePreRenderTasks()
+void BaseAudioContext::handlePreRenderTasks()
 {
     ASSERT(isAudioThread());

@@ -829,7 +765,7 @@ void AbstractAudioContext::handlePreRenderTasks()
     }
 }

-void AbstractAudioContext::handlePostRenderTasks()
+void BaseAudioContext::handlePostRenderTasks()
 {
     ASSERT(isAudioThread());

@@ -850,7 +786,7 @@ void AbstractAudioContext::handlePostRenderTasks()
     }
 }

-void AbstractAudioContext::resolvePromisesForResumeOnMainThread()
+void BaseAudioContext::resolvePromisesForResumeOnMainThread()
 {
     ASSERT(isMainThread());
     AutoLocker locker(this);
@@ -868,9 +804,9 @@ void AbstractAudioContext::resolvePromisesForResumeOnMainThread()
     m_isResolvingResumePromises = false;
 }

-void AbstractAudioContext::resolvePromisesForResume()
+void BaseAudioContext::resolvePromisesForResume()
 {
-    // This runs inside the AbstractAudioContext's lock when handling pre-render tasks.
+    // This runs inside the BaseAudioContext's lock when handling pre-render tasks.
     ASSERT(isAudioThread());
     ASSERT(isGraphOwner());

@@ -879,11 +815,11 @@ void AbstractAudioContext::resolvePromisesForResume()
     // promises in the main thread.
     if (!m_isResolvingResumePromises && m_resumeResolvers.size() > 0) {
         m_isResolvingResumePromises = true;
-        Platform::current()->mainThread()->getWebTaskRunner()->postTask(BLINK_FROM_HERE, threadSafeBind(&AbstractAudioContext::resolvePromisesForResumeOnMainThread, this));
+        Platform::current()->mainThread()->getWebTaskRunner()->postTask(BLINK_FROM_HERE, threadSafeBind(&BaseAudioContext::resolvePromisesForResumeOnMainThread, this));
     }
 }

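resolvePromisesForResume() above runs on the audio thread, where promises must not be resolved, so it posts a single task to the main thread; the m_isResolvingResumePromises flag keeps successive render quanta from posting duplicate tasks before the first one runs. A standalone sketch of that guarded thread hand-off (the queue stands in for a real cross-thread task runner):

```cpp
#include <cstddef>
#include <functional>
#include <iostream>
#include <utility>
#include <vector>

class ResumeScheduler {
public:
    // Audio thread: called once per render quantum.
    void maybeScheduleResolution()
    {
        if (m_isResolving)
            return; // a main-thread task is already in flight
        m_isResolving = true;
        postToMainThread([this] { resolveOnMainThread(); });
    }

    // "Main thread": drain whatever was posted.
    void drainMainThreadTasks()
    {
        for (auto& task : m_queue)
            task();
        m_queue.clear();
    }

    std::size_t pendingTasks() const { return m_queue.size(); }

private:
    void resolveOnMainThread()
    {
        std::cout << "resolving resume promises\n";
        m_isResolving = false; // the next batch may schedule again
    }

    void postToMainThread(std::function<void()> task) { m_queue.push_back(std::move(task)); }

    bool m_isResolving = false; // the real code reads this under the graph lock
    std::vector<std::function<void()>> m_queue;
};

int main()
{
    ResumeScheduler scheduler;
    scheduler.maybeScheduleResolution();
    scheduler.maybeScheduleResolution(); // deduplicated by the flag
    bool deduplicated = scheduler.pendingTasks() == 1;
    scheduler.drainMainThreadTasks();
    return deduplicated ? 0 : 1;
}
```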
-void AbstractAudioContext::rejectPendingResolvers()
+void BaseAudioContext::rejectPendingResolvers()
 {
     ASSERT(isMainThread());

@@ -901,17 +837,17 @@ void AbstractAudioContext::rejectPendingResolvers()
     m_decodeAudioResolvers.clear();
 }

-const AtomicString& AbstractAudioContext::interfaceName() const
+const AtomicString& BaseAudioContext::interfaceName() const
 {
     return EventTargetNames::AudioContext;
 }

-ExecutionContext* AbstractAudioContext::getExecutionContext() const
+ExecutionContext* BaseAudioContext::getExecutionContext() const
 {
     return ActiveDOMObject::getExecutionContext();
 }

-void AbstractAudioContext::startRendering()
+void BaseAudioContext::startRendering()
 {
     // This is called for both online and offline contexts.
     ASSERT(isMainThread());
@@ -923,11 +859,11 @@ void AbstractAudioContext::startRendering()
     }
 }

-DEFINE_TRACE(AbstractAudioContext)
+DEFINE_TRACE(BaseAudioContext)
 {
     visitor->trace(m_destinationNode);
     visitor->trace(m_listener);
-    // trace() can be called in AbstractAudioContext constructor, and
+    // trace() can be called in BaseAudioContext constructor, and
     // m_contextGraphMutex might be unavailable.
     if (m_didInitializeContextGraphMutex) {
         AutoLocker lock(this);
@@ -942,11 +878,11 @@ DEFINE_TRACE(AbstractAudioContext)
     visitor->trace(m_periodicWaveSquare);
     visitor->trace(m_periodicWaveSawtooth);
     visitor->trace(m_periodicWaveTriangle);
-    RefCountedGarbageCollectedEventTargetWithInlineData<AbstractAudioContext>::trace(visitor);
+    RefCountedGarbageCollectedEventTargetWithInlineData<BaseAudioContext>::trace(visitor);
     ActiveDOMObject::trace(visitor);
 }

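DEFINE_TRACE above guards its AutoLocker with m_didInitializeContextGraphMutex (set at the end of the constructors earlier in the diff) because the garbage collector may trace the object while construction is still in progress, before the graph mutex is usable. A standalone sketch of that construction-time guard (illustrative only; in plain C++ the mutex member is ready earlier than in the real code):

```cpp
#include <mutex>

class Graph {
public:
    Graph()
    {
        // ... member setup during which visit() may already be invoked
        // in the real code ...
        m_mutexInitialized = true; // only from here on is locking safe
    }

    void visit()
    {
        if (m_mutexInitialized) {
            std::lock_guard<std::mutex> lock(m_mutex); // safe: mutex is ready
            // walk members that another thread can also touch
        }
        // members that never race can be visited without the lock
    }

private:
    bool m_mutexInitialized = false;
    std::mutex m_mutex;
};

int main()
{
    Graph graph;
    graph.visit();
}
```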
-SecurityOrigin* AbstractAudioContext::getSecurityOrigin() const
+SecurityOrigin* BaseAudioContext::getSecurityOrigin() const
 {
     if (getExecutionContext())
         return getExecutionContext()->getSecurityOrigin();