Index: Source/modules/webaudio/AudioContext.cpp
diff --git a/Source/modules/webaudio/AudioContext.cpp b/Source/modules/webaudio/AudioContext.cpp
index 78da508a89fc7f9150eaa98e17ec664e4183795c..869bb69ac6af1f63cf0c2a453307a4e6a543e7e8 100644
--- a/Source/modules/webaudio/AudioContext.cpp
+++ b/Source/modules/webaudio/AudioContext.cpp
@@ -89,7 +89,7 @@ bool AudioContext::isSampleRateRangeGood(float sampleRate)
 const unsigned MaxHardwareContexts = 6;
 unsigned AudioContext::s_hardwareContextCount = 0;
 
-PassRefPtr<AudioContext> AudioContext::create(Document& document, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<AudioContext> AudioContext::create(Document& document, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     if (s_hardwareContextCount >= MaxHardwareContexts) {
@@ -99,12 +99,12 @@ PassRefPtr<AudioContext> AudioContext::create(Document& document, ExceptionState
         return nullptr;
     }
 
-    RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(&document)));
+    RefPtrWillBeRawPtr<AudioContext> audioContext(adoptRefWillBeThreadSafeRefCountedGarbageCollected(new AudioContext(&document)));
     audioContext->suspendIfNeeded();
     return audioContext.release();
 }
 
-PassRefPtr<AudioContext> AudioContext::create(Document& document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<AudioContext> AudioContext::create(Document& document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
 {
     document.addConsoleMessage(JSMessageSource, WarningMessageLevel, "Deprecated AudioContext constructor: use OfflineAudioContext instead");
     return OfflineAudioContext::create(&document, numberOfChannels, numberOfFrames, sampleRate, exceptionState);
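
The WillBe names above are Blink's transition aliases for moving a class from WTF reference counting to Oilpan, the garbage-collected heap. As a rough sketch of the mechanism (the real definitions live in platform/heap/Handle.h and differ in detail), each alias resolves to a garbage-collected handle when Oilpan is enabled and falls back to plain reference counting otherwise:

    // Illustrative sketch only; not the exact Handle.h definitions.
    #if ENABLE(OILPAN)
    // The object lives on the managed heap, so locals and return values can
    // be raw pointers: conservative stack scanning keeps them alive.
    #define RefPtrWillBeRawPtr RawPtr
    #define PassRefPtrWillBeRawPtr RawPtr
    #else
    // Without Oilpan, the same code compiles back to reference counting.
    #define RefPtrWillBeRawPtr RefPtr
    #define PassRefPtrWillBeRawPtr PassRefPtr
    #endif

Under the same scheme, adoptRefWillBeThreadSafeRefCountedGarbageCollected() behaves like adoptRef() in the non-Oilpan build; with Oilpan it instead hands the freshly allocated context over to the managed heap rather than starting a refcount at one.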
@@ -281,9 +281,9 @@ bool AudioContext::hasPendingActivity() const
     return !m_isCleared;
 }
 
-PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
 {
-    RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
+    RefPtrWillBeRawPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
     if (!audioBuffer.get()) {
         if (numberOfChannels > AudioContext::maxNumberOfChannels()) {
             exceptionState.throwDOMException(
@@ -313,7 +313,7 @@ PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, si
     return audioBuffer;
 }
 
-PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionState& exceptionState)
 {
     ASSERT(arrayBuffer);
     if (!arrayBuffer) {
@@ -323,7 +323,7 @@ PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, boo
         return nullptr;
     }
 
-    RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate());
+    RefPtrWillBeRawPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate());
     if (!audioBuffer.get()) {
         exceptionState.throwDOMException(
             SyntaxError,
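
Bodies such as the if (!audioBuffer.get()) checks above compile unchanged in both builds because RefPtr and Oilpan's RawPtr deliberately share the same minimal pointer surface. A sketch of that shared interface, under an illustrative name:

    // Illustrative subset of the interface shared by RefPtr<T> and RawPtr<T>;
    // RawPtr adds no refcounting, since the GC owns the object.
    template <typename T>
    class PtrSketch {
    public:
        PtrSketch(T* ptr) : m_ptr(ptr) { }
        T* get() const { return m_ptr; }
        T* operator->() const { return m_ptr; }
        T* release() { T* ptr = m_ptr; m_ptr = 0; return ptr; }
    private:
        T* m_ptr;
    };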
@@ -345,10 +345,10 @@ void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassOwnPtr<AudioBuffe
     m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);
 }
 
-PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
+PassRefPtrWillBeRawPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
 {
     ASSERT(isMainThread());
-    RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());
+    RefPtrWillBeRawPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());
 
     // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
     // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
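
The comment above describes the lifetime protocol for scheduled sources: the context pins a node when playback is scheduled and unpins it when AudioScheduledSourceNode::finish() fires. A minimal sketch of the idea, with hypothetical names standing in for the real refNode()/notifyNodeFinishedProcessing() machinery:

    // Hypothetical sketch of the pinning protocol for scheduled sources.
    class NodeSketch : public RefCounted<NodeSketch> { };

    class ContextSketch {
    public:
        // Playback scheduled: hold a strong reference to the node.
        void pinWhilePlaying(PassRefPtr<NodeSketch> node) { m_playing.append(node); }
        // finish() fired: drop the reference so the node can be destroyed.
        void notifyFinished(NodeSketch* node)
        {
            size_t index = m_playing.find(node);
            if (index != kNotFound)
                m_playing.remove(index);
        }
    private:
        Vector<RefPtr<NodeSketch> > m_playing;
    };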
@@ -357,7 +357,7 @@ PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
     return node;
 }
 
-PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     if (!mediaElement) {
@@ -375,7 +375,7 @@ PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(H
         return nullptr;
     }
 
-    RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);
+    RefPtrWillBeRawPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);
 
     mediaElement->setAudioSourceNode(node.get());
 
@@ -383,7 +383,7 @@ PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(H
     return node;
 }
 
-PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     if (!mediaStream) {
@@ -404,7 +404,7 @@ PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(Med
     // Use the first audio track in the media stream.
     RefPtr<MediaStreamTrack> audioTrack = audioTracks[0];
     OwnPtr<AudioSourceProvider> provider = audioTrack->createWebAudioSource();
-    RefPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack.get(), provider.release());
+    RefPtrWillBeRawPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack.get(), provider.release());
 
     // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
     node->setFormat(2, sampleRate());
@@ -413,34 +413,34 @@ PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(Med
     return node;
 }
 
-PassRefPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
+PassRefPtrWillBeRawPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
 {
     // Set number of output channels to stereo by default.
     return MediaStreamAudioDestinationNode::create(this, 2);
 }
 
-PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(ExceptionState& exceptionState)
 {
     // Set number of input/output channels to stereo by default.
     return createScriptProcessor(0, 2, 2, exceptionState);
 }
 
-PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState)
 {
     // Set number of input/output channels to stereo by default.
     return createScriptProcessor(bufferSize, 2, 2, exceptionState);
 }
 
-PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState)
 {
     // Set number of output channels to stereo by default.
     return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exceptionState);
 }
 
-PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
-    RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
+    RefPtrWillBeRawPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
 
     if (!node.get()) {
         if (!numberOfInputChannels && !numberOfOutputChannels) {
@@ -472,73 +472,74 @@ PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t buffe
     return node;
 }
 
-PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
+PassRefPtrWillBeRawPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
 {
     ASSERT(isMainThread());
     return BiquadFilterNode::create(this, m_destinationNode->sampleRate());
 }
 
-PassRefPtr<WaveShaperNode> AudioContext::createWaveShaper()
+PassRefPtrWillBeRawPtr<WaveShaperNode> AudioContext::createWaveShaper()
 {
     ASSERT(isMainThread());
     return WaveShaperNode::create(this);
 }
 
-PassRefPtr<PannerNode> AudioContext::createPanner()
+PassRefPtrWillBeRawPtr<PannerNode> AudioContext::createPanner()
 {
     ASSERT(isMainThread());
     return PannerNode::create(this, m_destinationNode->sampleRate());
 }
 
-PassRefPtr<ConvolverNode> AudioContext::createConvolver()
+PassRefPtrWillBeRawPtr<ConvolverNode> AudioContext::createConvolver()
 {
     ASSERT(isMainThread());
     return ConvolverNode::create(this, m_destinationNode->sampleRate());
 }
 
-PassRefPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
+PassRefPtrWillBeRawPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
 {
     ASSERT(isMainThread());
     return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate());
 }
 
-PassRefPtr<AnalyserNode> AudioContext::createAnalyser()
+PassRefPtrWillBeRawPtr<AnalyserNode> AudioContext::createAnalyser()
 {
     ASSERT(isMainThread());
     return AnalyserNode::create(this, m_destinationNode->sampleRate());
 }
 
-PassRefPtr<GainNode> AudioContext::createGain()
+PassRefPtrWillBeRawPtr<GainNode> AudioContext::createGain()
 {
     ASSERT(isMainThread());
     return GainNode::create(this, m_destinationNode->sampleRate());
 }
 
-PassRefPtr<DelayNode> AudioContext::createDelay(ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(ExceptionState& exceptionState)
 {
     const double defaultMaxDelayTime = 1;
     return createDelay(defaultMaxDelayTime, exceptionState);
 }
 
-PassRefPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
-    RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, exceptionState);
+    RefPtrWillBeRawPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, exceptionState);
     if (exceptionState.hadException())
         return nullptr;
     return node;
 }
 
-PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionState& exceptionState)
 {
     const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
     return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptionState);
 }
 
-PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
-    RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);
+
+    RefPtrWillBeRawPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);
 
     if (!node.get()) {
         exceptionState.throwDOMException(
@@ -552,16 +553,17 @@ PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numbe
     return node;
 }
 
-PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState& exceptionState)
 {
     const unsigned ChannelMergerDefaultNumberOfInputs = 6;
     return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionState);
 }
 
-PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
-    RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);
+
+    RefPtrWillBeRawPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);
 
     if (!node.get()) {
         exceptionState.throwDOMException(
@@ -575,10 +577,11 @@ PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfI
     return node;
 }
 
-PassRefPtr<OscillatorNode> AudioContext::createOscillator()
+PassRefPtrWillBeRawPtr<OscillatorNode> AudioContext::createOscillator()
 {
     ASSERT(isMainThread());
-    RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate());
+
+    RefPtrWillBeRawPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate());
 
     // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
     // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
@@ -587,7 +590,7 @@ PassRefPtr<OscillatorNode> AudioContext::createOscillator()
     return node;
 }
 
-PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
 
@@ -869,7 +872,7 @@ void AudioContext::deleteMarkedNodes()
     ASSERT(isMainThread());
 
     // Protect this object from being deleted before we release the mutex locked by AutoLocker.
-    RefPtr<AudioContext> protect(this);
+    RefPtrWillBeRawPtr<AudioContext> protect(this);
     {
         AutoLocker locker(this);
 
@@ -886,9 +889,14 @@ void AudioContext::deleteMarkedNodes()
             unsigned numberOfOutputs = node->numberOfOutputs();
             for (unsigned i = 0; i < numberOfOutputs; ++i)
                 m_dirtyAudioNodeOutputs.remove(node->output(i));
-
+#if ENABLE(OILPAN)
+            // Finally, clear the keep alive handle that keeps this
+            // object from being collected.
+            node->clearKeepAlive();
+#else
             // Finally, delete it.
             delete node;
+#endif
         }
         m_isDeletionScheduled = false;
     }
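
Two things change in deleteMarkedNodes(). The protect guard still compiles because under Oilpan it degenerates to a raw pointer, with the collector rather than a refcount keeping the context alive across the lock. And the Oilpan build must not use delete on a heap-managed object, so each node instead pins itself with a keep-alive handle while the context needs it; clearKeepAlive() drops that handle and leaves the node to the next garbage collection. A rough sketch of such a handle, assuming Oilpan's Persistent<T> (the real AudioNode declaration differs):

    // Rough sketch of the self-pinning idiom behind clearKeepAlive();
    // the member name and layout here are illustrative.
    class PinnedNodeSketch : public GarbageCollected<PinnedNodeSketch> {
    public:
        void makeKeepAlive() { m_keepAlive = this; }     // acts as a GC root
        void clearKeepAlive() { m_keepAlive = nullptr; } // collectible again
        void trace(Visitor*) { }
    private:
        Persistent<PinnedNodeSketch> m_keepAlive; // persistent self-reference
    };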
@@ -1023,6 +1031,13 @@ void AudioContext::decrementActiveSourceCount()
     atomicDecrement(&m_activeSourceCount);
 }
 
+void AudioContext::trace(Visitor* visitor)
+{
+    visitor->trace(m_renderTarget);
+    visitor->trace(m_destinationNode);
+    visitor->trace(m_listener);
+}
+
 } // namespace WebCore
 
 #endif // ENABLE(WEB_AUDIO)
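
The new trace() method is the core of the Oilpan contract: during marking, the collector calls it so the object can report every heap object it keeps alive, which is what replaces the per-member RefPtr bookkeeping removed throughout this patch. In miniature, with illustrative names:

    // Minimal illustration of the tracing contract; names are hypothetical.
    class ExampleNode : public GarbageCollected<ExampleNode> {
    public:
        void trace(Visitor* visitor)
        {
            // Report traced members; anything left unmarked is collectible.
            visitor->trace(m_other);
        }
    private:
        Member<ExampleNode> m_other; // traced pointer between heap objects
    };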