Chromium Code Reviews | Index: Source/modules/webaudio/AudioContext.cpp |
| diff --git a/Source/modules/webaudio/AudioContext.cpp b/Source/modules/webaudio/AudioContext.cpp |
| index ced806dde9a14e7f74b3ac1b9e838d79ed01f252..d111200e4ce52b2608c654364f218f213116fbcf 100644 |
| --- a/Source/modules/webaudio/AudioContext.cpp |
| +++ b/Source/modules/webaudio/AudioContext.cpp |
| @@ -89,7 +89,7 @@ bool AudioContext::isSampleRateRangeGood(float sampleRate) |
| const unsigned MaxHardwareContexts = 6; |
| unsigned AudioContext::s_hardwareContextCount = 0; |
| -PassRefPtr<AudioContext> AudioContext::create(Document& document, ExceptionState& exceptionState) |
| +PassRefPtrWillBeRawPtr<AudioContext> AudioContext::create(Document& document, ExceptionState& exceptionState) |
| { |
| ASSERT(isMainThread()); |
| if (s_hardwareContextCount >= MaxHardwareContexts) { |
| @@ -99,12 +99,12 @@ PassRefPtr<AudioContext> AudioContext::create(Document& document, ExceptionState |
| return nullptr; |
| } |
| - RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(&document))); |
| + RefPtrWillBeRawPtr<AudioContext> audioContext(adoptRefWillBeThreadSafeRefCountedGarbageCollected(new AudioContext(&document))); |
| audioContext->suspendIfNeeded(); |
| return audioContext.release(); |
| } |
| -PassRefPtr<AudioContext> AudioContext::create(Document& document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState) |
| +PassRefPtrWillBeRawPtr<AudioContext> AudioContext::create(Document& document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState) |
| { |
| document.addConsoleMessage(JSMessageSource, WarningMessageLevel, "Deprecated AudioContext constructor: use OfflineAudioContext instead"); |
| return OfflineAudioContext::create(&document, numberOfChannels, numberOfFrames, sampleRate, exceptionState); |
| @@ -279,9 +279,9 @@ bool AudioContext::hasPendingActivity() const |
| return !m_isCleared; |
| } |
| -PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState) |
| +PassRefPtrWillBeRawPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState) |
| { |
| - RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate); |
| + RefPtrWillBeRawPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate); |
| if (!audioBuffer.get()) { |
| if (numberOfChannels > AudioContext::maxNumberOfChannels()) { |
| exceptionState.throwDOMException( |
| @@ -311,7 +311,7 @@ PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, si |
| return audioBuffer; |
| } |
| -PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionState& exceptionState) |
| +PassRefPtrWillBeRawPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionState& exceptionState) |
| { |
| ASSERT(arrayBuffer); |
| if (!arrayBuffer) { |
| @@ -321,7 +321,7 @@ PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, boo |
| return nullptr; |
| } |
| - RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate()); |
| + RefPtrWillBeRawPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate()); |
| if (!audioBuffer.get()) { |
| exceptionState.throwDOMException( |
| SyntaxError, |
| @@ -343,11 +343,11 @@ void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassOwnPtr<AudioBuffe |
| m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback); |
| } |
| -PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource() |
| +PassRefPtrWillBeRawPtr<AudioBufferSourceNode> AudioContext::createBufferSource() |
| { |
| ASSERT(isMainThread()); |
| lazyInitialize(); |
| - RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate()); |
| + RefPtrWillBeRawPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate()); |
| // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing. |
| // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing(). |
| @@ -356,7 +356,7 @@ PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource() |
| return node; |
| } |
| -PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState) |
| +PassRefPtrWillBeRawPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState) |
| { |
| if (!mediaElement) { |
| exceptionState.throwDOMException( |
| @@ -376,7 +376,7 @@ PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(H |
| return nullptr; |
| } |
| - RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement); |
| + RefPtrWillBeRawPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement); |
| mediaElement->setAudioSourceNode(node.get()); |
| @@ -384,7 +384,7 @@ PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(H |
| return node; |
| } |
| -PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState) |
| +PassRefPtrWillBeRawPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState) |
| { |
| if (!mediaStream) { |
| exceptionState.throwDOMException( |
| @@ -407,7 +407,7 @@ PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(Med |
| // Use the first audio track in the media stream. |
| RefPtr<MediaStreamTrack> audioTrack = audioTracks[0]; |
| OwnPtr<AudioSourceProvider> provider = audioTrack->createWebAudioSource(); |
| - RefPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack.get(), provider.release()); |
| + RefPtrWillBeRawPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack.get(), provider.release()); |
| // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams. |
| node->setFormat(2, sampleRate()); |
| @@ -416,35 +416,35 @@ PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(Med |
| return node; |
| } |
| -PassRefPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination() |
| +PassRefPtrWillBeRawPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination() |
| { |
| // Set number of output channels to stereo by default. |
| return MediaStreamAudioDestinationNode::create(this, 2); |
| } |
| -PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(ExceptionState& exceptionState) |
| +PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(ExceptionState& exceptionState) |
| { |
| // Set number of input/output channels to stereo by default. |
| return createScriptProcessor(0, 2, 2, exceptionState); |
| } |
| -PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState) |
| +PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState) |
| { |
| // Set number of input/output channels to stereo by default. |
| return createScriptProcessor(bufferSize, 2, 2, exceptionState); |
| } |
| -PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState) |
| +PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState) |
| { |
| // Set number of output channels to stereo by default. |
| return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exceptionState); |
| } |
| -PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState) |
| +PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState) |
| { |
| ASSERT(isMainThread()); |
| lazyInitialize(); |
| - RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels); |
| + RefPtrWillBeRawPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels); |
| if (!node.get()) { |
| if (!numberOfInputChannels && !numberOfOutputChannels) { |
| @@ -476,83 +476,83 @@ PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t buffe |
| return node; |
| } |
| -PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter() |
| +PassRefPtrWillBeRawPtr<BiquadFilterNode> AudioContext::createBiquadFilter() |
| { |
| ASSERT(isMainThread()); |
| lazyInitialize(); |
| return BiquadFilterNode::create(this, m_destinationNode->sampleRate()); |
| } |
| -PassRefPtr<WaveShaperNode> AudioContext::createWaveShaper() |
| +PassRefPtrWillBeRawPtr<WaveShaperNode> AudioContext::createWaveShaper() |
| { |
| ASSERT(isMainThread()); |
| lazyInitialize(); |
| return WaveShaperNode::create(this); |
| } |
| -PassRefPtr<PannerNode> AudioContext::createPanner() |
| +PassRefPtrWillBeRawPtr<PannerNode> AudioContext::createPanner() |
| { |
| ASSERT(isMainThread()); |
| lazyInitialize(); |
| return PannerNode::create(this, m_destinationNode->sampleRate()); |
| } |
| -PassRefPtr<ConvolverNode> AudioContext::createConvolver() |
| +PassRefPtrWillBeRawPtr<ConvolverNode> AudioContext::createConvolver() |
| { |
| ASSERT(isMainThread()); |
| lazyInitialize(); |
| return ConvolverNode::create(this, m_destinationNode->sampleRate()); |
| } |
| -PassRefPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor() |
| +PassRefPtrWillBeRawPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor() |
| { |
| ASSERT(isMainThread()); |
| lazyInitialize(); |
| return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate()); |
| } |
| -PassRefPtr<AnalyserNode> AudioContext::createAnalyser() |
| +PassRefPtrWillBeRawPtr<AnalyserNode> AudioContext::createAnalyser() |
| { |
| ASSERT(isMainThread()); |
| lazyInitialize(); |
| return AnalyserNode::create(this, m_destinationNode->sampleRate()); |
| } |
| -PassRefPtr<GainNode> AudioContext::createGain() |
| +PassRefPtrWillBeRawPtr<GainNode> AudioContext::createGain() |
| { |
| ASSERT(isMainThread()); |
| lazyInitialize(); |
| return GainNode::create(this, m_destinationNode->sampleRate()); |
| } |
| -PassRefPtr<DelayNode> AudioContext::createDelay(ExceptionState& exceptionState) |
| +PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(ExceptionState& exceptionState) |
| { |
| const double defaultMaxDelayTime = 1; |
| return createDelay(defaultMaxDelayTime, exceptionState); |
| } |
| -PassRefPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState) |
| +PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState) |
| { |
| ASSERT(isMainThread()); |
| lazyInitialize(); |
| - RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, exceptionState); |
| + RefPtrWillBeRawPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, exceptionState); |
| if (exceptionState.hadException()) |
| return nullptr; |
| return node; |
| } |
| -PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionState& exceptionState) |
| +PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionState& exceptionState) |
| { |
| const unsigned ChannelSplitterDefaultNumberOfOutputs = 6; |
| return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptionState); |
| } |
| -PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState) |
| +PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState) |
| { |
| ASSERT(isMainThread()); |
| lazyInitialize(); |
| - RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs); |
| + RefPtrWillBeRawPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs); |
| if (!node.get()) { |
| exceptionState.throwDOMException( |
| @@ -566,18 +566,18 @@ PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numbe |
| return node; |
| } |
| -PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState& exceptionState) |
| +PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState& exceptionState) |
| { |
| const unsigned ChannelMergerDefaultNumberOfInputs = 6; |
| return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionState); |
| } |
| -PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState) |
| +PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState) |
| { |
| ASSERT(isMainThread()); |
| lazyInitialize(); |
| - RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs); |
| + RefPtrWillBeRawPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs); |
| if (!node.get()) { |
| exceptionState.throwDOMException( |
| @@ -591,12 +591,12 @@ PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfI |
| return node; |
| } |
| -PassRefPtr<OscillatorNode> AudioContext::createOscillator() |
| +PassRefPtrWillBeRawPtr<OscillatorNode> AudioContext::createOscillator() |
| { |
| ASSERT(isMainThread()); |
| lazyInitialize(); |
| - RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate()); |
| + RefPtrWillBeRawPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate()); |
| // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing. |
| // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing(). |
| @@ -605,7 +605,7 @@ PassRefPtr<OscillatorNode> AudioContext::createOscillator() |
| return node; |
| } |
| -PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& exceptionState) |
| +PassRefPtrWillBeRawPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& exceptionState) |
| { |
| ASSERT(isMainThread()); |
| @@ -888,7 +888,7 @@ void AudioContext::deleteMarkedNodes() |
| ASSERT(isMainThread()); |
| // Protect this object from being deleted before we release the mutex locked by AutoLocker. |
| - RefPtr<AudioContext> protect(this); |
| + RefPtrWillBeRawPtr<AudioContext> protect(this); |
| { |
| AutoLocker locker(this); |
| @@ -905,9 +905,11 @@ void AudioContext::deleteMarkedNodes() |
| unsigned numberOfOutputs = node->numberOfOutputs(); |
| for (unsigned i = 0; i < numberOfOutputs; ++i) |
| m_dirtyAudioNodeOutputs.remove(node->output(i)); |
| - |
| - // Finally, delete it. |
| - delete node; |
| +#if ENABLE(OILPAN) |
| + // Finally, clear the keep alive handle that keeps this |
| + // object from being collected. |
| + node->clearKeepAlive(); |
| +#endif |
|
Mads Ager (chromium)
2014/03/27 11:06:49
You need to keep the code for the non-oilpan version.
haraken
2014/03/27 11:44:05
You'll need to call 'delete node' in #if !ENABLE(OILPAN).
keishi
2014/04/03 06:53:19
Done.
|
| } |
| m_isDeletionScheduled = false; |
| } |
| @@ -1042,6 +1044,12 @@ void AudioContext::decrementActiveSourceCount() |
| atomicDecrement(&m_activeSourceCount); |
| } |
| +void AudioContext::trace(Visitor* visitor) |
| +{ |
| + visitor->trace(m_renderTarget); |
| + visitor->trace(m_listener); |
| +} |
| + |
| } // namespace WebCore |
| #endif // ENABLE(WEB_AUDIO) |