Index: Source/modules/webaudio/AudioContext.h
diff --git a/Source/modules/webaudio/AudioContext.h b/Source/modules/webaudio/AudioContext.h
index 415e2358bcdb8cdd6bdd9d9cb4906ce94afa114b..d946f1649d1db18234728023ade3ee70e970c94a 100644
--- a/Source/modules/webaudio/AudioContext.h
+++ b/Source/modules/webaudio/AudioContext.h
@@ -29,6 +29,7 @@
 #include "core/dom/ActiveDOMObject.h"
 #include "core/events/EventListener.h"
 #include "core/events/EventTarget.h"
+#include "heap/Handle.h"
 #include "platform/audio/AudioBus.h"
 #include "modules/webaudio/AsyncAudioDecoder.h"
 #include "modules/webaudio/AudioDestinationNode.h"
@@ -73,17 +74,19 @@ class WaveShaperNode;
 // AudioContext is the cornerstone of the web audio API and all AudioNodes are created from it.
 // For thread safety between the audio thread and the main thread, it has a rendering graph locking mechanism.
 
-class AudioContext : public ActiveDOMObject, public ScriptWrappable, public ThreadSafeRefCounted<AudioContext>, public EventTargetWithInlineData {
-    DEFINE_EVENT_TARGET_REFCOUNTING(ThreadSafeRefCounted<AudioContext>);
+class AudioContext : public ActiveDOMObject, public ScriptWrappable, public ThreadSafeRefCountedWillBeRefCountedGarbageCollected<AudioContext>, public EventTargetWithInlineData {
+    DEFINE_EVENT_TARGET_REFCOUNTING(ThreadSafeRefCountedWillBeRefCountedGarbageCollected<AudioContext>);
 public:
     // Create an AudioContext for rendering to the audio hardware.
-    static PassRefPtr<AudioContext> create(Document&, ExceptionState&);
+    static PassRefPtrWillBeRawPtr<AudioContext> create(Document&, ExceptionState&);
 
     // Deprecated: create an AudioContext for offline (non-realtime) rendering.
-    static PassRefPtr<AudioContext> create(Document&, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState&);
+    static PassRefPtrWillBeRawPtr<AudioContext> create(Document&, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState&);
 
     virtual ~AudioContext();
 
+    virtual void trace(Visitor*);
+
     bool isInitialized() const;
 
     bool isOfflineContext() { return m_isOfflineContext; }
@@ -100,8 +103,8 @@ public:
     void incrementActiveSourceCount();
     void decrementActiveSourceCount();
 
-    PassRefPtr<AudioBuffer> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState&);
-    PassRefPtr<AudioBuffer> createBuffer(ArrayBuffer*, bool mixToMono, ExceptionState&);
+    PassRefPtrWillBeRawPtr<AudioBuffer> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState&);
+    PassRefPtrWillBeRawPtr<AudioBuffer> createBuffer(ArrayBuffer*, bool mixToMono, ExceptionState&);
 
     // Asynchronous audio file data decoding.
     void decodeAudioData(ArrayBuffer*, PassOwnPtr<AudioBufferCallback>, PassOwnPtr<AudioBufferCallback>, ExceptionState&);
@@ -131,7 +134,7 @@ public:
     PassRefPtr<ChannelMergerNode> createChannelMerger(ExceptionState&);
     PassRefPtr<ChannelMergerNode> createChannelMerger(size_t numberOfInputs, ExceptionState&);
     PassRefPtr<OscillatorNode> createOscillator();
-    PassRefPtr<PeriodicWave> createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState&);
+    PassRefPtrWillBeRawPtr<PeriodicWave> createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState&);
 
     // When a source node has no more processing to do (has finished playing), then it tells the context to dereference it.
     void notifyNodeFinishedProcessing(AudioNode*);
@@ -271,7 +274,7 @@ private:
     void derefUnfinishedSourceNodes();
 
     RefPtr<AudioDestinationNode> m_destinationNode;
-    RefPtr<AudioListener> m_listener;
+    RefPtrWillBeMember<AudioListener> m_listener;
 
     // Only accessed in the audio thread.
     Vector<AudioNode*> m_finishedNodes;
@@ -315,7 +318,7 @@ private:
     // Only accessed in the audio thread.
     Vector<AudioNode*> m_deferredFinishDerefList;
 
-    RefPtr<AudioBuffer> m_renderTarget;
+    RefPtrWillBeMember<AudioBuffer> m_renderTarget;
 
     bool m_isOfflineContext;
 
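
Note on the transition: with Blink's "WillBe" types, RefPtrWillBeMember<T> compiles to Member<T> once Oilpan is enabled (and falls back to plain RefPtr<T> otherwise), so every field converted here must be reachable from the newly declared trace(Visitor*). The matching definition lives in AudioContext.cpp and is not part of this header diff; the following is a minimal sketch assuming only the two members converted above need visiting (the actual CL may trace additional fields):

void AudioContext::trace(Visitor* visitor)
{
    // Under Oilpan these fields are Member<T>, so the garbage collector
    // keeps the AudioListener and the offline render target alive through
    // this visit rather than through reference counting.
    visitor->trace(m_listener);
    visitor->trace(m_renderTarget);
}

In non-Oilpan builds the Visitor has no-op overloads for RefPtr fields, which is what allows a transitional change like this to land before the collector is switched on.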