Index: Source/modules/webaudio/AudioNode.h
diff --git a/Source/modules/webaudio/AudioNode.h b/Source/modules/webaudio/AudioNode.h
index 8099081df79065a1519bcd8d92b0d7612e1a3528..e7801e20652ba73d76bd22de5deeff4fc4f4773f 100644
--- a/Source/modules/webaudio/AudioNode.h
+++ b/Source/modules/webaudio/AudioNode.h
@@ -26,6 +26,7 @@
 #define AudioNode_h
 
 #include "AudioBus.h"
+#include "EventTarget.h"
 #include <wtf/Forward.h>
 #include <wtf/OwnPtr.h>
 #include <wtf/PassOwnPtr.h>
@@ -49,7 +50,7 @@ typedef int ExceptionCode;
 // An AudioDestinationNode has one input and no outputs and represents the final destination to the audio hardware.
 // Most processing nodes such as filters will have one input and one output, although multiple inputs and outputs are possible.
-class AudioNode {
+class AudioNode : public EventTarget {
 public:
     enum { ProcessingSizeInFrames = 128 };
 
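Note: deriving AudioNode from EventTarget is what lets Web Audio nodes carry DOM event listeners at all. As a hedged illustration (hypothetical subclass and event name, not part of this patch), any node can now dispatch events directly:

    // Illustrative only: dispatchEvent() is inherited through EventTarget.
    void ExampleNode::notifyFinished()
    {
        dispatchEvent(Event::create(eventNames().endedEvent, false, false));
    }
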
@@ -152,7 +153,7 @@ public:
     // tailTime() is the length of time (not counting latency time) where non-zero output may occur after continuous silent input.
     virtual double tailTime() const = 0;
 
     // latencyTime() is the length of time it takes for non-zero output to appear after non-zero input is provided. This only applies to
-    // processing delay which is an artifact of the processing algorithm chosen and is *not* part of the intrinsic desired effect. For 
+    // processing delay which is an artifact of the processing algorithm chosen and is *not* part of the intrinsic desired effect. For
     // example, a "delay" effect is expected to delay the signal, and thus would not be considered latency.
     virtual double latencyTime() const = 0;
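To make the tail/latency distinction above concrete, here is a hedged sketch of a hypothetical fixed delay effect (not part of this patch). Its 0.5 s delay is the intended effect, so it is reported as tail time rather than latency:

    // Hypothetical node, for illustration only.
    class ExampleDelayNode : public AudioNode {
    public:
        // The 0.5 s delay is the desired effect, so it counts toward tailTime()...
        virtual double tailTime() const OVERRIDE { return 0.5; }
        // ...and contributes nothing to latencyTime(), which covers only
        // incidental processing delay introduced by the chosen algorithm.
        virtual double latencyTime() const OVERRIDE { return 0; }
    };
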
@@ -180,11 +181,17 @@ public:
     ChannelCountMode internalChannelCountMode() const { return m_channelCountMode; }
     AudioBus::ChannelInterpretation internalChannelInterpretation() const { return m_channelInterpretation; }
 
+    // EventTarget
+    virtual const AtomicString& interfaceName() const OVERRIDE;
+    virtual ScriptExecutionContext* scriptExecutionContext() const OVERRIDE;
+    virtual EventTargetData* eventTargetData() OVERRIDE { return &m_eventTargetData; }
+    virtual EventTargetData* ensureEventTargetData() OVERRIDE { return &m_eventTargetData; }
+
 protected:
     // Inputs and outputs must be created before the AudioNode is initialized.
     void addInput(PassOwnPtr<AudioNodeInput>);
     void addOutput(PassOwnPtr<AudioNodeOutput>);
-    
+
     // Called by processIfNecessary() to cause all parts of the rendering graph connected to us to process.
     // Each rendering quantum, the audio data for each of the AudioNode's inputs will be available after this method is called.
     // Called from context's audio thread.
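The out-of-line definitions for interfaceName() and scriptExecutionContext() would live in AudioNode.cpp, which this diff does not show. A plausible sketch, assuming the usual WebKit EventTarget pattern; the interfaceForAudioNode name and the route through the node's AudioContext are assumptions:

    // Sketch of the matching AudioNode.cpp definitions (not in this diff).
    const AtomicString& AudioNode::interfaceName() const
    {
        return eventNames().interfaceForAudioNode;
    }

    ScriptExecutionContext* AudioNode::scriptExecutionContext() const
    {
        // The node reaches the script world through its owning AudioContext.
        return const_cast<AudioNode*>(this)->context()->scriptExecutionContext();
    }
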
@@ -201,13 +208,15 @@ private:
     Vector<OwnPtr<AudioNodeInput> > m_inputs;
     Vector<OwnPtr<AudioNodeOutput> > m_outputs;
 
+    EventTargetData m_eventTargetData;
+
     double m_lastProcessingTime;
     double m_lastNonSilentTime;
 
     // Ref-counting
     volatile int m_normalRefCount;
     volatile int m_connectionRefCount;
-    
+
     bool m_isMarkedForDeletion;
     bool m_isDisabled;
@@ -216,7 +225,11 @@
     static int s_nodeCount[NodeTypeEnd];
 #endif
 
+    virtual void refEventTarget() OVERRIDE { ref(); }
+    virtual void derefEventTarget() OVERRIDE { deref(); }
+
 protected:
+

Chris Rogers 2013/04/16 21:03:03: extra blank line?

     unsigned m_channelCount;
     ChannelCountMode m_channelCountMode;
     AudioBus::ChannelInterpretation m_channelInterpretation;
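A design note on the refEventTarget()/derefEventTarget() overrides above: AudioNode keeps its own dual reference counts (m_normalRefCount / m_connectionRefCount) rather than deriving from RefCounted<>, so EventTarget has to be told how to pin the node. A hedged sketch of the effect, assuming EventTarget::ref()/deref() forward to these hooks as in WebKit of this era (the helper below is hypothetical):

    // Illustrative only: holding the node through its EventTarget face
    // keeps it alive via AudioNode's own reference counting.
    void holdForListeners(AudioNode* node)
    {
        EventTarget* target = node; // upcast made possible by this patch
        target->ref();              // routes to AudioNode::ref() via refEventTarget()
        // ... addEventListener() / dispatchEvent() while pinned ...
        target->deref();            // routes to AudioNode::deref()
    }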