OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2010, Google Inc. All rights reserved. | 2 * Copyright (C) 2010, Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
6 * are met: | 6 * are met: |
7 * 1. Redistributions of source code must retain the above copyright | 7 * 1. Redistributions of source code must retain the above copyright |
8 * notice, this list of conditions and the following disclaimer. | 8 * notice, this list of conditions and the following disclaimer. |
9 * 2. Redistributions in binary form must reproduce the above copyright | 9 * 2. Redistributions in binary form must reproduce the above copyright |
10 * notice, this list of conditions and the following disclaimer in the | 10 * notice, this list of conditions and the following disclaimer in the |
11 * documentation and/or other materials provided with the distribution. | 11 * documentation and/or other materials provided with the distribution. |
12 * | 12 * |
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY | 13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY |
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | 14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
15 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | 15 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
16 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY | 16 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY |
17 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | 17 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
18 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | 18 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
19 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | 19 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON |
20 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 20 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | 21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
22 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 22 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
23 */ | 23 */ |
24 | 24 |
25 #ifndef AudioNode_h | 25 #ifndef AudioNode_h |
26 #define AudioNode_h | 26 #define AudioNode_h |
27 | 27 |
28 #include "bindings/v8/ScriptWrappable.h" | 28 #include "bindings/v8/ScriptWrappable.h" |
| 29 #include "core/dom/EventTarget.h" |
29 #include "core/platform/audio/AudioBus.h" | 30 #include "core/platform/audio/AudioBus.h" |
30 #include "wtf/Forward.h" | 31 #include "wtf/Forward.h" |
31 #include "wtf/OwnPtr.h" | 32 #include "wtf/OwnPtr.h" |
32 #include "wtf/PassOwnPtr.h" | 33 #include "wtf/PassOwnPtr.h" |
33 #include "wtf/RefPtr.h" | 34 #include "wtf/RefPtr.h" |
34 #include "wtf/Vector.h" | 35 #include "wtf/Vector.h" |
35 | 36 |
36 #define DEBUG_AUDIONODE_REFERENCES 0 | 37 #define DEBUG_AUDIONODE_REFERENCES 0 |
37 | 38 |
38 namespace WebCore { | 39 namespace WebCore { |
39 | 40 |
40 class AudioContext; | 41 class AudioContext; |
41 class AudioNodeInput; | 42 class AudioNodeInput; |
42 class AudioNodeOutput; | 43 class AudioNodeOutput; |
43 class AudioParam; | 44 class AudioParam; |
44 | 45 |
45 typedef int ExceptionCode; | 46 typedef int ExceptionCode; |
46 | 47 |
47 // An AudioNode is the basic building block for handling audio within an AudioContext. | 48 // An AudioNode is the basic building block for handling audio within an AudioContext. |
48 // It may be an audio source, an intermediate processing module, or an audio destination. | 49 // It may be an audio source, an intermediate processing module, or an audio destination. |
49 // Each AudioNode can have inputs and/or outputs. An AudioSourceNode has no inputs and a single output. | 50 // Each AudioNode can have inputs and/or outputs. An AudioSourceNode has no inputs and a single output. |
50 // An AudioDestinationNode has one input and no outputs and represents the final destination to the audio hardware. | 51 // An AudioDestinationNode has one input and no outputs and represents the final destination to the audio hardware. |
51 // Most processing nodes such as filters will have one input and one output, although multiple inputs and outputs are possible. | 52 // Most processing nodes such as filters will have one input and one output, although multiple inputs and outputs are possible. |
52 | 53 |
53 class AudioNode : public ScriptWrappable { | 54 class AudioNode : public ScriptWrappable, public EventTarget { |
54 public: | 55 public: |
55 enum { ProcessingSizeInFrames = 128 }; | 56 enum { ProcessingSizeInFrames = 128 }; |
56 | 57 |
57 AudioNode(AudioContext*, float sampleRate); | 58 AudioNode(AudioContext*, float sampleRate); |
58 virtual ~AudioNode(); | 59 virtual ~AudioNode(); |
59 | 60 |
60 AudioContext* context() { return m_context.get(); } | 61 AudioContext* context() { return m_context.get(); } |
61 const AudioContext* context() const { return m_context.get(); } | 62 const AudioContext* context() const { return m_context.get(); } |
62 | 63 |
63 enum NodeType { | 64 enum NodeType { |
(...skipping 82 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
146 | 147 |
147 #if DEBUG_AUDIONODE_REFERENCES | 148 #if DEBUG_AUDIONODE_REFERENCES |
148 static void printNodeCounts(); | 149 static void printNodeCounts(); |
149 #endif | 150 #endif |
150 | 151 |
151 bool isMarkedForDeletion() const { return m_isMarkedForDeletion; } | 152 bool isMarkedForDeletion() const { return m_isMarkedForDeletion; } |
152 | 153 |
153 // tailTime() is the length of time (not counting latency time) where non-zero output may occur after continuous silent input. | 154 // tailTime() is the length of time (not counting latency time) where non-zero output may occur after continuous silent input. |
154 virtual double tailTime() const = 0; | 155 virtual double tailTime() const = 0; |
155 // latencyTime() is the length of time it takes for non-zero output to appear after non-zero input is provided. This only applies to | 156 // latencyTime() is the length of time it takes for non-zero output to appear after non-zero input is provided. This only applies to |
156 // processing delay which is an artifact of the processing algorithm chosen and is *not* part of the intrinsic desired effect. For | 157 // processing delay which is an artifact of the processing algorithm chosen and is *not* part of the intrinsic desired effect. For |
157 // example, a "delay" effect is expected to delay the signal, and thus would not be considered latency. | 158 // example, a "delay" effect is expected to delay the signal, and thus would not be considered latency. |
158 virtual double latencyTime() const = 0; | 159 virtual double latencyTime() const = 0; |
159 | 160 |
160 // propagatesSilence() should return true if the node will generate silent output when given silent input. By default, AudioNode | 161 // propagatesSilence() should return true if the node will generate silent output when given silent input. By default, AudioNode |
161 // will take tailTime() and latencyTime() into account when determining whether the node will propagate silence. | 162 // will take tailTime() and latencyTime() into account when determining whether the node will propagate silence. |
162 virtual bool propagatesSilence() const; | 163 virtual bool propagatesSilence() const; |
163 bool inputsAreSilent(); | 164 bool inputsAreSilent(); |
164 void silenceOutputs(); | 165 void silenceOutputs(); |
165 void unsilenceOutputs(); | 166 void unsilenceOutputs(); |
166 | 167 |
167 void enableOutputsIfNecessary(); | 168 void enableOutputsIfNecessary(); |
168 void disableOutputsIfNecessary(); | 169 void disableOutputsIfNecessary(); |
169 | 170 |
170 void reportMemoryUsage(MemoryObjectInfo*) const; | 171 void reportMemoryUsage(MemoryObjectInfo*) const; |
171 | 172 |
172 unsigned long channelCount(); | 173 unsigned long channelCount(); |
173 virtual void setChannelCount(unsigned long, ExceptionCode&); | 174 virtual void setChannelCount(unsigned long, ExceptionCode&); |
174 | 175 |
175 String channelCountMode(); | 176 String channelCountMode(); |
176 void setChannelCountMode(const String&, ExceptionCode&); | 177 void setChannelCountMode(const String&, ExceptionCode&); |
177 | 178 |
178 String channelInterpretation(); | 179 String channelInterpretation(); |
179 void setChannelInterpretation(const String&, ExceptionCode&); | 180 void setChannelInterpretation(const String&, ExceptionCode&); |
180 | 181 |
181 ChannelCountMode internalChannelCountMode() const { return m_channelCountMode; } | 182 ChannelCountMode internalChannelCountMode() const { return m_channelCountMode; } |
182 AudioBus::ChannelInterpretation internalChannelInterpretation() const { return m_channelInterpretation; } | 183 AudioBus::ChannelInterpretation internalChannelInterpretation() const { return m_channelInterpretation; } |
183 | 184 |
| 185 // EventTarget |
| 186 virtual const AtomicString& interfaceName() const OVERRIDE; |
| 187 virtual ScriptExecutionContext* scriptExecutionContext() const OVERRIDE; |
| 188 virtual EventTargetData* eventTargetData() OVERRIDE { return &m_eventTargetData; } |
| 189 virtual EventTargetData* ensureEventTargetData() OVERRIDE { return &m_eventTargetData; } |
| 190 |
184 protected: | 191 protected: |
185 // Inputs and outputs must be created before the AudioNode is initialized. | 192 // Inputs and outputs must be created before the AudioNode is initialized. |
186 void addInput(PassOwnPtr<AudioNodeInput>); | 193 void addInput(PassOwnPtr<AudioNodeInput>); |
187 void addOutput(PassOwnPtr<AudioNodeOutput>); | 194 void addOutput(PassOwnPtr<AudioNodeOutput>); |
188 | 195 |
189 // Called by processIfNecessary() to cause all parts of the rendering graph connected to us to process. | 196 // Called by processIfNecessary() to cause all parts of the rendering graph connected to us to process. |
190 // Each rendering quantum, the audio data for each of the AudioNode's inputs will be available after this method is called. | 197 // Each rendering quantum, the audio data for each of the AudioNode's inputs will be available after this method is called. |
191 // Called from context's audio thread. | 198 // Called from context's audio thread. |
192 virtual void pullInputs(size_t framesToProcess); | 199 virtual void pullInputs(size_t framesToProcess); |
193 | 200 |
194 // Force all inputs to take any channel interpretation changes into account. | 201 // Force all inputs to take any channel interpretation changes into account. |
195 void updateChannelsForInputs(); | 202 void updateChannelsForInputs(); |
196 | 203 |
197 private: | 204 private: |
198 volatile bool m_isInitialized; | 205 volatile bool m_isInitialized; |
199 NodeType m_nodeType; | 206 NodeType m_nodeType; |
200 RefPtr<AudioContext> m_context; | 207 RefPtr<AudioContext> m_context; |
201 float m_sampleRate; | 208 float m_sampleRate; |
202 Vector<OwnPtr<AudioNodeInput> > m_inputs; | 209 Vector<OwnPtr<AudioNodeInput> > m_inputs; |
203 Vector<OwnPtr<AudioNodeOutput> > m_outputs; | 210 Vector<OwnPtr<AudioNodeOutput> > m_outputs; |
204 | 211 |
| 212 EventTargetData m_eventTargetData; |
| 213 |
205 double m_lastProcessingTime; | 214 double m_lastProcessingTime; |
206 double m_lastNonSilentTime; | 215 double m_lastNonSilentTime; |
207 | 216 |
208 // Ref-counting | 217 // Ref-counting |
209 volatile int m_normalRefCount; | 218 volatile int m_normalRefCount; |
210 volatile int m_connectionRefCount; | 219 volatile int m_connectionRefCount; |
211 | 220 |
212 bool m_isMarkedForDeletion; | 221 bool m_isMarkedForDeletion; |
213 bool m_isDisabled; | 222 bool m_isDisabled; |
214 | 223 |
215 #if DEBUG_AUDIONODE_REFERENCES | 224 #if DEBUG_AUDIONODE_REFERENCES |
216 static bool s_isNodeCountInitialized; | 225 static bool s_isNodeCountInitialized; |
217 static int s_nodeCount[NodeTypeEnd]; | 226 static int s_nodeCount[NodeTypeEnd]; |
218 #endif | 227 #endif |
219 | 228 |
| 229 virtual void refEventTarget() OVERRIDE { ref(); } |
| 230 virtual void derefEventTarget() OVERRIDE { deref(); } |
| 231 |
220 protected: | 232 protected: |
221 unsigned m_channelCount; | 233 unsigned m_channelCount; |
222 ChannelCountMode m_channelCountMode; | 234 ChannelCountMode m_channelCountMode; |
223 AudioBus::ChannelInterpretation m_channelInterpretation; | 235 AudioBus::ChannelInterpretation m_channelInterpretation; |
224 }; | 236 }; |
225 | 237 |
226 } // namespace WebCore | 238 } // namespace WebCore |
227 | 239 |
228 #endif // AudioNode_h | 240 #endif // AudioNode_h |
OLD | NEW |