Chromium Code Reviews
Unified diff: Source/modules/webaudio/AudioContext.cpp

Issue 438293003: Enable Oilpan by default for webaudio/ (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 6 years, 4 months ago
 /*
  * Copyright (C) 2010, Google Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
(...skipping 71 matching lines...)
 {
     // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz,
     // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there.
     return sampleRate >= 44100 && sampleRate <= 96000;
 }
 
 // Don't allow more than this number of simultaneous AudioContexts talking to hardware.
 const unsigned MaxHardwareContexts = 6;
 unsigned AudioContext::s_hardwareContextCount = 0;
 
-PassRefPtrWillBeRawPtr<AudioContext> AudioContext::create(Document& document, ExceptionState& exceptionState)
+AudioContext* AudioContext::create(Document& document, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     if (s_hardwareContextCount >= MaxHardwareContexts) {
         exceptionState.throwDOMException(
             SyntaxError,
             "number of hardware contexts reached maximum (" + String::number(MaxHardwareContexts) + ").");
-        return nullptr;
+        return 0;
     }
 
-    RefPtrWillBeRawPtr<AudioContext> audioContext(adoptRefWillBeThreadSafeRefCountedGarbageCollected(new AudioContext(&document)));
+    AudioContext* audioContext = adoptRefCountedGarbageCollected(new AudioContext(&document));
     audioContext->suspendIfNeeded();
-    return audioContext.release();
+    return audioContext;
 }
 
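The hunk above is the core of this patch: with Oilpan always enabled, factory functions no longer hand out transitional reference-counted handles (PassRefPtrWillBeRawPtr, adoptRefWillBeThreadSafeRefCountedGarbageCollected) but return plain pointers to objects owned by the garbage-collected heap. A minimal standalone sketch of that ownership model follows; Heap, GCObject, and FakeAudioContext are invented illustration names, not Blink APIs.

#include <cstdio>
#include <memory>
#include <utility>
#include <vector>

// Base class so the heap can own allocations polymorphically.
struct GCObject {
    virtual ~GCObject() = default;
};

// Stand-in for a tracing garbage collector: it owns every allocation and the
// rest of the program only ever sees raw pointers.
class Heap {
public:
    template <typename T, typename... Args>
    T* allocate(Args&&... args) {
        m_objects.push_back(std::make_unique<T>(std::forward<Args>(args)...));
        return static_cast<T*>(m_objects.back().get());
    }

private:
    std::vector<std::unique_ptr<GCObject>> m_objects;
};

// Mirrors the new shape of AudioContext::create(): no RefPtr, no release().
struct FakeAudioContext : GCObject {
    static FakeAudioContext* create(Heap& heap) { return heap.allocate<FakeAudioContext>(); }
};

int main() {
    Heap heap;
    FakeAudioContext* context = FakeAudioContext::create(heap);
    std::printf("context %p is owned by the heap, not by the caller\n", static_cast<void*>(context));
    return 0; // the heap (collector) reclaims the object; callers never deref()
}
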
 // Constructor for rendering to the audio hardware.
 AudioContext::AudioContext(Document* document)
     : ActiveDOMObject(document)
     , m_isStopScheduled(false)
     , m_isCleared(false)
     , m_isInitialized(false)
     , m_destinationNode(nullptr)
-#if !ENABLE(OILPAN)
-    , m_isDeletionScheduled(false)
-#endif
     , m_automaticPullNodesNeedUpdating(false)
     , m_connectionCount(0)
     , m_audioThread(0)
     , m_graphOwnerThread(UndefinedThreadIdentifier)
     , m_isOfflineContext(false)
 {
     ScriptWrappable::init(this);
 
     m_destinationNode = DefaultAudioDestinationNode::create(this);
 
     initialize();
 #if DEBUG_AUDIONODE_REFERENCES
     fprintf(stderr, "%p: AudioContext::AudioContext() #%u\n", this, AudioContext::s_hardwareContextCount);
 #endif
 }
 
 // Constructor for offline (non-realtime) rendering.
 AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
     : ActiveDOMObject(document)
     , m_isStopScheduled(false)
     , m_isCleared(false)
     , m_isInitialized(false)
     , m_destinationNode(nullptr)
-#if !ENABLE(OILPAN)
-    , m_isDeletionScheduled(false)
-#endif
     , m_automaticPullNodesNeedUpdating(false)
     , m_connectionCount(0)
     , m_audioThread(0)
     , m_graphOwnerThread(UndefinedThreadIdentifier)
     , m_isOfflineContext(true)
 {
     ScriptWrappable::init(this);
 
     // Create a new destination for offline rendering.
     m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
     if (m_renderTarget.get())
         m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());
 
     initialize();
 }
 
 AudioContext::~AudioContext()
 {
 #if DEBUG_AUDIONODE_REFERENCES
     fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this);
 #endif
     // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around.
     ASSERT(!m_isInitialized);
-#if !ENABLE(OILPAN)
-    ASSERT(!m_nodesToDelete.size());
-#endif
     ASSERT(!m_referencedNodes.size());
     ASSERT(!m_finishedNodes.size());
     ASSERT(!m_automaticPullNodes.size());
     if (m_automaticPullNodesNeedUpdating)
         m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
     ASSERT(!m_renderingAutomaticPullNodes.size());
 }
 
 void AudioContext::initialize()
 {
(...skipping 14 matching lines...)
             m_destinationNode->startRendering();
             ++s_hardwareContextCount;
         }
 
         m_isInitialized = true;
     }
 }
 
 void AudioContext::clear()
 {
-#if ENABLE(OILPAN)
     // We need to run disposers before destructing m_contextGraphMutex.
     m_liveAudioSummingJunctions.clear();
     m_liveNodes.clear();
-#else
-
-    // We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
-    if (m_destinationNode)
-        m_destinationNode.clear();
-
-    // Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
-    do {
-        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
-        m_nodesMarkedForDeletion.clear();
-        deleteMarkedNodes();
-    } while (m_nodesToDelete.size());
-#endif
-
+    m_destinationNode.clear();
     m_isCleared = true;
 }
 
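In the Oilpan-only clear() above, the hand-written deletion loop disappears: the context now just clears its disposer maps and drops the destination node, and the collector reclaims the nodes themselves. A rough, self-contained sketch of the disposer idea is below; Node and Disposer are hypothetical stand-ins, not the Blink classes.

#include <cstdio>
#include <map>
#include <memory>

struct Node {
    int id;
};

// RAII helper: its destructor runs the per-node cleanup that the manual
// deleteMarkedNodes() machinery used to perform.
struct Disposer {
    explicit Disposer(Node& node) : m_node(node) {}
    ~Disposer() { std::printf("disposing node %d\n", m_node.id); }
    Node& m_node;
};

int main() {
    // Mirrors m_liveNodes: every live node gets a disposer when it is registered.
    std::map<Node*, std::unique_ptr<Disposer>> liveNodes;
    Node a{1};
    Node b{2};
    liveNodes.emplace(&a, std::make_unique<Disposer>(a));
    liveNodes.emplace(&b, std::make_unique<Disposer>(b));

    // Mirrors AudioContext::clear(): clearing the map runs every disposer at once.
    liveNodes.clear();
    return 0;
}
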
 void AudioContext::uninitialize()
 {
     ASSERT(isMainThread());
 
     if (!isInitialized())
         return;
 
(...skipping 16 matching lines...)
 {
     // Usually ExecutionContext calls stop twice.
     if (m_isStopScheduled)
         return;
     m_isStopScheduled = true;
 
     // Don't call uninitialize() immediately here because the ExecutionContext is in the middle
     // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
     // ActiveDOMObjects so let's schedule uninitialize() to be called later.
     // FIXME: see if there's a more direct way to handle this issue.
-    callOnMainThread(bind(&AudioContext::uninitialize, PassRefPtrWillBeRawPtr<AudioContext>(this)));
+    callOnMainThread(bind(&AudioContext::uninitialize, this));
 }
 
 bool AudioContext::hasPendingActivity() const
 {
     // According to spec AudioContext must die only after page navigates.
     return !m_isCleared;
 }
 
-PassRefPtrWillBeRawPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
+AudioBuffer* AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
 {
-    RefPtrWillBeRawPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate, exceptionState);
-
-    return audioBuffer;
+    return AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate, exceptionState);
 }
 
 void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassOwnPtr<AudioBufferCallback> successCallback, PassOwnPtr<AudioBufferCallback> errorCallback, ExceptionState& exceptionState)
 {
     if (!audioData) {
         exceptionState.throwDOMException(
             SyntaxError,
             "invalid ArrayBuffer for audioData.");
         return;
     }
     m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);
 }
 
-PassRefPtrWillBeRawPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
+AudioBufferSourceNode* AudioContext::createBufferSource()
 {
     ASSERT(isMainThread());
-    RefPtrWillBeRawPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());
+    AudioBufferSourceNode* node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());
 
     // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
     // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
-    refNode(node.get());
+    refNode(node);
 
     return node;
 }
 
-PassRefPtrWillBeRawPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState)
+MediaElementAudioSourceNode* AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     if (!mediaElement) {
         exceptionState.throwDOMException(
             InvalidStateError,
             "invalid HTMLMedialElement.");
-        return nullptr;
+        return 0;
     }
 
     // First check if this media element already has a source node.
     if (mediaElement->audioSourceNode()) {
         exceptionState.throwDOMException(
             InvalidStateError,
             "invalid HTMLMediaElement.");
-        return nullptr;
+        return 0;
     }
 
-    RefPtrWillBeRawPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);
+    MediaElementAudioSourceNode* node = MediaElementAudioSourceNode::create(this, mediaElement);
 
-    mediaElement->setAudioSourceNode(node.get());
+    mediaElement->setAudioSourceNode(node);
 
-    refNode(node.get()); // context keeps reference until node is disconnected
+    refNode(node); // context keeps reference until node is disconnected
     return node;
 }
 
-PassRefPtrWillBeRawPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState)
+MediaStreamAudioSourceNode* AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     if (!mediaStream) {
         exceptionState.throwDOMException(
             InvalidStateError,
             "invalid MediaStream source");
-        return nullptr;
+        return 0;
     }
 
     MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks();
     if (audioTracks.isEmpty()) {
         exceptionState.throwDOMException(
             InvalidStateError,
             "MediaStream has no audio track");
-        return nullptr;
+        return 0;
     }
 
     // Use the first audio track in the media stream.
     MediaStreamTrack* audioTrack = audioTracks[0];
     OwnPtr<AudioSourceProvider> provider = audioTrack->createWebAudioSource();
-    RefPtrWillBeRawPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack, provider.release());
+    MediaStreamAudioSourceNode* node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack, provider.release());
 
     // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
     node->setFormat(2, sampleRate());
 
-    refNode(node.get()); // context keeps reference until node is disconnected
+    refNode(node); // context keeps reference until node is disconnected
     return node;
 }
 
-PassRefPtrWillBeRawPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
+MediaStreamAudioDestinationNode* AudioContext::createMediaStreamDestination()
 {
     // Set number of output channels to stereo by default.
     return MediaStreamAudioDestinationNode::create(this, 2);
 }
 
-PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(ExceptionState& exceptionState)
+ScriptProcessorNode* AudioContext::createScriptProcessor(ExceptionState& exceptionState)
 {
     // Set number of input/output channels to stereo by default.
     return createScriptProcessor(0, 2, 2, exceptionState);
 }
 
-PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState)
+ScriptProcessorNode* AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState)
 {
     // Set number of input/output channels to stereo by default.
     return createScriptProcessor(bufferSize, 2, 2, exceptionState);
 }
 
-PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState)
+ScriptProcessorNode* AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState)
 {
     // Set number of output channels to stereo by default.
     return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exceptionState);
 }
 
-PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState)
+ScriptProcessorNode* AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
-    RefPtrWillBeRawPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
+    ScriptProcessorNode* node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
 
-    if (!node.get()) {
+    if (!node) {
         if (!numberOfInputChannels && !numberOfOutputChannels) {
             exceptionState.throwDOMException(
                 IndexSizeError,
                 "number of input channels and output channels cannot both be zero.");
         } else if (numberOfInputChannels > AudioContext::maxNumberOfChannels()) {
             exceptionState.throwDOMException(
                 IndexSizeError,
                 "number of input channels (" + String::number(numberOfInputChannels)
                 + ") exceeds maximum ("
                 + String::number(AudioContext::maxNumberOfChannels()) + ").");
         } else if (numberOfOutputChannels > AudioContext::maxNumberOfChannels()) {
             exceptionState.throwDOMException(
                 IndexSizeError,
                 "number of output channels (" + String::number(numberOfInputChannels)
                 + ") exceeds maximum ("
                 + String::number(AudioContext::maxNumberOfChannels()) + ").");
         } else {
             exceptionState.throwDOMException(
                 IndexSizeError,
                 "buffer size (" + String::number(bufferSize)
                 + ") must be a power of two between 256 and 16384.");
         }
-        return nullptr;
+        return 0;
     }
 
-    refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks
+    refNode(node); // context keeps reference until we stop making javascript rendering callbacks
     return node;
 }
 
-PassRefPtrWillBeRawPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
+BiquadFilterNode* AudioContext::createBiquadFilter()
 {
     ASSERT(isMainThread());
     return BiquadFilterNode::create(this, m_destinationNode->sampleRate());
 }
 
-PassRefPtrWillBeRawPtr<WaveShaperNode> AudioContext::createWaveShaper()
+WaveShaperNode* AudioContext::createWaveShaper()
 {
     ASSERT(isMainThread());
     return WaveShaperNode::create(this);
 }
 
-PassRefPtrWillBeRawPtr<PannerNode> AudioContext::createPanner()
+PannerNode* AudioContext::createPanner()
 {
     ASSERT(isMainThread());
     return PannerNode::create(this, m_destinationNode->sampleRate());
 }
 
-PassRefPtrWillBeRawPtr<ConvolverNode> AudioContext::createConvolver()
+ConvolverNode* AudioContext::createConvolver()
 {
     ASSERT(isMainThread());
     return ConvolverNode::create(this, m_destinationNode->sampleRate());
 }
 
-PassRefPtrWillBeRawPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
+DynamicsCompressorNode* AudioContext::createDynamicsCompressor()
 {
     ASSERT(isMainThread());
     return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate());
 }
 
-PassRefPtrWillBeRawPtr<AnalyserNode> AudioContext::createAnalyser()
+AnalyserNode* AudioContext::createAnalyser()
 {
     ASSERT(isMainThread());
     return AnalyserNode::create(this, m_destinationNode->sampleRate());
 }
 
-PassRefPtrWillBeRawPtr<GainNode> AudioContext::createGain()
+GainNode* AudioContext::createGain()
 {
     ASSERT(isMainThread());
     return GainNode::create(this, m_destinationNode->sampleRate());
 }
 
-PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(ExceptionState& exceptionState)
+DelayNode* AudioContext::createDelay(ExceptionState& exceptionState)
 {
     const double defaultMaxDelayTime = 1;
     return createDelay(defaultMaxDelayTime, exceptionState);
 }
 
-PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState)
+DelayNode* AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
-    RefPtrWillBeRawPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, exceptionState);
+    DelayNode* node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, exceptionState);
     if (exceptionState.hadException())
-        return nullptr;
+        return 0;
     return node;
 }
 
-PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionState& exceptionState)
+ChannelSplitterNode* AudioContext::createChannelSplitter(ExceptionState& exceptionState)
 {
     const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
     return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptionState);
 }
 
-PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState)
+ChannelSplitterNode* AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
 
-    RefPtrWillBeRawPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);
+    ChannelSplitterNode* node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);
 
-    if (!node.get()) {
+    if (!node) {
         exceptionState.throwDOMException(
             IndexSizeError,
             "number of outputs (" + String::number(numberOfOutputs)
             + ") must be between 1 and "
             + String::number(AudioContext::maxNumberOfChannels()) + ".");
-        return nullptr;
+        return 0;
     }
 
     return node;
 }
 
-PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState& exceptionState)
+ChannelMergerNode* AudioContext::createChannelMerger(ExceptionState& exceptionState)
 {
     const unsigned ChannelMergerDefaultNumberOfInputs = 6;
     return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionState);
 }
 
-PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState)
+ChannelMergerNode* AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
 
-    RefPtrWillBeRawPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);
+    ChannelMergerNode* node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);
 
-    if (!node.get()) {
+    if (!node) {
         exceptionState.throwDOMException(
             IndexSizeError,
             "number of inputs (" + String::number(numberOfInputs)
             + ") must be between 1 and "
             + String::number(AudioContext::maxNumberOfChannels()) + ".");
-        return nullptr;
+        return 0;
     }
 
     return node;
 }
 
-PassRefPtrWillBeRawPtr<OscillatorNode> AudioContext::createOscillator()
+OscillatorNode* AudioContext::createOscillator()
 {
     ASSERT(isMainThread());
 
-    RefPtrWillBeRawPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate());
+    OscillatorNode* node = OscillatorNode::create(this, m_destinationNode->sampleRate());
 
     // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
     // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
-    refNode(node.get());
+    refNode(node);
 
     return node;
 }
 
-PassRefPtrWillBeRawPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& exceptionState)
+PeriodicWave* AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
 
     if (!real) {
         exceptionState.throwDOMException(
             SyntaxError,
             "invalid real array");
-        return nullptr;
+        return 0;
     }
 
     if (!imag) {
         exceptionState.throwDOMException(
             SyntaxError,
             "invalid imaginary array");
-        return nullptr;
+        return 0;
     }
 
     if (real->length() != imag->length()) {
         exceptionState.throwDOMException(
             IndexSizeError,
             "length of real array (" + String::number(real->length())
             + ") and length of imaginary array (" + String::number(imag->length())
             + ") must match.");
-        return nullptr;
+        return 0;
     }
 
     if (real->length() > 4096) {
         exceptionState.throwDOMException(
             IndexSizeError,
             "length of real array (" + String::number(real->length())
             + ") exceeds allowed maximum of 4096");
-        return nullptr;
+        return 0;
     }
 
     if (imag->length() > 4096) {
         exceptionState.throwDOMException(
             IndexSizeError,
             "length of imaginary array (" + String::number(imag->length())
             + ") exceeds allowed maximum of 4096");
-        return nullptr;
+        return 0;
     }
 
     return PeriodicWave::create(sampleRate(), real, imag);
 }
 
 void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
 {
     ASSERT(isAudioThread());
     m_finishedNodes.append(node);
 }
(...skipping 107 matching lines...)
 {
     return currentThread() == m_graphOwnerThread;
 }
 
 void AudioContext::addDeferredBreakConnection(AudioNode& node)
 {
     ASSERT(isAudioThread());
     m_deferredBreakConnectionList.append(&node);
 }
 
-#if !ENABLE(OILPAN)
-void AudioContext::addDeferredFinishDeref(AudioNode* node)
-{
-    ASSERT(isAudioThread());
-    m_deferredFinishDerefList.append(node);
-}
-#endif
-
 void AudioContext::handlePreRenderTasks()
 {
     ASSERT(isAudioThread());
 
     // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
     // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
     bool mustReleaseLock;
     if (tryLock(mustReleaseLock)) {
         // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
         handleDirtyAudioSummingJunctions();
(...skipping 14 matching lines...)
     // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
     // from the render graph (in which case they'll render silence).
     bool mustReleaseLock;
     if (tryLock(mustReleaseLock)) {
         // Take care of AudioNode tasks where the tryLock() failed previously.
         handleDeferredAudioNodeTasks();
 
         // Dynamically clean up nodes which are no longer needed.
         derefFinishedSourceNodes();
 
-#if !ENABLE(OILPAN)
-        // Don't delete in the real-time thread. Let the main thread do it.
-        // Ref-counted objects held by certain AudioNodes may not be thread-safe.
-        scheduleNodeDeletion();
-#endif
-
         // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
         handleDirtyAudioSummingJunctions();
         handleDirtyAudioNodeOutputs();
 
         updateAutomaticPullNodes();
 
         if (mustReleaseLock)
             unlock();
     }
 }
 
 void AudioContext::handleDeferredAudioNodeTasks()
 {
     ASSERT(isAudioThread() && isGraphOwner());
 
     for (unsigned i = 0; i < m_deferredBreakConnectionList.size(); ++i)
         m_deferredBreakConnectionList[i]->breakConnectionWithLock();
     m_deferredBreakConnectionList.clear();
-
-#if !ENABLE(OILPAN)
-    for (unsigned i = 0; i < m_deferredFinishDerefList.size(); ++i)
-        m_deferredFinishDerefList[i]->finishDeref();
-    m_deferredFinishDerefList.clear();
-#endif
 }
 
-#if ENABLE(OILPAN)
 void AudioContext::registerLiveNode(AudioNode& node)
 {
     ASSERT(isMainThread());
     m_liveNodes.add(&node, adoptPtr(new AudioNodeDisposer(node)));
 }
 
 AudioContext::AudioNodeDisposer::~AudioNodeDisposer()
 {
     ASSERT(isMainThread());
     AudioContext::AutoLocker locker(m_node.context());
     m_node.dispose();
 }
 
 void AudioContext::registerLiveAudioSummingJunction(AudioSummingJunction& junction)
 {
     ASSERT(isMainThread());
     m_liveAudioSummingJunctions.add(&junction, adoptPtr(new AudioSummingJunctionDisposer(junction)));
 }
 
 AudioContext::AudioSummingJunctionDisposer::~AudioSummingJunctionDisposer()
 {
     ASSERT(isMainThread());
     m_junction.context()->removeMarkedSummingJunction(&m_junction);
 }
-#else
-
-void AudioContext::markForDeletion(AudioNode* node)
-{
-    ASSERT(isGraphOwner());
-
-    if (!isInitialized())
-        m_nodesToDelete.append(node);
-    else
-        m_nodesMarkedForDeletion.append(node);
-}
-
-void AudioContext::scheduleNodeDeletion()
-{
-    bool isGood = isInitialized() && isGraphOwner();
-    ASSERT(isGood);
-    if (!isGood)
-        return;
-
-    // Make sure to call deleteMarkedNodes() on main thread.
-    if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
-        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
-        m_nodesMarkedForDeletion.clear();
-
-        m_isDeletionScheduled = true;
-
-        // Don't let ourself get deleted before the callback.
-        // See matching deref() in deleteMarkedNodesDispatch().
-        ref();
-        callOnMainThread(deleteMarkedNodesDispatch, this);
-    }
-}
-
-void AudioContext::deleteMarkedNodesDispatch(void* userData)
-{
-    AudioContext* context = reinterpret_cast<AudioContext*>(userData);
-    ASSERT(context);
-    if (!context)
-        return;
-
-    context->deleteMarkedNodes();
-    context->deref();
-}
-
-void AudioContext::deleteMarkedNodes()
-{
-    ASSERT(isMainThread());
-
-    // Protect this object from being deleted before we release the mutex locked by AutoLocker.
-    RefPtrWillBeRawPtr<AudioContext> protect(this);
-    {
-        AutoLocker locker(this);
-
-        while (size_t n = m_nodesToDelete.size()) {
-            AudioNode* node = m_nodesToDelete[n - 1];
-            m_nodesToDelete.removeLast();
-
-            node->dispose();
-
-            // Finally, delete it.
-            delete node;
-        }
-        m_isDeletionScheduled = false;
-    }
-}
-#endif
 
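The large block removed above is the non-Oilpan bookkeeping for deleting AudioNodes: nodes were marked during rendering, scheduleNodeDeletion() moved them onto a to-delete list, and deleteMarkedNodes() was bounced to the main thread via callOnMainThread() to dispose and delete them under the graph lock. For readers following the review, a simplified, single-threaded sketch of that hand-off (hypothetical types; locking and threading omitted) looks like this:

#include <cstdio>
#include <vector>

struct Node {
    int id;
};

struct Context {
    std::vector<Node*> markedForDeletion; // filled while the graph is rendering
    std::vector<Node*> toDelete;          // drained later, off the audio thread

    // Roughly what scheduleNodeDeletion() did: move marked nodes to the delete list.
    void scheduleNodeDeletion() {
        toDelete.insert(toDelete.end(), markedForDeletion.begin(), markedForDeletion.end());
        markedForDeletion.clear();
    }

    // Roughly what deleteMarkedNodes() did once callOnMainThread() ran it.
    void deleteMarkedNodes() {
        while (!toDelete.empty()) {
            Node* node = toDelete.back();
            toDelete.pop_back();
            std::printf("deleting node %d\n", node->id);
            delete node;
        }
    }
};

int main() {
    Context context;
    context.markedForDeletion.push_back(new Node{1});
    context.markedForDeletion.push_back(new Node{2});
    context.scheduleNodeDeletion();
    context.deleteMarkedNodes();
    return 0;
}

With Oilpan on by default, none of this machinery is needed: the collector finds dead nodes, and their cleanup runs through the disposers registered in registerLiveNode() above.
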
 void AudioContext::unmarkDirtyNode(AudioNode& node)
 {
     ASSERT(isGraphOwner());
-#if !ENABLE(OILPAN)
-    // Before deleting the node, clear out any AudioNodeInputs from
-    // m_dirtySummingJunctions.
-    unsigned numberOfInputs = node.numberOfInputs();
-    for (unsigned i = 0; i < numberOfInputs; ++i)
-        m_dirtySummingJunctions.remove(node.input(i));
-#endif
 
     // Before deleting the node, clear out any AudioNodeOutputs from
     // m_dirtyAudioNodeOutputs.
     unsigned numberOfOutputs = node.numberOfOutputs();
     for (unsigned i = 0; i < numberOfOutputs; ++i)
         m_dirtyAudioNodeOutputs.remove(node.output(i));
 }
 
 void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
 {
(...skipping 113 matching lines...)
         // Call the offline rendering completion event listener.
         dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
     }
 }
 
 void AudioContext::trace(Visitor* visitor)
 {
     visitor->trace(m_renderTarget);
     visitor->trace(m_destinationNode);
     visitor->trace(m_listener);
-#if ENABLE(OILPAN)
     visitor->trace(m_referencedNodes);
     visitor->trace(m_liveNodes);
     visitor->trace(m_liveAudioSummingJunctions);
-#endif
     EventTargetWithInlineData::trace(visitor);
 }
 
 } // namespace blink
 
 #endif // ENABLE(WEB_AUDIO)
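One last point worth noting from the trace() hunk near the end of the diff: with Oilpan always on, the context unconditionally reports its referenced nodes and live-object maps to the marking visitor. A toy sketch of the visitor pattern that trace() participates in (invented Visitor/Context types, not Blink's) is:

#include <cstdio>
#include <vector>

// Collects the pointers an object reports; a real collector would mark them live.
struct Visitor {
    std::vector<const void*> reached;
    template <typename T>
    void trace(const T* member) {
        if (member)
            reached.push_back(member);
    }
};

struct DestinationNode {};
struct Listener {};

// Mirrors the shape of AudioContext::trace(): report every traced member.
struct Context {
    DestinationNode* destination;
    Listener* listener;

    void trace(Visitor* visitor) const {
        visitor->trace(destination);
        visitor->trace(listener);
    }
};

int main() {
    DestinationNode destination;
    Listener listener;
    Context context{&destination, &listener};

    Visitor visitor;
    context.trace(&visitor);
    std::printf("visitor reached %zu members\n", visitor.reached.size());
    return 0;
}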