Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(50)

Side by Side Diff: Source/modules/webaudio/AudioContext.cpp

Issue 205173002: Move webaudio to oilpan (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 6 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2010, Google Inc. All rights reserved. 2 * Copyright (C) 2010, Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions 5 * modification, are permitted provided that the following conditions
6 * are met: 6 * are met:
7 * 1. Redistributions of source code must retain the above copyright 7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer. 8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright 9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the 10 * notice, this list of conditions and the following disclaimer in the
(...skipping 71 matching lines...) Expand 10 before | Expand all | Expand 10 after
82 { 82 {
83 // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz, 83 // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz,
84 // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there. 84 // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there.
85 return sampleRate >= 44100 && sampleRate <= 96000; 85 return sampleRate >= 44100 && sampleRate <= 96000;
86 } 86 }
87 87
88 // Don't allow more than this number of simultaneous AudioContexts talking to hardware. 88 // Don't allow more than this number of simultaneous AudioContexts talking to hardware.
89 const unsigned MaxHardwareContexts = 6; 89 const unsigned MaxHardwareContexts = 6;
90 unsigned AudioContext::s_hardwareContextCount = 0; 90 unsigned AudioContext::s_hardwareContextCount = 0;
91 91
92 PassRefPtr<AudioContext> AudioContext::create(Document& document, ExceptionState & exceptionState) 92 PassRefPtrWillBeRawPtr<AudioContext> AudioContext::create(Document& document, Ex ceptionState& exceptionState)
93 { 93 {
94 ASSERT(isMainThread()); 94 ASSERT(isMainThread());
95 if (s_hardwareContextCount >= MaxHardwareContexts) { 95 if (s_hardwareContextCount >= MaxHardwareContexts) {
96 exceptionState.throwDOMException( 96 exceptionState.throwDOMException(
97 SyntaxError, 97 SyntaxError,
98 "number of hardware contexts reached maximum (" + String::number(Max HardwareContexts) + ")."); 98 "number of hardware contexts reached maximum (" + String::number(Max HardwareContexts) + ").");
99 return nullptr; 99 return nullptr;
100 } 100 }
101 101
102 RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(&document))); 102 RefPtrWillBeRawPtr<AudioContext> audioContext(adoptRefWillBeRefCountedGarbag eCollected(new AudioContext(&document)));
103 audioContext->suspendIfNeeded(); 103 audioContext->suspendIfNeeded();
104 return audioContext.release(); 104 return audioContext.release();
105 } 105 }
106 106
107 PassRefPtr<AudioContext> AudioContext::create(Document& document, unsigned numbe rOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionS tate) 107 PassRefPtrWillBeRawPtr<AudioContext> AudioContext::create(Document& document, un signed numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState & exceptionState)
108 { 108 {
109 document.addConsoleMessage(JSMessageSource, WarningMessageLevel, "Deprecated AudioContext constructor: use OfflineAudioContext instead"); 109 document.addConsoleMessage(JSMessageSource, WarningMessageLevel, "Deprecated AudioContext constructor: use OfflineAudioContext instead");
110 return OfflineAudioContext::create(&document, numberOfChannels, numberOfFram es, sampleRate, exceptionState); 110 return OfflineAudioContext::create(&document, numberOfChannels, numberOfFram es, sampleRate, exceptionState);
111 } 111 }
112 112
113 // Constructor for rendering to the audio hardware. 113 // Constructor for rendering to the audio hardware.
114 AudioContext::AudioContext(Document* document) 114 AudioContext::AudioContext(Document* document)
115 : ActiveDOMObject(document) 115 : ActiveDOMObject(document)
116 , m_isStopScheduled(false) 116 , m_isStopScheduled(false)
117 , m_isInitialized(false) 117 , m_isInitialized(false)
(...skipping 151 matching lines...) Expand 10 before | Expand all | Expand 10 after
269 return; 269 return;
270 m_isStopScheduled = true; 270 m_isStopScheduled = true;
271 271
272 // Don't call uninitialize() immediately here because the ExecutionContext is in the middle 272 // Don't call uninitialize() immediately here because the ExecutionContext is in the middle
273 // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other 273 // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
274 // ActiveDOMObjects so let's schedule uninitialize() to be called later. 274 // ActiveDOMObjects so let's schedule uninitialize() to be called later.
275 // FIXME: see if there's a more direct way to handle this issue. 275 // FIXME: see if there's a more direct way to handle this issue.
276 callOnMainThread(stopDispatch, this); 276 callOnMainThread(stopDispatch, this);
277 } 277 }
278 278
279 PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, si ze_t numberOfFrames, float sampleRate, ExceptionState& exceptionState) 279 PassRefPtrWillBeRawPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOf Channels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionStat e)
280 { 280 {
281 RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numb erOfFrames, sampleRate); 281 RefPtrWillBeRawPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfCh annels, numberOfFrames, sampleRate);
282 if (!audioBuffer.get()) { 282 if (!audioBuffer.get()) {
283 if (numberOfChannels > AudioContext::maxNumberOfChannels()) { 283 if (numberOfChannels > AudioContext::maxNumberOfChannels()) {
284 exceptionState.throwDOMException( 284 exceptionState.throwDOMException(
285 NotSupportedError, 285 NotSupportedError,
286 "requested number of channels (" + String::number(numberOfChanne ls) + ") exceeds maximum (" + String::number(AudioContext::maxNumberOfChannels() ) + ")"); 286 "requested number of channels (" + String::number(numberOfChanne ls) + ") exceeds maximum (" + String::number(AudioContext::maxNumberOfChannels() ) + ")");
287 } else if (sampleRate < AudioBuffer::minAllowedSampleRate() || sampleRat e > AudioBuffer::maxAllowedSampleRate()) { 287 } else if (sampleRate < AudioBuffer::minAllowedSampleRate() || sampleRat e > AudioBuffer::maxAllowedSampleRate()) {
288 exceptionState.throwDOMException( 288 exceptionState.throwDOMException(
289 NotSupportedError, 289 NotSupportedError,
290 "requested sample rate (" + String::number(sampleRate) 290 "requested sample rate (" + String::number(sampleRate)
291 + ") does not lie in the allowed range of " 291 + ") does not lie in the allowed range of "
292 + String::number(AudioBuffer::minAllowedSampleRate()) 292 + String::number(AudioBuffer::minAllowedSampleRate())
293 + "-" + String::number(AudioBuffer::maxAllowedSampleRate()) + " Hz"); 293 + "-" + String::number(AudioBuffer::maxAllowedSampleRate()) + " Hz");
294 } else if (!numberOfFrames) { 294 } else if (!numberOfFrames) {
295 exceptionState.throwDOMException( 295 exceptionState.throwDOMException(
296 NotSupportedError, 296 NotSupportedError,
297 "number of frames must be greater than 0."); 297 "number of frames must be greater than 0.");
298 } else { 298 } else {
299 exceptionState.throwDOMException( 299 exceptionState.throwDOMException(
300 NotSupportedError, 300 NotSupportedError,
301 "unable to create buffer of " + String::number(numberOfChannels) 301 "unable to create buffer of " + String::number(numberOfChannels)
302 + " channel(s) of " + String::number(numberOfFrames) 302 + " channel(s) of " + String::number(numberOfFrames)
303 + " frames each."); 303 + " frames each.");
304 } 304 }
305 return nullptr; 305 return nullptr;
306 } 306 }
307 307
308 return audioBuffer; 308 return audioBuffer;
309 } 309 }
310 310
311 PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, boo l mixToMono, ExceptionState& exceptionState) 311 PassRefPtrWillBeRawPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arra yBuffer, bool mixToMono, ExceptionState& exceptionState)
312 { 312 {
313 ASSERT(arrayBuffer); 313 ASSERT(arrayBuffer);
314 if (!arrayBuffer) { 314 if (!arrayBuffer) {
315 exceptionState.throwDOMException( 315 exceptionState.throwDOMException(
316 SyntaxError, 316 SyntaxError,
317 "invalid ArrayBuffer."); 317 "invalid ArrayBuffer.");
318 return nullptr; 318 return nullptr;
319 } 319 }
320 320
321 RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(array Buffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate()); 321 RefPtrWillBeRawPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFi leData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate());
322 if (!audioBuffer.get()) { 322 if (!audioBuffer.get()) {
323 exceptionState.throwDOMException( 323 exceptionState.throwDOMException(
324 SyntaxError, 324 SyntaxError,
325 "invalid audio data in ArrayBuffer."); 325 "invalid audio data in ArrayBuffer.");
326 return nullptr; 326 return nullptr;
327 } 327 }
328 328
329 return audioBuffer; 329 return audioBuffer;
330 } 330 }
331 331
332 void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassOwnPtr<AudioBuffe rCallback> successCallback, PassOwnPtr<AudioBufferCallback> errorCallback, Excep tionState& exceptionState) 332 void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassOwnPtr<AudioBuffe rCallback> successCallback, PassOwnPtr<AudioBufferCallback> errorCallback, Excep tionState& exceptionState)
333 { 333 {
334 if (!audioData) { 334 if (!audioData) {
335 exceptionState.throwDOMException( 335 exceptionState.throwDOMException(
336 SyntaxError, 336 SyntaxError,
337 "invalid ArrayBuffer for audioData."); 337 "invalid ArrayBuffer for audioData.");
338 return; 338 return;
339 } 339 }
340 m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCa llback); 340 m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCa llback);
341 } 341 }
342 342
343 PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource() 343 PassRefPtrWillBeRawPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
344 { 344 {
345 ASSERT(isMainThread()); 345 ASSERT(isMainThread());
346 lazyInitialize(); 346 lazyInitialize();
347 RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_d estinationNode->sampleRate()); 347 RefPtrWillBeRawPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::crea te(this, m_destinationNode->sampleRate());
348 348
349 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing. 349 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
350 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing(). 350 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
351 refNode(node.get()); 351 refNode(node.get());
352 352
353 return node; 353 return node;
354 } 354 }
355 355
356 PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(H TMLMediaElement* mediaElement, ExceptionState& exceptionState) 356 PassRefPtrWillBeRawPtr<MediaElementAudioSourceNode> AudioContext::createMediaEle mentSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState)
357 { 357 {
358 if (!mediaElement) { 358 if (!mediaElement) {
359 exceptionState.throwDOMException( 359 exceptionState.throwDOMException(
360 InvalidStateError, 360 InvalidStateError,
361 "invalid HTMLMedialElement."); 361 "invalid HTMLMedialElement.");
362 return nullptr; 362 return nullptr;
363 } 363 }
364 364
365 ASSERT(isMainThread()); 365 ASSERT(isMainThread());
366 lazyInitialize(); 366 lazyInitialize();
367 367
368 // First check if this media element already has a source node. 368 // First check if this media element already has a source node.
369 if (mediaElement->audioSourceNode()) { 369 if (mediaElement->audioSourceNode()) {
370 exceptionState.throwDOMException( 370 exceptionState.throwDOMException(
371 InvalidStateError, 371 InvalidStateError,
372 "invalid HTMLMediaElement."); 372 "invalid HTMLMediaElement.");
373 return nullptr; 373 return nullptr;
374 } 374 }
375 375
376 RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::crea te(this, mediaElement); 376 RefPtrWillBeRawPtr<MediaElementAudioSourceNode> node = MediaElementAudioSour ceNode::create(this, mediaElement);
377 377
378 mediaElement->setAudioSourceNode(node.get()); 378 mediaElement->setAudioSourceNode(node.get());
379 379
380 refNode(node.get()); // context keeps reference until node is disconnected 380 refNode(node.get()); // context keeps reference until node is disconnected
381 return node; 381 return node;
382 } 382 }
383 383
384 PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(Med iaStream* mediaStream, ExceptionState& exceptionState) 384 PassRefPtrWillBeRawPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStre amSource(MediaStream* mediaStream, ExceptionState& exceptionState)
385 { 385 {
386 if (!mediaStream) { 386 if (!mediaStream) {
387 exceptionState.throwDOMException( 387 exceptionState.throwDOMException(
388 InvalidStateError, 388 InvalidStateError,
389 "invalid MediaStream source"); 389 "invalid MediaStream source");
390 return nullptr; 390 return nullptr;
391 } 391 }
392 392
393 ASSERT(isMainThread()); 393 ASSERT(isMainThread());
394 lazyInitialize(); 394 lazyInitialize();
395 395
396 AudioSourceProvider* provider = 0; 396 AudioSourceProvider* provider = 0;
397 397
398 MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks(); 398 MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks();
399 RefPtr<MediaStreamTrack> audioTrack; 399 RefPtr<MediaStreamTrack> audioTrack;
400 400
401 // FIXME: get a provider for non-local MediaStreams (like from a remote peer ). 401 // FIXME: get a provider for non-local MediaStreams (like from a remote peer ).
402 for (size_t i = 0; i < audioTracks.size(); ++i) { 402 for (size_t i = 0; i < audioTracks.size(); ++i) {
403 audioTrack = audioTracks[i]; 403 audioTrack = audioTracks[i];
404 if (audioTrack->component()->audioSourceProvider()) { 404 if (audioTrack->component()->audioSourceProvider()) {
405 provider = audioTrack->component()->audioSourceProvider(); 405 provider = audioTrack->component()->audioSourceProvider();
406 break; 406 break;
407 } 407 }
408 } 408 }
409 409
410 RefPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create (this, mediaStream, audioTrack.get(), provider); 410 RefPtrWillBeRawPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSource Node::create(this, mediaStream, audioTrack.get(), provider);
411 411
412 // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams. 412 // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
413 node->setFormat(2, sampleRate()); 413 node->setFormat(2, sampleRate());
414 414
415 refNode(node.get()); // context keeps reference until node is disconnected 415 refNode(node.get()); // context keeps reference until node is disconnected
416 return node; 416 return node;
417 } 417 }
418 418
419 PassRefPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDesti nation() 419 PassRefPtrWillBeRawPtr<MediaStreamAudioDestinationNode> AudioContext::createMedi aStreamDestination()
420 { 420 {
421 // FIXME: Add support for an optional argument which specifies the number of channels. 421 // FIXME: Add support for an optional argument which specifies the number of channels.
422 // FIXME: The default should probably be stereo instead of mono. 422 // FIXME: The default should probably be stereo instead of mono.
423 return MediaStreamAudioDestinationNode::create(this, 1); 423 return MediaStreamAudioDestinationNode::create(this, 1);
424 } 424 }
425 425
426 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(ExceptionSta te& exceptionState) 426 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor( ExceptionState& exceptionState)
427 { 427 {
428 // Set number of input/output channels to stereo by default. 428 // Set number of input/output channels to stereo by default.
429 return createScriptProcessor(0, 2, 2, exceptionState); 429 return createScriptProcessor(0, 2, 2, exceptionState);
430 } 430 }
431 431
432 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t buffe rSize, ExceptionState& exceptionState) 432 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor( size_t bufferSize, ExceptionState& exceptionState)
433 { 433 {
434 // Set number of input/output channels to stereo by default. 434 // Set number of input/output channels to stereo by default.
435 return createScriptProcessor(bufferSize, 2, 2, exceptionState); 435 return createScriptProcessor(bufferSize, 2, 2, exceptionState);
436 } 436 }
437 437
438 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t buffe rSize, size_t numberOfInputChannels, ExceptionState& exceptionState) 438 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor( size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState)
439 { 439 {
440 // Set number of output channels to stereo by default. 440 // Set number of output channels to stereo by default.
441 return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exception State); 441 return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exception State);
442 } 442 }
443 443
444 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t buffe rSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionSta te& exceptionState) 444 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor( size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState)
445 { 445 {
446 ASSERT(isMainThread()); 446 ASSERT(isMainThread());
447 lazyInitialize(); 447 lazyInitialize();
448 RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_desti nationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChann els); 448 RefPtrWillBeRawPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(t his, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberO fOutputChannels);
449 449
450 if (!node.get()) { 450 if (!node.get()) {
451 if (!numberOfInputChannels && !numberOfOutputChannels) { 451 if (!numberOfInputChannels && !numberOfOutputChannels) {
452 exceptionState.throwDOMException( 452 exceptionState.throwDOMException(
453 IndexSizeError, 453 IndexSizeError,
454 "number of input channels and output channels cannot both be zer o."); 454 "number of input channels and output channels cannot both be zer o.");
455 } else if (numberOfInputChannels > AudioContext::maxNumberOfChannels()) { 455 } else if (numberOfInputChannels > AudioContext::maxNumberOfChannels()) {
456 exceptionState.throwDOMException( 456 exceptionState.throwDOMException(
457 IndexSizeError, 457 IndexSizeError,
458 "number of input channels (" + String::number(numberOfInputChann els) 458 "number of input channels (" + String::number(numberOfInputChann els)
(...skipping 11 matching lines...) Expand all
470 "buffer size (" + String::number(bufferSize) 470 "buffer size (" + String::number(bufferSize)
471 + ") must be a power of two between 256 and 16384."); 471 + ") must be a power of two between 256 and 16384.");
472 } 472 }
473 return nullptr; 473 return nullptr;
474 } 474 }
475 475
476 refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks 476 refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks
477 return node; 477 return node;
478 } 478 }
479 479
480 PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter() 480 PassRefPtrWillBeRawPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
481 { 481 {
482 ASSERT(isMainThread()); 482 ASSERT(isMainThread());
483 lazyInitialize(); 483 lazyInitialize();
484 return BiquadFilterNode::create(this, m_destinationNode->sampleRate()); 484 return BiquadFilterNode::create(this, m_destinationNode->sampleRate());
485 } 485 }
486 486
487 PassRefPtr<WaveShaperNode> AudioContext::createWaveShaper() 487 PassRefPtrWillBeRawPtr<WaveShaperNode> AudioContext::createWaveShaper()
488 { 488 {
489 ASSERT(isMainThread()); 489 ASSERT(isMainThread());
490 lazyInitialize(); 490 lazyInitialize();
491 return WaveShaperNode::create(this); 491 return WaveShaperNode::create(this);
492 } 492 }
493 493
494 PassRefPtr<PannerNode> AudioContext::createPanner() 494 PassRefPtrWillBeRawPtr<PannerNode> AudioContext::createPanner()
495 { 495 {
496 ASSERT(isMainThread()); 496 ASSERT(isMainThread());
497 lazyInitialize(); 497 lazyInitialize();
498 return PannerNode::create(this, m_destinationNode->sampleRate()); 498 return PannerNode::create(this, m_destinationNode->sampleRate());
499 } 499 }
500 500
501 PassRefPtr<ConvolverNode> AudioContext::createConvolver() 501 PassRefPtrWillBeRawPtr<ConvolverNode> AudioContext::createConvolver()
502 { 502 {
503 ASSERT(isMainThread()); 503 ASSERT(isMainThread());
504 lazyInitialize(); 504 lazyInitialize();
505 return ConvolverNode::create(this, m_destinationNode->sampleRate()); 505 return ConvolverNode::create(this, m_destinationNode->sampleRate());
506 } 506 }
507 507
508 PassRefPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor() 508 PassRefPtrWillBeRawPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompr essor()
509 { 509 {
510 ASSERT(isMainThread()); 510 ASSERT(isMainThread());
511 lazyInitialize(); 511 lazyInitialize();
512 return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate()) ; 512 return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate()) ;
513 } 513 }
514 514
515 PassRefPtr<AnalyserNode> AudioContext::createAnalyser() 515 PassRefPtrWillBeRawPtr<AnalyserNode> AudioContext::createAnalyser()
516 { 516 {
517 ASSERT(isMainThread()); 517 ASSERT(isMainThread());
518 lazyInitialize(); 518 lazyInitialize();
519 return AnalyserNode::create(this, m_destinationNode->sampleRate()); 519 return AnalyserNode::create(this, m_destinationNode->sampleRate());
520 } 520 }
521 521
522 PassRefPtr<GainNode> AudioContext::createGain() 522 PassRefPtrWillBeRawPtr<GainNode> AudioContext::createGain()
523 { 523 {
524 ASSERT(isMainThread()); 524 ASSERT(isMainThread());
525 lazyInitialize(); 525 lazyInitialize();
526 return GainNode::create(this, m_destinationNode->sampleRate()); 526 return GainNode::create(this, m_destinationNode->sampleRate());
527 } 527 }
528 528
529 PassRefPtr<DelayNode> AudioContext::createDelay(ExceptionState& exceptionState) 529 PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(ExceptionState& exce ptionState)
530 { 530 {
531 const double defaultMaxDelayTime = 1; 531 const double defaultMaxDelayTime = 1;
532 return createDelay(defaultMaxDelayTime, exceptionState); 532 return createDelay(defaultMaxDelayTime, exceptionState);
533 } 533 }
534 534
535 PassRefPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionSt ate& exceptionState) 535 PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState)
536 { 536 {
537 ASSERT(isMainThread()); 537 ASSERT(isMainThread());
538 lazyInitialize(); 538 lazyInitialize();
539 RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRa te(), maxDelayTime, exceptionState); 539 RefPtrWillBeRawPtr<DelayNode> node = DelayNode::create(this, m_destinationNo de->sampleRate(), maxDelayTime, exceptionState);
540 if (exceptionState.hadException()) 540 if (exceptionState.hadException())
541 return nullptr; 541 return nullptr;
542 return node; 542 return node;
543 } 543 }
544 544
545 PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionSta te& exceptionState) 545 PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter( ExceptionState& exceptionState)
546 { 546 {
547 const unsigned ChannelSplitterDefaultNumberOfOutputs = 6; 547 const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
548 return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptio nState); 548 return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptio nState);
549 } 549 }
550 550
551 PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numbe rOfOutputs, ExceptionState& exceptionState) 551 PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter( size_t numberOfOutputs, ExceptionState& exceptionState)
552 { 552 {
553 ASSERT(isMainThread()); 553 ASSERT(isMainThread());
554 lazyInitialize(); 554 lazyInitialize();
555 555
556 RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_desti nationNode->sampleRate(), numberOfOutputs); 556 RefPtrWillBeRawPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(t his, m_destinationNode->sampleRate(), numberOfOutputs);
557 557
558 if (!node.get()) { 558 if (!node.get()) {
559 exceptionState.throwDOMException( 559 exceptionState.throwDOMException(
560 IndexSizeError, 560 IndexSizeError,
561 "number of outputs (" + String::number(numberOfOutputs) 561 "number of outputs (" + String::number(numberOfOutputs)
562 + ") must be between 1 and " 562 + ") must be between 1 and "
563 + String::number(AudioContext::maxNumberOfChannels()) + "."); 563 + String::number(AudioContext::maxNumberOfChannels()) + ".");
564 return nullptr; 564 return nullptr;
565 } 565 }
566 566
567 return node; 567 return node;
568 } 568 }
569 569
570 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState& exceptionState) 570 PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(Exce ptionState& exceptionState)
571 { 571 {
572 const unsigned ChannelMergerDefaultNumberOfInputs = 6; 572 const unsigned ChannelMergerDefaultNumberOfInputs = 6;
573 return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionStat e); 573 return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionStat e);
574 } 574 }
575 575
576 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfI nputs, ExceptionState& exceptionState) 576 PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(size _t numberOfInputs, ExceptionState& exceptionState)
577 { 577 {
578 ASSERT(isMainThread()); 578 ASSERT(isMainThread());
579 lazyInitialize(); 579 lazyInitialize();
580 580
581 RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinati onNode->sampleRate(), numberOfInputs); 581 RefPtrWillBeRawPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);
582 582
583 if (!node.get()) { 583 if (!node.get()) {
584 exceptionState.throwDOMException( 584 exceptionState.throwDOMException(
585 IndexSizeError, 585 IndexSizeError,
586 "number of inputs (" + String::number(numberOfInputs) 586 "number of inputs (" + String::number(numberOfInputs)
587 + ") must be between 1 and " 587 + ") must be between 1 and "
588 + String::number(AudioContext::maxNumberOfChannels()) + "."); 588 + String::number(AudioContext::maxNumberOfChannels()) + ".");
589 return nullptr; 589 return nullptr;
590 } 590 }
591 591
592 return node; 592 return node;
593 } 593 }
594 594
595 PassRefPtr<OscillatorNode> AudioContext::createOscillator() 595 PassRefPtrWillBeRawPtr<OscillatorNode> AudioContext::createOscillator()
596 { 596 {
597 ASSERT(isMainThread()); 597 ASSERT(isMainThread());
598 lazyInitialize(); 598 lazyInitialize();
599 599
600 RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode ->sampleRate()); 600 RefPtrWillBeRawPtr<OscillatorNode> node = OscillatorNode::create(this, m_des tinationNode->sampleRate());
601 601
602 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing. 602 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
603 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing(). 603 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
604 refNode(node.get()); 604 refNode(node.get());
605 605
606 return node; 606 return node;
607 } 607 }
608 608
609 PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Fl oat32Array* imag, ExceptionState& exceptionState) 609 PassRefPtrWillBeRawPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Arr ay* real, Float32Array* imag, ExceptionState& exceptionState)
610 { 610 {
611 ASSERT(isMainThread()); 611 ASSERT(isMainThread());
612 612
613 if (!real) { 613 if (!real) {
614 exceptionState.throwDOMException( 614 exceptionState.throwDOMException(
615 SyntaxError, 615 SyntaxError,
616 "invalid real array"); 616 "invalid real array");
617 return nullptr; 617 return nullptr;
618 } 618 }
619 619
(...skipping 262 matching lines...) Expand 10 before | Expand all | Expand 10 after
882 882
883 context->deleteMarkedNodes(); 883 context->deleteMarkedNodes();
884 context->deref(); 884 context->deref();
885 } 885 }
886 886
887 void AudioContext::deleteMarkedNodes() 887 void AudioContext::deleteMarkedNodes()
888 { 888 {
889 ASSERT(isMainThread()); 889 ASSERT(isMainThread());
890 890
891 // Protect this object from being deleted before we release the mutex locked by AutoLocker. 891 // Protect this object from being deleted before we release the mutex locked by AutoLocker.
892 RefPtr<AudioContext> protect(this); 892 RefPtrWillBeRawPtr<AudioContext> protect(this);
893 { 893 {
894 AutoLocker locker(this); 894 AutoLocker locker(this);
895 895
896 while (size_t n = m_nodesToDelete.size()) { 896 while (size_t n = m_nodesToDelete.size()) {
897 AudioNode* node = m_nodesToDelete[n - 1]; 897 AudioNode* node = m_nodesToDelete[n - 1];
898 m_nodesToDelete.removeLast(); 898 m_nodesToDelete.removeLast();
899 899
900 // Before deleting the node, clear out any AudioNodeInputs from m_di rtySummingJunctions. 900 // Before deleting the node, clear out any AudioNodeInputs from m_di rtySummingJunctions.
901 unsigned numberOfInputs = node->numberOfInputs(); 901 unsigned numberOfInputs = node->numberOfInputs();
902 for (unsigned i = 0; i < numberOfInputs; ++i) 902 for (unsigned i = 0; i < numberOfInputs; ++i)
903 m_dirtySummingJunctions.remove(node->input(i)); 903 m_dirtySummingJunctions.remove(node->input(i));
904 904
905 // Before deleting the node, clear out any AudioNodeOutputs from m_d irtyAudioNodeOutputs. 905 // Before deleting the node, clear out any AudioNodeOutputs from m_d irtyAudioNodeOutputs.
906 unsigned numberOfOutputs = node->numberOfOutputs(); 906 unsigned numberOfOutputs = node->numberOfOutputs();
907 for (unsigned i = 0; i < numberOfOutputs; ++i) 907 for (unsigned i = 0; i < numberOfOutputs; ++i)
908 m_dirtyAudioNodeOutputs.remove(node->output(i)); 908 m_dirtyAudioNodeOutputs.remove(node->output(i));
909 909
910 // Finally, delete it. 910 // Finally, clear the keep alive handle that keeps this
911 delete node; 911 // object from being collected.
912 node->clearKeepAlive();
912 } 913 }
913 m_isDeletionScheduled = false; 914 m_isDeletionScheduled = false;
914 } 915 }
915 } 916 }
916 917
917 void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunctio n) 918 void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunctio n)
918 { 919 {
919 ASSERT(isGraphOwner()); 920 ASSERT(isGraphOwner());
920 m_dirtySummingJunctions.add(summingJunction); 921 m_dirtySummingJunctions.add(summingJunction);
921 } 922 }
(...skipping 114 matching lines...) Expand 10 before | Expand all | Expand 10 after
1036 void AudioContext::incrementActiveSourceCount() 1037 void AudioContext::incrementActiveSourceCount()
1037 { 1038 {
1038 atomicIncrement(&m_activeSourceCount); 1039 atomicIncrement(&m_activeSourceCount);
1039 } 1040 }
1040 1041
1041 void AudioContext::decrementActiveSourceCount() 1042 void AudioContext::decrementActiveSourceCount()
1042 { 1043 {
1043 atomicDecrement(&m_activeSourceCount); 1044 atomicDecrement(&m_activeSourceCount);
1044 } 1045 }
1045 1046
1047 void AudioContext::trace(Visitor* visitor)
1048 {
1049 visitor->trace(m_renderTarget);
1050 visitor->trace(m_listener);
1051 }
1052
1046 } // namespace WebCore 1053 } // namespace WebCore
1047 1054
1048 #endif // ENABLE(WEB_AUDIO) 1055 #endif // ENABLE(WEB_AUDIO)
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698