Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(55)

Side by Side Diff: Source/modules/webaudio/AudioContext.cpp

Issue 205173002: Move webaudio to oilpan (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: WIP Created 6 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2010, Google Inc. All rights reserved. 2 * Copyright (C) 2010, Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions 5 * modification, are permitted provided that the following conditions
6 * are met: 6 * are met:
7 * 1. Redistributions of source code must retain the above copyright 7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer. 8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright 9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the 10 * notice, this list of conditions and the following disclaimer in the
(...skipping 71 matching lines...) Expand 10 before | Expand all | Expand 10 after
82 { 82 {
83 // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz, 83 // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz,
84 // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there. 84 // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there.
85 return sampleRate >= 44100 && sampleRate <= 96000; 85 return sampleRate >= 44100 && sampleRate <= 96000;
86 } 86 }
87 87
88 // Don't allow more than this number of simultaneous AudioContexts talking to hardware. 88 // Don't allow more than this number of simultaneous AudioContexts talking to hardware.
89 const unsigned MaxHardwareContexts = 6; 89 const unsigned MaxHardwareContexts = 6;
90 unsigned AudioContext::s_hardwareContextCount = 0; 90 unsigned AudioContext::s_hardwareContextCount = 0;
91 91
92 PassRefPtr<AudioContext> AudioContext::create(Document& document, ExceptionState & exceptionState) 92 PassRefPtrWillBeRawPtr<AudioContext> AudioContext::create(Document& document, Ex ceptionState& exceptionState)
93 { 93 {
94 ASSERT(isMainThread()); 94 ASSERT(isMainThread());
95 if (s_hardwareContextCount >= MaxHardwareContexts) { 95 if (s_hardwareContextCount >= MaxHardwareContexts) {
96 exceptionState.throwDOMException( 96 exceptionState.throwDOMException(
97 SyntaxError, 97 SyntaxError,
98 "number of hardware contexts reached maximum (" + String::number(Max HardwareContexts) + ")."); 98 "number of hardware contexts reached maximum (" + String::number(Max HardwareContexts) + ").");
99 return nullptr; 99 return nullptr;
100 } 100 }
101 101
102 RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(&document))); 102 RefPtrWillBeRawPtr<AudioContext> audioContext(adoptRefWillBeThreadSafeRefCou ntedGarbageCollected(new AudioContext(&document)));
103 audioContext->suspendIfNeeded(); 103 audioContext->suspendIfNeeded();
104 return audioContext.release(); 104 return audioContext.release();
105 } 105 }
106 106
107 PassRefPtr<AudioContext> AudioContext::create(Document& document, unsigned numbe rOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionS tate) 107 PassRefPtrWillBeRawPtr<AudioContext> AudioContext::create(Document& document, un signed numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState & exceptionState)
108 { 108 {
109 document.addConsoleMessage(JSMessageSource, WarningMessageLevel, "Deprecated AudioContext constructor: use OfflineAudioContext instead"); 109 document.addConsoleMessage(JSMessageSource, WarningMessageLevel, "Deprecated AudioContext constructor: use OfflineAudioContext instead");
110 return OfflineAudioContext::create(&document, numberOfChannels, numberOfFram es, sampleRate, exceptionState); 110 return OfflineAudioContext::create(&document, numberOfChannels, numberOfFram es, sampleRate, exceptionState);
111 } 111 }
112 112
113 // Constructor for rendering to the audio hardware. 113 // Constructor for rendering to the audio hardware.
114 AudioContext::AudioContext(Document* document) 114 AudioContext::AudioContext(Document* document)
115 : ActiveDOMObject(document) 115 : ActiveDOMObject(document)
116 , m_isStopScheduled(false) 116 , m_isStopScheduled(false)
117 , m_isCleared(false) 117 , m_isCleared(false)
(...skipping 154 matching lines...) Expand 10 before | Expand all | Expand 10 after
272 // FIXME: see if there's a more direct way to handle this issue. 272 // FIXME: see if there's a more direct way to handle this issue.
273 callOnMainThread(stopDispatch, this); 273 callOnMainThread(stopDispatch, this);
274 } 274 }
275 275
276 bool AudioContext::hasPendingActivity() const 276 bool AudioContext::hasPendingActivity() const
277 { 277 {
278 // According to spec AudioContext must die only after page navigates. 278 // According to spec AudioContext must die only after page navigates.
279 return !m_isCleared; 279 return !m_isCleared;
280 } 280 }
281 281
282 PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, si ze_t numberOfFrames, float sampleRate, ExceptionState& exceptionState) 282 PassRefPtrWillBeRawPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOf Channels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionStat e)
283 { 283 {
284 RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numb erOfFrames, sampleRate); 284 RefPtrWillBeRawPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfCh annels, numberOfFrames, sampleRate);
285 if (!audioBuffer.get()) { 285 if (!audioBuffer.get()) {
286 if (numberOfChannels > AudioContext::maxNumberOfChannels()) { 286 if (numberOfChannels > AudioContext::maxNumberOfChannels()) {
287 exceptionState.throwDOMException( 287 exceptionState.throwDOMException(
288 NotSupportedError, 288 NotSupportedError,
289 "requested number of channels (" + String::number(numberOfChanne ls) + ") exceeds maximum (" + String::number(AudioContext::maxNumberOfChannels() ) + ")"); 289 "requested number of channels (" + String::number(numberOfChanne ls) + ") exceeds maximum (" + String::number(AudioContext::maxNumberOfChannels() ) + ")");
290 } else if (sampleRate < AudioBuffer::minAllowedSampleRate() || sampleRat e > AudioBuffer::maxAllowedSampleRate()) { 290 } else if (sampleRate < AudioBuffer::minAllowedSampleRate() || sampleRat e > AudioBuffer::maxAllowedSampleRate()) {
291 exceptionState.throwDOMException( 291 exceptionState.throwDOMException(
292 NotSupportedError, 292 NotSupportedError,
293 "requested sample rate (" + String::number(sampleRate) 293 "requested sample rate (" + String::number(sampleRate)
294 + ") does not lie in the allowed range of " 294 + ") does not lie in the allowed range of "
295 + String::number(AudioBuffer::minAllowedSampleRate()) 295 + String::number(AudioBuffer::minAllowedSampleRate())
296 + "-" + String::number(AudioBuffer::maxAllowedSampleRate()) + " Hz"); 296 + "-" + String::number(AudioBuffer::maxAllowedSampleRate()) + " Hz");
297 } else if (!numberOfFrames) { 297 } else if (!numberOfFrames) {
298 exceptionState.throwDOMException( 298 exceptionState.throwDOMException(
299 NotSupportedError, 299 NotSupportedError,
300 "number of frames must be greater than 0."); 300 "number of frames must be greater than 0.");
301 } else { 301 } else {
302 exceptionState.throwDOMException( 302 exceptionState.throwDOMException(
303 NotSupportedError, 303 NotSupportedError,
304 "unable to create buffer of " + String::number(numberOfChannels) 304 "unable to create buffer of " + String::number(numberOfChannels)
305 + " channel(s) of " + String::number(numberOfFrames) 305 + " channel(s) of " + String::number(numberOfFrames)
306 + " frames each."); 306 + " frames each.");
307 } 307 }
308 return nullptr; 308 return nullptr;
309 } 309 }
310 310
311 return audioBuffer; 311 return audioBuffer;
312 } 312 }
313 313
314 PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, boo l mixToMono, ExceptionState& exceptionState) 314 PassRefPtrWillBeRawPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arra yBuffer, bool mixToMono, ExceptionState& exceptionState)
315 { 315 {
316 ASSERT(arrayBuffer); 316 ASSERT(arrayBuffer);
317 if (!arrayBuffer) { 317 if (!arrayBuffer) {
318 exceptionState.throwDOMException( 318 exceptionState.throwDOMException(
319 SyntaxError, 319 SyntaxError,
320 "invalid ArrayBuffer."); 320 "invalid ArrayBuffer.");
321 return nullptr; 321 return nullptr;
322 } 322 }
323 323
324 RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(array Buffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate()); 324 RefPtrWillBeRawPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFi leData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate());
325 if (!audioBuffer.get()) { 325 if (!audioBuffer.get()) {
326 exceptionState.throwDOMException( 326 exceptionState.throwDOMException(
327 SyntaxError, 327 SyntaxError,
328 "invalid audio data in ArrayBuffer."); 328 "invalid audio data in ArrayBuffer.");
329 return nullptr; 329 return nullptr;
330 } 330 }
331 331
332 return audioBuffer; 332 return audioBuffer;
333 } 333 }
334 334
335 void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassOwnPtr<AudioBuffe rCallback> successCallback, PassOwnPtr<AudioBufferCallback> errorCallback, Excep tionState& exceptionState) 335 void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassOwnPtr<AudioBuffe rCallback> successCallback, PassOwnPtr<AudioBufferCallback> errorCallback, Excep tionState& exceptionState)
336 { 336 {
337 if (!audioData) { 337 if (!audioData) {
338 exceptionState.throwDOMException( 338 exceptionState.throwDOMException(
339 SyntaxError, 339 SyntaxError,
340 "invalid ArrayBuffer for audioData."); 340 "invalid ArrayBuffer for audioData.");
341 return; 341 return;
342 } 342 }
343 m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCa llback); 343 m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCa llback);
344 } 344 }
345 345
346 PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource() 346 PassRefPtrWillBeRawPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
347 { 347 {
348 ASSERT(isMainThread()); 348 ASSERT(isMainThread());
349 lazyInitialize(); 349 lazyInitialize();
350 RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_d estinationNode->sampleRate()); 350 RefPtrWillBeRawPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::crea te(this, m_destinationNode->sampleRate());
351 351
352 // Because this is an AudioScheduledSourceNode, the context keeps a referenc e until it has finished playing. 352 // Because this is an AudioScheduledSourceNode, the context keeps a referenc e until it has finished playing.
353 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext: :notifyNodeFinishedProcessing(). 353 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext: :notifyNodeFinishedProcessing().
354 refNode(node.get()); 354 refNode(node.get());
355 355
356 return node; 356 return node;
357 } 357 }
358 358
359 PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(H TMLMediaElement* mediaElement, ExceptionState& exceptionState) 359 PassRefPtrWillBeRawPtr<MediaElementAudioSourceNode> AudioContext::createMediaEle mentSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState)
360 { 360 {
361 if (!mediaElement) { 361 if (!mediaElement) {
362 exceptionState.throwDOMException( 362 exceptionState.throwDOMException(
363 InvalidStateError, 363 InvalidStateError,
364 "invalid HTMLMedialElement."); 364 "invalid HTMLMedialElement.");
365 return nullptr; 365 return nullptr;
366 } 366 }
367 367
368 ASSERT(isMainThread()); 368 ASSERT(isMainThread());
369 lazyInitialize(); 369 lazyInitialize();
370 370
371 // First check if this media element already has a source node. 371 // First check if this media element already has a source node.
372 if (mediaElement->audioSourceNode()) { 372 if (mediaElement->audioSourceNode()) {
373 exceptionState.throwDOMException( 373 exceptionState.throwDOMException(
374 InvalidStateError, 374 InvalidStateError,
375 "invalid HTMLMediaElement."); 375 "invalid HTMLMediaElement.");
376 return nullptr; 376 return nullptr;
377 } 377 }
378 378
379 RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::crea te(this, mediaElement); 379 RefPtrWillBeRawPtr<MediaElementAudioSourceNode> node = MediaElementAudioSour ceNode::create(this, mediaElement);
380 380
381 mediaElement->setAudioSourceNode(node.get()); 381 mediaElement->setAudioSourceNode(node.get());
382 382
383 refNode(node.get()); // context keeps reference until node is disconnected 383 refNode(node.get()); // context keeps reference until node is disconnected
384 return node; 384 return node;
385 } 385 }
386 386
387 PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(Med iaStream* mediaStream, ExceptionState& exceptionState) 387 PassRefPtrWillBeRawPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStre amSource(MediaStream* mediaStream, ExceptionState& exceptionState)
388 { 388 {
389 if (!mediaStream) { 389 if (!mediaStream) {
390 exceptionState.throwDOMException( 390 exceptionState.throwDOMException(
391 InvalidStateError, 391 InvalidStateError,
392 "invalid MediaStream source"); 392 "invalid MediaStream source");
393 return nullptr; 393 return nullptr;
394 } 394 }
395 395
396 ASSERT(isMainThread()); 396 ASSERT(isMainThread());
397 lazyInitialize(); 397 lazyInitialize();
398 398
399 MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks(); 399 MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks();
400 if (audioTracks.isEmpty()) { 400 if (audioTracks.isEmpty()) {
401 exceptionState.throwDOMException( 401 exceptionState.throwDOMException(
402 InvalidStateError, 402 InvalidStateError,
403 "MediaStream has no audio track"); 403 "MediaStream has no audio track");
404 return nullptr; 404 return nullptr;
405 } 405 }
406 406
407 // Use the first audio track in the media stream. 407 // Use the first audio track in the media stream.
408 RefPtr<MediaStreamTrack> audioTrack = audioTracks[0]; 408 RefPtr<MediaStreamTrack> audioTrack = audioTracks[0];
409 OwnPtr<AudioSourceProvider> provider = audioTrack->createWebAudioSource(); 409 OwnPtr<AudioSourceProvider> provider = audioTrack->createWebAudioSource();
410 RefPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create (this, mediaStream, audioTrack.get(), provider.release()); 410 RefPtrWillBeRawPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSource Node::create(this, mediaStream, audioTrack.get(), provider.release());
411 411
412 // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams. 412 // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
413 node->setFormat(2, sampleRate()); 413 node->setFormat(2, sampleRate());
414 414
415 refNode(node.get()); // context keeps reference until node is disconnected 415 refNode(node.get()); // context keeps reference until node is disconnected
416 return node; 416 return node;
417 } 417 }
418 418
419 PassRefPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDesti nation() 419 PassRefPtrWillBeRawPtr<MediaStreamAudioDestinationNode> AudioContext::createMedi aStreamDestination()
420 { 420 {
421 // Set number of output channels to stereo by default. 421 // Set number of output channels to stereo by default.
422 return MediaStreamAudioDestinationNode::create(this, 2); 422 return MediaStreamAudioDestinationNode::create(this, 2);
423 } 423 }
424 424
425 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(ExceptionSta te& exceptionState) 425 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor( ExceptionState& exceptionState)
426 { 426 {
427 // Set number of input/output channels to stereo by default. 427 // Set number of input/output channels to stereo by default.
428 return createScriptProcessor(0, 2, 2, exceptionState); 428 return createScriptProcessor(0, 2, 2, exceptionState);
429 } 429 }
430 430
431 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t buffe rSize, ExceptionState& exceptionState) 431 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor( size_t bufferSize, ExceptionState& exceptionState)
432 { 432 {
433 // Set number of input/output channels to stereo by default. 433 // Set number of input/output channels to stereo by default.
434 return createScriptProcessor(bufferSize, 2, 2, exceptionState); 434 return createScriptProcessor(bufferSize, 2, 2, exceptionState);
435 } 435 }
436 436
437 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t buffe rSize, size_t numberOfInputChannels, ExceptionState& exceptionState) 437 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor( size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState)
438 { 438 {
439 // Set number of output channels to stereo by default. 439 // Set number of output channels to stereo by default.
440 return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exception State); 440 return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exception State);
441 } 441 }
442 442
443 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t buffe rSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionSta te& exceptionState) 443 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor( size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState)
444 { 444 {
445 ASSERT(isMainThread()); 445 ASSERT(isMainThread());
446 lazyInitialize(); 446 lazyInitialize();
447 RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_desti nationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChann els); 447 RefPtrWillBeRawPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(t his, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberO fOutputChannels);
448 448
449 if (!node.get()) { 449 if (!node.get()) {
450 if (!numberOfInputChannels && !numberOfOutputChannels) { 450 if (!numberOfInputChannels && !numberOfOutputChannels) {
451 exceptionState.throwDOMException( 451 exceptionState.throwDOMException(
452 IndexSizeError, 452 IndexSizeError,
453 "number of input channels and output channels cannot both be zer o."); 453 "number of input channels and output channels cannot both be zer o.");
454 } else if (numberOfInputChannels > AudioContext::maxNumberOfChannels()) { 454 } else if (numberOfInputChannels > AudioContext::maxNumberOfChannels()) {
455 exceptionState.throwDOMException( 455 exceptionState.throwDOMException(
456 IndexSizeError, 456 IndexSizeError,
457 "number of input channels (" + String::number(numberOfInputChann els) 457 "number of input channels (" + String::number(numberOfInputChann els)
(...skipping 11 matching lines...) Expand all
469 "buffer size (" + String::number(bufferSize) 469 "buffer size (" + String::number(bufferSize)
470 + ") must be a power of two between 256 and 16384."); 470 + ") must be a power of two between 256 and 16384.");
471 } 471 }
472 return nullptr; 472 return nullptr;
473 } 473 }
474 474
475 refNode(node.get()); // context keeps reference until we stop making javascr ipt rendering callbacks 475 refNode(node.get()); // context keeps reference until we stop making javascr ipt rendering callbacks
476 return node; 476 return node;
477 } 477 }
478 478
479 PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter() 479 PassRefPtrWillBeRawPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
480 { 480 {
481 ASSERT(isMainThread()); 481 ASSERT(isMainThread());
482 lazyInitialize(); 482 lazyInitialize();
483 return BiquadFilterNode::create(this, m_destinationNode->sampleRate()); 483 return BiquadFilterNode::create(this, m_destinationNode->sampleRate());
484 } 484 }
485 485
486 PassRefPtr<WaveShaperNode> AudioContext::createWaveShaper() 486 PassRefPtrWillBeRawPtr<WaveShaperNode> AudioContext::createWaveShaper()
487 { 487 {
488 ASSERT(isMainThread()); 488 ASSERT(isMainThread());
489 lazyInitialize(); 489 lazyInitialize();
490 return WaveShaperNode::create(this); 490 return WaveShaperNode::create(this);
491 } 491 }
492 492
493 PassRefPtr<PannerNode> AudioContext::createPanner() 493 PassRefPtrWillBeRawPtr<PannerNode> AudioContext::createPanner()
494 { 494 {
495 ASSERT(isMainThread()); 495 ASSERT(isMainThread());
496 lazyInitialize(); 496 lazyInitialize();
497 return PannerNode::create(this, m_destinationNode->sampleRate()); 497 return PannerNode::create(this, m_destinationNode->sampleRate());
498 } 498 }
499 499
500 PassRefPtr<ConvolverNode> AudioContext::createConvolver() 500 PassRefPtrWillBeRawPtr<ConvolverNode> AudioContext::createConvolver()
501 { 501 {
502 ASSERT(isMainThread()); 502 ASSERT(isMainThread());
503 lazyInitialize(); 503 lazyInitialize();
504 return ConvolverNode::create(this, m_destinationNode->sampleRate()); 504 return ConvolverNode::create(this, m_destinationNode->sampleRate());
505 } 505 }
506 506
507 PassRefPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor() 507 PassRefPtrWillBeRawPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompr essor()
508 { 508 {
509 ASSERT(isMainThread()); 509 ASSERT(isMainThread());
510 lazyInitialize(); 510 lazyInitialize();
511 return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate()) ; 511 return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate()) ;
512 } 512 }
513 513
514 PassRefPtr<AnalyserNode> AudioContext::createAnalyser() 514 PassRefPtrWillBeRawPtr<AnalyserNode> AudioContext::createAnalyser()
515 { 515 {
516 ASSERT(isMainThread()); 516 ASSERT(isMainThread());
517 lazyInitialize(); 517 lazyInitialize();
518 return AnalyserNode::create(this, m_destinationNode->sampleRate()); 518 return AnalyserNode::create(this, m_destinationNode->sampleRate());
519 } 519 }
520 520
521 PassRefPtr<GainNode> AudioContext::createGain() 521 PassRefPtrWillBeRawPtr<GainNode> AudioContext::createGain()
522 { 522 {
523 ASSERT(isMainThread()); 523 ASSERT(isMainThread());
524 lazyInitialize(); 524 lazyInitialize();
525 return GainNode::create(this, m_destinationNode->sampleRate()); 525 return GainNode::create(this, m_destinationNode->sampleRate());
526 } 526 }
527 527
528 PassRefPtr<DelayNode> AudioContext::createDelay(ExceptionState& exceptionState) 528 PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(ExceptionState& exce ptionState)
529 { 529 {
530 const double defaultMaxDelayTime = 1; 530 const double defaultMaxDelayTime = 1;
531 return createDelay(defaultMaxDelayTime, exceptionState); 531 return createDelay(defaultMaxDelayTime, exceptionState);
532 } 532 }
533 533
534 PassRefPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionSt ate& exceptionState) 534 PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState)
535 { 535 {
536 ASSERT(isMainThread()); 536 ASSERT(isMainThread());
537 lazyInitialize(); 537 lazyInitialize();
538 RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRa te(), maxDelayTime, exceptionState); 538 RefPtrWillBeRawPtr<DelayNode> node = DelayNode::create(this, m_destinationNo de->sampleRate(), maxDelayTime, exceptionState);
539 if (exceptionState.hadException()) 539 if (exceptionState.hadException())
540 return nullptr; 540 return nullptr;
541 return node; 541 return node;
542 } 542 }
543 543
544 PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionSta te& exceptionState) 544 PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter( ExceptionState& exceptionState)
545 { 545 {
546 const unsigned ChannelSplitterDefaultNumberOfOutputs = 6; 546 const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
547 return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptio nState); 547 return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptio nState);
548 } 548 }
549 549
550 PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numbe rOfOutputs, ExceptionState& exceptionState) 550 PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter( size_t numberOfOutputs, ExceptionState& exceptionState)
551 { 551 {
552 ASSERT(isMainThread()); 552 ASSERT(isMainThread());
553 lazyInitialize(); 553 lazyInitialize();
554 554
555 RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_desti nationNode->sampleRate(), numberOfOutputs); 555 RefPtrWillBeRawPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(t his, m_destinationNode->sampleRate(), numberOfOutputs);
556 556
557 if (!node.get()) { 557 if (!node.get()) {
558 exceptionState.throwDOMException( 558 exceptionState.throwDOMException(
559 IndexSizeError, 559 IndexSizeError,
560 "number of outputs (" + String::number(numberOfOutputs) 560 "number of outputs (" + String::number(numberOfOutputs)
561 + ") must be between 1 and " 561 + ") must be between 1 and "
562 + String::number(AudioContext::maxNumberOfChannels()) + "."); 562 + String::number(AudioContext::maxNumberOfChannels()) + ".");
563 return nullptr; 563 return nullptr;
564 } 564 }
565 565
566 return node; 566 return node;
567 } 567 }
568 568
569 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState& exceptionState) 569 PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(Exce ptionState& exceptionState)
570 { 570 {
571 const unsigned ChannelMergerDefaultNumberOfInputs = 6; 571 const unsigned ChannelMergerDefaultNumberOfInputs = 6;
572 return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionStat e); 572 return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionStat e);
573 } 573 }
574 574
575 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfI nputs, ExceptionState& exceptionState) 575 PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(size _t numberOfInputs, ExceptionState& exceptionState)
576 { 576 {
577 ASSERT(isMainThread()); 577 ASSERT(isMainThread());
578 lazyInitialize(); 578 lazyInitialize();
579 579
580 RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinati onNode->sampleRate(), numberOfInputs); 580 RefPtrWillBeRawPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);
581 581
582 if (!node.get()) { 582 if (!node.get()) {
583 exceptionState.throwDOMException( 583 exceptionState.throwDOMException(
584 IndexSizeError, 584 IndexSizeError,
585 "number of inputs (" + String::number(numberOfInputs) 585 "number of inputs (" + String::number(numberOfInputs)
586 + ") must be between 1 and " 586 + ") must be between 1 and "
587 + String::number(AudioContext::maxNumberOfChannels()) + "."); 587 + String::number(AudioContext::maxNumberOfChannels()) + ".");
588 return nullptr; 588 return nullptr;
589 } 589 }
590 590
591 return node; 591 return node;
592 } 592 }
593 593
594 PassRefPtr<OscillatorNode> AudioContext::createOscillator() 594 PassRefPtrWillBeRawPtr<OscillatorNode> AudioContext::createOscillator()
595 { 595 {
596 ASSERT(isMainThread()); 596 ASSERT(isMainThread());
597 lazyInitialize(); 597 lazyInitialize();
598 598
599 RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode ->sampleRate()); 599 RefPtrWillBeRawPtr<OscillatorNode> node = OscillatorNode::create(this, m_des tinationNode->sampleRate());
600 600
601 // Because this is an AudioScheduledSourceNode, the context keeps a referenc e until it has finished playing. 601 // Because this is an AudioScheduledSourceNode, the context keeps a referenc e until it has finished playing.
602 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext: :notifyNodeFinishedProcessing(). 602 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext: :notifyNodeFinishedProcessing().
603 refNode(node.get()); 603 refNode(node.get());
604 604
605 return node; 605 return node;
606 } 606 }
607 607
608 PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Fl oat32Array* imag, ExceptionState& exceptionState) 608 PassRefPtrWillBeRawPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Arr ay* real, Float32Array* imag, ExceptionState& exceptionState)
609 { 609 {
610 ASSERT(isMainThread()); 610 ASSERT(isMainThread());
611 611
612 if (!real) { 612 if (!real) {
613 exceptionState.throwDOMException( 613 exceptionState.throwDOMException(
614 SyntaxError, 614 SyntaxError,
615 "invalid real array"); 615 "invalid real array");
616 return nullptr; 616 return nullptr;
617 } 617 }
618 618
(...skipping 262 matching lines...) Expand 10 before | Expand all | Expand 10 after
881 881
882 context->deleteMarkedNodes(); 882 context->deleteMarkedNodes();
883 context->deref(); 883 context->deref();
884 } 884 }
885 885
886 void AudioContext::deleteMarkedNodes() 886 void AudioContext::deleteMarkedNodes()
887 { 887 {
888 ASSERT(isMainThread()); 888 ASSERT(isMainThread());
889 889
890 // Protect this object from being deleted before we release the mutex locked by AutoLocker. 890 // Protect this object from being deleted before we release the mutex locked by AutoLocker.
891 RefPtr<AudioContext> protect(this); 891 RefPtrWillBeRawPtr<AudioContext> protect(this);
892 { 892 {
893 AutoLocker locker(this); 893 AutoLocker locker(this);
894 894
895 while (size_t n = m_nodesToDelete.size()) { 895 while (size_t n = m_nodesToDelete.size()) {
896 AudioNode* node = m_nodesToDelete[n - 1]; 896 AudioNode* node = m_nodesToDelete[n - 1];
897 m_nodesToDelete.removeLast(); 897 m_nodesToDelete.removeLast();
898 898
899 // Before deleting the node, clear out any AudioNodeInputs from m_di rtySummingJunctions. 899 // Before deleting the node, clear out any AudioNodeInputs from m_di rtySummingJunctions.
900 unsigned numberOfInputs = node->numberOfInputs(); 900 unsigned numberOfInputs = node->numberOfInputs();
901 for (unsigned i = 0; i < numberOfInputs; ++i) 901 for (unsigned i = 0; i < numberOfInputs; ++i)
902 m_dirtySummingJunctions.remove(node->input(i)); 902 m_dirtySummingJunctions.remove(node->input(i));
903 903
904 // Before deleting the node, clear out any AudioNodeOutputs from m_d irtyAudioNodeOutputs. 904 // Before deleting the node, clear out any AudioNodeOutputs from m_d irtyAudioNodeOutputs.
905 unsigned numberOfOutputs = node->numberOfOutputs(); 905 unsigned numberOfOutputs = node->numberOfOutputs();
906 for (unsigned i = 0; i < numberOfOutputs; ++i) 906 for (unsigned i = 0; i < numberOfOutputs; ++i)
907 m_dirtyAudioNodeOutputs.remove(node->output(i)); 907 m_dirtyAudioNodeOutputs.remove(node->output(i));
908 908 #if ENABLE(OILPAN)
909 // Finally, delete it. 909 // Finally, clear the keep alive handle that keeps this
910 delete node; 910 // object from being collected.
911 node->clearKeepAlive();
912 #endif
Mads Ager (chromium) 2014/03/27 11:06:49 You need to keep the code for the non-oilpan version.
haraken 2014/03/27 11:44:05 You'll need to call 'delete node' in #if !ENABLE(OILPAN).
keishi 2014/04/03 06:53:19 Done.
911 } 913 }
912 m_isDeletionScheduled = false; 914 m_isDeletionScheduled = false;
913 } 915 }
914 } 916 }
915 917
916 void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunctio n) 918 void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunctio n)
917 { 919 {
918 ASSERT(isGraphOwner()); 920 ASSERT(isGraphOwner());
919 m_dirtySummingJunctions.add(summingJunction); 921 m_dirtySummingJunctions.add(summingJunction);
920 } 922 }
(...skipping 114 matching lines...) Expand 10 before | Expand all | Expand 10 after
1035 void AudioContext::incrementActiveSourceCount() 1037 void AudioContext::incrementActiveSourceCount()
1036 { 1038 {
1037 atomicIncrement(&m_activeSourceCount); 1039 atomicIncrement(&m_activeSourceCount);
1038 } 1040 }
1039 1041
1040 void AudioContext::decrementActiveSourceCount() 1042 void AudioContext::decrementActiveSourceCount()
1041 { 1043 {
1042 atomicDecrement(&m_activeSourceCount); 1044 atomicDecrement(&m_activeSourceCount);
1043 } 1045 }
1044 1046
1047 void AudioContext::trace(Visitor* visitor)
1048 {
1049 visitor->trace(m_renderTarget);
1050 visitor->trace(m_listener);
1051 }
1052
1045 } // namespace WebCore 1053 } // namespace WebCore
1046 1054
1047 #endif // ENABLE(WEB_AUDIO) 1055 #endif // ENABLE(WEB_AUDIO)
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698