OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2010, Google Inc. All rights reserved. | 2 * Copyright (C) 2010, Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
6 * are met: | 6 * are met: |
7 * 1. Redistributions of source code must retain the above copyright | 7 * 1. Redistributions of source code must retain the above copyright |
8 * notice, this list of conditions and the following disclaimer. | 8 * notice, this list of conditions and the following disclaimer. |
9 * 2. Redistributions in binary form must reproduce the above copyright | 9 * 2. Redistributions in binary form must reproduce the above copyright |
10 * notice, this list of conditions and the following disclaimer in the | 10 * notice, this list of conditions and the following disclaimer in the |
(...skipping 10 matching lines...) |
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | 21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
22 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 22 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
23 */ | 23 */ |
24 | 24 |
25 #include "config.h" | 25 #include "config.h" |
26 | 26 |
27 #if ENABLE(WEB_AUDIO) | 27 #if ENABLE(WEB_AUDIO) |
28 | 28 |
29 #include "modules/webaudio/AudioContext.h" | 29 #include "modules/webaudio/AudioContext.h" |
30 | 30 |
| 31 #include "bindings/v8/ExceptionMessages.h" |
31 #include "bindings/v8/ExceptionState.h" | 32 #include "bindings/v8/ExceptionState.h" |
32 #include "core/dom/Document.h" | 33 #include "core/dom/Document.h" |
33 #include "core/dom/ExceptionCode.h" | 34 #include "core/dom/ExceptionCode.h" |
34 #include "core/html/HTMLMediaElement.h" | 35 #include "core/html/HTMLMediaElement.h" |
35 #include "core/inspector/ScriptCallStack.h" | 36 #include "core/inspector/ScriptCallStack.h" |
36 #include "core/platform/audio/FFTFrame.h" | 37 #include "core/platform/audio/FFTFrame.h" |
37 #include "core/platform/audio/HRTFDatabaseLoader.h" | 38 #include "core/platform/audio/HRTFDatabaseLoader.h" |
38 #include "core/platform/audio/HRTFPanner.h" | 39 #include "core/platform/audio/HRTFPanner.h" |
39 #include "modules/mediastream/MediaStream.h" | 40 #include "modules/mediastream/MediaStream.h" |
40 #include "modules/webaudio/AnalyserNode.h" | 41 #include "modules/webaudio/AnalyserNode.h" |
(...skipping 245 matching lines...) |
286 // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other | 287 // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other |
287 // ActiveDOMObjects so let's schedule uninitialize() to be called later. | 288 // ActiveDOMObjects so let's schedule uninitialize() to be called later. |
288 // FIXME: see if there's a more direct way to handle this issue. | 289 // FIXME: see if there's a more direct way to handle this issue. |
289 callOnMainThread(stopDispatch, this); | 290 callOnMainThread(stopDispatch, this); |
290 } | 291 } |
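The deferred teardown above works because callOnMainThread() takes a plain function pointer plus an opaque void* that the dispatch stub casts back to the AudioContext before calling uninitialize(). The real stopDispatch() is defined in the portion of the file elided above; the standalone sketch below only illustrates that trampoline shape, and every name in it (FakeContext, stopDispatchSketch, the queue stand-in) is invented for illustration rather than taken from Blink.

    #include <functional>
    #include <iostream>
    #include <vector>

    // Stand-in for the object whose teardown is being deferred.
    struct FakeContext {
        void uninitialize() { std::cout << "uninitialize() runs later, on the main thread\n"; }
    };

    // Stand-in for WTF::callOnMainThread(void (*)(void*), void*): just queues the call.
    static std::vector<std::function<void()>> mainThreadQueue;
    static void callOnMainThreadStandIn(void (*task)(void*), void* userData)
    {
        mainThreadQueue.push_back([task, userData] { task(userData); });
    }

    // The trampoline: recover the typed object from the opaque pointer, then finish the work.
    static void stopDispatchSketch(void* userData)
    {
        static_cast<FakeContext*>(userData)->uninitialize();
    }

    int main()
    {
        FakeContext context;
        callOnMainThreadStandIn(stopDispatchSketch, &context); // schedule, do not run yet
        for (auto& task : mainThreadQueue)                     // later: drain the "main thread" queue
            task();
        return 0;
    }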
291 | 292 |
292 PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& es) | 293 PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& es) |
293 { | 294 { |
294 RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate); | 295 RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate); |
295 if (!audioBuffer.get()) { | 296 if (!audioBuffer.get()) { |
296 es.throwUninformativeAndGenericDOMException(SyntaxError); | 297 if (numberOfChannels > AudioContext::maxNumberOfChannels()) { |
| 298 es.throwDOMException( |
| 299 SyntaxError, |
| 300 ExceptionMessages::failedToConstruct( |
| 301 "AudioBuffer", |
| 302 "requested number of channels (" + String::number(numberOfCh
annels) + ") exceeds maximum (" + String::number(AudioContext::maxNumberOfChanne
ls()) + ")")); |
| 303 } else if (sampleRate < AudioBuffer::minAllowedSampleRate() || sampleRate > AudioBuffer::maxAllowedSampleRate()) { |
| 304 es.throwDOMException( |
| 305 SyntaxError, |
| 306 ExceptionMessages::failedToConstruct( |
| 307 "AudioBuffer", |
| 308 "requested sample rate (" + String::number(sampleRate) |
| 309 + ") does not lie in the allowed range of " |
| 310 + String::number(AudioBuffer::minAllowedSampleRate()) |
| 311 + "-" + String::number(AudioBuffer::maxAllowedSampleRate())
+ " Hz")); |
| 312 } else { |
| 313 es.throwDOMException( |
| 314 SyntaxError, |
| 315 ExceptionMessages::failedToConstruct( |
| 316 "AudioBuffer", |
| 317 "invalid number of channels, frames, or sample rate.")); |
| 318 } |
297 return 0; | 319 return 0; |
298 } | 320 } |
299 | 321 |
300 return audioBuffer; | 322 return audioBuffer; |
301 } | 323 } |
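Every hunk in this change applies the same pattern: replace throwUninformativeAndGenericDOMException() with throwDOMException() plus a message assembled by ExceptionMessages::failedToConstruct() (or failedToExecute() for methods). Judging only from the call sites in this diff, failedToConstruct(type, detail) prefixes the detail with the interface being constructed; the self-contained sketch below models roughly what a page author would now see for the over-channel case. The prefix wording and the limit of 32 channels are assumptions, not values taken from Blink.

    #include <iostream>
    #include <string>

    // Rough model of ExceptionMessages::failedToConstruct(); the real prefix text may differ.
    static std::string failedToConstructSketch(const std::string& type, const std::string& detail)
    {
        return "Failed to construct '" + type + "': " + detail;
    }

    int main()
    {
        const unsigned numberOfChannels = 99;    // what the page asked for
        const unsigned maxNumberOfChannels = 32; // illustrative limit, not queried from AudioContext
        std::cout << failedToConstructSketch(
            "AudioBuffer",
            "requested number of channels (" + std::to_string(numberOfChannels)
                + ") exceeds maximum (" + std::to_string(maxNumberOfChannels) + ")")
                  << std::endl;
        return 0;
    }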
302 | 324 |
303 PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionState& es) | 325 PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionState& es) |
304 { | 326 { |
305 ASSERT(arrayBuffer); | 327 ASSERT(arrayBuffer); |
306 if (!arrayBuffer) { | 328 if (!arrayBuffer) { |
307 es.throwUninformativeAndGenericDOMException(SyntaxError); | 329 es.throwDOMException( |
| 330 SyntaxError, |
| 331 ExceptionMessages::failedToConstruct( |
| 332 "AudioBuffer", |
| 333 "invalid ArrayBuffer.")); |
308 return 0; | 334 return 0; |
309 } | 335 } |
310 | 336 |
311 RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate()); | 337 RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate()); |
312 if (!audioBuffer.get()) { | 338 if (!audioBuffer.get()) { |
313 es.throwUninformativeAndGenericDOMException(SyntaxError); | 339 es.throwDOMException( |
| 340 SyntaxError, |
| 341 ExceptionMessages::failedToConstruct( |
| 342 "AudioBuffer", |
| 343 "invalid audio data in ArrayBuffer.")); |
314 return 0; | 344 return 0; |
315 } | 345 } |
316 | 346 |
317 return audioBuffer; | 347 return audioBuffer; |
318 } | 348 } |
319 | 349 |
320 void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback, ExceptionState& es) | 350 void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback, ExceptionState& es) |
321 { | 351 { |
322 if (!audioData) { | 352 if (!audioData) { |
323 es.throwUninformativeAndGenericDOMException(SyntaxError); | 353 es.throwDOMException( |
| 354 SyntaxError, |
| 355 ExceptionMessages::failedToExecute( |
| 356 "decodeAudioData", |
| 357 "AudioContext", |
| 358 "invalid ArrayBuffer for audioData.")); |
324 return; | 359 return; |
325 } | 360 } |
326 m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback); | 361 m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback); |
327 } | 362 } |
328 | 363 |
329 PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource() | 364 PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource() |
330 { | 365 { |
331 ASSERT(isMainThread()); | 366 ASSERT(isMainThread()); |
332 lazyInitialize(); | 367 lazyInitialize(); |
333 RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate()); | 368 RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate()); |
334 | 369 |
335 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing. | 370 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing. |
336 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing(). | 371 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing(). |
337 refNode(node.get()); | 372 refNode(node.get()); |
338 | 373 |
339 return node; | 374 return node; |
340 } | 375 } |
341 | 376 |
342 PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& es) | 377 PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& es) |
343 { | 378 { |
344 ASSERT(mediaElement); | |
345 if (!mediaElement) { | 379 if (!mediaElement) { |
346 es.throwUninformativeAndGenericDOMException(InvalidStateError); | 380 es.throwDOMException( |
| 381 InvalidStateError, |
| 382 ExceptionMessages::failedToConstruct( |
| 383 "MediaElementAudioSourceNode", |
| 384 "invalid HTMLMedialElement.")); |
347 return 0; | 385 return 0; |
348 } | 386 } |
349 | 387 |
350 ASSERT(isMainThread()); | 388 ASSERT(isMainThread()); |
351 lazyInitialize(); | 389 lazyInitialize(); |
352 | 390 |
353 // First check if this media element already has a source node. | 391 // First check if this media element already has a source node. |
354 if (mediaElement->audioSourceNode()) { | 392 if (mediaElement->audioSourceNode()) { |
355 es.throwUninformativeAndGenericDOMException(InvalidStateError); | 393 es.throwDOMException( |
| 394 InvalidStateError, |
| 395 ExceptionMessages::failedToConstruct( |
| 396 "MediaElementAudioSourceNode", |
| 397 "invalid HTMLMediaElement.")); |
356 return 0; | 398 return 0; |
357 } | 399 } |
358 | 400 |
359 RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement); | 401 RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement); |
360 | 402 |
361 mediaElement->setAudioSourceNode(node.get()); | 403 mediaElement->setAudioSourceNode(node.get()); |
362 | 404 |
363 refNode(node.get()); // context keeps reference until node is disconnected | 405 refNode(node.get()); // context keeps reference until node is disconnected |
364 return node; | 406 return node; |
365 } | 407 } |
366 | 408 |
367 PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& es) | 409 PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& es) |
368 { | 410 { |
369 ASSERT(mediaStream); | |
370 if (!mediaStream) { | 411 if (!mediaStream) { |
371 es.throwUninformativeAndGenericDOMException(InvalidStateError); | 412 es.throwDOMException( |
| 413 InvalidStateError, |
| 414 ExceptionMessages::failedToConstruct( |
| 415 "MediaStreamAudioSourceNode", |
| 416 "invalid MediaStream source")); |
372 return 0; | 417 return 0; |
373 } | 418 } |
374 | 419 |
375 ASSERT(isMainThread()); | 420 ASSERT(isMainThread()); |
376 lazyInitialize(); | 421 lazyInitialize(); |
377 | 422 |
378 AudioSourceProvider* provider = 0; | 423 AudioSourceProvider* provider = 0; |
379 | 424 |
380 MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks(); | 425 MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks(); |
381 | 426 |
(...skipping 34 matching lines...) |
416 return createScriptProcessor(bufferSize, numberOfInputChannels, 2, es); | 461 return createScriptProcessor(bufferSize, numberOfInputChannels, 2, es); |
417 } | 462 } |
418 | 463 |
419 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& es) | 464 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& es) |
420 { | 465 { |
421 ASSERT(isMainThread()); | 466 ASSERT(isMainThread()); |
422 lazyInitialize(); | 467 lazyInitialize(); |
423 RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels); | 468 RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels); |
424 | 469 |
425 if (!node.get()) { | 470 if (!node.get()) { |
426 es.throwUninformativeAndGenericDOMException(SyntaxError); | 471 if (!numberOfInputChannels && !numberOfOutputChannels) { |
| 472 es.throwDOMException( |
| 473 SyntaxError, |
| 474 ExceptionMessages::failedToConstruct( |
| 475 "ScriptProcessorNode", |
| 476 "number of input channels and output channels cannot both be
zero.")); |
| 477 } else if (numberOfInputChannels > AudioContext::maxNumberOfChannels()) { |
| 478 es.throwDOMException( |
| 479 SyntaxError, |
| 480 ExceptionMessages::failedToConstruct( |
| 481 "ScriptProcessorNode", |
| 482 "number of input channels (" + String::number(numberOfInputC
hannels) |
| 483 + ") exceeds maximum (" |
| 484 + String::number(AudioContext::maxNumberOfChannels()) + ").")); |
| 485 } else if (numberOfOutputChannels > AudioContext::maxNumberOfChannels()) { |
| 486 es.throwDOMException( |
| 487 SyntaxError, |
| 488 ExceptionMessages::failedToConstruct( |
| 489 "ScriptProcessorNode", |
| 490 "number of output channels (" + String::number(numberOfInput
Channels) |
| 491 + ") exceeds maximum (" |
| 492 + String::number(AudioContext::maxNumberOfChannels()) + ").")); |
| 493 } else { |
| 494 es.throwDOMException( |
| 495 SyntaxError, |
| 496 ExceptionMessages::failedToConstruct( |
| 497 "ScriptProcessorNode", |
| 498 "buffer size (" + String::number(bufferSize) |
| 499 + ") must be a power of two between 256 and 16384.")); |
| 500 } |
427 return 0; | 501 return 0; |
428 } | 502 } |
429 | 503 |
430 refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks | 504 refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks |
431 return node; | 505 return node; |
432 } | 506 } |
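The final else branch above pins the remaining failure on bufferSize, quoting the constraint that ScriptProcessorNode::create() presumably enforces: a power of two between 256 and 16384. A self-contained sketch of that check follows; the helper name is invented for illustration and is not a Blink function.

    #include <cstddef>
    #include <iostream>

    // Matches the constraint quoted in the error message: a power of two in [256, 16384].
    static bool isAllowedScriptProcessorBufferSize(std::size_t bufferSize)
    {
        if (bufferSize < 256 || bufferSize > 16384)
            return false;
        return (bufferSize & (bufferSize - 1)) == 0; // power-of-two test
    }

    int main()
    {
        for (std::size_t candidate : {128, 256, 1000, 1024, 16384, 32768})
            std::cout << candidate << (isAllowedScriptProcessorBufferSize(candidate) ? ": ok" : ": rejected") << std::endl;
        return 0;
    }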
433 | 507 |
434 PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter() | 508 PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter() |
435 { | 509 { |
436 ASSERT(isMainThread()); | 510 ASSERT(isMainThread()); |
(...skipping 66 matching lines...) |
503 } | 577 } |
504 | 578 |
505 PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& es) | 579 PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& es) |
506 { | 580 { |
507 ASSERT(isMainThread()); | 581 ASSERT(isMainThread()); |
508 lazyInitialize(); | 582 lazyInitialize(); |
509 | 583 |
510 RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs); | 584 RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs); |
511 | 585 |
512 if (!node.get()) { | 586 if (!node.get()) { |
513 es.throwUninformativeAndGenericDOMException(SyntaxError); | 587 es.throwDOMException( |
| 588 SyntaxError, |
| 589 ExceptionMessages::failedToConstruct( |
| 590 "ChannelSplitterNode", |
| 591 "number of outputs (" + String::number(numberOfOutputs) |
| 592 + ") must be between 1 and " |
| 593 + String::number(AudioContext::maxNumberOfChannels()) + ".")); |
514 return 0; | 594 return 0; |
515 } | 595 } |
516 | 596 |
517 return node; | 597 return node; |
518 } | 598 } |
519 | 599 |
520 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState& es) | 600 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState& es) |
521 { | 601 { |
522 const unsigned ChannelMergerDefaultNumberOfInputs = 6; | 602 const unsigned ChannelMergerDefaultNumberOfInputs = 6; |
523 return createChannelMerger(ChannelMergerDefaultNumberOfInputs, es); | 603 return createChannelMerger(ChannelMergerDefaultNumberOfInputs, es); |
524 } | 604 } |
525 | 605 |
526 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& es) | 606 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& es) |
527 { | 607 { |
528 ASSERT(isMainThread()); | 608 ASSERT(isMainThread()); |
529 lazyInitialize(); | 609 lazyInitialize(); |
530 | 610 |
531 RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs); | 611 RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs); |
532 | 612 |
533 if (!node.get()) { | 613 if (!node.get()) { |
534 es.throwUninformativeAndGenericDOMException(SyntaxError); | 614 es.throwDOMException( |
| 615 SyntaxError, |
| 616 ExceptionMessages::failedToConstruct( |
| 617 "ChannelMergerNode", |
| 618 "number of inputs (" + String::number(numberOfInputs) |
| 619 + ") must be between 1 and " |
| 620 + String::number(AudioContext::maxNumberOfChannels()) + ".")); |
535 return 0; | 621 return 0; |
536 } | 622 } |
537 | 623 |
538 return node; | 624 return node; |
539 } | 625 } |
540 | 626 |
541 PassRefPtr<OscillatorNode> AudioContext::createOscillator() | 627 PassRefPtr<OscillatorNode> AudioContext::createOscillator() |
542 { | 628 { |
543 ASSERT(isMainThread()); | 629 ASSERT(isMainThread()); |
544 lazyInitialize(); | 630 lazyInitialize(); |
545 | 631 |
546 RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate()); | 632 RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate()); |
547 | 633 |
548 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing. | 634 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing. |
549 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing(). | 635 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing(). |
550 refNode(node.get()); | 636 refNode(node.get()); |
551 | 637 |
552 return node; | 638 return node; |
553 } | 639 } |
554 | 640 |
555 PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& es) | 641 PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& es) |
556 { | 642 { |
557 ASSERT(isMainThread()); | 643 ASSERT(isMainThread()); |
558 | 644 |
559 if (!real || !imag || (real->length() != imag->length())) { | 645 if (!real) { |
560 es.throwUninformativeAndGenericDOMException(SyntaxError); | 646 es.throwDOMException( |
| 647 SyntaxError, |
| 648 ExceptionMessages::failedToConstruct( |
| 649 "PeriodicWave", |
| 650 "invalid real array")); |
561 return 0; | 651 return 0; |
562 } | 652 } |
563 | 653 |
| 654 if (!imag) { |
| 655 es.throwDOMException( |
| 656 SyntaxError, |
| 657 ExceptionMessages::failedToConstruct( |
| 658 "PeriodicWave", |
| 659 "invalid imaginary array")); |
| 660 return 0; |
| 661 } |
| 662 |
| 663 if (real->length() != imag->length()) { |
| 664 es.throwDOMException( |
| 665 SyntaxError, |
| 666 ExceptionMessages::failedToConstruct( |
| 667 "PeriodicWave", |
| 668 "length of real array (" + String::number(real->length()) |
| 669 + ") and length of imaginary array (" + String::number(imag->le
ngth()) |
| 670 + ") must match.")); |
| 671 return 0; |
| 672 } |
| 673 |
564 lazyInitialize(); | 674 lazyInitialize(); |
565 return PeriodicWave::create(sampleRate(), real, imag); | 675 return PeriodicWave::create(sampleRate(), real, imag); |
566 } | 676 } |
567 | 677 |
568 void AudioContext::notifyNodeFinishedProcessing(AudioNode* node) | 678 void AudioContext::notifyNodeFinishedProcessing(AudioNode* node) |
569 { | 679 { |
570 ASSERT(isAudioThread()); | 680 ASSERT(isAudioThread()); |
571 m_finishedNodes.append(node); | 681 m_finishedNodes.append(node); |
572 } | 682 } |
573 | 683 |
(...skipping 377 matching lines...) |
951 } | 1061 } |
952 | 1062 |
953 void AudioContext::decrementActiveSourceCount() | 1063 void AudioContext::decrementActiveSourceCount() |
954 { | 1064 { |
955 atomicDecrement(&m_activeSourceCount); | 1065 atomicDecrement(&m_activeSourceCount); |
956 } | 1066 } |
957 | 1067 |
958 } // namespace WebCore | 1068 } // namespace WebCore |
959 | 1069 |
960 #endif // ENABLE(WEB_AUDIO) | 1070 #endif // ENABLE(WEB_AUDIO) |