| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2010, Google Inc. All rights reserved. | 2 * Copyright (C) 2010, Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
| 6 * are met: | 6 * are met: |
| 7 * 1. Redistributions of source code must retain the above copyright | 7 * 1. Redistributions of source code must retain the above copyright |
| 8 * notice, this list of conditions and the following disclaimer. | 8 * notice, this list of conditions and the following disclaimer. |
| 9 * 2. Redistributions in binary form must reproduce the above copyright | 9 * 2. Redistributions in binary form must reproduce the above copyright |
| 10 * notice, this list of conditions and the following disclaimer in the | 10 * notice, this list of conditions and the following disclaimer in the |
| (...skipping 10 matching lines...) |
| 21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | 21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
| 22 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 22 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 23 */ | 23 */ |
| 24 | 24 |
| 25 #include "config.h" | 25 #include "config.h" |
| 26 | 26 |
| 27 #if ENABLE(WEB_AUDIO) | 27 #if ENABLE(WEB_AUDIO) |
| 28 | 28 |
| 29 #include "modules/webaudio/AudioContext.h" | 29 #include "modules/webaudio/AudioContext.h" |
| 30 | 30 |
| 31 #include "bindings/v8/ExceptionState.h" | |
| 32 #include "core/dom/Document.h" | 31 #include "core/dom/Document.h" |
| 33 #include "core/dom/ExceptionCode.h" | 32 #include "core/dom/ExceptionCode.h" |
| 34 #include "core/html/HTMLMediaElement.h" | 33 #include "core/html/HTMLMediaElement.h" |
| 35 #include "core/inspector/ScriptCallStack.h" | 34 #include "core/inspector/ScriptCallStack.h" |
| 36 #include "core/platform/audio/FFTFrame.h" | 35 #include "core/platform/audio/FFTFrame.h" |
| 37 #include "core/platform/audio/HRTFDatabaseLoader.h" | 36 #include "core/platform/audio/HRTFDatabaseLoader.h" |
| 38 #include "core/platform/audio/HRTFPanner.h" | 37 #include "core/platform/audio/HRTFPanner.h" |
| 39 #include "modules/mediastream/MediaStream.h" | 38 #include "modules/mediastream/MediaStream.h" |
| 40 #include "modules/webaudio/AnalyserNode.h" | 39 #include "modules/webaudio/AnalyserNode.h" |
| 41 #include "modules/webaudio/AsyncAudioDecoder.h" | 40 #include "modules/webaudio/AsyncAudioDecoder.h" |
| (...skipping 240 matching lines...) |
| 282 return; | 281 return; |
| 283 m_isStopScheduled = true; | 282 m_isStopScheduled = true; |
| 284 | 283 |
| 285 // Don't call uninitialize() immediately here because the ScriptExecutionContext is in the middle | 284 // Don't call uninitialize() immediately here because the ScriptExecutionContext is in the middle |
| 286 // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other | 285 // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other |
| 287 // ActiveDOMObjects so let's schedule uninitialize() to be called later. | 286 // ActiveDOMObjects so let's schedule uninitialize() to be called later. |
| 288 // FIXME: see if there's a more direct way to handle this issue. | 287 // FIXME: see if there's a more direct way to handle this issue. |
| 289 callOnMainThread(stopDispatch, this); | 288 callOnMainThread(stopDispatch, this); |
| 290 } | 289 } |
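
The two comment rows above explain why stop() defers cleanup through callOnMainThread() instead of calling uninitialize() directly. The trampoline that callOnMainThread() invokes is not part of this hunk; a minimal sketch of that pattern, assuming a stopDispatch helper matching WTF's MainThreadFunction signature (void(void*)), could look like this:

    // Sketch only: the real stopDispatch body lives outside this hunk.
    // callOnMainThread() takes a void (*)(void*) plus an opaque context pointer,
    // so the helper casts the pointer back and finishes teardown on the main thread,
    // after the ScriptExecutionContext is done iterating its ActiveDOMObjects.
    static void stopDispatch(void* userData)
    {
        AudioContext* context = reinterpret_cast<AudioContext*>(userData);
        ASSERT(context);
        context->uninitialize();
    }
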
| 291 | 290 |
| 292 PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& es) | 291 PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode& ec) |
| 293 { | 292 { |
| 294 RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate); | 293 RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate); |
| 295 if (!audioBuffer.get()) { | 294 if (!audioBuffer.get()) { |
| 296 es.throwDOMException(SyntaxError); | 295 ec = SyntaxError; |
| 297 return 0; | 296 return 0; |
| 298 } | 297 } |
| 299 | 298 |
| 300 return audioBuffer; | 299 return audioBuffer; |
| 301 } | 300 } |
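
For context on the signature change above (ExceptionState& es in the old column, ExceptionCode& ec in the new one): the old style reports failure by throwing through the ExceptionState object, while the new style writes an error code into an out-parameter that the caller tests afterwards. A hypothetical caller-side sketch of the out-parameter style, with made-up variable names, might read:

    // Hypothetical caller, not part of this patch.
    ExceptionCode ec = 0;
    RefPtr<AudioBuffer> buffer = context->createBuffer(2, 44100, 44100.0f, ec);
    if (ec) {
        // createBuffer() set ec to SyntaxError instead of throwing; bail out here.
        return;
    }
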
| 302 | 301 |
| 303 PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionState& es) | 302 PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionCode& ec) |
| 304 { | 303 { |
| 305 ASSERT(arrayBuffer); | 304 ASSERT(arrayBuffer); |
| 306 if (!arrayBuffer) { | 305 if (!arrayBuffer) { |
| 307 es.throwDOMException(SyntaxError); | 306 ec = SyntaxError; |
| 308 return 0; | 307 return 0; |
| 309 } | 308 } |
| 310 | 309 |
| 311 RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate()); | 310 RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate()); |
| 312 if (!audioBuffer.get()) { | 311 if (!audioBuffer.get()) { |
| 313 es.throwDOMException(SyntaxError); | 312 ec = SyntaxError; |
| 314 return 0; | 313 return 0; |
| 315 } | 314 } |
| 316 | 315 |
| 317 return audioBuffer; | 316 return audioBuffer; |
| 318 } | 317 } |
| 319 | 318 |
| 320 void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback, ExceptionState& es) | 319 void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback, ExceptionCode& ec) |
| 321 { | 320 { |
| 322 if (!audioData) { | 321 if (!audioData) { |
| 323 es.throwDOMException(SyntaxError); | 322 ec = SyntaxError; |
| 324 return; | 323 return; |
| 325 } | 324 } |
| 326 m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback); | 325 m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback); |
| 327 } | 326 } |
| 328 | 327 |
| 329 PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource() | 328 PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource() |
| 330 { | 329 { |
| 331 ASSERT(isMainThread()); | 330 ASSERT(isMainThread()); |
| 332 lazyInitialize(); | 331 lazyInitialize(); |
| 333 RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate()); | 332 RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate()); |
| 334 | 333 |
| 335 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing. | 334 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing. |
| 336 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing(). | 335 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing(). |
| 337 refNode(node.get()); | 336 refNode(node.get()); |
| 338 | 337 |
| 339 return node; | 338 return node; |
| 340 } | 339 } |
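
The comment rows above state the source-node lifetime rule: refNode() makes the context hold the node until AudioScheduledSourceNode::finish() triggers notifyNodeFinishedProcessing(). A hypothetical usage sketch of that rule follows; the node's member names and argument lists here are assumptions for illustration, not taken from this hunk:

    // Hypothetical caller, showing why no extra reference is needed during playback.
    ExceptionCode ec = 0;
    RefPtr<AudioBufferSourceNode> source = context->createBufferSource();
    source->setBuffer(buffer.get());                    // assumed setter
    source->connect(context->destination(), 0, 0, ec);  // assumed connect() overload
    source->start(0);                                   // assumed start() method
    // 'source' may go out of scope now: the refNode() call above keeps the node alive
    // until playback finishes, at which point the context drops its reference.
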
| 341 | 340 |
| 342 PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& es) | 341 PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionCode& ec) |
| 343 { | 342 { |
| 344 ASSERT(mediaElement); | 343 ASSERT(mediaElement); |
| 345 if (!mediaElement) { | 344 if (!mediaElement) { |
| 346 es.throwDOMException(InvalidStateError); | 345 ec = InvalidStateError; |
| 347 return 0; | 346 return 0; |
| 348 } | 347 } |
| 349 | 348 |
| 350 ASSERT(isMainThread()); | 349 ASSERT(isMainThread()); |
| 351 lazyInitialize(); | 350 lazyInitialize(); |
| 352 | 351 |
| 353 // First check if this media element already has a source node. | 352 // First check if this media element already has a source node. |
| 354 if (mediaElement->audioSourceNode()) { | 353 if (mediaElement->audioSourceNode()) { |
| 355 es.throwDOMException(InvalidStateError); | 354 ec = InvalidStateError; |
| 356 return 0; | 355 return 0; |
| 357 } | 356 } |
| 358 | 357 |
| 359 RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement); | 358 RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement); |
| 360 | 359 |
| 361 mediaElement->setAudioSourceNode(node.get()); | 360 mediaElement->setAudioSourceNode(node.get()); |
| 362 | 361 |
| 363 refNode(node.get()); // context keeps reference until node is disconnected | 362 refNode(node.get()); // context keeps reference until node is disconnected |
| 364 return node; | 363 return node; |
| 365 } | 364 } |
| 366 | 365 |
| 367 PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& es) | 366 PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionCode& ec) |
| 368 { | 367 { |
| 369 ASSERT(mediaStream); | 368 ASSERT(mediaStream); |
| 370 if (!mediaStream) { | 369 if (!mediaStream) { |
| 371 es.throwDOMException(InvalidStateError); | 370 ec = InvalidStateError; |
| 372 return 0; | 371 return 0; |
| 373 } | 372 } |
| 374 | 373 |
| 375 ASSERT(isMainThread()); | 374 ASSERT(isMainThread()); |
| 376 lazyInitialize(); | 375 lazyInitialize(); |
| 377 | 376 |
| 378 AudioSourceProvider* provider = 0; | 377 AudioSourceProvider* provider = 0; |
| 379 | 378 |
| 380 MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks(); | 379 MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks(); |
| 381 | 380 |
| (...skipping 17 matching lines...) |
| 399 return node; | 398 return node; |
| 400 } | 399 } |
| 401 | 400 |
| 402 PassRefPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination() | 401 PassRefPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination() |
| 403 { | 402 { |
| 404 // FIXME: Add support for an optional argument which specifies the number of channels. | 403 // FIXME: Add support for an optional argument which specifies the number of channels. |
| 405 // FIXME: The default should probably be stereo instead of mono. | 404 // FIXME: The default should probably be stereo instead of mono. |
| 406 return MediaStreamAudioDestinationNode::create(this, 1); | 405 return MediaStreamAudioDestinationNode::create(this, 1); |
| 407 } | 406 } |
| 408 | 407 |
| 409 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& es) | 408 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionCode& ec) |
| 410 { | 409 { |
| 411 // Set number of input/output channels to stereo by default. | 410 // Set number of input/output channels to stereo by default. |
| 412 return createScriptProcessor(bufferSize, 2, 2, es); | 411 return createScriptProcessor(bufferSize, 2, 2, ec); |
| 413 } | 412 } |
| 414 | 413 |
| 415 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& es) | 414 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionCode& ec) |
| 416 { | 415 { |
| 417 // Set number of output channels to stereo by default. | 416 // Set number of output channels to stereo by default. |
| 418 return createScriptProcessor(bufferSize, numberOfInputChannels, 2, es); | 417 return createScriptProcessor(bufferSize, numberOfInputChannels, 2, ec); |
| 419 } | 418 } |
| 420 | 419 |
| 421 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& es) | 420 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionCode& ec) |
| 422 { | 421 { |
| 423 ASSERT(isMainThread()); | 422 ASSERT(isMainThread()); |
| 424 lazyInitialize(); | 423 lazyInitialize(); |
| 425 RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels); | 424 RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels); |
| 426 | 425 |
| 427 if (!node.get()) { | 426 if (!node.get()) { |
| 428 es.throwDOMException(SyntaxError); | 427 ec = SyntaxError; |
| 429 return 0; | 428 return 0; |
| 430 } | 429 } |
| 431 | 430 |
| 432 refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks | 431 refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks |
| 433 return node; | 432 return node; |
| 434 } | 433 } |
| 435 | 434 |
| 436 PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter() | 435 PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter() |
| 437 { | 436 { |
| 438 ASSERT(isMainThread()); | 437 ASSERT(isMainThread()); |
| (...skipping 36 matching lines...) |
| 475 return AnalyserNode::create(this, m_destinationNode->sampleRate()); | 474 return AnalyserNode::create(this, m_destinationNode->sampleRate()); |
| 476 } | 475 } |
| 477 | 476 |
| 478 PassRefPtr<GainNode> AudioContext::createGain() | 477 PassRefPtr<GainNode> AudioContext::createGain() |
| 479 { | 478 { |
| 480 ASSERT(isMainThread()); | 479 ASSERT(isMainThread()); |
| 481 lazyInitialize(); | 480 lazyInitialize(); |
| 482 return GainNode::create(this, m_destinationNode->sampleRate()); | 481 return GainNode::create(this, m_destinationNode->sampleRate()); |
| 483 } | 482 } |
| 484 | 483 |
| 485 PassRefPtr<DelayNode> AudioContext::createDelay(ExceptionState& es) | 484 PassRefPtr<DelayNode> AudioContext::createDelay(ExceptionCode& ec) |
| 486 { | 485 { |
| 487 const double defaultMaxDelayTime = 1; | 486 const double defaultMaxDelayTime = 1; |
| 488 return createDelay(defaultMaxDelayTime, es); | 487 return createDelay(defaultMaxDelayTime, ec); |
| 489 } | 488 } |
| 490 | 489 |
| 491 PassRefPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionState& es) | 490 PassRefPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionCode& ec) |
| 492 { | 491 { |
| 493 ASSERT(isMainThread()); | 492 ASSERT(isMainThread()); |
| 494 lazyInitialize(); | 493 lazyInitialize(); |
| 495 RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, es); | 494 RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, ec); |
| 496 if (es.hadException()) | 495 if (ec) |
| 497 return 0; | 496 return 0; |
| 498 return node; | 497 return node; |
| 499 } | 498 } |
| 500 | 499 |
| 501 PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionState& es) | 500 PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionCode& ec) |
| 502 { | 501 { |
| 503 const unsigned ChannelSplitterDefaultNumberOfOutputs = 6; | 502 const unsigned ChannelSplitterDefaultNumberOfOutputs = 6; |
| 504 return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, es); | 503 return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, ec); |
| 505 } | 504 } |
| 506 | 505 |
| 507 PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& es) | 506 PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionCode& ec) |
| 508 { | 507 { |
| 509 ASSERT(isMainThread()); | 508 ASSERT(isMainThread()); |
| 510 lazyInitialize(); | 509 lazyInitialize(); |
| 511 | 510 |
| 512 RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs); | 511 RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs); |
| 513 | 512 |
| 514 if (!node.get()) { | 513 if (!node.get()) { |
| 515 es.throwDOMException(SyntaxError); | 514 ec = SyntaxError; |
| 516 return 0; | 515 return 0; |
| 517 } | 516 } |
| 518 | 517 |
| 519 return node; | 518 return node; |
| 520 } | 519 } |
| 521 | 520 |
| 522 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState& es) | 521 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionCode& ec) |
| 523 { | 522 { |
| 524 const unsigned ChannelMergerDefaultNumberOfInputs = 6; | 523 const unsigned ChannelMergerDefaultNumberOfInputs = 6; |
| 525 return createChannelMerger(ChannelMergerDefaultNumberOfInputs, es); | 524 return createChannelMerger(ChannelMergerDefaultNumberOfInputs, ec); |
| 526 } | 525 } |
| 527 | 526 |
| 528 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& es) | 527 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionCode& ec) |
| 529 { | 528 { |
| 530 ASSERT(isMainThread()); | 529 ASSERT(isMainThread()); |
| 531 lazyInitialize(); | 530 lazyInitialize(); |
| 532 | 531 |
| 533 RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs); | 532 RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs); |
| 534 | 533 |
| 535 if (!node.get()) { | 534 if (!node.get()) { |
| 536 es.throwDOMException(SyntaxError); | 535 ec = SyntaxError; |
| 537 return 0; | 536 return 0; |
| 538 } | 537 } |
| 539 | 538 |
| 540 return node; | 539 return node; |
| 541 } | 540 } |
| 542 | 541 |
| 543 PassRefPtr<OscillatorNode> AudioContext::createOscillator() | 542 PassRefPtr<OscillatorNode> AudioContext::createOscillator() |
| 544 { | 543 { |
| 545 ASSERT(isMainThread()); | 544 ASSERT(isMainThread()); |
| 546 lazyInitialize(); | 545 lazyInitialize(); |
| 547 | 546 |
| 548 RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate()); | 547 RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate()); |
| 549 | 548 |
| 550 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing. | 549 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing. |
| 551 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing(). | 550 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing(). |
| 552 refNode(node.get()); | 551 refNode(node.get()); |
| 553 | 552 |
| 554 return node; | 553 return node; |
| 555 } | 554 } |
| 556 | 555 |
| 557 PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& es) | 556 PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionCode& ec) |
| 558 { | 557 { |
| 559 ASSERT(isMainThread()); | 558 ASSERT(isMainThread()); |
| 560 | 559 |
| 561 if (!real || !imag || (real->length() != imag->length())) { | 560 if (!real || !imag || (real->length() != imag->length())) { |
| 562 es.throwDOMException(SyntaxError); | 561 ec = SyntaxError; |
| 563 return 0; | 562 return 0; |
| 564 } | 563 } |
| 565 | 564 |
| 566 lazyInitialize(); | 565 lazyInitialize(); |
| 567 return PeriodicWave::create(sampleRate(), real, imag); | 566 return PeriodicWave::create(sampleRate(), real, imag); |
| 568 } | 567 } |
| 569 | 568 |
| 570 void AudioContext::notifyNodeFinishedProcessing(AudioNode* node) | 569 void AudioContext::notifyNodeFinishedProcessing(AudioNode* node) |
| 571 { | 570 { |
| 572 ASSERT(isAudioThread()); | 571 ASSERT(isAudioThread()); |
| (...skipping 380 matching lines...) |
| 953 } | 952 } |
| 954 | 953 |
| 955 void AudioContext::decrementActiveSourceCount() | 954 void AudioContext::decrementActiveSourceCount() |
| 956 { | 955 { |
| 957 atomicDecrement(&m_activeSourceCount); | 956 atomicDecrement(&m_activeSourceCount); |
| 958 } | 957 } |
| 959 | 958 |
| 960 } // namespace WebCore | 959 } // namespace WebCore |
| 961 | 960 |
| 962 #endif // ENABLE(WEB_AUDIO) | 961 #endif // ENABLE(WEB_AUDIO) |