| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2010, Google Inc. All rights reserved. | 2 * Copyright (C) 2010, Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
| 6 * are met: | 6 * are met: |
| 7 * 1. Redistributions of source code must retain the above copyright | 7 * 1. Redistributions of source code must retain the above copyright |
| 8 * notice, this list of conditions and the following disclaimer. | 8 * notice, this list of conditions and the following disclaimer. |
| 9 * 2. Redistributions in binary form must reproduce the above copyright | 9 * 2. Redistributions in binary form must reproduce the above copyright |
| 10 * notice, this list of conditions and the following disclaimer in the | 10 * notice, this list of conditions and the following disclaimer in the |
| (...skipping 277 matching lines...) |
| 288 // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other | 288 // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other |
| 289 // ActiveDOMObjects so let's schedule uninitialize() to be called later. | 289 // ActiveDOMObjects so let's schedule uninitialize() to be called later. |
| 290 // FIXME: see if there's a more direct way to handle this issue. | 290 // FIXME: see if there's a more direct way to handle this issue. |
| 291 callOnMainThread(stopDispatch, this); | 291 callOnMainThread(stopDispatch, this); |
| 292 } | 292 } |
| 293 | 293 |
| 294 PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode& ec) | 294 PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode& ec) |
| 295 { | 295 { |
| 296 RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate); | 296 RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate); |
| 297 if (!audioBuffer.get()) { | 297 if (!audioBuffer.get()) { |
| 298 ec = SYNTAX_ERR; | 298 ec = SyntaxError; |
| 299 return 0; | 299 return 0; |
| 300 } | 300 } |
| 301 | 301 |
| 302 return audioBuffer; | 302 return audioBuffer; |
| 303 } | 303 } |
| 304 | 304 |
| 305 PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionCode& ec) | 305 PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionCode& ec) |
| 306 { | 306 { |
| 307 ASSERT(arrayBuffer); | 307 ASSERT(arrayBuffer); |
| 308 if (!arrayBuffer) { | 308 if (!arrayBuffer) { |
| 309 ec = SYNTAX_ERR; | 309 ec = SyntaxError; |
| 310 return 0; | 310 return 0; |
| 311 } | 311 } |
| 312 | 312 |
| 313 RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate()); | 313 RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate()); |
| 314 if (!audioBuffer.get()) { | 314 if (!audioBuffer.get()) { |
| 315 ec = SYNTAX_ERR; | 315 ec = SyntaxError; |
| 316 return 0; | 316 return 0; |
| 317 } | 317 } |
| 318 | 318 |
| 319 return audioBuffer; | 319 return audioBuffer; |
| 320 } | 320 } |
| 321 | 321 |
| 322 void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback, ExceptionCode& ec) | 322 void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback, ExceptionCode& ec) |
| 323 { | 323 { |
| 324 if (!audioData) { | 324 if (!audioData) { |
| 325 ec = SYNTAX_ERR; | 325 ec = SyntaxError; |
| 326 return; | 326 return; |
| 327 } | 327 } |
| 328 m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback); | 328 m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback); |
| 329 } | 329 } |
| 330 | 330 |
| 331 PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource() | 331 PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource() |
| 332 { | 332 { |
| 333 ASSERT(isMainThread()); | 333 ASSERT(isMainThread()); |
| 334 lazyInitialize(); | 334 lazyInitialize(); |
| 335 RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate()); | 335 RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate()); |
| 336 | 336 |
| 337 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing. | 337 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing. |
| 338 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing(). | 338 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing(). |
| 339 refNode(node.get()); | 339 refNode(node.get()); |
| 340 | 340 |
| 341 return node; | 341 return node; |
| 342 } | 342 } |
| 343 | 343 |
| 344 PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionCode& ec) | 344 PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionCode& ec) |
| 345 { | 345 { |
| 346 ASSERT(mediaElement); | 346 ASSERT(mediaElement); |
| 347 if (!mediaElement) { | 347 if (!mediaElement) { |
| 348 ec = INVALID_STATE_ERR; | 348 ec = InvalidStateError; |
| 349 return 0; | 349 return 0; |
| 350 } | 350 } |
| 351 | 351 |
| 352 ASSERT(isMainThread()); | 352 ASSERT(isMainThread()); |
| 353 lazyInitialize(); | 353 lazyInitialize(); |
| 354 | 354 |
| 355 // First check if this media element already has a source node. | 355 // First check if this media element already has a source node. |
| 356 if (mediaElement->audioSourceNode()) { | 356 if (mediaElement->audioSourceNode()) { |
| 357 ec = INVALID_STATE_ERR; | 357 ec = InvalidStateError; |
| 358 return 0; | 358 return 0; |
| 359 } | 359 } |
| 360 | 360 |
| 361 RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement); | 361 RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement); |
| 362 | 362 |
| 363 mediaElement->setAudioSourceNode(node.get()); | 363 mediaElement->setAudioSourceNode(node.get()); |
| 364 | 364 |
| 365 refNode(node.get()); // context keeps reference until node is disconnected | 365 refNode(node.get()); // context keeps reference until node is disconnected |
| 366 return node; | 366 return node; |
| 367 } | 367 } |
| 368 | 368 |
| 369 PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionCode& ec) | 369 PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionCode& ec) |
| 370 { | 370 { |
| 371 ASSERT(mediaStream); | 371 ASSERT(mediaStream); |
| 372 if (!mediaStream) { | 372 if (!mediaStream) { |
| 373 ec = INVALID_STATE_ERR; | 373 ec = InvalidStateError; |
| 374 return 0; | 374 return 0; |
| 375 } | 375 } |
| 376 | 376 |
| 377 ASSERT(isMainThread()); | 377 ASSERT(isMainThread()); |
| 378 lazyInitialize(); | 378 lazyInitialize(); |
| 379 | 379 |
| 380 AudioSourceProvider* provider = 0; | 380 AudioSourceProvider* provider = 0; |
| 381 | 381 |
| 382 MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks(); | 382 MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks(); |
| 383 | 383 |
| (...skipping 36 matching lines...) |
| 420 return createScriptProcessor(bufferSize, numberOfInputChannels, 2, ec); | 420 return createScriptProcessor(bufferSize, numberOfInputChannels, 2, ec); |
| 421 } | 421 } |
| 422 | 422 |
| 423 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionCode& ec) | 423 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionCode& ec) |
| 424 { | 424 { |
| 425 ASSERT(isMainThread()); | 425 ASSERT(isMainThread()); |
| 426 lazyInitialize(); | 426 lazyInitialize(); |
| 427 RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels); | 427 RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels); |
| 428 | 428 |
| 429 if (!node.get()) { | 429 if (!node.get()) { |
| 430 ec = SYNTAX_ERR; | 430 ec = SyntaxError; |
| 431 return 0; | 431 return 0; |
| 432 } | 432 } |
| 433 | 433 |
| 434 refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks | 434 refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks |
| 435 return node; | 435 return node; |
| 436 } | 436 } |
| 437 | 437 |
| 438 PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter() | 438 PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter() |
| 439 { | 439 { |
| 440 ASSERT(isMainThread()); | 440 ASSERT(isMainThread()); |
| (...skipping 66 matching lines...) |
| 507 } | 507 } |
| 508 | 508 |
| 509 PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionCode& ec) | 509 PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionCode& ec) |
| 510 { | 510 { |
| 511 ASSERT(isMainThread()); | 511 ASSERT(isMainThread()); |
| 512 lazyInitialize(); | 512 lazyInitialize(); |
| 513 | 513 |
| 514 RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs); | 514 RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs); |
| 515 | 515 |
| 516 if (!node.get()) { | 516 if (!node.get()) { |
| 517 ec = SYNTAX_ERR; | 517 ec = SyntaxError; |
| 518 return 0; | 518 return 0; |
| 519 } | 519 } |
| 520 | 520 |
| 521 return node; | 521 return node; |
| 522 } | 522 } |
| 523 | 523 |
| 524 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionCode& ec) | 524 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionCode& ec) |
| 525 { | 525 { |
| 526 const unsigned ChannelMergerDefaultNumberOfInputs = 6; | 526 const unsigned ChannelMergerDefaultNumberOfInputs = 6; |
| 527 return createChannelMerger(ChannelMergerDefaultNumberOfInputs, ec); | 527 return createChannelMerger(ChannelMergerDefaultNumberOfInputs, ec); |
| 528 } | 528 } |
| 529 | 529 |
| 530 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionCode& ec) | 530 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionCode& ec) |
| 531 { | 531 { |
| 532 ASSERT(isMainThread()); | 532 ASSERT(isMainThread()); |
| 533 lazyInitialize(); | 533 lazyInitialize(); |
| 534 | 534 |
| 535 RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs); | 535 RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs); |
| 536 | 536 |
| 537 if (!node.get()) { | 537 if (!node.get()) { |
| 538 ec = SYNTAX_ERR; | 538 ec = SyntaxError; |
| 539 return 0; | 539 return 0; |
| 540 } | 540 } |
| 541 | 541 |
| 542 return node; | 542 return node; |
| 543 } | 543 } |
| 544 | 544 |
| 545 PassRefPtr<OscillatorNode> AudioContext::createOscillator() | 545 PassRefPtr<OscillatorNode> AudioContext::createOscillator() |
| 546 { | 546 { |
| 547 ASSERT(isMainThread()); | 547 ASSERT(isMainThread()); |
| 548 lazyInitialize(); | 548 lazyInitialize(); |
| 549 | 549 |
| 550 RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate()); | 550 RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate()); |
| 551 | 551 |
| 552 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing. | 552 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing. |
| 553 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing(). | 553 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing(). |
| 554 refNode(node.get()); | 554 refNode(node.get()); |
| 555 | 555 |
| 556 return node; | 556 return node; |
| 557 } | 557 } |
| 558 | 558 |
| 559 PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionCode& ec) | 559 PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionCode& ec) |
| 560 { | 560 { |
| 561 ASSERT(isMainThread()); | 561 ASSERT(isMainThread()); |
| 562 | 562 |
| 563 if (!real || !imag || (real->length() != imag->length())) { | 563 if (!real || !imag || (real->length() != imag->length())) { |
| 564 ec = SYNTAX_ERR; | 564 ec = SyntaxError; |
| 565 return 0; | 565 return 0; |
| 566 } | 566 } |
| 567 | 567 |
| 568 lazyInitialize(); | 568 lazyInitialize(); |
| 569 return PeriodicWave::create(sampleRate(), real, imag); | 569 return PeriodicWave::create(sampleRate(), real, imag); |
| 570 } | 570 } |
| 571 | 571 |
| 572 void AudioContext::notifyNodeFinishedProcessing(AudioNode* node) | 572 void AudioContext::notifyNodeFinishedProcessing(AudioNode* node) |
| 573 { | 573 { |
| 574 ASSERT(isAudioThread()); | 574 ASSERT(isAudioThread()); |
| (...skipping 404 matching lines...) |
| 979 info.addMember(m_deferredFinishDerefList, "deferredFinishDerefList"); | 979 info.addMember(m_deferredFinishDerefList, "deferredFinishDerefList"); |
| 980 info.addMember(m_hrtfDatabaseLoader, "hrtfDatabaseLoader"); | 980 info.addMember(m_hrtfDatabaseLoader, "hrtfDatabaseLoader"); |
| 981 info.addMember(m_eventTargetData, "eventTargetData"); | 981 info.addMember(m_eventTargetData, "eventTargetData"); |
| 982 info.addMember(m_renderTarget, "renderTarget"); | 982 info.addMember(m_renderTarget, "renderTarget"); |
| 983 info.addMember(m_audioDecoder, "audioDecoder"); | 983 info.addMember(m_audioDecoder, "audioDecoder"); |
| 984 } | 984 } |
| 985 | 985 |
| 986 } // namespace WebCore | 986 } // namespace WebCore |
| 987 | 987 |
| 988 #endif // ENABLE(WEB_AUDIO) | 988 #endif // ENABLE(WEB_AUDIO) |
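
Every hunk in this diff applies the same convention: the factory tries to create an object, and on failure it sets the out-parameter `ExceptionCode` to the new-style name (`SyntaxError`, `InvalidStateError`) and returns 0 instead of a node. Below is a minimal standalone sketch of that convention, assuming simplified stand-in types rather than the real WebCore classes; only the enum value names and the `createBuffer`-style signature are modeled on the diff, everything else is hypothetical.

```cpp
#include <cstddef>
#include <iostream>
#include <memory>

// Simplified stand-ins; the value names mirror the new-style spellings used in the diff.
enum ExceptionCode { NoError = 0, SyntaxError, InvalidStateError };

struct AudioBuffer {
    unsigned channels;
    std::size_t frames;
    float sampleRate;
};

// Factory following the diff's convention: on bad arguments, set `ec` and return null.
std::unique_ptr<AudioBuffer> createBuffer(unsigned numberOfChannels, std::size_t numberOfFrames,
                                          float sampleRate, ExceptionCode& ec)
{
    if (!numberOfChannels || !numberOfFrames || sampleRate <= 0) {
        ec = SyntaxError; // previously spelled SYNTAX_ERR
        return nullptr;
    }
    return std::unique_ptr<AudioBuffer>(new AudioBuffer{numberOfChannels, numberOfFrames, sampleRate});
}

int main()
{
    ExceptionCode ec = NoError;
    auto ok = createBuffer(2, 1024, 44100.0f, ec);  // succeeds, ec stays NoError
    auto bad = createBuffer(0, 1024, 44100.0f, ec); // fails, ec becomes SyntaxError
    std::cout << (ok != nullptr) << " " << (bad != nullptr) << " " << ec << "\n";
    return 0;
}
```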
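The comments in createBufferSource() and createOscillator() describe the node-lifetime rule for scheduled sources: the context takes a reference when the node is created and drops it once the node reports that it has finished playing. A rough sketch of that ownership hand-off is below, using a plain `std::shared_ptr` vector in place of WebCore's refNode()/derefNode() machinery; all names here are illustrative, not the real AudioContext internals.

```cpp
#include <algorithm>
#include <iostream>
#include <memory>
#include <vector>

class Context;

// Stand-in for an AudioScheduledSourceNode: tells the context when it is done.
class ScheduledSourceNode {
public:
    explicit ScheduledSourceNode(Context* context) : m_context(context) {}
    void finish(); // analogous to AudioScheduledSourceNode::finish()
private:
    Context* m_context;
};

class Context {
public:
    std::shared_ptr<ScheduledSourceNode> createSource()
    {
        auto node = std::make_shared<ScheduledSourceNode>(this);
        m_activeNodes.push_back(node); // context keeps a reference until playback finishes
        return node;
    }

    // Analogous to AudioContext::notifyNodeFinishedProcessing(): drop the context's reference.
    void notifyNodeFinishedProcessing(ScheduledSourceNode* node)
    {
        m_activeNodes.erase(std::remove_if(m_activeNodes.begin(), m_activeNodes.end(),
                                           [node](const std::shared_ptr<ScheduledSourceNode>& n) {
                                               return n.get() == node;
                                           }),
                            m_activeNodes.end());
    }

    std::size_t activeCount() const { return m_activeNodes.size(); }

private:
    std::vector<std::shared_ptr<ScheduledSourceNode>> m_activeNodes;
};

void ScheduledSourceNode::finish() { m_context->notifyNodeFinishedProcessing(this); }

int main()
{
    Context context;
    auto node = context.createSource();
    std::cout << context.activeCount() << "\n"; // 1: context holds the node while it plays
    node->finish();
    std::cout << context.activeCount() << "\n"; // 0: reference released after finish()
    return 0;
}
```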