| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2010, Google Inc. All rights reserved. | 2 * Copyright (C) 2010, Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
| 6 * are met: | 6 * are met: |
| 7 * 1. Redistributions of source code must retain the above copyright | 7 * 1. Redistributions of source code must retain the above copyright |
| 8 * notice, this list of conditions and the following disclaimer. | 8 * notice, this list of conditions and the following disclaimer. |
| 9 * 2. Redistributions in binary form must reproduce the above copyright | 9 * 2. Redistributions in binary form must reproduce the above copyright |
| 10 * notice, this list of conditions and the following disclaimer in the | 10 * notice, this list of conditions and the following disclaimer in the |
| (...skipping 71 matching lines...) |
| 82 { | 82 { |
| 83 // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz, | 83 // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz, |
| 84 // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there. | 84 // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there. |
| 85 return sampleRate >= 44100 && sampleRate <= 96000; | 85 return sampleRate >= 44100 && sampleRate <= 96000; |
| 86 } | 86 } |
| 87 | 87 |
| 88 // Don't allow more than this number of simultaneous AudioContexts talking to hardware. | 88 // Don't allow more than this number of simultaneous AudioContexts talking to hardware. |
| 89 const unsigned MaxHardwareContexts = 6; | 89 const unsigned MaxHardwareContexts = 6; |
| 90 unsigned AudioContext::s_hardwareContextCount = 0; | 90 unsigned AudioContext::s_hardwareContextCount = 0; |
| 91 | 91 |
| 92 PassRefPtr<AudioContext> AudioContext::create(Document& document, ExceptionState& exceptionState) | 92 PassRefPtrWillBeRawPtr<AudioContext> AudioContext::create(Document& document, ExceptionState& exceptionState) |
| 93 { | 93 { |
| 94 ASSERT(isMainThread()); | 94 ASSERT(isMainThread()); |
| 95 if (s_hardwareContextCount >= MaxHardwareContexts) { | 95 if (s_hardwareContextCount >= MaxHardwareContexts) { |
| 96 exceptionState.throwDOMException( | 96 exceptionState.throwDOMException( |
| 97 SyntaxError, | 97 SyntaxError, |
| 98 "number of hardware contexts reached maximum (" + String::number(Max
HardwareContexts) + ")."); | 98 "number of hardware contexts reached maximum (" + String::number(Max
HardwareContexts) + ")."); |
| 99 return nullptr; | 99 return nullptr; |
| 100 } | 100 } |
| 101 | 101 |
| 102 RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(&document))); | 102 RefPtrWillBeRawPtr<AudioContext> audioContext(adoptRefWillBeThreadSafeRefCountedGarbageCollected(new AudioContext(&document))); |
| 103 audioContext->suspendIfNeeded(); | 103 audioContext->suspendIfNeeded(); |
| 104 return audioContext.release(); | 104 return audioContext.release(); |
| 105 } | 105 } |
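The change running through this CL is mechanical but worth spelling out once: every `PassRefPtr`/`RefPtr` on the public surface becomes the `PassRefPtrWillBeRawPtr`/`RefPtrWillBeRawPtr` transition type, and `adoptRef` becomes `adoptRefWillBeThreadSafeRefCountedGarbageCollected`. A minimal sketch of how such transition aliases behave, using toy stand-ins rather than Blink's real WTF and Oilpan headers:

```cpp
// Toy stand-ins so the sketch compiles on its own; the real types live in
// wtf/RefPtr.h and platform/heap/Handle.h.
template <typename T> struct RefPtr { T* ptr = nullptr; }; // ref-counted ownership
template <typename T> struct RawPtr { T* ptr = nullptr; }; // GC-traced raw pointer

#ifndef ENABLE_OILPAN // stand-in for Blink's ENABLE(OILPAN) macro
#define ENABLE_OILPAN 0
#endif

#if ENABLE_OILPAN
// With Oilpan enabled, the alias collapses to a raw pointer: the garbage
// collector owns the object and no reference counting happens.
template <typename T> using RefPtrWillBeRawPtr = RawPtr<T>;
#else
// Without Oilpan, the old ref-counted behaviour is unchanged.
template <typename T> using RefPtrWillBeRawPtr = RefPtr<T>;
#endif
```

`adoptRefWillBeThreadSafeRefCountedGarbageCollected` follows the same two-faced pattern: pre-Oilpan it adopts the initial reference exactly like `adoptRef`, and with Oilpan it hands the new object to the thread-safe garbage-collected heap instead.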
| 106 | 106 |
| 107 PassRefPtr<AudioContext> AudioContext::create(Document& document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState) | 107 PassRefPtrWillBeRawPtr<AudioContext> AudioContext::create(Document& document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState) |
| 108 { | 108 { |
| 109 document.addConsoleMessage(JSMessageSource, WarningMessageLevel, "Deprecated AudioContext constructor: use OfflineAudioContext instead"); | 109 document.addConsoleMessage(JSMessageSource, WarningMessageLevel, "Deprecated AudioContext constructor: use OfflineAudioContext instead"); |
| 110 return OfflineAudioContext::create(&document, numberOfChannels, numberOfFrames, sampleRate, exceptionState); | 110 return OfflineAudioContext::create(&document, numberOfChannels, numberOfFrames, sampleRate, exceptionState); |
| 111 } | 111 } |
| 112 | 112 |
| 113 // Constructor for rendering to the audio hardware. | 113 // Constructor for rendering to the audio hardware. |
| 114 AudioContext::AudioContext(Document* document) | 114 AudioContext::AudioContext(Document* document) |
| 115 : ActiveDOMObject(document) | 115 : ActiveDOMObject(document) |
| 116 , m_isStopScheduled(false) | 116 , m_isStopScheduled(false) |
| 117 , m_isCleared(false) | 117 , m_isCleared(false) |
| (...skipping 156 matching lines...) |
| 274 // FIXME: see if there's a more direct way to handle this issue. | 274 // FIXME: see if there's a more direct way to handle this issue. |
| 275 callOnMainThread(stopDispatch, this); | 275 callOnMainThread(stopDispatch, this); |
| 276 } | 276 } |
| 277 | 277 |
| 278 bool AudioContext::hasPendingActivity() const | 278 bool AudioContext::hasPendingActivity() const |
| 279 { | 279 { |
| 280 // According to spec AudioContext must die only after page navigates. | 280 // According to spec AudioContext must die only after page navigates. |
| 281 return !m_isCleared; | 281 return !m_isCleared; |
| 282 } | 282 } |
| 283 | 283 |
| 284 PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState) | 284 PassRefPtrWillBeRawPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState) |
| 285 { | 285 { |
| 286 RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate); | 286 RefPtrWillBeRawPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate); |
| 287 if (!audioBuffer.get()) { | 287 if (!audioBuffer.get()) { |
| 288 if (numberOfChannels > AudioContext::maxNumberOfChannels()) { | 288 if (numberOfChannels > AudioContext::maxNumberOfChannels()) { |
| 289 exceptionState.throwDOMException( | 289 exceptionState.throwDOMException( |
| 290 NotSupportedError, | 290 NotSupportedError, |
| 291 "requested number of channels (" + String::number(numberOfChanne
ls) + ") exceeds maximum (" + String::number(AudioContext::maxNumberOfChannels()
) + ")"); | 291 "requested number of channels (" + String::number(numberOfChanne
ls) + ") exceeds maximum (" + String::number(AudioContext::maxNumberOfChannels()
) + ")"); |
| 292 } else if (sampleRate < AudioBuffer::minAllowedSampleRate() || sampleRat
e > AudioBuffer::maxAllowedSampleRate()) { | 292 } else if (sampleRate < AudioBuffer::minAllowedSampleRate() || sampleRat
e > AudioBuffer::maxAllowedSampleRate()) { |
| 293 exceptionState.throwDOMException( | 293 exceptionState.throwDOMException( |
| 294 NotSupportedError, | 294 NotSupportedError, |
| 295 "requested sample rate (" + String::number(sampleRate) | 295 "requested sample rate (" + String::number(sampleRate) |
| 296 + ") does not lie in the allowed range of " | 296 + ") does not lie in the allowed range of " |
| 297 + String::number(AudioBuffer::minAllowedSampleRate()) | 297 + String::number(AudioBuffer::minAllowedSampleRate()) |
| 298 + "-" + String::number(AudioBuffer::maxAllowedSampleRate()) + "
Hz"); | 298 + "-" + String::number(AudioBuffer::maxAllowedSampleRate()) + "
Hz"); |
| 299 } else if (!numberOfFrames) { | 299 } else if (!numberOfFrames) { |
| 300 exceptionState.throwDOMException( | 300 exceptionState.throwDOMException( |
| 301 NotSupportedError, | 301 NotSupportedError, |
| 302 "number of frames must be greater than 0."); | 302 "number of frames must be greater than 0."); |
| 303 } else { | 303 } else { |
| 304 exceptionState.throwDOMException( | 304 exceptionState.throwDOMException( |
| 305 NotSupportedError, | 305 NotSupportedError, |
| 306 "unable to create buffer of " + String::number(numberOfChannels) | 306 "unable to create buffer of " + String::number(numberOfChannels) |
| 307 + " channel(s) of " + String::number(numberOfFrames) | 307 + " channel(s) of " + String::number(numberOfFrames) |
| 308 + " frames each."); | 308 + " frames each."); |
| 309 } | 309 } |
| 310 return nullptr; | 310 return nullptr; |
| 311 } | 311 } |
| 312 | 312 |
| 313 return audioBuffer; | 313 return audioBuffer; |
| 314 } | 314 } |
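Note the diagnosis pattern above: `AudioBuffer::create()` reports failure only by returning null, so `createBuffer()` re-derives the reason afterwards to choose a message. A compressed, hypothetical restatement of that triage order (names are illustrative, not Blink helpers):

```cpp
#include <cstddef>

// The first failing precondition, checked in this order, selects the
// NotSupportedError message thrown by createBuffer().
enum class BufferFailure { TooManyChannels, SampleRateOutOfRange, ZeroFrames, AllocationFailed };

BufferFailure classifyBufferFailure(unsigned channels, float sampleRate, std::size_t frames,
                                    unsigned maxChannels, float minRate, float maxRate)
{
    if (channels > maxChannels)
        return BufferFailure::TooManyChannels;
    if (sampleRate < minRate || sampleRate > maxRate)
        return BufferFailure::SampleRateOutOfRange;
    if (!frames)
        return BufferFailure::ZeroFrames;
    return BufferFailure::AllocationFailed; // nothing else explains the null buffer
}
```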
| 315 | 315 |
| 316 PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionState& exceptionState) | 316 PassRefPtrWillBeRawPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionState& exceptionState) |
| 317 { | 317 { |
| 318 ASSERT(arrayBuffer); | 318 ASSERT(arrayBuffer); |
| 319 if (!arrayBuffer) { | 319 if (!arrayBuffer) { |
| 320 exceptionState.throwDOMException( | 320 exceptionState.throwDOMException( |
| 321 SyntaxError, | 321 SyntaxError, |
| 322 "invalid ArrayBuffer."); | 322 "invalid ArrayBuffer."); |
| 323 return nullptr; | 323 return nullptr; |
| 324 } | 324 } |
| 325 | 325 |
| 326 RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate()); | 326 RefPtrWillBeRawPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate()); |
| 327 if (!audioBuffer.get()) { | 327 if (!audioBuffer.get()) { |
| 328 exceptionState.throwDOMException( | 328 exceptionState.throwDOMException( |
| 329 SyntaxError, | 329 SyntaxError, |
| 330 "invalid audio data in ArrayBuffer."); | 330 "invalid audio data in ArrayBuffer."); |
| 331 return nullptr; | 331 return nullptr; |
| 332 } | 332 } |
| 333 | 333 |
| 334 return audioBuffer; | 334 return audioBuffer; |
| 335 } | 335 } |
| 336 | 336 |
| 337 void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassOwnPtr<AudioBufferCallback> successCallback, PassOwnPtr<AudioBufferCallback> errorCallback, ExceptionState& exceptionState) | 337 void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassOwnPtr<AudioBufferCallback> successCallback, PassOwnPtr<AudioBufferCallback> errorCallback, ExceptionState& exceptionState) |
| 338 { | 338 { |
| 339 if (!audioData) { | 339 if (!audioData) { |
| 340 exceptionState.throwDOMException( | 340 exceptionState.throwDOMException( |
| 341 SyntaxError, | 341 SyntaxError, |
| 342 "invalid ArrayBuffer for audioData."); | 342 "invalid ArrayBuffer for audioData."); |
| 343 return; | 343 return; |
| 344 } | 344 } |
| 345 m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback); | 345 m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback); |
| 346 } | 346 } |
| 347 | 347 |
| 348 PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource() | 348 PassRefPtrWillBeRawPtr<AudioBufferSourceNode> AudioContext::createBufferSource() |
| 349 { | 349 { |
| 350 ASSERT(isMainThread()); | 350 ASSERT(isMainThread()); |
| 351 RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate()); | 351 RefPtrWillBeRawPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate()); |
| 352 | 352 |
| 353 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing. | 353 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing. |
| 354 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing(). | 354 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing(). |
| 355 refNode(node.get()); | 355 refNode(node.get()); |
| 356 | 356 |
| 357 return node; | 357 return node; |
| 358 } | 358 } |
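`refNode()` is what keeps a scheduled source alive after script drops its last reference: the context holds a strong reference from creation until `finish()` calls `notifyNodeFinishedProcessing()`. A standalone model of that lifetime rule in plain C++ (not the real AudioNode machinery):

```cpp
#include <algorithm>
#include <memory>
#include <vector>

struct SourceNode { bool finished = false; };

struct Context {
    // Models the set of nodes the context has refNode()'d.
    std::vector<std::shared_ptr<SourceNode>> m_activeSources;

    std::shared_ptr<SourceNode> createBufferSource()
    {
        auto node = std::make_shared<SourceNode>();
        m_activeSources.push_back(node); // refNode(): playback keeps it alive
        return node;
    }

    // Mirrors notifyNodeFinishedProcessing(): drop the strong reference
    // once the node reports that playback has ended.
    void notifyNodeFinishedProcessing(SourceNode* node)
    {
        m_activeSources.erase(
            std::remove_if(m_activeSources.begin(), m_activeSources.end(),
                [node](const std::shared_ptr<SourceNode>& p) { return p.get() == node; }),
            m_activeSources.end());
    }
};
```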
| 359 | 359 |
| 360 PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState) | 360 PassRefPtrWillBeRawPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState) |
| 361 { | 361 { |
| 362 ASSERT(isMainThread()); | 362 ASSERT(isMainThread()); |
| 363 if (!mediaElement) { | 363 if (!mediaElement) { |
| 364 exceptionState.throwDOMException( | 364 exceptionState.throwDOMException( |
| 365 InvalidStateError, | 365 InvalidStateError, |
| 366 "invalid HTMLMedialElement."); | 366 "invalid HTMLMedialElement."); |
| 367 return nullptr; | 367 return nullptr; |
| 368 } | 368 } |
| 369 | 369 |
| 370 // First check if this media element already has a source node. | 370 // First check if this media element already has a source node. |
| 371 if (mediaElement->audioSourceNode()) { | 371 if (mediaElement->audioSourceNode()) { |
| 372 exceptionState.throwDOMException( | 372 exceptionState.throwDOMException( |
| 373 InvalidStateError, | 373 InvalidStateError, |
| 374 "invalid HTMLMediaElement."); | 374 "invalid HTMLMediaElement."); |
| 375 return nullptr; | 375 return nullptr; |
| 376 } | 376 } |
| 377 | 377 |
| 378 RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement); | 378 RefPtrWillBeRawPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement); |
| 379 | 379 |
| 380 mediaElement->setAudioSourceNode(node.get()); | 380 mediaElement->setAudioSourceNode(node.get()); |
| 381 | 381 |
| 382 refNode(node.get()); // context keeps reference until node is disconnected | 382 refNode(node.get()); // context keeps reference until node is disconnected |
| 383 return node; | 383 return node; |
| 384 } | 384 } |
| 385 | 385 |
| 386 PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState) | 386 PassRefPtrWillBeRawPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState) |
| 387 { | 387 { |
| 388 ASSERT(isMainThread()); | 388 ASSERT(isMainThread()); |
| 389 if (!mediaStream) { | 389 if (!mediaStream) { |
| 390 exceptionState.throwDOMException( | 390 exceptionState.throwDOMException( |
| 391 InvalidStateError, | 391 InvalidStateError, |
| 392 "invalid MediaStream source"); | 392 "invalid MediaStream source"); |
| 393 return nullptr; | 393 return nullptr; |
| 394 } | 394 } |
| 395 | 395 |
| 396 MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks(); | 396 MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks(); |
| 397 if (audioTracks.isEmpty()) { | 397 if (audioTracks.isEmpty()) { |
| 398 exceptionState.throwDOMException( | 398 exceptionState.throwDOMException( |
| 399 InvalidStateError, | 399 InvalidStateError, |
| 400 "MediaStream has no audio track"); | 400 "MediaStream has no audio track"); |
| 401 return nullptr; | 401 return nullptr; |
| 402 } | 402 } |
| 403 | 403 |
| 404 // Use the first audio track in the media stream. | 404 // Use the first audio track in the media stream. |
| 405 RefPtr<MediaStreamTrack> audioTrack = audioTracks[0]; | 405 RefPtr<MediaStreamTrack> audioTrack = audioTracks[0]; |
| 406 OwnPtr<AudioSourceProvider> provider = audioTrack->createWebAudioSource(); | 406 OwnPtr<AudioSourceProvider> provider = audioTrack->createWebAudioSource(); |
| 407 RefPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack.get(), provider.release()); | 407 RefPtrWillBeRawPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack.get(), provider.release()); |
| 408 | 408 |
| 409 // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams. | 409 // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams. |
| 410 node->setFormat(2, sampleRate()); | 410 node->setFormat(2, sampleRate()); |
| 411 | 411 |
| 412 refNode(node.get()); // context keeps reference until node is disconnected | 412 refNode(node.get()); // context keeps reference until node is disconnected |
| 413 return node; | 413 return node; |
| 414 } | 414 } |
| 415 | 415 |
| 416 PassRefPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination() | 416 PassRefPtrWillBeRawPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination() |
| 417 { | 417 { |
| 418 // Set number of output channels to stereo by default. | 418 // Set number of output channels to stereo by default. |
| 419 return MediaStreamAudioDestinationNode::create(this, 2); | 419 return MediaStreamAudioDestinationNode::create(this, 2); |
| 420 } | 420 } |
| 421 | 421 |
| 422 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(ExceptionState& exceptionState) | 422 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(ExceptionState& exceptionState) |
| 423 { | 423 { |
| 424 // Set number of input/output channels to stereo by default. | 424 // Set number of input/output channels to stereo by default. |
| 425 return createScriptProcessor(0, 2, 2, exceptionState); | 425 return createScriptProcessor(0, 2, 2, exceptionState); |
| 426 } | 426 } |
| 427 | 427 |
| 428 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState) | 428 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState) |
| 429 { | 429 { |
| 430 // Set number of input/output channels to stereo by default. | 430 // Set number of input/output channels to stereo by default. |
| 431 return createScriptProcessor(bufferSize, 2, 2, exceptionState); | 431 return createScriptProcessor(bufferSize, 2, 2, exceptionState); |
| 432 } | 432 } |
| 433 | 433 |
| 434 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState) | 434 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState) |
| 435 { | 435 { |
| 436 // Set number of output channels to stereo by default. | 436 // Set number of output channels to stereo by default. |
| 437 return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exceptionState); | 437 return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exceptionState); |
| 438 } | 438 } |
| 439 | 439 |
| 440 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState) | 440 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState) |
| 441 { | 441 { |
| 442 ASSERT(isMainThread()); | 442 ASSERT(isMainThread()); |
| 443 RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels); | 443 RefPtrWillBeRawPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels); |
| 444 | 444 |
| 445 if (!node.get()) { | 445 if (!node.get()) { |
| 446 if (!numberOfInputChannels && !numberOfOutputChannels) { | 446 if (!numberOfInputChannels && !numberOfOutputChannels) { |
| 447 exceptionState.throwDOMException( | 447 exceptionState.throwDOMException( |
| 448 IndexSizeError, | 448 IndexSizeError, |
| 449 "number of input channels and output channels cannot both be zer
o."); | 449 "number of input channels and output channels cannot both be zer
o."); |
| 450 } else if (numberOfInputChannels > AudioContext::maxNumberOfChannels())
{ | 450 } else if (numberOfInputChannels > AudioContext::maxNumberOfChannels())
{ |
| 451 exceptionState.throwDOMException( | 451 exceptionState.throwDOMException( |
| 452 IndexSizeError, | 452 IndexSizeError, |
| 453 "number of input channels (" + String::number(numberOfInputChann
els) | 453 "number of input channels (" + String::number(numberOfInputChann
els) |
| (...skipping 11 matching lines...) |
| 465 "buffer size (" + String::number(bufferSize) | 465 "buffer size (" + String::number(bufferSize) |
| 466 + ") must be a power of two between 256 and 16384."); | 466 + ") must be a power of two between 256 and 16384."); |
| 467 } | 467 } |
| 468 return nullptr; | 468 return nullptr; |
| 469 } | 469 } |
| 470 | 470 |
| 471 refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks | 471 refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks |
| 472 return node; | 472 return node; |
| 473 } | 473 } |
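For reference, the `bufferSize` constraint quoted in the error message above, a power of two between 256 and 16384, is the usual bit-trick check; this is a sketch of the rule, not the actual `ScriptProcessorNode::create()` logic:

```cpp
#include <cstddef>

// A bufferSize of 0 asks the implementation to pick a good size and is
// accepted earlier; any explicit size must be a power of two in [256, 16384].
static bool isAllowedScriptProcessorBufferSize(std::size_t size)
{
    return size >= 256 && size <= 16384 && (size & (size - 1)) == 0;
}
```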
| 474 | 474 |
| 475 PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter() | 475 PassRefPtrWillBeRawPtr<BiquadFilterNode> AudioContext::createBiquadFilter() |
| 476 { | 476 { |
| 477 ASSERT(isMainThread()); | 477 ASSERT(isMainThread()); |
| 478 return BiquadFilterNode::create(this, m_destinationNode->sampleRate()); | 478 return BiquadFilterNode::create(this, m_destinationNode->sampleRate()); |
| 479 } | 479 } |
| 480 | 480 |
| 481 PassRefPtr<WaveShaperNode> AudioContext::createWaveShaper() | 481 PassRefPtrWillBeRawPtr<WaveShaperNode> AudioContext::createWaveShaper() |
| 482 { | 482 { |
| 483 ASSERT(isMainThread()); | 483 ASSERT(isMainThread()); |
| 484 return WaveShaperNode::create(this); | 484 return WaveShaperNode::create(this); |
| 485 } | 485 } |
| 486 | 486 |
| 487 PassRefPtr<PannerNode> AudioContext::createPanner() | 487 PassRefPtrWillBeRawPtr<PannerNode> AudioContext::createPanner() |
| 488 { | 488 { |
| 489 ASSERT(isMainThread()); | 489 ASSERT(isMainThread()); |
| 490 return PannerNode::create(this, m_destinationNode->sampleRate()); | 490 return PannerNode::create(this, m_destinationNode->sampleRate()); |
| 491 } | 491 } |
| 492 | 492 |
| 493 PassRefPtr<ConvolverNode> AudioContext::createConvolver() | 493 PassRefPtrWillBeRawPtr<ConvolverNode> AudioContext::createConvolver() |
| 494 { | 494 { |
| 495 ASSERT(isMainThread()); | 495 ASSERT(isMainThread()); |
| 496 return ConvolverNode::create(this, m_destinationNode->sampleRate()); | 496 return ConvolverNode::create(this, m_destinationNode->sampleRate()); |
| 497 } | 497 } |
| 498 | 498 |
| 499 PassRefPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor() | 499 PassRefPtrWillBeRawPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor() |
| 500 { | 500 { |
| 501 ASSERT(isMainThread()); | 501 ASSERT(isMainThread()); |
| 502 return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate()); | 502 return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate()); |
| 503 } | 503 } |
| 504 | 504 |
| 505 PassRefPtr<AnalyserNode> AudioContext::createAnalyser() | 505 PassRefPtrWillBeRawPtr<AnalyserNode> AudioContext::createAnalyser() |
| 506 { | 506 { |
| 507 ASSERT(isMainThread()); | 507 ASSERT(isMainThread()); |
| 508 return AnalyserNode::create(this, m_destinationNode->sampleRate()); | 508 return AnalyserNode::create(this, m_destinationNode->sampleRate()); |
| 509 } | 509 } |
| 510 | 510 |
| 511 PassRefPtr<GainNode> AudioContext::createGain() | 511 PassRefPtrWillBeRawPtr<GainNode> AudioContext::createGain() |
| 512 { | 512 { |
| 513 ASSERT(isMainThread()); | 513 ASSERT(isMainThread()); |
| 514 return GainNode::create(this, m_destinationNode->sampleRate()); | 514 return GainNode::create(this, m_destinationNode->sampleRate()); |
| 515 } | 515 } |
| 516 | 516 |
| 517 PassRefPtr<DelayNode> AudioContext::createDelay(ExceptionState& exceptionState) | 517 PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(ExceptionState& exceptionState) |
| 518 { | 518 { |
| 519 const double defaultMaxDelayTime = 1; | 519 const double defaultMaxDelayTime = 1; |
| 520 return createDelay(defaultMaxDelayTime, exceptionState); | 520 return createDelay(defaultMaxDelayTime, exceptionState); |
| 521 } | 521 } |
| 522 | 522 |
| 523 PassRefPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState) | 523 PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState) |
| 524 { | 524 { |
| 525 ASSERT(isMainThread()); | 525 ASSERT(isMainThread()); |
| 526 RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, exceptionState); | 526 RefPtrWillBeRawPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, exceptionState); |
| 527 if (exceptionState.hadException()) | 527 if (exceptionState.hadException()) |
| 528 return nullptr; | 528 return nullptr; |
| 529 return node; | 529 return node; |
| 530 } | 530 } |
| 531 | 531 |
| 532 PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionState& exceptionState) | 532 PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionState& exceptionState) |
| 533 { | 533 { |
| 534 const unsigned ChannelSplitterDefaultNumberOfOutputs = 6; | 534 const unsigned ChannelSplitterDefaultNumberOfOutputs = 6; |
| 535 return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptionState); | 535 return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptionState); |
| 536 } | 536 } |
| 537 | 537 |
| 538 PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState) | 538 PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState) |
| 539 { | 539 { |
| 540 ASSERT(isMainThread()); | 540 ASSERT(isMainThread()); |
| 541 RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs); | 541 |
| 542 RefPtrWillBeRawPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs); |
| 542 | 543 |
| 543 if (!node.get()) { | 544 if (!node.get()) { |
| 544 exceptionState.throwDOMException( | 545 exceptionState.throwDOMException( |
| 545 IndexSizeError, | 546 IndexSizeError, |
| 546 "number of outputs (" + String::number(numberOfOutputs) | 547 "number of outputs (" + String::number(numberOfOutputs) |
| 547 + ") must be between 1 and " | 548 + ") must be between 1 and " |
| 548 + String::number(AudioContext::maxNumberOfChannels()) + "."); | 549 + String::number(AudioContext::maxNumberOfChannels()) + "."); |
| 549 return nullptr; | 550 return nullptr; |
| 550 } | 551 } |
| 551 | 552 |
| 552 return node; | 553 return node; |
| 553 } | 554 } |
| 554 | 555 |
| 555 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState& exceptionState) | 556 PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState& exceptionState) |
| 556 { | 557 { |
| 557 const unsigned ChannelMergerDefaultNumberOfInputs = 6; | 558 const unsigned ChannelMergerDefaultNumberOfInputs = 6; |
| 558 return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionState); | 559 return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionState); |
| 559 } | 560 } |
| 560 | 561 |
| 561 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState) | 562 PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState) |
| 562 { | 563 { |
| 563 ASSERT(isMainThread()); | 564 ASSERT(isMainThread()); |
| 564 RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs); | 565 |
| 566 RefPtrWillBeRawPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs); |
| 565 | 567 |
| 566 if (!node.get()) { | 568 if (!node.get()) { |
| 567 exceptionState.throwDOMException( | 569 exceptionState.throwDOMException( |
| 568 IndexSizeError, | 570 IndexSizeError, |
| 569 "number of inputs (" + String::number(numberOfInputs) | 571 "number of inputs (" + String::number(numberOfInputs) |
| 570 + ") must be between 1 and " | 572 + ") must be between 1 and " |
| 571 + String::number(AudioContext::maxNumberOfChannels()) + "."); | 573 + String::number(AudioContext::maxNumberOfChannels()) + "."); |
| 572 return nullptr; | 574 return nullptr; |
| 573 } | 575 } |
| 574 | 576 |
| 575 return node; | 577 return node; |
| 576 } | 578 } |
| 577 | 579 |
| 578 PassRefPtr<OscillatorNode> AudioContext::createOscillator() | 580 PassRefPtrWillBeRawPtr<OscillatorNode> AudioContext::createOscillator() |
| 579 { | 581 { |
| 580 ASSERT(isMainThread()); | 582 ASSERT(isMainThread()); |
| 581 RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate()); | 583 |
| 584 RefPtrWillBeRawPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate()); |
| 582 | 585 |
| 583 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing. | 586 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing. |
| 584 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing(). | 587 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing(). |
| 585 refNode(node.get()); | 588 refNode(node.get()); |
| 586 | 589 |
| 587 return node; | 590 return node; |
| 588 } | 591 } |
| 589 | 592 |
| 590 PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& exceptionState) | 593 PassRefPtrWillBeRawPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& exceptionState) |
| 591 { | 594 { |
| 592 ASSERT(isMainThread()); | 595 ASSERT(isMainThread()); |
| 593 | 596 |
| 594 if (!real) { | 597 if (!real) { |
| 595 exceptionState.throwDOMException( | 598 exceptionState.throwDOMException( |
| 596 SyntaxError, | 599 SyntaxError, |
| 597 "invalid real array"); | 600 "invalid real array"); |
| 598 return nullptr; | 601 return nullptr; |
| 599 } | 602 } |
| 600 | 603 |
| (...skipping 261 matching lines...) |
| 862 | 865 |
| 863 context->deleteMarkedNodes(); | 866 context->deleteMarkedNodes(); |
| 864 context->deref(); | 867 context->deref(); |
| 865 } | 868 } |
| 866 | 869 |
| 867 void AudioContext::deleteMarkedNodes() | 870 void AudioContext::deleteMarkedNodes() |
| 868 { | 871 { |
| 869 ASSERT(isMainThread()); | 872 ASSERT(isMainThread()); |
| 870 | 873 |
| 871 // Protect this object from being deleted before we release the mutex locked by AutoLocker. | 874 // Protect this object from being deleted before we release the mutex locked by AutoLocker. |
| 872 RefPtr<AudioContext> protect(this); | 875 RefPtrWillBeRawPtr<AudioContext> protect(this); |
| 873 { | 876 { |
| 874 AutoLocker locker(this); | 877 AutoLocker locker(this); |
| 875 | 878 |
| 876 while (size_t n = m_nodesToDelete.size()) { | 879 while (size_t n = m_nodesToDelete.size()) { |
| 877 AudioNode* node = m_nodesToDelete[n - 1]; | 880 AudioNode* node = m_nodesToDelete[n - 1]; |
| 878 m_nodesToDelete.removeLast(); | 881 m_nodesToDelete.removeLast(); |
| 879 | 882 |
| 880 // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions. | 883 // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions. |
| 881 unsigned numberOfInputs = node->numberOfInputs(); | 884 unsigned numberOfInputs = node->numberOfInputs(); |
| 882 for (unsigned i = 0; i < numberOfInputs; ++i) | 885 for (unsigned i = 0; i < numberOfInputs; ++i) |
| 883 m_dirtySummingJunctions.remove(node->input(i)); | 886 m_dirtySummingJunctions.remove(node->input(i)); |
| 884 | 887 |
| 885 // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs. | 888 // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs. |
| 886 unsigned numberOfOutputs = node->numberOfOutputs(); | 889 unsigned numberOfOutputs = node->numberOfOutputs(); |
| 887 for (unsigned i = 0; i < numberOfOutputs; ++i) | 890 for (unsigned i = 0; i < numberOfOutputs; ++i) |
| 888 m_dirtyAudioNodeOutputs.remove(node->output(i)); | 891 m_dirtyAudioNodeOutputs.remove(node->output(i)); |
| 889 | 892 #if ENABLE(OILPAN) |
| 893 // Finally, clear the keep alive handle that keeps this |
| 894 // object from being collected. |
| 895 node->clearKeepAlive(); |
| 896 #else |
| 890 // Finally, delete it. | 897 // Finally, delete it. |
| 891 delete node; | 898 delete node; |
| 899 #endif |
| 892 } | 900 } |
| 893 m_isDeletionScheduled = false; | 901 m_isDeletionScheduled = false; |
| 894 } | 902 } |
| 895 } | 903 } |
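This hunk is the heart of the Oilpan split: pre-Oilpan the context really owns marked nodes and `delete`s them, while under `ENABLE(OILPAN)` it only drops the node's keep-alive handle and lets the collector reclaim the node later. A standalone model of that handle, with `std::shared_ptr` standing in for Oilpan's persistent handle type:

```cpp
#include <memory>

struct Node {
    // While set, the node roots itself and cannot be reclaimed, no matter
    // how few outside references remain; models the keep-alive handle
    // that clearKeepAlive() releases above.
    std::shared_ptr<Node> m_keepAlive;

    void clearKeepAlive() { m_keepAlive.reset(); } // collection is now possible
};

int main()
{
    auto node = std::make_shared<Node>();
    node->m_keepAlive = node; // the node now keeps itself alive
    Node* raw = node.get();
    node.reset();             // every external reference is gone...
    raw->clearKeepAlive();    // ...but the node survives until this call
}
```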
| 896 | 904 |
| 897 void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction) | 905 void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction) |
| 898 { | 906 { |
| 899 ASSERT(isGraphOwner()); | 907 ASSERT(isGraphOwner()); |
| 900 m_dirtySummingJunctions.add(summingJunction); | 908 m_dirtySummingJunctions.add(summingJunction); |
| 901 } | 909 } |
| (...skipping 114 matching lines...) |
| 1016 void AudioContext::incrementActiveSourceCount() | 1024 void AudioContext::incrementActiveSourceCount() |
| 1017 { | 1025 { |
| 1018 atomicIncrement(&m_activeSourceCount); | 1026 atomicIncrement(&m_activeSourceCount); |
| 1019 } | 1027 } |
| 1020 | 1028 |
| 1021 void AudioContext::decrementActiveSourceCount() | 1029 void AudioContext::decrementActiveSourceCount() |
| 1022 { | 1030 { |
| 1023 atomicDecrement(&m_activeSourceCount); | 1031 atomicDecrement(&m_activeSourceCount); |
| 1024 } | 1032 } |
| 1025 | 1033 |
| 1034 void AudioContext::trace(Visitor* visitor) |
| 1035 { |
| 1036 visitor->trace(m_renderTarget); |
| 1037 visitor->trace(m_destinationNode); |
| 1038 visitor->trace(m_listener); |
| 1039 } |
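The new `trace()` is the other half of the smart-pointer changes: every garbage-collected member must be reported to the marking visitor, or Oilpan is free to reclaim it while the context still points at it. A toy model of the contract (Blink's real `Visitor` lives in platform/heap/Visitor.h):

```cpp
// Toy visitor: real marking pushes reported members onto the GC work list.
struct Visitor {
    template <typename T> void trace(T* member) { (void)member; /* mark it */ }
};

struct AudioListener {};
struct AudioDestinationNode {};

struct Context {
    AudioDestinationNode* m_destinationNode = nullptr; // a traced Member<T> in Blink
    AudioListener* m_listener = nullptr;

    void trace(Visitor* visitor)
    {
        // Omitting a field here is a use-after-free under Oilpan, which is
        // why review attention lands on trace() completeness.
        visitor->trace(m_destinationNode);
        visitor->trace(m_listener);
    }
};
```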
| 1040 |
| 1026 } // namespace WebCore | 1041 } // namespace WebCore |
| 1027 | 1042 |
| 1028 #endif // ENABLE(WEB_AUDIO) | 1043 #endif // ENABLE(WEB_AUDIO) |