| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2010, Google Inc. All rights reserved. | 2 * Copyright (C) 2010, Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
| 6 * are met: | 6 * are met: |
| 7 * 1. Redistributions of source code must retain the above copyright | 7 * 1. Redistributions of source code must retain the above copyright |
| 8 * notice, this list of conditions and the following disclaimer. | 8 * notice, this list of conditions and the following disclaimer. |
| 9 * 2. Redistributions in binary form must reproduce the above copyright | 9 * 2. Redistributions in binary form must reproduce the above copyright |
| 10 * notice, this list of conditions and the following disclaimer in the | 10 * notice, this list of conditions and the following disclaimer in the |
| (...skipping 71 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 82 { | 82 { |
| 83 // FIXME: It would be nice if the minimum sample-rate could be less than 44.
1KHz, | 83 // FIXME: It would be nice if the minimum sample-rate could be less than 44.
1KHz, |
| 84 // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), a
nd some testing there. | 84 // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), a
nd some testing there. |
| 85 return sampleRate >= 44100 && sampleRate <= 96000; | 85 return sampleRate >= 44100 && sampleRate <= 96000; |
| 86 } | 86 } |
| 87 | 87 |
| 88 // Don't allow more than this number of simultaneous AudioContexts talking to ha
rdware. | 88 // Don't allow more than this number of simultaneous AudioContexts talking to ha
rdware. |
| 89 const unsigned MaxHardwareContexts = 6; | 89 const unsigned MaxHardwareContexts = 6; |
| 90 unsigned AudioContext::s_hardwareContextCount = 0; | 90 unsigned AudioContext::s_hardwareContextCount = 0; |
| 91 | 91 |
| 92 PassRefPtr<AudioContext> AudioContext::create(Document& document, ExceptionState
& exceptionState) | 92 PassRefPtrWillBeRawPtr<AudioContext> AudioContext::create(Document& document, Ex
ceptionState& exceptionState) |
| 93 { | 93 { |
| 94 ASSERT(isMainThread()); | 94 ASSERT(isMainThread()); |
| 95 if (s_hardwareContextCount >= MaxHardwareContexts) { | 95 if (s_hardwareContextCount >= MaxHardwareContexts) { |
| 96 exceptionState.throwDOMException( | 96 exceptionState.throwDOMException( |
| 97 SyntaxError, | 97 SyntaxError, |
| 98 "number of hardware contexts reached maximum (" + String::number(Max
HardwareContexts) + ")."); | 98 "number of hardware contexts reached maximum (" + String::number(Max
HardwareContexts) + ")."); |
| 99 return nullptr; | 99 return nullptr; |
| 100 } | 100 } |
| 101 | 101 |
| 102 RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(&document))); | 102 RefPtrWillBeRawPtr<AudioContext> audioContext(adoptRefWillBeThreadSafeRefCou
ntedGarbageCollected(new AudioContext(&document))); |
| 103 audioContext->suspendIfNeeded(); | 103 audioContext->suspendIfNeeded(); |
| 104 return audioContext.release(); | 104 return audioContext.release(); |
| 105 } | 105 } |
| 106 | 106 |
| 107 // Constructor for rendering to the audio hardware. | 107 // Constructor for rendering to the audio hardware. |
| 108 AudioContext::AudioContext(Document* document) | 108 AudioContext::AudioContext(Document* document) |
| 109 : ActiveDOMObject(document) | 109 : ActiveDOMObject(document) |
| 110 , m_isStopScheduled(false) | 110 , m_isStopScheduled(false) |
| 111 , m_isCleared(false) | 111 , m_isCleared(false) |
| 112 , m_isInitialized(false) | 112 , m_isInitialized(false) |
| (...skipping 153 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 266 // FIXME: see if there's a more direct way to handle this issue. | 266 // FIXME: see if there's a more direct way to handle this issue. |
| 267 callOnMainThread(stopDispatch, this); | 267 callOnMainThread(stopDispatch, this); |
| 268 } | 268 } |
| 269 | 269 |
| 270 bool AudioContext::hasPendingActivity() const | 270 bool AudioContext::hasPendingActivity() const |
| 271 { | 271 { |
| 272 // According to spec AudioContext must die only after page navigates. | 272 // According to spec AudioContext must die only after page navigates. |
| 273 return !m_isCleared; | 273 return !m_isCleared; |
| 274 } | 274 } |
| 275 | 275 |
| 276 PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, si
ze_t numberOfFrames, float sampleRate, ExceptionState& exceptionState) | 276 PassRefPtrWillBeRawPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOf
Channels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionStat
e) |
| 277 { | 277 { |
| 278 RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numb
erOfFrames, sampleRate, exceptionState); | 278 RefPtrWillBeRawPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfCh
annels, numberOfFrames, sampleRate, exceptionState); |
| 279 | 279 |
| 280 return audioBuffer; | 280 return audioBuffer; |
| 281 } | 281 } |
| 282 | 282 |
| 283 void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassOwnPtr<AudioBuffe
rCallback> successCallback, PassOwnPtr<AudioBufferCallback> errorCallback, Excep
tionState& exceptionState) | 283 void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassOwnPtr<AudioBuffe
rCallback> successCallback, PassOwnPtr<AudioBufferCallback> errorCallback, Excep
tionState& exceptionState) |
| 284 { | 284 { |
| 285 if (!audioData) { | 285 if (!audioData) { |
| 286 exceptionState.throwDOMException( | 286 exceptionState.throwDOMException( |
| 287 SyntaxError, | 287 SyntaxError, |
| 288 "invalid ArrayBuffer for audioData."); | 288 "invalid ArrayBuffer for audioData."); |
| 289 return; | 289 return; |
| 290 } | 290 } |
| 291 m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCa
llback); | 291 m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCa
llback); |
| 292 } | 292 } |
| 293 | 293 |
| 294 PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource() | 294 PassRefPtrWillBeRawPtr<AudioBufferSourceNode> AudioContext::createBufferSource() |
| 295 { | 295 { |
| 296 ASSERT(isMainThread()); | 296 ASSERT(isMainThread()); |
| 297 RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_d
estinationNode->sampleRate()); | 297 RefPtrWillBeRawPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::crea
te(this, m_destinationNode->sampleRate()); |
| 298 | 298 |
| 299 // Because this is an AudioScheduledSourceNode, the context keeps a referenc
e until it has finished playing. | 299 // Because this is an AudioScheduledSourceNode, the context keeps a referenc
e until it has finished playing. |
| 300 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext:
:notifyNodeFinishedProcessing(). | 300 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext:
:notifyNodeFinishedProcessing(). |
| 301 refNode(node.get()); | 301 refNode(node.get()); |
| 302 | 302 |
| 303 return node; | 303 return node; |
| 304 } | 304 } |
| 305 | 305 |
| 306 PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(H
TMLMediaElement* mediaElement, ExceptionState& exceptionState) | 306 PassRefPtrWillBeRawPtr<MediaElementAudioSourceNode> AudioContext::createMediaEle
mentSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState) |
| 307 { | 307 { |
| 308 ASSERT(isMainThread()); | 308 ASSERT(isMainThread()); |
| 309 if (!mediaElement) { | 309 if (!mediaElement) { |
| 310 exceptionState.throwDOMException( | 310 exceptionState.throwDOMException( |
| 311 InvalidStateError, | 311 InvalidStateError, |
| 312 "invalid HTMLMedialElement."); | 312 "invalid HTMLMedialElement."); |
| 313 return nullptr; | 313 return nullptr; |
| 314 } | 314 } |
| 315 | 315 |
| 316 // First check if this media element already has a source node. | 316 // First check if this media element already has a source node. |
| 317 if (mediaElement->audioSourceNode()) { | 317 if (mediaElement->audioSourceNode()) { |
| 318 exceptionState.throwDOMException( | 318 exceptionState.throwDOMException( |
| 319 InvalidStateError, | 319 InvalidStateError, |
| 320 "invalid HTMLMediaElement."); | 320 "invalid HTMLMediaElement."); |
| 321 return nullptr; | 321 return nullptr; |
| 322 } | 322 } |
| 323 | 323 |
| 324 RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::crea
te(this, mediaElement); | 324 RefPtrWillBeRawPtr<MediaElementAudioSourceNode> node = MediaElementAudioSour
ceNode::create(this, mediaElement); |
| 325 | 325 |
| 326 mediaElement->setAudioSourceNode(node.get()); | 326 mediaElement->setAudioSourceNode(node.get()); |
| 327 | 327 |
| 328 refNode(node.get()); // context keeps reference until node is disconnected | 328 refNode(node.get()); // context keeps reference until node is disconnected |
| 329 return node; | 329 return node; |
| 330 } | 330 } |
| 331 | 331 |
| 332 PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(Med
iaStream* mediaStream, ExceptionState& exceptionState) | 332 PassRefPtrWillBeRawPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStre
amSource(MediaStream* mediaStream, ExceptionState& exceptionState) |
| 333 { | 333 { |
| 334 ASSERT(isMainThread()); | 334 ASSERT(isMainThread()); |
| 335 if (!mediaStream) { | 335 if (!mediaStream) { |
| 336 exceptionState.throwDOMException( | 336 exceptionState.throwDOMException( |
| 337 InvalidStateError, | 337 InvalidStateError, |
| 338 "invalid MediaStream source"); | 338 "invalid MediaStream source"); |
| 339 return nullptr; | 339 return nullptr; |
| 340 } | 340 } |
| 341 | 341 |
| 342 MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks(); | 342 MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks(); |
| 343 if (audioTracks.isEmpty()) { | 343 if (audioTracks.isEmpty()) { |
| 344 exceptionState.throwDOMException( | 344 exceptionState.throwDOMException( |
| 345 InvalidStateError, | 345 InvalidStateError, |
| 346 "MediaStream has no audio track"); | 346 "MediaStream has no audio track"); |
| 347 return nullptr; | 347 return nullptr; |
| 348 } | 348 } |
| 349 | 349 |
| 350 // Use the first audio track in the media stream. | 350 // Use the first audio track in the media stream. |
| 351 RefPtr<MediaStreamTrack> audioTrack = audioTracks[0]; | 351 RefPtr<MediaStreamTrack> audioTrack = audioTracks[0]; |
| 352 OwnPtr<AudioSourceProvider> provider = audioTrack->createWebAudioSource(); | 352 OwnPtr<AudioSourceProvider> provider = audioTrack->createWebAudioSource(); |
| 353 RefPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create
(this, mediaStream, audioTrack.get(), provider.release()); | 353 RefPtrWillBeRawPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSource
Node::create(this, mediaStream, audioTrack.get(), provider.release()); |
| 354 | 354 |
| 355 // FIXME: Only stereo streams are supported right now. We should be able to
accept multi-channel streams. | 355 // FIXME: Only stereo streams are supported right now. We should be able to
accept multi-channel streams. |
| 356 node->setFormat(2, sampleRate()); | 356 node->setFormat(2, sampleRate()); |
| 357 | 357 |
| 358 refNode(node.get()); // context keeps reference until node is disconnected | 358 refNode(node.get()); // context keeps reference until node is disconnected |
| 359 return node; | 359 return node; |
| 360 } | 360 } |
| 361 | 361 |
| 362 PassRefPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDesti
nation() | 362 PassRefPtrWillBeRawPtr<MediaStreamAudioDestinationNode> AudioContext::createMedi
aStreamDestination() |
| 363 { | 363 { |
| 364 // Set number of output channels to stereo by default. | 364 // Set number of output channels to stereo by default. |
| 365 return MediaStreamAudioDestinationNode::create(this, 2); | 365 return MediaStreamAudioDestinationNode::create(this, 2); |
| 366 } | 366 } |
| 367 | 367 |
| 368 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(ExceptionSta
te& exceptionState) | 368 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(
ExceptionState& exceptionState) |
| 369 { | 369 { |
| 370 // Set number of input/output channels to stereo by default. | 370 // Set number of input/output channels to stereo by default. |
| 371 return createScriptProcessor(0, 2, 2, exceptionState); | 371 return createScriptProcessor(0, 2, 2, exceptionState); |
| 372 } | 372 } |
| 373 | 373 |
| 374 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t buffe
rSize, ExceptionState& exceptionState) | 374 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(
size_t bufferSize, ExceptionState& exceptionState) |
| 375 { | 375 { |
| 376 // Set number of input/output channels to stereo by default. | 376 // Set number of input/output channels to stereo by default. |
| 377 return createScriptProcessor(bufferSize, 2, 2, exceptionState); | 377 return createScriptProcessor(bufferSize, 2, 2, exceptionState); |
| 378 } | 378 } |
| 379 | 379 |
| 380 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t buffe
rSize, size_t numberOfInputChannels, ExceptionState& exceptionState) | 380 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(
size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState) |
| 381 { | 381 { |
| 382 // Set number of output channels to stereo by default. | 382 // Set number of output channels to stereo by default. |
| 383 return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exception
State); | 383 return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exception
State); |
| 384 } | 384 } |
| 385 | 385 |
| 386 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t buffe
rSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionSta
te& exceptionState) | 386 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(
size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels,
ExceptionState& exceptionState) |
| 387 { | 387 { |
| 388 ASSERT(isMainThread()); | 388 ASSERT(isMainThread()); |
| 389 RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_desti
nationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChann
els); | 389 RefPtrWillBeRawPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(t
his, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberO
fOutputChannels); |
| 390 | 390 |
| 391 if (!node.get()) { | 391 if (!node.get()) { |
| 392 if (!numberOfInputChannels && !numberOfOutputChannels) { | 392 if (!numberOfInputChannels && !numberOfOutputChannels) { |
| 393 exceptionState.throwDOMException( | 393 exceptionState.throwDOMException( |
| 394 IndexSizeError, | 394 IndexSizeError, |
| 395 "number of input channels and output channels cannot both be zer
o."); | 395 "number of input channels and output channels cannot both be zer
o."); |
| 396 } else if (numberOfInputChannels > AudioContext::maxNumberOfChannels())
{ | 396 } else if (numberOfInputChannels > AudioContext::maxNumberOfChannels())
{ |
| 397 exceptionState.throwDOMException( | 397 exceptionState.throwDOMException( |
| 398 IndexSizeError, | 398 IndexSizeError, |
| 399 "number of input channels (" + String::number(numberOfInputChann
els) | 399 "number of input channels (" + String::number(numberOfInputChann
els) |
| (...skipping 11 matching lines...) Expand all Loading... |
| 411 "buffer size (" + String::number(bufferSize) | 411 "buffer size (" + String::number(bufferSize) |
| 412 + ") must be a power of two between 256 and 16384."); | 412 + ") must be a power of two between 256 and 16384."); |
| 413 } | 413 } |
| 414 return nullptr; | 414 return nullptr; |
| 415 } | 415 } |
| 416 | 416 |
| 417 refNode(node.get()); // context keeps reference until we stop making javascr
ipt rendering callbacks | 417 refNode(node.get()); // context keeps reference until we stop making javascr
ipt rendering callbacks |
| 418 return node; | 418 return node; |
| 419 } | 419 } |
| 420 | 420 |
| 421 PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter() | 421 PassRefPtrWillBeRawPtr<BiquadFilterNode> AudioContext::createBiquadFilter() |
| 422 { | 422 { |
| 423 ASSERT(isMainThread()); | 423 ASSERT(isMainThread()); |
| 424 return BiquadFilterNode::create(this, m_destinationNode->sampleRate()); | 424 return BiquadFilterNode::create(this, m_destinationNode->sampleRate()); |
| 425 } | 425 } |
| 426 | 426 |
| 427 PassRefPtr<WaveShaperNode> AudioContext::createWaveShaper() | 427 PassRefPtrWillBeRawPtr<WaveShaperNode> AudioContext::createWaveShaper() |
| 428 { | 428 { |
| 429 ASSERT(isMainThread()); | 429 ASSERT(isMainThread()); |
| 430 return WaveShaperNode::create(this); | 430 return WaveShaperNode::create(this); |
| 431 } | 431 } |
| 432 | 432 |
| 433 PassRefPtr<PannerNode> AudioContext::createPanner() | 433 PassRefPtrWillBeRawPtr<PannerNode> AudioContext::createPanner() |
| 434 { | 434 { |
| 435 ASSERT(isMainThread()); | 435 ASSERT(isMainThread()); |
| 436 return PannerNode::create(this, m_destinationNode->sampleRate()); | 436 return PannerNode::create(this, m_destinationNode->sampleRate()); |
| 437 } | 437 } |
| 438 | 438 |
| 439 PassRefPtr<ConvolverNode> AudioContext::createConvolver() | 439 PassRefPtrWillBeRawPtr<ConvolverNode> AudioContext::createConvolver() |
| 440 { | 440 { |
| 441 ASSERT(isMainThread()); | 441 ASSERT(isMainThread()); |
| 442 return ConvolverNode::create(this, m_destinationNode->sampleRate()); | 442 return ConvolverNode::create(this, m_destinationNode->sampleRate()); |
| 443 } | 443 } |
| 444 | 444 |
| 445 PassRefPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor() | 445 PassRefPtrWillBeRawPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompr
essor() |
| 446 { | 446 { |
| 447 ASSERT(isMainThread()); | 447 ASSERT(isMainThread()); |
| 448 return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate())
; | 448 return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate())
; |
| 449 } | 449 } |
| 450 | 450 |
| 451 PassRefPtr<AnalyserNode> AudioContext::createAnalyser() | 451 PassRefPtrWillBeRawPtr<AnalyserNode> AudioContext::createAnalyser() |
| 452 { | 452 { |
| 453 ASSERT(isMainThread()); | 453 ASSERT(isMainThread()); |
| 454 return AnalyserNode::create(this, m_destinationNode->sampleRate()); | 454 return AnalyserNode::create(this, m_destinationNode->sampleRate()); |
| 455 } | 455 } |
| 456 | 456 |
| 457 PassRefPtr<GainNode> AudioContext::createGain() | 457 PassRefPtrWillBeRawPtr<GainNode> AudioContext::createGain() |
| 458 { | 458 { |
| 459 ASSERT(isMainThread()); | 459 ASSERT(isMainThread()); |
| 460 return GainNode::create(this, m_destinationNode->sampleRate()); | 460 return GainNode::create(this, m_destinationNode->sampleRate()); |
| 461 } | 461 } |
| 462 | 462 |
| 463 PassRefPtr<DelayNode> AudioContext::createDelay(ExceptionState& exceptionState) | 463 PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(ExceptionState& exce
ptionState) |
| 464 { | 464 { |
| 465 const double defaultMaxDelayTime = 1; | 465 const double defaultMaxDelayTime = 1; |
| 466 return createDelay(defaultMaxDelayTime, exceptionState); | 466 return createDelay(defaultMaxDelayTime, exceptionState); |
| 467 } | 467 } |
| 468 | 468 |
| 469 PassRefPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionSt
ate& exceptionState) | 469 PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(double maxDelayTime,
ExceptionState& exceptionState) |
| 470 { | 470 { |
| 471 ASSERT(isMainThread()); | 471 ASSERT(isMainThread()); |
| 472 RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRa
te(), maxDelayTime, exceptionState); | 472 RefPtrWillBeRawPtr<DelayNode> node = DelayNode::create(this, m_destinationNo
de->sampleRate(), maxDelayTime, exceptionState); |
| 473 if (exceptionState.hadException()) | 473 if (exceptionState.hadException()) |
| 474 return nullptr; | 474 return nullptr; |
| 475 return node; | 475 return node; |
| 476 } | 476 } |
| 477 | 477 |
| 478 PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionSta
te& exceptionState) | 478 PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(
ExceptionState& exceptionState) |
| 479 { | 479 { |
| 480 const unsigned ChannelSplitterDefaultNumberOfOutputs = 6; | 480 const unsigned ChannelSplitterDefaultNumberOfOutputs = 6; |
| 481 return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptio
nState); | 481 return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptio
nState); |
| 482 } | 482 } |
| 483 | 483 |
| 484 PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numbe
rOfOutputs, ExceptionState& exceptionState) | 484 PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(
size_t numberOfOutputs, ExceptionState& exceptionState) |
| 485 { | 485 { |
| 486 ASSERT(isMainThread()); | 486 ASSERT(isMainThread()); |
| 487 RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_desti
nationNode->sampleRate(), numberOfOutputs); | 487 |
| 488 RefPtrWillBeRawPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(t
his, m_destinationNode->sampleRate(), numberOfOutputs); |
| 488 | 489 |
| 489 if (!node.get()) { | 490 if (!node.get()) { |
| 490 exceptionState.throwDOMException( | 491 exceptionState.throwDOMException( |
| 491 IndexSizeError, | 492 IndexSizeError, |
| 492 "number of outputs (" + String::number(numberOfOutputs) | 493 "number of outputs (" + String::number(numberOfOutputs) |
| 493 + ") must be between 1 and " | 494 + ") must be between 1 and " |
| 494 + String::number(AudioContext::maxNumberOfChannels()) + "."); | 495 + String::number(AudioContext::maxNumberOfChannels()) + "."); |
| 495 return nullptr; | 496 return nullptr; |
| 496 } | 497 } |
| 497 | 498 |
| 498 return node; | 499 return node; |
| 499 } | 500 } |
| 500 | 501 |
| 501 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState&
exceptionState) | 502 PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(Exce
ptionState& exceptionState) |
| 502 { | 503 { |
| 503 const unsigned ChannelMergerDefaultNumberOfInputs = 6; | 504 const unsigned ChannelMergerDefaultNumberOfInputs = 6; |
| 504 return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionStat
e); | 505 return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionStat
e); |
| 505 } | 506 } |
| 506 | 507 |
| 507 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfI
nputs, ExceptionState& exceptionState) | 508 PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(size
_t numberOfInputs, ExceptionState& exceptionState) |
| 508 { | 509 { |
| 509 ASSERT(isMainThread()); | 510 ASSERT(isMainThread()); |
| 510 RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinati
onNode->sampleRate(), numberOfInputs); | 511 |
| 512 RefPtrWillBeRawPtr<ChannelMergerNode> node = ChannelMergerNode::create(this,
m_destinationNode->sampleRate(), numberOfInputs); |
| 511 | 513 |
| 512 if (!node.get()) { | 514 if (!node.get()) { |
| 513 exceptionState.throwDOMException( | 515 exceptionState.throwDOMException( |
| 514 IndexSizeError, | 516 IndexSizeError, |
| 515 "number of inputs (" + String::number(numberOfInputs) | 517 "number of inputs (" + String::number(numberOfInputs) |
| 516 + ") must be between 1 and " | 518 + ") must be between 1 and " |
| 517 + String::number(AudioContext::maxNumberOfChannels()) + "."); | 519 + String::number(AudioContext::maxNumberOfChannels()) + "."); |
| 518 return nullptr; | 520 return nullptr; |
| 519 } | 521 } |
| 520 | 522 |
| 521 return node; | 523 return node; |
| 522 } | 524 } |
| 523 | 525 |
| 524 PassRefPtr<OscillatorNode> AudioContext::createOscillator() | 526 PassRefPtrWillBeRawPtr<OscillatorNode> AudioContext::createOscillator() |
| 525 { | 527 { |
| 526 ASSERT(isMainThread()); | 528 ASSERT(isMainThread()); |
| 527 RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode
->sampleRate()); | 529 |
| 530 RefPtrWillBeRawPtr<OscillatorNode> node = OscillatorNode::create(this, m_des
tinationNode->sampleRate()); |
| 528 | 531 |
| 529 // Because this is an AudioScheduledSourceNode, the context keeps a referenc
e until it has finished playing. | 532 // Because this is an AudioScheduledSourceNode, the context keeps a referenc
e until it has finished playing. |
| 530 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext:
:notifyNodeFinishedProcessing(). | 533 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext:
:notifyNodeFinishedProcessing(). |
| 531 refNode(node.get()); | 534 refNode(node.get()); |
| 532 | 535 |
| 533 return node; | 536 return node; |
| 534 } | 537 } |
| 535 | 538 |
| 536 PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Fl
oat32Array* imag, ExceptionState& exceptionState) | 539 PassRefPtrWillBeRawPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Arr
ay* real, Float32Array* imag, ExceptionState& exceptionState) |
| 537 { | 540 { |
| 538 ASSERT(isMainThread()); | 541 ASSERT(isMainThread()); |
| 539 | 542 |
| 540 if (!real) { | 543 if (!real) { |
| 541 exceptionState.throwDOMException( | 544 exceptionState.throwDOMException( |
| 542 SyntaxError, | 545 SyntaxError, |
| 543 "invalid real array"); | 546 "invalid real array"); |
| 544 return nullptr; | 547 return nullptr; |
| 545 } | 548 } |
| 546 | 549 |
| (...skipping 261 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 808 | 811 |
| 809 context->deleteMarkedNodes(); | 812 context->deleteMarkedNodes(); |
| 810 context->deref(); | 813 context->deref(); |
| 811 } | 814 } |
| 812 | 815 |
| 813 void AudioContext::deleteMarkedNodes() | 816 void AudioContext::deleteMarkedNodes() |
| 814 { | 817 { |
| 815 ASSERT(isMainThread()); | 818 ASSERT(isMainThread()); |
| 816 | 819 |
| 817 // Protect this object from being deleted before we release the mutex locked
by AutoLocker. | 820 // Protect this object from being deleted before we release the mutex locked
by AutoLocker. |
| 818 RefPtr<AudioContext> protect(this); | 821 RefPtrWillBeRawPtr<AudioContext> protect(this); |
| 819 { | 822 { |
| 820 AutoLocker locker(this); | 823 AutoLocker locker(this); |
| 821 | 824 |
| 822 while (size_t n = m_nodesToDelete.size()) { | 825 while (size_t n = m_nodesToDelete.size()) { |
| 823 AudioNode* node = m_nodesToDelete[n - 1]; | 826 AudioNode* node = m_nodesToDelete[n - 1]; |
| 824 m_nodesToDelete.removeLast(); | 827 m_nodesToDelete.removeLast(); |
| 825 | 828 |
| 826 // Before deleting the node, clear out any AudioNodeInputs from m_di
rtySummingJunctions. | 829 // Before deleting the node, clear out any AudioNodeInputs from m_di
rtySummingJunctions. |
| 827 unsigned numberOfInputs = node->numberOfInputs(); | 830 unsigned numberOfInputs = node->numberOfInputs(); |
| 828 for (unsigned i = 0; i < numberOfInputs; ++i) | 831 for (unsigned i = 0; i < numberOfInputs; ++i) |
| 829 m_dirtySummingJunctions.remove(node->input(i)); | 832 m_dirtySummingJunctions.remove(node->input(i)); |
| 830 | 833 |
| 831 // Before deleting the node, clear out any AudioNodeOutputs from m_d
irtyAudioNodeOutputs. | 834 // Before deleting the node, clear out any AudioNodeOutputs from m_d
irtyAudioNodeOutputs. |
| 832 unsigned numberOfOutputs = node->numberOfOutputs(); | 835 unsigned numberOfOutputs = node->numberOfOutputs(); |
| 833 for (unsigned i = 0; i < numberOfOutputs; ++i) | 836 for (unsigned i = 0; i < numberOfOutputs; ++i) |
| 834 m_dirtyAudioNodeOutputs.remove(node->output(i)); | 837 m_dirtyAudioNodeOutputs.remove(node->output(i)); |
| 835 | 838 #if ENABLE(OILPAN) |
| 839 // Finally, clear the keep alive handle that keeps this |
| 840 // object from being collected. |
| 841 node->clearKeepAlive(); |
| 842 #else |
| 836 // Finally, delete it. | 843 // Finally, delete it. |
| 837 delete node; | 844 delete node; |
| 845 #endif |
| 838 } | 846 } |
| 839 m_isDeletionScheduled = false; | 847 m_isDeletionScheduled = false; |
| 840 } | 848 } |
| 841 } | 849 } |
| 842 | 850 |
| 843 void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunctio
n) | 851 void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunctio
n) |
| 844 { | 852 { |
| 845 ASSERT(isGraphOwner()); | 853 ASSERT(isGraphOwner()); |
| 846 m_dirtySummingJunctions.add(summingJunction); | 854 m_dirtySummingJunctions.add(summingJunction); |
| 847 } | 855 } |
| 848 | 856 |
| 849 void AudioContext::removeMarkedSummingJunction(AudioSummingJunction* summingJunc
tion) | 857 void AudioContext::removeMarkedSummingJunction(AudioSummingJunction* summingJunc
tion) |
| 850 { | 858 { |
| 851 ASSERT(isMainThread()); | 859 ASSERT(isMainThread()); |
| 852 AutoLocker locker(this); | 860 AutoLocker locker(this); |
| 853 m_dirtySummingJunctions.remove(summingJunction); | 861 m_dirtySummingJunctions.remove(summingJunction); |
| 854 } | 862 } |
| 855 | 863 |
| 856 void AudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output) | 864 void AudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output) |
| 857 { | 865 { |
| 858 ASSERT(isGraphOwner()); | 866 ASSERT(isGraphOwner()); |
| 859 m_dirtyAudioNodeOutputs.add(output); | 867 m_dirtyAudioNodeOutputs.add(output); |
| 860 } | 868 } |
| 861 | 869 |
| 862 void AudioContext::handleDirtyAudioSummingJunctions() | 870 void AudioContext::handleDirtyAudioSummingJunctions() |
| 863 { | 871 { |
| 864 ASSERT(isGraphOwner()); | 872 ASSERT(isGraphOwner()); |
| 865 | 873 |
| 866 for (HashSet<AudioSummingJunction*>::iterator i = m_dirtySummingJunctions.be
gin(); i != m_dirtySummingJunctions.end(); ++i) | 874 for (HashSet<AudioSummingJunction*>::iterator i = m_dirtySummingJunctions.be
gin(); i != m_dirtySummingJunctions.end(); ++i) |
| 867 (*i)->updateRenderingState(); | 875 (*i)->updateRenderingState(); |
| 868 | 876 |
| 869 m_dirtySummingJunctions.clear(); | 877 m_dirtySummingJunctions.clear(); |
| 870 } | 878 } |
| 871 | 879 |
| 872 void AudioContext::handleDirtyAudioNodeOutputs() | 880 void AudioContext::handleDirtyAudioNodeOutputs() |
| 873 { | 881 { |
| 874 ASSERT(isGraphOwner()); | 882 ASSERT(isGraphOwner()); |
| 875 | 883 |
| 876 for (HashSet<AudioNodeOutput*>::iterator i = m_dirtyAudioNodeOutputs.begin()
; i != m_dirtyAudioNodeOutputs.end(); ++i) | 884 for (HashSet<AudioNodeOutput*>::iterator i = m_dirtyAudioNodeOutputs.begin()
; i != m_dirtyAudioNodeOutputs.end(); ++i) |
| (...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 952 if (!renderedBuffer) | 960 if (!renderedBuffer) |
| 953 return; | 961 return; |
| 954 | 962 |
| 955 // Avoid firing the event if the document has already gone away. | 963 // Avoid firing the event if the document has already gone away. |
| 956 if (executionContext()) { | 964 if (executionContext()) { |
| 957 // Call the offline rendering completion event listener. | 965 // Call the offline rendering completion event listener. |
| 958 dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer)); | 966 dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer)); |
| 959 } | 967 } |
| 960 } | 968 } |
| 961 | 969 |
| 970 void AudioContext::trace(Visitor* visitor) |
| 971 { |
| 972 visitor->trace(m_renderTarget); |
| 973 visitor->trace(m_destinationNode); |
| 974 visitor->trace(m_listener); |
| 975 visitor->trace(m_dirtySummingJunctions); |
| 976 } |
| 977 |
| 962 } // namespace WebCore | 978 } // namespace WebCore |
| 963 | 979 |
| 964 #endif // ENABLE(WEB_AUDIO) | 980 #endif // ENABLE(WEB_AUDIO) |
| OLD | NEW |