| OLD | NEW |
| (Empty) |
| 1 /* | |
| 2 * Copyright (C) 2010, Google Inc. All rights reserved. | |
| 3 * | |
| 4 * Redistribution and use in source and binary forms, with or without | |
| 5 * modification, are permitted provided that the following conditions | |
| 6 * are met: | |
| 7 * 1. Redistributions of source code must retain the above copyright | |
| 8 * notice, this list of conditions and the following disclaimer. | |
| 9 * 2. Redistributions in binary form must reproduce the above copyright | |
| 10 * notice, this list of conditions and the following disclaimer in the | |
| 11 * documentation and/or other materials provided with the distribution. | |
| 12 * | |
| 13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND AN
Y | |
| 14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | |
| 15 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | |
| 16 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR AN
Y | |
| 17 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | |
| 18 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | |
| 19 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND O
N | |
| 20 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
| 21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | |
| 22 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
| 23 */ | |
| 24 | |
| 25 #include "modules/webaudio/AbstractAudioContext.h" | |
| 26 #include "bindings/core/v8/Dictionary.h" | |
| 27 #include "bindings/core/v8/ExceptionMessages.h" | |
| 28 #include "bindings/core/v8/ExceptionState.h" | |
| 29 #include "bindings/core/v8/ScriptPromiseResolver.h" | |
| 30 #include "bindings/core/v8/ScriptState.h" | |
| 31 #include "core/dom/DOMException.h" | |
| 32 #include "core/dom/Document.h" | |
| 33 #include "core/dom/ExceptionCode.h" | |
| 34 #include "core/dom/ExecutionContextTask.h" | |
| 35 #include "core/frame/Settings.h" | |
| 36 #include "core/html/HTMLMediaElement.h" | |
| 37 #include "modules/mediastream/MediaStream.h" | |
| 38 #include "modules/webaudio/AnalyserNode.h" | |
| 39 #include "modules/webaudio/AudioBuffer.h" | |
| 40 #include "modules/webaudio/AudioBufferCallback.h" | |
| 41 #include "modules/webaudio/AudioBufferSourceNode.h" | |
| 42 #include "modules/webaudio/AudioContext.h" | |
| 43 #include "modules/webaudio/AudioListener.h" | |
| 44 #include "modules/webaudio/AudioNodeInput.h" | |
| 45 #include "modules/webaudio/AudioNodeOutput.h" | |
| 46 #include "modules/webaudio/BiquadFilterNode.h" | |
| 47 #include "modules/webaudio/ChannelMergerNode.h" | |
| 48 #include "modules/webaudio/ChannelSplitterNode.h" | |
| 49 #include "modules/webaudio/ConvolverNode.h" | |
| 50 #include "modules/webaudio/DefaultAudioDestinationNode.h" | |
| 51 #include "modules/webaudio/DelayNode.h" | |
| 52 #include "modules/webaudio/DynamicsCompressorNode.h" | |
| 53 #include "modules/webaudio/GainNode.h" | |
| 54 #include "modules/webaudio/IIRFilterNode.h" | |
| 55 #include "modules/webaudio/MediaElementAudioSourceNode.h" | |
| 56 #include "modules/webaudio/MediaStreamAudioDestinationNode.h" | |
| 57 #include "modules/webaudio/MediaStreamAudioSourceNode.h" | |
| 58 #include "modules/webaudio/OfflineAudioCompletionEvent.h" | |
| 59 #include "modules/webaudio/OfflineAudioContext.h" | |
| 60 #include "modules/webaudio/OfflineAudioDestinationNode.h" | |
| 61 #include "modules/webaudio/OscillatorNode.h" | |
| 62 #include "modules/webaudio/PannerNode.h" | |
| 63 #include "modules/webaudio/PeriodicWave.h" | |
| 64 #include "modules/webaudio/PeriodicWaveConstraints.h" | |
| 65 #include "modules/webaudio/ScriptProcessorNode.h" | |
| 66 #include "modules/webaudio/StereoPannerNode.h" | |
| 67 #include "modules/webaudio/WaveShaperNode.h" | |
| 68 #include "platform/Histogram.h" | |
| 69 #include "platform/ThreadSafeFunctional.h" | |
| 70 #include "platform/UserGestureIndicator.h" | |
| 71 #include "platform/audio/IIRFilter.h" | |
| 72 #include "public/platform/Platform.h" | |
| 73 #include "wtf/text/WTFString.h" | |
| 74 | |
| 75 namespace blink { | |
| 76 | |
namespace {

// Buckets for the "WebAudio.UserGesture" histogram, recorded by
// AbstractAudioContext::recordUserGestureState() when rendering starts.
enum UserGestureRecord {
    UserGestureRequiredAndAvailable = 0,
    UserGestureRequiredAndNotAvailable,
    UserGestureNotRequiredAndAvailable,
    UserGestureNotRequiredAndNotAvailable,
    UserGestureRecordMax  // Must be last; used as the histogram boundary.
};

} // anonymous namespace
| 88 | |
| 89 AbstractAudioContext* AbstractAudioContext::create(Document& document, Exception
State& exceptionState) | |
| 90 { | |
| 91 return AudioContext::create(document, exceptionState); | |
| 92 } | |
| 93 | |
| 94 // FIXME(dominicc): Devolve these constructors to AudioContext | |
| 95 // and OfflineAudioContext respectively. | |
| 96 | |
// Constructor for rendering to the audio hardware.
// Creates the default (hardware) destination node and initializes the
// context; the context starts in the Suspended state.
AbstractAudioContext::AbstractAudioContext(Document* document)
    : ActiveScriptWrappable(this)
    , ActiveDOMObject(document)
    , m_destinationNode(nullptr)
    , m_isCleared(false)
    , m_isResolvingResumePromises(false)
    , m_userGestureRequired(false)
    , m_connectionCount(0)
    , m_deferredTaskHandler(DeferredTaskHandler::create())
    , m_contextState(Suspended)
    , m_closedContextSampleRate(-1) // Negative means "not closed yet"; see decodeAudioData().
    , m_periodicWaveSine(nullptr)
    , m_periodicWaveSquare(nullptr)
    , m_periodicWaveSawtooth(nullptr)
    , m_periodicWaveTriangle(nullptr)
{
    // TODO(mlamouri): we might want to use other ways of checking for this but
    // in order to record metrics, re-using the HTMLMediaElement setting is
    // probably the simplest solution.
    if (document->settings() && document->settings()->mediaPlaybackRequiresUserGesture())
        m_userGestureRequired = true;

    m_destinationNode = DefaultAudioDestinationNode::create(this);

    initialize();
}
| 124 | |
// Constructor for offline (non-realtime) rendering.
// Unlike the realtime constructor, no destination node is created here and
// initialize() is not called — presumably the offline subclass sets up its own
// OfflineAudioDestinationNode (TODO confirm against OfflineAudioContext).
AbstractAudioContext::AbstractAudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
    : ActiveScriptWrappable(this)
    , ActiveDOMObject(document)
    , m_destinationNode(nullptr)
    , m_isCleared(false)
    , m_isResolvingResumePromises(false)
    , m_userGestureRequired(false)
    , m_connectionCount(0)
    , m_deferredTaskHandler(DeferredTaskHandler::create())
    , m_contextState(Suspended)
    , m_closedContextSampleRate(-1)
    , m_periodicWaveSine(nullptr)
    , m_periodicWaveSquare(nullptr)
    , m_periodicWaveSawtooth(nullptr)
    , m_periodicWaveTriangle(nullptr)
{
    // TODO(mlamouri): we might want to use other ways of checking for this but
    // in order to record metrics, re-using the HTMLMediaElement setting is
    // probably the simplest solution.
    if (document->settings() && document->settings()->mediaPlaybackRequiresUserGesture())
        m_userGestureRequired = true;
}
| 148 | |
AbstractAudioContext::~AbstractAudioContext()
{
    // Tell the deferred task handler its context is gone before any members die.
    deferredTaskHandler().contextWillBeDestroyed();
    // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around.
    ASSERT(!isDestinationInitialized());
    ASSERT(!m_activeSourceNodes.size());
    ASSERT(!m_finishedSourceHandlers.size());
    ASSERT(!m_isResolvingResumePromises);
    ASSERT(!m_resumeResolvers.size());
}
| 159 | |
// One-time setup: prepares FFT tables, the destination handler, and the
// listener. Safe to call more than once; subsequent calls are no-ops.
void AbstractAudioContext::initialize()
{
    if (isDestinationInitialized())
        return;

    FFTFrame::initialize();

    if (m_destinationNode) {
        m_destinationNode->handler().initialize();
        // The AudioParams in the listener need access to the destination node, so only create the
        // listener if the destination node exists.
        m_listener = AudioListener::create(*this);
    }
}
| 174 | |
// Final teardown step (called from uninitialize()): drops the destination
// node and flushes handlers the dead audio thread can no longer delete.
// After this, hasPendingActivity() returns false.
void AbstractAudioContext::clear()
{
    m_destinationNode.clear();
    // The audio rendering thread is dead. Nobody will schedule AudioHandler
    // deletion. Let's do it ourselves.
    deferredTaskHandler().clearHandlersToBeDeleted();
    m_isCleared = true;
}
| 183 | |
// Shuts the context down: stops rendering, releases sources, rejects pending
// promises, then clears. The ordering below matters — rendering must stop
// before sources are released and the listener's loader thread is joined.
void AbstractAudioContext::uninitialize()
{
    ASSERT(isMainThread());

    if (!isDestinationInitialized())
        return;

    // This stops the audio thread and all audio rendering.
    if (m_destinationNode)
        m_destinationNode->handler().uninitialize();

    // Get rid of the sources which may still be playing.
    releaseActiveSourceNodes();

    // Reject any pending resolvers before we go away.
    rejectPendingResolvers();
    didClose();

    // The listener must exist because isDestinationInitialized() was true.
    ASSERT(m_listener);
    m_listener->waitForHRTFDatabaseLoaderThreadCompletion();

    clear();
}
| 207 | |
// ActiveDOMObject override: the execution context is being torn down, so
// shut the audio context down completely.
void AbstractAudioContext::stop()
{
    uninitialize();
}
| 212 | |
// ActiveScriptWrappable override: keeps the wrapper alive while the context
// has not been cleared.
bool AbstractAudioContext::hasPendingActivity() const
{
    // There's no pending activity if the audio context has been cleared.
    return !m_isCleared;
}
| 218 | |
// Returns the destination node, or null after clear(). Main thread only.
AudioDestinationNode* AbstractAudioContext::destination() const
{
    // Cannot be called from the audio thread because this method touches objects managed by Oilpan,
    // and the audio thread is not managed by Oilpan.
    ASSERT(!isAudioThread());
    return m_destinationNode;
}
| 226 | |
// Shared helper for operations that are invalid on a closed context.
void AbstractAudioContext::throwExceptionForClosedState(ExceptionState& exceptionState)
{
    exceptionState.throwDOMException(InvalidStateError, "AudioContext has been closed.");
}
| 231 | |
// Creates an AudioBuffer and records UMA metrics about its shape. Returns
// null (with |exceptionState| set by AudioBuffer::create) on failure.
AudioBuffer* AbstractAudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
{
    // It's ok to call createBuffer, even if the context is closed because the AudioBuffer doesn't
    // really "belong" to any particular context.

    AudioBuffer* buffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate, exceptionState);

    if (buffer) {
        // Only record the data if the creation succeeded.
        DEFINE_STATIC_LOCAL(SparseHistogram, audioBufferChannelsHistogram,
            ("WebAudio.AudioBuffer.NumberOfChannels"));

        // Arbitrarily limit the maximum length to 1 million frames (about 20 sec
        // at 48kHz). The number of buckets is fairly arbitrary.
        DEFINE_STATIC_LOCAL(CustomCountHistogram, audioBufferLengthHistogram,
            ("WebAudio.AudioBuffer.Length", 1, 1000000, 50));
        // The limits are the min and max AudioBuffer sample rates currently
        // supported. We use explicit values here instead of
        // AudioUtilities::minAudioBufferSampleRate() and
        // AudioUtilities::maxAudioBufferSampleRate(). The number of buckets is
        // fairly arbitrary.
        DEFINE_STATIC_LOCAL(CustomCountHistogram, audioBufferSampleRateHistogram,
            ("WebAudio.AudioBuffer.SampleRate", 3000, 192000, 60));

        audioBufferChannelsHistogram.sample(numberOfChannels);
        audioBufferLengthHistogram.count(numberOfFrames);
        audioBufferSampleRateHistogram.count(sampleRate);

        // Compute the ratio of the buffer rate and the context rate so we know
        // how often the buffer needs to be resampled to match the context. For
        // the histogram, we multiply the ratio by 100 and round to the nearest
        // integer. If the context is closed, don't record this because we
        // don't have a sample rate for closed context.
        if (!isContextClosed()) {
            // The limits are chosen from 100*(3000/192000) = 1.5625 and
            // 100*(192000/3000) = 6400, where 3000 and 192000 are the current
            // min and max sample rates possible for an AudioBuffer. The number
            // of buckets is fairly arbitrary.
            DEFINE_STATIC_LOCAL(CustomCountHistogram, audioBufferSampleRateRatioHistogram,
                ("WebAudio.AudioBuffer.SampleRateRatio", 1, 6400, 50));
            float ratio = 100 * sampleRate / this->sampleRate();
            // Round to nearest integer by adding 0.5 before truncation.
            audioBufferSampleRateRatioHistogram.count(static_cast<int>(0.5 + ratio));
        }
    }

    return buffer;
}
| 279 | |
// Kicks off asynchronous decoding of |audioData| and returns a promise that
// handleDecodeAudioData() will settle. The optional success/error callbacks
// are the legacy (pre-promise) notification path and fire in addition to the
// promise.
ScriptPromise AbstractAudioContext::decodeAudioData(ScriptState* scriptState, DOMArrayBuffer* audioData, AudioBufferCallback* successCallback, AudioBufferCallback* errorCallback, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    ASSERT(audioData);

    ScriptPromiseResolver* resolver = ScriptPromiseResolver::create(scriptState);
    ScriptPromise promise = resolver->promise();

    // Decode at the context's rate; a closed context no longer has a live
    // sample rate, so fall back to the rate recorded at close time.
    float rate = isContextClosed() ? closedContextSampleRate() : sampleRate();

    ASSERT(rate > 0);

    // Track the resolver so it stays pending (and can be rejected at shutdown)
    // until handleDecodeAudioData() removes it.
    m_decodeAudioResolvers.add(resolver);
    m_audioDecoder.decodeAsync(audioData, rate, successCallback, errorCallback, resolver, this);

    return promise;
}
| 297 | |
// Completion handler for decodeAudioData(): settles the promise (and fires
// the matching legacy callback), then forgets the resolver. |audioBuffer| is
// null when decoding failed.
void AbstractAudioContext::handleDecodeAudioData(AudioBuffer* audioBuffer, ScriptPromiseResolver* resolver, AudioBufferCallback* successCallback, AudioBufferCallback* errorCallback)
{
    ASSERT(isMainThread());

    if (audioBuffer) {
        // Resolve promise successfully and run the success callback
        resolver->resolve(audioBuffer);
        if (successCallback)
            successCallback->handleEvent(audioBuffer);
    } else {
        // Reject the promise and run the error callback
        DOMException* error = DOMException::create(EncodingError, "Unable to decode audio data");
        resolver->reject(error);
        if (errorCallback)
            errorCallback->handleEvent(error);
    }

    // We've resolved the promise. Remove it now.
    ASSERT(m_decodeAudioResolvers.contains(resolver));
    m_decodeAudioResolvers.remove(resolver);
}
| 319 | |
| 320 AudioBufferSourceNode* AbstractAudioContext::createBufferSource(ExceptionState&
exceptionState) | |
| 321 { | |
| 322 ASSERT(isMainThread()); | |
| 323 | |
| 324 AudioBufferSourceNode* node = AudioBufferSourceNode::create(*this, exception
State); | |
| 325 | |
| 326 // Do not add a reference to this source node now. The reference will be add
ed when start() is | |
| 327 // called. | |
| 328 | |
| 329 return node; | |
| 330 } | |
| 331 | |
| 332 MediaElementAudioSourceNode* AbstractAudioContext::createMediaElementSource(HTML
MediaElement* mediaElement, ExceptionState& exceptionState) | |
| 333 { | |
| 334 ASSERT(isMainThread()); | |
| 335 | |
| 336 return MediaElementAudioSourceNode::create(*this, *mediaElement, exceptionSt
ate); | |
| 337 } | |
| 338 | |
| 339 MediaStreamAudioSourceNode* AbstractAudioContext::createMediaStreamSource(MediaS
tream* mediaStream, ExceptionState& exceptionState) | |
| 340 { | |
| 341 ASSERT(isMainThread()); | |
| 342 | |
| 343 return MediaStreamAudioSourceNode::create(*this, *mediaStream, exceptionStat
e); | |
| 344 } | |
| 345 | |
| 346 MediaStreamAudioDestinationNode* AbstractAudioContext::createMediaStreamDestinat
ion(ExceptionState& exceptionState) | |
| 347 { | |
| 348 DCHECK(isMainThread()); | |
| 349 | |
| 350 // Set number of output channels to stereo by default. | |
| 351 return MediaStreamAudioDestinationNode::create(*this, 2, exceptionState); | |
| 352 } | |
| 353 | |
| 354 ScriptProcessorNode* AbstractAudioContext::createScriptProcessor(ExceptionState&
exceptionState) | |
| 355 { | |
| 356 DCHECK(isMainThread()); | |
| 357 | |
| 358 return ScriptProcessorNode::create(*this, exceptionState); | |
| 359 } | |
| 360 | |
| 361 ScriptProcessorNode* AbstractAudioContext::createScriptProcessor(size_t bufferSi
ze, ExceptionState& exceptionState) | |
| 362 { | |
| 363 DCHECK(isMainThread()); | |
| 364 | |
| 365 return ScriptProcessorNode::create(*this, bufferSize, exceptionState); | |
| 366 } | |
| 367 | |
| 368 ScriptProcessorNode* AbstractAudioContext::createScriptProcessor(size_t bufferSi
ze, size_t numberOfInputChannels, ExceptionState& exceptionState) | |
| 369 { | |
| 370 DCHECK(isMainThread()); | |
| 371 | |
| 372 return ScriptProcessorNode::create(*this, bufferSize, numberOfInputChannels,
exceptionState); | |
| 373 } | |
| 374 | |
| 375 ScriptProcessorNode* AbstractAudioContext::createScriptProcessor(size_t bufferSi
ze, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState&
exceptionState) | |
| 376 { | |
| 377 ASSERT(isMainThread()); | |
| 378 | |
| 379 return ScriptProcessorNode::create( | |
| 380 *this, | |
| 381 bufferSize, | |
| 382 numberOfInputChannels, | |
| 383 numberOfOutputChannels, | |
| 384 exceptionState); | |
| 385 } | |
| 386 | |
| 387 StereoPannerNode* AbstractAudioContext::createStereoPanner(ExceptionState& excep
tionState) | |
| 388 { | |
| 389 ASSERT(isMainThread()); | |
| 390 | |
| 391 return StereoPannerNode::create(*this, exceptionState); | |
| 392 } | |
| 393 | |
| 394 BiquadFilterNode* AbstractAudioContext::createBiquadFilter(ExceptionState& excep
tionState) | |
| 395 { | |
| 396 ASSERT(isMainThread()); | |
| 397 | |
| 398 return BiquadFilterNode::create(*this, exceptionState); | |
| 399 } | |
| 400 | |
| 401 WaveShaperNode* AbstractAudioContext::createWaveShaper(ExceptionState& exception
State) | |
| 402 { | |
| 403 ASSERT(isMainThread()); | |
| 404 | |
| 405 return WaveShaperNode::create(*this, exceptionState); | |
| 406 } | |
| 407 | |
| 408 PannerNode* AbstractAudioContext::createPanner(ExceptionState& exceptionState) | |
| 409 { | |
| 410 ASSERT(isMainThread()); | |
| 411 | |
| 412 return PannerNode::create(*this, exceptionState); | |
| 413 } | |
| 414 | |
| 415 ConvolverNode* AbstractAudioContext::createConvolver(ExceptionState& exceptionSt
ate) | |
| 416 { | |
| 417 ASSERT(isMainThread()); | |
| 418 | |
| 419 return ConvolverNode::create(*this, exceptionState); | |
| 420 } | |
| 421 | |
| 422 DynamicsCompressorNode* AbstractAudioContext::createDynamicsCompressor(Exception
State& exceptionState) | |
| 423 { | |
| 424 ASSERT(isMainThread()); | |
| 425 | |
| 426 return DynamicsCompressorNode::create(*this, exceptionState); | |
| 427 } | |
| 428 | |
| 429 AnalyserNode* AbstractAudioContext::createAnalyser(ExceptionState& exceptionStat
e) | |
| 430 { | |
| 431 ASSERT(isMainThread()); | |
| 432 | |
| 433 return AnalyserNode::create(*this, exceptionState); | |
| 434 } | |
| 435 | |
| 436 GainNode* AbstractAudioContext::createGain(ExceptionState& exceptionState) | |
| 437 { | |
| 438 ASSERT(isMainThread()); | |
| 439 | |
| 440 return GainNode::create(*this, exceptionState); | |
| 441 } | |
| 442 | |
| 443 DelayNode* AbstractAudioContext::createDelay(ExceptionState& exceptionState) | |
| 444 { | |
| 445 DCHECK(isMainThread()); | |
| 446 | |
| 447 return DelayNode::create(*this, exceptionState); | |
| 448 } | |
| 449 | |
| 450 DelayNode* AbstractAudioContext::createDelay(double maxDelayTime, ExceptionState
& exceptionState) | |
| 451 { | |
| 452 ASSERT(isMainThread()); | |
| 453 | |
| 454 return DelayNode::create(*this, maxDelayTime, exceptionState); | |
| 455 } | |
| 456 | |
| 457 ChannelSplitterNode* AbstractAudioContext::createChannelSplitter(ExceptionState&
exceptionState) | |
| 458 { | |
| 459 DCHECK(isMainThread()); | |
| 460 | |
| 461 return ChannelSplitterNode::create(*this, exceptionState); | |
| 462 } | |
| 463 | |
| 464 ChannelSplitterNode* AbstractAudioContext::createChannelSplitter(size_t numberOf
Outputs, ExceptionState& exceptionState) | |
| 465 { | |
| 466 ASSERT(isMainThread()); | |
| 467 | |
| 468 return ChannelSplitterNode::create(*this, numberOfOutputs, exceptionState); | |
| 469 } | |
| 470 | |
| 471 ChannelMergerNode* AbstractAudioContext::createChannelMerger(ExceptionState& exc
eptionState) | |
| 472 { | |
| 473 DCHECK(isMainThread()); | |
| 474 | |
| 475 return ChannelMergerNode::create(*this, exceptionState); | |
| 476 } | |
| 477 | |
| 478 ChannelMergerNode* AbstractAudioContext::createChannelMerger(size_t numberOfInpu
ts, ExceptionState& exceptionState) | |
| 479 { | |
| 480 ASSERT(isMainThread()); | |
| 481 | |
| 482 return ChannelMergerNode::create(*this, numberOfInputs, exceptionState); | |
| 483 } | |
| 484 | |
| 485 OscillatorNode* AbstractAudioContext::createOscillator(ExceptionState& exception
State) | |
| 486 { | |
| 487 ASSERT(isMainThread()); | |
| 488 | |
| 489 return OscillatorNode::create(*this, exceptionState); | |
| 490 } | |
| 491 | |
| 492 PeriodicWave* AbstractAudioContext::createPeriodicWave(DOMFloat32Array* real, DO
MFloat32Array* imag, ExceptionState& exceptionState) | |
| 493 { | |
| 494 DCHECK(isMainThread()); | |
| 495 | |
| 496 return PeriodicWave::create(*this, real, imag, false, exceptionState); | |
| 497 } | |
| 498 | |
| 499 PeriodicWave* AbstractAudioContext::createPeriodicWave(DOMFloat32Array* real, DO
MFloat32Array* imag, const PeriodicWaveConstraints& options, ExceptionState& exc
eptionState) | |
| 500 { | |
| 501 ASSERT(isMainThread()); | |
| 502 | |
| 503 bool disable = options.hasDisableNormalization() ? options.disableNormalizat
ion() : false; | |
| 504 | |
| 505 return PeriodicWave::create(*this, real, imag, disable, exceptionState); | |
| 506 } | |
| 507 | |
| 508 IIRFilterNode* AbstractAudioContext::createIIRFilter(Vector<double> feedforwardC
oef, Vector<double> feedbackCoef, ExceptionState& exceptionState) | |
| 509 { | |
| 510 ASSERT(isMainThread()); | |
| 511 | |
| 512 return IIRFilterNode::create(*this, feedforwardCoef, feedbackCoef, exception
State); | |
| 513 } | |
| 514 | |
// Returns the shared, lazily-created wavetable for one of the built-in
// oscillator types. |type| is an OscillatorHandler wave-type constant; the
// tables are cached per context so repeated oscillators share them.
PeriodicWave* AbstractAudioContext::periodicWave(int type)
{
    switch (type) {
    case OscillatorHandler::SINE:
        // Initialize the table if necessary
        if (!m_periodicWaveSine)
            m_periodicWaveSine = PeriodicWave::createSine(sampleRate());
        return m_periodicWaveSine;
    case OscillatorHandler::SQUARE:
        // Initialize the table if necessary
        if (!m_periodicWaveSquare)
            m_periodicWaveSquare = PeriodicWave::createSquare(sampleRate());
        return m_periodicWaveSquare;
    case OscillatorHandler::SAWTOOTH:
        // Initialize the table if necessary
        if (!m_periodicWaveSawtooth)
            m_periodicWaveSawtooth = PeriodicWave::createSawtooth(sampleRate());
        return m_periodicWaveSawtooth;
    case OscillatorHandler::TRIANGLE:
        // Initialize the table if necessary
        if (!m_periodicWaveTriangle)
            m_periodicWaveTriangle = PeriodicWave::createTriangle(sampleRate());
        return m_periodicWaveTriangle;
    default:
        // Callers only pass the four built-in types above.
        ASSERT_NOT_REACHED();
        return nullptr;
    }
}
| 543 | |
// Records the "WebAudio.UserGesture" histogram (see UserGestureRecord) and,
// when a required gesture is present, clears m_userGestureRequired so later
// starts are unrestricted.
void AbstractAudioContext::recordUserGestureState()
{
    DEFINE_STATIC_LOCAL(EnumerationHistogram, userGestureHistogram, ("WebAudio.UserGesture", UserGestureRecordMax));

    // No gesture required: just record whether one happened to be available.
    if (!m_userGestureRequired) {
        if (UserGestureIndicator::processingUserGesture())
            userGestureHistogram.count(UserGestureNotRequiredAndAvailable);
        else
            userGestureHistogram.count(UserGestureNotRequiredAndNotAvailable);
        return;
    }
    // Gesture required but absent: the requirement remains in force.
    if (!UserGestureIndicator::processingUserGesture()) {
        userGestureHistogram.count(UserGestureRequiredAndNotAvailable);
        return;
    }
    // Gesture required and present: satisfy the requirement permanently.
    userGestureHistogram.count(UserGestureRequiredAndAvailable);
    m_userGestureRequired = false;
}
| 562 | |
// Returns the context state as the string exposed to script.
String AbstractAudioContext::state() const
{
    // These strings had better match the strings for AudioContextState in AudioContext.idl.
    switch (m_contextState) {
    case Suspended:
        return "suspended";
    case Running:
        return "running";
    case Closed:
        return "closed";
    }
    ASSERT_NOT_REACHED();
    return "";
}
| 577 | |
// Moves the context to |newState|, asserting that the transition is legal,
// and schedules a "statechange" event on the main thread.
void AbstractAudioContext::setContextState(AudioContextState newState)
{
    ASSERT(isMainThread());

    // Validate the transitions. The valid transitions are Suspended->Running, Running->Suspended,
    // and anything->Closed.
    switch (newState) {
    case Suspended:
        ASSERT(m_contextState == Running);
        break;
    case Running:
        ASSERT(m_contextState == Suspended);
        break;
    case Closed:
        ASSERT(m_contextState != Closed);
        break;
    }

    // In release builds an invalid (no-op) transition is silently ignored.
    if (newState == m_contextState) {
        // ASSERTs above failed; just return.
        return;
    }

    m_contextState = newState;

    // Notify context that state changed
    if (getExecutionContext())
        getExecutionContext()->postTask(BLINK_FROM_HERE, createSameThreadTask(&AbstractAudioContext::notifyStateChange, wrapPersistent(this)));
}
| 607 | |
// Fires the "statechange" event; posted from setContextState().
void AbstractAudioContext::notifyStateChange()
{
    dispatchEvent(Event::create(EventTypeNames::statechange));
}
| 612 | |
// Audio-thread notification that a source handler has finished playing; the
// handler is queued for cleanup in releaseFinishedSourceNodes().
void AbstractAudioContext::notifySourceNodeFinishedProcessing(AudioHandler* handler)
{
    ASSERT(isAudioThread());
    m_finishedSourceHandlers.append(handler);
}
| 618 | |
// Main-thread half of finished-source cleanup (posted by
// releaseFinishedSourceNodes()): drops the finished nodes from the active set.
void AbstractAudioContext::removeFinishedSourceNodes()
{
    ASSERT(isMainThread());
    AutoLocker locker(this);
    // Quadratic worst case, but sizes of both vectors are considered
    // manageable, especially |m_finishedSourceNodes| is likely to be short.
    for (AudioNode* node : m_finishedSourceNodes) {
        size_t i = m_activeSourceNodes.find(node);
        if (i != kNotFound)
            m_activeSourceNodes.remove(i);
    }
    m_finishedSourceNodes.clear();
}
| 632 | |
// Audio-thread half of finished-source cleanup: breaks the connection for
// every handler queued by notifySourceNodeFinishedProcessing(), marks its node
// as finished, and posts removeFinishedSourceNodes() to the main thread to
// drop those nodes from the active set. Must hold the graph lock.
void AbstractAudioContext::releaseFinishedSourceNodes()
{
    ASSERT(isGraphOwner());
    ASSERT(isAudioThread());
    bool didRemove = false;
    for (AudioHandler* handler : m_finishedSourceHandlers) {
        for (AudioNode* node : m_activeSourceNodes) {
            // Skip nodes already marked finished in an earlier pass.
            if (m_finishedSourceNodes.contains(node))
                continue;
            if (handler == &node->handler()) {
                handler->breakConnection();
                m_finishedSourceNodes.add(node);
                didRemove = true;
                break;
            }
        }
    }
    // Only schedule the main-thread removal when something actually finished.
    if (didRemove)
        Platform::current()->mainThread()->getWebTaskRunner()->postTask(BLINK_FROM_HERE, threadSafeBind(&AbstractAudioContext::removeFinishedSourceNodes, wrapCrossThreadPersistent(this)));

    m_finishedSourceHandlers.clear();
}
| 655 | |
// Called when a source node's start() runs: keeps the node alive in
// m_activeSourceNodes and connects its handler until it finishes.
void AbstractAudioContext::notifySourceNodeStartedProcessing(AudioNode* node)
{
    ASSERT(isMainThread());
    AutoLocker locker(this);

    m_activeSourceNodes.append(node);
    node->handler().makeConnection();
}
| 664 | |
// Shutdown helper (see uninitialize()): disconnects every still-active source
// node and empties the active set.
void AbstractAudioContext::releaseActiveSourceNodes()
{
    ASSERT(isMainThread());
    for (auto& sourceNode : m_activeSourceNodes)
        sourceNode->handler().breakConnection();

    m_activeSourceNodes.clear();
}
| 673 | |
// Gives active AudioBufferSourceNodes a chance to stop themselves (e.g. when
// their scheduled end time has passed). Must hold the graph lock.
void AbstractAudioContext::handleStoppableSourceNodes()
{
    ASSERT(isGraphOwner());

    // Find AudioBufferSourceNodes to see if we can stop playing them.
    for (AudioNode* node : m_activeSourceNodes) {
        // If the AudioNode has been marked as finished and released by
        // the audio thread, but not yet removed by the main thread
        // (see releaseActiveSourceNodes() above), |node| must not be
        // touched as its handler may have been released already.
        if (m_finishedSourceNodes.contains(node))
            continue;
        if (node->handler().getNodeType() == AudioHandler::NodeTypeAudioBufferSource) {
            AudioBufferSourceNode* sourceNode = static_cast<AudioBufferSourceNode*>(node);
            sourceNode->audioBufferSourceHandler().handleStoppableSourceNode();
        }
    }
}
| 692 | |
// Audio-thread hook run before each render quantum: applies pending graph
// changes from the main thread under a best-effort (try) lock.
void AbstractAudioContext::handlePreRenderTasks()
{
    ASSERT(isAudioThread());

    // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
    // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
    if (tryLock()) {
        deferredTaskHandler().handleDeferredTasks();

        resolvePromisesForResume();

        // Check to see if source nodes can be stopped because the end time has passed.
        handleStoppableSourceNodes();

        // Update the dirty state of the listener.
        listener()->updateState();

        unlock();
    }
}
| 713 | |
// Audio-thread hook run after each render quantum: breaks deferred
// connections and cleans up finished nodes under a best-effort (try) lock.
void AbstractAudioContext::handlePostRenderTasks()
{
    ASSERT(isAudioThread());

    // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended and this method is called frequently.
    // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
    // from the render graph (in which case they'll render silence).
    if (tryLock()) {
        // Take care of AudioNode tasks where the tryLock() failed previously.
        deferredTaskHandler().breakConnections();

        // Dynamically clean up nodes which are no longer needed.
        releaseFinishedSourceNodes();

        deferredTaskHandler().handleDeferredTasks();
        deferredTaskHandler().requestToDeleteHandlersOnMainThread();

        unlock();
    }
}
| 734 | |
// Main-thread half of resume-promise resolution (posted by
// resolvePromisesForResume()): settles every pending resume() promise.
void AbstractAudioContext::resolvePromisesForResumeOnMainThread()
{
    ASSERT(isMainThread());
    AutoLocker locker(this);

    for (auto& resolver : m_resumeResolvers) {
        // A context that closed in the meantime can no longer be resumed.
        if (m_contextState == Closed) {
            resolver->reject(
                DOMException::create(InvalidStateError, "Cannot resume a context that has been closed"));
        } else {
            resolver->resolve();
        }
    }

    m_resumeResolvers.clear();
    m_isResolvingResumePromises = false;
}
| 752 | |
// Audio-thread half of resume-promise resolution: posts the main-thread task
// at most once per batch of pending resume() promises.
void AbstractAudioContext::resolvePromisesForResume()
{
    // This runs inside the AbstractAudioContext's lock when handling pre-render tasks.
    ASSERT(isAudioThread());
    ASSERT(isGraphOwner());

    // Resolve any pending promises created by resume(). Only do this if we haven't already started
    // resolving these promises. This gets called very often and it takes some time to resolve the
    // promises in the main thread.
    if (!m_isResolvingResumePromises && m_resumeResolvers.size() > 0) {
        m_isResolvingResumePromises = true;
        Platform::current()->mainThread()->getWebTaskRunner()->postTask(BLINK_FROM_HERE, threadSafeBind(&AbstractAudioContext::resolvePromisesForResumeOnMainThread, wrapCrossThreadPersistent(this)));
    }
}
| 767 | |
// Rejects every in-flight decodeAudioData() promise; used at shutdown.
void AbstractAudioContext::rejectPendingDecodeAudioDataResolvers()
{
    // Now reject any pending decodeAudioData resolvers
    for (auto& resolver : m_decodeAudioResolvers)
        resolver->reject(DOMException::create(InvalidStateError, "Audio context is going away"));
    m_decodeAudioResolvers.clear();
}
| 775 | |
// Shutdown helper (see uninitialize()): rejects all pending resume() and
// decodeAudioData() promises so callers are not left hanging.
void AbstractAudioContext::rejectPendingResolvers()
{
    ASSERT(isMainThread());

    // Audio context is closing down so reject any resume promises that are still pending.

    for (auto& resolver : m_resumeResolvers) {
        resolver->reject(DOMException::create(InvalidStateError, "Audio context is going away"));
    }
    m_resumeResolvers.clear();
    m_isResolvingResumePromises = false;

    rejectPendingDecodeAudioDataResolvers();
}
| 790 | |
// EventTarget override: the interface name exposed to the event system.
const AtomicString& AbstractAudioContext::interfaceName() const
{
    return EventTargetNames::AudioContext;
}
| 795 | |
// EventTarget override: delegates to the ActiveDOMObject base, which tracks
// the owning document's execution context.
ExecutionContext* AbstractAudioContext::getExecutionContext() const
{
    return ActiveDOMObject::getExecutionContext();
}
| 800 | |
// Begins audio rendering if the context is suspended; also records the
// user-gesture histogram. No-op when already running or closed.
void AbstractAudioContext::startRendering()
{
    // This is called for both online and offline contexts.
    ASSERT(isMainThread());
    ASSERT(m_destinationNode);

    recordUserGestureState();

    if (m_contextState == Suspended) {
        destination()->audioDestinationHandler().startRendering();
        setContextState(Running);
    }
}
| 814 | |
// Oilpan tracing: every garbage-collected member must be traced here, plus
// the base classes' members.
DEFINE_TRACE(AbstractAudioContext)
{
    visitor->trace(m_destinationNode);
    visitor->trace(m_listener);
    visitor->trace(m_activeSourceNodes);
    visitor->trace(m_resumeResolvers);
    visitor->trace(m_decodeAudioResolvers);

    // Cached built-in oscillator wavetables (see periodicWave()).
    visitor->trace(m_periodicWaveSine);
    visitor->trace(m_periodicWaveSquare);
    visitor->trace(m_periodicWaveSawtooth);
    visitor->trace(m_periodicWaveTriangle);
    EventTargetWithInlineData::trace(visitor);
    ActiveDOMObject::trace(visitor);
}
| 830 | |
| 831 SecurityOrigin* AbstractAudioContext::getSecurityOrigin() const | |
| 832 { | |
| 833 if (getExecutionContext()) | |
| 834 return getExecutionContext()->getSecurityOrigin(); | |
| 835 | |
| 836 return nullptr; | |
| 837 } | |
| 838 | |
| 839 } // namespace blink | |
| OLD | NEW |