Chromium Code Reviews

Unified diff: Source/modules/webaudio/AudioContext.cpp

Issue 438293003: Enable Oilpan by default for webaudio/ (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 6 years, 4 months ago
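For orientation: the change repeated throughout this file is a move from Blink's transitional reference counting (PassRefPtrWillBeRawPtr<T> / RefPtrWillBeRawPtr<T>, with nullptr on error paths) to Oilpan-managed lifetime, where the factory methods return plain raw pointers (T*, with 0 on error) and the manual node-deletion machinery is dropped. The snippet below is a minimal, self-contained sketch of that ownership shift and is not Blink code: Heap, Node, RefCountedNode and createNode() are hypothetical stand-ins, with std::shared_ptr playing the role of RefPtr and Heap playing the role of a garbage-collected heap that owns the objects so call sites can hold raw pointers.

// Minimal sketch (not Blink code) of the ownership change made throughout this patch.
// "Heap", "Node", "RefCountedNode" and "createNode" are hypothetical stand-ins.
#include <cstdio>
#include <memory>
#include <vector>

// Old style: the factory transfers ownership through a ref-counted pointer, so every
// call site has to hold a smart pointer (RefPtr / PassRefPtrWillBeRawPtr in Blink).
struct RefCountedNode {
    static std::shared_ptr<RefCountedNode> create() { return std::make_shared<RefCountedNode>(); }
};

struct Node { };

// New style: an owning heap (standing in for Oilpan) keeps objects alive; the factory
// returns a raw pointer and callers store and pass Node* directly.
class Heap {
public:
    Node* allocate()
    {
        m_objects.push_back(std::make_unique<Node>());
        return m_objects.back().get();
    }
private:
    std::vector<std::unique_ptr<Node>> m_objects; // stand-in for the GC-managed arena
};

Node* createNode(Heap& heap) { return heap.allocate(); } // mirrors "AudioContext* create(...)"

int main()
{
    std::shared_ptr<RefCountedNode> before = RefCountedNode::create(); // old: caller holds a reference
    Heap heap;
    Node* after = createNode(heap); // new: raw pointer, the heap owns the object
    std::printf("before=%p after=%p\n", static_cast<void*>(before.get()), static_cast<void*>(after));
    return 0;
}

With that pattern in mind, the diff follows.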
 /*
  * Copyright (C) 2010, Google Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
(...skipping 71 matching lines...)
 {
     // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz,
     // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there.
     return sampleRate >= 44100 && sampleRate <= 96000;
 }

 // Don't allow more than this number of simultaneous AudioContexts talking to hardware.
 const unsigned MaxHardwareContexts = 6;
 unsigned AudioContext::s_hardwareContextCount = 0;

-PassRefPtrWillBeRawPtr<AudioContext> AudioContext::create(Document& document, ExceptionState& exceptionState)
+AudioContext* AudioContext::create(Document& document, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     if (s_hardwareContextCount >= MaxHardwareContexts) {
         exceptionState.throwDOMException(
             SyntaxError,
             "number of hardware contexts reached maximum (" + String::number(MaxHardwareContexts) + ").");
-        return nullptr;
+        return 0;
     }

-    RefPtrWillBeRawPtr<AudioContext> audioContext(adoptRefWillBeThreadSafeRefCountedGarbageCollected(new AudioContext(&document)));
+    AudioContext* audioContext = adoptRefCountedGarbageCollected(new AudioContext(&document));
     audioContext->suspendIfNeeded();
-    return audioContext.release();
+    return audioContext;
 }

 // Constructor for rendering to the audio hardware.
 AudioContext::AudioContext(Document* document)
     : ActiveDOMObject(document)
     , m_isStopScheduled(false)
     , m_isCleared(false)
     , m_isInitialized(false)
     , m_destinationNode(nullptr)
-#if !ENABLE(OILPAN)
-    , m_isDeletionScheduled(false)
-#endif
     , m_automaticPullNodesNeedUpdating(false)
     , m_connectionCount(0)
     , m_audioThread(0)
     , m_graphOwnerThread(UndefinedThreadIdentifier)
     , m_isOfflineContext(false)
 {
     ScriptWrappable::init(this);

     m_destinationNode = DefaultAudioDestinationNode::create(this);

     initialize();
 #if DEBUG_AUDIONODE_REFERENCES
     fprintf(stderr, "%p: AudioContext::AudioContext() #%u\n", this, AudioContext::s_hardwareContextCount);
 #endif
 }

 // Constructor for offline (non-realtime) rendering.
 AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
     : ActiveDOMObject(document)
     , m_isStopScheduled(false)
     , m_isCleared(false)
     , m_isInitialized(false)
     , m_destinationNode(nullptr)
-#if !ENABLE(OILPAN)
-    , m_isDeletionScheduled(false)
-#endif
     , m_automaticPullNodesNeedUpdating(false)
     , m_connectionCount(0)
     , m_audioThread(0)
     , m_graphOwnerThread(UndefinedThreadIdentifier)
     , m_isOfflineContext(true)
 {
     ScriptWrappable::init(this);

     // Create a new destination for offline rendering.
     m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
     if (m_renderTarget.get())
         m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());

     initialize();
 }

 AudioContext::~AudioContext()
 {
 #if DEBUG_AUDIONODE_REFERENCES
     fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this);
 #endif
     // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around.
     ASSERT(!m_isInitialized);
 #if !ENABLE(OILPAN)
-    ASSERT(!m_nodesToDelete.size());
     ASSERT(!m_referencedNodes.size());
 #endif
     ASSERT(!m_finishedNodes.size());
     ASSERT(!m_automaticPullNodes.size());
     if (m_automaticPullNodesNeedUpdating)
         m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
     ASSERT(!m_renderingAutomaticPullNodes.size());
 }

 void AudioContext::initialize()
(...skipping 15 matching lines...)
             m_destinationNode->startRendering();
             ++s_hardwareContextCount;
         }

         m_isInitialized = true;
     }
 }

 void AudioContext::clear()
 {
-#if ENABLE(OILPAN)
     // We need to run disposers before destructing m_contextGraphMutex.
     m_liveAudioSummingJunctions.clear();
     m_liveNodes.clear();
-#else
-
-    // We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
-    if (m_destinationNode)
-        m_destinationNode.clear();
-
-    // Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
-    do {
-        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
-        m_nodesMarkedForDeletion.clear();
-        deleteMarkedNodes();
-    } while (m_nodesToDelete.size());
-#endif
-
+    m_destinationNode.clear();
     m_isCleared = true;
 }

 void AudioContext::uninitialize()
 {
     ASSERT(isMainThread());

     if (!isInitialized())
         return;

(...skipping 16 matching lines...)
 {
     // Usually ExecutionContext calls stop twice.
     if (m_isStopScheduled)
         return;
     m_isStopScheduled = true;

     // Don't call uninitialize() immediately here because the ExecutionContext is in the middle
     // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
     // ActiveDOMObjects so let's schedule uninitialize() to be called later.
     // FIXME: see if there's a more direct way to handle this issue.
-    callOnMainThread(bind(&AudioContext::uninitialize, PassRefPtrWillBeRawPtr<AudioContext>(this)));
+    callOnMainThread(bind(&AudioContext::uninitialize, this));
 }

 bool AudioContext::hasPendingActivity() const
 {
     // According to spec AudioContext must die only after page navigates.
     return !m_isCleared;
 }

-PassRefPtrWillBeRawPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
+AudioBuffer* AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
 {
-    RefPtrWillBeRawPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate, exceptionState);
-
-    return audioBuffer;
+    return AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate, exceptionState);
 }

 void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassOwnPtr<AudioBufferCallback> successCallback, PassOwnPtr<AudioBufferCallback> errorCallback, ExceptionState& exceptionState)
 {
     if (!audioData) {
         exceptionState.throwDOMException(
             SyntaxError,
             "invalid ArrayBuffer for audioData.");
         return;
     }
     m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);
 }

-PassRefPtrWillBeRawPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
+AudioBufferSourceNode* AudioContext::createBufferSource()
 {
     ASSERT(isMainThread());
-    RefPtrWillBeRawPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());
+    AudioBufferSourceNode* node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());

     // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
     // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
-    refNode(node.get());
+    refNode(node);

     return node;
 }

-PassRefPtrWillBeRawPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState)
+MediaElementAudioSourceNode* AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     if (!mediaElement) {
         exceptionState.throwDOMException(
             InvalidStateError,
             "invalid HTMLMedialElement.");
-        return nullptr;
+        return 0;
     }

     // First check if this media element already has a source node.
     if (mediaElement->audioSourceNode()) {
         exceptionState.throwDOMException(
             InvalidStateError,
             "invalid HTMLMediaElement.");
-        return nullptr;
+        return 0;
     }

-    RefPtrWillBeRawPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);
+    MediaElementAudioSourceNode* node = MediaElementAudioSourceNode::create(this, mediaElement);

-    mediaElement->setAudioSourceNode(node.get());
+    mediaElement->setAudioSourceNode(node);

-    refNode(node.get()); // context keeps reference until node is disconnected
+    refNode(node); // context keeps reference until node is disconnected
     return node;
 }

-PassRefPtrWillBeRawPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState)
+MediaStreamAudioSourceNode* AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     if (!mediaStream) {
         exceptionState.throwDOMException(
             InvalidStateError,
             "invalid MediaStream source");
-        return nullptr;
+        return 0;
     }

     MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks();
     if (audioTracks.isEmpty()) {
         exceptionState.throwDOMException(
             InvalidStateError,
             "MediaStream has no audio track");
-        return nullptr;
+        return 0;
     }

     // Use the first audio track in the media stream.
     MediaStreamTrack* audioTrack = audioTracks[0];
     OwnPtr<AudioSourceProvider> provider = audioTrack->createWebAudioSource();
-    RefPtrWillBeRawPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack, provider.release());
+    MediaStreamAudioSourceNode* node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack, provider.release());

     // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
     node->setFormat(2, sampleRate());

-    refNode(node.get()); // context keeps reference until node is disconnected
+    refNode(node); // context keeps reference until node is disconnected
     return node;
 }

-PassRefPtrWillBeRawPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
+MediaStreamAudioDestinationNode* AudioContext::createMediaStreamDestination()
 {
     // Set number of output channels to stereo by default.
     return MediaStreamAudioDestinationNode::create(this, 2);
 }

-PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(ExceptionState& exceptionState)
+ScriptProcessorNode* AudioContext::createScriptProcessor(ExceptionState& exceptionState)
 {
     // Set number of input/output channels to stereo by default.
     return createScriptProcessor(0, 2, 2, exceptionState);
 }

-PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState)
+ScriptProcessorNode* AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState)
 {
     // Set number of input/output channels to stereo by default.
     return createScriptProcessor(bufferSize, 2, 2, exceptionState);
 }

-PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState)
+ScriptProcessorNode* AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState)
 {
     // Set number of output channels to stereo by default.
     return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exceptionState);
 }

-PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState)
+ScriptProcessorNode* AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
-    RefPtrWillBeRawPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
+    ScriptProcessorNode* node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);

-    if (!node.get()) {
+    if (!node) {
         if (!numberOfInputChannels && !numberOfOutputChannels) {
             exceptionState.throwDOMException(
                 IndexSizeError,
                 "number of input channels and output channels cannot both be zero.");
         } else if (numberOfInputChannels > AudioContext::maxNumberOfChannels()) {
             exceptionState.throwDOMException(
                 IndexSizeError,
                 "number of input channels (" + String::number(numberOfInputChannels)
                 + ") exceeds maximum ("
                 + String::number(AudioContext::maxNumberOfChannels()) + ").");
         } else if (numberOfOutputChannels > AudioContext::maxNumberOfChannels()) {
             exceptionState.throwDOMException(
                 IndexSizeError,
                 "number of output channels (" + String::number(numberOfInputChannels)
                 + ") exceeds maximum ("
                 + String::number(AudioContext::maxNumberOfChannels()) + ").");
         } else {
             exceptionState.throwDOMException(
                 IndexSizeError,
                 "buffer size (" + String::number(bufferSize)
                 + ") must be a power of two between 256 and 16384.");
         }
-        return nullptr;
+        return 0;
     }

-    refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks
+    refNode(node); // context keeps reference until we stop making javascript rendering callbacks
     return node;
 }

-PassRefPtrWillBeRawPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
+BiquadFilterNode* AudioContext::createBiquadFilter()
 {
     ASSERT(isMainThread());
     return BiquadFilterNode::create(this, m_destinationNode->sampleRate());
 }

-PassRefPtrWillBeRawPtr<WaveShaperNode> AudioContext::createWaveShaper()
+WaveShaperNode* AudioContext::createWaveShaper()
 {
     ASSERT(isMainThread());
     return WaveShaperNode::create(this);
 }

-PassRefPtrWillBeRawPtr<PannerNode> AudioContext::createPanner()
+PannerNode* AudioContext::createPanner()
 {
     ASSERT(isMainThread());
     return PannerNode::create(this, m_destinationNode->sampleRate());
 }

-PassRefPtrWillBeRawPtr<ConvolverNode> AudioContext::createConvolver()
+ConvolverNode* AudioContext::createConvolver()
 {
     ASSERT(isMainThread());
     return ConvolverNode::create(this, m_destinationNode->sampleRate());
 }

-PassRefPtrWillBeRawPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
+DynamicsCompressorNode* AudioContext::createDynamicsCompressor()
 {
     ASSERT(isMainThread());
     return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate());
 }

-PassRefPtrWillBeRawPtr<AnalyserNode> AudioContext::createAnalyser()
+AnalyserNode* AudioContext::createAnalyser()
 {
     ASSERT(isMainThread());
     return AnalyserNode::create(this, m_destinationNode->sampleRate());
 }

-PassRefPtrWillBeRawPtr<GainNode> AudioContext::createGain()
+GainNode* AudioContext::createGain()
 {
     ASSERT(isMainThread());
     return GainNode::create(this, m_destinationNode->sampleRate());
 }

-PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(ExceptionState& exceptionState)
+DelayNode* AudioContext::createDelay(ExceptionState& exceptionState)
 {
     const double defaultMaxDelayTime = 1;
     return createDelay(defaultMaxDelayTime, exceptionState);
 }

-PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState)
+DelayNode* AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
-    RefPtrWillBeRawPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, exceptionState);
+    DelayNode* node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, exceptionState);
     if (exceptionState.hadException())
-        return nullptr;
+        return 0;
     return node;
 }

-PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionState& exceptionState)
+ChannelSplitterNode* AudioContext::createChannelSplitter(ExceptionState& exceptionState)
 {
     const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
     return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptionState);
 }

-PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState)
+ChannelSplitterNode* AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());

-    RefPtrWillBeRawPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);
+    ChannelSplitterNode* node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);

-    if (!node.get()) {
+    if (!node) {
         exceptionState.throwDOMException(
             IndexSizeError,
             "number of outputs (" + String::number(numberOfOutputs)
             + ") must be between 1 and "
             + String::number(AudioContext::maxNumberOfChannels()) + ".");
-        return nullptr;
+        return 0;
     }

     return node;
 }

-PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState& exceptionState)
+ChannelMergerNode* AudioContext::createChannelMerger(ExceptionState& exceptionState)
 {
     const unsigned ChannelMergerDefaultNumberOfInputs = 6;
     return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionState);
 }

-PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState)
+ChannelMergerNode* AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());

-    RefPtrWillBeRawPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);
+    ChannelMergerNode* node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);

-    if (!node.get()) {
+    if (!node) {
         exceptionState.throwDOMException(
             IndexSizeError,
             "number of inputs (" + String::number(numberOfInputs)
             + ") must be between 1 and "
             + String::number(AudioContext::maxNumberOfChannels()) + ".");
-        return nullptr;
+        return 0;
     }

     return node;
 }

-PassRefPtrWillBeRawPtr<OscillatorNode> AudioContext::createOscillator()
+OscillatorNode* AudioContext::createOscillator()
 {
     ASSERT(isMainThread());

-    RefPtrWillBeRawPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate());
+    OscillatorNode* node = OscillatorNode::create(this, m_destinationNode->sampleRate());

     // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
     // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
-    refNode(node.get());
+    refNode(node);

     return node;
 }

-PassRefPtrWillBeRawPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& exceptionState)
+PeriodicWave* AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());

     if (!real) {
         exceptionState.throwDOMException(
             SyntaxError,
             "invalid real array");
-        return nullptr;
+        return 0;
     }

     if (!imag) {
         exceptionState.throwDOMException(
             SyntaxError,
             "invalid imaginary array");
-        return nullptr;
+        return 0;
     }

     if (real->length() != imag->length()) {
         exceptionState.throwDOMException(
             IndexSizeError,
             "length of real array (" + String::number(real->length())
             + ") and length of imaginary array (" + String::number(imag->length())
             + ") must match.");
-        return nullptr;
+        return 0;
     }

     if (real->length() > 4096) {
         exceptionState.throwDOMException(
             IndexSizeError,
             "length of real array (" + String::number(real->length())
             + ") exceeds allowed maximum of 4096");
-        return nullptr;
+        return 0;
     }

     if (imag->length() > 4096) {
         exceptionState.throwDOMException(
             IndexSizeError,
             "length of imaginary array (" + String::number(imag->length())
             + ") exceeds allowed maximum of 4096");
-        return nullptr;
+        return 0;
     }

     return PeriodicWave::create(sampleRate(), real, imag);
 }

 void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
 {
     ASSERT(isAudioThread());
     m_finishedNodes.append(node);
 }
(...skipping 149 matching lines...)
     // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
     // from the render graph (in which case they'll render silence).
     bool mustReleaseLock;
     if (tryLock(mustReleaseLock)) {
         // Take care of AudioNode tasks where the tryLock() failed previously.
         handleDeferredAudioNodeTasks();

         // Dynamically clean up nodes which are no longer needed.
         derefFinishedSourceNodes();

-#if !ENABLE(OILPAN)
-        // Don't delete in the real-time thread. Let the main thread do it.
-        // Ref-counted objects held by certain AudioNodes may not be thread-safe.
-        scheduleNodeDeletion();
-#endif
-
         // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
         handleDirtyAudioSummingJunctions();
         handleDirtyAudioNodeOutputs();

         updateAutomaticPullNodes();

         if (mustReleaseLock)
             unlock();
     }
 }

 void AudioContext::handleDeferredAudioNodeTasks()
 {
     ASSERT(isAudioThread() && isGraphOwner());

     for (unsigned i = 0; i < m_deferredBreakConnectionList.size(); ++i)
         m_deferredBreakConnectionList[i]->breakConnectionWithLock();
     m_deferredBreakConnectionList.clear();

 #if !ENABLE(OILPAN)
     for (unsigned i = 0; i < m_deferredFinishDerefList.size(); ++i)
         m_deferredFinishDerefList[i]->finishDeref();
     m_deferredFinishDerefList.clear();
 #endif
 }

-#if ENABLE(OILPAN)
 void AudioContext::registerLiveNode(AudioNode& node)
 {
     ASSERT(isMainThread());
     m_liveNodes.add(&node, adoptPtr(new AudioNodeDisposer(node)));
 }

 AudioContext::AudioNodeDisposer::~AudioNodeDisposer()
 {
     ASSERT(isMainThread());
     AudioContext::AutoLocker locker(m_node.context());
     m_node.dispose();
 }

 void AudioContext::registerLiveAudioSummingJunction(AudioSummingJunction& junction)
 {
     ASSERT(isMainThread());
     m_liveAudioSummingJunctions.add(&junction, adoptPtr(new AudioSummingJunctionDisposer(junction)));
 }

 AudioContext::AudioSummingJunctionDisposer::~AudioSummingJunctionDisposer()
 {
     ASSERT(isMainThread());
     m_junction.context()->removeMarkedSummingJunction(&m_junction);
 }
-#else
-
-void AudioContext::markForDeletion(AudioNode* node)
-{
-    ASSERT(isGraphOwner());
-
-    if (!isInitialized())
-        m_nodesToDelete.append(node);
-    else
-        m_nodesMarkedForDeletion.append(node);
-}
-
-void AudioContext::scheduleNodeDeletion()
-{
-    bool isGood = isInitialized() && isGraphOwner();
-    ASSERT(isGood);
-    if (!isGood)
-        return;
-
-    // Make sure to call deleteMarkedNodes() on main thread.
-    if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
-        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
-        m_nodesMarkedForDeletion.clear();
-
-        m_isDeletionScheduled = true;
-
-        // Don't let ourself get deleted before the callback.
-        // See matching deref() in deleteMarkedNodesDispatch().
-        ref();
-        callOnMainThread(deleteMarkedNodesDispatch, this);
-    }
-}
-
-void AudioContext::deleteMarkedNodesDispatch(void* userData)
-{
-    AudioContext* context = reinterpret_cast<AudioContext*>(userData);
-    ASSERT(context);
-    if (!context)
-        return;
-
-    context->deleteMarkedNodes();
-    context->deref();
-}
-
-void AudioContext::deleteMarkedNodes()
-{
-    ASSERT(isMainThread());
-
-    // Protect this object from being deleted before we release the mutex locked by AutoLocker.
-    RefPtrWillBeRawPtr<AudioContext> protect(this);
-    {
-        AutoLocker locker(this);
-
-        while (size_t n = m_nodesToDelete.size()) {
-            AudioNode* node = m_nodesToDelete[n - 1];
-            m_nodesToDelete.removeLast();
-
-            node->dispose();
-
-            // Finally, delete it.
-            delete node;
-        }
-        m_isDeletionScheduled = false;
-    }
-}
-#endif

 void AudioContext::unmarkDirtyNode(AudioNode& node)
 {
     ASSERT(isGraphOwner());
-#if !ENABLE(OILPAN)
-    // Before deleting the node, clear out any AudioNodeInputs from
-    // m_dirtySummingJunctions.
-    unsigned numberOfInputs = node.numberOfInputs();
-    for (unsigned i = 0; i < numberOfInputs; ++i)
-        m_dirtySummingJunctions.remove(node.input(i));
-#endif

     // Before deleting the node, clear out any AudioNodeOutputs from
     // m_dirtyAudioNodeOutputs.
     unsigned numberOfOutputs = node.numberOfOutputs();
     for (unsigned i = 0; i < numberOfOutputs; ++i)
         m_dirtyAudioNodeOutputs.remove(node.output(i));
 }

 void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
 {
(...skipping 113 matching lines...)
         // Call the offline rendering completion event listener.
         dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
     }
 }

 void AudioContext::trace(Visitor* visitor)
 {
     visitor->trace(m_renderTarget);
     visitor->trace(m_destinationNode);
     visitor->trace(m_listener);
-#if ENABLE(OILPAN)
     visitor->trace(m_referencedNodes);
     visitor->trace(m_liveNodes);
     visitor->trace(m_liveAudioSummingJunctions);
-#endif
     EventTargetWithInlineData::trace(visitor);
 }

 } // namespace blink

 #endif // ENABLE(WEB_AUDIO)