Chromium Code Reviews

Unified Diff: Source/modules/webaudio/AudioContext.cpp

Issue 170603003: Use nullptr_t for RefPtr, PassRefPtr and RawPtr. (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Final rebase (created 6 years, 10 months ago)
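
Background for readers of this diff: the functions below return PassRefPtr<T>, and `return nullptr;` compiles there because the WTF smart-pointer types gain a std::nullptr_t overload, which is what the issue title refers to. The following is a minimal sketch of that mechanism using an invented SketchRefPtr type and an invented createDummy factory; it is not WTF's actual RefPtr/PassRefPtr implementation.

// Minimal sketch, assuming only standard C++11. SketchRefPtr is an invented
// stand-in for a refcounted smart pointer; refcounting itself is omitted.
#include <cstddef>
#include <iostream>

template <typename T>
class SketchRefPtr {
public:
    SketchRefPtr() : m_ptr(nullptr) { }
    SketchRefPtr(std::nullptr_t) : m_ptr(nullptr) { } // lets callers write "return nullptr;"
    explicit SketchRefPtr(T* ptr) : m_ptr(ptr) { }    // ownership handling omitted for brevity
    T* get() const { return m_ptr; }
    explicit operator bool() const { return m_ptr != nullptr; }

private:
    T* m_ptr;
};

struct Dummy { };

// Shaped like the factories in this file: the old code wrote "return 0;" on
// failure paths, the new code writes "return nullptr;", which resolves to the
// std::nullptr_t constructor above rather than an integer conversion.
SketchRefPtr<Dummy> createDummy(bool succeed)
{
    if (!succeed)
        return nullptr;
    return SketchRefPtr<Dummy>(new Dummy); // a real RefPtr would manage this allocation
}

int main()
{
    std::cout << std::boolalpha << bool(createDummy(false)) << "\n"; // prints "false"
    return 0;
}

The diff below applies exactly this substitution throughout AudioContext.cpp: every failure path that used to return 0 now returns nullptr, and the member initializers for smart-pointer fields do the same.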
 /*
  * Copyright (C) 2010, Google Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
(...skipping 78 matching lines...)
 const unsigned MaxHardwareContexts = 6;
 unsigned AudioContext::s_hardwareContextCount = 0;

 PassRefPtr<AudioContext> AudioContext::create(Document& document, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     if (s_hardwareContextCount >= MaxHardwareContexts) {
         exceptionState.throwDOMException(
             SyntaxError,
             "number of hardware contexts reached maximum (" + String::number(MaxHardwareContexts) + ").");
-        return 0;
+        return nullptr;
     }

     RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(&document)));
     audioContext->suspendIfNeeded();
     return audioContext.release();
 }

 PassRefPtr<AudioContext> AudioContext::create(Document& document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
 {
     document.addConsoleMessage(JSMessageSource, WarningMessageLevel, "Deprecated AudioContext constructor: use OfflineAudioContext instead");
     return OfflineAudioContext::create(&document, numberOfChannels, numberOfFrames, sampleRate, exceptionState);
 }

 // Constructor for rendering to the audio hardware.
 AudioContext::AudioContext(Document* document)
     : ActiveDOMObject(document)
     , m_isStopScheduled(false)
     , m_isInitialized(false)
     , m_isAudioThreadFinished(false)
-    , m_destinationNode(0)
+    , m_destinationNode(nullptr)
     , m_isDeletionScheduled(false)
     , m_automaticPullNodesNeedUpdating(false)
     , m_connectionCount(0)
     , m_audioThread(0)
     , m_graphOwnerThread(UndefinedThreadIdentifier)
     , m_isOfflineContext(false)
     , m_activeSourceCount(0)
 {
     constructCommon();

     m_destinationNode = DefaultAudioDestinationNode::create(this);

     // This sets in motion an asynchronous loading mechanism on another thread.
     // We can check m_hrtfDatabaseLoader->isLoaded() to find out whether or not it has been fully loaded.
     // It's not that useful to have a callback function for this since the audio thread automatically starts rendering on the graph
     // when this has finished (see AudioDestinationNode).
     m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate());
 }

 // Constructor for offline (non-realtime) rendering.
 AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
     : ActiveDOMObject(document)
     , m_isStopScheduled(false)
     , m_isInitialized(false)
     , m_isAudioThreadFinished(false)
-    , m_destinationNode(0)
+    , m_destinationNode(nullptr)
     , m_automaticPullNodesNeedUpdating(false)
     , m_connectionCount(0)
     , m_audioThread(0)
     , m_graphOwnerThread(UndefinedThreadIdentifier)
     , m_isOfflineContext(true)
     , m_activeSourceCount(0)
 {
     constructCommon();

     m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate);
(...skipping 157 matching lines...)
             exceptionState.throwDOMException(
                 NotSupportedError,
                 "number of frames must be greater than 0.");
         } else {
             exceptionState.throwDOMException(
                 NotSupportedError,
                 "unable to create buffer of " + String::number(numberOfChannels)
                 + " channel(s) of " + String::number(numberOfFrames)
                 + " frames each.");
         }
-        return 0;
+        return nullptr;
     }

     return audioBuffer;
 }

 PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionState& exceptionState)
 {
     ASSERT(arrayBuffer);
     if (!arrayBuffer) {
         exceptionState.throwDOMException(
             SyntaxError,
             "invalid ArrayBuffer.");
-        return 0;
+        return nullptr;
     }

     RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate());
     if (!audioBuffer.get()) {
         exceptionState.throwDOMException(
             SyntaxError,
             "invalid audio data in ArrayBuffer.");
-        return 0;
+        return nullptr;
     }

     return audioBuffer;
 }

 void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassOwnPtr<AudioBufferCallback> successCallback, PassOwnPtr<AudioBufferCallback> errorCallback, ExceptionState& exceptionState)
 {
     if (!audioData) {
         exceptionState.throwDOMException(
             SyntaxError,
(...skipping 15 matching lines...)

     return node;
 }

 PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState)
 {
     if (!mediaElement) {
         exceptionState.throwDOMException(
             InvalidStateError,
             "invalid HTMLMedialElement.");
-        return 0;
+        return nullptr;
     }

     ASSERT(isMainThread());
     lazyInitialize();

     // First check if this media element already has a source node.
     if (mediaElement->audioSourceNode()) {
         exceptionState.throwDOMException(
             InvalidStateError,
             "invalid HTMLMediaElement.");
-        return 0;
+        return nullptr;
     }

     RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);

     mediaElement->setAudioSourceNode(node.get());

     refNode(node.get()); // context keeps reference until node is disconnected
     return node;
 }

 PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState)
 {
     if (!mediaStream) {
         exceptionState.throwDOMException(
             InvalidStateError,
             "invalid MediaStream source");
-        return 0;
+        return nullptr;
     }

     ASSERT(isMainThread());
     lazyInitialize();

     AudioSourceProvider* provider = 0;

     MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks();
     RefPtr<MediaStreamTrack> audioTrack;

(...skipping 62 matching lines...)
                 IndexSizeError,
                 "number of output channels (" + String::number(numberOfInputChannels)
                 + ") exceeds maximum ("
                 + String::number(AudioContext::maxNumberOfChannels()) + ").");
         } else {
             exceptionState.throwDOMException(
                 IndexSizeError,
                 "buffer size (" + String::number(bufferSize)
                 + ") must be a power of two between 256 and 16384.");
         }
-        return 0;
+        return nullptr;
     }

     refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks
     return node;
 }

 PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
 {
     ASSERT(isMainThread());
     lazyInitialize();
(...skipping 47 matching lines...)
     const double defaultMaxDelayTime = 1;
     return createDelay(defaultMaxDelayTime, exceptionState);
 }

 PassRefPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     lazyInitialize();
     RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, exceptionState);
     if (exceptionState.hadException())
-        return 0;
+        return nullptr;
     return node;
 }

 PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionState& exceptionState)
 {
     const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
     return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptionState);
 }

 PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     lazyInitialize();

     RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);

     if (!node.get()) {
         exceptionState.throwDOMException(
             IndexSizeError,
             "number of outputs (" + String::number(numberOfOutputs)
             + ") must be between 1 and "
             + String::number(AudioContext::maxNumberOfChannels()) + ".");
-        return 0;
+        return nullptr;
     }

     return node;
 }

 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState& exceptionState)
 {
     const unsigned ChannelMergerDefaultNumberOfInputs = 6;
     return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionState);
 }

 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());
     lazyInitialize();

     RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);

     if (!node.get()) {
         exceptionState.throwDOMException(
             IndexSizeError,
             "number of inputs (" + String::number(numberOfInputs)
             + ") must be between 1 and "
             + String::number(AudioContext::maxNumberOfChannels()) + ".");
-        return 0;
+        return nullptr;
     }

     return node;
 }

 PassRefPtr<OscillatorNode> AudioContext::createOscillator()
 {
     ASSERT(isMainThread());
     lazyInitialize();

     RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate());

     // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
     // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
     refNode(node.get());

     return node;
 }

 PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& exceptionState)
 {
     ASSERT(isMainThread());

     if (!real) {
         exceptionState.throwDOMException(
             SyntaxError,
             "invalid real array");
-        return 0;
+        return nullptr;
     }

     if (!imag) {
         exceptionState.throwDOMException(
             SyntaxError,
             "invalid imaginary array");
-        return 0;
+        return nullptr;
     }

     if (real->length() != imag->length()) {
         exceptionState.throwDOMException(
             IndexSizeError,
             "length of real array (" + String::number(real->length())
             + ") and length of imaginary array (" + String::number(imag->length())
             + ") must match.");
-        return 0;
+        return nullptr;
     }

     if (real->length() > 4096) {
         exceptionState.throwDOMException(
             IndexSizeError,
             "length of real array (" + String::number(real->length())
             + ") exceeds allowed maximum of 4096");
-        return 0;
+        return nullptr;
     }

     if (imag->length() > 4096) {
         exceptionState.throwDOMException(
             IndexSizeError,
             "length of imaginary array (" + String::number(imag->length())
             + ") exceeds allowed maximum of 4096");
-        return 0;
+        return nullptr;
     }

     lazyInitialize();
     return PeriodicWave::create(sampleRate(), real, imag);
 }

 void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
 {
     ASSERT(isAudioThread());
     m_finishedNodes.append(node);
(...skipping 379 matching lines...)
 }

 void AudioContext::decrementActiveSourceCount()
 {
     atomicDecrement(&m_activeSourceCount);
 }

 } // namespace WebCore

 #endif // ENABLE(WEB_AUDIO)
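
Every hunk above follows the same error-handling shape: record a DOMException on the ExceptionState passed in, then return a null smart pointer from the factory. Below is a self-contained sketch of that shape; FakeExceptionState, FakeNode, createChannelSplitterLike, and the channel limit of 32 are invented for illustration and are not Blink's real API.

// Hedged sketch of the "throw on ExceptionState, then return nullptr" pattern,
// using std::shared_ptr in place of PassRefPtr purely for illustration.
#include <iostream>
#include <memory>
#include <string>

// Invented stand-in for Blink's ExceptionState.
struct FakeExceptionState {
    bool hadException = false;
    std::string message;
    void throwDOMException(const std::string& name, const std::string& msg)
    {
        hadException = true;
        message = name + ": " + msg;
    }
};

struct FakeNode { };

// Mirrors the shape of AudioContext::createChannelSplitter(numberOfOutputs, es):
// validate the argument, record an exception on failure, and return null.
std::shared_ptr<FakeNode> createChannelSplitterLike(size_t numberOfOutputs, FakeExceptionState& es)
{
    const size_t maxNumberOfChannels = 32; // illustrative limit, not Blink's actual constant
    if (numberOfOutputs < 1 || numberOfOutputs > maxNumberOfChannels) {
        es.throwDOMException("IndexSizeError",
            "number of outputs (" + std::to_string(numberOfOutputs)
            + ") must be between 1 and " + std::to_string(maxNumberOfChannels) + ".");
        return nullptr; // the idiom this patch standardizes on
    }
    return std::make_shared<FakeNode>();
}

int main()
{
    FakeExceptionState es;
    auto node = createChannelSplitterLike(0, es);
    std::cout << (es.hadException ? es.message : std::string("created")) << "\n";
    return 0;
}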