Index: third_party/WebKit/Source/platform/audio/AudioDestination.cpp |
diff --git a/third_party/WebKit/Source/platform/audio/AudioDestination.cpp b/third_party/WebKit/Source/platform/audio/AudioDestination.cpp |
index ff692918aa84800f9e5bb8ab952f7a81b3382a8c..aa37c3f7a5b6b44d2738644269066acbddc4a414 100644 |
--- a/third_party/WebKit/Source/platform/audio/AudioDestination.cpp |
+++ b/third_party/WebKit/Source/platform/audio/AudioDestination.cpp |
@@ -29,13 +29,16 @@ |
#include "platform/audio/AudioDestination.h" |
#include <memory> |
+#include "platform/CrossThreadFunctional.h" |
#include "platform/Histogram.h" |
+#include "platform/WebTaskRunner.h" |
#include "platform/audio/AudioUtilities.h" |
#include "platform/audio/PushPullFIFO.h" |
#include "platform/weborigin/SecurityOrigin.h" |
#include "public/platform/Platform.h" |
#include "public/platform/WebAudioLatencyHint.h" |
#include "public/platform/WebSecurityOrigin.h" |
+#include "public/platform/WebThread.h" |
#include "wtf/PtrUtil.h" |
namespace blink { |
@@ -73,6 +76,10 @@ AudioDestination::AudioDestination(AudioIOCallback& callback, |
m_fifo( |
WTF::wrapUnique(new PushPullFIFO(numberOfOutputChannels, kFIFOSize))), |
m_framesElapsed(0) { |
+  // Create a dedicated |WebThread| for WebAudio graph rendering. |
+ m_renderingThread = WTF::wrapUnique( |
+ Platform::current()->createThread("WebAudio Rendering Thread")); |
+ |
// Create WebAudioDevice. blink::WebAudioDevice is designed to support the |
// local input (e.g. loopback from OS audio system), but Chromium's media |
// renderer does not support it currently. Thus, we use zero for the number |
@@ -106,6 +113,24 @@ void AudioDestination::render(const WebVector<float*>& destinationData, |
if (!m_fifo || m_fifo->length() < numberOfFrames) |
return; |
+ // Associate the destination data array with the output bus then fill the |
+ // FIFO. |
+ for (unsigned i = 0; i < m_numberOfOutputChannels; ++i) |
+ m_outputBus->setChannelMemory(i, destinationData[i], numberOfFrames); |
+ |
+ m_renderingThread->getWebTaskRunner()->postTask( |
+ BLINK_FROM_HERE, |
+ crossThreadBind(&AudioDestination::requestRenderOnWebThread, |
+ crossThreadUnretained(this), numberOfFrames, delay, |
+ delayTimestamp, priorFramesSkipped)); |
o1ka
2017/03/29 09:05:11
See the comment below: I would redesign the FIFO to handle the cross-thread hand-off instead.
hongchan
2017/03/29 19:39:58
Hmm. This is a rather big change and different from the current design of this CL.
|
+ |
+ m_fifo->pull(m_outputBus.get(), numberOfFrames); |
haraken
2017/03/29 07:56:24
Is it okay to pull the result before requestRenderOnWebThread() completes?
hongchan
2017/03/29 19:39:58
Yes. Then the result of pulling is going to be silence (FIFO underflow).
|
+} |
+ |
+void AudioDestination::requestRenderOnWebThread(size_t numberOfFrames, |
+ double delay, |
+ double delayTimestamp, |
+ size_t priorFramesSkipped) { |
m_framesElapsed -= std::min(m_framesElapsed, priorFramesSkipped); |
double outputPosition = |
m_framesElapsed / static_cast<double>(m_webAudioDevice->sampleRate()) - |
@@ -114,11 +139,6 @@ void AudioDestination::render(const WebVector<float*>& destinationData, |
m_outputPosition.timestamp = delayTimestamp; |
m_outputPositionReceivedTimestamp = base::TimeTicks::Now(); |
o1ka
2017/03/29 09:05:11
Have you considered encapsulating all these Web-thread interactions in a separate class?
hongchan
2017/03/29 19:39:58
I tried to have a clear separation between render() (the device callback) and requestRenderOnWebThread() (the graph rendering task).
|
- // Associate the destination data array with the output bus then fill the |
- // FIFO. |
- for (unsigned i = 0; i < m_numberOfOutputChannels; ++i) |
- m_outputBus->setChannelMemory(i, destinationData[i], numberOfFrames); |
- |
// Number of frames to render via WebAudio graph. |framesToRender > 0| means |
// the frames in FIFO is not enough to fulfill the requested frames from the |
// audio device. |
@@ -150,8 +170,6 @@ void AudioDestination::render(const WebVector<float*>& destinationData, |
m_fifo->push(m_renderBus.get()); |
} |
- m_fifo->pull(m_outputBus.get(), numberOfFrames); |
- |
m_framesElapsed += numberOfFrames; |
} |