Chromium Code Reviews| Index: third_party/WebKit/Source/platform/audio/AudioDestination.cpp |
| diff --git a/third_party/WebKit/Source/platform/audio/AudioDestination.cpp b/third_party/WebKit/Source/platform/audio/AudioDestination.cpp |
| index ff053c2dd65a801f89c79d8f8588ba8f724af20e..176d7efe1f60fabaeed3aaf10d94489e89832808 100644 |
| --- a/third_party/WebKit/Source/platform/audio/AudioDestination.cpp |
| +++ b/third_party/WebKit/Source/platform/audio/AudioDestination.cpp |
| @@ -29,7 +29,9 @@ |
| #include "platform/audio/AudioDestination.h" |
| #include <memory> |
| +#include "platform/CrossThreadFunctional.h" |
| #include "platform/Histogram.h" |
| +#include "platform/WebTaskRunner.h" |
| #include "platform/audio/AudioUtilities.h" |
| #include "platform/audio/PushPullFIFO.h" |
| #include "platform/weborigin/SecurityOrigin.h" |
| @@ -37,6 +39,7 @@ |
| #include "public/platform/Platform.h" |
| #include "public/platform/WebAudioLatencyHint.h" |
| #include "public/platform/WebSecurityOrigin.h" |
| +#include "public/platform/WebThread.h" |
| namespace blink { |
| @@ -72,6 +75,8 @@ AudioDestination::AudioDestination(AudioIOCallback& callback, |
| AudioUtilities::kRenderQuantumFrames)), |
| fifo_(WTF::WrapUnique( |
| new PushPullFIFO(number_of_output_channels, kFIFOSize))), |
| + rendering_thread_(WTF::WrapUnique( |
| + Platform::Current()->CreateThread("WebAudio Rendering Thread"))), |
| frames_elapsed_(0) { |
| // Create WebAudioDevice. blink::WebAudioDevice is designed to support the |
| // local input (e.g. loopback from OS audio system), but Chromium's media |
| @@ -97,6 +102,9 @@ void AudioDestination::Render(const WebVector<float*>& destination_data, |
| double delay, |
| double delay_timestamp, |
| size_t prior_frames_skipped) { |
| + // This method is called by AudioDeviceThread. |
| + DCHECK(!IsRenderingThread()); |
| + |
| CHECK_EQ(destination_data.size(), number_of_output_channels_); |
| CHECK_EQ(number_of_frames, callback_buffer_size_); |
| @@ -106,6 +114,40 @@ void AudioDestination::Render(const WebVector<float*>& destination_data, |
| if (!fifo_ || fifo_->length() < number_of_frames) |
| return; |
| + // Associate the destination data array with the output bus then fill the |
| + // FIFO. |
| + for (unsigned i = 0; i < number_of_output_channels_; ++i) |
| + output_bus_->SetChannelMemory(i, destination_data[i], number_of_frames); |
| + |
| + fifo_->Pull(output_bus_.Get(), number_of_frames, |
| + Bind(&AudioDestination::RequestRender, WTF::Unretained(this), |
|
hongchan
2017/04/14 16:31:48
@nhiroki @haraken Please advise — does this binding look correct? [comment truncated in extraction]
haraken
2017/04/14 19:01:43
I'm just curious but what are you concerned about?
hongchan
2017/04/14 20:46:29
I am not really familiar with Bind and Unretained/CrossThreadUnretained. [comment truncated in extraction]
nhiroki
2017/04/17 03:27:30
This Bind/Unretained usage would be correct, as follows: [comment truncated in extraction]
hongchan
2017/04/17 16:03:29
Thank you for the clarification!
|
| + delay, delay_timestamp, prior_frames_skipped)); |
| +} |
| + |
| +void AudioDestination::RequestRender(double delay, |
| + double delay_timestamp, |
| + size_t prior_frames_skipped, |
| + size_t frames_requested, |
| + size_t frames_to_render) { |
| + // This method is called by AudioDeviceThread. |
| + DCHECK(!IsRenderingThread()); |
| + |
| + rendering_thread_->GetWebTaskRunner()->PostTask( |
| + BLINK_FROM_HERE, |
| + CrossThreadBind(&AudioDestination::RequestRenderOnWebThread, |
| + CrossThreadUnretained(this), |
| + frames_requested, frames_to_render, |
| + delay, delay_timestamp, prior_frames_skipped)); |
| +} |
| + |
| +void AudioDestination::RequestRenderOnWebThread(size_t frames_requested, |
| + size_t frames_to_render, |
| + double delay, |
| + double delay_timestamp, |
| + size_t prior_frames_skipped) { |
| + // This method is called by WebThread. |
| + DCHECK(IsRenderingThread()); |
| + |
| frames_elapsed_ -= std::min(frames_elapsed_, prior_frames_skipped); |
| double output_position = |
| frames_elapsed_ / static_cast<double>(web_audio_device_->SampleRate()) - |
| @@ -114,18 +156,6 @@ void AudioDestination::Render(const WebVector<float*>& destination_data, |
| output_position_.timestamp = delay_timestamp; |
| output_position_received_timestamp_ = base::TimeTicks::Now(); |
| - // Associate the destination data array with the output bus then fill the |
| - // FIFO. |
| - for (unsigned i = 0; i < number_of_output_channels_; ++i) |
| - output_bus_->SetChannelMemory(i, destination_data[i], number_of_frames); |
| - |
| - // Number of frames to render via WebAudio graph. |framesToRender > 0| means |
| - // the frames in FIFO is not enough to fulfill the requested frames from the |
| - // audio device. |
| - size_t frames_to_render = number_of_frames > fifo_->FramesAvailable() |
| - ? number_of_frames - fifo_->FramesAvailable() |
| - : 0; |
| - |
| for (size_t pushed_frames = 0; pushed_frames < frames_to_render; |
| pushed_frames += AudioUtilities::kRenderQuantumFrames) { |
| // If platform buffer is more than two times longer than |framesToProcess| |
| @@ -150,9 +180,7 @@ void AudioDestination::Render(const WebVector<float*>& destination_data, |
| fifo_->Push(render_bus_.Get()); |
| } |
| - fifo_->Pull(output_bus_.Get(), number_of_frames); |
| - |
| - frames_elapsed_ += number_of_frames; |
| + frames_elapsed_ += frames_requested; |
| } |
| void AudioDestination::Start() { |
| @@ -204,4 +232,9 @@ bool AudioDestination::CheckBufferSize() { |
| return is_buffer_size_valid; |
| } |
| +bool AudioDestination::IsRenderingThread() { |
| + return static_cast<ThreadIdentifier>(rendering_thread_->ThreadId()) == |
| + CurrentThread(); |
| +} |
| + |
| } // namespace blink |