Chromium Code Reviews
| Index: content/renderer/media/track_audio_renderer.cc |
| diff --git a/content/renderer/media/webrtc_local_audio_renderer.cc b/content/renderer/media/track_audio_renderer.cc |
| similarity index 39% |
| rename from content/renderer/media/webrtc_local_audio_renderer.cc |
| rename to content/renderer/media/track_audio_renderer.cc |
| index f39a017b94db441164f527fba3b8790aca8d46b0..a02f4c4ee9a8f19d639797e8395442f13a3b8307 100644 |
| --- a/content/renderer/media/webrtc_local_audio_renderer.cc |
| +++ b/content/renderer/media/track_audio_renderer.cc |
| @@ -2,9 +2,7 @@ |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| -#include "content/renderer/media/webrtc_local_audio_renderer.h" |
| - |
| -#include <utility> |
| +#include "content/renderer/media/track_audio_renderer.h" |
| #include "base/location.h" |
| #include "base/logging.h" |
| @@ -13,10 +11,7 @@ |
| #include "base/thread_task_runner_handle.h" |
| #include "base/trace_event/trace_event.h" |
| #include "content/renderer/media/audio_device_factory.h" |
| -#include "content/renderer/media/media_stream_dispatcher.h" |
| -#include "content/renderer/media/webrtc_audio_capturer.h" |
| -#include "content/renderer/media/webrtc_audio_renderer.h" |
| -#include "content/renderer/render_frame_impl.h" |
| +#include "content/renderer/media/media_stream_audio_track.h" |
| #include "media/audio/audio_output_device.h" |
| #include "media/base/audio_bus.h" |
| #include "media/base/audio_shifter.h" |
| @@ -31,95 +26,123 @@ enum LocalRendererSinkStates { |
| kSinkStatesMax // Must always be last! |
| }; |
| +// Translates |num_samples_rendered| into a TimeDelta duration and adds it to |
| +// |prior_elapsed_render_time|. |
| +base::TimeDelta ComputeTotalElapsedRenderTime( |
| + base::TimeDelta prior_elapsed_render_time, |
| + int64_t num_samples_rendered, |
| + int sample_rate) { |
| + return prior_elapsed_render_time + base::TimeDelta::FromMicroseconds( |
| + num_samples_rendered * base::Time::kMicrosecondsPerSecond / sample_rate); |
| +} |
| + |
| } // namespace |
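For illustration (not part of the reviewed patch): the helper above simply converts a rendered-sample count at the source sample rate into a duration and adds it to the previously accumulated render time. With made-up numbers, a 48000 Hz source and 4800 rendered samples add 100 ms:

    // Hypothetical check, assuming a 48000 Hz source and 250 ms already elapsed.
    const base::TimeDelta elapsed = ComputeTotalElapsedRenderTime(
        base::TimeDelta::FromMilliseconds(250), 4800, 48000);
    DCHECK_EQ(350, elapsed.InMilliseconds());  // 250 ms + 4800 / 48000 s.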
| // media::AudioRendererSink::RenderCallback implementation |
| -int WebRtcLocalAudioRenderer::Render(media::AudioBus* audio_bus, |
| - uint32_t audio_delay_milliseconds, |
| - uint32_t frames_skipped) { |
| - TRACE_EVENT0("audio", "WebRtcLocalAudioRenderer::Render"); |
| +int TrackAudioRenderer::Render(media::AudioBus* audio_bus, |
| + uint32_t audio_delay_milliseconds, |
| + uint32_t frames_skipped) { |
| + TRACE_EVENT0("audio", "TrackAudioRenderer::Render"); |
| base::AutoLock auto_lock(thread_lock_); |
| - if (!playing_ || !volume_ || !audio_shifter_) { |
| + if (!audio_shifter_) { |
| audio_bus->Zero(); |
| return 0; |
| } |
| - audio_shifter_->Pull( |
| - audio_bus, |
| - base::TimeTicks::Now() - |
| - base::TimeDelta::FromMilliseconds(audio_delay_milliseconds)); |
| - |
| + // TODO(miu): Plumbing is needed to determine the actual playout timestamp |
| + // of the audio, instead of just snapshotting TimeTicks::Now(), for proper |
| + // audio/video sync. http://crbug.com/335335 |
| + const base::TimeTicks playout_time = |
| + base::TimeTicks::Now() + |
| + base::TimeDelta::FromMilliseconds(audio_delay_milliseconds); |
| + DVLOG(2) << "Pulling audio out of shifter to be played " |
| + << audio_delay_milliseconds << " ms from now."; |
| + audio_shifter_->Pull(audio_bus, playout_time); |
| + num_samples_rendered_ += audio_bus->frames(); |
| return audio_bus->frames(); |
| } |
| -void WebRtcLocalAudioRenderer::OnRenderError() { |
| +void TrackAudioRenderer::OnRenderError() { |
| NOTIMPLEMENTED(); |
| } |
| // content::MediaStreamAudioSink implementation |
| -void WebRtcLocalAudioRenderer::OnData(const media::AudioBus& audio_bus, |
| - base::TimeTicks estimated_capture_time) { |
| - DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| - DCHECK(!estimated_capture_time.is_null()); |
| +void TrackAudioRenderer::OnData(const media::AudioBus& audio_bus, |
| + base::TimeTicks reference_time) { |
| + DCHECK(audio_thread_checker_.CalledOnValidThread()); |
| + DCHECK(!reference_time.is_null()); |
| - TRACE_EVENT0("audio", "WebRtcLocalAudioRenderer::CaptureData"); |
| + TRACE_EVENT0("audio", "TrackAudioRenderer::CaptureData"); |
| base::AutoLock auto_lock(thread_lock_); |
| - if (!playing_ || !volume_ || !audio_shifter_) |
| + if (!audio_shifter_) |
| return; |
| scoped_ptr<media::AudioBus> audio_data( |
| media::AudioBus::Create(audio_bus.channels(), audio_bus.frames())); |
| audio_bus.CopyTo(audio_data.get()); |
| - audio_shifter_->Push(std::move(audio_data), estimated_capture_time); |
| - const base::TimeTicks now = base::TimeTicks::Now(); |
| - total_render_time_ += now - last_render_time_; |
| - last_render_time_ = now; |
| + // Note: For remote audio sources, |reference_time| is the local playout time, |
| + // the ideal point-in-time at which the first audio sample should be played |
| + // out in the future. For local sources, |reference_time| is the |
| + // point-in-time at which the first audio sample was captured in the past. In |
| + // either case, AudioShifter will auto-detect and do the right thing when |
| + // audio is pulled from it. |
| + audio_shifter_->Push(std::move(audio_data), reference_time); |
| } |
| -void WebRtcLocalAudioRenderer::OnSetFormat( |
| - const media::AudioParameters& params) { |
| - DVLOG(1) << "WebRtcLocalAudioRenderer::OnSetFormat()"; |
| +void TrackAudioRenderer::OnSetFormat(const media::AudioParameters& params) { |
| + DVLOG(1) << "TrackAudioRenderer::OnSetFormat()"; |
| // If the source is restarted, we might have changed to another capture |
| // thread. |
| - capture_thread_checker_.DetachFromThread(); |
| - DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| + audio_thread_checker_.DetachFromThread(); |
| + DCHECK(audio_thread_checker_.CalledOnValidThread()); |
| + |
| + // If the parameters changed, the audio in the AudioShifter is invalid and |
| + // should be dropped. |
| + { |
| + base::AutoLock auto_lock(thread_lock_); |
| + if (audio_shifter_ && |
| + (audio_shifter_->sample_rate() != params.sample_rate() || |
| + audio_shifter_->channels() != params.channels())) { |
| + HaltAudioFlowWhileLockHeld(); |
| + } |
| + } |
| // Post a task on the main render thread to reconfigure the |sink_| with the |
| // new format. |
| task_runner_->PostTask( |
| FROM_HERE, |
| - base::Bind(&WebRtcLocalAudioRenderer::ReconfigureSink, this, params)); |
| + base::Bind(&TrackAudioRenderer::ReconfigureSink, this, params)); |
| } |
| -// WebRtcLocalAudioRenderer::WebRtcLocalAudioRenderer implementation. |
| -WebRtcLocalAudioRenderer::WebRtcLocalAudioRenderer( |
| +TrackAudioRenderer::TrackAudioRenderer( |
| const blink::WebMediaStreamTrack& audio_track, |
| - int source_render_frame_id, |
| + int playout_render_frame_id, |
| int session_id, |
| const std::string& device_id, |
| const url::Origin& security_origin) |
| : audio_track_(audio_track), |
| - source_render_frame_id_(source_render_frame_id), |
| + playout_render_frame_id_(playout_render_frame_id), |
| session_id_(session_id), |
| task_runner_(base::ThreadTaskRunnerHandle::Get()), |
| + num_samples_rendered_(0), |
| playing_(false), |
| output_device_id_(device_id), |
| security_origin_(security_origin), |
| volume_(0.0), |
| sink_started_(false) { |
| - DVLOG(1) << "WebRtcLocalAudioRenderer::WebRtcLocalAudioRenderer()"; |
| + DVLOG(1) << "TrackAudioRenderer::TrackAudioRenderer()"; |
| } |
| -WebRtcLocalAudioRenderer::~WebRtcLocalAudioRenderer() { |
| +TrackAudioRenderer::~TrackAudioRenderer() { |
| DCHECK(task_runner_->BelongsToCurrentThread()); |
| DCHECK(!sink_.get()); |
| - DVLOG(1) << "WebRtcLocalAudioRenderer::~WebRtcLocalAudioRenderer()"; |
| + DVLOG(1) << "TrackAudioRenderer::~TrackAudioRenderer()"; |
| } |
| -void WebRtcLocalAudioRenderer::Start() { |
| - DVLOG(1) << "WebRtcLocalAudioRenderer::Start()"; |
| +void TrackAudioRenderer::Start() { |
| + DVLOG(1) << "TrackAudioRenderer::Start()"; |
| DCHECK(task_runner_->BelongsToCurrentThread()); |
| // We get audio data from |audio_track_|... |
| @@ -127,22 +150,26 @@ void WebRtcLocalAudioRenderer::Start() { |
| // ...and |sink_| will get audio data from us. |
| DCHECK(!sink_.get()); |
| sink_ = |
| - AudioDeviceFactory::NewOutputDevice(source_render_frame_id_, session_id_, |
| + AudioDeviceFactory::NewOutputDevice(playout_render_frame_id_, session_id_, |
| output_device_id_, security_origin_); |
| + sink_->SetVolume(volume_); |
| - base::AutoLock auto_lock(thread_lock_); |
| - last_render_time_ = base::TimeTicks::Now(); |
| playing_ = false; |
|
tommi (sloooow) - chröme
2016/02/01 20:32:24
shouldn't this be assumed to be false already? i.e
miu
2016/02/03 03:48:45
Good point. Done.
|
| + |
| + base::AutoLock auto_lock(thread_lock_); |
| + prior_elapsed_render_time_ = base::TimeDelta(); |
| + num_samples_rendered_ = 0; |
| } |
| -void WebRtcLocalAudioRenderer::Stop() { |
| - DVLOG(1) << "WebRtcLocalAudioRenderer::Stop()"; |
| +void TrackAudioRenderer::Stop() { |
| + DVLOG(1) << "TrackAudioRenderer::Stop()"; |
| DCHECK(task_runner_->BelongsToCurrentThread()); |
| + playing_ = false; |
|
tommi (sloooow) - chröme
2016/02/01 20:32:24
Call Pause()?
miu
2016/02/03 03:48:45
Done.
|
| + |
| { |
| base::AutoLock auto_lock(thread_lock_); |
| - playing_ = false; |
| - audio_shifter_.reset(); |
| + HaltAudioFlowWhileLockHeld(); |
| } |
| // Stop the output audio stream, i.e, stop asking for data to render. |
| @@ -153,7 +180,7 @@ void WebRtcLocalAudioRenderer::Stop() { |
| sink_ = NULL; |
| } |
| - if (!sink_started_) { |
| + if (!sink_started_ && IsLocalRenderer()) { |
| UMA_HISTOGRAM_ENUMERATION("Media.LocalRendererSinkStates", |
| kSinkNeverStarted, kSinkStatesMax); |
| } |
| @@ -163,83 +190,79 @@ void WebRtcLocalAudioRenderer::Stop() { |
| MediaStreamAudioSink::RemoveFromAudioTrack(this, audio_track_); |
| } |
| -void WebRtcLocalAudioRenderer::Play() { |
| - DVLOG(1) << "WebRtcLocalAudioRenderer::Play()"; |
| +void TrackAudioRenderer::Play() { |
| + DVLOG(1) << "TrackAudioRenderer::Play()"; |
| DCHECK(task_runner_->BelongsToCurrentThread()); |
| if (!sink_.get()) |
| return; |
| - { |
| - base::AutoLock auto_lock(thread_lock_); |
| - // Resumes rendering by ensuring that WebRtcLocalAudioRenderer::Render() |
| - // now reads data from the local FIFO. |
| - playing_ = true; |
| - last_render_time_ = base::TimeTicks::Now(); |
| - } |
| + playing_ = true; |
| - // Note: If volume_ is currently muted, the |sink_| will not be started yet. |
| MaybeStartSink(); |
| } |
| -void WebRtcLocalAudioRenderer::Pause() { |
| - DVLOG(1) << "WebRtcLocalAudioRenderer::Pause()"; |
| +void TrackAudioRenderer::Pause() { |
| + DVLOG(1) << "TrackAudioRenderer::Pause()"; |
| DCHECK(task_runner_->BelongsToCurrentThread()); |
| if (!sink_.get()) |
| return; |
| - base::AutoLock auto_lock(thread_lock_); |
| - // Temporarily suspends rendering audio. |
| - // WebRtcLocalAudioRenderer::Render() will return early during this state |
| - // and only zeros will be provided to the active sink. |
| playing_ = false; |
| + |
| + base::AutoLock auto_lock(thread_lock_); |
| + HaltAudioFlowWhileLockHeld(); |
| } |
| -void WebRtcLocalAudioRenderer::SetVolume(float volume) { |
| - DVLOG(1) << "WebRtcLocalAudioRenderer::SetVolume(" << volume << ")"; |
| +void TrackAudioRenderer::SetVolume(float volume) { |
| + DVLOG(1) << "TrackAudioRenderer::SetVolume(" << volume << ")"; |
| DCHECK(task_runner_->BelongsToCurrentThread()); |
| - { |
| - base::AutoLock auto_lock(thread_lock_); |
| - // Cache the volume. |
| - volume_ = volume; |
| - } |
| - |
| - // Lazily start the |sink_| when the local renderer is unmuted during |
| - // playing. |
| - MaybeStartSink(); |
| - |
| + // Cache the volume. |
|
tommi (sloooow) - chröme
2016/02/01 20:32:24
Maybe document why we do this instead?
miu
2016/02/03 03:48:45
Done.
|
| + volume_ = volume; |
| if (sink_.get()) |
| sink_->SetVolume(volume); |
| } |
| -media::OutputDevice* WebRtcLocalAudioRenderer::GetOutputDevice() { |
| +media::OutputDevice* TrackAudioRenderer::GetOutputDevice() { |
| DCHECK(task_runner_->BelongsToCurrentThread()); |
| return this; |
| } |
| -base::TimeDelta WebRtcLocalAudioRenderer::GetCurrentRenderTime() const { |
| +base::TimeDelta TrackAudioRenderer::GetCurrentRenderTime() const { |
| DCHECK(task_runner_->BelongsToCurrentThread()); |
| base::AutoLock auto_lock(thread_lock_); |
| - if (!sink_.get()) |
| - return base::TimeDelta(); |
| - return total_render_time(); |
| + if (source_params_.IsValid()) { |
| + return ComputeTotalElapsedRenderTime(prior_elapsed_render_time_, |
| + num_samples_rendered_, |
| + source_params_.sample_rate()); |
| + } |
| + return prior_elapsed_render_time_; |
| } |
| -bool WebRtcLocalAudioRenderer::IsLocalRenderer() const { |
| - return true; |
| +bool TrackAudioRenderer::IsLocalRenderer() const { |
| + DCHECK(task_runner_->BelongsToCurrentThread()); |
| + MediaStreamAudioTrack* const track = |
| + MediaStreamAudioTrack::GetTrack(audio_track_); |
| + CHECK(track); |
|
tommi (sloooow) - chröme
2016/02/01 20:32:24
nit: not needed due to the next line
miu
2016/02/03 03:48:45
Cleaned this up: I moved the null-check to the constructor.
|
| + return track->is_local_track(); |
| } |
| -void WebRtcLocalAudioRenderer::SwitchOutputDevice( |
| +void TrackAudioRenderer::SwitchOutputDevice( |
| const std::string& device_id, |
| const url::Origin& security_origin, |
| const media::SwitchOutputDeviceCB& callback) { |
| - DVLOG(1) << "WebRtcLocalAudioRenderer::SwitchOutputDevice()"; |
| + DVLOG(1) << "TrackAudioRenderer::SwitchOutputDevice()"; |
| DCHECK(task_runner_->BelongsToCurrentThread()); |
| + { |
| + base::AutoLock auto_lock(thread_lock_); |
| + HaltAudioFlowWhileLockHeld(); |
| + } |
| + |
| scoped_refptr<media::AudioOutputDevice> new_sink = |
| - AudioDeviceFactory::NewOutputDevice(source_render_frame_id_, session_id_, |
| + AudioDeviceFactory::NewOutputDevice(playout_render_frame_id_, session_id_, |
| device_id, security_origin); |
| if (new_sink->GetDeviceStatus() != media::OUTPUT_DEVICE_STATUS_OK) { |
| callback.Run(new_sink->GetDeviceStatus()); |
| @@ -255,26 +278,29 @@ void WebRtcLocalAudioRenderer::SwitchOutputDevice( |
| sink_started_ = false; |
| sink_ = new_sink; |
| - int frames_per_buffer = sink_->GetOutputParameters().frames_per_buffer(); |
| - sink_params_ = source_params_; |
| - sink_params_.set_frames_per_buffer(WebRtcAudioRenderer::GetOptimalBufferSize( |
| - source_params_.sample_rate(), frames_per_buffer)); |
| - |
| + sink_->SetVolume(volume_); |
| if (was_sink_started) |
| MaybeStartSink(); |
| callback.Run(media::OUTPUT_DEVICE_STATUS_OK); |
| } |
| -media::AudioParameters WebRtcLocalAudioRenderer::GetOutputParameters() { |
| +media::AudioParameters TrackAudioRenderer::GetOutputParameters() { |
| DCHECK(task_runner_->BelongsToCurrentThread()); |
| - if (!sink_.get()) |
| + if (!sink_ || !source_params_.IsValid()) |
| return media::AudioParameters(); |
| - return sink_->GetOutputParameters(); |
| + // Output parameters consist of the same channel layout and sample rate as the |
| + // source, but having the buffer duration preferred by the hardware. |
| + const media::AudioParameters& preferred_params = sink_->GetOutputParameters(); |
| + return media::AudioParameters( |
| + preferred_params.format(), source_params_.channel_layout(), |
| + source_params_.sample_rate(), source_params_.bits_per_sample(), |
| + preferred_params.frames_per_buffer() * source_params_.sample_rate() / |
| + preferred_params.sample_rate()); |
| } |
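For illustration (not part of the reviewed patch): the frames_per_buffer scaling above keeps the hardware's preferred buffer duration while adopting the source's sample rate. With hypothetical numbers, a 10 ms hardware buffer at 44100 Hz maps to 480 frames for a 48000 Hz source:

    // Hypothetical numbers: hardware prefers 441 frames at 44100 Hz (10 ms);
    // the source runs at 48000 Hz.
    const int preferred_frames_per_buffer = 441;
    const int preferred_sample_rate = 44100;
    const int source_sample_rate = 48000;
    const int scaled_frames_per_buffer =
        preferred_frames_per_buffer * source_sample_rate / preferred_sample_rate;
    // scaled_frames_per_buffer == 480, i.e. the same 10 ms at the source rate.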
| -media::OutputDeviceStatus WebRtcLocalAudioRenderer::GetDeviceStatus() { |
| +media::OutputDeviceStatus TrackAudioRenderer::GetDeviceStatus() { |
| DCHECK(task_runner_->BelongsToCurrentThread()); |
| if (!sink_.get()) |
| return media::OUTPUT_DEVICE_STATUS_ERROR_INTERNAL; |
| @@ -282,75 +308,93 @@ media::OutputDeviceStatus WebRtcLocalAudioRenderer::GetDeviceStatus() { |
| return sink_->GetDeviceStatus(); |
| } |
| -void WebRtcLocalAudioRenderer::MaybeStartSink() { |
| +void TrackAudioRenderer::MaybeStartSink() { |
| DCHECK(task_runner_->BelongsToCurrentThread()); |
| - DVLOG(1) << "WebRtcLocalAudioRenderer::MaybeStartSink()"; |
| + DVLOG(1) << "TrackAudioRenderer::MaybeStartSink()"; |
| - if (!sink_.get() || !source_params_.IsValid()) |
| + if (!sink_.get() || !source_params_.IsValid() || !playing_) |
| return; |
| - { |
| - // Clear up the old data in the FIFO. |
| - base::AutoLock auto_lock(thread_lock_); |
| - audio_shifter_->Flush(); |
| - } |
| + // Re-create the AudioShifter to drop old audio data and reset to a starting |
| + // state. MaybeStartSink() is always called in a situation where either the |
| + // source or sink has changed somehow and so all of AudioShifter's internal |
| + // time-sync state is invalid. |
| + CreateAudioShifter(); |
| - if (!sink_params_.IsValid() || !playing_ || !volume_ || sink_started_ || |
| + if (sink_started_ || |
| sink_->GetDeviceStatus() != media::OUTPUT_DEVICE_STATUS_OK) |
|
tommi (sloooow) - chröme
2016/02/01 20:32:24
nit: add {}?
miu
2016/02/03 03:48:45
Done.
|
| return; |
| - DVLOG(1) << "WebRtcLocalAudioRenderer::MaybeStartSink() -- Starting sink_."; |
| - sink_->Initialize(sink_params_, this); |
| + DVLOG(1) << ("TrackAudioRenderer::MaybeStartSink() -- Starting sink. " |
| + "source_params_={") |
| + << source_params_.AsHumanReadableString() << "}, sink parameters={" |
| + << GetOutputParameters().AsHumanReadableString() << '}'; |
| + sink_->Initialize(GetOutputParameters(), this); |
| sink_->Start(); |
| sink_started_ = true; |
| - UMA_HISTOGRAM_ENUMERATION("Media.LocalRendererSinkStates", |
| - kSinkStarted, kSinkStatesMax); |
| + if (IsLocalRenderer()) { |
| + UMA_HISTOGRAM_ENUMERATION("Media.LocalRendererSinkStates", kSinkStarted, |
| + kSinkStatesMax); |
| + } |
| } |
| -void WebRtcLocalAudioRenderer::ReconfigureSink( |
| - const media::AudioParameters& params) { |
| +void TrackAudioRenderer::ReconfigureSink(const media::AudioParameters& params) { |
| DCHECK(task_runner_->BelongsToCurrentThread()); |
| - DVLOG(1) << "WebRtcLocalAudioRenderer::ReconfigureSink()"; |
| + DVLOG(1) << "TrackAudioRenderer::ReconfigureSink()"; |
| if (source_params_.Equals(params)) |
| return; |
| - |
| - // Reset the |source_params_|, |sink_params_| and |loopback_fifo_| to match |
| - // the new format. |
| - |
| source_params_ = params; |
| - { |
| - // Note: The max buffer is fairly large, but will rarely be used. |
| - // Cast needs the buffer to hold at least one second of audio. |
| - // The clock accuracy is set to 20ms because clock accuracy is |
| - // ~15ms on windows. |
| - media::AudioShifter* const new_shifter = new media::AudioShifter( |
| - base::TimeDelta::FromSeconds(2), |
| - base::TimeDelta::FromMilliseconds(20), |
| - base::TimeDelta::FromSeconds(20), |
| - source_params_.sample_rate(), |
| - params.channels()); |
| - |
| - base::AutoLock auto_lock(thread_lock_); |
| - audio_shifter_.reset(new_shifter); |
| - } |
| if (!sink_.get()) |
| - return; // WebRtcLocalAudioRenderer has not yet been started. |
| + return; // TrackAudioRenderer has not yet been started. |
| // Stop |sink_| and re-create a new one to be initialized with different audio |
| // parameters. Then, invoke MaybeStartSink() to restart everything again. |
| sink_->Stop(); |
| sink_started_ = false; |
| sink_ = |
| - AudioDeviceFactory::NewOutputDevice(source_render_frame_id_, session_id_, |
| + AudioDeviceFactory::NewOutputDevice(playout_render_frame_id_, session_id_, |
| output_device_id_, security_origin_); |
| - int frames_per_buffer = sink_->GetOutputParameters().frames_per_buffer(); |
| - sink_params_ = source_params_; |
| - sink_params_.set_frames_per_buffer(WebRtcAudioRenderer::GetOptimalBufferSize( |
| - source_params_.sample_rate(), frames_per_buffer)); |
| + sink_->SetVolume(volume_); |
| MaybeStartSink(); |
| } |
| +void TrackAudioRenderer::CreateAudioShifter() { |
| + DCHECK(task_runner_->BelongsToCurrentThread()); |
| + |
| + // Note 1: The max buffer is fairly large to cover the case where |
| + // remotely-sourced audio is delivered well ahead of its scheduled playout |
| + // time (e.g., content streaming with a very large end-to-end |
| + // latency). However, there is no penalty for making it large in the |
| + // low-latency use cases since AudioShifter will discard data as soon as it is |
| + // no longer needed. |
| + // |
| + // Note 2: The clock accuracy is set to 20ms because clock accuracy is |
| + // ~15ms on Windows machines without a working high-resolution clock. See |
| + // comments in base/time/time.h for details. |
| + media::AudioShifter* const new_shifter = new media::AudioShifter( |
|
tommi (sloooow) - chröme
2016/02/01 20:32:24
thanks for keeping the scope of the lock down :)
miu
2016/02/03 03:48:45
Acknowledged.
|
| + base::TimeDelta::FromSeconds(5), base::TimeDelta::FromMilliseconds(20), |
| + base::TimeDelta::FromSeconds(20), source_params_.sample_rate(), |
| + source_params_.channels()); |
| + |
| + base::AutoLock auto_lock(thread_lock_); |
| + audio_shifter_.reset(new_shifter); |
| +} |
| + |
| +void TrackAudioRenderer::HaltAudioFlowWhileLockHeld() { |
| + thread_lock_.AssertAcquired(); |
| + |
| + audio_shifter_.reset(); |
| + |
| + if (source_params_.IsValid()) { |
| + prior_elapsed_render_time_ = |
| + ComputeTotalElapsedRenderTime(prior_elapsed_render_time_, |
| + num_samples_rendered_, |
| + source_params_.sample_rate()); |
| + num_samples_rendered_ = 0; |
| + } |
| +} |
| + |
| } // namespace content |
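A rough usage sketch (hypothetical; the creation site and argument values are illustrative, not from this CL) of the renderer's lifecycle as driven by owning code on the main render thread:

    // |audio_track| and |playout_render_frame_id| come from the owning code.
    scoped_refptr<TrackAudioRenderer> renderer(new TrackAudioRenderer(
        audio_track, playout_render_frame_id, 0 /* session_id */,
        std::string() /* default output device */, url::Origin()));
    renderer->Start();          // Attach to the track and create |sink_|.
    renderer->SetVolume(1.0f);
    renderer->Play();           // MaybeStartSink() begins pulling via Render().
    // ... audio plays until the stream is torn down ...
    renderer->Pause();          // Render() now outputs zeros; the shifter is dropped.
    renderer->Stop();           // Release |sink_| and detach from the track.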