Index: media/audio/win/audio_low_latency_output_win.cc
diff --git a/media/audio/win/audio_low_latency_output_win.cc b/media/audio/win/audio_low_latency_output_win.cc
index 9fa035d80ba6aec06859dfcea5c2f04202e0e1cd..c51548a4776f9d57704522236c9fe17b031c134d 100644
--- a/media/audio/win/audio_low_latency_output_win.cc
+++ b/media/audio/win/audio_low_latency_output_win.cc
@@ -11,6 +11,7 @@
 #include "base/macros.h"
 #include "base/metrics/histogram.h"
 #include "base/strings/utf_string_conversions.h"
+#include "base/time/time.h"
 #include "base/trace_event/trace_event.h"
 #include "base/win/scoped_propvariant.h"
 #include "media/audio/audio_device_description.h"
@@ -505,37 +506,38 @@ bool WASAPIAudioOutputStream::RenderAudioFromSource(UINT64 device_frequency) {
     return false;
   }
 
-  // Derive the audio delay which corresponds to the delay between
-  // a render event and the time when the first audio sample in a
-  // packet is played out through the speaker. This delay value
-  // can typically be utilized by an acoustic echo-control (AEC)
-  // unit at the render side.
+  // Find the time when the next sample written to the buffer is expected to
+  // be played out through the speaker.
   UINT64 position = 0;
-  uint32_t audio_delay_bytes = 0;
-  hr = audio_clock_->GetPosition(&position, NULL);
+  UINT64 qpc_position = 0;
+  base::TimeTicks target_playout_time;
+  hr = audio_clock_->GetPosition(&position, &qpc_position);
   if (SUCCEEDED(hr)) {
-    // Stream position of the sample that is currently playing
-    // through the speaker.
-    double pos_sample_playing_frames = format_.Format.nSamplesPerSec *
-        (static_cast<double>(position) / device_frequency);
-
-    // Stream position of the last sample written to the endpoint
-    // buffer. Note that, the packet we are about to receive in
-    // the upcoming callback is also included.
-    size_t pos_last_sample_written_frames =
-        num_written_frames_ + packet_size_frames_;
-
-    // Derive the actual delay value which will be fed to the
-    // render client using the OnMoreData() callback.
-    audio_delay_bytes = (pos_last_sample_written_frames -
-        pos_sample_playing_frames) * format_.Format.nBlockAlign;
+    // Number of frames already played out through the speaker.
+    const uint64_t played_out_frames =
+        format_.Format.nSamplesPerSec * position / device_frequency;
+
+    // Number of frames that have been written to the buffer but not yet
+    // played out. Note that the packet about to be received in the upcoming
+    // callback is included.
+    const uint64_t delay_frames =
+        num_written_frames_ + packet_size_frames_ - played_out_frames;
miu 2016/09/16 18:35:58
Is it correct to include the |packet_size_frames_|?
jameswest 2016/09/16 21:59:43
I believe you're correct. I'll change it.
jameswest 2016/09/19 23:32:36
Done.
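Per the thread above, the follow-up patch set presumably drops |packet_size_frames_| from the pending-frame count. A minimal sketch of the adjusted arithmetic, hoisted into a free function for readability; the function name and parameters are illustrative, not from the CL:

#include <cstdint>

// Frames written to the endpoint buffer but not yet played out, computed
// without counting the packet that has not been delivered yet, as the
// review discussion suggests. |position| and |device_frequency| come from
// IAudioClock::GetPosition(); the other parameters mirror the members used
// in the diff (num_written_frames_, format_.Format.nSamplesPerSec).
uint64_t PendingDelayFrames(uint64_t position,
                            uint64_t device_frequency,
                            uint64_t num_written_frames,
                            uint32_t samples_per_sec) {
  // Frames already played out through the speaker.
  const uint64_t played_out_frames =
      samples_per_sec * position / device_frequency;
  return num_written_frames - played_out_frames;
}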
+
+    // Convert the delay from frames to time.
+    const base::TimeDelta delay = base::TimeDelta::FromMicroseconds(
+        delay_frames * base::Time::kMicrosecondsPerSecond /
+        format_.Format.nSamplesPerSec);
+
+    target_playout_time = base::TimeTicks::FromQPCValue(qpc_position) + delay;
+  } else {
+    target_playout_time = base::TimeTicks::Now();
   }
 
   // Read a data packet from the registered client source and
   // deliver a delay estimate in the same callback to the client.
   int frames_filled =
-      source_->OnMoreData(audio_bus_.get(), audio_delay_bytes, 0);
+      source_->OnMoreData(target_playout_time, 0, audio_bus_.get());
   uint32_t num_filled_bytes = frames_filled * format_.Format.nBlockAlign;
   DCHECK_LE(num_filled_bytes, packet_size_bytes_);
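For readers without the Chromium tree handy, the frames-to-time conversion in the hunk above reduces to the following self-contained sketch. std::chrono stands in for base::TimeDelta/base::TimeTicks, and |anchor| stands in for base::TimeTicks::FromQPCValue(qpc_position) (or base::TimeTicks::Now() when GetPosition() fails); the function and parameter names are illustrative:

#include <chrono>
#include <cstdint>

// Sketch of the target-playout-time math from the hunk above: convert the
// pending frames to microseconds and add them to the render-time anchor.
std::chrono::steady_clock::time_point TargetPlayoutTime(
    std::chrono::steady_clock::time_point anchor,
    uint64_t delay_frames,
    uint32_t samples_per_sec) {
  // Matches delay_frames * kMicrosecondsPerSecond / nSamplesPerSec in the
  // diff; integer division truncates, as in the patch.
  const std::chrono::microseconds delay(
      delay_frames * 1000000ULL / samples_per_sec);
  return anchor +
         std::chrono::duration_cast<std::chrono::steady_clock::duration>(
             delay);
}

For example, at 48000 Hz with 960 pending frames the delay works out to
960 * 1000000 / 48000 = 20000 microseconds, i.e. the next sample written is
expected to be audible 20 ms after the anchor.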
@@ -641,7 +643,7 @@ HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization(
 }
 
 void WASAPIAudioOutputStream::StopThread() {
-  if (render_thread_ ) {
+  if (render_thread_) {
     if (render_thread_->HasBeenStarted()) {
       // Wait until the thread completes and perform cleanup.
       SetEvent(stop_render_event_.Get());