OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/audio/win/audio_low_latency_output_win.h" | 5 #include "media/audio/win/audio_low_latency_output_win.h" |
6 | 6 |
7 #include <Functiondiscoverykeys_devpkey.h> | 7 #include <Functiondiscoverykeys_devpkey.h> |
8 | 8 |
9 #include "base/command_line.h" | 9 #include "base/command_line.h" |
10 #include "base/logging.h" | 10 #include "base/logging.h" |
11 #include "base/macros.h" | 11 #include "base/macros.h" |
12 #include "base/metrics/histogram.h" | 12 #include "base/metrics/histogram.h" |
13 #include "base/strings/utf_string_conversions.h" | 13 #include "base/strings/utf_string_conversions.h" |
14 #include "base/time/time.h" | |
14 #include "base/trace_event/trace_event.h" | 15 #include "base/trace_event/trace_event.h" |
15 #include "base/win/scoped_propvariant.h" | 16 #include "base/win/scoped_propvariant.h" |
16 #include "media/audio/audio_device_description.h" | 17 #include "media/audio/audio_device_description.h" |
17 #include "media/audio/win/audio_manager_win.h" | 18 #include "media/audio/win/audio_manager_win.h" |
18 #include "media/audio/win/avrt_wrapper_win.h" | 19 #include "media/audio/win/avrt_wrapper_win.h" |
19 #include "media/audio/win/core_audio_util_win.h" | 20 #include "media/audio/win/core_audio_util_win.h" |
20 #include "media/base/limits.h" | 21 #include "media/base/limits.h" |
21 #include "media/base/media_switches.h" | 22 #include "media/base/media_switches.h" |
22 | 23 |
23 using base::win::ScopedComPtr; | 24 using base::win::ScopedComPtr; |
(...skipping 474 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
498 // Grab all available space in the rendering endpoint buffer | 499 // Grab all available space in the rendering endpoint buffer |
499 // into which the client can write a data packet. | 500 // into which the client can write a data packet. |
500 hr = audio_render_client_->GetBuffer(packet_size_frames_, | 501 hr = audio_render_client_->GetBuffer(packet_size_frames_, |
501 &audio_data); | 502 &audio_data); |
502 if (FAILED(hr)) { | 503 if (FAILED(hr)) { |
503 DLOG(ERROR) << "Failed to use rendering audio buffer: " | 504 DLOG(ERROR) << "Failed to use rendering audio buffer: " |
504 << std::hex << hr; | 505 << std::hex << hr; |
505 return false; | 506 return false; |
506 } | 507 } |
507 | 508 |
508 // Derive the audio delay which corresponds to the delay between | 509 // Find the time when the next sample written to the buffer is expected to |
509 // a render event and the time when the first audio sample in a | 510 // be played out through the speaker. |
510 // packet is played out through the speaker. This delay value | |
511 // can typically be utilized by an acoustic echo-control (AEC) | |
512 // unit at the render side. | |
513 UINT64 position = 0; | 511 UINT64 position = 0; |
514 uint32_t audio_delay_bytes = 0; | 512 UINT64 qps_position = 0; |
515 hr = audio_clock_->GetPosition(&position, NULL); | 513 base::TimeTicks target_playout_time; |
514 hr = audio_clock_->GetPosition(&position, &qps_position); | |
516 if (SUCCEEDED(hr)) { | 515 if (SUCCEEDED(hr)) { |
517 // Stream position of the sample that is currently playing | 516 // Number of frames already played out through the speaker. |
518 // through the speaker. | 517 const uint64_t played_out_frames = |
519 double pos_sample_playing_frames = format_.Format.nSamplesPerSec * | 518 format_.Format.nSamplesPerSec * position / device_frequency; |
520 (static_cast<double>(position) / device_frequency); | |
521 | 519 |
522 // Stream position of the last sample written to the endpoint | 520 // Number of frames that have been written to the buffer but not yet |
523 // buffer. Note that, the packet we are about to receive in | 521 // played out. Note that the packet about to be received in the upcoming |
524 // the upcoming callback is also included. | 522 // callback is included. |
525 size_t pos_last_sample_written_frames = | 523 const uint64_t delay_frames = |
526 num_written_frames_ + packet_size_frames_; | 524 num_written_frames_ + packet_size_frames_ - played_out_frames; |
miu
2016/09/16 18:35:58
Is it correct to include the |packet_size_frames_|
jameswest
2016/09/16 21:59:43
I believe you're correct. I'll change it.
jameswest
2016/09/19 23:32:36
Done.
| |
527 | 525 |
528 // Derive the actual delay value which will be fed to the | 526 // Convert the delay from frames to time. |
529 // render client using the OnMoreData() callback. | 527 const base::TimeDelta delay = base::TimeDelta::FromMicroseconds( |
530 audio_delay_bytes = (pos_last_sample_written_frames - | 528 delay_frames * base::Time::kMicrosecondsPerSecond / |
531 pos_sample_playing_frames) * format_.Format.nBlockAlign; | 529 format_.Format.nSamplesPerSec); |
530 | |
531 target_playout_time = base::TimeTicks::FromQPSValue(qps_position) + delay; | |
532 } else { | |
533 target_playout_time = base::TimeTicks::Now(); | |
532 } | 534 } |
533 | 535 |
534 // Read a data packet from the registered client source and | 536 // Read a data packet from the registered client source and |
535 // deliver a delay estimate in the same callback to the client. | 537 // deliver a delay estimate in the same callback to the client. |
536 | 538 |
537 int frames_filled = | 539 int frames_filled = |
538 source_->OnMoreData(audio_bus_.get(), audio_delay_bytes, 0); | 540 source_->OnMoreData(target_playout_time, 0, audio_bus_.get()); |
539 uint32_t num_filled_bytes = frames_filled * format_.Format.nBlockAlign; | 541 uint32_t num_filled_bytes = frames_filled * format_.Format.nBlockAlign; |
540 DCHECK_LE(num_filled_bytes, packet_size_bytes_); | 542 DCHECK_LE(num_filled_bytes, packet_size_bytes_); |
541 | 543 |
542 // Note: If this ever changes to output raw float the data must be | 544 // Note: If this ever changes to output raw float the data must be |
543 // clipped and sanitized since it may come from an untrusted | 545 // clipped and sanitized since it may come from an untrusted |
544 // source such as NaCl. | 546 // source such as NaCl. |
545 const int bytes_per_sample = format_.Format.wBitsPerSample >> 3; | 547 const int bytes_per_sample = format_.Format.wBitsPerSample >> 3; |
546 audio_bus_->Scale(volume_); | 548 audio_bus_->Scale(volume_); |
547 audio_bus_->ToInterleaved( | 549 audio_bus_->ToInterleaved( |
548 frames_filled, bytes_per_sample, audio_data); | 550 frames_filled, bytes_per_sample, audio_data); |
(...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
634 DVLOG(1) << "IAudioClient::GetBufferSize: " << std::hex << hr; | 636 DVLOG(1) << "IAudioClient::GetBufferSize: " << std::hex << hr; |
635 return hr; | 637 return hr; |
636 } | 638 } |
637 | 639 |
638 *endpoint_buffer_size = buffer_size_in_frames; | 640 *endpoint_buffer_size = buffer_size_in_frames; |
639 DVLOG(2) << "endpoint buffer size: " << buffer_size_in_frames; | 641 DVLOG(2) << "endpoint buffer size: " << buffer_size_in_frames; |
640 return hr; | 642 return hr; |
641 } | 643 } |
642 | 644 |
643 void WASAPIAudioOutputStream::StopThread() { | 645 void WASAPIAudioOutputStream::StopThread() { |
644 if (render_thread_ ) { | 646 if (render_thread_) { |
645 if (render_thread_->HasBeenStarted()) { | 647 if (render_thread_->HasBeenStarted()) { |
646 // Wait until the thread completes and perform cleanup. | 648 // Wait until the thread completes and perform cleanup. |
647 SetEvent(stop_render_event_.Get()); | 649 SetEvent(stop_render_event_.Get()); |
648 render_thread_->Join(); | 650 render_thread_->Join(); |
649 } | 651 } |
650 | 652 |
651 render_thread_.reset(); | 653 render_thread_.reset(); |
652 | 654 |
653 // Ensure that we don't quit the main thread loop immediately next | 655 // Ensure that we don't quit the main thread loop immediately next |
654 // time Start() is called. | 656 // time Start() is called. |
655 ResetEvent(stop_render_event_.Get()); | 657 ResetEvent(stop_render_event_.Get()); |
656 } | 658 } |
657 | 659 |
658 source_ = NULL; | 660 source_ = NULL; |
659 } | 661 } |
660 | 662 |
661 } // namespace media | 663 } // namespace media |
OLD | NEW |