Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/audio/win/audio_low_latency_output_win.h" | 5 #include "media/audio/win/audio_low_latency_output_win.h" |
| 6 | 6 |
| 7 #include <Functiondiscoverykeys_devpkey.h> | 7 #include <Functiondiscoverykeys_devpkey.h> |
| 8 | 8 |
| 9 #include "base/command_line.h" | 9 #include "base/command_line.h" |
| 10 #include "base/logging.h" | 10 #include "base/logging.h" |
| (...skipping 53 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 64 opened_(false), | 64 opened_(false), |
| 65 volume_(1.0), | 65 volume_(1.0), |
| 66 packet_size_frames_(0), | 66 packet_size_frames_(0), |
| 67 packet_size_bytes_(0), | 67 packet_size_bytes_(0), |
| 68 endpoint_buffer_size_frames_(0), | 68 endpoint_buffer_size_frames_(0), |
| 69 device_id_(device_id), | 69 device_id_(device_id), |
| 70 device_role_(device_role), | 70 device_role_(device_role), |
| 71 share_mode_(GetShareMode()), | 71 share_mode_(GetShareMode()), |
| 72 num_written_frames_(0), | 72 num_written_frames_(0), |
| 73 source_(NULL), | 73 source_(NULL), |
| 74 hns_units_to_perf_count_(0.0), | |
| 74 audio_bus_(AudioBus::Create(params)) { | 75 audio_bus_(AudioBus::Create(params)) { |
| 75 DCHECK(manager_); | 76 DCHECK(manager_); |
| 76 | 77 |
| 77 // The empty string is used to indicate a default device and the | 78 // The empty string is used to indicate a default device and the |
| 78 // |device_role_| member controls whether that's the default or default | 79 // |device_role_| member controls whether that's the default or default |
| 79 // communications device. | 80 // communications device. |
| 80 DCHECK_NE(device_id_, AudioDeviceDescription::kDefaultDeviceId); | 81 DCHECK_NE(device_id_, AudioDeviceDescription::kDefaultDeviceId); |
| 81 DCHECK_NE(device_id_, AudioDeviceDescription::kCommunicationsDeviceId); | 82 DCHECK_NE(device_id_, AudioDeviceDescription::kCommunicationsDeviceId); |
| 82 | 83 |
| 83 DVLOG(1) << "WASAPIAudioOutputStream::WASAPIAudioOutputStream()"; | 84 DVLOG(1) << "WASAPIAudioOutputStream::WASAPIAudioOutputStream()"; |
| (...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 120 // All events are auto-reset events and non-signaled initially. | 121 // All events are auto-reset events and non-signaled initially. |
| 121 | 122 |
| 122 // Create the event which the audio engine will signal each time | 123 // Create the event which the audio engine will signal each time |
| 123 // a buffer becomes ready to be processed by the client. | 124 // a buffer becomes ready to be processed by the client. |
| 124 audio_samples_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); | 125 audio_samples_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); |
| 125 DCHECK(audio_samples_render_event_.IsValid()); | 126 DCHECK(audio_samples_render_event_.IsValid()); |
| 126 | 127 |
| 127 // Create the event which will be set in Stop() when capturing shall stop. | 128 // Create the event which will be set in Stop() when capturing shall stop. |
| 128 stop_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); | 129 stop_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); |
| 129 DCHECK(stop_render_event_.IsValid()); | 130 DCHECK(stop_render_event_.IsValid()); |
| 131 | |
| 132 LARGE_INTEGER performance_frequency; | |
| 133 if (QueryPerformanceFrequency(&performance_frequency)) { | |
| 134 hns_units_to_perf_count_ = | |
| 135 (static_cast<double>(performance_frequency.QuadPart) / 10000000.0); | |
| 136 } else { | |
| 137 DLOG(ERROR) << "High-resolution performance counters are not supported."; | |
| 138 } | |
| 130 } | 139 } |
| 131 | 140 |
| 132 WASAPIAudioOutputStream::~WASAPIAudioOutputStream() { | 141 WASAPIAudioOutputStream::~WASAPIAudioOutputStream() { |
| 133 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); | 142 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); |
| 134 } | 143 } |
| 135 | 144 |
| 136 bool WASAPIAudioOutputStream::Open() { | 145 bool WASAPIAudioOutputStream::Open() { |
| 137 DVLOG(1) << "WASAPIAudioOutputStream::Open()"; | 146 DVLOG(1) << "WASAPIAudioOutputStream::Open()"; |
| 138 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); | 147 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); |
| 139 if (opened_) | 148 if (opened_) |
| (...skipping 364 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 504 << std::hex << hr; | 513 << std::hex << hr; |
| 505 return false; | 514 return false; |
| 506 } | 515 } |
| 507 | 516 |
| 508 // Derive the audio delay which corresponds to the delay between | 517 // Derive the audio delay which corresponds to the delay between |
| 509 // a render event and the time when the first audio sample in a | 518 // a render event and the time when the first audio sample in a |
| 510 // packet is played out through the speaker. This delay value | 519 // packet is played out through the speaker. This delay value |
| 511 // can typically be utilized by an acoustic echo-control (AEC) | 520 // can typically be utilized by an acoustic echo-control (AEC) |
| 512 // unit at the render side. | 521 // unit at the render side. |
| 513 UINT64 position = 0; | 522 UINT64 position = 0; |
| 523 UINT64 qpc_position = 0; | |
|
Raymond Toy
2016/06/14 16:42:40
What does "qpc" stand for?
Mikhail
2016/06/17 09:36:57
that is 'QueryPerformanceCounter'; renaming to 'perf_count' (see 'hns_units_to_perf_count_').
| |
| 514 uint32_t audio_delay_bytes = 0; | 524 uint32_t audio_delay_bytes = 0; |
| 515 hr = audio_clock_->GetPosition(&position, NULL); | 525 AudioTimestamp output_timestamp = {0, 0}; |
| 526 | |
| 527 hr = audio_clock_->GetPosition(&position, &qpc_position); | |
| 516 if (SUCCEEDED(hr)) { | 528 if (SUCCEEDED(hr)) { |
| 517 // Stream position of the sample that is currently playing | 529 // Stream position of the sample that is currently playing |
| 518 // through the speaker. | 530 // through the speaker. |
| 519 double pos_sample_playing_frames = format_.Format.nSamplesPerSec * | 531 double pos_sample_playing_frames = format_.Format.nSamplesPerSec * |
| 520 (static_cast<double>(position) / device_frequency); | 532 (static_cast<double>(position) / device_frequency); |
| 521 | 533 |
| 522 // Stream position of the last sample written to the endpoint | 534 // Stream position of the last sample written to the endpoint |
| 523 // buffer. Note that, the packet we are about to receive in | 535 // buffer. Note that, the packet we are about to receive in |
| 524 // the upcoming callback is also included. | 536 // the upcoming callback is also included. |
| 525 size_t pos_last_sample_written_frames = | 537 size_t pos_last_sample_written_frames = |
| 526 num_written_frames_ + packet_size_frames_; | 538 num_written_frames_ + packet_size_frames_; |
| 527 | 539 |
| 528 // Derive the actual delay value which will be fed to the | 540 // Derive the actual delay value which will be fed to the |
| 529 // render client using the OnMoreData() callback. | 541 // render client using the OnMoreData() callback. |
| 530 audio_delay_bytes = (pos_last_sample_written_frames - | 542 audio_delay_bytes = (pos_last_sample_written_frames - |
| 531 pos_sample_playing_frames) * format_.Format.nBlockAlign; | 543 pos_sample_playing_frames) * format_.Format.nBlockAlign; |
| 544 if (hns_units_to_perf_count_) { | |
| 545 output_timestamp.frames = pos_sample_playing_frames; | |
| 546 output_timestamp.ticks = base::TimeTicks::FromQPCValue( | |
| 547 qpc_position * hns_units_to_perf_count_) | |
| 548 .ToInternalValue(); | |
| 549 } | |
| 532 } | 550 } |
| 533 | 551 |
| 534 // Read a data packet from the registered client source and | 552 // Read a data packet from the registered client source and |
| 535 // deliver a delay estimate in the same callback to the client. | 553 // deliver a delay estimate in the same callback to the client. |
| 536 | 554 |
| 537 int frames_filled = | 555 int frames_filled = source_->OnMoreData(audio_bus_.get(), audio_delay_bytes, |
| 538 source_->OnMoreData(audio_bus_.get(), audio_delay_bytes, 0); | 556 0, output_timestamp); |
| 539 uint32_t num_filled_bytes = frames_filled * format_.Format.nBlockAlign; | 557 uint32_t num_filled_bytes = frames_filled * format_.Format.nBlockAlign; |
| 540 DCHECK_LE(num_filled_bytes, packet_size_bytes_); | 558 DCHECK_LE(num_filled_bytes, packet_size_bytes_); |
| 541 | 559 |
| 542 // Note: If this ever changes to output raw float the data must be | 560 // Note: If this ever changes to output raw float the data must be |
| 543 // clipped and sanitized since it may come from an untrusted | 561 // clipped and sanitized since it may come from an untrusted |
| 544 // source such as NaCl. | 562 // source such as NaCl. |
| 545 const int bytes_per_sample = format_.Format.wBitsPerSample >> 3; | 563 const int bytes_per_sample = format_.Format.wBitsPerSample >> 3; |
| 546 audio_bus_->Scale(volume_); | 564 audio_bus_->Scale(volume_); |
| 547 audio_bus_->ToInterleaved( | 565 audio_bus_->ToInterleaved( |
| 548 frames_filled, bytes_per_sample, audio_data); | 566 frames_filled, bytes_per_sample, audio_data); |
| (...skipping 103 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 652 | 670 |
| 653 // Ensure that we don't quit the main thread loop immediately next | 671 // Ensure that we don't quit the main thread loop immediately next |
| 654 // time Start() is called. | 672 // time Start() is called. |
| 655 ResetEvent(stop_render_event_.Get()); | 673 ResetEvent(stop_render_event_.Get()); |
| 656 } | 674 } |
| 657 | 675 |
| 658 source_ = NULL; | 676 source_ = NULL; |
| 659 } | 677 } |
| 660 | 678 |
| 661 } // namespace media | 679 } // namespace media |
| OLD | NEW |