| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/audio/win/audio_low_latency_output_win.h" | 5 #include "media/audio/win/audio_low_latency_output_win.h" |
| 6 | 6 |
| 7 #include <Functiondiscoverykeys_devpkey.h> | 7 #include <Functiondiscoverykeys_devpkey.h> |
| 8 | 8 |
| 9 #include "base/command_line.h" | 9 #include "base/command_line.h" |
| 10 #include "base/logging.h" | 10 #include "base/logging.h" |
| (...skipping 422 matching lines...) |
| 433 if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) { | 433 if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) { |
| 434 PLOG(WARNING) << "Failed to disable MMCSS"; | 434 PLOG(WARNING) << "Failed to disable MMCSS"; |
| 435 } | 435 } |
| 436 } | 436 } |
| 437 | 437 |
| 438 bool WASAPIAudioOutputStream::RenderAudioFromSource(UINT64 device_frequency) { | 438 bool WASAPIAudioOutputStream::RenderAudioFromSource(UINT64 device_frequency) { |
| 439 TRACE_EVENT0("audio", "RenderAudioFromSource"); | 439 TRACE_EVENT0("audio", "RenderAudioFromSource"); |
| 440 | 440 |
| 441 HRESULT hr = S_FALSE; | 441 HRESULT hr = S_FALSE; |
| 442 UINT32 num_queued_frames = 0; | 442 UINT32 num_queued_frames = 0; |
| 443 uint8* audio_data = NULL; | 443 uint8_t* audio_data = NULL; |
| 444 | 444 |
| 445 // Contains how much new data we can write to the buffer without | 445 // Contains how much new data we can write to the buffer without |
| 446 // the risk of overwriting previously written data that the audio | 446 // the risk of overwriting previously written data that the audio |
| 447 // engine has not yet read from the buffer. | 447 // engine has not yet read from the buffer. |
| 448 size_t num_available_frames = 0; | 448 size_t num_available_frames = 0; |
| 449 | 449 |
| 450 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { | 450 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { |
| 451 // Get the padding value which represents the amount of rendering | 451 // Get the padding value which represents the amount of rendering |
| 452 // data that is queued up to play in the endpoint buffer. | 452 // data that is queued up to play in the endpoint buffer. |
| 453 hr = audio_client_->GetCurrentPadding(&num_queued_frames); | 453 hr = audio_client_->GetCurrentPadding(&num_queued_frames); |
| (...skipping 48 matching lines...) |
| 502 << std::hex << hr; | 502 << std::hex << hr; |
| 503 return false; | 503 return false; |
| 504 } | 504 } |
| 505 | 505 |
| 506 // Derive the audio delay which corresponds to the delay between | 506 // Derive the audio delay which corresponds to the delay between |
| 507 // a render event and the time when the first audio sample in a | 507 // a render event and the time when the first audio sample in a |
| 508 // packet is played out through the speaker. This delay value | 508 // packet is played out through the speaker. This delay value |
| 509 // can typically be utilized by an acoustic echo-control (AEC) | 509 // can typically be utilized by an acoustic echo-control (AEC) |
| 510 // unit at the render side. | 510 // unit at the render side. |
| 511 UINT64 position = 0; | 511 UINT64 position = 0; |
| 512 uint32 audio_delay_bytes = 0; | 512 uint32_t audio_delay_bytes = 0; |
| 513 hr = audio_clock_->GetPosition(&position, NULL); | 513 hr = audio_clock_->GetPosition(&position, NULL); |
| 514 if (SUCCEEDED(hr)) { | 514 if (SUCCEEDED(hr)) { |
| 515 // Stream position of the sample that is currently playing | 515 // Stream position of the sample that is currently playing |
| 516 // through the speaker. | 516 // through the speaker. |
| 517 double pos_sample_playing_frames = format_.Format.nSamplesPerSec * | 517 double pos_sample_playing_frames = format_.Format.nSamplesPerSec * |
| 518 (static_cast<double>(position) / device_frequency); | 518 (static_cast<double>(position) / device_frequency); |
| 519 | 519 |
| 520 // Stream position of the last sample written to the endpoint | 520 // Stream position of the last sample written to the endpoint |
| 521 // buffer. Note that, the packet we are about to receive in | 521 // buffer. Note that, the packet we are about to receive in |
| 522 // the upcoming callback is also included. | 522 // the upcoming callback is also included. |
| 523 size_t pos_last_sample_written_frames = | 523 size_t pos_last_sample_written_frames = |
| 524 num_written_frames_ + packet_size_frames_; | 524 num_written_frames_ + packet_size_frames_; |
| 525 | 525 |
| 526 // Derive the actual delay value which will be fed to the | 526 // Derive the actual delay value which will be fed to the |
| 527 // render client using the OnMoreData() callback. | 527 // render client using the OnMoreData() callback. |
| 528 audio_delay_bytes = (pos_last_sample_written_frames - | 528 audio_delay_bytes = (pos_last_sample_written_frames - |
| 529 pos_sample_playing_frames) * format_.Format.nBlockAlign; | 529 pos_sample_playing_frames) * format_.Format.nBlockAlign; |
| 530 } | 530 } |
| 531 | 531 |
| 532 // Read a data packet from the registered client source and | 532 // Read a data packet from the registered client source and |
| 533 // deliver a delay estimate in the same callback to the client. | 533 // deliver a delay estimate in the same callback to the client. |
| 534 | 534 |
| 535 int frames_filled = | 535 int frames_filled = |
| 536 source_->OnMoreData(audio_bus_.get(), audio_delay_bytes, 0); | 536 source_->OnMoreData(audio_bus_.get(), audio_delay_bytes, 0); |
| 537 uint32 num_filled_bytes = frames_filled * format_.Format.nBlockAlign; | 537 uint32_t num_filled_bytes = frames_filled * format_.Format.nBlockAlign; |
| 538 DCHECK_LE(num_filled_bytes, packet_size_bytes_); | 538 DCHECK_LE(num_filled_bytes, packet_size_bytes_); |
| 539 | 539 |
| 540 // Note: If this ever changes to output raw float the data must be | 540 // Note: If this ever changes to output raw float the data must be |
| 541 // clipped and sanitized since it may come from an untrusted | 541 // clipped and sanitized since it may come from an untrusted |
| 542 // source such as NaCl. | 542 // source such as NaCl. |
| 543 const int bytes_per_sample = format_.Format.wBitsPerSample >> 3; | 543 const int bytes_per_sample = format_.Format.wBitsPerSample >> 3; |
| 544 audio_bus_->Scale(volume_); | 544 audio_bus_->Scale(volume_); |
| 545 audio_bus_->ToInterleaved( | 545 audio_bus_->ToInterleaved( |
| 546 frames_filled, bytes_per_sample, audio_data); | 546 frames_filled, bytes_per_sample, audio_data); |
| 547 | 547 |
| 548 // Release the buffer space acquired in the GetBuffer() call. | 548 // Release the buffer space acquired in the GetBuffer() call. |
| 549 // Render silence if we were not able to fill up the buffer totally. | 549 // Render silence if we were not able to fill up the buffer totally. |
| 550 DWORD flags = (num_filled_bytes < packet_size_bytes_) ? | 550 DWORD flags = (num_filled_bytes < packet_size_bytes_) ? |
| 551 AUDCLNT_BUFFERFLAGS_SILENT : 0; | 551 AUDCLNT_BUFFERFLAGS_SILENT : 0; |
| 552 audio_render_client_->ReleaseBuffer(packet_size_frames_, flags); | 552 audio_render_client_->ReleaseBuffer(packet_size_frames_, flags); |
| 553 | 553 |
| 554 num_written_frames_ += packet_size_frames_; | 554 num_written_frames_ += packet_size_frames_; |
| 555 } | 555 } |
| 556 | 556 |
| 557 return true; | 557 return true; |
| 558 } | 558 } |
| 559 | 559 |
| 560 HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization( | 560 HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization( |
| 561 IAudioClient* client, HANDLE event_handle, uint32* endpoint_buffer_size) { | 561 IAudioClient* client, |
| | 562 HANDLE event_handle, |
| | 563 uint32_t* endpoint_buffer_size) { |
| 562 DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE); | 564 DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE); |
| 563 | 565 |
| 564 float f = (1000.0 * packet_size_frames_) / format_.Format.nSamplesPerSec; | 566 float f = (1000.0 * packet_size_frames_) / format_.Format.nSamplesPerSec; |
| 565 REFERENCE_TIME requested_buffer_duration = | 567 REFERENCE_TIME requested_buffer_duration = |
| 566 static_cast<REFERENCE_TIME>(f * 10000.0 + 0.5); | 568 static_cast<REFERENCE_TIME>(f * 10000.0 + 0.5); |
| 567 | 569 |
| 568 DWORD stream_flags = AUDCLNT_STREAMFLAGS_NOPERSIST; | 570 DWORD stream_flags = AUDCLNT_STREAMFLAGS_NOPERSIST; |
| 569 bool use_event = (event_handle != NULL && | 571 bool use_event = (event_handle != NULL && |
| 570 event_handle != INVALID_HANDLE_VALUE); | 572 event_handle != INVALID_HANDLE_VALUE); |
| 571 if (use_event) | 573 if (use_event) |
| (...skipping 76 matching lines...) |
| 648 | 650 |
| 649 // Ensure that we don't quit the main thread loop immediately next | 651 // Ensure that we don't quit the main thread loop immediately next |
| 650 // time Start() is called. | 652 // time Start() is called. |
| 651 ResetEvent(stop_render_event_.Get()); | 653 ResetEvent(stop_render_event_.Get()); |
| 652 } | 654 } |
| 653 | 655 |
| 654 source_ = NULL; | 656 source_ = NULL; |
| 655 } | 657 } |
| 656 | 658 |
| 657 } // namespace media | 659 } // namespace media |
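
Note (not part of the patch): the delay math in RenderAudioFromSource() above converts the device position reported by IAudioClock::GetPosition() into a byte count that is handed to OnMoreData(). A minimal standalone sketch of that conversion is shown below, using the same quantities the stream already tracks (position, device frequency, written frames, packet size, sample rate, and block alignment); the function name and parameters are hypothetical.

```cpp
#include <cstdint>

// Hypothetical helper mirroring the delay arithmetic in
// RenderAudioFromSource(): device ticks -> seconds -> frames -> bytes.
uint32_t AudioDelayBytes(uint64_t position,          // IAudioClock::GetPosition()
                         uint64_t device_frequency,  // IAudioClock::GetFrequency()
                         uint64_t num_written_frames,
                         uint32_t packet_size_frames,
                         uint32_t samples_per_sec,    // format_.Format.nSamplesPerSec
                         uint16_t block_align) {      // format_.Format.nBlockAlign
  // Stream position (in frames) of the sample currently being played
  // through the speaker.
  double playing_frames =
      samples_per_sec * (static_cast<double>(position) / device_frequency);
  // Position of the last frame that will have been written once the
  // upcoming packet is delivered.
  double written_frames =
      static_cast<double>(num_written_frames + packet_size_frames);
  // The gap between the write and play cursors, expressed in bytes.
  return static_cast<uint32_t>((written_frames - playing_frames) * block_align);
}
```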
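Note (not part of the patch): ExclusiveModeInitialization() above converts the packet size from frames into a REFERENCE_TIME buffer duration in 100-nanosecond units. A small sketch of that conversion with a worked example, assuming REFERENCE_TIME is a 64-bit integer and using hypothetical names:

```cpp
#include <cstdint>

// Frames -> milliseconds -> 100 ns REFERENCE_TIME units, rounded to nearest.
int64_t FramesToReferenceTime(uint32_t packet_size_frames,
                              uint32_t samples_per_sec) {
  double milliseconds = (1000.0 * packet_size_frames) / samples_per_sec;
  // 1 ms == 10000 REFERENCE_TIME units of 100 ns each.
  return static_cast<int64_t>(milliseconds * 10000.0 + 0.5);
}
// Example: 480 frames at 48000 Hz -> 10 ms -> 100000 REFERENCE_TIME units.
```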