OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/audio/win/audio_low_latency_output_win.h" | 5 #include "media/audio/win/audio_low_latency_output_win.h" |
6 | 6 |
7 #include <Functiondiscoverykeys_devpkey.h> | 7 #include <Functiondiscoverykeys_devpkey.h> |
8 | 8 |
9 #include "base/command_line.h" | 9 #include "base/command_line.h" |
10 #include "base/logging.h" | 10 #include "base/logging.h" |
(...skipping 454 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
465 // a buffer from the system (triggers this event) the client must | 465 // a buffer from the system (triggers this event) the client must |
466 // process the entire buffer. Calls to the GetCurrentPadding method | 466 // process the entire buffer. Calls to the GetCurrentPadding method |
467 // are unnecessary because the packet size must always equal the | 467 // are unnecessary because the packet size must always equal the |
468 // buffer size. In contrast to the shared mode buffering scheme, | 468 // buffer size. In contrast to the shared mode buffering scheme, |
469 // the latency for an event-driven, exclusive-mode stream depends | 469 // the latency for an event-driven, exclusive-mode stream depends |
470 // directly on the buffer size. | 470 // directly on the buffer size. |
471 num_available_frames = endpoint_buffer_size_frames_; | 471 num_available_frames = endpoint_buffer_size_frames_; |
472 } | 472 } |
473 | 473 |
474 // Check if there is enough available space to fit the packet size | 474 // Check if there is enough available space to fit the packet size |
475 // specified by the client, wait until a future callback. | 475 // specified by the client. If not, wait until a future callback. |
476 if (num_available_frames < packet_size_frames_) | 476 if (num_available_frames < packet_size_frames_) |
477 return true; | 477 return true; |
478 | 478 |
479 // Grab all available space in the rendering endpoint buffer | 479 // Derive the number of packets we need to get from the client to fill up the |
480 // into which the client can write a data packet. | 480 // available area in the endpoint buffer. Well-behaved (> Vista) clients and |
481 hr = audio_render_client_->GetBuffer(packet_size_frames_, | 481 // exclusive mode streams should generally have a |num_packets| value of 1. |
482 &audio_data); | 482 // |
483 if (FAILED(hr)) { | 483 // Vista clients are not able to maintain reliable callbacks, so the endpoint |
484 DLOG(ERROR) << "Failed to use rendering audio buffer: " | 484 // buffer may exhaust itself such that back-to-back callbacks are occasionally |
485 << std::hex << hr; | 485 // necessary to avoid glitches. In such cases we have no choice but to issue |
486 return false; | 486 // back-to-back reads and pray that the browser side has enough data cached or |
486 // that the renderer can fulfill the read before we glitch anyway. | |
tommi (sloooow) - chröme
2015/09/03 08:31:28
Can we record some sort of stats to see how often
DaleCurtis
2015/09/03 16:36:31
It's tracked by Media.AudioRendererMissedDeadline
| |
488 // | |
489 // API documentation does not guarantee that even on Win7+ clients we won't | |
490 // need to fill more than a period's worth of buffers; but in practice this | |
491 // appears to be infrequent. | |
492 // | |
493 // See http://crbug.com/524947. | |
494 const size_t num_packets = num_available_frames / packet_size_frames_; | |
495 for (size_t n = 0; n < num_packets; ++n) { | |
496 // Grab all available space in the rendering endpoint buffer | |
497 // into which the client can write a data packet. | |
498 hr = audio_render_client_->GetBuffer(packet_size_frames_, | |
499 &audio_data); | |
500 if (FAILED(hr)) { | |
501 DLOG(ERROR) << "Failed to use rendering audio buffer: " | |
502 << std::hex << hr; | |
503 return false; | |
504 } | |
505 | |
506 // Derive the audio delay which corresponds to the delay between | |
507 // a render event and the time when the first audio sample in a | |
508 // packet is played out through the speaker. This delay value | |
509 // can typically be utilized by an acoustic echo-control (AEC) | |
510 // unit at the render side. | |
511 UINT64 position = 0; | |
512 uint32 audio_delay_bytes = 0; | |
513 hr = audio_clock_->GetPosition(&position, NULL); | |
514 if (SUCCEEDED(hr)) { | |
515 // Stream position of the sample that is currently playing | |
516 // through the speaker. | |
517 double pos_sample_playing_frames = format_.Format.nSamplesPerSec * | |
518 (static_cast<double>(position) / device_frequency); | |
519 | |
520 // Stream position of the last sample written to the endpoint | |
521 // buffer. Note that, the packet we are about to receive in | |
522 // the upcoming callback is also included. | |
523 size_t pos_last_sample_written_frames = | |
524 num_written_frames_ + packet_size_frames_; | |
525 | |
526 // Derive the actual delay value which will be fed to the | |
527 // render client using the OnMoreData() callback. | |
528 audio_delay_bytes = (pos_last_sample_written_frames - | |
529 pos_sample_playing_frames) * format_.Format.nBlockAlign; | |
530 } | |
531 | |
532 // Read a data packet from the registered client source and | |
533 // deliver a delay estimate in the same callback to the client. | |
534 | |
535 int frames_filled = source_->OnMoreData( | |
536 audio_bus_.get(), audio_delay_bytes); | |
537 uint32 num_filled_bytes = frames_filled * format_.Format.nBlockAlign; | |
538 DCHECK_LE(num_filled_bytes, packet_size_bytes_); | |
539 | |
540 // Note: If this ever changes to output raw float the data must be | |
541 // clipped and sanitized since it may come from an untrusted | |
542 // source such as NaCl. | |
543 const int bytes_per_sample = format_.Format.wBitsPerSample >> 3; | |
544 audio_bus_->Scale(volume_); | |
545 audio_bus_->ToInterleaved( | |
546 frames_filled, bytes_per_sample, audio_data); | |
547 | |
548 // Release the buffer space acquired in the GetBuffer() call. | |
549 // Render silence if we were not able to fill up the buffer totally. | |
550 DWORD flags = (num_filled_bytes < packet_size_bytes_) ? | |
551 AUDCLNT_BUFFERFLAGS_SILENT : 0; | |
552 audio_render_client_->ReleaseBuffer(packet_size_frames_, flags); | |
553 | |
554 num_written_frames_ += packet_size_frames_; | |
487 } | 555 } |
488 | 556 |
489 // Derive the audio delay which corresponds to the delay between | |
490 // a render event and the time when the first audio sample in a | |
491 // packet is played out through the speaker. This delay value | |
492 // can typically be utilized by an acoustic echo-control (AEC) | |
493 // unit at the render side. | |
494 UINT64 position = 0; | |
495 uint32 audio_delay_bytes = 0; | |
496 hr = audio_clock_->GetPosition(&position, NULL); | |
497 if (SUCCEEDED(hr)) { | |
498 // Stream position of the sample that is currently playing | |
499 // through the speaker. | |
500 double pos_sample_playing_frames = format_.Format.nSamplesPerSec * | |
501 (static_cast<double>(position) / device_frequency); | |
502 | |
503 // Stream position of the last sample written to the endpoint | |
504 // buffer. Note that, the packet we are about to receive in | |
505 // the upcoming callback is also included. | |
506 size_t pos_last_sample_written_frames = | |
507 num_written_frames_ + packet_size_frames_; | |
508 | |
509 // Derive the actual delay value which will be fed to the | |
510 // render client using the OnMoreData() callback. | |
511 audio_delay_bytes = (pos_last_sample_written_frames - | |
512 pos_sample_playing_frames) * format_.Format.nBlockAlign; | |
513 } | |
514 | |
515 // Read a data packet from the registered client source and | |
516 // deliver a delay estimate in the same callback to the client. | |
517 | |
518 int frames_filled = source_->OnMoreData( | |
519 audio_bus_.get(), audio_delay_bytes); | |
520 uint32 num_filled_bytes = frames_filled * format_.Format.nBlockAlign; | |
521 DCHECK_LE(num_filled_bytes, packet_size_bytes_); | |
522 | |
523 // Note: If this ever changes to output raw float the data must be | |
524 // clipped and sanitized since it may come from an untrusted | |
525 // source such as NaCl. | |
526 const int bytes_per_sample = format_.Format.wBitsPerSample >> 3; | |
527 audio_bus_->Scale(volume_); | |
528 audio_bus_->ToInterleaved( | |
529 frames_filled, bytes_per_sample, audio_data); | |
530 | |
531 // Release the buffer space acquired in the GetBuffer() call. | |
532 // Render silence if we were not able to fill up the buffer totally. | |
533 DWORD flags = (num_filled_bytes < packet_size_bytes_) ? | |
534 AUDCLNT_BUFFERFLAGS_SILENT : 0; | |
535 audio_render_client_->ReleaseBuffer(packet_size_frames_, flags); | |
536 | |
537 num_written_frames_ += packet_size_frames_; | |
538 return true; | 557 return true; |
539 } | 558 } |
540 | 559 |
541 HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization( | 560 HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization( |
542 IAudioClient* client, HANDLE event_handle, uint32* endpoint_buffer_size) { | 561 IAudioClient* client, HANDLE event_handle, uint32* endpoint_buffer_size) { |
543 DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE); | 562 DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE); |
544 | 563 |
545 float f = (1000.0 * packet_size_frames_) / format_.Format.nSamplesPerSec; | 564 float f = (1000.0 * packet_size_frames_) / format_.Format.nSamplesPerSec; |
546 REFERENCE_TIME requested_buffer_duration = | 565 REFERENCE_TIME requested_buffer_duration = |
547 static_cast<REFERENCE_TIME>(f * 10000.0 + 0.5); | 566 static_cast<REFERENCE_TIME>(f * 10000.0 + 0.5); |
(...skipping 81 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
629 | 648 |
630 // Ensure that we don't quit the main thread loop immediately next | 649 // Ensure that we don't quit the main thread loop immediately next |
631 // time Start() is called. | 650 // time Start() is called. |
632 ResetEvent(stop_render_event_.Get()); | 651 ResetEvent(stop_render_event_.Get()); |
633 } | 652 } |
634 | 653 |
635 source_ = NULL; | 654 source_ = NULL; |
636 } | 655 } |
637 | 656 |
638 } // namespace media | 657 } // namespace media |
OLD | NEW |