Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/audio/win/audio_low_latency_output_win.h" | 5 #include "media/audio/win/audio_low_latency_output_win.h" |
| 6 | 6 |
| 7 #include <Functiondiscoverykeys_devpkey.h> | 7 #include <Functiondiscoverykeys_devpkey.h> |
| 8 | 8 |
| 9 #include "base/command_line.h" | 9 #include "base/command_line.h" |
| 10 #include "base/debug/trace_event.h" | 10 #include "base/debug/trace_event.h" |
| (...skipping 450 matching lines...) | |
| 461 // the risk of overwriting previously written data that the audio | 461 // the risk of overwriting previously written data that the audio |
| 462 // engine has not yet read from the buffer. | 462 // engine has not yet read from the buffer. |
| 463 size_t num_available_frames = 0; | 463 size_t num_available_frames = 0; |
| 464 | 464 |
| 465 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { | 465 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { |
| 466 // Get the padding value which represents the amount of rendering | 466 // Get the padding value which represents the amount of rendering |
| 467 // data that is queued up to play in the endpoint buffer. | 467 // data that is queued up to play in the endpoint buffer. |
| 468 hr = audio_client_->GetCurrentPadding(&num_queued_frames); | 468 hr = audio_client_->GetCurrentPadding(&num_queued_frames); |
| 469 num_available_frames = | 469 num_available_frames = |
| 470 endpoint_buffer_size_frames_ - num_queued_frames; | 470 endpoint_buffer_size_frames_ - num_queued_frames; |
| 471 if (FAILED(hr)) { | |
| 472 DLOG(ERROR) << "Failed to retrieve amount of available space: " | |
| 473 << std::hex << hr; | |
| 474 continue; | |
| 475 } | |
| 471 } else { | 476 } else { |
| 472 // While the stream is running, the system alternately sends one | 477 // While the stream is running, the system alternately sends one |
| 473 // buffer or the other to the client. This form of double buffering | 478 // buffer or the other to the client. This form of double buffering |
| 474 // is referred to as "ping-ponging". Each time the client receives | 479 // is referred to as "ping-ponging". Each time the client receives |
| 475 // a buffer from the system (triggers this event) the client must | 480 // a buffer from the system (triggers this event) the client must |
| 476 // process the entire buffer. Calls to the GetCurrentPadding method | 481 // process the entire buffer. Calls to the GetCurrentPadding method |
| 477 // are unnecessary because the packet size must always equal the | 482 // are unnecessary because the packet size must always equal the |
| 478 // buffer size. In contrast to the shared mode buffering scheme, | 483 // buffer size. In contrast to the shared mode buffering scheme, |
| 479 // the latency for an event-driven, exclusive-mode stream depends | 484 // the latency for an event-driven, exclusive-mode stream depends |
| 480 // directly on the buffer size. | 485 // directly on the buffer size. |
| 481 num_available_frames = endpoint_buffer_size_frames_; | 486 num_available_frames = endpoint_buffer_size_frames_; |
| 482 } | 487 } |
| 483 if (FAILED(hr)) { | 488 |
| 484 DLOG(ERROR) << "Failed to retrieve amount of available space: " | 489 // Check if there is enough available space to fit the packet size |
| 485 << std::hex << hr; | 490 // specified by the client. |
| 491 if (num_available_frames < packet_size_frames_) | |
| 486 continue; | 492 continue; |
| 493 | |
| 494 // Derive the number of packets we need to get from the client to | |
| 495 // fill up the available area in the endpoint buffer. | |
| 496 // |num_packets| will always be one for exclusive-mode streams and | |
| 497 // will be one in most cases for shared mode streams as well. | |
| 498 // However, we have found that two packets can sometimes be | |
| 499 // required. | |
| 500 size_t num_packets = (num_available_frames / packet_size_frames_); | |

> tommi (sloooow) - chröme, 2013/02/08 15:03:07:
> should we also have a DCHECK here?
> DCHECK_EQ(num_a
>
> henrika (OOO until Aug 14), 2013/02/08 15:25:12:
> See second patch. I'd like a log as well so we can

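As context for this thread, a minimal sketch of the logging half of the suggestion is given below. tommi's proposed `DCHECK_EQ(...)` is truncated on this page and the real change landed in the second patch set, so the condition, severity, and message here are assumptions rather than the actual code:

```cpp
// Hypothetical sketch only (the actual change is in the second patch set,
// which is not shown on this page). Based on the discussion above, it would
// log the uncommon case where more than one packet fits into the available
// space, so that the frequency of that case can be measured.
size_t num_packets = num_available_frames / packet_size_frames_;
DLOG_IF(WARNING, num_packets > 1)
    << "Endpoint buffer has room for " << num_packets
    << " packets; expected 1.";
```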
| 501 | |
| 502 for (size_t n = 0; n < num_packets; ++n) { | |
|
tommi (sloooow) - chröme
2013/02/08 15:03:07
This function was big before bug now it's huge! :)
henrika (OOO until Aug 14)
2013/02/08 15:25:12
Stay tuned...
| |
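To make the packet arithmetic that feeds this loop concrete, here is a small standalone sketch with assumed example values (a 960-frame shared-mode endpoint buffer, 480-frame packets, and 240 frames still queued; the numbers are illustrative and not taken from the patch):

```cpp
// Illustrative values only; in the real stream they come from
// IAudioClient::GetBufferSize() and IAudioClient::GetCurrentPadding().
const size_t endpoint_buffer_size_frames = 960;
const size_t packet_size_frames = 480;
const size_t num_queued_frames = 240;  // frames the engine has not played yet

// Space left in the endpoint buffer that can safely be written.
size_t num_available_frames =
    endpoint_buffer_size_frames - num_queued_frames;  // 720 frames

// Only whole packets are delivered, so one 480-frame packet fits now and
// the remaining 240 frames are filled on a later render event.
size_t num_packets = num_available_frames / packet_size_frames;  // == 1
```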
| 503 // Grab all available space in the rendering endpoint buffer | |
| 504 // into which the client can write a data packet. | |
| 505 hr = audio_render_client_->GetBuffer(packet_size_frames_, | |
| 506 &audio_data); | |
| 507 if (FAILED(hr)) { | |
| 508 DLOG(ERROR) << "Failed to use rendering audio buffer: " | |
| 509 << std::hex << hr; | |
| 510 continue; | |
| 511 } | |
| 512 | |
| 513 // Derive the audio delay which corresponds to the delay between | |
| 514 // a render event and the time when the first audio sample in a | |
| 515 // packet is played out through the speaker. This delay value | |
| 516 // can typically be utilized by an acoustic echo-control (AEC) | |
| 517 // unit at the render side. | |
| 518 UINT64 position = 0; | |
| 519 int audio_delay_bytes = 0; | |
| 520 hr = audio_clock->GetPosition(&position, NULL); | |
| 521 if (SUCCEEDED(hr)) { | |
| 522 // Stream position of the sample that is currently playing | |
| 523 // through the speaker. | |
| 524 double pos_sample_playing_frames = format_.Format.nSamplesPerSec * | |
| 525 (static_cast<double>(position) / device_frequency); | |
| 526 | |
| 527 // Stream position of the last sample written to the endpoint | |
| 528 // buffer. Note that, the packet we are about to receive in | |
| 529 // the upcoming callback is also included. | |
| 530 size_t pos_last_sample_written_frames = | |
| 531 num_written_frames_ + packet_size_frames_; | |
| 532 | |
| 533 // Derive the actual delay value which will be fed to the | |
| 534 // render client using the OnMoreData() callback. | |
| 535 audio_delay_bytes = (pos_last_sample_written_frames - | |
| 536 pos_sample_playing_frames) * format_.Format.nBlockAlign; | |
| 537 } | |
| 538 | |
| 539 // Read a data packet from the registered client source and | |
| 540 // deliver a delay estimate in the same callback to the client. | |
| 541 // A time stamp is also stored in the AudioBuffersState. This | |
| 542 // time stamp can be used at the client side to compensate for | |
| 543 // the delay between the usage of the delay value and the time | |
| 544 // of generation. | |
| 545 | |
| 546 uint32 num_filled_bytes = 0; | |
| 547 const int bytes_per_sample = format_.Format.wBitsPerSample >> 3; | |
| 548 | |
| 549 int frames_filled = source_->OnMoreData( | |
| 550 audio_bus_.get(), AudioBuffersState(0, audio_delay_bytes)); | |
| 551 num_filled_bytes = frames_filled * format_.Format.nBlockAlign; | |
| 552 DCHECK_LE(num_filled_bytes, packet_size_bytes_); | |
| 553 | |
| 554 // Note: If this ever changes to output raw float the data must be | |
| 555 // clipped and sanitized since it may come from an untrusted | |
| 556 // source such as NaCl. | |
| 557 audio_bus_->ToInterleaved( | |
| 558 frames_filled, bytes_per_sample, audio_data); | |
| 559 | |
| 560 // Perform in-place, software-volume adjustments. | |
| 561 media::AdjustVolume(audio_data, | |
| 562 num_filled_bytes, | |
| 563 audio_bus_->channels(), | |
| 564 bytes_per_sample, | |
| 565 volume_); | |
| 566 | |
| 567 // Release the buffer space acquired in the GetBuffer() call. | |
| 568 // Render silence if we were not able to fill up the buffer totally. | |
| 569 DWORD flags = (num_filled_bytes < packet_size_bytes_) ? | |
| 570 AUDCLNT_BUFFERFLAGS_SILENT : 0; | |
| 571 audio_render_client_->ReleaseBuffer(packet_size_frames_, | |
| 572 flags); | |
| 573 | |
| 574 num_written_frames_ += packet_size_frames_; | |
| 487 } | 575 } |
| 488 | |
| 489 // It can happen that we were not able to find a a perfect match | |
| 490 // between the native device rate and the endpoint buffer size. | |
| 491 // In this case, we are using a packet size which equals the enpoint | |
| 492 // buffer size (does not lead to lowest possible delay and is rare | |
| 493 // case) and must therefore wait for yet another callback until we | |
| 494 // are able to provide data. | |
| 495 if ((num_available_frames > 0) && | |
| 496 (num_available_frames != packet_size_frames_)) { | |
| 497 continue; | |
| 498 } | |
| 499 | |
| 500 // Grab all available space in the rendering endpoint buffer | |
| 501 // into which the client can write a data packet. | |
| 502 hr = audio_render_client_->GetBuffer(packet_size_frames_, | |
| 503 &audio_data); | |
| 504 if (FAILED(hr)) { | |
| 505 DLOG(ERROR) << "Failed to use rendering audio buffer: " | |
| 506 << std::hex << hr; | |
| 507 continue; | |
| 508 } | |
| 509 | |
| 510 // Derive the audio delay which corresponds to the delay between | |
| 511 // a render event and the time when the first audio sample in a | |
| 512 // packet is played out through the speaker. This delay value | |
| 513 // can typically be utilized by an acoustic echo-control (AEC) | |
| 514 // unit at the render side. | |
| 515 UINT64 position = 0; | |
| 516 int audio_delay_bytes = 0; | |
| 517 hr = audio_clock->GetPosition(&position, NULL); | |
| 518 if (SUCCEEDED(hr)) { | |
| 519 // Stream position of the sample that is currently playing | |
| 520 // through the speaker. | |
| 521 double pos_sample_playing_frames = format_.Format.nSamplesPerSec * | |
| 522 (static_cast<double>(position) / device_frequency); | |
| 523 | |
| 524 // Stream position of the last sample written to the endpoint | |
| 525 // buffer. Note that, the packet we are about to receive in | |
| 526 // the upcoming callback is also included. | |
| 527 size_t pos_last_sample_written_frames = | |
| 528 num_written_frames_ + packet_size_frames_; | |
| 529 | |
| 530 // Derive the actual delay value which will be fed to the | |
| 531 // render client using the OnMoreData() callback. | |
| 532 audio_delay_bytes = (pos_last_sample_written_frames - | |
| 533 pos_sample_playing_frames) * format_.Format.nBlockAlign; | |
| 534 } | |
| 535 | |
| 536 // Read a data packet from the registered client source and | |
| 537 // deliver a delay estimate in the same callback to the client. | |
| 538 // A time stamp is also stored in the AudioBuffersState. This | |
| 539 // time stamp can be used at the client side to compensate for | |
| 540 // the delay between the usage of the delay value and the time | |
| 541 // of generation. | |
| 542 | |
| 543 uint32 num_filled_bytes = 0; | |
| 544 const int bytes_per_sample = format_.Format.wBitsPerSample >> 3; | |
| 545 | |
| 546 int frames_filled = source_->OnMoreData( | |
| 547 audio_bus_.get(), AudioBuffersState(0, audio_delay_bytes)); | |
| 548 num_filled_bytes = frames_filled * format_.Format.nBlockAlign; | |
| 549 DCHECK_LE(num_filled_bytes, packet_size_bytes_); | |
| 550 | |
| 551 // Note: If this ever changes to output raw float the data must be | |
| 552 // clipped and sanitized since it may come from an untrusted | |
| 553 // source such as NaCl. | |
| 554 audio_bus_->ToInterleaved( | |
| 555 frames_filled, bytes_per_sample, audio_data); | |
| 556 | |
| 557 // Perform in-place, software-volume adjustments. | |
| 558 media::AdjustVolume(audio_data, | |
| 559 num_filled_bytes, | |
| 560 audio_bus_->channels(), | |
| 561 bytes_per_sample, | |
| 562 volume_); | |
| 563 | |
| 564 // Zero out the part of the packet which has not been filled by | |
| 565 // the client. Using silence is the least bad option in this | |
| 566 // situation. | |
| 567 if (num_filled_bytes < packet_size_bytes_) { | |
| 568 memset(&audio_data[num_filled_bytes], 0, | |
| 569 (packet_size_bytes_ - num_filled_bytes)); | |
| 570 } | |
| 571 | |
| 572 // Release the buffer space acquired in the GetBuffer() call. | |
| 573 DWORD flags = 0; | |
| 574 audio_render_client_->ReleaseBuffer(packet_size_frames_, | |
| 575 flags); | |
| 576 | |
| 577 num_written_frames_ += packet_size_frames_; | |
| 578 } | 576 } |
| 579 break; | 577 break; |
| 580 default: | 578 default: |
| 581 error = true; | 579 error = true; |
| 582 break; | 580 break; |
| 583 } | 581 } |
| 584 } | 582 } |
| 585 | 583 |
| 586 if (playing && error) { | 584 if (playing && error) { |
| 587 // Stop audio rendering since something has gone wrong in our main thread | 585 // Stop audio rendering since something has gone wrong in our main thread |
| (...skipping 90 matching lines...) | |
| 678 DVLOG(1) << "IAudioClient::GetBufferSize: " << std::hex << hr; | 676 DVLOG(1) << "IAudioClient::GetBufferSize: " << std::hex << hr; |
| 679 return hr; | 677 return hr; |
| 680 } | 678 } |
| 681 | 679 |
| 682 *endpoint_buffer_size = buffer_size_in_frames; | 680 *endpoint_buffer_size = buffer_size_in_frames; |
| 683 DVLOG(2) << "endpoint buffer size: " << buffer_size_in_frames; | 681 DVLOG(2) << "endpoint buffer size: " << buffer_size_in_frames; |
| 684 return hr; | 682 return hr; |
| 685 } | 683 } |
| 686 | 684 |
| 687 } // namespace media | 685 } // namespace media |
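For readers following the delay derivation added at new lines 513-536 above, here is a small worked example of the same arithmetic with assumed values (48 kHz stereo 16-bit audio so nBlockAlign is 4 bytes, 480-frame packets, a 10 MHz device clock; all numbers are illustrative, not from the patch):

```cpp
// Illustrative numbers for the render-delay math above (assumed, not from
// the patch). IAudioClock::GetPosition() returns a position in device ticks
// that is scaled by the device frequency into seconds and then into frames.
const double samples_per_sec = 48000.0;      // format_.Format.nSamplesPerSec
const int block_align = 4;                   // format_.Format.nBlockAlign
const double device_frequency = 10000000.0;  // IAudioClock::GetFrequency()
const unsigned long long position = 100000;  // 10 ms worth of device ticks

// Frame position of the sample currently being played by the device:
// 48000 * (100000 / 10000000) = 480 frames.
double pos_sample_playing_frames =
    samples_per_sec * (static_cast<double>(position) / device_frequency);

// 960 frames already written plus the 480-frame packet that is about to be
// requested from the client in the upcoming callback.
size_t pos_last_sample_written_frames = 960 + 480;

// (1440 - 480) frames * 4 bytes per frame = 3840 bytes, i.e. 20 ms of audio.
int audio_delay_bytes = static_cast<int>(
    (pos_last_sample_written_frames - pos_sample_playing_frames) *
    block_align);
```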