OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/audio/mac/audio_auhal_mac.h" | 5 #include "media/audio/mac/audio_auhal_mac.h" |
6 | 6 |
7 #include <CoreServices/CoreServices.h> | 7 #include <CoreServices/CoreServices.h> |
8 | 8 |
9 #include "base/basictypes.h" | 9 #include "base/basictypes.h" |
10 #include "base/bind.h" | 10 #include "base/bind.h" |
(...skipping 186 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
197 // be contended in the middle of stream processing here (starting and stopping | 197 // be contended in the middle of stream processing here (starting and stopping |
198 // the stream are ok) because this is running on a real-time thread. | 198 // the stream are ok) because this is running on a real-time thread. |
199 OSStatus AUHALStream::Render( | 199 OSStatus AUHALStream::Render( |
200 AudioUnitRenderActionFlags* flags, | 200 AudioUnitRenderActionFlags* flags, |
201 const AudioTimeStamp* output_time_stamp, | 201 const AudioTimeStamp* output_time_stamp, |
202 UInt32 bus_number, | 202 UInt32 bus_number, |
203 UInt32 number_of_frames, | 203 UInt32 number_of_frames, |
204 AudioBufferList* data) { | 204 AudioBufferList* data) { |
205 TRACE_EVENT0("audio", "AUHALStream::Render"); | 205 TRACE_EVENT0("audio", "AUHALStream::Render"); |
206 | 206 |
207 UpdatePlayoutTimestamp(output_time_stamp); | 207 UInt32 lost_frames = UpdatePlayoutTimestampAndStats(output_time_stamp); |
| 208 |
| 209 // Inform the source about any skipped (lost) frames. If we use a fifo this |
| 210 // will be out of sync with the fifo pulls (different buffer sizes), so we do |
| 211 // it here and not in ProvideInput(). |
| 212 if (lost_frames > 0) { |
| 213 base::AutoLock auto_lock(source_lock_); |
| 214 if (source_) |
| 215 source_->OnSkippedData(lost_frames); |
| 216 } |
208 | 217 |
209 // If the stream parameters change for any reason, we need to insert a FIFO | 218 // If the stream parameters change for any reason, we need to insert a FIFO |
210 // since the OnMoreData() pipeline can't handle frame size changes. | 219 // since the OnMoreData() pipeline can't handle frame size changes. |
211 if (number_of_frames != number_of_frames_) { | 220 if (number_of_frames != number_of_frames_) { |
212 // Create a FIFO on the fly to handle any discrepancies in callback rates. | 221 // Create a FIFO on the fly to handle any discrepancies in callback rates. |
213 if (!audio_fifo_) { | 222 if (!audio_fifo_) { |
214 number_of_frames_requested_ = number_of_frames; | 223 number_of_frames_requested_ = number_of_frames; |
215 DVLOG(1) << "Audio frame size changed from " << number_of_frames_ | 224 DVLOG(1) << "Audio frame size changed from " << number_of_frames_ |
216 << " to " << number_of_frames | 225 << " to " << number_of_frames |
217 << "; adding FIFO to compensate."; | 226 << "; adding FIFO to compensate."; |
(...skipping 121 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
339 // the bots, probably less so in the wild. | 348 // the bots, probably less so in the wild. |
340 if (now_ns > output_time_ns) | 349 if (now_ns > output_time_ns) |
341 return 0; | 350 return 0; |
342 | 351 |
343 double delay_frames = static_cast<double> | 352 double delay_frames = static_cast<double> |
344 (1e-9 * (output_time_ns - now_ns) * output_format_.mSampleRate); | 353 (1e-9 * (output_time_ns - now_ns) * output_format_.mSampleRate); |
345 | 354 |
346 return (delay_frames + hardware_latency_frames_); | 355 return (delay_frames + hardware_latency_frames_); |
347 } | 356 } |
348 | 357 |
349 void AUHALStream::UpdatePlayoutTimestamp(const AudioTimeStamp* timestamp) { | 358 UInt32 AUHALStream::UpdatePlayoutTimestampAndStats( |
| 359 const AudioTimeStamp* timestamp) { |
350 if ((timestamp->mFlags & kAudioTimeStampSampleTimeValid) == 0) | 360 if ((timestamp->mFlags & kAudioTimeStampSampleTimeValid) == 0) |
351 return; | 361 return 0; |
352 | 362 |
| 363 UInt32 lost_frames = 0; |
353 if (last_sample_time_) { | 364 if (last_sample_time_) { |
354 DCHECK_NE(0U, last_number_of_frames_); | 365 DCHECK_NE(0U, last_number_of_frames_); |
355 UInt32 diff = | 366 UInt32 diff = |
356 static_cast<UInt32>(timestamp->mSampleTime - last_sample_time_); | 367 static_cast<UInt32>(timestamp->mSampleTime - last_sample_time_); |
357 if (diff != last_number_of_frames_) { | 368 if (diff != last_number_of_frames_) { |
358 DCHECK_GT(diff, last_number_of_frames_); | 369 DCHECK_GT(diff, last_number_of_frames_); |
359 // We're being asked to render samples post what we expected. Update the | 370 // We're being asked to render samples post what we expected. Update the |
360 // glitch count etc and keep a record of the largest glitch. | 371 // glitch count etc and keep a record of the largest glitch. |
361 auto lost_frames = diff - last_number_of_frames_; | 372 lost_frames = diff - last_number_of_frames_; |
362 total_lost_frames_ += lost_frames; | 373 total_lost_frames_ += lost_frames; |
363 if (lost_frames > largest_glitch_frames_) | 374 if (lost_frames > largest_glitch_frames_) |
364 largest_glitch_frames_ = lost_frames; | 375 largest_glitch_frames_ = lost_frames; |
365 ++glitches_detected_; | 376 ++glitches_detected_; |
366 } | 377 } |
367 } | 378 } |
368 | 379 |
369 // Store the last sample time for use next time we get called back. | 380 // Store the last sample time for use next time we get called back. |
370 last_sample_time_ = timestamp->mSampleTime; | 381 last_sample_time_ = timestamp->mSampleTime; |
| 382 |
| 383 return lost_frames; |
371 } | 384 } |
372 | 385 |
373 void AUHALStream::ReportAndResetStats() { | 386 void AUHALStream::ReportAndResetStats() { |
374 if (!last_sample_time_) | 387 if (!last_sample_time_) |
375 return; // No stats gathered to report. | 388 return; // No stats gathered to report. |
376 | 389 |
377 // A value of 0 indicates that we got the buffer size we asked for. | 390 // A value of 0 indicates that we got the buffer size we asked for. |
378 UMA_HISTOGRAM_COUNTS("Media.Audio.Render.FramesRequested", | 391 UMA_HISTOGRAM_COUNTS("Media.Audio.Render.FramesRequested", |
379 number_of_frames_requested_); | 392 number_of_frames_requested_); |
380 // Even if there aren't any glitches, we want to record it to get a feel for | 393 // Even if there aren't any glitches, we want to record it to get a feel for |
(...skipping 157 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
538 OSStatus result = AudioUnitUninitialize(audio_unit_); | 551 OSStatus result = AudioUnitUninitialize(audio_unit_); |
539 OSSTATUS_DLOG_IF(ERROR, result != noErr, result) | 552 OSSTATUS_DLOG_IF(ERROR, result != noErr, result) |
540 << "AudioUnitUninitialize() failed."; | 553 << "AudioUnitUninitialize() failed."; |
541 result = AudioComponentInstanceDispose(audio_unit_); | 554 result = AudioComponentInstanceDispose(audio_unit_); |
542 OSSTATUS_DLOG_IF(ERROR, result != noErr, result) | 555 OSSTATUS_DLOG_IF(ERROR, result != noErr, result) |
543 << "AudioComponentInstanceDispose() failed."; | 556 << "AudioComponentInstanceDispose() failed."; |
544 audio_unit_ = 0; | 557 audio_unit_ = 0; |
545 } | 558 } |
546 | 559 |
547 } // namespace media | 560 } // namespace media |
OLD | NEW |