Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/base/android/audio_decoder_job.h" | 5 #include "media/base/android/audio_decoder_job.h" |
| 6 | 6 |
| 7 #include "base/bind.h" | 7 #include "base/bind.h" |
| 8 #include "base/lazy_instance.h" | 8 #include "base/lazy_instance.h" |
| 9 #include "base/threading/thread.h" | 9 #include "base/threading/thread.h" |
| 10 #include "media/base/android/sdk_media_codec_bridge.h" | 10 #include "media/base/android/sdk_media_codec_bridge.h" |
| (...skipping 84 matching lines...) | |
| 95 audio_timestamp_helper_->SetBaseTimestamp(base_timestamp_); | 95 audio_timestamp_helper_->SetBaseTimestamp(base_timestamp_); |
| 96 } | 96 } |
| 97 | 97 |
| 98 void AudioDecoderJob::ReleaseOutputBuffer( | 98 void AudioDecoderJob::ReleaseOutputBuffer( |
| 99 int output_buffer_index, | 99 int output_buffer_index, |
| 100 size_t offset, | 100 size_t offset, |
| 101 size_t size, | 101 size_t size, |
| 102 bool render_output, | 102 bool render_output, |
| 103 bool /* is_late_frame */, | 103 bool /* is_late_frame */, |
| 104 base::TimeDelta current_presentation_timestamp, | 104 base::TimeDelta current_presentation_timestamp, |
| 105 const ReleaseOutputCompletionCallback& callback) { | 105 MediaCodecStatus status, |
| | 106 const DecoderCallback& callback) { |
| 106 render_output = render_output && (size != 0u); | 107 render_output = render_output && (size != 0u); |
| 107 bool is_audio_underrun = false; | 108 bool is_audio_underrun = false; |
| | 109 |
| | 110 // Ignore input value. |
| | 111 current_presentation_timestamp = kNoTimestamp(); |
| | 112 |
| 108 if (render_output) { | 113 if (render_output) { |
| 109 bool postpone = false; | |
| 110 int64_t head_position; | 114 int64_t head_position; |
| 111 MediaCodecStatus status = | 115 const bool postpone = false; |
| 112 (static_cast<AudioCodecBridge*>(media_codec_bridge_.get())) | |
| 113 ->PlayOutputBuffer(output_buffer_index, size, offset, postpone, | |
| 114 &head_position); | |
| 115 // TODO(timav,watk): This CHECK maintains the behavior of this call before | |
| 116 // we started catching CodecException and returning it as MEDIA_CODEC_ERROR. | |
| 117 // It needs to be handled some other way. http://crbug.com/585978 | |
| 118 CHECK_EQ(status, MEDIA_CODEC_OK); | |
| 119 | 116 |
| 120 base::TimeTicks current_time = base::TimeTicks::Now(); | 117 // Override status. |
| | 118 status = (static_cast<AudioCodecBridge*>(media_codec_bridge_.get())) |
> qinmin (2016/05/10 22:43:23): The input status could be MEDIA_CODEC_OUTPUT_FORMA
> Tima Vaisburd (2016/05/10 23:19:56): Yes, I see it now. Now I override the status only
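(A hedged sketch of the status handling discussed in this thread follows after the diff.)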
| | 119 ->PlayOutputBuffer(output_buffer_index, size, offset, postpone, |
| | 120 &head_position); |
| | 121 if (status == MEDIA_CODEC_OK) { |
| | 122 base::TimeTicks current_time = base::TimeTicks::Now(); |
| 121 | 123 |
| 122 size_t bytes_per_frame = kBytesPerAudioOutputSample * output_num_channels_; | 124 size_t bytes_per_frame = |
| 123 size_t new_frames_count = size / bytes_per_frame; | 125 kBytesPerAudioOutputSample * output_num_channels_; |
| 124 frame_count_ += new_frames_count; | 126 size_t new_frames_count = size / bytes_per_frame; |
| 125 audio_timestamp_helper_->AddFrames(new_frames_count); | 127 frame_count_ += new_frames_count; |
| 126 int64_t frames_to_play = frame_count_ - head_position; | 128 audio_timestamp_helper_->AddFrames(new_frames_count); |
| 127 DCHECK_GE(frames_to_play, 0); | 129 int64_t frames_to_play = frame_count_ - head_position; |
| | 130 DCHECK_GE(frames_to_play, 0); |
| 128 | 131 |
| 129 const base::TimeDelta last_buffered = | 132 const base::TimeDelta last_buffered = |
| 130 audio_timestamp_helper_->GetTimestamp(); | 133 audio_timestamp_helper_->GetTimestamp(); |
| 131 | 134 |
| 132 current_presentation_timestamp = | 135 current_presentation_timestamp = |
| 133 last_buffered - | 136 last_buffered - |
| 134 audio_timestamp_helper_->GetFrameDuration(frames_to_play); | 137 audio_timestamp_helper_->GetFrameDuration(frames_to_play); |
| 135 | 138 |
| 136 // Potential audio underrun is considered a late frame for UMA. | 139 // Potential audio underrun is considered a late frame for UMA. |
| 137 is_audio_underrun = !next_frame_time_limit_.is_null() && | 140 is_audio_underrun = !next_frame_time_limit_.is_null() && |
| 138 next_frame_time_limit_ < current_time; | 141 next_frame_time_limit_ < current_time; |
| 139 | 142 |
| 140 next_frame_time_limit_ = | 143 next_frame_time_limit_ = |
| 141 current_time + (last_buffered - current_presentation_timestamp); | 144 current_time + (last_buffered - current_presentation_timestamp); |
| 142 } else { | 145 } |
| 143 current_presentation_timestamp = kNoTimestamp(); | |
| 144 } | 146 } |
| | 147 |
| 145 media_codec_bridge_->ReleaseOutputBuffer(output_buffer_index, false); | 148 media_codec_bridge_->ReleaseOutputBuffer(output_buffer_index, false); |
| 146 | 149 |
| 147 callback.Run(is_audio_underrun, current_presentation_timestamp, | 150 callback.Run(status, is_audio_underrun, current_presentation_timestamp, |
| 148 audio_timestamp_helper_->GetTimestamp()); | 151 audio_timestamp_helper_->GetTimestamp()); |
| 149 } | 152 } |
| 150 | 153 |
| 151 bool AudioDecoderJob::ComputeTimeToRender() const { | 154 bool AudioDecoderJob::ComputeTimeToRender() const { |
| 152 return false; | 155 return false; |
| 153 } | 156 } |
| 154 | 157 |
| 155 bool AudioDecoderJob::AreDemuxerConfigsChanged( | 158 bool AudioDecoderJob::AreDemuxerConfigsChanged( |
| 156 const DemuxerConfigs& configs) const { | 159 const DemuxerConfigs& configs) const { |
| 157 return audio_codec_ != configs.audio_codec || | 160 return audio_codec_ != configs.audio_codec || |
| (...skipping 53 matching lines...) | |
| 211 DVLOG(2) << __FUNCTION__ << ": new sampling rate " << output_sampling_rate_; | 214 DVLOG(2) << __FUNCTION__ << ": new sampling rate " << output_sampling_rate_; |
| 212 needs_recreate_audio_track = true; | 215 needs_recreate_audio_track = true; |
| 213 | 216 |
| 214 ResetTimestampHelper(); | 217 ResetTimestampHelper(); |
| 215 } | 218 } |
| 216 | 219 |
| 217 const int old_num_channels = output_num_channels_; | 220 const int old_num_channels = output_num_channels_; |
| 218 status = media_codec_bridge_->GetOutputChannelCount(&output_num_channels_); | 221 status = media_codec_bridge_->GetOutputChannelCount(&output_num_channels_); |
| 219 | 222 |
| 220 if (status == MEDIA_CODEC_OK && old_num_channels != output_num_channels_) { | 223 if (status == MEDIA_CODEC_OK && old_num_channels != output_num_channels_) { |
| 221 DCHECK_GT(output_sampling_rate_, 0); | 224 DCHECK_GT(output_num_channels_, 0); |
| 222 DVLOG(2) << __FUNCTION__ << ": new channel count " << output_num_channels_; | 225 DVLOG(2) << __FUNCTION__ << ": new channel count " << output_num_channels_; |
| 223 needs_recreate_audio_track = true; | 226 needs_recreate_audio_track = true; |
| 224 } | 227 } |
| 225 | 228 |
| 226 if (needs_recreate_audio_track && | 229 if (needs_recreate_audio_track && |
| 227 !static_cast<AudioCodecBridge*>(media_codec_bridge_.get()) | 230 !static_cast<AudioCodecBridge*>(media_codec_bridge_.get()) |
| 228 ->CreateAudioTrack(output_sampling_rate_, output_num_channels_)) { | 231 ->CreateAudioTrack(output_sampling_rate_, output_num_channels_)) { |
| 229 DLOG(ERROR) << __FUNCTION__ << ": cannot create AudioTrack"; | 232 DLOG(ERROR) << __FUNCTION__ << ": cannot create AudioTrack"; |
| 230 return false; | 233 return false; |
| 231 } | 234 } |
| 232 | 235 |
| 233 return true; | 236 return true; |
| 234 } | 237 } |
| 235 | 238 |
| 236 } // namespace media | 239 } // namespace media |
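The inline exchange in the diff concerns the `status` parameter that this patch adds to `ReleaseOutputBuffer()`: the incoming value can already carry a signal (presumably `MEDIA_CODEC_OUTPUT_FORMAT_CHANGED`, which the truncated comment appears to reference), so unconditionally overwriting it with the result of `PlayOutputBuffer()` would drop that signal before it reaches the callback. The sketch below illustrates one plausible reading of the reply, namely overriding only an incoming `MEDIA_CODEC_OK`. It is a minimal, self-contained illustration rather than the patchset that landed; the trimmed-down enum and the stubbed `PlayOutputBuffer()` are hypothetical stand-ins for the Chromium types.

```cpp
#include <cstdint>
#include <iostream>

// Trimmed-down stand-in for media::MediaCodecStatus; only the values needed
// for this sketch are listed.
enum MediaCodecStatus {
  MEDIA_CODEC_OK,
  MEDIA_CODEC_OUTPUT_FORMAT_CHANGED,
  MEDIA_CODEC_ERROR,
};

// Hypothetical stub standing in for AudioCodecBridge::PlayOutputBuffer(); the
// real method queues decoded bytes on the AudioTrack and reports the playback
// head position.
MediaCodecStatus PlayOutputBuffer(int64_t* head_position) {
  *head_position = 0;
  return MEDIA_CODEC_OK;
}

// One possible shape of the status handling discussed in the review thread:
// the playback result replaces the incoming status only when the incoming
// status carries no other signal, so a format-change notification is not
// silently dropped before callback.Run() sees it.
MediaCodecStatus ResolveReleaseStatus(MediaCodecStatus incoming_status,
                                      bool render_output) {
  if (!render_output)
    return incoming_status;

  int64_t head_position = 0;
  MediaCodecStatus play_status = PlayOutputBuffer(&head_position);

  // Preserve a non-OK incoming status; otherwise report the playback result.
  return incoming_status == MEDIA_CODEC_OK ? play_status : incoming_status;
}

int main() {
  // A format change passed in by the caller survives the playback step.
  std::cout << (ResolveReleaseStatus(MEDIA_CODEC_OUTPUT_FORMAT_CHANGED, true) ==
                MEDIA_CODEC_OUTPUT_FORMAT_CHANGED)
            << "\n";  // prints 1
  return 0;
}
```

Checking the incoming value before the playback result means the function never masks a codec notification with a success code from the audio track; whether the final patchset took exactly this form is not shown in the portion of the review quoted here.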