OLD | NEW |
---|---|
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/media/audio_renderer_impl.h" | 5 #include "content/renderer/media/audio_renderer_impl.h" |
6 | 6 |
7 #include <math.h> | 7 #include <math.h> |
8 | 8 |
9 #include <algorithm> | 9 #include <algorithm> |
10 | 10 |
(...skipping 174 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
185 } | 185 } |
186 | 186 |
187 void AudioRendererImpl::DoSeek() { | 187 void AudioRendererImpl::DoSeek() { |
188 earliest_end_time_ = base::Time::Now(); | 188 earliest_end_time_ = base::Time::Now(); |
189 | 189 |
190 // Pause and flush the stream when we seek to a new location. | 190 // Pause and flush the stream when we seek to a new location. |
191 DCHECK(audio_device_.get()); | 191 DCHECK(audio_device_.get()); |
192 audio_device_->Pause(true); | 192 audio_device_->Pause(true); |
193 } | 193 } |
194 | 194 |
195 void AudioRendererImpl::Render(const std::vector<float*>& audio_data, | 195 size_t AudioRendererImpl::Render(const std::vector<float*>& audio_data, |
196 size_t number_of_frames, | 196 size_t number_of_frames, |
197 size_t audio_delay_milliseconds) { | 197 size_t audio_delay_milliseconds) { |
198 if (stopped_ || GetPlaybackRate() == 0.0f) { | 198 if (stopped_ || GetPlaybackRate() == 0.0f) { |
199 // Output silence if stopped. | 199 // Output silence if stopped. |
200 for (size_t i = 0; i < audio_data.size(); ++i) | 200 for (size_t i = 0; i < audio_data.size(); ++i) |
201 memset(audio_data[i], 0, sizeof(float) * number_of_frames); | 201 memset(audio_data[i], 0, sizeof(float) * number_of_frames); |
202 return; | 202 return 0; |
203 } | 203 } |
204 | 204 |
205 // Adjust the playback delay. | 205 // Adjust the playback delay. |
206 base::Time current_time = base::Time::Now(); | 206 base::Time current_time = base::Time::Now(); |
207 | 207 |
208 base::TimeDelta request_delay = | 208 base::TimeDelta request_delay = |
209 base::TimeDelta::FromMilliseconds(audio_delay_milliseconds); | 209 base::TimeDelta::FromMilliseconds(audio_delay_milliseconds); |
210 | 210 |
211 // Finally we need to adjust the delay according to playback rate. | 211 // Finally we need to adjust the delay according to playback rate. |
212 if (GetPlaybackRate() != 1.0f) { | 212 if (GetPlaybackRate() != 1.0f) { |
213 request_delay = base::TimeDelta::FromMicroseconds( | 213 request_delay = base::TimeDelta::FromMicroseconds( |
214 static_cast<int64>(ceil(request_delay.InMicroseconds() * | 214 static_cast<int64>(ceil(request_delay.InMicroseconds() * |
215 GetPlaybackRate()))); | 215 GetPlaybackRate()))); |
216 } | 216 } |
217 | 217 |
218 uint32 bytes_per_frame = | 218 uint32 bytes_per_frame = |
219 audio_parameters_.bits_per_sample * audio_parameters_.channels / 8; | 219 audio_parameters_.bits_per_sample * audio_parameters_.channels / 8; |
220 | 220 |
221 const size_t buf_size = number_of_frames * bytes_per_frame; | 221 const size_t buf_size = number_of_frames * bytes_per_frame; |
222 scoped_array<uint8> buf(new uint8[buf_size]); | 222 scoped_array<uint8> buf(new uint8[buf_size]); |
223 | 223 |
224 base::Time time_now = base::Time::Now(); | 224 base::Time time_now = base::Time::Now(); |
225 uint32 filled = FillBuffer(buf.get(), | 225 uint32 filled = FillBuffer(buf.get(), |
226 buf_size, | 226 buf_size, |
227 request_delay, | 227 request_delay, |
228 time_now >= earliest_end_time_); | 228 time_now >= earliest_end_time_); |
229 DCHECK_LE(filled, buf_size); | 229 DCHECK_LE(filled, buf_size); |
230 UpdateEarliestEndTime(filled, request_delay, time_now); | |
230 | 231 |
231 uint32 filled_frames = filled / bytes_per_frame; | 232 uint32 filled_frames = filled / bytes_per_frame; |
232 | 233 |
233 // Deinterleave each audio channel. | 234 // Deinterleave each audio channel. |
234 int channels = audio_data.size(); | 235 int channels = audio_data.size(); |
235 for (int channel_index = 0; channel_index < channels; ++channel_index) { | 236 for (int channel_index = 0; channel_index < channels; ++channel_index) { |
236 media::DeinterleaveAudioChannel(buf.get(), | 237 media::DeinterleaveAudioChannel(buf.get(), |
237 audio_data[channel_index], | 238 audio_data[channel_index], |
238 channels, | 239 channels, |
239 channel_index, | 240 channel_index, |
240 bytes_per_frame / channels, | 241 bytes_per_frame / channels, |
241 filled_frames); | 242 filled_frames); |
242 | 243 |
243 // If FillBuffer() didn't give us enough data then zero out the remainder. | 244 // If FillBuffer() didn't give us enough data then zero out the remainder. |
244 if (filled_frames < number_of_frames) { | 245 if (filled_frames < number_of_frames) { |
245 int frames_to_zero = number_of_frames - filled_frames; | 246 int frames_to_zero = number_of_frames - filled_frames; |
246 memset(audio_data[channel_index], 0, sizeof(float) * frames_to_zero); | 247 memset(audio_data[channel_index] + filled_frames, |
248 0, | |
249 sizeof(float) * frames_to_zero); | |
[review comment — no longer working on chromium — 2011/12/16 12:21:13]
Do we really need this? (in AudioSyncReader::Read())
[reply — enal1 — 2011/12/16 16:48:23]
Yes, we probably do not need that, but I feel safe… [message truncated in extraction]
| |
247 } | 250 } |
248 } | 251 } |
252 return filled_frames; | |
249 } | 253 } |
OLD | NEW |