OLD | NEW |
1 // Copyright (c) 2009 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2009 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
| 5 #include "media/filters/audio_renderer_base.h" |
| 6 |
5 #include <algorithm> | 7 #include <algorithm> |
6 | 8 |
7 #include "media/base/filter_host.h" | 9 #include "media/base/filter_host.h" |
8 #include "media/filters/audio_renderer_base.h" | 10 #include "media/filters/audio_renderer_algorithm_ola.h" |
9 | 11 |
10 namespace media { | 12 namespace media { |
11 | 13 |
12 // The maximum size of the queue, which also acts as the number of initial reads | 14 AudioRendererBase::AudioRendererBase() |
13 // to perform for buffering. The size of the queue should never exceed this | 15 : state_(kUninitialized), |
14 // number since we read only after we've dequeued and released a buffer in | |
15 // callback thread. | |
16 // | |
17 // This is sort of a magic number, but for 44.1kHz stereo audio this will give | |
18 // us enough data to fill approximately 4 complete callback buffers. | |
19 const size_t AudioRendererBase::kDefaultMaxQueueSize = 16; | |
20 | |
21 AudioRendererBase::AudioRendererBase(size_t max_queue_size) | |
22 : max_queue_size_(max_queue_size), | |
23 data_offset_(0), | |
24 state_(kUninitialized), | |
25 pending_reads_(0) { | 16 pending_reads_(0) { |
26 } | 17 } |
27 | 18 |
28 AudioRendererBase::~AudioRendererBase() { | 19 AudioRendererBase::~AudioRendererBase() { |
29 // Stop() should have been called and OnReadComplete() should have stopped | 20 // Stop() should have been called and |algorithm_| should have been destroyed. |
30 // enqueuing data. | |
31 DCHECK(state_ == kUninitialized || state_ == kStopped); | 21 DCHECK(state_ == kUninitialized || state_ == kStopped); |
32 DCHECK(queue_.empty()); | 22 DCHECK(!algorithm_.get()); |
33 } | 23 } |
34 | 24 |
35 void AudioRendererBase::Play(FilterCallback* callback) { | 25 void AudioRendererBase::Play(FilterCallback* callback) { |
36 AutoLock auto_lock(lock_); | 26 AutoLock auto_lock(lock_); |
37 DCHECK_EQ(kPaused, state_); | 27 DCHECK_EQ(kPaused, state_); |
38 scoped_ptr<FilterCallback> c(callback); | 28 scoped_ptr<FilterCallback> c(callback); |
39 state_ = kPlaying; | 29 state_ = kPlaying; |
40 callback->Run(); | 30 callback->Run(); |
41 } | 31 } |
42 | 32 |
43 void AudioRendererBase::Pause(FilterCallback* callback) { | 33 void AudioRendererBase::Pause(FilterCallback* callback) { |
44 AutoLock auto_lock(lock_); | 34 AutoLock auto_lock(lock_); |
45 DCHECK_EQ(kPlaying, state_); | 35 DCHECK_EQ(kPlaying, state_); |
46 pause_callback_.reset(callback); | 36 pause_callback_.reset(callback); |
47 state_ = kPaused; | 37 state_ = kPaused; |
48 | 38 |
49 // We'll only pause when we've finished all pending reads. | 39 // We'll only pause when we've finished all pending reads. |
50 if (pending_reads_ == 0) { | 40 if (pending_reads_ == 0) { |
51 pause_callback_->Run(); | 41 pause_callback_->Run(); |
52 pause_callback_.reset(); | 42 pause_callback_.reset(); |
53 } else { | 43 } else { |
54 state_ = kPaused; | 44 state_ = kPaused; |
55 } | 45 } |
56 } | 46 } |
57 | 47 |
58 void AudioRendererBase::Stop() { | 48 void AudioRendererBase::Stop() { |
59 OnStop(); | 49 OnStop(); |
60 | |
61 AutoLock auto_lock(lock_); | 50 AutoLock auto_lock(lock_); |
62 state_ = kStopped; | 51 state_ = kStopped; |
63 queue_.clear(); | 52 algorithm_.reset(NULL); |
64 } | 53 } |
65 | 54 |
66 void AudioRendererBase::Seek(base::TimeDelta time, FilterCallback* callback) { | 55 void AudioRendererBase::Seek(base::TimeDelta time, FilterCallback* callback) { |
67 AutoLock auto_lock(lock_); | 56 AutoLock auto_lock(lock_); |
68 DCHECK_EQ(kPaused, state_); | 57 DCHECK_EQ(kPaused, state_); |
69 DCHECK_EQ(0u, pending_reads_) << "Pending reads should have completed"; | 58 DCHECK_EQ(0u, pending_reads_) << "Pending reads should have completed"; |
70 state_ = kSeeking; | 59 state_ = kSeeking; |
71 seek_callback_.reset(callback); | 60 seek_callback_.reset(callback); |
72 | 61 |
73 // Throw away everything and schedule our reads. | 62 // Throw away everything and schedule our reads. |
74 last_fill_buffer_time_ = base::TimeDelta(); | 63 last_fill_buffer_time_ = base::TimeDelta(); |
75 queue_.clear(); | 64 |
76 data_offset_ = 0; | 65 // |algorithm_| will request more reads. |
77 for (size_t i = 0; i < max_queue_size_; ++i) { | 66 algorithm_->FlushBuffers(); |
78 ScheduleRead_Locked(); | |
79 } | |
80 } | 67 } |
81 | 68 |
82 void AudioRendererBase::Initialize(AudioDecoder* decoder, | 69 void AudioRendererBase::Initialize(AudioDecoder* decoder, |
83 FilterCallback* callback) { | 70 FilterCallback* callback) { |
84 DCHECK(decoder); | 71 DCHECK(decoder); |
85 DCHECK(callback); | 72 DCHECK(callback); |
86 DCHECK_EQ(kUninitialized, state_); | 73 DCHECK_EQ(kUninitialized, state_); |
87 scoped_ptr<FilterCallback> c(callback); | 74 scoped_ptr<FilterCallback> c(callback); |
88 decoder_ = decoder; | 75 decoder_ = decoder; |
89 | 76 |
90 // Defer initialization until all scheduled reads have completed. | 77 // Defer initialization until all scheduled reads have completed. |
91 if (!OnInitialize(decoder_->media_format())) { | 78 if (!OnInitialize(decoder_->media_format())) { |
92 host()->SetError(PIPELINE_ERROR_INITIALIZATION_FAILED); | 79 host()->SetError(PIPELINE_ERROR_INITIALIZATION_FAILED); |
93 callback->Run(); | 80 callback->Run(); |
94 return; | 81 return; |
95 } | 82 } |
96 | 83 |
| 84 // Get the media properties to initialize our algorithms. |
| 85 int channels = 0; |
| 86 int sample_rate = 0; |
| 87 int sample_bits = 0; |
| 88 bool ret = ParseMediaFormat(decoder_->media_format(), |
| 89 &channels, |
| 90 &sample_rate, |
| 91 &sample_bits); |
| 92 |
| 93 // We should have successfully parsed the media format, or we would not have |
| 94 // been created. |
| 95 DCHECK(ret); |
| 96 |
| 97 // Create a callback so our algorithm can request more reads. |
| 98 AudioRendererAlgorithmBase::RequestReadCallback* cb = |
| 99 NewCallback(this, &AudioRendererBase::ScheduleRead_Locked); |
| 100 |
| 101 // Construct the algorithm. |
| 102 algorithm_.reset(new AudioRendererAlgorithmOLA()); |
| 103 |
| 104 // Initialize our algorithm with media properties, initial playback rate |
| 105 // (may be 0), and a callback to request more reads from the data source. |
| 106 algorithm_->Initialize(channels, |
| 107 sample_rate, |
| 108 sample_bits, |
| 109 GetPlaybackRate(), |
| 110 cb); |
| 111 |
97 // Finally, execute the start callback. | 112 // Finally, execute the start callback. |
98 state_ = kPaused; | 113 state_ = kPaused; |
99 callback->Run(); | 114 callback->Run(); |
100 } | 115 } |
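The calls this file makes on |algorithm_| imply roughly the interface below. This is a sketch reconstructed from the usage in this patch for readers who don't have audio_renderer_algorithm_base.h at hand, not the actual header; in particular the RequestReadCallback typedef and the exact signatures are assumptions inferred from the call sites above and below.

// Sketch only: reconstructed from how |algorithm_| is used in this file.
// The real declaration lives in media/filters/audio_renderer_algorithm_base.h.
class AudioRendererAlgorithmBase {
 public:
  // Run whenever the algorithm wants another decoded buffer; wired to
  // AudioRendererBase::ScheduleRead_Locked() in Initialize() above.
  typedef Callback0::Type RequestReadCallback;

  // Takes ownership of |callback|. |initial_playback_rate| may be 0.
  void Initialize(int channels, int sample_rate, int sample_bits,
                  float initial_playback_rate, RequestReadCallback* callback);

  void EnqueueBuffer(Buffer* buffer_in);     // Fed from OnReadComplete().
  bool IsQueueFull();                        // Preroll-complete check.
  void FlushBuffers();                       // Called by Seek().
  size_t FillBuffer(uint8* dest, size_t dest_len);
  base::TimeDelta GetTime();                 // Timestamp of last audio written.
  void set_playback_rate(float rate);
  float playback_rate();
};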
101 | 116 |
102 void AudioRendererBase::OnReadComplete(Buffer* buffer_in) { | 117 void AudioRendererBase::OnReadComplete(Buffer* buffer_in) { |
103 AutoLock auto_lock(lock_); | 118 AutoLock auto_lock(lock_); |
104 DCHECK(state_ == kPaused || state_ == kSeeking || state_ == kPlaying); | 119 DCHECK(state_ == kPaused || state_ == kSeeking || state_ == kPlaying); |
105 DCHECK_GT(pending_reads_, 0u); | 120 DCHECK_GT(pending_reads_, 0u); |
106 --pending_reads_; | 121 --pending_reads_; |
107 | 122 |
108 // If we have stopped don't enqueue, same for end of stream buffer since | 123 // Note: Calling this may schedule more reads. |
109 // it has no data. | 124 algorithm_->EnqueueBuffer(buffer_in); |
110 if (!buffer_in->IsEndOfStream()) { | |
111 queue_.push_back(buffer_in); | |
112 DCHECK(queue_.size() <= max_queue_size_); | |
113 } | |
114 | 125 |
115 // Check for our preroll complete condition. | 126 // Check for our preroll complete condition. |
116 if (state_ == kSeeking) { | 127 if (state_ == kSeeking) { |
117 DCHECK(seek_callback_.get()); | 128 DCHECK(seek_callback_.get()); |
118 if (queue_.size() == max_queue_size_ || buffer_in->IsEndOfStream()) { | 129 if (algorithm_->IsQueueFull() || buffer_in->IsEndOfStream()) { |
119 // Transition into paused whether we have data in |queue_| or not. | 130 // Transition into paused whether we have data in |algorithm_| or not. |
120 // FillBuffer() will play silence if there's nothing to fill. | 131 // FillBuffer() will play silence if there's nothing to fill. |
121 state_ = kPaused; | 132 state_ = kPaused; |
122 seek_callback_->Run(); | 133 seek_callback_->Run(); |
123 seek_callback_.reset(); | 134 seek_callback_.reset(); |
124 } | 135 } |
125 } else if (state_ == kPaused && pending_reads_ == 0) { | 136 } else if (state_ == kPaused && pending_reads_ == 0) { |
126 // No more pending reads! We're now officially "paused". | 137 // No more pending reads! We're now officially "paused". |
127 if (pause_callback_.get()) { | 138 if (pause_callback_.get()) { |
128 pause_callback_->Run(); | 139 pause_callback_->Run(); |
129 pause_callback_.reset(); | 140 pause_callback_.reset(); |
130 } | 141 } |
131 } | 142 } |
132 } | 143 } |
133 | 144 |
134 // TODO(scherkus): clean up FillBuffer().. it's overly complex!! | |
135 size_t AudioRendererBase::FillBuffer(uint8* dest, | 145 size_t AudioRendererBase::FillBuffer(uint8* dest, |
136 size_t dest_len, | 146 size_t dest_len, |
137 float rate, | |
138 const base::TimeDelta& playback_delay) { | 147 const base::TimeDelta& playback_delay) { |
139 size_t dest_written = 0; | |
140 | |
141 // The timestamp of the last buffer written during the last call to | 148 // The timestamp of the last buffer written during the last call to |
142 // FillBuffer(). | 149 // FillBuffer(). |
143 base::TimeDelta last_fill_buffer_time; | 150 base::TimeDelta last_fill_buffer_time; |
| 151 size_t dest_written = 0; |
144 { | 152 { |
145 AutoLock auto_lock(lock_); | 153 AutoLock auto_lock(lock_); |
146 | 154 |
147 // Mute audio by returning 0 when not playing. | 155 // Mute audio by returning 0 when not playing. |
148 if (state_ != kPlaying) { | 156 if (state_ != kPlaying) { |
149 // TODO(scherkus): To keep the audio hardware busy we write at most 8k of | 157 // TODO(scherkus): To keep the audio hardware busy we write at most 8k of |
150 // zeros. This gets around the tricky situation of pausing and resuming | 158 // zeros. This gets around the tricky situation of pausing and resuming |
151 // the audio IPC layer in Chrome. Ideally, we should return zero and then | 159 // the audio IPC layer in Chrome. Ideally, we should return zero and then |
152 // the subclass can restart the conversation. | 160 // the subclass can restart the conversation. |
153 const size_t kZeroLength = 8192; | 161 const size_t kZeroLength = 8192; |
154 dest_written = std::min(kZeroLength, dest_len); | 162 dest_written = std::min(kZeroLength, dest_len); |
155 memset(dest, 0, dest_written); | 163 memset(dest, 0, dest_written); |
156 return dest_written; | 164 return dest_written; |
157 } | 165 } |
158 | 166 |
159 // Save a local copy of last fill buffer time and reset the member. | 167 // Save a local copy of last fill buffer time and reset the member. |
160 last_fill_buffer_time = last_fill_buffer_time_; | 168 last_fill_buffer_time = last_fill_buffer_time_; |
161 last_fill_buffer_time_ = base::TimeDelta(); | 169 last_fill_buffer_time_ = base::TimeDelta(); |
162 | 170 |
163 // Loop until the buffer has been filled. | 171 // Do the fill. |
164 while (dest_len > 0 && !queue_.empty()) { | 172 dest_written = algorithm_->FillBuffer(dest, dest_len); |
165 scoped_refptr<Buffer> buffer = queue_.front(); | |
166 | 173 |
167 // Determine how much to copy. | 174 // Get the current time. |
168 DCHECK_LE(data_offset_, buffer->GetDataSize()); | 175 last_fill_buffer_time_ = algorithm_->GetTime(); |
169 const uint8* data = buffer->GetData() + data_offset_; | |
170 size_t data_len = buffer->GetDataSize() - data_offset_; | |
171 | |
172 // New scaled packet size aligned to 16 to ensure it's on a | |
173 // channel/sample boundary. Only guaranteed to work for power of 2 | |
174 // number of channels and sample size. | |
175 size_t scaled_data_len = (rate <= 0.0f) ? 0 : | |
176 static_cast<size_t>(data_len / rate) & ~15; | |
177 if (scaled_data_len > dest_len) { | |
178 data_len = (data_len * dest_len / scaled_data_len) & ~15; | |
179 scaled_data_len = dest_len; | |
180 } | |
181 | |
182 // Handle playback rate in three different cases: | |
183 // 1. If rate >= 1.0 | |
184 // Speed up the playback, we copy partial amount of decoded samples | |
185 // into target buffer. | |
186 // 2. If 0.5 <= rate < 1.0 | |
187 // Slow down the playback, duplicate the decoded samples to fill a | |
188 // larger size of target buffer. | |
189 // 3. If rate < 0.5 | |
190 // Playback is too slow, simply mute the audio. | |
191 // TODO(hclam): the logic for handling playback rate is too complex and | |
192 // is not careful enough. I should do some bounds checking and even better | |
193 // replace this with a better/clearer implementation. | |
194 if (rate >= 1.0f) { | |
195 memcpy(dest, data, scaled_data_len); | |
196 } else if (rate >= 0.5) { | |
197 memcpy(dest, data, data_len); | |
198 memcpy(dest + data_len, data, scaled_data_len - data_len); | |
199 } else { | |
200 memset(dest, 0, data_len); | |
201 } | |
202 dest += scaled_data_len; | |
203 dest_len -= scaled_data_len; | |
204 dest_written += scaled_data_len; | |
205 | |
206 data_offset_ += data_len; | |
207 | |
208 if (rate == 0.0f) { | |
209 dest_written = 0; | |
210 break; | |
211 } | |
212 | |
213 // Check to see if we're finished with the front buffer. | |
214 if (buffer->GetDataSize() - data_offset_ < 16) { | |
215 // Update the time. If this is the last buffer in the queue, we'll | |
216 // drop out of the loop before len == 0, so we need to always update | |
217 // the time here. | |
218 if (buffer->GetTimestamp().InMicroseconds() > 0) { | |
219 last_fill_buffer_time_ = buffer->GetTimestamp() + | |
220 buffer->GetDuration(); | |
221 } | |
222 | |
223 // Dequeue the buffer and request another. | |
224 queue_.pop_front(); | |
225 ScheduleRead_Locked(); | |
226 | |
227 // Reset our offset into the front buffer. | |
228 data_offset_ = 0; | |
229 } else { | |
230 // If we're done with the read, compute the time. | |
231 // Integer divide so multiply before divide to work properly. | |
232 int64 us_written = (buffer->GetDuration().InMicroseconds() * | |
233 data_offset_) / buffer->GetDataSize(); | |
234 | |
235 if (buffer->GetTimestamp().InMicroseconds() > 0) { | |
236 last_fill_buffer_time_ = | |
237 buffer->GetTimestamp() + | |
238 base::TimeDelta::FromMicroseconds(us_written); | |
239 } | |
240 } | |
241 } | |
242 } | 176 } |
243 | 177 |
244 // Update the pipeline's time if it was set last time. | 178 // Update the pipeline's time if it was set last time. |
245 if (last_fill_buffer_time.InMicroseconds() > 0) { | 179 if (last_fill_buffer_time.InMicroseconds() > 0 && |
| 180 last_fill_buffer_time != last_fill_buffer_time_) { |
246 // Adjust the |last_fill_buffer_time| with the playback delay. | 181 // Adjust the |last_fill_buffer_time| with the playback delay. |
247 // TODO(hclam): If there is a playback delay, the pipeline would not be | 182 // TODO(hclam): If there is a playback delay, the pipeline would not be |
248 // updated with a correct timestamp when the stream is played at the very | 183 // updated with a correct timestamp when the stream is played at the very |
249 // end since we use decoded packets to trigger time updates. A better | 184 // end since we use decoded packets to trigger time updates. A better |
250 // solution is to start a timer when an audio packet is decoded to allow | 185 // solution is to start a timer when an audio packet is decoded to allow |
251 // finer time update events. | 186 // finer time update events. |
252 if (playback_delay < last_fill_buffer_time) | 187 if (playback_delay < last_fill_buffer_time) |
253 last_fill_buffer_time -= playback_delay; | 188 last_fill_buffer_time -= playback_delay; |
254 host()->SetTime(last_fill_buffer_time); | 189 host()->SetTime(last_fill_buffer_time); |
255 } | 190 } |
256 | 191 |
257 return dest_written; | 192 return dest_written; |
258 } | 193 } |
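For reference, the playback-rate scaling that this patch deletes from FillBuffer() (and hands off to the OLA algorithm) boiled down to the arithmetic below; the helper name is hypothetical, but the expression is copied from the removed code. As a worked example, 4096 bytes at rate 1.5 gives 4096 / 1.5 = 2730, which aligns down to 2720.

// Sketch of the removed scaling rule; the original inlined this expression
// directly in FillBuffer() rather than using a named helper.
size_t ScaledPacketSize(size_t data_len, float rate) {
  if (rate <= 0.0f)
    return 0;
  // Align down to a 16-byte boundary so the copy stays on a channel/sample
  // boundary; only guaranteed for power-of-two channel counts and sample sizes.
  return static_cast<size_t>(data_len / rate) & ~15;
}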
259 | 194 |
260 void AudioRendererBase::ScheduleRead_Locked() { | 195 void AudioRendererBase::ScheduleRead_Locked() { |
261 lock_.AssertAcquired(); | 196 lock_.AssertAcquired(); |
262 DCHECK_LT(pending_reads_, max_queue_size_); | |
263 ++pending_reads_; | 197 ++pending_reads_; |
264 decoder_->Read(NewCallback(this, &AudioRendererBase::OnReadComplete)); | 198 decoder_->Read(NewCallback(this, &AudioRendererBase::OnReadComplete)); |
265 } | 199 } |
266 | 200 |
267 // static | 201 // static |
268 bool AudioRendererBase::ParseMediaFormat(const MediaFormat& media_format, | 202 bool AudioRendererBase::ParseMediaFormat(const MediaFormat& media_format, |
269 int* channels_out, | 203 int* channels_out, |
270 int* sample_rate_out, | 204 int* sample_rate_out, |
271 int* sample_bits_out) { | 205 int* sample_bits_out) { |
272 // TODO(scherkus): might be handy to support NULL parameters. | 206 // TODO(scherkus): might be handy to support NULL parameters. |
273 std::string mime_type; | 207 std::string mime_type; |
274 return media_format.GetAsString(MediaFormat::kMimeType, &mime_type) && | 208 return media_format.GetAsString(MediaFormat::kMimeType, &mime_type) && |
275 media_format.GetAsInteger(MediaFormat::kChannels, channels_out) && | 209 media_format.GetAsInteger(MediaFormat::kChannels, channels_out) && |
276 media_format.GetAsInteger(MediaFormat::kSampleRate, sample_rate_out) && | 210 media_format.GetAsInteger(MediaFormat::kSampleRate, sample_rate_out) && |
277 media_format.GetAsInteger(MediaFormat::kSampleBits, sample_bits_out) && | 211 media_format.GetAsInteger(MediaFormat::kSampleBits, sample_bits_out) && |
278 mime_type.compare(mime_type::kUncompressedAudio) == 0; | 212 mime_type.compare(mime_type::kUncompressedAudio) == 0; |
279 } | 213 } |
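ParseMediaFormat() is also the check that Initialize() relies on above; a minimal usage sketch follows, assuming a hypothetical helper name (CanRenderAudio) and caller.

// Minimal sketch: reject anything that is not uncompressed audio or that is
// missing a channels/sample-rate/sample-bits entry. Helper name is invented.
static bool CanRenderAudio(AudioDecoder* decoder) {
  int channels = 0;
  int sample_rate = 0;
  int sample_bits = 0;
  return AudioRendererBase::ParseMediaFormat(decoder->media_format(),
                                             &channels,
                                             &sample_rate,
                                             &sample_bits);
}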
280 | 214 |
| 215 void AudioRendererBase::SetPlaybackRate(float playback_rate) { |
| 216 algorithm_->set_playback_rate(playback_rate); |
| 217 } |
| 218 |
| 219 float AudioRendererBase::GetPlaybackRate() { |
| 220 return algorithm_->playback_rate(); |
| 221 } |
| 222 |
281 } // namespace media | 223 } // namespace media |