| OLD | NEW |
| 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/renderer/media/audio_renderer_impl.h" | 5 #include "content/renderer/media/audio_renderer_impl.h" |
| 6 | 6 |
| 7 #include <math.h> | 7 #include <math.h> |
| 8 | 8 |
| 9 #include <algorithm> | 9 #include <algorithm> |
| 10 | 10 |
| 11 #include "base/bind.h" | 11 #include "base/bind.h" |
| 12 #include "base/command_line.h" | |
| 13 #include "content/common/child_process.h" | 12 #include "content/common/child_process.h" |
| 14 #include "content/common/media/audio_messages.h" | 13 #include "content/common/media/audio_messages.h" |
| 15 #include "content/public/common/content_switches.h" | |
| 16 #include "content/renderer/render_thread_impl.h" | 14 #include "content/renderer/render_thread_impl.h" |
| 17 #include "media/audio/audio_buffers_state.h" | 15 #include "media/audio/audio_buffers_state.h" |
| 18 #include "media/audio/audio_output_controller.h" | |
| 19 #include "media/audio/audio_util.h" | 16 #include "media/audio/audio_util.h" |
| 20 #include "media/base/filter_host.h" | |
| 21 | 17 |
| 22 // Static variable that says what code path we are using -- low or high | 18 // We define GetBufferSizeForSampleRate() instead of using |
| 23 // latency. Made separate variable so we don't have to go to command line | 19 // GetAudioHardwareBufferSize() in audio_util because we're using |
| 24 // for every DCHECK(). | 20 // the AUDIO_PCM_LINEAR flag, instead of AUDIO_PCM_LOW_LATENCY, |
| 25 AudioRendererImpl::LatencyType AudioRendererImpl::latency_type_ = | 21 // which the audio_util functions assume. |
| 26 AudioRendererImpl::kUninitializedLatency; | 22 // |
| | 23 // See: http://code.google.com/p/chromium/issues/detail?id=103627 |
| | 24 // for a more detailed description of the subtleties. |
| | 25 static size_t GetBufferSizeForSampleRate(int sample_rate) { |
| | 26 // kNominalBufferSize has been tested on Windows, Mac OS X, and Linux |
| | 27 // using the low-latency audio codepath (SyncSocket implementation) |
| | 28 // with the AUDIO_PCM_LINEAR flag. |
| | 29 const size_t kNominalBufferSize = 2048; |
| | 30 |
| | 31 if (sample_rate <= 48000) |
| | 32 return kNominalBufferSize; |
| | 33 else if (sample_rate <= 96000) |
| | 34 return kNominalBufferSize * 2; |
| | 35 return kNominalBufferSize * 4; |
| | 36 } |
| 27 | 37 |
| 28 AudioRendererImpl::AudioRendererImpl() | 38 AudioRendererImpl::AudioRendererImpl() |
| 29 : AudioRendererBase(), | 39 : AudioRendererBase(), |
| 30 bytes_per_second_(0), | 40 bytes_per_second_(0), |
| 31 stream_created_(false), | 41 stopped_(false) { |
| 32 stream_id_(0), | 42 // We create the AudioDevice here because it must be created in the |
| 33 shared_memory_(NULL), | 43 // main thread. But we don't yet know the audio format (sample-rate, etc.) |
| 34 shared_memory_size_(0), | 44 // at this point. Later, when OnInitialize() is called, we have |
| 35 stopped_(false), | 45 // the audio format information and call the AudioDevice::Initialize() |
| 36 pending_request_(false) { | 46 // method to fully initialize it. |
| 37 filter_ = RenderThreadImpl::current()->audio_message_filter(); | 47 audio_device_ = new AudioDevice(); |
| 38 // Figure out if we are planning to use high or low latency code path. | |
| 39 // We are initializing only one variable and double initialization is Ok, | |
| 40 // so there would not be any issues caused by CPU memory model. | |
| 41 if (latency_type_ == kUninitializedLatency) { | |
| 42 // Urgent workaround for | |
| 43 // http://code.google.com/p/chromium-os/issues/detail?id=21491 | |
| 44 // TODO(enal): Fix it properly. | |
| 45 #if defined(OS_CHROMEOS) | |
| 46 latency_type_ = kHighLatency; | |
| 47 #else | |
| 48 if (!CommandLine::ForCurrentProcess()->HasSwitch( | |
| 49 switches::kHighLatencyAudio)) { | |
| 50 latency_type_ = kLowLatency; | |
| 51 } else { | |
| 52 latency_type_ = kHighLatency; | |
| 53 } | |
| 54 #endif | |
| 55 } | |
| 56 } | 48 } |
| 57 | 49 |
| 58 AudioRendererImpl::~AudioRendererImpl() { | 50 AudioRendererImpl::~AudioRendererImpl() { |
| 59 } | 51 } |
| 60 | 52 |
| 61 // static | |
| 62 void AudioRendererImpl::set_latency_type(LatencyType latency_type) { | |
| 63 DCHECK_EQ(kUninitializedLatency, latency_type_); | |
| 64 latency_type_ = latency_type; | |
| 65 } | |
| 66 | |
| 67 base::TimeDelta AudioRendererImpl::ConvertToDuration(int bytes) { | 53 base::TimeDelta AudioRendererImpl::ConvertToDuration(int bytes) { |
| 68 if (bytes_per_second_) { | 54 if (bytes_per_second_) { |
| 69 return base::TimeDelta::FromMicroseconds( | 55 return base::TimeDelta::FromMicroseconds( |
| 70 base::Time::kMicrosecondsPerSecond * bytes / bytes_per_second_); | 56 base::Time::kMicrosecondsPerSecond * bytes / bytes_per_second_); |
| 71 } | 57 } |
| 72 return base::TimeDelta(); | 58 return base::TimeDelta(); |
| 73 } | 59 } |
| 74 | 60 |
| 75 void AudioRendererImpl::UpdateEarliestEndTime(int bytes_filled, | 61 void AudioRendererImpl::UpdateEarliestEndTime(int bytes_filled, |
| 76 base::TimeDelta request_delay, | 62 base::TimeDelta request_delay, |
| 77 base::Time time_now) { | 63 base::Time time_now) { |
| 78 if (bytes_filled != 0) { | 64 if (bytes_filled != 0) { |
| 79 base::TimeDelta predicted_play_time = ConvertToDuration(bytes_filled); | 65 base::TimeDelta predicted_play_time = ConvertToDuration(bytes_filled); |
| 80 float playback_rate = GetPlaybackRate(); | 66 float playback_rate = GetPlaybackRate(); |
| 81 if (playback_rate != 1.0f) { | 67 if (playback_rate != 1.0f) { |
| 82 predicted_play_time = base::TimeDelta::FromMicroseconds( | 68 predicted_play_time = base::TimeDelta::FromMicroseconds( |
| 83 static_cast<int64>(ceil(predicted_play_time.InMicroseconds() * | 69 static_cast<int64>(ceil(predicted_play_time.InMicroseconds() * |
| 84 playback_rate))); | 70 playback_rate))); |
| 85 } | 71 } |
| 86 earliest_end_time_ = | 72 earliest_end_time_ = |
| 87 std::max(earliest_end_time_, | 73 std::max(earliest_end_time_, |
| 88 time_now + request_delay + predicted_play_time); | 74 time_now + request_delay + predicted_play_time); |
| 89 } | 75 } |
| 90 } | 76 } |
| 91 | 77 |
| 92 bool AudioRendererImpl::OnInitialize(int bits_per_channel, | 78 bool AudioRendererImpl::OnInitialize(int bits_per_channel, |
| 93 ChannelLayout channel_layout, | 79 ChannelLayout channel_layout, |
| 94 int sample_rate) { | 80 int sample_rate) { |
| 95 AudioParameters params(AudioParameters::AUDIO_PCM_LINEAR, channel_layout, | 81 // We use the AUDIO_PCM_LINEAR flag because AUDIO_PCM_LOW_LATENCY |
| 96 sample_rate, bits_per_channel, 0); | 82 // does not currently support all the sample-rates that we require. |
| | 83 // Please see: http://code.google.com/p/chromium/issues/detail?id=103627 |
| | 84 // for more details. |
| | 85 audio_parameters_ = AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, |
| | 86 channel_layout, |
| | 87 sample_rate, |
| | 88 bits_per_channel, |
| | 89 0); |
| 97 | 90 |
| 98 bytes_per_second_ = params.GetBytesPerSecond(); | 91 bytes_per_second_ = audio_parameters_.GetBytesPerSecond(); |
| 99 | 92 |
| 100 ChildProcess::current()->io_message_loop()->PostTask( | 93 DCHECK(audio_device_.get()); |
| 101 FROM_HERE, | 94 |
| 102 base::Bind(&AudioRendererImpl::CreateStreamTask, this, params)); | 95 if (!audio_device_->IsInitialized()) { |
| | 96 audio_device_->Initialize( |
| | 97 GetBufferSizeForSampleRate(sample_rate), |
| | 98 audio_parameters_.channels, |
| | 99 audio_parameters_.sample_rate, |
| | 100 audio_parameters_.format, |
| | 101 this); |
| | 102 |
| | 103 audio_device_->Start(); |
| | 104 } |
| | 105 |
| 103 return true; | 106 return true; |
| 104 } | 107 } |
| 105 | 108 |
| 106 void AudioRendererImpl::OnStop() { | 109 void AudioRendererImpl::OnStop() { |
| 107 // Since joining with the audio thread can acquire lock_, we make sure to | |
| 108 // Join() with it not under lock. | |
| 109 base::DelegateSimpleThread* audio_thread = NULL; | |
| 110 { | |
| 111 base::AutoLock auto_lock(lock_); | |
| 112 if (stopped_) | |
| 113 return; | |
| 114 stopped_ = true; | |
| 115 | |
| 116 DCHECK_EQ(!audio_thread_.get(), !socket_.get()); | |
| 117 if (socket_.get()) | |
| 118 socket_->Close(); | |
| 119 if (audio_thread_.get()) | |
| 120 audio_thread = audio_thread_.get(); | |
| 121 | |
| 122 ChildProcess::current()->io_message_loop()->PostTask( | |
| 123 FROM_HERE, | |
| 124 base::Bind(&AudioRendererImpl::DestroyTask, this)); | |
| 125 } | |
| 126 | |
| 127 if (audio_thread) | |
| 128 audio_thread->Join(); | |
| 129 } | |
| 130 | |
| 131 void AudioRendererImpl::NotifyDataAvailableIfNecessary() { | |
| 132 if (latency_type_ == kHighLatency) { | |
| 133 // Post a task to render thread to notify a packet reception. | |
| 134 ChildProcess::current()->io_message_loop()->PostTask( | |
| 135 FROM_HERE, | |
| 136 base::Bind(&AudioRendererImpl::NotifyPacketReadyTask, this)); | |
| 137 } | |
| 138 } | |
| 139 | |
| 140 void AudioRendererImpl::ConsumeAudioSamples( | |
| 141 scoped_refptr<media::Buffer> buffer_in) { | |
| 142 base::AutoLock auto_lock(lock_); | |
| 143 if (stopped_) | 110 if (stopped_) |
| 144 return; | 111 return; |
| 145 | 112 |
| 146 // TODO(hclam): handle end of stream here. | 113 DCHECK(audio_device_.get()); |
| | 114 audio_device_->Stop(); |
| 147 | 115 |
| 148 // Use the base class to queue the buffer. | 116 stopped_ = true; |
| 149 AudioRendererBase::ConsumeAudioSamples(buffer_in); | |
| 150 | |
| 151 NotifyDataAvailableIfNecessary(); | |
| 152 } | 117 } |
| 153 | 118 |
| 154 void AudioRendererImpl::SetPlaybackRate(float rate) { | 119 void AudioRendererImpl::SetPlaybackRate(float rate) { |
| 155 DCHECK_LE(0.0f, rate); | 120 DCHECK_LE(0.0f, rate); |
| 156 | 121 |
| 157 base::AutoLock auto_lock(lock_); | |
| 158 // Handle the case where we stopped due to IO message loop dying. | 122 // Handle the case where we stopped due to IO message loop dying. |
| 159 if (stopped_) { | 123 if (stopped_) { |
| 160 AudioRendererBase::SetPlaybackRate(rate); | 124 AudioRendererBase::SetPlaybackRate(rate); |
| 161 return; | 125 return; |
| 162 } | 126 } |
| 163 | 127 |
| 164 // We have two cases here: | 128 // We have two cases here: |
| 165 // Play: GetPlaybackRate() == 0.0 && rate != 0.0 | 129 // Play: GetPlaybackRate() == 0.0 && rate != 0.0 |
| 166 // Pause: GetPlaybackRate() != 0.0 && rate == 0.0 | 130 // Pause: GetPlaybackRate() != 0.0 && rate == 0.0 |
| 167 if (GetPlaybackRate() == 0.0f && rate != 0.0f) { | 131 if (GetPlaybackRate() == 0.0f && rate != 0.0f) { |
| 168 ChildProcess::current()->io_message_loop()->PostTask( | 132 DoPlay(); |
| 169 FROM_HERE, | |
| 170 base::Bind(&AudioRendererImpl::PlayTask, this)); | |
| 171 } else if (GetPlaybackRate() != 0.0f && rate == 0.0f) { | 133 } else if (GetPlaybackRate() != 0.0f && rate == 0.0f) { |
| 172 // Pause is easy, we can always pause. | 134 // Pause is easy, we can always pause. |
| 173 ChildProcess::current()->io_message_loop()->PostTask( | 135 DoPause(); |
| 174 FROM_HERE, | |
| 175 base::Bind(&AudioRendererImpl::PauseTask, this)); | |
| 176 } | 136 } |
| 177 AudioRendererBase::SetPlaybackRate(rate); | 137 AudioRendererBase::SetPlaybackRate(rate); |
| 178 | |
| 179 // If we are playing, give a kick to try fulfilling the packet request as | |
| 180 // the previous packet request may be stalled by a pause. | |
| 181 if (rate > 0.0f) { | |
| 182 NotifyDataAvailableIfNecessary(); | |
| 183 } | |
| 184 } | 138 } |
| 185 | 139 |
| 186 void AudioRendererImpl::Pause(const base::Closure& callback) { | 140 void AudioRendererImpl::Pause(const base::Closure& callback) { |
| 187 AudioRendererBase::Pause(callback); | 141 AudioRendererBase::Pause(callback); |
| 188 base::AutoLock auto_lock(lock_); | |
| 189 if (stopped_) | 142 if (stopped_) |
| 190 return; | 143 return; |
| 191 | 144 |
| 192 ChildProcess::current()->io_message_loop()->PostTask( | 145 DoPause(); |
| 193 FROM_HERE, | |
| 194 base::Bind(&AudioRendererImpl::PauseTask, this)); | |
| 195 } | 146 } |
| 196 | 147 |
| 197 void AudioRendererImpl::Seek(base::TimeDelta time, | 148 void AudioRendererImpl::Seek(base::TimeDelta time, |
| 198 const media::FilterStatusCB& cb) { | 149 const media::FilterStatusCB& cb) { |
| 199 AudioRendererBase::Seek(time, cb); | 150 AudioRendererBase::Seek(time, cb); |
| 200 base::AutoLock auto_lock(lock_); | |
| 201 if (stopped_) | 151 if (stopped_) |
| 202 return; | 152 return; |
| 203 | 153 |
| 204 ChildProcess::current()->io_message_loop()->PostTask( | 154 DoSeek(); |
| 205 FROM_HERE, | |
| 206 base::Bind(&AudioRendererImpl::SeekTask, this)); | |
| 207 } | 155 } |
| 208 | 156 |
| 209 | |
| 210 void AudioRendererImpl::Play(const base::Closure& callback) { | 157 void AudioRendererImpl::Play(const base::Closure& callback) { |
| 211 AudioRendererBase::Play(callback); | 158 AudioRendererBase::Play(callback); |
| 212 base::AutoLock auto_lock(lock_); | |
| 213 if (stopped_) | 159 if (stopped_) |
| 214 return; | 160 return; |
| 215 | 161 |
| 216 if (GetPlaybackRate() != 0.0f) { | 162 if (GetPlaybackRate() != 0.0f) { |
| 217 ChildProcess::current()->io_message_loop()->PostTask( | 163 DoPlay(); |
| 218 FROM_HERE, | |
| 219 base::Bind(&AudioRendererImpl::PlayTask, this)); | |
| 220 } else { | 164 } else { |
| 221 ChildProcess::current()->io_message_loop()->PostTask( | 165 DoPause(); |
| 222 FROM_HERE, | |
| 223 base::Bind(&AudioRendererImpl::PauseTask, this)); | |
| 224 } | 166 } |
| 225 } | 167 } |
| 226 | 168 |
| 227 void AudioRendererImpl::SetVolume(float volume) { | 169 void AudioRendererImpl::SetVolume(float volume) { |
| 228 base::AutoLock auto_lock(lock_); | |
| 229 if (stopped_) | 170 if (stopped_) |
| 230 return; | 171 return; |
| 231 ChildProcess::current()->io_message_loop()->PostTask( | 172 DCHECK(audio_device_.get()); |
| 232 FROM_HERE, | 173 audio_device_->SetVolume(volume); |
| 233 base::Bind(&AudioRendererImpl::SetVolumeTask, this, volume)); | |
| 234 } | 174 } |
| 235 | 175 |
| 236 void AudioRendererImpl::OnCreated(base::SharedMemoryHandle handle, | 176 void AudioRendererImpl::DoPlay() { |
| 237 uint32 length) { | 177 earliest_end_time_ = base::Time::Now(); |
| 238 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | 178 DCHECK(audio_device_.get()); |
| 239 DCHECK_EQ(kHighLatency, latency_type_); | 179 audio_device_->Play(); |
| 240 | |
| 241 base::AutoLock auto_lock(lock_); | |
| 242 if (stopped_) | |
| 243 return; | |
| 244 | |
| 245 shared_memory_.reset(new base::SharedMemory(handle, false)); | |
| 246 shared_memory_->Map(length); | |
| 247 shared_memory_size_ = length; | |
| 248 } | 180 } |
| 249 | 181 |
| 250 void AudioRendererImpl::CreateSocket(base::SyncSocket::Handle socket_handle) { | 182 void AudioRendererImpl::DoPause() { |
| 251 DCHECK_EQ(kLowLatency, latency_type_); | 183 DCHECK(audio_device_.get()); |
| 252 #if defined(OS_WIN) | 184 audio_device_->Pause(false); |
| 253 DCHECK(socket_handle); | |
| 254 #else | |
| 255 DCHECK_GE(socket_handle, 0); | |
| 256 #endif | |
| 257 socket_.reset(new base::SyncSocket(socket_handle)); | |
| 258 } | 185 } |
| 259 | 186 |
| 260 void AudioRendererImpl::CreateAudioThread() { | 187 void AudioRendererImpl::DoSeek() { |
| 261 DCHECK_EQ(kLowLatency, latency_type_); | 188 earliest_end_time_ = base::Time::Now(); |
| 262 audio_thread_.reset( | 189 |
| 263 new base::DelegateSimpleThread(this, "renderer_audio_thread")); | 190 // Pause and flush the stream when we seek to a new location. |
| 264 audio_thread_->Start(); | 191 DCHECK(audio_device_.get()); |
| | 192 audio_device_->Pause(true); |
| 265 } | 193 } |
| 266 | 194 |
| 267 void AudioRendererImpl::OnLowLatencyCreated( | 195 void AudioRendererImpl::Render(const std::vector<float*>& audio_data, |
| 268 base::SharedMemoryHandle handle, | 196 size_t number_of_frames, |
| 269 base::SyncSocket::Handle socket_handle, | 197 size_t audio_delay_milliseconds) { |
| 270 uint32 length) { | 198 if (stopped_ || GetPlaybackRate() == 0.0f) { |
| 271 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | 199 // Output silence if stopped. |
| 272 DCHECK_EQ(kLowLatency, latency_type_); | 200 for (size_t i = 0; i < audio_data.size(); ++i) |
| 273 #if defined(OS_WIN) | 201 memset(audio_data[i], 0, sizeof(float) * number_of_frames); |
| 274 DCHECK(handle); | |
| 275 #else | |
| 276 DCHECK_GE(handle.fd, 0); | |
| 277 #endif | |
| 278 DCHECK_NE(0u, length); | |
| 279 | |
| 280 base::AutoLock auto_lock(lock_); | |
| 281 if (stopped_) | |
| 282 return; | 202 return; |
| 283 | |
| 284 shared_memory_.reset(new base::SharedMemory(handle, false)); | |
| 285 shared_memory_->Map(media::TotalSharedMemorySizeInBytes(length)); | |
| 286 shared_memory_size_ = length; | |
| 287 | |
| 288 CreateSocket(socket_handle); | |
| 289 CreateAudioThread(); | |
| 290 } | |
| 291 | |
| 292 void AudioRendererImpl::OnRequestPacket(AudioBuffersState buffers_state) { | |
| 293 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | |
| 294 DCHECK_EQ(kHighLatency, latency_type_); | |
| 295 { | |
| 296 base::AutoLock auto_lock(lock_); | |
| 297 DCHECK(!pending_request_); | |
| 298 pending_request_ = true; | |
| 299 request_buffers_state_ = buffers_state; | |
| 300 } | 203 } |
| 301 | 204 |
| 302 // Try to fulfill the packet request. | 205 // Adjust the playback delay. |
| 303 NotifyPacketReadyTask(); | 206 base::Time current_time = base::Time::Now(); |
| 304 } | |
| 305 | 207 |
| 306 void AudioRendererImpl::OnStateChanged(AudioStreamState state) { | 208 base::TimeDelta request_delay = |
| 307 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | 209 base::TimeDelta::FromMilliseconds(audio_delay_milliseconds); |
| 308 | 210 |
| 309 base::AutoLock auto_lock(lock_); | 211 // Finally we need to adjust the delay according to playback rate. |
| 310 if (stopped_) | 212 if (GetPlaybackRate() != 1.0f) { |
| 311 return; | 213 request_delay = base::TimeDelta::FromMicroseconds( |
| | 214 static_cast<int64>(ceil(request_delay.InMicroseconds() * |
| | 215 GetPlaybackRate()))); |
| | 216 } |
| 312 | 217 |
| 313 switch (state) { | 218 uint32 bytes_per_frame = |
| 314 case kAudioStreamError: | 219 audio_parameters_.bits_per_sample * audio_parameters_.channels / 8; |
| 315 // We receive this error if we encounter a hardware error on the browser | 220 |
| 316 // side. We can proceed with ignoring the audio stream. | 221 const size_t buf_size = number_of_frames * bytes_per_frame; |
| 317 // TODO(hclam): We need more handling of this kind of error. For example | 222 scoped_array<uint8> buf(new uint8[buf_size]); |
| 318 // re-try creating the audio output stream on the browser side or fail | 223 |
| 319 // nicely and report to demuxer that the whole audio stream is discarded. | 224 base::Time time_now = base::Time::Now(); |
| 320 host()->DisableAudioRenderer(); | 225 uint32 filled = FillBuffer(buf.get(), |
| 321 break; | 226 buf_size, |
| 322 // TODO(hclam): handle these events. | 227 request_delay, |
| 323 case kAudioStreamPlaying: | 228 time_now >= earliest_end_time_); |
| 324 case kAudioStreamPaused: | 229 DCHECK_LE(filled, buf_size); |
| 325 break; | 230 |
| 326 default: | 231 uint32 filled_frames = filled / bytes_per_frame; |
| 327 NOTREACHED(); | 232 |
| 328 break; | 233 // Deinterleave each audio channel. |
| | 234 int channels = audio_data.size(); |
| | 235 for (int channel_index = 0; channel_index < channels; ++channel_index) { |
| | 236 media::DeinterleaveAudioChannel(buf.get(), |
| | 237 audio_data[channel_index], |
| | 238 channels, |
| | 239 channel_index, |
| | 240 bytes_per_frame / channels, |
| | 241 filled_frames); |
| | 242 |
| | 243 // If FillBuffer() didn't give us enough data then zero out the remainder. |
| | 244 if (filled_frames < number_of_frames) { |
| | 245 int frames_to_zero = number_of_frames - filled_frames; |
| | 246 memset(audio_data[channel_index], 0, sizeof(float) * frames_to_zero); |
| | 247 } |
| 329 } | 248 } |
| 330 } | 249 } |
| 331 | |
| 332 void AudioRendererImpl::OnVolume(double volume) { | |
| 333 // TODO(hclam): decide whether we need to report the current volume to | |
| 334 // pipeline. | |
| 335 } | |
| 336 | |
| 337 void AudioRendererImpl::CreateStreamTask(const AudioParameters& audio_params) { | |
| 338 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | |
| 339 | |
| 340 base::AutoLock auto_lock(lock_); | |
| 341 if (stopped_) | |
| 342 return; | |
| 343 | |
| 344 stream_created_ = true; | |
| 345 | |
| 346 // Make sure we don't call create more than once. | |
| 347 DCHECK_EQ(0, stream_id_); | |
| 348 stream_id_ = filter_->AddDelegate(this); | |
| 349 ChildProcess::current()->io_message_loop()->AddDestructionObserver(this); | |
| 350 | |
| 351 AudioParameters params_to_send(audio_params); | |
| 352 // Let the browser choose packet size. | |
| 353 params_to_send.samples_per_packet = 0; | |
| 354 | |
| 355 Send(new AudioHostMsg_CreateStream(stream_id_, | |
| 356 params_to_send, | |
| 357 latency_type_ == kLowLatency)); | |
| 358 } | |
| 359 | |
| 360 void AudioRendererImpl::PlayTask() { | |
| 361 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | |
| 362 | |
| 363 earliest_end_time_ = base::Time::Now(); | |
| 364 Send(new AudioHostMsg_PlayStream(stream_id_)); | |
| 365 } | |
| 366 | |
| 367 void AudioRendererImpl::PauseTask() { | |
| 368 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | |
| 369 | |
| 370 Send(new AudioHostMsg_PauseStream(stream_id_)); | |
| 371 } | |
| 372 | |
| 373 void AudioRendererImpl::SeekTask() { | |
| 374 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | |
| 375 | |
| 376 earliest_end_time_ = base::Time::Now(); | |
| 377 // We have to pause the audio stream before we can flush. | |
| 378 Send(new AudioHostMsg_PauseStream(stream_id_)); | |
| 379 Send(new AudioHostMsg_FlushStream(stream_id_)); | |
| 380 } | |
| 381 | |
| 382 void AudioRendererImpl::DestroyTask() { | |
| 383 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | |
| 384 | |
| 385 base::AutoLock auto_lock(lock_); | |
| 386 // Errors can cause us to get here before CreateStreamTask ever ran, in which | |
| 387 // case there's nothing to do. | |
| 388 if (!stream_created_) | |
| 389 return; | |
| 390 | |
| 391 // Make sure we don't call destroy more than once. | |
| 392 DCHECK_NE(0, stream_id_); | |
| 393 filter_->RemoveDelegate(stream_id_); | |
| 394 Send(new AudioHostMsg_CloseStream(stream_id_)); | |
| 395 // During shutdown this may be NULL; don't worry about deregistering in that | |
| 396 // case. | |
| 397 if (ChildProcess::current()) | |
| 398 ChildProcess::current()->io_message_loop()->RemoveDestructionObserver(this); | |
| 399 stream_id_ = 0; | |
| 400 } | |
| 401 | |
| 402 void AudioRendererImpl::SetVolumeTask(double volume) { | |
| 403 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | |
| 404 | |
| 405 base::AutoLock auto_lock(lock_); | |
| 406 if (stopped_) | |
| 407 return; | |
| 408 Send(new AudioHostMsg_SetVolume(stream_id_, volume)); | |
| 409 } | |
| 410 | |
| 411 void AudioRendererImpl::NotifyPacketReadyTask() { | |
| 412 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | |
| 413 DCHECK_EQ(kHighLatency, latency_type_); | |
| 414 | |
| 415 base::AutoLock auto_lock(lock_); | |
| 416 if (stopped_) | |
| 417 return; | |
| 418 if (pending_request_ && GetPlaybackRate() > 0.0f) { | |
| 419 DCHECK(shared_memory_.get()); | |
| 420 | |
| 421 // Adjust the playback delay. | |
| 422 base::Time current_time = base::Time::Now(); | |
| 423 | |
| 424 base::TimeDelta request_delay = | |
| 425 ConvertToDuration(request_buffers_state_.total_bytes()); | |
| 426 | |
| 427 // Add message delivery delay. | |
| 428 if (current_time > request_buffers_state_.timestamp) { | |
| 429 base::TimeDelta receive_latency = | |
| 430 current_time - request_buffers_state_.timestamp; | |
| 431 | |
| 432 // If the receive latency is too much it may offset all the delay. | |
| 433 if (receive_latency >= request_delay) { | |
| 434 request_delay = base::TimeDelta(); | |
| 435 } else { | |
| 436 request_delay -= receive_latency; | |
| 437 } | |
| 438 } | |
| 439 | |
| 440 // Finally we need to adjust the delay according to playback rate. | |
| 441 if (GetPlaybackRate() != 1.0f) { | |
| 442 request_delay = base::TimeDelta::FromMicroseconds( | |
| 443 static_cast<int64>(ceil(request_delay.InMicroseconds() * | |
| 444 GetPlaybackRate()))); | |
| 445 } | |
| 446 | |
| 447 bool buffer_empty = (request_buffers_state_.pending_bytes == 0) && | |
| 448 (current_time >= earliest_end_time_); | |
| 449 | |
| 450 // For high latency mode we don't write length into shared memory, | |
| 451 // it is explicit part of AudioHostMsg_NotifyPacketReady() message, | |
| 452 // so no need to reserve first word of buffer for length. | |
| 453 uint32 filled = FillBuffer(static_cast<uint8*>(shared_memory_->memory()), | |
| 454 shared_memory_size_, request_delay, | |
| 455 buffer_empty); | |
| 456 UpdateEarliestEndTime(filled, request_delay, current_time); | |
| 457 pending_request_ = false; | |
| 458 | |
| 459 // Then tell browser process we are done filling into the buffer. | |
| 460 Send(new AudioHostMsg_NotifyPacketReady(stream_id_, filled)); | |
| 461 } | |
| 462 } | |
| 463 | |
| 464 void AudioRendererImpl::WillDestroyCurrentMessageLoop() { | |
| 465 DCHECK(!ChildProcess::current() || // During shutdown. | |
| 466 (MessageLoop::current() == | |
| 467 ChildProcess::current()->io_message_loop())); | |
| 468 | |
| 469 // We treat the IO loop going away the same as stopping. | |
| 470 { | |
| 471 base::AutoLock auto_lock(lock_); | |
| 472 if (stopped_) | |
| 473 return; | |
| 474 | |
| 475 stopped_ = true; | |
| 476 } | |
| 477 DestroyTask(); | |
| 478 } | |
| 479 | |
| 480 // Our audio thread runs here. We receive requests for more data and send it | |
| 481 // on this thread. | |
| 482 void AudioRendererImpl::Run() { | |
| 483 DCHECK_EQ(kLowLatency, latency_type_); | |
| 484 audio_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio); | |
| 485 | |
| 486 int bytes; | |
| 487 while (sizeof(bytes) == socket_->Receive(&bytes, sizeof(bytes))) { | |
| 488 if (bytes == media::AudioOutputController::kPauseMark) { | |
| 489 // When restarting playback, host should get new data, | |
| 490 // not what is currently in the buffer. | |
| 491 media::SetActualDataSizeInBytes(shared_memory_.get(), | |
| 492 shared_memory_size_, | |
| 493 0); | |
| 494 continue; | |
| 495 } | |
| 496 else if (bytes < 0) | |
| 497 break; | |
| 498 base::AutoLock auto_lock(lock_); | |
| 499 if (stopped_) | |
| 500 break; | |
| 501 float playback_rate = GetPlaybackRate(); | |
| 502 if (playback_rate <= 0.0f) | |
| 503 continue; | |
| 504 DCHECK(shared_memory_.get()); | |
| 505 base::TimeDelta request_delay = ConvertToDuration(bytes); | |
| 506 | |
| 507 // We need to adjust the delay according to playback rate. | |
| 508 if (playback_rate != 1.0f) { | |
| 509 request_delay = base::TimeDelta::FromMicroseconds( | |
| 510 static_cast<int64>(ceil(request_delay.InMicroseconds() * | |
| 511 playback_rate))); | |
| 512 } | |
| 513 base::Time time_now = base::Time::Now(); | |
| 514 uint32 size = FillBuffer(static_cast<uint8*>(shared_memory_->memory()), | |
| 515 shared_memory_size_, | |
| 516 request_delay, | |
| 517 time_now >= earliest_end_time_); | |
| 518 media::SetActualDataSizeInBytes(shared_memory_.get(), | |
| 519 shared_memory_size_, | |
| 520 size); | |
| 521 UpdateEarliestEndTime(size, request_delay, time_now); | |
| 522 } | |
| 523 } | |
| 524 | |
| 525 void AudioRendererImpl::Send(IPC::Message* message) { | |
| 526 filter_->Send(message); | |
| 527 } | |
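Note: the buffer sizes picked by GetBufferSizeForSampleRate() in the new code keep the per-callback latency roughly constant across sample rates (2048 frames at 44.1/48 kHz is about 43-46 ms). The standalone C++ sketch below is not part of the change; it simply mirrors that selection logic and adds the latency arithmetic (frames / sample_rate) as an illustration.

```cpp
// Illustration only: mirrors GetBufferSizeForSampleRate() from the NEW column
// above and prints the per-callback latency it implies at common sample rates.
// The latency math is added for this example and is not part of the change.
#include <cstdio>

static size_t GetBufferSizeForSampleRate(int sample_rate) {
  const size_t kNominalBufferSize = 2048;
  if (sample_rate <= 48000)
    return kNominalBufferSize;
  if (sample_rate <= 96000)
    return kNominalBufferSize * 2;
  return kNominalBufferSize * 4;
}

int main() {
  const int rates[] = { 44100, 48000, 96000, 192000 };
  for (int rate : rates) {
    const size_t frames = GetBufferSizeForSampleRate(rate);
    const double latency_ms = 1000.0 * frames / rate;
    // e.g. 2048 frames at 44100 Hz -> ~46.4 ms per Render() callback.
    std::printf("%6d Hz -> %5zu frames (~%.1f ms)\n", rate, frames, latency_ms);
  }
  return 0;
}
```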