| OLD | NEW |
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef CONTENT_RENDERER_MEDIA_WEBRTC_AUDIO_DEVICE_IMPL_H_ | 5 #ifndef CONTENT_RENDERER_MEDIA_WEBRTC_AUDIO_DEVICE_IMPL_H_ |
| 6 #define CONTENT_RENDERER_MEDIA_WEBRTC_AUDIO_DEVICE_IMPL_H_ | 6 #define CONTENT_RENDERER_MEDIA_WEBRTC_AUDIO_DEVICE_IMPL_H_ |
| 7 | 7 |
| 8 #include <string> | 8 #include <string> |
| 9 #include <vector> | 9 #include <vector> |
| 10 | 10 |
| (...skipping 168 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 179 namespace content { | 179 namespace content { |
| 180 | 180 |
| 181 class WebRtcAudioCapturer; | 181 class WebRtcAudioCapturer; |
| 182 class WebRtcAudioRenderer; | 182 class WebRtcAudioRenderer; |
| 183 | 183 |
| 184 // TODO(xians): Move the following two interfaces to webrtc so that | 184 // TODO(xians): Move the following two interfaces to webrtc so that |
| 185 // libjingle can own references to the renderer and capturer. | 185 // libjingle can own references to the renderer and capturer. |
| 186 class WebRtcAudioRendererSource { | 186 class WebRtcAudioRendererSource { |
| 187 public: | 187 public: |
| 188 // Callback to get the rendered interleaved data. | 188 // Callback to get the rendered data. |
| 189 // TODO(xians): Change uint8* to int16*. | 189 virtual void RenderData(media::AudioBus* audio_bus, |
| 190 virtual void RenderData(uint8* audio_data, | 190 int sample_rate, |
| 191 int number_of_channels, | |
| 192 int number_of_frames, | |
| 193 int audio_delay_milliseconds) = 0; | 191 int audio_delay_milliseconds) = 0; |
| 194 | 192 |
| 195 // Set the format for the capture audio parameters. | |
| 196 virtual void SetRenderFormat(const media::AudioParameters& params) = 0; | |
| 197 | |
| 198 // Callback to notify the client that the renderer is going away. | 193 // Callback to notify the client that the renderer is going away. |
| 199 virtual void RemoveAudioRenderer(WebRtcAudioRenderer* renderer) = 0; | 194 virtual void RemoveAudioRenderer(WebRtcAudioRenderer* renderer) = 0; |
| 200 | 195 |
| 201 protected: | 196 protected: |
| 202 virtual ~WebRtcAudioRendererSource() {} | 197 virtual ~WebRtcAudioRendererSource() {} |
| 203 }; | 198 }; |
| 204 | 199 |
| 205 class PeerConnectionAudioSink { | 200 class PeerConnectionAudioSink { |
| 206 public: | 201 public: |
| 207 // Callback to deliver the captured interleaved data. | 202 // Callback to deliver the captured interleaved data. |
| (...skipping 90 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 298 // Sets the |renderer_|, returns false if |renderer_| already exists. | 293 // Sets the |renderer_|, returns false if |renderer_| already exists. |
| 299 // Called on the main renderer thread. | 294 // Called on the main renderer thread. |
| 300 bool SetAudioRenderer(WebRtcAudioRenderer* renderer); | 295 bool SetAudioRenderer(WebRtcAudioRenderer* renderer); |
| 301 | 296 |
| 302 // Adds/Removes the capturer to the ADM. | 297 // Adds/Removes the capturer to the ADM. |
| 303 // TODO(xians): Remove these two methods once the ADM does not need to pass | 298 // TODO(xians): Remove these two methods once the ADM does not need to pass |
| 304 // hardware information up to WebRtc. | 299 // hardware information up to WebRtc. |
| 305 void AddAudioCapturer(const scoped_refptr<WebRtcAudioCapturer>& capturer); | 300 void AddAudioCapturer(const scoped_refptr<WebRtcAudioCapturer>& capturer); |
| 306 void RemoveAudioCapturer(const scoped_refptr<WebRtcAudioCapturer>& capturer); | 301 void RemoveAudioCapturer(const scoped_refptr<WebRtcAudioCapturer>& capturer); |
| 307 | 302 |
| 303 // Adds/Removes an observer of WebRtcAudioRendererSource to/from the ADM. |
| 304 // These methods are used by the MediaStreamAudioProcessor to get the |
| 305 // render data for AEC. |
| 306 void AddRenderDataObserver(WebRtcAudioRendererSource* observer); |
| 307 void RemoveRenderDataObserver(WebRtcAudioRendererSource* observer); |
| 308 |
| 308 // Gets paired device information of the capture device for the audio | 309 // Gets paired device information of the capture device for the audio |
| 309 // renderer. This is used to pass on a session id, sample rate and buffer | 310 // renderer. This is used to pass on a session id, sample rate and buffer |
| 310 // size to a webrtc audio renderer (either local or remote), so that audio | 311 // size to a webrtc audio renderer (either local or remote), so that audio |
| 311 // will be rendered to a matching output device. | 312 // will be rendered to a matching output device. |
| 312 // Returns true if the capture device has a paired output device, otherwise | 313 // Returns true if the capture device has a paired output device, otherwise |
| 313 // false. Note that if there are more than one open capture device the | 314 // false. Note that if there are more than one open capture device the |
| 314 // function will not be able to pick an appropriate device and return false. | 315 // function will not be able to pick an appropriate device and return false. |
| 315 bool GetAuthorizedDeviceInfoForAudioRenderer( | 316 bool GetAuthorizedDeviceInfoForAudioRenderer( |
| 316 int* session_id, int* output_sample_rate, int* output_buffer_size); | 317 int* session_id, int* output_sample_rate, int* output_buffer_size); |
| 317 | 318 |
| 318 const scoped_refptr<WebRtcAudioRenderer>& renderer() const { | 319 const scoped_refptr<WebRtcAudioRenderer>& renderer() const { |
| 319 return renderer_; | 320 return renderer_; |
| 320 } | 321 } |
| 321 int output_buffer_size() const { | |
| 322 return output_audio_parameters_.frames_per_buffer(); | |
| 323 } | |
| 324 int output_channels() const { | |
| 325 return output_audio_parameters_.channels(); | |
| 326 } | |
| 327 int output_sample_rate() const { | |
| 328 return output_audio_parameters_.sample_rate(); | |
| 329 } | |
| 330 | 322 |
| 331 private: | 323 private: |
| 332 typedef std::list<scoped_refptr<WebRtcAudioCapturer> > CapturerList; | 324 typedef std::list<scoped_refptr<WebRtcAudioCapturer> > CapturerList; |
| 325 typedef std::list<WebRtcAudioRendererSource* > RenderDataObservers; |
| 326 class RenderBuffer; |
| 333 | 327 |
| 334 // Make destructor private to ensure that we can only be deleted by Release(). | 328 // Make destructor private to ensure that we can only be deleted by Release(). |
| 335 virtual ~WebRtcAudioDeviceImpl(); | 329 virtual ~WebRtcAudioDeviceImpl(); |
| 336 | 330 |
| 337 // PeerConnectionAudioSink implementation. | 331 // PeerConnectionAudioSink implementation. |
| 338 | 332 |
| 339 // Called on the AudioInputDevice worker thread. | 333 // Called on the AudioInputDevice worker thread. |
| 340 virtual int OnData(const int16* audio_data, | 334 virtual int OnData(const int16* audio_data, |
| 341 int sample_rate, | 335 int sample_rate, |
| 342 int number_of_channels, | 336 int number_of_channels, |
| 343 int number_of_frames, | 337 int number_of_frames, |
| 344 const std::vector<int>& channels, | 338 const std::vector<int>& channels, |
| 345 int audio_delay_milliseconds, | 339 int audio_delay_milliseconds, |
| 346 int current_volume, | 340 int current_volume, |
| 347 bool need_audio_processing, | 341 bool need_audio_processing, |
| 348 bool key_pressed) OVERRIDE; | 342 bool key_pressed) OVERRIDE; |
| 349 | 343 |
| 350 // Called on the AudioInputDevice worker thread. | 344 // Called on the AudioInputDevice worker thread. |
| 351 virtual void OnSetFormat(const media::AudioParameters& params) OVERRIDE; | 345 virtual void OnSetFormat(const media::AudioParameters& params) OVERRIDE; |
| 352 | 346 |
| 353 // WebRtcAudioRendererSource implementation. | 347 // WebRtcAudioRendererSource implementation. |
| 354 | 348 |
| 355 // Called on the AudioInputDevice worker thread. | 349 // Called on the AudioInputDevice worker thread. |
| 356 virtual void RenderData(uint8* audio_data, | 350 virtual void RenderData(media::AudioBus* audio_bus, |
| 357 int number_of_channels, | 351 int sample_rate, |
| 358 int number_of_frames, | |
| 359 int audio_delay_milliseconds) OVERRIDE; | 352 int audio_delay_milliseconds) OVERRIDE; |
| 360 | 353 |
| 361 // Called on the main render thread. | 354 // Called on the main render thread. |
| 362 virtual void SetRenderFormat(const media::AudioParameters& params) OVERRIDE; | |
| 363 virtual void RemoveAudioRenderer(WebRtcAudioRenderer* renderer) OVERRIDE; | 355 virtual void RemoveAudioRenderer(WebRtcAudioRenderer* renderer) OVERRIDE; |
| 364 | 356 |
| 365 // Helper to get the default capturer, which is the last capturer in | 357 // Helper to get the default capturer, which is the last capturer in |
| 366 // |capturers_|. | 358 // |capturers_|. |
| 367 scoped_refptr<WebRtcAudioCapturer> GetDefaultCapturer() const; | 359 scoped_refptr<WebRtcAudioCapturer> GetDefaultCapturer() const; |
| 368 | 360 |
| 369 // Used to DCHECK that we are called on the correct thread. | 361 // Used to DCHECK that we are called on the correct thread. |
| 370 base::ThreadChecker thread_checker_; | 362 base::ThreadChecker thread_checker_; |
| 371 | 363 |
| 372 int ref_count_; | 364 int ref_count_; |
| 373 | 365 |
| 374 // List of capturers which provide access to the native audio input layer | 366 // List of capturers which provide access to the native audio input layer |
| 375 // in the browser process. | 367 // in the browser process. |
| 376 CapturerList capturers_; | 368 CapturerList capturers_; |
| 377 | 369 |
| 378 // Provides access to the audio renderer in the browser process. | 370 // Provides access to the audio renderer in the browser process. |
| 379 scoped_refptr<WebRtcAudioRenderer> renderer_; | 371 scoped_refptr<WebRtcAudioRenderer> renderer_; |
| 380 | 372 |
| 373 // List of observers which require access to the render data. |
| 374 RenderDataObservers render_data_observers_; |
| 375 |
| 381 // Weak reference to the audio callback. | 376 // Weak reference to the audio callback. |
| 382 // The webrtc client defines |audio_transport_callback_| by calling | 377 // The webrtc client defines |audio_transport_callback_| by calling |
| 383 // RegisterAudioCallback(). | 378 // RegisterAudioCallback(). |
| 384 webrtc::AudioTransport* audio_transport_callback_; | 379 webrtc::AudioTransport* audio_transport_callback_; |
| 385 | 380 |
| 386 // Cached values of used output audio parameters. Platform dependent. | |
| 387 media::AudioParameters output_audio_parameters_; | |
| 388 | |
| 389 // Cached value of the current audio delay on the input/capture side. | 381 // Cached value of the current audio delay on the input/capture side. |
| 390 int input_delay_ms_; | 382 int input_delay_ms_; |
| 391 | 383 |
| 392 // Cached value of the current audio delay on the output/renderer side. | 384 // Cached value of the current audio delay on the output/renderer side. |
| 393 int output_delay_ms_; | 385 int output_delay_ms_; |
| 394 | 386 |
| 395 // Protects |recording_|, |output_delay_ms_|, |input_delay_ms_|, |renderer_| | 387 // Protects |recording_|, |output_delay_ms_|, |input_delay_ms_|, |renderer_| |
| 396 // and |microphone_volume_|. | 388 // and |microphone_volume_|. |
| 397 mutable base::Lock lock_; | 389 mutable base::Lock lock_; |
| 398 | 390 |
| 399 // Used to protect the racing of calling OnData() since there can be more | 391 // Used to protect the racing of calling OnData() since there can be more |
| 400 // than one input stream calling OnData(). | 392 // than one input stream calling OnData(). |
| 401 mutable base::Lock capture_callback_lock_; | 393 mutable base::Lock capture_callback_lock_; |
| 402 | 394 |
| 403 bool initialized_; | 395 bool initialized_; |
| 404 bool playing_; | 396 bool playing_; |
| 405 bool recording_; | 397 bool recording_; |
| 406 | 398 |
| 407 // Used for histograms of total recording and playout times. | 399 // Used for histograms of total recording and playout times. |
| 408 base::Time start_capture_time_; | 400 base::Time start_capture_time_; |
| 409 base::Time start_render_time_; | 401 base::Time start_render_time_; |
| 410 | 402 |
| 411 // Stores latest microphone volume received in a CaptureData() callback. | 403 // Stores latest microphone volume received in a CaptureData() callback. |
| 412 // Range is [0, 255]. | 404 // Range is [0, 255]. |
| 413 uint32_t microphone_volume_; | 405 uint32_t microphone_volume_; |
| 414 | 406 |
| 407 // Buffer used for temporary storage during render callback. |
| 408 // It is only accessed by the audio render thread. |
| 409 scoped_ptr<int16[]> render_buffer_; |
| 410 int render_buffer_size_; |
| 411 |
| 415 DISALLOW_COPY_AND_ASSIGN(WebRtcAudioDeviceImpl); | 412 DISALLOW_COPY_AND_ASSIGN(WebRtcAudioDeviceImpl); |
| 416 }; | 413 }; |
| 417 | 414 |
| 418 } // namespace content | 415 } // namespace content |
| 419 | 416 |
| 420 #endif // CONTENT_RENDERER_MEDIA_WEBRTC_AUDIO_DEVICE_IMPL_H_ | 417 #endif // CONTENT_RENDERER_MEDIA_WEBRTC_AUDIO_DEVICE_IMPL_H_ |
| OLD | NEW |