Index: content/renderer/media/webrtc/processed_local_audio_source.h |
diff --git a/content/renderer/media/webrtc/processed_local_audio_source.h b/content/renderer/media/webrtc/processed_local_audio_source.h |
new file mode 100644 |
index 0000000000000000000000000000000000000000..ea2875ae12149f18f0c09293adc349d9b1a02bb9 |
--- /dev/null |
+++ b/content/renderer/media/webrtc/processed_local_audio_source.h |
@@ -0,0 +1,128 @@ |
+// Copyright 2016 The Chromium Authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
+#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_PROCESSED_LOCAL_AUDIO_SOURCE_H_ |
+#define CONTENT_RENDERER_MEDIA_WEBRTC_PROCESSED_LOCAL_AUDIO_SOURCE_H_ |
+ |
+#include "base/memory/ref_counted.h" |
+#include "base/synchronization/lock.h" |
+#include "content/common/media/media_stream_options.h" |
+#include "content/renderer/media/media_stream_audio_level_calculator.h" |
+#include "content/renderer/media/media_stream_audio_source.h" |
+#include "content/renderer/media/webrtc/peer_connection_dependency_factory.h" |
+#include "media/base/audio_capturer_source.h" |
+#include "third_party/WebKit/public/platform/WebMediaConstraints.h" |
+#include "third_party/webrtc/api/mediastreaminterface.h" |
+ |
+namespace media { |
+class AudioInputDevice; |
+}  // namespace media |
+ |
+namespace content { |
+ |
+class MediaStreamAudioProcessor; |
+ |
+// Represents a local source of audio data that is routed through the WebRTC |
+// audio pipeline for post-processing. Owns a media::AudioInputDevice and the |
+// MediaStreamAudioProcessor that modifies its audio. Modified audio is |
+// delivered to WebRtcLocalAudioTracks. |
+class CONTENT_EXPORT ProcessedLocalAudioSource |
+    : NON_EXPORTED_BASE(public MediaStreamAudioSource), |
+      NON_EXPORTED_BASE(public media::AudioCapturerSource::CaptureCallback) { |
+ public: |
+  // |consumer_render_frame_id| references the RenderFrame that will consume the |
+  // audio data. Audio parameters and (optionally) a pre-existing audio session |
+  // ID are read from |device_info|. |factory| must outlive this instance. |
+  ProcessedLocalAudioSource(int consumer_render_frame_id, |
+                            const StreamDeviceInfo& device_info, |
+                            PeerConnectionDependencyFactory* factory); |
+ |
+  ~ProcessedLocalAudioSource() final; |
+ |
+  // If |source| is an instance of ProcessedLocalAudioSource, return a |
+  // type-casted pointer to it. Otherwise, return null. |
+  static ProcessedLocalAudioSource* From(MediaStreamAudioSource* source); |
+ |
+  // Non-browser unit tests cannot provide RenderFrame implementations at |
+  // run-time. This is used to skip the otherwise mandatory check for a valid |
+  // render frame ID when the source is started. |
+  void SetAllowInvalidRenderFrameIdForTesting(bool allowed) { |
+    allow_invalid_render_frame_id_for_testing_ = allowed; |
+  } |
+ |
+  // Gets/Sets source constraints. Using this is optional, but must be done |
+  // before the first call to ConnectToTrack(). |
+  blink::WebMediaConstraints source_constraints() const { return constraints_; } |
+  void SetSourceConstraints(const blink::WebMediaConstraints& constraints); |
+ |
+  // Not valid until after the source is started (when the first track is |
+  // connected). |
+  webrtc::AudioSourceInterface* rtc_source() const { return rtc_source_.get(); } |
+ |
+  // Thread-safe volume accessors used by WebRtcAudioDeviceImpl. |
+  void SetVolume(int volume); |
+  int Volume() const; |
+  int MaxVolume() const; |
+ |
+  // Thread-safe accessor for querying the audio format prior to processing. |
+  // TODO(phoglund): Think over the implications of this accessor and if we can |
+  // remove it. |
+  media::AudioParameters GetInputAudioParameters() const; |
+ |
+ protected: |
+  // MediaStreamAudioSource implementation. |
+  void* GetClassIdentifier() const final; |
+  void DoStopSource() final; |
+  scoped_ptr<MediaStreamAudioTrack> CreateMediaStreamAudioTrack( |
+      const std::string& id) final; |
+  bool EnsureSourceIsStarted() final; |
+ |
+  // AudioCapturerSource::CaptureCallback implementation. |
+  void Capture(const media::AudioBus* audio_bus, |
+               int audio_delay_milliseconds, |
+               double volume, |
+               bool key_pressed) override; |
+  void OnCaptureError(const std::string& message) override; |
+ |
+ private: |
+  // The RenderFrame that will consume the audio data. Used when creating |
+  // AudioInputDevices via the AudioDeviceFactory. |
+  const int consumer_render_frame_id_; |
+ |
+  PeerConnectionDependencyFactory* const pc_factory_; |
+ |
+  // Constraints used when initializing and starting the source. |
+  blink::WebMediaConstraints constraints_; |
+ |
+  // Audio processor doing processing like FIFO, AGC, AEC and NS. Its output |
+  // data is in a unit of 10 ms data chunk. Created by EnsureSourceIsStarted(). |
+  scoped_refptr<MediaStreamAudioProcessor> audio_processor_; |
+ |
+  // The device created by the AudioDeviceFactory in EnsureSourceIsStarted(). |
+  // This is set once, and should never be changed again to avoid a |
+  // thread-race condition in the SetVolume() method. |
+  scoped_refptr<media::AudioInputDevice> input_device_; |
+ |
+  // Holder for WebRTC audio pipeline objects. Created in |
+  // EnsureSourceIsStarted(). |
+  scoped_refptr<webrtc::AudioSourceInterface> rtc_source_; |
+ |
+  // Protects data elements from concurrent access via the volume methods. |
+  mutable base::Lock volume_lock_; |
+ |
+  // Stores latest microphone volume received in a Capture() callback. |
+  // Range is [0, 255]. |
+  int exposed_volume_; |
+ |
+  // Used to calculate the signal level that shows in the UI. |
+  MediaStreamAudioLevelCalculator level_calculator_; |
+ |
+  bool allow_invalid_render_frame_id_for_testing_; |
+ |
+  DISALLOW_COPY_AND_ASSIGN(ProcessedLocalAudioSource); |
+}; |
+ |
+}  // namespace content |
+ |
+#endif  // CONTENT_RENDERER_MEDIA_WEBRTC_PROCESSED_LOCAL_AUDIO_SOURCE_H_ |