| Index: content/renderer/media/webrtc_local_audio_track.cc
|
| diff --git a/content/renderer/media/webrtc_local_audio_track.cc b/content/renderer/media/webrtc_local_audio_track.cc
|
| index cae6bfe8ba38bcd85a32f75cf6c6faaa6d1514e5..1ada43fd474a26aaad3d83463ebdbda17df51e95 100644
|
| --- a/content/renderer/media/webrtc_local_audio_track.cc
|
| +++ b/content/renderer/media/webrtc_local_audio_track.cc
|
| @@ -12,28 +12,74 @@ namespace content {
|
|
|
| static const char kAudioTrackKind[] = "audio";
|
|
|
| +namespace {
|
| +
|
| +using webrtc::MediaConstraintsInterface;
|
| +
|
| +// This helper function checks if any audio constraints are set that require
|
| +// audio processing to be applied. Right now this is one combined check over
| 
| +// all of the properties, but in the future they'll be handled one by one.
|
| +bool NeedsAudioProcessing(
|
| + const webrtc::MediaConstraintsInterface* constraints) {
|
| + if (!constraints)
|
| + return false;
|
| +
|
| + static const char* kAudioProcessingProperties[] = {
|
| + MediaConstraintsInterface::kEchoCancellation,
|
| + MediaConstraintsInterface::kExperimentalEchoCancellation,
|
| + MediaConstraintsInterface::kAutoGainControl,
|
| + MediaConstraintsInterface::kExperimentalAutoGainControl,
|
| + MediaConstraintsInterface::kNoiseSuppression,
|
| + MediaConstraintsInterface::kHighpassFilter,
|
| + };
|
| +
|
| + for (size_t i = 0; i < arraysize(kAudioProcessingProperties); ++i) {
|
| + bool value = false;
|
| + if (webrtc::FindConstraint(constraints, kAudioProcessingProperties[i],
|
| + &value, NULL) &&
|
| + value) {
|
| + return true;
|
| + }
|
| + }
|
| +
|
| + return false;
|
| +}
|
| +
|
| +}  // namespace
|
| +
|
| scoped_refptr<WebRtcLocalAudioTrack> WebRtcLocalAudioTrack::Create(
|
| const std::string& id,
|
| const scoped_refptr<WebRtcAudioCapturer>& capturer,
|
| - webrtc::AudioSourceInterface* track_source) {
|
| + webrtc::AudioSourceInterface* track_source,
|
| + const webrtc::MediaConstraintsInterface* constraints) {
|
| talk_base::RefCountedObject<WebRtcLocalAudioTrack>* track =
|
| new talk_base::RefCountedObject<WebRtcLocalAudioTrack>(
|
| - id, capturer, track_source);
|
| + id, capturer, track_source, constraints);
|
| return track;
|
| }
|
|
|
| WebRtcLocalAudioTrack::WebRtcLocalAudioTrack(
|
| const std::string& label,
|
| const scoped_refptr<WebRtcAudioCapturer>& capturer,
|
| - webrtc::AudioSourceInterface* track_source)
|
| + webrtc::AudioSourceInterface* track_source,
|
| + const webrtc::MediaConstraintsInterface* constraints)
|
| : webrtc::MediaStreamTrack<webrtc::AudioTrackInterface>(label),
|
| capturer_(capturer),
|
| track_source_(track_source),
|
| - need_audio_processing_(!capturer->device_id().empty()) {
|
| + need_audio_processing_(NeedsAudioProcessing(constraints)) {
|
| // The capturer with a valid device id is using microphone as source,
|
| // and APM (AudioProcessingModule) is turned on only for microphone data.
|
| DCHECK(capturer.get());
|
| DVLOG(1) << "WebRtcLocalAudioTrack::WebRtcLocalAudioTrack()";
|
| +
|
| + // TODO(xians): Remove this workaround. It is currently here to maintain
|
| + // backwards compatibility with the previous implementations and apps where
|
| + // no constraints are specified for audio but we've still enabled audio
|
| + // processing. For more see http://crbug.com/277134.
|
| + if (!need_audio_processing_ && !capturer->device_id().empty()) {
|
| + DLOG(WARNING) << "Enabling audio processing despite lack of constraints.";
|
| + need_audio_processing_ = true;
|
| + }
|
| }
|
|
|
| WebRtcLocalAudioTrack::~WebRtcLocalAudioTrack() {
|
|
|