Chromium Code Reviews

Unified Diff: content/renderer/media/media_stream_audio_processor.cc

Issue 200293002: Added UMA stats to check the usage of media stream audio track audio processing (Closed)
Base URL: http://git.chromium.org/chromium/src.git@master
Patch Set: Fixed the indentation and EXPECT_EQ. Created 6 years, 9 months ago.
Other file in this issue: tools/metrics/histograms/histograms.xml
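For orientation, the core of this change is the standard UMA enumeration-histogram pattern: declare an enum whose existing values are never re-ordered or removed, end it with a MAX sentinel, and record samples with UMA_HISTOGRAM_ENUMERATION from base/metrics/histogram.h. The condensed sketch below mirrors the enum, histogram name, and helper that the diff adds; the per-entry comments are editorial glosses based on the call sites further down, not text from the patch.

#include "base/metrics/histogram.h"

namespace {

// Logged to UMA, so existing entries must never be re-ordered or removed;
// new states go immediately before AUDIO_PROCESSING_MAX.
enum AudioTrackProcessingStates {
  AUDIO_PROCESSING_ENABLED = 0,   // MediaStreamAudioProcessor created and active.
  AUDIO_PROCESSING_DISABLED,      // No audio processing component was enabled.
  AUDIO_PROCESSING_IN_WEBRTC,     // Track processing flag off; WebRTC still processes.
  AUDIO_PROCESSING_MAX            // Exclusive upper bound used to size the buckets.
};

void RecordProcessingState(AudioTrackProcessingStates state) {
  UMA_HISTOGRAM_ENUMERATION("Media.AudioTrackProcessingStates",
                            state, AUDIO_PROCESSING_MAX);
}

}  // namespace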
 // Copyright 2013 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "content/renderer/media/media_stream_audio_processor.h"

 #include "base/command_line.h"
 #include "base/debug/trace_event.h"
 #include "base/metrics/field_trial.h"
+#include "base/metrics/histogram.h"
 #include "content/public/common/content_switches.h"
 #include "content/renderer/media/media_stream_audio_processor_options.h"
 #include "content/renderer/media/rtc_media_constraints.h"
 #include "media/audio/audio_parameters.h"
 #include "media/base/audio_converter.h"
 #include "media/base/audio_fifo.h"
 #include "media/base/channel_layout.h"
 #include "third_party/WebKit/public/platform/WebMediaConstraints.h"
 #include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface.h"
 #include "third_party/webrtc/modules/audio_processing/typing_detection.h"

 namespace content {

 namespace {

 using webrtc::AudioProcessing;
 using webrtc::MediaConstraintsInterface;

 #if defined(OS_ANDROID)
 const int kAudioProcessingSampleRate = 16000;
 #else
 const int kAudioProcessingSampleRate = 32000;
 #endif
-const int kAudioProcessingNumberOfChannel = 1;
+const int kAudioProcessingNumberOfChannels = 1;

 const int kMaxNumberOfBuffersInFifo = 2;

+// Used by UMA histograms and entries shouldn't be re-ordered or removed.
+enum AudioTrackProcessingStates {
+  AUDIO_PROCESSING_ENABLED = 0,
+  AUDIO_PROCESSING_DISABLED,
+  AUDIO_PROCESSING_IN_WEBRTC,
+  AUDIO_PROCESSING_MAX
+};
+
+void RecordProcessingState(AudioTrackProcessingStates state) {
+  UMA_HISTOGRAM_ENUMERATION("Media.AudioTrackProcessingStates",
+                            state, AUDIO_PROCESSING_MAX);
+}
+
 }  // namespace

 class MediaStreamAudioProcessor::MediaStreamAudioConverter
     : public media::AudioConverter::InputCallback {
  public:
   MediaStreamAudioConverter(const media::AudioParameters& source_params,
                             const media::AudioParameters& sink_params)
       : source_params_(source_params),
         sink_params_(sink_params),
         audio_converter_(source_params, sink_params_, false) {

(...skipping 211 matching lines...)

 void MediaStreamAudioProcessor::GetStats(AudioProcessorStats* stats) {
   stats->typing_noise_detected =
       (base::subtle::Acquire_Load(&typing_detected_) != false);
   GetAecStats(audio_processing_.get(), stats);
 }

 void MediaStreamAudioProcessor::InitializeAudioProcessingModule(
     const blink::WebMediaConstraints& constraints, int effects,
     MediaStreamType type) {
   DCHECK(!audio_processing_);
-  if (!IsAudioTrackProcessingEnabled())
+  if (!IsAudioTrackProcessingEnabled()) {
+    RecordProcessingState(AUDIO_PROCESSING_IN_WEBRTC);
     return;
+  }

   RTCMediaConstraints native_constraints(constraints);

   // Only apply the fixed constraints for gUM of MEDIA_DEVICE_AUDIO_CAPTURE.
   DCHECK(IsAudioMediaType(type));
   if (type == MEDIA_DEVICE_AUDIO_CAPTURE)
     ApplyFixedAudioConstraints(&native_constraints);

   if (effects & media::AudioParameters::ECHO_CANCELLER) {
     // If platform echo canceller is enabled, disable the software AEC.

(...skipping 32 matching lines...)

   const bool enable_high_pass_filter = GetPropertyFromConstraints(
       &native_constraints, MediaConstraintsInterface::kHighpassFilter);

   audio_mirroring_ = GetPropertyFromConstraints(
       &native_constraints, webrtc::MediaConstraintsInterface::kAudioMirroring);

   // Return immediately if no audio processing component is enabled.
   if (!enable_aec && !enable_experimental_aec && !enable_ns &&
       !enable_high_pass_filter && !enable_typing_detection && !enable_agc &&
       !enable_experimental_ns) {
+    RecordProcessingState(AUDIO_PROCESSING_DISABLED);
     return;
   }

   // Create and configure the webrtc::AudioProcessing.
   audio_processing_.reset(webrtc::AudioProcessing::Create(0));

   // Enable the audio processing components.
   if (enable_aec) {
     EnableEchoCancellation(audio_processing_.get());
     if (enable_experimental_aec)

(...skipping 17 matching lines...)

     // is enabled by default.
     typing_detector_.reset(new webrtc::TypingDetection());
     EnableTypingDetection(audio_processing_.get(), typing_detector_.get());
   }

   if (enable_agc)
     EnableAutomaticGainControl(audio_processing_.get());

   // Configure the audio format the audio processing is running on. This
   // has to be done after all the needed components are enabled.
-  CHECK_EQ(audio_processing_->set_sample_rate_hz(kAudioProcessingSampleRate),
-           0);
-  CHECK_EQ(audio_processing_->set_num_channels(kAudioProcessingNumberOfChannel,
-                                               kAudioProcessingNumberOfChannel),
-           0);
+  CHECK_EQ(0,
+           audio_processing_->set_sample_rate_hz(kAudioProcessingSampleRate));
+  CHECK_EQ(0, audio_processing_->set_num_channels(
+      kAudioProcessingNumberOfChannels, kAudioProcessingNumberOfChannels));
+
+  RecordProcessingState(AUDIO_PROCESSING_ENABLED);
 }

 void MediaStreamAudioProcessor::InitializeCaptureConverter(
     const media::AudioParameters& source_params) {
   DCHECK(main_thread_checker_.CalledOnValidThread());
   DCHECK(source_params.IsValid());

   // Create and initialize audio converter for the source data.
   // When the webrtc AudioProcessing is enabled, the sink format of the
   // converter will be the same as the post-processed data format, which is
   // 32k mono for desktops and 16k mono for Android. When the AudioProcessing
   // is disabled, the sink format will be the same as the source format.
   const int sink_sample_rate = audio_processing_ ?
       kAudioProcessingSampleRate : source_params.sample_rate();
   const media::ChannelLayout sink_channel_layout = audio_processing_ ?
-      media::GuessChannelLayout(kAudioProcessingNumberOfChannel) :
+      media::GuessChannelLayout(kAudioProcessingNumberOfChannels) :
       source_params.channel_layout();

   // WebRtc AudioProcessing requires 10ms as its packet size. We use this
   // native size when processing is enabled. While processing is disabled, and
   // the source is running with a buffer size smaller than 10ms buffer, we use
   // same buffer size as the incoming format to avoid extra FIFO for WebAudio.
   int sink_buffer_size = sink_sample_rate / 100;
   if (!audio_processing_ &&
       source_params.frames_per_buffer() < sink_buffer_size) {
     sink_buffer_size = source_params.frames_per_buffer();

(...skipping 99 matching lines...)

 }

 bool MediaStreamAudioProcessor::IsAudioTrackProcessingEnabled() const {
   const std::string group_name =
       base::FieldTrialList::FindFullName("MediaStreamAudioTrackProcessing");
   return group_name == "Enabled" || CommandLine::ForCurrentProcess()->HasSwitch(
       switches::kEnableAudioTrackProcessing);
 }

 }  // namespace content
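The patch set note above mentions fixing an EXPECT_EQ in the accompanying unit test. As a rough, hypothetical illustration of how the new histogram could be checked, the sketch below uses base::HistogramTester; it assumes base/test/histogram_tester.h is available in this checkout, and it exercises the UMA macro directly rather than a real MediaStreamAudioProcessor, so the test name and literal bucket values are illustrative only.

#include "base/metrics/histogram.h"
#include "base/test/histogram_tester.h"
#include "testing/gtest/include/gtest/gtest.h"

TEST(AudioTrackProcessingStatesUmaTest, RecordsOneSamplePerCall) {
  base::HistogramTester histogram_tester;

  // Log one sample in bucket 0, the same way RecordProcessingState() logs
  // AUDIO_PROCESSING_ENABLED; 3 matches the AUDIO_PROCESSING_MAX boundary.
  UMA_HISTOGRAM_ENUMERATION("Media.AudioTrackProcessingStates", 0, 3);

  // Exactly one sample should have been recorded, and it should be in bucket 0.
  histogram_tester.ExpectUniqueSample("Media.AudioTrackProcessingStates", 0, 1);
}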