OLD | NEW |
---|---|
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/media/media_stream_audio_processor.h" | 5 #include "content/renderer/media/media_stream_audio_processor.h" |
6 | 6 |
7 #include "base/command_line.h" | 7 #include "base/command_line.h" |
8 #include "base/debug/trace_event.h" | 8 #include "base/debug/trace_event.h" |
9 #if defined(OS_MACOSX) | 9 #if defined(OS_MACOSX) |
10 #include "base/metrics/field_trial.h" | 10 #include "base/metrics/field_trial.h" |
(...skipping 32 matching lines...) | |
43 case media::CHANNEL_LAYOUT_STEREO: | 43 case media::CHANNEL_LAYOUT_STEREO: |
44 return AudioProcessing::kStereo; | 44 return AudioProcessing::kStereo; |
45 case media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC: | 45 case media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC: |
46 return AudioProcessing::kStereoAndKeyboard; | 46 return AudioProcessing::kStereoAndKeyboard; |
47 default: | 47 default: |
48 NOTREACHED() << "Layout not supported: " << media_layout; | 48 NOTREACHED() << "Layout not supported: " << media_layout; |
49 return AudioProcessing::kMono; | 49 return AudioProcessing::kMono; |
50 } | 50 } |
51 } | 51 } |
52 | 52 |
53 // This is only used for playout data, where at most two channels are supported. | 
53 AudioProcessing::ChannelLayout ChannelsToLayout(int num_channels) { | 54 AudioProcessing::ChannelLayout ChannelsToLayout(int num_channels) { |
54 switch (num_channels) { | 55 switch (num_channels) { |
55 case 1: | 56 case 1: |
56 return AudioProcessing::kMono; | 57 return AudioProcessing::kMono; |
57 case 2: | 58 case 2: |
58 return AudioProcessing::kStereo; | 59 return AudioProcessing::kStereo; |
59 default: | 60 default: |
60 NOTREACHED() << "Channels not supported: " << num_channels; | 61 NOTREACHED() << "Channels not supported: " << num_channels; |
61 return AudioProcessing::kMono; | 62 return AudioProcessing::kMono; |
62 } | 63 } |
(...skipping 44 matching lines...) | |
107 } | 108 } |
108 | 109 |
109 private: | 110 private: |
110 base::ThreadChecker thread_checker_; | 111 base::ThreadChecker thread_checker_; |
111 scoped_ptr<media::AudioBus> bus_; | 112 scoped_ptr<media::AudioBus> bus_; |
112 scoped_ptr<float*[]> channel_ptrs_; | 113 scoped_ptr<float*[]> channel_ptrs_; |
113 }; | 114 }; |
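The public interface of MediaStreamAudioBus sits in the elided region above; only its data members are visible here. For orientation, a minimal sketch of what such a wrapper typically looks like, assuming accessors named bus() and channel_ptrs() (bus() is used further down in this file; channel_ptrs() and the body below are reconstructions, not the actual Chromium code):

#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/threading/thread_checker.h"
#include "media/base/audio_bus.h"

// Sketch only: owns an AudioBus plus a cached float*[] view of its channels,
// so the same buffer can be handed both to media:: code (AudioBus*) and to
// webrtc::AudioProcessing APIs that take float* const*.
class MediaStreamAudioBusSketch {
 public:
  MediaStreamAudioBusSketch(int channels, int frames)
      : bus_(media::AudioBus::Create(channels, frames)),
        channel_ptrs_(new float*[channels]) {
    for (int i = 0; i < channels; ++i)
      channel_ptrs_[i] = bus_->channel(i);
    // May be created on the main render thread and used on an audio thread.
    thread_checker_.DetachFromThread();
  }

  media::AudioBus* bus() {
    DCHECK(thread_checker_.CalledOnValidThread());
    return bus_.get();
  }

  float* const* channel_ptrs() {
    DCHECK(thread_checker_.CalledOnValidThread());
    return channel_ptrs_.get();
  }

 private:
  base::ThreadChecker thread_checker_;
  scoped_ptr<media::AudioBus> bus_;
  scoped_ptr<float*[]> channel_ptrs_;
};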
114 | 115 |
115 // Wraps AudioFifo to provide a cleaner interface to MediaStreamAudioProcessor. | 116 // Wraps AudioFifo to provide a cleaner interface to MediaStreamAudioProcessor. |
116 // It avoids the FIFO when the source and destination frames match. All methods | 117 // It avoids the FIFO when the source and destination frames match. All methods |
117 // are called on one of the capture or render audio threads exclusively. | 118 // are called on one of the capture or render audio threads exclusively. If |
119 // |source_channels| is larger than |destination_channels|, only the first | |
120 // |destination_channels| are kept from the source. | |
118 class MediaStreamAudioFifo { | 121 class MediaStreamAudioFifo { |
119 public: | 122 public: |
120 MediaStreamAudioFifo(int channels, int source_frames, | 123 MediaStreamAudioFifo(int source_channels, |
124 int destination_channels, | |
125 int source_frames, | |
121 int destination_frames) | 126 int destination_frames) |
122 : source_frames_(source_frames), | 127 : source_channels_(source_channels), |
123 destination_(new MediaStreamAudioBus(channels, destination_frames)), | 128 source_frames_(source_frames), |
129 destination_( | |
130 new MediaStreamAudioBus(destination_channels, destination_frames)), | |
124 data_available_(false) { | 131 data_available_(false) { |
132 DCHECK_GE(source_channels, destination_channels); | |
125 if (source_frames != destination_frames) { | 133 if (source_frames != destination_frames) { |
126 // Since we require every Push to be followed by as many Consumes as | 134 // Since we require every Push to be followed by as many Consumes as |
127 // possible, twice the larger of the two is a (probably) loose upper bound | 135 // possible, twice the larger of the two is a (probably) loose upper bound |
128 // on the FIFO size. | 136 // on the FIFO size. |
129 const int fifo_frames = 2 * std::max(source_frames, destination_frames); | 137 const int fifo_frames = 2 * std::max(source_frames, destination_frames); |
130 fifo_.reset(new media::AudioFifo(channels, fifo_frames)); | 138 fifo_.reset(new media::AudioFifo(destination_channels, fifo_frames)); |
131 } | 139 } |
132 | 140 |
133 // May be created in the main render thread and used in the audio threads. | 141 // May be created in the main render thread and used in the audio threads. |
134 thread_checker_.DetachFromThread(); | 142 thread_checker_.DetachFromThread(); |
135 } | 143 } |
136 | 144 |
137 void Push(const media::AudioBus* source) { | 145 void Push(const media::AudioBus* source) { |
138 DCHECK(thread_checker_.CalledOnValidThread()); | 146 DCHECK(thread_checker_.CalledOnValidThread()); |
139 DCHECK_EQ(source->channels(), destination_->bus()->channels()); | 147 DCHECK_EQ(source->channels(), source_channels_); |
140 DCHECK_EQ(source->frames(), source_frames_); | 148 DCHECK_EQ(source->frames(), source_frames_); |
141 | 149 |
150 const media::AudioBus* source_to_push = source; | |
151 | |
152 if (source->channels() > destination_->bus()->channels()) { | |
153 if (!audio_source_intermediate_) { | |
154 audio_source_intermediate_ = | 
no longer working on chromium 2014/09/26 11:20:41: You can set up this audio_source_intermediate_ in
Henrik Grunell 2014/09/30 07:46:54: Done.
155 media::AudioBus::CreateWrapper(destination_->bus()->channels()); | |
156 } | |
157 for (int i = 0; i < destination_->bus()->channels(); ++i) { | |
158 audio_source_intermediate_->SetChannelData( | |
159 i, | |
160 const_cast<float*>(source->channel(i))); | |
161 } | |
162 audio_source_intermediate_->set_frames(source->frames()); | |
163 source_to_push = audio_source_intermediate_.get(); | |
164 } | |
165 | |
142 if (fifo_) { | 166 if (fifo_) { |
143 fifo_->Push(source); | 167 fifo_->Push(source_to_push); |
144 } else { | 168 } else { |
145 source->CopyTo(destination_->bus()); | 169 source_to_push->CopyTo(destination_->bus()); |
146 data_available_ = true; | 170 data_available_ = true; |
147 } | 171 } |
148 } | 172 } |
149 | 173 |
150 // Returns true if there are destination_frames() of data available to be | 174 // Returns true if there are destination_frames() of data available to be |
151 // consumed, and otherwise false. | 175 // consumed, and otherwise false. |
152 bool Consume(MediaStreamAudioBus** destination) { | 176 bool Consume(MediaStreamAudioBus** destination) { |
153 DCHECK(thread_checker_.CalledOnValidThread()); | 177 DCHECK(thread_checker_.CalledOnValidThread()); |
154 | 178 |
155 if (fifo_) { | 179 if (fifo_) { |
156 if (fifo_->frames() < destination_->bus()->frames()) | 180 if (fifo_->frames() < destination_->bus()->frames()) |
157 return false; | 181 return false; |
158 | 182 |
159 fifo_->Consume(destination_->bus(), 0, destination_->bus()->frames()); | 183 fifo_->Consume(destination_->bus(), 0, destination_->bus()->frames()); |
160 } else { | 184 } else { |
161 if (!data_available_) | 185 if (!data_available_) |
162 return false; | 186 return false; |
163 | 187 |
164 // The data was already copied to |destination_| in this case. | 188 // The data was already copied to |destination_| in this case. |
165 data_available_ = false; | 189 data_available_ = false; |
166 } | 190 } |
167 | 191 |
168 *destination = destination_.get(); | 192 *destination = destination_.get(); |
169 return true; | 193 return true; |
170 } | 194 } |
171 | 195 |
172 private: | 196 private: |
173 base::ThreadChecker thread_checker_; | 197 base::ThreadChecker thread_checker_; |
198 const int source_channels_; // For a DCHECK. | |
174 const int source_frames_; // For a DCHECK. | 199 const int source_frames_; // For a DCHECK. |
200 scoped_ptr<media::AudioBus> audio_source_intermediate_; | |
175 scoped_ptr<MediaStreamAudioBus> destination_; | 201 scoped_ptr<MediaStreamAudioBus> destination_; |
176 scoped_ptr<media::AudioFifo> fifo_; | 202 scoped_ptr<media::AudioFifo> fifo_; |
177 // Only used when the FIFO is disabled; | 203 // Only used when the FIFO is disabled; |
178 bool data_available_; | 204 bool data_available_; |
179 }; | 205 }; |
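The new code in Push() keeps only the first destination channels by wrapping the source's channel pointers instead of copying any samples. Condensed into a standalone helper for clarity (a sketch that uses only media::AudioBus calls already visible in this diff; the function name is illustrative):

// Wrap the first |destination_channels| channels of |source| in a new
// AudioBus without copying sample data. CreateWrapper() allocates no audio
// memory; each wrapper channel simply points into |source|.
scoped_ptr<media::AudioBus> WrapFirstChannels(const media::AudioBus* source,
                                              int destination_channels) {
  DCHECK_GE(source->channels(), destination_channels);
  scoped_ptr<media::AudioBus> wrapper =
      media::AudioBus::CreateWrapper(destination_channels);
  for (int i = 0; i < destination_channels; ++i)
    wrapper->SetChannelData(i, const_cast<float*>(source->channel(i)));
  wrapper->set_frames(source->frames());
  return wrapper.Pass();
}

In Push() the wrapper is cached in |audio_source_intermediate_| so it is not recreated on every capture callback; the inline review comment attached to line 154 suggests setting it up in the constructor instead, which the author marked as done.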
180 | 206 |
181 bool MediaStreamAudioProcessor::IsAudioTrackProcessingEnabled() { | 207 bool MediaStreamAudioProcessor::IsAudioTrackProcessingEnabled() { |
182 return !CommandLine::ForCurrentProcess()->HasSwitch( | 208 return !CommandLine::ForCurrentProcess()->HasSwitch( |
183 switches::kDisableAudioTrackProcessing); | 209 switches::kDisableAudioTrackProcessing); |
184 } | 210 } |
(...skipping 274 matching lines...) | |
459 DCHECK(input_format.IsValid()); | 485 DCHECK(input_format.IsValid()); |
460 input_format_ = input_format; | 486 input_format_ = input_format; |
461 | 487 |
462 // TODO(ajm): For now, we assume fixed parameters for the output when audio | 488 // TODO(ajm): For now, we assume fixed parameters for the output when audio |
463 // processing is enabled, to match the previous behavior. We should either | 489 // processing is enabled, to match the previous behavior. We should either |
464 // use the input parameters (in which case, audio processing will convert | 490 // use the input parameters (in which case, audio processing will convert |
465 // at output) or ideally, have a backchannel from the sink to know what | 491 // at output) or ideally, have a backchannel from the sink to know what |
466 // format it would prefer. | 492 // format it would prefer. |
467 const int output_sample_rate = audio_processing_ ? | 493 const int output_sample_rate = audio_processing_ ? |
468 kAudioProcessingSampleRate : input_format.sample_rate(); | 494 kAudioProcessingSampleRate : input_format.sample_rate(); |
469 const media::ChannelLayout output_channel_layout = audio_processing_ ? | 495 media::ChannelLayout output_channel_layout = audio_processing_ ? |
470 media::GuessChannelLayout(kAudioProcessingNumberOfChannels) : | 496 media::GuessChannelLayout(kAudioProcessingNumberOfChannels) : |
471 input_format.channel_layout(); | 497 input_format.channel_layout(); |
472 | 498 |
499 // The FIFO's output channel count normally matches the input. | 
500 int fifo_output_channels = input_format.channels(); | |
501 | |
502 // Special case: if the input has a keyboard mic channel and no audio | 
503 // processing is used, the FIFO strips away that channel. We then use | 
504 // stereo as the output layout and adjust the FIFO's output channel | 
505 // count accordingly. | 
506 if (input_format.channel_layout() == | |
507 media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC && | |
508 !audio_processing_) { | |
509 output_channel_layout = media::CHANNEL_LAYOUT_STEREO; | |
510 fifo_output_channels = ChannelLayoutToChannelCount(output_channel_layout); | |
511 } | |
512 | |
473 // webrtc::AudioProcessing requires a 10 ms chunk size. We use this native | 513 // webrtc::AudioProcessing requires a 10 ms chunk size. We use this native |
474 // size when processing is enabled. When disabled we use the same size as | 514 // size when processing is enabled. When disabled we use the same size as |
475 // the source if less than 10 ms. | 515 // the source if less than 10 ms. |
476 // | 516 // |
477 // TODO(ajm): This conditional buffer size appears to be assuming knowledge of | 517 // TODO(ajm): This conditional buffer size appears to be assuming knowledge of |
478 // the sink based on the source parameters. PeerConnection sinks seem to want | 518 // the sink based on the source parameters. PeerConnection sinks seem to want |
479 // 10 ms chunks regardless, while WebAudio sinks want less, and we're assuming | 519 // 10 ms chunks regardless, while WebAudio sinks want less, and we're assuming |
480 // we can identify WebAudio sinks by the input chunk size. Less fragile would | 520 // we can identify WebAudio sinks by the input chunk size. Less fragile would |
481 // be to have the sink actually tell us how much it wants (as in the above | 521 // be to have the sink actually tell us how much it wants (as in the above |
482 // TODO). | 522 // TODO). |
483 int processing_frames = input_format.sample_rate() / 100; | 523 int processing_frames = input_format.sample_rate() / 100; |
484 int output_frames = output_sample_rate / 100; | 524 int output_frames = output_sample_rate / 100; |
485 if (!audio_processing_ && input_format.frames_per_buffer() < output_frames) { | 525 if (!audio_processing_ && input_format.frames_per_buffer() < output_frames) { |
486 processing_frames = input_format.frames_per_buffer(); | 526 processing_frames = input_format.frames_per_buffer(); |
487 output_frames = processing_frames; | 527 output_frames = processing_frames; |
488 } | 528 } |
489 | 529 |
490 output_format_ = media::AudioParameters( | 530 output_format_ = media::AudioParameters( |
491 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | 531 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |
492 output_channel_layout, | 532 output_channel_layout, |
493 output_sample_rate, | 533 output_sample_rate, |
494 16, | 534 16, |
495 output_frames); | 535 output_frames); |
496 | 536 |
497 capture_fifo_.reset( | 537 capture_fifo_.reset( |
498 new MediaStreamAudioFifo(input_format.channels(), | 538 new MediaStreamAudioFifo(input_format.channels(), |
539 fifo_output_channels, | |
499 input_format.frames_per_buffer(), | 540 input_format.frames_per_buffer(), |
500 processing_frames)); | 541 processing_frames)); |
501 | 542 |
502 if (audio_processing_) { | 543 if (audio_processing_) { |
503 output_bus_.reset(new MediaStreamAudioBus(output_format_.channels(), | 544 output_bus_.reset(new MediaStreamAudioBus(output_format_.channels(), |
504 output_frames)); | 545 output_frames)); |
505 } | 546 } |
506 output_data_.reset(new int16[output_format_.GetBytesPerBuffer() / | 547 output_data_.reset(new int16[output_format_.GetBytesPerBuffer() / |
507 sizeof(int16)]); | 548 sizeof(int16)]); |
508 } | 549 } |
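To make the capture-side bookkeeping above concrete, here is a worked example with illustrative numbers (not taken from the CL): a 48 kHz CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC source delivering 128-frame buffers, with audio processing disabled.

// output_sample_rate    = 48000                  (no processing: keep input)
// output_channel_layout = CHANNEL_LAYOUT_STEREO  (keyboard mic stripped)
// fifo_output_channels  = 2
// processing_frames     = 128                    (128 < 48000 / 100 = 480)
// output_frames         = 128                    (follow the smaller source)
//
// capture_fifo_ becomes MediaStreamAudioFifo(3, 2, 128, 128). Source and
// destination frame counts match, so no internal AudioFifo is allocated and
// Push() only drops the keyboard mic channel via the wrapper shown earlier.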
(...skipping 12 matching lines...) | |
521 render_format_ = media::AudioParameters( | 562 render_format_ = media::AudioParameters( |
522 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | 563 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |
523 media::GuessChannelLayout(number_of_channels), | 564 media::GuessChannelLayout(number_of_channels), |
524 sample_rate, | 565 sample_rate, |
525 16, | 566 16, |
526 frames_per_buffer); | 567 frames_per_buffer); |
527 | 568 |
528 const int analysis_frames = sample_rate / 100; // 10 ms chunks. | 569 const int analysis_frames = sample_rate / 100; // 10 ms chunks. |
529 render_fifo_.reset( | 570 render_fifo_.reset( |
530 new MediaStreamAudioFifo(number_of_channels, | 571 new MediaStreamAudioFifo(number_of_channels, |
572 number_of_channels, | |
531 frames_per_buffer, | 573 frames_per_buffer, |
532 analysis_frames)); | 574 analysis_frames)); |
533 } | 575 } |
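The render path exercises the FIFO sizing rule from the MediaStreamAudioFifo constructor. A worked example with illustrative numbers, plus a sketch of the expected Push/Consume pattern (OnRenderData() is a hypothetical caller, not a function in this file):

// Playout format (example): 44.1 kHz, stereo, 512 frames per buffer.
//   analysis_frames = 44100 / 100 = 441 (10 ms chunks)
//   render_fifo_    = MediaStreamAudioFifo(2, 2, 512, 441)
// Since 512 != 441, an internal AudioFifo of 2 * max(512, 441) = 1024 frames
// is allocated. Consuming after every Push leaves at most 440 frames behind,
// so occupancy never exceeds 440 + 512 = 952, within the stated loose bound.
void OnRenderData(MediaStreamAudioFifo* fifo, const media::AudioBus* audio) {
  fifo->Push(audio);  // 512 source frames in.
  MediaStreamAudioBus* chunk = NULL;
  while (fifo->Consume(&chunk)) {
    // |chunk| holds exactly 441 frames (10 ms) for render-side analysis.
  }
}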
534 | 576 |
535 int MediaStreamAudioProcessor::ProcessData(const float* const* process_ptrs, | 577 int MediaStreamAudioProcessor::ProcessData(const float* const* process_ptrs, |
536 int process_frames, | 578 int process_frames, |
537 base::TimeDelta capture_delay, | 579 base::TimeDelta capture_delay, |
538 int volume, | 580 int volume, |
539 bool key_pressed, | 581 bool key_pressed, |
540 float* const* output_ptrs) { | 582 float* const* output_ptrs) { |
(...skipping 39 matching lines...) | |
580 vad->stream_has_voice()); | 622 vad->stream_has_voice()); |
581 base::subtle::Release_Store(&typing_detected_, detected); | 623 base::subtle::Release_Store(&typing_detected_, detected); |
582 } | 624 } |
583 | 625 |
584 // Return 0 if the volume hasn't been changed, and otherwise the new volume. | 626 // Return 0 if the volume hasn't been changed, and otherwise the new volume. |
585 return (agc->stream_analog_level() == volume) ? | 627 return (agc->stream_analog_level() == volume) ? |
586 0 : agc->stream_analog_level(); | 628 0 : agc->stream_analog_level(); |
587 } | 629 } |
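ProcessData()'s return contract (0 means the analog mic volume is unchanged, anything else is the new level requested by the AGC) is easy to misread. A hedged sketch of caller-side handling; SetCapturerVolume() and the surrounding variable names are hypothetical, not part of this file:

// Hypothetical caller, e.g. inside the capture callback:
int new_volume = ProcessData(process_bus->channel_ptrs(), process_frames,
                             capture_delay, current_volume, key_pressed,
                             output_bus->channel_ptrs());
if (new_volume != 0) {
  // The AGC wants a different analog level; forward it to the audio
  // capturer so the hardware gain is actually updated.
  SetCapturerVolume(new_volume);
}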
588 | 630 |
589 } // namespace content | 631 } // namespace content |