OLD | NEW |
---|---|
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "components/copresence/mediums/audio/audio_manager_impl.h" | 5 #include "components/copresence/mediums/audio/audio_manager_impl.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <limits> | |
8 #include <vector> | 9 #include <vector> |
9 | 10 |
10 #include "base/bind.h" | 11 #include "base/bind.h" |
11 #include "base/bind_helpers.h" | 12 #include "base/bind_helpers.h" |
13 #include "base/command_line.h" | |
12 #include "base/logging.h" | 14 #include "base/logging.h" |
13 #include "base/run_loop.h" | 15 #include "base/run_loop.h" |
14 #include "base/strings/string_util.h" | 16 #include "base/strings/string_util.h" |
15 #include "base/time/time.h" | 17 #include "base/time/time.h" |
18 #include "components/copresence/copresence_switches.h" | |
16 #include "components/copresence/mediums/audio/audio_player_impl.h" | 19 #include "components/copresence/mediums/audio/audio_player_impl.h" |
17 #include "components/copresence/mediums/audio/audio_recorder_impl.h" | 20 #include "components/copresence/mediums/audio/audio_recorder_impl.h" |
18 #include "components/copresence/public/copresence_constants.h" | 21 #include "components/copresence/public/copresence_constants.h" |
19 #include "components/copresence/public/whispernet_client.h" | 22 #include "components/copresence/public/whispernet_client.h" |
20 #include "content/public/browser/browser_thread.h" | 23 #include "content/public/browser/browser_thread.h" |
21 #include "media/audio/audio_manager.h" | 24 #include "media/audio/audio_manager.h" |
22 #include "media/audio/audio_manager_base.h" | 25 #include "media/audio/audio_manager_base.h" |
23 #include "media/base/audio_bus.h" | 26 #include "media/base/audio_bus.h" |
27 #include "third_party/webrtc/common_audio/wav_file.h" | |
24 | 28 |
25 namespace copresence { | 29 namespace copresence { |
26 | 30 |
27 namespace { | 31 namespace { |
28 | 32 |
33 const int kSampleExpiryTimeMs = 60 * 60 * 1000; // 60 minutes. | |
34 const int kMaxSamples = 10000; | |
35 const int kTokenTimeoutMs = 2000; | |
36 const int kMonoChannelCount = 1; | |
37 | |
29 // UrlSafe is defined as: | 38 // UrlSafe is defined as: |
30 // '/' represented by a '_' and '+' represented by a '-' | 39 // '/' represented by a '_' and '+' represented by a '-' |
31 // TODO(rkc): Move this processing to the whispernet wrapper. | 40 // TODO(ckehoe): Move this to a central place. |
32 std::string FromUrlSafe(std::string token) { | 41 std::string FromUrlSafe(std::string token) { |
33 base::ReplaceChars(token, "-", "+", &token); | 42 base::ReplaceChars(token, "-", "+", &token); |
34 base::ReplaceChars(token, "_", "/", &token); | 43 base::ReplaceChars(token, "_", "/", &token); |
35 return token; | 44 return token; |
36 } | 45 } |
46 std::string ToUrlSafe(std::string token) { | |
47 base::ReplaceChars(token, "+", "-", &token); | |
48 base::ReplaceChars(token, "/", "_", &token); | |
49 return token; | |
50 } | |
37 | 51 |
38 const int kSampleExpiryTimeMs = 60 * 60 * 1000; // 60 minutes. | 52 // TODO(ckehoe): Move this to a central place. |
39 const int kMaxSamples = 10000; | 53 std::string AudioTypeToString(AudioType audio_type) { |
40 const int kTokenTimeoutMs = 2000; | 54 if (audio_type == AUDIBLE) |
55 return "audible"; | |
56 if (audio_type == INAUDIBLE) | |
57 return "inaudible"; | |
58 | |
59 NOTREACHED() << "Got unexpected token type " << audio_type; | |
60 return std::string(); | |
61 } | |
62 | |
63 bool ReadBooleanFlag(const std::string& flag, bool default_value) { | |
64 const std::string flag_value = base::StringToLowerASCII( | |
65 base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(flag)); | |
66 if (flag_value == "true" || flag_value == "1") | |
67 return true; | |
68 if (flag_value == "false" || flag_value == "0") | |
69 return false; | |
70 LOG_IF(ERROR, !flag_value.empty()) | |
71 << "Unrecognized value \"" << flag_value << " for flag " | |
72 << flag << ". Defaulting to " << default_value; | |
73 return default_value; | |
74 } | |
41 | 75 |
42 } // namespace | 76 } // namespace |
43 | 77 |
44 // Public methods. | 78 |
79 // Public functions. | |
45 | 80 |
46 AudioManagerImpl::AudioManagerImpl() | 81 AudioManagerImpl::AudioManagerImpl() |
47 : whispernet_client_(nullptr), recorder_(nullptr) { | 82 : whispernet_client_(nullptr), recorder_(nullptr) { |
48 // TODO(rkc): Move all of these into initializer lists once it is allowed. | 83 // TODO(rkc): Move all of these into initializer lists once it is allowed. |
49 should_be_playing_[AUDIBLE] = false; | 84 should_be_playing_[AUDIBLE] = false; |
50 should_be_playing_[INAUDIBLE] = false; | 85 should_be_playing_[INAUDIBLE] = false; |
51 should_be_recording_[AUDIBLE] = false; | 86 should_be_recording_[AUDIBLE] = false; |
52 should_be_recording_[INAUDIBLE] = false; | 87 should_be_recording_[INAUDIBLE] = false; |
53 | 88 |
89 player_enabled_[AUDIBLE] = ReadBooleanFlag( | |
90 switches::kCopresenceEnableAudibleBroadcast, true); | |
91 player_enabled_[INAUDIBLE] = ReadBooleanFlag( | |
92 switches::kCopresenceEnableInaudibleBroadcast, true); | |
54 player_[AUDIBLE] = nullptr; | 93 player_[AUDIBLE] = nullptr; |
55 player_[INAUDIBLE] = nullptr; | 94 player_[INAUDIBLE] = nullptr; |
56 token_length_[0] = 0; | 95 token_length_[0] = 0; |
57 token_length_[1] = 0; | 96 token_length_[1] = 0; |
58 } | 97 } |
59 | 98 |
60 void AudioManagerImpl::Initialize(WhispernetClient* whispernet_client, | 99 void AudioManagerImpl::Initialize(WhispernetClient* whispernet_client, |
61 const TokensCallback& tokens_cb) { | 100 const TokensCallback& tokens_cb) { |
62 samples_cache_.resize(2); | 101 samples_cache_.resize(2); |
63 samples_cache_[AUDIBLE] = new SamplesMap( | 102 samples_cache_[AUDIBLE] = new SamplesMap( |
(...skipping 17 matching lines...)
81 | 120 |
82 if (!player_[INAUDIBLE]) | 121 if (!player_[INAUDIBLE]) |
83 player_[INAUDIBLE] = new AudioPlayerImpl(); | 122 player_[INAUDIBLE] = new AudioPlayerImpl(); |
84 player_[INAUDIBLE]->Initialize(); | 123 player_[INAUDIBLE]->Initialize(); |
85 | 124 |
86 decode_cancelable_cb_.Reset(base::Bind( | 125 decode_cancelable_cb_.Reset(base::Bind( |
87 &AudioManagerImpl::DecodeSamplesConnector, base::Unretained(this))); | 126 &AudioManagerImpl::DecodeSamplesConnector, base::Unretained(this))); |
88 if (!recorder_) | 127 if (!recorder_) |
89 recorder_ = new AudioRecorderImpl(); | 128 recorder_ = new AudioRecorderImpl(); |
90 recorder_->Initialize(decode_cancelable_cb_.callback()); | 129 recorder_->Initialize(decode_cancelable_cb_.callback()); |
130 | |
131 dump_tokens_dir_ = base::FilePath(base::CommandLine::ForCurrentProcess() | |
132 ->GetSwitchValueASCII(switches::kCopresenceDumpTokensToDir)); | |
91 } | 133 } |
92 | 134 |
93 AudioManagerImpl::~AudioManagerImpl() { | 135 AudioManagerImpl::~AudioManagerImpl() { |
94 if (player_[AUDIBLE]) | 136 if (player_[AUDIBLE]) |
95 player_[AUDIBLE]->Finalize(); | 137 player_[AUDIBLE]->Finalize(); |
96 if (player_[INAUDIBLE]) | 138 if (player_[INAUDIBLE]) |
97 player_[INAUDIBLE]->Finalize(); | 139 player_[INAUDIBLE]->Finalize(); |
98 if (recorder_) | 140 if (recorder_) |
99 recorder_->Finalize(); | 141 recorder_->Finalize(); |
100 | 142 |
101 // Whispernet initialization may never have completed. | 143 // Whispernet initialization may never have completed. |
102 if (whispernet_client_) { | 144 if (whispernet_client_) { |
103 whispernet_client_->RegisterTokensCallback(TokensCallback()); | 145 whispernet_client_->RegisterTokensCallback(TokensCallback()); |
104 whispernet_client_->RegisterSamplesCallback(SamplesCallback()); | 146 whispernet_client_->RegisterSamplesCallback(SamplesCallback()); |
105 } | 147 } |
106 } | 148 } |
107 | 149 |
108 void AudioManagerImpl::StartPlaying(AudioType type) { | 150 void AudioManagerImpl::StartPlaying(AudioType type) { |
109 DCHECK(type == AUDIBLE || type == INAUDIBLE); | 151 DCHECK(type == AUDIBLE || type == INAUDIBLE); |
110 should_be_playing_[type] = true; | 152 should_be_playing_[type] = true; |
111 // If we don't have our token encoded yet, this check will be false, for now. | 153 // If we don't have our token encoded yet, this check will be false, for now. |
112 // Once our token is encoded, OnTokenEncoded will call UpdateToken, which | 154 // Once our token is encoded, OnTokenEncoded will call UpdateToken, which |
113 // will call this code again (if we're still supposed to be playing). | 155 // will call this code again (if we're still supposed to be playing). |
114 if (samples_cache_[type]->HasKey(playing_token_[type])) { | 156 if (samples_cache_[type]->HasKey(playing_token_[type])) { |
115 DCHECK(!playing_token_[type].empty()); | 157 DCHECK(!playing_token_[type].empty()); |
116 started_playing_[type] = base::Time::Now(); | 158 if (player_enabled_[type]) { |
117 player_[type]->Play(samples_cache_[type]->GetValue(playing_token_[type])); | 159 started_playing_[type] = base::Time::Now(); |
118 // If we're playing, we always record to hear what we are playing. | 160 player_[type]->Play(samples_cache_[type]->GetValue(playing_token_[type])); |
119 recorder_->Record(); | 161 |
162 // If we're playing, we always record to hear what we are playing. | |
163 recorder_->Record(); | |
164 } else { | |
165 DVLOG(3) << "Skipping playback for disabled " << AudioTypeToString(type) | |
166 << " player."; | |
167 } | |
120 } | 168 } |
121 } | 169 } |
122 | 170 |
123 void AudioManagerImpl::StopPlaying(AudioType type) { | 171 void AudioManagerImpl::StopPlaying(AudioType type) { |
124 DCHECK(type == AUDIBLE || type == INAUDIBLE); | 172 DCHECK(type == AUDIBLE || type == INAUDIBLE); |
125 should_be_playing_[type] = false; | 173 should_be_playing_[type] = false; |
126 player_[type]->Stop(); | 174 player_[type]->Stop(); |
127 // If we were only recording to hear our own played tokens, stop. | 175 // If we were only recording to hear our own played tokens, stop. |
128 if (!should_be_recording_[AUDIBLE] && !should_be_recording_[INAUDIBLE]) | 176 if (!should_be_recording_[AUDIBLE] && !should_be_recording_[INAUDIBLE]) |
129 recorder_->Stop(); | 177 recorder_->Stop(); |
(...skipping 36 matching lines...)
166 if (base::Time::Now() - started_playing_[type] < tokenTimeout) | 214 if (base::Time::Now() - started_playing_[type] < tokenTimeout) |
167 return true; | 215 return true; |
168 | 216 |
169 return base::Time::Now() - heard_own_token_[type] < tokenTimeout; | 217 return base::Time::Now() - heard_own_token_[type] < tokenTimeout; |
170 } | 218 } |
171 | 219 |
172 void AudioManagerImpl::SetTokenLength(AudioType type, size_t token_length) { | 220 void AudioManagerImpl::SetTokenLength(AudioType type, size_t token_length) { |
173 token_length_[type] = token_length; | 221 token_length_[type] = token_length; |
174 } | 222 } |
175 | 223 |
176 // Private methods. | 224 |
225 // Private functions. | |
177 | 226 |
178 void AudioManagerImpl::OnTokenEncoded( | 227 void AudioManagerImpl::OnTokenEncoded( |
179 AudioType type, | 228 AudioType type, |
180 const std::string& token, | 229 const std::string& token, |
181 const scoped_refptr<media::AudioBusRefCounted>& samples) { | 230 const scoped_refptr<media::AudioBusRefCounted>& samples) { |
182 samples_cache_[type]->Add(token, samples); | 231 samples_cache_[type]->Add(token, samples); |
232 DumpToken(type, token, samples.get()); | |
183 UpdateToken(type, token); | 233 UpdateToken(type, token); |
184 } | 234 } |
185 | 235 |
186 void AudioManagerImpl::OnTokensFound(const std::vector<AudioToken>& tokens) { | 236 void AudioManagerImpl::OnTokensFound(const std::vector<AudioToken>& tokens) { |
187 std::vector<AudioToken> tokens_to_report; | 237 std::vector<AudioToken> tokens_to_report; |
188 for (const auto& token : tokens) { | 238 for (const auto& token : tokens) { |
189 AudioType type = token.audible ? AUDIBLE : INAUDIBLE; | 239 AudioType type = token.audible ? AUDIBLE : INAUDIBLE; |
190 if (playing_token_[type] == token.token) | 240 if (playing_token_[type] == token.token) |
191 heard_own_token_[type] = base::Time::Now(); | 241 heard_own_token_[type] = base::Time::Now(); |
192 | 242 |
(...skipping 22 matching lines...)
215 RestartPlaying(type); | 265 RestartPlaying(type); |
216 } | 266 } |
217 | 267 |
218 void AudioManagerImpl::RestartPlaying(AudioType type) { | 268 void AudioManagerImpl::RestartPlaying(AudioType type) { |
219 DCHECK(type == AUDIBLE || type == INAUDIBLE); | 269 DCHECK(type == AUDIBLE || type == INAUDIBLE); |
220 // We should already have this token in the cache. This function is not | 270 // We should already have this token in the cache. This function is not |
221 // called from anywhere except update token and only once we have our samples | 271 // called from anywhere except update token and only once we have our samples |
222 // in the cache. | 272 // in the cache. |
223 DCHECK(samples_cache_[type]->HasKey(playing_token_[type])); | 273 DCHECK(samples_cache_[type]->HasKey(playing_token_[type])); |
224 | 274 |
225 started_playing_[type] = base::Time::Now(); | |
226 player_[type]->Stop(); | 275 player_[type]->Stop(); |
227 player_[type]->Play(samples_cache_[type]->GetValue(playing_token_[type])); | 276 StartPlaying(type); |
228 // If we're playing, we always record to hear what we are playing. | |
229 recorder_->Record(); | |
230 } | 277 } |
231 | 278 |
232 void AudioManagerImpl::DecodeSamplesConnector(const std::string& samples) { | 279 void AudioManagerImpl::DecodeSamplesConnector(const std::string& samples) { |
233 // If we are either supposed to be recording *or* playing, audible or | 280 // If we are either supposed to be recording *or* playing, audible or |
234 // inaudible, we should be decoding that type. This is so that if we are | 281 // inaudible, we should be decoding that type. This is so that if we are |
235 // just playing, we will still decode our recorded token so we can check | 282 // just playing, we will still decode our recorded token so we can check |
236 // if we heard our own token. Whether or not we report the token to the | 283 // if we heard our own token. Whether or not we report the token to the |
237 // server is checked for and handled in OnTokensFound. | 284 // server is checked for and handled in OnTokensFound. |
238 | 285 |
239 bool decode_audible = | 286 bool decode_audible = |
240 should_be_recording_[AUDIBLE] || should_be_playing_[AUDIBLE]; | 287 should_be_recording_[AUDIBLE] || should_be_playing_[AUDIBLE]; |
241 bool decode_inaudible = | 288 bool decode_inaudible = |
242 should_be_recording_[INAUDIBLE] || should_be_playing_[INAUDIBLE]; | 289 should_be_recording_[INAUDIBLE] || should_be_playing_[INAUDIBLE]; |
243 | 290 |
244 if (decode_audible && decode_inaudible) { | 291 if (decode_audible && decode_inaudible) { |
245 whispernet_client_->DecodeSamples(BOTH, samples, token_length_); | 292 whispernet_client_->DecodeSamples(BOTH, samples, token_length_); |
246 } else if (decode_audible) { | 293 } else if (decode_audible) { |
247 whispernet_client_->DecodeSamples(AUDIBLE, samples, token_length_); | 294 whispernet_client_->DecodeSamples(AUDIBLE, samples, token_length_); |
248 } else if (decode_inaudible) { | 295 } else if (decode_inaudible) { |
249 whispernet_client_->DecodeSamples(INAUDIBLE, samples, token_length_); | 296 whispernet_client_->DecodeSamples(INAUDIBLE, samples, token_length_); |
250 } | 297 } |
251 } | 298 } |
252 | 299 |
300 void AudioManagerImpl::DumpToken(AudioType audio_type, | |
301 const std::string& token, | |
302 const media::AudioBus* samples) { | |
303 if (dump_tokens_dir_.empty()) | |
304 return; | |
305 | |
306 // Convert the samples to 16-bit integers. | |
307 std::vector<int16_t> int_samples; | |
308 int_samples.reserve(samples->frames()); | |
309 for (int i = 0; i < samples->frames(); i++) { | |
310 int_samples.push_back(round( | |
311 samples->channel(0)[i] * std::numeric_limits<int16_t>::max())); | |
312 } | |
313 DCHECK_EQ((int) int_samples.size(), samples->frames()); | |
ajm  2015/02/05 00:18:35
static_cast?
Also, perhaps:
DCHECK_EQ(kMonoChanne
Charlie  2015/02/05 01:53:46
Done.
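For reference, a minimal sketch of the conversion loop with both of ajm's
suggestions applied (a static_cast and a mono channel-count check). This is
only an illustration of the review feedback, not the landed patchset:

    // Illustrative only: conversion loop with the suggested cast and check.
    DCHECK_EQ(kMonoChannelCount, samples->channels());
    std::vector<int16_t> int_samples;
    int_samples.reserve(samples->frames());
    for (int i = 0; i < samples->frames(); ++i) {
      int_samples.push_back(static_cast<int16_t>(
          round(samples->channel(0)[i] * std::numeric_limits<int16_t>::max())));
    }
    DCHECK_EQ(static_cast<int>(int_samples.size()), samples->frames());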
314 | |
315 const std::string filename = AudioTypeToString(audio_type) + | |
316 " " + ToUrlSafe(token) + ".wav"; | |
ajm  2015/02/05 00:18:35
Do you really want whitespace in a filename?
Charlie  2015/02/05 01:53:46
Yes. Unfortunately the tokens can contain undersco
ajm  2015/02/05 02:03:32
Acknowledged.
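To illustrate the point being made here (with a made-up token, based on the
ToUrlSafe mapping above): '/' in a token becomes '_' in its URL-safe form, so
an underscore can appear inside a token, while a space never can, which makes
the space an unambiguous separator:

    // Illustrative only: '_' can occur inside a URL-safe token, ' ' cannot.
    // ToUrlSafe("Ab/Cd+E") == "Ab_Cd-E"
    const std::string filename =
        AudioTypeToString(AUDIBLE) + " " + ToUrlSafe("Ab/Cd+E") + ".wav";
    // filename == "audible Ab_Cd-E.wav"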
317 DVLOG(3) << "Dumping token " << filename; | |
318 webrtc::WavWriter writer(dump_tokens_dir_.Append(filename).value(), | |
319 kDefaultSampleRate, | |
320 kMonoChannelCount); | |
321 writer.WriteSamples(int_samples.data(), int_samples.size()); | |
322 } | |
323 | |
253 } // namespace copresence | 324 } // namespace copresence |