OLD | NEW |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "components/copresence/mediums/audio/audio_manager_impl.h" | 5 #include "components/copresence/mediums/audio/audio_manager_impl.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <limits> |
8 #include <vector> | 9 #include <vector> |
9 | 10 |
10 #include "base/bind.h" | 11 #include "base/bind.h" |
11 #include "base/bind_helpers.h" | 12 #include "base/bind_helpers.h" |
| 13 #include "base/command_line.h" |
12 #include "base/logging.h" | 14 #include "base/logging.h" |
13 #include "base/run_loop.h" | 15 #include "base/run_loop.h" |
14 #include "base/strings/string_util.h" | 16 #include "base/strings/string_util.h" |
| 17 #include "base/strings/stringprintf.h" |
| 18 #include "base/strings/sys_string_conversions.h" |
15 #include "base/time/time.h" | 19 #include "base/time/time.h" |
| 20 #include "components/copresence/copresence_switches.h" |
16 #include "components/copresence/mediums/audio/audio_player_impl.h" | 21 #include "components/copresence/mediums/audio/audio_player_impl.h" |
17 #include "components/copresence/mediums/audio/audio_recorder_impl.h" | 22 #include "components/copresence/mediums/audio/audio_recorder_impl.h" |
18 #include "components/copresence/public/copresence_constants.h" | 23 #include "components/copresence/public/copresence_constants.h" |
19 #include "components/copresence/public/whispernet_client.h" | 24 #include "components/copresence/public/whispernet_client.h" |
20 #include "content/public/browser/browser_thread.h" | 25 #include "content/public/browser/browser_thread.h" |
21 #include "media/audio/audio_manager.h" | 26 #include "media/audio/audio_manager.h" |
22 #include "media/audio/audio_manager_base.h" | 27 #include "media/audio/audio_manager_base.h" |
23 #include "media/base/audio_bus.h" | 28 #include "media/base/audio_bus.h" |
| 29 #include "third_party/webrtc/common_audio/wav_file.h" |
24 | 30 |
25 namespace copresence { | 31 namespace copresence { |
26 | 32 |
27 namespace { | 33 namespace { |
28 | 34 |
| 35 const int kSampleExpiryTimeMs = 60 * 60 * 1000; // 60 minutes. |
| 36 const int kMaxSamples = 10000; |
| 37 const int kTokenTimeoutMs = 2000; |
| 38 const int kMonoChannelCount = 1; |
| 39 |
29 // UrlSafe is defined as: | 40 // UrlSafe is defined as: |
30 // '/' represented by a '_' and '+' represented by a '-' | 41 // '/' represented by a '_' and '+' represented by a '-' |
31 // TODO(rkc): Move this processing to the whispernet wrapper. | 42 // TODO(ckehoe): Move this to a central place. |
32 std::string FromUrlSafe(std::string token) { | 43 std::string FromUrlSafe(std::string token) { |
33 base::ReplaceChars(token, "-", "+", &token); | 44 base::ReplaceChars(token, "-", "+", &token); |
34 base::ReplaceChars(token, "_", "/", &token); | 45 base::ReplaceChars(token, "_", "/", &token); |
35 return token; | 46 return token; |
36 } | 47 } |
| 48 std::string ToUrlSafe(std::string token) { |
| 49 base::ReplaceChars(token, "+", "-", &token); |
| 50 base::ReplaceChars(token, "/", "_", &token); |
| 51 return token; |
| 52 } |
37 | 53 |
38 const int kSampleExpiryTimeMs = 60 * 60 * 1000; // 60 minutes. | 54 // TODO(ckehoe): Move this to a central place. |
39 const int kMaxSamples = 10000; | 55 std::string AudioTypeToString(AudioType audio_type) { |
40 const int kTokenTimeoutMs = 2000; | 56 if (audio_type == AUDIBLE) |
| 57 return "audible"; |
| 58 if (audio_type == INAUDIBLE) |
| 59 return "inaudible"; |
| 60 |
| 61 NOTREACHED() << "Got unexpected token type " << audio_type; |
| 62 return std::string(); |
| 63 } |
| 64 |
| 65 bool ReadBooleanFlag(const std::string& flag, bool default_value) { |
| 66 const std::string flag_value = base::StringToLowerASCII( |
| 67 base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(flag)); |
| 68 if (flag_value == "true" || flag_value == "1") |
| 69 return true; |
| 70 if (flag_value == "false" || flag_value == "0") |
| 71 return false; |
| 72 LOG_IF(ERROR, !flag_value.empty()) |
| 73 << "Unrecognized value \"" << flag_value << " for flag " |
| 74 << flag << ". Defaulting to " << default_value; |
| 75 return default_value; |
| 76 } |
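A minimal standalone sketch of the flag parsing introduced above, for reference while reading the diff. ParseBooleanFlag is a hypothetical stand-in for ReadBooleanFlag (plain STL replaces base::CommandLine and base logging); it accepts "true"/"1" and "false"/"0" case-insensitively and falls back to the default otherwise:

  #include <algorithm>
  #include <cctype>
  #include <iostream>
  #include <string>

  // Hypothetical stand-in for ReadBooleanFlag(): same accepted spellings and
  // the same fall-back-to-default behavior, without the base:: dependencies.
  bool ParseBooleanFlag(std::string value, bool default_value) {
    std::transform(value.begin(), value.end(), value.begin(),
                   [](unsigned char c) { return std::tolower(c); });
    if (value == "true" || value == "1")
      return true;
    if (value == "false" || value == "0")
      return false;
    if (!value.empty())
      std::cerr << "Unrecognized value \"" << value << "\", using default\n";
    return default_value;
  }

  int main() {
    // e.g. --copresence-enable-audible-broadcast=FALSE -> playback disabled.
    std::cout << ParseBooleanFlag("FALSE", true) << "\n";  // prints 0
    // Flag omitted -> empty value -> default (enabled).
    std::cout << ParseBooleanFlag("", true) << "\n";       // prints 1
    // Unrecognized spelling -> error logged, default kept.
    std::cout << ParseBooleanFlag("yes", true) << "\n";    // prints 1
    return 0;
  }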
41 | 77 |
42 } // namespace | 78 } // namespace |
43 | 79 |
44 // Public methods. | 80 |
| 81 // Public functions. |
45 | 82 |
46 AudioManagerImpl::AudioManagerImpl() | 83 AudioManagerImpl::AudioManagerImpl() |
47 : whispernet_client_(nullptr), recorder_(nullptr) { | 84 : whispernet_client_(nullptr), recorder_(nullptr) { |
48 // TODO(rkc): Move all of these into initializer lists once it is allowed. | 85 // TODO(rkc): Move all of these into initializer lists once it is allowed. |
49 should_be_playing_[AUDIBLE] = false; | 86 should_be_playing_[AUDIBLE] = false; |
50 should_be_playing_[INAUDIBLE] = false; | 87 should_be_playing_[INAUDIBLE] = false; |
51 should_be_recording_[AUDIBLE] = false; | 88 should_be_recording_[AUDIBLE] = false; |
52 should_be_recording_[INAUDIBLE] = false; | 89 should_be_recording_[INAUDIBLE] = false; |
53 | 90 |
| 91 player_enabled_[AUDIBLE] = ReadBooleanFlag( |
| 92 switches::kCopresenceEnableAudibleBroadcast, true); |
| 93 player_enabled_[INAUDIBLE] = ReadBooleanFlag( |
| 94 switches::kCopresenceEnableInaudibleBroadcast, true); |
54 player_[AUDIBLE] = nullptr; | 95 player_[AUDIBLE] = nullptr; |
55 player_[INAUDIBLE] = nullptr; | 96 player_[INAUDIBLE] = nullptr; |
56 token_length_[0] = 0; | 97 token_length_[0] = 0; |
57 token_length_[1] = 0; | 98 token_length_[1] = 0; |
58 } | 99 } |
59 | 100 |
60 void AudioManagerImpl::Initialize(WhispernetClient* whispernet_client, | 101 void AudioManagerImpl::Initialize(WhispernetClient* whispernet_client, |
61 const TokensCallback& tokens_cb) { | 102 const TokensCallback& tokens_cb) { |
62 samples_cache_.resize(2); | 103 samples_cache_.resize(2); |
63 samples_cache_[AUDIBLE] = new SamplesMap( | 104 samples_cache_[AUDIBLE] = new SamplesMap( |
(...skipping 17 matching lines...) |
81 | 122 |
82 if (!player_[INAUDIBLE]) | 123 if (!player_[INAUDIBLE]) |
83 player_[INAUDIBLE] = new AudioPlayerImpl(); | 124 player_[INAUDIBLE] = new AudioPlayerImpl(); |
84 player_[INAUDIBLE]->Initialize(); | 125 player_[INAUDIBLE]->Initialize(); |
85 | 126 |
86 decode_cancelable_cb_.Reset(base::Bind( | 127 decode_cancelable_cb_.Reset(base::Bind( |
87 &AudioManagerImpl::DecodeSamplesConnector, base::Unretained(this))); | 128 &AudioManagerImpl::DecodeSamplesConnector, base::Unretained(this))); |
88 if (!recorder_) | 129 if (!recorder_) |
89 recorder_ = new AudioRecorderImpl(); | 130 recorder_ = new AudioRecorderImpl(); |
90 recorder_->Initialize(decode_cancelable_cb_.callback()); | 131 recorder_->Initialize(decode_cancelable_cb_.callback()); |
| 132 |
| 133 dump_tokens_dir_ = base::FilePath(base::CommandLine::ForCurrentProcess() |
| 134 ->GetSwitchValueNative(switches::kCopresenceDumpTokensToDir)); |
91 } | 135 } |
92 | 136 |
93 AudioManagerImpl::~AudioManagerImpl() { | 137 AudioManagerImpl::~AudioManagerImpl() { |
94 if (player_[AUDIBLE]) | 138 if (player_[AUDIBLE]) |
95 player_[AUDIBLE]->Finalize(); | 139 player_[AUDIBLE]->Finalize(); |
96 if (player_[INAUDIBLE]) | 140 if (player_[INAUDIBLE]) |
97 player_[INAUDIBLE]->Finalize(); | 141 player_[INAUDIBLE]->Finalize(); |
98 if (recorder_) | 142 if (recorder_) |
99 recorder_->Finalize(); | 143 recorder_->Finalize(); |
100 | 144 |
101 // Whispernet initialization may never have completed. | 145 // Whispernet initialization may never have completed. |
102 if (whispernet_client_) { | 146 if (whispernet_client_) { |
103 whispernet_client_->RegisterTokensCallback(TokensCallback()); | 147 whispernet_client_->RegisterTokensCallback(TokensCallback()); |
104 whispernet_client_->RegisterSamplesCallback(SamplesCallback()); | 148 whispernet_client_->RegisterSamplesCallback(SamplesCallback()); |
105 } | 149 } |
106 } | 150 } |
107 | 151 |
108 void AudioManagerImpl::StartPlaying(AudioType type) { | 152 void AudioManagerImpl::StartPlaying(AudioType type) { |
109 DCHECK(type == AUDIBLE || type == INAUDIBLE); | 153 DCHECK(type == AUDIBLE || type == INAUDIBLE); |
110 should_be_playing_[type] = true; | 154 should_be_playing_[type] = true; |
111 // If we don't have our token encoded yet, this check will be false, for now. | 155 // If we don't have our token encoded yet, this check will be false, for now. |
112 // Once our token is encoded, OnTokenEncoded will call UpdateToken, which | 156 // Once our token is encoded, OnTokenEncoded will call UpdateToken, which |
113 // will call this code again (if we're still supposed to be playing). | 157 // will call this code again (if we're still supposed to be playing). |
114 if (samples_cache_[type]->HasKey(playing_token_[type])) { | 158 if (samples_cache_[type]->HasKey(playing_token_[type])) { |
115 DCHECK(!playing_token_[type].empty()); | 159 DCHECK(!playing_token_[type].empty()); |
116 started_playing_[type] = base::Time::Now(); | 160 if (player_enabled_[type]) { |
117 player_[type]->Play(samples_cache_[type]->GetValue(playing_token_[type])); | 161 started_playing_[type] = base::Time::Now(); |
118 // If we're playing, we always record to hear what we are playing. | 162 player_[type]->Play(samples_cache_[type]->GetValue(playing_token_[type])); |
119 recorder_->Record(); | 163 |
| 164 // If we're playing, we always record to hear what we are playing. |
| 165 recorder_->Record(); |
| 166 } else { |
| 167 DVLOG(3) << "Skipping playback for disabled " << AudioTypeToString(type) |
| 168 << " player."; |
| 169 } |
120 } | 170 } |
121 } | 171 } |
122 | 172 |
123 void AudioManagerImpl::StopPlaying(AudioType type) { | 173 void AudioManagerImpl::StopPlaying(AudioType type) { |
124 DCHECK(type == AUDIBLE || type == INAUDIBLE); | 174 DCHECK(type == AUDIBLE || type == INAUDIBLE); |
125 should_be_playing_[type] = false; | 175 should_be_playing_[type] = false; |
126 player_[type]->Stop(); | 176 player_[type]->Stop(); |
127 // If we were only recording to hear our own played tokens, stop. | 177 // If we were only recording to hear our own played tokens, stop. |
128 if (!should_be_recording_[AUDIBLE] && !should_be_recording_[INAUDIBLE]) | 178 if (!should_be_recording_[AUDIBLE] && !should_be_recording_[INAUDIBLE]) |
129 recorder_->Stop(); | 179 recorder_->Stop(); |
(...skipping 36 matching lines...) |
166 if (base::Time::Now() - started_playing_[type] < tokenTimeout) | 216 if (base::Time::Now() - started_playing_[type] < tokenTimeout) |
167 return true; | 217 return true; |
168 | 218 |
169 return base::Time::Now() - heard_own_token_[type] < tokenTimeout; | 219 return base::Time::Now() - heard_own_token_[type] < tokenTimeout; |
170 } | 220 } |
171 | 221 |
172 void AudioManagerImpl::SetTokenLength(AudioType type, size_t token_length) { | 222 void AudioManagerImpl::SetTokenLength(AudioType type, size_t token_length) { |
173 token_length_[type] = token_length; | 223 token_length_[type] = token_length; |
174 } | 224 } |
175 | 225 |
176 // Private methods. | 226 |
| 227 // Private functions. |
177 | 228 |
178 void AudioManagerImpl::OnTokenEncoded( | 229 void AudioManagerImpl::OnTokenEncoded( |
179 AudioType type, | 230 AudioType type, |
180 const std::string& token, | 231 const std::string& token, |
181 const scoped_refptr<media::AudioBusRefCounted>& samples) { | 232 const scoped_refptr<media::AudioBusRefCounted>& samples) { |
182 samples_cache_[type]->Add(token, samples); | 233 samples_cache_[type]->Add(token, samples); |
| 234 DumpToken(type, token, samples.get()); |
183 UpdateToken(type, token); | 235 UpdateToken(type, token); |
184 } | 236 } |
185 | 237 |
186 void AudioManagerImpl::OnTokensFound(const std::vector<AudioToken>& tokens) { | 238 void AudioManagerImpl::OnTokensFound(const std::vector<AudioToken>& tokens) { |
187 std::vector<AudioToken> tokens_to_report; | 239 std::vector<AudioToken> tokens_to_report; |
188 for (const auto& token : tokens) { | 240 for (const auto& token : tokens) { |
189 AudioType type = token.audible ? AUDIBLE : INAUDIBLE; | 241 AudioType type = token.audible ? AUDIBLE : INAUDIBLE; |
190 if (playing_token_[type] == token.token) | 242 if (playing_token_[type] == token.token) |
191 heard_own_token_[type] = base::Time::Now(); | 243 heard_own_token_[type] = base::Time::Now(); |
192 | 244 |
(...skipping 22 matching lines...) |
215 RestartPlaying(type); | 267 RestartPlaying(type); |
216 } | 268 } |
217 | 269 |
218 void AudioManagerImpl::RestartPlaying(AudioType type) { | 270 void AudioManagerImpl::RestartPlaying(AudioType type) { |
219 DCHECK(type == AUDIBLE || type == INAUDIBLE); | 271 DCHECK(type == AUDIBLE || type == INAUDIBLE); |
220 // We should already have this token in the cache. This function is not | 272 // We should already have this token in the cache. This function is not |
221 // called from anywhere except update token and only once we have our samples | 273 // called from anywhere except update token and only once we have our samples |
222 // in the cache. | 274 // in the cache. |
223 DCHECK(samples_cache_[type]->HasKey(playing_token_[type])); | 275 DCHECK(samples_cache_[type]->HasKey(playing_token_[type])); |
224 | 276 |
225 started_playing_[type] = base::Time::Now(); | |
226 player_[type]->Stop(); | 277 player_[type]->Stop(); |
227 player_[type]->Play(samples_cache_[type]->GetValue(playing_token_[type])); | 278 StartPlaying(type); |
228 // If we're playing, we always record to hear what we are playing. | |
229 recorder_->Record(); | |
230 } | 279 } |
231 | 280 |
232 void AudioManagerImpl::DecodeSamplesConnector(const std::string& samples) { | 281 void AudioManagerImpl::DecodeSamplesConnector(const std::string& samples) { |
233 // If we are either supposed to be recording *or* playing, audible or | 282 // If we are either supposed to be recording *or* playing, audible or |
234 // inaudible, we should be decoding that type. This is so that if we are | 283 // inaudible, we should be decoding that type. This is so that if we are |
235 // just playing, we will still decode our recorded token so we can check | 284 // just playing, we will still decode our recorded token so we can check |
236 // if we heard our own token. Whether or not we report the token to the | 285 // if we heard our own token. Whether or not we report the token to the |
237 // server is checked for and handled in OnTokensFound. | 286 // server is checked for and handled in OnTokensFound. |
238 | 287 |
239 bool decode_audible = | 288 bool decode_audible = |
240 should_be_recording_[AUDIBLE] || should_be_playing_[AUDIBLE]; | 289 should_be_recording_[AUDIBLE] || should_be_playing_[AUDIBLE]; |
241 bool decode_inaudible = | 290 bool decode_inaudible = |
242 should_be_recording_[INAUDIBLE] || should_be_playing_[INAUDIBLE]; | 291 should_be_recording_[INAUDIBLE] || should_be_playing_[INAUDIBLE]; |
243 | 292 |
244 if (decode_audible && decode_inaudible) { | 293 if (decode_audible && decode_inaudible) { |
245 whispernet_client_->DecodeSamples(BOTH, samples, token_length_); | 294 whispernet_client_->DecodeSamples(BOTH, samples, token_length_); |
246 } else if (decode_audible) { | 295 } else if (decode_audible) { |
247 whispernet_client_->DecodeSamples(AUDIBLE, samples, token_length_); | 296 whispernet_client_->DecodeSamples(AUDIBLE, samples, token_length_); |
248 } else if (decode_inaudible) { | 297 } else if (decode_inaudible) { |
249 whispernet_client_->DecodeSamples(INAUDIBLE, samples, token_length_); | 298 whispernet_client_->DecodeSamples(INAUDIBLE, samples, token_length_); |
250 } | 299 } |
251 } | 300 } |
252 | 301 |
| 302 void AudioManagerImpl::DumpToken(AudioType audio_type, |
| 303 const std::string& token, |
| 304 const media::AudioBus* samples) { |
| 305 if (dump_tokens_dir_.empty()) |
| 306 return; |
| 307 |
| 308 // Convert the samples to 16-bit integers. |
| 309 std::vector<int16_t> int_samples; |
| 310 int_samples.reserve(samples->frames()); |
| 311 for (int i = 0; i < samples->frames(); i++) { |
| 312 int_samples.push_back(round( |
| 313 samples->channel(0)[i] * std::numeric_limits<int16_t>::max())); |
| 314 } |
| 315 DCHECK_EQ(static_cast<int>(int_samples.size()), samples->frames()); |
| 316 DCHECK_EQ(kMonoChannelCount, samples->channels()); |
| 317 |
| 318 const std::string filename = base::StringPrintf("%s %s.wav", |
| 319 AudioTypeToString(audio_type).c_str(), ToUrlSafe(token).c_str()); |
| 320 DVLOG(3) << "Dumping token " << filename; |
| 321 |
| 322 std::string file_str; |
| 323 #if defined(OS_WIN) |
| 324 base::FilePath file_path = dump_tokens_dir_.Append( |
| 325 base::SysNativeMBToWide(filename)); |
| 326 file_str = base::SysWideToNativeMB(file_path.value()); |
| 327 #else |
| 328 file_str = dump_tokens_dir_.Append(filename).value(); |
| 329 #endif |
| 330 |
| 331 webrtc::WavWriter writer(file_str, kDefaultSampleRate, kMonoChannelCount); |
| 332 writer.WriteSamples(int_samples.data(), int_samples.size()); |
| 333 } |
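DumpToken() above scales each normalized float sample in [-1, 1] by the int16_t maximum and rounds before handing the result to the WAV writer. A small self-contained sketch of just that conversion (the webrtc::WavWriter call is omitted, and the sample values are made up for illustration):

  #include <cmath>
  #include <cstdint>
  #include <iostream>
  #include <limits>
  #include <vector>

  int main() {
    // Normalized float samples as they would come out of samples->channel(0).
    const std::vector<float> float_samples = {0.0f, 0.5f, -0.5f, 1.0f, -1.0f};
    std::vector<int16_t> int_samples;
    int_samples.reserve(float_samples.size());
    for (float s : float_samples) {
      int_samples.push_back(static_cast<int16_t>(
          std::round(s * std::numeric_limits<int16_t>::max())));
    }
    // Prints 0 16384 -16384 32767 -32767; note that -1.0 maps to -32767,
    // not -32768, because the scale factor is the positive maximum.
    for (int16_t s : int_samples)
      std::cout << s << " ";
    std::cout << "\n";
    return 0;
  }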
| 334 |
253 } // namespace copresence | 335 } // namespace copresence |