OLD | NEW |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "components/copresence/mediums/audio/audio_manager_impl.h" | 5 #include "components/copresence/mediums/audio/audio_manager_impl.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <limits> |
8 #include <vector> | 9 #include <vector> |
9 | 10 |
10 #include "base/bind.h" | 11 #include "base/bind.h" |
11 #include "base/bind_helpers.h" | 12 #include "base/bind_helpers.h" |
| 13 #include "base/command_line.h" |
| 14 #include "base/files/file_util.h" |
12 #include "base/logging.h" | 15 #include "base/logging.h" |
13 #include "base/run_loop.h" | 16 #include "base/run_loop.h" |
14 #include "base/strings/string_util.h" | 17 #include "base/strings/string_util.h" |
15 #include "base/time/time.h" | 18 #include "base/time/time.h" |
| 19 #include "components/copresence/copresence_switches.h" |
16 #include "components/copresence/mediums/audio/audio_player_impl.h" | 20 #include "components/copresence/mediums/audio/audio_player_impl.h" |
17 #include "components/copresence/mediums/audio/audio_recorder_impl.h" | 21 #include "components/copresence/mediums/audio/audio_recorder_impl.h" |
18 #include "components/copresence/public/copresence_constants.h" | 22 #include "components/copresence/public/copresence_constants.h" |
19 #include "components/copresence/public/whispernet_client.h" | 23 #include "components/copresence/public/whispernet_client.h" |
20 #include "content/public/browser/browser_thread.h" | 24 #include "content/public/browser/browser_thread.h" |
21 #include "media/audio/audio_manager.h" | 25 #include "media/audio/audio_manager.h" |
22 #include "media/audio/audio_manager_base.h" | 26 #include "media/audio/audio_manager_base.h" |
23 #include "media/base/audio_bus.h" | 27 #include "media/base/audio_bus.h" |
| 28 #include "third_party/webrtc/common_audio/wav_file.h" |
24 | 29 |
25 namespace copresence { | 30 namespace copresence { |
26 | 31 |
27 namespace { | 32 namespace { |
28 | 33 |
| 34 const int kSampleExpiryTimeMs = 60 * 60 * 1000; // 60 minutes. |
| 35 const int kMaxSamples = 10000; |
| 36 const int kTokenTimeoutMs = 2000; |
| 37 const int kMonoChannelCount = 1; |
| 38 |
29 // UrlSafe is defined as: | 39 // UrlSafe is defined as: |
30 // '/' represented by a '_' and '+' represented by a '-' | 40 // '/' represented by a '_' and '+' represented by a '-' |
31 // TODO(rkc): Move this processing to the whispernet wrapper. | 41 // TODO(ckehoe): Move this to a central place. |
32 std::string FromUrlSafe(std::string token) { | 42 std::string FromUrlSafe(std::string token) { |
33 base::ReplaceChars(token, "-", "+", &token); | 43 base::ReplaceChars(token, "-", "+", &token); |
34 base::ReplaceChars(token, "_", "/", &token); | 44 base::ReplaceChars(token, "_", "/", &token); |
35 return token; | 45 return token; |
36 } | 46 } |
| 47 std::string ToUrlSafe(std::string token) { |
| 48 base::ReplaceChars(token, "+", "-", &token); |
| 49 base::ReplaceChars(token, "/", "_", &token); |
| 50 return token; |
| 51 } |
37 | 52 |
38 const int kSampleExpiryTimeMs = 60 * 60 * 1000; // 60 minutes. | 53 // TODO(ckehoe): Move this to a central place. |
39 const int kMaxSamples = 10000; | 54 std::string AudioTypeToString(AudioType audio_type) { |
40 const int kTokenTimeoutMs = 2000; | 55 if (audio_type == AUDIBLE) |
| 56 return "audible"; |
| 57 if (audio_type == INAUDIBLE) |
| 58 return "inaudible"; |
| 59 |
| 60 NOTREACHED() << "Got unexpected token type " << audio_type; |
| 61 return std::string(); |
| 62 } |
| 63 |
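| // Parses the given boolean command-line flag. "true"/"1" and "false"/"0" are |
| // recognized; any other (or missing) value falls back to |default_value|. |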
| 64 bool ReadBooleanFlag(const std::string& flag, bool default_value) { |
| 65 const std::string flag_value = base::StringToLowerASCII( |
| 66 base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(flag)); |
| 67 if (flag_value == "true" || flag_value == "1") |
| 68 return true; |
| 69 if (flag_value == "false" || flag_value == "0") |
| 70 return false; |
| 71 LOG_IF(ERROR, !flag_value.empty()) |
| 72 << "Unrecognized value \"" << flag_value << "\" for flag " |
| 73 << flag << ". Defaulting to " << default_value; |
| 74 return default_value; |
| 75 } |
41 | 76 |
42 } // namespace | 77 } // namespace |
43 | 78 |
44 // Public methods. | 79 |
| 80 // Public functions. |
45 | 81 |
46 AudioManagerImpl::AudioManagerImpl() | 82 AudioManagerImpl::AudioManagerImpl() |
47 : whispernet_client_(nullptr), recorder_(nullptr) { | 83 : whispernet_client_(nullptr), recorder_(nullptr) { |
48 // TODO(rkc): Move all of these into initializer lists once it is allowed. | 84 // TODO(rkc): Move all of these into initializer lists once it is allowed. |
49 should_be_playing_[AUDIBLE] = false; | 85 should_be_playing_[AUDIBLE] = false; |
50 should_be_playing_[INAUDIBLE] = false; | 86 should_be_playing_[INAUDIBLE] = false; |
51 should_be_recording_[AUDIBLE] = false; | 87 should_be_recording_[AUDIBLE] = false; |
52 should_be_recording_[INAUDIBLE] = false; | 88 should_be_recording_[INAUDIBLE] = false; |
53 | 89 |
| 90 player_enabled_[AUDIBLE] = ReadBooleanFlag( |
| 91 switches::kCopresenceEnableAudibleBroadcast, true); |
| 92 player_enabled_[INAUDIBLE] = ReadBooleanFlag( |
| 93 switches::kCopresenceEnableInaudibleBroadcast, true); |
54 player_[AUDIBLE] = nullptr; | 94 player_[AUDIBLE] = nullptr; |
55 player_[INAUDIBLE] = nullptr; | 95 player_[INAUDIBLE] = nullptr; |
56 token_length_[0] = 0; | 96 token_length_[0] = 0; |
57 token_length_[1] = 0; | 97 token_length_[1] = 0; |
58 } | 98 } |
59 | 99 |
60 void AudioManagerImpl::Initialize(WhispernetClient* whispernet_client, | 100 void AudioManagerImpl::Initialize(WhispernetClient* whispernet_client, |
61 const TokensCallback& tokens_cb) { | 101 const TokensCallback& tokens_cb) { |
62 samples_cache_.resize(2); | 102 samples_cache_.resize(2); |
63 samples_cache_[AUDIBLE] = new SamplesMap( | 103 samples_cache_[AUDIBLE] = new SamplesMap( |
(...skipping 17 matching lines...) |
81 | 121 |
82 if (!player_[INAUDIBLE]) | 122 if (!player_[INAUDIBLE]) |
83 player_[INAUDIBLE] = new AudioPlayerImpl(); | 123 player_[INAUDIBLE] = new AudioPlayerImpl(); |
84 player_[INAUDIBLE]->Initialize(); | 124 player_[INAUDIBLE]->Initialize(); |
85 | 125 |
86 decode_cancelable_cb_.Reset(base::Bind( | 126 decode_cancelable_cb_.Reset(base::Bind( |
87 &AudioManagerImpl::DecodeSamplesConnector, base::Unretained(this))); | 127 &AudioManagerImpl::DecodeSamplesConnector, base::Unretained(this))); |
88 if (!recorder_) | 128 if (!recorder_) |
89 recorder_ = new AudioRecorderImpl(); | 129 recorder_ = new AudioRecorderImpl(); |
90 recorder_->Initialize(decode_cancelable_cb_.callback()); | 130 recorder_->Initialize(decode_cancelable_cb_.callback()); |
| 131 |
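| // If a token dump directory was passed on the command line, remember it and |
| // check on the FILE thread that it is actually usable. |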
| 132 dump_tokens_dir_ = base::FilePath(base::CommandLine::ForCurrentProcess() |
| 133 ->GetSwitchValueASCII(switches::kCopresenceDumpTokensToDir)); |
| 134 content::BrowserThread::PostTask( |
| 135 content::BrowserThread::FILE, |
| 136 FROM_HERE, |
| 137 base::Bind(&AudioManagerImpl::ValidateTokenDir, |
| 138 base::Unretained(this))); |
91 } | 139 } |
92 | 140 |
93 AudioManagerImpl::~AudioManagerImpl() { | 141 AudioManagerImpl::~AudioManagerImpl() { |
94 if (player_[AUDIBLE]) | 142 if (player_[AUDIBLE]) |
95 player_[AUDIBLE]->Finalize(); | 143 player_[AUDIBLE]->Finalize(); |
96 if (player_[INAUDIBLE]) | 144 if (player_[INAUDIBLE]) |
97 player_[INAUDIBLE]->Finalize(); | 145 player_[INAUDIBLE]->Finalize(); |
98 if (recorder_) | 146 if (recorder_) |
99 recorder_->Finalize(); | 147 recorder_->Finalize(); |
100 | 148 |
101 // Whispernet initialization may never have completed. | 149 // Whispernet initialization may never have completed. |
102 if (whispernet_client_) { | 150 if (whispernet_client_) { |
103 whispernet_client_->RegisterTokensCallback(TokensCallback()); | 151 whispernet_client_->RegisterTokensCallback(TokensCallback()); |
104 whispernet_client_->RegisterSamplesCallback(SamplesCallback()); | 152 whispernet_client_->RegisterSamplesCallback(SamplesCallback()); |
105 } | 153 } |
106 } | 154 } |
107 | 155 |
108 void AudioManagerImpl::StartPlaying(AudioType type) { | 156 void AudioManagerImpl::StartPlaying(AudioType type) { |
109 DCHECK(type == AUDIBLE || type == INAUDIBLE); | 157 DCHECK(type == AUDIBLE || type == INAUDIBLE); |
110 should_be_playing_[type] = true; | 158 should_be_playing_[type] = true; |
111 // If we don't have our token encoded yet, this check will fail for now. | 159 // If we don't have our token encoded yet, this check will fail for now. |
112 // Once our token is encoded, OnTokenEncoded will call UpdateToken, which | 160 // Once our token is encoded, OnTokenEncoded will call UpdateToken, which |
113 // will call this code again (if we're still supposed to be playing). | 161 // will call this code again (if we're still supposed to be playing). |
114 if (samples_cache_[type]->HasKey(playing_token_[type])) { | 162 if (samples_cache_[type]->HasKey(playing_token_[type])) { |
115 DCHECK(!playing_token_[type].empty()); | 163 DCHECK(!playing_token_[type].empty()); |
116 started_playing_[type] = base::Time::Now(); | 164 if (player_enabled_[type]) { |
117 player_[type]->Play(samples_cache_[type]->GetValue(playing_token_[type])); | 165 started_playing_[type] = base::Time::Now(); |
118 // If we're playing, we always record to hear what we are playing. | 166 player_[type]->Play(samples_cache_[type]->GetValue(playing_token_[type])); |
119 recorder_->Record(); | 167 |
| 168 // If we're playing, we always record to hear what we are playing. |
| 169 recorder_->Record(); |
| 170 } else { |
| 171 DVLOG(3) << "Skipping playback for disabled " << AudioTypeToString(type) |
| 172 << " player."; |
| 173 } |
120 } | 174 } |
121 } | 175 } |
122 | 176 |
123 void AudioManagerImpl::StopPlaying(AudioType type) { | 177 void AudioManagerImpl::StopPlaying(AudioType type) { |
124 DCHECK(type == AUDIBLE || type == INAUDIBLE); | 178 DCHECK(type == AUDIBLE || type == INAUDIBLE); |
125 should_be_playing_[type] = false; | 179 should_be_playing_[type] = false; |
126 player_[type]->Stop(); | 180 player_[type]->Stop(); |
127 // If we were only recording to hear our own played tokens, stop. | 181 // If we were only recording to hear our own played tokens, stop. |
128 if (!should_be_recording_[AUDIBLE] && !should_be_recording_[INAUDIBLE]) | 182 if (!should_be_recording_[AUDIBLE] && !should_be_recording_[INAUDIBLE]) |
129 recorder_->Stop(); | 183 recorder_->Stop(); |
(...skipping 36 matching lines...) |
166 if (base::Time::Now() - started_playing_[type] < tokenTimeout) | 220 if (base::Time::Now() - started_playing_[type] < tokenTimeout) |
167 return true; | 221 return true; |
168 | 222 |
169 return base::Time::Now() - heard_own_token_[type] < tokenTimeout; | 223 return base::Time::Now() - heard_own_token_[type] < tokenTimeout; |
170 } | 224 } |
171 | 225 |
172 void AudioManagerImpl::SetTokenLength(AudioType type, size_t token_length) { | 226 void AudioManagerImpl::SetTokenLength(AudioType type, size_t token_length) { |
173 token_length_[type] = token_length; | 227 token_length_[type] = token_length; |
174 } | 228 } |
175 | 229 |
176 // Private methods. | 230 |
| 231 // Private functions. |
177 | 232 |
178 void AudioManagerImpl::OnTokenEncoded( | 233 void AudioManagerImpl::OnTokenEncoded( |
179 AudioType type, | 234 AudioType type, |
180 const std::string& token, | 235 const std::string& token, |
181 const scoped_refptr<media::AudioBusRefCounted>& samples) { | 236 const scoped_refptr<media::AudioBusRefCounted>& samples) { |
182 samples_cache_[type]->Add(token, samples); | 237 samples_cache_[type]->Add(token, samples); |
| 238 DumpToken(type, token, samples.get()); |
183 UpdateToken(type, token); | 239 UpdateToken(type, token); |
184 } | 240 } |
185 | 241 |
186 void AudioManagerImpl::OnTokensFound(const std::vector<AudioToken>& tokens) { | 242 void AudioManagerImpl::OnTokensFound(const std::vector<AudioToken>& tokens) { |
187 std::vector<AudioToken> tokens_to_report; | 243 std::vector<AudioToken> tokens_to_report; |
188 for (const auto& token : tokens) { | 244 for (const auto& token : tokens) { |
189 AudioType type = token.audible ? AUDIBLE : INAUDIBLE; | 245 AudioType type = token.audible ? AUDIBLE : INAUDIBLE; |
190 if (playing_token_[type] == token.token) | 246 if (playing_token_[type] == token.token) |
191 heard_own_token_[type] = base::Time::Now(); | 247 heard_own_token_[type] = base::Time::Now(); |
192 | 248 |
(...skipping 22 matching lines...) |
215 RestartPlaying(type); | 271 RestartPlaying(type); |
216 } | 272 } |
217 | 273 |
218 void AudioManagerImpl::RestartPlaying(AudioType type) { | 274 void AudioManagerImpl::RestartPlaying(AudioType type) { |
219 DCHECK(type == AUDIBLE || type == INAUDIBLE); | 275 DCHECK(type == AUDIBLE || type == INAUDIBLE); |
220 // We should already have this token in the cache. This function is only | 276 // We should already have this token in the cache. This function is only |
221 // called from UpdateToken, and only once our samples are already in the | 277 // called from UpdateToken, and only once our samples are already in the |
222 // cache. | 278 // cache. |
223 DCHECK(samples_cache_[type]->HasKey(playing_token_[type])); | 279 DCHECK(samples_cache_[type]->HasKey(playing_token_[type])); |
224 | 280 |
225 started_playing_[type] = base::Time::Now(); | |
226 player_[type]->Stop(); | 281 player_[type]->Stop(); |
227 player_[type]->Play(samples_cache_[type]->GetValue(playing_token_[type])); | 282 StartPlaying(type); |
228 // If we're playing, we always record to hear what we are playing. | |
229 recorder_->Record(); | |
230 } | 283 } |
231 | 284 |
232 void AudioManagerImpl::DecodeSamplesConnector(const std::string& samples) { | 285 void AudioManagerImpl::DecodeSamplesConnector(const std::string& samples) { |
233 // If we are either supposed to be recording *or* playing, audible or | 286 // If we are either supposed to be recording *or* playing, audible or |
234 // inaudible, we should be decoding that type. This is so that if we are | 287 // inaudible, we should be decoding that type. This is so that if we are |
235 // just playing, we will still decode our recorded token so we can check | 288 // just playing, we will still decode our recorded token so we can check |
236 // if we heard our own token. Whether or not we report the token to the | 289 // if we heard our own token. Whether or not we report the token to the |
237 // server is checked for and handled in OnTokensFound. | 290 // server is checked for and handled in OnTokensFound. |
238 | 291 |
239 bool decode_audible = | 292 bool decode_audible = |
240 should_be_recording_[AUDIBLE] || should_be_playing_[AUDIBLE]; | 293 should_be_recording_[AUDIBLE] || should_be_playing_[AUDIBLE]; |
241 bool decode_inaudible = | 294 bool decode_inaudible = |
242 should_be_recording_[INAUDIBLE] || should_be_playing_[INAUDIBLE]; | 295 should_be_recording_[INAUDIBLE] || should_be_playing_[INAUDIBLE]; |
243 | 296 |
244 if (decode_audible && decode_inaudible) { | 297 if (decode_audible && decode_inaudible) { |
245 whispernet_client_->DecodeSamples(BOTH, samples, token_length_); | 298 whispernet_client_->DecodeSamples(BOTH, samples, token_length_); |
246 } else if (decode_audible) { | 299 } else if (decode_audible) { |
247 whispernet_client_->DecodeSamples(AUDIBLE, samples, token_length_); | 300 whispernet_client_->DecodeSamples(AUDIBLE, samples, token_length_); |
248 } else if (decode_inaudible) { | 301 } else if (decode_inaudible) { |
249 whispernet_client_->DecodeSamples(INAUDIBLE, samples, token_length_); | 302 whispernet_client_->DecodeSamples(INAUDIBLE, samples, token_length_); |
250 } | 303 } |
251 } | 304 } |
252 | 305 |
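| // Clears |dump_tokens_dir_| if it does not name a writable directory. |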
| 306 void AudioManagerImpl::ValidateTokenDir() { |
| 307 if (dump_tokens_dir_.empty()) |
| 308 return; |
| 309 |
| 310 if (!base::DirectoryExists(dump_tokens_dir_) || |
| 311 !base::PathIsWritable(dump_tokens_dir_)) { |
| 312 LOG(ERROR) << "Invalid token dump directory \"" |
| 313 << dump_tokens_dir_.value() << "\""; |
| 314 dump_tokens_dir_.clear(); |
| 315 } |
| 316 } |
| 317 |
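| // Writes the encoded |samples| for |token| to a mono 16-bit WAV file named |
| // "<type> <url-safe token>.wav" in |dump_tokens_dir_|. |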
| 318 void AudioManagerImpl::DumpToken(AudioType audio_type, |
| 319 const std::string& token, |
| 320 const media::AudioBus* samples) { |
| 321 if (dump_tokens_dir_.empty()) |
| 322 return; |
| 323 |
| 324 // Convert the samples to 16-bit integers. |
| 325 std::vector<int16_t> int_samples; |
| 326 int_samples.reserve(samples->frames()); |
| 327 for (int i = 0; i < samples->frames(); i++) { |
| 328 int_samples.push_back(round( |
| 329 samples->channel(0)[i] * std::numeric_limits<int16_t>::max())); |
| 330 } |
| 331 DCHECK_EQ(static_cast<int>(int_samples.size()), samples->frames()); |
| 332 |
| 333 const std::string filename = AudioTypeToString(audio_type) + |
| 334 " " + ToUrlSafe(token) + ".wav"; |
| 335 DVLOG(3) << "Dumping token " << filename; |
| 336 DCHECK_EQ(kMonoChannelCount, samples->channels()); |
| 337 webrtc::WavWriter writer(dump_tokens_dir_.Append(filename).value(), |
| 338 kDefaultSampleRate, |
| 339 kMonoChannelCount); |
| 340 writer.WriteSamples(int_samples.data(), int_samples.size()); |
| 341 } |
| 342 |
253 } // namespace copresence | 343 } // namespace copresence |