| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 110 matching lines...) |
| 121 _audioFramePool(NULL), | 121 _audioFramePool(NULL), |
| 122 _participantList(), | 122 _participantList(), |
| 123 _additionalParticipantList(), | 123 _additionalParticipantList(), |
| 124 _numMixedParticipants(0), | 124 _numMixedParticipants(0), |
| 125 use_limiter_(true), | 125 use_limiter_(true), |
| 126 _timeStamp(0), | 126 _timeStamp(0), |
| 127 _timeScheduler(kProcessPeriodicityInMs), | 127 _timeScheduler(kProcessPeriodicityInMs), |
| 128 _processCalls(0) {} | 128 _processCalls(0) {} |
| 129 | 129 |
| 130 bool AudioConferenceMixerImpl::Init() { | 130 bool AudioConferenceMixerImpl::Init() { |
| 131 _crit.reset(CriticalSectionWrapper::CreateCriticalSection()); | |
| 132 if (_crit.get() == NULL) | |
| 133 return false; | |
| 134 | |
| 135 _cbCrit.reset(CriticalSectionWrapper::CreateCriticalSection()); | |
| 136 if(_cbCrit.get() == NULL) | |
| 137 return false; | |
| 138 | |
| 139 Config config; | 131 Config config; |
| 140 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); | 132 config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); |
| 141 _limiter.reset(AudioProcessing::Create(config)); | 133 _limiter.reset(AudioProcessing::Create(config)); |
| 142 if(!_limiter.get()) | 134 if(!_limiter.get()) |
| 143 return false; | 135 return false; |
| 144 | 136 |
| 145 MemoryPool<AudioFrame>::CreateMemoryPool(_audioFramePool, | 137 MemoryPool<AudioFrame>::CreateMemoryPool(_audioFramePool, |
| 146 DEFAULT_AUDIO_FRAME_POOLSIZE); | 138 DEFAULT_AUDIO_FRAME_POOLSIZE); |
| 147 if(_audioFramePool == NULL) | 139 if(_audioFramePool == NULL) |
| 148 return false; | 140 return false; |
| (...skipping 25 matching lines...) |
| 174 } | 166 } |
| 175 | 167 |
| 176 AudioConferenceMixerImpl::~AudioConferenceMixerImpl() { | 168 AudioConferenceMixerImpl::~AudioConferenceMixerImpl() { |
| 177 MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool); | 169 MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool); |
| 178 assert(_audioFramePool == NULL); | 170 assert(_audioFramePool == NULL); |
| 179 } | 171 } |
| 180 | 172 |
| 181 // Process should be called every kProcessPeriodicityInMs ms | 173 // Process should be called every kProcessPeriodicityInMs ms |
| 182 int64_t AudioConferenceMixerImpl::TimeUntilNextProcess() { | 174 int64_t AudioConferenceMixerImpl::TimeUntilNextProcess() { |
| 183 int64_t timeUntilNextProcess = 0; | 175 int64_t timeUntilNextProcess = 0; |
| 184 CriticalSectionScoped cs(_crit.get()); | 176 rtc::CritScope cs(&_crit); |
| 185 if(_timeScheduler.TimeToNextUpdate(timeUntilNextProcess) != 0) { | 177 if(_timeScheduler.TimeToNextUpdate(timeUntilNextProcess) != 0) { |
| 186 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 178 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
| 187 "failed in TimeToNextUpdate() call"); | 179 "failed in TimeToNextUpdate() call"); |
| 188 // Sanity check | 180 // Sanity check |
| 189 assert(false); | 181 assert(false); |
| 190 return -1; | 182 return -1; |
| 191 } | 183 } |
| 192 return timeUntilNextProcess; | 184 return timeUntilNextProcess; |
| 193 } | 185 } |
| 194 | 186 |
| 195 void AudioConferenceMixerImpl::Process() { | 187 void AudioConferenceMixerImpl::Process() { |
| 196 size_t remainingParticipantsAllowedToMix = | 188 size_t remainingParticipantsAllowedToMix = |
| 197 kMaximumAmountOfMixedParticipants; | 189 kMaximumAmountOfMixedParticipants; |
| 198 { | 190 { |
| 199 CriticalSectionScoped cs(_crit.get()); | 191 rtc::CritScope cs(&_crit); |
| 200 assert(_processCalls == 0); | 192 assert(_processCalls == 0); |
| 201 _processCalls++; | 193 _processCalls++; |
| 202 | 194 |
| 203 // Let the scheduler know that we are running one iteration. | 195 // Let the scheduler know that we are running one iteration. |
| 204 _timeScheduler.UpdateScheduler(); | 196 _timeScheduler.UpdateScheduler(); |
| 205 } | 197 } |
| 206 | 198 |
| 207 AudioFrameList mixList; | 199 AudioFrameList mixList; |
| 208 AudioFrameList rampOutList; | 200 AudioFrameList rampOutList; |
| 209 AudioFrameList additionalFramesList; | 201 AudioFrameList additionalFramesList; |
| 210 std::map<int, MixerParticipant*> mixedParticipantsMap; | 202 std::map<int, MixerParticipant*> mixedParticipantsMap; |
| 211 { | 203 { |
| 212 CriticalSectionScoped cs(_cbCrit.get()); | 204 rtc::CritScope cs(&_cbCrit); |
| 213 | 205 |
| 214 int32_t lowFreq = GetLowestMixingFrequency(); | 206 int32_t lowFreq = GetLowestMixingFrequency(); |
| 215 // SILK can run in 12 kHz and 24 kHz. These frequencies are not | 207 // SILK can run in 12 kHz and 24 kHz. These frequencies are not |
| 216 // supported so use the closest higher frequency to not lose any | 208 // supported so use the closest higher frequency to not lose any |
| 217 // information. | 209 // information. |
| 218 // TODO(henrike): this is probably more appropriate to do in | 210 // TODO(henrike): this is probably more appropriate to do in |
| 219 // GetLowestMixingFrequency(). | 211 // GetLowestMixingFrequency(). |
| 220 if (lowFreq == 12000) { | 212 if (lowFreq == 12000) { |
| 221 lowFreq = 16000; | 213 lowFreq = 16000; |
| 222 } else if (lowFreq == 24000) { | 214 } else if (lowFreq == 24000) { |
| 223 lowFreq = 32000; | 215 lowFreq = 32000; |
| 224 } | 216 } |
| 225 if(lowFreq <= 0) { | 217 if(lowFreq <= 0) { |
| 226 CriticalSectionScoped cs(_crit.get()); | 218 rtc::CritScope cs(&_crit); |
| 227 _processCalls--; | 219 _processCalls--; |
| 228 return; | 220 return; |
| 229 } else { | 221 } else { |
| 230 switch(lowFreq) { | 222 switch(lowFreq) { |
| 231 case 8000: | 223 case 8000: |
| 232 if(OutputFrequency() != kNbInHz) { | 224 if(OutputFrequency() != kNbInHz) { |
| 233 SetOutputFrequency(kNbInHz); | 225 SetOutputFrequency(kNbInHz); |
| 234 } | 226 } |
| 235 break; | 227 break; |
| 236 case 16000: | 228 case 16000: |
| 237 if(OutputFrequency() != kWbInHz) { | 229 if(OutputFrequency() != kWbInHz) { |
| 238 SetOutputFrequency(kWbInHz); | 230 SetOutputFrequency(kWbInHz); |
| 239 } | 231 } |
| 240 break; | 232 break; |
| 241 case 32000: | 233 case 32000: |
| 242 if(OutputFrequency() != kSwbInHz) { | 234 if(OutputFrequency() != kSwbInHz) { |
| 243 SetOutputFrequency(kSwbInHz); | 235 SetOutputFrequency(kSwbInHz); |
| 244 } | 236 } |
| 245 break; | 237 break; |
| 246 case 48000: | 238 case 48000: |
| 247 if(OutputFrequency() != kFbInHz) { | 239 if(OutputFrequency() != kFbInHz) { |
| 248 SetOutputFrequency(kFbInHz); | 240 SetOutputFrequency(kFbInHz); |
| 249 } | 241 } |
| 250 break; | 242 break; |
| 251 default: | 243 default: |
| 252 assert(false); | 244 assert(false); |
| 253 | 245 |
| 254 CriticalSectionScoped cs(_crit.get()); | 246 rtc::CritScope cs(&_crit); |
| 255 _processCalls--; | 247 _processCalls--; |
| 256 return; | 248 return; |
| 257 } | 249 } |
| 258 } | 250 } |
| 259 | 251 |
| 260 UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap, | 252 UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap, |
| 261 &remainingParticipantsAllowedToMix); | 253 &remainingParticipantsAllowedToMix); |
| 262 | 254 |
| 263 GetAdditionalAudio(&additionalFramesList); | 255 GetAdditionalAudio(&additionalFramesList); |
| 264 UpdateMixedStatus(mixedParticipantsMap); | 256 UpdateMixedStatus(mixedParticipantsMap); |
| 265 } | 257 } |
| 266 | 258 |
| 267 // Get an AudioFrame for mixing from the memory pool. | 259 // Get an AudioFrame for mixing from the memory pool. |
| 268 AudioFrame* mixedAudio = NULL; | 260 AudioFrame* mixedAudio = NULL; |
| 269 if(_audioFramePool->PopMemory(mixedAudio) == -1) { | 261 if(_audioFramePool->PopMemory(mixedAudio) == -1) { |
| 270 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, | 262 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, |
| 271 "failed PopMemory() call"); | 263 "failed PopMemory() call"); |
| 272 assert(false); | 264 assert(false); |
| 273 return; | 265 return; |
| 274 } | 266 } |
| 275 | 267 |
| 276 { | 268 { |
| 277 CriticalSectionScoped cs(_crit.get()); | 269 rtc::CritScope cs(&_crit); |
| 278 | 270 |
| 279 // TODO(henrike): it might be better to decide the number of channels | 271 // TODO(henrike): it might be better to decide the number of channels |
| 280 // with an API instead of dynamically. | 272 // with an API instead of dynamically. |
| 281 | 273 |
| 282 // Find the max channels over all mixing lists. | 274 // Find the max channels over all mixing lists. |
| 283 const size_t num_mixed_channels = std::max(MaxNumChannels(&mixList), | 275 const size_t num_mixed_channels = std::max(MaxNumChannels(&mixList), |
| 284 std::max(MaxNumChannels(&additionalFramesList), | 276 std::max(MaxNumChannels(&additionalFramesList), |
| 285 MaxNumChannels(&rampOutList))); | 277 MaxNumChannels(&rampOutList))); |
| 286 | 278 |
| 287 mixedAudio->UpdateFrame(-1, _timeStamp, NULL, 0, _outputFrequency, | 279 mixedAudio->UpdateFrame(-1, _timeStamp, NULL, 0, _outputFrequency, |
| (...skipping 16 matching lines...) |
| 304 // Nothing was mixed, set the audio samples to silence. | 296 // Nothing was mixed, set the audio samples to silence. |
| 305 mixedAudio->samples_per_channel_ = _sampleSize; | 297 mixedAudio->samples_per_channel_ = _sampleSize; |
| 306 AudioFrameOperations::Mute(mixedAudio); | 298 AudioFrameOperations::Mute(mixedAudio); |
| 307 } else { | 299 } else { |
| 308 // Only call the limiter if we have something to mix. | 300 // Only call the limiter if we have something to mix. |
| 309 LimitMixedAudio(mixedAudio); | 301 LimitMixedAudio(mixedAudio); |
| 310 } | 302 } |
| 311 } | 303 } |
| 312 | 304 |
| 313 { | 305 { |
| 314 CriticalSectionScoped cs(_cbCrit.get()); | 306 rtc::CritScope cs(&_cbCrit); |
| 315 if(_mixReceiver != NULL) { | 307 if(_mixReceiver != NULL) { |
| 316 const AudioFrame** dummy = NULL; | 308 const AudioFrame** dummy = NULL; |
| 317 _mixReceiver->NewMixedAudio( | 309 _mixReceiver->NewMixedAudio( |
| 318 _id, | 310 _id, |
| 319 *mixedAudio, | 311 *mixedAudio, |
| 320 dummy, | 312 dummy, |
| 321 0); | 313 0); |
| 322 } | 314 } |
| 323 } | 315 } |
| 324 | 316 |
| 325 // Reclaim all outstanding memory. | 317 // Reclaim all outstanding memory. |
| 326 _audioFramePool->PushMemory(mixedAudio); | 318 _audioFramePool->PushMemory(mixedAudio); |
| 327 ClearAudioFrameList(&mixList); | 319 ClearAudioFrameList(&mixList); |
| 328 ClearAudioFrameList(&rampOutList); | 320 ClearAudioFrameList(&rampOutList); |
| 329 ClearAudioFrameList(&additionalFramesList); | 321 ClearAudioFrameList(&additionalFramesList); |
| 330 { | 322 { |
| 331 CriticalSectionScoped cs(_crit.get()); | 323 rtc::CritScope cs(&_crit); |
| 332 _processCalls--; | 324 _processCalls--; |
| 333 } | 325 } |
| 334 return; | 326 return; |
| 335 } | 327 } |
| 336 | 328 |
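The comment inside Process() notes that SILK can run at 12 kHz and 24 kHz, rates the mixer does not support, so the lowest mixing frequency is rounded up to the closest supported rate before the kNbInHz/kWbInHz/kSwbInHz/kFbInHz case is chosen. A self-contained sketch of just that rounding rule follows; the function name is hypothetical and not part of the WebRTC sources.

```cpp
// Standalone sketch of the frequency mapping applied in Process():
// SILK's 12 kHz and 24 kHz rates are bumped to the next supported rate
// so no information is lost; supported rates pass through unchanged.
#include <cassert>

int ClosestSupportedMixingFrequency(int low_freq_hz) {
  if (low_freq_hz == 12000) return 16000;  // 12 kHz -> wideband (16 kHz)
  if (low_freq_hz == 24000) return 32000;  // 24 kHz -> super-wideband (32 kHz)
  return low_freq_hz;                      // 8k/16k/32k/48k are supported as-is
}

int main() {
  assert(ClosestSupportedMixingFrequency(12000) == 16000);
  assert(ClosestSupportedMixingFrequency(24000) == 32000);
  assert(ClosestSupportedMixingFrequency(48000) == 48000);
  return 0;
}
```

As the TODO above points out, this mapping would arguably belong inside GetLowestMixingFrequency() itself rather than in Process().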
| 337 int32_t AudioConferenceMixerImpl::RegisterMixedStreamCallback( | 329 int32_t AudioConferenceMixerImpl::RegisterMixedStreamCallback( |
| 338 AudioMixerOutputReceiver* mixReceiver) { | 330 AudioMixerOutputReceiver* mixReceiver) { |
| 339 CriticalSectionScoped cs(_cbCrit.get()); | 331 rtc::CritScope cs(&_cbCrit); |
| 340 if(_mixReceiver != NULL) { | 332 if(_mixReceiver != NULL) { |
| 341 return -1; | 333 return -1; |
| 342 } | 334 } |
| 343 _mixReceiver = mixReceiver; | 335 _mixReceiver = mixReceiver; |
| 344 return 0; | 336 return 0; |
| 345 } | 337 } |
| 346 | 338 |
| 347 int32_t AudioConferenceMixerImpl::UnRegisterMixedStreamCallback() { | 339 int32_t AudioConferenceMixerImpl::UnRegisterMixedStreamCallback() { |
| 348 CriticalSectionScoped cs(_cbCrit.get()); | 340 rtc::CritScope cs(&_cbCrit); |
| 349 if(_mixReceiver == NULL) { | 341 if(_mixReceiver == NULL) { |
| 350 return -1; | 342 return -1; |
| 351 } | 343 } |
| 352 _mixReceiver = NULL; | 344 _mixReceiver = NULL; |
| 353 return 0; | 345 return 0; |
| 354 } | 346 } |
| 355 | 347 |
| 356 int32_t AudioConferenceMixerImpl::SetOutputFrequency( | 348 int32_t AudioConferenceMixerImpl::SetOutputFrequency( |
| 357 const Frequency& frequency) { | 349 const Frequency& frequency) { |
| 358 CriticalSectionScoped cs(_crit.get()); | 350 rtc::CritScope cs(&_crit); |
| 359 | 351 |
| 360 _outputFrequency = frequency; | 352 _outputFrequency = frequency; |
| 361 _sampleSize = | 353 _sampleSize = |
| 362 static_cast<size_t>((_outputFrequency*kProcessPeriodicityInMs) / 1000); | 354 static_cast<size_t>((_outputFrequency*kProcessPeriodicityInMs) / 1000); |
| 363 | 355 |
| 364 return 0; | 356 return 0; |
| 365 } | 357 } |
| 366 | 358 |
| 367 AudioConferenceMixer::Frequency | 359 AudioConferenceMixer::Frequency |
| 368 AudioConferenceMixerImpl::OutputFrequency() const { | 360 AudioConferenceMixerImpl::OutputFrequency() const { |
| 369 CriticalSectionScoped cs(_crit.get()); | 361 rtc::CritScope cs(&_crit); |
| 370 return _outputFrequency; | 362 return _outputFrequency; |
| 371 } | 363 } |
| 372 | 364 |
| 373 int32_t AudioConferenceMixerImpl::SetMixabilityStatus( | 365 int32_t AudioConferenceMixerImpl::SetMixabilityStatus( |
| 374 MixerParticipant* participant, bool mixable) { | 366 MixerParticipant* participant, bool mixable) { |
| 375 if (!mixable) { | 367 if (!mixable) { |
| 376 // Anonymous participants are in a separate list. Make sure that the | 368 // Anonymous participants are in a separate list. Make sure that the |
| 377 // participant is in the _participantList if it is being mixed. | 369 // participant is in the _participantList if it is being mixed. |
| 378 SetAnonymousMixabilityStatus(participant, false); | 370 SetAnonymousMixabilityStatus(participant, false); |
| 379 } | 371 } |
| 380 size_t numMixedParticipants; | 372 size_t numMixedParticipants; |
| 381 { | 373 { |
| 382 CriticalSectionScoped cs(_cbCrit.get()); | 374 rtc::CritScope cs(&_cbCrit); |
| 383 const bool isMixed = | 375 const bool isMixed = |
| 384 IsParticipantInList(*participant, _participantList); | 376 IsParticipantInList(*participant, _participantList); |
| 385 // API must be called with a new state. | 377 // API must be called with a new state. |
| 386 if(!(mixable ^ isMixed)) { | 378 if(!(mixable ^ isMixed)) { |
| 387 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, | 379 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, |
| 388 "Mixable is aready %s", | 380 "Mixable is aready %s", |
| 389 isMixed ? "ON" : "off"); | 381 isMixed ? "ON" : "off"); |
| 390 return -1; | 382 return -1; |
| 391 } | 383 } |
| 392 bool success = false; | 384 bool success = false; |
| (...skipping 13 matching lines...) |
| 406 size_t numMixedNonAnonymous = _participantList.size(); | 398 size_t numMixedNonAnonymous = _participantList.size(); |
| 407 if (numMixedNonAnonymous > kMaximumAmountOfMixedParticipants) { | 399 if (numMixedNonAnonymous > kMaximumAmountOfMixedParticipants) { |
| 408 numMixedNonAnonymous = kMaximumAmountOfMixedParticipants; | 400 numMixedNonAnonymous = kMaximumAmountOfMixedParticipants; |
| 409 } | 401 } |
| 410 numMixedParticipants = | 402 numMixedParticipants = |
| 411 numMixedNonAnonymous + _additionalParticipantList.size(); | 403 numMixedNonAnonymous + _additionalParticipantList.size(); |
| 412 } | 404 } |
| 413 // A MixerParticipant was added or removed. Make sure the scratch | 405 // A MixerParticipant was added or removed. Make sure the scratch |
| 414 // buffer is updated if necessary. | 406 // buffer is updated if necessary. |
| 415 // Note: The scratch buffer may only be updated in Process(). | 407 // Note: The scratch buffer may only be updated in Process(). |
| 416 CriticalSectionScoped cs(_crit.get()); | 408 rtc::CritScope cs(&_crit); |
| 417 _numMixedParticipants = numMixedParticipants; | 409 _numMixedParticipants = numMixedParticipants; |
| 418 return 0; | 410 return 0; |
| 419 } | 411 } |
| 420 | 412 |
| 421 bool AudioConferenceMixerImpl::MixabilityStatus( | 413 bool AudioConferenceMixerImpl::MixabilityStatus( |
| 422 const MixerParticipant& participant) const { | 414 const MixerParticipant& participant) const { |
| 423 CriticalSectionScoped cs(_cbCrit.get()); | 415 rtc::CritScope cs(&_cbCrit); |
| 424 return IsParticipantInList(participant, _participantList); | 416 return IsParticipantInList(participant, _participantList); |
| 425 } | 417 } |
| 426 | 418 |
| 427 int32_t AudioConferenceMixerImpl::SetAnonymousMixabilityStatus( | 419 int32_t AudioConferenceMixerImpl::SetAnonymousMixabilityStatus( |
| 428 MixerParticipant* participant, bool anonymous) { | 420 MixerParticipant* participant, bool anonymous) { |
| 429 CriticalSectionScoped cs(_cbCrit.get()); | 421 rtc::CritScope cs(&_cbCrit); |
| 430 if(IsParticipantInList(*participant, _additionalParticipantList)) { | 422 if(IsParticipantInList(*participant, _additionalParticipantList)) { |
| 431 if(anonymous) { | 423 if(anonymous) { |
| 432 return 0; | 424 return 0; |
| 433 } | 425 } |
| 434 if(!RemoveParticipantFromList(participant, | 426 if(!RemoveParticipantFromList(participant, |
| 435 &_additionalParticipantList)) { | 427 &_additionalParticipantList)) { |
| 436 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 428 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
| 437 "unable to remove participant from anonymous list"); | 429 "unable to remove participant from anonymous list"); |
| 438 assert(false); | 430 assert(false); |
| 439 return -1; | 431 return -1; |
| (...skipping 14 matching lines...) |
| 454 // Setting anonymous status is only possible if MixerParticipant is | 446 // Setting anonymous status is only possible if MixerParticipant is |
| 455 // already registered. | 447 // already registered. |
| 456 return -1; | 448 return -1; |
| 457 } | 449 } |
| 458 return AddParticipantToList(participant, &_additionalParticipantList) ? | 450 return AddParticipantToList(participant, &_additionalParticipantList) ? |
| 459 0 : -1; | 451 0 : -1; |
| 460 } | 452 } |
| 461 | 453 |
| 462 bool AudioConferenceMixerImpl::AnonymousMixabilityStatus( | 454 bool AudioConferenceMixerImpl::AnonymousMixabilityStatus( |
| 463 const MixerParticipant& participant) const { | 455 const MixerParticipant& participant) const { |
| 464 CriticalSectionScoped cs(_cbCrit.get()); | 456 rtc::CritScope cs(&_cbCrit); |
| 465 return IsParticipantInList(participant, _additionalParticipantList); | 457 return IsParticipantInList(participant, _additionalParticipantList); |
| 466 } | 458 } |
| 467 | 459 |
| 468 int32_t AudioConferenceMixerImpl::SetMinimumMixingFrequency( | 460 int32_t AudioConferenceMixerImpl::SetMinimumMixingFrequency( |
| 469 Frequency freq) { | 461 Frequency freq) { |
| 470 // Make sure that only allowed sampling frequencies are used. Use closest | 462 // Make sure that only allowed sampling frequencies are used. Use closest |
| 471 // higher sampling frequency to avoid losing information. | 463 // higher sampling frequency to avoid losing information. |
| 472 if (static_cast<int>(freq) == 12000) { | 464 if (static_cast<int>(freq) == 12000) { |
| 473 freq = kWbInHz; | 465 freq = kWbInHz; |
| 474 } else if (static_cast<int>(freq) == 24000) { | 466 } else if (static_cast<int>(freq) == 24000) { |
| (...skipping 290 matching lines...) |
| 765 "UpdateMixedStatus(mixedParticipantsMap)"); | 757 "UpdateMixedStatus(mixedParticipantsMap)"); |
| 766 assert(mixedParticipantsMap.size() <= kMaximumAmountOfMixedParticipants); | 758 assert(mixedParticipantsMap.size() <= kMaximumAmountOfMixedParticipants); |
| 767 | 759 |
| 768 // Loop through all participants. If they are in the mix map they | 760 // Loop through all participants. If they are in the mix map they |
| 769 // were mixed. | 761 // were mixed. |
| 770 for (MixerParticipantList::const_iterator | 762 for (MixerParticipantList::const_iterator |
| 771 participant =_participantList.begin(); | 763 participant =_participantList.begin(); |
| 772 participant != _participantList.end(); | 764 participant != _participantList.end(); |
| 773 ++participant) { | 765 ++participant) { |
| 774 bool isMixed = false; | 766 bool isMixed = false; |
| 775 for (std::map<int, MixerParticipant*>::const_iterator it = | 767 for (auto it = mixedParticipantsMap.begin(); |
| 776 mixedParticipantsMap.begin(); | |
| 777 it != mixedParticipantsMap.end(); | 768 it != mixedParticipantsMap.end(); |
| 778 ++it) { | 769 ++it) { |
| 779 if (it->second == *participant) { | 770 if (it->second == *participant) { |
| 780 isMixed = true; | 771 isMixed = true; |
| 781 break; | 772 break; |
| 782 } | 773 } |
| 783 } | 774 } |
| 784 (*participant)->_mixHistory->SetIsMixed(isMixed); | 775 (*participant)->_mixHistory->SetIsMixed(isMixed); |
| 785 } | 776 } |
| 786 } | 777 } |
| (...skipping 139 matching lines...) |
| 926 | 917 |
| 927 if(error != _limiter->kNoError) { | 918 if(error != _limiter->kNoError) { |
| 928 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 919 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
| 929 "Error from AudioProcessing: %d", error); | 920 "Error from AudioProcessing: %d", error); |
| 930 assert(false); | 921 assert(false); |
| 931 return false; | 922 return false; |
| 932 } | 923 } |
| 933 return true; | 924 return true; |
| 934 } | 925 } |
| 935 } // namespace webrtc | 926 } // namespace webrtc |
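The substance of this diff is the lock migration: the heap-allocated CriticalSectionWrapper objects created in Init() (which added an allocation failure path) are replaced by rtc::CriticalSection members locked in place with rtc::CritScope. Below is a minimal standalone analogue of that before/after pattern, written with std::mutex rather than the actual rtc classes; the class and member names are hypothetical and only illustrate the shape of the change.

```cpp
// Standalone analogue of the locking change in this CL, using std::mutex
// in place of CriticalSectionWrapper / rtc::CriticalSection.
#include <memory>
#include <mutex>

// Before: the lock is heap-allocated in Init() and allocation can fail.
class MixerBefore {
 public:
  bool Init() {
    crit_.reset(new std::mutex());  // stands in for CreateCriticalSection()
    return crit_ != nullptr;        // Init() has to report the failure path
  }
  void Process() {
    std::lock_guard<std::mutex> cs(*crit_);  // stands in for CriticalSectionScoped
    ++process_calls_;
  }

 private:
  std::unique_ptr<std::mutex> crit_;
  int process_calls_ = 0;
};

// After: the lock is a plain member; no allocation and no failure path in Init().
class MixerAfter {
 public:
  bool Init() { return true; }
  void Process() {
    std::lock_guard<std::mutex> cs(crit_);  // stands in for rtc::CritScope cs(&_crit)
    ++process_calls_;
  }
  int process_calls() const {
    std::lock_guard<std::mutex> cs(crit_);  // legal in a const method: crit_ is mutable
    return process_calls_;
  }

 private:
  mutable std::mutex crit_;
  int process_calls_ = 0;
};

int main() {
  MixerBefore before;
  if (before.Init()) before.Process();
  MixerAfter after;
  after.Init();
  after.Process();
  return after.process_calls() == 1 ? 0 : 1;
}
```

One visible consequence in the diff is that const accessors such as OutputFrequency() now take the lock directly via rtc::CritScope, which is why the member lock in the sketch is declared mutable.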