2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
11 #include "webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer_defines.h"
12 #include "webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h"
13 #include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h"
14 #include "webrtc/modules/audio_processing/include/audio_processing.h"
15 #include "webrtc/modules/utility/interface/audio_frame_operations.h"
16 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
17 #include "webrtc/system_wrappers/interface/trace.h"
// Associates a MixerParticipant with the AudioFrame it supplied for the
// current mixing round; used by the passive-participant bookkeeping lists
// in UpdateToMix().
// NOTE(review): the struct's closing "};" is not visible here -- the
// embedded original line numbers skip, so this chunk has dropped lines.
22 struct ParticipantFramePair {
23 MixerParticipant* participant;
24 AudioFrame* audioFrame;
// Owning pointers; pairs are heap-allocated in UpdateToMix().
27 typedef std::list<ParticipantFramePair*> ParticipantFramePairList;
29 // Mix |frame| into |mixed_frame|, with saturation protection and upmixing.
30 // These effects are applied to |frame| itself prior to mixing. Assumes that
31 // |mixed_frame| always has at least as many channels as |frame|. Supports
34 // TODO(andrew): consider not modifying |frame| here.
35 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame) {
36 assert(mixed_frame->num_channels_ >= frame->num_channels_);
37 // Divide by two to avoid saturation in the mixing.
// NOTE(review): the statement that performs the divide-by-two (original
// line 38) is not visible in this chunk; only its comment survives.
39 if (mixed_frame->num_channels_ > frame->num_channels_) {
40 // We only support mono-to-stereo.
41 assert(mixed_frame->num_channels_ == 2 &&
42 frame->num_channels_ == 1);
// Upmix |frame| in place so channel counts match before accumulating.
43 AudioFrameOperations::MonoToStereo(frame);
// Accumulate |frame| into the running mix via AudioFrame::operator+=.
46 *mixed_frame += *frame;
49 // Return the max number of channels from a |list| composed of AudioFrames.
50 int MaxNumChannels(const AudioFrameList* list) {
// Start at mono; never report fewer than 1 channel even for an empty list.
51 int max_num_channels = 1;
52 for (AudioFrameList::const_iterator iter = list->begin();
// NOTE(review): the loop's condition/increment lines (original 53-54)
// are not visible in this chunk.
55 max_num_channels = std::max(max_num_channels, (*iter)->num_channels_);
57 return max_num_channels;
// Populate |stats| from |frame|: the participant id is taken from the
// frame's id_ field; the level is left at 0 (see TODO below).
60 void SetParticipantStatistics(ParticipantStatistics* stats,
61 const AudioFrame& frame) {
62 stats->participant = frame.id_;
63 stats->level = 0; // TODO(andrew): to what should this be set?
// Each MixerParticipant owns a MixHistory recording whether it was mixed
// into the conference output in the current round.
68 MixerParticipant::MixerParticipant()
69 : _mixHistory(new MixHistory()) {
72 MixerParticipant::~MixerParticipant() {
// Thin delegation to the participant's MixHistory.
76 int32_t MixerParticipant::IsMixed(bool& mixed) const {
77 return _mixHistory->IsMixed(mixed);
80 MixHistory::MixHistory()
84 MixHistory::~MixHistory() {
// NOTE(review): the bodies of IsMixed/SetIsMixed/ResetMixedStatus below
// are not visible in this chunk (original line numbering skips).
87 int32_t MixHistory::IsMixed(bool& mixed) const {
92 int32_t MixHistory::WasMixed(bool& wasMixed) const {
93 // Was mixed is the same as is mixed depending on perspective. This function
94 // is for the perspective of AudioConferenceMixerImpl.
95 return IsMixed(wasMixed);
98 int32_t MixHistory::SetIsMixed(const bool mixed) {
103 void MixHistory::ResetMixedStatus() {
// Factory for the public AudioConferenceMixer interface: heap-allocates the
// concrete implementation tagged with |id|.
// NOTE(review): the Init() call and failure cleanup that presumably follow
// (original lines 109-114) are not visible in this chunk -- confirm against
// the full file.
107 AudioConferenceMixer* AudioConferenceMixer::Create(int id) {
108 AudioConferenceMixerImpl* mixer = new AudioConferenceMixerImpl(id);
// Constructor: member-initializer list only; heavyweight setup (critical
// sections, limiter, frame pool) happens in Init().
// NOTE(review): several initializers are missing from this chunk (the
// embedded original line numbers skip) -- do not assume this list is
// complete.
116 AudioConferenceMixerImpl::AudioConferenceMixerImpl(int id)
117 : _scratchParticipantsToMixAmount(0),
118 _scratchMixedParticipants(),
119 _scratchVadPositiveParticipantsAmount(0),
120 _scratchVadPositiveParticipants(),
122 _minimumMixingFreq(kLowestPossible),
124 _mixerStatusCallback(NULL),
// Callback cadence: fire the status callback every N*10ms; 1 => every 10ms.
125 _amountOf10MsBetweenCallbacks(1),
126 _amountOf10MsUntilNextCallback(0),
127 _mixerStatusCb(false),
128 _outputFrequency(kDefaultFrequency),
130 _audioFramePool(NULL),
132 _additionalParticipantList(),
133 _numMixedParticipants(0),
135 _timeScheduler(kProcessPeriodicityInMs),
// Two-phase init: creates the critical sections, the AudioProcessing-based
// limiter, and the AudioFrame memory pool, then configures the limiter's
// gain control as a fixed-digital soft limiter.
// NOTE(review): the early-return lines after each failure check (and the
// final "return true") are not visible in this chunk.
139 bool AudioConferenceMixerImpl::Init() {
140 _crit.reset(CriticalSectionWrapper::CreateCriticalSection());
141 if (_crit.get() == NULL)
144 _cbCrit.reset(CriticalSectionWrapper::CreateCriticalSection());
145 if(_cbCrit.get() == NULL)
// Experimental AGC is explicitly disabled for the limiter instance.
149 config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
150 _limiter.reset(AudioProcessing::Create(config));
// Pre-allocate AudioFrames; CreateMemoryPool fills _audioFramePool.
154 MemoryPool<AudioFrame>::CreateMemoryPool(_audioFramePool,
155 DEFAULT_AUDIO_FRAME_POOLSIZE);
156 if(_audioFramePool == NULL)
159 if(SetOutputFrequency(kDefaultFrequency) == -1)
162 if(_limiter->gain_control()->set_mode(GainControl::kFixedDigital) !=
166 // We smoothly limit the mixed frame to -7 dbFS. -6 would correspond to the
167 // divide-by-2 but -7 is used instead to give a bit of headroom since the
168 // AGC is not a hard limiter.
169 if(_limiter->gain_control()->set_target_level_dbfs(7) != _limiter->kNoError)
172 if(_limiter->gain_control()->set_compression_gain_db(0)
173 != _limiter->kNoError)
176 if(_limiter->gain_control()->enable_limiter(true) != _limiter->kNoError)
179 if(_limiter->gain_control()->Enable(true) != _limiter->kNoError)
// Destructor: releases the AudioFrame memory pool. The assert implies
// DeleteMemoryPool is expected to null out the pointer it is given.
185 AudioConferenceMixerImpl::~AudioConferenceMixerImpl() {
186 MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool);
187 assert(_audioFramePool == NULL);
// Module interface hook for changing the trace/module id.
// NOTE(review): the body (original lines 191-193) is not visible in this
// chunk.
190 int32_t AudioConferenceMixerImpl::ChangeUniqueId(const int32_t id) {
195 // Process should be called every kProcessPeriodicityInMs ms
// Returns the number of ms until Process() should run again, as reported
// by the time scheduler; traces an error if the scheduler query fails.
196 int32_t AudioConferenceMixerImpl::TimeUntilNextProcess() {
197 int32_t timeUntilNextProcess = 0;
198 CriticalSectionScoped cs(_crit.get());
199 if(_timeScheduler.TimeToNextUpdate(timeUntilNextProcess) != 0) {
200 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
201 "failed in TimeToNextUpdate() call");
// NOTE(review): the error-path return (original lines 202-205) is not
// visible in this chunk.
206 return timeUntilNextProcess;
// One mixing iteration: pick frames to mix, mix them into a pooled
// AudioFrame, limit the result, deliver it to the registered receiver,
// fire status callbacks, then reclaim all frames.
// NOTE(review): many lines are missing from this chunk (the embedded
// original line numbers skip) -- braces, increments of _processCalls,
// assignments in the frequency switch, etc. Comments below describe only
// what the visible lines show.
209 int32_t AudioConferenceMixerImpl::Process() {
210 size_t remainingParticipantsAllowedToMix =
211 kMaximumAmountOfMixedParticipants;
212 CriticalSectionScoped cs(_crit.get());
// Re-entrancy guard: Process() must not be called recursively.
214 assert(_processCalls == 0);
217 // Let the scheduler know that we are running one iteration.
218 _timeScheduler.UpdateScheduler();
// Working lists for this iteration; frames come from _audioFramePool and
// are returned to it at the bottom of this function.
221 AudioFrameList mixList;
222 AudioFrameList rampOutList;
223 AudioFrameList additionalFramesList;
224 std::map<int, MixerParticipant*> mixedParticipantsMap;
226 CriticalSectionScoped cs(_cbCrit.get());
228 int32_t lowFreq = GetLowestMixingFrequency();
229 // SILK can run in 12 kHz and 24 kHz. These frequencies are not
230 // supported so use the closest higher frequency to not lose any
232 // TODO(henrike): this is probably more appropriate to do in
233 // GetLowestMixingFrequency().
// NOTE(review): the assignments inside these branches (presumably
// lowFreq = 16000 / 32000) are not visible in this chunk.
234 if (lowFreq == 12000) {
236 } else if (lowFreq == 24000) {
240 CriticalSectionScoped cs(_crit.get());
// Switch the output frequency to match the lowest usable mixing
// frequency, but only when it actually changed.
246 if(OutputFrequency() != kNbInHz) {
247 SetOutputFrequency(kNbInHz);
251 if(OutputFrequency() != kWbInHz) {
252 SetOutputFrequency(kWbInHz);
256 if(OutputFrequency() != kSwbInHz) {
257 SetOutputFrequency(kSwbInHz);
261 if(OutputFrequency() != kFbInHz) {
262 SetOutputFrequency(kFbInHz);
268 CriticalSectionScoped cs(_crit.get());
// Select which participants get mixed this round (capped by
// remainingParticipantsAllowedToMix) and which ramp out.
274 UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap,
275 remainingParticipantsAllowedToMix);
277 GetAdditionalAudio(&additionalFramesList);
278 UpdateMixedStatus(mixedParticipantsMap);
279 _scratchParticipantsToMixAmount = mixedParticipantsMap.size();
282 // Get an AudioFrame for mixing from the memory pool.
283 AudioFrame* mixedAudio = NULL;
284 if(_audioFramePool->PopMemory(mixedAudio) == -1) {
285 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
286 "failed PopMemory() call");
291 bool timeForMixerCallback = false;
293 int32_t audioLevel = 0;
295 CriticalSectionScoped cs(_crit.get());
297 // TODO(henrike): it might be better to decide the number of channels
298 // with an API instead of dynamically.
300 // Find the max channels over all mixing lists.
301 const int num_mixed_channels = std::max(MaxNumChannels(&mixList),
302 std::max(MaxNumChannels(&additionalFramesList),
303 MaxNumChannels(&rampOutList)));
// Reset the output frame: id -1, no payload, current timestamp/frequency.
305 mixedAudio->UpdateFrame(-1, _timeStamp, NULL, 0, _outputFrequency,
306 AudioFrame::kNormalSpeech,
307 AudioFrame::kVadPassive, num_mixed_channels);
309 _timeStamp += _sampleSize;
// Named participants first, then anonymous and ramped-out audio.
311 MixFromList(*mixedAudio, &mixList);
312 MixAnonomouslyFromList(*mixedAudio, &additionalFramesList);
313 MixAnonomouslyFromList(*mixedAudio, &rampOutList);
315 if(mixedAudio->samples_per_channel_ == 0) {
316 // Nothing was mixed, set the audio samples to silence.
317 mixedAudio->samples_per_channel_ = _sampleSize;
320 // Only call the limiter if we have something to mix.
321 if(!LimitMixedAudio(*mixedAudio))
// Level measurement for the MixedAudioLevel status callback.
325 _mixedAudioLevel.ComputeLevel(mixedAudio->data_,_sampleSize);
326 audioLevel = _mixedAudioLevel.GetLevel();
329 _scratchVadPositiveParticipantsAmount = 0;
330 UpdateVADPositiveParticipants(&mixList);
// Countdown until the next periodic status callback.
331 if(_amountOf10MsUntilNextCallback-- == 0) {
332 _amountOf10MsUntilNextCallback = _amountOf10MsBetweenCallbacks;
333 timeForMixerCallback = true;
// Deliver the mixed frame and (periodically) the status callbacks under
// the callback lock.
339 CriticalSectionScoped cs(_cbCrit.get());
340 if(_mixReceiver != NULL) {
341 const AudioFrame** dummy = NULL;
342 _mixReceiver->NewMixedAudio(
349 if((_mixerStatusCallback != NULL) &&
350 timeForMixerCallback) {
351 _mixerStatusCallback->MixedParticipants(
353 _scratchMixedParticipants,
354 static_cast<uint32_t>(_scratchParticipantsToMixAmount));
356 _mixerStatusCallback->VADPositiveParticipants(
358 _scratchVadPositiveParticipants,
359 _scratchVadPositiveParticipantsAmount);
360 _mixerStatusCallback->MixedAudioLevel(_id,audioLevel);
364 // Reclaim all outstanding memory.
365 _audioFramePool->PushMemory(mixedAudio);
366 ClearAudioFrameList(&mixList);
367 ClearAudioFrameList(&rampOutList);
368 ClearAudioFrameList(&additionalFramesList);
369 CriticalSectionScoped cs(_crit.get());
// Registers the single receiver of mixed audio. Fails (path not visible
// here) if a receiver is already registered.
376 int32_t AudioConferenceMixerImpl::RegisterMixedStreamCallback(
377 AudioMixerOutputReceiver& mixReceiver) {
378 CriticalSectionScoped cs(_cbCrit.get());
379 if(_mixReceiver != NULL) {
// NOTE(review): the early return for the already-registered case
// (original lines 380-381) is not visible in this chunk.
382 _mixReceiver = &mixReceiver;
// Unregisters the mixed-audio receiver; checks that one is registered.
// NOTE(review): the body after the NULL check (original lines 389-394) is
// not visible in this chunk.
386 int32_t AudioConferenceMixerImpl::UnRegisterMixedStreamCallback() {
387 CriticalSectionScoped cs(_cbCrit.get());
388 if(_mixReceiver == NULL) {
// Sets the mixer output sample rate and derives the per-Process() sample
// count from it (frequency * period-in-ms / 1000).
395 int32_t AudioConferenceMixerImpl::SetOutputFrequency(
396 const Frequency frequency) {
397 CriticalSectionScoped cs(_crit.get());
399 _outputFrequency = frequency;
400 _sampleSize = (_outputFrequency*kProcessPeriodicityInMs) / 1000;
// Thread-safe accessor for the current output frequency.
405 AudioConferenceMixer::Frequency
406 AudioConferenceMixerImpl::OutputFrequency() const {
407 CriticalSectionScoped cs(_crit.get());
408 return _outputFrequency;
// Registers the status callback, fired every
// |amountOf10MsBetweenCallbacks| * 10 ms from Process(). Zero cadence is
// rejected; double registration is rejected with a warning.
411 int32_t AudioConferenceMixerImpl::RegisterMixerStatusCallback(
412 AudioMixerStatusReceiver& mixerStatusCallback,
413 const uint32_t amountOf10MsBetweenCallbacks) {
414 if(amountOf10MsBetweenCallbacks == 0) {
// NOTE(review): this WEBRTC_TRACE has a %d in the format string but no
// matching argument visible on these lines -- possibly dropped by the
// chunking; verify against the full file.
417 kTraceAudioMixerServer,
419 "amountOf10MsBetweenCallbacks(%d) needs to be larger than 0");
423 CriticalSectionScoped cs(_cbCrit.get());
424 if(_mixerStatusCallback != NULL) {
425 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
426 "Mixer status callback already registered");
429 _mixerStatusCallback = &mixerStatusCallback;
// Cadence state lives under _crit (Process() reads it there); callback
// pointer lives under _cbCrit.
432 CriticalSectionScoped cs(_crit.get());
433 _amountOf10MsBetweenCallbacks = amountOf10MsBetweenCallbacks;
434 _amountOf10MsUntilNextCallback = 0;
435 _mixerStatusCb = true;
// Unregisters the status callback: clears the enable flag under _crit,
// then the callback pointer under _cbCrit.
440 int32_t AudioConferenceMixerImpl::UnRegisterMixerStatusCallback() {
442 CriticalSectionScoped cs(_crit.get());
// NOTE(review): the condition guarding this warning (presumably
// "if(!_mixerStatusCb)", original lines 443-444) is not visible here.
445 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
446 "Mixer status callback not registered");
449 _mixerStatusCb = false;
452 CriticalSectionScoped cs(_cbCrit.get());
453 _mixerStatusCallback = NULL;
// Adds or removes |participant| from the named (non-anonymous) mixing
// list, then recomputes the cached _numMixedParticipants total.
458 int32_t AudioConferenceMixerImpl::SetMixabilityStatus(
459 MixerParticipant& participant,
462 // Anonymous participants are in a separate list. Make sure that the
463 // participant is in the _participantList if it is being mixed.
464 SetAnonymousMixabilityStatus(participant, false);
466 size_t numMixedParticipants;
468 CriticalSectionScoped cs(_cbCrit.get());
// NOTE(review): the declaration of isMixed ("const bool isMixed =",
// original line 469) is not visible in this chunk.
470 IsParticipantInList(participant, &_participantList);
471 // API must be called with a new state.
// XOR: reject the call when the requested state equals the current one.
472 if(!(mixable ^ isMixed)) {
473 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
474 "Mixable is aready %s",
475 isMixed ? "ON" : "off");
478 bool success = false;
480 success = AddParticipantToList(participant, &_participantList);
482 success = RemoveParticipantFromList(participant, &_participantList);
485 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
486 "failed to %s participant",
487 mixable ? "add" : "remove");
// Named participants beyond the mix cap still count only up to the cap;
// anonymous participants are always counted.
492 size_t numMixedNonAnonymous = _participantList.size();
493 if (numMixedNonAnonymous > kMaximumAmountOfMixedParticipants) {
494 numMixedNonAnonymous = kMaximumAmountOfMixedParticipants;
496 numMixedParticipants =
497 numMixedNonAnonymous + _additionalParticipantList.size();
499 // A MixerParticipant was added or removed. Make sure the scratch
500 // buffer is updated if necessary.
501 // Note: The scratch buffer may only be updated in Process().
502 CriticalSectionScoped cs(_crit.get());
503 _numMixedParticipants = numMixedParticipants;
// Reports (via |mixable|) whether |participant| is in the named mixing
// list.
507 int32_t AudioConferenceMixerImpl::MixabilityStatus(
508 MixerParticipant& participant,
510 CriticalSectionScoped cs(_cbCrit.get());
511 mixable = IsParticipantInList(participant, &_participantList);
// Moves |participant| between the named list and the anonymous
// (always-mixed, never-reported) list. Turning anonymity ON requires the
// participant to already be registered in the named list.
515 int32_t AudioConferenceMixerImpl::SetAnonymousMixabilityStatus(
516 MixerParticipant& participant, const bool anonymous) {
517 CriticalSectionScoped cs(_cbCrit.get());
518 if(IsParticipantInList(participant, &_additionalParticipantList)) {
// Currently anonymous: either a no-op (still anonymous) or demotion back
// to the named list. NOTE(review): the "if(anonymous) return 0;" style
// lines (original 519-521) are not visible in this chunk.
522 if(!RemoveParticipantFromList(participant,
523 &_additionalParticipantList)) {
524 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
525 "unable to remove participant from anonymous list");
529 return AddParticipantToList(participant, &_participantList) ? 0 : -1;
// Not currently anonymous: remove from the named list, then (if it was
// there) add to the anonymous list.
534 const bool mixable = RemoveParticipantFromList(participant,
539 kTraceAudioMixerServer,
541 "participant must be registered before turning it into anonymous");
542 // Setting anonymous status is only possible if MixerParticipant is
543 // already registered.
546 return AddParticipantToList(participant, &_additionalParticipantList) ?
// Reports (via |mixable|) whether |participant| is in the anonymous list.
550 int32_t AudioConferenceMixerImpl::AnonymousMixabilityStatus(
551 MixerParticipant& participant, bool& mixable) {
552 CriticalSectionScoped cs(_cbCrit.get());
553 mixable = IsParticipantInList(participant,
554 &_additionalParticipantList);
// Sets a floor on the mixing frequency. 12/24 kHz (SILK rates) are
// rounded up to the nearest supported rate; anything outside the
// supported set is rejected with a trace.
558 int32_t AudioConferenceMixerImpl::SetMinimumMixingFrequency(
560 // Make sure that only allowed sampling frequencies are used. Use closest
561 // higher sampling frequency to avoid losing information.
// NOTE(review): the assignments inside these branches (presumably
// freq = 16000 / 32000) are not visible in this chunk.
562 if (static_cast<int>(freq) == 12000) {
564 } else if (static_cast<int>(freq) == 24000) {
568 if((freq == kNbInHz) || (freq == kWbInHz) || (freq == kSwbInHz) ||
569 (freq == kLowestPossible)) {
570 _minimumMixingFreq=freq;
// Unsupported frequency: trace and fail (return not visible here).
573 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
574 "SetMinimumMixingFrequency incorrect frequency: %i",freq);
580 // Check all AudioFrames that are to be mixed. The highest sampling frequency
581 // found is the lowest that can be used without losing information.
582 int32_t AudioConferenceMixerImpl::GetLowestMixingFrequency() {
583 const int participantListFrequency =
584 GetLowestMixingFrequencyFromList(&_participantList);
585 const int anonymousListFrequency =
586 GetLowestMixingFrequencyFromList(&_additionalParticipantList);
// Take the max over both lists (named and anonymous participants).
587 const int highestFreq =
588 (participantListFrequency > anonymousListFrequency) ?
589 participantListFrequency : anonymousListFrequency;
590 // Check if the user specified a lowest mixing frequency.
591 if(_minimumMixingFreq != kLowestPossible) {
// The user-specified floor wins when it exceeds what participants need.
592 if(_minimumMixingFreq > highestFreq) {
593 return _minimumMixingFreq;
// Returns the highest NeededFrequency() over |mixList|, with an 8 kHz
// floor.
599 int32_t AudioConferenceMixerImpl::GetLowestMixingFrequencyFromList(
600 MixerParticipantList* mixList) {
601 int32_t highestFreq = 8000;
602 for (MixerParticipantList::iterator iter = mixList->begin();
603 iter != mixList->end();
// NOTE(review): the loop increment line (original 604) is not visible in
// this chunk.
605 const int32_t neededFrequency = (*iter)->NeededFrequency(_id);
606 if(neededFrequency > highestFreq) {
607 highestFreq = neededFrequency;
// Selects which participants to mix this round. VAD-active participants
// get priority (keeping the highest-energy ones when over the cap);
// remaining slots are filled with passive participants, preferring those
// that were mixed last round. Participants displaced from the mix are
// ramped out. |maxAudioFrameCounter| is decremented by the number of
// slots consumed.
// NOTE(review): numerous lines (braces, loop increments, continue
// statements) are missing from this chunk; comments describe only the
// visible logic.
613 void AudioConferenceMixerImpl::UpdateToMix(
614 AudioFrameList* mixList,
615 AudioFrameList* rampOutList,
616 std::map<int, MixerParticipant*>* mixParticipantList,
617 size_t& maxAudioFrameCounter) {
618 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
619 "UpdateToMix(mixList,rampOutList,mixParticipantList,%d)",
620 maxAudioFrameCounter);
621 const size_t mixListStartSize = mixList->size();
622 AudioFrameList activeList;
623 // Struct needed by the passive lists to keep track of which AudioFrame
624 // belongs to which MixerParticipant.
625 ParticipantFramePairList passiveWasNotMixedList;
626 ParticipantFramePairList passiveWasMixedList;
627 for (MixerParticipantList::iterator participant = _participantList.begin();
628 participant != _participantList.end();
630 // Stop keeping track of passive participants if there are already
631 // enough participants available (they wont be mixed anyway).
632 bool mustAddToPassiveList = (maxAudioFrameCounter >
634 passiveWasMixedList.size() +
635 passiveWasNotMixedList.size()));
637 bool wasMixed = false;
638 (*participant)->_mixHistory->WasMixed(wasMixed);
// Pull a frame from the pool and fill it from the participant; on any
// failure the frame is returned to the pool.
639 AudioFrame* audioFrame = NULL;
640 if(_audioFramePool->PopMemory(audioFrame) == -1) {
641 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
642 "failed PopMemory() call");
646 audioFrame->sample_rate_hz_ = _outputFrequency;
648 if((*participant)->GetAudioFrame(_id,*audioFrame) != 0) {
649 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
650 "failed to GetAudioFrame() from participant");
651 _audioFramePool->PushMemory(audioFrame);
654 // TODO(henrike): this assert triggers in some test cases where SRTP is
655 // used which prevents NetEQ from making a VAD. Temporarily disable this
656 // assert until the problem is fixed on a higher level.
657 // assert(audioFrame->vad_activity_ != AudioFrame::kVadUnknown);
658 if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) {
659 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
660 "invalid VAD state from participant");
663 if(audioFrame->vad_activity_ == AudioFrame::kVadActive) {
668 if(activeList.size() >= maxAudioFrameCounter) {
669 // There are already more active participants than should be
670 // mixed. Only keep the ones with the highest energy.
671 AudioFrameList::iterator replaceItem;
672 CalculateEnergy(*audioFrame);
673 uint32_t lowestEnergy = audioFrame->energy_;
// Find the currently mixed active frame with the lowest energy; the new
// frame replaces it only if some mixed frame is weaker.
675 bool found_replace_item = false;
676 for (AudioFrameList::iterator iter = activeList.begin();
677 iter != activeList.end();
679 CalculateEnergy(**iter);
680 if((*iter)->energy_ < lowestEnergy) {
682 lowestEnergy = (*iter)->energy_;
683 found_replace_item = true;
686 if(found_replace_item) {
687 AudioFrame* replaceFrame = *replaceItem;
689 bool replaceWasMixed = false;
690 std::map<int, MixerParticipant*>::iterator it =
691 mixParticipantList->find(replaceFrame->id_);
693 // When a frame is pushed to |activeList| it is also pushed
694 // to mixParticipantList with the frame's id. This means
695 // that the Find call above should never fail.
696 assert(it != mixParticipantList->end());
697 it->second->_mixHistory->WasMixed(replaceWasMixed);
// Swap the weaker frame out of the active set for the new one.
699 mixParticipantList->erase(replaceFrame->id_);
700 activeList.erase(replaceItem);
702 activeList.push_front(audioFrame);
703 (*mixParticipantList)[audioFrame->id_] = *participant;
704 assert(mixParticipantList->size() <=
705 kMaximumAmountOfMixedParticipants);
// The displaced frame ramps out if it was audible last round; otherwise
// its memory goes straight back to the pool.
707 if (replaceWasMixed) {
708 RampOut(*replaceFrame);
709 rampOutList->push_back(replaceFrame);
710 assert(rampOutList->size() <=
711 kMaximumAmountOfMixedParticipants);
713 _audioFramePool->PushMemory(replaceFrame);
// No weaker frame to replace: the new active frame itself ramps out (if
// previously mixed) or is returned to the pool.
717 RampOut(*audioFrame);
718 rampOutList->push_back(audioFrame);
719 assert(rampOutList->size() <=
720 kMaximumAmountOfMixedParticipants);
722 _audioFramePool->PushMemory(audioFrame);
// Room available: add the active frame directly.
726 activeList.push_front(audioFrame);
727 (*mixParticipantList)[audioFrame->id_] = *participant;
728 assert(mixParticipantList->size() <=
729 kMaximumAmountOfMixedParticipants);
// Passive participants are queued in was-mixed / was-not-mixed order for
// the fill-up passes below.
733 ParticipantFramePair* pair = new ParticipantFramePair;
734 pair->audioFrame = audioFrame;
735 pair->participant = *participant;
736 passiveWasMixedList.push_back(pair);
737 } else if(mustAddToPassiveList) {
739 ParticipantFramePair* pair = new ParticipantFramePair;
740 pair->audioFrame = audioFrame;
741 pair->participant = *participant;
742 passiveWasNotMixedList.push_back(pair);
744 _audioFramePool->PushMemory(audioFrame);
748 assert(activeList.size() <= maxAudioFrameCounter);
749 // At this point it is known which participants should be mixed. Transfer
750 // this information to this functions output parameters.
751 for (AudioFrameList::iterator iter = activeList.begin();
752 iter != activeList.end();
754 mixList->push_back(*iter);
757 // Always mix a constant number of AudioFrames. If there aren't enough
758 // active participants mix passive ones. Starting with those that was mixed
760 for (ParticipantFramePairList::iterator iter = passiveWasMixedList.begin();
761 iter != passiveWasMixedList.end();
763 if(mixList->size() < maxAudioFrameCounter + mixListStartSize) {
764 mixList->push_back((*iter)->audioFrame);
765 (*mixParticipantList)[(*iter)->audioFrame->id_] =
766 (*iter)->participant;
767 assert(mixParticipantList->size() <=
768 kMaximumAmountOfMixedParticipants);
// Frames that did not make the cut go back to the pool.
770 _audioFramePool->PushMemory((*iter)->audioFrame);
774 // And finally the ones that have not been mixed for a while.
775 for (ParticipantFramePairList::iterator iter =
776 passiveWasNotMixedList.begin();
777 iter != passiveWasNotMixedList.end();
779 if(mixList->size() < maxAudioFrameCounter + mixListStartSize) {
780 mixList->push_back((*iter)->audioFrame);
781 (*mixParticipantList)[(*iter)->audioFrame->id_] =
782 (*iter)->participant;
783 assert(mixParticipantList->size() <=
784 kMaximumAmountOfMixedParticipants);
786 _audioFramePool->PushMemory((*iter)->audioFrame);
// Report back how many mixing slots remain for subsequent callers.
790 assert(maxAudioFrameCounter + mixListStartSize >= mixList->size());
791 maxAudioFrameCounter += mixListStartSize - mixList->size();
// Collects one frame from every anonymous participant into
// |additionalFramesList|. Empty or failed frames are returned to the pool
// and skipped.
794 void AudioConferenceMixerImpl::GetAdditionalAudio(
795 AudioFrameList* additionalFramesList) {
796 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
797 "GetAdditionalAudio(additionalFramesList)");
798 // The GetAudioFrame() callback may result in the participant being removed
799 // from additionalParticipantList_. If that happens it will invalidate any
800 // iterators. Create a copy of the participants list such that the list of
801 // participants can be traversed safely.
802 MixerParticipantList additionalParticipantList;
803 additionalParticipantList.insert(additionalParticipantList.begin(),
804 _additionalParticipantList.begin(),
805 _additionalParticipantList.end());
807 for (MixerParticipantList::iterator participant =
808 additionalParticipantList.begin();
809 participant != additionalParticipantList.end();
// NOTE(review): the loop increment line (original 810) is not visible in
// this chunk.
811 AudioFrame* audioFrame = NULL;
812 if(_audioFramePool->PopMemory(audioFrame) == -1) {
813 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
814 "failed PopMemory() call");
818 audioFrame->sample_rate_hz_ = _outputFrequency;
819 if((*participant)->GetAudioFrame(_id, *audioFrame) != 0) {
820 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
821 "failed to GetAudioFrame() from participant");
822 _audioFramePool->PushMemory(audioFrame);
825 if(audioFrame->samples_per_channel_ == 0) {
826 // Empty frame. Don't use it.
827 _audioFramePool->PushMemory(audioFrame);
830 additionalFramesList->push_back(audioFrame);
// Pushes this round's mixed/not-mixed outcome into each named
// participant's MixHistory: a participant is "mixed" iff it appears as a
// value in |mixedParticipantsMap|.
834 void AudioConferenceMixerImpl::UpdateMixedStatus(
835 std::map<int, MixerParticipant*>& mixedParticipantsMap) {
836 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
837 "UpdateMixedStatus(mixedParticipantsMap)");
838 assert(mixedParticipantsMap.size() <= kMaximumAmountOfMixedParticipants);
840 // Loop through all participants. If they are in the mix map they
// O(participants * mixed) linear scan over the map's values.
842 for (MixerParticipantList::iterator participant = _participantList.begin();
843 participant != _participantList.end();
845 bool isMixed = false;
846 for (std::map<int, MixerParticipant*>::iterator it =
847 mixedParticipantsMap.begin();
848 it != mixedParticipantsMap.end();
850 if (it->second == *participant) {
// NOTE(review): the "isMixed = true; break;" lines (original 851-853)
// are not visible in this chunk.
855 (*participant)->_mixHistory->SetIsMixed(isMixed);
// Returns every frame in |audioFrameList| to the memory pool, then empties
// the list.
859 void AudioConferenceMixerImpl::ClearAudioFrameList(
860 AudioFrameList* audioFrameList) {
861 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
862 "ClearAudioFrameList(audioFrameList)");
863 for (AudioFrameList::iterator iter = audioFrameList->begin();
864 iter != audioFrameList->end();
866 _audioFramePool->PushMemory(*iter);
868 audioFrameList->clear();
// Records every VAD-active frame in |mixList| into the scratch
// VAD-positive-participants array consumed by the status callback in
// Process(). Also computes each frame's energy as a side effect.
871 void AudioConferenceMixerImpl::UpdateVADPositiveParticipants(
872 AudioFrameList* mixList) {
873 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
874 "UpdateVADPositiveParticipants(mixList)");
876 for (AudioFrameList::iterator iter = mixList->begin();
877 iter != mixList->end();
879 CalculateEnergy(**iter);
880 if((*iter)->vad_activity_ == AudioFrame::kVadActive) {
881 _scratchVadPositiveParticipants[
882 _scratchVadPositiveParticipantsAmount].participant =
// NOTE(review): the right-hand side of this assignment (presumably the
// frame's id_, original line 883) is not visible in this chunk.
884 // TODO(andrew): to what should this be set?
885 _scratchVadPositiveParticipants[
886 _scratchVadPositiveParticipantsAmount].level = 0;
887 _scratchVadPositiveParticipantsAmount++;
// Membership test by pointer identity: true iff |participant|'s address
// appears in |participantList|.
892 bool AudioConferenceMixerImpl::IsParticipantInList(
893 MixerParticipant& participant,
894 MixerParticipantList* participantList) const {
895 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
896 "IsParticipantInList(participant,participantList)");
897 for (MixerParticipantList::const_iterator iter = participantList->begin();
898 iter != participantList->end();
900 if(&participant == *iter) {
// Appends |participant| (non-owning pointer) to |participantList| and
// resets its mix history so a stale "was mixed" flag can't leak in.
907 bool AudioConferenceMixerImpl::AddParticipantToList(
908 MixerParticipant& participant,
909 MixerParticipantList* participantList) {
910 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
911 "AddParticipantToList(participant, participantList)");
912 participantList->push_back(&participant);
913 // Make sure that the mixed status is correct for new MixerParticipant.
914 participant._mixHistory->ResetMixedStatus();
// Removes |participant| (matched by address) from |participantList| and
// resets its mix history. Returns-by-found semantics; the success/failure
// returns themselves are not visible in this chunk.
918 bool AudioConferenceMixerImpl::RemoveParticipantFromList(
919 MixerParticipant& participant,
920 MixerParticipantList* participantList) {
921 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
922 "RemoveParticipantFromList(participant, participantList)");
923 for (MixerParticipantList::iterator iter = participantList->begin();
924 iter != participantList->end();
926 if(*iter == &participant) {
927 participantList->erase(iter);
928 // Participant is no longer mixed, reset to default.
929 participant._mixHistory->ResetMixedStatus();
// Mixes the named-participant frames in |audioFrameList| into
// |mixedAudio| and records per-participant statistics into the scratch
// array. Single-participant case copies the frame verbatim (no saturation
// protection needed).
936 int32_t AudioConferenceMixerImpl::MixFromList(
937 AudioFrame& mixedAudio,
938 const AudioFrameList* audioFrameList) {
939 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
940 "MixFromList(mixedAudio, audioFrameList)");
941 if(audioFrameList->empty()) return 0;
// |position| indexes into _scratchMixedParticipants.
943 uint32_t position = 0;
944 if(_numMixedParticipants == 1) {
945 // No mixing required here; skip the saturation protection.
946 AudioFrame* audioFrame = audioFrameList->front();
947 mixedAudio.CopyFrom(*audioFrame);
948 SetParticipantStatistics(&_scratchMixedParticipants[position],
// NOTE(review): the argument/return lines closing this branch (original
// 949-951) are not visible in this chunk.
953 for (AudioFrameList::const_iterator iter = audioFrameList->begin();
954 iter != audioFrameList->end();
956 if(position >= kMaximumAmountOfMixedParticipants) {
// Defensive cap: trace instead of overrunning the scratch array.
959 kTraceAudioMixerServer,
961 "Trying to mix more than max amount of mixed participants:%d!",
962 kMaximumAmountOfMixedParticipants);
963 // Assert and avoid crash
967 MixFrames(&mixedAudio, (*iter));
969 SetParticipantStatistics(&_scratchMixedParticipants[position],
978 // TODO(andrew): consolidate this function with MixFromList.
// Same as MixFromList but records no participant statistics -- used for
// the anonymous and ramp-out lists.
979 int32_t AudioConferenceMixerImpl::MixAnonomouslyFromList(
980 AudioFrame& mixedAudio,
981 const AudioFrameList* audioFrameList) {
982 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
983 "MixAnonomouslyFromList(mixedAudio, audioFrameList)");
985 if(audioFrameList->empty()) return 0;
987 if(_numMixedParticipants == 1) {
988 // No mixing required here; skip the saturation protection.
989 AudioFrame* audioFrame = audioFrameList->front();
990 mixedAudio.CopyFrom(*audioFrame);
// NOTE(review): the return closing this branch (original 991-992) is not
// visible in this chunk.
994 for (AudioFrameList::const_iterator iter = audioFrameList->begin();
995 iter != audioFrameList->end();
997 MixFrames(&mixedAudio, *iter);
// Applies the AGC-based soft limiter to |mixedAudio| and restores the
// divide-by-two level reduction applied in MixFrames(). Skipped entirely
// when only one participant is mixed (CopyFrom path never halved).
1002 bool AudioConferenceMixerImpl::LimitMixedAudio(AudioFrame& mixedAudio) {
1003 if(_numMixedParticipants == 1) {
// NOTE(review): the early "return true" (original lines 1004-1005) is
// not visible in this chunk.
1007 // Smoothly limit the mixed frame.
1008 const int error = _limiter->ProcessStream(&mixedAudio);
1010 // And now we can safely restore the level. This procedure results in
1011 // some loss of resolution, deemed acceptable.
1013 // It's possible to apply the gain in the AGC (with a target level of 0 dbFS
1014 // and compression gain of 6 dB). However, in the transition frame when this
1015 // is enabled (moving from one to two participants) it has the potential to
1016 // create discontinuities in the mixed frame.
1018 // Instead we double the frame (with addition since left-shifting a
1019 // negative value is undefined).
1020 mixedAudio += mixedAudio;
1022 if(error != _limiter->kNoError) {
1023 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
1024 "Error from AudioProcessing: %d", error);
1030 } // namespace webrtc