#include "webrtc/voice_engine/channel.h"
+#include "webrtc/base/timeutils.h"
#include "webrtc/common.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
ChannelStatistics stats_;
};
+class VoEBitrateObserver : public BitrateObserver {
+ public:
+ explicit VoEBitrateObserver(Channel* owner)
+ : owner_(owner) {}
+ virtual ~VoEBitrateObserver() {}
+
+ // Implements BitrateObserver.
+ virtual void OnNetworkChanged(const uint32_t bitrate_bps,
+ const uint8_t fraction_lost,
+ const uint32_t rtt) OVERRIDE {
+ // |fraction_lost| has a scale of 0 - 255.
+ owner_->OnNetworkChanged(bitrate_bps, fraction_lost, rtt);
+ }
+
+ private:
+ Channel* owner_;
+};
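+// The bitrate controller invokes OnNetworkChanged() with updated network
+// estimates; relaying through this adapter lets Channel react without
+// inheriting from BitrateObserver itself. The observer is registered with
+// the controller once a send codec is set, roughly:
+//
+//   bitrate_controller_->SetBitrateObserver(send_bitrate_observer_.get(),
+//                                           codec.rate, 0, 0);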
+
int32_t
Channel::SendData(FrameType frameType,
uint8_t payloadType,
// Store current audio level in the RTP/RTCP module.
// The level will be used in combination with voice-activity state
// (frameType) to add an RTP header extension
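+ // |rms_level_| reports the level as a positive dBFS magnitude in [0, 127],
+ // matching the encoding used by the audio-level extension (RFC 6464).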
- _rtpRtcpModule->SetAudioLevel(rtp_audioproc_->level_estimator()->RMS());
+ _rtpRtcpModule->SetAudioLevel(rms_level_.RMS());
}
// Push data from ACM to RTP/RTCP-module to deliver audio frame for
"Channel::OnIncomingSSRCChanged(id=%d, SSRC=%d)",
id, ssrc);
- int32_t channel = VoEChannelId(id);
- assert(channel == _channelId);
-
// Update ssrc so that NTP for AV sync can be updated.
_rtpRtcpModule->SetRemoteSSRC(ssrc);
-
- if (_rtpObserver)
- {
- CriticalSectionScoped cs(&_callbackCritSect);
-
- if (_rtpObserverPtr)
- {
- // Send new SSRC to registered observer using callback
- _rtpObserverPtr->OnIncomingSSRCChanged(channel, ssrc);
- }
- }
}
void Channel::OnIncomingCSRCChanged(int32_t id,
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::OnIncomingCSRCChanged(id=%d, CSRC=%d, added=%d)",
id, CSRC, added);
-
- int32_t channel = VoEChannelId(id);
- assert(channel == _channelId);
-
- if (_rtpObserver)
- {
- CriticalSectionScoped cs(&_callbackCritSect);
-
- if (_rtpObserverPtr)
- {
- _rtpObserverPtr->OnIncomingCSRCChanged(channel, CSRC, added);
- }
- }
}
void Channel::ResetStatistics(uint32_t ssrc) {
isAlive = (_outputSpeechType != AudioFrame::kPLCCNG);
}
- UpdateDeadOrAliveCounters(isAlive);
-
// Send callback to the registered observer
if (_connectionObserver)
{
rtpHeader->header.payloadType,
rtpHeader->type.Audio.channel);
- _lastRemoteTimeStamp = rtpHeader->header.timestamp;
-
if (!channel_state_.Get().playing)
{
// Avoid inserting into NetEQ when we are not playing. Count the
MixAudioWithFile(audioFrame, audioFrame.sample_rate_hz_);
}
- // Place channel in on-hold state (~muted) if on-hold is activated
- if (state.output_is_on_hold)
- {
- AudioFrameOperations::Mute(audioFrame);
- }
-
// External media
if (_outputExternalMedia)
{
// Measure audio level (0-9)
_outputAudioLevel.ComputeLevel(audioFrame);
+ if (capture_start_rtp_time_stamp_ < 0 && audioFrame.timestamp_ != 0) {
+ // The first frame with a valid RTP timestamp.
+ capture_start_rtp_time_stamp_ = audioFrame.timestamp_;
+ }
+
+ if (capture_start_rtp_time_stamp_ >= 0) {
+ // audioFrame.timestamp_ should be valid from now on.
+
+ // Compute elapsed time.
+ int64_t unwrap_timestamp =
+ rtp_ts_wraparound_handler_->Unwrap(audioFrame.timestamp_);
+ audioFrame.elapsed_time_ms_ =
+ (unwrap_timestamp - capture_start_rtp_time_stamp_) /
+ (GetPlayoutFrequency() / 1000);
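+ // E.g. with a 48 kHz RTP clock, an unwrapped timestamp 96000 ticks past
+ // |capture_start_rtp_time_stamp_| yields 96000 / (48000 / 1000) = 2000 ms.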
+
+ {
+ CriticalSectionScoped lock(ts_stats_lock_.get());
+ // Compute ntp time.
+ audioFrame.ntp_time_ms_ = ntp_estimator_.Estimate(
+ audioFrame.timestamp_);
+ // |ntp_time_ms_| won't be valid until at least 2 RTCP SRs are received.
+ if (audioFrame.ntp_time_ms_ > 0) {
+ // Compute |capture_start_ntp_time_ms_| so that
+ // |capture_start_ntp_time_ms_| + |elapsed_time_ms_| == |ntp_time_ms_|
+ capture_start_ntp_time_ms_ =
+ audioFrame.ntp_time_ms_ - audioFrame.elapsed_time_ms_;
+ }
+ }
+ }
+
return 0;
}
_channelId(channelId),
rtp_header_parser_(RtpHeaderParser::Create()),
rtp_payload_registry_(
- new RTPPayloadRegistry(channelId,
- RTPPayloadStrategy::CreateStrategy(true))),
+ new RTPPayloadRegistry(RTPPayloadStrategy::CreateStrategy(true))),
rtp_receive_statistics_(ReceiveStatistics::Create(
Clock::GetRealTimeClock())),
rtp_receiver_(RtpReceiver::CreateAudioReceiver(
VoEModuleId(instanceId, channelId), Clock::GetRealTimeClock(), this,
this, this, rtp_payload_registry_.get())),
telephone_event_handler_(rtp_receiver_->GetTelephoneEventHandler()),
- audio_coding_(config.Get<AudioCodingModuleFactory>().Create(
+ audio_coding_(AudioCodingModule::Create(
VoEModuleId(instanceId, channelId))),
_rtpDumpIn(*RtpDump::CreateRtpDump()),
_rtpDumpOut(*RtpDump::CreateRtpDump()),
_outputExternalMediaCallbackPtr(NULL),
_timeStamp(0), // This is just an offset; the RTP module will add its own random offset.
_sendTelephoneEventPayloadType(106),
+ ntp_estimator_(Clock::GetRealTimeClock()),
jitter_buffer_playout_timestamp_(0),
playout_timestamp_rtp_(0),
playout_timestamp_rtcp_(0),
playout_delay_ms_(0),
_numberOfDiscardedPackets(0),
send_sequence_number_(0),
+ ts_stats_lock_(CriticalSectionWrapper::CreateCriticalSection()),
+ rtp_ts_wraparound_handler_(new rtc::TimestampWrapAroundHandler()),
+ capture_start_rtp_time_stamp_(-1),
+ capture_start_ntp_time_ms_(-1),
_engineStatisticsPtr(NULL),
_outputMixerPtr(NULL),
_transmitMixerPtr(NULL),
_voiceEngineObserverPtr(NULL),
_callbackCritSectPtr(NULL),
_transportPtr(NULL),
- rx_audioproc_(AudioProcessing::Create(VoEModuleId(instanceId, channelId))),
_rxVadObserverPtr(NULL),
_oldVadDecision(-1),
_sendFrameType(0),
- _rtpObserverPtr(NULL),
_rtcpObserverPtr(NULL),
_externalPlayout(false),
_externalMixing(false),
- _inputIsOnHold(false),
_mixFileWithMicrophone(false),
- _rtpObserver(false),
_rtcpObserver(false),
_mute(false),
_panLeft(1.0f),
_playOutbandDtmfEvent(false),
_playInbandDtmfEvent(false),
_lastLocalTimeStamp(0),
- _lastRemoteTimeStamp(0),
_lastPayloadType(0),
_includeAudioLevelIndication(false),
_rtpPacketTimedOut(false),
_rtpTimeOutSeconds(0),
_connectionObserver(false),
_connectionObserverPtr(NULL),
- _countAliveDetections(0),
- _countDeadDetections(0),
_outputSpeechType(AudioFrame::kNormalSpeech),
vie_network_(NULL),
video_channel_(-1),
_RxVadDetection(false),
_rxAgcIsEnabled(false),
_rxNsIsEnabled(false),
- restored_packet_in_use_(false)
+ restored_packet_in_use_(false),
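+ // The |true| argument makes the controller enforce a minimum bitrate.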
+ bitrate_controller_(
+ BitrateController::CreateBitrateController(Clock::GetRealTimeClock(),
+ true)),
+ rtcp_bandwidth_observer_(
+ bitrate_controller_->CreateRtcpBandwidthObserver()),
+ send_bitrate_observer_(new VoEBitrateObserver(this)),
+ network_predictor_(new NetworkPredictor(Clock::GetRealTimeClock()))
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::Channel() - ctor");
configuration.rtcp_feedback = this;
configuration.audio_messages = this;
configuration.receive_statistics = rtp_receive_statistics_.get();
+ configuration.bandwidth_callback = rtcp_bandwidth_observer_.get();
_rtpRtcpModule.reset(RtpRtcp::CreateRtpRtcp(configuration));
statistics_proxy_.reset(new StatisticsProxy(_rtpRtcpModule->SSRC()));
rtp_receive_statistics_->RegisterRtcpStatisticsCallback(
statistics_proxy_.get());
+
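+ // ExperimentalAgc only applies to capture-side processing, so it is
+ // explicitly disabled for this receive-side APM instance.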
+ Config audioproc_config;
+ audioproc_config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
+ rx_audioproc_.reset(AudioProcessing::Create(audioproc_config));
}
Channel::~Channel()
return 0;
}
- // Recover DTMF detection status.
- telephone_event_handler_->SetTelephoneEventForwardToDecoder(true);
- RegisterReceiveCodecsToRTPModule();
channel_state_.SetReceiving(false);
return 0;
}
int32_t
-Channel::SetNetEQPlayoutMode(NetEqModes mode)
-{
- WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
- "Channel::SetNetEQPlayoutMode()");
- AudioPlayoutMode playoutMode(voice);
- switch (mode)
- {
- case kNetEqDefault:
- playoutMode = voice;
- break;
- case kNetEqStreaming:
- playoutMode = streaming;
- break;
- case kNetEqFax:
- playoutMode = fax;
- break;
- case kNetEqOff:
- playoutMode = off;
- break;
- }
- if (audio_coding_->SetPlayoutMode(playoutMode) != 0)
- {
- _engineStatisticsPtr->SetLastError(
- VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
- "SetNetEQPlayoutMode() failed to set playout mode");
- return -1;
- }
- return 0;
-}
-
-int32_t
-Channel::GetNetEQPlayoutMode(NetEqModes& mode)
-{
- const AudioPlayoutMode playoutMode = audio_coding_->PlayoutMode();
- switch (playoutMode)
- {
- case voice:
- mode = kNetEqDefault;
- break;
- case streaming:
- mode = kNetEqStreaming;
- break;
- case fax:
- mode = kNetEqFax;
- break;
- case off:
- mode = kNetEqOff;
- }
- WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
- VoEId(_instanceId,_channelId),
- "Channel::GetNetEQPlayoutMode() => mode=%u", mode);
- return 0;
-}
-
-int32_t
-Channel::SetOnHoldStatus(bool enable, OnHoldModes mode)
-{
- WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
- "Channel::SetOnHoldStatus()");
- if (mode == kHoldSendAndPlay)
- {
- channel_state_.SetOutputIsOnHold(enable);
- _inputIsOnHold = enable;
- }
- else if (mode == kHoldPlayOnly)
- {
- channel_state_.SetOutputIsOnHold(enable);
- }
- if (mode == kHoldSendOnly)
- {
- _inputIsOnHold = enable;
- }
- return 0;
-}
-
-int32_t
-Channel::GetOnHoldStatus(bool& enabled, OnHoldModes& mode)
-{
- WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
- "Channel::GetOnHoldStatus()");
- bool output_is_on_hold = channel_state_.Get().output_is_on_hold;
- enabled = (output_is_on_hold || _inputIsOnHold);
- if (output_is_on_hold && _inputIsOnHold)
- {
- mode = kHoldSendAndPlay;
- }
- else if (output_is_on_hold && !_inputIsOnHold)
- {
- mode = kHoldPlayOnly;
- }
- else if (!output_is_on_hold && _inputIsOnHold)
- {
- mode = kHoldSendOnly;
- }
- WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,_channelId),
- "Channel::GetOnHoldStatus() => enabled=%d, mode=%d",
- enabled, mode);
- return 0;
-}
-
-int32_t
Channel::RegisterVoiceEngineObserver(VoiceEngineObserver& observer)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
return -1;
}
+ bitrate_controller_->SetBitrateObserver(send_bitrate_observer_.get(),
+ codec.rate, 0, 0);
+
return 0;
}
+void
+Channel::OnNetworkChanged(const uint32_t bitrate_bps,
+ const uint8_t fraction_lost, // 0 - 255.
+ const uint32_t rtt) {
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::OnNetworkChanged(bitrate_bps=%d, fration_lost=%d, rtt=%d)",
+ bitrate_bps, fraction_lost, rtt);
+ // |fraction_lost| from BitrateObserver is a short-term observation of the
+ // recent packet loss rate, so we run it through the network predictor to
+ // obtain a smoother loss-rate estimate.
+ network_predictor_->UpdatePacketLossRate(fraction_lost);
+ uint8_t loss_rate = network_predictor_->GetLossRate();
+ // Normalize the loss rate to the range 0 - 100.
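+ // E.g. a predicted |loss_rate| of 51 (out of 255) maps to
+ // 100 * 51 / 255 = 20 percent.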
+ if (audio_coding_->SetPacketLossRate(100 * loss_rate / 255) != 0) {
+ _engineStatisticsPtr->SetLastError(VE_AUDIO_CODING_MODULE_ERROR,
+ kTraceError, "OnNetworkChanged() failed to set packet loss rate");
+ assert(false); // This should not happen.
+ }
+}
+
int32_t
Channel::SetVADStatus(bool enableVAD, ACMVADMode mode, bool disableDTX)
{
}
int32_t
-Channel::SetAMREncFormat(AmrMode mode)
-{
- WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
- "Channel::SetAMREncFormat()");
-
- // ACM doesn't support AMR
- return -1;
-}
-
-int32_t
-Channel::SetAMRDecFormat(AmrMode mode)
-{
- WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
- "Channel::SetAMRDecFormat()");
-
- // ACM doesn't support AMR
- return -1;
-}
-
-int32_t
-Channel::SetAMRWbEncFormat(AmrMode mode)
-{
- WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
- "Channel::SetAMRWbEncFormat()");
-
- // ACM doesn't support AMR
- return -1;
-
-}
-
-int32_t
-Channel::SetAMRWbDecFormat(AmrMode mode)
-{
- WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
- "Channel::SetAMRWbDecFormat()");
-
- // ACM doesn't support AMR
- return -1;
-}
-
-int32_t
Channel::SetSendCNPayloadType(int type, PayloadFrequencies frequency)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
return 0;
}
-int32_t
-Channel::SetISACInitTargetRate(int rateBps, bool useFixedFrameSize)
-{
- WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
- "Channel::SetISACInitTargetRate()");
-
- CodecInst sendCodec;
- if (audio_coding_->SendCodec(&sendCodec) == -1)
- {
- _engineStatisticsPtr->SetLastError(
- VE_CODEC_ERROR, kTraceError,
- "SetISACInitTargetRate() failed to retrieve send codec");
- return -1;
- }
- if (STR_CASE_CMP(sendCodec.plname, "ISAC") != 0)
- {
- // This API is only valid if iSAC is setup to run in channel-adaptive
- // mode.
- // We do not validate the adaptive mode here. It is done later in the
- // ConfigISACBandwidthEstimator() API.
- _engineStatisticsPtr->SetLastError(
- VE_CODEC_ERROR, kTraceError,
- "SetISACInitTargetRate() send codec is not iSAC");
- return -1;
- }
-
- uint8_t initFrameSizeMsec(0);
- if (16000 == sendCodec.plfreq)
- {
- // Note that 0 is a valid and corresponds to "use default
- if ((rateBps != 0 &&
- rateBps < kVoiceEngineMinIsacInitTargetRateBpsWb) ||
- (rateBps > kVoiceEngineMaxIsacInitTargetRateBpsWb))
- {
- _engineStatisticsPtr->SetLastError(
- VE_INVALID_ARGUMENT, kTraceError,
- "SetISACInitTargetRate() invalid target rate - 1");
- return -1;
- }
- // 30 or 60ms
- initFrameSizeMsec = (uint8_t)(sendCodec.pacsize / 16);
- }
- else if (32000 == sendCodec.plfreq)
- {
- if ((rateBps != 0 &&
- rateBps < kVoiceEngineMinIsacInitTargetRateBpsSwb) ||
- (rateBps > kVoiceEngineMaxIsacInitTargetRateBpsSwb))
- {
- _engineStatisticsPtr->SetLastError(
- VE_INVALID_ARGUMENT, kTraceError,
- "SetISACInitTargetRate() invalid target rate - 2");
- return -1;
- }
- initFrameSizeMsec = (uint8_t)(sendCodec.pacsize / 32); // 30ms
- }
-
- if (audio_coding_->ConfigISACBandwidthEstimator(
- initFrameSizeMsec, rateBps, useFixedFrameSize) == -1)
- {
- _engineStatisticsPtr->SetLastError(
- VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
- "SetISACInitTargetRate() iSAC BWE config failed");
- return -1;
- }
-
- return 0;
-}
-
-int32_t
-Channel::SetISACMaxRate(int rateBps)
-{
- WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
- "Channel::SetISACMaxRate()");
-
- CodecInst sendCodec;
- if (audio_coding_->SendCodec(&sendCodec) == -1)
- {
- _engineStatisticsPtr->SetLastError(
- VE_CODEC_ERROR, kTraceError,
- "SetISACMaxRate() failed to retrieve send codec");
- return -1;
- }
- if (STR_CASE_CMP(sendCodec.plname, "ISAC") != 0)
- {
- // This API is only valid if iSAC is selected as sending codec.
- _engineStatisticsPtr->SetLastError(
- VE_CODEC_ERROR, kTraceError,
- "SetISACMaxRate() send codec is not iSAC");
- return -1;
- }
- if (16000 == sendCodec.plfreq)
- {
- if ((rateBps < kVoiceEngineMinIsacMaxRateBpsWb) ||
- (rateBps > kVoiceEngineMaxIsacMaxRateBpsWb))
- {
- _engineStatisticsPtr->SetLastError(
- VE_INVALID_ARGUMENT, kTraceError,
- "SetISACMaxRate() invalid max rate - 1");
- return -1;
- }
- }
- else if (32000 == sendCodec.plfreq)
- {
- if ((rateBps < kVoiceEngineMinIsacMaxRateBpsSwb) ||
- (rateBps > kVoiceEngineMaxIsacMaxRateBpsSwb))
- {
- _engineStatisticsPtr->SetLastError(
- VE_INVALID_ARGUMENT, kTraceError,
- "SetISACMaxRate() invalid max rate - 2");
- return -1;
- }
- }
- if (channel_state_.Get().sending)
- {
- _engineStatisticsPtr->SetLastError(
- VE_SENDING, kTraceError,
- "SetISACMaxRate() unable to set max rate while sending");
- return -1;
- }
-
- // Set the maximum instantaneous rate of iSAC (works for both adaptive
- // and non-adaptive mode)
- if (audio_coding_->SetISACMaxRate(rateBps) == -1)
- {
- _engineStatisticsPtr->SetLastError(
- VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
- "SetISACMaxRate() failed to set max rate");
- return -1;
- }
-
- return 0;
-}
-
-int32_t
-Channel::SetISACMaxPayloadSize(int sizeBytes)
-{
- WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
- "Channel::SetISACMaxPayloadSize()");
- CodecInst sendCodec;
- if (audio_coding_->SendCodec(&sendCodec) == -1)
- {
- _engineStatisticsPtr->SetLastError(
- VE_CODEC_ERROR, kTraceError,
- "SetISACMaxPayloadSize() failed to retrieve send codec");
- return -1;
- }
- if (STR_CASE_CMP(sendCodec.plname, "ISAC") != 0)
- {
- _engineStatisticsPtr->SetLastError(
- VE_CODEC_ERROR, kTraceError,
- "SetISACMaxPayloadSize() send codec is not iSAC");
- return -1;
- }
- if (16000 == sendCodec.plfreq)
- {
- if ((sizeBytes < kVoiceEngineMinIsacMaxPayloadSizeBytesWb) ||
- (sizeBytes > kVoiceEngineMaxIsacMaxPayloadSizeBytesWb))
- {
- _engineStatisticsPtr->SetLastError(
- VE_INVALID_ARGUMENT, kTraceError,
- "SetISACMaxPayloadSize() invalid max payload - 1");
- return -1;
- }
- }
- else if (32000 == sendCodec.plfreq)
- {
- if ((sizeBytes < kVoiceEngineMinIsacMaxPayloadSizeBytesSwb) ||
- (sizeBytes > kVoiceEngineMaxIsacMaxPayloadSizeBytesSwb))
- {
- _engineStatisticsPtr->SetLastError(
- VE_INVALID_ARGUMENT, kTraceError,
- "SetISACMaxPayloadSize() invalid max payload - 2");
- return -1;
- }
- }
- if (channel_state_.Get().sending)
- {
- _engineStatisticsPtr->SetLastError(
- VE_SENDING, kTraceError,
- "SetISACMaxPayloadSize() unable to set max rate while sending");
- return -1;
- }
+int Channel::SetOpusMaxPlaybackRate(int frequency_hz) {
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+ "Channel::SetOpusMaxPlaybackRate()");
- if (audio_coding_->SetISACMaxPayloadSize(sizeBytes) == -1)
- {
- _engineStatisticsPtr->SetLastError(
- VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
- "SetISACMaxPayloadSize() failed to set max payload size");
- return -1;
- }
- return 0;
+ if (audio_coding_->SetOpusMaxPlaybackRate(frequency_hz) != 0) {
+ _engineStatisticsPtr->SetLastError(
+ VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+ "SetOpusMaxPlaybackRate() failed to set maximum playback rate");
+ return -1;
+ }
+ return 0;
}
int32_t Channel::RegisterExternalTransport(Transport& transport)
VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceWarning,
"Channel::IncomingRTPPacket() RTCP packet is invalid");
}
+
+ {
+ CriticalSectionScoped lock(ts_stats_lock_.get());
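+ // Each RTCP SR carries an (NTP time, RTP timestamp) pair; feeding it to
+ // the estimator keeps the RTP->NTP mapping used by Estimate() current.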
+ ntp_estimator_.UpdateRtcpTimestamp(rtp_receiver_->SSRC(),
+ _rtpRtcpModule.get());
+ }
return 0;
}
// |_fileCritSect| cannot be taken while calling
// SetAnonymousMixabilityStatus() since as soon as the participant is added
// frames can be pulled by the mixer. Since the frames are generated from
- // the file, _fileCritSect will be taken. This would result in a deadlock.
- if (_outputMixerPtr->SetAnonymousMixabilityStatus(*this, true) != 0)
- {
- channel_state_.SetOutputFilePlaying(false);
- CriticalSectionScoped cs(&_fileCritSect);
- _engineStatisticsPtr->SetLastError(
- VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError,
- "StartPlayingFile() failed to add participant as file to mixer");
- _outputFilePlayerPtr->StopPlayingFile();
- FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
- _outputFilePlayerPtr = NULL;
- return -1;
- }
-
- return 0;
-}
-
-int Channel::ScaleLocalFilePlayout(float scale)
-{
- WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
- "Channel::ScaleLocalFilePlayout(scale=%5.3f)", scale);
-
- CriticalSectionScoped cs(&_fileCritSect);
-
- if (!channel_state_.Get().output_file_playing)
- {
- _engineStatisticsPtr->SetLastError(
- VE_INVALID_OPERATION, kTraceError,
- "ScaleLocalFilePlayout() isnot playing");
- return -1;
- }
- if ((_outputFilePlayerPtr == NULL) ||
- (_outputFilePlayerPtr->SetAudioScaling(scale) != 0))
- {
- _engineStatisticsPtr->SetLastError(
- VE_BAD_ARGUMENT, kTraceError,
- "SetAudioScaling() failed to scale the playout");
- return -1;
- }
-
- return 0;
-}
-
-int Channel::GetLocalPlayoutPosition(int& positionMs)
-{
- WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
- "Channel::GetLocalPlayoutPosition(position=?)");
-
- uint32_t position;
-
- CriticalSectionScoped cs(&_fileCritSect);
-
- if (_outputFilePlayerPtr == NULL)
- {
- _engineStatisticsPtr->SetLastError(
- VE_INVALID_OPERATION, kTraceError,
- "GetLocalPlayoutPosition() filePlayer instance doesnot exist");
- return -1;
- }
-
- if (_outputFilePlayerPtr->GetPlayoutPosition(position) != 0)
+ // the file, _fileCritSect will be taken. This would result in a deadlock.
+ if (_outputMixerPtr->SetAnonymousMixabilityStatus(*this, true) != 0)
{
+ channel_state_.SetOutputFilePlaying(false);
+ CriticalSectionScoped cs(&_fileCritSect);
_engineStatisticsPtr->SetLastError(
- VE_BAD_FILE, kTraceError,
- "GetLocalPlayoutPosition() failed");
+ VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError,
+ "StartPlayingFile() failed to add participant as file to mixer");
+ _outputFilePlayerPtr->StopPlayingFile();
+ FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
+ _outputFilePlayerPtr = NULL;
return -1;
}
- positionMs = position;
return 0;
}
return channel_state_.Get().input_file_playing;
}
-int Channel::ScaleFileAsMicrophonePlayout(float scale)
-{
- WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
- "Channel::ScaleFileAsMicrophonePlayout(scale=%5.3f)", scale);
-
- CriticalSectionScoped cs(&_fileCritSect);
-
- if (!channel_state_.Get().input_file_playing)
- {
- _engineStatisticsPtr->SetLastError(
- VE_INVALID_OPERATION, kTraceError,
- "ScaleFileAsMicrophonePlayout() isnot playing");
- return -1;
- }
-
- if ((_inputFilePlayerPtr == NULL) ||
- (_inputFilePlayerPtr->SetAudioScaling(scale) != 0))
- {
- _engineStatisticsPtr->SetLastError(
- VE_BAD_ARGUMENT, kTraceError,
- "SetAudioScaling() failed to scale playout");
- return -1;
- }
-
- return 0;
-}
-
int Channel::StartRecordingPlayout(const char* fileName,
const CodecInst* codecInst)
{
#endif // #ifdef WEBRTC_VOICE_ENGINE_NR
int
-Channel::RegisterRTPObserver(VoERTPObserver& observer)
-{
- WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
- "Channel::RegisterRTPObserver()");
- CriticalSectionScoped cs(&_callbackCritSect);
-
- if (_rtpObserverPtr)
- {
- _engineStatisticsPtr->SetLastError(
- VE_INVALID_OPERATION, kTraceError,
- "RegisterRTPObserver() observer already enabled");
- return -1;
- }
-
- _rtpObserverPtr = &observer;
- _rtpObserver = true;
-
- return 0;
-}
-
-int
-Channel::DeRegisterRTPObserver()
-{
- WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
- "Channel::DeRegisterRTPObserver()");
- CriticalSectionScoped cs(&_callbackCritSect);
-
- if (!_rtpObserverPtr)
- {
- _engineStatisticsPtr->SetLastError(
- VE_INVALID_OPERATION, kTraceWarning,
- "DeRegisterRTPObserver() observer already disabled");
- return 0;
- }
-
- _rtpObserver = false;
- _rtpObserverPtr = NULL;
-
- return 0;
-}
-
-int
Channel::RegisterRTCPObserver(VoERTCPObserver& observer)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
"SetLocalSSRC() already sending");
return -1;
}
- if (_rtpRtcpModule->SetSSRC(ssrc) != 0)
- {
- _engineStatisticsPtr->SetLastError(
- VE_RTP_RTCP_MODULE_ERROR, kTraceError,
- "SetLocalSSRC() failed to set SSRC");
- return -1;
- }
+ _rtpRtcpModule->SetSSRC(ssrc);
return 0;
}
return 0;
}
-int
-Channel::GetRemoteCSRCs(unsigned int arrCSRC[15])
-{
- if (arrCSRC == NULL)
- {
- _engineStatisticsPtr->SetLastError(
- VE_INVALID_ARGUMENT, kTraceError,
- "GetRemoteCSRCs() invalid array argument");
- return -1;
- }
- uint32_t arrOfCSRC[kRtpCsrcSize];
- int32_t CSRCs(0);
- CSRCs = rtp_receiver_->CSRCs(arrOfCSRC);
- if (CSRCs > 0)
- {
- memcpy(arrCSRC, arrOfCSRC, CSRCs * sizeof(uint32_t));
- for (int i = 0; i < (int) CSRCs; i++)
- {
- WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
- VoEId(_instanceId, _channelId),
- "GetRemoteCSRCs() => arrCSRC[%d]=%lu", i, arrCSRC[i]);
- }
- } else
- {
- WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
- VoEId(_instanceId, _channelId),
- "GetRemoteCSRCs() => list is empty!");
- }
- return CSRCs;
-}
-
int Channel::SetSendAudioLevelIndicationStatus(bool enable, unsigned char id) {
- if (rtp_audioproc_.get() == NULL) {
- rtp_audioproc_.reset(AudioProcessing::Create(VoEModuleId(_instanceId,
- _channelId)));
- }
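+ // The level itself is now computed by |rms_level_| in PrepareEncodeAndSend()
+ // and attached to outgoing packets in SendData(), so no per-channel APM
+ // instance is needed for this feature.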
+ _includeAudioLevelIndication = enable;
+ return SetSendRtpHeaderExtension(enable, kRtpExtensionAudioLevel, id);
+}
- if (rtp_audioproc_->level_estimator()->Enable(enable) !=
- AudioProcessing::kNoError) {
- _engineStatisticsPtr->SetLastError(VE_APM_ERROR, kTraceError,
- "Failed to enable AudioProcessing::level_estimator()");
+int Channel::SetReceiveAudioLevelIndicationStatus(bool enable,
+ unsigned char id) {
+ rtp_header_parser_->DeregisterRtpHeaderExtension(
+ kRtpExtensionAudioLevel);
+ if (enable && !rtp_header_parser_->RegisterRtpHeaderExtension(
+ kRtpExtensionAudioLevel, id)) {
return -1;
}
-
- _includeAudioLevelIndication = enable;
-
- return SetSendRtpHeaderExtension(enable, kRtpExtensionAudioLevel, id);
+ return 0;
}
int Channel::SetSendAbsoluteSenderTimeStatus(bool enable, unsigned char id) {
}
int
-Channel::GetRTCP_CNAME(char cName[256])
-{
- if (_rtpRtcpModule->CNAME(cName) != 0)
- {
- _engineStatisticsPtr->SetLastError(
- VE_RTP_RTCP_MODULE_ERROR, kTraceError,
- "GetRTCP_CNAME() failed to retrieve RTCP CNAME");
- return -1;
- }
- WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
- VoEId(_instanceId, _channelId),
- "GetRTCP_CNAME() => cName=%s", cName);
- return 0;
-}
-
-int
Channel::GetRemoteRTCP_CNAME(char cName[256])
{
if (cName == NULL)
return 0;
}
-int Channel::GetRemoteRTCPSenderInfo(SenderInfo* sender_info) {
- if (sender_info == NULL) {
- _engineStatisticsPtr->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
- "GetRemoteRTCPSenderInfo() invalid sender_info.");
- return -1;
- }
-
- // Get the sender info from the latest received RTCP Sender Report.
- RTCPSenderInfo rtcp_sender_info;
- if (_rtpRtcpModule->RemoteRTCPStat(&rtcp_sender_info) != 0) {
- _engineStatisticsPtr->SetLastError(VE_RTP_RTCP_MODULE_ERROR, kTraceError,
- "GetRemoteRTCPSenderInfo() failed to read RTCP SR sender info.");
- return -1;
- }
-
- sender_info->NTP_timestamp_high = rtcp_sender_info.NTPseconds;
- sender_info->NTP_timestamp_low = rtcp_sender_info.NTPfraction;
- sender_info->RTP_timestamp = rtcp_sender_info.RTPtimeStamp;
- sender_info->sender_packet_count = rtcp_sender_info.sendPacketCount;
- sender_info->sender_octet_count = rtcp_sender_info.sendOctetCount;
- return 0;
-}
-
int Channel::GetRemoteRTCPReportBlocks(
std::vector<ReportBlock>* report_blocks) {
if (report_blocks == NULL) {
int
Channel::GetRTPStatistics(CallStatistics& stats)
{
- // --- Part one of the final structure (four values)
+ // --- RtcpStatistics
// The jitter statistics is updated for each received RTP packet and is
// based on received packets.
stats.fractionLost, stats.cumulativeLost, stats.extendedMax,
stats.jitterSamples);
- // --- Part two of the final structure (one value)
-
- uint16_t RTT(0);
- RTCPMethod method = _rtpRtcpModule->RTCP();
- if (method == kRtcpOff)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceVoice,
- VoEId(_instanceId, _channelId),
- "GetRTPStatistics() RTCP is disabled => valid RTT "
- "measurements cannot be retrieved");
- } else
- {
- // The remote SSRC will be zero if no RTP packet has been received.
- uint32_t remoteSSRC = rtp_receiver_->SSRC();
- if (remoteSSRC > 0)
- {
- uint16_t avgRTT(0);
- uint16_t maxRTT(0);
- uint16_t minRTT(0);
-
- if (_rtpRtcpModule->RTT(remoteSSRC, &RTT, &avgRTT, &minRTT, &maxRTT)
- != 0)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceVoice,
- VoEId(_instanceId, _channelId),
- "GetRTPStatistics() failed to retrieve RTT from "
- "the RTP/RTCP module");
- }
- } else
- {
- WEBRTC_TRACE(kTraceWarning, kTraceVoice,
- VoEId(_instanceId, _channelId),
- "GetRTPStatistics() failed to measure RTT since no "
- "RTP packets have been received yet");
- }
- }
-
- stats.rttMs = static_cast<int> (RTT);
+ // --- RTT
+ stats.rttMs = GetRTT();
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
VoEId(_instanceId, _channelId),
"GetRTPStatistics() => rttMs=%d", stats.rttMs);
- // --- Part three of the final structure (four values)
+ // --- Data counters
uint32_t bytesSent(0);
uint32_t packetsSent(0);
stats.bytesSent, stats.packetsSent, stats.bytesReceived,
stats.packetsReceived);
+ // --- Timestamps
+ {
+ CriticalSectionScoped lock(ts_stats_lock_.get());
+ stats.capture_start_ntp_time_ms_ = capture_start_ntp_time_ms_;
+ }
return 0;
}
-int Channel::SetFECStatus(bool enable, int redPayloadtype) {
+int Channel::SetREDStatus(bool enable, int redPayloadtype) {
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
- "Channel::SetFECStatus()");
+ "Channel::SetREDStatus()");
if (enable) {
if (redPayloadtype < 0 || redPayloadtype > 127) {
_engineStatisticsPtr->SetLastError(
VE_PLTYPE_ERROR, kTraceError,
- "SetFECStatus() invalid RED payload type");
+ "SetREDStatus() invalid RED payload type");
return -1;
}
}
}
- if (audio_coding_->SetFECStatus(enable) != 0) {
+ if (audio_coding_->SetREDStatus(enable) != 0) {
_engineStatisticsPtr->SetLastError(
VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
- "SetFECStatus() failed to set FEC state in the ACM");
+ "SetREDStatus() failed to set RED state in the ACM");
return -1;
}
return 0;
}
int
-Channel::GetFECStatus(bool& enabled, int& redPayloadtype)
+Channel::GetREDStatus(bool& enabled, int& redPayloadtype)
{
- enabled = audio_coding_->FECStatus();
+ enabled = audio_coding_->REDStatus();
if (enabled)
{
int8_t payloadType(0);
{
_engineStatisticsPtr->SetLastError(
VE_RTP_RTCP_MODULE_ERROR, kTraceError,
- "GetFECStatus() failed to retrieve RED PT from RTP/RTCP "
+ "GetREDStatus() failed to retrieve RED PT from RTP/RTCP "
"module");
return -1;
}
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
VoEId(_instanceId, _channelId),
- "GetFECStatus() => enabled=%d, redPayloadtype=%d",
+ "GetREDStatus() => enabled=%d, redPayloadtype=%d",
enabled, redPayloadtype);
return 0;
}
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
VoEId(_instanceId, _channelId),
- "GetFECStatus() => enabled=%d", enabled);
+ "GetREDStatus() => enabled=%d", enabled);
return 0;
}
+int Channel::SetCodecFECStatus(bool enable) {
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+ "Channel::SetCodecFECStatus()");
+
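+ // This toggles the codec's in-band FEC (currently only Opus supports it);
+ // the protection level then follows the loss rate passed to
+ // SetPacketLossRate() in OnNetworkChanged().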
+ if (audio_coding_->SetCodecFEC(enable) != 0) {
+ _engineStatisticsPtr->SetLastError(
+ VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+ "SetCodecFECStatus() failed to set FEC state");
+ return -1;
+ }
+ return 0;
+}
+
+bool Channel::GetCodecFECStatus() {
+ bool enabled = audio_coding_->CodecFEC();
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+ VoEId(_instanceId, _channelId),
+ "GetCodecFECStatus() => enabled=%d", enabled);
+ return enabled;
+}
+
void Channel::SetNACKStatus(bool enable, int maxNumberOfPackets) {
// None of these functions can fail.
_rtpRtcpModule->SetStorePacketsStatus(enable, maxNumberOfPackets);
return 0;
}
-// TODO(xians): This method borrows quite some code from
-// TransmitMixer::GenerateAudioFrame(), refactor these two methods and reduce
-// code duplication.
void Channel::Demultiplex(const int16_t* audio_data,
int sample_rate,
int number_of_frames,
int number_of_channels) {
- // The highest sample rate that WebRTC supports for mono audio is 96kHz.
- static const int kMaxNumberOfFrames = 960;
- assert(number_of_frames <= kMaxNumberOfFrames);
-
- // Get the send codec information for doing resampling or downmixing later on.
CodecInst codec;
GetSendCodec(codec);
- assert(codec.channels == 1 || codec.channels == 2);
- int support_sample_rate = std::min(32000,
- std::min(sample_rate, codec.plfreq));
-
- // Downmix the data to mono if needed.
- const int16_t* audio_ptr = audio_data;
- if (number_of_channels == 2 && codec.channels == 1) {
- if (!mono_recording_audio_.get())
- mono_recording_audio_.reset(new int16_t[kMaxNumberOfFrames]);
-
- AudioFrameOperations::StereoToMono(audio_data, number_of_frames,
- mono_recording_audio_.get());
- audio_ptr = mono_recording_audio_.get();
- }
-
- // Resample the data to the sample rate that the codec is using.
- if (input_resampler_.InitializeIfNeeded(sample_rate,
- support_sample_rate,
- codec.channels)) {
- WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
- "Channel::Demultiplex() unable to resample");
- return;
- }
- int out_length = input_resampler_.Resample(audio_ptr,
- number_of_frames * codec.channels,
- _audioFrame.data_,
- AudioFrame::kMaxDataSizeSamples);
- if (out_length == -1) {
- WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
- "Channel::Demultiplex() resampling failed");
- return;
+ if (!mono_recording_audio_.get()) {
+ // Temporary space for DownConvertToCodecFormat.
+ mono_recording_audio_.reset(new int16_t[kMaxMonoDataSizeSamples]);
}
-
- _audioFrame.samples_per_channel_ = out_length / codec.channels;
- _audioFrame.timestamp_ = -1;
- _audioFrame.sample_rate_hz_ = support_sample_rate;
- _audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
- _audioFrame.vad_activity_ = AudioFrame::kVadUnknown;
- _audioFrame.num_channels_ = codec.channels;
- _audioFrame.id_ = _channelId;
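+ // DownConvertToCodecFormat() folds the former inline logic into a shared
+ // helper: it downmixes stereo input when the send codec is mono, resamples
+ // to a rate the codec accepts, and fills in |_audioFrame|.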
+ DownConvertToCodecFormat(audio_data,
+ number_of_frames,
+ number_of_channels,
+ sample_rate,
+ codec.channels,
+ codec.plfreq,
+ mono_recording_audio_.get(),
+ &input_resampler_,
+ &_audioFrame);
}
uint32_t
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::PrepareEncodeAndSend() invalid audio frame");
- return -1;
+ return 0xFFFFFFFF;
}
if (channel_state_.Get().input_file_playing)
MixOrReplaceAudioWithFile(mixingFrequency);
}
- if (Mute())
- {
- AudioFrameOperations::Mute(_audioFrame);
+ bool is_muted = Mute(); // Cache locally as Mute() takes a lock.
+ if (is_muted) {
+ AudioFrameOperations::Mute(_audioFrame);
}
if (channel_state_.Get().input_external_media)
InsertInbandDtmfTone();
if (_includeAudioLevelIndication) {
- // Performs level analysis only; does not affect the signal.
- int err = rtp_audioproc_->ProcessStream(&_audioFrame);
- if (err) {
- LOG(LS_ERROR) << "ProcessStream() error: " << err;
- assert(false);
+ int length = _audioFrame.samples_per_channel_ * _audioFrame.num_channels_;
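+ // ProcessMuted() accounts for the samples as silence so the reported
+ // level decays rather than freezing at its last unmuted value.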
+ if (is_muted) {
+ rms_level_.ProcessMuted(length);
+ } else {
+ rms_level_.Process(_audioFrame.data_, length);
}
}
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::EncodeAndSend() invalid audio frame");
- return -1;
+ return 0xFFFFFFFF;
}
_audioFrame.id_ = _channelId;
{
WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::EncodeAndSend() ACM encoding failed");
- return -1;
+ return 0xFFFFFFFF;
}
_timeStamp += _audioFrame.samples_per_channel_;
}
int
-Channel::ResetRTCPStatistics()
-{
- WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
- "Channel::ResetRTCPStatistics()");
- uint32_t remoteSSRC(0);
- remoteSSRC = rtp_receiver_->SSRC();
- return _rtpRtcpModule->ResetRTT(remoteSSRC);
-}
-
-int
-Channel::GetRoundTripTimeSummary(StatVal& delaysMs) const
-{
- WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
- "Channel::GetRoundTripTimeSummary()");
- // Override default module outputs for the case when RTCP is disabled.
- // This is done to ensure that we are backward compatible with the
- // VoiceEngine where we did not use RTP/RTCP module.
- if (!_rtpRtcpModule->RTCP())
- {
- delaysMs.min = -1;
- delaysMs.max = -1;
- delaysMs.average = -1;
- WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
- "Channel::GetRoundTripTimeSummary() RTCP is disabled =>"
- " valid RTT measurements cannot be retrieved");
- return 0;
- }
-
- uint32_t remoteSSRC;
- uint16_t RTT;
- uint16_t avgRTT;
- uint16_t maxRTT;
- uint16_t minRTT;
- // The remote SSRC will be zero if no RTP packet has been received.
- remoteSSRC = rtp_receiver_->SSRC();
- if (remoteSSRC == 0)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
- "Channel::GetRoundTripTimeSummary() unable to measure RTT"
- " since no RTP packet has been received yet");
- }
-
- // Retrieve RTT statistics from the RTP/RTCP module for the specified
- // channel and SSRC. The SSRC is required to parse out the correct source
- // in conference scenarios.
- if (_rtpRtcpModule->RTT(remoteSSRC, &RTT, &avgRTT, &minRTT,&maxRTT) != 0)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
- "GetRoundTripTimeSummary unable to retrieve RTT values"
- " from the RTCP layer");
- delaysMs.min = -1; delaysMs.max = -1; delaysMs.average = -1;
- }
- else
- {
- delaysMs.min = minRTT;
- delaysMs.max = maxRTT;
- delaysMs.average = avgRTT;
- }
- return 0;
-}
-
-int
Channel::GetNetworkStatistics(NetworkStatistics& stats)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
uint32_t playout_timestamp = 0;
if (audio_coding_->PlayoutTimestamp(&playout_timestamp) == -1) {
- WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
- "Channel::UpdatePlayoutTimestamp() failed to read playout"
- " timestamp from the ACM");
- _engineStatisticsPtr->SetLastError(
- VE_CANNOT_RETRIEVE_VALUE, kTraceError,
- "UpdatePlayoutTimestamp() failed to retrieve timestamp");
+ // This can happen if the channel has not received any RTP packets, in
+ // which case NetEq cannot compute a playout timestamp.
return;
}
return;
}
- int32_t playout_frequency = audio_coding_->PlayoutFrequency();
- CodecInst current_recive_codec;
- if (audio_coding_->ReceiveCodec(&current_recive_codec) == 0) {
- if (STR_CASE_CMP("G722", current_recive_codec.plname) == 0) {
- playout_frequency = 8000;
- } else if (STR_CASE_CMP("opus", current_recive_codec.plname) == 0) {
- playout_frequency = 48000;
- }
- }
-
jitter_buffer_playout_timestamp_ = playout_timestamp;
// Remove the playout delay.
- playout_timestamp -= (delay_ms * (playout_frequency / 1000));
+ playout_timestamp -= (delay_ms * (GetPlayoutFrequency() / 1000));
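+ // E.g. 60 ms of playout delay at a 48 kHz RTP clock rewinds the timestamp
+ // by 60 * 48 = 2880 ticks.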
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::UpdatePlayoutTimestamp() => playoutTimestamp = %lu",
int32_t
Channel::MixOrReplaceAudioWithFile(int mixingFrequency)
{
- scoped_array<int16_t> fileBuffer(new int16_t[640]);
+ scoped_ptr<int16_t[]> fileBuffer(new int16_t[640]);
int fileSamples(0);
{
{
// Currently file stream is always mono.
// TODO(xians): Change the code when FilePlayer supports real stereo.
- Utility::MixWithSat(_audioFrame.data_,
- _audioFrame.num_channels_,
- fileBuffer.get(),
- 1,
- fileSamples);
+ MixWithSat(_audioFrame.data_,
+ _audioFrame.num_channels_,
+ fileBuffer.get(),
+ 1,
+ fileSamples);
}
else
{
// Currently file stream is always mono.
// TODO(xians): Change the code when FilePlayer supports real stereo.
_audioFrame.UpdateFrame(_channelId,
- -1,
+ 0xFFFFFFFF,
fileBuffer.get(),
fileSamples,
mixingFrequency,
Channel::MixAudioWithFile(AudioFrame& audioFrame,
int mixingFrequency)
{
- assert(mixingFrequency <= 32000);
+ assert(mixingFrequency <= 48000);
- scoped_array<int16_t> fileBuffer(new int16_t[640]);
+ scoped_ptr<int16_t[]> fileBuffer(new int16_t[960]);
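+    // 960 samples preserves the previous 20 ms headroom at the new 48 kHz
+    // ceiling (640 / 32000 = 960 / 48000 = 20 ms of mono audio).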
int fileSamples(0);
{
{
// Currently file stream is always mono.
// TODO(xians): Change the code when FilePlayer supports real stereo.
- Utility::MixWithSat(audioFrame.data_,
- audioFrame.num_channels_,
- fileBuffer.get(),
- 1,
- fileSamples);
+ MixWithSat(audioFrame.data_,
+ audioFrame.num_channels_,
+ fileBuffer.get(),
+ 1,
+ fileSamples);
}
else
{
return 0;
}
-void
-Channel::ResetDeadOrAliveCounters()
-{
- _countDeadDetections = 0;
- _countAliveDetections = 0;
-}
-
-void
-Channel::UpdateDeadOrAliveCounters(bool alive)
-{
- if (alive)
- _countAliveDetections++;
- else
- _countDeadDetections++;
-}
-
-int
-Channel::GetDeadOrAliveCounters(int& countDead, int& countAlive) const
-{
- return 0;
-}
-
int32_t
Channel::SendPacketRaw(const void *data, int len, bool RTCP)
{
rtp_timestamp, sequence_number);
// Get frequency of last received payload
- int rtp_receive_frequency = audio_coding_->ReceiveFrequency();
-
- CodecInst current_receive_codec;
- if (audio_coding_->ReceiveCodec(&current_receive_codec) != 0) {
- return;
- }
+ int rtp_receive_frequency = GetPlayoutFrequency();
// Update the least required delay.
least_required_delay_ms_ = audio_coding_->LeastRequiredDelayMs();
- if (STR_CASE_CMP("G722", current_receive_codec.plname) == 0) {
- // Even though the actual sampling rate for G.722 audio is
- // 16,000 Hz, the RTP clock rate for the G722 payload format is
- // 8,000 Hz because that value was erroneously assigned in
- // RFC 1890 and must remain unchanged for backward compatibility.
- rtp_receive_frequency = 8000;
- } else if (STR_CASE_CMP("opus", current_receive_codec.plname) == 0) {
- // We are resampling Opus internally to 32,000 Hz until all our
- // DSP routines can operate at 48,000 Hz, but the RTP clock
- // rate for the Opus payload format is standardized to 48,000 Hz,
- // because that is the maximum supported decoding sampling rate.
- rtp_receive_frequency = 48000;
- }
-
// |jitter_buffer_playout_timestamp_| updated in UpdatePlayoutTimestamp for
// every incoming packet.
uint32_t timestamp_diff_ms = (rtp_timestamp -
}
return error;
}
+
+int32_t Channel::GetPlayoutFrequency() {
+ int32_t playout_frequency = audio_coding_->PlayoutFrequency();
+ CodecInst current_receive_codec;
+ if (audio_coding_->ReceiveCodec(&current_receive_codec) == 0) {
+ if (STR_CASE_CMP("G722", current_receive_codec.plname) == 0) {
+ // Even though the actual sampling rate for G.722 audio is
+ // 16,000 Hz, the RTP clock rate for the G722 payload format is
+ // 8,000 Hz because that value was erroneously assigned in
+ // RFC 1890 and must remain unchanged for backward compatibility.
+ playout_frequency = 8000;
+ } else if (STR_CASE_CMP("opus", current_receive_codec.plname) == 0) {
+ // We are resampling Opus internally to 32,000 Hz until all our
+ // DSP routines can operate at 48,000 Hz, but the RTP clock
+ // rate for the Opus payload format is standardized to 48,000 Hz,
+ // because that is the maximum supported decoding sampling rate.
+ playout_frequency = 48000;
+ }
+ }
+ return playout_frequency;
+}
+
+int Channel::GetRTT() const {
+ RTCPMethod method = _rtpRtcpModule->RTCP();
+ if (method == kRtcpOff) {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+ VoEId(_instanceId, _channelId),
+ "GetRTPStatistics() RTCP is disabled => valid RTT "
+ "measurements cannot be retrieved");
+ return 0;
+ }
+ std::vector<RTCPReportBlock> report_blocks;
+ _rtpRtcpModule->RemoteRTCPStat(&report_blocks);
+ if (report_blocks.empty()) {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+ VoEId(_instanceId, _channelId),
+ "GetRTPStatistics() failed to measure RTT since no "
+ "RTCP packets have been received yet");
+ return 0;
+ }
+
+ uint32_t remoteSSRC = rtp_receiver_->SSRC();
+ std::vector<RTCPReportBlock>::const_iterator it = report_blocks.begin();
+ for (; it != report_blocks.end(); ++it) {
+ if (it->remoteSSRC == remoteSSRC)
+ break;
+ }
+ if (it == report_blocks.end()) {
+ // We have not received any packets with an SSRC matching the report blocks.
+ // To calculate RTT we try with the SSRC of the first report block.
+ // This is very important for send-only channels where we don't know
+ // the SSRC of the other end.
+ remoteSSRC = report_blocks[0].remoteSSRC;
+ }
+ uint16_t rtt = 0;
+ uint16_t avg_rtt = 0;
+ uint16_t max_rtt = 0;
+ uint16_t min_rtt = 0;
+ if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt)
+ != 0) {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+ VoEId(_instanceId, _channelId),
+ "GetRTPStatistics() failed to retrieve RTT from "
+ "the RTP/RTCP module");
+ return 0;
+ }
+ return static_cast<int>(rtt);
+}
+
} // namespace voe
} // namespace webrtc