true)),
rtcp_bandwidth_observer_(
bitrate_controller_->CreateRtcpBandwidthObserver()),
- send_bitrate_observer_(new VoEBitrateObserver(this))
+ send_bitrate_observer_(new VoEBitrateObserver(this)),
+ network_predictor_(new NetworkPredictor(Clock::GetRealTimeClock()))
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::Channel() - ctor");
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::OnNetworkChanged(bitrate_bps=%d, fration_lost=%d, rtt=%d)",
bitrate_bps, fraction_lost, rtt);
+  // |fraction_lost| from BitrateObserver is a short-term observation of the
+  // packet loss rate in the recent past. We use the network predictor to make
+  // a more reasonable loss rate estimation.
+ network_predictor_->UpdatePacketLossRate(fraction_lost);
+ uint8_t loss_rate = network_predictor_->GetLossRate();
// Normalizes rate to 0 - 100.
- if (audio_coding_->SetPacketLossRate(100 * fraction_lost / 255) != 0) {
+ if (audio_coding_->SetPacketLossRate(100 * loss_rate / 255) != 0) {
_engineStatisticsPtr->SetLastError(VE_AUDIO_CODING_MODULE_ERROR,
kTraceError, "OnNetworkChanged() failed to set packet loss rate");
assert(false); // This should not happen.
return 0;
}
+// Sets the maximum encoding bandwidth for Opus on this channel by forwarding
+// |bandwidth_hz| to the audio coding module.
+// Returns 0 on success; on failure records VE_AUDIO_CODING_MODULE_ERROR in
+// |_engineStatisticsPtr| and returns -1.
+int Channel::SetOpusMaxBandwidth(int bandwidth_hz) {
+  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+               "Channel::SetOpusMaxBandwidth()");
+
+  if (audio_coding_->SetOpusMaxBandwidth(bandwidth_hz) != 0) {
+    _engineStatisticsPtr->SetLastError(
+        VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+        "SetOpusMaxBandwidth() failed to set maximum encoding bandwidth");
+    return -1;
+  }
+  return 0;
+}
+
int32_t Channel::RegisterExternalTransport(Transport& transport)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
}
int
-Channel::GetRTCP_CNAME(char cName[256])
-{
- if (_rtpRtcpModule->CNAME(cName) != 0)
- {
- _engineStatisticsPtr->SetLastError(
- VE_RTP_RTCP_MODULE_ERROR, kTraceError,
- "GetRTCP_CNAME() failed to retrieve RTCP CNAME");
- return -1;
- }
- WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
- VoEId(_instanceId, _channelId),
- "GetRTCP_CNAME() => cName=%s", cName);
- return 0;
-}
-
-int
Channel::GetRemoteRTCP_CNAME(char cName[256])
{
if (cName == NULL)
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::PrepareEncodeAndSend() invalid audio frame");
- return -1;
+ return 0xFFFFFFFF;
}
if (channel_state_.Get().input_file_playing)
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::EncodeAndSend() invalid audio frame");
- return -1;
+ return 0xFFFFFFFF;
}
_audioFrame.id_ = _channelId;
{
WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::EncodeAndSend() ACM encoding failed");
- return -1;
+ return 0xFFFFFFFF;
}
_timeStamp += _audioFrame.samples_per_channel_;
uint32_t playout_timestamp = 0;
if (audio_coding_->PlayoutTimestamp(&playout_timestamp) == -1) {
- WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
- "Channel::UpdatePlayoutTimestamp() failed to read playout"
- " timestamp from the ACM");
- _engineStatisticsPtr->SetLastError(
- VE_CANNOT_RETRIEVE_VALUE, kTraceError,
- "UpdatePlayoutTimestamp() failed to retrieve timestamp");
+    // This can happen if this channel has not received any RTP packets. In
+    // that case, NetEq is not capable of computing a playout timestamp.
return;
}
// Currently file stream is always mono.
// TODO(xians): Change the code when FilePlayer supports real stereo.
_audioFrame.UpdateFrame(_channelId,
- -1,
+ 0xFFFFFFFF,
fileBuffer.get(),
fileSamples,
mixingFrequency,
Channel::MixAudioWithFile(AudioFrame& audioFrame,
int mixingFrequency)
{
- assert(mixingFrequency <= 32000);
+ assert(mixingFrequency <= 48000);
- scoped_ptr<int16_t[]> fileBuffer(new int16_t[640]);
+ scoped_ptr<int16_t[]> fileBuffer(new int16_t[960]);
int fileSamples(0);
{