#include "webrtc/engine_configurations.h"
#include "webrtc/modules/audio_coding/main/source/acm_codec_database.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
+#include "webrtc/modules/audio_coding/main/acm2/call_statistics.h"
#include "webrtc/modules/audio_coding/main/source/acm_dtmf_detection.h"
#include "webrtc/modules/audio_coding/main/source/acm_generic_codec.h"
#include "webrtc/modules/audio_coding/main/source/acm_resampler.h"
return -1;
}
- // Allow for 8, 16, 32 and 48kHz input audio.
- if ((audio_frame.sample_rate_hz_ != 8000)
- && (audio_frame.sample_rate_hz_ != 16000)
- && (audio_frame.sample_rate_hz_ != 32000)
- && (audio_frame.sample_rate_hz_ != 48000)) {
+ if (audio_frame.sample_rate_hz_ > 48000) {
assert(false);
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
"Cannot Add 10 ms audio, input frequency not valid");
if (preprocess_frame_.samples_per_channel_ < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
- "Cannot add 10 ms audio, resmapling failed");
+ "Cannot add 10 ms audio, resampling failed");
return -1;
}
preprocess_frame_.sample_rate_hz_ = send_codec_inst_.plfreq;
int decoded_seq_num;
uint32_t decoded_timestamp;
bool update_nack =
- neteq_.DecodedRtpInfo(&decoded_seq_num, &decoded_timestamp) &&
- nack_enabled_; // Update NACK only if it is enabled.
- audio_frame->num_channels_ = audio_frame_.num_channels_;
- audio_frame->vad_activity_ = audio_frame_.vad_activity_;
- audio_frame->speech_type_ = audio_frame_.speech_type_;
+ neteq_.DecodedRtpInfo(&decoded_seq_num, &decoded_timestamp);
- stereo_mode = (audio_frame_.num_channels_ > 1);
-
- // For stereo playout:
// Master and Slave samples are interleaved starting with Master.
- const uint16_t receive_freq =
- static_cast<uint16_t>(audio_frame_.sample_rate_hz_);
+ uint16_t receive_freq;
bool tone_detected = false;
int16_t last_detected_tone;
int16_t tone;
{
CriticalSectionScoped lock(acm_crit_sect_);
- if (update_nack) {
+ audio_frame->num_channels_ = audio_frame_.num_channels_;
+ audio_frame->vad_activity_ = audio_frame_.vad_activity_;
+ audio_frame->speech_type_ = audio_frame_.speech_type_;
+
+ stereo_mode = (audio_frame_.num_channels_ > 1);
+
+ receive_freq = static_cast<uint16_t>(audio_frame_.sample_rate_hz_);
+ // Update call statistics.
+ call_stats_.DecodedByNetEq(audio_frame->speech_type_);
+
+ if (nack_enabled_ && update_nack) {
assert(nack_.get());
nack_->UpdateLastDecodedPacket(decoded_seq_num, decoded_timestamp);
}
return false;
}
+ // Record call to silence generator.
+ call_stats_.DecodedBySilenceGenerator();
+
// We stop accumulating packets, if the number of packets or the total size
// exceeds a threshold.
int max_num_packets;
nack_enabled_ = false;
}
+// Copies the accumulated decoding call statistics into |call_stats|.
+// Takes |acm_crit_sect_| for the duration of the copy so the snapshot is
+// consistent with concurrent updates made by the decode path (which records
+// via call_stats_.DecodedByNetEq()/DecodedBySilenceGenerator()).
+void AudioCodingModuleImpl::GetDecodingCallStatistics(
+    AudioDecodingCallStats* call_stats) const {
+  CriticalSectionScoped lock(acm_crit_sect_);
+  *call_stats = call_stats_.GetDecodingStatistics();
+}
+
} // namespace acm1
} // namespace webrtc