// Absolute value as a macro. NOTE: the argument is evaluated twice, so do
// not pass expressions with side effects (e.g. WEBRTC_ABS(x++)).
#define WEBRTC_ABS(a) (((a) < 0) ? -(a) : (a))
namespace webrtc {
-
namespace voe {
-// Used for downmixing before resampling.
-// TODO(ajm): audio_device should advertise the maximum sample rate it can
-// provide.
-static const int kMaxMonoDeviceDataSizeSamples = 960; // 10 ms, 96 kHz, mono.
-
// TODO(ajm): The thread safety of this is dubious...
void
TransmitMixer::OnPeriodicProcess()
if (channel->Sending()) {
CodecInst codec;
channel->GetSendCodec(codec);
- // TODO(tlegrand): Remove the 32 kHz restriction once we have full 48 kHz
- // support in Audio Coding Module.
- *max_sample_rate = std::min(32000,
- std::max(*max_sample_rate, codec.plfreq));
+ *max_sample_rate = std::max(*max_sample_rate, codec.plfreq);
*max_channels = std::max(*max_channels, codec.channels);
}
}
totalDelayMS, clockDrift, currentMicLevel);
// --- Resample input audio and create/store the initial audio frame
- if (GenerateAudioFrame(static_cast<const int16_t*>(audioSamples),
- nSamples,
- nChannels,
- samplesPerSec) == -1)
- {
- return -1;
- }
+ GenerateAudioFrame(static_cast<const int16_t*>(audioSamples),
+ nSamples,
+ nChannels,
+ samplesPerSec);
{
CriticalSectionScoped cs(&_callbackCritSect);
}
// --- Record to file
- if (_fileRecording)
+ bool file_recording = false;
+ {
+ CriticalSectionScoped cs(&_critSect);
+ file_recording = _fileRecording;
+ }
+ if (file_recording)
{
RecordAudioToFile(_audioFrame.sample_rate_hz_);
}
it.Increment())
{
Channel* channelPtr = it.GetChannel();
- if (channelPtr->InputIsOnHold())
- {
- channelPtr->UpdateLocalTimeStamp();
- } else if (channelPtr->Sending())
+ if (channelPtr->Sending())
{
// Demultiplex makes a copy of its input.
channelPtr->Demultiplex(_audioFrame);
voe::ChannelOwner ch = _channelManagerPtr->GetChannel(voe_channels[i]);
voe::Channel* channel_ptr = ch.channel();
if (channel_ptr) {
- if (channel_ptr->InputIsOnHold()) {
- channel_ptr->UpdateLocalTimeStamp();
- } else if (channel_ptr->Sending()) {
+ if (channel_ptr->Sending()) {
// Demultiplex makes a copy of its input.
channel_ptr->Demultiplex(_audioFrame);
channel_ptr->PrepareEncodeAndSend(_audioFrame.sample_rate_hz_);
it.Increment())
{
Channel* channelPtr = it.GetChannel();
- if (channelPtr->Sending() && !channelPtr->InputIsOnHold())
+ if (channelPtr->Sending())
{
channelPtr->EncodeAndSend();
}
for (int i = 0; i < number_of_voe_channels; ++i) {
voe::ChannelOwner ch = _channelManagerPtr->GetChannel(voe_channels[i]);
voe::Channel* channel_ptr = ch.channel();
- if (channel_ptr && channel_ptr->Sending() && !channel_ptr->InputIsOnHold())
+ if (channel_ptr && channel_ptr->Sending())
channel_ptr->EncodeAndSend();
}
}
return _filePlaying;
}
-int TransmitMixer::ScaleFileAsMicrophonePlayout(float scale)
-{
- WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
- "TransmitMixer::ScaleFileAsMicrophonePlayout(scale=%5.3f)",
- scale);
-
- CriticalSectionScoped cs(&_critSect);
-
- if (!_filePlaying)
- {
- _engineStatisticsPtr->SetLastError(
- VE_INVALID_OPERATION, kTraceError,
- "ScaleFileAsMicrophonePlayout() isnot playing file");
- return -1;
- }
-
- if ((_filePlayerPtr == NULL) ||
- (_filePlayerPtr->SetAudioScaling(scale) != 0))
- {
- _engineStatisticsPtr->SetLastError(
- VE_BAD_ARGUMENT, kTraceError,
- "SetAudioScaling() failed to scale playout");
- return -1;
- }
-
- return 0;
-}
-
int TransmitMixer::StartRecordingMicrophone(const char* fileName,
const CodecInst* codecInst)
{
"TransmitMixer::StartRecordingMicrophone(fileName=%s)",
fileName);
+ CriticalSectionScoped cs(&_critSect);
+
if (_fileRecording)
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
format = kFileFormatCompressedFile;
}
- CriticalSectionScoped cs(&_critSect);
-
// Destroy the old instance
if (_fileRecorderPtr)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
"TransmitMixer::StartRecordingMicrophone()");
+ CriticalSectionScoped cs(&_critSect);
+
if (_fileRecording)
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
format = kFileFormatCompressedFile;
}
- CriticalSectionScoped cs(&_critSect);
-
// Destroy the old instance
if (_fileRecorderPtr)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
"TransmitMixer::StopRecordingMicrophone()");
+ CriticalSectionScoped cs(&_critSect);
+
if (!_fileRecording)
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
return 0;
}
- CriticalSectionScoped cs(&_critSect);
-
if (_fileRecorderPtr->StopRecording() != 0)
{
_engineStatisticsPtr->SetLastError(
// Returns true while microphone audio is being recorded to file.
// The flag is read under _critSect so callers on other threads observe a
// consistent value relative to Start/StopRecordingMicrophone().
bool TransmitMixer::IsRecordingMic()
{
    CriticalSectionScoped cs(&_critSect);
    return _fileRecording;
}
-// TODO(andrew): use RemixAndResample for this.
-int TransmitMixer::GenerateAudioFrame(const int16_t audio[],
- int samples_per_channel,
- int num_channels,
- int sample_rate_hz) {
- int destination_rate;
+void TransmitMixer::GenerateAudioFrame(const int16_t* audio,
+ int samples_per_channel,
+ int num_channels,
+ int sample_rate_hz) {
+ int codec_rate;
int num_codec_channels;
- GetSendCodecInfo(&destination_rate, &num_codec_channels);
-
- // Never upsample the capture signal here. This should be done at the
- // end of the send chain.
- destination_rate = std::min(destination_rate, sample_rate_hz);
- stereo_codec_ = num_codec_channels == 2;
-
- const int16_t* audio_ptr = audio;
- int16_t mono_audio[kMaxMonoDeviceDataSizeSamples];
- assert(samples_per_channel <= kMaxMonoDeviceDataSizeSamples);
- // If no stereo codecs are in use, we downmix a stereo stream from the
- // device early in the chain, before resampling.
- if (num_channels == 2 && !stereo_codec_) {
- AudioFrameOperations::StereoToMono(audio, samples_per_channel,
- mono_audio);
- audio_ptr = mono_audio;
- num_channels = 1;
- }
-
- if (resampler_.InitializeIfNeeded(sample_rate_hz,
- destination_rate,
- num_channels) != 0) {
- WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
- "TransmitMixer::GenerateAudioFrame() unable to resample");
- return -1;
+ GetSendCodecInfo(&codec_rate, &num_codec_channels);
+ // TODO(ajm): This currently restricts the sample rate to 32 kHz.
+ // See: https://code.google.com/p/webrtc/issues/detail?id=3146
+ // When 48 kHz is supported natively by AudioProcessing, this will have
+ // to be changed to handle 44.1 kHz.
+ int max_sample_rate_hz = kAudioProcMaxNativeSampleRateHz;
+ if (audioproc_->echo_control_mobile()->is_enabled()) {
+ // AECM only supports 8 and 16 kHz.
+ max_sample_rate_hz = 16000;
}
+ codec_rate = std::min(codec_rate, max_sample_rate_hz);
+ stereo_codec_ = num_codec_channels == 2;
- int out_length = resampler_.Resample(audio_ptr,
- samples_per_channel * num_channels,
- _audioFrame.data_,
- AudioFrame::kMaxDataSizeSamples);
- if (out_length == -1) {
- WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
- "TransmitMixer::GenerateAudioFrame() resampling failed");
- return -1;
+ if (!mono_buffer_.get()) {
+ // Temporary space for DownConvertToCodecFormat.
+ mono_buffer_.reset(new int16_t[kMaxMonoDataSizeSamples]);
}
-
- _audioFrame.samples_per_channel_ = out_length / num_channels;
- _audioFrame.id_ = _instanceId;
- _audioFrame.timestamp_ = -1;
- _audioFrame.sample_rate_hz_ = destination_rate;
- _audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
- _audioFrame.vad_activity_ = AudioFrame::kVadUnknown;
- _audioFrame.num_channels_ = num_channels;
-
- return 0;
+ DownConvertToCodecFormat(audio,
+ samples_per_channel,
+ num_channels,
+ sample_rate_hz,
+ num_codec_channels,
+ codec_rate,
+ mono_buffer_.get(),
+ &resampler_,
+ &_audioFrame);
}
int32_t TransmitMixer::RecordAudioToFile(
int32_t TransmitMixer::MixOrReplaceAudioWithFile(
int mixingFrequency)
{
- scoped_array<int16_t> fileBuffer(new int16_t[640]);
+ scoped_ptr<int16_t[]> fileBuffer(new int16_t[640]);
int fileSamples(0);
{
{
// Currently file stream is always mono.
// TODO(xians): Change the code when FilePlayer supports real stereo.
- Utility::MixWithSat(_audioFrame.data_,
- _audioFrame.num_channels_,
- fileBuffer.get(),
- 1,
- fileSamples);
+ MixWithSat(_audioFrame.data_,
+ _audioFrame.num_channels_,
+ fileBuffer.get(),
+ 1,
+ fileSamples);
} else
{
// Replace ACM audio with file.
// Currently file stream is always mono.
// TODO(xians): Change the code when FilePlayer supports real stereo.
_audioFrame.UpdateFrame(-1,
- -1,
+ 0xFFFFFFFF,
fileBuffer.get(),
fileSamples,
mixingFrequency,
}
} // namespace voe
-
} // namespace webrtc