Upstream version 7.36.149.0
[platform/framework/web/crosswalk.git] / src / third_party / webrtc / voice_engine / utility.cc
index 5b7ee81..b7eb885 100644 (file)
 
 #include "webrtc/voice_engine/utility.h"
 
+#include "webrtc/common_audio/resampler/include/push_resampler.h"
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
-#include "webrtc/modules/interface/module.h"
-#include "webrtc/system_wrappers/interface/trace.h"
-
-namespace webrtc
-{
-
-namespace voe
-{
-enum{kMaxTargetLen = 2*32*10}; // stereo 32KHz 10ms
-
-void Utility::MixWithSat(int16_t target[],
-                         int target_channel,
-                         const int16_t source[],
-                         int source_channel,
-                         int source_len)
-{
-    assert((target_channel == 1) || (target_channel == 2));
-    assert((source_channel == 1) || (source_channel == 2));
-    assert(source_len <= kMaxTargetLen);
-
-    if ((target_channel == 2) && (source_channel == 1))
-    {
-        // Convert source from mono to stereo.
-        int32_t left = 0;
-        int32_t right = 0;
-        for (int i = 0; i < source_len; ++i) {
-            left  = source[i] + target[i*2];
-            right = source[i] + target[i*2 + 1];
-            target[i*2]     = WebRtcSpl_SatW32ToW16(left);
-            target[i*2 + 1] = WebRtcSpl_SatW32ToW16(right);
-        }
-    }
-    else if ((target_channel == 1) && (source_channel == 2))
-    {
-        // Convert source from stereo to mono.
-        int32_t temp = 0;
-        for (int i = 0; i < source_len/2; ++i) {
-          temp = ((source[i*2] + source[i*2 + 1])>>1) + target[i];
-          target[i] = WebRtcSpl_SatW32ToW16(temp);
-        }
-    }
-    else
-    {
-        int32_t temp = 0;
-        for (int i = 0; i < source_len; ++i) {
-          temp = source[i] + target[i];
-          target[i] = WebRtcSpl_SatW32ToW16(temp);
-        }
-    }
-}
+#include "webrtc/common_types.h"
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/utility/interface/audio_frame_operations.h"
+#include "webrtc/system_wrappers/interface/logging.h"
+#include "webrtc/voice_engine/voice_engine_defines.h"
 
-void Utility::MixSubtractWithSat(int16_t target[],
-                                 const int16_t source[],
-                                 uint16_t len)
-{
-    int32_t temp(0);
-    for (int i = 0; i < len; i++)
-    {
-        temp = target[i] - source[i];
-        if (temp > 32767)
-            target[i] = 32767;
-        else if (temp < -32768)
-            target[i] = -32768;
-        else
-            target[i] = (int16_t) temp;
-    }
-}
+namespace webrtc {
+namespace voe {
 
-void Utility::MixAndScaleWithSat(int16_t target[],
-                                 const int16_t source[], float scale,
-                                 uint16_t len)
-{
-    int32_t temp(0);
-    for (int i = 0; i < len; i++)
-    {
-        temp = (int32_t) (target[i] + scale * source[i]);
-        if (temp > 32767)
-            target[i] = 32767;
-        else if (temp < -32768)
-            target[i] = -32768;
-        else
-            target[i] = (int16_t) temp;
-    }
+// TODO(ajm): There is significant overlap between RemixAndResample and
+// ConvertToCodecFormat, but if we're to consolidate we should probably make a
+// real converter class.
+// Remixes (mono<->stereo) and resamples the audio in |src_frame| into
+// |dst_frame|. The target format is taken from |dst_frame|'s
+// sample_rate_hz_ and num_channels_; samples_per_channel_ is updated to
+// the resampled length. |resampler| carries state across calls and is
+// re-initialized only when the rates/channel count change.
+void RemixAndResample(const AudioFrame& src_frame,
+                      PushResampler<int16_t>* resampler,
+                      AudioFrame* dst_frame) {
+  const int16_t* audio_ptr = src_frame.data_;
+  int audio_ptr_num_channels = src_frame.num_channels_;
+  // Scratch buffer, used only when the stereo source is downmixed below.
+  int16_t mono_audio[AudioFrame::kMaxDataSizeSamples];
+
+  // Downmix before resampling.
+  if (src_frame.num_channels_ == 2 && dst_frame->num_channels_ == 1) {
+    AudioFrameOperations::StereoToMono(src_frame.data_,
+                                       src_frame.samples_per_channel_,
+                                       mono_audio);
+    audio_ptr = mono_audio;
+    audio_ptr_num_channels = 1;
+  }
+
+  if (resampler->InitializeIfNeeded(src_frame.sample_rate_hz_,
+                                    dst_frame->sample_rate_hz_,
+                                    audio_ptr_num_channels) == -1) {
+    // On failure, pass the source through unchanged so audio keeps flowing
+    // in release builds; the assert flags the bug in debug builds.
+    dst_frame->CopyFrom(src_frame);
+    LOG_FERR3(LS_ERROR, InitializeIfNeeded, src_frame.sample_rate_hz_,
+              dst_frame->sample_rate_hz_, audio_ptr_num_channels);
+    assert(false);
+  }
+
+  const int src_length = src_frame.samples_per_channel_ *
+                         audio_ptr_num_channels;
+  int out_length = resampler->Resample(audio_ptr, src_length, dst_frame->data_,
+                                       AudioFrame::kMaxDataSizeSamples);
+  if (out_length == -1) {
+    // Same fallback strategy as above: copy the input through and assert.
+    dst_frame->CopyFrom(src_frame);
+    LOG_FERR3(LS_ERROR, Resample, audio_ptr, src_length, dst_frame->data_);
+    assert(false);
+  }
+  // out_length counts samples summed over channels; store per-channel count.
+  dst_frame->samples_per_channel_ = out_length / audio_ptr_num_channels;
+
+  // Upmix after resampling.
+  if (src_frame.num_channels_ == 1 && dst_frame->num_channels_ == 2) {
+    // The audio in dst_frame really is mono at this point; MonoToStereo will
+    // set this back to stereo.
+    dst_frame->num_channels_ = 1;
+    AudioFrameOperations::MonoToStereo(dst_frame);
+  }
 }
 
-void Utility::Scale(int16_t vector[], float scale, uint16_t len)
-{
-    for (int i = 0; i < len; i++)
-    {
-        vector[i] = (int16_t) (scale * vector[i]);
-    }
+// Converts captured audio in |src_data| into the codec's format and writes
+// the result into |dst_af|: downmixes stereo to mono when the codec is mono,
+// and resamples toward |codec_rate_hz| — but never upsamples (the output rate
+// is min(codec_rate_hz, sample_rate_hz)). |mono_buffer| is caller-provided
+// scratch space holding at least kMaxMonoDataSizeSamples samples, used only
+// for the stereo-to-mono downmix. |resampler| carries state across calls.
+void DownConvertToCodecFormat(const int16_t* src_data,
+                              int samples_per_channel,
+                              int num_channels,
+                              int sample_rate_hz,
+                              int codec_num_channels,
+                              int codec_rate_hz,
+                              int16_t* mono_buffer,
+                              PushResampler<int16_t>* resampler,
+                              AudioFrame* dst_af) {
+  assert(samples_per_channel <= kMaxMonoDataSizeSamples);
+  assert(num_channels == 1 || num_channels == 2);
+  assert(codec_num_channels == 1 || codec_num_channels == 2);
+
+  // Never upsample the capture signal here. This should be done at the
+  // end of the send chain.
+  int destination_rate = std::min(codec_rate_hz, sample_rate_hz);
+
+  // If no stereo codecs are in use, we downmix a stereo stream from the
+  // device early in the chain, before resampling.
+  if (num_channels == 2 && codec_num_channels == 1) {
+    AudioFrameOperations::StereoToMono(src_data, samples_per_channel,
+                                       mono_buffer);
+    src_data = mono_buffer;
+    num_channels = 1;
+  }
+
+  if (resampler->InitializeIfNeeded(
+          sample_rate_hz, destination_rate, num_channels) != 0) {
+    // No usable fallback here; log and assert so debug builds catch it.
+    LOG_FERR3(LS_ERROR,
+              InitializeIfNeeded,
+              sample_rate_hz,
+              destination_rate,
+              num_channels);
+    assert(false);
+  }
+
+  const int in_length = samples_per_channel * num_channels;
+  int out_length = resampler->Resample(
+      src_data, in_length, dst_af->data_, AudioFrame::kMaxDataSizeSamples);
+  if (out_length == -1) {
+    LOG_FERR3(LS_ERROR, Resample, src_data, in_length, dst_af->data_);
+    assert(false);
+  }
+
+  // out_length counts samples summed over channels; store per-channel count.
+  dst_af->samples_per_channel_ = out_length / num_channels;
+  dst_af->sample_rate_hz_ = destination_rate;
+  dst_af->num_channels_ = num_channels;
+  // -1 presumably marks the timestamp as not meaningful for this converted
+  // frame — confirm against AudioFrame's timestamp conventions.
+  dst_af->timestamp_ = -1;
+  dst_af->speech_type_ = AudioFrame::kNormalSpeech;
+  dst_af->vad_activity_ = AudioFrame::kVadUnknown;
 }
 
-void Utility::ScaleWithSat(int16_t vector[], float scale,
-                           uint16_t len)
-{
-    int32_t temp(0);
-    for (int i = 0; i < len; i++)
-    {
-        temp = (int32_t) (scale * vector[i]);
-        if (temp > 32767)
-            vector[i] = 32767;
-        else if (temp < -32768)
-            vector[i] = -32768;
-        else
-            vector[i] = (int16_t) temp;
+// Mixes |source| into |target| in place with saturating 16-bit arithmetic,
+// converting channel layout on the fly: a mono source is added to both
+// channels of a stereo target; a stereo source is averaged per frame into a
+// mono target; equal layouts are mixed sample-by-sample. |source_len| is the
+// total number of samples in |source| summed over channels (hence the /2 in
+// the stereo-to-mono branch).
+void MixWithSat(int16_t target[],
+                int target_channel,
+                const int16_t source[],
+                int source_channel,
+                int source_len) {
+  assert(target_channel == 1 || target_channel == 2);
+  assert(source_channel == 1 || source_channel == 2);
+
+  if (target_channel == 2 && source_channel == 1) {
+    // Convert source from mono to stereo.
+    int32_t left = 0;
+    int32_t right = 0;
+    for (int i = 0; i < source_len; ++i) {
+      // Add the mono sample into both interleaved target channels,
+      // saturating each result back to int16 range.
+      left = source[i] + target[i * 2];
+      right = source[i] + target[i * 2 + 1];
+      target[i * 2] = WebRtcSpl_SatW32ToW16(left);
+      target[i * 2 + 1] = WebRtcSpl_SatW32ToW16(right);
+    }
+  } else if (target_channel == 1 && source_channel == 2) {
+    // Convert source from stereo to mono.
+    int32_t temp = 0;
+    for (int i = 0; i < source_len / 2; ++i) {
+      // >> 1 averages the interleaved L/R pair before mixing it in.
+      temp = ((source[i * 2] + source[i * 2 + 1]) >> 1) + target[i];
+      target[i] = WebRtcSpl_SatW32ToW16(temp);
     }
+  } else {
+    // Same channel layout on both sides: straight saturating add.
+    int32_t temp = 0;
+    for (int i = 0; i < source_len; ++i) {
+      temp = source[i] + target[i];
+      target[i] = WebRtcSpl_SatW32ToW16(temp);
+    }
+  }
+}
 
 }  // namespace voe
-
 }  // namespace webrtc