Upstream version 10.39.225.0
src/third_party/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
index 1d32341..a0fb303 100644
@@ -8,11 +8,15 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#include <math.h>
 #include <stdio.h>
-
 #include <algorithm>
+#include <iostream>
+#include <limits>
 #include <queue>
 
+#include "webrtc/common_audio/include/audio_util.h"
+#include "webrtc/common_audio/resampler/include/push_resampler.h"
+#include "webrtc/common_audio/resampler/push_sinc_resampler.h"
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
 #include "webrtc/modules/audio_processing/test/test_utils.h"
 #include "webrtc/audio_processing/unittest.pb.h"
 #endif
 
-#if (defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)) || \
-    (defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64) && !defined(NDEBUG))
-#  define WEBRTC_AUDIOPROC_BIT_EXACT
-#endif
-
-using webrtc::AudioProcessing;
-using webrtc::AudioFrame;
-using webrtc::GainControl;
-using webrtc::NoiseSuppression;
-using webrtc::EchoCancellation;
-using webrtc::EventWrapper;
-using webrtc::scoped_array;
-using webrtc::scoped_ptr;
-using webrtc::Trace;
-using webrtc::LevelEstimator;
-using webrtc::EchoCancellation;
-using webrtc::EchoControlMobile;
-using webrtc::VoiceDetection;
-
+namespace webrtc {
 namespace {
+
 // TODO(bjornv): This is not feasible until the functionality has been
-// re-implemented; see comment at the bottom of this file.
+// re-implemented; see comment at the bottom of this file. For now, the user
+// has to hard-code the |write_ref_data| value.
 // When false, this will compare the output data with the results stored to
 // file. This is the typical case. When the file should be updated, it can
 // be set to true with the command-line switch --write_ref_data.
-#ifdef WEBRTC_AUDIOPROC_BIT_EXACT
 bool write_ref_data = false;
 const int kChannels[] = {1, 2};
 const size_t kChannelsSize = sizeof(kChannels) / sizeof(*kChannels);
-#endif
 
 const int kSampleRates[] = {8000, 16000, 32000};
 const size_t kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
@@ -73,21 +59,54 @@ const int kProcessSampleRates[] = {8000, 16000, 32000};
 const size_t kProcessSampleRatesSize = sizeof(kProcessSampleRates) /
     sizeof(*kProcessSampleRates);
 
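+// Deinterleaves |int_data| into a temporary int16 buffer and scales the
+// samples to the normalized float range used by the float APM interface.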
+void ConvertToFloat(const int16_t* int_data, ChannelBuffer<float>* cb) {
+  ChannelBuffer<int16_t> cb_int(cb->samples_per_channel(),
+                                cb->num_channels());
+  Deinterleave(int_data,
+               cb->samples_per_channel(),
+               cb->num_channels(),
+               cb_int.channels());
+  ScaleToFloat(cb_int.data(),
+               cb->samples_per_channel() * cb->num_channels(),
+               cb->data());
+}
+
+void ConvertToFloat(const AudioFrame& frame, ChannelBuffer<float>* cb) {
+  ConvertToFloat(frame.data_, cb);
+}
+
+// Number of channels including the keyboard channel.
+int TotalChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
+  switch (layout) {
+    case AudioProcessing::kMono:
+      return 1;
+    case AudioProcessing::kMonoAndKeyboard:
+    case AudioProcessing::kStereo:
+      return 2;
+    case AudioProcessing::kStereoAndKeyboard:
+      return 3;
+  }
+  assert(false);
+  return -1;
+}
+
 int TruncateToMultipleOf10(int value) {
   return (value / 10) * 10;
 }
 
-// TODO(andrew): Use the MonoToStereo routine from AudioFrameOperations.
-void MixStereoToMono(const int16_t* stereo,
-                     int16_t* mono,
+void MixStereoToMono(const float* stereo, float* mono,
                      int samples_per_channel) {
-  for (int i = 0; i < samples_per_channel; i++) {
-    int32_t mono_s32 = (static_cast<int32_t>(stereo[i * 2]) +
-        static_cast<int32_t>(stereo[i * 2 + 1])) >> 1;
-    mono[i] = static_cast<int16_t>(mono_s32);
+  for (int i = 0; i < samples_per_channel; ++i) {
+    mono[i] = (stereo[i * 2] + stereo[i * 2 + 1]) / 2;
   }
 }
 
+void MixStereoToMono(const int16_t* stereo, int16_t* mono,
+                     int samples_per_channel) {
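+  // The int16_t operands are promoted to int before the addition, so the sum
+  // cannot overflow; the arithmetic shift then halves it.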
+  for (int i = 0; i < samples_per_channel; i++)
+    mono[i] = (stereo[i * 2] + stereo[i * 2 + 1]) >> 1;
+}
+
 void CopyLeftToRightChannel(int16_t* stereo, int samples_per_channel) {
   for (int i = 0; i < samples_per_channel; i++) {
     stereo[i * 2 + 1] = stereo[i * 2];
@@ -101,8 +120,7 @@ void VerifyChannelsAreEqual(int16_t* stereo, int samples_per_channel) {
 }
 
 void SetFrameTo(AudioFrame* frame, int16_t value) {
-  for (int i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
-      ++i) {
+  for (int i = 0; i < frame->samples_per_channel_ * frame->num_channels_; ++i) {
     frame->data_[i] = value;
   }
 }
@@ -115,25 +133,52 @@ void SetFrameTo(AudioFrame* frame, int16_t left, int16_t right) {
   }
 }
 
+void ScaleFrame(AudioFrame* frame, float scale) {
+  for (int i = 0; i < frame->samples_per_channel_ * frame->num_channels_; ++i) {
+    frame->data_[i] = RoundToInt16(frame->data_[i] * scale);
+  }
+}
+
 bool FrameDataAreEqual(const AudioFrame& frame1, const AudioFrame& frame2) {
-  if (frame1.samples_per_channel_ !=
-      frame2.samples_per_channel_) {
+  if (frame1.samples_per_channel_ != frame2.samples_per_channel_) {
     return false;
   }
-  if (frame1.num_channels_ !=
-      frame2.num_channels_) {
+  if (frame1.num_channels_ != frame2.num_channels_) {
     return false;
   }
   if (memcmp(frame1.data_, frame2.data_,
              frame1.samples_per_channel_ * frame1.num_channels_ *
-               sizeof(int16_t))) {
+                 sizeof(int16_t))) {
     return false;
   }
   return true;
 }
 
-#ifdef WEBRTC_AUDIOPROC_BIT_EXACT
-// These functions are only used by the bit-exact test.
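+// Enables every APM component: AECM plus the adaptive digital AGC for the
+// fixed-point profile, or the full AEC (with drift compensation, metrics and
+// delay logging) plus the adaptive analog AGC for the float profile.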
+void EnableAllAPComponents(AudioProcessing* ap) {
+#if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
+  EXPECT_NOERR(ap->echo_control_mobile()->Enable(true));
+
+  EXPECT_NOERR(ap->gain_control()->set_mode(GainControl::kAdaptiveDigital));
+  EXPECT_NOERR(ap->gain_control()->Enable(true));
+#elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
+  EXPECT_NOERR(ap->echo_cancellation()->enable_drift_compensation(true));
+  EXPECT_NOERR(ap->echo_cancellation()->enable_metrics(true));
+  EXPECT_NOERR(ap->echo_cancellation()->enable_delay_logging(true));
+  EXPECT_NOERR(ap->echo_cancellation()->Enable(true));
+
+  EXPECT_NOERR(ap->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
+  EXPECT_NOERR(ap->gain_control()->set_analog_level_limits(0, 255));
+  EXPECT_NOERR(ap->gain_control()->Enable(true));
+#endif
+
+  EXPECT_NOERR(ap->high_pass_filter()->Enable(true));
+  EXPECT_NOERR(ap->level_estimator()->Enable(true));
+  EXPECT_NOERR(ap->noise_suppression()->Enable(true));
+
+  EXPECT_NOERR(ap->voice_detection()->Enable(true));
+}
+
+// These functions are only used by ApmTest.Process.
 template <class T>
 T AbsValue(T a) {
   return a > 0 ? a: -a;
@@ -151,7 +196,7 @@ int16_t MaxAudioFrame(const AudioFrame& frame) {
 
 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
 void TestStats(const AudioProcessing::Statistic& test,
-               const webrtc::audioproc::Test::Statistic& reference) {
+               const audioproc::Test::Statistic& reference) {
   EXPECT_EQ(reference.instant(), test.instant);
   EXPECT_EQ(reference.average(), test.average);
   EXPECT_EQ(reference.maximum(), test.maximum);
@@ -159,50 +204,71 @@ void TestStats(const AudioProcessing::Statistic& test,
 }
 
 void WriteStatsMessage(const AudioProcessing::Statistic& output,
-                       webrtc::audioproc::Test::Statistic* message) {
-  message->set_instant(output.instant);
-  message->set_average(output.average);
-  message->set_maximum(output.maximum);
-  message->set_minimum(output.minimum);
+                       audioproc::Test::Statistic* msg) {
+  msg->set_instant(output.instant);
+  msg->set_average(output.average);
+  msg->set_maximum(output.maximum);
+  msg->set_minimum(output.minimum);
 }
 #endif
 
-void WriteMessageLiteToFile(const std::string filename,
-                            const ::google::protobuf::MessageLite& message) {
+void OpenFileAndWriteMessage(const std::string filename,
+                             const ::google::protobuf::MessageLite& msg) {
+#if defined(WEBRTC_LINUX) && !defined(WEBRTC_ANDROID)
   FILE* file = fopen(filename.c_str(), "wb");
-  ASSERT_TRUE(file != NULL) << "Could not open " << filename;
-  int size = message.ByteSize();
+  ASSERT_TRUE(file != NULL);
+
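+  // The on-disk format is a host-endian int32 size prefix followed by the
+  // serialized message bytes; OpenFileAndReadMessage() reads the same layout
+  // back via ReadMessageFromFile().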
+  int32_t size = msg.ByteSize();
   ASSERT_GT(size, 0);
-  unsigned char* array = new unsigned char[size];
-  ASSERT_TRUE(message.SerializeToArray(array, size));
+  scoped_ptr<uint8_t[]> array(new uint8_t[size]);
+  ASSERT_TRUE(msg.SerializeToArray(array.get(), size));
 
-  ASSERT_EQ(1u, fwrite(&size, sizeof(int), 1, file));
+  ASSERT_EQ(1u, fwrite(&size, sizeof(size), 1, file));
   ASSERT_EQ(static_cast<size_t>(size),
-      fwrite(array, sizeof(unsigned char), size, file));
-
-  delete [] array;
+      fwrite(array.get(), sizeof(array[0]), size, file));
   fclose(file);
+#else
+  std::cout << "Warning: Writing new reference is only allowed on Linux!"
+      << std::endl;
+#endif
 }
 
-void ReadMessageLiteFromFile(const std::string filename,
-                             ::google::protobuf::MessageLite* message) {
-  assert(message != NULL);
+std::string ResourceFilePath(std::string name, int sample_rate_hz) {
+  std::ostringstream ss;
+  // Resource files are all stereo.
+  ss << name << sample_rate_hz / 1000 << "_stereo";
+  return test::ResourcePath(ss.str(), "pcm");
+}
 
-  FILE* file = fopen(filename.c_str(), "rb");
-  ASSERT_TRUE(file != NULL) << "Could not open " << filename;
-  int size = 0;
-  ASSERT_EQ(1u, fread(&size, sizeof(int), 1, file));
-  ASSERT_GT(size, 0);
-  unsigned char* array = new unsigned char[size];
-  ASSERT_EQ(static_cast<size_t>(size),
-      fread(array, sizeof(unsigned char), size, file));
+std::string OutputFilePath(std::string name,
+                           int input_rate,
+                           int output_rate,
+                           int reverse_rate,
+                           int num_input_channels,
+                           int num_output_channels,
+                           int num_reverse_channels) {
+  std::ostringstream ss;
+  ss << name << "_i" << num_input_channels << "_" << input_rate / 1000
+     << "_r" << num_reverse_channels << "_" << reverse_rate  / 1000 << "_";
+  if (num_output_channels == 1) {
+    ss << "mono";
+  } else if (num_output_channels == 2) {
+    ss << "stereo";
+  } else {
+    assert(false);
+  }
+  ss << output_rate / 1000 << ".pcm";
 
-  ASSERT_TRUE(message->ParseFromArray(array, size));
+  return test::OutputPath() + ss.str();
+}
 
-  delete [] array;
+void OpenFileAndReadMessage(const std::string filename,
+                            ::google::protobuf::MessageLite* msg) {
+  FILE* file = fopen(filename.c_str(), "rb");
+  ASSERT_TRUE(file != NULL);
+  ReadMessageFromFile(file, msg);
   fclose(file);
 }
-#endif  // WEBRTC_AUDIOPROC_BIT_EXACT
 
 class ApmTest : public ::testing::Test {
  protected:
@@ -212,8 +278,7 @@ class ApmTest : public ::testing::Test {
 
   static void SetUpTestCase() {
     Trace::CreateTrace();
-    std::string trace_filename = webrtc::test::OutputPath() +
-        "audioproc_trace.txt";
+    std::string trace_filename = test::OutputPath() + "audioproc_trace.txt";
     ASSERT_EQ(0, Trace::SetTraceFile(trace_filename.c_str()));
   }
 
@@ -221,49 +286,75 @@ class ApmTest : public ::testing::Test {
     Trace::ReturnTrace();
   }
 
-  void Init(int sample_rate_hz, int num_reverse_channels,
-            int num_input_channels, int num_output_channels,
+  // Used to select between int and float interface tests.
+  enum Format {
+    kIntFormat,
+    kFloatFormat
+  };
+
+  void Init(int sample_rate_hz,
+            int output_sample_rate_hz,
+            int reverse_sample_rate_hz,
+            int num_input_channels,
+            int num_output_channels,
+            int num_reverse_channels,
             bool open_output_file);
-  std::string ResourceFilePath(std::string name, int sample_rate_hz);
-  std::string OutputFilePath(std::string name,
-                             int sample_rate_hz,
-                             int num_reverse_channels,
-                             int num_input_channels,
-                             int num_output_channels);
+  void Init(AudioProcessing* ap);
   void EnableAllComponents();
   bool ReadFrame(FILE* file, AudioFrame* frame);
+  bool ReadFrame(FILE* file, AudioFrame* frame, ChannelBuffer<float>* cb);
+  void ReadFrameWithRewind(FILE* file, AudioFrame* frame);
+  void ReadFrameWithRewind(FILE* file, AudioFrame* frame,
+                           ChannelBuffer<float>* cb);
   void ProcessWithDefaultStreamParameters(AudioFrame* frame);
   void ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
                                     int delay_min, int delay_max);
   void TestChangingChannels(int num_channels,
                             AudioProcessing::Error expected_return);
+  void RunQuantizedVolumeDoesNotGetStuckTest(int sample_rate);
+  void RunManualVolumeChangeIsPossibleTest(int sample_rate);
+  void StreamParametersTest(Format format);
+  int ProcessStreamChooser(Format format);
+  int AnalyzeReverseStreamChooser(Format format);
+  void ProcessDebugDump(const std::string& in_filename,
+                        const std::string& out_filename,
+                        Format format);
+  void VerifyDebugDumpTest(Format format);
 
   const std::string output_path_;
   const std::string ref_path_;
   const std::string ref_filename_;
-  scoped_ptr<webrtc::AudioProcessing> apm_;
+  scoped_ptr<AudioProcessing> apm_;
   AudioFrame* frame_;
   AudioFrame* revframe_;
+  scoped_ptr<ChannelBuffer<float> > float_cb_;
+  scoped_ptr<ChannelBuffer<float> > revfloat_cb_;
+  int output_sample_rate_hz_;
+  int num_output_channels_;
   FILE* far_file_;
   FILE* near_file_;
   FILE* out_file_;
 };
 
 ApmTest::ApmTest()
-    : output_path_(webrtc::test::OutputPath()),
-      ref_path_(webrtc::test::ProjectRootPath() +
-                "data/audio_processing/"),
+    : output_path_(test::OutputPath()),
+      ref_path_(test::ProjectRootPath() + "data/audio_processing/"),
 #if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
       ref_filename_(ref_path_ + "output_data_fixed.pb"),
 #elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
       ref_filename_(ref_path_ + "output_data_float.pb"),
 #endif
-      apm_(AudioProcessing::Create(0)),
       frame_(NULL),
       revframe_(NULL),
+      output_sample_rate_hz_(0),
+      num_output_channels_(0),
       far_file_(NULL),
       near_file_(NULL),
-      out_file_(NULL) {}
+      out_file_(NULL) {
+  Config config;
+  config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
+  apm_.reset(AudioProcessing::Create(config));
+}
 
 void ApmTest::SetUp() {
   ASSERT_TRUE(apm_.get() != NULL);
@@ -272,9 +363,9 @@ void ApmTest::SetUp() {
   revframe_ = new AudioFrame();
 
 #if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
-  Init(16000, 2, 2, 2, false);
+  Init(16000, 16000, 16000, 2, 2, 2, false);
 #else
-  Init(32000, 2, 2, 2, false);
+  Init(32000, 32000, 32000, 2, 2, 2, false);
 #endif
 }
 
@@ -305,51 +396,30 @@ void ApmTest::TearDown() {
   out_file_ = NULL;
 }
 
-std::string ApmTest::ResourceFilePath(std::string name, int sample_rate_hz) {
-  std::ostringstream ss;
-  // Resource files are all stereo.
-  ss << name << sample_rate_hz / 1000 << "_stereo";
-  return webrtc::test::ResourcePath(ss.str(), "pcm");
+void ApmTest::Init(AudioProcessing* ap) {
+  ASSERT_EQ(kNoErr,
+            ap->Initialize(frame_->sample_rate_hz_,
+                           output_sample_rate_hz_,
+                           revframe_->sample_rate_hz_,
+                           LayoutFromChannels(frame_->num_channels_),
+                           LayoutFromChannels(num_output_channels_),
+                           LayoutFromChannels(revframe_->num_channels_)));
 }
 
-std::string ApmTest::OutputFilePath(std::string name,
-                                    int sample_rate_hz,
-                                    int num_reverse_channels,
-                                    int num_input_channels,
-                                    int num_output_channels) {
-  std::ostringstream ss;
-  ss << name << sample_rate_hz / 1000 << "_" << num_reverse_channels << "r" <<
-      num_input_channels << "i" << "_";
-  if (num_output_channels == 1) {
-    ss << "mono";
-  } else if (num_output_channels == 2) {
-    ss << "stereo";
-  } else {
-    assert(false);
-    return "";
-  }
-  ss << ".pcm";
-
-  return output_path_ + ss.str();
-}
-
-void ApmTest::Init(int sample_rate_hz, int num_reverse_channels,
-                   int num_input_channels, int num_output_channels,
+void ApmTest::Init(int sample_rate_hz,
+                   int output_sample_rate_hz,
+                   int reverse_sample_rate_hz,
+                   int num_input_channels,
+                   int num_output_channels,
+                   int num_reverse_channels,
                    bool open_output_file) {
-  // We always use 10 ms frames.
-  const int samples_per_channel = kChunkSizeMs * sample_rate_hz / 1000;
-  frame_->samples_per_channel_ = samples_per_channel;
-  frame_->num_channels_ = num_input_channels;
-  frame_->sample_rate_hz_ = sample_rate_hz;
-  revframe_->samples_per_channel_ = samples_per_channel;
-  revframe_->num_channels_ = num_reverse_channels;
-  revframe_->sample_rate_hz_ = sample_rate_hz;
-
-  // Make one process call to ensure the audio parameters are set. It might
-  // result in a stream error which we can safely ignore.
-  int err = apm_->ProcessStream(frame_);
-  ASSERT_TRUE(err == kNoErr || err == apm_->kStreamParameterNotSetError);
-  ASSERT_EQ(apm_->kNoError, apm_->Initialize());
+  SetContainerFormat(sample_rate_hz, num_input_channels, frame_, &float_cb_);
+  output_sample_rate_hz_ = output_sample_rate_hz;
+  num_output_channels_ = num_output_channels;
+
+  SetContainerFormat(reverse_sample_rate_hz, num_reverse_channels, revframe_,
+                     &revfloat_cb_);
+  Init(apm_.get());
 
   if (far_file_) {
     ASSERT_EQ(0, fclose(far_file_));
@@ -371,8 +441,13 @@ void ApmTest::Init(int sample_rate_hz, int num_reverse_channels,
     if (out_file_) {
       ASSERT_EQ(0, fclose(out_file_));
     }
-    filename = OutputFilePath("out", sample_rate_hz, num_reverse_channels,
-                              num_input_channels, num_output_channels);
+    filename = OutputFilePath("out",
+                              sample_rate_hz,
+                              output_sample_rate_hz,
+                              reverse_sample_rate_hz,
+                              num_input_channels,
+                              num_output_channels,
+                              num_reverse_channels);
     out_file_ = fopen(filename.c_str(), "wb");
     ASSERT_TRUE(out_file_ != NULL) << "Could not open file " <<
           filename << "\n";
@@ -380,42 +455,11 @@ void ApmTest::Init(int sample_rate_hz, int num_reverse_channels,
 }
 
 void ApmTest::EnableAllComponents() {
-#if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
-  EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(true));
-
-  EXPECT_EQ(apm_->kNoError,
-            apm_->gain_control()->set_mode(GainControl::kAdaptiveDigital));
-  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
-#elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
-  EXPECT_EQ(apm_->kNoError,
-            apm_->echo_cancellation()->enable_drift_compensation(true));
-  EXPECT_EQ(apm_->kNoError,
-            apm_->echo_cancellation()->enable_metrics(true));
-  EXPECT_EQ(apm_->kNoError,
-            apm_->echo_cancellation()->enable_delay_logging(true));
-  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
-
-  EXPECT_EQ(apm_->kNoError,
-            apm_->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
-  EXPECT_EQ(apm_->kNoError,
-            apm_->gain_control()->set_analog_level_limits(0, 255));
-  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
-#endif
-
-  EXPECT_EQ(apm_->kNoError,
-            apm_->high_pass_filter()->Enable(true));
-
-  EXPECT_EQ(apm_->kNoError,
-            apm_->level_estimator()->Enable(true));
-
-  EXPECT_EQ(apm_->kNoError,
-            apm_->noise_suppression()->Enable(true));
-
-  EXPECT_EQ(apm_->kNoError,
-            apm_->voice_detection()->Enable(true));
+  EnableAllAPComponents(apm_.get());
 }
 
-bool ApmTest::ReadFrame(FILE* file, AudioFrame* frame) {
+bool ApmTest::ReadFrame(FILE* file, AudioFrame* frame,
+                        ChannelBuffer<float>* cb) {
   // The files always contain stereo audio.
   size_t frame_size = frame->samples_per_channel_ * 2;
   size_t read_count = fread(frame->data_,
@@ -433,9 +477,30 @@ bool ApmTest::ReadFrame(FILE* file, AudioFrame* frame) {
                     frame->samples_per_channel_);
   }
 
+  if (cb) {
+    ConvertToFloat(*frame, cb);
+  }
   return true;
 }
 
+bool ApmTest::ReadFrame(FILE* file, AudioFrame* frame) {
+  return ReadFrame(file, frame, NULL);
+}
+
+// If the end of the file has been reached, rewind it and attempt to read the
+// frame again.
+void ApmTest::ReadFrameWithRewind(FILE* file, AudioFrame* frame,
+                                  ChannelBuffer<float>* cb) {
+  if (!ReadFrame(file, frame, cb)) {
+    rewind(file);
+    ASSERT_TRUE(ReadFrame(file, frame, cb));
+  }
+}
+
+void ApmTest::ReadFrameWithRewind(FILE* file, AudioFrame* frame) {
+  ReadFrameWithRewind(file, frame, NULL);
+}
+
 void ApmTest::ProcessWithDefaultStreamParameters(AudioFrame* frame) {
   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
   apm_->echo_cancellation()->set_stream_drift_samples(0);
@@ -444,12 +509,36 @@ void ApmTest::ProcessWithDefaultStreamParameters(AudioFrame* frame) {
   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame));
 }
 
+int ApmTest::ProcessStreamChooser(Format format) {
+  if (format == kIntFormat) {
+    return apm_->ProcessStream(frame_);
+  }
+  return apm_->ProcessStream(float_cb_->channels(),
+                             frame_->samples_per_channel_,
+                             frame_->sample_rate_hz_,
+                             LayoutFromChannels(frame_->num_channels_),
+                             output_sample_rate_hz_,
+                             LayoutFromChannels(num_output_channels_),
+                             float_cb_->channels());
+}
+
+int ApmTest::AnalyzeReverseStreamChooser(Format format) {
+  if (format == kIntFormat) {
+    return apm_->AnalyzeReverseStream(revframe_);
+  }
+  return apm_->AnalyzeReverseStream(
+      revfloat_cb_->channels(),
+      revframe_->samples_per_channel_,
+      revframe_->sample_rate_hz_,
+      LayoutFromChannels(revframe_->num_channels_));
+}
+
 void ApmTest::ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
                                            int delay_min, int delay_max) {
   // The |revframe_| and |frame_| should include the proper frame information,
   // hence can be used for extracting information.
-  webrtc::AudioFrame tmp_frame;
-  std::queue<webrtc::AudioFrame*> frame_queue;
+  AudioFrame tmp_frame;
+  std::queue<AudioFrame*> frame_queue;
   bool causal = true;
 
   tmp_frame.CopyFrom(*revframe_);
@@ -459,14 +548,14 @@ void ApmTest::ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
   // Initialize the |frame_queue| with empty frames.
   int frame_delay = delay_ms / 10;
   while (frame_delay < 0) {
-    webrtc::AudioFrame* frame = new AudioFrame();
+    AudioFrame* frame = new AudioFrame();
     frame->CopyFrom(tmp_frame);
     frame_queue.push(frame);
     frame_delay++;
     causal = false;
   }
   while (frame_delay > 0) {
-    webrtc::AudioFrame* frame = new AudioFrame();
+    AudioFrame* frame = new AudioFrame();
     frame->CopyFrom(tmp_frame);
     frame_queue.push(frame);
     frame_delay--;
@@ -476,13 +565,13 @@ void ApmTest::ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
   // possible to keep processing time down.  4.5 seconds seemed to be a good
   // compromise for this recording.
   for (int frame_count = 0; frame_count < 450; ++frame_count) {
-    webrtc::AudioFrame* frame = new AudioFrame();
+    AudioFrame* frame = new AudioFrame();
     frame->CopyFrom(tmp_frame);
     // Use the near end recording, since that has more speech in it.
     ASSERT_TRUE(ReadFrame(near_file_, frame));
     frame_queue.push(frame);
-    webrtc::AudioFrame* reverse_frame = frame;
-    webrtc::AudioFrame* process_frame = frame_queue.front();
+    AudioFrame* reverse_frame = frame;
+    AudioFrame* process_frame = frame_queue.front();
     if (!causal) {
       reverse_frame = frame_queue.front();
       // When we call ProcessStream() the frame is modified, so we can't use the
@@ -509,7 +598,7 @@ void ApmTest::ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
 
   rewind(near_file_);
   while (!frame_queue.empty()) {
-    webrtc::AudioFrame* frame = frame_queue.front();
+    AudioFrame* frame = frame_queue.front();
     frame_queue.pop();
     delete frame;
   }
@@ -531,20 +620,21 @@ void ApmTest::ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
   EXPECT_LE(expected_median_low, median);
 }
 
-TEST_F(ApmTest, DISABLED_StreamParameters) {
+void ApmTest::StreamParametersTest(Format format) {
   // No errors when the components are disabled.
-  EXPECT_EQ(apm_->kNoError,
-            apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
 
   // -- Missing AGC level --
   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
-  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError,
+            ProcessStreamChooser(format));
 
   // Resets after successful ProcessStream().
   EXPECT_EQ(apm_->kNoError,
             apm_->gain_control()->set_stream_analog_level(127));
-  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
-  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError,
+            ProcessStreamChooser(format));
 
   // Other stream parameters set correctly.
   EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
@@ -553,20 +643,22 @@ TEST_F(ApmTest, DISABLED_StreamParameters) {
   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
   apm_->echo_cancellation()->set_stream_drift_samples(0);
   EXPECT_EQ(apm_->kStreamParameterNotSetError,
-            apm_->ProcessStream(frame_));
+            ProcessStreamChooser(format));
   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
   EXPECT_EQ(apm_->kNoError,
             apm_->echo_cancellation()->enable_drift_compensation(false));
 
   // -- Missing delay --
   EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
-  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
-  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError,
+            ProcessStreamChooser(format));
 
   // Resets after successful ProcessStream().
   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
-  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
-  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError,
+            ProcessStreamChooser(format));
 
   // Other stream parameters set correctly.
   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
@@ -575,37 +667,49 @@ TEST_F(ApmTest, DISABLED_StreamParameters) {
   apm_->echo_cancellation()->set_stream_drift_samples(0);
   EXPECT_EQ(apm_->kNoError,
             apm_->gain_control()->set_stream_analog_level(127));
-  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError,
+            ProcessStreamChooser(format));
   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
 
   // -- Missing drift --
-  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError,
+            ProcessStreamChooser(format));
 
   // Resets after successful ProcessStream().
   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
   apm_->echo_cancellation()->set_stream_drift_samples(0);
-  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
-  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError,
+            ProcessStreamChooser(format));
 
   // Other stream parameters set correctly.
   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
   EXPECT_EQ(apm_->kNoError,
             apm_->gain_control()->set_stream_analog_level(127));
-  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError,
+            ProcessStreamChooser(format));
 
   // -- No stream parameters --
   EXPECT_EQ(apm_->kNoError,
-            apm_->AnalyzeReverseStream(revframe_));
+            AnalyzeReverseStreamChooser(format));
   EXPECT_EQ(apm_->kStreamParameterNotSetError,
-            apm_->ProcessStream(frame_));
+            ProcessStreamChooser(format));
 
   // -- All there --
   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
   apm_->echo_cancellation()->set_stream_drift_samples(0);
   EXPECT_EQ(apm_->kNoError,
             apm_->gain_control()->set_stream_analog_level(127));
-  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+}
+
+TEST_F(ApmTest, StreamParametersInt) {
+  StreamParametersTest(kIntFormat);
+}
+
+TEST_F(ApmTest, StreamParametersFloat) {
+  StreamParametersTest(kFloatFormat);
 }
 
 TEST_F(ApmTest, DefaultDelayOffsetIsZero) {
@@ -647,20 +751,21 @@ TEST_F(ApmTest, Channels) {
   for (int i = 1; i < 3; i++) {
     TestChangingChannels(i, kNoErr);
     EXPECT_EQ(i, apm_->num_input_channels());
-    EXPECT_EQ(i, apm_->num_reverse_channels());
+    // We always force the number of reverse channels used for processing to 1.
+    EXPECT_EQ(1, apm_->num_reverse_channels());
   }
 }
 
-TEST_F(ApmTest, SampleRates) {
+TEST_F(ApmTest, SampleRatesInt) {
   // Testing invalid sample rates
-  SetFrameSampleRate(frame_, 10000);
-  EXPECT_EQ(apm_->kBadSampleRateError, apm_->ProcessStream(frame_));
+  SetContainerFormat(10000, 2, frame_, &float_cb_);
+  EXPECT_EQ(apm_->kBadSampleRateError, ProcessStreamChooser(kIntFormat));
   // Testing valid sample rates
   int fs[] = {8000, 16000, 32000};
   for (size_t i = 0; i < sizeof(fs) / sizeof(*fs); i++) {
-    SetFrameSampleRate(frame_, fs[i]);
-    EXPECT_EQ(kNoErr, apm_->ProcessStream(frame_));
-    EXPECT_EQ(fs[i], apm_->sample_rate_hz());
+    SetContainerFormat(fs[i], 2, frame_, &float_cb_);
+    EXPECT_NOERR(ProcessStreamChooser(kIntFormat));
+    EXPECT_EQ(fs[i], apm_->input_sample_rate_hz());
   }
 }
 
@@ -672,19 +777,6 @@ TEST_F(ApmTest, EchoCancellation) {
             apm_->echo_cancellation()->enable_drift_compensation(false));
   EXPECT_FALSE(apm_->echo_cancellation()->is_drift_compensation_enabled());
 
-  EXPECT_EQ(apm_->kBadParameterError,
-      apm_->echo_cancellation()->set_device_sample_rate_hz(4000));
-  EXPECT_EQ(apm_->kBadParameterError,
-      apm_->echo_cancellation()->set_device_sample_rate_hz(100000));
-
-  int rate[] = {16000, 44100, 48000};
-  for (size_t i = 0; i < sizeof(rate)/sizeof(*rate); i++) {
-    EXPECT_EQ(apm_->kNoError,
-        apm_->echo_cancellation()->set_device_sample_rate_hz(rate[i]));
-    EXPECT_EQ(rate[i],
-        apm_->echo_cancellation()->device_sample_rate_hz());
-  }
-
   EchoCancellation::SuppressionLevel level[] = {
     EchoCancellation::kLowSuppression,
     EchoCancellation::kModerateSuppression,
@@ -733,7 +825,7 @@ TEST_F(ApmTest, EchoCancellation) {
   EXPECT_FALSE(apm_->echo_cancellation()->aec_core() != NULL);
 }
 
-TEST_F(ApmTest, EchoCancellationReportsCorrectDelays) {
+TEST_F(ApmTest, DISABLED_EchoCancellationReportsCorrectDelays) {
   // Enable AEC only.
   EXPECT_EQ(apm_->kNoError,
             apm_->echo_cancellation()->enable_drift_compensation(false));
@@ -742,6 +834,9 @@ TEST_F(ApmTest, EchoCancellationReportsCorrectDelays) {
   EXPECT_EQ(apm_->kNoError,
             apm_->echo_cancellation()->enable_delay_logging(true));
   EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+  Config config;
+  config.Set<ReportedDelay>(new ReportedDelay(true));
+  apm_->SetExtraOptions(config);
 
   // Internally in the AEC the amount of lookahead the delay estimation can
   // handle is 15 blocks and the maximum delay is set to 60 blocks.
@@ -762,7 +857,13 @@ TEST_F(ApmTest, EchoCancellationReportsCorrectDelays) {
   // within a valid region (set to +-1.5 blocks). Note that these cases are
   // sampling frequency dependent.
   for (size_t i = 0; i < kProcessSampleRatesSize; i++) {
-    Init(kProcessSampleRates[i], 2, 2, 2, false);
+    Init(kProcessSampleRates[i],
+         kProcessSampleRates[i],
+         kProcessSampleRates[i],
+         2,
+         2,
+         2,
+         false);
     // Sampling frequency dependent variables.
     const int num_ms_per_block = std::max(4,
                                           640 / frame_->samples_per_channel_);
@@ -804,18 +905,18 @@ TEST_F(ApmTest, EchoCancellationReportsCorrectDelays) {
 TEST_F(ApmTest, EchoControlMobile) {
   // AECM won't use super-wideband.
   SetFrameSampleRate(frame_, 32000);
-  EXPECT_EQ(kNoErr, apm_->ProcessStream(frame_));
+  EXPECT_NOERR(apm_->ProcessStream(frame_));
   EXPECT_EQ(apm_->kBadSampleRateError,
             apm_->echo_control_mobile()->Enable(true));
   SetFrameSampleRate(frame_, 16000);
-  EXPECT_EQ(kNoErr, apm_->ProcessStream(frame_));
+  EXPECT_NOERR(apm_->ProcessStream(frame_));
   EXPECT_EQ(apm_->kNoError,
             apm_->echo_control_mobile()->Enable(true));
   SetFrameSampleRate(frame_, 32000);
   EXPECT_EQ(apm_->kUnsupportedComponentError, apm_->ProcessStream(frame_));
 
   // Turn AECM on (and AEC off)
-  Init(16000, 2, 2, 2, false);
+  Init(16000, 16000, 16000, 2, 2, 2, false);
   EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(true));
   EXPECT_TRUE(apm_->echo_control_mobile()->is_enabled());
 
@@ -843,8 +944,8 @@ TEST_F(ApmTest, EchoControlMobile) {
   // Set and get echo path
   const size_t echo_path_size =
       apm_->echo_control_mobile()->echo_path_size_bytes();
-  scoped_array<char> echo_path_in(new char[echo_path_size]);
-  scoped_array<char> echo_path_out(new char[echo_path_size]);
+  scoped_ptr<char[]> echo_path_in(new char[echo_path_size]);
+  scoped_ptr<char[]> echo_path_out(new char[echo_path_size]);
   EXPECT_EQ(apm_->kNullPointerError,
             apm_->echo_control_mobile()->SetEchoPath(NULL, echo_path_size));
   EXPECT_EQ(apm_->kNullPointerError,
@@ -881,7 +982,7 @@ TEST_F(ApmTest, EchoControlMobile) {
   EXPECT_FALSE(apm_->echo_control_mobile()->is_enabled());
 }
 
-TEST_F(ApmTest, DISABLED_GainControl) {
+TEST_F(ApmTest, GainControl) {
   // Testing gain modes
   EXPECT_EQ(apm_->kNoError,
       apm_->gain_control()->set_mode(
@@ -977,6 +1078,82 @@ TEST_F(ApmTest, DISABLED_GainControl) {
   EXPECT_FALSE(apm_->gain_control()->is_enabled());
 }
 
+void ApmTest::RunQuantizedVolumeDoesNotGetStuckTest(int sample_rate) {
+  Init(sample_rate, sample_rate, sample_rate, 2, 2, 2, false);
+  EXPECT_EQ(apm_->kNoError,
+            apm_->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
+  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+
+  int out_analog_level = 0;
+  for (int i = 0; i < 2000; ++i) {
+    ReadFrameWithRewind(near_file_, frame_);
+    // Ensure the audio is at a low level, so the AGC will try to increase it.
+    ScaleFrame(frame_, 0.25);
+
+    // Always pass in the same volume.
+    EXPECT_EQ(apm_->kNoError,
+        apm_->gain_control()->set_stream_analog_level(100));
+    EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+    out_analog_level = apm_->gain_control()->stream_analog_level();
+  }
+
+  // Ensure the AGC is still able to reach the maximum.
+  EXPECT_EQ(255, out_analog_level);
+}
+
+// Verifies that despite volume slider quantization, the AGC can continue to
+// increase its volume.
+TEST_F(ApmTest, QuantizedVolumeDoesNotGetStuck) {
+  for (size_t i = 0; i < kSampleRatesSize; ++i) {
+    RunQuantizedVolumeDoesNotGetStuckTest(kSampleRates[i]);
+  }
+}
+
+void ApmTest::RunManualVolumeChangeIsPossibleTest(int sample_rate) {
+  Init(sample_rate, sample_rate, sample_rate, 2, 2, 2, false);
+  EXPECT_EQ(apm_->kNoError,
+            apm_->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
+  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+
+  int out_analog_level = 100;
+  for (int i = 0; i < 1000; ++i) {
+    ReadFrameWithRewind(near_file_, frame_);
+    // Ensure the audio is at a low level, so the AGC will try to increase it.
+    ScaleFrame(frame_, 0.25);
+
+    EXPECT_EQ(apm_->kNoError,
+        apm_->gain_control()->set_stream_analog_level(out_analog_level));
+    EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+    out_analog_level = apm_->gain_control()->stream_analog_level();
+  }
+
+  // Ensure the volume was raised.
+  EXPECT_GT(out_analog_level, 100);
+  int highest_level_reached = out_analog_level;
+  // Simulate a user manual volume change.
+  out_analog_level = 100;
+
+  for (int i = 0; i < 300; ++i) {
+    ReadFrameWithRewind(near_file_, frame_);
+    ScaleFrame(frame_, 0.25);
+
+    EXPECT_EQ(apm_->kNoError,
+        apm_->gain_control()->set_stream_analog_level(out_analog_level));
+    EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+    out_analog_level = apm_->gain_control()->stream_analog_level();
+    // Check that AGC respected the manually adjusted volume.
+    EXPECT_LT(out_analog_level, highest_level_reached);
+  }
+  // Check that the volume was still raised.
+  EXPECT_GT(out_analog_level, 100);
+}
+
+TEST_F(ApmTest, ManualVolumeChangeIsPossible) {
+  for (size_t i = 0; i < kSampleRatesSize; ++i) {
+    RunManualVolumeChangeIsPossibleTest(kSampleRates[i]);
+  }
+}
+
 TEST_F(ApmTest, NoiseSuppression) {
   // Test valid suppression levels.
   NoiseSuppression::Level level[] = {
@@ -1053,15 +1230,6 @@ TEST_F(ApmTest, LevelEstimator) {
   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
   EXPECT_EQ(70, apm_->level_estimator()->RMS());
 
-  // Min value if energy_ == 0.
-  SetFrameTo(frame_, 10000);
-  uint32_t energy = frame_->energy_;  // Save default to restore below.
-  frame_->energy_ = 0;
-  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
-  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
-  EXPECT_EQ(127, apm_->level_estimator()->RMS());
-  frame_->energy_ = energy;
-
   // Verify reset after enable/disable.
   SetFrameTo(frame_, 32767);
   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
@@ -1155,7 +1323,7 @@ TEST_F(ApmTest, AllProcessingDisabledByDefault) {
 
 TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabled) {
   for (size_t i = 0; i < kSampleRatesSize; i++) {
-    Init(kSampleRates[i], 2, 2, 2, false);
+    Init(kSampleRates[i], kSampleRates[i], kSampleRates[i], 2, 2, 2, false);
     SetFrameTo(frame_, 1000, 2000);
     AudioFrame frame_copy;
     frame_copy.CopyFrom(*frame_);
@@ -1170,25 +1338,29 @@ TEST_F(ApmTest, IdenticalInputChannelsResultInIdenticalOutputChannels) {
   EnableAllComponents();
 
   for (size_t i = 0; i < kProcessSampleRatesSize; i++) {
-    Init(kProcessSampleRates[i], 2, 2, 2, false);
+    Init(kProcessSampleRates[i],
+         kProcessSampleRates[i],
+         kProcessSampleRates[i],
+         2,
+         2,
+         2,
+         false);
     int analog_level = 127;
-    EXPECT_EQ(0, feof(far_file_));
-    EXPECT_EQ(0, feof(near_file_));
-    while (1) {
-      if (!ReadFrame(far_file_, revframe_)) break;
+    ASSERT_EQ(0, feof(far_file_));
+    ASSERT_EQ(0, feof(near_file_));
+    while (ReadFrame(far_file_, revframe_) && ReadFrame(near_file_, frame_)) {
       CopyLeftToRightChannel(revframe_->data_, revframe_->samples_per_channel_);
 
-      EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));
+      ASSERT_EQ(kNoErr, apm_->AnalyzeReverseStream(revframe_));
 
-      if (!ReadFrame(near_file_, frame_)) break;
       CopyLeftToRightChannel(frame_->data_, frame_->samples_per_channel_);
       frame_->vad_activity_ = AudioFrame::kVadUnknown;
 
-      EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
+      ASSERT_EQ(kNoErr, apm_->set_stream_delay_ms(0));
       apm_->echo_cancellation()->set_stream_drift_samples(0);
-      EXPECT_EQ(apm_->kNoError,
+      ASSERT_EQ(kNoErr,
           apm_->gain_control()->set_stream_analog_level(analog_level));
-      EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+      ASSERT_EQ(kNoErr, apm_->ProcessStream(frame_));
       analog_level = apm_->gain_control()->stream_analog_level();
 
       VerifyChannelsAreEqual(frame_->data_, frame_->samples_per_channel_);
@@ -1246,6 +1418,11 @@ TEST_F(ApmTest, SplittingFilter) {
   // TODO(andrew): This test, and the one below, rely rather tenuously on the
   // behavior of the AEC. Think of something more robust.
   EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+  // Make sure we have extended filter enabled. This makes sure nothing is
+  // touched until we have a farend frame.
+  Config config;
+  config.Set<DelayCorrection>(new DelayCorrection(true));
+  apm_->SetExtraOptions(config);
   SetFrameTo(frame_, 1000);
   frame_copy.CopyFrom(*frame_);
   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
@@ -1269,9 +1446,148 @@ TEST_F(ApmTest, SplittingFilter) {
   EXPECT_FALSE(FrameDataAreEqual(*frame_, frame_copy));
 }
 
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+void ApmTest::ProcessDebugDump(const std::string& in_filename,
+                               const std::string& out_filename,
+                               Format format) {
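+  // Replays the INIT, REVERSE_STREAM and STREAM events recorded in
+  // |in_filename| through APM while writing a new debug dump to
+  // |out_filename|.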
+  FILE* in_file = fopen(in_filename.c_str(), "rb");
+  ASSERT_TRUE(in_file != NULL);
+  audioproc::Event event_msg;
+  bool first_init = true;
+
+  while (ReadMessageFromFile(in_file, &event_msg)) {
+    if (event_msg.type() == audioproc::Event::INIT) {
+      const audioproc::Init msg = event_msg.init();
+      int reverse_sample_rate = msg.sample_rate();
+      if (msg.has_reverse_sample_rate()) {
+        reverse_sample_rate = msg.reverse_sample_rate();
+      }
+      int output_sample_rate = msg.sample_rate();
+      if (msg.has_output_sample_rate()) {
+        output_sample_rate = msg.output_sample_rate();
+      }
+
+      Init(msg.sample_rate(),
+           output_sample_rate,
+           reverse_sample_rate,
+           msg.num_input_channels(),
+           msg.num_output_channels(),
+           msg.num_reverse_channels(),
+           false);
+      if (first_init) {
+        // StartDebugRecording() writes an additional init message. Don't start
+        // recording until after the first init to avoid the extra message.
+        EXPECT_NOERR(apm_->StartDebugRecording(out_filename.c_str()));
+        first_init = false;
+      }
+
+    } else if (event_msg.type() == audioproc::Event::REVERSE_STREAM) {
+      const audioproc::ReverseStream msg = event_msg.reverse_stream();
+
+      if (msg.channel_size() > 0) {
+        ASSERT_EQ(revframe_->num_channels_, msg.channel_size());
+        for (int i = 0; i < msg.channel_size(); ++i) {
+          memcpy(revfloat_cb_->channel(i), msg.channel(i).data(),
+                 msg.channel(i).size());
+        }
+      } else {
+        memcpy(revframe_->data_, msg.data().data(), msg.data().size());
+        if (format == kFloatFormat) {
+          // We're using an int16 input file; convert to float.
+          ConvertToFloat(*revframe_, revfloat_cb_.get());
+        }
+      }
+      AnalyzeReverseStreamChooser(format);
+
+    } else if (event_msg.type() == audioproc::Event::STREAM) {
+      const audioproc::Stream msg = event_msg.stream();
+      // ProcessStream could have changed this for the output frame.
+      frame_->num_channels_ = apm_->num_input_channels();
+
+      EXPECT_NOERR(apm_->gain_control()->set_stream_analog_level(msg.level()));
+      EXPECT_NOERR(apm_->set_stream_delay_ms(msg.delay()));
+      apm_->echo_cancellation()->set_stream_drift_samples(msg.drift());
+      if (msg.has_keypress()) {
+        apm_->set_stream_key_pressed(msg.keypress());
+      } else {
+        apm_->set_stream_key_pressed(true);
+      }
+
+      if (msg.input_channel_size() > 0) {
+        ASSERT_EQ(frame_->num_channels_, msg.input_channel_size());
+        for (int i = 0; i < msg.input_channel_size(); ++i) {
+          memcpy(float_cb_->channel(i), msg.input_channel(i).data(),
+                 msg.input_channel(i).size());
+        }
+      } else {
+        memcpy(frame_->data_, msg.input_data().data(), msg.input_data().size());
+        if (format == kFloatFormat) {
+          // We're using an int16 input file; convert to float.
+          ConvertToFloat(*frame_, float_cb_.get());
+        }
+      }
+      ProcessStreamChooser(format);
+    }
+  }
+  EXPECT_NOERR(apm_->StopDebugRecording());
+  fclose(in_file);
+}
+
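+// Processes the recorded dump once to produce a reference dump, then replays
+// that reference dump; because a dump captures all inputs and stream
+// parameters, the second pass must reproduce the first byte for byte.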
+void ApmTest::VerifyDebugDumpTest(Format format) {
+  const std::string in_filename = test::ResourcePath("ref03", "aecdump");
+  std::string format_string;
+  switch (format) {
+    case kIntFormat:
+      format_string = "_int";
+      break;
+    case kFloatFormat:
+      format_string = "_float";
+      break;
+  }
+  const std::string ref_filename =
+      test::OutputPath() + "ref" + format_string + ".aecdump";
+  const std::string out_filename =
+      test::OutputPath() + "out" + format_string + ".aecdump";
+  EnableAllComponents();
+  ProcessDebugDump(in_filename, ref_filename, format);
+  ProcessDebugDump(ref_filename, out_filename, format);
+
+  FILE* ref_file = fopen(ref_filename.c_str(), "rb");
+  FILE* out_file = fopen(out_filename.c_str(), "rb");
+  ASSERT_TRUE(ref_file != NULL);
+  ASSERT_TRUE(out_file != NULL);
+  scoped_ptr<uint8_t[]> ref_bytes;
+  scoped_ptr<uint8_t[]> out_bytes;
+
+  size_t ref_size = ReadMessageBytesFromFile(ref_file, &ref_bytes);
+  size_t out_size = ReadMessageBytesFromFile(out_file, &out_bytes);
+  size_t bytes_read = 0;
+  while (ref_size > 0 && out_size > 0) {
+    bytes_read += ref_size;
+    EXPECT_EQ(ref_size, out_size);
+    EXPECT_EQ(0, memcmp(ref_bytes.get(), out_bytes.get(), ref_size));
+    ref_size = ReadMessageBytesFromFile(ref_file, &ref_bytes);
+    out_size = ReadMessageBytesFromFile(out_file, &out_bytes);
+  }
+  EXPECT_GT(bytes_read, 0u);
+  EXPECT_NE(0, feof(ref_file));
+  EXPECT_NE(0, feof(out_file));
+  ASSERT_EQ(0, fclose(ref_file));
+  ASSERT_EQ(0, fclose(out_file));
+}
+
+TEST_F(ApmTest, VerifyDebugDumpInt) {
+  VerifyDebugDumpTest(kIntFormat);
+}
+
+TEST_F(ApmTest, VerifyDebugDumpFloat) {
+  VerifyDebugDumpTest(kFloatFormat);
+}
+#endif
+
 // TODO(andrew): expand test to verify output.
 TEST_F(ApmTest, DebugDump) {
-  const std::string filename = webrtc::test::OutputPath() + "debug.aec";
+  const std::string filename = test::OutputPath() + "debug.aec";
   EXPECT_EQ(apm_->kNullPointerError,
             apm_->StartDebugRecording(static_cast<const char*>(NULL)));
 
@@ -1305,7 +1621,7 @@ TEST_F(ApmTest, DebugDump) {
 TEST_F(ApmTest, DebugDumpFromFileHandle) {
   FILE* fid = NULL;
   EXPECT_EQ(apm_->kNullPointerError, apm_->StartDebugRecording(fid));
-  const std::string filename = webrtc::test::OutputPath() + "debug.aec";
+  const std::string filename = test::OutputPath() + "debug.aec";
   fid = fopen(filename.c_str(), "w");
   ASSERT_TRUE(fid);
 
@@ -1334,24 +1650,109 @@ TEST_F(ApmTest, DebugDumpFromFileHandle) {
 #endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
 }
 
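+// Feeds identical input through the int16 interface (on |apm_|) and the float
+// interface (on a second instance) and verifies that output and reported
+// statistics match exactly.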
+TEST_F(ApmTest, FloatAndIntInterfacesGiveIdenticalResults) {
+  audioproc::OutputData ref_data;
+  OpenFileAndReadMessage(ref_filename_, &ref_data);
+
+  Config config;
+  config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
+  scoped_ptr<AudioProcessing> fapm(AudioProcessing::Create(config));
+  EnableAllComponents();
+  EnableAllAPComponents(fapm.get());
+  for (int i = 0; i < ref_data.test_size(); i++) {
+    printf("Running test %d of %d...\n", i + 1, ref_data.test_size());
+
+    audioproc::Test* test = ref_data.mutable_test(i);
+    // TODO(ajm): Restore downmixing test cases.
+    if (test->num_input_channels() != test->num_output_channels())
+      continue;
+
+    const int num_render_channels = test->num_reverse_channels();
+    const int num_input_channels = test->num_input_channels();
+    const int num_output_channels = test->num_output_channels();
+    const int samples_per_channel = test->sample_rate() *
+        AudioProcessing::kChunkSizeMs / 1000;
+    const int output_length = samples_per_channel * num_output_channels;
+
+    Init(test->sample_rate(), test->sample_rate(), test->sample_rate(),
+         num_input_channels, num_output_channels, num_render_channels, true);
+    Init(fapm.get());
+
+    ChannelBuffer<int16_t> output_cb(samples_per_channel, num_input_channels);
+    scoped_ptr<int16_t[]> output_int16(new int16_t[output_length]);
+
+    int analog_level = 127;
+    while (ReadFrame(far_file_, revframe_, revfloat_cb_.get()) &&
+           ReadFrame(near_file_, frame_, float_cb_.get())) {
+      frame_->vad_activity_ = AudioFrame::kVadUnknown;
+
+      EXPECT_NOERR(apm_->AnalyzeReverseStream(revframe_));
+      EXPECT_NOERR(fapm->AnalyzeReverseStream(
+          revfloat_cb_->channels(),
+          samples_per_channel,
+          test->sample_rate(),
+          LayoutFromChannels(num_render_channels)));
+
+      EXPECT_NOERR(apm_->set_stream_delay_ms(0));
+      EXPECT_NOERR(fapm->set_stream_delay_ms(0));
+      apm_->echo_cancellation()->set_stream_drift_samples(0);
+      fapm->echo_cancellation()->set_stream_drift_samples(0);
+      EXPECT_NOERR(apm_->gain_control()->set_stream_analog_level(analog_level));
+      EXPECT_NOERR(fapm->gain_control()->set_stream_analog_level(analog_level));
+
+      EXPECT_NOERR(apm_->ProcessStream(frame_));
+      // TODO(ajm): Update to support different output rates.
+      EXPECT_NOERR(fapm->ProcessStream(
+          float_cb_->channels(),
+          samples_per_channel,
+          test->sample_rate(),
+          LayoutFromChannels(num_input_channels),
+          test->sample_rate(),
+          LayoutFromChannels(num_output_channels),
+          float_cb_->channels()));
+
+      // Convert to interleaved int16.
+      ScaleAndRoundToInt16(float_cb_->data(), output_length, output_cb.data());
+      Interleave(output_cb.channels(),
+                 samples_per_channel,
+                 num_output_channels,
+                 output_int16.get());
+      // Verify float and int16 paths produce identical output.
+      EXPECT_EQ(0, memcmp(frame_->data_, output_int16.get(),
+                          output_length * sizeof(int16_t)));
+
+      analog_level = fapm->gain_control()->stream_analog_level();
+      EXPECT_EQ(apm_->gain_control()->stream_analog_level(),
+                fapm->gain_control()->stream_analog_level());
+      EXPECT_EQ(apm_->echo_cancellation()->stream_has_echo(),
+                fapm->echo_cancellation()->stream_has_echo());
+      EXPECT_EQ(apm_->voice_detection()->stream_has_voice(),
+                fapm->voice_detection()->stream_has_voice());
+      EXPECT_EQ(apm_->noise_suppression()->speech_probability(),
+                fapm->noise_suppression()->speech_probability());
+
+      // Reset in case of downmixing.
+      frame_->num_channels_ = test->num_input_channels();
+    }
+    rewind(far_file_);
+    rewind(near_file_);
+  }
+}
+
 // TODO(andrew): Add a test to process a few frames with different combinations
 // of enabled components.
 
-// TODO(andrew): Make this test more robust such that it can be run on multiple
-// platforms. It currently requires bit-exactness.
-#ifdef WEBRTC_AUDIOPROC_BIT_EXACT
-TEST_F(ApmTest, DISABLED_Process) {
+TEST_F(ApmTest, Process) {
   GOOGLE_PROTOBUF_VERIFY_VERSION;
-  webrtc::audioproc::OutputData ref_data;
+  audioproc::OutputData ref_data;
 
   if (!write_ref_data) {
-    ReadMessageLiteFromFile(ref_filename_, &ref_data);
+    OpenFileAndReadMessage(ref_filename_, &ref_data);
   } else {
     // Write the desired tests to the protobuf reference file.
     for (size_t i = 0; i < kChannelsSize; i++) {
       for (size_t j = 0; j < kChannelsSize; j++) {
         for (size_t l = 0; l < kProcessSampleRatesSize; l++) {
-          webrtc::audioproc::Test* test = ref_data.add_test();
+          audioproc::Test* test = ref_data.add_test();
           test->set_num_reverse_channels(kChannels[i]);
           test->set_num_input_channels(kChannels[j]);
           test->set_num_output_channels(kChannels[j]);
@@ -1366,14 +1767,19 @@ TEST_F(ApmTest, DISABLED_Process) {
   for (int i = 0; i < ref_data.test_size(); i++) {
     printf("Running test %d of %d...\n", i + 1, ref_data.test_size());
 
-    webrtc::audioproc::Test* test = ref_data.mutable_test(i);
+    audioproc::Test* test = ref_data.mutable_test(i);
     // TODO(ajm): We no longer allow different input and output channels. Skip
     // these tests for now, but they should be removed from the set.
     if (test->num_input_channels() != test->num_output_channels())
       continue;
 
-    Init(test->sample_rate(), test->num_reverse_channels(),
-         test->num_input_channels(), test->num_output_channels(), true);
+    Init(test->sample_rate(),
+         test->sample_rate(),
+         test->sample_rate(),
+         test->num_input_channels(),
+         test->num_output_channels(),
+         test->num_reverse_channels(),
+         true);
 
     int frame_count = 0;
     int has_echo_count = 0;
@@ -1384,11 +1790,9 @@ TEST_F(ApmTest, DISABLED_Process) {
     int max_output_average = 0;
     float ns_speech_prob_average = 0.0f;
 
-    while (1) {
-      if (!ReadFrame(far_file_, revframe_)) break;
+    while (ReadFrame(far_file_, revframe_) && ReadFrame(near_file_, frame_)) {
       EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));
 
-      if (!ReadFrame(near_file_, frame_)) break;
       frame_->vad_activity_ = AudioFrame::kVadUnknown;
 
       EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
@@ -1397,6 +1801,7 @@ TEST_F(ApmTest, DISABLED_Process) {
           apm_->gain_control()->set_stream_analog_level(analog_level));
 
       EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+
       // Ensure the frame was downmixed properly.
       EXPECT_EQ(test->num_output_channels(), frame_->num_channels_);
 
@@ -1450,16 +1855,37 @@ TEST_F(ApmTest, DISABLED_Process) {
 #endif
 
     if (!write_ref_data) {
-      EXPECT_EQ(test->has_echo_count(), has_echo_count);
-      EXPECT_EQ(test->has_voice_count(), has_voice_count);
-      EXPECT_EQ(test->is_saturated_count(), is_saturated_count);
+      const int kIntNear = 1;
+      // When running the test on an N7 we get a {2, 6} difference in
+      // |has_voice_count|, and |max_output_average| is up to 18 higher.
+      // All numbers are consistently higher on the N7 compared to ref_data.
+      // TODO(bjornv): If we start getting more of these offsets on Android we
+      // should consider a different approach. Either use one slack for all,
+      // or generate a separate Android reference.
+#if defined(WEBRTC_ANDROID)
+      const int kHasVoiceCountOffset = 3;
+      const int kHasVoiceCountNear = 3;
+      const int kMaxOutputAverageOffset = 9;
+      const int kMaxOutputAverageNear = 9;
+#else
+      const int kHasVoiceCountOffset = 0;
+      const int kHasVoiceCountNear = kIntNear;
+      const int kMaxOutputAverageOffset = 0;
+      const int kMaxOutputAverageNear = kIntNear;
+#endif
+      EXPECT_NEAR(test->has_echo_count(), has_echo_count, kIntNear);
+      EXPECT_NEAR(test->has_voice_count(),
+                  has_voice_count - kHasVoiceCountOffset,
+                  kHasVoiceCountNear);
+      EXPECT_NEAR(test->is_saturated_count(), is_saturated_count, kIntNear);
 
-      EXPECT_EQ(test->analog_level_average(), analog_level_average);
-      EXPECT_EQ(test->max_output_average(), max_output_average);
+      EXPECT_NEAR(test->analog_level_average(), analog_level_average, kIntNear);
+      EXPECT_NEAR(test->max_output_average(),
+                  max_output_average - kMaxOutputAverageOffset,
+                  kMaxOutputAverageNear);
 
 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
-      webrtc::audioproc::Test::EchoMetrics reference =
-          test->echo_metrics();
+      audioproc::Test::EchoMetrics reference = test->echo_metrics();
       TestStats(echo_metrics.residual_echo_return_loss,
                 reference.residual_echo_return_loss());
       TestStats(echo_metrics.echo_return_loss,
@@ -1469,15 +1895,16 @@ TEST_F(ApmTest, DISABLED_Process) {
       TestStats(echo_metrics.a_nlp,
                 reference.a_nlp());
 
-      webrtc::audioproc::Test::DelayMetrics reference_delay =
-          test->delay_metrics();
-      EXPECT_EQ(reference_delay.median(), median);
-      EXPECT_EQ(reference_delay.std(), std);
+      const double kFloatNear = 0.0005;
+      audioproc::Test::DelayMetrics reference_delay = test->delay_metrics();
+      EXPECT_NEAR(reference_delay.median(), median, kIntNear);
+      EXPECT_NEAR(reference_delay.std(), std, kIntNear);
 
-      EXPECT_EQ(test->rms_level(), rms_level);
+      EXPECT_NEAR(test->rms_level(), rms_level, kIntNear);
 
-      EXPECT_FLOAT_EQ(test->ns_speech_probability_average(),
-                      ns_speech_prob_average);
+      EXPECT_NEAR(test->ns_speech_probability_average(),
+                  ns_speech_prob_average,
+                  kFloatNear);
 #endif
     } else {
       test->set_has_echo_count(has_echo_count);
@@ -1488,8 +1915,7 @@ TEST_F(ApmTest, DISABLED_Process) {
       test->set_max_output_average(max_output_average);
 
 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
-      webrtc::audioproc::Test::EchoMetrics* message =
-          test->mutable_echo_metrics();
+      audioproc::Test::EchoMetrics* message = test->mutable_echo_metrics();
       WriteStatsMessage(echo_metrics.residual_echo_return_loss,
                         message->mutable_residual_echo_return_loss());
       WriteStatsMessage(echo_metrics.echo_return_loss,
@@ -1499,7 +1925,7 @@ TEST_F(ApmTest, DISABLED_Process) {
       WriteStatsMessage(echo_metrics.a_nlp,
                         message->mutable_a_nlp());
 
-      webrtc::audioproc::Test::DelayMetrics* message_delay =
+      audioproc::Test::DelayMetrics* message_delay =
           test->mutable_delay_metrics();
       message_delay->set_median(median);
       message_delay->set_std(std);
@@ -1517,13 +1943,500 @@ TEST_F(ApmTest, DISABLED_Process) {
   }
 
   if (write_ref_data) {
-    WriteMessageLiteToFile(ref_filename_, ref_data);
+    OpenFileAndWriteMessage(ref_filename_, ref_data);
+  }
+}
+
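+// Verifies that ProcessStream() accepts layouts that include a keyboard
+// channel without returning an error, while also resampling from 44.1 kHz
+// input to 48 kHz output.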
+TEST_F(ApmTest, NoErrorsWithKeyboardChannel) {
+  struct ChannelFormat {
+    AudioProcessing::ChannelLayout in_layout;
+    AudioProcessing::ChannelLayout out_layout;
+  };
+  ChannelFormat cf[] = {
+    {AudioProcessing::kMonoAndKeyboard, AudioProcessing::kMono},
+    {AudioProcessing::kStereoAndKeyboard, AudioProcessing::kMono},
+    {AudioProcessing::kStereoAndKeyboard, AudioProcessing::kStereo},
+  };
+  size_t channel_format_size = sizeof(cf) / sizeof(*cf);
+
+  scoped_ptr<AudioProcessing> ap(AudioProcessing::Create());
+  // Enable one component just to ensure some processing takes place.
+  ap->noise_suppression()->Enable(true);
+  for (size_t i = 0; i < channel_format_size; ++i) {
+    const int in_rate = 44100;
+    const int out_rate = 48000;
+    ChannelBuffer<float> in_cb(SamplesFromRate(in_rate),
+                               TotalChannelsFromLayout(cf[i].in_layout));
+    ChannelBuffer<float> out_cb(SamplesFromRate(out_rate),
+                                ChannelsFromLayout(cf[i].out_layout));
+
+    // Run over a few chunks.
+    for (int j = 0; j < 10; ++j) {
+      EXPECT_NOERR(ap->ProcessStream(
+          in_cb.channels(),
+          in_cb.samples_per_channel(),
+          in_rate,
+          cf[i].in_layout,
+          out_rate,
+          cf[i].out_layout,
+          out_cb.channels()));
+    }
+  }
+}
+
+// Reads a 10 ms chunk of int16 interleaved audio from the given (assumed
+// stereo) file, converts to deinterleaved float (optionally downmixing) and
+// returns the result in |cb|. Returns false if the file ended (or on error) and
+// true otherwise.
+//
+// |int_data| and |float_data| are just temporary space that must be
+// sufficiently large to hold the 10 ms chunk.
+bool ReadChunk(FILE* file, int16_t* int_data, float* float_data,
+               ChannelBuffer<float>* cb) {
+  // The files always contain stereo audio.
+  size_t frame_size = cb->samples_per_channel() * 2;
+  size_t read_count = fread(int_data, sizeof(int16_t), frame_size, file);
+  if (read_count != frame_size) {
+    // Check that the file really ended.
+    assert(feof(file));
+    return false;  // This is expected.
+  }
+
+  ScaleToFloat(int_data, frame_size, float_data);
+  if (cb->num_channels() == 1) {
+    MixStereoToMono(float_data, cb->data(), cb->samples_per_channel());
+  } else {
+    Deinterleave(float_data, cb->samples_per_channel(), 2,
+                 cb->channels());
+  }
+
+  return true;
+}
+
+// Compares the reference and test arrays over a region around the expected
+// delay. Finds the highest SNR in that region and adds the variance and squared
+// error results to the supplied accumulators.
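+// SNR here is the ratio of reference signal energy to squared-error energy;
+// the caller converts the accumulated ratio to dB when reporting.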
+void UpdateBestSNR(const float* ref,
+                   const float* test,
+                   int length,
+                   int expected_delay,
+                   double* variance_acc,
+                   double* sq_error_acc) {
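+  // SNR is non-negative, so the smallest positive double is a safe initial
+  // "worst" value.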
+  double best_snr = std::numeric_limits<double>::min();
+  double best_variance = 0;
+  double best_sq_error = 0;
+  // Search over a region of +/- 4 samples around the expected delay.
+  for (int delay = std::max(expected_delay - 4, 0); delay <= expected_delay + 4;
+       ++delay) {
+    double sq_error = 0;
+    double variance = 0;
+    for (int i = 0; i < length - delay; ++i) {
+      double error = test[i + delay] - ref[i];
+      sq_error += error * error;
+      variance += ref[i] * ref[i];
+    }
+
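+    // A zero squared error means the output matches the reference exactly at
+    // this delay; no other delay can do better.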
+    if (sq_error == 0) {
+      *variance_acc += variance;
+      return;
+    }
+    double snr = variance / sq_error;
+    if (snr > best_snr) {
+      best_snr = snr;
+      best_variance = variance;
+      best_sq_error = sq_error;
+    }
   }
+
+  *variance_acc += best_variance;
+  *sq_error_acc += best_sq_error;
 }
-#endif  // WEBRTC_AUDIOPROC_BIT_EXACT
+
+// Used to test a multitude of sample rate and channel combinations. It works
+// by first producing a set of reference files (in SetUpTestCase) that are
+// assumed to be correct, as the parameters used to produce them are verified
+// by other tests in this collection. Notably, the reference files are all
+// produced at "native" rates which do not involve any resampling.
+//
+// Each test pass produces an output file with a particular format. The output
+// is matched against the reference file closest to its internal processing
+// format. If necessary, the output is first resampled back to that reference
+// format. Due to the resampling distortion, we don't expect identical results,
+// but enforce SNR thresholds which vary depending on the format. An expected
+// SNR of 0 is a special case denoting infinite SNR, i.e. zero error.
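+//
+// The test parameter tuple holds <input rate, output rate, reverse rate,
+// expected SNR (dB)>.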
+typedef std::tr1::tuple<int, int, int, double> AudioProcessingTestData;
+class AudioProcessingTest
+    : public testing::TestWithParam<AudioProcessingTestData> {
+ public:
+  AudioProcessingTest()
+      : input_rate_(std::tr1::get<0>(GetParam())),
+        output_rate_(std::tr1::get<1>(GetParam())),
+        reverse_rate_(std::tr1::get<2>(GetParam())),
+        expected_snr_(std::tr1::get<3>(GetParam())) {}
+
+  virtual ~AudioProcessingTest() {}
+
+  static void SetUpTestCase() {
+    // Create all needed output reference files.
+    const int kNativeRates[] = {8000, 16000, 32000};
+    const size_t kNativeRatesSize =
+        sizeof(kNativeRates) / sizeof(*kNativeRates);
+    const int kNumChannels[] = {1, 2};
+    const size_t kNumChannelsSize =
+        sizeof(kNumChannels) / sizeof(*kNumChannels);
+    for (size_t i = 0; i < kNativeRatesSize; ++i) {
+      for (size_t j = 0; j < kNumChannelsSize; ++j) {
+        for (size_t k = 0; k < kNumChannelsSize; ++k) {
+          // The reference files always have matching input and output channels.
+          ProcessFormat(kNativeRates[i],
+                        kNativeRates[i],
+                        kNativeRates[i],
+                        kNumChannels[j],
+                        kNumChannels[j],
+                        kNumChannels[k],
+                        "ref");
+        }
+      }
+    }
+  }
+
+  // Runs a process pass on files with the given parameters and dumps the output
+  // to a file specified with |output_file_prefix|.
+  static void ProcessFormat(int input_rate,
+                            int output_rate,
+                            int reverse_rate,
+                            int num_input_channels,
+                            int num_output_channels,
+                            int num_reverse_channels,
+                            std::string output_file_prefix) {
+    scoped_ptr<AudioProcessing> ap(AudioProcessing::Create());
+    EnableAllAPComponents(ap.get());
+    ap->Initialize(input_rate,
+                   output_rate,
+                   reverse_rate,
+                   LayoutFromChannels(num_input_channels),
+                   LayoutFromChannels(num_output_channels),
+                   LayoutFromChannels(num_reverse_channels));
+
+    FILE* far_file = fopen(ResourceFilePath("far", reverse_rate).c_str(), "rb");
+    FILE* near_file = fopen(ResourceFilePath("near", input_rate).c_str(), "rb");
+    FILE* out_file = fopen(OutputFilePath(output_file_prefix,
+                                          input_rate,
+                                          output_rate,
+                                          reverse_rate,
+                                          num_input_channels,
+                                          num_output_channels,
+                                          num_reverse_channels).c_str(), "wb");
+    ASSERT_TRUE(far_file != NULL);
+    ASSERT_TRUE(near_file != NULL);
+    ASSERT_TRUE(out_file != NULL);
+
+    ChannelBuffer<float> fwd_cb(SamplesFromRate(input_rate),
+                                num_input_channels);
+    ChannelBuffer<float> rev_cb(SamplesFromRate(reverse_rate),
+                                num_reverse_channels);
+    ChannelBuffer<float> out_cb(SamplesFromRate(output_rate),
+                                num_output_channels);
+
+    // Temporary buffers.
+    const int max_length =
+        2 * std::max(out_cb.samples_per_channel(),
+                     std::max(fwd_cb.samples_per_channel(),
+                              rev_cb.samples_per_channel()));
+    scoped_ptr<float[]> float_data(new float[max_length]);
+    scoped_ptr<int16_t[]> int_data(new int16_t[max_length]);
+
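+    // Feed the analog level reported after each processed frame back into the
+    // next frame, emulating the AGC's analog mode feedback loop.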
+    int analog_level = 127;
+    while (ReadChunk(far_file, int_data.get(), float_data.get(), &rev_cb) &&
+           ReadChunk(near_file, int_data.get(), float_data.get(), &fwd_cb)) {
+      EXPECT_NOERR(ap->AnalyzeReverseStream(
+          rev_cb.channels(),
+          rev_cb.samples_per_channel(),
+          reverse_rate,
+          LayoutFromChannels(num_reverse_channels)));
+
+      EXPECT_NOERR(ap->set_stream_delay_ms(0));
+      ap->echo_cancellation()->set_stream_drift_samples(0);
+      EXPECT_NOERR(ap->gain_control()->set_stream_analog_level(analog_level));
+
+      EXPECT_NOERR(ap->ProcessStream(
+          fwd_cb.channels(),
+          fwd_cb.samples_per_channel(),
+          input_rate,
+          LayoutFromChannels(num_input_channels),
+          output_rate,
+          LayoutFromChannels(num_output_channels),
+          out_cb.channels()));
+
+      Interleave(out_cb.channels(),
+                 out_cb.samples_per_channel(),
+                 out_cb.num_channels(),
+                 float_data.get());
+      // Dump output to file.
+      ASSERT_EQ(static_cast<size_t>(out_cb.length()),
+                fwrite(float_data.get(), sizeof(float_data[0]),
+                       out_cb.length(), out_file));
+
+      analog_level = ap->gain_control()->stream_analog_level();
+    }
+    fclose(far_file);
+    fclose(near_file);
+    fclose(out_file);
+  }
+
+ protected:
+  int input_rate_;
+  int output_rate_;
+  int reverse_rate_;
+  double expected_snr_;
+};
+
+TEST_P(AudioProcessingTest, Formats) {
+  struct ChannelFormat {
+    int num_input;
+    int num_output;
+    int num_reverse;
+  };
+  ChannelFormat cf[] = {
+    {1, 1, 1},
+    {1, 1, 2},
+    {2, 1, 1},
+    {2, 1, 2},
+    {2, 2, 1},
+    {2, 2, 2},
+  };
+  size_t channel_format_size = sizeof(cf) / sizeof(*cf);
+
+  for (size_t i = 0; i < channel_format_size; ++i) {
+    ProcessFormat(input_rate_,
+                  output_rate_,
+                  reverse_rate_,
+                  cf[i].num_input,
+                  cf[i].num_output,
+                  cf[i].num_reverse,
+                  "out");
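+    // The reference format to compare against is the closest native rate at
+    // or below the narrower of the input and output rates.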
+    int min_ref_rate = std::min(input_rate_, output_rate_);
+    int ref_rate;
+    if (min_ref_rate > 16000) {
+      ref_rate = 32000;
+    } else if (min_ref_rate > 8000) {
+      ref_rate = 16000;
+    } else {
+      ref_rate = 8000;
+    }
+#ifdef WEBRTC_AUDIOPROC_FIXED_PROFILE
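+    // The fixed-point profile caps internal processing at 16 kHz, so cap the
+    // reference rate accordingly.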
+    ref_rate = std::min(ref_rate, 16000);
+#endif
+
+    FILE* out_file = fopen(OutputFilePath("out",
+                                          input_rate_,
+                                          output_rate_,
+                                          reverse_rate_,
+                                          cf[i].num_input,
+                                          cf[i].num_output,
+                                          cf[i].num_reverse).c_str(), "rb");
+    // The reference files always have matching input and output channels.
+    FILE* ref_file = fopen(OutputFilePath("ref",
+                                          ref_rate,
+                                          ref_rate,
+                                          ref_rate,
+                                          cf[i].num_output,
+                                          cf[i].num_output,
+                                          cf[i].num_reverse).c_str(), "rb");
+    ASSERT_TRUE(out_file != NULL);
+    ASSERT_TRUE(ref_file != NULL);
+
+    const int ref_length = SamplesFromRate(ref_rate) * cf[i].num_output;
+    const int out_length = SamplesFromRate(output_rate_) * cf[i].num_output;
+    // Data from the reference file.
+    scoped_ptr<float[]> ref_data(new float[ref_length]);
+    // Data from the output file.
+    scoped_ptr<float[]> out_data(new float[out_length]);
+    // Data from the resampled output, in case the reference and output rates
+    // don't match.
+    scoped_ptr<float[]> cmp_data(new float[ref_length]);
+
+    PushResampler<float> resampler;
+    resampler.InitializeIfNeeded(output_rate_, ref_rate, cf[i].num_output);
+
+    // Compute the resampling delay of the output relative to the reference,
+    // to find the region over which we should search for the best SNR.
+    float expected_delay_sec = 0;
+    if (input_rate_ != ref_rate) {
+      // Input resampling delay.
+      expected_delay_sec +=
+          PushSincResampler::AlgorithmicDelaySeconds(input_rate_);
+    }
+    if (output_rate_ != ref_rate) {
+      // Output resampling delay.
+      expected_delay_sec +=
+          PushSincResampler::AlgorithmicDelaySeconds(ref_rate);
+      // Delay of converting the output back to its processing rate for testing.
+      expected_delay_sec +=
+          PushSincResampler::AlgorithmicDelaySeconds(output_rate_);
+    }
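+    // The delay is counted in interleaved samples, hence the channel factor.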
+    int expected_delay = floor(expected_delay_sec * ref_rate + 0.5f) *
+                         cf[i].num_output;
+
+    double variance = 0;
+    double sq_error = 0;
+    while (fread(out_data.get(), sizeof(out_data[0]), out_length, out_file) &&
+           fread(ref_data.get(), sizeof(ref_data[0]), ref_length, ref_file)) {
+      float* out_ptr = out_data.get();
+      if (output_rate_ != ref_rate) {
+        // Resample the output back to its internal processing rate if
+        // necessary.
+        ASSERT_EQ(ref_length, resampler.Resample(out_ptr,
+                                                 out_length,
+                                                 cmp_data.get(),
+                                                 ref_length));
+        out_ptr = cmp_data.get();
+      }
+
+      // Update the |sq_error| and |variance| accumulators with the highest SNR
+      // of reference vs output.
+      UpdateBestSNR(ref_data.get(),
+                    out_ptr,
+                    ref_length,
+                    expected_delay,
+                    &variance,
+                    &sq_error);
+    }
+
+    std::cout << "(" << input_rate_ << ", "
+                     << output_rate_ << ", "
+                     << reverse_rate_ << ", "
+                     << cf[i].num_input << ", "
+                     << cf[i].num_output << ", "
+                     << cf[i].num_reverse << "): ";
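+    // A zero squared error is reported as infinite SNR; this is expected only
+    // for formats processed without any resampling (expected_snr_ == 0).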
+    if (sq_error > 0) {
+      double snr = 10 * log10(variance / sq_error);
+      EXPECT_GE(snr, expected_snr_);
+      EXPECT_NE(0, expected_snr_);
+      std::cout << "SNR=" << snr << " dB" << std::endl;
+    } else {
+      EXPECT_EQ(expected_snr_, 0);
+      std::cout << "SNR=" << "inf dB" << std::endl;
+    }
+
+    fclose(out_file);
+    fclose(ref_file);
+  }
+}
+
+#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
+INSTANTIATE_TEST_CASE_P(
+    CommonFormats, AudioProcessingTest, testing::Values(
+        std::tr1::make_tuple(48000, 48000, 48000, 20),
+        std::tr1::make_tuple(48000, 48000, 32000, 20),
+        std::tr1::make_tuple(48000, 48000, 16000, 20),
+        std::tr1::make_tuple(48000, 44100, 48000, 15),
+        std::tr1::make_tuple(48000, 44100, 32000, 15),
+        std::tr1::make_tuple(48000, 44100, 16000, 15),
+        std::tr1::make_tuple(48000, 32000, 48000, 20),
+        std::tr1::make_tuple(48000, 32000, 32000, 20),
+        std::tr1::make_tuple(48000, 32000, 16000, 20),
+        std::tr1::make_tuple(48000, 16000, 48000, 20),
+        std::tr1::make_tuple(48000, 16000, 32000, 20),
+        std::tr1::make_tuple(48000, 16000, 16000, 20),
+
+        std::tr1::make_tuple(44100, 48000, 48000, 20),
+        std::tr1::make_tuple(44100, 48000, 32000, 20),
+        std::tr1::make_tuple(44100, 48000, 16000, 20),
+        std::tr1::make_tuple(44100, 44100, 48000, 15),
+        std::tr1::make_tuple(44100, 44100, 32000, 15),
+        std::tr1::make_tuple(44100, 44100, 16000, 15),
+        std::tr1::make_tuple(44100, 32000, 48000, 20),
+        std::tr1::make_tuple(44100, 32000, 32000, 20),
+        std::tr1::make_tuple(44100, 32000, 16000, 20),
+        std::tr1::make_tuple(44100, 16000, 48000, 20),
+        std::tr1::make_tuple(44100, 16000, 32000, 20),
+        std::tr1::make_tuple(44100, 16000, 16000, 20),
+
+        std::tr1::make_tuple(32000, 48000, 48000, 25),
+        std::tr1::make_tuple(32000, 48000, 32000, 25),
+        std::tr1::make_tuple(32000, 48000, 16000, 25),
+        std::tr1::make_tuple(32000, 44100, 48000, 20),
+        std::tr1::make_tuple(32000, 44100, 32000, 20),
+        std::tr1::make_tuple(32000, 44100, 16000, 20),
+        std::tr1::make_tuple(32000, 32000, 48000, 30),
+        std::tr1::make_tuple(32000, 32000, 32000, 0),
+        std::tr1::make_tuple(32000, 32000, 16000, 30),
+        std::tr1::make_tuple(32000, 16000, 48000, 20),
+        std::tr1::make_tuple(32000, 16000, 32000, 20),
+        std::tr1::make_tuple(32000, 16000, 16000, 20),
+
+        std::tr1::make_tuple(16000, 48000, 48000, 25),
+        std::tr1::make_tuple(16000, 48000, 32000, 25),
+        std::tr1::make_tuple(16000, 48000, 16000, 25),
+        std::tr1::make_tuple(16000, 44100, 48000, 15),
+        std::tr1::make_tuple(16000, 44100, 32000, 15),
+        std::tr1::make_tuple(16000, 44100, 16000, 15),
+        std::tr1::make_tuple(16000, 32000, 48000, 25),
+        std::tr1::make_tuple(16000, 32000, 32000, 25),
+        std::tr1::make_tuple(16000, 32000, 16000, 25),
+        std::tr1::make_tuple(16000, 16000, 48000, 30),
+        std::tr1::make_tuple(16000, 16000, 32000, 30),
+        std::tr1::make_tuple(16000, 16000, 16000, 0)));
+
+#elif defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
+INSTANTIATE_TEST_CASE_P(
+    CommonFormats, AudioProcessingTest, testing::Values(
+        std::tr1::make_tuple(48000, 48000, 48000, 20),
+        std::tr1::make_tuple(48000, 48000, 32000, 20),
+        std::tr1::make_tuple(48000, 48000, 16000, 20),
+        std::tr1::make_tuple(48000, 44100, 48000, 15),
+        std::tr1::make_tuple(48000, 44100, 32000, 15),
+        std::tr1::make_tuple(48000, 44100, 16000, 15),
+        std::tr1::make_tuple(48000, 32000, 48000, 20),
+        std::tr1::make_tuple(48000, 32000, 32000, 20),
+        std::tr1::make_tuple(48000, 32000, 16000, 20),
+        std::tr1::make_tuple(48000, 16000, 48000, 20),
+        std::tr1::make_tuple(48000, 16000, 32000, 20),
+        std::tr1::make_tuple(48000, 16000, 16000, 20),
+
+        std::tr1::make_tuple(44100, 48000, 48000, 19),
+        std::tr1::make_tuple(44100, 48000, 32000, 19),
+        std::tr1::make_tuple(44100, 48000, 16000, 19),
+        std::tr1::make_tuple(44100, 44100, 48000, 15),
+        std::tr1::make_tuple(44100, 44100, 32000, 15),
+        std::tr1::make_tuple(44100, 44100, 16000, 15),
+        std::tr1::make_tuple(44100, 32000, 48000, 19),
+        std::tr1::make_tuple(44100, 32000, 32000, 19),
+        std::tr1::make_tuple(44100, 32000, 16000, 19),
+        std::tr1::make_tuple(44100, 16000, 48000, 19),
+        std::tr1::make_tuple(44100, 16000, 32000, 19),
+        std::tr1::make_tuple(44100, 16000, 16000, 19),
+
+        std::tr1::make_tuple(32000, 48000, 48000, 19),
+        std::tr1::make_tuple(32000, 48000, 32000, 19),
+        std::tr1::make_tuple(32000, 48000, 16000, 19),
+        std::tr1::make_tuple(32000, 44100, 48000, 15),
+        std::tr1::make_tuple(32000, 44100, 32000, 15),
+        std::tr1::make_tuple(32000, 44100, 16000, 15),
+        std::tr1::make_tuple(32000, 32000, 48000, 19),
+        std::tr1::make_tuple(32000, 32000, 32000, 19),
+        std::tr1::make_tuple(32000, 32000, 16000, 19),
+        std::tr1::make_tuple(32000, 16000, 48000, 19),
+        std::tr1::make_tuple(32000, 16000, 32000, 19),
+        std::tr1::make_tuple(32000, 16000, 16000, 19),
+
+        std::tr1::make_tuple(16000, 48000, 48000, 25),
+        std::tr1::make_tuple(16000, 48000, 32000, 25),
+        std::tr1::make_tuple(16000, 48000, 16000, 25),
+        std::tr1::make_tuple(16000, 44100, 48000, 15),
+        std::tr1::make_tuple(16000, 44100, 32000, 15),
+        std::tr1::make_tuple(16000, 44100, 16000, 15),
+        std::tr1::make_tuple(16000, 32000, 48000, 25),
+        std::tr1::make_tuple(16000, 32000, 32000, 25),
+        std::tr1::make_tuple(16000, 32000, 16000, 25),
+        std::tr1::make_tuple(16000, 16000, 48000, 30),
+        std::tr1::make_tuple(16000, 16000, 32000, 30),
+        std::tr1::make_tuple(16000, 16000, 16000, 0)));
+#endif
 
 // TODO(henrike): re-implement functionality lost when removing the old main
 //                function. See
 //                https://code.google.com/p/webrtc/issues/detail?id=1981
 
 }  // namespace
+}  // namespace webrtc