Upstream version 7.36.149.0
platform/framework/web/crosswalk.git: src/third_party/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
1 /*
2  *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3  *
4  *  Use of this source code is governed by a BSD-style license
5  *  that can be found in the LICENSE file in the root of the source
6  *  tree. An additional intellectual property rights grant can be found
7  *  in the file PATENTS.  All contributing project authors may
8  *  be found in the AUTHORS file in the root of the source tree.
9  */
10
11 #include <math.h>
12 #include <stdio.h>
13 #include <algorithm>
14 #include <limits>
15 #include <queue>
16
17 #include "webrtc/common_audio/include/audio_util.h"
18 #include "webrtc/common_audio/resampler/include/push_resampler.h"
19 #include "webrtc/common_audio/resampler/push_sinc_resampler.h"
20 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
21 #include "webrtc/modules/audio_processing/include/audio_processing.h"
22 #include "webrtc/modules/audio_processing/test/test_utils.h"
23 #include "webrtc/modules/interface/module_common_types.h"
24 #include "webrtc/system_wrappers/interface/event_wrapper.h"
25 #include "webrtc/system_wrappers/interface/scoped_ptr.h"
26 #include "webrtc/system_wrappers/interface/trace.h"
27 #include "webrtc/test/testsupport/fileutils.h"
28 #include "webrtc/test/testsupport/gtest_disable.h"
29 #ifdef WEBRTC_ANDROID_PLATFORM_BUILD
30 #include "gtest/gtest.h"
31 #include "external/webrtc/webrtc/modules/audio_processing/test/unittest.pb.h"
32 #else
33 #include "testing/gtest/include/gtest/gtest.h"
34 #include "webrtc/audio_processing/unittest.pb.h"
35 #endif
36
37 #if (defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)) || \
38     (defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64) && !defined(NDEBUG))
39 #  define WEBRTC_AUDIOPROC_BIT_EXACT
40 #endif
41
42 namespace webrtc {
43 namespace {
44
45 // TODO(bjornv): This is not feasible until the functionality has been
46 // re-implemented; see comment at the bottom of this file.
47 // When false, this will compare the output data with the results stored to
48 // file. This is the typical case. When the file should be updated, it can
49 // be set to true with the command-line switch --write_ref_data.
50 #ifdef WEBRTC_AUDIOPROC_BIT_EXACT
51 bool write_ref_data = false;
52 const int kChannels[] = {1, 2};
53 const size_t kChannelsSize = sizeof(kChannels) / sizeof(*kChannels);
54 #endif
55
56 const int kSampleRates[] = {8000, 16000, 32000};
57 const size_t kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
58
59 #if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
60 // AECM doesn't support super-wb.
61 const int kProcessSampleRates[] = {8000, 16000};
62 #elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
63 const int kProcessSampleRates[] = {8000, 16000, 32000};
64 #endif
65 const size_t kProcessSampleRatesSize = sizeof(kProcessSampleRates) /
66     sizeof(*kProcessSampleRates);
67
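// Deinterleaves |int_data| into the per-channel layout of |cb| and converts
// the samples to floats via ScaleToFloat() for use with the float API.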
68 void ConvertToFloat(const int16_t* int_data, ChannelBuffer<float>* cb) {
69   ChannelBuffer<int16_t> cb_int(cb->samples_per_channel(),
70                                 cb->num_channels());
71   Deinterleave(int_data,
72                cb->samples_per_channel(),
73                cb->num_channels(),
74                cb_int.channels());
75   ScaleToFloat(cb_int.data(),
76                cb->samples_per_channel() * cb->num_channels(),
77                cb->data());
78 }
79
80 void ConvertToFloat(const AudioFrame& frame, ChannelBuffer<float>* cb) {
81   ConvertToFloat(frame.data_, cb);
82 }
83
84 // Number of channels including the keyboard channel.
85 int TotalChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
86   switch (layout) {
87     case AudioProcessing::kMono:
88       return 1;
89     case AudioProcessing::kMonoAndKeyboard:
90     case AudioProcessing::kStereo:
91       return 2;
92     case AudioProcessing::kStereoAndKeyboard:
93       return 3;
94   }
95   assert(false);
96   return -1;
97 }
98
99 int TruncateToMultipleOf10(int value) {
100   return (value / 10) * 10;
101 }
102
103 void MixStereoToMono(const float* stereo, float* mono,
104                      int samples_per_channel) {
105   for (int i = 0; i < samples_per_channel; ++i) {
106     mono[i] = (stereo[i * 2] + stereo[i * 2 + 1]) / 2;
107   }
108 }
109
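// Integer variant: averages each stereo sample pair with an arithmetic shift.
// The int16_t operands are promoted to int before the addition, so the
// intermediate sum cannot overflow.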
110 void MixStereoToMono(const int16_t* stereo, int16_t* mono,
111                      int samples_per_channel) {
112   for (int i = 0; i < samples_per_channel; i++)
113     mono[i] = (stereo[i * 2] + stereo[i * 2 + 1]) >> 1;
114 }
115
116 void CopyLeftToRightChannel(int16_t* stereo, int samples_per_channel) {
117   for (int i = 0; i < samples_per_channel; i++) {
118     stereo[i * 2 + 1] = stereo[i * 2];
119   }
120 }
121
122 void VerifyChannelsAreEqual(int16_t* stereo, int samples_per_channel) {
123   for (int i = 0; i < samples_per_channel; i++) {
124     EXPECT_EQ(stereo[i * 2 + 1], stereo[i * 2]);
125   }
126 }
127
128 void SetFrameTo(AudioFrame* frame, int16_t value) {
129   for (int i = 0; i < frame->samples_per_channel_ * frame->num_channels_; ++i) {
130     frame->data_[i] = value;
131   }
132 }
133
134 void SetFrameTo(AudioFrame* frame, int16_t left, int16_t right) {
135   ASSERT_EQ(2, frame->num_channels_);
136   for (int i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
137     frame->data_[i] = left;
138     frame->data_[i + 1] = right;
139   }
140 }
141
142 void ScaleFrame(AudioFrame* frame, float scale) {
143   for (int i = 0; i < frame->samples_per_channel_ * frame->num_channels_; ++i) {
144     frame->data_[i] = RoundToInt16(frame->data_[i] * scale);
145   }
146 }
147
148 bool FrameDataAreEqual(const AudioFrame& frame1, const AudioFrame& frame2) {
149   if (frame1.samples_per_channel_ != frame2.samples_per_channel_) {
150     return false;
151   }
152   if (frame1.num_channels_ != frame2.num_channels_) {
153     return false;
154   }
155   if (memcmp(frame1.data_, frame2.data_,
156              frame1.samples_per_channel_ * frame1.num_channels_ *
157                  sizeof(int16_t))) {
158     return false;
159   }
160   return true;
161 }
162
163 void EnableAllAPComponents(AudioProcessing* ap) {
164 #if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
165   EXPECT_NOERR(ap->echo_control_mobile()->Enable(true));
166
167   EXPECT_NOERR(ap->gain_control()->set_mode(GainControl::kAdaptiveDigital));
168   EXPECT_NOERR(ap->gain_control()->Enable(true));
169 #elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
170   EXPECT_NOERR(ap->echo_cancellation()->enable_drift_compensation(true));
171   EXPECT_NOERR(ap->echo_cancellation()->enable_metrics(true));
172   EXPECT_NOERR(ap->echo_cancellation()->enable_delay_logging(true));
173   EXPECT_NOERR(ap->echo_cancellation()->Enable(true));
174
175   EXPECT_NOERR(ap->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
176   EXPECT_NOERR(ap->gain_control()->set_analog_level_limits(0, 255));
177   EXPECT_NOERR(ap->gain_control()->Enable(true));
178 #endif
179
180   EXPECT_NOERR(ap->high_pass_filter()->Enable(true));
181   EXPECT_NOERR(ap->level_estimator()->Enable(true));
182   EXPECT_NOERR(ap->noise_suppression()->Enable(true));
183
184   EXPECT_NOERR(ap->voice_detection()->Enable(true));
185 }
186
187 #ifdef WEBRTC_AUDIOPROC_BIT_EXACT
188 // These functions are only used by the bit-exact test.
189 template <class T>
190 T AbsValue(T a) {
191   return a > 0 ? a : -a;
192 }
193
194 int16_t MaxAudioFrame(const AudioFrame& frame) {
195   const int length = frame.samples_per_channel_ * frame.num_channels_;
196   int16_t max_data = AbsValue(frame.data_[0]);
197   for (int i = 1; i < length; i++) {
198     max_data = std::max(max_data, AbsValue(frame.data_[i]));
199   }
200
201   return max_data;
202 }
203
204 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
205 void TestStats(const AudioProcessing::Statistic& test,
206                const audioproc::Test::Statistic& reference) {
207   EXPECT_EQ(reference.instant(), test.instant);
208   EXPECT_EQ(reference.average(), test.average);
209   EXPECT_EQ(reference.maximum(), test.maximum);
210   EXPECT_EQ(reference.minimum(), test.minimum);
211 }
212
213 void WriteStatsMessage(const AudioProcessing::Statistic& output,
214                        audioproc::Test::Statistic* msg) {
215   msg->set_instant(output.instant);
216   msg->set_average(output.average);
217   msg->set_maximum(output.maximum);
218   msg->set_minimum(output.minimum);
219 }
220 #endif
221
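// Writes |msg| as a 4-byte size prefix followed by the serialized bytes;
// presumably the same layout that ReadMessageFromFile() parses in
// OpenFileAndReadMessage() below.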
222 void OpenFileAndWriteMessage(const std::string filename,
223                              const ::google::protobuf::MessageLite& msg) {
224   FILE* file = fopen(filename.c_str(), "wb");
225   ASSERT_TRUE(file != NULL);
226
227   int32_t size = msg.ByteSize();
228   ASSERT_GT(size, 0);
229   scoped_ptr<uint8_t[]> array(new uint8_t[size]);
230   ASSERT_TRUE(msg.SerializeToArray(array.get(), size));
231
232   ASSERT_EQ(1u, fwrite(&size, sizeof(size), 1, file));
233   ASSERT_EQ(static_cast<size_t>(size),
234       fwrite(array.get(), sizeof(array[0]), size, file));
235   fclose(file);
236 }
237 #endif  // WEBRTC_AUDIOPROC_BIT_EXACT
238
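// For example (illustrative), ResourceFilePath("far", 16000) resolves to the
// resource file far16_stereo.pcm.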
239 std::string ResourceFilePath(std::string name, int sample_rate_hz) {
240   std::ostringstream ss;
241   // Resource files are all stereo.
242   ss << name << sample_rate_hz / 1000 << "_stereo";
243   return test::ResourcePath(ss.str(), "pcm");
244 }
245
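// For example (illustrative), OutputFilePath("out", 16000, 16000, 32000,
// 2, 2, 2) yields out_i2_16_r2_32_stereo16.pcm under test::OutputPath().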
246 std::string OutputFilePath(std::string name,
247                            int input_rate,
248                            int output_rate,
249                            int reverse_rate,
250                            int num_input_channels,
251                            int num_output_channels,
252                            int num_reverse_channels) {
253   std::ostringstream ss;
254   ss << name << "_i" << num_input_channels << "_" << input_rate / 1000
255      << "_r" << num_reverse_channels << "_" << reverse_rate  / 1000 << "_";
256   if (num_output_channels == 1) {
257     ss << "mono";
258   } else if (num_output_channels == 2) {
259     ss << "stereo";
260   } else {
261     assert(false);
262   }
263   ss << output_rate / 1000 << ".pcm";
264
265   return test::OutputPath() + ss.str();
266 }
267
268 void OpenFileAndReadMessage(const std::string filename,
269                             ::google::protobuf::MessageLite* msg) {
270   FILE* file = fopen(filename.c_str(), "rb");
271   ASSERT_TRUE(file != NULL);
272   ReadMessageFromFile(file, msg);
273   fclose(file);
274 }
275
276 class ApmTest : public ::testing::Test {
277  protected:
278   ApmTest();
279   virtual void SetUp();
280   virtual void TearDown();
281
282   static void SetUpTestCase() {
283     Trace::CreateTrace();
284     std::string trace_filename = test::OutputPath() + "audioproc_trace.txt";
285     ASSERT_EQ(0, Trace::SetTraceFile(trace_filename.c_str()));
286   }
287
288   static void TearDownTestCase() {
289     Trace::ReturnTrace();
290   }
291
292   // Used to select between int and float interface tests.
293   enum Format {
294     kIntFormat,
295     kFloatFormat
296   };
297
298   void Init(int sample_rate_hz,
299             int output_sample_rate_hz,
300             int reverse_sample_rate_hz,
301             int num_input_channels,
302             int num_output_channels,
303             int num_reverse_channels,
304             bool open_output_file);
305   void Init(AudioProcessing* ap);
306   void EnableAllComponents();
307   bool ReadFrame(FILE* file, AudioFrame* frame);
308   bool ReadFrame(FILE* file, AudioFrame* frame, ChannelBuffer<float>* cb);
309   void ReadFrameWithRewind(FILE* file, AudioFrame* frame);
310   void ReadFrameWithRewind(FILE* file, AudioFrame* frame,
311                            ChannelBuffer<float>* cb);
312   void ProcessWithDefaultStreamParameters(AudioFrame* frame);
313   void ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
314                                     int delay_min, int delay_max);
315   void TestChangingChannels(int num_channels,
316                             AudioProcessing::Error expected_return);
317   void RunQuantizedVolumeDoesNotGetStuckTest(int sample_rate);
318   void RunManualVolumeChangeIsPossibleTest(int sample_rate);
319   void StreamParametersTest(Format format);
320   int ProcessStreamChooser(Format format);
321   int AnalyzeReverseStreamChooser(Format format);
322   void ProcessDebugDump(const std::string& in_filename,
323                         const std::string& out_filename,
324                         Format format);
325   void VerifyDebugDumpTest(Format format);
326
327   const std::string output_path_;
328   const std::string ref_path_;
329   const std::string ref_filename_;
330   scoped_ptr<AudioProcessing> apm_;
331   AudioFrame* frame_;
332   AudioFrame* revframe_;
333   scoped_ptr<ChannelBuffer<float> > float_cb_;
334   scoped_ptr<ChannelBuffer<float> > revfloat_cb_;
335   int output_sample_rate_hz_;
336   int num_output_channels_;
337   FILE* far_file_;
338   FILE* near_file_;
339   FILE* out_file_;
340 };
341
342 ApmTest::ApmTest()
343     : output_path_(test::OutputPath()),
344       ref_path_(test::ProjectRootPath() + "data/audio_processing/"),
345 #if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
346       ref_filename_(ref_path_ + "output_data_fixed.pb"),
347 #elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
348       ref_filename_(ref_path_ + "output_data_float.pb"),
349 #endif
350       frame_(NULL),
351       revframe_(NULL),
352       output_sample_rate_hz_(0),
353       num_output_channels_(0),
354       far_file_(NULL),
355       near_file_(NULL),
356       out_file_(NULL) {
357   Config config;
358   config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
359   apm_.reset(AudioProcessing::Create(config));
360 }
361
362 void ApmTest::SetUp() {
363   ASSERT_TRUE(apm_.get() != NULL);
364
365   frame_ = new AudioFrame();
366   revframe_ = new AudioFrame();
367
368 #if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
369   Init(16000, 16000, 16000, 2, 2, 2, false);
370 #else
371   Init(32000, 32000, 32000, 2, 2, 2, false);
372 #endif
373 }
374
375 void ApmTest::TearDown() {
376   if (frame_) {
377     delete frame_;
378   }
379   frame_ = NULL;
380
381   if (revframe_) {
382     delete revframe_;
383   }
384   revframe_ = NULL;
385
386   if (far_file_) {
387     ASSERT_EQ(0, fclose(far_file_));
388   }
389   far_file_ = NULL;
390
391   if (near_file_) {
392     ASSERT_EQ(0, fclose(near_file_));
393   }
394   near_file_ = NULL;
395
396   if (out_file_) {
397     ASSERT_EQ(0, fclose(out_file_));
398   }
399   out_file_ = NULL;
400 }
401
402 void ApmTest::Init(AudioProcessing* ap) {
403   ASSERT_EQ(kNoErr,
404             ap->Initialize(frame_->sample_rate_hz_,
405                            output_sample_rate_hz_,
406                            revframe_->sample_rate_hz_,
407                            LayoutFromChannels(frame_->num_channels_),
408                            LayoutFromChannels(num_output_channels_),
409                            LayoutFromChannels(revframe_->num_channels_)));
410 }
411
412 void ApmTest::Init(int sample_rate_hz,
413                    int output_sample_rate_hz,
414                    int reverse_sample_rate_hz,
415                    int num_input_channels,
416                    int num_output_channels,
417                    int num_reverse_channels,
418                    bool open_output_file) {
419   SetContainerFormat(sample_rate_hz, num_input_channels, frame_, &float_cb_);
420   output_sample_rate_hz_ = output_sample_rate_hz;
421   num_output_channels_ = num_output_channels;
422
423   SetContainerFormat(reverse_sample_rate_hz, num_reverse_channels, revframe_,
424                      &revfloat_cb_);
425   Init(apm_.get());
426
427   if (far_file_) {
428     ASSERT_EQ(0, fclose(far_file_));
429   }
430   std::string filename = ResourceFilePath("far", sample_rate_hz);
431   far_file_ = fopen(filename.c_str(), "rb");
432   ASSERT_TRUE(far_file_ != NULL) << "Could not open file " <<
433       filename << "\n";
434
435   if (near_file_) {
436     ASSERT_EQ(0, fclose(near_file_));
437   }
438   filename = ResourceFilePath("near", sample_rate_hz);
439   near_file_ = fopen(filename.c_str(), "rb");
440   ASSERT_TRUE(near_file_ != NULL) << "Could not open file " <<
441         filename << "\n";
442
443   if (open_output_file) {
444     if (out_file_) {
445       ASSERT_EQ(0, fclose(out_file_));
446     }
447     filename = OutputFilePath("out",
448                               sample_rate_hz,
449                               output_sample_rate_hz,
450                               reverse_sample_rate_hz,
451                               num_input_channels,
452                               num_output_channels,
453                               num_reverse_channels);
454     out_file_ = fopen(filename.c_str(), "wb");
455     ASSERT_TRUE(out_file_ != NULL) << "Could not open file " <<
456           filename << "\n";
457   }
458 }
459
460 void ApmTest::EnableAllComponents() {
461   EnableAllAPComponents(apm_.get());
462 }
463
464 bool ApmTest::ReadFrame(FILE* file, AudioFrame* frame,
465                         ChannelBuffer<float>* cb) {
466   // The files always contain stereo audio.
467   size_t frame_size = frame->samples_per_channel_ * 2;
468   size_t read_count = fread(frame->data_,
469                             sizeof(int16_t),
470                             frame_size,
471                             file);
472   if (read_count != frame_size) {
473     // Check that the file really ended.
474     EXPECT_NE(0, feof(file));
475     return false;  // This is expected.
476   }
477
478   if (frame->num_channels_ == 1) {
479     MixStereoToMono(frame->data_, frame->data_,
480                     frame->samples_per_channel_);
481   }
482
483   if (cb) {
484     ConvertToFloat(*frame, cb);
485   }
486   return true;
487 }
488
489 bool ApmTest::ReadFrame(FILE* file, AudioFrame* frame) {
490   return ReadFrame(file, frame, NULL);
491 }
492
493 // If the end of the file has been reached, rewind it and attempt to read the
494 // frame again.
495 void ApmTest::ReadFrameWithRewind(FILE* file, AudioFrame* frame,
496                                   ChannelBuffer<float>* cb) {
497   if (!ReadFrame(file, frame, cb)) {
498     rewind(file);
499     ASSERT_TRUE(ReadFrame(file, frame, cb));
500   }
501 }
502
503 void ApmTest::ReadFrameWithRewind(FILE* file, AudioFrame* frame) {
504   ReadFrameWithRewind(file, frame, NULL);
505 }
506
507 void ApmTest::ProcessWithDefaultStreamParameters(AudioFrame* frame) {
508   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
509   apm_->echo_cancellation()->set_stream_drift_samples(0);
510   EXPECT_EQ(apm_->kNoError,
511       apm_->gain_control()->set_stream_analog_level(127));
512   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame));
513 }
514
515 int ApmTest::ProcessStreamChooser(Format format) {
516   if (format == kIntFormat) {
517     return apm_->ProcessStream(frame_);
518   }
519   return apm_->ProcessStream(float_cb_->channels(),
520                              frame_->samples_per_channel_,
521                              frame_->sample_rate_hz_,
522                              LayoutFromChannels(frame_->num_channels_),
523                              output_sample_rate_hz_,
524                              LayoutFromChannels(num_output_channels_),
525                              float_cb_->channels());
526 }
527
528 int ApmTest::AnalyzeReverseStreamChooser(Format format) {
529   if (format == kIntFormat) {
530     return apm_->AnalyzeReverseStream(revframe_);
531   }
532   return apm_->AnalyzeReverseStream(
533       revfloat_cb_->channels(),
534       revframe_->samples_per_channel_,
535       revframe_->sample_rate_hz_,
536       LayoutFromChannels(revframe_->num_channels_));
537 }
538
539 void ApmTest::ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
540                                            int delay_min, int delay_max) {
541   // |revframe_| and |frame_| are assumed to carry the proper frame format,
542   // hence they can be used here for extracting that information.
543   AudioFrame tmp_frame;
544   std::queue<AudioFrame*> frame_queue;
545   bool causal = true;
546
547   tmp_frame.CopyFrom(*revframe_);
548   SetFrameTo(&tmp_frame, 0);
549
550   EXPECT_EQ(apm_->kNoError, apm_->Initialize());
551   // Initialize the |frame_queue| with empty frames.
552   int frame_delay = delay_ms / 10;
553   while (frame_delay < 0) {
554     AudioFrame* frame = new AudioFrame();
555     frame->CopyFrom(tmp_frame);
556     frame_queue.push(frame);
557     frame_delay++;
558     causal = false;
559   }
560   while (frame_delay > 0) {
561     AudioFrame* frame = new AudioFrame();
562     frame->CopyFrom(tmp_frame);
563     frame_queue.push(frame);
564     frame_delay--;
565   }
566   // Run for 4.5 seconds, skipping statistics from the first 2.5 seconds.  We
567   // need enough frames with audio to have reliable estimates, but as few as
568   // possible to keep processing time down.  4.5 seconds seemed to be a good
569   // compromise for this recording.
570   for (int frame_count = 0; frame_count < 450; ++frame_count) {
571     AudioFrame* frame = new AudioFrame();
572     frame->CopyFrom(tmp_frame);
573     // Use the near end recording, since that has more speech in it.
574     ASSERT_TRUE(ReadFrame(near_file_, frame));
575     frame_queue.push(frame);
576     AudioFrame* reverse_frame = frame;
577     AudioFrame* process_frame = frame_queue.front();
578     if (!causal) {
579       reverse_frame = frame_queue.front();
580       // When we call ProcessStream() the frame is modified, so we can't use the
581       // pointer directly when things are non-causal. Use an intermediate frame
582       // and copy the data.
583       process_frame = &tmp_frame;
584       process_frame->CopyFrom(*frame);
585     }
586     EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(reverse_frame));
587     EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(system_delay_ms));
588     EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(process_frame));
589     frame = frame_queue.front();
590     frame_queue.pop();
591     delete frame;
592
593     if (frame_count == 250) {
594       int median;
595       int std;
596       // Discard the first delay metrics to avoid convergence effects.
597       EXPECT_EQ(apm_->kNoError,
598                 apm_->echo_cancellation()->GetDelayMetrics(&median, &std));
599     }
600   }
601
602   rewind(near_file_);
603   while (!frame_queue.empty()) {
604     AudioFrame* frame = frame_queue.front();
605     frame_queue.pop();
606     delete frame;
607   }
608   // Calculate expected delay estimate and acceptable regions. Further,
609   // limit them w.r.t. AEC delay estimation support.
610   const int samples_per_ms = std::min(16, frame_->samples_per_channel_ / 10);
611   int expected_median = std::min(std::max(delay_ms - system_delay_ms,
612                                           delay_min), delay_max);
613   int expected_median_high = std::min(std::max(
614       expected_median + 96 / samples_per_ms, delay_min), delay_max);
615   int expected_median_low = std::min(std::max(
616       expected_median - 96 / samples_per_ms, delay_min), delay_max);
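  // Added note: at 16 kHz, samples_per_ms = min(16, 160 / 10) = 16, so the
  // accepted region is expected_median +/- 96 / 16 = +/- 6 ms (clamped to
  // [delay_min, delay_max]).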
617   // Verify delay metrics.
618   int median;
619   int std;
620   EXPECT_EQ(apm_->kNoError,
621             apm_->echo_cancellation()->GetDelayMetrics(&median, &std));
622   EXPECT_GE(expected_median_high, median);
623   EXPECT_LE(expected_median_low, median);
624 }
625
626 void ApmTest::StreamParametersTest(Format format) {
627   // No errors when the components are disabled.
628   EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
629
630   // -- Missing AGC level --
631   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
632   EXPECT_EQ(apm_->kStreamParameterNotSetError,
633             ProcessStreamChooser(format));
634
635   // Resets after successful ProcessStream().
636   EXPECT_EQ(apm_->kNoError,
637             apm_->gain_control()->set_stream_analog_level(127));
638   EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
639   EXPECT_EQ(apm_->kStreamParameterNotSetError,
640             ProcessStreamChooser(format));
641
642   // Other stream parameters set correctly.
643   EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
644   EXPECT_EQ(apm_->kNoError,
645             apm_->echo_cancellation()->enable_drift_compensation(true));
646   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
647   apm_->echo_cancellation()->set_stream_drift_samples(0);
648   EXPECT_EQ(apm_->kStreamParameterNotSetError,
649             ProcessStreamChooser(format));
650   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
651   EXPECT_EQ(apm_->kNoError,
652             apm_->echo_cancellation()->enable_drift_compensation(false));
653
654   // -- Missing delay --
655   EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
656   EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
657   EXPECT_EQ(apm_->kStreamParameterNotSetError,
658             ProcessStreamChooser(format));
659
660   // Resets after successful ProcessStream().
661   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
662   EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
663   EXPECT_EQ(apm_->kStreamParameterNotSetError,
664             ProcessStreamChooser(format));
665
666   // Other stream parameters set correctly.
667   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
668   EXPECT_EQ(apm_->kNoError,
669             apm_->echo_cancellation()->enable_drift_compensation(true));
670   apm_->echo_cancellation()->set_stream_drift_samples(0);
671   EXPECT_EQ(apm_->kNoError,
672             apm_->gain_control()->set_stream_analog_level(127));
673   EXPECT_EQ(apm_->kStreamParameterNotSetError,
674             ProcessStreamChooser(format));
675   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
676
677   // -- Missing drift --
678   EXPECT_EQ(apm_->kStreamParameterNotSetError,
679             ProcessStreamChooser(format));
680
681   // Resets after successful ProcessStream().
682   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
683   apm_->echo_cancellation()->set_stream_drift_samples(0);
684   EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
685   EXPECT_EQ(apm_->kStreamParameterNotSetError,
686             ProcessStreamChooser(format));
687
688   // Other stream parameters set correctly.
689   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
690   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
691   EXPECT_EQ(apm_->kNoError,
692             apm_->gain_control()->set_stream_analog_level(127));
693   EXPECT_EQ(apm_->kStreamParameterNotSetError,
694             ProcessStreamChooser(format));
695
696   // -- No stream parameters --
697   EXPECT_EQ(apm_->kNoError,
698             AnalyzeReverseStreamChooser(format));
699   EXPECT_EQ(apm_->kStreamParameterNotSetError,
700             ProcessStreamChooser(format));
701
702   // -- All there --
703   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
704   apm_->echo_cancellation()->set_stream_drift_samples(0);
705   EXPECT_EQ(apm_->kNoError,
706             apm_->gain_control()->set_stream_analog_level(127));
707   EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
708 }
709
710 TEST_F(ApmTest, StreamParametersInt) {
711   StreamParametersTest(kIntFormat);
712 }
713
714 TEST_F(ApmTest, StreamParametersFloat) {
715   StreamParametersTest(kFloatFormat);
716 }
717
718 TEST_F(ApmTest, DefaultDelayOffsetIsZero) {
719   EXPECT_EQ(0, apm_->delay_offset_ms());
720   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(50));
721   EXPECT_EQ(50, apm_->stream_delay_ms());
722 }
723
724 TEST_F(ApmTest, DelayOffsetWithLimitsIsSetProperly) {
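  // The delay reported by stream_delay_ms() is the requested delay plus
  // delay_offset_ms(), clamped to [0, 500] ms; clamping yields
  // kBadStreamParameterWarning, which is what the expectations below assume.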
725   // High limit of 500 ms.
726   apm_->set_delay_offset_ms(100);
727   EXPECT_EQ(100, apm_->delay_offset_ms());
728   EXPECT_EQ(apm_->kBadStreamParameterWarning, apm_->set_stream_delay_ms(450));
729   EXPECT_EQ(500, apm_->stream_delay_ms());
730   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
731   EXPECT_EQ(200, apm_->stream_delay_ms());
732
733   // Low limit of 0 ms.
734   apm_->set_delay_offset_ms(-50);
735   EXPECT_EQ(-50, apm_->delay_offset_ms());
736   EXPECT_EQ(apm_->kBadStreamParameterWarning, apm_->set_stream_delay_ms(20));
737   EXPECT_EQ(0, apm_->stream_delay_ms());
738   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
739   EXPECT_EQ(50, apm_->stream_delay_ms());
740 }
741
742 void ApmTest::TestChangingChannels(int num_channels,
743                                    AudioProcessing::Error expected_return) {
744   frame_->num_channels_ = num_channels;
745   EXPECT_EQ(expected_return, apm_->ProcessStream(frame_));
746   EXPECT_EQ(expected_return, apm_->AnalyzeReverseStream(frame_));
747 }
748
749 TEST_F(ApmTest, Channels) {
750   // Testing number of invalid channels.
751   TestChangingChannels(0, apm_->kBadNumberChannelsError);
752   TestChangingChannels(3, apm_->kBadNumberChannelsError);
753   // Testing number of valid channels.
754   for (int i = 1; i < 3; i++) {
755     TestChangingChannels(i, kNoErr);
756     EXPECT_EQ(i, apm_->num_input_channels());
757     EXPECT_EQ(i, apm_->num_reverse_channels());
758   }
759 }
760
761 TEST_F(ApmTest, SampleRatesInt) {
762   // Testing invalid sample rates
763   SetContainerFormat(10000, 2, frame_, &float_cb_);
764   EXPECT_EQ(apm_->kBadSampleRateError, ProcessStreamChooser(kIntFormat));
765   // Testing valid sample rates
766   int fs[] = {8000, 16000, 32000};
767   for (size_t i = 0; i < sizeof(fs) / sizeof(*fs); i++) {
768     SetContainerFormat(fs[i], 2, frame_, &float_cb_);
769     EXPECT_NOERR(ProcessStreamChooser(kIntFormat));
770     EXPECT_EQ(fs[i], apm_->input_sample_rate_hz());
771   }
772 }
773
774 TEST_F(ApmTest, EchoCancellation) {
775   EXPECT_EQ(apm_->kNoError,
776             apm_->echo_cancellation()->enable_drift_compensation(true));
777   EXPECT_TRUE(apm_->echo_cancellation()->is_drift_compensation_enabled());
778   EXPECT_EQ(apm_->kNoError,
779             apm_->echo_cancellation()->enable_drift_compensation(false));
780   EXPECT_FALSE(apm_->echo_cancellation()->is_drift_compensation_enabled());
781
782   EchoCancellation::SuppressionLevel level[] = {
783     EchoCancellation::kLowSuppression,
784     EchoCancellation::kModerateSuppression,
785     EchoCancellation::kHighSuppression,
786   };
787   for (size_t i = 0; i < sizeof(level)/sizeof(*level); i++) {
788     EXPECT_EQ(apm_->kNoError,
789         apm_->echo_cancellation()->set_suppression_level(level[i]));
790     EXPECT_EQ(level[i],
791         apm_->echo_cancellation()->suppression_level());
792   }
793
794   EchoCancellation::Metrics metrics;
795   EXPECT_EQ(apm_->kNotEnabledError,
796             apm_->echo_cancellation()->GetMetrics(&metrics));
797
798   EXPECT_EQ(apm_->kNoError,
799             apm_->echo_cancellation()->enable_metrics(true));
800   EXPECT_TRUE(apm_->echo_cancellation()->are_metrics_enabled());
801   EXPECT_EQ(apm_->kNoError,
802             apm_->echo_cancellation()->enable_metrics(false));
803   EXPECT_FALSE(apm_->echo_cancellation()->are_metrics_enabled());
804
805   int median = 0;
806   int std = 0;
807   EXPECT_EQ(apm_->kNotEnabledError,
808             apm_->echo_cancellation()->GetDelayMetrics(&median, &std));
809
810   EXPECT_EQ(apm_->kNoError,
811             apm_->echo_cancellation()->enable_delay_logging(true));
812   EXPECT_TRUE(apm_->echo_cancellation()->is_delay_logging_enabled());
813   EXPECT_EQ(apm_->kNoError,
814             apm_->echo_cancellation()->enable_delay_logging(false));
815   EXPECT_FALSE(apm_->echo_cancellation()->is_delay_logging_enabled());
816
817   EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
818   EXPECT_TRUE(apm_->echo_cancellation()->is_enabled());
819   EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(false));
820   EXPECT_FALSE(apm_->echo_cancellation()->is_enabled());
821
822   EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
823   EXPECT_TRUE(apm_->echo_cancellation()->is_enabled());
824   EXPECT_TRUE(apm_->echo_cancellation()->aec_core() != NULL);
825   EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(false));
826   EXPECT_FALSE(apm_->echo_cancellation()->is_enabled());
827   EXPECT_FALSE(apm_->echo_cancellation()->aec_core() != NULL);
828 }
829
830 TEST_F(ApmTest, EchoCancellationReportsCorrectDelays) {
831   // Enable AEC only.
832   EXPECT_EQ(apm_->kNoError,
833             apm_->echo_cancellation()->enable_drift_compensation(false));
834   EXPECT_EQ(apm_->kNoError,
835             apm_->echo_cancellation()->enable_metrics(false));
836   EXPECT_EQ(apm_->kNoError,
837             apm_->echo_cancellation()->enable_delay_logging(true));
838   EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
839
840   // Internally in the AEC the amount of lookahead the delay estimation can
841   // handle is 15 blocks and the maximum delay is set to 60 blocks.
842   const int kLookaheadBlocks = 15;
843   const int kMaxDelayBlocks = 60;
844   // The AEC has a startup time before it actually starts to process. This
845   // procedure can flush the internal far-end buffer, which of course affects
846   // the delay estimation. Therefore, we set a system_delay high enough to
847   // avoid that. The smallest system_delay you can report without flushing the
848   // buffer is 66 ms at 8 kHz.
849   //
850   // It is known that for 16 kHz (and 32 kHz) sampling frequency there is an
851   // additional stuffing of 8 ms on the fly, but it seems to have no impact on
852   // delay estimation. This should be noted though. In case of test failure,
853   // this could be the cause.
854   const int kSystemDelayMs = 66;
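  // Added worked example: at 16 kHz a 10 ms frame holds 160 samples, so below
  // num_ms_per_block = max(4, 640 / 160) = 4 ms, giving
  // delay_min_ms = -15 * 4 = -60 ms and delay_max_ms = (60 - 1) * 4 = 236 ms.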
855   // Test a couple of corner cases and verify that the estimated delay is
856   // within a valid region (set to +-1.5 blocks). Note that these cases are
857   // sampling frequency dependent.
858   for (size_t i = 0; i < kProcessSampleRatesSize; i++) {
859     Init(kProcessSampleRates[i],
860          kProcessSampleRates[i],
861          kProcessSampleRates[i],
862          2,
863          2,
864          2,
865          false);
866     // Sampling frequency dependent variables.
867     const int num_ms_per_block = std::max(4,
868                                           640 / frame_->samples_per_channel_);
869     const int delay_min_ms = -kLookaheadBlocks * num_ms_per_block;
870     const int delay_max_ms = (kMaxDelayBlocks - 1) * num_ms_per_block;
871
872     // 1) Verify correct delay estimate at lookahead boundary.
873     int delay_ms = TruncateToMultipleOf10(kSystemDelayMs + delay_min_ms);
874     ProcessDelayVerificationTest(delay_ms, kSystemDelayMs, delay_min_ms,
875                                  delay_max_ms);
877     // 2) A delay less than the maximum lookahead should give a delay estimate at
877     //    the boundary (= -kLookaheadBlocks * num_ms_per_block).
878     delay_ms -= 20;
879     ProcessDelayVerificationTest(delay_ms, kSystemDelayMs, delay_min_ms,
880                                  delay_max_ms);
881     // 3) Three values around zero delay. Note that we need to compensate for
882     //    the fake system_delay.
883     delay_ms = TruncateToMultipleOf10(kSystemDelayMs - 10);
884     ProcessDelayVerificationTest(delay_ms, kSystemDelayMs, delay_min_ms,
885                                  delay_max_ms);
886     delay_ms = TruncateToMultipleOf10(kSystemDelayMs);
887     ProcessDelayVerificationTest(delay_ms, kSystemDelayMs, delay_min_ms,
888                                  delay_max_ms);
889     delay_ms = TruncateToMultipleOf10(kSystemDelayMs + 10);
890     ProcessDelayVerificationTest(delay_ms, kSystemDelayMs, delay_min_ms,
891                                  delay_max_ms);
892     // 4) Verify correct delay estimate at maximum delay boundary.
893     delay_ms = TruncateToMultipleOf10(kSystemDelayMs + delay_max_ms);
894     ProcessDelayVerificationTest(delay_ms, kSystemDelayMs, delay_min_ms,
895                                  delay_max_ms);
896     // 5) A delay above the maximum delay should give an estimate at the
897     //    boundary (= (kMaxDelayBlocks - 1) * num_ms_per_block).
898     delay_ms += 20;
899     ProcessDelayVerificationTest(delay_ms, kSystemDelayMs, delay_min_ms,
900                                  delay_max_ms);
901   }
902 }
903
904 TEST_F(ApmTest, EchoControlMobile) {
905   // AECM won't use super-wideband.
906   SetFrameSampleRate(frame_, 32000);
907   EXPECT_NOERR(apm_->ProcessStream(frame_));
908   EXPECT_EQ(apm_->kBadSampleRateError,
909             apm_->echo_control_mobile()->Enable(true));
910   SetFrameSampleRate(frame_, 16000);
911   EXPECT_NOERR(apm_->ProcessStream(frame_));
912   EXPECT_EQ(apm_->kNoError,
913             apm_->echo_control_mobile()->Enable(true));
914   SetFrameSampleRate(frame_, 32000);
915   EXPECT_EQ(apm_->kUnsupportedComponentError, apm_->ProcessStream(frame_));
916
917   // Turn AECM on (and AEC off)
918   Init(16000, 16000, 16000, 2, 2, 2, false);
919   EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(true));
920   EXPECT_TRUE(apm_->echo_control_mobile()->is_enabled());
921
922   // Toggle routing modes
923   EchoControlMobile::RoutingMode mode[] = {
924       EchoControlMobile::kQuietEarpieceOrHeadset,
925       EchoControlMobile::kEarpiece,
926       EchoControlMobile::kLoudEarpiece,
927       EchoControlMobile::kSpeakerphone,
928       EchoControlMobile::kLoudSpeakerphone,
929   };
930   for (size_t i = 0; i < sizeof(mode)/sizeof(*mode); i++) {
931     EXPECT_EQ(apm_->kNoError,
932         apm_->echo_control_mobile()->set_routing_mode(mode[i]));
933     EXPECT_EQ(mode[i],
934         apm_->echo_control_mobile()->routing_mode());
935   }
936   // Turn comfort noise off/on
937   EXPECT_EQ(apm_->kNoError,
938       apm_->echo_control_mobile()->enable_comfort_noise(false));
939   EXPECT_FALSE(apm_->echo_control_mobile()->is_comfort_noise_enabled());
940   EXPECT_EQ(apm_->kNoError,
941       apm_->echo_control_mobile()->enable_comfort_noise(true));
942   EXPECT_TRUE(apm_->echo_control_mobile()->is_comfort_noise_enabled());
943   // Set and get echo path
944   const size_t echo_path_size =
945       apm_->echo_control_mobile()->echo_path_size_bytes();
946   scoped_ptr<char[]> echo_path_in(new char[echo_path_size]);
947   scoped_ptr<char[]> echo_path_out(new char[echo_path_size]);
948   EXPECT_EQ(apm_->kNullPointerError,
949             apm_->echo_control_mobile()->SetEchoPath(NULL, echo_path_size));
950   EXPECT_EQ(apm_->kNullPointerError,
951             apm_->echo_control_mobile()->GetEchoPath(NULL, echo_path_size));
952   EXPECT_EQ(apm_->kBadParameterError,
953             apm_->echo_control_mobile()->GetEchoPath(echo_path_out.get(), 1));
954   EXPECT_EQ(apm_->kNoError,
955             apm_->echo_control_mobile()->GetEchoPath(echo_path_out.get(),
956                                                      echo_path_size));
957   for (size_t i = 0; i < echo_path_size; i++) {
958     echo_path_in[i] = echo_path_out[i] + 1;
959   }
960   EXPECT_EQ(apm_->kBadParameterError,
961             apm_->echo_control_mobile()->SetEchoPath(echo_path_in.get(), 1));
962   EXPECT_EQ(apm_->kNoError,
963             apm_->echo_control_mobile()->SetEchoPath(echo_path_in.get(),
964                                                      echo_path_size));
965   EXPECT_EQ(apm_->kNoError,
966             apm_->echo_control_mobile()->GetEchoPath(echo_path_out.get(),
967                                                      echo_path_size));
968   for (size_t i = 0; i < echo_path_size; i++) {
969     EXPECT_EQ(echo_path_in[i], echo_path_out[i]);
970   }
971
972   // Process a few frames with NS in the default disabled state. This exercises
973   // a different codepath than with it enabled.
974   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
975   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
976   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
977   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
978
979   // Turn AECM off
980   EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(false));
981   EXPECT_FALSE(apm_->echo_control_mobile()->is_enabled());
982 }
983
984 TEST_F(ApmTest, GainControl) {
985   // Testing gain modes
986   EXPECT_EQ(apm_->kNoError,
987       apm_->gain_control()->set_mode(
988       apm_->gain_control()->mode()));
989
990   GainControl::Mode mode[] = {
991     GainControl::kAdaptiveAnalog,
992     GainControl::kAdaptiveDigital,
993     GainControl::kFixedDigital
994   };
995   for (size_t i = 0; i < sizeof(mode)/sizeof(*mode); i++) {
996     EXPECT_EQ(apm_->kNoError,
997         apm_->gain_control()->set_mode(mode[i]));
998     EXPECT_EQ(mode[i], apm_->gain_control()->mode());
999   }
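  // Added note: the values below assume the AGC accepts target levels of
  // 0-31 dB below full scale, compression gains of 0-90 dB, and analog level
  // limits in [0, 65535] with minimum <= maximum.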
1000   // Testing invalid target levels
1001   EXPECT_EQ(apm_->kBadParameterError,
1002       apm_->gain_control()->set_target_level_dbfs(-3));
1003   EXPECT_EQ(apm_->kBadParameterError,
1004       apm_->gain_control()->set_target_level_dbfs(-40));
1005   // Testing valid target levels
1006   EXPECT_EQ(apm_->kNoError,
1007       apm_->gain_control()->set_target_level_dbfs(
1008       apm_->gain_control()->target_level_dbfs()));
1009
1010   int level_dbfs[] = {0, 6, 31};
1011   for (size_t i = 0; i < sizeof(level_dbfs)/sizeof(*level_dbfs); i++) {
1012     EXPECT_EQ(apm_->kNoError,
1013         apm_->gain_control()->set_target_level_dbfs(level_dbfs[i]));
1014     EXPECT_EQ(level_dbfs[i], apm_->gain_control()->target_level_dbfs());
1015   }
1016
1017   // Testing invalid compression gains
1018   EXPECT_EQ(apm_->kBadParameterError,
1019       apm_->gain_control()->set_compression_gain_db(-1));
1020   EXPECT_EQ(apm_->kBadParameterError,
1021       apm_->gain_control()->set_compression_gain_db(100));
1022
1023   // Testing valid compression gains
1024   EXPECT_EQ(apm_->kNoError,
1025       apm_->gain_control()->set_compression_gain_db(
1026       apm_->gain_control()->compression_gain_db()));
1027
1028   int gain_db[] = {0, 10, 90};
1029   for (size_t i = 0; i < sizeof(gain_db)/sizeof(*gain_db); i++) {
1030     EXPECT_EQ(apm_->kNoError,
1031         apm_->gain_control()->set_compression_gain_db(gain_db[i]));
1032     EXPECT_EQ(gain_db[i], apm_->gain_control()->compression_gain_db());
1033   }
1034
1035   // Testing limiter off/on
1036   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->enable_limiter(false));
1037   EXPECT_FALSE(apm_->gain_control()->is_limiter_enabled());
1038   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->enable_limiter(true));
1039   EXPECT_TRUE(apm_->gain_control()->is_limiter_enabled());
1040
1041   // Testing invalid level limits
1042   EXPECT_EQ(apm_->kBadParameterError,
1043       apm_->gain_control()->set_analog_level_limits(-1, 512));
1044   EXPECT_EQ(apm_->kBadParameterError,
1045       apm_->gain_control()->set_analog_level_limits(100000, 512));
1046   EXPECT_EQ(apm_->kBadParameterError,
1047       apm_->gain_control()->set_analog_level_limits(512, -1));
1048   EXPECT_EQ(apm_->kBadParameterError,
1049       apm_->gain_control()->set_analog_level_limits(512, 100000));
1050   EXPECT_EQ(apm_->kBadParameterError,
1051       apm_->gain_control()->set_analog_level_limits(512, 255));
1052
1053   // Testing valid level limits
1054   EXPECT_EQ(apm_->kNoError,
1055       apm_->gain_control()->set_analog_level_limits(
1056       apm_->gain_control()->analog_level_minimum(),
1057       apm_->gain_control()->analog_level_maximum()));
1058
1059   int min_level[] = {0, 255, 1024};
1060   for (size_t i = 0; i < sizeof(min_level)/sizeof(*min_level); i++) {
1061     EXPECT_EQ(apm_->kNoError,
1062         apm_->gain_control()->set_analog_level_limits(min_level[i], 1024));
1063     EXPECT_EQ(min_level[i], apm_->gain_control()->analog_level_minimum());
1064   }
1065
1066   int max_level[] = {0, 1024, 65535};
1067   for (size_t i = 0; i < sizeof(max_level)/sizeof(*max_level); i++) {
1068     EXPECT_EQ(apm_->kNoError,
1069         apm_->gain_control()->set_analog_level_limits(0, max_level[i]));
1070     EXPECT_EQ(max_level[i], apm_->gain_control()->analog_level_maximum());
1071   }
1072
1073   // TODO(ajm): stream_is_saturated() and stream_analog_level()
1074
1075   // Turn AGC off
1076   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
1077   EXPECT_FALSE(apm_->gain_control()->is_enabled());
1078 }
1079
1080 void ApmTest::RunQuantizedVolumeDoesNotGetStuckTest(int sample_rate) {
1081   Init(sample_rate, sample_rate, sample_rate, 2, 2, 2, false);
1082   EXPECT_EQ(apm_->kNoError,
1083             apm_->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
1084   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
1085
1086   int out_analog_level = 0;
1087   for (int i = 0; i < 2000; ++i) {
1088     ReadFrameWithRewind(near_file_, frame_);
1089     // Ensure the audio is at a low level, so the AGC will try to increase it.
1090     ScaleFrame(frame_, 0.25);
1091
1092     // Always pass in the same volume.
1093     EXPECT_EQ(apm_->kNoError,
1094         apm_->gain_control()->set_stream_analog_level(100));
1095     EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1096     out_analog_level = apm_->gain_control()->stream_analog_level();
1097   }
1098
1099   // Ensure the AGC is still able to reach the maximum.
1100   EXPECT_EQ(255, out_analog_level);
1101 }
1102
1103 // Verifies that despite volume slider quantization, the AGC can continue to
1104 // increase its volume.
1105 TEST_F(ApmTest, QuantizedVolumeDoesNotGetStuck) {
1106   for (size_t i = 0; i < kSampleRatesSize; ++i) {
1107     RunQuantizedVolumeDoesNotGetStuckTest(kSampleRates[i]);
1108   }
1109 }
1110
1111 void ApmTest::RunManualVolumeChangeIsPossibleTest(int sample_rate) {
1112   Init(sample_rate, sample_rate, sample_rate, 2, 2, 2, false);
1113   EXPECT_EQ(apm_->kNoError,
1114             apm_->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
1115   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
1116
1117   int out_analog_level = 100;
1118   for (int i = 0; i < 1000; ++i) {
1119     ReadFrameWithRewind(near_file_, frame_);
1120     // Ensure the audio is at a low level, so the AGC will try to increase it.
1121     ScaleFrame(frame_, 0.25);
1122
1123     EXPECT_EQ(apm_->kNoError,
1124         apm_->gain_control()->set_stream_analog_level(out_analog_level));
1125     EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1126     out_analog_level = apm_->gain_control()->stream_analog_level();
1127   }
1128
1129   // Ensure the volume was raised.
1130   EXPECT_GT(out_analog_level, 100);
1131   int highest_level_reached = out_analog_level;
1132   // Simulate a user manual volume change.
1133   out_analog_level = 100;
1134
1135   for (int i = 0; i < 300; ++i) {
1136     ReadFrameWithRewind(near_file_, frame_);
1137     ScaleFrame(frame_, 0.25);
1138
1139     EXPECT_EQ(apm_->kNoError,
1140         apm_->gain_control()->set_stream_analog_level(out_analog_level));
1141     EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1142     out_analog_level = apm_->gain_control()->stream_analog_level();
1143     // Check that AGC respected the manually adjusted volume.
1144     EXPECT_LT(out_analog_level, highest_level_reached);
1145   }
1146   // Check that the volume was still raised.
1147   EXPECT_GT(out_analog_level, 100);
1148 }
1149
1150 TEST_F(ApmTest, ManualVolumeChangeIsPossible) {
1151   for (size_t i = 0; i < kSampleRatesSize; ++i) {
1152     RunManualVolumeChangeIsPossibleTest(kSampleRates[i]);
1153   }
1154 }
1155
1156 TEST_F(ApmTest, NoiseSuppression) {
1157   // Test valid suppression levels.
1158   NoiseSuppression::Level level[] = {
1159     NoiseSuppression::kLow,
1160     NoiseSuppression::kModerate,
1161     NoiseSuppression::kHigh,
1162     NoiseSuppression::kVeryHigh
1163   };
1164   for (size_t i = 0; i < sizeof(level)/sizeof(*level); i++) {
1165     EXPECT_EQ(apm_->kNoError,
1166         apm_->noise_suppression()->set_level(level[i]));
1167     EXPECT_EQ(level[i], apm_->noise_suppression()->level());
1168   }
1169
1170   // Turn NS on/off
1171   EXPECT_EQ(apm_->kNoError, apm_->noise_suppression()->Enable(true));
1172   EXPECT_TRUE(apm_->noise_suppression()->is_enabled());
1173   EXPECT_EQ(apm_->kNoError, apm_->noise_suppression()->Enable(false));
1174   EXPECT_FALSE(apm_->noise_suppression()->is_enabled());
1175 }
1176
1177 TEST_F(ApmTest, HighPassFilter) {
1178   // Turn HP filter on/off
1179   EXPECT_EQ(apm_->kNoError, apm_->high_pass_filter()->Enable(true));
1180   EXPECT_TRUE(apm_->high_pass_filter()->is_enabled());
1181   EXPECT_EQ(apm_->kNoError, apm_->high_pass_filter()->Enable(false));
1182   EXPECT_FALSE(apm_->high_pass_filter()->is_enabled());
1183 }
1184
1185 TEST_F(ApmTest, LevelEstimator) {
1186   // Turn level estimator on/off
1187   EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(false));
1188   EXPECT_FALSE(apm_->level_estimator()->is_enabled());
1189
1190   EXPECT_EQ(apm_->kNotEnabledError, apm_->level_estimator()->RMS());
1191
1192   EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
1193   EXPECT_TRUE(apm_->level_estimator()->is_enabled());
1194
1195   // Run this test in wideband; in super-wb, the splitting filter distorts the
1196   // audio enough to cause deviation from the expectation for small values.
1197   frame_->samples_per_channel_ = 160;
1198   frame_->num_channels_ = 2;
1199   frame_->sample_rate_hz_ = 16000;
1200
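  // Note (added): RMS() reports the level as a positive number of dB below
  // full scale, so 0 corresponds to a full-scale signal and 127 is the
  // floor reported for silence, matching the expectations below.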
1201   // Min value if no frames have been processed.
1202   EXPECT_EQ(127, apm_->level_estimator()->RMS());
1203
1204   // Min value on zero frames.
1205   SetFrameTo(frame_, 0);
1206   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1207   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1208   EXPECT_EQ(127, apm_->level_estimator()->RMS());
1209
1210   // Try a few RMS values.
1211   // (These also test that the value resets after retrieving it.)
1212   SetFrameTo(frame_, 32767);
1213   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1214   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1215   EXPECT_EQ(0, apm_->level_estimator()->RMS());
1216
1217   SetFrameTo(frame_, 30000);
1218   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1219   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1220   EXPECT_EQ(1, apm_->level_estimator()->RMS());
1221
1222   SetFrameTo(frame_, 10000);
1223   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1224   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1225   EXPECT_EQ(10, apm_->level_estimator()->RMS());
1226
1227   SetFrameTo(frame_, 10);
1228   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1229   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1230   EXPECT_EQ(70, apm_->level_estimator()->RMS());
1231
1232   // Min value if energy_ == 0.
1233   SetFrameTo(frame_, 10000);
1234   uint32_t energy = frame_->energy_;  // Save default to restore below.
1235   frame_->energy_ = 0;
1236   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1237   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1238   EXPECT_EQ(127, apm_->level_estimator()->RMS());
1239   frame_->energy_ = energy;
1240
1241   // Verify reset after enable/disable.
1242   SetFrameTo(frame_, 32767);
1243   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1244   EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(false));
1245   EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
1246   SetFrameTo(frame_, 1);
1247   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1248   EXPECT_EQ(90, apm_->level_estimator()->RMS());
1249
1250   // Verify reset after initialize.
1251   SetFrameTo(frame_, 32767);
1252   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1253   EXPECT_EQ(apm_->kNoError, apm_->Initialize());
1254   SetFrameTo(frame_, 1);
1255   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1256   EXPECT_EQ(90, apm_->level_estimator()->RMS());
1257 }
1258
1259 TEST_F(ApmTest, VoiceDetection) {
1260   // Test external VAD
1261   EXPECT_EQ(apm_->kNoError,
1262             apm_->voice_detection()->set_stream_has_voice(true));
1263   EXPECT_TRUE(apm_->voice_detection()->stream_has_voice());
1264   EXPECT_EQ(apm_->kNoError,
1265             apm_->voice_detection()->set_stream_has_voice(false));
1266   EXPECT_FALSE(apm_->voice_detection()->stream_has_voice());
1267
1268   // Test valid likelihoods
1269   VoiceDetection::Likelihood likelihood[] = {
1270       VoiceDetection::kVeryLowLikelihood,
1271       VoiceDetection::kLowLikelihood,
1272       VoiceDetection::kModerateLikelihood,
1273       VoiceDetection::kHighLikelihood
1274   };
1275   for (size_t i = 0; i < sizeof(likelihood)/sizeof(*likelihood); i++) {
1276     EXPECT_EQ(apm_->kNoError,
1277               apm_->voice_detection()->set_likelihood(likelihood[i]));
1278     EXPECT_EQ(likelihood[i], apm_->voice_detection()->likelihood());
1279   }
1280
1281   /* TODO(bjornv): Enable once VAD supports other frame lengths than 10 ms
1282   // Test invalid frame sizes
1283   EXPECT_EQ(apm_->kBadParameterError,
1284       apm_->voice_detection()->set_frame_size_ms(12));
1285
1286   // Test valid frame sizes
1287   for (int i = 10; i <= 30; i += 10) {
1288     EXPECT_EQ(apm_->kNoError,
1289         apm_->voice_detection()->set_frame_size_ms(i));
1290     EXPECT_EQ(i, apm_->voice_detection()->frame_size_ms());
1291   }
1292   */
1293
1294   // Turn VAD on/off
1295   EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
1296   EXPECT_TRUE(apm_->voice_detection()->is_enabled());
1297   EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));
1298   EXPECT_FALSE(apm_->voice_detection()->is_enabled());
1299
1300   // Test that AudioFrame activity is maintained when VAD is disabled.
1301   EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));
1302   AudioFrame::VADActivity activity[] = {
1303       AudioFrame::kVadActive,
1304       AudioFrame::kVadPassive,
1305       AudioFrame::kVadUnknown
1306   };
1307   for (size_t i = 0; i < sizeof(activity)/sizeof(*activity); i++) {
1308     frame_->vad_activity_ = activity[i];
1309     EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1310     EXPECT_EQ(activity[i], frame_->vad_activity_);
1311   }
1312
1313   // Test that AudioFrame activity is set when VAD is enabled.
1314   EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
1315   frame_->vad_activity_ = AudioFrame::kVadUnknown;
1316   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1317   EXPECT_NE(AudioFrame::kVadUnknown, frame_->vad_activity_);
1318
1319   // TODO(bjornv): Add tests for streamed voice; stream_has_voice()
1320 }
1321
1322 TEST_F(ApmTest, AllProcessingDisabledByDefault) {
1323   EXPECT_FALSE(apm_->echo_cancellation()->is_enabled());
1324   EXPECT_FALSE(apm_->echo_control_mobile()->is_enabled());
1325   EXPECT_FALSE(apm_->gain_control()->is_enabled());
1326   EXPECT_FALSE(apm_->high_pass_filter()->is_enabled());
1327   EXPECT_FALSE(apm_->level_estimator()->is_enabled());
1328   EXPECT_FALSE(apm_->noise_suppression()->is_enabled());
1329   EXPECT_FALSE(apm_->voice_detection()->is_enabled());
1330 }
1331
1332 TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabled) {
1333   for (size_t i = 0; i < kSampleRatesSize; i++) {
1334     Init(kSampleRates[i], kSampleRates[i], kSampleRates[i], 2, 2, 2, false);
1335     SetFrameTo(frame_, 1000, 2000);
1336     AudioFrame frame_copy;
1337     frame_copy.CopyFrom(*frame_);
1338     for (int j = 0; j < 1000; j++) {
1339       EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1340       EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
1341     }
1342   }
1343 }
1344
1345 TEST_F(ApmTest, IdenticalInputChannelsResultInIdenticalOutputChannels) {
1346   EnableAllComponents();
1347
1348   for (size_t i = 0; i < kProcessSampleRatesSize; i++) {
1349     Init(kProcessSampleRates[i],
1350          kProcessSampleRates[i],
1351          kProcessSampleRates[i],
1352          2,
1353          2,
1354          2,
1355          false);
1356     int analog_level = 127;
1357     ASSERT_EQ(0, feof(far_file_));
1358     ASSERT_EQ(0, feof(near_file_));
1359     while (ReadFrame(far_file_, revframe_) && ReadFrame(near_file_, frame_)) {
1360       CopyLeftToRightChannel(revframe_->data_, revframe_->samples_per_channel_);
1361
1362       ASSERT_EQ(kNoErr, apm_->AnalyzeReverseStream(revframe_));
1363
1364       CopyLeftToRightChannel(frame_->data_, frame_->samples_per_channel_);
1365       frame_->vad_activity_ = AudioFrame::kVadUnknown;
1366
1367       ASSERT_EQ(kNoErr, apm_->set_stream_delay_ms(0));
1368       apm_->echo_cancellation()->set_stream_drift_samples(0);
1369       ASSERT_EQ(kNoErr,
1370           apm_->gain_control()->set_stream_analog_level(analog_level));
1371       ASSERT_EQ(kNoErr, apm_->ProcessStream(frame_));
1372       analog_level = apm_->gain_control()->stream_analog_level();
1373
1374       VerifyChannelsAreEqual(frame_->data_, frame_->samples_per_channel_);
1375     }
1376     rewind(far_file_);
1377     rewind(near_file_);
1378   }
1379 }
1380
1381 TEST_F(ApmTest, SplittingFilter) {
1382   // Verify the splitting filter leaves the audio undistorted when:
1383   // 1. No components are enabled...
1384   SetFrameTo(frame_, 1000);
1385   AudioFrame frame_copy;
1386   frame_copy.CopyFrom(*frame_);
1387   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1388   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1389   EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
1390
1391   // 2. Only the level estimator is enabled...
1392   SetFrameTo(frame_, 1000);
1393   frame_copy.CopyFrom(*frame_);
1394   EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
1395   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1396   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1397   EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
1398   EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(false));
1399
1400   // 3. Only VAD is enabled...
1401   SetFrameTo(frame_, 1000);
1402   frame_copy.CopyFrom(*frame_);
1403   EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
1404   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1405   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1406   EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
1407   EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));
1408
1409   // 4. Both VAD and the level estimator are enabled...
1410   SetFrameTo(frame_, 1000);
1411   frame_copy.CopyFrom(*frame_);
1412   EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
1413   EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
1414   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1415   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1416   EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
1417   EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(false));
1418   EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));
1419
1420   // 5. Not using super-wb.
1421   frame_->samples_per_channel_ = 160;
1422   frame_->num_channels_ = 2;
1423   frame_->sample_rate_hz_ = 16000;
1424   // Enable AEC, which would require the filter in super-wb. We rely on the
1425   // first few frames of data being unaffected by the AEC.
1426   // TODO(andrew): This test, and the one below, rely rather tenuously on the
1427   // behavior of the AEC. Think of something more robust.
1428   EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
1429   SetFrameTo(frame_, 1000);
1430   frame_copy.CopyFrom(*frame_);
1431   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
1432   apm_->echo_cancellation()->set_stream_drift_samples(0);
1433   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1434   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
1435   apm_->echo_cancellation()->set_stream_drift_samples(0);
1436   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1437   EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
1438
1439   // Check that the test is valid: in super-wb the splitting filter should
1440   // distort the audio when AEC is enabled (the AEC itself won't affect it).
1441   frame_->samples_per_channel_ = 320;
1442   frame_->num_channels_ = 2;
1443   frame_->sample_rate_hz_ = 32000;
1444   SetFrameTo(frame_, 1000);
1445   frame_copy.CopyFrom(*frame_);
1446   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
1447   apm_->echo_cancellation()->set_stream_drift_samples(0);
1448   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1449   EXPECT_FALSE(FrameDataAreEqual(*frame_, frame_copy));
1450 }
1451
1452 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
1453 void ApmTest::ProcessDebugDump(const std::string& in_filename,
1454                                const std::string& out_filename,
1455                                Format format) {
1456   FILE* in_file = fopen(in_filename.c_str(), "rb");
1457   ASSERT_TRUE(in_file != NULL);
1458   audioproc::Event event_msg;
1459   bool first_init = true;
1460
1461   while (ReadMessageFromFile(in_file, &event_msg)) {
1462     if (event_msg.type() == audioproc::Event::INIT) {
1463       const audioproc::Init msg = event_msg.init();
1464       int reverse_sample_rate = msg.sample_rate();
1465       if (msg.has_reverse_sample_rate()) {
1466         reverse_sample_rate = msg.reverse_sample_rate();
1467       }
1468       int output_sample_rate = msg.sample_rate();
1469       if (msg.has_output_sample_rate()) {
1470         output_sample_rate = msg.output_sample_rate();
1471       }
1472
1473       Init(msg.sample_rate(),
1474            output_sample_rate,
1475            reverse_sample_rate,
1476            msg.num_input_channels(),
1477            msg.num_output_channels(),
1478            msg.num_reverse_channels(),
1479            false);
1480       if (first_init) {
1481         // StartDebugRecording() writes an additional init message. Don't start
1482         // recording until after the first init to avoid the extra message.
1483         EXPECT_NOERR(apm_->StartDebugRecording(out_filename.c_str()));
1484         first_init = false;
1485       }
1486
1487     } else if (event_msg.type() == audioproc::Event::REVERSE_STREAM) {
1488       const audioproc::ReverseStream msg = event_msg.reverse_stream();
1489
1490       if (msg.channel_size() > 0) {
1491         ASSERT_EQ(revframe_->num_channels_, msg.channel_size());
1492         for (int i = 0; i < msg.channel_size(); ++i) {
1493            memcpy(revfloat_cb_->channel(i), msg.channel(i).data(),
1494                   msg.channel(i).size());
1495         }
1496       } else {
1497         memcpy(revframe_->data_, msg.data().data(), msg.data().size());
1498         if (format == kFloatFormat) {
1499           // We're using an int16 input file; convert to float.
1500           ConvertToFloat(*revframe_, revfloat_cb_.get());
1501         }
1502       }
1503       AnalyzeReverseStreamChooser(format);
1504
1505     } else if (event_msg.type() == audioproc::Event::STREAM) {
1506       const audioproc::Stream msg = event_msg.stream();
1507       // ProcessStream could have changed this for the output frame.
1508       frame_->num_channels_ = apm_->num_input_channels();
1509
1510       EXPECT_NOERR(apm_->gain_control()->set_stream_analog_level(msg.level()));
1511       EXPECT_NOERR(apm_->set_stream_delay_ms(msg.delay()));
1512       apm_->echo_cancellation()->set_stream_drift_samples(msg.drift());
1513       if (msg.has_keypress()) {
1514         apm_->set_stream_key_pressed(msg.keypress());
1515       } else {
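        // The dump has no keypress information; default to key pressed.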
1516         apm_->set_stream_key_pressed(true);
1517       }
1518
1519       if (msg.input_channel_size() > 0) {
1520         ASSERT_EQ(frame_->num_channels_, msg.input_channel_size());
1521         for (int i = 0; i < msg.input_channel_size(); ++i) {
1522            memcpy(float_cb_->channel(i), msg.input_channel(i).data(),
1523                   msg.input_channel(i).size());
1524         }
1525       } else {
1526         memcpy(frame_->data_, msg.input_data().data(), msg.input_data().size());
1527         if (format == kFloatFormat) {
1528           // We're using an int16 input file; convert to float.
1529           ConvertToFloat(*frame_, float_cb_.get());
1530         }
1531       }
1532       ProcessStreamChooser(format);
1533     }
1534   }
1535   EXPECT_NOERR(apm_->StopDebugRecording());
1536   fclose(in_file);
1537 }
1538
1539 void ApmTest::VerifyDebugDumpTest(Format format) {
1540   const std::string in_filename = test::ResourcePath("ref03", "aecdump");
1541   std::string format_string;
1542   switch (format) {
1543     case kIntFormat:
1544       format_string = "_int";
1545       break;
1546     case kFloatFormat:
1547       format_string = "_float";
1548       break;
1549   }
1550   const std::string ref_filename =
1551       test::OutputPath() + "ref" + format_string + ".aecdump";
1552   const std::string out_filename =
1553       test::OutputPath() + "out" + format_string + ".aecdump";
1554   EnableAllComponents();
1555   ProcessDebugDump(in_filename, ref_filename, format);
1556   ProcessDebugDump(ref_filename, out_filename, format);
1557
1558   FILE* ref_file = fopen(ref_filename.c_str(), "rb");
1559   FILE* out_file = fopen(out_filename.c_str(), "rb");
1560   ASSERT_TRUE(ref_file != NULL);
1561   ASSERT_TRUE(out_file != NULL);
1562   scoped_ptr<uint8_t[]> ref_bytes;
1563   scoped_ptr<uint8_t[]> out_bytes;
1564
1565   size_t ref_size = ReadMessageBytesFromFile(ref_file, &ref_bytes);
1566   size_t out_size = ReadMessageBytesFromFile(out_file, &out_bytes);
1567   size_t bytes_read = 0;
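  // Compare the two dumps message by message; they should be byte identical.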
1568   while (ref_size > 0 && out_size > 0) {
1569     bytes_read += ref_size;
1570     EXPECT_EQ(ref_size, out_size);
1571     EXPECT_EQ(0, memcmp(ref_bytes.get(), out_bytes.get(), ref_size));
1572     ref_size = ReadMessageBytesFromFile(ref_file, &ref_bytes);
1573     out_size = ReadMessageBytesFromFile(out_file, &out_bytes);
1574   }
1575   EXPECT_GT(bytes_read, 0u);
1576   EXPECT_NE(0, feof(ref_file));
1577   EXPECT_NE(0, feof(out_file));
1578   ASSERT_EQ(0, fclose(ref_file));
1579   ASSERT_EQ(0, fclose(out_file));
1580 }
1581
1582 TEST_F(ApmTest, VerifyDebugDumpInt) {
1583   VerifyDebugDumpTest(kIntFormat);
1584 }
1585
1586 TEST_F(ApmTest, VerifyDebugDumpFloat) {
1587   VerifyDebugDumpTest(kFloatFormat);
1588 }
1589 #endif
1590
1591 // TODO(andrew): expand test to verify output.
1592 TEST_F(ApmTest, DebugDump) {
1593   const std::string filename = test::OutputPath() + "debug.aec";
1594   EXPECT_EQ(apm_->kNullPointerError,
1595             apm_->StartDebugRecording(static_cast<const char*>(NULL)));
1596
1597 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
1598   // Stopping without having started should be OK.
1599   EXPECT_EQ(apm_->kNoError, apm_->StopDebugRecording());
1600
1601   EXPECT_EQ(apm_->kNoError, apm_->StartDebugRecording(filename.c_str()));
1602   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1603   EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));
1604   EXPECT_EQ(apm_->kNoError, apm_->StopDebugRecording());
1605
1606   // Verify the file has been written.
1607   FILE* fid = fopen(filename.c_str(), "r");
1608   ASSERT_TRUE(fid != NULL);
1609
1610   // Clean it up.
1611   ASSERT_EQ(0, fclose(fid));
1612   ASSERT_EQ(0, remove(filename.c_str()));
1613 #else
1614   EXPECT_EQ(apm_->kUnsupportedFunctionError,
1615             apm_->StartDebugRecording(filename.c_str()));
1616   EXPECT_EQ(apm_->kUnsupportedFunctionError, apm_->StopDebugRecording());
1617
1618   // Verify the file has NOT been written.
1619   ASSERT_TRUE(fopen(filename.c_str(), "r") == NULL);
1620 #endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
1621 }
1622
1623 // TODO(andrew): expand test to verify output.
1624 TEST_F(ApmTest, DebugDumpFromFileHandle) {
1625   FILE* fid = NULL;
1626   EXPECT_EQ(apm_->kNullPointerError, apm_->StartDebugRecording(fid));
1627   const std::string filename = test::OutputPath() + "debug.aec";
1628   fid = fopen(filename.c_str(), "w");
1629   ASSERT_TRUE(fid);
1630
1631 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
1632   // Stopping without having started should be OK.
1633   EXPECT_EQ(apm_->kNoError, apm_->StopDebugRecording());
1634
1635   EXPECT_EQ(apm_->kNoError, apm_->StartDebugRecording(fid));
1636   EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));
1637   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1638   EXPECT_EQ(apm_->kNoError, apm_->StopDebugRecording());
1639
1640   // Verify the file has been written.
1641   fid = fopen(filename.c_str(), "r");
1642   ASSERT_TRUE(fid != NULL);
1643
1644   // Clean it up.
1645   ASSERT_EQ(0, fclose(fid));
1646   ASSERT_EQ(0, remove(filename.c_str()));
1647 #else
1648   EXPECT_EQ(apm_->kUnsupportedFunctionError,
1649             apm_->StartDebugRecording(fid));
1650   EXPECT_EQ(apm_->kUnsupportedFunctionError, apm_->StopDebugRecording());
1651
1652   ASSERT_EQ(0, fclose(fid));
1653 #endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
1654 }
1655
1656 TEST_F(ApmTest, FloatAndIntInterfacesGiveIdenticalResults) {
1657   audioproc::OutputData ref_data;
1658   OpenFileAndReadMessage(ref_filename_, &ref_data);
1659
1660   Config config;
1661   config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
1662   scoped_ptr<AudioProcessing> fapm(AudioProcessing::Create(config));
1663   EnableAllComponents();
1664   EnableAllAPComponents(fapm.get());
1665   for (int i = 0; i < ref_data.test_size(); i++) {
1666     printf("Running test %d of %d...\n", i + 1, ref_data.test_size());
1667
1668     audioproc::Test* test = ref_data.mutable_test(i);
1669     // TODO(ajm): Restore downmixing test cases.
1670     if (test->num_input_channels() != test->num_output_channels())
1671       continue;
1672
1673     const int num_render_channels = test->num_reverse_channels();
1674     const int num_input_channels = test->num_input_channels();
1675     const int num_output_channels = test->num_output_channels();
1676     const int samples_per_channel = test->sample_rate() *
1677         AudioProcessing::kChunkSizeMs / 1000;
1678     const int output_length = samples_per_channel * num_output_channels;
1679
1680     Init(test->sample_rate(), test->sample_rate(), test->sample_rate(),
1681          num_input_channels, num_output_channels, num_render_channels, true);
1682     Init(fapm.get());
1683
1684     ChannelBuffer<int16_t> output_cb(samples_per_channel, num_input_channels);
1685     scoped_ptr<int16_t[]> output_int16(new int16_t[output_length]);
1686
1687     int analog_level = 127;
1688     while (ReadFrame(far_file_, revframe_, revfloat_cb_.get()) &&
1689            ReadFrame(near_file_, frame_, float_cb_.get())) {
1690       frame_->vad_activity_ = AudioFrame::kVadUnknown;
1691
1692       EXPECT_NOERR(apm_->AnalyzeReverseStream(revframe_));
1693       EXPECT_NOERR(fapm->AnalyzeReverseStream(
1694           revfloat_cb_->channels(),
1695           samples_per_channel,
1696           test->sample_rate(),
1697           LayoutFromChannels(num_render_channels)));
1698
1699       EXPECT_NOERR(apm_->set_stream_delay_ms(0));
1700       EXPECT_NOERR(fapm->set_stream_delay_ms(0));
1701       apm_->echo_cancellation()->set_stream_drift_samples(0);
1702       fapm->echo_cancellation()->set_stream_drift_samples(0);
1703       EXPECT_NOERR(apm_->gain_control()->set_stream_analog_level(analog_level));
1704       EXPECT_NOERR(fapm->gain_control()->set_stream_analog_level(analog_level));
1705
1706       EXPECT_NOERR(apm_->ProcessStream(frame_));
1707       // TODO(ajm): Update to support different output rates.
1708       EXPECT_NOERR(fapm->ProcessStream(
1709           float_cb_->channels(),
1710           samples_per_channel,
1711           test->sample_rate(),
1712           LayoutFromChannels(num_input_channels),
1713           test->sample_rate(),
1714           LayoutFromChannels(num_output_channels),
1715           float_cb_->channels()));
1716
1717       // Convert to interleaved int16.
1718       ScaleAndRoundToInt16(float_cb_->data(), output_length, output_cb.data());
1719       Interleave(output_cb.channels(),
1720                  samples_per_channel,
1721                  num_output_channels,
1722                  output_int16.get());
1723       // Verify float and int16 paths produce identical output.
1724       EXPECT_EQ(0, memcmp(frame_->data_, output_int16.get(), output_length));
1725
1726       analog_level = fapm->gain_control()->stream_analog_level();
1727       EXPECT_EQ(apm_->gain_control()->stream_analog_level(),
1728                 fapm->gain_control()->stream_analog_level());
1729       EXPECT_EQ(apm_->echo_cancellation()->stream_has_echo(),
1730                 fapm->echo_cancellation()->stream_has_echo());
1731       EXPECT_EQ(apm_->voice_detection()->stream_has_voice(),
1732                 fapm->voice_detection()->stream_has_voice());
1733       EXPECT_EQ(apm_->noise_suppression()->speech_probability(),
1734                 fapm->noise_suppression()->speech_probability());
1735
1736       // Reset in case of downmixing.
1737       frame_->num_channels_ = test->num_input_channels();
1738     }
1739     rewind(far_file_);
1740     rewind(near_file_);
1741   }
1742 }
1743
1744 // TODO(andrew): Add a test to process a few frames with different combinations
1745 // of enabled components.
1746
1747 // TODO(andrew): Make this test more robust such that it can be run on multiple
1748 // platforms. It currently requires bit-exactness.
1749 #ifdef WEBRTC_AUDIOPROC_BIT_EXACT
1750 TEST_F(ApmTest, DISABLED_ON_ANDROID(Process)) {
1751   GOOGLE_PROTOBUF_VERIFY_VERSION;
1752   audioproc::OutputData ref_data;
1753
1754   if (!write_ref_data) {
1755     OpenFileAndReadMessage(ref_filename_, &ref_data);
1756   } else {
1757     // Write the desired tests to the protobuf reference file.
1758     for (size_t i = 0; i < kChannelsSize; i++) {
1759       for (size_t j = 0; j < kChannelsSize; j++) {
1760         for (size_t l = 0; l < kProcessSampleRatesSize; l++) {
1761           audioproc::Test* test = ref_data.add_test();
1762           test->set_num_reverse_channels(kChannels[i]);
1763           test->set_num_input_channels(kChannels[j]);
1764           test->set_num_output_channels(kChannels[j]);
1765           test->set_sample_rate(kProcessSampleRates[l]);
1766         }
1767       }
1768     }
1769   }
1770
1771   EnableAllComponents();
1772
1773   for (int i = 0; i < ref_data.test_size(); i++) {
1774     printf("Running test %d of %d...\n", i + 1, ref_data.test_size());
1775
1776     audioproc::Test* test = ref_data.mutable_test(i);
1777     // TODO(ajm): We no longer allow different input and output channels. Skip
1778     // these tests for now, but they should be removed from the set.
1779     if (test->num_input_channels() != test->num_output_channels())
1780       continue;
1781
1782     Init(test->sample_rate(),
1783          test->sample_rate(),
1784          test->sample_rate(),
1785          test->num_input_channels(),
1786          test->num_output_channels(),
1787          test->num_reverse_channels(),
1788          true);
1789
1790     int frame_count = 0;
1791     int has_echo_count = 0;
1792     int has_voice_count = 0;
1793     int is_saturated_count = 0;
1794     int analog_level = 127;
1795     int analog_level_average = 0;
1796     int max_output_average = 0;
1797     float ns_speech_prob_average = 0.0f;
1798
1799     while (ReadFrame(far_file_, revframe_) && ReadFrame(near_file_, frame_)) {
1800       EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));
1801
1802       frame_->vad_activity_ = AudioFrame::kVadUnknown;
1803
1804       EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
1805       apm_->echo_cancellation()->set_stream_drift_samples(0);
1806       EXPECT_EQ(apm_->kNoError,
1807           apm_->gain_control()->set_stream_analog_level(analog_level));
1808
1809       EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
1810
1811       // Ensure the frame was downmixed properly.
1812       EXPECT_EQ(test->num_output_channels(), frame_->num_channels_);
1813
1814       max_output_average += MaxAudioFrame(*frame_);
1815
1816       if (apm_->echo_cancellation()->stream_has_echo()) {
1817         has_echo_count++;
1818       }
1819
1820       analog_level = apm_->gain_control()->stream_analog_level();
1821       analog_level_average += analog_level;
1822       if (apm_->gain_control()->stream_is_saturated()) {
1823         is_saturated_count++;
1824       }
1825       if (apm_->voice_detection()->stream_has_voice()) {
1826         has_voice_count++;
1827         EXPECT_EQ(AudioFrame::kVadActive, frame_->vad_activity_);
1828       } else {
1829         EXPECT_EQ(AudioFrame::kVadPassive, frame_->vad_activity_);
1830       }
1831
1832       ns_speech_prob_average += apm_->noise_suppression()->speech_probability();
1833
1834       size_t frame_size = frame_->samples_per_channel_ * frame_->num_channels_;
1835       size_t write_count = fwrite(frame_->data_,
1836                                   sizeof(int16_t),
1837                                   frame_size,
1838                                   out_file_);
1839       ASSERT_EQ(frame_size, write_count);
1840
1841       // Reset in case of downmixing.
1842       frame_->num_channels_ = test->num_input_channels();
1843       frame_count++;
1844     }
1845     max_output_average /= frame_count;
1846     analog_level_average /= frame_count;
1847     ns_speech_prob_average /= frame_count;
1848
1849 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
1850     EchoCancellation::Metrics echo_metrics;
1851     EXPECT_EQ(apm_->kNoError,
1852               apm_->echo_cancellation()->GetMetrics(&echo_metrics));
1853     int median = 0;
1854     int std = 0;
1855     EXPECT_EQ(apm_->kNoError,
1856               apm_->echo_cancellation()->GetDelayMetrics(&median, &std));
1857
1858     int rms_level = apm_->level_estimator()->RMS();
1859     EXPECT_LE(0, rms_level);
1860     EXPECT_GE(127, rms_level);
1861 #endif
1862
1863     if (!write_ref_data) {
1864       EXPECT_EQ(test->has_echo_count(), has_echo_count);
1865       EXPECT_EQ(test->has_voice_count(), has_voice_count);
1866       EXPECT_EQ(test->is_saturated_count(), is_saturated_count);
1867
1868       EXPECT_EQ(test->analog_level_average(), analog_level_average);
1869       EXPECT_EQ(test->max_output_average(), max_output_average);
1870
1871 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
1872       audioproc::Test::EchoMetrics reference = test->echo_metrics();
1873       TestStats(echo_metrics.residual_echo_return_loss,
1874                 reference.residual_echo_return_loss());
1875       TestStats(echo_metrics.echo_return_loss,
1876                 reference.echo_return_loss());
1877       TestStats(echo_metrics.echo_return_loss_enhancement,
1878                 reference.echo_return_loss_enhancement());
1879       TestStats(echo_metrics.a_nlp,
1880                 reference.a_nlp());
1881
1882       audioproc::Test::DelayMetrics reference_delay = test->delay_metrics();
1883       EXPECT_EQ(reference_delay.median(), median);
1884       EXPECT_EQ(reference_delay.std(), std);
1885
1886       EXPECT_EQ(test->rms_level(), rms_level);
1887
1888       EXPECT_FLOAT_EQ(test->ns_speech_probability_average(),
1889                       ns_speech_prob_average);
1890 #endif
1891     } else {
1892       test->set_has_echo_count(has_echo_count);
1893       test->set_has_voice_count(has_voice_count);
1894       test->set_is_saturated_count(is_saturated_count);
1895
1896       test->set_analog_level_average(analog_level_average);
1897       test->set_max_output_average(max_output_average);
1898
1899 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
1900       audioproc::Test::EchoMetrics* message = test->mutable_echo_metrics();
1901       WriteStatsMessage(echo_metrics.residual_echo_return_loss,
1902                         message->mutable_residual_echo_return_loss());
1903       WriteStatsMessage(echo_metrics.echo_return_loss,
1904                         message->mutable_echo_return_loss());
1905       WriteStatsMessage(echo_metrics.echo_return_loss_enhancement,
1906                         message->mutable_echo_return_loss_enhancement());
1907       WriteStatsMessage(echo_metrics.a_nlp,
1908                         message->mutable_a_nlp());
1909
1910       audioproc::Test::DelayMetrics* message_delay =
1911           test->mutable_delay_metrics();
1912       message_delay->set_median(median);
1913       message_delay->set_std(std);
1914
1915       test->set_rms_level(rms_level);
1916
1917       EXPECT_LE(0.0f, ns_speech_prob_average);
1918       EXPECT_GE(1.0f, ns_speech_prob_average);
1919       test->set_ns_speech_probability_average(ns_speech_prob_average);
1920 #endif
1921     }
1922
1923     rewind(far_file_);
1924     rewind(near_file_);
1925   }
1926
1927   if (write_ref_data) {
1928     OpenFileAndWriteMessage(ref_filename_, ref_data);
1929   }
1930 }
1931
1932 #endif  // WEBRTC_AUDIOPROC_BIT_EXACT
1933
1934 TEST_F(ApmTest, NoErrorsWithKeyboardChannel) {
1935   struct ChannelFormat {
1936     AudioProcessing::ChannelLayout in_layout;
1937     AudioProcessing::ChannelLayout out_layout;
1938   };
1939   ChannelFormat cf[] = {
1940     {AudioProcessing::kMonoAndKeyboard, AudioProcessing::kMono},
1941     {AudioProcessing::kStereoAndKeyboard, AudioProcessing::kMono},
1942     {AudioProcessing::kStereoAndKeyboard, AudioProcessing::kStereo},
1943   };
1944   size_t channel_format_size = sizeof(cf) / sizeof(*cf);
1945
1946   scoped_ptr<AudioProcessing> ap(AudioProcessing::Create());
1947   // Enable one component just to ensure some processing takes place.
1948   ap->noise_suppression()->Enable(true);
1949   for (size_t i = 0; i < channel_format_size; ++i) {
1950     const int in_rate = 44100;
1951     const int out_rate = 48000;
1952     ChannelBuffer<float> in_cb(SamplesFromRate(in_rate),
1953                                TotalChannelsFromLayout(cf[i].in_layout));
1954     ChannelBuffer<float> out_cb(SamplesFromRate(out_rate),
1955                                 ChannelsFromLayout(cf[i].out_layout));
1956
1957     // Run over a few chunks.
1958     for (int j = 0; j < 10; ++j) {
1959       EXPECT_NOERR(ap->ProcessStream(
1960           in_cb.channels(),
1961           in_cb.samples_per_channel(),
1962           in_rate,
1963           cf[i].in_layout,
1964           out_rate,
1965           cf[i].out_layout,
1966           out_cb.channels()));
1967     }
1968   }
1969 }
1970
1971 // Reads a 10 ms chunk of int16 interleaved audio from the given (assumed
1972 // stereo) file, converts to deinterleaved float (optionally downmixing) and
1973 // returns the result in |cb|. Returns false if the file ended (or on error) and
1974 // true otherwise.
1975 //
1976 // |int_data| and |float_data| are just temporary space that must be
1977 // sufficiently large to hold the 10 ms chunk.
1978 bool ReadChunk(FILE* file, int16_t* int_data, float* float_data,
1979                ChannelBuffer<float>* cb) {
1980   // The files always contain stereo audio.
1981   size_t frame_size = cb->samples_per_channel() * 2;
1982   size_t read_count = fread(int_data, sizeof(int16_t), frame_size, file);
1983   if (read_count != frame_size) {
1984     // Check that the file really ended.
1985     assert(feof(file));
1986     return false;  // This is expected.
1987   }
1988
1989   ScaleToFloat(int_data, frame_size, float_data);
1990   if (cb->num_channels() == 1) {
1991     MixStereoToMono(float_data, cb->data(), cb->samples_per_channel());
1992   } else {
1993     Deinterleave(float_data, cb->samples_per_channel(), 2,
1994                  cb->channels());
1995   }
1996
1997   return true;
1998 }
1999
2000 // Compares the reference and test arrays over a region around the expected
2001 // delay. Finds the highest SNR in that region and adds the variance and squared
2002 // error results to the supplied accumulators.
2003 void UpdateBestSNR(const float* ref,
2004                    const float* test,
2005                    int length,
2006                    int expected_delay,
2007                    double* variance_acc,
2008                    double* sq_error_acc) {
2009   double best_snr = std::numeric_limits<double>::min();
2010   double best_variance = 0;
2011   double best_sq_error = 0;
2012   // Search over a region of +/- 4 samples around the expected delay.
2013   for (int delay = std::max(expected_delay - 4, 0); delay <= expected_delay + 4;
2014        ++delay) {
2015     double sq_error = 0;
2016     double variance = 0;
2017     for (int i = 0; i < length - delay; ++i) {
2018       double error = test[i + delay] - ref[i];
2019       sq_error += error * error;
2020       variance += ref[i] * ref[i];
2021     }
2022
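    // Zero squared error means an exact match (infinite SNR); accumulate the
    // variance and stop searching.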
2023     if (sq_error == 0) {
2024       *variance_acc += variance;
2025       return;
2026     }
2027     double snr = variance / sq_error;
2028     if (snr > best_snr) {
2029       best_snr = snr;
2030       best_variance = variance;
2031       best_sq_error = sq_error;
2032     }
2033   }
2034
2035   *variance_acc += best_variance;
2036   *sq_error_acc += best_sq_error;
2037 }
2038
2039 // Used to test a multitude of sample rate and channel combinations. It works
2040 // by first producing a set of reference files (in SetUpTestCase) that are
2041 // assumed to be correct, as the parameters they use are verified by other
2042 // tests in this collection. Importantly, the reference files are all produced
2043 // at "native" rates which do not involve any resampling.
2044
2045 // Each test pass produces an output file with a particular format. The output
2046 // is matched against the reference file closest to its internal processing
2047 // format; if necessary, the output is first resampled back to that rate.
2048 // Due to the resampling distortion, we don't expect identical results, but
2049 // enforce SNR thresholds which vary depending on the format. An expected SNR
2050 // of 0 is a special case and corresponds to infinite SNR, i.e. zero error.
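// (The Formats test below computes the SNR compared against this threshold as
// 10 * log10(variance / sq_error), using the accumulators from UpdateBestSNR.)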
2051 typedef std::tr1::tuple<int, int, int, double> AudioProcessingTestData;
2052 class AudioProcessingTest
2053     : public testing::TestWithParam<AudioProcessingTestData> {
2054  public:
2055   AudioProcessingTest()
2056       : input_rate_(std::tr1::get<0>(GetParam())),
2057         output_rate_(std::tr1::get<1>(GetParam())),
2058         reverse_rate_(std::tr1::get<2>(GetParam())),
2059         expected_snr_(std::tr1::get<3>(GetParam())) {}
2060
2061   virtual ~AudioProcessingTest() {}
2062
2063   static void SetUpTestCase() {
2064     // Create all needed output reference files.
2065     const int kNativeRates[] = {8000, 16000, 32000};
2066     const size_t kNativeRatesSize =
2067         sizeof(kNativeRates) / sizeof(*kNativeRates);
2068     const int kNumChannels[] = {1, 2};
2069     const size_t kNumChannelsSize =
2070         sizeof(kNumChannels) / sizeof(*kNumChannels);
2071     for (size_t i = 0; i < kNativeRatesSize; ++i) {
2072       for (size_t j = 0; j < kNumChannelsSize; ++j) {
2073         for (size_t k = 0; k < kNumChannelsSize; ++k) {
2074           // The reference files always have matching input and output channels.
2075           ProcessFormat(kNativeRates[i],
2076                         kNativeRates[i],
2077                         kNativeRates[i],
2078                         kNumChannels[j],
2079                         kNumChannels[j],
2080                         kNumChannels[k],
2081                         "ref");
2082         }
2083       }
2084     }
2085   }
2086
2087   // Runs a process pass on files with the given parameters and dumps the output
2088   // to a file whose name is derived from |output_file_prefix|.
2089   static void ProcessFormat(int input_rate,
2090                             int output_rate,
2091                             int reverse_rate,
2092                             int num_input_channels,
2093                             int num_output_channels,
2094                             int num_reverse_channels,
2095                             std::string output_file_prefix) {
2096     scoped_ptr<AudioProcessing> ap(AudioProcessing::Create());
2097     EnableAllAPComponents(ap.get());
2098     ap->Initialize(input_rate,
2099                    output_rate,
2100                    reverse_rate,
2101                    LayoutFromChannels(num_input_channels),
2102                    LayoutFromChannels(num_output_channels),
2103                    LayoutFromChannels(num_reverse_channels));
2104
2105     FILE* far_file = fopen(ResourceFilePath("far", reverse_rate).c_str(), "rb");
2106     FILE* near_file = fopen(ResourceFilePath("near", input_rate).c_str(), "rb");
2107     FILE* out_file = fopen(OutputFilePath(output_file_prefix,
2108                                           input_rate,
2109                                           output_rate,
2110                                           reverse_rate,
2111                                           num_input_channels,
2112                                           num_output_channels,
2113                                           num_reverse_channels).c_str(), "wb");
2114     ASSERT_TRUE(far_file != NULL);
2115     ASSERT_TRUE(near_file != NULL);
2116     ASSERT_TRUE(out_file != NULL);
2117
2118     ChannelBuffer<float> fwd_cb(SamplesFromRate(input_rate),
2119                                 num_input_channels);
2120     ChannelBuffer<float> rev_cb(SamplesFromRate(reverse_rate),
2121                                 num_reverse_channels);
2122     ChannelBuffer<float> out_cb(SamplesFromRate(output_rate),
2123                                 num_output_channels);
2124
2125     // Temporary buffers.
2126     const int max_length =
2127         2 * std::max(out_cb.samples_per_channel(),
2128                      std::max(fwd_cb.samples_per_channel(),
2129                               rev_cb.samples_per_channel()));
2130     scoped_ptr<float[]> float_data(new float[max_length]);
2131     scoped_ptr<int16_t[]> int_data(new int16_t[max_length]);
2132
2133     int analog_level = 127;
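    // Simulated analog mic level; fed to the AGC before each frame and read
    // back after processing.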
2134     while (ReadChunk(far_file, int_data.get(), float_data.get(), &rev_cb) &&
2135            ReadChunk(near_file, int_data.get(), float_data.get(), &fwd_cb)) {
2136       EXPECT_NOERR(ap->AnalyzeReverseStream(
2137           rev_cb.channels(),
2138           rev_cb.samples_per_channel(),
2139           reverse_rate,
2140           LayoutFromChannels(num_reverse_channels)));
2141
2142       EXPECT_NOERR(ap->set_stream_delay_ms(0));
2143       ap->echo_cancellation()->set_stream_drift_samples(0);
2144       EXPECT_NOERR(ap->gain_control()->set_stream_analog_level(analog_level));
2145
2146       EXPECT_NOERR(ap->ProcessStream(
2147           fwd_cb.channels(),
2148           fwd_cb.samples_per_channel(),
2149           input_rate,
2150           LayoutFromChannels(num_input_channels),
2151           output_rate,
2152           LayoutFromChannels(num_output_channels),
2153           out_cb.channels()));
2154
2155       Interleave(out_cb.channels(),
2156                  out_cb.samples_per_channel(),
2157                  out_cb.num_channels(),
2158                  float_data.get());
2159       // Dump output to file.
2160       ASSERT_EQ(static_cast<size_t>(out_cb.length()),
2161                 fwrite(float_data.get(), sizeof(float_data[0]),
2162                        out_cb.length(), out_file));
2163
2164       analog_level = ap->gain_control()->stream_analog_level();
2165     }
2166     fclose(far_file);
2167     fclose(near_file);
2168     fclose(out_file);
2169   }
2170
2171  protected:
2172   int input_rate_;
2173   int output_rate_;
2174   int reverse_rate_;
2175   double expected_snr_;
2176 };
2177
2178 TEST_P(AudioProcessingTest, Formats) {
2179   struct ChannelFormat {
2180     int num_input;
2181     int num_output;
2182     int num_reverse;
2183   };
2184   ChannelFormat cf[] = {
2185     {1, 1, 1},
2186     {1, 1, 2},
2187     {2, 1, 1},
2188     {2, 1, 2},
2189     {2, 2, 1},
2190     {2, 2, 2},
2191   };
2192   size_t channel_format_size = sizeof(cf) / sizeof(*cf);
2193
2194   for (size_t i = 0; i < channel_format_size; ++i) {
2195     ProcessFormat(input_rate_,
2196                   output_rate_,
2197                   reverse_rate_,
2198                   cf[i].num_input,
2199                   cf[i].num_output,
2200                   cf[i].num_reverse,
2201                   "out");
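    // Pick as reference the closest native processing rate at or below the
    // lower of the input and output rates.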
2202     int min_ref_rate = std::min(input_rate_, output_rate_);
2203     int ref_rate;
2204     if (min_ref_rate > 16000) {
2205       ref_rate = 32000;
2206     } else if (min_ref_rate > 8000) {
2207       ref_rate = 16000;
2208     } else {
2209       ref_rate = 8000;
2210     }
2211 #ifdef WEBRTC_AUDIOPROC_FIXED_PROFILE
2212     ref_rate = std::min(ref_rate, 16000);
2213 #endif
2214
2215     FILE* out_file = fopen(OutputFilePath("out",
2216                                           input_rate_,
2217                                           output_rate_,
2218                                           reverse_rate_,
2219                                           cf[i].num_input,
2220                                           cf[i].num_output,
2221                                           cf[i].num_reverse).c_str(), "rb");
2222     // The reference files always have matching input and output channels.
2223     FILE* ref_file = fopen(OutputFilePath("ref",
2224                                           ref_rate,
2225                                           ref_rate,
2226                                           ref_rate,
2227                                           cf[i].num_output,
2228                                           cf[i].num_output,
2229                                           cf[i].num_reverse).c_str(), "rb");
2230     ASSERT_TRUE(out_file != NULL);
2231     ASSERT_TRUE(ref_file != NULL);
2232
2233     const int ref_length = SamplesFromRate(ref_rate) * cf[i].num_output;
2234     const int out_length = SamplesFromRate(output_rate_) * cf[i].num_output;
2235     // Data from the reference file.
2236     scoped_ptr<float[]> ref_data(new float[ref_length]);
2237     // Data from the output file.
2238     scoped_ptr<float[]> out_data(new float[out_length]);
2239     // Data from the resampled output, in case the reference and output rates
2240     // don't match.
2241     scoped_ptr<float[]> cmp_data(new float[ref_length]);
2242
2243     PushResampler<float> resampler;
2244     resampler.InitializeIfNeeded(output_rate_, ref_rate, cf[i].num_output);
2245
2246     // Compute the resampling delay of the output relative to the reference,
2247     // to find the region over which we should search for the best SNR.
2248     float expected_delay_sec = 0;
2249     if (input_rate_ != ref_rate) {
2250       // Input resampling delay.
2251       expected_delay_sec +=
2252           PushSincResampler::AlgorithmicDelaySeconds(input_rate_);
2253     }
2254     if (output_rate_ != ref_rate) {
2255       // Output resampling delay.
2256       expected_delay_sec +=
2257           PushSincResampler::AlgorithmicDelaySeconds(ref_rate);
2258       // Delay of converting the output back to its processing rate for testing.
2259       expected_delay_sec +=
2260           PushSincResampler::AlgorithmicDelaySeconds(output_rate_);
2261     }
2262     int expected_delay = floor(expected_delay_sec * ref_rate + 0.5f) *
2263                          cf[i].num_output;
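    // The output and reference data are interleaved, so scale the delay by the
    // channel count to express it in interleaved samples.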
2264
2265     double variance = 0;
2266     double sq_error = 0;
2267     while (fread(out_data.get(), sizeof(out_data[0]), out_length, out_file) &&
2268            fread(ref_data.get(), sizeof(ref_data[0]), ref_length, ref_file)) {
2269       float* out_ptr = out_data.get();
2270       if (output_rate_ != ref_rate) {
2271         // Resample the output back to its internal processing rate if necessary.
2272         ASSERT_EQ(ref_length, resampler.Resample(out_ptr,
2273                                                  out_length,
2274                                                  cmp_data.get(),
2275                                                  ref_length));
2276         out_ptr = cmp_data.get();
2277       }
2278
2279       // Update the |sq_error| and |variance| accumulators with the highest SNR
2280       // of reference vs output.
2281       UpdateBestSNR(ref_data.get(),
2282                     out_ptr,
2283                     ref_length,
2284                     expected_delay,
2285                     &variance,
2286                     &sq_error);
2287     }
2288
2289     std::cout << "(" << input_rate_ << ", "
2290                      << output_rate_ << ", "
2291                      << reverse_rate_ << ", "
2292                      << cf[i].num_input << ", "
2293                      << cf[i].num_output << ", "
2294                      << cf[i].num_reverse << "): ";
2295     if (sq_error > 0) {
2296       double snr = 10 * log10(variance / sq_error);
2297       EXPECT_GE(snr, expected_snr_);
2298       EXPECT_NE(0, expected_snr_);
2299       std::cout << "SNR=" << snr << " dB" << std::endl;
2300     } else {
2301       EXPECT_EQ(expected_snr_, 0);
2302       std::cout << "SNR=" << "inf dB" << std::endl;
2303     }
2304
2305     fclose(out_file);
2306     fclose(ref_file);
2307   }
2308 }
2309
2310 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
2311 INSTANTIATE_TEST_CASE_P(
2312     CommonFormats, AudioProcessingTest, testing::Values(
2313         std::tr1::make_tuple(48000, 48000, 48000, 25),
2314         std::tr1::make_tuple(48000, 48000, 32000, 25),
2315         std::tr1::make_tuple(48000, 48000, 16000, 25),
2316         std::tr1::make_tuple(48000, 44100, 48000, 20),
2317         std::tr1::make_tuple(48000, 44100, 32000, 20),
2318         std::tr1::make_tuple(48000, 44100, 16000, 20),
2319         std::tr1::make_tuple(48000, 32000, 48000, 25),
2320         std::tr1::make_tuple(48000, 32000, 32000, 25),
2321         std::tr1::make_tuple(48000, 32000, 16000, 25),
2322         std::tr1::make_tuple(48000, 16000, 48000, 25),
2323         std::tr1::make_tuple(48000, 16000, 32000, 25),
2324         std::tr1::make_tuple(48000, 16000, 16000, 25),
2325
2326         std::tr1::make_tuple(44100, 48000, 48000, 20),
2327         std::tr1::make_tuple(44100, 48000, 32000, 20),
2328         std::tr1::make_tuple(44100, 48000, 16000, 20),
2329         std::tr1::make_tuple(44100, 44100, 48000, 20),
2330         std::tr1::make_tuple(44100, 44100, 32000, 20),
2331         std::tr1::make_tuple(44100, 44100, 16000, 20),
2332         std::tr1::make_tuple(44100, 32000, 48000, 20),
2333         std::tr1::make_tuple(44100, 32000, 32000, 20),
2334         std::tr1::make_tuple(44100, 32000, 16000, 20),
2335         std::tr1::make_tuple(44100, 16000, 48000, 20),
2336         std::tr1::make_tuple(44100, 16000, 32000, 20),
2337         std::tr1::make_tuple(44100, 16000, 16000, 20),
2338
2339         std::tr1::make_tuple(32000, 48000, 48000, 25),
2340         std::tr1::make_tuple(32000, 48000, 32000, 25),
2341         std::tr1::make_tuple(32000, 48000, 16000, 25),
2342         std::tr1::make_tuple(32000, 44100, 48000, 20),
2343         std::tr1::make_tuple(32000, 44100, 32000, 20),
2344         std::tr1::make_tuple(32000, 44100, 16000, 20),
2345         std::tr1::make_tuple(32000, 32000, 48000, 30),
2346         std::tr1::make_tuple(32000, 32000, 32000, 0),
2347         std::tr1::make_tuple(32000, 32000, 16000, 30),
2348         std::tr1::make_tuple(32000, 16000, 48000, 25),
2349         std::tr1::make_tuple(32000, 16000, 32000, 25),
2350         std::tr1::make_tuple(32000, 16000, 16000, 25),
2351
2352         std::tr1::make_tuple(16000, 48000, 48000, 25),
2353         std::tr1::make_tuple(16000, 48000, 32000, 25),
2354         std::tr1::make_tuple(16000, 48000, 16000, 25),
2355         std::tr1::make_tuple(16000, 44100, 48000, 15),
2356         std::tr1::make_tuple(16000, 44100, 32000, 15),
2357         std::tr1::make_tuple(16000, 44100, 16000, 15),
2358         std::tr1::make_tuple(16000, 32000, 48000, 25),
2359         std::tr1::make_tuple(16000, 32000, 32000, 25),
2360         std::tr1::make_tuple(16000, 32000, 16000, 25),
2361         std::tr1::make_tuple(16000, 16000, 48000, 30),
2362         std::tr1::make_tuple(16000, 16000, 32000, 30),
2363         std::tr1::make_tuple(16000, 16000, 16000, 0)));
2364
2365 #elif defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
2366 INSTANTIATE_TEST_CASE_P(
2367     CommonFormats, AudioProcessingTest, testing::Values(
2368         std::tr1::make_tuple(48000, 48000, 48000, 20),
2369         std::tr1::make_tuple(48000, 48000, 32000, 20),
2370         std::tr1::make_tuple(48000, 48000, 16000, 20),
2371         std::tr1::make_tuple(48000, 44100, 48000, 15),
2372         std::tr1::make_tuple(48000, 44100, 32000, 15),
2373         std::tr1::make_tuple(48000, 44100, 16000, 15),
2374         std::tr1::make_tuple(48000, 32000, 48000, 20),
2375         std::tr1::make_tuple(48000, 32000, 32000, 20),
2376         std::tr1::make_tuple(48000, 32000, 16000, 20),
2377         std::tr1::make_tuple(48000, 16000, 48000, 20),
2378         std::tr1::make_tuple(48000, 16000, 32000, 20),
2379         std::tr1::make_tuple(48000, 16000, 16000, 20),
2380
2381         std::tr1::make_tuple(44100, 48000, 48000, 19),
2382         std::tr1::make_tuple(44100, 48000, 32000, 19),
2383         std::tr1::make_tuple(44100, 48000, 16000, 19),
2384         std::tr1::make_tuple(44100, 44100, 48000, 15),
2385         std::tr1::make_tuple(44100, 44100, 32000, 15),
2386         std::tr1::make_tuple(44100, 44100, 16000, 15),
2387         std::tr1::make_tuple(44100, 32000, 48000, 19),
2388         std::tr1::make_tuple(44100, 32000, 32000, 19),
2389         std::tr1::make_tuple(44100, 32000, 16000, 19),
2390         std::tr1::make_tuple(44100, 16000, 48000, 19),
2391         std::tr1::make_tuple(44100, 16000, 32000, 19),
2392         std::tr1::make_tuple(44100, 16000, 16000, 19),
2393
2394         std::tr1::make_tuple(32000, 48000, 48000, 19),
2395         std::tr1::make_tuple(32000, 48000, 32000, 19),
2396         std::tr1::make_tuple(32000, 48000, 16000, 19),
2397         std::tr1::make_tuple(32000, 44100, 48000, 15),
2398         std::tr1::make_tuple(32000, 44100, 32000, 15),
2399         std::tr1::make_tuple(32000, 44100, 16000, 15),
2400         std::tr1::make_tuple(32000, 32000, 48000, 19),
2401         std::tr1::make_tuple(32000, 32000, 32000, 19),
2402         std::tr1::make_tuple(32000, 32000, 16000, 19),
2403         std::tr1::make_tuple(32000, 16000, 48000, 19),
2404         std::tr1::make_tuple(32000, 16000, 32000, 19),
2405         std::tr1::make_tuple(32000, 16000, 16000, 19),
2406
2407         std::tr1::make_tuple(16000, 48000, 48000, 25),
2408         std::tr1::make_tuple(16000, 48000, 32000, 25),
2409         std::tr1::make_tuple(16000, 48000, 16000, 25),
2410         std::tr1::make_tuple(16000, 44100, 48000, 15),
2411         std::tr1::make_tuple(16000, 44100, 32000, 15),
2412         std::tr1::make_tuple(16000, 44100, 16000, 15),
2413         std::tr1::make_tuple(16000, 32000, 48000, 25),
2414         std::tr1::make_tuple(16000, 32000, 32000, 25),
2415         std::tr1::make_tuple(16000, 32000, 16000, 25),
2416         std::tr1::make_tuple(16000, 16000, 48000, 30),
2417         std::tr1::make_tuple(16000, 16000, 32000, 30),
2418         std::tr1::make_tuple(16000, 16000, 16000, 0)));
2419 #endif
2420
2421 // TODO(henrike): re-implement functionality lost when removing the old main
2422 //                function. See
2423 //                https://code.google.com/p/webrtc/issues/detail?id=1981
2424
2425 }  // namespace
2426 }  // namespace webrtc