Upstream version 5.34.104.0
[platform/framework/web/crosswalk.git] / src / content / renderer / media / webrtc_audio_device_unittest.cc
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <vector>
6
7 #include "base/environment.h"
8 #include "base/file_util.h"
9 #include "base/files/file_path.h"
10 #include "base/path_service.h"
11 #include "base/strings/stringprintf.h"
12 #include "base/test/test_timeouts.h"
13 #include "content/renderer/media/mock_media_stream_dependency_factory.h"
14 #include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
15 #include "content/renderer/media/webrtc_audio_capturer.h"
16 #include "content/renderer/media/webrtc_audio_device_impl.h"
17 #include "content/renderer/media/webrtc_audio_renderer.h"
18 #include "content/renderer/media/webrtc_local_audio_track.h"
19 #include "content/renderer/render_thread_impl.h"
20 #include "content/test/webrtc_audio_device_test.h"
21 #include "media/audio/audio_manager_base.h"
22 #include "media/base/audio_hardware_config.h"
23 #include "testing/gmock/include/gmock/gmock.h"
24 #include "third_party/WebKit/public/platform/WebMediaConstraints.h"
25 #include "third_party/webrtc/voice_engine/include/voe_audio_processing.h"
26 #include "third_party/webrtc/voice_engine/include/voe_base.h"
27 #include "third_party/webrtc/voice_engine/include/voe_codec.h"
28 #include "third_party/webrtc/voice_engine/include/voe_external_media.h"
29 #include "third_party/webrtc/voice_engine/include/voe_file.h"
30 #include "third_party/webrtc/voice_engine/include/voe_network.h"
31
32 #if defined(OS_WIN)
33 #include "base/win/windows_version.h"
34 #endif
35
36 using media::AudioParameters;
37 using media::CHANNEL_LAYOUT_STEREO;
38 using testing::_;
39 using testing::AnyNumber;
40 using testing::InvokeWithoutArgs;
41 using testing::Return;
42 using testing::StrEq;
43
44 namespace content {
45
46 namespace {
47
48 const int kRenderViewId = 1;
49
50 // The number of packets that RunWebRtcLoopbackTimeTest() uses for measurement.
51 const int kNumberOfPacketsForLoopbackTest = 100;
52
53 // The hardware latency we feed to WebRtc.
54 const int kHardwareLatencyInMs = 50;
55
56 scoped_ptr<media::AudioHardwareConfig> CreateRealHardwareConfig(
57     media::AudioManager* manager) {
58   const AudioParameters output_parameters =
59       manager->GetDefaultOutputStreamParameters();
60   const AudioParameters input_parameters =
61       manager->GetInputStreamParameters(
62           media::AudioManagerBase::kDefaultDeviceId);
63
64   return make_scoped_ptr(new media::AudioHardwareConfig(
65       input_parameters, output_parameters));
66 }
67
// Returns true if at least one element in |array| (containing |size|
// elements) equals |value|.
bool FindElementInArray(const int* array, int size, int value) {
  // Compute the end iterator with plain pointer arithmetic; the original
  // used &array[size], which forms the one-past-the-end address via a
  // subscript and mixed two different spellings of the same pointer.
  const int* end = array + size;
  return std::find(array, end, value) != end;
}
72
73 // This method returns false if a non-supported rate is detected on the
74 // input or output side.
75 // TODO(henrika): add support for automatic fallback to Windows Wave audio
76 // if a non-supported rate is detected. It is probably better to detect
77 // invalid audio settings by actually trying to open the audio streams instead
78 // of relying on hard coded conditions.
79 bool HardwareSampleRatesAreValid() {
80   // These are the currently supported hardware sample rates in both directions.
81   // The actual WebRTC client can limit these ranges further depending on
82   // platform but this is the maximum range we support today.
83   int valid_input_rates[] = {16000, 32000, 44100, 48000, 96000};
84   int valid_output_rates[] = {16000, 32000, 44100, 48000, 96000};
85
86   media::AudioHardwareConfig* hardware_config =
87       RenderThreadImpl::current()->GetAudioHardwareConfig();
88
89   // Verify the input sample rate.
90   int input_sample_rate = hardware_config->GetInputSampleRate();
91
92   if (!FindElementInArray(valid_input_rates, arraysize(valid_input_rates),
93                           input_sample_rate)) {
94     LOG(WARNING) << "Non-supported input sample rate detected.";
95     return false;
96   }
97
98   // Given that the input rate was OK, verify the output rate as well.
99   int output_sample_rate = hardware_config->GetOutputSampleRate();
100   if (!FindElementInArray(valid_output_rates, arraysize(valid_output_rates),
101                           output_sample_rate)) {
102     LOG(WARNING) << "Non-supported output sample rate detected.";
103     return false;
104   }
105
106   return true;
107 }
108
109 // Utility method which creates the audio capturer, it returns a scoped
110 // reference of the capturer if it is created successfully, otherwise it returns
111 // NULL. This method should be used in tests where
112 // HardwareSampleRatesAreValid() has been called and returned true.
113 scoped_refptr<WebRtcAudioCapturer> CreateAudioCapturer(
114     WebRtcAudioDeviceImpl* webrtc_audio_device) {
115   media::AudioHardwareConfig* hardware_config =
116       RenderThreadImpl::current()->GetAudioHardwareConfig();
117   // Use native capture sample rate and channel configuration to get some
118   // action in this test.
119   int sample_rate = hardware_config->GetInputSampleRate();
120   media::ChannelLayout channel_layout =
121       hardware_config->GetInputChannelLayout();
122   blink::WebMediaConstraints constraints;
123   StreamDeviceInfo device(MEDIA_DEVICE_AUDIO_CAPTURE,
124                           media::AudioManagerBase::kDefaultDeviceName,
125                           media::AudioManagerBase::kDefaultDeviceId,
126                           sample_rate, channel_layout, 0);
127   device.session_id = 1;
128   return WebRtcAudioCapturer::CreateCapturer(kRenderViewId, device,
129                                              constraints,
130                                              webrtc_audio_device);
131 }
132
133 // Create and start a local audio track. Starting the audio track will connect
134 // the audio track to the capturer and also start the source of the capturer.
135 // Also, connect the sink to the audio track.
136 scoped_ptr<WebRtcLocalAudioTrack>
137 CreateAndStartLocalAudioTrack(WebRtcLocalAudioTrackAdapter* adapter,
138                               WebRtcAudioCapturer* capturer,
139                               PeerConnectionAudioSink* sink) {
140   scoped_ptr<WebRtcLocalAudioTrack> local_audio_track(
141       new WebRtcLocalAudioTrack(adapter, capturer, NULL));
142
143   local_audio_track->AddSink(sink);
144   local_audio_track->Start();
145   return local_audio_track.Pass();
146 }
147
148 class WebRTCMediaProcessImpl : public webrtc::VoEMediaProcess {
149  public:
150   explicit WebRTCMediaProcessImpl(base::WaitableEvent* event)
151       : event_(event),
152         channel_id_(-1),
153         type_(webrtc::kPlaybackPerChannel),
154         packet_size_(0),
155         sample_rate_(0),
156         channels_(0) {
157   }
158   virtual ~WebRTCMediaProcessImpl() {}
159
160   // TODO(henrika): Refactor in WebRTC and convert to Chrome coding style.
161   virtual void Process(int channel,
162                        webrtc::ProcessingTypes type,
163                        int16_t audio_10ms[],
164                        int length,
165                        int sampling_freq,
166                        bool is_stereo) OVERRIDE {
167     base::AutoLock auto_lock(lock_);
168     channel_id_ = channel;
169     type_ = type;
170     packet_size_ = length;
171     sample_rate_ = sampling_freq;
172     channels_ = (is_stereo ? 2 : 1);
173     if (event_) {
174       // Signal that a new callback has been received.
175       event_->Signal();
176     }
177   }
178
179   int channel_id() const {
180     base::AutoLock auto_lock(lock_);
181     return channel_id_;
182   }
183
184   int type() const {
185     base::AutoLock auto_lock(lock_);
186     return type_;
187   }
188
189   int packet_size() const {
190     base::AutoLock auto_lock(lock_);
191     return packet_size_;
192   }
193
194   int sample_rate() const {
195     base::AutoLock auto_lock(lock_);
196     return sample_rate_;
197   }
198
199  private:
200   base::WaitableEvent* event_;
201   int channel_id_;
202   webrtc::ProcessingTypes type_;
203   int packet_size_;
204   int sample_rate_;
205   int channels_;
206   mutable base::Lock lock_;
207   DISALLOW_COPY_AND_ASSIGN(WebRTCMediaProcessImpl);
208 };
209
210 // TODO(xians): Use MediaStreamAudioSink.
211 class MockMediaStreamAudioSink : public PeerConnectionAudioSink {
212  public:
213   explicit MockMediaStreamAudioSink(base::WaitableEvent* event)
214       : event_(event) {
215     DCHECK(event_);
216   }
217   virtual ~MockMediaStreamAudioSink() {}
218
219   // PeerConnectionAudioSink implementation.
220   virtual int OnData(const int16* audio_data,
221                      int sample_rate,
222                      int number_of_channels,
223                      int number_of_frames,
224                      const std::vector<int>& channels,
225                      int audio_delay_milliseconds,
226                      int current_volume,
227                      bool need_audio_processing,
228                      bool key_pressed) OVERRIDE {
229     // Signal that a callback has been received.
230     event_->Signal();
231     return 0;
232   }
233
234   // Set the format for the capture audio parameters.
235   virtual void OnSetFormat(
236       const media::AudioParameters& params) OVERRIDE {}
237
238  private:
239    base::WaitableEvent* event_;
240
241    DISALLOW_COPY_AND_ASSIGN(MockMediaStreamAudioSink);
242 };
243
244 class MockWebRtcAudioRendererSource : public WebRtcAudioRendererSource {
245  public:
246   explicit MockWebRtcAudioRendererSource(base::WaitableEvent* event)
247       : event_(event) {
248     DCHECK(event_);
249   }
250   virtual ~MockWebRtcAudioRendererSource() {}
251
252   // WebRtcAudioRendererSource implementation.
253   virtual void RenderData(uint8* audio_data,
254                           int number_of_channels,
255                           int number_of_frames,
256                           int audio_delay_milliseconds) OVERRIDE {
257     // Signal that a callback has been received.
258     // Initialize the memory to zero to avoid uninitialized warning from
259     // Valgrind.
260     memset(audio_data, 0,
261            sizeof(int16) * number_of_channels * number_of_frames);
262     event_->Signal();
263   }
264
265   virtual void SetRenderFormat(const media::AudioParameters& params) OVERRIDE {
266   }
267
268   virtual void RemoveAudioRenderer(WebRtcAudioRenderer* renderer) OVERRIDE {};
269
270  private:
271    base::WaitableEvent* event_;
272
273    DISALLOW_COPY_AND_ASSIGN(MockWebRtcAudioRendererSource);
274 };
275
276 // Prints numerical information to stdout in a controlled format so we can plot
277 // the result.
278 void PrintPerfResultMs(const char* graph, const char* trace, float time_ms) {
279   std::string times;
280   base::StringAppendF(&times, "%.2f,", time_ms);
281   std::string result = base::StringPrintf(
282       "%sRESULT %s%s: %s= %s%s%s %s\n", "*", graph, "",
283       trace,  "[", times.c_str(), "]", "ms");
284
285   fflush(stdout);
286   printf("%s", result.c_str());
287   fflush(stdout);
288 }
289
290 void ReadDataFromSpeechFile(char* data, int length) {
291   base::FilePath data_file;
292   CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &data_file));
293   data_file =
294       data_file.Append(FILE_PATH_LITERAL("media"))
295                .Append(FILE_PATH_LITERAL("test"))
296                .Append(FILE_PATH_LITERAL("data"))
297                .Append(FILE_PATH_LITERAL("speech_16b_stereo_48kHz.raw"));
298   DCHECK(base::PathExists(data_file));
299   int64 data_file_size64 = 0;
300   DCHECK(base::GetFileSize(data_file, &data_file_size64));
301   EXPECT_EQ(length, base::ReadFile(data_file, data, length));
302   DCHECK(data_file_size64 > length);
303 }
304
// Configures |channel| of |engine| to send and receive with the ISAC codec
// (32 kHz, mono). No-op on Android/iOS.
void SetChannelCodec(webrtc::VoiceEngine* engine, int channel) {
  // TODO(xians): move the codec as an input param to this function, and add
  // tests for different codecs, also add support to Android and IOS.
#if !defined(OS_ANDROID) && !defined(OS_IOS)
  webrtc::CodecInst isac;
  strcpy(isac.plname, "ISAC");
  isac.pltype = 104;   // RTP payload type.
  isac.pacsize = 960;  // Samples per packet: 960 / 32000 Hz = 30 ms.
  isac.plfreq = 32000;
  isac.channels = 1;
  isac.rate = -1;      // Presumably selects ISAC's adaptive-rate mode --
                       // verify against the VoECodec documentation.
  ScopedWebRTCPtr<webrtc::VoECodec> codec(engine);
  // Apply the codec to both the receive and the send side of the channel.
  EXPECT_EQ(0, codec->SetRecPayloadType(channel, isac));
  EXPECT_EQ(0, codec->SetSendCodec(channel, isac));
#endif
}
321
// Returns the time in milliseconds for sending packets to WebRtc for encoding,
// signal processing, decoding and receiving them back. Feeds
// kNumberOfPacketsForLoopbackTest packets of file-based speech data through
// the full capture -> VoE -> render path, with AGC/NS/AEC toggled by
// |enable_apm|.
int RunWebRtcLoopbackTimeTest(media::AudioManager* manager,
                              bool enable_apm) {
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  // Bring up a VoiceEngine that uses |webrtc_audio_device| as its audio layer.
  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  EXPECT_TRUE(engine.valid());
  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  EXPECT_TRUE(base.valid());
  int err = base->Init(webrtc_audio_device.get());
  EXPECT_EQ(0, err);

  // We use OnSetFormat() and SetRenderFormat() to configure the audio
  // parameters so that this test can run on a machine without hardware
  // devices. 480 frames at 48 kHz corresponds to 10 ms buffers.
  const media::AudioParameters params = media::AudioParameters(
      media::AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
      48000, 2, 480);
  PeerConnectionAudioSink* capturer_sink =
      static_cast<PeerConnectionAudioSink*>(webrtc_audio_device.get());
  WebRtcAudioRendererSource* renderer_source =
      static_cast<WebRtcAudioRendererSource*>(webrtc_audio_device.get());
  renderer_source->SetRenderFormat(params);

  // Turn on/off all the signal processing components like AGC, AEC and NS.
  ScopedWebRTCPtr<webrtc::VoEAudioProcessing> audio_processing(engine.get());
  EXPECT_TRUE(audio_processing.valid());
  audio_processing->SetAgcStatus(enable_apm);
  audio_processing->SetNsStatus(enable_apm);
  audio_processing->SetEcStatus(enable_apm);

  // Create a voice channel for the WebRtc.
  int channel = base->CreateChannel();
  EXPECT_NE(-1, channel);
  SetChannelCodec(engine.get(), channel);

  // Use our fake network transmission and start playout and recording.
  ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get());
  EXPECT_TRUE(network.valid());
  scoped_ptr<WebRTCTransportImpl> transport(
      new WebRTCTransportImpl(network.get()));
  EXPECT_EQ(0, network->RegisterExternalTransport(channel, *transport.get()));
  EXPECT_EQ(0, base->StartPlayout(channel));
  EXPECT_EQ(0, base->StartSend(channel));

  // Read speech data from a speech test file. The "* 2" factors below are
  // the size in bytes of one 16-bit sample.
  const int input_packet_size =
      params.frames_per_buffer() * 2 * params.channels();
  const int num_output_channels = webrtc_audio_device->output_channels();
  const int output_packet_size = webrtc_audio_device->output_buffer_size() * 2 *
      num_output_channels;
  const size_t length = input_packet_size * kNumberOfPacketsForLoopbackTest;
  scoped_ptr<char[]> capture_data(new char[length]);
  ReadDataFromSpeechFile(capture_data.get(), length);

  // Start the timer.
  scoped_ptr<uint8[]> buffer(new uint8[output_packet_size]);
  base::Time start_time = base::Time::Now();
  int delay = 0;
  std::vector<int> voe_channels;
  voe_channels.push_back(channel);
  for (int j = 0; j < kNumberOfPacketsForLoopbackTest; ++j) {
    // Sending fake capture data to WebRtc.
    capturer_sink->OnData(
        reinterpret_cast<int16*>(capture_data.get() + input_packet_size * j),
        params.sample_rate(),
        params.channels(),
        params.frames_per_buffer(),
        voe_channels,
        kHardwareLatencyInMs,
        1.0,
        enable_apm,
        false);

    // Receiving data from WebRtc.
    renderer_source->RenderData(
        reinterpret_cast<uint8*>(buffer.get()),
        num_output_channels, webrtc_audio_device->output_buffer_size(),
        kHardwareLatencyInMs + delay);
    // Feed the elapsed time back as the render-side delay estimate for the
    // next iteration.
    delay = (base::Time::Now() - start_time).InMilliseconds();
  }

  // Total wall-clock time for the whole loopback run.
  int latency = (base::Time::Now() - start_time).InMilliseconds();

  EXPECT_EQ(0, base->StopSend(channel));
  EXPECT_EQ(0, base->StopPlayout(channel));
  EXPECT_EQ(0, base->DeleteChannel(channel));
  EXPECT_EQ(0, base->Terminate());

  return latency;
}
413
414 }  // namespace
415
416 // Trivial test which verifies that one part of the test harness
417 // (HardwareSampleRatesAreValid()) works as intended for all supported
418 // hardware input sample rates.
419 TEST_F(MAYBE_WebRTCAudioDeviceTest, TestValidInputRates) {
420   int valid_rates[] = {16000, 32000, 44100, 48000, 96000};
421
422   // Verify that we will approve all rates listed in |valid_rates|.
423   for (size_t i = 0; i < arraysize(valid_rates); ++i) {
424     EXPECT_TRUE(FindElementInArray(valid_rates, arraysize(valid_rates),
425         valid_rates[i]));
426   }
427
428   // Verify that any value outside the valid range results in negative
429   // find results.
430   int invalid_rates[] = {-1, 0, 8000, 11025, 22050, 192000};
431   for (size_t i = 0; i < arraysize(invalid_rates); ++i) {
432     EXPECT_FALSE(FindElementInArray(valid_rates, arraysize(valid_rates),
433         invalid_rates[i]));
434   }
435 }
436
437 // Trivial test which verifies that one part of the test harness
438 // (HardwareSampleRatesAreValid()) works as intended for all supported
439 // hardware output sample rates.
440 TEST_F(MAYBE_WebRTCAudioDeviceTest, TestValidOutputRates) {
441   int valid_rates[] = {44100, 48000, 96000};
442
443   // Verify that we will approve all rates listed in |valid_rates|.
444   for (size_t i = 0; i < arraysize(valid_rates); ++i) {
445     EXPECT_TRUE(FindElementInArray(valid_rates, arraysize(valid_rates),
446         valid_rates[i]));
447   }
448
449   // Verify that any value outside the valid range results in negative
450   // find results.
451   int invalid_rates[] = {-1, 0, 8000, 11025, 22050, 32000, 192000};
452   for (size_t i = 0; i < arraysize(invalid_rates); ++i) {
453     EXPECT_FALSE(FindElementInArray(valid_rates, arraysize(valid_rates),
454         invalid_rates[i]));
455   }
456 }
457
458 // Basic test that instantiates and initializes an instance of
459 // WebRtcAudioDeviceImpl.
TEST_F(MAYBE_WebRTCAudioDeviceTest, Construct) {
#if defined(OS_WIN)
  // This test crashes on Win XP bots.
  if (base::win::GetVersion() <= base::win::VERSION_XP)
    return;
#endif

  // Fixed, hardware-independent parameters: 48 kHz, 16-bit, 480-frame
  // (10 ms) buffers; mono input and stereo output.
  AudioParameters input_params(
      AudioParameters::AUDIO_PCM_LOW_LATENCY,
      media::CHANNEL_LAYOUT_MONO,
      48000,
      16,
      480);

  AudioParameters output_params(
      AudioParameters::AUDIO_PCM_LOW_LATENCY,
      media::CHANNEL_LAYOUT_STEREO,
      48000,
      16,
      480);

  media::AudioHardwareConfig audio_config(input_params, output_params);
  SetAudioHardwareConfig(&audio_config);

  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());

  // Initializing VoEBase with the device and creating a capturer must both
  // succeed.
  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  int err = base->Init(webrtc_audio_device.get());
  EXPECT_TRUE(CreateAudioCapturer(webrtc_audio_device) != NULL);
  EXPECT_EQ(0, err);
  EXPECT_EQ(0, base->Terminate());
}
496
497 // Verify that a call to webrtc::VoEBase::StartPlayout() starts audio output
498 // with the correct set of parameters. A WebRtcAudioDeviceImpl instance will
499 // be utilized to implement the actual audio path. The test registers a
500 // webrtc::VoEExternalMedia implementation to hijack the output audio and
501 // verify that streaming starts correctly.
502 // TODO(henrika): include on Android as well, as soon as all race conditions
503 // in OpenSLES are resolved.
504 #if defined(OS_ANDROID)
505 #define MAYBE_StartPlayout DISABLED_StartPlayout
506 #else
507 #define MAYBE_StartPlayout StartPlayout
508 #endif
TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_StartPlayout) {
  if (!has_output_devices_) {
    LOG(WARNING) << "No output device detected.";
    return;
  }

  // Run against the real hardware configuration; bail out if the device
  // uses sample rates this test cannot handle.
  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())
    return;

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());
  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());

  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  int err = base->Init(webrtc_audio_device.get());
  ASSERT_EQ(0, err);

  // Register an external media-process hook so we can observe the playout
  // audio; |event| is signaled on each Process() callback.
  ScopedWebRTCPtr<webrtc::VoEExternalMedia> external_media(engine.get());
  ASSERT_TRUE(external_media.valid());
  base::WaitableEvent event(false, false);
  scoped_ptr<WebRTCMediaProcessImpl> media_process(
      new WebRTCMediaProcessImpl(&event));
  int ch = base->CreateChannel();
  EXPECT_NE(-1, ch);
  EXPECT_EQ(0, external_media->RegisterExternalMediaProcessing(
      ch, webrtc::kPlaybackPerChannel, *media_process.get()));

  scoped_refptr<webrtc::MediaStreamInterface> media_stream(
      new talk_base::RefCountedObject<MockMediaStream>("label"));

  // Start playout and hook up the renderer (via its shared proxy) to the
  // audio device.
  EXPECT_EQ(0, base->StartPlayout(ch));
  scoped_refptr<WebRtcAudioRenderer> renderer(
      CreateDefaultWebRtcAudioRenderer(kRenderViewId, media_stream));
  scoped_refptr<MediaStreamAudioRenderer> proxy(
      renderer->CreateSharedAudioRendererProxy(media_stream));
  EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));
  proxy->Start();
  proxy->Play();

  // Wait until the media-process hook fires, i.e. audio is actually flowing.
  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
  WaitForIOThreadCompletion();

  EXPECT_TRUE(webrtc_audio_device->Playing());
  EXPECT_FALSE(webrtc_audio_device->Recording());
  EXPECT_EQ(ch, media_process->channel_id());
  EXPECT_EQ(webrtc::kPlaybackPerChannel, media_process->type());
  // 80 samples == 10 ms worth of audio at the expected 8 kHz rate below.
  EXPECT_EQ(80, media_process->packet_size());
  EXPECT_EQ(8000, media_process->sample_rate());

  // Tear down in reverse order of setup.
  EXPECT_EQ(0, external_media->DeRegisterExternalMediaProcessing(
      ch, webrtc::kPlaybackPerChannel));
  EXPECT_EQ(0, base->StopPlayout(ch));
  proxy->Stop();
  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
}
571
572 // Verify that a call to webrtc::VoEBase::StartRecording() starts audio input
573 // with the correct set of parameters. A WebRtcAudioDeviceImpl instance will
574 // be utilized to implement the actual audio path. The test registers a
575 // webrtc::VoEExternalMedia implementation to hijack the input audio and
576 // verify that streaming starts correctly. An external transport implementation
577 // is also required to ensure that "sending" can start without actually trying
578 // to send encoded packets to the network. Our main interest here is to ensure
579 // that the audio capturing starts as it should.
580 // Disabled when running headless since the bots don't have the required config.
581
582 // TODO(leozwang): Because ExternalMediaProcessing is disabled in webrtc,
583 // disable this unit test on Android for now.
584 #if defined(OS_ANDROID)
585 #define MAYBE_StartRecording DISABLED_StartRecording
586 #elif defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
587 // This test is failing on ARM linux: http://crbug.com/238490
588 #define MAYBE_StartRecording DISABLED_StartRecording
589 #else
590 // Flakily hangs on all other platforms as well: crbug.com/268376.
591 // When the flakiness has been fixed, you probably want to leave it disabled
592 // on the above platforms.
593 #define MAYBE_StartRecording DISABLED_StartRecording
594 #endif
595
TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_StartRecording) {
  if (!has_input_devices_ || !has_output_devices_) {
    LOG(WARNING) << "Missing audio devices.";
    return;
  }

  // Run against the real hardware configuration; bail out if the device
  // uses sample rates this test cannot handle.
  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())
    return;

  // TODO(tommi): extend MediaObserver and MockMediaObserver with support
  // for new interfaces, like OnSetAudioStreamRecording(). When done, add
  // EXPECT_CALL() macros here.
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());

  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());
  int err = base->Init(webrtc_audio_device.get());
  ASSERT_EQ(0, err);

  int ch = base->CreateChannel();
  EXPECT_NE(-1, ch);

  // Hook the recording side so |event| is signaled on the first capture
  // callback that reaches WebRTC.
  ScopedWebRTCPtr<webrtc::VoEExternalMedia> external_media(engine.get());
  ASSERT_TRUE(external_media.valid());

  base::WaitableEvent event(false, false);
  scoped_ptr<WebRTCMediaProcessImpl> media_process(
      new WebRTCMediaProcessImpl(&event));
  EXPECT_EQ(0, external_media->RegisterExternalMediaProcessing(
      ch, webrtc::kRecordingPerChannel, *media_process.get()));

  // We must add an external transport implementation to be able to start
  // recording without actually sending encoded packets to the network. All
  // we want to do here is to verify that audio capturing starts as it should.
  ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get());
  scoped_ptr<WebRTCTransportImpl> transport(
      new WebRTCTransportImpl(network.get()));
  EXPECT_EQ(0, network->RegisterExternalTransport(ch, *transport.get()));
  EXPECT_EQ(0, base->StartSend(ch));

  // Create the capturer which starts the source of the data flow.
  scoped_refptr<WebRtcAudioCapturer> capturer(
      CreateAudioCapturer(webrtc_audio_device));
  EXPECT_TRUE(capturer);

  // Create and start a local audio track which is bridging the data flow
  // between the capturer and WebRtcAudioDeviceImpl.
  scoped_ptr<WebRtcLocalAudioTrackAdapter> adapter(
      WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
  scoped_ptr<WebRtcLocalAudioTrack> local_audio_track(
      CreateAndStartLocalAudioTrack(adapter, capturer, webrtc_audio_device));
  // Connect the VoE voice channel to the audio track.
  static_cast<webrtc::AudioTrackInterface*>(
      adapter.get())->GetRenderer()->AddChannel(ch);

  // Verify we get the data flow.
  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
  WaitForIOThreadCompletion();

  EXPECT_FALSE(webrtc_audio_device->Playing());
  EXPECT_TRUE(webrtc_audio_device->Recording());
  EXPECT_EQ(ch, media_process->channel_id());
  EXPECT_EQ(webrtc::kRecordingPerChannel, media_process->type());
  // 80 samples == 10 ms worth of audio at the expected 8 kHz rate below.
  EXPECT_EQ(80, media_process->packet_size());
  EXPECT_EQ(8000, media_process->sample_rate());

  EXPECT_EQ(0, external_media->DeRegisterExternalMediaProcessing(
      ch, webrtc::kRecordingPerChannel));
  EXPECT_EQ(0, base->StopSend(ch));

  capturer->Stop();
  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
}
678
679 // Uses WebRtcAudioDeviceImpl to play a local wave file.
680 // TODO(henrika): include on Android as well, as soon as all race conditions
681 // in OpenSLES are resolved.
682 #if defined(OS_ANDROID)
683 #define MAYBE_PlayLocalFile DISABLED_PlayLocalFile
684 #else
685 #define MAYBE_PlayLocalFile PlayLocalFile
686 #endif
TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_PlayLocalFile) {
  if (!has_output_devices_) {
    LOG(WARNING) << "No output device detected.";
    return;
  }

  // Checked-in 16 kHz mono PCM test clip.
  std::string file_path(
      GetTestDataPath(FILE_PATH_LITERAL("speechmusic_mono_16kHz.pcm")));

  // Run against the real hardware configuration; bail out if the device
  // uses sample rates this test cannot handle.
  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())
    return;

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());
  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());

  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  int err = base->Init(webrtc_audio_device.get());
  ASSERT_EQ(0, err);
  int ch = base->CreateChannel();
  EXPECT_NE(-1, ch);
  EXPECT_EQ(0, base->StartPlayout(ch));
  // Hook up the renderer (via its shared proxy) to the audio device and
  // start playing.
  scoped_refptr<webrtc::MediaStreamInterface> media_stream(
      new talk_base::RefCountedObject<MockMediaStream>("label"));
  scoped_refptr<WebRtcAudioRenderer> renderer(
      CreateDefaultWebRtcAudioRenderer(kRenderViewId, media_stream));
  scoped_refptr<MediaStreamAudioRenderer> proxy(
      renderer->CreateSharedAudioRendererProxy(media_stream));
  EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));
  proxy->Start();
  proxy->Play();

  // Sanity-check the file before asking VoE to play it.
  ScopedWebRTCPtr<webrtc::VoEFile> file(engine.get());
  ASSERT_TRUE(file.valid());
  int duration = 0;
  EXPECT_EQ(0, file->GetFileDuration(file_path.c_str(), duration,
                                     webrtc::kFileFormatPcm16kHzFile));
  EXPECT_NE(0, duration);

  EXPECT_EQ(0, file->StartPlayingFileLocally(ch, file_path.c_str(), false,
                                             webrtc::kFileFormatPcm16kHzFile));

  // Play 2 seconds worth of audio and then quit.
  message_loop_.PostDelayedTask(FROM_HERE,
                                base::MessageLoop::QuitClosure(),
                                base::TimeDelta::FromSeconds(2));
  message_loop_.Run();

  proxy->Stop();
  // NOTE(review): StartSend() was never called for |ch| in this test, so
  // StopSend() is presumably a harmless no-op here -- confirm against the
  // VoEBase semantics.
  EXPECT_EQ(0, base->StopSend(ch));
  EXPECT_EQ(0, base->StopPlayout(ch));
  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
}
747
748 // Uses WebRtcAudioDeviceImpl to play out recorded audio in loopback.
749 // An external transport implementation is utilized to feed back RTP packets
750 // which are recorded, encoded, packetized into RTP packets and finally
751 // "transmitted". The RTP packets are then fed back into the VoiceEngine
752 // where they are decoded and played out on the default audio output device.
753 // Disabled when running headless since the bots don't have the required config.
754 // TODO(henrika): improve quality by using a wideband codec, enabling noise-
755 // suppressions etc.
756 // FullDuplexAudioWithAGC is flaky on Android, disable it for now.
757 // Also flakily hangs on Windows: crbug.com/269348.
758 #if defined(OS_ANDROID) || defined(OS_WIN)
759 #define MAYBE_FullDuplexAudioWithAGC DISABLED_FullDuplexAudioWithAGC
760 #else
761 #define MAYBE_FullDuplexAudioWithAGC FullDuplexAudioWithAGC
762 #endif
TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_FullDuplexAudioWithAGC) {
  // Full-duplex loopback needs both a capture and a render device.
  if (!has_output_devices_ || !has_input_devices_) {
    LOG(WARNING) << "Missing audio devices.";
    return;
  }

  // Query the real hardware configuration and inject it so the audio
  // pipeline runs with the actual device parameters.
  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  // Bail out silently if the hardware sample rates are unsupported.
  if (!HardwareSampleRatesAreValid())
    return;

  // Create a VoiceEngine instance and initialize it with our own
  // audio-device implementation (WebRtcAudioDeviceImpl).
  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());
  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());

  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  int err = base->Init(webrtc_audio_device.get());
  ASSERT_EQ(0, err);

  // Verify the platform-default AGC state after Init().
  ScopedWebRTCPtr<webrtc::VoEAudioProcessing> audio_processing(engine.get());
  ASSERT_TRUE(audio_processing.valid());
#if defined(OS_ANDROID)
  // On Android, by default AGC is off.
  bool enabled = true;
  webrtc::AgcModes agc_mode = webrtc::kAgcDefault;
  EXPECT_EQ(0, audio_processing->GetAgcStatus(enabled, agc_mode));
  EXPECT_FALSE(enabled);
#else
  // On desktop platforms the test expects AGC enabled in adaptive-analog
  // mode by default.
  bool enabled = false;
  webrtc::AgcModes agc_mode = webrtc::kAgcDefault;
  EXPECT_EQ(0, audio_processing->GetAgcStatus(enabled, agc_mode));
  EXPECT_TRUE(enabled);
  EXPECT_EQ(agc_mode, webrtc::kAgcAdaptiveAnalog);
#endif

  int ch = base->CreateChannel();
  EXPECT_NE(-1, ch);

  // Set up the capture side: an audio capturer feeding a local audio track.
  scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
      WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
  scoped_refptr<WebRtcAudioCapturer> capturer(
      CreateAudioCapturer(webrtc_audio_device));
  EXPECT_TRUE(capturer);
  scoped_ptr<WebRtcLocalAudioTrack> local_audio_track(
      CreateAndStartLocalAudioTrack(adapter, capturer, webrtc_audio_device));
  // Connect the VoE voice channel to the audio track adapter so captured
  // audio is delivered to the channel for encoding.
  static_cast<webrtc::AudioTrackInterface*>(
      adapter.get())->GetRenderer()->AddChannel(ch);

  // Register an external transport that feeds the "transmitted" RTP packets
  // straight back into the engine, closing the duplex loop.
  ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get());
  ASSERT_TRUE(network.valid());
  scoped_ptr<WebRTCTransportImpl> transport(
      new WebRTCTransportImpl(network.get()));
  EXPECT_EQ(0, network->RegisterExternalTransport(ch, *transport.get()));
  EXPECT_EQ(0, base->StartPlayout(ch));
  EXPECT_EQ(0, base->StartSend(ch));
  // Set up the render side: a renderer proxy attached to a mock stream.
  scoped_refptr<webrtc::MediaStreamInterface> media_stream(
      new talk_base::RefCountedObject<MockMediaStream>("label"));
  scoped_refptr<WebRtcAudioRenderer> renderer(
      CreateDefaultWebRtcAudioRenderer(kRenderViewId, media_stream));
  scoped_refptr<MediaStreamAudioRenderer> proxy(
      renderer->CreateSharedAudioRendererProxy(media_stream));
  EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));
  proxy->Start();
  proxy->Play();

  // Let the loopback run for two seconds of wall-clock time.
  VLOG(0) << ">> You should now be able to hear yourself in loopback...";
  message_loop_.PostDelayedTask(FROM_HERE,
                                base::MessageLoop::QuitClosure(),
                                base::TimeDelta::FromSeconds(2));
  message_loop_.Run();

  // Tear down in reverse order of construction.
  capturer->Stop();
  proxy->Stop();
  EXPECT_EQ(0, base->StopSend(ch));
  EXPECT_EQ(0, base->StopPlayout(ch));

  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
}
847
848 // Test times out on bots, see http://crbug.com/247447
TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_WebRtcRecordingSetupTime) {
  // Measures the time from StartSend() until the first captured audio
  // buffer reaches the sink, and reports it as a perf result.
  if (!has_input_devices_) {
    LOG(WARNING) << "Missing audio capture devices.";
    return;
  }

  // Use the real hardware configuration for the test run.
  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())
    return;

  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());

  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());
  int err = base->Init(webrtc_audio_device.get());
  ASSERT_EQ(0, err);

  int ch = base->CreateChannel();
  EXPECT_NE(-1, ch);

  // The sink signals |event| when it receives captured audio data; that is
  // the end point of the measured interval.
  scoped_refptr<WebRtcAudioCapturer> capturer(
      CreateAudioCapturer(webrtc_audio_device));
  EXPECT_TRUE(capturer);
  base::WaitableEvent event(false, false);
  scoped_ptr<MockMediaStreamAudioSink> sink(
      new MockMediaStreamAudioSink(&event));

  // Create and start a local audio track. Starting the audio track will connect
  // the audio track to the capturer and also start the source of the capturer.
  scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
      WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
  scoped_ptr<WebRtcLocalAudioTrack> local_audio_track(
      CreateAndStartLocalAudioTrack(adapter, capturer, sink.get()));

  // Connect the VoE voice channel to the audio track adapter.
  static_cast<webrtc::AudioTrackInterface*>(
      adapter.get())->GetRenderer()->AddChannel(ch);

  // Start the clock immediately before StartSend(); stop it when the sink
  // reports the first audio buffer.
  base::Time start_time = base::Time::Now();
  EXPECT_EQ(0, base->StartSend(ch));

  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
  int delay = (base::Time::Now() - start_time).InMilliseconds();
  PrintPerfResultMs("webrtc_recording_setup_c", "t", delay);

  capturer->Stop();
  EXPECT_EQ(0, base->StopSend(ch));
  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
}
906
907
// TODO(henrika): include on Android as well, as soon as all race conditions
// in OpenSLES are resolved.
910 #if defined(OS_ANDROID)
911 #define MAYBE_WebRtcPlayoutSetupTime DISABLED_WebRtcPlayoutSetupTime
912 #else
913 #define MAYBE_WebRtcPlayoutSetupTime WebRtcPlayoutSetupTime
914 #endif
915 TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_WebRtcPlayoutSetupTime) {
916   if (!has_output_devices_) {
917     LOG(WARNING) << "No output device detected.";
918     return;
919   }
920
921   scoped_ptr<media::AudioHardwareConfig> config =
922       CreateRealHardwareConfig(audio_manager_.get());
923   SetAudioHardwareConfig(config.get());
924
925   if (!HardwareSampleRatesAreValid())
926     return;
927
928   base::WaitableEvent event(false, false);
929   scoped_ptr<MockWebRtcAudioRendererSource> renderer_source(
930       new MockWebRtcAudioRendererSource(&event));
931
932   scoped_refptr<webrtc::MediaStreamInterface> media_stream(
933       new talk_base::RefCountedObject<MockMediaStream>("label"));
934   scoped_refptr<WebRtcAudioRenderer> renderer(
935       CreateDefaultWebRtcAudioRenderer(kRenderViewId, media_stream));
936   renderer->Initialize(renderer_source.get());
937   scoped_refptr<MediaStreamAudioRenderer> proxy(
938       renderer->CreateSharedAudioRendererProxy(media_stream));
939   proxy->Start();
940
941   // Start the timer and playout.
942   base::Time start_time = base::Time::Now();
943   proxy->Play();
944   EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
945   int delay = (base::Time::Now() - start_time).InMilliseconds();
946   PrintPerfResultMs("webrtc_playout_setup_c", "t", delay);
947
948   proxy->Stop();
949 }
950
951 #if defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
952 // Timing out on ARM linux bot: http://crbug.com/238490
953 #define MAYBE_WebRtcLoopbackTimeWithoutSignalProcessing \
954         DISABLED_WebRtcLoopbackTimeWithoutSignalProcessing
955 #else
956 #define MAYBE_WebRtcLoopbackTimeWithoutSignalProcessing \
957         WebRtcLoopbackTimeWithoutSignalProcessing
958 #endif
959
960 TEST_F(MAYBE_WebRTCAudioDeviceTest,
961        MAYBE_WebRtcLoopbackTimeWithoutSignalProcessing) {
962 #if defined(OS_WIN)
963   // This test hangs on WinXP: see http://crbug.com/318189.
964   if (base::win::GetVersion() <= base::win::VERSION_XP) {
965     LOG(WARNING) << "Test disabled due to the test hangs on WinXP.";
966     return;
967   }
968 #endif
969   int latency = RunWebRtcLoopbackTimeTest(audio_manager_.get(), false);
970   PrintPerfResultMs("webrtc_loopback_without_sigal_processing (100 packets)",
971                     "t", latency);
972 }
973
974 #if defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
975 // Timing out on ARM linux bot: http://crbug.com/238490
976 #define MAYBE_WebRtcLoopbackTimeWithSignalProcessing \
977         DISABLED_WebRtcLoopbackTimeWithSignalProcessing
978 #else
979 #define MAYBE_WebRtcLoopbackTimeWithSignalProcessing \
980         WebRtcLoopbackTimeWithSignalProcessing
981 #endif
982
983 TEST_F(MAYBE_WebRTCAudioDeviceTest,
984        MAYBE_WebRtcLoopbackTimeWithSignalProcessing) {
985 #if defined(OS_WIN)
986   // This test hangs on WinXP: see http://crbug.com/318189.
987   if (base::win::GetVersion() <= base::win::VERSION_XP) {
988     LOG(WARNING) << "Test disabled due to the test hangs on WinXP.";
989     return;
990   }
991 #endif
992   int latency = RunWebRtcLoopbackTimeTest(audio_manager_.get(), true);
993   PrintPerfResultMs("webrtc_loopback_with_signal_processing (100 packets)",
994                     "t", latency);
995 }
996
997 }  // namespace content