1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #include "base/environment.h"
8 #include "base/file_util.h"
9 #include "base/files/file_path.h"
10 #include "base/path_service.h"
11 #include "base/strings/stringprintf.h"
12 #include "base/test/test_timeouts.h"
13 #include "content/renderer/media/mock_media_stream_dependency_factory.h"
14 #include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
15 #include "content/renderer/media/webrtc_audio_capturer.h"
16 #include "content/renderer/media/webrtc_audio_device_impl.h"
17 #include "content/renderer/media/webrtc_audio_renderer.h"
18 #include "content/renderer/media/webrtc_local_audio_track.h"
19 #include "content/renderer/render_thread_impl.h"
20 #include "content/test/webrtc_audio_device_test.h"
21 #include "media/audio/audio_manager_base.h"
22 #include "media/base/audio_hardware_config.h"
23 #include "testing/gmock/include/gmock/gmock.h"
24 #include "third_party/WebKit/public/platform/WebMediaConstraints.h"
25 #include "third_party/webrtc/voice_engine/include/voe_audio_processing.h"
26 #include "third_party/webrtc/voice_engine/include/voe_base.h"
27 #include "third_party/webrtc/voice_engine/include/voe_codec.h"
28 #include "third_party/webrtc/voice_engine/include/voe_external_media.h"
29 #include "third_party/webrtc/voice_engine/include/voe_file.h"
30 #include "third_party/webrtc/voice_engine/include/voe_network.h"
33 #include "base/win/windows_version.h"
36 using media::AudioParameters;
37 using media::CHANNEL_LAYOUT_STEREO;
39 using testing::AnyNumber;
40 using testing::InvokeWithoutArgs;
41 using testing::Return;
// Id of the render view used when creating capturers/renderers in this file.
const int kRenderViewId = 1;

// The number of packets that RunWebRtcLoopbackTimeTest() uses for measurement.
const int kNumberOfPacketsForLoopbackTest = 100;

// The hardware latency we feed to WebRtc.
const int kHardwareLatencyInMs = 50;
// Builds an AudioHardwareConfig from the real default input/output stream
// parameters reported by |manager|, so tests exercise the machine's actual
// hardware configuration.
scoped_ptr<media::AudioHardwareConfig> CreateRealHardwareConfig(
    media::AudioManager* manager) {
  const AudioParameters output_parameters =
      manager->GetDefaultOutputStreamParameters();
  const AudioParameters input_parameters =
      manager->GetInputStreamParameters(
          media::AudioManagerBase::kDefaultDeviceId);

  return make_scoped_ptr(new media::AudioHardwareConfig(
      input_parameters, output_parameters));
// Returns true if at least one element in |array| (of |size| elements)
// matches |value|.
bool FindElementInArray(const int* array, int size, int value) {
  // Use plain pointer arithmetic for the end iterator: |array + size| is a
  // valid one-past-the-end pointer, whereas &array[size] formally forms the
  // address via *(array + size), touching an element that does not exist.
  const int* end = array + size;
  return std::find(array, end, value) != end;
}
// This method returns false if a non-supported rate is detected on the
// input or output side.
// TODO(henrika): add support for automatic fallback to Windows Wave audio
// if a non-supported rate is detected. It is probably better to detect
// invalid audio settings by actually trying to open the audio streams instead
// of relying on hard coded conditions.
bool HardwareSampleRatesAreValid() {
  // These are the currently supported hardware sample rates in both directions.
  // The actual WebRTC client can limit these ranges further depending on
  // platform but this is the maximum range we support today.
  int valid_input_rates[] = {16000, 32000, 44100, 48000, 96000};
  int valid_output_rates[] = {16000, 32000, 44100, 48000, 96000};

  // The config was installed on the render thread by the test fixture
  // (SetAudioHardwareConfig), so current() is non-NULL here.
  media::AudioHardwareConfig* hardware_config =
      RenderThreadImpl::current()->GetAudioHardwareConfig();

  // Verify the input sample rate.
  int input_sample_rate = hardware_config->GetInputSampleRate();

  if (!FindElementInArray(valid_input_rates, arraysize(valid_input_rates),
    LOG(WARNING) << "Non-supported input sample rate detected.";

  // Given that the input rate was OK, verify the output rate as well.
  int output_sample_rate = hardware_config->GetOutputSampleRate();
  if (!FindElementInArray(valid_output_rates, arraysize(valid_output_rates),
                          output_sample_rate)) {
    LOG(WARNING) << "Non-supported output sample rate detected.";
// Utility method which creates the audio capturer, it returns a scoped
// reference of the capturer if it is created successfully, otherwise it returns
// NULL. This method should be used in tests where
// HardwareSampleRatesAreValid() has been called and returned true.
scoped_refptr<WebRtcAudioCapturer> CreateAudioCapturer(
    WebRtcAudioDeviceImpl* webrtc_audio_device) {
  media::AudioHardwareConfig* hardware_config =
      RenderThreadImpl::current()->GetAudioHardwareConfig();
  // Use native capture sample rate and channel configuration to get some
  // action in this test.
  int sample_rate = hardware_config->GetInputSampleRate();
  media::ChannelLayout channel_layout =
      hardware_config->GetInputChannelLayout();
  // Default (empty) constraints: no constraint-driven audio processing.
  blink::WebMediaConstraints constraints;
  StreamDeviceInfo device(MEDIA_DEVICE_AUDIO_CAPTURE,
                          media::AudioManagerBase::kDefaultDeviceName,
                          media::AudioManagerBase::kDefaultDeviceId,
                          sample_rate, channel_layout, 0);
  // A non-zero session id marks the device as an opened capture session.
  device.session_id = 1;
  return WebRtcAudioCapturer::CreateCapturer(kRenderViewId, device,
                                             webrtc_audio_device);
// Create and start a local audio track. Starting the audio track will connect
// the audio track to the capturer and also start the source of the capturer.
// Also, connect the sink to the audio track.
scoped_ptr<WebRtcLocalAudioTrack>
CreateAndStartLocalAudioTrack(WebRtcLocalAudioTrackAdapter* adapter,
                              WebRtcAudioCapturer* capturer,
                              PeerConnectionAudioSink* sink) {
  // NULL third argument: the track performs no extra audio processing here.
  scoped_ptr<WebRtcLocalAudioTrack> local_audio_track(
      new WebRtcLocalAudioTrack(adapter, capturer, NULL));

  // Attach the sink before starting so no initial callbacks are missed.
  local_audio_track->AddSink(sink);
  local_audio_track->Start();
  return local_audio_track.Pass();
// webrtc::VoEMediaProcess implementation that records the parameters of the
// most recent Process() callback and signals |event| so the test body can
// wait until audio actually flows through the engine.
class WebRTCMediaProcessImpl : public webrtc::VoEMediaProcess {
  explicit WebRTCMediaProcessImpl(base::WaitableEvent* event)
      type_(webrtc::kPlaybackPerChannel),
  virtual ~WebRTCMediaProcessImpl() {}

  // TODO(henrika): Refactor in WebRTC and convert to Chrome coding style.
  // Called by the VoiceEngine on its own thread for every 10 ms audio chunk;
  // all state written here is therefore guarded by |lock_|.
  virtual void Process(int channel,
                       webrtc::ProcessingTypes type,
                       int16_t audio_10ms[],
                       bool is_stereo) OVERRIDE {
    base::AutoLock auto_lock(lock_);
    channel_id_ = channel;
    packet_size_ = length;
    sample_rate_ = sampling_freq;
    channels_ = (is_stereo ? 2 : 1);
    // Signal that a new callback has been received.

  // Accessors below take |lock_| because Process() runs on a different
  // thread than the test assertions that read these values.
  int channel_id() const {
    base::AutoLock auto_lock(lock_);
    base::AutoLock auto_lock(lock_);
  int packet_size() const {
    base::AutoLock auto_lock(lock_);
  int sample_rate() const {
    base::AutoLock auto_lock(lock_);
  base::WaitableEvent* event_;  // Not owned; signaled from Process().
  webrtc::ProcessingTypes type_;
  mutable base::Lock lock_;
  DISALLOW_COPY_AND_ASSIGN(WebRTCMediaProcessImpl);
// TODO(xians): Use MediaStreamAudioSink.
// Sink that signals |event| on the first OnData() callback so tests can wait
// for capture data to arrive.
class MockMediaStreamAudioSink : public PeerConnectionAudioSink {
  explicit MockMediaStreamAudioSink(base::WaitableEvent* event)
  virtual ~MockMediaStreamAudioSink() {}

  // PeerConnectionAudioSink implementation.
  virtual int OnData(const int16* audio_data,
                     int number_of_channels,
                     int number_of_frames,
                     const std::vector<int>& channels,
                     int audio_delay_milliseconds,
                     bool need_audio_processing,
                     bool key_pressed) OVERRIDE {
    // Signal that a callback has been received.

  // Set the format for the capture audio parameters.
  // Intentionally a no-op: this mock does not care about the format.
  virtual void OnSetFormat(
      const media::AudioParameters& params) OVERRIDE {}

  base::WaitableEvent* event_;  // Not owned.

  DISALLOW_COPY_AND_ASSIGN(MockMediaStreamAudioSink);
// Renderer source that signals |event| on the first RenderData() callback so
// tests can wait for the playout path to start pulling audio.
class MockWebRtcAudioRendererSource : public WebRtcAudioRendererSource {
  explicit MockWebRtcAudioRendererSource(base::WaitableEvent* event)
  virtual ~MockWebRtcAudioRendererSource() {}

  // WebRtcAudioRendererSource implementation.
  virtual void RenderData(uint8* audio_data,
                          int number_of_channels,
                          int number_of_frames,
                          int audio_delay_milliseconds) OVERRIDE {
    // Signal that a callback has been received.
    // Initialize the memory to zero to avoid uninitialized warning from
    memset(audio_data, 0,
           sizeof(int16) * number_of_channels * number_of_frames);

  virtual void SetRenderFormat(const media::AudioParameters& params) OVERRIDE {

  // No-op: this mock never registers itself with a renderer.
  virtual void RemoveAudioRenderer(WebRtcAudioRenderer* renderer) OVERRIDE {};

  base::WaitableEvent* event_;  // Not owned.

  DISALLOW_COPY_AND_ASSIGN(MockWebRtcAudioRendererSource);
276 // Prints numerical information to stdout in a controlled format so we can plot
278 void PrintPerfResultMs(const char* graph, const char* trace, float time_ms) {
280 base::StringAppendF(×, "%.2f,", time_ms);
281 std::string result = base::StringPrintf(
282 "%sRESULT %s%s: %s= %s%s%s %s\n", "*", graph, "",
283 trace, "[", times.c_str(), "]", "ms");
286 printf("%s", result.c_str());
// Reads |length| bytes of raw 16-bit stereo 48 kHz speech test data from the
// source tree into |data|. The file must be at least |length| bytes long.
void ReadDataFromSpeechFile(char* data, int length) {
  base::FilePath data_file;
  CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &data_file));
  // NOTE(review): FilePath::Append() returns a new path; as written the
  // result appears discarded — presumably an assignment back to |data_file|
  // exists here. Verify against the full file.
  data_file.Append(FILE_PATH_LITERAL("media"))
      .Append(FILE_PATH_LITERAL("test"))
      .Append(FILE_PATH_LITERAL("data"))
      .Append(FILE_PATH_LITERAL("speech_16b_stereo_48kHz.raw"));
  DCHECK(base::PathExists(data_file));
  int64 data_file_size64 = 0;
  // NOTE(review): GetFileSize() is invoked inside DCHECK(); in builds where
  // DCHECK compiles away the call (and the size it reports) is skipped —
  // confirm this is intentional.
  DCHECK(base::GetFileSize(data_file, &data_file_size64));
  EXPECT_EQ(length, base::ReadFile(data_file, data, length));
  DCHECK(data_file_size64 > length);
// Configures the iSAC codec for both send and receive on |channel|.
void SetChannelCodec(webrtc::VoiceEngine* engine, int channel) {
  // TODO(xians): move the codec as an input param to this function, and add
  // tests for different codecs, also add support to Android and IOS.
#if !defined(OS_ANDROID) && !defined(OS_IOS)
  webrtc::CodecInst isac;
  strcpy(isac.plname, "ISAC");
  ScopedWebRTCPtr<webrtc::VoECodec> codec(engine);
  // Apply the same codec to the receive and send side of the channel.
  EXPECT_EQ(0, codec->SetRecPayloadType(channel, isac));
  EXPECT_EQ(0, codec->SetSendCodec(channel, isac));
// Returns the time in millisecond for sending packets to WebRtc for encoding,
// signal processing, decoding and receiving them back.
// |enable_apm| toggles AGC/NS/AEC processing so the cost of the signal
// processing components can be measured separately.
int RunWebRtcLoopbackTimeTest(media::AudioManager* manager,
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  EXPECT_TRUE(engine.valid());
  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  EXPECT_TRUE(base.valid());
  int err = base->Init(webrtc_audio_device.get());

  // We use OnSetFormat() and SetRenderFormat() to configure the audio
  // parameters so that this test can run on machine without hardware device.
  const media::AudioParameters params = media::AudioParameters(
      media::AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
  // The device implements both the capture-sink and renderer-source
  // interfaces; cast to each side to drive the full loopback path manually.
  PeerConnectionAudioSink* capturer_sink =
      static_cast<PeerConnectionAudioSink*>(webrtc_audio_device.get());
  WebRtcAudioRendererSource* renderer_source =
      static_cast<WebRtcAudioRendererSource*>(webrtc_audio_device.get());
  renderer_source->SetRenderFormat(params);

  // Turn on/off all the signal processing components like AGC, AEC and NS.
  ScopedWebRTCPtr<webrtc::VoEAudioProcessing> audio_processing(engine.get());
  EXPECT_TRUE(audio_processing.valid());
  audio_processing->SetAgcStatus(enable_apm);
  audio_processing->SetNsStatus(enable_apm);
  audio_processing->SetEcStatus(enable_apm);

  // Create a voice channel for the WebRtc.
  int channel = base->CreateChannel();
  EXPECT_NE(-1, channel);
  SetChannelCodec(engine.get(), channel);

  // Use our fake network transmission and start playout and recording.
  ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get());
  EXPECT_TRUE(network.valid());
  scoped_ptr<WebRTCTransportImpl> transport(
      new WebRTCTransportImpl(network.get()));
  EXPECT_EQ(0, network->RegisterExternalTransport(channel, *transport.get()));
  EXPECT_EQ(0, base->StartPlayout(channel));
  EXPECT_EQ(0, base->StartSend(channel));

  // Read speech data from a speech test file.
  // "* 2" converts frames to bytes for 16-bit samples.
  const int input_packet_size =
      params.frames_per_buffer() * 2 * params.channels();
  const int num_output_channels = webrtc_audio_device->output_channels();
  const int output_packet_size = webrtc_audio_device->output_buffer_size() * 2 *
  const size_t length = input_packet_size * kNumberOfPacketsForLoopbackTest;
  scoped_ptr<char[]> capture_data(new char[length]);
  ReadDataFromSpeechFile(capture_data.get(), length);

  scoped_ptr<uint8[]> buffer(new uint8[output_packet_size]);
  base::Time start_time = base::Time::Now();
  std::vector<int> voe_channels;
  voe_channels.push_back(channel);
  for (int j = 0; j < kNumberOfPacketsForLoopbackTest; ++j) {
    // Sending fake capture data to WebRtc.
    capturer_sink->OnData(
        reinterpret_cast<int16*>(capture_data.get() + input_packet_size * j),
        params.sample_rate(),
        params.frames_per_buffer(),
        kHardwareLatencyInMs,

    // Receiving data from WebRtc.
    renderer_source->RenderData(
        reinterpret_cast<uint8*>(buffer.get()),
        num_output_channels, webrtc_audio_device->output_buffer_size(),
        kHardwareLatencyInMs + delay);
    delay = (base::Time::Now() - start_time).InMilliseconds();

  // Total wall-clock time for the whole loopback run.
  int latency = (base::Time::Now() - start_time).InMilliseconds();

  EXPECT_EQ(0, base->StopSend(channel));
  EXPECT_EQ(0, base->StopPlayout(channel));
  EXPECT_EQ(0, base->DeleteChannel(channel));
  EXPECT_EQ(0, base->Terminate());
// Trivial test which verifies that one part of the test harness
// (HardwareSampleRatesAreValid()) works as intended for all supported
// hardware input sample rates.
TEST_F(MAYBE_WebRTCAudioDeviceTest, TestValidInputRates) {
  int valid_rates[] = {16000, 32000, 44100, 48000, 96000};

  // Verify that we will approve all rates listed in |valid_rates|.
  for (size_t i = 0; i < arraysize(valid_rates); ++i) {
    EXPECT_TRUE(FindElementInArray(valid_rates, arraysize(valid_rates),

  // Verify that any value outside the valid range results in negative
  int invalid_rates[] = {-1, 0, 8000, 11025, 22050, 192000};
  for (size_t i = 0; i < arraysize(invalid_rates); ++i) {
    EXPECT_FALSE(FindElementInArray(valid_rates, arraysize(valid_rates),
// Trivial test which verifies that one part of the test harness
// (HardwareSampleRatesAreValid()) works as intended for all supported
// hardware output sample rates.
TEST_F(MAYBE_WebRTCAudioDeviceTest, TestValidOutputRates) {
  int valid_rates[] = {44100, 48000, 96000};

  // Verify that we will approve all rates listed in |valid_rates|.
  for (size_t i = 0; i < arraysize(valid_rates); ++i) {
    EXPECT_TRUE(FindElementInArray(valid_rates, arraysize(valid_rates),

  // Verify that any value outside the valid range results in negative
  int invalid_rates[] = {-1, 0, 8000, 11025, 22050, 32000, 192000};
  for (size_t i = 0; i < arraysize(invalid_rates); ++i) {
    EXPECT_FALSE(FindElementInArray(valid_rates, arraysize(valid_rates),
// Basic test that instantiates and initializes an instance of
// WebRtcAudioDeviceImpl.
TEST_F(MAYBE_WebRTCAudioDeviceTest, Construct) {
  // This test crashes on Win XP bots.
  if (base::win::GetVersion() <= base::win::VERSION_XP)

  // Use fixed fake hardware parameters so the test runs without real devices.
  AudioParameters input_params(
      AudioParameters::AUDIO_PCM_LOW_LATENCY,
      media::CHANNEL_LAYOUT_MONO,
  AudioParameters output_params(
      AudioParameters::AUDIO_PCM_LOW_LATENCY,
      media::CHANNEL_LAYOUT_STEREO,
  media::AudioHardwareConfig audio_config(input_params, output_params);
  SetAudioHardwareConfig(&audio_config);

  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());

  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  int err = base->Init(webrtc_audio_device.get());
  EXPECT_TRUE(CreateAudioCapturer(webrtc_audio_device) != NULL);

  EXPECT_EQ(0, base->Terminate());
// Verify that a call to webrtc::VoEBase::StartPlayout() starts audio output
// with the correct set of parameters. A WebRtcAudioDeviceImpl instance will
// be utilized to implement the actual audio path. The test registers a
// webrtc::VoEExternalMedia implementation to hijack the output audio and
// verify that streaming starts correctly.
// TODO(henrika): include on Android as well as soon as all race conditions
// in OpenSLES are resolved.
#if defined(OS_ANDROID)
#define MAYBE_StartPlayout DISABLED_StartPlayout
#define MAYBE_StartPlayout StartPlayout
TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_StartPlayout) {
  if (!has_output_devices_) {
    LOG(WARNING) << "No output device detected.";

  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());
  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());

  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  int err = base->Init(webrtc_audio_device.get());

  // Hijack the playout path: |media_process| records callback parameters and
  // signals |event| when the first Process() callback fires.
  ScopedWebRTCPtr<webrtc::VoEExternalMedia> external_media(engine.get());
  ASSERT_TRUE(external_media.valid());
  base::WaitableEvent event(false, false);
  scoped_ptr<WebRTCMediaProcessImpl> media_process(
      new WebRTCMediaProcessImpl(&event));
  int ch = base->CreateChannel();

  EXPECT_EQ(0, external_media->RegisterExternalMediaProcessing(
      ch, webrtc::kPlaybackPerChannel, *media_process.get()));

  scoped_refptr<webrtc::MediaStreamInterface> media_stream(
      new talk_base::RefCountedObject<MockMediaStream>("label"));

  EXPECT_EQ(0, base->StartPlayout(ch));
  scoped_refptr<WebRtcAudioRenderer> renderer(
      CreateDefaultWebRtcAudioRenderer(kRenderViewId, media_stream));
  scoped_refptr<MediaStreamAudioRenderer> proxy(
      renderer->CreateSharedAudioRendererProxy(media_stream));
  EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));

  // Block until the playout callback arrives (or the timeout elapses).
  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
  WaitForIOThreadCompletion();

  EXPECT_TRUE(webrtc_audio_device->Playing());
  EXPECT_FALSE(webrtc_audio_device->Recording());
  EXPECT_EQ(ch, media_process->channel_id());
  EXPECT_EQ(webrtc::kPlaybackPerChannel, media_process->type());
  // 80 samples @ 8000 Hz corresponds to one 10 ms mono chunk.
  EXPECT_EQ(80, media_process->packet_size());
  EXPECT_EQ(8000, media_process->sample_rate());

  EXPECT_EQ(0, external_media->DeRegisterExternalMediaProcessing(
      ch, webrtc::kPlaybackPerChannel));
  EXPECT_EQ(0, base->StopPlayout(ch));

  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
// Verify that a call to webrtc::VoEBase::StartRecording() starts audio input
// with the correct set of parameters. A WebRtcAudioDeviceImpl instance will
// be utilized to implement the actual audio path. The test registers a
// webrtc::VoEExternalMedia implementation to hijack the input audio and
// verify that streaming starts correctly. An external transport implementation
// is also required to ensure that "sending" can start without actually trying
// to send encoded packets to the network. Our main interest here is to ensure
// that the audio capturing starts as it should.
// Disabled when running headless since the bots don't have the required config.

// TODO(leozwang): Because ExternalMediaProcessing is disabled in webrtc,
// disable this unit test on Android for now.
#if defined(OS_ANDROID)
#define MAYBE_StartRecording DISABLED_StartRecording
#elif defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
// This test is failing on ARM linux: http://crbug.com/238490
#define MAYBE_StartRecording DISABLED_StartRecording
// Flakily hangs on all other platforms as well: crbug.com/268376.
// When the flakiness has been fixed, you probably want to leave it disabled
// on the above platforms.
#define MAYBE_StartRecording DISABLED_StartRecording

TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_StartRecording) {
  if (!has_input_devices_ || !has_output_devices_) {
    LOG(WARNING) << "Missing audio devices.";

  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())

  // TODO(tommi): extend MediaObserver and MockMediaObserver with support
  // for new interfaces, like OnSetAudioStreamRecording(). When done, add
  // EXPECT_CALL() macros here.
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());

  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());
  int err = base->Init(webrtc_audio_device.get());

  int ch = base->CreateChannel();

  // Hijack the recording path: |media_process| signals |event| on the first
  // kRecordingPerChannel Process() callback.
  ScopedWebRTCPtr<webrtc::VoEExternalMedia> external_media(engine.get());
  ASSERT_TRUE(external_media.valid());

  base::WaitableEvent event(false, false);
  scoped_ptr<WebRTCMediaProcessImpl> media_process(
      new WebRTCMediaProcessImpl(&event));
  EXPECT_EQ(0, external_media->RegisterExternalMediaProcessing(
      ch, webrtc::kRecordingPerChannel, *media_process.get()));

  // We must add an external transport implementation to be able to start
  // recording without actually sending encoded packets to the network. All
  // we want to do here is to verify that audio capturing starts as it should.
  ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get());
  scoped_ptr<WebRTCTransportImpl> transport(
      new WebRTCTransportImpl(network.get()));
  EXPECT_EQ(0, network->RegisterExternalTransport(ch, *transport.get()));
  EXPECT_EQ(0, base->StartSend(ch));

  // Create the capturer which starts the source of the data flow.
  scoped_refptr<WebRtcAudioCapturer> capturer(
      CreateAudioCapturer(webrtc_audio_device));
  EXPECT_TRUE(capturer);

  // Create and start a local audio track which is bridging the data flow
  // between the capturer and WebRtcAudioDeviceImpl.
  scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
      WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
  scoped_ptr<WebRtcLocalAudioTrack> local_audio_track(
      CreateAndStartLocalAudioTrack(adapter, capturer, webrtc_audio_device));
  // connect the VoE voice channel to the audio track
  static_cast<webrtc::AudioTrackInterface*>(
      adapter.get())->GetRenderer()->AddChannel(ch);

  // Verify we get the data flow.
  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
  WaitForIOThreadCompletion();

  EXPECT_FALSE(webrtc_audio_device->Playing());
  EXPECT_TRUE(webrtc_audio_device->Recording());
  EXPECT_EQ(ch, media_process->channel_id());
  EXPECT_EQ(webrtc::kRecordingPerChannel, media_process->type());
  // 80 samples @ 8000 Hz corresponds to one 10 ms mono chunk.
  EXPECT_EQ(80, media_process->packet_size());
  EXPECT_EQ(8000, media_process->sample_rate());

  EXPECT_EQ(0, external_media->DeRegisterExternalMediaProcessing(
      ch, webrtc::kRecordingPerChannel));
  EXPECT_EQ(0, base->StopSend(ch));

  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
// Uses WebRtcAudioDeviceImpl to play a local wave file.
// TODO(henrika): include on Android as well as soon as all race conditions
// in OpenSLES are resolved.
#if defined(OS_ANDROID)
#define MAYBE_PlayLocalFile DISABLED_PlayLocalFile
#define MAYBE_PlayLocalFile PlayLocalFile
TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_PlayLocalFile) {
  if (!has_output_devices_) {
    LOG(WARNING) << "No output device detected.";

  std::string file_path(
      GetTestDataPath(FILE_PATH_LITERAL("speechmusic_mono_16kHz.pcm")));

  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());
  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());

  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  int err = base->Init(webrtc_audio_device.get());

  int ch = base->CreateChannel();

  EXPECT_EQ(0, base->StartPlayout(ch));
  scoped_refptr<webrtc::MediaStreamInterface> media_stream(
      new talk_base::RefCountedObject<MockMediaStream>("label"));
  scoped_refptr<WebRtcAudioRenderer> renderer(
      CreateDefaultWebRtcAudioRenderer(kRenderViewId, media_stream));
  scoped_refptr<MediaStreamAudioRenderer> proxy(
      renderer->CreateSharedAudioRendererProxy(media_stream));
  EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));

  // Sanity-check the file before playing it so a missing/empty test asset
  // fails loudly instead of producing silence.
  ScopedWebRTCPtr<webrtc::VoEFile> file(engine.get());
  ASSERT_TRUE(file.valid());

  EXPECT_EQ(0, file->GetFileDuration(file_path.c_str(), duration,
                                     webrtc::kFileFormatPcm16kHzFile));
  EXPECT_NE(0, duration);

  EXPECT_EQ(0, file->StartPlayingFileLocally(ch, file_path.c_str(), false,
                                             webrtc::kFileFormatPcm16kHzFile));

  // Play 2 seconds worth of audio and then quit.
  message_loop_.PostDelayedTask(FROM_HERE,
                                base::MessageLoop::QuitClosure(),
                                base::TimeDelta::FromSeconds(2));

  EXPECT_EQ(0, base->StopSend(ch));
  EXPECT_EQ(0, base->StopPlayout(ch));
  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
// Uses WebRtcAudioDeviceImpl to play out recorded audio in loopback.
// An external transport implementation is utilized to feed back RTP packets
// which are recorded, encoded, packetized into RTP packets and finally
// "transmitted". The RTP packets are then fed back into the VoiceEngine
// where they are decoded and played out on the default audio output device.
// Disabled when running headless since the bots don't have the required config.
// TODO(henrika): improve quality by using a wideband codec, enabling noise-

// FullDuplexAudioWithAGC is flaky on Android, disable it for now.
// Also flakily hangs on Windows: crbug.com/269348.
#if defined(OS_ANDROID) || defined(OS_WIN)
#define MAYBE_FullDuplexAudioWithAGC DISABLED_FullDuplexAudioWithAGC
#define MAYBE_FullDuplexAudioWithAGC FullDuplexAudioWithAGC
TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_FullDuplexAudioWithAGC) {
  if (!has_output_devices_ || !has_input_devices_) {
    LOG(WARNING) << "Missing audio devices.";

  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());
  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());

  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  int err = base->Init(webrtc_audio_device.get());

  // Verify the default AGC state differs per platform before starting.
  ScopedWebRTCPtr<webrtc::VoEAudioProcessing> audio_processing(engine.get());
  ASSERT_TRUE(audio_processing.valid());
#if defined(OS_ANDROID)
  // On Android, by default AGC is off.
  webrtc::AgcModes agc_mode = webrtc::kAgcDefault;
  EXPECT_EQ(0, audio_processing->GetAgcStatus(enabled, agc_mode));
  EXPECT_FALSE(enabled);
  bool enabled = false;
  webrtc::AgcModes agc_mode = webrtc::kAgcDefault;
  EXPECT_EQ(0, audio_processing->GetAgcStatus(enabled, agc_mode));
  EXPECT_TRUE(enabled);
  EXPECT_EQ(agc_mode, webrtc::kAgcAdaptiveAnalog);

  int ch = base->CreateChannel();

  scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
      WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
  scoped_refptr<WebRtcAudioCapturer> capturer(
      CreateAudioCapturer(webrtc_audio_device));
  EXPECT_TRUE(capturer);
  scoped_ptr<WebRtcLocalAudioTrack> local_audio_track(
      CreateAndStartLocalAudioTrack(adapter, capturer, webrtc_audio_device));
  // connect the VoE voice channel to the audio track adapter.
  static_cast<webrtc::AudioTrackInterface*>(
      adapter.get())->GetRenderer()->AddChannel(ch);

  ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get());
  ASSERT_TRUE(network.valid());
  scoped_ptr<WebRTCTransportImpl> transport(
      new WebRTCTransportImpl(network.get()));
  EXPECT_EQ(0, network->RegisterExternalTransport(ch, *transport.get()));
  EXPECT_EQ(0, base->StartPlayout(ch));
  EXPECT_EQ(0, base->StartSend(ch));
  scoped_refptr<webrtc::MediaStreamInterface> media_stream(
      new talk_base::RefCountedObject<MockMediaStream>("label"));
  scoped_refptr<WebRtcAudioRenderer> renderer(
      CreateDefaultWebRtcAudioRenderer(kRenderViewId, media_stream));
  scoped_refptr<MediaStreamAudioRenderer> proxy(
      renderer->CreateSharedAudioRendererProxy(media_stream));
  EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));

  VLOG(0) << ">> You should now be able to hear yourself in loopback...";

  // Run the loopback for 2 seconds, then quit the message loop.
  message_loop_.PostDelayedTask(FROM_HERE,
                                base::MessageLoop::QuitClosure(),
                                base::TimeDelta::FromSeconds(2));

  EXPECT_EQ(0, base->StopSend(ch));
  EXPECT_EQ(0, base->StopPlayout(ch));

  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
// Measures the wall-clock time from StartSend() until the first captured
// audio callback arrives, and reports it as a perf result.
// Test times out on bots, see http://crbug.com/247447
TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_WebRtcRecordingSetupTime) {
  if (!has_input_devices_) {
    LOG(WARNING) << "Missing audio capture devices.";

  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())

  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());

  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());
  int err = base->Init(webrtc_audio_device.get());

  int ch = base->CreateChannel();

  scoped_refptr<WebRtcAudioCapturer> capturer(
      CreateAudioCapturer(webrtc_audio_device));
  EXPECT_TRUE(capturer);
  base::WaitableEvent event(false, false);
  scoped_ptr<MockMediaStreamAudioSink> sink(
      new MockMediaStreamAudioSink(&event));

  // Create and start a local audio track. Starting the audio track will connect
  // the audio track to the capturer and also start the source of the capturer.
  scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
      WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
  scoped_ptr<WebRtcLocalAudioTrack> local_audio_track(
      CreateAndStartLocalAudioTrack(adapter, capturer, sink.get()));

  // connect the VoE voice channel to the audio track adapter.
  static_cast<webrtc::AudioTrackInterface*>(
      adapter.get())->GetRenderer()->AddChannel(ch);

  // Time the interval between starting to send and the first data callback.
  base::Time start_time = base::Time::Now();
  EXPECT_EQ(0, base->StartSend(ch));

  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
  int delay = (base::Time::Now() - start_time).InMilliseconds();
  PrintPerfResultMs("webrtc_recording_setup_c", "t", delay);

  EXPECT_EQ(0, base->StopSend(ch));
  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
// Measures the wall-clock time until the first playout callback arrives and
// reports it as a perf result.
// TODO(henrika): include on Android as well as soon as all race conditions
// in OpenSLES are resolved.
#if defined(OS_ANDROID)
#define MAYBE_WebRtcPlayoutSetupTime DISABLED_WebRtcPlayoutSetupTime
#define MAYBE_WebRtcPlayoutSetupTime WebRtcPlayoutSetupTime
TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_WebRtcPlayoutSetupTime) {
  if (!has_output_devices_) {
    LOG(WARNING) << "No output device detected.";

  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())

  // |renderer_source| signals |event| on the first RenderData() callback.
  base::WaitableEvent event(false, false);
  scoped_ptr<MockWebRtcAudioRendererSource> renderer_source(
      new MockWebRtcAudioRendererSource(&event));

  scoped_refptr<webrtc::MediaStreamInterface> media_stream(
      new talk_base::RefCountedObject<MockMediaStream>("label"));
  scoped_refptr<WebRtcAudioRenderer> renderer(
      CreateDefaultWebRtcAudioRenderer(kRenderViewId, media_stream));
  renderer->Initialize(renderer_source.get());
  scoped_refptr<MediaStreamAudioRenderer> proxy(
      renderer->CreateSharedAudioRendererProxy(media_stream));

  // Start the timer and playout.
  base::Time start_time = base::Time::Now();
  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
  int delay = (base::Time::Now() - start_time).InMilliseconds();
  PrintPerfResultMs("webrtc_playout_setup_c", "t", delay);
#if defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
// Timing out on ARM linux bot: http://crbug.com/238490
#define MAYBE_WebRtcLoopbackTimeWithoutSignalProcessing \
        DISABLED_WebRtcLoopbackTimeWithoutSignalProcessing
#define MAYBE_WebRtcLoopbackTimeWithoutSignalProcessing \
        WebRtcLoopbackTimeWithoutSignalProcessing

// Measures loopback latency with APM (AGC/NS/AEC) disabled.
TEST_F(MAYBE_WebRTCAudioDeviceTest,
       MAYBE_WebRtcLoopbackTimeWithoutSignalProcessing) {
  // This test hangs on WinXP: see http://crbug.com/318189.
  if (base::win::GetVersion() <= base::win::VERSION_XP) {
    LOG(WARNING) << "Test disabled due to the test hangs on WinXP.";

  int latency = RunWebRtcLoopbackTimeTest(audio_manager_.get(), false);
  // NOTE(review): "sigal" in the graph name below is a typo for "signal",
  // but it is a runtime string consumed by the perf dashboard — renaming it
  // would orphan the historical data series, so it is left as-is.
  PrintPerfResultMs("webrtc_loopback_without_sigal_processing (100 packets)",
#if defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
// Timing out on ARM linux bot: http://crbug.com/238490
#define MAYBE_WebRtcLoopbackTimeWithSignalProcessing \
        DISABLED_WebRtcLoopbackTimeWithSignalProcessing
#define MAYBE_WebRtcLoopbackTimeWithSignalProcessing \
        WebRtcLoopbackTimeWithSignalProcessing

// Measures loopback latency with APM (AGC/NS/AEC) enabled; compare against
// the "without signal processing" variant above.
TEST_F(MAYBE_WebRTCAudioDeviceTest,
       MAYBE_WebRtcLoopbackTimeWithSignalProcessing) {
  // This test hangs on WinXP: see http://crbug.com/318189.
  if (base::win::GetVersion() <= base::win::VERSION_XP) {
    LOG(WARNING) << "Test disabled due to the test hangs on WinXP.";

  int latency = RunWebRtcLoopbackTimeTest(audio_manager_.get(), true);
  PrintPerfResultMs("webrtc_loopback_with_signal_processing (100 packets)",
997 } // namespace content