Upstream version 7.36.149.0
[platform/framework/web/crosswalk.git] / src / third_party / webrtc / modules / audio_device / win / audio_device_core_win.cc
1 /*
2  *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3  *
4  *  Use of this source code is governed by a BSD-style license
5  *  that can be found in the LICENSE file in the root of the source
6  *  tree. An additional intellectual property rights grant can be found
7  *  in the file PATENTS.  All contributing project authors may
8  *  be found in the AUTHORS file in the root of the source tree.
9  */
10
11 #pragma warning(disable: 4995)  // name was marked as #pragma deprecated
12
13 #if (_MSC_VER >= 1310) && (_MSC_VER < 1400)
14 // Reports the major and minor versions of the compiler.
15 // For example, 1310 for Microsoft Visual C++ .NET 2003. 1310 represents version 13 and a 1.0 point release.
16 // The Visual C++ 2005 compiler version is 1400.
17 // Type cl /? at the command line to see the major and minor versions of your compiler along with the build number.
18 #pragma message(">> INFO: Windows Core Audio is not supported in VS 2003")
19 #endif
20
21 #include "webrtc/modules/audio_device/audio_device_config.h"
22
23 #if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
24 #pragma message(">> INFO: WEBRTC_WINDOWS_CORE_AUDIO_BUILD is defined")
25 #else
26 #pragma message(">> INFO: WEBRTC_WINDOWS_CORE_AUDIO_BUILD is *not* defined")
27 #endif
28
29 #ifdef WEBRTC_WINDOWS_CORE_AUDIO_BUILD
30
31 #include "webrtc/modules/audio_device/win/audio_device_core_win.h"
32
33 #include <assert.h>
34 #include <string.h>
35
36 #include <windows.h>
37 #include <comdef.h>
38 #include <dmo.h>
39 #include <Functiondiscoverykeys_devpkey.h>
40 #include <mmsystem.h>
41 #include <strsafe.h>
42 #include <uuids.h>
43
44 #include "webrtc/modules/audio_device/audio_device_utility.h"
45 #include "webrtc/system_wrappers/interface/sleep.h"
46 #include "webrtc/system_wrappers/interface/trace.h"
47
// Macro that calls a COM method returning HRESULT value. On failure it
// jumps to a local "Exit" label, which must exist in the enclosing function.
#define EXIT_ON_ERROR(hres)    do { if (FAILED(hres)) goto Exit; } while(0)

// Macro that continues to a COM error. On failure it jumps to a local
// "Next" label (used to skip one iteration inside enumeration loops).
#define CONTINUE_ON_ERROR(hres) do { if (FAILED(hres)) goto Next; } while(0)

// Macro that releases a COM object if not NULL, and clears the pointer so
// a repeated release is harmless.
#define SAFE_RELEASE(p)     do { if ((p)) { (p)->Release(); (p) = NULL; } } while(0)

// Rounds a floating-point value half away from zero to the nearest int.
#define ROUND(x) ((x) >=0 ? (int)((x) + 0.5) : (int)((x) - 0.5))

// REFERENCE_TIME time units per millisecond
// (REFERENCE_TIME is expressed in 100-nanosecond units).
#define REFTIMES_PER_MILLISEC  10000
61
// Structure layout used by the MSVC debugger convention for naming a
// thread (passed to RaiseException with code 0x406D1388 — see MSDN
// "How to: Set a Thread Name"). NOTE(review): the raising code is not
// visible in this chunk; presumably a SetThreadName() helper elsewhere
// in this file uses it — confirm before changing.
typedef struct tagTHREADNAME_INFO
{
   DWORD dwType;        // must be 0x1000
   LPCSTR szName;       // pointer to name (in user addr space)
   DWORD dwThreadID;    // thread ID (-1=caller thread)
   DWORD dwFlags;       // reserved for future use, must be zero
} THREADNAME_INFO;
69
70 namespace webrtc {
71 namespace {
72
// COM concurrency model used by this module (multithreaded apartment).
enum { COM_THREADING_MODEL = COINIT_MULTITHREADED };

// DMO stream indices for the built-in AEC: stream 0 carries the
// microphone (capture) signal, stream 1 the loudspeaker (render) signal.
enum
{
    kAecCaptureStreamIndex = 0,
    kAecRenderStreamIndex = 1
};
80
81 // An implementation of IMediaBuffer, as required for
82 // IMediaObject::ProcessOutput(). After consuming data provided by
83 // ProcessOutput(), call SetLength() to update the buffer availability.
84 //
85 // Example implementation:
86 // http://msdn.microsoft.com/en-us/library/dd376684(v=vs.85).aspx
87 class MediaBufferImpl : public IMediaBuffer
88 {
89 public:
90     explicit MediaBufferImpl(DWORD maxLength)
91         : _data(new BYTE[maxLength]),
92           _length(0),
93           _maxLength(maxLength),
94           _refCount(0)
95     {}
96
97     // IMediaBuffer methods.
98     STDMETHOD(GetBufferAndLength(BYTE** ppBuffer, DWORD* pcbLength))
99     {
100         if (!ppBuffer || !pcbLength)
101         {
102             return E_POINTER;
103         }
104
105         *ppBuffer = _data;
106         *pcbLength = _length;
107
108         return S_OK;
109     }
110
111     STDMETHOD(GetMaxLength(DWORD* pcbMaxLength))
112     {
113         if (!pcbMaxLength)
114         {
115             return E_POINTER;
116         }
117
118         *pcbMaxLength = _maxLength;
119         return S_OK;
120     }
121
122     STDMETHOD(SetLength(DWORD cbLength))
123     {
124         if (cbLength > _maxLength)
125         {
126             return E_INVALIDARG;
127         }
128
129         _length = cbLength;
130         return S_OK;
131     }
132
133     // IUnknown methods.
134     STDMETHOD_(ULONG, AddRef())
135     {
136         return InterlockedIncrement(&_refCount);
137     }
138
139     STDMETHOD(QueryInterface(REFIID riid, void** ppv))
140     {
141         if (!ppv)
142         {
143             return E_POINTER;
144         }
145         else if (riid != IID_IMediaBuffer && riid != IID_IUnknown)
146         {
147             return E_NOINTERFACE;
148         }
149
150         *ppv = static_cast<IMediaBuffer*>(this);
151         AddRef();
152         return S_OK;
153     }
154
155     STDMETHOD_(ULONG, Release())
156     {
157         LONG refCount = InterlockedDecrement(&_refCount);
158         if (refCount == 0)
159         {
160             delete this;
161         }
162
163         return refCount;
164     }
165
166 private:
167     ~MediaBufferImpl()
168     {
169         delete [] _data;
170     }
171
172     BYTE* _data;
173     DWORD _length;
174     const DWORD _maxLength;
175     LONG _refCount;
176 };
177 }  // namespace
178
179 // ============================================================================
180 //                              Static Methods
181 // ============================================================================
182
183 // ----------------------------------------------------------------------------
184 //  CoreAudioIsSupported
185 // ----------------------------------------------------------------------------
186
// Probes, in order, everything the Core Audio backend needs:
//   1) OS version >= Vista SP1, 2) COM init in MTA mode succeeds,
//   3) the MMDevice enumerator can be created, and 4) a throwaway
//   AudioDeviceWindowsCore instance can init every render/capture device.
// Returns true only if all four checks pass; callers fall back to the
// Wave API otherwise.
bool AudioDeviceWindowsCore::CoreAudioIsSupported()
{
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, -1, "%s", __FUNCTION__);

    bool MMDeviceIsAvailable(false);
    bool coreAudioIsSupported(false);

    HRESULT hr(S_OK);
    // NOTE(review): FormatMessageW() below always writes wide characters,
    // so these TCHAR buffers are only correct in UNICODE builds where
    // TCHAR == WCHAR — confirm the build configuration.
    TCHAR buf[MAXERRORLENGTH];
    TCHAR errorText[MAXERRORLENGTH];

    // 1) Check if Windows version is Vista SP1 or later.
    //
    // CoreAudio is only available on Vista SP1 and later.
    //
    OSVERSIONINFOEX osvi;
    DWORDLONG dwlConditionMask = 0;
    int op = VER_LESS_EQUAL;

    // Initialize the OSVERSIONINFOEX structure.
    // The comparison value is 6.0.0.0 (Vista RTM, workstation). Combined
    // with VER_LESS_EQUAL below, VerifyVersionInfo() returns TRUE when the
    // running OS is Vista RTM *or older* (e.g. XP) — i.e. unsupported.
    ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
    osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
    osvi.dwMajorVersion = 6;
    osvi.dwMinorVersion = 0;
    osvi.wServicePackMajor = 0;
    osvi.wServicePackMinor = 0;
    osvi.wProductType = VER_NT_WORKSTATION;

    // Initialize the condition mask.
    VER_SET_CONDITION(dwlConditionMask, VER_MAJORVERSION, op);
    VER_SET_CONDITION(dwlConditionMask, VER_MINORVERSION, op);
    VER_SET_CONDITION(dwlConditionMask, VER_SERVICEPACKMAJOR, op);
    VER_SET_CONDITION(dwlConditionMask, VER_SERVICEPACKMINOR, op);
    VER_SET_CONDITION(dwlConditionMask, VER_PRODUCT_TYPE, VER_EQUAL);

    DWORD dwTypeMask = VER_MAJORVERSION | VER_MINORVERSION |
                       VER_SERVICEPACKMAJOR | VER_SERVICEPACKMINOR |
                       VER_PRODUCT_TYPE;

    // Perform the test.
    BOOL isVistaRTMorXP = VerifyVersionInfo(&osvi, dwTypeMask,
                                            dwlConditionMask);
    if (isVistaRTMorXP != 0)
    {
        WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1,
            "*** Windows Core Audio is only supported on Vista SP1 or later "
            "=> will revert to the Wave API ***");
        return false;
    }

    // 2) Initializes the COM library for use by the calling thread.

    // The COM init wrapper sets the thread's concurrency model to MTA,
    // and creates a new apartment for the thread if one is required. The
    // wrapper also ensures that each call to CoInitializeEx is balanced
    // by a corresponding call to CoUninitialize.
    //
    ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
    if (!comInit.succeeded()) {
      // Things will work even if an STA thread is calling this method but we
      // want to ensure that MTA is used and therefore return false here.
      return false;
    }

    // 3) Check if the MMDevice API is available.
    //
    // The Windows Multimedia Device (MMDevice) API enables audio clients to
    // discover audio endpoint devices, determine their capabilities, and create
    // driver instances for those devices.
    // Header file Mmdeviceapi.h defines the interfaces in the MMDevice API.
    // The MMDevice API consists of several interfaces. The first of these is the
    // IMMDeviceEnumerator interface. To access the interfaces in the MMDevice API,
    // a client obtains a reference to the IMMDeviceEnumerator interface of a
    // device-enumerator object by calling the CoCreateInstance function.
    //
    // Through the IMMDeviceEnumerator interface, the client can obtain references
    // to the other interfaces in the MMDevice API. The MMDevice API implements
    // the following interfaces:
    //
    // IMMDevice            Represents an audio device.
    // IMMDeviceCollection  Represents a collection of audio devices.
    // IMMDeviceEnumerator  Provides methods for enumerating audio devices.
    // IMMEndpoint          Represents an audio endpoint device.
    //
    IMMDeviceEnumerator* pIMMD(NULL);
    const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
    const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator);

    hr = CoCreateInstance(
            CLSID_MMDeviceEnumerator,   // GUID value of MMDeviceEnumerator coclass
            NULL,
            CLSCTX_ALL,
            IID_IMMDeviceEnumerator,    // GUID value of the IMMDeviceEnumerator interface
            (void**)&pIMMD );

    if (FAILED(hr))
    {
        // NOTE(review): this first trace passes |hr| but the format string
        // has no matching specifier — the value is dropped; the second
        // trace below does log it.
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
            "AudioDeviceWindowsCore::CoreAudioIsSupported() Failed to create the required COM object", hr);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, -1,
            "AudioDeviceWindowsCore::CoreAudioIsSupported() CoCreateInstance(MMDeviceEnumerator) failed (hr=0x%x)", hr);

        const DWORD dwFlags = FORMAT_MESSAGE_FROM_SYSTEM |
                              FORMAT_MESSAGE_IGNORE_INSERTS;
        const DWORD dwLangID = MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US);

        // Gets the system's human readable message string for this HRESULT.
        // All error messages are in English by default.
        DWORD messageLength = ::FormatMessageW(dwFlags,
                                               0,
                                               hr,
                                               dwLangID,
                                               errorText,
                                               MAXERRORLENGTH,
                                               NULL);

        assert(messageLength <= MAXERRORLENGTH);

        // Trims trailing white space (FormatMessage() leaves a trailing cr-lf.).
        for (; messageLength && ::isspace(errorText[messageLength - 1]);
             --messageLength)
        {
            errorText[messageLength - 1] = '\0';
        }

        StringCchPrintf(buf, MAXERRORLENGTH, TEXT("Error details: "));
        StringCchCat(buf, MAXERRORLENGTH, errorText);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, -1, "%S", buf);
    }
    else
    {
        MMDeviceIsAvailable = true;
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, -1,
            "AudioDeviceWindowsCore::CoreAudioIsSupported() CoCreateInstance(MMDeviceEnumerator) succeeded", hr);
        // The enumerator was only needed for the availability probe.
        SAFE_RELEASE(pIMMD);
    }

    // 4) Verify that we can create and initialize our Core Audio class.
    //
    // Also, perform a limited "API test" to ensure that Core Audio is supported for all devices.
    //
    if (MMDeviceIsAvailable)
    {
        coreAudioIsSupported = false;

        AudioDeviceWindowsCore* p = new AudioDeviceWindowsCore(-1);
        if (p == NULL)
        {
            return false;
        }

        // |ok| accumulates failures: each API below returns 0 on success
        // or a nonzero (negative) value on failure, so any failure leaves
        // |ok| nonzero for the final check.
        int ok(0);
        int temp_ok(0);
        bool available(false);

        ok |= p->Init();

        int16_t numDevsRec = p->RecordingDevices();
        for (uint16_t i = 0; i < numDevsRec; i++)
        {
            ok |= p->SetRecordingDevice(i);
            temp_ok = p->RecordingIsAvailable(available);
            ok |= temp_ok;
            ok |= (available == false);
            if (available)
            {
                ok |= p->InitMicrophone();
            }
            // Note: |ok| is cumulative, so once any device has failed this
            // warning also fires for every subsequent device id.
            if (ok)
            {
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, -1,
                    "AudioDeviceWindowsCore::CoreAudioIsSupported() Failed to use Core Audio Recording for device id=%i", i);
            }
        }

        int16_t numDevsPlay = p->PlayoutDevices();
        for (uint16_t i = 0; i < numDevsPlay; i++)
        {
            ok |= p->SetPlayoutDevice(i);
            temp_ok = p->PlayoutIsAvailable(available);
            ok |= temp_ok;
            ok |= (available == false);
            if (available)
            {
                ok |= p->InitSpeaker();
            }
            if (ok)
            {
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, -1 ,
                    "AudioDeviceWindowsCore::CoreAudioIsSupported() Failed to use Core Audio Playout for device id=%i", i);
            }
        }

        ok |= p->Terminate();

        if (ok == 0)
        {
            coreAudioIsSupported = true;
        }

        delete p;
    }

    if (coreAudioIsSupported)
    {
        WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1, "*** Windows Core Audio is supported ***");
    }
    else
    {
        WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1, "*** Windows Core Audio is NOT supported => will revert to the Wave API ***");
    }

    return (coreAudioIsSupported);
}
401
402 // ============================================================================
403 //                            Construction & Destruction
404 // ============================================================================
405
406 // ----------------------------------------------------------------------------
407 //  AudioDeviceWindowsCore() - ctor
408 // ----------------------------------------------------------------------------
409
// Constructs the device with all state zeroed/defaulted, initializes COM
// (MTA), optionally loads Avrt.dll for MMCSS thread-priority support,
// creates the event handles used by the render/capture threads, acquires
// the IMMDeviceEnumerator, and instantiates the AEC DMO.
AudioDeviceWindowsCore::AudioDeviceWindowsCore(const int32_t id) :
    _comInit(ScopedCOMInitializer::kMTA),
    _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
    _volumeMutex(*CriticalSectionWrapper::CreateCriticalSection()),
    _id(id),
    _ptrAudioBuffer(NULL),
    _ptrEnumerator(NULL),
    _ptrRenderCollection(NULL),
    _ptrCaptureCollection(NULL),
    _ptrDeviceOut(NULL),
    _ptrDeviceIn(NULL),
    _ptrClientOut(NULL),
    _ptrClientIn(NULL),
    _ptrRenderClient(NULL),
    _ptrCaptureClient(NULL),
    _ptrCaptureVolume(NULL),
    _ptrRenderSimpleVolume(NULL),
    _dmo(NULL),
    _mediaBuffer(NULL),
    _builtInAecEnabled(false),
    _playAudioFrameSize(0),
    _playSampleRate(0),
    _playBlockSize(0),
    _playChannels(2),
    _sndCardPlayDelay(0),
    _sndCardRecDelay(0),
    _writtenSamples(0),
    _readSamples(0),
    _playAcc(0),
    _recAudioFrameSize(0),
    _recSampleRate(0),
    _recBlockSize(0),
    _recChannels(2),
    _avrtLibrary(NULL),
    _winSupportAvrt(false),
    _hRenderSamplesReadyEvent(NULL),
    _hPlayThread(NULL),
    _hCaptureSamplesReadyEvent(NULL),
    _hRecThread(NULL),
    _hShutdownRenderEvent(NULL),
    _hShutdownCaptureEvent(NULL),
    _hRenderStartedEvent(NULL),
    _hCaptureStartedEvent(NULL),
    _hGetCaptureVolumeThread(NULL),
    _hSetCaptureVolumeThread(NULL),
    _hSetCaptureVolumeEvent(NULL),
    _hMmTask(NULL),
    _initialized(false),
    _recording(false),
    _playing(false),
    _recIsInitialized(false),
    _playIsInitialized(false),
    _speakerIsInitialized(false),
    _microphoneIsInitialized(false),
    _AGC(false),
    _playWarning(0),
    _playError(0),
    _recWarning(0),
    _recError(0),
    _playBufType(AudioDeviceModule::kAdaptiveBufferSize),
    _playBufDelay(80),
    _playBufDelayFixed(80),
    _usingInputDeviceIndex(false),
    _usingOutputDeviceIndex(false),
    _inputDevice(AudioDeviceModule::kDefaultCommunicationDevice),
    _outputDevice(AudioDeviceModule::kDefaultCommunicationDevice),
    _inputDeviceIndex(0),
    _outputDeviceIndex(0),
    _newMicLevel(0)
{
    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, "%s created", __FUNCTION__);
    // COM (MTA) must be up; CoreAudioIsSupported() verified this already.
    assert(_comInit.succeeded());

    // Try to load the Avrt DLL (provides the MMCSS thread-priority APIs).
    if (!_avrtLibrary)
    {
        // Get handle to the Avrt DLL module.
        _avrtLibrary = LoadLibrary(TEXT("Avrt.dll"));
        if (_avrtLibrary)
        {
            // Handle is valid (should only happen on Vista and later, where
            // Avrt.dll ships with the OS). Try to get the function addresses.
            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioDeviceWindowsCore::AudioDeviceWindowsCore() The Avrt DLL module is now loaded");

            _PAvRevertMmThreadCharacteristics = (PAvRevertMmThreadCharacteristics)GetProcAddress(_avrtLibrary, "AvRevertMmThreadCharacteristics");
            _PAvSetMmThreadCharacteristicsA = (PAvSetMmThreadCharacteristicsA)GetProcAddress(_avrtLibrary, "AvSetMmThreadCharacteristicsA");
            _PAvSetMmThreadPriority = (PAvSetMmThreadPriority)GetProcAddress(_avrtLibrary, "AvSetMmThreadPriority");

            // MMCSS support is only enabled if all three entry points resolve.
            if ( _PAvRevertMmThreadCharacteristics &&
                 _PAvSetMmThreadCharacteristicsA &&
                 _PAvSetMmThreadPriority)
            {
                WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioDeviceWindowsCore::AudioDeviceWindowsCore() AvRevertMmThreadCharacteristics() is OK");
                WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioDeviceWindowsCore::AudioDeviceWindowsCore() AvSetMmThreadCharacteristicsA() is OK");
                WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioDeviceWindowsCore::AudioDeviceWindowsCore() AvSetMmThreadPriority() is OK");
                _winSupportAvrt = true;
            }
        }
    }

    // Create our samples ready events - we want auto reset events that start in the not-signaled state.
    // The state of an auto-reset event object remains signaled until a single waiting thread is released,
    // at which time the system automatically sets the state to nonsignaled. If no threads are waiting,
    // the event object's state remains signaled.
    // (Except for _hShutdownCaptureEvent, which is manual-reset because it is
    // used to shut down multiple threads.)
    _hRenderSamplesReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    _hCaptureSamplesReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    _hShutdownRenderEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    _hShutdownCaptureEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
    _hRenderStartedEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    _hCaptureStartedEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    _hSetCaptureVolumeEvent = CreateEvent(NULL, FALSE, FALSE, NULL);

    // Non-zero defaults so later divisions are safe before the real values
    // are measured.
    _perfCounterFreq.QuadPart = 1;
    _perfCounterFactor = 0.0;
    _avgCPULoad = 0.0;

    // list of number of channels to use on recording side
    _recChannelsPrioList[0] = 2;    // stereo is prio 1
    _recChannelsPrioList[1] = 1;    // mono is prio 2

    // list of number of channels to use on playout side
    _playChannelsPrioList[0] = 2;    // stereo is prio 1
    _playChannelsPrioList[1] = 1;    // mono is prio 2

    HRESULT hr;

    // We know that this API will work since it has already been verified in
    // CoreAudioIsSupported, hence no need to check for errors here as well.

    // Retrieve the IMMDeviceEnumerator API (should load the MMDevAPI.dll)
    // TODO(henrika): we should probably move this allocation to Init() instead
    // and deallocate in Terminate() to make the implementation more symmetric.
    CoCreateInstance(
      __uuidof(MMDeviceEnumerator),
      NULL,
      CLSCTX_ALL,
      __uuidof(IMMDeviceEnumerator),
      reinterpret_cast<void**>(&_ptrEnumerator));
    assert(NULL != _ptrEnumerator);

    // DMO initialization for built-in WASAPI AEC.
    {
        IMediaObject* ptrDMO = NULL;
        hr = CoCreateInstance(CLSID_CWMAudioAEC,
                              NULL,
                              CLSCTX_INPROC_SERVER,
                              IID_IMediaObject,
                              reinterpret_cast<void**>(&ptrDMO));
        if (FAILED(hr) || ptrDMO == NULL)
        {
            // Since we check that _dmo is non-NULL in EnableBuiltInAEC(), the
            // feature is prevented from being enabled.
            _builtInAecEnabled = false;
            _TraceCOMError(hr);
        }
        // _dmo presumably is a ref-counting smart pointer: the assignment
        // takes its own reference, so the local one is released here.
        // TODO confirm against the header.
        _dmo = ptrDMO;
        SAFE_RELEASE(ptrDMO);
    }
}
570
571 // ----------------------------------------------------------------------------
572 //  AudioDeviceWindowsCore() - dtor
573 // ----------------------------------------------------------------------------
574
575 AudioDeviceWindowsCore::~AudioDeviceWindowsCore()
576 {
577     WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s destroyed", __FUNCTION__);
578
579     Terminate();
580
581     // The IMMDeviceEnumerator is created during construction. Must release
582     // it here and not in Terminate() since we don't recreate it in Init().
583     SAFE_RELEASE(_ptrEnumerator);
584
585     _ptrAudioBuffer = NULL;
586
587     if (NULL != _hRenderSamplesReadyEvent)
588     {
589         CloseHandle(_hRenderSamplesReadyEvent);
590         _hRenderSamplesReadyEvent = NULL;
591     }
592
593     if (NULL != _hCaptureSamplesReadyEvent)
594     {
595         CloseHandle(_hCaptureSamplesReadyEvent);
596         _hCaptureSamplesReadyEvent = NULL;
597     }
598
599     if (NULL != _hRenderStartedEvent)
600     {
601         CloseHandle(_hRenderStartedEvent);
602         _hRenderStartedEvent = NULL;
603     }
604
605     if (NULL != _hCaptureStartedEvent)
606     {
607         CloseHandle(_hCaptureStartedEvent);
608         _hCaptureStartedEvent = NULL;
609     }
610
611     if (NULL != _hShutdownRenderEvent)
612     {
613         CloseHandle(_hShutdownRenderEvent);
614         _hShutdownRenderEvent = NULL;
615     }
616
617     if (NULL != _hShutdownCaptureEvent)
618     {
619         CloseHandle(_hShutdownCaptureEvent);
620         _hShutdownCaptureEvent = NULL;
621     }
622
623     if (NULL != _hSetCaptureVolumeEvent)
624     {
625         CloseHandle(_hSetCaptureVolumeEvent);
626         _hSetCaptureVolumeEvent = NULL;
627     }
628
629     if (_avrtLibrary)
630     {
631         BOOL freeOK = FreeLibrary(_avrtLibrary);
632         if (!freeOK)
633         {
634             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
635                 "AudioDeviceWindowsCore::~AudioDeviceWindowsCore() failed to free the loaded Avrt DLL module correctly");
636         }
637         else
638         {
639             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
640                 "AudioDeviceWindowsCore::~AudioDeviceWindowsCore() the Avrt DLL module is now unloaded");
641         }
642     }
643
644     delete &_critSect;
645     delete &_volumeMutex;
646 }
647
648 // ============================================================================
649 //                                     API
650 // ============================================================================
651
652 // ----------------------------------------------------------------------------
653 //  AttachAudioBuffer
654 // ----------------------------------------------------------------------------
655
656 void AudioDeviceWindowsCore::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
657 {
658
659     _ptrAudioBuffer = audioBuffer;
660
661     // Inform the AudioBuffer about default settings for this implementation.
662     // Set all values to zero here since the actual settings will be done by
663     // InitPlayout and InitRecording later.
664     _ptrAudioBuffer->SetRecordingSampleRate(0);
665     _ptrAudioBuffer->SetPlayoutSampleRate(0);
666     _ptrAudioBuffer->SetRecordingChannels(0);
667     _ptrAudioBuffer->SetPlayoutChannels(0);
668 }
669
670 // ----------------------------------------------------------------------------
671 //  ActiveAudioLayer
672 // ----------------------------------------------------------------------------
673
674 int32_t AudioDeviceWindowsCore::ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const
675 {
676     audioLayer = AudioDeviceModule::kWindowsCoreAudio;
677     return 0;
678 }
679
680 // ----------------------------------------------------------------------------
681 //  Init
682 // ----------------------------------------------------------------------------
683
684 int32_t AudioDeviceWindowsCore::Init()
685 {
686
687     CriticalSectionScoped lock(&_critSect);
688
689     if (_initialized)
690     {
691         return 0;
692     }
693
694     _playWarning = 0;
695     _playError = 0;
696     _recWarning = 0;
697     _recError = 0;
698
699     // Enumerate all audio rendering and capturing endpoint devices.
700     // Note that, some of these will not be able to select by the user.
701     // The complete collection is for internal use only.
702     //
703     _EnumerateEndpointDevicesAll(eRender);
704     _EnumerateEndpointDevicesAll(eCapture);
705
706     _initialized = true;
707
708     return 0;
709 }
710
711 // ----------------------------------------------------------------------------
712 //  Terminate
713 // ----------------------------------------------------------------------------
714
715 int32_t AudioDeviceWindowsCore::Terminate()
716 {
717
718     CriticalSectionScoped lock(&_critSect);
719
720     if (!_initialized) {
721         return 0;
722     }
723
724     _initialized = false;
725     _speakerIsInitialized = false;
726     _microphoneIsInitialized = false;
727     _playing = false;
728     _recording = false;
729
730     SAFE_RELEASE(_ptrRenderCollection);
731     SAFE_RELEASE(_ptrCaptureCollection);
732     SAFE_RELEASE(_ptrDeviceOut);
733     SAFE_RELEASE(_ptrDeviceIn);
734     SAFE_RELEASE(_ptrClientOut);
735     SAFE_RELEASE(_ptrClientIn);
736     SAFE_RELEASE(_ptrRenderClient);
737     SAFE_RELEASE(_ptrCaptureClient);
738     SAFE_RELEASE(_ptrCaptureVolume);
739     SAFE_RELEASE(_ptrRenderSimpleVolume);
740
741     return 0;
742 }
743
744 // ----------------------------------------------------------------------------
745 //  Initialized
746 // ----------------------------------------------------------------------------
747
748 bool AudioDeviceWindowsCore::Initialized() const
749 {
750     return (_initialized);
751 }
752
753 // ----------------------------------------------------------------------------
754 //  InitSpeaker
755 // ----------------------------------------------------------------------------
756
757 int32_t AudioDeviceWindowsCore::InitSpeaker()
758 {
759
760     CriticalSectionScoped lock(&_critSect);
761
762     if (_playing)
763     {
764         return -1;
765     }
766
767     if (_ptrDeviceOut == NULL)
768     {
769         return -1;
770     }
771
772     if (_usingOutputDeviceIndex)
773     {
774         int16_t nDevices = PlayoutDevices();
775         if (_outputDeviceIndex > (nDevices - 1))
776         {
777             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "current device selection is invalid => unable to initialize");
778             return -1;
779         }
780     }
781
782     int32_t ret(0);
783
784     SAFE_RELEASE(_ptrDeviceOut);
785     if (_usingOutputDeviceIndex)
786     {
787         // Refresh the selected rendering endpoint device using current index
788         ret = _GetListDevice(eRender, _outputDeviceIndex, &_ptrDeviceOut);
789     }
790     else
791     {
792         ERole role;
793         (_outputDevice == AudioDeviceModule::kDefaultDevice) ? role = eConsole : role = eCommunications;
794         // Refresh the selected rendering endpoint device using role
795         ret = _GetDefaultDevice(eRender, role, &_ptrDeviceOut);
796     }
797
798     if (ret != 0 || (_ptrDeviceOut == NULL))
799     {
800         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "failed to initialize the rendering enpoint device");
801         SAFE_RELEASE(_ptrDeviceOut);
802         return -1;
803     }
804
805     IAudioSessionManager* pManager = NULL;
806     ret = _ptrDeviceOut->Activate(__uuidof(IAudioSessionManager),
807                                   CLSCTX_ALL,
808                                   NULL,
809                                   (void**)&pManager);
810     if (ret != 0 || pManager == NULL)
811     {
812         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
813                     "  failed to initialize the render manager");
814         SAFE_RELEASE(pManager);
815         return -1;
816     }
817
818     SAFE_RELEASE(_ptrRenderSimpleVolume);
819     ret = pManager->GetSimpleAudioVolume(NULL, FALSE, &_ptrRenderSimpleVolume);
820     if (ret != 0 || _ptrRenderSimpleVolume == NULL)
821     {
822         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
823                     "  failed to initialize the render simple volume");
824         SAFE_RELEASE(pManager);
825         SAFE_RELEASE(_ptrRenderSimpleVolume);
826         return -1;
827     }
828     SAFE_RELEASE(pManager);
829
830     _speakerIsInitialized = true;
831
832     return 0;
833 }
834
835 // ----------------------------------------------------------------------------
836 //  InitMicrophone
837 // ----------------------------------------------------------------------------
838
// Initializes the capture side: validates/refreshes the selected capture
// endpoint device and activates its IAudioEndpointVolume interface
// (_ptrCaptureVolume). Returns 0 on success, -1 on failure.
// Must not be called while recording is active.
int32_t AudioDeviceWindowsCore::InitMicrophone()
{

    CriticalSectionScoped lock(&_critSect);

    // Refusing to re-initialize while capturing avoids swapping the device
    // under the active capture thread.
    if (_recording)
    {
        return -1;
    }

    // A capture device must already have been selected via
    // SetRecordingDevice(); otherwise there is nothing to initialize.
    if (_ptrDeviceIn == NULL)
    {
        return -1;
    }

    if (_usingInputDeviceIndex)
    {
        // Device list may have changed since the index was chosen;
        // re-validate the stored index against the current device count.
        int16_t nDevices = RecordingDevices();
        if (_inputDeviceIndex > (nDevices - 1))
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "current device selection is invalid => unable to initialize");
            return -1;
        }
    }

    int32_t ret(0);

    // Drop the old endpoint reference and re-acquire it below so we always
    // bind to the device that currently matches the selection.
    SAFE_RELEASE(_ptrDeviceIn);
    if (_usingInputDeviceIndex)
    {
        // Refresh the selected capture endpoint device using current index
        ret = _GetListDevice(eCapture, _inputDeviceIndex, &_ptrDeviceIn);
    }
    else
    {
        // Map the module-level device type to a Core Audio role:
        // kDefaultDevice => eConsole, otherwise eCommunications.
        ERole role;
        (_inputDevice == AudioDeviceModule::kDefaultDevice) ? role = eConsole : role = eCommunications;
        // Refresh the selected capture endpoint device using role
        ret = _GetDefaultDevice(eCapture, role, &_ptrDeviceIn);
    }

    if (ret != 0 || (_ptrDeviceIn == NULL))
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "failed to initialize the capturing enpoint device");
        SAFE_RELEASE(_ptrDeviceIn);
        return -1;
    }

    // Cache the endpoint volume interface; it is used later by the
    // microphone volume getters/setters.
    ret = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume),
                                 CLSCTX_ALL,
                                 NULL,
                                 reinterpret_cast<void **>(&_ptrCaptureVolume));
    if (ret != 0 || _ptrCaptureVolume == NULL)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                    "  failed to initialize the capture volume");
        SAFE_RELEASE(_ptrCaptureVolume);
        return -1;
    }

    _microphoneIsInitialized = true;

    return 0;
}
903
904 // ----------------------------------------------------------------------------
905 //  SpeakerIsInitialized
906 // ----------------------------------------------------------------------------
907
908 bool AudioDeviceWindowsCore::SpeakerIsInitialized() const
909 {
910
911     return (_speakerIsInitialized);
912 }
913
914 // ----------------------------------------------------------------------------
915 //  MicrophoneIsInitialized
916 // ----------------------------------------------------------------------------
917
918 bool AudioDeviceWindowsCore::MicrophoneIsInitialized() const
919 {
920
921     return (_microphoneIsInitialized);
922 }
923
924 // ----------------------------------------------------------------------------
925 //  SpeakerVolumeIsAvailable
926 // ----------------------------------------------------------------------------
927
928 int32_t AudioDeviceWindowsCore::SpeakerVolumeIsAvailable(bool& available)
929 {
930
931     CriticalSectionScoped lock(&_critSect);
932
933     if (_ptrDeviceOut == NULL)
934     {
935         return -1;
936     }
937
938     HRESULT hr = S_OK;
939     IAudioSessionManager* pManager = NULL;
940     ISimpleAudioVolume* pVolume = NULL;
941
942     hr = _ptrDeviceOut->Activate(__uuidof(IAudioSessionManager), CLSCTX_ALL, NULL, (void**)&pManager);
943     EXIT_ON_ERROR(hr);
944
945     hr = pManager->GetSimpleAudioVolume(NULL, FALSE, &pVolume);
946     EXIT_ON_ERROR(hr);
947
948     float volume(0.0f);
949     hr = pVolume->GetMasterVolume(&volume);
950     if (FAILED(hr))
951     {
952         available = false;
953     }
954     available = true;
955
956     SAFE_RELEASE(pManager);
957     SAFE_RELEASE(pVolume);
958
959     return 0;
960
961 Exit:
962     _TraceCOMError(hr);
963     SAFE_RELEASE(pManager);
964     SAFE_RELEASE(pVolume);
965     return -1;
966 }
967
968 // ----------------------------------------------------------------------------
969 //  SetSpeakerVolume
970 // ----------------------------------------------------------------------------
971
// Sets the render session master volume. |volume| must lie inside
// [MIN_CORE_SPEAKER_VOLUME, MAX_CORE_SPEAKER_VOLUME]; it is scaled to the
// Core Audio range [0.0, 1.0]. Returns 0 on success, -1 on failure.
int32_t AudioDeviceWindowsCore::SetSpeakerVolume(uint32_t volume)
{

    {
        // State checks are done under _critSect; the volume call itself is
        // serialized by _volumeMutex below.
        CriticalSectionScoped lock(&_critSect);

        if (!_speakerIsInitialized)
        {
        return -1;
        }

        if (_ptrDeviceOut == NULL)
        {
            return -1;
        }
    }

    // Reject values outside the public volume range.
    if (volume < (uint32_t)MIN_CORE_SPEAKER_VOLUME ||
        volume > (uint32_t)MAX_CORE_SPEAKER_VOLUME)
    {
        return -1;
    }

    HRESULT hr = S_OK;

    // scale input volume to valid range (0.0 to 1.0)
    const float fLevel = (float)volume/MAX_CORE_SPEAKER_VOLUME;
    // NOTE(review): _ptrRenderSimpleVolume is accessed after _critSect has
    // been released; presumably _volumeMutex is sufficient protection here —
    // confirm against InitSpeaker()/Terminate() ordering.
    _volumeMutex.Enter();
    hr = _ptrRenderSimpleVolume->SetMasterVolume(fLevel,NULL);
    _volumeMutex.Leave();
    EXIT_ON_ERROR(hr);

    return 0;

Exit:
    _TraceCOMError(hr);
    return -1;
}
1010
1011 // ----------------------------------------------------------------------------
1012 //  SpeakerVolume
1013 // ----------------------------------------------------------------------------
1014
// Reads the render session master volume and scales it from the Core
// Audio range [0.0, 1.0] to [0, MAX_CORE_SPEAKER_VOLUME].
// Returns 0 on success, -1 on failure.
int32_t AudioDeviceWindowsCore::SpeakerVolume(uint32_t& volume) const
{

    {
        // State checks under _critSect; the COM call below is guarded by
        // _volumeMutex instead.
        CriticalSectionScoped lock(&_critSect);

        if (!_speakerIsInitialized)
        {
            return -1;
        }

        if (_ptrDeviceOut == NULL)
        {
            return -1;
        }
    }

    HRESULT hr = S_OK;
    float fLevel(0.0f);

    // NOTE(review): _ptrRenderSimpleVolume is used after _critSect is
    // released; assumed safe under _volumeMutex — confirm.
    _volumeMutex.Enter();
    hr = _ptrRenderSimpleVolume->GetMasterVolume(&fLevel);
    _volumeMutex.Leave();
    EXIT_ON_ERROR(hr);

    // scale input volume range [0.0,1.0] to valid output range
    volume = static_cast<uint32_t> (fLevel*MAX_CORE_SPEAKER_VOLUME);

    return 0;

Exit:
    _TraceCOMError(hr);
    return -1;
}
1049
1050 // ----------------------------------------------------------------------------
1051 //  SetWaveOutVolume
1052 // ----------------------------------------------------------------------------
1053
1054 int32_t AudioDeviceWindowsCore::SetWaveOutVolume(uint16_t volumeLeft, uint16_t volumeRight)
1055 {
1056     return -1;
1057 }
1058
1059 // ----------------------------------------------------------------------------
1060 //  WaveOutVolume
1061 // ----------------------------------------------------------------------------
1062
1063 int32_t AudioDeviceWindowsCore::WaveOutVolume(uint16_t& volumeLeft, uint16_t& volumeRight) const
1064 {
1065     return -1;
1066 }
1067
1068 // ----------------------------------------------------------------------------
1069 //  MaxSpeakerVolume
1070 //
1071 //  The internal range for Core Audio is 0.0 to 1.0, where 0.0 indicates
1072 //  silence and 1.0 indicates full volume (no attenuation).
1073 //  We add our (webrtc-internal) own max level to match the Wave API and
1074 //  how it is used today in VoE.
1075 // ----------------------------------------------------------------------------
1076
1077 int32_t AudioDeviceWindowsCore::MaxSpeakerVolume(uint32_t& maxVolume) const
1078 {
1079
1080     if (!_speakerIsInitialized)
1081     {
1082         return -1;
1083     }
1084
1085     maxVolume = static_cast<uint32_t> (MAX_CORE_SPEAKER_VOLUME);
1086
1087     return 0;
1088 }
1089
1090 // ----------------------------------------------------------------------------
1091 //  MinSpeakerVolume
1092 // ----------------------------------------------------------------------------
1093
1094 int32_t AudioDeviceWindowsCore::MinSpeakerVolume(uint32_t& minVolume) const
1095 {
1096
1097     if (!_speakerIsInitialized)
1098     {
1099         return -1;
1100     }
1101
1102     minVolume = static_cast<uint32_t> (MIN_CORE_SPEAKER_VOLUME);
1103
1104     return 0;
1105 }
1106
1107 // ----------------------------------------------------------------------------
1108 //  SpeakerVolumeStepSize
1109 // ----------------------------------------------------------------------------
1110
1111 int32_t AudioDeviceWindowsCore::SpeakerVolumeStepSize(uint16_t& stepSize) const
1112 {
1113
1114     if (!_speakerIsInitialized)
1115     {
1116         return -1;
1117     }
1118
1119     stepSize = CORE_SPEAKER_VOLUME_STEP_SIZE;
1120
1121     return 0;
1122 }
1123
1124 // ----------------------------------------------------------------------------
1125 //  SpeakerMuteIsAvailable
1126 // ----------------------------------------------------------------------------
1127
// Probes whether the selected render endpoint exposes a mute control.
// Sets |available| based on whether GetMute() succeeds. Returns 0 on
// success, -1 if no output device is selected or activation fails.
int32_t AudioDeviceWindowsCore::SpeakerMuteIsAvailable(bool& available)
{

    CriticalSectionScoped lock(&_critSect);

    if (_ptrDeviceOut == NULL)
    {
        return -1;
    }

    HRESULT hr = S_OK;
    IAudioEndpointVolume* pVolume = NULL;

    // Query the speaker system mute state.
    hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume),
        CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
    EXIT_ON_ERROR(hr);

    // A failing GetMute() means the endpoint does not support mute;
    // that is not treated as a hard error.
    BOOL mute;
    hr = pVolume->GetMute(&mute);
    if (FAILED(hr))
        available = false;
    else
        available = true;

    SAFE_RELEASE(pVolume);

    return 0;

Exit:
    _TraceCOMError(hr);
    SAFE_RELEASE(pVolume);
    return -1;
}
1162
1163 // ----------------------------------------------------------------------------
1164 //  SetSpeakerMute
1165 // ----------------------------------------------------------------------------
1166
// Sets the system-wide mute state of the selected render endpoint.
// Requires an initialized speaker. Returns 0 on success, -1 on failure.
int32_t AudioDeviceWindowsCore::SetSpeakerMute(bool enable)
{

    CriticalSectionScoped lock(&_critSect);

    if (!_speakerIsInitialized)
    {
        return -1;
    }

    if (_ptrDeviceOut == NULL)
    {
        return -1;
    }

    HRESULT hr = S_OK;
    IAudioEndpointVolume* pVolume = NULL;

    // Set the speaker system mute state.
    hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
    EXIT_ON_ERROR(hr);

    const BOOL mute(enable);
    hr = pVolume->SetMute(mute, NULL);
    EXIT_ON_ERROR(hr);

    SAFE_RELEASE(pVolume);

    return 0;

Exit:
    _TraceCOMError(hr);
    SAFE_RELEASE(pVolume);
    return -1;
}
1202
1203 // ----------------------------------------------------------------------------
1204 //  SpeakerMute
1205 // ----------------------------------------------------------------------------
1206
// Queries the system-wide mute state of the selected render endpoint.
// Requires an initialized speaker. Returns 0 on success, -1 on failure.
// NOTE(review): unlike SetSpeakerMute(), this method does not take
// _critSect while reading _ptrDeviceOut — confirm whether that is
// intentional.
int32_t AudioDeviceWindowsCore::SpeakerMute(bool& enabled) const
{

    if (!_speakerIsInitialized)
    {
        return -1;
    }

    if (_ptrDeviceOut == NULL)
    {
        return -1;
    }

    HRESULT hr = S_OK;
    IAudioEndpointVolume* pVolume = NULL;

    // Query the speaker system mute state.
    hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
    EXIT_ON_ERROR(hr);

    BOOL mute;
    hr = pVolume->GetMute(&mute);
    EXIT_ON_ERROR(hr);

    enabled = (mute == TRUE) ? true : false;

    SAFE_RELEASE(pVolume);

    return 0;

Exit:
    _TraceCOMError(hr);
    SAFE_RELEASE(pVolume);
    return -1;
}
1242
1243 // ----------------------------------------------------------------------------
1244 //  MicrophoneMuteIsAvailable
1245 // ----------------------------------------------------------------------------
1246
// Probes whether the selected capture endpoint exposes a mute control.
// Sets |available| based on whether GetMute() succeeds. Returns 0 on
// success, -1 if no input device is selected or activation fails.
int32_t AudioDeviceWindowsCore::MicrophoneMuteIsAvailable(bool& available)
{

    CriticalSectionScoped lock(&_critSect);

    if (_ptrDeviceIn == NULL)
    {
        return -1;
    }

    HRESULT hr = S_OK;
    IAudioEndpointVolume* pVolume = NULL;

    // Query the microphone system mute state.
    hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
    EXIT_ON_ERROR(hr);

    // A failing GetMute() means the endpoint lacks a mute control; this is
    // reported via |available| rather than as an error.
    BOOL mute;
    hr = pVolume->GetMute(&mute);
    if (FAILED(hr))
        available = false;
    else
        available = true;

    SAFE_RELEASE(pVolume);
    return 0;

Exit:
    _TraceCOMError(hr);
    SAFE_RELEASE(pVolume);
    return -1;
}
1279
1280 // ----------------------------------------------------------------------------
1281 //  SetMicrophoneMute
1282 // ----------------------------------------------------------------------------
1283
// Sets the system-wide mute state of the selected capture endpoint.
// Requires an initialized microphone. Returns 0 on success, -1 on failure.
// NOTE(review): unlike SetSpeakerMute(), this method does not hold
// _critSect while accessing _ptrDeviceIn — confirm whether that is
// intentional.
int32_t AudioDeviceWindowsCore::SetMicrophoneMute(bool enable)
{

    if (!_microphoneIsInitialized)
    {
        return -1;
    }

    if (_ptrDeviceIn == NULL)
    {
        return -1;
    }

    HRESULT hr = S_OK;
    IAudioEndpointVolume* pVolume = NULL;

    // Set the microphone system mute state.
    hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
    EXIT_ON_ERROR(hr);

    const BOOL mute(enable);
    hr = pVolume->SetMute(mute, NULL);
    EXIT_ON_ERROR(hr);

    SAFE_RELEASE(pVolume);
    return 0;

Exit:
    _TraceCOMError(hr);
    SAFE_RELEASE(pVolume);
    return -1;
}
1316
1317 // ----------------------------------------------------------------------------
1318 //  MicrophoneMute
1319 // ----------------------------------------------------------------------------
1320
1321 int32_t AudioDeviceWindowsCore::MicrophoneMute(bool& enabled) const
1322 {
1323
1324     if (!_microphoneIsInitialized)
1325     {
1326         return -1;
1327     }
1328
1329     HRESULT hr = S_OK;
1330     IAudioEndpointVolume* pVolume = NULL;
1331
1332     // Query the microphone system mute state.
1333     hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
1334     EXIT_ON_ERROR(hr);
1335
1336     BOOL mute;
1337     hr = pVolume->GetMute(&mute);
1338     EXIT_ON_ERROR(hr);
1339
1340     enabled = (mute == TRUE) ? true : false;
1341
1342     SAFE_RELEASE(pVolume);
1343     return 0;
1344
1345 Exit:
1346     _TraceCOMError(hr);
1347     SAFE_RELEASE(pVolume);
1348     return -1;
1349 }
1350
1351 // ----------------------------------------------------------------------------
1352 //  MicrophoneBoostIsAvailable
1353 // ----------------------------------------------------------------------------
1354
1355 int32_t AudioDeviceWindowsCore::MicrophoneBoostIsAvailable(bool& available)
1356 {
1357
1358     available = false;
1359     return 0;
1360 }
1361
1362 // ----------------------------------------------------------------------------
1363 //  SetMicrophoneBoost
1364 // ----------------------------------------------------------------------------
1365
1366 int32_t AudioDeviceWindowsCore::SetMicrophoneBoost(bool enable)
1367 {
1368
1369     if (!_microphoneIsInitialized)
1370     {
1371         return -1;
1372     }
1373
1374     return -1;
1375 }
1376
1377 // ----------------------------------------------------------------------------
1378 //  MicrophoneBoost
1379 // ----------------------------------------------------------------------------
1380
1381 int32_t AudioDeviceWindowsCore::MicrophoneBoost(bool& enabled) const
1382 {
1383
1384     if (!_microphoneIsInitialized)
1385     {
1386         return -1;
1387     }
1388
1389     return -1;
1390 }
1391
1392 // ----------------------------------------------------------------------------
1393 //  StereoRecordingIsAvailable
1394 // ----------------------------------------------------------------------------
1395
1396 int32_t AudioDeviceWindowsCore::StereoRecordingIsAvailable(bool& available)
1397 {
1398
1399     available = true;
1400     return 0;
1401 }
1402
1403 // ----------------------------------------------------------------------------
1404 //  SetStereoRecording
1405 // ----------------------------------------------------------------------------
1406
1407 int32_t AudioDeviceWindowsCore::SetStereoRecording(bool enable)
1408 {
1409
1410     CriticalSectionScoped lock(&_critSect);
1411
1412     if (enable)
1413     {
1414         _recChannelsPrioList[0] = 2;    // try stereo first
1415         _recChannelsPrioList[1] = 1;
1416         _recChannels = 2;
1417     }
1418     else
1419     {
1420         _recChannelsPrioList[0] = 1;    // try mono first
1421         _recChannelsPrioList[1] = 2;
1422         _recChannels = 1;
1423     }
1424
1425     return 0;
1426 }
1427
1428 // ----------------------------------------------------------------------------
1429 //  StereoRecording
1430 // ----------------------------------------------------------------------------
1431
1432 int32_t AudioDeviceWindowsCore::StereoRecording(bool& enabled) const
1433 {
1434
1435     if (_recChannels == 2)
1436         enabled = true;
1437     else
1438         enabled = false;
1439
1440     return 0;
1441 }
1442
1443 // ----------------------------------------------------------------------------
1444 //  StereoPlayoutIsAvailable
1445 // ----------------------------------------------------------------------------
1446
1447 int32_t AudioDeviceWindowsCore::StereoPlayoutIsAvailable(bool& available)
1448 {
1449
1450     available = true;
1451     return 0;
1452 }
1453
1454 // ----------------------------------------------------------------------------
1455 //  SetStereoPlayout
1456 // ----------------------------------------------------------------------------
1457
1458 int32_t AudioDeviceWindowsCore::SetStereoPlayout(bool enable)
1459 {
1460
1461     CriticalSectionScoped lock(&_critSect);
1462
1463     if (enable)
1464     {
1465         _playChannelsPrioList[0] = 2;    // try stereo first
1466         _playChannelsPrioList[1] = 1;
1467         _playChannels = 2;
1468     }
1469     else
1470     {
1471         _playChannelsPrioList[0] = 1;    // try mono first
1472         _playChannelsPrioList[1] = 2;
1473         _playChannels = 1;
1474     }
1475
1476     return 0;
1477 }
1478
1479 // ----------------------------------------------------------------------------
1480 //  StereoPlayout
1481 // ----------------------------------------------------------------------------
1482
1483 int32_t AudioDeviceWindowsCore::StereoPlayout(bool& enabled) const
1484 {
1485
1486     if (_playChannels == 2)
1487         enabled = true;
1488     else
1489         enabled = false;
1490
1491     return 0;
1492 }
1493
1494 // ----------------------------------------------------------------------------
1495 //  SetAGC
1496 // ----------------------------------------------------------------------------
1497
1498 int32_t AudioDeviceWindowsCore::SetAGC(bool enable)
1499 {
1500     CriticalSectionScoped lock(&_critSect);
1501     _AGC = enable;
1502     return 0;
1503 }
1504
1505 // ----------------------------------------------------------------------------
1506 //  AGC
1507 // ----------------------------------------------------------------------------
1508
1509 bool AudioDeviceWindowsCore::AGC() const
1510 {
1511     CriticalSectionScoped lock(&_critSect);
1512     return _AGC;
1513 }
1514
1515 // ----------------------------------------------------------------------------
1516 //  MicrophoneVolumeIsAvailable
1517 // ----------------------------------------------------------------------------
1518
1519 int32_t AudioDeviceWindowsCore::MicrophoneVolumeIsAvailable(bool& available)
1520 {
1521
1522     CriticalSectionScoped lock(&_critSect);
1523
1524     if (_ptrDeviceIn == NULL)
1525     {
1526         return -1;
1527     }
1528
1529     HRESULT hr = S_OK;
1530     IAudioEndpointVolume* pVolume = NULL;
1531
1532     hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL, reinterpret_cast<void**>(&pVolume));
1533     EXIT_ON_ERROR(hr);
1534
1535     float volume(0.0f);
1536     hr = pVolume->GetMasterVolumeLevelScalar(&volume);
1537     if (FAILED(hr))
1538     {
1539         available = false;
1540     }
1541     available = true;
1542
1543     SAFE_RELEASE(pVolume);
1544     return 0;
1545
1546 Exit:
1547     _TraceCOMError(hr);
1548     SAFE_RELEASE(pVolume);
1549     return -1;
1550 }
1551
1552 // ----------------------------------------------------------------------------
1553 //  SetMicrophoneVolume
1554 // ----------------------------------------------------------------------------
1555
1556 int32_t AudioDeviceWindowsCore::SetMicrophoneVolume(uint32_t volume)
1557 {
1558     WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id, "AudioDeviceWindowsCore::SetMicrophoneVolume(volume=%u)", volume);
1559
1560     {
1561         CriticalSectionScoped lock(&_critSect);
1562
1563         if (!_microphoneIsInitialized)
1564         {
1565             return -1;
1566         }
1567
1568         if (_ptrDeviceIn == NULL)
1569         {
1570             return -1;
1571         }
1572     }
1573
1574     if (volume < static_cast<uint32_t>(MIN_CORE_MICROPHONE_VOLUME) ||
1575         volume > static_cast<uint32_t>(MAX_CORE_MICROPHONE_VOLUME))
1576     {
1577         return -1;
1578     }
1579
1580     HRESULT hr = S_OK;
1581     // scale input volume to valid range (0.0 to 1.0)
1582     const float fLevel = static_cast<float>(volume)/MAX_CORE_MICROPHONE_VOLUME;
1583     _volumeMutex.Enter();
1584     _ptrCaptureVolume->SetMasterVolumeLevelScalar(fLevel, NULL);
1585     _volumeMutex.Leave();
1586     EXIT_ON_ERROR(hr);
1587
1588     return 0;
1589
1590 Exit:
1591     _TraceCOMError(hr);
1592     return -1;
1593 }
1594
1595 // ----------------------------------------------------------------------------
1596 //  MicrophoneVolume
1597 // ----------------------------------------------------------------------------
1598
// Reads the capture endpoint master volume and scales it from the Core
// Audio range [0.0, 1.0] to [0, MAX_CORE_MICROPHONE_VOLUME].
// Returns 0 on success, -1 on failure.
int32_t AudioDeviceWindowsCore::MicrophoneVolume(uint32_t& volume) const
{
    {
        // State checks under _critSect; the COM call below is guarded by
        // _volumeMutex instead.
        CriticalSectionScoped lock(&_critSect);

        if (!_microphoneIsInitialized)
        {
            return -1;
        }

        if (_ptrDeviceIn == NULL)
        {
            return -1;
        }
    }

    HRESULT hr = S_OK;
    float fLevel(0.0f);
    // Initialize the out-parameter so callers see 0 on the error path.
    volume = 0;
    // NOTE(review): _ptrCaptureVolume is used after _critSect is released;
    // assumed safe under _volumeMutex — confirm.
    _volumeMutex.Enter();
    hr = _ptrCaptureVolume->GetMasterVolumeLevelScalar(&fLevel);
    _volumeMutex.Leave();
    EXIT_ON_ERROR(hr);

    // scale input volume range [0.0,1.0] to valid output range
    volume = static_cast<uint32_t> (fLevel*MAX_CORE_MICROPHONE_VOLUME);

    return 0;

Exit:
    _TraceCOMError(hr);
    return -1;
}
1632
1633 // ----------------------------------------------------------------------------
1634 //  MaxMicrophoneVolume
1635 //
1636 //  The internal range for Core Audio is 0.0 to 1.0, where 0.0 indicates
1637 //  silence and 1.0 indicates full volume (no attenuation).
1638 //  We add our (webrtc-internal) own max level to match the Wave API and
1639 //  how it is used today in VoE.
1640 // ----------------------------------------------------------------------------
1641
1642 int32_t AudioDeviceWindowsCore::MaxMicrophoneVolume(uint32_t& maxVolume) const
1643 {
1644     WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id, "%s", __FUNCTION__);
1645
1646     if (!_microphoneIsInitialized)
1647     {
1648         return -1;
1649     }
1650
1651     maxVolume = static_cast<uint32_t> (MAX_CORE_MICROPHONE_VOLUME);
1652
1653     return 0;
1654 }
1655
1656 // ----------------------------------------------------------------------------
1657 //  MinMicrophoneVolume
1658 // ----------------------------------------------------------------------------
1659
1660 int32_t AudioDeviceWindowsCore::MinMicrophoneVolume(uint32_t& minVolume) const
1661 {
1662
1663     if (!_microphoneIsInitialized)
1664     {
1665         return -1;
1666     }
1667
1668     minVolume = static_cast<uint32_t> (MIN_CORE_MICROPHONE_VOLUME);
1669
1670     return 0;
1671 }
1672
1673 // ----------------------------------------------------------------------------
1674 //  MicrophoneVolumeStepSize
1675 // ----------------------------------------------------------------------------
1676
1677 int32_t AudioDeviceWindowsCore::MicrophoneVolumeStepSize(uint16_t& stepSize) const
1678 {
1679
1680     if (!_microphoneIsInitialized)
1681     {
1682         return -1;
1683     }
1684
1685     stepSize = CORE_MICROPHONE_VOLUME_STEP_SIZE;
1686
1687     return 0;
1688 }
1689
1690 // ----------------------------------------------------------------------------
1691 //  PlayoutDevices
1692 // ----------------------------------------------------------------------------
1693
1694 int16_t AudioDeviceWindowsCore::PlayoutDevices()
1695 {
1696
1697     CriticalSectionScoped lock(&_critSect);
1698
1699     if (_RefreshDeviceList(eRender) != -1)
1700     {
1701         return (_DeviceListCount(eRender));
1702     }
1703
1704     return -1;
1705 }
1706
1707 // ----------------------------------------------------------------------------
1708 //  SetPlayoutDevice I (II)
1709 // ----------------------------------------------------------------------------
1710
1711 int32_t AudioDeviceWindowsCore::SetPlayoutDevice(uint16_t index)
1712 {
1713
1714     if (_playIsInitialized)
1715     {
1716         return -1;
1717     }
1718
1719     // Get current number of available rendering endpoint devices and refresh the rendering collection.
1720     UINT nDevices = PlayoutDevices();
1721
1722     if (index < 0 || index > (nDevices-1))
1723     {
1724         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "device index is out of range [0,%u]", (nDevices-1));
1725         return -1;
1726     }
1727
1728     CriticalSectionScoped lock(&_critSect);
1729
1730     HRESULT hr(S_OK);
1731
1732     assert(_ptrRenderCollection != NULL);
1733
1734     //  Select an endpoint rendering device given the specified index
1735     SAFE_RELEASE(_ptrDeviceOut);
1736     hr = _ptrRenderCollection->Item(
1737                                  index,
1738                                  &_ptrDeviceOut);
1739     if (FAILED(hr))
1740     {
1741         _TraceCOMError(hr);
1742         SAFE_RELEASE(_ptrDeviceOut);
1743         return -1;
1744     }
1745
1746     WCHAR szDeviceName[MAX_PATH];
1747     const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];
1748
1749     // Get the endpoint device's friendly-name
1750     if (_GetDeviceName(_ptrDeviceOut, szDeviceName, bufferLen) == 0)
1751     {
1752         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "friendly name: \"%S\"", szDeviceName);
1753     }
1754
1755     _usingOutputDeviceIndex = true;
1756     _outputDeviceIndex = index;
1757
1758     return 0;
1759 }
1760
1761 // ----------------------------------------------------------------------------
1762 //  SetPlayoutDevice II (II)
1763 // ----------------------------------------------------------------------------
1764
// Selects the playout device by Windows device type (default vs. default
// communication device), mapped to a Core Audio ERole.
// Cannot be called once playout has been initialized.
// Returns 0 on success, -1 on failure.
int32_t AudioDeviceWindowsCore::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device)
{
    if (_playIsInitialized)
    {
        return -1;
    }

    // kDefaultDevice => eConsole, kDefaultCommunicationDevice =>
    // eCommunications (also the fallback initializer).
    ERole role(eCommunications);

    if (device == AudioDeviceModule::kDefaultDevice)
    {
        role = eConsole;
    }
    else if (device == AudioDeviceModule::kDefaultCommunicationDevice)
    {
        role = eCommunications;
    }

    CriticalSectionScoped lock(&_critSect);

    // Refresh the list of rendering endpoint devices
    _RefreshDeviceList(eRender);

    HRESULT hr(S_OK);

    assert(_ptrEnumerator != NULL);

    //  Select an endpoint rendering device given the specified role
    SAFE_RELEASE(_ptrDeviceOut);
    hr = _ptrEnumerator->GetDefaultAudioEndpoint(
                           eRender,
                           role,
                           &_ptrDeviceOut);
    if (FAILED(hr))
    {
        _TraceCOMError(hr);
        SAFE_RELEASE(_ptrDeviceOut);
        return -1;
    }

    WCHAR szDeviceName[MAX_PATH];
    const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];

    // Get the endpoint device's friendly-name (log only; failure to read
    // the name does not fail the call).
    if (_GetDeviceName(_ptrDeviceOut, szDeviceName, bufferLen) == 0)
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "friendly name: \"%S\"", szDeviceName);
    }

    // Remember that a role (not an explicit index) is now in use.
    _usingOutputDeviceIndex = false;
    _outputDevice = device;

    return 0;
}
1819
1820 // ----------------------------------------------------------------------------
1821 //  PlayoutDeviceName
1822 // ----------------------------------------------------------------------------
1823
// Retrieves the UTF-8 friendly name and (optionally) the endpoint ID
// string of the playout device at |index|. An index of (uint16_t)-1
// selects the default communication device. |name| is required; |guid|
// may be NULL. Returns 0 on success, -1 on failure.
int32_t AudioDeviceWindowsCore::PlayoutDeviceName(
    uint16_t index,
    char name[kAdmMaxDeviceNameSize],
    char guid[kAdmMaxGuidSize])
{

    bool defaultCommunicationDevice(false);
    const int16_t nDevices(PlayoutDevices());  // also updates the list of devices

    // Special fix for the case when the user selects '-1' as index (<=> Default Communication Device)
    if (index == (uint16_t)(-1))
    {
        defaultCommunicationDevice = true;
        index = 0;
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Default Communication endpoint device will be used");
    }

    if ((index > (nDevices-1)) || (name == NULL))
    {
        return -1;
    }

    // Zero the output buffers so partial conversions never leak garbage.
    memset(name, 0, kAdmMaxDeviceNameSize);

    if (guid != NULL)
    {
        memset(guid, 0, kAdmMaxGuidSize);
    }

    CriticalSectionScoped lock(&_critSect);

    int32_t ret(-1);
    WCHAR szDeviceName[MAX_PATH];
    const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];

    // Get the endpoint device's friendly-name
    if (defaultCommunicationDevice)
    {
        ret = _GetDefaultDeviceName(eRender, eCommunications, szDeviceName, bufferLen);
    }
    else
    {
        ret = _GetListDeviceName(eRender, index, szDeviceName, bufferLen);
    }

    if (ret == 0)
    {
        // Convert the endpoint device's friendly-name to UTF-8
        if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, name, kAdmMaxDeviceNameSize, NULL, NULL) == 0)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d", GetLastError());
        }
    }

    // Get the endpoint ID string (uniquely identifies the device among all audio endpoint devices)
    // Note: szDeviceName is reused as scratch space for the ID string.
    if (defaultCommunicationDevice)
    {
        ret = _GetDefaultDeviceID(eRender, eCommunications, szDeviceName, bufferLen);
    }
    else
    {
        ret = _GetListDeviceID(eRender, index, szDeviceName, bufferLen);
    }

    if (guid != NULL && ret == 0)
    {
        // Convert the endpoint device's ID string to UTF-8
        if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, guid, kAdmMaxGuidSize, NULL, NULL) == 0)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d", GetLastError());
        }
    }

    // ret reflects the last lookup performed (ID query when it ran).
    return ret;
}
1899
1900 // ----------------------------------------------------------------------------
1901 //  RecordingDeviceName
1902 // ----------------------------------------------------------------------------
1903
// Retrieves the UTF-8 friendly name and (optionally) the endpoint ID
// string of the recording device at |index|. An index of (uint16_t)-1
// selects the default communication device. |name| is required; |guid|
// may be NULL. Returns 0 on success, -1 on failure.
int32_t AudioDeviceWindowsCore::RecordingDeviceName(
    uint16_t index,
    char name[kAdmMaxDeviceNameSize],
    char guid[kAdmMaxGuidSize])
{

    bool defaultCommunicationDevice(false);
    const int16_t nDevices(RecordingDevices());  // also updates the list of devices

    // Special fix for the case when the user selects '-1' as index (<=> Default Communication Device)
    if (index == (uint16_t)(-1))
    {
        defaultCommunicationDevice = true;
        index = 0;
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Default Communication endpoint device will be used");
    }

    if ((index > (nDevices-1)) || (name == NULL))
    {
        return -1;
    }

    // Zero the output buffers so partial conversions never leak garbage.
    memset(name, 0, kAdmMaxDeviceNameSize);

    if (guid != NULL)
    {
        memset(guid, 0, kAdmMaxGuidSize);
    }

    CriticalSectionScoped lock(&_critSect);

    int32_t ret(-1);
    WCHAR szDeviceName[MAX_PATH];
    const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];

    // Get the endpoint device's friendly-name
    if (defaultCommunicationDevice)
    {
        ret = _GetDefaultDeviceName(eCapture, eCommunications, szDeviceName, bufferLen);
    }
    else
    {
        ret = _GetListDeviceName(eCapture, index, szDeviceName, bufferLen);
    }

    if (ret == 0)
    {
        // Convert the endpoint device's friendly-name to UTF-8
        if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, name, kAdmMaxDeviceNameSize, NULL, NULL) == 0)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d", GetLastError());
        }
    }

    // Get the endpoint ID string (uniquely identifies the device among all audio endpoint devices)
    // Note: szDeviceName is reused as scratch space for the ID string.
    if (defaultCommunicationDevice)
    {
        ret = _GetDefaultDeviceID(eCapture, eCommunications, szDeviceName, bufferLen);
    }
    else
    {
        ret = _GetListDeviceID(eCapture, index, szDeviceName, bufferLen);
    }

    if (guid != NULL && ret == 0)
    {
        // Convert the endpoint device's ID string to UTF-8
        if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, guid, kAdmMaxGuidSize, NULL, NULL) == 0)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d", GetLastError());
        }
    }

    // ret reflects the last lookup performed (ID query when it ran).
    return ret;
}
1979
1980 // ----------------------------------------------------------------------------
1981 //  RecordingDevices
1982 // ----------------------------------------------------------------------------
1983
1984 int16_t AudioDeviceWindowsCore::RecordingDevices()
1985 {
1986
1987     CriticalSectionScoped lock(&_critSect);
1988
1989     if (_RefreshDeviceList(eCapture) != -1)
1990     {
1991         return (_DeviceListCount(eCapture));
1992     }
1993
1994     return -1;
1995 }
1996
1997 // ----------------------------------------------------------------------------
1998 //  SetRecordingDevice I (II)
1999 // ----------------------------------------------------------------------------
2000
2001 int32_t AudioDeviceWindowsCore::SetRecordingDevice(uint16_t index)
2002 {
2003
2004     if (_recIsInitialized)
2005     {
2006         return -1;
2007     }
2008
2009     // Get current number of available capture endpoint devices and refresh the capture collection.
2010     UINT nDevices = RecordingDevices();
2011
2012     if (index < 0 || index > (nDevices-1))
2013     {
2014         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "device index is out of range [0,%u]", (nDevices-1));
2015         return -1;
2016     }
2017
2018     CriticalSectionScoped lock(&_critSect);
2019
2020     HRESULT hr(S_OK);
2021
2022     assert(_ptrCaptureCollection != NULL);
2023
2024     // Select an endpoint capture device given the specified index
2025     SAFE_RELEASE(_ptrDeviceIn);
2026     hr = _ptrCaptureCollection->Item(
2027                                  index,
2028                                  &_ptrDeviceIn);
2029     if (FAILED(hr))
2030     {
2031         _TraceCOMError(hr);
2032         SAFE_RELEASE(_ptrDeviceIn);
2033         return -1;
2034     }
2035
2036     WCHAR szDeviceName[MAX_PATH];
2037     const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];
2038
2039     // Get the endpoint device's friendly-name
2040     if (_GetDeviceName(_ptrDeviceIn, szDeviceName, bufferLen) == 0)
2041     {
2042         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "friendly name: \"%S\"", szDeviceName);
2043     }
2044
2045     _usingInputDeviceIndex = true;
2046     _inputDeviceIndex = index;
2047
2048     return 0;
2049 }
2050
2051 // ----------------------------------------------------------------------------
2052 //  SetRecordingDevice II (II)
2053 // ----------------------------------------------------------------------------
2054
2055 int32_t AudioDeviceWindowsCore::SetRecordingDevice(AudioDeviceModule::WindowsDeviceType device)
2056 {
2057     if (_recIsInitialized)
2058     {
2059         return -1;
2060     }
2061
2062     ERole role(eCommunications);
2063
2064     if (device == AudioDeviceModule::kDefaultDevice)
2065     {
2066         role = eConsole;
2067     }
2068     else if (device == AudioDeviceModule::kDefaultCommunicationDevice)
2069     {
2070         role = eCommunications;
2071     }
2072
2073     CriticalSectionScoped lock(&_critSect);
2074
2075     // Refresh the list of capture endpoint devices
2076     _RefreshDeviceList(eCapture);
2077
2078     HRESULT hr(S_OK);
2079
2080     assert(_ptrEnumerator != NULL);
2081
2082     //  Select an endpoint capture device given the specified role
2083     SAFE_RELEASE(_ptrDeviceIn);
2084     hr = _ptrEnumerator->GetDefaultAudioEndpoint(
2085                            eCapture,
2086                            role,
2087                            &_ptrDeviceIn);
2088     if (FAILED(hr))
2089     {
2090         _TraceCOMError(hr);
2091         SAFE_RELEASE(_ptrDeviceIn);
2092         return -1;
2093     }
2094
2095     WCHAR szDeviceName[MAX_PATH];
2096     const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];
2097
2098     // Get the endpoint device's friendly-name
2099     if (_GetDeviceName(_ptrDeviceIn, szDeviceName, bufferLen) == 0)
2100     {
2101         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "friendly name: \"%S\"", szDeviceName);
2102     }
2103
2104     _usingInputDeviceIndex = false;
2105     _inputDevice = device;
2106
2107     return 0;
2108 }
2109
2110 // ----------------------------------------------------------------------------
2111 //  PlayoutIsAvailable
2112 // ----------------------------------------------------------------------------
2113
2114 int32_t AudioDeviceWindowsCore::PlayoutIsAvailable(bool& available)
2115 {
2116
2117     available = false;
2118
2119     // Try to initialize the playout side
2120     int32_t res = InitPlayout();
2121
2122     // Cancel effect of initialization
2123     StopPlayout();
2124
2125     if (res != -1)
2126     {
2127         available = true;
2128     }
2129
2130     return 0;
2131 }
2132
2133 // ----------------------------------------------------------------------------
2134 //  RecordingIsAvailable
2135 // ----------------------------------------------------------------------------
2136
2137 int32_t AudioDeviceWindowsCore::RecordingIsAvailable(bool& available)
2138 {
2139
2140     available = false;
2141
2142     // Try to initialize the recording side
2143     int32_t res = InitRecording();
2144
2145     // Cancel effect of initialization
2146     StopRecording();
2147
2148     if (res != -1)
2149     {
2150         available = true;
2151     }
2152
2153     return 0;
2154 }
2155
2156 // ----------------------------------------------------------------------------
2157 //  InitPlayout
2158 // ----------------------------------------------------------------------------
2159
// Initializes the render (playout) side for shared-mode, event-driven
// streaming: activates an IAudioClient on the selected output device,
// negotiates a 16-bit PCM format (preferring 48 kHz), initializes the
// stream, and obtains an IAudioRenderClient. On any COM failure, control
// jumps to Exit: where all partially acquired resources are released.
// Returns 0 on success, -1 on failure.
int32_t AudioDeviceWindowsCore::InitPlayout()
{

    CriticalSectionScoped lock(&_critSect);

    // Cannot (re)initialize while actively playing.
    if (_playing)
    {
        return -1;
    }

    // Already initialized => nothing to do.
    if (_playIsInitialized)
    {
        return 0;
    }

    // A render device must have been selected first.
    if (_ptrDeviceOut == NULL)
    {
        return -1;
    }

    // Initialize the speaker (devices might have been added or removed)
    if (InitSpeaker() == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "InitSpeaker() failed");
    }

    // Ensure that the updated rendering endpoint device is valid
    if (_ptrDeviceOut == NULL)
    {
        return -1;
    }

    if (_builtInAecEnabled && _recIsInitialized)
    {
        // Ensure the correct render device is configured in case
        // InitRecording() was called before InitPlayout().
        if (SetDMOProperties() == -1)
        {
            return -1;
        }
    }

    HRESULT hr = S_OK;
    WAVEFORMATEX* pWfxOut = NULL;        // mix format, freed via CoTaskMemFree
    WAVEFORMATEX Wfx;                    // format we request from the engine
    WAVEFORMATEX* pWfxClosestMatch = NULL;  // suggestion from IsFormatSupported

    // Create COM object with IAudioClient interface.
    SAFE_RELEASE(_ptrClientOut);
    hr = _ptrDeviceOut->Activate(
                          __uuidof(IAudioClient),
                          CLSCTX_ALL,
                          NULL,
                          (void**)&_ptrClientOut);
    EXIT_ON_ERROR(hr);

    // Retrieve the stream format that the audio engine uses for its internal
    // processing (mixing) of shared-mode streams. Logged for diagnostics only;
    // the negotiation below does not depend on it.
    hr = _ptrClientOut->GetMixFormat(&pWfxOut);
    if (SUCCEEDED(hr))
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Audio Engine's current rendering mix format:");
        // format type
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wFormatTag     : 0x%X (%u)", pWfxOut->wFormatTag, pWfxOut->wFormatTag);
        // number of channels (i.e. mono, stereo...)
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nChannels      : %d", pWfxOut->nChannels);
        // sample rate
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nSamplesPerSec : %d", pWfxOut->nSamplesPerSec);
        // for buffer estimation
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nAvgBytesPerSec: %d", pWfxOut->nAvgBytesPerSec);
        // block size of data
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nBlockAlign    : %d", pWfxOut->nBlockAlign);
        // number of bits per sample of mono data
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wBitsPerSample : %d", pWfxOut->wBitsPerSample);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "cbSize         : %d", pWfxOut->cbSize);
    }

    // Set wave format: always 16-bit PCM; rate and channel count are
    // negotiated in the loop below.
    Wfx.wFormatTag = WAVE_FORMAT_PCM;
    Wfx.wBitsPerSample = 16;
    Wfx.cbSize = 0;

    const int freqs[] = {48000, 44100, 16000, 96000, 32000, 8000};
    hr = S_FALSE;

    // Iterate over frequencies and channels, in order of priority
    for (int freq = 0; freq < sizeof(freqs)/sizeof(freqs[0]); freq++)
    {
        for (int chan = 0; chan < sizeof(_playChannelsPrioList)/sizeof(_playChannelsPrioList[0]); chan++)
        {
            Wfx.nChannels = _playChannelsPrioList[chan];
            Wfx.nSamplesPerSec = freqs[freq];
            Wfx.nBlockAlign = Wfx.nChannels * Wfx.wBitsPerSample / 8;
            Wfx.nAvgBytesPerSec = Wfx.nSamplesPerSec * Wfx.nBlockAlign;
            // If the method succeeds and the audio endpoint device supports the specified stream format,
            // it returns S_OK. If the method succeeds and provides a closest match to the specified format,
            // it returns S_FALSE.
            hr = _ptrClientOut->IsFormatSupported(
                                  AUDCLNT_SHAREMODE_SHARED,
                                  &Wfx,
                                  &pWfxClosestMatch);
            if (hr == S_OK)
            {
                break;
            }
            else
            {
                WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nChannels=%d, nSamplesPerSec=%d is not supported",
                    Wfx.nChannels, Wfx.nSamplesPerSec);
            }
        }
        if (hr == S_OK)
            break;
    }

    // TODO(andrew): what happens in the event of failure in the above loop?
    //   Is _ptrClientOut->Initialize expected to fail?
    //   Same in InitRecording().
    // NOTE(review): if no format was accepted (hr != S_OK), the cached
    // _play* members below are left unset and Initialize() is still
    // attempted with the last tried format — confirm intended.
    if (hr == S_OK)
    {
        _playAudioFrameSize = Wfx.nBlockAlign;
        _playBlockSize = Wfx.nSamplesPerSec/100;  // 10 ms worth of frames
        _playSampleRate = Wfx.nSamplesPerSec;
        _devicePlaySampleRate = Wfx.nSamplesPerSec; // The device itself continues to run at 44.1 kHz.
        _devicePlayBlockSize = Wfx.nSamplesPerSec/100;
        _playChannels = Wfx.nChannels;

        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "VoE selected this rendering format:");
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wFormatTag         : 0x%X (%u)", Wfx.wFormatTag, Wfx.wFormatTag);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nChannels          : %d", Wfx.nChannels);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nSamplesPerSec     : %d", Wfx.nSamplesPerSec);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nAvgBytesPerSec    : %d", Wfx.nAvgBytesPerSec);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nBlockAlign        : %d", Wfx.nBlockAlign);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wBitsPerSample     : %d", Wfx.wBitsPerSample);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "cbSize             : %d", Wfx.cbSize);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Additional settings:");
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_playAudioFrameSize: %d", _playAudioFrameSize);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_playBlockSize     : %d", _playBlockSize);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_playChannels      : %d", _playChannels);
    }

    // Create a rendering stream.
    //
    // ****************************************************************************
    // For a shared-mode stream that uses event-driven buffering, the caller must
    // set both hnsPeriodicity and hnsBufferDuration to 0. The Initialize method
    // determines how large a buffer to allocate based on the scheduling period
    // of the audio engine. Although the client's buffer processing thread is
    // event driven, the basic buffer management process, as described previously,
    // is unaltered.
    // Each time the thread awakens, it should call IAudioClient::GetCurrentPadding
    // to determine how much data to write to a rendering buffer or read from a capture
    // buffer. In contrast to the two buffers that the Initialize method allocates
    // for an exclusive-mode stream that uses event-driven buffering, a shared-mode
    // stream requires a single buffer.
    // ****************************************************************************
    //
    REFERENCE_TIME hnsBufferDuration = 0;  // ask for minimum buffer size (default)
    if (_devicePlaySampleRate == 44100)
    {
        // Ask for a larger buffer size (30ms) when using 44.1kHz as render rate.
        // There seems to be a larger risk of underruns for 44.1 compared
        // with the default rate (48kHz). When using default, we set the requested
        // buffer duration to 0, which sets the buffer to the minimum size
        // required by the engine thread. The actual buffer size can then be
        // read by GetBufferSize() and it is 20ms on most machines.
        hnsBufferDuration = 30*10000;
    }
    hr = _ptrClientOut->Initialize(
                          AUDCLNT_SHAREMODE_SHARED,             // share Audio Engine with other applications
                          AUDCLNT_STREAMFLAGS_EVENTCALLBACK,    // processing of the audio buffer by the client will be event driven
                          hnsBufferDuration,                    // requested buffer capacity as a time value (in 100-nanosecond units)
                          0,                                    // periodicity
                          &Wfx,                                 // selected wave format
                          NULL);                                // session GUID

    if (FAILED(hr))
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "IAudioClient::Initialize() failed:");
        if (pWfxClosestMatch != NULL)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "closest mix format: #channels=%d, samples/sec=%d, bits/sample=%d",
                pWfxClosestMatch->nChannels, pWfxClosestMatch->nSamplesPerSec, pWfxClosestMatch->wBitsPerSample);
        }
        else
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "no format suggested");
        }
    }
    EXIT_ON_ERROR(hr);

    if (_ptrAudioBuffer)
    {
        // Update the audio buffer with the selected parameters
        _ptrAudioBuffer->SetPlayoutSampleRate(_playSampleRate);
        _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels);
    }
    else
    {
        // We can enter this state during CoreAudioIsSupported() when no AudioDeviceImplementation
        // has been created, hence the AudioDeviceBuffer does not exist.
        // It is OK to end up here since we don't initiate any media in CoreAudioIsSupported().
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioDeviceBuffer must be attached before streaming can start");
    }

    // Get the actual size of the shared (endpoint buffer).
    // Typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
    UINT bufferFrameCount(0);
    hr = _ptrClientOut->GetBufferSize(
                          &bufferFrameCount);
    if (SUCCEEDED(hr))
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "IAudioClient::GetBufferSize() => %u (<=> %u bytes)",
            bufferFrameCount, bufferFrameCount*_playAudioFrameSize);
    }

    // Set the event handle that the system signals when an audio buffer is ready
    // to be processed by the client.
    hr = _ptrClientOut->SetEventHandle(
                          _hRenderSamplesReadyEvent);
    EXIT_ON_ERROR(hr);

    // Get an IAudioRenderClient interface.
    SAFE_RELEASE(_ptrRenderClient);
    hr = _ptrClientOut->GetService(
                          __uuidof(IAudioRenderClient),
                          (void**)&_ptrRenderClient);
    EXIT_ON_ERROR(hr);

    // Mark playout side as initialized
    _playIsInitialized = true;

    CoTaskMemFree(pWfxOut);
    CoTaskMemFree(pWfxClosestMatch);

    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "render side is now initialized");
    return 0;

Exit:
    // Error path: log the COM error and release everything acquired above.
    _TraceCOMError(hr);
    CoTaskMemFree(pWfxOut);
    CoTaskMemFree(pWfxClosestMatch);
    SAFE_RELEASE(_ptrClientOut);
    SAFE_RELEASE(_ptrRenderClient);
    return -1;
}
2406
2407 // Capture initialization when the built-in AEC DirectX Media Object (DMO) is
2408 // used. Called from InitRecording(), most of which is skipped over. The DMO
2409 // handles device initialization itself.
2410 // Reference: http://msdn.microsoft.com/en-us/library/ff819492(v=vs.85).aspx
int32_t AudioDeviceWindowsCore::InitRecordingDMO()
{
    // Only reachable when the built-in AEC path is active and the DMO exists.
    assert(_builtInAecEnabled);
    assert(_dmo != NULL);

    // Point the DMO at the currently selected capture/render devices.
    if (SetDMOProperties() == -1)
    {
        return -1;
    }

    // Build the media type describing the DMO's capture output stream.
    // mt.pbFormat is allocated by MoInitMediaType and must be released with
    // MoFreeMediaType on every path below.
    DMO_MEDIA_TYPE mt = {0};
    HRESULT hr = MoInitMediaType(&mt, sizeof(WAVEFORMATEX));
    if (FAILED(hr))
    {
        MoFreeMediaType(&mt);
        _TraceCOMError(hr);
        return -1;
    }
    mt.majortype = MEDIATYPE_Audio;
    mt.subtype = MEDIASUBTYPE_PCM;
    mt.formattype = FORMAT_WaveFormatEx;

    // Supported formats
    // nChannels: 1 (in AEC-only mode)
    // nSamplesPerSec: 8000, 11025, 16000, 22050
    // wBitsPerSample: 16
    WAVEFORMATEX* ptrWav = reinterpret_cast<WAVEFORMATEX*>(mt.pbFormat);
    ptrWav->wFormatTag = WAVE_FORMAT_PCM;
    ptrWav->nChannels = 1;
    // 16000 is the highest we can support with our resampler.
    ptrWav->nSamplesPerSec = 16000;
    ptrWav->nAvgBytesPerSec = 32000;   // 16000 samples/s * 2 bytes/sample
    ptrWav->nBlockAlign = 2;           // mono, 16-bit => 2 bytes per frame
    ptrWav->wBitsPerSample = 16;
    ptrWav->cbSize = 0;

    // Set the VoE format equal to the AEC output format.
    _recAudioFrameSize = ptrWav->nBlockAlign;
    _recSampleRate = ptrWav->nSamplesPerSec;
    _recBlockSize = ptrWav->nSamplesPerSec / 100;  // 10 ms worth of frames
    _recChannels = ptrWav->nChannels;

    // Set the DMO output format parameters.
    hr = _dmo->SetOutputType(kAecCaptureStreamIndex, &mt, 0);
    MoFreeMediaType(&mt);
    if (FAILED(hr))
    {
        _TraceCOMError(hr);
        return -1;
    }

    if (_ptrAudioBuffer)
    {
        _ptrAudioBuffer->SetRecordingSampleRate(_recSampleRate);
        _ptrAudioBuffer->SetRecordingChannels(_recChannels);
    }
    else
    {
        // Refer to InitRecording() for comments.
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "AudioDeviceBuffer must be attached before streaming can start");
    }

    // Scratch buffer used when pulling 10 ms blocks out of the DMO.
    _mediaBuffer = new MediaBufferImpl(_recBlockSize * _recAudioFrameSize);

    // Optional, but if called, must be after media types are set.
    hr = _dmo->AllocateStreamingResources();
    if (FAILED(hr))
    {
         _TraceCOMError(hr);
        return -1;
    }

    _recIsInitialized = true;
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
        "Capture side is now initialized");

    return 0;
}
2490
2491 // ----------------------------------------------------------------------------
2492 //  InitRecording
2493 // ----------------------------------------------------------------------------
2494
// Initializes the capture (recording) side for shared-mode, event-driven
// streaming: activates an IAudioClient on the selected input device,
// negotiates a 16-bit PCM format (preferring 48 kHz), initializes the
// stream, and obtains an IAudioCaptureClient. When the built-in AEC is
// enabled, delegates entirely to InitRecordingDMO(). On any COM failure,
// control jumps to Exit: where partial resources are released.
// Returns 0 on success, -1 on failure.
int32_t AudioDeviceWindowsCore::InitRecording()
{

    CriticalSectionScoped lock(&_critSect);

    // Cannot (re)initialize while actively recording.
    if (_recording)
    {
        return -1;
    }

    // Already initialized => nothing to do.
    if (_recIsInitialized)
    {
        return 0;
    }

    // Cache the QPC frequency; used to convert performance-counter readings
    // to 100-ns units for capture timestamping.
    if (QueryPerformanceFrequency(&_perfCounterFreq) == 0)
    {
        return -1;
    }
    _perfCounterFactor = 10000000.0 / (double)_perfCounterFreq.QuadPart;

    // A capture device must have been selected first.
    if (_ptrDeviceIn == NULL)
    {
        return -1;
    }

    // Initialize the microphone (devices might have been added or removed)
    if (InitMicrophone() == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "InitMicrophone() failed");
    }

    // Ensure that the updated capturing endpoint device is valid
    if (_ptrDeviceIn == NULL)
    {
        return -1;
    }

    if (_builtInAecEnabled)
    {
        // The DMO will configure the capture device.
        return InitRecordingDMO();
    }

    HRESULT hr = S_OK;
    WAVEFORMATEX* pWfxIn = NULL;         // mix format, freed via CoTaskMemFree
    WAVEFORMATEX Wfx;                    // format we request from the engine
    WAVEFORMATEX* pWfxClosestMatch = NULL;  // suggestion from IsFormatSupported

    // Create COM object with IAudioClient interface.
    SAFE_RELEASE(_ptrClientIn);
    hr = _ptrDeviceIn->Activate(
                          __uuidof(IAudioClient),
                          CLSCTX_ALL,
                          NULL,
                          (void**)&_ptrClientIn);
    EXIT_ON_ERROR(hr);

    // Retrieve the stream format that the audio engine uses for its internal
    // processing (mixing) of shared-mode streams. Logged for diagnostics only.
    hr = _ptrClientIn->GetMixFormat(&pWfxIn);
    if (SUCCEEDED(hr))
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Audio Engine's current capturing mix format:");
        // format type
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wFormatTag     : 0x%X (%u)", pWfxIn->wFormatTag, pWfxIn->wFormatTag);
        // number of channels (i.e. mono, stereo...)
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nChannels      : %d", pWfxIn->nChannels);
        // sample rate
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nSamplesPerSec : %d", pWfxIn->nSamplesPerSec);
        // for buffer estimation
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nAvgBytesPerSec: %d", pWfxIn->nAvgBytesPerSec);
        // block size of data
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nBlockAlign    : %d", pWfxIn->nBlockAlign);
        // number of bits per sample of mono data
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wBitsPerSample : %d", pWfxIn->wBitsPerSample);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "cbSize         : %d", pWfxIn->cbSize);
    }

    // Set wave format: always 16-bit PCM; rate and channel count are
    // negotiated in the loop below.
    Wfx.wFormatTag = WAVE_FORMAT_PCM;
    Wfx.wBitsPerSample = 16;
    Wfx.cbSize = 0;

    const int freqs[6] = {48000, 44100, 16000, 96000, 32000, 8000};
    hr = S_FALSE;

    // Iterate over frequencies and channels, in order of priority
    for (int freq = 0; freq < sizeof(freqs)/sizeof(freqs[0]); freq++)
    {
        for (int chan = 0; chan < sizeof(_recChannelsPrioList)/sizeof(_recChannelsPrioList[0]); chan++)
        {
            Wfx.nChannels = _recChannelsPrioList[chan];
            Wfx.nSamplesPerSec = freqs[freq];
            Wfx.nBlockAlign = Wfx.nChannels * Wfx.wBitsPerSample / 8;
            Wfx.nAvgBytesPerSec = Wfx.nSamplesPerSec * Wfx.nBlockAlign;
            // If the method succeeds and the audio endpoint device supports the specified stream format,
            // it returns S_OK. If the method succeeds and provides a closest match to the specified format,
            // it returns S_FALSE.
            hr = _ptrClientIn->IsFormatSupported(
                                  AUDCLNT_SHAREMODE_SHARED,
                                  &Wfx,
                                  &pWfxClosestMatch);
            if (hr == S_OK)
            {
                break;
            }
            else
            {
                WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nChannels=%d, nSamplesPerSec=%d is not supported",
                    Wfx.nChannels, Wfx.nSamplesPerSec);
            }
        }
        if (hr == S_OK)
            break;
    }

    // NOTE(review): if no format was accepted (hr != S_OK), the cached _rec*
    // members below are left unset and Initialize() is still attempted with
    // the last tried format — see the TODO(andrew) in InitPlayout().
    if (hr == S_OK)
    {
        _recAudioFrameSize = Wfx.nBlockAlign;
        _recSampleRate = Wfx.nSamplesPerSec;
        _recBlockSize = Wfx.nSamplesPerSec/100;  // 10 ms worth of frames
        _recChannels = Wfx.nChannels;

        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "VoE selected this capturing format:");
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wFormatTag        : 0x%X (%u)", Wfx.wFormatTag, Wfx.wFormatTag);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nChannels         : %d", Wfx.nChannels);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nSamplesPerSec    : %d", Wfx.nSamplesPerSec);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nAvgBytesPerSec   : %d", Wfx.nAvgBytesPerSec);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nBlockAlign       : %d", Wfx.nBlockAlign);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wBitsPerSample    : %d", Wfx.wBitsPerSample);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "cbSize            : %d", Wfx.cbSize);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Additional settings:");
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_recAudioFrameSize: %d", _recAudioFrameSize);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_recBlockSize     : %d", _recBlockSize);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_recChannels      : %d", _recChannels);
    }

    // Create a capturing stream.
    hr = _ptrClientIn->Initialize(
                          AUDCLNT_SHAREMODE_SHARED,             // share Audio Engine with other applications
                          AUDCLNT_STREAMFLAGS_EVENTCALLBACK |   // processing of the audio buffer by the client will be event driven
                          AUDCLNT_STREAMFLAGS_NOPERSIST,        // volume and mute settings for an audio session will not persist across system restarts
                          0,                                    // required for event-driven shared mode
                          0,                                    // periodicity
                          &Wfx,                                 // selected wave format
                          NULL);                                // session GUID


    if (hr != S_OK)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "IAudioClient::Initialize() failed:");
        if (pWfxClosestMatch != NULL)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "closest mix format: #channels=%d, samples/sec=%d, bits/sample=%d",
                pWfxClosestMatch->nChannels, pWfxClosestMatch->nSamplesPerSec, pWfxClosestMatch->wBitsPerSample);
        }
        else
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "no format suggested");
        }
    }
    EXIT_ON_ERROR(hr);

    if (_ptrAudioBuffer)
    {
        // Update the audio buffer with the selected parameters
        _ptrAudioBuffer->SetRecordingSampleRate(_recSampleRate);
        _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels);
    }
    else
    {
        // We can enter this state during CoreAudioIsSupported() when no AudioDeviceImplementation
        // has been created, hence the AudioDeviceBuffer does not exist.
        // It is OK to end up here since we don't initiate any media in CoreAudioIsSupported().
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioDeviceBuffer must be attached before streaming can start");
    }

    // Get the actual size of the shared (endpoint buffer).
    // Typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
    UINT bufferFrameCount(0);
    hr = _ptrClientIn->GetBufferSize(
                          &bufferFrameCount);
    if (SUCCEEDED(hr))
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "IAudioClient::GetBufferSize() => %u (<=> %u bytes)",
            bufferFrameCount, bufferFrameCount*_recAudioFrameSize);
    }

    // Set the event handle that the system signals when an audio buffer is ready
    // to be processed by the client.
    hr = _ptrClientIn->SetEventHandle(
                          _hCaptureSamplesReadyEvent);
    EXIT_ON_ERROR(hr);

    // Get an IAudioCaptureClient interface.
    SAFE_RELEASE(_ptrCaptureClient);
    hr = _ptrClientIn->GetService(
                          __uuidof(IAudioCaptureClient),
                          (void**)&_ptrCaptureClient);
    EXIT_ON_ERROR(hr);

    // Mark capture side as initialized
    _recIsInitialized = true;

    CoTaskMemFree(pWfxIn);
    CoTaskMemFree(pWfxClosestMatch);

    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "capture side is now initialized");
    return 0;

Exit:
    // Error path: log the COM error and release everything acquired above.
    _TraceCOMError(hr);
    CoTaskMemFree(pWfxIn);
    CoTaskMemFree(pWfxClosestMatch);
    SAFE_RELEASE(_ptrClientIn);
    SAFE_RELEASE(_ptrCaptureClient);
    return -1;
}
2714
2715 // ----------------------------------------------------------------------------
2716 //  StartRecording
2717 // ----------------------------------------------------------------------------
2718
int32_t AudioDeviceWindowsCore::StartRecording()
{
    // Starts the capture side. InitRecording() must have completed first.
    // Spawns up to three threads: the WASAPI capture thread (or the DMO
    // polling thread when the built-in AEC is enabled) plus the volume
    // getter/setter threads, then waits for the capture thread to signal
    // that the audio stream actually started.
    // Returns 0 on success (or if already started), -1 on failure.

    if (!_recIsInitialized)
    {
        return -1;
    }

    // Idempotent: a live thread handle or an already-set flag means
    // recording is in progress; report success without doing anything.
    if (_hRecThread != NULL)
    {
        return 0;
    }

    if (_recording)
    {
        return 0;
    }

    {
        CriticalSectionScoped critScoped(&_critSect);

        // Create thread which will drive the capturing
        LPTHREAD_START_ROUTINE lpStartAddress = WSAPICaptureThread;
        if (_builtInAecEnabled)
        {
            // Redirect to the DMO polling method.
            lpStartAddress = WSAPICaptureThreadPollDMO;

            if (!_playing)
            {
                // The DMO won't provide us captured output data unless we
                // give it render data to process.
                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                    "Playout must be started before recording when using the "
                    "built-in AEC");
                return -1;
            }
        }

        assert(_hRecThread == NULL);
        _hRecThread = CreateThread(NULL,
                                   0,
                                   lpStartAddress,
                                   this,
                                   0,
                                   NULL);
        if (_hRecThread == NULL)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "failed to create the recording thread");
            return -1;
        }

        // Set thread priority to highest possible
        SetThreadPriority(_hRecThread, THREAD_PRIORITY_TIME_CRITICAL);

        // NOTE(review): if either volume-thread creation below fails, the
        // already-created capture thread is left running and its handle is
        // not closed here -- presumably StopRecording() cleans up; verify.
        assert(_hGetCaptureVolumeThread == NULL);
        _hGetCaptureVolumeThread = CreateThread(NULL,
                                                0,
                                                GetCaptureVolumeThread,
                                                this,
                                                0,
                                                NULL);
        if (_hGetCaptureVolumeThread == NULL)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  failed to create the volume getter thread");
            return -1;
        }

        assert(_hSetCaptureVolumeThread == NULL);
        _hSetCaptureVolumeThread = CreateThread(NULL,
                                                0,
                                                SetCaptureVolumeThread,
                                                this,
                                                0,
                                                NULL);
        if (_hSetCaptureVolumeThread == NULL)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  failed to create the volume setter thread");
            return -1;
        }
    }  // critScoped

    // Wait (up to 1 second, outside the lock) until the capture thread
    // signals that the WASAPI stream has started.
    DWORD ret = WaitForSingleObject(_hCaptureStartedEvent, 1000);
    if (ret != WAIT_OBJECT_0)
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "capturing did not start up properly");
        return -1;
    }
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
        "capture audio stream has now started...");

    // Reset CPU-load bookkeeping for the new session.
    _avgCPULoad = 0.0f;
    _playAcc = 0;
    _recording = true;

    return 0;
}
2820
2821 // ----------------------------------------------------------------------------
2822 //  StopRecording
2823 // ----------------------------------------------------------------------------
2824
int32_t AudioDeviceWindowsCore::StopRecording()
{
    // Shuts down the capture thread and both volume threads, releases the
    // WASAPI capture interfaces and resets the capture state.
    // Returns 0 on success, -1 if any shutdown step failed (the remaining
    // steps are still attempted so the call can proceed after an error).
    int32_t err = 0;

    if (!_recIsInitialized)
    {
        // Nothing was initialized => nothing to stop.
        return 0;
    }

    _Lock();

    if (_hRecThread == NULL)
    {
        // Initialized but never started: only the WASAPI interfaces need
        // releasing; there are no threads to shut down.
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "no capturing stream is active => close down WASAPI only");
        SAFE_RELEASE(_ptrClientIn);
        SAFE_RELEASE(_ptrCaptureClient);
        _recIsInitialized = false;
        _recording = false;
        _UnLock();
        return 0;
    }

    // Stop the driving thread...
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
        "closing down the webrtc_core_audio_capture_thread...");
    // Manual-reset event; it will remain signalled to stop all capture threads.
    SetEvent(_hShutdownCaptureEvent);

    // Release the lock while waiting: the capture thread acquires the same
    // lock during its shutdown sequence and would otherwise deadlock.
    _UnLock();
    DWORD ret = WaitForSingleObject(_hRecThread, 2000);
    if (ret != WAIT_OBJECT_0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
            "failed to close down webrtc_core_audio_capture_thread");
        err = -1;
    }
    else
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "webrtc_core_audio_capture_thread is now closed");
    }

    // The volume threads also wait on _hShutdownCaptureEvent, so the
    // SetEvent() above stops them as well.
    ret = WaitForSingleObject(_hGetCaptureVolumeThread, 2000);
    if (ret != WAIT_OBJECT_0)
    {
        // the thread did not stop as it should
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to close down volume getter thread");
        err = -1;
    }
    else
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "  volume getter thread is now closed");
    }

    ret = WaitForSingleObject(_hSetCaptureVolumeThread, 2000);
    if (ret != WAIT_OBJECT_0)
    {
        // the thread did not stop as it should
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to close down volume setter thread");
        err = -1;
    }
    else
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "  volume setter thread is now closed");
    }
    _Lock();

    ResetEvent(_hShutdownCaptureEvent); // Must be manually reset.
    // Ensure that the thread has released these interfaces properly.
    assert(err == -1 || _ptrClientIn == NULL);
    assert(err == -1 || _ptrCaptureClient == NULL);

    _recIsInitialized = false;
    _recording = false;

    // These will create thread leaks in the result of an error,
    // but we can at least resume the call.
    CloseHandle(_hRecThread);
    _hRecThread = NULL;

    CloseHandle(_hGetCaptureVolumeThread);
    _hGetCaptureVolumeThread = NULL;

    CloseHandle(_hSetCaptureVolumeThread);
    _hSetCaptureVolumeThread = NULL;

    if (_builtInAecEnabled)
    {
        assert(_dmo != NULL);
        // This is necessary. Otherwise the DMO can generate garbage render
        // audio even after rendering has stopped.
        HRESULT hr = _dmo->FreeStreamingResources();
        if (FAILED(hr))
        {
            _TraceCOMError(hr);
            err = -1;
        }
    }

    // Reset the recording delay value.
    _sndCardRecDelay = 0;

    _UnLock();

    return err;
}
2936
2937 // ----------------------------------------------------------------------------
2938 //  RecordingIsInitialized
2939 // ----------------------------------------------------------------------------
2940
2941 bool AudioDeviceWindowsCore::RecordingIsInitialized() const
2942 {
2943     return (_recIsInitialized);
2944 }
2945
2946 // ----------------------------------------------------------------------------
2947 //  Recording
2948 // ----------------------------------------------------------------------------
2949
2950 bool AudioDeviceWindowsCore::Recording() const
2951 {
2952     return (_recording);
2953 }
2954
2955 // ----------------------------------------------------------------------------
2956 //  PlayoutIsInitialized
2957 // ----------------------------------------------------------------------------
2958
2959 bool AudioDeviceWindowsCore::PlayoutIsInitialized() const
2960 {
2961
2962     return (_playIsInitialized);
2963 }
2964
2965 // ----------------------------------------------------------------------------
2966 //  StartPlayout
2967 // ----------------------------------------------------------------------------
2968
int32_t AudioDeviceWindowsCore::StartPlayout()
{
    // Starts the render side. InitPlayout() must have completed first.
    // Spawns the WASAPI render thread and waits for it to signal that the
    // audio stream actually started.
    // Returns 0 on success (or if already started), -1 on failure.

    if (!_playIsInitialized)
    {
        return -1;
    }

    // Idempotent: a live thread handle or an already-set flag means
    // playout is in progress; report success without doing anything.
    if (_hPlayThread != NULL)
    {
        return 0;
    }

    if (_playing)
    {
        return 0;
    }

    {
        CriticalSectionScoped critScoped(&_critSect);

        // Create thread which will drive the rendering.
        assert(_hPlayThread == NULL);
        _hPlayThread = CreateThread(
                         NULL,
                         0,
                         WSAPIRenderThread,
                         this,
                         0,
                         NULL);
        if (_hPlayThread == NULL)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                "failed to create the playout thread");
            return -1;
        }

        // Set thread priority to highest possible.
        SetThreadPriority(_hPlayThread, THREAD_PRIORITY_TIME_CRITICAL);
    }  // critScoped

    // Wait (up to 1 second, outside the lock) until the render thread
    // signals that the WASAPI stream has started.
    DWORD ret = WaitForSingleObject(_hRenderStartedEvent, 1000);
    if (ret != WAIT_OBJECT_0)
    {
        // NOTE(review): on timeout the created thread is left running and
        // its handle stays open -- presumably cleaned up by StopPlayout();
        // verify against callers.
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "rendering did not start up properly");
        return -1;
    }

    _playing = true;
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
        "rendering audio stream has now started...");

    return 0;
}
3024
3025 // ----------------------------------------------------------------------------
3026 //  StopPlayout
3027 // ----------------------------------------------------------------------------
3028
int32_t AudioDeviceWindowsCore::StopPlayout()
{
    // Shuts down the render thread and releases the WASAPI render
    // interfaces. Safe to call when playout was never initialized/started.
    // Returns 0 on success, -1 if the render thread did not stop in time.

    if (!_playIsInitialized)
    {
        return 0;
    }

    {
        CriticalSectionScoped critScoped(&_critSect) ;

        if (_hPlayThread == NULL)
        {
            // Initialized but never started: only release the WASAPI
            // interfaces; there is no thread to shut down.
            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                "no rendering stream is active => close down WASAPI only");
            SAFE_RELEASE(_ptrClientOut);
            SAFE_RELEASE(_ptrRenderClient);
            _playIsInitialized = false;
            _playing = false;
            return 0;
        }

        // stop the driving thread...
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "closing down the webrtc_core_audio_render_thread...");
        SetEvent(_hShutdownRenderEvent);
    }  // critScoped

    // Wait outside the lock: the render thread takes the same lock while
    // shutting down and would otherwise deadlock against us.
    DWORD ret = WaitForSingleObject(_hPlayThread, 2000);
    if (ret != WAIT_OBJECT_0)
    {
        // the thread did not stop as it should
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
            "failed to close down webrtc_core_audio_render_thread");
        // NOTE(review): the handle is closed even though the thread may
        // still be running; state is reset so a later start can proceed.
        CloseHandle(_hPlayThread);
        _hPlayThread = NULL;
        _playIsInitialized = false;
        _playing = false;
        return -1;
    }

    {
        CriticalSectionScoped critScoped(&_critSect);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "webrtc_core_audio_render_thread is now closed");

        // to reset this event manually at each time we finish with it,
        // in case that the render thread has exited before StopPlayout(),
        // this event might be caught by the new render thread within same VoE instance.
        ResetEvent(_hShutdownRenderEvent);

        SAFE_RELEASE(_ptrClientOut);
        SAFE_RELEASE(_ptrRenderClient);

        _playIsInitialized = false;
        _playing = false;

        CloseHandle(_hPlayThread);
        _hPlayThread = NULL;

        if (_builtInAecEnabled && _recording)
        {
            // The DMO won't provide us captured output data unless we
            // give it render data to process.
            //
            // We still permit the playout to shutdown, and trace a warning.
            // Otherwise, VoE can get into a state which will never permit
            // playout to stop properly.
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                "Recording should be stopped before playout when using the "
                "built-in AEC");
        }

        // Reset the playout delay value.
        _sndCardPlayDelay = 0;
    }  // critScoped

    return 0;
}
3108
3109 // ----------------------------------------------------------------------------
3110 //  PlayoutDelay
3111 // ----------------------------------------------------------------------------
3112
3113 int32_t AudioDeviceWindowsCore::PlayoutDelay(uint16_t& delayMS) const
3114 {
3115     CriticalSectionScoped critScoped(&_critSect);
3116     delayMS = static_cast<uint16_t>(_sndCardPlayDelay);
3117     return 0;
3118 }
3119
3120 // ----------------------------------------------------------------------------
3121 //  RecordingDelay
3122 // ----------------------------------------------------------------------------
3123
3124 int32_t AudioDeviceWindowsCore::RecordingDelay(uint16_t& delayMS) const
3125 {
3126     CriticalSectionScoped critScoped(&_critSect);
3127     delayMS = static_cast<uint16_t>(_sndCardRecDelay);
3128     return 0;
3129 }
3130
3131 // ----------------------------------------------------------------------------
3132 //  Playing
3133 // ----------------------------------------------------------------------------
3134
3135 bool AudioDeviceWindowsCore::Playing() const
3136 {
3137     return (_playing);
3138 }
3139 // ----------------------------------------------------------------------------
3140 //  SetPlayoutBuffer
3141 // ----------------------------------------------------------------------------
3142
3143 int32_t AudioDeviceWindowsCore::SetPlayoutBuffer(const AudioDeviceModule::BufferType type, uint16_t sizeMS)
3144 {
3145
3146     CriticalSectionScoped lock(&_critSect);
3147
3148     _playBufType = type;
3149
3150     if (type == AudioDeviceModule::kFixedBufferSize)
3151     {
3152         _playBufDelayFixed = sizeMS;
3153     }
3154
3155     return 0;
3156 }
3157
3158 // ----------------------------------------------------------------------------
3159 //  PlayoutBuffer
3160 // ----------------------------------------------------------------------------
3161
3162 int32_t AudioDeviceWindowsCore::PlayoutBuffer(AudioDeviceModule::BufferType& type, uint16_t& sizeMS) const
3163 {
3164     CriticalSectionScoped lock(&_critSect);
3165     type = _playBufType;
3166
3167     if (type == AudioDeviceModule::kFixedBufferSize)
3168     {
3169         sizeMS = _playBufDelayFixed;
3170     }
3171     else
3172     {
3173         // Use same value as for PlayoutDelay
3174         sizeMS = static_cast<uint16_t>(_sndCardPlayDelay);
3175     }
3176
3177     return 0;
3178 }
3179
3180 // ----------------------------------------------------------------------------
3181 //  CPULoad
3182 // ----------------------------------------------------------------------------
3183
3184 int32_t AudioDeviceWindowsCore::CPULoad(uint16_t& load) const
3185 {
3186
3187     load = static_cast<uint16_t> (100*_avgCPULoad);
3188
3189     return 0;
3190 }
3191
3192 // ----------------------------------------------------------------------------
3193 //  PlayoutWarning
3194 // ----------------------------------------------------------------------------
3195
3196 bool AudioDeviceWindowsCore::PlayoutWarning() const
3197 {
3198     return ( _playWarning > 0);
3199 }
3200
3201 // ----------------------------------------------------------------------------
3202 //  PlayoutError
3203 // ----------------------------------------------------------------------------
3204
3205 bool AudioDeviceWindowsCore::PlayoutError() const
3206 {
3207     return ( _playError > 0);
3208 }
3209
3210 // ----------------------------------------------------------------------------
3211 //  RecordingWarning
3212 // ----------------------------------------------------------------------------
3213
3214 bool AudioDeviceWindowsCore::RecordingWarning() const
3215 {
3216     return ( _recWarning > 0);
3217 }
3218
3219 // ----------------------------------------------------------------------------
3220 //  RecordingError
3221 // ----------------------------------------------------------------------------
3222
3223 bool AudioDeviceWindowsCore::RecordingError() const
3224 {
3225     return ( _recError > 0);
3226 }
3227
3228 // ----------------------------------------------------------------------------
3229 //  ClearPlayoutWarning
3230 // ----------------------------------------------------------------------------
3231
3232 void AudioDeviceWindowsCore::ClearPlayoutWarning()
3233 {
3234     _playWarning = 0;
3235 }
3236
3237 // ----------------------------------------------------------------------------
3238 //  ClearPlayoutError
3239 // ----------------------------------------------------------------------------
3240
3241 void AudioDeviceWindowsCore::ClearPlayoutError()
3242 {
3243     _playError = 0;
3244 }
3245
3246 // ----------------------------------------------------------------------------
3247 //  ClearRecordingWarning
3248 // ----------------------------------------------------------------------------
3249
3250 void AudioDeviceWindowsCore::ClearRecordingWarning()
3251 {
3252     _recWarning = 0;
3253 }
3254
3255 // ----------------------------------------------------------------------------
3256 //  ClearRecordingError
3257 // ----------------------------------------------------------------------------
3258
3259 void AudioDeviceWindowsCore::ClearRecordingError()
3260 {
3261     _recError = 0;
3262 }
3263
3264 // ============================================================================
3265 //                                 Private Methods
3266 // ============================================================================
3267
3268 // ----------------------------------------------------------------------------
3269 //  [static] WSAPIRenderThread
3270 // ----------------------------------------------------------------------------
3271
3272 DWORD WINAPI AudioDeviceWindowsCore::WSAPIRenderThread(LPVOID context)
3273 {
3274     return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
3275         DoRenderThread();
3276 }
3277
3278 // ----------------------------------------------------------------------------
3279 //  [static] WSAPICaptureThread
3280 // ----------------------------------------------------------------------------
3281
3282 DWORD WINAPI AudioDeviceWindowsCore::WSAPICaptureThread(LPVOID context)
3283 {
3284     return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
3285         DoCaptureThread();
3286 }
3287
3288 DWORD WINAPI AudioDeviceWindowsCore::WSAPICaptureThreadPollDMO(LPVOID context)
3289 {
3290     return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
3291         DoCaptureThreadPollDMO();
3292 }
3293
3294 DWORD WINAPI AudioDeviceWindowsCore::GetCaptureVolumeThread(LPVOID context)
3295 {
3296     return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
3297         DoGetCaptureVolumeThread();
3298 }
3299
3300 DWORD WINAPI AudioDeviceWindowsCore::SetCaptureVolumeThread(LPVOID context)
3301 {
3302     return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
3303         DoSetCaptureVolumeThread();
3304 }
3305
DWORD AudioDeviceWindowsCore::DoGetCaptureVolumeThread()
{
    // Thread body for GetCaptureVolumeThread(). While AGC is enabled it
    // periodically samples the microphone volume and stores it in the
    // attached audio buffer. Runs until _hShutdownCaptureEvent is signalled
    // (by StopRecording()); returns 0 on clean shutdown, (DWORD)-1 on an
    // unexpected wait failure.
    HANDLE waitObject = _hShutdownCaptureEvent;

    while (1)
    {
        if (AGC())
        {
            uint32_t currentMicLevel = 0;
            if (MicrophoneVolume(currentMicLevel) == 0)
            {
                // This doesn't set the system volume, just stores it.
                _Lock();
                if (_ptrAudioBuffer)
                {
                    _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
                }
                _UnLock();
            }
        }

        // Sleep for the polling interval, but wake immediately if the
        // (manual-reset) shutdown event becomes signalled.
        DWORD waitResult = WaitForSingleObject(waitObject,
                                               GET_MIC_VOLUME_INTERVAL_MS);
        switch (waitResult)
        {
            case WAIT_OBJECT_0: // _hShutdownCaptureEvent
                return 0;
            case WAIT_TIMEOUT:  // timeout notification
                break;
            default:            // unexpected error
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                    "  unknown wait termination on get volume thread");
                return -1;      // DWORD is unsigned; wraps to 0xFFFFFFFF
        }
    }
}
3342
DWORD AudioDeviceWindowsCore::DoSetCaptureVolumeThread()
{
    // Thread body for SetCaptureVolumeThread(). Blocks until either the
    // shutdown event (set by StopRecording()) or _hSetCaptureVolumeEvent
    // is signalled; on the latter it applies the level stored in
    // _newMicLevel to the microphone. Returns 0 on clean shutdown,
    // (DWORD)-1 on an unexpected wait failure.
    HANDLE waitArray[2] = {_hShutdownCaptureEvent, _hSetCaptureVolumeEvent};

    while (1)
    {
        DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, INFINITE);
        switch (waitResult)
        {
            case WAIT_OBJECT_0:      // _hShutdownCaptureEvent
                return 0;
            case WAIT_OBJECT_0 + 1:  // _hSetCaptureVolumeEvent
                break;
            default:                 // unexpected error
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                    "  unknown wait termination on set volume thread");
                    return -1;
        }

        // Snapshot the requested level under the lock, then apply it
        // outside the lock (SetMicrophoneVolume may block on the device).
        _Lock();
        uint32_t newMicLevel = _newMicLevel;
        _UnLock();

        if (SetMicrophoneVolume(newMicLevel) == -1)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                "  the required modification of the microphone volume failed");
        }
    }
}
3373
3374 // ----------------------------------------------------------------------------
3375 //  DoRenderThread
3376 // ----------------------------------------------------------------------------
3377
3378 DWORD AudioDeviceWindowsCore::DoRenderThread()
3379 {
3380
3381     bool keepPlaying = true;
3382     HANDLE waitArray[2] = {_hShutdownRenderEvent, _hRenderSamplesReadyEvent};
3383     HRESULT hr = S_OK;
3384     HANDLE hMmTask = NULL;
3385
3386     LARGE_INTEGER t1;
3387     LARGE_INTEGER t2;
3388     int32_t time(0);
3389
3390     // Initialize COM as MTA in this thread.
3391     ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
3392     if (!comInit.succeeded()) {
3393       WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
3394           "failed to initialize COM in render thread");
3395       return -1;
3396     }
3397
3398     _SetThreadName(-1, "webrtc_core_audio_render_thread");
3399
3400     // Use Multimedia Class Scheduler Service (MMCSS) to boost the thread priority.
3401     //
3402     if (_winSupportAvrt)
3403     {
3404         DWORD taskIndex(0);
3405         hMmTask = _PAvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex);
3406         if (hMmTask)
3407         {
3408             if (FALSE == _PAvSetMmThreadPriority(hMmTask, AVRT_PRIORITY_CRITICAL))
3409             {
3410                 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "failed to boost play-thread using MMCSS");
3411             }
3412             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "render thread is now registered with MMCSS (taskIndex=%d)", taskIndex);
3413         }
3414         else
3415         {
3416             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "failed to enable MMCSS on render thread (err=%d)", GetLastError());
3417             _TraceCOMError(GetLastError());
3418         }
3419     }
3420
3421     _Lock();
3422
3423     IAudioClock* clock = NULL;
3424
3425     // Get size of rendering buffer (length is expressed as the number of audio frames the buffer can hold).
3426     // This value is fixed during the rendering session.
3427     //
3428     UINT32 bufferLength = 0;
3429     hr = _ptrClientOut->GetBufferSize(&bufferLength);
3430     EXIT_ON_ERROR(hr);
3431     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[REND] size of buffer       : %u", bufferLength);
3432
3433     // Get maximum latency for the current stream (will not change for the lifetime  of the IAudioClient object).
3434     //
3435     REFERENCE_TIME latency;
3436     _ptrClientOut->GetStreamLatency(&latency);
3437     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[REND] max stream latency   : %u (%3.2f ms)",
3438         (DWORD)latency, (double)(latency/10000.0));
3439
3440     // Get the length of the periodic interval separating successive processing passes by
3441     // the audio engine on the data in the endpoint buffer.
3442     //
3443     // The period between processing passes by the audio engine is fixed for a particular
3444     // audio endpoint device and represents the smallest processing quantum for the audio engine.
3445     // This period plus the stream latency between the buffer and endpoint device represents
3446     // the minimum possible latency that an audio application can achieve.
3447     // Typical value: 100000 <=> 0.01 sec = 10ms.
3448     //
3449     REFERENCE_TIME devPeriod = 0;
3450     REFERENCE_TIME devPeriodMin = 0;
3451     _ptrClientOut->GetDevicePeriod(&devPeriod, &devPeriodMin);
3452     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[REND] device period        : %u (%3.2f ms)",
3453         (DWORD)devPeriod, (double)(devPeriod/10000.0));
3454
3455     // Derive initial rendering delay.
3456     // Example: 10*(960/480) + 15 = 20 + 15 = 35ms
3457     //
3458     int playout_delay = 10 * (bufferLength / _playBlockSize) +
3459         (int)((latency + devPeriod) / 10000);
3460     _sndCardPlayDelay = playout_delay;
3461     _writtenSamples = 0;
3462     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
3463                  "[REND] initial delay        : %u", playout_delay);
3464
3465     double endpointBufferSizeMS = 10.0 * ((double)bufferLength / (double)_devicePlayBlockSize);
3466     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[REND] endpointBufferSizeMS : %3.2f", endpointBufferSizeMS);
3467
3468     // Before starting the stream, fill the rendering buffer with silence.
3469     //
3470     BYTE *pData = NULL;
3471     hr = _ptrRenderClient->GetBuffer(bufferLength, &pData);
3472     EXIT_ON_ERROR(hr);
3473
3474     hr = _ptrRenderClient->ReleaseBuffer(bufferLength, AUDCLNT_BUFFERFLAGS_SILENT);
3475     EXIT_ON_ERROR(hr);
3476
3477     _writtenSamples += bufferLength;
3478
3479     hr = _ptrClientOut->GetService(__uuidof(IAudioClock), (void**)&clock);
3480     if (FAILED(hr)) {
3481       WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
3482                    "failed to get IAudioClock interface from the IAudioClient");
3483     }
3484
3485     // Start up the rendering audio stream.
3486     hr = _ptrClientOut->Start();
3487     EXIT_ON_ERROR(hr);
3488
3489     _UnLock();
3490
3491     // Set event which will ensure that the calling thread modifies the playing state to true.
3492     //
3493     SetEvent(_hRenderStartedEvent);
3494
3495     // >> ------------------ THREAD LOOP ------------------
3496
3497     while (keepPlaying)
3498     {
3499         // Wait for a render notification event or a shutdown event
3500         DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, 500);
3501         switch (waitResult)
3502         {
3503         case WAIT_OBJECT_0 + 0:     // _hShutdownRenderEvent
3504             keepPlaying = false;
3505             break;
3506         case WAIT_OBJECT_0 + 1:     // _hRenderSamplesReadyEvent
3507             break;
3508         case WAIT_TIMEOUT:          // timeout notification
3509             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "render event timed out after 0.5 seconds");
3510             goto Exit;
3511         default:                    // unexpected error
3512             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "unknown wait termination on render side");
3513             goto Exit;
3514         }
3515
3516         while (keepPlaying)
3517         {
3518             _Lock();
3519
3520             // Sanity check to ensure that essential states are not modified
3521             // during the unlocked period.
3522             if (_ptrRenderClient == NULL || _ptrClientOut == NULL)
3523             {
3524                 _UnLock();
3525                 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
3526                     "output state has been modified during unlocked period");
3527                 goto Exit;
3528             }
3529
3530             // Get the number of frames of padding (queued up to play) in the endpoint buffer.
3531             UINT32 padding = 0;
3532             hr = _ptrClientOut->GetCurrentPadding(&padding);
3533             EXIT_ON_ERROR(hr);
3534
3535             // Derive the amount of available space in the output buffer
3536             uint32_t framesAvailable = bufferLength - padding;
3537             // WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id, "#avaliable audio frames = %u", framesAvailable);
3538
3539             // Do we have 10 ms available in the render buffer?
3540             if (framesAvailable < _playBlockSize)
3541             {
3542                 // Not enough space in render buffer to store next render packet.
3543                 _UnLock();
3544                 break;
3545             }
3546
3547             // Write n*10ms buffers to the render buffer
3548             const uint32_t n10msBuffers = (framesAvailable / _playBlockSize);
3549             for (uint32_t n = 0; n < n10msBuffers; n++)
3550             {
3551                 // Get pointer (i.e., grab the buffer) to next space in the shared render buffer.
3552                 hr = _ptrRenderClient->GetBuffer(_playBlockSize, &pData);
3553                 EXIT_ON_ERROR(hr);
3554
3555                 QueryPerformanceCounter(&t1);    // measure time: START
3556
3557                 if (_ptrAudioBuffer)
3558                 {
3559                     // Request data to be played out (#bytes = _playBlockSize*_audioFrameSize)
3560                     _UnLock();
3561                     int32_t nSamples =
3562                     _ptrAudioBuffer->RequestPlayoutData(_playBlockSize);
3563                     _Lock();
3564
3565                     if (nSamples == -1)
3566                     {
3567                         _UnLock();
3568                         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
3569                                      "failed to read data from render client");
3570                         goto Exit;
3571                     }
3572
3573                     // Sanity check to ensure that essential states are not modified during the unlocked period
3574                     if (_ptrRenderClient == NULL || _ptrClientOut == NULL)
3575                     {
3576                         _UnLock();
3577                         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, "output state has been modified during unlocked period");
3578                         goto Exit;
3579                     }
3580                     if (nSamples != static_cast<int32_t>(_playBlockSize))
3581                     {
3582                         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "nSamples(%d) != _playBlockSize(%d)", nSamples, _playBlockSize);
3583                     }
3584
3585                     // Get the actual (stored) data
3586                     nSamples = _ptrAudioBuffer->GetPlayoutData((int8_t*)pData);
3587                 }
3588
3589                 QueryPerformanceCounter(&t2);    // measure time: STOP
3590                 time = (int)(t2.QuadPart-t1.QuadPart);
3591                 _playAcc += time;
3592
3593                 DWORD dwFlags(0);
3594                 hr = _ptrRenderClient->ReleaseBuffer(_playBlockSize, dwFlags);
3595                 // See http://msdn.microsoft.com/en-us/library/dd316605(VS.85).aspx
3596                 // for more details regarding AUDCLNT_E_DEVICE_INVALIDATED.
3597                 EXIT_ON_ERROR(hr);
3598
3599                 _writtenSamples += _playBlockSize;
3600             }
3601
3602             // Check the current delay on the playout side.
3603             if (clock) {
3604               UINT64 pos = 0;
3605               UINT64 freq = 1;
3606               clock->GetPosition(&pos, NULL);
3607               clock->GetFrequency(&freq);
3608               playout_delay = ROUND((double(_writtenSamples) /
3609                   _devicePlaySampleRate - double(pos) / freq) * 1000.0);
3610               _sndCardPlayDelay = playout_delay;
3611             }
3612
3613             _UnLock();
3614         }
3615     }
3616
3617     // ------------------ THREAD LOOP ------------------ <<
3618
3619     SleepMs(static_cast<DWORD>(endpointBufferSizeMS+0.5));
3620     hr = _ptrClientOut->Stop();
3621
3622 Exit:
3623     SAFE_RELEASE(clock);
3624
3625     if (FAILED(hr))
3626     {
3627         _ptrClientOut->Stop();
3628         _UnLock();
3629         _TraceCOMError(hr);
3630     }
3631
3632     if (_winSupportAvrt)
3633     {
3634         if (NULL != hMmTask)
3635         {
3636             _PAvRevertMmThreadCharacteristics(hMmTask);
3637         }
3638     }
3639
3640     _Lock();
3641
3642     if (keepPlaying)
3643     {
3644         if (_ptrClientOut != NULL)
3645         {
3646             hr = _ptrClientOut->Stop();
3647             if (FAILED(hr))
3648             {
3649                 _TraceCOMError(hr);
3650             }
3651             hr = _ptrClientOut->Reset();
3652             if (FAILED(hr))
3653             {
3654                 _TraceCOMError(hr);
3655             }
3656         }
3657         // Trigger callback from module process thread
3658         _playError = 1;
3659         WEBRTC_TRACE(kTraceError, kTraceUtility, _id, "kPlayoutError message posted: rendering thread has ended pre-maturely");
3660     }
3661     else
3662     {
3663         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_Rendering thread is now terminated properly");
3664     }
3665
3666     _UnLock();
3667
3668     return (DWORD)hr;
3669 }
3670
3671 DWORD AudioDeviceWindowsCore::InitCaptureThreadPriority()
3672 {
3673     _hMmTask = NULL;
3674
3675     _SetThreadName(-1, "webrtc_core_audio_capture_thread");
3676
3677     // Use Multimedia Class Scheduler Service (MMCSS) to boost the thread
3678     // priority.
3679     if (_winSupportAvrt)
3680     {
3681         DWORD taskIndex(0);
3682         _hMmTask = _PAvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex);
3683         if (_hMmTask)
3684         {
3685             if (!_PAvSetMmThreadPriority(_hMmTask, AVRT_PRIORITY_CRITICAL))
3686             {
3687                 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
3688                     "failed to boost rec-thread using MMCSS");
3689             }
3690             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
3691                 "capture thread is now registered with MMCSS (taskIndex=%d)",
3692                 taskIndex);
3693         }
3694         else
3695         {
3696             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
3697                 "failed to enable MMCSS on capture thread (err=%d)",
3698                 GetLastError());
3699             _TraceCOMError(GetLastError());
3700         }
3701     }
3702
3703     return S_OK;
3704 }
3705
3706 void AudioDeviceWindowsCore::RevertCaptureThreadPriority()
3707 {
3708     if (_winSupportAvrt)
3709     {
3710         if (NULL != _hMmTask)
3711         {
3712             _PAvRevertMmThreadCharacteristics(_hMmTask);
3713         }
3714     }
3715
3716     _hMmTask = NULL;
3717 }
3718
// Capture thread body used when the built-in AEC DMO is active. Instead of
// waiting on WASAPI buffer-ready events, it polls the DMO every 5 ms for
// AEC-processed capture data and delivers each 10 ms frame to
// |_ptrAudioBuffer|. Returns the last HRESULT (S_OK on clean shutdown).
DWORD AudioDeviceWindowsCore::DoCaptureThreadPollDMO()
{
    assert(_mediaBuffer != NULL);
    bool keepRecording = true;

    // Initialize COM as MTA in this thread.
    ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
    if (!comInit.succeeded()) {
      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
        "failed to initialize COM in polling DMO thread");
      return -1;
    }

    HRESULT hr = InitCaptureThreadPriority();
    if (FAILED(hr))
    {
        return hr;
    }

    // Set event which will ensure that the calling thread modifies the
    // recording state to true.
    SetEvent(_hCaptureStartedEvent);

    // >> ---------------------------- THREAD LOOP ----------------------------
    while (keepRecording)
    {
        // Poll the DMO every 5 ms.
        // (The same interval used in the Wave implementation.)
        DWORD waitResult = WaitForSingleObject(_hShutdownCaptureEvent, 5);
        switch (waitResult)
        {
        case WAIT_OBJECT_0:         // _hShutdownCaptureEvent
            keepRecording = false;
            break;
        case WAIT_TIMEOUT:          // timeout notification
            break;
        default:                    // unexpected error
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                "Unknown wait termination on capture side");
            hr = -1; // To signal an error callback.
            keepRecording = false;
            break;
        }

        // Inner loop: drain the DMO until it reports no more complete frames.
        while (keepRecording)
        {
            // Holds |_critSect| for the whole iteration; the manual
            // _UnLock()/_Lock() pair below temporarily releases the same
            // section around the delivery callback.
            CriticalSectionScoped critScoped(&_critSect);

            DWORD dwStatus = 0;
            {
                DMO_OUTPUT_DATA_BUFFER dmoBuffer = {0};
                dmoBuffer.pBuffer = _mediaBuffer;
                dmoBuffer.pBuffer->AddRef();

                // Poll the DMO for AEC processed capture data. The DMO will
                // copy available data to |dmoBuffer|, and should only return
                // 10 ms frames. The value of |dwStatus| should be ignored.
                hr = _dmo->ProcessOutput(0, 1, &dmoBuffer, &dwStatus);
                SAFE_RELEASE(dmoBuffer.pBuffer);
                // The per-buffer status (checked below for
                // DMO_OUTPUT_DATA_BUFFERF_INCOMPLETE) replaces the ignored
                // ProcessOutput out-parameter.
                dwStatus = dmoBuffer.dwStatus;
            }
            if (FAILED(hr))
            {
                _TraceCOMError(hr);
                keepRecording = false;
                assert(false);
                break;
            }

            ULONG bytesProduced = 0;
            BYTE* data;
            // Get a pointer to the data buffer. This should be valid until
            // the next call to ProcessOutput.
            hr = _mediaBuffer->GetBufferAndLength(&data, &bytesProduced);
            if (FAILED(hr))
            {
                _TraceCOMError(hr);
                keepRecording = false;
                assert(false);
                break;
            }

            // TODO(andrew): handle AGC.

            if (bytesProduced > 0)
            {
                const int kSamplesProduced = bytesProduced / _recAudioFrameSize;
                // TODO(andrew): verify that this is always satisfied. It might
                // be that ProcessOutput will try to return more than 10 ms if
                // we fail to call it frequently enough.
                assert(kSamplesProduced == static_cast<int>(_recBlockSize));
                assert(sizeof(BYTE) == sizeof(int8_t));
                _ptrAudioBuffer->SetRecordedBuffer(
                    reinterpret_cast<int8_t*>(data),
                    kSamplesProduced);
                // No play/rec delay or clock drift is reported in DMO mode.
                _ptrAudioBuffer->SetVQEData(0, 0, 0);

                _UnLock();  // Release lock while making the callback.
                _ptrAudioBuffer->DeliverRecordedData();
                _Lock();
            }

            // Reset length to indicate buffer availability.
            hr = _mediaBuffer->SetLength(0);
            if (FAILED(hr))
            {
                _TraceCOMError(hr);
                keepRecording = false;
                assert(false);
                break;
            }

            if (!(dwStatus & DMO_OUTPUT_DATA_BUFFERF_INCOMPLETE))
            {
                // The DMO cannot currently produce more data. This is the
                // normal case; otherwise it means the DMO had more than 10 ms
                // of data available and ProcessOutput should be called again.
                break;
            }
        }
    }
    // ---------------------------- THREAD LOOP ---------------------------- <<

    RevertCaptureThreadPriority();

    if (FAILED(hr))
    {
        // Trigger callback from module process thread
        _recError = 1;
        WEBRTC_TRACE(kTraceError, kTraceUtility, _id,
            "kRecordingError message posted: capturing thread has ended "
            "prematurely");
    }
    else
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "Capturing thread is now terminated properly");
    }

    return hr;
}
3860
3861
3862 // ----------------------------------------------------------------------------
3863 //  DoCaptureThread
3864 // ----------------------------------------------------------------------------
3865
// WASAPI event-driven capture thread. Waits on the samples-ready event,
// copies captured packets into |syncBuffer|, and delivers them to
// |_ptrAudioBuffer| in fixed 10 ms (_recBlockSize) segments. Also updates
// the recording-delay estimate and the "average CPU load" metric, and
// signals AGC volume changes. Returns the last HRESULT cast to DWORD.
DWORD AudioDeviceWindowsCore::DoCaptureThread()
{

    bool keepRecording = true;
    HANDLE waitArray[2] = {_hShutdownCaptureEvent, _hCaptureSamplesReadyEvent};
    HRESULT hr = S_OK;

    LARGE_INTEGER t1;
    LARGE_INTEGER t2;
    int32_t time(0);

    BYTE* syncBuffer = NULL;
    UINT32 syncBufIndex = 0;

    _readSamples = 0;

    // Initialize COM as MTA in this thread.
    ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
    if (!comInit.succeeded()) {
      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
        "failed to initialize COM in capture thread");
      return -1;
    }

    hr = InitCaptureThreadPriority();
    if (FAILED(hr))
    {
        return hr;
    }

    _Lock();

    // Get size of capturing buffer (length is expressed as the number of audio frames the buffer can hold).
    // This value is fixed during the capturing session.
    //
    UINT32 bufferLength = 0;
    hr = _ptrClientIn->GetBufferSize(&bufferLength);
    EXIT_ON_ERROR(hr);
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[CAPT] size of buffer       : %u", bufferLength);

    // Allocate memory for sync buffer.
    // It is used for compensation between native 44.1 and internal 44.0 and
    // for cases when the capture buffer is larger than 10ms.
    //
    const UINT32 syncBufferSize = 2*(bufferLength * _recAudioFrameSize);
    syncBuffer = new BYTE[syncBufferSize];
    if (syncBuffer == NULL)
    {
        // NOTE(review): this path returns while _critSect is still held and
        // without RevertCaptureThreadPriority(); with a throwing operator
        // new it is normally unreachable — confirm before relying on it.
        return E_POINTER;
    }
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[CAPT] size of sync buffer  : %u [bytes]", syncBufferSize);

    // Get maximum latency for the current stream (will not change for the lifetime of the IAudioClient object).
    //
    REFERENCE_TIME latency;
    _ptrClientIn->GetStreamLatency(&latency);
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[CAPT] max stream latency   : %u (%3.2f ms)",
        (DWORD)latency, (double)(latency / 10000.0));

    // Get the length of the periodic interval separating successive processing passes by
    // the audio engine on the data in the endpoint buffer.
    //
    REFERENCE_TIME devPeriod = 0;
    REFERENCE_TIME devPeriodMin = 0;
    _ptrClientIn->GetDevicePeriod(&devPeriod, &devPeriodMin);
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[CAPT] device period        : %u (%3.2f ms)",
        (DWORD)devPeriod, (double)(devPeriod / 10000.0));

    // REFERENCE_TIME units are 100 ns, hence the division by 10000 to get ms.
    double extraDelayMS = (double)((latency + devPeriod) / 10000.0);
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[CAPT] extraDelayMS         : %3.2f", extraDelayMS);

    double endpointBufferSizeMS = 10.0 * ((double)bufferLength / (double)_recBlockSize);
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[CAPT] endpointBufferSizeMS : %3.2f", endpointBufferSizeMS);

    // Start up the capturing stream.
    //
    hr = _ptrClientIn->Start();
    EXIT_ON_ERROR(hr);

    _UnLock();

    // Set event which will ensure that the calling thread modifies the recording state to true.
    //
    SetEvent(_hCaptureStartedEvent);

    // >> ---------------------------- THREAD LOOP ----------------------------

    while (keepRecording)
    {
        // Wait for a capture notification event or a shutdown event
        DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, 500);
        switch (waitResult)
        {
        case WAIT_OBJECT_0 + 0:        // _hShutdownCaptureEvent
            keepRecording = false;
            break;
        case WAIT_OBJECT_0 + 1:        // _hCaptureSamplesReadyEvent
            break;
        case WAIT_TIMEOUT:            // timeout notification
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "capture event timed out after 0.5 seconds");
            goto Exit;
        default:                    // unexpected error
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "unknown wait termination on capture side");
            goto Exit;
        }

        // Inner loop: drain all packets currently queued in the endpoint
        // buffer before going back to wait for the next event.
        while (keepRecording)
        {
            BYTE *pData = 0;
            UINT32 framesAvailable = 0;
            DWORD flags = 0;
            UINT64 recTime = 0;
            UINT64 recPos = 0;

            _Lock();

            // Sanity check to ensure that essential states are not modified
            // during the unlocked period.
            if (_ptrCaptureClient == NULL || _ptrClientIn == NULL)
            {
                _UnLock();
                WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
                    "input state has been modified during unlocked period");
                goto Exit;
            }

            //  Find out how much capture data is available
            //
            hr = _ptrCaptureClient->GetBuffer(&pData,           // packet which is ready to be read by user
                                              &framesAvailable, // #frames in the captured packet (can be zero)
                                              &flags,           // support flags (check)
                                              &recPos,          // device position of first audio frame in data packet
                                              &recTime);        // value of performance counter at the time of recording the first audio frame

            if (SUCCEEDED(hr))
            {
                if (AUDCLNT_S_BUFFER_EMPTY == hr)
                {
                    // Buffer was empty => start waiting for a new capture notification event
                    _UnLock();
                    break;
                }

                if (flags & AUDCLNT_BUFFERFLAGS_SILENT)
                {
                    // Treat all of the data in the packet as silence and ignore the actual data values.
                    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "AUDCLNT_BUFFERFLAGS_SILENT");
                    pData = NULL;
                }

                assert(framesAvailable != 0);

                // Append the packet (or an equal amount of silence) to the
                // accumulated data in the sync buffer.
                if (pData)
                {
                    CopyMemory(&syncBuffer[syncBufIndex*_recAudioFrameSize], pData, framesAvailable*_recAudioFrameSize);
                }
                else
                {
                    ZeroMemory(&syncBuffer[syncBufIndex*_recAudioFrameSize], framesAvailable*_recAudioFrameSize);
                }
                assert(syncBufferSize >= (syncBufIndex*_recAudioFrameSize)+framesAvailable*_recAudioFrameSize);

                // Release the capture buffer
                //
                hr = _ptrCaptureClient->ReleaseBuffer(framesAvailable);
                EXIT_ON_ERROR(hr);

                _readSamples += framesAvailable;
                syncBufIndex += framesAvailable;

                QueryPerformanceCounter(&t1);

                // Get the current recording and playout delay.
                uint32_t sndCardRecDelay = (uint32_t)
                    (((((UINT64)t1.QuadPart * _perfCounterFactor) - recTime)
                        / 10000) + (10*syncBufIndex) / _recBlockSize - 10);
                uint32_t sndCardPlayDelay =
                    static_cast<uint32_t>(_sndCardPlayDelay);

                _sndCardRecDelay = sndCardRecDelay;

                // Deliver the buffered data in 10 ms (_recBlockSize frames)
                // chunks as long as a full chunk is available.
                while (syncBufIndex >= _recBlockSize)
                {
                    if (_ptrAudioBuffer)
                    {
                        _ptrAudioBuffer->SetRecordedBuffer((const int8_t*)syncBuffer, _recBlockSize);
                        _ptrAudioBuffer->SetVQEData(sndCardPlayDelay,
                                                    sndCardRecDelay,
                                                    0);

                        _ptrAudioBuffer->SetTypingStatus(KeyPressed());

                        QueryPerformanceCounter(&t1);    // measure time: START

                        _UnLock();  // release lock while making the callback
                        _ptrAudioBuffer->DeliverRecordedData();
                        _Lock();    // restore the lock

                        QueryPerformanceCounter(&t2);    // measure time: STOP

                        // Measure "average CPU load".
                        // Basically what we do here is to measure how many percent of our 10ms period
                        // is used for encoding and decoding. This value should be used as a warning indicator
                        // only and not seen as an absolute value. Running at ~100% will lead to bad QoS.
                        time = (int)(t2.QuadPart - t1.QuadPart);
                        _avgCPULoad = (float)(_avgCPULoad*.99 + (time + _playAcc) / (double)(_perfCounterFreq.QuadPart));
                        _playAcc = 0;

                        // Sanity check to ensure that essential states are not modified during the unlocked period
                        if (_ptrCaptureClient == NULL || _ptrClientIn == NULL)
                        {
                            _UnLock();
                            WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, "input state has been modified during unlocked period");
                            goto Exit;
                        }
                    }

                    // store remaining data which was not able to deliver as 10ms segment
                    MoveMemory(&syncBuffer[0], &syncBuffer[_recBlockSize*_recAudioFrameSize], (syncBufIndex-_recBlockSize)*_recAudioFrameSize);
                    syncBufIndex -= _recBlockSize;
                    sndCardRecDelay -= 10;
                }

                if (_AGC)
                {
                    uint32_t newMicLevel = _ptrAudioBuffer->NewMicLevel();
                    if (newMicLevel != 0)
                    {
                        // The VQE will only deliver non-zero microphone levels when a change is needed.
                        // Set this new mic level (received from the observer as return value in the callback).
                        WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id, "AGC change of volume: new=%u",  newMicLevel);
                        // We store this outside of the audio buffer to avoid
                        // having it overwritten by the getter thread.
                        _newMicLevel = newMicLevel;
                        SetEvent(_hSetCaptureVolumeEvent);
                    }
                }
            }
            else
            {
                // If GetBuffer returns AUDCLNT_E_BUFFER_ERROR, the thread consuming the audio samples
                // must wait for the next processing pass. The client might benefit from keeping a count
                // of the failed GetBuffer calls. If GetBuffer returns this error repeatedly, the client
                // can start a new processing loop after shutting down the current client by calling
                // IAudioClient::Stop, IAudioClient::Reset, and releasing the audio client.
                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                    "IAudioCaptureClient::GetBuffer returned AUDCLNT_E_BUFFER_ERROR, hr = 0x%08X",  hr);
                goto Exit;
            }

            _UnLock();
        }
    }

    // ---------------------------- THREAD LOOP ---------------------------- <<

    hr = _ptrClientIn->Stop();

Exit:
    if (FAILED(hr))
    {
        // Error exits arrive here with the lock still held (via
        // EXIT_ON_ERROR or goto while locked); release it after stopping.
        _ptrClientIn->Stop();
        _UnLock();
        _TraceCOMError(hr);
    }

    RevertCaptureThreadPriority();

    _Lock();

    // keepRecording still true here means we exited on an error rather than
    // the shutdown event; stop/reset the client and post an error callback.
    if (keepRecording)
    {
        if (_ptrClientIn != NULL)
        {
            hr = _ptrClientIn->Stop();
            if (FAILED(hr))
            {
                _TraceCOMError(hr);
            }
            hr = _ptrClientIn->Reset();
            if (FAILED(hr))
            {
                _TraceCOMError(hr);
            }
        }

        // Trigger callback from module process thread
        _recError = 1;
        WEBRTC_TRACE(kTraceError, kTraceUtility, _id, "kRecordingError message posted: capturing thread has ended pre-maturely");
    }
    else
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_Capturing thread is now terminated properly");
    }

    SAFE_RELEASE(_ptrClientIn);
    SAFE_RELEASE(_ptrCaptureClient);

    _UnLock();

    if (syncBuffer)
    {
        delete [] syncBuffer;
    }

    return (DWORD)hr;
}
4173
4174 int32_t AudioDeviceWindowsCore::EnableBuiltInAEC(bool enable)
4175 {
4176
4177     if (_recIsInitialized)
4178     {
4179         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
4180             "Attempt to set Windows AEC with recording already initialized");
4181         return -1;
4182     }
4183
4184     if (_dmo == NULL)
4185     {
4186         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
4187             "Built-in AEC DMO was not initialized properly at create time");
4188         return -1;
4189     }
4190
4191     _builtInAecEnabled = enable;
4192     return 0;
4193 }
4194
4195 bool AudioDeviceWindowsCore::BuiltInAECIsEnabled() const
4196 {
4197     return _builtInAecEnabled;
4198 }
4199
4200 int AudioDeviceWindowsCore::SetDMOProperties()
4201 {
4202     HRESULT hr = S_OK;
4203     assert(_dmo != NULL);
4204
4205     scoped_refptr<IPropertyStore> ps;
4206     {
4207         IPropertyStore* ptrPS = NULL;
4208         hr = _dmo->QueryInterface(IID_IPropertyStore,
4209                                   reinterpret_cast<void**>(&ptrPS));
4210         if (FAILED(hr) || ptrPS == NULL)
4211         {
4212             _TraceCOMError(hr);
4213             return -1;
4214         }
4215         ps = ptrPS;
4216         SAFE_RELEASE(ptrPS);
4217     }
4218
4219     // Set the AEC system mode.
4220     // SINGLE_CHANNEL_AEC - AEC processing only.
4221     if (SetVtI4Property(ps,
4222                         MFPKEY_WMAAECMA_SYSTEM_MODE,
4223                         SINGLE_CHANNEL_AEC))
4224     {
4225         return -1;
4226     }
4227
4228     // Set the AEC source mode.
4229     // VARIANT_TRUE - Source mode (we poll the AEC for captured data).
4230     if (SetBoolProperty(ps,
4231                         MFPKEY_WMAAECMA_DMO_SOURCE_MODE,
4232                         VARIANT_TRUE) == -1)
4233     {
4234         return -1;
4235     }
4236
4237     // Enable the feature mode.
4238     // This lets us override all the default processing settings below.
4239     if (SetBoolProperty(ps,
4240                         MFPKEY_WMAAECMA_FEATURE_MODE,
4241                         VARIANT_TRUE) == -1)
4242     {
4243         return -1;
4244     }
4245
4246     // Disable analog AGC (default enabled).
4247     if (SetBoolProperty(ps,
4248                         MFPKEY_WMAAECMA_MIC_GAIN_BOUNDER,
4249                         VARIANT_FALSE) == -1)
4250     {
4251         return -1;
4252     }
4253
4254     // Disable noise suppression (default enabled).
4255     // 0 - Disabled, 1 - Enabled
4256     if (SetVtI4Property(ps,
4257                         MFPKEY_WMAAECMA_FEATR_NS,
4258                         0) == -1)
4259     {
4260         return -1;
4261     }
4262
4263     // Relevant parameters to leave at default settings:
4264     // MFPKEY_WMAAECMA_FEATR_AGC - Digital AGC (disabled).
4265     // MFPKEY_WMAAECMA_FEATR_CENTER_CLIP - AEC center clipping (enabled).
4266     // MFPKEY_WMAAECMA_FEATR_ECHO_LENGTH - Filter length (256 ms).
4267     //   TODO(andrew): investigate decresing the length to 128 ms.
4268     // MFPKEY_WMAAECMA_FEATR_FRAME_SIZE - Frame size (0).
4269     //   0 is automatic; defaults to 160 samples (or 10 ms frames at the
4270     //   selected 16 kHz) as long as mic array processing is disabled.
4271     // MFPKEY_WMAAECMA_FEATR_NOISE_FILL - Comfort noise (enabled).
4272     // MFPKEY_WMAAECMA_FEATR_VAD - VAD (disabled).
4273
4274     // Set the devices selected by VoE. If using a default device, we need to
4275     // search for the device index.
4276     int inDevIndex = _inputDeviceIndex;
4277     int outDevIndex = _outputDeviceIndex;
4278     if (!_usingInputDeviceIndex)
4279     {
4280         ERole role = eCommunications;
4281         if (_inputDevice == AudioDeviceModule::kDefaultDevice)
4282         {
4283             role = eConsole;
4284         }
4285
4286         if (_GetDefaultDeviceIndex(eCapture, role, &inDevIndex) == -1)
4287         {
4288             return -1;
4289         }
4290     }
4291
4292     if (!_usingOutputDeviceIndex)
4293     {
4294         ERole role = eCommunications;
4295         if (_outputDevice == AudioDeviceModule::kDefaultDevice)
4296         {
4297             role = eConsole;
4298         }
4299
4300         if (_GetDefaultDeviceIndex(eRender, role, &outDevIndex) == -1)
4301         {
4302             return -1;
4303         }
4304     }
4305
4306     DWORD devIndex = static_cast<uint32_t>(outDevIndex << 16) +
4307                      static_cast<uint32_t>(0x0000ffff & inDevIndex);
4308     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
4309         "Capture device index: %d, render device index: %d",
4310         inDevIndex, outDevIndex);
4311     if (SetVtI4Property(ps,
4312                         MFPKEY_WMAAECMA_DEVICE_INDEXES,
4313                         devIndex) == -1)
4314     {
4315         return -1;
4316     }
4317
4318     return 0;
4319 }
4320
4321 int AudioDeviceWindowsCore::SetBoolProperty(IPropertyStore* ptrPS,
4322                                             REFPROPERTYKEY key,
4323                                             VARIANT_BOOL value)
4324 {
4325     PROPVARIANT pv;
4326     PropVariantInit(&pv);
4327     pv.vt = VT_BOOL;
4328     pv.boolVal = value;
4329     HRESULT hr = ptrPS->SetValue(key, pv);
4330     PropVariantClear(&pv);
4331     if (FAILED(hr))
4332     {
4333         _TraceCOMError(hr);
4334         return -1;
4335     }
4336     return 0;
4337 }
4338
4339 int AudioDeviceWindowsCore::SetVtI4Property(IPropertyStore* ptrPS,
4340                                             REFPROPERTYKEY key,
4341                                             LONG value)
4342 {
4343     PROPVARIANT pv;
4344     PropVariantInit(&pv);
4345     pv.vt = VT_I4;
4346     pv.lVal = value;
4347     HRESULT hr = ptrPS->SetValue(key, pv);
4348     PropVariantClear(&pv);
4349     if (FAILED(hr))
4350     {
4351         _TraceCOMError(hr);
4352         return -1;
4353     }
4354     return 0;
4355 }
4356
4357 // ----------------------------------------------------------------------------
4358 //  _RefreshDeviceList
4359 //
4360 //  Creates a new list of endpoint rendering or capture devices after
4361 //  deleting any previously created (and possibly out-of-date) list of
4362 //  such devices.
4363 // ----------------------------------------------------------------------------
4364
4365 int32_t AudioDeviceWindowsCore::_RefreshDeviceList(EDataFlow dir)
4366 {
4367     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4368
4369     HRESULT hr = S_OK;
4370     IMMDeviceCollection *pCollection = NULL;
4371
4372     assert(dir == eRender || dir == eCapture);
4373     assert(_ptrEnumerator != NULL);
4374
4375     // Create a fresh list of devices using the specified direction
4376     hr = _ptrEnumerator->EnumAudioEndpoints(
4377                            dir,
4378                            DEVICE_STATE_ACTIVE,
4379                            &pCollection);
4380     if (FAILED(hr))
4381     {
4382         _TraceCOMError(hr);
4383         SAFE_RELEASE(pCollection);
4384         return -1;
4385     }
4386
4387     if (dir == eRender)
4388     {
4389         SAFE_RELEASE(_ptrRenderCollection);
4390         _ptrRenderCollection = pCollection;
4391     }
4392     else
4393     {
4394         SAFE_RELEASE(_ptrCaptureCollection);
4395         _ptrCaptureCollection = pCollection;
4396     }
4397
4398     return 0;
4399 }
4400
4401 // ----------------------------------------------------------------------------
4402 //  _DeviceListCount
4403 //
4404 //  Gets a count of the endpoint rendering or capture devices in the
4405 //  current list of such devices.
4406 // ----------------------------------------------------------------------------
4407
4408 int16_t AudioDeviceWindowsCore::_DeviceListCount(EDataFlow dir)
4409 {
4410     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4411
4412     HRESULT hr = S_OK;
4413     UINT count = 0;
4414
4415     assert(eRender == dir || eCapture == dir);
4416
4417     if (eRender == dir && NULL != _ptrRenderCollection)
4418     {
4419         hr = _ptrRenderCollection->GetCount(&count);
4420     }
4421     else if (NULL != _ptrCaptureCollection)
4422     {
4423         hr = _ptrCaptureCollection->GetCount(&count);
4424     }
4425
4426     if (FAILED(hr))
4427     {
4428         _TraceCOMError(hr);
4429         return -1;
4430     }
4431
4432     return static_cast<int16_t> (count);
4433 }
4434
4435 // ----------------------------------------------------------------------------
4436 //  _GetListDeviceName
4437 //
4438 //  Gets the friendly name of an endpoint rendering or capture device
4439 //  from the current list of such devices. The caller uses an index
4440 //  into the list to identify the device.
4441 //
4442 //  Uses: _ptrRenderCollection or _ptrCaptureCollection which is updated
4443 //  in _RefreshDeviceList().
4444 // ----------------------------------------------------------------------------
4445
4446 int32_t AudioDeviceWindowsCore::_GetListDeviceName(EDataFlow dir, int index, LPWSTR szBuffer, int bufferLen)
4447 {
4448     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4449
4450     HRESULT hr = S_OK;
4451     IMMDevice *pDevice = NULL;
4452
4453     assert(dir == eRender || dir == eCapture);
4454
4455     if (eRender == dir && NULL != _ptrRenderCollection)
4456     {
4457         hr = _ptrRenderCollection->Item(index, &pDevice);
4458     }
4459     else if (NULL != _ptrCaptureCollection)
4460     {
4461         hr = _ptrCaptureCollection->Item(index, &pDevice);
4462     }
4463
4464     if (FAILED(hr))
4465     {
4466         _TraceCOMError(hr);
4467         SAFE_RELEASE(pDevice);
4468         return -1;
4469     }
4470
4471     int32_t res = _GetDeviceName(pDevice, szBuffer, bufferLen);
4472     SAFE_RELEASE(pDevice);
4473     return res;
4474 }
4475
4476 // ----------------------------------------------------------------------------
4477 //  _GetDefaultDeviceName
4478 //
4479 //  Gets the friendly name of an endpoint rendering or capture device
4480 //  given a specified device role.
4481 //
4482 //  Uses: _ptrEnumerator
4483 // ----------------------------------------------------------------------------
4484
4485 int32_t AudioDeviceWindowsCore::_GetDefaultDeviceName(EDataFlow dir, ERole role, LPWSTR szBuffer, int bufferLen)
4486 {
4487     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4488
4489     HRESULT hr = S_OK;
4490     IMMDevice *pDevice = NULL;
4491
4492     assert(dir == eRender || dir == eCapture);
4493     assert(role == eConsole || role == eCommunications);
4494     assert(_ptrEnumerator != NULL);
4495
4496     hr = _ptrEnumerator->GetDefaultAudioEndpoint(
4497                            dir,
4498                            role,
4499                            &pDevice);
4500
4501     if (FAILED(hr))
4502     {
4503         _TraceCOMError(hr);
4504         SAFE_RELEASE(pDevice);
4505         return -1;
4506     }
4507
4508     int32_t res = _GetDeviceName(pDevice, szBuffer, bufferLen);
4509     SAFE_RELEASE(pDevice);
4510     return res;
4511 }
4512
4513 // ----------------------------------------------------------------------------
4514 //  _GetListDeviceID
4515 //
4516 //  Gets the unique ID string of an endpoint rendering or capture device
4517 //  from the current list of such devices. The caller uses an index
4518 //  into the list to identify the device.
4519 //
4520 //  Uses: _ptrRenderCollection or _ptrCaptureCollection which is updated
4521 //  in _RefreshDeviceList().
4522 // ----------------------------------------------------------------------------
4523
4524 int32_t AudioDeviceWindowsCore::_GetListDeviceID(EDataFlow dir, int index, LPWSTR szBuffer, int bufferLen)
4525 {
4526     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4527
4528     HRESULT hr = S_OK;
4529     IMMDevice *pDevice = NULL;
4530
4531     assert(dir == eRender || dir == eCapture);
4532
4533     if (eRender == dir && NULL != _ptrRenderCollection)
4534     {
4535         hr = _ptrRenderCollection->Item(index, &pDevice);
4536     }
4537     else if (NULL != _ptrCaptureCollection)
4538     {
4539         hr = _ptrCaptureCollection->Item(index, &pDevice);
4540     }
4541
4542     if (FAILED(hr))
4543     {
4544         _TraceCOMError(hr);
4545         SAFE_RELEASE(pDevice);
4546         return -1;
4547     }
4548
4549     int32_t res = _GetDeviceID(pDevice, szBuffer, bufferLen);
4550     SAFE_RELEASE(pDevice);
4551     return res;
4552 }
4553
4554 // ----------------------------------------------------------------------------
4555 //  _GetDefaultDeviceID
4556 //
4557 //  Gets the uniqe device ID of an endpoint rendering or capture device
4558 //  given a specified device role.
4559 //
4560 //  Uses: _ptrEnumerator
4561 // ----------------------------------------------------------------------------
4562
4563 int32_t AudioDeviceWindowsCore::_GetDefaultDeviceID(EDataFlow dir, ERole role, LPWSTR szBuffer, int bufferLen)
4564 {
4565     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4566
4567     HRESULT hr = S_OK;
4568     IMMDevice *pDevice = NULL;
4569
4570     assert(dir == eRender || dir == eCapture);
4571     assert(role == eConsole || role == eCommunications);
4572     assert(_ptrEnumerator != NULL);
4573
4574     hr = _ptrEnumerator->GetDefaultAudioEndpoint(
4575                            dir,
4576                            role,
4577                            &pDevice);
4578
4579     if (FAILED(hr))
4580     {
4581         _TraceCOMError(hr);
4582         SAFE_RELEASE(pDevice);
4583         return -1;
4584     }
4585
4586     int32_t res = _GetDeviceID(pDevice, szBuffer, bufferLen);
4587     SAFE_RELEASE(pDevice);
4588     return res;
4589 }
4590
// Finds the zero-based index of the default device (for |dir| and |role|)
// within the currently cached device collection for that direction.
// Writes the index to |*index| and returns 0, or returns -1 if the default
// ID cannot be read, the collection is missing, or no entry matches.
int32_t AudioDeviceWindowsCore::_GetDefaultDeviceIndex(EDataFlow dir,
                                                       ERole role,
                                                       int* index)
{
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);

    HRESULT hr = S_OK;
    WCHAR szDefaultDeviceID[MAX_PATH] = {0};
    WCHAR szDeviceID[MAX_PATH] = {0};

    // Both ID buffers must be the same size since they are compared with
    // wcsncmp() over kDeviceIDLength characters below.
    const size_t kDeviceIDLength = sizeof(szDeviceID)/sizeof(szDeviceID[0]);
    assert(kDeviceIDLength ==
        sizeof(szDefaultDeviceID) / sizeof(szDefaultDeviceID[0]));

    // Fetch the unique ID string of the system default endpoint; this is
    // the needle we search for in the cached collection.
    if (_GetDefaultDeviceID(dir,
                            role,
                            szDefaultDeviceID,
                            kDeviceIDLength) == -1)
    {
        return -1;
    }

    // Select the cached collection matching the requested direction.
    IMMDeviceCollection* collection = _ptrCaptureCollection;
    if (dir == eRender)
    {
        collection = _ptrRenderCollection;
    }

    if (!collection)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
            "Device collection not valid");
        return -1;
    }

    UINT count = 0;
    hr = collection->GetCount(&count);
    if (FAILED(hr))
    {
        _TraceCOMError(hr);
        return -1;
    }

    // Linear scan: compare each device's ID against the default ID.
    *index = -1;
    for (UINT i = 0; i < count; i++)
    {
        memset(szDeviceID, 0, sizeof(szDeviceID));
        scoped_refptr<IMMDevice> device;
        {
            IMMDevice* ptrDevice = NULL;
            hr = collection->Item(i, &ptrDevice);
            if (FAILED(hr) || ptrDevice == NULL)
            {
                _TraceCOMError(hr);
                return -1;
            }
            // Assigning to scoped_refptr takes its own reference, so the
            // reference returned by Item() must be released here to keep
            // the COM refcount balanced.
            device = ptrDevice;
            SAFE_RELEASE(ptrDevice);
        }

        if (_GetDeviceID(device, szDeviceID, kDeviceIDLength) == -1)
        {
           return -1;
        }

        if (wcsncmp(szDefaultDeviceID, szDeviceID, kDeviceIDLength) == 0)
        {
            // Found a match.
            *index = i;
            break;
        }

    }

    if (*index == -1)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
            "Unable to find collection index for default device");
        return -1;
    }

    return 0;
}
4674
4675 // ----------------------------------------------------------------------------
4676 //  _GetDeviceName
4677 // ----------------------------------------------------------------------------
4678
// Copies the friendly name of |pDevice| (e.g. "Speakers (Realtek ...)")
// into |pszBuffer| (at most |bufferLen| characters, always terminated).
// On any failure — NULL device, property-store error, missing or
// non-string property — a "<Device not available>" placeholder is copied
// instead. Always returns 0.
int32_t AudioDeviceWindowsCore::_GetDeviceName(IMMDevice* pDevice,
                                               LPWSTR pszBuffer,
                                               int bufferLen)
{
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);

    static const WCHAR szDefault[] = L"<Device not available>";

    // hr starts as E_FAIL so a NULL |pDevice| falls straight through the
    // SUCCEEDED(hr) ladder below to the placeholder copy.
    HRESULT hr = E_FAIL;
    IPropertyStore *pProps = NULL;
    PROPVARIANT varName;

    assert(pszBuffer != NULL);
    assert(bufferLen > 0);

    if (pDevice != NULL)
    {
        hr = pDevice->OpenPropertyStore(STGM_READ, &pProps);
        if (FAILED(hr))
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                "IMMDevice::OpenPropertyStore failed, hr = 0x%08X", hr);
        }
    }

    // Initialize container for property value.
    PropVariantInit(&varName);

    if (SUCCEEDED(hr))
    {
        // Get the endpoint device's friendly-name property.
        hr = pProps->GetValue(PKEY_Device_FriendlyName, &varName);
        if (FAILED(hr))
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                "IPropertyStore::GetValue failed, hr = 0x%08X", hr);
        }
    }

    // GetValue() can succeed yet return an empty variant when the
    // property is not present; treat that as a failure.
    if ((SUCCEEDED(hr)) && (VT_EMPTY == varName.vt))
    {
        hr = E_FAIL;
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
            "IPropertyStore::GetValue returned no value, hr = 0x%08X", hr);
    }

    if ((SUCCEEDED(hr)) && (VT_LPWSTR != varName.vt))
    {
        // The returned value is not a wide null terminated string.
        hr = E_UNEXPECTED;
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
            "IPropertyStore::GetValue returned unexpected type, hr = 0x%08X", hr);
    }

    if (SUCCEEDED(hr) && (varName.pwszVal != NULL))
    {
        // Copy the valid device name to the provided ouput buffer.
        // _TRUNCATE makes wcsncpy_s truncate (with termination) rather
        // than invoke the invalid-parameter handler on overflow.
        wcsncpy_s(pszBuffer, bufferLen, varName.pwszVal, _TRUNCATE);
    }
    else
    {
        // Failed to find the device name.
        wcsncpy_s(pszBuffer, bufferLen, szDefault, _TRUNCATE);
    }

    // Safe even on failure paths: varName was initialized above and
    // pProps is NULL-checked by SAFE_RELEASE.
    PropVariantClear(&varName);
    SAFE_RELEASE(pProps);

    return 0;
}
4749
4750 // ----------------------------------------------------------------------------
4751 //  _GetDeviceID
4752 // ----------------------------------------------------------------------------
4753
4754 int32_t AudioDeviceWindowsCore::_GetDeviceID(IMMDevice* pDevice, LPWSTR pszBuffer, int bufferLen)
4755 {
4756     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4757
4758     static const WCHAR szDefault[] = L"<Device not available>";
4759
4760     HRESULT hr = E_FAIL;
4761     LPWSTR pwszID = NULL;
4762
4763     assert(pszBuffer != NULL);
4764     assert(bufferLen > 0);
4765
4766     if (pDevice != NULL)
4767     {
4768         hr = pDevice->GetId(&pwszID);
4769     }
4770
4771     if (hr == S_OK)
4772     {
4773         // Found the device ID.
4774         wcsncpy_s(pszBuffer, bufferLen, pwszID, _TRUNCATE);
4775     }
4776     else
4777     {
4778         // Failed to find the device ID.
4779         wcsncpy_s(pszBuffer, bufferLen, szDefault, _TRUNCATE);
4780     }
4781
4782     CoTaskMemFree(pwszID);
4783     return 0;
4784 }
4785
4786 // ----------------------------------------------------------------------------
4787 //  _GetDefaultDevice
4788 // ----------------------------------------------------------------------------
4789
4790 int32_t AudioDeviceWindowsCore::_GetDefaultDevice(EDataFlow dir, ERole role, IMMDevice** ppDevice)
4791 {
4792     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4793
4794     HRESULT hr(S_OK);
4795
4796     assert(_ptrEnumerator != NULL);
4797
4798     hr = _ptrEnumerator->GetDefaultAudioEndpoint(
4799                                    dir,
4800                                    role,
4801                                    ppDevice);
4802     if (FAILED(hr))
4803     {
4804         _TraceCOMError(hr);
4805         return -1;
4806     }
4807
4808     return 0;
4809 }
4810
4811 // ----------------------------------------------------------------------------
4812 //  _GetListDevice
4813 // ----------------------------------------------------------------------------
4814
4815 int32_t AudioDeviceWindowsCore::_GetListDevice(EDataFlow dir, int index, IMMDevice** ppDevice)
4816 {
4817     HRESULT hr(S_OK);
4818
4819     assert(_ptrEnumerator != NULL);
4820
4821     IMMDeviceCollection *pCollection = NULL;
4822
4823     hr = _ptrEnumerator->EnumAudioEndpoints(
4824                                dir,
4825                                DEVICE_STATE_ACTIVE,        // only active endpoints are OK
4826                                &pCollection);
4827     if (FAILED(hr))
4828     {
4829         _TraceCOMError(hr);
4830         SAFE_RELEASE(pCollection);
4831         return -1;
4832     }
4833
4834     hr = pCollection->Item(
4835                         index,
4836                         ppDevice);
4837     if (FAILED(hr))
4838     {
4839         _TraceCOMError(hr);
4840         SAFE_RELEASE(pCollection);
4841         return -1;
4842     }
4843
4844     return 0;
4845 }
4846
4847 // ----------------------------------------------------------------------------
4848 //  _EnumerateEndpointDevicesAll
4849 // ----------------------------------------------------------------------------
4850
4851 int32_t AudioDeviceWindowsCore::_EnumerateEndpointDevicesAll(EDataFlow dataFlow) const
4852 {
4853     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4854
4855     assert(_ptrEnumerator != NULL);
4856
4857     HRESULT hr = S_OK;
4858     IMMDeviceCollection *pCollection = NULL;
4859     IMMDevice *pEndpoint = NULL;
4860     IPropertyStore *pProps = NULL;
4861     IAudioEndpointVolume* pEndpointVolume = NULL;
4862     LPWSTR pwszID = NULL;
4863
4864     // Generate a collection of audio endpoint devices in the system.
4865     // Get states for *all* endpoint devices.
4866     // Output: IMMDeviceCollection interface.
4867     hr = _ptrEnumerator->EnumAudioEndpoints(
4868                                  dataFlow,            // data-flow direction (input parameter)
4869                                  DEVICE_STATE_ACTIVE | DEVICE_STATE_DISABLED | DEVICE_STATE_UNPLUGGED,
4870                                  &pCollection);        // release interface when done
4871
4872     EXIT_ON_ERROR(hr);
4873
4874     // use the IMMDeviceCollection interface...
4875
4876     UINT count = 0;
4877
4878     // Retrieve a count of the devices in the device collection.
4879     hr = pCollection->GetCount(&count);
4880     EXIT_ON_ERROR(hr);
4881     if (dataFlow == eRender)
4882         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "#rendering endpoint devices (counting all): %u", count);
4883     else if (dataFlow == eCapture)
4884         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "#capturing endpoint devices (counting all): %u", count);
4885
4886     if (count == 0)
4887     {
4888         return 0;
4889     }
4890
4891     // Each loop prints the name of an endpoint device.
4892     for (ULONG i = 0; i < count; i++)
4893     {
4894         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Endpoint %d:", i);
4895
4896         // Get pointer to endpoint number i.
4897         // Output: IMMDevice interface.
4898         hr = pCollection->Item(
4899                             i,
4900                             &pEndpoint);
4901         CONTINUE_ON_ERROR(hr);
4902
4903         // use the IMMDevice interface of the specified endpoint device...
4904
4905         // Get the endpoint ID string (uniquely identifies the device among all audio endpoint devices)
4906         hr = pEndpoint->GetId(&pwszID);
4907         CONTINUE_ON_ERROR(hr);
4908         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "ID string    : %S", pwszID);
4909
4910         // Retrieve an interface to the device's property store.
4911         // Output: IPropertyStore interface.
4912         hr = pEndpoint->OpenPropertyStore(
4913                           STGM_READ,
4914                           &pProps);
4915         CONTINUE_ON_ERROR(hr);
4916
4917         // use the IPropertyStore interface...
4918
4919         PROPVARIANT varName;
4920         // Initialize container for property value.
4921         PropVariantInit(&varName);
4922
4923         // Get the endpoint's friendly-name property.
4924         // Example: "Speakers (Realtek High Definition Audio)"
4925         hr = pProps->GetValue(
4926                        PKEY_Device_FriendlyName,
4927                        &varName);
4928         CONTINUE_ON_ERROR(hr);
4929         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "friendly name: \"%S\"", varName.pwszVal);
4930
4931         // Get the endpoint's current device state
4932         DWORD dwState;
4933         hr = pEndpoint->GetState(&dwState);
4934         CONTINUE_ON_ERROR(hr);
4935         if (dwState & DEVICE_STATE_ACTIVE)
4936             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "state (0x%x)  : *ACTIVE*", dwState);
4937         if (dwState & DEVICE_STATE_DISABLED)
4938             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "state (0x%x)  : DISABLED", dwState);
4939         if (dwState & DEVICE_STATE_NOTPRESENT)
4940             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "state (0x%x)  : NOTPRESENT", dwState);
4941         if (dwState & DEVICE_STATE_UNPLUGGED)
4942             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "state (0x%x)  : UNPLUGGED", dwState);
4943
4944         // Check the hardware volume capabilities.
4945         DWORD dwHwSupportMask = 0;
4946         hr = pEndpoint->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL,
4947                                NULL, (void**)&pEndpointVolume);
4948         CONTINUE_ON_ERROR(hr);
4949         hr = pEndpointVolume->QueryHardwareSupport(&dwHwSupportMask);
4950         CONTINUE_ON_ERROR(hr);
4951         if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_VOLUME)
4952             // The audio endpoint device supports a hardware volume control
4953             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "hwmask (0x%x) : HARDWARE_SUPPORT_VOLUME", dwHwSupportMask);
4954         if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_MUTE)
4955             // The audio endpoint device supports a hardware mute control
4956             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "hwmask (0x%x) : HARDWARE_SUPPORT_MUTE", dwHwSupportMask);
4957         if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_METER)
4958             // The audio endpoint device supports a hardware peak meter
4959             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "hwmask (0x%x) : HARDWARE_SUPPORT_METER", dwHwSupportMask);
4960
4961         // Check the channel count (#channels in the audio stream that enters or leaves the audio endpoint device)
4962         UINT nChannelCount(0);
4963         hr = pEndpointVolume->GetChannelCount(
4964                                 &nChannelCount);
4965         CONTINUE_ON_ERROR(hr);
4966         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "#channels    : %u", nChannelCount);
4967
4968         if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_VOLUME)
4969         {
4970             // Get the volume range.
4971             float fLevelMinDB(0.0);
4972             float fLevelMaxDB(0.0);
4973             float fVolumeIncrementDB(0.0);
4974             hr = pEndpointVolume->GetVolumeRange(
4975                                     &fLevelMinDB,
4976                                     &fLevelMaxDB,
4977                                     &fVolumeIncrementDB);
4978             CONTINUE_ON_ERROR(hr);
4979             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "volume range : %4.2f (min), %4.2f (max), %4.2f (inc) [dB]",
4980                 fLevelMinDB, fLevelMaxDB, fVolumeIncrementDB);
4981
4982             // The volume range from vmin = fLevelMinDB to vmax = fLevelMaxDB is divided
4983             // into n uniform intervals of size vinc = fVolumeIncrementDB, where
4984             // n = (vmax ?vmin) / vinc.
4985             // The values vmin, vmax, and vinc are measured in decibels. The client can set
4986             // the volume level to one of n + 1 discrete values in the range from vmin to vmax.
4987             int n = (int)((fLevelMaxDB-fLevelMinDB)/fVolumeIncrementDB);
4988             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "#intervals   : %d", n);
4989
4990             // Get information about the current step in the volume range.
4991             // This method represents the volume level of the audio stream that enters or leaves
4992             // the audio endpoint device as an index or "step" in a range of discrete volume levels.
4993             // Output value nStepCount is the number of steps in the range. Output value nStep
4994             // is the step index of the current volume level. If the number of steps is n = nStepCount,
4995             // then step index nStep can assume values from 0 (minimum volume) to n ?1 (maximum volume).
4996             UINT nStep(0);
4997             UINT nStepCount(0);
4998             hr = pEndpointVolume->GetVolumeStepInfo(
4999                                     &nStep,
5000                                     &nStepCount);
5001             CONTINUE_ON_ERROR(hr);
5002             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "volume steps : %d (nStep), %d (nStepCount)", nStep, nStepCount);
5003         }
5004 Next:
5005         if (FAILED(hr)) {
5006           WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
5007                        "Error when logging device information");
5008         }
5009         CoTaskMemFree(pwszID);
5010         pwszID = NULL;
5011         PropVariantClear(&varName);
5012         SAFE_RELEASE(pProps);
5013         SAFE_RELEASE(pEndpoint);
5014         SAFE_RELEASE(pEndpointVolume);
5015     }
5016     SAFE_RELEASE(pCollection);
5017     return 0;
5018
5019 Exit:
5020     _TraceCOMError(hr);
5021     CoTaskMemFree(pwszID);
5022     pwszID = NULL;
5023     SAFE_RELEASE(pCollection);
5024     SAFE_RELEASE(pEndpoint);
5025     SAFE_RELEASE(pEndpointVolume);
5026     SAFE_RELEASE(pProps);
5027     return -1;
5028 }
5029
5030 // ----------------------------------------------------------------------------
5031 //  _TraceCOMError
5032 // ----------------------------------------------------------------------------
5033
5034 void AudioDeviceWindowsCore::_TraceCOMError(HRESULT hr) const
5035 {
5036     TCHAR buf[MAXERRORLENGTH];
5037     TCHAR errorText[MAXERRORLENGTH];
5038
5039     const DWORD dwFlags = FORMAT_MESSAGE_FROM_SYSTEM |
5040                           FORMAT_MESSAGE_IGNORE_INSERTS;
5041     const DWORD dwLangID = MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US);
5042
5043     // Gets the system's human readable message string for this HRESULT.
5044     // All error message in English by default.
5045     DWORD messageLength = ::FormatMessageW(dwFlags,
5046                                            0,
5047                                            hr,
5048                                            dwLangID,
5049                                            errorText,
5050                                            MAXERRORLENGTH,
5051                                            NULL);
5052
5053     assert(messageLength <= MAXERRORLENGTH);
5054
5055     // Trims tailing white space (FormatMessage() leaves a trailing cr-lf.).
5056     for (; messageLength && ::isspace(errorText[messageLength - 1]);
5057          --messageLength)
5058     {
5059         errorText[messageLength - 1] = '\0';
5060     }
5061
5062     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
5063         "Core Audio method failed (hr=0x%x)", hr);
5064     StringCchPrintf(buf, MAXERRORLENGTH, TEXT("Error details: "));
5065     StringCchCat(buf, MAXERRORLENGTH, errorText);
5066     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "%s", WideToUTF8(buf));
5067 }
5068
5069 // ----------------------------------------------------------------------------
5070 //  _SetThreadName
5071 // ----------------------------------------------------------------------------
5072
// Names the thread |dwThreadID| for the Visual Studio debugger by raising
// the MSVC-specific exception 0x406D1388 carrying a THREADNAME_INFO
// payload. Has no effect when no debugger is attached.
void AudioDeviceWindowsCore::_SetThreadName(DWORD dwThreadID, LPCSTR szThreadName)
{
    // See http://msdn.microsoft.com/en-us/library/xcb2z8hs(VS.71).aspx for details on the code
    // in this function. Name of article is "Setting a Thread Name (Unmanaged)".

    THREADNAME_INFO info;
    info.dwType = 0x1000;            // Must be 0x1000 per the MSDN protocol.
    info.szName = szThreadName;
    info.dwThreadID = dwThreadID;    // Caller's thread ID (-1 = current thread per MSDN).
    info.dwFlags = 0;

    // The debugger intercepts this exception and reads the payload; when
    // no debugger is present the SEH handler swallows it and execution
    // continues normally.
    __try
    {
        RaiseException( 0x406D1388, 0, sizeof(info)/sizeof(DWORD), (ULONG_PTR *)&info );
    }
    __except (EXCEPTION_CONTINUE_EXECUTION)
    {
    }
}
5092
5093 // ----------------------------------------------------------------------------
5094 //  WideToUTF8
5095 // ----------------------------------------------------------------------------
5096
// Converts |src| to UTF-8 into the member buffer _str and returns a
// pointer to it; the result is only valid until the next call (and the
// method is not thread-safe for the same reason). In non-UNICODE builds
// |src| is already narrow and is returned as-is.
char* AudioDeviceWindowsCore::WideToUTF8(const TCHAR* src) const {
#ifdef UNICODE
    const size_t kStrLen = sizeof(_str);
    memset(_str, 0, kStrLen);
    // Get required size (in bytes) to be able to complete the conversion.
    // NOTE(review): a return of 0 signals failure, yet 0 <= kStrLen still
    // passes the check below and the conversion is re-attempted; the
    // second call's own failure path then zeroes the buffer. Also int is
    // compared against size_t here (signed/unsigned) — benign since
    // WideCharToMultiByte never returns a negative value, but worth
    // confirming.
    int required_size = WideCharToMultiByte(CP_UTF8, 0, src, -1, _str, 0, 0, 0);
    if (required_size <= kStrLen)
    {
        // Process the entire input string, including the terminating null char.
        if (WideCharToMultiByte(CP_UTF8, 0, src, -1, _str, kStrLen, 0, 0) == 0)
            memset(_str, 0, kStrLen);
    }
    return _str;
#else
    return const_cast<char*>(src);
#endif
}
5114
5115
5116 bool AudioDeviceWindowsCore::KeyPressed() const{
5117
5118   int key_down = 0;
5119   for (int key = VK_SPACE; key < VK_NUMLOCK; key++) {
5120     short res = GetAsyncKeyState(key);
5121     key_down |= res & 0x1; // Get the LSB
5122   }
5123   return (key_down > 0);
5124 }
5125 }  // namespace webrtc
5126
5127 #endif  // WEBRTC_WINDOWS_CORE_AUDIO_BUILD