// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/audio/win/audio_low_latency_input_win.h"

#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/utf_string_conversions.h"
#include "media/audio/win/audio_manager_win.h"
#include "media/audio/win/avrt_wrapper_win.h"

using base::win::ScopedComPtr;
using base::win::ScopedCOMInitializer;

WASAPIAudioInputStream::WASAPIAudioInputStream(
    AudioManagerWin* manager, const AudioParameters& params,
    const std::string& device_id)
    : manager_(manager),
      capture_thread_(NULL),
      opened_(false),
      started_(false),
      endpoint_buffer_size_frames_(0),
      device_id_(device_id),
      sink_(NULL) {
  DCHECK(manager_);

  // Load the Avrt DLL if not already loaded. Required to support MMCSS.
  bool avrt_init = avrt::Initialize();
  DCHECK(avrt_init) << "Failed to load the Avrt.dll";

  // Set up the desired capture format specified by the client.
  format_.nSamplesPerSec = params.sample_rate();
  format_.wFormatTag = WAVE_FORMAT_PCM;
  format_.wBitsPerSample = params.bits_per_sample();
  format_.nChannels = params.channels();
  format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels;
  format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign;
  format_.cbSize = 0;
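
  // Example: 16-bit stereo at 48000 Hz yields nBlockAlign = (16 / 8) * 2 = 4
  // bytes per frame and nAvgBytesPerSec = 48000 * 4 = 192000 bytes per second.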

  // Size in bytes of each audio frame.
  frame_size_ = format_.nBlockAlign;
  // Store size of audio packets which we expect to get from the audio
  // endpoint device in each capture event.
  packet_size_frames_ = params.GetBytesPerBuffer() / format_.nBlockAlign;
  packet_size_bytes_ = params.GetBytesPerBuffer();
  DVLOG(1) << "Number of bytes per audio frame  : " << frame_size_;
  DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;

  // All events are auto-reset events and non-signaled initially.

  // Create the event which the audio engine will signal each time
  // a buffer becomes ready to be processed by the client.
  audio_samples_ready_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
  DCHECK(audio_samples_ready_event_.IsValid());

  // Create the event which will be set in Stop() when capturing shall stop.
  stop_capture_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
  DCHECK(stop_capture_event_.IsValid());
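
  // With auto-reset events, WaitForMultipleObjects() in the capture thread
  // returns the event to the non-signaled state as soon as the wait is
  // satisfied, so the capture loop never needs to call ResetEvent().
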
  ms_to_frame_count_ = static_cast<double>(params.sample_rate()) / 1000.0;
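
  // QueryPerformanceCounter() readings are converted to 100-ns units below;
  // one second corresponds to 10,000,000 such units.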
  LARGE_INTEGER performance_frequency;
  if (QueryPerformanceFrequency(&performance_frequency)) {
    perf_count_to_100ns_units_ =
        (10000000.0 / static_cast<double>(performance_frequency.QuadPart));
  } else {
    LOG(ERROR) << "High-resolution performance counters are not supported.";
    perf_count_to_100ns_units_ = 0.0;
  }
}

WASAPIAudioInputStream::~WASAPIAudioInputStream() {}

bool WASAPIAudioInputStream::Open() {
  DCHECK(CalledOnValidThread());
  // Verify that we are not already opened.
  if (opened_)
    return false;

  // Obtain a reference to the IMMDevice interface of the capturing
  // device with the specified unique identifier or role which was
  // set at construction.
  HRESULT hr = SetCaptureDevice();
  if (FAILED(hr))
    return false;

  // Obtain an IAudioClient interface which enables us to create and
  // initialize an audio stream between an audio application and the
  // audio engine.
  hr = ActivateCaptureDevice();
  if (FAILED(hr))
    return false;

  // Retrieve the stream format which the audio engine uses for its
  // internal processing/mixing of shared-mode streams. This call is for
  // diagnostic purposes only and is a no-op in release builds.
  hr = GetAudioEngineStreamFormat();
  if (FAILED(hr))
    return false;

  // Verify that the selected audio endpoint supports the specified format
  // set during construction.
  if (!DesiredFormatIsSupported())
    return false;

  // Initialize the audio stream between the client and the device using
  // shared mode and the lowest possible glitch-free latency.
  hr = InitializeAudioEngine();

  opened_ = SUCCEEDED(hr);
  return opened_;
}

void WASAPIAudioInputStream::Start(AudioInputCallback* callback) {
  DCHECK(CalledOnValidThread());
  DCHECK(callback);
  DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully";
  if (!opened_ || started_)
    return;
  sink_ = callback;

  // Starts periodic AGC microphone measurements if the AGC has been
  // enabled using SetAutomaticGainControl().
  StartAgc();

  // Create and start the thread that will drive the capturing by waiting
  // for capture events.
  capture_thread_ =
      new base::DelegateSimpleThread(this, "wasapi_capture_thread");
  capture_thread_->Start();

  // Start streaming data between the endpoint buffer and the audio engine.
  HRESULT hr = audio_client_->Start();
  DLOG_IF(ERROR, FAILED(hr)) << "Failed to start input streaming.";

  if (SUCCEEDED(hr) && audio_render_client_for_loopback_)
    hr = audio_render_client_for_loopback_->Start();

  started_ = SUCCEEDED(hr);
}

void WASAPIAudioInputStream::Stop() {
  DCHECK(CalledOnValidThread());
  DVLOG(1) << "WASAPIAudioInputStream::Stop()";
  if (!started_)
    return;

  // Stops periodic AGC microphone measurements.
  StopAgc();

  // Shut down the capture thread.
  if (stop_capture_event_.IsValid()) {
    SetEvent(stop_capture_event_.Get());
  }

  // Stop the input audio streaming.
  HRESULT hr = audio_client_->Stop();
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to stop input streaming.";
  }

  // Wait until the thread completes and perform cleanup.
  if (capture_thread_) {
    SetEvent(stop_capture_event_.Get());
    capture_thread_->Join();
    capture_thread_ = NULL;
  }

  started_ = false;
  sink_ = NULL;
}

void WASAPIAudioInputStream::Close() {
  DVLOG(1) << "WASAPIAudioInputStream::Close()";
  // It is valid to call Close() before calling Open() or Start().
  // It is also valid to call Close() after Start() has been called.
  Stop();

  // Inform the audio manager that we have been closed. This will cause our
  // destruction.
  manager_->ReleaseInputStream(this);
}

double WASAPIAudioInputStream::GetMaxVolume() {
  // Verify that Open() has been called successfully, to ensure that an audio
  // session exists and that an ISimpleAudioVolume interface has been created.
  DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully";
  if (!opened_)
    return 0.0;

  // The effective volume value is always in the range 0.0 to 1.0, hence
  // we can return a fixed value (=1.0) here.
  return 1.0;
}

void WASAPIAudioInputStream::SetVolume(double volume) {
  DVLOG(1) << "SetVolume(volume=" << volume << ")";
  DCHECK(CalledOnValidThread());
  DCHECK_GE(volume, 0.0);
  DCHECK_LE(volume, 1.0);

  DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully";
  if (!opened_)
    return;

  // Set a new master volume level. Valid volume levels are in the range
  // 0.0 to 1.0. Ignore volume-change events.
  HRESULT hr = simple_audio_volume_->SetMasterVolume(
      static_cast<float>(volume), NULL);
  DLOG_IF(WARNING, FAILED(hr)) << "Failed to set new input master volume.";

  // Update the AGC volume level based on the last setting above. Note that
  // the volume-level resolution is not infinite and it is therefore not
  // possible to assume that the volume provided as input parameter can be
  // used directly. Instead, a new query to the audio hardware is required.
  // This method does nothing if AGC is disabled.
  UpdateAgcVolume();
}

double WASAPIAudioInputStream::GetVolume() {
  DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully";
  if (!opened_)
    return 0.0;

  // Retrieve the current volume level. The value is in the range 0.0 to 1.0.
  float level = 0.0f;
  HRESULT hr = simple_audio_volume_->GetMasterVolume(&level);
  DLOG_IF(WARNING, FAILED(hr)) << "Failed to get input master volume.";
  return static_cast<double>(level);
}

int WASAPIAudioInputStream::HardwareSampleRate(
    const std::string& device_id) {
  base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format;
  HRESULT hr = GetMixFormat(device_id, &audio_engine_mix_format);
  if (FAILED(hr))
    return 0;

  return static_cast<int>(audio_engine_mix_format->nSamplesPerSec);
}

uint32 WASAPIAudioInputStream::HardwareChannelCount(
    const std::string& device_id) {
  base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format;
  HRESULT hr = GetMixFormat(device_id, &audio_engine_mix_format);
  if (FAILED(hr))
    return 0;

  return static_cast<uint32>(audio_engine_mix_format->nChannels);
}
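
// GetMixFormat() returns a WAVEFORMATEX that the audio engine allocates with
// CoTaskMemAlloc(); base::win::ScopedCoMem in the callers above releases it
// automatically with CoTaskMemFree().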
HRESULT WASAPIAudioInputStream::GetMixFormat(const std::string& device_id,
                                             WAVEFORMATEX** device_format) {
  // It is assumed that this static method is called from a COM thread, i.e.,
  // CoInitializeEx() is not called here to avoid STA/MTA conflicts.
  ScopedComPtr<IMMDeviceEnumerator> enumerator;
  HRESULT hr = enumerator.CreateInstance(__uuidof(MMDeviceEnumerator), NULL,
                                         CLSCTX_INPROC_SERVER);
  if (FAILED(hr))
    return hr;

  ScopedComPtr<IMMDevice> endpoint_device;
  if (device_id == AudioManagerBase::kDefaultDeviceId) {
    // Retrieve the default capture audio endpoint.
    hr = enumerator->GetDefaultAudioEndpoint(eCapture, eConsole,
                                             endpoint_device.Receive());
  } else if (device_id == AudioManagerBase::kLoopbackInputDeviceId) {
    // Capture the default playback stream.
    hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole,
                                             endpoint_device.Receive());
  } else {
    // Retrieve a capture endpoint device that is specified by an endpoint
    // device-identification string.
    hr = enumerator->GetDevice(base::UTF8ToUTF16(device_id).c_str(),
                               endpoint_device.Receive());
  }
  if (FAILED(hr))
    return hr;

  ScopedComPtr<IAudioClient> audio_client;
  hr = endpoint_device->Activate(__uuidof(IAudioClient),
                                 CLSCTX_INPROC_SERVER,
                                 NULL,
                                 audio_client.ReceiveVoid());
  return SUCCEEDED(hr) ? audio_client->GetMixFormat(device_format) : hr;
}

void WASAPIAudioInputStream::Run() {
  ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);

  // Increase the thread priority.
  capture_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);

  // Enable MMCSS to ensure that this thread receives prioritized access to
  // CPU resources.
  DWORD task_index = 0;
  HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio",
                                                      &task_index);
  bool mmcss_is_ok =
      (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL));
  if (!mmcss_is_ok) {
    // Failed to enable MMCSS on this thread. It is not fatal but can lead
    // to reduced QoS at high load.
    DWORD err = GetLastError();
    LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ").";
  }
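
  // The "Pro Audio" MMCSS task class places this thread in the same
  // scheduling category as the audio engine's own threads, and
  // AVRT_PRIORITY_CRITICAL is the highest relative thread priority within
  // that class.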

  // Allocate a buffer with a size that enables us to take care of cases like:
  // 1) The recorded buffer size is smaller than, or does not match exactly,
  //    the selected packet size used in each callback.
  // 2) The selected buffer size is larger than the recorded buffer size in
  //    each event.
  size_t buffer_frame_index = 0;
  size_t capture_buffer_size = std::max(
      2 * endpoint_buffer_size_frames_ * frame_size_,
      2 * packet_size_frames_ * frame_size_);
  scoped_ptr<uint8[]> capture_buffer(new uint8[capture_buffer_size]);
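
  // Example sizing: with a 960-frame endpoint buffer, 4-byte frames and a
  // 440-frame packet, the allocation is max(2 * 960 * 4, 2 * 440 * 4) =
  // 7680 bytes, so a full endpoint burst plus leftover data always fits.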

  LARGE_INTEGER now_count;
  bool recording = true;
  bool error = false;
  double volume = GetVolume();
  HANDLE wait_array[2] = {stop_capture_event_, audio_samples_ready_event_};

  while (recording && !error) {
    HRESULT hr = S_FALSE;

    // Wait for a close-down event or a new capture event.
    DWORD wait_result = WaitForMultipleObjects(2, wait_array, FALSE, INFINITE);
    switch (wait_result) {
      case WAIT_FAILED:
        error = true;
        break;
      case WAIT_OBJECT_0 + 0:
        // |stop_capture_event_| has been set.
        recording = false;
        break;
      case WAIT_OBJECT_0 + 1:
        {
          // |audio_samples_ready_event_| has been set.
          BYTE* data_ptr = NULL;
          UINT32 num_frames_to_read = 0;
          DWORD flags = 0;
          UINT64 device_position = 0;
          UINT64 first_audio_frame_timestamp = 0;

          // Retrieve the amount of data in the capture endpoint buffer,
          // replace it with silence if required, create callbacks for each
          // packet and store non-delivered data for the next event.
          hr = audio_capture_client_->GetBuffer(&data_ptr,
                                                &num_frames_to_read,
                                                &flags,
                                                &device_position,
                                                &first_audio_frame_timestamp);
          if (FAILED(hr)) {
            DLOG(ERROR) << "Failed to get data from the capture buffer";
            break;
          }
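
          // |data_ptr| points at a packet that remains locked until the
          // matching ReleaseBuffer() call below; |flags| can report e.g.
          // AUDCLNT_BUFFERFLAGS_SILENT or a data discontinuity.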

          if (num_frames_to_read != 0) {
            size_t pos = buffer_frame_index * frame_size_;
            size_t num_bytes = num_frames_to_read * frame_size_;
            DCHECK_GE(capture_buffer_size, pos + num_bytes);

            if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
              // Clear out the local buffer since silence is reported.
              memset(&capture_buffer[pos], 0, num_bytes);
            } else {
              // Copy captured data from audio engine buffer to local buffer.
              memcpy(&capture_buffer[pos], data_ptr, num_bytes);
            }

            buffer_frame_index += num_frames_to_read;
          }

          hr = audio_capture_client_->ReleaseBuffer(num_frames_to_read);
          DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer";

          // Derive a delay estimate for the captured audio packet.
          // The value contains two parts (A+B), where A is the delay of the
          // first audio frame in the packet and B is the extra delay
          // contained in any stored data. Unit is in audio frames.
          QueryPerformanceCounter(&now_count);
          double audio_delay_frames =
              ((perf_count_to_100ns_units_ * now_count.QuadPart -
                first_audio_frame_timestamp) / 10000.0) * ms_to_frame_count_ +
              buffer_frame_index - num_frames_to_read;
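
          // The difference between "now" and the packet's QPC timestamp is
          // in 100-ns units; dividing by 10,000 converts it to milliseconds
          // and |ms_to_frame_count_| turns that age into frames (part A).
          // The trailing terms add the frames already stored locally ahead
          // of this packet (part B).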

          // Get a cached AGC volume level which is updated once every second
          // on the audio manager thread. Note that |volume| is also updated
          // each time SetVolume() is called through IPC by the render-side
          // AGC.
          GetAgcVolume(&volume);

          // Deliver captured data to the registered consumer using a packet
          // size which was specified at construction.
          uint32 delay_frames = static_cast<uint32>(audio_delay_frames + 0.5);
          while (buffer_frame_index >= packet_size_frames_) {
            uint8* audio_data =
                reinterpret_cast<uint8*>(capture_buffer.get());
            // Deliver data packet, delay estimation and volume level to
            // the user.
            sink_->OnData(this,
                          audio_data,
                          packet_size_bytes_,
                          delay_frames * frame_size_,
                          volume);

            // Store parts of the recorded data which can't be delivered
            // using the current packet size. The stored section will be used
            // either in the next while-loop iteration or in the next
            // capture event.
            memmove(&capture_buffer[0],
                    &capture_buffer[packet_size_bytes_],
                    (buffer_frame_index - packet_size_frames_) * frame_size_);
            buffer_frame_index -= packet_size_frames_;
            delay_frames -= packet_size_frames_;
          }
        }
        break;
    }
  }

  if (recording && error) {
    // TODO(henrika): perhaps it is worth improving the cleanup here by e.g.
    // stopping the audio client, joining the thread etc.?
    NOTREACHED() << "WASAPI capturing failed with error code "
                 << GetLastError();
  }

  if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) {
    PLOG(WARNING) << "Failed to disable MMCSS";
  }
}

void WASAPIAudioInputStream::HandleError(HRESULT err) {
  NOTREACHED() << "Error code: " << err;
  if (sink_)
    sink_->OnError(this);
}

HRESULT WASAPIAudioInputStream::SetCaptureDevice() {
  ScopedComPtr<IMMDeviceEnumerator> enumerator;
  HRESULT hr = enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
                                         NULL, CLSCTX_INPROC_SERVER);
  if (FAILED(hr))
    return hr;

  // Retrieve the IMMDevice by using the specified role or the specified
  // unique endpoint device-identification string.
  // TODO(henrika): possibly add support for the eCommunications role as well.
  if (device_id_ == AudioManagerBase::kDefaultDeviceId) {
    // Retrieve the default capture audio endpoint for the specified role.
    // Note that, in Windows Vista, the MMDevice API supports device roles
    // but the system-supplied user interface programs do not.
    hr = enumerator->GetDefaultAudioEndpoint(eCapture, eConsole,
                                             endpoint_device_.Receive());
  } else if (device_id_ == AudioManagerBase::kLoopbackInputDeviceId) {
    // Capture the default playback stream.
    hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole,
                                             endpoint_device_.Receive());
  } else {
    // Retrieve a capture endpoint device that is specified by an endpoint
    // device-identification string.
    hr = enumerator->GetDevice(base::UTF8ToUTF16(device_id_).c_str(),
                               endpoint_device_.Receive());
  }
  if (FAILED(hr))
    return hr;

  // Verify that the audio endpoint device is active, i.e., the audio
  // adapter that connects to the endpoint device is present and enabled.
  DWORD state = DEVICE_STATE_DISABLED;
  hr = endpoint_device_->GetState(&state);
  if (FAILED(hr))
    return hr;

  if (!(state & DEVICE_STATE_ACTIVE)) {
    DLOG(ERROR) << "Selected capture device is not active.";
    hr = E_ACCESSDENIED;
  }

  return hr;
}

HRESULT WASAPIAudioInputStream::ActivateCaptureDevice() {
  // Creates and activates an IAudioClient COM object given the selected
  // capture endpoint device.
  HRESULT hr = endpoint_device_->Activate(__uuidof(IAudioClient),
                                          CLSCTX_INPROC_SERVER,
                                          NULL,
                                          audio_client_.ReceiveVoid());
  return hr;
}

HRESULT WASAPIAudioInputStream::GetAudioEngineStreamFormat() {
  HRESULT hr = S_OK;
#ifndef NDEBUG
  // The GetMixFormat() method retrieves the stream format that the
  // audio engine uses for its internal processing of shared-mode streams.
  // The method always uses a WAVEFORMATEXTENSIBLE structure, instead
  // of a stand-alone WAVEFORMATEX structure, to specify the format.
  // A WAVEFORMATEXTENSIBLE structure can specify both the mapping of
  // channels to speakers and the number of bits of precision in each sample.
  base::win::ScopedCoMem<WAVEFORMATEXTENSIBLE> format_ex;
  hr = audio_client_->GetMixFormat(
      reinterpret_cast<WAVEFORMATEX**>(&format_ex));

  // See http://msdn.microsoft.com/en-us/windows/hardware/gg463006#EFH
  // for details on the WAVE file format.
  WAVEFORMATEX format = format_ex->Format;
  DVLOG(2) << "WAVEFORMATEX:";
  DVLOG(2) << "  wFormatTag     : 0x" << std::hex << format.wFormatTag;
  DVLOG(2) << "  nChannels      : " << format.nChannels;
  DVLOG(2) << "  nSamplesPerSec : " << format.nSamplesPerSec;
  DVLOG(2) << "  nAvgBytesPerSec: " << format.nAvgBytesPerSec;
  DVLOG(2) << "  nBlockAlign    : " << format.nBlockAlign;
  DVLOG(2) << "  wBitsPerSample : " << format.wBitsPerSample;
  DVLOG(2) << "  cbSize         : " << format.cbSize;

  DVLOG(2) << "WAVEFORMATEXTENSIBLE:";
  DVLOG(2) << "  wValidBitsPerSample: "
           << format_ex->Samples.wValidBitsPerSample;
  DVLOG(2) << "  dwChannelMask      : 0x" << std::hex
           << format_ex->dwChannelMask;
  if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_PCM)
    DVLOG(2) << "  SubFormat          : KSDATAFORMAT_SUBTYPE_PCM";
  else if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT)
    DVLOG(2) << "  SubFormat          : KSDATAFORMAT_SUBTYPE_IEEE_FLOAT";
  else if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_WAVEFORMATEX)
    DVLOG(2) << "  SubFormat          : KSDATAFORMAT_SUBTYPE_WAVEFORMATEX";
#endif
  return hr;
}

bool WASAPIAudioInputStream::DesiredFormatIsSupported() {
  // An application that uses WASAPI to manage shared-mode streams can rely
  // on the audio engine to perform only limited format conversions. The audio
  // engine can convert between a standard PCM sample size used by the
  // application and the floating-point samples that the engine uses for its
  // internal processing. However, the format for an application stream
  // typically must have the same number of channels and the same sample
  // rate as the stream format used by the device.
  // Many audio devices support both PCM and non-PCM stream formats. However,
  // the audio engine can mix only PCM streams.
  base::win::ScopedCoMem<WAVEFORMATEX> closest_match;
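  // IsFormatSupported() returns S_OK on an exact match. In shared mode it
  // returns S_FALSE, together with a suggested closest match, when the
  // requested format is not supported as-is.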
  HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED,
                                                &format_, &closest_match);
  DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported "
                                << "but a closest match exists.";
  return (hr == S_OK);
}

HRESULT WASAPIAudioInputStream::InitializeAudioEngine() {
  DWORD flags;
  // Use event-driven mode only for regular input devices. For loopback the
  // EVENTCALLBACK flag is specified when initializing
  // |audio_render_client_for_loopback_|.
  if (device_id_ == AudioManagerBase::kLoopbackInputDeviceId) {
    flags = AUDCLNT_STREAMFLAGS_LOOPBACK | AUDCLNT_STREAMFLAGS_NOPERSIST;
  } else {
    flags =
        AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST;
  }
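
  // Note: AUDCLNT_STREAMFLAGS_NOPERSIST asks the audio engine not to persist
  // volume and mute settings for this session across application restarts.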

  // Initialize the audio stream between the client and the device.
  // We connect indirectly through the audio engine by using shared mode.
  // Note that |hnsBufferDuration| is set to 0, which ensures that the
  // buffer is never smaller than the minimum buffer size needed to ensure
  // that glitches do not occur between the periodic processing passes.
  // This setting should lead to the lowest possible latency.
  HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED,
                                         flags,
                                         0,  // hnsBufferDuration
                                         0,
                                         &format_,
                                         NULL);
  if (FAILED(hr))
    return hr;

  // Retrieve the length of the endpoint buffer shared between the client
  // and the audio engine. The buffer length determines the maximum amount
  // of capture data that the audio engine can read from the endpoint buffer
  // during a single processing pass.
  // A typical value is 960 audio frames <=> 20 ms @ 48 kHz sample rate.
  hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_);
  if (FAILED(hr))
    return hr;

  DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_
           << " [frames]";

#ifndef NDEBUG
  // The period between processing passes by the audio engine is fixed for a
  // particular audio endpoint device and represents the smallest processing
  // quantum for the audio engine. This period plus the stream latency between
  // the buffer and endpoint device represents the minimum possible latency
  // that an audio application can achieve.
  // TODO(henrika): possibly remove this section when all parts are ready.
  REFERENCE_TIME device_period_shared_mode = 0;
  REFERENCE_TIME device_period_exclusive_mode = 0;
  HRESULT hr_dbg = audio_client_->GetDevicePeriod(
      &device_period_shared_mode, &device_period_exclusive_mode);
  if (SUCCEEDED(hr_dbg)) {
    DVLOG(1) << "device period: "
             << static_cast<double>(device_period_shared_mode / 10000.0)
             << " [ms]";
  }

  REFERENCE_TIME latency = 0;
  hr_dbg = audio_client_->GetStreamLatency(&latency);
  if (SUCCEEDED(hr_dbg)) {
    DVLOG(1) << "stream latency: " << static_cast<double>(latency / 10000.0)
             << " [ms]";
  }
#endif

  // Set the event handle that the audio engine will signal each time a
  // buffer becomes ready to be processed by the client.
  //
  // In the loopback case the capture device doesn't receive any events, so
  // we need to create a separate playback client to get notifications.
  // According to MSDN:
  //
  // A pull-mode capture client does not receive any events when a stream is
  // initialized with event-driven buffering and is loopback-enabled. To
  // work around this, initialize a render stream in event-driven mode. Each
  // time the client receives an event for the render stream, it must signal
  // the capture client to run the capture thread that reads the next set of
  // samples from the capture endpoint buffer.
  //
  // http://msdn.microsoft.com/en-us/library/windows/desktop/dd316551(v=vs.85).aspx
  if (device_id_ == AudioManagerBase::kLoopbackInputDeviceId) {
    hr = endpoint_device_->Activate(
        __uuidof(IAudioClient), CLSCTX_INPROC_SERVER, NULL,
        audio_render_client_for_loopback_.ReceiveVoid());
    if (FAILED(hr))
      return hr;

    hr = audio_render_client_for_loopback_->Initialize(
        AUDCLNT_SHAREMODE_SHARED,
        AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST,
        0, 0, &format_, NULL);
    if (FAILED(hr))
      return hr;

    hr = audio_render_client_for_loopback_->SetEventHandle(
        audio_samples_ready_event_.Get());
  } else {
    hr = audio_client_->SetEventHandle(audio_samples_ready_event_.Get());
  }
  if (FAILED(hr))
    return hr;

  // Get access to the IAudioCaptureClient interface. This interface
  // enables us to read input data from the capture endpoint buffer.
  hr = audio_client_->GetService(__uuidof(IAudioCaptureClient),
                                 audio_capture_client_.ReceiveVoid());
  if (FAILED(hr))
    return hr;

  // Obtain a reference to the ISimpleAudioVolume interface which enables
  // us to control the master volume level of an audio session.
  hr = audio_client_->GetService(__uuidof(ISimpleAudioVolume),
                                 simple_audio_volume_.ReceiveVoid());
  return hr;
}